Snap for 7550844 from 50e98251ff58b2ec45a09840e774cdb67ac75594 to mainline-conscrypt-release

Change-Id: I6c1cb077c5b6a38cb6940c46308179acec481024
diff --git a/.vscode/c_cpp_properties.json b/.vscode/c_cpp_properties.json
index 86c10c8..1e849fb 100644
--- a/.vscode/c_cpp_properties.json
+++ b/.vscode/c_cpp_properties.json
@@ -10,7 +10,7 @@
             ],
             "defines": [
                 "HOST_BUILD",
-                "GOLDFISH_VULKAN"
+                "GFXSTREAM"
             ],
             "macFrameworkPath": [
                 "/System/Library/Frameworks",
@@ -24,4 +24,4 @@
         }
     ],
     "version": 4
-}
\ No newline at end of file
+}
diff --git a/Android.bp b/Android.bp
index 482e5a6..b4f361f 100644
--- a/Android.bp
+++ b/Android.bp
@@ -15,3 +15,35 @@
  */
 
 soong_namespace {}
+
+package {
+    default_applicable_licenses: ["device_generic_goldfish-opengl_license"],
+}
+
+// Added automatically by a large-scale-change that took the approach of
+// 'apply every license found to every target'. While this makes sure we respect
+// every license restriction, it may not be entirely correct.
+//
+// e.g. GPL in an MIT project might only apply to the contrib/ directory.
+//
+// Please consider splitting the single license below into multiple licenses,
+// taking care not to lose any license_kind information, and overriding the
+// default license using the 'licenses: [...]' property on targets as needed.
+//
+// For unused files, consider creating a 'fileGroup' with "//visibility:private"
+// to attach the license to, and including a comment whether the files may be
+// used in the current project.
+// See: http://go/android-license-faq
+license {
+    name: "device_generic_goldfish-opengl_license",
+    visibility: [":__subpackages__"],
+    license_kinds: [
+        "SPDX-license-identifier-Apache-2.0",
+        "SPDX-license-identifier-BSD",
+        "SPDX-license-identifier-GPL-2.0",
+        "SPDX-license-identifier-MIT",
+    ],
+    license_text: [
+        "LICENSE",
+    ],
+}
diff --git a/Android.mk b/Android.mk
index 90d028b..3444354 100644
--- a/Android.mk
+++ b/Android.mk
@@ -35,7 +35,7 @@
 EMUGL_COMMON_CFLAGS := -DWITH_GLES2
 
 # Whether or not to build the Vulkan library.
-BUILD_EMULATOR_VULKAN := false
+GFXSTREAM := false
 
 # Host build
 ifeq (true,$(GOLDFISH_OPENGL_BUILD_FOR_HOST))
@@ -43,7 +43,7 @@
 GOLDFISH_OPENGL_SHOULD_BUILD := true
 GOLDFISH_OPENGL_LIB_SUFFIX := _host
 
-BUILD_EMULATOR_VULKAN := true
+GFXSTREAM := true
 
 # Set modern defaults for the codename, version, etc.
 PLATFORM_VERSION_CODENAME:=Q
@@ -63,7 +63,7 @@
     -DGL_GLEXT_PROTOTYPES \
     -fvisibility=default \
     -DPAGE_SIZE=4096 \
-    -DGOLDFISH_VULKAN \
+    -DGFXSTREAM \
     -Wno-unused-parameter
 
 endif # GOLDFISH_OPENGL_BUILD_FOR_HOST
@@ -100,8 +100,8 @@
 endif
 
 ifeq ($(shell test $(PLATFORM_SDK_VERSION) -gt 27 && echo isApi28OrHigher),isApi28OrHigher)
-    BUILD_EMULATOR_VULKAN := true
-    EMUGL_COMMON_CFLAGS += -DGOLDFISH_VULKAN
+    GFXSTREAM := true
+    EMUGL_COMMON_CFLAGS += -DGFXSTREAM
 endif
 
 # Include common definitions used by all the modules included later
@@ -133,6 +133,11 @@
 include $(GOLDFISH_OPENGL_PATH)/shared/qemupipe/Android.mk
 include $(GOLDFISH_OPENGL_PATH)/shared/gralloc_cb/Android.mk
 include $(GOLDFISH_OPENGL_PATH)/shared/GoldfishAddressSpace/Android.mk
+
+ifeq (true,$(GFXSTREAM)) # android-emu
+    include $(GOLDFISH_OPENGL_PATH)/android-emu/Android.mk
+endif
+
 include $(GOLDFISH_OPENGL_PATH)/shared/OpenglCodecCommon/Android.mk
 
 # Encoder shared libraries
@@ -140,13 +145,15 @@
 include $(GOLDFISH_OPENGL_PATH)/system/GLESv2_enc/Android.mk
 include $(GOLDFISH_OPENGL_PATH)/system/renderControl_enc/Android.mk
 
-ifeq (true,$(BUILD_EMULATOR_VULKAN)) # Vulkan libs
-    include $(GOLDFISH_OPENGL_PATH)/android-emu/Android.mk
+ifeq (true,$(GFXSTREAM)) # Vulkan libs
     include $(GOLDFISH_OPENGL_PATH)/system/vulkan_enc/Android.mk
 endif
 
 include $(GOLDFISH_OPENGL_PATH)/system/OpenglSystemCommon/Android.mk
 
+# Profiler library
+include $(GOLDFISH_OPENGL_PATH)/system/profiler/Android.mk
+
 # System shared libraries
 include $(GOLDFISH_OPENGL_PATH)/system/GLESv1/Android.mk
 include $(GOLDFISH_OPENGL_PATH)/system/GLESv2/Android.mk
@@ -159,7 +166,7 @@
 
 include $(GOLDFISH_OPENGL_PATH)/system/egl/Android.mk
 
-ifeq (true,$(BUILD_EMULATOR_VULKAN)) # Vulkan libs
+ifeq (true,$(GFXSTREAM)) # Vulkan libs
     include $(GOLDFISH_OPENGL_PATH)/system/vulkan/Android.mk
 endif
 
diff --git a/BUILD.gn b/BUILD.gn
index 62cb990..2e204a7 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -2,32 +2,41 @@
   sources = [
     "android-emu/android/base/AlignedBuf.cpp",
     "android-emu/android/base/AlignedBuf.h",
-    "android-emu/android/base/Pool.cpp",
-    "android-emu/android/base/Pool.h",
-    "android-emu/android/base/ring_buffer.c",
+    "android-emu/android/base/Allocator.h",
     "android-emu/android/base/AndroidSubAllocator.cpp",
     "android-emu/android/base/AndroidSubAllocator.h",
+    "android-emu/android/base/BumpPool.h",
+    "android-emu/android/base/Pool.cpp",
+    "android-emu/android/base/Pool.h",
+    "android-emu/android/base/Tracing.cpp",
+    "android-emu/android/base/Tracing.h",
     "android-emu/android/base/files/MemStream.cpp",
     "android-emu/android/base/files/MemStream.h",
     "android-emu/android/base/files/Stream.cpp",
     "android-emu/android/base/files/Stream.h",
     "android-emu/android/base/files/StreamSerializing.cpp",
     "android-emu/android/base/files/StreamSerializing.h",
+    "android-emu/android/base/fit/Defer.h",
+    "android-emu/android/base/fit/Function.h",
+    "android-emu/android/base/fit/FunctionInternal.h",
+    "android-emu/android/base/fit/Nullable.h",
+    "android-emu/android/base/fit/ThreadChecker.h",
+    "android-emu/android/base/fit/ThreadSafety.h",
+    "android-emu/android/base/fit/UtilityInternal.h",
+    "android-emu/android/base/ring_buffer.c",
     "android-emu/android/base/synchronization/AndroidConditionVariable.h",
     "android-emu/android/base/synchronization/AndroidLock.h",
-    "android-emu/android/base/synchronization/AndroidMessageChannel.h",
     "android-emu/android/base/synchronization/AndroidMessageChannel.cpp",
+    "android-emu/android/base/synchronization/AndroidMessageChannel.h",
+    "android-emu/android/base/threads/AndroidFunctorThread.cpp",
     "android-emu/android/base/threads/AndroidFunctorThread.h",
     "android-emu/android/base/threads/AndroidThread.h",
     "android-emu/android/base/threads/AndroidThreadStore.h",
-    "android-emu/android/base/threads/AndroidThreadTypes.h",
-    "android-emu/android/base/threads/AndroidWorkPool.h",
-    "android-emu/android/base/threads/AndroidFunctorThread.cpp",
     "android-emu/android/base/threads/AndroidThreadStore.h",
+    "android-emu/android/base/threads/AndroidThreadTypes.h",
     "android-emu/android/base/threads/AndroidThread_pthread.cpp",
     "android-emu/android/base/threads/AndroidWorkPool.cpp",
-    "android-emu/android/base/Tracing.cpp",
-    "android-emu/android/base/Tracing.h",
+    "android-emu/android/base/threads/AndroidWorkPool.h",
     "shared/GoldfishAddressSpace/goldfish_address_space.cpp",
     "shared/GoldfishAddressSpace/goldfish_address_space.h",
     "shared/OpenglCodecCommon/ChecksumCalculator.cpp",
@@ -37,8 +46,8 @@
     "shared/OpenglCodecCommon/goldfish_dma.cpp",
     "shared/OpenglCodecCommon/goldfish_dma.h",
     "shared/gralloc_cb/include/gralloc_cb_bp.h",
-    "shared/qemupipe/include/qemu_pipe_bp.h",
     "shared/qemupipe/include-types/qemu_pipe_types_bp.h",
+    "shared/qemupipe/include/qemu_pipe_bp.h",
     "shared/qemupipe/qemu_pipe_common.cpp",
     "shared/qemupipe/qemu_pipe_guest.cpp",
     "system/OpenglSystemCommon/AddressSpaceStream.cpp",
@@ -52,9 +61,11 @@
     "system/OpenglSystemCommon/ThreadInfo.h",
     "system/renderControl_enc/renderControl_enc.cpp",
     "system/renderControl_enc/renderControl_enc.h",
-    "system/vulkan/func_table.cpp",
-    "system/vulkan/func_table.h",
     "system/vulkan/goldfish_vulkan.cpp",
+    "system/vulkan_enc/CommandBufferStagingStream.cpp",
+    "system/vulkan_enc/CommandBufferStagingStream.h",
+    "system/vulkan_enc/DescriptorSetVirtualization.cpp",
+    "system/vulkan_enc/DescriptorSetVirtualization.h",
     "system/vulkan_enc/HostVisibleMemoryVirtualization.cpp",
     "system/vulkan_enc/HostVisibleMemoryVirtualization.h",
     "system/vulkan_enc/ResourceTracker.cpp",
@@ -69,12 +80,18 @@
     "system/vulkan_enc/VulkanHandleMapping.h",
     "system/vulkan_enc/VulkanStreamGuest.cpp",
     "system/vulkan_enc/VulkanStreamGuest.h",
+    "system/vulkan_enc/func_table.cpp",
+    "system/vulkan_enc/func_table.h",
+    "system/vulkan_enc/goldfish_vk_counting_guest.cpp",
+    "system/vulkan_enc/goldfish_vk_counting_guest.h",
     "system/vulkan_enc/goldfish_vk_deepcopy_guest.cpp",
     "system/vulkan_enc/goldfish_vk_deepcopy_guest.h",
     "system/vulkan_enc/goldfish_vk_extension_structs_guest.cpp",
     "system/vulkan_enc/goldfish_vk_extension_structs_guest.h",
     "system/vulkan_enc/goldfish_vk_marshaling_guest.cpp",
     "system/vulkan_enc/goldfish_vk_marshaling_guest.h",
+    "system/vulkan_enc/goldfish_vk_reserved_marshaling_guest.cpp",
+    "system/vulkan_enc/goldfish_vk_reserved_marshaling_guest.h",
     "system/vulkan_enc/goldfish_vk_transform_guest.cpp",
     "system/vulkan_enc/goldfish_vk_transform_guest.h",
   ]
@@ -95,7 +112,7 @@
 
   defines = [
     "LOG_TAG=\"goldfish_vulkan\"",
-    "GOLDFISH_VULKAN",
+    "GFXSTREAM",
     "GOLDFISH_NO_GL",
     "VK_USE_PLATFORM_FUCHSIA",
     "PLATFORM_SDK_VERSION=1",
@@ -116,36 +133,47 @@
     "-Wno-unused-function",
     "-Wno-unused-value",
     "-Wno-unused-variable",
+    "-Wno-conversion",
   ]
 
   ldflags = [ "-static-libstdc++" ]
 
   if (target_os == "fuchsia") {
-    sources -= [ "system/OpenglSystemCommon/QemuPipeStream.cpp" ]
+    sources -= [
+      "shared/OpenglCodecCommon/goldfish_dma.cpp",
+      "shared/OpenglCodecCommon/goldfish_dma.h",
+      "shared/qemupipe/qemu_pipe_common.cpp",
+      "shared/qemupipe/qemu_pipe_guest.cpp",
+      "system/OpenglSystemCommon/QemuPipeStream.cpp",
+    ]
     sources += [
       "fuchsia/fuchsia_stdio.cc",
       "fuchsia/port.cc",
       "fuchsia/service_connector.cc",
       "system/OpenglSystemCommon/QemuPipeStreamFuchsia.cpp",
+      "system/OpenglSystemCommon/TraceProviderFuchsia.cpp",
+      "system/OpenglSystemCommon/TraceProviderFuchsia.h",
     ]
 
     include_dirs += [
       "fuchsia/include",
-      "//third_party/Vulkan-Headers/include"
-    ]
-
-    libs = [
-      "zircon"
+      "//third_party/Vulkan-Headers/include",
     ]
 
     deps = [
-      "//sdk/fidl/fuchsia.hardware.goldfish",
+      "//sdk/fidl/fuchsia.hardware.goldfish:fuchsia.hardware.goldfish_llcpp",
       "//sdk/fidl/fuchsia.logger:fuchsia.logger_llcpp",
-      "//sdk/fidl/fuchsia.sysmem",
+      "//sdk/fidl/fuchsia.sysmem:fuchsia.sysmem_llcpp",
+      "//sdk/lib/fdio",
+      "//src/zircon/lib/zircon",
+      "//zircon/public/lib/async-cpp",
       "//zircon/public/lib/zx",
-      "//zircon/public/lib/zxio",
+      "//zircon/system/ulib/async-default",
+      "//zircon/system/ulib/async-loop:async-loop-cpp",
       "//zircon/system/ulib/syslog:syslog-static",
       "//zircon/system/ulib/trace:trace-with-static-engine",
+      "//zircon/system/ulib/trace-provider:trace-provider-with-static-engine",
+      "//zircon/system/ulib/zxio",
     ]
 
     defines += [
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 80c99c4..88c8490 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2,17 +2,18 @@
 # instead run make from .../device/generic/goldfish-opengl
 # which will re-generate this file.
 set(GOLDFISH_DEVICE_ROOT ${CMAKE_CURRENT_SOURCE_DIR})
-android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/./Android.mk" "a370ddecca4c7d13811527b2f1f30e2f0ef5153a58edeb36eeb306c41efb9158")
+android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/./Android.mk" "aa7ac7e9637b1c1db874a5876f990c6aa04b3b6f905d03f479ecb06e93957d31")
 add_subdirectory(shared/qemupipe)
 add_subdirectory(shared/gralloc_cb)
 add_subdirectory(shared/GoldfishAddressSpace)
+add_subdirectory(android-emu)
 add_subdirectory(shared/OpenglCodecCommon)
 add_subdirectory(system/GLESv1_enc)
 add_subdirectory(system/GLESv2_enc)
 add_subdirectory(system/renderControl_enc)
-add_subdirectory(android-emu)
 add_subdirectory(system/vulkan_enc)
 add_subdirectory(system/OpenglSystemCommon)
+add_subdirectory(system/profiler)
 add_subdirectory(system/GLESv1)
 add_subdirectory(system/GLESv2)
 add_subdirectory(system/gralloc)
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..49edca9
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,214 @@
+   Copyright (c) 2011-2020, The Android Open Source Project
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..b64009c
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,11 @@
+third_party {
+  # would be NOTICE save for:
+  #   android-emu/android/base/containers/Lookup.h
+  #   system/codecs/c2/store/include/goldfish_codec2/store/GoldfishComponentStore.h
+  #   system/codecs/c2/store/GoldfishComponentStore.cpp
+  # and RESTRICTED save for:
+  #  system/include/GLES/
+  #  system/include/GLES2/
+  #  system/include/GLES3/
+  license_type: BY_EXCEPTION_ONLY
+}
diff --git a/android-emu/Android.bp b/android-emu/Android.bp
index 68df03e..b83cab9 100644
--- a/android-emu/Android.bp
+++ b/android-emu/Android.bp
@@ -1,3 +1,13 @@
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "device_generic_goldfish-opengl_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    //   SPDX-license-identifier-GPL-2.0
+    default_applicable_licenses: ["device_generic_goldfish-opengl_license"],
+}
+
 cc_library_shared {
     name: "libandroidemu",
     vendor: true,
@@ -7,7 +17,6 @@
         "android/base/files/Stream.cpp",
         "android/base/files/StreamSerializing.cpp",
         "android/base/Pool.cpp",
-        "android/base/ring_buffer.c",
         "android/base/StringFormat.cpp",
         "android/base/AndroidSubAllocator.cpp",
         "android/base/synchronization/AndroidMessageChannel.cpp",
@@ -33,3 +42,25 @@
         "-fstrict-aliasing",
     ],
 }
+
+cc_library_static {
+    name: "libringbuffer",
+    vendor: true,
+    srcs: [
+        "android/base/ring_buffer.c",
+    ],
+    shared_libs: [
+        "libcutils",
+        "libutils",
+        "liblog",
+    ],
+    export_include_dirs: [
+        ".",
+    ],
+    cflags: [
+        "-DLOG_TAG=\"androidemu\"",
+        "-Wno-missing-field-initializers",
+        "-fvisibility=default",
+        "-fstrict-aliasing",
+    ],
+}
diff --git a/android-emu/Android.mk b/android-emu/Android.mk
index ef67ddc..88a296c 100644
--- a/android-emu/Android.mk
+++ b/android-emu/Android.mk
@@ -17,7 +17,6 @@
     android/base/files/Stream.cpp \
     android/base/files/StreamSerializing.cpp \
     android/base/Pool.cpp \
-    android/base/ring_buffer.c \
     android/base/StringFormat.cpp \
     android/base/AndroidSubAllocator.cpp \
     android/base/synchronization/AndroidMessageChannel.cpp \
@@ -29,4 +28,12 @@
     android/utils/debug.c \
 
 $(call emugl-end-module)
+
+$(call emugl-begin-static-library,libringbuffer)
+$(call emugl-export,C_INCLUDES,$(LOCAL_PATH))
+
+LOCAL_SRC_FILES := \
+    android/base/ring_buffer.c \
+
+$(call emugl-end-module)
 endif
diff --git a/android-emu/CMakeLists.txt b/android-emu/CMakeLists.txt
index cfdf39b..f65c0fa 100644
--- a/android-emu/CMakeLists.txt
+++ b/android-emu/CMakeLists.txt
@@ -1,10 +1,20 @@
 # This is an autogenerated file! Do not edit!
 # instead run make from .../device/generic/goldfish-opengl
 # which will re-generate this file.
-android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/android-emu/Android.mk" "bd7f25228f5fed42d7a66a35c585d88bd77221db773a6aeb42403089703819e9")
-set(androidemu_src android/base/AlignedBuf.cpp android/base/files/MemStream.cpp android/base/files/Stream.cpp android/base/files/StreamSerializing.cpp android/base/Pool.cpp android/base/ring_buffer.c android/base/StringFormat.cpp android/base/AndroidSubAllocator.cpp android/base/synchronization/AndroidMessageChannel.cpp android/base/threads/AndroidFunctorThread.cpp android/base/threads/AndroidThreadStore.cpp android/base/threads/AndroidThread_pthread.cpp android/base/threads/AndroidWorkPool.cpp android/base/Tracing.cpp android/utils/debug.c)
-android_add_library(TARGET androidemu SHARED LICENSE Apache-2.0 SRC android/base/AlignedBuf.cpp android/base/files/MemStream.cpp android/base/files/Stream.cpp android/base/files/StreamSerializing.cpp android/base/Pool.cpp android/base/ring_buffer.c android/base/StringFormat.cpp android/base/AndroidSubAllocator.cpp android/base/synchronization/AndroidMessageChannel.cpp android/base/threads/AndroidFunctorThread.cpp android/base/threads/AndroidThreadStore.cpp android/base/threads/AndroidThread_pthread.cpp android/base/threads/AndroidWorkPool.cpp android/base/Tracing.cpp android/utils/debug.c)
+android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/android-emu/Android.mk" "e1609bbe85546522a8f71488322e5c9919d4e81107cf10a1413e5ed0d86211e2")
+set(androidemu_src android/base/AlignedBuf.cpp android/base/files/MemStream.cpp android/base/files/Stream.cpp android/base/files/StreamSerializing.cpp android/base/Pool.cpp android/base/StringFormat.cpp android/base/AndroidSubAllocator.cpp android/base/synchronization/AndroidMessageChannel.cpp android/base/threads/AndroidFunctorThread.cpp android/base/threads/AndroidThreadStore.cpp android/base/threads/AndroidThread_pthread.cpp android/base/threads/AndroidWorkPool.cpp android/base/Tracing.cpp android/utils/debug.c)
+android_add_library(TARGET androidemu SHARED LICENSE Apache-2.0 SRC android/base/AlignedBuf.cpp android/base/files/MemStream.cpp android/base/files/Stream.cpp android/base/files/StreamSerializing.cpp android/base/Pool.cpp android/base/StringFormat.cpp android/base/AndroidSubAllocator.cpp android/base/synchronization/AndroidMessageChannel.cpp android/base/threads/AndroidFunctorThread.cpp android/base/threads/AndroidThreadStore.cpp android/base/threads/AndroidThread_pthread.cpp android/base/threads/AndroidWorkPool.cpp android/base/Tracing.cpp android/utils/debug.c)
 target_include_directories(androidemu PRIVATE ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
-target_compile_definitions(androidemu PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGOLDFISH_VULKAN" "-DLOG_TAG=\"androidemu\"")
+target_compile_definitions(androidemu PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM" "-DLOG_TAG=\"androidemu\"")
 target_compile_options(androidemu PRIVATE "-fvisibility=default" "-Wno-unused-parameter" "-Wno-missing-field-initializers" "-fstrict-aliasing")
-target_link_libraries(androidemu PRIVATE cutils utils log OpenglCodecCommon_host android-emu-shared PRIVATE qemupipe_host)
\ No newline at end of file
+target_link_libraries(androidemu PRIVATE cutils utils log android-emu-shared)
+# This is an autogenerated file! Do not edit!
+# instead run make from .../device/generic/goldfish-opengl
+# which will re-generate this file.
+android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/android-emu/Android.mk" "e1609bbe85546522a8f71488322e5c9919d4e81107cf10a1413e5ed0d86211e2")
+set(ringbuffer_src android/base/ring_buffer.c)
+android_add_library(TARGET ringbuffer LICENSE Apache-2.0 SRC android/base/ring_buffer.c)
+target_include_directories(ringbuffer PRIVATE ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
+target_compile_definitions(ringbuffer PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM")
+target_compile_options(ringbuffer PRIVATE "-fvisibility=default" "-Wno-unused-parameter")
+target_link_libraries(ringbuffer PRIVATE cutils utils log android-emu-shared)
\ No newline at end of file
diff --git a/android-emu/android/base/Allocator.h b/android-emu/android/base/Allocator.h
new file mode 100644
index 0000000..d875de1
--- /dev/null
+++ b/android-emu/android/base/Allocator.h
@@ -0,0 +1,68 @@
+// Copyright 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include <inttypes.h>
+#include <stddef.h>
+#include <string.h>
+
+namespace android {
+namespace base {
+
+// A generic memory allocator interface which could be used to allocate
+// a certain size of memory region, or memory region for arrays / strings.
+// How the memory are recycled / freed is up to derived classes.
+class Allocator {
+public:
+    Allocator() = default;
+    virtual ~Allocator() = default;
+
+    virtual void* alloc(size_t wantedSize) = 0;
+
+    // Convenience function to allocate an array
+    // of objects of type T.
+    template <class T>
+    T* allocArray(size_t count) {
+        size_t bytes = sizeof(T) * count;
+        void* res = alloc(bytes);
+        return (T*)res;
+    }
+
+    char* strDup(const char* toCopy) {
+        size_t bytes = strlen(toCopy) + 1;
+        void* res = alloc(bytes);
+        memset(res, 0x0, bytes);
+        memcpy(res, toCopy, bytes);
+        return (char*)res;
+    }
+
+    char** strDupArray(const char* const* arrayToCopy, size_t count) {
+        char** res = allocArray<char*>(count);
+
+        for (size_t i = 0; i < count; i++) {
+            res[i] = strDup(arrayToCopy[i]);
+        }
+
+        return res;
+    }
+
+    void* dupArray(const void* buf, size_t bytes) {
+        void* res = alloc(bytes);
+        memcpy(res, buf, bytes);
+        return res;
+    }
+};
+
+}  // namespace base
+}  // namespace android
diff --git a/android-emu/android/base/BumpPool.h b/android-emu/android/base/BumpPool.h
new file mode 100644
index 0000000..545e9e3
--- /dev/null
+++ b/android-emu/android/base/BumpPool.h
@@ -0,0 +1,78 @@
+// Copyright 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include "android/base/AlignedBuf.h"
+#include "android/base/Allocator.h"
+
+#include <vector>
+#include <unordered_set>
+
+#include <inttypes.h>
+
+namespace android {
+namespace base {
+
+// Class to make it easier to set up memory regions where it is fast
+// to allocate buffers AND we don't care about freeing individual pieces,
+// BUT it's necessary to preserve previous pointer values in between the first
+// alloc() after a freeAll(), and the freeAll() itself, allowing some sloppy use of
+// malloc in the first pass while we find out how much data was needed.
+class BumpPool : public Allocator {
+public:
+    BumpPool(size_t startingBytes = 4096) : mStorage(startingBytes / sizeof(uint64_t))  { }
+    // All memory allocated by this pool
+    // is automatically deleted when the pool
+    // is deconstructed.
+    ~BumpPool() { }
+
+    void* alloc(size_t wantedSize) override {
+        size_t wantedSizeRoundedUp =
+            sizeof(uint64_t) * ((wantedSize + sizeof(uint64_t) - 1) / (sizeof(uint64_t)));
+
+        mTotalWantedThisGeneration += wantedSizeRoundedUp;
+        if (mAllocPos + wantedSizeRoundedUp > mStorage.size() * sizeof(uint64_t)) {
+            mNeedRealloc = true;
+            void* fallbackPtr = malloc(wantedSizeRoundedUp);
+            mFallbackPtrs.insert(fallbackPtr);
+            return fallbackPtr;
+        }
+        size_t avail = mStorage.size() * sizeof(uint64_t) - mAllocPos;
+        void* allocPtr = (void*)(((unsigned char*)mStorage.data()) + mAllocPos);
+        mAllocPos += wantedSizeRoundedUp;
+        return allocPtr;
+    }
+
+    void freeAll() {
+        mAllocPos = 0;
+        if (mNeedRealloc) {
+            mStorage.resize((mTotalWantedThisGeneration * 2) / sizeof(uint64_t));
+            mNeedRealloc = false;
+            for (auto ptr : mFallbackPtrs) {
+                free(ptr);
+            }
+            mFallbackPtrs.clear();
+        }
+        mTotalWantedThisGeneration = 0;
+    }
+private:
+    AlignedBuf<uint64_t, 8> mStorage;
+    std::unordered_set<void*> mFallbackPtrs;
+    size_t mAllocPos = 0;
+    size_t mTotalWantedThisGeneration = 0;
+    bool mNeedRealloc = false;
+};
+
+} // namespace base
+} // namespace android
diff --git a/android-emu/android/base/Pool.h b/android-emu/android/base/Pool.h
index bce2cc1..14a39f8 100644
--- a/android-emu/android/base/Pool.h
+++ b/android-emu/android/base/Pool.h
@@ -13,6 +13,8 @@
 // limitations under the License.
 #pragma once
 
+#include "android/base/Allocator.h"
+
 #include <unordered_set>
 
 #include <inttypes.h>
@@ -25,7 +27,7 @@
 // Class to make it easier to set up memory regions where it is fast
 // to allocate/deallocate buffers that have size within
 // the specified range.
-class Pool {
+class Pool : public Allocator {
 public:
     // minSize/maxSize: the target range of sizes for which we want to
     // make allocations fast. the greater the range, the more space
@@ -36,54 +38,21 @@
     //
     // Rough space cost formula:
     // O(chunksPerSize * log2(maxSize / minSize) * maxSize)
-    Pool(size_t minSize = 8,
+    Pool(size_t minSize = 4,
          size_t maxSize = 4096,
-         size_t chunksPerSize = 256);
+         size_t chunksPerSize = 1024);
 
     // All memory allocated by this pool
     // is automatically deleted when the pool
     // is deconstructed.
     ~Pool();
 
-    void* alloc(size_t wantedSize);
+    void* alloc(size_t wantedSize) override;
     void free(void* ptr);
 
     // Convenience function to free everything currently allocated.
     void freeAll();
 
-    // Convenience function to allocate an array
-    // of objects of type T.
-    template <class T>
-    T* allocArray(size_t count) {
-        size_t bytes = sizeof(T) * count;
-        void* res = alloc(bytes);
-        return (T*) res;
-    }
-
-    char* strDup(const char* toCopy) {
-        size_t bytes = strlen(toCopy) + 1;
-        void* res = alloc(bytes);
-        memset(res, 0x0, bytes);
-        memcpy(res, toCopy, bytes);
-        return (char*)res;
-    }
-
-    char** strDupArray(const char* const* arrayToCopy, size_t count) {
-        char** res = allocArray<char*>(count);
-
-        for (size_t i = 0; i < count; i++) {
-            res[i] = strDup(arrayToCopy[i]);
-        }
-
-        return res;
-    }
-
-    void* dupArray(const void* buf, size_t bytes) {
-        void* res = alloc(bytes);
-        memcpy(res, buf, bytes);
-        return res;
-    }
-
 private:
     class Impl;
     Impl* mImpl = nullptr;
diff --git a/android-emu/android/base/Tracing.cpp b/android-emu/android/base/Tracing.cpp
index a4c097f..dd8e44b 100644
--- a/android-emu/android/base/Tracing.cpp
+++ b/android-emu/android/base/Tracing.cpp
@@ -16,6 +16,8 @@
 
 #if defined(__ANDROID__) || defined(HOST_BUILD)
 
+#define ATRACE_TAG ATRACE_TAG_GRAPHICS
+
 #include <cutils/trace.h>
 
 #define VK_TRACE_TAG ATRACE_TAG_GRAPHICS
@@ -23,11 +25,15 @@
 namespace android {
 namespace base {
 
-void ScopedTrace::beginTraceImpl(const char* name) {
+bool isTracingEnabled() {
+    return atrace_is_tag_enabled(ATRACE_TAG_GRAPHICS);
+}
+
+void ScopedTraceGuest::beginTraceImpl(const char* name) {
     atrace_begin(VK_TRACE_TAG, name);
 }
 
-void ScopedTrace::endTraceImpl(const char*) {
+void ScopedTraceGuest::endTraceImpl(const char*) {
     atrace_end(VK_TRACE_TAG);
 }
 
@@ -45,13 +51,18 @@
 namespace android {
 namespace base {
 
-void ScopedTrace::beginTraceImpl(const char* name) {
+bool isTracingEnabled() {
+    // TODO: Fuchsia
+    return false;
+}
+
+void ScopedTraceGuest::beginTraceImpl(const char* name) {
 #ifndef FUCHSIA_NO_TRACE
     TRACE_DURATION_BEGIN(VK_TRACE_TAG, name);
 #endif
 }
 
-void ScopedTrace::endTraceImpl(const char* name) {
+void ScopedTraceGuest::endTraceImpl(const char* name) {
 #ifndef FUCHSIA_NO_TRACE
     TRACE_DURATION_END(VK_TRACE_TAG, name);
 #endif
diff --git a/android-emu/android/base/Tracing.h b/android-emu/android/base/Tracing.h
index 046a5bf..e13d202 100644
--- a/android-emu/android/base/Tracing.h
+++ b/android-emu/android/base/Tracing.h
@@ -16,16 +16,41 @@
 
 // Library to perform tracing. Talks to platform-specific
 // tracing libraries.
+
 namespace android {
 namespace base {
 
+#ifdef HOST_BUILD
+void initializeTracing();
+void enableTracing();
+void disableTracing();
+// Some platform tracing libraries such as Perfetto can be enabled/disabled at
+// runtime. Allow the user to query if they are disabled or not, and take
+// further action based on it. The use case is to enable/disable tracing on the
+// host alongside.
+bool isTracingEnabled();
+
 class ScopedTrace {
 public:
-    ScopedTrace(const char* name) : name_(name) {
+    ScopedTrace(const char* name);
+    ~ScopedTrace();
+};
+
+class ScopedTraceDerived : public ScopedTrace {
+public:
+    void* member = nullptr;
+};
+#endif
+
+bool isTracingEnabled();
+
+class ScopedTraceGuest {
+public:
+    ScopedTraceGuest(const char* name) : name_(name) {
         beginTraceImpl(name_);
     }
 
-    ~ScopedTrace() {
+    ~ScopedTraceGuest() {
         endTraceImpl(name_);
     }
 private:
@@ -42,4 +67,8 @@
 #define __AEMU_GENSYM1(x,y) __AEMU_GENSYM2(x,y)
 #define AEMU_GENSYM(x) __AEMU_GENSYM1(x,__COUNTER__)
 
+#ifdef HOST_BUILD
 #define AEMU_SCOPED_TRACE(tag) __attribute__ ((unused)) android::base::ScopedTrace AEMU_GENSYM(aemuScopedTrace_)(tag)
+#else
+#define AEMU_SCOPED_TRACE(tag) __attribute__ ((unused)) android::base::ScopedTraceGuest AEMU_GENSYM(aemuScopedTrace_)(tag)
+#endif
diff --git a/android-emu/android/base/containers/EntityManager.h b/android-emu/android/base/containers/EntityManager.h
new file mode 100644
index 0000000..ee0d305
--- /dev/null
+++ b/android-emu/android/base/containers/EntityManager.h
@@ -0,0 +1,620 @@
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include "android/base/containers/Lookup.h"
+#include "android/base/Optional.h"
+
+#include <functional>
+#include <unordered_map>
+#include <vector>
+
+#include <inttypes.h>
+#include <stdio.h>
+
+#define ENTITY_MANAGER_DEBUG 0
+
+#if ENTITY_MANAGER_DEBUG
+
+#define EM_DBG(fmt,...) fprintf(stderr, "%s:%d " fmt "\n", __func__, __LINE__, ##__VA_ARGS__);
+
+#else
+#define EM_DBG(...)
+#endif
+
+#define INVALID_ENTITY_HANDLE 0
+#define INVALID_COMPONENT_HANDLE 0
+
+namespace android {
+namespace base {
+
+// EntityManager: A way to represent an abstrat space of objects with handles.
+// Each handle is associated with data of type Item for quick access from handles to data.
+// Otherwise, entity data is spread through ComponentManagers.
+template<size_t indexBits,
+         size_t generationBits,
+         size_t typeBits,
+         class Item>
+class EntityManager {
+public:
+
+    static_assert(indexBits == 64 - generationBits - typeBits,
+                  "bits of index, generation, and type must add to 64");
+
+    // https://stackoverflow.com/questions/45225925/create-bitmask-based-on-a-pattern-as-constexpr
+    // There is another answer based on a function that returns constexpr but
+    // it doesn't actually fold to a constant when given a constant argument,
+    // according to godbolt.
+    template<class T, int count> struct bit_repeater;
+    template<class T>
+    struct bit_repeater<T, 1> {
+        static constexpr T value = 0x1;
+    };
+    template<class T, int count>
+    struct bit_repeater {
+        static constexpr T value = (bit_repeater<T, count-1>::value << 1) | 0x1;
+    };
+
+    static constexpr uint64_t indexMaskBase = bit_repeater<uint64_t, indexBits>().value;
+    static constexpr uint64_t generationMaskBase = bit_repeater<uint64_t, generationBits>().value;
+    static constexpr uint64_t typeMaskBase = bit_repeater<uint64_t, typeBits>().value;
+
+    static constexpr uint64_t indexMask = indexMaskBase;
+    static constexpr uint64_t generationMask = generationMaskBase << indexBits;
+    static constexpr uint64_t typeMask = typeMaskBase << (indexBits + generationBits);
+
+    using EntityHandle = uint64_t;
+    using IteratorFunc = std::function<void(bool live, EntityHandle h, Item& item)>;
+    using ConstIteratorFunc = std::function<void(bool live, EntityHandle h, const Item& item)>;
+
+    static size_t getHandleIndex(EntityHandle h) {
+        return static_cast<size_t>(h & indexMask);
+    }
+
+    static size_t getHandleGeneration(EntityHandle h) {
+        return static_cast<size_t>((h & generationMask) >> indexBits);
+    }
+
+    static size_t getHandleType(EntityHandle h) {
+        return static_cast<size_t>((h & typeMask) >> (indexBits + generationBits));
+    }
+
+    static EntityHandle makeHandle(
+        size_t index,
+        size_t generation,
+        size_t type) {
+        EntityHandle res = (index & indexMask);
+        res |= (((uint64_t)generation) << indexBits) & generationMask;
+        res |= (((uint64_t)type) << (indexBits + generationBits)) & typeMask;
+        return res;
+    }
+
+    static EntityHandle withIndex(EntityHandle h, size_t i) {
+        return makeHandle(i, getHandleGeneration(h), getHandleType(h));
+    }
+
+    static EntityHandle withGeneration(EntityHandle h, size_t nextGen) {
+        return makeHandle(getHandleIndex(h), nextGen, getHandleType(h));
+    }
+
+    static EntityHandle withType(EntityHandle h, size_t newType) {
+        return makeHandle(getHandleIndex(h), getHandleGeneration(h), newType);
+    }
+
+    EntityManager() : EntityManager(0) { }
+
+    EntityManager(size_t initialItems) :
+        mEntries(initialItems),
+        mFirstFreeIndex(0),
+        mLiveEntries(0) {
+        reserve(initialItems);
+    }
+
+    ~EntityManager() { clear(); }
+
+    struct EntityEntry {
+        EntityHandle handle = 0;
+        size_t nextFreeIndex = 0;
+        // 0 is a special generation for brand new entries
+        // that are not used yet
+        size_t liveGeneration = 1;
+        Item item;
+    };
+
+    void clear() {
+        reserve(mEntries.size());
+        mFirstFreeIndex = 0;
+        mLiveEntries = 0;
+    }
+
+    size_t nextFreeIndex() const {
+        return mFirstFreeIndex;
+    }
+
+    EntityHandle add(const Item& item, size_t type) {
+
+        if (!type) return INVALID_ENTITY_HANDLE;
+
+        uint64_t maxElements = (1ULL << indexBits);
+        if (mLiveEntries == maxElements) return INVALID_ENTITY_HANDLE;
+
+        uint64_t newIndex = mFirstFreeIndex;
+
+        EM_DBG("newIndex/firstFree: %zu type: %zu", newIndex, type);
+
+        uint64_t neededCapacity = newIndex + 1;
+        if (maxElements < neededCapacity) return INVALID_ENTITY_HANDLE;
+
+        uint64_t currentCapacity = mEntries.size();
+        uint64_t nextCapacity = neededCapacity << 1;
+        if (nextCapacity > maxElements) nextCapacity = maxElements;
+
+        EM_DBG("needed/current/next capacity: %zu %zu %zu",
+               neededCapacity,
+               currentCapacity,
+               nextCapacity);
+
+        if (neededCapacity > mEntries.size()) {
+            mEntries.resize((size_t)nextCapacity);
+            for (uint64_t i = currentCapacity; i < nextCapacity; ++i) {
+                mEntries[i].handle = makeHandle(i, 0, type);
+                mEntries[i].nextFreeIndex = i + 1;
+                EM_DBG("new un-init entry: index %zu nextFree %zu",
+                       i, i + 1);
+            }
+        }
+
+        mEntries[newIndex].handle =
+            makeHandle(newIndex, mEntries[newIndex].liveGeneration, type);
+        mEntries[newIndex].item = item;
+
+        mFirstFreeIndex = mEntries[newIndex].nextFreeIndex;
+        EM_DBG("created. new first free: %zu", mFirstFreeIndex);
+
+        ++mLiveEntries;
+
+        EM_DBG("result handle: 0x%llx", (unsigned long long)mEntries[newIndex].handle);
+
+        return mEntries[newIndex].handle;
+    }
+
+    EntityHandle addFixed(EntityHandle fixedHandle, const Item& item, size_t type) {
+        // 3 cases:
+        // 1. handle is not allocated and doesn't correspond to mFirstFreeIndex
+        bool isFreeListNonHead = false;
+        // 2. handle already exists (replace)
+        bool isAlloced = false;
+        // 3. index(handle) == mFirstFreeIndex
+        bool isFreeListHead = false;
+
+        if (!type) return INVALID_ENTITY_HANDLE;
+
+        uint64_t maxElements = (1ULL << indexBits);
+        if (mLiveEntries == maxElements) return INVALID_ENTITY_HANDLE;
+
+        uint64_t newIndex = getHandleIndex(fixedHandle);
+
+        EM_DBG("newIndex/firstFree: %zu type: %zu", newIndex, type);
+
+        uint64_t neededCapacity = newIndex + 1;
+
+        if (maxElements < neededCapacity) return INVALID_ENTITY_HANDLE;
+
+        uint64_t currentCapacity = mEntries.size();
+        uint64_t nextCapacity = neededCapacity << 1;
+        if (nextCapacity > maxElements) nextCapacity = maxElements;
+
+        EM_DBG("needed/current/next capacity: %zu %zu %zu",
+               neededCapacity,
+               currentCapacity,
+               nextCapacity);
+
+        if (neededCapacity > mEntries.size()) {
+            mEntries.resize((size_t)nextCapacity);
+            for (uint64_t i = currentCapacity; i < nextCapacity; ++i) {
+                mEntries[i].handle = makeHandle(i, 0, type);
+                mEntries[i].nextFreeIndex = i + 1;
+                EM_DBG("new un-init entry: index %zu nextFree %zu",
+                       i, i + 1);
+            }
+        }
+
+        // Now we ensured that there is enough space to talk about the entry of
+        // this |fixedHandle|.
+        if (mFirstFreeIndex == newIndex) {
+            isFreeListHead = true;
+        } else {
+            auto& entry = mEntries[newIndex];
+            if (entry.liveGeneration == getHandleGeneration(entry.handle)) {
+                isAlloced = true;
+            } else {
+                isFreeListNonHead = true;
+            }
+        }
+
+        mEntries[newIndex].handle = fixedHandle;
+        mEntries[newIndex].liveGeneration = getHandleGeneration(fixedHandle);
+        mEntries[newIndex].item = item;
+
+        EM_DBG("new index: %zu", newIndex);
+
+        if (isFreeListHead) {
+
+            EM_DBG("first free index reset from %zu to %zu",
+                    mFirstFreeIndex, mEntries[newIndex].nextFreeIndex);
+
+            mFirstFreeIndex = mEntries[newIndex].nextFreeIndex;
+
+            ++mLiveEntries;
+
+        } else if (isAlloced) {
+            // Already replaced whatever is there, and since it's already allocated,
+            // no need to update freelist.
+            EM_DBG("entry at %zu already alloced. replacing.", newIndex);
+        } else if (isFreeListNonHead) {
+            // Go through the freelist and skip over the entry we just added.
+            uint64_t prevEntryIndex = mFirstFreeIndex;
+
+            EM_DBG("in free list but not head. reorganizing freelist. "
+                   "start at %zu -> %zu",
+                   mFirstFreeIndex, mEntries[prevEntryIndex].nextFreeIndex);
+
+            while (mEntries[prevEntryIndex].nextFreeIndex != newIndex) {
+                EM_DBG("next: %zu -> %zu",
+                       prevEntryIndex,
+                       mEntries[prevEntryIndex].nextFreeIndex);
+                prevEntryIndex =
+                    mEntries[prevEntryIndex].nextFreeIndex;
+            }
+
+            EM_DBG("finished. set prev entry %zu to new entry's next, %zu",
+                    prevEntryIndex, mEntries[newIndex].nextFreeIndex);
+
+            mEntries[prevEntryIndex].nextFreeIndex =
+                mEntries[newIndex].nextFreeIndex;
+
+            ++mLiveEntries;
+        }
+
+        return fixedHandle;
+    }
+    void remove(EntityHandle h) {
+
+        if (get(h) == nullptr) return;
+
+        uint64_t index = getHandleIndex(h);
+
+        EM_DBG("remove handle: 0x%llx -> index %zu", (unsigned long long)h, index);
+
+        auto& entry = mEntries[index];
+
+        EM_DBG("handle gen: %zu entry gen: %zu", getHandleGeneration(h), entry.liveGeneration);
+
+        ++entry.liveGeneration;
+        if ((entry.liveGeneration == 0) ||
+            (entry.liveGeneration == (1ULL << generationBits))) {
+            entry.liveGeneration = 1;
+        }
+
+        entry.nextFreeIndex = mFirstFreeIndex;
+
+        mFirstFreeIndex = index;
+
+        EM_DBG("new first free: %zu next free: %zu", mFirstFreeIndex, entry.nextFreeIndex);
+
+        --mLiveEntries;
+    }
+
+    Item* get(EntityHandle h) {
+        uint64_t index = getHandleIndex(h);
+        if (index >= mEntries.size()) return nullptr;
+
+        auto& entry = mEntries[index];
+        if (entry.liveGeneration != getHandleGeneration(h)) {
+            return nullptr;
+        }
+
+        return &entry.item;
+    }
+
+    const Item* get_const(EntityHandle h) const {
+        uint64_t index = getHandleIndex(h);
+        if (index >= mEntries.size()) return nullptr;
+
+        const auto& entry = mEntries.data() + index;
+        if (entry->liveGeneration != getHandleGeneration(h)) return nullptr;
+
+        return &entry->item;
+    }
+
+    bool isLive(EntityHandle h) const {
+        uint64_t index = getHandleIndex(h);
+        if (index >= mEntries.size()) return false;
+
+        const auto& entry = mEntries[index];
+
+        return (entry.liveGeneration == getHandleGeneration(h));
+    }
+
+    void forEachEntry(IteratorFunc func) {
+        for (auto& entry: mEntries) {
+            auto handle = entry.handle;
+            bool live = isLive(handle);
+            auto& item = entry.item;
+            func(live, handle, item);
+        }
+    }
+
+    void forEachLiveEntry(IteratorFunc func) {
+        for (auto& entry: mEntries) {
+            auto handle = entry.handle;
+            bool live = isLive(handle);
+
+            if (!live) continue;
+
+            auto& item = entry.item;
+            func(live, handle, item);
+        }
+    }
+
+    void forEachLiveEntry_const(ConstIteratorFunc func) const {
+        for (auto& entry: mEntries) {
+            auto handle = entry.handle;
+            bool live = isLive(handle);
+
+            if (!live) continue;
+
+            const auto& item = entry.item;
+            func(live, handle, item);
+        }
+    }
+
+private:
+    void reserve(size_t count) {
+        if (mEntries.size() < count) {
+            mEntries.resize(count);
+        }
+        for (size_t i = 0; i < count; ++i) {
+            mEntries[i].handle = makeHandle(i, 0, 1);
+            mEntries[i].nextFreeIndex = i + 1;
+            ++mEntries[i].liveGeneration;
+            if ((mEntries[i].liveGeneration == 0) ||
+                    (mEntries[i].liveGeneration == (1ULL << generationBits))) {
+                mEntries[i].liveGeneration = 1;
+            }
+            EM_DBG("new un-init entry: index %zu nextFree %zu",
+                    i, i + 1);
+        }
+    }
+
+    std::vector<EntityEntry> mEntries;
+    uint64_t mFirstFreeIndex;
+    uint64_t mLiveEntries;
+};
+
+// Tracks components over a given space of entities.
+// Looking up by entity index is slower, but takes less space overall versus
+// a flat array that parallels the entities.
+template<size_t indexBits,
+         size_t generationBits,
+         size_t typeBits,
+         class Data>
+class ComponentManager {
+public:
+
+    static_assert(64 == (indexBits + generationBits + typeBits),
+                  "bits of index, generation, and type must add to 64");
+
+    using ComponentHandle = uint64_t;
+    using EntityHandle = uint64_t;
+    using ComponentIteratorFunc = std::function<void(bool, ComponentHandle componentHandle, EntityHandle entityHandle, Data& data)>;
+    using ConstComponentIteratorFunc = std::function<void(bool, ComponentHandle componentHandle, EntityHandle entityHandle, const Data& data)>;
+
+    // Adds the given |data| and associates it with EntityHandle.
+    // We can also opt-in to immediately tracking the handle in the reverse mapping,
+    // which has an upfront cost in runtime.
+    // Many uses of ComponentManager don't really need to track the associated entity handle,
+    // so it is opt-in.
+
+    ComponentHandle add(
+        EntityHandle h,
+        const Data& data,
+        size_t type,
+        bool tracked = false) {
+
+        InternalItem item = { h, data, tracked };
+        auto res = static_cast<ComponentHandle>(mData.add(item, type));
+
+        if (tracked) {
+            mEntityToComponentMap[h] = res;
+        }
+
+        return res;
+    }
+
+    void clear() {
+        mData.clear();
+        mEntityToComponentMap.clear();
+    }
+
+    // If we didn't explicitly track, just fail.
+    ComponentHandle getComponentHandle(EntityHandle h) const {
+        auto componentHandlePtr = android::base::find(mEntityToComponentMap, h);
+        if (!componentHandlePtr) return INVALID_COMPONENT_HANDLE;
+        return *componentHandlePtr;
+    }
+
+    EntityHandle getEntityHandle(ComponentHandle h) const {
+        return mData.get(h)->entityHandle;
+    }
+
+    void removeByEntity(EntityHandle h) {
+        auto componentHandle = getComponentHandle(h);
+        removeByComponent(componentHandle);
+    }
+
+    void removeByComponent(ComponentHandle h) {
+        auto item = mData.get(h);
+
+        if (!item) return;
+        if (item->tracked) {
+            mEntityToComponentMap.erase(item->entityHandle);
+        }
+
+        mData.remove(h);
+    }
+
+    Data* getByEntity(EntityHandle h) {
+        return getByComponent(getComponentHandle(h));
+    }
+
+    Data* getByComponent(ComponentHandle h) {
+        auto item = mData.get(h);
+        if (!item) return nullptr;
+        return &(item->data);
+    }
+
+    void forEachComponent(ComponentIteratorFunc func) {
+        mData.forEachEntry(
+            [func](bool live, typename InternalEntityManager::EntityHandle componentHandle, InternalItem& item) {
+                func(live, componentHandle, item.entityHandle, item.data);
+        });
+    }
+
+    void forEachLiveComponent(ComponentIteratorFunc func) {
+        mData.forEachLiveEntry(
+            [func](bool live, typename InternalEntityManager::EntityHandle componentHandle, InternalItem& item) {
+                func(live, componentHandle, item.entityHandle, item.data);
+        });
+    }
+
+    void forEachLiveComponent_const(ConstComponentIteratorFunc func) const {
+        mData.forEachLiveEntry_const(
+            [func](bool live, typename InternalEntityManager::EntityHandle componentHandle, const InternalItem& item) {
+                func(live, componentHandle, item.entityHandle, item.data);
+        });
+    }
+
+private:
+    struct InternalItem {
+        EntityHandle entityHandle;
+        Data data;
+        bool tracked;
+    };
+
+    using InternalEntityManager = EntityManager<indexBits, generationBits, typeBits, InternalItem>;
+    using EntityToComponentMap = std::unordered_map<EntityHandle, ComponentHandle>;
+
+    mutable InternalEntityManager mData;
+    EntityToComponentMap mEntityToComponentMap;
+};
+
+// ComponentManager, but unpacked; uses the same index space as the associated
+// entities. Takes more space by default, but not more if all entities have this component.
+template<size_t indexBits,
+         size_t generationBits,
+         size_t typeBits,
+         class Data>
+class UnpackedComponentManager {
+public:
+    using ComponentHandle = uint64_t;
+    using EntityHandle = uint64_t;
+    using ComponentIteratorFunc =
+        std::function<void(bool, ComponentHandle componentHandle, EntityHandle entityHandle, Data& data)>;
+    using ConstComponentIteratorFunc =
+        std::function<void(bool, ComponentHandle componentHandle, EntityHandle entityHandle, const Data& data)>;
+
+    EntityHandle add(EntityHandle h, const Data& data) {
+
+        size_t index = indexOfEntity(h);
+
+        if (index + 1 > mItems.size()) {
+            mItems.resize((index + 1) * 2);
+        }
+
+        mItems[index].live = true;
+        mItems[index].handle = h;
+        mItems[index].data = data;
+
+        return h;
+    }
+
+    void clear() {
+        mItems.clear();
+    }
+
+    void remove(EntityHandle h) {
+        size_t index = indexOfEntity(h);
+        if (index >= mItems.size()) return;
+        mItems[index].live = false;
+    }
+
+    Data* get(EntityHandle h) {
+        size_t index = indexOfEntity(h);
+
+        if (index + 1 > mItems.size()) {
+            mItems.resize((index + 1) * 2);
+        }
+
+        auto item = mItems.data() + index;
+        if (!item->live) return nullptr;
+        return &item->data;
+    }
+
+    const Data* get_const(EntityHandle h) const {
+        size_t index = indexOfEntity(h);
+
+        if (index + 1 > mItems.size()) {
+            return nullptr;
+        }
+
+        auto item = mItems.data() + index;
+        if (!item->live) return nullptr;
+        return &item->data;
+    }
+
+    void forEachComponent(ComponentIteratorFunc func) {
+        for (auto& item : mItems) {
+            func(item.live, item.handle, item.handle, item.data);
+        }
+    }
+
+    void forEachLiveComponent(ComponentIteratorFunc func) {
+        for (auto& item : mItems) {
+            if (item.live) func(item.live, item.handle, item.handle, item.data);
+        }
+    }
+
+    void forEachLiveComponent_const(ConstComponentIteratorFunc func) const {
+        for (auto& item : mItems) {
+            if (item.live) func(item.live, item.handle, item.handle, item.data);
+        }
+    }
+
+private:
+    static size_t indexOfEntity(EntityHandle h) {
+        return EntityManager<indexBits, generationBits, typeBits, int>::getHandleIndex(h);
+    }
+
+    struct InternalItem {
+        bool live = false;
+        EntityHandle handle = 0;
+        Data data;
+    };
+
+    std::vector<InternalItem> mItems;
+};
+
+} // namespace android
+} // namespace base
diff --git a/android-emu/android/base/containers/HybridComponentManager.h b/android-emu/android/base/containers/HybridComponentManager.h
new file mode 100644
index 0000000..02c6224
--- /dev/null
+++ b/android-emu/android/base/containers/HybridComponentManager.h
@@ -0,0 +1,132 @@
+// Copyright (C) 2020 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include "android/base/containers/Lookup.h"
+#include "android/base/containers/EntityManager.h"
+
+#include <unordered_map>
+
+namespace android {
+namespace base {
+
+template <size_t maxIndex,
+          class IndexType, // must be castable to uint64_t
+          class Data>
+class HybridComponentManager {
+public:
+    using UCM = UnpackedComponentManager<32, 16, 16, Data>;
+    using EM = EntityManager<32, 16, 16, Data>;
+    using IterFunc = typename UCM::ComponentIteratorFunc;
+    using ConstIterFunc = typename UCM::ConstComponentIteratorFunc;
+    using Handle = typename EM::EntityHandle;
+
+    void add(IndexType index, const Data& data) {
+        uint64_t index_u64 = (uint64_t)index;
+        if (index_u64 < maxIndex) {
+            auto internal_handle = index2Handle(index_u64);
+            mComponentManager.add(internal_handle, data);
+        } else {
+            mMap[index] = data;
+        }
+
+    }
+
+    void clear() {
+        mComponentManager.clear();
+        mMap.clear();
+    }
+
+    void remove(IndexType index) {
+        uint64_t index_u64 = (uint64_t)index;
+        if (index_u64 < maxIndex) {
+            auto internal_handle = index2Handle(index_u64);
+            mComponentManager.remove(internal_handle);
+        } else {
+            mMap.erase(index);
+        }
+    }
+
+    Data* get(IndexType index) {
+        uint64_t index_u64 = (uint64_t)index;
+        if (index_u64 < maxIndex) {
+            auto internal_handle = index2Handle(index_u64);
+            return mComponentManager.get(internal_handle);
+        } else {
+            return android::base::find(mMap, index);
+        }
+    }
+
+    const Data* get_const(IndexType index) const {
+        uint64_t index_u64 = (uint64_t)index;
+        if (index_u64 < maxIndex) {
+            auto internal_handle = index2Handle(index_u64);
+            return mComponentManager.get_const(internal_handle);
+        } else {
+            return android::base::find(mMap, index);
+        }
+    }
+
+    Data* getExceptZero(IndexType index) {
+        Data* res = get(index);
+        if (!res) return nullptr;
+        if (!(*res)) return nullptr;
+        return res;
+    }
+
+    const Data* getExceptZero_const(IndexType index) const {
+        const Data* res = get_const(index);
+        if (!res) return nullptr;
+        if (!(*res)) return nullptr;
+        return res;
+    }
+
+    void forEach(IterFunc func) {
+        mComponentManager.forEach(func);
+
+        for (auto it : mMap) {
+            auto handle = index2Handle(it.first);
+            func(true /* live */, handle, handle, it.second);
+        }
+    }
+
+    void forEachLive(IterFunc func) {
+        mComponentManager.forEachLiveComponent(func);
+
+        for (auto it : mMap) {
+            auto handle = index2Handle(it.first);
+            func(true /* live */, handle, handle, it.second);
+        }
+    }
+
+    void forEachLive_const(ConstIterFunc func) const {
+        mComponentManager.forEachLiveComponent_const(func);
+
+        for (const auto it : mMap) {
+            auto handle = index2Handle(it.first);
+            func(true /* live */, handle, handle, it.second);
+        }
+    }
+
+private:
+    static Handle index2Handle(uint64_t index) {
+        return EM::makeHandle((uint32_t)index, 1, 1);
+    }
+
+    UCM mComponentManager;
+    std::unordered_map<IndexType, Data> mMap;
+};
+
+} // namespace android
+} // namespace base
diff --git a/android-emu/android/base/containers/Lookup.h b/android-emu/android/base/containers/Lookup.h
new file mode 100644
index 0000000..8043286
--- /dev/null
+++ b/android-emu/android/base/containers/Lookup.h
@@ -0,0 +1,168 @@
+// Copyright 2016 The Android Open Source Project
+//
+// This software is licensed under the terms of the GNU General Public
+// License version 2, as published by the Free Software Foundation, and
+// may be copied, distributed, and modified under those terms.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+// GNU General Public License for more details.
+
+#pragma once
+
+#include "android/base/TypeTraits.h"
+
+#include <initializer_list>
+#include <set>
+#include <map>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+
+// A set of convenience functions for map and set lookups. They allow a simpler
+// syntax, e.g.
+//   if (auto val = find(map, "key")) {
+//       <process the value>
+//   }
+// ... or
+//   auto value = find(funcThatReturnsMap(), "other_key");
+//   if (!value) ...
+//
+// Note: these don't work for multimaps, as there's no single value
+//  to return (and, more importantly, as those are completely useless).
+
+namespace android {
+namespace base {
+
+// Helper predicates that check if the template argument is a map / set /
+// a mutlikey collection of any kind.
+// These are used as a constraints for the lookup functions to get better error
+// messages if the arguments don't support the map interface.
+template <class T>
+using is_any_map = std::integral_constant<
+        bool,
+        is_template_instantiation_of<T, std::map>::value ||
+                is_template_instantiation_of<T, std::unordered_map>::value>;
+
+template <class T>
+using is_any_set = std::integral_constant<
+        bool,
+        is_template_instantiation_of<T, std::set>::value ||
+                is_template_instantiation_of<T, std::unordered_set>::value>;
+
+template <class T>
+using is_any_multikey = std::integral_constant<
+        bool,
+        is_template_instantiation_of<T, std::multimap>::value ||
+                is_template_instantiation_of<T, std::unordered_multimap>::value ||
+                is_template_instantiation_of<T, std::multiset>::value ||
+                is_template_instantiation_of<T, std::unordered_multiset>::value>;
+
+template <class T, class = enable_if<is_any_map<T>>>
+const typename T::mapped_type* find(const T& map,
+                                    const typename T::key_type& key) {
+    const auto it = map.find(key);
+    if (it == map.end()) {
+        return nullptr;
+    }
+
+    return &it->second;
+}
+
+// Version that returns a modifiable value.
+template <class T, class = enable_if<is_any_map<T>>>
+typename T::mapped_type* find(T& map, const typename T::key_type& key) {
+    auto it = map.find(key);
+    if (it == map.end()) {
+        return nullptr;
+    }
+
+    return &it->second;
+}
+
+// Version with a default, returns a _copy_ because of the possible fallback
+// to a default - it might be destroyed after the call.
+template <class T,
+          class U = typename T::mapped_type,
+          class = enable_if_c<
+                  is_any_map<T>::value &&
+                  std::is_convertible<U, typename T::mapped_type>::value>>
+typename T::mapped_type findOrDefault(const T& map,
+                                      const typename T::key_type& key,
+                                      U&& defaultVal = {}) {
+    if (auto valPtr = find(map, key)) {
+        return *valPtr;
+    }
+    return defaultVal;
+}
+
+// Version that finds the first of the values passed in |keys| in the order they
+// are passed. E.g., the following code finds '2' as the first value in |keys|:
+//   set<int> s = {1, 2, 3};
+//   auto val = findFirstOf(s, {2, 1});
+//   EXPECT_EQ(2, *val);
+template <class T, class = enable_if<is_any_map<T>>>
+const typename T::mapped_type* findFirstOf(
+        const T& map,
+        std::initializer_list<typename T::key_type> keys) {
+    for (const auto& key : keys) {
+        if (const auto valPtr = find(map, key)) {
+            return valPtr;
+        }
+    }
+    return nullptr;
+}
+
+template <class T, class = enable_if<is_any_map<T>>>
+typename T::mapped_type* findFirstOf(
+        T& map,
+        std::initializer_list<typename T::key_type> keys) {
+    for (const auto& key : keys) {
+        if (const auto valPtr = find(map, key)) {
+            return valPtr;
+        }
+    }
+    return nullptr;
+}
+
+// Version that finds first of the passed |key| values or returns the
+// |defaultVal| if none were found.
+template <class T,
+          class U,
+          class = enable_if_c<
+                  is_any_map<T>::value &&
+                  std::is_convertible<U, typename T::mapped_type>::value>>
+typename T::mapped_type findFirstOfOrDefault(
+        const T& map,
+        std::initializer_list<typename T::key_type> keys,
+        U&& defaultVal) {
+    if (const auto valPtr = findFirstOf(map, keys)) {
+        return *valPtr;
+    }
+    return std::forward<U>(defaultVal);
+}
+
+template <class T,
+          class = enable_if_c<is_any_map<T>::value || is_any_set<T>::value ||
+                              is_any_multikey<T>::value>>
+bool contains(const T& c, const typename T::key_type& key) {
+    const auto it = c.find(key);
+    return it != c.end();
+}
+
+template <class T,
+          class = enable_if_c<is_any_map<T>::value || is_any_set<T>::value ||
+                              is_any_multikey<T>::value>>
+bool containsAnyOf(const T& c,
+                   std::initializer_list<typename T::key_type> keys) {
+    for (const auto& key : keys) {
+        if (contains(c, key)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+}  // namespace base
+}  // namespace android
diff --git a/android-emu/android/base/files/Stream.cpp b/android-emu/android/base/files/Stream.cpp
index db34ed3..85730d0 100644
--- a/android-emu/android/base/files/Stream.cpp
+++ b/android-emu/android/base/files/Stream.cpp
@@ -183,5 +183,79 @@
     return sign ? -int64_t(num >> 1) : (num >> 1);
 }
 
+// Static big-endian conversions
+
+// the |v| pointer is unlikely to be aligned---use memcpy throughout
+
+void Stream::toByte(uint8_t*) { } // no conversion
+
+void Stream::toBe16(uint8_t* v) {
+    uint16_t value;
+    memcpy(&value, v, sizeof(uint16_t));
+    uint8_t b[2] = { (uint8_t)(value >> 8), (uint8_t)value };
+    memcpy(v, b, sizeof(uint16_t));
+}
+
+void Stream::toBe32(uint8_t* v) {
+    uint32_t value;
+    memcpy(&value, v, sizeof(uint32_t));
+    uint8_t b[4] = {
+            (uint8_t)(value >> 24),
+            (uint8_t)(value >> 16),
+            (uint8_t)(value >> 8),
+            (uint8_t)value };
+    memcpy(v, b, sizeof(uint32_t));
+}
+
+void Stream::toBe64(uint8_t* v) {
+    uint64_t value;
+    memcpy(&value, v, sizeof(uint64_t));
+    uint8_t b[8] = {
+            (uint8_t)(value >> 56),
+            (uint8_t)(value >> 48),
+            (uint8_t)(value >> 40),
+            (uint8_t)(value >> 32),
+            (uint8_t)(value >> 24),
+            (uint8_t)(value >> 16),
+            (uint8_t)(value >> 8),
+            (uint8_t)value };
+    memcpy(v, b, sizeof(uint64_t));
+}
+
+void Stream::fromByte(uint8_t*) { } // no conversion
+
+void Stream::fromBe16(uint8_t* v) {
+    uint8_t b[2];
+    memcpy(b, v, sizeof(uint16_t));
+    uint16_t value = ((uint16_t)b[0] << 8) | (uint16_t)b[1];
+    memcpy(v, &value, sizeof(uint16_t));
+}
+
+void Stream::fromBe32(uint8_t* v) {
+    uint8_t b[4];
+    memcpy(b, v, sizeof(uint32_t));
+    uint32_t value =
+        ((uint32_t)b[0] << 24) |
+        ((uint32_t)b[1] << 16) |
+        ((uint32_t)b[2] << 8) |
+        (uint32_t)b[3];
+    memcpy(v, &value, sizeof(uint32_t));
+}
+
+void Stream::fromBe64(uint8_t* v) {
+    uint8_t b[8];
+    memcpy(b, v, sizeof(uint64_t));
+    uint64_t value =
+        ((uint64_t)b[0] << 56) |
+        ((uint64_t)b[1] << 48) |
+        ((uint64_t)b[2] << 40) |
+        ((uint64_t)b[3] << 32) |
+        ((uint64_t)b[4] << 24) |
+        ((uint64_t)b[5] << 16) |
+        ((uint64_t)b[6] << 8) |
+        (uint64_t)b[7];
+    memcpy(v, &value, sizeof(uint64_t));
+}
+
 }  // namespace base
 }  // namespace android
diff --git a/android-emu/android/base/files/Stream.h b/android-emu/android/base/files/Stream.h
index 3a5632e..fa175f3 100644
--- a/android-emu/android/base/files/Stream.h
+++ b/android-emu/android/base/files/Stream.h
@@ -106,6 +106,16 @@
     // bit + packed unsigned representation)
     void putPackedSignedNum(int64_t num);
     int64_t getPackedSignedNum();
+
+    // Static big-endian conversions
+    static void toByte(uint8_t*);
+    static void toBe16(uint8_t*);
+    static void toBe32(uint8_t*);
+    static void toBe64(uint8_t*);
+    static void fromByte(uint8_t*);
+    static void fromBe16(uint8_t*);
+    static void fromBe32(uint8_t*);
+    static void fromBe64(uint8_t*);
 };
 
 }  // namespace base
diff --git a/android-emu/android/base/files/StreamSerializing.h b/android-emu/android/base/files/StreamSerializing.h
index 56613f5..b8e91bc 100644
--- a/android-emu/android/base/files/StreamSerializing.h
+++ b/android-emu/android/base/files/StreamSerializing.h
@@ -45,7 +45,7 @@
 bool loadBuffer(Stream* stream, std::vector<T>* buffer) {
     auto len = stream->getBe32();
     buffer->resize(len);
-    int ret = (int)stream->read(buffer->data(), len * sizeof(T));
+    auto ret = static_cast<std::size_t>(stream->read(buffer->data(), len * sizeof(T)));
     return ret == len * sizeof(T);
 }
 
diff --git a/android-emu/android/base/fit/Defer.h b/android-emu/android/base/fit/Defer.h
new file mode 100644
index 0000000..d120c32
--- /dev/null
+++ b/android-emu/android/base/fit/Defer.h
@@ -0,0 +1,157 @@
+// Copyright 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#pragma once
+
+#include <utility>
+
+#include "Function.h"
+#include "Nullable.h"
+
+namespace android::base {
+namespace fit {
+
+// A move-only deferred action wrapper with RAII semantics.
+// This class is not thread safe.
+//
+// The wrapper holds a function-like callable target with no arguments
+// which it invokes when it goes out of scope unless canceled, called, or
+// moved to a wrapper in a different scope.
+//
+// See |fit::defer()| for idiomatic usage.
+template <typename T>
+class DeferredAction final {
+public:
+    // Creates a deferred action without a pending target.
+    DeferredAction() = default;
+    explicit DeferredAction(decltype(nullptr)) {}
+
+    // Creates a deferred action with a pending target.
+    explicit DeferredAction(T target) : mTarget(std::move(target)) {}
+
+    // Creates a deferred action with a pending target moved from another
+    // deferred action, leaving the other one without a pending target.
+    DeferredAction(DeferredAction&& other) : mTarget(std::move(other.mTarget)) {
+        other.mTarget.reset();
+    }
+
+    // Invokes and releases the deferred action's pending target (if any).
+    ~DeferredAction() { call(); }
+
+    // Returns true if the deferred action has a pending target.
+    explicit operator bool() const { return !!mTarget; }
+
+    // Invokes and releases the deferred action's pending target (if any),
+    // then move-assigns it from another deferred action, leaving the latter
+    // one without a pending target.
+    DeferredAction& operator=(DeferredAction&& other) {
+        if (&other == this)
+            return *this;
+        call();
+        mTarget = std::move(other.mTarget);
+        other.mTarget.reset();
+        return *this;
+    }
+
+    // Invokes and releases the deferred action's pending target (if any).
+    void call() {
+        if (mTarget) {
+            // Move to a local to guard against re-entrance.
+            T local_target = std::move(*mTarget);
+            mTarget.reset();
+            local_target();
+        }
+    }
+
+    // Releases the deferred action's pending target (if any) without
+    // invoking it.
+    void cancel() { mTarget.reset(); }
+    DeferredAction& operator=(decltype(nullptr)) {
+        cancel();
+        return *this;
+    }
+
+    // Assigns a new target to the deferred action.
+    DeferredAction& operator=(T target) {
+        mTarget = std::move(target);
+        return *this;
+    }
+
+    DeferredAction(const DeferredAction& other) = delete;
+    DeferredAction& operator=(const DeferredAction& other) = delete;
+
+private:
+    Nullable<T> mTarget;
+};
+
+template <typename T>
+bool operator==(const DeferredAction<T>& action, decltype(nullptr)) {
+    return !action;
+}
+template <typename T>
+bool operator==(decltype(nullptr), const DeferredAction<T>& action) {
+    return !action;
+}
+template <typename T>
+bool operator!=(const DeferredAction<T>& action, decltype(nullptr)) {
+    return !!action;
+}
+template <typename T>
+bool operator!=(decltype(nullptr), const DeferredAction<T>& action) {
+    return !!action;
+}
+
+// Defers execution of a function-like callable target with no arguments
+// until the value returned by this function goes out of scope unless canceled,
+// called, or moved to a wrapper in a different scope.
+//
+// // This example prints "Hello..." then "Goodbye!".
+// void test() {
+//     auto d = fit::defer([]{ puts("Goodbye!"); });
+//     puts("Hello...");
+// }
+//
+// // This example prints nothing because the deferred action is canceled.
+// void do_nothing() {
+//     auto d = fit::defer([]{ puts("I'm not here."); });
+//     d.cancel();
+// }
+//
+// // This example shows how the deferred action can be reassigned assuming
+// // the new target has the same type and the old one, in this case by
+// // representing the target as a |fit::Closure|.
+// void reassign() {
+//     auto d = fit::defer<fit::Closure>([] { puts("This runs first."); });
+//     d = fit::defer<fit::Closure>([] { puts("This runs afterwards."); });
+// }
+template <typename T>
+inline DeferredAction<T> defer(T target) {
+    return DeferredAction<T>(std::move(target));
+}
+
+// Alias for a deferred_action using a fit::Callback.
+using DeferredCallback = DeferredAction<fit::Callback<void()>>;
+
+// Defers execution of a fit::Callback with no arguments. See |fit::defer| for
+// details.
+inline DeferredCallback deferCallback(fit::Callback<void()> target) {
+    return DeferredCallback(std::move(target));
+}
+
+}  // namespace fit
+}  // namespace android::base
diff --git a/android-emu/android/base/fit/Function.h b/android-emu/android/base/fit/Function.h
new file mode 100644
index 0000000..4f4b17d
--- /dev/null
+++ b/android-emu/android/base/fit/Function.h
@@ -0,0 +1,513 @@
+// Copyright 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright 2017 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#pragma once
+
+#include "FunctionInternal.h"
+#include "UtilityInternal.h"
+
+namespace android::base {
+namespace fit {
+
+template <size_t inlineTargetSize, bool requireInline, typename Result, typename... Args>
+class FunctionImpl;
+
+template <size_t inlineTargetSize, bool requireInline, typename Result, typename... Args>
+class CallbackImpl;
+
+// The default size allowance for storing a target inline within a function
+// object, in bytes.  This default allows for inline storage of targets
+// as big as two pointers, such as an object pointer and a pointer to a member
+// function.
+constexpr size_t kDefaultInlineTargetSize = sizeof(void*) * 2;
+
+// A |fit::Function| is a move-only polymorphic function wrapper.
+//
+// If you need a class with similar characteristics that also ensures
+// "run-once" semantics (such as callbacks shared with timeouts, or for
+// service requests with redundant, failover, or fallback service providers),
+// see |fit::Callback|.
+//
+// |fit::Function<T>| behaves like |std::function<T>| except that it is
+// move-only instead of copyable, so it can hold targets that cannot be copied,
+// such as mutable lambdas, and immutable lambdas that capture move-only
+// objects.
+//
+// Targets of up to |inlineTargetSize| bytes in size (rounded up for memory
+// alignment) are stored inline within the function object without incurring
+// any heap allocation.  Larger callable objects will be moved to the heap as
+// required.
+//
+// See also |fit::InlineFunction<T, size>| for more control over allocation
+// behavior.
+//
+// SYNOPSIS
+//
+// |T| is the function's signature.  e.g. void(int, std::string).
+//
+// |inlineTargetSize| is the minimum size of target that is guaranteed to
+// fit within a function without requiring heap allocation.
+// Defaults to |kDefaultInlineTargetSize|.
+//
+// Class members are documented in |fit::FunctionImpl|, below.
+//
+// EXAMPLES
+//
+// -
+// https://fuchsia.googlesource.com/fuchsia/+/HEAD/sdk/lib/fit/test/examples/function_example1.cc
+// -
+// https://fuchsia.googlesource.com/fuchsia/+/HEAD/sdk/lib/fit/test/examples/function_example2.cc
+//
+template <typename T, size_t inlineTargetSize = kDefaultInlineTargetSize>
+using function = FunctionImpl<inlineTargetSize, /*requireInline=*/false, T>;
+
+// A move-only callable object wrapper that forces callables to be stored inline
+// and never performs heap allocation.
+//
+// Behaves just like |fit::Function<T, inlineTargetSize>| except that
+// attempting to store a target larger than |inlineTargetSize| will fail to
+// compile.
+template <typename T, size_t inlineTargetSize = kDefaultInlineTargetSize>
+using InlineFunction = FunctionImpl<inlineTargetSize,
+                                    /*requireInline=*/true,
+                                    T>;
+
+// Synonym for a function which takes no arguments and produces no result.
+using closure = function<void()>;
+
+// A |fit::Callback| is a move-only polymorphic function wrapper that also
+// ensures "run-once" semantics (such as callbacks shared with timeouts, or for
+// service requests with redundant, failover, or fallback service providers).
+// A |fit::Callback| releases it's resources after the first call, and can be
+// inspected before calling, so a potential caller can know if it should call
+// the function, or skip the call because the target was already called.
+//
+// If you need a move-only function class with typical function characteristics,
+// that permits multiple invocations of the same function, see |fit::Function|.
+//
+// |fit::Callback<T>| behaves like |std::function<T>| except:
+//
+//   1. It is move-only instead of copyable, so it can hold targets that cannot
+//      be copied, such as mutable lambdas, and immutable lambdas that capture
+//      move-only objects.
+//   2. On the first call to invoke a |fit::Callback|, the target function held
+//      by the |fit::Callback| cannot be called again.
+//
+// When a |fit::Callback| is invoked for the first time, the target function is
+// released and destructed, along with any resources owned by that function
+// (typically the objects captured by a lambda).
+//
+// A |fit::Callback| in the "already called" state has the same state as a
+// |fit::Callback| that has been assigned to |nullptr|. It can be compared to
+// |nullptr| (via "==" or "!=", and its "operator bool()" returns false, which
+// provides a convenient way to gate whether or not the |fit::Callback| should
+// be called. (Note that invoking an empty |fit::Callback| or |fit::Function|
+// will cause a program abort!)
+//
+// As an example, sharing |fit::Callback| between both a service and a timeout
+// might look something like this:
+//
+//  void service_with_timeout(fit::Callback<void(bool)> cb, uint timeout_ms) {
+//    service_request([cb = cb.share()]() mutable { if (cb) cb(false); });
+//    timeout(timeout_ms, [cb = std::move(cb)]() mutable { if (cb) cb(true); });
+//  }
+//
+// Since |fit::Callback| objects are move-only, and not copyable, duplicate
+// references to the same |fit::Callback| can be obtained via share(), as shown
+// in the example above. This method converts the |fit::Callback| into a
+// reference-counted version of the |fit::Callback| and returns a copy of the
+// reference as another |fit::Callback| with the same target function.
+//
+// What is notable about |fit::Callback<T>.share()| is that invoking any shared
+// copy will "nullify" all shared copies, as shown in the example.
+//
+// Note that |fit::Callback| is NOT thread-safe by default. If multi-threaded
+// support is required, you would need to implement your own mutex, or similar
+// guard, before checking and calling a |fit::Callback|.
+//
+// Targets of up to |inlineTargetSize| bytes in size (rounded up for memory
+// alignment) are stored inline within the callback object without incurring
+// any heap allocation.  Larger callable objects will be moved to the heap as
+// required.
+//
+// See also |fit::inline_callback<T, size>| for more control over allocation
+// behavior.
+//
+// SYNOPSIS
+//
+// |T| is the callback's signature.  e.g. void(int, std::string).
+//
+// |inlineTargetSize| is the minimum size of target that is guaranteed to
+// fit within a callback without requiring heap allocation.
+// Defaults to |kDefaultInlineTargetSize|.
+//
+// Class members are documented in |fit::CallbackImpl|, below.
+//
+template <typename T, size_t inlineTargetSize = kDefaultInlineTargetSize>
+using Callback = CallbackImpl<inlineTargetSize, /*requireInline=*/false, T>;
+
+// A move-only, run-once, callable object wrapper that forces callables to be
+// stored inline and never performs heap allocation.
+//
+// Behaves just like |fit::Callback<T, inlineTargetSize>| except that
+// attempting to store a target larger than |inlineTargetSize| will fail to
+// compile.
+template <typename T, size_t inlineTargetSize = kDefaultInlineTargetSize>
+using InlineCallback = CallbackImpl<inlineTargetSize,
+                                    /*requireInline=*/true,
+                                    T>;
+
+template <size_t inlineTargetSize, bool requireInline, typename Result, typename... Args>
+class FunctionImpl<inlineTargetSize, requireInline, Result(Args...)> final
+    : private ::android::base::fit::internal::
+          function_base<inlineTargetSize, requireInline, Result(Args...)> {
+    using Base = ::android::base::fit::internal::
+        function_base<inlineTargetSize, requireInline, Result(Args...)>;
+
+    // function_base requires private access during share()
+    friend class ::android::base::fit::internal::
+        function_base<inlineTargetSize, requireInline, Result(Args...)>;
+
+    // supports target() for shared functions
+    friend const void* ::android::base::fit::internal::get_target_type_id<>(
+        const FunctionImpl<inlineTargetSize, requireInline, Result(Args...)>&);
+
+    template <typename U>
+    using NotSelfType = ::android::base::fit::internal::NotSameType<FunctionImpl, U>;
+
+    template <typename... Conditions>
+    using RequiresConditions = ::android::base::fit::internal::RequiresConditions<Conditions...>;
+
+    template <typename... Conditions>
+    using AssignmentRequiresConditions =
+        ::android::base::fit::internal::AssignmentRequiresConditions<FunctionImpl&, Conditions...>;
+
+public:
+    // The function's result type.
+    using typename Base::result_type;
+
+    // Initializes an empty (null) function. Attempting to call an empty
+    // function will abort the program.
+    FunctionImpl() = default;
+
+    // Creates a function with an empty target (same outcome as the default
+    // constructor).
+    FunctionImpl(decltype(nullptr)) : Base(nullptr) {}
+
+    // Creates a function bound to the specified function pointer.
+    // If target == nullptr, assigns an empty target.
+    FunctionImpl(Result (*target)(Args...)) : Base(target) {}
+
+    // Creates a function bound to the specified callable object.
+    // If target == nullptr, assigns an empty target.
+    //
+    // For functors, we need to capture the raw type but also restrict on the
+    // existence of an appropriate operator () to resolve overloads and implicit
+    // casts properly.
+    //
+    // Note that specializations of this template method that take fit::Callback
+    // objects as the target Callable are deleted (see below).
+    template <typename Callable,
+              RequiresConditions<
+                  std::is_convertible<decltype(std::declval<Callable&>()(std::declval<Args>()...)),
+                                      result_type>,
+                  NotSelfType<Callable>> = true>
+    FunctionImpl(Callable&& target) : Base(std::forward<Callable>(target)) {}
+
+    // Deletes the specializations of FunctionImpl(Callable) that would allow
+    // a |fit::Function| to be constructed from a |fit::Callback|. This prevents
+    // unexpected behavior of a |fit::Function| that would otherwise fail after
+    // one call. To explicitly allow this, simply wrap the |fit::Callback| in a
+    // pass-through lambda before passing it to the |fit::Function|.
+    template <size_t otherInlineTargetSize, bool otherRequireInline>
+    FunctionImpl(::android::base::fit::CallbackImpl<otherInlineTargetSize,
+                                                    otherRequireInline,
+                                                    Result(Args...)>) = delete;
+
+    // Creates a function with a target moved from another function,
+    // leaving the other function with an empty target.
+    FunctionImpl(FunctionImpl&& other) : Base(static_cast<Base&&>(other)) {}
+
+    // Destroys the function, releasing its target.
+    ~FunctionImpl() = default;
+
+    // Assigns the function to an empty target. Attempting to invoke the
+    // function will abort the program.
+    FunctionImpl& operator=(decltype(nullptr)) {
+        Base::assign(nullptr);
+        return *this;
+    }
+
+    // Assigns the function to the specified callable object. If target ==
+    // nullptr, assigns an empty target.
+    //
+    // For functors, we need to capture the raw type but also restrict on the
+    // existence of an appropriate operator () to resolve overloads and implicit
+    // casts properly.
+    //
+    // Note that specializations of this template method that take fit::Callback
+    // objects as the target Callable are deleted (see below).
+    template <typename Callable>
+    AssignmentRequiresConditions<
+        std::is_convertible<decltype(std::declval<Callable&>()(std::declval<Args>()...)),
+                            result_type>,
+        NotSelfType<Callable>>
+    operator=(Callable&& target) {
+        Base::assign(std::forward<Callable>(target));
+        return *this;
+    }
+
+    // Deletes the specializations of operator=(Callable) that would allow
+    // a |fit::Function| to be assigned from a |fit::Callback|. This
+    // prevents unexpected behavior of a |fit::Function| that would otherwise
+    // fail after one call. To explicitly allow this, simply wrap the
+    // |fit::Callback| in a pass-through lambda before assigning it to the
+    // |fit::Function|.
+    template <size_t otherInlineTargetSize, bool otherRequireInline>
+    FunctionImpl& operator=(::android::base::fit::CallbackImpl<otherInlineTargetSize,
+                                                               otherRequireInline,
+                                                               Result(Args...)>) = delete;
+
+    // Move assignment
+    FunctionImpl& operator=(FunctionImpl&& other) {
+        if (&other == this)
+            return *this;
+        Base::assign(static_cast<Base&&>(other));
+        return *this;
+    }
+
+    // Swaps the functions' targets.
+    void swap(FunctionImpl& other) { Base::swap(other); }
+
+    // Returns a pointer to the function's target.
+    using Base::target;
+
+    // Returns true if the function has a non-empty target.
+    using Base::operator bool;
+
+    // Invokes the function's target.
+    // Aborts if the function's target is empty.
+    Result operator()(Args... args) const { return Base::invoke(std::forward<Args>(args)...); }
+
+    // Returns a new function object that invokes the same target.
+    // The target itself is not copied; it is moved to the heap and its
+    // lifetime is extended until all references have been released.
+    //
+    // Note: This method is not supported on |fit::InlineFunction<>|
+    //       because it may incur a heap allocation which is contrary to
+    //       the stated purpose of |fit::InlineFunction<>|.
+    FunctionImpl share() {
+        FunctionImpl copy;
+        Base::template share_with<FunctionImpl>(copy);
+        return copy;
+    }
+};
+
+template <size_t inlineTargetSize, bool requireInline, typename Result, typename... Args>
+void swap(FunctionImpl<inlineTargetSize, requireInline, Result, Args...>& a,
+          FunctionImpl<inlineTargetSize, requireInline, Result, Args...>& b) {
+    a.swap(b);
+}
+
+template <size_t inlineTargetSize, bool requireInline, typename Result, typename... Args>
+bool operator==(const FunctionImpl<inlineTargetSize, requireInline, Result, Args...>& f,
+                decltype(nullptr)) {
+    return !f;
+}
+template <size_t inlineTargetSize, bool requireInline, typename Result, typename... Args>
+bool operator==(decltype(nullptr),
+                const FunctionImpl<inlineTargetSize, requireInline, Result, Args...>& f) {
+    return !f;
+}
+template <size_t inlineTargetSize, bool requireInline, typename Result, typename... Args>
+bool operator!=(const FunctionImpl<inlineTargetSize, requireInline, Result, Args...>& f,
+                decltype(nullptr)) {
+    return !!f;
+}
+template <size_t inlineTargetSize, bool requireInline, typename Result, typename... Args>
+bool operator!=(decltype(nullptr),
+                const FunctionImpl<inlineTargetSize, requireInline, Result, Args...>& f) {
+    return !!f;
+}
+
+template <size_t inlineTargetSize, bool requireInline, typename Result, typename... Args>
+class CallbackImpl<inlineTargetSize, requireInline, Result(Args...)> final
+    : private ::android::base::fit::internal::
+          function_base<inlineTargetSize, requireInline, Result(Args...)> {
+    using Base = ::android::base::fit::internal::
+        function_base<inlineTargetSize, requireInline, Result(Args...)>;
+
+    // function_base requires private access during share()
+    friend class ::android::base::fit::internal::
+        function_base<inlineTargetSize, requireInline, Result(Args...)>;
+
+    // supports target() for shared functions
+    friend const void* ::android::base::fit::internal::get_target_type_id<>(
+        const CallbackImpl<inlineTargetSize, requireInline, Result(Args...)>&);
+
+    template <typename U>
+    using NotSelfType = ::android::base::fit::internal::NotSameType<CallbackImpl, U>;
+
+    template <typename... Conditions>
+    using RequiresConditions = ::android::base::fit::internal::RequiresConditions<Conditions...>;
+
+    template <typename... Conditions>
+    using AssignmentRequiresConditions =
+        ::android::base::fit::internal::AssignmentRequiresConditions<CallbackImpl&, Conditions...>;
+
+public:
+    // The callback function's result type.
+    using typename Base::result_type;
+
+    // Initializes an empty (null) callback. Attempting to call an empty
+    // callback will abort the program.
+    CallbackImpl() = default;
+
+    // Creates a callback with an empty target (same outcome as the default
+    // constructor).
+    CallbackImpl(decltype(nullptr)) : Base(nullptr) {}
+
+    // Creates a callback bound to the specified function pointer.
+    // If target == nullptr, assigns an empty target.
+    CallbackImpl(Result (*target)(Args...)) : Base(target) {}
+
+    // Creates a callback bound to the specified callable object.
+    // If target == nullptr, assigns an empty target.
+    //
+    // For functors, we need to capture the raw type but also restrict on the
+    // existence of an appropriate operator () to resolve overloads and implicit
+    // casts properly.
+    template <typename Callable,
+              RequiresConditions<
+                  std::is_convertible<decltype(std::declval<Callable&>()(std::declval<Args>()...)),
+                                      result_type>,
+                  NotSelfType<Callable>> = true>
+    CallbackImpl(Callable&& target) : Base(std::forward<Callable>(target)) {}
+
+    // Creates a callback with a target moved from another callback,
+    // leaving the other callback with an empty target.
+    CallbackImpl(CallbackImpl&& other) : Base(static_cast<Base&&>(other)) {}
+
+    // Destroys the callback, releasing its target.
+    ~CallbackImpl() = default;
+
+    // Assigns the callback to an empty target. Attempting to invoke the
+    // callback will abort the program.
+    CallbackImpl& operator=(decltype(nullptr)) {
+        Base::assign(nullptr);
+        return *this;
+    }
+
+    // Assigns the callback to the specified callable object. If target ==
+    // nullptr, assigns an empty target.
+    //
+    // For functors, we need to capture the raw type but also restrict on the
+    // existence of an appropriate operator () to resolve overloads and implicit
+    // casts properly.
+    template <typename Callable>
+    AssignmentRequiresConditions<
+        std::is_convertible<decltype(std::declval<Callable&>()(std::declval<Args>()...)),
+                            result_type>,
+        NotSelfType<Callable>>
+    operator=(Callable&& target) {
+        Base::assign(std::forward<Callable>(target));
+        return *this;
+    }
+
+    // Move assignment
+    CallbackImpl& operator=(CallbackImpl&& other) {
+        if (&other == this)
+            return *this;
+        Base::assign(static_cast<Base&&>(other));
+        return *this;
+    }
+
+    // Swaps the callbacks' targets.
+    void swap(CallbackImpl& other) { Base::swap(other); }
+
+    // Returns a pointer to the callback's target.
+    using Base::target;
+
+    // Returns true if the callback has a non-empty target.
+    using Base::operator bool;
+
+    // Invokes the callback's target.
+    // Aborts if the callback's target is empty.
+    // |fit::Callback| must be non-const to invoke. Before the target function
+    // is actually called, the fit::Callback will be set to the default empty
+    // state (== nullptr, and operator bool() will subsequently return |false|).
+    // The target function will then be released after the function is called.
+    // If the callback was shared, any remaining copies will also be cleared.
+    Result operator()(Args... args) {
+        auto temp = std::move(*this);
+        return temp.invoke(std::forward<Args>(args)...);
+    }
+
+    // Returns a new callback object that invokes the same target.
+    // The target itself is not copied; it is moved to the heap and its
+    // lifetime is extended until all references have been released.
+    // For |fit::Callback| (unlike fit::Function), the first invocation of the
+    // callback will release all references to the target. All callbacks
+    // derived from the same original callback (via share()) will be cleared,
+    // as if set to |nullptr|, and "operator bool()" will return false.
+    //
+    // Note: This method is not supported on |fit::InlineFunction<>|
+    //       because it may incur a heap allocation which is contrary to
+    //       the stated purpose of |fit::InlineFunction<>|.
+    CallbackImpl share() {
+        CallbackImpl copy;
+        Base::template share_with<CallbackImpl>(copy);
+        return copy;
+    }
+};
+
+template <size_t inlineTargetSize, bool requireInline, typename Result, typename... Args>
+void swap(CallbackImpl<inlineTargetSize, requireInline, Result, Args...>& a,
+          CallbackImpl<inlineTargetSize, requireInline, Result, Args...>& b) {
+    a.swap(b);
+}
+
+template <size_t inlineTargetSize, bool requireInline, typename Result, typename... Args>
+bool operator==(const CallbackImpl<inlineTargetSize, requireInline, Result, Args...>& f,
+                decltype(nullptr)) {
+    return !f;
+}
+template <size_t inlineTargetSize, bool requireInline, typename Result, typename... Args>
+bool operator==(decltype(nullptr),
+                const CallbackImpl<inlineTargetSize, requireInline, Result, Args...>& f) {
+    return !f;
+}
+template <size_t inlineTargetSize, bool requireInline, typename Result, typename... Args>
+bool operator!=(const CallbackImpl<inlineTargetSize, requireInline, Result, Args...>& f,
+                decltype(nullptr)) {
+    return !!f;
+}
+template <size_t inlineTargetSize, bool requireInline, typename Result, typename... Args>
+bool operator!=(decltype(nullptr),
+                const CallbackImpl<inlineTargetSize, requireInline, Result, Args...>& f) {
+    return !!f;
+}
+
+// Returns a Callable object that invokes a member function of an object.
+template <typename R, typename T, typename... Args>
+auto bindMember(T* instance, R (T::*fn)(Args...)) {
+    return [instance, fn](Args... args) { return (instance->*fn)(std::forward<Args>(args)...); };
+}
+
+}  // namespace fit
+}  // namespace android::base
diff --git a/android-emu/android/base/fit/FunctionInternal.h b/android-emu/android/base/fit/FunctionInternal.h
new file mode 100644
index 0000000..3fb1ac5
--- /dev/null
+++ b/android-emu/android/base/fit/FunctionInternal.h
@@ -0,0 +1,456 @@
+// Copyright 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright 2017 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#pragma once
+
+#include <stddef.h>
+#include <stdlib.h>
+
+#include <memory>
+
+#include "Nullable.h"
+
+#include <new>
+#include <type_traits>
+#include <utility>
+
+namespace android::base {
+namespace fit {
+namespace internal {
+
+template <typename Result, typename... Args>
+struct target_ops final {
+    const void* (*target_type_id)(void* bits, const void* impl_ops);
+    void* (*get)(void* bits);
+    Result (*invoke)(void* bits, Args... args);
+    void (*move)(void* from_bits, void* to_bits);
+    void (*destroy)(void* bits);
+};
+
+template <typename Callable, bool is_inline, bool is_shared, typename Result, typename... Args>
+struct target;
+
+inline const void* unshared_target_type_id(void* bits, const void* impl_ops) {
+    return impl_ops;
+}
+
+// vtable for nullptr (empty target function)
+
+template <typename Result, typename... Args>
+struct target<decltype(nullptr),
+              /*is_inline=*/true,
+              /*is_shared=*/false,
+              Result,
+              Args...>
+    final {
+    static Result invoke(void* bits, Args... args) { __builtin_abort(); }
+
+    static const target_ops<Result, Args...> ops;
+};
+
+inline void* null_target_get(void* bits) {
+    return nullptr;
+}
+inline void null_target_move(void* from_bits, void* to_bits) {}
+inline void null_target_destroy(void* bits) {}
+
+template <typename Result, typename... Args>
+constexpr target_ops<Result, Args...> target<decltype(nullptr),
+                                             /*is_inline=*/true,
+                                             /*is_shared=*/false,
+                                             Result,
+                                             Args...>::ops = {
+    &unshared_target_type_id, &null_target_get, &target::invoke, &null_target_move,
+    &null_target_destroy};
+
+// vtable for inline target function
+
+template <typename Callable, typename Result, typename... Args>
+struct target<Callable,
+              /*is_inline=*/true,
+              /*is_shared=*/false,
+              Result,
+              Args...>
+    final {
+    template <typename Callable_>
+    static void initialize(void* bits, Callable_&& target) {
+        new (bits) Callable(std::forward<Callable_>(target));
+    }
+    static Result invoke(void* bits, Args... args) {
+        auto& target = *static_cast<Callable*>(bits);
+        return target(std::forward<Args>(args)...);
+    }
+    static void move(void* from_bits, void* to_bits) {
+        auto& from_target = *static_cast<Callable*>(from_bits);
+        new (to_bits) Callable(std::move(from_target));
+        from_target.~Callable();
+    }
+    static void destroy(void* bits) {
+        auto& target = *static_cast<Callable*>(bits);
+        target.~Callable();
+    }
+
+    static const target_ops<Result, Args...> ops;
+};
+
+inline void* inline_target_get(void* bits) {
+    return bits;
+}
+
+template <typename Callable, typename Result, typename... Args>
+constexpr target_ops<Result, Args...> target<Callable,
+                                             /*is_inline=*/true,
+                                             /*is_shared=*/false,
+                                             Result,
+                                             Args...>::ops = {
+    &unshared_target_type_id, &inline_target_get, &target::invoke, &target::move, &target::destroy};
+
+// vtable for pointer to target function
+
+template <typename Callable, typename Result, typename... Args>
+struct target<Callable,
+              /*is_inline=*/false,
+              /*is_shared=*/false,
+              Result,
+              Args...>
+    final {
+    template <typename Callable_>
+    static void initialize(void* bits, Callable_&& target) {
+        auto ptr = static_cast<Callable**>(bits);
+        *ptr = new Callable(std::forward<Callable_>(target));
+    }
+    static Result invoke(void* bits, Args... args) {
+        auto& target = **static_cast<Callable**>(bits);
+        return target(std::forward<Args>(args)...);
+    }
+    static void move(void* from_bits, void* to_bits) {
+        auto from_ptr = static_cast<Callable**>(from_bits);
+        auto to_ptr = static_cast<Callable**>(to_bits);
+        *to_ptr = *from_ptr;
+    }
+    static void destroy(void* bits) {
+        auto ptr = static_cast<Callable**>(bits);
+        delete *ptr;
+    }
+
+    static const target_ops<Result, Args...> ops;
+};
+
+inline void* heap_target_get(void* bits) {
+    return *static_cast<void**>(bits);
+}
+
+template <typename Callable, typename Result, typename... Args>
+constexpr target_ops<Result, Args...> target<Callable,
+                                             /*is_inline=*/false,
+                                             /*is_shared=*/false,
+                                             Result,
+                                             Args...>::ops = {
+    &unshared_target_type_id, &heap_target_get, &target::invoke, &target::move, &target::destroy};
+
+// vtable for fit::function std::shared_ptr to target function
+
+template <typename SharedFunction>
+const void* get_target_type_id(const SharedFunction& function_or_callback) {
+    return function_or_callback.target_type_id();
+}
+
+// For this vtable,
+// Callable by definition will be either a fit::function or fit::callback
+template <typename SharedFunction, typename Result, typename... Args>
+struct target<SharedFunction,
+              /*is_inline=*/false,
+              /*is_shared=*/true,
+              Result,
+              Args...>
+    final {
+    static void initialize(void* bits, SharedFunction target) {
+        new (bits) std::shared_ptr<SharedFunction>(
+            std::move(std::make_shared<SharedFunction>(std::move(target))));
+    }
+    static void copy_shared_ptr(void* from_bits, void* to_bits) {
+        auto& from_shared_ptr = *static_cast<std::shared_ptr<SharedFunction>*>(from_bits);
+        new (to_bits) std::shared_ptr<SharedFunction>(from_shared_ptr);
+    }
+    static const void* target_type_id(void* bits, const void* impl_ops) {
+        auto& function_or_callback = **static_cast<std::shared_ptr<SharedFunction>*>(bits);
+        return ::android::base::fit::internal::get_target_type_id(function_or_callback);
+    }
+    static void* get(void* bits) {
+        auto& function_or_callback = **static_cast<std::shared_ptr<SharedFunction>*>(bits);
+        return function_or_callback.template target<SharedFunction>(
+            /*check=*/false);  // void* will fail the check
+    }
+    static Result invoke(void* bits, Args... args) {
+        auto& function_or_callback = **static_cast<std::shared_ptr<SharedFunction>*>(bits);
+        return function_or_callback(std::forward<Args>(args)...);
+    }
+    static void move(void* from_bits, void* to_bits) {
+        auto from_shared_ptr = std::move(*static_cast<std::shared_ptr<SharedFunction>*>(from_bits));
+        new (to_bits) std::shared_ptr<SharedFunction>(std::move(from_shared_ptr));
+    }
+    static void destroy(void* bits) {
+        static_cast<std::shared_ptr<SharedFunction>*>(bits)->reset();
+    }
+
+    static const target_ops<Result, Args...> ops;
+};
+
+template <typename SharedFunction, typename Result, typename... Args>
+constexpr target_ops<Result, Args...> target<SharedFunction,
+                                             /*is_inline=*/false,
+                                             /*is_shared=*/true,
+                                             Result,
+                                             Args...>::ops = {
+    &target::target_type_id, &target::get, &target::invoke, &target::move, &target::destroy};
+
+template <size_t inline_target_size, bool requireInline, typename Result, typename... Args>
+class function_base;
+
+// Function implementation details.
+// See |fit::function| and |fit::callback| documentation for more information.
+template <size_t inline_target_size, bool requireInline, typename Result, typename... Args>
+class function_base<inline_target_size, requireInline, Result(Args...)> {
+    using ops_type = const target_ops<Result, Args...>*;
+    using storage_type = typename std::aligned_storage<(
+        inline_target_size >= sizeof(void*) ? inline_target_size : sizeof(void*))>::
+        type;  // avoid including <algorithm> for max
+    template <typename Callable>
+    using target_type = target<Callable,
+                               (sizeof(Callable) <= sizeof(storage_type)),
+                               /*is_shared=*/false,
+                               Result,
+                               Args...>;
+    template <typename SharedFunction>
+    using shared_target_type = target<SharedFunction,
+                                      /*is_inline=*/false,
+                                      /*is_shared=*/true,
+                                      Result,
+                                      Args...>;
+    using null_target_type = target_type<decltype(nullptr)>;
+
+protected:
+    using result_type = Result;
+
+    function_base() { initialize_null_target(); }
+
+    function_base(decltype(nullptr)) { initialize_null_target(); }
+
+    function_base(Result (*target)(Args...)) { initialize_target(target); }
+
+    template <typename Callable,
+              typename = std::enable_if_t<
+                  std::is_convertible<decltype(std::declval<Callable&>()(std::declval<Args>()...)),
+                                      result_type>::value>>
+    function_base(Callable&& target) {
+        initialize_target(std::forward<Callable>(target));
+    }
+
+    function_base(function_base&& other) { move_target_from(std::move(other)); }
+
+    ~function_base() { destroy_target(); }
+
+    // Returns true if the function has a non-empty target.
+    explicit operator bool() const { return ops_->get(&bits_) != nullptr; }
+
+    // Returns a pointer to the function's target.
+    // If |check| is true (the default), the function _may_ abort if the
+    // caller tries to assign the target to a varible of the wrong type. (This
+    // check is currently skipped for share()d objects.)
+    // Note the shared pointer vtable must set |check| to false to assign the
+    // target to |void*|.
+    template <typename Callable>
+    Callable* target(bool check = true) {
+        if (check)
+            check_target_type<Callable>();
+        return static_cast<Callable*>(ops_->get(&bits_));
+    }
+
+    // Returns a pointer to the function's target (const version).
+    // If |check| is true (the default), the function _may_ abort if the
+    // caller tries to assign the target to a varible of the wrong type. (This
+    // check is currently skipped for share()d objects.)
+    // Note the shared pointer vtable must set |check| to false to assign the
+    // target to |void*|.
+    template <typename Callable>
+    const Callable* target(bool check = true) const {
+        if (check)
+            check_target_type<Callable>();
+        return static_cast<Callable*>(ops_->get(&bits_));
+    }
+
+    // Used by the derived "impl" classes to implement share().
+    //
+    // The caller creates a new object of the same type as itself, and passes in
+    // the empty object. This function first checks if |this| is already shared,
+    // and if not, creates a new version of itself containing a
+    // |std::shared_ptr| to its original self, and updates |ops_| to the vtable
+    // for the shared version.
+    //
+    // Then it copies its |shared_ptr| to the |bits_| of the given |copy|,
+    // and assigns the same shared pointer vtable to the copy's |ops_|.
+    //
+    // The target itself is not copied; it is moved to the heap and its
+    // lifetime is extended until all references have been released.
+    //
+    // Note: This method is not supported on |fit::InlineFunction<>|
+    //       because it may incur a heap allocation which is contrary to
+    //       the stated purpose of |fit::InlineFunction<>|.
+    template <typename SharedFunction>
+    void share_with(SharedFunction& copy) {
+        static_assert(!requireInline, "Inline functions cannot be shared.");
+        if (ops_->get(&bits_) != nullptr) {
+            if (ops_ != &shared_target_type<SharedFunction>::ops) {
+                convert_to_shared_target<SharedFunction>();
+            }
+            copy_shared_target_to(copy);
+        }
+    }
+
+    // Used by derived "impl" classes to implement operator()().
+    // Invokes the function's target.
+    // Note that fit::callback will release the target immediately after
+    // invoke() (also affecting any share()d copies).
+    // Aborts if the function's target is empty.
+    Result invoke(Args... args) const { return ops_->invoke(&bits_, std::forward<Args>(args)...); }
+
+    // Used by derived "impl" classes to implement operator=().
+    // Assigns an empty target.
+    void assign(decltype(nullptr)) {
+        destroy_target();
+        initialize_null_target();
+    }
+
+    // Used by derived "impl" classes to implement operator=().
+    // Assigns the function's target.
+    // If target == nullptr, assigns an empty target.
+    template <typename Callable,
+              typename = std::enable_if_t<
+                  std::is_convertible<decltype(std::declval<Callable&>()(std::declval<Args>()...)),
+                                      result_type>::value>>
+    void assign(Callable&& target) {
+        destroy_target();
+        initialize_target(std::forward<Callable>(target));
+    }
+
+    // Used by derived "impl" classes to implement operator=().
+    // Assigns the function with a target moved from another function,
+    // leaving the other function with an empty target.
+    void assign(function_base&& other) {
+        destroy_target();
+        move_target_from(std::move(other));
+    }
+
+    void swap(function_base& other) {
+        if (&other == this)
+            return;
+        ops_type temp_ops = ops_;
+        storage_type temp_bits;
+        ops_->move(&bits_, &temp_bits);
+
+        ops_ = other.ops_;
+        other.ops_->move(&other.bits_, &bits_);
+
+        other.ops_ = temp_ops;
+        temp_ops->move(&temp_bits, &other.bits_);
+    }
+
+    // returns an opaque ID unique to the |Callable| type of the target.
+    // Used by check_target_type.
+    const void* target_type_id() const { return ops_->target_type_id(&bits_, ops_); }
+
+    // Deleted copy constructor and assign. |function_base| implementations are
+    // move-only.
+    function_base(const function_base& other) = delete;
+    function_base& operator=(const function_base& other) = delete;
+
+    // Move assignment must be provided by subclasses.
+    function_base& operator=(function_base&& other) = delete;
+
+private:
+    // Implements the move operation, used by move construction and move
+    // assignment. Leaves other target initialized to null.
+    void move_target_from(function_base&& other) {
+        ops_ = other.ops_;
+        other.ops_->move(&other.bits_, &bits_);
+        other.initialize_null_target();
+    }
+
+    // fit::function and fit::callback are not directly copyable, but share()
+    // will create shared references to the original object. This method
+    // implements the copy operation for the |std::shared_ptr| wrapper.
+    template <typename SharedFunction>
+    void copy_shared_target_to(SharedFunction& copy) {
+        copy.destroy_target();
+        assert(ops_ == &shared_target_type<SharedFunction>::ops);
+        shared_target_type<SharedFunction>::copy_shared_ptr(&bits_, &copy.bits_);
+        copy.ops_ = ops_;
+    }
+
+    // assumes target is uninitialized
+    void initialize_null_target() { ops_ = &null_target_type::ops; }
+
+    // target may or may not be initialized.
+    template <typename Callable>
+    void initialize_target(Callable&& target) {
+        // Convert function or function references to function pointer.
+        using DecayedCallable = std::decay_t<Callable>;
+        static_assert(
+            std::alignment_of<DecayedCallable>::value <= std::alignment_of<storage_type>::value,
+            "Alignment of Callable must be <= alignment of max_align_t.");
+        static_assert(!requireInline || sizeof(DecayedCallable) <= inline_target_size,
+                      "Callable too large to store inline as requested.");
+        if (is_null(target)) {
+            initialize_null_target();
+        } else {
+            ops_ = &target_type<DecayedCallable>::ops;
+            target_type<DecayedCallable>::initialize(&bits_, std::forward<Callable>(target));
+        }
+    }
+
+    // assumes target is uninitialized
+    template <typename SharedFunction>
+    void convert_to_shared_target() {
+        shared_target_type<SharedFunction>::initialize(
+            &bits_, std::move(*static_cast<SharedFunction*>(this)));
+        ops_ = &shared_target_type<SharedFunction>::ops;
+    }
+
+    // leaves target uninitialized
+    void destroy_target() { ops_->destroy(&bits_); }
+
+    // Called by target() if |check| is true.
+    // Checks the template parameter, usually inferred from the context of
+    // the call to target(), and aborts the program if it can determine that
+    // the Callable type is not compatible with the function's Result and Args.
+    template <typename Callable>
+    void check_target_type() const {
+        if (target_type<Callable>::ops.target_type_id(nullptr, &target_type<Callable>::ops) !=
+            target_type_id()) {
+            __builtin_abort();
+        }
+    }
+
+    ops_type ops_;
+    mutable storage_type bits_;
+};
+
+}  // namespace internal
+
+}  // namespace fit
+}  // namespace android::base
diff --git a/android-emu/android/base/fit/Nullable.h b/android-emu/android/base/fit/Nullable.h
new file mode 100644
index 0000000..e05021b
--- /dev/null
+++ b/android-emu/android/base/fit/Nullable.h
@@ -0,0 +1,265 @@
+// Copyright 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#pragma once
+
+#include <assert.h>
+
+#include <optional>
+#include <type_traits>
+#include <utility>
+
+namespace android::base {
+namespace fit {
+
+// Determines whether a type can be compared with nullptr.
+template <typename T, typename Comparable = bool>
+struct IsComparableWithNull : public std::false_type {};
+template <typename T>
+struct IsComparableWithNull<T, decltype(std::declval<const T&>() == nullptr)>
+    : public std::true_type {};
+
+// Suppress the warning when the compiler can see that a nullable value is
+// never equal to nullptr.
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Waddress"
+template <typename T, std::enable_if_t<IsComparableWithNull<T>::value, bool> = true>
+constexpr inline bool isNull(T&& value) {
+    return std::forward<T>(value) == nullptr;
+}
+#pragma GCC diagnostic pop
+
+template <typename T, std::enable_if_t<!IsComparableWithNull<T>::value, bool> = false>
+constexpr inline bool isNull(T&&) {
+    return false;
+}
+
+// Determines whether a type can be initialized, assigned, and compared
+// with nullptr.
+template <typename T>
+struct IsNullable
+    : public std::integral_constant<bool,
+                                    std::is_constructible<T, decltype(nullptr)>::value &&
+                                        std::is_assignable<T&, decltype(nullptr)>::value &&
+                                        IsComparableWithNull<T>::value> {};
+template <>
+struct IsNullable<void> : public std::false_type {};
+
+// Holds a value or nullptr.
+//
+// This class is similar to |std::optional<T>| except that it uses less
+// storage when the value type can be initialized, assigned, and compared
+// with nullptr.
+//
+// For example:
+// - sizeof(fit::nullable<void*>) == sizeof(void*)
+// - sizeof(std::optional<void*>) == sizeof(struct { bool; void*; })
+// - sizeof(fit::nullable<int>) == sizeof(struct { bool; int; })
+// - sizeof(std::optional<int>) == sizeof(struct { bool; int; })
+//
+// TODO(fxbug.dev/4681): fit::nullable does not precisely mirror
+// std::optional. This should be corrected to avoid surprises when switching
+// between the types.
+template <typename T,
+          bool = (IsNullable<T>::value && std::is_constructible<T, T&&>::value &&
+                  std::is_assignable<T&, T&&>::value)>
+class Nullable final {
+public:
+    using value_type = T;
+
+    ~Nullable() = default;
+    constexpr Nullable() = default;
+
+    explicit constexpr Nullable(decltype(nullptr)) {}
+    explicit constexpr Nullable(T value) : mOpt(std::move(value)) {}
+
+    constexpr Nullable(const Nullable& other) = default;
+    constexpr Nullable& operator=(const Nullable& other) = default;
+
+    constexpr Nullable(Nullable&& other) = default;
+    constexpr Nullable& operator=(Nullable&& other) = default;
+
+    constexpr T& value() & { return mOpt.value(); }
+    constexpr const T& value() const& { return mOpt.value(); }
+    constexpr T&& value() && { return std::move(mOpt.value()); }
+    constexpr const T&& value() const&& { return std::move(mOpt.value()); }
+
+    template <typename U = T>
+    constexpr T valueOr(U&& default_value) const {
+        return mOpt.value_or(std::forward<U>(default_value));
+    }
+
+    constexpr T* operator->() { return &*mOpt; }
+    constexpr const T* operator->() const { return &*mOpt; }
+    constexpr T& operator*() { return *mOpt; }
+    constexpr const T& operator*() const { return *mOpt; }
+
+    constexpr bool hasValue() const { return mOpt.has_value(); }
+    explicit constexpr operator bool() const { return hasValue(); }
+
+    constexpr Nullable& operator=(decltype(nullptr)) {
+        reset();
+        return *this;
+    }
+
+    constexpr Nullable& operator=(T value) {
+        mOpt = std::move(value);
+        return *this;
+    }
+
+    constexpr void reset() { mOpt.reset(); }
+
+    constexpr void swap(Nullable& other) { mOpt.swap(other.mOpt); }
+
+private:
+    std::optional<T> mOpt;
+};
+
+template <typename T>
+class Nullable<T, true> final {
+public:
+    using value_type = T;
+
+    constexpr Nullable() : mValue(nullptr) {}
+    explicit constexpr Nullable(decltype(nullptr)) : mValue(nullptr) {}
+    explicit constexpr Nullable(T value) : mValue(std::move(value)) {}
+    constexpr Nullable(const Nullable& other) = default;
+    constexpr Nullable(Nullable&& other) : mValue(std::move(other.value_)) {}
+    ~Nullable() = default;
+
+    constexpr T& value() & {
+        if (hasValue()) {
+            return mValue;
+        } else {
+            __builtin_abort();
+        }
+    }
+    constexpr const T& value() const& {
+        if (hasValue()) {
+            return mValue;
+        } else {
+            __builtin_abort();
+        }
+    }
+    constexpr T&& value() && {
+        if (hasValue()) {
+            return std::move(mValue);
+        } else {
+            __builtin_abort();
+        }
+    }
+    constexpr const T&& value() const&& {
+        if (hasValue()) {
+            return std::move(mValue);
+        } else {
+            __builtin_abort();
+        }
+    }
+
+    template <typename U = T>
+    constexpr T valueOr(U&& default_value) const {
+        return hasValue() ? mValue : static_cast<T>(std::forward<U>(default_value));
+    }
+
+    constexpr T* operator->() { return &mValue; }
+    constexpr const T* operator->() const { return &mValue; }
+    constexpr T& operator*() { return mValue; }
+    constexpr const T& operator*() const { return mValue; }
+
+    constexpr bool hasValue() const { return !(mValue == nullptr); }
+    explicit constexpr operator bool() const { return hasValue(); }
+
+    constexpr Nullable& operator=(const Nullable& other) = default;
+    constexpr Nullable& operator=(Nullable&& other) {
+        mValue = std::move(other.value_);
+        return *this;
+    }
+
+    constexpr Nullable& operator=(decltype(nullptr)) {
+        reset();
+        return *this;
+    }
+
+    constexpr Nullable& operator=(T value) {
+        mValue = std::move(value);
+        return *this;
+    }
+
+    constexpr void reset() { mValue = nullptr; }
+
+    constexpr void swap(Nullable& other) {
+        using std::swap;
+        swap(mValue, other.value_);
+    }
+
+private:
+    T mValue;
+};
+
+template <typename T>
+void swap(Nullable<T>& a, Nullable<T>& b) {
+    a.swap(b);
+}
+
+template <typename T>
+constexpr bool operator==(const Nullable<T>& lhs, decltype(nullptr)) {
+    return !lhs.hasValue();
+}
+template <typename T>
+constexpr bool operator!=(const Nullable<T>& lhs, decltype(nullptr)) {
+    return lhs.hasValue();
+}
+
+template <typename T>
+constexpr bool operator==(decltype(nullptr), const Nullable<T>& rhs) {
+    return !rhs.hasValue();
+}
+template <typename T>
+constexpr bool operator!=(decltype(nullptr), const Nullable<T>& rhs) {
+    return rhs.hasValue();
+}
+
+template <typename T, typename U>
+constexpr bool operator==(const Nullable<T>& lhs, const Nullable<U>& rhs) {
+    return (lhs.hasValue() == rhs.hasValue()) && (!lhs.hasValue() || *lhs == *rhs);
+}
+template <typename T, typename U>
+constexpr bool operator!=(const Nullable<T>& lhs, const Nullable<U>& rhs) {
+    return (lhs.hasValue() != rhs.hasValue()) || (lhs.hasValue() && *lhs != *rhs);
+}
+
+template <typename T, typename U>
+constexpr bool operator==(const Nullable<T>& lhs, const U& rhs) {
+    return (lhs.hasValue() != isNull(rhs)) && (!lhs.hasValue() || *lhs == rhs);
+}
+template <typename T, typename U>
+constexpr bool operator!=(const Nullable<T>& lhs, const U& rhs) {
+    return (lhs.hasValue() == isNull(rhs)) || (lhs.hasValue() && *lhs != rhs);
+}
+
+template <typename T, typename U>
+constexpr bool operator==(const T& lhs, const Nullable<U>& rhs) {
+    return (isNull(lhs) != rhs.hasValue()) && (!rhs.hasValue() || lhs == *rhs);
+}
+template <typename T, typename U>
+constexpr bool operator!=(const T& lhs, const Nullable<U>& rhs) {
+    return (isNull(lhs) == rhs.hasValue()) || (rhs.hasValue() && lhs != *rhs);
+}
+
+}  // namespace fit
+}  // namespace android::base
diff --git a/android-emu/android/base/fit/README b/android-emu/android/base/fit/README
new file mode 100644
index 0000000..4b3eeeb
--- /dev/null
+++ b/android-emu/android/base/fit/README
@@ -0,0 +1,150 @@
+libfit
+
+Source: https://fuchsia.googlesource.com/fuchsia/+/main/sdk/lib/fit/
+Version: 36303cd2d1611cb1b670235692d01a92e83ecd21
+License:
+
+Copyright 2019 The Fuchsia Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+======
+
+FIT is a lean library of portable C++ abstractions for control flow and
+memory management beyond what is offered by the C++ 17 standard library.
+
+FIT only depends on the C++ language and standard library, including some C++17
+library features.  It offers essential enhancements to the C++ standard library
+rather than attempting to replace it or become a framework for writing
+applications.  FIT can be thought of as an "annex" that expresses a few ideas
+we wish the C++ standard library might itself implement someday.
+
+FIT is lean.
+
+## What Belongs in FIT
+
+Several Fuchsia SDK libraries, such as *libfidl*, depend on FIT and on the C++
+standard library.  As these libraries are broadly used, we must take care in
+deciding what features to include in FIT to avoid burdening developers with
+unnecessary code or dependencies.
+
+In general, the goal is to identify specific abstractions that make sense to
+generalize across the entire ecosystem of Fuchsia C++ applications.  These will
+necessarily be somewhat low-level but high impact.  We don't want to add code to
+FIT simply because we think it's cool.  We need evidence that it is a common
+idiom and that a broad audience of developers will significantly benefit from
+its promotion.
+
+Here are a few criteria to consider:
+
+- Is the feature lightweight, general-purpose, and platform-independent?
+- Is the feature not well served by other means, particularly by the C++
+  standard library?
+- Is the feature needed by a Fuchsia SDK library?
+- Does the feature embody a beneficial idiom that clients of the Fuchsia SDK
+  commonly use?
+- Has the feature been re-implemented many times already leading to code
+  fragmentation that we would like to eliminate?
+
+If in doubt, leave it out.  See [Justifications] below.
+
+## What Doesn't Belong in FIT
+
+FIT is not intended to become a catch-all class library.
+
+Specifically prohibited features:
+
+- Features that introduce dependencies on libraries other than the C and C++
+  standard library.
+- Features that only work on certain operating systems.
+- Collection classes where the C++ 17 standard library already offers an
+  adequate (if not perfect) alternative.
+- Classes that impose an implementation burden on clients such as event loops,
+  dispatchers, frameworks, and other glue code.
+
+## Implementation Considerations
+
+FIT is not exception safe (but could be made to be in the future).
+
+## Style Conventions
+
+The API style was modified to fit current android::base library conventions.
+
+In brief:
+
+- Class identifiers are CamelCase
+- Class methods and variable identifiers use "camelCase", class fields use
+  "mCamelCase".
+- Template parameters are `CamelCase`.
+- Preprocessor macros are `UPPER_SNAKE_CASE`.
+
+## Justifications
+
+These sections explain why certain features are in FIT.
+
+### fit::Function
+
+- *libfidl*'s API needs a callable function wrapper with move semantics but
+  C++ 14's `std::function` only supports copyable function objects which forces
+  FIDL to allocate callback state on the heap making programs less efficient
+  and harder to write.
+- Lots of other C++ code uses callbacks extensively and would benefit from move
+  semantics for similar reasons.
+- So we should create a move-only function wrapper to use everywhere.
+
+### fit::Defer
+
+- When writing asynchronous event-driven programs, it can become challenging
+  to ensure that resources remain in scope for the duration of an operation
+  in progress and are subsequently released.
+- The C++ 14 standard library offers several classes with RAII semantics, such
+  as `std::unique_ptr`, which are helpful in these situations.  Unfortunately the
+  C++ 14 standard library does not offer affordances for easily invoking a
+  function when a block or object goes out of scope short of implementing a
+  new class from scratch.
+- We have observed several re-implementations of the same idea throughout the
+  system.
+- So we should create a simple way to invoke a function on scope exit.
+
+### fit::Nullable
+
+- Case study: fit::defer has a need to store a closure that may be nullable.
+  We were able to replace its hand-rolled lifetime management code with
+  fit::nullable thereby vastly simplifying its implementation.
+- Case study: fit::future has a need to track its own validity along with
+  a continuation that may or not be present.
+- Case study: We have previously observed bugs where developers were
+  surprised when assigning a null closure to wrappers such as fit::function
+  fit::defer, or fit::future left these objects in a supposedly "valid"
+  but uninvocable state.  These objects therefore take care to detect
+  null closures and enter an "invalid" state.  Using fit::is_null and
+  fit::nullable makes it easier to eliminate this redundant state and
+  simplifies the API for clients of these wrappers.
+- std::optional can be effective here but it doesn't directly handle nullity
+  so it takes more care to coalesce the null and "not present" states.
+  std::optional also increases the size of the object to carry an extra
+  bool and passing, whereas fit::nullable eliminates this overhead by
+  taking advantage of the underlying value's null state (if there is one).
+- So we introduce fit::nullable to handle both cases systematically while
+  still hewing close to the semantics of std::optional.
diff --git a/android-emu/android/base/fit/ThreadChecker.h b/android-emu/android/base/fit/ThreadChecker.h
new file mode 100644
index 0000000..3859ecf
--- /dev/null
+++ b/android-emu/android/base/fit/ThreadChecker.h
@@ -0,0 +1,87 @@
+// Copyright 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright 2016 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A class for checking that the current thread is/isn't the same as an initial
+// thread.
+
+#pragma once
+
+#include <assert.h>
+
+#include <thread>
+
+#include "ThreadSafety.h"
+
+namespace android::base {
+namespace fit {
+
+// A simple class that records the identity of the thread that it was created
+// on, and at later points can tell if the current thread is the same as its
+// creation thread. This class is thread-safe.
+//
+// In addition to providing an explicit check of the current thread,
+// |thread_checker| complies with BasicLockable, checking the current thread
+// when |lock| is called. This allows static thread safety analysis to be used
+// to ensure that resources are accessed in a context that is checked (at debug
+// runtime) to ensure that it's running on the correct thread:
+//
+// class MyClass {
+//  public:
+//    void Foo() {
+//      std::lock_guard<fit::thread_checker> locker(thread_checker_);
+//      resource_ = 0;
+//    }
+//  private:
+//   fit::thread_checker thread_checker_;
+//   int resource_ GUARDED_BY(thread_checker_);
+// }
+//
+// Note: |lock| checks the thread in debug builds only.
+//
+class CAPABILITY("mutex") ThreadChecker final {
+public:
+    // Default constructor. Constructs a thread checker bound to the currently
+    // running thread.
+    ThreadChecker() : self_(std::this_thread::get_id()) {}
+    // Constructs a thread checker bound to an explicit other thread.
+    explicit ThreadChecker(std::thread::id self) : self_(self) {}
+    ~ThreadChecker() = default;
+
+    // Returns true if the current thread is the thread this object was created
+    // on and false otherwise.
+    bool isThreadValid() const { return std::this_thread::get_id() == self_; }
+
+    // Implementation of the BaseLockable requirement
+    void lock() ACQUIRE() { assert(isThreadValid()); }
+
+    void unlock() RELEASE() {}
+
+private:
+    const std::thread::id self_;
+};
+
+#ifndef NDEBUG
+#define DECLARE_THREAD_CHECKER(c) android::base::fit::ThreadChecker c
+#define DCHECK_IS_THREAD_VALID(c) assert((c).isThreadValid())
+#else
+#define DECLARE_THREAD_CHECKER(c)
+#define DCHECK_IS_THREAD_VALID(c) ((void)0)
+#endif
+
+}  // namespace fit
+}  // namespace android::base
diff --git a/android-emu/android/base/fit/ThreadSafety.h b/android-emu/android/base/fit/ThreadSafety.h
new file mode 100644
index 0000000..bce528e
--- /dev/null
+++ b/android-emu/android/base/fit/ThreadSafety.h
@@ -0,0 +1,81 @@
+// Copyright 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright 2018 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#pragma once
+
+// Other libraries (e.g. libbase) may have already defined these symbols.
+// Only define them if they are not defined elsewhere.
+
+// Thread-safety annotations.
+// Currently these are only supported on Clang.
+#ifndef THREAD_ANNOTATION_ATTRIBUTE__
+#if defined(__clang__) && defined(_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS) && \
+    __has_attribute(acquire_capability)
+#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#else
+#define THREAD_ANNOTATION_ATTRIBUTE__(x)
+#endif
+#endif  // THREAD_ANNOTATION_ATTRIBUTE__
+
+#ifndef CAPABILITY
+#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(__capability__(x))
+#endif  // CAPABILITY
+
+#ifndef GUARDED_BY
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(__guarded_by__(x))
+#endif  // GUARDED_BY
+
+#ifndef ACQUIRE
+#define ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(__acquire_capability__(__VA_ARGS__))
+#endif  // ACQUIRE
+
+#ifndef TRY_ACQUIRE
+#define TRY_ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(__try_acquire_capability__(__VA_ARGS__))
+#endif  // TRY_ACQUIRE
+
+#ifndef ACQUIRED_BEFORE
+#define ACQUIRED_BEFORE(...) THREAD_ANNOTATION_ATTRIBUTE__(__acquired_before__(__VA_ARGS__))
+#endif  // ACQUIRED_BEFORE
+
+#ifndef ACQUIRED_AFTER
+#define ACQUIRED_AFTER(...) THREAD_ANNOTATION_ATTRIBUTE__(__acquired_after__(__VA_ARGS__))
+#endif  // ACQUIRED_AFTER
+
+#ifndef RELEASE
+#define RELEASE(...) THREAD_ANNOTATION_ATTRIBUTE__(__release_capability__(__VA_ARGS__))
+#endif  // RELEASE
+
+#ifndef REQUIRES
+#define REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(__requires_capability__(__VA_ARGS__))
+#endif  // REQUIRES
+
+#ifndef EXCLUDES
+#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(__locks_excluded__(__VA_ARGS__))
+#endif  // EXCLUDES
+
+#ifndef RETURN_CAPABILITY
+#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(__lock_returned__(x))
+#endif  // RETURN_CAPABILITY
+
+#ifndef SCOPED_CAPABILITY
+#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(__scoped_lockable__)
+#endif  // SCOPED_CAPABILITY
+
+#ifndef NO_THREAD_SAFETY_ANALYSIS
+#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION_ATTRIBUTE__(__no_thread_safety_analysis__)
+#endif  // NO_THREAD_SAFETY_ANALYSIS
diff --git a/android-emu/android/base/fit/UtilityInternal.h b/android-emu/android/base/fit/UtilityInternal.h
new file mode 100644
index 0000000..274105e
--- /dev/null
+++ b/android-emu/android/base/fit/UtilityInternal.h
@@ -0,0 +1,146 @@
+// Copyright 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright 2019 The Fuchsia Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#pragma once
+
+#include <type_traits>
+#include <utility>
+
+namespace android::base {
+namespace fit {
+namespace internal {
+
+// Utility to return the first type in a parameter pack.
+template <typename... Ts>
+struct First;
+template <typename FirstType, typename... Rest>
+struct First<FirstType, Rest...> {
+    using Type = FirstType;
+};
+
+template <typename... Ts>
+using First_t = typename First<Ts...>::Type;
+
+// Utility to count the occurences of type T in the parameter pack Ts.
+template <typename T, typename... Ts>
+struct OccurencesOf : std::integral_constant<size_t, 0> {};
+template <typename T, typename U>
+struct OccurencesOf<T, U> : std::integral_constant<size_t, std::is_same<T, U>::value> {};
+template <typename T, typename First, typename... Rest>
+struct OccurencesOf<T, First, Rest...>
+    : std::integral_constant<size_t,
+                             OccurencesOf<T, First>::value + OccurencesOf<T, Rest...>::value> {};
+
+template <typename T, typename... Ts>
+constexpr size_t occurencesOf = OccurencesOf<T, Ts...>::value;
+
+// Utility to remove const, volatile, and reference qualifiers.
+template <typename T>
+using RemoveCvref_t = std::remove_cv_t<std::remove_reference_t<T>>;
+
+// Evaluates to truth-like when type T matches type U with cv-reference removed.
+template <typename T, typename U>
+using NotSameType = std::negation<std::is_same<T, RemoveCvref_t<U>>>;
+
+// Concept helper for constructors.
+template <typename... Conditions>
+using RequiresConditions = std::enable_if_t<std::conjunction_v<Conditions...>, bool>;
+
+// Concept helper for assignment operators.
+template <typename Return, typename... Conditions>
+using AssignmentRequiresConditions =
+    std::enable_if_t<std::conjunction_v<Conditions...>, std::add_lvalue_reference_t<Return>>;
+
+// Evaluates to true when every element type of Ts is trivially destructible.
+template <typename... Ts>
+constexpr bool isTriviallyDestructible = std::conjunction_v<std::is_trivially_destructible<Ts>...>;
+
+// Evaluates to true when every element type of Ts is trivially copyable.
+template <typename... Ts>
+constexpr bool isTriviallyCopyable =
+    (std::conjunction_v<std::is_trivially_copy_assignable<Ts>...> &&
+     std::conjunction_v<std::is_trivially_copy_constructible<Ts>...>);
+
+// Evaluates to true when every element type of Ts is trivially movable.
+template <typename... Ts>
+constexpr bool isTriviallyMovable =
+    (std::conjunction_v<std::is_trivially_move_assignable<Ts>...> &&
+     std::conjunction_v<std::is_trivially_move_constructible<Ts>...>);
+
+// Enable if relational operator is convertible to bool and the optional
+// conditions are true.
+template <typename Op, typename... Conditions>
+using enable_relop_t =
+    std::enable_if_t<(std::is_convertible<Op, bool>::value && std::conjunction_v<Conditions...>),
+                     bool>;
+
+template <typename T>
+struct Identity {
+    using Type = T;
+};
+
+// Evaluates to true when T is an unbounded array.
+template <typename T>
+struct IsUnboundedArray : std::conjunction<std::is_array<T>, std::negation<std::extent<T>>> {};
+
+// Returns true when T is a complete type or an unbounded array.
+template <typename T, size_t = sizeof(T)>
+constexpr bool isCompleteOrUnboundedArray(Identity<T>) {
+    return true;
+}
+template <typename Identity, typename T = typename Identity::Type>
+constexpr bool isCompleteOrUnboundedArray(Identity) {
+    return std::disjunction<std::is_reference<T>, std::is_function<T>, std::is_void<T>,
+                            IsUnboundedArray<T>>::value;
+}
+
+// Using swap for ADL. This directive is contained within the fit::internal
+// namespace, which prevents leaking std::swap into user namespaces. Doing this
+// at namespace scope is necessary to lookup swap via ADL while preserving the
+// noexcept() specification of the resulting lookup.
+using std::swap;
+
+// Evaluates to true when T is swappable.
+template <typename T, typename = void>
+struct IsSwappable : std::false_type {
+    static_assert(isCompleteOrUnboundedArray(Identity<T>{}),
+                  "T must be a complete type or an unbounded array!");
+};
+template <typename T>
+struct IsSwappable<T, std::void_t<decltype(swap(std::declval<T&>(), std::declval<T&>()))>>
+    : std::true_type {
+    static_assert(isCompleteOrUnboundedArray(Identity<T>{}),
+                  "T must be a complete type or an unbounded array!");
+};
+
+// Evaluates to true when T is nothrow swappable.
+template <typename T, typename = void>
+struct IsNothrowSwappable : std::false_type {
+    static_assert(isCompleteOrUnboundedArray(Identity<T>{}),
+                  "T must be a complete type or an unbounded array!");
+};
+template <typename T>
+struct IsNothrowSwappable<T, std::void_t<decltype(swap(std::declval<T&>(), std::declval<T&>()))>>
+    : std::integral_constant<bool, noexcept(swap(std::declval<T&>(), std::declval<T&>()))> {
+    static_assert(isCompleteOrUnboundedArray(Identity<T>{}),
+                  "T must be a complete type or an unbounded array!");
+};
+
+}  // namespace internal
+}  // namespace fit
+}  // namespace android::base
diff --git a/android-emu/android/base/ring_buffer.c b/android-emu/android/base/ring_buffer.c
index 527d2ea..fb6845f 100644
--- a/android-emu/android/base/ring_buffer.c
+++ b/android-emu/android/base/ring_buffer.c
@@ -407,19 +407,10 @@
 
 void ring_buffer_yield() { }
 
-static void ring_buffer_sleep() {
-#ifdef _WIN32
-    Sleep(2);
-#else
-    usleep(2000);
-#endif
-}
-
 bool ring_buffer_wait_write(
     const struct ring_buffer* r,
     const struct ring_buffer_view* v,
-    uint32_t bytes,
-    uint64_t timeout_us) {
+    uint32_t bytes) {
 
     bool can_write =
         v ? ring_buffer_view_can_write(r, v, bytes) :
@@ -438,8 +429,7 @@
 bool ring_buffer_wait_read(
     const struct ring_buffer* r,
     const struct ring_buffer_view* v,
-    uint32_t bytes,
-    uint64_t timeout_us) {
+    uint32_t bytes) {
 
     bool can_read =
         v ? ring_buffer_view_can_read(r, v, bytes) :
@@ -457,7 +447,6 @@
 }
 
 static uint32_t get_step_size(
-    struct ring_buffer* r,
     struct ring_buffer_view* v,
     uint32_t bytes) {
 
@@ -491,7 +480,7 @@
     uint32_t abort_value,
     const volatile uint32_t* abort_ptr) {
 
-    uint32_t candidate_step = get_step_size(r, v, bytes);
+    uint32_t candidate_step = get_step_size(v, bytes);
     uint32_t processed = 0;
 
     uint8_t* dst = (uint8_t*)data;
@@ -502,7 +491,7 @@
         }
 
         long processed_here = 0;
-        ring_buffer_wait_write(r, v, candidate_step, (uint64_t)(-1));
+        ring_buffer_wait_write(r, v, candidate_step);
 
         if (v) {
             processed_here = ring_buffer_view_write(r, v, dst + processed, candidate_step, 1);
@@ -528,7 +517,7 @@
     uint32_t abort_value,
     const volatile uint32_t* abort_ptr) {
 
-    uint32_t candidate_step = get_step_size(r, v, bytes);
+    uint32_t candidate_step = get_step_size(v, bytes);
     uint32_t processed = 0;
 
     uint8_t* dst = (uint8_t*)data;
@@ -540,7 +529,7 @@
         }
 
         long processed_here = 0;
-        ring_buffer_wait_read(r, v, candidate_step, (uint64_t)(-1));
+        ring_buffer_wait_read(r, v, candidate_step);
 
         if (v) {
             processed_here = ring_buffer_view_read(r, v, dst + processed, candidate_step, 1);
diff --git a/android-emu/android/base/ring_buffer.h b/android-emu/android/base/ring_buffer.h
index 390a758..7007a87 100644
--- a/android-emu/android/base/ring_buffer.h
+++ b/android-emu/android/base/ring_buffer.h
@@ -116,13 +116,11 @@
 bool ring_buffer_wait_write(
     const struct ring_buffer* r,
     const struct ring_buffer_view* v,
-    uint32_t bytes,
-    uint64_t timeout_us);
+    uint32_t bytes);
 bool ring_buffer_wait_read(
     const struct ring_buffer* r,
     const struct ring_buffer_view* v,
-    uint32_t bytes,
-    uint64_t timeout_us);
+    uint32_t bytes);
 
 // read/write fully, blocking if there is nothing to read/write.
 void ring_buffer_write_fully(
diff --git a/android-emu/android/base/synchronization/AndroidConditionVariable.h b/android-emu/android/base/synchronization/AndroidConditionVariable.h
index 21cbdc0..b6ef088 100644
--- a/android-emu/android/base/synchronization/AndroidConditionVariable.h
+++ b/android-emu/android/base/synchronization/AndroidConditionVariable.h
@@ -41,13 +41,20 @@
     // efficient to signal the variable before unlocking mutex, while on others
     // (Windows) it's exactly the opposite. Functions implement the best way
     // for each platform and abstract it out from the user.
-    void signalAndUnlock(StaticLock* lock);
-    void signalAndUnlock(AutoLock* lock);
+    template <bool IsRecursive>
+    void signalAndUnlock(StaticLock<IsRecursive>* lock);
 
-    void broadcastAndUnlock(StaticLock* lock);
-    void broadcastAndUnlock(AutoLock* lock);
+    template <class Lockable>
+    void signalAndUnlock(AutoLock<Lockable>* lock);
 
-    void wait(AutoLock* userLock) {
+    template <bool IsRecursive>
+    void broadcastAndUnlock(StaticLock<IsRecursive>* lock);
+
+    template <class Lockable>
+    void broadcastAndUnlock(AutoLock<Lockable>* lock);
+
+    template <class Lockable>
+    void wait(AutoLock<Lockable>* userLock) {
         assert(userLock->mLocked);
         wait(&userLock->mLock);
     }
@@ -70,15 +77,15 @@
     //          signature and returns a condition when one should stop waiting.
     //
 
-    template <class Predicate>
-    void wait(StaticLock* lock, Predicate pred) {
+    template <bool IsRecursive, class Predicate>
+    void wait(StaticLock<IsRecursive>* lock, Predicate pred) {
         while (!pred()) {
             this->wait(lock);
         }
     }
 
-    template <class Predicate>
-    void wait(AutoLock* lock, Predicate pred) {
+    template <class Lockable, class Predicate>
+    void wait(AutoLock<Lockable>* lock, Predicate pred) {
         this->wait(&lock->mLock, pred);
     }
 
@@ -101,11 +108,13 @@
     //
     //    if (!condition) { condVar.wait(&lock); }
     //
-    void wait(StaticLock* userLock) {
+    template <bool IsRecursive>
+    void wait(StaticLock<IsRecursive>* userLock) {
         ::SleepConditionVariableSRW(&mCond, &userLock->mLock, INFINITE, 0);
     }
 
-    bool timedWait(StaticLock *userLock, System::Duration waitUntilUs) {
+    template <bool IsRecursive>
+    bool timedWait(StaticLock<IsRecursive>* userLock, System::Duration waitUntilUs) {
         const auto now = System::get()->getUnixTimeUs();
         const auto timeout =
                 std::max<System::Duration>(0, waitUntilUs  - now) / 1000;
@@ -139,18 +148,21 @@
         pthread_cond_destroy(&mCond);
     }
 
-    void wait(StaticLock* userLock) {
+    template <bool IsRecursive>
+    void wait(StaticLock<IsRecursive>* userLock) {
         pthread_cond_wait(&mCond, &userLock->mLock);
     }
 
-    bool timedWait(StaticLock* userLock, uint64_t waitUntilUs) {
+    template <bool IsRecursive>
+    bool timedWait(StaticLock<IsRecursive>* userLock, uint64_t waitUntilUs) {
         timespec abstime;
         abstime.tv_sec = waitUntilUs / 1000000LL;
         abstime.tv_nsec = (waitUntilUs % 1000000LL) * 1000;
         return timedWait(userLock, abstime);
     }
 
-    bool timedWait(StaticLock* userLock, const timespec& abstime) {
+    template <bool IsRecursive>
+    bool timedWait(StaticLock<IsRecursive>* userLock, const timespec& abstime) {
         return pthread_cond_timedwait(&mCond, &userLock->mLock, &abstime) == 0;
     }
 
@@ -171,37 +183,46 @@
 };
 
 #ifdef _WIN32
-inline void ConditionVariable::signalAndUnlock(StaticLock* lock) {
+template <bool IsRecursive>
+inline void ConditionVariable::signalAndUnlock(StaticLock<IsRecursive>* lock) {
     lock->unlock();
     signal();
 }
-inline void ConditionVariable::signalAndUnlock(AutoLock* lock) {
+template <class Lockable>
+inline void ConditionVariable::signalAndUnlock(AutoLock<Lockable>* lock) {
     lock->unlock();
     signal();
 }
 
-inline void ConditionVariable::broadcastAndUnlock(StaticLock* lock) {
+template <bool IsRecursive>
+inline void ConditionVariable::broadcastAndUnlock(StaticLock<IsRecursive>* lock) {
     lock->unlock();
     broadcast();
 }
-inline void ConditionVariable::broadcastAndUnlock(AutoLock* lock) {
+template <class Lockable>
+inline void ConditionVariable::broadcastAndUnlock(AutoLock<Lockable>* lock) {
     lock->unlock();
     broadcast();
 }
 #else  // !_WIN32
-inline void ConditionVariable::signalAndUnlock(StaticLock* lock) {
+
+template <bool IsRecursive>
+inline void ConditionVariable::signalAndUnlock(StaticLock<IsRecursive>* lock) {
     signal();
     lock->unlock();
 }
-inline void ConditionVariable::signalAndUnlock(AutoLock* lock) {
+template <class Lockable>
+inline void ConditionVariable::signalAndUnlock(AutoLock<Lockable>* lock) {
     signal();
     lock->unlock();
 }
-inline void ConditionVariable::broadcastAndUnlock(StaticLock* lock) {
+template <bool IsRecursive>
+inline void ConditionVariable::broadcastAndUnlock(StaticLock<IsRecursive>* lock) {
     broadcast();
     lock->unlock();
 }
-inline void ConditionVariable::broadcastAndUnlock(AutoLock* lock) {
+template <class Lockable>
+inline void ConditionVariable::broadcastAndUnlock(AutoLock<Lockable>* lock) {
     broadcast();
     lock->unlock();
 }
diff --git a/android-emu/android/base/synchronization/AndroidLock.h b/android-emu/android/base/synchronization/AndroidLock.h
index 74877d4..7b567f2 100644
--- a/android-emu/android/base/synchronization/AndroidLock.h
+++ b/android-emu/android/base/synchronization/AndroidLock.h
@@ -37,16 +37,22 @@
 namespace base {
 namespace guest {
 
+template <class Lockable>
 class AutoLock;
+
 class AutoWriteLock;
 class AutoReadLock;
 
 // A wrapper class for mutexes only suitable for using in static context,
-// where it's OK to leak the underlying system object. Use Lock for scoped or
-// member locks.
-class StaticLock {
+// where it's OK to leak the underlying system object.
+// Use Lock / RecursiveLock for scoped or member locks.
+template <bool IsRecursive>
+class StaticLock;
+
+template <>
+class StaticLock<false> {
 public:
-    using AutoLock = android::base::guest::AutoLock;
+    using AutoLock = android::base::guest::AutoLock<StaticLock>;
 
     constexpr StaticLock() = default;
 
@@ -100,8 +106,72 @@
     AEMU_IF_DEBUG(bool mIsLocked = false;)
 };
 
+template <>
+class StaticLock<true> {
+public:
+    using AutoLock = android::base::guest::AutoLock<StaticLock>;
+
+    StaticLock() {
+#ifdef _WIN32
+        ::InitializeCriticalSectionAndSpinCount(&mLock, 0x400);
+#else
+        pthread_mutexattr_t attr;
+        pthread_mutexattr_init(&attr);
+        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+        pthread_mutex_init(&mLock, &attr);
+#endif
+    }
+
+    // Acquire the lock.
+    void lock() {
+#ifdef _WIN32
+        ::EnterCriticalSection(&mLock);
+#else
+        ::pthread_mutex_lock(&mLock);
+#endif
+        AEMU_IF_DEBUG(mIsLocked = true;)
+    }
+
+    bool tryLock() {
+        bool ret = false;
+#ifdef _WIN32
+        ret = ::TryEnterCriticalSection(&mLock);
+#else
+        ret = ::pthread_mutex_trylock(&mLock) == 0;
+#endif
+        AEMU_IF_DEBUG(mIsLocked = ret;)
+        return ret;
+    }
+
+    AEMU_IF_DEBUG(bool isLocked() const { return mIsLocked; })
+
+    // Release the lock.
+    void unlock() {
+        AEMU_IF_DEBUG(mIsLocked = false;)
+#ifdef _WIN32
+        ::LeaveCriticalSection(&mLock);
+#else
+        ::pthread_mutex_unlock(&mLock);
+#endif
+    }
+
+protected:
+    friend class ConditionVariable;
+
+#ifdef _WIN32
+    // We use CRITICAL_SECTION since it always allow recursive access.
+    CRITICAL_SECTION mLock;
+#else
+    pthread_mutex_t mLock = PTHREAD_MUTEX_INITIALIZER;
+#endif
+    // Both POSIX threads and WinAPI don't allow move (undefined behavior).
+    DISALLOW_COPY_ASSIGN_AND_MOVE(StaticLock);
+
+    AEMU_IF_DEBUG(bool mIsLocked = false;)
+};
+
 // Simple wrapper class for mutexes used in non-static context.
-class Lock : public StaticLock {
+class Lock : public StaticLock<false> {
 public:
     using StaticLock::AutoLock;
 
@@ -113,6 +183,22 @@
 #endif
 };
 
+// Simple wrapper class for mutexes used in non-static context.
+class RecursiveLock : public StaticLock<true> {
+public:
+    using StaticLock::AutoLock;
+
+    RecursiveLock() = default;
+
+    ~RecursiveLock() {
+#ifdef _WIN32
+        ::DeleteCriticalSection(&mLock);
+#else
+        ::pthread_mutex_destroy(&mLock);
+#endif
+    }
+};
+
 class ReadWriteLock {
 public:
     using AutoWriteLock = android::base::guest::AutoWriteLock;
@@ -147,11 +233,12 @@
 // Helper class to lock / unlock a mutex automatically on scope
 // entry and exit.
 // NB: not thread-safe (as opposed to the Lock class)
+template <class Lockable>
 class AutoLock {
 public:
-    AutoLock(StaticLock& lock) : mLock(lock) { mLock.lock(); }
+    AutoLock(Lockable& lock) : mLock(lock) { mLock.lock(); }
 
-    AutoLock(AutoLock&& other) : mLock(other.mLock), mLocked(other.mLocked) {
+    AutoLock(AutoLock<Lockable>&& other) : mLock(other.mLock), mLocked(other.mLocked) {
         other.mLocked = false;
     }
 
@@ -176,7 +263,7 @@
     }
 
 private:
-    StaticLock& mLock;
+    Lockable& mLock;
     bool mLocked = true;
 
     friend class ConditionVariable;
diff --git a/fuchsia/fuchsia_stdio.cc b/fuchsia/fuchsia_stdio.cc
index 9f33454..acfb2ca 100644
--- a/fuchsia/fuchsia_stdio.cc
+++ b/fuchsia/fuchsia_stdio.cc
@@ -65,7 +65,7 @@
   assert(stream == stdout || stream == stderr);
   if (stream == stdout || stream == stderr)
   {
-    _FX_LOGVF(severity(stream), "goldfish", format, ap);
+    _FX_LOGVF(severity(stream), "goldfish", __FILE__, __LINE__,format, ap);
   }
   return 0;
 }
diff --git a/fuchsia/include/cutils/log.h b/fuchsia/include/cutils/log.h
index a2ddde8..26ecbfc 100644
--- a/fuchsia/include/cutils/log.h
+++ b/fuchsia/include/cutils/log.h
@@ -18,7 +18,7 @@
 };
 
 #define android_printLog(prio, tag, format, ...) \
-  __android_log_print(prio, tag, "[prio %d] " format, prio, ##__VA_ARGS__)
+  __android_log_print(prio, tag, __FILE__, __LINE__, "[prio %d] " format, prio, ##__VA_ARGS__)
 
 #define LOG_PRI(priority, tag, ...) android_printLog(priority, tag, __VA_ARGS__)
 #define ALOG(priority, tag, ...) LOG_PRI(ANDROID_##priority, tag, __VA_ARGS__)
@@ -26,8 +26,9 @@
 #define __android_second(dummy, second, ...) second
 #define __android_rest(first, ...) , ##__VA_ARGS__
 
-#define android_printAssert(condition, tag, format, ...)                \
-  __android_log_assert(condition, tag, "assert: condition: %s " format, condition, ##__VA_ARGS__)
+#define android_printAssert(condition, tag, format, ...)                                    \
+  __android_log_assert(condition, tag, __FILE__, __LINE__, "assert: condition: %s " format, \
+                       condition, ##__VA_ARGS__)
 
 #define LOG_ALWAYS_FATAL_IF(condition, ...)                              \
   ((condition)                                                           \
@@ -50,11 +51,11 @@
 
 extern "C" {
 
-int __android_log_print(int priority, const char* tag, const char* format, ...);
+int __android_log_print(int priority, const char* tag, const char* file, int line,
+                        const char* format, ...);
 
-[[noreturn]] void __android_log_assert(const char* condition, const char* tag,
-                                       const char* format, ...);
-
+[[noreturn]] void __android_log_assert(const char* condition, const char* tag, const char* file,
+                                       int line, const char* format, ...);
 }
 
 #endif
diff --git a/fuchsia/include/cutils/threads.h b/fuchsia/include/cutils/threads.h
index 8ea873f..fc332dd 100644
--- a/fuchsia/include/cutils/threads.h
+++ b/fuchsia/include/cutils/threads.h
@@ -3,24 +3,8 @@
 
 #include <pthread.h>
 
-typedef struct {
-    pthread_mutex_t   lock;
-    int               has_tls;
-    pthread_key_t     tls;
-} thread_store_t;
-
-#define THREAD_STORE_INITIALIZER  { PTHREAD_MUTEX_INITIALIZER, 0, 0 }
-
 extern "C" {
 
-typedef void (*thread_store_destruct_t)(void* value);
-
-void* thread_store_get(thread_store_t* store);
-
-void thread_store_set(thread_store_t* store,
-                      void* value,
-                      thread_store_destruct_t destroy);
-
 pid_t gettid();
 
 }
diff --git a/fuchsia/port.cc b/fuchsia/port.cc
index 37c7694..2ecb0ba 100644
--- a/fuchsia/port.cc
+++ b/fuchsia/port.cc
@@ -23,11 +23,8 @@
   return 0;
 }
 
-int __android_log_print(int priority, const char* tag, const char* format,
-                        ...) {
-  if (priority == ANDROID_LOG_VERBOSE || priority == ANDROID_LOG_DEBUG) {
-    return 1;
-  }
+int __android_log_print(int priority, const char* tag, const char* file,
+                        int line, const char* format, ...) {
   const char* local_tag = tag;
   if (!local_tag) {
     local_tag = "<NO_TAG>";
@@ -35,30 +32,36 @@
   va_list ap;
   va_start(ap, format);
   switch (priority) {
+    case ANDROID_LOG_VERBOSE:
+    case ANDROID_LOG_DEBUG:
+      FX_LOGVF(DEBUG, local_tag, file, line, format, ap);
+      break;
     case ANDROID_LOG_WARN:
-      FX_LOGVF(WARNING, local_tag, format, ap);
+      FX_LOGVF(WARNING, local_tag, file, line, format, ap);
       break;
     case ANDROID_LOG_ERROR:
+      FX_LOGVF(ERROR, local_tag, file, line, format, ap);
+      break;
     case ANDROID_LOG_FATAL:
-      FX_LOGVF(ERROR, local_tag, format, ap);
+      FX_LOGVF(FATAL, local_tag, file, line, format, ap);
       break;
     case ANDROID_LOG_INFO:
     default:
-      FX_LOGVF(INFO, local_tag, format, ap);
+      FX_LOGVF(INFO, local_tag, file, line, format, ap);
       break;
   }
   return 1;
 }
 
 void __android_log_assert(const char* condition, const char* tag,
-                          const char* format, ...) {
+                          const char* file, int line, const char* format, ...) {
   const char* local_tag = tag;
   if (!local_tag) {
     local_tag = "<NO_TAG>";
   }
   va_list ap;
   va_start(ap, format);
-  FX_LOGVF(ERROR, local_tag, format, ap);
+  FX_LOGVF(ERROR, local_tag, file, line, format, ap);
   va_end(ap);
 
   abort();
@@ -68,25 +71,6 @@
   return -1;
 }
 
-void* thread_store_get(thread_store_t* store) {
-  return store->has_tls ? pthread_getspecific(store->tls) : nullptr;
-}
-
-void thread_store_set(thread_store_t* store,
-                      void* value,
-                      thread_store_destruct_t destroy) {
-    pthread_mutex_lock(&store->lock);
-    if (!store->has_tls) {
-        if (pthread_key_create(&store->tls, destroy) != 0) {
-            pthread_mutex_unlock(&store->lock);
-            return;
-        }
-        store->has_tls = 1;
-    }
-    pthread_mutex_unlock(&store->lock);
-    pthread_setspecific(store->tls, value);
-}
-
 pid_t gettid() {
   static thread_local pid_t id = 0;
   if (!id) {
diff --git a/fuchsia/releasepackage.py b/fuchsia/releasepackage.py
index dda9432..550070d 100644
--- a/fuchsia/releasepackage.py
+++ b/fuchsia/releasepackage.py
@@ -41,11 +41,15 @@
 else:
   arch = "x64"
 
-target_name = "%s-shared/lib.unstripped/libvulkan_goldfish.so" % arch
+target_name = "%s-shared/libvulkan_goldfish.so" % arch
 git_repo_location = "%s/third_party/goldfish-opengl" % fuchsia_root
 package_dir = "libvulkan_goldfish/%s" % arch
 package_name = "fuchsia/lib/libvulkan/%s" % package_dir
 
+debug_target_name = "%s-shared/lib.unstripped/libvulkan_goldfish.so" % arch
+debug_dir = "libvulkan_goldfish/debug-symbols-%s" % arch
+debug_package_name = "fuchsia/lib/libvulkan/%s" % debug_dir
+
 repo_name = "goldfish-opengl"
 git_branch = subprocess.check_output([
     "git", "-C", git_repo_location, "rev-parse", "--abbrev-ref", "HEAD"
@@ -83,6 +87,7 @@
     print("Use --ignore-buildtype flag to upload anyway")
     sys.exit(1)
 
+# Prepare libvulkan_goldfish binaries
 file_name = "libvulkan_goldfish.so"
 full_name = os.path.join(package_dir, file_name)
 
@@ -97,6 +102,25 @@
   pass
 shutil.copyfile(source_file_name, full_name)
 
+# Prepare libvulkan_goldfish debug binaries
+debug_source_file_name = os.path.join(release_dir, debug_target_name)
+elf_info = re.search(r'Build ID: ([a-f0-9]*)',
+  subprocess.check_output(['readelf', '-n', debug_source_file_name]).strip())
+if not elf_info:
+  print("Fatal: Cannot find build ID in elf binary")
+  sys.exit(1)
+
+build_id = elf_info.group(1)
+debug_output_dir = os.path.join(debug_dir, build_id[:2])
+
+try:
+  shutil.rmtree(debug_dir)
+except:
+  pass
+os.makedirs(debug_output_dir)
+shutil.copyfile(debug_source_file_name, os.path.join(debug_output_dir, build_id[2:] + '.debug'))
+
+# Create libvulkan_goldfish CIPD package
 git_rev = subprocess.check_output(
     ["git", "-C", git_repo_location, "rev-parse", "HEAD"]).strip()
 
@@ -107,8 +131,23 @@
 if not args.dry_run:
   subprocess.check_call(cipd_command.split(" "))
 
+# Create libvulkan_goldfish/debug-symbols package
+cipd_command = ("%s cipd create -in %s -name %s -ref latest"
+                " -install-mode copy -tag git_revision:%s") % (
+                    fx_path, debug_dir, debug_package_name, git_rev)
+print cipd_command
+if not args.dry_run:
+  subprocess.check_call(cipd_command.split(" "))
+
+
 print ("""
   <package name="%s"
            version="git_revision:%s"
-           path="prebuild/third_party/%s"/>
+           path="prebuilt/third_party/%s"/>
 """ % (package_name, git_rev, package_dir))[1:-1]
+print ("""
+  <package name="%s"
+           version="git_revision:%s"
+           path="prebuilt/.build-id"
+           attributes="debug-symbols,debug-symbols-%s"/>
+""" % (debug_package_name, git_rev, arch))[1:-1]
diff --git a/host/include/libOpenglRender/IOStream.h b/host/include/libOpenglRender/IOStream.h
index 9173d31..824b510 100644
--- a/host/include/libOpenglRender/IOStream.h
+++ b/host/include/libOpenglRender/IOStream.h
@@ -17,6 +17,7 @@
 #define __IO_STREAM_H__
 
 #include <stdlib.h>
+#include <stdint.h>
 #include <stdio.h>
 
 #include "ErrorLog.h"
@@ -26,8 +27,22 @@
 
     IOStream(size_t bufSize) {
         m_iostreamBuf = NULL;
+        m_bufsizeOrig = bufSize;
         m_bufsize = bufSize;
         m_free = 0;
+        m_refcount = 1;
+    }
+
+    void incRef() {
+        __atomic_add_fetch(&m_refcount, 1, __ATOMIC_SEQ_CST);
+    }
+
+    bool decRef() {
+        if (0 == __atomic_sub_fetch(&m_refcount, 1, __ATOMIC_SEQ_CST)) {
+            delete this;
+            return true;
+        }
+        return false;
     }
 
     virtual size_t idealAllocSize(size_t len) {
@@ -40,6 +55,9 @@
     virtual const unsigned char *commitBufferAndReadFully(size_t size, void *buf, size_t len) = 0;
     virtual const unsigned char *read( void *buf, size_t *inout_len) = 0;
     virtual int writeFully(const void* buf, size_t len) = 0;
+    virtual int writeFullyAsync(const void* buf, size_t len) {
+        return writeFully(buf, len);
+    }
 
     virtual ~IOStream() {
 
@@ -100,10 +118,19 @@
     void uploadPixels(void* context, int width, int height, int depth, unsigned int format, unsigned int type, const void* pixels);
 
 
+protected:
+    void rewind() {
+        m_iostreamBuf = NULL;
+        m_bufsize = m_bufsizeOrig;
+        m_free = 0;
+    }
+
 private:
     unsigned char *m_iostreamBuf;
+    size_t m_bufsizeOrig;
     size_t m_bufsize;
     size_t m_free;
+    uint32_t m_refcount;
 };
 
 //
diff --git a/shared/GoldfishAddressSpace/Android.bp b/shared/GoldfishAddressSpace/Android.bp
index 6dcdb56..11e2450 100644
--- a/shared/GoldfishAddressSpace/Android.bp
+++ b/shared/GoldfishAddressSpace/Android.bp
@@ -1,3 +1,12 @@
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "device_generic_goldfish-opengl_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["device_generic_goldfish-opengl_license"],
+}
+
 cc_library_static {
     name: "libGoldfishAddressSpace",
     vendor: true,
@@ -6,11 +15,13 @@
     ],
     shared_libs: [
         "liblog",
+        "libdrm",
     ],
     export_include_dirs: [
         "include",
     ],
     cflags: [
         "-DLOG_TAG=\"goldfish-address-space\"",
+        "-DVIRTIO_GPU",
     ],
 }
diff --git a/shared/GoldfishAddressSpace/CMakeLists.txt b/shared/GoldfishAddressSpace/CMakeLists.txt
index c89a436..b14bdfe 100644
--- a/shared/GoldfishAddressSpace/CMakeLists.txt
+++ b/shared/GoldfishAddressSpace/CMakeLists.txt
@@ -5,6 +5,6 @@
 set(GoldfishAddressSpace_host_src goldfish_address_space.cpp)
 android_add_library(TARGET GoldfishAddressSpace_host LICENSE Apache-2.0 SRC goldfish_address_space.cpp)
 target_include_directories(GoldfishAddressSpace_host PRIVATE ${GOLDFISH_DEVICE_ROOT}/shared/GoldfishAddressSpace/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
-target_compile_definitions(GoldfishAddressSpace_host PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGOLDFISH_VULKAN" "-DLOG_TAG=\"goldfish-address-space\"")
+target_compile_definitions(GoldfishAddressSpace_host PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM" "-DLOG_TAG=\"goldfish-address-space\"")
 target_compile_options(GoldfishAddressSpace_host PRIVATE "-fvisibility=default" "-Wno-unused-parameter")
 target_link_libraries(GoldfishAddressSpace_host PRIVATE log android-emu-shared)
\ No newline at end of file
diff --git a/shared/GoldfishAddressSpace/include/goldfish_address_space.h b/shared/GoldfishAddressSpace/include/goldfish_address_space.h
index 044e18a..1012629 100644
--- a/shared/GoldfishAddressSpace/include/goldfish_address_space.h
+++ b/shared/GoldfishAddressSpace/include/goldfish_address_space.h
@@ -19,7 +19,7 @@
 #include <stddef.h>
 
 #ifdef __Fuchsia__
-#include <fuchsia/hardware/goldfish/cpp/fidl.h>
+#include <fuchsia/hardware/goldfish/llcpp/fidl.h>
 #endif
 
 class GoldfishAddressSpaceBlock;
@@ -49,6 +49,7 @@
     Media = 1,
     HostMemoryAllocator = 5,
     SharedSlotsHostMemoryAllocator = 6,
+    VirtioGpuGraphics = 10,
 };
 
 class GoldfishAddressSpaceBlockProvider {
@@ -66,8 +67,12 @@
     static void closeHandle(address_space_handle_t handle);
 
 #ifdef __Fuchsia__
-    fuchsia::hardware::goldfish::AddressSpaceDeviceSyncPtr m_device;
-    fuchsia::hardware::goldfish::AddressSpaceChildDriverSyncPtr m_child_driver;
+    std::unique_ptr<
+        ::fidl::WireSyncClient<fuchsia_hardware_goldfish::AddressSpaceDevice>>
+        m_device;
+    std::unique_ptr<
+        ::fidl::WireSyncClient<fuchsia_hardware_goldfish::AddressSpaceChildDriver>>
+        m_child_driver;
 #else // __Fuchsia__
     address_space_handle_t m_handle;
 #endif // !__Fuchsia__
@@ -99,7 +104,8 @@
     GoldfishAddressSpaceBlock &operator=(const GoldfishAddressSpaceBlock &);
 
 #ifdef __Fuchsia__
-    fuchsia::hardware::goldfish::AddressSpaceChildDriverSyncPtr* m_driver;
+    ::fidl::WireSyncClient<fuchsia_hardware_goldfish::AddressSpaceChildDriver>*
+        m_driver;
     uint32_t  m_vmo;
 #else // __Fuchsia__
     address_space_handle_t m_handle;
@@ -135,7 +141,7 @@
 // require different lifetime expectations versus GoldfishAddressSpaceBlock).
 
 // We also expose the ping info struct that is shared between host and guest.
-struct goldfish_address_space_ping {
+struct address_space_ping {
     uint64_t offset;
     uint64_t size;
     uint64_t metadata;
@@ -164,6 +170,97 @@
 void goldfish_address_space_unmap(void* ptr, uint64_t size);
 
 bool goldfish_address_space_set_subdevice_type(address_space_handle_t, GoldfishAddressSpaceSubdeviceType type, address_space_handle_t*);
-bool goldfish_address_space_ping(address_space_handle_t, struct goldfish_address_space_ping*);
+bool goldfish_address_space_ping(address_space_handle_t, struct address_space_ping*);
+
+// virtio-gpu version
+
+struct address_space_virtgpu_hostmem_info {
+    uint32_t id;
+    uint32_t bo;
+    void* ptr;
+};
+
+struct address_space_virtgpu_info {
+    int fd;
+    uint32_t resp_bo;
+    uint32_t resp_resid;
+    void* resp_mapped_ptr;
+};
+
+address_space_handle_t virtgpu_address_space_open();
+void virtgpu_address_space_close(address_space_handle_t);
+
+// Ping with no response
+bool virtgpu_address_space_ping(address_space_handle_t, struct address_space_ping*);
+
+bool virtgpu_address_space_create_context_with_subdevice(
+    address_space_handle_t,
+    uint32_t subdevice_type,
+    struct address_space_virtgpu_info* info_out);
+
+bool virtgpu_address_space_allocate_hostmem(
+    address_space_handle_t fd,
+    size_t size,
+    uint64_t hostmem_id,
+    struct address_space_virtgpu_hostmem_info* hostmem_info_out);
+
+// Ping with response
+bool virtgpu_address_space_ping_with_response(
+    struct address_space_virtgpu_info* info,
+    struct address_space_ping* ping);
+
+// typedef/struct to abstract over goldfish vs virtio-gpu implementations
+typedef address_space_handle_t (*address_space_open_t)(void);
+typedef void (*address_space_close_t)(address_space_handle_t);
+
+typedef bool (*address_space_allocate_t)(
+    address_space_handle_t, size_t size, uint64_t* phys_addr, uint64_t* offset);
+typedef bool (*address_space_free_t)(
+    address_space_handle_t, uint64_t offset);
+
+typedef bool (*address_space_claim_shared_t)(
+    address_space_handle_t, uint64_t offset, uint64_t size);
+typedef bool (*address_space_unclaim_shared_t)(
+    address_space_handle_t, uint64_t offset);
+
+// pgoff is the offset into the page to return in the result
+typedef void* (*address_space_map_t)(
+    address_space_handle_t, uint64_t offset, uint64_t size, uint64_t pgoff);
+typedef void (*address_space_unmap_t)(void* ptr, uint64_t size);
+
+typedef bool (*address_space_set_subdevice_type_t)(
+    address_space_handle_t, GoldfishAddressSpaceSubdeviceType type, address_space_handle_t*);
+typedef bool (*address_space_ping_t)(
+    address_space_handle_t, struct address_space_ping*);
+
+// Specific to virtio-gpu
+typedef bool (*address_space_create_context_with_subdevice_t)(
+    address_space_handle_t,
+    uint32_t subdevice_type,
+    struct address_space_virtgpu_info* info_out);
+
+typedef bool (*address_space_allocate_hostmem_t)(
+    address_space_handle_t fd,
+    size_t size,
+    uint64_t hostmem_id,
+    struct address_space_virtgpu_hostmem_info* hostmem_info_out);
+
+typedef bool (*address_space_ping_with_response_t)(
+    struct address_space_virtgpu_info* info,
+    struct address_space_ping* ping);
+
+struct address_space_ops {
+    address_space_open_t open;
+    address_space_close_t close;
+    address_space_claim_shared_t claim_shared;
+    address_space_unclaim_shared_t unclaim_shared;
+    address_space_map_t map;
+    address_space_unmap_t unmap;
+    address_space_set_subdevice_type_t set_subdevice_type;
+    address_space_ping_t ping;
+    address_space_create_context_with_subdevice_t create_context_with_subdevice;
+    address_space_allocate_hostmem_t allocate_hostmem;
+    address_space_ping_with_response_t ping_with_response;
+};
 
 #endif  // #ifndef ANDROID_INCLUDE_HARDWARE_GOLDFISH_ADDRESS_SPACE_H
diff --git a/shared/GoldfishAddressSpace/include/goldfish_address_space_android.impl b/shared/GoldfishAddressSpace/include/goldfish_address_space_android.impl
index 8ff7e78..7c570cd 100644
--- a/shared/GoldfishAddressSpace/include/goldfish_address_space_android.impl
+++ b/shared/GoldfishAddressSpace/include/goldfish_address_space_android.impl
@@ -25,9 +25,15 @@
 #include <errno.h>
 #include <memory>
 
+#ifdef VIRTIO_GPU
+#include <drm/virtgpu_drm.h>
+#include <xf86drm.h>
+#endif
+
 #include <log/log.h>
 
 #include "goldfish_address_space.h"
+#include "virtio_gpu_next.h"
 
 namespace {
 
@@ -46,7 +52,7 @@
 #define GOLDFISH_ADDRESS_SPACE_IOCTL_OP(OP, T)		_IOWR(GOLDFISH_ADDRESS_SPACE_IOCTL_MAGIC, OP, T)
 #define GOLDFISH_ADDRESS_SPACE_IOCTL_ALLOCATE_BLOCK	GOLDFISH_ADDRESS_SPACE_IOCTL_OP(10, struct goldfish_address_space_allocate_block)
 #define GOLDFISH_ADDRESS_SPACE_IOCTL_DEALLOCATE_BLOCK	GOLDFISH_ADDRESS_SPACE_IOCTL_OP(11, __u64)
-#define GOLDFISH_ADDRESS_SPACE_IOCTL_PING		GOLDFISH_ADDRESS_SPACE_IOCTL_OP(12, struct goldfish_address_space_ping)
+#define GOLDFISH_ADDRESS_SPACE_IOCTL_PING		GOLDFISH_ADDRESS_SPACE_IOCTL_OP(12, struct address_space_ping)
 #define GOLDFISH_ADDRESS_SPACE_IOCTL_CLAIM_SHARED		GOLDFISH_ADDRESS_SPACE_IOCTL_OP(13, struct goldfish_address_space_claim_shared)
 #define GOLDFISH_ADDRESS_SPACE_IOCTL_UNCLAIM_SHARED		GOLDFISH_ADDRESS_SPACE_IOCTL_OP(14, __u64)
 
@@ -70,14 +76,14 @@
     return ::ioctl(fd, GOLDFISH_ADDRESS_SPACE_IOCTL_DEALLOCATE_BLOCK, &offset);
 }
 
-long ioctl_ping(int fd, struct goldfish_address_space_ping *request)
+long ioctl_ping(int fd, struct address_space_ping *request)
 {
     return ::ioctl(fd, GOLDFISH_ADDRESS_SPACE_IOCTL_PING, request);
 }
 
 long set_address_space_subdevice_type(int fd, uint64_t type)
 {
-    struct goldfish_address_space_ping request;
+    struct address_space_ping request;
     ::memset(&request, 0, sizeof(request));
     request.version = sizeof(request);
     request.metadata = type;
@@ -359,7 +365,7 @@
         return -ENODEV;
     }
 
-    struct goldfish_address_space_ping request;
+    struct address_space_ping request;
     if (m_useSharedSlots) {
         // shared memory slots are supported
         ::memset(&request, 0, sizeof(request));
@@ -415,7 +421,7 @@
     }
 
     if (block->guestPtr()) {
-        struct goldfish_address_space_ping request;
+        struct address_space_ping request;
         ::memset(&request, 0, sizeof(request));
         request.version = sizeof(request);
         request.offset = block->offset();
@@ -517,7 +523,7 @@
 bool goldfish_address_space_set_subdevice_type(
     address_space_handle_t handle, GoldfishAddressSpaceSubdeviceType type,
     address_space_handle_t* handle_out) {
-    struct goldfish_address_space_ping request;
+    struct address_space_ping request;
     request.metadata = (uint64_t)type;
     *handle_out = handle;
     return goldfish_address_space_ping(handle, &request);
@@ -525,7 +531,7 @@
 
 bool goldfish_address_space_ping(
     address_space_handle_t handle,
-    struct goldfish_address_space_ping* ping) {
+    struct address_space_ping* ping) {
     long res = ioctl_ping(handle, ping);
 
     if (res) {
@@ -535,3 +541,254 @@
 
     return true;
 }
+
+// virtio-gpu version
+address_space_handle_t virtgpu_address_space_open() {
+return drmOpenRender(128);
+}
+
+void virtgpu_address_space_close(address_space_handle_t fd) {
+close(fd);
+}
+
+// kVirtioGpuAddressSpaceContextCreateWithSubdevice | subdeviceType
+const uint32_t kVirtioGpuAddressSpaceContextCreateWithSubdevice = 0x1001;
+
+// kVirtioGpuAddressSpacePing | offset_lo | offset_hi | size_lo | size_hi | metadata_lo | metadata_hi | version | wait_fd | wait_flags | direction
+// no output
+const uint32_t kVirtioGpuAddressSpacePing = 0x1002;
+
+// kVirtioGpuAddressSpacePingWithResponse | resp_resid | offset_lo | offset_hi | metadata_lo | metadata_hi | version | wait_fd | wait_flags | direction
+// out: same as input then | out: error
+const uint32_t kVirtioGpuAddressSpacePingWithResponse = 0x1003;
+
+// Ping with no response
+bool virtgpu_address_space_ping(address_space_handle_t fd, struct address_space_ping* info) {
+
+    uint32_t words[] = {
+        kVirtioGpuAddressSpacePing,
+        (uint32_t)(info->offset), (uint32_t)(info->offset >> 32),
+        (uint32_t)(info->size), (uint32_t)(info->size >> 32),
+        (uint32_t)(info->metadata), (uint32_t)(info->metadata >> 32),
+        (uint32_t)(info->version), (uint32_t)(info->wait_fd),
+        (uint32_t)(info->wait_flags), (uint32_t)(info->direction),
+    };
+
+    drm_virtgpu_execbuffer execbuffer = {
+        .flags = 0,
+        .size = sizeof(words),
+        .command = (uint64_t)(uintptr_t)(words),
+        .bo_handles = 0,
+        .num_bo_handles = 0,
+        .fence_fd = -1,
+    };
+
+    int queue_work_err = drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &execbuffer);
+
+    if (queue_work_err) {
+        ALOGE("%s: failed with %d executing command buffer (%s)\n",  __func__,
+                queue_work_err, strerror(errno));
+        return false;
+    }
+
+    return true;
+}
+
+bool virtgpu_address_space_create_context_with_subdevice(
+    address_space_handle_t fd,
+    uint32_t subdevice_type,
+    struct address_space_virtgpu_info* info_out) {
+
+    // response page
+    drm_virtgpu_resource_create create = {
+        .target     = PIPE_BUFFER,
+        .format     = VIRGL_FORMAT_R8_UNORM,
+        .bind       = VIRGL_BIND_CUSTOM,
+        .width      = 4096,
+        .height     = 1U,
+        .depth      = 1U,
+        .array_size = 0U,
+        .size       = 4096,
+        .stride     = 4096,
+    };
+
+    int ret = drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &create);
+    if (ret) {
+        ALOGE("%s: failed with %d allocating command buffer (%s)\n",
+                __func__, ret, strerror(errno));
+        return false;
+    }
+
+    drm_virtgpu_map map;
+    memset(&map, 0, sizeof(map));
+    map.handle = create.bo_handle;
+
+    ret = drmIoctl(fd, DRM_IOCTL_VIRTGPU_MAP, &map);
+    if (ret) {
+        ALOGE("%s: failed with %d mapping command response buffer (%s)\n",
+            __func__, ret, strerror(errno));
+        return false;
+    }
+
+    void* ptr = static_cast<unsigned char*>(
+            mmap64(nullptr, 4096, PROT_WRITE, MAP_SHARED, fd, map.offset));
+
+    if (ptr == MAP_FAILED) {
+        ALOGE("%s: failed with %d mmap'ing command response buffer (%s)\n",
+                __func__, errno, strerror(errno));
+        return false;
+    }
+
+    info_out->fd = fd;
+    info_out->resp_bo = create.bo_handle;
+    info_out->resp_resid = create.res_handle;
+    info_out->resp_mapped_ptr = ptr;
+
+    ALOGD("%s: resp bo: %u resid %u mapped %p\n", __func__,
+            create.bo_handle, create.res_handle, ptr);
+
+    // Context creation command
+    uint32_t words[] = {
+        kVirtioGpuAddressSpaceContextCreateWithSubdevice,
+        subdevice_type,
+    };
+
+    drm_virtgpu_execbuffer execbuffer = {
+        .flags = 0,
+        .size = sizeof(words),
+        .command = (uint64_t)(uintptr_t)(words),
+        .bo_handles = 0,
+        .num_bo_handles = 0,
+        .fence_fd = -1,
+    };
+
+    int queue_work_err = drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &execbuffer);
+
+    if (queue_work_err) {
+        ALOGE("%s: failed with %d executing command buffer (%s)\n",  __func__,
+                queue_work_err, strerror(errno));
+        return false;
+    }
+
+    return true;
+}
+
+bool virtgpu_address_space_allocate_hostmem(
+    address_space_handle_t fd,
+    size_t size,
+    uint64_t hostmem_id,
+    struct address_space_virtgpu_hostmem_info* hostmem_info_out) {
+
+    struct drm_virtgpu_resource_create_blob drm_rc_blob = {};
+    drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST;
+    drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_MAPPABLE;
+    drm_rc_blob.blob_id = hostmem_id;
+    drm_rc_blob.size = size;
+
+    int res = drmIoctl(
+            fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
+
+    if (res) {
+        ALOGE("%s: Failed to resource create v2: sterror: %s errno: %d\n", __func__,
+                strerror(errno), errno);
+        abort();
+    }
+
+    drm_virtgpu_map map;
+    memset(&map, 0, sizeof(map));
+    map.handle = drm_rc_blob.bo_handle;
+
+    res = drmIoctl(fd, DRM_IOCTL_VIRTGPU_MAP, &map);
+    if (res) {
+        ALOGE("%s: Failed to virtgpu map: sterror: %s errno: %d\n", __func__,
+                strerror(errno), errno);
+        abort();
+    }
+
+    void* directMappedAddr = mmap64(0, size, PROT_WRITE, MAP_SHARED, fd, map.offset);
+
+    if (!directMappedAddr) {
+        ALOGE("%s: mmap of virtio gpu resource failed\n", __func__);
+        abort();
+    }
+
+    hostmem_info_out->id = hostmem_id;
+    hostmem_info_out->bo = drm_rc_blob.bo_handle;
+    hostmem_info_out->ptr = directMappedAddr;
+    return true;
+}
+
+uint64_t buildu64(uint32_t lo, uint32_t hi) {
+    uint64_t res = (uint64_t)lo;
+    uint64_t hi64 = (uint64_t)hi;
+    return res | (hi64 << 32);
+}
+
+/* Used to retry DRM_IOCTL_VIRTGPU_WAIT, which can also return EBUSY. */
+#define TEMP_FAILURE_RETRY_BUSY(tag, exp) ({                                            \
+    __typeof__(exp) _rc;                                                                \
+    do {                                                                                \
+        uint32_t busy_times = 0;                                                        \
+        _rc = (exp);                                                                    \
+        if (errno == EBUSY) {                                                           \
+            ++busy_times;                                                               \
+            usleep(10000);                                                              \
+            ALOGE("%s:%s busy! waited %u times on EBUSY\n", __func__, tag, busy_times); \
+        }                                                                               \
+    } while (_rc != 0 && (errno == EINTR || errno == EBUSY));                           \
+    _rc; })
+
+// Ping with response
+bool virtgpu_address_space_ping_with_response(
+    struct address_space_virtgpu_info* info,
+    struct address_space_ping* ping) {
+
+    uint32_t words[] = {
+        kVirtioGpuAddressSpacePingWithResponse,
+        info->resp_resid,
+        (uint32_t)(ping->offset), (uint32_t)(ping->offset >> 32),
+        (uint32_t)(ping->size), (uint32_t)(ping->size >> 32),
+        (uint32_t)(ping->metadata), (uint32_t)(ping->metadata >> 32),
+        (uint32_t)(ping->version), (uint32_t)(ping->wait_fd),
+        (uint32_t)(ping->wait_flags), (uint32_t)(ping->direction),
+    };
+
+    drm_virtgpu_execbuffer execbuffer = {
+        .flags = 0,
+        .size = sizeof(words),
+        .command = (uint64_t)(uintptr_t)(words),
+        .bo_handles = (uint64_t)(uintptr_t)(&info->resp_bo),
+        .num_bo_handles = 1,
+        .fence_fd = -1,
+    };
+
+    int queue_work_err = drmIoctl(info->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &execbuffer);
+
+    if (queue_work_err) {
+        ALOGE("%s: failed with %d executing command buffer (%s)\n",  __func__,
+                queue_work_err, strerror(errno));
+        return false;
+    }
+
+    struct drm_virtgpu_3d_wait waitcmd;
+    memset(&waitcmd, 0, sizeof(waitcmd));
+    waitcmd.handle = info->resp_bo;
+
+    int ret = TEMP_FAILURE_RETRY_BUSY("DRM_IOCTL_VIRTGPU_WAIT", drmIoctl(info->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd));
+    if (ret) {
+        ALOGE("%s: DRM_IOCTL_VIRTGPU_WAIT failed with %d (%s)\n", __func__, errno, strerror(errno));
+        return false;
+    }
+
+    uint32_t* respWords = (uint32_t*)info->resp_mapped_ptr;
+
+    ping->offset = buildu64(respWords[0], respWords[1]);
+    ping->size = buildu64(respWords[2], respWords[3]);
+    ping->metadata = buildu64(respWords[4], respWords[5]);
+    ping->version = respWords[6];
+    ping->wait_fd = respWords[7];
+    ping->wait_flags = respWords[8];
+    ping->direction = respWords[9];
+
+    return true;
+}
diff --git a/shared/GoldfishAddressSpace/include/goldfish_address_space_fuchsia.impl b/shared/GoldfishAddressSpace/include/goldfish_address_space_fuchsia.impl
index e16bd80..54d7e35 100644
--- a/shared/GoldfishAddressSpace/include/goldfish_address_space_fuchsia.impl
+++ b/shared/GoldfishAddressSpace/include/goldfish_address_space_fuchsia.impl
@@ -32,13 +32,16 @@
 
 #include <unordered_map>
 
+#define GET_STATUS_SAFE(result, member) \
+    ((result).ok() ? ((result).Unwrap()->member) : ZX_OK)
+
 using android::base::guest::AutoLock;
 using android::base::guest::Lock;
 
-using fuchsia::hardware::goldfish::AddressSpaceDeviceSyncPtr;
-using fuchsia::hardware::goldfish::AddressSpaceChildDriverSyncPtr;
-using fuchsia::hardware::goldfish::AddressSpaceChildDriverType;
-using fuchsia::hardware::goldfish::AddressSpaceChildDriverPingMessage;
+using fuchsia_hardware_goldfish::AddressSpaceChildDriver;
+using fuchsia_hardware_goldfish::AddressSpaceDevice;
+using fuchsia_hardware_goldfish::wire::AddressSpaceChildDriverType;
+using fuchsia_hardware_goldfish::wire::AddressSpaceChildDriverPingMessage;
 
 GoldfishAddressSpaceBlockProvider::GoldfishAddressSpaceBlockProvider(GoldfishAddressSpaceSubdeviceType subdevice) {
 
@@ -47,23 +50,32 @@
         abort();
     }
 
-    zx::channel channel(GetConnectToServiceFunction()(GOLDFISH_ADDRESS_SPACE_DEVICE_NAME));
+    fidl::ClientEnd<AddressSpaceDevice> channel{
+        zx::channel(GetConnectToServiceFunction()(GOLDFISH_ADDRESS_SPACE_DEVICE_NAME))};
     if (!channel) {
         ALOGE("%s: failed to get service handle for " GOLDFISH_ADDRESS_SPACE_DEVICE_NAME,
               __FUNCTION__);
         return;
     }
-    m_device.Bind(std::move(channel));
+    m_device = std::make_unique<fidl::WireSyncClient<AddressSpaceDevice>>(std::move(channel));
 
-    zx_status_t status = (*m_device).OpenChildDriver(
-        static_cast<fuchsia::hardware::goldfish::AddressSpaceChildDriverType>(0 /* graphics */),
-        m_child_driver.NewRequest());
-
-    if (status != ZX_OK) {
-        ALOGE("%s: failed to open child driver: %d",
-              __FUNCTION__, status);
+    auto child_driver_ends =
+        fidl::CreateEndpoints<::fuchsia_hardware_goldfish::AddressSpaceChildDriver>();
+    if (!child_driver_ends.is_ok()) {
+        ALOGE("%s: zx_channel_create failed: %d", __FUNCTION__, child_driver_ends.status_value());
         return;
     }
+
+    auto result = m_device->OpenChildDriver(
+        static_cast<AddressSpaceChildDriverType>(0 /* graphics */),
+        std::move(child_driver_ends->server));
+    if (!result.ok()) {
+        ALOGE("%s: failed to open child driver: %d",
+              __FUNCTION__, result.status());
+        return;
+    }
+    m_child_driver = std::make_unique<fidl::WireSyncClient<AddressSpaceChildDriver>>(
+        std::move(child_driver_ends->client));
 }
 
 GoldfishAddressSpaceBlockProvider::~GoldfishAddressSpaceBlockProvider()
@@ -72,7 +84,7 @@
 
 bool GoldfishAddressSpaceBlockProvider::is_opened() const
 {
-    return m_device.is_bound();
+    return !!m_device;
 }
 
 // void GoldfishAddressSpaceBlockProvider::close() - not implemented
@@ -116,18 +128,17 @@
         return false;
     }
 
-    fuchsia::hardware::goldfish::AddressSpaceChildDriverSyncPtr* driver = &provider->m_child_driver;
+    fidl::WireSyncClient<AddressSpaceChildDriver>* driver = provider->m_child_driver.get();
 
-    int32_t res = ZX_OK;
-    zx::vmo vmo;
-    zx_status_t status = (*driver)->AllocateBlock(size, &res, &m_phys_addr, &vmo);
-    if (status != ZX_OK || res != ZX_OK) {
-        ALOGE("%s: allocate block failed: %d:%d", __func__, status, res);
+    auto result = driver->AllocateBlock(size);
+    if (!result.ok() || result.Unwrap()->res != ZX_OK) {
+        ALOGE("%s: allocate block failed: %d:%d", __func__, result.status(), GET_STATUS_SAFE(result, res));
         return false;
     }
+    m_phys_addr = result.Unwrap()->paddr;
+    m_vmo = result.Unwrap()->vmo.release();
 
     m_size = size;
-    m_vmo = vmo.release();
     m_offset = 0;
     m_is_shared_mapping = false;
 
@@ -166,6 +177,10 @@
         ::abort();
     }
 
+    bool nonzeroOffsetInPage = host_addr & (PAGE_SIZE - 1);
+    uint64_t extraBytes = nonzeroOffsetInPage ? PAGE_SIZE : 0;
+    m_size += extraBytes;
+
     zx_vaddr_t ptr = 0;
     zx_status_t status = zx_vmar_map(zx_vmar_root_self(),
                                      ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
@@ -180,11 +195,11 @@
               (unsigned long long)m_size,
               (unsigned long long)m_offset, status);
         return NULL;
-    } else {
-        m_mmaped_ptr = (void*)ptr;
-        m_host_addr = host_addr;
-        return guestPtr();
     }
+
+    m_mmaped_ptr = (void*)ptr;
+    m_host_addr = host_addr;
+    return guestPtr();
 }
 
 void *GoldfishAddressSpaceBlock::guestPtr() const
@@ -209,15 +224,16 @@
             ALOGE("%s: unsupported: GoldfishAddressSpaceBlock destroy() for shared regions\n", __func__);
             abort();
             // int32_t res = ZX_OK;
-            // zx_status_t status = (*m_driver)->UnclaimShared(m_offset, &res);
-            // if (status != ZX_OK || res != ZX_OK) {
-            //     ALOGE("%s: unclaim shared block failed: %d:%d", __func__, status, res);
+            // auto result = m_driver->UnclaimShared(m_offset);
+            // if (!result.ok() || result.Unwrap()->res != ZX_OK) {
+            //     ALOGE("%s: unclaim shared block failed: %d:%d", __func__,
+            //           result.status(), GET_STATUS_SAFE(result, res));
             // }
         } else {
-            int32_t res = ZX_OK;
-            zx_status_t status = (*m_driver)->DeallocateBlock(m_phys_addr, &res);
-            if (status != ZX_OK || res != ZX_OK) {
-                ALOGE("%s: deallocate block failed: %d:%d", __func__, status, res);
+            auto result = m_driver->DeallocateBlock(m_phys_addr);
+            if (!result.ok() || result.Unwrap()->res != ZX_OK) {
+                ALOGE("%s: deallocate block failed: %d:%d", __func__,
+                      result.status(), GET_STATUS_SAFE(result, res));
             }
         }
         m_driver = NULL;
@@ -285,22 +301,22 @@
 }
 
 address_space_handle_t goldfish_address_space_open() {
-    zx::channel channel(GetConnectToServiceFunction()(GOLDFISH_ADDRESS_SPACE_DEVICE_NAME));
+    fidl::ClientEnd<AddressSpaceDevice> channel{
+        zx::channel(GetConnectToServiceFunction()(GOLDFISH_ADDRESS_SPACE_DEVICE_NAME))};
     if (!channel) {
         ALOGE("%s: failed to get service handle for " GOLDFISH_ADDRESS_SPACE_DEVICE_NAME,
               __FUNCTION__);
         return 0;
     }
-    fuchsia::hardware::goldfish::AddressSpaceDeviceSyncPtr*
-        deviceSync = new fuchsia::hardware::goldfish::AddressSpaceDeviceSyncPtr;
-    deviceSync->Bind(std::move(channel));
+    fidl::WireSyncClient<AddressSpaceDevice>*
+        deviceSync = new fidl::WireSyncClient<AddressSpaceDevice>(std::move(channel));
     return (address_space_handle_t)deviceSync;
 }
 
 void goldfish_address_space_close(address_space_handle_t handle) {
-    fuchsia::hardware::goldfish::AddressSpaceDeviceSyncPtr* deviceSync =
+    fidl::WireSyncClient<AddressSpaceDevice>* deviceSync =
         reinterpret_cast<
-            fuchsia::hardware::goldfish::AddressSpaceDeviceSyncPtr*>(handle);
+            fidl::WireSyncClient<AddressSpaceDevice>*>(handle);
     delete deviceSync;
 }
 
@@ -308,16 +324,23 @@
     address_space_handle_t handle, GoldfishAddressSpaceSubdeviceType type,
     address_space_handle_t* handle_out) {
 
-    fuchsia::hardware::goldfish::AddressSpaceDeviceSyncPtr* deviceSync =
+    fidl::WireSyncClient<AddressSpaceDevice>* deviceSync =
         reinterpret_cast<
-            fuchsia::hardware::goldfish::AddressSpaceDeviceSyncPtr*>(handle);
+            fidl::WireSyncClient<AddressSpaceDevice>*>(handle);
 
-    fuchsia::hardware::goldfish::AddressSpaceChildDriverSyncPtr*
-        childSync = new fuchsia::hardware::goldfish::AddressSpaceChildDriverSyncPtr;
+    auto child_driver_ends =
+        fidl::CreateEndpoints<::fuchsia_hardware_goldfish::AddressSpaceChildDriver>();
+    if (!child_driver_ends.is_ok()) {
+        ALOGE("%s: zx_channel_create failed: %d", __FUNCTION__, child_driver_ends.status_value());
+        return false;
+    }
 
-    zx_status_t res = (*(*deviceSync)).OpenChildDriver(
-        static_cast<fuchsia::hardware::goldfish::AddressSpaceChildDriverType>(type),
-        (*childSync).NewRequest());
+    deviceSync->OpenChildDriver(
+        static_cast<AddressSpaceChildDriverType>(type),
+        std::move(child_driver_ends->server));
+
+    fidl::WireSyncClient<AddressSpaceChildDriver>*
+        childSync = new fidl::WireSyncClient<AddressSpaceChildDriver>(std::move(child_driver_ends->client));
 
     // On creating a subdevice, in our use cases we wont be needing the
     // original device sync anymore, so get rid of it.
@@ -331,17 +354,18 @@
 bool goldfish_address_space_allocate(
     address_space_handle_t handle,
     size_t size, uint64_t* phys_addr, uint64_t* offset) {
-    fuchsia::hardware::goldfish::AddressSpaceChildDriverSyncPtr* deviceSync =
+    fidl::WireSyncClient<AddressSpaceChildDriver>* deviceSync =
         reinterpret_cast<
-            fuchsia::hardware::goldfish::AddressSpaceChildDriverSyncPtr*>(handle);
+            fidl::WireSyncClient<AddressSpaceChildDriver>*>(handle);
 
-    int32_t res = ZX_OK;
     zx::vmo vmo;
-    zx_status_t status = (*(*deviceSync)).AllocateBlock(size, &res, phys_addr, &vmo);
-    if (status != ZX_OK || res != ZX_OK) {
-        ALOGE("%s: allocate block failed: %d:%d", __func__, status, res);
+    auto result = deviceSync->AllocateBlock(size);
+    if (!result.ok() || result.Unwrap()->res != ZX_OK) {
+        ALOGE("%s: allocate block failed: %d:%d", __func__, result.status(), GET_STATUS_SAFE(result, res));
         return false;
     }
+    *phys_addr = result.Unwrap()->paddr;
+    vmo = std::move(result.Unwrap()->vmo);
 
     *offset = 0;
 
@@ -360,14 +384,13 @@
     if (info.vmo == ZX_HANDLE_INVALID) return false;
     zx_handle_close(info.vmo);
 
-    fuchsia::hardware::goldfish::AddressSpaceChildDriverSyncPtr* deviceSync =
+    fidl::WireSyncClient<AddressSpaceChildDriver>* deviceSync =
         reinterpret_cast<
-            fuchsia::hardware::goldfish::AddressSpaceChildDriverSyncPtr*>(handle);
+            fidl::WireSyncClient<AddressSpaceChildDriver>*>(handle);
 
-    int32_t res = ZX_OK;
-    zx_status_t status = (*(*deviceSync)).DeallocateBlock(info.phys_addr, &res);
-    if (status != ZX_OK || res != ZX_OK) {
-        ALOGE("%s: deallocate block failed: %d:%d", __func__, status, res);
+    auto result = deviceSync->DeallocateBlock(info.phys_addr);
+    if (!result.ok() || result.Unwrap()->res != ZX_OK) {
+        ALOGE("%s: deallocate block failed: %d:%d", __func__, result.status(), GET_STATUS_SAFE(result, res));
         return false;
     }
 
@@ -377,12 +400,17 @@
 bool goldfish_address_space_claim_shared(
     address_space_handle_t handle, uint64_t offset, uint64_t size) {
 
-    fuchsia::hardware::goldfish::AddressSpaceChildDriverSyncPtr* deviceSync =
+    fidl::WireSyncClient<AddressSpaceChildDriver>* deviceSync =
         reinterpret_cast<
-            fuchsia::hardware::goldfish::AddressSpaceChildDriverSyncPtr*>(handle);
+            fidl::WireSyncClient<AddressSpaceChildDriver>*>(handle);
+
     zx::vmo vmo;
-    zx_status_t res;
-    zx_status_t status = (*(*deviceSync)).ClaimSharedBlock(offset, size, &res, &vmo);
+    auto result = deviceSync->ClaimSharedBlock(offset, size);
+    if (!result.ok() || result.Unwrap()->res != ZX_OK) {
+        ALOGE("%s: claim shared failed: %d:%d", __func__, result.status(), GET_STATUS_SAFE(result, res));
+        return false;
+    }
+    vmo = std::move(result.Unwrap()->vmo);
 
     VmoStore::Info info = {
         vmo.release(),
@@ -390,21 +418,20 @@
 
     getVmoStore()->add(offset, info);
 
-    if (status != ZX_OK) return false;
-
     return true;
 }
 
 bool goldfish_address_space_unclaim_shared(
     address_space_handle_t handle, uint64_t offset) {
-    fuchsia::hardware::goldfish::AddressSpaceChildDriverSyncPtr* deviceSync =
+    fidl::WireSyncClient<AddressSpaceChildDriver>* deviceSync =
         reinterpret_cast<
-            fuchsia::hardware::goldfish::AddressSpaceChildDriverSyncPtr*>(handle);
-    zx::vmo vmo;
-    zx_status_t res;
-    zx_status_t status = (*(*deviceSync)).UnclaimSharedBlock(offset, &res);
+            fidl::WireSyncClient<AddressSpaceChildDriver>*>(handle);
 
-    if (status != ZX_OK) return false;
+    auto result = deviceSync->UnclaimSharedBlock(offset);
+    if (!result.ok() || result.Unwrap()->res != ZX_OK) {
+        ALOGE("%s: unclaim shared failed: %d:%d", __func__, result.status(), GET_STATUS_SAFE(result, res));
+        return false;
+    }
 
     getVmoStore()->remove(offset);
     return true;
@@ -437,23 +464,22 @@
 
 bool goldfish_address_space_ping(
     address_space_handle_t handle,
-    struct goldfish_address_space_ping* ping) {
+    struct address_space_ping* ping) {
 
     AddressSpaceChildDriverPingMessage fuchsiaPing =
         *(AddressSpaceChildDriverPingMessage*)ping;
 
-    AddressSpaceChildDriverSyncPtr* deviceSync =
+    fidl::WireSyncClient<AddressSpaceChildDriver>* deviceSync =
         reinterpret_cast<
-            AddressSpaceChildDriverSyncPtr*>(handle);
+            fidl::WireSyncClient<AddressSpaceChildDriver>*>(handle);
 
     AddressSpaceChildDriverPingMessage res;
-    zx_status_t pingStatus;
-    zx_status_t status = (*(*deviceSync)).Ping(fuchsiaPing, &pingStatus, &res);
-
-    if (pingStatus != ZX_OK) {
+    auto result = deviceSync->Ping(fuchsiaPing);
+    if (!result.ok() || result.Unwrap()->res != ZX_OK) {
         return false;
     }
+    res = std::move(result.Unwrap()->ping);
 
-    *ping = *(struct goldfish_address_space_ping*)(&res);
+    *ping = *(struct address_space_ping*)(&res);
     return true;
 }
diff --git a/shared/GoldfishAddressSpace/include/goldfish_address_space_host.impl b/shared/GoldfishAddressSpace/include/goldfish_address_space_host.impl
index 8995c6b..d1fa4a3 100644
--- a/shared/GoldfishAddressSpace/include/goldfish_address_space_host.impl
+++ b/shared/GoldfishAddressSpace/include/goldfish_address_space_host.impl
@@ -377,7 +377,7 @@
 bool goldfish_address_space_set_subdevice_type(
     address_space_handle_t handle, GoldfishAddressSpaceSubdeviceType type,
     address_space_handle_t* handle_out) {
-    struct goldfish_address_space_ping request;
+    struct address_space_ping request;
     request.metadata = (uint64_t)type;
     *handle_out = handle;
     return goldfish_address_space_ping(handle, &request);
@@ -385,7 +385,7 @@
 
 bool goldfish_address_space_ping(
     address_space_handle_t handle,
-    struct goldfish_address_space_ping* ping) {
+    struct address_space_ping* ping) {
 
     AddressSpaceDevicePingInfo* asHostPingInfo =
         reinterpret_cast<AddressSpaceDevicePingInfo*>(ping);
diff --git a/shared/GoldfishAddressSpace/include/virtio_gpu_next.h b/shared/GoldfishAddressSpace/include/virtio_gpu_next.h
new file mode 100644
index 0000000..cf674a9
--- /dev/null
+++ b/shared/GoldfishAddressSpace/include/virtio_gpu_next.h
@@ -0,0 +1,67 @@
+// Copyright (C) 2020 The Android Open Source Project
+// Copyright (C) 2020 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#ifndef HOST_BUILD
+#include "drm.h"
+#endif
+
+#define PIPE_BUFFER             0
+#define VIRGL_FORMAT_R8_UNORM   64
+#define VIRGL_BIND_CUSTOM       (1 << 17)
+
+#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
+
+#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
+#define VIRTGPU_PARAM_HOST_VISIBLE 4
+
+#ifndef VIRTGPU_BLOB_MEM_HOST3D
+struct drm_virtgpu_resource_create_blob {
+#define VIRTGPU_BLOB_MEM_GUEST              0x0001
+#define VIRTGPU_BLOB_MEM_HOST               0x0002
+#define VIRTGPU_BLOB_MEM_HOST_GUEST         0x0003
+
+#define VIRTGPU_BLOB_FLAG_MAPPABLE          0x0001
+#define VIRTGPU_BLOB_FLAG_SHAREABLE         0x0002
+#define VIRTGPU_BLOB_FLAG_CROSS_DEVICE      0x0004
+	/* zero is invalid blob_mem */
+    uint32_t blob_mem;
+    uint32_t blob_flags;
+    uint32_t bo_handle;
+    uint32_t res_handle;
+    uint64_t size;
+
+	/*
+	 * for 3D contexts with VIRTGPU_BLOB_MEM_HOSTGUEST and
+	 * VIRTGPU_BLOB_MEM_HOST otherwise, must be zero.
+	 */
+	uint32_t pad;
+    uint32_t cmd_size;
+    uint64_t cmd;
+    uint64_t blob_id;
+};
+#else
+#define VIRTGPU_BLOB_MEM_HOST               VIRTGPU_BLOB_MEM_HOST3D
+#define VIRTGPU_BLOB_MEM_HOST_GUEST         VIRTGPU_BLOB_MEM_HOST3D_GUEST
+
+#define VIRTGPU_BLOB_FLAG_MAPPABLE          VIRTGPU_BLOB_FLAG_USE_MAPPABLE
+#define VIRTGPU_BLOB_FLAG_SHAREABLE         VIRTGPU_BLOB_FLAG_USE_SHAREABLE
+#define VIRTGPU_BLOB_FLAG_CROSS_DEVICE      VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE
+#endif
+
+
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB              \
+        DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB,   \
+                        struct drm_virtgpu_resource_create_blob)
diff --git a/shared/OpenglCodecCommon/Android.mk b/shared/OpenglCodecCommon/Android.mk
index d9970d5..46d3fda 100644
--- a/shared/OpenglCodecCommon/Android.mk
+++ b/shared/OpenglCodecCommon/Android.mk
@@ -13,6 +13,7 @@
         SocketStream.cpp \
         TcpStream.cpp \
         auto_goldfish_dma_context.cpp \
+        etc.cpp \
 
 ifeq (true,$(GOLDFISH_OPENGL_BUILD_FOR_HOST))
 
@@ -40,10 +41,16 @@
 LOCAL_CFLAGS += -DLOG_TAG=\"eglCodecCommon\"
 LOCAL_CFLAGS += -Wno-unused-private-field
 
-$(call emugl-export,SHARED_LIBRARIES,libcutils libutils liblog)
 
 ifeq (true,$(GOLDFISH_OPENGL_BUILD_FOR_HOST))
-$(call emugl-export,SHARED_LIBRARIES,android-emu-shared)
+$(call emugl-import,libandroidemu)
+$(call emugl-export,SHARED_LIBRARIES,libcutils libutils liblog)
+else
+ifeq (true,$(GFXSTREAM))
+$(call emugl-export,SHARED_LIBRARIES,libcutils libutils liblog libandroidemu)
+else
+$(call emugl-export,SHARED_LIBRARIES,libcutils libutils liblog)
+endif
 endif
 
 $(call emugl-export,C_INCLUDES,$(LOCAL_PATH))
diff --git a/shared/OpenglCodecCommon/CMakeLists.txt b/shared/OpenglCodecCommon/CMakeLists.txt
index 6a1ac4a..545525f 100644
--- a/shared/OpenglCodecCommon/CMakeLists.txt
+++ b/shared/OpenglCodecCommon/CMakeLists.txt
@@ -1,10 +1,10 @@
 # This is an autogenerated file! Do not edit!
 # instead run make from .../device/generic/goldfish-opengl
 # which will re-generate this file.
-android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon/Android.mk" "87f5c2c694647e3a38b4da1b4c653b99cd27d934b60ecd43aff59d3765daabfd")
-set(OpenglCodecCommon_host_src GLClientState.cpp GLESTextureUtils.cpp ChecksumCalculator.cpp GLSharedGroup.cpp glUtils.cpp IndexRangeCache.cpp SocketStream.cpp TcpStream.cpp auto_goldfish_dma_context.cpp goldfish_dma_host.cpp)
-android_add_library(TARGET OpenglCodecCommon_host SHARED LICENSE Apache-2.0 SRC GLClientState.cpp GLESTextureUtils.cpp ChecksumCalculator.cpp GLSharedGroup.cpp glUtils.cpp IndexRangeCache.cpp SocketStream.cpp TcpStream.cpp auto_goldfish_dma_context.cpp goldfish_dma_host.cpp)
-target_include_directories(OpenglCodecCommon_host PRIVATE ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
-target_compile_definitions(OpenglCodecCommon_host PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGOLDFISH_VULKAN" "-DLOG_TAG=\"eglCodecCommon\"")
+android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon/Android.mk" "a9f19fd167bcc8bf102e288cccc0c4a780fa6671ff52f1f94c3b5b26d7482728")
+set(OpenglCodecCommon_host_src GLClientState.cpp GLESTextureUtils.cpp ChecksumCalculator.cpp GLSharedGroup.cpp glUtils.cpp IndexRangeCache.cpp SocketStream.cpp TcpStream.cpp auto_goldfish_dma_context.cpp etc.cpp goldfish_dma_host.cpp)
+android_add_library(TARGET OpenglCodecCommon_host SHARED LICENSE Apache-2.0 SRC GLClientState.cpp GLESTextureUtils.cpp ChecksumCalculator.cpp GLSharedGroup.cpp glUtils.cpp IndexRangeCache.cpp SocketStream.cpp TcpStream.cpp auto_goldfish_dma_context.cpp etc.cpp goldfish_dma_host.cpp)
+target_include_directories(OpenglCodecCommon_host PRIVATE ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
+target_compile_definitions(OpenglCodecCommon_host PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM" "-DLOG_TAG=\"eglCodecCommon\"")
 target_compile_options(OpenglCodecCommon_host PRIVATE "-fvisibility=default" "-Wno-unused-parameter" "-Wno-unused-private-field")
-target_link_libraries(OpenglCodecCommon_host PRIVATE android-emu-shared cutils utils log PRIVATE qemupipe_host)
\ No newline at end of file
+target_link_libraries(OpenglCodecCommon_host PRIVATE cutils utils log androidemu android-emu-shared PRIVATE qemupipe_host)
\ No newline at end of file
diff --git a/shared/OpenglCodecCommon/GLClientState.cpp b/shared/OpenglCodecCommon/GLClientState.cpp
index a31f697..0915341 100644
--- a/shared/OpenglCodecCommon/GLClientState.cpp
+++ b/shared/OpenglCodecCommon/GLClientState.cpp
@@ -32,12 +32,30 @@
 #endif
 
 // Don't include these in the .h file, or we get weird compile errors.
+#include <GLES2/gl2ext.h>
 #include <GLES3/gl3.h>
 #include <GLES3/gl31.h>
 
 void GLClientState::init() {
     m_initialized = false;
-    m_nLocations = CODEC_MAX_VERTEX_ATTRIBUTES;
+
+    state_GL_STENCIL_TEST = false;
+    state_GL_STENCIL_FUNC = GL_ALWAYS;
+    state_GL_STENCIL_VALUE_MASK = ~(0);
+    state_GL_STENCIL_REF = 0;
+    state_GL_STENCIL_FAIL = GL_KEEP;
+    state_GL_STENCIL_PASS_DEPTH_FAIL = GL_KEEP;
+    state_GL_STENCIL_PASS_DEPTH_PASS = GL_KEEP;
+    state_GL_STENCIL_BACK_FUNC = GL_ALWAYS;
+    state_GL_STENCIL_BACK_VALUE_MASK = ~(0);
+    state_GL_STENCIL_BACK_REF = 0;
+    state_GL_STENCIL_BACK_FAIL = GL_KEEP;
+    state_GL_STENCIL_BACK_PASS_DEPTH_FAIL = GL_KEEP;
+    state_GL_STENCIL_BACK_PASS_DEPTH_PASS = GL_KEEP;
+    state_GL_STENCIL_WRITEMASK = ~(0);
+    state_GL_STENCIL_BACK_WRITEMASK = ~(0);
+    state_GL_STENCIL_CLEAR_VALUE = 0;
+
 
     m_arrayBuffer = 0;
     m_arrayBuffer_lastEncode = 0;
@@ -48,7 +66,6 @@
     m_vaoAttribBindingHasVboCache = 0;
     m_noClientArraysCache = 0;
 
-    m_max_vertex_attrib_bindings = m_nLocations;
     addVertexArrayObject(0);
     setVertexArrayObject(0);
     // init gl constans;
@@ -78,13 +95,9 @@
     m_drawIndirectBuffer = 0;
     m_shaderStorageBuffer = 0;
 
-    m_transformFeedbackActiveUnpaused = false;
-
-    // to be modified later when these are queried from host.
-    m_max_transform_feedback_separate_attribs = 0;
-    m_max_uniform_buffer_bindings = 0;
-    m_max_atomic_counter_buffer_bindings = 0;
-    m_max_shader_storage_buffer_bindings = 0;
+    m_transformFeedbackActive = false;
+    m_transformFeedbackUnpaused = false;
+    m_transformFeedbackVaryingsCountForLinking = 0;
 
     m_activeTexture = 0;
     m_currentProgram = 0;
@@ -108,14 +121,28 @@
     m_tex.textureRecs = NULL;
 
     mRboState.boundRenderbuffer = 0;
-    mRboState.boundRenderbufferIndex = 0;
 
     mFboState.boundDrawFramebuffer = 0;
     mFboState.boundReadFramebuffer = 0;
     mFboState.drawFboCheckStatus = GL_NONE;
     mFboState.readFboCheckStatus = GL_NONE;
 
-    m_maxVertexAttribsDirty = true;
+    m_extensions_set = false;
+
+#ifdef GFXSTREAM
+    // The default transform feedback buffer object
+    // The default sampler object
+    GLuint defaultId = 0;
+    setExistence(ObjectType::TransformFeedback, true, 1, &defaultId);
+
+    mBoundTransformFeedbackValidity.id = 0;
+    mBoundTransformFeedbackValidity.valid = true;
+
+    // query must take id that was created via glGenQueries
+    mBoundQueryValidity_AnySamplesPassed.valid = false;
+    mBoundQueryValidity_AnySamplesPassedConservative.valid = false;
+    mBoundQueryValidity_TransformFeedbackPrimitivesWritten.valid = false;
+#endif
 }
 
 GLClientState::GLClientState()
@@ -225,10 +252,10 @@
     m_vaoMap.insert(
             VAOStateMap::value_type(
                 name,
-                VAOState(0, m_nLocations, std::max(m_nLocations, m_max_vertex_attrib_bindings))));
+                VAOState(0, CODEC_MAX_VERTEX_ATTRIBUTES, CODEC_MAX_VERTEX_ATTRIBUTES)));
     VertexAttribStateVector& attribState =
         m_vaoMap.find(name)->second.attribState;
-    for (int i = 0; i < m_nLocations; i++) {
+    for (int i = 0; i < CODEC_MAX_VERTEX_ATTRIBUTES; i++) {
         attribState[i].enabled = 0;
         attribState[i].enableDirty = false;
         attribState[i].data = 0;
@@ -421,6 +448,222 @@
     }
 }
 
+#ifdef GFXSTREAM
+
+void GLClientState::addBuffer(GLuint id) {
+    mBufferIds.add(id);
+    mBufferIds.set(id, true);
+    mHostMappedBufferDirty.add(id);
+}
+
+void GLClientState::removeBuffer(GLuint id) {
+    mHostMappedBufferDirty.remove(id);
+    mBufferIds.remove(id);
+}
+
+bool GLClientState::bufferIdExists(GLuint id) const {
+    return mBufferIds.get(id);
+}
+
+void GLClientState::setBufferHostMapDirty(GLuint id, bool dirty) {
+    mHostMappedBufferDirty.set(id, dirty);
+}
+
+bool GLClientState::isBufferHostMapDirty(GLuint id) const {
+    return mHostMappedBufferDirty.get(id);
+}
+
+void GLClientState::setExistence(ObjectType type, bool exists, GLsizei count, const GLuint* ids) {
+    if (type == ObjectType::Sampler) {
+        SamplerInfo::ScopedView view(mSamplerInfo);
+        if (exists) {
+            for (GLsizei i = 0; i < count; ++i) {
+                view.addFresh(ids[i]);
+            }
+        } else {
+            for (GLsizei i = 0; i < count; ++i) {
+                view.unref(ids[i]);
+            }
+        }
+    } else {
+        ExistenceMap* existenceMap = &mBufferIds;
+
+        switch (type) {
+            case ObjectType::Buffer:
+                existenceMap = &mBufferIds;
+                break;
+            case ObjectType::TransformFeedback:
+                existenceMap = &mTransformFeedbackIds;
+                break;
+            case ObjectType::Query:
+                existenceMap = &mQueryIds;
+                for (GLsizei i = 0; i < count; ++i) {
+                    // reset the last query target
+                    mLastQueryTargets.add(ids[i], 0);
+                }
+                break;
+            case ObjectType::Sampler:
+            default:
+                ALOGE("%s: Unreachable code\n", __func__);
+                abort();
+        }
+
+        if (exists) {
+            for (GLsizei i = 0; i < count; ++i) {
+                existenceMap->add(ids[i]);
+                existenceMap->set(ids[i], true);
+            }
+        } else {
+            for (GLsizei i = 0; i < count; ++i) {
+                existenceMap->remove(ids[i]);
+            }
+        }
+    }
+}
+
+bool GLClientState::queryExistence(ObjectType type, GLuint id) const {
+    switch (type) {
+        case ObjectType::Buffer:
+            return mBufferIds.get(id);
+        case ObjectType::TransformFeedback:
+            return mTransformFeedbackIds.get(id);
+        case ObjectType::Sampler:
+            return samplerExists(id);
+        case ObjectType::Query:
+            return mQueryIds.get(id);
+        default:
+            ALOGD("%s: unknown object type: 0x%x\n", __func__, type);
+            abort();
+    }
+}
+
+bool GLClientState::samplerExists(GLuint id) const {
+    if (!id) return true;
+    SamplerInfo::ScopedView view(mSamplerInfo);
+    return view.samplerExists(id);
+}
+
+bool GLClientState::tryBind(GLenum target, GLuint id) {
+    if (0 == id) { // unbind operation
+        switch (target) {
+            case GL_TRANSFORM_FEEDBACK:
+                mBoundTransformFeedbackValidity.id = 0;
+                mBoundTransformFeedbackValidity.valid = true;
+                break;
+            case GL_ANY_SAMPLES_PASSED:
+                mBoundQueryValidity_AnySamplesPassed.id = 0;
+                mBoundQueryValidity_AnySamplesPassed.valid = false;
+                break;
+            case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
+                mBoundQueryValidity_AnySamplesPassedConservative.id = 0;
+                mBoundQueryValidity_AnySamplesPassedConservative.valid = false;
+                break;
+            case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
+                mBoundQueryValidity_TransformFeedbackPrimitivesWritten.id = 0;
+                mBoundQueryValidity_TransformFeedbackPrimitivesWritten.valid = false;
+                break;
+            default:
+                ALOGE("%s: target 0x%x not yet supported in new state tracking model\n", __func__, target);
+                abort();
+        }
+        return true;
+    }
+
+    switch (target) {
+        case GL_TRANSFORM_FEEDBACK:
+            if (!queryExistence(ObjectType::TransformFeedback, id)) return false;
+            break;
+        case GL_ANY_SAMPLES_PASSED:
+        case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
+        case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
+            if (!queryExistence(ObjectType::Query, id)) {
+                return false;
+            }
+            break;
+        default:
+            ALOGE("%s: target 0x%x not yet supported in new state tracking model\n", __func__, target);
+            abort();
+    }
+
+    // valid bind
+    switch (target) {
+    case GL_TRANSFORM_FEEDBACK:
+        mBoundTransformFeedbackValidity.id = id;
+        mBoundTransformFeedbackValidity.valid = true;
+        break;
+    case GL_ANY_SAMPLES_PASSED:
+        mBoundQueryValidity_AnySamplesPassed.id = id;
+        mBoundQueryValidity_AnySamplesPassed.valid = true;
+        break;
+    case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
+        mBoundQueryValidity_AnySamplesPassedConservative.id = id;
+        mBoundQueryValidity_AnySamplesPassedConservative.valid = true;
+        break;
+    case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
+        mBoundQueryValidity_TransformFeedbackPrimitivesWritten.id = id;
+        mBoundQueryValidity_TransformFeedbackPrimitivesWritten.valid = true;
+        break;
+    default:
+        ALOGE("%s: target 0x%x not yet supported in new state tracking model\n", __func__, target);
+        abort();
+    }
+    return true;
+}
+
+bool GLClientState::isBoundTargetValid(GLenum target) {
+    switch (target) {
+    case GL_TRANSFORM_FEEDBACK:
+        return mBoundTransformFeedbackValidity.valid;
+    case GL_ANY_SAMPLES_PASSED:
+        return mBoundQueryValidity_AnySamplesPassed.valid;
+    case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
+        return mBoundQueryValidity_AnySamplesPassedConservative.valid;
+    case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
+        return mBoundQueryValidity_TransformFeedbackPrimitivesWritten.valid;
+    default:
+        ALOGE("%s: target 0x%x not yet supported in new state tracking model\n", __func__, target);
+        abort();
+    }
+}
+
+bool GLClientState::isQueryBound(GLenum target) {
+    switch (target) {
+    case GL_ANY_SAMPLES_PASSED:
+        return mBoundQueryValidity_AnySamplesPassed.valid;
+    case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
+        return mBoundQueryValidity_AnySamplesPassedConservative.valid;
+    case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
+        return mBoundQueryValidity_TransformFeedbackPrimitivesWritten.valid;
+    default:
+        return false;
+    }
+}
+
+bool GLClientState::isQueryObjectActive(GLuint id) {
+    if (mBoundQueryValidity_AnySamplesPassed.valid &&
+        (id == mBoundQueryValidity_AnySamplesPassed.id))
+        return true;
+    if (mBoundQueryValidity_AnySamplesPassedConservative.valid &&
+        (id == mBoundQueryValidity_AnySamplesPassedConservative.id))
+        return true;
+    if (mBoundQueryValidity_TransformFeedbackPrimitivesWritten.valid &&
+        (id == mBoundQueryValidity_TransformFeedbackPrimitivesWritten.id))
+        return true;
+    return false;
+}
+
+void GLClientState::setLastQueryTarget(GLenum target, GLuint id) {
+    mLastQueryTargets.add(id, target);
+}
+
+GLenum GLClientState::getLastQueryTarget(GLuint id) {
+    auto targetPtr = mLastQueryTargets.get_const(id);
+    if (!targetPtr) return 0;
+    return *targetPtr;
+}
+
+#else // GFXSTREAM
+
 void GLClientState::addBuffer(GLuint id) {
     mBufferIds.insert(id);
 }
@@ -433,6 +676,70 @@
     return mBufferIds.find(id) != mBufferIds.end();
 }
 
+void GLClientState::setBufferHostMapDirty(GLuint id, bool dirty) {
+    (void)id;
+    (void)dirty;
+}
+
+bool GLClientState::isBufferHostMapDirty(GLuint id) const {
+    (void)id;
+    return true;
+}
+
+void GLClientState::setExistence(ObjectType, bool, GLsizei, const GLuint*) {
+    // no-op in non-gfxstream
+}
+
+#endif // !GFXSTREAM
+
+void GLClientState::setBoundPixelPackBufferDirtyForHostMap() {
+    if (m_pixelPackBuffer)
+        setBufferHostMapDirty(m_pixelPackBuffer, true /* dirty */);
+}
+
+void GLClientState::setBoundTransformFeedbackBuffersDirtyForHostMap() {
+    if (m_transformFeedbackBuffer)
+        setBufferHostMapDirty(
+            m_transformFeedbackBuffer,
+            true /* dirty */);
+
+    for (size_t i = 0; i < m_indexedTransformFeedbackBuffers.size(); ++i)
+        if (m_indexedTransformFeedbackBuffers[i].buffer)
+            setBufferHostMapDirty(
+                m_indexedTransformFeedbackBuffers[i].buffer,
+                true /* dirty */);
+}
+
+void GLClientState::setBoundShaderStorageBuffersDirtyForHostMap() {
+    if (m_glesMajorVersion == 3 && m_glesMinorVersion == 0) return;
+
+    if (m_shaderStorageBuffer)
+        setBufferHostMapDirty(
+            m_shaderStorageBuffer,
+            true /* dirty */);
+
+    for (size_t i = 0; i < m_indexedShaderStorageBuffers.size(); ++i)
+        if (m_indexedShaderStorageBuffers[i].buffer)
+            setBufferHostMapDirty(
+                m_indexedShaderStorageBuffers[i].buffer,
+                true /* dirty */);
+}
+
+void GLClientState::setBoundAtomicCounterBuffersDirtyForHostMap() {
+    if (m_glesMajorVersion == 3 && m_glesMinorVersion == 0) return;
+
+    if (m_atomicCounterBuffer)
+        setBufferHostMapDirty(
+            m_atomicCounterBuffer,
+            true /* dirty */);
+
+    for (size_t i = 0; i < m_indexedAtomicCounterBuffers.size(); ++i)
+        if (m_indexedAtomicCounterBuffers[i].buffer)
+            setBufferHostMapDirty(
+                m_indexedAtomicCounterBuffers[i].buffer,
+                true /* dirty */);
+}
+
 void GLClientState::unBindBuffer(GLuint id) {
     if (m_arrayBuffer == id) {
         m_arrayBuffer = 0;
@@ -619,6 +926,47 @@
     }
 }
 
+int GLClientState::getMaxTextureSize() const {
+    return m_hostDriverCaps.max_texture_size;
+}
+
+int GLClientState::getMaxTextureSize3D() const {
+    return m_hostDriverCaps.max_texture_size_3d;
+}
+
+int GLClientState::getMaxTextureSizeCubeMap() const {
+    return m_hostDriverCaps.max_texture_size_cube_map;
+}
+
+int GLClientState::getLog2MaxTextureSize() const {
+    return m_log2MaxTextureSize;
+}
+
+void GLClientState::postDraw() {
+    setBoundTransformFeedbackBuffersDirtyForHostMap();
+    setBoundShaderStorageBuffersDirtyForHostMap();
+    setBoundAtomicCounterBuffersDirtyForHostMap();
+}
+
+void GLClientState::postReadPixels() {
+    setBoundPixelPackBufferDirtyForHostMap();
+}
+
+void GLClientState::postDispatchCompute() {
+    setBoundShaderStorageBuffersDirtyForHostMap();
+    setBoundAtomicCounterBuffersDirtyForHostMap();
+}
+
+bool GLClientState::shouldSkipHostMapBuffer(GLenum target) {
+    GLuint id = getBuffer(target);
+    return !isBufferHostMapDirty(id);
+}
+
+void GLClientState::onHostMappedBuffer(GLenum target) {
+    GLuint id = getBuffer(target);
+    setBufferHostMapDirty(id, false /* not dirty */);
+}
+
 int GLClientState::getBuffer(GLenum target) {
     int ret=0;
     switch (target) {
@@ -699,6 +1047,50 @@
     }
 }
 
+bool GLClientState::isTexture(GLuint tex_name) const {
+    return getTextureRec(tex_name);
+}
+
+bool GLClientState::isTextureWithStorage(GLuint tex_name) const {
+    TextureRec* rec = getTextureRec(tex_name);
+    if (!rec) return false;
+    return rec->hasStorage;
+}
+
+bool GLClientState::isTextureCubeMap(GLuint tex_name) const {
+    TextureRec* texrec = getTextureRec(tex_name);
+    if (!texrec) return false;
+    switch (texrec->target) {
+        case GL_TEXTURE_CUBE_MAP:
+        case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
+        case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
+        case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
+        case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
+        case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
+        case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool GLClientState::isRenderbuffer(GLuint name) const {
+    if (!name) return false;
+
+    RenderbufferInfo::ScopedView view(mRboState.rboData);
+    return view.hasRbo(name);
+}
+
+bool GLClientState::isRenderbufferThatWasBound(GLuint name) const {
+    if (!name) return true;
+
+    RenderbufferInfo::ScopedView view(mRboState.rboData);
+    if (!view.hasRbo(name)) return false;
+
+    const RboProps* props = view.get_const(name);
+    return props->previouslyBound;
+}
+
 void GLClientState::getClientStatePointer(GLenum pname, GLvoid** params)
 {
     GLenum which_state = -1;
@@ -1001,8 +1393,14 @@
     }
 }
 
-void GLClientState::bindSampler(GLuint unit, GLuint sampler) {
+bool GLClientState::bindSampler(GLuint unit, GLuint sampler) {
+    SamplerInfo::ScopedView view(mSamplerInfo);
+    view.ref(sampler);
+    if (m_tex.unit[unit].boundSampler) {
+        view.unref(sampler);
+    }
     m_tex.unit[unit].boundSampler = sampler;
+    return true;
 }
 
 bool GLClientState::isSamplerBindNoOp(GLuint unit, GLuint sampler) {
@@ -1052,7 +1450,7 @@
     if (texture && target != texrec->target &&
         (target != GL_TEXTURE_EXTERNAL_OES &&
          texrec->target != GL_TEXTURE_EXTERNAL_OES)) {
-        ALOGD("%s: issue GL_INVALID_OPERATION: target 0x%x texrectarget 0x%x texture %u", __FUNCTION__, target, texrec->target, texture);
+        return GL_INVALID_OPERATION;
     }
 
     switch (target) {
@@ -1083,13 +1481,26 @@
     return GL_NO_ERROR;
 }
 
-void GLClientState::setBoundEGLImage(GLenum target, GLeglImageOES image) {
+void GLClientState::setBoundEGLImage(GLenum target, GLeglImageOES image, int width, int height) {
     (void)image;
 
-    GLuint texture = getBoundTexture(target);
-    TextureRec* texrec = getTextureRec(texture);
-    if (!texrec) return;
-    texrec->boundEGLImage = true;
+    if (target == GL_RENDERBUFFER) {
+        if (!boundRenderbuffer()) return;
+        setBoundRenderbufferEGLImageBacked();
+        setBoundRenderbufferFormat(GL_RGBA);
+        setBoundRenderbufferSamples(0);
+        setBoundRenderbufferDimensions(width, height);
+    } else {
+        GLuint texture = getBoundTexture(target);
+        TextureRec* texrec = getTextureRec(texture);
+        if (!texrec) return;
+        texrec->boundEGLImage = true;
+        setBoundTextureInternalFormat(target, GL_RGBA);
+        setBoundTextureFormat(target, GL_RGBA);
+        setBoundTextureType(target, GL_UNSIGNED_BYTE);
+        setBoundTextureSamples(target, 0);
+        setBoundTextureDims(target, target, 0, width, height, 1);
+    }
 }
 
 TextureRec* GLClientState::addTextureRec(GLuint id, GLenum target)
@@ -1101,7 +1512,14 @@
     tex->multisamples = 0;
     tex->immutable = false;
     tex->boundEGLImage = false;
-    tex->dims = new TextureDims;
+    tex->hasStorage = false;
+    tex->dims = new TextureDims[6];
+    tex->hasCubeNegX = false;
+    tex->hasCubePosX = false;
+    tex->hasCubeNegY = false;
+    tex->hasCubePosY = false;
+    tex->hasCubeNegZ = false;
+    tex->hasCubePosZ = false;
 
     (*(m_tex.textureRecs))[id] = tex;
     return tex;
@@ -1137,13 +1555,49 @@
     texrec->type = type;
 }
 
-void GLClientState::setBoundTextureDims(GLenum target, GLsizei level, GLsizei width, GLsizei height, GLsizei depth) {
+static size_t textureDimArrayOfCubeTarget(GLenum cubetarget) {
+    switch (cubetarget) {
+        case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
+            return 0;
+        case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
+            return 1;
+        case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
+            return 2;
+        case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
+            return 3;
+        case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
+            return 4;
+        case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
+            return 5;
+    }
+    return 0;
+}
+
+void GLClientState::setBoundTextureDims(GLenum target, GLenum cubetarget, GLsizei level, GLsizei width, GLsizei height, GLsizei depth) {
     GLuint texture = getBoundTexture(target);
     TextureRec* texrec = getTextureRec(texture);
     if (!texrec) {
         return;
     }
 
+    texrec->hasStorage = true;
+
+    size_t indexToSet = 0;
+
+    if (target == GL_TEXTURE_CUBE_MAP) {
+        if (-1 == cubetarget) {
+            setBoundTextureDims(target, GL_TEXTURE_CUBE_MAP_NEGATIVE_X, level, width, height, depth);
+            setBoundTextureDims(target, GL_TEXTURE_CUBE_MAP_POSITIVE_X, level, width, height, depth);
+            setBoundTextureDims(target, GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, level, width, height, depth);
+            setBoundTextureDims(target, GL_TEXTURE_CUBE_MAP_POSITIVE_Y, level, width, height, depth);
+            setBoundTextureDims(target, GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, level, width, height, depth);
+            setBoundTextureDims(target, GL_TEXTURE_CUBE_MAP_POSITIVE_Z, level, width, height, depth);
+            return;
+        }
+        indexToSet = textureDimArrayOfCubeTarget(cubetarget);
+    }
+
+
     if (level == -1) {
         GLsizei curr_width = width;
         GLsizei curr_height = height;
@@ -1151,9 +1605,9 @@
         GLsizei curr_level = 0;
 
         while (true) {
-            texrec->dims->widths[curr_level] = curr_width;
-            texrec->dims->heights[curr_level] = curr_height;
-            texrec->dims->depths[curr_level] = curr_depth;
+            texrec->dims[indexToSet].widths[curr_level] = curr_width;
+            texrec->dims[indexToSet].heights[curr_level] = curr_height;
+            texrec->dims[indexToSet].depths[curr_level] = curr_depth;
             if (curr_width >> 1 == 0 &&
                 curr_height >> 1 == 0 &&
                 ((target == GL_TEXTURE_3D && curr_depth == 0) ||
@@ -1169,10 +1623,12 @@
         }
 
     } else {
-        texrec->dims->widths[level] = width;
-        texrec->dims->heights[level] = height;
-        texrec->dims->depths[level] = depth;
+        texrec->dims[indexToSet].widths[level] = width;
+        texrec->dims[indexToSet].heights[level] = height;
+        texrec->dims[indexToSet].depths[level] = depth;
     }
+
+    setFboCompletenessDirtyForTexture(texture);
 }
 
 void GLClientState::setBoundTextureSamples(GLenum target, GLsizei samples) {
@@ -1182,11 +1638,48 @@
     texrec->multisamples = samples;
 }
 
+void GLClientState::addTextureCubeMapImage(GLenum stateTarget, GLenum cubeTarget) {
+    if (stateTarget != GL_TEXTURE_CUBE_MAP) return;
+
+    GLuint texture = getBoundTexture(stateTarget);
+    TextureRec* texrec = getTextureRec(texture);
+    if (!texrec) return;
+
+    switch (cubeTarget) {
+        case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
+            texrec->hasCubeNegX = true;
+            return;
+        case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
+            texrec->hasCubePosX = true;
+            return;
+        case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
+            texrec->hasCubeNegY = true;
+            return;
+        case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
+            texrec->hasCubePosY = true;
+            return;
+        case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
+            texrec->hasCubeNegZ = true;
+            return;
+        case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
+            texrec->hasCubePosZ = true;
+            return;
+    }
+}
+
 void GLClientState::setBoundTextureImmutableFormat(GLenum target) {
     GLuint texture = getBoundTexture(target);
     TextureRec* texrec = getTextureRec(texture);
     if (!texrec) return;
     texrec->immutable = true;
+    if (target == GL_TEXTURE_CUBE_MAP) {
+        texrec->hasCubeNegX = true;
+        texrec->hasCubePosX = true;
+        texrec->hasCubeNegY = true;
+        texrec->hasCubePosY = true;
+        texrec->hasCubeNegZ = true;
+        texrec->hasCubePosZ = true;
+    }
 }
 
 bool GLClientState::isBoundTextureImmutableFormat(GLenum target) const {
@@ -1196,6 +1689,41 @@
     return texrec->immutable;
 }
 
+bool GLClientState::isBoundTextureComplete(GLenum target) const {
+    GLuint texture = getBoundTexture(target);
+    TextureRec* texrec = getTextureRec(texture);
+    if (!texrec) return false;
+
+    if (texrec->immutable) return true;
+    if (!texrec->hasStorage) return true;
+
+    if (target == GL_TEXTURE_CUBE_MAP) {
+        if (!(texrec->hasCubeNegX &&
+             texrec->hasCubePosX &&
+             texrec->hasCubeNegY &&
+             texrec->hasCubePosY &&
+             texrec->hasCubeNegZ &&
+             texrec->hasCubePosZ)) return false;
+
+        size_t currBaseLevel = texrec->dims[0].widths.begin()->first;
+        size_t currWidth = texrec->dims[0].widths.begin()->second;
+        size_t currHeight = texrec->dims[0].heights.begin()->second;
+        for (size_t i = 1; i < 6; ++i) {
+            size_t nextLevel = texrec->dims[i].widths.begin()->first;
+            size_t nextWidth = texrec->dims[i].widths.begin()->second;
+            size_t nextHeight = texrec->dims[i].heights.begin()->second;
+            if (currBaseLevel != nextLevel) return false;
+            if (currWidth != nextWidth) return false;
+            if (currHeight != nextHeight) return false;
+        }
+
+        return true;
+    }
+
+    return true;
+}
+
+
 GLuint GLClientState::getBoundTexture(GLenum target) const
 {
     switch (target) {
@@ -1216,6 +1744,225 @@
     }
 }
 
+GLuint GLClientState::getBoundFramebuffer(GLenum target) const
+{
+    switch (target) {
+    case GL_FRAMEBUFFER:
+    case GL_DRAW_FRAMEBUFFER:
+        return mFboState.boundDrawFramebuffer;
+    case GL_READ_FRAMEBUFFER:
+        return mFboState.boundReadFramebuffer;
+    default:
+        return 0;
+    }
+}
+
+GLenum GLClientState::checkFramebufferCompleteness(GLenum target) {
+    // Default framebuffer is complete
+    // TODO: Check the case where the default framebuffer is 0x0
+    if (0 == boundFramebuffer(target)) {
+        return GL_FRAMEBUFFER_COMPLETE;
+    }
+
+    bool hasAttachment = false;
+    FboProps& props = boundFboProps(target);
+
+    if (!props.completenessDirty) {
+        return props.cachedCompleteness;
+    }
+
+    int currentSamples = -1;
+
+    for (int i = 0; i < getMaxColorAttachments(); i++) {
+        if (!props.colorAttachmenti_hasTex[i] &&
+            !props.colorAttachmenti_hasRbo[i]) continue;
+
+        GLenum attachmentRes = checkFramebufferAttachmentCompleteness(target, glUtilsColorAttachmentName(i), &currentSamples);
+        if (attachmentRes != GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT) {
+            hasAttachment = true;
+        }
+        if (attachmentRes) {
+            ALOGD("%s: color attachment %d not complete: 0x%x\n", __func__, i, attachmentRes);
+            return attachmentRes;
+        }
+    }
+
+    bool hasDepth = (props.depthAttachment_hasTexObj || props.depthAttachment_hasRbo || props.depthstencilAttachment_hasTexObj || props.depthstencilAttachment_hasRbo);
+    bool hasStencil = (props.stencilAttachment_hasTexObj || props.stencilAttachment_hasRbo || props.depthstencilAttachment_hasTexObj || props.depthstencilAttachment_hasRbo);
+
+    if (hasDepth) {
+        GLenum depthAttachmentRes = checkFramebufferAttachmentCompleteness(target, GL_DEPTH_ATTACHMENT, &currentSamples);
+        if (depthAttachmentRes != GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT) {
+            hasAttachment = true;
+        }
+        if (depthAttachmentRes) {
+            ALOGD("%s: depth attachment not complete: 0x%x\n", __func__, depthAttachmentRes);
+            return depthAttachmentRes;
+        }
+    }
+
+    if (hasStencil) {
+        GLenum stencilAttachmentRes = checkFramebufferAttachmentCompleteness(target, GL_STENCIL_ATTACHMENT, &currentSamples);
+        if (stencilAttachmentRes != GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT) {
+            hasAttachment = true;
+        }
+        if (stencilAttachmentRes) {
+            ALOGD("%s: stencil attachment not complete: 0x%x\n", __func__, stencilAttachmentRes);
+            return stencilAttachmentRes;
+        }
+    }
+
+    if (hasDepth && hasStencil) {
+        // In gles3, depth/stencil must use the same image.
+        if (m_glesMajorVersion > 2) {
+            if ((props.depthAttachment_hasTexObj && props.stencilAttachment_hasRbo) ||
+                (props.stencilAttachment_hasTexObj && props.depthAttachment_hasRbo)) {
+                ALOGD("%s: GL_FRAMEBUFFER_UNSUPPORTED: using different types of depth/stencil attachment images in GLES 3+\n", __func__);
+                return GL_FRAMEBUFFER_UNSUPPORTED;
+            }
+            if (props.depthAttachment_hasTexObj) {
+                if (props.depthAttachment_texture != props.stencilAttachment_texture) {
+                    ALOGD("%s: GL_FRAMEBUFFER_UNSUPPORTED: using different texture images for depth and stencil attachments in GLES 3+\n", __func__);
+                    return GL_FRAMEBUFFER_UNSUPPORTED;
+                }
+            }
+            if (props.depthAttachment_hasRbo) {
+                if (props.depthAttachment_rbo != props.stencilAttachment_rbo) {
+                    ALOGD("%s: GL_FRAMEBUFFER_UNSUPPORTED: using different renderbuffers for depth and stencil attachments in GLES 3+\n", __func__);
+                    return GL_FRAMEBUFFER_UNSUPPORTED;
+                }
+            }
+        }
+    }
+
+    if (!hasAttachment) {
+        // Framebuffers may be missing an attachment if they have nonzero
+        // default width and height
+        if (props.defaultWidth == 0 || props.defaultHeight == 0) {
+            return GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT;
+        }
+    }
+
+    props.completenessDirty = false;
+    props.cachedCompleteness = GL_FRAMEBUFFER_COMPLETE;
+    return GL_FRAMEBUFFER_COMPLETE;
+}
+
+GLenum GLClientState::checkFramebufferAttachmentCompleteness(GLenum target, GLenum attachment, int* currentSamples) const {
+    FboFormatInfo fbo_format_info;
+    getBoundFramebufferFormat(target, attachment, &fbo_format_info);
+
+    // Check format and renderability
+    bool renderable = false;
+    switch (fbo_format_info.type) {
+        case FBO_ATTACHMENT_RENDERBUFFER:
+            switch (attachment) {
+                case GL_DEPTH_ATTACHMENT:
+                    renderable = fbo_format_info.rb_external || depthRenderableFormat(fbo_format_info.rb_format);
+                    break;
+                case GL_STENCIL_ATTACHMENT:
+                    renderable = fbo_format_info.rb_external || stencilRenderableFormat(fbo_format_info.rb_format);
+                    break;
+                default:
+                    renderable = fbo_format_info.rb_external || colorRenderableFormat(
+                            fbo_format_info.rb_format,
+                            GL_UNSIGNED_BYTE,
+                            m_glesMajorVersion, m_glesMinorVersion,
+                            m_has_color_buffer_float_extension,
+                            m_has_color_buffer_half_float_extension);
+                    if (!renderable) {
+                        ALOGD("%s: rbo not color renderable. format: 0x%x\n", __func__, fbo_format_info.rb_format); }
+                    break;
+            }
+            break;
+        case FBO_ATTACHMENT_TEXTURE:
+            switch (attachment) {
+                case GL_DEPTH_ATTACHMENT:
+                    renderable = fbo_format_info.tex_external || depthRenderableFormat(fbo_format_info.tex_internalformat);
+                    break;
+                case GL_STENCIL_ATTACHMENT:
+                    renderable = fbo_format_info.tex_external || stencilRenderableFormat(fbo_format_info.tex_internalformat);
+                    break;
+                default:
+                    renderable = fbo_format_info.tex_external || colorRenderableFormat(
+                            fbo_format_info.tex_internalformat,
+                            fbo_format_info.tex_type,
+                            m_glesMajorVersion, m_glesMinorVersion,
+                            m_has_color_buffer_float_extension,
+                            m_has_color_buffer_half_float_extension);
+                    if (!renderable) {
+                        ALOGD("%s: tex not color renderable. format: 0x%x type 0x%x maj min %d %d floatext %d hfloatext %d\n", __func__, fbo_format_info.tex_internalformat, fbo_format_info.tex_type, m_glesMajorVersion, m_glesMinorVersion, m_has_color_buffer_float_extension, m_has_color_buffer_half_float_extension);
+                    }
+                    break;
+            }
+            break;
+        case FBO_ATTACHMENT_NONE:
+        default:
+            return GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT;
+    }
+
+    if (!renderable) return GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT;
+
+    // Check dimensions
+    GLuint id;
+    switch (fbo_format_info.type) {
+    case FBO_ATTACHMENT_RENDERBUFFER:
+        id = getFboAttachmentRboId(target, attachment);
+        if (!fbo_format_info.rb_external) {
+            if (0 == queryRboWidth(id) || 0 == queryRboHeight(id)) {
+                ALOGD("%s: rbo has zero dimension\n", __func__);
+                return GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT;
+            }
+        }
+        break;
+    case FBO_ATTACHMENT_TEXTURE:
+        id = getFboAttachmentTextureId(target, attachment);
+        if (!fbo_format_info.tex_external) {
+            if (0 == queryTexWidth(fbo_format_info.tex_level, id) || 0 == queryTexHeight(fbo_format_info.tex_level, id)) {
+                ALOGD("%s: texture has zero dimension\n", __func__);
+                return GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT;
+            }
+            GLsizei depth = queryTexDepth(fbo_format_info.tex_level, id);
+            if (fbo_format_info.tex_layer >= depth) {
+                ALOGD("%s: texture layer/zoffset too high, wanted %d but only have %d layers\n", __func__,
+                      fbo_format_info.tex_layer, depth);
+                return GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT;
+            }
+        }
+        break;
+    case FBO_ATTACHMENT_NONE:
+    default:
+        return GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT;
+    }
+
+    // Check samples
+    int currSamplesVal = *currentSamples;
+    bool firstTime = -1 == currSamplesVal;
+    int samplesThisAttachment = 0;
+    switch (fbo_format_info.type) {
+    case FBO_ATTACHMENT_RENDERBUFFER:
+        samplesThisAttachment = fbo_format_info.rb_multisamples;
+        break;
+    case FBO_ATTACHMENT_TEXTURE:
+        samplesThisAttachment = fbo_format_info.tex_multisamples;
+        break;
+    case FBO_ATTACHMENT_NONE:
+        break;
+    default:
+        return GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT;
+    }
+
+    if (firstTime) {
+        *currentSamples = samplesThisAttachment;
+    } else {
+        if (samplesThisAttachment != currSamplesVal) {
+            return GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE;
+        }
+    }
+
+    return 0;
+}
+
 // BEGIN driver workarounds-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-
 // (>' ')><(' '<)(>' ')><(' '<)(>' ')><(' '<)(>' ')><(' '<)(>' ')><(' '<)(>' ')>
 
@@ -1284,6 +2031,10 @@
 
 void GLClientState::deleteTextures(GLsizei n, const GLuint* textures)
 {
+    for (const GLuint* texture = textures; texture != textures + n; texture++) {
+        setFboCompletenessDirtyForTexture(*texture);
+    }
+
     // Updating the textures array could be made more efficient when deleting
     // several textures:
     // - compacting the array could be done in a single pass once the deleted
@@ -1293,7 +2044,7 @@
     for (const GLuint* texture = textures; texture != textures + n; texture++) {
         texrec = getTextureRec(*texture);
         if (texrec && texrec->dims) {
-            delete texrec->dims;
+            delete [] texrec->dims;
         }
         if (texrec) {
             m_tex.textureRecs->erase(*texture);
@@ -1315,20 +2066,10 @@
 // RBO//////////////////////////////////////////////////////////////////////////
 
 void GLClientState::addFreshRenderbuffer(GLuint name) {
-    // if underlying opengl says these are fresh names,
-    // but we are keeping a stale one, reset it.
-    RboProps props;
-    props.target = GL_RENDERBUFFER;
-    props.name = name;
-    props.format = GL_NONE;
-    props.multisamples = 0;
-    props.previouslyBound = false;
+    if (!name) return;
 
-    if (usedRenderbufferName(name)) {
-        mRboState.rboData[getRboIndex(name)] = props;
-    } else {
-        mRboState.rboData.push_back(props);
-    }
+    RenderbufferInfo::ScopedView view(mRboState.rboData);
+    view.addFresh(name);
 }
 
 void GLClientState::addRenderbuffers(GLsizei n, GLuint* renderbuffers) {
@@ -1337,84 +2078,87 @@
     }
 }
 
-size_t GLClientState::getRboIndex(GLuint name) const {
-    for (size_t i = 0; i < mRboState.rboData.size(); i++) {
-        if (mRboState.rboData[i].name == name) {
-            return i;
-        }
-    }
-    return -1;
-}
-
 void GLClientState::removeRenderbuffers(GLsizei n, const GLuint* renderbuffers) {
-    size_t bound_rbo_idx = getRboIndex(boundRboProps_const().name);
-
     std::vector<GLuint> to_remove;
     for (size_t i = 0; i < n; i++) {
         if (renderbuffers[i] != 0) { // Never remove the zero rb.
-            to_remove.push_back(getRboIndex(renderbuffers[i]));
+            to_remove.push_back(renderbuffers[i]);
+            setFboCompletenessDirtyForRbo(renderbuffers[i]);
         }
     }
 
-    for (size_t i = 0; i < to_remove.size(); i++) {
-        mRboState.rboData[to_remove[i]] = mRboState.rboData.back();
-        mRboState.rboData.pop_back();
+    bool unbindCurrent = false;
+    {
+        RenderbufferInfo::ScopedView view(mRboState.rboData);
+        for (size_t i = 0; i < to_remove.size(); i++) {
+            view.unref(to_remove[i]);
+        }
+        for (size_t i = 0; i < to_remove.size(); i++) {
+            if (mRboState.boundRenderbuffer == renderbuffers[i]) {
+                unbindCurrent = true;
+                break;
+            }
+        }
     }
 
-    // If we just deleted the currently bound rb,
-    // bind the zero rb
-    if (getRboIndex(boundRboProps_const().name) != bound_rbo_idx) {
+    if (unbindCurrent) {
         bindRenderbuffer(GL_RENDERBUFFER, 0);
     }
 }
 
 bool GLClientState::usedRenderbufferName(GLuint name) const {
-    for (size_t i = 0; i < mRboState.rboData.size(); i++) {
-        if (mRboState.rboData[i].name == name) {
-            return true;
-        }
-    }
-    return false;
-}
+    if (!name) return false;
 
-void GLClientState::setBoundRenderbufferIndex() {
-    for (size_t i = 0; i < mRboState.rboData.size(); i++) {
-        if (mRboState.rboData[i].name == mRboState.boundRenderbuffer) {
-            mRboState.boundRenderbufferIndex = i;
-            break;
-        }
-    }
-}
-
-RboProps& GLClientState::boundRboProps() {
-    return mRboState.rboData[mRboState.boundRenderbufferIndex];
-}
-
-const RboProps& GLClientState::boundRboProps_const() const {
-    return mRboState.rboData[mRboState.boundRenderbufferIndex];
+    RenderbufferInfo::ScopedView view(mRboState.rboData);
+    return view.get_const(name) != 0;
 }
 
 void GLClientState::bindRenderbuffer(GLenum target, GLuint name) {
-    // If unused, add it.
-    if (!usedRenderbufferName(name)) {
-        addFreshRenderbuffer(name);
+
+    (void)target; // Must be GL_RENDERBUFFER
+    RenderbufferInfo::ScopedView view(mRboState.rboData);
+    if (name != mRboState.boundRenderbuffer) {
+        view.unref(mRboState.boundRenderbuffer);
     }
+
     mRboState.boundRenderbuffer = name;
-    setBoundRenderbufferIndex();
-    boundRboProps().target = target;
-    boundRboProps().previouslyBound = true;
+
+    if (!name) return;
+
+    view.bind(name);
 }
 
 GLuint GLClientState::boundRenderbuffer() const {
-    return boundRboProps_const().name;
+    return mRboState.boundRenderbuffer;
 }
 
 void GLClientState::setBoundRenderbufferFormat(GLenum format) {
-    boundRboProps().format = format;
+    RenderbufferInfo::ScopedView view(mRboState.rboData);
+    RboProps* props = view.get(mRboState.boundRenderbuffer);
+    if (!props) return;
+    props->format = format;
 }
 
 void GLClientState::setBoundRenderbufferSamples(GLsizei samples) {
-    boundRboProps().multisamples = samples;
+    RenderbufferInfo::ScopedView view(mRboState.rboData);
+    RboProps* props = view.get(mRboState.boundRenderbuffer);
+    if (!props) return;
+    props->multisamples = samples;
+}
+
+void GLClientState::setBoundRenderbufferDimensions(GLsizei width, GLsizei height) {
+    RenderbufferInfo::ScopedView view(mRboState.rboData);
+    RboProps* props = view.get(mRboState.boundRenderbuffer);
+    if (!props) return;
+    props->width = width;
+    props->height = height;
+}
+
+void GLClientState::setBoundRenderbufferEGLImageBacked() {
+    RenderbufferInfo::ScopedView view(mRboState.rboData);
+    RboProps* props = view.get(mRboState.boundRenderbuffer);
+    if (!props) return;
+    props->boundEGLImage = true;
 }
 
 // FBO//////////////////////////////////////////////////////////////////////////
@@ -1422,11 +2166,38 @@
 // Format querying
 
 GLenum GLClientState::queryRboFormat(GLuint rbo_name) const {
-    return mRboState.rboData[getRboIndex(rbo_name)].format;
+    RenderbufferInfo::ScopedView view(mRboState.rboData);
+    const RboProps* props = view.get(rbo_name);
+    if (!props) return 0;
+    return props->format;
 }
 
 GLsizei GLClientState::queryRboSamples(GLuint rbo_name) const {
-    return mRboState.rboData[getRboIndex(rbo_name)].multisamples;
+    RenderbufferInfo::ScopedView view(mRboState.rboData);
+    const RboProps* props = view.get(rbo_name);
+    if (!props) return 0;
+    return props->multisamples;
+}
+
+GLsizei GLClientState::queryRboWidth(GLuint rbo_name) const {
+    RenderbufferInfo::ScopedView view(mRboState.rboData);
+    const RboProps* props = view.get(rbo_name);
+    if (!props) return 0;
+    return props->width;
+}
+
+GLsizei GLClientState::queryRboHeight(GLuint rbo_name) const {
+    RenderbufferInfo::ScopedView view(mRboState.rboData);
+    const RboProps* props = view.get(rbo_name);
+    if (!props) return 0;
+    return props->height;
+}
+
+bool GLClientState::queryRboEGLImageBacked(GLuint rbo_name) const {
+    RenderbufferInfo::ScopedView view(mRboState.rboData);
+    const RboProps* props = view.get(rbo_name);
+    if (!props) return 0;
+    return props->boundEGLImage;
 }
 
 GLint GLClientState::queryTexInternalFormat(GLuint tex_name) const {
@@ -1493,10 +2264,12 @@
     res_info->type = FBO_ATTACHMENT_NONE;
     res_info->rb_format = GL_NONE;
     res_info->rb_multisamples = 0;
+    res_info->rb_external = false;
     res_info->tex_internalformat = -1;
     res_info->tex_format = GL_NONE;
     res_info->tex_type = GL_NONE;
     res_info->tex_multisamples = 0;
+    res_info->tex_external = false;
 
     int colorAttachmentIndex =
         glUtilsColorAttachmentIndex(attachment);
@@ -1510,8 +2283,13 @@
             res_info->rb_multisamples =
                 queryRboSamples(
                         props.colorAttachmenti_rbos[colorAttachmentIndex]);
+            res_info->rb_external =
+                queryRboEGLImageBacked(
+                        props.colorAttachmenti_rbos[colorAttachmentIndex]);
         } else if (props.colorAttachmenti_hasTex[colorAttachmentIndex]) {
             res_info->type = FBO_ATTACHMENT_TEXTURE;
+            res_info->tex_external = queryTexEGLImageBacked(
+                    props.colorAttachmenti_textures[colorAttachmentIndex]);
             res_info->tex_internalformat =
                 queryTexInternalFormat(
                         props.colorAttachmenti_textures[colorAttachmentIndex]);
@@ -1522,6 +2300,8 @@
                 queryTexType(props.colorAttachmenti_textures[colorAttachmentIndex]);
             res_info->tex_multisamples =
                 queryTexSamples(props.colorAttachmenti_textures[colorAttachmentIndex]);
+            res_info->tex_level = props.colorAttachmenti_texture_levels[colorAttachmentIndex];
+            res_info->tex_layer = props.colorAttachmenti_texture_layers[colorAttachmentIndex];
         } else {
             res_info->type = FBO_ATTACHMENT_NONE;
         }
@@ -1535,13 +2315,19 @@
             res_info->rb_multisamples =
                 queryRboSamples(
                         props.depthAttachment_rbo);
+            res_info->rb_external =
+                queryRboEGLImageBacked(
+                        props.depthAttachment_rbo);
         } else if (props.depthAttachment_hasTexObj) {
             res_info->type = FBO_ATTACHMENT_TEXTURE;
+            res_info->tex_external = queryTexEGLImageBacked(props.depthAttachment_texture);
             res_info->tex_internalformat = queryTexInternalFormat(props.depthAttachment_texture);
             res_info->tex_format = queryTexFormat(props.depthAttachment_texture);
             res_info->tex_type = queryTexType(props.depthAttachment_texture);
             res_info->tex_multisamples =
                 queryTexSamples(props.depthAttachment_texture);
+            res_info->tex_level = props.depthAttachment_texture_level;
+            res_info->tex_layer = props.depthAttachment_texture_layer;
         } else {
             res_info->type = FBO_ATTACHMENT_NONE;
         }
@@ -1553,13 +2339,19 @@
             res_info->rb_multisamples =
                 queryRboSamples(
                         props.stencilAttachment_rbo);
+            res_info->rb_external =
+                queryRboEGLImageBacked(
+                        props.stencilAttachment_rbo);
         } else if (props.stencilAttachment_hasTexObj) {
             res_info->type = FBO_ATTACHMENT_TEXTURE;
+            res_info->tex_external = queryTexEGLImageBacked(props.stencilAttachment_texture);
             res_info->tex_internalformat = queryTexInternalFormat(props.stencilAttachment_texture);
             res_info->tex_format = queryTexFormat(props.stencilAttachment_texture);
             res_info->tex_type = queryTexType(props.stencilAttachment_texture);
             res_info->tex_multisamples =
                 queryTexSamples(props.stencilAttachment_texture);
+            res_info->tex_level = props.depthAttachment_texture_level;
+            res_info->tex_layer = props.depthAttachment_texture_layer;
         } else {
             res_info->type = FBO_ATTACHMENT_NONE;
         }
@@ -1571,13 +2363,19 @@
             res_info->rb_multisamples =
                 queryRboSamples(
                         props.depthstencilAttachment_rbo);
+            res_info->rb_external =
+                queryRboEGLImageBacked(
+                        props.depthstencilAttachment_rbo);
         } else if (props.depthstencilAttachment_hasTexObj) {
             res_info->type = FBO_ATTACHMENT_TEXTURE;
+            res_info->tex_external = queryTexEGLImageBacked(props.depthstencilAttachment_texture);
             res_info->tex_internalformat = queryTexInternalFormat(props.depthstencilAttachment_texture);
             res_info->tex_format = queryTexFormat(props.depthstencilAttachment_texture);
             res_info->tex_type = queryTexType(props.depthstencilAttachment_texture);
             res_info->tex_multisamples =
                 queryTexSamples(props.depthstencilAttachment_texture);
+            res_info->tex_level = props.depthAttachment_texture_level;
+            res_info->tex_layer = props.depthAttachment_texture_layer;
         } else {
             res_info->type = FBO_ATTACHMENT_NONE;
         }
@@ -1591,13 +2389,52 @@
     return info.type;
 }
 
-
 int GLClientState::getMaxColorAttachments() const {
-    return m_max_color_attachments;
+    return m_hostDriverCaps.max_color_attachments;
 }
 
 int GLClientState::getMaxDrawBuffers() const {
-    return m_max_draw_buffers;
+    return m_hostDriverCaps.max_draw_buffers;
+}
+
+#define UNIFORM_VALIDATION_ERR_COND(cond, code) if (cond) { *err = code; return; }
+
+#define UNIFORM_VALIDATION_INFO_VAR_NAME info
+
+#define UNIFORM_VALIDATION_TYPE_VIOLATION_FOR_FLOATS \
+    (!(UNIFORM_VALIDATION_INFO_VAR_NAME->isBool) && (UNIFORM_VALIDATION_INFO_VAR_NAME->isInt || UNIFORM_VALIDATION_INFO_VAR_NAME->isSampler))
+
+#define UNIFORM_VALIDATION_TYPE_VIOLATION_FOR_INTS \
+    (!(UNIFORM_VALIDATION_INFO_VAR_NAME->isBool) && (!UNIFORM_VALIDATION_TYPE_VIOLATION_FOR_FLOATS || UNIFORM_VALIDATION_INFO_VAR_NAME->isUnsigned))
+
+#define UNIFORM_VALIDATION_TYPE_VIOLATION_FOR_UNSIGNED_INTS \
+    (!(UNIFORM_VALIDATION_INFO_VAR_NAME->isBool) && (!UNIFORM_VALIDATION_TYPE_VIOLATION_FOR_FLOATS || !(UNIFORM_VALIDATION_INFO_VAR_NAME->isUnsigned)))
+
+#define UNIFORM_VALIDATION_INLINING
+
+void GLClientState::validateUniform(bool isFloat, bool isUnsigned, GLint columns, GLint rows, GLint location, GLsizei count, GLenum* err) {
+    UNIFORM_VALIDATION_ERR_COND(!m_currentProgram && !m_currentShaderProgram, GL_INVALID_OPERATION);
+    if (-1 == location) return; \
+    auto info = currentUniformValidationInfo.get_const(location); \
+    UNIFORM_VALIDATION_ERR_COND(!info || !info->valid, GL_INVALID_OPERATION); \
+    UNIFORM_VALIDATION_ERR_COND(columns != info->columns || rows != info->rows, GL_INVALID_OPERATION); \
+    UNIFORM_VALIDATION_ERR_COND(count > 1 && !info->isArray, GL_INVALID_OPERATION);
+    if (isFloat) {
+        UNIFORM_VALIDATION_ERR_COND(UNIFORM_VALIDATION_TYPE_VIOLATION_FOR_FLOATS, GL_INVALID_OPERATION);
+    } else {
+        if (isUnsigned) {
+            UNIFORM_VALIDATION_ERR_COND(UNIFORM_VALIDATION_TYPE_VIOLATION_FOR_UNSIGNED_INTS, GL_INVALID_OPERATION);
+        } else {
+            UNIFORM_VALIDATION_ERR_COND(UNIFORM_VALIDATION_TYPE_VIOLATION_FOR_INTS, GL_INVALID_OPERATION);
+        }
+    }
+}
+
+bool GLClientState::isAttribIndexUsedByProgram(int index) {
+    auto info = currentAttribValidationInfo.get_const(index);
+    if (!info) return false;
+    if (!info->validInProgram) return false;
+    return true;
 }
 
 void GLClientState::addFreshFramebuffer(GLuint name) {
@@ -1605,25 +2442,39 @@
     props.name = name;
     props.previouslyBound = false;
 
-    props.colorAttachmenti_textures.resize(m_max_color_attachments, 0);
+    props.completenessDirty = true;
+
+    props.colorAttachmenti_textures.resize(m_hostDriverCaps.max_color_attachments, 0);
+    props.colorAttachmenti_texture_levels.resize(m_hostDriverCaps.max_color_attachments, 0);
+    props.colorAttachmenti_texture_layers.resize(m_hostDriverCaps.max_color_attachments, 0);
+
+    props.depthAttachment_texture_level = 0;
+    props.depthAttachment_texture_layer = 0;
+    props.stencilAttachment_texture_level = 0;
+    props.stencilAttachment_texture_layer = 0;
+
     props.depthAttachment_texture = 0;
     props.stencilAttachment_texture = 0;
     props.depthstencilAttachment_texture = 0;
 
-    props.colorAttachmenti_hasTex.resize(m_max_color_attachments, false);
+    props.colorAttachmenti_hasTex.resize(m_hostDriverCaps.max_color_attachments, false);
     props.depthAttachment_hasTexObj = false;
     props.stencilAttachment_hasTexObj = false;
     props.depthstencilAttachment_hasTexObj = false;
 
-    props.colorAttachmenti_rbos.resize(m_max_color_attachments, 0);
+    props.colorAttachmenti_rbos.resize(m_hostDriverCaps.max_color_attachments, 0);
     props.depthAttachment_rbo = 0;
     props.stencilAttachment_rbo = 0;
     props.depthstencilAttachment_rbo = 0;
 
-    props.colorAttachmenti_hasRbo.resize(m_max_color_attachments, false);
+    props.colorAttachmenti_hasRbo.resize(m_hostDriverCaps.max_color_attachments, false);
     props.depthAttachment_hasRbo = false;
     props.stencilAttachment_hasRbo = false;
     props.depthstencilAttachment_hasRbo = false;
+
+    props.defaultWidth = 0;
+    props.defaultHeight = 0;
+
     mFboState.fboData[name] = props;
 }
 
@@ -1709,6 +2560,19 @@
     }
 }
 
+void GLClientState::setFramebufferParameter(GLenum target, GLenum pname, GLint param) {
+    switch (pname) {
+        case GL_FRAMEBUFFER_DEFAULT_WIDTH:
+            boundFboProps(target).defaultWidth = param;
+            boundFboProps(target).completenessDirty = true;
+            break;
+        case GL_FRAMEBUFFER_DEFAULT_HEIGHT:
+            boundFboProps(target).defaultHeight = param;
+            boundFboProps(target).completenessDirty = true;
+            break;
+    }
+}
+
 GLenum GLClientState::getCheckFramebufferStatus(GLenum target) const {
     switch (target) {
     case GL_DRAW_FRAMEBUFFER:
@@ -1729,32 +2593,46 @@
 
 void GLClientState::attachTextureObject(
         GLenum target,
-        GLenum attachment, GLuint texture) {
+        GLenum attachment, GLuint texture, GLint level, GLint layer) {
+
+    bool attach = texture != 0;
 
     int colorAttachmentIndex =
         glUtilsColorAttachmentIndex(attachment);
 
+    boundFboProps(target).completenessDirty = true;
+
     if (colorAttachmentIndex != -1) {
         boundFboProps(target).colorAttachmenti_textures[colorAttachmentIndex] = texture;
-        boundFboProps(target).colorAttachmenti_hasTex[colorAttachmentIndex] = true;
+        boundFboProps(target).colorAttachmenti_texture_levels[colorAttachmentIndex] = level;
+        boundFboProps(target).colorAttachmenti_texture_layers[colorAttachmentIndex] = layer;
+        boundFboProps(target).colorAttachmenti_hasTex[colorAttachmentIndex] = attach;
     }
 
     switch (attachment) {
     case GL_DEPTH_ATTACHMENT:
         boundFboProps(target).depthAttachment_texture = texture;
-        boundFboProps(target).depthAttachment_hasTexObj = true;
+        boundFboProps(target).depthAttachment_texture_level = level;
+        boundFboProps(target).depthAttachment_texture_layer = layer;
+        boundFboProps(target).depthAttachment_hasTexObj = attach;
         break;
     case GL_STENCIL_ATTACHMENT:
         boundFboProps(target).stencilAttachment_texture = texture;
-        boundFboProps(target).stencilAttachment_hasTexObj = true;
+        boundFboProps(target).stencilAttachment_texture_level = level;
+        boundFboProps(target).stencilAttachment_texture_layer = layer;
+        boundFboProps(target).stencilAttachment_hasTexObj = attach;
         break;
     case GL_DEPTH_STENCIL_ATTACHMENT:
         boundFboProps(target).depthstencilAttachment_texture = texture;
-        boundFboProps(target).depthstencilAttachment_hasTexObj = true;
+        boundFboProps(target).depthstencilAttachment_hasTexObj = attach;
         boundFboProps(target).stencilAttachment_texture = texture;
-        boundFboProps(target).stencilAttachment_hasTexObj = true;
+        boundFboProps(target).stencilAttachment_hasTexObj = attach;
         boundFboProps(target).depthAttachment_texture = texture;
-        boundFboProps(target).depthAttachment_hasTexObj = true;
+        boundFboProps(target).depthAttachment_hasTexObj = attach;
+        boundFboProps(target).depthAttachment_texture_level = level;
+        boundFboProps(target).depthAttachment_texture_layer = layer;
+        boundFboProps(target).stencilAttachment_texture_level = level;
+        boundFboProps(target).stencilAttachment_texture_layer = layer;
         break;
     }
 }
@@ -1786,7 +2664,7 @@
 // RBOs for FBOs////////////////////////////////////////////////////////////////
 
 void GLClientState::detachRbo(GLuint renderbuffer) {
-    for (int i = 0; i < m_max_color_attachments; i++) {
+    for (int i = 0; i < m_hostDriverCaps.max_color_attachments; i++) {
         detachRboFromFbo(GL_DRAW_FRAMEBUFFER, glUtilsColorAttachmentName(i), renderbuffer);
         detachRboFromFbo(GL_READ_FRAMEBUFFER, glUtilsColorAttachmentName(i), renderbuffer);
     }
@@ -1805,6 +2683,8 @@
     int colorAttachmentIndex =
         glUtilsColorAttachmentIndex(attachment);
 
+    boundFboProps(target).completenessDirty = true;
+
     if (colorAttachmentIndex != -1) {
         if (boundFboProps(target).colorAttachmenti_hasRbo[colorAttachmentIndex] &&
             boundFboProps(target).colorAttachmenti_rbos[colorAttachmentIndex] == renderbuffer) {
@@ -1850,30 +2730,34 @@
 
 void GLClientState::attachRbo(GLenum target, GLenum attachment, GLuint renderbuffer) {
 
+    bool attach = 0 != renderbuffer;
+
     int colorAttachmentIndex =
         glUtilsColorAttachmentIndex(attachment);
 
+    boundFboProps(target).completenessDirty = true;
+
     if (colorAttachmentIndex != -1) {
         boundFboProps(target).colorAttachmenti_rbos[colorAttachmentIndex] = renderbuffer;
-        boundFboProps(target).colorAttachmenti_hasRbo[colorAttachmentIndex] = true;
+        boundFboProps(target).colorAttachmenti_hasRbo[colorAttachmentIndex] = attach;
     }
 
     switch (attachment) {
     case GL_DEPTH_ATTACHMENT:
         boundFboProps(target).depthAttachment_rbo = renderbuffer;
-        boundFboProps(target).depthAttachment_hasRbo = true;
+        boundFboProps(target).depthAttachment_hasRbo = attach;
         break;
     case GL_STENCIL_ATTACHMENT:
         boundFboProps(target).stencilAttachment_rbo = renderbuffer;
-        boundFboProps(target).stencilAttachment_hasRbo = true;
+        boundFboProps(target).stencilAttachment_hasRbo = attach;
         break;
     case GL_DEPTH_STENCIL_ATTACHMENT:
         boundFboProps(target).depthAttachment_rbo = renderbuffer;
-        boundFboProps(target).depthAttachment_hasRbo = true;
+        boundFboProps(target).depthAttachment_hasRbo = attach;
         boundFboProps(target).stencilAttachment_rbo = renderbuffer;
-        boundFboProps(target).stencilAttachment_hasRbo = true;
+        boundFboProps(target).stencilAttachment_hasRbo = attach;
         boundFboProps(target).depthstencilAttachment_rbo = renderbuffer;
-        boundFboProps(target).depthstencilAttachment_hasRbo = true;
+        boundFboProps(target).depthstencilAttachment_hasRbo = attach;
         break;
     }
 }
@@ -1902,6 +2786,80 @@
     return res;
 }
 
+void GLClientState::setFboCompletenessDirtyForTexture(GLuint texture) {
+    std::map<GLuint, FboProps>::iterator it = mFboState.fboData.begin();
+    while (it != mFboState.fboData.end()) {
+        FboProps& props = it->second;
+        for (int i = 0; i < m_hostDriverCaps.max_color_attachments; ++i) {
+            if (props.colorAttachmenti_hasTex[i]) {
+                if (texture == props.colorAttachmenti_textures[i]) {
+                    props.completenessDirty = true;
+                    return;
+                }
+            }
+        }
+
+        if (props.depthAttachment_hasTexObj) {
+            if (texture == props.depthAttachment_texture) {
+                    props.completenessDirty = true;
+                    return;
+            }
+        }
+
+        if (props.stencilAttachment_hasTexObj) {
+            if (texture == props.stencilAttachment_texture) {
+                props.completenessDirty = true;
+                return;
+            }
+        }
+
+        if (props.depthstencilAttachment_hasTexObj) {
+            if (texture == props.depthstencilAttachment_texture) {
+                props.completenessDirty = true;
+                return;
+            }
+        }
+        ++it;
+    }
+}
+
+void GLClientState::setFboCompletenessDirtyForRbo(GLuint rbo) {
+    std::map<GLuint, FboProps>::iterator it = mFboState.fboData.begin();
+    while (it != mFboState.fboData.end()) {
+        FboProps& props = it->second;
+        for (int i = 0; i < m_hostDriverCaps.max_color_attachments; ++i) {
+            if (props.colorAttachmenti_hasTex[i]) {
+                if (rbo == props.colorAttachmenti_rbos[i]) {
+                    props.completenessDirty = true;
+                    return;
+                }
+            }
+        }
+
+        if (props.depthAttachment_hasTexObj) {
+            if (rbo == props.depthAttachment_rbo) {
+                    props.completenessDirty = true;
+                    return;
+            }
+        }
+
+        if (props.stencilAttachment_hasTexObj) {
+            if (rbo == props.stencilAttachment_rbo) {
+                props.completenessDirty = true;
+                return;
+            }
+        }
+
+        if (props.depthstencilAttachment_hasRbo) {
+            if (rbo == props.depthstencilAttachment_rbo) {
+                props.completenessDirty = true;
+                return;
+            }
+        }
+        ++it;
+    }
+}
+
 bool GLClientState::attachmentHasObject(GLenum target, GLenum attachment) const {
     bool res = true; // liberal
 
@@ -1977,22 +2935,101 @@
     return 0;
 }
 
-void GLClientState::setTransformFeedbackActiveUnpaused(bool activeUnpaused) {
-    m_transformFeedbackActiveUnpaused = activeUnpaused;
+void GLClientState::setTransformFeedbackActive(bool active) {
+    m_transformFeedbackActive = active;
+}
+
+void GLClientState::setTransformFeedbackUnpaused(bool unpaused) {
+    m_transformFeedbackUnpaused = unpaused;
+}
+
+void GLClientState::setTransformFeedbackVaryingsCountForLinking(uint32_t count) {
+    m_transformFeedbackVaryingsCountForLinking = count;
+}
+
+bool GLClientState::getTransformFeedbackActive() const {
+    return m_transformFeedbackActive;
+}
+
+bool GLClientState::getTransformFeedbackUnpaused() const {
+    return m_transformFeedbackUnpaused;
 }
 
 bool GLClientState::getTransformFeedbackActiveUnpaused() const {
-    return m_transformFeedbackActiveUnpaused;
+    return m_transformFeedbackActive && m_transformFeedbackUnpaused;
+}
+
+uint32_t GLClientState::getTransformFeedbackVaryingsCountForLinking() const {
+    return m_transformFeedbackVaryingsCountForLinking;
+}
+
+void GLClientState::stencilFuncSeparate(GLenum face, GLenum func, GLint ref, GLuint mask) {
+    if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
+        state_GL_STENCIL_FUNC = func;
+        state_GL_STENCIL_REF = ref;
+        state_GL_STENCIL_VALUE_MASK = mask;
+    }
+
+    if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
+        state_GL_STENCIL_BACK_FUNC = func;
+        state_GL_STENCIL_BACK_REF = ref;
+        state_GL_STENCIL_BACK_VALUE_MASK = mask;
+    }
+}
+
+void GLClientState::stencilMaskSeparate(GLenum face, GLuint mask) {
+    if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
+        state_GL_STENCIL_WRITEMASK = mask;
+    }
+
+    if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
+        state_GL_STENCIL_BACK_WRITEMASK = mask;
+    }
+}
+
+void GLClientState::stencilOpSeparate(GLenum face, GLenum fail, GLenum zfail, GLenum zpass) {
+    if (face == GL_FRONT || face == GL_FRONT_AND_BACK) {
+        state_GL_STENCIL_FAIL = fail;
+        state_GL_STENCIL_PASS_DEPTH_FAIL = zfail;
+        state_GL_STENCIL_PASS_DEPTH_PASS = zpass;
+    }
+
+    if (face == GL_BACK || face == GL_FRONT_AND_BACK) {
+        state_GL_STENCIL_BACK_FAIL = fail;
+        state_GL_STENCIL_BACK_PASS_DEPTH_FAIL = zfail;
+        state_GL_STENCIL_BACK_PASS_DEPTH_PASS = zpass;
+    }
 }
 
 void GLClientState::setTextureData(SharedTextureDataMap* sharedTexData) {
     m_tex.textureRecs = sharedTexData;
 }
 
+void GLClientState::setRenderbufferInfo(RenderbufferInfo* rbInfo) {
+    mRboState.rboData = rbInfo;
+}
+
+void GLClientState::setSamplerInfo(SamplerInfo* samplerInfo) {
+    mSamplerInfo = samplerInfo;
+}
+
+bool GLClientState::compressedTexImageSizeCompatible(GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLsizei imageSize) {
+    bool error = false;
+    GLsizei compressedSize = GLESTextureUtils::getCompressedImageSize(internalformat, width, height, depth, &error);
+    if (error) return false;
+    return imageSize == compressedSize;
+}
+
 void GLClientState::fromMakeCurrent() {
     if (mFboState.fboData.find(0) == mFboState.fboData.end()) {
         addFreshFramebuffer(0);
     }
+
+    if (!samplerExists(0)) {
+        GLuint id = 0;
+        setExistence(ObjectType::Sampler, true, 1, &id);
+    }
+
     FboProps& default_fb_props = mFboState.fboData[0];
     default_fb_props.colorAttachmenti_hasRbo[0] = true;
     default_fb_props.depthAttachment_hasRbo = true;
@@ -2001,30 +3038,30 @@
 }
 
 void GLClientState::initFromCaps(
-    int max_transform_feedback_separate_attribs,
-    int max_uniform_buffer_bindings,
-    int max_atomic_counter_buffer_bindings,
-    int max_shader_storage_buffer_bindings,
-    int max_vertex_attrib_bindings,
-    int max_color_attachments,
-    int max_draw_buffers) {
+    const HostDriverCaps& caps) {
+    m_hostDriverCaps = caps;
 
-    m_max_vertex_attrib_bindings = max_vertex_attrib_bindings;
+    // Override some of them
+    m_hostDriverCaps.max_vertex_attribs = CODEC_MAX_VERTEX_ATTRIBUTES;
+    m_hostDriverCaps.max_vertex_attrib_bindings = m_hostDriverCaps.max_vertex_attribs;
+
+    // Derive some other settings
+    m_log2MaxTextureSize = 0;
+    uint32_t current = 1;
+    while (current < m_hostDriverCaps.max_texture_size) {
+        current = current << 1;
+        ++m_log2MaxTextureSize;
+    }
 
     if (m_glesMajorVersion >= 3) {
-        m_max_transform_feedback_separate_attribs = max_transform_feedback_separate_attribs;
-        m_max_uniform_buffer_bindings = max_uniform_buffer_bindings;
-        m_max_atomic_counter_buffer_bindings = max_atomic_counter_buffer_bindings;
-        m_max_shader_storage_buffer_bindings = max_shader_storage_buffer_bindings;
-
-        if (m_max_transform_feedback_separate_attribs)
-            m_indexedTransformFeedbackBuffers.resize(m_max_transform_feedback_separate_attribs);
-        if (m_max_uniform_buffer_bindings)
-            m_indexedUniformBuffers.resize(m_max_uniform_buffer_bindings);
-        if (m_max_atomic_counter_buffer_bindings)
-            m_indexedAtomicCounterBuffers.resize(m_max_atomic_counter_buffer_bindings);
-        if (m_max_shader_storage_buffer_bindings)
-            m_indexedShaderStorageBuffers.resize(m_max_shader_storage_buffer_bindings);
+        if (m_hostDriverCaps.max_transform_feedback_separate_attribs)
+            m_indexedTransformFeedbackBuffers.resize(m_hostDriverCaps.max_transform_feedback_separate_attribs);
+        if (m_hostDriverCaps.max_uniform_buffer_bindings)
+            m_indexedUniformBuffers.resize(m_hostDriverCaps.max_uniform_buffer_bindings);
+        if (m_hostDriverCaps.max_atomic_counter_buffer_bindings)
+            m_indexedAtomicCounterBuffers.resize(m_hostDriverCaps.max_atomic_counter_buffer_bindings);
+        if (m_hostDriverCaps.max_shader_storage_buffer_bindings)
+            m_indexedShaderStorageBuffers.resize(m_hostDriverCaps.max_shader_storage_buffer_bindings);
 
         BufferBinding buf0Binding;
         buf0Binding.buffer = 0;
@@ -2043,10 +3080,6 @@
             m_indexedShaderStorageBuffers[i] = buf0Binding;
     }
 
-    m_max_color_attachments = max_color_attachments;
-    m_max_draw_buffers = max_draw_buffers;
-
-    addFreshRenderbuffer(0);
     addFreshFramebuffer(0);
 
     m_initialized = true;
@@ -2055,3 +3088,60 @@
 bool GLClientState::needsInitFromCaps() const {
     return !m_initialized;
 }
+
+void GLClientState::setExtensions(const std::string& extensions) {
+    if (!m_extensions_set) m_extensions = extensions;
+
+    m_has_color_buffer_float_extension =
+        hasExtension("GL_EXT_color_buffer_float");
+    m_has_color_buffer_half_float_extension =
+        hasExtension("GL_EXT_color_buffer_half_float");
+    m_extensions_set = true;
+}
+
+bool GLClientState::hasExtension(const char* ext) const {
+    return m_extensions.find(ext) != std::string::npos;
+}
+
+using android::base::guest::AutoLock;
+using android::base::guest::Lock;
+
+// A process-wide fence registry (because we can use fence sync objects across multiple contexts)
+struct FenceRegistry {
+    Lock lock;
+    PredicateMap<uint64_t, false> existence;
+
+    void onFenceCreated(GLsync sync) {
+        AutoLock scopedLock(lock);
+        uint64_t asUint64 = (uint64_t)(uintptr_t)(sync);
+        existence.add(asUint64);
+        existence.set(asUint64, true);
+    }
+
+    void onFenceDestroyed(GLsync sync) {
+        AutoLock scopedLock(lock);
+        uint64_t asUint64 = (uint64_t)(uintptr_t)(sync);
+        existence.remove(asUint64);
+    }
+
+    bool exists(GLsync sync) {
+        AutoLock scopedLock(lock);
+        uint64_t asUint64 = (uint64_t)(uintptr_t)(sync);
+        return existence.get(asUint64);
+    }
+};
+
+static FenceRegistry sFenceRegistry;
+
+void GLClientState::onFenceCreated(GLsync sync) {
+    sFenceRegistry.onFenceCreated(sync);
+}
+
+void GLClientState::onFenceDestroyed(GLsync sync) {
+    sFenceRegistry.onFenceDestroyed(sync);
+}
+
+bool GLClientState::fenceExists(GLsync sync) {
+    return sFenceRegistry.exists(sync);
+}
+
diff --git a/shared/OpenglCodecCommon/GLClientState.h b/shared/OpenglCodecCommon/GLClientState.h
index b7f5655..ff20287 100644
--- a/shared/OpenglCodecCommon/GLClientState.h
+++ b/shared/OpenglCodecCommon/GLClientState.h
@@ -22,6 +22,10 @@
 #define GL_APIENTRYP
 #endif
 
+#ifdef GFXSTREAM
+#include "StateTrackingSupport.h"
+#endif
+
 #include "TextureSharedData.h"
 
 #include <GLES/gl.h>
@@ -37,6 +41,36 @@
 #include <vector>
 #include <map>
 #include <set>
+#include <string>
+
+// Caps of host driver that make it easy to validate stuff
+struct HostDriverCaps {
+    // ES 2
+    int max_vertex_attribs;
+    int max_combined_texture_image_units;
+    int max_color_attachments;
+
+    int max_texture_size;
+    int max_texture_size_cube_map;
+    int max_renderbuffer_size;
+
+    // ES 3.0
+    int max_draw_buffers;
+
+    int ubo_offset_alignment;
+    int max_uniform_buffer_bindings;
+    int max_transform_feedback_separate_attribs;
+
+    int max_texture_size_3d;
+    int max_array_texture_layers;
+
+    // ES 3.1
+    int max_atomic_counter_buffer_bindings;
+    int max_shader_storage_buffer_bindings;
+    int max_vertex_attrib_bindings;
+    int max_vertex_attrib_stride;
+    int ssbo_offset_alignment;
+};
 
 // Tracking framebuffer objects:
 // which framebuffer is bound,
@@ -45,7 +79,17 @@
 struct FboProps {
     GLuint name;
     bool previouslyBound;
+    bool completenessDirty;
+    GLenum cachedCompleteness;
     std::vector<GLuint> colorAttachmenti_textures;
+    std::vector<GLint> colorAttachmenti_texture_levels;
+    std::vector<GLint> colorAttachmenti_texture_layers;
+
+    GLint depthAttachment_texture_level;
+    GLint depthAttachment_texture_layer;
+    GLint stencilAttachment_texture_level;
+    GLint stencilAttachment_texture_layer;
+
     GLuint depthAttachment_texture;
     GLuint stencilAttachment_texture;
     GLuint depthstencilAttachment_texture;
@@ -64,15 +108,9 @@
     bool depthAttachment_hasRbo;
     bool stencilAttachment_hasRbo;
     bool depthstencilAttachment_hasRbo;
-};
 
-// Same for Rbo's
-struct RboProps {
-    GLenum target;
-    GLuint name;
-    GLenum format;
-    GLsizei multisamples;
-    bool previouslyBound;
+    GLuint defaultWidth;
+    GLuint defaultHeight;
 };
 
 // Enum for describing whether a framebuffer attachment
@@ -88,15 +126,27 @@
     FboAttachmentType type;
     GLenum rb_format;
     GLsizei rb_multisamples;
+    bool rb_external;
 
     GLint tex_internalformat;
     GLenum tex_format;
     GLenum tex_type;
     GLsizei tex_multisamples;
+    GLint tex_level;
+    GLint tex_layer;
+    bool tex_external;
 };
 
 class GLClientState {
 public:
+    // TODO: Unify everything in here
+    typedef enum {
+        Buffer,
+        TransformFeedback,
+        Sampler,
+        Query,
+    } ObjectType;
+
     typedef enum {
         VERTEX_LOCATION = 0,
         NORMAL_LOCATION = 1,
@@ -199,7 +249,7 @@
     GLClientState();
     GLClientState(int majorVersion, int minorVersion);
     ~GLClientState();
-    int nLocations() { return m_nLocations; }
+    int nLocations() { return CODEC_MAX_VERTEX_ATTRIBUTES; }
     const PixelStoreState *pixelStoreState() { return &m_pixelStore; }
     int setPixelStore(GLenum param, GLint value);
     GLuint currentVertexArrayObject() const { return m_currVaoState.vaoId(); }
@@ -230,22 +280,52 @@
     int getLocation(GLenum loc);
     void setActiveTexture(int texUnit) {m_activeTexture = texUnit; };
     int getActiveTexture() const { return m_activeTexture; }
-    void setMaxVertexAttribs(int val) {
-        m_maxVertexAttribs = val;
-        m_maxVertexAttribsDirty = false;
-    }
 
     void addBuffer(GLuint id);
     void removeBuffer(GLuint id);
     bool bufferIdExists(GLuint id) const;
     void unBindBuffer(GLuint id);
 
+    void setBufferHostMapDirty(GLuint id, bool dirty);
+    bool isBufferHostMapDirty(GLuint id) const;
+
+    void setExistence(ObjectType type, bool exists, GLsizei count, const GLuint* ids);
+    bool queryExistence(ObjectType type, GLuint id) const;
+    bool samplerExists(GLuint id) const;
+    bool tryBind(GLenum target, GLuint id);
+    bool isBoundTargetValid(GLenum target);
+    bool isQueryBound(GLenum target);
+    bool isQueryObjectActive(GLuint id);
+    void setLastQueryTarget(GLenum target, GLuint id);
+    GLenum getLastQueryTarget(GLuint id);
+
+    static void onFenceCreated(GLsync sync);
+    static void onFenceDestroyed(GLsync sync);
+    static bool fenceExists(GLsync sync);
+
+    void setBoundPixelPackBufferDirtyForHostMap();
+    void setBoundTransformFeedbackBuffersDirtyForHostMap();
+    void setBoundShaderStorageBuffersDirtyForHostMap();
+    void setBoundAtomicCounterBuffersDirtyForHostMap();
+
     int bindBuffer(GLenum target, GLuint id);
     void bindIndexedBuffer(GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size, GLintptr stride, GLintptr effectiveStride);
     int getMaxIndexedBufferBindings(GLenum target) const;
     bool isNonIndexedBindNoOp(GLenum target, GLuint buffer);
     bool isIndexedBindNoOp(GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size, GLintptr stride, GLintptr effectiveStride);
 
+    int getMaxTextureSize() const;
+    int getMaxTextureSize3D() const;
+    int getMaxTextureSizeCubeMap() const;
+    int getLog2MaxTextureSize() const;
+
+    void postDraw();
+    void postReadPixels();
+    void postDispatchCompute();
+
+    bool shouldSkipHostMapBuffer(GLenum target);
+    void onHostMappedBuffer(GLenum target);
+
     int getBuffer(GLenum target);
     GLuint getLastEncodedBufferBind(GLenum target);
     void setLastEncodedBufferBind(GLenum target, GLuint id);
@@ -313,7 +393,7 @@
     // glDisable(GL_TEXTURE_(2D|EXTERNAL_OES))
     void disableTextureTarget(GLenum target);
 
-    void bindSampler(GLuint unit, GLuint sampler);
+    bool bindSampler(GLuint unit, GLuint sampler);
     bool isSamplerBindNoOp(GLuint unit, GLuint sampler);
     void onDeleteSamplers(GLsizei n, const GLuint* samplers);
 
@@ -334,10 +414,19 @@
     // For accurate error detection, bindTexture should be called for *all*
     // targets, not just 2D and EXTERNAL_OES.
     GLenum bindTexture(GLenum target, GLuint texture, GLboolean* firstUse);
-    void setBoundEGLImage(GLenum target, GLeglImageOES image);
+    void setBoundEGLImage(GLenum target, GLeglImageOES image, int width, int height);
 
     // Return the texture currently bound to GL_TEXTURE_(2D|EXTERNAL_OES).
     GLuint getBoundTexture(GLenum target) const;
+    // Return bound framebuffer for target
+    GLuint getBoundFramebuffer(GLenum target) const;
+
+    // Check framebuffer completeness
+    GLenum checkFramebufferCompleteness(GLenum target);
+    // |currentSamples|: threads through the current sample count of attachments so far,
+    // for validating consistent number of samples across attachments
+    GLenum checkFramebufferAttachmentCompleteness(GLenum target, GLenum attachment, int* currentSamples) const;
+
     // Other publicly-visible texture queries
     GLenum queryTexLastBoundTarget(GLuint name) const;
     GLenum queryTexFormat(GLuint name) const;
@@ -364,12 +453,14 @@
     void setBoundTextureInternalFormat(GLenum target, GLint format);
     void setBoundTextureFormat(GLenum target, GLenum format);
     void setBoundTextureType(GLenum target, GLenum type);
-    void setBoundTextureDims(GLenum target, GLsizei level, GLsizei width, GLsizei height, GLsizei depth);
+    void setBoundTextureDims(GLenum target, GLenum cubetarget, GLsizei level, GLsizei width, GLsizei height, GLsizei depth);
     void setBoundTextureSamples(GLenum target, GLsizei samples);
+    void addTextureCubeMapImage(GLenum stateTarget, GLenum cubeTarget);
 
     // glTexStorage2D disallows any change in texture format after it is set for a particular texture.
     void setBoundTextureImmutableFormat(GLenum target);
     bool isBoundTextureImmutableFormat(GLenum target) const;
+    bool isBoundTextureComplete(GLenum target) const;
 
     // glDeleteTextures(...)
     // Remove references to the to-be-deleted textures.
@@ -383,6 +474,8 @@
     GLuint boundRenderbuffer() const;
     void setBoundRenderbufferFormat(GLenum format);
     void setBoundRenderbufferSamples(GLsizei samples);
+    void setBoundRenderbufferDimensions(GLsizei width, GLsizei height);
+    void setBoundRenderbufferEGLImageBacked();
 
     // Frame buffer objects
     void addFramebuffers(GLsizei n, GLuint* framebuffers);
@@ -390,11 +483,12 @@
     bool usedFramebufferName(GLuint name) const;
     void bindFramebuffer(GLenum target, GLuint name);
     void setCheckFramebufferStatus(GLenum target, GLenum status);
+    void setFramebufferParameter(GLenum target, GLenum pname, GLint param);
     GLenum getCheckFramebufferStatus(GLenum target) const;
     GLuint boundFramebuffer(GLenum target) const;
 
     // Texture object -> FBO
-    void attachTextureObject(GLenum target, GLenum attachment, GLuint texture);
+    void attachTextureObject(GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer);
     GLuint getFboAttachmentTextureId(GLenum target, GLenum attachment) const;
 
     // RBO -> FBO
@@ -407,11 +501,29 @@
     bool attachmentHasObject(GLenum target, GLenum attachment) const;
     GLuint objectOfAttachment(GLenum target, GLenum attachment) const;
 
+    // Dirty FBO completeness
+    void setFboCompletenessDirtyForTexture(GLuint texture);
+    void setFboCompletenessDirtyForRbo(GLuint rbo_name);
+
     // Transform feedback state
-    void setTransformFeedbackActiveUnpaused(bool activeUnpaused);
+    void setTransformFeedbackActive(bool active);
+    void setTransformFeedbackUnpaused(bool unpaused);
+    void setTransformFeedbackVaryingsCountForLinking(uint32_t count);
+    bool getTransformFeedbackActive() const;
+    bool getTransformFeedbackUnpaused() const;
     bool getTransformFeedbackActiveUnpaused() const;
+    uint32_t getTransformFeedbackVaryingsCountForLinking() const;
+
+    // Stencil state
+    void stencilFuncSeparate(GLenum face, GLenum func, GLint ref, GLuint mask);
+    void stencilMaskSeparate(GLenum face, GLuint mask);
+    void stencilOpSeparate(GLenum face, GLenum fail, GLenum zfail, GLenum zpass);
 
     void setTextureData(SharedTextureDataMap* sharedTexData);
+    void setRenderbufferInfo(RenderbufferInfo* rbInfo);
+    void setSamplerInfo(SamplerInfo* samplerInfo);
+
+    bool compressedTexImageSizeCompatible(GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLsizei imageSize);
     // set eglsurface property on default framebuffer
     // if coming from eglMakeCurrent
     void fromMakeCurrent();
@@ -420,14 +532,10 @@
     // accurate values for indexed buffers
     // and # render targets.
     void initFromCaps(
-        int max_transform_feedback_separate_attribs,
-        int max_uniform_buffer_bindings,
-        int max_atomic_counter_buffer_bindings,
-        int max_shader_storage_buffer_bindings,
-        int max_vertex_attrib_bindings,
-        int max_color_attachments,
-        int max_draw_buffers);
+        const HostDriverCaps& caps);
     bool needsInitFromCaps() const;
+    void setExtensions(const std::string& extensions);
+    bool hasExtension(const char* ext) const;
 
     // Queries the format backing the current framebuffer.
     // Type differs depending on whether the attachment
@@ -441,12 +549,69 @@
             GLenum attachment) const;
     int getMaxColorAttachments() const;
     int getMaxDrawBuffers() const;
+
+    // Uniform/attribute validation info
+    UniformValidationInfo currentUniformValidationInfo;
+    AttribValidationInfo currentAttribValidationInfo;;
+
+    // Uniform validation api
+    void validateUniform(bool isFloat, bool isUnsigned, GLint columns, GLint rows, GLint location, GLsizei count, GLenum* err);
+    // Attrib validation
+    bool isAttribIndexUsedByProgram(int attribIndex);
+
+    // Fast access to some enables and stencil related glGet's
+    bool state_GL_STENCIL_TEST;
+    GLenum state_GL_STENCIL_FUNC;
+    unsigned int state_GL_STENCIL_VALUE_MASK;
+    int state_GL_STENCIL_REF;
+    GLenum state_GL_STENCIL_FAIL;
+    GLenum state_GL_STENCIL_PASS_DEPTH_FAIL;
+    GLenum state_GL_STENCIL_PASS_DEPTH_PASS;
+    GLenum state_GL_STENCIL_BACK_FUNC;
+    unsigned int state_GL_STENCIL_BACK_VALUE_MASK;
+    int state_GL_STENCIL_BACK_REF;
+    GLenum state_GL_STENCIL_BACK_FAIL;
+    GLenum state_GL_STENCIL_BACK_PASS_DEPTH_FAIL;
+    GLenum state_GL_STENCIL_BACK_PASS_DEPTH_PASS;
+    unsigned int state_GL_STENCIL_WRITEMASK;
+    unsigned int state_GL_STENCIL_BACK_WRITEMASK;
+    int state_GL_STENCIL_CLEAR_VALUE;
 private:
     void init();
     bool m_initialized;
     PixelStoreState m_pixelStore;
 
+#ifdef GFXSTREAM
+    using DirtyMap = PredicateMap<uint32_t, true>;
+
+    ExistenceMap mBufferIds;
+    ExistenceMap mTransformFeedbackIds;
+    SamplerInfo* mSamplerInfo;
+    ExistenceMap mQueryIds;
+    LastQueryTargetInfo mLastQueryTargets;
+
+    // Bound query target validity and tracking
+    struct BoundTargetInfo {
+        GLuint id;
+        bool valid;
+    };
+   
+    // Transform feedback
+    BoundTargetInfo mBoundTransformFeedbackValidity;
+
+    // Queries
+    // GL_ANY_SAMPLES_PASSED
+    BoundTargetInfo mBoundQueryValidity_AnySamplesPassed;
+    // GL_ANY_SAMPLES_PASSED_CONSERVATIVE
+    BoundTargetInfo mBoundQueryValidity_AnySamplesPassedConservative;
+    // GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN
+    BoundTargetInfo mBoundQueryValidity_TransformFeedbackPrimitivesWritten;
+
+    // Dirty maps
+    DirtyMap mHostMappedBufferDirty;
+#else
     std::set<GLuint> mBufferIds;
+#endif
 
     // GL_ARRAY_BUFFER_BINDING is separate from VAO state
     GLuint m_arrayBuffer;
@@ -475,23 +640,23 @@
     GLuint m_drawIndirectBuffer;
     GLuint m_shaderStorageBuffer;
 
-    bool m_transformFeedbackActiveUnpaused;
+    bool m_transformFeedbackActive;
+    bool m_transformFeedbackUnpaused;
+    uint32_t m_transformFeedbackVaryingsCountForLinking;
 
-    int m_max_transform_feedback_separate_attribs;
-    int m_max_uniform_buffer_bindings;
-    int m_max_atomic_counter_buffer_bindings;
-    int m_max_shader_storage_buffer_bindings;
-    int m_max_vertex_attrib_bindings;
+    HostDriverCaps m_hostDriverCaps;
+    bool m_extensions_set;
+    std::string m_extensions;
+    bool m_has_color_buffer_float_extension;
+    bool m_has_color_buffer_half_float_extension;
     std::vector<BufferBinding> m_indexedTransformFeedbackBuffers;
     std::vector<BufferBinding> m_indexedUniformBuffers;
     std::vector<BufferBinding> m_indexedAtomicCounterBuffers;
     std::vector<BufferBinding> m_indexedShaderStorageBuffers;
+    int m_log2MaxTextureSize;
 
     int m_glesMajorVersion;
     int m_glesMinorVersion;
-    int m_maxVertexAttribs;
-    bool m_maxVertexAttribsDirty;
-    int m_nLocations;
     int m_activeTexture;
     GLint m_currentProgram;
     GLint m_currentShaderProgram;
@@ -546,19 +711,14 @@
     GLenum copyTexImageNeededTarget(GLenum target, GLint level,
                                     GLenum internalformat);
 
-    int m_max_color_attachments;
-    int m_max_draw_buffers;
     struct RboState {
         GLuint boundRenderbuffer;
-        size_t boundRenderbufferIndex;
-        std::vector<RboProps> rboData;
+        // Connects to share group.
+        // Expected that share group lifetime outlives this context.
+        RenderbufferInfo* rboData;
     };
     RboState mRboState;
     void addFreshRenderbuffer(GLuint name);
-    void setBoundRenderbufferIndex();
-    size_t getRboIndex(GLuint name) const;
-    RboProps& boundRboProps();
-    const RboProps& boundRboProps_const() const;
 
     struct FboState {
         GLuint boundDrawFramebuffer;
@@ -576,6 +736,9 @@
     // Querying framebuffer format
     GLenum queryRboFormat(GLuint name) const;
     GLsizei queryRboSamples(GLuint name) const;
+    GLsizei queryRboWidth(GLuint name) const;
+    GLsizei queryRboHeight(GLuint name) const;
+    bool queryRboEGLImageBacked(GLuint name) const;
     GLenum queryTexType(GLuint name) const;
     GLsizei queryTexSamples(GLuint name) const;
 
@@ -584,6 +747,13 @@
     TextureRec* getTextureRec(GLuint id) const;
 
 public:
+    bool isTexture(GLuint name) const;
+    bool isTextureWithStorage(GLuint name) const;
+    bool isTextureWithTarget(GLuint name) const;
+    bool isTextureCubeMap(GLuint name) const;
+    bool isRenderbuffer(GLuint name) const;
+    bool isRenderbufferThatWasBound(GLuint name) const;
+
     void getClientStatePointer(GLenum pname, GLvoid** params);
 
     template <class T>
@@ -815,14 +985,83 @@
             break;
             }
         case GL_MAX_VERTEX_ATTRIBS: {
-            if (m_maxVertexAttribsDirty) {
-                isClientStateParam = false;
-            } else {
-                *out = m_maxVertexAttribs;
-                isClientStateParam = true;
-            }
+            *out = CODEC_MAX_VERTEX_ATTRIBUTES;
+            isClientStateParam = true;
             break;
         }
+        case GL_FRAMEBUFFER_BINDING:
+        // also case GL_DRAW_FRAMEBUFFER_BINDING:
+            *out = (T)mFboState.boundDrawFramebuffer;
+            isClientStateParam = true;
+            break;
+        case 0x8CAA: // GL_READ_FRAMEBUFFER_BINDING
+            *out = (T)mFboState.boundReadFramebuffer;
+            isClientStateParam = true;
+            break;
+        case GL_STENCIL_TEST:
+            *out = (T)state_GL_STENCIL_TEST;
+            isClientStateParam = true;
+            break;
+        case GL_STENCIL_FUNC:
+            *out = (T)state_GL_STENCIL_FUNC;
+            isClientStateParam = true;
+            break;
+        case GL_STENCIL_VALUE_MASK:
+            *out = (T)state_GL_STENCIL_VALUE_MASK;
+            isClientStateParam = true;
+            break;
+        case GL_STENCIL_REF:
+            *out = (T)state_GL_STENCIL_REF;
+            isClientStateParam = true;
+            break;
+        case GL_STENCIL_FAIL:
+            *out = (T)state_GL_STENCIL_FAIL;
+            isClientStateParam = true;
+            break;
+        case GL_STENCIL_PASS_DEPTH_FAIL:
+            *out = (T)state_GL_STENCIL_PASS_DEPTH_FAIL;
+            isClientStateParam = true;
+            break;
+        case GL_STENCIL_PASS_DEPTH_PASS:
+            *out = (T)state_GL_STENCIL_PASS_DEPTH_PASS;
+            isClientStateParam = true;
+            break;
+        case GL_STENCIL_BACK_FUNC:
+            *out = (T)state_GL_STENCIL_BACK_FUNC;
+            isClientStateParam = true;
+            break;
+        case GL_STENCIL_BACK_VALUE_MASK:
+            *out = (T)state_GL_STENCIL_BACK_VALUE_MASK;
+            isClientStateParam = true;
+            break;
+        case GL_STENCIL_BACK_REF:
+            *out = (T)state_GL_STENCIL_BACK_REF;
+            isClientStateParam = true;
+            break;
+        case GL_STENCIL_BACK_FAIL:
+            *out = (T)state_GL_STENCIL_BACK_FAIL;
+            isClientStateParam = true;
+            break;
+        case GL_STENCIL_BACK_PASS_DEPTH_FAIL:
+            *out = (T)state_GL_STENCIL_BACK_PASS_DEPTH_FAIL;
+            isClientStateParam = true;
+            break;
+        case GL_STENCIL_BACK_PASS_DEPTH_PASS:
+            *out = (T)state_GL_STENCIL_BACK_PASS_DEPTH_PASS;
+            isClientStateParam = true;
+            break;
+        case GL_STENCIL_WRITEMASK:
+            *out = (T)state_GL_STENCIL_WRITEMASK;
+            isClientStateParam = true;
+            break;
+        case GL_STENCIL_BACK_WRITEMASK:
+            *out = (T)state_GL_STENCIL_BACK_WRITEMASK;
+            isClientStateParam = true;
+            break;
+        case GL_STENCIL_CLEAR_VALUE:
+            *out = (T)state_GL_STENCIL_CLEAR_VALUE;
+            isClientStateParam = true;
+            break;
         }
         return isClientStateParam;
     }
diff --git a/shared/OpenglCodecCommon/GLESTextureUtils.cpp b/shared/OpenglCodecCommon/GLESTextureUtils.cpp
index cedcda8..ceb917b 100644
--- a/shared/OpenglCodecCommon/GLESTextureUtils.cpp
+++ b/shared/OpenglCodecCommon/GLESTextureUtils.cpp
@@ -1,6 +1,8 @@
 #include "GLESTextureUtils.h"
 
 #include "glUtils.h"
+#include "etc.h"
+#include "astc-codec.h"
 
 #if PLATFORM_SDK_VERSION < 26
 #include <cutils/log.h>
@@ -8,6 +10,36 @@
 #include <log/log.h>
 #endif
 
+#define ASTC_FORMATS_LIST(EXPAND_MACRO) \
+    EXPAND_MACRO(GL_COMPRESSED_RGBA_ASTC_4x4_KHR, astc_codec::FootprintType::k4x4, false) \
+    EXPAND_MACRO(GL_COMPRESSED_RGBA_ASTC_5x4_KHR, astc_codec::FootprintType::k5x4, false) \
+    EXPAND_MACRO(GL_COMPRESSED_RGBA_ASTC_5x5_KHR, astc_codec::FootprintType::k5x5, false) \
+    EXPAND_MACRO(GL_COMPRESSED_RGBA_ASTC_6x5_KHR, astc_codec::FootprintType::k6x5, false) \
+    EXPAND_MACRO(GL_COMPRESSED_RGBA_ASTC_6x6_KHR, astc_codec::FootprintType::k6x6, false) \
+    EXPAND_MACRO(GL_COMPRESSED_RGBA_ASTC_8x5_KHR, astc_codec::FootprintType::k8x5, false) \
+    EXPAND_MACRO(GL_COMPRESSED_RGBA_ASTC_8x6_KHR, astc_codec::FootprintType::k8x6, false) \
+    EXPAND_MACRO(GL_COMPRESSED_RGBA_ASTC_8x8_KHR, astc_codec::FootprintType::k8x8, false) \
+    EXPAND_MACRO(GL_COMPRESSED_RGBA_ASTC_10x5_KHR, astc_codec::FootprintType::k10x5, false) \
+    EXPAND_MACRO(GL_COMPRESSED_RGBA_ASTC_10x6_KHR, astc_codec::FootprintType::k10x6, false) \
+    EXPAND_MACRO(GL_COMPRESSED_RGBA_ASTC_10x8_KHR, astc_codec::FootprintType::k10x8, false) \
+    EXPAND_MACRO(GL_COMPRESSED_RGBA_ASTC_10x10_KHR, astc_codec::FootprintType::k10x10, false) \
+    EXPAND_MACRO(GL_COMPRESSED_RGBA_ASTC_12x10_KHR, astc_codec::FootprintType::k12x10, false) \
+    EXPAND_MACRO(GL_COMPRESSED_RGBA_ASTC_12x12_KHR, astc_codec::FootprintType::k12x12, false) \
+    EXPAND_MACRO(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR, astc_codec::FootprintType::k4x4, true) \
+    EXPAND_MACRO(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR, astc_codec::FootprintType::k5x4, true) \
+    EXPAND_MACRO(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR, astc_codec::FootprintType::k5x5, true) \
+    EXPAND_MACRO(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR, astc_codec::FootprintType::k6x5, true) \
+    EXPAND_MACRO(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR, astc_codec::FootprintType::k6x6, true) \
+    EXPAND_MACRO(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR, astc_codec::FootprintType::k8x5, true) \
+    EXPAND_MACRO(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR, astc_codec::FootprintType::k8x6, true) \
+    EXPAND_MACRO(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR, astc_codec::FootprintType::k8x8, true) \
+    EXPAND_MACRO(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR, astc_codec::FootprintType::k10x5, true) \
+    EXPAND_MACRO(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR, astc_codec::FootprintType::k10x6, true) \
+    EXPAND_MACRO(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR, astc_codec::FootprintType::k10x8, true) \
+    EXPAND_MACRO(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR, astc_codec::FootprintType::k10x10, true) \
+    EXPAND_MACRO(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR, astc_codec::FootprintType::k12x10, true) \
+    EXPAND_MACRO(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR, astc_codec::FootprintType::k12x12, true) \
+
 namespace GLESTextureUtils {
 
 // Based on computations in
@@ -332,6 +364,8 @@
         int* packingPixelImageSize,
         int* packingTotalImageSize) {
 
+    (void)depth;
+
     int widthTotal = (packRowLength == 0) ? width : packRowLength;
     int totalRowSize = computePitch(widthTotal, format, type, packAlignment);
     int pixelsOnlyRowSize = computePitch(width, format, type, packAlignment);
@@ -352,4 +386,244 @@
     if (packingTotalImageSize) *packingTotalImageSize = totalImageSize;
 }
 
+bool isEtcFormat(GLenum internalformat) {
+    switch (internalformat) {
+    case GL_ETC1_RGB8_OES:
+    case GL_COMPRESSED_RGB8_ETC2:
+    case GL_COMPRESSED_SRGB8_ETC2:
+    case GL_COMPRESSED_RGBA8_ETC2_EAC:
+    case GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC:
+    case GL_COMPRESSED_R11_EAC:
+    case GL_COMPRESSED_SIGNED_R11_EAC:
+    case GL_COMPRESSED_RG11_EAC:
+    case GL_COMPRESSED_SIGNED_RG11_EAC:
+    case GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2:
+    case GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2:
+        return true;
+    }
+    return false;
+}
+
+bool isEtc2Format(GLenum internalformat) {
+    return internalformat != GL_ETC1_RGB8_OES &&
+        isEtcFormat(internalformat);
+}
+
+ETC2ImageFormat getEtcFormat(GLenum internalformat) {
+    ETC2ImageFormat etcFormat = EtcRGB8;
+    switch (internalformat) {
+        case GL_COMPRESSED_RGB8_ETC2:
+        case GL_ETC1_RGB8_OES:
+            break;
+        case GL_COMPRESSED_RGBA8_ETC2_EAC:
+            etcFormat = EtcRGBA8;
+            break;
+        case GL_COMPRESSED_SRGB8_ETC2:
+            break;
+        case GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC:
+            etcFormat = EtcRGBA8;
+            break;
+        case GL_COMPRESSED_R11_EAC:
+            etcFormat = EtcR11;
+            break;
+        case GL_COMPRESSED_SIGNED_R11_EAC:
+            etcFormat = EtcSignedR11;
+            break;
+        case GL_COMPRESSED_RG11_EAC:
+            etcFormat = EtcRG11;
+            break;
+        case GL_COMPRESSED_SIGNED_RG11_EAC:
+            etcFormat = EtcSignedRG11;
+            break;
+        case GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2:
+            etcFormat = EtcRGB8A1;
+            break;
+        case GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2:
+            etcFormat = EtcRGB8A1;
+            break;
+    }
+    return etcFormat;
+}
+
+bool isAstcFormat(GLenum internalformat) {
+    switch (internalformat) {
+#define ASTC_FORMAT(typeName, footprintType, srgbValue) \
+        case typeName:
+
+        ASTC_FORMATS_LIST(ASTC_FORMAT)
+#undef ASTC_FORMAT
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool isBptcFormat(GLenum internalformat) {
+    switch (internalformat) {
+        case GL_COMPRESSED_RGBA_BPTC_UNORM_EXT:
+        case GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT:
+        case GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_EXT:
+        case GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_EXT:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool isS3tcFormat(GLenum internalformat) {
+    switch (internalformat) {
+        case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+        case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+        case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+        case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
+        case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
+        case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
+        case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
+        case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
+            return true;
+    }
+
+    return false;
+}
+
+void getAstcFormatInfo(GLenum internalformat,
+                       astc_codec::FootprintType* footprint,
+                       bool* srgb) {
+    switch (internalformat) {
+#define ASTC_FORMAT(typeName, footprintType, srgbValue) \
+        case typeName: \
+            *footprint = footprintType; *srgb = srgbValue; break; \
+
+        ASTC_FORMATS_LIST(ASTC_FORMAT)
+#undef ASTC_FORMAT
+        default:
+            ALOGE("%s: invalid astc format: 0x%x\n", __func__, internalformat);
+            abort();
+    }
+}
+
+int getAstcFootprintWidth(astc_codec::FootprintType footprint) {
+    switch (footprint) {
+        case astc_codec::FootprintType::k4x4: return 4;
+        case astc_codec::FootprintType::k5x4: return 5;
+        case astc_codec::FootprintType::k5x5: return 5;
+        case astc_codec::FootprintType::k6x5: return 6;
+        case astc_codec::FootprintType::k6x6: return 6;
+        case astc_codec::FootprintType::k8x5: return 8;
+        case astc_codec::FootprintType::k8x6: return 8;
+        case astc_codec::FootprintType::k10x5: return 10;
+        case astc_codec::FootprintType::k10x6: return 10;
+        case astc_codec::FootprintType::k8x8: return 8;
+        case astc_codec::FootprintType::k10x8: return 10;
+        case astc_codec::FootprintType::k10x10: return 10;
+        case astc_codec::FootprintType::k12x10: return 12;
+        case astc_codec::FootprintType::k12x12: return 12;
+        default:
+            ALOGE("%s: invalid astc footprint: 0x%x\n", __func__, footprint);
+            abort();
+    }
+}
+
+int getAstcFootprintHeight(astc_codec::FootprintType footprint) {
+    switch (footprint) {
+        case astc_codec::FootprintType::k4x4: return 4;
+        case astc_codec::FootprintType::k5x4: return 4;
+        case astc_codec::FootprintType::k5x5: return 5;
+        case astc_codec::FootprintType::k6x5: return 5;
+        case astc_codec::FootprintType::k6x6: return 6;
+        case astc_codec::FootprintType::k8x5: return 5;
+        case astc_codec::FootprintType::k8x6: return 6;
+        case astc_codec::FootprintType::k10x5: return 5;
+        case astc_codec::FootprintType::k10x6: return 6;
+        case astc_codec::FootprintType::k8x8: return 8;
+        case astc_codec::FootprintType::k10x8: return 8;
+        case astc_codec::FootprintType::k10x10: return 10;
+        case astc_codec::FootprintType::k12x10: return 10;
+        case astc_codec::FootprintType::k12x12: return 12;
+        default:
+            ALOGE("%s: invalid astc footprint: 0x%x\n", __func__, footprint);
+            abort();
+    }
+}
+
+GLsizei getAstcCompressedSize(GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, bool* error) {
+    bool srgb;
+    astc_codec::FootprintType footprintType;
+    getAstcFormatInfo(internalformat, &footprintType, &srgb);
+
+    int fpWidth = getAstcFootprintWidth(footprintType);
+    int fpHeight = getAstcFootprintHeight(footprintType);
+
+    if (width == 0 || height == 0 || depth == 0) {
+        *error = true;
+        return 0;
+    }
+
+    const size_t blocks_wide = (width + fpWidth - 1) / fpWidth;
+    if (blocks_wide == 0) {
+        *error = true;
+        return 0;
+    }
+
+    const size_t expected_block_count =
+        ((width + fpWidth - 1) / fpWidth) *
+        ((height + fpHeight - 1) / fpHeight);
+
+    const size_t kPhysBlockSizeBytes = 16;
+
+    GLsizei res = kPhysBlockSizeBytes * expected_block_count * depth;
+
+    return res;
+}
+
+GLsizei getCompressedImageBlocksize(GLenum internalformat) {
+    if (isBptcFormat(internalformat)) {
+        return 16;
+    }
+
+    switch (internalformat) {
+        case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+        case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+        case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+        case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
+            return 8;
+        case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
+        case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
+        case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
+        case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
+            return 16;
+    }
+
+    ALOGE("%s: Unknown blocksize for internal format: 0x%x\n", __func__, internalformat);
+    abort();
+}
+
+GLsizei get4x4CompressedSize(GLsizei width, GLsizei height, GLsizei depth, GLsizei blocksize, bool* error) {
+    *error = false;
+    return blocksize * ((width + 3) / 4) * ((height + 3) / 4) * depth;
+}
+
+GLsizei getCompressedImageSize(GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, bool* error) {
+    if (isEtcFormat(internalformat)) {
+        GLsizei total = 0;
+        GLsizei one = etc_get_encoded_data_size(getEtcFormat(internalformat), width, height);
+        for (GLsizei i = 0; i < depth; ++i) {
+            total += one;
+        }
+        return total;
+    }
+
+    if (isAstcFormat(internalformat)) {
+        return getAstcCompressedSize(internalformat, width, height, depth, error);
+    }
+
+    if (isBptcFormat(internalformat) || isS3tcFormat(internalformat)) {
+        GLsizei blocksize = getCompressedImageBlocksize(internalformat);
+        return get4x4CompressedSize(width, height, depth, blocksize, error);
+    }
+
+    ALOGE("%s: Unknown compressed internal format: 0x%x\n", __func__, internalformat);
+    abort();
+}
+
 } // namespace GLESTextureUtils
diff --git a/shared/OpenglCodecCommon/GLESTextureUtils.h b/shared/OpenglCodecCommon/GLESTextureUtils.h
index 1d26b3a..5dae8c7 100644
--- a/shared/OpenglCodecCommon/GLESTextureUtils.h
+++ b/shared/OpenglCodecCommon/GLESTextureUtils.h
@@ -71,5 +71,14 @@
         int* packingPixelImageSize,
         int* packingTotalImageSize);
 
+// For calculating compressed sizes of ETC/EAC formatted images in the guest.
+GLsizei getCompressedImageSize(GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, bool* error);
+
+// Format queries
+bool isEtc2Format(GLenum internalformat);
+bool isAstcFormat(GLenum internalformat);
+bool isBptcFormat(GLenum internalformat);
+bool isS3tcFormat(GLenum internalformat);
+
 } // namespace GLESTextureUtils
 #endif
diff --git a/shared/OpenglCodecCommon/GLSharedGroup.cpp b/shared/OpenglCodecCommon/GLSharedGroup.cpp
index 5273e4c..e2ed36c 100755
--- a/shared/OpenglCodecCommon/GLSharedGroup.cpp
+++ b/shared/OpenglCodecCommon/GLSharedGroup.cpp
@@ -17,6 +17,7 @@
 #include "GLSharedGroup.h"
 
 #include "KeyedVectorUtils.h"
+#include "glUtils.h"
 
 /**** BufferData ****/
 
@@ -36,17 +37,26 @@
 
 /**** ProgramData ****/
 ProgramData::ProgramData() : m_numIndexes(0),
+                             m_numAttributes(0),
                              m_initialized(false) {
     m_Indexes = NULL;
+    m_attribIndexes = NULL;
+    m_refcount = 1;
+    m_linkStatus = 0;
+    m_activeUniformBlockCount = 0;
+    m_transformFeedbackVaryingsCount = 0;
 }
 
-void ProgramData::initProgramData(GLuint numIndexes) {
+void ProgramData::initProgramData(GLuint numIndexes, GLuint numAttributes) {
     m_initialized = true;
     m_numIndexes = numIndexes;
+    m_numAttributes = numAttributes;
 
     delete [] m_Indexes;
+    delete [] m_attribIndexes;
 
     m_Indexes = new IndexInfo[numIndexes];
+    m_attribIndexes = new AttribInfo[m_numAttributes];
 }
 
 bool ProgramData::isInitialized() {
@@ -56,6 +66,7 @@
 ProgramData::~ProgramData() {
 
     delete [] m_Indexes;
+    delete [] m_attribIndexes;
 
     m_Indexes = NULL;
 }
@@ -73,6 +84,16 @@
     m_Indexes[index].samplerValue = 0;
 }
 
+void ProgramData::setAttribInfo(
+    GLuint index, GLint attribLoc, GLint size, GLenum type) {
+
+    if (index >= m_numAttributes) return;
+
+    m_attribIndexes[index].attribLoc = attribLoc;
+    m_attribIndexes[index].size = size;
+    m_attribIndexes[index].type = type;
+}
+
 void ProgramData::setIndexFlags(GLuint index, GLuint flags) {
 
     if (index >= m_numIndexes) return;
@@ -104,6 +125,16 @@
     return 0;
 }
 
+bool ProgramData::isValidUniformLocation(GLint location) {
+    for (GLuint i = 0; i < m_numIndexes; ++i) {
+        if (location >= m_Indexes[i].base &&
+            location < m_Indexes[i].base + m_Indexes[i].size)
+            return true;
+    }
+
+    return false;
+}
+
 GLint ProgramData::getNextSamplerUniform(
     GLint index, GLint* val, GLenum* target) {
 
@@ -154,15 +185,18 @@
     return false;
 }
 
-bool ProgramData::attachShader(GLuint shader) {
+bool ProgramData::attachShader(GLuint shader, GLenum shaderType) {
     size_t n = m_shaders.size();
 
     for (size_t i = 0; i < n; i++) {
         if (m_shaders[i] == shader) {
             return false;
+        } else if (m_shaderTypes[i] == shaderType) {
+            return false;
         }
     }
     m_shaders.push_back(shader);
+    m_shaderTypes.push_back(shaderType);
     return true;
 }
 
@@ -172,6 +206,7 @@
     for (size_t i = 0; i < n; i++) {
         if (m_shaders[i] == shader) {
             m_shaders.erase(m_shaders.begin() + i);
+            m_shaderTypes.erase(m_shaderTypes.begin() + i);
             return true;
         }
     }
@@ -179,6 +214,55 @@
     return false;
 }
 
+UniformValidationInfo ProgramData::compileValidationInfo(bool* error) const {
+    UniformValidationInfo res;
+    if (!m_Indexes) {
+        *error = true;
+        return res;
+    }
+
+    for (GLuint i = 0; i < m_numIndexes; ++i) {
+        if (m_Indexes[i].base < 0) continue;
+
+        UniformLocationInfo info = {
+            .valid = true,
+            .columns = getColumnsOfType(m_Indexes[i].type),
+            .rows = getRowsOfType(m_Indexes[i].type),
+            .isSampler = isSamplerType(m_Indexes[i].type),
+            .isInt = isIntegerType(m_Indexes[i].type),
+            .isArray = m_Indexes[i].size > 1,
+            .isUnsigned = isUnsignedIntType(m_Indexes[i].type),
+            .isBool = isBoolType(m_Indexes[i].type),
+        };
+        for (GLuint j = 0; j < m_Indexes[i].size; ++j) {
+            res.add(m_Indexes[i].base + j, info);
+        }
+    }
+
+    return res;
+}
+
+AttribValidationInfo ProgramData::compileAttribValidationInfo(bool* error) const {
+    AttribValidationInfo res;
+    if (!m_attribIndexes) {
+        *error = true;
+        return res;
+    }
+
+    for (GLuint i = 0; i < m_numAttributes; ++i) {
+        if (m_attribIndexes[i].attribLoc < 0) continue;
+
+        AttribIndexInfo info = {
+            .validInProgram = true,
+        };
+
+        for (GLuint j = 0; j < getAttributeCountOfType(m_attribIndexes[i].type) * m_attribIndexes[i].size ; ++j) {
+            res.add(m_attribIndexes[i].attribLoc + j, info);
+        }
+    }
+
+    return res;
+}
 /***** GLSharedGroup ****/
 
 GLSharedGroup::GLSharedGroup() { }
@@ -212,6 +296,14 @@
     return &m_textureRecs;
 }
 
+RenderbufferInfo* GLSharedGroup::getRenderbufferInfo() {
+    return &m_renderbufferInfo;
+}
+
+SamplerInfo* GLSharedGroup::getSamplerInfo() {
+    return &m_samplerInfo;
+}
+
 void GLSharedGroup::addBufferData(GLuint bufferId, GLsizeiptr size, const void* data) {
 
     android::AutoMutex _lock(m_lock);
@@ -302,16 +394,37 @@
     m_programs[program] = new ProgramData();
 }
 
-void GLSharedGroup::initProgramData(GLuint program, GLuint numIndexes) {
+void GLSharedGroup::initProgramData(GLuint program, GLuint numIndexes, GLuint numAttributes) {
 
     android::AutoMutex _lock(m_lock);
 
     ProgramData* pData = findObjectOrDefault(m_programs, program);
     if (pData) {
-        pData->initProgramData(numIndexes);
+        pData->initProgramData(numIndexes, numAttributes);
     }
 }
 
+void GLSharedGroup::refProgramData(GLuint program) {
+    android::AutoMutex _lock(m_lock);
+    ProgramData* pData = findObjectOrDefault(m_programs, program);
+    if (!pData) return;
+    pData->incRef();
+}
+
+void GLSharedGroup::onUseProgram(GLuint previous, GLuint next) {
+    if (previous == next) return;
+
+    android::AutoMutex _lock(m_lock);
+
+    if (previous) {
+        deleteProgramDataLocked(previous);
+    }
+
+    ProgramData* pData = findObjectOrDefault(m_programs, next);
+    if (!pData) return;
+    pData->incRef();
+}
+
 bool GLSharedGroup::isProgramInitialized(GLuint program) {
 
     android::AutoMutex _lock(m_lock);
@@ -337,14 +450,23 @@
 }
 
 void GLSharedGroup::deleteProgramData(GLuint program) {
-
     android::AutoMutex _lock(m_lock);
+    deleteProgramDataLocked(program);
+}
+
+void GLSharedGroup::deleteProgramDataLocked(GLuint program) {
 
     ProgramData* pData = findObjectOrDefault(m_programs, program);
 
-    if (pData) delete pData;
-
-    m_programs.erase(program);
+    if (pData && pData->decRef()) {
+        size_t numShaders = pData->getNumShaders();
+        for (size_t i = 0; i < numShaders; ++i) {
+            // changes the first one
+            detachShaderLocked(program, pData->getShader(0));
+        }
+        delete pData;
+        m_programs.erase(program);
+    }
 
     if (m_shaderProgramIdMap.find(program) ==
         m_shaderProgramIdMap.end()) return;
@@ -360,31 +482,43 @@
 }
 
 // No such thing for separable shader programs.
-void GLSharedGroup::attachShader(GLuint program, GLuint shader) {
-
+bool GLSharedGroup::attachShader(GLuint program, GLuint shader) {
     android::AutoMutex _lock(m_lock);
 
     ProgramData* pData = findObjectOrDefault(m_programs, program);
     ShaderData* sData = findObjectOrDefault(m_shaders, shader);
 
+    bool res = false;
+
     if (pData && sData) {
-        if (pData->attachShader(shader)) {
+        res = pData->attachShader(shader, sData->shaderType);
+        if (res) {
             refShaderDataLocked(shader);
         }
     }
+
+    return res;
 }
 
-void GLSharedGroup::detachShader(GLuint program, GLuint shader) {
-
+bool GLSharedGroup::detachShader(GLuint program, GLuint shader) {
     android::AutoMutex _lock(m_lock);
+    return detachShaderLocked(program, shader);
+}
 
+bool GLSharedGroup::detachShaderLocked(GLuint program, GLuint shader) {
     ProgramData* pData = findObjectOrDefault(m_programs, program);
     ShaderData* sData = findObjectOrDefault(m_shaders, shader);
+
+    bool res = false;
+
     if (pData && sData) {
-        if (pData->detachShader(shader)) {
+        res = pData->detachShader(shader);
+        if (res) {
             unrefShaderDataLocked(shader);
         }
     }
+
+    return res;
 }
 
 // Not needed/used for separate shader programs.
@@ -422,6 +556,19 @@
     }
 }
 
+void GLSharedGroup::setProgramAttribInfo(
+    GLuint program, GLuint index, GLint attribLoc,
+    GLint size, GLenum type, const char* name) {
+
+    android::AutoMutex _lock(m_lock);
+
+    ProgramData* pData = getProgramDataLocked(program);
+
+    if (pData) {
+        pData->setAttribInfo(index,attribLoc,size,type);
+    }
+}
+
 GLenum GLSharedGroup::getProgramUniformType(GLuint program, GLint location) {
 
     android::AutoMutex _lock(m_lock);
@@ -509,6 +656,19 @@
     return false;
 }
 
+bool GLSharedGroup::isProgramUniformLocationValid(GLuint program, GLint location) {
+    if (location < 0) return false;
+
+    android::AutoMutex _lock(m_lock);
+
+    ProgramData* pData =
+        findObjectOrDefault(m_programs, program);
+
+    if (!pData) return false;
+
+    return pData->isValidUniformLocation(location);
+}
+
 bool GLSharedGroup::isShader(GLuint shader) {
 
     android::AutoMutex _lock(m_lock);
@@ -518,7 +678,7 @@
     return pData != NULL;
 }
 
-bool GLSharedGroup::addShaderData(GLuint shader) {
+bool GLSharedGroup::addShaderData(GLuint shader, GLenum shaderType) {
 
     android::AutoMutex _lock(m_lock);
 
@@ -527,6 +687,7 @@
     if (data) {
         m_shaders[shader] = data;
         data->refcount = 1;
+        data->shaderType = shaderType;
     }
 
     return data != NULL;
@@ -562,6 +723,21 @@
     }
 }
 
+ProgramData* GLSharedGroup::getProgramDataLocked(GLuint program) {
+    // Check the space of normal programs, then separable ones
+    ProgramData* pData = findObjectOrDefault(m_programs, program);
+
+    if (pData) return pData;
+
+    std::map<GLuint, uint32_t>::const_iterator it =
+        m_shaderProgramIdMap.find(program);
+    if (it == m_shaderProgramIdMap.end()) return NULL;
+
+    ShaderProgramData* spData = findObjectOrDefault(m_shaderPrograms, it->second);
+    if (!spData) return NULL;
+    return &spData->programData;
+}
+
 uint32_t GLSharedGroup::addNewShaderProgramData() {
 
     android::AutoMutex _lock(m_lock);
@@ -569,8 +745,6 @@
     ShaderProgramData* data = new ShaderProgramData;
     uint32_t currId = m_shaderProgramId;
 
-    ALOGD("%s: new data %p id %u", __FUNCTION__, data, currId);
-
     m_shaderPrograms[currId] = data;
     m_shaderProgramId++;
     return currId;
@@ -590,8 +764,6 @@
 
     ShaderProgramData* res = findObjectOrDefault(m_shaderPrograms, id);
 
-    ALOGD("%s: id=%u res=%p", __FUNCTION__, id, res);
-
     return res;
 }
 
@@ -630,9 +802,9 @@
     m_shaderProgramIdMap.erase(shaderProgramName);
 }
 
-void GLSharedGroup::initShaderProgramData(GLuint shaderProgram, GLuint numIndices) {
+void GLSharedGroup::initShaderProgramData(GLuint shaderProgram, GLuint numIndices, GLuint numAttributes) {
     ShaderProgramData* spData = getShaderProgramData(shaderProgram);
-    spData->programData.initProgramData(numIndices);
+    spData->programData.initProgramData(numIndices, numAttributes);
 }
 
 void GLSharedGroup::setShaderProgramIndexInfo(
@@ -662,3 +834,101 @@
         }
     }
 }
+
+UniformValidationInfo GLSharedGroup::getUniformValidationInfo(GLuint program) {
+    UniformValidationInfo res;
+
+    android::AutoMutex _lock(m_lock);
+
+    ProgramData* pData =
+        getProgramDataLocked(program);
+
+    if (!pData) return res;
+
+    bool error; (void)error;
+    return pData->compileValidationInfo(&error);
+}
+
+AttribValidationInfo GLSharedGroup::getAttribValidationInfo(GLuint program) {
+    AttribValidationInfo res;
+
+    android::AutoMutex _lock(m_lock);
+
+    ProgramData* pData =
+        getProgramDataLocked(program);
+
+    if (!pData) return res;
+
+    bool error; (void)error;
+    return pData->compileAttribValidationInfo(&error);
+}
+
+void GLSharedGroup::setProgramLinkStatus(GLuint program, GLint linkStatus) {
+    android::AutoMutex _lock(m_lock);
+    ProgramData* pData =
+        getProgramDataLocked(program);
+    if (!pData) return;
+    pData->setLinkStatus(linkStatus);
+}
+
+GLint GLSharedGroup::getProgramLinkStatus(GLuint program) {
+    android::AutoMutex _lock(m_lock);
+    ProgramData* pData = getProgramDataLocked(program);
+    if (!pData) return 0;
+    return pData->getLinkStatus();
+}
+
+void GLSharedGroup::setActiveUniformBlockCountForProgram(GLuint program, GLint count) {
+    android::AutoMutex _lock(m_lock);
+    ProgramData* pData =
+        getProgramDataLocked(program);
+
+    if (!pData) return;
+
+    pData->setActiveUniformBlockCount(count);
+}
+
+GLint GLSharedGroup::getActiveUniformBlockCount(GLuint program) {
+    android::AutoMutex _lock(m_lock);
+    ProgramData* pData =
+        getProgramDataLocked(program);
+
+    if (!pData) return 0;
+
+    return pData->getActiveUniformBlockCount();
+}
+
+void GLSharedGroup::setTransformFeedbackVaryingsCountForProgram(GLuint program, GLint count) {
+    android::AutoMutex _lock(m_lock);
+    ProgramData* pData = getProgramDataLocked(program);
+    if (!pData) return;
+    pData->setTransformFeedbackVaryingsCount(count);
+}
+
+GLint GLSharedGroup::getTransformFeedbackVaryingsCountForProgram(GLuint program) {
+    android::AutoMutex _lock(m_lock);
+    ProgramData* pData = getProgramDataLocked(program);
+    if (!pData) return 0;
+    return pData->getTransformFeedbackVaryingsCount();
+}
+
+int GLSharedGroup::getActiveUniformsCountForProgram(GLuint program) {
+    android::AutoMutex _lock(m_lock);
+    ProgramData* pData =
+        getProgramDataLocked(program);
+
+    if (!pData) return 0;
+
+    return pData->getActiveUniformsCount();
+}
+
+int GLSharedGroup::getActiveAttributesCountForProgram(GLuint program) {
+    android::AutoMutex _lock(m_lock);
+    ProgramData* pData =
+        getProgramDataLocked(program);
+
+    if (!pData) return 0;
+
+    return pData->getActiveAttributesCount();
+}
+
diff --git a/shared/OpenglCodecCommon/GLSharedGroup.h b/shared/OpenglCodecCommon/GLSharedGroup.h
index bbee27a..9832378 100755
--- a/shared/OpenglCodecCommon/GLSharedGroup.h
+++ b/shared/OpenglCodecCommon/GLSharedGroup.h
@@ -30,6 +30,7 @@
 #include <GLES2/gl2ext.h>
 
 #include <map>
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -39,7 +40,7 @@
 #include <utils/threads.h>
 #include "auto_goldfish_dma_context.h"
 #include "IndexRangeCache.h"
-#include "SmartPtr.h"
+#include "StateTrackingSupport.h"
 
 struct BufferData {
     BufferData();
@@ -75,11 +76,26 @@
         GLint samplerValue; // only set for sampler uniforms
     } IndexInfo;
 
+    typedef struct _AttribInfo {
+        GLint attribLoc;
+        GLint size;
+        GLenum type;
+    } AttribInfo;
+
     GLuint m_numIndexes;
+    GLuint m_numAttributes;
     IndexInfo* m_Indexes;
+    AttribInfo* m_attribIndexes;
     bool m_initialized;
 
     std::vector<GLuint> m_shaders;
+    std::vector<GLenum> m_shaderTypes;
+
+    uint32_t m_refcount;
+    GLint m_linkStatus;
+
+    uint32_t m_activeUniformBlockCount;
+    uint32_t m_transformFeedbackVaryingsCount;;
 
 public:
     enum {
@@ -87,21 +103,58 @@
     };
 
     ProgramData();
-    void initProgramData(GLuint numIndexes);
+    void initProgramData(GLuint numIndexes, GLuint numAttributes);
     bool isInitialized();
     virtual ~ProgramData();
     void setIndexInfo(GLuint index, GLint base, GLint size, GLenum type);
+    void setAttribInfo(GLuint index, GLint base, GLint size, GLenum type);
     void setIndexFlags(GLuint index, GLuint flags);
     GLuint getIndexForLocation(GLint location);
     GLenum getTypeForLocation(GLint location);
+    bool isValidUniformLocation(GLint location);
 
     GLint getNextSamplerUniform(GLint index, GLint* val, GLenum* target);
     bool setSamplerUniform(GLint appLoc, GLint val, GLenum* target);
 
-    bool attachShader(GLuint shader);
+    bool attachShader(GLuint shader, GLenum shaderType);
     bool detachShader(GLuint shader);
     size_t getNumShaders() const { return m_shaders.size(); }
     GLuint getShader(size_t i) const { return m_shaders[i]; }
+
+    void incRef() { ++m_refcount; }
+    bool decRef() {
+        --m_refcount;
+        return 0 == m_refcount;
+    }
+
+    UniformValidationInfo compileValidationInfo(bool* error) const;
+    AttribValidationInfo compileAttribValidationInfo(bool* error) const;
+    void setLinkStatus(GLint status) { m_linkStatus = status; }
+    GLint getLinkStatus() { return m_linkStatus; }
+
+    void setActiveUniformBlockCount(uint32_t count) {
+        m_activeUniformBlockCount = count;
+    }
+
+    uint32_t getActiveUniformBlockCount() const {
+        return m_activeUniformBlockCount;
+    }
+
+    void setTransformFeedbackVaryingsCount(uint32_t count) {
+        m_transformFeedbackVaryingsCount = count;
+    }
+
+    uint32_t getTransformFeedbackVaryingsCount() const {
+        return m_transformFeedbackVaryingsCount;
+    }
+
+    GLuint getActiveUniformsCount() const {
+        return m_numIndexes;
+    }
+
+    GLuint getActiveAttributesCount() const {
+        return m_numAttributes;
+    }
 };
 
 struct ShaderData {
@@ -109,6 +162,7 @@
     StringList samplerExternalNames;
     int refcount;
     std::vector<std::string> sources;
+    GLenum shaderType;
 };
 
 class ShaderProgramData {
@@ -125,6 +179,8 @@
     std::map<GLuint, ShaderData*> m_shaders;
     std::map<uint32_t, ShaderProgramData*> m_shaderPrograms;
     std::map<GLuint, uint32_t> m_shaderProgramIdMap;
+    RenderbufferInfo m_renderbufferInfo;
+    SamplerInfo m_samplerInfo;
 
     mutable android::Mutex m_lock;
 
@@ -133,12 +189,15 @@
 
     uint32_t m_shaderProgramId;
 
+    ProgramData* getProgramDataLocked(GLuint program);
 public:
     GLSharedGroup();
     ~GLSharedGroup();
     bool isShaderOrProgramObject(GLuint obj);
     BufferData * getBufferData(GLuint bufferId);
     SharedTextureDataMap* getTextureData();
+    RenderbufferInfo* getRenderbufferInfo();
+    SamplerInfo* getSamplerInfo();
     void    addBufferData(GLuint bufferId, GLsizeiptr size, const void* data);
     void    updateBufferData(GLuint bufferId, GLsizeiptr size, const void* data);
     void    setBufferUsage(GLuint bufferId, GLenum usage);
@@ -151,17 +210,23 @@
     bool    isProgram(GLuint program);
     bool    isProgramInitialized(GLuint program);
     void    addProgramData(GLuint program); 
-    void    initProgramData(GLuint program, GLuint numIndexes);
-    void    attachShader(GLuint program, GLuint shader);
-    void    detachShader(GLuint program, GLuint shader);
+    void    initProgramData(GLuint program, GLuint numIndexes, GLuint numAttributes);
+    void    refProgramData(GLuint program);
+    void    onUseProgram(GLuint previous, GLuint next);
+    bool    attachShader(GLuint program, GLuint shader);
+    bool    detachShader(GLuint program, GLuint shader);
+    bool    detachShaderLocked(GLuint program, GLuint shader);
     void    deleteProgramData(GLuint program);
+    void    deleteProgramDataLocked(GLuint program);
     void    setProgramIndexInfo(GLuint program, GLuint index, GLint base, GLint size, GLenum type, const char* name);
+    void    setProgramAttribInfo(GLuint program, GLuint index, GLint attribLoc, GLint size, GLenum type, const char* name);
     GLenum  getProgramUniformType(GLuint program, GLint location);
     GLint   getNextSamplerUniform(GLuint program, GLint index, GLint* val, GLenum* target) const;
     bool    setSamplerUniform(GLuint program, GLint appLoc, GLint val, GLenum* target);
+    bool    isProgramUniformLocationValid(GLuint program, GLint location);
 
     bool    isShader(GLuint shader);
-    bool    addShaderData(GLuint shader);
+    bool    addShaderData(GLuint shader, GLenum shaderType);
     // caller must hold a reference to the shader as long as it holds the pointer
     ShaderData* getShaderData(GLuint shader);
     void    unrefShaderData(GLuint shader);
@@ -173,10 +238,26 @@
     ShaderProgramData* getShaderProgramData(GLuint shaderProgramName);
     void deleteShaderProgramDataById(uint32_t id);
     void deleteShaderProgramData(GLuint shaderProgramName);
-    void initShaderProgramData(GLuint shaderProgram, GLuint numIndices);
+    void initShaderProgramData(GLuint shaderProgram, GLuint numIndices, GLuint numAttributes);
     void setShaderProgramIndexInfo(GLuint shaderProgram, GLuint index, GLint base, GLint size, GLenum type, const char* name);
+
+    // Validation info
+    UniformValidationInfo getUniformValidationInfo(GLuint program);
+    AttribValidationInfo getAttribValidationInfo(GLuint program);
+
+    void setProgramLinkStatus(GLuint program, GLint linkStatus);
+    GLint getProgramLinkStatus(GLuint program);
+
+    void setActiveUniformBlockCountForProgram(GLuint program, GLint numBlocks);
+    GLint getActiveUniformBlockCount(GLuint program);
+
+    void setTransformFeedbackVaryingsCountForProgram(GLuint program, GLint count);
+    GLint getTransformFeedbackVaryingsCountForProgram(GLuint program);
+
+    int getActiveUniformsCountForProgram(GLuint program);
+    int getActiveAttributesCountForProgram(GLuint program);
 };
 
-typedef SmartPtr<GLSharedGroup> GLSharedGroupPtr; 
+typedef std::shared_ptr<GLSharedGroup> GLSharedGroupPtr;
 
 #endif //_GL_SHARED_GROUP_H_
diff --git a/shared/OpenglCodecCommon/SmartPtr.h b/shared/OpenglCodecCommon/SmartPtr.h
deleted file mode 100644
index 3d821c8..0000000
--- a/shared/OpenglCodecCommon/SmartPtr.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
-* Copyright (C) 2011 The Android Open Source Project
-*
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-#ifndef __SMART_PTR_H
-#define __SMART_PTR_H
-
-#include <PortableMutex.h>
-
-#include <cutils/atomic.h>
-
-template <class T, bool threadSafe = false>
-class SmartPtr
-{
-public:
-    explicit SmartPtr(T* ptr = (T*)NULL) {
-        if (threadSafe) {
-            m_lock = new mutex_t;
-            mutex_init(m_lock);
-        }
-        else m_lock = NULL;
-
-        m_ptr = ptr;
-        if (ptr)
-           m_pRefCount = new int32_t(1);
-        else
-           m_pRefCount = NULL;
-    }
-
-    SmartPtr<T,threadSafe>(const SmartPtr<T,false>& rhs) {
-        if (threadSafe) {
-            m_lock = new mutex_t;
-            mutex_init(m_lock);
-        }
-        else m_lock = NULL;
-
-        m_pRefCount = rhs.m_pRefCount;
-        m_ptr       = rhs.m_ptr;
-        use();
-    }
-
-    SmartPtr<T,threadSafe>(SmartPtr<T,true>& rhs) {
-        if (threadSafe) {
-            m_lock = new mutex_t;
-            mutex_init(m_lock);
-        }
-        else m_lock = NULL;
-
-        if (rhs.m_lock) mutex_lock(rhs.m_lock);
-        m_pRefCount = rhs.m_pRefCount;
-        m_ptr       = rhs.m_ptr;
-        use();
-        if (rhs.m_lock) mutex_unlock(rhs.m_lock);
-    }
-
-    ~SmartPtr() {
-        if (m_lock) mutex_lock(m_lock);
-        release();
-        if (m_lock)
-        {
-            mutex_unlock(m_lock);
-            mutex_destroy(m_lock);
-            delete m_lock;
-        }
-    }
-
-    T* Ptr() const {
-        return m_ptr;
-    }
-
-    const T* constPtr() const
-    {
-        return m_ptr;
-    }
-
-    T* operator->() const {
-        return m_ptr;
-    }
-
-    T& operator*() const {
-        return *m_ptr;
-    }
-
-    operator void*() const {
-        return (void *)m_ptr;
-    }
-
-    // This gives STL lists something to compare.
-    bool operator <(const SmartPtr<T>& t1) const {
-        return m_ptr < t1.m_ptr;
-    }
-
-    SmartPtr<T,threadSafe>& operator=(const SmartPtr<T,false>& rhs)
-    {
-        if (m_ptr == rhs.m_ptr)
-            return *this;
-
-        if (m_lock) mutex_lock(m_lock);
-        release();
-        m_pRefCount = rhs.m_pRefCount;
-        m_ptr       = rhs.m_ptr;
-        use();
-        if (m_lock) mutex_unlock(m_lock);
-
-        return *this;
-    }
-
-    SmartPtr<T,threadSafe>& operator=(SmartPtr<T,true>& rhs)
-    {
-        if (m_ptr == rhs.m_ptr)
-            return *this;
-
-        if (m_lock) mutex_lock(m_lock);
-        release();
-        if (rhs.m_lock) mutex_lock(rhs.m_lock);
-        m_pRefCount = rhs.m_pRefCount;
-        m_ptr       = rhs.m_ptr;
-        use();
-        if (rhs.m_lock) mutex_unlock(rhs.m_lock);
-        if (m_lock) mutex_unlock(m_lock);
-
-        return *this;
-    }
-
-private:
-    int32_t  *m_pRefCount;
-    mutex_t  *m_lock;
-    T* m_ptr;
-
-    // Increment the reference count on this pointer by 1.
-    int use() {
-        if (!m_pRefCount) return 0;
-        return android_atomic_inc(m_pRefCount) + 1;
-    }
-
-    // Decrement the reference count on the pointer by 1.
-    // If the reference count goes to (or below) 0, the pointer is deleted.
-    int release() {
-        if (!m_pRefCount) return 0;
-
-        int iVal = android_atomic_dec(m_pRefCount);
-        if (iVal > 1)
-            return iVal - 1;
-
-        delete m_pRefCount;
-        m_pRefCount = NULL;
-
-        if (m_ptr) {
-            delete m_ptr;
-            m_ptr = NULL;
-        }
-        return 0;
-    }
-
-};
-
-#endif // of  __SMART_PTR_H
diff --git a/shared/OpenglCodecCommon/StateTrackingSupport.h b/shared/OpenglCodecCommon/StateTrackingSupport.h
new file mode 100644
index 0000000..b742c13
--- /dev/null
+++ b/shared/OpenglCodecCommon/StateTrackingSupport.h
@@ -0,0 +1,245 @@
+/*
+* Copyright (C) 2020 The Android Open Source Project
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#ifndef _STATE_TRACKING_SUPPORT_H_
+#define _STATE_TRACKING_SUPPORT_H_
+
+#include "android/base/containers/HybridComponentManager.h"
+#include "android/base/synchronization/AndroidLock.h"
+
+#include <GLES2/gl2.h>
+
+template <class IndexType, bool initialIsTrue>
+class PredicateMap {
+public:
+    static const uint64_t kBitsPerEntry = 64;
+    void add(IndexType objId) {
+        static const uint64_t kNone = 0ULL;
+        static const uint64_t kAll = ~kNone;
+        IndexType index = objId / kBitsPerEntry;
+        if (!mStorage.get_const(index)) {
+            mStorage.add(index, initialIsTrue ? kAll : kNone);
+        }
+    }
+
+    void remove(IndexType objId) {
+        if (initialIsTrue) {
+            set(objId, true);
+        } else {
+            set(objId, false);
+        }
+    }
+
+    void set(IndexType objId, bool predicate) {
+        IndexType index = objId / kBitsPerEntry;
+
+        if (!mStorage.get_const(index)) return;
+
+        uint64_t* current = mStorage.get(index);
+
+        uint64_t flag = 1ULL << (objId % kBitsPerEntry);
+
+        if (predicate) {
+            *current = *current | flag;
+        } else {
+            *current = *current & (~flag);
+        }
+    }
+
+    bool get(IndexType objId) const {
+        IndexType index = objId / kBitsPerEntry;
+
+        const uint64_t* current = mStorage.get_const(index);
+
+        if (!current) return initialIsTrue;
+
+        uint64_t flag = 1ULL << (objId % kBitsPerEntry);
+        return (flag & (*current)) != 0;
+    }
+
+private:
+    using Storage = android::base::HybridComponentManager<10000, IndexType, uint64_t>;
+    Storage mStorage;
+};
+
+// Structures for fast validation of uniforms/attribs.
+
+struct UniformLocationInfo {
+    bool valid = false;
+    uint32_t columns;
+    uint32_t rows;
+    bool isSampler;
+    bool isInt;
+    bool isArray;
+    bool isUnsigned;
+    bool isBool;
+};
+
+struct AttribIndexInfo {
+    bool validInProgram = false;
+};
+
+using UniformValidationInfo = android::base::HybridComponentManager<1000, uint32_t, UniformLocationInfo>;
+using AttribValidationInfo = android::base::HybridComponentManager<16, uint32_t, AttribIndexInfo>;
+
+using LastQueryTargetInfo = android::base::HybridComponentManager<1000, uint32_t, uint32_t>;
+
+using ExistenceMap = PredicateMap<uint32_t, false>;
+
+struct RboProps {
+    GLenum format;
+    GLsizei multisamples;
+    GLsizei width;
+    GLsizei height;
+    bool previouslyBound;
+    uint32_t refcount;
+    bool boundEGLImage;
+};
+
+struct SamplerProps {
+    uint32_t refcount;
+};
+
+template <class T>
+class ScopedLockedView {
+public:
+    ScopedLockedView(T* info) : mInfo(info) {
+        mInfo->lock();
+    }
+    virtual ~ScopedLockedView() {
+        mInfo->unlock();
+    }
+protected:
+    T* mInfo;
+
+    T* internalInfo() { return mInfo; }
+    const T* internalInfo_const() const { return mInfo; }
+};
+
+struct RenderbufferInfo {
+    android::base::guest::Lock infoLock;
+    android::base::HybridComponentManager<1000, uint32_t, RboProps> component;
+
+    void lock() { infoLock.lock(); }
+    void unlock() { infoLock.unlock(); }
+
+    class ScopedView : public ScopedLockedView<RenderbufferInfo> {
+        public:
+            ScopedView(RenderbufferInfo* info) : ScopedLockedView<RenderbufferInfo>(info) { }
+            bool hasRbo(GLuint id) const {
+                const RboProps* info = internalInfo_const()->component.get_const(id);
+                if (!info) return false;
+                return 0 != info->refcount;
+            }
+            virtual ~ScopedView() = default;
+            RboProps* get(GLuint id) {
+                return internalInfo()->component.get(id);
+            }
+            const RboProps* get_const(GLuint id) {
+                return internalInfo_const()->component.get_const(id);
+            }
+            void addFresh(GLuint id) {
+                RboProps props;
+                props.format = GL_NONE;
+                props.multisamples = 0;
+                props.width = 0;
+                props.height = 0;
+                props.previouslyBound = false;
+                props.refcount = 1;
+                props.boundEGLImage = false;
+                internalInfo()->component.add(id, props);
+            }
+            RboProps* bind(GLuint id) {
+                if (!hasRbo(id)) addFresh(id);
+                ref(id);
+                RboProps* res = get(id);
+                res->previouslyBound = true;
+                return res;
+            }
+            void ref(GLuint id) {
+                RboProps* props = get(id);
+                if (!props) return;
+                ++props->refcount;
+            }
+            bool unref(GLuint id) {
+                RboProps* props = get(id);
+                if (!props) return false;
+                if (!props->refcount) return false;
+                --props->refcount;
+                bool gone = 0 == props->refcount;
+                if (gone) {
+                    props->format = 0;
+                    props->multisamples = 0;
+                    props->width = 0;
+                    props->height = 0;
+                    props->previouslyBound = false;
+                    props->boundEGLImage = false;
+                }
+                return gone;
+            }
+    };
+};
+
+struct SamplerInfo {
+    android::base::guest::Lock infoLock;
+    android::base::HybridComponentManager<1000, uint32_t, SamplerProps> component;
+
+    void lock() { infoLock.lock(); }
+    void unlock() { infoLock.unlock(); }
+
+    class ScopedView : public ScopedLockedView<SamplerInfo> {
+        public:
+            ScopedView(SamplerInfo* info) : ScopedLockedView<SamplerInfo>(info) { }
+            bool samplerExists(GLuint id) const {
+                const SamplerProps* info = internalInfo_const()->component.get_const(id);
+                if (!info) return false;
+                return 0 != info->refcount;
+            }
+            virtual ~ScopedView() = default;
+            SamplerProps* get(GLuint id) {
+                return internalInfo()->component.get(id);
+            }
+            const SamplerProps* get_const(GLuint id) {
+                return internalInfo_const()->component.get_const(id);
+            }
+            void addFresh(GLuint id) {
+                SamplerProps props;
+                props.refcount = 1;
+                internalInfo()->component.add(id, props);
+            }
+            SamplerProps* bind(GLuint id) {
+                if (!samplerExists(id)) return 0;
+                ref(id);
+                SamplerProps* res = get(id);
+                return res;
+            }
+            void ref(GLuint id) {
+                SamplerProps* props = get(id);
+                if (!props) return;
+                ++props->refcount;
+            }
+            bool unref(GLuint id) {
+                SamplerProps* props = get(id);
+                if (!props) return false;
+                if (!props->refcount) return false;
+                --props->refcount;
+                bool gone = 0 == props->refcount;
+                return gone;
+            }
+    };
+};
+
+
+#endif
diff --git a/shared/OpenglCodecCommon/TextureSharedData.h b/shared/OpenglCodecCommon/TextureSharedData.h
index 1372f7a..96827ef 100644
--- a/shared/OpenglCodecCommon/TextureSharedData.h
+++ b/shared/OpenglCodecCommon/TextureSharedData.h
@@ -35,6 +35,13 @@
     TextureDims* dims;
     bool immutable;
     bool boundEGLImage;
+    bool hasStorage;
+    bool hasCubeNegX;
+    bool hasCubePosX;
+    bool hasCubeNegY;
+    bool hasCubePosY;
+    bool hasCubeNegZ;
+    bool hasCubePosZ;
 };
 
 typedef std::map<GLuint, TextureRec*> SharedTextureDataMap;
diff --git a/shared/OpenglCodecCommon/astc-codec.h b/shared/OpenglCodecCommon/astc-codec.h
new file mode 100644
index 0000000..e15fe47
--- /dev/null
+++ b/shared/OpenglCodecCommon/astc-codec.h
@@ -0,0 +1,47 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ASTC_CODEC_ASTC_CODEC_H_
+#define ASTC_CODEC_ASTC_CODEC_H_
+
+namespace astc_codec {
+
+// These are the valid ASTC footprints according to the specification in
+// Section C.2.7.
+enum FootprintType {
+  k4x4,
+  k5x4,
+  k5x5,
+  k6x5,
+  k6x6,
+  k8x5,
+  k8x6,
+  k10x5,
+  k10x6,
+  k8x8,
+  k10x8,
+  k10x10,
+  k12x10,
+  k12x12,
+
+  kCount
+};
+
+struct AstcFootprint {
+    AstcFootprint(int width, int height);
+};
+
+}  // namespace astc_codec
+
+#endif  // ASTC_CODEC_ASTC_CODEC_H_
diff --git a/shared/OpenglCodecCommon/etc.cpp b/shared/OpenglCodecCommon/etc.cpp
new file mode 100644
index 0000000..dea3228
--- /dev/null
+++ b/shared/OpenglCodecCommon/etc.cpp
@@ -0,0 +1,1031 @@
+// Copyright 2009 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "etc.h"
+
+#include <algorithm>
+#include <assert.h>
+#include <string.h>
+#include <stdint.h>
+#include <stdio.h>
+
+typedef uint16_t etc1_uint16;
+
+/* From http://www.khronos.org/registry/gles/extensions/OES/OES_compressed_ETC1_RGB8_texture.txt
+
+ The number of bits that represent a 4x4 texel block is 64 bits if
+ <internalformat> is given by ETC1_RGB8_OES.
+
+ The data for a block is a number of bytes,
+
+ {q0, q1, q2, q3, q4, q5, q6, q7}
+
+ where byte q0 is located at the lowest memory address and q7 at
+ the highest. The 64 bits specifying the block is then represented
+ by the following 64 bit integer:
+
+ int64bit = 256*(256*(256*(256*(256*(256*(256*q0+q1)+q2)+q3)+q4)+q5)+q6)+q7;
+
+ ETC1_RGB8_OES:
+
+ a) bit layout in bits 63 through 32 if diffbit = 0
+
+ 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48
+ -----------------------------------------------
+ | base col1 | base col2 | base col1 | base col2 |
+ | R1 (4bits)| R2 (4bits)| G1 (4bits)| G2 (4bits)|
+ -----------------------------------------------
+
+ 47 46 45 44 43 42 41 40 39 38 37 36 35 34  33  32
+ ---------------------------------------------------
+ | base col1 | base col2 | table  | table  |diff|flip|
+ | B1 (4bits)| B2 (4bits)| cw 1   | cw 2   |bit |bit |
+ ---------------------------------------------------
+
+
+ b) bit layout in bits 63 through 32 if diffbit = 1
+
+ 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48
+ -----------------------------------------------
+ | base col1    | dcol 2 | base col1    | dcol 2 |
+ | R1' (5 bits) | dR2    | G1' (5 bits) | dG2    |
+ -----------------------------------------------
+
+ 47 46 45 44 43 42 41 40 39 38 37 36 35 34  33  32
+ ---------------------------------------------------
+ | base col 1   | dcol 2 | table  | table  |diff|flip|
+ | B1' (5 bits) | dB2    | cw 1   | cw 2   |bit |bit |
+ ---------------------------------------------------
+
+
+ c) bit layout in bits 31 through 0 (in both cases)
+
+ 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16
+ -----------------------------------------------
+ |       most significant pixel index bits       |
+ | p| o| n| m| l| k| j| i| h| g| f| e| d| c| b| a|
+ -----------------------------------------------
+
+ 15 14 13 12 11 10  9  8  7  6  5  4  3   2   1  0
+ --------------------------------------------------
+ |         least significant pixel index bits       |
+ | p| o| n| m| l| k| j| i| h| g| f| e| d| c | b | a |
+ --------------------------------------------------
+
+
+ Add table 3.17.2: Intensity modifier sets for ETC1 compressed textures:
+
+ table codeword                modifier table
+ ------------------        ----------------------
+ 0                     -8  -2  2   8
+ 1                    -17  -5  5  17
+ 2                    -29  -9  9  29
+ 3                    -42 -13 13  42
+ 4                    -60 -18 18  60
+ 5                    -80 -24 24  80
+ 6                   -106 -33 33 106
+ 7                   -183 -47 47 183
+
+
+ Add table 3.17.3 Mapping from pixel index values to modifier values for
+ ETC1 compressed textures:
+
+ pixel index value
+ ---------------
+ msb     lsb           resulting modifier value
+ -----   -----          -------------------------
+ 1       1            -b (large negative value)
+ 1       0            -a (small negative value)
+ 0       0             a (small positive value)
+ 0       1             b (large positive value)
+
+ ETC2 codec:
+     from https://www.khronos.org/registry/gles/specs/3.0/es_spec_3.0.4.pdf
+     page 289
+ */
+
+static const int kRGBModifierTable[] = {
+/* 0 */2, 8, -2, -8,
+/* 1 */5, 17, -5, -17,
+/* 2 */9, 29, -9, -29,
+/* 3 */13, 42, -13, -42,
+/* 4 */18, 60, -18, -60,
+/* 5 */24, 80, -24, -80,
+/* 6 */33, 106, -33, -106,
+/* 7 */47, 183, -47, -183 };
+
+static const int kRGBOpaqueModifierTable[] = {
+/* 0 */0, 8, 0, -8,
+/* 1 */0, 17, 0, -17,
+/* 2 */0, 29, 0, -29,
+/* 3 */0, 42, 0, -42,
+/* 4 */0, 60, 0, -60,
+/* 5 */0, 80, 0, -80,
+/* 6 */0, 106, 0, -106,
+/* 7 */0, 183, 0, -183 };
+
+static const int kAlphaModifierTable[] = {
+/* 0 */ -3, -6, -9, -15, 2, 5, 8, 14,
+/* 1 */ -3, -7, -10, -13, 2, 6, 9, 12,
+/* 2 */ -2, -5, -8, -13, 1, 4, 7, 12,
+/* 3 */ -2, -4, -6, -13, 1, 3, 5, 12,
+/* 4 */ -3, -6, -8, -12, 2, 5, 7, 11,
+/* 5 */ -3, -7, -9, -11, 2, 6, 8, 10,
+/* 6 */ -4, -7, -8, -11, 3, 6, 7, 10,
+/* 7 */ -3, -5, -8, -11, 2, 4, 7, 10,
+/* 8 */ -2, -6, -8, -10, 1, 5, 7, 9,
+/* 9 */ -2, -5, -8, -10, 1, 4, 7, 9,
+/* 10 */ -2, -4, -8, -10, 1, 3, 7, 9,
+/* 11 */ -2, -5, -7, -10, 1, 4, 6, 9,
+/* 12 */ -3, -4, -7, -10, 2, 3, 6, 9,
+/* 13 */ -1, -2, -3, -10, 0, 1, 2, 9,
+/* 14 */ -4, -6, -8, -9, 3, 5, 7, 8,
+/* 15 */ -3, -5, -7, -9, 2, 4, 6, 8
+};
+
+static const int kLookup[8] = { 0, 1, 2, 3, -4, -3, -2, -1 };
+
+static inline int clamp(int x) {
+    return (x >= 0 ? (x < 255 ? x : 255) : 0);
+}
+
+static inline int clamp2047(int x) {
+    return (x >= 0 ? (x < 2047 ? x : 2047) : 0);
+}
+
+static inline int clampSigned1023(int x) {
+    return (x >= -1023 ? (x < 1023 ? x : 1023) : -1023);
+}
+
+static
+inline int convert4To8(int b) {
+    int c = b & 0xf;
+    return (c << 4) | c;
+}
+
+static
+inline int convert5To8(int b) {
+    int c = b & 0x1f;
+    return (c << 3) | (c >> 2);
+}
+
+static
+inline int convert6To8(int b) {
+    int c = b & 0x3f;
+    return (c << 2) | (c >> 4);
+}
+
+static
+inline int convert7To8(int b) {
+    int c = b & 0x7f;
+    return (c << 1) | (c >> 6);
+}
+
+static
+inline int divideBy255(int d) {
+    return (d + 128 + (d >> 8)) >> 8;
+}
+
+static
+inline int convert8To4(int b) {
+    int c = b & 0xff;
+    return divideBy255(c * 15);
+}
+
+static
+inline int convert8To5(int b) {
+    int c = b & 0xff;
+    return divideBy255(c * 31);
+}
+
+static
+inline int convertDiff(int base, int diff) {
+    return convert5To8((0x1f & base) + kLookup[0x7 & diff]);
+}
+static
+int isOverflowed(int base, int diff) {
+    int val = (0x1f & base) + kLookup[0x7 & diff];
+    return val < 0 || val >= 32;
+}
+
+static
+void decode_subblock(etc1_byte* pOut, int r, int g, int b, const int* table,
+        etc1_uint32 low, bool second, bool flipped, bool isPunchthroughAlpha,
+        bool opaque) {
+    int baseX = 0;
+    int baseY = 0;
+    int channels = isPunchthroughAlpha ? 4 : 3;
+    if (second) {
+        if (flipped) {
+            baseY = 2;
+        } else {
+            baseX = 2;
+        }
+    }
+    for (int i = 0; i < 8; i++) {
+        int x, y;
+        if (flipped) {
+            x = baseX + (i >> 1);
+            y = baseY + (i & 1);
+        } else {
+            x = baseX + (i >> 2);
+            y = baseY + (i & 3);
+        }
+        int k = y + (x * 4);
+        int msb = ((low >> (k + 15)) & 2);
+        int lsb = ((low >> k) & 1);
+        etc1_byte* q = pOut + channels * (x + 4 * y);
+        if (isPunchthroughAlpha && !opaque && msb && !lsb) {
+            // rgba all 0
+            memset(q, 0, 4);
+            q += 4;
+        } else {
+            int offset = lsb | msb;
+            int delta = table[offset];
+            *q++ = clamp(r + delta);
+            *q++ = clamp(g + delta);
+            *q++ = clamp(b + delta);
+            if (isPunchthroughAlpha) {
+                *q++ = 255;
+            }
+        }
+    }
+}
+
+static void etc2_T_H_index(const int* clrTable, etc1_uint32 low,
+                           bool isPunchthroughAlpha, bool opaque,
+                           etc1_byte* pOut) {
+    etc1_byte* q = pOut;
+    for (int y = 0; y < 4; y++) {
+        for (int x = 0; x < 4; x++) {
+            int k = y + x * 4;
+            int msb = (low >> (k + 15)) & 2;
+            int lsb = (low >> k) & 1;
+            if (isPunchthroughAlpha && !opaque && msb && !lsb) {
+                // rgba all 0
+                memset(q, 0, 4);
+                q += 4;
+            } else {
+                int offset = lsb | msb;
+                for (int c = 0; c < 3; c++) {
+                    *q++ = clrTable[offset*3 + c];
+                }
+                if (isPunchthroughAlpha) {
+                    *q++ = 255;
+                }
+            }
+        }
+    }
+}
+
+// ETC2 codec:
+//     from https://www.khronos.org/registry/gles/specs/3.0/es_spec_3.0.4.pdf
+//     page 289
+
+static void etc2_decode_block_T(etc1_uint32 high, etc1_uint32 low,
+        bool isPunchthroughAlpha, bool opaque, etc1_byte* pOut) {
+    const int LUT[] = {3, 6, 11, 16, 23, 32, 41, 64};
+    int r1, r2, g1, g2, b1, b2;
+    r1 = convert4To8((((high >> 27) & 3) << 2) | ((high >> 24) & 3));
+    g1 = convert4To8(high >> 20);
+    b1 = convert4To8(high >> 16);
+    r2 = convert4To8(high >> 12);
+    g2 = convert4To8(high >> 8);
+    b2 = convert4To8(high >> 4);
+    // 3 bits intense modifier
+    int intenseIdx = (((high >> 2) & 3) << 1) | (high & 1);
+    int intenseMod = LUT[intenseIdx];
+    int clrTable[12];
+    clrTable[0] = r1;
+    clrTable[1] = g1;
+    clrTable[2] = b1;
+    clrTable[3] = clamp(r2 + intenseMod);
+    clrTable[4] = clamp(g2 + intenseMod);
+    clrTable[5] = clamp(b2 + intenseMod);
+    clrTable[6] = r2;
+    clrTable[7] = g2;
+    clrTable[8] = b2;
+    clrTable[9] = clamp(r2 - intenseMod);
+    clrTable[10] = clamp(g2 - intenseMod);
+    clrTable[11] = clamp(b2 - intenseMod);
+    etc2_T_H_index(clrTable, low, isPunchthroughAlpha, opaque, pOut);
+}
+
+static void etc2_decode_block_H(etc1_uint32 high, etc1_uint32 low,
+        bool isPunchthroughAlpha, bool opaque, etc1_byte* pOut) {
+    const int LUT[] = {3, 6, 11, 16, 23, 32, 41, 64};
+    int r1, r2, g1, g2, b1, b2;
+    r1 = convert4To8(high >> 27);
+    g1 = convert4To8((high >> 24) << 1 | ((high >> 20) & 1));
+    b1 = convert4To8((high >> 19) << 3 | ((high >> 15) & 7));
+    r2 = convert4To8(high >> 11);
+    g2 = convert4To8(high >> 7);
+    b2 = convert4To8(high >> 3);
+    // 3 bits intense modifier
+    int intenseIdx = high & 4;
+    intenseIdx |= (high & 1) << 1;
+    intenseIdx |= (((r1 << 16) | (g1 << 8) | b1) >= ((r2 << 16) | (g2 << 8) | b2));
+    int intenseMod = LUT[intenseIdx];
+    int clrTable[12];
+    clrTable[0] = clamp(r1 + intenseMod);
+    clrTable[1] = clamp(g1 + intenseMod);
+    clrTable[2] = clamp(b1 + intenseMod);
+    clrTable[3] = clamp(r1 - intenseMod);
+    clrTable[4] = clamp(g1 - intenseMod);
+    clrTable[5] = clamp(b1 - intenseMod);
+    clrTable[6] = clamp(r2 + intenseMod);
+    clrTable[7] = clamp(g2 + intenseMod);
+    clrTable[8] = clamp(b2 + intenseMod);
+    clrTable[9] = clamp(r2 - intenseMod);
+    clrTable[10] = clamp(g2 - intenseMod);
+    clrTable[11] = clamp(b2 - intenseMod);
+    etc2_T_H_index(clrTable, low, isPunchthroughAlpha, opaque, pOut);
+}
+
+static void etc2_decode_block_P(etc1_uint32 high, etc1_uint32 low,
+        bool isPunchthroughAlpha, etc1_byte* pOut) {
+    int ro, go, bo, rh, gh, bh, rv, gv, bv;
+    uint64_t data = high;
+    data = data << 32 | low;
+    ro = convert6To8(data >> 57);
+    go = convert7To8((data >> 56 << 6) | ((data >> 49) & 63));
+    bo = convert6To8((data >> 48 << 5)
+            | (((data >> 43) & 3 ) << 3)
+            | ((data >> 39) & 7));
+    rh = convert6To8((data >> 34 << 1) | ((data >> 32) & 1));
+    gh = convert7To8(data >> 25);
+    bh = convert6To8(data >> 19);
+    rv = convert6To8(data >> 13);
+    gv = convert7To8(data >> 6);
+    bv = convert6To8(data);
+    etc1_byte* q = pOut;
+    for (int i = 0; i < 16; i++) {
+        int y = i >> 2;
+        int x = i & 3;
+        *q++ = clamp((x * (rh - ro) + y * (rv - ro) + 4 * ro + 2) >> 2);
+        *q++ = clamp((x * (gh - go) + y * (gv - go) + 4 * go + 2) >> 2);
+        *q++ = clamp((x * (bh - bo) + y * (bv - bo) + 4 * bo + 2) >> 2);
+        if (isPunchthroughAlpha) *q++ = 255;
+    }
+}
+
+// Input is an ETC1 / ETC2 compressed version of the data.
+// Output is a 4 x 4 square of 3-byte pixels in form R, G, B
+// ETC2 codec:
+//     from https://www.khronos.org/registry/gles/specs/3.0/es_spec_3.0.4.pdf
+//     page 289
+
+void etc2_decode_rgb_block(const etc1_byte* pIn, bool isPunchthroughAlpha,
+                           etc1_byte* pOut) {
+    etc1_uint32 high = (pIn[0] << 24) | (pIn[1] << 16) | (pIn[2] << 8) | pIn[3];
+    etc1_uint32 low = (pIn[4] << 24) | (pIn[5] << 16) | (pIn[6] << 8) | pIn[7];
+    bool opaque = (high >> 1) & 1;
+    int r1, r2, g1, g2, b1, b2;
+    if (isPunchthroughAlpha || high & 2) {
+        // differential
+        int rBase = high >> 27;
+        int gBase = high >> 19;
+        int bBase = high >> 11;
+        if (isOverflowed(rBase, high >> 24)) {
+            etc2_decode_block_T(high, low, isPunchthroughAlpha, opaque, pOut);
+            return;
+        }
+        if (isOverflowed(gBase, high >> 16)) {
+            etc2_decode_block_H(high, low, isPunchthroughAlpha, opaque, pOut);
+            return;
+        }
+        if (isOverflowed(bBase, high >> 8)) {
+            etc2_decode_block_P(high, low, isPunchthroughAlpha, pOut);
+            return;
+        }
+        r1 = convert5To8(rBase);
+        r2 = convertDiff(rBase, high >> 24);
+        g1 = convert5To8(gBase);
+        g2 = convertDiff(gBase, high >> 16);
+        b1 = convert5To8(bBase);
+        b2 = convertDiff(bBase, high >> 8);
+    } else {
+        // not differential
+        r1 = convert4To8(high >> 28);
+        r2 = convert4To8(high >> 24);
+        g1 = convert4To8(high >> 20);
+        g2 = convert4To8(high >> 16);
+        b1 = convert4To8(high >> 12);
+        b2 = convert4To8(high >> 8);
+    }
+    int tableIndexA = 7 & (high >> 5);
+    int tableIndexB = 7 & (high >> 2);
+    const int* rgbModifierTable = opaque || !isPunchthroughAlpha ?
+                                  kRGBModifierTable : kRGBOpaqueModifierTable;
+    const int* tableA = rgbModifierTable + tableIndexA * 4;
+    const int* tableB = rgbModifierTable + tableIndexB * 4;
+    bool flipped = (high & 1) != 0;
+    decode_subblock(pOut, r1, g1, b1, tableA, low, false, flipped,
+                    isPunchthroughAlpha, opaque);
+    decode_subblock(pOut, r2, g2, b2, tableB, low, true, flipped,
+                    isPunchthroughAlpha, opaque);
+}
+
+void eac_decode_single_channel_block(const etc1_byte* pIn,
+                                     int decodedElementBytes, bool isSigned,
+                                     etc1_byte* pOut) {
+    assert(decodedElementBytes == 1 || decodedElementBytes == 2 || decodedElementBytes == 4);
+    int base_codeword = isSigned ? reinterpret_cast<const char*>(pIn)[0]
+                                 : pIn[0];
+    if (base_codeword == -128) base_codeword = -127;
+    int multiplier = pIn[1] >> 4;
+    int tblIdx = pIn[1] & 15;
+    const int* table = kAlphaModifierTable + tblIdx * 8;
+    const etc1_byte* p = pIn + 2;
+    // position in a byte of the next 3-bit index:
+    // | a a a | b b b | c c c | d d d ...
+    // | byte               | byte...
+    int bitOffset = 5;
+    for (int i = 0; i < 16; i ++) {
+        // flip x, y in output
+        int outIdx = (i % 4) * 4 + i / 4;
+        etc1_byte* q = pOut + outIdx * decodedElementBytes;
+
+        int modifier = 0;
+        if (bitOffset < 0) { // (Part of) the index is in the next byte.
+            modifier += p[0] << (-bitOffset);
+            p ++;
+            bitOffset += 8;
+        }
+        modifier += p[0] >> bitOffset;
+        modifier &= 7;
+        bitOffset -= 3; // move to the next index
+        if (bitOffset == -3) {
+            bitOffset = 5;
+            p++;
+        }
+        int modifierValue = table[modifier];
+        int decoded = base_codeword + modifierValue * multiplier;
+        if (decodedElementBytes == 1) {
+            *q = clamp(decoded);
+        } else { // decodedElementBytes == 4
+            decoded *= 8;
+            if (multiplier == 0) {
+                decoded += modifierValue;
+            }
+            if (isSigned) {
+                decoded = clampSigned1023(decoded);
+                reinterpret_cast<float*>(q)[0] = (float)decoded / 1023.0;
+            } else {
+                decoded += 4;
+                decoded = clamp2047(decoded);
+                reinterpret_cast<float*>(q)[0] = (float)decoded / 2047.0;
+            }
+        }
+    }
+}
+
+typedef struct {
+    etc1_uint32 high;
+    etc1_uint32 low;
+    etc1_uint32 score; // Lower is more accurate
+} etc_compressed;
+
+static
+inline void take_best(etc_compressed* a, const etc_compressed* b) {
+    if (a->score > b->score) {
+        *a = *b;
+    }
+}
+
+static
+void etc_average_colors_subblock(const etc1_byte* pIn, etc1_uint32 inMask,
+        etc1_byte* pColors, bool flipped, bool second) {
+    int r = 0;
+    int g = 0;
+    int b = 0;
+
+    if (flipped) {
+        int by = 0;
+        if (second) {
+            by = 2;
+        }
+        for (int y = 0; y < 2; y++) {
+            int yy = by + y;
+            for (int x = 0; x < 4; x++) {
+                int i = x + 4 * yy;
+                if (inMask & (1 << i)) {
+                    const etc1_byte* p = pIn + i * 3;
+                    r += *(p++);
+                    g += *(p++);
+                    b += *(p++);
+                }
+            }
+        }
+    } else {
+        int bx = 0;
+        if (second) {
+            bx = 2;
+        }
+        for (int y = 0; y < 4; y++) {
+            for (int x = 0; x < 2; x++) {
+                int xx = bx + x;
+                int i = xx + 4 * y;
+                if (inMask & (1 << i)) {
+                    const etc1_byte* p = pIn + i * 3;
+                    r += *(p++);
+                    g += *(p++);
+                    b += *(p++);
+                }
+            }
+        }
+    }
+    pColors[0] = (etc1_byte)((r + 4) >> 3);
+    pColors[1] = (etc1_byte)((g + 4) >> 3);
+    pColors[2] = (etc1_byte)((b + 4) >> 3);
+}
+
+static
+inline int square(int x) {
+    return x * x;
+}
+
+static etc1_uint32 chooseModifier(const etc1_byte* pBaseColors,
+        const etc1_byte* pIn, etc1_uint32 *pLow, int bitIndex,
+        const int* pModifierTable) {
+    etc1_uint32 bestScore = ~0;
+    int bestIndex = 0;
+    int pixelR = pIn[0];
+    int pixelG = pIn[1];
+    int pixelB = pIn[2];
+    int r = pBaseColors[0];
+    int g = pBaseColors[1];
+    int b = pBaseColors[2];
+    for (int i = 0; i < 4; i++) {
+        int modifier = pModifierTable[i];
+        int decodedG = clamp(g + modifier);
+        etc1_uint32 score = (etc1_uint32) (6 * square(decodedG - pixelG));
+        if (score >= bestScore) {
+            continue;
+        }
+        int decodedR = clamp(r + modifier);
+        score += (etc1_uint32) (3 * square(decodedR - pixelR));
+        if (score >= bestScore) {
+            continue;
+        }
+        int decodedB = clamp(b + modifier);
+        score += (etc1_uint32) square(decodedB - pixelB);
+        if (score < bestScore) {
+            bestScore = score;
+            bestIndex = i;
+        }
+    }
+    etc1_uint32 lowMask = (((bestIndex >> 1) << 16) | (bestIndex & 1))
+            << bitIndex;
+    *pLow |= lowMask;
+    return bestScore;
+}
+
+static
+void etc_encode_subblock_helper(const etc1_byte* pIn, etc1_uint32 inMask,
+        etc_compressed* pCompressed, bool flipped, bool second,
+        const etc1_byte* pBaseColors, const int* pModifierTable) {
+    int score = pCompressed->score;
+    if (flipped) {
+        int by = 0;
+        if (second) {
+            by = 2;
+        }
+        for (int y = 0; y < 2; y++) {
+            int yy = by + y;
+            for (int x = 0; x < 4; x++) {
+                int i = x + 4 * yy;
+                if (inMask & (1 << i)) {
+                    score += chooseModifier(pBaseColors, pIn + i * 3,
+                            &pCompressed->low, yy + x * 4, pModifierTable);
+                }
+            }
+        }
+    } else {
+        int bx = 0;
+        if (second) {
+            bx = 2;
+        }
+        for (int y = 0; y < 4; y++) {
+            for (int x = 0; x < 2; x++) {
+                int xx = bx + x;
+                int i = xx + 4 * y;
+                if (inMask & (1 << i)) {
+                    score += chooseModifier(pBaseColors, pIn + i * 3,
+                            &pCompressed->low, y + xx * 4, pModifierTable);
+                }
+            }
+        }
+    }
+    pCompressed->score = score;
+}
+
+static bool inRange4bitSigned(int color) {
+    return color >= -4 && color <= 3;
+}
+
+static void etc_encodeBaseColors(etc1_byte* pBaseColors,
+        const etc1_byte* pColors, etc_compressed* pCompressed) {
+    int r1, g1, b1, r2, g2, b2; // 8 bit base colors for sub-blocks
+    bool differential;
+    {
+        int r51 = convert8To5(pColors[0]);
+        int g51 = convert8To5(pColors[1]);
+        int b51 = convert8To5(pColors[2]);
+        int r52 = convert8To5(pColors[3]);
+        int g52 = convert8To5(pColors[4]);
+        int b52 = convert8To5(pColors[5]);
+
+        r1 = convert5To8(r51);
+        g1 = convert5To8(g51);
+        b1 = convert5To8(b51);
+
+        int dr = r52 - r51;
+        int dg = g52 - g51;
+        int db = b52 - b51;
+
+        differential = inRange4bitSigned(dr) && inRange4bitSigned(dg)
+                && inRange4bitSigned(db);
+        if (differential) {
+            r2 = convert5To8(r51 + dr);
+            g2 = convert5To8(g51 + dg);
+            b2 = convert5To8(b51 + db);
+            pCompressed->high |= (r51 << 27) | ((7 & dr) << 24) | (g51 << 19)
+                    | ((7 & dg) << 16) | (b51 << 11) | ((7 & db) << 8) | 2;
+        }
+    }
+
+    if (!differential) {
+        int r41 = convert8To4(pColors[0]);
+        int g41 = convert8To4(pColors[1]);
+        int b41 = convert8To4(pColors[2]);
+        int r42 = convert8To4(pColors[3]);
+        int g42 = convert8To4(pColors[4]);
+        int b42 = convert8To4(pColors[5]);
+        r1 = convert4To8(r41);
+        g1 = convert4To8(g41);
+        b1 = convert4To8(b41);
+        r2 = convert4To8(r42);
+        g2 = convert4To8(g42);
+        b2 = convert4To8(b42);
+        pCompressed->high |= (r41 << 28) | (r42 << 24) | (g41 << 20) | (g42
+                << 16) | (b41 << 12) | (b42 << 8);
+    }
+    pBaseColors[0] = r1;
+    pBaseColors[1] = g1;
+    pBaseColors[2] = b1;
+    pBaseColors[3] = r2;
+    pBaseColors[4] = g2;
+    pBaseColors[5] = b2;
+}
+
+static
+void etc_encode_block_helper(const etc1_byte* pIn, etc1_uint32 inMask,
+        const etc1_byte* pColors, etc_compressed* pCompressed, bool flipped) {
+    pCompressed->score = ~0;
+    pCompressed->high = (flipped ? 1 : 0);
+    pCompressed->low = 0;
+
+    etc1_byte pBaseColors[6];
+
+    etc_encodeBaseColors(pBaseColors, pColors, pCompressed);
+
+    int originalHigh = pCompressed->high;
+
+    const int* pModifierTable = kRGBModifierTable;
+    for (int i = 0; i < 8; i++, pModifierTable += 4) {
+        etc_compressed temp;
+        temp.score = 0;
+        temp.high = originalHigh | (i << 5);
+        temp.low = 0;
+        etc_encode_subblock_helper(pIn, inMask, &temp, flipped, false,
+                pBaseColors, pModifierTable);
+        take_best(pCompressed, &temp);
+    }
+    pModifierTable = kRGBModifierTable;
+    etc_compressed firstHalf = *pCompressed;
+    for (int i = 0; i < 8; i++, pModifierTable += 4) {
+        etc_compressed temp;
+        temp.score = firstHalf.score;
+        temp.high = firstHalf.high | (i << 2);
+        temp.low = firstHalf.low;
+        etc_encode_subblock_helper(pIn, inMask, &temp, flipped, true,
+                pBaseColors + 3, pModifierTable);
+        if (i == 0) {
+            *pCompressed = temp;
+        } else {
+            take_best(pCompressed, &temp);
+        }
+    }
+}
+
+static void writeBigEndian(etc1_byte* pOut, etc1_uint32 d) {
+    pOut[0] = (etc1_byte)(d >> 24);
+    pOut[1] = (etc1_byte)(d >> 16);
+    pOut[2] = (etc1_byte)(d >> 8);
+    pOut[3] = (etc1_byte) d;
+}
+
+// Input is a 4 x 4 square of 3-byte pixels in form R, G, B
+// inmask is a 16-bit mask where bit (1 << (x + y * 4)) tells whether the corresponding (x,y)
+// pixel is valid or not. Invalid pixel color values are ignored when compressing.
+// Output is an ETC1 compressed version of the data.
+
+void etc1_encode_block(const etc1_byte* pIn, etc1_uint32 inMask,
+        etc1_byte* pOut) {
+    etc1_byte colors[6];
+    etc1_byte flippedColors[6];
+    etc_average_colors_subblock(pIn, inMask, colors, false, false);
+    etc_average_colors_subblock(pIn, inMask, colors + 3, false, true);
+    etc_average_colors_subblock(pIn, inMask, flippedColors, true, false);
+    etc_average_colors_subblock(pIn, inMask, flippedColors + 3, true, true);
+
+    etc_compressed a, b;
+    etc_encode_block_helper(pIn, inMask, colors, &a, false);
+    etc_encode_block_helper(pIn, inMask, flippedColors, &b, true);
+    take_best(&a, &b);
+    writeBigEndian(pOut, a.high);
+    writeBigEndian(pOut + 4, a.low);
+}
+
+// Return the size of the encoded image data (does not include size of PKM header).
+
+etc1_uint32 etc1_get_encoded_data_size(etc1_uint32 width, etc1_uint32 height) {
+    return (((width + 3) & ~3) * ((height + 3) & ~3)) >> 1;
+}
+
+etc1_uint32 etc_get_encoded_data_size(ETC2ImageFormat format, etc1_uint32 width,
+                                      etc1_uint32 height) {
+    etc1_uint32 size = ((width + 3) & ~3) * ((height + 3) & ~3);
+    switch (format) {
+        case EtcRGB8:
+        case EtcRGB8A1:
+        case EtcR11:
+        case EtcSignedR11:
+            return size >> 1;
+        case EtcRG11:
+        case EtcSignedRG11:
+        case EtcRGBA8:
+            return size;
+        default:
+            assert(0);
+            return 0;
+    }
+}
+
+etc1_uint32 etc_get_decoded_pixel_size(ETC2ImageFormat format) {
+    switch (format) {
+        case EtcRGB8:
+            return 3;
+        case EtcRGBA8:
+            return 4;
+        case EtcRGB8A1:
+        case EtcR11:
+        case EtcSignedR11:
+            return 4;
+        case EtcRG11:
+        case EtcSignedRG11:
+            return 8;
+        default:
+            assert(0);
+            return 0;
+    }
+}
+
+// Encode an entire image.
+// pIn - pointer to the image data. Formatted such that the Red component of
+//       pixel (x,y) is at pIn + pixelSize * x + stride * y + redOffset;
+// pOut - pointer to encoded data. Must be large enough to store entire encoded image.
+
+int etc1_encode_image(const etc1_byte* pIn, etc1_uint32 width, etc1_uint32 height,
+        etc1_uint32 pixelSize, etc1_uint32 stride, etc1_byte* pOut) {
+    if (pixelSize < 2 || pixelSize > 3) {
+        return -1;
+    }
+    static const unsigned short kYMask[] = { 0x0, 0xf, 0xff, 0xfff, 0xffff };
+    static const unsigned short kXMask[] = { 0x0, 0x1111, 0x3333, 0x7777,
+            0xffff };
+    etc1_byte block[ETC1_DECODED_BLOCK_SIZE];
+    etc1_byte encoded[ETC1_ENCODED_BLOCK_SIZE];
+
+    etc1_uint32 encodedWidth = (width + 3) & ~3;
+    etc1_uint32 encodedHeight = (height + 3) & ~3;
+
+    for (etc1_uint32 y = 0; y < encodedHeight; y += 4) {
+        etc1_uint32 yEnd = height - y;
+        if (yEnd > 4) {
+            yEnd = 4;
+        }
+        int ymask = kYMask[yEnd];
+        for (etc1_uint32 x = 0; x < encodedWidth; x += 4) {
+            etc1_uint32 xEnd = width - x;
+            if (xEnd > 4) {
+                xEnd = 4;
+            }
+            int mask = ymask & kXMask[xEnd];
+            for (etc1_uint32 cy = 0; cy < yEnd; cy++) {
+                etc1_byte* q = block + (cy * 4) * 3;
+                const etc1_byte* p = pIn + pixelSize * x + stride * (y + cy);
+                if (pixelSize == 3) {
+                    memcpy(q, p, xEnd * 3);
+                } else {
+                    for (etc1_uint32 cx = 0; cx < xEnd; cx++) {
+                        int pixel = (p[1] << 8) | p[0];
+                        *q++ = convert5To8(pixel >> 11);
+                        *q++ = convert6To8(pixel >> 5);
+                        *q++ = convert5To8(pixel);
+                        p += pixelSize;
+                    }
+                }
+            }
+            etc1_encode_block(block, mask, encoded);
+            memcpy(pOut, encoded, sizeof(encoded));
+            pOut += sizeof(encoded);
+        }
+    }
+    return 0;
+}
+
+// Decode an entire image.
+// pIn - pointer to encoded data.
+// pOut - pointer to the image data. Will be written such that the Red component of
+//       pixel (x,y) is at pIn + pixelSize * x + stride * y + redOffset. Must be
+//        large enough to store entire image.
+
+
+int etc2_decode_image(const etc1_byte* pIn, ETC2ImageFormat format,
+        etc1_byte* pOut,
+        etc1_uint32 width, etc1_uint32 height,
+        etc1_uint32 stride) {
+    etc1_byte block[std::max({ETC1_DECODED_BLOCK_SIZE,
+                              ETC2_DECODED_RGB8A1_BLOCK_SIZE,
+                              EAC_DECODED_R11_BLOCK_SIZE,
+                              EAC_DECODED_RG11_BLOCK_SIZE})];
+    etc1_byte alphaBlock[EAC_DECODED_ALPHA_BLOCK_SIZE];
+
+    etc1_uint32 encodedWidth = (width + 3) & ~3;
+    etc1_uint32 encodedHeight = (height + 3) & ~3;
+
+    int pixelSize = etc_get_decoded_pixel_size(format);
+    bool isSigned = (format == EtcSignedR11 || format == EtcSignedRG11);
+
+    for (etc1_uint32 y = 0; y < encodedHeight; y += 4) {
+        etc1_uint32 yEnd = height - y;
+        if (yEnd > 4) {
+            yEnd = 4;
+        }
+        for (etc1_uint32 x = 0; x < encodedWidth; x += 4) {
+            etc1_uint32 xEnd = width - x;
+            if (xEnd > 4) {
+                xEnd = 4;
+            }
+            switch (format) {
+                case EtcRGBA8:
+                    eac_decode_single_channel_block(pIn, 1, false, alphaBlock);
+                    pIn += EAC_ENCODE_ALPHA_BLOCK_SIZE;
+                    // Do not break
+                    // Fall through to EtcRGB8 to decode the RGB part
+                    [[fallthrough]];
+                case EtcRGB8:
+                    etc2_decode_rgb_block(pIn, false, block);
+                    pIn += ETC1_ENCODED_BLOCK_SIZE;
+                    break;
+                case EtcRGB8A1:
+                    etc2_decode_rgb_block(pIn, true, block);
+                    pIn += ETC1_ENCODED_BLOCK_SIZE;
+                    break;
+                case EtcR11:
+                case EtcSignedR11:
+                    eac_decode_single_channel_block(pIn, 4, isSigned, block);
+                    pIn += EAC_ENCODE_R11_BLOCK_SIZE;
+                    break;
+                case EtcRG11:
+                case EtcSignedRG11:
+                    // r channel
+                    eac_decode_single_channel_block(pIn, 4, isSigned, block);
+                    pIn += EAC_ENCODE_R11_BLOCK_SIZE;
+                    // g channel
+                    eac_decode_single_channel_block(pIn, 4, isSigned,
+                            block + EAC_DECODED_R11_BLOCK_SIZE);
+                    pIn += EAC_ENCODE_R11_BLOCK_SIZE;
+                    break;
+                default:
+                    assert(0);
+            }
+            for (etc1_uint32 cy = 0; cy < yEnd; cy++) {
+                etc1_byte* p = pOut + pixelSize * x + stride * (y + cy);
+                switch (format) {
+                    case EtcRGB8:
+                    case EtcRGB8A1:
+                    case EtcR11:
+                    case EtcSignedR11: {
+                            const etc1_byte* q = block + (cy * 4) * pixelSize;
+                            memcpy(p, q, xEnd * pixelSize);
+                        }
+                        break;
+                    case EtcRG11:
+                    case EtcSignedRG11: {
+                            const etc1_byte* r = block + cy * EAC_DECODED_R11_BLOCK_SIZE / 4;
+                            const etc1_byte* g = block + cy * EAC_DECODED_R11_BLOCK_SIZE / 4 + EAC_DECODED_R11_BLOCK_SIZE;
+                            int channelSize = pixelSize / 2;
+                            for (etc1_uint32 cx = 0; cx < xEnd; cx++) {
+                                memcpy(p, r, channelSize);
+                                p += channelSize;
+                                r += channelSize;
+                                memcpy(p, g, channelSize);
+                                p += channelSize;
+                                g += channelSize;
+                            }
+                        }
+                        break;
+                    case EtcRGBA8: {
+                            const etc1_byte* q = block + (cy * 4) * 3;
+                            const etc1_byte* qa = alphaBlock + cy * 4;
+                            for (etc1_uint32 cx = 0; cx < xEnd; cx++) {
+                                // copy rgb data
+                                memcpy(p, q, 3);
+                                p += 3;
+                                q += 3;
+                                *p++ = *qa++;
+                            }
+                        }
+                        break;
+                    default:
+                        assert(0);
+                }
+            }
+        }
+    }
+    return 0;
+}
+
+static const char kMagic[] = { 'P', 'K', 'M', ' ', '1', '0' };
+
+static const etc1_uint32 ETC1_PKM_FORMAT_OFFSET = 6;
+static const etc1_uint32 ETC1_PKM_ENCODED_WIDTH_OFFSET = 8;
+static const etc1_uint32 ETC1_PKM_ENCODED_HEIGHT_OFFSET = 10;
+static const etc1_uint32 ETC1_PKM_WIDTH_OFFSET = 12;
+static const etc1_uint32 ETC1_PKM_HEIGHT_OFFSET = 14;
+
+static const etc1_uint32 ETC1_RGB_NO_MIPMAPS = 0;
+
+static void writeBEUint16(etc1_byte* pOut, etc1_uint32 data) {
+    pOut[0] = (etc1_byte) (data >> 8);
+    pOut[1] = (etc1_byte) data;
+}
+
+static etc1_uint32 readBEUint16(const etc1_byte* pIn) {
+    return (pIn[0] << 8) | pIn[1];
+}
+
+// Format a PKM header
+
+void etc1_pkm_format_header(etc1_byte* pHeader, etc1_uint32 width, etc1_uint32 height) {
+    memcpy(pHeader, kMagic, sizeof(kMagic));
+    etc1_uint32 encodedWidth = (width + 3) & ~3;
+    etc1_uint32 encodedHeight = (height + 3) & ~3;
+    writeBEUint16(pHeader + ETC1_PKM_FORMAT_OFFSET, ETC1_RGB_NO_MIPMAPS);
+    writeBEUint16(pHeader + ETC1_PKM_ENCODED_WIDTH_OFFSET, encodedWidth);
+    writeBEUint16(pHeader + ETC1_PKM_ENCODED_HEIGHT_OFFSET, encodedHeight);
+    writeBEUint16(pHeader + ETC1_PKM_WIDTH_OFFSET, width);
+    writeBEUint16(pHeader + ETC1_PKM_HEIGHT_OFFSET, height);
+}
+
+// Check if a PKM header is correctly formatted.
+
+etc1_bool etc1_pkm_is_valid(const etc1_byte* pHeader) {
+    if (memcmp(pHeader, kMagic, sizeof(kMagic))) {
+        return false;
+    }
+    etc1_uint32 format = readBEUint16(pHeader + ETC1_PKM_FORMAT_OFFSET);
+    etc1_uint32 encodedWidth = readBEUint16(pHeader + ETC1_PKM_ENCODED_WIDTH_OFFSET);
+    etc1_uint32 encodedHeight = readBEUint16(pHeader + ETC1_PKM_ENCODED_HEIGHT_OFFSET);
+    etc1_uint32 width = readBEUint16(pHeader + ETC1_PKM_WIDTH_OFFSET);
+    etc1_uint32 height = readBEUint16(pHeader + ETC1_PKM_HEIGHT_OFFSET);
+    return format == ETC1_RGB_NO_MIPMAPS &&
+            encodedWidth >= width && encodedWidth - width < 4 &&
+            encodedHeight >= height && encodedHeight - height < 4;
+}
+
+// Read the image width from a PKM header
+
+etc1_uint32 etc1_pkm_get_width(const etc1_byte* pHeader) {
+    return readBEUint16(pHeader + ETC1_PKM_WIDTH_OFFSET);
+}
+
+// Read the image height from a PKM header
+
+etc1_uint32 etc1_pkm_get_height(const etc1_byte* pHeader){
+    return readBEUint16(pHeader + ETC1_PKM_HEIGHT_OFFSET);
+}
diff --git a/shared/OpenglCodecCommon/etc.h b/shared/OpenglCodecCommon/etc.h
new file mode 100644
index 0000000..7d43c89
--- /dev/null
+++ b/shared/OpenglCodecCommon/etc.h
@@ -0,0 +1,130 @@
+// Copyright 2009 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#define MAX_ETC_SUPPORTED 12
+
+#define ETC1_ENCODED_BLOCK_SIZE 8
+#define ETC1_DECODED_BLOCK_SIZE 48
+#define ETC2_DECODED_RGB8A1_BLOCK_SIZE 64
+#define EAC_ENCODE_ALPHA_BLOCK_SIZE 8
+#define EAC_DECODED_ALPHA_BLOCK_SIZE 16
+#define EAC_ENCODE_R11_BLOCK_SIZE 8
+#define EAC_DECODED_R11_BLOCK_SIZE 64
+#define EAC_DECODED_RG11_BLOCK_SIZE (EAC_DECODED_R11_BLOCK_SIZE*2)
+
+#ifndef ETC1_RGB8_OES
+#define ETC1_RGB8_OES 0x8D64
+#endif
+
+typedef unsigned char etc1_byte;
+typedef int etc1_bool;
+typedef unsigned int etc1_uint32;
+
+enum ETC2ImageFormat {
+    EtcRGB8, EtcRGBA8, EtcR11, EtcSignedR11, EtcRG11, EtcSignedRG11, EtcRGB8A1
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Encode a block of pixels.
+//
+// pIn is a pointer to a ETC_DECODED_BLOCK_SIZE array of bytes that represent a
+// 4 x 4 square of 3-byte pixels in form R, G, B. Byte (3 * (x + 4 * y) is the R
+// value of pixel (x, y).
+//
+// validPixelMask is a 16-bit mask where bit (1 << (x + y * 4)) indicates whether
+// the corresponding (x,y) pixel is valid. Invalid pixel color values are ignored when compressing.
+//
+// pOut is an ETC1 compressed version of the data.
+
+void etc1_encode_block(const etc1_byte* pIn, etc1_uint32 validPixelMask, etc1_byte* pOut);
+
+// Decode a block of rgb pixels.
+//
+// pIn is an ETC2 compressed version of the data.
+//
+// pOut is a pointer to a ETC_DECODED_BLOCK_SIZE array of bytes that represent a
+// 4 x 4 square of 3-byte pixels in form R, G, B. Byte (3 * (x + 4 * y) is the R
+// value of pixel (x, y).
+
+void etc2_decode_rgb_block(const etc1_byte* pIn, bool isPunchthroughAlpha, etc1_byte* pOut);
+
+// Decode a block of single channel pixels
+// This is used when decoding the alpha channel of RGBA8_ETC2_EAC format, or
+// when decoding R11_EAC format
+// decodedElementBytes: number of bits per element after decoding.
+// For RGBA8_ETC2_EAC it must be 1, for R11_EAC it must be 2
+
+void eac_decode_single_channel_block(const etc1_byte* pIn,
+                                     int decodedElementBytes, bool isSigned,
+                                     etc1_byte* pOut);
+
+// Return the size of the encoded image data (does not include size of PKM header).
+
+etc1_uint32 etc1_get_encoded_data_size(etc1_uint32 width, etc1_uint32 height);
+
+etc1_uint32 etc_get_encoded_data_size(ETC2ImageFormat format, etc1_uint32 width,
+                                      etc1_uint32 height);
+etc1_uint32 etc_get_decoded_pixel_size(ETC2ImageFormat format);
+
+// Encode an entire image.
+// pIn - pointer to the image data. Formatted such that
+//       pixel (x,y) is at pIn + pixelSize * x + stride * y;
+// pOut - pointer to encoded data. Must be large enough to store entire encoded image.
+// pixelSize can be 2 or 3. 2 is an GL_UNSIGNED_SHORT_5_6_5 image, 3 is a GL_BYTE RGB image.
+// returns non-zero if there is an error.
+
+int etc1_encode_image(const etc1_byte* pIn, etc1_uint32 width, etc1_uint32 height,
+        etc1_uint32 pixelSize, etc1_uint32 stride, etc1_byte* pOut);
+
+// Decode an entire image.
+// pIn - pointer to encoded data.
+// pOut - pointer to the image data. Will be written such that
+//        pixel (x,y) is at pIn + pixelSize * x + stride * y. Must be
+//        large enough to store entire image.
+//        (pixelSize=3 for rgb images, pixelSize=4 for images with alpha channel)
+// returns non-zero if there is an error.
+
+int etc2_decode_image(const etc1_byte* pIn, ETC2ImageFormat format,
+        etc1_byte* pOut,
+        etc1_uint32 width, etc1_uint32 height,
+        etc1_uint32 stride);
+
+// Size of a PKM header, in bytes.
+
+#define ETC_PKM_HEADER_SIZE 16
+
+// Format a PKM header
+
+void etc1_pkm_format_header(etc1_byte* pHeader, etc1_uint32 width, etc1_uint32 height);
+
+// Check if a PKM header is correctly formatted.
+
+etc1_bool etc1_pkm_is_valid(const etc1_byte* pHeader);
+
+// Read the image width from a PKM header
+
+etc1_uint32 etc1_pkm_get_width(const etc1_byte* pHeader);
+
+// Read the image height from a PKM header
+
+etc1_uint32 etc1_pkm_get_height(const etc1_byte* pHeader);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/shared/OpenglCodecCommon/glUtils.cpp b/shared/OpenglCodecCommon/glUtils.cpp
index a0ec240..a6b0b7c 100644
--- a/shared/OpenglCodecCommon/glUtils.cpp
+++ b/shared/OpenglCodecCommon/glUtils.cpp
@@ -20,6 +20,367 @@
 
 #include <GLES3/gl31.h>
 
+bool isSamplerType(GLenum type) {
+    switch (type) {
+        case GL_SAMPLER_2D:
+        case GL_SAMPLER_3D:
+        case GL_SAMPLER_CUBE:
+        case GL_SAMPLER_2D_SHADOW:
+        case GL_SAMPLER_2D_ARRAY:
+        case GL_SAMPLER_2D_ARRAY_SHADOW:
+        case GL_SAMPLER_2D_MULTISAMPLE:
+        case GL_SAMPLER_CUBE_SHADOW:
+        case GL_INT_SAMPLER_2D:
+        case GL_INT_SAMPLER_3D:
+        case GL_INT_SAMPLER_CUBE:
+        case GL_INT_SAMPLER_2D_ARRAY:
+        case GL_INT_SAMPLER_2D_MULTISAMPLE:
+        case GL_UNSIGNED_INT_SAMPLER_2D:
+        case GL_UNSIGNED_INT_SAMPLER_3D:
+        case GL_UNSIGNED_INT_SAMPLER_CUBE:
+        case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY:
+        case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool isIntegerType(GLenum type) {
+    switch (type) {
+        case GL_UNSIGNED_INT:
+        case GL_INT:
+        case GL_INT_VEC2:
+        case GL_UNSIGNED_INT_VEC2:
+        case GL_INT_VEC3:
+        case GL_UNSIGNED_INT_VEC3:
+        case GL_INT_VEC4:
+        case GL_UNSIGNED_INT_VEC4:
+        case GL_INT_IMAGE_2D:
+        case GL_INT_IMAGE_3D:
+        case GL_INT_IMAGE_CUBE:
+        case GL_INT_IMAGE_2D_ARRAY:
+        case GL_UNSIGNED_INT_IMAGE_2D:
+        case GL_UNSIGNED_INT_IMAGE_3D:
+        case GL_UNSIGNED_INT_IMAGE_CUBE:
+        case GL_UNSIGNED_INT_IMAGE_2D_ARRAY:
+        case GL_UNSIGNED_INT_ATOMIC_COUNTER:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool isUnsignedIntType(GLenum type) {
+    switch (type) {
+        case GL_UNSIGNED_INT:
+        case GL_UNSIGNED_INT_VEC2:
+        case GL_UNSIGNED_INT_VEC3:
+        case GL_UNSIGNED_INT_VEC4:
+        case GL_UNSIGNED_INT_IMAGE_2D:
+        case GL_UNSIGNED_INT_IMAGE_3D:
+        case GL_UNSIGNED_INT_IMAGE_CUBE:
+        case GL_UNSIGNED_INT_IMAGE_2D_ARRAY:
+        case GL_UNSIGNED_INT_ATOMIC_COUNTER:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool isBoolType(GLenum type) {
+    switch (type) {
+        case GL_BOOL:
+        case GL_BOOL_VEC2:
+        case GL_BOOL_VEC3:
+        case GL_BOOL_VEC4:
+            return true;
+        default:
+            return false;
+    }
+}
+
+uint32_t getColumnsOfType(GLenum type) {
+    switch (type) {
+    case GL_BYTE:
+    case GL_UNSIGNED_BYTE:
+    case GL_SHORT:
+    case GL_UNSIGNED_SHORT:
+    case GL_HALF_FLOAT:
+    case GL_HALF_FLOAT_OES:
+    case GL_IMAGE_2D:
+    case GL_IMAGE_3D:
+    case GL_UNSIGNED_INT:
+    case GL_INT:
+    case GL_FLOAT:
+    case GL_FIXED:
+    case GL_BOOL:
+        return 1;
+#ifdef GL_DOUBLE
+    case GL_DOUBLE:
+        return 1;
+    case GL_DOUBLE_VEC2:
+    case GL_DOUBLE_MAT2:
+    case GL_DOUBLE_MAT2x3:
+    case GL_DOUBLE_MAT2x4:
+        return 2;
+    case GL_DOUBLE_VEC3:
+    case GL_DOUBLE_MAT3:
+    case GL_DOUBLE_MAT3x2:
+    case GL_DOUBLE_MAT3x4:
+        return 3;
+    case GL_DOUBLE_VEC4:
+    case GL_DOUBLE_MAT4:
+    case GL_DOUBLE_MAT4x2:
+    case GL_DOUBLE_MAT4x3:
+        return 4;
+#endif
+    case GL_FLOAT_VEC2:
+    case GL_INT_VEC2:
+    case GL_UNSIGNED_INT_VEC2:
+    case GL_BOOL_VEC2:
+    case GL_FLOAT_MAT2:
+    case GL_FLOAT_MAT2x3:
+    case GL_FLOAT_MAT2x4:
+        return 2;
+    case GL_INT_VEC3:
+    case GL_UNSIGNED_INT_VEC3:
+    case GL_BOOL_VEC3:
+    case GL_FLOAT_VEC3:
+    case GL_FLOAT_MAT3:
+    case GL_FLOAT_MAT3x2:
+    case GL_FLOAT_MAT3x4:
+        return 3;
+    case GL_FLOAT_VEC4:
+    case GL_BOOL_VEC4:
+    case GL_INT_VEC4:
+    case GL_UNSIGNED_INT_VEC4:
+    case GL_FLOAT_MAT4:
+    case GL_FLOAT_MAT4x2:
+    case GL_FLOAT_MAT4x3:
+        return 4;
+    case GL_SAMPLER_2D:
+    case GL_SAMPLER_3D:
+    case GL_SAMPLER_CUBE:
+    case GL_SAMPLER_2D_SHADOW:
+    case GL_SAMPLER_2D_ARRAY:
+    case GL_SAMPLER_2D_ARRAY_SHADOW:
+    case GL_SAMPLER_2D_MULTISAMPLE:
+    case GL_SAMPLER_CUBE_SHADOW:
+    case GL_INT_SAMPLER_2D:
+    case GL_INT_SAMPLER_3D:
+    case GL_INT_SAMPLER_CUBE:
+    case GL_INT_SAMPLER_2D_ARRAY:
+    case GL_INT_SAMPLER_2D_MULTISAMPLE:
+    case GL_UNSIGNED_INT_SAMPLER_2D:
+    case GL_UNSIGNED_INT_SAMPLER_3D:
+    case GL_UNSIGNED_INT_SAMPLER_CUBE:
+    case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY:
+    case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE:
+    case GL_IMAGE_CUBE:
+    case GL_IMAGE_2D_ARRAY:
+    case GL_INT_IMAGE_2D:
+    case GL_INT_IMAGE_3D:
+    case GL_INT_IMAGE_CUBE:
+    case GL_INT_IMAGE_2D_ARRAY:
+    case GL_UNSIGNED_INT_IMAGE_2D:
+    case GL_UNSIGNED_INT_IMAGE_3D:
+    case GL_UNSIGNED_INT_IMAGE_CUBE:
+    case GL_UNSIGNED_INT_IMAGE_2D_ARRAY:
+    case GL_UNSIGNED_INT_ATOMIC_COUNTER:
+    default:
+        return 1;
+    }
+}
+
+uint32_t getRowsOfType(GLenum type) {
+    switch (type) {
+    case GL_BYTE:
+    case GL_UNSIGNED_BYTE:
+    case GL_SHORT:
+    case GL_UNSIGNED_SHORT:
+    case GL_HALF_FLOAT:
+    case GL_HALF_FLOAT_OES:
+    case GL_IMAGE_2D:
+    case GL_IMAGE_3D:
+    case GL_UNSIGNED_INT:
+    case GL_INT:
+    case GL_FLOAT:
+    case GL_FIXED:
+    case GL_BOOL:
+        return 1;
+#ifdef GL_DOUBLE
+    case GL_DOUBLE:
+    case GL_DOUBLE_VEC2:
+    case GL_DOUBLE_VEC3:
+    case GL_DOUBLE_VEC4:
+        return 1;
+    case GL_DOUBLE_MAT2:
+    case GL_DOUBLE_MAT3x2:
+    case GL_DOUBLE_MAT4x2:
+        return 2;
+    case GL_DOUBLE_MAT3:
+    case GL_DOUBLE_MAT2x3:
+    case GL_DOUBLE_MAT4x3:
+        return 3;
+    case GL_DOUBLE_MAT4:
+    case GL_DOUBLE_MAT3x4:
+    case GL_DOUBLE_MAT2x4:
+        return 4;
+#endif
+    case GL_FLOAT_VEC2:
+    case GL_INT_VEC2:
+    case GL_UNSIGNED_INT_VEC2:
+    case GL_BOOL_VEC2:
+    case GL_INT_VEC3:
+    case GL_UNSIGNED_INT_VEC3:
+    case GL_BOOL_VEC3:
+    case GL_FLOAT_VEC3:
+    case GL_FLOAT_VEC4:
+    case GL_BOOL_VEC4:
+    case GL_INT_VEC4:
+    case GL_UNSIGNED_INT_VEC4:
+        return 1;
+    case GL_FLOAT_MAT2:
+    case GL_FLOAT_MAT3x2:
+    case GL_FLOAT_MAT4x2:
+        return 2;
+    case GL_FLOAT_MAT3:
+    case GL_FLOAT_MAT2x3:
+    case GL_FLOAT_MAT4x3:
+        return 3;
+    case GL_FLOAT_MAT4:
+    case GL_FLOAT_MAT2x4:
+    case GL_FLOAT_MAT3x4:
+        return 4;
+    case GL_SAMPLER_2D:
+    case GL_SAMPLER_3D:
+    case GL_SAMPLER_CUBE:
+    case GL_SAMPLER_2D_SHADOW:
+    case GL_SAMPLER_2D_ARRAY:
+    case GL_SAMPLER_2D_ARRAY_SHADOW:
+    case GL_SAMPLER_2D_MULTISAMPLE:
+    case GL_SAMPLER_CUBE_SHADOW:
+    case GL_INT_SAMPLER_2D:
+    case GL_INT_SAMPLER_3D:
+    case GL_INT_SAMPLER_CUBE:
+    case GL_INT_SAMPLER_2D_ARRAY:
+    case GL_INT_SAMPLER_2D_MULTISAMPLE:
+    case GL_UNSIGNED_INT_SAMPLER_2D:
+    case GL_UNSIGNED_INT_SAMPLER_3D:
+    case GL_UNSIGNED_INT_SAMPLER_CUBE:
+    case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY:
+    case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE:
+    case GL_IMAGE_CUBE:
+    case GL_IMAGE_2D_ARRAY:
+    case GL_INT_IMAGE_2D:
+    case GL_INT_IMAGE_3D:
+    case GL_INT_IMAGE_CUBE:
+    case GL_INT_IMAGE_2D_ARRAY:
+    case GL_UNSIGNED_INT_IMAGE_2D:
+    case GL_UNSIGNED_INT_IMAGE_3D:
+    case GL_UNSIGNED_INT_IMAGE_CUBE:
+    case GL_UNSIGNED_INT_IMAGE_2D_ARRAY:
+    case GL_UNSIGNED_INT_ATOMIC_COUNTER:
+    default:
+        return 1;
+    }
+}
+
+uint32_t getAttributeCountOfType(GLenum type) {
+    switch (type) {
+    case GL_BYTE:
+    case GL_UNSIGNED_BYTE:
+    case GL_SHORT:
+    case GL_UNSIGNED_SHORT:
+    case GL_HALF_FLOAT:
+    case GL_HALF_FLOAT_OES:
+    case GL_IMAGE_2D:
+    case GL_IMAGE_3D:
+    case GL_UNSIGNED_INT:
+    case GL_INT:
+    case GL_FLOAT:
+    case GL_FIXED:
+    case GL_BOOL:
+        return 1;
+#ifdef GL_DOUBLE
+    case GL_DOUBLE:
+    case GL_DOUBLE_VEC2:
+    case GL_DOUBLE_VEC3:
+    case GL_DOUBLE_VEC4:
+        return 1;
+    case GL_DOUBLE_MAT2:
+    case GL_DOUBLE_MAT2x3:
+    case GL_DOUBLE_MAT2x4:
+        return 4;
+    case GL_DOUBLE_MAT3:
+    case GL_DOUBLE_MAT3x2:
+    case GL_DOUBLE_MAT3x4:
+        return 6;
+    case GL_DOUBLE_MAT4:
+    case GL_DOUBLE_MAT4x2:
+    case GL_DOUBLE_MAT4x3:
+        return 8;
+#endif
+    case GL_FLOAT_VEC2:
+    case GL_INT_VEC2:
+    case GL_UNSIGNED_INT_VEC2:
+    case GL_BOOL_VEC2:
+    case GL_INT_VEC3:
+    case GL_UNSIGNED_INT_VEC3:
+    case GL_BOOL_VEC3:
+    case GL_FLOAT_VEC3:
+    case GL_FLOAT_VEC4:
+    case GL_BOOL_VEC4:
+    case GL_INT_VEC4:
+    case GL_UNSIGNED_INT_VEC4:
+        return 1;
+    case GL_FLOAT_MAT2:
+    case GL_FLOAT_MAT2x3:
+    case GL_FLOAT_MAT2x4:
+        return 2;
+    case GL_FLOAT_MAT3:
+    case GL_FLOAT_MAT3x2:
+    case GL_FLOAT_MAT3x4:
+        return 3;
+    case GL_FLOAT_MAT4:
+    case GL_FLOAT_MAT4x2:
+    case GL_FLOAT_MAT4x3:
+        return 4;
+    case GL_SAMPLER_2D:
+    case GL_SAMPLER_3D:
+    case GL_SAMPLER_CUBE:
+    case GL_SAMPLER_2D_SHADOW:
+    case GL_SAMPLER_2D_ARRAY:
+    case GL_SAMPLER_2D_ARRAY_SHADOW:
+    case GL_SAMPLER_2D_MULTISAMPLE:
+    case GL_SAMPLER_CUBE_SHADOW:
+    case GL_INT_SAMPLER_2D:
+    case GL_INT_SAMPLER_3D:
+    case GL_INT_SAMPLER_CUBE:
+    case GL_INT_SAMPLER_2D_ARRAY:
+    case GL_INT_SAMPLER_2D_MULTISAMPLE:
+    case GL_UNSIGNED_INT_SAMPLER_2D:
+    case GL_UNSIGNED_INT_SAMPLER_3D:
+    case GL_UNSIGNED_INT_SAMPLER_CUBE:
+    case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY:
+    case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE:
+    case GL_IMAGE_CUBE:
+    case GL_IMAGE_2D_ARRAY:
+    case GL_INT_IMAGE_2D:
+    case GL_INT_IMAGE_3D:
+    case GL_INT_IMAGE_CUBE:
+    case GL_INT_IMAGE_2D_ARRAY:
+    case GL_UNSIGNED_INT_IMAGE_2D:
+    case GL_UNSIGNED_INT_IMAGE_3D:
+    case GL_UNSIGNED_INT_IMAGE_CUBE:
+    case GL_UNSIGNED_INT_IMAGE_2D_ARRAY:
+    case GL_UNSIGNED_INT_ATOMIC_COUNTER:
+    default:
+        return 1;
+    }
+}
+
 size_t glSizeof(GLenum type)
 {
     size_t retval = 0;
@@ -47,6 +408,36 @@
     case GL_DOUBLE:
         retval = 8;
         break;
+    case GL_DOUBLE_VEC2:
+        retval = 16;
+        break;
+    case GL_DOUBLE_VEC3:
+        retval = 24;
+        break;
+    case GL_DOUBLE_VEC4:
+        retval = 32;
+        break;
+    case GL_DOUBLE_MAT2:
+        retval = 8 * 4;
+        break;
+    case GL_DOUBLE_MAT3:
+        retval = 8 * 9;
+        break;
+    case GL_DOUBLE_MAT4:
+        retval = 8 * 16;
+        break;
+    case GL_DOUBLE_MAT2x3:
+    case GL_DOUBLE_MAT3x2:
+        retval = 8 * 6;
+        break;
+    case GL_DOUBLE_MAT2x4:
+    case GL_DOUBLE_MAT4x2:
+        retval = 8 * 8;
+        break;
+    case GL_DOUBLE_MAT3x4:
+    case GL_DOUBLE_MAT4x3:
+        retval = 8 * 12;
+        break;
 #endif
     case GL_FLOAT_VEC2:
     case GL_INT_VEC2:
@@ -88,6 +479,32 @@
     case GL_SAMPLER_2D:
     case GL_SAMPLER_3D:
     case GL_SAMPLER_CUBE:
+    case GL_SAMPLER_2D_SHADOW:
+    case GL_SAMPLER_2D_ARRAY:
+    case GL_SAMPLER_2D_ARRAY_SHADOW:
+    case GL_SAMPLER_2D_MULTISAMPLE:
+    case GL_SAMPLER_CUBE_SHADOW:
+    case GL_INT_SAMPLER_2D:
+    case GL_INT_SAMPLER_3D:
+    case GL_INT_SAMPLER_CUBE:
+    case GL_INT_SAMPLER_2D_ARRAY:
+    case GL_INT_SAMPLER_2D_MULTISAMPLE:
+    case GL_UNSIGNED_INT_SAMPLER_2D:
+    case GL_UNSIGNED_INT_SAMPLER_3D:
+    case GL_UNSIGNED_INT_SAMPLER_CUBE:
+    case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY:
+    case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE:
+    case GL_IMAGE_CUBE:
+    case GL_IMAGE_2D_ARRAY:
+    case GL_INT_IMAGE_2D:
+    case GL_INT_IMAGE_3D:
+    case GL_INT_IMAGE_CUBE:
+    case GL_INT_IMAGE_2D_ARRAY:
+    case GL_UNSIGNED_INT_IMAGE_2D:
+    case GL_UNSIGNED_INT_IMAGE_3D:
+    case GL_UNSIGNED_INT_IMAGE_CUBE:
+    case GL_UNSIGNED_INT_IMAGE_2D_ARRAY:
+    case GL_UNSIGNED_INT_ATOMIC_COUNTER:
         retval = 4;
         break;
     case GL_UNSIGNED_SHORT_4_4_4_4:
@@ -669,3 +1086,92 @@
     }
     return 4;
 }
+
+bool colorRenderableFormat(GLint internalformat, GLenum texturetype, int majorVersion, int minorVersion, bool hasColorBufferFloatExtension, bool hasColorBufferHalfFloatExtension) {
+    switch (internalformat) {
+        case GL_RGB:
+        case GL_RGBA:
+        case GL_BGRA_EXT:
+            switch (texturetype) {
+                case GL_FLOAT:
+                case GL_HALF_FLOAT_OES:
+                case GL_UNSIGNED_INT_10F_11F_11F_REV:
+                case GL_UNSIGNED_INT_2_10_10_10_REV:
+                    return false;
+                default:
+                    return true;
+            }
+            break;
+        case GL_R8:
+        case GL_RG8:
+        case GL_RGB8:
+        case GL_RGB565:
+        case GL_RGBA4:
+        case GL_RGB5_A1:
+        case GL_RGBA8:
+        case GL_RGB10_A2:
+        case GL_RGB10_A2UI:
+        case GL_SRGB8_ALPHA8:
+        case GL_R8I:
+        case GL_R8UI:
+        case GL_R16I:
+        case GL_R16UI:
+        case GL_R32I:
+        case GL_R32UI:
+        case GL_RG8I:
+        case GL_RG8UI:
+        case GL_RG16I:
+        case GL_RG16UI:
+        case GL_RG32I:
+        case GL_RG32UI:
+        case GL_RGBA8I:
+        case GL_RGBA8UI:
+        case GL_RGBA16I:
+        case GL_RGBA16UI:
+        case GL_RGBA32I:
+        case GL_RGBA32UI:
+        case GL_BGRA8_EXT:
+            return true;
+        case GL_R16F:
+        case GL_RG16F:
+        case GL_RGBA16F:
+        case GL_R32F:
+        case GL_RG32F:
+        case GL_RGBA32F:
+        case GL_R11F_G11F_B10F:
+            return majorVersion >= 3 && hasColorBufferFloatExtension;
+        case GL_RGB16F:
+            return majorVersion >= 3 && hasColorBufferHalfFloatExtension;
+        default:
+            return false;
+    }
+}
+
+bool depthRenderableFormat(GLint internalformat) {
+    switch (internalformat) {
+        case GL_DEPTH_STENCIL:
+        case GL_DEPTH_COMPONENT:
+        case GL_DEPTH_COMPONENT16:
+        case GL_DEPTH_COMPONENT24:
+        case GL_DEPTH_COMPONENT32F:
+        case GL_DEPTH24_STENCIL8:
+        case GL_DEPTH32F_STENCIL8:
+        case 0X81A7: // GL_DEPTH_COMPONENT32
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool stencilRenderableFormat(GLint internalformat) {
+    switch (internalformat) {
+        case GL_STENCIL_INDEX:
+        case GL_DEPTH_STENCIL:
+        case GL_STENCIL_INDEX8:
+        case GL_DEPTH24_STENCIL8:
+        case GL_DEPTH32F_STENCIL8:
+            return true;
+        default:
+            return false;
+    }
+}
diff --git a/shared/OpenglCodecCommon/glUtils.h b/shared/OpenglCodecCommon/glUtils.h
index 2f20b68..09081c2 100644
--- a/shared/OpenglCodecCommon/glUtils.h
+++ b/shared/OpenglCodecCommon/glUtils.h
@@ -57,6 +57,13 @@
     INDIRECT_COMMAND_DRAWELEMENTS = 1,
 } IndirectCommandType;
 
+    bool isSamplerType(GLenum type);
+    bool isIntegerType(GLenum type);
+    bool isUnsignedIntType(GLenum type);
+    bool isBoolType(GLenum type);
+    uint32_t getColumnsOfType(GLenum type);
+    uint32_t getRowsOfType(GLenum type);
+    uint32_t getAttributeCountOfType(GLenum type);
     size_t glSizeof(GLenum type);
     size_t glUtilsParamSize(GLenum param);
     void   glUtilsPackPointerData(unsigned char *dst, unsigned char *str,
@@ -73,6 +80,11 @@
 
     GLuint glUtilsIndirectStructSize(IndirectCommandType cmdType);
 
+    bool colorRenderableFormat(GLint internalformat, GLenum texturetype, int majorVersion, int minorVersion, bool hasColorBufferFloatExtension, bool hasColorBufferHalfFloatExtension);
+
+    bool depthRenderableFormat(GLint internalformat);
+    bool stencilRenderableFormat(GLint internalformat);
+
 #ifdef __cplusplus
 };
 #endif
diff --git a/shared/gralloc_cb/Android.bp b/shared/gralloc_cb/Android.bp
index faea798..2e5976a 100644
--- a/shared/gralloc_cb/Android.bp
+++ b/shared/gralloc_cb/Android.bp
@@ -14,6 +14,15 @@
  * limitations under the License.
  */
 
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "device_generic_goldfish-opengl_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["device_generic_goldfish-opengl_license"],
+}
+
 cc_library_headers {
     name: "libgralloc_cb.ranchu",
     vendor_available: true,
diff --git a/shared/gralloc_cb/CMakeLists.txt b/shared/gralloc_cb/CMakeLists.txt
index f9e7b71..865d9d7 100644
--- a/shared/gralloc_cb/CMakeLists.txt
+++ b/shared/gralloc_cb/CMakeLists.txt
@@ -5,6 +5,6 @@
 set(gralloc_cb_host_src empty.cpp)
 android_add_library(TARGET gralloc_cb_host LICENSE Apache-2.0 SRC empty.cpp)
 target_include_directories(gralloc_cb_host PRIVATE ${GOLDFISH_DEVICE_ROOT}/shared/gralloc_cb/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
-target_compile_definitions(gralloc_cb_host PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGOLDFISH_VULKAN")
+target_compile_definitions(gralloc_cb_host PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM")
 target_compile_options(gralloc_cb_host PRIVATE "-fvisibility=default" "-Wno-unused-parameter")
 target_link_libraries(gralloc_cb_host PRIVATE android-emu-shared)
\ No newline at end of file
diff --git a/shared/qemupipe/Android.bp b/shared/qemupipe/Android.bp
index fe72943..478328c 100644
--- a/shared/qemupipe/Android.bp
+++ b/shared/qemupipe/Android.bp
@@ -14,6 +14,15 @@
  * limitations under the License.
  */
 
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "device_generic_goldfish-opengl_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["device_generic_goldfish-opengl_license"],
+}
+
 cc_library_headers {
     name: "libqemupipe-types.ranchu",
     vendor_available: true,
diff --git a/shared/qemupipe/CMakeLists.txt b/shared/qemupipe/CMakeLists.txt
index 2229681..90785d6 100644
--- a/shared/qemupipe/CMakeLists.txt
+++ b/shared/qemupipe/CMakeLists.txt
@@ -5,6 +5,6 @@
 set(qemupipe_host_src qemu_pipe_common.cpp qemu_pipe_host.cpp)
 android_add_library(TARGET qemupipe_host LICENSE Apache-2.0 SRC qemu_pipe_common.cpp qemu_pipe_host.cpp)
 target_include_directories(qemupipe_host PRIVATE ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
-target_compile_definitions(qemupipe_host PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGOLDFISH_VULKAN")
+target_compile_definitions(qemupipe_host PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM")
 target_compile_options(qemupipe_host PRIVATE "-fvisibility=default" "-Wno-unused-parameter")
 target_link_libraries(qemupipe_host PRIVATE android-emu-shared)
\ No newline at end of file
diff --git a/shared/qemupipe/include-types/qemu_pipe_types_bp.h b/shared/qemupipe/include-types/qemu_pipe_types_bp.h
index d786fe5..4bd5b7c 100644
--- a/shared/qemupipe/include-types/qemu_pipe_types_bp.h
+++ b/shared/qemupipe/include-types/qemu_pipe_types_bp.h
@@ -18,22 +18,9 @@
 #include <stddef.h>
 #include <stdbool.h>
 
-#ifdef HOST_BUILD
-
-typedef void* QEMU_PIPE_HANDLE;
-#define QEMU_PIPE_INVALID_HANDLE NULL
-
-inline bool qemu_pipe_valid(QEMU_PIPE_HANDLE h) {
-    return h != QEMU_PIPE_INVALID_HANDLE;
-}
-
-#else  // ifdef HOST_BUILD
-
 typedef int QEMU_PIPE_HANDLE;
 #define QEMU_PIPE_INVALID_HANDLE (-1)
 
 inline bool qemu_pipe_valid(QEMU_PIPE_HANDLE h) {
     return h > QEMU_PIPE_INVALID_HANDLE;
 }
-
-#endif // ifdef HOST_BUILD
diff --git a/shared/qemupipe/qemu_pipe_guest.cpp b/shared/qemupipe/qemu_pipe_guest.cpp
index 1732a51..da70197 100644
--- a/shared/qemupipe/qemu_pipe_guest.cpp
+++ b/shared/qemupipe/qemu_pipe_guest.cpp
@@ -14,18 +14,36 @@
  * limitations under the License.
  */
 
+#include <atomic>
 #include <errno.h>
 #include <log/log.h>
+#include <sys/socket.h>
 #include <sys/types.h>
 #include <fcntl.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <string.h>
 #include <unistd.h>
+#include <linux/vm_sockets.h>
 #include <qemu_pipe_bp.h>
 
 namespace {
-int open_verbose(const char* name, int flags) {
+enum class VsockPort {
+    Data = 5000,
+    Ping = 5001,
+};
+
+std::atomic<bool> gVsockAvailable = false;
+
+bool is_graphics_pipe(const char* name) {
+    if (!strcmp(name, "opengles")) { return true; }
+    if (!strcmp(name, "GLProcessPipe")) { return true; }
+    if (!strcmp(name, "refcount")) { return true; }
+
+    return false;
+}
+
+int open_verbose_path(const char* name, const int flags) {
     const int fd = QEMU_PIPE_RETRY(open(name, flags));
     if (fd < 0) {
         ALOGE("%s:%d: Could not open '%s': %s",
@@ -34,6 +52,85 @@
     return fd;
 }
 
+int open_verbose_vsock(const VsockPort port, const int flags) {
+    const int fd = QEMU_PIPE_RETRY(socket(AF_VSOCK, SOCK_STREAM, 0));
+    if (fd < 0) {
+        // it is ok if socket(AF_VSOCK, ...) fails - vsock might be unsupported yet
+        return -1;
+    }
+
+    struct sockaddr_vm sa;
+    memset(&sa, 0, sizeof(sa));
+    sa.svm_family = AF_VSOCK;
+    sa.svm_port = static_cast<int>(port);
+    sa.svm_cid = VMADDR_CID_HOST;
+
+    int r;
+
+    r = QEMU_PIPE_RETRY(connect(fd,
+                                reinterpret_cast<const struct sockaddr*>(&sa),
+                                sizeof(sa)));
+    if (r < 0) {
+        // it is ok if connect(fd, &sa, ...) fails - vsock might be unsupported yet
+        close(fd);
+        return -1;
+    }
+
+    if (flags) {
+        const int oldFlags = QEMU_PIPE_RETRY(fcntl(fd, F_GETFL, 0));
+        if (oldFlags < 0) {
+            ALOGE("%s:%d fcntl(fd=%d, F_GETFL) failed with '%s' (%d)",
+                  __func__, __LINE__, fd, strerror(errno), errno);
+            close(fd);
+            return -1;
+        }
+
+        const int newFlags = oldFlags | flags;
+
+        r = QEMU_PIPE_RETRY(fcntl(fd, F_SETFL, newFlags));
+        if (r < 0) {
+            ALOGE("%s:%d fcntl(fd=%d, F_SETFL, flags=0x%X) failed with '%s' (%d)",
+                  __func__, __LINE__, fd, newFlags, strerror(errno), errno);
+            close(fd);
+            return -1;
+        }
+    }
+
+    return fd;
+}
+
+int open_verbose(const char *pipeName, const int flags) {
+    int fd;
+
+    // We can't use vsock for grapshics for security reasons,
+    // virtio-gpu should be used instead.
+    if (!is_graphics_pipe(pipeName)) {
+        fd = open_verbose_vsock(VsockPort::Data, flags);
+        if (fd >= 0) {
+            gVsockAvailable = true;
+            return fd;
+        }
+    }
+
+    fd = open_verbose_path("/dev/goldfish_pipe", flags);
+    if (fd >= 0) {
+        return fd;
+    }
+
+    ALOGE("%s:%d: both vsock and goldfish_pipe paths failed",
+          __func__, __LINE__);
+    return -1;
+}
+
+void vsock_ping() {
+    const int fd = open_verbose_vsock(VsockPort::Ping, 0);
+    if (fd >= 0) {
+        ALOGE("%s:%d open_verbose_vsock(kVsockPingPort) is expected to fail, "
+              "but it succeeded, fd=%d", __func__, __LINE__, fd);
+        close(fd);
+    }
+}
+
 }  // namespace
 
 extern "C" {
@@ -44,7 +141,7 @@
         return -1;
     }
 
-    const int fd = open_verbose("/dev/goldfish_pipe", flags);
+    const int fd = open_verbose(pipeName, flags);
     if (fd < 0) {
         return fd;
     }
@@ -60,6 +157,7 @@
     if (qemu_pipe_write_fully(fd, buf, bufLen + 1)) {
         ALOGE("%s:%d: Could not connect to the '%s' service: %s",
               __func__, __LINE__, buf, strerror(errno));
+        close(fd);
         return -1;
     }
 
@@ -83,7 +181,24 @@
 }
 
 int qemu_pipe_try_again(int ret) {
-    return (ret < 0) && (errno == EINTR || errno == EAGAIN);
+    if (ret >= 0) {
+        return 0;
+    }
+
+    switch (errno) {
+    case EAGAIN:
+        if (gVsockAvailable) {
+            vsock_ping();
+            errno = EAGAIN;
+        }
+        return 1;
+
+    case EINTR:
+        return 1;
+
+    default:
+        return 0;
+    }
 }
 
 void qemu_pipe_print_error(int pipe) {
diff --git a/shared/qemupipe/qemu_pipe_host.cpp b/shared/qemupipe/qemu_pipe_host.cpp
index 38f2f63..0e2e0c4 100644
--- a/shared/qemupipe/qemu_pipe_host.cpp
+++ b/shared/qemupipe/qemu_pipe_host.cpp
@@ -53,5 +53,5 @@
 
 void qemu_pipe_print_error(QEMU_PIPE_HANDLE pipe) {
     int err = HostGoldfishPipeDevice::get()->getErrno();
-    ALOGE("pipe error: pipe %p err %d", pipe, err);
+    ALOGE("pipe error: pipe %d err %d", pipe, err);
 }
diff --git a/system/GLESv1/CMakeLists.txt b/system/GLESv1/CMakeLists.txt
index 5436e5f..8143b9a 100644
--- a/system/GLESv1/CMakeLists.txt
+++ b/system/GLESv1/CMakeLists.txt
@@ -4,7 +4,7 @@
 android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/GLESv1/Android.mk" "e095cb082e3791719749cfc80b90560afd7348eb0d7895449d2509aa129bea75")
 set(GLESv1_CM_emulation_src gl.cpp)
 android_add_library(TARGET GLESv1_CM_emulation SHARED LICENSE Apache-2.0 SRC gl.cpp)
-target_include_directories(GLESv1_CM_emulation PRIVATE ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon/bionic-include ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon ${GOLDFISH_DEVICE_ROOT}/bionic/libc/private ${GOLDFISH_DEVICE_ROOT}/bionic/libc/platform ${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/gralloc_cb/include ${GOLDFISH_DEVICE_ROOT}/shared/GoldfishAddressSpace/include ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv2_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv1_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
-target_compile_definitions(GLESv1_CM_emulation PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGOLDFISH_VULKAN" "-DLOG_TAG=\"GLES_emulation\"")
+target_include_directories(GLESv1_CM_emulation PRIVATE ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon/bionic-include ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon ${GOLDFISH_DEVICE_ROOT}/bionic/libc/private ${GOLDFISH_DEVICE_ROOT}/bionic/libc/platform ${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc ${GOLDFISH_DEVICE_ROOT}/shared/gralloc_cb/include ${GOLDFISH_DEVICE_ROOT}/shared/GoldfishAddressSpace/include ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv2_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv1_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
+target_compile_definitions(GLESv1_CM_emulation PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM" "-DLOG_TAG=\"GLES_emulation\"")
 target_compile_options(GLESv1_CM_emulation PRIVATE "-fvisibility=default" "-Wno-unused-parameter")
-target_link_libraries(GLESv1_CM_emulation PRIVATE OpenglSystemCommon android-emu-shared vulkan_enc gui androidemu cutils utils log _renderControl_enc GLESv2_enc GLESv1_enc OpenglCodecCommon_host PRIVATE gralloc_cb_host GoldfishAddressSpace_host qemupipe_host)
\ No newline at end of file
+target_link_libraries(GLESv1_CM_emulation PRIVATE OpenglSystemCommon android-emu-shared vulkan_enc gui log _renderControl_enc GLESv2_enc GLESv1_enc OpenglCodecCommon_host cutils utils androidemu PRIVATE gralloc_cb_host GoldfishAddressSpace_host qemupipe_host)
\ No newline at end of file
diff --git a/system/GLESv1_enc/CMakeLists.txt b/system/GLESv1_enc/CMakeLists.txt
index 0a9bc52..2fb5b03 100644
--- a/system/GLESv1_enc/CMakeLists.txt
+++ b/system/GLESv1_enc/CMakeLists.txt
@@ -4,7 +4,7 @@
 android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/GLESv1_enc/Android.mk" "953e6b7371d10eed63a4be555f8f1fb6f347338484a78102fa8f55dff96f5d3b")
 set(GLESv1_enc_src GLEncoder.cpp GLEncoderUtils.cpp gl_client_context.cpp gl_enc.cpp gl_entry.cpp)
 android_add_library(TARGET GLESv1_enc SHARED LICENSE Apache-2.0 SRC GLEncoder.cpp GLEncoderUtils.cpp gl_client_context.cpp gl_enc.cpp gl_entry.cpp)
-target_include_directories(GLESv1_enc PRIVATE ${GOLDFISH_DEVICE_ROOT}/system/GLESv1_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
-target_compile_definitions(GLESv1_enc PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGOLDFISH_VULKAN" "-DLOG_TAG=\"emuglGLESv1_enc\"")
+target_include_directories(GLESv1_enc PRIVATE ${GOLDFISH_DEVICE_ROOT}/system/GLESv1_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
+target_compile_definitions(GLESv1_enc PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM" "-DLOG_TAG=\"emuglGLESv1_enc\"")
 target_compile_options(GLESv1_enc PRIVATE "-fvisibility=default" "-Wno-unused-parameter")
-target_link_libraries(GLESv1_enc PRIVATE OpenglCodecCommon_host cutils utils log android-emu-shared PRIVATE qemupipe_host)
\ No newline at end of file
+target_link_libraries(GLESv1_enc PRIVATE OpenglCodecCommon_host cutils utils log androidemu android-emu-shared PRIVATE qemupipe_host)
\ No newline at end of file
diff --git a/system/GLESv1_enc/GLEncoder.cpp b/system/GLESv1_enc/GLEncoder.cpp
index 20f2c02..1ab13e3 100644
--- a/system/GLESv1_enc/GLEncoder.cpp
+++ b/system/GLESv1_enc/GLEncoder.cpp
@@ -989,7 +989,7 @@
     GLEncoder* ctx = (GLEncoder*)self;
     GLClientState* state = ctx->m_state;
 
-    state->attachTextureObject(target, attachment, texture);
+    state->attachTextureObject(target, attachment, texture, level, 0);
 
     ctx->m_glFramebufferTexture2DOES_enc(self, target, attachment, textarget, texture, level);
 }
@@ -1000,7 +1000,7 @@
     GLEncoder* ctx = (GLEncoder*)self;
     GLClientState* state = ctx->m_state;
 
-    state->attachTextureObject(target, attachment, texture);
+    state->attachTextureObject(target, attachment, texture, level, 0);
 
     ctx->m_glFramebufferTexture2DMultisampleIMG_enc(self, target, attachment, textarget, texture, level, samples);
 }
diff --git a/system/GLESv1_enc/GLEncoder.h b/system/GLESv1_enc/GLEncoder.h
index a26636c..7837838 100644
--- a/system/GLESv1_enc/GLEncoder.h
+++ b/system/GLESv1_enc/GLEncoder.h
@@ -33,7 +33,7 @@
     }
     void setSharedGroup(GLSharedGroupPtr shared) {
         m_shared = shared;
-        if (m_state && m_shared.Ptr())
+        if (m_state && m_shared)
             m_state->setTextureData(m_shared->getTextureData());
     }
     void flush() { m_stream->flush(); }
diff --git a/system/GLESv1_enc/gl_enc.cpp b/system/GLESv1_enc/gl_enc.cpp
index 4e672c6..8c146dc 100644
--- a/system/GLESv1_enc/gl_enc.cpp
+++ b/system/GLESv1_enc/gl_enc.cpp
@@ -12,6 +12,7 @@
 
 #include <stdio.h>
 
+#include "android/base/Tracing.h"
 namespace {
 
 void enc_unsupported()
@@ -21,6 +22,7 @@
 
 void glAlphaFunc_enc(void *self , GLenum func, GLclampf ref)
 {
+	AEMU_SCOPED_TRACE("glAlphaFunc encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -47,6 +49,7 @@
 
 void glClearColor_enc(void *self , GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha)
 {
+	AEMU_SCOPED_TRACE("glClearColor encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -75,6 +78,7 @@
 
 void glClearDepthf_enc(void *self , GLclampf depth)
 {
+	AEMU_SCOPED_TRACE("glClearDepthf encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -100,6 +104,7 @@
 
 void glClipPlanef_enc(void *self , GLenum plane, const GLfloat* equation)
 {
+	AEMU_SCOPED_TRACE("glClipPlanef encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -118,7 +123,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &plane, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_equation; ptr += 4;
+	memcpy(ptr, &__size_equation, 4); ptr += 4;
 	memcpy(ptr, equation, __size_equation);ptr += __size_equation;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -128,6 +133,7 @@
 
 void glColor4f_enc(void *self , GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha)
 {
+	AEMU_SCOPED_TRACE("glColor4f encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -156,6 +162,7 @@
 
 void glDepthRangef_enc(void *self , GLclampf zNear, GLclampf zFar)
 {
+	AEMU_SCOPED_TRACE("glDepthRangef encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -182,6 +189,7 @@
 
 void glFogf_enc(void *self , GLenum pname, GLfloat param)
 {
+	AEMU_SCOPED_TRACE("glFogf encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -208,6 +216,7 @@
 
 void glFogfv_enc(void *self , GLenum pname, const GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glFogfv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -226,7 +235,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -236,6 +245,7 @@
 
 void glFrustumf_enc(void *self , GLfloat left, GLfloat right, GLfloat bottom, GLfloat top, GLfloat zNear, GLfloat zFar)
 {
+	AEMU_SCOPED_TRACE("glFrustumf encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -266,6 +276,7 @@
 
 void glGetClipPlanef_enc(void *self , GLenum pname, GLfloat* eqn)
 {
+	AEMU_SCOPED_TRACE("glGetClipPlanef encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -284,7 +295,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_eqn; ptr += 4;
+	memcpy(ptr, &__size_eqn, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -305,6 +316,7 @@
 
 void glGetFloatv_enc(void *self , GLenum pname, GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glGetFloatv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -323,7 +335,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -344,6 +356,7 @@
 
 void glGetLightfv_enc(void *self , GLenum light, GLenum pname, GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glGetLightfv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -363,7 +376,7 @@
 
 		memcpy(ptr, &light, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -384,6 +397,7 @@
 
 void glGetMaterialfv_enc(void *self , GLenum face, GLenum pname, GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glGetMaterialfv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -403,7 +417,7 @@
 
 		memcpy(ptr, &face, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -424,6 +438,7 @@
 
 void glGetTexEnvfv_enc(void *self , GLenum env, GLenum pname, GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glGetTexEnvfv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -443,7 +458,7 @@
 
 		memcpy(ptr, &env, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -464,6 +479,7 @@
 
 void glGetTexParameterfv_enc(void *self , GLenum target, GLenum pname, GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glGetTexParameterfv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -483,7 +499,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -504,6 +520,7 @@
 
 void glLightModelf_enc(void *self , GLenum pname, GLfloat param)
 {
+	AEMU_SCOPED_TRACE("glLightModelf encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -530,6 +547,7 @@
 
 void glLightModelfv_enc(void *self , GLenum pname, const GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glLightModelfv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -548,7 +566,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -558,6 +576,7 @@
 
 void glLightf_enc(void *self , GLenum light, GLenum pname, GLfloat param)
 {
+	AEMU_SCOPED_TRACE("glLightf encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -585,6 +604,7 @@
 
 void glLightfv_enc(void *self , GLenum light, GLenum pname, const GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glLightfv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -604,7 +624,7 @@
 
 		memcpy(ptr, &light, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -614,6 +634,7 @@
 
 void glLineWidth_enc(void *self , GLfloat width)
 {
+	AEMU_SCOPED_TRACE("glLineWidth encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -639,6 +660,7 @@
 
 void glLoadMatrixf_enc(void *self , const GLfloat* m)
 {
+	AEMU_SCOPED_TRACE("glLoadMatrixf encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -656,7 +678,7 @@
 	int tmp = OP_glLoadMatrixf;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_m; ptr += 4;
+	memcpy(ptr, &__size_m, 4); ptr += 4;
 	memcpy(ptr, m, __size_m);ptr += __size_m;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -666,6 +688,7 @@
 
 void glMaterialf_enc(void *self , GLenum face, GLenum pname, GLfloat param)
 {
+	AEMU_SCOPED_TRACE("glMaterialf encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -693,6 +716,7 @@
 
 void glMaterialfv_enc(void *self , GLenum face, GLenum pname, const GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glMaterialfv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -712,7 +736,7 @@
 
 		memcpy(ptr, &face, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -722,6 +746,7 @@
 
 void glMultMatrixf_enc(void *self , const GLfloat* m)
 {
+	AEMU_SCOPED_TRACE("glMultMatrixf encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -739,7 +764,7 @@
 	int tmp = OP_glMultMatrixf;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_m; ptr += 4;
+	memcpy(ptr, &__size_m, 4); ptr += 4;
 	memcpy(ptr, m, __size_m);ptr += __size_m;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -749,6 +774,7 @@
 
 void glMultiTexCoord4f_enc(void *self , GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q)
 {
+	AEMU_SCOPED_TRACE("glMultiTexCoord4f encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -778,6 +804,7 @@
 
 void glNormal3f_enc(void *self , GLfloat nx, GLfloat ny, GLfloat nz)
 {
+	AEMU_SCOPED_TRACE("glNormal3f encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -805,6 +832,7 @@
 
 void glOrthof_enc(void *self , GLfloat left, GLfloat right, GLfloat bottom, GLfloat top, GLfloat zNear, GLfloat zFar)
 {
+	AEMU_SCOPED_TRACE("glOrthof encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -835,6 +863,7 @@
 
 void glPointParameterf_enc(void *self , GLenum pname, GLfloat param)
 {
+	AEMU_SCOPED_TRACE("glPointParameterf encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -861,6 +890,7 @@
 
 void glPointParameterfv_enc(void *self , GLenum pname, const GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glPointParameterfv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -879,7 +909,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -889,6 +919,7 @@
 
 void glPointSize_enc(void *self , GLfloat size)
 {
+	AEMU_SCOPED_TRACE("glPointSize encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -914,6 +945,7 @@
 
 void glPolygonOffset_enc(void *self , GLfloat factor, GLfloat units)
 {
+	AEMU_SCOPED_TRACE("glPolygonOffset encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -940,6 +972,7 @@
 
 void glRotatef_enc(void *self , GLfloat angle, GLfloat x, GLfloat y, GLfloat z)
 {
+	AEMU_SCOPED_TRACE("glRotatef encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -968,6 +1001,7 @@
 
 void glScalef_enc(void *self , GLfloat x, GLfloat y, GLfloat z)
 {
+	AEMU_SCOPED_TRACE("glScalef encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -995,6 +1029,7 @@
 
 void glTexEnvf_enc(void *self , GLenum target, GLenum pname, GLfloat param)
 {
+	AEMU_SCOPED_TRACE("glTexEnvf encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1022,6 +1057,7 @@
 
 void glTexEnvfv_enc(void *self , GLenum target, GLenum pname, const GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glTexEnvfv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1041,7 +1077,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -1051,6 +1087,7 @@
 
 void glTexParameterf_enc(void *self , GLenum target, GLenum pname, GLfloat param)
 {
+	AEMU_SCOPED_TRACE("glTexParameterf encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1078,6 +1115,7 @@
 
 void glTexParameterfv_enc(void *self , GLenum target, GLenum pname, const GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glTexParameterfv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1097,7 +1135,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -1107,6 +1145,7 @@
 
 void glTranslatef_enc(void *self , GLfloat x, GLfloat y, GLfloat z)
 {
+	AEMU_SCOPED_TRACE("glTranslatef encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1134,6 +1173,7 @@
 
 void glActiveTexture_enc(void *self , GLenum texture)
 {
+	AEMU_SCOPED_TRACE("glActiveTexture encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1159,6 +1199,7 @@
 
 void glAlphaFuncx_enc(void *self , GLenum func, GLclampx ref)
 {
+	AEMU_SCOPED_TRACE("glAlphaFuncx encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1185,6 +1226,7 @@
 
 void glBindBuffer_enc(void *self , GLenum target, GLuint buffer)
 {
+	AEMU_SCOPED_TRACE("glBindBuffer encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1211,6 +1253,7 @@
 
 void glBindTexture_enc(void *self , GLenum target, GLuint texture)
 {
+	AEMU_SCOPED_TRACE("glBindTexture encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1237,6 +1280,7 @@
 
 void glBlendFunc_enc(void *self , GLenum sfactor, GLenum dfactor)
 {
+	AEMU_SCOPED_TRACE("glBlendFunc encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1263,6 +1307,7 @@
 
 void glBufferData_enc(void *self , GLenum target, GLsizeiptr size, const GLvoid* data, GLenum usage)
 {
+	AEMU_SCOPED_TRACE("glBufferData encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1282,7 +1327,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &size, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 	if (data != NULL) memcpy(ptr, data, __size_data);ptr += __size_data;
 		memcpy(ptr, &usage, 4); ptr += 4;
 
@@ -1293,6 +1338,7 @@
 
 void glBufferSubData_enc(void *self , GLenum target, GLintptr offset, GLsizeiptr size, const GLvoid* data)
 {
+	AEMU_SCOPED_TRACE("glBufferSubData encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1313,7 +1359,7 @@
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &offset, 4); ptr += 4;
 		memcpy(ptr, &size, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 	if (data != NULL) memcpy(ptr, data, __size_data);ptr += __size_data;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -1323,6 +1369,7 @@
 
 void glClear_enc(void *self , GLbitfield mask)
 {
+	AEMU_SCOPED_TRACE("glClear encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1348,6 +1395,7 @@
 
 void glClearColorx_enc(void *self , GLclampx red, GLclampx green, GLclampx blue, GLclampx alpha)
 {
+	AEMU_SCOPED_TRACE("glClearColorx encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1376,6 +1424,7 @@
 
 void glClearDepthx_enc(void *self , GLclampx depth)
 {
+	AEMU_SCOPED_TRACE("glClearDepthx encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1401,6 +1450,7 @@
 
 void glClearStencil_enc(void *self , GLint s)
 {
+	AEMU_SCOPED_TRACE("glClearStencil encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1426,6 +1476,7 @@
 
 void glClientActiveTexture_enc(void *self , GLenum texture)
 {
+	AEMU_SCOPED_TRACE("glClientActiveTexture encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1451,6 +1502,7 @@
 
 void glColor4ub_enc(void *self , GLubyte red, GLubyte green, GLubyte blue, GLubyte alpha)
 {
+	AEMU_SCOPED_TRACE("glColor4ub encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1479,6 +1531,7 @@
 
 void glColor4x_enc(void *self , GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha)
 {
+	AEMU_SCOPED_TRACE("glColor4x encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1507,6 +1560,7 @@
 
 void glColorMask_enc(void *self , GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha)
 {
+	AEMU_SCOPED_TRACE("glColorMask encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1535,6 +1589,7 @@
 
 void glCompressedTexImage2D_enc(void *self , GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const GLvoid* data)
 {
+	AEMU_SCOPED_TRACE("glCompressedTexImage2D encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1559,7 +1614,7 @@
 		memcpy(ptr, &height, 4); ptr += 4;
 		memcpy(ptr, &border, 4); ptr += 4;
 		memcpy(ptr, &imageSize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 	if (data != NULL) memcpy(ptr, data, __size_data);ptr += __size_data;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -1569,6 +1624,7 @@
 
 void glCompressedTexSubImage2D_enc(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const GLvoid* data)
 {
+	AEMU_SCOPED_TRACE("glCompressedTexSubImage2D encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1594,7 +1650,7 @@
 		memcpy(ptr, &height, 4); ptr += 4;
 		memcpy(ptr, &format, 4); ptr += 4;
 		memcpy(ptr, &imageSize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 	if (data != NULL) memcpy(ptr, data, __size_data);ptr += __size_data;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -1604,6 +1660,7 @@
 
 void glCopyTexImage2D_enc(void *self , GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border)
 {
+	AEMU_SCOPED_TRACE("glCopyTexImage2D encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1636,6 +1693,7 @@
 
 void glCopyTexSubImage2D_enc(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height)
 {
+	AEMU_SCOPED_TRACE("glCopyTexSubImage2D encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1668,6 +1726,7 @@
 
 void glCullFace_enc(void *self , GLenum mode)
 {
+	AEMU_SCOPED_TRACE("glCullFace encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1693,6 +1752,7 @@
 
 void glDeleteBuffers_enc(void *self , GLsizei n, const GLuint* buffers)
 {
+	AEMU_SCOPED_TRACE("glDeleteBuffers encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1711,7 +1771,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_buffers; ptr += 4;
+	memcpy(ptr, &__size_buffers, 4); ptr += 4;
 	memcpy(ptr, buffers, __size_buffers);ptr += __size_buffers;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -1721,6 +1781,7 @@
 
 void glDeleteTextures_enc(void *self , GLsizei n, const GLuint* textures)
 {
+	AEMU_SCOPED_TRACE("glDeleteTextures encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1739,7 +1800,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_textures; ptr += 4;
+	memcpy(ptr, &__size_textures, 4); ptr += 4;
 	memcpy(ptr, textures, __size_textures);ptr += __size_textures;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -1749,6 +1810,7 @@
 
 void glDepthFunc_enc(void *self , GLenum func)
 {
+	AEMU_SCOPED_TRACE("glDepthFunc encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1774,6 +1836,7 @@
 
 void glDepthMask_enc(void *self , GLboolean flag)
 {
+	AEMU_SCOPED_TRACE("glDepthMask encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1799,6 +1862,7 @@
 
 void glDepthRangex_enc(void *self , GLclampx zNear, GLclampx zFar)
 {
+	AEMU_SCOPED_TRACE("glDepthRangex encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1825,6 +1889,7 @@
 
 void glDisable_enc(void *self , GLenum cap)
 {
+	AEMU_SCOPED_TRACE("glDisable encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1850,6 +1915,7 @@
 
 void glDisableClientState_enc(void *self , GLenum array)
 {
+	AEMU_SCOPED_TRACE("glDisableClientState encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1875,6 +1941,7 @@
 
 void glDrawArrays_enc(void *self , GLenum mode, GLint first, GLsizei count)
 {
+	AEMU_SCOPED_TRACE("glDrawArrays encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1902,6 +1969,7 @@
 
 void glEnable_enc(void *self , GLenum cap)
 {
+	AEMU_SCOPED_TRACE("glEnable encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1927,6 +1995,7 @@
 
 void glEnableClientState_enc(void *self , GLenum array)
 {
+	AEMU_SCOPED_TRACE("glEnableClientState encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1952,6 +2021,7 @@
 
 void glFinish_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glFinish encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1976,6 +2046,7 @@
 
 void glFlush_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glFlush encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2000,6 +2071,7 @@
 
 void glFogx_enc(void *self , GLenum pname, GLfixed param)
 {
+	AEMU_SCOPED_TRACE("glFogx encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2026,6 +2098,7 @@
 
 void glFogxv_enc(void *self , GLenum pname, const GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glFogxv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2044,7 +2117,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -2054,6 +2127,7 @@
 
 void glFrontFace_enc(void *self , GLenum mode)
 {
+	AEMU_SCOPED_TRACE("glFrontFace encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2079,6 +2153,7 @@
 
 void glFrustumx_enc(void *self , GLfixed left, GLfixed right, GLfixed bottom, GLfixed top, GLfixed zNear, GLfixed zFar)
 {
+	AEMU_SCOPED_TRACE("glFrustumx encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2109,6 +2184,7 @@
 
 void glGetBooleanv_enc(void *self , GLenum pname, GLboolean* params)
 {
+	AEMU_SCOPED_TRACE("glGetBooleanv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2127,7 +2203,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2148,6 +2224,7 @@
 
 void glGetBufferParameteriv_enc(void *self , GLenum target, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetBufferParameteriv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2167,7 +2244,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2188,6 +2265,7 @@
 
 void glClipPlanex_enc(void *self , GLenum pname, const GLfixed* eqn)
 {
+	AEMU_SCOPED_TRACE("glClipPlanex encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2206,7 +2284,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_eqn; ptr += 4;
+	memcpy(ptr, &__size_eqn, 4); ptr += 4;
 	memcpy(ptr, eqn, __size_eqn);ptr += __size_eqn;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -2216,6 +2294,7 @@
 
 void glGenBuffers_enc(void *self , GLsizei n, GLuint* buffers)
 {
+	AEMU_SCOPED_TRACE("glGenBuffers encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2234,7 +2313,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_buffers; ptr += 4;
+	memcpy(ptr, &__size_buffers, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2255,6 +2334,7 @@
 
 void glGenTextures_enc(void *self , GLsizei n, GLuint* textures)
 {
+	AEMU_SCOPED_TRACE("glGenTextures encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2273,7 +2353,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_textures; ptr += 4;
+	memcpy(ptr, &__size_textures, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2294,6 +2374,7 @@
 
 GLenum glGetError_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glGetError encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2333,6 +2414,7 @@
 
 void glGetFixedv_enc(void *self , GLenum pname, GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glGetFixedv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2351,7 +2433,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2372,6 +2454,7 @@
 
 void glGetIntegerv_enc(void *self , GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetIntegerv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2390,7 +2473,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2411,6 +2494,7 @@
 
 void glGetLightxv_enc(void *self , GLenum light, GLenum pname, GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glGetLightxv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2430,7 +2514,7 @@
 
 		memcpy(ptr, &light, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2451,6 +2535,7 @@
 
 void glGetMaterialxv_enc(void *self , GLenum face, GLenum pname, GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glGetMaterialxv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2470,7 +2555,7 @@
 
 		memcpy(ptr, &face, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2491,6 +2576,7 @@
 
 void glGetTexEnviv_enc(void *self , GLenum env, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetTexEnviv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2510,7 +2596,7 @@
 
 		memcpy(ptr, &env, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2531,6 +2617,7 @@
 
 void glGetTexEnvxv_enc(void *self , GLenum env, GLenum pname, GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glGetTexEnvxv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2550,7 +2637,7 @@
 
 		memcpy(ptr, &env, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2571,6 +2658,7 @@
 
 void glGetTexParameteriv_enc(void *self , GLenum target, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetTexParameteriv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2590,7 +2678,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2611,6 +2699,7 @@
 
 void glGetTexParameterxv_enc(void *self , GLenum target, GLenum pname, GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glGetTexParameterxv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2630,7 +2719,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2651,6 +2740,7 @@
 
 void glHint_enc(void *self , GLenum target, GLenum mode)
 {
+	AEMU_SCOPED_TRACE("glHint encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2677,6 +2767,7 @@
 
 GLboolean glIsBuffer_enc(void *self , GLuint buffer)
 {
+	AEMU_SCOPED_TRACE("glIsBuffer encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2717,6 +2808,7 @@
 
 GLboolean glIsEnabled_enc(void *self , GLenum cap)
 {
+	AEMU_SCOPED_TRACE("glIsEnabled encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2757,6 +2849,7 @@
 
 GLboolean glIsTexture_enc(void *self , GLuint texture)
 {
+	AEMU_SCOPED_TRACE("glIsTexture encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2797,6 +2890,7 @@
 
 void glLightModelx_enc(void *self , GLenum pname, GLfixed param)
 {
+	AEMU_SCOPED_TRACE("glLightModelx encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2823,6 +2917,7 @@
 
 void glLightModelxv_enc(void *self , GLenum pname, const GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glLightModelxv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2841,7 +2936,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -2851,6 +2946,7 @@
 
 void glLightx_enc(void *self , GLenum light, GLenum pname, GLfixed param)
 {
+	AEMU_SCOPED_TRACE("glLightx encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2878,6 +2974,7 @@
 
 void glLightxv_enc(void *self , GLenum light, GLenum pname, const GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glLightxv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2897,7 +2994,7 @@
 
 		memcpy(ptr, &light, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -2907,6 +3004,7 @@
 
 void glLineWidthx_enc(void *self , GLfixed width)
 {
+	AEMU_SCOPED_TRACE("glLineWidthx encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2932,6 +3030,7 @@
 
 void glLoadIdentity_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glLoadIdentity encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2956,6 +3055,7 @@
 
 void glLoadMatrixx_enc(void *self , const GLfixed* m)
 {
+	AEMU_SCOPED_TRACE("glLoadMatrixx encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2973,7 +3073,7 @@
 	int tmp = OP_glLoadMatrixx;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_m; ptr += 4;
+	memcpy(ptr, &__size_m, 4); ptr += 4;
 	memcpy(ptr, m, __size_m);ptr += __size_m;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -2983,6 +3083,7 @@
 
 void glLogicOp_enc(void *self , GLenum opcode)
 {
+	AEMU_SCOPED_TRACE("glLogicOp encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3008,6 +3109,7 @@
 
 void glMaterialx_enc(void *self , GLenum face, GLenum pname, GLfixed param)
 {
+	AEMU_SCOPED_TRACE("glMaterialx encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3035,6 +3137,7 @@
 
 void glMaterialxv_enc(void *self , GLenum face, GLenum pname, const GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glMaterialxv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3054,7 +3157,7 @@
 
 		memcpy(ptr, &face, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3064,6 +3167,7 @@
 
 void glMatrixMode_enc(void *self , GLenum mode)
 {
+	AEMU_SCOPED_TRACE("glMatrixMode encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3089,6 +3193,7 @@
 
 void glMultMatrixx_enc(void *self , const GLfixed* m)
 {
+	AEMU_SCOPED_TRACE("glMultMatrixx encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3106,7 +3211,7 @@
 	int tmp = OP_glMultMatrixx;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_m; ptr += 4;
+	memcpy(ptr, &__size_m, 4); ptr += 4;
 	memcpy(ptr, m, __size_m);ptr += __size_m;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3116,6 +3221,7 @@
 
 void glMultiTexCoord4x_enc(void *self , GLenum target, GLfixed s, GLfixed t, GLfixed r, GLfixed q)
 {
+	AEMU_SCOPED_TRACE("glMultiTexCoord4x encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3145,6 +3251,7 @@
 
 void glNormal3x_enc(void *self , GLfixed nx, GLfixed ny, GLfixed nz)
 {
+	AEMU_SCOPED_TRACE("glNormal3x encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3172,6 +3279,7 @@
 
 void glOrthox_enc(void *self , GLfixed left, GLfixed right, GLfixed bottom, GLfixed top, GLfixed zNear, GLfixed zFar)
 {
+	AEMU_SCOPED_TRACE("glOrthox encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3202,6 +3310,7 @@
 
 void glPixelStorei_enc(void *self , GLenum pname, GLint param)
 {
+	AEMU_SCOPED_TRACE("glPixelStorei encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3228,6 +3337,7 @@
 
 void glPointParameterx_enc(void *self , GLenum pname, GLfixed param)
 {
+	AEMU_SCOPED_TRACE("glPointParameterx encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3254,6 +3364,7 @@
 
 void glPointParameterxv_enc(void *self , GLenum pname, const GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glPointParameterxv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3272,7 +3383,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3282,6 +3393,7 @@
 
 void glPointSizex_enc(void *self , GLfixed size)
 {
+	AEMU_SCOPED_TRACE("glPointSizex encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3307,6 +3419,7 @@
 
 void glPolygonOffsetx_enc(void *self , GLfixed factor, GLfixed units)
 {
+	AEMU_SCOPED_TRACE("glPolygonOffsetx encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3333,6 +3446,7 @@
 
 void glPopMatrix_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glPopMatrix encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3357,6 +3471,7 @@
 
 void glPushMatrix_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glPushMatrix encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3381,6 +3496,7 @@
 
 void glReadPixels_enc(void *self , GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLvoid* pixels)
 {
+	AEMU_SCOPED_TRACE("glReadPixels encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3404,7 +3520,7 @@
 		memcpy(ptr, &height, 4); ptr += 4;
 		memcpy(ptr, &format, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_pixels; ptr += 4;
+	memcpy(ptr, &__size_pixels, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -3425,6 +3541,7 @@
 
 void glRotatex_enc(void *self , GLfixed angle, GLfixed x, GLfixed y, GLfixed z)
 {
+	AEMU_SCOPED_TRACE("glRotatex encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3453,6 +3570,7 @@
 
 void glSampleCoverage_enc(void *self , GLclampf value, GLboolean invert)
 {
+	AEMU_SCOPED_TRACE("glSampleCoverage encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3479,6 +3597,7 @@
 
 void glSampleCoveragex_enc(void *self , GLclampx value, GLboolean invert)
 {
+	AEMU_SCOPED_TRACE("glSampleCoveragex encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3505,6 +3624,7 @@
 
 void glScalex_enc(void *self , GLfixed x, GLfixed y, GLfixed z)
 {
+	AEMU_SCOPED_TRACE("glScalex encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3532,6 +3652,7 @@
 
 void glScissor_enc(void *self , GLint x, GLint y, GLsizei width, GLsizei height)
 {
+	AEMU_SCOPED_TRACE("glScissor encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3560,6 +3681,7 @@
 
 void glShadeModel_enc(void *self , GLenum mode)
 {
+	AEMU_SCOPED_TRACE("glShadeModel encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3585,6 +3707,7 @@
 
 void glStencilFunc_enc(void *self , GLenum func, GLint ref, GLuint mask)
 {
+	AEMU_SCOPED_TRACE("glStencilFunc encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3612,6 +3735,7 @@
 
 void glStencilMask_enc(void *self , GLuint mask)
 {
+	AEMU_SCOPED_TRACE("glStencilMask encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3637,6 +3761,7 @@
 
 void glStencilOp_enc(void *self , GLenum fail, GLenum zfail, GLenum zpass)
 {
+	AEMU_SCOPED_TRACE("glStencilOp encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3664,6 +3789,7 @@
 
 void glTexEnvi_enc(void *self , GLenum target, GLenum pname, GLint param)
 {
+	AEMU_SCOPED_TRACE("glTexEnvi encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3691,6 +3817,7 @@
 
 void glTexEnvx_enc(void *self , GLenum target, GLenum pname, GLfixed param)
 {
+	AEMU_SCOPED_TRACE("glTexEnvx encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3718,6 +3845,7 @@
 
 void glTexEnviv_enc(void *self , GLenum target, GLenum pname, const GLint* params)
 {
+	AEMU_SCOPED_TRACE("glTexEnviv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3737,7 +3865,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3747,6 +3875,7 @@
 
 void glTexEnvxv_enc(void *self , GLenum target, GLenum pname, const GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glTexEnvxv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3766,7 +3895,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3776,6 +3905,7 @@
 
 void glTexImage2D_enc(void *self , GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const GLvoid* pixels)
 {
+	AEMU_SCOPED_TRACE("glTexImage2D encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3817,6 +3947,7 @@
 
 void glTexParameteri_enc(void *self , GLenum target, GLenum pname, GLint param)
 {
+	AEMU_SCOPED_TRACE("glTexParameteri encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3844,6 +3975,7 @@
 
 void glTexParameterx_enc(void *self , GLenum target, GLenum pname, GLfixed param)
 {
+	AEMU_SCOPED_TRACE("glTexParameterx encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3871,6 +4003,7 @@
 
 void glTexParameteriv_enc(void *self , GLenum target, GLenum pname, const GLint* params)
 {
+	AEMU_SCOPED_TRACE("glTexParameteriv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3890,7 +4023,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3900,6 +4033,7 @@
 
 void glTexParameterxv_enc(void *self , GLenum target, GLenum pname, const GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glTexParameterxv encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3919,7 +4053,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3929,6 +4063,7 @@
 
 void glTexSubImage2D_enc(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* pixels)
 {
+	AEMU_SCOPED_TRACE("glTexSubImage2D encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3970,6 +4105,7 @@
 
 void glTranslatex_enc(void *self , GLfixed x, GLfixed y, GLfixed z)
 {
+	AEMU_SCOPED_TRACE("glTranslatex encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3997,6 +4133,7 @@
 
 void glViewport_enc(void *self , GLint x, GLint y, GLsizei width, GLsizei height)
 {
+	AEMU_SCOPED_TRACE("glViewport encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4025,6 +4162,7 @@
 
 void glVertexPointerOffset_enc(void *self , GLint size, GLenum type, GLsizei stride, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glVertexPointerOffset encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4053,6 +4191,7 @@
 
 void glColorPointerOffset_enc(void *self , GLint size, GLenum type, GLsizei stride, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glColorPointerOffset encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4081,6 +4220,7 @@
 
 void glNormalPointerOffset_enc(void *self , GLenum type, GLsizei stride, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glNormalPointerOffset encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4108,6 +4248,7 @@
 
 void glPointSizePointerOffset_enc(void *self , GLenum type, GLsizei stride, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glPointSizePointerOffset encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4135,6 +4276,7 @@
 
 void glTexCoordPointerOffset_enc(void *self , GLint size, GLenum type, GLsizei stride, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glTexCoordPointerOffset encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4163,6 +4305,7 @@
 
 void glWeightPointerOffset_enc(void *self , GLint size, GLenum type, GLsizei stride, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glWeightPointerOffset encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4191,6 +4334,7 @@
 
 void glMatrixIndexPointerOffset_enc(void *self , GLint size, GLenum type, GLsizei stride, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glMatrixIndexPointerOffset encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4219,6 +4363,7 @@
 
 void glVertexPointerData_enc(void *self , GLint size, GLenum type, GLsizei stride, void* data, GLuint datalen)
 {
+	AEMU_SCOPED_TRACE("glVertexPointerData encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4239,7 +4384,7 @@
 		memcpy(ptr, &size, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
 		memcpy(ptr, &stride, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 	 glUtilsPackPointerData((unsigned char *)ptr, (unsigned char *)data, size, type, stride, datalen);ptr += __size_data;
 		memcpy(ptr, &datalen, 4); ptr += 4;
 
@@ -4250,6 +4395,7 @@
 
 void glColorPointerData_enc(void *self , GLint size, GLenum type, GLsizei stride, void* data, GLuint datalen)
 {
+	AEMU_SCOPED_TRACE("glColorPointerData encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4270,7 +4416,7 @@
 		memcpy(ptr, &size, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
 		memcpy(ptr, &stride, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 	 glUtilsPackPointerData((unsigned char *)ptr, (unsigned char *)data, size, type, stride, datalen);ptr += __size_data;
 		memcpy(ptr, &datalen, 4); ptr += 4;
 
@@ -4281,6 +4427,7 @@
 
 void glNormalPointerData_enc(void *self , GLenum type, GLsizei stride, void* data, GLuint datalen)
 {
+	AEMU_SCOPED_TRACE("glNormalPointerData encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4300,7 +4447,7 @@
 
 		memcpy(ptr, &type, 4); ptr += 4;
 		memcpy(ptr, &stride, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 	 glUtilsPackPointerData((unsigned char *)ptr, (unsigned char *)data, 3, type, stride, datalen);ptr += __size_data;
 		memcpy(ptr, &datalen, 4); ptr += 4;
 
@@ -4311,6 +4458,7 @@
 
 void glTexCoordPointerData_enc(void *self , GLint unit, GLint size, GLenum type, GLsizei stride, void* data, GLuint datalen)
 {
+	AEMU_SCOPED_TRACE("glTexCoordPointerData encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4332,7 +4480,7 @@
 		memcpy(ptr, &size, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
 		memcpy(ptr, &stride, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 	 glUtilsPackPointerData((unsigned char *)ptr, (unsigned char *)data, size, type, stride, datalen);ptr += __size_data;
 		memcpy(ptr, &datalen, 4); ptr += 4;
 
@@ -4343,6 +4491,7 @@
 
 void glPointSizePointerData_enc(void *self , GLenum type, GLsizei stride, void* data, GLuint datalen)
 {
+	AEMU_SCOPED_TRACE("glPointSizePointerData encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4362,7 +4511,7 @@
 
 		memcpy(ptr, &type, 4); ptr += 4;
 		memcpy(ptr, &stride, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 	 glUtilsPackPointerData((unsigned char *)ptr, (unsigned char *)data, 1, type, stride, datalen);ptr += __size_data;
 		memcpy(ptr, &datalen, 4); ptr += 4;
 
@@ -4373,6 +4522,7 @@
 
 void glWeightPointerData_enc(void *self , GLint size, GLenum type, GLsizei stride, void* data, GLuint datalen)
 {
+	AEMU_SCOPED_TRACE("glWeightPointerData encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4393,7 +4543,7 @@
 		memcpy(ptr, &size, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
 		memcpy(ptr, &stride, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 	 glUtilsPackPointerData((unsigned char *)ptr, (unsigned char*)data, size, type, stride, datalen);ptr += __size_data;
 		memcpy(ptr, &datalen, 4); ptr += 4;
 
@@ -4404,6 +4554,7 @@
 
 void glMatrixIndexPointerData_enc(void *self , GLint size, GLenum type, GLsizei stride, void* data, GLuint datalen)
 {
+	AEMU_SCOPED_TRACE("glMatrixIndexPointerData encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4424,7 +4575,7 @@
 		memcpy(ptr, &size, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
 		memcpy(ptr, &stride, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 	 glUtilsPackPointerData((unsigned char *)ptr, (unsigned char*)data, size, type, stride, datalen);ptr += __size_data;
 		memcpy(ptr, &datalen, 4); ptr += 4;
 
@@ -4435,6 +4586,7 @@
 
 void glDrawElementsOffset_enc(void *self , GLenum mode, GLsizei count, GLenum type, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glDrawElementsOffset encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4463,6 +4615,7 @@
 
 void glDrawElementsData_enc(void *self , GLenum mode, GLsizei count, GLenum type, void* data, GLuint datalen)
 {
+	AEMU_SCOPED_TRACE("glDrawElementsData encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4483,7 +4636,7 @@
 		memcpy(ptr, &mode, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 	memcpy(ptr, data, __size_data);ptr += __size_data;
 		memcpy(ptr, &datalen, 4); ptr += 4;
 
@@ -4494,6 +4647,7 @@
 
 void glGetCompressedTextureFormats_enc(void *self , int count, GLint* formats)
 {
+	AEMU_SCOPED_TRACE("glGetCompressedTextureFormats encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4512,7 +4666,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_formats; ptr += 4;
+	memcpy(ptr, &__size_formats, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -4533,6 +4687,7 @@
 
 int glFinishRoundTrip_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glFinishRoundTrip encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4572,6 +4727,7 @@
 
 void glBlendEquationSeparateOES_enc(void *self , GLenum modeRGB, GLenum modeAlpha)
 {
+	AEMU_SCOPED_TRACE("glBlendEquationSeparateOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4598,6 +4754,7 @@
 
 void glBlendFuncSeparateOES_enc(void *self , GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha)
 {
+	AEMU_SCOPED_TRACE("glBlendFuncSeparateOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4626,6 +4783,7 @@
 
 void glBlendEquationOES_enc(void *self , GLenum mode)
 {
+	AEMU_SCOPED_TRACE("glBlendEquationOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4651,6 +4809,7 @@
 
 void glDrawTexsOES_enc(void *self , GLshort x, GLshort y, GLshort z, GLshort width, GLshort height)
 {
+	AEMU_SCOPED_TRACE("glDrawTexsOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4680,6 +4839,7 @@
 
 void glDrawTexiOES_enc(void *self , GLint x, GLint y, GLint z, GLint width, GLint height)
 {
+	AEMU_SCOPED_TRACE("glDrawTexiOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4709,6 +4869,7 @@
 
 void glDrawTexxOES_enc(void *self , GLfixed x, GLfixed y, GLfixed z, GLfixed width, GLfixed height)
 {
+	AEMU_SCOPED_TRACE("glDrawTexxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4738,6 +4899,7 @@
 
 void glDrawTexsvOES_enc(void *self , const GLshort* coords)
 {
+	AEMU_SCOPED_TRACE("glDrawTexsvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4755,7 +4917,7 @@
 	int tmp = OP_glDrawTexsvOES;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_coords; ptr += 4;
+	memcpy(ptr, &__size_coords, 4); ptr += 4;
 	memcpy(ptr, coords, __size_coords);ptr += __size_coords;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -4765,6 +4927,7 @@
 
 void glDrawTexivOES_enc(void *self , const GLint* coords)
 {
+	AEMU_SCOPED_TRACE("glDrawTexivOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4782,7 +4945,7 @@
 	int tmp = OP_glDrawTexivOES;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_coords; ptr += 4;
+	memcpy(ptr, &__size_coords, 4); ptr += 4;
 	memcpy(ptr, coords, __size_coords);ptr += __size_coords;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -4792,6 +4955,7 @@
 
 void glDrawTexxvOES_enc(void *self , const GLfixed* coords)
 {
+	AEMU_SCOPED_TRACE("glDrawTexxvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4809,7 +4973,7 @@
 	int tmp = OP_glDrawTexxvOES;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_coords; ptr += 4;
+	memcpy(ptr, &__size_coords, 4); ptr += 4;
 	memcpy(ptr, coords, __size_coords);ptr += __size_coords;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -4819,6 +4983,7 @@
 
 void glDrawTexfOES_enc(void *self , GLfloat x, GLfloat y, GLfloat z, GLfloat width, GLfloat height)
 {
+	AEMU_SCOPED_TRACE("glDrawTexfOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4848,6 +5013,7 @@
 
 void glDrawTexfvOES_enc(void *self , const GLfloat* coords)
 {
+	AEMU_SCOPED_TRACE("glDrawTexfvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4865,7 +5031,7 @@
 	int tmp = OP_glDrawTexfvOES;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_coords; ptr += 4;
+	memcpy(ptr, &__size_coords, 4); ptr += 4;
 	memcpy(ptr, coords, __size_coords);ptr += __size_coords;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -4875,6 +5041,7 @@
 
 void glEGLImageTargetTexture2DOES_enc(void *self , GLenum target, GLeglImageOES image)
 {
+	AEMU_SCOPED_TRACE("glEGLImageTargetTexture2DOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4901,6 +5068,7 @@
 
 void glEGLImageTargetRenderbufferStorageOES_enc(void *self , GLenum target, GLeglImageOES image)
 {
+	AEMU_SCOPED_TRACE("glEGLImageTargetRenderbufferStorageOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4927,6 +5095,7 @@
 
 void glAlphaFuncxOES_enc(void *self , GLenum func, GLclampx ref)
 {
+	AEMU_SCOPED_TRACE("glAlphaFuncxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4953,6 +5122,7 @@
 
 void glClearColorxOES_enc(void *self , GLclampx red, GLclampx green, GLclampx blue, GLclampx alpha)
 {
+	AEMU_SCOPED_TRACE("glClearColorxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4981,6 +5151,7 @@
 
 void glClearDepthxOES_enc(void *self , GLclampx depth)
 {
+	AEMU_SCOPED_TRACE("glClearDepthxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5006,6 +5177,7 @@
 
 void glClipPlanexOES_enc(void *self , GLenum plane, const GLfixed* equation)
 {
+	AEMU_SCOPED_TRACE("glClipPlanexOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5024,7 +5196,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &plane, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_equation; ptr += 4;
+	memcpy(ptr, &__size_equation, 4); ptr += 4;
 	memcpy(ptr, equation, __size_equation);ptr += __size_equation;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -5034,6 +5206,7 @@
 
 void glClipPlanexIMG_enc(void *self , GLenum plane, const GLfixed* equation)
 {
+	AEMU_SCOPED_TRACE("glClipPlanexIMG encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5052,7 +5225,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &plane, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_equation; ptr += 4;
+	memcpy(ptr, &__size_equation, 4); ptr += 4;
 	memcpy(ptr, equation, __size_equation);ptr += __size_equation;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -5062,6 +5235,7 @@
 
 void glColor4xOES_enc(void *self , GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha)
 {
+	AEMU_SCOPED_TRACE("glColor4xOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5090,6 +5264,7 @@
 
 void glDepthRangexOES_enc(void *self , GLclampx zNear, GLclampx zFar)
 {
+	AEMU_SCOPED_TRACE("glDepthRangexOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5116,6 +5291,7 @@
 
 void glFogxOES_enc(void *self , GLenum pname, GLfixed param)
 {
+	AEMU_SCOPED_TRACE("glFogxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5142,6 +5318,7 @@
 
 void glFogxvOES_enc(void *self , GLenum pname, const GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glFogxvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5160,7 +5337,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -5170,6 +5347,7 @@
 
 void glFrustumxOES_enc(void *self , GLfixed left, GLfixed right, GLfixed bottom, GLfixed top, GLfixed zNear, GLfixed zFar)
 {
+	AEMU_SCOPED_TRACE("glFrustumxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5200,6 +5378,7 @@
 
 void glGetClipPlanexOES_enc(void *self , GLenum pname, GLfixed* eqn)
 {
+	AEMU_SCOPED_TRACE("glGetClipPlanexOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5218,7 +5397,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_eqn; ptr += 4;
+	memcpy(ptr, &__size_eqn, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -5239,6 +5418,7 @@
 
 void glGetClipPlanex_enc(void *self , GLenum pname, GLfixed* eqn)
 {
+	AEMU_SCOPED_TRACE("glGetClipPlanex encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5257,7 +5437,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_eqn; ptr += 4;
+	memcpy(ptr, &__size_eqn, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -5278,6 +5458,7 @@
 
 void glGetFixedvOES_enc(void *self , GLenum pname, GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glGetFixedvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5296,7 +5477,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -5317,6 +5498,7 @@
 
 void glGetLightxvOES_enc(void *self , GLenum light, GLenum pname, GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glGetLightxvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5336,7 +5518,7 @@
 
 		memcpy(ptr, &light, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -5357,6 +5539,7 @@
 
 void glGetMaterialxvOES_enc(void *self , GLenum face, GLenum pname, GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glGetMaterialxvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5376,7 +5559,7 @@
 
 		memcpy(ptr, &face, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -5397,6 +5580,7 @@
 
 void glGetTexEnvxvOES_enc(void *self , GLenum env, GLenum pname, GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glGetTexEnvxvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5416,7 +5600,7 @@
 
 		memcpy(ptr, &env, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -5437,6 +5621,7 @@
 
 void glGetTexParameterxvOES_enc(void *self , GLenum target, GLenum pname, GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glGetTexParameterxvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5456,7 +5641,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -5477,6 +5662,7 @@
 
 void glLightModelxOES_enc(void *self , GLenum pname, GLfixed param)
 {
+	AEMU_SCOPED_TRACE("glLightModelxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5503,6 +5689,7 @@
 
 void glLightModelxvOES_enc(void *self , GLenum pname, const GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glLightModelxvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5521,7 +5708,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -5531,6 +5718,7 @@
 
 void glLightxOES_enc(void *self , GLenum light, GLenum pname, GLfixed param)
 {
+	AEMU_SCOPED_TRACE("glLightxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5558,6 +5746,7 @@
 
 void glLightxvOES_enc(void *self , GLenum light, GLenum pname, const GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glLightxvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5577,7 +5766,7 @@
 
 		memcpy(ptr, &light, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -5587,6 +5776,7 @@
 
 void glLineWidthxOES_enc(void *self , GLfixed width)
 {
+	AEMU_SCOPED_TRACE("glLineWidthxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5612,6 +5802,7 @@
 
 void glLoadMatrixxOES_enc(void *self , const GLfixed* m)
 {
+	AEMU_SCOPED_TRACE("glLoadMatrixxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5629,7 +5820,7 @@
 	int tmp = OP_glLoadMatrixxOES;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_m; ptr += 4;
+	memcpy(ptr, &__size_m, 4); ptr += 4;
 	memcpy(ptr, m, __size_m);ptr += __size_m;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -5639,6 +5830,7 @@
 
 void glMaterialxOES_enc(void *self , GLenum face, GLenum pname, GLfixed param)
 {
+	AEMU_SCOPED_TRACE("glMaterialxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5666,6 +5858,7 @@
 
 void glMaterialxvOES_enc(void *self , GLenum face, GLenum pname, const GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glMaterialxvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5685,7 +5878,7 @@
 
 		memcpy(ptr, &face, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -5695,6 +5888,7 @@
 
 void glMultMatrixxOES_enc(void *self , const GLfixed* m)
 {
+	AEMU_SCOPED_TRACE("glMultMatrixxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5712,7 +5906,7 @@
 	int tmp = OP_glMultMatrixxOES;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_m; ptr += 4;
+	memcpy(ptr, &__size_m, 4); ptr += 4;
 	memcpy(ptr, m, __size_m);ptr += __size_m;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -5722,6 +5916,7 @@
 
 void glMultiTexCoord4xOES_enc(void *self , GLenum target, GLfixed s, GLfixed t, GLfixed r, GLfixed q)
 {
+	AEMU_SCOPED_TRACE("glMultiTexCoord4xOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5751,6 +5946,7 @@
 
 void glNormal3xOES_enc(void *self , GLfixed nx, GLfixed ny, GLfixed nz)
 {
+	AEMU_SCOPED_TRACE("glNormal3xOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5778,6 +5974,7 @@
 
 void glOrthoxOES_enc(void *self , GLfixed left, GLfixed right, GLfixed bottom, GLfixed top, GLfixed zNear, GLfixed zFar)
 {
+	AEMU_SCOPED_TRACE("glOrthoxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5808,6 +6005,7 @@
 
 void glPointParameterxOES_enc(void *self , GLenum pname, GLfixed param)
 {
+	AEMU_SCOPED_TRACE("glPointParameterxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5834,6 +6032,7 @@
 
 void glPointParameterxvOES_enc(void *self , GLenum pname, const GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glPointParameterxvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5852,7 +6051,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -5862,6 +6061,7 @@
 
 void glPointSizexOES_enc(void *self , GLfixed size)
 {
+	AEMU_SCOPED_TRACE("glPointSizexOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5887,6 +6087,7 @@
 
 void glPolygonOffsetxOES_enc(void *self , GLfixed factor, GLfixed units)
 {
+	AEMU_SCOPED_TRACE("glPolygonOffsetxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5913,6 +6114,7 @@
 
 void glRotatexOES_enc(void *self , GLfixed angle, GLfixed x, GLfixed y, GLfixed z)
 {
+	AEMU_SCOPED_TRACE("glRotatexOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5941,6 +6143,7 @@
 
 void glSampleCoveragexOES_enc(void *self , GLclampx value, GLboolean invert)
 {
+	AEMU_SCOPED_TRACE("glSampleCoveragexOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5967,6 +6170,7 @@
 
 void glScalexOES_enc(void *self , GLfixed x, GLfixed y, GLfixed z)
 {
+	AEMU_SCOPED_TRACE("glScalexOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5994,6 +6198,7 @@
 
 void glTexEnvxOES_enc(void *self , GLenum target, GLenum pname, GLfixed param)
 {
+	AEMU_SCOPED_TRACE("glTexEnvxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6021,6 +6226,7 @@
 
 void glTexEnvxvOES_enc(void *self , GLenum target, GLenum pname, const GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glTexEnvxvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6040,7 +6246,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6050,6 +6256,7 @@
 
 void glTexParameterxOES_enc(void *self , GLenum target, GLenum pname, GLfixed param)
 {
+	AEMU_SCOPED_TRACE("glTexParameterxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6077,6 +6284,7 @@
 
 void glTexParameterxvOES_enc(void *self , GLenum target, GLenum pname, const GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glTexParameterxvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6096,7 +6304,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6106,6 +6314,7 @@
 
 void glTranslatexOES_enc(void *self , GLfixed x, GLfixed y, GLfixed z)
 {
+	AEMU_SCOPED_TRACE("glTranslatexOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6133,6 +6342,7 @@
 
 GLboolean glIsRenderbufferOES_enc(void *self , GLuint renderbuffer)
 {
+	AEMU_SCOPED_TRACE("glIsRenderbufferOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6173,6 +6383,7 @@
 
 void glBindRenderbufferOES_enc(void *self , GLenum target, GLuint renderbuffer)
 {
+	AEMU_SCOPED_TRACE("glBindRenderbufferOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6199,6 +6410,7 @@
 
 void glDeleteRenderbuffersOES_enc(void *self , GLsizei n, const GLuint* renderbuffers)
 {
+	AEMU_SCOPED_TRACE("glDeleteRenderbuffersOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6217,7 +6429,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_renderbuffers; ptr += 4;
+	memcpy(ptr, &__size_renderbuffers, 4); ptr += 4;
 	memcpy(ptr, renderbuffers, __size_renderbuffers);ptr += __size_renderbuffers;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6227,6 +6439,7 @@
 
 void glGenRenderbuffersOES_enc(void *self , GLsizei n, GLuint* renderbuffers)
 {
+	AEMU_SCOPED_TRACE("glGenRenderbuffersOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6245,7 +6458,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_renderbuffers; ptr += 4;
+	memcpy(ptr, &__size_renderbuffers, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -6266,6 +6479,7 @@
 
 void glRenderbufferStorageOES_enc(void *self , GLenum target, GLenum internalformat, GLsizei width, GLsizei height)
 {
+	AEMU_SCOPED_TRACE("glRenderbufferStorageOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6294,6 +6508,7 @@
 
 void glGetRenderbufferParameterivOES_enc(void *self , GLenum target, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetRenderbufferParameterivOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6313,7 +6528,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -6334,6 +6549,7 @@
 
 GLboolean glIsFramebufferOES_enc(void *self , GLuint framebuffer)
 {
+	AEMU_SCOPED_TRACE("glIsFramebufferOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6374,6 +6590,7 @@
 
 void glBindFramebufferOES_enc(void *self , GLenum target, GLuint framebuffer)
 {
+	AEMU_SCOPED_TRACE("glBindFramebufferOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6400,6 +6617,7 @@
 
 void glDeleteFramebuffersOES_enc(void *self , GLsizei n, const GLuint* framebuffers)
 {
+	AEMU_SCOPED_TRACE("glDeleteFramebuffersOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6418,7 +6636,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_framebuffers; ptr += 4;
+	memcpy(ptr, &__size_framebuffers, 4); ptr += 4;
 	memcpy(ptr, framebuffers, __size_framebuffers);ptr += __size_framebuffers;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6428,6 +6646,7 @@
 
 void glGenFramebuffersOES_enc(void *self , GLsizei n, GLuint* framebuffers)
 {
+	AEMU_SCOPED_TRACE("glGenFramebuffersOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6446,7 +6665,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_framebuffers; ptr += 4;
+	memcpy(ptr, &__size_framebuffers, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -6467,6 +6686,7 @@
 
 GLenum glCheckFramebufferStatusOES_enc(void *self , GLenum target)
 {
+	AEMU_SCOPED_TRACE("glCheckFramebufferStatusOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6507,6 +6727,7 @@
 
 void glFramebufferRenderbufferOES_enc(void *self , GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer)
 {
+	AEMU_SCOPED_TRACE("glFramebufferRenderbufferOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6535,6 +6756,7 @@
 
 void glFramebufferTexture2DOES_enc(void *self , GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level)
 {
+	AEMU_SCOPED_TRACE("glFramebufferTexture2DOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6564,6 +6786,7 @@
 
 void glGetFramebufferAttachmentParameterivOES_enc(void *self , GLenum target, GLenum attachment, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetFramebufferAttachmentParameterivOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6584,7 +6807,7 @@
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &attachment, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -6605,6 +6828,7 @@
 
 void glGenerateMipmapOES_enc(void *self , GLenum target)
 {
+	AEMU_SCOPED_TRACE("glGenerateMipmapOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6630,6 +6854,7 @@
 
 GLboolean glUnmapBufferOES_enc(void *self , GLenum target)
 {
+	AEMU_SCOPED_TRACE("glUnmapBufferOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6670,6 +6895,7 @@
 
 void glCurrentPaletteMatrixOES_enc(void *self , GLuint matrixpaletteindex)
 {
+	AEMU_SCOPED_TRACE("glCurrentPaletteMatrixOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6695,6 +6921,7 @@
 
 void glLoadPaletteFromModelViewMatrixOES_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glLoadPaletteFromModelViewMatrixOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6719,6 +6946,7 @@
 
 GLbitfield glQueryMatrixxOES_enc(void *self , GLfixed* mantissa, GLint* exponent)
 {
+	AEMU_SCOPED_TRACE("glQueryMatrixxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6737,8 +6965,8 @@
 	int tmp = OP_glQueryMatrixxOES;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_mantissa; ptr += 4;
-	*(unsigned int *)(ptr) = __size_exponent; ptr += 4;
+	memcpy(ptr, &__size_mantissa, 4); ptr += 4;
+	memcpy(ptr, &__size_exponent, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -6766,6 +6994,7 @@
 
 void glDepthRangefOES_enc(void *self , GLclampf zNear, GLclampf zFar)
 {
+	AEMU_SCOPED_TRACE("glDepthRangefOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6792,6 +7021,7 @@
 
 void glFrustumfOES_enc(void *self , GLfloat left, GLfloat right, GLfloat bottom, GLfloat top, GLfloat zNear, GLfloat zFar)
 {
+	AEMU_SCOPED_TRACE("glFrustumfOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6822,6 +7052,7 @@
 
 void glOrthofOES_enc(void *self , GLfloat left, GLfloat right, GLfloat bottom, GLfloat top, GLfloat zNear, GLfloat zFar)
 {
+	AEMU_SCOPED_TRACE("glOrthofOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6852,6 +7083,7 @@
 
 void glClipPlanefOES_enc(void *self , GLenum plane, const GLfloat* equation)
 {
+	AEMU_SCOPED_TRACE("glClipPlanefOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6870,7 +7102,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &plane, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_equation; ptr += 4;
+	memcpy(ptr, &__size_equation, 4); ptr += 4;
 	memcpy(ptr, equation, __size_equation);ptr += __size_equation;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6880,6 +7112,7 @@
 
 void glClipPlanefIMG_enc(void *self , GLenum plane, const GLfloat* equation)
 {
+	AEMU_SCOPED_TRACE("glClipPlanefIMG encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6898,7 +7131,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &plane, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_equation; ptr += 4;
+	memcpy(ptr, &__size_equation, 4); ptr += 4;
 	memcpy(ptr, equation, __size_equation);ptr += __size_equation;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6908,6 +7141,7 @@
 
 void glGetClipPlanefOES_enc(void *self , GLenum pname, GLfloat* eqn)
 {
+	AEMU_SCOPED_TRACE("glGetClipPlanefOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6926,7 +7160,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_eqn; ptr += 4;
+	memcpy(ptr, &__size_eqn, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -6947,6 +7181,7 @@
 
 void glClearDepthfOES_enc(void *self , GLclampf depth)
 {
+	AEMU_SCOPED_TRACE("glClearDepthfOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6972,6 +7207,7 @@
 
 void glTexGenfOES_enc(void *self , GLenum coord, GLenum pname, GLfloat param)
 {
+	AEMU_SCOPED_TRACE("glTexGenfOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6999,6 +7235,7 @@
 
 void glTexGenfvOES_enc(void *self , GLenum coord, GLenum pname, const GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glTexGenfvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7018,7 +7255,7 @@
 
 		memcpy(ptr, &coord, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -7028,6 +7265,7 @@
 
 void glTexGeniOES_enc(void *self , GLenum coord, GLenum pname, GLint param)
 {
+	AEMU_SCOPED_TRACE("glTexGeniOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7055,6 +7293,7 @@
 
 void glTexGenivOES_enc(void *self , GLenum coord, GLenum pname, const GLint* params)
 {
+	AEMU_SCOPED_TRACE("glTexGenivOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7074,7 +7313,7 @@
 
 		memcpy(ptr, &coord, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -7084,6 +7323,7 @@
 
 void glTexGenxOES_enc(void *self , GLenum coord, GLenum pname, GLfixed param)
 {
+	AEMU_SCOPED_TRACE("glTexGenxOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7111,6 +7351,7 @@
 
 void glTexGenxvOES_enc(void *self , GLenum coord, GLenum pname, const GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glTexGenxvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7130,7 +7371,7 @@
 
 		memcpy(ptr, &coord, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -7140,6 +7381,7 @@
 
 void glGetTexGenfvOES_enc(void *self , GLenum coord, GLenum pname, GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glGetTexGenfvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7159,7 +7401,7 @@
 
 		memcpy(ptr, &coord, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -7169,6 +7411,7 @@
 
 void glGetTexGenivOES_enc(void *self , GLenum coord, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetTexGenivOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7188,7 +7431,7 @@
 
 		memcpy(ptr, &coord, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -7198,6 +7441,7 @@
 
 void glGetTexGenxvOES_enc(void *self , GLenum coord, GLenum pname, GLfixed* params)
 {
+	AEMU_SCOPED_TRACE("glGetTexGenxvOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7217,7 +7461,7 @@
 
 		memcpy(ptr, &coord, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -7227,6 +7471,7 @@
 
 void glBindVertexArrayOES_enc(void *self , GLuint array)
 {
+	AEMU_SCOPED_TRACE("glBindVertexArrayOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7252,6 +7497,7 @@
 
 void glDeleteVertexArraysOES_enc(void *self , GLsizei n, const GLuint* arrays)
 {
+	AEMU_SCOPED_TRACE("glDeleteVertexArraysOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7270,7 +7516,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_arrays; ptr += 4;
+	memcpy(ptr, &__size_arrays, 4); ptr += 4;
 	memcpy(ptr, arrays, __size_arrays);ptr += __size_arrays;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -7280,6 +7526,7 @@
 
 void glGenVertexArraysOES_enc(void *self , GLsizei n, GLuint* arrays)
 {
+	AEMU_SCOPED_TRACE("glGenVertexArraysOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7298,7 +7545,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_arrays; ptr += 4;
+	memcpy(ptr, &__size_arrays, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -7319,6 +7566,7 @@
 
 GLboolean glIsVertexArrayOES_enc(void *self , GLuint array)
 {
+	AEMU_SCOPED_TRACE("glIsVertexArrayOES encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7359,6 +7607,7 @@
 
 void glDiscardFramebufferEXT_enc(void *self , GLenum target, GLsizei numAttachments, const GLenum* attachments)
 {
+	AEMU_SCOPED_TRACE("glDiscardFramebufferEXT encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7378,7 +7627,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &numAttachments, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_attachments; ptr += 4;
+	memcpy(ptr, &__size_attachments, 4); ptr += 4;
 	memcpy(ptr, attachments, __size_attachments);ptr += __size_attachments;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -7388,6 +7637,7 @@
 
 void glRenderbufferStorageMultisampleIMG_enc(void *self , GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height)
 {
+	AEMU_SCOPED_TRACE("glRenderbufferStorageMultisampleIMG encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7417,6 +7667,7 @@
 
 void glFramebufferTexture2DMultisampleIMG_enc(void *self , GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLsizei samples)
 {
+	AEMU_SCOPED_TRACE("glFramebufferTexture2DMultisampleIMG encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7447,6 +7698,7 @@
 
 void glDeleteFencesNV_enc(void *self , GLsizei n, const GLuint* fences)
 {
+	AEMU_SCOPED_TRACE("glDeleteFencesNV encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7465,7 +7717,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_fences; ptr += 4;
+	memcpy(ptr, &__size_fences, 4); ptr += 4;
 	memcpy(ptr, fences, __size_fences);ptr += __size_fences;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -7475,6 +7727,7 @@
 
 void glGenFencesNV_enc(void *self , GLsizei n, GLuint* fences)
 {
+	AEMU_SCOPED_TRACE("glGenFencesNV encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7493,7 +7746,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_fences; ptr += 4;
+	memcpy(ptr, &__size_fences, 4); ptr += 4;
 	memcpy(ptr, fences, __size_fences);ptr += __size_fences;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -7503,6 +7756,7 @@
 
 GLboolean glIsFenceNV_enc(void *self , GLuint fence)
 {
+	AEMU_SCOPED_TRACE("glIsFenceNV encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7543,6 +7797,7 @@
 
 GLboolean glTestFenceNV_enc(void *self , GLuint fence)
 {
+	AEMU_SCOPED_TRACE("glTestFenceNV encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7583,6 +7838,7 @@
 
 void glGetFenceivNV_enc(void *self , GLuint fence, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetFenceivNV encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7602,7 +7858,7 @@
 
 		memcpy(ptr, &fence, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -7623,6 +7879,7 @@
 
 void glFinishFenceNV_enc(void *self , GLuint fence)
 {
+	AEMU_SCOPED_TRACE("glFinishFenceNV encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7648,6 +7905,7 @@
 
 void glSetFenceNV_enc(void *self , GLuint fence, GLenum condition)
 {
+	AEMU_SCOPED_TRACE("glSetFenceNV encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7674,6 +7932,7 @@
 
 void glGetDriverControlsQCOM_enc(void *self , GLint* num, GLsizei size, GLuint* driverControls)
 {
+	AEMU_SCOPED_TRACE("glGetDriverControlsQCOM encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7692,9 +7951,9 @@
 	int tmp = OP_glGetDriverControlsQCOM;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_num; ptr += 4;
+	memcpy(ptr, &__size_num, 4); ptr += 4;
 		memcpy(ptr, &size, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_driverControls; ptr += 4;
+	memcpy(ptr, &__size_driverControls, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -7717,6 +7976,7 @@
 
 void glGetDriverControlStringQCOM_enc(void *self , GLuint driverControl, GLsizei bufSize, GLsizei* length, GLchar* driverControlString)
 {
+	AEMU_SCOPED_TRACE("glGetDriverControlStringQCOM encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7737,8 +7997,8 @@
 
 		memcpy(ptr, &driverControl, 4); ptr += 4;
 		memcpy(ptr, &bufSize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_length; ptr += 4;
-	*(unsigned int *)(ptr) = __size_driverControlString; ptr += 4;
+	memcpy(ptr, &__size_length, 4); ptr += 4;
+	memcpy(ptr, &__size_driverControlString, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -7761,6 +8021,7 @@
 
 void glEnableDriverControlQCOM_enc(void *self , GLuint driverControl)
 {
+	AEMU_SCOPED_TRACE("glEnableDriverControlQCOM encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7786,6 +8047,7 @@
 
 void glDisableDriverControlQCOM_enc(void *self , GLuint driverControl)
 {
+	AEMU_SCOPED_TRACE("glDisableDriverControlQCOM encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7811,6 +8073,7 @@
 
 void glExtGetTexturesQCOM_enc(void *self , GLuint* textures, GLint maxTextures, GLint* numTextures)
 {
+	AEMU_SCOPED_TRACE("glExtGetTexturesQCOM encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7829,9 +8092,9 @@
 	int tmp = OP_glExtGetTexturesQCOM;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_textures; ptr += 4;
+	memcpy(ptr, &__size_textures, 4); ptr += 4;
 		memcpy(ptr, &maxTextures, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_numTextures; ptr += 4;
+	memcpy(ptr, &__size_numTextures, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -7854,6 +8117,7 @@
 
 void glExtGetBuffersQCOM_enc(void *self , GLuint* buffers, GLint maxBuffers, GLint* numBuffers)
 {
+	AEMU_SCOPED_TRACE("glExtGetBuffersQCOM encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7872,9 +8136,9 @@
 	int tmp = OP_glExtGetBuffersQCOM;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_buffers; ptr += 4;
+	memcpy(ptr, &__size_buffers, 4); ptr += 4;
 		memcpy(ptr, &maxBuffers, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_numBuffers; ptr += 4;
+	memcpy(ptr, &__size_numBuffers, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -7897,6 +8161,7 @@
 
 void glExtGetRenderbuffersQCOM_enc(void *self , GLuint* renderbuffers, GLint maxRenderbuffers, GLint* numRenderbuffers)
 {
+	AEMU_SCOPED_TRACE("glExtGetRenderbuffersQCOM encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7915,9 +8180,9 @@
 	int tmp = OP_glExtGetRenderbuffersQCOM;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_renderbuffers; ptr += 4;
+	memcpy(ptr, &__size_renderbuffers, 4); ptr += 4;
 		memcpy(ptr, &maxRenderbuffers, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_numRenderbuffers; ptr += 4;
+	memcpy(ptr, &__size_numRenderbuffers, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -7940,6 +8205,7 @@
 
 void glExtGetFramebuffersQCOM_enc(void *self , GLuint* framebuffers, GLint maxFramebuffers, GLint* numFramebuffers)
 {
+	AEMU_SCOPED_TRACE("glExtGetFramebuffersQCOM encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7958,9 +8224,9 @@
 	int tmp = OP_glExtGetFramebuffersQCOM;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_framebuffers; ptr += 4;
+	memcpy(ptr, &__size_framebuffers, 4); ptr += 4;
 		memcpy(ptr, &maxFramebuffers, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_numFramebuffers; ptr += 4;
+	memcpy(ptr, &__size_numFramebuffers, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -7983,6 +8249,7 @@
 
 void glExtGetTexLevelParameterivQCOM_enc(void *self , GLuint texture, GLenum face, GLint level, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glExtGetTexLevelParameterivQCOM encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8004,7 +8271,7 @@
 		memcpy(ptr, &face, 4); ptr += 4;
 		memcpy(ptr, &level, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -8025,6 +8292,7 @@
 
 void glExtTexObjectStateOverrideiQCOM_enc(void *self , GLenum target, GLenum pname, GLint param)
 {
+	AEMU_SCOPED_TRACE("glExtTexObjectStateOverrideiQCOM encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8052,6 +8320,7 @@
 
 void glExtGetTexSubImageQCOM_enc(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, GLvoid* texels)
 {
+	AEMU_SCOPED_TRACE("glExtGetTexSubImageQCOM encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8079,7 +8348,7 @@
 		memcpy(ptr, &depth, 4); ptr += 4;
 		memcpy(ptr, &format, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_texels; ptr += 4;
+	memcpy(ptr, &__size_texels, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -8100,6 +8369,7 @@
 
 void glExtGetShadersQCOM_enc(void *self , GLuint* shaders, GLint maxShaders, GLint* numShaders)
 {
+	AEMU_SCOPED_TRACE("glExtGetShadersQCOM encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8118,9 +8388,9 @@
 	int tmp = OP_glExtGetShadersQCOM;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_shaders; ptr += 4;
+	memcpy(ptr, &__size_shaders, 4); ptr += 4;
 		memcpy(ptr, &maxShaders, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_numShaders; ptr += 4;
+	memcpy(ptr, &__size_numShaders, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -8143,6 +8413,7 @@
 
 void glExtGetProgramsQCOM_enc(void *self , GLuint* programs, GLint maxPrograms, GLint* numPrograms)
 {
+	AEMU_SCOPED_TRACE("glExtGetProgramsQCOM encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8161,9 +8432,9 @@
 	int tmp = OP_glExtGetProgramsQCOM;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_programs; ptr += 4;
+	memcpy(ptr, &__size_programs, 4); ptr += 4;
 		memcpy(ptr, &maxPrograms, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_numPrograms; ptr += 4;
+	memcpy(ptr, &__size_numPrograms, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -8186,6 +8457,7 @@
 
 GLboolean glExtIsProgramBinaryQCOM_enc(void *self , GLuint program)
 {
+	AEMU_SCOPED_TRACE("glExtIsProgramBinaryQCOM encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8226,6 +8498,7 @@
 
 void glStartTilingQCOM_enc(void *self , GLuint x, GLuint y, GLuint width, GLuint height, GLbitfield preserveMask)
 {
+	AEMU_SCOPED_TRACE("glStartTilingQCOM encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8255,6 +8528,7 @@
 
 void glEndTilingQCOM_enc(void *self , GLbitfield preserveMask)
 {
+	AEMU_SCOPED_TRACE("glEndTilingQCOM encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8280,6 +8554,7 @@
 
 GLenum glGetGraphicsResetStatusEXT_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glGetGraphicsResetStatusEXT encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8319,6 +8594,7 @@
 
 void glReadnPixelsEXT_enc(void *self , GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, GLvoid* data)
 {
+	AEMU_SCOPED_TRACE("glReadnPixelsEXT encode");
 
 	gl_encoder_context_t *ctx = (gl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8343,7 +8619,7 @@
 		memcpy(ptr, &format, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
 		memcpy(ptr, &bufSize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
diff --git a/system/GLESv2/CMakeLists.txt b/system/GLESv2/CMakeLists.txt
index d90502a..b04a4bb 100644
--- a/system/GLESv2/CMakeLists.txt
+++ b/system/GLESv2/CMakeLists.txt
@@ -4,7 +4,7 @@
 android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/GLESv2/Android.mk" "d8f9dda69ec57ad8b7a65f02c3335b16a4724f612dec1d1a2cd793c28c0a10f9")
 set(GLESv2_emulation_src gl2.cpp)
 android_add_library(TARGET GLESv2_emulation SHARED LICENSE Apache-2.0 SRC gl2.cpp)
-target_include_directories(GLESv2_emulation PRIVATE ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon/bionic-include ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon ${GOLDFISH_DEVICE_ROOT}/bionic/libc/private ${GOLDFISH_DEVICE_ROOT}/bionic/libc/platform ${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/gralloc_cb/include ${GOLDFISH_DEVICE_ROOT}/shared/GoldfishAddressSpace/include ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv2_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv1_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
-target_compile_definitions(GLESv2_emulation PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGOLDFISH_VULKAN" "-DLOG_TAG=\"GLESv2_emulation\"")
+target_include_directories(GLESv2_emulation PRIVATE ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon/bionic-include ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon ${GOLDFISH_DEVICE_ROOT}/bionic/libc/private ${GOLDFISH_DEVICE_ROOT}/bionic/libc/platform ${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc ${GOLDFISH_DEVICE_ROOT}/shared/gralloc_cb/include ${GOLDFISH_DEVICE_ROOT}/shared/GoldfishAddressSpace/include ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv2_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv1_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
+target_compile_definitions(GLESv2_emulation PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM" "-DLOG_TAG=\"GLESv2_emulation\"")
 target_compile_options(GLESv2_emulation PRIVATE "-fvisibility=default" "-Wno-unused-parameter")
-target_link_libraries(GLESv2_emulation PRIVATE OpenglSystemCommon android-emu-shared vulkan_enc gui androidemu cutils utils log _renderControl_enc GLESv2_enc GLESv1_enc OpenglCodecCommon_host PRIVATE gralloc_cb_host GoldfishAddressSpace_host qemupipe_host)
\ No newline at end of file
+target_link_libraries(GLESv2_emulation PRIVATE OpenglSystemCommon android-emu-shared vulkan_enc gui log _renderControl_enc GLESv2_enc GLESv1_enc OpenglCodecCommon_host cutils utils androidemu PRIVATE gralloc_cb_host GoldfishAddressSpace_host qemupipe_host)
\ No newline at end of file
diff --git a/system/GLESv2/gl2.cpp b/system/GLESv2/gl2.cpp
index a36bdd3..9598a30 100644
--- a/system/GLESv2/gl2.cpp
+++ b/system/GLESv2/gl2.cpp
@@ -79,7 +79,7 @@
         DEFINE_AND_VALIDATE_HOST_CONNECTION();
 
         ctx->override2DTextureTarget(target);
-        ctx->associateEGLImage(target, hostImage);
+        ctx->associateEGLImage(target, hostImage, image->width, image->height);
         rcEnc->rcBindTexture(rcEnc,
                 grallocHelper->getHostHandle(native_buffer->handle));
         ctx->restore2DTextureTarget(target);
@@ -87,7 +87,7 @@
     else if (image->target == EGL_GL_TEXTURE_2D_KHR) {
         GET_CONTEXT;
         ctx->override2DTextureTarget(target);
-        ctx->associateEGLImage(target, hostImage);
+        ctx->associateEGLImage(target, hostImage, image->width, image->height);
         ctx->m_glEGLImageTargetTexture2DOES_enc(self, GL_TEXTURE_2D, hostImage);
         ctx->restore2DTextureTarget(target);
     }
@@ -101,6 +101,7 @@
     DBG("glEGLImageTargetRenderbufferStorageOES v2 image=%p\n", img);
     //TODO: check error - we don't have a way to set gl error
     EGLImage_t *image = (EGLImage_t*)img;
+    GLeglImageOES hostImage = reinterpret_cast<GLeglImageOES>((intptr_t)image->host_egl_image);
 
     if (image->target == EGL_NATIVE_BUFFER_ANDROID) {
         android_native_buffer_t* native_buffer = ((EGLImage_t*)image)->native_buffer;
@@ -114,6 +115,8 @@
         }
 
         DEFINE_AND_VALIDATE_HOST_CONNECTION();
+        GET_CONTEXT;
+        ctx->associateEGLImage(target, hostImage, image->width, image->height);
         rcEnc->rcBindRenderbuffer(rcEnc,
                 grallocHelper->getHostHandle(native_buffer->handle));
     } else {
diff --git a/system/GLESv2_enc/CMakeLists.txt b/system/GLESv2_enc/CMakeLists.txt
index 39a479f..5d0f049 100644
--- a/system/GLESv2_enc/CMakeLists.txt
+++ b/system/GLESv2_enc/CMakeLists.txt
@@ -4,7 +4,7 @@
 android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/GLESv2_enc/Android.mk" "df543672d1f36e43fb783b08200aa85dbf3a2e7167f8ecd7e4c01c80e6fd1650")
 set(GLESv2_enc_src GL2EncoderUtils.cpp GL2Encoder.cpp GLESv2Validation.cpp gl2_client_context.cpp gl2_enc.cpp gl2_entry.cpp IOStream2.cpp)
 android_add_library(TARGET GLESv2_enc SHARED LICENSE Apache-2.0 SRC GL2EncoderUtils.cpp GL2Encoder.cpp GLESv2Validation.cpp gl2_client_context.cpp gl2_enc.cpp gl2_entry.cpp IOStream2.cpp)
-target_include_directories(GLESv2_enc PRIVATE ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/system/GLESv2_enc ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
-target_compile_definitions(GLESv2_enc PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGOLDFISH_VULKAN" "-DLOG_TAG=\"emuglGLESv2_enc\"")
+target_include_directories(GLESv2_enc PRIVATE ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/system/GLESv2_enc ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
+target_compile_definitions(GLESv2_enc PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM" "-DLOG_TAG=\"emuglGLESv2_enc\"")
 target_compile_options(GLESv2_enc PRIVATE "-fvisibility=default" "-Wno-unused-parameter" "-Wno-unused-private-field")
-target_link_libraries(GLESv2_enc PRIVATE OpenglCodecCommon_host cutils utils log android-emu-shared PRIVATE qemupipe_host)
\ No newline at end of file
+target_link_libraries(GLESv2_enc PRIVATE OpenglCodecCommon_host cutils utils log androidemu android-emu-shared PRIVATE qemupipe_host)
\ No newline at end of file
diff --git a/system/GLESv2_enc/GL2Encoder.cpp b/system/GLESv2_enc/GL2Encoder.cpp
index bafd1b6..9fcce62 100755
--- a/system/GLESv2_enc/GL2Encoder.cpp
+++ b/system/GLESv2_enc/GL2Encoder.cpp
@@ -16,6 +16,7 @@
 
 #include "GL2Encoder.h"
 #include "GLESv2Validation.h"
+#include "GLESTextureUtils.h"
 
 #include <string>
 #include <map>
@@ -40,7 +41,7 @@
 static GLubyte *gExtensionsString= (GLubyte *) "GL_OES_EGL_image_external ";
 
 #define SET_ERROR_IF(condition, err) if((condition)) { \
-        ALOGE("%s:%s:%d GL error 0x%x\n", __FILE__, __FUNCTION__, __LINE__, err); \
+        ALOGE("%s:%s:%d GL error 0x%x condition [%s]\n", __FILE__, __FUNCTION__, __LINE__, err, #condition); \
         ctx->setError(err); \
         return; \
     }
@@ -73,6 +74,7 @@
     m_currMajorVersion = 2;
     m_currMinorVersion = 0;
     m_hasAsyncUnmapBuffer = false;
+    m_hasSyncBufferData = false;
     m_initialized = false;
     m_noHostError = false;
     m_state = NULL;
@@ -81,6 +83,7 @@
     m_num_compressedTextureFormats = 0;
     m_max_combinedTextureImageUnits = 0;
     m_max_vertexTextureImageUnits = 0;
+    m_max_array_texture_layers = 0;
     m_max_textureImageUnits = 0;
     m_max_cubeMapTextureSize = 0;
     m_max_renderBufferSize = 0;
@@ -201,6 +204,7 @@
     OVERRIDE(glGenFramebuffers);
     OVERRIDE(glDeleteFramebuffers);
     OVERRIDE(glBindFramebuffer);
+    OVERRIDE(glFramebufferParameteri);
     OVERRIDE(glFramebufferTexture2D);
     OVERRIDE(glFramebufferTexture3DOES);
     OVERRIDE(glGetFramebufferAttachmentParameteriv);
@@ -296,6 +300,7 @@
     OVERRIDE(glGenerateMipmap);
 
     OVERRIDE(glBindSampler);
+    OVERRIDE(glDeleteSamplers);
 
     OVERRIDE_CUSTOM(glFenceSync);
     OVERRIDE_CUSTOM(glClientWaitSync);
@@ -375,6 +380,84 @@
 
     OVERRIDE(glInvalidateFramebuffer);
     OVERRIDE(glInvalidateSubFramebuffer);
+
+    OVERRIDE(glDispatchCompute);
+    OVERRIDE(glDispatchComputeIndirect);
+
+    OVERRIDE(glGenTransformFeedbacks);
+    OVERRIDE(glDeleteTransformFeedbacks);
+    OVERRIDE(glGenSamplers);
+    OVERRIDE(glGenQueries);
+    OVERRIDE(glDeleteQueries);
+
+    OVERRIDE(glBindTransformFeedback);
+    OVERRIDE(glBeginQuery);
+    OVERRIDE(glEndQuery);
+
+    OVERRIDE(glClear);
+    OVERRIDE(glClearBufferfi);
+    OVERRIDE(glCopyTexSubImage2D);
+    OVERRIDE(glCopyTexSubImage3D);
+    OVERRIDE(glCompileShader);
+    OVERRIDE(glValidateProgram);
+    OVERRIDE(glProgramBinary);
+
+    OVERRIDE(glGetSamplerParameterfv);
+    OVERRIDE(glGetSamplerParameteriv);
+    OVERRIDE(glSamplerParameterf);
+    OVERRIDE(glSamplerParameteri);
+    OVERRIDE(glSamplerParameterfv);
+    OVERRIDE(glSamplerParameteriv);
+
+    OVERRIDE(glGetAttribLocation);
+
+    OVERRIDE(glBindAttribLocation);
+    OVERRIDE(glUniformBlockBinding);
+    OVERRIDE(glGetTransformFeedbackVarying);
+    OVERRIDE(glScissor);
+    OVERRIDE(glDepthFunc);
+    OVERRIDE(glViewport);
+    OVERRIDE(glStencilFunc);
+    OVERRIDE(glStencilFuncSeparate);
+    OVERRIDE(glStencilOp);
+    OVERRIDE(glStencilOpSeparate);
+    OVERRIDE(glStencilMaskSeparate);
+    OVERRIDE(glBlendEquation);
+    OVERRIDE(glBlendEquationSeparate);
+    OVERRIDE(glBlendFunc);
+    OVERRIDE(glBlendFuncSeparate);
+    OVERRIDE(glCullFace);
+    OVERRIDE(glFrontFace);
+    OVERRIDE(glLineWidth);
+    OVERRIDE(glVertexAttrib1f);
+    OVERRIDE(glVertexAttrib2f);
+    OVERRIDE(glVertexAttrib3f);
+    OVERRIDE(glVertexAttrib4f);
+    OVERRIDE(glVertexAttrib1fv);
+    OVERRIDE(glVertexAttrib2fv);
+    OVERRIDE(glVertexAttrib3fv);
+    OVERRIDE(glVertexAttrib4fv);
+    OVERRIDE(glVertexAttribI4i);
+    OVERRIDE(glVertexAttribI4ui);
+    OVERRIDE(glVertexAttribI4iv);
+    OVERRIDE(glVertexAttribI4uiv);
+
+    OVERRIDE(glGetShaderPrecisionFormat);
+    OVERRIDE(glGetProgramiv);
+    OVERRIDE(glGetActiveUniform);
+    OVERRIDE(glGetActiveUniformsiv);
+    OVERRIDE(glGetActiveUniformBlockName);
+    OVERRIDE(glGetActiveAttrib);
+    OVERRIDE(glGetRenderbufferParameteriv);
+    OVERRIDE(glGetQueryiv);
+    OVERRIDE(glGetQueryObjectuiv);
+    OVERRIDE(glIsEnabled);
+    OVERRIDE(glHint);
+
+    OVERRIDE(glGetFragDataLocation);
+
+    OVERRIDE(glStencilMask);
+    OVERRIDE(glClearStencil);
 }
 
 GL2Encoder::~GL2Encoder()
@@ -387,7 +470,9 @@
     GL2Encoder *ctx = (GL2Encoder *)self;
     GLenum err = ctx->getError();
     if(err != GL_NO_ERROR) {
-        ctx->m_glGetError_enc(ctx); // also clear host error
+        if (!ctx->m_noHostError) {
+            ctx->m_glGetError_enc(ctx); // also clear host error
+        }
         ctx->setError(GL_NO_ERROR);
         return err;
     }
@@ -405,6 +490,9 @@
         mCtx(ctx),
         guest_error(ctx->getError()),
         host_error(ctx->m_glGetError_enc(ctx)) {
+            if (ctx->m_noHostError) {
+                host_error = GL_NO_ERROR;
+            }
             // Preserve any existing GL error in the guest:
             // OpenGL ES 3.0.5 spec:
             // The command enum GetError( void ); is used to obtain error information.
@@ -577,10 +665,15 @@
     GLuint bufferId = ctx->m_state->getBuffer(target);
     SET_ERROR_IF(bufferId==0, GL_INVALID_OPERATION);
     SET_ERROR_IF(size<0, GL_INVALID_VALUE);
+    SET_ERROR_IF(!GLESv2Validation::bufferUsage(ctx, usage), GL_INVALID_ENUM);
 
     ctx->m_shared->updateBufferData(bufferId, size, data);
     ctx->m_shared->setBufferUsage(bufferId, usage);
-    ctx->m_glBufferData_enc(self, target, size, data, usage);
+    if (ctx->m_hasSyncBufferData) {
+        ctx->glBufferDataSyncAEMU(self, target, size, data, usage);
+    } else {
+        ctx->m_glBufferData_enc(self, target, size, data, usage);
+    }
 }
 
 void GL2Encoder::s_glBufferSubData(void * self, GLenum target, GLintptr offset, GLsizeiptr size, const GLvoid * data)
@@ -620,18 +713,8 @@
     }
 }
 
-static bool isValidVertexAttribIndex(void *self, GLuint indx)
-{
-    GL2Encoder *ctx = (GL2Encoder *)self;
-    GLint maxIndex;
-    ctx->glGetIntegerv(self, GL_MAX_VERTEX_ATTRIBS, &maxIndex);
-    return indx < maxIndex;
-}
-
 #define VALIDATE_VERTEX_ATTRIB_INDEX(index) \
-    SET_ERROR_WITH_MESSAGE_IF( \
-            !isValidVertexAttribIndex(self, index), GL_INVALID_VALUE, \
-            GLESv2Validation::vertexAttribIndexRangeErrorMsg, (ctx, index)); \
+    SET_ERROR_IF(index >= CODEC_MAX_VERTEX_ATTRIBUTES, GL_INVALID_VALUE); \
 
 void GL2Encoder::s_glVertexAttribPointer(void *self, GLuint indx, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const GLvoid * ptr)
 {
@@ -719,6 +802,14 @@
             ctx->m_max_vertexTextureImageUnits = *ptr;
         }
         break;
+    case GL_MAX_ARRAY_TEXTURE_LAYERS:
+        if (ctx->m_max_array_texture_layers != 0) {
+            *ptr = ctx->m_max_array_texture_layers;
+        } else {
+            ctx->safe_glGetIntegerv(param, ptr);
+            ctx->m_max_array_texture_layers = *ptr;
+        }
+        break;
     case GL_MAX_TEXTURE_IMAGE_UNITS:
         if (ctx->m_max_textureImageUnits != 0) {
             *ptr = ctx->m_max_textureImageUnits;
@@ -728,20 +819,16 @@
         }
         break;
     case GL_TEXTURE_BINDING_2D:
-        SET_ERROR_IF(!state, GL_INVALID_OPERATION);
+        if (!state) return;
         *ptr = state->getBoundTexture(GL_TEXTURE_2D);
         break;
     case GL_TEXTURE_BINDING_EXTERNAL_OES:
-        SET_ERROR_IF(!state, GL_INVALID_OPERATION);
+        if (!state) return;
         *ptr = state->getBoundTexture(GL_TEXTURE_EXTERNAL_OES);
         break;
 
     case GL_MAX_VERTEX_ATTRIBS:
-        SET_ERROR_IF(!state, GL_INVALID_OPERATION);
-        if (!state->getClientStateParameter<GLint>(param, ptr)) {
-            ctx->safe_glGetIntegerv(param, ptr);
-            state->setMaxVertexAttribs(*ptr);
-        }
+        *ptr = CODEC_MAX_VERTEX_ATTRIBUTES;
         break;
     case GL_MAX_VERTEX_ATTRIB_STRIDE:
         if (ctx->m_max_vertexAttribStride != 0) {
@@ -773,6 +860,13 @@
         } else {
             ctx->safe_glGetIntegerv(param, ptr);
             ctx->m_max_textureSize = *ptr;
+            if (ctx->m_max_textureSize > 0) {
+                uint32_t current = 1;
+                while (current < ctx->m_max_textureSize) {
+                    ++ctx->m_log2MaxTextureSize;
+                    current = current << 1;
+                }
+            }
         }
         break;
     case GL_MAX_3D_TEXTURE_SIZE:
@@ -886,7 +980,7 @@
         *ptr = GL_LOSE_CONTEXT_ON_RESET_EXT;
         break;
     default:
-        SET_ERROR_IF(!state, GL_INVALID_OPERATION);
+        if (!state) return;
         if (!state->getClientStateParameter<GLint>(param, ptr)) {
             ctx->safe_glGetIntegerv(param, ptr);
         }
@@ -950,7 +1044,7 @@
     }
 
     default:
-        SET_ERROR_IF(!state, GL_INVALID_OPERATION);
+        if (!state) return;
         if (!state->getClientStateParameter<GLfloat>(param, ptr)) {
             ctx->safe_glGetFloatv(param, ptr);
         }
@@ -1014,11 +1108,15 @@
     }
 
     default:
-        SET_ERROR_IF(!state, GL_INVALID_OPERATION);
-        if (!state->getClientStateParameter<GLboolean>(param, ptr)) {
-            ctx->safe_glGetBooleanv(param, ptr);
+        if (!state) return;
+        {
+            GLint intVal;
+            if (!state->getClientStateParameter<GLint>(param, &intVal)) {
+                ctx->safe_glGetBooleanv(param, ptr);
+            } else {
+                *ptr = (intVal != 0) ? GL_TRUE : GL_FALSE;
+            }
         }
-        *ptr = (*ptr != 0) ? GL_TRUE : GL_FALSE;
         break;
     }
 }
@@ -1046,10 +1144,8 @@
 void GL2Encoder::s_glGetVertexAttribiv(void *self, GLuint index, GLenum pname, GLint *params)
 {
     GL2Encoder *ctx = (GL2Encoder *)self;
-    assert(ctx->m_state);
-    GLint maxIndex;
-    ctx->glGetIntegerv(self, GL_MAX_VERTEX_ATTRIBS, &maxIndex);
-    SET_ERROR_IF(!(index < maxIndex), GL_INVALID_VALUE);
+    VALIDATE_VERTEX_ATTRIB_INDEX(index);
+    SET_ERROR_IF(!GLESv2Validation::allowedGetVertexAttrib(pname), GL_INVALID_ENUM);
 
     if (!ctx->m_state->getVertexAttribParameter<GLint>(index, pname, params)) {
         ctx->m_glGetVertexAttribiv_enc(self, index, pname, params);
@@ -1059,10 +1155,8 @@
 void GL2Encoder::s_glGetVertexAttribfv(void *self, GLuint index, GLenum pname, GLfloat *params)
 {
     GL2Encoder *ctx = (GL2Encoder *)self;
-    assert(ctx->m_state);
-    GLint maxIndex;
-    ctx->glGetIntegerv(self, GL_MAX_VERTEX_ATTRIBS, &maxIndex);
-    SET_ERROR_IF(!(index < maxIndex), GL_INVALID_VALUE);
+    VALIDATE_VERTEX_ATTRIB_INDEX(index);
+    SET_ERROR_IF(!GLESv2Validation::allowedGetVertexAttrib(pname), GL_INVALID_ENUM);
 
     if (!ctx->m_state->getVertexAttribParameter<GLfloat>(index, pname, params)) {
         ctx->m_glGetVertexAttribfv_enc(self, index, pname, params);
@@ -1073,9 +1167,7 @@
 {
     GL2Encoder *ctx = (GL2Encoder *)self;
     if (ctx->m_state == NULL) return;
-    GLint maxIndex;
-    ctx->glGetIntegerv(self, GL_MAX_VERTEX_ATTRIBS, &maxIndex);
-    SET_ERROR_IF(!(index < maxIndex), GL_INVALID_VALUE);
+    VALIDATE_VERTEX_ATTRIB_INDEX(index);
     SET_ERROR_IF(pname != GL_VERTEX_ATTRIB_ARRAY_POINTER, GL_INVALID_ENUM);
     (void)pname;
 
@@ -1239,20 +1331,24 @@
                 }
                 if (state.elementSize == 0) {
                     // The vertex attribute array is uninitialized. Abandon it.
-                    ALOGE("a vertex attribute array is uninitialized. Skipping corresponding vertex attribute.");
                     this->m_glDisableVertexAttribArray_enc(this, i);
                     continue;
                 }
                 m_glEnableVertexAttribArray_enc(this, i);
 
                 if (datalen && (!offset || !((unsigned char*)offset + firstIndex))) {
-                    ALOGD("%s: bad offset / len!!!!!", __FUNCTION__);
                     continue;
                 }
+
+                unsigned char* data = (unsigned char*)offset + firstIndex;
+                if (!m_state->isAttribIndexUsedByProgram(i)) {
+                    continue;
+                }
+
                 if (state.isInt) {
-                    this->glVertexAttribIPointerDataAEMU(this, i, state.size, state.type, stride, (unsigned char *)offset + firstIndex, datalen);
+                    this->glVertexAttribIPointerDataAEMU(this, i, state.size, state.type, stride, data, datalen);
                 } else {
-                    this->glVertexAttribPointerData(this, i, state.size, state.type, state.normalized, stride, (unsigned char *)offset + firstIndex, datalen);
+                    this->glVertexAttribPointerData(this, i, state.size, state.type, state.normalized, stride, data, datalen);
                 }
             } else {
                 const BufferData* buf = m_shared->getBufferData(bufferObject);
@@ -1267,20 +1363,24 @@
                 if (buf && firstIndex >= 0 && firstIndex + bufLen <= buf->m_size) {
                     if (hasClientArrays) {
                         m_glEnableVertexAttribArray_enc(this, i);
-                        if (state.isInt) {
-                            this->glVertexAttribIPointerOffsetAEMU(this, i, state.size, state.type, stride, offset + firstIndex);
-                        } else {
-                            this->glVertexAttribPointerOffset(this, i, state.size, state.type, state.normalized, stride, offset + firstIndex);
+                        if (firstIndex) {
+                            if (state.isInt) {
+                                this->glVertexAttribIPointerOffsetAEMU(this, i, state.size, state.type, stride, offset + firstIndex);
+                            } else {
+                                this->glVertexAttribPointerOffset(this, i, state.size, state.type, state.normalized, stride, offset + firstIndex);
+                            }
                         }
                     }
                 } else {
-                    ALOGE("a vertex attribute index out of boundary is detected. Skipping corresponding vertex attribute. buf=%p", buf);
-                    if (buf) {
-                        ALOGE("Out of bounds vertex attribute info: "
-                                "clientArray? %d attribute %d vbo %u allocedBufferSize %u bufferDataSpecified? %d wantedStart %u wantedEnd %u",
-                                hasClientArrays, i, bufferObject, (unsigned int)buf->m_size, buf != NULL, firstIndex, firstIndex + bufLen);
+                    if (m_state->isAttribIndexUsedByProgram(i)) {
+                        ALOGE("a vertex attribute index out of boundary is detected. Skipping corresponding vertex attribute. buf=%p", buf);
+                        if (buf) {
+                            ALOGE("Out of bounds vertex attribute info: "
+                                    "clientArray? %d attribute %d vbo %u allocedBufferSize %u bufferDataSpecified? %d wantedStart %u wantedEnd %u",
+                                    hasClientArrays, i, bufferObject, (unsigned int)buf->m_size, buf != NULL, firstIndex, firstIndex + bufLen);
+                        }
+                        m_glDisableVertexAttribArray_enc(this, i);
                     }
-                    m_glDisableVertexAttribArray_enc(this, i);
                 }
             }
         } else {
@@ -1324,6 +1424,7 @@
     assert(ctx->m_state != NULL);
     SET_ERROR_IF(!isValidDrawMode(mode), GL_INVALID_ENUM);
     SET_ERROR_IF(count < 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(ctx->m_state->checkFramebufferCompleteness(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE, GL_INVALID_FRAMEBUFFER_OPERATION);
 
     bool has_client_vertex_arrays = false;
     bool has_indirect_arrays = false;
@@ -1338,6 +1439,8 @@
     } else {
         ctx->m_glDrawArrays_enc(ctx, mode, first, count);
     }
+
+    ctx->m_state->postDraw();
 }
 
 
@@ -1350,6 +1453,7 @@
     SET_ERROR_IF(count < 0, GL_INVALID_VALUE);
     SET_ERROR_IF(!(type == GL_UNSIGNED_BYTE || type == GL_UNSIGNED_SHORT || type == GL_UNSIGNED_INT), GL_INVALID_ENUM);
     SET_ERROR_IF(ctx->m_state->getTransformFeedbackActiveUnpaused(), GL_INVALID_OPERATION);
+    SET_ERROR_IF(ctx->m_state->checkFramebufferCompleteness(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE, GL_INVALID_FRAMEBUFFER_OPERATION);
 
     bool has_client_vertex_arrays = false;
     bool has_indirect_arrays = false;
@@ -1359,7 +1463,7 @@
 
     if (!has_client_vertex_arrays && !has_indirect_arrays) {
         // ALOGW("glDrawElements: no vertex arrays / buffers bound to the command\n");
-        GLenum status = ctx->m_glCheckFramebufferStatus_enc(self, GL_FRAMEBUFFER);
+        GLenum status = ctx->glCheckFramebufferStatus(self, GL_FRAMEBUFFER);
         SET_ERROR_IF(status != GL_FRAMEBUFFER_COMPLETE, GL_INVALID_FRAMEBUFFER_OPERATION);
     }
 
@@ -1428,6 +1532,8 @@
             ALOGE("glDrawElements: direct index & direct buffer data - will be implemented in later versions;\n");
         }
     }
+
+    ctx->m_state->postDraw();
 }
 
 void GL2Encoder::s_glDrawArraysNullAEMU(void *self, GLenum mode, GLint first, GLsizei count)
@@ -1436,6 +1542,7 @@
     assert(ctx->m_state != NULL);
     SET_ERROR_IF(!isValidDrawMode(mode), GL_INVALID_ENUM);
     SET_ERROR_IF(count < 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(ctx->m_state->checkFramebufferCompleteness(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE, GL_INVALID_FRAMEBUFFER_OPERATION);
 
     bool has_client_vertex_arrays = false;
     bool has_indirect_arrays = false;
@@ -1451,6 +1558,7 @@
         ctx->m_glDrawArraysNullAEMU_enc(ctx, mode, first, count);
     }
     ctx->flushDrawCall();
+    ctx->m_state->postDraw();
 }
 
 void GL2Encoder::s_glDrawElementsNullAEMU(void *self, GLenum mode, GLsizei count, GLenum type, const void *indices)
@@ -1462,6 +1570,7 @@
     SET_ERROR_IF(count < 0, GL_INVALID_VALUE);
     SET_ERROR_IF(!(type == GL_UNSIGNED_BYTE || type == GL_UNSIGNED_SHORT || type == GL_UNSIGNED_INT), GL_INVALID_ENUM);
     SET_ERROR_IF(ctx->m_state->getTransformFeedbackActiveUnpaused(), GL_INVALID_OPERATION);
+    SET_ERROR_IF(ctx->m_state->checkFramebufferCompleteness(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE, GL_INVALID_FRAMEBUFFER_OPERATION);
 
     bool has_client_vertex_arrays = false;
     bool has_indirect_arrays = false;
@@ -1471,7 +1580,7 @@
 
     if (!has_client_vertex_arrays && !has_indirect_arrays) {
         // ALOGW("glDrawElements: no vertex arrays / buffers bound to the command\n");
-        GLenum status = ctx->m_glCheckFramebufferStatus_enc(self, GL_FRAMEBUFFER);
+        GLenum status = ctx->glCheckFramebufferStatus(self, GL_FRAMEBUFFER);
         SET_ERROR_IF(status != GL_FRAMEBUFFER_COMPLETE, GL_INVALID_FRAMEBUFFER_OPERATION);
     }
 
@@ -1544,6 +1653,7 @@
             ALOGE("glDrawElementsNullAEMU: direct index & direct buffer data - will be implemented in later versions;\n");
         }
     }
+    ctx->m_state->postDraw();
 }
 
 GLint * GL2Encoder::getCompressedTextureFormats()
@@ -1563,6 +1673,7 @@
 // Replace uses of samplerExternalOES with sampler2D, recording the names of
 // modified shaders in data. Also remove
 //   #extension GL_OES_EGL_image_external : require
+//   #extension GL_OES_EGL_image_external_essl3 : require
 // statements.
 //
 // This implementation assumes the input has already been pre-processed. If not,
@@ -1652,34 +1763,40 @@
         }
         char* sampler_start = c;
         c += samplerExternalType.size();
-        if (!isspace(*c) && *c != '\0') {
+        if (!isspace(*c) && *c != '\0' && *c != ';') {
             continue;
+        } else {
+            // capture sampler name
+            while (isspace(*c) && *c != '\0') {
+                c++;
+            }
         }
 
-        // capture sampler name
-        while (isspace(*c) && *c != '\0') {
-            c++;
-        }
-        if (!isalpha(*c) && *c != '_') {
-            // not an identifier
-            return false;
-        }
-        char* name_start = c;
-        do {
-            c++;
-        } while (isalnum(*c) || *c == '_');
+        if ((!isalpha(*c) && *c != '_') || *c == ';') {
+            // not an identifier, but might have some effect anyway.
+            if (samplerExternalType == STR_SAMPLER_EXTERNAL_OES) {
+                memcpy(sampler_start, STR_SAMPLER2D_SPACE, sizeof(STR_SAMPLER2D_SPACE)-1);
+            }
+        } else {
+            char* name_start = c;
+            do {
+                c++;
+            } while (isalnum(*c) || *c == '_');
 
-        size_t len = (size_t)(c - name_start);
-        data->samplerExternalNames.push_back(
-            std::string(name_start, len));
+            size_t len = (size_t)(c - name_start);
+            if (len) {
+                data->samplerExternalNames.push_back(
+                        std::string(name_start, len));
+            }
 
-        // We only need to perform a string replacement for the original
-        // occurrence of samplerExternalOES if a #define was used.
-        //
-        // The important part was to record the name in
-        // |data->samplerExternalNames|.
-        if (samplerExternalType == STR_SAMPLER_EXTERNAL_OES) {
-            memcpy(sampler_start, STR_SAMPLER2D_SPACE, sizeof(STR_SAMPLER2D_SPACE)-1);
+            // We only need to perform a string replacement for the original
+            // occurrence of samplerExternalOES if a #define was used.
+            //
+            // The important part was to record the name in
+            // |data->samplerExternalNames|.
+            if (samplerExternalType == STR_SAMPLER_EXTERNAL_OES) {
+                memcpy(sampler_start, STR_SAMPLER2D_SPACE, sizeof(STR_SAMPLER2D_SPACE)-1);
+            }
         }
     }
 
@@ -1795,41 +1912,96 @@
     SET_ERROR_IF(!isProgram && !ctx->m_shared->isShader(program), GL_INVALID_VALUE);
     SET_ERROR_IF(!isProgram, GL_INVALID_OPERATION);
 
+    if (program == ctx->m_state->currentProgram() ||
+        (!ctx->m_state->currentProgram() &&
+         (program == ctx->m_state->currentShaderProgram()))) {
+        SET_ERROR_IF(ctx->m_state->getTransformFeedbackActive(), GL_INVALID_OPERATION);
+    }
+
     ctx->m_glLinkProgram_enc(self, program);
 
     GLint linkStatus = 0;
-    ctx->glGetProgramiv(self, program, GL_LINK_STATUS, &linkStatus);
+    ctx->m_glGetProgramiv_enc(self, program, GL_LINK_STATUS, &linkStatus);
+    ctx->m_shared->setProgramLinkStatus(program, linkStatus);
     if (!linkStatus) {
         return;
     }
 
-    //get number of active uniforms in the program
+    // get number of active uniforms and attributes in the program
     GLint numUniforms=0;
-    ctx->glGetProgramiv(self, program, GL_ACTIVE_UNIFORMS, &numUniforms);
-    ctx->m_shared->initProgramData(program,numUniforms);
+    GLint numAttributes=0;
+    ctx->m_glGetProgramiv_enc(self, program, GL_ACTIVE_UNIFORMS, &numUniforms);
+    ctx->m_glGetProgramiv_enc(self, program, GL_ACTIVE_ATTRIBUTES, &numAttributes);
+    ctx->m_shared->initProgramData(program,numUniforms,numAttributes);
 
     //get the length of the longest uniform name
     GLint maxLength=0;
-    ctx->glGetProgramiv(self, program, GL_ACTIVE_UNIFORM_MAX_LENGTH, &maxLength);
+    GLint maxAttribLength=0;
+    ctx->m_glGetProgramiv_enc(self, program, GL_ACTIVE_UNIFORM_MAX_LENGTH, &maxLength);
+    ctx->m_glGetProgramiv_enc(self, program, GL_ACTIVE_ATTRIBUTE_MAX_LENGTH, &maxAttribLength);
 
     GLint size;
     GLenum type;
-    GLchar *name = new GLchar[maxLength+1];
+    size_t bufLen = maxLength > maxAttribLength ? maxLength : maxAttribLength;
+    GLchar *name = new GLchar[bufLen + 1];
     GLint location;
     //for each active uniform, get its size and starting location.
     for (GLint i=0 ; i<numUniforms ; ++i)
     {
-        ctx->glGetActiveUniform(self, program, i, maxLength, NULL, &size, &type, name);
+        ctx->m_glGetActiveUniform_enc(self, program, i, maxLength, NULL, &size, &type, name);
         location = ctx->m_glGetUniformLocation_enc(self, program, name);
         ctx->m_shared->setProgramIndexInfo(program, i, location, size, type, name);
     }
 
+    for (GLint i = 0; i < numAttributes; ++i) {
+        ctx->m_glGetActiveAttrib_enc(self, program, i, maxAttribLength,  NULL, &size, &type, name);
+        location = ctx->m_glGetAttribLocation_enc(self, program, name);
+        ctx->m_shared->setProgramAttribInfo(program, i, location, size, type, name);
+    }
+
+    if (ctx->majorVersion() > 2) {
+        GLint numBlocks;
+        ctx->m_glGetProgramiv_enc(ctx, program, GL_ACTIVE_UNIFORM_BLOCKS, &numBlocks);
+        ctx->m_shared->setActiveUniformBlockCountForProgram(program, numBlocks);
+
+        GLint tfVaryingsCount;
+        ctx->m_glGetProgramiv_enc(ctx, program, GL_TRANSFORM_FEEDBACK_VARYINGS, &tfVaryingsCount);
+        ctx->m_shared->setTransformFeedbackVaryingsCountForProgram(program, tfVaryingsCount);
+    }
+
     delete[] name;
 }
 
+#define VALIDATE_PROGRAM_NAME(program) \
+    bool isShaderOrProgramObject = \
+        ctx->m_shared->isShaderOrProgramObject(program); \
+    bool isProgram = \
+        ctx->m_shared->isProgram(program); \
+    SET_ERROR_IF(!isShaderOrProgramObject, GL_INVALID_VALUE); \
+    SET_ERROR_IF(!isProgram, GL_INVALID_OPERATION); \
+
+#define VALIDATE_PROGRAM_NAME_RET(program, ret) \
+    bool isShaderOrProgramObject = \
+        ctx->m_shared->isShaderOrProgramObject(program); \
+    bool isProgram = \
+        ctx->m_shared->isProgram(program); \
+    RET_AND_SET_ERROR_IF(!isShaderOrProgramObject, GL_INVALID_VALUE, ret); \
+    RET_AND_SET_ERROR_IF(!isProgram, GL_INVALID_OPERATION, ret); \
+
+#define VALIDATE_SHADER_NAME(shader) \
+    bool isShaderOrProgramObject = \
+        ctx->m_shared->isShaderOrProgramObject(shader); \
+    bool isShader = \
+        ctx->m_shared->isShader(shader); \
+    SET_ERROR_IF(!isShaderOrProgramObject, GL_INVALID_VALUE); \
+    SET_ERROR_IF(!isShader, GL_INVALID_OPERATION); \
+
 void GL2Encoder::s_glDeleteProgram(void *self, GLuint program)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
+
+    VALIDATE_PROGRAM_NAME(program);
+
     ctx->m_glDeleteProgram_enc(self, program);
 
     ctx->m_shared->deleteProgramData(program);
@@ -1841,9 +2013,9 @@
     SET_ERROR_IF(!ctx->m_shared->isShaderOrProgramObject(program), GL_INVALID_VALUE);
     SET_ERROR_IF(!ctx->m_shared->isProgram(program), GL_INVALID_OPERATION);
     SET_ERROR_IF(!ctx->m_shared->isProgramInitialized(program), GL_INVALID_OPERATION);
-    GLint hostLoc = location;
-    SET_ERROR_IF(ctx->m_shared->getProgramUniformType(program,hostLoc)==0, GL_INVALID_OPERATION);
-    ctx->m_glGetUniformiv_enc(self, program, hostLoc, params);
+    SET_ERROR_IF(ctx->m_shared->getProgramUniformType(program,location)==0, GL_INVALID_OPERATION);
+    SET_ERROR_IF(!ctx->m_shared->isProgramUniformLocationValid(program,location), GL_INVALID_OPERATION);
+    ctx->m_glGetUniformiv_enc(self, program, location, params);
 }
 void GL2Encoder::s_glGetUniformfv(void *self, GLuint program, GLint location, GLfloat* params)
 {
@@ -1851,9 +2023,9 @@
     SET_ERROR_IF(!ctx->m_shared->isShaderOrProgramObject(program), GL_INVALID_VALUE);
     SET_ERROR_IF(!ctx->m_shared->isProgram(program), GL_INVALID_OPERATION);
     SET_ERROR_IF(!ctx->m_shared->isProgramInitialized(program), GL_INVALID_OPERATION);
-    GLint hostLoc = location;
-    SET_ERROR_IF(ctx->m_shared->getProgramUniformType(program,hostLoc)==0, GL_INVALID_OPERATION);
-    ctx->m_glGetUniformfv_enc(self, program, hostLoc, params);
+    SET_ERROR_IF(ctx->m_shared->getProgramUniformType(program,location)==0, GL_INVALID_OPERATION);
+    SET_ERROR_IF(!ctx->m_shared->isProgramUniformLocationValid(program,location), GL_INVALID_OPERATION);
+    ctx->m_glGetUniformfv_enc(self, program, location, params);
 }
 
 GLuint GL2Encoder::s_glCreateProgram(void * self)
@@ -1871,7 +2043,7 @@
     RET_AND_SET_ERROR_IF(!GLESv2Validation::shaderType(ctx, shaderType), GL_INVALID_ENUM, 0);
     GLuint shader = ctx->m_glCreateShader_enc(self, shaderType);
     if (shader != 0) {
-        if (!ctx->m_shared->addShaderData(shader)) {
+        if (!ctx->m_shared->addShaderData(shader, shaderType)) {
             ctx->m_glDeleteShader_enc(self, shader);
             return 0;
         }
@@ -1883,6 +2055,7 @@
         GLsizei* count, GLuint* shaders)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
+    VALIDATE_PROGRAM_NAME(program);
     SET_ERROR_IF(maxCount < 0, GL_INVALID_VALUE);
     ctx->m_glGetAttachedShaders_enc(self, program, maxCount, count, shaders);
 }
@@ -1891,6 +2064,7 @@
             GLsizei* length, GLchar* source)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
+    VALIDATE_SHADER_NAME(shader);
     SET_ERROR_IF(bufsize < 0, GL_INVALID_VALUE);
     ctx->m_glGetShaderSource_enc(self, shader, bufsize, length, source);
     ShaderData* shaderData = ctx->m_shared->getShaderData(shader);
@@ -1916,6 +2090,7 @@
         GLsizei* length, GLchar* infolog)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
+    VALIDATE_SHADER_NAME(shader);
     SET_ERROR_IF(bufsize < 0, GL_INVALID_VALUE);
     ctx->m_glGetShaderInfoLog_enc(self, shader, bufsize, length, infolog);
 }
@@ -1924,6 +2099,7 @@
         GLsizei* length, GLchar* infolog)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
+    VALIDATE_PROGRAM_NAME(program);
     SET_ERROR_IF(bufsize < 0, GL_INVALID_VALUE);
     ctx->m_glGetProgramInfoLog_enc(self, program, bufsize, length, infolog);
 }
@@ -1931,6 +2107,15 @@
 void GL2Encoder::s_glDeleteShader(void *self, GLenum shader)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
+
+    bool isShaderOrProgramObject =
+        ctx->m_shared->isShaderOrProgramObject(shader);
+    bool isShader =
+        ctx->m_shared->isShader(shader);
+
+    SET_ERROR_IF(isShaderOrProgramObject && !isShader, GL_INVALID_OPERATION);
+    SET_ERROR_IF(!isShaderOrProgramObject && !isShader, GL_INVALID_VALUE);
+
     ctx->m_glDeleteShader_enc(self,shader);
     ctx->m_shared->unrefShaderData(shader);
 }
@@ -1938,15 +2123,36 @@
 void GL2Encoder::s_glAttachShader(void *self, GLuint program, GLuint shader)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
+    bool programIsShaderOrProgram = ctx->m_shared->isShaderOrProgramObject(program);
+    bool programIsProgram = ctx->m_shared->isProgram(program);
+    bool shaderIsShaderOrProgram = ctx->m_shared->isShaderOrProgramObject(shader);
+    bool shaderIsShader = ctx->m_shared->isShader(shader);
+
+    SET_ERROR_IF(!programIsShaderOrProgram, GL_INVALID_VALUE);
+    SET_ERROR_IF(!shaderIsShaderOrProgram, GL_INVALID_VALUE);
+    SET_ERROR_IF(!programIsProgram, GL_INVALID_OPERATION);
+    SET_ERROR_IF(!shaderIsShader, GL_INVALID_OPERATION);
+    SET_ERROR_IF(!ctx->m_shared->attachShader(program, shader), GL_INVALID_OPERATION);
+
     ctx->m_glAttachShader_enc(self, program, shader);
-    ctx->m_shared->attachShader(program, shader);
 }
 
 void GL2Encoder::s_glDetachShader(void *self, GLuint program, GLuint shader)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
+
+    bool programIsShaderOrProgram = ctx->m_shared->isShaderOrProgramObject(program);
+    bool programIsProgram = ctx->m_shared->isProgram(program);
+    bool shaderIsShaderOrProgram = ctx->m_shared->isShaderOrProgramObject(shader);
+    bool shaderIsShader = ctx->m_shared->isShader(shader);
+
+    SET_ERROR_IF(!programIsShaderOrProgram, GL_INVALID_VALUE);
+    SET_ERROR_IF(!shaderIsShaderOrProgram, GL_INVALID_VALUE);
+    SET_ERROR_IF(!programIsProgram, GL_INVALID_OPERATION);
+    SET_ERROR_IF(!shaderIsShader, GL_INVALID_OPERATION);
+    SET_ERROR_IF(!ctx->m_shared->detachShader(program, shader), GL_INVALID_OPERATION);
+
     ctx->m_glDetachShader_enc(self, program, shader);
-    ctx->m_shared->detachShader(program, shader);
 }
 
 int sArrIndexOfUniformExpr(const char* name, int* err) {
@@ -1966,6 +2172,16 @@
 {
     if (!name) return -1;
     GL2Encoder *ctx = (GL2Encoder*)self;
+
+    bool isShaderOrProgramObject =
+        ctx->m_shared->isShaderOrProgramObject(program);
+    bool isProgram =
+        ctx->m_shared->isProgram(program);
+
+    RET_AND_SET_ERROR_IF(!isShaderOrProgramObject, GL_INVALID_VALUE, -1);
+    RET_AND_SET_ERROR_IF(!isProgram, GL_INVALID_OPERATION, -1);
+    RET_AND_SET_ERROR_IF(!ctx->m_shared->getProgramLinkStatus(program), GL_INVALID_OPERATION, -1);
+
     return ctx->m_glGetUniformLocation_enc(self, program, name);
 }
 
@@ -2026,26 +2242,35 @@
 
     SET_ERROR_IF(program && !shared->isShaderOrProgramObject(program), GL_INVALID_VALUE);
     SET_ERROR_IF(program && !shared->isProgram(program), GL_INVALID_OPERATION);
+    SET_ERROR_IF(ctx->m_state->getTransformFeedbackActiveUnpaused(), GL_INVALID_OPERATION);
 
     ctx->m_glUseProgram_enc(self, program);
+
+    GLuint currProgram = ctx->m_state->currentProgram();
+    ctx->m_shared->onUseProgram(currProgram, program);
+
     ctx->m_state->setCurrentProgram(program);
     ctx->m_state->setCurrentShaderProgram(program);
-
     ctx->updateHostTexture2DBindingsFromProgramData(program);
+
+    if (program) {
+        ctx->m_state->currentUniformValidationInfo = ctx->m_shared->getUniformValidationInfo(program);
+        ctx->m_state->currentAttribValidationInfo = ctx->m_shared->getAttribValidationInfo(program);
+    }
 }
 
 void GL2Encoder::s_glUniform1f(void *self , GLint location, GLfloat x)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform1f_enc(self, hostLoc, x);
+    ctx->m_state->validateUniform(true /* is float? */, false /* is unsigned? */, 1 /* columns */, 1 /* rows */, location, 1 /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform1f_enc(self, location, x);
 }
 
 void GL2Encoder::s_glUniform1fv(void *self , GLint location, GLsizei count, const GLfloat* v)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform1fv_enc(self, hostLoc, count, v);
+    ctx->m_state->validateUniform(true /* is float? */, false /* is unsigned? */, 1 /* columns */, 1 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform1fv_enc(self, location, count, v);
 }
 
 void GL2Encoder::s_glUniform1i(void *self , GLint location, GLint x)
@@ -2054,8 +2279,9 @@
     GLClientState* state = ctx->m_state;
     GLSharedGroupPtr shared = ctx->m_shared;
 
-    GLint hostLoc = location;
-    ctx->m_glUniform1i_enc(self, hostLoc, x);
+    ctx->m_state->validateUniform(false /* is float? */, false /* is unsigned? */, 1 /* columns */, 1 /* rows */, location, 1 /* count */, ctx->getErrorPtr());
+
+    ctx->m_glUniform1i_enc(self, location, x);
 
     GLenum target;
     if (shared->setSamplerUniform(state->currentShaderProgram(), location, x, &target)) {
@@ -2070,113 +2296,113 @@
 void GL2Encoder::s_glUniform1iv(void *self , GLint location, GLsizei count, const GLint* v)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform1iv_enc(self, hostLoc, count, v);
+    ctx->m_state->validateUniform(false /* is float? */, false /* is unsigned? */, 1 /* columns */, 1 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform1iv_enc(self, location, count, v);
 }
 
 void GL2Encoder::s_glUniform2f(void *self , GLint location, GLfloat x, GLfloat y)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform2f_enc(self, hostLoc, x, y);
+    ctx->m_state->validateUniform(true /* is float? */, false /* is unsigned? */, 2 /* columns */, 1 /* rows */, location, 1 /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform2f_enc(self, location, x, y);
 }
 
 void GL2Encoder::s_glUniform2fv(void *self , GLint location, GLsizei count, const GLfloat* v)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform2fv_enc(self, hostLoc, count, v);
+    ctx->m_state->validateUniform(true /* is float? */, false /* is unsigned? */, 2 /* columns */, 1 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform2fv_enc(self, location, count, v);
 }
 
 void GL2Encoder::s_glUniform2i(void *self , GLint location, GLint x, GLint y)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform2i_enc(self, hostLoc, x, y);
+    ctx->m_state->validateUniform(false /* is float? */, false /* is unsigned? */, 2 /* columns */, 1 /* rows */, location, 1 /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform2i_enc(self, location, x, y);
 }
 
 void GL2Encoder::s_glUniform2iv(void *self , GLint location, GLsizei count, const GLint* v)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform2iv_enc(self, hostLoc, count, v);
+    ctx->m_state->validateUniform(false /* is float? */, false /* is unsigned? */, 2 /* columns */, 1 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform2iv_enc(self, location, count, v);
 }
 
 void GL2Encoder::s_glUniform3f(void *self , GLint location, GLfloat x, GLfloat y, GLfloat z)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform3f_enc(self, hostLoc, x, y, z);
+    ctx->m_state->validateUniform(true /* is float? */, false /* is unsigned? */, 3 /* columns */, 1 /* rows */, location, 1 /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform3f_enc(self, location, x, y, z);
 }
 
 void GL2Encoder::s_glUniform3fv(void *self , GLint location, GLsizei count, const GLfloat* v)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform3fv_enc(self, hostLoc, count, v);
+    ctx->m_state->validateUniform(true /* is float? */, false /* is unsigned? */, 3 /* columns */, 1 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform3fv_enc(self, location, count, v);
 }
 
 void GL2Encoder::s_glUniform3i(void *self , GLint location, GLint x, GLint y, GLint z)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform3i_enc(self, hostLoc, x, y, z);
+    ctx->m_state->validateUniform(false /* is float? */, false /* is unsigned? */, 3 /* columns */, 1 /* rows */, location, 1 /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform3i_enc(self, location, x, y, z);
 }
 
 void GL2Encoder::s_glUniform3iv(void *self , GLint location, GLsizei count, const GLint* v)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform3iv_enc(self, hostLoc, count, v);
+    ctx->m_state->validateUniform(false /* is float? */, false /* is unsigned? */, 3 /* columns */, 1 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform3iv_enc(self, location, count, v);
 }
 
 void GL2Encoder::s_glUniform4f(void *self , GLint location, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform4f_enc(self, hostLoc, x, y, z, w);
+    ctx->m_state->validateUniform(true /* is float? */, false /* is unsigned? */, 4 /* columns */, 1 /* rows */, location, 1 /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform4f_enc(self, location, x, y, z, w);
 }
 
 void GL2Encoder::s_glUniform4fv(void *self , GLint location, GLsizei count, const GLfloat* v)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform4fv_enc(self, hostLoc, count, v);
+    ctx->m_state->validateUniform(true /* is float? */, false /* is unsigned? */, 4 /* columns */, 1 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform4fv_enc(self, location, count, v);
 }
 
 void GL2Encoder::s_glUniform4i(void *self , GLint location, GLint x, GLint y, GLint z, GLint w)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform4i_enc(self, hostLoc, x, y, z, w);
+    ctx->m_state->validateUniform(false /* is float? */, false /* is unsigned? */, 4 /* columns */, 1 /* rows */, location, 1 /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform4i_enc(self, location, x, y, z, w);
 }
 
 void GL2Encoder::s_glUniform4iv(void *self , GLint location, GLsizei count, const GLint* v)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform4iv_enc(self, hostLoc, count, v);
+    ctx->m_state->validateUniform(false /* is float? */, false /* is unsigned? */, 4 /* columns */, 1 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform4iv_enc(self, location, count, v);
 }
 
 void GL2Encoder::s_glUniformMatrix2fv(void *self , GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniformMatrix2fv_enc(self, hostLoc, count, transpose, value);
+    ctx->m_state->validateUniform(true /* is float? */, false /* is unsigned? */, 2 /* columns */, 2 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniformMatrix2fv_enc(self, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glUniformMatrix3fv(void *self , GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniformMatrix3fv_enc(self, hostLoc, count, transpose, value);
+    ctx->m_state->validateUniform(true /* is float? */, false /* is unsigned? */, 3 /* columns */, 3 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniformMatrix3fv_enc(self, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glUniformMatrix4fv(void *self , GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniformMatrix4fv_enc(self, hostLoc, count, transpose, value);
+    ctx->m_state->validateUniform(true /* is float? */, false /* is unsigned? */, 4 /* columns */, 4 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniformMatrix4fv_enc(self, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glActiveTexture(void* self, GLenum texture)
@@ -2185,6 +2411,10 @@
     GLClientState* state = ctx->m_state;
     GLenum err;
 
+    GLint maxCombinedUnits;
+    ctx->glGetIntegerv(ctx, GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &maxCombinedUnits);
+
+    SET_ERROR_IF(texture - GL_TEXTURE0 > maxCombinedUnits - 1, GL_INVALID_ENUM);
     SET_ERROR_IF((err = state->setActiveTextureUnit(texture)) != GL_NO_ERROR, err);
 
     ctx->m_glActiveTexture_enc(ctx, texture);
@@ -2241,6 +2471,10 @@
 {
     GL2Encoder* ctx = (GL2Encoder*)self;
 
+    SET_ERROR_IF(!GLESv2Validation::textureTarget(ctx, target), GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::textureParams(ctx, pname), GL_INVALID_ENUM);
+    if (!params) return;
+
     if (target == GL_TEXTURE_2D || target == GL_TEXTURE_EXTERNAL_OES) {
         ctx->override2DTextureTarget(target);
         ctx->m_glGetTexParameterfv_enc(ctx, GL_TEXTURE_2D, pname, params);
@@ -2255,6 +2489,11 @@
 {
     GL2Encoder* ctx = (GL2Encoder*)self;
 
+    SET_ERROR_IF(!GLESv2Validation::textureTarget(ctx, target), GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::textureParams(ctx, pname), GL_INVALID_ENUM);
+
+    if (!params) return;
+
     switch (pname) {
     case GL_REQUIRED_TEXTURE_IMAGE_UNITS_OES:
         *params = 1;
@@ -2296,6 +2535,9 @@
     SET_ERROR_IF((target == GL_TEXTURE_EXTERNAL_OES &&
             !isValidTextureExternalParam(pname, (GLenum)param)),
             GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::textureTarget(ctx, target), GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::textureParams(ctx, pname), GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::textureParamValue(ctx, pname, (GLint)param, param, (GLenum)param), GL_INVALID_ENUM);
 
     if (target == GL_TEXTURE_2D || target == GL_TEXTURE_EXTERNAL_OES) {
         ctx->override2DTextureTarget(target);
@@ -2314,6 +2556,11 @@
     SET_ERROR_IF((target == GL_TEXTURE_EXTERNAL_OES &&
             !isValidTextureExternalParam(pname, (GLenum)params[0])),
             GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::textureTarget(ctx, target), GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::textureParams(ctx, pname), GL_INVALID_ENUM);
+    SET_ERROR_IF(!params, GL_INVALID_VALUE);
+    GLfloat param = *params;
+    SET_ERROR_IF(!GLESv2Validation::textureParamValue(ctx, pname, (GLint)param, param, (GLenum)param), GL_INVALID_ENUM);
 
     if (target == GL_TEXTURE_2D || target == GL_TEXTURE_EXTERNAL_OES) {
         ctx->override2DTextureTarget(target);
@@ -2332,6 +2579,9 @@
     SET_ERROR_IF((target == GL_TEXTURE_EXTERNAL_OES &&
             !isValidTextureExternalParam(pname, (GLenum)param)),
             GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::textureTarget(ctx, target), GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::textureParams(ctx, pname), GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::textureParamValue(ctx, pname, param, (GLfloat)param, (GLenum)param), GL_INVALID_ENUM);
 
     if (target == GL_TEXTURE_2D || target == GL_TEXTURE_EXTERNAL_OES) {
         ctx->override2DTextureTarget(target);
@@ -2359,6 +2609,9 @@
     SET_ERROR_IF(!GLESv2Validation::textureTarget(ctx, target), GL_INVALID_ENUM);
     SET_ERROR_IF(!GLESv2Validation::pixelType(ctx, type), GL_INVALID_ENUM);
     SET_ERROR_IF(!GLESv2Validation::pixelFormat(ctx, format), GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::pixelFormat(ctx, internalformat) && !GLESv2Validation::pixelInternalFormat(internalformat), GL_INVALID_VALUE);
+    SET_ERROR_IF(!(GLESv2Validation::pixelOp(format,type)),GL_INVALID_OPERATION);
+    SET_ERROR_IF(!GLESv2Validation::pixelSizedFormat(ctx, internalformat, format, type), GL_INVALID_OPERATION);
     // If unpack buffer is nonzero, verify unmapped state.
     SET_ERROR_IF(ctx->isBufferTargetMapped(GL_PIXEL_UNPACK_BUFFER), GL_INVALID_OPERATION);
 
@@ -2375,6 +2628,7 @@
     SET_ERROR_IF(height > max_texture_size, GL_INVALID_VALUE);
     SET_ERROR_IF(GLESv2Validation::isCubeMapTarget(target) && width > max_cube_map_texture_size, GL_INVALID_VALUE);
     SET_ERROR_IF(GLESv2Validation::isCubeMapTarget(target) && height > max_cube_map_texture_size, GL_INVALID_VALUE);
+    SET_ERROR_IF(GLESv2Validation::isCubeMapTarget(target) && (width != height), GL_INVALID_VALUE);
     SET_ERROR_IF(border != 0, GL_INVALID_VALUE);
     // If unpack buffer is nonzero, verify buffer data fits and is evenly divisible by the type.
     SET_ERROR_IF(ctx->boundBuffer(GL_PIXEL_UNPACK_BUFFER) &&
@@ -2405,7 +2659,8 @@
     state->setBoundTextureInternalFormat(stateTarget, internalformat);
     state->setBoundTextureFormat(stateTarget, format);
     state->setBoundTextureType(stateTarget, type);
-    state->setBoundTextureDims(stateTarget, level, width, height, 1);
+    state->setBoundTextureDims(stateTarget, target, level, width, height, 1);
+    state->addTextureCubeMapImage(stateTarget, target);
 
     if (target == GL_TEXTURE_2D || target == GL_TEXTURE_EXTERNAL_OES) {
         ctx->override2DTextureTarget(target);
@@ -2466,14 +2721,15 @@
     }
 
     // If unpack buffer is nonzero, verify buffer data fits and is evenly divisible by the type.
+
     SET_ERROR_IF(ctx->boundBuffer(GL_PIXEL_UNPACK_BUFFER) &&
                  ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER) &&
-                 (state->pboNeededDataSize(width, height, 1, format, type, 0) >
+                 (state->pboNeededDataSize(width, height, 1, format, type, 0) + (uintptr_t)pixels >
                   ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER)->m_size),
                  GL_INVALID_OPERATION);
     SET_ERROR_IF(ctx->boundBuffer(GL_PIXEL_UNPACK_BUFFER) &&
                  ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER) &&
-                 (ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER)->m_size %
+                 ((uintptr_t)pixels %
                   glSizeof(type)),
                  GL_INVALID_OPERATION);
     SET_ERROR_IF(!ctx->boundBuffer(GL_PIXEL_UNPACK_BUFFER) && !pixels, GL_INVALID_OPERATION);
@@ -2504,6 +2760,35 @@
     GL2Encoder* ctx = (GL2Encoder*)self;
     GLClientState* state = ctx->m_state;
 
+    SET_ERROR_IF(!GLESv2Validation::textureTarget(ctx, target), GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::pixelFormat(ctx, internalformat) && !GLESv2Validation::pixelInternalFormat(internalformat), GL_INVALID_VALUE);
+    GLint max_texture_size;
+    GLint max_cube_map_texture_size;
+    ctx->glGetIntegerv(ctx, GL_MAX_TEXTURE_SIZE, &max_texture_size);
+    ctx->glGetIntegerv(ctx, GL_MAX_CUBE_MAP_TEXTURE_SIZE, &max_cube_map_texture_size);
+    SET_ERROR_IF(level < 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(level > ilog2(max_texture_size), GL_INVALID_VALUE);
+    SET_ERROR_IF((target == GL_TEXTURE_CUBE_MAP) &&
+                 (level > ilog2(max_cube_map_texture_size)), GL_INVALID_VALUE);
+    SET_ERROR_IF(width < 0 || height < 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(width > max_texture_size, GL_INVALID_VALUE);
+    SET_ERROR_IF(height > max_texture_size, GL_INVALID_VALUE);
+    SET_ERROR_IF(GLESv2Validation::isCubeMapTarget(target) && width > max_cube_map_texture_size, GL_INVALID_VALUE);
+    SET_ERROR_IF(GLESv2Validation::isCubeMapTarget(target) && height > max_cube_map_texture_size, GL_INVALID_VALUE);
+    SET_ERROR_IF(GLESv2Validation::isCubeMapTarget(target) && (width != height), GL_INVALID_VALUE);
+    SET_ERROR_IF(border != 0, GL_INVALID_VALUE);
+
+    GLenum stateTarget = target;
+    if (target == GL_TEXTURE_CUBE_MAP_POSITIVE_X ||
+        target == GL_TEXTURE_CUBE_MAP_POSITIVE_Y ||
+        target == GL_TEXTURE_CUBE_MAP_POSITIVE_Z ||
+        target == GL_TEXTURE_CUBE_MAP_NEGATIVE_X ||
+        target == GL_TEXTURE_CUBE_MAP_NEGATIVE_Y ||
+        target == GL_TEXTURE_CUBE_MAP_NEGATIVE_Z)
+        stateTarget = GL_TEXTURE_CUBE_MAP;
+
+    SET_ERROR_IF(state->isBoundTextureImmutableFormat(target), GL_INVALID_OPERATION);
+
     SET_ERROR_IF(ctx->glCheckFramebufferStatus(ctx, GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE,
                  GL_INVALID_FRAMEBUFFER_OPERATION);
     // This is needed to work around underlying OpenGL drivers
@@ -2514,6 +2799,10 @@
         state->copyTexImageLuminanceCubeMapAMDWorkaround
             (target, level, internalformat);
 
+    state->setBoundTextureInternalFormat(stateTarget, internalformat);
+    state->setBoundTextureDims(stateTarget, target, level, width, height, 1);
+    state->addTextureCubeMapImage(stateTarget, target);
+
     if (extraTarget) {
         ctx->m_glCopyTexImage2D_enc(ctx, extraTarget, level, internalformat,
                                     x, y, width, height, border);
@@ -2521,9 +2810,6 @@
 
     ctx->m_glCopyTexImage2D_enc(ctx, target, level, internalformat,
                                 x, y, width, height, border);
-
-    state->setBoundTextureInternalFormat(target, internalformat);
-    state->setBoundTextureDims(target, level, width, height, 1);
 }
 
 void GL2Encoder::s_glTexParameteriv(void* self,
@@ -2534,6 +2820,11 @@
     SET_ERROR_IF((target == GL_TEXTURE_EXTERNAL_OES &&
             !isValidTextureExternalParam(pname, (GLenum)params[0])),
             GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::textureTarget(ctx, target), GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::textureParams(ctx, pname), GL_INVALID_ENUM);
+    SET_ERROR_IF(!params, GL_INVALID_VALUE);
+    GLint param = *params;
+    SET_ERROR_IF(!GLESv2Validation::textureParamValue(ctx, pname, param, (GLfloat)param, (GLenum)param), GL_INVALID_ENUM);
 
     if (target == GL_TEXTURE_2D || target == GL_TEXTURE_EXTERNAL_OES) {
         ctx->override2DTextureTarget(target);
@@ -2573,8 +2864,8 @@
     }
 }
 
-void GL2Encoder::associateEGLImage(GLenum target, GLeglImageOES eglImage) {
-    m_state->setBoundEGLImage(target, eglImage);
+void GL2Encoder::associateEGLImage(GLenum target, GLeglImageOES eglImage, int width, int height) {
+    m_state->setBoundEGLImage(target, eglImage, width, height);
 }
 
 
@@ -2634,7 +2925,7 @@
     for (int i = 0; i < n; i++) {
         state->detachRbo(renderbuffers[i]);
     }
-    // state->removeRenderbuffers(n, renderbuffers);
+    state->removeRenderbuffers(n, renderbuffers);
 }
 
 void GL2Encoder::s_glBindRenderbuffer(void* self,
@@ -2656,12 +2947,19 @@
     GLClientState* state = ctx->m_state;
 
     SET_ERROR_IF(target != GL_RENDERBUFFER, GL_INVALID_ENUM);
+    SET_ERROR_IF(0 == ctx->m_state->boundRenderbuffer(), GL_INVALID_OPERATION);
     SET_ERROR_IF(
         !GLESv2Validation::rboFormat(ctx, internalformat),
         GL_INVALID_ENUM);
 
+    SET_ERROR_IF(width < 0 || height < 0, GL_INVALID_VALUE);
+    GLint max_rb_size;
+    ctx->glGetIntegerv(ctx, GL_MAX_RENDERBUFFER_SIZE, &max_rb_size);
+    SET_ERROR_IF(width > max_rb_size || height > max_rb_size, GL_INVALID_VALUE);
+
     state->setBoundRenderbufferFormat(internalformat);
     state->setBoundRenderbufferSamples(0);
+    state->setBoundRenderbufferDimensions(width, height);
 
     ctx->m_glRenderbufferStorage_enc(self, target, internalformat,
                                      width, height);
@@ -2675,6 +2973,10 @@
 
     SET_ERROR_IF(!GLESv2Validation::framebufferTarget(ctx, target), GL_INVALID_ENUM);
     SET_ERROR_IF(!GLESv2Validation::framebufferAttachment(ctx, attachment), GL_INVALID_ENUM);
+    SET_ERROR_IF(GL_RENDERBUFFER != renderbuffertarget, GL_INVALID_ENUM);
+    SET_ERROR_IF(!state->getBoundFramebuffer(target), GL_INVALID_OPERATION);
+    SET_ERROR_IF(!state->isRenderbufferThatWasBound(renderbuffer), GL_INVALID_OPERATION);
+
     state->attachRbo(target, attachment, renderbuffer);
 
     ctx->m_glFramebufferRenderbuffer_enc(self, target, attachment, renderbuffertarget, renderbuffer);
@@ -2714,6 +3016,14 @@
     ctx->m_glBindFramebuffer_enc(self, target, framebuffer);
 }
 
+void GL2Encoder::s_glFramebufferParameteri(void *self,
+        GLenum target, GLenum pname, GLint param) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    GLClientState* state = ctx->m_state;
+    state->setFramebufferParameter(target, pname, param);
+    ctx->m_glFramebufferParameteri_enc(self, target, pname, param);
+}
+
 void GL2Encoder::s_glFramebufferTexture2D(void* self,
         GLenum target, GLenum attachment,
         GLenum textarget, GLuint texture, GLint level) {
@@ -2721,8 +3031,21 @@
     GLClientState* state = ctx->m_state;
 
     SET_ERROR_IF(!GLESv2Validation::framebufferTarget(ctx, target), GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::textureTarget(ctx, textarget), GL_INVALID_ENUM);
     SET_ERROR_IF(!GLESv2Validation::framebufferAttachment(ctx, attachment), GL_INVALID_ENUM);
-    state->attachTextureObject(target, attachment, texture);
+    SET_ERROR_IF(!state->getBoundFramebuffer(target), GL_INVALID_OPERATION);
+    SET_ERROR_IF(texture && !state->isTexture(texture), GL_INVALID_OPERATION);
+    SET_ERROR_IF(GLESv2Validation::isCubeMapTarget(textarget) && !state->isTextureCubeMap(texture), GL_INVALID_OPERATION);
+    SET_ERROR_IF(!GLESv2Validation::isCubeMapTarget(textarget) && state->isTextureCubeMap(texture), GL_INVALID_OPERATION);
+    SET_ERROR_IF((texture && (level < 0)), GL_INVALID_VALUE);
+
+    if (target == GL_TEXTURE_2D) {
+        SET_ERROR_IF(level > ilog2(ctx->m_state->getMaxTextureSize()), GL_INVALID_VALUE);
+    } else {
+        SET_ERROR_IF(level > ilog2(ctx->m_state->getMaxTextureSizeCubeMap()), GL_INVALID_VALUE);
+    }
+
+    state->attachTextureObject(target, attachment, texture, level, 0);
 
     ctx->m_glFramebufferTexture2D_enc(self, target, attachment, textarget, texture, level);
 }
@@ -2733,7 +3056,7 @@
     GL2Encoder* ctx = (GL2Encoder*)self;
     GLClientState* state = ctx->m_state;
 
-    state->attachTextureObject(target, attachment, texture);
+    state->attachTextureObject(target, attachment, texture, level, zoffset);
 
     ctx->m_glFramebufferTexture3DOES_enc(self, target, attachment, textarget, texture, level, zoffset);
 }
@@ -2743,6 +3066,12 @@
     GL2Encoder* ctx = (GL2Encoder*)self;
     const GLClientState* state = ctx->m_state;
     SET_ERROR_IF(!GLESv2Validation::framebufferTarget(ctx, target), GL_INVALID_ENUM);
+    SET_ERROR_IF(!state->boundFramebuffer(target) &&
+                 attachment != GL_BACK &&
+                 attachment != GL_FRONT &&
+                 attachment != GL_DEPTH &&
+                 attachment != GL_STENCIL,
+                 GL_INVALID_OPERATION);
     SET_ERROR_IF(pname != GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME &&
                  pname != GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE &&
                  !state->attachmentHasObject(target, attachment),
@@ -2755,6 +3084,11 @@
                   FBO_ATTACHMENT_TEXTURE),
                  !state->attachmentHasObject(target, attachment) ?
                  GL_INVALID_OPERATION : GL_INVALID_ENUM);
+    SET_ERROR_IF(
+        (attachment == GL_FRONT ||
+         attachment == GL_BACK) &&
+        (pname == GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME),
+        GL_INVALID_ENUM);
     SET_ERROR_IF(attachment == GL_DEPTH_STENCIL_ATTACHMENT &&
                  pname == GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME &&
                  (state->objectOfAttachment(target, GL_DEPTH_ATTACHMENT) !=
@@ -2762,129 +3096,23 @@
                  GL_INVALID_OPERATION);
     SET_ERROR_IF(state->boundFramebuffer(target) &&
                  (attachment == GL_BACK ||
-                  attachment == GL_FRONT),
+                  attachment == GL_FRONT ||
+                  attachment == GL_DEPTH || 
+                  attachment == GL_STENCIL),
                  GL_INVALID_OPERATION);
     ctx->m_glGetFramebufferAttachmentParameteriv_enc(self, target, attachment, pname, params);
 }
 
-bool GL2Encoder::isCompleteFbo(GLenum target, const GLClientState* state,
-                               GLenum attachment) const {
-    FboFormatInfo fbo_format_info;
-    state->getBoundFramebufferFormat(target, attachment, &fbo_format_info);
-
-    bool res;
-    switch (fbo_format_info.type) {
-    case FBO_ATTACHMENT_RENDERBUFFER:
-        switch (fbo_format_info.rb_format) {
-        case GL_R16F:
-        case GL_RG16F:
-        case GL_RGBA16F:
-        case GL_R32F:
-        case GL_RG32F:
-        case GL_RGBA32F:
-        case GL_R11F_G11F_B10F:
-            res = majorVersion() >= 3 && hasExtension("GL_EXT_color_buffer_float");
-            break;
-        case GL_RGB16F:
-            res = majorVersion() >= 3 && hasExtension("GL_EXT_color_buffer_half_float");
-            break;
-        case GL_STENCIL_INDEX8:
-            if (attachment == GL_STENCIL_ATTACHMENT) {
-                res = true;
-            } else {
-                res = false;
-            }
-            break;
-        default:
-            res = true;
-        }
-        break;
-    case FBO_ATTACHMENT_TEXTURE:
-        switch (fbo_format_info.tex_internalformat) {
-        case GL_R16F:
-        case GL_RG16F:
-        case GL_RGBA16F:
-        case GL_R32F:
-        case GL_RG32F:
-        case GL_RGBA32F:
-        case GL_R11F_G11F_B10F:
-            res = majorVersion() >= 3 && hasExtension("GL_EXT_color_buffer_float");
-            break;
-        case GL_RGB16F:
-            res = majorVersion() >= 3 && hasExtension("GL_EXT_color_buffer_half_float");
-            break;
-        case GL_RED:
-        case GL_RG:
-        case GL_SRGB8:
-        case GL_RGB32UI:
-        case GL_RGB16UI:
-        case GL_RGB8UI:
-        case GL_RGB32I:
-        case GL_RGB16I:
-        case GL_RGB8I:
-        case GL_R8_SNORM:
-        case GL_RG8_SNORM:
-        case GL_RGB8_SNORM:
-        case GL_RGBA8_SNORM:
-            res = false;
-            break;
-        // No float/half-float formats allowed for RGB(A)
-        case GL_RGB:
-        case GL_RGBA:
-            switch (fbo_format_info.tex_type) {
-            case GL_FLOAT:
-            case GL_HALF_FLOAT_OES:
-            case GL_UNSIGNED_INT_10F_11F_11F_REV:
-            case GL_UNSIGNED_INT_2_10_10_10_REV:
-                res = false;
-                break;
-            default:
-                res = true;
-            }
-            break;
-        default:
-            res = true;
-        }
-        break;
-    case FBO_ATTACHMENT_NONE:
-        res = true;
-        break;
-    default:
-        res = true;
-    }
-    return res;
-}
-
-bool GL2Encoder::checkFramebufferCompleteness(GLenum target, const GLClientState* state) const {
-    bool res = true;
-
-    for (int i = 0; i < state->getMaxColorAttachments(); i++) {
-        res = res && isCompleteFbo(target, state, glUtilsColorAttachmentName(i));
-    }
-
-    res = res && isCompleteFbo(target, state, GL_DEPTH_ATTACHMENT);
-    res = res && isCompleteFbo(target, state, GL_STENCIL_ATTACHMENT);
-
-    return res;
-}
-
 GLenum GL2Encoder::s_glCheckFramebufferStatus(void* self, GLenum target) {
     GL2Encoder* ctx = (GL2Encoder*)self;
+
+    RET_AND_SET_ERROR_IF(
+        target != GL_DRAW_FRAMEBUFFER && target != GL_FRAMEBUFFER && target != GL_READ_FRAMEBUFFER,
+        GL_INVALID_ENUM, 0);
+
     GLClientState* state = ctx->m_state;
 
-    bool fboCompleteByCodec =
-        ctx->checkFramebufferCompleteness(target, state);
-
-    if (!fboCompleteByCodec) {
-        state->setCheckFramebufferStatus(target, GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT);
-        return GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT;
-    } else {
-        // double check with underlying opengl to avoid craziness.
-        GLenum host_checkstatus = ctx->m_glCheckFramebufferStatus_enc(self, target);
-        state->setCheckFramebufferStatus(target, host_checkstatus);
-        if (host_checkstatus == GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS) return GL_FRAMEBUFFER_COMPLETE;
-        return host_checkstatus;
-    }
+    return state->checkFramebufferCompleteness(target);
 }
 
 void GL2Encoder::s_glGenVertexArrays(void* self, GLsizei n, GLuint* arrays) {
@@ -2950,11 +3178,17 @@
         ((access & GL_MAP_WRITE_BIT) &&
         (!(access & GL_MAP_INVALIDATE_RANGE_BIT) &&
          !(access & GL_MAP_INVALIDATE_BUFFER_BIT)))) {
+
+        if (ctx->m_state->shouldSkipHostMapBuffer(target))
+            return bits;
+
         ctx->glMapBufferRangeAEMU(
                 ctx, target,
                 offset, length,
                 access,
                 bits);
+
+        ctx->m_state->onHostMappedBuffer(target);
     }
 
     return bits;
@@ -3139,6 +3373,8 @@
     GLClientState* state = ctx->m_state;
 
     SET_ERROR_IF(!GLESv2Validation::textureTarget(ctx, target), GL_INVALID_ENUM);
+    SET_ERROR_IF(target == GL_TEXTURE_CUBE_MAP, GL_INVALID_ENUM);
+    fprintf(stderr, "%s: format: 0x%x\n", __func__, internalformat);
     // Filter compressed formats support.
     SET_ERROR_IF(!GLESv2Validation::supportedCompressedFormat(ctx, internalformat), GL_INVALID_ENUM);
     // Verify level <= log2(GL_MAX_TEXTURE_SIZE).
@@ -3155,14 +3391,13 @@
     // If unpack buffer is nonzero, verify unmapped state.
     SET_ERROR_IF(ctx->isBufferTargetMapped(GL_PIXEL_UNPACK_BUFFER), GL_INVALID_OPERATION);
     SET_ERROR_IF(width < 0 || height < 0, GL_INVALID_VALUE);
+
     // If unpack buffer is nonzero, verify buffer data fits.
     SET_ERROR_IF(ctx->boundBuffer(GL_PIXEL_UNPACK_BUFFER) &&
                  ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER) &&
                  (imageSize > ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER)->m_size),
                  GL_INVALID_OPERATION);
-    // TODO: Fix:
-    // If |imageSize| is inconsistent with compressed dimensions.
-    // SET_ERROR_IF(GLESv2Validation::compressedTexImageSize(internalformat, width, height, 1) != imageSize, GL_INVALID_VALUE);
+    SET_ERROR_IF(!ctx->m_state->compressedTexImageSizeCompatible(internalformat, width, height, 1, imageSize), GL_INVALID_VALUE);
 
     GLenum stateTarget = target;
     if (target == GL_TEXTURE_CUBE_MAP_POSITIVE_X ||
@@ -3173,7 +3408,7 @@
         target == GL_TEXTURE_CUBE_MAP_NEGATIVE_Z)
         stateTarget = GL_TEXTURE_CUBE_MAP;
     state->setBoundTextureInternalFormat(stateTarget, (GLint)internalformat);
-    state->setBoundTextureDims(stateTarget, level, width, height, 1);
+    state->setBoundTextureDims(stateTarget, target, level, width, height, 1);
 
     if (target == GL_TEXTURE_2D || target == GL_TEXTURE_EXTERNAL_OES) {
         ctx->override2DTextureTarget(target);
@@ -3200,13 +3435,28 @@
     GL2Encoder* ctx = (GL2Encoder*)self;
 
     SET_ERROR_IF(!GLESv2Validation::textureTarget(ctx, target), GL_INVALID_ENUM);
+    SET_ERROR_IF(target == GL_TEXTURE_CUBE_MAP, GL_INVALID_ENUM);
     // If unpack buffer is nonzero, verify unmapped state.
     SET_ERROR_IF(ctx->isBufferTargetMapped(GL_PIXEL_UNPACK_BUFFER), GL_INVALID_OPERATION);
+
+    GLenum stateTarget = target;
+    if (target == GL_TEXTURE_CUBE_MAP_POSITIVE_X ||
+        target == GL_TEXTURE_CUBE_MAP_POSITIVE_Y ||
+        target == GL_TEXTURE_CUBE_MAP_POSITIVE_Z ||
+        target == GL_TEXTURE_CUBE_MAP_NEGATIVE_X ||
+        target == GL_TEXTURE_CUBE_MAP_NEGATIVE_Y ||
+        target == GL_TEXTURE_CUBE_MAP_NEGATIVE_Z)
+        stateTarget = GL_TEXTURE_CUBE_MAP;
+    GLuint tex = ctx->m_state->getBoundTexture(stateTarget);
+
+    GLint internalFormat = ctx->m_state->queryTexInternalFormat(tex);
+    SET_ERROR_IF(internalFormat != format, GL_INVALID_OPERATION);
+    SET_ERROR_IF(level < 0, GL_INVALID_VALUE);
+
     GLint max_texture_size;
     GLint max_cube_map_texture_size;
     ctx->glGetIntegerv(ctx, GL_MAX_TEXTURE_SIZE, &max_texture_size);
     ctx->glGetIntegerv(ctx, GL_MAX_CUBE_MAP_TEXTURE_SIZE, &max_cube_map_texture_size);
-    SET_ERROR_IF(level < 0, GL_INVALID_VALUE);
     SET_ERROR_IF(level > ilog2(max_texture_size), GL_INVALID_VALUE);
     SET_ERROR_IF(level > ilog2(max_cube_map_texture_size), GL_INVALID_VALUE);
     SET_ERROR_IF(width < 0 || height < 0, GL_INVALID_VALUE);
@@ -3217,6 +3467,20 @@
                  GL_INVALID_OPERATION);
     SET_ERROR_IF(xoffset < 0 || yoffset < 0, GL_INVALID_VALUE);
 
+    GLint totalWidth = ctx->m_state->queryTexWidth(level, tex);
+    GLint totalHeight = ctx->m_state->queryTexHeight(level, tex);
+
+    if (GLESTextureUtils::isEtc2Format(internalFormat)) {
+        SET_ERROR_IF((width % 4) && (totalWidth != xoffset + width), GL_INVALID_OPERATION);
+        SET_ERROR_IF((height % 4) && (totalHeight != yoffset + height), GL_INVALID_OPERATION);
+        SET_ERROR_IF((xoffset % 4) || (yoffset % 4), GL_INVALID_OPERATION);
+    }
+
+    SET_ERROR_IF(totalWidth < xoffset + width, GL_INVALID_VALUE);
+    SET_ERROR_IF(totalHeight < yoffset + height, GL_INVALID_VALUE);
+
+    SET_ERROR_IF(!ctx->m_state->compressedTexImageSizeCompatible(internalFormat, width, height, 1, imageSize), GL_INVALID_VALUE);
+
     if (target == GL_TEXTURE_2D || target == GL_TEXTURE_EXTERNAL_OES) {
         ctx->override2DTextureTarget(target);
     }
@@ -3265,11 +3529,15 @@
                  GL_INVALID_VALUE);
 
     GLint ssbo_offset_align, ubo_offset_align;
-    ctx->s_glGetIntegerv(ctx, GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT, &ssbo_offset_align);
+
+    if (ctx->majorVersion() >= 3 && ctx->minorVersion() >= 1) {
+        ctx->s_glGetIntegerv(ctx, GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT, &ssbo_offset_align);
+        SET_ERROR_IF(target == GL_SHADER_STORAGE_BUFFER &&
+                     offset % ssbo_offset_align,
+                     GL_INVALID_VALUE);
+    }
+
     ctx->s_glGetIntegerv(ctx, GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &ubo_offset_align);
-    SET_ERROR_IF(target == GL_SHADER_STORAGE_BUFFER &&
-                 offset % ssbo_offset_align,
-                 GL_INVALID_VALUE);
     SET_ERROR_IF(target == GL_UNIFORM_BUFFER &&
                  offset % ubo_offset_align,
                  GL_INVALID_VALUE);
@@ -3345,22 +3613,33 @@
                   writetarget == GL_DISPATCH_INDIRECT_BUFFER ||
                   writetarget == GL_DRAW_INDIRECT_BUFFER ||
                   writetarget == GL_SHADER_STORAGE_BUFFER), GL_INVALID_ENUM);
-    SET_ERROR_IF(!ctx->boundBuffer(readtarget), GL_INVALID_OPERATION);
-    SET_ERROR_IF(!ctx->boundBuffer(writetarget), GL_INVALID_OPERATION);
+
+    GLuint readBufferId = ctx->boundBuffer(readtarget);
+    GLuint writeBufferId = ctx->boundBuffer(writetarget);
+
+    SET_ERROR_IF(!readBufferId || !writeBufferId, GL_INVALID_OPERATION);
+
     SET_ERROR_IF(ctx->isBufferTargetMapped(readtarget), GL_INVALID_OPERATION);
     SET_ERROR_IF(ctx->isBufferTargetMapped(writetarget), GL_INVALID_OPERATION);
+
     SET_ERROR_IF(readoffset < 0, GL_INVALID_VALUE);
     SET_ERROR_IF(writeoffset < 0, GL_INVALID_VALUE);
     SET_ERROR_IF(size < 0, GL_INVALID_VALUE);
+
+    BufferData* readBufferData = ctx->getBufferData(readtarget);
+    BufferData* writeBufferData = ctx->getBufferData(writetarget);
+
     SET_ERROR_IF(
-        ctx->getBufferData(readtarget) &&
-        (readoffset + size > ctx->getBufferData(readtarget)->m_size),
+        readBufferData &&
+        (readoffset + size > readBufferData->m_size),
         GL_INVALID_VALUE);
+
     SET_ERROR_IF(
-        ctx->getBufferData(writetarget) &&
-        (writeoffset + size > ctx->getBufferData(writetarget)->m_size),
+        writeBufferData &&
+        (writeoffset + size > writeBufferData->m_size),
         GL_INVALID_VALUE);
-    SET_ERROR_IF(readtarget == writetarget &&
+
+    SET_ERROR_IF(readBufferId == writeBufferId &&
                  !((writeoffset >= readoffset + size) ||
                    (readoffset >= writeoffset + size)),
                  GL_INVALID_VALUE);
@@ -3522,6 +3801,8 @@
 void GL2Encoder::s_glGetUniformIndices(void* self, GLuint program, GLsizei uniformCount, const GLchar ** uniformNames, GLuint* uniformIndices) {
     GL2Encoder* ctx = (GL2Encoder*)self;
 
+    VALIDATE_PROGRAM_NAME(program);
+
     if (!uniformCount) return;
 
     GLint err = GL_NO_ERROR;
@@ -3546,8 +3827,8 @@
     GLClientState* state = ctx->m_state;
     GLSharedGroupPtr shared = ctx->m_shared;
 
-    GLint hostLoc = location;
-    ctx->m_glUniform1ui_enc(self, hostLoc, v0);
+    ctx->m_state->validateUniform(false /* is float? */, true /* is unsigned? */, 1 /* columns */, 1 /* rows */, location, 1 /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform1ui_enc(self, location, v0);
 
     GLenum target;
     if (shared->setSamplerUniform(state->currentShaderProgram(), location, v0, &target)) {
@@ -3561,80 +3842,80 @@
 
 void GL2Encoder::s_glUniform2ui(void* self, GLint location, GLuint v0, GLuint v1) {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform2ui_enc(self, hostLoc, v0, v1);
+    ctx->m_state->validateUniform(false /* is float? */, true /* is unsigned? */, 2 /* columns */, 1 /* rows */, location, 1 /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform2ui_enc(self, location, v0, v1);
 }
 
 void GL2Encoder::s_glUniform3ui(void* self, GLint location, GLuint v0, GLuint v1, GLuint v2) {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform3ui_enc(self, hostLoc, v0, v1, v2);
+    ctx->m_state->validateUniform(false /* is float? */, true /* is unsigned? */, 3 /* columns */, 1 /* rows */, location, 1 /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform3ui_enc(self, location, v0, v1, v2);
 }
 
 void GL2Encoder::s_glUniform4ui(void* self, GLint location, GLint v0, GLuint v1, GLuint v2, GLuint v3) {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform4ui_enc(self, hostLoc, v0, v1, v2, v3);
+    ctx->m_state->validateUniform(false /* is float? */, true /* is unsigned? */, 4 /* columns */, 1 /* rows */, location, 1 /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform4ui_enc(self, location, v0, v1, v2, v3);
 }
 
 void GL2Encoder::s_glUniform1uiv(void* self, GLint location, GLsizei count, const GLuint *value) {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform1uiv_enc(self, hostLoc, count, value);
+    ctx->m_state->validateUniform(false /* is float? */, true /* is unsigned? */, 1 /* columns */, 1 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform1uiv_enc(self, location, count, value);
 }
 
 void GL2Encoder::s_glUniform2uiv(void* self, GLint location, GLsizei count, const GLuint *value) {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform2uiv_enc(self, hostLoc, count, value);
+    ctx->m_state->validateUniform(false /* is float? */, true /* is unsigned? */, 2 /* columns */, 1 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform2uiv_enc(self, location, count, value);
 }
 
 void GL2Encoder::s_glUniform3uiv(void* self, GLint location, GLsizei count, const GLuint *value) {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform3uiv_enc(self, hostLoc, count, value);
+    ctx->m_state->validateUniform(false /* is float? */, true /* is unsigned? */, 3 /* columns */, 1 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform3uiv_enc(self, location, count, value);
 }
 
 void GL2Encoder::s_glUniform4uiv(void* self, GLint location, GLsizei count, const GLuint *value) {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniform4uiv_enc(self, hostLoc, count, value);
+    ctx->m_state->validateUniform(false /* is float? */, true /* is unsigned? */, 4 /* columns */, 1 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniform4uiv_enc(self, location, count, value);
 }
 
 void GL2Encoder::s_glUniformMatrix2x3fv(void* self, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value) {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniformMatrix2x3fv_enc(self, hostLoc, count, transpose, value);
+    ctx->m_state->validateUniform(true /* is float? */, false /* is unsigned? */, 2 /* columns */, 3 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniformMatrix2x3fv_enc(self, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glUniformMatrix3x2fv(void* self, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value) {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniformMatrix3x2fv_enc(self, hostLoc, count, transpose, value);
+    ctx->m_state->validateUniform(true /* is float? */, false /* is unsigned? */, 3 /* columns */, 2 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniformMatrix3x2fv_enc(self, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glUniformMatrix2x4fv(void* self, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value) {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniformMatrix2x4fv_enc(self, hostLoc, count, transpose, value);
+    ctx->m_state->validateUniform(true /* is float? */, false /* is unsigned? */, 2 /* columns */, 4 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniformMatrix2x4fv_enc(self, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glUniformMatrix4x2fv(void* self, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value) {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniformMatrix4x2fv_enc(self, hostLoc, count, transpose, value);
+    ctx->m_state->validateUniform(true /* is float? */, false /* is unsigned? */, 4 /* columns */, 2 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniformMatrix4x2fv_enc(self, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glUniformMatrix3x4fv(void* self, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value) {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniformMatrix3x4fv_enc(self, hostLoc, count, transpose, value);
+    ctx->m_state->validateUniform(true /* is float? */, false /* is unsigned? */, 3 /* columns */, 4 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniformMatrix3x4fv_enc(self, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glUniformMatrix4x3fv(void* self, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value) {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glUniformMatrix4x3fv_enc(self, hostLoc, count, transpose, value);
+    ctx->m_state->validateUniform(true /* is float? */, false /* is unsigned? */, 4 /* columns */, 3 /* rows */, location, count /* count */, ctx->getErrorPtr());
+    ctx->m_glUniformMatrix4x3fv_enc(self, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glGetUniformuiv(void* self, GLuint program, GLint location, GLuint* params) {
@@ -3642,14 +3923,18 @@
     SET_ERROR_IF(!ctx->m_shared->isShaderOrProgramObject(program), GL_INVALID_VALUE);
     SET_ERROR_IF(!ctx->m_shared->isProgram(program), GL_INVALID_OPERATION);
     SET_ERROR_IF(!ctx->m_shared->isProgramInitialized(program), GL_INVALID_OPERATION);
-    GLint hostLoc = location;
-    SET_ERROR_IF(ctx->m_shared->getProgramUniformType(program,hostLoc)==0, GL_INVALID_OPERATION);
-    ctx->m_glGetUniformuiv_enc(self, program, hostLoc, params);
+    SET_ERROR_IF(ctx->m_shared->getProgramUniformType(program,location)==0, GL_INVALID_OPERATION);
+    SET_ERROR_IF(!ctx->m_shared->isProgramUniformLocationValid(program,location), GL_INVALID_OPERATION);
+    ctx->m_glGetUniformuiv_enc(self, program, location, params);
 }
 
 void GL2Encoder::s_glGetActiveUniformBlockiv(void* self, GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint* params) {
     GL2Encoder* ctx = (GL2Encoder*)self;
 
+    VALIDATE_PROGRAM_NAME(program);
+    SET_ERROR_IF(!GLESv2Validation::allowedGetActiveUniformBlock(pname), GL_INVALID_ENUM);
+    SET_ERROR_IF(uniformBlockIndex >= ctx->m_shared->getActiveUniformBlockCount(program), GL_INVALID_VALUE);
+
     // refresh client state's # active uniforms in this block
     if (pname == GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES) {
         // TODO if worth it: cache uniform count and other params,
@@ -3670,10 +3955,8 @@
 
 void GL2Encoder::s_glGetVertexAttribIiv(void* self, GLuint index, GLenum pname, GLint* params) {
     GL2Encoder *ctx = (GL2Encoder *)self;
-    assert(ctx->m_state);
-    GLint maxIndex;
-    ctx->glGetIntegerv(self, GL_MAX_VERTEX_ATTRIBS, &maxIndex);
-    SET_ERROR_IF(!(index < maxIndex), GL_INVALID_VALUE);
+    VALIDATE_VERTEX_ATTRIB_INDEX(index);
+    SET_ERROR_IF(!GLESv2Validation::allowedGetVertexAttrib(pname), GL_INVALID_ENUM);
 
     if (!ctx->m_state->getVertexAttribParameter<GLint>(index, pname, params)) {
         ctx->m_glGetVertexAttribIiv_enc(self, index, pname, params);
@@ -3682,10 +3965,8 @@
 
 void GL2Encoder::s_glGetVertexAttribIuiv(void* self, GLuint index, GLenum pname, GLuint* params) {
     GL2Encoder *ctx = (GL2Encoder *)self;
-    assert(ctx->m_state);
-    GLint maxIndex;
-    ctx->glGetIntegerv(self, GL_MAX_VERTEX_ATTRIBS, &maxIndex);
-    SET_ERROR_IF(!(index < maxIndex), GL_INVALID_VALUE);
+    VALIDATE_VERTEX_ATTRIB_INDEX(index);
+    SET_ERROR_IF(!GLESv2Validation::allowedGetVertexAttrib(pname), GL_INVALID_ENUM);
 
     if (!ctx->m_state->getVertexAttribParameter<GLuint>(index, pname, params)) {
         ctx->m_glGetVertexAttribIuiv_enc(self, index, pname, params);
@@ -3741,12 +4022,18 @@
     SET_ERROR_IF(target != GL_RENDERBUFFER, GL_INVALID_ENUM);
     SET_ERROR_IF(!GLESv2Validation::rboFormat(ctx, internalformat), GL_INVALID_ENUM);
 
+    SET_ERROR_IF(width < 0 || height < 0, GL_INVALID_VALUE);
+    GLint max_rb_size;
+    ctx->glGetIntegerv(ctx, GL_MAX_RENDERBUFFER_SIZE, &max_rb_size);
+    SET_ERROR_IF(width > max_rb_size || height > max_rb_size, GL_INVALID_VALUE);
+
     GLint max_samples;
     ctx->s_glGetInternalformativ(ctx, target, internalformat, GL_SAMPLES, 1, &max_samples);
     SET_ERROR_IF(samples > max_samples, GL_INVALID_OPERATION);
 
     state->setBoundRenderbufferFormat(internalformat);
     state->setBoundRenderbufferSamples(samples);
+    state->setBoundRenderbufferDimensions(width, height);
     ctx->m_glRenderbufferStorageMultisample_enc(
             self, target, samples, internalformat, width, height);
 }
@@ -3818,11 +4105,16 @@
 
     SET_ERROR_IF(!GLESv2Validation::framebufferTarget(ctx, target), GL_INVALID_ENUM);
     SET_ERROR_IF(!GLESv2Validation::framebufferAttachment(ctx, attachment), GL_INVALID_ENUM);
+    SET_ERROR_IF(texture != 0 && layer < 0, GL_INVALID_VALUE);
+    GLint maxArrayTextureLayers;
+    ctx->glGetIntegerv(ctx, GL_MAX_ARRAY_TEXTURE_LAYERS, &maxArrayTextureLayers);
+    SET_ERROR_IF(texture != 0 && layer > maxArrayTextureLayers - 1, GL_INVALID_VALUE);
+    SET_ERROR_IF(!ctx->m_state->boundFramebuffer(target), GL_INVALID_OPERATION);
     GLenum lastBoundTarget = state->queryTexLastBoundTarget(texture);
     SET_ERROR_IF(lastBoundTarget != GL_TEXTURE_2D_ARRAY &&
                  lastBoundTarget != GL_TEXTURE_3D,
                  GL_INVALID_OPERATION);
-    state->attachTextureObject(target, attachment, texture);
+    state->attachTextureObject(target, attachment, texture, level, layer);
 
     GLint max3DTextureSize;
     ctx->glGetIntegerv(ctx, GL_MAX_3D_TEXTURE_SIZE, &max3DTextureSize);
@@ -3849,7 +4141,7 @@
     SET_ERROR_IF(state->isBoundTextureImmutableFormat(target), GL_INVALID_OPERATION);
 
     state->setBoundTextureInternalFormat(target, internalformat);
-    state->setBoundTextureDims(target, -1, width, height, 1);
+    state->setBoundTextureDims(target, -1 /* set all cube dimensions */, -1, width, height, 1);
     state->setBoundTextureImmutableFormat(target);
 
     if (target == GL_TEXTURE_2D) {
@@ -3880,6 +4172,11 @@
         bufferMode != GL_SEPARATE_ATTRIBS,
         GL_INVALID_ENUM);
 
+    // NOTE: This only has an effect on the program that is being linked.
+    // The dEQP test in dEQP-GLES3.functional.negative_api doesn't know
+    // about this.
+    ctx->m_state->setTransformFeedbackVaryingsCountForLinking(count);
+
     if (!count) return;
 
     GLint err = GL_NO_ERROR;
@@ -3892,29 +4189,51 @@
 void GL2Encoder::s_glBeginTransformFeedback(void* self, GLenum primitiveMode) {
     GL2Encoder* ctx = (GL2Encoder*)self;
     GLClientState* state = ctx->m_state;
+    SET_ERROR_IF(
+        primitiveMode != GL_POINTS &&
+        primitiveMode != GL_LINES &&
+        primitiveMode != GL_TRIANGLES,
+        GL_INVALID_ENUM);
+    SET_ERROR_IF(
+        ctx->m_state->getTransformFeedbackActive(),
+        GL_INVALID_OPERATION);
+    // TODO:
+    // dEQP-GLES3.functional.lifetime.attach.deleted_output.buffer_transform_feedback
+    // SET_ERROR_IF(
+    //     !ctx->boundBuffer(GL_TRANSFORM_FEEDBACK_BUFFER),
+    //     GL_INVALID_OPERATION);
+    SET_ERROR_IF(
+        !ctx->m_state->currentProgram(), GL_INVALID_OPERATION);
     ctx->m_glBeginTransformFeedback_enc(ctx, primitiveMode);
-    state->setTransformFeedbackActiveUnpaused(true);
+    state->setTransformFeedbackActive(true);
+    state->setTransformFeedbackUnpaused(true);
 }
 
 void GL2Encoder::s_glEndTransformFeedback(void* self) {
     GL2Encoder* ctx = (GL2Encoder*)self;
     GLClientState* state = ctx->m_state;
+    SET_ERROR_IF(!state->getTransformFeedbackActive(), GL_INVALID_OPERATION);
     ctx->m_glEndTransformFeedback_enc(ctx);
-    state->setTransformFeedbackActiveUnpaused(false);
+    state->setTransformFeedbackActive(false);
+    state->setTransformFeedbackUnpaused(false);
 }
 
 void GL2Encoder::s_glPauseTransformFeedback(void* self) {
     GL2Encoder* ctx = (GL2Encoder*)self;
     GLClientState* state = ctx->m_state;
+    SET_ERROR_IF(!state->getTransformFeedbackActive(), GL_INVALID_OPERATION);
+    SET_ERROR_IF(!state->getTransformFeedbackUnpaused(), GL_INVALID_OPERATION);
     ctx->m_glPauseTransformFeedback_enc(ctx);
-    state->setTransformFeedbackActiveUnpaused(false);
+    state->setTransformFeedbackUnpaused(false);
 }
 
 void GL2Encoder::s_glResumeTransformFeedback(void* self) {
     GL2Encoder* ctx = (GL2Encoder*)self;
     GLClientState* state = ctx->m_state;
+    SET_ERROR_IF(!state->getTransformFeedbackActive(), GL_INVALID_OPERATION);
+    SET_ERROR_IF(state->getTransformFeedbackUnpaused(), GL_INVALID_OPERATION);
     ctx->m_glResumeTransformFeedback_enc(ctx);
-    state->setTransformFeedbackActiveUnpaused(true);
+    state->setTransformFeedbackUnpaused(true);
 }
 
 void GL2Encoder::s_glTexImage3D(void* self, GLenum target, GLint level, GLint internalFormat,
@@ -3928,6 +4247,11 @@
                  GL_INVALID_ENUM);
     SET_ERROR_IF(!GLESv2Validation::pixelType(ctx, type), GL_INVALID_ENUM);
     SET_ERROR_IF(!GLESv2Validation::pixelFormat(ctx, format), GL_INVALID_ENUM);
+    SET_ERROR_IF(!(GLESv2Validation::pixelOp(format,type)),GL_INVALID_OPERATION);
+    SET_ERROR_IF(!GLESv2Validation::pixelSizedFormat(ctx, internalFormat, format, type), GL_INVALID_OPERATION);
+    SET_ERROR_IF(target == GL_TEXTURE_3D &&
+        ((format == GL_DEPTH_COMPONENT) ||
+         (format == GL_DEPTH_STENCIL)), GL_INVALID_OPERATION);
 
     // If unpack buffer is nonzero, verify unmapped state.
     SET_ERROR_IF(ctx->isBufferTargetMapped(GL_PIXEL_UNPACK_BUFFER), GL_INVALID_OPERATION);
@@ -3943,7 +4267,13 @@
     SET_ERROR_IF(width < 0 || height < 0 || depth < 0, GL_INVALID_VALUE);
     SET_ERROR_IF(width > max_texture_size, GL_INVALID_VALUE);
     SET_ERROR_IF(height > max_texture_size, GL_INVALID_VALUE);
-    SET_ERROR_IF(depth > max_texture_size, GL_INVALID_VALUE);
+    if (target == GL_TEXTURE_3D) {
+        SET_ERROR_IF(depth > max_texture_size, GL_INVALID_VALUE);
+    } else {
+        GLint maxArrayTextureLayers;
+        ctx->glGetIntegerv(ctx, GL_MAX_ARRAY_TEXTURE_LAYERS, &maxArrayTextureLayers);
+        SET_ERROR_IF(depth > maxArrayTextureLayers, GL_INVALID_VALUE);
+    }
     SET_ERROR_IF(width > max_3d_texture_size, GL_INVALID_VALUE);
     SET_ERROR_IF(height > max_3d_texture_size, GL_INVALID_VALUE);
     SET_ERROR_IF(depth > max_3d_texture_size, GL_INVALID_VALUE);
@@ -3951,12 +4281,12 @@
     // If unpack buffer is nonzero, verify buffer data fits and is evenly divisible by the type.
     SET_ERROR_IF(ctx->boundBuffer(GL_PIXEL_UNPACK_BUFFER) &&
                  ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER) &&
-                 (ctx->m_state->pboNeededDataSize(width, height, depth, format, type, 0) >
+                 ((uintptr_t)data + ctx->m_state->pboNeededDataSize(width, height, depth, format, type, 0) >
                   ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER)->m_size),
                  GL_INVALID_OPERATION);
     SET_ERROR_IF(ctx->boundBuffer(GL_PIXEL_UNPACK_BUFFER) &&
                  ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER) &&
-                 (ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER)->m_size %
+                 ((uintptr_t)data %
                   glSizeof(type)),
                  GL_INVALID_OPERATION);
     SET_ERROR_IF(state->isBoundTextureImmutableFormat(target), GL_INVALID_OPERATION);
@@ -3964,7 +4294,7 @@
     state->setBoundTextureInternalFormat(target, internalFormat);
     state->setBoundTextureFormat(target, format);
     state->setBoundTextureType(target, type);
-    state->setBoundTextureDims(target, level, width, height, depth);
+    state->setBoundTextureDims(target, target, level, width, height, depth);
 
     if (ctx->boundBuffer(GL_PIXEL_UNPACK_BUFFER)) {
         ctx->glTexImage3DOffsetAEMU(
@@ -4012,13 +4342,12 @@
     // If unpack buffer is nonzero, verify buffer data fits and is evenly divisible by the type.
     SET_ERROR_IF(ctx->boundBuffer(GL_PIXEL_UNPACK_BUFFER) &&
                  ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER) &&
-                 (ctx->m_state->pboNeededDataSize(width, height, depth, format, type, 0) >
+                 ((uintptr_t)data + ctx->m_state->pboNeededDataSize(width, height, depth, format, type, 0) >
                   ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER)->m_size),
                  GL_INVALID_OPERATION);
     SET_ERROR_IF(ctx->boundBuffer(GL_PIXEL_UNPACK_BUFFER) &&
                  ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER) &&
-                 (ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER)->m_size %
-                  glSizeof(type)),
+                 ((uintptr_t)data % glSizeof(type)),
                  GL_INVALID_OPERATION);
     SET_ERROR_IF(!ctx->boundBuffer(GL_PIXEL_UNPACK_BUFFER) && !data, GL_INVALID_OPERATION);
     SET_ERROR_IF(xoffset < 0 || yoffset < 0 || zoffset < 0, GL_INVALID_VALUE);
@@ -4042,22 +4371,48 @@
     GL2Encoder* ctx = (GL2Encoder*)self;
     GLClientState* state = ctx->m_state;
 
+    SET_ERROR_IF(target != GL_TEXTURE_3D &&
+                 target != GL_TEXTURE_2D_ARRAY,
+                 GL_INVALID_ENUM);
     // Filter compressed formats support.
     SET_ERROR_IF(!GLESv2Validation::supportedCompressedFormat(ctx, internalformat), GL_INVALID_ENUM);
+    SET_ERROR_IF(target == GL_TEXTURE_CUBE_MAP, GL_INVALID_ENUM);
     // If unpack buffer is nonzero, verify unmapped state.
     SET_ERROR_IF(ctx->isBufferTargetMapped(GL_PIXEL_UNPACK_BUFFER), GL_INVALID_OPERATION);
     SET_ERROR_IF(width < 0 || height < 0 || depth < 0, GL_INVALID_VALUE);
     SET_ERROR_IF(border, GL_INVALID_VALUE);
+
+    GLint max_texture_size;
+    GLint max_3d_texture_size;
+    ctx->glGetIntegerv(ctx, GL_MAX_TEXTURE_SIZE, &max_texture_size);
+    ctx->glGetIntegerv(ctx, GL_MAX_3D_TEXTURE_SIZE, &max_3d_texture_size);
+    SET_ERROR_IF(level < 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(level > ilog2(max_texture_size), GL_INVALID_VALUE);
+    SET_ERROR_IF(level > ilog2(max_3d_texture_size), GL_INVALID_VALUE);
+
+    SET_ERROR_IF(width < 0 || height < 0 || depth < 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(width > max_texture_size, GL_INVALID_VALUE);
+    SET_ERROR_IF(height > max_texture_size, GL_INVALID_VALUE);
+    if (target == GL_TEXTURE_3D) {
+        SET_ERROR_IF(depth > max_texture_size, GL_INVALID_VALUE);
+    } else {
+        GLint maxArrayTextureLayers;
+        ctx->glGetIntegerv(ctx, GL_MAX_ARRAY_TEXTURE_LAYERS, &maxArrayTextureLayers);
+        SET_ERROR_IF(depth > maxArrayTextureLayers, GL_INVALID_VALUE);
+    }
+    SET_ERROR_IF(width > max_3d_texture_size, GL_INVALID_VALUE);
+    SET_ERROR_IF(height > max_3d_texture_size, GL_INVALID_VALUE);
+    SET_ERROR_IF(depth > max_3d_texture_size, GL_INVALID_VALUE);
+    SET_ERROR_IF(GLESTextureUtils::isAstcFormat(internalformat) && GL_TEXTURE_3D == target, GL_INVALID_OPERATION);
+
     // If unpack buffer is nonzero, verify buffer data fits.
     SET_ERROR_IF(ctx->boundBuffer(GL_PIXEL_UNPACK_BUFFER) &&
                  ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER) &&
                  (imageSize > ctx->getBufferData(GL_PIXEL_UNPACK_BUFFER)->m_size),
                  GL_INVALID_OPERATION);
-    // TODO: Fix:
-    // If |imageSize| is too small for compressed dimensions.
-    // SET_ERROR_IF(GLESv2Validation::compressedTexImageSize(internalformat, width, height, depth) > imageSize, GL_INVALID_VALUE);
+    SET_ERROR_IF(!ctx->m_state->compressedTexImageSizeCompatible(internalformat, width, height, depth, imageSize), GL_INVALID_VALUE);
     state->setBoundTextureInternalFormat(target, (GLint)internalformat);
-    state->setBoundTextureDims(target, level, width, height, depth);
+    state->setBoundTextureDims(target, target, level, width, height, depth);
 
     if (ctx->boundBuffer(GL_PIXEL_UNPACK_BUFFER)) {
         ctx->glCompressedTexImage3DOffsetAEMU(
@@ -4076,6 +4431,7 @@
     GL2Encoder* ctx = (GL2Encoder*)self;
 
     SET_ERROR_IF(!GLESv2Validation::textureTarget(ctx, target), GL_INVALID_ENUM);
+    SET_ERROR_IF(target == GL_TEXTURE_CUBE_MAP, GL_INVALID_ENUM);
     // If unpack buffer is nonzero, verify unmapped state.
     SET_ERROR_IF(ctx->isBufferTargetMapped(GL_PIXEL_UNPACK_BUFFER), GL_INVALID_OPERATION);
     SET_ERROR_IF(width < 0 || height < 0 || depth < 0, GL_INVALID_VALUE);
@@ -4086,6 +4442,52 @@
                  GL_INVALID_OPERATION);
     SET_ERROR_IF(xoffset < 0 || yoffset < 0 || zoffset < 0, GL_INVALID_VALUE);
 
+    GLint max_texture_size;
+    GLint max_3d_texture_size;
+    ctx->glGetIntegerv(ctx, GL_MAX_TEXTURE_SIZE, &max_texture_size);
+    ctx->glGetIntegerv(ctx, GL_MAX_3D_TEXTURE_SIZE, &max_3d_texture_size);
+    SET_ERROR_IF(level < 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(level > ilog2(max_texture_size), GL_INVALID_VALUE);
+    SET_ERROR_IF(level > ilog2(max_3d_texture_size), GL_INVALID_VALUE);
+    SET_ERROR_IF(width < 0 || height < 0 || depth < 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(xoffset < 0 || yoffset < 0 || zoffset < 0, GL_INVALID_VALUE);
+    GLenum stateTarget = target;
+    if (target == GL_TEXTURE_CUBE_MAP_POSITIVE_X ||
+        target == GL_TEXTURE_CUBE_MAP_POSITIVE_Y ||
+        target == GL_TEXTURE_CUBE_MAP_POSITIVE_Z ||
+        target == GL_TEXTURE_CUBE_MAP_NEGATIVE_X ||
+        target == GL_TEXTURE_CUBE_MAP_NEGATIVE_Y ||
+        target == GL_TEXTURE_CUBE_MAP_NEGATIVE_Z)
+        stateTarget = GL_TEXTURE_CUBE_MAP;
+
+    GLuint tex = ctx->m_state->getBoundTexture(stateTarget);
+    GLsizei neededWidth = xoffset + width;
+    GLsizei neededHeight = yoffset + height;
+    GLsizei neededDepth = zoffset + depth;
+
+    SET_ERROR_IF(tex &&
+                 (neededWidth > ctx->m_state->queryTexWidth(level, tex) ||
+                  neededHeight > ctx->m_state->queryTexHeight(level, tex) ||
+                  neededDepth > ctx->m_state->queryTexDepth(level, tex)),
+                 GL_INVALID_VALUE);
+
+    GLint internalFormat = ctx->m_state->queryTexInternalFormat(tex);
+    SET_ERROR_IF(internalFormat != format, GL_INVALID_OPERATION);
+
+    GLint totalWidth = ctx->m_state->queryTexWidth(level, tex);
+    GLint totalHeight = ctx->m_state->queryTexHeight(level, tex);
+
+    if (GLESTextureUtils::isEtc2Format(internalFormat)) {
+        SET_ERROR_IF((width % 4) && (totalWidth != xoffset + width), GL_INVALID_OPERATION);
+        SET_ERROR_IF((height % 4) && (totalHeight != yoffset + height), GL_INVALID_OPERATION);
+        SET_ERROR_IF((xoffset % 4) || (yoffset % 4), GL_INVALID_OPERATION);
+    }
+
+    SET_ERROR_IF(totalWidth < xoffset + width, GL_INVALID_VALUE);
+    SET_ERROR_IF(totalHeight < yoffset + height, GL_INVALID_VALUE);
+
+    SET_ERROR_IF(!ctx->m_state->compressedTexImageSizeCompatible(internalFormat, width, height, depth, imageSize), GL_INVALID_VALUE);
+
     if (ctx->boundBuffer(GL_PIXEL_UNPACK_BUFFER)) {
         ctx->glCompressedTexSubImage3DOffsetAEMU(
                 ctx, target, level,
@@ -4110,7 +4512,27 @@
                  GL_INVALID_ENUM);
     SET_ERROR_IF(!GLESv2Validation::pixelInternalFormat(internalformat), GL_INVALID_ENUM);
     SET_ERROR_IF(!state->getBoundTexture(target), GL_INVALID_OPERATION);
-    SET_ERROR_IF(levels < 1 || width < 1 || height < 1, GL_INVALID_VALUE);
+    SET_ERROR_IF(levels < 1 || width < 1 || height < 1 || depth < 1, GL_INVALID_VALUE);
+    GLint max_texture_size;
+    GLint max_3d_texture_size;
+    ctx->glGetIntegerv(ctx, GL_MAX_TEXTURE_SIZE, &max_texture_size);
+    ctx->glGetIntegerv(ctx, GL_MAX_3D_TEXTURE_SIZE, &max_3d_texture_size);
+    SET_ERROR_IF(width > max_texture_size, GL_INVALID_VALUE);
+    SET_ERROR_IF(height > max_texture_size, GL_INVALID_VALUE);
+    if (target == GL_TEXTURE_3D) {
+        SET_ERROR_IF(depth > max_texture_size, GL_INVALID_VALUE);
+    } else {
+        GLint maxArrayTextureLayers;
+        ctx->glGetIntegerv(ctx, GL_MAX_ARRAY_TEXTURE_LAYERS, &maxArrayTextureLayers);
+        SET_ERROR_IF(depth > maxArrayTextureLayers, GL_INVALID_VALUE);
+    }
+
+    SET_ERROR_IF(width > max_3d_texture_size, GL_INVALID_VALUE);
+    SET_ERROR_IF(height > max_3d_texture_size, GL_INVALID_VALUE);
+    SET_ERROR_IF(depth > max_3d_texture_size, GL_INVALID_VALUE);
+
+    SET_ERROR_IF(GLESTextureUtils::isAstcFormat(internalformat) && GL_TEXTURE_3D == target, GL_INVALID_OPERATION);
+
     SET_ERROR_IF(target == GL_TEXTURE_3D && (levels > ilog2((uint32_t)std::max(width, std::max(height, depth))) + 1),
                  GL_INVALID_OPERATION);
     SET_ERROR_IF(target == GL_TEXTURE_2D_ARRAY && (levels > ilog2((uint32_t)std::max(width, height)) + 1),
@@ -4118,7 +4540,7 @@
     SET_ERROR_IF(state->isBoundTextureImmutableFormat(target), GL_INVALID_OPERATION);
 
     state->setBoundTextureInternalFormat(target, internalformat);
-    state->setBoundTextureDims(target, -1, width, height, depth);
+    state->setBoundTextureDims(target, target, -1, width, height, depth);
     state->setBoundTextureImmutableFormat(target);
     ctx->m_glTexStorage3D_enc(ctx, target, levels, internalformat, width, height, depth);
     state->setBoundTextureImmutableFormat(target);
@@ -4130,6 +4552,7 @@
     SET_ERROR_IF(!isValidDrawMode(mode), GL_INVALID_ENUM);
     SET_ERROR_IF(count < 0, GL_INVALID_VALUE);
     SET_ERROR_IF(primcount < 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(ctx->m_state->checkFramebufferCompleteness(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE, GL_INVALID_FRAMEBUFFER_OPERATION);
 
     bool has_client_vertex_arrays = false;
     bool has_indirect_arrays = false;
@@ -4146,6 +4569,7 @@
         ctx->m_glDrawArraysInstanced_enc(ctx, mode, first, count, primcount);
     }
     ctx->m_stream->flush();
+    ctx->m_state->postDraw();
 }
 
 void GL2Encoder::s_glDrawElementsInstanced(void* self, GLenum mode, GLsizei count, GLenum type, const void* indices, GLsizei primcount)
@@ -4158,6 +4582,7 @@
     SET_ERROR_IF(primcount < 0, GL_INVALID_VALUE);
     SET_ERROR_IF(!(type == GL_UNSIGNED_BYTE || type == GL_UNSIGNED_SHORT || type == GL_UNSIGNED_INT), GL_INVALID_ENUM);
     SET_ERROR_IF(ctx->m_state->getTransformFeedbackActiveUnpaused(), GL_INVALID_OPERATION);
+    SET_ERROR_IF(ctx->m_state->checkFramebufferCompleteness(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE, GL_INVALID_FRAMEBUFFER_OPERATION);
 
     bool has_client_vertex_arrays = false;
     bool has_indirect_arrays = false;
@@ -4167,7 +4592,7 @@
 
     if (!has_client_vertex_arrays && !has_indirect_arrays) {
         // ALOGW("glDrawElements: no vertex arrays / buffers bound to the command\n");
-        GLenum status = ctx->m_glCheckFramebufferStatus_enc(self, GL_FRAMEBUFFER);
+        GLenum status = ctx->glCheckFramebufferStatus(self, GL_FRAMEBUFFER);
         SET_ERROR_IF(status != GL_FRAMEBUFFER_COMPLETE, GL_INVALID_FRAMEBUFFER_OPERATION);
     }
 
@@ -4237,6 +4662,7 @@
             ALOGE("glDrawElements: direct index & direct buffer data - will be implemented in later versions;\n");
         }
     }
+    ctx->m_state->postDraw();
 }
 
 void GL2Encoder::s_glDrawRangeElements(void* self, GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void* indices)
@@ -4249,6 +4675,7 @@
     SET_ERROR_IF(count < 0, GL_INVALID_VALUE);
     SET_ERROR_IF(!(type == GL_UNSIGNED_BYTE || type == GL_UNSIGNED_SHORT || type == GL_UNSIGNED_INT), GL_INVALID_ENUM);
     SET_ERROR_IF(ctx->m_state->getTransformFeedbackActiveUnpaused(), GL_INVALID_OPERATION);
+    SET_ERROR_IF(ctx->m_state->checkFramebufferCompleteness(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE, GL_INVALID_FRAMEBUFFER_OPERATION);
 
     bool has_client_vertex_arrays = false;
     bool has_indirect_arrays = false;
@@ -4258,7 +4685,7 @@
 
     if (!has_client_vertex_arrays && !has_indirect_arrays) {
         // ALOGW("glDrawElements: no vertex arrays / buffers bound to the command\n");
-        GLenum status = ctx->m_glCheckFramebufferStatus_enc(self, GL_FRAMEBUFFER);
+        GLenum status = ctx->glCheckFramebufferStatus(self, GL_FRAMEBUFFER);
         SET_ERROR_IF(status != GL_FRAMEBUFFER_COMPLETE, GL_INVALID_FRAMEBUFFER_OPERATION);
     }
 
@@ -4334,6 +4761,7 @@
             ALOGE("glDrawElements: direct index & direct buffer data - will be implemented in later versions;\n");
         }
     }
+    ctx->m_state->postDraw();
 }
 
 const GLubyte* GL2Encoder::s_glGetStringi(void* self, GLenum name, GLuint index) {
@@ -4383,12 +4811,12 @@
 void GL2Encoder::s_glGetProgramBinary(void* self, GLuint program, GLsizei bufSize, GLsizei* length, GLenum* binaryFormat, void* binary) {
     GL2Encoder *ctx = (GL2Encoder *)self;
 
-    SET_ERROR_IF(!ctx->m_shared->isProgram(program), GL_INVALID_OPERATION);
+    VALIDATE_PROGRAM_NAME(program);
 
     GLint linkStatus = 0;
-    ctx->glGetProgramiv(self, program, GL_LINK_STATUS, &linkStatus);
+    ctx->m_glGetProgramiv_enc(self, program, GL_LINK_STATUS, &linkStatus);
     GLint properLength = 0;
-    ctx->glGetProgramiv(self, program, GL_PROGRAM_BINARY_LENGTH, &properLength);
+    ctx->m_glGetProgramiv_enc(self, program, GL_PROGRAM_BINARY_LENGTH, &properLength);
 
     SET_ERROR_IF(!linkStatus, GL_INVALID_OPERATION);
     SET_ERROR_IF(bufSize < properLength, GL_INVALID_OPERATION);
@@ -4401,6 +4829,7 @@
 
     SET_ERROR_IF(!GLESv2Validation::readPixelsFormat(format), GL_INVALID_ENUM);
     SET_ERROR_IF(!GLESv2Validation::readPixelsType(type), GL_INVALID_ENUM);
+    SET_ERROR_IF(!(GLESv2Validation::pixelOp(format,type)),GL_INVALID_OPERATION);
     SET_ERROR_IF(width < 0 || height < 0, GL_INVALID_VALUE);
     SET_ERROR_IF(ctx->isBufferTargetMapped(GL_PIXEL_PACK_BUFFER), GL_INVALID_OPERATION);
     SET_ERROR_IF(ctx->boundBuffer(GL_PIXEL_PACK_BUFFER) &&
@@ -4408,6 +4837,24 @@
                  (ctx->m_state->pboNeededDataSize(width, height, 1, format, type, 1) >
                   ctx->getBufferData(GL_PIXEL_PACK_BUFFER)->m_size),
                  GL_INVALID_OPERATION);
+    SET_ERROR_IF(ctx->s_glCheckFramebufferStatus(ctx, GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE, GL_INVALID_FRAMEBUFFER_OPERATION);
+
+    // now is complete
+    // GL_INVALID_OPERATION is generated if GL_READ_FRAMEBUFFER_BINDING is nonzero, the read fbo is complete, and the value of
+    // GL_SAMPLE_BUFFERS for the read framebuffer is greater than zero
+    if (ctx->m_state->boundFramebuffer(GL_READ_FRAMEBUFFER) &&
+        ctx->s_glCheckFramebufferStatus(ctx, GL_READ_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE) {
+        FboFormatInfo resInfo;
+        ctx->m_state->getBoundFramebufferFormat(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, &resInfo);
+        if (resInfo.type == FBO_ATTACHMENT_RENDERBUFFER) {
+            SET_ERROR_IF(resInfo.rb_multisamples > 0, GL_INVALID_OPERATION);
+        }
+        if (resInfo.type == FBO_ATTACHMENT_TEXTURE) {
+            SET_ERROR_IF(resInfo.tex_multisamples > 0, GL_INVALID_OPERATION);
+        }
+    }
+
+
     /*
 GL_INVALID_OPERATION is generated if the readbuffer of the currently bound framebuffer is a fixed point normalized surface and format and type are neither GL_RGBA and GL_UNSIGNED_BYTE, respectively, nor the format/type pair returned by querying GL_IMPLEMENTATION_COLOR_READ_FORMAT and GL_IMPLEMENTATION_COLOR_READ_TYPE.
 
@@ -4436,6 +4883,7 @@
                 ctx, x, y, width, height,
                 format, type, pixels);
     }
+    ctx->m_state->postReadPixels();
 }
 
 // Track enabled state for some things like:
@@ -4443,10 +4891,16 @@
 void GL2Encoder::s_glEnable(void* self, GLenum what) {
     GL2Encoder *ctx = (GL2Encoder *)self;
 
+	SET_ERROR_IF(!GLESv2Validation::allowedEnable(ctx->majorVersion(), ctx->minorVersion(), what), GL_INVALID_ENUM);
+    if (!ctx->m_state) return;
+
     switch (what) {
     case GL_PRIMITIVE_RESTART_FIXED_INDEX:
         ctx->m_primitiveRestartEnabled = true;
         break;
+    case GL_STENCIL_TEST:
+        ctx->m_state->state_GL_STENCIL_TEST = true;
+        break;
     }
 
     ctx->m_glEnable_enc(ctx, what);
@@ -4455,10 +4909,16 @@
 void GL2Encoder::s_glDisable(void* self, GLenum what) {
     GL2Encoder *ctx = (GL2Encoder *)self;
 
+	SET_ERROR_IF(!GLESv2Validation::allowedEnable(ctx->majorVersion(), ctx->minorVersion(), what), GL_INVALID_ENUM);
+    if (!ctx->m_state) return;
+
     switch (what) {
     case GL_PRIMITIVE_RESTART_FIXED_INDEX:
         ctx->m_primitiveRestartEnabled = false;
         break;
+    case GL_STENCIL_TEST:
+        ctx->m_state->state_GL_STENCIL_TEST = false;
+        break;
     }
 
     ctx->m_glDisable_enc(ctx, what);
@@ -4467,7 +4927,18 @@
 void GL2Encoder::s_glClearBufferiv(void* self, GLenum buffer, GLint drawBuffer, const GLint * value) {
     GL2Encoder *ctx = (GL2Encoder *)self;
 
-    SET_ERROR_IF(buffer == GL_DEPTH || buffer == GL_DEPTH_STENCIL, GL_INVALID_ENUM);
+    SET_ERROR_IF(buffer != GL_COLOR && buffer != GL_STENCIL, GL_INVALID_ENUM);
+
+    GLint maxDrawBuffers;
+    ctx->glGetIntegerv(ctx, GL_MAX_DRAW_BUFFERS, &maxDrawBuffers);
+
+    SET_ERROR_IF(!value, GL_INVALID_VALUE);
+
+    if (buffer == GL_COLOR) {
+        SET_ERROR_IF(drawBuffer < 0 || drawBuffer>= maxDrawBuffers, GL_INVALID_VALUE);
+    } else {
+        SET_ERROR_IF(drawBuffer != 0, GL_INVALID_VALUE);
+    }
 
     ctx->m_glClearBufferiv_enc(ctx, buffer, drawBuffer, value);
 }
@@ -4475,7 +4946,12 @@
 void GL2Encoder::s_glClearBufferuiv(void* self, GLenum buffer, GLint drawBuffer, const GLuint * value) {
     GL2Encoder *ctx = (GL2Encoder *)self;
 
-    SET_ERROR_IF(buffer == GL_DEPTH || buffer == GL_STENCIL || buffer == GL_DEPTH_STENCIL, GL_INVALID_ENUM);
+    SET_ERROR_IF(buffer != GL_COLOR, GL_INVALID_ENUM);
+    SET_ERROR_IF(!value, GL_INVALID_VALUE);
+
+    GLint maxDrawBuffers;
+    ctx->glGetIntegerv(ctx, GL_MAX_DRAW_BUFFERS, &maxDrawBuffers);
+    SET_ERROR_IF(drawBuffer < 0 || drawBuffer>= maxDrawBuffers, GL_INVALID_VALUE);
 
     ctx->m_glClearBufferuiv_enc(ctx, buffer, drawBuffer, value);
 }
@@ -4483,11 +4959,31 @@
 void GL2Encoder::s_glClearBufferfv(void* self, GLenum buffer, GLint drawBuffer, const GLfloat * value) {
     GL2Encoder *ctx = (GL2Encoder *)self;
 
-    SET_ERROR_IF(buffer == GL_STENCIL || buffer == GL_DEPTH_STENCIL, GL_INVALID_ENUM);
+    SET_ERROR_IF(buffer != GL_COLOR && buffer != GL_DEPTH, GL_INVALID_ENUM);
+
+    SET_ERROR_IF(!value, GL_INVALID_VALUE);
+
+    GLint maxDrawBuffers;
+    ctx->glGetIntegerv(ctx, GL_MAX_DRAW_BUFFERS, &maxDrawBuffers);
+
+    if (buffer == GL_COLOR) {
+        SET_ERROR_IF(drawBuffer < 0 || drawBuffer>= maxDrawBuffers, GL_INVALID_VALUE);
+    } else {
+        SET_ERROR_IF(drawBuffer != 0, GL_INVALID_VALUE);
+    }
 
     ctx->m_glClearBufferfv_enc(ctx, buffer, drawBuffer, value);
 }
 
+void GL2Encoder::s_glClearBufferfi(void* self, GLenum buffer, GLint drawBuffer, float depth, int stencil) {
+    GL2Encoder *ctx = (GL2Encoder *)self;
+
+    SET_ERROR_IF(buffer != GL_DEPTH_STENCIL, GL_INVALID_ENUM);
+    SET_ERROR_IF(drawBuffer != 0, GL_INVALID_VALUE);
+
+    ctx->m_glClearBufferfi_enc(ctx, buffer, drawBuffer, depth, stencil);
+}
+
 void GL2Encoder::s_glBlitFramebuffer(void* self, GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter) {
     GL2Encoder *ctx = (GL2Encoder *)self;
     GLClientState* state = ctx->m_state;
@@ -4495,6 +4991,7 @@
     bool validateColor = mask & GL_COLOR_BUFFER_BIT;
     bool validateDepth = mask & GL_DEPTH_BUFFER_BIT;
     bool validateStencil = mask & GL_STENCIL_BUFFER_BIT;
+    bool validateDepthOrStencil = validateDepth || validateStencil;
 
     FboFormatInfo read_fbo_format_info;
     FboFormatInfo draw_fbo_format_info;
@@ -4502,6 +4999,13 @@
         state->getBoundFramebufferFormat(GL_READ_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, &read_fbo_format_info);
         state->getBoundFramebufferFormat(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, &draw_fbo_format_info);
 
+        if (read_fbo_format_info.type == FBO_ATTACHMENT_TEXTURE) {
+            SET_ERROR_IF(
+                GL_LINEAR == filter &&
+                GLESv2Validation::isIntegerFormat(read_fbo_format_info.tex_format),
+                    GL_INVALID_OPERATION);
+        }
+
         if (read_fbo_format_info.type == FBO_ATTACHMENT_TEXTURE &&
             draw_fbo_format_info.type == FBO_ATTACHMENT_TEXTURE) {
             SET_ERROR_IF(
@@ -4546,6 +5050,10 @@
         }
     }
 
+    if (validateDepthOrStencil) {
+        SET_ERROR_IF(filter != GL_NEAREST, GL_INVALID_OPERATION);
+    }
+
     state->getBoundFramebufferFormat(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, &draw_fbo_format_info);
     SET_ERROR_IF(
             draw_fbo_format_info.type == FBO_ATTACHMENT_RENDERBUFFER &&
@@ -4633,6 +5141,17 @@
                    GLESv2Validation::filterableTexFormat(ctx, internalformat)),
                  GL_INVALID_OPERATION);
 
+    GLenum stateTarget = target;
+    if (target == GL_TEXTURE_CUBE_MAP_POSITIVE_X ||
+        target == GL_TEXTURE_CUBE_MAP_POSITIVE_Y ||
+        target == GL_TEXTURE_CUBE_MAP_POSITIVE_Z ||
+        target == GL_TEXTURE_CUBE_MAP_NEGATIVE_X ||
+        target == GL_TEXTURE_CUBE_MAP_NEGATIVE_Y ||
+        target == GL_TEXTURE_CUBE_MAP_NEGATIVE_Z)
+        stateTarget = GL_TEXTURE_CUBE_MAP;
+
+    SET_ERROR_IF(!ctx->m_state->isBoundTextureComplete(stateTarget), GL_INVALID_OPERATION);
+
     if (target == GL_TEXTURE_2D) {
         ctx->override2DTextureTarget(target);
     }
@@ -4649,35 +5168,42 @@
     GLint maxCombinedUnits;
     ctx->glGetIntegerv(ctx, GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &maxCombinedUnits);
     SET_ERROR_IF(unit >= maxCombinedUnits, GL_INVALID_VALUE);
-
-    ctx->doSamplerBindEncodeCached(unit, sampler);
-}
-
-void GL2Encoder::doSamplerBindEncodeCached(GLuint unit, GLuint sampler) {
-    if (m_state->isSamplerBindNoOp(unit, sampler)) return;
-    m_glBindSampler_enc(this, unit, sampler);
-    m_state->bindSampler(unit, sampler);
+    SET_ERROR_IF(!ctx->m_state->samplerExists(sampler), GL_INVALID_OPERATION);
+    if (ctx->m_state->isSamplerBindNoOp(unit, sampler)) return;
+    ctx->m_glBindSampler_enc(ctx, unit, sampler);
+    ctx->m_state->bindSampler(unit, sampler);
 }
 
 void GL2Encoder::s_glDeleteSamplers(void* self, GLsizei n, const GLuint* samplers) {
     GL2Encoder *ctx = (GL2Encoder *)self;
     ctx->m_state->onDeleteSamplers(n, samplers);
+    ctx->m_state->setExistence(GLClientState::ObjectType::Sampler, false, n, samplers);
     ctx->m_glDeleteSamplers_enc(ctx, n, samplers);
 }
 
 GLsync GL2Encoder::s_glFenceSync(void* self, GLenum condition, GLbitfield flags) {
     GL2Encoder *ctx = (GL2Encoder *)self;
+    RET_AND_SET_ERROR_IF(condition != GL_SYNC_GPU_COMMANDS_COMPLETE, GL_INVALID_ENUM, 0);
+    RET_AND_SET_ERROR_IF(flags != 0, GL_INVALID_VALUE, 0);
     uint64_t syncHandle = ctx->glFenceSyncAEMU(ctx, condition, flags);
-    return (GLsync)(uintptr_t)syncHandle;
+
+    GLsync res = (GLsync)(uintptr_t)syncHandle;
+    GLClientState::onFenceCreated(res);
+    return res;
 }
 
 GLenum GL2Encoder::s_glClientWaitSync(void* self, GLsync wait_on, GLbitfield flags, GLuint64 timeout) {
     GL2Encoder *ctx = (GL2Encoder *)self;
+    RET_AND_SET_ERROR_IF(!GLClientState::fenceExists(wait_on), GL_INVALID_VALUE, GL_WAIT_FAILED);
+    RET_AND_SET_ERROR_IF(flags && !(flags & GL_SYNC_FLUSH_COMMANDS_BIT), GL_INVALID_VALUE, GL_WAIT_FAILED);
     return ctx->glClientWaitSyncAEMU(ctx, (uint64_t)(uintptr_t)wait_on, flags, timeout);
 }
 
 void GL2Encoder::s_glWaitSync(void* self, GLsync wait_on, GLbitfield flags, GLuint64 timeout) {
     GL2Encoder *ctx = (GL2Encoder *)self;
+    SET_ERROR_IF(flags != 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(timeout != GL_TIMEOUT_IGNORED, GL_INVALID_VALUE);
+    SET_ERROR_IF(!GLClientState::fenceExists(wait_on), GL_INVALID_VALUE);
     ctx->glWaitSyncAEMU(ctx, (uint64_t)(uintptr_t)wait_on, flags, timeout);
 }
 
@@ -4686,6 +5212,8 @@
 
     if (!sync) return;
 
+    SET_ERROR_IF(!GLClientState::fenceExists(sync), GL_INVALID_VALUE);
+    GLClientState::onFenceDestroyed(sync);
     ctx->glDeleteSyncAEMU(ctx, (uint64_t)(uintptr_t)sync);
 }
 
@@ -4697,7 +5225,9 @@
 void GL2Encoder::s_glGetSynciv(void* self, GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values) {
     GL2Encoder *ctx = (GL2Encoder *)self;
 
+    SET_ERROR_IF(!GLESv2Validation::allowedGetSyncParam(pname), GL_INVALID_ENUM);
     SET_ERROR_IF(bufSize < 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(!GLClientState::fenceExists(sync), GL_INVALID_VALUE);
 
     return ctx->glGetSyncivAEMU(ctx, (uint64_t)(uintptr_t)sync, pname, bufSize, length, values);
 }
@@ -4819,6 +5349,10 @@
 void GL2Encoder::s_glGetShaderiv(void* self, GLuint shader, GLenum pname, GLint* params) {
     GL2Encoder *ctx = (GL2Encoder *)self;
     ctx->m_glGetShaderiv_enc(self, shader, pname, params);
+
+    SET_ERROR_IF(!GLESv2Validation::allowedGetShader(pname), GL_INVALID_ENUM);
+    VALIDATE_SHADER_NAME(shader);
+	
     if (pname == GL_SHADER_SOURCE_LENGTH) {
         ShaderData* shaderData = ctx->m_shared->getShaderData(shader);
         if (shaderData) {
@@ -4848,7 +5382,7 @@
     }
 }
 
-GLuint GL2Encoder::s_glCreateShaderProgramv(void* self, GLenum type, GLsizei count, const char** strings) {
+GLuint GL2Encoder::s_glCreateShaderProgramv(void* self, GLenum shaderType, GLsizei count, const char** strings) {
 
     GLint* length = NULL;
     GL2Encoder* ctx = (GL2Encoder*)self;
@@ -4869,12 +5403,13 @@
         return -1;
     }
 
-    GLuint res = ctx->glCreateShaderProgramvAEMU(ctx, type, count, str, len + 1);
+    GLuint res = ctx->glCreateShaderProgramvAEMU(ctx, shaderType, count, str, len + 1);
     delete [] str;
 
     // Phase 2: do glLinkProgram-related initialization for locationWorkARound
     GLint linkStatus = 0;
-    ctx->glGetProgramiv(self, res, GL_LINK_STATUS ,&linkStatus);
+    ctx->m_glGetProgramiv_enc(self, res, GL_LINK_STATUS ,&linkStatus);
+    ctx->m_shared->setProgramLinkStatus(res, linkStatus);
     if (!linkStatus) {
         ctx->m_shared->deleteShaderProgramDataById(spDataId);
         return -1;
@@ -4883,20 +5418,39 @@
     ctx->m_shared->associateGLShaderProgram(res, spDataId);
 
     GLint numUniforms = 0;
-    ctx->glGetProgramiv(self, res, GL_ACTIVE_UNIFORMS, &numUniforms);
-    ctx->m_shared->initShaderProgramData(res, numUniforms);
+    GLint numAttributes = 0;
+    ctx->m_glGetProgramiv_enc(self, res, GL_ACTIVE_UNIFORMS, &numUniforms);
+    ctx->m_glGetProgramiv_enc(self, res, GL_ACTIVE_ATTRIBUTES, &numAttributes);
+    ctx->m_shared->initShaderProgramData(res, numUniforms, numAttributes);
 
     GLint maxLength=0;
-    ctx->glGetProgramiv(self, res, GL_ACTIVE_UNIFORM_MAX_LENGTH, &maxLength);
+    GLint maxAttribLength=0;
+    ctx->m_glGetProgramiv_enc(self, res, GL_ACTIVE_UNIFORM_MAX_LENGTH, &maxLength);
+    ctx->m_glGetProgramiv_enc(self, res, GL_ACTIVE_ATTRIBUTE_MAX_LENGTH, &maxAttribLength);
 
-    GLint size; GLenum uniformType; GLchar* name = new GLchar[maxLength + 1];
+    size_t bufLen = maxLength > maxAttribLength ? maxLength : maxAttribLength;
+    GLint size; GLenum type; GLchar *name = new GLchar[bufLen + 1];
 
     for (GLint i = 0; i < numUniforms; ++i) {
-        ctx->glGetActiveUniform(self, res, i, maxLength, NULL, &size, &uniformType, name);
+        ctx->m_glGetActiveUniform_enc(self, res, i, maxLength, NULL, &size, &type, name);
         GLint location = ctx->m_glGetUniformLocation_enc(self, res, name);
-        ctx->m_shared->setShaderProgramIndexInfo(res, i, location, size, uniformType, name);
+        ctx->m_shared->setShaderProgramIndexInfo(res, i, location, size, type, name);
     }
 
+    for (GLint i = 0; i < numAttributes; ++i) {
+        ctx->m_glGetActiveAttrib_enc(self, res, i, maxAttribLength,  NULL, &size, &type, name);
+        GLint location = ctx->m_glGetAttribLocation_enc(self, res, name);
+        ctx->m_shared->setProgramAttribInfo(res, i, location, size, type, name);
+    }
+
+    GLint numBlocks;
+    ctx->m_glGetProgramiv_enc(ctx, res, GL_ACTIVE_UNIFORM_BLOCKS, &numBlocks);
+    ctx->m_shared->setActiveUniformBlockCountForProgram(res, numBlocks);
+
+    GLint tfVaryingsCount;
+    ctx->m_glGetProgramiv_enc(ctx, res, GL_TRANSFORM_FEEDBACK_VARYINGS, &tfVaryingsCount);
+    ctx->m_shared->setTransformFeedbackVaryingsCountForProgram(res, tfVaryingsCount);
+
     delete [] name;
 
     return res;
@@ -4905,22 +5459,19 @@
 void GL2Encoder::s_glProgramUniform1f(void* self, GLuint program, GLint location, GLfloat v0)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform1f_enc(self, program, hostLoc, v0);
+    ctx->m_glProgramUniform1f_enc(self, program, location, v0);
 }
 
 void GL2Encoder::s_glProgramUniform1fv(void* self, GLuint program, GLint location, GLsizei count, const GLfloat *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform1fv_enc(self, program, hostLoc, count, value);
+    ctx->m_glProgramUniform1fv_enc(self, program, location, count, value);
 }
 
 void GL2Encoder::s_glProgramUniform1i(void* self, GLuint program, GLint location, GLint v0)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform1i_enc(self, program, hostLoc, v0);
+    ctx->m_glProgramUniform1i_enc(self, program, location, v0);
 
     GLClientState* state = ctx->m_state;
     GLSharedGroupPtr shared = ctx->m_shared;
@@ -4938,15 +5489,13 @@
 void GL2Encoder::s_glProgramUniform1iv(void* self, GLuint program, GLint location, GLsizei count, const GLint *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform1iv_enc(self, program, hostLoc, count, value);
+    ctx->m_glProgramUniform1iv_enc(self, program, location, count, value);
 }
 
 void GL2Encoder::s_glProgramUniform1ui(void* self, GLuint program, GLint location, GLuint v0)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform1ui_enc(self, program, hostLoc, v0);
+    ctx->m_glProgramUniform1ui_enc(self, program, location, v0);
 
     GLClientState* state = ctx->m_state;
     GLSharedGroupPtr shared = ctx->m_shared;
@@ -4964,201 +5513,176 @@
 void GL2Encoder::s_glProgramUniform1uiv(void* self, GLuint program, GLint location, GLsizei count, const GLuint *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform1uiv_enc(self, program, hostLoc, count, value);
+    ctx->m_glProgramUniform1uiv_enc(self, program, location, count, value);
 }
 
 void GL2Encoder::s_glProgramUniform2f(void* self, GLuint program, GLint location, GLfloat v0, GLfloat v1)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform2f_enc(self, program, hostLoc, v0, v1);
+    ctx->m_glProgramUniform2f_enc(self, program, location, v0, v1);
 }
 
 void GL2Encoder::s_glProgramUniform2fv(void* self, GLuint program, GLint location, GLsizei count, const GLfloat *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform2fv_enc(self, program, hostLoc, count, value);
+    ctx->m_glProgramUniform2fv_enc(self, program, location, count, value);
 }
 
 void GL2Encoder::s_glProgramUniform2i(void* self, GLuint program, GLint location, GLint v0, GLint v1)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform2i_enc(self, program, hostLoc, v0, v1);
+    ctx->m_glProgramUniform2i_enc(self, program, location, v0, v1);
 }
 
 void GL2Encoder::s_glProgramUniform2iv(void* self, GLuint program, GLint location, GLsizei count, const GLint *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform2iv_enc(self, program, hostLoc, count, value);
+    ctx->m_glProgramUniform2iv_enc(self, program, location, count, value);
 }
 
 void GL2Encoder::s_glProgramUniform2ui(void* self, GLuint program, GLint location, GLint v0, GLuint v1)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform2ui_enc(self, program, hostLoc, v0, v1);
+    ctx->m_glProgramUniform2ui_enc(self, program, location, v0, v1);
 }
 
 void GL2Encoder::s_glProgramUniform2uiv(void* self, GLuint program, GLint location, GLsizei count, const GLuint *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform2uiv_enc(self, program, hostLoc, count, value);
+    ctx->m_glProgramUniform2uiv_enc(self, program, location, count, value);
 }
 
 void GL2Encoder::s_glProgramUniform3f(void* self, GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform3f_enc(self, program, hostLoc, v0, v1, v2);
+    ctx->m_glProgramUniform3f_enc(self, program, location, v0, v1, v2);
 }
 
 void GL2Encoder::s_glProgramUniform3fv(void* self, GLuint program, GLint location, GLsizei count, const GLfloat *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform3fv_enc(self, program, hostLoc, count, value);
+    ctx->m_glProgramUniform3fv_enc(self, program, location, count, value);
 }
 
 void GL2Encoder::s_glProgramUniform3i(void* self, GLuint program, GLint location, GLint v0, GLint v1, GLint v2)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform3i_enc(self, program, hostLoc, v0, v1, v2);
+    ctx->m_glProgramUniform3i_enc(self, program, location, v0, v1, v2);
 }
 
 void GL2Encoder::s_glProgramUniform3iv(void* self, GLuint program, GLint location, GLsizei count, const GLint *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform3iv_enc(self, program, hostLoc, count, value);
+    ctx->m_glProgramUniform3iv_enc(self, program, location, count, value);
 }
 
 void GL2Encoder::s_glProgramUniform3ui(void* self, GLuint program, GLint location, GLint v0, GLint v1, GLuint v2)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform3ui_enc(self, program, hostLoc, v0, v1, v2);
+    ctx->m_glProgramUniform3ui_enc(self, program, location, v0, v1, v2);
 }
 
 void GL2Encoder::s_glProgramUniform3uiv(void* self, GLuint program, GLint location, GLsizei count, const GLuint *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform3uiv_enc(self, program, hostLoc, count, value);
+    ctx->m_glProgramUniform3uiv_enc(self, program, location, count, value);
 }
 
 void GL2Encoder::s_glProgramUniform4f(void* self, GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform4f_enc(self, program, hostLoc, v0, v1, v2, v3);
+    ctx->m_glProgramUniform4f_enc(self, program, location, v0, v1, v2, v3);
 }
 
 void GL2Encoder::s_glProgramUniform4fv(void* self, GLuint program, GLint location, GLsizei count, const GLfloat *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform4fv_enc(self, program, hostLoc, count, value);
+    ctx->m_glProgramUniform4fv_enc(self, program, location, count, value);
 }
 
 void GL2Encoder::s_glProgramUniform4i(void* self, GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform4i_enc(self, program, hostLoc, v0, v1, v2, v3);
+    ctx->m_glProgramUniform4i_enc(self, program, location, v0, v1, v2, v3);
 }
 
 void GL2Encoder::s_glProgramUniform4iv(void* self, GLuint program, GLint location, GLsizei count, const GLint *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform4iv_enc(self, program, hostLoc, count, value);
+    ctx->m_glProgramUniform4iv_enc(self, program, location, count, value);
 }
 
 void GL2Encoder::s_glProgramUniform4ui(void* self, GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLuint v3)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform4ui_enc(self, program, hostLoc, v0, v1, v2, v3);
+    ctx->m_glProgramUniform4ui_enc(self, program, location, v0, v1, v2, v3);
 }
 
 void GL2Encoder::s_glProgramUniform4uiv(void* self, GLuint program, GLint location, GLsizei count, const GLuint *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniform4uiv_enc(self, program, hostLoc, count, value);
+    ctx->m_glProgramUniform4uiv_enc(self, program, location, count, value);
 }
 
 void GL2Encoder::s_glProgramUniformMatrix2fv(void* self, GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniformMatrix2fv_enc(self, program, hostLoc, count, transpose, value);
+    ctx->m_glProgramUniformMatrix2fv_enc(self, program, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glProgramUniformMatrix2x3fv(void* self, GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniformMatrix2x3fv_enc(self, program, hostLoc, count, transpose, value);
+    ctx->m_glProgramUniformMatrix2x3fv_enc(self, program, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glProgramUniformMatrix2x4fv(void* self, GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniformMatrix2x4fv_enc(self, program, hostLoc, count, transpose, value);
+    ctx->m_glProgramUniformMatrix2x4fv_enc(self, program, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glProgramUniformMatrix3fv(void* self, GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniformMatrix3fv_enc(self, program, hostLoc, count, transpose, value);
+    ctx->m_glProgramUniformMatrix3fv_enc(self, program, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glProgramUniformMatrix3x2fv(void* self, GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniformMatrix3x2fv_enc(self, program, hostLoc, count, transpose, value);
+    ctx->m_glProgramUniformMatrix3x2fv_enc(self, program, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glProgramUniformMatrix3x4fv(void* self, GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniformMatrix3x4fv_enc(self, program, hostLoc, count, transpose, value);
+    ctx->m_glProgramUniformMatrix3x4fv_enc(self, program, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glProgramUniformMatrix4fv(void* self, GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniformMatrix4fv_enc(self, program, hostLoc, count, transpose, value);
+    ctx->m_glProgramUniformMatrix4fv_enc(self, program, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glProgramUniformMatrix4x2fv(void* self, GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniformMatrix4x2fv_enc(self, program, hostLoc, count, transpose, value);
+    ctx->m_glProgramUniformMatrix4x2fv_enc(self, program, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glProgramUniformMatrix4x3fv(void* self, GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value)
 {
     GL2Encoder *ctx = (GL2Encoder*)self;
-    GLint hostLoc = location;
-    ctx->m_glProgramUniformMatrix4x3fv_enc(self, program, hostLoc, count, transpose, value);
+    ctx->m_glProgramUniformMatrix4x3fv_enc(self, program, location, count, transpose, value);
 }
 
 void GL2Encoder::s_glProgramParameteri(void* self, GLuint program, GLenum pname, GLint value) {
     GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_PROGRAM_NAME(program);
+    SET_ERROR_IF(pname != GL_PROGRAM_BINARY_RETRIEVABLE_HINT && pname != GL_PROGRAM_SEPARABLE, GL_INVALID_ENUM);
+    SET_ERROR_IF(value != GL_FALSE && value != GL_TRUE, GL_INVALID_VALUE);
     ctx->m_glProgramParameteri_enc(self, program, pname, value);
 }
 
@@ -5182,6 +5706,11 @@
 
     // Otherwise, update host texture 2D bindings.
     ctx->updateHostTexture2DBindingsFromProgramData(program);
+
+    if (program) {
+        ctx->m_state->currentUniformValidationInfo = ctx->m_shared->getUniformValidationInfo(program);
+        ctx->m_state->currentAttribValidationInfo = ctx->m_shared->getAttribValidationInfo(program);
+    }
 }
 
 void GL2Encoder::s_glBindProgramPipeline(void* self, GLuint pipeline)
@@ -5349,6 +5878,7 @@
     SET_ERROR_IF(hasClientArrays, GL_INVALID_OPERATION);
     SET_ERROR_IF(!state->currentVertexArrayObject(), GL_INVALID_OPERATION);
     SET_ERROR_IF(!ctx->boundBuffer(GL_DRAW_INDIRECT_BUFFER), GL_INVALID_OPERATION);
+    SET_ERROR_IF(ctx->m_state->checkFramebufferCompleteness(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE, GL_INVALID_FRAMEBUFFER_OPERATION);
 
     GLuint indirectStructSize = glUtilsIndirectStructSize(INDIRECT_COMMAND_DRAWARRAYS);
     if (ctx->boundBuffer(GL_DRAW_INDIRECT_BUFFER)) {
@@ -5362,6 +5892,7 @@
         // This is purely for debug/dev purposes.
         ctx->glDrawArraysIndirectDataAEMU(ctx, mode, indirect, indirectStructSize);
     }
+    ctx->m_state->postDraw();
 }
 
 void GL2Encoder::s_glDrawElementsIndirect(void* self, GLenum mode, GLenum type, const void* indirect) {
@@ -5378,6 +5909,7 @@
     SET_ERROR_IF(!ctx->boundBuffer(GL_DRAW_INDIRECT_BUFFER), GL_INVALID_OPERATION);
 
     SET_ERROR_IF(ctx->m_state->getTransformFeedbackActiveUnpaused(), GL_INVALID_OPERATION);
+    SET_ERROR_IF(ctx->m_state->checkFramebufferCompleteness(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE, GL_INVALID_FRAMEBUFFER_OPERATION);
 
     GLuint indirectStructSize = glUtilsIndirectStructSize(INDIRECT_COMMAND_DRAWELEMENTS);
     if (ctx->boundBuffer(GL_DRAW_INDIRECT_BUFFER)) {
@@ -5391,7 +5923,7 @@
         // This is purely for debug/dev purposes.
         ctx->glDrawElementsIndirectDataAEMU(ctx, mode, type, indirect, indirectStructSize);
     }
-
+    ctx->m_state->postDraw();
 }
 
 void GL2Encoder::s_glTexStorage2DMultisample(void* self, GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations) {
@@ -5408,7 +5940,7 @@
     SET_ERROR_IF(samples > max_samples, GL_INVALID_OPERATION);
 
     state->setBoundTextureInternalFormat(target, internalformat);
-    state->setBoundTextureDims(target, 0, width, height, 1);
+    state->setBoundTextureDims(target, target, 0, width, height, 1);
     state->setBoundTextureImmutableFormat(target);
     state->setBoundTextureSamples(target, samples);
 
@@ -5427,6 +5959,7 @@
     SET_ERROR_IF(bufSize < glesv2_enc::pixelDataSize(self, width, height, format,
         type, 1), GL_INVALID_OPERATION);
     s_glReadPixels(self, x, y, width, height, format, type, pixels);
+    ctx->m_state->postReadPixels();
 }
 
 void GL2Encoder::s_glGetnUniformfvEXT(void *self, GLuint program, GLint location,
@@ -5447,14 +5980,664 @@
 
 void GL2Encoder::s_glInvalidateFramebuffer(void* self, GLenum target, GLsizei numAttachments, const GLenum *attachments) {
     GL2Encoder *ctx = (GL2Encoder*)self;
+    SET_ERROR_IF((target != GL_FRAMEBUFFER) &&
+                 (target != GL_READ_FRAMEBUFFER) &&
+                 (target != GL_DRAW_FRAMEBUFFER), GL_INVALID_ENUM);
     SET_ERROR_IF(numAttachments < 0, GL_INVALID_VALUE);
+
+    GLint maxColorAttachments;
+    ctx->glGetIntegerv(ctx, GL_MAX_COLOR_ATTACHMENTS, &maxColorAttachments);
+    for (GLsizei i = 0; i < numAttachments; ++i) {
+        if (attachments[i] != GL_DEPTH_ATTACHMENT && attachments[i] != GL_STENCIL_ATTACHMENT && attachments[i] != GL_DEPTH_STENCIL_ATTACHMENT) {
+            SET_ERROR_IF(attachments[i] >= GL_COLOR_ATTACHMENT0 + maxColorAttachments, GL_INVALID_OPERATION);
+        }
+    }
+
     ctx->m_glInvalidateFramebuffer_enc(ctx, target, numAttachments, attachments);
 }
 
 void GL2Encoder::s_glInvalidateSubFramebuffer(void* self, GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height) {
     GL2Encoder *ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(target != GL_FRAMEBUFFER && target != GL_READ_FRAMEBUFFER && target != GL_DRAW_FRAMEBUFFER, GL_INVALID_ENUM);
     SET_ERROR_IF(numAttachments < 0, GL_INVALID_VALUE);
     SET_ERROR_IF(width < 0, GL_INVALID_VALUE);
     SET_ERROR_IF(height < 0, GL_INVALID_VALUE);
+    GLint maxColorAttachments;
+    ctx->glGetIntegerv(ctx, GL_MAX_COLOR_ATTACHMENTS, &maxColorAttachments);
+    for (GLsizei i = 0; i < numAttachments; ++i) {
+        if (attachments[i] != GL_DEPTH_ATTACHMENT && attachments[i] != GL_STENCIL_ATTACHMENT && attachments[i] != GL_DEPTH_STENCIL_ATTACHMENT) {
+            SET_ERROR_IF(attachments[i] >= GL_COLOR_ATTACHMENT0 + maxColorAttachments, GL_INVALID_OPERATION);
+        }
+    }
     ctx->m_glInvalidateSubFramebuffer_enc(ctx, target, numAttachments, attachments, x, y, width, height);
 }
+
+void GL2Encoder::s_glDispatchCompute(void* self, GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    ctx->m_glDispatchCompute_enc(ctx, num_groups_x, num_groups_y, num_groups_z);
+    ctx->m_state->postDispatchCompute();
+}
+
+void GL2Encoder::s_glDispatchComputeIndirect(void* self, GLintptr indirect) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    ctx->m_glDispatchComputeIndirect_enc(ctx, indirect);
+    ctx->m_state->postDispatchCompute();
+}
+
+void GL2Encoder::s_glGenTransformFeedbacks(void* self, GLsizei n, GLuint* ids) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    ctx->m_glGenTransformFeedbacks_enc(ctx, n, ids);
+    ctx->m_state->setExistence(GLClientState::ObjectType::TransformFeedback, true, n, ids);
+}
+
+void GL2Encoder::s_glDeleteTransformFeedbacks(void* self, GLsizei n, const GLuint* ids) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(ctx->m_state->getTransformFeedbackActive(), GL_INVALID_OPERATION);
+
+    ctx->m_state->setExistence(GLClientState::ObjectType::TransformFeedback, false, n, ids);
+    ctx->m_glDeleteTransformFeedbacks_enc(ctx, n, ids);
+}
+
+void GL2Encoder::s_glGenSamplers(void* self, GLsizei n, GLuint* ids) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    ctx->m_glGenSamplers_enc(ctx, n, ids);
+    ctx->m_state->setExistence(GLClientState::ObjectType::Sampler, true, n, ids);
+}
+
+void GL2Encoder::s_glGenQueries(void* self, GLsizei n, GLuint* ids) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    ctx->m_glGenQueries_enc(ctx, n, ids);
+    ctx->m_state->setExistence(GLClientState::ObjectType::Query, true, n, ids);
+}
+
+void GL2Encoder::s_glDeleteQueries(void* self, GLsizei n, const GLuint* ids) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    ctx->m_state->setExistence(GLClientState::ObjectType::Query, false, n, ids);
+    ctx->m_glDeleteQueries_enc(ctx, n, ids);
+}
+
+void GL2Encoder::s_glBindTransformFeedback(void* self, GLenum target, GLuint id) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(GL_TRANSFORM_FEEDBACK != target, GL_INVALID_ENUM);
+    SET_ERROR_IF(ctx->m_state->getTransformFeedbackActiveUnpaused(), GL_INVALID_OPERATION);
+    SET_ERROR_IF(!ctx->m_state->tryBind(target, id), GL_INVALID_OPERATION);
+    ctx->m_glBindTransformFeedback_enc(ctx, target, id);
+}
+
+void GL2Encoder::s_glBeginQuery(void* self, GLenum target, GLuint query) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(!GLESv2Validation::allowedQueryTarget(target), GL_INVALID_ENUM);
+
+    if (target != GL_ANY_SAMPLES_PASSED_CONSERVATIVE &&
+        target != GL_ANY_SAMPLES_PASSED) {
+        SET_ERROR_IF(ctx->m_state->isQueryBound(target), GL_INVALID_OPERATION);
+    } else {
+        SET_ERROR_IF(ctx->m_state->isQueryBound(GL_ANY_SAMPLES_PASSED_CONSERVATIVE), GL_INVALID_OPERATION);
+        SET_ERROR_IF(ctx->m_state->isQueryBound(GL_ANY_SAMPLES_PASSED), GL_INVALID_OPERATION);
+    }
+
+    GLenum lastTarget = ctx->m_state->getLastQueryTarget(query);
+
+    if (lastTarget) {
+        SET_ERROR_IF(target != lastTarget, GL_INVALID_OPERATION);
+    }
+
+    SET_ERROR_IF(!query, GL_INVALID_OPERATION);
+    SET_ERROR_IF(!ctx->m_state->tryBind(target, query), GL_INVALID_OPERATION);
+    ctx->m_state->setLastQueryTarget(target, query);
+    ctx->m_glBeginQuery_enc(ctx, target, query);
+}
+
+void GL2Encoder::s_glEndQuery(void* self, GLenum target) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(!GLESv2Validation::allowedQueryTarget(target), GL_INVALID_ENUM);
+    SET_ERROR_IF(!ctx->m_state->isBoundTargetValid(target), GL_INVALID_OPERATION);
+    SET_ERROR_IF(!ctx->m_state->tryBind(target, 0), GL_INVALID_OPERATION);
+    ctx->m_glEndQuery_enc(ctx, target);
+}
+
+void GL2Encoder::s_glClear(void* self, GLbitfield mask) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+
+    GLbitfield allowed_bits = GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
+    GLbitfield has_disallowed_bits = (mask & ~allowed_bits);
+    SET_ERROR_IF(has_disallowed_bits, GL_INVALID_VALUE);
+
+    ctx->m_glClear_enc(ctx, mask);
+}
+
+void GL2Encoder::s_glCopyTexSubImage2D(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(!GLESv2Validation::textureTarget(ctx, target), GL_INVALID_ENUM);
+    SET_ERROR_IF(level < 0, GL_INVALID_VALUE);
+    GLint max_texture_size;
+    GLint max_cube_map_texture_size;
+    ctx->glGetIntegerv(ctx, GL_MAX_TEXTURE_SIZE, &max_texture_size);
+    ctx->glGetIntegerv(ctx, GL_MAX_CUBE_MAP_TEXTURE_SIZE, &max_cube_map_texture_size);
+    SET_ERROR_IF(level > ilog2(max_texture_size), GL_INVALID_VALUE);
+    SET_ERROR_IF((target == GL_TEXTURE_CUBE_MAP) &&
+                 (level > ilog2(max_cube_map_texture_size)), GL_INVALID_VALUE);
+    SET_ERROR_IF(xoffset < 0 || yoffset < 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(width < 0 || height < 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(width > max_texture_size, GL_INVALID_VALUE);
+    SET_ERROR_IF(height > max_texture_size, GL_INVALID_VALUE);
+    SET_ERROR_IF(GLESv2Validation::isCubeMapTarget(target) && width > max_cube_map_texture_size, GL_INVALID_VALUE);
+    SET_ERROR_IF(GLESv2Validation::isCubeMapTarget(target) && height > max_cube_map_texture_size, GL_INVALID_VALUE);
+    GLuint tex = ctx->m_state->getBoundTexture(target);
+    GLsizei neededWidth = xoffset + width;
+    GLsizei neededHeight = yoffset + height;
+    ALOGV("%s: tex %u needed width height %d %d xoff %d width %d yoff %d height %d (texture width %d height %d) level %d\n", __func__,
+            tex,
+            neededWidth,
+            neededHeight,
+            xoffset,
+            width,
+            yoffset,
+            height,
+            ctx->m_state->queryTexWidth(level, tex),
+            ctx->m_state->queryTexWidth(level, tex),
+            level);
+
+    SET_ERROR_IF(tex &&
+                 (neededWidth > ctx->m_state->queryTexWidth(level, tex) ||
+                  neededHeight > ctx->m_state->queryTexHeight(level, tex)),
+                 GL_INVALID_VALUE);
+    SET_ERROR_IF(ctx->glCheckFramebufferStatus(ctx, GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE,
+                 GL_INVALID_FRAMEBUFFER_OPERATION);
+
+    ctx->m_glCopyTexSubImage2D_enc(ctx, target, level, xoffset, yoffset, x, y, width, height);
+}
+
+void GL2Encoder::s_glCopyTexSubImage3D(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(target != GL_TEXTURE_3D &&
+                 target != GL_TEXTURE_2D_ARRAY,
+                 GL_INVALID_ENUM);
+    GLint max_texture_size;
+    GLint max_3d_texture_size;
+    ctx->glGetIntegerv(ctx, GL_MAX_TEXTURE_SIZE, &max_texture_size);
+    ctx->glGetIntegerv(ctx, GL_MAX_3D_TEXTURE_SIZE, &max_3d_texture_size);
+    SET_ERROR_IF(level < 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(level > ilog2(max_texture_size), GL_INVALID_VALUE);
+    SET_ERROR_IF(level > ilog2(max_3d_texture_size), GL_INVALID_VALUE);
+    SET_ERROR_IF(width < 0 || height < 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(xoffset < 0 || yoffset < 0 || zoffset < 0, GL_INVALID_VALUE);
+    GLuint tex = ctx->m_state->getBoundTexture(target);
+    GLsizei neededWidth = xoffset + width;
+    GLsizei neededHeight = yoffset + height;
+    GLsizei neededDepth = zoffset + 1;
+    SET_ERROR_IF(tex &&
+                 (neededWidth > ctx->m_state->queryTexWidth(level, tex) ||
+                  neededHeight > ctx->m_state->queryTexHeight(level, tex) ||
+                  neededDepth > ctx->m_state->queryTexDepth(level, tex)),
+                 GL_INVALID_VALUE);
+    SET_ERROR_IF(ctx->glCheckFramebufferStatus(ctx, GL_READ_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE,
+                 GL_INVALID_FRAMEBUFFER_OPERATION);
+
+    ctx->m_glCopyTexSubImage3D_enc(ctx, target, level, xoffset, yoffset, zoffset, x, y, width, height);
+}
+
+void GL2Encoder::s_glCompileShader(void* self, GLuint shader) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    bool isShaderOrProgramObject =
+        ctx->m_shared->isShaderOrProgramObject(shader);
+    bool isShader =
+        ctx->m_shared->isShader(shader);
+
+    SET_ERROR_IF(isShaderOrProgramObject && !isShader, GL_INVALID_OPERATION);
+    SET_ERROR_IF(!isShaderOrProgramObject && !isShader, GL_INVALID_VALUE);
+
+    ctx->m_glCompileShader_enc(ctx, shader);
+}
+
+void GL2Encoder::s_glValidateProgram(void* self, GLuint program ) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+
+    VALIDATE_PROGRAM_NAME(program);
+
+    ctx->m_glValidateProgram_enc(self, program);
+}
+
+void GL2Encoder::s_glProgramBinary(void *self , GLuint program, GLenum binaryFormat, const void* binary, GLsizei length) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+
+    VALIDATE_PROGRAM_NAME(program);
+
+    SET_ERROR_IF(~0 == binaryFormat, GL_INVALID_ENUM);
+
+    ctx->m_glProgramBinary_enc(self, program, binaryFormat, binary, length);
+}
+
+void GL2Encoder::s_glGetSamplerParameterfv(void *self, GLuint sampler, GLenum pname, GLfloat* params) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+
+    SET_ERROR_IF(!ctx->m_state->samplerExists(sampler), GL_INVALID_OPERATION);
+    SET_ERROR_IF(!GLESv2Validation::samplerParams(ctx, pname), GL_INVALID_ENUM);
+
+    if (!params) return;
+
+    ctx->m_glGetSamplerParameterfv_enc(ctx, sampler, pname, params);
+}
+
+void GL2Encoder::s_glGetSamplerParameteriv(void *self, GLuint sampler, GLenum pname, GLint* params) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(!ctx->m_state->samplerExists(sampler), GL_INVALID_OPERATION);
+    SET_ERROR_IF(!GLESv2Validation::samplerParams(ctx, pname), GL_INVALID_ENUM);
+
+    if (!params) return;
+
+    ctx->m_glGetSamplerParameteriv_enc(ctx, sampler, pname, params);
+}
+
+void GL2Encoder::s_glSamplerParameterf(void *self , GLuint sampler, GLenum pname, GLfloat param) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(!ctx->m_state->samplerExists(sampler), GL_INVALID_OPERATION);
+    SET_ERROR_IF(!GLESv2Validation::samplerParams(ctx, pname), GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::textureParamValue(ctx, pname, (GLint)param, param, (GLenum)param), GL_INVALID_ENUM);
+
+    ctx->m_glSamplerParameterf_enc(ctx, sampler, pname, param);
+}
+
+void GL2Encoder::s_glSamplerParameteri(void *self , GLuint sampler, GLenum pname, GLint param) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(!ctx->m_state->samplerExists(sampler), GL_INVALID_OPERATION);
+    SET_ERROR_IF(!GLESv2Validation::samplerParams(ctx, pname), GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::textureParamValue(ctx, pname, param, (GLfloat)param, (GLenum)param), GL_INVALID_ENUM);
+
+    ctx->m_glSamplerParameteri_enc(ctx, sampler, pname, param);
+}
+
+void GL2Encoder::s_glSamplerParameterfv(void *self , GLuint sampler, GLenum pname, const GLfloat* params) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(!ctx->m_state->samplerExists(sampler), GL_INVALID_OPERATION);
+    SET_ERROR_IF(!GLESv2Validation::samplerParams(ctx, pname), GL_INVALID_ENUM);
+    SET_ERROR_IF(!params, GL_INVALID_VALUE);
+    GLfloat param = *params;
+    SET_ERROR_IF(!GLESv2Validation::textureParamValue(ctx, pname, (GLint)param, param, (GLenum)param), GL_INVALID_ENUM);
+
+    ctx->m_glSamplerParameterfv_enc(ctx, sampler, pname, params);
+}
+
+void GL2Encoder::s_glSamplerParameteriv(void *self , GLuint sampler, GLenum pname, const GLint* params) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(!ctx->m_state->samplerExists(sampler), GL_INVALID_OPERATION);
+    SET_ERROR_IF(!GLESv2Validation::samplerParams(ctx, pname), GL_INVALID_ENUM);
+    SET_ERROR_IF(!params, GL_INVALID_VALUE);
+    GLint param = *params;
+    SET_ERROR_IF(!GLESv2Validation::textureParamValue(ctx, pname, (GLint)param, param, (GLenum)param), GL_INVALID_ENUM);
+
+    ctx->m_glSamplerParameteriv_enc(ctx, sampler, pname, params);
+}
+
+int GL2Encoder::s_glGetAttribLocation(void *self , GLuint program, const GLchar* name) {
+    GL2Encoder *ctx = (GL2Encoder*)self;
+
+    bool isShaderOrProgramObject =
+        ctx->m_shared->isShaderOrProgramObject(program);
+    bool isProgram =
+        ctx->m_shared->isProgram(program);
+
+    RET_AND_SET_ERROR_IF(!isShaderOrProgramObject, GL_INVALID_VALUE, -1);
+    RET_AND_SET_ERROR_IF(!isProgram, GL_INVALID_OPERATION, -1);
+    RET_AND_SET_ERROR_IF(!ctx->m_shared->getProgramLinkStatus(program), GL_INVALID_OPERATION, -1);
+
+    return ctx->m_glGetAttribLocation_enc(ctx, program, name);
+}
+
+void GL2Encoder::s_glBindAttribLocation(void *self , GLuint program, GLuint index, const GLchar* name) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+
+    VALIDATE_PROGRAM_NAME(program);
+
+    GLint maxVertexAttribs;
+    ctx->glGetIntegerv(self, GL_MAX_VERTEX_ATTRIBS, &maxVertexAttribs);
+    SET_ERROR_IF(!(index < maxVertexAttribs), GL_INVALID_VALUE);
+    SET_ERROR_IF(index > maxVertexAttribs, GL_INVALID_VALUE);
+    SET_ERROR_IF(name && !strncmp("gl_", name, 3), GL_INVALID_OPERATION);
+
+    fprintf(stderr, "%s: bind attrib %u name %s\n", __func__, index, name);
+    ctx->m_glBindAttribLocation_enc(ctx, program, index, name);
+}
+
+// TODO-SLOW
+void GL2Encoder::s_glUniformBlockBinding(void *self , GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+
+    VALIDATE_PROGRAM_NAME(program);
+    SET_ERROR_IF(uniformBlockIndex >= ctx->m_shared->getActiveUniformBlockCount(program), GL_INVALID_VALUE);
+
+    GLint maxUniformBufferBindings;
+    ctx->glGetIntegerv(ctx, GL_MAX_UNIFORM_BUFFER_BINDINGS, &maxUniformBufferBindings);
+    SET_ERROR_IF(uniformBlockBinding >= maxUniformBufferBindings, GL_INVALID_VALUE);
+
+    ctx->m_glUniformBlockBinding_enc(ctx, program, uniformBlockIndex, uniformBlockBinding);
+}
+
+void GL2Encoder::s_glGetTransformFeedbackVarying(void *self , GLuint program, GLuint index, GLsizei bufSize, GLsizei* length, GLsizei* size, GLenum* type, char* name) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+
+    VALIDATE_PROGRAM_NAME(program);
+    SET_ERROR_IF(!ctx->m_shared->getProgramLinkStatus(program), GL_INVALID_OPERATION);
+    SET_ERROR_IF(index >= ctx->m_shared->getTransformFeedbackVaryingsCountForProgram(program), GL_INVALID_VALUE);
+
+    ctx->m_glGetTransformFeedbackVarying_enc(ctx, program, index, bufSize, length, size, type, name);
+}
+
+void GL2Encoder::s_glScissor(void *self , GLint x, GLint y, GLsizei width, GLsizei height) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(width < 0 || height < 0, GL_INVALID_VALUE);
+    ctx->m_glScissor_enc(ctx, x, y, width, height);
+}
+
+void GL2Encoder::s_glDepthFunc(void *self , GLenum func) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(
+        (func != GL_NEVER) &&
+        (func != GL_ALWAYS) &&
+        (func != GL_LESS) &&
+        (func != GL_LEQUAL) &&
+        (func != GL_EQUAL) &&
+        (func != GL_GREATER) &&
+        (func != GL_GEQUAL) &&
+        (func != GL_NOTEQUAL),
+        GL_INVALID_ENUM);
+    ctx->m_glDepthFunc_enc(ctx, func);
+}
+
+void GL2Encoder::s_glViewport(void *self , GLint x, GLint y, GLsizei width, GLsizei height) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(width < 0 || height < 0, GL_INVALID_VALUE);
+    ctx->m_glViewport_enc(ctx, x, y, width, height);
+}
+
+void GL2Encoder::s_glStencilFunc(void *self , GLenum func, GLint ref, GLuint mask) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(!GLESv2Validation::allowedFunc(func), GL_INVALID_ENUM);
+    if (!ctx->m_state) return;
+    ctx->m_state->stencilFuncSeparate(GL_FRONT_AND_BACK, func, ref, mask);
+    ctx->m_glStencilFunc_enc(ctx, func, ref, mask);
+}
+
+void GL2Encoder::s_glStencilFuncSeparate(void *self , GLenum face, GLenum func, GLint ref, GLuint mask) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(!GLESv2Validation::allowedFace(face) || !GLESv2Validation::allowedFunc(func), GL_INVALID_ENUM);
+    if (!ctx->m_state) return;
+    ctx->m_state->stencilFuncSeparate(face, func, ref, mask);
+    ctx->m_glStencilFuncSeparate_enc(ctx, face, func, ref, mask);
+}
+
+void GL2Encoder::s_glStencilOp(void *self , GLenum fail, GLenum zfail, GLenum zpass) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(
+        !GLESv2Validation::allowedStencilOp(fail) ||
+        !GLESv2Validation::allowedStencilOp(zfail) ||
+        !GLESv2Validation::allowedStencilOp(zpass),
+        GL_INVALID_ENUM);
+    if (!ctx->m_state) return;
+    ctx->m_state->stencilOpSeparate(GL_FRONT_AND_BACK, fail, zfail, zpass);
+    ctx->m_glStencilOp_enc(ctx, fail, zfail, zpass);
+}
+
+void GL2Encoder::s_glStencilOpSeparate(void *self , GLenum face, GLenum fail, GLenum zfail, GLenum zpass) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(
+        !GLESv2Validation::allowedFace(face) ||
+        !GLESv2Validation::allowedStencilOp(fail) ||
+        !GLESv2Validation::allowedStencilOp(zfail) ||
+        !GLESv2Validation::allowedStencilOp(zpass),
+        GL_INVALID_ENUM);
+    if (!ctx->m_state) return;
+    ctx->m_state->stencilOpSeparate(face, fail, zfail, zpass);
+    ctx->m_glStencilOpSeparate_enc(ctx, face, fail, zfail, zpass);
+}
+
+void GL2Encoder::s_glStencilMaskSeparate(void *self , GLenum face, GLuint mask) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(
+        !GLESv2Validation::allowedFace(face),
+        GL_INVALID_ENUM);
+    if (!ctx->m_state) return;
+    ctx->m_state->stencilMaskSeparate(face, mask);
+    ctx->m_glStencilMaskSeparate_enc(ctx, face, mask);
+}
+
+void GL2Encoder::s_glBlendEquation(void *self , GLenum mode) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(
+        !GLESv2Validation::allowedBlendEquation(mode),
+        GL_INVALID_ENUM);
+    ctx->m_glBlendEquation_enc(ctx, mode);
+}
+
+void GL2Encoder::s_glBlendEquationSeparate(void *self , GLenum modeRGB, GLenum modeAlpha) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(
+        !GLESv2Validation::allowedBlendEquation(modeRGB) ||
+        !GLESv2Validation::allowedBlendEquation(modeAlpha),
+        GL_INVALID_ENUM);
+    ctx->m_glBlendEquationSeparate_enc(ctx, modeRGB, modeAlpha);
+}
+
+void GL2Encoder::s_glBlendFunc(void *self , GLenum sfactor, GLenum dfactor) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(
+        !GLESv2Validation::allowedBlendFunc(sfactor) ||
+        !GLESv2Validation::allowedBlendFunc(dfactor),
+        GL_INVALID_ENUM);
+    ctx->m_glBlendFunc_enc(ctx, sfactor, dfactor);
+}
+
+void GL2Encoder::s_glBlendFuncSeparate(void *self , GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(
+        !GLESv2Validation::allowedBlendFunc(srcRGB) ||
+        !GLESv2Validation::allowedBlendFunc(dstRGB) ||
+        !GLESv2Validation::allowedBlendFunc(srcAlpha) ||
+        !GLESv2Validation::allowedBlendFunc(dstAlpha),
+        GL_INVALID_ENUM);
+    ctx->m_glBlendFuncSeparate_enc(ctx, srcRGB, dstRGB, srcAlpha, dstAlpha);
+}
+
+void GL2Encoder::s_glCullFace(void *self , GLenum mode) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(
+        !GLESv2Validation::allowedCullFace(mode),
+        GL_INVALID_ENUM);
+    ctx->m_glCullFace_enc(ctx, mode);
+}
+
+void GL2Encoder::s_glFrontFace(void *self , GLenum mode) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(
+        !GLESv2Validation::allowedFrontFace(mode),
+        GL_INVALID_ENUM);
+    ctx->m_glFrontFace_enc(ctx, mode);
+}
+
+void GL2Encoder::s_glLineWidth(void *self , GLfloat width) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(width <= 0.0f, GL_INVALID_VALUE);
+    ctx->m_glLineWidth_enc(ctx, width);
+}
+
+void GL2Encoder::s_glVertexAttrib1f(void *self , GLuint indx, GLfloat x) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_VERTEX_ATTRIB_INDEX(indx);
+    ctx->m_glVertexAttrib1f_enc(ctx, indx, x);
+}
+
+void GL2Encoder::s_glVertexAttrib2f(void *self , GLuint indx, GLfloat x, GLfloat y) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_VERTEX_ATTRIB_INDEX(indx);
+    ctx->m_glVertexAttrib2f_enc(ctx, indx, x, y);
+}
+
+void GL2Encoder::s_glVertexAttrib3f(void *self , GLuint indx, GLfloat x, GLfloat y, GLfloat z) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_VERTEX_ATTRIB_INDEX(indx);
+    ctx->m_glVertexAttrib3f_enc(ctx, indx, x, y, z);
+}
+
+void GL2Encoder::s_glVertexAttrib4f(void *self , GLuint indx, GLfloat x, GLfloat y, GLfloat z, GLfloat w) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_VERTEX_ATTRIB_INDEX(indx);
+    ctx->m_glVertexAttrib4f_enc(ctx, indx, x, y, z, w);
+}
+
+void GL2Encoder::s_glVertexAttrib1fv(void *self , GLuint indx, const GLfloat* values) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_VERTEX_ATTRIB_INDEX(indx);
+    ctx->m_glVertexAttrib1fv_enc(ctx, indx, values);
+}
+
+void GL2Encoder::s_glVertexAttrib2fv(void *self , GLuint indx, const GLfloat* values) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_VERTEX_ATTRIB_INDEX(indx);
+    ctx->m_glVertexAttrib2fv_enc(ctx, indx, values);
+}
+
+void GL2Encoder::s_glVertexAttrib3fv(void *self , GLuint indx, const GLfloat* values) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_VERTEX_ATTRIB_INDEX(indx);
+    ctx->m_glVertexAttrib3fv_enc(ctx, indx, values);
+}
+
+void GL2Encoder::s_glVertexAttrib4fv(void *self , GLuint indx, const GLfloat* values) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_VERTEX_ATTRIB_INDEX(indx);
+    ctx->m_glVertexAttrib4fv_enc(ctx, indx, values);
+}
+
+void GL2Encoder::s_glVertexAttribI4i(void *self , GLuint index, GLint v0, GLint v1, GLint v2, GLint v3) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_VERTEX_ATTRIB_INDEX(index);
+    ctx->m_glVertexAttribI4i_enc(ctx, index, v0, v1, v2, v3);
+}
+
+void GL2Encoder::s_glVertexAttribI4ui(void *self , GLuint index, GLuint v0, GLuint v1, GLuint v2, GLuint v3) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_VERTEX_ATTRIB_INDEX(index);
+    ctx->m_glVertexAttribI4ui_enc(ctx, index, v0, v1, v2, v3);
+}
+
+void GL2Encoder::s_glVertexAttribI4iv(void *self , GLuint index, const GLint* v) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_VERTEX_ATTRIB_INDEX(index);
+    ctx->m_glVertexAttribI4iv_enc(ctx, index, v);
+}
+
+void GL2Encoder::s_glVertexAttribI4uiv(void *self , GLuint index, const GLuint* v) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_VERTEX_ATTRIB_INDEX(index);
+    ctx->m_glVertexAttribI4uiv_enc(ctx, index, v);
+}
+
+void GL2Encoder::s_glGetShaderPrecisionFormat(void *self , GLenum shadertype, GLenum precisiontype, GLint* range, GLint* precision) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(!GLESv2Validation::allowedShaderType(shadertype), GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::allowedPrecisionType(precisiontype), GL_INVALID_ENUM);
+    ctx->m_glGetShaderPrecisionFormat_enc(ctx, shadertype, precisiontype, range, precision);
+}
+
+void GL2Encoder::s_glGetProgramiv(void *self , GLuint program, GLenum pname, GLint* params) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(!GLESv2Validation::allowedGetProgram(ctx->majorVersion(), ctx->minorVersion(), pname), GL_INVALID_ENUM);
+    VALIDATE_PROGRAM_NAME(program);
+    ctx->m_glGetProgramiv_enc(ctx, program, pname, params);
+}
+
+void GL2Encoder::s_glGetActiveUniform(void *self , GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size, GLenum* type, GLchar* name) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_PROGRAM_NAME(program);
+    SET_ERROR_IF(index >= ctx->m_shared->getActiveUniformsCountForProgram(program), GL_INVALID_VALUE);
+    ctx->m_glGetActiveUniform_enc(ctx, program, index, bufsize, length, size, type, name);
+}
+
+void GL2Encoder::s_glGetActiveUniformsiv(void *self , GLuint program, GLsizei uniformCount, const GLuint* uniformIndices, GLenum pname, GLint* params) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_PROGRAM_NAME(program);
+    SET_ERROR_IF(uniformCount < 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(!GLESv2Validation::allowedGetActiveUniforms(pname), GL_INVALID_ENUM);
+    int activeUniformsCount = ctx->m_shared->getActiveUniformsCountForProgram(program);
+    for (GLsizei i = 0; i < uniformCount; ++i) {
+        SET_ERROR_IF(uniformIndices[i] >= activeUniformsCount, GL_INVALID_VALUE);
+    }
+    ctx->m_glGetActiveUniformsiv_enc(ctx, program, uniformCount, uniformIndices, pname, params);
+}
+
+void GL2Encoder::s_glGetActiveUniformBlockName(void *self , GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei* length, GLchar* uniformBlockName) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_PROGRAM_NAME(program);
+    SET_ERROR_IF(bufSize < 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(uniformBlockIndex >= ctx->m_shared->getActiveUniformBlockCount(program), GL_INVALID_VALUE);
+    ctx->m_glGetActiveUniformBlockName_enc(ctx, program, uniformBlockIndex, bufSize, length, uniformBlockName);
+}
+
+void GL2Encoder::s_glGetActiveAttrib(void *self , GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size, GLenum* type, GLchar* name) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_PROGRAM_NAME(program);
+    VALIDATE_VERTEX_ATTRIB_INDEX(index);
+    SET_ERROR_IF(bufsize < 0, GL_INVALID_VALUE);
+    SET_ERROR_IF(index >= ctx->m_shared->getActiveAttributesCountForProgram(program), GL_INVALID_VALUE);
+    ctx->m_glGetActiveAttrib_enc(ctx, program, index, bufsize, length, size, type, name);
+}
+
+void GL2Encoder::s_glGetRenderbufferParameteriv(void *self , GLenum target, GLenum pname, GLint* params) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(target != GL_RENDERBUFFER, GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::allowedGetRenderbufferParameter(pname), GL_INVALID_ENUM);
+    SET_ERROR_IF(0 == ctx->m_state->boundRenderbuffer(), GL_INVALID_OPERATION);
+    ctx->m_glGetRenderbufferParameteriv_enc(ctx, target, pname, params);
+}
+
+void GL2Encoder::s_glGetQueryiv(void *self , GLenum target, GLenum pname, GLint* params) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(!GLESv2Validation::allowedQueryTarget(target), GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::allowedQueryParam(pname), GL_INVALID_ENUM);
+    ctx->m_glGetQueryiv_enc(ctx, target, pname, params);
+}
+
+void GL2Encoder::s_glGetQueryObjectuiv(void *self , GLuint query, GLenum pname, GLuint* params) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    GLClientState* state = ctx->m_state;
+    SET_ERROR_IF(!GLESv2Validation::allowedQueryObjectParam(pname), GL_INVALID_ENUM);
+    SET_ERROR_IF(!state->queryExistence(GLClientState::ObjectType::Query, query), GL_INVALID_OPERATION);
+    SET_ERROR_IF(!state->getLastQueryTarget(query), GL_INVALID_OPERATION);
+    SET_ERROR_IF(ctx->m_state->isQueryObjectActive(query), GL_INVALID_OPERATION);
+
+    ctx->m_glGetQueryObjectuiv_enc(ctx, query, pname, params);
+}
+
+GLboolean GL2Encoder::s_glIsEnabled(void *self , GLenum cap) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+	RET_AND_SET_ERROR_IF(!GLESv2Validation::allowedEnable(ctx->majorVersion(), ctx->minorVersion(), cap), GL_INVALID_ENUM, 0);
+    return ctx->m_glIsEnabled_enc(ctx, cap);
+}
+
+void GL2Encoder::s_glHint(void *self , GLenum target, GLenum mode) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    SET_ERROR_IF(!GLESv2Validation::allowedHintTarget(target), GL_INVALID_ENUM);
+    SET_ERROR_IF(!GLESv2Validation::allowedHintMode(mode), GL_INVALID_ENUM);
+    ctx->m_glHint_enc(ctx, target, mode);
+}
+
+GLint GL2Encoder::s_glGetFragDataLocation (void *self , GLuint program, const char* name) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    VALIDATE_PROGRAM_NAME_RET(program, -1);
+    RET_AND_SET_ERROR_IF(!ctx->m_shared->getProgramLinkStatus(program), GL_INVALID_OPERATION, -1);
+    return ctx->m_glGetFragDataLocation_enc(ctx, program, name);
+}
+
+void GL2Encoder::s_glStencilMask(void* self, GLuint mask) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    if (!ctx->m_state) return;
+    ctx->m_state->stencilMaskSeparate(GL_FRONT_AND_BACK, mask);
+    ctx->m_glStencilMask_enc(ctx, mask);
+}
+
+void GL2Encoder::s_glClearStencil(void* self, int v) {
+    GL2Encoder* ctx = (GL2Encoder*)self;
+    if (!ctx->m_state) return;
+    ctx->m_state->state_GL_STENCIL_CLEAR_VALUE = v;
+    ctx->m_glClearStencil_enc(ctx, v);
+}
diff --git a/system/GLESv2_enc/GL2Encoder.h b/system/GLESv2_enc/GL2Encoder.h
index 0ceb9de..90978be 100644
--- a/system/GLESv2_enc/GL2Encoder.h
+++ b/system/GLESv2_enc/GL2Encoder.h
@@ -35,6 +35,9 @@
     void setHasAsyncUnmapBuffer(int version) {
         m_hasAsyncUnmapBuffer = version;
     }
+    void setHasSyncBufferData(bool value) {
+        m_hasSyncBufferData = value;
+    }
     void setNoHostError(bool noHostError) {
         m_noHostError = noHostError;
     }
@@ -62,8 +65,11 @@
     }
     void setSharedGroup(GLSharedGroupPtr shared) {
         m_shared = shared;
-        if (m_state && m_shared.Ptr())
+        if (m_state && m_shared) {
             m_state->setTextureData(m_shared->getTextureData());
+            m_state->setRenderbufferInfo(m_shared->getRenderbufferInfo());
+            m_state->setSamplerInfo(m_shared->getSamplerInfo());
+        }
     }
     int majorVersion() const { return m_currMajorVersion; }
     int minorVersion() const { return m_currMinorVersion; }
@@ -71,6 +77,7 @@
                        const std::vector<std::string>& extArray) {
         m_currExtensions = std::string(exts);
         m_currExtensionsArray = extArray;
+        m_state->setExtensions(m_currExtensions);
     }
     bool hasExtension(const char* ext) const {
         return m_currExtensions.find(ext) != std::string::npos;
@@ -85,9 +92,12 @@
     virtual void setError(GLenum error){ m_error = error; };
     virtual GLenum getError() { return m_error; };
 
+    __attribute__((always_inline)) GLenum* getErrorPtr() { return &m_error; }
+    __attribute__((always_inline)) bool hasError() const { return m_error != GL_NO_ERROR; }
+
     void override2DTextureTarget(GLenum target);
     void restore2DTextureTarget(GLenum target);
-    void associateEGLImage(GLenum target, GLeglImageOES eglImage);
+    void associateEGLImage(GLenum target, GLeglImageOES eglImage, int width, int height);
 
     // Convenience functions for buffers
     GLuint boundBuffer(GLenum target) const;
@@ -106,6 +116,7 @@
     std::vector<std::string> m_currExtensionsArray;
 
     bool    m_hasAsyncUnmapBuffer;
+    bool    m_hasSyncBufferData;
     bool    m_initialized;
     bool    m_noHostError;
     GLClientState *m_state;
@@ -118,6 +129,7 @@
 
     GLint m_max_combinedTextureImageUnits;
     GLint m_max_vertexTextureImageUnits;
+    GLint m_max_array_texture_layers;;
     GLint m_max_textureImageUnits;
     GLint m_max_cubeMapTextureSize;
     GLint m_max_renderBufferSize;
@@ -137,6 +149,8 @@
     GLuint m_ssbo_offset_align;
     GLuint m_ubo_offset_align;
 
+    GLint m_log2MaxTextureSize;
+
     std::vector<char> m_fixedBuffer;
 
     uint32_t m_drawCallFlushInterval;
@@ -161,8 +175,6 @@
     bool updateHostTexture2DBinding(GLenum texUnit, GLenum newTarget);
     void updateHostTexture2DBindingsFromProgramData(GLuint program);
     bool texture2DNeedsOverride(GLenum target) const;
-    bool isCompleteFbo(GLenum target, const GLClientState* state, GLenum attachment) const;
-    bool checkFramebufferCompleteness(GLenum target, const GLClientState* state) const;
 
     // Utility classes for safe queries that
     // need access to private class members
@@ -399,6 +411,9 @@
     glBindFramebuffer_client_proc_t m_glBindFramebuffer_enc;
     static void s_glBindFramebuffer(void* self, GLenum target, GLuint framebuffer);
 
+    glFramebufferParameteri_client_proc_t m_glFramebufferParameteri_enc;
+    static void s_glFramebufferParameteri(void *self, GLenum target, GLenum pname, GLint param);
+
     glFramebufferTexture2D_client_proc_t m_glFramebufferTexture2D_enc;
     static void s_glFramebufferTexture2D(void* self, GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
 
@@ -614,7 +629,6 @@
 
     glBindSampler_client_proc_t m_glBindSampler_enc;
     static void s_glBindSampler(void* self, GLuint unit, GLuint sampler);
-    void doSamplerBindEncodeCached(GLuint unit, GLuint sampler);
 
     glDeleteSamplers_client_proc_t m_glDeleteSamplers_enc;
     static void s_glDeleteSamplers(void* self, GLsizei n, const GLuint* samplers);
@@ -769,6 +783,172 @@
     glInvalidateFramebuffer_client_proc_t m_glInvalidateFramebuffer_enc;
     glInvalidateSubFramebuffer_client_proc_t m_glInvalidateSubFramebuffer_enc;;
 
+    // Dispatch compute
+    static void s_glDispatchCompute(void* self, GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z);
+    static void s_glDispatchComputeIndirect(void* self, GLintptr indirect);
+
+    glDispatchCompute_client_proc_t m_glDispatchCompute_enc;
+    glDispatchComputeIndirect_client_proc_t m_glDispatchComputeIndirect_enc;
+
+    // State tracking for transform feedbacks, samplers, and query objects
+    static void s_glGenTransformFeedbacks(void* self, GLsizei n, GLuint* ids);
+    static void s_glDeleteTransformFeedbacks(void* self, GLsizei n, const GLuint* ids);
+    static void s_glGenSamplers(void* self, GLsizei n, GLuint* ids);
+    static void s_glGenQueries(void* self, GLsizei n, GLuint* ids);
+    static void s_glDeleteQueries(void* self, GLsizei n, const GLuint* ids);
+
+    glGenTransformFeedbacks_client_proc_t m_glGenTransformFeedbacks_enc;
+    glDeleteTransformFeedbacks_client_proc_t m_glDeleteTransformFeedbacks_enc;
+    glGenSamplers_client_proc_t m_glGenSamplers_enc;
+    glGenQueries_client_proc_t m_glGenQueries_enc;
+    glDeleteQueries_client_proc_t m_glDeleteQueries_enc;
+
+    static void s_glBindTransformFeedback(void* self, GLenum target, GLuint id);
+    static void s_glBeginQuery(void* self, GLenum target, GLuint query);
+    static void s_glEndQuery(void* self, GLenum target);
+
+    glBindTransformFeedback_client_proc_t m_glBindTransformFeedback_enc;
+    glBeginQuery_client_proc_t m_glBeginQuery_enc;
+    glEndQuery_client_proc_t m_glEndQuery_enc;
+
+    static void s_glClear(void* self, GLbitfield mask);
+    glClear_client_proc_t m_glClear_enc;
+
+    static void s_glClearBufferfi(void* self, GLenum buffer, GLint drawBuffer, float depth, int stencil);
+    glClearBufferfi_client_proc_t m_glClearBufferfi_enc;
+
+    static void s_glCopyTexSubImage2D(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+    glCopyTexSubImage2D_client_proc_t m_glCopyTexSubImage2D_enc;
+
+    static void s_glCopyTexSubImage3D(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+    glCopyTexSubImage3D_client_proc_t m_glCopyTexSubImage3D_enc;
+
+    static void s_glCompileShader(void* self, GLuint shader);
+    glCompileShader_client_proc_t m_glCompileShader_enc;
+
+    static void s_glValidateProgram(void* self, GLuint program);
+    glValidateProgram_client_proc_t m_glValidateProgram_enc;
+
+    static void s_glProgramBinary(void *self , GLuint program, GLenum binaryFormat, const void* binary, GLsizei length);
+    glProgramBinary_client_proc_t m_glProgramBinary_enc;
+
+    static void s_glGetSamplerParameterfv(void *self, GLuint sampler, GLenum pname, GLfloat* params);
+    static void s_glGetSamplerParameteriv(void *self, GLuint sampler, GLenum pname, GLint* params);
+    glGetSamplerParameterfv_client_proc_t m_glGetSamplerParameterfv_enc;
+    glGetSamplerParameteriv_client_proc_t m_glGetSamplerParameteriv_enc;
+
+    static void s_glSamplerParameterf(void *self , GLuint sampler, GLenum pname, GLfloat param);
+    static void s_glSamplerParameteri(void *self , GLuint sampler, GLenum pname, GLint param);
+    static void s_glSamplerParameterfv(void *self , GLuint sampler, GLenum pname, const GLfloat* params);
+    static void s_glSamplerParameteriv(void *self , GLuint sampler, GLenum pname, const GLint* params);
+
+    glSamplerParameterf_client_proc_t m_glSamplerParameterf_enc;
+    glSamplerParameteri_client_proc_t m_glSamplerParameteri_enc;
+    glSamplerParameterfv_client_proc_t m_glSamplerParameterfv_enc;
+    glSamplerParameteriv_client_proc_t m_glSamplerParameteriv_enc;
+
+    static int s_glGetAttribLocation(void *self , GLuint program, const GLchar* name);
+    glGetAttribLocation_client_proc_t m_glGetAttribLocation_enc;
+
+    static void s_glBindAttribLocation(void *self , GLuint program, GLuint index, const GLchar* name);
+    static void s_glUniformBlockBinding(void *self , GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding);
+    static void s_glGetTransformFeedbackVarying(void *self , GLuint program, GLuint index, GLsizei bufSize, GLsizei* length, GLsizei* size, GLenum* type, char* name);
+    static void s_glScissor(void *self , GLint x, GLint y, GLsizei width, GLsizei height);
+    static void s_glDepthFunc(void *self , GLenum func);
+    static void s_glViewport(void *self , GLint x, GLint y, GLsizei width, GLsizei height);
+    static void s_glStencilFunc(void *self , GLenum func, GLint ref, GLuint mask);
+    static void s_glStencilFuncSeparate(void *self , GLenum face, GLenum func, GLint ref, GLuint mask);
+    static void s_glStencilOp(void *self , GLenum fail, GLenum zfail, GLenum zpass);
+    static void s_glStencilOpSeparate(void *self , GLenum face, GLenum fail, GLenum zfail, GLenum zpass);
+    static void s_glStencilMaskSeparate(void *self , GLenum face, GLuint mask);
+    static void s_glBlendEquation(void *self , GLenum mode);
+    static void s_glBlendEquationSeparate(void *self , GLenum modeRGB, GLenum modeAlpha);
+    static void s_glBlendFunc(void *self , GLenum sfactor, GLenum dfactor);
+    static void s_glBlendFuncSeparate(void *self , GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
+    static void s_glCullFace(void *self , GLenum mode);
+    static void s_glFrontFace(void *self , GLenum mode);
+    static void s_glLineWidth(void *self , GLfloat width);
+    static void s_glVertexAttrib1f(void *self , GLuint indx, GLfloat x);
+    static void s_glVertexAttrib2f(void *self , GLuint indx, GLfloat x, GLfloat y);
+    static void s_glVertexAttrib3f(void *self , GLuint indx, GLfloat x, GLfloat y, GLfloat z);
+    static void s_glVertexAttrib4f(void *self , GLuint indx, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+    static void s_glVertexAttrib1fv(void *self , GLuint indx, const GLfloat* values);
+    static void s_glVertexAttrib2fv(void *self , GLuint indx, const GLfloat* values);
+    static void s_glVertexAttrib3fv(void *self , GLuint indx, const GLfloat* values);
+    static void s_glVertexAttrib4fv(void *self , GLuint indx, const GLfloat* values);
+    static void s_glVertexAttribI4i(void *self , GLuint index, GLint v0, GLint v1, GLint v2, GLint v3);
+    static void s_glVertexAttribI4ui(void *self , GLuint index, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
+    static void s_glVertexAttribI4iv(void *self , GLuint index, const GLint* v);
+    static void s_glVertexAttribI4uiv(void *self , GLuint index, const GLuint* v);
+
+    static void s_glGetShaderPrecisionFormat(void *self , GLenum shadertype, GLenum precisiontype, GLint* range, GLint* precision);
+    static void s_glGetProgramiv(void *self , GLuint program, GLenum pname, GLint* params);
+    static void s_glGetActiveUniform(void *self , GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size, GLenum* type, GLchar* name);
+    static void s_glGetActiveUniformsiv(void *self , GLuint program, GLsizei uniformCount, const GLuint* uniformIndices, GLenum pname, GLint* params);
+    static void s_glGetActiveUniformBlockName(void *self , GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei* length, GLchar* uniformBlockName);
+    static void s_glGetActiveAttrib(void *self , GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size, GLenum* type, GLchar* name);
+    static void s_glGetRenderbufferParameteriv(void *self , GLenum target, GLenum pname, GLint* params);
+    static void s_glGetQueryiv(void *self , GLenum target, GLenum pname, GLint* params);
+    static void s_glGetQueryObjectuiv(void *self , GLuint query, GLenum pname, GLuint* params);
+    static GLboolean s_glIsEnabled(void *self , GLenum cap);
+    static void s_glHint(void *self , GLenum target, GLenum mode);
+    static GLint s_glGetFragDataLocation (void *self , GLuint program, const char* name);
+
+    static void s_glStencilMask(void* self, GLuint mask);
+    static void s_glClearStencil(void* self, int v);
+
+#define LIST_REMAINING_FUNCTIONS_FOR_VALIDATION(f) \
+    f(glBindAttribLocation) \
+    f(glUniformBlockBinding) \
+    f(glGetTransformFeedbackVarying) \
+    f(glScissor) \
+    f(glDepthFunc) \
+    f(glViewport) \
+    f(glStencilFunc) \
+    f(glStencilFuncSeparate) \
+    f(glStencilOp) \
+    f(glStencilOpSeparate) \
+    f(glStencilMaskSeparate) \
+    f(glBlendEquation) \
+    f(glBlendEquationSeparate) \
+    f(glBlendFunc) \
+    f(glBlendFuncSeparate) \
+    f(glCullFace) \
+    f(glFrontFace) \
+    f(glLineWidth) \
+    f(glVertexAttrib1f) \
+    f(glVertexAttrib2f) \
+    f(glVertexAttrib3f) \
+    f(glVertexAttrib4f) \
+    f(glVertexAttrib1fv) \
+    f(glVertexAttrib2fv) \
+    f(glVertexAttrib3fv) \
+    f(glVertexAttrib4fv) \
+    f(glVertexAttribI4i) \
+    f(glVertexAttribI4ui) \
+    f(glVertexAttribI4iv) \
+    f(glVertexAttribI4uiv) \
+    f(glGetShaderPrecisionFormat) \
+    f(glGetProgramiv) \
+    f(glGetActiveUniform) \
+    f(glGetActiveUniformsiv) \
+    f(glGetActiveUniformBlockName) \
+    f(glGetActiveAttrib) \
+    f(glGetRenderbufferParameteriv) \
+    f(glGetQueryiv) \
+    f(glGetQueryObjectuiv) \
+    f(glIsEnabled) \
+    f(glHint) \
+    f(glGetFragDataLocation) \
+    f(glStencilMask) \
+    f(glClearStencil) \
+
+#define DECLARE_CLIENT_ENCODER_PROC(n) \
+    n##_client_proc_t m_##n##_enc;
+
+    LIST_REMAINING_FUNCTIONS_FOR_VALIDATION(DECLARE_CLIENT_ENCODER_PROC)
+
+
 public:
     glEGLImageTargetTexture2DOES_client_proc_t m_glEGLImageTargetTexture2DOES_enc;
 
diff --git a/system/GLESv2_enc/GLESv2Validation.cpp b/system/GLESv2_enc/GLESv2Validation.cpp
index aff7902..1702532 100644
--- a/system/GLESv2_enc/GLESv2Validation.cpp
+++ b/system/GLESv2_enc/GLESv2Validation.cpp
@@ -18,6 +18,267 @@
 
 #include <sstream>
 
+#define LIST_VALID_TEX_INTERNALFORMATS(f) \
+    f(GL_BGRA8_EXT) \
+    f(GL_R8) \
+    f(GL_R8_SNORM) \
+    f(GL_R16F) \
+    f(GL_R32F) \
+    f(GL_R8UI) \
+    f(GL_R8I) \
+    f(GL_R16UI) \
+    f(GL_R16I) \
+    f(GL_R32UI) \
+    f(GL_R32I) \
+    f(GL_RG8) \
+    f(GL_RG8_SNORM) \
+    f(GL_RG16F) \
+    f(GL_RG32F) \
+    f(GL_RG8UI) \
+    f(GL_RG8I) \
+    f(GL_RG16UI) \
+    f(GL_RG16I) \
+    f(GL_RG32UI) \
+    f(GL_RG32I) \
+    f(GL_RGB8) \
+    f(GL_SRGB8) \
+    f(GL_RGB565) \
+    f(GL_RGB8_SNORM) \
+    f(GL_R11F_G11F_B10F) \
+    f(GL_RGB9_E5) \
+    f(GL_RGB16F) \
+    f(GL_RGB32F) \
+    f(GL_RGB8UI) \
+    f(GL_RGB8I) \
+    f(GL_RGB16UI) \
+    f(GL_RGB16I) \
+    f(GL_RGB32UI) \
+    f(GL_RGB32I) \
+    f(GL_RGBA8) \
+    f(GL_SRGB8_ALPHA8) \
+    f(GL_RGBA8_SNORM) \
+    f(GL_RGB5_A1) \
+    f(GL_RGBA4) \
+    f(GL_RGB10_A2) \
+    f(GL_RGBA16F) \
+    f(GL_RGBA32F) \
+    f(GL_RGBA8UI) \
+    f(GL_RGBA8I) \
+    f(GL_RGB10_A2UI) \
+    f(GL_RGBA16UI) \
+    f(GL_RGBA16I) \
+    f(GL_RGBA32I) \
+    f(GL_RGBA32UI) \
+    f(GL_DEPTH_COMPONENT16) \
+    f(GL_DEPTH_COMPONENT24) \
+    f(GL_DEPTH_COMPONENT32F) \
+    f(GL_DEPTH24_STENCIL8) \
+    f(GL_DEPTH32F_STENCIL8) \
+    f(GL_ETC1_RGB8_OES) \
+    f(GL_COMPRESSED_R11_EAC) \
+    f(GL_COMPRESSED_SIGNED_R11_EAC) \
+    f(GL_COMPRESSED_RG11_EAC) \
+    f(GL_COMPRESSED_SIGNED_RG11_EAC) \
+    f(GL_COMPRESSED_RGB8_ETC2) \
+    f(GL_COMPRESSED_SRGB8_ETC2) \
+    f(GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2) \
+    f(GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2) \
+    f(GL_COMPRESSED_RGBA8_ETC2_EAC) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC) \
+    f(GL_COMPRESSED_RGBA_ASTC_4x4_KHR) \
+    f(GL_COMPRESSED_RGBA_ASTC_5x4_KHR) \
+    f(GL_COMPRESSED_RGBA_ASTC_5x5_KHR) \
+    f(GL_COMPRESSED_RGBA_ASTC_6x5_KHR) \
+    f(GL_COMPRESSED_RGBA_ASTC_6x6_KHR) \
+    f(GL_COMPRESSED_RGBA_ASTC_8x5_KHR) \
+    f(GL_COMPRESSED_RGBA_ASTC_8x6_KHR) \
+    f(GL_COMPRESSED_RGBA_ASTC_8x8_KHR) \
+    f(GL_COMPRESSED_RGBA_ASTC_10x5_KHR) \
+    f(GL_COMPRESSED_RGBA_ASTC_10x6_KHR) \
+    f(GL_COMPRESSED_RGBA_ASTC_10x8_KHR) \
+    f(GL_COMPRESSED_RGBA_ASTC_10x10_KHR) \
+    f(GL_COMPRESSED_RGBA_ASTC_12x10_KHR) \
+    f(GL_COMPRESSED_RGBA_ASTC_12x12_KHR) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR) \
+    f(GL_COMPRESSED_RGBA_BPTC_UNORM_EXT) \
+    f(GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT) \
+    f(GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_EXT) \
+    f(GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_EXT) \
+    f(GL_COMPRESSED_RGB_S3TC_DXT1_EXT) \
+    f(GL_COMPRESSED_RGBA_S3TC_DXT1_EXT) \
+    f(GL_COMPRESSED_RGBA_S3TC_DXT3_EXT) \
+    f(GL_COMPRESSED_RGBA_S3TC_DXT5_EXT) \
+    f(GL_COMPRESSED_SRGB_S3TC_DXT1_EXT) \
+    f(GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT) \
+    f(GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT) \
+    f(GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT)
+
+
+#define LIST_INTEGER_TEX_FORMATS(f) \
+    f(GL_RED_INTEGER) \
+    f(GL_RG_INTEGER) \
+    f(GL_RGB_INTEGER) \
+    f(GL_RGBA_INTEGER) \
+    f(GL_R8UI) \
+    f(GL_R8I) \
+    f(GL_R16UI) \
+    f(GL_R16I) \
+    f(GL_R32UI) \
+    f(GL_R32I) \
+    f(GL_RG8UI) \
+    f(GL_RG8I) \
+    f(GL_RG16UI) \
+    f(GL_RG16I) \
+    f(GL_RG32UI) \
+    f(GL_RG32I) \
+    f(GL_RGB8UI) \
+    f(GL_RGB8I) \
+    f(GL_RGB16UI) \
+    f(GL_RGB16I) \
+    f(GL_RGB32UI) \
+    f(GL_RGB32I) \
+    f(GL_RGBA8UI) \
+    f(GL_RGBA8I) \
+    f(GL_RGB10_A2UI) \
+    f(GL_RGBA16UI) \
+    f(GL_RGBA16I) \
+    f(GL_RGBA32I) \
+    f(GL_RGBA32UI) \
+
+#define LIST_VALID_TEXFORMAT_COMBINATIONS(f) \
+    f(GL_BGRA8_EXT, GL_BGRA_EXT, GL_UNSIGNED_BYTE) \
+    f(GL_R8, GL_RED, GL_UNSIGNED_BYTE) \
+    f(GL_R8_SNORM, GL_RED, GL_BYTE) \
+    f(GL_R16F, GL_RED, GL_FLOAT) \
+    f(GL_R16F, GL_RED, GL_HALF_FLOAT) \
+    f(GL_R32F, GL_RED, GL_FLOAT) \
+    f(GL_R8UI, GL_RED_INTEGER, GL_UNSIGNED_BYTE) \
+    f(GL_R8I, GL_RED_INTEGER, GL_BYTE) \
+    f(GL_R16UI, GL_RED_INTEGER, GL_UNSIGNED_SHORT) \
+    f(GL_R16I, GL_RED_INTEGER, GL_SHORT) \
+    f(GL_R32UI, GL_RED_INTEGER, GL_UNSIGNED_INT) \
+    f(GL_R32I, GL_RED_INTEGER, GL_INT) \
+    f(GL_RG8, GL_RG, GL_UNSIGNED_BYTE) \
+    f(GL_RG8_SNORM, GL_RG, GL_BYTE) \
+    f(GL_RG16F, GL_RG, GL_HALF_FLOAT) \
+    f(GL_RG16F, GL_RG, GL_FLOAT) \
+    f(GL_RG32F, GL_RG, GL_FLOAT) \
+    f(GL_RG8UI, GL_RG_INTEGER, GL_UNSIGNED_BYTE) \
+    f(GL_RG8I, GL_RG_INTEGER, GL_BYTE) \
+    f(GL_RG16UI, GL_RG_INTEGER, GL_UNSIGNED_SHORT) \
+    f(GL_RG16I, GL_RG_INTEGER, GL_SHORT) \
+    f(GL_RG32UI, GL_RG_INTEGER, GL_UNSIGNED_INT) \
+    f(GL_RG32I, GL_RG_INTEGER, GL_INT) \
+    f(GL_RGB8, GL_RGB, GL_UNSIGNED_BYTE) \
+    f(GL_SRGB8, GL_RGB, GL_UNSIGNED_BYTE) \
+    f(GL_RGB565, GL_RGB, GL_UNSIGNED_BYTE) \
+    f(GL_RGB565, GL_RGB, GL_UNSIGNED_SHORT_5_6_5) \
+    f(GL_RGB8_SNORM, GL_RGB, GL_BYTE) \
+    f(GL_R11F_G11F_B10F, GL_RGB, GL_UNSIGNED_INT_10F_11F_11F_REV) \
+    f(GL_R11F_G11F_B10F, GL_RGB, GL_HALF_FLOAT) \
+    f(GL_R11F_G11F_B10F, GL_RGB, GL_FLOAT) \
+    f(GL_RGB9_E5, GL_RGB, GL_UNSIGNED_INT_5_9_9_9_REV) \
+    f(GL_RGB9_E5, GL_RGB, GL_HALF_FLOAT) \
+    f(GL_RGB9_E5, GL_RGB, GL_FLOAT) \
+    f(GL_RGB16F, GL_RGB, GL_HALF_FLOAT) \
+    f(GL_RGB16F, GL_RGB, GL_FLOAT) \
+    f(GL_RGB32F, GL_RGB, GL_FLOAT) \
+    f(GL_RGB8UI, GL_RGB_INTEGER, GL_UNSIGNED_BYTE) \
+    f(GL_RGB8I, GL_RGB_INTEGER, GL_BYTE) \
+    f(GL_RGB16UI, GL_RGB_INTEGER, GL_UNSIGNED_SHORT) \
+    f(GL_RGB16I, GL_RGB_INTEGER, GL_SHORT) \
+    f(GL_RGB32UI, GL_RGB_INTEGER, GL_UNSIGNED_INT) \
+    f(GL_RGB32I, GL_RGB_INTEGER, GL_INT) \
+    f(GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_RGBA8_SNORM, GL_RGBA, GL_BYTE) \
+    f(GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_SHORT_5_5_5_1) \
+    f(GL_RGB5_A1, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV) \
+    f(GL_RGBA4, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_RGBA4, GL_RGBA, GL_UNSIGNED_SHORT_4_4_4_4) \
+    f(GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_2_10_10_10_REV) \
+    f(GL_RGBA16F, GL_RGBA, GL_HALF_FLOAT) \
+    f(GL_RGBA16F, GL_RGBA, GL_FLOAT) \
+    f(GL_RGBA32F, GL_RGBA, GL_FLOAT) \
+    f(GL_RGBA8UI, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE) \
+    f(GL_RGBA8I, GL_RGBA_INTEGER, GL_BYTE) \
+    f(GL_RGB10_A2UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT_2_10_10_10_REV) \
+    f(GL_RGBA16UI, GL_RGBA_INTEGER, GL_UNSIGNED_SHORT) \
+    f(GL_RGBA16I, GL_RGBA_INTEGER, GL_SHORT) \
+    f(GL_RGBA32I, GL_RGBA_INTEGER, GL_INT) \
+    f(GL_RGBA32UI, GL_RGBA_INTEGER, GL_UNSIGNED_INT) \
+    f(GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT) \
+    f(GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT) \
+    f(GL_DEPTH_COMPONENT24, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT) \
+    f(GL_DEPTH_COMPONENT32F, GL_DEPTH_COMPONENT, GL_FLOAT) \
+    f(GL_DEPTH24_STENCIL8, GL_DEPTH_STENCIL, GL_UNSIGNED_INT_24_8) \
+    f(GL_DEPTH32F_STENCIL8, GL_DEPTH_STENCIL, GL_FLOAT_32_UNSIGNED_INT_24_8_REV) \
+    f(GL_COMPRESSED_R11_EAC, GL_RED, GL_FLOAT) \
+    f(GL_COMPRESSED_SIGNED_R11_EAC, GL_RED, GL_FLOAT) \
+    f(GL_COMPRESSED_RG11_EAC, GL_RG, GL_FLOAT) \
+    f(GL_COMPRESSED_SIGNED_RG11_EAC, GL_RG, GL_FLOAT) \
+    f(GL_COMPRESSED_RGB8_ETC2, GL_RGB, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB8_ETC2, GL_RGB, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA8_ETC2_EAC, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_ASTC_4x4_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_ASTC_5x4_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_ASTC_5x5_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_ASTC_6x5_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_ASTC_6x6_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_ASTC_8x5_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_ASTC_8x6_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_ASTC_8x8_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_ASTC_10x5_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_ASTC_10x6_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_ASTC_10x8_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_ASTC_10x10_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_ASTC_12x10_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_ASTC_12x12_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_BPTC_UNORM_EXT, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_EXT, GL_RGB, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_EXT, GL_RGB, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGB_S3TC_DXT1_EXT, GL_RGB, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, GL_RGBA, GL_UNSIGNED_BYTE) \
+    f(GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_RGBA, GL_UNSIGNED_BYTE)
+
+
 namespace GLESv2Validation {
 
 GLbitfield allBufferMapAccessFlags =
@@ -69,6 +330,24 @@
     }
 }
 
+bool bufferUsage(GL2Encoder* ctx, GLenum usage) {
+    int glesMajorVersion = ctx->majorVersion();
+    switch(usage) {
+        case GL_STREAM_DRAW:
+        case GL_STATIC_DRAW:
+        case GL_DYNAMIC_DRAW:
+            return true;
+        case GL_STREAM_READ:
+        case GL_STATIC_READ:
+        case GL_DYNAMIC_READ:
+        case GL_STREAM_COPY:
+        case GL_STATIC_COPY:
+        case GL_DYNAMIC_COPY:
+            return glesMajorVersion >= 3;
+    }
+    return false;
+
+}
 bool pixelStoreParam(GL2Encoder* ctx, GLenum param) {
     int glesMajorVersion = ctx->majorVersion();
     switch(param) {
@@ -260,6 +539,17 @@
     return false;
 }
 
+bool pixelOp(GLenum format,GLenum type) {
+     switch(type) {
+     case GL_UNSIGNED_SHORT_4_4_4_4:
+     case GL_UNSIGNED_SHORT_5_5_5_1:
+         return format == GL_RGBA;
+     case GL_UNSIGNED_SHORT_5_6_5:
+         return format == GL_RGB;
+     }
+     return true;
+}
+
 bool vertexAttribType(GL2Encoder* ctx, GLenum type)
 {
     int glesMajorVersion = ctx->majorVersion();
@@ -347,32 +637,133 @@
     return false;
 }
 
-static GLsizei ceildiv(GLsizei x, GLsizei y) {
-    return (x + y - 1) / y;
+bool textureParams(GL2Encoder* ctx, GLenum param) {
+    int glesMajorVersion = ctx->majorVersion();
+    int glesMinorVersion = ctx->minorVersion();
+    switch(param) {
+    case GL_TEXTURE_MIN_FILTER:
+    case GL_TEXTURE_MAG_FILTER:
+    case GL_TEXTURE_WRAP_S:
+    case GL_TEXTURE_WRAP_T:
+    case GL_TEXTURE_MAX_ANISOTROPY_EXT:
+        return true;
+    case GL_TEXTURE_SWIZZLE_R:
+    case GL_TEXTURE_SWIZZLE_G:
+    case GL_TEXTURE_SWIZZLE_B:
+    case GL_TEXTURE_SWIZZLE_A:
+    case GL_TEXTURE_MIN_LOD:
+    case GL_TEXTURE_MAX_LOD:
+    case GL_TEXTURE_BASE_LEVEL:
+    case GL_TEXTURE_MAX_LEVEL:
+    case GL_TEXTURE_COMPARE_MODE:
+    case GL_TEXTURE_COMPARE_FUNC:
+    case GL_TEXTURE_WRAP_R:
+    case GL_TEXTURE_IMMUTABLE_FORMAT:
+    case GL_TEXTURE_IMMUTABLE_LEVELS:
+        return glesMajorVersion >= 3;
+    case GL_DEPTH_STENCIL_TEXTURE_MODE:
+        return glesMajorVersion >= 3 && glesMinorVersion >= 1;
+    default:
+        return false;
+    }
 }
 
-GLsizei compressedTexImageSize(GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth) {
-    GLsizei base_size = ceildiv(width, 4) * ceildiv(height, 4) * depth;
-#define COMPRESSED_TEX_IMAGE_SIZE_CASE(internal, multiplier) \
-    case internal: \
-        return base_size * multiplier; \
-
-    switch (internalformat) {
-    COMPRESSED_TEX_IMAGE_SIZE_CASE(GL_COMPRESSED_R11_EAC, 8)
-    COMPRESSED_TEX_IMAGE_SIZE_CASE(GL_COMPRESSED_SIGNED_R11_EAC, 8)
-    COMPRESSED_TEX_IMAGE_SIZE_CASE(GL_COMPRESSED_RG11_EAC, 16)
-    COMPRESSED_TEX_IMAGE_SIZE_CASE(GL_COMPRESSED_SIGNED_RG11_EAC, 16)
-    COMPRESSED_TEX_IMAGE_SIZE_CASE(GL_COMPRESSED_RGB8_ETC2, 8)
-    COMPRESSED_TEX_IMAGE_SIZE_CASE(GL_COMPRESSED_SRGB8_ETC2, 8)
-    COMPRESSED_TEX_IMAGE_SIZE_CASE(GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2, 8)
-    COMPRESSED_TEX_IMAGE_SIZE_CASE(GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2, 8)
-    COMPRESSED_TEX_IMAGE_SIZE_CASE(GL_COMPRESSED_RGBA8_ETC2_EAC, 16)
-    COMPRESSED_TEX_IMAGE_SIZE_CASE(GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC, 16)
-    default:
-        break;
+bool samplerParams(GL2Encoder* ctx, GLenum param) {
+    (void)ctx;
+    switch(param) {
+        case GL_TEXTURE_MAX_ANISOTROPY_EXT:
+        case GL_TEXTURE_MIN_FILTER:
+        case GL_TEXTURE_MAG_FILTER:
+        case GL_TEXTURE_WRAP_S:
+        case GL_TEXTURE_WRAP_T:
+        case GL_TEXTURE_WRAP_R:
+        case GL_TEXTURE_MIN_LOD:
+        case GL_TEXTURE_MAX_LOD:
+        case GL_TEXTURE_COMPARE_MODE:
+        case GL_TEXTURE_COMPARE_FUNC:
+            return true;
+        default:
+            return false;
     }
+}
 
-    return 0;
+bool textureParamValue(GL2Encoder* ctx, GLenum pname, GLint intval, GLfloat floatval, GLenum enumval) {
+    (void)ctx;
+    (void)floatval;
+    switch (pname) {
+    case GL_TEXTURE_BASE_LEVEL:
+        return intval >= 0;
+    case GL_TEXTURE_COMPARE_MODE:
+        return
+            (enumval == GL_NONE) ||
+            (enumval == GL_COMPARE_REF_TO_TEXTURE);
+    case GL_TEXTURE_COMPARE_FUNC:
+        return
+            (enumval == GL_LEQUAL) ||
+            (enumval == GL_GEQUAL) ||
+            (enumval == GL_LESS) ||
+            (enumval == GL_GREATER) ||
+            (enumval == GL_EQUAL) ||
+            (enumval == GL_NOTEQUAL) ||
+            (enumval == GL_ALWAYS) ||
+            (enumval == GL_NEVER);
+    case GL_TEXTURE_MAG_FILTER:
+        return
+            (enumval == GL_NEAREST) ||
+            (enumval == GL_LINEAR);
+    case GL_TEXTURE_MAX_LEVEL:
+        return intval >= 0;
+    case GL_TEXTURE_MAX_LOD:
+        return true;
+    case GL_TEXTURE_MIN_FILTER:
+        return
+            (enumval == GL_NEAREST) ||
+            (enumval == GL_LINEAR) ||
+            (enumval == GL_NEAREST_MIPMAP_NEAREST) ||
+            (enumval == GL_NEAREST_MIPMAP_LINEAR) ||
+            (enumval == GL_LINEAR_MIPMAP_NEAREST) ||
+            (enumval == GL_LINEAR_MIPMAP_LINEAR);
+    case GL_TEXTURE_MIN_LOD:
+        return true;
+    case GL_TEXTURE_SWIZZLE_R:
+    case GL_TEXTURE_SWIZZLE_G:
+    case GL_TEXTURE_SWIZZLE_B:
+    case GL_TEXTURE_SWIZZLE_A:
+        return
+            (enumval == GL_RED) ||
+            (enumval == GL_GREEN) ||
+            (enumval == GL_BLUE) ||
+            (enumval == GL_ALPHA) ||
+            (enumval == GL_ZERO) ||
+            (enumval == GL_ONE);
+    case GL_TEXTURE_WRAP_S:
+    case GL_TEXTURE_WRAP_T:
+    case GL_TEXTURE_WRAP_R:
+        return
+            (enumval == GL_CLAMP_TO_EDGE) ||
+            (enumval == GL_REPEAT) ||
+            (enumval == GL_MIRRORED_REPEAT);
+    case GL_TEXTURE_MAX_ANISOTROPY_EXT:
+        return true;
+    case GL_TEXTURE_IMMUTABLE_FORMAT:
+    case GL_TEXTURE_IMMUTABLE_LEVELS:
+    case GL_DEPTH_STENCIL_TEXTURE_MODE:
+        return true;
+    default:
+        return true;
+    }
+}
+
+bool isIntegerFormat(GLenum format) {
+
+#define CHECK_EQUAL(x) case x: return true;
+
+    switch (format) {
+        LIST_INTEGER_TEX_FORMATS(CHECK_EQUAL)
+
+    default:
+        return false;
+    }
 }
 
 bool isCompressedFormat(GLenum internalformat) {
@@ -433,6 +824,18 @@
     case internal: \
         return glesMajorVersion > 1 && ctx->hasExtension("GL_KHR_texture_compression_astc_ldr"); \
 
+#define COMPRESSED_TEX_IMAGE_SUPPORT_CASE_BPTC(internal) \
+    case internal: \
+        return glesMajorVersion > 1 && ctx->hasExtension("GL_EXT_texture_compression_bptc"); \
+
+#define COMPRESSED_TEX_IMAGE_SUPPORT_CASE_S3TC(internal) \
+    case internal: \
+      return glesMajorVersion > 1 && ctx->hasExtension("GL_EXT_texture_compression_s3tc"); \
+
+#define COMPRESSED_TEX_IMAGE_SUPPORT_CASE_S3TC_SRGB(internal)       \
+    case internal: \
+      return glesMajorVersion > 1 && ctx->hasExtension("GL_EXT_texture_compression_s3tc") && ctx->hasExtension("GL_EXT_texture_sRGB"); \
+
 #define COMPRESSED_TEX_IMAGE_SUPPORT_CASE(internal, maj, min) \
     case internal: \
         if (maj < 3) return true; \
@@ -445,6 +848,7 @@
         return false ; \
 
     switch (internalformat) {
+    COMPRESSED_TEX_IMAGE_SUPPORT_CASE(GL_ETC1_RGB8_OES, 2, 0)
     COMPRESSED_TEX_IMAGE_SUPPORT_CASE(GL_COMPRESSED_R11_EAC, 2, 0)
     COMPRESSED_TEX_IMAGE_SUPPORT_CASE(GL_COMPRESSED_SIGNED_R11_EAC, 2, 0)
     COMPRESSED_TEX_IMAGE_SUPPORT_CASE(GL_COMPRESSED_RG11_EAC, 2, 0)
@@ -483,10 +887,22 @@
     COMPRESSED_TEX_IMAGE_SUPPORT_CASE_ASTC(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR)
     COMPRESSED_TEX_IMAGE_SUPPORT_CASE_ASTC(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR)
     COMPRESSED_TEX_IMAGE_SUPPORT_CASE_ASTC(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR)
+    COMPRESSED_TEX_IMAGE_SUPPORT_CASE_BPTC(GL_COMPRESSED_RGBA_BPTC_UNORM_EXT)
+    COMPRESSED_TEX_IMAGE_SUPPORT_CASE_BPTC(GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT)
+    COMPRESSED_TEX_IMAGE_SUPPORT_CASE_BPTC(GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_EXT)
+    COMPRESSED_TEX_IMAGE_SUPPORT_CASE_BPTC(GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_EXT)
+    COMPRESSED_TEX_IMAGE_SUPPORT_CASE_S3TC(GL_COMPRESSED_RGB_S3TC_DXT1_EXT)
+    COMPRESSED_TEX_IMAGE_SUPPORT_CASE_S3TC(GL_COMPRESSED_RGBA_S3TC_DXT1_EXT)
+    COMPRESSED_TEX_IMAGE_SUPPORT_CASE_S3TC(GL_COMPRESSED_RGBA_S3TC_DXT3_EXT)
+    COMPRESSED_TEX_IMAGE_SUPPORT_CASE_S3TC(GL_COMPRESSED_RGBA_S3TC_DXT5_EXT)
+    COMPRESSED_TEX_IMAGE_SUPPORT_CASE_S3TC(GL_COMPRESSED_SRGB_S3TC_DXT1_EXT)
+    COMPRESSED_TEX_IMAGE_SUPPORT_CASE_S3TC_SRGB(GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT)
+    COMPRESSED_TEX_IMAGE_SUPPORT_CASE_S3TC_SRGB(GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT)
+    COMPRESSED_TEX_IMAGE_SUPPORT_CASE_S3TC_SRGB(GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT)
     default:
         break;
     }
-    return true;
+    return false;
 }
 
 bool unsizedFormat(GLenum format) {
@@ -590,7 +1006,7 @@
     return false;
 }
 
-bool depthRenderableFormat(GL2Encoder*, GLenum internalformat) {
+bool depthRenderableFormat(GL2Encoder* ctx, GLenum internalformat) {
     switch (internalformat) {
     case GL_DEPTH_COMPONENT:
     case GL_DEPTH_STENCIL:
@@ -600,6 +1016,8 @@
     case GL_DEPTH24_STENCIL8:
     case GL_DEPTH32F_STENCIL8:
         return true;
+    case GL_DEPTH_COMPONENT32_OES:
+        return ctx->hasExtension("GL_OES_depth32");
     }
     return false;
 }
@@ -730,103 +1148,6 @@
     }
     return false;
 }
-#define LIST_VALID_TEX_INTERNALFORMATS(f) \
-    f(GL_BGRA8_EXT) \
-    f(GL_R8) \
-    f(GL_R8_SNORM) \
-    f(GL_R16F) \
-    f(GL_R32F) \
-    f(GL_R8UI) \
-    f(GL_R8I) \
-    f(GL_R16UI) \
-    f(GL_R16I) \
-    f(GL_R32UI) \
-    f(GL_R32I) \
-    f(GL_RG8) \
-    f(GL_RG8_SNORM) \
-    f(GL_RG16F) \
-    f(GL_RG32F) \
-    f(GL_RG8UI) \
-    f(GL_RG8I) \
-    f(GL_RG16UI) \
-    f(GL_RG16I) \
-    f(GL_RG32UI) \
-    f(GL_RG32I) \
-    f(GL_RGB8) \
-    f(GL_SRGB8) \
-    f(GL_RGB565) \
-    f(GL_RGB8_SNORM) \
-    f(GL_R11F_G11F_B10F) \
-    f(GL_RGB9_E5) \
-    f(GL_RGB16F) \
-    f(GL_RGB32F) \
-    f(GL_RGB8UI) \
-    f(GL_RGB8I) \
-    f(GL_RGB16UI) \
-    f(GL_RGB16I) \
-    f(GL_RGB32UI) \
-    f(GL_RGB32I) \
-    f(GL_RGBA8) \
-    f(GL_SRGB8_ALPHA8) \
-    f(GL_RGBA8_SNORM) \
-    f(GL_RGB5_A1) \
-    f(GL_RGBA4) \
-    f(GL_RGB10_A2) \
-    f(GL_RGBA16F) \
-    f(GL_RGBA32F) \
-    f(GL_RGBA8UI) \
-    f(GL_RGBA8I) \
-    f(GL_RGB10_A2UI) \
-    f(GL_RGBA16UI) \
-    f(GL_RGBA16I) \
-    f(GL_RGBA32I) \
-    f(GL_RGBA32UI) \
-    f(GL_DEPTH_COMPONENT16) \
-    f(GL_DEPTH_COMPONENT24) \
-    f(GL_DEPTH_COMPONENT32F) \
-    f(GL_DEPTH24_STENCIL8) \
-    f(GL_DEPTH32F_STENCIL8) \
-    f(GL_ETC1_RGB8_OES) \
-    f(GL_COMPRESSED_R11_EAC) \
-    f(GL_COMPRESSED_SIGNED_R11_EAC) \
-    f(GL_COMPRESSED_RG11_EAC) \
-    f(GL_COMPRESSED_SIGNED_RG11_EAC) \
-    f(GL_COMPRESSED_RGB8_ETC2) \
-    f(GL_COMPRESSED_SRGB8_ETC2) \
-    f(GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2) \
-    f(GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2) \
-    f(GL_COMPRESSED_RGBA8_ETC2_EAC) \
-    f(GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC) \
-    f(GL_COMPRESSED_RGBA_ASTC_4x4_KHR) \
-    f(GL_COMPRESSED_RGBA_ASTC_5x4_KHR) \
-    f(GL_COMPRESSED_RGBA_ASTC_5x5_KHR) \
-    f(GL_COMPRESSED_RGBA_ASTC_6x5_KHR) \
-    f(GL_COMPRESSED_RGBA_ASTC_6x6_KHR) \
-    f(GL_COMPRESSED_RGBA_ASTC_8x5_KHR) \
-    f(GL_COMPRESSED_RGBA_ASTC_8x6_KHR) \
-    f(GL_COMPRESSED_RGBA_ASTC_8x8_KHR) \
-    f(GL_COMPRESSED_RGBA_ASTC_10x5_KHR) \
-    f(GL_COMPRESSED_RGBA_ASTC_10x6_KHR) \
-    f(GL_COMPRESSED_RGBA_ASTC_10x8_KHR) \
-    f(GL_COMPRESSED_RGBA_ASTC_10x10_KHR) \
-    f(GL_COMPRESSED_RGBA_ASTC_12x10_KHR) \
-    f(GL_COMPRESSED_RGBA_ASTC_12x12_KHR) \
-    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR) \
-    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR) \
-    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR) \
-    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR) \
-    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR) \
-    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR) \
-    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR) \
-    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR) \
-    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR) \
-    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR) \
-    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR) \
-    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR) \
-    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR) \
-    f(GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR) \
-
-
 bool pixelInternalFormat(GLenum internalformat) {
 #define VALID_INTERNAL_FORMAT(format) \
     case format: \
@@ -837,9 +1158,88 @@
     default:
         break;
     }
+
+    ALOGW("error internal format: 0x%x is invalid\n", internalformat);
     return false;
 }
 
+bool pixelSizedFormat(GL2Encoder* ctx, GLenum internalformat, GLenum format, GLenum type) {
+    int glesMajorVersion = ctx->majorVersion();
+    if (internalformat == format) {
+        return true;
+    }
+
+    if (glesMajorVersion < 3) {
+        switch (format) {
+            case GL_RED:
+                switch (type) {
+                    case GL_UNSIGNED_BYTE:
+                        return internalformat == GL_R8;
+                    case GL_HALF_FLOAT:
+                    case GL_FLOAT:
+                        return internalformat == GL_R16F;
+                    case GL_BYTE:
+                        return internalformat == GL_R8_SNORM;
+                    default:
+                        return false;
+                }
+                break;
+            case GL_RG:
+                switch (type) {
+                    case GL_UNSIGNED_BYTE:
+                        return internalformat == GL_RG8;
+                    case GL_HALF_FLOAT:
+                    case GL_FLOAT:
+                        return internalformat == GL_RG16F;
+                    default:
+                        return false;
+                }
+                break;
+            case GL_RGB:
+                switch (type) {
+                    case GL_HALF_FLOAT:
+                    case GL_FLOAT:
+                        return internalformat == GL_RGB16F
+                            || internalformat == GL_R11F_G11F_B10F;
+                    case GL_UNSIGNED_INT_10F_11F_11F_REV:
+                        return internalformat == GL_R11F_G11F_B10F;
+                    default:
+                        return internalformat == GL_RGB8 ||
+                               internalformat == GL_RGB;
+                }
+                break;
+            case GL_RGBA:
+                switch (type) {
+                    case GL_HALF_FLOAT:
+                    case GL_FLOAT:
+                        return internalformat == GL_RGBA16F;
+                    default:
+                        return internalformat == GL_RGBA8 ||
+                               internalformat == GL_RGBA;
+                }
+                break;
+        }
+    }
+
+#define VALIDATE_FORMAT_COMBINATION(x, y, z) \
+    if (internalformat == x && format == y && type == z) return true; \
+
+    LIST_VALID_TEXFORMAT_COMBINATIONS(VALIDATE_FORMAT_COMBINATION)
+
+    return false;
+}
+
+void getCompatibleFormatTypeForInternalFormat(GLenum internalformat, GLenum* format_out, GLenum* type_out) {
+#define RETURN_COMPATIBLE_FORMAT(x, y, z) \
+    if (internalformat == x) { \
+        *format_out = y; \
+        *type_out = z; \
+        return; \
+    } \
+
+    LIST_VALID_TEXFORMAT_COMBINATIONS(RETURN_COMPATIBLE_FORMAT)
+}
+
 bool shaderType(GL2Encoder* ctx, GLenum type) {
     int glesMajorVersion = ctx->majorVersion();
     int glesMinorVersion = ctx->minorVersion();
@@ -873,4 +1273,323 @@
     return ss.str();
 }
 
+bool allowedFace(GLenum face) {
+    switch (face) {
+        case GL_FRONT:
+        case GL_BACK:
+        case GL_FRONT_AND_BACK:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool allowedFunc(GLenum func) {
+    switch (func) {
+        case GL_NEVER:
+        case GL_ALWAYS:
+        case GL_LESS:
+        case GL_LEQUAL:
+        case GL_EQUAL:
+        case GL_GREATER:
+        case GL_GEQUAL:
+        case GL_NOTEQUAL:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool allowedStencilOp(GLenum op) {
+    switch (op) {
+        case GL_KEEP:
+        case GL_ZERO:
+        case GL_REPLACE:
+        case GL_INCR:
+        case GL_DECR:
+        case GL_INVERT:
+        case GL_INCR_WRAP:
+        case GL_DECR_WRAP:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool allowedBlendEquation(GLenum eq) {
+    switch (eq) {
+        case GL_FUNC_ADD:
+        case GL_FUNC_SUBTRACT:
+        case GL_FUNC_REVERSE_SUBTRACT:
+        case GL_MIN:
+        case GL_MAX:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool allowedBlendFunc(GLenum func) {
+    switch (func) {
+        case GL_ZERO:
+        case GL_ONE:
+        case GL_SRC_COLOR:
+        case GL_ONE_MINUS_SRC_COLOR:
+        case GL_DST_COLOR:
+        case GL_ONE_MINUS_DST_COLOR:
+        case GL_SRC_ALPHA:
+        case GL_ONE_MINUS_SRC_ALPHA:
+        case GL_DST_ALPHA:
+        case GL_ONE_MINUS_DST_ALPHA:
+        case GL_CONSTANT_COLOR:
+        case GL_ONE_MINUS_CONSTANT_COLOR:
+        case GL_CONSTANT_ALPHA:
+        case GL_ONE_MINUS_CONSTANT_ALPHA:
+        case GL_SRC_ALPHA_SATURATE:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool allowedCullFace(GLenum mode) {
+    switch (mode) {
+        case GL_FRONT:
+        case GL_BACK:
+        case GL_FRONT_AND_BACK:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool allowedFrontFace(GLenum mode) {
+    switch (mode) {
+        case GL_CCW:
+        case GL_CW:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool allowedEnable(int majorVersion, int minorVersion, GLenum cap) {
+    switch (cap) {
+        case GL_CULL_FACE:
+        case GL_POLYGON_OFFSET_FILL:
+        case GL_SAMPLE_ALPHA_TO_COVERAGE:
+        case GL_SAMPLE_COVERAGE:
+        case GL_SCISSOR_TEST:
+        case GL_STENCIL_TEST:
+        case GL_DEPTH_TEST:
+        case GL_BLEND:
+        case GL_DITHER:
+            return true;
+        case GL_PRIMITIVE_RESTART_FIXED_INDEX:
+        case GL_RASTERIZER_DISCARD:
+            return majorVersion >= 3;
+        case GL_SAMPLE_MASK:
+            return majorVersion >= 3 && minorVersion >= 1;
+		default:
+			return false;
+    }
+}
+
+bool allowedGetShader(GLenum pname) {
+	switch (pname) {
+		case GL_SHADER_TYPE:
+		case GL_DELETE_STATUS:
+		case GL_COMPILE_STATUS:
+		case GL_INFO_LOG_LENGTH:
+		case GL_SHADER_SOURCE_LENGTH:
+			return true;
+		default:
+			return false;
+	}
+}
+
+bool allowedShaderType(GLenum shadertype) {
+	switch (shadertype) {
+		case GL_VERTEX_SHADER:
+		case GL_FRAGMENT_SHADER:
+            return true;
+		default:
+			return false;
+	}
+}
+
+bool allowedPrecisionType(GLenum precisiontype) {
+	switch (precisiontype) {
+		case GL_LOW_FLOAT:
+		case GL_MEDIUM_FLOAT:
+		case GL_HIGH_FLOAT:
+		case GL_LOW_INT:
+		case GL_MEDIUM_INT:
+		case GL_HIGH_INT:
+            return true;
+		default:
+			return false;
+	}
+}
+
+bool allowedGetProgram(int majorVersion, int minorVersion, GLenum pname) {
+    switch (pname) {
+        case GL_DELETE_STATUS:
+        case GL_LINK_STATUS:
+        case GL_VALIDATE_STATUS:
+        case GL_INFO_LOG_LENGTH:
+        case GL_ATTACHED_SHADERS:
+        case GL_ACTIVE_ATTRIBUTES:
+        case GL_ACTIVE_ATTRIBUTE_MAX_LENGTH:
+        case GL_ACTIVE_UNIFORMS:
+        case GL_ACTIVE_UNIFORM_MAX_LENGTH:
+            return true;
+        case GL_TRANSFORM_FEEDBACK_BUFFER_MODE:
+        case GL_PROGRAM_BINARY_RETRIEVABLE_HINT:
+        case GL_PROGRAM_BINARY_LENGTH:
+        case GL_TRANSFORM_FEEDBACK_VARYINGS:
+        case GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH:
+        case GL_ACTIVE_UNIFORM_BLOCKS:
+        case GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH:
+            return majorVersion > 2;
+        case GL_COMPUTE_WORK_GROUP_SIZE:
+        case GL_PROGRAM_SEPARABLE:
+        case GL_ACTIVE_ATOMIC_COUNTER_BUFFERS:
+            return majorVersion > 2 && minorVersion > 0;
+        default:
+            return false;
+    }
+}
+
+bool allowedGetActiveUniforms(GLenum pname) {
+    switch (pname) {
+        case GL_UNIFORM_TYPE:
+        case GL_UNIFORM_SIZE:
+        case GL_UNIFORM_NAME_LENGTH:
+        case GL_UNIFORM_BLOCK_INDEX:
+        case GL_UNIFORM_OFFSET:
+        case GL_UNIFORM_ARRAY_STRIDE:
+        case GL_UNIFORM_MATRIX_STRIDE:
+        case GL_UNIFORM_IS_ROW_MAJOR:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool allowedGetActiveUniformBlock(GLenum pname) {
+    switch (pname) {
+        case GL_UNIFORM_BLOCK_BINDING:
+        case GL_UNIFORM_BLOCK_DATA_SIZE:
+        case GL_UNIFORM_BLOCK_NAME_LENGTH:
+        case GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS:
+        case GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES:
+        case GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER:
+        case GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool allowedGetVertexAttrib(GLenum pname) {
+    switch (pname) {
+        case GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING:
+        case GL_VERTEX_ATTRIB_ARRAY_ENABLED:
+        case GL_VERTEX_ATTRIB_ARRAY_SIZE:
+        case GL_VERTEX_ATTRIB_ARRAY_STRIDE:
+        case GL_VERTEX_ATTRIB_ARRAY_TYPE:
+        case GL_VERTEX_ATTRIB_ARRAY_NORMALIZED:
+        case GL_VERTEX_ATTRIB_ARRAY_INTEGER:
+        case GL_VERTEX_ATTRIB_ARRAY_DIVISOR:
+        case GL_VERTEX_ATTRIB_BINDING:
+        case GL_VERTEX_ATTRIB_RELATIVE_OFFSET:
+        case GL_CURRENT_VERTEX_ATTRIB:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool allowedGetRenderbufferParameter(GLenum pname) {
+    switch (pname) {
+        case GL_RENDERBUFFER_WIDTH:
+        case GL_RENDERBUFFER_HEIGHT:
+        case GL_RENDERBUFFER_INTERNAL_FORMAT:
+        case GL_RENDERBUFFER_RED_SIZE:
+        case GL_RENDERBUFFER_GREEN_SIZE:
+        case GL_RENDERBUFFER_BLUE_SIZE:
+        case GL_RENDERBUFFER_ALPHA_SIZE:
+        case GL_RENDERBUFFER_DEPTH_SIZE:
+        case GL_RENDERBUFFER_STENCIL_SIZE:
+        case GL_RENDERBUFFER_SAMPLES:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool allowedQueryTarget(GLenum target) {
+    switch (target) {
+        case GL_ANY_SAMPLES_PASSED:
+        case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
+        case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool allowedQueryParam(GLenum pname) {
+    switch (pname) {
+        case GL_CURRENT_QUERY:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool allowedQueryObjectParam(GLenum pname) {
+    switch (pname) {
+        case GL_QUERY_RESULT:
+        case GL_QUERY_RESULT_AVAILABLE:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool allowedGetSyncParam(GLenum pname) {
+    switch (pname) {
+        case GL_OBJECT_TYPE:
+        case GL_SYNC_STATUS:
+        case GL_SYNC_CONDITION:
+        case GL_SYNC_FLAGS:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool allowedHintTarget(GLenum target) {
+    switch (target) {
+        case GL_GENERATE_MIPMAP_HINT:
+        case GL_FRAGMENT_SHADER_DERIVATIVE_HINT_OES:
+            return true;
+        default:
+            return false;
+    }
+}
+
+bool allowedHintMode(GLenum mode) {
+    switch (mode) {
+        case GL_DONT_CARE:
+        case GL_NICEST:
+        case GL_FASTEST:
+            return true;
+        default:
+            return false;
+    }
+}
+
 } // namespace GLESv2Validation
diff --git a/system/GLESv2_enc/GLESv2Validation.h b/system/GLESv2_enc/GLESv2Validation.h
index a37bd58..4ac5787 100644
--- a/system/GLESv2_enc/GLESv2Validation.h
+++ b/system/GLESv2_enc/GLESv2Validation.h
@@ -33,6 +33,7 @@
 extern GLbitfield allBufferMapAccessFlags;
 bool bufferTarget(GL2Encoder* ctx, GLenum target);
 bool bufferParam(GL2Encoder* ctx, GLenum param);
+bool bufferUsage(GL2Encoder* ctx, GLenum usage);
 
 bool pixelStoreParam(GL2Encoder* ctx, GLenum param);
 bool pixelStoreValue(GLenum param, GLint value);
@@ -44,6 +45,7 @@
 
 bool readPixelsFormat(GLenum format);
 bool readPixelsType(GLenum type);
+bool pixelOp(GLenum format, GLenum type);
 
 bool vertexAttribType(GL2Encoder* ctx, GLenum type);
 
@@ -51,9 +53,11 @@
 bool blitFramebufferFormat(GLenum readFormat, GLenum drawFormat);
 
 bool textureTarget(GL2Encoder* ctx, GLenum target);
+bool textureParams(GL2Encoder* ctx, GLenum pname);
+bool samplerParams(GL2Encoder* ctx, GLenum pname);
+bool textureParamValue(GL2Encoder* ctx, GLenum pname, GLint intval, GLfloat floatval, GLenum enumval);
 
-GLsizei compressedTexImageSize(GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
-
+bool isIntegerFormat(GLenum format);
 bool isCompressedFormat(GLenum internalformat);
 bool supportedCompressedFormat(GL2Encoder* ctx, GLenum internalformat);
 
@@ -70,6 +74,8 @@
 bool pixelFormat(GL2Encoder* ctx, GLenum format);
 
 bool pixelInternalFormat(GLenum internalformat);
+bool pixelSizedFormat(GL2Encoder* ctx, GLenum internalformat, GLenum format, GLenum type);
+void getCompatibleFormatTypeForInternalFormat(GLenum internalformat, GLenum* format_out, GLenum* type_out);
 
 bool shaderType(GL2Encoder* ctx, GLenum type);
 
@@ -77,6 +83,40 @@
 
 std::string vertexAttribIndexRangeErrorMsg(GL2Encoder* ctx, GLuint index);
 
+bool allowedFace(GLenum face);
+bool allowedFunc(GLenum func);
+bool allowedStencilOp(GLenum op);
+bool allowedBlendEquation(GLenum eq);
+bool allowedBlendFunc(GLenum func);
+
+bool allowedCullFace(GLenum mode);
+bool allowedFrontFace(GLenum mode);
+
+bool allowedEnable(int majorVersion, int minorVersion, GLenum mode);
+
+bool allowedGetShader(GLenum pname);
+
+bool allowedShaderType(GLenum shadertype);
+bool allowedPrecisionType(GLenum precisiontype);
+
+bool allowedGetProgram(int majorVersion, int minorVersion, GLenum pname);
+
+bool allowedGetActiveUniforms(GLenum pname) ;
+bool allowedGetActiveUniformBlock(GLenum pname) ;
+
+bool allowedGetVertexAttrib(GLenum pname) ;
+
+bool allowedGetRenderbufferParameter(GLenum pname);
+
+bool allowedQueryTarget(GLenum target);
+bool allowedQueryParam(GLenum pname);
+bool allowedQueryObjectParam(GLenum pname);
+
+bool allowedGetSyncParam(GLenum pname);
+
+bool allowedHintTarget(GLenum target);
+bool allowedHintMode(GLenum pname);
+
 } // namespace GLESv2Validation
 
 #endif
diff --git a/system/GLESv2_enc/gl2_client_context.cpp b/system/GLESv2_enc/gl2_client_context.cpp
index aaa0325..49b52a7 100644
--- a/system/GLESv2_enc/gl2_client_context.cpp
+++ b/system/GLESv2_enc/gl2_client_context.cpp
@@ -436,6 +436,7 @@
 	glDrawElementsDataNullAEMU = (glDrawElementsDataNullAEMU_client_proc_t) getProc("glDrawElementsDataNullAEMU", userData);
 	glUnmapBufferAsyncAEMU = (glUnmapBufferAsyncAEMU_client_proc_t) getProc("glUnmapBufferAsyncAEMU", userData);
 	glFlushMappedBufferRangeAEMU2 = (glFlushMappedBufferRangeAEMU2_client_proc_t) getProc("glFlushMappedBufferRangeAEMU2", userData);
+	glBufferDataSyncAEMU = (glBufferDataSyncAEMU_client_proc_t) getProc("glBufferDataSyncAEMU", userData);
 	return 0;
 }
 
diff --git a/system/GLESv2_enc/gl2_client_context.h b/system/GLESv2_enc/gl2_client_context.h
index 575395a..b3d5f70 100644
--- a/system/GLESv2_enc/gl2_client_context.h
+++ b/system/GLESv2_enc/gl2_client_context.h
@@ -436,6 +436,7 @@
 	glDrawElementsDataNullAEMU_client_proc_t glDrawElementsDataNullAEMU;
 	glUnmapBufferAsyncAEMU_client_proc_t glUnmapBufferAsyncAEMU;
 	glFlushMappedBufferRangeAEMU2_client_proc_t glFlushMappedBufferRangeAEMU2;
+	glBufferDataSyncAEMU_client_proc_t glBufferDataSyncAEMU;
 	virtual ~gl2_client_context_t() {}
 
 	typedef gl2_client_context_t *CONTEXT_ACCESSOR_TYPE(void);
diff --git a/system/GLESv2_enc/gl2_client_proc.h b/system/GLESv2_enc/gl2_client_proc.h
index 615b123..268cb99 100644
--- a/system/GLESv2_enc/gl2_client_proc.h
+++ b/system/GLESv2_enc/gl2_client_proc.h
@@ -438,6 +438,7 @@
 typedef void (gl2_APIENTRY *glDrawElementsDataNullAEMU_client_proc_t) (void * ctx, GLenum, GLsizei, GLenum, void*, GLuint);
 typedef void (gl2_APIENTRY *glUnmapBufferAsyncAEMU_client_proc_t) (void * ctx, GLenum, GLintptr, GLsizeiptr, GLbitfield, void*, GLboolean*);
 typedef void (gl2_APIENTRY *glFlushMappedBufferRangeAEMU2_client_proc_t) (void * ctx, GLenum, GLintptr, GLsizeiptr, GLbitfield, void*);
+typedef GLboolean (gl2_APIENTRY *glBufferDataSyncAEMU_client_proc_t) (void * ctx, GLenum, GLsizeiptr, const GLvoid*, GLenum);
 
 
 #endif
diff --git a/system/GLESv2_enc/gl2_enc.cpp b/system/GLESv2_enc/gl2_enc.cpp
index 814021b..e610c86 100644
--- a/system/GLESv2_enc/gl2_enc.cpp
+++ b/system/GLESv2_enc/gl2_enc.cpp
@@ -12,6 +12,7 @@
 
 #include <stdio.h>
 
+#include "android/base/Tracing.h"
 namespace {
 
 void enc_unsupported()
@@ -21,6 +22,7 @@
 
 void glActiveTexture_enc(void *self , GLenum texture)
 {
+	AEMU_SCOPED_TRACE("glActiveTexture encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -46,6 +48,7 @@
 
 void glAttachShader_enc(void *self , GLuint program, GLuint shader)
 {
+	AEMU_SCOPED_TRACE("glAttachShader encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -72,6 +75,7 @@
 
 void glBindAttribLocation_enc(void *self , GLuint program, GLuint index, const GLchar* name)
 {
+	AEMU_SCOPED_TRACE("glBindAttribLocation encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -91,7 +95,7 @@
 
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &index, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_name; ptr += 4;
+	memcpy(ptr, &__size_name, 4); ptr += 4;
 	memcpy(ptr, name, __size_name);ptr += __size_name;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -101,6 +105,7 @@
 
 void glBindBuffer_enc(void *self , GLenum target, GLuint buffer)
 {
+	AEMU_SCOPED_TRACE("glBindBuffer encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -127,6 +132,7 @@
 
 void glBindFramebuffer_enc(void *self , GLenum target, GLuint framebuffer)
 {
+	AEMU_SCOPED_TRACE("glBindFramebuffer encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -153,6 +159,7 @@
 
 void glBindRenderbuffer_enc(void *self , GLenum target, GLuint renderbuffer)
 {
+	AEMU_SCOPED_TRACE("glBindRenderbuffer encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -179,6 +186,7 @@
 
 void glBindTexture_enc(void *self , GLenum target, GLuint texture)
 {
+	AEMU_SCOPED_TRACE("glBindTexture encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -205,6 +213,7 @@
 
 void glBlendColor_enc(void *self , GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha)
 {
+	AEMU_SCOPED_TRACE("glBlendColor encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -233,6 +242,7 @@
 
 void glBlendEquation_enc(void *self , GLenum mode)
 {
+	AEMU_SCOPED_TRACE("glBlendEquation encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -258,6 +268,7 @@
 
 void glBlendEquationSeparate_enc(void *self , GLenum modeRGB, GLenum modeAlpha)
 {
+	AEMU_SCOPED_TRACE("glBlendEquationSeparate encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -284,6 +295,7 @@
 
 void glBlendFunc_enc(void *self , GLenum sfactor, GLenum dfactor)
 {
+	AEMU_SCOPED_TRACE("glBlendFunc encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -310,6 +322,7 @@
 
 void glBlendFuncSeparate_enc(void *self , GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha)
 {
+	AEMU_SCOPED_TRACE("glBlendFuncSeparate encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -338,6 +351,7 @@
 
 void glBufferData_enc(void *self , GLenum target, GLsizeiptr size, const GLvoid* data, GLenum usage)
 {
+	AEMU_SCOPED_TRACE("glBufferData encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -378,6 +392,7 @@
 
 void glBufferSubData_enc(void *self , GLenum target, GLintptr offset, GLsizeiptr size, const GLvoid* data)
 {
+	AEMU_SCOPED_TRACE("glBufferSubData encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -414,6 +429,7 @@
 
 GLenum glCheckFramebufferStatus_enc(void *self , GLenum target)
 {
+	AEMU_SCOPED_TRACE("glCheckFramebufferStatus encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -454,6 +470,7 @@
 
 void glClear_enc(void *self , GLbitfield mask)
 {
+	AEMU_SCOPED_TRACE("glClear encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -479,6 +496,7 @@
 
 void glClearColor_enc(void *self , GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha)
 {
+	AEMU_SCOPED_TRACE("glClearColor encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -507,6 +525,7 @@
 
 void glClearDepthf_enc(void *self , GLclampf depth)
 {
+	AEMU_SCOPED_TRACE("glClearDepthf encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -532,6 +551,7 @@
 
 void glClearStencil_enc(void *self , GLint s)
 {
+	AEMU_SCOPED_TRACE("glClearStencil encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -557,6 +577,7 @@
 
 void glColorMask_enc(void *self , GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha)
 {
+	AEMU_SCOPED_TRACE("glColorMask encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -585,6 +606,7 @@
 
 void glCompileShader_enc(void *self , GLuint shader)
 {
+	AEMU_SCOPED_TRACE("glCompileShader encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -610,6 +632,7 @@
 
 void glCompressedTexImage2D_enc(void *self , GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const GLvoid* data)
 {
+	AEMU_SCOPED_TRACE("glCompressedTexImage2D encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -650,6 +673,7 @@
 
 void glCompressedTexSubImage2D_enc(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const GLvoid* data)
 {
+	AEMU_SCOPED_TRACE("glCompressedTexSubImage2D encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -691,6 +715,7 @@
 
 void glCopyTexImage2D_enc(void *self , GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border)
 {
+	AEMU_SCOPED_TRACE("glCopyTexImage2D encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -723,6 +748,7 @@
 
 void glCopyTexSubImage2D_enc(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height)
 {
+	AEMU_SCOPED_TRACE("glCopyTexSubImage2D encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -755,6 +781,7 @@
 
 GLuint glCreateProgram_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glCreateProgram encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -794,6 +821,7 @@
 
 GLuint glCreateShader_enc(void *self , GLenum type)
 {
+	AEMU_SCOPED_TRACE("glCreateShader encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -834,6 +862,7 @@
 
 void glCullFace_enc(void *self , GLenum mode)
 {
+	AEMU_SCOPED_TRACE("glCullFace encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -859,6 +888,7 @@
 
 void glDeleteBuffers_enc(void *self , GLsizei n, const GLuint* buffers)
 {
+	AEMU_SCOPED_TRACE("glDeleteBuffers encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -877,7 +907,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_buffers; ptr += 4;
+	memcpy(ptr, &__size_buffers, 4); ptr += 4;
 	memcpy(ptr, buffers, __size_buffers);ptr += __size_buffers;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -887,6 +917,7 @@
 
 void glDeleteFramebuffers_enc(void *self , GLsizei n, const GLuint* framebuffers)
 {
+	AEMU_SCOPED_TRACE("glDeleteFramebuffers encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -905,7 +936,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_framebuffers; ptr += 4;
+	memcpy(ptr, &__size_framebuffers, 4); ptr += 4;
 	memcpy(ptr, framebuffers, __size_framebuffers);ptr += __size_framebuffers;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -915,6 +946,7 @@
 
 void glDeleteProgram_enc(void *self , GLuint program)
 {
+	AEMU_SCOPED_TRACE("glDeleteProgram encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -940,6 +972,7 @@
 
 void glDeleteRenderbuffers_enc(void *self , GLsizei n, const GLuint* renderbuffers)
 {
+	AEMU_SCOPED_TRACE("glDeleteRenderbuffers encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -958,7 +991,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_renderbuffers; ptr += 4;
+	memcpy(ptr, &__size_renderbuffers, 4); ptr += 4;
 	memcpy(ptr, renderbuffers, __size_renderbuffers);ptr += __size_renderbuffers;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -968,6 +1001,7 @@
 
 void glDeleteShader_enc(void *self , GLuint shader)
 {
+	AEMU_SCOPED_TRACE("glDeleteShader encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -993,6 +1027,7 @@
 
 void glDeleteTextures_enc(void *self , GLsizei n, const GLuint* textures)
 {
+	AEMU_SCOPED_TRACE("glDeleteTextures encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1011,7 +1046,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_textures; ptr += 4;
+	memcpy(ptr, &__size_textures, 4); ptr += 4;
 	memcpy(ptr, textures, __size_textures);ptr += __size_textures;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -1021,6 +1056,7 @@
 
 void glDepthFunc_enc(void *self , GLenum func)
 {
+	AEMU_SCOPED_TRACE("glDepthFunc encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1046,6 +1082,7 @@
 
 void glDepthMask_enc(void *self , GLboolean flag)
 {
+	AEMU_SCOPED_TRACE("glDepthMask encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1071,6 +1108,7 @@
 
 void glDepthRangef_enc(void *self , GLclampf zNear, GLclampf zFar)
 {
+	AEMU_SCOPED_TRACE("glDepthRangef encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1097,6 +1135,7 @@
 
 void glDetachShader_enc(void *self , GLuint program, GLuint shader)
 {
+	AEMU_SCOPED_TRACE("glDetachShader encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1123,6 +1162,7 @@
 
 void glDisable_enc(void *self , GLenum cap)
 {
+	AEMU_SCOPED_TRACE("glDisable encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1148,6 +1188,7 @@
 
 void glDisableVertexAttribArray_enc(void *self , GLuint index)
 {
+	AEMU_SCOPED_TRACE("glDisableVertexAttribArray encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1173,6 +1214,7 @@
 
 void glDrawArrays_enc(void *self , GLenum mode, GLint first, GLsizei count)
 {
+	AEMU_SCOPED_TRACE("glDrawArrays encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1200,6 +1242,7 @@
 
 void glEnable_enc(void *self , GLenum cap)
 {
+	AEMU_SCOPED_TRACE("glEnable encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1225,6 +1268,7 @@
 
 void glEnableVertexAttribArray_enc(void *self , GLuint index)
 {
+	AEMU_SCOPED_TRACE("glEnableVertexAttribArray encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1250,6 +1294,7 @@
 
 void glFinish_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glFinish encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1274,6 +1319,7 @@
 
 void glFlush_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glFlush encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1298,6 +1344,7 @@
 
 void glFramebufferRenderbuffer_enc(void *self , GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer)
 {
+	AEMU_SCOPED_TRACE("glFramebufferRenderbuffer encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1326,6 +1373,7 @@
 
 void glFramebufferTexture2D_enc(void *self , GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level)
 {
+	AEMU_SCOPED_TRACE("glFramebufferTexture2D encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1355,6 +1403,7 @@
 
 void glFrontFace_enc(void *self , GLenum mode)
 {
+	AEMU_SCOPED_TRACE("glFrontFace encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1380,6 +1429,7 @@
 
 void glGenBuffers_enc(void *self , GLsizei n, GLuint* buffers)
 {
+	AEMU_SCOPED_TRACE("glGenBuffers encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1398,7 +1448,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_buffers; ptr += 4;
+	memcpy(ptr, &__size_buffers, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -1419,6 +1469,7 @@
 
 void glGenerateMipmap_enc(void *self , GLenum target)
 {
+	AEMU_SCOPED_TRACE("glGenerateMipmap encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1444,6 +1495,7 @@
 
 void glGenFramebuffers_enc(void *self , GLsizei n, GLuint* framebuffers)
 {
+	AEMU_SCOPED_TRACE("glGenFramebuffers encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1462,7 +1514,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_framebuffers; ptr += 4;
+	memcpy(ptr, &__size_framebuffers, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -1483,6 +1535,7 @@
 
 void glGenRenderbuffers_enc(void *self , GLsizei n, GLuint* renderbuffers)
 {
+	AEMU_SCOPED_TRACE("glGenRenderbuffers encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1501,7 +1554,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_renderbuffers; ptr += 4;
+	memcpy(ptr, &__size_renderbuffers, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -1522,6 +1575,7 @@
 
 void glGenTextures_enc(void *self , GLsizei n, GLuint* textures)
 {
+	AEMU_SCOPED_TRACE("glGenTextures encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1540,7 +1594,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_textures; ptr += 4;
+	memcpy(ptr, &__size_textures, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -1561,6 +1615,7 @@
 
 void glGetActiveAttrib_enc(void *self , GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size, GLenum* type, GLchar* name)
 {
+	AEMU_SCOPED_TRACE("glGetActiveAttrib encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1584,10 +1639,10 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &index, 4); ptr += 4;
 		memcpy(ptr, &bufsize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_length; ptr += 4;
-	*(unsigned int *)(ptr) = __size_size; ptr += 4;
-	*(unsigned int *)(ptr) = __size_type; ptr += 4;
-	*(unsigned int *)(ptr) = __size_name; ptr += 4;
+	memcpy(ptr, &__size_length, 4); ptr += 4;
+	memcpy(ptr, &__size_size, 4); ptr += 4;
+	memcpy(ptr, &__size_type, 4); ptr += 4;
+	memcpy(ptr, &__size_name, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -1622,6 +1677,7 @@
 
 void glGetActiveUniform_enc(void *self , GLuint program, GLuint index, GLsizei bufsize, GLsizei* length, GLint* size, GLenum* type, GLchar* name)
 {
+	AEMU_SCOPED_TRACE("glGetActiveUniform encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1645,10 +1701,10 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &index, 4); ptr += 4;
 		memcpy(ptr, &bufsize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_length; ptr += 4;
-	*(unsigned int *)(ptr) = __size_size; ptr += 4;
-	*(unsigned int *)(ptr) = __size_type; ptr += 4;
-	*(unsigned int *)(ptr) = __size_name; ptr += 4;
+	memcpy(ptr, &__size_length, 4); ptr += 4;
+	memcpy(ptr, &__size_size, 4); ptr += 4;
+	memcpy(ptr, &__size_type, 4); ptr += 4;
+	memcpy(ptr, &__size_name, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -1683,6 +1739,7 @@
 
 void glGetAttachedShaders_enc(void *self , GLuint program, GLsizei maxcount, GLsizei* count, GLuint* shaders)
 {
+	AEMU_SCOPED_TRACE("glGetAttachedShaders encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1703,8 +1760,8 @@
 
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &maxcount, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_count; ptr += 4;
-	*(unsigned int *)(ptr) = __size_shaders; ptr += 4;
+	memcpy(ptr, &__size_count, 4); ptr += 4;
+	memcpy(ptr, &__size_shaders, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -1729,6 +1786,7 @@
 
 int glGetAttribLocation_enc(void *self , GLuint program, const GLchar* name)
 {
+	AEMU_SCOPED_TRACE("glGetAttribLocation encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1747,7 +1805,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &program, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_name; ptr += 4;
+	memcpy(ptr, &__size_name, 4); ptr += 4;
 	memcpy(ptr, name, __size_name);ptr += __size_name;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -1772,6 +1830,7 @@
 
 void glGetBooleanv_enc(void *self , GLenum pname, GLboolean* params)
 {
+	AEMU_SCOPED_TRACE("glGetBooleanv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1790,7 +1849,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -1811,6 +1870,7 @@
 
 void glGetBufferParameteriv_enc(void *self , GLenum target, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetBufferParameteriv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1830,7 +1890,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -1851,6 +1911,7 @@
 
 GLenum glGetError_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glGetError encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1890,6 +1951,7 @@
 
 void glGetFloatv_enc(void *self , GLenum pname, GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glGetFloatv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1908,7 +1970,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -1929,6 +1991,7 @@
 
 void glGetFramebufferAttachmentParameteriv_enc(void *self , GLenum target, GLenum attachment, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetFramebufferAttachmentParameteriv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1949,7 +2012,7 @@
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &attachment, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -1970,6 +2033,7 @@
 
 void glGetIntegerv_enc(void *self , GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetIntegerv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1988,7 +2052,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2009,6 +2073,7 @@
 
 void glGetProgramiv_enc(void *self , GLuint program, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetProgramiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2028,7 +2093,7 @@
 
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2049,6 +2114,7 @@
 
 void glGetProgramInfoLog_enc(void *self , GLuint program, GLsizei bufsize, GLsizei* length, GLchar* infolog)
 {
+	AEMU_SCOPED_TRACE("glGetProgramInfoLog encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2069,8 +2135,8 @@
 
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &bufsize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_length; ptr += 4;
-	*(unsigned int *)(ptr) = __size_infolog; ptr += 4;
+	memcpy(ptr, &__size_length, 4); ptr += 4;
+	memcpy(ptr, &__size_infolog, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2095,6 +2161,7 @@
 
 void glGetRenderbufferParameteriv_enc(void *self , GLenum target, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetRenderbufferParameteriv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2114,7 +2181,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2135,6 +2202,7 @@
 
 void glGetShaderiv_enc(void *self , GLuint shader, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetShaderiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2154,7 +2222,7 @@
 
 		memcpy(ptr, &shader, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2175,6 +2243,7 @@
 
 void glGetShaderInfoLog_enc(void *self , GLuint shader, GLsizei bufsize, GLsizei* length, GLchar* infolog)
 {
+	AEMU_SCOPED_TRACE("glGetShaderInfoLog encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2195,8 +2264,8 @@
 
 		memcpy(ptr, &shader, 4); ptr += 4;
 		memcpy(ptr, &bufsize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_length; ptr += 4;
-	*(unsigned int *)(ptr) = __size_infolog; ptr += 4;
+	memcpy(ptr, &__size_length, 4); ptr += 4;
+	memcpy(ptr, &__size_infolog, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2221,6 +2290,7 @@
 
 void glGetShaderPrecisionFormat_enc(void *self , GLenum shadertype, GLenum precisiontype, GLint* range, GLint* precision)
 {
+	AEMU_SCOPED_TRACE("glGetShaderPrecisionFormat encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2241,8 +2311,8 @@
 
 		memcpy(ptr, &shadertype, 4); ptr += 4;
 		memcpy(ptr, &precisiontype, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_range; ptr += 4;
-	*(unsigned int *)(ptr) = __size_precision; ptr += 4;
+	memcpy(ptr, &__size_range, 4); ptr += 4;
+	memcpy(ptr, &__size_precision, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2265,6 +2335,7 @@
 
 void glGetShaderSource_enc(void *self , GLuint shader, GLsizei bufsize, GLsizei* length, GLchar* source)
 {
+	AEMU_SCOPED_TRACE("glGetShaderSource encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2285,8 +2356,8 @@
 
 		memcpy(ptr, &shader, 4); ptr += 4;
 		memcpy(ptr, &bufsize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_length; ptr += 4;
-	*(unsigned int *)(ptr) = __size_source; ptr += 4;
+	memcpy(ptr, &__size_length, 4); ptr += 4;
+	memcpy(ptr, &__size_source, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2311,6 +2382,7 @@
 
 void glGetTexParameterfv_enc(void *self , GLenum target, GLenum pname, GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glGetTexParameterfv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2330,7 +2402,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2351,6 +2423,7 @@
 
 void glGetTexParameteriv_enc(void *self , GLenum target, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetTexParameteriv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2370,7 +2443,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2391,6 +2464,7 @@
 
 void glGetUniformfv_enc(void *self , GLuint program, GLint location, GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glGetUniformfv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2410,7 +2484,7 @@
 
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &location, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2431,6 +2505,7 @@
 
 void glGetUniformiv_enc(void *self , GLuint program, GLint location, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetUniformiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2450,7 +2525,7 @@
 
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &location, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2471,6 +2546,7 @@
 
 int glGetUniformLocation_enc(void *self , GLuint program, const GLchar* name)
 {
+	AEMU_SCOPED_TRACE("glGetUniformLocation encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2489,7 +2565,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &program, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_name; ptr += 4;
+	memcpy(ptr, &__size_name, 4); ptr += 4;
 	memcpy(ptr, name, __size_name);ptr += __size_name;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -2514,6 +2590,7 @@
 
 void glGetVertexAttribfv_enc(void *self , GLuint index, GLenum pname, GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glGetVertexAttribfv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2533,7 +2610,7 @@
 
 		memcpy(ptr, &index, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2554,6 +2631,7 @@
 
 void glGetVertexAttribiv_enc(void *self , GLuint index, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetVertexAttribiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2573,7 +2651,7 @@
 
 		memcpy(ptr, &index, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -2594,6 +2672,7 @@
 
 void glHint_enc(void *self , GLenum target, GLenum mode)
 {
+	AEMU_SCOPED_TRACE("glHint encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2620,6 +2699,7 @@
 
 GLboolean glIsBuffer_enc(void *self , GLuint buffer)
 {
+	AEMU_SCOPED_TRACE("glIsBuffer encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2660,6 +2740,7 @@
 
 GLboolean glIsEnabled_enc(void *self , GLenum cap)
 {
+	AEMU_SCOPED_TRACE("glIsEnabled encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2700,6 +2781,7 @@
 
 GLboolean glIsFramebuffer_enc(void *self , GLuint framebuffer)
 {
+	AEMU_SCOPED_TRACE("glIsFramebuffer encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2740,6 +2822,7 @@
 
 GLboolean glIsProgram_enc(void *self , GLuint program)
 {
+	AEMU_SCOPED_TRACE("glIsProgram encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2780,6 +2863,7 @@
 
 GLboolean glIsRenderbuffer_enc(void *self , GLuint renderbuffer)
 {
+	AEMU_SCOPED_TRACE("glIsRenderbuffer encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2820,6 +2904,7 @@
 
 GLboolean glIsShader_enc(void *self , GLuint shader)
 {
+	AEMU_SCOPED_TRACE("glIsShader encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2860,6 +2945,7 @@
 
 GLboolean glIsTexture_enc(void *self , GLuint texture)
 {
+	AEMU_SCOPED_TRACE("glIsTexture encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2900,6 +2986,7 @@
 
 void glLineWidth_enc(void *self , GLfloat width)
 {
+	AEMU_SCOPED_TRACE("glLineWidth encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2925,6 +3012,7 @@
 
 void glLinkProgram_enc(void *self , GLuint program)
 {
+	AEMU_SCOPED_TRACE("glLinkProgram encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2950,6 +3038,7 @@
 
 void glPixelStorei_enc(void *self , GLenum pname, GLint param)
 {
+	AEMU_SCOPED_TRACE("glPixelStorei encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -2976,6 +3065,7 @@
 
 void glPolygonOffset_enc(void *self , GLfloat factor, GLfloat units)
 {
+	AEMU_SCOPED_TRACE("glPolygonOffset encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3002,6 +3092,7 @@
 
 void glReadPixels_enc(void *self , GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLvoid* pixels)
 {
+	AEMU_SCOPED_TRACE("glReadPixels encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3025,7 +3116,7 @@
 		memcpy(ptr, &height, 4); ptr += 4;
 		memcpy(ptr, &format, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_pixels; ptr += 4;
+	memcpy(ptr, &__size_pixels, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -3046,6 +3137,7 @@
 
 void glReleaseShaderCompiler_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glReleaseShaderCompiler encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3070,6 +3162,7 @@
 
 void glRenderbufferStorage_enc(void *self , GLenum target, GLenum internalformat, GLsizei width, GLsizei height)
 {
+	AEMU_SCOPED_TRACE("glRenderbufferStorage encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3098,6 +3191,7 @@
 
 void glSampleCoverage_enc(void *self , GLclampf value, GLboolean invert)
 {
+	AEMU_SCOPED_TRACE("glSampleCoverage encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3124,6 +3218,7 @@
 
 void glScissor_enc(void *self , GLint x, GLint y, GLsizei width, GLsizei height)
 {
+	AEMU_SCOPED_TRACE("glScissor encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3152,6 +3247,7 @@
 
 void glStencilFunc_enc(void *self , GLenum func, GLint ref, GLuint mask)
 {
+	AEMU_SCOPED_TRACE("glStencilFunc encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3179,6 +3275,7 @@
 
 void glStencilFuncSeparate_enc(void *self , GLenum face, GLenum func, GLint ref, GLuint mask)
 {
+	AEMU_SCOPED_TRACE("glStencilFuncSeparate encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3207,6 +3304,7 @@
 
 void glStencilMask_enc(void *self , GLuint mask)
 {
+	AEMU_SCOPED_TRACE("glStencilMask encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3232,6 +3330,7 @@
 
 void glStencilMaskSeparate_enc(void *self , GLenum face, GLuint mask)
 {
+	AEMU_SCOPED_TRACE("glStencilMaskSeparate encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3258,6 +3357,7 @@
 
 void glStencilOp_enc(void *self , GLenum fail, GLenum zfail, GLenum zpass)
 {
+	AEMU_SCOPED_TRACE("glStencilOp encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3285,6 +3385,7 @@
 
 void glStencilOpSeparate_enc(void *self , GLenum face, GLenum fail, GLenum zfail, GLenum zpass)
 {
+	AEMU_SCOPED_TRACE("glStencilOpSeparate encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3313,6 +3414,7 @@
 
 void glTexImage2D_enc(void *self , GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const GLvoid* pixels)
 {
+	AEMU_SCOPED_TRACE("glTexImage2D encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3354,6 +3456,7 @@
 
 void glTexParameterf_enc(void *self , GLenum target, GLenum pname, GLfloat param)
 {
+	AEMU_SCOPED_TRACE("glTexParameterf encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3381,6 +3484,7 @@
 
 void glTexParameterfv_enc(void *self , GLenum target, GLenum pname, const GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glTexParameterfv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3400,7 +3504,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3410,6 +3514,7 @@
 
 void glTexParameteri_enc(void *self , GLenum target, GLenum pname, GLint param)
 {
+	AEMU_SCOPED_TRACE("glTexParameteri encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3437,6 +3542,7 @@
 
 void glTexParameteriv_enc(void *self , GLenum target, GLenum pname, const GLint* params)
 {
+	AEMU_SCOPED_TRACE("glTexParameteriv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3456,7 +3562,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3466,6 +3572,7 @@
 
 void glTexSubImage2D_enc(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const GLvoid* pixels)
 {
+	AEMU_SCOPED_TRACE("glTexSubImage2D encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3507,6 +3614,7 @@
 
 void glUniform1f_enc(void *self , GLint location, GLfloat x)
 {
+	AEMU_SCOPED_TRACE("glUniform1f encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3533,6 +3641,7 @@
 
 void glUniform1fv_enc(void *self , GLint location, GLsizei count, const GLfloat* v)
 {
+	AEMU_SCOPED_TRACE("glUniform1fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3552,7 +3661,7 @@
 
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_v; ptr += 4;
+	memcpy(ptr, &__size_v, 4); ptr += 4;
 	memcpy(ptr, v, __size_v);ptr += __size_v;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3562,6 +3671,7 @@
 
 void glUniform1i_enc(void *self , GLint location, GLint x)
 {
+	AEMU_SCOPED_TRACE("glUniform1i encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3588,6 +3698,7 @@
 
 void glUniform1iv_enc(void *self , GLint location, GLsizei count, const GLint* v)
 {
+	AEMU_SCOPED_TRACE("glUniform1iv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3607,7 +3718,7 @@
 
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_v; ptr += 4;
+	memcpy(ptr, &__size_v, 4); ptr += 4;
 	memcpy(ptr, v, __size_v);ptr += __size_v;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3617,6 +3728,7 @@
 
 void glUniform2f_enc(void *self , GLint location, GLfloat x, GLfloat y)
 {
+	AEMU_SCOPED_TRACE("glUniform2f encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3644,6 +3756,7 @@
 
 void glUniform2fv_enc(void *self , GLint location, GLsizei count, const GLfloat* v)
 {
+	AEMU_SCOPED_TRACE("glUniform2fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3663,7 +3776,7 @@
 
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_v; ptr += 4;
+	memcpy(ptr, &__size_v, 4); ptr += 4;
 	memcpy(ptr, v, __size_v);ptr += __size_v;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3673,6 +3786,7 @@
 
 void glUniform2i_enc(void *self , GLint location, GLint x, GLint y)
 {
+	AEMU_SCOPED_TRACE("glUniform2i encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3700,6 +3814,7 @@
 
 void glUniform2iv_enc(void *self , GLint location, GLsizei count, const GLint* v)
 {
+	AEMU_SCOPED_TRACE("glUniform2iv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3719,7 +3834,7 @@
 
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_v; ptr += 4;
+	memcpy(ptr, &__size_v, 4); ptr += 4;
 	memcpy(ptr, v, __size_v);ptr += __size_v;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3729,6 +3844,7 @@
 
 void glUniform3f_enc(void *self , GLint location, GLfloat x, GLfloat y, GLfloat z)
 {
+	AEMU_SCOPED_TRACE("glUniform3f encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3757,6 +3873,7 @@
 
 void glUniform3fv_enc(void *self , GLint location, GLsizei count, const GLfloat* v)
 {
+	AEMU_SCOPED_TRACE("glUniform3fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3776,7 +3893,7 @@
 
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_v; ptr += 4;
+	memcpy(ptr, &__size_v, 4); ptr += 4;
 	memcpy(ptr, v, __size_v);ptr += __size_v;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3786,6 +3903,7 @@
 
 void glUniform3i_enc(void *self , GLint location, GLint x, GLint y, GLint z)
 {
+	AEMU_SCOPED_TRACE("glUniform3i encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3814,6 +3932,7 @@
 
 void glUniform3iv_enc(void *self , GLint location, GLsizei count, const GLint* v)
 {
+	AEMU_SCOPED_TRACE("glUniform3iv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3833,7 +3952,7 @@
 
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_v; ptr += 4;
+	memcpy(ptr, &__size_v, 4); ptr += 4;
 	memcpy(ptr, v, __size_v);ptr += __size_v;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3843,6 +3962,7 @@
 
 void glUniform4f_enc(void *self , GLint location, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
 {
+	AEMU_SCOPED_TRACE("glUniform4f encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3872,6 +3992,7 @@
 
 void glUniform4fv_enc(void *self , GLint location, GLsizei count, const GLfloat* v)
 {
+	AEMU_SCOPED_TRACE("glUniform4fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3891,7 +4012,7 @@
 
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_v; ptr += 4;
+	memcpy(ptr, &__size_v, 4); ptr += 4;
 	memcpy(ptr, v, __size_v);ptr += __size_v;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3901,6 +4022,7 @@
 
 void glUniform4i_enc(void *self , GLint location, GLint x, GLint y, GLint z, GLint w)
 {
+	AEMU_SCOPED_TRACE("glUniform4i encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3930,6 +4052,7 @@
 
 void glUniform4iv_enc(void *self , GLint location, GLsizei count, const GLint* v)
 {
+	AEMU_SCOPED_TRACE("glUniform4iv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3949,7 +4072,7 @@
 
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_v; ptr += 4;
+	memcpy(ptr, &__size_v, 4); ptr += 4;
 	memcpy(ptr, v, __size_v);ptr += __size_v;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3959,6 +4082,7 @@
 
 void glUniformMatrix2fv_enc(void *self , GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glUniformMatrix2fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -3979,7 +4103,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -3989,6 +4113,7 @@
 
 void glUniformMatrix3fv_enc(void *self , GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glUniformMatrix3fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4009,7 +4134,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -4019,6 +4144,7 @@
 
 void glUniformMatrix4fv_enc(void *self , GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glUniformMatrix4fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4039,7 +4165,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -4049,6 +4175,7 @@
 
 void glUseProgram_enc(void *self , GLuint program)
 {
+	AEMU_SCOPED_TRACE("glUseProgram encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4074,6 +4201,7 @@
 
 void glValidateProgram_enc(void *self , GLuint program)
 {
+	AEMU_SCOPED_TRACE("glValidateProgram encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4099,6 +4227,7 @@
 
 void glVertexAttrib1f_enc(void *self , GLuint indx, GLfloat x)
 {
+	AEMU_SCOPED_TRACE("glVertexAttrib1f encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4125,6 +4254,7 @@
 
 void glVertexAttrib1fv_enc(void *self , GLuint indx, const GLfloat* values)
 {
+	AEMU_SCOPED_TRACE("glVertexAttrib1fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4143,7 +4273,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &indx, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_values; ptr += 4;
+	memcpy(ptr, &__size_values, 4); ptr += 4;
 	memcpy(ptr, values, __size_values);ptr += __size_values;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -4153,6 +4283,7 @@
 
 void glVertexAttrib2f_enc(void *self , GLuint indx, GLfloat x, GLfloat y)
 {
+	AEMU_SCOPED_TRACE("glVertexAttrib2f encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4180,6 +4311,7 @@
 
 void glVertexAttrib2fv_enc(void *self , GLuint indx, const GLfloat* values)
 {
+	AEMU_SCOPED_TRACE("glVertexAttrib2fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4198,7 +4330,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &indx, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_values; ptr += 4;
+	memcpy(ptr, &__size_values, 4); ptr += 4;
 	memcpy(ptr, values, __size_values);ptr += __size_values;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -4208,6 +4340,7 @@
 
 void glVertexAttrib3f_enc(void *self , GLuint indx, GLfloat x, GLfloat y, GLfloat z)
 {
+	AEMU_SCOPED_TRACE("glVertexAttrib3f encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4236,6 +4369,7 @@
 
 void glVertexAttrib3fv_enc(void *self , GLuint indx, const GLfloat* values)
 {
+	AEMU_SCOPED_TRACE("glVertexAttrib3fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4254,7 +4388,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &indx, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_values; ptr += 4;
+	memcpy(ptr, &__size_values, 4); ptr += 4;
 	memcpy(ptr, values, __size_values);ptr += __size_values;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -4264,6 +4398,7 @@
 
 void glVertexAttrib4f_enc(void *self , GLuint indx, GLfloat x, GLfloat y, GLfloat z, GLfloat w)
 {
+	AEMU_SCOPED_TRACE("glVertexAttrib4f encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4293,6 +4428,7 @@
 
 void glVertexAttrib4fv_enc(void *self , GLuint indx, const GLfloat* values)
 {
+	AEMU_SCOPED_TRACE("glVertexAttrib4fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4311,7 +4447,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &indx, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_values; ptr += 4;
+	memcpy(ptr, &__size_values, 4); ptr += 4;
 	memcpy(ptr, values, __size_values);ptr += __size_values;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -4321,6 +4457,7 @@
 
 void glViewport_enc(void *self , GLint x, GLint y, GLsizei width, GLsizei height)
 {
+	AEMU_SCOPED_TRACE("glViewport encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4349,6 +4486,7 @@
 
 void glEGLImageTargetTexture2DOES_enc(void *self , GLenum target, GLeglImageOES image)
 {
+	AEMU_SCOPED_TRACE("glEGLImageTargetTexture2DOES encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4375,6 +4513,7 @@
 
 void glEGLImageTargetRenderbufferStorageOES_enc(void *self , GLenum target, GLeglImageOES image)
 {
+	AEMU_SCOPED_TRACE("glEGLImageTargetRenderbufferStorageOES encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4401,6 +4540,7 @@
 
 GLboolean glUnmapBufferOES_enc(void *self , GLenum target)
 {
+	AEMU_SCOPED_TRACE("glUnmapBufferOES encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4441,6 +4581,7 @@
 
 void glTexImage3DOES_enc(void *self , GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid* pixels)
 {
+	AEMU_SCOPED_TRACE("glTexImage3DOES encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4483,6 +4624,7 @@
 
 void glTexSubImage3DOES_enc(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid* pixels)
 {
+	AEMU_SCOPED_TRACE("glTexSubImage3DOES encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4526,6 +4668,7 @@
 
 void glCopyTexSubImage3DOES_enc(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height)
 {
+	AEMU_SCOPED_TRACE("glCopyTexSubImage3DOES encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4559,6 +4702,7 @@
 
 void glCompressedTexImage3DOES_enc(void *self , GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const GLvoid* data)
 {
+	AEMU_SCOPED_TRACE("glCompressedTexImage3DOES encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4600,6 +4744,7 @@
 
 void glCompressedTexSubImage3DOES_enc(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const GLvoid* data)
 {
+	AEMU_SCOPED_TRACE("glCompressedTexSubImage3DOES encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4643,6 +4788,7 @@
 
 void glFramebufferTexture3DOES_enc(void *self , GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset)
 {
+	AEMU_SCOPED_TRACE("glFramebufferTexture3DOES encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4673,6 +4819,7 @@
 
 void glBindVertexArrayOES_enc(void *self , GLuint array)
 {
+	AEMU_SCOPED_TRACE("glBindVertexArrayOES encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4698,6 +4845,7 @@
 
 void glDeleteVertexArraysOES_enc(void *self , GLsizei n, const GLuint* arrays)
 {
+	AEMU_SCOPED_TRACE("glDeleteVertexArraysOES encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4716,7 +4864,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_arrays; ptr += 4;
+	memcpy(ptr, &__size_arrays, 4); ptr += 4;
 	memcpy(ptr, arrays, __size_arrays);ptr += __size_arrays;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -4726,6 +4874,7 @@
 
 void glGenVertexArraysOES_enc(void *self , GLsizei n, GLuint* arrays)
 {
+	AEMU_SCOPED_TRACE("glGenVertexArraysOES encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4744,7 +4893,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_arrays; ptr += 4;
+	memcpy(ptr, &__size_arrays, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -4765,6 +4914,7 @@
 
 GLboolean glIsVertexArrayOES_enc(void *self , GLuint array)
 {
+	AEMU_SCOPED_TRACE("glIsVertexArrayOES encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4805,6 +4955,7 @@
 
 void glDiscardFramebufferEXT_enc(void *self , GLenum target, GLsizei numAttachments, const GLenum* attachments)
 {
+	AEMU_SCOPED_TRACE("glDiscardFramebufferEXT encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4824,7 +4975,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &numAttachments, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_attachments; ptr += 4;
+	memcpy(ptr, &__size_attachments, 4); ptr += 4;
 	memcpy(ptr, attachments, __size_attachments);ptr += __size_attachments;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -4834,6 +4985,7 @@
 
 void glVertexAttribPointerData_enc(void *self , GLuint indx, GLint size, GLenum type, GLboolean normalized, GLsizei stride, void* data, GLuint datalen)
 {
+	AEMU_SCOPED_TRACE("glVertexAttribPointerData encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4856,7 +5008,7 @@
 		memcpy(ptr, &type, 4); ptr += 4;
 		memcpy(ptr, &normalized, 1); ptr += 1;
 		memcpy(ptr, &stride, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 	 glUtilsPackPointerData((unsigned char *)ptr, (unsigned char *)data, size, type, stride, datalen);ptr += __size_data;
 		memcpy(ptr, &datalen, 4); ptr += 4;
 
@@ -4867,6 +5019,7 @@
 
 void glVertexAttribPointerOffset_enc(void *self , GLuint indx, GLint size, GLenum type, GLboolean normalized, GLsizei stride, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glVertexAttribPointerOffset encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4897,6 +5050,7 @@
 
 void glDrawElementsOffset_enc(void *self , GLenum mode, GLsizei count, GLenum type, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glDrawElementsOffset encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4925,6 +5079,7 @@
 
 void glDrawElementsData_enc(void *self , GLenum mode, GLsizei count, GLenum type, void* data, GLuint datalen)
 {
+	AEMU_SCOPED_TRACE("glDrawElementsData encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4945,7 +5100,7 @@
 		memcpy(ptr, &mode, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 	memcpy(ptr, data, __size_data);ptr += __size_data;
 		memcpy(ptr, &datalen, 4); ptr += 4;
 
@@ -4956,6 +5111,7 @@
 
 void glGetCompressedTextureFormats_enc(void *self , int count, GLint* formats)
 {
+	AEMU_SCOPED_TRACE("glGetCompressedTextureFormats encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -4974,7 +5130,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_formats; ptr += 4;
+	memcpy(ptr, &__size_formats, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -4995,6 +5151,7 @@
 
 void glShaderString_enc(void *self , GLuint shader, const GLchar* string, GLsizei len)
 {
+	AEMU_SCOPED_TRACE("glShaderString encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5013,7 +5170,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &shader, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_string; ptr += 4;
+	memcpy(ptr, &__size_string, 4); ptr += 4;
 	memcpy(ptr, string, __size_string);ptr += __size_string;
 		memcpy(ptr, &len, 4); ptr += 4;
 
@@ -5024,6 +5181,7 @@
 
 int glFinishRoundTrip_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glFinishRoundTrip encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5063,6 +5221,7 @@
 
 void glGenVertexArrays_enc(void *self , GLsizei n, GLuint* arrays)
 {
+	AEMU_SCOPED_TRACE("glGenVertexArrays encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5081,7 +5240,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_arrays; ptr += 4;
+	memcpy(ptr, &__size_arrays, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -5102,6 +5261,7 @@
 
 void glBindVertexArray_enc(void *self , GLuint array)
 {
+	AEMU_SCOPED_TRACE("glBindVertexArray encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5127,6 +5287,7 @@
 
 void glDeleteVertexArrays_enc(void *self , GLsizei n, const GLuint* arrays)
 {
+	AEMU_SCOPED_TRACE("glDeleteVertexArrays encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5145,7 +5306,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_arrays; ptr += 4;
+	memcpy(ptr, &__size_arrays, 4); ptr += 4;
 	memcpy(ptr, arrays, __size_arrays);ptr += __size_arrays;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -5155,6 +5316,7 @@
 
 GLboolean glIsVertexArray_enc(void *self , GLuint array)
 {
+	AEMU_SCOPED_TRACE("glIsVertexArray encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5195,6 +5357,7 @@
 
 void glMapBufferRangeAEMU_enc(void *self , GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access, void* mapped)
 {
+	AEMU_SCOPED_TRACE("glMapBufferRangeAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5216,7 +5379,7 @@
 		memcpy(ptr, &offset, 4); ptr += 4;
 		memcpy(ptr, &length, 4); ptr += 4;
 		memcpy(ptr, &access, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_mapped; ptr += 4;
+	memcpy(ptr, &__size_mapped, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -5239,6 +5402,7 @@
 
 void glUnmapBufferAEMU_enc(void *self , GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access, void* guest_buffer, GLboolean* out_res)
 {
+	AEMU_SCOPED_TRACE("glUnmapBufferAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5261,9 +5425,9 @@
 		memcpy(ptr, &offset, 4); ptr += 4;
 		memcpy(ptr, &length, 4); ptr += 4;
 		memcpy(ptr, &access, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_guest_buffer; ptr += 4;
+	memcpy(ptr, &__size_guest_buffer, 4); ptr += 4;
 	if (guest_buffer != NULL) memcpy(ptr, guest_buffer, __size_guest_buffer);ptr += __size_guest_buffer;
-	*(unsigned int *)(ptr) = __size_out_res; ptr += 4;
+	memcpy(ptr, &__size_out_res, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -5284,6 +5448,7 @@
 
 void glFlushMappedBufferRangeAEMU_enc(void *self , GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access, void* guest_buffer)
 {
+	AEMU_SCOPED_TRACE("glFlushMappedBufferRangeAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5305,7 +5470,7 @@
 		memcpy(ptr, &offset, 4); ptr += 4;
 		memcpy(ptr, &length, 4); ptr += 4;
 		memcpy(ptr, &access, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_guest_buffer; ptr += 4;
+	memcpy(ptr, &__size_guest_buffer, 4); ptr += 4;
 	if (guest_buffer != NULL) memcpy(ptr, guest_buffer, __size_guest_buffer);ptr += __size_guest_buffer;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -5315,6 +5480,7 @@
 
 void glReadPixelsOffsetAEMU_enc(void *self , GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glReadPixelsOffsetAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5346,6 +5512,7 @@
 
 void glCompressedTexImage2DOffsetAEMU_enc(void *self , GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glCompressedTexImage2DOffsetAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5378,6 +5545,7 @@
 
 void glCompressedTexSubImage2DOffsetAEMU_enc(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glCompressedTexSubImage2DOffsetAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5411,6 +5579,7 @@
 
 void glTexImage2DOffsetAEMU_enc(void *self , GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glTexImage2DOffsetAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5444,6 +5613,7 @@
 
 void glTexSubImage2DOffsetAEMU_enc(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glTexSubImage2DOffsetAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5477,6 +5647,7 @@
 
 void glBindBufferRange_enc(void *self , GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size)
 {
+	AEMU_SCOPED_TRACE("glBindBufferRange encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5506,6 +5677,7 @@
 
 void glBindBufferBase_enc(void *self , GLenum target, GLuint index, GLuint buffer)
 {
+	AEMU_SCOPED_TRACE("glBindBufferBase encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5533,6 +5705,7 @@
 
 void glCopyBufferSubData_enc(void *self , GLenum readtarget, GLenum writetarget, GLintptr readoffset, GLintptr writeoffset, GLsizeiptr size)
 {
+	AEMU_SCOPED_TRACE("glCopyBufferSubData encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5563,6 +5736,7 @@
 
 void glClearBufferiv_enc(void *self , GLenum buffer, GLint drawBuffer, const GLint* value)
 {
+	AEMU_SCOPED_TRACE("glClearBufferiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5582,7 +5756,7 @@
 
 		memcpy(ptr, &buffer, 4); ptr += 4;
 		memcpy(ptr, &drawBuffer, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -5592,6 +5766,7 @@
 
 void glClearBufferuiv_enc(void *self , GLenum buffer, GLint drawBuffer, const GLuint* value)
 {
+	AEMU_SCOPED_TRACE("glClearBufferuiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5611,7 +5786,7 @@
 
 		memcpy(ptr, &buffer, 4); ptr += 4;
 		memcpy(ptr, &drawBuffer, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -5621,6 +5796,7 @@
 
 void glClearBufferfv_enc(void *self , GLenum buffer, GLint drawBuffer, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glClearBufferfv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5640,7 +5816,7 @@
 
 		memcpy(ptr, &buffer, 4); ptr += 4;
 		memcpy(ptr, &drawBuffer, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -5650,6 +5826,7 @@
 
 void glClearBufferfi_enc(void *self , GLenum buffer, GLint drawBuffer, GLfloat depth, GLint stencil)
 {
+	AEMU_SCOPED_TRACE("glClearBufferfi encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5678,6 +5855,7 @@
 
 void glUniformBlockBinding_enc(void *self , GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding)
 {
+	AEMU_SCOPED_TRACE("glUniformBlockBinding encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5705,6 +5883,7 @@
 
 GLuint glGetUniformBlockIndex_enc(void *self , GLuint program, const GLchar* uniformBlockName)
 {
+	AEMU_SCOPED_TRACE("glGetUniformBlockIndex encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5723,7 +5902,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &program, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_uniformBlockName; ptr += 4;
+	memcpy(ptr, &__size_uniformBlockName, 4); ptr += 4;
 	memcpy(ptr, uniformBlockName, __size_uniformBlockName);ptr += __size_uniformBlockName;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -5748,6 +5927,7 @@
 
 void glGetUniformIndicesAEMU_enc(void *self , GLuint program, GLsizei uniformCount, const GLchar* packedUniformNames, GLsizei packedLen, GLuint* uniformIndices)
 {
+	AEMU_SCOPED_TRACE("glGetUniformIndicesAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5768,10 +5948,10 @@
 
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &uniformCount, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_packedUniformNames; ptr += 4;
+	memcpy(ptr, &__size_packedUniformNames, 4); ptr += 4;
 	memcpy(ptr, packedUniformNames, __size_packedUniformNames);ptr += __size_packedUniformNames;
 		memcpy(ptr, &packedLen, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_uniformIndices; ptr += 4;
+	memcpy(ptr, &__size_uniformIndices, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -5792,6 +5972,7 @@
 
 void glGetActiveUniformBlockiv_enc(void *self , GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetActiveUniformBlockiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5812,7 +5993,7 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &uniformBlockIndex, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -5833,6 +6014,7 @@
 
 void glGetActiveUniformBlockName_enc(void *self , GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei* length, GLchar* uniformBlockName)
 {
+	AEMU_SCOPED_TRACE("glGetActiveUniformBlockName encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5854,8 +6036,8 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &uniformBlockIndex, 4); ptr += 4;
 		memcpy(ptr, &bufSize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_length; ptr += 4;
-	*(unsigned int *)(ptr) = __size_uniformBlockName; ptr += 4;
+	memcpy(ptr, &__size_length, 4); ptr += 4;
+	memcpy(ptr, &__size_uniformBlockName, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -5882,6 +6064,7 @@
 
 void glUniform1ui_enc(void *self , GLint location, GLuint v0)
 {
+	AEMU_SCOPED_TRACE("glUniform1ui encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5908,6 +6091,7 @@
 
 void glUniform2ui_enc(void *self , GLint location, GLuint v0, GLuint v1)
 {
+	AEMU_SCOPED_TRACE("glUniform2ui encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5935,6 +6119,7 @@
 
 void glUniform3ui_enc(void *self , GLint location, GLuint v0, GLuint v1, GLuint v2)
 {
+	AEMU_SCOPED_TRACE("glUniform3ui encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5963,6 +6148,7 @@
 
 void glUniform4ui_enc(void *self , GLint location, GLint v0, GLuint v1, GLuint v2, GLuint v3)
 {
+	AEMU_SCOPED_TRACE("glUniform4ui encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -5992,6 +6178,7 @@
 
 void glUniform1uiv_enc(void *self , GLint location, GLsizei count, const GLuint* value)
 {
+	AEMU_SCOPED_TRACE("glUniform1uiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6011,7 +6198,7 @@
 
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6021,6 +6208,7 @@
 
 void glUniform2uiv_enc(void *self , GLint location, GLsizei count, const GLuint* value)
 {
+	AEMU_SCOPED_TRACE("glUniform2uiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6040,7 +6228,7 @@
 
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6050,6 +6238,7 @@
 
 void glUniform3uiv_enc(void *self , GLint location, GLsizei count, const GLuint* value)
 {
+	AEMU_SCOPED_TRACE("glUniform3uiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6069,7 +6258,7 @@
 
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6079,6 +6268,7 @@
 
 void glUniform4uiv_enc(void *self , GLint location, GLsizei count, const GLuint* value)
 {
+	AEMU_SCOPED_TRACE("glUniform4uiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6098,7 +6288,7 @@
 
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6108,6 +6298,7 @@
 
 void glUniformMatrix2x3fv_enc(void *self , GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glUniformMatrix2x3fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6128,7 +6319,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6138,6 +6329,7 @@
 
 void glUniformMatrix3x2fv_enc(void *self , GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glUniformMatrix3x2fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6158,7 +6350,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6168,6 +6360,7 @@
 
 void glUniformMatrix2x4fv_enc(void *self , GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glUniformMatrix2x4fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6188,7 +6381,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6198,6 +6391,7 @@
 
 void glUniformMatrix4x2fv_enc(void *self , GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glUniformMatrix4x2fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6218,7 +6412,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6228,6 +6422,7 @@
 
 void glUniformMatrix3x4fv_enc(void *self , GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glUniformMatrix3x4fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6248,7 +6443,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6258,6 +6453,7 @@
 
 void glUniformMatrix4x3fv_enc(void *self , GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glUniformMatrix4x3fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6278,7 +6474,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6288,6 +6484,7 @@
 
 void glGetUniformuiv_enc(void *self , GLuint program, GLint location, GLuint* params)
 {
+	AEMU_SCOPED_TRACE("glGetUniformuiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6307,7 +6504,7 @@
 
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &location, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -6328,6 +6525,7 @@
 
 void glGetActiveUniformsiv_enc(void *self , GLuint program, GLsizei uniformCount, const GLuint* uniformIndices, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetActiveUniformsiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6348,10 +6546,10 @@
 
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &uniformCount, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_uniformIndices; ptr += 4;
+	memcpy(ptr, &__size_uniformIndices, 4); ptr += 4;
 	memcpy(ptr, uniformIndices, __size_uniformIndices);ptr += __size_uniformIndices;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -6372,6 +6570,7 @@
 
 void glVertexAttribI4i_enc(void *self , GLuint index, GLint v0, GLint v1, GLint v2, GLint v3)
 {
+	AEMU_SCOPED_TRACE("glVertexAttribI4i encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6401,6 +6600,7 @@
 
 void glVertexAttribI4ui_enc(void *self , GLuint index, GLuint v0, GLuint v1, GLuint v2, GLuint v3)
 {
+	AEMU_SCOPED_TRACE("glVertexAttribI4ui encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6430,6 +6630,7 @@
 
 void glVertexAttribI4iv_enc(void *self , GLuint index, const GLint* v)
 {
+	AEMU_SCOPED_TRACE("glVertexAttribI4iv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6448,7 +6649,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &index, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_v; ptr += 4;
+	memcpy(ptr, &__size_v, 4); ptr += 4;
 	memcpy(ptr, v, __size_v);ptr += __size_v;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6458,6 +6659,7 @@
 
 void glVertexAttribI4uiv_enc(void *self , GLuint index, const GLuint* v)
 {
+	AEMU_SCOPED_TRACE("glVertexAttribI4uiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6476,7 +6678,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &index, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_v; ptr += 4;
+	memcpy(ptr, &__size_v, 4); ptr += 4;
 	memcpy(ptr, v, __size_v);ptr += __size_v;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -6486,6 +6688,7 @@
 
 void glVertexAttribIPointerOffsetAEMU_enc(void *self , GLuint index, GLint size, GLenum type, GLsizei stride, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glVertexAttribIPointerOffsetAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6515,6 +6718,7 @@
 
 void glVertexAttribIPointerDataAEMU_enc(void *self , GLuint index, GLint size, GLenum type, GLsizei stride, void* data, GLuint datalen)
 {
+	AEMU_SCOPED_TRACE("glVertexAttribIPointerDataAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6536,7 +6740,7 @@
 		memcpy(ptr, &size, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
 		memcpy(ptr, &stride, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 	 glUtilsPackPointerData((unsigned char *)ptr, (unsigned char *)data, size, type, stride, datalen);ptr += __size_data;
 		memcpy(ptr, &datalen, 4); ptr += 4;
 
@@ -6547,6 +6751,7 @@
 
 void glGetVertexAttribIiv_enc(void *self , GLuint index, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetVertexAttribIiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6566,7 +6771,7 @@
 
 		memcpy(ptr, &index, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -6587,6 +6792,7 @@
 
 void glGetVertexAttribIuiv_enc(void *self , GLuint index, GLenum pname, GLuint* params)
 {
+	AEMU_SCOPED_TRACE("glGetVertexAttribIuiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6606,7 +6812,7 @@
 
 		memcpy(ptr, &index, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -6627,6 +6833,7 @@
 
 void glVertexAttribDivisor_enc(void *self , GLuint index, GLuint divisor)
 {
+	AEMU_SCOPED_TRACE("glVertexAttribDivisor encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6653,6 +6860,7 @@
 
 void glDrawArraysInstanced_enc(void *self , GLenum mode, GLint first, GLsizei count, GLsizei primcount)
 {
+	AEMU_SCOPED_TRACE("glDrawArraysInstanced encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6681,6 +6889,7 @@
 
 void glDrawElementsInstancedDataAEMU_enc(void *self , GLenum mode, GLsizei count, GLenum type, const void* indices, GLsizei primcount, GLsizei datalen)
 {
+	AEMU_SCOPED_TRACE("glDrawElementsInstancedDataAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6701,7 +6910,7 @@
 		memcpy(ptr, &mode, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_indices; ptr += 4;
+	memcpy(ptr, &__size_indices, 4); ptr += 4;
 	memcpy(ptr, indices, __size_indices);ptr += __size_indices;
 		memcpy(ptr, &primcount, 4); ptr += 4;
 		memcpy(ptr, &datalen, 4); ptr += 4;
@@ -6713,6 +6922,7 @@
 
 void glDrawElementsInstancedOffsetAEMU_enc(void *self , GLenum mode, GLsizei count, GLenum type, GLuint offset, GLsizei primcount)
 {
+	AEMU_SCOPED_TRACE("glDrawElementsInstancedOffsetAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6742,6 +6952,7 @@
 
 void glDrawRangeElementsDataAEMU_enc(void *self , GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const GLvoid* indices, GLsizei datalen)
 {
+	AEMU_SCOPED_TRACE("glDrawRangeElementsDataAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6764,7 +6975,7 @@
 		memcpy(ptr, &end, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_indices; ptr += 4;
+	memcpy(ptr, &__size_indices, 4); ptr += 4;
 	memcpy(ptr, indices, __size_indices);ptr += __size_indices;
 		memcpy(ptr, &datalen, 4); ptr += 4;
 
@@ -6775,6 +6986,7 @@
 
 void glDrawRangeElementsOffsetAEMU_enc(void *self , GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glDrawRangeElementsOffsetAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6805,6 +7017,7 @@
 
 uint64_t glFenceSyncAEMU_enc(void *self , GLenum condition, GLbitfield flags)
 {
+	AEMU_SCOPED_TRACE("glFenceSyncAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6846,6 +7059,7 @@
 
 GLenum glClientWaitSyncAEMU_enc(void *self , uint64_t wait_on, GLbitfield flags, GLuint64 timeout)
 {
+	AEMU_SCOPED_TRACE("glClientWaitSyncAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6888,6 +7102,7 @@
 
 void glWaitSyncAEMU_enc(void *self , uint64_t wait_on, GLbitfield flags, GLuint64 timeout)
 {
+	AEMU_SCOPED_TRACE("glWaitSyncAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6915,6 +7130,7 @@
 
 void glDeleteSyncAEMU_enc(void *self , uint64_t to_delete)
 {
+	AEMU_SCOPED_TRACE("glDeleteSyncAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6940,6 +7156,7 @@
 
 GLboolean glIsSyncAEMU_enc(void *self , uint64_t sync)
 {
+	AEMU_SCOPED_TRACE("glIsSyncAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -6980,6 +7197,7 @@
 
 void glGetSyncivAEMU_enc(void *self , uint64_t sync, GLenum pname, GLsizei bufSize, GLsizei* length, GLint* values)
 {
+	AEMU_SCOPED_TRACE("glGetSyncivAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7001,8 +7219,8 @@
 		memcpy(ptr, &sync, 8); ptr += 8;
 		memcpy(ptr, &pname, 4); ptr += 4;
 		memcpy(ptr, &bufSize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_length; ptr += 4;
-	*(unsigned int *)(ptr) = __size_values; ptr += 4;
+	memcpy(ptr, &__size_length, 4); ptr += 4;
+	memcpy(ptr, &__size_values, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -7027,6 +7245,7 @@
 
 void glDrawBuffers_enc(void *self , GLsizei n, const GLenum* bufs)
 {
+	AEMU_SCOPED_TRACE("glDrawBuffers encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7045,7 +7264,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_bufs; ptr += 4;
+	memcpy(ptr, &__size_bufs, 4); ptr += 4;
 	memcpy(ptr, bufs, __size_bufs);ptr += __size_bufs;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -7055,6 +7274,7 @@
 
 void glReadBuffer_enc(void *self , GLenum src)
 {
+	AEMU_SCOPED_TRACE("glReadBuffer encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7080,6 +7300,7 @@
 
 void glBlitFramebuffer_enc(void *self , GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter)
 {
+	AEMU_SCOPED_TRACE("glBlitFramebuffer encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7114,6 +7335,7 @@
 
 void glInvalidateFramebuffer_enc(void *self , GLenum target, GLsizei numAttachments, const GLenum* attachments)
 {
+	AEMU_SCOPED_TRACE("glInvalidateFramebuffer encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7133,7 +7355,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &numAttachments, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_attachments; ptr += 4;
+	memcpy(ptr, &__size_attachments, 4); ptr += 4;
 	memcpy(ptr, attachments, __size_attachments);ptr += __size_attachments;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -7143,6 +7365,7 @@
 
 void glInvalidateSubFramebuffer_enc(void *self , GLenum target, GLsizei numAttachments, const GLenum* attachments, GLint x, GLint y, GLsizei width, GLsizei height)
 {
+	AEMU_SCOPED_TRACE("glInvalidateSubFramebuffer encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7162,7 +7385,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &numAttachments, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_attachments; ptr += 4;
+	memcpy(ptr, &__size_attachments, 4); ptr += 4;
 	memcpy(ptr, attachments, __size_attachments);ptr += __size_attachments;
 		memcpy(ptr, &x, 4); ptr += 4;
 		memcpy(ptr, &y, 4); ptr += 4;
@@ -7176,6 +7399,7 @@
 
 void glFramebufferTextureLayer_enc(void *self , GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer)
 {
+	AEMU_SCOPED_TRACE("glFramebufferTextureLayer encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7205,6 +7429,7 @@
 
 void glRenderbufferStorageMultisample_enc(void *self , GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height)
 {
+	AEMU_SCOPED_TRACE("glRenderbufferStorageMultisample encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7234,6 +7459,7 @@
 
 void glTexStorage2D_enc(void *self , GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height)
 {
+	AEMU_SCOPED_TRACE("glTexStorage2D encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7263,6 +7489,7 @@
 
 void glGetInternalformativ_enc(void *self , GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetInternalformativ encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7284,7 +7511,7 @@
 		memcpy(ptr, &internalformat, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
 		memcpy(ptr, &bufSize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -7305,6 +7532,7 @@
 
 void glBeginTransformFeedback_enc(void *self , GLenum primitiveMode)
 {
+	AEMU_SCOPED_TRACE("glBeginTransformFeedback encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7330,6 +7558,7 @@
 
 void glEndTransformFeedback_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glEndTransformFeedback encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7354,6 +7583,7 @@
 
 void glGenTransformFeedbacks_enc(void *self , GLsizei n, GLuint* ids)
 {
+	AEMU_SCOPED_TRACE("glGenTransformFeedbacks encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7372,7 +7602,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_ids; ptr += 4;
+	memcpy(ptr, &__size_ids, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -7393,6 +7623,7 @@
 
 void glDeleteTransformFeedbacks_enc(void *self , GLsizei n, const GLuint* ids)
 {
+	AEMU_SCOPED_TRACE("glDeleteTransformFeedbacks encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7411,7 +7642,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_ids; ptr += 4;
+	memcpy(ptr, &__size_ids, 4); ptr += 4;
 	memcpy(ptr, ids, __size_ids);ptr += __size_ids;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -7421,6 +7652,7 @@
 
 void glBindTransformFeedback_enc(void *self , GLenum target, GLuint id)
 {
+	AEMU_SCOPED_TRACE("glBindTransformFeedback encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7447,6 +7679,7 @@
 
 void glPauseTransformFeedback_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glPauseTransformFeedback encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7471,6 +7704,7 @@
 
 void glResumeTransformFeedback_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glResumeTransformFeedback encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7495,6 +7729,7 @@
 
 GLboolean glIsTransformFeedback_enc(void *self , GLuint id)
 {
+	AEMU_SCOPED_TRACE("glIsTransformFeedback encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7535,6 +7770,7 @@
 
 void glTransformFeedbackVaryingsAEMU_enc(void *self , GLuint program, GLsizei count, const char* packedVaryings, GLuint packedVaryingsLen, GLenum bufferMode)
 {
+	AEMU_SCOPED_TRACE("glTransformFeedbackVaryingsAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7554,7 +7790,7 @@
 
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_packedVaryings; ptr += 4;
+	memcpy(ptr, &__size_packedVaryings, 4); ptr += 4;
 	memcpy(ptr, packedVaryings, __size_packedVaryings);ptr += __size_packedVaryings;
 		memcpy(ptr, &packedVaryingsLen, 4); ptr += 4;
 		memcpy(ptr, &bufferMode, 4); ptr += 4;
@@ -7566,6 +7802,7 @@
 
 void glGetTransformFeedbackVarying_enc(void *self , GLuint program, GLuint index, GLsizei bufSize, GLsizei* length, GLsizei* size, GLenum* type, char* name)
 {
+	AEMU_SCOPED_TRACE("glGetTransformFeedbackVarying encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7589,10 +7826,10 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &index, 4); ptr += 4;
 		memcpy(ptr, &bufSize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_length; ptr += 4;
-	*(unsigned int *)(ptr) = __size_size; ptr += 4;
-	*(unsigned int *)(ptr) = __size_type; ptr += 4;
-	*(unsigned int *)(ptr) = __size_name; ptr += 4;
+	memcpy(ptr, &__size_length, 4); ptr += 4;
+	memcpy(ptr, &__size_size, 4); ptr += 4;
+	memcpy(ptr, &__size_type, 4); ptr += 4;
+	memcpy(ptr, &__size_name, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -7625,6 +7862,7 @@
 
 void glGenSamplers_enc(void *self , GLsizei n, GLuint* samplers)
 {
+	AEMU_SCOPED_TRACE("glGenSamplers encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7643,7 +7881,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_samplers; ptr += 4;
+	memcpy(ptr, &__size_samplers, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -7664,6 +7902,7 @@
 
 void glDeleteSamplers_enc(void *self , GLsizei n, const GLuint* samplers)
 {
+	AEMU_SCOPED_TRACE("glDeleteSamplers encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7682,7 +7921,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_samplers; ptr += 4;
+	memcpy(ptr, &__size_samplers, 4); ptr += 4;
 	memcpy(ptr, samplers, __size_samplers);ptr += __size_samplers;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -7692,6 +7931,7 @@
 
 void glBindSampler_enc(void *self , GLuint unit, GLuint sampler)
 {
+	AEMU_SCOPED_TRACE("glBindSampler encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7718,6 +7958,7 @@
 
 void glSamplerParameterf_enc(void *self , GLuint sampler, GLenum pname, GLfloat param)
 {
+	AEMU_SCOPED_TRACE("glSamplerParameterf encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7745,6 +7986,7 @@
 
 void glSamplerParameteri_enc(void *self , GLuint sampler, GLenum pname, GLint param)
 {
+	AEMU_SCOPED_TRACE("glSamplerParameteri encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7772,6 +8014,7 @@
 
 void glSamplerParameterfv_enc(void *self , GLuint sampler, GLenum pname, const GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glSamplerParameterfv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7791,7 +8034,7 @@
 
 		memcpy(ptr, &sampler, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -7801,6 +8044,7 @@
 
 void glSamplerParameteriv_enc(void *self , GLuint sampler, GLenum pname, const GLint* params)
 {
+	AEMU_SCOPED_TRACE("glSamplerParameteriv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7820,7 +8064,7 @@
 
 		memcpy(ptr, &sampler, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 	memcpy(ptr, params, __size_params);ptr += __size_params;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -7830,6 +8074,7 @@
 
 void glGetSamplerParameterfv_enc(void *self , GLuint sampler, GLenum pname, GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glGetSamplerParameterfv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7849,7 +8094,7 @@
 
 		memcpy(ptr, &sampler, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -7870,6 +8115,7 @@
 
 void glGetSamplerParameteriv_enc(void *self , GLuint sampler, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetSamplerParameteriv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7889,7 +8135,7 @@
 
 		memcpy(ptr, &sampler, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -7910,6 +8156,7 @@
 
 GLboolean glIsSampler_enc(void *self , GLuint sampler)
 {
+	AEMU_SCOPED_TRACE("glIsSampler encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7950,6 +8197,7 @@
 
 void glGenQueries_enc(void *self , GLsizei n, GLuint* queries)
 {
+	AEMU_SCOPED_TRACE("glGenQueries encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -7968,7 +8216,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_queries; ptr += 4;
+	memcpy(ptr, &__size_queries, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -7989,6 +8237,7 @@
 
 void glDeleteQueries_enc(void *self , GLsizei n, const GLuint* queries)
 {
+	AEMU_SCOPED_TRACE("glDeleteQueries encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8007,7 +8256,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_queries; ptr += 4;
+	memcpy(ptr, &__size_queries, 4); ptr += 4;
 	memcpy(ptr, queries, __size_queries);ptr += __size_queries;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -8017,6 +8266,7 @@
 
 void glBeginQuery_enc(void *self , GLenum target, GLuint query)
 {
+	AEMU_SCOPED_TRACE("glBeginQuery encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8043,6 +8293,7 @@
 
 void glEndQuery_enc(void *self , GLenum target)
 {
+	AEMU_SCOPED_TRACE("glEndQuery encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8068,6 +8319,7 @@
 
 void glGetQueryiv_enc(void *self , GLenum target, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetQueryiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8087,7 +8339,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -8108,6 +8360,7 @@
 
 void glGetQueryObjectuiv_enc(void *self , GLuint query, GLenum pname, GLuint* params)
 {
+	AEMU_SCOPED_TRACE("glGetQueryObjectuiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8127,7 +8380,7 @@
 
 		memcpy(ptr, &query, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -8148,6 +8401,7 @@
 
 GLboolean glIsQuery_enc(void *self , GLuint query)
 {
+	AEMU_SCOPED_TRACE("glIsQuery encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8188,6 +8442,7 @@
 
 void glProgramParameteri_enc(void *self , GLuint program, GLenum pname, GLint value)
 {
+	AEMU_SCOPED_TRACE("glProgramParameteri encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8215,6 +8470,7 @@
 
 void glProgramBinary_enc(void *self , GLuint program, GLenum binaryFormat, const void* binary, GLsizei length)
 {
+	AEMU_SCOPED_TRACE("glProgramBinary encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8234,7 +8490,7 @@
 
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &binaryFormat, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_binary; ptr += 4;
+	memcpy(ptr, &__size_binary, 4); ptr += 4;
 	memcpy(ptr, binary, __size_binary);ptr += __size_binary;
 		memcpy(ptr, &length, 4); ptr += 4;
 
@@ -8245,6 +8501,7 @@
 
 void glGetProgramBinary_enc(void *self , GLuint program, GLsizei bufSize, GLsizei* length, GLenum* binaryFormat, void* binary)
 {
+	AEMU_SCOPED_TRACE("glGetProgramBinary encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8266,9 +8523,9 @@
 
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &bufSize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_length; ptr += 4;
-	*(unsigned int *)(ptr) = __size_binaryFormat; ptr += 4;
-	*(unsigned int *)(ptr) = __size_binary; ptr += 4;
+	memcpy(ptr, &__size_length, 4); ptr += 4;
+	memcpy(ptr, &__size_binaryFormat, 4); ptr += 4;
+	memcpy(ptr, &__size_binary, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -8295,6 +8552,7 @@
 
 GLint glGetFragDataLocation_enc(void *self , GLuint program, const char* name)
 {
+	AEMU_SCOPED_TRACE("glGetFragDataLocation encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8313,7 +8571,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &program, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_name; ptr += 4;
+	memcpy(ptr, &__size_name, 4); ptr += 4;
 	memcpy(ptr, name, __size_name);ptr += __size_name;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -8338,6 +8596,7 @@
 
 void glGetInteger64v_enc(void *self , GLenum pname, GLint64* data)
 {
+	AEMU_SCOPED_TRACE("glGetInteger64v encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8356,7 +8615,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -8377,6 +8636,7 @@
 
 void glGetIntegeri_v_enc(void *self , GLenum target, GLuint index, GLint* data)
 {
+	AEMU_SCOPED_TRACE("glGetIntegeri_v encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8396,7 +8656,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &index, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -8417,6 +8677,7 @@
 
 void glGetInteger64i_v_enc(void *self , GLenum target, GLuint index, GLint64* data)
 {
+	AEMU_SCOPED_TRACE("glGetInteger64i_v encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8436,7 +8697,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &index, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -8457,6 +8718,7 @@
 
 void glTexImage3D_enc(void *self , GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid* data)
 {
+	AEMU_SCOPED_TRACE("glTexImage3D encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8499,6 +8761,7 @@
 
 void glTexImage3DOffsetAEMU_enc(void *self , GLenum target, GLint level, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glTexImage3DOffsetAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8533,6 +8796,7 @@
 
 void glTexStorage3D_enc(void *self , GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth)
 {
+	AEMU_SCOPED_TRACE("glTexStorage3D encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8563,6 +8827,7 @@
 
 void glTexSubImage3D_enc(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid* data)
 {
+	AEMU_SCOPED_TRACE("glTexSubImage3D encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8606,6 +8871,7 @@
 
 void glTexSubImage3DOffsetAEMU_enc(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glTexSubImage3DOffsetAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8641,6 +8907,7 @@
 
 void glCompressedTexImage3D_enc(void *self , GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const GLvoid* data)
 {
+	AEMU_SCOPED_TRACE("glCompressedTexImage3D encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8682,6 +8949,7 @@
 
 void glCompressedTexImage3DOffsetAEMU_enc(void *self , GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glCompressedTexImage3DOffsetAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8715,6 +8983,7 @@
 
 void glCompressedTexSubImage3D_enc(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const GLvoid* data)
 {
+	AEMU_SCOPED_TRACE("glCompressedTexSubImage3D encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8758,6 +9027,7 @@
 
 void glCompressedTexSubImage3DOffsetAEMU_enc(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, GLuint data)
 {
+	AEMU_SCOPED_TRACE("glCompressedTexSubImage3DOffsetAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8793,6 +9063,7 @@
 
 void glCopyTexSubImage3D_enc(void *self , GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height)
 {
+	AEMU_SCOPED_TRACE("glCopyTexSubImage3D encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8826,6 +9097,7 @@
 
 void glGetBooleani_v_enc(void *self , GLenum target, GLuint index, GLboolean* data)
 {
+	AEMU_SCOPED_TRACE("glGetBooleani_v encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8845,7 +9117,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &index, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -8866,6 +9138,7 @@
 
 void glMemoryBarrier_enc(void *self , GLbitfield barriers)
 {
+	AEMU_SCOPED_TRACE("glMemoryBarrier encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8891,6 +9164,7 @@
 
 void glMemoryBarrierByRegion_enc(void *self , GLbitfield barriers)
 {
+	AEMU_SCOPED_TRACE("glMemoryBarrierByRegion encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8916,6 +9190,7 @@
 
 void glGenProgramPipelines_enc(void *self , GLsizei n, GLuint* pipelines)
 {
+	AEMU_SCOPED_TRACE("glGenProgramPipelines encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8934,7 +9209,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_pipelines; ptr += 4;
+	memcpy(ptr, &__size_pipelines, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -8955,6 +9230,7 @@
 
 void glDeleteProgramPipelines_enc(void *self , GLsizei n, const GLuint* pipelines)
 {
+	AEMU_SCOPED_TRACE("glDeleteProgramPipelines encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -8973,7 +9249,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &n, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_pipelines; ptr += 4;
+	memcpy(ptr, &__size_pipelines, 4); ptr += 4;
 	memcpy(ptr, pipelines, __size_pipelines);ptr += __size_pipelines;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -8983,6 +9259,7 @@
 
 void glBindProgramPipeline_enc(void *self , GLuint pipeline)
 {
+	AEMU_SCOPED_TRACE("glBindProgramPipeline encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9008,6 +9285,7 @@
 
 void glGetProgramPipelineiv_enc(void *self , GLuint pipeline, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetProgramPipelineiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9027,7 +9305,7 @@
 
 		memcpy(ptr, &pipeline, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -9048,6 +9326,7 @@
 
 void glGetProgramPipelineInfoLog_enc(void *self , GLuint pipeline, GLsizei bufSize, GLsizei* length, GLchar* infoLog)
 {
+	AEMU_SCOPED_TRACE("glGetProgramPipelineInfoLog encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9068,8 +9347,8 @@
 
 		memcpy(ptr, &pipeline, 4); ptr += 4;
 		memcpy(ptr, &bufSize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_length; ptr += 4;
-	*(unsigned int *)(ptr) = __size_infoLog; ptr += 4;
+	memcpy(ptr, &__size_length, 4); ptr += 4;
+	memcpy(ptr, &__size_infoLog, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -9094,6 +9373,7 @@
 
 void glValidateProgramPipeline_enc(void *self , GLuint pipeline)
 {
+	AEMU_SCOPED_TRACE("glValidateProgramPipeline encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9119,6 +9399,7 @@
 
 GLboolean glIsProgramPipeline_enc(void *self , GLuint pipeline)
 {
+	AEMU_SCOPED_TRACE("glIsProgramPipeline encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9159,6 +9440,7 @@
 
 void glUseProgramStages_enc(void *self , GLuint pipeline, GLbitfield stages, GLuint program)
 {
+	AEMU_SCOPED_TRACE("glUseProgramStages encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9186,6 +9468,7 @@
 
 void glActiveShaderProgram_enc(void *self , GLuint pipeline, GLuint program)
 {
+	AEMU_SCOPED_TRACE("glActiveShaderProgram encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9212,6 +9495,7 @@
 
 GLuint glCreateShaderProgramvAEMU_enc(void *self , GLenum type, GLsizei count, const char* packedStrings, GLuint packedLen)
 {
+	AEMU_SCOPED_TRACE("glCreateShaderProgramvAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9231,7 +9515,7 @@
 
 		memcpy(ptr, &type, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_packedStrings; ptr += 4;
+	memcpy(ptr, &__size_packedStrings, 4); ptr += 4;
 	memcpy(ptr, packedStrings, __size_packedStrings);ptr += __size_packedStrings;
 		memcpy(ptr, &packedLen, 4); ptr += 4;
 
@@ -9257,6 +9541,7 @@
 
 void glProgramUniform1f_enc(void *self , GLuint program, GLint location, GLfloat v0)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform1f encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9284,6 +9569,7 @@
 
 void glProgramUniform2f_enc(void *self , GLuint program, GLint location, GLfloat v0, GLfloat v1)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform2f encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9312,6 +9598,7 @@
 
 void glProgramUniform3f_enc(void *self , GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform3f encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9341,6 +9628,7 @@
 
 void glProgramUniform4f_enc(void *self , GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform4f encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9371,6 +9659,7 @@
 
 void glProgramUniform1i_enc(void *self , GLuint program, GLint location, GLint v0)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform1i encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9398,6 +9687,7 @@
 
 void glProgramUniform2i_enc(void *self , GLuint program, GLint location, GLint v0, GLint v1)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform2i encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9426,6 +9716,7 @@
 
 void glProgramUniform3i_enc(void *self , GLuint program, GLint location, GLint v0, GLint v1, GLint v2)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform3i encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9455,6 +9746,7 @@
 
 void glProgramUniform4i_enc(void *self , GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform4i encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9485,6 +9777,7 @@
 
 void glProgramUniform1ui_enc(void *self , GLuint program, GLint location, GLuint v0)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform1ui encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9512,6 +9805,7 @@
 
 void glProgramUniform2ui_enc(void *self , GLuint program, GLint location, GLint v0, GLuint v1)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform2ui encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9540,6 +9834,7 @@
 
 void glProgramUniform3ui_enc(void *self , GLuint program, GLint location, GLint v0, GLint v1, GLuint v2)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform3ui encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9569,6 +9864,7 @@
 
 void glProgramUniform4ui_enc(void *self , GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLuint v3)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform4ui encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9599,6 +9895,7 @@
 
 void glProgramUniform1fv_enc(void *self , GLuint program, GLint location, GLsizei count, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform1fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9619,7 +9916,7 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -9629,6 +9926,7 @@
 
 void glProgramUniform2fv_enc(void *self , GLuint program, GLint location, GLsizei count, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform2fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9649,7 +9947,7 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -9659,6 +9957,7 @@
 
 void glProgramUniform3fv_enc(void *self , GLuint program, GLint location, GLsizei count, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform3fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9679,7 +9978,7 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -9689,6 +9988,7 @@
 
 void glProgramUniform4fv_enc(void *self , GLuint program, GLint location, GLsizei count, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform4fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9709,7 +10009,7 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -9719,6 +10019,7 @@
 
 void glProgramUniform1iv_enc(void *self , GLuint program, GLint location, GLsizei count, const GLint* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform1iv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9739,7 +10040,7 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -9749,6 +10050,7 @@
 
 void glProgramUniform2iv_enc(void *self , GLuint program, GLint location, GLsizei count, const GLint* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform2iv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9769,7 +10071,7 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -9779,6 +10081,7 @@
 
 void glProgramUniform3iv_enc(void *self , GLuint program, GLint location, GLsizei count, const GLint* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform3iv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9799,7 +10102,7 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -9809,6 +10112,7 @@
 
 void glProgramUniform4iv_enc(void *self , GLuint program, GLint location, GLsizei count, const GLint* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform4iv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9829,7 +10133,7 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -9839,6 +10143,7 @@
 
 void glProgramUniform1uiv_enc(void *self , GLuint program, GLint location, GLsizei count, const GLuint* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform1uiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9859,7 +10164,7 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -9869,6 +10174,7 @@
 
 void glProgramUniform2uiv_enc(void *self , GLuint program, GLint location, GLsizei count, const GLuint* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform2uiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9889,7 +10195,7 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -9899,6 +10205,7 @@
 
 void glProgramUniform3uiv_enc(void *self , GLuint program, GLint location, GLsizei count, const GLuint* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform3uiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9919,7 +10226,7 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -9929,6 +10236,7 @@
 
 void glProgramUniform4uiv_enc(void *self , GLuint program, GLint location, GLsizei count, const GLuint* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniform4uiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9949,7 +10257,7 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -9959,6 +10267,7 @@
 
 void glProgramUniformMatrix2fv_enc(void *self , GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniformMatrix2fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -9980,7 +10289,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -9990,6 +10299,7 @@
 
 void glProgramUniformMatrix3fv_enc(void *self , GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniformMatrix3fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10011,7 +10321,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -10021,6 +10331,7 @@
 
 void glProgramUniformMatrix4fv_enc(void *self , GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniformMatrix4fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10042,7 +10353,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -10052,6 +10363,7 @@
 
 void glProgramUniformMatrix2x3fv_enc(void *self , GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniformMatrix2x3fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10073,7 +10385,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -10083,6 +10395,7 @@
 
 void glProgramUniformMatrix3x2fv_enc(void *self , GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniformMatrix3x2fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10104,7 +10417,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -10114,6 +10427,7 @@
 
 void glProgramUniformMatrix2x4fv_enc(void *self , GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniformMatrix2x4fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10135,7 +10449,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -10145,6 +10459,7 @@
 
 void glProgramUniformMatrix4x2fv_enc(void *self , GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniformMatrix4x2fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10166,7 +10481,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -10176,6 +10491,7 @@
 
 void glProgramUniformMatrix3x4fv_enc(void *self , GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniformMatrix3x4fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10197,7 +10513,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -10207,6 +10523,7 @@
 
 void glProgramUniformMatrix4x3fv_enc(void *self , GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat* value)
 {
+	AEMU_SCOPED_TRACE("glProgramUniformMatrix4x3fv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10228,7 +10545,7 @@
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &transpose, 1); ptr += 1;
-	*(unsigned int *)(ptr) = __size_value; ptr += 4;
+	memcpy(ptr, &__size_value, 4); ptr += 4;
 	memcpy(ptr, value, __size_value);ptr += __size_value;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -10238,6 +10555,7 @@
 
 void glGetProgramInterfaceiv_enc(void *self , GLuint program, GLenum programInterface, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetProgramInterfaceiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10258,7 +10576,7 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &programInterface, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -10279,6 +10597,7 @@
 
 void glGetProgramResourceiv_enc(void *self , GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum* props, GLsizei bufSize, GLsizei* length, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetProgramResourceiv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10302,11 +10621,11 @@
 		memcpy(ptr, &programInterface, 4); ptr += 4;
 		memcpy(ptr, &index, 4); ptr += 4;
 		memcpy(ptr, &propCount, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_props; ptr += 4;
+	memcpy(ptr, &__size_props, 4); ptr += 4;
 	memcpy(ptr, props, __size_props);ptr += __size_props;
 		memcpy(ptr, &bufSize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_length; ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_length, 4); ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -10331,6 +10650,7 @@
 
 GLuint glGetProgramResourceIndex_enc(void *self , GLuint program, GLenum programInterface, const char* name)
 {
+	AEMU_SCOPED_TRACE("glGetProgramResourceIndex encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10350,7 +10670,7 @@
 
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &programInterface, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_name; ptr += 4;
+	memcpy(ptr, &__size_name, 4); ptr += 4;
 	memcpy(ptr, name, __size_name);ptr += __size_name;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -10375,6 +10695,7 @@
 
 GLint glGetProgramResourceLocation_enc(void *self , GLuint program, GLenum programInterface, const char* name)
 {
+	AEMU_SCOPED_TRACE("glGetProgramResourceLocation encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10394,7 +10715,7 @@
 
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &programInterface, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_name; ptr += 4;
+	memcpy(ptr, &__size_name, 4); ptr += 4;
 	memcpy(ptr, name, __size_name);ptr += __size_name;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -10419,6 +10740,7 @@
 
 void glGetProgramResourceName_enc(void *self , GLuint program, GLenum programInterface, GLuint index, GLsizei bufSize, GLsizei* length, char* name)
 {
+	AEMU_SCOPED_TRACE("glGetProgramResourceName encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10441,8 +10763,8 @@
 		memcpy(ptr, &programInterface, 4); ptr += 4;
 		memcpy(ptr, &index, 4); ptr += 4;
 		memcpy(ptr, &bufSize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_length; ptr += 4;
-	*(unsigned int *)(ptr) = __size_name; ptr += 4;
+	memcpy(ptr, &__size_length, 4); ptr += 4;
+	memcpy(ptr, &__size_name, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -10467,6 +10789,7 @@
 
 void glBindImageTexture_enc(void *self , GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format)
 {
+	AEMU_SCOPED_TRACE("glBindImageTexture encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10498,6 +10821,7 @@
 
 void glDispatchCompute_enc(void *self , GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z)
 {
+	AEMU_SCOPED_TRACE("glDispatchCompute encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10525,6 +10849,7 @@
 
 void glDispatchComputeIndirect_enc(void *self , GLintptr indirect)
 {
+	AEMU_SCOPED_TRACE("glDispatchComputeIndirect encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10550,6 +10875,7 @@
 
 void glBindVertexBuffer_enc(void *self , GLuint bindingindex, GLuint buffer, GLintptr offset, GLintptr stride)
 {
+	AEMU_SCOPED_TRACE("glBindVertexBuffer encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10578,6 +10904,7 @@
 
 void glVertexAttribBinding_enc(void *self , GLuint attribindex, GLuint bindingindex)
 {
+	AEMU_SCOPED_TRACE("glVertexAttribBinding encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10604,6 +10931,7 @@
 
 void glVertexAttribFormat_enc(void *self , GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset)
 {
+	AEMU_SCOPED_TRACE("glVertexAttribFormat encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10633,6 +10961,7 @@
 
 void glVertexAttribIFormat_enc(void *self , GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset)
 {
+	AEMU_SCOPED_TRACE("glVertexAttribIFormat encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10661,6 +10990,7 @@
 
 void glVertexBindingDivisor_enc(void *self , GLuint bindingindex, GLuint divisor)
 {
+	AEMU_SCOPED_TRACE("glVertexBindingDivisor encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10687,6 +11017,7 @@
 
 void glDrawArraysIndirectDataAEMU_enc(void *self , GLenum mode, const void* indirect, GLuint datalen)
 {
+	AEMU_SCOPED_TRACE("glDrawArraysIndirectDataAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10705,7 +11036,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &mode, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_indirect; ptr += 4;
+	memcpy(ptr, &__size_indirect, 4); ptr += 4;
 	memcpy(ptr, indirect, __size_indirect);ptr += __size_indirect;
 		memcpy(ptr, &datalen, 4); ptr += 4;
 
@@ -10716,6 +11047,7 @@
 
 void glDrawArraysIndirectOffsetAEMU_enc(void *self , GLenum mode, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glDrawArraysIndirectOffsetAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10742,6 +11074,7 @@
 
 void glDrawElementsIndirectDataAEMU_enc(void *self , GLenum mode, GLenum type, const void* indirect, GLuint datalen)
 {
+	AEMU_SCOPED_TRACE("glDrawElementsIndirectDataAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10761,7 +11094,7 @@
 
 		memcpy(ptr, &mode, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_indirect; ptr += 4;
+	memcpy(ptr, &__size_indirect, 4); ptr += 4;
 	memcpy(ptr, indirect, __size_indirect);ptr += __size_indirect;
 		memcpy(ptr, &datalen, 4); ptr += 4;
 
@@ -10772,6 +11105,7 @@
 
 void glDrawElementsIndirectOffsetAEMU_enc(void *self , GLenum mode, GLenum type, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glDrawElementsIndirectOffsetAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10799,6 +11133,7 @@
 
 void glTexStorage2DMultisample_enc(void *self , GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations)
 {
+	AEMU_SCOPED_TRACE("glTexStorage2DMultisample encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10829,6 +11164,7 @@
 
 void glSampleMaski_enc(void *self , GLuint maskNumber, GLbitfield mask)
 {
+	AEMU_SCOPED_TRACE("glSampleMaski encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10855,6 +11191,7 @@
 
 void glGetMultisamplefv_enc(void *self , GLenum pname, GLuint index, GLfloat* val)
 {
+	AEMU_SCOPED_TRACE("glGetMultisamplefv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10874,7 +11211,7 @@
 
 		memcpy(ptr, &pname, 4); ptr += 4;
 		memcpy(ptr, &index, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_val; ptr += 4;
+	memcpy(ptr, &__size_val, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -10895,6 +11232,7 @@
 
 void glFramebufferParameteri_enc(void *self , GLenum target, GLenum pname, GLint param)
 {
+	AEMU_SCOPED_TRACE("glFramebufferParameteri encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10922,6 +11260,7 @@
 
 void glGetFramebufferParameteriv_enc(void *self , GLenum target, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetFramebufferParameteriv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10941,7 +11280,7 @@
 
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -10962,6 +11301,7 @@
 
 void glGetTexLevelParameterfv_enc(void *self , GLenum target, GLint level, GLenum pname, GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glGetTexLevelParameterfv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -10982,7 +11322,7 @@
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &level, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -11003,6 +11343,7 @@
 
 void glGetTexLevelParameteriv_enc(void *self , GLenum target, GLint level, GLenum pname, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetTexLevelParameteriv encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -11023,7 +11364,7 @@
 		memcpy(ptr, &target, 4); ptr += 4;
 		memcpy(ptr, &level, 4); ptr += 4;
 		memcpy(ptr, &pname, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -11044,6 +11385,7 @@
 
 void glMapBufferRangeDMA_enc(void *self , GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access, uint64_t paddr)
 {
+	AEMU_SCOPED_TRACE("glMapBufferRangeDMA encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -11073,6 +11415,7 @@
 
 void glUnmapBufferDMA_enc(void *self , GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access, uint64_t paddr, GLboolean* out_res)
 {
+	AEMU_SCOPED_TRACE("glUnmapBufferDMA encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -11095,7 +11438,7 @@
 		memcpy(ptr, &length, 4); ptr += 4;
 		memcpy(ptr, &access, 4); ptr += 4;
 		memcpy(ptr, &paddr, 8); ptr += 8;
-	*(unsigned int *)(ptr) = __size_out_res; ptr += 4;
+	memcpy(ptr, &__size_out_res, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -11116,6 +11459,7 @@
 
 uint64_t glMapBufferRangeDirect_enc(void *self , GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access, uint64_t paddr)
 {
+	AEMU_SCOPED_TRACE("glMapBufferRangeDirect encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -11160,6 +11504,7 @@
 
 void glUnmapBufferDirect_enc(void *self , GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access, uint64_t paddr, uint64_t guest_ptr, GLboolean* out_res)
 {
+	AEMU_SCOPED_TRACE("glUnmapBufferDirect encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -11183,7 +11528,7 @@
 		memcpy(ptr, &access, 4); ptr += 4;
 		memcpy(ptr, &paddr, 8); ptr += 8;
 		memcpy(ptr, &guest_ptr, 8); ptr += 8;
-	*(unsigned int *)(ptr) = __size_out_res; ptr += 4;
+	memcpy(ptr, &__size_out_res, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -11204,6 +11549,7 @@
 
 void glFlushMappedBufferRangeDirect_enc(void *self , GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access)
 {
+	AEMU_SCOPED_TRACE("glFlushMappedBufferRangeDirect encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -11232,6 +11578,7 @@
 
 GLenum glGetGraphicsResetStatusEXT_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("glGetGraphicsResetStatusEXT encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -11271,6 +11618,7 @@
 
 void glReadnPixelsEXT_enc(void *self , GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, GLvoid* data)
 {
+	AEMU_SCOPED_TRACE("glReadnPixelsEXT encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -11295,7 +11643,7 @@
 		memcpy(ptr, &format, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
 		memcpy(ptr, &bufSize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -11316,6 +11664,7 @@
 
 void glGetnUniformfvEXT_enc(void *self , GLuint program, GLint location, GLsizei bufSize, GLfloat* params)
 {
+	AEMU_SCOPED_TRACE("glGetnUniformfvEXT encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -11336,7 +11685,7 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &bufSize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -11357,6 +11706,7 @@
 
 void glGetnUniformivEXT_enc(void *self , GLuint program, GLint location, GLsizei bufSize, GLint* params)
 {
+	AEMU_SCOPED_TRACE("glGetnUniformivEXT encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -11377,7 +11727,7 @@
 		memcpy(ptr, &program, 4); ptr += 4;
 		memcpy(ptr, &location, 4); ptr += 4;
 		memcpy(ptr, &bufSize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_params; ptr += 4;
+	memcpy(ptr, &__size_params, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -11398,6 +11748,7 @@
 
 void glDrawArraysNullAEMU_enc(void *self , GLenum mode, GLint first, GLsizei count)
 {
+	AEMU_SCOPED_TRACE("glDrawArraysNullAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -11425,6 +11776,7 @@
 
 void glDrawElementsOffsetNullAEMU_enc(void *self , GLenum mode, GLsizei count, GLenum type, GLuint offset)
 {
+	AEMU_SCOPED_TRACE("glDrawElementsOffsetNullAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -11453,6 +11805,7 @@
 
 void glDrawElementsDataNullAEMU_enc(void *self , GLenum mode, GLsizei count, GLenum type, void* data, GLuint datalen)
 {
+	AEMU_SCOPED_TRACE("glDrawElementsDataNullAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -11473,7 +11826,7 @@
 		memcpy(ptr, &mode, 4); ptr += 4;
 		memcpy(ptr, &count, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_data; ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
 	memcpy(ptr, data, __size_data);ptr += __size_data;
 		memcpy(ptr, &datalen, 4); ptr += 4;
 
@@ -11484,6 +11837,7 @@
 
 void glUnmapBufferAsyncAEMU_enc(void *self , GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access, void* guest_buffer, GLboolean* out_res)
 {
+	AEMU_SCOPED_TRACE("glUnmapBufferAsyncAEMU encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -11517,7 +11871,7 @@
 	}
 	buf = stream->alloc(__size_out_res + 1*4);
 	ptr = buf;
-	*(unsigned int *)(ptr) = __size_out_res; ptr += 4;
+	memcpy(ptr, &__size_out_res, 4); ptr += 4;
 	memcpy(ptr, out_res, __size_out_res);ptr += __size_out_res;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -11528,6 +11882,7 @@
 
 void glFlushMappedBufferRangeAEMU2_enc(void *self , GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access, void* guest_buffer)
 {
+	AEMU_SCOPED_TRACE("glFlushMappedBufferRangeAEMU2 encode");
 
 	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -11563,6 +11918,52 @@
 
 }
 
+GLboolean glBufferDataSyncAEMU_enc(void *self , GLenum target, GLsizeiptr size, const GLvoid* data, GLenum usage)
+{
+	AEMU_SCOPED_TRACE("glBufferDataSyncAEMU encode");
+
+	gl2_encoder_context_t *ctx = (gl2_encoder_context_t *)self;
+	IOStream *stream = ctx->m_stream;
+	ChecksumCalculator *checksumCalculator = ctx->m_checksumCalculator;
+	bool useChecksum = checksumCalculator->getVersion() > 0;
+
+	const unsigned int __size_data = ((data != NULL) ?  size : 0);
+	 unsigned char *ptr;
+	 unsigned char *buf;
+	 const size_t sizeWithoutChecksum = 8 + 4 + 4 + __size_data + 4 + 1*4;
+	 const size_t checksumSize = checksumCalculator->checksumByteSize();
+	 const size_t totalSize = sizeWithoutChecksum + checksumSize;
+	buf = stream->alloc(totalSize);
+	ptr = buf;
+	int tmp = OP_glBufferDataSyncAEMU;memcpy(ptr, &tmp, 4); ptr += 4;
+	memcpy(ptr, &totalSize, 4);  ptr += 4;
+
+		memcpy(ptr, &target, 4); ptr += 4;
+		memcpy(ptr, &size, 4); ptr += 4;
+	memcpy(ptr, &__size_data, 4); ptr += 4;
+	if (data != NULL) memcpy(ptr, data, __size_data);ptr += __size_data;
+		memcpy(ptr, &usage, 4); ptr += 4;
+
+	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
+	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
+
+
+	GLboolean retval;
+	stream->readback(&retval, 1);
+	if (useChecksum) checksumCalculator->addBuffer(&retval, 1);
+	if (useChecksum) {
+		unsigned char *checksumBufPtr = NULL;
+		unsigned char checksumBuf[ChecksumCalculator::kMaxChecksumSize];
+		if (checksumSize > 0) checksumBufPtr = &checksumBuf[0];
+		stream->readback(checksumBufPtr, checksumSize);
+		if (!checksumCalculator->validate(checksumBufPtr, checksumSize)) {
+			ALOGE("glBufferDataSyncAEMU: GL communication error, please report this issue to b.android.com.\n");
+			abort();
+		}
+	}
+	return retval;
+}
+
 }  // namespace
 
 gl2_encoder_context_t::gl2_encoder_context_t(IOStream *stream, ChecksumCalculator *checksumCalculator)
@@ -11996,5 +12397,6 @@
 	this->glDrawElementsDataNullAEMU = &glDrawElementsDataNullAEMU_enc;
 	this->glUnmapBufferAsyncAEMU = &glUnmapBufferAsyncAEMU_enc;
 	this->glFlushMappedBufferRangeAEMU2 = &glFlushMappedBufferRangeAEMU2_enc;
+	this->glBufferDataSyncAEMU = &glBufferDataSyncAEMU_enc;
 }
 
diff --git a/system/GLESv2_enc/gl2_entry.cpp b/system/GLESv2_enc/gl2_entry.cpp
index a5d6c87..5ea44c2 100644
--- a/system/GLESv2_enc/gl2_entry.cpp
+++ b/system/GLESv2_enc/gl2_entry.cpp
@@ -431,6 +431,7 @@
 	void glDrawElementsDataNullAEMU(GLenum mode, GLsizei count, GLenum type, void* data, GLuint datalen);
 	void glUnmapBufferAsyncAEMU(GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access, void* guest_buffer, GLboolean* out_res);
 	void glFlushMappedBufferRangeAEMU2(GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access, void* guest_buffer);
+	GLboolean glBufferDataSyncAEMU(GLenum target, GLsizeiptr size, const GLvoid* data, GLenum usage);
 };
 
 #ifndef GET_CONTEXT
@@ -3032,3 +3033,9 @@
 	ctx->glFlushMappedBufferRangeAEMU2(ctx, target, offset, length, access, guest_buffer);
 }
 
+GLboolean glBufferDataSyncAEMU(GLenum target, GLsizeiptr size, const GLvoid* data, GLenum usage)
+{
+	GET_CONTEXT;
+	return ctx->glBufferDataSyncAEMU(ctx, target, size, data, usage);
+}
+
diff --git a/system/GLESv2_enc/gl2_opcodes.h b/system/GLESv2_enc/gl2_opcodes.h
index 9c619e1..25ab3e7 100644
--- a/system/GLESv2_enc/gl2_opcodes.h
+++ b/system/GLESv2_enc/gl2_opcodes.h
@@ -429,7 +429,8 @@
 #define OP_glDrawElementsDataNullAEMU 					2471
 #define OP_glUnmapBufferAsyncAEMU 					2472
 #define OP_glFlushMappedBufferRangeAEMU2 					2473
-#define OP_last 					2474
+#define OP_glBufferDataSyncAEMU 					2474
+#define OP_last 					2475
 
 
 #endif
diff --git a/system/OpenglSystemCommon/AddressSpaceStream.cpp b/system/OpenglSystemCommon/AddressSpaceStream.cpp
index ce7c720..84d635a 100644
--- a/system/OpenglSystemCommon/AddressSpaceStream.cpp
+++ b/system/OpenglSystemCommon/AddressSpaceStream.cpp
@@ -15,11 +15,14 @@
 */
 #include "AddressSpaceStream.h"
 
+#include "android/base/Tracing.h"
+
 #if PLATFORM_SDK_VERSION < 26
 #include <cutils/log.h>
 #else
 #include <log/log.h>
 #endif
+#include <cutils/properties.h>
 #include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -42,7 +45,7 @@
         return nullptr;
     }
 
-    struct goldfish_address_space_ping request;
+    struct address_space_ping request;
     request.metadata = ASG_GET_RING;
     if (!goldfish_address_space_ping(child_device_handle, &request)) {
         ALOGE("AddressSpaceStream::create failed (get ring)\n");
@@ -123,21 +126,164 @@
     context.ring_config->host_consumed_pos = 0;
     context.ring_config->guest_write_pos = 0;
 
+    struct address_space_ops ops = {
+        .open = goldfish_address_space_open,
+        .close = goldfish_address_space_close,
+        .claim_shared = goldfish_address_space_claim_shared,
+        .unclaim_shared = goldfish_address_space_unclaim_shared,
+        .map = goldfish_address_space_map,
+        .unmap = goldfish_address_space_unmap,
+        .set_subdevice_type = goldfish_address_space_set_subdevice_type,
+        .ping = goldfish_address_space_ping,
+    };
+
     AddressSpaceStream* res =
         new AddressSpaceStream(
             child_device_handle, version, context,
-            ringOffset, bufferOffset);
+            ringOffset, bufferOffset, false /* not virtio */, ops);
 
     return res;
 }
 
+#if defined(HOST_BUILD) || defined(__Fuchsia__)
+AddressSpaceStream* createVirtioGpuAddressSpaceStream(size_t ignored_bufSize) {
+    // Ignore incoming ignored_bufSize
+    (void)ignored_bufSize;
+    return nullptr;
+}
+#else
+static address_space_handle_t openVirtGpuAddressSpace() {
+    address_space_handle_t ret;
+    uint8_t retryCount = 64;
+    do {
+        ret = virtgpu_address_space_open();
+    } while(ret < 0 && retryCount-- > 0 && errno == EINTR);
+    return ret;
+}
+
+AddressSpaceStream* createVirtioGpuAddressSpaceStream(size_t ignored_bufSize) {
+    // Ignore incoming ignored_bufSize
+    (void)ignored_bufSize;
+
+    auto handle = openVirtGpuAddressSpace();
+    if (handle <= reinterpret_cast<address_space_handle_t>(-1)) {
+        ALOGE("AddressSpaceStream::create failed (open device) %d (%s)\n", errno, strerror(errno));
+        return nullptr;
+    }
+
+    struct address_space_virtgpu_info virtgpu_info;
+
+    ALOGD("%s: create subdevice and get resp\n", __func__);
+    if (!virtgpu_address_space_create_context_with_subdevice(
+            handle, GoldfishAddressSpaceSubdeviceType::VirtioGpuGraphics,
+            &virtgpu_info)) {
+        ALOGE("AddressSpaceStream::create failed (create subdevice)\n");
+        virtgpu_address_space_close(handle);
+        return nullptr;
+    }
+    ALOGD("%s: create subdevice and get resp (done)\n", __func__);
+
+    struct address_space_ping request;
+    uint32_t ringSize = 0;
+    uint32_t bufferSize = 0;
+
+    request.metadata = ASG_GET_RING;
+    if (!virtgpu_address_space_ping_with_response(
+        &virtgpu_info, &request)) {
+        ALOGE("AddressSpaceStream::create failed (get ring version)\n");
+        virtgpu_address_space_close(handle);
+        return nullptr;
+    }
+    ringSize = request.size;
+
+    request.metadata = ASG_GET_BUFFER;
+    if (!virtgpu_address_space_ping_with_response(
+        &virtgpu_info, &request)) {
+        ALOGE("AddressSpaceStream::create failed (get ring version)\n");
+        virtgpu_address_space_close(handle);
+        return nullptr;
+    }
+    bufferSize = request.size;
+
+    request.metadata = ASG_SET_VERSION;
+    request.size = 1; // version 1
+
+    if (!virtgpu_address_space_ping_with_response(
+        &virtgpu_info, &request)) {
+        ALOGE("AddressSpaceStream::create failed (set version)\n");
+        virtgpu_address_space_close(handle);
+        return nullptr;
+    }
+
+    ALOGD("%s: ping returned. context ring and buffer sizes %u %u\n", __func__,
+            ringSize, bufferSize);
+
+    uint64_t hostmem_id = request.metadata;
+    uint32_t version = request.size;
+    size_t hostmem_alloc_size =
+        (size_t)(ringSize + bufferSize);
+
+    ALOGD("%s: hostmem size: %zu\n", __func__, hostmem_alloc_size);
+
+    struct address_space_virtgpu_hostmem_info hostmem_info;
+    if (!virtgpu_address_space_allocate_hostmem(
+            handle,
+            hostmem_alloc_size,
+            hostmem_id,
+            &hostmem_info)) {
+        ALOGE("AddressSpaceStream::create failed (alloc hostmem)\n");
+        virtgpu_address_space_close(handle);
+        return nullptr;
+    }
+
+    request.metadata = ASG_GET_CONFIG;
+    if (!virtgpu_address_space_ping_with_response(
+        &virtgpu_info, &request)) {
+        ALOGE("AddressSpaceStream::create failed (get config)\n");
+        virtgpu_address_space_close(handle);
+        return nullptr;
+    }
+
+    char* ringPtr = (char*)hostmem_info.ptr;
+    char* bufferPtr = ((char*)hostmem_info.ptr) + sizeof(struct asg_ring_storage);
+
+    struct asg_context context =
+        asg_context_create(
+            (char*)ringPtr, (char*)bufferPtr, bufferSize);
+
+    context.ring_config->transfer_mode = 1;
+    context.ring_config->host_consumed_pos = 0;
+    context.ring_config->guest_write_pos = 0;
+
+    struct address_space_ops ops = {
+        .open = virtgpu_address_space_open,
+        .close = virtgpu_address_space_close,
+        .ping = virtgpu_address_space_ping,
+        .allocate_hostmem = virtgpu_address_space_allocate_hostmem,
+        .ping_with_response = virtgpu_address_space_ping_with_response,
+    };
+
+    AddressSpaceStream* res =
+        new AddressSpaceStream(
+            handle, version, context,
+            0, 0, true /* is virtio */, ops);
+
+    return res;
+}
+#endif // HOST_BUILD || __Fuchsia__
+
+
 AddressSpaceStream::AddressSpaceStream(
     address_space_handle_t handle,
     uint32_t version,
     struct asg_context context,
     uint64_t ringOffset,
-    uint64_t writeBufferOffset) :
+    uint64_t writeBufferOffset,
+    bool virtioMode,
+    struct address_space_ops ops) :
     IOStream(context.ring_config->flush_interval),
+    m_virtioMode(virtioMode),
+    m_ops(ops),
     m_tmpBuf(0),
     m_tmpBufSize(0),
     m_tmpBufXferSize(0),
@@ -156,18 +302,25 @@
     m_writeStart(m_buf),
     m_writeStep(context.ring_config->flush_interval),
     m_notifs(0),
-    m_written(0) {
+    m_written(0),
+    m_backoffIters(0),
+    m_backoffFactor(1) {
     // We'll use this in the future, but at the moment,
     // it's a potential compile Werror.
     (void)m_version;
 }
 
 AddressSpaceStream::~AddressSpaceStream() {
-    goldfish_address_space_unmap(m_context.to_host, sizeof(struct asg_ring_storage));
-    goldfish_address_space_unmap(m_context.buffer, m_writeBufferSize);
-    goldfish_address_space_unclaim_shared(m_handle, m_ringOffset);
-    goldfish_address_space_unclaim_shared(m_handle, m_writeBufferOffset);
-    goldfish_address_space_close(m_handle);
+    flush();
+    ensureType3Finished();
+    ensureType1Finished();
+    if (!m_virtioMode) {
+        m_ops.unmap(m_context.to_host, sizeof(struct asg_ring_storage));
+        m_ops.unmap(m_context.buffer, m_writeBufferSize);
+        m_ops.unclaim_shared(m_handle, m_ringOffset);
+        m_ops.unclaim_shared(m_handle, m_writeBufferOffset);
+    }
+    m_ops.close(m_handle);
     if (m_readBuf) free(m_readBuf);
     if (m_tmpBuf) free(m_tmpBuf);
 }
@@ -178,6 +331,9 @@
 }
 
 void *AddressSpaceStream::allocBuffer(size_t minSize) {
+    AEMU_SCOPED_TRACE("allocBuffer");
+    ensureType3Finished();
+
     if (!m_readBuf) {
         m_readBuf = (unsigned char*)malloc(kReadSize);
     }
@@ -302,6 +458,7 @@
         }
     }
 
+    resetBackoff();
     return userReadBuf;
 }
 
@@ -321,7 +478,7 @@
 
 int AddressSpaceStream::writeFully(const void *buf, size_t size)
 {
-    ensureConsumerFinishing();
+    AEMU_SCOPED_TRACE("writeFully");
     ensureType3Finished();
     ensureType1Finished();
 
@@ -329,10 +486,77 @@
     m_context.ring_config->transfer_mode = 3;
 
     size_t sent = 0;
-    size_t quarterRingSize = m_writeBufferSize / 4;
-    size_t chunkSize = size < quarterRingSize ? size : quarterRingSize;
+    size_t preferredChunkSize = m_writeBufferSize / 4;
+    size_t chunkSize = size < preferredChunkSize ? size : preferredChunkSize;
     const uint8_t* bufferBytes = (const uint8_t*)buf;
 
+    bool hostPinged = false;
+    while (sent < size) {
+        size_t remaining = size - sent;
+        size_t sendThisTime = remaining < chunkSize ? remaining : chunkSize;
+
+        long sentChunks =
+            ring_buffer_view_write(
+                m_context.to_host_large_xfer.ring,
+                &m_context.to_host_large_xfer.view,
+                bufferBytes + sent, sendThisTime, 1);
+
+        if (!hostPinged && *(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
+            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
+            notifyAvailable();
+            hostPinged = true;
+        }
+
+        if (sentChunks == 0) {
+            ring_buffer_yield();
+            backoff();
+        }
+
+        sent += sentChunks * sendThisTime;
+
+        if (isInError()) {
+            return -1;
+        }
+    }
+
+    bool isRenderingAfter = ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);
+
+    if (!isRenderingAfter) {
+        notifyAvailable();
+    }
+
+    ensureType3Finished();
+
+    resetBackoff();
+    m_context.ring_config->transfer_mode = 1;
+    m_written += size;
+
+    float mb = (float)m_written / 1048576.0f;
+    if (mb > 100.0f) {
+        ALOGD("%s: %f mb in %d notifs. %f mb/notif\n", __func__,
+              mb, m_notifs, m_notifs ? mb / (float)m_notifs : 0.0f);
+        m_notifs = 0;
+        m_written = 0;
+    }
+    return 0;
+}
+
+int AddressSpaceStream::writeFullyAsync(const void *buf, size_t size)
+{
+    AEMU_SCOPED_TRACE("writeFullyAsync");
+    ensureType3Finished();
+    ensureType1Finished();
+
+    __atomic_store_n(&m_context.ring_config->transfer_size, size, __ATOMIC_RELEASE);
+    m_context.ring_config->transfer_mode = 3;
+
+    size_t sent = 0;
+    size_t preferredChunkSize = m_writeBufferSize / 2;
+    size_t chunkSize = size < preferredChunkSize ? size : preferredChunkSize;
+    const uint8_t* bufferBytes = (const uint8_t*)buf;
+
+    bool pingedHost = false;
+
     while (sent < size) {
         size_t remaining = size - sent;
         size_t sendThisTime = remaining < chunkSize ? remaining : chunkSize;
@@ -343,12 +567,18 @@
                 &m_context.to_host_large_xfer.view,
                 bufferBytes + sent, sendThisTime, 1);
 
-        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME) {
+        uint32_t hostState = __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);
+
+        if (!pingedHost &&
+            hostState != ASG_HOST_STATE_CAN_CONSUME &&
+            hostState != ASG_HOST_STATE_RENDERING) {
+            pingedHost = true;
             notifyAvailable();
         }
 
         if (sentChunks == 0) {
             ring_buffer_yield();
+            backoff();
         }
 
         sent += sentChunks * sendThisTime;
@@ -358,9 +588,24 @@
         }
     }
 
-    ensureType3Finished();
+
+    bool isRenderingAfter = ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);
+
+    if (!isRenderingAfter) {
+        notifyAvailable();
+    }
+
+    resetBackoff();
     m_context.ring_config->transfer_mode = 1;
     m_written += size;
+
+    float mb = (float)m_written / 1048576.0f;
+    if (mb > 100.0f) {
+        ALOGD("%s: %f mb in %d notifs. %f mb/notif\n", __func__,
+              mb, m_notifs, m_notifs ? mb / (float)m_notifs : 0.0f);
+        m_notifs = 0;
+        m_written = 0;
+    }
     return 0;
 }
 
@@ -383,12 +628,15 @@
 }
 
 ssize_t AddressSpaceStream::speculativeRead(unsigned char* readBuffer, size_t trySize) {
-    ensureConsumerFinishing();
     ensureType3Finished();
     ensureType1Finished();
 
     size_t actuallyRead = 0;
+    size_t readIters = 0;
+
     while (!actuallyRead) {
+        ++readIters;
+
         uint32_t readAvail =
             ring_buffer_available_read(
                 m_context.from_host_large_xfer.ring,
@@ -396,6 +644,7 @@
 
         if (!readAvail) {
             ring_buffer_yield();
+            backoff();
             continue;
         }
 
@@ -417,9 +666,10 @@
 }
 
 void AddressSpaceStream::notifyAvailable() {
-    struct goldfish_address_space_ping request;
+    AEMU_SCOPED_TRACE("PING");
+    struct address_space_ping request;
     request.metadata = ASG_NOTIFY_AVAILABLE;
-    goldfish_address_space_ping(m_handle, &request);
+    m_ops.ping(m_handle, &request);
     ++m_notifs;
 }
 
@@ -446,20 +696,24 @@
             break;
         }
 
-        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME) {
+        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
+            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
             notifyAvailable();
             break;
         }
+
+        backoff();
     }
 }
 
 void AddressSpaceStream::ensureType1Finished() {
-    ensureConsumerFinishing();
+    AEMU_SCOPED_TRACE("ensureType1Finished");
 
     uint32_t currAvailRead =
         ring_buffer_available_read(m_context.to_host, 0);
 
     while (currAvailRead) {
+        backoff();
         ring_buffer_yield();
         currAvailRead = ring_buffer_available_read(m_context.to_host, 0);
         if (isInError()) {
@@ -469,17 +723,20 @@
 }
 
 void AddressSpaceStream::ensureType3Finished() {
+    AEMU_SCOPED_TRACE("ensureType3Finished");
     uint32_t availReadLarge =
         ring_buffer_available_read(
             m_context.to_host_large_xfer.ring,
             &m_context.to_host_large_xfer.view);
     while (availReadLarge) {
         ring_buffer_yield();
+        backoff();
         availReadLarge =
             ring_buffer_available_read(
                 m_context.to_host_large_xfer.ring,
                 &m_context.to_host_large_xfer.view);
-        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME) {
+        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
+            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
             notifyAvailable();
         }
         if (isInError()) {
@@ -489,6 +746,11 @@
 }
 
 int AddressSpaceStream::type1Write(uint32_t bufferOffset, size_t size) {
+
+    AEMU_SCOPED_TRACE("type1Write");
+
+    ensureType3Finished();
+
     size_t sent = 0;
     size_t sizeForRing = sizeof(struct asg_type1_xfer);
 
@@ -503,16 +765,15 @@
     uint32_t maxSteps = m_context.ring_config->buffer_size /
             m_context.ring_config->flush_interval;
 
-    if (maxSteps > 1) maxOutstanding = maxSteps >> 1;
+    if (maxSteps > 1) maxOutstanding = maxSteps - 1;
 
     uint32_t ringAvailReadNow = ring_buffer_available_read(m_context.to_host, 0);
 
-    while (ringAvailReadNow >= maxOutstanding) {
-        ensureConsumerFinishing();
-        ring_buffer_yield();
+    while (ringAvailReadNow >= maxOutstanding * sizeForRing) {
         ringAvailReadNow = ring_buffer_available_read(m_context.to_host, 0);
     }
 
+    bool hostPinged = false;
     while (sent < sizeForRing) {
 
         long sentChunks = ring_buffer_write(
@@ -520,12 +781,16 @@
             writeBufferBytes + sent,
             sizeForRing - sent, 1);
 
-        if (*(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME) {
+        if (!hostPinged &&
+            *(m_context.host_state) != ASG_HOST_STATE_CAN_CONSUME &&
+            *(m_context.host_state) != ASG_HOST_STATE_RENDERING) {
             notifyAvailable();
+            hostPinged = true;
         }
 
         if (sentChunks == 0) {
             ring_buffer_yield();
+            backoff();
         }
 
         sent += sentChunks * (sizeForRing - sent);
@@ -535,7 +800,12 @@
         }
     }
 
-    ensureConsumerFinishing();
+    bool isRenderingAfter = ASG_HOST_STATE_RENDERING == __atomic_load_n(m_context.host_state, __ATOMIC_ACQUIRE);
+
+    if (!isRenderingAfter) {
+        notifyAvailable();
+    }
+
     m_written += size;
 
     float mb = (float)m_written / 1048576.0f;
@@ -546,5 +816,32 @@
         m_written = 0;
     }
 
+    resetBackoff();
     return 0;
 }
+
+void AddressSpaceStream::backoff() {
+#if defined(HOST_BUILD) || defined(__APPLE__) || defined(__MACOSX) || defined(__Fuchsia__)
+    static const uint32_t kBackoffItersThreshold = 50000000;
+    static const uint32_t kBackoffFactorDoublingIncrement = 50000000;
+#else
+    static const uint32_t kBackoffItersThreshold = property_get_int32("ro.boot.asg.backoffiters", 50000000);
+    static const uint32_t kBackoffFactorDoublingIncrement = property_get_int32("ro.boot.asg.backoffincrement", 50000000);
+#endif
+    ++m_backoffIters;
+
+    if (m_backoffIters > kBackoffItersThreshold) {
+        usleep(m_backoffFactor);
+        uint32_t itersSoFarAfterThreshold = m_backoffIters - kBackoffItersThreshold;
+        if (itersSoFarAfterThreshold > kBackoffFactorDoublingIncrement) {
+            m_backoffFactor = m_backoffFactor << 1;
+            if (m_backoffFactor > 1000) m_backoffFactor = 1000;
+            m_backoffIters = kBackoffItersThreshold;
+        }
+    }
+}
+
+void AddressSpaceStream::resetBackoff() {
+    m_backoffIters = 0;
+    m_backoffFactor = 1;
+}
diff --git a/system/OpenglSystemCommon/AddressSpaceStream.h b/system/OpenglSystemCommon/AddressSpaceStream.h
index a4db5aa..62f235e 100644
--- a/system/OpenglSystemCommon/AddressSpaceStream.h
+++ b/system/OpenglSystemCommon/AddressSpaceStream.h
@@ -24,6 +24,7 @@
 class AddressSpaceStream;
 
 AddressSpaceStream* createAddressSpaceStream(size_t bufSize);
+AddressSpaceStream* createVirtioGpuAddressSpaceStream(size_t bufSize);
 
 class AddressSpaceStream : public IOStream {
 public:
@@ -32,7 +33,9 @@
         uint32_t version,
         struct asg_context context,
         uint64_t ringOffset,
-        uint64_t writeBufferOffset);
+        uint64_t writeBufferOffset,
+        bool virtioMode,
+        struct address_space_ops ops);
     ~AddressSpaceStream();
 
     virtual size_t idealAllocSize(size_t len);
@@ -41,8 +44,18 @@
     virtual const unsigned char *readFully( void *buf, size_t len);
     virtual const unsigned char *read( void *buf, size_t *inout_len);
     virtual int writeFully(const void *buf, size_t len);
+    virtual int writeFullyAsync(const void *buf, size_t len);
     virtual const unsigned char *commitBufferAndReadFully(size_t size, void *buf, size_t len);
 
+    int getRendernodeFd() const {
+#if defined(__Fuchsia__)
+        return -1;
+#else
+        if (!m_virtioMode) return -1;
+        return m_handle;
+#endif
+    }
+
 private:
     bool isInError() const;
     ssize_t speculativeRead(unsigned char* readBuffer, size_t trySize);
@@ -54,6 +67,12 @@
     void ensureType3Finished();
     int type1Write(uint32_t offset, size_t size);
 
+    void backoff();
+    void resetBackoff();
+
+    bool m_virtioMode;
+    struct address_space_ops m_ops;
+
     unsigned char* m_tmpBuf;
     size_t m_tmpBufSize;
     size_t m_tmpBufXferSize;
@@ -78,6 +97,9 @@
 
     uint32_t m_notifs;
     uint32_t m_written;
+
+    uint64_t m_backoffIters;
+    uint64_t m_backoffFactor;
 };
 
 #endif
diff --git a/system/OpenglSystemCommon/Android.mk b/system/OpenglSystemCommon/Android.mk
index fa24719..f9cd67a 100644
--- a/system/OpenglSystemCommon/Android.mk
+++ b/system/OpenglSystemCommon/Android.mk
@@ -7,7 +7,7 @@
 $(call emugl-import,libqemupipe$(GOLDFISH_OPENGL_LIB_SUFFIX))
 $(call emugl-import,libgralloc_cb$(GOLDFISH_OPENGL_LIB_SUFFIX))
 else
-$(call emugl-export,STATIC_LIBRARIES,libGoldfishAddressSpace)
+$(call emugl-export,STATIC_LIBRARIES,libGoldfishAddressSpace libringbuffer)
 $(call emugl-export,STATIC_LIBRARIES,libqemupipe.ranchu)
 $(call emugl-export,HEADER_LIBRARIES,libgralloc_cb.ranchu)
 endif
@@ -17,24 +17,22 @@
     HostConnection.cpp \
     QemuPipeStream.cpp \
     ProcessPipe.cpp    \
+    ThreadInfo.cpp \
 
-ifeq (true,$(BUILD_EMULATOR_VULKAN))
+ifeq (true,$(GFXSTREAM))
 $(call emugl-import,libvulkan_enc)
 
 LOCAL_SRC_FILES += AddressSpaceStream.cpp
 
 endif
 
-LOCAL_CFLAGS += -Wno-unused-variable -Wno-unused-parameter
+LOCAL_CFLAGS += -Wno-unused-variable -Wno-unused-parameter -fno-emulated-tls
 
 ifeq (true,$(GOLDFISH_OPENGL_BUILD_FOR_HOST))
 
-LOCAL_SRC_FILES += \
-    ThreadInfo_host.cpp \
-
 else
 
-ifeq (true,$(BUILD_EMULATOR_VULKAN))
+ifeq (true,$(GFXSTREAM))
 
 LOCAL_HEADER_LIBRARIES += vulkan_headers
 
@@ -48,9 +46,6 @@
 
 endif
 
-LOCAL_SRC_FILES += \
-    ThreadInfo.cpp \
-
 endif
 
 ifdef IS_AT_LEAST_OPD1
diff --git a/system/OpenglSystemCommon/CMakeLists.txt b/system/OpenglSystemCommon/CMakeLists.txt
index ff4947a..9cac2c6 100644
--- a/system/OpenglSystemCommon/CMakeLists.txt
+++ b/system/OpenglSystemCommon/CMakeLists.txt
@@ -1,10 +1,10 @@
 # This is an autogenerated file! Do not edit!
 # instead run make from .../device/generic/goldfish-opengl
 # which will re-generate this file.
-android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon/Android.mk" "1a769e79e22604f569e4345e4f432bb4bd41ca810d479b69887007b5d9bec528")
-set(OpenglSystemCommon_src FormatConversions.cpp HostConnection.cpp QemuPipeStream.cpp ProcessPipe.cpp AddressSpaceStream.cpp ThreadInfo_host.cpp)
-android_add_library(TARGET OpenglSystemCommon SHARED LICENSE Apache-2.0 SRC FormatConversions.cpp HostConnection.cpp QemuPipeStream.cpp ProcessPipe.cpp AddressSpaceStream.cpp ThreadInfo_host.cpp)
-target_include_directories(OpenglSystemCommon PRIVATE ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon ${GOLDFISH_DEVICE_ROOT}/bionic/libc/platform ${GOLDFISH_DEVICE_ROOT}/bionic/libc/private ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon/bionic-include ${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/gralloc_cb/include ${GOLDFISH_DEVICE_ROOT}/shared/GoldfishAddressSpace/include ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv2_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv1_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
-target_compile_definitions(OpenglSystemCommon PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGOLDFISH_VULKAN")
-target_compile_options(OpenglSystemCommon PRIVATE "-fvisibility=default" "-Wno-unused-parameter" "-Wno-unused-variable")
-target_link_libraries(OpenglSystemCommon PRIVATE android-emu-shared vulkan_enc gui androidemu cutils utils log _renderControl_enc GLESv2_enc GLESv1_enc OpenglCodecCommon_host PRIVATE gralloc_cb_host GoldfishAddressSpace_host qemupipe_host)
\ No newline at end of file
+android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon/Android.mk" "401986affa6f80625f00675980f448fa434d724df34034781651daffaa8e6b70")
+set(OpenglSystemCommon_src FormatConversions.cpp HostConnection.cpp QemuPipeStream.cpp ProcessPipe.cpp ThreadInfo.cpp AddressSpaceStream.cpp)
+android_add_library(TARGET OpenglSystemCommon SHARED LICENSE Apache-2.0 SRC FormatConversions.cpp HostConnection.cpp QemuPipeStream.cpp ProcessPipe.cpp ThreadInfo.cpp AddressSpaceStream.cpp)
+target_include_directories(OpenglSystemCommon PRIVATE ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon ${GOLDFISH_DEVICE_ROOT}/bionic/libc/platform ${GOLDFISH_DEVICE_ROOT}/bionic/libc/private ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon/bionic-include ${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc ${GOLDFISH_DEVICE_ROOT}/shared/gralloc_cb/include ${GOLDFISH_DEVICE_ROOT}/shared/GoldfishAddressSpace/include ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv2_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv1_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
+target_compile_definitions(OpenglSystemCommon PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM")
+target_compile_options(OpenglSystemCommon PRIVATE "-fvisibility=default" "-Wno-unused-parameter" "-Wno-unused-variable" "-fno-emulated-tls")
+target_link_libraries(OpenglSystemCommon PRIVATE android-emu-shared vulkan_enc gui log _renderControl_enc GLESv2_enc GLESv1_enc OpenglCodecCommon_host cutils utils androidemu PRIVATE gralloc_cb_host GoldfishAddressSpace_host qemupipe_host)
\ No newline at end of file
diff --git a/system/OpenglSystemCommon/EGLImage.h b/system/OpenglSystemCommon/EGLImage.h
index a83e1ad..8a1eb6d 100644
--- a/system/OpenglSystemCommon/EGLImage.h
+++ b/system/OpenglSystemCommon/EGLImage.h
@@ -36,6 +36,8 @@
 {
     EGLDisplay dpy;
     EGLenum target;
+    int width;
+    int height;
 
     union
     {
diff --git a/system/OpenglSystemCommon/EmulatorFeatureInfo.h b/system/OpenglSystemCommon/EmulatorFeatureInfo.h
index 626048f..f6d8ce5 100644
--- a/system/OpenglSystemCommon/EmulatorFeatureInfo.h
+++ b/system/OpenglSystemCommon/EmulatorFeatureInfo.h
@@ -105,6 +105,34 @@
 // Vulkan free memory sync
 static const char kVulkanFreeMemorySync[] = "ANDROID_EMU_vulkan_free_memory_sync";
 
+// virtio-gpu syncfd support
+static const char kVirtioGpuNativeSync[] = "ANDROID_EMU_virtio_gpu_native_sync";
+
+// Vulkan extension that required a protocol update (new marshaling structs)
+static const char kVulkanShaderFloat16Int8[] = "ANDROID_EMU_vulkan_shader_float16_int8";
+
+// Vulkan async queue submit
+static const char kVulkanAsyncQueueSubmit[] = "ANDROID_EMU_vulkan_async_queue_submit";
+
+// A flag to _not_ ignore host opengl errors (now host opengl errors are ignored by default)
+static const char kGLESUseHostError[] = "ANDROID_EMU_gles_use_host_error";
+
+// Host side tracing
+static const char kHostSideTracing[] = "ANDROID_EMU_host_side_tracing";
+
+// Make current async
+static const char kAsyncFrameCommands[] = "ANDROID_EMU_async_frame_commands";
+
+// Queue submit with commands
+static const char kVulkanQueueSubmitWithCommands[] = "ANDROID_EMU_vulkan_queue_submit_with_commands";
+//
+// Synchronized glBufferData call
+static const char kSyncBufferData[] = "ANDROID_EMU_sync_buffer_data";
+
+
+// Batched descriptor set update
+static const char kVulkanBatchedDescriptorSetUpdate[] = "ANDROID_EMU_vulkan_batched_descriptor_set_update";
+
 // Struct describing available emulator features
 struct EmulatorFeatureInfo {
 
@@ -123,7 +151,15 @@
         hasAsyncUnmapBuffer (false),
         hasVirtioGpuNext (false),
         hasSharedSlotsHostMemoryAllocator(false),
-        hasVulkanFreeMemorySync(false)
+        hasVulkanFreeMemorySync(false),
+        hasVirtioGpuNativeSync(false),
+        hasVulkanShaderFloat16Int8(false),
+        hasVulkanAsyncQueueSubmit(false),
+        hasHostSideTracing(false),
+        hasAsyncFrameCommands(false),
+        hasVulkanQueueSubmitWithCommands(false),
+        hasVulkanBatchedDescriptorSetUpdate(false),
+        hasSyncBufferData(false)
     { }
 
     SyncImpl syncImpl;
@@ -141,6 +177,14 @@
     bool hasVirtioGpuNext;
     bool hasSharedSlotsHostMemoryAllocator;
     bool hasVulkanFreeMemorySync;
+    bool hasVirtioGpuNativeSync;
+    bool hasVulkanShaderFloat16Int8;
+    bool hasVulkanAsyncQueueSubmit;
+    bool hasHostSideTracing;
+    bool hasAsyncFrameCommands;
+    bool hasVulkanQueueSubmitWithCommands;
+    bool hasVulkanBatchedDescriptorSetUpdate;
+    bool hasSyncBufferData;
 };
 
 enum HostConnectionType {
@@ -149,6 +193,7 @@
     HOST_CONNECTION_VIRTIO_GPU = 2,
     HOST_CONNECTION_ADDRESS_SPACE = 3,
     HOST_CONNECTION_VIRTIO_GPU_PIPE = 4,
+    HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE = 5,
 };
 
 enum GrallocType {
diff --git a/system/OpenglSystemCommon/FormatConversions.cpp b/system/OpenglSystemCommon/FormatConversions.cpp
index cc976ed..aae6bb9 100644
--- a/system/OpenglSystemCommon/FormatConversions.cpp
+++ b/system/OpenglSystemCommon/FormatConversions.cpp
@@ -35,11 +35,14 @@
     return row * width * rgbStride;
 }
 
+#define OMX_COLOR_FormatYUV420Planar 0x13
+
 bool gralloc_is_yuv_format(const int format) {
     switch (format) {
     case HAL_PIXEL_FORMAT_YV12:
     case HAL_PIXEL_FORMAT_YCbCr_420_888:
     case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+    case OMX_COLOR_FormatYUV420Planar:
         return true;
 
     default:
diff --git a/system/OpenglSystemCommon/HostConnection.cpp b/system/OpenglSystemCommon/HostConnection.cpp
index b9c97c9..4afa20e 100644
--- a/system/OpenglSystemCommon/HostConnection.cpp
+++ b/system/OpenglSystemCommon/HostConnection.cpp
@@ -17,6 +17,10 @@
 
 #include "cutils/properties.h"
 
+#ifdef HOST_BUILD
+#include "android/base/Tracing.h"
+#endif
+
 #ifdef GOLDFISH_NO_GL
 struct gl_client_context_t {
     int placeholder;
@@ -36,19 +40,21 @@
     void setNoHostError(bool) { }
     void setDrawCallFlushInterval(uint32_t) { }
     void setHasAsyncUnmapBuffer(int) { }
+    void setHasSyncBufferData(int) { }
 };
 #else
 #include "GLEncoder.h"
 #include "GL2Encoder.h"
 #endif
 
-#ifdef GOLDFISH_VULKAN
+#ifdef GFXSTREAM
 #include "VkEncoder.h"
 #include "AddressSpaceStream.h"
 #else
 namespace goldfish_vk {
 struct VkEncoder {
     VkEncoder(IOStream*) { }
+    void decRef() { }
     int placeholder;
 };
 } // namespace goldfish_vk
@@ -58,6 +64,10 @@
     ALOGE("%s: FATAL: Trying to create ASG stream in unsupported build\n", __func__);
     abort();
 }
+AddressSpaceStream* createVirtioGpuAddressSpaceStream(size_t bufSize) {
+    ALOGE("%s: FATAL: Trying to create virtgpu ASG stream in unsupported build\n", __func__);
+    abort();
+}
 #endif
 
 using goldfish_vk::VkEncoder;
@@ -67,6 +77,7 @@
 #include "TcpStream.h"
 #include "ThreadInfo.h"
 #include <gralloc_cb_bp.h>
+#include <unistd.h>
 
 #ifdef VIRTIO_GPU
 
@@ -95,51 +106,49 @@
     return HOST_CONNECTION_ADDRESS_SPACE;
 #else
     char transportValue[PROPERTY_VALUE_MAX] = "";
-    property_get("ro.kernel.qemu.gltransport", transportValue, "");
 
-    bool isValid = transportValue[0] != '\0';
+    do {
+        property_get("ro.boot.qemu.gltransport.name", transportValue, "");
+        if (transportValue[0]) { break; }
 
-    if (!isValid) {
+        property_get("ro.boot.qemu.gltransport", transportValue, "");
+        if (transportValue[0]) { break; }
+
         property_get("ro.boot.hardware.gltransport", transportValue, "");
-        isValid = transportValue[0] != '\0';
-    }
+    } while (false);
 
-    if (!isValid) return HOST_CONNECTION_QEMU_PIPE;
+    if (!transportValue[0]) return HOST_CONNECTION_QEMU_PIPE;
 
     if (!strcmp("tcp", transportValue)) return HOST_CONNECTION_TCP;
     if (!strcmp("pipe", transportValue)) return HOST_CONNECTION_QEMU_PIPE;
     if (!strcmp("virtio-gpu", transportValue)) return HOST_CONNECTION_VIRTIO_GPU;
     if (!strcmp("asg", transportValue)) return HOST_CONNECTION_ADDRESS_SPACE;
     if (!strcmp("virtio-gpu-pipe", transportValue)) return HOST_CONNECTION_VIRTIO_GPU_PIPE;
+    if (!strcmp("virtio-gpu-asg", transportValue)) return HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE;
 
     return HOST_CONNECTION_QEMU_PIPE;
 #endif
 }
 
 static uint32_t getDrawCallFlushIntervalFromProperty() {
+    constexpr uint32_t kDefaultValue = 800;
+
     char flushValue[PROPERTY_VALUE_MAX] = "";
-    property_get("ro.kernel.qemu.gltransport.drawFlushInterval", flushValue, "");
+    property_get("ro.boot.qemu.gltransport.drawFlushInterval", flushValue, "");
+    if (!flushValue[0]) return kDefaultValue;
 
-    bool isValid = flushValue[0] != '\0';
-    if (!isValid) return 800;
-
-    long interval = strtol(flushValue, 0, 10);
-
-    if (!interval) return 800;
-
-    return (uint32_t)interval;
+    const long interval = strtol(flushValue, 0, 10);
+    return (interval > 0) ? uint32_t(interval) : kDefaultValue;
 }
 
 static GrallocType getGrallocTypeFromProperty() {
-    char prop[PROPERTY_VALUE_MAX] = "";
-    property_get("ro.hardware.gralloc", prop, "");
+    char value[PROPERTY_VALUE_MAX] = "";
+    property_get("ro.hardware.gralloc", value, "");
 
-    bool isValid = prop[0] != '\0';
+    if (!value[0]) return GRALLOC_TYPE_RANCHU;
 
-    if (!isValid) return GRALLOC_TYPE_RANCHU;
-
-    if (!strcmp("ranchu", prop)) return GRALLOC_TYPE_RANCHU;
-    if (!strcmp("minigbm", prop)) return GRALLOC_TYPE_MINIGBM;
+    if (!strcmp("ranchu", value)) return GRALLOC_TYPE_RANCHU;
+    if (!strcmp("minigbm", value)) return GRALLOC_TYPE_MINIGBM;
     return GRALLOC_TYPE_RANCHU;
 }
 
@@ -340,52 +349,63 @@
     {
         return ::processPipeInit(connType, rcEnc);
     }
+    
 };
 
 static GoldfishGralloc m_goldfishGralloc;
 static GoldfishProcessPipe m_goldfishProcessPipe;
 
 HostConnection::HostConnection() :
-    m_stream(NULL),
-    m_glEnc(NULL),
-    m_gl2Enc(NULL),
-    m_vkEnc(NULL),
-    m_rcEnc(NULL),
+    exitUncleanly(false),
     m_checksumHelper(),
     m_glExtensions(),
     m_grallocOnly(true),
-    m_noHostError(false) { }
+    m_noHostError(true),
+    m_rendernodeFd(-1),
+    m_rendernodeFdOwned(false) {
+#ifdef HOST_BUILD
+    android::base::initializeTracing();
+#endif
+}
 
 HostConnection::~HostConnection()
 {
     // round-trip to ensure that queued commands have been processed
     // before process pipe closure is detected.
-    if (m_rcEnc) {
-        (void) m_rcEnc->rcGetRendererVersion(m_rcEnc);
+    if (m_rcEnc && !exitUncleanly) {
+        (void)m_rcEnc->rcGetRendererVersion(m_rcEnc.get());
     }
+
     if (m_grallocType == GRALLOC_TYPE_MINIGBM) {
         delete m_grallocHelper;
     }
-    delete m_stream;
-    delete m_glEnc;
-    delete m_gl2Enc;
-    delete m_rcEnc;
+
+    if (m_rendernodeFdOwned) {
+        close(m_rendernodeFd);
+    }
+
+    if (m_vkEnc) {
+        m_vkEnc->decRef();
+    }
+
+    if (m_stream) {
+        m_stream->decRef();
+    }
 }
 
 // static
-HostConnection* HostConnection::connect(HostConnection* con) {
-    if (!con) return con;
-
+std::unique_ptr<HostConnection> HostConnection::connect() {
     const enum HostConnectionType connType = getConnectionTypeFromProperty();
     // const enum HostConnectionType connType = HOST_CONNECTION_VIRTIO_GPU;
 
+    // Use "new" to access a non-public constructor.
+    auto con = std::unique_ptr<HostConnection>(new HostConnection);
     switch (connType) {
         case HOST_CONNECTION_ADDRESS_SPACE: {
-            AddressSpaceStream *stream = createAddressSpaceStream(STREAM_BUFFER_SIZE);
+            auto stream = createAddressSpaceStream(STREAM_BUFFER_SIZE);
             if (!stream) {
                 ALOGE("Failed to create AddressSpaceStream for host connection!!!\n");
-                delete con;
-                return NULL;
+                return nullptr;
             }
             con->m_connectionType = HOST_CONNECTION_ADDRESS_SPACE;
             con->m_grallocType = GRALLOC_TYPE_RANCHU;
@@ -395,17 +415,14 @@
             break;
         }
         case HOST_CONNECTION_QEMU_PIPE: {
-            QemuPipeStream *stream = new QemuPipeStream(STREAM_BUFFER_SIZE);
+            auto stream = new QemuPipeStream(STREAM_BUFFER_SIZE);
             if (!stream) {
                 ALOGE("Failed to create QemuPipeStream for host connection!!!\n");
-                delete con;
-                return NULL;
+                return nullptr;
             }
             if (stream->connect() < 0) {
                 ALOGE("Failed to connect to host (QemuPipeStream)!!!\n");
-                delete stream;
-                delete con;
-                return NULL;
+                return nullptr;
             }
             con->m_connectionType = HOST_CONNECTION_QEMU_PIPE;
             con->m_grallocType = GRALLOC_TYPE_RANCHU;
@@ -417,22 +434,18 @@
         case HOST_CONNECTION_TCP: {
 #ifdef __Fuchsia__
             ALOGE("Fuchsia doesn't support HOST_CONNECTION_TCP!!!\n");
-            delete con;
-            return NULL;
+            return nullptr;
             break;
 #else
-            TcpStream *stream = new TcpStream(STREAM_BUFFER_SIZE);
+            auto stream = new TcpStream(STREAM_BUFFER_SIZE);
             if (!stream) {
                 ALOGE("Failed to create TcpStream for host connection!!!\n");
-                delete con;
-                return NULL;
+                return nullptr;
             }
 
             if (stream->connect("10.0.2.2", STREAM_PORT_NUM) < 0) {
                 ALOGE("Failed to connect to host (TcpStream)!!!\n");
-                delete stream;
-                delete con;
-                return NULL;
+                return nullptr;
             }
             con->m_connectionType = HOST_CONNECTION_TCP;
             con->m_grallocType = GRALLOC_TYPE_RANCHU;
@@ -444,50 +457,50 @@
         }
 #ifdef VIRTIO_GPU
         case HOST_CONNECTION_VIRTIO_GPU: {
-            VirtioGpuStream *stream = new VirtioGpuStream(STREAM_BUFFER_SIZE);
+            auto stream = new VirtioGpuStream(STREAM_BUFFER_SIZE);
             if (!stream) {
                 ALOGE("Failed to create VirtioGpu for host connection!!!\n");
-                delete con;
-                return NULL;
+                return nullptr;
             }
             if (stream->connect() < 0) {
                 ALOGE("Failed to connect to host (VirtioGpu)!!!\n");
-                delete stream;
-                delete con;
-                return NULL;
+                return nullptr;
             }
             con->m_connectionType = HOST_CONNECTION_VIRTIO_GPU;
             con->m_grallocType = GRALLOC_TYPE_MINIGBM;
-            con->m_stream = stream;
-            MinigbmGralloc* m = new MinigbmGralloc;
-            m->setFd(stream->getRendernodeFd());
-            con->m_grallocHelper = m;
+            auto rendernodeFd = stream->getRendernodeFd();
             con->m_processPipe = stream->getProcessPipe();
+            con->m_stream = stream;
+            con->m_rendernodeFdOwned = false;
+            con->m_rendernodeFdOwned = rendernodeFd;
+            MinigbmGralloc* m = new MinigbmGralloc;
+            m->setFd(rendernodeFd);
+            con->m_grallocHelper = m;
             break;
         }
         case HOST_CONNECTION_VIRTIO_GPU_PIPE: {
-            VirtioGpuPipeStream *stream = new VirtioGpuPipeStream(STREAM_BUFFER_SIZE);
+            auto stream = new VirtioGpuPipeStream(STREAM_BUFFER_SIZE);
             if (!stream) {
                 ALOGE("Failed to create VirtioGpu for host connection!!!\n");
-                delete con;
-                return NULL;
+                return nullptr;
             }
             if (stream->connect() < 0) {
                 ALOGE("Failed to connect to host (VirtioGpu)!!!\n");
-                delete stream;
-                delete con;
-                return NULL;
+                return nullptr;
             }
             con->m_connectionType = HOST_CONNECTION_VIRTIO_GPU_PIPE;
             con->m_grallocType = getGrallocTypeFromProperty();
+            con->m_rendernodeFdOwned = false;
+            auto rendernodeFd = stream->getRendernodeFd();
             con->m_stream = stream;
+            con->m_rendernodeFd = rendernodeFd;
             switch (con->m_grallocType) {
                 case GRALLOC_TYPE_RANCHU:
                     con->m_grallocHelper = &m_goldfishGralloc;
                     break;
                 case GRALLOC_TYPE_MINIGBM: {
                     MinigbmGralloc* m = new MinigbmGralloc;
-                    m->setFd(stream->getRendernodeFd());
+                    m->setFd(rendernodeFd);
                     con->m_grallocHelper = m;
                     break;
                 }
@@ -498,6 +511,37 @@
             con->m_processPipe = &m_goldfishProcessPipe;
             break;
         }
+#if !defined(HOST_BUILD) && !defined(__Fuchsia__)
+        case HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE: {
+            auto stream = createVirtioGpuAddressSpaceStream(STREAM_BUFFER_SIZE);
+            if (!stream) {
+                ALOGE("Failed to create virtgpu AddressSpaceStream for host connection!!!\n");
+                return nullptr;
+            }
+            con->m_connectionType = HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE;
+            con->m_grallocType = getGrallocTypeFromProperty();
+            con->m_rendernodeFdOwned = false;
+            auto rendernodeFd = stream->getRendernodeFd();
+            con->m_stream = stream;
+            con->m_rendernodeFd = rendernodeFd;
+            switch (con->m_grallocType) {
+                case GRALLOC_TYPE_RANCHU:
+                    con->m_grallocHelper = &m_goldfishGralloc;
+                    break;
+                case GRALLOC_TYPE_MINIGBM: {
+                    MinigbmGralloc* m = new MinigbmGralloc;
+                    m->setFd(rendernodeFd);
+                    con->m_grallocHelper = m;
+                    break;
+                }
+                default:
+                    ALOGE("Fatal: Unknown gralloc type 0x%x\n", con->m_grallocType);
+                    abort();
+            }
+            con->m_processPipe = &m_goldfishProcessPipe;
+            break;
+        }
+#endif // !HOST_BUILD && !__Fuchsia__
 #else
         default:
             break;
@@ -511,7 +555,7 @@
     con->m_stream->commitBuffer(sizeof(unsigned int));
 
     ALOGD("HostConnection::get() New Host Connection established %p, tid %d\n",
-          con, getCurrentThreadId());
+          con.get(), getCurrentThreadId());
 
     // ALOGD("Address space echo latency check done\n");
     return con;
@@ -528,13 +572,10 @@
     }
 
     if (tinfo->hostConn == NULL) {
-        HostConnection *con = new HostConnection();
-        con = connect(con);
-
-        tinfo->hostConn = con;
+        tinfo->hostConn = HostConnection::createUnique();
     }
 
-    return tinfo->hostConn;
+    return tinfo->hostConn.get();
 }
 
 void HostConnection::exit() {
@@ -543,38 +584,41 @@
         return;
     }
 
-    if (tinfo->hostConn) {
-        delete tinfo->hostConn;
-        tinfo->hostConn = NULL;
+    tinfo->hostConn.reset();
+}
+
+void HostConnection::exitUnclean() {
+    EGLThreadInfo *tinfo = getEGLThreadInfo();
+    if (!tinfo) {
+        return;
     }
+
+    tinfo->hostConn->exitUncleanly = true;
+    tinfo->hostConn.reset();
 }
 
 // static
-HostConnection *HostConnection::createUnique() {
+std::unique_ptr<HostConnection> HostConnection::createUnique() {
     ALOGD("%s: call\n", __func__);
-    return connect(new HostConnection());
-}
-
-// static
-void HostConnection::teardownUnique(HostConnection* con) {
-    delete con;
+    return connect();
 }
 
 GLEncoder *HostConnection::glEncoder()
 {
     if (!m_glEnc) {
-        m_glEnc = new GLEncoder(m_stream, checksumHelper());
+        m_glEnc = std::make_unique<GLEncoder>(m_stream, checksumHelper());
         DBG("HostConnection::glEncoder new encoder %p, tid %d",
             m_glEnc, getCurrentThreadId());
         m_glEnc->setContextAccessor(s_getGLContext);
     }
-    return m_glEnc;
+    return m_glEnc.get();
 }
 
 GL2Encoder *HostConnection::gl2Encoder()
 {
     if (!m_gl2Enc) {
-        m_gl2Enc = new GL2Encoder(m_stream, checksumHelper());
+        m_gl2Enc =
+            std::make_unique<GL2Encoder>(m_stream, checksumHelper());
         DBG("HostConnection::gl2Encoder new encoder %p, tid %d",
             m_gl2Enc, getCurrentThreadId());
         m_gl2Enc->setContextAccessor(s_getGL2Context);
@@ -582,12 +626,14 @@
         m_gl2Enc->setDrawCallFlushInterval(
             getDrawCallFlushIntervalFromProperty());
         m_gl2Enc->setHasAsyncUnmapBuffer(m_rcEnc->hasAsyncUnmapBuffer());
+        m_gl2Enc->setHasSyncBufferData(m_rcEnc->hasSyncBufferData());
     }
-    return m_gl2Enc;
+    return m_gl2Enc.get();
 }
 
 VkEncoder *HostConnection::vkEncoder()
 {
+    rcEncoder();
     if (!m_vkEnc) {
         m_vkEnc = new VkEncoder(m_stream);
     }
@@ -597,36 +643,72 @@
 ExtendedRCEncoderContext *HostConnection::rcEncoder()
 {
     if (!m_rcEnc) {
-        m_rcEnc = new ExtendedRCEncoderContext(m_stream, checksumHelper());
-        setChecksumHelper(m_rcEnc);
-        queryAndSetSyncImpl(m_rcEnc);
-        queryAndSetDmaImpl(m_rcEnc);
-        queryAndSetGLESMaxVersion(m_rcEnc);
-        queryAndSetNoErrorState(m_rcEnc);
-        queryAndSetHostCompositionImpl(m_rcEnc);
-        queryAndSetDirectMemSupport(m_rcEnc);
-        queryAndSetVulkanSupport(m_rcEnc);
-        queryAndSetDeferredVulkanCommandsSupport(m_rcEnc);
-        queryAndSetVulkanNullOptionalStringsSupport(m_rcEnc);
-        queryAndSetVulkanCreateResourcesWithRequirementsSupport(m_rcEnc);
-        queryAndSetVulkanIgnoredHandles(m_rcEnc);
-        queryAndSetYUVCache(m_rcEnc);
-        queryAndSetAsyncUnmapBuffer(m_rcEnc);
-        queryAndSetVirtioGpuNext(m_rcEnc);
-        queryHasSharedSlotsHostMemoryAllocator(m_rcEnc);
-        queryAndSetVulkanFreeMemorySync(m_rcEnc);
+        m_rcEnc = std::make_unique<ExtendedRCEncoderContext>(m_stream,
+                                                             checksumHelper());
+
+        ExtendedRCEncoderContext* rcEnc = m_rcEnc.get();
+        setChecksumHelper(rcEnc);
+        queryAndSetSyncImpl(rcEnc);
+        queryAndSetDmaImpl(rcEnc);
+        queryAndSetGLESMaxVersion(rcEnc);
+        queryAndSetNoErrorState(rcEnc);
+        queryAndSetHostCompositionImpl(rcEnc);
+        queryAndSetDirectMemSupport(rcEnc);
+        queryAndSetVulkanSupport(rcEnc);
+        queryAndSetDeferredVulkanCommandsSupport(rcEnc);
+        queryAndSetVulkanNullOptionalStringsSupport(rcEnc);
+        queryAndSetVulkanCreateResourcesWithRequirementsSupport(rcEnc);
+        queryAndSetVulkanIgnoredHandles(rcEnc);
+        queryAndSetYUVCache(rcEnc);
+        queryAndSetAsyncUnmapBuffer(rcEnc);
+        queryAndSetVirtioGpuNext(rcEnc);
+        queryHasSharedSlotsHostMemoryAllocator(rcEnc);
+        queryAndSetVulkanFreeMemorySync(rcEnc);
+        queryAndSetVirtioGpuNativeSync(rcEnc);
+        queryAndSetVulkanShaderFloat16Int8Support(rcEnc);
+        queryAndSetVulkanAsyncQueueSubmitSupport(rcEnc);
+        queryAndSetHostSideTracingSupport(rcEnc);
+        queryAndSetAsyncFrameCommands(rcEnc);
+        queryAndSetVulkanQueueSubmitWithCommandsSupport(rcEnc);
+        queryAndSetVulkanBatchedDescriptorSetUpdateSupport(rcEnc);
+        queryAndSetSyncBufferData(rcEnc);
+        queryVersion(rcEnc);
         if (m_processPipe) {
-            m_processPipe->processPipeInit(m_connectionType, m_rcEnc);
+            m_processPipe->processPipeInit(m_connectionType, rcEnc);
         }
     }
-    return m_rcEnc;
+    return m_rcEnc.get();
+}
+
+int HostConnection::getOrCreateRendernodeFd() {
+    if (m_rendernodeFd >= 0) return m_rendernodeFd;
+#ifdef __Fuchsia__
+    return -1;
+#else
+#ifdef VIRTIO_GPU
+    m_rendernodeFd = VirtioGpuPipeStream::openRendernode();
+    if (m_rendernodeFd < 0) {
+        ALOGE("%s: failed to create secondary "
+              "rendernode for host connection. "
+              "error: %s (%d)\n", __FUNCTION__,
+              strerror(errno), errno);
+        return -1;
+    }
+
+    // Remember to close it on exit
+    m_rendernodeFdOwned = true;
+    return m_rendernodeFd;
+#else
+    return -1;
+#endif
+#endif
 }
 
 gl_client_context_t *HostConnection::s_getGLContext()
 {
     EGLThreadInfo *ti = getEGLThreadInfo();
     if (ti->hostConn) {
-        return ti->hostConn->m_glEnc;
+        return ti->hostConn->m_glEnc.get();
     }
     return NULL;
 }
@@ -635,7 +717,7 @@
 {
     EGLThreadInfo *ti = getEGLThreadInfo();
     if (ti->hostConn) {
-        return ti->hostConn->m_gl2Enc;
+        return ti->hostConn->m_gl2Enc.get();
     }
     return NULL;
 }
@@ -751,8 +833,8 @@
 
 void HostConnection::queryAndSetNoErrorState(ExtendedRCEncoderContext* rcEnc) {
     std::string glExtensions = queryGLExtensions(rcEnc);
-    if (glExtensions.find(kGLESNoHostError) != std::string::npos) {
-        m_noHostError = true;
+    if (glExtensions.find(kGLESUseHostError) != std::string::npos) {
+        m_noHostError = false;
     }
 }
 
@@ -832,3 +914,64 @@
         rcEnc->featureInfo()->hasVulkanFreeMemorySync = true;
     }
 }
+
+void HostConnection::queryAndSetVirtioGpuNativeSync(ExtendedRCEncoderContext* rcEnc) {
+    std::string glExtensions = queryGLExtensions(rcEnc);
+    if (glExtensions.find(kVirtioGpuNativeSync) != std::string::npos) {
+        rcEnc->featureInfo()->hasVirtioGpuNativeSync = true;
+    }
+}
+
+void HostConnection::queryAndSetVulkanShaderFloat16Int8Support(ExtendedRCEncoderContext* rcEnc) {
+    std::string glExtensions = queryGLExtensions(rcEnc);
+    if (glExtensions.find(kVulkanShaderFloat16Int8) != std::string::npos) {
+        rcEnc->featureInfo()->hasVulkanShaderFloat16Int8 = true;
+    }
+}
+
+void HostConnection::queryAndSetVulkanAsyncQueueSubmitSupport(ExtendedRCEncoderContext* rcEnc) {
+    std::string glExtensions = queryGLExtensions(rcEnc);
+    if (glExtensions.find(kVulkanAsyncQueueSubmit) != std::string::npos) {
+        rcEnc->featureInfo()->hasVulkanAsyncQueueSubmit = true;
+    }
+}
+
+void HostConnection::queryAndSetHostSideTracingSupport(ExtendedRCEncoderContext* rcEnc) {
+    std::string glExtensions = queryGLExtensions(rcEnc);
+    if (glExtensions.find(kHostSideTracing) != std::string::npos) {
+        rcEnc->featureInfo()->hasHostSideTracing = true;
+    }
+}
+
+void HostConnection::queryAndSetAsyncFrameCommands(ExtendedRCEncoderContext* rcEnc) {
+    std::string glExtensions = queryGLExtensions(rcEnc);
+    if (glExtensions.find(kAsyncFrameCommands) != std::string::npos) {
+        rcEnc->featureInfo()->hasAsyncFrameCommands = true;
+    }
+}
+
+void HostConnection::queryAndSetVulkanQueueSubmitWithCommandsSupport(ExtendedRCEncoderContext* rcEnc) {
+    std::string glExtensions = queryGLExtensions(rcEnc);
+    if (glExtensions.find(kVulkanQueueSubmitWithCommands) != std::string::npos) {
+        rcEnc->featureInfo()->hasVulkanQueueSubmitWithCommands = true;
+    }
+}
+
+void HostConnection::queryAndSetVulkanBatchedDescriptorSetUpdateSupport(ExtendedRCEncoderContext* rcEnc) {
+    std::string glExtensions = queryGLExtensions(rcEnc);
+    if (glExtensions.find(kVulkanBatchedDescriptorSetUpdate) != std::string::npos) {
+        rcEnc->featureInfo()->hasVulkanBatchedDescriptorSetUpdate = true;
+    }
+}
+
+void HostConnection::queryAndSetSyncBufferData(ExtendedRCEncoderContext* rcEnc) {
+    std::string glExtensions = queryGLExtensions(rcEnc);
+    if (glExtensions.find(kSyncBufferData) != std::string::npos) {
+        rcEnc->featureInfo()->hasSyncBufferData = true;
+    }
+}
+
+GLint HostConnection::queryVersion(ExtendedRCEncoderContext* rcEnc) {
+    GLint version = m_rcEnc->rcGetRendererVersion(m_rcEnc.get());
+    return version;
+}
diff --git a/system/OpenglSystemCommon/HostConnection.h b/system/OpenglSystemCommon/HostConnection.h
index 02c8681..7c55629 100644
--- a/system/OpenglSystemCommon/HostConnection.h
+++ b/system/OpenglSystemCommon/HostConnection.h
@@ -20,16 +20,21 @@
 #include "IOStream.h"
 #include "renderControl_enc.h"
 #include "ChecksumCalculator.h"
+#ifdef __Fuchsia__
+struct goldfish_dma_context;
+#else
 #include "goldfish_dma.h"
+#endif
 
 #include <cutils/native_handle.h>
 
-#ifdef GOLDFISH_VULKAN
+#ifdef GFXSTREAM
 #include <mutex>
 #else
 #include <utils/threads.h>
 #endif
 
+#include <memory>
 #include <string>
 
 class GLEncoder;
@@ -55,6 +60,7 @@
     bool hasNativeSync() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V2; }
     bool hasNativeSyncV3() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V3; }
     bool hasNativeSyncV4() const { return m_featureInfo.syncImpl >= SYNC_IMPL_NATIVE_SYNC_V4; }
+    bool hasVirtioGpuNativeSync() const { return m_featureInfo.hasVirtioGpuNativeSync; }
     bool hasHostCompositionV1() const {
         return m_featureInfo.hostComposition == HOST_COMPOSITION_V1; }
     bool hasHostCompositionV2() const {
@@ -63,6 +69,14 @@
         return m_featureInfo.hasYUVCache; }
     bool hasAsyncUnmapBuffer() const {
         return m_featureInfo.hasAsyncUnmapBuffer; }
+    bool hasHostSideTracing() const {
+        return m_featureInfo.hasHostSideTracing;
+    }
+    bool hasAsyncFrameCommands() const {
+        return m_featureInfo.hasAsyncFrameCommands;
+    }
+    bool hasSyncBufferData() const {
+        return m_featureInfo.hasSyncBufferData; }
     DmaImpl getDmaVersion() const { return m_featureInfo.dmaImpl; }
     void bindDmaContext(struct goldfish_dma_context* cxt) { m_dmaCxt = cxt; }
     void bindDmaDirectly(void* dmaPtr, uint64_t dmaPhysAddr) {
@@ -96,6 +110,10 @@
 private:
     static uint64_t writeGoldfishDma(void* data, uint32_t size,
                                      struct goldfish_dma_context* dmaCxt) {
+#ifdef __Fuchsia__
+        ALOGE("%s Not implemented!", __FUNCTION__);
+        return 0u;
+#else
         ALOGV("%s(data=%p, size=%u): call", __func__, data, size);
 
         goldfish_dma_write(dmaCxt, data, size);
@@ -103,6 +121,7 @@
 
         ALOGV("%s: paddr=0x%llx", __func__, (unsigned long long)paddr);
         return paddr;
+#endif
     }
 
     EmulatorFeatureInfo m_featureInfo;
@@ -138,9 +157,10 @@
     static HostConnection *get();
     static HostConnection *getWithThreadInfo(EGLThreadInfo* tInfo);
     static void exit();
+    static void exitUnclean(); // for testing purposes
 
-    static HostConnection *createUnique();
-    static void teardownUnique(HostConnection* con);
+    static std::unique_ptr<HostConnection> createUnique();
+    HostConnection(const HostConnection&) = delete;
 
     ~HostConnection();
 
@@ -152,6 +172,12 @@
     GL2Encoder *gl2Encoder();
     goldfish_vk::VkEncoder *vkEncoder();
     ExtendedRCEncoderContext *rcEncoder();
+
+    // Returns rendernode fd, in case the stream is virtio-gpu based.
+    // Otherwise, attempts to create a rendernode fd assuming
+    // virtio-gpu is available.
+    int getOrCreateRendernodeFd();
+
     ChecksumCalculator *checksumHelper() { return &m_checksumHelper; }
     Gralloc *grallocHelper() { return m_grallocHelper; }
 
@@ -177,10 +203,12 @@
 #pragma clang diagnostic pop
 #endif
 
+    bool exitUncleanly; // for testing purposes
+
 private:
     // If the connection failed, |conn| is deleted.
     // Returns NULL if connection failed.
-    static HostConnection* connect(HostConnection* con);
+    static std::unique_ptr<HostConnection> connect();
 
     HostConnection();
     static gl_client_context_t  *s_getGLContext();
@@ -206,26 +234,43 @@
     void queryAndSetVirtioGpuNext(ExtendedRCEncoderContext *rcEnc);
     void queryHasSharedSlotsHostMemoryAllocator(ExtendedRCEncoderContext *rcEnc);
     void queryAndSetVulkanFreeMemorySync(ExtendedRCEncoderContext *rcEnc);
+    void queryAndSetVirtioGpuNativeSync(ExtendedRCEncoderContext *rcEnc);
+    void queryAndSetVulkanShaderFloat16Int8Support(ExtendedRCEncoderContext *rcEnc);
+    void queryAndSetVulkanAsyncQueueSubmitSupport(ExtendedRCEncoderContext *rcEnc);
+    void queryAndSetHostSideTracingSupport(ExtendedRCEncoderContext *rcEnc);
+    void queryAndSetAsyncFrameCommands(ExtendedRCEncoderContext *rcEnc);
+    void queryAndSetVulkanQueueSubmitWithCommandsSupport(ExtendedRCEncoderContext *rcEnc);
+    void queryAndSetVulkanBatchedDescriptorSetUpdateSupport(ExtendedRCEncoderContext *rcEnc);
+    void queryAndSetSyncBufferData(ExtendedRCEncoderContext *rcEnc);
+    GLint queryVersion(ExtendedRCEncoderContext* rcEnc);
 
 private:
     HostConnectionType m_connectionType;
     GrallocType m_grallocType;
-    IOStream *m_stream;
-    GLEncoder   *m_glEnc;
-    GL2Encoder  *m_gl2Enc;
-    goldfish_vk::VkEncoder  *m_vkEnc;
-    ExtendedRCEncoderContext *m_rcEnc;
+
+    // intrusively refcounted
+    IOStream* m_stream = nullptr;
+
+    std::unique_ptr<GLEncoder> m_glEnc;
+    std::unique_ptr<GL2Encoder> m_gl2Enc;
+
+    // intrusively refcounted
+    goldfish_vk::VkEncoder* m_vkEnc = nullptr;
+    std::unique_ptr<ExtendedRCEncoderContext> m_rcEnc;
+
     ChecksumCalculator m_checksumHelper;
-    Gralloc *m_grallocHelper;
-    ProcessPipe *m_processPipe;
+    Gralloc* m_grallocHelper = nullptr;
+    ProcessPipe* m_processPipe = nullptr;
     std::string m_glExtensions;
     bool m_grallocOnly;
     bool m_noHostError;
-#ifdef GOLDFISH_VULKAN
+#ifdef GFXSTREAM
     mutable std::mutex m_lock;
 #else
     mutable android::Mutex m_lock;
 #endif
+    int m_rendernodeFd;
+    bool m_rendernodeFdOwned;
 };
 
 #endif
diff --git a/system/OpenglSystemCommon/ProcessPipe.cpp b/system/OpenglSystemCommon/ProcessPipe.cpp
index 84764f4..c7254d1 100644
--- a/system/OpenglSystemCommon/ProcessPipe.cpp
+++ b/system/OpenglSystemCommon/ProcessPipe.cpp
@@ -15,6 +15,7 @@
 */
 
 #include "ProcessPipe.h"
+#include "HostConnection.h"
 #include "renderControl_enc.h"
 
 #include <qemu_pipe_bp.h>
@@ -28,11 +29,14 @@
 #include <errno.h>
 
 #ifdef __Fuchsia__
-#include <fuchsia/hardware/goldfish/cpp/fidl.h>
+#include <fuchsia/hardware/goldfish/llcpp/fidl.h>
 #include <lib/zx/vmo.h>
 
 #include "services/service_connector.h"
 
+#define GET_STATUS_SAFE(result, member) \
+    ((result).ok() ? ((result).Unwrap()->member) : ZX_OK)
+
 static QEMU_PIPE_HANDLE   sProcDevice = 0;
 #else // __Fuchsia__
 
@@ -48,6 +52,18 @@
 static uint64_t           sProcUID = 0;
 static volatile HostConnectionType sConnType = HOST_CONNECTION_VIRTIO_GPU_PIPE;
 
+static uint32_t* sSeqnoPtr = 0;
+
+// Meant to be called only once per process.
+static void initSeqno() {
+    // So why do we reinitialize here? It's for testing purposes only;
+    // we have a unit test that exercise the case where this sequence
+    // number is reset as a result of guest process kill.
+    if (sSeqnoPtr) delete sSeqnoPtr;
+    sSeqnoPtr = new uint32_t;
+    *sSeqnoPtr = 0;
+}
+
 // processPipeInitOnce is used to generate a process unique ID (puid).
 // processPipeInitOnce will only be called at most once per process.
 // Use it with pthread_once for thread safety.
@@ -57,39 +73,55 @@
 // host.
 #ifdef __Fuchsia__
 static void processPipeInitOnce() {
-    zx::channel channel(GetConnectToServiceFunction()(QEMU_PIPE_PATH));
+    initSeqno();
+
+    fidl::ClientEnd<fuchsia_hardware_goldfish::PipeDevice> channel{
+        zx::channel(GetConnectToServiceFunction()(QEMU_PIPE_PATH))};
     if (!channel) {
         ALOGE("%s: failed to open " QEMU_PIPE_PATH,
               __FUNCTION__);
         return;
     }
 
-    fuchsia::hardware::goldfish::PipeDeviceSyncPtr device;
-    device.Bind(std::move(channel));
+    fidl::WireSyncClient<fuchsia_hardware_goldfish::PipeDevice> device(
+        std::move(channel));
 
-    fuchsia::hardware::goldfish::PipeSyncPtr pipe;
-    device->OpenPipe(pipe.NewRequest());
-
-    zx_status_t status, status2 = ZX_OK;
-    zx::vmo vmo;
-    status = pipe->GetBuffer(&status2, &vmo);
-    if (status != ZX_OK || status2 != ZX_OK) {
-        ALOGE("%s: failed to get buffer: %d:%d", __FUNCTION__, status, status2);
+    auto pipe_ends =
+        fidl::CreateEndpoints<::fuchsia_hardware_goldfish::Pipe>();
+    if (!pipe_ends.is_ok()) {
+        ALOGE("%s: zx_channel_create failed: %d", __FUNCTION__, pipe_ends.status_value());
         return;
     }
 
+    fidl::WireSyncClient<fuchsia_hardware_goldfish::Pipe> pipe(
+        std::move(pipe_ends->client));
+    device.OpenPipe(std::move(pipe_ends->server));
+
+    zx::vmo vmo;
+    {
+        auto result = pipe.GetBuffer();
+        if (!result.ok() || result.Unwrap()->res != ZX_OK) {
+            ALOGE("%s: failed to get buffer: %d:%d", __FUNCTION__,
+                  result.status(), GET_STATUS_SAFE(result, res));
+            return;
+        }
+        vmo = std::move(result.Unwrap()->vmo);
+    }
+
     size_t len = strlen("pipe:GLProcessPipe");
-    status = vmo.write("pipe:GLProcessPipe", 0, len + 1);
+    zx_status_t status = vmo.write("pipe:GLProcessPipe", 0, len + 1);
     if (status != ZX_OK) {
         ALOGE("%s: failed write pipe name", __FUNCTION__);
         return;
     }
-    uint64_t actual;
-    status = pipe->Write(len + 1, 0, &status2, &actual);
-    if (status != ZX_OK || status2 != ZX_OK) {
-        ALOGD("%s: connecting to pipe service failed: %d:%d", __FUNCTION__,
-              status, status2);
-        return;
+
+    {
+        auto result = pipe.Write(len + 1, 0);
+        if (!result.ok() || result.Unwrap()->res != ZX_OK) {
+            ALOGD("%s: connecting to pipe service failed: %d:%d", __FUNCTION__,
+                  result.status(), GET_STATUS_SAFE(result, res));
+            return;
+        }
     }
 
     // Send a confirmation int to the host and get per-process unique ID back
@@ -99,19 +131,23 @@
         ALOGE("%s: failed write confirm int", __FUNCTION__);
         return;
     }
-    status = pipe->DoCall(sizeof(confirmInt), 0, sizeof(sProcUID), 0, &status2, &actual);
-    if (status != ZX_OK || status2 != ZX_OK) {
-        ALOGD("%s: failed to get per-process ID: %d:%d", __FUNCTION__,
-              status, status2);
-        return;
+
+    {
+        auto result = pipe.DoCall(sizeof(confirmInt), 0, sizeof(sProcUID), 0);
+        if (!result.ok() || result.Unwrap()->res != ZX_OK) {
+            ALOGD("%s: failed to get per-process ID: %d:%d", __FUNCTION__,
+                  result.status(), GET_STATUS_SAFE(result, res));
+            return;
+        }
     }
+
     status = vmo.read(&sProcUID, 0, sizeof(sProcUID));
     if (status != ZX_OK) {
         ALOGE("%s: failed read per-process ID: %d", __FUNCTION__, status);
         return;
     }
-    sProcDevice = device.Unbind().TakeChannel().release();
-    sProcPipe = pipe.Unbind().TakeChannel().release();
+    sProcDevice = device.mutable_channel()->release();
+    sProcPipe = pipe.mutable_channel()->release();
 }
 #else // __Fuchsia__
 
@@ -124,14 +160,7 @@
     }
     // Send a confirmation int to the host
     int32_t confirmInt = 100;
-    ssize_t stat = 0;
-    do {
-        stat =
-            qemu_pipe_write(sProcPipe, (const char*)&confirmInt,
-                sizeof(confirmInt));
-    } while (stat < 0 && errno == EINTR);
-
-    if (stat != sizeof(confirmInt)) { // failed
+    if (qemu_pipe_write_fully(sProcPipe, &confirmInt, sizeof(confirmInt))) { // failed
         qemu_pipe_close(sProcPipe);
         sProcPipe = 0;
         ALOGW("Process pipe failed");
@@ -139,13 +168,7 @@
     }
 
     // Ask the host for per-process unique ID
-    do {
-        stat =
-            qemu_pipe_read(sProcPipe, (char*)&sProcUID,
-                sizeof(sProcUID));
-    } while (stat < 0 && (errno == EINTR || errno == EAGAIN));
-
-    if (stat != sizeof(sProcUID)) {
+    if (qemu_pipe_read_fully(sProcPipe, &sProcUID, sizeof(sProcUID))) {
         qemu_pipe_close(sProcPipe);
         sProcPipe = 0;
         sProcUID = 0;
@@ -155,7 +178,9 @@
 }
 
 static void processPipeInitOnce() {
-#if defined(HOST_BUILD) || !defined(GOLDFISH_VULKAN)
+    initSeqno();
+
+#if defined(HOST_BUILD) || !defined(GFXSTREAM)
     sQemuPipeInit();
 #else // HOST_BUILD
     switch (sConnType) {
@@ -166,7 +191,8 @@
         case HOST_CONNECTION_VIRTIO_GPU:
             sQemuPipeInit();
             break;
-        case HOST_CONNECTION_VIRTIO_GPU_PIPE: {
+        case HOST_CONNECTION_VIRTIO_GPU_PIPE:
+        case HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE: {
             sVirtioGpuPipeStream = new VirtioGpuPipeStream(4096);
             sProcUID = sVirtioGpuPipeStream->initProcessPipe();
             break;
@@ -187,3 +213,57 @@
     rcEnc->rcSetPuid(rcEnc, sProcUID);
     return true;
 }
+
+uint64_t getPuid() {
+    return sProcUID;
+}
+
+void processPipeRestart() {
+    ALOGW("%s: restarting process pipe\n", __func__);
+    bool isPipe = false;
+
+    switch (sConnType) {
+        // TODO: Move those over too
+        case HOST_CONNECTION_QEMU_PIPE:
+        case HOST_CONNECTION_ADDRESS_SPACE:
+        case HOST_CONNECTION_TCP:
+        case HOST_CONNECTION_VIRTIO_GPU:
+            isPipe = true;
+            break;
+        case HOST_CONNECTION_VIRTIO_GPU_PIPE:
+        case HOST_CONNECTION_VIRTIO_GPU_ADDRESS_SPACE: {
+            isPipe = false;
+            break;
+        }
+    }
+
+    sProcUID = 0;
+
+#ifdef __Fuchsia__
+    zx_handle_close(sProcPipe);
+    sProcPipe = ZX_HANDLE_INVALID;
+#else
+    if (isPipe) {
+        if (qemu_pipe_valid(sProcPipe)) {
+            qemu_pipe_close(sProcPipe);
+            sProcPipe = 0;
+        }
+    } else {
+        delete sVirtioGpuPipeStream;
+        sVirtioGpuPipeStream = nullptr;
+    }
+#endif // __Fuchsia__
+
+    processPipeInitOnce();
+};
+
+void refreshHostConnection() {
+    HostConnection* hostConn = HostConnection::get();
+    ExtendedRCEncoderContext* rcEnc = hostConn->rcEncoder();
+    rcEnc->rcSetPuid(rcEnc, sProcUID);
+}
+
+uint32_t* getSeqnoPtrForProcess() {
+    // It's assumed process pipe state has already been initialized.
+    return sSeqnoPtr;
+}
diff --git a/system/OpenglSystemCommon/ProcessPipe.h b/system/OpenglSystemCommon/ProcessPipe.h
index dea2a3f..c2bd7ce 100644
--- a/system/OpenglSystemCommon/ProcessPipe.h
+++ b/system/OpenglSystemCommon/ProcessPipe.h
@@ -34,3 +34,12 @@
 struct renderControl_encoder_context_t;
 
 extern bool processPipeInit(HostConnectionType connType, renderControl_encoder_context_t *rcEnc);
+extern uint64_t getPuid();
+
+// For testing purposes; this will close the current process pipe if opened, reset the state to initial,
+// and open it again with the same parameters.
+extern void processPipeRestart();
+extern void refreshHostConnection();
+
+// Each process gets a sequence number field.
+uint32_t* getSeqnoPtrForProcess();
diff --git a/system/OpenglSystemCommon/QemuPipeStream.h b/system/OpenglSystemCommon/QemuPipeStream.h
index 4e779eb..e6aad15 100644
--- a/system/OpenglSystemCommon/QemuPipeStream.h
+++ b/system/OpenglSystemCommon/QemuPipeStream.h
@@ -21,12 +21,13 @@
  * <hardware/qemu_pipe.h> for more details.
  */
 #include <stdlib.h>
+#include <memory>
 #include "IOStream.h"
 
 #include <qemu_pipe_bp.h>
 
 #ifdef __Fuchsia__
-#include <fuchsia/hardware/goldfish/cpp/fidl.h>
+#include <fuchsia/hardware/goldfish/llcpp/fidl.h>
 #include <lib/zx/event.h>
 #include <lib/zx/vmo.h>
 #endif
@@ -58,8 +59,10 @@
     size_t m_read;
     size_t m_readLeft;
 #ifdef __Fuchsia__
-    fuchsia::hardware::goldfish::PipeDeviceSyncPtr m_device;
-    fuchsia::hardware::goldfish::PipeSyncPtr m_pipe;
+    std::unique_ptr<::fidl::WireSyncClient<fuchsia_hardware_goldfish::PipeDevice>>
+        m_device;
+    std::unique_ptr<::fidl::WireSyncClient<fuchsia_hardware_goldfish::Pipe>>
+        m_pipe;
     zx::event m_event;
     zx::vmo m_vmo;
 #endif
diff --git a/system/OpenglSystemCommon/QemuPipeStreamFuchsia.cpp b/system/OpenglSystemCommon/QemuPipeStreamFuchsia.cpp
index 5472f30..f445244 100644
--- a/system/OpenglSystemCommon/QemuPipeStreamFuchsia.cpp
+++ b/system/OpenglSystemCommon/QemuPipeStreamFuchsia.cpp
@@ -28,6 +28,9 @@
 
 #include "services/service_connector.h"
 
+#define GET_STATUS_SAFE(result, member) \
+    ((result).ok() ? ((result).Unwrap()->member) : ZX_OK)
+
 constexpr size_t kReadSize = 512 * 1024;
 constexpr size_t kWriteOffset = kReadSize;
 
@@ -53,7 +56,7 @@
 
 QemuPipeStream::~QemuPipeStream()
 {
-    if (m_device.is_bound()) {
+    if (m_device) {
         flush();
     }
     if (m_buf) {
@@ -69,15 +72,28 @@
 
 int QemuPipeStream::connect(void)
 {
-    zx::channel channel(GetConnectToServiceFunction()(QEMU_PIPE_PATH));
+    fidl::ClientEnd<fuchsia_hardware_goldfish::PipeDevice> channel{
+        zx::channel(GetConnectToServiceFunction()(QEMU_PIPE_PATH))};
     if (!channel) {
         ALOGE("%s: failed to get service handle for " QEMU_PIPE_PATH,
               __FUNCTION__);
         return -1;
     }
 
-    m_device.Bind(std::move(channel));
-    m_device->OpenPipe(m_pipe.NewRequest());
+    m_device = std::make_unique<
+        fidl::WireSyncClient<fuchsia_hardware_goldfish::PipeDevice>>(
+        std::move(channel));
+
+    auto pipe_ends =
+        fidl::CreateEndpoints<::fuchsia_hardware_goldfish::Pipe>();
+    if (!pipe_ends.is_ok()) {
+        ALOGE("zx::channel::create failed: %d", pipe_ends.status_value());
+        return ZX_HANDLE_INVALID;
+    }
+    m_device->OpenPipe(std::move(pipe_ends->server));
+    m_pipe =
+        std::make_unique<fidl::WireSyncClient<fuchsia_hardware_goldfish::Pipe>>(
+            std::move(pipe_ends->client));
 
     zx::event event;
     zx_status_t status = zx::event::create(0, &event);
@@ -92,10 +108,13 @@
         return -1;
     }
 
-    status = m_pipe->SetEvent(std::move(event_copy));
-    if (status != ZX_OK) {
-        ALOGE("%s: failed to set event: %d:%d", __FUNCTION__, status);
-        return -1;
+    {
+        auto result = m_pipe->SetEvent(std::move(event_copy));
+        if (!result.ok()) {
+            ALOGE("%s: failed to set event: %d:%d", __FUNCTION__,
+                  result.status());
+            return -1;
+        }
     }
 
     if (!allocBuffer(m_bufsize)) {
@@ -110,13 +129,13 @@
         return -1;
     }
 
-    uint64_t actual;
-    zx_status_t status2 = ZX_OK;
-    status = m_pipe->Write(len + 1, 0, &status2, &actual);
-    if (status != ZX_OK || status2 != ZX_OK) {
-        ALOGD("%s: connecting to pipe service failed: %d:%d", __FUNCTION__,
-              status, status2);
-        return -1;
+    {
+        auto result = m_pipe->Write(len + 1, 0);
+        if (!result.ok() || result.Unwrap()->res != ZX_OK) {
+            ALOGD("%s: connecting to pipe service failed: %d:%d", __FUNCTION__,
+                  result.status(), GET_STATUS_SAFE(result, res));
+            return -1;
+        }
     }
 
     m_event = std::move(event);
@@ -145,26 +164,32 @@
 
     size_t allocSize = m_bufsize < minSize ? minSize : m_bufsize;
 
-    zx_status_t status2 = ZX_OK;
-    status = m_pipe->SetBufferSize(allocSize, &status2);
-    if (status != ZX_OK || status2 != ZX_OK) {
-        ALOGE("%s: failed to get buffer: %d:%d", __FUNCTION__, status, status2);
-        return nullptr;
+    {
+        auto result = m_pipe->SetBufferSize(allocSize);
+        if (!result.ok() || result.Unwrap()->res != ZX_OK) {
+            ALOGE("%s: failed to get buffer: %d:%d", __FUNCTION__,
+                  result.status(), GET_STATUS_SAFE(result, res));
+            return nullptr;
+        }
     }
 
     zx::vmo vmo;
-    status = m_pipe->GetBuffer(&status2, &vmo);
-    if (status != ZX_OK || status2 != ZX_OK) {
-        ALOGE("%s: failed to get buffer: %d:%d", __FUNCTION__, status, status2);
-        return nullptr;
+    {
+        auto result = m_pipe->GetBuffer();
+        if (!result.ok() || result.Unwrap()->res != ZX_OK) {
+            ALOGE("%s: failed to get buffer: %d:%d", __FUNCTION__,
+                  result.status(), GET_STATUS_SAFE(result, res));
+            return nullptr;
+        }
+        vmo = std::move(result.Unwrap()->vmo);
     }
 
     zx_vaddr_t mapped_addr;
-    status = zx_vmar_map(zx_vmar_root_self(),
-                         ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
-                         0, vmo.get(), 0, allocSize, &mapped_addr);
+    status =
+        zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
+                    vmo.get(), 0, allocSize, &mapped_addr);
     if (status != ZX_OK) {
-        ALOGE("%s: failed to map buffer: %d:%d", __FUNCTION__, status);
+        ALOGE("%s: failed to map buffer: %d", __FUNCTION__, status);
         return nullptr;
     }
 
@@ -178,11 +203,10 @@
 {
     if (size == 0) return 0;
 
-    uint64_t actual = 0;
-    zx_status_t status2 = ZX_OK;
-    zx_status_t status = m_pipe->DoCall(size, kWriteOffset, 0, 0, &status2, &actual);
-    if (status != ZX_OK || status2 != ZX_OK) {
-        ALOGD("%s: Pipe call failed: %d:%d", __FUNCTION__, status, status2);
+    auto result = m_pipe->DoCall(size, kWriteOffset, 0, 0);
+    if (!result.ok() || result.Unwrap()->res != ZX_OK) {
+        ALOGD("%s: Pipe call failed: %d:%d", __FUNCTION__, result.status(),
+              GET_STATUS_SAFE(result, res));
         return -1;
     }
 
@@ -207,7 +231,8 @@
 
 const unsigned char *QemuPipeStream::commitBufferAndReadFully(size_t size, void *buf, size_t len)
 {
-    if (!m_device.is_bound()) return nullptr;
+    if (!m_device)
+        return nullptr;
 
     if (!buf) {
         if (len > 0) {
@@ -236,17 +261,16 @@
 
     // Read up to kReadSize bytes if all buffered read has been consumed.
     size_t maxRead = (m_readLeft || !remaining) ? 0 : kReadSize;
-    uint64_t actual = 0;
-    zx_status_t status2 = ZX_OK;
-    zx_status_t status = m_pipe->DoCall(size, kWriteOffset, maxRead, 0, &status2, &actual);
-    if (status != ZX_OK) {
-        ALOGD("%s: Pipe call failed: %d", __FUNCTION__, status);
+
+    auto result = m_pipe->DoCall(size, kWriteOffset, maxRead, 0);
+    if (!result.ok()) {
+        ALOGD("%s: Pipe call failed: %d", __FUNCTION__, result.status());
         return nullptr;
     }
 
     // Updated buffered read size.
-    if (actual) {
-        m_read = m_readLeft = actual;
+    if (result.Unwrap()->actual) {
+        m_read = m_readLeft = result.Unwrap()->actual;
     }
 
     // Consume buffered read and read more if neccessary.
@@ -261,31 +285,33 @@
             continue;
         }
 
-        status2 = ZX_OK;
-        actual = 0;
-        status = m_pipe->Read(kReadSize, 0, &status2, &actual);
-        if (status != ZX_OK) {
-            ALOGD("%s: Failed reading from pipe: %d", __FUNCTION__, status);
+        auto result = m_pipe->Read(kReadSize, 0);
+        if (!result.ok()) {
+            ALOGD("%s: Failed reading from pipe: %d:%d", __FUNCTION__,
+                  result.status());
             return nullptr;
         }
-        if (actual) {
-            m_read = m_readLeft = actual;
+
+        if (result.Unwrap()->actual) {
+            m_read = m_readLeft = result.Unwrap()->actual;
             continue;
         }
-        if (status2 != ZX_ERR_SHOULD_WAIT) {
-            ALOGD("%s: Error reading from pipe: %d", __FUNCTION__, status2);
+        if (result.Unwrap()->res != ZX_ERR_SHOULD_WAIT) {
+            ALOGD("%s: Error reading from pipe: %d", __FUNCTION__,
+                  result.Unwrap()->res);
             return nullptr;
         }
+
         zx_signals_t observed = ZX_SIGNAL_NONE;
-        status = m_event.wait_one(
-            fuchsia::hardware::goldfish::SIGNAL_READABLE |
-            fuchsia::hardware::goldfish::SIGNAL_HANGUP,
+        zx_status_t status = m_event.wait_one(
+            fuchsia_hardware_goldfish::wire::kSignalReadable |
+                fuchsia_hardware_goldfish::wire::kSignalHangup,
             zx::time::infinite(), &observed);
         if (status != ZX_OK) {
             ALOGD("%s: wait_one failed: %d", __FUNCTION__, status);
             return nullptr;
         }
-        if (observed & fuchsia::hardware::goldfish::SIGNAL_HANGUP) {
+        if (observed & fuchsia_hardware_goldfish::wire::kSignalHangup) {
             ALOGD("%s: Remote end hungup", __FUNCTION__);
             return nullptr;
         }
diff --git a/system/OpenglSystemCommon/ThreadInfo.cpp b/system/OpenglSystemCommon/ThreadInfo.cpp
index fea6cb7..5f7372f 100644
--- a/system/OpenglSystemCommon/ThreadInfo.cpp
+++ b/system/OpenglSystemCommon/ThreadInfo.cpp
@@ -13,8 +13,43 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
+
 #include "ThreadInfo.h"
+
+#ifdef HOST_BUILD
+#include "android/base/threads/AndroidThread.h"
+#else
 #include "cutils/threads.h"
+#endif
+
+#include <pthread.h>
+
+#if defined(HOST_BUILD) || defined(GFXSTREAM)
+
+static thread_local EGLThreadInfo sEglThreadInfoThreadLocal;
+
+EGLThreadInfo *goldfish_get_egl_tls()
+{
+    return &sEglThreadInfoThreadLocal;
+}
+
+EGLThreadInfo* getEGLThreadInfo() {
+    return goldfish_get_egl_tls();
+}
+
+int32_t getCurrentThreadId() {
+#ifdef HOST_BUILD
+    return (int32_t)android::base::guest::getCurrentThreadId();
+#else
+    return (int32_t)gettid();
+#endif
+}
+
+void setTlsDestructor(tlsDtorCallback func) {
+    getEGLThreadInfo()->dtor = func;
+}
+
+#else // GFXSTREAM
 
 #ifdef __BIONIC__
 #include <bionic/tls.h>
@@ -27,10 +62,6 @@
 #endif
 #endif
 
-#include <pthread.h>
-
-thread_store_t s_tls = THREAD_STORE_INITIALIZER;
-
 static bool sDefaultTlsDestructorCallback(__attribute__((__unused__)) void* ptr) {
   return true;
 }
@@ -45,7 +76,6 @@
 #endif
         ) {
         EGLThreadInfo *ti = (EGLThreadInfo *)ptr;
-        delete ti->hostConn;
         delete ti;
 #ifdef __ANDROID__
         ((void **)__get_tls())[TLS_SLOT_OPENGL] = NULL;
@@ -57,16 +87,20 @@
     sTlsDestructorCallback = func;
 }
 
+static pthread_key_t s_tls;
+
+static void init_key()
+{
+    pthread_key_create(&s_tls, tlsDestruct);
+    pthread_setspecific(s_tls, new EGLThreadInfo);
+}
+
 EGLThreadInfo *goldfish_get_egl_tls()
 {
-    EGLThreadInfo* ti = (EGLThreadInfo*)thread_store_get(&s_tls);
+   static pthread_once_t once = PTHREAD_ONCE_INIT;
+   pthread_once(&once, init_key);
 
-    if (ti) return ti;
-
-    ti = new EGLThreadInfo();
-    thread_store_set(&s_tls, ti, tlsDestruct);
-
-    return ti;
+   return (EGLThreadInfo *) pthread_getspecific(s_tls);
 }
 
 EGLThreadInfo* getEGLThreadInfo() {
@@ -86,3 +120,5 @@
 int32_t getCurrentThreadId() {
     return (int32_t)gettid();
 }
+
+#endif // !GFXSTREAM
diff --git a/system/OpenglSystemCommon/ThreadInfo.h b/system/OpenglSystemCommon/ThreadInfo.h
index 7d2260a..7f49273 100644
--- a/system/OpenglSystemCommon/ThreadInfo.h
+++ b/system/OpenglSystemCommon/ThreadInfo.h
@@ -22,16 +22,19 @@
 
 struct EGLContext_t;
 
+typedef bool (*tlsDtorCallback)(void*);
+
 struct EGLThreadInfo
 {
-    EGLThreadInfo() : currentContext(NULL), hostConn(NULL), eglError(EGL_SUCCESS) { }
+    EGLThreadInfo() : currentContext(NULL), eglError(EGL_SUCCESS), dtor(0) {}
+    ~EGLThreadInfo() { if (dtor) dtor(this); }
 
     EGLContext_t *currentContext;
-    HostConnection *hostConn;
+    std::unique_ptr<HostConnection> hostConn;
     int           eglError;
+    tlsDtorCallback dtor;
 };
 
-typedef bool (*tlsDtorCallback)(void*);
 void setTlsDestructor(tlsDtorCallback);
 
 extern "C" __attribute__((visibility("default"))) EGLThreadInfo *goldfish_get_egl_tls();
diff --git a/system/OpenglSystemCommon/TraceProviderFuchsia.cpp b/system/OpenglSystemCommon/TraceProviderFuchsia.cpp
new file mode 100644
index 0000000..fc38c0b
--- /dev/null
+++ b/system/OpenglSystemCommon/TraceProviderFuchsia.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "TraceProviderFuchsia.h"
+
+#include <log/log.h>
+
+#include <lib/async-loop/default.h>
+#include <lib/async/cpp/task.h>
+#include <lib/fdio/directory.h>
+#include <lib/zx/channel.h>
+
+#include "services/service_connector.h"
+
+TraceProviderFuchsia::~TraceProviderFuchsia() {
+    if (mTraceProvider) {
+        async::PostTask(mLoop.dispatcher(), [this]() {
+            // trace_provider_.reset() needs to run on loop_'s dispatcher or
+            // else its teardown can be racy and crash.
+            mTraceProvider.reset();
+            // Run Quit() in the loop to ensure this task executes before
+            // JoinThreads() returns and the destructor finishes.
+            mLoop.Quit();
+        });
+    } else {
+        mLoop.Quit();
+    }
+    mLoop.JoinThreads();
+}
+
+TraceProviderFuchsia::TraceProviderFuchsia()
+    : mLoop(&kAsyncLoopConfigNeverAttachToThread) {}
+
+bool TraceProviderFuchsia::Initialize() {
+    // Connect to fuchsia.tracing.provider.Registry service.
+    zx_handle_t client_channel =
+        GetConnectToServiceFunction()("/svc/fuchsia.tracing.provider.Registry");
+    if (client_channel == ZX_HANDLE_INVALID) {
+        ALOGE("Failed to connect to tracing provider service");
+        return false;
+    }
+
+    zx_status_t status = mLoop.StartThread();
+    if (status != ZX_OK) {
+        ALOGE("Failed to start async loop: %d", status);
+        return false;
+    }
+
+    mTraceProvider = std::make_unique<trace::TraceProvider>(
+        zx::channel(client_channel), mLoop.dispatcher());
+    return true;
+}
diff --git a/system/OpenglSystemCommon/TraceProviderFuchsia.h b/system/OpenglSystemCommon/TraceProviderFuchsia.h
new file mode 100644
index 0000000..86b8bdc
--- /dev/null
+++ b/system/OpenglSystemCommon/TraceProviderFuchsia.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __COMMON_TRACE_PROVIDER_FUCHSIA_H
+#define __COMMON_TRACE_PROVIDER_FUCHSIA_H
+
+#ifndef __Fuchsia__
+#error "TraceProviderFuchsia is only supported on Fuchsia!"
+#endif  // __Fuchsia__
+
+#include <memory>
+
+#include <lib/async-loop/cpp/loop.h>
+#include <lib/trace-provider/provider.h>
+
+class TraceProviderFuchsia {
+public:
+    TraceProviderFuchsia();
+    ~TraceProviderFuchsia();
+
+    bool Initialize();
+
+private:
+    async::Loop mLoop;
+    std::unique_ptr<trace::TraceProvider> mTraceProvider;
+};
+
+#endif  // __COMMON_TRACE_PROVIDER_FUCHSIA_H
\ No newline at end of file
diff --git a/system/OpenglSystemCommon/VirtioGpuPipeStream.cpp b/system/OpenglSystemCommon/VirtioGpuPipeStream.cpp
index d3618f3..6143bd5 100644
--- a/system/OpenglSystemCommon/VirtioGpuPipeStream.cpp
+++ b/system/OpenglSystemCommon/VirtioGpuPipeStream.cpp
@@ -74,7 +74,7 @@
 int VirtioGpuPipeStream::connect(const char* serviceName)
 {
     if (m_fd < 0) {
-        m_fd = drmOpenRender(RENDERNODE_MINOR);
+        m_fd = VirtioGpuPipeStream::openRendernode();
         if (m_fd < 0) {
             ERR("%s: failed with fd %d (%s)", __func__, m_fd, strerror(errno));
             return -1;
@@ -120,9 +120,9 @@
     }
 
     if (!m_virtio_mapped) {
-        drm_virtgpu_map map = {
-            .handle = m_virtio_bo,
-        };
+        drm_virtgpu_map map;
+        memset(&map, 0, sizeof(map));
+        map.handle = m_virtio_bo;
 
         int ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_MAP, &map);
         if (ret) {
@@ -154,6 +154,15 @@
     return 0;
 }
 
+int VirtioGpuPipeStream::openRendernode() {
+    int fd = drmOpenRender(RENDERNODE_MINOR);
+    if (fd < 0) {
+            ERR("%s: failed with fd %d (%s)", __func__, fd, strerror(errno));
+        return -1;
+    }
+    return fd;
+}
+
 uint64_t VirtioGpuPipeStream::initProcessPipe() {
     connect("pipe:GLProcessPipe");
     int32_t confirmInt = 100;
diff --git a/system/OpenglSystemCommon/VirtioGpuPipeStream.h b/system/OpenglSystemCommon/VirtioGpuPipeStream.h
index 3f3c537..91c76fc 100644
--- a/system/OpenglSystemCommon/VirtioGpuPipeStream.h
+++ b/system/OpenglSystemCommon/VirtioGpuPipeStream.h
@@ -33,6 +33,7 @@
     explicit VirtioGpuPipeStream(size_t bufsize = 10000);
     ~VirtioGpuPipeStream();
     int connect(const char* serviceName = 0);
+    static int openRendernode();
     uint64_t initProcessPipe();
 
     virtual void *allocBuffer(size_t minSize);
diff --git a/system/OpenglSystemCommon/VirtioGpuStream.cpp b/system/OpenglSystemCommon/VirtioGpuStream.cpp
index a0876dc..bbf9dd2 100644
--- a/system/OpenglSystemCommon/VirtioGpuStream.cpp
+++ b/system/OpenglSystemCommon/VirtioGpuStream.cpp
@@ -149,9 +149,10 @@
     }
 
     if (!m_cmdResp) {
-        drm_virtgpu_map map = {
-            .handle = m_cmdResp_bo,
-        };
+        drm_virtgpu_map map;
+        memset(&map, 0, sizeof(map));
+        map.handle = m_cmdResp_bo;
+
         int ret = drmIoctl(m_fd, DRM_IOCTL_VIRTGPU_MAP, &map);
         if (ret) {
             ERR("%s: failed with %d mapping command response buffer (%s)",
@@ -282,7 +283,7 @@
 
     if (m_flushPos + len > cmd->cmdSize) {
         ERR("%s: writeFully len %zu would overflow the command bounds, "
-            "cmd_pos=%zu, flush_pos=%zu, cmdsize=%zu, lethal error, exiting",
+            "cmd_pos=%zu, flush_pos=%zu, cmdsize=%" PRIu32 ", lethal error, exiting",
             __func__, len, m_cmdPos, m_flushPos, cmd->cmdSize);
         abort();
     }
@@ -344,7 +345,7 @@
 
     // Most likely a protocol implementation error
     if (m_cmdResp->cmdSize - sizeof(*m_cmdResp) < m_cmdRespPos + len) {
-        ERR("%s: failed, op %zu, len %zu, cmdSize %zu, pos %zu, lethal "
+        ERR("%s: failed, op %" PRIu32 ", len %zu, cmdSize %" PRIu32 ", pos %zu, lethal "
             "error, exiting.", __func__, m_cmdResp->op, len,
             m_cmdResp->cmdSize, m_cmdRespPos);
         abort();
@@ -380,7 +381,7 @@
 
         // Should never happen
         if (pos + cmd->cmdSize > m_bufSize) {
-            ERR("%s: failed, pos %zu, cmdSize %zu, bufSize %zu, lethal "
+            ERR("%s: failed, pos %zu, cmdSize %" PRIu32 ", bufSize %zu, lethal "
                 "error, exiting.", __func__, pos, cmd->cmdSize, m_bufSize);
             abort();
         }
diff --git a/system/OpenglSystemCommon/address_space_graphics_types.h b/system/OpenglSystemCommon/address_space_graphics_types.h
index 1ebad34..42c00f6 100644
--- a/system/OpenglSystemCommon/address_space_graphics_types.h
+++ b/system/OpenglSystemCommon/address_space_graphics_types.h
@@ -102,6 +102,9 @@
 
     // Error: Something weird happened and we need to exit.
     ASG_HOST_STATE_ERROR = 3,
+
+    // Host is rendering
+    ASG_HOST_STATE_RENDERING = 4,
 };
 
 struct asg_ring_config;
@@ -124,7 +127,7 @@
 // Helper function that will be common between guest and host:
 // Given ring storage and a write buffer, returns asg_context that
 // is the correct view into it.
-static struct asg_context asg_context_create(
+static inline struct asg_context asg_context_create(
     char* ring_storage,
     char* buffer,
     uint32_t buffer_size) {
@@ -342,6 +345,7 @@
     // version and can proceed with a protocol that works for both.
     // size (in): the version of the guest
     // size (out): the version of the host
+    // metadata (out): hostmem id
     // After this command runs, the consumer is
     // implicitly created.
     ASG_SET_VERSION = 2,
@@ -349,6 +353,9 @@
     // Ping(notiy_available): Wakes up the consumer from sleep so it
     // can read data via toHost
     ASG_NOTIFY_AVAILABLE = 3,
+
+    // Retrieve the host config
+    ASG_GET_CONFIG = 4,
 };
 
 } // extern "C"
diff --git a/system/codecs/c2/decoders/avcdec/Android.bp b/system/codecs/c2/decoders/avcdec/Android.bp
new file mode 100644
index 0000000..78ffd82
--- /dev/null
+++ b/system/codecs/c2/decoders/avcdec/Android.bp
@@ -0,0 +1,30 @@
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "device_generic_goldfish-opengl_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["device_generic_goldfish-opengl_license"],
+}
+
+cc_library_shared {
+    name: "libcodec2_goldfish_avcdec",
+    vendor: true,
+    defaults: [
+        "libcodec2_goldfish-defaults",
+    ],
+
+    srcs: ["C2GoldfishAvcDec.cpp",
+        "MediaH264Decoder.cpp",
+    ],
+
+    shared_libs: [
+	    "android.hardware.graphics.allocator@3.0",
+		"android.hardware.graphics.mapper@3.0",
+        "libgoldfish_codec2_store",
+    ],
+
+   header_libs: [
+    "libgralloc_cb.ranchu",
+    ],
+}
diff --git a/system/codecs/c2/decoders/avcdec/C2GoldfishAvcDec.cpp b/system/codecs/c2/decoders/avcdec/C2GoldfishAvcDec.cpp
new file mode 100644
index 0000000..6c57922
--- /dev/null
+++ b/system/codecs/c2/decoders/avcdec/C2GoldfishAvcDec.cpp
@@ -0,0 +1,1105 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2GoldfishAvcDec"
+#include <inttypes.h>
+#include <log/log.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+
+#include <C2AllocatorGralloc.h>
+#include <C2PlatformSupport.h>
+//#include <android/hardware/graphics/common/1.0/types.h>
+
+#include <android/hardware/graphics/allocator/3.0/IAllocator.h>
+#include <android/hardware/graphics/mapper/3.0/IMapper.h>
+#include <hidl/LegacySupport.h>
+
+#include <media/stagefright/foundation/MediaDefs.h>
+
+#include <C2Debug.h>
+#include <C2PlatformSupport.h>
+#include <Codec2Mapper.h>
+#include <SimpleC2Interface.h>
+#include <goldfish_codec2/store/GoldfishComponentStore.h>
+#include <gralloc_cb_bp.h>
+
+#include <color_buffer_utils.h>
+
+#include "C2GoldfishAvcDec.h"
+
+#define DEBUG 0
+#if DEBUG
+#define DDD(...) ALOGD(__VA_ARGS__)
+#else
+#define DDD(...) ((void)0)
+#endif
+
+using ::android::hardware::graphics::common::V1_0::BufferUsage;
+using ::android::hardware::graphics::common::V1_2::PixelFormat;
+
+namespace android {
+
+namespace {
+constexpr size_t kMinInputBufferSize = 2 * 1024 * 1024;
+constexpr char COMPONENT_NAME[] = "c2.goldfish.h264.decoder";
+constexpr uint32_t kDefaultOutputDelay = 8;
+/* avc specification allows for a maximum delay of 16 frames.
+   As soft avc decoder supports interlaced, this delay would be 32 fields.
+   And avc decoder implementation has an additional delay of 2 decode calls.
+   So total maximum output delay is 34 */
+constexpr uint32_t kMaxOutputDelay = 34;
+constexpr uint32_t kMinInputBytes = 4;
+} // namespace
+
+class C2GoldfishAvcDec::IntfImpl : public SimpleInterface<void>::BaseParams {
+  public:
+    explicit IntfImpl(const std::shared_ptr<C2ReflectorHelper> &helper)
+        : SimpleInterface<void>::BaseParams(
+              helper, COMPONENT_NAME, C2Component::KIND_DECODER,
+              C2Component::DOMAIN_VIDEO, MEDIA_MIMETYPE_VIDEO_AVC) {
+        noPrivateBuffers(); // TODO: account for our buffers here
+        noInputReferences();
+        noOutputReferences();
+        noInputLatency();
+        noTimeStretch();
+
+        // TODO: Proper support for reorder depth.
+        addParameter(
+            DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
+                .withDefault(
+                    new C2PortActualDelayTuning::output(kDefaultOutputDelay))
+                .withFields({C2F(mActualOutputDelay, value)
+                                 .inRange(0, kMaxOutputDelay)})
+                .withSetter(
+                    Setter<
+                        decltype(*mActualOutputDelay)>::StrictValueWithNoDeps)
+                .build());
+
+        // TODO: output latency and reordering
+
+        addParameter(DefineParam(mAttrib, C2_PARAMKEY_COMPONENT_ATTRIBUTES)
+                         .withConstValue(new C2ComponentAttributesSetting(
+                             C2Component::ATTRIB_IS_TEMPORAL))
+                         .build());
+
+        // coded and output picture size is the same for this codec
+        addParameter(
+            DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+                .withDefault(new C2StreamPictureSizeInfo::output(0u, 320, 240))
+                .withFields({
+                    C2F(mSize, width).inRange(2, 4080, 2),
+                    C2F(mSize, height).inRange(2, 4080, 2),
+                })
+                .withSetter(SizeSetter)
+                .build());
+
+        addParameter(DefineParam(mMaxSize, C2_PARAMKEY_MAX_PICTURE_SIZE)
+                         .withDefault(new C2StreamMaxPictureSizeTuning::output(
+                             0u, 320, 240))
+                         .withFields({
+                             C2F(mSize, width).inRange(2, 4080, 2),
+                             C2F(mSize, height).inRange(2, 4080, 2),
+                         })
+                         .withSetter(MaxPictureSizeSetter, mSize)
+                         .build());
+
+        addParameter(
+            DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+                .withDefault(new C2StreamProfileLevelInfo::input(
+                    0u, C2Config::PROFILE_AVC_CONSTRAINED_BASELINE,
+                    C2Config::LEVEL_AVC_5_2))
+                .withFields(
+                    {C2F(mProfileLevel, profile)
+                         .oneOf({C2Config::PROFILE_AVC_CONSTRAINED_BASELINE,
+                                 C2Config::PROFILE_AVC_BASELINE,
+                                 C2Config::PROFILE_AVC_MAIN,
+                                 C2Config::PROFILE_AVC_CONSTRAINED_HIGH,
+                                 C2Config::PROFILE_AVC_PROGRESSIVE_HIGH,
+                                 C2Config::PROFILE_AVC_HIGH}),
+                     C2F(mProfileLevel, level)
+                         .oneOf(
+                             {C2Config::LEVEL_AVC_1, C2Config::LEVEL_AVC_1B,
+                              C2Config::LEVEL_AVC_1_1, C2Config::LEVEL_AVC_1_2,
+                              C2Config::LEVEL_AVC_1_3, C2Config::LEVEL_AVC_2,
+                              C2Config::LEVEL_AVC_2_1, C2Config::LEVEL_AVC_2_2,
+                              C2Config::LEVEL_AVC_3, C2Config::LEVEL_AVC_3_1,
+                              C2Config::LEVEL_AVC_3_2, C2Config::LEVEL_AVC_4,
+                              C2Config::LEVEL_AVC_4_1, C2Config::LEVEL_AVC_4_2,
+                              C2Config::LEVEL_AVC_5, C2Config::LEVEL_AVC_5_1,
+                              C2Config::LEVEL_AVC_5_2})})
+                .withSetter(ProfileLevelSetter, mSize)
+                .build());
+
+        addParameter(
+            DefineParam(mMaxInputSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
+                .withDefault(new C2StreamMaxBufferSizeInfo::input(
+                    0u, kMinInputBufferSize))
+                .withFields({
+                    C2F(mMaxInputSize, value).any(),
+                })
+                .calculatedAs(MaxInputSizeSetter, mMaxSize)
+                .build());
+
+        C2ChromaOffsetStruct locations[1] = {
+            C2ChromaOffsetStruct::ITU_YUV_420_0()};
+        std::shared_ptr<C2StreamColorInfo::output> defaultColorInfo =
+            C2StreamColorInfo::output::AllocShared(1u, 0u, 8u /* bitDepth */,
+                                                   C2Color::YUV_420);
+        memcpy(defaultColorInfo->m.locations, locations, sizeof(locations));
+
+        defaultColorInfo = C2StreamColorInfo::output::AllocShared(
+            {C2ChromaOffsetStruct::ITU_YUV_420_0()}, 0u, 8u /* bitDepth */,
+            C2Color::YUV_420);
+        helper->addStructDescriptors<C2ChromaOffsetStruct>();
+
+        addParameter(DefineParam(mColorInfo, C2_PARAMKEY_CODED_COLOR_INFO)
+                         .withConstValue(defaultColorInfo)
+                         .build());
+
+        addParameter(
+            DefineParam(mDefaultColorAspects, C2_PARAMKEY_DEFAULT_COLOR_ASPECTS)
+                .withDefault(new C2StreamColorAspectsTuning::output(
+                    0u, C2Color::RANGE_UNSPECIFIED,
+                    C2Color::PRIMARIES_UNSPECIFIED,
+                    C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
+                .withFields({C2F(mDefaultColorAspects, range)
+                                 .inRange(C2Color::RANGE_UNSPECIFIED,
+                                          C2Color::RANGE_OTHER),
+                             C2F(mDefaultColorAspects, primaries)
+                                 .inRange(C2Color::PRIMARIES_UNSPECIFIED,
+                                          C2Color::PRIMARIES_OTHER),
+                             C2F(mDefaultColorAspects, transfer)
+                                 .inRange(C2Color::TRANSFER_UNSPECIFIED,
+                                          C2Color::TRANSFER_OTHER),
+                             C2F(mDefaultColorAspects, matrix)
+                                 .inRange(C2Color::MATRIX_UNSPECIFIED,
+                                          C2Color::MATRIX_OTHER)})
+                .withSetter(DefaultColorAspectsSetter)
+                .build());
+
+        addParameter(
+            DefineParam(mCodedColorAspects, C2_PARAMKEY_VUI_COLOR_ASPECTS)
+                .withDefault(new C2StreamColorAspectsInfo::input(
+                    0u, C2Color::RANGE_LIMITED, C2Color::PRIMARIES_UNSPECIFIED,
+                    C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
+                .withFields({C2F(mCodedColorAspects, range)
+                                 .inRange(C2Color::RANGE_UNSPECIFIED,
+                                          C2Color::RANGE_OTHER),
+                             C2F(mCodedColorAspects, primaries)
+                                 .inRange(C2Color::PRIMARIES_UNSPECIFIED,
+                                          C2Color::PRIMARIES_OTHER),
+                             C2F(mCodedColorAspects, transfer)
+                                 .inRange(C2Color::TRANSFER_UNSPECIFIED,
+                                          C2Color::TRANSFER_OTHER),
+                             C2F(mCodedColorAspects, matrix)
+                                 .inRange(C2Color::MATRIX_UNSPECIFIED,
+                                          C2Color::MATRIX_OTHER)})
+                .withSetter(CodedColorAspectsSetter)
+                .build());
+
+        addParameter(
+            DefineParam(mColorAspects, C2_PARAMKEY_COLOR_ASPECTS)
+                .withDefault(new C2StreamColorAspectsInfo::output(
+                    0u, C2Color::RANGE_UNSPECIFIED,
+                    C2Color::PRIMARIES_UNSPECIFIED,
+                    C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
+                .withFields({C2F(mColorAspects, range)
+                                 .inRange(C2Color::RANGE_UNSPECIFIED,
+                                          C2Color::RANGE_OTHER),
+                             C2F(mColorAspects, primaries)
+                                 .inRange(C2Color::PRIMARIES_UNSPECIFIED,
+                                          C2Color::PRIMARIES_OTHER),
+                             C2F(mColorAspects, transfer)
+                                 .inRange(C2Color::TRANSFER_UNSPECIFIED,
+                                          C2Color::TRANSFER_OTHER),
+                             C2F(mColorAspects, matrix)
+                                 .inRange(C2Color::MATRIX_UNSPECIFIED,
+                                          C2Color::MATRIX_OTHER)})
+                .withSetter(ColorAspectsSetter, mDefaultColorAspects,
+                            mCodedColorAspects)
+                .build());
+
+        // TODO: support more formats?
+        addParameter(DefineParam(mPixelFormat, C2_PARAMKEY_PIXEL_FORMAT)
+                         .withConstValue(new C2StreamPixelFormatInfo::output(
+                             0u, HAL_PIXEL_FORMAT_YCBCR_420_888))
+                         .build());
+    }
+    static C2R SizeSetter(bool mayBlock,
+                          const C2P<C2StreamPictureSizeInfo::output> &oldMe,
+                          C2P<C2StreamPictureSizeInfo::output> &me) {
+        (void)mayBlock;
+        DDD("calling sizesetter now %d", oldMe.v.height);
+        DDD("new calling sizesetter now %d", me.v.height);
+
+        C2R res = C2R::Ok();
+        if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
+            res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.width)));
+            me.set().width = oldMe.v.width;
+        }
+        if (!me.F(me.v.height).supportsAtAll(me.v.height)) {
+            res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.height)));
+            me.set().height = oldMe.v.height;
+        }
+        return res;
+    }
+
+    static C2R
+    MaxPictureSizeSetter(bool mayBlock,
+                         C2P<C2StreamMaxPictureSizeTuning::output> &me,
+                         const C2P<C2StreamPictureSizeInfo::output> &size) {
+        (void)mayBlock;
+        // TODO: get max width/height from the size's field helpers vs.
+        // hardcoding
+        me.set().width = c2_min(c2_max(me.v.width, size.v.width), 4080u);
+        me.set().height = c2_min(c2_max(me.v.height, size.v.height), 4080u);
+        return C2R::Ok();
+    }
+
+    static C2R MaxInputSizeSetter(
+        bool mayBlock, C2P<C2StreamMaxBufferSizeInfo::input> &me,
+        const C2P<C2StreamMaxPictureSizeTuning::output> &maxSize) {
+        (void)mayBlock;
+        // assume compression ratio of 2
+        me.set().value = c2_max((((maxSize.v.width + 15) / 16) *
+                                 ((maxSize.v.height + 15) / 16) * 192),
+                                kMinInputBufferSize);
+        return C2R::Ok();
+    }
+
+    static C2R
+    ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::input> &me,
+                       const C2P<C2StreamPictureSizeInfo::output> &size) {
+        (void)mayBlock;
+        (void)size;
+        (void)me; // TODO: validate
+        return C2R::Ok();
+    }
+
+    static C2R
+    DefaultColorAspectsSetter(bool mayBlock,
+                              C2P<C2StreamColorAspectsTuning::output> &me) {
+        (void)mayBlock;
+        if (me.v.range > C2Color::RANGE_OTHER) {
+            me.set().range = C2Color::RANGE_OTHER;
+        }
+        if (me.v.primaries > C2Color::PRIMARIES_OTHER) {
+            me.set().primaries = C2Color::PRIMARIES_OTHER;
+        }
+        if (me.v.transfer > C2Color::TRANSFER_OTHER) {
+            me.set().transfer = C2Color::TRANSFER_OTHER;
+        }
+        if (me.v.matrix > C2Color::MATRIX_OTHER) {
+            me.set().matrix = C2Color::MATRIX_OTHER;
+        }
+        return C2R::Ok();
+    }
+
+    static C2R
+    CodedColorAspectsSetter(bool mayBlock,
+                            C2P<C2StreamColorAspectsInfo::input> &me) {
+        (void)mayBlock;
+        if (me.v.range > C2Color::RANGE_OTHER) {
+            me.set().range = C2Color::RANGE_OTHER;
+        }
+        if (me.v.primaries > C2Color::PRIMARIES_OTHER) {
+            me.set().primaries = C2Color::PRIMARIES_OTHER;
+        }
+        if (me.v.transfer > C2Color::TRANSFER_OTHER) {
+            me.set().transfer = C2Color::TRANSFER_OTHER;
+        }
+        if (me.v.matrix > C2Color::MATRIX_OTHER) {
+            me.set().matrix = C2Color::MATRIX_OTHER;
+        }
+        return C2R::Ok();
+    }
+
+    static C2R
+    ColorAspectsSetter(bool mayBlock, C2P<C2StreamColorAspectsInfo::output> &me,
+                       const C2P<C2StreamColorAspectsTuning::output> &def,
+                       const C2P<C2StreamColorAspectsInfo::input> &coded) {
+        (void)mayBlock;
+        // take default values for all unspecified fields, and coded values for
+        // specified ones
+        me.set().range =
+            coded.v.range == RANGE_UNSPECIFIED ? def.v.range : coded.v.range;
+        me.set().primaries = coded.v.primaries == PRIMARIES_UNSPECIFIED
+                                 ? def.v.primaries
+                                 : coded.v.primaries;
+        me.set().transfer = coded.v.transfer == TRANSFER_UNSPECIFIED
+                                ? def.v.transfer
+                                : coded.v.transfer;
+        me.set().matrix = coded.v.matrix == MATRIX_UNSPECIFIED ? def.v.matrix
+                                                               : coded.v.matrix;
+        return C2R::Ok();
+    }
+
+    std::shared_ptr<C2StreamColorAspectsInfo::output> getColorAspects_l() {
+        return mColorAspects;
+    }
+
+    int width() const { return mSize->width; }
+
+    int height() const { return mSize->height; }
+
+  private:
+    std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
+    std::shared_ptr<C2StreamPictureSizeInfo::output> mSize;
+    std::shared_ptr<C2StreamMaxPictureSizeTuning::output> mMaxSize;
+    std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mMaxInputSize;
+    std::shared_ptr<C2StreamColorInfo::output> mColorInfo;
+    std::shared_ptr<C2StreamColorAspectsInfo::input> mCodedColorAspects;
+    std::shared_ptr<C2StreamColorAspectsTuning::output> mDefaultColorAspects;
+    std::shared_ptr<C2StreamColorAspectsInfo::output> mColorAspects;
+    std::shared_ptr<C2StreamPixelFormatInfo::output> mPixelFormat;
+};
+
+static void *ivd_aligned_malloc(void *ctxt, uint32_t alignment, uint32_t size) {
+    (void)ctxt;
+    return memalign(alignment, size);
+}
+
+static void ivd_aligned_free(void *ctxt, void *mem) {
+    (void)ctxt;
+    free(mem);
+}
+
+C2GoldfishAvcDec::C2GoldfishAvcDec(const char *name, c2_node_id_t id,
+                                   const std::shared_ptr<IntfImpl> &intfImpl)
+    : SimpleC2Component(
+          std::make_shared<SimpleInterface<IntfImpl>>(name, id, intfImpl)),
+      mIntf(intfImpl), mOutBufferFlush(nullptr), mWidth(1920), mHeight(1080),
+      mHeaderDecoded(false), mOutIndex(0u) {
+    mWidth = mIntf->width();
+    mHeight = mIntf->height();
+    DDD("creating avc decoder now w %d h %d", mWidth, mHeight);
+}
+
+C2GoldfishAvcDec::~C2GoldfishAvcDec() { onRelease(); }
+
+c2_status_t C2GoldfishAvcDec::onInit() {
+    status_t err = initDecoder();
+    return err == OK ? C2_OK : C2_CORRUPTED;
+}
+
+c2_status_t C2GoldfishAvcDec::onStop() {
+    if (OK != resetDecoder())
+        return C2_CORRUPTED;
+    resetPlugin();
+    return C2_OK;
+}
+
+void C2GoldfishAvcDec::onReset() { (void)onStop(); }
+
+void C2GoldfishAvcDec::onRelease() {
+    deleteContext();
+    if (mOutBlock) {
+        mOutBlock.reset();
+    }
+}
+
+void C2GoldfishAvcDec::decodeHeaderAfterFlush() {
+    if (mContext && !mCsd0.empty() && !mCsd1.empty()) {
+        mContext->decodeFrame(&(mCsd0[0]), mCsd0.size(), 0);
+        mContext->decodeFrame(&(mCsd1[0]), mCsd1.size(), 0);
+        DDD("resending csd0 and csd1");
+    }
+}
+
+c2_status_t C2GoldfishAvcDec::onFlush_sm() {
+    if (OK != setFlushMode())
+        return C2_CORRUPTED;
+
+    if (!mContext) {
+        // just ignore if context is not even created
+        return C2_OK;
+    }
+
+    uint32_t bufferSize = mStride * mHeight * 3 / 2;
+    mOutBufferFlush = (uint8_t *)ivd_aligned_malloc(nullptr, 128, bufferSize);
+    if (!mOutBufferFlush) {
+        ALOGE("could not allocate tmp output buffer (for flush) of size %u ",
+              bufferSize);
+        return C2_NO_MEMORY;
+    }
+
+    while (true) {
+        mPts = 0;
+        setDecodeArgs(nullptr, nullptr, 0, 0, 0);
+        mImg = mContext->getImage();
+        if (mImg.data == nullptr) {
+            resetPlugin();
+            break;
+        }
+    }
+
+    if (mOutBufferFlush) {
+        ivd_aligned_free(nullptr, mOutBufferFlush);
+        mOutBufferFlush = nullptr;
+    }
+
+    deleteContext();
+    return C2_OK;
+}
+
+status_t C2GoldfishAvcDec::createDecoder() {
+
+    DDD("creating avc context now w %d h %d", mWidth, mHeight);
+    if (mEnableAndroidNativeBuffers) {
+        mContext.reset(new MediaH264Decoder(RenderMode::RENDER_BY_HOST_GPU));
+    } else {
+        mContext.reset(new MediaH264Decoder(RenderMode::RENDER_BY_GUEST_CPU));
+    }
+    mContext->initH264Context(mWidth, mHeight, mWidth, mHeight,
+                              MediaH264Decoder::PixelFormat::YUV420P);
+    return OK;
+}
+
+status_t C2GoldfishAvcDec::setParams(size_t stride) {
+    (void)stride;
+    return OK;
+}
+
+status_t C2GoldfishAvcDec::initDecoder() {
+    //    if (OK != createDecoder()) return UNKNOWN_ERROR;
+    mStride = ALIGN2(mWidth);
+    mSignalledError = false;
+    resetPlugin();
+
+    return OK;
+}
+
+bool C2GoldfishAvcDec::setDecodeArgs(C2ReadView *inBuffer,
+                                     C2GraphicView *outBuffer, size_t inOffset,
+                                     size_t inSize, uint32_t tsMarker) {
+    uint32_t displayStride = mStride;
+    (void)inBuffer;
+    (void)inOffset;
+    (void)inSize;
+    (void)tsMarker;
+    if (outBuffer) {
+        C2PlanarLayout layout;
+        layout = outBuffer->layout();
+        displayStride = layout.planes[C2PlanarLayout::PLANE_Y].rowInc;
+    }
+
+    if (inBuffer) {
+        //= tsMarker;
+        mInPBuffer = const_cast<uint8_t *>(inBuffer->data() + inOffset);
+        mInPBufferSize = inSize;
+        mInTsMarker = tsMarker;
+        insertPts(tsMarker, mPts);
+    }
+
+    // uint32_t displayHeight = mHeight;
+    // size_t lumaSize = displayStride * displayHeight;
+    // size_t chromaSize = lumaSize >> 2;
+
+    if (mStride != displayStride) {
+        mStride = displayStride;
+        if (OK != setParams(mStride))
+            return false;
+    }
+
+    return true;
+}
+
+status_t C2GoldfishAvcDec::setFlushMode() {
+    if (mContext) {
+        mContext->flush();
+    }
+    mHeaderDecoded = false;
+    return OK;
+}
+
+status_t C2GoldfishAvcDec::resetDecoder() {
+    mStride = 0;
+    mSignalledError = false;
+    mHeaderDecoded = false;
+    deleteContext();
+
+    return OK;
+}
+
+void C2GoldfishAvcDec::resetPlugin() {
+    mSignalledOutputEos = false;
+    gettimeofday(&mTimeStart, nullptr);
+    gettimeofday(&mTimeEnd, nullptr);
+}
+
+void C2GoldfishAvcDec::deleteContext() {
+    if (mContext) {
+        mContext->destroyH264Context();
+        mContext.reset(nullptr);
+        mPts2Index.clear();
+        mOldPts2Index.clear();
+        mIndex2Pts.clear();
+    }
+}
+
+static void fillEmptyWork(const std::unique_ptr<C2Work> &work) {
+    uint32_t flags = 0;
+    if (work->input.flags & C2FrameData::FLAG_END_OF_STREAM) {
+        flags |= C2FrameData::FLAG_END_OF_STREAM;
+        DDD("signalling eos");
+    }
+    DDD("fill empty work");
+    work->worklets.front()->output.flags = (C2FrameData::flags_t)flags;
+    work->worklets.front()->output.buffers.clear();
+    work->worklets.front()->output.ordinal = work->input.ordinal;
+    work->workletsProcessed = 1u;
+}
+
+void C2GoldfishAvcDec::finishWork(uint64_t index,
+                                  const std::unique_ptr<C2Work> &work) {
+    std::shared_ptr<C2Buffer> buffer =
+        createGraphicBuffer(std::move(mOutBlock), C2Rect(mWidth, mHeight));
+    mOutBlock = nullptr;
+    {
+        IntfImpl::Lock lock = mIntf->lock();
+        buffer->setInfo(mIntf->getColorAspects_l());
+    }
+
+    class FillWork {
+      public:
+        FillWork(uint32_t flags, C2WorkOrdinalStruct ordinal,
+                 const std::shared_ptr<C2Buffer> &buffer)
+            : mFlags(flags), mOrdinal(ordinal), mBuffer(buffer) {}
+        ~FillWork() = default;
+
+        void operator()(const std::unique_ptr<C2Work> &work) {
+            work->worklets.front()->output.flags = (C2FrameData::flags_t)mFlags;
+            work->worklets.front()->output.buffers.clear();
+            work->worklets.front()->output.ordinal = mOrdinal;
+            work->workletsProcessed = 1u;
+            work->result = C2_OK;
+            if (mBuffer) {
+                work->worklets.front()->output.buffers.push_back(mBuffer);
+            }
+            DDD("timestamp = %lld, index = %lld, w/%s buffer",
+                mOrdinal.timestamp.peekll(), mOrdinal.frameIndex.peekll(),
+                mBuffer ? "" : "o");
+        }
+
+      private:
+        const uint32_t mFlags;
+        const C2WorkOrdinalStruct mOrdinal;
+        const std::shared_ptr<C2Buffer> mBuffer;
+    };
+
+    auto fillWork = [buffer](const std::unique_ptr<C2Work> &work) {
+        work->worklets.front()->output.flags = (C2FrameData::flags_t)0;
+        work->worklets.front()->output.buffers.clear();
+        work->worklets.front()->output.buffers.push_back(buffer);
+        work->worklets.front()->output.ordinal = work->input.ordinal;
+        work->workletsProcessed = 1u;
+    };
+    if (work && c2_cntr64_t(index) == work->input.ordinal.frameIndex) {
+        bool eos = ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) != 0);
+        // TODO: Check if cloneAndSend can be avoided by tracking number of
+        // frames remaining
+        if (eos) {
+            if (buffer) {
+                mOutIndex = index;
+                C2WorkOrdinalStruct outOrdinal = work->input.ordinal;
+                DDD("%s %d: cloneAndSend ", __func__, __LINE__);
+                cloneAndSend(
+                    mOutIndex, work,
+                    FillWork(C2FrameData::FLAG_INCOMPLETE, outOrdinal, buffer));
+                buffer.reset();
+            }
+        } else {
+            DDD("%s %d: fill", __func__, __LINE__);
+            fillWork(work);
+        }
+    } else {
+        DDD("%s %d: finish", __func__, __LINE__);
+        finish(index, fillWork);
+    }
+}
+
+c2_status_t
+C2GoldfishAvcDec::ensureDecoderState(const std::shared_ptr<C2BlockPool> &pool) {
+    if (mOutBlock && (mOutBlock->width() != ALIGN2(mWidth) ||
+                      mOutBlock->height() != mHeight)) {
+        mOutBlock.reset();
+    }
+    if (!mOutBlock) {
+        uint32_t format = HAL_PIXEL_FORMAT_YCBCR_420_888;
+        C2MemoryUsage usage = {C2MemoryUsage::CPU_READ,
+                               C2MemoryUsage::CPU_WRITE};
+        usage.expected = (uint64_t)(BufferUsage::GPU_DATA_BUFFER);
+        // C2MemoryUsage usage = {(unsigned
+        // int)(BufferUsage::GPU_DATA_BUFFER)};// { C2MemoryUsage::CPU_READ,
+        // C2MemoryUsage::CPU_WRITE };
+        c2_status_t err = pool->fetchGraphicBlock(ALIGN2(mWidth), mHeight,
+                                                  format, usage, &mOutBlock);
+        if (err != C2_OK) {
+            ALOGE("fetchGraphicBlock for Output failed with status %d", err);
+            return err;
+        }
+        if (mEnableAndroidNativeBuffers) {
+            auto c2Handle = mOutBlock->handle();
+            native_handle_t *grallocHandle =
+                UnwrapNativeCodec2GrallocHandle(c2Handle);
+            mHostColorBufferId = getColorBufferHandle(grallocHandle);
+            DDD("found handle %d", mHostColorBufferId);
+        }
+        DDD("provided (%dx%d) required (%dx%d)", mOutBlock->width(),
+            mOutBlock->height(), ALIGN2(mWidth), mHeight);
+    }
+
+    return C2_OK;
+}
+
+void C2GoldfishAvcDec::checkMode(const std::shared_ptr<C2BlockPool> &pool) {
+    mWidth = mIntf->width();
+    mHeight = mIntf->height();
+    {
+        // now get the block
+        constexpr uint32_t format = HAL_PIXEL_FORMAT_YCBCR_420_888;
+        std::shared_ptr<C2GraphicBlock> block;
+        C2MemoryUsage usage = {C2MemoryUsage::CPU_READ,
+                               C2MemoryUsage::CPU_WRITE};
+        usage.expected = (uint64_t)(BufferUsage::GPU_DATA_BUFFER);
+
+        c2_status_t err = pool->fetchGraphicBlock(align(mWidth, 16), mHeight,
+                                                  format, usage, &block);
+        if (err != C2_OK) {
+            ALOGE("fetchGraphicBlock for Output failed with status %d", err);
+            return;
+        }
+        auto c2Handle = block->handle();
+        native_handle_t *grallocHandle =
+            UnwrapNativeCodec2GrallocHandle(c2Handle);
+        int hostColorBufferId = getColorBufferHandle(grallocHandle);
+        if (hostColorBufferId > 0) {
+            DDD("decoding to host color buffer");
+            mEnableAndroidNativeBuffers = true;
+        } else {
+            DDD("decoding to guest byte buffer");
+            mEnableAndroidNativeBuffers = false;
+        }
+    }
+}
+
+void C2GoldfishAvcDec::getVuiParams(h264_image_t &img) {
+
+    VuiColorAspects vuiColorAspects;
+    vuiColorAspects.primaries = img.color_primaries;
+    vuiColorAspects.transfer = img.color_trc;
+    vuiColorAspects.coeffs = img.colorspace;
+    vuiColorAspects.fullRange = img.color_range == 2 ? true : false;
+
+    // convert vui aspects to C2 values if changed
+    if (!(vuiColorAspects == mBitstreamColorAspects)) {
+        mBitstreamColorAspects = vuiColorAspects;
+        ColorAspects sfAspects;
+        C2StreamColorAspectsInfo::input codedAspects = {0u};
+        ColorUtils::convertIsoColorAspectsToCodecAspects(
+            vuiColorAspects.primaries, vuiColorAspects.transfer,
+            vuiColorAspects.coeffs, vuiColorAspects.fullRange, sfAspects);
+        if (!C2Mapper::map(sfAspects.mPrimaries, &codedAspects.primaries)) {
+            codedAspects.primaries = C2Color::PRIMARIES_UNSPECIFIED;
+        }
+        if (!C2Mapper::map(sfAspects.mRange, &codedAspects.range)) {
+            codedAspects.range = C2Color::RANGE_UNSPECIFIED;
+        }
+        if (!C2Mapper::map(sfAspects.mMatrixCoeffs, &codedAspects.matrix)) {
+            codedAspects.matrix = C2Color::MATRIX_UNSPECIFIED;
+        }
+        if (!C2Mapper::map(sfAspects.mTransfer, &codedAspects.transfer)) {
+            codedAspects.transfer = C2Color::TRANSFER_UNSPECIFIED;
+        }
+        std::vector<std::unique_ptr<C2SettingResult>> failures;
+        (void)mIntf->config({&codedAspects}, C2_MAY_BLOCK, &failures);
+    }
+}
+
+void C2GoldfishAvcDec::copyImageData(h264_image_t &img) {
+    getVuiParams(img);
+    if (mEnableAndroidNativeBuffers)
+        return;
+
+    auto writeView = mOutBlock->map().get();
+    if (writeView.error()) {
+        ALOGE("graphic view map failed %d", writeView.error());
+        return;
+    }
+    size_t dstYStride = writeView.layout().planes[C2PlanarLayout::PLANE_Y].rowInc;
+    size_t dstUVStride = writeView.layout().planes[C2PlanarLayout::PLANE_U].rowInc;
+
+    uint8_t *pYBuffer = const_cast<uint8_t *>(writeView.data()[C2PlanarLayout::PLANE_Y]);
+    uint8_t *pUBuffer = const_cast<uint8_t *>(writeView.data()[C2PlanarLayout::PLANE_U]);
+    uint8_t *pVBuffer = const_cast<uint8_t *>(writeView.data()[C2PlanarLayout::PLANE_V]);
+
+    for (int i = 0; i < mHeight; ++i) {
+        memcpy(pYBuffer + i * dstYStride, img.data + i * mWidth, mWidth);
+    }
+    for (int i = 0; i < mHeight / 2; ++i) {
+        memcpy(pUBuffer + i * dstUVStride,
+               img.data + mWidth * mHeight + i * mWidth / 2, mWidth / 2);
+    }
+    for (int i = 0; i < mHeight / 2; ++i) {
+        memcpy(pVBuffer + i * dstUVStride,
+               img.data + mWidth * mHeight * 5 / 4 + i * mWidth / 2,
+               mWidth / 2);
+    }
+}
+
+uint64_t C2GoldfishAvcDec::getWorkIndex(uint64_t pts) {
+    if (!mOldPts2Index.empty()) {
+        auto iter = mOldPts2Index.find(pts);
+        if (iter != mOldPts2Index.end()) {
+            auto index = iter->second;
+            DDD("found index %d for pts %" PRIu64, (int)index, pts);
+            return index;
+        }
+    }
+    auto iter = mPts2Index.find(pts);
+    if (iter != mPts2Index.end()) {
+        auto index = iter->second;
+        DDD("found index %d for pts %" PRIu64, (int)index, pts);
+        return index;
+    }
+    DDD("not found index for pts %" PRIu64, pts);
+    return 0;
+}
+
+void C2GoldfishAvcDec::insertPts(uint32_t work_index, uint64_t pts) {
+    auto iter = mPts2Index.find(pts);
+    if (iter != mPts2Index.end()) {
+        // we have a collision here:
+        // apparently, older session is not done yet,
+        // lets save them
+        DDD("inserted to old pts %" PRIu64 " with index %d", pts, (int)iter->second);
+        mOldPts2Index[iter->first] = iter->second;
+    }
+    DDD("inserted pts %" PRIu64 " with index %d", pts, (int)work_index);
+    mIndex2Pts[work_index] = pts;
+    mPts2Index[pts] = work_index;
+}
+
+void C2GoldfishAvcDec::removePts(uint64_t pts) {
+    bool found = false;
+    uint64_t index = 0;
+    // note: check old pts first to see
+    // if we have some left over, check them
+    if (!mOldPts2Index.empty()) {
+        auto iter = mOldPts2Index.find(pts);
+        if (iter != mOldPts2Index.end()) {
+            mOldPts2Index.erase(iter);
+            index = iter->second;
+            found = true;
+        }
+    } else {
+        auto iter = mPts2Index.find(pts);
+        if (iter != mPts2Index.end()) {
+            mPts2Index.erase(iter);
+            index = iter->second;
+            found = true;
+        }
+    }
+
+    if (!found) return;
+
+    auto iter2 = mIndex2Pts.find(index);
+    if (iter2 == mIndex2Pts.end()) return;
+    mIndex2Pts.erase(iter2);
+}
+
+// TODO: can overall error checking be improved?
+// TODO: allow configuration of color format and usage for graphic buffers
+// instead
+//       of hard coding them to HAL_PIXEL_FORMAT_YV12
+// TODO: pass coloraspects information to surface
+// TODO: test support for dynamic change in resolution
+// TODO: verify if the decoder sent back all frames
+void C2GoldfishAvcDec::process(const std::unique_ptr<C2Work> &work,
+                               const std::shared_ptr<C2BlockPool> &pool) {
+    // Initialize output work
+    work->result = C2_OK;
+    work->workletsProcessed = 0u;
+    work->worklets.front()->output.flags = work->input.flags;
+    if (mSignalledError || mSignalledOutputEos) {
+        work->result = C2_BAD_VALUE;
+        return;
+    }
+
+    DDD("process work");
+    if (!mContext) {
+        DDD("creating decoder context to host in process work");
+        checkMode(pool);
+        createDecoder();
+        decodeHeaderAfterFlush();
+    }
+
+    size_t inOffset = 0u;
+    size_t inSize = 0u;
+    uint32_t workIndex = work->input.ordinal.frameIndex.peeku() & 0xFFFFFFFF;
+    mPts = work->input.ordinal.timestamp.peeku();
+    C2ReadView rView = mDummyReadView;
+    if (!work->input.buffers.empty()) {
+        rView =
+            work->input.buffers[0]->data().linearBlocks().front().map().get();
+        inSize = rView.capacity();
+        if (inSize && rView.error()) {
+            ALOGE("read view map failed %d", rView.error());
+            work->result = rView.error();
+            return;
+        }
+    }
+    bool eos = ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) != 0);
+    bool hasPicture = (inSize > 0);
+
+    DDD("in buffer attr. size %zu timestamp %d frameindex %d, flags %x", inSize,
+        (int)work->input.ordinal.timestamp.peeku(),
+        (int)work->input.ordinal.frameIndex.peeku(), work->input.flags);
+    size_t inPos = 0;
+    while (inPos < inSize && inSize - inPos >= kMinInputBytes) {
+        if (C2_OK != ensureDecoderState(pool)) {
+            mSignalledError = true;
+            work->workletsProcessed = 1u;
+            work->result = C2_CORRUPTED;
+            return;
+        }
+
+        {
+            // C2GraphicView wView;// = mOutBlock->map().get();
+            // if (wView.error()) {
+            //    ALOGE("graphic view map failed %d", wView.error());
+            //    work->result = wView.error();
+            //    return;
+            //}
+            if (!setDecodeArgs(&rView, nullptr, inOffset + inPos,
+                               inSize - inPos, workIndex)) {
+                mSignalledError = true;
+                work->workletsProcessed = 1u;
+                work->result = C2_CORRUPTED;
+                return;
+            }
+
+            if (false == mHeaderDecoded) {
+                /* Decode header and get dimensions */
+                setParams(mStride);
+            }
+
+            DDD("flag is %x", work->input.flags);
+            if (work->input.flags & C2FrameData::FLAG_CODEC_CONFIG) {
+                hasPicture = false;
+                if (mCsd0.empty()) {
+                    mCsd0.assign(mInPBuffer, mInPBuffer + mInPBufferSize);
+                    DDD("assign to csd0 with %d bytpes", mInPBufferSize);
+                } else if (mCsd1.empty()) {
+                    mCsd1.assign(mInPBuffer, mInPBuffer + mInPBufferSize);
+                    DDD("assign to csd1 with %d bytpes", mInPBufferSize);
+                }
+                // this is not really a valid pts from config
+                removePts(mPts);
+            }
+
+            uint32_t delay;
+            GETTIME(&mTimeStart, nullptr);
+            TIME_DIFF(mTimeEnd, mTimeStart, delay);
+            //(void) ivdec_api_function(mDecHandle, &s_decode_ip, &s_decode_op);
+            DDD("decoding");
+            h264_result_t h264Res =
+                mContext->decodeFrame(mInPBuffer, mInPBufferSize, mIndex2Pts[mInTsMarker]);
+            mConsumedBytes = h264Res.bytesProcessed;
+            DDD("decoding consumed %d", (int)mConsumedBytes);
+
+            if (mHostColorBufferId > 0) {
+                mImg = mContext->renderOnHostAndReturnImageMetadata(
+                    mHostColorBufferId);
+            } else {
+                mImg = mContext->getImage();
+            }
+            uint32_t decodeTime;
+            GETTIME(&mTimeEnd, nullptr);
+            TIME_DIFF(mTimeStart, mTimeEnd, decodeTime);
+        }
+        // TODO: handle res change
+        if (0) {
+            DDD("resolution changed");
+            drainInternal(DRAIN_COMPONENT_NO_EOS, pool, work);
+            resetDecoder();
+            resetPlugin();
+            work->workletsProcessed = 0u;
+
+            /* Decode header and get new dimensions */
+            setParams(mStride);
+            //            (void) ivdec_api_function(mDecHandle, &s_decode_ip,
+            //            &s_decode_op);
+        }
+        if (mImg.data != nullptr) {
+            // check for new width and height
+            auto decodedW = mImg.width;
+            auto decodedH = mImg.height;
+            if (decodedW != mWidth || decodedH != mHeight) {
+                mWidth = decodedW;
+                mHeight = decodedH;
+
+                C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
+                std::vector<std::unique_ptr<C2SettingResult>> failures;
+                c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
+                if (err == OK) {
+                    work->worklets.front()->output.configUpdate.push_back(
+                        C2Param::Copy(size));
+                    ensureDecoderState(pool);
+                } else {
+                    ALOGE("Cannot set width and height");
+                    mSignalledError = true;
+                    work->workletsProcessed = 1u;
+                    work->result = C2_CORRUPTED;
+                    return;
+                }
+            }
+
+            DDD("got data %" PRIu64 " with pts %" PRIu64,  getWorkIndex(mImg.pts), mImg.pts);
+            mHeaderDecoded = true;
+            copyImageData(mImg);
+            finishWork(getWorkIndex(mImg.pts), work);
+            removePts(mImg.pts);
+        } else {
+            work->workletsProcessed = 0u;
+        }
+
+        inPos += mConsumedBytes;
+    }
+    if (eos) {
+        DDD("drain because of eos");
+        drainInternal(DRAIN_COMPONENT_WITH_EOS, pool, work);
+        mSignalledOutputEos = true;
+    } else if (!hasPicture) {
+        DDD("no picture, fill empty work");
+        fillEmptyWork(work);
+    }
+
+    work->input.buffers.clear();
+}
+
+c2_status_t
+C2GoldfishAvcDec::drainInternal(uint32_t drainMode,
+                                const std::shared_ptr<C2BlockPool> &pool,
+                                const std::unique_ptr<C2Work> &work) {
+    if (drainMode == NO_DRAIN) {
+        ALOGW("drain with NO_DRAIN: no-op");
+        return C2_OK;
+    }
+    if (drainMode == DRAIN_CHAIN) {
+        ALOGW("DRAIN_CHAIN not supported");
+        return C2_OMITTED;
+    }
+
+    if (OK != setFlushMode())
+        return C2_CORRUPTED;
+    while (true) {
+        if (C2_OK != ensureDecoderState(pool)) {
+            mSignalledError = true;
+            work->workletsProcessed = 1u;
+            work->result = C2_CORRUPTED;
+            return C2_CORRUPTED;
+        }
+        /*
+        C2GraphicView wView = mOutBlock->map().get();
+        if (wView.error()) {
+            ALOGE("graphic view map failed %d", wView.error());
+            return C2_CORRUPTED;
+        }
+        if (!setDecodeArgs(nullptr, &wView, 0, 0, 0)) {
+            mSignalledError = true;
+            work->workletsProcessed = 1u;
+            return C2_CORRUPTED;
+        }
+        */
+
+        if (mHostColorBufferId > 0) {
+            mImg = mContext->renderOnHostAndReturnImageMetadata(
+                mHostColorBufferId);
+        } else {
+            mImg = mContext->getImage();
+        }
+
+        // TODO: maybe keep rendering to screen
+        //        mImg = mContext->getImage();
+        if (mImg.data != nullptr) {
+            DDD("got data in drain mode %" PRIu64 " with pts %" PRIu64,  getWorkIndex(mImg.pts), mImg.pts);
+            copyImageData(mImg);
+            finishWork(getWorkIndex(mImg.pts), work);
+            removePts(mImg.pts);
+        } else {
+            fillEmptyWork(work);
+            break;
+        }
+    }
+
+    return C2_OK;
+}
+
+c2_status_t C2GoldfishAvcDec::drain(uint32_t drainMode,
+                                    const std::shared_ptr<C2BlockPool> &pool) {
+    DDD("drainInternal because of drain");
+    return drainInternal(drainMode, pool, nullptr);
+}
+
+class C2GoldfishAvcDecFactory : public C2ComponentFactory {
+  public:
+    C2GoldfishAvcDecFactory()
+        : mHelper(std::static_pointer_cast<C2ReflectorHelper>(
+              GoldfishComponentStore::Create()->getParamReflector())) {}
+
+    virtual c2_status_t
+    createComponent(c2_node_id_t id,
+                    std::shared_ptr<C2Component> *const component,
+                    std::function<void(C2Component *)> deleter) override {
+        *component = std::shared_ptr<C2Component>(
+            new C2GoldfishAvcDec(
+                COMPONENT_NAME, id,
+                std::make_shared<C2GoldfishAvcDec::IntfImpl>(mHelper)),
+            deleter);
+        return C2_OK;
+    }
+
+    virtual c2_status_t createInterface(
+        c2_node_id_t id, std::shared_ptr<C2ComponentInterface> *const interface,
+        std::function<void(C2ComponentInterface *)> deleter) override {
+        *interface = std::shared_ptr<C2ComponentInterface>(
+            new SimpleInterface<C2GoldfishAvcDec::IntfImpl>(
+                COMPONENT_NAME, id,
+                std::make_shared<C2GoldfishAvcDec::IntfImpl>(mHelper)),
+            deleter);
+        return C2_OK;
+    }
+
+    virtual ~C2GoldfishAvcDecFactory() override = default;
+
+  private:
+    std::shared_ptr<C2ReflectorHelper> mHelper;
+};
+
+} // namespace android
+
+extern "C" ::C2ComponentFactory *CreateCodec2Factory() {
+    DDD("in %s", __func__);
+    return new ::android::C2GoldfishAvcDecFactory();
+}
+
+extern "C" void DestroyCodec2Factory(::C2ComponentFactory *factory) {
+    DDD("in %s", __func__);
+    delete factory;
+}
diff --git a/system/codecs/c2/decoders/avcdec/C2GoldfishAvcDec.h b/system/codecs/c2/decoders/avcdec/C2GoldfishAvcDec.h
new file mode 100644
index 0000000..914a10e
--- /dev/null
+++ b/system/codecs/c2/decoders/avcdec/C2GoldfishAvcDec.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_C2_SOFT_AVC_DEC_H_
+#define ANDROID_C2_SOFT_AVC_DEC_H_
+
+#include <sys/time.h>
+
+#include <media/stagefright/foundation/ColorUtils.h>
+
+#include "MediaH264Decoder.h"
+#include <SimpleC2Component.h>
+#include <atomic>
+#include <map>
+
+namespace android {
+
+#define ALIGN2(x) ((((x) + 1) >> 1) << 1)
+#define ALIGN8(x) ((((x) + 7) >> 3) << 3)
+#define ALIGN16(x) ((((x) + 15) >> 4) << 4)
+#define ALIGN32(x) ((((x) + 31) >> 5) << 5)
+#define MAX_NUM_CORES 4
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#define GETTIME(a, b) gettimeofday(a, b);
+#define TIME_DIFF(start, end, diff)                                            \
+    diff = (((end).tv_sec - (start).tv_sec) * 1000000) +                       \
+           ((end).tv_usec - (start).tv_usec);
+
+class C2GoldfishAvcDec : public SimpleC2Component {
+  public:
+    class IntfImpl;
+    C2GoldfishAvcDec(const char *name, c2_node_id_t id,
+                     const std::shared_ptr<IntfImpl> &intfImpl);
+    virtual ~C2GoldfishAvcDec();
+
+    // From SimpleC2Component
+    c2_status_t onInit() override;
+    c2_status_t onStop() override;
+    void onReset() override;
+    void onRelease() override;
+    c2_status_t onFlush_sm() override;
+    void process(const std::unique_ptr<C2Work> &work,
+                 const std::shared_ptr<C2BlockPool> &pool) override;
+    c2_status_t drain(uint32_t drainMode,
+                      const std::shared_ptr<C2BlockPool> &pool) override;
+
+  private:
+    std::unique_ptr<MediaH264Decoder> mContext;
+    bool mEnableAndroidNativeBuffers{true};
+
+    void checkMode(const std::shared_ptr<C2BlockPool> &pool);
+    //    status_t createDecoder();
+    status_t createDecoder();
+    status_t setParams(size_t stride);
+    status_t initDecoder();
+    bool setDecodeArgs(C2ReadView *inBuffer, C2GraphicView *outBuffer,
+                       size_t inOffset, size_t inSize, uint32_t tsMarker);
+    c2_status_t ensureDecoderState(const std::shared_ptr<C2BlockPool> &pool);
+    void finishWork(uint64_t index, const std::unique_ptr<C2Work> &work);
+    status_t setFlushMode();
+    c2_status_t drainInternal(uint32_t drainMode,
+                              const std::shared_ptr<C2BlockPool> &pool,
+                              const std::unique_ptr<C2Work> &work);
+    status_t resetDecoder();
+    void resetPlugin();
+    void deleteContext();
+
+    std::shared_ptr<IntfImpl> mIntf;
+
+    void removePts(uint64_t pts);
+    void insertPts(uint32_t work_index, uint64_t pts);
+    uint64_t getWorkIndex(uint64_t pts);
+
+    // there are same pts matching to different work indices
+    // this happen during csd0/csd1 switching
+    std::map<uint64_t, uint64_t> mOldPts2Index;
+    std::map<uint64_t, uint64_t> mPts2Index;
+    std::map<uint64_t, uint64_t> mIndex2Pts;
+    uint64_t  mPts {0};
+
+    // TODO:This is not the right place for this enum. These should
+    // be part of c2-vndk so that they can be accessed by all video plugins
+    // until then, make them feel at home
+    enum {
+        kNotSupported,
+        kPreferBitstream,
+        kPreferContainer,
+    };
+
+    std::shared_ptr<C2GraphicBlock> mOutBlock;
+    uint8_t *mOutBufferFlush;
+
+    int mHostColorBufferId{-1};
+
+    void getVuiParams(h264_image_t &img);
+    void copyImageData(h264_image_t &img);
+
+    h264_image_t mImg{};
+    uint32_t mConsumedBytes{0};
+    uint8_t *mInPBuffer{nullptr};
+    uint32_t mInPBufferSize;
+    uint32_t mInTsMarker;
+
+    // size_t mNumCores;
+    // uint32_t mOutputDelay;
+    uint32_t mWidth;
+    uint32_t mHeight;
+    uint32_t mStride;
+    bool mSignalledOutputEos;
+    bool mSignalledError;
+    bool mHeaderDecoded;
+    std::atomic_uint64_t mOutIndex;
+    // Color aspects. These are ISO values and are meant to detect changes in
+    // aspects to avoid converting them to C2 values for each frame
+    struct VuiColorAspects {
+        uint8_t primaries;
+        uint8_t transfer;
+        uint8_t coeffs;
+        uint8_t fullRange;
+
+        // default color aspects
+        VuiColorAspects()
+            : primaries(2), transfer(2), coeffs(2), fullRange(0) {}
+
+        bool operator==(const VuiColorAspects &o) {
+            return primaries == o.primaries && transfer == o.transfer &&
+                   coeffs == o.coeffs && fullRange == o.fullRange;
+        }
+    } mBitstreamColorAspects;
+
+    // profile
+    struct timeval mTimeStart;
+    struct timeval mTimeEnd;
+#ifdef FILE_DUMP_ENABLE
+    char mInFile[200];
+#endif /* FILE_DUMP_ENABLE */
+
+    std::vector<uint8_t> mCsd0;
+    std::vector<uint8_t> mCsd1;
+    void decodeHeaderAfterFlush();
+
+    C2_DO_NOT_COPY(C2GoldfishAvcDec);
+};
+
+} // namespace android
+
+#endif // ANDROID_C2_SOFT_AVC_DEC_H_
diff --git a/system/codecs/c2/decoders/avcdec/MediaH264Decoder.cpp b/system/codecs/c2/decoders/avcdec/MediaH264Decoder.cpp
new file mode 100644
index 0000000..7909aa9
--- /dev/null
+++ b/system/codecs/c2/decoders/avcdec/MediaH264Decoder.cpp
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <utils/Log.h>
+
+#define DEBUG 0
+#if DEBUG
+#define DDD(...) ALOGD(__VA_ARGS__)
+#else
+#define DDD(...) ((void)0)
+#endif
+
+#include "MediaH264Decoder.h"
+#include "goldfish_media_utils.h"
+#include <string.h>
+
+MediaH264Decoder::MediaH264Decoder(RenderMode renderMode)
+    : mRenderMode(renderMode) {
+    if (renderMode == RenderMode::RENDER_BY_HOST_GPU) {
+        mVersion = 200;
+    } else if (renderMode == RenderMode::RENDER_BY_GUEST_CPU) {
+        mVersion = 100;
+    }
+}
+
+void MediaH264Decoder::initH264Context(unsigned int width, unsigned int height,
+                                       unsigned int outWidth,
+                                       unsigned int outHeight,
+                                       PixelFormat pixFmt) {
+    auto transport = GoldfishMediaTransport::getInstance();
+    if (!mHasAddressSpaceMemory) {
+        int slot = transport->getMemorySlot();
+        if (slot < 0) {
+            ALOGE("ERROR: Failed to initH264Context: cannot get memory slot");
+            return;
+        }
+        mSlot = slot;
+        mAddressOffSet = static_cast<unsigned int>(mSlot) * (1 << 20);
+        DDD("got memory lot %d addrr %x", mSlot, mAddressOffSet);
+        mHasAddressSpaceMemory = true;
+    }
+    transport->writeParam(mVersion, 0, mAddressOffSet);
+    transport->writeParam(width, 1, mAddressOffSet);
+    transport->writeParam(height, 2, mAddressOffSet);
+    transport->writeParam(outWidth, 3, mAddressOffSet);
+    transport->writeParam(outHeight, 4, mAddressOffSet);
+    transport->writeParam(static_cast<uint64_t>(pixFmt), 5, mAddressOffSet);
+    transport->sendOperation(MediaCodecType::H264Codec,
+                             MediaOperation::InitContext, mAddressOffSet);
+    auto *retptr = transport->getReturnAddr(mAddressOffSet);
+    mHostHandle = *(uint64_t *)(retptr);
+    DDD("initH264Context: got handle to host %lld", mHostHandle);
+}
+
+void MediaH264Decoder::resetH264Context(unsigned int width, unsigned int height,
+                                        unsigned int outWidth,
+                                        unsigned int outHeight,
+                                        PixelFormat pixFmt) {
+    auto transport = GoldfishMediaTransport::getInstance();
+    if (!mHasAddressSpaceMemory) {
+        ALOGE("%s no address space memory", __func__);
+        return;
+    }
+    transport->writeParam((uint64_t)mHostHandle, 0, mAddressOffSet);
+    transport->writeParam(width, 1, mAddressOffSet);
+    transport->writeParam(height, 2, mAddressOffSet);
+    transport->writeParam(outWidth, 3, mAddressOffSet);
+    transport->writeParam(outHeight, 4, mAddressOffSet);
+    transport->writeParam(static_cast<uint64_t>(pixFmt), 5, mAddressOffSet);
+    transport->sendOperation(MediaCodecType::H264Codec, MediaOperation::Reset,
+                             mAddressOffSet);
+    DDD("resetH264Context: done");
+}
+
+void MediaH264Decoder::destroyH264Context() {
+
+    DDD("return memory lot %d addrr %x", (int)(mAddressOffSet >> 23),
+        mAddressOffSet);
+    auto transport = GoldfishMediaTransport::getInstance();
+    transport->writeParam((uint64_t)mHostHandle, 0, mAddressOffSet);
+    transport->sendOperation(MediaCodecType::H264Codec,
+                             MediaOperation::DestroyContext, mAddressOffSet);
+    transport->returnMemorySlot(mSlot);
+    mHasAddressSpaceMemory = false;
+}
+
+h264_result_t MediaH264Decoder::decodeFrame(uint8_t *img, size_t szBytes,
+                                            uint64_t pts) {
+    DDD("decode frame: use handle to host %lld", mHostHandle);
+    h264_result_t res = {0, 0};
+    if (!mHasAddressSpaceMemory) {
+        ALOGE("%s no address space memory", __func__);
+        return res;
+    }
+    auto transport = GoldfishMediaTransport::getInstance();
+    uint8_t *hostSrc = transport->getInputAddr(mAddressOffSet);
+    if (img != nullptr && szBytes > 0) {
+        memcpy(hostSrc, img, szBytes);
+    }
+    transport->writeParam((uint64_t)mHostHandle, 0, mAddressOffSet);
+    transport->writeParam(transport->offsetOf((uint64_t)(hostSrc)) -
+                              mAddressOffSet,
+                          1, mAddressOffSet);
+    transport->writeParam((uint64_t)szBytes, 2, mAddressOffSet);
+    transport->writeParam((uint64_t)pts, 3, mAddressOffSet);
+    transport->sendOperation(MediaCodecType::H264Codec,
+                             MediaOperation::DecodeImage, mAddressOffSet);
+
+    auto *retptr = transport->getReturnAddr(mAddressOffSet);
+    res.bytesProcessed = *(uint64_t *)(retptr);
+    res.ret = *(int *)(retptr + 8);
+
+    return res;
+}
+
+void MediaH264Decoder::flush() {
+    if (!mHasAddressSpaceMemory) {
+        ALOGE("%s no address space memory", __func__);
+        return;
+    }
+    DDD("flush: use handle to host %lld", mHostHandle);
+    auto transport = GoldfishMediaTransport::getInstance();
+    transport->writeParam((uint64_t)mHostHandle, 0, mAddressOffSet);
+    transport->sendOperation(MediaCodecType::H264Codec, MediaOperation::Flush,
+                             mAddressOffSet);
+}
+
+h264_image_t MediaH264Decoder::getImage() {
+    DDD("getImage: use handle to host %lld", mHostHandle);
+    h264_image_t res{};
+    if (!mHasAddressSpaceMemory) {
+        ALOGE("%s no address space memory", __func__);
+        return res;
+    }
+    auto transport = GoldfishMediaTransport::getInstance();
+    uint8_t *dst = transport->getInputAddr(
+        mAddressOffSet); // Note: reuse the same addr for input and output
+    transport->writeParam((uint64_t)mHostHandle, 0, mAddressOffSet);
+    transport->writeParam(transport->offsetOf((uint64_t)(dst)) - mAddressOffSet,
+                          1, mAddressOffSet);
+    transport->writeParam(-1, 2, mAddressOffSet);
+    transport->sendOperation(MediaCodecType::H264Codec,
+                             MediaOperation::GetImage, mAddressOffSet);
+    auto *retptr = transport->getReturnAddr(mAddressOffSet);
+    res.ret = *(int *)(retptr);
+    if (res.ret >= 0) {
+        res.data = dst;
+        res.width = *(uint32_t *)(retptr + 8);
+        res.height = *(uint32_t *)(retptr + 16);
+        res.pts = *(uint64_t *)(retptr + 24);
+        res.color_primaries = *(uint32_t *)(retptr + 32);
+        res.color_range = *(uint32_t *)(retptr + 40);
+        res.color_trc = *(uint32_t *)(retptr + 48);
+        res.colorspace = *(uint32_t *)(retptr + 56);
+    } else if (res.ret == (int)(Err::DecoderRestarted)) {
+        res.width = *(uint32_t *)(retptr + 8);
+        res.height = *(uint32_t *)(retptr + 16);
+    }
+    return res;
+}
+
+h264_image_t
+MediaH264Decoder::renderOnHostAndReturnImageMetadata(int hostColorBufferId) {
+    DDD("%s: use handle to host %lld", __func__, mHostHandle);
+    h264_image_t res{};
+    if (hostColorBufferId < 0) {
+        ALOGE("%s negative color buffer id %d", __func__, hostColorBufferId);
+        return res;
+    }
+    DDD("%s send color buffer id %d", __func__, hostColorBufferId);
+    if (!mHasAddressSpaceMemory) {
+        ALOGE("%s no address space memory", __func__);
+        return res;
+    }
+    auto transport = GoldfishMediaTransport::getInstance();
+    uint8_t *dst = transport->getInputAddr(
+        mAddressOffSet); // Note: reuse the same addr for input and output
+    transport->writeParam((uint64_t)mHostHandle, 0, mAddressOffSet);
+    transport->writeParam(transport->offsetOf((uint64_t)(dst)) - mAddressOffSet,
+                          1, mAddressOffSet);
+    transport->writeParam((uint64_t)hostColorBufferId, 2, mAddressOffSet);
+    transport->sendOperation(MediaCodecType::H264Codec,
+                             MediaOperation::GetImage, mAddressOffSet);
+    auto *retptr = transport->getReturnAddr(mAddressOffSet);
+    res.ret = *(int *)(retptr);
+    if (res.ret >= 0) {
+        res.data = dst; // note: the data could be junk
+        res.width = *(uint32_t *)(retptr + 8);
+        res.height = *(uint32_t *)(retptr + 16);
+        res.pts = *(uint64_t *)(retptr + 24);
+        res.color_primaries = *(uint32_t *)(retptr + 32);
+        res.color_range = *(uint32_t *)(retptr + 40);
+        res.color_trc = *(uint32_t *)(retptr + 48);
+        res.colorspace = *(uint32_t *)(retptr + 56);
+    } else if (res.ret == (int)(Err::DecoderRestarted)) {
+        res.width = *(uint32_t *)(retptr + 8);
+        res.height = *(uint32_t *)(retptr + 16);
+    }
+    return res;
+}
diff --git a/system/codecs/c2/decoders/avcdec/MediaH264Decoder.h b/system/codecs/c2/decoders/avcdec/MediaH264Decoder.h
new file mode 100644
index 0000000..1c1b262
--- /dev/null
+++ b/system/codecs/c2/decoders/avcdec/MediaH264Decoder.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GOLDFISH_MEDIA_H264_DEC_H_
+#define GOLDFISH_MEDIA_H264_DEC_H_
+
+struct h264_init_result_t {
+    uint64_t host_handle;
+    int ret;
+};
+
+struct h264_result_t {
+    int ret;
+    uint64_t bytesProcessed;
+};
+
+struct h264_image_t {
+    const uint8_t *data;
+    uint32_t width;
+    uint32_t height;
+    uint64_t pts; // presentation time stamp
+    uint64_t color_primaries;
+    uint64_t color_range;
+    uint64_t color_trc;
+    uint64_t colorspace;
+    // on success, |ret| will indicate the size of |data|.
+    // If failed, |ret| will contain some negative error code.
+    int ret;
+};
+
+enum class RenderMode {
+    RENDER_BY_HOST_GPU = 1,
+    RENDER_BY_GUEST_CPU = 2,
+};
+
+class MediaH264Decoder {
+    uint64_t mHostHandle = 0;
+    uint32_t mVersion = 100;
+    RenderMode mRenderMode = RenderMode::RENDER_BY_GUEST_CPU;
+
+    bool mHasAddressSpaceMemory = false;
+    uint64_t mAddressOffSet = 0;
+    int mSlot = -1;
+
+  public:
+    MediaH264Decoder(RenderMode renderMode);
+    virtual ~MediaH264Decoder() = default;
+
+    enum class PixelFormat : uint8_t {
+        YUV420P = 0,
+        UYVY422 = 1,
+        BGRA8888 = 2,
+    };
+
+    enum class Err : int {
+        NoErr = 0,
+        NoDecodedFrame = -1,
+        InitContextFailed = -2,
+        DecoderRestarted = -3,
+        NALUIgnored = -4,
+    };
+
+    bool getAddressSpaceMemory();
+    void initH264Context(unsigned int width, unsigned int height,
+                         unsigned int outWidth, unsigned int outHeight,
+                         PixelFormat pixFmt);
+    void resetH264Context(unsigned int width, unsigned int height,
+                          unsigned int outWidth, unsigned int outHeight,
+                          PixelFormat pixFmt);
+    void destroyH264Context();
+    h264_result_t decodeFrame(uint8_t *img, size_t szBytes, uint64_t pts);
+    void flush();
+    // ask host to copy image data back to guest, with image metadata
+    // to guest as well
+    h264_image_t getImage();
+    // ask host to render to hostColorBufferId, return only image metadata back
+    // to guest
+    h264_image_t renderOnHostAndReturnImageMetadata(int hostColorBufferId);
+};
+#endif
diff --git a/system/codecs/c2/decoders/base/Android.bp b/system/codecs/c2/decoders/base/Android.bp
new file mode 100644
index 0000000..58e26a3
--- /dev/null
+++ b/system/codecs/c2/decoders/base/Android.bp
@@ -0,0 +1,86 @@
+// DO NOT DEPEND ON THIS DIRECTLY
+// use libcodec2_soft-defaults instead
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "device_generic_goldfish-opengl_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["device_generic_goldfish-opengl_license"],
+}
+
+cc_library_shared {
+    name: "libcodec2_goldfish_common",
+    defaults: ["libcodec2-impl-defaults"],
+    vendor: true,
+
+    srcs: [
+        "SimpleC2Component.cpp",
+        "SimpleC2Interface.cpp",
+        "goldfish_media_utils.cpp",
+        "color_buffer_utils.cpp",
+    ],
+
+    export_include_dirs: [
+        "include",
+    ],
+
+    export_shared_lib_headers: [
+        "libsfplugin_ccodec_utils",
+        "libgoldfish_codec2_store", // for goldfish store
+    ],
+
+    shared_libs: [
+        "libcutils", // for properties
+        "liblog",    // for ALOG
+        "libdrm",    // for ALOG
+        "libbase",   // for properties, parseint
+        "libsfplugin_ccodec_utils", // for ImageCopy
+        "libstagefright_foundation", // for Mutexed
+        "libgoldfish_codec2_store", // for goldfish store
+    ],
+
+    static_libs: [
+        "libGoldfishAddressSpace",
+    ],
+
+    header_libs: [
+        "libgralloc_cb.ranchu",
+    ],
+
+    sanitize: {
+        misc_undefined: [
+            "unsigned-integer-overflow",
+            "signed-integer-overflow",
+        ],
+        cfi: true,
+    },
+
+
+    ldflags: ["-Wl,-Bsymbolic"],
+}
+
+// public dependency for software codec implementation
+// to be used by code under media/codecs/* only as its stability is not guaranteed
+cc_defaults {
+    name: "libcodec2_goldfish-defaults",
+    defaults: ["libcodec2-impl-defaults"],
+    export_shared_lib_headers: [
+        "libsfplugin_ccodec_utils",
+    ],
+
+    shared_libs: [
+        "libcodec2_goldfish_common",
+        "libcutils", // for properties
+        "liblog", // for ALOG
+        "libsfplugin_ccodec_utils", // for ImageCopy
+        "libstagefright_foundation", // for ColorUtils and MIME
+    ],
+
+    cflags: [
+        "-Wall",
+        "-Werror",
+    ],
+
+    ldflags: ["-Wl,-Bsymbolic"],
+}
diff --git a/system/codecs/c2/decoders/base/SimpleC2Component.cpp b/system/codecs/c2/decoders/base/SimpleC2Component.cpp
new file mode 100644
index 0000000..1e1bbc7
--- /dev/null
+++ b/system/codecs/c2/decoders/base/SimpleC2Component.cpp
@@ -0,0 +1,624 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SimpleC2Component"
+#include <log/log.h>
+
+#include <cutils/properties.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+#include <inttypes.h>
+
+#include <C2Config.h>
+#include <C2Debug.h>
+#include <C2PlatformSupport.h>
+#include <SimpleC2Component.h>
+
+#define DEBUG 0
+#if DEBUG
+#define DDD(...) ALOGD(__VA_ARGS__)
+#else
+#define DDD(...) ((void)0)
+#endif
+
+namespace android {
+
+std::unique_ptr<C2Work> SimpleC2Component::WorkQueue::pop_front() {
+    std::unique_ptr<C2Work> work = std::move(mQueue.front().work);
+    mQueue.pop_front();
+    return work;
+}
+
+void SimpleC2Component::WorkQueue::push_back(std::unique_ptr<C2Work> work) {
+    mQueue.push_back({std::move(work), NO_DRAIN});
+}
+
+bool SimpleC2Component::WorkQueue::empty() const { return mQueue.empty(); }
+
+void SimpleC2Component::WorkQueue::clear() { mQueue.clear(); }
+
+uint32_t SimpleC2Component::WorkQueue::drainMode() const {
+    return mQueue.front().drainMode;
+}
+
+void SimpleC2Component::WorkQueue::markDrain(uint32_t drainMode) {
+    mQueue.push_back({nullptr, drainMode});
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+SimpleC2Component::WorkHandler::WorkHandler() : mRunning(false) {}
+
+void SimpleC2Component::WorkHandler::setComponent(
+    const std::shared_ptr<SimpleC2Component> &thiz) {
+    mThiz = thiz;
+}
+
+static void Reply(const sp<AMessage> &msg, int32_t *err = nullptr) {
+    sp<AReplyToken> replyId;
+    CHECK(msg->senderAwaitsResponse(&replyId));
+    sp<AMessage> reply = new AMessage;
+    if (err) {
+        reply->setInt32("err", *err);
+    }
+    reply->postReply(replyId);
+}
+
+void SimpleC2Component::WorkHandler::onMessageReceived(
+    const sp<AMessage> &msg) {
+    std::shared_ptr<SimpleC2Component> thiz = mThiz.lock();
+    if (!thiz) {
+        ALOGD("component not yet set; msg = %s", msg->debugString().c_str());
+        sp<AReplyToken> replyId;
+        if (msg->senderAwaitsResponse(&replyId)) {
+            sp<AMessage> reply = new AMessage;
+            reply->setInt32("err", C2_CORRUPTED);
+            reply->postReply(replyId);
+        }
+        return;
+    }
+
+    switch (msg->what()) {
+    case kWhatProcess: {
+        if (mRunning) {
+            if (thiz->processQueue()) {
+                (new AMessage(kWhatProcess, this))->post();
+            }
+        } else {
+            DDD("Ignore process message as we're not running");
+        }
+        break;
+    }
+    case kWhatInit: {
+        int32_t err = thiz->onInit();
+        Reply(msg, &err);
+        [[fallthrough]];
+    }
+    case kWhatStart: {
+        mRunning = true;
+        break;
+    }
+    case kWhatStop: {
+        int32_t err = thiz->onStop();
+        Reply(msg, &err);
+        break;
+    }
+    case kWhatReset: {
+        thiz->onReset();
+        mRunning = false;
+        Reply(msg);
+        break;
+    }
+    case kWhatRelease: {
+        thiz->onRelease();
+        mRunning = false;
+        Reply(msg);
+        break;
+    }
+    default: {
+        ALOGD("Unrecognized msg: %d", msg->what());
+        break;
+    }
+    }
+}
+
+class SimpleC2Component::BlockingBlockPool : public C2BlockPool {
+  public:
+    BlockingBlockPool(const std::shared_ptr<C2BlockPool> &base) : mBase{base} {}
+
+    virtual local_id_t getLocalId() const override {
+        return mBase->getLocalId();
+    }
+
+    virtual C2Allocator::id_t getAllocatorId() const override {
+        return mBase->getAllocatorId();
+    }
+
+    virtual c2_status_t
+    fetchLinearBlock(uint32_t capacity, C2MemoryUsage usage,
+                     std::shared_ptr<C2LinearBlock> *block) {
+        c2_status_t status;
+        do {
+            status = mBase->fetchLinearBlock(capacity, usage, block);
+        } while (status == C2_BLOCKING);
+        return status;
+    }
+
+    virtual c2_status_t
+    fetchCircularBlock(uint32_t capacity, C2MemoryUsage usage,
+                       std::shared_ptr<C2CircularBlock> *block) {
+        c2_status_t status;
+        do {
+            status = mBase->fetchCircularBlock(capacity, usage, block);
+        } while (status == C2_BLOCKING);
+        return status;
+    }
+
+    virtual c2_status_t
+    fetchGraphicBlock(uint32_t width, uint32_t height, uint32_t format,
+                      C2MemoryUsage usage,
+                      std::shared_ptr<C2GraphicBlock> *block) {
+        c2_status_t status;
+        do {
+            status =
+                mBase->fetchGraphicBlock(width, height, format, usage, block);
+        } while (status == C2_BLOCKING);
+        return status;
+    }
+
+  private:
+    std::shared_ptr<C2BlockPool> mBase;
+};
+
+////////////////////////////////////////////////////////////////////////////////
+
+namespace {
+
+struct DummyReadView : public C2ReadView {
+    DummyReadView() : C2ReadView(C2_NO_INIT) {}
+};
+
+} // namespace
+
+SimpleC2Component::SimpleC2Component(
+    const std::shared_ptr<C2ComponentInterface> &intf)
+    : mDummyReadView(DummyReadView()), mIntf(intf), mLooper(new ALooper),
+      mHandler(new WorkHandler) {
+    mLooper->setName(intf->getName().c_str());
+    (void)mLooper->registerHandler(mHandler);
+    mLooper->start(false, false, ANDROID_PRIORITY_VIDEO);
+}
+
+SimpleC2Component::~SimpleC2Component() {
+    mLooper->unregisterHandler(mHandler->id());
+    (void)mLooper->stop();
+}
+
+c2_status_t SimpleC2Component::setListener_vb(
+    const std::shared_ptr<C2Component::Listener> &listener,
+    c2_blocking_t mayBlock) {
+    mHandler->setComponent(shared_from_this());
+
+    Mutexed<ExecState>::Locked state(mExecState);
+    if (state->mState == RUNNING) {
+        if (listener) {
+            return C2_BAD_STATE;
+        } else if (!mayBlock) {
+            return C2_BLOCKING;
+        }
+    }
+    state->mListener = listener;
+    // TODO: wait for listener change to have taken place before returning
+    // (e.g. if there is an ongoing listener callback)
+    return C2_OK;
+}
+
+c2_status_t
+SimpleC2Component::queue_nb(std::list<std::unique_ptr<C2Work>> *const items) {
+    {
+        Mutexed<ExecState>::Locked state(mExecState);
+        if (state->mState != RUNNING) {
+            return C2_BAD_STATE;
+        }
+    }
+    bool queueWasEmpty = false;
+    {
+        Mutexed<WorkQueue>::Locked queue(mWorkQueue);
+        queueWasEmpty = queue->empty();
+        while (!items->empty()) {
+            queue->push_back(std::move(items->front()));
+            items->pop_front();
+        }
+    }
+    if (queueWasEmpty) {
+        (new AMessage(WorkHandler::kWhatProcess, mHandler))->post();
+    }
+    return C2_OK;
+}
+
+c2_status_t
+SimpleC2Component::announce_nb(const std::vector<C2WorkOutline> &items) {
+    (void)items;
+    return C2_OMITTED;
+}
+
+c2_status_t SimpleC2Component::flush_sm(
+    flush_mode_t flushMode,
+    std::list<std::unique_ptr<C2Work>> *const flushedWork) {
+    (void)flushMode;
+    {
+        Mutexed<ExecState>::Locked state(mExecState);
+        if (state->mState != RUNNING) {
+            return C2_BAD_STATE;
+        }
+    }
+    {
+        Mutexed<WorkQueue>::Locked queue(mWorkQueue);
+        queue->incGeneration();
+        // TODO: queue->splicedBy(flushedWork, flushedWork->end());
+        while (!queue->empty()) {
+            std::unique_ptr<C2Work> work = queue->pop_front();
+            if (work) {
+                flushedWork->push_back(std::move(work));
+            }
+        }
+        while (!queue->pending().empty()) {
+            flushedWork->push_back(std::move(queue->pending().begin()->second));
+            queue->pending().erase(queue->pending().begin());
+        }
+    }
+
+    return C2_OK;
+}
+
+c2_status_t SimpleC2Component::drain_nb(drain_mode_t drainMode) {
+    if (drainMode == DRAIN_CHAIN) {
+        return C2_OMITTED;
+    }
+    {
+        Mutexed<ExecState>::Locked state(mExecState);
+        if (state->mState != RUNNING) {
+            return C2_BAD_STATE;
+        }
+    }
+    bool queueWasEmpty = false;
+    {
+        Mutexed<WorkQueue>::Locked queue(mWorkQueue);
+        queueWasEmpty = queue->empty();
+        queue->markDrain(drainMode);
+    }
+    if (queueWasEmpty) {
+        (new AMessage(WorkHandler::kWhatProcess, mHandler))->post();
+    }
+
+    return C2_OK;
+}
+
+c2_status_t SimpleC2Component::start() {
+    Mutexed<ExecState>::Locked state(mExecState);
+    if (state->mState == RUNNING) {
+        return C2_BAD_STATE;
+    }
+    bool needsInit = (state->mState == UNINITIALIZED);
+    state.unlock();
+    if (needsInit) {
+        sp<AMessage> reply;
+        (new AMessage(WorkHandler::kWhatInit, mHandler))
+            ->postAndAwaitResponse(&reply);
+        int32_t err;
+        CHECK(reply->findInt32("err", &err));
+        if (err != C2_OK) {
+            return (c2_status_t)err;
+        }
+    } else {
+        (new AMessage(WorkHandler::kWhatStart, mHandler))->post();
+    }
+    state.lock();
+    state->mState = RUNNING;
+    return C2_OK;
+}
+
+c2_status_t SimpleC2Component::stop() {
+    DDD("stop");
+    {
+        Mutexed<ExecState>::Locked state(mExecState);
+        if (state->mState != RUNNING) {
+            return C2_BAD_STATE;
+        }
+        state->mState = STOPPED;
+    }
+    {
+        Mutexed<WorkQueue>::Locked queue(mWorkQueue);
+        queue->clear();
+        queue->pending().clear();
+    }
+    sp<AMessage> reply;
+    (new AMessage(WorkHandler::kWhatStop, mHandler))
+        ->postAndAwaitResponse(&reply);
+    int32_t err;
+    CHECK(reply->findInt32("err", &err));
+    if (err != C2_OK) {
+        return (c2_status_t)err;
+    }
+    return C2_OK;
+}
+
+c2_status_t SimpleC2Component::reset() {
+    DDD("reset");
+    {
+        Mutexed<ExecState>::Locked state(mExecState);
+        state->mState = UNINITIALIZED;
+    }
+    {
+        Mutexed<WorkQueue>::Locked queue(mWorkQueue);
+        queue->clear();
+        queue->pending().clear();
+    }
+    sp<AMessage> reply;
+    (new AMessage(WorkHandler::kWhatReset, mHandler))
+        ->postAndAwaitResponse(&reply);
+    return C2_OK;
+}
+
+c2_status_t SimpleC2Component::release() {
+    DDD("release");
+    sp<AMessage> reply;
+    (new AMessage(WorkHandler::kWhatRelease, mHandler))
+        ->postAndAwaitResponse(&reply);
+    return C2_OK;
+}
+
+std::shared_ptr<C2ComponentInterface> SimpleC2Component::intf() {
+    return mIntf;
+}
+
+namespace {
+
+std::list<std::unique_ptr<C2Work>> vec(std::unique_ptr<C2Work> &work) {
+    std::list<std::unique_ptr<C2Work>> ret;
+    ret.push_back(std::move(work));
+    return ret;
+}
+
+} // namespace
+
+void SimpleC2Component::finish(
+    uint64_t frameIndex,
+    std::function<void(const std::unique_ptr<C2Work> &)> fillWork) {
+    std::unique_ptr<C2Work> work;
+    {
+        Mutexed<WorkQueue>::Locked queue(mWorkQueue);
+        if (queue->pending().count(frameIndex) == 0) {
+            ALOGW("unknown frame index: %" PRIu64, frameIndex);
+            return;
+        }
+        work = std::move(queue->pending().at(frameIndex));
+        queue->pending().erase(frameIndex);
+    }
+    if (work) {
+        fillWork(work);
+        std::shared_ptr<C2Component::Listener> listener =
+            mExecState.lock()->mListener;
+        listener->onWorkDone_nb(shared_from_this(), vec(work));
+        DDD("returning pending work");
+    }
+}
+
+void SimpleC2Component::cloneAndSend(
+    uint64_t frameIndex, const std::unique_ptr<C2Work> &currentWork,
+    std::function<void(const std::unique_ptr<C2Work> &)> fillWork) {
+    std::unique_ptr<C2Work> work(new C2Work);
+    if (currentWork->input.ordinal.frameIndex == frameIndex) {
+        work->input.flags = currentWork->input.flags;
+        work->input.ordinal = currentWork->input.ordinal;
+    } else {
+        Mutexed<WorkQueue>::Locked queue(mWorkQueue);
+        if (queue->pending().count(frameIndex) == 0) {
+            ALOGW("unknown frame index: %" PRIu64, frameIndex);
+            return;
+        }
+        work->input.flags = queue->pending().at(frameIndex)->input.flags;
+        work->input.ordinal = queue->pending().at(frameIndex)->input.ordinal;
+    }
+    work->worklets.emplace_back(new C2Worklet);
+    if (work) {
+        fillWork(work);
+        std::shared_ptr<C2Component::Listener> listener =
+            mExecState.lock()->mListener;
+        listener->onWorkDone_nb(shared_from_this(), vec(work));
+        DDD("cloned and sending work");
+    }
+}
+
+bool SimpleC2Component::processQueue() {
+    std::unique_ptr<C2Work> work;
+    uint64_t generation;
+    int32_t drainMode;
+    bool isFlushPending = false;
+    bool hasQueuedWork = false;
+    {
+        Mutexed<WorkQueue>::Locked queue(mWorkQueue);
+        if (queue->empty()) {
+            return false;
+        }
+
+        generation = queue->generation();
+        drainMode = queue->drainMode();
+        isFlushPending = queue->popPendingFlush();
+        work = queue->pop_front();
+        hasQueuedWork = !queue->empty();
+    }
+    if (isFlushPending) {
+        DDD("processing pending flush");
+        c2_status_t err = onFlush_sm();
+        if (err != C2_OK) {
+            ALOGD("flush err: %d", err);
+            // TODO: error
+        }
+    }
+
+    if (!mOutputBlockPool) {
+        c2_status_t err = [this] {
+            // TODO: don't use query_vb
+            C2StreamBufferTypeSetting::output outputFormat(0u);
+            std::vector<std::unique_ptr<C2Param>> params;
+            c2_status_t err = intf()->query_vb(
+                {&outputFormat}, {C2PortBlockPoolsTuning::output::PARAM_TYPE},
+                C2_DONT_BLOCK, &params);
+            if (err != C2_OK && err != C2_BAD_INDEX) {
+                ALOGD("query err = %d", err);
+                return err;
+            }
+            C2BlockPool::local_id_t poolId =
+                outputFormat.value == C2BufferData::GRAPHIC
+                    ? C2BlockPool::BASIC_GRAPHIC
+                    : C2BlockPool::BASIC_LINEAR;
+            if (params.size()) {
+                C2PortBlockPoolsTuning::output *outputPools =
+                    C2PortBlockPoolsTuning::output::From(params[0].get());
+                if (outputPools && outputPools->flexCount() >= 1) {
+                    poolId = outputPools->m.values[0];
+                }
+            }
+
+            std::shared_ptr<C2BlockPool> blockPool;
+            err = GetCodec2BlockPool(poolId, shared_from_this(), &blockPool);
+            ALOGD("Using output block pool with poolID %llu => got %llu - %d",
+                  (unsigned long long)poolId,
+                  (unsigned long long)(blockPool ? blockPool->getLocalId()
+                                                 : 111000111),
+                  err);
+            if (err == C2_OK) {
+                mOutputBlockPool =
+                    std::make_shared<BlockingBlockPool>(blockPool);
+            }
+            return err;
+        }();
+        if (err != C2_OK) {
+            Mutexed<ExecState>::Locked state(mExecState);
+            std::shared_ptr<C2Component::Listener> listener = state->mListener;
+            state.unlock();
+            listener->onError_nb(shared_from_this(), err);
+            return hasQueuedWork;
+        }
+    }
+
+    if (!work) {
+        c2_status_t err = drain(drainMode, mOutputBlockPool);
+        if (err != C2_OK) {
+            Mutexed<ExecState>::Locked state(mExecState);
+            std::shared_ptr<C2Component::Listener> listener = state->mListener;
+            state.unlock();
+            listener->onError_nb(shared_from_this(), err);
+        }
+        return hasQueuedWork;
+    }
+
+    {
+        std::vector<C2Param *> updates;
+        for (const std::unique_ptr<C2Param> &param : work->input.configUpdate) {
+            if (param) {
+                updates.emplace_back(param.get());
+            }
+        }
+        if (!updates.empty()) {
+            std::vector<std::unique_ptr<C2SettingResult>> failures;
+            c2_status_t err =
+                intf()->config_vb(updates, C2_MAY_BLOCK, &failures);
+            ALOGD("applied %zu configUpdates => %s (%d)", updates.size(),
+                  asString(err), err);
+        }
+    }
+
+    DDD("start processing frame #%" PRIu64,
+        work->input.ordinal.frameIndex.peeku());
+    // If input buffer list is not empty, it means we have some input to process
+    // on. However, input could be a null buffer. In such case, clear the buffer
+    // list before making call to process().
+    if (!work->input.buffers.empty() && !work->input.buffers[0]) {
+        ALOGD("Encountered null input buffer. Clearing the input buffer");
+        work->input.buffers.clear();
+    }
+    process(work, mOutputBlockPool);
+    DDD("processed frame #%" PRIu64, work->input.ordinal.frameIndex.peeku());
+    Mutexed<WorkQueue>::Locked queue(mWorkQueue);
+    if (queue->generation() != generation) {
+        ALOGD("work form old generation: was %" PRIu64 " now %" PRIu64,
+              queue->generation(), generation);
+        work->result = C2_NOT_FOUND;
+        queue.unlock();
+
+        Mutexed<ExecState>::Locked state(mExecState);
+        std::shared_ptr<C2Component::Listener> listener = state->mListener;
+        state.unlock();
+        listener->onWorkDone_nb(shared_from_this(), vec(work));
+        return hasQueuedWork;
+    }
+    if (work->workletsProcessed != 0u) {
+        queue.unlock();
+        Mutexed<ExecState>::Locked state(mExecState);
+        DDD("returning this work");
+        std::shared_ptr<C2Component::Listener> listener = state->mListener;
+        state.unlock();
+        listener->onWorkDone_nb(shared_from_this(), vec(work));
+    } else {
+        work->input.buffers.clear();
+        std::unique_ptr<C2Work> unexpected;
+
+        uint64_t frameIndex = work->input.ordinal.frameIndex.peeku();
+        DDD("queue pending work %" PRIu64, frameIndex);
+        if (queue->pending().count(frameIndex) != 0) {
+            unexpected = std::move(queue->pending().at(frameIndex));
+            queue->pending().erase(frameIndex);
+        }
+        (void)queue->pending().insert({frameIndex, std::move(work)});
+
+        queue.unlock();
+        if (unexpected) {
+            ALOGD("unexpected pending work");
+            unexpected->result = C2_CORRUPTED;
+            Mutexed<ExecState>::Locked state(mExecState);
+            std::shared_ptr<C2Component::Listener> listener = state->mListener;
+            state.unlock();
+            listener->onWorkDone_nb(shared_from_this(), vec(unexpected));
+        }
+    }
+    return hasQueuedWork;
+}
+
+std::shared_ptr<C2Buffer> SimpleC2Component::createLinearBuffer(
+    const std::shared_ptr<C2LinearBlock> &block) {
+    return createLinearBuffer(block, block->offset(), block->size());
+}
+
+std::shared_ptr<C2Buffer> SimpleC2Component::createLinearBuffer(
+    const std::shared_ptr<C2LinearBlock> &block, size_t offset, size_t size) {
+    return C2Buffer::CreateLinearBuffer(
+        block->share(offset, size, ::C2Fence()));
+}
+
+std::shared_ptr<C2Buffer> SimpleC2Component::createGraphicBuffer(
+    const std::shared_ptr<C2GraphicBlock> &block) {
+    return createGraphicBuffer(block, C2Rect(block->width(), block->height()));
+}
+
+std::shared_ptr<C2Buffer> SimpleC2Component::createGraphicBuffer(
+    const std::shared_ptr<C2GraphicBlock> &block, const C2Rect &crop) {
+    return C2Buffer::CreateGraphicBuffer(block->share(crop, ::C2Fence()));
+}
+
+} // namespace android
diff --git a/system/codecs/c2/decoders/base/SimpleC2Interface.cpp b/system/codecs/c2/decoders/base/SimpleC2Interface.cpp
new file mode 100644
index 0000000..9f76520
--- /dev/null
+++ b/system/codecs/c2/decoders/base/SimpleC2Interface.cpp
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SimpleC2Interface"
+#include <utils/Log.h>
+
+// use MediaDefs here vs. MediaCodecConstants as this is not MediaCodec
+// specific/dependent
+#include <media/stagefright/foundation/MediaDefs.h>
+
+#include <C2PlatformSupport.h>
+#include <SimpleC2Interface.h>
+
+namespace android {
+
+/* SimpleInterface */
+
+SimpleInterface<void>::BaseParams::BaseParams(
+    const std::shared_ptr<C2ReflectorHelper> &reflector, C2String name,
+    C2Component::kind_t kind, C2Component::domain_t domain, C2String mediaType,
+    std::vector<C2String> aliases)
+    : C2InterfaceHelper(reflector) {
+    setDerivedInstance(this);
+
+    /*
+    addParameter(
+        DefineParam(mApiFeatures, C2_PARAMKEY_API_FEATURES)
+            .withConstValue(new C2ApiFeaturesSetting(C2Config::api_feature_t(
+                API_REFLECTION | API_VALUES | API_CURRENT_VALUES |
+                API_DEPENDENCY | API_SAME_INPUT_BUFFER)))
+            .build());
+*/
+
+    addParameter(DefineParam(mName, C2_PARAMKEY_COMPONENT_NAME)
+                     .withConstValue(AllocSharedString<C2ComponentNameSetting>(
+                         name.c_str()))
+                     .build());
+
+    if (aliases.size()) {
+        C2String joined;
+        for (const C2String &alias : aliases) {
+            if (joined.length()) {
+                joined += ",";
+            }
+            joined += alias;
+        }
+        addParameter(
+            DefineParam(mAliases, C2_PARAMKEY_COMPONENT_ALIASES)
+                .withConstValue(AllocSharedString<C2ComponentAliasesSetting>(
+                    joined.c_str()))
+                .build());
+    }
+
+    addParameter(DefineParam(mKind, C2_PARAMKEY_COMPONENT_KIND)
+                     .withConstValue(new C2ComponentKindSetting(kind))
+                     .build());
+
+    addParameter(DefineParam(mDomain, C2_PARAMKEY_COMPONENT_DOMAIN)
+                     .withConstValue(new C2ComponentDomainSetting(domain))
+                     .build());
+
+    // simple interfaces have single streams
+    addParameter(DefineParam(mInputStreamCount, C2_PARAMKEY_INPUT_STREAM_COUNT)
+                     .withConstValue(new C2PortStreamCountTuning::input(1))
+                     .build());
+
+    addParameter(
+        DefineParam(mOutputStreamCount, C2_PARAMKEY_OUTPUT_STREAM_COUNT)
+            .withConstValue(new C2PortStreamCountTuning::output(1))
+            .build());
+
+    // set up buffer formats and allocators
+
+    // default to linear buffers and no media type
+    C2BufferData::type_t rawBufferType = C2BufferData::LINEAR;
+    C2String rawMediaType;
+    C2Allocator::id_t rawAllocator = C2AllocatorStore::DEFAULT_LINEAR;
+    C2BlockPool::local_id_t rawPoolId = C2BlockPool::BASIC_LINEAR;
+    C2BufferData::type_t codedBufferType = C2BufferData::LINEAR;
+    int poolMask = GetCodec2PoolMask();
+    C2Allocator::id_t preferredLinearId =
+        GetPreferredLinearAllocatorId(poolMask);
+    C2Allocator::id_t codedAllocator = preferredLinearId;
+    C2BlockPool::local_id_t codedPoolId = C2BlockPool::BASIC_LINEAR;
+
+    switch (domain) {
+    case C2Component::DOMAIN_IMAGE:
+        [[fallthrough]];
+    case C2Component::DOMAIN_VIDEO:
+        // TODO: should we define raw image? The only difference is timestamp
+        // handling
+        rawBufferType = C2BufferData::GRAPHIC;
+        rawMediaType = MEDIA_MIMETYPE_VIDEO_RAW;
+        rawAllocator = C2PlatformAllocatorStore::GRALLOC;
+        rawPoolId = C2BlockPool::BASIC_GRAPHIC;
+        break;
+    case C2Component::DOMAIN_AUDIO:
+        rawBufferType = C2BufferData::LINEAR;
+        rawMediaType = MEDIA_MIMETYPE_AUDIO_RAW;
+        rawAllocator = preferredLinearId;
+        rawPoolId = C2BlockPool::BASIC_LINEAR;
+        break;
+    default:
+        break;
+    }
+    bool isEncoder = kind == C2Component::KIND_ENCODER;
+
+    // handle raw decoders
+    if (mediaType == rawMediaType) {
+        codedBufferType = rawBufferType;
+        codedAllocator = rawAllocator;
+        codedPoolId = rawPoolId;
+    }
+
+    addParameter(DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+                     .withConstValue(new C2StreamBufferTypeSetting::input(
+                         0u, isEncoder ? rawBufferType : codedBufferType))
+                     .build());
+
+    addParameter(
+        DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+            .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
+                isEncoder ? rawMediaType : mediaType))
+            .build());
+
+    addParameter(
+        DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+            .withConstValue(new C2StreamBufferTypeSetting::output(
+                0u, isEncoder ? codedBufferType : rawBufferType))
+            .build());
+
+    addParameter(
+        DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+            .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
+                isEncoder ? mediaType : rawMediaType))
+            .build());
+
+    C2Allocator::id_t inputAllocators[1] = {isEncoder ? rawAllocator
+                                                      : codedAllocator};
+    C2Allocator::id_t outputAllocators[1] = {isEncoder ? codedAllocator
+                                                       : rawAllocator};
+    C2BlockPool::local_id_t outputPoolIds[1] = {isEncoder ? codedPoolId
+                                                          : rawPoolId};
+
+    addParameter(
+        DefineParam(mInputAllocators, C2_PARAMKEY_INPUT_ALLOCATORS)
+            .withDefault(
+                C2PortAllocatorsTuning::input::AllocShared(inputAllocators))
+            .withFields({C2F(mInputAllocators, m.values[0]).any(),
+                         C2F(mInputAllocators, m.values).inRange(0, 1)})
+            .withSetter(
+                Setter<
+                    C2PortAllocatorsTuning::input>::NonStrictValuesWithNoDeps)
+            .build());
+
+    addParameter(
+        DefineParam(mOutputAllocators, C2_PARAMKEY_OUTPUT_ALLOCATORS)
+            .withDefault(
+                C2PortAllocatorsTuning::output::AllocShared(outputAllocators))
+            .withFields({C2F(mOutputAllocators, m.values[0]).any(),
+                         C2F(mOutputAllocators, m.values).inRange(0, 1)})
+            .withSetter(
+                Setter<
+                    C2PortAllocatorsTuning::output>::NonStrictValuesWithNoDeps)
+            .build());
+
+    addParameter(
+        DefineParam(mOutputPoolIds, C2_PARAMKEY_OUTPUT_BLOCK_POOLS)
+            .withDefault(
+                C2PortBlockPoolsTuning::output::AllocShared(outputPoolIds))
+            .withFields({C2F(mOutputPoolIds, m.values[0]).any(),
+                         C2F(mOutputPoolIds, m.values).inRange(0, 1)})
+            .withSetter(
+                Setter<
+                    C2PortBlockPoolsTuning::output>::NonStrictValuesWithNoDeps)
+            .build());
+
+    // add stateless params
+    addParameter(
+        DefineParam(mSubscribedParamIndices,
+                    C2_PARAMKEY_SUBSCRIBED_PARAM_INDICES)
+            .withDefault(C2SubscribedParamIndicesTuning::AllocShared(0u))
+            .withFields({C2F(mSubscribedParamIndices, m.values[0]).any(),
+                         C2F(mSubscribedParamIndices, m.values).any()})
+            .withSetter(
+                Setter<
+                    C2SubscribedParamIndicesTuning>::NonStrictValuesWithNoDeps)
+            .build());
+
+    /* TODO
+
+    addParameter(
+            DefineParam(mCurrentWorkOrdinal, C2_PARAMKEY_CURRENT_WORK)
+            .withDefault(new C2CurrentWorkTuning())
+            .withFields({ C2F(mCurrentWorkOrdinal, m.timeStamp).any(),
+                          C2F(mCurrentWorkOrdinal, m.frameIndex).any(),
+                          C2F(mCurrentWorkOrdinal, m.customOrdinal).any() })
+            .withSetter(Setter<C2CurrentWorkTuning>::NonStrictValuesWithNoDeps)
+            .build());
+
+    addParameter(
+            DefineParam(mLastInputQueuedWorkOrdinal,
+    C2_PARAMKEY_LAST_INPUT_QUEUED) .withDefault(new
+    C2LastWorkQueuedTuning::input()) .withFields({
+    C2F(mLastInputQueuedWorkOrdinal, m.timeStamp).any(),
+                          C2F(mLastInputQueuedWorkOrdinal, m.frameIndex).any(),
+                          C2F(mLastInputQueuedWorkOrdinal,
+    m.customOrdinal).any() })
+            .withSetter(Setter<C2LastWorkQueuedTuning::input>::NonStrictValuesWithNoDeps)
+            .build());
+
+    addParameter(
+            DefineParam(mLastOutputQueuedWorkOrdinal,
+    C2_PARAMKEY_LAST_OUTPUT_QUEUED) .withDefault(new
+    C2LastWorkQueuedTuning::output()) .withFields({
+    C2F(mLastOutputQueuedWorkOrdinal, m.timeStamp).any(),
+                          C2F(mLastOutputQueuedWorkOrdinal, m.frameIndex).any(),
+                          C2F(mLastOutputQueuedWorkOrdinal,
+    m.customOrdinal).any() })
+            .withSetter(Setter<C2LastWorkQueuedTuning::output>::NonStrictValuesWithNoDeps)
+            .build());
+
+    std::shared_ptr<C2OutOfMemoryTuning> mOutOfMemory;
+
+    std::shared_ptr<C2PortConfigCounterTuning::input> mInputConfigCounter;
+    std::shared_ptr<C2PortConfigCounterTuning::output> mOutputConfigCounter;
+    std::shared_ptr<C2ConfigCounterTuning> mDirectConfigCounter;
+
+    */
+}
+
+void SimpleInterface<void>::BaseParams::noInputLatency() {
+    addParameter(
+        DefineParam(mRequestedInputDelay, C2_PARAMKEY_INPUT_DELAY_REQUEST)
+            .withConstValue(new C2PortRequestedDelayTuning::input(0u))
+            .build());
+
+    addParameter(DefineParam(mActualInputDelay, C2_PARAMKEY_INPUT_DELAY)
+                     .withConstValue(new C2PortActualDelayTuning::input(0u))
+                     .build());
+}
+
+void SimpleInterface<void>::BaseParams::noOutputLatency() {
+    addParameter(
+        DefineParam(mRequestedOutputDelay, C2_PARAMKEY_OUTPUT_DELAY_REQUEST)
+            .withConstValue(new C2PortRequestedDelayTuning::output(0u))
+            .build());
+
+    addParameter(DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
+                     .withConstValue(new C2PortActualDelayTuning::output(0u))
+                     .build());
+}
+
+void SimpleInterface<void>::BaseParams::noPipelineLatency() {
+    addParameter(
+        DefineParam(mRequestedPipelineDelay, C2_PARAMKEY_PIPELINE_DELAY_REQUEST)
+            .withConstValue(new C2RequestedPipelineDelayTuning(0u))
+            .build());
+
+    addParameter(DefineParam(mActualPipelineDelay, C2_PARAMKEY_PIPELINE_DELAY)
+                     .withConstValue(new C2ActualPipelineDelayTuning(0u))
+                     .build());
+}
+
+void SimpleInterface<void>::BaseParams::noPrivateBuffers() {
+    addParameter(DefineParam(mPrivateAllocators, C2_PARAMKEY_PRIVATE_ALLOCATORS)
+                     .withConstValue(C2PrivateAllocatorsTuning::AllocShared(0u))
+                     .build());
+
+    addParameter(
+        DefineParam(mMaxPrivateBufferCount,
+                    C2_PARAMKEY_MAX_PRIVATE_BUFFER_COUNT)
+            .withConstValue(C2MaxPrivateBufferCountTuning::AllocShared(0u))
+            .build());
+
+    addParameter(DefineParam(mPrivatePoolIds, C2_PARAMKEY_PRIVATE_BLOCK_POOLS)
+                     .withConstValue(C2PrivateBlockPoolsTuning::AllocShared(0u))
+                     .build());
+}
+
+void SimpleInterface<void>::BaseParams::noInputReferences() {
+    addParameter(
+        DefineParam(mMaxInputReferenceAge, C2_PARAMKEY_INPUT_MAX_REFERENCE_AGE)
+            .withConstValue(new C2StreamMaxReferenceAgeTuning::input(0u))
+            .build());
+
+    addParameter(
+        DefineParam(mMaxInputReferenceCount,
+                    C2_PARAMKEY_INPUT_MAX_REFERENCE_COUNT)
+            .withConstValue(new C2StreamMaxReferenceCountTuning::input(0u))
+            .build());
+}
+
+void SimpleInterface<void>::BaseParams::noOutputReferences() {
+    addParameter(
+        DefineParam(mMaxOutputReferenceAge,
+                    C2_PARAMKEY_OUTPUT_MAX_REFERENCE_AGE)
+            .withConstValue(new C2StreamMaxReferenceAgeTuning::output(0u))
+            .build());
+
+    addParameter(
+        DefineParam(mMaxOutputReferenceCount,
+                    C2_PARAMKEY_OUTPUT_MAX_REFERENCE_COUNT)
+            .withConstValue(new C2StreamMaxReferenceCountTuning::output(0u))
+            .build());
+}
+
+void SimpleInterface<void>::BaseParams::noTimeStretch() {
+    addParameter(DefineParam(mTimeStretch, C2_PARAMKEY_TIME_STRETCH)
+                     .withConstValue(new C2ComponentTimeStretchTuning(1.f))
+                     .build());
+}
+
+/*
+    Clients need to handle the following base params due to custom dependency.
+
+    std::shared_ptr<C2ApiLevelSetting> mApiLevel;
+    std::shared_ptr<C2ComponentAttributesSetting> mAttrib;
+
+    std::shared_ptr<C2PortSuggestedBufferCountTuning::input>
+   mSuggestedInputBufferCount;
+    std::shared_ptr<C2PortSuggestedBufferCountTuning::output>
+   mSuggestedOutputBufferCount;
+
+    std::shared_ptr<C2TrippedTuning> mTripped;
+
+*/
+
+} // namespace android
diff --git a/system/codecs/c2/decoders/base/color_buffer_utils.cpp b/system/codecs/c2/decoders/base/color_buffer_utils.cpp
new file mode 100644
index 0000000..36aa336
--- /dev/null
+++ b/system/codecs/c2/decoders/base/color_buffer_utils.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <inttypes.h>
+#include <android-base/parseint.h>
+#include <android-base/properties.h>
+#include <android-base/strings.h>
+#include <log/log.h>
+#include <gralloc_cb_bp.h>
+#include <drm/virtgpu_drm.h>
+#include <xf86drm.h>
+
+#include "cros_gralloc_handle.h"
+
+static bool isMinigbmFromProperty() {
+  static constexpr const auto kGrallocProp = "ro.hardware.gralloc";
+
+  const auto grallocProp = android::base::GetProperty(kGrallocProp, "");
+  ALOGD("%s:codecs: minigbm query prop value is: %s", __FUNCTION__, grallocProp.c_str());
+
+  if (grallocProp == "minigbm") {
+    ALOGD("%s:codecs: Using minigbm, in minigbm mode.\n", __FUNCTION__);
+    return true;
+  } else {
+    ALOGD("%s:codecs: Is not using minigbm, in goldfish mode.\n", __FUNCTION__);
+    return false;
+  }
+}
+
+class ColorBufferUtilsGlobalState {
+public:
+    ColorBufferUtilsGlobalState() {
+        m_isMinigbm = isMinigbmFromProperty();
+
+        if (m_isMinigbm) {
+            static constexpr int kRendernodeMinor = 128;
+            m_rendernodeFd = drmOpenRender(kRendernodeMinor);
+        }
+    }
+
+    uint32_t getColorBufferHandle(native_handle_t const* handle) {
+        if (m_isMinigbm) {
+            struct drm_virtgpu_resource_info info;
+            if (!getResInfo(handle, &info)) {
+                ALOGE("%s: Error gtting color buffer handle (minigbm case)", __func__);
+                return -1;
+            }
+            return info.res_handle;
+        } else {
+            return cb_handle_t::from(handle)->hostHandle;
+        }
+    }
+
+private:
+
+    bool getResInfo(native_handle_t const* handle,
+                    struct drm_virtgpu_resource_info* info) {
+        memset(info, 0x0, sizeof(*info));
+        if (m_rendernodeFd < 0) {
+            ALOGE("%s: Error, rendernode fd missing\n", __func__);
+            return false;
+        }
+
+        struct drm_gem_close gem_close;
+        memset(&gem_close, 0x0, sizeof(gem_close));
+
+        cros_gralloc_handle const* cros_handle =
+            reinterpret_cast<cros_gralloc_handle const*>(handle);
+
+        uint32_t prime_handle;
+        int ret = drmPrimeFDToHandle(m_rendernodeFd, cros_handle->fds[0], &prime_handle);
+        if (ret) {
+            ALOGE("%s: DRM_IOCTL_PRIME_FD_TO_HANDLE failed: %s (errno %d)\n",
+                  __func__, strerror(errno), errno);
+            return false;
+        }
+
+        info->bo_handle = prime_handle;
+        gem_close.handle = prime_handle;
+
+        ret = drmIoctl(m_rendernodeFd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, info);
+        if (ret) {
+            ALOGE("%s: DRM_IOCTL_VIRTGPU_RESOURCE_INFO failed: %s (errno %d)\n",
+                  __func__, strerror(errno), errno);
+            drmIoctl(m_rendernodeFd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+            return false;
+        }
+
+        drmIoctl(m_rendernodeFd, DRM_IOCTL_GEM_CLOSE, &gem_close);
+        return true;
+    }
+
+    bool m_isMinigbm;
+    int m_rendernodeFd = -1; // to be closed when this process dies
+};
+
+static ColorBufferUtilsGlobalState* getGlobals() {
+    static ColorBufferUtilsGlobalState* globals = new ColorBufferUtilsGlobalState;
+    return globals;
+}
+
+uint32_t getColorBufferHandle(native_handle_t const* handle) {
+    return getGlobals()->getColorBufferHandle(handle);
+}
diff --git a/system/codecs/c2/decoders/base/cros_gralloc_handle.h b/system/codecs/c2/decoders/base/cros_gralloc_handle.h
new file mode 100644
index 0000000..2b70d4b
--- /dev/null
+++ b/system/codecs/c2/decoders/base/cros_gralloc_handle.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016 The Chromium OS Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef CROS_GRALLOC_HANDLE_H
+#define CROS_GRALLOC_HANDLE_H
+
+#include <cstdint>
+#include <cutils/native_handle.h>
+
+#define DRV_MAX_PLANES 4
+#define DRV_MAX_FDS (DRV_MAX_PLANES + 1)
+
+struct cros_gralloc_handle : public native_handle_t {
+	/*
+	 * File descriptors must immediately follow the native_handle_t base and used file
+	 * descriptors must be packed at the beginning of this array to work with
+	 * native_handle_clone().
+	 *
+	 * This field contains 'num_planes' plane file descriptors followed by an optional metadata
+	 * reserved region file descriptor if 'reserved_region_size' is greater than zero.
+	 */
+	int32_t fds[DRV_MAX_FDS];
+	uint32_t strides[DRV_MAX_PLANES];
+	uint32_t offsets[DRV_MAX_PLANES];
+	uint32_t sizes[DRV_MAX_PLANES];
+	uint32_t id;
+	uint32_t width;
+	uint32_t height;
+	uint32_t format; /* DRM format */
+	uint32_t tiling;
+	uint64_t format_modifier;
+	uint64_t use_flags; /* Buffer creation flags */
+	uint32_t magic;
+	uint32_t pixel_stride;
+	int32_t droid_format;
+	int32_t usage; /* Android usage. */
+	uint32_t num_planes;
+	uint64_t reserved_region_size;
+	uint64_t total_size; /* Total allocation size */
+	/*
+	 * Name is a null terminated char array located at handle->base.data[handle->name_offset].
+	 */
+	uint32_t name_offset;
+} __attribute__((packed));
+
+typedef const struct cros_gralloc_handle *cros_gralloc_handle_t;
+
+#endif
diff --git a/system/codecs/c2/decoders/base/exports.lds b/system/codecs/c2/decoders/base/exports.lds
new file mode 100644
index 0000000..641bae8
--- /dev/null
+++ b/system/codecs/c2/decoders/base/exports.lds
@@ -0,0 +1,7 @@
+{
+    global:
+        CreateCodec2Factory;
+        DestroyCodec2Factory;
+    local: *;
+};
+
diff --git a/system/codecs/c2/decoders/base/goldfish_media_utils.cpp b/system/codecs/c2/decoders/base/goldfish_media_utils.cpp
new file mode 100644
index 0000000..8013fe0
--- /dev/null
+++ b/system/codecs/c2/decoders/base/goldfish_media_utils.cpp
@@ -0,0 +1,227 @@
+// Copyright 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "goldfish_media_utils.h"
+
+#include "goldfish_address_space.h"
+
+#include <log/log.h>
+
+#define DEBUG 0
+#if DEBUG
+#define DDD(...) ALOGD(__VA_ARGS__)
+#else
+#define DDD(...) ((void)0)
+#endif
+
+#include <memory>
+#include <mutex>
+#include <vector>
+
+std::mutex sSingletonMutex;
+std::unique_ptr<GoldfishMediaTransport> sTransport;
+
+class GoldfishMediaTransportImpl : public GoldfishMediaTransport {
+  public:
+    GoldfishMediaTransportImpl();
+    ~GoldfishMediaTransportImpl();
+
+    virtual void writeParam(__u64 val, unsigned int num,
+                            unsigned int offSetToStartAddr = 0) override;
+    virtual bool sendOperation(MediaCodecType type, MediaOperation op,
+                               unsigned int offSetToStartAddr = 0) override;
+    virtual uint8_t *getBaseAddr() const override;
+    virtual uint8_t *getInputAddr(unsigned int offSet = 0) const override;
+    virtual uint8_t *getOutputAddr() const override;
+    virtual uint8_t *getReturnAddr(unsigned int offSet = 0) const override;
+    virtual __u64 offsetOf(uint64_t addr) const override;
+
+  public:
+    // each lot has 2 M
+    virtual int getMemorySlot() override {
+        std::lock_guard<std::mutex> g{mMemoryMutex};
+        // when there are just 1 decoder, it can pretty
+        // much use all the memory starting from 0;
+        // when there are two, each can use at least half
+        // the total memory, etc.
+        constexpr size_t search_order[] = {
+            0,                              // use 32M
+            16,                             // use 16M
+            8,  24,                         // use 8M
+            4,  12, 20, 28,                 // use 4M
+            2,  6,  10, 12, 18, 22, 26, 30, // use 2M
+            1,  3,  5,  7,  9,  11, 13, 15,
+            17, 19, 21, 23, 25, 27, 29, 31 // use 1M
+        };
+        for (size_t i = 0; i < sizeof(search_order) / sizeof(search_order[0]);
+             ++i) {
+            int slot = search_order[i];
+            if (mMemoryLotsAvailable[slot]) {
+                mMemoryLotsAvailable[slot] = false;
+                return slot;
+            }
+        }
+        return -1;
+    }
+    virtual void returnMemorySlot(int lot) override {
+        if (lot < 0 || lot >= mMemoryLotsAvailable.size()) {
+            return;
+        }
+        std::lock_guard<std::mutex> g{mMemoryMutex};
+        if (mMemoryLotsAvailable[lot] == false) {
+            mMemoryLotsAvailable[lot] = true;
+        } else {
+            ALOGE("Error, cannot twice");
+        }
+    }
+
+  private:
+    std::mutex mMemoryMutex;
+    std::vector<bool> mMemoryLotsAvailable = std::vector<bool>(32, true);
+
+    address_space_handle_t mHandle;
+    uint64_t mOffset;
+    uint64_t mPhysAddr;
+    uint64_t mSize;
+    void *mStartPtr = nullptr;
+
+    // MediaCodecType will be or'd together with the metadata, so the highest
+    // 8-bits will have the type.
+    static __u64 makeMetadata(MediaCodecType type, MediaOperation op,
+                              uint64_t offset);
+
+    // Chunk size for parameters/return data
+    static constexpr size_t kParamSizeBytes = 4096; // 4K
+    // Chunk size for input
+    static constexpr size_t kInputSizeBytes = 4096 * 4096; // 16M
+    // Chunk size for output
+    static constexpr size_t kOutputSizeBytes = 4096 * 4096; // 16M
+    // Maximum number of parameters that can be passed
+    static constexpr size_t kMaxParams = 32;
+    // Offset from the memory region for return data (8 is size of
+    // a parameter in bytes)
+    static constexpr size_t kReturnOffset = 8 * kMaxParams;
+};
+
+GoldfishMediaTransportImpl::~GoldfishMediaTransportImpl() {
+    if (mHandle >= 0) {
+        goldfish_address_space_close(mHandle);
+        mHandle = -1;
+    }
+}
+
+GoldfishMediaTransportImpl::GoldfishMediaTransportImpl() {
+    // Allocate host memory; the contiguous memory region will be laid out as
+    // follows:
+    // ========================================================
+    // | kParamSizeBytes | kInputSizeBytes | kOutputSizeBytes |
+    // ========================================================
+    mHandle = goldfish_address_space_open();
+    if (mHandle < 0) {
+        ALOGE("Failed to ping host to allocate memory");
+        abort();
+    }
+    mSize = kParamSizeBytes + kInputSizeBytes + kOutputSizeBytes;
+    bool success =
+        goldfish_address_space_allocate(mHandle, mSize, &mPhysAddr, &mOffset);
+    if (success) {
+        ALOGI("successfully allocated %d bytes in goldfish_address_block",
+              (int)mSize);
+        mStartPtr = goldfish_address_space_map(mHandle, mOffset, mSize);
+        ALOGI("guest address is %p", mStartPtr);
+
+        struct address_space_ping pingInfo;
+        pingInfo.metadata = GoldfishAddressSpaceSubdeviceType::Media;
+        pingInfo.offset = mOffset;
+        if (goldfish_address_space_ping(mHandle, &pingInfo) == false) {
+            ALOGE("Failed to ping host to allocate memory");
+            abort();
+            return;
+        } else {
+            ALOGI("successfully pinged host to allocate memory");
+        }
+    } else {
+        ALOGE("failed to allocate %d bytes in goldfish_address_block",
+              (int)mSize);
+        abort();
+    }
+}
+
+// static
+GoldfishMediaTransport *GoldfishMediaTransport::getInstance() {
+    std::lock_guard<std::mutex> g{sSingletonMutex};
+    if (sTransport == nullptr) {
+        sTransport.reset(new GoldfishMediaTransportImpl());
+    }
+    return sTransport.get();
+}
+
+// static
+__u64 GoldfishMediaTransportImpl::makeMetadata(MediaCodecType type,
+                                               MediaOperation op,
+                                               uint64_t offset) {
+    // Shift |type| into the highest 8-bits, leaving the lower bits for other
+    // metadata.
+    offset = offset >> 20;
+    if (offset < 0 || offset >= 32) {
+        ALOGE("offset %d is wrong", (int)offset);
+        abort();
+    }
+    return ((__u64)type << (64 - 8)) | (offset << 8) | static_cast<uint8_t>(op);
+}
+
+uint8_t *GoldfishMediaTransportImpl::getInputAddr(unsigned int offSet) const {
+    return (uint8_t *)mStartPtr + kParamSizeBytes + offSet;
+}
+
+uint8_t *GoldfishMediaTransportImpl::getOutputAddr() const {
+    return getInputAddr() + kInputSizeBytes;
+}
+
+uint8_t *GoldfishMediaTransportImpl::getBaseAddr() const {
+    return (uint8_t *)mStartPtr;
+}
+
+uint8_t *GoldfishMediaTransportImpl::getReturnAddr(unsigned int offSet) const {
+    return (uint8_t *)mStartPtr + kReturnOffset + offSet;
+}
+
+__u64 GoldfishMediaTransportImpl::offsetOf(uint64_t addr) const {
+    return addr - (uint64_t)mStartPtr;
+}
+
+void GoldfishMediaTransportImpl::writeParam(__u64 val, unsigned int num,
+                                            unsigned int offSetToStartAddr) {
+    uint8_t *p = (uint8_t *)mStartPtr + (offSetToStartAddr);
+    uint64_t *pint = (uint64_t *)(p + 8 * num);
+    *pint = val;
+}
+
+bool GoldfishMediaTransportImpl::sendOperation(MediaCodecType type,
+                                               MediaOperation op,
+                                               unsigned int offSetToStartAddr) {
+    struct address_space_ping pingInfo;
+    pingInfo.metadata = makeMetadata(type, op, offSetToStartAddr);
+    pingInfo.offset = mOffset; // + (offSetToStartAddr);
+    if (goldfish_address_space_ping(mHandle, &pingInfo) == false) {
+        ALOGE("failed to ping host");
+        abort();
+        return false;
+    } else {
+        DDD("successfully pinged host for operation type=%d, op=%d", (int)type,
+            (int)op);
+    }
+
+    return true;
+}
diff --git a/system/codecs/c2/decoders/base/include/SimpleC2Component.h b/system/codecs/c2/decoders/base/include/SimpleC2Component.h
new file mode 100644
index 0000000..2c960a7
--- /dev/null
+++ b/system/codecs/c2/decoders/base/include/SimpleC2Component.h
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SIMPLE_C2_COMPONENT_H_
+#define SIMPLE_C2_COMPONENT_H_
+
+#include <list>
+#include <unordered_map>
+
+#include <C2Component.h>
+
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/Mutexed.h>
+
+namespace android {
+
+class SimpleC2Component
+    : public C2Component,
+      public std::enable_shared_from_this<SimpleC2Component> {
+  public:
+    explicit SimpleC2Component(
+        const std::shared_ptr<C2ComponentInterface> &intf);
+    virtual ~SimpleC2Component();
+
+    // C2Component
+    // From C2Component
+    virtual c2_status_t
+    setListener_vb(const std::shared_ptr<Listener> &listener,
+                   c2_blocking_t mayBlock) override;
+    virtual c2_status_t
+    queue_nb(std::list<std::unique_ptr<C2Work>> *const items) override;
+    virtual c2_status_t
+    announce_nb(const std::vector<C2WorkOutline> &items) override;
+    virtual c2_status_t
+    flush_sm(flush_mode_t mode,
+             std::list<std::unique_ptr<C2Work>> *const flushedWork) override;
+    virtual c2_status_t drain_nb(drain_mode_t mode) override;
+    virtual c2_status_t start() override;
+    virtual c2_status_t stop() override;
+    virtual c2_status_t reset() override;
+    virtual c2_status_t release() override;
+    virtual std::shared_ptr<C2ComponentInterface> intf() override;
+
+    // for handler
+    bool processQueue();
+
+  protected:
+    /**
+     * Initialize internal states of the component according to the config set
+     * in the interface.
+     *
+     * This method is called during start(), but only at the first invocation or
+     * after reset().
+     */
+    virtual c2_status_t onInit() = 0;
+
+    /**
+     * Stop the component.
+     */
+    virtual c2_status_t onStop() = 0;
+
+    /**
+     * Reset the component.
+     */
+    virtual void onReset() = 0;
+
+    /**
+     * Release the component.
+     */
+    virtual void onRelease() = 0;
+
+    /**
+     * Flush the component.
+     */
+    virtual c2_status_t onFlush_sm() = 0;
+
+    /**
+     * Process the given work and finish pending work using finish().
+     *
+     * \param[in,out]   work    the work to process
+     * \param[in]       pool    the pool to use for allocating output blocks.
+     */
+    virtual void process(const std::unique_ptr<C2Work> &work,
+                         const std::shared_ptr<C2BlockPool> &pool) = 0;
+
+    /**
+     * Drain the component and finish pending work using finish().
+     *
+     * \param[in]   drainMode   mode of drain.
+     * \param[in]   pool        the pool to use for allocating output blocks.
+     *
+     * \retval C2_OK            The component has drained all pending output
+     *                          work.
+     * \retval C2_OMITTED       Unsupported mode (e.g. DRAIN_CHAIN)
+     */
+    virtual c2_status_t drain(uint32_t drainMode,
+                              const std::shared_ptr<C2BlockPool> &pool) = 0;
+
+    // for derived classes
+    /**
+     * Finish pending work.
+     *
+     * This method will retrieve the pending work according to |frameIndex| and
+     * feed the work into |fillWork| function. |fillWork| must be
+     * "non-blocking". Once |fillWork| returns the filled work will be returned
+     * to the client.
+     *
+     * \param[in]   frameIndex    the index of the pending work
+     * \param[in]   fillWork      the function to fill the retrieved work.
+     */
+    void finish(uint64_t frameIndex,
+                std::function<void(const std::unique_ptr<C2Work> &)> fillWork);
+
+    /**
+     * Clone pending or current work and send the work back to client.
+     *
+     * This method will retrieve and clone the pending or current work according
+     * to |frameIndex| and feed the work into |fillWork| function. |fillWork|
+     * must be "non-blocking". Once |fillWork| returns the filled work will be
+     * returned to the client.
+     *
+     * \param[in]   frameIndex    the index of the work
+     * \param[in]   currentWork   the current work under processing
+     * \param[in]   fillWork      the function to fill the retrieved work.
+     */
+    void
+    cloneAndSend(uint64_t frameIndex,
+                 const std::unique_ptr<C2Work> &currentWork,
+                 std::function<void(const std::unique_ptr<C2Work> &)> fillWork);
+
+    std::shared_ptr<C2Buffer>
+    createLinearBuffer(const std::shared_ptr<C2LinearBlock> &block);
+
+    std::shared_ptr<C2Buffer>
+    createLinearBuffer(const std::shared_ptr<C2LinearBlock> &block,
+                       size_t offset, size_t size);
+
+    std::shared_ptr<C2Buffer>
+    createGraphicBuffer(const std::shared_ptr<C2GraphicBlock> &block);
+
+    std::shared_ptr<C2Buffer>
+    createGraphicBuffer(const std::shared_ptr<C2GraphicBlock> &block,
+                        const C2Rect &crop);
+
+    static constexpr uint32_t NO_DRAIN = ~0u;
+
+    C2ReadView mDummyReadView;
+
+  private:
+    const std::shared_ptr<C2ComponentInterface> mIntf;
+
+    class WorkHandler : public AHandler {
+      public:
+        enum {
+            kWhatProcess,
+            kWhatInit,
+            kWhatStart,
+            kWhatStop,
+            kWhatReset,
+            kWhatRelease,
+        };
+
+        WorkHandler();
+        ~WorkHandler() override = default;
+
+        void setComponent(const std::shared_ptr<SimpleC2Component> &thiz);
+
+      protected:
+        void onMessageReceived(const sp<AMessage> &msg) override;
+
+      private:
+        std::weak_ptr<SimpleC2Component> mThiz;
+        bool mRunning;
+    };
+
+    enum {
+        UNINITIALIZED,
+        STOPPED,
+        RUNNING,
+    };
+
+    struct ExecState {
+        ExecState() : mState(UNINITIALIZED) {}
+
+        int mState;
+        std::shared_ptr<C2Component::Listener> mListener;
+    };
+    Mutexed<ExecState> mExecState;
+
+    sp<ALooper> mLooper;
+    sp<WorkHandler> mHandler;
+
+    class WorkQueue {
+      public:
+        typedef std::unordered_map<uint64_t, std::unique_ptr<C2Work>>
+            PendingWork;
+
+        inline WorkQueue() : mFlush(false), mGeneration(0ul) {}
+
+        inline uint64_t generation() const { return mGeneration; }
+        inline void incGeneration() {
+            ++mGeneration;
+            mFlush = true;
+        }
+
+        std::unique_ptr<C2Work> pop_front();
+        void push_back(std::unique_ptr<C2Work> work);
+        bool empty() const;
+        uint32_t drainMode() const;
+        void markDrain(uint32_t drainMode);
+        inline bool popPendingFlush() {
+            bool flush = mFlush;
+            mFlush = false;
+            return flush;
+        }
+        void clear();
+        PendingWork &pending() { return mPendingWork; }
+
+      private:
+        struct Entry {
+            std::unique_ptr<C2Work> work;
+            uint32_t drainMode;
+        };
+
+        bool mFlush;
+        uint64_t mGeneration;
+        std::list<Entry> mQueue;
+        PendingWork mPendingWork;
+    };
+    Mutexed<WorkQueue> mWorkQueue;
+
+    class BlockingBlockPool;
+    std::shared_ptr<BlockingBlockPool> mOutputBlockPool;
+
+    SimpleC2Component() = delete;
+};
+
+} // namespace android
+
+#endif // SIMPLE_C2_COMPONENT_H_
diff --git a/system/codecs/c2/decoders/base/include/SimpleC2Interface.h b/system/codecs/c2/decoders/base/include/SimpleC2Interface.h
new file mode 100644
index 0000000..5fbfa3f
--- /dev/null
+++ b/system/codecs/c2/decoders/base/include/SimpleC2Interface.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SIMPLE_C2_INTERFACE_H_
+#define ANDROID_SIMPLE_C2_INTERFACE_H_
+
+#include <C2Component.h>
+#include <C2Config.h>
+#include <util/C2InterfaceHelper.h>
+
+namespace android {
+
+/**
+ * Wrap a common interface object (such as Codec2Client::Interface, or
+ * C2InterfaceHelper into a C2ComponentInterface.
+ *
+ * \param T common interface type
+ */
+template <typename T> class SimpleC2Interface : public C2ComponentInterface {
+  public:
+    SimpleC2Interface(const char *name, c2_node_id_t id,
+                      const std::shared_ptr<T> &impl)
+        : mName(name), mId(id), mImpl(impl) {}
+
+    ~SimpleC2Interface() override = default;
+
+    // From C2ComponentInterface
+    C2String getName() const override { return mName; }
+    c2_node_id_t getId() const override { return mId; }
+    c2_status_t query_vb(const std::vector<C2Param *> &stackParams,
+                         const std::vector<C2Param::Index> &heapParamIndices,
+                         c2_blocking_t mayBlock,
+                         std::vector<std::unique_ptr<C2Param>>
+                             *const heapParams) const override {
+        return mImpl->query(stackParams, heapParamIndices, mayBlock,
+                            heapParams);
+    }
+    c2_status_t
+    config_vb(const std::vector<C2Param *> &params, c2_blocking_t mayBlock,
+              std::vector<std::unique_ptr<C2SettingResult>> *const failures)
+        override {
+        return mImpl->config(params, mayBlock, failures);
+    }
+    c2_status_t createTunnel_sm(c2_node_id_t) override { return C2_OMITTED; }
+    c2_status_t releaseTunnel_sm(c2_node_id_t) override { return C2_OMITTED; }
+    c2_status_t querySupportedParams_nb(
+        std::vector<std::shared_ptr<C2ParamDescriptor>> *const params)
+        const override {
+        return mImpl->querySupportedParams(params);
+    }
+    c2_status_t
+    querySupportedValues_vb(std::vector<C2FieldSupportedValuesQuery> &fields,
+                            c2_blocking_t mayBlock) const override {
+        return mImpl->querySupportedValues(fields, mayBlock);
+    }
+
+  private:
+    C2String mName;
+    const c2_node_id_t mId;
+    const std::shared_ptr<T> mImpl;
+};
+
+/**
+ * Utility classes for common interfaces.
+ */
+template <> class SimpleC2Interface<void> {
+  public:
+    /**
+     * Base Codec 2.0 parameters required for all components.
+     */
+    struct BaseParams : C2InterfaceHelper {
+        explicit BaseParams(
+            const std::shared_ptr<C2ReflectorHelper> &helper, C2String name,
+            C2Component::kind_t kind, C2Component::domain_t domain,
+            C2String mediaType,
+            std::vector<C2String> aliases = std::vector<C2String>());
+
+        /// Marks that this component has no input latency. Otherwise, component
+        /// must add support for C2PortRequestedDelayTuning::input and
+        /// C2PortActualDelayTuning::input.
+        void noInputLatency();
+
+        /// Marks that this component has no output latency. Otherwise,
+        /// component must add support for C2PortRequestedDelayTuning::output
+        /// and C2PortActualDelayTuning::output.
+        void noOutputLatency();
+
+        /// Marks that this component has no pipeline latency. Otherwise,
+        /// component must add support for C2RequestedPipelineDelayTuning and
+        /// C2ActualPipelineDelayTuning.
+        void noPipelineLatency();
+
+        /// Marks that this component has no need for private buffers.
+        /// Otherwise, component must add support for
+        /// C2MaxPrivateBufferCountTuning, C2PrivateAllocatorsTuning and
+        /// C2PrivateBlockPoolsTuning.
+        void noPrivateBuffers();
+
+        /// Marks that this component holds no references to input buffers.
+        /// Otherwise, component must add support for
+        /// C2StreamMaxReferenceAgeTuning::input and
+        /// C2StreamMaxReferenceCountTuning::input.
+        void noInputReferences();
+
+        /// Marks that this component holds no references to output buffers.
+        /// Otherwise, component must add support for
+        /// C2StreamMaxReferenceAgeTuning::output and
+        /// C2StreamMaxReferenceCountTuning::output.
+        void noOutputReferences();
+
+        /// Marks that this component does not stretch time. Otherwise,
+        /// component must add support for C2ComponentTimeStretchTuning.
+        void noTimeStretch();
+
+        std::shared_ptr<C2ApiLevelSetting> mApiLevel;
+        std::shared_ptr<C2ApiFeaturesSetting> mApiFeatures;
+
+        std::shared_ptr<C2PlatformLevelSetting> mPlatformLevel;
+        std::shared_ptr<C2PlatformFeaturesSetting> mPlatformFeatures;
+
+        std::shared_ptr<C2ComponentNameSetting> mName;
+        std::shared_ptr<C2ComponentAliasesSetting> mAliases;
+        std::shared_ptr<C2ComponentKindSetting> mKind;
+        std::shared_ptr<C2ComponentDomainSetting> mDomain;
+        std::shared_ptr<C2ComponentAttributesSetting> mAttrib;
+        std::shared_ptr<C2ComponentTimeStretchTuning> mTimeStretch;
+
+        std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+        std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
+        std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+        std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+
+        std::shared_ptr<C2PortRequestedDelayTuning::input> mRequestedInputDelay;
+        std::shared_ptr<C2PortRequestedDelayTuning::output>
+            mRequestedOutputDelay;
+        std::shared_ptr<C2RequestedPipelineDelayTuning> mRequestedPipelineDelay;
+
+        std::shared_ptr<C2PortActualDelayTuning::input> mActualInputDelay;
+        std::shared_ptr<C2PortActualDelayTuning::output> mActualOutputDelay;
+        std::shared_ptr<C2ActualPipelineDelayTuning> mActualPipelineDelay;
+
+        std::shared_ptr<C2StreamMaxReferenceAgeTuning::input>
+            mMaxInputReferenceAge;
+        std::shared_ptr<C2StreamMaxReferenceCountTuning::input>
+            mMaxInputReferenceCount;
+        std::shared_ptr<C2StreamMaxReferenceAgeTuning::output>
+            mMaxOutputReferenceAge;
+        std::shared_ptr<C2StreamMaxReferenceCountTuning::output>
+            mMaxOutputReferenceCount;
+        std::shared_ptr<C2MaxPrivateBufferCountTuning> mMaxPrivateBufferCount;
+
+        std::shared_ptr<C2PortStreamCountTuning::input> mInputStreamCount;
+        std::shared_ptr<C2PortStreamCountTuning::output> mOutputStreamCount;
+
+        std::shared_ptr<C2SubscribedParamIndicesTuning> mSubscribedParamIndices;
+        std::shared_ptr<C2PortSuggestedBufferCountTuning::input>
+            mSuggestedInputBufferCount;
+        std::shared_ptr<C2PortSuggestedBufferCountTuning::output>
+            mSuggestedOutputBufferCount;
+
+        std::shared_ptr<C2CurrentWorkTuning> mCurrentWorkOrdinal;
+        std::shared_ptr<C2LastWorkQueuedTuning::input>
+            mLastInputQueuedWorkOrdinal;
+        std::shared_ptr<C2LastWorkQueuedTuning::output>
+            mLastOutputQueuedWorkOrdinal;
+
+        std::shared_ptr<C2PortAllocatorsTuning::input> mInputAllocators;
+        std::shared_ptr<C2PortAllocatorsTuning::output> mOutputAllocators;
+        std::shared_ptr<C2PrivateAllocatorsTuning> mPrivateAllocators;
+        std::shared_ptr<C2PortBlockPoolsTuning::output> mOutputPoolIds;
+        std::shared_ptr<C2PrivateBlockPoolsTuning> mPrivatePoolIds;
+
+        std::shared_ptr<C2TrippedTuning> mTripped;
+        std::shared_ptr<C2OutOfMemoryTuning> mOutOfMemory;
+
+        std::shared_ptr<C2PortConfigCounterTuning::input> mInputConfigCounter;
+        std::shared_ptr<C2PortConfigCounterTuning::output> mOutputConfigCounter;
+        std::shared_ptr<C2ConfigCounterTuning> mDirectConfigCounter;
+    };
+};
+
+template <typename T> using SimpleInterface = SimpleC2Interface<T>;
+
+template <typename T, typename... Args>
+std::shared_ptr<T> AllocSharedString(const Args(&...args), const char *str) {
+    size_t len = strlen(str) + 1;
+    std::shared_ptr<T> ret = T::AllocShared(len, args...);
+    strcpy(ret->m.value, str);
+    return ret;
+}
+
+template <typename T, typename... Args>
+std::shared_ptr<T> AllocSharedString(const Args(&...args),
+                                     const std::string &str) {
+    std::shared_ptr<T> ret = T::AllocShared(str.length() + 1, args...);
+    strcpy(ret->m.value, str.c_str());
+    return ret;
+}
+
+template <typename T> struct Setter {
+    typedef typename std::remove_reference<T>::type type;
+
+    static C2R NonStrictValueWithNoDeps(bool mayBlock,
+                                        C2InterfaceHelper::C2P<type> &me) {
+        (void)mayBlock;
+        return me.F(me.v.value).validatePossible(me.v.value);
+    }
+
+    static C2R NonStrictValuesWithNoDeps(bool mayBlock,
+                                         C2InterfaceHelper::C2P<type> &me) {
+        (void)mayBlock;
+        C2R res = C2R::Ok();
+        for (size_t ix = 0; ix < me.v.flexCount(); ++ix) {
+            res.plus(
+                me.F(me.v.m.values[ix]).validatePossible(me.v.m.values[ix]));
+        }
+        return res;
+    }
+
+    static C2R StrictValueWithNoDeps(bool mayBlock,
+                                     const C2InterfaceHelper::C2P<type> &old,
+                                     C2InterfaceHelper::C2P<type> &me) {
+        (void)mayBlock;
+        if (!me.F(me.v.value).supportsNow(me.v.value)) {
+            me.set().value = old.v.value;
+        }
+        return me.F(me.v.value).validatePossible(me.v.value);
+    }
+};
+
+} // namespace android
+
+#endif // ANDROID_SIMPLE_C2_INTERFACE_H_
diff --git a/system/codecs/c2/decoders/base/include/color_buffer_utils.h b/system/codecs/c2/decoders/base/include/color_buffer_utils.h
new file mode 100644
index 0000000..d0a7876
--- /dev/null
+++ b/system/codecs/c2/decoders/base/include/color_buffer_utils.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <cutils/native_handle.h>
+
+uint32_t getColorBufferHandle(native_handle_t const* handle);
diff --git a/system/codecs/c2/decoders/base/include/goldfish_media_utils.h b/system/codecs/c2/decoders/base/include/goldfish_media_utils.h
new file mode 100644
index 0000000..b3e26ad
--- /dev/null
+++ b/system/codecs/c2/decoders/base/include/goldfish_media_utils.h
@@ -0,0 +1,101 @@
+// Copyright 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <linux/types.h>
+#include <stdint.h>
+
+#ifndef GOLDFISH_COMMON_GOLDFISH_DEFS_H
+#define GOLDFISH_COMMON_GOLDFISH_DEFS_H
+
+enum class MediaCodecType : __u8 {
+    VP8Codec = 0,
+    VP9Codec = 1,
+    H264Codec = 2,
+    Max = 3,
+};
+
+enum class MediaOperation : __u8 {
+    InitContext = 0,
+    DestroyContext = 1,
+    DecodeImage = 2,
+    GetImage = 3,
+    Flush = 4,
+    Reset = 5,
+    Max = 6,
+};
+
+// This class will abstract away the knowledge required to send media codec data
+// to the host. The implementation should only need the following information to
+// properly send the data:
+//   1) Which codec to use (MediaCodecType)
+//   2) What operation to perform (MediaOperation)
+//
+// Example:
+//   auto transport = GoldfishMediaTransport::getInstance();
+//
+class GoldfishMediaTransport {
+  protected:
+    GoldfishMediaTransport() {}
+
+  public:
+    virtual ~GoldfishMediaTransport() {}
+
+    // Writes a parameter to send to the host. Each parameter will take up
+    // 64-bits. |val| is the value of the parameter, and |num| is the parameter
+    // number, starting from 0. If |val| is an address, wrap it around
+    // offsetOf(), e.g., writeParam(offsetOf((uint64_t)ptr), 2);
+    virtual void writeParam(__u64 val, unsigned int num,
+                            unsigned int offSetToStartAddr = 0) = 0;
+    // Send the operation to perform to the host. At the time of this call, any
+    // parameters that the host needs should have already been passed using
+    // writeParam().
+    virtual bool sendOperation(MediaCodecType codec, MediaOperation op,
+                               unsigned int offSetToStartAddr = 0) = 0;
+    // Get the address for input. This is usually given the codec context to
+    // write data into for the host to process.
+    virtual uint8_t *getInputAddr(unsigned int offSet = 0) const = 0;
+    // Get the address for base pointer
+    virtual uint8_t *getBaseAddr() const = 0;
+    // Get the address for output. This is usually given to the codec context to
+    // read data written there by the host.
+    virtual uint8_t *getOutputAddr() const = 0;
+    // Get the address for return data from the host. The guest codec
+    // implementation will have knowledge of how the return data is laid out.
+    virtual uint8_t *getReturnAddr(unsigned int offSet = 0) const = 0;
+    // Get the offset of an address relative to the starting address of the
+    // allocated memory region. Use this for passing pointers from the guest to
+    // the host, as the guest address will be translated, thus the offset is the
+    // only value of significance.
+    virtual __u64 offsetOf(uint64_t addr) const = 0;
+
+    // Get a slot of memory (8 M per slot) for use by a decoder instance.
+    // returns -1 for failure; or a slot >=0 on success.
+    // as of now, there are only 4 slots for use, each has 8 M, it is up
+    // to client on how to use it.
+    // 0th slot: [base, base+8M)
+    // ...
+    // ith slot: [base+8M*i, base+8M*(i+1))
+    virtual int getMemorySlot() = 0;
+
+    // Return a slot back to pool. the slot should be valid >=0 and less
+    // than the total size of slots. If nobody returns slot timely, the
+    // new client could get -1 from getMemorySlot()
+    virtual void returnMemorySlot(int slot) = 0;
+
+    static GoldfishMediaTransport *getInstance();
+};
+
+__u64 goldfish_create_media_metadata(MediaCodecType codecType, __u64 metadata);
+
+#endif
diff --git a/system/codecs/c2/decoders/vpxdec/Android.bp b/system/codecs/c2/decoders/vpxdec/Android.bp
new file mode 100644
index 0000000..7be2d50
--- /dev/null
+++ b/system/codecs/c2/decoders/vpxdec/Android.bp
@@ -0,0 +1,57 @@
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "device_generic_goldfish-opengl_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-Apache-2.0
+    default_applicable_licenses: ["device_generic_goldfish-opengl_license"],
+}
+
+cc_library_shared {
+    name: "libcodec2_goldfish_vp9dec",
+    vendor: true,
+    defaults: [
+        "libcodec2_goldfish-defaults",
+    ],
+
+    srcs: ["C2GoldfishVpxDec.cpp",
+        "goldfish_vpx_impl.cpp",
+    ],
+
+    shared_libs: ["libvpx",
+	    "android.hardware.graphics.allocator@3.0",
+		"android.hardware.graphics.mapper@3.0",
+         "libgoldfish_codec2_store",
+    ],
+
+   header_libs: [
+    "libgralloc_cb.ranchu",
+    ],
+
+    cflags: [
+        "-DVP9",
+    ],
+}
+
+cc_library_shared {
+    name: "libcodec2_goldfish_vp8dec",
+    vendor: true,
+    defaults: [
+        "libcodec2_goldfish-defaults",
+    ],
+
+    srcs: ["C2GoldfishVpxDec.cpp",
+        "goldfish_vpx_impl.cpp",
+    ],
+
+
+   header_libs: [
+    "libgralloc_cb.ranchu",
+    ],
+
+    shared_libs: ["libvpx",
+	    "android.hardware.graphics.allocator@3.0",
+		"android.hardware.graphics.mapper@3.0",
+         "libgoldfish_codec2_store",
+    ],
+}
diff --git a/system/codecs/c2/decoders/vpxdec/C2GoldfishVpxDec.cpp b/system/codecs/c2/decoders/vpxdec/C2GoldfishVpxDec.cpp
new file mode 100644
index 0000000..7df99ff
--- /dev/null
+++ b/system/codecs/c2/decoders/vpxdec/C2GoldfishVpxDec.cpp
@@ -0,0 +1,923 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "C2GoldfishVpxDec"
+#include <log/log.h>
+
+#include <algorithm>
+
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+
+#include <C2AllocatorGralloc.h>
+#include <C2PlatformSupport.h>
+//#include <android/hardware/graphics/common/1.0/types.h>
+
+#include <android/hardware/graphics/allocator/3.0/IAllocator.h>
+#include <android/hardware/graphics/mapper/3.0/IMapper.h>
+#include <hidl/LegacySupport.h>
+
+#include <C2Debug.h>
+#include <C2PlatformSupport.h>
+#include <SimpleC2Interface.h>
+#include <goldfish_codec2/store/GoldfishComponentStore.h>
+
+#include <gralloc_cb_bp.h>
+
+#include <color_buffer_utils.h>
+
+#include "C2GoldfishVpxDec.h"
+
+#define DEBUG 0
+#if DEBUG
+#define DDD(...) ALOGW(__VA_ARGS__)
+#else
+#define DDD(...) ((void)0)
+#endif
+using ::android::hardware::graphics::common::V1_0::BufferUsage;
+using ::android::hardware::graphics::common::V1_2::PixelFormat;
+
+namespace android {
+constexpr size_t kMinInputBufferSize = 2 * 1024 * 1024;
+#ifdef VP9
+constexpr char COMPONENT_NAME[] = "c2.goldfish.vp9.decoder";
+#else
+constexpr char COMPONENT_NAME[] = "c2.goldfish.vp8.decoder";
+#endif
+
+class C2GoldfishVpxDec::IntfImpl : public SimpleInterface<void>::BaseParams {
+  public:
+    explicit IntfImpl(const std::shared_ptr<C2ReflectorHelper> &helper)
+        : SimpleInterface<void>::BaseParams(helper, COMPONENT_NAME,
+                                            C2Component::KIND_DECODER,
+                                            C2Component::DOMAIN_VIDEO,
+#ifdef VP9
+                                            MEDIA_MIMETYPE_VIDEO_VP9
+#else
+                                            MEDIA_MIMETYPE_VIDEO_VP8
+#endif
+          ) {
+        DDD("calling IntfImpl now helper %p", helper.get());
+        noPrivateBuffers(); // TODO: account for our buffers here
+        noInputReferences();
+        noOutputReferences();
+        noInputLatency();
+        noTimeStretch();
+
+        // TODO: output latency and reordering
+
+        addParameter(DefineParam(mAttrib, C2_PARAMKEY_COMPONENT_ATTRIBUTES)
+                         .withConstValue(new C2ComponentAttributesSetting(
+                             C2Component::ATTRIB_IS_TEMPORAL))
+                         .build());
+
+        addParameter(
+            DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+                .withDefault(new C2StreamPictureSizeInfo::output(0u, 320, 240))
+                .withFields({
+                    C2F(mSize, width).inRange(2, 4096, 2),
+                    C2F(mSize, height).inRange(2, 4096, 2),
+                })
+                .withSetter(SizeSetter)
+                .build());
+
+#ifdef VP9
+        // TODO: Add C2Config::PROFILE_VP9_2HDR ??
+        addParameter(
+            DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+                .withDefault(new C2StreamProfileLevelInfo::input(
+                    0u, C2Config::PROFILE_VP9_0, C2Config::LEVEL_VP9_5))
+                .withFields({C2F(mProfileLevel, profile)
+                                 .oneOf({C2Config::PROFILE_VP9_0,
+                                         C2Config::PROFILE_VP9_2}),
+                             C2F(mProfileLevel, level)
+                                 .oneOf({
+                                     C2Config::LEVEL_VP9_1,
+                                     C2Config::LEVEL_VP9_1_1,
+                                     C2Config::LEVEL_VP9_2,
+                                     C2Config::LEVEL_VP9_2_1,
+                                     C2Config::LEVEL_VP9_3,
+                                     C2Config::LEVEL_VP9_3_1,
+                                     C2Config::LEVEL_VP9_4,
+                                     C2Config::LEVEL_VP9_4_1,
+                                     C2Config::LEVEL_VP9_5,
+                                 })})
+                .withSetter(ProfileLevelSetter, mSize)
+                .build());
+
+        mHdr10PlusInfoInput = C2StreamHdr10PlusInfo::input::AllocShared(0);
+        addParameter(
+            DefineParam(mHdr10PlusInfoInput, C2_PARAMKEY_INPUT_HDR10_PLUS_INFO)
+                .withDefault(mHdr10PlusInfoInput)
+                .withFields({
+                    C2F(mHdr10PlusInfoInput, m.value).any(),
+                })
+                .withSetter(Hdr10PlusInfoInputSetter)
+                .build());
+
+        mHdr10PlusInfoOutput = C2StreamHdr10PlusInfo::output::AllocShared(0);
+        addParameter(DefineParam(mHdr10PlusInfoOutput,
+                                 C2_PARAMKEY_OUTPUT_HDR10_PLUS_INFO)
+                         .withDefault(mHdr10PlusInfoOutput)
+                         .withFields({
+                             C2F(mHdr10PlusInfoOutput, m.value).any(),
+                         })
+                         .withSetter(Hdr10PlusInfoOutputSetter)
+                         .build());
+
+#if 0
+        // sample BT.2020 static info
+        mHdrStaticInfo = std::make_shared<C2StreamHdrStaticInfo::output>();
+        mHdrStaticInfo->mastering = {
+            .red   = { .x = 0.708,  .y = 0.292 },
+            .green = { .x = 0.170,  .y = 0.797 },
+            .blue  = { .x = 0.131,  .y = 0.046 },
+            .white = { .x = 0.3127, .y = 0.3290 },
+            .maxLuminance = 1000,
+            .minLuminance = 0.1,
+        };
+        mHdrStaticInfo->maxCll = 1000;
+        mHdrStaticInfo->maxFall = 120;
+
+        mHdrStaticInfo->maxLuminance = 0; // disable static info
+
+        helper->addStructDescriptors<C2MasteringDisplayColorVolumeStruct, C2ColorXyStruct>();
+        addParameter(
+                DefineParam(mHdrStaticInfo, C2_PARAMKEY_HDR_STATIC_INFO)
+                .withDefault(mHdrStaticInfo)
+                .withFields({
+                    C2F(mHdrStaticInfo, mastering.red.x).inRange(0, 1),
+                    // TODO
+                })
+                .withSetter(HdrStaticInfoSetter)
+                .build());
+#endif
+#else
+        addParameter(
+            DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
+                .withConstValue(new C2StreamProfileLevelInfo::input(
+                    0u, C2Config::PROFILE_UNUSED, C2Config::LEVEL_UNUSED))
+                .build());
+#endif
+
+        addParameter(DefineParam(mMaxSize, C2_PARAMKEY_MAX_PICTURE_SIZE)
+                         .withDefault(new C2StreamMaxPictureSizeTuning::output(
+                             0u, 320, 240))
+                         .withFields({
+                             C2F(mSize, width).inRange(2, 4096, 2),
+                             C2F(mSize, height).inRange(2, 4096, 2),
+                         })
+                         .withSetter(MaxPictureSizeSetter, mSize)
+                         .build());
+
+        addParameter(
+            DefineParam(mMaxInputSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
+                .withDefault(new C2StreamMaxBufferSizeInfo::input(
+                    0u, kMinInputBufferSize))
+                .withFields({
+                    C2F(mMaxInputSize, value).any(),
+                })
+                .calculatedAs(MaxInputSizeSetter, mMaxSize)
+                .build());
+
+        C2ChromaOffsetStruct locations[1] = {
+            C2ChromaOffsetStruct::ITU_YUV_420_0()};
+        std::shared_ptr<C2StreamColorInfo::output> defaultColorInfo =
+            C2StreamColorInfo::output::AllocShared(1u, 0u, 8u /* bitDepth */,
+                                                   C2Color::YUV_420);
+        memcpy(defaultColorInfo->m.locations, locations, sizeof(locations));
+
+        defaultColorInfo = C2StreamColorInfo::output::AllocShared(
+            {C2ChromaOffsetStruct::ITU_YUV_420_0()}, 0u, 8u /* bitDepth */,
+            C2Color::YUV_420);
+        helper->addStructDescriptors<C2ChromaOffsetStruct>();
+
+        addParameter(DefineParam(mColorInfo, C2_PARAMKEY_CODED_COLOR_INFO)
+                         .withConstValue(defaultColorInfo)
+                         .build());
+
+        addParameter(
+            DefineParam(mDefaultColorAspects, C2_PARAMKEY_DEFAULT_COLOR_ASPECTS)
+                .withDefault(new C2StreamColorAspectsTuning::output(
+                    0u, C2Color::RANGE_UNSPECIFIED,
+                    C2Color::PRIMARIES_UNSPECIFIED,
+                    C2Color::TRANSFER_UNSPECIFIED, C2Color::MATRIX_UNSPECIFIED))
+                .withFields({C2F(mDefaultColorAspects, range)
+                                 .inRange(C2Color::RANGE_UNSPECIFIED,
+                                          C2Color::RANGE_OTHER),
+                             C2F(mDefaultColorAspects, primaries)
+                                 .inRange(C2Color::PRIMARIES_UNSPECIFIED,
+                                          C2Color::PRIMARIES_OTHER),
+                             C2F(mDefaultColorAspects, transfer)
+                                 .inRange(C2Color::TRANSFER_UNSPECIFIED,
+                                          C2Color::TRANSFER_OTHER),
+                             C2F(mDefaultColorAspects, matrix)
+                                 .inRange(C2Color::MATRIX_UNSPECIFIED,
+                                          C2Color::MATRIX_OTHER)})
+                .withSetter(DefaultColorAspectsSetter)
+                .build());
+
+        // TODO: support more formats?
+        addParameter(DefineParam(mPixelFormat, C2_PARAMKEY_PIXEL_FORMAT)
+                         .withConstValue(new C2StreamPixelFormatInfo::output(
+                             0u, HAL_PIXEL_FORMAT_YCBCR_420_888))
+                         .build());
+    }
+
+    static C2R SizeSetter(bool mayBlock,
+                          const C2P<C2StreamPictureSizeInfo::output> &oldMe,
+                          C2P<C2StreamPictureSizeInfo::output> &me) {
+        (void)mayBlock;
+        DDD("calling sizesetter old w %d", oldMe.v.width);
+        DDD("calling sizesetter old h %d", oldMe.v.height);
+        DDD("calling sizesetter change to w %d", me.v.width);
+        DDD("calling sizesetter change to h %d", me.v.height);
+        C2R res = C2R::Ok();
+        auto mewidth = me.F(me.v.width);
+        auto meheight = me.F(me.v.height);
+
+        if (!mewidth.supportsAtAll(me.v.width)) {
+            res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.width)));
+            DDD("override width with oldMe value");
+            me.set().width = oldMe.v.width;
+            DDD("something wrong here %s %d", __func__, __LINE__);
+        }
+        if (!meheight.supportsAtAll(me.v.height)) {
+            res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.height)));
+            DDD("override height with oldMe value");
+            me.set().height = oldMe.v.height;
+            DDD("something wrong here %s %d", __func__, __LINE__);
+        }
+        return res;
+    }
+
+    static C2R
+    MaxPictureSizeSetter(bool mayBlock,
+                         C2P<C2StreamMaxPictureSizeTuning::output> &me,
+                         const C2P<C2StreamPictureSizeInfo::output> &size) {
+        (void)mayBlock;
+        // TODO: get max width/height from the size's field helpers vs.
+        // hardcoding
+        me.set().width = c2_min(c2_max(me.v.width, size.v.width), 4096u);
+        me.set().height = c2_min(c2_max(me.v.height, size.v.height), 4096u);
+        return C2R::Ok();
+    }
+
+    static C2R MaxInputSizeSetter(
+        bool mayBlock, C2P<C2StreamMaxBufferSizeInfo::input> &me,
+        const C2P<C2StreamMaxPictureSizeTuning::output> &maxSize) {
+        (void)mayBlock;
+        // assume compression ratio of 2
+        me.set().value = c2_max((((maxSize.v.width + 63) / 64) *
+                                 ((maxSize.v.height + 63) / 64) * 3072),
+                                kMinInputBufferSize);
+        return C2R::Ok();
+    }
+
+    static C2R
+    DefaultColorAspectsSetter(bool mayBlock,
+                              C2P<C2StreamColorAspectsTuning::output> &me) {
+        (void)mayBlock;
+        if (me.v.range > C2Color::RANGE_OTHER) {
+            me.set().range = C2Color::RANGE_OTHER;
+        }
+        if (me.v.primaries > C2Color::PRIMARIES_OTHER) {
+            me.set().primaries = C2Color::PRIMARIES_OTHER;
+        }
+        if (me.v.transfer > C2Color::TRANSFER_OTHER) {
+            me.set().transfer = C2Color::TRANSFER_OTHER;
+        }
+        if (me.v.matrix > C2Color::MATRIX_OTHER) {
+            me.set().matrix = C2Color::MATRIX_OTHER;
+        }
+        return C2R::Ok();
+    }
+
+    static C2R
+    ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::input> &me,
+                       const C2P<C2StreamPictureSizeInfo::output> &size) {
+        (void)mayBlock;
+        (void)size;
+        (void)me; // TODO: validate
+        return C2R::Ok();
+    }
+    std::shared_ptr<C2StreamColorAspectsTuning::output>
+    getDefaultColorAspects_l() {
+        return mDefaultColorAspects;
+    }
+
+    int width() const { return mSize->width; }
+
+    int height() const { return mSize->height; }
+
+    static C2R Hdr10PlusInfoInputSetter(bool mayBlock,
+                                        C2P<C2StreamHdr10PlusInfo::input> &me) {
+        (void)mayBlock;
+        (void)me; // TODO: validate
+        return C2R::Ok();
+    }
+
+    static C2R
+    Hdr10PlusInfoOutputSetter(bool mayBlock,
+                              C2P<C2StreamHdr10PlusInfo::output> &me) {
+        (void)mayBlock;
+        (void)me; // TODO: validate
+        return C2R::Ok();
+    }
+
+  private:
+    std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
+    std::shared_ptr<C2StreamPictureSizeInfo::output> mSize;
+    std::shared_ptr<C2StreamMaxPictureSizeTuning::output> mMaxSize;
+    std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mMaxInputSize;
+    std::shared_ptr<C2StreamColorInfo::output> mColorInfo;
+    std::shared_ptr<C2StreamPixelFormatInfo::output> mPixelFormat;
+    std::shared_ptr<C2StreamColorAspectsTuning::output> mDefaultColorAspects;
+#ifdef VP9
+#if 0
+    std::shared_ptr<C2StreamHdrStaticInfo::output> mHdrStaticInfo;
+#endif
+    std::shared_ptr<C2StreamHdr10PlusInfo::input> mHdr10PlusInfoInput;
+    std::shared_ptr<C2StreamHdr10PlusInfo::output> mHdr10PlusInfoOutput;
+#endif
+};
+
+C2GoldfishVpxDec::ConverterThread::ConverterThread(
+    const std::shared_ptr<Mutexed<ConversionQueue>> &queue)
+    : Thread(false), mQueue(queue) {}
+
+bool C2GoldfishVpxDec::ConverterThread::threadLoop() {
+    Mutexed<ConversionQueue>::Locked queue(*mQueue);
+    if (queue->entries.empty()) {
+        queue.waitForCondition(queue->cond);
+        if (queue->entries.empty()) {
+            return true;
+        }
+    }
+    std::function<void()> convert = queue->entries.front();
+    queue->entries.pop_front();
+    if (!queue->entries.empty()) {
+        queue->cond.signal();
+    }
+    queue.unlock();
+
+    convert();
+
+    queue.lock();
+    if (--queue->numPending == 0u) {
+        queue->cond.broadcast();
+    }
+    return true;
+}
+
+C2GoldfishVpxDec::C2GoldfishVpxDec(const char *name, c2_node_id_t id,
+                                   const std::shared_ptr<IntfImpl> &intfImpl)
+    : SimpleC2Component(
+          std::make_shared<SimpleInterface<IntfImpl>>(name, id, intfImpl)),
+      mIntf(intfImpl), mCtx(nullptr), mQueue(new Mutexed<ConversionQueue>) {}
+
+C2GoldfishVpxDec::~C2GoldfishVpxDec() { onRelease(); }
+
+c2_status_t C2GoldfishVpxDec::onInit() {
+    status_t err = initDecoder();
+    return err == OK ? C2_OK : C2_CORRUPTED;
+}
+
+c2_status_t C2GoldfishVpxDec::onStop() {
+    mSignalledError = false;
+    mSignalledOutputEos = false;
+
+    return C2_OK;
+}
+
+void C2GoldfishVpxDec::onReset() {
+    (void)onStop();
+    c2_status_t err = onFlush_sm();
+    if (err != C2_OK) {
+        ALOGW("Failed to flush decoder. Try to hard reset decoder");
+        destroyDecoder();
+        (void)initDecoder();
+    }
+}
+
+void C2GoldfishVpxDec::onRelease() { destroyDecoder(); }
+
+c2_status_t C2GoldfishVpxDec::onFlush_sm() {
+    if (mFrameParallelMode) {
+        // Flush decoder by passing nullptr data ptr and 0 size.
+        // Ideally, this should never fail.
+        if (vpx_codec_flush(mCtx)) {
+            ALOGE("Failed to flush on2 decoder.");
+            return C2_CORRUPTED;
+        }
+    }
+
+    // Drop all the decoded frames in decoder.
+    if (mCtx) {
+        setup_ctx_parameters(mCtx);
+        while ((mImg = vpx_codec_get_frame(mCtx))) {
+        }
+    }
+
+    mSignalledError = false;
+    mSignalledOutputEos = false;
+    return C2_OK;
+}
+
+status_t C2GoldfishVpxDec::initDecoder() {
+    ALOGI("calling init GoldfishVPX");
+#ifdef VP9
+    mMode = MODE_VP9;
+#else
+    mMode = MODE_VP8;
+#endif
+
+    mWidth = 320;
+    mHeight = 240;
+    mFrameParallelMode = false;
+    mSignalledOutputEos = false;
+    mSignalledError = false;
+
+    return OK;
+}
+
+void C2GoldfishVpxDec::checkContext(const std::shared_ptr<C2BlockPool> &pool) {
+    if (mCtx)
+        return;
+
+    mWidth = mIntf->width();
+    mHeight = mIntf->height();
+    ALOGI("created decoder context w %d h %d", mWidth, mHeight);
+    mCtx = new vpx_codec_ctx_t;
+    mCtx->vpversion = mMode == MODE_VP8 ? 8 : 9;
+
+    // check for decoding mode:
+    {
+        // now get the block
+        constexpr uint32_t format = HAL_PIXEL_FORMAT_YCBCR_420_888;
+        std::shared_ptr<C2GraphicBlock> block;
+        C2MemoryUsage usage = {C2MemoryUsage::CPU_READ,
+                               C2MemoryUsage::CPU_WRITE};
+        usage.expected = (uint64_t)(BufferUsage::GPU_DATA_BUFFER);
+
+        c2_status_t err = pool->fetchGraphicBlock(align(mWidth, 2), mHeight,
+                                                  format, usage, &block);
+        if (err != C2_OK) {
+            ALOGE("fetchGraphicBlock for Output failed with status %d", err);
+            return;
+        }
+        auto c2Handle = block->handle();
+        native_handle_t *grallocHandle =
+            UnwrapNativeCodec2GrallocHandle(c2Handle);
+        int hostColorBufferId = getColorBufferHandle(grallocHandle);
+        if (hostColorBufferId > 0) {
+            DDD("decoding to host color buffer");
+            mEnableAndroidNativeBuffers = true;
+        } else {
+            DDD("decoding to guest byte buffer");
+            mEnableAndroidNativeBuffers = false;
+        }
+    }
+
+    mCtx->version = mEnableAndroidNativeBuffers ? 200 : 100;
+
+    int vpx_err = 0;
+    if ((vpx_err = vpx_codec_dec_init(mCtx))) {
+        ALOGE("vpx decoder failed to initialize. (%d)", vpx_err);
+        delete mCtx;
+        mCtx = NULL;
+    }
+}
+
+status_t C2GoldfishVpxDec::destroyDecoder() {
+    if (mCtx) {
+        ALOGI("calling destroying GoldfishVPX ctx %p", mCtx);
+        vpx_codec_destroy(mCtx);
+        delete mCtx;
+        mCtx = NULL;
+    }
+
+    return OK;
+}
+
+void fillEmptyWork(const std::unique_ptr<C2Work> &work) {
+    uint32_t flags = 0;
+    if (work->input.flags & C2FrameData::FLAG_END_OF_STREAM) {
+        flags |= C2FrameData::FLAG_END_OF_STREAM;
+        DDD("signalling eos");
+    }
+    work->worklets.front()->output.flags = (C2FrameData::flags_t)flags;
+    work->worklets.front()->output.buffers.clear();
+    work->worklets.front()->output.ordinal = work->input.ordinal;
+    work->workletsProcessed = 1u;
+}
+
+void C2GoldfishVpxDec::finishWork(
+    uint64_t index, const std::unique_ptr<C2Work> &work,
+    const std::shared_ptr<C2GraphicBlock> &block) {
+    std::shared_ptr<C2Buffer> buffer =
+        createGraphicBuffer(block, C2Rect(mWidth, mHeight));
+    auto fillWork = [buffer, index,
+                     intf = this->mIntf](const std::unique_ptr<C2Work> &work) {
+        uint32_t flags = 0;
+        if ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) &&
+            (c2_cntr64_t(index) == work->input.ordinal.frameIndex)) {
+            flags |= C2FrameData::FLAG_END_OF_STREAM;
+            DDD("signalling eos");
+        }
+        work->worklets.front()->output.flags = (C2FrameData::flags_t)flags;
+        work->worklets.front()->output.buffers.clear();
+        work->worklets.front()->output.buffers.push_back(buffer);
+        work->worklets.front()->output.ordinal = work->input.ordinal;
+        work->workletsProcessed = 1u;
+
+        for (const std::unique_ptr<C2Param> &param : work->input.configUpdate) {
+            if (param) {
+                C2StreamHdr10PlusInfo::input *hdr10PlusInfo =
+                    C2StreamHdr10PlusInfo::input::From(param.get());
+
+                if (hdr10PlusInfo != nullptr) {
+                    std::vector<std::unique_ptr<C2SettingResult>> failures;
+                    std::unique_ptr<C2Param> outParam = C2Param::CopyAsStream(
+                        *param.get(), true /*output*/, param->stream());
+                    c2_status_t err =
+                        intf->config({outParam.get()}, C2_MAY_BLOCK, &failures);
+                    if (err == C2_OK) {
+                        work->worklets.front()->output.configUpdate.push_back(
+                            C2Param::Copy(*outParam.get()));
+                    } else {
+                        ALOGE("finishWork: Config update size failed");
+                    }
+                    break;
+                }
+            }
+        }
+    };
+    if (work && c2_cntr64_t(index) == work->input.ordinal.frameIndex) {
+        fillWork(work);
+    } else {
+        finish(index, fillWork);
+    }
+}
+
+void C2GoldfishVpxDec::process(const std::unique_ptr<C2Work> &work,
+                               const std::shared_ptr<C2BlockPool> &pool) {
+    DDD("%s %d doing work now", __func__, __LINE__);
+    // Initialize output work
+    work->result = C2_OK;
+    work->workletsProcessed = 0u;
+    work->worklets.front()->output.configUpdate.clear();
+    work->worklets.front()->output.flags = work->input.flags;
+
+    if (mSignalledError || mSignalledOutputEos) {
+        work->result = C2_BAD_VALUE;
+        return;
+    }
+
+    size_t inOffset = 0u;
+    size_t inSize = 0u;
+    C2ReadView rView = mDummyReadView;
+    if (!work->input.buffers.empty()) {
+        rView =
+            work->input.buffers[0]->data().linearBlocks().front().map().get();
+        inSize = rView.capacity();
+        if (inSize && rView.error()) {
+            ALOGE("read view map failed %d", rView.error());
+            work->result = C2_CORRUPTED;
+            return;
+        }
+    }
+
+    checkContext(pool);
+
+    bool codecConfig =
+        ((work->input.flags & C2FrameData::FLAG_CODEC_CONFIG) != 0);
+    bool eos = ((work->input.flags & C2FrameData::FLAG_END_OF_STREAM) != 0);
+
+    DDD("in buffer attr. size %zu timestamp %d frameindex %d, flags %x", inSize,
+        (int)work->input.ordinal.timestamp.peeku(),
+        (int)work->input.ordinal.frameIndex.peeku(), work->input.flags);
+
+    // Software VP9 Decoder does not need the Codec Specific Data (CSD)
+    // (specified in http://www.webmproject.org/vp9/profiles/). Ignore it if
+    // it was passed.
+    if (codecConfig) {
+        // Ignore CSD buffer for VP9.
+        if (mMode == MODE_VP9) {
+            fillEmptyWork(work);
+            return;
+        } else {
+            // Tolerate the CSD buffer for VP8. This is a workaround
+            // for b/28689536. continue
+            ALOGW("WARNING: Got CSD buffer for VP8. Continue");
+        }
+    }
+
+    if (inSize) {
+        uint8_t *bitstream = const_cast<uint8_t *>(rView.data() + inOffset);
+        vpx_codec_err_t err = vpx_codec_decode(
+            mCtx, bitstream, inSize, &work->input.ordinal.frameIndex, 0);
+        if (err != 0) {
+            ALOGE("on2 decoder failed to decode frame. err: ");
+            mSignalledError = true;
+            work->workletsProcessed = 1u;
+            work->result = C2_CORRUPTED;
+            return;
+        }
+    }
+
+    status_t err = outputBuffer(pool, work);
+    if (err == NOT_ENOUGH_DATA) {
+        if (inSize > 0) {
+            DDD("Maybe non-display frame at %lld.",
+                work->input.ordinal.frameIndex.peekll());
+            // send the work back with empty buffer.
+            inSize = 0;
+        }
+    } else if (err != OK) {
+        ALOGD("Error while getting the output frame out");
+        // work->result would be already filled; do fillEmptyWork() below to
+        // send the work back.
+        inSize = 0;
+    }
+
+    if (eos) {
+        drainInternal(DRAIN_COMPONENT_WITH_EOS, pool, work);
+        mSignalledOutputEos = true;
+    } else if (!inSize) {
+        fillEmptyWork(work);
+    }
+}
+
+static void copyOutputBufferToYuvPlanarFrame(
+    uint8_t *dst, const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
+    size_t srcYStride, size_t srcUStride, size_t srcVStride, size_t dstYStride,
+    size_t dstUVStride, uint32_t width, uint32_t height) {
+    uint8_t *dstStart = dst;
+
+    for (size_t i = 0; i < height; ++i) {
+        memcpy(dst, srcY, width);
+        srcY += srcYStride;
+        dst += dstYStride;
+    }
+
+    dst = dstStart + dstYStride * height;
+    for (size_t i = 0; i < height / 2; ++i) {
+        memcpy(dst, srcV, width / 2);
+        srcV += srcVStride;
+        dst += dstUVStride;
+    }
+
+    dst = dstStart + (dstYStride * height) + (dstUVStride * height / 2);
+    for (size_t i = 0; i < height / 2; ++i) {
+        memcpy(dst, srcU, width / 2);
+        srcU += srcUStride;
+        dst += dstUVStride;
+    }
+}
+
+void C2GoldfishVpxDec::setup_ctx_parameters(vpx_codec_ctx_t *ctx,
+                                            int hostColorBufferId) {
+    ctx->width = mWidth;
+    ctx->height = mHeight;
+    ctx->hostColorBufferId = hostColorBufferId;
+    ctx->outputBufferWidth = mWidth;
+    ctx->outputBufferHeight = mHeight;
+    int32_t bpp = 1;
+    ctx->bpp = bpp;
+}
+
+status_t
+C2GoldfishVpxDec::outputBuffer(const std::shared_ptr<C2BlockPool> &pool,
+                               const std::unique_ptr<C2Work> &work) {
+    if (!(work && pool))
+        return BAD_VALUE;
+
+    // now get the block
+    std::shared_ptr<C2GraphicBlock> block;
+    C2MemoryUsage usage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
+    uint32_t format = HAL_PIXEL_FORMAT_YCBCR_420_888;
+    usage.expected = (uint64_t)(BufferUsage::GPU_DATA_BUFFER);
+
+    c2_status_t err = pool->fetchGraphicBlock(align(mWidth, 2), mHeight, format,
+                                              usage, &block);
+    if (err != C2_OK) {
+        ALOGE("fetchGraphicBlock for Output failed with status %d", err);
+        work->result = err;
+        return UNKNOWN_ERROR;
+    }
+
+    bool decodingToByteBuffer = false;
+    {
+        auto c2Handle = block->handle();
+        native_handle_t *grallocHandle =
+            UnwrapNativeCodec2GrallocHandle(c2Handle);
+        int hostColorBufferId = getColorBufferHandle(grallocHandle);
+        if (hostColorBufferId > 0) {
+            DDD("found handle %d", hostColorBufferId);
+        } else {
+            decodingToByteBuffer = true;
+            DDD("decode to buffer, because handle %d is invalid",
+                hostColorBufferId);
+            // change to -1 so host knows it is definitely invalid
+            // 0 is a bit confusing
+            hostColorBufferId = -1;
+        }
+        setup_ctx_parameters(mCtx, hostColorBufferId);
+    }
+
+    vpx_image_t *img = vpx_codec_get_frame(mCtx);
+
+    if (!img)
+        return NOT_ENOUGH_DATA;
+
+    if (img->d_w != mWidth || img->d_h != mHeight) {
+        DDD("updating w %d h %d to w %d h %d", mWidth, mHeight, img->d_w,
+            img->d_h);
+        mWidth = img->d_w;
+        mHeight = img->d_h;
+
+        // need to re-allocate since size changed, especially for byte buffer
+        // mode
+        if (true) {
+            c2_status_t err = pool->fetchGraphicBlock(align(mWidth, 2), mHeight,
+                                                      format, usage, &block);
+            if (err != C2_OK) {
+                ALOGE("fetchGraphicBlock for Output failed with status %d",
+                      err);
+                work->result = err;
+                return UNKNOWN_ERROR;
+            }
+        }
+
+        C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
+        std::vector<std::unique_ptr<C2SettingResult>> failures;
+        c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
+        if (err == C2_OK) {
+            work->worklets.front()->output.configUpdate.push_back(
+                C2Param::Copy(size));
+        } else {
+            ALOGE("Config update size failed");
+            mSignalledError = true;
+            work->workletsProcessed = 1u;
+            work->result = C2_CORRUPTED;
+            return UNKNOWN_ERROR;
+        }
+    }
+    if (img->fmt != VPX_IMG_FMT_I420 && img->fmt != VPX_IMG_FMT_I42016) {
+        ALOGE("img->fmt %d not supported", img->fmt);
+        mSignalledError = true;
+        work->workletsProcessed = 1u;
+        work->result = C2_CORRUPTED;
+        return false;
+    }
+
+    if (img->fmt == VPX_IMG_FMT_I42016) {
+        IntfImpl::Lock lock = mIntf->lock();
+        std::shared_ptr<C2StreamColorAspectsTuning::output>
+            defaultColorAspects = mIntf->getDefaultColorAspects_l();
+
+        if (defaultColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
+            defaultColorAspects->matrix == C2Color::MATRIX_BT2020 &&
+            defaultColorAspects->transfer == C2Color::TRANSFER_ST2084) {
+            format = HAL_PIXEL_FORMAT_RGBA_1010102;
+        }
+    }
+
+    if (decodingToByteBuffer) {
+
+        C2GraphicView wView = block->map().get();
+        if (wView.error()) {
+            ALOGE("graphic view map failed %d", wView.error());
+            work->result = C2_CORRUPTED;
+            return UNKNOWN_ERROR;
+        }
+
+        DDD("provided (%dx%d) required (%dx%d), out frameindex %lld",
+            block->width(), block->height(), mWidth, mHeight,
+            ((c2_cntr64_t *)img->user_priv)->peekll());
+
+        uint8_t *dst =
+            const_cast<uint8_t *>(wView.data()[C2PlanarLayout::PLANE_Y]);
+        size_t srcYStride = mWidth;
+        size_t srcUStride = mWidth / 2;
+        size_t srcVStride = mWidth / 2;
+        C2PlanarLayout layout = wView.layout();
+        size_t dstYStride = layout.planes[C2PlanarLayout::PLANE_Y].rowInc;
+        size_t dstUVStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
+
+        if (img->fmt == VPX_IMG_FMT_I42016) {
+            ALOGW("WARNING: not I42016 is not supported !!!");
+        } else if (1) {
+            const uint8_t *srcY = (const uint8_t *)mCtx->dst;
+            const uint8_t *srcV = srcY + mWidth * mHeight;
+            const uint8_t *srcU = srcV + mWidth * mHeight / 4;
+            // TODO: the following crashes
+            copyOutputBufferToYuvPlanarFrame(dst, srcY, srcU, srcV, srcYStride,
+                                             srcUStride, srcVStride, dstYStride,
+                                             dstUVStride, mWidth, mHeight);
+            // memcpy(dst, srcY, mWidth * mHeight / 2);
+        }
+    }
+    DDD("provided (%dx%d) required (%dx%d), out frameindex %lld",
+        block->width(), block->height(), mWidth, mHeight,
+        ((c2_cntr64_t *)img->user_priv)->peekll());
+
+    finishWork(((c2_cntr64_t *)img->user_priv)->peekull(), work,
+               std::move(block));
+    return OK;
+}
+
+c2_status_t
+C2GoldfishVpxDec::drainInternal(uint32_t drainMode,
+                                const std::shared_ptr<C2BlockPool> &pool,
+                                const std::unique_ptr<C2Work> &work) {
+    if (drainMode == NO_DRAIN) {
+        ALOGW("drain with NO_DRAIN: no-op");
+        return C2_OK;
+    }
+    if (drainMode == DRAIN_CHAIN) {
+        ALOGW("DRAIN_CHAIN not supported");
+        return C2_OMITTED;
+    }
+
+    while (outputBuffer(pool, work) == OK) {
+    }
+
+    if (drainMode == DRAIN_COMPONENT_WITH_EOS && work &&
+        work->workletsProcessed == 0u) {
+        fillEmptyWork(work);
+    }
+
+    return C2_OK;
+}
+c2_status_t C2GoldfishVpxDec::drain(uint32_t drainMode,
+                                    const std::shared_ptr<C2BlockPool> &pool) {
+    return drainInternal(drainMode, pool, nullptr);
+}
+
+class C2GoldfishVpxFactory : public C2ComponentFactory {
+  public:
+    C2GoldfishVpxFactory()
+        : mHelper(std::static_pointer_cast<C2ReflectorHelper>(
+              GoldfishComponentStore::Create()->getParamReflector())) {
+
+        ALOGI("platform store is %p, reflector is %p",
+              GetCodec2PlatformComponentStore().get(),
+              GetCodec2PlatformComponentStore()->getParamReflector().get());
+    }
+
+    virtual c2_status_t
+    createComponent(c2_node_id_t id,
+                    std::shared_ptr<C2Component> *const component,
+                    std::function<void(C2Component *)> deleter) override {
+        *component = std::shared_ptr<C2Component>(
+            new C2GoldfishVpxDec(
+                COMPONENT_NAME, id,
+                std::make_shared<C2GoldfishVpxDec::IntfImpl>(mHelper)),
+            deleter);
+        return C2_OK;
+    }
+
+    virtual c2_status_t createInterface(
+        c2_node_id_t id, std::shared_ptr<C2ComponentInterface> *const interface,
+        std::function<void(C2ComponentInterface *)> deleter) override {
+        *interface = std::shared_ptr<C2ComponentInterface>(
+            new SimpleInterface<C2GoldfishVpxDec::IntfImpl>(
+                COMPONENT_NAME, id,
+                std::make_shared<C2GoldfishVpxDec::IntfImpl>(mHelper)),
+            deleter);
+        return C2_OK;
+    }
+
+    virtual ~C2GoldfishVpxFactory() override = default;
+
+  private:
+    std::shared_ptr<C2ReflectorHelper> mHelper;
+};
+
+} // namespace android
+
+extern "C" ::C2ComponentFactory *CreateCodec2Factory() {
+    DDD("in %s", __func__);
+    return new ::android::C2GoldfishVpxFactory();
+}
+
+extern "C" void DestroyCodec2Factory(::C2ComponentFactory *factory) {
+    DDD("in %s", __func__);
+    delete factory;
+}
diff --git a/system/codecs/c2/decoders/vpxdec/C2GoldfishVpxDec.h b/system/codecs/c2/decoders/vpxdec/C2GoldfishVpxDec.h
new file mode 100644
index 0000000..4b356da
--- /dev/null
+++ b/system/codecs/c2/decoders/vpxdec/C2GoldfishVpxDec.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "goldfish_vpx_defs.h"
+#include <SimpleC2Component.h>
+
+namespace android {
+
+struct C2GoldfishVpxDec : public SimpleC2Component {
+    class IntfImpl;
+
+    C2GoldfishVpxDec(const char *name, c2_node_id_t id,
+                     const std::shared_ptr<IntfImpl> &intfImpl);
+    virtual ~C2GoldfishVpxDec();
+
+    // From SimpleC2Component
+    c2_status_t onInit() override;
+    c2_status_t onStop() override;
+    void onReset() override;
+    void onRelease() override;
+    c2_status_t onFlush_sm() override;
+    void process(const std::unique_ptr<C2Work> &work,
+                 const std::shared_ptr<C2BlockPool> &pool) override;
+    c2_status_t drain(uint32_t drainMode,
+                      const std::shared_ptr<C2BlockPool> &pool) override;
+
+  private:
+    enum {
+        MODE_VP8,
+        MODE_VP9,
+    } mMode;
+
+    struct ConversionQueue;
+
+    class ConverterThread : public Thread {
+      public:
+        explicit ConverterThread(
+            const std::shared_ptr<Mutexed<ConversionQueue>> &queue);
+        ~ConverterThread() override = default;
+        bool threadLoop() override;
+
+      private:
+        std::shared_ptr<Mutexed<ConversionQueue>> mQueue;
+    };
+
+    // create context that talks to host decoder: it needs to use
+    // pool to decide whether decoding to host color buffer ot
+    // decode to guest bytebuffer when pool cannot fetch valid host
+    // color buffer id
+    void checkContext(const std::shared_ptr<C2BlockPool> &pool);
+    bool mEnableAndroidNativeBuffers{true};
+
+    void setup_ctx_parameters(vpx_codec_ctx_t *ctx, int hostColorBufferId = -1);
+
+    std::shared_ptr<IntfImpl> mIntf;
+    vpx_codec_ctx_t *mCtx;
+    bool mFrameParallelMode; // Frame parallel is only supported by VP9 decoder.
+    vpx_image_t *mImg;
+
+    uint32_t mWidth;
+    uint32_t mHeight;
+    bool mSignalledOutputEos;
+    bool mSignalledError;
+
+    struct ConversionQueue {
+        std::list<std::function<void()>> entries;
+        Condition cond;
+        size_t numPending{0u};
+    };
+    std::shared_ptr<Mutexed<ConversionQueue>> mQueue;
+    std::vector<sp<ConverterThread>> mConverterThreads;
+
+    status_t initDecoder();
+    status_t destroyDecoder();
+    void finishWork(uint64_t index, const std::unique_ptr<C2Work> &work,
+                    const std::shared_ptr<C2GraphicBlock> &block);
+    status_t outputBuffer(const std::shared_ptr<C2BlockPool> &pool,
+                          const std::unique_ptr<C2Work> &work);
+    c2_status_t drainInternal(uint32_t drainMode,
+                              const std::shared_ptr<C2BlockPool> &pool,
+                              const std::unique_ptr<C2Work> &work);
+
+    C2_DO_NOT_COPY(C2GoldfishVpxDec);
+};
+
+} // namespace android
diff --git a/system/codecs/c2/decoders/vpxdec/goldfish_vpx_defs.h b/system/codecs/c2/decoders/vpxdec/goldfish_vpx_defs.h
new file mode 100644
index 0000000..bbcc805
--- /dev/null
+++ b/system/codecs/c2/decoders/vpxdec/goldfish_vpx_defs.h
@@ -0,0 +1,64 @@
+#ifndef MY_VPX_DEFS_H_
+#define MY_VPX_DEFS_H_
+
+#define VPX_IMG_FMT_PLANAR 0x100       /**< Image is a planar format. */
+#define VPX_IMG_FMT_UV_FLIP 0x200      /**< V plane precedes U in memory. */
+#define VPX_IMG_FMT_HAS_ALPHA 0x400    /**< Image has an alpha channel. */
+#define VPX_IMG_FMT_HIGHBITDEPTH 0x800 /**< Image uses 16bit framebuffer. */
+
+typedef unsigned char uint8_t;
+typedef int vpx_codec_err_t;
+
+enum class RenderMode {
+    RENDER_BY_HOST_GPU = 1,
+    RENDER_BY_GUEST_CPU = 2,
+};
+
+enum vpx_img_fmt_t {
+    VPX_IMG_FMT_NONE,
+    VPX_IMG_FMT_YV12 =
+        VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 1, /**< planar YVU */
+    VPX_IMG_FMT_I420 = VPX_IMG_FMT_PLANAR | 2,
+    VPX_IMG_FMT_I422 = VPX_IMG_FMT_PLANAR | 5,
+    VPX_IMG_FMT_I444 = VPX_IMG_FMT_PLANAR | 6,
+    VPX_IMG_FMT_I440 = VPX_IMG_FMT_PLANAR | 7,
+    VPX_IMG_FMT_I42016 = VPX_IMG_FMT_I420 | VPX_IMG_FMT_HIGHBITDEPTH,
+    VPX_IMG_FMT_I42216 = VPX_IMG_FMT_I422 | VPX_IMG_FMT_HIGHBITDEPTH,
+    VPX_IMG_FMT_I44416 = VPX_IMG_FMT_I444 | VPX_IMG_FMT_HIGHBITDEPTH,
+    VPX_IMG_FMT_I44016 = VPX_IMG_FMT_I440 | VPX_IMG_FMT_HIGHBITDEPTH
+};
+
+struct vpx_image_t {
+    vpx_img_fmt_t fmt; /**< Image Format */
+    unsigned int d_w;  /**< Displayed image width */
+    unsigned int d_h;  /**< Displayed image height */
+    void *user_priv;
+};
+
+#define VPX_CODEC_OK 0
+
+struct vpx_codec_ctx_t {
+    int vpversion; // 8: vp8 or 9: vp9
+    int version;   // 100: return decoded frame to guest; 200: render on host
+    int hostColorBufferId;
+    uint64_t id; // >= 1, unique
+    int memory_slot;
+    uint64_t address_offset = 0;
+    size_t outputBufferWidth;
+    size_t outputBufferHeight;
+    size_t width;
+    size_t height;
+    size_t bpp;
+    uint8_t *data;
+    uint8_t *dst;
+    vpx_image_t myImg;
+};
+
+int vpx_codec_destroy(vpx_codec_ctx_t *);
+int vpx_codec_dec_init(vpx_codec_ctx_t *);
+vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *, int hostColorBufferId = -1);
+int vpx_codec_flush(vpx_codec_ctx_t *ctx);
+int vpx_codec_decode(vpx_codec_ctx_t *ctx, const uint8_t *data,
+                     unsigned int data_sz, void *user_priv, long deadline);
+
+#endif // MY_VPX_DEFS_H_
diff --git a/system/codecs/c2/decoders/vpxdec/goldfish_vpx_impl.cpp b/system/codecs/c2/decoders/vpxdec/goldfish_vpx_impl.cpp
new file mode 100644
index 0000000..d008efe
--- /dev/null
+++ b/system/codecs/c2/decoders/vpxdec/goldfish_vpx_impl.cpp
@@ -0,0 +1,177 @@
+#include <log/log.h>
+
+#include "goldfish_media_utils.h"
+#include "goldfish_vpx_defs.h"
+#include <cstdlib>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <string>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <memory>
+#include <mutex>
+#include <vector>
+
+#define DEBUG 0
+#if DEBUG
+#define DDD(...) ALOGD(__VA_ARGS__)
+#else
+#define DDD(...) ((void)0)
+#endif
+
+// static vpx_image_t myImg;
+static uint64_t s_CtxId = 0;
+static std::mutex sCtxidMutex;
+
+static uint64_t applyForOneId() {
+    DDD("%s %d", __func__, __LINE__);
+    std::lock_guard<std::mutex> g{sCtxidMutex};
+    ++s_CtxId;
+    return s_CtxId;
+}
+
+static void sendVpxOperation(vpx_codec_ctx_t *ctx, MediaOperation op) {
+    DDD("%s %d", __func__, __LINE__);
+    if (ctx->memory_slot < 0) {
+        ALOGE("ERROR: Failed %s %d: there is no memory slot", __func__,
+              __LINE__);
+    }
+    auto transport = GoldfishMediaTransport::getInstance();
+    transport->sendOperation(ctx->vpversion == 9 ? MediaCodecType::VP9Codec
+                                                 : MediaCodecType::VP8Codec,
+                             op, ctx->address_offset);
+}
+
+int vpx_codec_destroy(vpx_codec_ctx_t *ctx) {
+    DDD("%s %d", __func__, __LINE__);
+    if (!ctx) {
+        ALOGE("ERROR: Failed %s %d: ctx is nullptr", __func__, __LINE__);
+        return -1;
+    }
+    auto transport = GoldfishMediaTransport::getInstance();
+    transport->writeParam(ctx->id, 0, ctx->address_offset);
+    sendVpxOperation(ctx, MediaOperation::DestroyContext);
+    transport->returnMemorySlot(ctx->memory_slot);
+    ctx->memory_slot = -1;
+    return 0;
+}
+
+int vpx_codec_dec_init(vpx_codec_ctx_t *ctx) {
+    DDD("%s %d", __func__, __LINE__);
+    auto transport = GoldfishMediaTransport::getInstance();
+    int slot = transport->getMemorySlot();
+    if (slot < 0) {
+        ALOGE("ERROR: Failed %s %d: cannot get memory slot", __func__,
+              __LINE__);
+        return -1;
+    } else {
+        DDD("got slot %d", slot);
+    }
+    ctx->id = applyForOneId();
+    ctx->memory_slot = slot;
+    ctx->address_offset =
+        static_cast<unsigned int>(ctx->memory_slot) * (1 << 20);
+    DDD("got address offset 0x%x version %d", (int)(ctx->address_offset),
+        ctx->version);
+
+    // data and dst are on the host side actually
+    ctx->data = transport->getInputAddr(ctx->address_offset);
+    ctx->dst =
+        transport->getInputAddr(ctx->address_offset); // re-use input address
+    transport->writeParam(ctx->id, 0, ctx->address_offset);
+    transport->writeParam(ctx->version, 1, ctx->address_offset);
+    sendVpxOperation(ctx, MediaOperation::InitContext);
+    return 0;
+}
+
+static int getReturnCode(uint8_t *ptr) {
+    int *pint = (int *)(ptr);
+    return *pint;
+}
+
+// vpx_image_t myImg;
+static void getVpxFrame(uint8_t *ptr, vpx_image_t &myImg) {
+    DDD("%s %d", __func__, __LINE__);
+    uint8_t *imgptr = (ptr + 8);
+    myImg.fmt = *(vpx_img_fmt_t *)imgptr;
+    imgptr += 8;
+    myImg.d_w = *(unsigned int *)imgptr;
+    imgptr += 8;
+    myImg.d_h = *(unsigned int *)imgptr;
+    imgptr += 8;
+    myImg.user_priv = (void *)(*(uint64_t *)imgptr);
+    DDD("fmt %d dw %d dh %d userpriv %p", (int)myImg.fmt, (int)myImg.d_w,
+        (int)myImg.d_h, myImg.user_priv);
+}
+
+// TODO: we might not need to do the putting all the time
+vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx, int hostColorBufferId) {
+    DDD("%s %d %p", __func__, __LINE__);
+    (void)hostColorBufferId;
+    if (!ctx) {
+        ALOGE("ERROR: Failed %s %d: ctx is nullptr", __func__, __LINE__);
+        return nullptr;
+    }
+    auto transport = GoldfishMediaTransport::getInstance();
+
+    transport->writeParam(ctx->id, 0, ctx->address_offset);
+    transport->writeParam(ctx->outputBufferWidth, 1, ctx->address_offset);
+    transport->writeParam(ctx->outputBufferHeight, 2, ctx->address_offset);
+    transport->writeParam(ctx->width, 3, ctx->address_offset);
+    transport->writeParam(ctx->height, 4, ctx->address_offset);
+    transport->writeParam(ctx->bpp, 5, ctx->address_offset);
+    transport->writeParam(ctx->hostColorBufferId, 6, ctx->address_offset);
+    transport->writeParam(transport->offsetOf((uint64_t)(ctx->dst)) -
+                              ctx->address_offset,
+                          7, ctx->address_offset);
+
+    sendVpxOperation(ctx, MediaOperation::GetImage);
+
+    auto *retptr = transport->getReturnAddr(ctx->address_offset);
+    int ret = getReturnCode(retptr);
+    if (ret) {
+        return nullptr;
+    }
+    getVpxFrame(retptr, ctx->myImg);
+    return &(ctx->myImg);
+}
+
+int vpx_codec_flush(vpx_codec_ctx_t *ctx) {
+    DDD("%s %d", __func__, __LINE__);
+    if (!ctx) {
+        ALOGE("ERROR: Failed %s %d: ctx is nullptr", __func__, __LINE__);
+        return -1;
+    }
+    auto transport = GoldfishMediaTransport::getInstance();
+    transport->writeParam(ctx->id, 0, ctx->address_offset);
+    sendVpxOperation(ctx, MediaOperation::Flush);
+    return 0;
+}
+
+int vpx_codec_decode(vpx_codec_ctx_t *ctx, const uint8_t *data,
+                     unsigned int data_sz, void *user_priv, long deadline) {
+    if (!ctx) {
+        ALOGE("ERROR: Failed %s %d: ctx is nullptr", __func__, __LINE__);
+        return -1;
+    }
+    (void)deadline;
+    DDD("%s %d data size %d userpriv %p", __func__, __LINE__, (int)data_sz,
+        user_priv);
+    auto transport = GoldfishMediaTransport::getInstance();
+    memcpy(ctx->data, data, data_sz);
+
+    transport->writeParam(ctx->id, 0, ctx->address_offset);
+    transport->writeParam(transport->offsetOf((uint64_t)(ctx->data)) -
+                              ctx->address_offset,
+                          1, ctx->address_offset);
+    transport->writeParam((__u64)data_sz, 2, ctx->address_offset);
+    transport->writeParam((__u64)user_priv, 3, ctx->address_offset);
+    sendVpxOperation(ctx, MediaOperation::DecodeImage);
+    return 0;
+}
diff --git a/system/codecs/c2/readme.txt b/system/codecs/c2/readme.txt
new file mode 100644
index 0000000..a2b0d21
--- /dev/null
+++ b/system/codecs/c2/readme.txt
@@ -0,0 +1,11 @@
+This contains the c2 version of emulator's hardware decoders
+
+decoders/ contains avc(a.k.a. h264) and vpx(vp8 and vp9) decoders
+and base. All are based upon c2 sw codecs.
+
+store/ the store that creates decoders
+this is also borrowed from c2.
+
+service/ the hidl service that required by platform;
+to actually get it work, need to set this in file_contexts
+/vendor/bin/hw/android\.hardware\.media\.c2@1\.0-service-goldfish u:object_r:mediacodec_exec:s0
diff --git a/system/codecs/c2/service/Android.bp b/system/codecs/c2/service/Android.bp
new file mode 100644
index 0000000..40570a7
--- /dev/null
+++ b/system/codecs/c2/service/Android.bp
@@ -0,0 +1,36 @@
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "device_generic_goldfish-opengl_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-BSD
+    default_applicable_licenses: ["device_generic_goldfish-opengl_license"],
+}
+
+cc_binary {
+    name: "android.hardware.media.c2@1.0-service-goldfish",
+
+    defaults: [
+        "hidl_defaults",
+        "libcodec2-hidl-defaults",
+    ],
+    vendor: true,
+    relative_install_path: "hw",
+
+    srcs: [
+        "service.cpp",
+    ],
+
+    init_rc: ["android.hardware.media.c2@1.0-service-goldfish.rc"],
+
+    shared_libs: [
+        "libgoldfish_codec2_store",
+        "libavservices_minijail_vendor",
+        "libcutils",
+        "libhidlbase",
+        "liblog",
+        "libutils",
+    ],
+
+    required: ["android.hardware.media.c2@1.2-default-seccomp_policy"],
+}
diff --git a/system/codecs/c2/service/android.hardware.media.c2@1.0-service-goldfish.rc b/system/codecs/c2/service/android.hardware.media.c2@1.0-service-goldfish.rc
new file mode 100644
index 0000000..afffa16
--- /dev/null
+++ b/system/codecs/c2/service/android.hardware.media.c2@1.0-service-goldfish.rc
@@ -0,0 +1,6 @@
+service android-hardware-media-c2-goldfish-hal-1-0 /vendor/bin/hw/android.hardware.media.c2@1.0-service-goldfish
+    class hal
+    user media
+    group mediadrm drmrpc
+    ioprio rt 4
+    writepid /dev/cpuset/foreground/tasks
diff --git a/system/codecs/c2/service/service.cpp b/system/codecs/c2/service/service.cpp
new file mode 100644
index 0000000..0e97dd2
--- /dev/null
+++ b/system/codecs/c2/service/service.cpp
@@ -0,0 +1,52 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "android.hardware.media.c2@1.0-service-goldfish"
+
+#include <C2Component.h>
+#include <codec2/hidl/1.0/ComponentStore.h>
+#include <hidl/HidlTransportSupport.h>
+#include <log/log.h>
+#include <minijail.h>
+
+#include <goldfish_codec2/store/GoldfishComponentStore.h>
+
+// Default policy for codec2.0 service.
+static constexpr char kBaseSeccompPolicyPath[] =
+    "/vendor/etc/seccomp_policy/"
+    "android.hardware.media.c2@1.2-default-seccomp_policy";
+
+// Additional device-specific seccomp permissions can be added in this file.
+static constexpr char kExtSeccompPolicyPath[] =
+    "/vendor/etc/seccomp_policy/codec2.vendor.ext.policy";
+
+int main(int /* argc */, char ** /* argv */) {
+    ALOGD("Goldfish C2 Service starting...");
+
+    signal(SIGPIPE, SIG_IGN);
+    android::SetUpMinijail(kBaseSeccompPolicyPath, kExtSeccompPolicyPath);
+
+    android::hardware::configureRpcThreadpool(8, true /* callerWillJoin */);
+
+    // Create IComponentStore service.
+    {
+        using namespace ::android::hardware::media::c2::V1_0;
+
+        ALOGD("Instantiating Codec2's Goldfish IComponentStore service...");
+        android::sp<IComponentStore> store(new utils::ComponentStore(
+            android::GoldfishComponentStore::Create()));
+        if (store == nullptr) {
+            ALOGE("Cannot create Codec2's Goldfish IComponentStore service.");
+        } else if (store->registerAsService("default") != android::OK) {
+            ALOGE("Cannot register Codec2's IComponentStore service.");
+        } else {
+            ALOGI("Codec2's IComponentStore service created.");
+        }
+    }
+
+    android::hardware::joinRpcThreadpool();
+    ALOGD("Service shutdown.");
+    return 0;
+}
diff --git a/system/codecs/c2/store/Android.bp b/system/codecs/c2/store/Android.bp
new file mode 100644
index 0000000..56b1895
--- /dev/null
+++ b/system/codecs/c2/store/Android.bp
@@ -0,0 +1,35 @@
+package {
+    // See: http://go/android-license-faq
+    // A large-scale-change added 'default_applicable_licenses' to import
+    // all of the 'license_kinds' from "device_generic_goldfish-opengl_license"
+    // to get the below license kinds:
+    //   SPDX-license-identifier-GPL-2.0
+    default_applicable_licenses: ["device_generic_goldfish-opengl_license"],
+}
+
+cc_library_shared {
+    name: "libgoldfish_codec2_store",
+    vendor: true,
+
+    defaults: [
+        "libcodec2-impl-defaults",
+    ],
+
+    srcs: [
+        "GoldfishComponentStore.cpp",
+    ],
+    export_include_dirs: [
+        "include",
+    ],
+
+    shared_libs: [
+        "libcutils",
+        "liblog",
+    ],
+
+    cflags: [
+      "-Werror",
+      "-Wall",
+      "-Wthread-safety",  // Check thread annotation at build time.
+    ],
+}
diff --git a/system/codecs/c2/store/GoldfishComponentStore.cpp b/system/codecs/c2/store/GoldfishComponentStore.cpp
new file mode 100644
index 0000000..49c5a32
--- /dev/null
+++ b/system/codecs/c2/store/GoldfishComponentStore.cpp
@@ -0,0 +1,386 @@
+/* Copyright (C) 2020 The Android Open Source Project
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+** GNU General Public License for more details.
+*/
+
+#define LOG_TAG "GoldfishComponentStore"
+
+#include <goldfish_codec2/store/GoldfishComponentStore.h>
+
+#include <dlfcn.h>
+#include <stdint.h>
+
+#include <memory>
+#include <mutex>
+
+#include <C2.h>
+#include <C2Config.h>
+#include <cutils/properties.h>
+#include <log/log.h>
+
+namespace android {
+
+// static
+std::shared_ptr<C2ComponentStore> GoldfishComponentStore::Create() {
+    ALOGI("%s()", __func__);
+
+    static std::mutex mutex;
+    static std::weak_ptr<C2ComponentStore> platformStore;
+
+    std::lock_guard<std::mutex> lock(mutex);
+    std::shared_ptr<C2ComponentStore> store = platformStore.lock();
+    if (store != nullptr)
+        return store;
+
+    store = std::shared_ptr<C2ComponentStore>(new GoldfishComponentStore());
+    platformStore = store;
+    return store;
+}
+
+C2String GoldfishComponentStore::getName() const {
+    return "android.componentStore.goldfish";
+}
+
+c2_status_t GoldfishComponentStore::ComponentModule::init(std::string libPath) {
+    ALOGI("in %s", __func__);
+    ALOGI("loading dll of path %s", libPath.c_str());
+    mLibHandle = dlopen(libPath.c_str(), RTLD_NOW | RTLD_NODELETE);
+    LOG_ALWAYS_FATAL_IF(mLibHandle == nullptr, "could not dlopen %s: %s",
+                        libPath.c_str(), dlerror());
+
+    createFactory = (C2ComponentFactory::CreateCodec2FactoryFunc)dlsym(
+        mLibHandle, "CreateCodec2Factory");
+    LOG_ALWAYS_FATAL_IF(createFactory == nullptr, "createFactory is null in %s",
+                        libPath.c_str());
+
+    destroyFactory = (C2ComponentFactory::DestroyCodec2FactoryFunc)dlsym(
+        mLibHandle, "DestroyCodec2Factory");
+    LOG_ALWAYS_FATAL_IF(destroyFactory == nullptr,
+                        "destroyFactory is null in %s", libPath.c_str());
+
+    mComponentFactory = createFactory();
+    if (mComponentFactory == nullptr) {
+        ALOGD("could not create factory in %s", libPath.c_str());
+        mInit = C2_NO_MEMORY;
+    } else {
+        mInit = C2_OK;
+    }
+
+    if (mInit != C2_OK) {
+        return mInit;
+    }
+
+    std::shared_ptr<C2ComponentInterface> intf;
+    c2_status_t res = createInterface(0, &intf);
+    if (res != C2_OK) {
+        ALOGD("failed to create interface: %d", res);
+        return mInit;
+    }
+
+    std::shared_ptr<C2Component::Traits> traits(new (std::nothrow)
+                                                    C2Component::Traits);
+    if (traits) {
+        traits->name = intf->getName();
+
+        C2ComponentKindSetting kind;
+        C2ComponentDomainSetting domain;
+        res = intf->query_vb({&kind, &domain}, {}, C2_MAY_BLOCK, nullptr);
+        bool fixDomain = res != C2_OK;
+        if (res == C2_OK) {
+            traits->kind = kind.value;
+            traits->domain = domain.value;
+        } else {
+            // TODO: remove this fall-back
+            ALOGD("failed to query interface for kind and domain: %d", res);
+
+            traits->kind = (traits->name.find("encoder") != std::string::npos)
+                               ? C2Component::KIND_ENCODER
+                           : (traits->name.find("decoder") != std::string::npos)
+                               ? C2Component::KIND_DECODER
+                               : C2Component::KIND_OTHER;
+        }
+
+        uint32_t mediaTypeIndex =
+            traits->kind == C2Component::KIND_ENCODER
+                ? C2PortMediaTypeSetting::output::PARAM_TYPE
+                : C2PortMediaTypeSetting::input::PARAM_TYPE;
+        std::vector<std::unique_ptr<C2Param>> params;
+        res = intf->query_vb({}, {mediaTypeIndex}, C2_MAY_BLOCK, &params);
+        if (res != C2_OK) {
+            ALOGD("failed to query interface: %d", res);
+            return mInit;
+        }
+        if (params.size() != 1u) {
+            ALOGD("failed to query interface: unexpected vector size: %zu",
+                  params.size());
+            return mInit;
+        }
+        C2PortMediaTypeSetting *mediaTypeConfig =
+            C2PortMediaTypeSetting::From(params[0].get());
+        if (mediaTypeConfig == nullptr) {
+            ALOGD("failed to query media type");
+            return mInit;
+        }
+        traits->mediaType = std::string(
+            mediaTypeConfig->m.value,
+            strnlen(mediaTypeConfig->m.value, mediaTypeConfig->flexCount()));
+
+        if (fixDomain) {
+            if (strncmp(traits->mediaType.c_str(), "audio/", 6) == 0) {
+                traits->domain = C2Component::DOMAIN_AUDIO;
+            } else if (strncmp(traits->mediaType.c_str(), "video/", 6) == 0) {
+                traits->domain = C2Component::DOMAIN_VIDEO;
+            } else if (strncmp(traits->mediaType.c_str(), "image/", 6) == 0) {
+                traits->domain = C2Component::DOMAIN_IMAGE;
+            } else {
+                traits->domain = C2Component::DOMAIN_OTHER;
+            }
+        }
+
+        // TODO: get this properly from the store during emplace
+        switch (traits->domain) {
+        case C2Component::DOMAIN_AUDIO:
+            traits->rank = 8;
+            break;
+        default:
+            traits->rank = 512;
+        }
+
+        params.clear();
+        res = intf->query_vb({}, {C2ComponentAliasesSetting::PARAM_TYPE},
+                             C2_MAY_BLOCK, &params);
+        if (res == C2_OK && params.size() == 1u) {
+            C2ComponentAliasesSetting *aliasesSetting =
+                C2ComponentAliasesSetting::From(params[0].get());
+            if (aliasesSetting) {
+                // Split aliases on ','
+                // This looks simpler in plain C and even std::string would
+                // still make a copy.
+                char *aliases = ::strndup(aliasesSetting->m.value,
+                                          aliasesSetting->flexCount());
+                ALOGD("'%s' has aliases: '%s'", intf->getName().c_str(),
+                      aliases);
+
+                for (char *tok, *ptr, *str = aliases;
+                     (tok = ::strtok_r(str, ",", &ptr)); str = nullptr) {
+                    traits->aliases.push_back(tok);
+                    ALOGD("adding alias: '%s'", tok);
+                }
+                free(aliases);
+            }
+        }
+    }
+    mTraits = traits;
+
+    return mInit;
+}
+
+GoldfishComponentStore::ComponentModule::~ComponentModule() {
+    ALOGI("in %s", __func__);
+    if (destroyFactory && mComponentFactory) {
+        destroyFactory(mComponentFactory);
+    }
+    if (mLibHandle) {
+        ALOGI("unloading dll");
+        dlclose(mLibHandle);
+    }
+}
+
+c2_status_t GoldfishComponentStore::ComponentModule::createInterface(
+    c2_node_id_t id, std::shared_ptr<C2ComponentInterface> *interface,
+    std::function<void(::C2ComponentInterface *)> deleter) {
+    interface->reset();
+    if (mInit != C2_OK) {
+        return mInit;
+    }
+    std::shared_ptr<ComponentModule> module = shared_from_this();
+    c2_status_t res = mComponentFactory->createInterface(
+        id, interface, [module, deleter](C2ComponentInterface *p) mutable {
+            // capture module so that we ensure we still have it while deleting
+            // interface
+            deleter(p);     // delete interface first
+            module.reset(); // remove module ref (not technically needed)
+        });
+    ALOGI("created interface");
+    return res;
+}
+
+c2_status_t GoldfishComponentStore::ComponentModule::createComponent(
+    c2_node_id_t id, std::shared_ptr<C2Component> *component,
+    std::function<void(::C2Component *)> deleter) {
+    component->reset();
+    if (mInit != C2_OK) {
+        return mInit;
+    }
+    std::shared_ptr<ComponentModule> module = shared_from_this();
+    c2_status_t res = mComponentFactory->createComponent(
+        id, component, [module, deleter](C2Component *p) mutable {
+            // capture module so that we ensure we still have it while deleting
+            // component
+            deleter(p);     // delete component first
+            module.reset(); // remove module ref (not technically needed)
+        });
+    ALOGI("created component");
+    return res;
+}
+
+std::shared_ptr<const C2Component::Traits>
+GoldfishComponentStore::ComponentModule::getTraits() {
+    std::unique_lock<std::recursive_mutex> lock(mLock);
+    return mTraits;
+}
+
+// We have a property set indicating whether to use the host side codec
+// or not (ro.boot.qemu.hwcodec.<mLibNameSuffix>).
+static std::string BuildHWCodecPropName(const char *libname) {
+    using namespace std::literals::string_literals;
+    return "ro.boot.qemu.hwcodec."s + libname;
+}
+
+static bool useAndroidGoldfishComponentInstance(const char *libname) {
+    const std::string propName = BuildHWCodecPropName(libname);
+    char propValue[PROP_VALUE_MAX];
+    bool myret = property_get(propName.c_str(), propValue, "") > 0 &&
+                 strcmp("2", propValue) == 0;
+    if (myret) {
+        ALOGD("%s %d found prop %s val %s", __func__, __LINE__, propName.c_str(),
+              propValue);
+    }
+    return myret;
+}
+
+GoldfishComponentStore::GoldfishComponentStore()
+    : mVisited(false), mReflector(std::make_shared<C2ReflectorHelper>()) {
+
+    ALOGW("created goldfish store %p reflector of param %p", this,
+          mReflector.get());
+    auto emplace = [this](const char *libPath) {
+        mComponents.emplace(libPath, libPath);
+    };
+
+    if (useAndroidGoldfishComponentInstance("vpxdec")) {
+        emplace("libcodec2_goldfish_vp8dec.so");
+        emplace("libcodec2_goldfish_vp9dec.so");
+    }
+    if (useAndroidGoldfishComponentInstance("avcdec")) {
+        emplace("libcodec2_goldfish_avcdec.so");
+    }
+}
+
+c2_status_t
+GoldfishComponentStore::copyBuffer(std::shared_ptr<C2GraphicBuffer> src,
+                                   std::shared_ptr<C2GraphicBuffer> dst) {
+    (void)src;
+    (void)dst;
+    return C2_OMITTED;
+}
+
+c2_status_t GoldfishComponentStore::query_sm(
+    const std::vector<C2Param *> &stackParams,
+    const std::vector<C2Param::Index> &heapParamIndices,
+    std::vector<std::unique_ptr<C2Param>> *const heapParams) const {
+    (void)heapParams;
+    return stackParams.empty() && heapParamIndices.empty() ? C2_OK
+                                                           : C2_BAD_INDEX;
+}
+
+c2_status_t GoldfishComponentStore::config_sm(
+    const std::vector<C2Param *> &params,
+    std::vector<std::unique_ptr<C2SettingResult>> *const failures) {
+    (void)failures;
+    return params.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+void GoldfishComponentStore::visitComponents() {
+    std::lock_guard<std::mutex> lock(mMutex);
+    if (mVisited) {
+        return;
+    }
+    for (auto &pathAndLoader : mComponents) {
+        const C2String &path = pathAndLoader.first;
+        ComponentLoader &loader = pathAndLoader.second;
+        std::shared_ptr<ComponentModule> module;
+        if (loader.fetchModule(&module) == C2_OK) {
+            std::shared_ptr<const C2Component::Traits> traits =
+                module->getTraits();
+            if (traits) {
+                mComponentList.push_back(traits);
+                mComponentNameToPath.emplace(traits->name, path);
+                for (const C2String &alias : traits->aliases) {
+                    mComponentNameToPath.emplace(alias, path);
+                }
+            }
+        }
+    }
+    mVisited = true;
+}
+
+std::vector<std::shared_ptr<const C2Component::Traits>>
+GoldfishComponentStore::listComponents() {
+    // This method SHALL return within 500ms.
+    visitComponents();
+    return mComponentList;
+}
+
+c2_status_t GoldfishComponentStore::findComponent(
+    C2String name, std::shared_ptr<ComponentModule> *module) {
+    (*module).reset();
+    visitComponents();
+
+    auto pos = mComponentNameToPath.find(name);
+    if (pos != mComponentNameToPath.end()) {
+        return mComponents.at(pos->second).fetchModule(module);
+    }
+    return C2_NOT_FOUND;
+}
+
+c2_status_t GoldfishComponentStore::createComponent(
+    C2String name, std::shared_ptr<C2Component> *const component) {
+    // This method SHALL return within 100ms.
+    component->reset();
+    std::shared_ptr<ComponentModule> module;
+    c2_status_t res = findComponent(name, &module);
+    if (res == C2_OK) {
+        // TODO: get a unique node ID
+        res = module->createComponent(0, component);
+    }
+    return res;
+}
+
+c2_status_t GoldfishComponentStore::createInterface(
+    C2String name, std::shared_ptr<C2ComponentInterface> *const interface) {
+    // This method SHALL return within 100ms.
+    interface->reset();
+    std::shared_ptr<ComponentModule> module;
+    c2_status_t res = findComponent(name, &module);
+    if (res == C2_OK) {
+        // TODO: get a unique node ID
+        res = module->createInterface(0, interface);
+    }
+    return res;
+}
+
+c2_status_t GoldfishComponentStore::querySupportedParams_nb(
+    std::vector<std::shared_ptr<C2ParamDescriptor>> *const params) const {
+    (void)params;
+    return C2_OK;
+}
+
+c2_status_t GoldfishComponentStore::querySupportedValues_sm(
+    std::vector<C2FieldSupportedValuesQuery> &fields) const {
+    return fields.empty() ? C2_OK : C2_BAD_INDEX;
+}
+
+std::shared_ptr<C2ParamReflector>
+GoldfishComponentStore::getParamReflector() const {
+    return mReflector;
+}
+
+} // namespace android
diff --git a/system/codecs/c2/store/include/goldfish_codec2/store/GoldfishComponentStore.h b/system/codecs/c2/store/include/goldfish_codec2/store/GoldfishComponentStore.h
new file mode 100644
index 0000000..f484bd4
--- /dev/null
+++ b/system/codecs/c2/store/include/goldfish_codec2/store/GoldfishComponentStore.h
@@ -0,0 +1,214 @@
+/* Copyright (C) 2020 The Android Open Source Project
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+** GNU General Public License for more details.
+*/
+
+#pragma once
+
+#include <map>
+#include <mutex>
+
+#include <C2Component.h>
+#include <C2ComponentFactory.h>
+#include <android-base/thread_annotations.h>
+#include <util/C2InterfaceHelper.h>
+
+namespace android {
+
+class GoldfishComponentStore : public C2ComponentStore {
+  public:
+    static std::shared_ptr<C2ComponentStore> Create();
+
+    virtual std::vector<std::shared_ptr<const C2Component::Traits>>
+    listComponents() override;
+    virtual std::shared_ptr<C2ParamReflector>
+    getParamReflector() const override;
+    virtual C2String getName() const override;
+    virtual c2_status_t querySupportedValues_sm(
+        std::vector<C2FieldSupportedValuesQuery> &fields) const override;
+    virtual c2_status_t querySupportedParams_nb(
+        std::vector<std::shared_ptr<C2ParamDescriptor>> *const params)
+        const override;
+    virtual c2_status_t query_sm(
+        const std::vector<C2Param *> &stackParams,
+        const std::vector<C2Param::Index> &heapParamIndices,
+        std::vector<std::unique_ptr<C2Param>> *const heapParams) const override;
+    virtual c2_status_t createInterface(
+        C2String name,
+        std::shared_ptr<C2ComponentInterface> *const interface) override;
+    virtual c2_status_t
+    createComponent(C2String name,
+                    std::shared_ptr<C2Component> *const component) override;
+    virtual c2_status_t
+    copyBuffer(std::shared_ptr<C2GraphicBuffer> src,
+               std::shared_ptr<C2GraphicBuffer> dst) override;
+    virtual c2_status_t config_sm(
+        const std::vector<C2Param *> &params,
+        std::vector<std::unique_ptr<C2SettingResult>> *const failures) override;
+    GoldfishComponentStore();
+
+    virtual ~GoldfishComponentStore() override = default;
+
+  private:
+    /**
+     * An object encapsulating a loaded component module.
+     *
+     * \todo provide a way to add traits to known components here to avoid
+     * loading the .so-s for listComponents
+     */
+    struct ComponentModule
+        : public C2ComponentFactory,
+          public std::enable_shared_from_this<ComponentModule> {
+        virtual c2_status_t
+        createComponent(c2_node_id_t id,
+                        std::shared_ptr<C2Component> *component,
+                        ComponentDeleter deleter =
+                            std::default_delete<C2Component>()) override;
+        virtual c2_status_t createInterface(
+            c2_node_id_t id, std::shared_ptr<C2ComponentInterface> *interface,
+            InterfaceDeleter deleter =
+                std::default_delete<C2ComponentInterface>()) override;
+
+        /**
+         * \returns the traits of the component in this module.
+         */
+        std::shared_ptr<const C2Component::Traits> getTraits();
+
+        /**
+         * Creates an uninitialized component module.
+         *
+         * \param name[in]  component name.
+         *
+         * \note Only used by ComponentLoader.
+         */
+        ComponentModule()
+            : mInit(C2_NO_INIT), mLibHandle(nullptr), createFactory(nullptr),
+              destroyFactory(nullptr), mComponentFactory(nullptr) {}
+
+        /**
+         * Initializes a component module with a given library path. Must be
+         * called exactly once.
+         *
+         * \note Only used by ComponentLoader.
+         *
+         * \param libPath[in] library path
+         *
+         * \retval C2_OK        the component module has been successfully
+         * loaded \retval C2_NO_MEMORY not enough memory to loading the
+         * component module \retval C2_NOT_FOUND could not locate the component
+         * module \retval C2_CORRUPTED the component module could not be loaded
+         * (unexpected) \retval C2_REFUSED   permission denied to load the
+         * component module (unexpected) \retval C2_TIMED_OUT could not load the
+         * module within the time limit (unexpected)
+         */
+        c2_status_t init(std::string libPath);
+
+        virtual ~ComponentModule() override;
+
+      protected:
+        std::recursive_mutex mLock; ///< lock protecting mTraits
+        std::shared_ptr<C2Component::Traits>
+            mTraits; ///< cached component traits
+
+        c2_status_t mInit; ///< initialization result
+
+        void *mLibHandle; ///< loaded library handle
+        C2ComponentFactory::CreateCodec2FactoryFunc
+            createFactory; ///< loaded create function
+        C2ComponentFactory::DestroyCodec2FactoryFunc
+            destroyFactory; ///< loaded destroy function
+        C2ComponentFactory
+            *mComponentFactory; ///< loaded/created component factory
+    };
+
+    /**
+     * An object encapsulating a loadable component module.
+     *
+     * \todo make this also work for enumerations
+     */
+    struct ComponentLoader {
+        /**
+         * Load the component module.
+         *
+         * This method simply returns the component module if it is already
+         * currently loaded, or attempts to load it if it is not.
+         *
+         * \param module[out] pointer to the shared pointer where the loaded
+         * module shall be stored. This will be nullptr on error.
+         *
+         * \retval C2_OK        the component module has been successfully
+         * loaded \retval C2_NO_MEMORY not enough memory to loading the
+         * component module \retval C2_NOT_FOUND could not locate the component
+         * module \retval C2_CORRUPTED the component module could not be loaded
+         * \retval C2_REFUSED   permission denied to load the component module
+         */
+        c2_status_t fetchModule(std::shared_ptr<ComponentModule> *module) {
+            c2_status_t res = C2_OK;
+            std::lock_guard<std::mutex> lock(mMutex);
+            std::shared_ptr<ComponentModule> localModule = mModule.lock();
+            if (localModule == nullptr) {
+                localModule = std::make_shared<ComponentModule>();
+                res = localModule->init(mLibPath);
+                if (res == C2_OK) {
+                    mModule = localModule;
+                }
+            }
+            *module = localModule;
+            return res;
+        }
+
+        /**
+         * Creates a component loader for a specific library path (or name).
+         */
+        ComponentLoader(std::string libPath) : mLibPath(libPath) {}
+
+      private:
+        std::mutex mMutex; ///< mutex guarding the module
+        std::weak_ptr<ComponentModule>
+            mModule;          ///< weak reference to the loaded module
+        std::string mLibPath; ///< library path
+    };
+
+    /**
+     * Retrieves the component module for a component.
+     *
+     * \param module pointer to a shared_pointer where the component module will
+     * be stored on success.
+     *
+     * \retval C2_OK        the component loader has been successfully retrieved
+     * \retval C2_NO_MEMORY not enough memory to locate the component loader
+     * \retval C2_NOT_FOUND could not locate the component to be loaded
+     * \retval C2_CORRUPTED the component loader could not be identified due to
+     * some modules being corrupted (this can happen if the name does not refer
+     * to an already identified component but some components could not be
+     * loaded due to bad library) \retval C2_REFUSED   permission denied to find
+     * the component loader for the named component (this can happen if the name
+     * does not refer to an already identified component but some components
+     * could not be loaded due to lack of permissions)
+     */
+    c2_status_t findComponent(C2String name,
+                              std::shared_ptr<ComponentModule> *module);
+
+    /**
+     * Loads each component module and discover its contents.
+     */
+    void visitComponents();
+
+    std::mutex
+        mMutex;    ///< mutex guarding the component lists during construction
+    bool mVisited; ///< component modules visited
+    std::map<C2String, ComponentLoader>
+        mComponents; ///< path -> component module
+    std::map<C2String, C2String> mComponentNameToPath; ///< name -> path
+    std::vector<std::shared_ptr<const C2Component::Traits>> mComponentList;
+
+    std::shared_ptr<C2ReflectorHelper> mReflector;
+};
+} // namespace android
diff --git a/system/codecs/omx/avcdec/GoldfishAVCDec.cpp b/system/codecs/omx/avcdec/GoldfishAVCDec.cpp
index c7e7b79..d90241d 100644
--- a/system/codecs/omx/avcdec/GoldfishAVCDec.cpp
+++ b/system/codecs/omx/avcdec/GoldfishAVCDec.cpp
@@ -14,9 +14,16 @@
  * limitations under the License.
  */
 
-//#define LOG_NDEBUG 0
+
 #include <utils/Log.h>
 
+#define DEBUG  0
+#if DEBUG
+#  define  DDD(...)    ALOGD(__VA_ARGS__)
+#else
+#  define  DDD(...)    ((void)0)
+#endif
+
 #include "GoldfishAVCDec.h"
 
 #include <media/stagefright/foundation/ADebug.h>
@@ -86,17 +93,17 @@
     // If input dump is enabled, then open create an empty file
     GENERATE_FILE_NAMES();
     CREATE_DUMP_FILE(mInFile);
-    ALOGD("created %s %d object %p", __func__, __LINE__, this);
+    ALOGI("created %s %d object %p", __func__, __LINE__, this);
 }
 
 GoldfishAVCDec::~GoldfishAVCDec() {
     CHECK_EQ(deInitDecoder(), (status_t)OK);
-    ALOGD("destroyed %s %d object %p", __func__, __LINE__, this);
+    DDD("destroyed %s %d object %p", __func__, __LINE__, this);
 }
 
 void GoldfishAVCDec::logVersion() {
     // TODO: get emulation decoder implementation version from the host.
-    ALOGV("GoldfishAVC decoder version 1.0");
+    ALOGI("GoldfishAVC decoder version 1.0");
 }
 
 status_t GoldfishAVCDec::resetPlugin() {
@@ -129,6 +136,9 @@
 
 status_t GoldfishAVCDec::initDecoder() {
     /* Initialize the decoder */
+    if (mEnableAndroidNativeBuffers == false) {
+        mRenderMode = RenderMode::RENDER_BY_GUEST_CPU;
+    }
     mContext.reset(new MediaH264Decoder(mRenderMode));
     mContext->initH264Context(mWidth,
                               mHeight,
@@ -175,7 +185,7 @@
     ColorUtils::convertIsoColorAspectsToCodecAspects(
             primaries, transfer, coeffs, fullRange, colorAspects);
 
-    ALOGD("img pts %lld, primaries %d, range %d transfer %d colorspace %d", (long long)img.pts,
+    DDD("img pts %lld, primaries %d, range %d transfer %d colorspace %d", (long long)img.pts,
             (int)img.color_primaries, (int)img.color_range, (int)img.color_trc, (int)img.colorspace);
 
     // Update color aspects if necessary.
@@ -198,7 +208,7 @@
     if (inHeader) {
         mConsumedBytes = inHeader->nFilledLen - mInputOffset;
         mInPBuffer = inHeader->pBuffer + inHeader->nOffset + mInputOffset;
-        ALOGD("got input timestamp %lld in-addr-base %p real-data-offset %d inputoffset %d", (long long)(inHeader->nTimeStamp),
+        DDD("got input timestamp %lld in-addr-base %p real-data-offset %d inputoffset %d", (long long)(inHeader->nTimeStamp),
                 inHeader->pBuffer, (int)(inHeader->nOffset + mInputOffset), (int)mInputOffset);
     } else {
         mConsumedBytes = 0;
@@ -224,7 +234,7 @@
     while (mContext) {
         h264_image_t img = mContext->getImage();
         if (img.data != nullptr) {
-            ALOGD("img pts %lld is discarded", (long long)img.pts);
+            DDD("img pts %lld is discarded", (long long)img.pts);
         } else {
             return;
         }
@@ -235,7 +245,7 @@
     /* Once the output buffers are flushed, ignore any buffers that are held in decoder */
     if (kOutputPortIndex == portIndex) {
         setFlushMode();
-        ALOGD("%s %d", __func__, __LINE__);
+        DDD("%s %d", __func__, __LINE__);
         readAndDiscardAllHostBuffers();
         mContext->resetH264Context(mWidth, mHeight, mWidth, mHeight, MediaH264Decoder::PixelFormat::YUV420P);
         if (!mCsd0.empty() && !mCsd1.empty()) {
@@ -267,18 +277,18 @@
 
 int GoldfishAVCDec::getHostColorBufferId(void* header) {
   if (mNWBuffers.find(header) == mNWBuffers.end()) {
-      ALOGD("cannot find color buffer for header %p", header);
+      DDD("cannot find color buffer for header %p", header);
     return -1;
   }
   sp<ANativeWindowBuffer> nBuf = mNWBuffers[header];
   cb_handle_t *handle = (cb_handle_t*)nBuf->handle;
-  ALOGD("found color buffer for header %p --> %d", header, handle->hostHandle);
+  DDD("found color buffer for header %p --> %d", header, handle->hostHandle);
   return handle->hostHandle;
 }
 
 void GoldfishAVCDec::onQueueFilled(OMX_U32 portIndex) {
     static int count1=0;
-    ALOGD("calling %s count %d object %p", __func__, ++count1, this);
+    DDD("calling %s count %d object %p", __func__, ++count1, this);
     UNUSED(portIndex);
     OMX_BUFFERHEADERTYPE *inHeader = NULL;
     BufferInfo *inInfo = NULL;
@@ -304,7 +314,7 @@
 
     int count2=0;
     while (!outQueue.empty()) {
-        ALOGD("calling %s in while loop count %d", __func__, ++count2);
+        DDD("calling %s in while loop count %d", __func__, ++count2);
         BufferInfo *outInfo;
         OMX_BUFFERHEADERTYPE *outHeader;
 
@@ -376,7 +386,7 @@
                         mCsd1.assign(mydata, mydata + mysize);
                     }
                 }
-                ALOGD("Decoding frame(sz=%lu)", (unsigned long)(inHeader->nFilledLen - mInputOffset));
+                DDD("Decoding frame(sz=%lu)", (unsigned long)(inHeader->nFilledLen - mInputOffset));
                 h264Res = mContext->decodeFrame(mInPBuffer,
                                                 inHeader->nFilledLen - mInputOffset,
                                                 inHeader->nTimeStamp);
@@ -385,7 +395,7 @@
                     mChangingResolution = true;
                 }
             } else {
-                ALOGD("No more input data. Attempting to get a decoded frame, if any.");
+                DDD("No more input data. Attempting to get a decoded frame, if any.");
             }
             h264_image_t img = {};
 
@@ -413,19 +423,19 @@
 
 
             if (inHeader) {
-                ALOGD("input time stamp %lld flag %d", inHeader->nTimeStamp, (int)(inHeader->nFlags));
+                DDD("input time stamp %lld flag %d", inHeader->nTimeStamp, (int)(inHeader->nFlags));
             }
 
             // If the decoder is in the changing resolution mode and there is no output present,
             // that means the switching is done and it's ready to reset the decoder and the plugin.
             if (mChangingResolution && img.data == nullptr) {
                 mChangingResolution = false;
-                ALOGD("re-create decoder because resolution changed");
+                DDD("re-create decoder because resolution changed");
                 bool portWillReset = false;
                 handlePortSettingsChange(&portWillReset, img.width, img.height);
                 {
-                    ALOGD("handling port reset");
-                    ALOGD("port resetting (img.width=%u, img.height=%u, mWidth=%u, mHeight=%u)",
+                    DDD("handling port reset");
+                    DDD("port resetting (img.width=%u, img.height=%u, mWidth=%u, mHeight=%u)",
                           img.width, img.height, mWidth, mHeight);
                     //resetDecoder();
                     resetPlugin();
@@ -452,10 +462,10 @@
                     mWidth = myWidth;
                     mHeight = myHeight;
                     if (portWillReset) {
-                        ALOGD("port will reset return now");
+                        DDD("port will reset return now");
                         return;
                     } else {
-                        ALOGD("port will NOT reset keep going now");
+                        DDD("port will NOT reset keep going now");
                     }
                 }
                 outHeader->nFilledLen =  (outputBufferWidth() * outputBufferHeight() * 3) / 2;
@@ -468,7 +478,7 @@
                 }
 
                 outHeader->nTimeStamp = img.pts;
-                ALOGD("got output timestamp %lld", (long long)(img.pts));
+                DDD("got output timestamp %lld", (long long)(img.pts));
 
                 outInfo->mOwnedByUs = false;
                 outQueue.erase(outQueue.begin());
@@ -476,7 +486,7 @@
                 notifyFillBufferDone(outHeader);
                 outHeader = NULL;
             } else if (mIsInFlush) {
-                ALOGD("not img.data and it is in flush mode");
+                DDD("not img.data and it is in flush mode");
                 /* If in flush mode and no output is returned by the codec,
                  * then come out of flush mode */
                 mIsInFlush = false;
@@ -484,7 +494,7 @@
                 /* If EOS was recieved on input port and there is no output
                  * from the codec, then signal EOS on output port */
                 if (mReceivedEOS) {
-                    ALOGD("recived EOS, re-create host context");
+                    ALOGI("received EOS, re-create host context");
                     outHeader->nFilledLen = 0;
                     outHeader->nFlags |= OMX_BUFFERFLAG_EOS;
 
@@ -536,7 +546,7 @@
     switch (indexFull) {
         case kGetAndroidNativeBufferUsageIndex:
         {
-            ALOGD("calling kGetAndroidNativeBufferUsageIndex");
+            DDD("calling kGetAndroidNativeBufferUsageIndex");
             GetAndroidNativeBufferUsageParams* nativeBuffersUsage = (GetAndroidNativeBufferUsageParams *) params;
             nativeBuffersUsage->nUsage = (unsigned int)(BufferUsage::GPU_DATA_BUFFER);
             return OMX_ErrorNone;
@@ -555,15 +565,15 @@
     switch (indexFull) {
         case kEnableAndroidNativeBuffersIndex:
         {
-            ALOGD("calling kEnableAndroidNativeBuffersIndex");
+            DDD("calling kEnableAndroidNativeBuffersIndex");
             EnableAndroidNativeBuffersParams* enableNativeBuffers = (EnableAndroidNativeBuffersParams *) params;
             if (enableNativeBuffers) {
                 mEnableAndroidNativeBuffers = enableNativeBuffers->enable;
                 if (mEnableAndroidNativeBuffers == false) {
                     mNWBuffers.clear();
-                    ALOGD("disabled kEnableAndroidNativeBuffersIndex");
+                    DDD("disabled kEnableAndroidNativeBuffersIndex");
                 } else {
-                    ALOGD("enabled kEnableAndroidNativeBuffersIndex");
+                    DDD("enabled kEnableAndroidNativeBuffersIndex");
                 }
             }
             return OMX_ErrorNone;
@@ -580,7 +590,7 @@
                 sp<ANativeWindowBuffer> nBuf = use_buffer_params->nativeBuffer;
                 cb_handle_t *handle = (cb_handle_t*)nBuf->handle;
                 void* dst = NULL;
-                ALOGD("kUseAndroidNativeBufferIndex with handle %p host color handle %d calling usebuffer", handle,
+                DDD("kUseAndroidNativeBufferIndex with handle %p host color handle %d calling usebuffer", handle,
                       handle->hostHandle);
                 useBufferCallerLockedAlready(use_buffer_params->bufferHeader,use_buffer_params->nPortIndex,
                         use_buffer_params->pAppPrivate,handle->allocatedSize(), (OMX_U8*)dst);
@@ -599,7 +609,7 @@
 
     if (mRenderMode == RenderMode::RENDER_BY_HOST_GPU) {
         if (!strcmp(name, "OMX.google.android.index.enableAndroidNativeBuffers")) {
-            ALOGD("calling getExtensionIndex for enable ANB");
+            DDD("calling getExtensionIndex for enable ANB");
             *(int32_t*)index = kEnableAndroidNativeBuffersIndex;
             return OMX_ErrorNone;
         } else if (!strcmp(name, "OMX.google.android.index.useAndroidNativeBuffer")) {
diff --git a/system/codecs/omx/avcdec/MediaH264Decoder.cpp b/system/codecs/omx/avcdec/MediaH264Decoder.cpp
index 50e9e79..6e14797 100644
--- a/system/codecs/omx/avcdec/MediaH264Decoder.cpp
+++ b/system/codecs/omx/avcdec/MediaH264Decoder.cpp
@@ -16,6 +16,13 @@
 
 #include <utils/Log.h>
 
+#define DEBUG  0
+#if DEBUG
+#  define  DDD(...)    ALOGD(__VA_ARGS__)
+#else
+#  define  DDD(...)    ((void)0)
+#endif
+
 #include "MediaH264Decoder.h"
 #include "goldfish_media_utils.h"
 #include <string.h>
@@ -40,8 +47,9 @@
             ALOGE("ERROR: Failed to initH264Context: cannot get memory slot");
             return;
         }
-        mAddressOffSet = static_cast<unsigned int>(slot) * 8 * (1 << 20);
-        ALOGD("got memory lot %d addrr %x", slot, mAddressOffSet);
+        mSlot = slot;
+        mAddressOffSet = static_cast<unsigned int>(mSlot) * (1 << 20);
+        DDD("got memory lot %d addrr %x", mSlot, mAddressOffSet);
         mHasAddressSpaceMemory = true;
     }
     transport->writeParam(mVersion, 0, mAddressOffSet);
@@ -54,7 +62,7 @@
                              MediaOperation::InitContext, mAddressOffSet);
     auto* retptr = transport->getReturnAddr(mAddressOffSet);
     mHostHandle = *(uint64_t*)(retptr);
-    ALOGD("initH264Context: got handle to host %lld", mHostHandle);
+    DDD("initH264Context: got handle to host %lld", mHostHandle);
 }
 
 
@@ -76,23 +84,23 @@
     transport->writeParam(static_cast<uint64_t>(pixFmt), 5, mAddressOffSet);
     transport->sendOperation(MediaCodecType::H264Codec,
                              MediaOperation::Reset, mAddressOffSet);
-    ALOGD("resetH264Context: done");
+    DDD("resetH264Context: done");
 }
 
 
 void MediaH264Decoder::destroyH264Context() {
 
-    ALOGD("return memory lot %d addrr %x", (int)(mAddressOffSet >> 23), mAddressOffSet);
+    DDD("return memory lot %d addrr %x", (int)(mAddressOffSet >> 23), mAddressOffSet);
     auto transport = GoldfishMediaTransport::getInstance();
     transport->writeParam((uint64_t)mHostHandle, 0, mAddressOffSet);
     transport->sendOperation(MediaCodecType::H264Codec,
                              MediaOperation::DestroyContext, mAddressOffSet);
-    transport->returnMemorySlot(mAddressOffSet >> 23);
+    transport->returnMemorySlot(mSlot);
     mHasAddressSpaceMemory = false;
 }
 
 h264_result_t MediaH264Decoder::decodeFrame(uint8_t* img, size_t szBytes, uint64_t pts) {
-    ALOGD("decode frame: use handle to host %lld", mHostHandle);
+    DDD("decode frame: use handle to host %lld", mHostHandle);
     h264_result_t res = {0, 0};
     if (!mHasAddressSpaceMemory) {
         ALOGE("%s no address space memory", __func__);
@@ -123,7 +131,7 @@
         ALOGE("%s no address space memory", __func__);
         return;
     }
-    ALOGD("flush: use handle to host %lld", mHostHandle);
+    DDD("flush: use handle to host %lld", mHostHandle);
     auto transport = GoldfishMediaTransport::getInstance();
     transport->writeParam((uint64_t)mHostHandle, 0, mAddressOffSet);
     transport->sendOperation(MediaCodecType::H264Codec,
@@ -131,7 +139,7 @@
 }
 
 h264_image_t MediaH264Decoder::getImage() {
-    ALOGD("getImage: use handle to host %lld", mHostHandle);
+    DDD("getImage: use handle to host %lld", mHostHandle);
     h264_image_t res { };
     if (!mHasAddressSpaceMemory) {
         ALOGE("%s no address space memory", __func__);
@@ -150,7 +158,7 @@
         res.data = dst;
         res.width = *(uint32_t*)(retptr + 8);
         res.height = *(uint32_t*)(retptr + 16);
-        res.pts = *(uint32_t*)(retptr + 24);
+        res.pts = *(uint64_t*)(retptr + 24);
         res.color_primaries = *(uint32_t*)(retptr + 32);
         res.color_range = *(uint32_t*)(retptr + 40);
         res.color_trc = *(uint32_t*)(retptr + 48);
@@ -164,13 +172,13 @@
 
 
 h264_image_t MediaH264Decoder::renderOnHostAndReturnImageMetadata(int hostColorBufferId) {
-    ALOGD("%s: use handle to host %lld", __func__, mHostHandle);
+    DDD("%s: use handle to host %lld", __func__, mHostHandle);
     h264_image_t res { };
     if (hostColorBufferId < 0) {
       ALOGE("%s negative color buffer id %d", __func__, hostColorBufferId);
       return res;
     }
-    ALOGD("%s send color buffer id %d", __func__, hostColorBufferId);
+    DDD("%s send color buffer id %d", __func__, hostColorBufferId);
     if (!mHasAddressSpaceMemory) {
         ALOGE("%s no address space memory", __func__);
         return res;
@@ -188,7 +196,7 @@
         res.data = dst; // note: the data could be junk
         res.width = *(uint32_t*)(retptr + 8);
         res.height = *(uint32_t*)(retptr + 16);
-        res.pts = *(uint32_t*)(retptr + 24);
+        res.pts = *(uint64_t*)(retptr + 24);
         res.color_primaries = *(uint32_t*)(retptr + 32);
         res.color_range = *(uint32_t*)(retptr + 40);
         res.color_trc = *(uint32_t*)(retptr + 48);
diff --git a/system/codecs/omx/avcdec/MediaH264Decoder.h b/system/codecs/omx/avcdec/MediaH264Decoder.h
index 8264d3c..a75758f 100644
--- a/system/codecs/omx/avcdec/MediaH264Decoder.h
+++ b/system/codecs/omx/avcdec/MediaH264Decoder.h
@@ -53,6 +53,7 @@
 
     bool mHasAddressSpaceMemory = false;
     uint64_t mAddressOffSet = 0;
+    int mSlot = -1;
 
 public:
     MediaH264Decoder(RenderMode renderMode);
diff --git a/system/codecs/omx/common/Android.mk b/system/codecs/omx/common/Android.mk
index 9e233e7..1ebdff9 100644
--- a/system/codecs/omx/common/Android.mk
+++ b/system/codecs/omx/common/Android.mk
@@ -33,6 +33,15 @@
 $(call emugl-import,libGoldfishAddressSpace$(GOLDFISH_OPENGL_LIB_SUFFIX))
 else
 $(call emugl-export,STATIC_LIBRARIES,libGoldfishAddressSpace)
+
+ifeq (true,$(GFXSTREAM))
+
+LOCAL_CFLAGS += -DVIRTIO_GPU
+LOCAL_C_INCLUDES += external/libdrm external/minigbm/cros_gralloc
+LOCAL_SHARED_LIBRARIES += libdrm
+
+endif
+
 endif
 
 $(call emugl-end-module)
diff --git a/system/codecs/omx/common/goldfish_media_utils.cpp b/system/codecs/omx/common/goldfish_media_utils.cpp
index ac1d1b0..2cf6ff6 100644
--- a/system/codecs/omx/common/goldfish_media_utils.cpp
+++ b/system/codecs/omx/common/goldfish_media_utils.cpp
@@ -17,6 +17,14 @@
 #include "goldfish_address_space.h"
 
 #include <log/log.h>
+
+#define DEBUG  0
+#if DEBUG
+#  define  DDD(...)    ALOGD(__VA_ARGS__)
+#else
+#  define  DDD(...)    ((void)0)
+#endif
+
 #include <memory>
 #include <vector>
 #include <mutex>
@@ -40,13 +48,26 @@
     virtual __u64 offsetOf(uint64_t addr) const override;
 
 public:
-    // each lot has 8 M
+    // each lot has 2 M
     virtual int getMemorySlot() override {
         std::lock_guard<std::mutex> g{mMemoryMutex};
-        for (int i = mMemoryLotsAvailable.size() - 1; i >=0 ; --i) {
-            if (mMemoryLotsAvailable[i]) {
-                mMemoryLotsAvailable[i] = false;
-                return i;
+        // when there are just 1 decoder, it can pretty
+        // much use all the memory starting from 0;
+        // when there are two, each can use at least half
+        // the total memory, etc.
+        constexpr size_t search_order[] = {
+                0, // use 32M
+                16, // use 16M
+                8, 24, // use 8M
+                4, 12, 20, 28, // use 4M
+                2, 6, 10, 12, 18, 22, 26, 30, // use 2M
+                1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 // use 1M
+        };
+        for (size_t i = 0; i < sizeof(search_order)/sizeof(search_order[0]); ++i) {
+            int slot = search_order[i];
+            if (mMemoryLotsAvailable[slot]) {
+                mMemoryLotsAvailable[slot] = false;
+                return slot;
             }
         }
         return -1;
@@ -64,7 +85,7 @@
     }
 private:
     std::mutex mMemoryMutex;
-    std::vector<bool> mMemoryLotsAvailable = {true, true, true, true};
+    std::vector<bool> mMemoryLotsAvailable = std::vector<bool>(32,true);
 
     address_space_handle_t mHandle;
     uint64_t  mOffset;
@@ -111,11 +132,11 @@
     mSize = kParamSizeBytes + kInputSizeBytes + kOutputSizeBytes;
     bool success = goldfish_address_space_allocate(mHandle, mSize, &mPhysAddr, &mOffset);
     if (success) {
-        ALOGD("successfully allocated %d bytes in goldfish_address_block", (int)mSize);
+        ALOGI("successfully allocated %d bytes in goldfish_address_block", (int)mSize);
         mStartPtr = goldfish_address_space_map(mHandle, mOffset, mSize);
-        ALOGD("guest address is %p", mStartPtr);
+        ALOGI("guest address is %p", mStartPtr);
 
-        struct goldfish_address_space_ping pingInfo;
+        struct address_space_ping pingInfo;
         pingInfo.metadata = GoldfishAddressSpaceSubdeviceType::Media;
         pingInfo.offset = mOffset;
         if (goldfish_address_space_ping(mHandle, &pingInfo) == false) {
@@ -123,7 +144,7 @@
             abort();
             return;
         } else {
-            ALOGD("successfully pinged host to allocate memory");
+            ALOGI("successfully pinged host to allocate memory");
         }
     } else {
         ALOGE("failed to allocate %d bytes in goldfish_address_block", (int)mSize);
@@ -146,6 +167,10 @@
     // Shift |type| into the highest 8-bits, leaving the lower bits for other
     // metadata.
     offset = offset >> 20;
+    if (offset < 0 || offset >= 32) {
+        ALOGE("offset %d is wrong", (int)offset);
+        abort();
+    }
     return ((__u64)type << (64 - 8)) | (offset << 8) | static_cast<uint8_t>(op);
 }
 
@@ -177,7 +202,7 @@
 
 bool GoldfishMediaTransportImpl::sendOperation(MediaCodecType type,
                                                MediaOperation op, unsigned int offSetToStartAddr) {
-    struct goldfish_address_space_ping pingInfo;
+    struct address_space_ping pingInfo;
     pingInfo.metadata = makeMetadata(type, op, offSetToStartAddr);
     pingInfo.offset = mOffset; // + (offSetToStartAddr);
     if (goldfish_address_space_ping(mHandle, &pingInfo) == false) {
@@ -185,7 +210,7 @@
         abort();
         return false;
     } else {
-        ALOGD("successfully pinged host for operation type=%d, op=%d", (int)type, (int)op);
+        DDD("successfully pinged host for operation type=%d, op=%d", (int)type, (int)op);
     }
 
     return true;
diff --git a/system/codecs/omx/plugin/GoldfishOMXPlugin.cpp b/system/codecs/omx/plugin/GoldfishOMXPlugin.cpp
index 7ff9fa4..6843381 100644
--- a/system/codecs/omx/plugin/GoldfishOMXPlugin.cpp
+++ b/system/codecs/omx/plugin/GoldfishOMXPlugin.cpp
@@ -45,32 +45,44 @@
     const char *mRole;
 };
 
-static bool useGoogleGoldfishComponentInstance(const char* libname) {
-    // We have a property set indicating whether to use the host side codec
-    // or not (ro.kernel.qemu.hwcodec.<mLibNameSuffix>).
+static bool useOmxCodecs() {
     char propValue[PROP_VALUE_MAX];
-    AString prop = "ro.kernel.qemu.hwcodec.";
-    prop.append(libname);
-
+    AString prop = "debug.stagefright.ccodec";
     bool myret = property_get(prop.c_str(), propValue, "") > 0 &&
-           strcmp("1", propValue) == 0;
+           strcmp("0", propValue) == 0;
     if (myret) {
         ALOGD("%s %d found prop %s val %s", __func__, __LINE__, prop.c_str(), propValue);
     }
     return myret;
 }
 
-static bool useAndroidGoldfishComponentInstance(const char* libname) {
-    // We have a property set indicating whether to use the host side codec
-    // or not (ro.kernel.qemu.hwcodec.<mLibNameSuffix>).
-    char propValue[PROP_VALUE_MAX];
-    AString prop = "ro.kernel.qemu.hwcodec.";
-    prop.append(libname);
+// We have a property set indicating whether to use the host side codec
+// or not (ro.boot.qemu.hwcodec.<mLibNameSuffix>).
+static std::string BuildHWCodecPropName(const char *libname) {
+    using namespace std::literals::string_literals;
+    return "ro.boot.qemu.hwcodec."s + libname;
+}
 
-    bool myret = property_get(prop.c_str(), propValue, "") > 0 &&
+static bool useGoogleGoldfishComponentInstance(const char* libname) {
+    const std::string propName = BuildHWCodecPropName(libname);
+    char propValue[PROP_VALUE_MAX];
+
+    bool myret = property_get(propName.c_str(), propValue, "") > 0 &&
+           strcmp("1", propValue) == 0;
+    if (myret) {
+        ALOGD("%s %d found prop %s val %s", __func__, __LINE__, propName.c_str(), propValue);
+    }
+    return myret;
+}
+
+static bool useAndroidGoldfishComponentInstance(const char* libname) {
+    const std::string propName = BuildHWCodecPropName(libname);
+    char propValue[PROP_VALUE_MAX];
+
+    bool myret = property_get(propName.c_str(), propValue, "") > 0 &&
            strcmp("2", propValue) == 0;
     if (myret) {
-        ALOGD("%s %d found prop %s val %s", __func__, __LINE__, prop.c_str(), propValue);
+        ALOGD("%s %d found prop %s val %s", __func__, __LINE__, propName.c_str(), propValue);
     }
     return myret;
 }
@@ -90,15 +102,21 @@
     sizeof(kComponents) / sizeof(kComponents[0]);
 
 GoldfishOMXPlugin::GoldfishOMXPlugin() {
-    for (int i = 0; i < kNumComponents; ++i) {
-        if ( !strncmp("OMX.google", kComponents[i].mName, 10) &&
-             useGoogleGoldfishComponentInstance(kComponents[i].mLibNameSuffix)) {
-            ALOGD("found and use kComponents[i].name %s", kComponents[i].mName);
-            kActiveComponents.push_back(kComponents[i]);
-        } else if (!strncmp("OMX.android", kComponents[i].mName, 11) &&
-                   useAndroidGoldfishComponentInstance(kComponents[i].mLibNameSuffix)) {
-            ALOGD("found and use kComponents[i].name %s", kComponents[i].mName);
-            kActiveComponents.push_back(kComponents[i]);
+    if (useOmxCodecs()) {
+        for (int i = 0; i < kNumComponents; ++i) {
+            if (!strncmp("OMX.google", kComponents[i].mName, 10) &&
+                useGoogleGoldfishComponentInstance(
+                    kComponents[i].mLibNameSuffix)) {
+                ALOGD("found and use kComponents[i].name %s",
+                      kComponents[i].mName);
+                kActiveComponents.push_back(kComponents[i]);
+            } else if (!strncmp("OMX.android", kComponents[i].mName, 11) &&
+                       useAndroidGoldfishComponentInstance(
+                           kComponents[i].mLibNameSuffix)) {
+                ALOGD("found and use kComponents[i].name %s",
+                      kComponents[i].mName);
+                kActiveComponents.push_back(kComponents[i]);
+            }
         }
     }
 }
diff --git a/system/codecs/omx/vpxdec/GoldfishVPX.cpp b/system/codecs/omx/vpxdec/GoldfishVPX.cpp
index 2e7043f..edf6aa3 100644
--- a/system/codecs/omx/vpxdec/GoldfishVPX.cpp
+++ b/system/codecs/omx/vpxdec/GoldfishVPX.cpp
@@ -75,7 +75,7 @@
       mRenderMode(renderMode),
       mEOSStatus(INPUT_DATA_AVAILABLE),
       mCtx(NULL),
-      mFrameParallelMode(false),
+      mFrameParallelMode(true),
       mTimeStampIdx(0),
       mImg(NULL) {
     // arbitrary from avc/hevc as vpx does not specify a min compression ratio
@@ -113,6 +113,8 @@
     int vpx_err = 0;
     if ((vpx_err = vpx_codec_dec_init(mCtx))) {
         ALOGE("vpx decoder failed to initialize. (%d)", vpx_err);
+        delete mCtx;
+        mCtx = NULL;
         return UNKNOWN_ERROR;
     }
 
diff --git a/system/codecs/omx/vpxdec/goldfish_vpx_impl.cpp b/system/codecs/omx/vpxdec/goldfish_vpx_impl.cpp
index 92dfcfa..380732b 100644
--- a/system/codecs/omx/vpxdec/goldfish_vpx_impl.cpp
+++ b/system/codecs/omx/vpxdec/goldfish_vpx_impl.cpp
@@ -50,6 +50,10 @@
 
 int vpx_codec_destroy(vpx_codec_ctx_t* ctx) {
     DDD("%s %d", __func__, __LINE__);
+    if (!ctx) {
+      ALOGE("ERROR: Failed %s %d: ctx is nullptr", __func__, __LINE__);
+      return -1;
+    }
     auto transport = GoldfishMediaTransport::getInstance();
     transport->writeParam(ctx->id, 0, ctx->address_offset);
     sendVpxOperation(ctx, MediaOperation::DestroyContext);
@@ -71,7 +75,7 @@
     }
     ctx->id = applyForOneId();
     ctx->memory_slot = slot;
-    ctx->address_offset = static_cast<unsigned int>(slot) * 8 * (1 << 20);
+    ctx->address_offset = static_cast<unsigned int>(ctx->memory_slot) * (1 << 20);
     DDD("got address offset 0x%x version %d", (int)(ctx->address_offset),
         ctx->version);
 
@@ -108,6 +112,10 @@
 //TODO: we might not need to do the putting all the time
 vpx_image_t* vpx_codec_get_frame(vpx_codec_ctx_t* ctx, int hostColorBufferId) {
     DDD("%s %d %p", __func__, __LINE__);
+    if (!ctx) {
+      ALOGE("ERROR: Failed %s %d: ctx is nullptr", __func__, __LINE__);
+      return nullptr;
+    }
     auto transport = GoldfishMediaTransport::getInstance();
 
     transport->writeParam(ctx->id, 0, ctx->address_offset);
@@ -134,6 +142,10 @@
 
 int vpx_codec_flush(vpx_codec_ctx_t* ctx) {
     DDD("%s %d", __func__, __LINE__);
+    if (!ctx) {
+      ALOGE("ERROR: Failed %s %d: ctx is nullptr", __func__, __LINE__);
+      return -1;
+    }
     auto transport = GoldfishMediaTransport::getInstance();
     transport->writeParam(ctx->id, 0, ctx->address_offset);
     sendVpxOperation(ctx, MediaOperation::Flush);
@@ -145,6 +157,10 @@
                      unsigned int data_sz,
                      void* user_priv,
                      long deadline) {
+    if (!ctx) {
+      ALOGE("ERROR: Failed %s %d: ctx is nullptr", __func__, __LINE__);
+      return -1;
+    }
     DDD("%s %d data size %d userpriv %p", __func__, __LINE__, (int)data_sz,
         user_priv);
     auto transport = GoldfishMediaTransport::getInstance();
diff --git a/system/egl/Android.mk b/system/egl/Android.mk
index 6e239b2..e63e7e0 100644
--- a/system/egl/Android.mk
+++ b/system/egl/Android.mk
@@ -3,7 +3,7 @@
 LOCAL_PATH := $(call my-dir)
 
 $(call emugl-begin-shared-library,libEGL_emulation)
-$(call emugl-import,libOpenglSystemCommon)
+$(call emugl-import,libOpenglSystemCommon libGoldfishProfiler)
 $(call emugl-set-shared-library-subpath,egl)
 
 ifeq (true,$(GOLDFISH_OPENGL_BUILD_FOR_HOST))
@@ -23,7 +23,12 @@
 ifneq (true,$(GOLDFISH_OPENGL_BUILD_FOR_HOST))
 
 LOCAL_SHARED_LIBRARIES += libdl
-endif
+ifeq (true,$(GFXSTREAM))
+LOCAL_CFLAGS += -DVIRTIO_GPU
+LOCAL_C_INCLUDES += external/libdrm
+LOCAL_SHARED_LIBRARIES += libdrm
+endif # GFXSTREAM
+endif # GOLDFISH_OPENGL_BUILD_FOR_HOST
 
 ifneq (true,$(GOLDFISH_OPENGL_BUILD_FOR_HOST))
 ifdef IS_AT_LEAST_OPM1
@@ -50,6 +55,9 @@
 include $(CLEAR_VARS)
 
 LOCAL_MODULE := egl.cfg
+LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0
+LOCAL_LICENSE_CONDITIONS := notice
+LOCAL_NOTICE_FILE := $(LOCAL_PATH)/../../LICENSE
 LOCAL_SRC_FILES := $(LOCAL_MODULE)
 
 LOCAL_MODULE_PATH := $(TARGET_OUT)/lib/egl
diff --git a/system/egl/CMakeLists.txt b/system/egl/CMakeLists.txt
index 08db0c8..c380911 100644
--- a/system/egl/CMakeLists.txt
+++ b/system/egl/CMakeLists.txt
@@ -1,10 +1,10 @@
 # This is an autogenerated file! Do not edit!
 # instead run make from .../device/generic/goldfish-opengl
 # which will re-generate this file.
-android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/egl/Android.mk" "597fba46fce0876a62ef3c0bc8f3a0264503214b62b0b0da092ee90fe60f09e1")
+android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/egl/Android.mk" "d7a50905a3d2624cf594158d9c370bf29bc6115d967bdfa919e9ffa42a87d0bb")
 set(EGL_emulation_src eglDisplay.cpp egl.cpp ClientAPIExts.cpp)
 android_add_library(TARGET EGL_emulation SHARED LICENSE Apache-2.0 SRC eglDisplay.cpp egl.cpp ClientAPIExts.cpp)
-target_include_directories(EGL_emulation PRIVATE ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon/bionic-include ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon ${GOLDFISH_DEVICE_ROOT}/bionic/libc/private ${GOLDFISH_DEVICE_ROOT}/bionic/libc/platform ${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/gralloc_cb/include ${GOLDFISH_DEVICE_ROOT}/shared/GoldfishAddressSpace/include ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv2_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv1_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
-target_compile_definitions(EGL_emulation PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGOLDFISH_VULKAN" "-DLOG_TAG=\"EGL_emulation\"" "-DEGL_EGLEXT_PROTOTYPES")
+target_include_directories(EGL_emulation PRIVATE ${GOLDFISH_DEVICE_ROOT}/system/profiler ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon/bionic-include ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon ${GOLDFISH_DEVICE_ROOT}/bionic/libc/private ${GOLDFISH_DEVICE_ROOT}/bionic/libc/platform ${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc ${GOLDFISH_DEVICE_ROOT}/shared/gralloc_cb/include ${GOLDFISH_DEVICE_ROOT}/shared/GoldfishAddressSpace/include ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv2_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv1_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
+target_compile_definitions(EGL_emulation PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM" "-DLOG_TAG=\"EGL_emulation\"" "-DEGL_EGLEXT_PROTOTYPES")
 target_compile_options(EGL_emulation PRIVATE "-fvisibility=default" "-Wno-unused-parameter" "-Wno-gnu-designator")
-target_link_libraries(EGL_emulation PRIVATE OpenglSystemCommon android-emu-shared vulkan_enc gui androidemu cutils utils log _renderControl_enc GLESv2_enc GLESv1_enc OpenglCodecCommon_host PRIVATE gralloc_cb_host GoldfishAddressSpace_host qemupipe_host)
\ No newline at end of file
+target_link_libraries(EGL_emulation PRIVATE GoldfishProfiler OpenglSystemCommon android-emu-shared vulkan_enc gui log _renderControl_enc GLESv2_enc GLESv1_enc OpenglCodecCommon_host cutils utils androidemu PRIVATE gralloc_cb_host GoldfishAddressSpace_host qemupipe_host)
\ No newline at end of file
diff --git a/system/egl/egl.cpp b/system/egl/egl.cpp
index 834207b..c08805c 100644
--- a/system/egl/egl.cpp
+++ b/system/egl/egl.cpp
@@ -14,6 +14,11 @@
 * limitations under the License.
 */
 
+#ifdef GFXSTREAM
+#include <atomic>
+#include <time.h>
+#endif
+
 #include <assert.h>
 #include "HostConnection.h"
 #include "ThreadInfo.h"
@@ -33,6 +38,7 @@
 #include "ClientAPIExts.h"
 #include "EGLImage.h"
 #include "ProcessPipe.h"
+#include "profiler.h"
 
 #include <qemu_pipe_bp.h>
 
@@ -43,6 +49,17 @@
 
 #include <GLES3/gl31.h>
 
+#ifdef VIRTIO_GPU
+#include <drm/virtgpu_drm.h>
+#include <xf86drm.h>
+#include <poll.h>
+#endif // VIRTIO_GPU
+
+#ifdef GFXSTREAM
+#include "android/base/Tracing.h"
+#endif
+#include <cutils/trace.h>
+
 #if PLATFORM_SDK_VERSION < 18
 #define override
 #endif
@@ -267,6 +284,84 @@
     delete [] extensionString;
 }
 
+uint64_t currGuestTimeNs() {
+    struct timespec ts;
+#ifdef __APPLE__
+    clock_gettime(CLOCK_REALTIME, &ts);
+#else
+    clock_gettime(CLOCK_BOOTTIME, &ts);
+#endif
+    uint64_t res = (uint64_t)(ts.tv_sec * 1000000000ULL + ts.tv_nsec);
+    return res;
+}
+
+struct app_time_metric_t {
+    uint64_t lastLogTime;
+    uint64_t lastSwapBuffersReturnTime;
+    unsigned int numSamples;
+    uint64_t totalAppTime;
+    uint64_t minAppTime;
+    uint64_t maxAppTime;
+
+    app_time_metric_t() :
+        lastLogTime(0),
+        lastSwapBuffersReturnTime(0),
+        numSamples(0),
+        totalAppTime(0),
+        minAppTime(0),
+        maxAppTime(0)
+    {
+    }
+
+    void onSwapBuffersReturn() {
+        lastSwapBuffersReturnTime = currGuestTimeNs();
+    }
+
+    static float ns2ms(uint64_t ns) {
+        return (float)ns / 1000000.0;
+    }
+
+    void onQueueBufferReturn() {
+        if(lastSwapBuffersReturnTime == 0) {
+            // First swapBuffers call, or last call failed.
+            return;
+        }
+
+        uint64_t now = currGuestTimeNs();
+        uint64_t appTime = now - lastSwapBuffersReturnTime;
+        if(numSamples == 0) {
+          minAppTime = appTime;
+          maxAppTime = appTime;
+        }
+        else {
+          minAppTime = fmin(minAppTime, appTime);
+          maxAppTime = fmax(maxAppTime, appTime);
+        }
+        totalAppTime += appTime;
+        numSamples++;
+        // Reset so we don't record a bad sample if swapBuffers fails
+        lastSwapBuffersReturnTime = 0;
+
+        if(lastLogTime == 0) {
+            lastLogTime = now;
+            return;
+        }
+
+        // Log/reset once every second
+        if(now - lastLogTime > 1000000000) {
+            float avgMs = ns2ms(totalAppTime) / numSamples;
+            float minMs = ns2ms(minAppTime);
+            float maxMs = ns2ms(maxAppTime);
+            ALOGD("app_time_stats: avg=%0.2fms min=%0.2fms max=%0.2fms count=%u", avgMs, minMs, maxMs, numSamples);
+            totalAppTime = 0;
+            minAppTime = 0;
+            maxAppTime = 0;
+            numSamples = 0;
+            lastLogTime = now;
+        }
+    }
+};
+
 // ----------------------------------------------------------------------------
 //egl_surface_t
 
@@ -324,6 +419,8 @@
 
     EGLint      surfaceType;
     uint32_t    rcSurface; //handle to surface created via remote control
+
+    app_time_metric_t appTimeMetric;
 };
 
 egl_surface_t::egl_surface_t(EGLDisplay dpy, EGLConfig config, EGLint surfaceType)
@@ -494,6 +591,101 @@
     return sync_handle;
 }
 
+// our cmd
+#define VIRTIO_GPU_NATIVE_SYNC_CREATE_EXPORT_FD 0x9000
+#define VIRTIO_GPU_NATIVE_SYNC_CREATE_IMPORT_FD 0x9001
+
+// createNativeSync_virtioGpu()
+// creates an OpenGL sync object on the host
+// using rcCreateSyncKHR.
+// If necessary, a native fence FD will be exported or imported.
+// Returns a handle to the host-side FenceSync object.
+static uint64_t createNativeSync_virtioGpu(
+    EGLenum type,
+    const EGLint* attrib_list,
+    int num_actual_attribs,
+    bool destroy_when_signaled,
+    int fd_in,
+    int* fd_out) {
+#ifndef VIRTIO_GPU
+    ALOGE("%s: Error: called with no virtio-gpu support built in\n", __func__);
+    return 0;
+#else
+    DEFINE_HOST_CONNECTION;
+
+    uint64_t sync_handle;
+    uint64_t thread_handle;
+
+    EGLint* actual_attribs =
+        (EGLint*)(num_actual_attribs == 0 ? NULL : attrib_list);
+
+    // Create a normal sync obj
+    rcEnc->rcCreateSyncKHR(rcEnc, type,
+                           actual_attribs,
+                           num_actual_attribs * sizeof(EGLint),
+                           destroy_when_signaled,
+                           &sync_handle,
+                           &thread_handle);
+
+    // Import fence fd; dup and close
+    if (type == EGL_SYNC_NATIVE_FENCE_ANDROID && fd_in >= 0) {
+        int importedFd = dup(fd_in);
+
+        if (importedFd < 0) {
+            ALOGE("%s: error: failed to dup imported fd. original: %d errno %d\n",
+                  __func__, fd_in, errno);
+        }
+
+        *fd_out = importedFd;
+
+        if (close(fd_in)) {
+            ALOGE("%s: error: failed to close imported fd. original: %d errno %d\n",
+                  __func__, fd_in, errno);
+        }
+
+    } else if (type == EGL_SYNC_NATIVE_FENCE_ANDROID && fd_in < 0) {
+        // Export fence fd
+
+        uint32_t sync_handle_lo = (uint32_t)sync_handle;
+        uint32_t sync_handle_hi = (uint32_t)(sync_handle >> 32);
+
+        uint32_t cmdDwords[3] = {
+            VIRTIO_GPU_NATIVE_SYNC_CREATE_EXPORT_FD,
+            sync_handle_lo,
+            sync_handle_hi,
+        };
+
+        drm_virtgpu_execbuffer createSyncExport = {
+            .flags = VIRTGPU_EXECBUF_FENCE_FD_OUT,
+            .size = 3 * sizeof(uint32_t),
+            .command = (uint64_t)(cmdDwords),
+            .bo_handles = 0,
+            .num_bo_handles = 0,
+            .fence_fd = -1,
+        };
+
+        int queue_work_err =
+            drmIoctl(
+                hostCon->getOrCreateRendernodeFd(),
+                DRM_IOCTL_VIRTGPU_EXECBUFFER, &createSyncExport);
+
+        if (queue_work_err) {
+            ERR("%s: failed with %d executing command buffer (%s)",  __func__,
+                queue_work_err, strerror(errno));
+            return 0;
+        }
+
+        *fd_out = createSyncExport.fence_fd;
+
+        DPRINT("virtio-gpu: got native fence fd=%d queue_work_err=%d",
+               *fd_out, queue_work_err);
+
+    }
+
+    return sync_handle;
+#endif
+}
+
 // createGoldfishOpenGLNativeSync() is for creating host-only sync objects
 // that are needed by only this goldfish opengl driver,
 // such as in swapBuffers().
@@ -510,6 +702,57 @@
                      fd_out);
 }
 
+struct FrameTracingState {
+    uint32_t frameNumber = 0;
+    bool tracingEnabled = false;
+    void onSwapBuffersSuccesful(ExtendedRCEncoderContext* rcEnc) {
+#ifdef GFXSTREAM
+        bool current = android::base::isTracingEnabled();
+        // edge trigger
+        if (android::base::isTracingEnabled() && !tracingEnabled) {
+            if (rcEnc->hasHostSideTracing()) {
+                rcEnc->rcSetTracingForPuid(rcEnc, getPuid(), 1, currGuestTimeNs());
+            }
+        }
+        if (!android::base::isTracingEnabled() && tracingEnabled) {
+            if (rcEnc->hasHostSideTracing()) {
+                rcEnc->rcSetTracingForPuid(rcEnc, getPuid(), 0, currGuestTimeNs());
+            }
+        }
+        tracingEnabled = android::base::isTracingEnabled();
+#endif
+        ++frameNumber;
+    }
+};
+
+static FrameTracingState sFrameTracingState;
+
+static void sFlushBufferAndCreateFence(
+    HostConnection* hostCon, ExtendedRCEncoderContext* rcEnc, uint32_t rcSurface, uint32_t frameNumber, int* presentFenceFd) {
+    atrace_int(ATRACE_TAG_GRAPHICS, "gfxstreamFrameNumber", (int32_t)frameNumber);
+
+    if (rcEnc->hasHostSideTracing()) {
+        rcEnc->rcFlushWindowColorBufferAsyncWithFrameNumber(rcEnc, rcSurface, frameNumber);
+    } else {
+        rcEnc->rcFlushWindowColorBufferAsync(rcEnc, rcSurface);
+    }
+
+    if (rcEnc->hasVirtioGpuNativeSync()) {
+        createNativeSync_virtioGpu(EGL_SYNC_NATIVE_FENCE_ANDROID,
+                     NULL /* empty attrib list */,
+                     0 /* 0 attrib count */,
+                     true /* destroy when signaled. this is host-only
+                             and there will only be one waiter */,
+                     -1 /* we want a new fd */,
+                     presentFenceFd);
+    } else if (rcEnc->hasNativeSync()) {
+        createGoldfishOpenGLNativeSync(presentFenceFd);
+    } else {
+        // equivalent to glFinish if no native sync
+        eglWaitClient();
+    }
+}
+
 EGLBoolean egl_window_surface_t::swapBuffers()
 {
 
@@ -544,19 +787,16 @@
     eglWaitClient();
     nativeWindow->queueBuffer(nativeWindow, buffer);
 #else
-    if (rcEnc->hasNativeSync()) {
-        rcEnc->rcFlushWindowColorBufferAsync(rcEnc, rcSurface);
-        createGoldfishOpenGLNativeSync(&presentFenceFd);
-    } else {
-        rcEnc->rcFlushWindowColorBuffer(rcEnc, rcSurface);
-        // equivalent to glFinish if no native sync
-        eglWaitClient();
-    }
+    sFlushBufferAndCreateFence(
+        hostCon, rcEnc, rcSurface,
+        sFrameTracingState.frameNumber, &presentFenceFd);
 
     DPRINT("queueBuffer with fence %d", presentFenceFd);
     nativeWindow->queueBuffer(nativeWindow, buffer, presentFenceFd);
 #endif
 
+    appTimeMetric.onQueueBufferReturn();
+
     DPRINT("calling dequeueBuffer...");
 
 #if PLATFORM_SDK_VERSION <= 16
@@ -584,6 +824,9 @@
     setWidth(buffer->width);
     setHeight(buffer->height);
 
+    sFrameTracingState.onSwapBuffersSuccesful(rcEnc);
+    appTimeMetric.onSwapBuffersReturn();
+
     return EGL_TRUE;
 }
 
@@ -726,6 +969,10 @@
         return res;
     }
 
+    if (tInfo->currentContext->extensionStringArray.size() > 0) {
+        return tInfo->currentContext->extensionStringArray;
+    }
+
 #define GL_EXTENSIONS                     0x1F03
 
     DEFINE_AND_VALIDATE_HOST_CONNECTION(res);
@@ -740,6 +987,7 @@
             hostStr = NULL;
         }
     }
+
     // push guest strings
     res.push_back("GL_EXT_robustness");
 
@@ -766,6 +1014,8 @@
         extEnd++;
     }
 
+    tInfo->currentContext->extensionStringArray = res;
+
     delete [] hostStr;
     return res;
 }
@@ -807,6 +1057,10 @@
         return NULL;
     }
 
+    if (*strPtr) {
+        return *strPtr;
+    }
+
     char* hostStr = NULL;
 
     if (glEnum == GL_EXTENSIONS) {
@@ -885,6 +1139,7 @@
         *major = s_display.getVersionMajor();
     if (minor!=NULL)
         *minor = s_display.getVersionMinor();
+    try_register_goldfish_perfetto();
     return EGL_TRUE;
 }
 
@@ -893,6 +1148,8 @@
     VALIDATE_DISPLAY_INIT(dpy, EGL_FALSE);
 
     s_display.terminate();
+    DEFINE_AND_VALIDATE_HOST_CONNECTION(EGL_FALSE);
+    rcEnc->rcGetRendererVersion(rcEnc);
     return EGL_TRUE;
 }
 
@@ -961,6 +1218,7 @@
     }
 
     int attribs_size = 0;
+    EGLint backup_attribs[1];
     if (attrib_list) {
         const EGLint * attrib_p = attrib_list;
         while (attrib_p[0] != EGL_NONE) {
@@ -968,6 +1226,10 @@
             attrib_p += 2;
         }
         attribs_size++; //for the terminating EGL_NONE
+    } else {
+        attribs_size = 1;
+        backup_attribs[0] = EGL_NONE;
+        attrib_list = backup_attribs;
     }
 
     // API 19 passes EGL_SWAP_BEHAVIOR_PRESERVED_BIT to surface type,
@@ -1100,6 +1362,7 @@
             case EGL_VG_COLORSPACE:
                 break;
             default:
+                ALOGE("%s:%d unknown attribute: 0x%x\n", __func__, __LINE__, attrib_list[0]);
                 setErrorReturn(EGL_BAD_ATTRIBUTE, EGL_NO_SURFACE);
         };
         attrib_list+=2;
@@ -1593,6 +1856,8 @@
         }
         break;
     default:
+        ALOGE("%s:%d EGL_BAD_CONFIG: invalid major GLES version: %d\n",
+              __func__, __LINE__, majorVersion);
         setErrorReturn(EGL_BAD_CONFIG, EGL_NO_CONTEXT);
     }
 
@@ -1707,9 +1972,10 @@
     }
 
     DEFINE_AND_VALIDATE_HOST_CONNECTION(EGL_FALSE);
-    if (rcEnc->rcMakeCurrent(rcEnc, ctxHandle, drawHandle, readHandle) == EGL_FALSE) {
-        ALOGE("rcMakeCurrent returned EGL_FALSE");
-        setErrorReturn(EGL_BAD_CONTEXT, EGL_FALSE);
+    if (rcEnc->hasAsyncFrameCommands()) {
+        rcEnc->rcMakeCurrentAsync(rcEnc, ctxHandle, drawHandle, readHandle);
+    } else {
+        rcEnc->rcMakeCurrent(rcEnc, ctxHandle, drawHandle, readHandle);
     }
 
     //Now make the local bind
@@ -1746,41 +2012,25 @@
                 context->minorVersion,
                 context->deviceMajorVersion,
                 context->deviceMinorVersion);
-            // Get caps for indexed buffers from host.
-            // Some need a current context.
-            int max_transform_feedback_separate_attribs = 0;
-            int max_uniform_buffer_bindings = 0;
-            int max_atomic_counter_buffer_bindings = 0;
-            int max_shader_storage_buffer_bindings = 0;
-            int max_vertex_attrib_bindings = 0;
-            int max_color_attachments = 1;
-            int max_draw_buffers = 1;
-            if (context->majorVersion > 2) {
-                s_display.gles2_iface()->getIntegerv(
-                        GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS, &max_transform_feedback_separate_attribs);
-                s_display.gles2_iface()->getIntegerv(
-                        GL_MAX_UNIFORM_BUFFER_BINDINGS, &max_uniform_buffer_bindings);
-                if (context->minorVersion > 0) {
-                    s_display.gles2_iface()->getIntegerv(
-                            GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS, &max_atomic_counter_buffer_bindings);
-                    s_display.gles2_iface()->getIntegerv(
-                            GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS, &max_shader_storage_buffer_bindings);
-                    s_display.gles2_iface()->getIntegerv(
-                            GL_MAX_VERTEX_ATTRIB_BINDINGS, &max_vertex_attrib_bindings);
-                }
-                s_display.gles2_iface()->getIntegerv(
-                        GL_MAX_COLOR_ATTACHMENTS, &max_color_attachments);
-                s_display.gles2_iface()->getIntegerv(
-                        GL_MAX_DRAW_BUFFERS, &max_draw_buffers);
+            hostCon->gl2Encoder()->setClientState(contextState);
+            if (context->majorVersion > 1) {
+                HostDriverCaps caps = s_display.getHostDriverCaps(
+                    context->majorVersion,
+                    context->minorVersion);
+                contextState->initFromCaps(caps);
+            } else {
+                // Just put some stuff here to make gles1 happy
+                HostDriverCaps gles1Caps = {
+                    .max_vertex_attribs = 16,
+                    .max_combined_texture_image_units = 8,
+                    .max_color_attachments = 8,
+
+                    .max_texture_size = 4096,
+                    .max_texture_size_cube_map = 2048,
+                    .max_renderbuffer_size = 4096,
+                };
+                contextState->initFromCaps(gles1Caps);
             }
-            contextState->initFromCaps(
-                    max_transform_feedback_separate_attribs,
-                    max_uniform_buffer_bindings,
-                    max_atomic_counter_buffer_bindings,
-                    max_shader_storage_buffer_bindings,
-                    max_vertex_attrib_bindings,
-                    max_color_attachments,
-                    max_draw_buffers);
         }
 
         // update the client state, share group, and version
@@ -1824,7 +2074,7 @@
 
     //Check maybe we need to init the encoder, if it's first eglMakeCurrent
     if (tInfo->currentContext) {
-        if (tInfo->currentContext->majorVersion  > 1) {
+        if (tInfo->currentContext->majorVersion > 1) {
             if (!hostCon->gl2Encoder()->isInitialized()) {
                 s_display.gles2_iface()->init();
                 hostCon->gl2Encoder()->setInitialized();
@@ -1867,6 +2117,7 @@
         case EGL_DRAW:
             return context->draw;
         default:
+            ALOGE("%s:%d unknown parameter: 0x%x\n", __func__, __LINE__, readdraw);
             setErrorReturn(EGL_BAD_PARAMETER, EGL_NO_SURFACE);
     }
 }
@@ -2017,7 +2268,11 @@
             case HAL_PIXEL_FORMAT_YCBCR_420_888:
 #endif
                 break;
+            case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+                ALOGW("%s:%d using HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED\n", __func__, __LINE__);
+                break;
             default:
+                ALOGE("%s:%d unknown parameter: 0x%x\n", __func__, __LINE__, format);
                 setErrorReturn(EGL_BAD_PARAMETER, EGL_NO_IMAGE_KHR);
         }
 
@@ -2027,6 +2282,8 @@
         image->dpy = dpy;
         image->target = target;
         image->native_buffer = native_buffer;
+        image->width = native_buffer->width;
+        image->height = native_buffer->width;
 
         return (EGLImageKHR)image;
     }
@@ -2043,6 +2300,8 @@
         image->dpy = dpy;
         image->target = target;
         image->host_egl_image = img;
+        image->width = context->getClientState()->queryTexWidth(0, texture);
+        image->height = context->getClientState()->queryTexHeight(0, texture);
 
         return (EGLImageKHR)image;
     }
@@ -2097,7 +2356,8 @@
     if ((type != EGL_SYNC_FENCE_KHR &&
          type != EGL_SYNC_NATIVE_FENCE_ANDROID) ||
         (type != EGL_SYNC_FENCE_KHR &&
-         !rcEnc->hasNativeSync())) {
+         !rcEnc->hasNativeSync() &&
+         !rcEnc->hasVirtioGpuNativeSync())) {
         setErrorReturn(EGL_BAD_ATTRIBUTE, EGL_NO_SYNC_KHR);
     }
 
@@ -2127,17 +2387,17 @@
 
         // Validate and input attribs
         for (int i = 0; i < num_actual_attribs; i += 2) {
-            if (attrib_list[i] == EGL_SYNC_TYPE_KHR) {
-                DPRINT("ERROR: attrib key = EGL_SYNC_TYPE_KHR");
-            }
-            if (attrib_list[i] == EGL_SYNC_STATUS_KHR) {
-                DPRINT("ERROR: attrib key = EGL_SYNC_STATUS_KHR");
-            }
-            if (attrib_list[i] == EGL_SYNC_CONDITION_KHR) {
-                DPRINT("ERROR: attrib key = EGL_SYNC_CONDITION_KHR");
-            }
             EGLint attrib_key = attrib_list[i];
             EGLint attrib_val = attrib_list[i + 1];
+            switch (attrib_key) {
+                case EGL_SYNC_TYPE_KHR:
+                case EGL_SYNC_STATUS_KHR:
+                case EGL_SYNC_CONDITION_KHR:
+                case EGL_SYNC_NATIVE_FENCE_FD_ANDROID:
+                    break;
+                default:
+                    setErrorReturn(EGL_BAD_ATTRIBUTE, EGL_NO_SYNC_KHR);
+            }
             if (attrib_key == EGL_SYNC_NATIVE_FENCE_FD_ANDROID) {
                 if (attrib_val != EGL_NO_NATIVE_FENCE_FD_ANDROID) {
                     inputFenceFd = attrib_val;
@@ -2150,14 +2410,23 @@
     uint64_t sync_handle = 0;
     int newFenceFd = -1;
 
-    if (rcEnc->hasNativeSync()) {
+    if (rcEnc->hasVirtioGpuNativeSync()) {
         sync_handle =
-            createNativeSync(type, attrib_list, num_actual_attribs,
-                             false /* don't destroy when signaled on the host;
-                                      let the guest clean this up,
-                                      because the guest called eglCreateSyncKHR. */,
-                             inputFenceFd,
-                             &newFenceFd);
+            createNativeSync_virtioGpu(
+                type, attrib_list, num_actual_attribs,
+                false /* don't destroy when signaled on the host;
+                         let the guest clean this up,
+                         because the guest called eglCreateSyncKHR. */,
+                inputFenceFd, &newFenceFd);
+    } else if (rcEnc->hasNativeSync()) {
+        sync_handle =
+            createNativeSync(
+                type, attrib_list, num_actual_attribs,
+                false /* don't destroy when signaled on the host;
+                         let the guest clean this up,
+                         because the guest called eglCreateSyncKHR. */,
+                inputFenceFd,
+                &newFenceFd);
 
     } else {
         // Just trigger a glFinish if the native sync on host
@@ -2170,17 +2439,21 @@
     if (type == EGL_SYNC_NATIVE_FENCE_ANDROID) {
         syncRes->type = EGL_SYNC_NATIVE_FENCE_ANDROID;
 
-        if (inputFenceFd < 0) {
+        if (rcEnc->hasVirtioGpuNativeSync()) {
             syncRes->android_native_fence_fd = newFenceFd;
         } else {
-            DPRINT("has input fence fd %d",
-                    inputFenceFd);
-            syncRes->android_native_fence_fd = inputFenceFd;
+            if (inputFenceFd < 0) {
+                syncRes->android_native_fence_fd = newFenceFd;
+            } else {
+                DPRINT("has input fence fd %d",
+                        inputFenceFd);
+                syncRes->android_native_fence_fd = inputFenceFd;
+            }
         }
     } else {
         syncRes->type = EGL_SYNC_FENCE_KHR;
         syncRes->android_native_fence_fd = -1;
-        if (!rcEnc->hasNativeSync()) {
+        if (!rcEnc->hasNativeSync() && !rcEnc->hasVirtioGpuNativeSync()) {
             syncRes->status = EGL_SIGNALED_KHR;
         }
     }
@@ -2193,8 +2466,8 @@
     (void)dpy;
 
     if (!eglsync) {
-        DPRINT("WARNING: null sync object")
-        return EGL_TRUE;
+        ALOGE("%s: null sync object!", __FUNCTION__);
+        setErrorReturn(EGL_BAD_PARAMETER, EGL_FALSE);
     }
 
     EGLSync_t* sync = static_cast<EGLSync_t*>(eglsync);
@@ -2206,8 +2479,12 @@
 
     if (sync) {
         DEFINE_HOST_CONNECTION;
-        if (rcEnc->hasNativeSync()) {
-            rcEnc->rcDestroySyncKHR(rcEnc, sync->handle);
+        if (rcEnc->hasVirtioGpuNativeSync() || rcEnc->hasNativeSync()) {
+            if (rcEnc->hasAsyncFrameCommands()) {
+                rcEnc->rcDestroySyncKHRAsync(rcEnc, sync->handle);
+            } else {
+                rcEnc->rcDestroySyncKHR(rcEnc, sync->handle);
+            }
         }
         delete sync;
     }
@@ -2221,8 +2498,8 @@
     (void)dpy;
 
     if (!eglsync) {
-        DPRINT("WARNING: null sync object");
-        return EGL_CONDITION_SATISFIED_KHR;
+        ALOGE("%s: null sync object!", __FUNCTION__);
+        setErrorReturn(EGL_BAD_PARAMETER, EGL_FALSE);
     }
 
     EGLSync_t* sync = (EGLSync_t*)eglsync;
@@ -2233,7 +2510,7 @@
     DEFINE_HOST_CONNECTION;
 
     EGLint retval;
-    if (rcEnc->hasNativeSync()) {
+    if (rcEnc->hasVirtioGpuNativeSync() || rcEnc->hasNativeSync()) {
         retval = rcEnc->rcClientWaitSyncKHR
             (rcEnc, sync->handle, flags, timeout);
     } else {
@@ -2261,6 +2538,14 @@
 
     EGLSync_t* sync = (EGLSync_t*)eglsync;
 
+    if (!sync) {
+        setErrorReturn(EGL_BAD_PARAMETER, EGL_FALSE);
+    }
+
+    if (!value) {
+        setErrorReturn(EGL_BAD_PARAMETER, EGL_FALSE);
+    }
+
     switch (attribute) {
     case EGL_SYNC_TYPE_KHR:
         *value = sync->type;
@@ -2272,7 +2557,7 @@
         } else {
             // ask the host again
             DEFINE_HOST_CONNECTION;
-            if (rcEnc->hasNativeSyncV4()) {
+            if (rcEnc->hasVirtioGpuNativeSync() || rcEnc->hasNativeSyncV4()) {
                 if (rcEnc->rcIsSyncSignaled(rcEnc, sync->handle)) {
                     sync->status = EGL_SIGNALED_KHR;
                 }
@@ -2308,16 +2593,16 @@
 
     if (!eglsync) {
         ALOGE("%s: null sync object!", __FUNCTION__);
-        return EGL_FALSE;
+        setErrorReturn(EGL_BAD_PARAMETER, EGL_FALSE);
     }
 
     if (flags) {
         ALOGE("%s: flags must be 0, got 0x%x", __FUNCTION__, flags);
-        return EGL_FALSE;
+        setErrorReturn(EGL_BAD_PARAMETER, EGL_FALSE);
     }
 
     DEFINE_HOST_CONNECTION;
-    if (rcEnc->hasNativeSyncV3()) {
+    if (rcEnc->hasVirtioGpuNativeSync() || rcEnc->hasNativeSyncV3()) {
         EGLSync_t* sync = (EGLSync_t*)eglsync;
         rcEnc->rcWaitSyncKHR(rcEnc, sync->handle, flags);
     }
diff --git a/system/egl/eglContext.h b/system/egl/eglContext.h
index 0674833..c71c7c4 100644
--- a/system/egl/eglContext.h
+++ b/system/egl/eglContext.h
@@ -19,6 +19,9 @@
 #include "GLClientState.h"
 #include "GLSharedGroup.h"
 
+#include <string>
+#include <vector>
+
 struct EGLContext_t {
 
     enum {
@@ -44,6 +47,7 @@
     const char*         rendererString;
     const char*         shaderVersionString;
     const char*         extensionString;
+    std::vector<std::string> extensionStringArray;
     EGLint              deletePending;
     GLClientState * getClientState(){ return clientState; }
     GLSharedGroupPtr getSharedGroup(){ return sharedGroup; }
diff --git a/system/egl/eglDisplay.cpp b/system/egl/eglDisplay.cpp
index b3f14da..7f4800c 100644
--- a/system/egl/eglDisplay.cpp
+++ b/system/egl/eglDisplay.cpp
@@ -26,6 +26,8 @@
 
 #include <dlfcn.h>
 
+#include <GLES3/gl31.h>
+
 static const int systemEGLVersionMajor = 1;
 static const int systemEGLVersionMinor = 4;
 static const char systemEGLVendor[] = "Google Android emulator";
@@ -73,7 +75,9 @@
     m_gles2_iface(NULL),
     m_versionString(NULL),
     m_vendorString(NULL),
-    m_extensionString(NULL)
+    m_extensionString(NULL),
+    m_hostDriverCaps_knownMajorVersion(0),
+    m_hostDriverCaps_knownMinorVersion(0)
 {
     pthread_mutex_init(&m_lock, NULL);
     pthread_mutex_init(&m_ctxLock, NULL);
@@ -171,7 +175,6 @@
 
         uint32_t nInts = m_numConfigAttribs * (m_numConfigs + 1);
         EGLint tmp_buf[nInts];
-        uint32_t configCount = nInts - m_numConfigAttribs;
 
         m_configs = new EGLint[nInts-m_numConfigAttribs];
 
@@ -357,11 +360,11 @@
 
         std::string dynamicEGLExtensions;
 
-        if (hcon->rcEncoder()->hasNativeSync() &&
+        if ((hcon->rcEncoder()->hasVirtioGpuNativeSync() || hcon->rcEncoder()->hasNativeSync()) &&
             !strstr(initialEGLExts, kDynamicEGLExtNativeSync)) {
             dynamicEGLExtensions += kDynamicEGLExtNativeSync;
 
-            if (hcon->rcEncoder()->hasNativeSyncV3()) {
+            if (hcon->rcEncoder()->hasVirtioGpuNativeSync() || hcon->rcEncoder()->hasNativeSyncV3()) {
                 dynamicEGLExtensions += kDynamicEGLExtWaitSync;
             }
         }
@@ -654,3 +657,55 @@
     pthread_mutex_unlock(&m_surfaceLock);
     return res;
 }
+
+HostDriverCaps eglDisplay::getHostDriverCaps(int majorVersion, int minorVersion) {
+    pthread_mutex_lock(&m_lock);
+    if (majorVersion <= m_hostDriverCaps_knownMajorVersion &&
+        minorVersion <= m_hostDriverCaps_knownMinorVersion) {
+        pthread_mutex_unlock(&m_lock);
+        return m_hostDriverCaps;
+    }
+
+    memset(&m_hostDriverCaps, 0x0, sizeof(m_hostDriverCaps));
+
+    m_hostDriverCaps.max_color_attachments = 8;
+
+    // Can we query gles2?
+    if (majorVersion >= 1) {
+        m_gles2_iface->getIntegerv(GL_MAX_VERTEX_ATTRIBS, &m_hostDriverCaps.max_vertex_attribs);
+        m_gles2_iface->getIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &m_hostDriverCaps.max_combined_texture_image_units);
+
+        m_gles2_iface->getIntegerv(GL_MAX_TEXTURE_SIZE, &m_hostDriverCaps.max_texture_size);
+        m_gles2_iface->getIntegerv(GL_MAX_CUBE_MAP_TEXTURE_SIZE, &m_hostDriverCaps.max_texture_size_cube_map);
+        m_gles2_iface->getIntegerv(GL_MAX_RENDERBUFFER_SIZE, &m_hostDriverCaps.max_renderbuffer_size);
+        m_hostDriverCaps_knownMajorVersion = 2;
+    }
+
+    // Can we query gles3.0?
+    if (majorVersion >= 3) {
+        m_gles2_iface->getIntegerv(GL_MAX_COLOR_ATTACHMENTS, &m_hostDriverCaps.max_color_attachments);
+        m_gles2_iface->getIntegerv(GL_MAX_DRAW_BUFFERS, &m_hostDriverCaps.max_draw_buffers);
+        m_gles2_iface->getIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &m_hostDriverCaps.ubo_offset_alignment);
+        m_gles2_iface->getIntegerv(GL_MAX_UNIFORM_BUFFER_BINDINGS, &m_hostDriverCaps.max_uniform_buffer_bindings);
+        m_gles2_iface->getIntegerv(GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS, &m_hostDriverCaps.max_transform_feedback_separate_attribs);
+        m_gles2_iface->getIntegerv(GL_MAX_3D_TEXTURE_SIZE, &m_hostDriverCaps.max_texture_size_3d);
+        m_gles2_iface->getIntegerv(GL_MAX_ARRAY_TEXTURE_LAYERS, &m_hostDriverCaps.max_array_texture_layers);
+
+        m_hostDriverCaps_knownMajorVersion = 3;
+
+        // Can we query gles3.1?
+        if (minorVersion >= 1) {
+            m_gles2_iface->getIntegerv(GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS, &m_hostDriverCaps.max_atomic_counter_buffer_bindings);
+            m_gles2_iface->getIntegerv(GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS, &m_hostDriverCaps.max_shader_storage_buffer_bindings);
+            m_gles2_iface->getIntegerv(GL_MAX_VERTEX_ATTRIB_BINDINGS, &m_hostDriverCaps.max_vertex_attrib_bindings);
+            m_gles2_iface->getIntegerv(GL_MAX_VERTEX_ATTRIB_STRIDE, &m_hostDriverCaps.max_vertex_attrib_stride);
+            m_gles2_iface->getIntegerv(GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT, &m_hostDriverCaps.ssbo_offset_alignment);
+            m_hostDriverCaps_knownMinorVersion = 1;
+        }
+    }
+
+    pthread_mutex_unlock(&m_lock);
+
+    return m_hostDriverCaps;
+}
+
diff --git a/system/egl/eglDisplay.h b/system/egl/eglDisplay.h
index 951075d..25494e1 100644
--- a/system/egl/eglDisplay.h
+++ b/system/egl/eglDisplay.h
@@ -22,6 +22,7 @@
 #include <EGL/egl.h>
 #include <EGL/eglext.h>
 #include "EGLClientIface.h"
+#include "GLClientState.h"
 
 #if __cplusplus >= 201103L
 #include <unordered_set>
@@ -76,6 +77,10 @@
 
     bool isContext(EGLContext ctx);
     bool isSurface(EGLSurface ctx);
+
+    // Needs a current context (put this near eglMakeCurrent)
+    HostDriverCaps getHostDriverCaps(int majorVersion, int minorVersion);
+
 private:
     EGLClient_glesInterface *loadGLESClientAPI(const char *libName,
                                                EGLClient_eglInterface *eglIface,
@@ -118,6 +123,10 @@
     EGLSurfaceSet m_surfaces;
     pthread_mutex_t m_ctxLock;
     pthread_mutex_t m_surfaceLock;
+
+    int m_hostDriverCaps_knownMajorVersion;
+    int m_hostDriverCaps_knownMinorVersion;
+    HostDriverCaps m_hostDriverCaps;
 };
 
 #endif
diff --git a/system/gralloc/Android.mk b/system/gralloc/Android.mk
index 8a12378..5d5667e 100644
--- a/system/gralloc/Android.mk
+++ b/system/gralloc/Android.mk
@@ -19,6 +19,13 @@
 
 ifneq (true,$(GOLDFISH_OPENGL_BUILD_FOR_HOST))
 LOCAL_SHARED_LIBRARIES += libdl
+
+ifeq (true,$(GFXSTREAM))
+LOCAL_CFLAGS += -DVIRTIO_GPU
+LOCAL_C_INCLUDES += external/libdrm external/minigbm/cros_gralloc
+LOCAL_SHARED_LIBRARIES += libdrm
+endif
+
 endif
 
 $$(call emugl-end-module)
diff --git a/system/gralloc/CMakeLists.txt b/system/gralloc/CMakeLists.txt
index b972006..c090eb3 100644
--- a/system/gralloc/CMakeLists.txt
+++ b/system/gralloc/CMakeLists.txt
@@ -1,20 +1,20 @@
 # This is an autogenerated file! Do not edit!
 # instead run make from .../device/generic/goldfish-opengl
 # which will re-generate this file.
-android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/gralloc/Android.mk" "09618d9293855148fb310e67065028da8c7f6dcf936b02b5695292c82ed4724e")
+android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/gralloc/Android.mk" "4c44a906197808b25fde953a476e026aac725e5286e0f91d5f8a3083f40f49da")
 set(gralloc.goldfish_src gralloc_old.cpp)
 android_add_library(TARGET gralloc.goldfish SHARED LICENSE Apache-2.0 SRC gralloc_old.cpp)
-target_include_directories(gralloc.goldfish PRIVATE ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon/bionic-include ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon ${GOLDFISH_DEVICE_ROOT}/bionic/libc/private ${GOLDFISH_DEVICE_ROOT}/bionic/libc/platform ${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/gralloc_cb/include ${GOLDFISH_DEVICE_ROOT}/shared/GoldfishAddressSpace/include ${GOLDFISH_DEVICE_ROOT}/system/GLESv2_enc ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv1_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
-target_compile_definitions(gralloc.goldfish PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGOLDFISH_VULKAN" "-DLOG_TAG=\"gralloc_goldfish\"")
+target_include_directories(gralloc.goldfish PRIVATE ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon/bionic-include ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon ${GOLDFISH_DEVICE_ROOT}/bionic/libc/private ${GOLDFISH_DEVICE_ROOT}/bionic/libc/platform ${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc ${GOLDFISH_DEVICE_ROOT}/shared/gralloc_cb/include ${GOLDFISH_DEVICE_ROOT}/shared/GoldfishAddressSpace/include ${GOLDFISH_DEVICE_ROOT}/system/GLESv2_enc ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv1_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
+target_compile_definitions(gralloc.goldfish PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM" "-DLOG_TAG=\"gralloc_goldfish\"")
 target_compile_options(gralloc.goldfish PRIVATE "-fvisibility=default" "-Wno-unused-parameter" "-Wno-missing-field-initializers" "-Wno-gnu-designator")
-target_link_libraries(gralloc.goldfish PRIVATE OpenglSystemCommon android-emu-shared vulkan_enc gui androidemu cutils utils log GLESv2_enc _renderControl_enc GLESv1_enc OpenglCodecCommon_host PRIVATE gralloc_cb_host GoldfishAddressSpace_host qemupipe_host)
+target_link_libraries(gralloc.goldfish PRIVATE OpenglSystemCommon android-emu-shared vulkan_enc gui log GLESv2_enc _renderControl_enc GLESv1_enc OpenglCodecCommon_host cutils utils androidemu PRIVATE gralloc_cb_host GoldfishAddressSpace_host qemupipe_host)
 # This is an autogenerated file! Do not edit!
 # instead run make from .../device/generic/goldfish-opengl
 # which will re-generate this file.
-android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/gralloc/Android.mk" "09618d9293855148fb310e67065028da8c7f6dcf936b02b5695292c82ed4724e")
+android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/gralloc/Android.mk" "4c44a906197808b25fde953a476e026aac725e5286e0f91d5f8a3083f40f49da")
 set(gralloc.ranchu_src gralloc_old.cpp)
 android_add_library(TARGET gralloc.ranchu SHARED LICENSE Apache-2.0 SRC gralloc_old.cpp)
-target_include_directories(gralloc.ranchu PRIVATE ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon/bionic-include ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon ${GOLDFISH_DEVICE_ROOT}/bionic/libc/private ${GOLDFISH_DEVICE_ROOT}/bionic/libc/platform ${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/gralloc_cb/include ${GOLDFISH_DEVICE_ROOT}/shared/GoldfishAddressSpace/include ${GOLDFISH_DEVICE_ROOT}/system/GLESv2_enc ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv1_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
-target_compile_definitions(gralloc.ranchu PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGOLDFISH_VULKAN" "-DLOG_TAG=\"gralloc_ranchu\"")
+target_include_directories(gralloc.ranchu PRIVATE ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon/bionic-include ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon ${GOLDFISH_DEVICE_ROOT}/bionic/libc/private ${GOLDFISH_DEVICE_ROOT}/bionic/libc/platform ${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc ${GOLDFISH_DEVICE_ROOT}/shared/gralloc_cb/include ${GOLDFISH_DEVICE_ROOT}/shared/GoldfishAddressSpace/include ${GOLDFISH_DEVICE_ROOT}/system/GLESv2_enc ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv1_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
+target_compile_definitions(gralloc.ranchu PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM" "-DLOG_TAG=\"gralloc_ranchu\"")
 target_compile_options(gralloc.ranchu PRIVATE "-fvisibility=default" "-Wno-unused-parameter" "-Wno-missing-field-initializers" "-Wno-gnu-designator")
-target_link_libraries(gralloc.ranchu PRIVATE OpenglSystemCommon android-emu-shared vulkan_enc gui androidemu cutils utils log GLESv2_enc _renderControl_enc GLESv1_enc OpenglCodecCommon_host PRIVATE gralloc_cb_host GoldfishAddressSpace_host qemupipe_host)
\ No newline at end of file
+target_link_libraries(gralloc.ranchu PRIVATE OpenglSystemCommon android-emu-shared vulkan_enc gui log GLESv2_enc _renderControl_enc GLESv1_enc OpenglCodecCommon_host cutils utils androidemu PRIVATE gralloc_cb_host GoldfishAddressSpace_host qemupipe_host)
\ No newline at end of file
diff --git a/system/gralloc/gralloc_30.cpp b/system/gralloc/gralloc_30.cpp
index b67f4c5..c012908 100644
--- a/system/gralloc/gralloc_30.cpp
+++ b/system/gralloc/gralloc_30.cpp
@@ -149,7 +149,9 @@
 
 class goldfish_gralloc30_module_t {
 public:
-    goldfish_gralloc30_module_t(): m_hostConn(HostConnection::createUnique()) {
+    // TODO(b/142677230): Use unique pointers instead.
+    goldfish_gralloc30_module_t()
+        : m_hostConn(HostConnection::createUnique().release()) {
         CRASH_IF(!m_hostConn, "m_hostConn cannot be nullptr");
         m_bufferManager = create_buffer_manager(this);
         CRASH_IF(!m_bufferManager, "m_bufferManager cannot be nullptr");
diff --git a/system/gralloc/gralloc_old.cpp b/system/gralloc/gralloc_old.cpp
index 7af9dfe..2cae482 100644
--- a/system/gralloc/gralloc_old.cpp
+++ b/system/gralloc/gralloc_old.cpp
@@ -446,7 +446,7 @@
 
 static HostConnection* createOrGetHostConnection() {
     if (!sHostCon) {
-        sHostCon = HostConnection::createUnique();
+        sHostCon = HostConnection::createUnique().release();
     }
     return sHostCon;
 }
@@ -997,6 +997,11 @@
         }
 
         delete d;
+
+        if (sHostCon) {
+            delete sHostCon;
+            sHostCon = nullptr;
+        }
     }
     return 0;
 }
@@ -1582,7 +1587,7 @@
     // qemu.gles=0 -> no GLES 2.x support (only 1.x through software).
     // qemu.gles=1 -> host-side GPU emulation through EmuGL
     // qemu.gles=2 -> guest-side GPU emulation.
-    property_get("ro.kernel.qemu.gles", prop, "999");
+    property_get("ro.boot.qemu.gles", prop, "999");
 
     bool useFallback = false;
     switch (atoi(prop)) {
diff --git a/system/hals/Android.mk b/system/hals/Android.mk
index 3cdb4bf..72ac6ce 100644
--- a/system/hals/Android.mk
+++ b/system/hals/Android.mk
@@ -18,6 +18,9 @@
 include $(CLEAR_VARS)
 
 LOCAL_MODULE := android.hardware.graphics.allocator@3.0-service
+LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0
+LOCAL_LICENSE_CONDITIONS := notice
+LOCAL_NOTICE_FILE := $(LOCAL_PATH)/../../LICENSE
 LOCAL_MODULE_RELATIVE_PATH := hw
 LOCAL_VENDOR_MODULE := true
 LOCAL_SRC_FILES := allocator3.cpp
@@ -45,11 +48,18 @@
     device/generic/goldfish-opengl/host/include/libOpenglRender \
     device/generic/goldfish-opengl/system/renderControl_enc \
 
+LOCAL_CFLAGS += -DVIRTIO_GPU
+LOCAL_C_INCLUDES += external/libdrm external/minigbm/cros_gralloc
+LOCAL_SHARED_LIBRARIES += libdrm
+
 include $(BUILD_EXECUTABLE)
 
 include $(CLEAR_VARS)
 
 LOCAL_MODULE := android.hardware.graphics.mapper@3.0-impl-ranchu
+LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0
+LOCAL_LICENSE_CONDITIONS := notice
+LOCAL_NOTICE_FILE := $(LOCAL_PATH)/../../LICENSE
 LOCAL_MODULE_RELATIVE_PATH := hw
 LOCAL_VENDOR_MODULE := true
 LOCAL_SRC_FILES := mapper3.cpp
@@ -78,5 +88,8 @@
     device/generic/goldfish-opengl/host/include/libOpenglRender \
     device/generic/goldfish-opengl/system/renderControl_enc \
 
-include $(BUILD_SHARED_LIBRARY)
+LOCAL_CFLAGS += -DVIRTIO_GPU
+LOCAL_C_INCLUDES += external/libdrm external/minigbm/cros_gralloc
+LOCAL_SHARED_LIBRARIES += libdrm
 
+include $(BUILD_SHARED_LIBRARY)
diff --git a/system/hals/allocator3.cpp b/system/hals/allocator3.cpp
index 8b433c2..5c103a5 100644
--- a/system/hals/allocator3.cpp
+++ b/system/hals/allocator3.cpp
@@ -200,6 +200,15 @@
             break;
 
         default:
+            if (static_cast<android::hardware::graphics::common::V1_1::PixelFormat>(format) ==
+                    android::hardware::graphics::common::V1_1::PixelFormat::YCBCR_P010) {
+                yuv_format = true;
+                glFormat = GL_RGBA;
+                glType = GL_UNSIGNED_BYTE;
+                bpp = 2;
+                break;
+            }
+
             ALOGE("%s:%d Unsupported format: format=%d, frameworkFormat=%d, usage=%x",
                   __func__, __LINE__, format, descriptor.format, usage);
             RETURN_ERROR(Error3::UNSUPPORTED);
@@ -309,6 +318,17 @@
     }
 
     static bool needHostCb(const uint32_t usage, const PixelFormat format) {
+        if (static_cast<android::hardware::graphics::common::V1_1::PixelFormat>(format) ==
+                android::hardware::graphics::common::V1_1::PixelFormat::YCBCR_P010) {
+            return false;
+        }
+
+        // b/186585177
+        if ((usage & (BufferUsage::CPU_READ_MASK | BufferUsage::CPU_WRITE_MASK)) &&
+                (0 == (usage & ~(BufferUsage::CPU_READ_MASK | BufferUsage::CPU_WRITE_MASK)))) {
+            return false;
+        }
+
         return ((usage & BufferUsage::GPU_DATA_BUFFER)
                    || (format != PixelFormat::BLOB &&
                        format != PixelFormat::RAW16 &&
diff --git a/system/hals/mapper3.cpp b/system/hals/mapper3.cpp
index 3221350..6903f9b 100644
--- a/system/hals/mapper3.cpp
+++ b/system/hals/mapper3.cpp
@@ -342,6 +342,16 @@
             break;
 
         default:
+            if (static_cast<android::hardware::graphics::common::V1_1::PixelFormat>(cb->format) ==
+                    android::hardware::graphics::common::V1_1::PixelFormat::YCBCR_P010) {
+                yStride = cb->width * 2;
+                cStride = yStride;
+                uOffset = cb->height * yStride;
+                vOffset = uOffset + 2;
+                cStep = 4;
+                break;
+            }
+
             ALOGE("%s:%d unexpected format (%d)", __func__, __LINE__, cb->format);
             RETURN_ERROR(Error3::BAD_BUFFER);
         }
@@ -383,7 +393,8 @@
 
         // camera delivers bits to the buffer directly and does not require
         // an explicit read.
-        if (usageSwRead && !usageHwCamera) {
+        const bool cbReadable = cb.usage & BufferUsage::CPU_READ_MASK;
+        if (usageSwRead && !usageHwCamera && cbReadable) {
             if (gralloc_is_yuv_format(cb.format)) {
                 if (rcEnc->hasYUVCache()) {
                     uint32_t bufferSize;
diff --git a/system/hwc2/Android.mk b/system/hwc2/Android.mk
index 24a8a58..4d207ff 100644
--- a/system/hwc2/Android.mk
+++ b/system/hwc2/Android.mk
@@ -19,15 +19,25 @@
 include $(CLEAR_VARS)
 LOCAL_VENDOR_MODULE := true
 emulator_hwcomposer_shared_libraries := \
-    liblog \
-    libutils \
-    libcutils \
+    android.hardware.graphics.mapper@2.0 \
+    android.hardware.graphics.mapper@4.0 \
+    libbase \
     libEGL \
-    libutils \
+    libcutils \
+    libcuttlefish_device_config \
+    libcuttlefish_device_config_proto \
+    libcuttlefish_utils \
+    libcuttlefish_fs \
+    libdrm \
+    libgralloctypes \
     libhardware \
+    libhidlbase \
+    libjpeg \
+    liblog \
     libsync \
     libui \
-    android.hardware.graphics.mapper@2.0 \
+    libutils \
+    libutils \
 
 emulator_hwcomposer_cflags += \
     -DLOG_TAG=\"hwc2\" \
@@ -35,30 +45,62 @@
     -DANDROID_BASE_UNIQUE_FD_DISABLE_IMPLICIT_CONVERSION
 
 emulator_hwcomposer_c_includes += \
+    device/generic/goldfish-opengl/host/include/libOpenglRender \
+    device/generic/goldfish-opengl/android-emu \
+    device/generic/goldfish-opengl/shared/OpenglCodecCommon \
+    device/generic/goldfish-opengl/system/OpenglSystemCommon \
+    device/generic/goldfish-opengl/system/include \
+    device/generic/goldfish-opengl/system/renderControl_enc \
+    external/libdrm \
     system/core/libsync \
     system/core/libsync/include \
-    device/generic/goldfish-opengl/system/include \
-    device/generic/goldfish-opengl/system/OpenglSystemCommon \
-    device/generic/goldfish-opengl/host/include/libOpenglRender \
-    device/generic/goldfish-opengl/shared/OpenglCodecCommon \
-    device/generic/goldfish-opengl/system/renderControl_enc
 
 emulator_hwcomposer_relative_path := hw
 
 emulator_hwcomposer2_src_files := \
-    EmuHWC2.cpp
+    Device.cpp \
+    Display.cpp \
+    Drm.cpp \
+    DrmPresenter.cpp \
+    Gralloc.cpp \
+    GuestComposer.cpp \
+    HostComposer.cpp \
+    Layer.cpp \
 
 include $(CLEAR_VARS)
 
 LOCAL_VENDOR_MODULE := true
+LOCAL_STATIC_LIBRARIES := libyuv_static
 LOCAL_SHARED_LIBRARIES := $(emulator_hwcomposer_shared_libraries)
 LOCAL_SHARED_LIBRARIES += libOpenglSystemCommon lib_renderControl_enc
 LOCAL_SHARED_LIBRARIES += libui
 LOCAL_SRC_FILES := $(emulator_hwcomposer2_src_files)
 LOCAL_C_INCLUDES := $(emulator_hwcomposer_c_includes)
+LOCAL_C_INCLUDES += external/drm_hwcomposer
+LOCAL_C_INCLUDES += external/minigbm/cros_gralloc
 LOCAL_MODULE_RELATIVE_PATH := $(emulator_hwcomposer_relative_path)
 
 LOCAL_MODULE := hwcomposer.ranchu
+LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0
+LOCAL_LICENSE_CONDITIONS := notice
+LOCAL_NOTICE_FILE := $(LOCAL_PATH)/../../LICENSE
 LOCAL_MODULE_TAGS := optional
 
 include $(BUILD_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := drmTest.cpp
+LOCAL_SHARED_LIBRARIES := $(emulator_hwcomposer_shared_libraries)
+LOCAL_SHARED_LIBRARIES += libOpenglSystemCommon lib_renderControl_enc
+LOCAL_SHARED_LIBRARIES += libui
+LOCAL_SHARED_LIBRARIES += libdrm
+LOCAL_C_INCLUDES := $(emulator_hwcomposer_c_includes)
+LOCAL_C_INCLUDES += external/libdrm
+LOCAL_C_INCLUDES += external/drm_hwcomposer
+LOCAL_C_INCLUDES += external/minigbm/cros_gralloc
+LOCAL_MODULE := emulatorDrmTest
+LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0
+LOCAL_LICENSE_CONDITIONS := notice
+LOCAL_NOTICE_FILE := $(LOCAL_PATH)/../../LICENSE
+include $(BUILD_EXECUTABLE)
diff --git a/system/hwc2/Common.h b/system/hwc2/Common.h
new file mode 100644
index 0000000..949708d
--- /dev/null
+++ b/system/hwc2/Common.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HWC_COMMON_H
+#define ANDROID_HWC_COMMON_H
+
+#undef LOG_TAG
+#define LOG_TAG "RanchuHwc"
+
+#include <android-base/logging.h>
+#include <inttypes.h>
+#include <log/log.h>
+
+// Uncomment to enable additional debug logging.
+//#define DEBUG_RANCHU_HWC
+
+#if defined(DEBUG_RANCHU_HWC)
+#define DEBUG_LOG ALOGE
+#else
+#define DEBUG_LOG(...) ((void)0)
+#endif
+
+#define HWC2_INCLUDE_STRINGIFICATION
+#define HWC2_USE_CPP11
+#include <hardware/hwcomposer2.h>
+#undef HWC2_INCLUDE_STRINGIFICATION
+#undef HWC2_USE_CPP11
+
+#endif
diff --git a/system/hwc2/Composer.h b/system/hwc2/Composer.h
new file mode 100644
index 0000000..fdbb6b0
--- /dev/null
+++ b/system/hwc2/Composer.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HWC_COMPOSER_H
+#define ANDROID_HWC_COMPOSER_H
+
+#include <functional>
+#include <unordered_map>
+#include <vector>
+
+#include "Common.h"
+
+namespace android {
+class Device;
+class Display;
+
+class Composer {
+ public:
+  virtual ~Composer() {}
+
+  using HotplugCallback = std::function<void(
+      bool /*connected*/, uint32_t /*id*/, uint32_t /*width*/,
+      uint32_t /*height*/, uint32_t /*dpiX*/, uint32_t /*dpiY*/,
+      uint32_t /*refreshRate*/)>;
+
+  virtual HWC2::Error init(const HotplugCallback& cb) = 0;
+
+  using AddDisplayToDeviceFunction =
+      std::function<HWC2::Error(std::unique_ptr<Display>)>;
+
+  // Queries Cuttlefish/Goldfish/System configurations and creates displays
+  // for the given Device.
+  virtual HWC2::Error createDisplays(
+      Device* device,
+      const AddDisplayToDeviceFunction& addDisplayToDeviceFn) = 0;
+
+  virtual HWC2::Error createDisplay(
+      Device* device, uint32_t displayId, uint32_t width, uint32_t height,
+      uint32_t dpiX, uint32_t dpiY, uint32_t refreshRateHz,
+      const AddDisplayToDeviceFunction& addDisplayToDeviceFn) = 0;
+
+  virtual HWC2::Error onDisplayDestroy(Display* display) = 0;
+
+  virtual HWC2::Error onDisplayClientTargetSet(Display* display) = 0;
+
+  // Determines if this composer can compose the given layers and requests
+  // changes for layers that can't not be composed.
+  virtual HWC2::Error validateDisplay(
+      Display* display, std::unordered_map<hwc2_layer_t, HWC2::Composition>*
+                            outLayerCompositionChanges) = 0;
+
+  // Performs the actual composition of layers and presents the composed result
+  // to the display.
+  virtual HWC2::Error presentDisplay(Display* display,
+                                     int32_t* outPresentFence) = 0;
+};
+
+}  // namespace android
+
+#endif
diff --git a/system/hwc2/Device.cpp b/system/hwc2/Device.cpp
new file mode 100644
index 0000000..24df956
--- /dev/null
+++ b/system/hwc2/Device.cpp
@@ -0,0 +1,589 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Device.h"
+
+#include <android-base/properties.h>
+
+#include "GuestComposer.h"
+#include "HostComposer.h"
+
+namespace android {
+namespace {
+
+template <typename PFN, typename T>
+static hwc2_function_pointer_t asFP(T function) {
+  static_assert(std::is_same<PFN, T>::value, "Incompatible function pointer");
+  return reinterpret_cast<hwc2_function_pointer_t>(function);
+}
+
+static int CloseHook(hw_device_t* dev) {
+  Device* device = Device::fromDevice(dev);
+  delete device;
+  return 0;
+}
+
+bool IsCuttlefish() {
+  return android::base::GetProperty("ro.hardware.vulkan", "") == "pastel";
+}
+
+}  // namespace
+
+Device::Device() {
+  DEBUG_LOG("%s", __FUNCTION__);
+
+  common.tag = HARDWARE_DEVICE_TAG;
+  common.version = HWC_DEVICE_API_VERSION_2_0;
+  common.close = CloseHook;
+  hwc2_device_t::getCapabilities = getCapabilitiesHook;
+  hwc2_device_t::getFunction = getFunctionHook;
+}
+
+HWC2::Error Device::init() {
+  DEBUG_LOG("%s", __FUNCTION__);
+
+  if (IsCuttlefish()) {
+    mComposer = std::make_unique<GuestComposer>();
+  } else {
+    mComposer = std::make_unique<HostComposer>();
+  }
+
+  HWC2::Error error = mComposer->init(
+      [this](bool connected, uint32_t id, uint32_t width, uint32_t height,
+             uint32_t dpiX, uint32_t dpiY, uint32_t refreshRate) {
+        handleHotplug(connected, id, width, height, dpiX, dpiY, refreshRate);
+      });
+
+  if (error != HWC2::Error::None) {
+    ALOGE("%s failed to initialize Composer", __FUNCTION__);
+    return HWC2::Error::NoResources;
+  }
+
+  return HWC2::Error::None;
+}
+
+Device::~Device() {
+  DEBUG_LOG("%s", __FUNCTION__);
+
+  HWC2::Error error = HWC2::Error::None;
+
+  error = destroyDisplays();
+  if (error != HWC2::Error::None) {
+    ALOGE("%s failed to destroy displays", __FUNCTION__);
+  }
+}
+
+HWC2::Error Device::createDisplays() {
+  DEBUG_LOG("%s", __FUNCTION__);
+
+  std::unique_lock<std::mutex> lock(mStateMutex);
+
+  if (!mComposer) {
+    ALOGE("%s composer not initialized!", __FUNCTION__);
+    return HWC2::Error::NoResources;
+  }
+
+  auto addDisplayLockedFn = [this](std::unique_ptr<Display> display) {
+    auto displayId = display->getId();
+    DEBUG_LOG("%s: adding display:%" PRIu64, __FUNCTION__, displayId);
+    mDisplays.emplace(displayId, std::move(display));
+    return HWC2::Error::None;
+  };
+
+  HWC2::Error error = mComposer->createDisplays(this, addDisplayLockedFn);
+  if (error != HWC2::Error::None) {
+    ALOGE("%s composer failed to create displays", __FUNCTION__);
+    return error;
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error Device::createDisplay(uint32_t displayId, uint32_t width,
+                                  uint32_t height, uint32_t dpiX, uint32_t dpiY,
+                                  uint32_t refreshRate) {
+  if (!mComposer) {
+    ALOGE("%s composer not initialized!", __FUNCTION__);
+    return HWC2::Error::NoResources;
+  }
+
+  auto addDisplayLockedFn = [this](std::unique_ptr<Display> display) {
+    auto displayId = display->getId();
+    DEBUG_LOG("%s: adding display:%" PRIu64, __FUNCTION__, displayId);
+    mDisplays.emplace(displayId, std::move(display));
+    return HWC2::Error::None;
+  };
+
+  HWC2::Error error =
+      mComposer->createDisplay(this, displayId, width, height, dpiX, dpiY,
+                               refreshRate, addDisplayLockedFn);
+  if (error != HWC2::Error::None) {
+    ALOGE("%s composer failed to create displays", __FUNCTION__);
+    return error;
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error Device::destroyDisplays() {
+  DEBUG_LOG("%s", __FUNCTION__);
+
+  std::unique_lock<std::mutex> lock(mStateMutex);
+
+  if (!mComposer) {
+    ALOGE("%s composer not initialized!", __FUNCTION__);
+    return HWC2::Error::NoResources;
+  }
+
+  for (auto& [displayId, displayPtr] : mDisplays) {
+    HWC2::Error error = mComposer->onDisplayDestroy(displayPtr.get());
+    if (error != HWC2::Error::None) {
+      ALOGE("%s composer failed to destroy displays", __FUNCTION__);
+      return error;
+    }
+
+    displayPtr.reset();
+  }
+
+  mDisplays.clear();
+
+  return HWC2::Error::None;
+}
+
+void Device::getCapabilities(uint32_t* outCount, int32_t* outCapabilities) {
+  DEBUG_LOG("%s", __FUNCTION__);
+
+  if (outCapabilities == nullptr) {
+    *outCount = mCapabilities.size();
+    return;
+  }
+
+  auto capabilityIter = mCapabilities.cbegin();
+  for (size_t i = 0; i < *outCount; ++i) {
+    if (capabilityIter == mCapabilities.cend()) {
+      return;
+    }
+    outCapabilities[i] = static_cast<int32_t>(*capabilityIter);
+    ++capabilityIter;
+  }
+}
+
+/*static*/
+void Device::getCapabilitiesHook(hwc2_device_t* dev, uint32_t* outCount,
+                                 int32_t* outCapabilities) {
+  DEBUG_LOG("%s", __FUNCTION__);
+
+  Device* device = Device::fromDevice(dev);
+  device->getCapabilities(outCount, outCapabilities);
+}
+
+hwc2_function_pointer_t Device::getFunction(int32_t desc) {
+  const auto func = static_cast<HWC2::FunctionDescriptor>(desc);
+  const auto funcString = to_string(func);
+  DEBUG_LOG("%s(%s)", __FUNCTION__, funcString.c_str());
+
+  switch (func) {
+    // Device functions.
+    case HWC2::FunctionDescriptor::CreateVirtualDisplay:
+      return asFP<HWC2_PFN_CREATE_VIRTUAL_DISPLAY>(
+          DeviceHook<int32_t, decltype(&Device::createVirtualDisplay),
+                     &Device::createVirtualDisplay, uint32_t, uint32_t,
+                     int32_t*, hwc2_display_t*>);
+    case HWC2::FunctionDescriptor::DestroyVirtualDisplay:
+      return asFP<HWC2_PFN_DESTROY_VIRTUAL_DISPLAY>(
+          DeviceHook<int32_t, decltype(&Device::destroyVirtualDisplay),
+                     &Device::destroyVirtualDisplay, hwc2_display_t>);
+    case HWC2::FunctionDescriptor::Dump:
+      return asFP<HWC2_PFN_DUMP>(DeviceHook<void, decltype(&Device::dump),
+                                            &Device::dump, uint32_t*, char*>);
+    case HWC2::FunctionDescriptor::GetMaxVirtualDisplayCount:
+      return asFP<HWC2_PFN_GET_MAX_VIRTUAL_DISPLAY_COUNT>(
+          DeviceHook<uint32_t, decltype(&Device::getMaxVirtualDisplayCount),
+                     &Device::getMaxVirtualDisplayCount>);
+    case HWC2::FunctionDescriptor::RegisterCallback:
+      return asFP<HWC2_PFN_REGISTER_CALLBACK>(
+          DeviceHook<int32_t, decltype(&Device::registerCallback),
+                     &Device::registerCallback, int32_t, hwc2_callback_data_t,
+                     hwc2_function_pointer_t>);
+
+    // Display functions
+    case HWC2::FunctionDescriptor::AcceptDisplayChanges:
+      return asFP<HWC2_PFN_ACCEPT_DISPLAY_CHANGES>(
+          displayHook<decltype(&Display::acceptChanges),
+                      &Display::acceptChanges>);
+    case HWC2::FunctionDescriptor::CreateLayer:
+      return asFP<HWC2_PFN_CREATE_LAYER>(
+          displayHook<decltype(&Display::createLayer), &Display::createLayer,
+                      hwc2_layer_t*>);
+    case HWC2::FunctionDescriptor::DestroyLayer:
+      return asFP<HWC2_PFN_DESTROY_LAYER>(
+          displayHook<decltype(&Display::destroyLayer), &Display::destroyLayer,
+                      hwc2_layer_t>);
+    case HWC2::FunctionDescriptor::GetActiveConfig:
+      return asFP<HWC2_PFN_GET_ACTIVE_CONFIG>(
+          displayHook<decltype(&Display::getActiveConfig),
+                      &Display::getActiveConfig, hwc2_config_t*>);
+    case HWC2::FunctionDescriptor::GetChangedCompositionTypes:
+      return asFP<HWC2_PFN_GET_CHANGED_COMPOSITION_TYPES>(
+          displayHook<decltype(&Display::getChangedCompositionTypes),
+                      &Display::getChangedCompositionTypes, uint32_t*,
+                      hwc2_layer_t*, int32_t*>);
+    case HWC2::FunctionDescriptor::GetColorModes:
+      return asFP<HWC2_PFN_GET_COLOR_MODES>(
+          displayHook<decltype(&Display::getColorModes),
+                      &Display::getColorModes, uint32_t*, int32_t*>);
+    case HWC2::FunctionDescriptor::GetDisplayAttribute:
+      return asFP<HWC2_PFN_GET_DISPLAY_ATTRIBUTE>(
+          displayHook<decltype(&Display::getDisplayAttribute),
+                      &Display::getDisplayAttribute, hwc2_config_t, int32_t,
+                      int32_t*>);
+    case HWC2::FunctionDescriptor::GetDisplayConfigs:
+      return asFP<HWC2_PFN_GET_DISPLAY_CONFIGS>(
+          displayHook<decltype(&Display::getConfigs), &Display::getConfigs,
+                      uint32_t*, hwc2_config_t*>);
+    case HWC2::FunctionDescriptor::GetDisplayName:
+      return asFP<HWC2_PFN_GET_DISPLAY_NAME>(
+          displayHook<decltype(&Display::getName), &Display::getName, uint32_t*,
+                      char*>);
+    case HWC2::FunctionDescriptor::GetDisplayRequests:
+      return asFP<HWC2_PFN_GET_DISPLAY_REQUESTS>(
+          displayHook<decltype(&Display::getRequests), &Display::getRequests,
+                      int32_t*, uint32_t*, hwc2_layer_t*, int32_t*>);
+    case HWC2::FunctionDescriptor::GetDisplayType:
+      return asFP<HWC2_PFN_GET_DISPLAY_TYPE>(
+          displayHook<decltype(&Display::getType), &Display::getType,
+                      int32_t*>);
+    case HWC2::FunctionDescriptor::GetDozeSupport:
+      return asFP<HWC2_PFN_GET_DOZE_SUPPORT>(
+          displayHook<decltype(&Display::getDozeSupport),
+                      &Display::getDozeSupport, int32_t*>);
+    case HWC2::FunctionDescriptor::GetHdrCapabilities:
+      return asFP<HWC2_PFN_GET_HDR_CAPABILITIES>(
+          displayHook<decltype(&Display::getHdrCapabilities),
+                      &Display::getHdrCapabilities, uint32_t*, int32_t*, float*,
+                      float*, float*>);
+    case HWC2::FunctionDescriptor::GetReleaseFences:
+      return asFP<HWC2_PFN_GET_RELEASE_FENCES>(
+          displayHook<decltype(&Display::getReleaseFences),
+                      &Display::getReleaseFences, uint32_t*, hwc2_layer_t*,
+                      int32_t*>);
+    case HWC2::FunctionDescriptor::PresentDisplay:
+      return asFP<HWC2_PFN_PRESENT_DISPLAY>(
+          displayHook<decltype(&Display::present), &Display::present,
+                      int32_t*>);
+    case HWC2::FunctionDescriptor::SetActiveConfig:
+      return asFP<HWC2_PFN_SET_ACTIVE_CONFIG>(
+          displayHook<decltype(&Display::setActiveConfig),
+                      &Display::setActiveConfig, hwc2_config_t>);
+    case HWC2::FunctionDescriptor::SetClientTarget:
+      return asFP<HWC2_PFN_SET_CLIENT_TARGET>(
+          displayHook<decltype(&Display::setClientTarget),
+                      &Display::setClientTarget, buffer_handle_t, int32_t,
+                      int32_t, hwc_region_t>);
+    case HWC2::FunctionDescriptor::SetColorMode:
+      return asFP<HWC2_PFN_SET_COLOR_MODE>(
+          displayHook<decltype(&Display::setColorMode), &Display::setColorMode,
+                      int32_t>);
+    case HWC2::FunctionDescriptor::SetColorTransform:
+      return asFP<HWC2_PFN_SET_COLOR_TRANSFORM>(
+          displayHook<decltype(&Display::setColorTransform),
+                      &Display::setColorTransform, const float*, int32_t>);
+    case HWC2::FunctionDescriptor::SetOutputBuffer:
+      return asFP<HWC2_PFN_SET_OUTPUT_BUFFER>(
+          displayHook<decltype(&Display::setOutputBuffer),
+                      &Display::setOutputBuffer, buffer_handle_t, int32_t>);
+    case HWC2::FunctionDescriptor::SetPowerMode:
+      return asFP<HWC2_PFN_SET_POWER_MODE>(
+          displayHook<decltype(&Display::setPowerMode), &Display::setPowerMode,
+                      int32_t>);
+    case HWC2::FunctionDescriptor::SetVsyncEnabled:
+      return asFP<HWC2_PFN_SET_VSYNC_ENABLED>(
+          displayHook<decltype(&Display::setVsyncEnabled),
+                      &Display::setVsyncEnabled, int32_t>);
+    case HWC2::FunctionDescriptor::ValidateDisplay:
+      return asFP<HWC2_PFN_VALIDATE_DISPLAY>(
+          displayHook<decltype(&Display::validate), &Display::validate,
+                      uint32_t*, uint32_t*>);
+    case HWC2::FunctionDescriptor::GetClientTargetSupport:
+      return asFP<HWC2_PFN_GET_CLIENT_TARGET_SUPPORT>(
+          displayHook<decltype(&Display::getClientTargetSupport),
+                      &Display::getClientTargetSupport, uint32_t, uint32_t,
+                      int32_t, int32_t>);
+    case HWC2::FunctionDescriptor::GetDisplayIdentificationData:
+      return asFP<HWC2_PFN_GET_DISPLAY_IDENTIFICATION_DATA>(
+          displayHook<decltype(&Display::getDisplayIdentificationData),
+                      &Display::getDisplayIdentificationData, uint8_t*,
+                      uint32_t*, uint8_t*>);
+    case HWC2::FunctionDescriptor::GetDisplayCapabilities:
+      return asFP<HWC2_PFN_GET_DISPLAY_CAPABILITIES>(
+          displayHook<decltype(&Display::getDisplayCapabilities),
+                      &Display::getDisplayCapabilities, uint32_t*, uint32_t*>);
+    case HWC2::FunctionDescriptor::GetDisplayBrightnessSupport:
+      return asFP<HWC2_PFN_GET_DISPLAY_BRIGHTNESS_SUPPORT>(
+          displayHook<decltype(&Display::getDisplayBrightnessSupport),
+                      &Display::getDisplayBrightnessSupport, bool*>);
+    case HWC2::FunctionDescriptor::SetDisplayBrightness:
+      return asFP<HWC2_PFN_SET_DISPLAY_BRIGHTNESS>(
+          displayHook<decltype(&Display::setDisplayBrightness),
+                      &Display::setDisplayBrightness, float>);
+
+    // Layer functions
+    case HWC2::FunctionDescriptor::SetCursorPosition:
+      return asFP<HWC2_PFN_SET_CURSOR_POSITION>(
+          layerHook<decltype(&Layer::setCursorPosition),
+                    &Layer::setCursorPosition, int32_t, int32_t>);
+    case HWC2::FunctionDescriptor::SetLayerBuffer:
+      return asFP<HWC2_PFN_SET_LAYER_BUFFER>(
+          layerHook<decltype(&Layer::setBuffer), &Layer::setBuffer,
+                    buffer_handle_t, int32_t>);
+    case HWC2::FunctionDescriptor::SetLayerSurfaceDamage:
+      return asFP<HWC2_PFN_SET_LAYER_SURFACE_DAMAGE>(
+          layerHook<decltype(&Layer::setSurfaceDamage),
+                    &Layer::setSurfaceDamage, hwc_region_t>);
+    case HWC2::FunctionDescriptor::SetLayerBlendMode:
+      return asFP<HWC2_PFN_SET_LAYER_BLEND_MODE>(
+          layerHook<decltype(&Layer::setBlendMode), &Layer::setBlendMode,
+                    int32_t>);
+    case HWC2::FunctionDescriptor::SetLayerColor:
+      return asFP<HWC2_PFN_SET_LAYER_COLOR>(
+          layerHook<decltype(&Layer::setColor), &Layer::setColor, hwc_color_t>);
+    case HWC2::FunctionDescriptor::SetLayerCompositionType:
+      return asFP<HWC2_PFN_SET_LAYER_COMPOSITION_TYPE>(
+          layerHook<decltype(&Layer::setCompositionType),
+                    &Layer::setCompositionType, int32_t>);
+    case HWC2::FunctionDescriptor::SetLayerDataspace:
+      return asFP<HWC2_PFN_SET_LAYER_DATASPACE>(
+          layerHook<decltype(&Layer::setDataspace), &Layer::setDataspace,
+                    int32_t>);
+    case HWC2::FunctionDescriptor::SetLayerDisplayFrame:
+      return asFP<HWC2_PFN_SET_LAYER_DISPLAY_FRAME>(
+          layerHook<decltype(&Layer::setDisplayFrame), &Layer::setDisplayFrame,
+                    hwc_rect_t>);
+    case HWC2::FunctionDescriptor::SetLayerPlaneAlpha:
+      return asFP<HWC2_PFN_SET_LAYER_PLANE_ALPHA>(
+          layerHook<decltype(&Layer::setPlaneAlpha), &Layer::setPlaneAlpha,
+                    float>);
+    case HWC2::FunctionDescriptor::SetLayerSidebandStream:
+      return asFP<HWC2_PFN_SET_LAYER_SIDEBAND_STREAM>(
+          layerHook<decltype(&Layer::setSidebandStream),
+                    &Layer::setSidebandStream, const native_handle_t*>);
+    case HWC2::FunctionDescriptor::SetLayerSourceCrop:
+      return asFP<HWC2_PFN_SET_LAYER_SOURCE_CROP>(
+          layerHook<decltype(&Layer::setSourceCrop), &Layer::setSourceCrop,
+                    hwc_frect_t>);
+    case HWC2::FunctionDescriptor::SetLayerTransform:
+      return asFP<HWC2_PFN_SET_LAYER_TRANSFORM>(
+          layerHook<decltype(&Layer::setTransform), &Layer::setTransform,
+                    int32_t>);
+    case HWC2::FunctionDescriptor::SetLayerVisibleRegion:
+      return asFP<HWC2_PFN_SET_LAYER_VISIBLE_REGION>(
+          layerHook<decltype(&Layer::setVisibleRegion),
+                    &Layer::setVisibleRegion, hwc_region_t>);
+    case HWC2::FunctionDescriptor::SetLayerZOrder:
+      return asFP<HWC2_PFN_SET_LAYER_Z_ORDER>(
+          displayHook<decltype(&Display::updateLayerZ), &Display::updateLayerZ,
+                      hwc2_layer_t, uint32_t>);
+
+    default:
+      ALOGE("GetFunction: Unknown function descriptor: %d",
+            static_cast<int32_t>(desc));
+      return nullptr;
+  }
+}
+
+/*static*/
+hwc2_function_pointer_t Device::getFunctionHook(hwc2_device_t* dev,
+                                                int32_t desc) {
+  Device* device = Device::fromDevice(dev);
+  return device->getFunction(desc);
+}
+
+// Device functions
+
+HWC2::Error Device::createVirtualDisplay(uint32_t /*width*/,
+                                         uint32_t /*height*/,
+                                         int32_t* /*format*/,
+                                         hwc2_display_t* /*outDisplay*/) {
+  DEBUG_LOG("%s", __FUNCTION__);
+  // TODO: VirtualDisplay support
+  return HWC2::Error::None;
+}
+
+HWC2::Error Device::destroyVirtualDisplay(hwc2_display_t /*displayId*/) {
+  DEBUG_LOG("%s", __FUNCTION__);
+  // TODO: VirtualDisplay support
+  return HWC2::Error::None;
+}
+
+void Device::dump(uint32_t* /*outSize*/, char* /*outBuffer*/) {
+  DEBUG_LOG("%s", __FUNCTION__);
+  // TODO:
+  return;
+}
+
+uint32_t Device::getMaxVirtualDisplayCount() {
+  DEBUG_LOG("%s", __FUNCTION__);
+  // TODO: VirtualDisplay support
+  return 0;
+}
+
+static bool IsHandledCallback(HWC2::Callback descriptor) {
+  switch (descriptor) {
+    case HWC2::Callback::Hotplug: {
+      return true;
+    }
+    case HWC2::Callback::Refresh: {
+      return true;
+    }
+    case HWC2::Callback::Vsync: {
+      return true;
+    }
+    case HWC2::Callback::Vsync_2_4: {
+      return false;
+    }
+    case HWC2::Callback::VsyncPeriodTimingChanged: {
+      return false;
+    }
+    case HWC2::Callback::Invalid: {
+      return false;
+    }
+    case HWC2::Callback::SeamlessPossible: {
+      return false;
+    }
+  }
+  return false;
+}
+
+HWC2::Error Device::registerCallback(int32_t desc,
+                                     hwc2_callback_data_t callbackData,
+                                     hwc2_function_pointer_t pointer) {
+  const auto callbackType = static_cast<HWC2::Callback>(desc);
+  const auto callbackTypeString = to_string(callbackType);
+  DEBUG_LOG("%s callback %s", __FUNCTION__, callbackTypeString.c_str());
+
+  if (!IsHandledCallback(callbackType)) {
+    ALOGE("%s unhandled callback: %s", __FUNCTION__,
+          callbackTypeString.c_str());
+    return HWC2::Error::BadParameter;
+  }
+
+  std::unique_lock<std::mutex> lock(mStateMutex);
+
+  if (pointer != nullptr) {
+    mCallbacks[callbackType] = {callbackData, pointer};
+  } else {
+    mCallbacks.erase(callbackType);
+    return HWC2::Error::None;
+  }
+
+  if (callbackType == HWC2::Callback::Hotplug) {
+    // Callback without the state lock held
+    lock.unlock();
+    auto hotplug = reinterpret_cast<HWC2_PFN_HOTPLUG>(pointer);
+    auto hotplugConnect = static_cast<int32_t>(HWC2::Connection::Connected);
+    for (const auto& [displayId, display] : mDisplays) {
+      ALOGI("%s hotplug connecting display:%" PRIu64, __FUNCTION__, displayId);
+      hotplug(callbackData, displayId, hotplugConnect);
+    }
+  }
+
+  return HWC2::Error::None;
+}
+
+bool Device::handleHotplug(bool connected, uint32_t id, uint32_t width,
+                           uint32_t height, uint32_t dpiX, uint32_t dpiY,
+                           uint32_t refreshRate) {
+  std::unique_lock<std::mutex> lock(mStateMutex);
+  if (mCallbacks[HWC2::Callback::Hotplug].pointer == nullptr) {
+    return false;
+  }
+  auto hotplug = reinterpret_cast<HWC2_PFN_HOTPLUG>(
+      mCallbacks[HWC2::Callback::Hotplug].pointer);
+  auto hotplugConnect = static_cast<int32_t>(HWC2::Connection::Connected);
+  auto hotplugDisconnect = static_cast<int32_t>(HWC2::Connection::Disconnected);
+  Display* display = getDisplay(id);
+  if (display) {
+    // if existed, disconnect first
+    ALOGD("callback hotplugDisconnect display %" PRIu32, id);
+    hotplug(mCallbacks[HWC2::Callback::Hotplug].data, id, hotplugDisconnect);
+    display->lock();
+    mComposer->onDisplayDestroy(display);
+    display->unlock();
+  }
+  if (connected) {
+    createDisplay(id, width, height, dpiX, dpiY, refreshRate);
+    ALOGD("callback hotplugConnect display %" PRIu32 " width %" PRIu32
+          " height %" PRIu32 " dpiX %" PRIu32 " dpiY %" PRIu32
+          "fps %" PRIu32, id, width, height, dpiX, dpiY, refreshRate);
+    hotplug(mCallbacks[HWC2::Callback::Hotplug].data, id, hotplugConnect);
+  };
+
+  return true;
+}
+
+Display* Device::getDisplay(hwc2_display_t id) {
+  auto display = mDisplays.find(id);
+  if (display == mDisplays.end()) {
+    ALOGW("Failed to get display for id=%d", (uint32_t)id);
+    return nullptr;
+  }
+  return display->second.get();
+}
+
+static int OpenDevice(const struct hw_module_t* module, const char* name,
+                      struct hw_device_t** dev) {
+  DEBUG_LOG("%s ", __FUNCTION__);
+
+  if (strcmp(name, HWC_HARDWARE_COMPOSER)) {
+    ALOGE("Invalid module name- %s", name);
+    return -EINVAL;
+  }
+
+  std::unique_ptr<Device> device = std::make_unique<Device>();
+  HWC2::Error error = device->init();
+  if (error != HWC2::Error::None) {
+    ALOGE("%s: failed to initialize device", __FUNCTION__);
+    return -EINVAL;
+  }
+
+  error = device->createDisplays();
+  if (error != HWC2::Error::None) {
+    ALOGE("%s: failed to initialize device displays.", __FUNCTION__);
+    return -EINVAL;
+  }
+
+  device->common.module = const_cast<hw_module_t*>(module);
+  *dev = &device.release()->common;
+  return 0;
+}
+
+}  // namespace android
+
+static struct hw_module_methods_t hwc2_module_methods = {
+    .open = android::OpenDevice,
+};
+
+hw_module_t HAL_MODULE_INFO_SYM = {
+    .tag = HARDWARE_MODULE_TAG,
+    .version_major = 2,
+    .version_minor = 3,
+    .id = HWC_HARDWARE_MODULE_ID,
+    .name = "goldfish HWC2 module",
+    .author = "The Android Open Source Project",
+    .methods = &hwc2_module_methods,
+    .dso = NULL,
+    .reserved = {0},
+};
diff --git a/system/hwc2/Device.h b/system/hwc2/Device.h
new file mode 100644
index 0000000..7c990b9
--- /dev/null
+++ b/system/hwc2/Device.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HWC_DEVICE_H
+#define ANDROID_HWC_DEVICE_H
+
+#include <atomic>
+#include <map>
+#include <memory>
+#include <mutex>
+#include <set>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "Common.h"
+#include "Composer.h"
+#include "Display.h"
+#include "Layer.h"
+
+namespace android {
+
+class Composer;
+class Display;
+
+class Device : public hwc2_device_t {
+ public:
+  static inline Device* fromDevice(hw_device_t* device) {
+    return reinterpret_cast<Device*>(device);
+  }
+
+  static inline Device* fromDevice(hwc2_device_t* device) {
+    return static_cast<Device*>(device);
+  }
+
+  Device();
+  ~Device();
+
+  HWC2::Error init();
+
+  HWC2::Error createDisplays();
+
+  HWC2::Error createDisplay(uint32_t displayId, uint32_t width, uint32_t height,
+                            uint32_t dpiX, uint32_t dpiY, uint32_t refreshRate);
+
+  Display* getDisplay(hwc2_display_t displayId);
+
+ private:
+  HWC2::Error destroyDisplays();
+
+  bool handleHotplug(bool connected, uint32_t id, uint32_t width,
+                     uint32_t height, uint32_t dpiX, uint32_t dpiY,
+                     uint32_t refreshRate);
+
+  void getCapabilities(uint32_t* outCount, int32_t* outCapabilities);
+  static void getCapabilitiesHook(hwc2_device_t* device, uint32_t* outCount,
+                                  int32_t* outCapabilities);
+
+  hwc2_function_pointer_t getFunction(int32_t descriptor);
+  static hwc2_function_pointer_t getFunctionHook(hwc2_device_t* device,
+                                                 int32_t descriptor);
+
+  // Wrapper to call a specific function on a specific device.
+  template <typename T, typename HookType, HookType func, typename... Args>
+  static T DeviceHook(hwc2_device_t* dev, Args... args) {
+    Device* device = Device::fromDevice(dev);
+    return static_cast<T>(((*device).*func)(std::forward<Args>(args)...));
+  }
+
+  // Wrapper to call a specific function on a specific display.
+  template <typename HookType, HookType func, typename... Args>
+  static int32_t displayHook(hwc2_device_t* dev, hwc2_display_t displayId,
+                             Args... args) {
+    Device* device = Device::fromDevice(dev);
+
+    Display* display = device->getDisplay(displayId);
+    if (display == nullptr) {
+      return static_cast<int32_t>(HWC2::Error::BadDisplay);
+    }
+
+    return static_cast<int32_t>((display->*func)(std::forward<Args>(args)...));
+  }
+
+  // Wrapper to call a specific function on a specific layer.
+  template <typename HookType, HookType func, typename... Args>
+  static int32_t layerHook(hwc2_device_t* dev, hwc2_display_t displayId,
+                           hwc2_layer_t layerId, Args... args) {
+    Device* device = Device::fromDevice(dev);
+
+    Display* display = device->getDisplay(displayId);
+    if (display == nullptr) {
+      return static_cast<int32_t>(HWC2::Error::BadDisplay);
+    }
+
+    Layer* layer = display->getLayer(layerId);
+    if (layer == nullptr) {
+      return static_cast<int32_t>(HWC2::Error::BadLayer);
+    }
+
+    return static_cast<int32_t>((layer->*func)(std::forward<Args>(args)...));
+  }
+
+  // Device functions
+  HWC2::Error createVirtualDisplay(uint32_t width, uint32_t height,
+                                   int32_t* format, hwc2_display_t* outDisplay);
+
+  HWC2::Error destroyVirtualDisplay(hwc2_display_t display);
+
+  void dump(uint32_t* outSize, char* outBuffer);
+
+  uint32_t getMaxVirtualDisplayCount();
+
+  HWC2::Error registerCallback(int32_t descriptor,
+                               hwc2_callback_data_t callbackData,
+                               hwc2_function_pointer_t pointer);
+
+  // These are potentially accessed from multiple threads, and are protected
+  // by this mutex.
+  std::mutex mStateMutex;
+
+  std::unique_ptr<Composer> mComposer;
+
+  std::unordered_set<HWC2::Capability> mCapabilities;
+
+  // For sharing Vsync callback with each displays Vsync thread.
+  friend class Display;
+
+  struct CallbackInfo {
+    hwc2_callback_data_t data;
+    hwc2_function_pointer_t pointer;
+  };
+  std::unordered_map<HWC2::Callback, CallbackInfo> mCallbacks;
+
+  std::map<hwc2_display_t, std::unique_ptr<Display>> mDisplays;
+};
+
+}  // namespace android
+#endif
diff --git a/system/hwc2/Display.cpp b/system/hwc2/Display.cpp
new file mode 100644
index 0000000..bed6b64
--- /dev/null
+++ b/system/hwc2/Display.cpp
@@ -0,0 +1,989 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Display.h"
+
+#include <sync/sync.h>
+
+#include <atomic>
+#include <numeric>
+
+#include "Device.h"
+
+namespace android {
+namespace {
+
+std::atomic<hwc2_config_t> sNextConfigId{0};
+
+bool IsValidColorMode(android_color_mode_t mode) {
+  switch (mode) {
+    case HAL_COLOR_MODE_NATIVE:                         // Fall-through
+    case HAL_COLOR_MODE_STANDARD_BT601_625:             // Fall-through
+    case HAL_COLOR_MODE_STANDARD_BT601_625_UNADJUSTED:  // Fall-through
+    case HAL_COLOR_MODE_STANDARD_BT601_525:             // Fall-through
+    case HAL_COLOR_MODE_STANDARD_BT601_525_UNADJUSTED:  // Fall-through
+    case HAL_COLOR_MODE_STANDARD_BT709:                 // Fall-through
+    case HAL_COLOR_MODE_DCI_P3:                         // Fall-through
+    case HAL_COLOR_MODE_SRGB:                           // Fall-through
+    case HAL_COLOR_MODE_ADOBE_RGB:                      // Fall-through
+    case HAL_COLOR_MODE_DISPLAY_P3:
+      return true;
+    default:
+      return false;
+  }
+}
+
+bool isValidPowerMode(HWC2::PowerMode mode) {
+  switch (mode) {
+    case HWC2::PowerMode::Off:          // Fall-through
+    case HWC2::PowerMode::DozeSuspend:  // Fall-through
+    case HWC2::PowerMode::Doze:         // Fall-through
+    case HWC2::PowerMode::On:
+      return true;
+    default:
+      return false;
+  }
+}
+
+}  // namespace
+
+Display::Display(Device& device, Composer* composer, hwc2_display_t id)
+    : mDevice(device),
+      mComposer(composer),
+      mId(id),
+      mVsyncThread(new VsyncThread(*this)) {}
+
+Display::~Display() {}
+
+HWC2::Error Display::init(uint32_t width, uint32_t height, uint32_t dpiX,
+                          uint32_t dpiY, uint32_t refreshRateHz,
+                          const std::optional<std::vector<uint8_t>>& edid) {
+  ALOGD("%s initializing display:%" PRIu64
+        " width:%d height:%d dpiX:%d dpiY:%d refreshRateHz:%d",
+        __FUNCTION__, mId, width, height, dpiX, dpiY, refreshRateHz);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  mVsyncPeriod = 1000 * 1000 * 1000 / refreshRateHz;
+  mVsyncThread->run("", ANDROID_PRIORITY_URGENT_DISPLAY);
+
+  hwc2_config_t configId = sNextConfigId++;
+
+  Config config(configId);
+  config.setAttribute(HWC2::Attribute::VsyncPeriod, mVsyncPeriod);
+  config.setAttribute(HWC2::Attribute::Width, width);
+  config.setAttribute(HWC2::Attribute::Height, height);
+  config.setAttribute(HWC2::Attribute::DpiX, dpiX * 1000);
+  config.setAttribute(HWC2::Attribute::DpiY, dpiY * 1000);
+  mConfigs.emplace(configId, config);
+
+  mActiveConfigId = configId;
+  mActiveColorMode = HAL_COLOR_MODE_NATIVE;
+  mColorModes.emplace((android_color_mode_t)HAL_COLOR_MODE_NATIVE);
+  mEdid = edid;
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::updateParameters(
+    uint32_t width, uint32_t height, uint32_t dpiX, uint32_t dpiY,
+    uint32_t refreshRateHz, const std::optional<std::vector<uint8_t>>& edid) {
+  DEBUG_LOG("%s updating display:%" PRIu64
+            " width:%d height:%d dpiX:%d dpiY:%d refreshRateHz:%d",
+            __FUNCTION__, mId, width, height, dpiX, dpiY, refreshRateHz);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  mVsyncPeriod = 1000 * 1000 * 1000 / refreshRateHz;
+
+  auto it = mConfigs.find(*mActiveConfigId);
+  if (it == mConfigs.end()) {
+    ALOGE("%s: failed to find config %" PRIu32, __func__, *mActiveConfigId);
+    return HWC2::Error::NoResources;
+  }
+  it->second.setAttribute(HWC2::Attribute::VsyncPeriod, mVsyncPeriod);
+  it->second.setAttribute(HWC2::Attribute::Width, width);
+  it->second.setAttribute(HWC2::Attribute::Height, height);
+  it->second.setAttribute(HWC2::Attribute::DpiX, dpiX * 1000);
+  it->second.setAttribute(HWC2::Attribute::DpiY, dpiY * 1000);
+
+  mEdid = edid;
+
+  return HWC2::Error::None;
+}
+
+Layer* Display::getLayer(hwc2_layer_t layerId) {
+  auto it = mLayers.find(layerId);
+  if (it == mLayers.end()) {
+    ALOGE("%s Unknown layer:%" PRIu64, __FUNCTION__, layerId);
+    return nullptr;
+  }
+
+  return it->second.get();
+}
+
+buffer_handle_t Display::waitAndGetClientTargetBuffer() {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  int fence = mClientTarget.getFence();
+  if (fence != -1) {
+    int err = sync_wait(fence, 3000);
+    if (err < 0 && errno == ETIME) {
+      ALOGE("%s waited on fence %" PRId32 " for 3000 ms", __FUNCTION__, fence);
+    }
+    close(fence);
+  }
+
+  return mClientTarget.getBuffer();
+}
+
+HWC2::Error Display::acceptChanges() {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  if (!mChanges) {
+    ALOGE("%s: display %" PRIu64 " failed, not validated", __FUNCTION__, mId);
+    return HWC2::Error::NotValidated;
+  }
+
+  for (auto& [layerId, layerCompositionType] : mChanges->getTypeChanges()) {
+    auto* layer = getLayer(layerId);
+    if (layer == nullptr) {
+      ALOGE("%s: display:%" PRIu64 " layer:%" PRIu64
+            " dropped before AcceptChanges?",
+            __FUNCTION__, mId, layerId);
+      continue;
+    }
+
+    layer->setCompositionTypeEnum(layerCompositionType);
+  }
+  mChanges->clearTypeChanges();
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::createLayer(hwc2_layer_t* outLayerId) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  auto layer = std::make_unique<Layer>();
+  auto layerId = layer->getId();
+  DEBUG_LOG("%s created layer:%" PRIu64, __FUNCTION__, layerId);
+
+  *outLayerId = layerId;
+
+  mLayers.emplace(layerId, std::move(layer));
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::destroyLayer(hwc2_layer_t layerId) {
+  DEBUG_LOG("%s destroy layer:%" PRIu64, __FUNCTION__, layerId);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  auto it = mLayers.find(layerId);
+  if (it == mLayers.end()) {
+    ALOGE("%s display:%" PRIu64 " has no such layer:%." PRIu64, __FUNCTION__,
+          mId, layerId);
+    return HWC2::Error::BadLayer;
+  }
+
+  mOrderedLayers.erase(std::remove_if(mOrderedLayers.begin(),  //
+                                      mOrderedLayers.end(),    //
+                                      [layerId](Layer* layer) {
+                                        return layer->getId() == layerId;
+                                      }),
+                       mOrderedLayers.end());
+
+  mLayers.erase(it);
+
+  DEBUG_LOG("%s destroyed layer:%" PRIu64, __FUNCTION__, layerId);
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::getActiveConfig(hwc2_config_t* outConfig) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  if (!mActiveConfigId) {
+    ALOGW("%s: display:%" PRIu64 " has no active config.", __FUNCTION__, mId);
+    return HWC2::Error::BadConfig;
+  }
+
+  *outConfig = *mActiveConfigId;
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::getDisplayAttributeEnum(hwc2_config_t configId,
+                                             HWC2::Attribute attribute,
+                                             int32_t* outValue) {
+  auto attributeString = to_string(attribute);
+  DEBUG_LOG("%s: display:%" PRIu64 " attribute:%s", __FUNCTION__, mId,
+            attributeString.c_str());
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  auto it = mConfigs.find(configId);
+  if (it == mConfigs.end()) {
+    ALOGW("%s: display:%" PRIu64 "bad config:%" PRIu32, __FUNCTION__, mId,
+          configId);
+    return HWC2::Error::BadConfig;
+  }
+
+  const Config& config = it->second;
+  *outValue = config.getAttribute(attribute);
+  DEBUG_LOG("%s: display:%" PRIu64 " attribute:%s value is %" PRIi32,
+            __FUNCTION__, mId, attributeString.c_str(), *outValue);
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::getDisplayAttribute(hwc2_config_t configId,
+                                         int32_t attribute, int32_t* outValue) {
+  return getDisplayAttributeEnum(
+      configId, static_cast<HWC2::Attribute>(attribute), outValue);
+}
+
+HWC2::Error Display::getChangedCompositionTypes(uint32_t* outNumElements,
+                                                hwc2_layer_t* outLayers,
+                                                int32_t* outTypes) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  if (!mChanges) {
+    ALOGE("%s: for display:%" PRIu64 " failed, display not validated",
+          __FUNCTION__, mId);
+    return HWC2::Error::NotValidated;
+  }
+
+  if ((outLayers == nullptr) || (outTypes == nullptr)) {
+    *outNumElements = mChanges->getTypeChanges().size();
+    return HWC2::Error::None;
+  }
+
+  uint32_t numWritten = 0;
+  for (const auto& element : mChanges->getTypeChanges()) {
+    if (numWritten == *outNumElements) {
+      break;
+    }
+
+    auto layerId = element.first;
+    const auto layerCompositionType = element.second;
+    const auto layerCompositionTypeString = to_string(layerCompositionType);
+    DEBUG_LOG("%s: display:%" PRIu64 " layer:%" PRIu64 " changed to %s",
+              __FUNCTION__, mId, layerId, layerCompositionTypeString.c_str());
+
+    outLayers[numWritten] = layerId;
+    outTypes[numWritten] = static_cast<int32_t>(layerCompositionType);
+    ++numWritten;
+  }
+  *outNumElements = numWritten;
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::getColorModes(uint32_t* outNumModes, int32_t* outModes) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  if (!outModes) {
+    *outNumModes = mColorModes.size();
+    return HWC2::Error::None;
+  }
+
+  // we only support HAL_COLOR_MODE_NATIVE so far
+  uint32_t numModes = std::min<uint32_t>(
+      *outNumModes, static_cast<uint32_t>(mColorModes.size()));
+  std::copy_n(mColorModes.cbegin(), numModes, outModes);
+  *outNumModes = numModes;
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::getConfigs(uint32_t* outNumConfigs,
+                                hwc2_config_t* outConfigs) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  if (!outConfigs) {
+    *outNumConfigs = mConfigs.size();
+    return HWC2::Error::None;
+  }
+
+  uint32_t numWritten = 0;
+  for (const auto& [configId, config] : mConfigs) {
+    if (numWritten == *outNumConfigs) {
+      break;
+    }
+    outConfigs[numWritten] = configId;
+    ++numWritten;
+  }
+
+  *outNumConfigs = numWritten;
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::getDozeSupport(int32_t* outSupport) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  // We don't support so far
+  *outSupport = 0;
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::getHdrCapabilities(uint32_t* outNumTypes,
+                                        int32_t* /*outTypes*/,
+                                        float* /*outMaxLuminance*/,
+                                        float* /*outMaxAverageLuminance*/,
+                                        float* /*outMinLuminance*/) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  // We don't support so far
+  *outNumTypes = 0;
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::getName(uint32_t* outSize, char* outName) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  if (!outName) {
+    *outSize = mName.size();
+    return HWC2::Error::None;
+  }
+  auto numCopied = mName.copy(outName, *outSize);
+  *outSize = numCopied;
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::addReleaseFenceLocked(int32_t fence) {
+  DEBUG_LOG("%s: display:%" PRIu64 " fence:%d", __FUNCTION__, mId, fence);
+
+  mReleaseFences.push_back(fence);
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::addReleaseLayerLocked(hwc2_layer_t layerId) {
+  DEBUG_LOG("%s: display:%" PRIu64 " layer:%" PRIu64, __FUNCTION__, mId,
+            layerId);
+
+  mReleaseLayerIds.push_back(layerId);
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::getReleaseFences(uint32_t* outNumElements,
+                                      hwc2_layer_t* outLayers,
+                                      int32_t* outFences) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  *outNumElements = mReleaseLayerIds.size();
+
+  if (*outNumElements && outLayers) {
+    DEBUG_LOG("%s export release layers", __FUNCTION__);
+    memcpy(outLayers, mReleaseLayerIds.data(),
+           sizeof(hwc2_layer_t) * (*outNumElements));
+  }
+
+  if (*outNumElements && outFences) {
+    DEBUG_LOG("%s export release fences", __FUNCTION__);
+    memcpy(outFences, mReleaseFences.data(),
+           sizeof(int32_t) * (*outNumElements));
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::clearReleaseFencesAndIdsLocked() {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  mReleaseLayerIds.clear();
+  mReleaseFences.clear();
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::getRequests(int32_t* outDisplayRequests,
+                                 uint32_t* outNumElements,
+                                 hwc2_layer_t* outLayers,
+                                 int32_t* outLayerRequests) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  if (!mChanges) {
+    return HWC2::Error::NotValidated;
+  }
+
+  if (outLayers == nullptr || outLayerRequests == nullptr) {
+    *outNumElements = mChanges->getNumLayerRequests();
+    return HWC2::Error::None;
+  }
+
+  // TODO
+  //  Display requests (HWC2::DisplayRequest) are not supported so far:
+  *outDisplayRequests = 0;
+
+  uint32_t numWritten = 0;
+  for (const auto& request : mChanges->getLayerRequests()) {
+    if (numWritten == *outNumElements) {
+      break;
+    }
+    outLayers[numWritten] = request.first;
+    outLayerRequests[numWritten] = static_cast<int32_t>(request.second);
+    ++numWritten;
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::getType(int32_t* outType) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  *outType = (int32_t)mType;
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::present(int32_t* outRetireFence) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  *outRetireFence = -1;
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  if (!mChanges || (mChanges->getNumTypes() > 0)) {
+    ALOGE("%s: display:%" PRIu64 " failed, not validated", __FUNCTION__, mId);
+    return HWC2::Error::NotValidated;
+  }
+  mChanges.reset();
+
+  if (mComposer == nullptr) {
+    ALOGE("%s: display:%" PRIu64 " missing composer", __FUNCTION__, mId);
+    return HWC2::Error::NoResources;
+  }
+
+  HWC2::Error error = mComposer->presentDisplay(this, outRetireFence);
+  if (error != HWC2::Error::None) {
+    ALOGE("%s: display:%" PRIu64 " failed to present", __FUNCTION__, mId);
+    return error;
+  }
+
+  DEBUG_LOG("%s: display:%" PRIu64 " present done!", __FUNCTION__, mId);
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::setActiveConfig(hwc2_config_t configId) {
+  DEBUG_LOG("%s: display:%" PRIu64 " setting active config to %" PRIu32,
+            __FUNCTION__, mId, configId);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  if (mConfigs.find(configId) == mConfigs.end()) {
+    ALOGE("%s: display:%" PRIu64 " bad config:%" PRIu32, __FUNCTION__, mId,
+          configId);
+    return HWC2::Error::BadConfig;
+  }
+
+  mActiveConfigId = configId;
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::setClientTarget(buffer_handle_t target,
+                                     int32_t acquireFence,
+                                     int32_t /*dataspace*/,
+                                     hwc_region_t /*damage*/) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+  mClientTarget.setBuffer(target);
+  mClientTarget.setFence(acquireFence);
+  mComposer->onDisplayClientTargetSet(this);
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::setColorMode(int32_t intMode) {
+  DEBUG_LOG("%s: display:%" PRIu64 " setting color mode to %" PRId32,
+            __FUNCTION__, mId, intMode);
+
+  auto mode = static_cast<android_color_mode_t>(intMode);
+  if (!IsValidColorMode(mode)) {
+    ALOGE("%s: display:%" PRIu64 " invalid color mode %" PRId32, __FUNCTION__,
+          mId, intMode);
+    return HWC2::Error::BadParameter;
+  }
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  if (mColorModes.count(mode) == 0) {
+    ALOGE("%s: display %" PRIu64 " mode %d not found", __FUNCTION__, mId,
+          intMode);
+    return HWC2::Error::Unsupported;
+  }
+  mActiveColorMode = mode;
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::setColorTransform(const float* /*matrix*/, int32_t hint) {
+  DEBUG_LOG("%s: display:%" PRIu64 " setting hint to %d", __FUNCTION__, mId,
+            hint);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+  // we force client composition if this is set
+  if (hint == 0) {
+    mSetColorTransform = false;
+  } else {
+    mSetColorTransform = true;
+  }
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::setOutputBuffer(buffer_handle_t /*buffer*/,
+                                     int32_t /*releaseFence*/) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+  // TODO: for virtual display
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::setPowerMode(int32_t intMode) {
+  auto mode = static_cast<HWC2::PowerMode>(intMode);
+  auto modeString = to_string(mode);
+  DEBUG_LOG("%s: display:%" PRIu64 " setting power mode to %s", __FUNCTION__,
+            mId, modeString.c_str());
+
+  if (!isValidPowerMode(mode)) {
+    return HWC2::Error::BadParameter;
+  }
+
+  if (mode == HWC2::PowerMode::Doze || mode == HWC2::PowerMode::DozeSuspend) {
+    ALOGE("%s display %" PRIu64 " power mode %s not supported", __FUNCTION__,
+          mId, modeString.c_str());
+    return HWC2::Error::Unsupported;
+  }
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  mPowerMode = mode;
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::setVsyncEnabled(int32_t intEnable) {
+  auto enable = static_cast<HWC2::Vsync>(intEnable);
+  auto enableString = to_string(enable);
+  DEBUG_LOG("%s: display:%" PRIu64 " setting vsync to %s", __FUNCTION__, mId,
+            enableString.c_str());
+
+  if (enable == HWC2::Vsync::Invalid) {
+    return HWC2::Error::BadParameter;
+  }
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+  DEBUG_LOG("%s: display:%" PRIu64 " setting vsync locked to %s", __FUNCTION__,
+            mId, enableString.c_str());
+
+  mVsyncEnabled = enable;
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::setVsyncPeriod(uint32_t period) {
+  DEBUG_LOG("%s: display:%" PRIu64 " setting vsync period to %d", __FUNCTION__,
+            mId, period);
+
+  mVsyncPeriod = period;
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::validate(uint32_t* outNumTypes, uint32_t* outNumRequests) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  mOrderedLayers.clear();
+  mOrderedLayers.reserve(mLayers.size());
+  for (auto& [_, layerPtr] : mLayers) {
+    mOrderedLayers.push_back(layerPtr.get());
+  }
+
+  std::sort(mOrderedLayers.begin(), mOrderedLayers.end(),
+            [](const Layer* layerA, const Layer* layerB) {
+              const auto zA = layerA->getZ();
+              const auto zB = layerB->getZ();
+              if (zA != zB) {
+                return zA < zB;
+              }
+              return layerA->getId() < layerB->getId();
+            });
+
+  if (!mChanges) {
+    mChanges.reset(new Changes);
+  } else {
+    ALOGE("Validate was called more than once!");
+  }
+
+  if (mComposer == nullptr) {
+    ALOGE("%s: display:%" PRIu64 " missing composer", __FUNCTION__, mId);
+    return HWC2::Error::NoResources;
+  }
+
+  std::unordered_map<hwc2_layer_t, HWC2::Composition> changes;
+
+  HWC2::Error error = mComposer->validateDisplay(this, &changes);
+  if (error != HWC2::Error::None) {
+    ALOGE("%s: display:%" PRIu64 " failed to validate", __FUNCTION__, mId);
+    return error;
+  }
+
+  for (const auto& [layerId, changedCompositionType] : changes) {
+    mChanges->addTypeChange(layerId, changedCompositionType);
+  }
+
+  *outNumTypes = mChanges->getNumTypes();
+  *outNumRequests = mChanges->getNumLayerRequests();
+  return *outNumTypes > 0 ? HWC2::Error::HasChanges : HWC2::Error::None;
+}
+
+HWC2::Error Display::updateLayerZ(hwc2_layer_t layerId, uint32_t z) {
+  DEBUG_LOG("%s: display:%" PRIu64 " update layer:%" PRIu64 " z:%d",
+            __FUNCTION__, mId, layerId, z);
+
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  const auto layerIt = mLayers.find(layerId);
+  if (layerIt == mLayers.end()) {
+    ALOGE("%s failed to find layer %" PRIu64, __FUNCTION__, layerId);
+    return HWC2::Error::BadLayer;
+  }
+
+  auto& layer = layerIt->second;
+  layer->setZ(z);
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::getClientTargetSupport(uint32_t width, uint32_t height,
+                                            int32_t format, int32_t dataspace) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+  std::unique_lock<std::recursive_mutex> lock(mStateMutex);
+
+  if (!mActiveConfigId) {
+    return HWC2::Error::Unsupported;
+  }
+
+  const auto it = mConfigs.find(*mActiveConfigId);
+  if (it == mConfigs.end()) {
+    ALOGE("%s failed to find active config:%" PRIu32, __FUNCTION__,
+          *mActiveConfigId);
+    return HWC2::Error::Unsupported;
+  }
+
+  const Config& activeConfig = it->second;
+  const uint32_t activeConfigWidth =
+      static_cast<uint32_t>(activeConfig.getAttribute(HWC2::Attribute::Width));
+  const uint32_t activeConfigHeight =
+      static_cast<uint32_t>(activeConfig.getAttribute(HWC2::Attribute::Height));
+  if (width == activeConfigWidth && height == activeConfigHeight &&
+      format == HAL_PIXEL_FORMAT_RGBA_8888 &&
+      dataspace == HAL_DATASPACE_UNKNOWN) {
+    return HWC2::Error::None;
+  }
+
+  return HWC2::Error::None;
+}
+
+// thess EDIDs are carefully generated according to the EDID spec version 1.3,
+// more info can be found from the following file:
+//   frameworks/native/services/surfaceflinger/DisplayHardware/DisplayIdentification.cpp
+// approved pnp ids can be found here: https://uefi.org/pnp_id_list
+// pnp id: GGL, name: EMU_display_0, last byte is checksum
+// display id is local:8141603649153536
+static const uint8_t sEDID0[] = {
+    0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x1c, 0xec, 0x01, 0x00,
+    0x01, 0x00, 0x00, 0x00, 0x1b, 0x10, 0x01, 0x03, 0x80, 0x50, 0x2d, 0x78,
+    0x0a, 0x0d, 0xc9, 0xa0, 0x57, 0x47, 0x98, 0x27, 0x12, 0x48, 0x4c, 0x00,
+    0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38,
+    0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0xfc, 0x00, 0x45, 0x4d, 0x55, 0x5f, 0x64, 0x69, 0x73,
+    0x70, 0x6c, 0x61, 0x79, 0x5f, 0x30, 0x00, 0x4b};
+
+// pnp id: GGL, name: EMU_display_1
+// display id is local:8140900251843329
+static const uint8_t sEDID1[] = {
+    0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x1c, 0xec, 0x01, 0x00,
+    0x01, 0x00, 0x00, 0x00, 0x1b, 0x10, 0x01, 0x03, 0x80, 0x50, 0x2d, 0x78,
+    0x0a, 0x0d, 0xc9, 0xa0, 0x57, 0x47, 0x98, 0x27, 0x12, 0x48, 0x4c, 0x00,
+    0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38,
+    0x2d, 0x40, 0x58, 0x2c, 0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0xfc, 0x00, 0x45, 0x4d, 0x55, 0x5f, 0x64, 0x69, 0x73,
+    0x70, 0x6c, 0x61, 0x79, 0x5f, 0x31, 0x00, 0x3b};
+
+// pnp id: GGL, name: EMU_display_2
+// display id is local:8140940453066754
+static const uint8_t sEDID2[] = {
+    0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x1c, 0xec, 0x01, 0x00,
+    0x01, 0x00, 0x00, 0x00, 0x1b, 0x10, 0x01, 0x03, 0x80, 0x50, 0x2d, 0x78,
+    0x0a, 0x0d, 0xc9, 0xa0, 0x57, 0x47, 0x98, 0x27, 0x12, 0x48, 0x4c, 0x00,
+    0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38,
+    0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x00, 0xfc, 0x00, 0x45, 0x4d, 0x55, 0x5f, 0x64, 0x69, 0x73,
+    0x70, 0x6c, 0x61, 0x79, 0x5f, 0x32, 0x00, 0x49};
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
+
+HWC2::Error Display::getDisplayIdentificationData(uint8_t* outPort,
+                                                  uint32_t* outDataSize,
+                                                  uint8_t* outData) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  if (outPort == nullptr || outDataSize == nullptr) {
+    return HWC2::Error::BadParameter;
+  }
+
+  if (mEdid) {
+    if (outData) {
+      *outDataSize = std::min<uint32_t>(*outDataSize, (*mEdid).size());
+      memcpy(outData, (*mEdid).data(), *outDataSize);
+    } else {
+      *outDataSize = (*mEdid).size();
+    }
+    *outPort = mId;
+    return HWC2::Error::None;
+  }
+
+  // fallback to legacy EDID implementation
+  uint32_t len = std::min(*outDataSize, (uint32_t)ARRAY_SIZE(sEDID0));
+  if (outData != nullptr && len < (uint32_t)ARRAY_SIZE(sEDID0)) {
+    ALOGW("%s: display:%" PRIu64 " small buffer size: %u is specified",
+          __FUNCTION__, mId, len);
+  }
+  *outDataSize = ARRAY_SIZE(sEDID0);
+  switch (mId) {
+    case 0:
+      *outPort = 0;
+      if (outData) memcpy(outData, sEDID0, len);
+      break;
+
+    case 1:
+      *outPort = 1;
+      if (outData) memcpy(outData, sEDID1, len);
+      break;
+
+    case 2:
+      *outPort = 2;
+      if (outData) memcpy(outData, sEDID2, len);
+      break;
+
+    default:
+      *outPort = (uint8_t)mId;
+      if (outData) {
+        memcpy(outData, sEDID2, len);
+        uint32_t size = ARRAY_SIZE(sEDID0);
+        // change the name to EMU_display_<mID>
+        // note the 3rd char from back is the number, _0, _1, _2, etc.
+        if (len >= size - 2) outData[size - 3] = '0' + (uint8_t)mId;
+        if (len >= size) {
+          // update the last byte, which is checksum byte
+          uint8_t checksum = -(uint8_t)std::accumulate(
+              outData, outData + size - 1, static_cast<uint8_t>(0));
+          outData[size - 1] = checksum;
+        }
+      }
+      break;
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::getDisplayCapabilities(uint32_t* outNumCapabilities,
+                                            uint32_t* outCapabilities) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+  if (outNumCapabilities == nullptr) {
+    return HWC2::Error::None;
+  }
+
+  bool brightness_support = false;
+  bool doze_support = false;
+
+  uint32_t count = 1 + (doze_support ? 1 : 0) + (brightness_support ? 1 : 0);
+  int index = 0;
+  if (outCapabilities != nullptr && (*outNumCapabilities >= count)) {
+    outCapabilities[index++] =
+        HWC2_DISPLAY_CAPABILITY_SKIP_CLIENT_COLOR_TRANSFORM;
+    if (doze_support) {
+      outCapabilities[index++] = HWC2_DISPLAY_CAPABILITY_DOZE;
+    }
+    if (brightness_support) {
+      outCapabilities[index++] = HWC2_DISPLAY_CAPABILITY_BRIGHTNESS;
+    }
+  }
+
+  *outNumCapabilities = count;
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::getDisplayBrightnessSupport(bool* out_support) {
+  DEBUG_LOG("%s: display:%" PRIu64, __FUNCTION__, mId);
+
+  *out_support = false;
+  return HWC2::Error::None;
+}
+
+HWC2::Error Display::setDisplayBrightness(float brightness) {
+  DEBUG_LOG("%s: display:%" PRIu64 " brightness %f", __FUNCTION__, mId,
+            brightness);
+
+  ALOGW("TODO: setDisplayBrightness() is not implemented yet: brightness=%f",
+        brightness);
+  return HWC2::Error::Unsupported;
+}
+
+void Display::Config::setAttribute(HWC2::Attribute attribute, int32_t value) {
+  mAttributes[attribute] = value;
+}
+
+int32_t Display::Config::getAttribute(HWC2::Attribute attribute) const {
+  if (mAttributes.count(attribute) == 0) {
+    return -1;
+  }
+  return mAttributes.at(attribute);
+}
+
+std::string Display::Config::toString() const {
+  std::string output;
+
+  auto widthIt = mAttributes.find(HWC2::Attribute::Width);
+  if (widthIt != mAttributes.end()) {
+    output += " w:" + std::to_string(widthIt->second);
+  }
+
+  auto heightIt = mAttributes.find(HWC2::Attribute::Height);
+  if (heightIt != mAttributes.end()) {
+    output += " h:" + std::to_string(heightIt->second);
+  }
+
+  auto vsyncIt = mAttributes.find(HWC2::Attribute::VsyncPeriod);
+  if (vsyncIt != mAttributes.end()) {
+    output += " vsync:" + std::to_string(1e9 / vsyncIt->second);
+  }
+
+  auto dpiXIt = mAttributes.find(HWC2::Attribute::DpiX);
+  if (dpiXIt != mAttributes.end()) {
+    output += " dpi-x:" + std::to_string(dpiXIt->second / 1000.0f);
+  }
+
+  auto dpiYIt = mAttributes.find(HWC2::Attribute::DpiY);
+  if (dpiYIt != mAttributes.end()) {
+    output += " dpi-y:" + std::to_string(dpiYIt->second / 1000.0f);
+  }
+
+  return output;
+}
+
+// VsyncThread function
+bool Display::VsyncThread::threadLoop() {
+  struct timespec rt;
+  if (clock_gettime(CLOCK_MONOTONIC, &rt) == -1) {
+    ALOGE("%s: error in vsync thread clock_gettime: %s", __FUNCTION__,
+          strerror(errno));
+    return true;
+  }
+  const int logInterval = 60;
+  int64_t lastLogged = rt.tv_sec;
+  int sent = 0;
+  int lastSent = 0;
+  bool vsyncEnabled = false;
+
+  struct timespec wait_time;
+  wait_time.tv_sec = 0;
+  wait_time.tv_nsec = mDisplay.mVsyncPeriod;
+  const int64_t kOneRefreshNs = mDisplay.mVsyncPeriod;
+  const int64_t kOneSecondNs = 1000ULL * 1000ULL * 1000ULL;
+  int64_t lastTimeNs = -1;
+  int64_t phasedWaitNs = 0;
+  int64_t currentNs = 0;
+
+  while (true) {
+    clock_gettime(CLOCK_MONOTONIC, &rt);
+    currentNs = rt.tv_nsec + rt.tv_sec * kOneSecondNs;
+
+    if (lastTimeNs < 0) {
+      phasedWaitNs = currentNs + kOneRefreshNs;
+    } else {
+      phasedWaitNs =
+          kOneRefreshNs * ((currentNs - lastTimeNs) / kOneRefreshNs + 1) +
+          lastTimeNs;
+    }
+
+    wait_time.tv_sec = phasedWaitNs / kOneSecondNs;
+    wait_time.tv_nsec = phasedWaitNs - wait_time.tv_sec * kOneSecondNs;
+
+    int ret;
+    do {
+      ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &wait_time, NULL);
+    } while (ret == -1 && errno == EINTR);
+
+    lastTimeNs = phasedWaitNs;
+
+    std::unique_lock<std::recursive_mutex> lock(mDisplay.mStateMutex);
+    vsyncEnabled = (mDisplay.mVsyncEnabled == HWC2::Vsync::Enable);
+    lock.unlock();
+
+    if (!vsyncEnabled) {
+      continue;
+    }
+
+    lock.lock();
+    const auto& callbackInfo =
+        mDisplay.mDevice.mCallbacks[HWC2::Callback::Vsync];
+    auto vsync = reinterpret_cast<HWC2_PFN_VSYNC>(callbackInfo.pointer);
+    lock.unlock();
+
+    if (vsync) {
+      DEBUG_LOG("%s: display:%" PRIu64 " calling vsync", __FUNCTION__,
+                mDisplay.mId);
+      vsync(callbackInfo.data, mDisplay.mId, lastTimeNs);
+    }
+
+    int64_t lastSentInterval = rt.tv_sec - lastLogged;
+    if (lastSentInterval >= logInterval) {
+      DEBUG_LOG("sent %d syncs in %" PRId64 "s", sent - lastSent,
+                lastSentInterval);
+      lastLogged = rt.tv_sec;
+      lastSent = sent;
+    }
+    ++sent;
+  }
+  return false;
+}
+
+}  // namespace android
diff --git a/system/hwc2/Display.h b/system/hwc2/Display.h
new file mode 100644
index 0000000..418e26c
--- /dev/null
+++ b/system/hwc2/Display.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HWC_DISPLAY_H
+#define ANDROID_HWC_DISPLAY_H
+
+#include <utils/Thread.h>
+
+#include <optional>
+#include <set>
+#include <thread>
+#include <unordered_map>
+#include <vector>
+
+#include "Common.h"
+#include "Composer.h"
+#include "FencedBuffer.h"
+#include "Layer.h"
+
+namespace android {
+
+class Composer;
+class Device;
+
+class Display {
+ public:
+  Display(Device& device, Composer* composer, hwc2_display_t id);
+  ~Display();
+
+  Display(const Display& display) = delete;
+  Display& operator=(const Display& display) = delete;
+
+  Display(Display&& display) = delete;
+  Display& operator=(Display&& display) = delete;
+
+  HWC2::Error init(uint32_t width, uint32_t height, uint32_t dpiX,
+                   uint32_t dpiY, uint32_t refreshRateHz,
+                   const std::optional<std::vector<uint8_t>>& edid = std::nullopt);
+
+  HWC2::Error updateParameters(uint32_t width, uint32_t height, uint32_t dpiX,
+                               uint32_t dpiY, uint32_t refreshRateHz,
+                               const std::optional<std::vector<uint8_t>>& edid
+                                   = std::nullopt);
+
+  hwc2_display_t getId() const { return mId; }
+
+  Layer* getLayer(hwc2_layer_t layerHandle);
+
+  FencedBuffer& getClientTarget() { return mClientTarget; }
+  buffer_handle_t waitAndGetClientTargetBuffer();
+
+  const std::vector<Layer*>& getOrderedLayers() { return mOrderedLayers; }
+
+  HWC2::Error acceptChanges();
+  HWC2::Error createLayer(hwc2_layer_t* outLayerId);
+  HWC2::Error destroyLayer(hwc2_layer_t layerId);
+  HWC2::Error getActiveConfig(hwc2_config_t* outConfigId);
+  HWC2::Error getDisplayAttribute(hwc2_config_t configId, int32_t attribute,
+                                  int32_t* outValue);
+  HWC2::Error getDisplayAttributeEnum(hwc2_config_t configId,
+                                      HWC2::Attribute attribute,
+                                      int32_t* outValue);
+  HWC2::Error getChangedCompositionTypes(uint32_t* outNumElements,
+                                         hwc2_layer_t* outLayers,
+                                         int32_t* outTypes);
+  HWC2::Error getColorModes(uint32_t* outNumModes, int32_t* outModes);
+  HWC2::Error getConfigs(uint32_t* outNumConfigs, hwc2_config_t* outConfigIds);
+  HWC2::Error getDozeSupport(int32_t* outSupport);
+  HWC2::Error getHdrCapabilities(uint32_t* outNumTypes, int32_t* outTypes,
+                                 float* outMaxLuminance,
+                                 float* outMaxAverageLuminance,
+                                 float* outMinLuminance);
+  HWC2::Error getName(uint32_t* outSize, char* outName);
+  HWC2::Error addReleaseFenceLocked(int32_t fence);
+  HWC2::Error addReleaseLayerLocked(hwc2_layer_t layerId);
+  HWC2::Error getReleaseFences(uint32_t* outNumElements,
+                               hwc2_layer_t* outLayers, int32_t* outFences);
+  HWC2::Error clearReleaseFencesAndIdsLocked();
+  HWC2::Error getRequests(int32_t* outDisplayRequests, uint32_t* outNumElements,
+                          hwc2_layer_t* outLayers, int32_t* outLayerRequests);
+  HWC2::Error getType(int32_t* outType);
+  HWC2::Error present(int32_t* outRetireFence);
+  HWC2::Error setActiveConfig(hwc2_config_t configId);
+  HWC2::Error setClientTarget(buffer_handle_t target, int32_t acquireFence,
+                              int32_t dataspace, hwc_region_t damage);
+  HWC2::Error setColorMode(int32_t mode);
+  HWC2::Error setColorTransform(const float* matrix, int32_t hint);
+  bool hasColorTransform() const { return mSetColorTransform; }
+  HWC2::Error setOutputBuffer(buffer_handle_t buffer, int32_t releaseFence);
+  HWC2::Error setPowerMode(int32_t mode);
+  HWC2::Error setVsyncEnabled(int32_t enabled);
+  HWC2::Error setVsyncPeriod(uint32_t period);
+  HWC2::Error validate(uint32_t* outNumTypes, uint32_t* outNumRequests);
+  HWC2::Error updateLayerZ(hwc2_layer_t layerId, uint32_t z);
+  HWC2::Error getClientTargetSupport(uint32_t width, uint32_t height,
+                                     int32_t format, int32_t dataspace);
+  HWC2::Error getDisplayIdentificationData(uint8_t* outPort,
+                                           uint32_t* outDataSize,
+                                           uint8_t* outData);
+  HWC2::Error getDisplayCapabilities(uint32_t* outNumCapabilities,
+                                     uint32_t* outCapabilities);
+  HWC2::Error getDisplayBrightnessSupport(bool* out_support);
+  HWC2::Error setDisplayBrightness(float brightness);
+  void lock() { mStateMutex.lock(); }
+  void unlock() { mStateMutex.unlock(); }
+
+ private:
+  class Config {
+   public:
+    Config(hwc2_config_t configId) : mId(configId) {}
+
+    Config(const Config& display) = default;
+    Config& operator=(const Config& display) = default;
+
+    Config(Config&& display) = default;
+    Config& operator=(Config&& display) = default;
+
+    hwc2_config_t getId() const { return mId; }
+    void setId(hwc2_config_t id) { mId = id; }
+
+    int32_t getAttribute(HWC2::Attribute attribute) const;
+    void setAttribute(HWC2::Attribute attribute, int32_t value);
+
+    std::string toString() const;
+
+   private:
+    hwc2_config_t mId;
+    std::unordered_map<HWC2::Attribute, int32_t> mAttributes;
+  };
+
+  // Stores changes requested from the device upon calling prepare().
+  // Handles change request to:
+  //   - Layer composition type.
+  //   - Layer hints.
+  class Changes {
+   public:
+    uint32_t getNumTypes() const {
+      return static_cast<uint32_t>(mTypeChanges.size());
+    }
+
+    uint32_t getNumLayerRequests() const {
+      return static_cast<uint32_t>(mLayerRequests.size());
+    }
+
+    const std::unordered_map<hwc2_layer_t, HWC2::Composition>& getTypeChanges()
+        const {
+      return mTypeChanges;
+    }
+
+    const std::unordered_map<hwc2_layer_t, HWC2::LayerRequest>&
+    getLayerRequests() const {
+      return mLayerRequests;
+    }
+
+    void addTypeChange(hwc2_layer_t layerId, HWC2::Composition type) {
+      mTypeChanges.insert({layerId, type});
+    }
+
+    void clearTypeChanges() { mTypeChanges.clear(); }
+
+    void addLayerRequest(hwc2_layer_t layerId, HWC2::LayerRequest request) {
+      mLayerRequests.insert({layerId, request});
+    }
+
+   private:
+    std::unordered_map<hwc2_layer_t, HWC2::Composition> mTypeChanges;
+    std::unordered_map<hwc2_layer_t, HWC2::LayerRequest> mLayerRequests;
+  };
+
+  // Generate sw vsync signal
+  class VsyncThread : public Thread {
+   public:
+    VsyncThread(Display& display) : mDisplay(display) {}
+    virtual ~VsyncThread() {}
+
+    VsyncThread(const VsyncThread&) = default;
+    VsyncThread& operator=(const VsyncThread&) = default;
+
+    VsyncThread(VsyncThread&&) = default;
+    VsyncThread& operator=(VsyncThread&&) = default;
+
+   private:
+    Display& mDisplay;
+    bool threadLoop() final;
+  };
+
+ private:
+  // The state of this display should only be modified from
+  // SurfaceFlinger's main loop, with the exception of when dump is
+  // called. To prevent a bad state from crashing us during a dump
+  // call, all public calls into Display must acquire this mutex.
+  mutable std::recursive_mutex mStateMutex;
+
+  Device& mDevice;
+  Composer* mComposer = nullptr;
+  const hwc2_display_t mId;
+  std::string mName;
+  HWC2::DisplayType mType = HWC2::DisplayType::Physical;
+  HWC2::PowerMode mPowerMode = HWC2::PowerMode::Off;
+  HWC2::Vsync mVsyncEnabled = HWC2::Vsync::Invalid;
+  uint32_t mVsyncPeriod;
+  sp<VsyncThread> mVsyncThread;
+  FencedBuffer mClientTarget;
+  // Will only be non-null after the Display has been validated and
+  // before it has been presented
+  std::unique_ptr<Changes> mChanges;
+
+  std::unordered_map<hwc2_layer_t, std::unique_ptr<Layer>> mLayers;
+  // Ordered layers available after validate().
+  std::vector<Layer*> mOrderedLayers;
+
+  std::vector<hwc2_display_t> mReleaseLayerIds;
+  std::vector<int32_t> mReleaseFences;
+  std::optional<hwc2_config_t> mActiveConfigId;
+  std::unordered_map<hwc2_config_t, Config> mConfigs;
+  std::set<android_color_mode_t> mColorModes;
+  android_color_mode_t mActiveColorMode;
+  bool mSetColorTransform = false;
+  std::optional<std::vector<uint8_t>> mEdid;
+};
+
+}  // namespace android
+
+#endif
diff --git a/system/hwc2/Drm.cpp b/system/hwc2/Drm.cpp
new file mode 100644
index 0000000..4b27fa2
--- /dev/null
+++ b/system/hwc2/Drm.cpp
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Drm.h"
+
+#include <drm_fourcc.h>
+#include <log/log.h>
+#include <system/graphics.h>
+
+namespace android {
+
+const char* GetDrmFormatString(uint32_t drm_format) {
+  switch (drm_format) {
+    case DRM_FORMAT_ABGR1555:
+      return "DRM_FORMAT_ABGR1555";
+    case DRM_FORMAT_ABGR2101010:
+      return "DRM_FORMAT_ABGR2101010";
+    case DRM_FORMAT_ABGR4444:
+      return "DRM_FORMAT_ABGR4444";
+    case DRM_FORMAT_ABGR8888:
+      return "DRM_FORMAT_ABGR8888";
+    case DRM_FORMAT_ARGB1555:
+      return "DRM_FORMAT_ARGB1555";
+    case DRM_FORMAT_ARGB2101010:
+      return "DRM_FORMAT_ARGB2101010";
+    case DRM_FORMAT_ARGB4444:
+      return "DRM_FORMAT_ARGB4444";
+    case DRM_FORMAT_ARGB8888:
+      return "DRM_FORMAT_ARGB8888";
+    case DRM_FORMAT_AYUV:
+      return "DRM_FORMAT_AYUV";
+    case DRM_FORMAT_BGR233:
+      return "DRM_FORMAT_BGR233";
+    case DRM_FORMAT_BGR565:
+      return "DRM_FORMAT_BGR565";
+    case DRM_FORMAT_BGR888:
+      return "DRM_FORMAT_BGR888";
+    case DRM_FORMAT_BGRA1010102:
+      return "DRM_FORMAT_BGRA1010102";
+    case DRM_FORMAT_BGRA4444:
+      return "DRM_FORMAT_BGRA4444";
+    case DRM_FORMAT_BGRA5551:
+      return "DRM_FORMAT_BGRA5551";
+    case DRM_FORMAT_BGRA8888:
+      return "DRM_FORMAT_BGRA8888";
+    case DRM_FORMAT_BGRX1010102:
+      return "DRM_FORMAT_BGRX1010102";
+    case DRM_FORMAT_BGRX4444:
+      return "DRM_FORMAT_BGRX4444";
+    case DRM_FORMAT_BGRX5551:
+      return "DRM_FORMAT_BGRX5551";
+    case DRM_FORMAT_BGRX8888:
+      return "DRM_FORMAT_BGRX8888";
+    case DRM_FORMAT_C8:
+      return "DRM_FORMAT_C8";
+    case DRM_FORMAT_GR88:
+      return "DRM_FORMAT_GR88";
+    case DRM_FORMAT_NV12:
+      return "DRM_FORMAT_NV12";
+    case DRM_FORMAT_NV21:
+      return "DRM_FORMAT_NV21";
+    case DRM_FORMAT_R8:
+      return "DRM_FORMAT_R8";
+    case DRM_FORMAT_RG88:
+      return "DRM_FORMAT_RG88";
+    case DRM_FORMAT_RGB332:
+      return "DRM_FORMAT_RGB332";
+    case DRM_FORMAT_RGB565:
+      return "DRM_FORMAT_RGB565";
+    case DRM_FORMAT_RGB888:
+      return "DRM_FORMAT_RGB888";
+    case DRM_FORMAT_RGBA1010102:
+      return "DRM_FORMAT_RGBA1010102";
+    case DRM_FORMAT_RGBA4444:
+      return "DRM_FORMAT_RGBA4444";
+    case DRM_FORMAT_RGBA5551:
+      return "DRM_FORMAT_RGBA5551";
+    case DRM_FORMAT_RGBA8888:
+      return "DRM_FORMAT_RGBA8888";
+    case DRM_FORMAT_RGBX1010102:
+      return "DRM_FORMAT_RGBX1010102";
+    case DRM_FORMAT_RGBX4444:
+      return "DRM_FORMAT_RGBX4444";
+    case DRM_FORMAT_RGBX5551:
+      return "DRM_FORMAT_RGBX5551";
+    case DRM_FORMAT_RGBX8888:
+      return "DRM_FORMAT_RGBX8888";
+    case DRM_FORMAT_UYVY:
+      return "DRM_FORMAT_UYVY";
+    case DRM_FORMAT_VYUY:
+      return "DRM_FORMAT_VYUY";
+    case DRM_FORMAT_XBGR1555:
+      return "DRM_FORMAT_XBGR1555";
+    case DRM_FORMAT_XBGR2101010:
+      return "DRM_FORMAT_XBGR2101010";
+    case DRM_FORMAT_XBGR4444:
+      return "DRM_FORMAT_XBGR4444";
+    case DRM_FORMAT_XBGR8888:
+      return "DRM_FORMAT_XBGR8888";
+    case DRM_FORMAT_XRGB1555:
+      return "DRM_FORMAT_XRGB1555";
+    case DRM_FORMAT_XRGB2101010:
+      return "DRM_FORMAT_XRGB2101010";
+    case DRM_FORMAT_XRGB4444:
+      return "DRM_FORMAT_XRGB4444";
+    case DRM_FORMAT_XRGB8888:
+      return "DRM_FORMAT_XRGB8888";
+    case DRM_FORMAT_YUYV:
+      return "DRM_FORMAT_YUYV";
+    case DRM_FORMAT_YVU420:
+      return "DRM_FORMAT_YVU420";
+    case DRM_FORMAT_YVYU:
+      return "DRM_FORMAT_YVYU";
+  }
+  return "Unknown";
+}
+
+int GetDrmFormatBytesPerPixel(uint32_t drm_format) {
+  switch (drm_format) {
+    case DRM_FORMAT_ABGR8888:
+    case DRM_FORMAT_ARGB8888:
+    case DRM_FORMAT_XBGR8888:
+      return 4;
+    case DRM_FORMAT_BGR888:
+      return 3;
+    case DRM_FORMAT_RGB565:
+    case DRM_FORMAT_YVU420:
+#ifdef GRALLOC_MODULE_API_VERSION_0_2
+    case DRM_FORMAT_FLEX_YCbCr_420_888:
+#endif
+      return 2;
+    case DRM_FORMAT_R8:
+      return 1;
+  }
+  ALOGE("%s: format size unknown %d(%s)", __FUNCTION__, drm_format,
+        GetDrmFormatString(drm_format));
+  return 8;
+}
+
+int GetDrmFormatFromHalFormat(int hal_format) {
+  switch (hal_format) {
+    case HAL_PIXEL_FORMAT_RGBA_FP16:
+      return DRM_FORMAT_ABGR16161616F;
+    case HAL_PIXEL_FORMAT_RGBA_8888:
+      return DRM_FORMAT_ABGR8888;
+    case HAL_PIXEL_FORMAT_RGBX_8888:
+      return DRM_FORMAT_XBGR8888;
+    case HAL_PIXEL_FORMAT_BGRA_8888:
+      return DRM_FORMAT_ARGB8888;
+    case HAL_PIXEL_FORMAT_RGB_888:
+      return DRM_FORMAT_BGR888;
+    case HAL_PIXEL_FORMAT_RGB_565:
+      return DRM_FORMAT_BGR565;
+    case HAL_PIXEL_FORMAT_YV12:
+      return DRM_FORMAT_YVU420;
+    case HAL_PIXEL_FORMAT_YCbCr_420_888:
+      return DRM_FORMAT_YVU420;
+    case HAL_PIXEL_FORMAT_BLOB:
+      return DRM_FORMAT_R8;
+    default:
+      break;
+  }
+  ALOGE("%s unhandled hal format: %d", __FUNCTION__, hal_format);
+  return 0;
+}
+
+}  // namespace android
\ No newline at end of file
diff --git a/system/hwc2/Drm.h b/system/hwc2/Drm.h
new file mode 100644
index 0000000..9bc18b3
--- /dev/null
+++ b/system/hwc2/Drm.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ANDROID_HWC_DRM_H
+#define ANDROID_HWC_DRM_H
+
+#include <cstdlib>
+
+namespace android {
+
+const char* GetDrmFormatString(uint32_t drm_format);
+
+int GetDrmFormatBytesPerPixel(uint32_t drm_format);
+
+int GetDrmFormatFromHalFormat(int hal_format);
+
+}  // namespace android
+
+#endif
\ No newline at end of file
diff --git a/system/hwc2/DrmPresenter.cpp b/system/hwc2/DrmPresenter.cpp
new file mode 100644
index 0000000..4d63c58
--- /dev/null
+++ b/system/hwc2/DrmPresenter.cpp
@@ -0,0 +1,641 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DrmPresenter.h"
+
+#include <cros_gralloc_handle.h>
+#include <linux/netlink.h>
+#include <sys/socket.h>
+
+using android::base::guest::AutoReadLock;
+using android::base::guest::AutoWriteLock;
+using android::base::guest::ReadWriteLock;
+
+namespace android {
+
+bool DrmPresenter::init(const HotplugCallback& cb) {
+  DEBUG_LOG("%s", __FUNCTION__);
+
+  mHotplugCallback = cb;
+  mFd = android::base::unique_fd(open("/dev/dri/card0", O_RDWR | O_CLOEXEC));
+  if (mFd < 0) {
+    ALOGE("%s HWC2::Error opening DrmPresenter device: %d", __FUNCTION__,
+          errno);
+    return false;
+  }
+
+  int univRet = drmSetClientCap(mFd.get(), DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
+  if (univRet) {
+    ALOGE("%s: fail to set universal plane %d\n", __FUNCTION__, univRet);
+    return false;
+  }
+
+  int atomicRet = drmSetClientCap(mFd.get(), DRM_CLIENT_CAP_ATOMIC, 1);
+  if (atomicRet) {
+    ALOGE("%s: fail to set atomic operation %d, %d\n", __FUNCTION__, atomicRet,
+          errno);
+    return false;
+  }
+
+  {
+    AutoWriteLock lock(mStateMutex);
+    bool initDrmRet = initDrmElementsLocked();
+    if (initDrmRet) {
+      ALOGD("%s: Successfully initialized DRM backend", __FUNCTION__);
+    } else {
+      ALOGE("%s: Failed to initialize DRM backend", __FUNCTION__);
+      return false;
+    }
+  }
+
+  mDrmEventListener = sp<DrmEventListener>::make(*this);
+  if (mDrmEventListener->init()) {
+    ALOGD("%s: Successfully initialized DRM event listener", __FUNCTION__);
+  } else {
+    ALOGE("%s: Failed to initialize DRM event listener", __FUNCTION__);
+  }
+  mDrmEventListener->run("", ANDROID_PRIORITY_URGENT_DISPLAY);
+
+  return true;
+}
+
+bool DrmPresenter::initDrmElementsLocked() {
+  drmModeRes* res;
+  static const int32_t kUmPerInch = 25400;
+
+  res = drmModeGetResources(mFd.get());
+  if (res == nullptr) {
+    ALOGE("%s HWC2::Error reading drm resources: %d", __FUNCTION__, errno);
+    mFd.reset();
+    return false;
+  }
+
+  ALOGD(
+      "drmModeRes count fbs %d crtc %d connector %d encoder %d min w %d max w "
+      "%d min h %d max h %d",
+      res->count_fbs, res->count_crtcs, res->count_connectors,
+      res->count_encoders, res->min_width, res->max_width, res->min_height,
+      res->max_height);
+
+  for (uint32_t i = 0; i < res->count_crtcs; i++) {
+    DrmCrtc crtc = {};
+
+    drmModeCrtcPtr c = drmModeGetCrtc(mFd.get(), res->crtcs[i]);
+    crtc.mId = c->crtc_id;
+
+    drmModeObjectPropertiesPtr crtcProps =
+        drmModeObjectGetProperties(mFd.get(), c->crtc_id, DRM_MODE_OBJECT_CRTC);
+
+    for (uint32_t crtcPropsIndex = 0; crtcPropsIndex < crtcProps->count_props;
+         crtcPropsIndex++) {
+      drmModePropertyPtr crtcProp =
+          drmModeGetProperty(mFd.get(), crtcProps->props[crtcPropsIndex]);
+
+      if (!strcmp(crtcProp->name, "OUT_FENCE_PTR")) {
+        crtc.mFencePropertyId = crtcProp->prop_id;
+      } else if (!strcmp(crtcProp->name, "ACTIVE")) {
+        crtc.mActivePropertyId = crtcProp->prop_id;
+      } else if (!strcmp(crtcProp->name, "MODE_ID")) {
+        crtc.mModePropertyId = crtcProp->prop_id;
+      }
+
+      drmModeFreeProperty(crtcProp);
+    }
+
+    drmModeFreeObjectProperties(crtcProps);
+
+    mCrtcs.push_back(crtc);
+  }
+
+  drmModePlaneResPtr planeRes = drmModeGetPlaneResources(mFd.get());
+  for (uint32_t i = 0; i < planeRes->count_planes; ++i) {
+    DrmPlane plane = {};
+
+    drmModePlanePtr p = drmModeGetPlane(mFd.get(), planeRes->planes[i]);
+    plane.mId = p->plane_id;
+
+    ALOGD(
+        "%s: plane id: %u crtcid %u fbid %u crtc xy %d %d xy %d %d "
+        "possible ctrcs 0x%x",
+        __FUNCTION__, p->plane_id, p->crtc_id, p->fb_id, p->crtc_x, p->crtc_y,
+        p->x, p->y, p->possible_crtcs);
+
+    drmModeObjectPropertiesPtr planeProps =
+        drmModeObjectGetProperties(mFd.get(), plane.mId, DRM_MODE_OBJECT_PLANE);
+
+    for (uint32_t planePropIndex = 0; planePropIndex < planeProps->count_props;
+         ++planePropIndex) {
+      drmModePropertyPtr planeProp =
+          drmModeGetProperty(mFd.get(), planeProps->props[planePropIndex]);
+
+      if (!strcmp(planeProp->name, "CRTC_ID")) {
+        plane.mCrtcPropertyId = planeProp->prop_id;
+      } else if (!strcmp(planeProp->name, "FB_ID")) {
+        plane.mFbPropertyId = planeProp->prop_id;
+      } else if (!strcmp(planeProp->name, "CRTC_X")) {
+        plane.mCrtcXPropertyId = planeProp->prop_id;
+      } else if (!strcmp(planeProp->name, "CRTC_Y")) {
+        plane.mCrtcYPropertyId = planeProp->prop_id;
+      } else if (!strcmp(planeProp->name, "CRTC_W")) {
+        plane.mCrtcWPropertyId = planeProp->prop_id;
+      } else if (!strcmp(planeProp->name, "CRTC_H")) {
+        plane.mCrtcHPropertyId = planeProp->prop_id;
+      } else if (!strcmp(planeProp->name, "SRC_X")) {
+        plane.mSrcXPropertyId = planeProp->prop_id;
+      } else if (!strcmp(planeProp->name, "SRC_Y")) {
+        plane.mSrcYPropertyId = planeProp->prop_id;
+      } else if (!strcmp(planeProp->name, "SRC_W")) {
+        plane.mSrcWPropertyId = planeProp->prop_id;
+      } else if (!strcmp(planeProp->name, "SRC_H")) {
+        plane.mSrcHPropertyId = planeProp->prop_id;
+      } else if (!strcmp(planeProp->name, "type")) {
+        plane.mTypePropertyId = planeProp->prop_id;
+        uint64_t type = planeProp->values[0];
+        switch (type) {
+          case DRM_PLANE_TYPE_OVERLAY:
+            plane.mType = type;
+            ALOGD("%s: plane %" PRIu32 " is DRM_PLANE_TYPE_OVERLAY",
+                  __FUNCTION__, plane.mId);
+            break;
+          case DRM_PLANE_TYPE_PRIMARY:
+            plane.mType = type;
+            ALOGD("%s: plane %" PRIu32 " is DRM_PLANE_TYPE_PRIMARY",
+                  __FUNCTION__, plane.mId);
+            break;
+          default:
+            break;
+        }
+      }
+
+      drmModeFreeProperty(planeProp);
+    }
+
+    drmModeFreeObjectProperties(planeProps);
+
+    bool isPrimaryOrOverlay = plane.mType == DRM_PLANE_TYPE_OVERLAY ||
+                              plane.mType == DRM_PLANE_TYPE_PRIMARY;
+    if (isPrimaryOrOverlay) {
+      for (uint32_t j = 0; j < mCrtcs.size(); j++) {
+        if ((0x1 << j) & p->possible_crtcs) {
+          ALOGD("%s: plane %" PRIu32 " compatible with crtc mask %" PRIu32,
+                __FUNCTION__, plane.mId, p->possible_crtcs);
+          if (mCrtcs[j].mPlaneId == -1) {
+            mCrtcs[j].mPlaneId = plane.mId;
+            ALOGD("%s: plane %" PRIu32 " associated with crtc %" PRIu32,
+                  __FUNCTION__, plane.mId, j);
+            break;
+          }
+        }
+      }
+    }
+
+    drmModeFreePlane(p);
+    mPlanes[plane.mId] = plane;
+  }
+  drmModeFreePlaneResources(planeRes);
+
+  for (uint32_t i = 0; i < res->count_connectors; ++i) {
+    DrmConnector connector = {};
+    connector.mId = res->connectors[i];
+
+    {
+      drmModeObjectPropertiesPtr connectorProps = drmModeObjectGetProperties(
+          mFd.get(), connector.mId, DRM_MODE_OBJECT_CONNECTOR);
+
+      for (uint32_t connectorPropIndex = 0;
+           connectorPropIndex < connectorProps->count_props;
+           ++connectorPropIndex) {
+        drmModePropertyPtr connectorProp = drmModeGetProperty(
+            mFd.get(), connectorProps->props[connectorPropIndex]);
+        if (!strcmp(connectorProp->name, "CRTC_ID")) {
+          connector.mCrtcPropertyId = connectorProp->prop_id;
+        } else if (!strcmp(connectorProp->name, "EDID")) {
+          connector.mEdidBlobId = connectorProps->prop_values[connectorPropIndex];
+        }
+        drmModeFreeProperty(connectorProp);
+      }
+
+      drmModeFreeObjectProperties(connectorProps);
+    }
+    {
+      drmModeConnector* c = drmModeGetConnector(mFd.get(), connector.mId);
+      if (c == nullptr) {
+        ALOGE("%s: Failed to get connector %" PRIu32 ": %d", __FUNCTION__,
+              connector.mId, errno);
+        return false;
+      }
+      connector.connection = c->connection;
+      if (c->count_modes > 0) {
+        memcpy(&connector.mMode, &c->modes[0], sizeof(drmModeModeInfo));
+        drmModeCreatePropertyBlob(mFd.get(), &connector.mMode,
+                                  sizeof(connector.mMode),
+                                  &connector.mModeBlobId);
+
+        // Dots per 1000 inches
+        connector.dpiX =
+            c->mmWidth ? (c->modes[0].hdisplay * kUmPerInch) / (c->mmWidth)
+                       : -1;
+        // Dots per 1000 inches
+        connector.dpiY =
+            c->mmHeight ? (c->modes[0].vdisplay * kUmPerInch) / (c->mmHeight)
+                        : -1;
+      }
+      ALOGD("%s connector %" PRIu32 " dpiX %" PRIi32 " dpiY %" PRIi32
+            " connection %d",
+            __FUNCTION__, connector.mId, connector.dpiX, connector.dpiY,
+            connector.connection);
+
+      drmModeFreeConnector(c);
+
+      connector.mRefreshRateAsFloat =
+          1000.0f * connector.mMode.clock /
+          ((float)connector.mMode.vtotal * (float)connector.mMode.htotal);
+      connector.mRefreshRateAsInteger =
+          (uint32_t)(connector.mRefreshRateAsFloat + 0.5f);
+    }
+
+    mConnectors.push_back(connector);
+  }
+
+  drmModeFreeResources(res);
+  return true;
+}
+
+void DrmPresenter::resetDrmElementsLocked() {
+  for (auto& c : mConnectors) {
+    if (c.mModeBlobId) {
+      if (drmModeDestroyPropertyBlob(mFd.get(), c.mModeBlobId)) {
+        ALOGE("%s: Error destroy PropertyBlob %" PRIu32, __func__,
+              c.mModeBlobId);
+      }
+    }
+  }
+  mConnectors.clear();
+  mCrtcs.clear();
+  mPlanes.clear();
+}
+
+int DrmPresenter::getDrmFB(hwc_drm_bo_t& bo) {
+  int ret = drmPrimeFDToHandle(mFd.get(), bo.prime_fds[0], &bo.gem_handles[0]);
+  if (ret) {
+    ALOGE("%s: drmPrimeFDToHandle failed: %s (errno %d)", __FUNCTION__,
+          strerror(errno), errno);
+    return -1;
+  }
+  ret = drmModeAddFB2(mFd.get(), bo.width, bo.height, bo.format, bo.gem_handles,
+                      bo.pitches, bo.offsets, &bo.fb_id, 0);
+  if (ret) {
+    ALOGE("%s: drmModeAddFB2 failed: %s (errno %d)", __FUNCTION__,
+          strerror(errno), errno);
+    return -1;
+  }
+  return 0;
+}
+
+int DrmPresenter::clearDrmFB(hwc_drm_bo_t& bo) {
+  int ret = 0;
+  if (bo.fb_id) {
+    if (drmModeRmFB(mFd.get(), bo.fb_id)) {
+      ALOGE("%s: drmModeRmFB failed: %s (errno %d)", __FUNCTION__,
+            strerror(errno), errno);
+    }
+    ret = -1;
+  }
+  if (bo.gem_handles[0]) {
+    struct drm_gem_close gem_close = {};
+    gem_close.handle = bo.gem_handles[0];
+    if (drmIoctl(mFd.get(), DRM_IOCTL_GEM_CLOSE, &gem_close)) {
+      ALOGE("%s: DRM_IOCTL_GEM_CLOSE failed: %s (errno %d)", __FUNCTION__,
+            strerror(errno), errno);
+    }
+    ret = -1;
+  }
+  ALOGV("%s: drm FB %d", __FUNCTION__, bo.fb_id);
+  return ret;
+}
+
+bool DrmPresenter::handleHotplug() {
+  std::vector<DrmConnector> oldConnectors(mConnectors);
+  {
+    AutoReadLock lock(mStateMutex);
+    oldConnectors.assign(mConnectors.begin(), mConnectors.end());
+  }
+  {
+    AutoWriteLock lock(mStateMutex);
+    resetDrmElementsLocked();
+    if (!initDrmElementsLocked()) {
+      ALOGE(
+          "%s: failed to initialize drm elements during hotplug. Displays may "
+          "not function correctly!",
+          __FUNCTION__);
+      return false;
+    }
+  }
+
+  AutoReadLock lock(mStateMutex);
+  for (int i = 0; i < mConnectors.size(); i++) {
+    bool changed =
+        oldConnectors[i].dpiX != mConnectors[i].dpiX ||
+        oldConnectors[i].dpiY != mConnectors[i].dpiY ||
+        oldConnectors[i].connection != mConnectors[i].connection ||
+        oldConnectors[i].mMode.hdisplay != mConnectors[i].mMode.hdisplay ||
+        oldConnectors[i].mMode.vdisplay != mConnectors[i].mMode.vdisplay;
+    if (changed) {
+      if (i == 0) {
+        ALOGE(
+            "%s: Ignoring changes to display:0 which is not configurable by "
+            "multi-display interface.",
+            __FUNCTION__);
+        continue;
+      }
+
+      bool connected =
+          mConnectors[i].connection == DRM_MODE_CONNECTED ? true : false;
+      if (mHotplugCallback) {
+        mHotplugCallback(connected, i, mConnectors[i].mMode.hdisplay,
+                         mConnectors[i].mMode.vdisplay, mConnectors[i].dpiX,
+                         mConnectors[i].dpiY,
+                         mConnectors[i].mRefreshRateAsInteger);
+      }
+    }
+  }
+  return true;
+}
+
+HWC2::Error DrmPresenter::flushToDisplay(int display, hwc_drm_bo_t& bo,
+                                         int* outSyncFd) {
+  AutoReadLock lock(mStateMutex);
+
+  DrmConnector& connector = mConnectors[display];
+  DrmCrtc& crtc = mCrtcs[display];
+
+  HWC2::Error error = HWC2::Error::None;
+
+  *outSyncFd = -1;
+
+  drmModeAtomicReqPtr pset = drmModeAtomicAlloc();
+
+  int ret;
+
+  if (!crtc.mDidSetCrtc) {
+    DEBUG_LOG("%s: Setting crtc.\n", __FUNCTION__);
+    ret = drmModeAtomicAddProperty(pset, crtc.mId, crtc.mActivePropertyId, 1);
+    if (ret < 0) {
+      ALOGE("%s:%d: failed %d errno %d\n", __FUNCTION__, __LINE__, ret, errno);
+    }
+    ret = drmModeAtomicAddProperty(pset, crtc.mId, crtc.mModePropertyId,
+                                   connector.mModeBlobId);
+    if (ret < 0) {
+      ALOGE("%s:%d: failed %d errno %d\n", __FUNCTION__, __LINE__, ret, errno);
+    }
+    ret = drmModeAtomicAddProperty(pset, connector.mId,
+                                   connector.mCrtcPropertyId, crtc.mId);
+    if (ret < 0) {
+      ALOGE("%s:%d: failed %d errno %d\n", __FUNCTION__, __LINE__, ret, errno);
+    }
+
+    crtc.mDidSetCrtc = true;
+  } else {
+    DEBUG_LOG("%s: Already set crtc\n", __FUNCTION__);
+  }
+
+  uint64_t outSyncFdUint = (uint64_t)outSyncFd;
+
+  ret = drmModeAtomicAddProperty(pset, crtc.mId, crtc.mFencePropertyId,
+                                 outSyncFdUint);
+  if (ret < 0) {
+    ALOGE("%s:%d: failed %d errno %d\n", __FUNCTION__, __LINE__, ret, errno);
+  }
+
+  if (crtc.mPlaneId == -1) {
+    ALOGE("%s:%d: no plane available for crtc id %" PRIu32, __FUNCTION__,
+          __LINE__, crtc.mId);
+    return HWC2::Error::NoResources;
+  }
+
+  DrmPlane& plane = mPlanes[crtc.mPlaneId];
+
+  DEBUG_LOG("%s: set plane: plane id %d crtc id %d fbid %d bo w h %d %d\n",
+            __FUNCTION__, plane.mId, crtc.mId, bo.fb_id, bo.width, bo.height);
+
+  ret = drmModeAtomicAddProperty(pset, plane.mId, plane.mCrtcPropertyId,
+                                 crtc.mId);
+  if (ret < 0) {
+    ALOGE("%s:%d: failed %d errno %d\n", __FUNCTION__, __LINE__, ret, errno);
+  }
+  ret =
+      drmModeAtomicAddProperty(pset, plane.mId, plane.mFbPropertyId, bo.fb_id);
+  if (ret < 0) {
+    ALOGE("%s:%d: failed %d errno %d\n", __FUNCTION__, __LINE__, ret, errno);
+  }
+  ret = drmModeAtomicAddProperty(pset, plane.mId, plane.mCrtcXPropertyId, 0);
+  if (ret < 0) {
+    ALOGE("%s:%d: failed %d errno %d\n", __FUNCTION__, __LINE__, ret, errno);
+  }
+  ret = drmModeAtomicAddProperty(pset, plane.mId, plane.mCrtcYPropertyId, 0);
+  if (ret < 0) {
+    ALOGE("%s:%d: failed %d errno %d\n", __FUNCTION__, __LINE__, ret, errno);
+  }
+  ret = drmModeAtomicAddProperty(pset, plane.mId, plane.mCrtcWPropertyId,
+                                 bo.width);
+  if (ret < 0) {
+    ALOGE("%s:%d: failed %d errno %d\n", __FUNCTION__, __LINE__, ret, errno);
+  }
+  ret = drmModeAtomicAddProperty(pset, plane.mId, plane.mCrtcHPropertyId,
+                                 bo.height);
+  if (ret < 0) {
+    ALOGE("%s:%d: failed %d errno %d\n", __FUNCTION__, __LINE__, ret, errno);
+  }
+  ret = drmModeAtomicAddProperty(pset, plane.mId, plane.mSrcXPropertyId, 0);
+  if (ret < 0) {
+    ALOGE("%s:%d: failed %d errno %d\n", __FUNCTION__, __LINE__, ret, errno);
+  }
+  ret = drmModeAtomicAddProperty(pset, plane.mId, plane.mSrcYPropertyId, 0);
+  if (ret < 0) {
+    ALOGE("%s:%d: failed %d errno %d\n", __FUNCTION__, __LINE__, ret, errno);
+  }
+  ret = drmModeAtomicAddProperty(pset, plane.mId, plane.mSrcWPropertyId,
+                                 bo.width << 16);
+  if (ret < 0) {
+    ALOGE("%s:%d: failed %d errno %d\n", __FUNCTION__, __LINE__, ret, errno);
+  }
+  ret = drmModeAtomicAddProperty(pset, plane.mId, plane.mSrcHPropertyId,
+                                 bo.height << 16);
+  if (ret < 0) {
+    ALOGE("%s:%d: failed %d errno %d\n", __FUNCTION__, __LINE__, ret, errno);
+  }
+
+  uint32_t flags = DRM_MODE_ATOMIC_ALLOW_MODESET;
+  ret = drmModeAtomicCommit(mFd.get(), pset, flags, 0);
+
+  if (ret) {
+    ALOGE("%s: Atomic commit failed with %d %d\n", __FUNCTION__, ret, errno);
+    error = HWC2::Error::NoResources;
+  }
+
+  if (pset) {
+    drmModeAtomicFree(pset);
+  }
+
+  DEBUG_LOG("%s: out fence: %d\n", __FUNCTION__, *outSyncFd);
+  return error;
+}
+
+std::optional<std::vector<uint8_t>> DrmPresenter::getEdid(uint32_t id) {
+  AutoReadLock lock(mStateMutex);
+
+  if (mConnectors[id].mEdidBlobId == -1) {
+    ALOGW("%s: EDID not supported", __func__);
+    return std::nullopt;
+  }
+  drmModePropertyBlobPtr blob = drmModeGetPropertyBlob(mFd.get(),
+                                                       mConnectors[id].mEdidBlobId);
+  if (!blob) {
+    ALOGE("%s: fail to read EDID from DRM", __func__);
+    return std::nullopt;
+  }
+
+  std::vector<uint8_t> edid;
+  uint8_t* start = static_cast<uint8_t*>(blob->data);
+  edid.insert(edid.begin(), start, start + blob->length);
+
+  drmModeFreePropertyBlob(blob);
+
+  return edid;
+}
+
+DrmBuffer::DrmBuffer(const native_handle_t* handle, DrmPresenter& DrmPresenter)
+    : mDrmPresenter(DrmPresenter), mBo({}) {
+  if (!convertBoInfo(handle)) {
+    mDrmPresenter.getDrmFB(mBo);
+  }
+}
+
+DrmBuffer::~DrmBuffer() { mDrmPresenter.clearDrmFB(mBo); }
+
+int DrmBuffer::convertBoInfo(const native_handle_t* handle) {
+  cros_gralloc_handle* gr_handle = (cros_gralloc_handle*)handle;
+  if (!gr_handle) {
+    ALOGE("%s: Null buffer handle", __FUNCTION__);
+    return -1;
+  }
+  mBo.width = gr_handle->width;
+  mBo.height = gr_handle->height;
+  mBo.hal_format = gr_handle->droid_format;
+  mBo.format = gr_handle->format;
+  mBo.usage = gr_handle->usage;
+  mBo.prime_fds[0] = gr_handle->fds[0];
+  mBo.pitches[0] = gr_handle->strides[0];
+  return 0;
+}
+
+HWC2::Error DrmBuffer::flushToDisplay(int display, int* outFlushDoneSyncFd) {
+  return mDrmPresenter.flushToDisplay(display, mBo, outFlushDoneSyncFd);
+}
+
+DrmPresenter::DrmEventListener::DrmEventListener(DrmPresenter& presenter)
+    : mPresenter(presenter) {}
+
+DrmPresenter::DrmEventListener::~DrmEventListener() {}
+
+bool DrmPresenter::DrmEventListener::init() {
+  mEventFd = android::base::unique_fd(
+      socket(PF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT));
+  if (!mEventFd.ok()) {
+    ALOGE("Failed to open uevent socket: %s", strerror(errno));
+    return false;
+  }
+  struct sockaddr_nl addr;
+  memset(&addr, 0, sizeof(addr));
+  addr.nl_family = AF_NETLINK;
+  addr.nl_pid = 0;
+  addr.nl_groups = 0xFFFFFFFF;
+
+  int ret = bind(mEventFd, (struct sockaddr*)&addr, sizeof(addr));
+  if (ret) {
+    ALOGE("Failed to bind uevent socket: %s", strerror(errno));
+    return false;
+  }
+
+  FD_ZERO(&mMonitoredFds);
+  FD_SET(mPresenter.mFd.get(), &mMonitoredFds);
+  FD_SET(mEventFd.get(), &mMonitoredFds);
+  mMaxFd = std::max(mPresenter.mFd.get(), mEventFd.get());
+
+  return true;
+}
+
+bool DrmPresenter::DrmEventListener::threadLoop() {
+  int ret;
+  do {
+    ret = select(mMaxFd + 1, &mMonitoredFds, NULL, NULL, NULL);
+  } while (ret == -1 && errno == EINTR);
+
+  // if (FD_ISSET(mPresenter.mFd, &mFds)) {
+  //   TODO: handle drm related events
+  // }
+
+  if (FD_ISSET(mEventFd.get(), &mMonitoredFds)) {
+    eventThreadLoop();
+  }
+  return true;
+}
+
+void DrmPresenter::DrmEventListener::eventThreadLoop() {
+  char buffer[1024];
+  int ret;
+
+  struct timespec ts;
+  uint64_t timestamp = 0;
+  ret = clock_gettime(CLOCK_MONOTONIC, &ts);
+  if (!ret) {
+    timestamp = ts.tv_sec * 1000 * 1000 * 1000 + ts.tv_nsec;
+  } else {
+    ALOGE("Failed to get monotonic clock on hotplug %d", ret);
+  }
+
+  while (true) {
+    ret = read(mEventFd.get(), &buffer, sizeof(buffer));
+    if (ret == 0) {
+      return;
+    } else if (ret < 0) {
+      ALOGE("Got error reading uevent %d", ret);
+      return;
+    }
+
+    bool drmEvent = false, hotplugEvent = false;
+    for (int i = 0; i < ret;) {
+      char* event = buffer + i;
+      if (strcmp(event, "DEVTYPE=drm_minor")) {
+        drmEvent = true;
+      } else if (strcmp(event, "HOTPLUG=1")) {
+        hotplugEvent = true;
+      }
+
+      i += strlen(event) + 1;
+    }
+
+    if (drmEvent && hotplugEvent) {
+      processHotplug(timestamp);
+    }
+  }
+}
+
+void DrmPresenter::DrmEventListener::processHotplug(uint64_t timestamp) {
+  ALOGD("DrmEventListener detected hotplug event %" PRIu64, timestamp);
+  mPresenter.handleHotplug();
+}
+}  // namespace android
diff --git a/system/hwc2/DrmPresenter.h b/system/hwc2/DrmPresenter.h
new file mode 100644
index 0000000..7f9edb2
--- /dev/null
+++ b/system/hwc2/DrmPresenter.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HWC_DRMPRESENTER_H
+#define ANDROID_HWC_DRMPRESENTER_H
+
+#include <android-base/unique_fd.h>
+#include <include/drmhwcgralloc.h>
+#include <utils/Thread.h>
+#include <xf86drm.h>
+#include <xf86drmMode.h>
+
+#include <map>
+#include <memory>
+#include <vector>
+
+#include "Common.h"
+#include "android/base/synchronization/AndroidLock.h"
+
+namespace android {
+
+class DrmBuffer;
+class DrmPresenter;
+
+// A RAII object that will clear a drm framebuffer upon destruction.
+class DrmBuffer {
+ public:
+  DrmBuffer(const native_handle_t* handle, DrmPresenter& drmPresenter);
+  ~DrmBuffer();
+
+  DrmBuffer(const DrmBuffer&) = delete;
+  DrmBuffer& operator=(const DrmBuffer&) = delete;
+
+  DrmBuffer(DrmBuffer&&) = delete;
+  DrmBuffer& operator=(DrmBuffer&&) = delete;
+
+  HWC2::Error flushToDisplay(int display, int* outFlushDoneSyncFd);
+
+ private:
+  int convertBoInfo(const native_handle_t* handle);
+
+  DrmPresenter& mDrmPresenter;
+  hwc_drm_bo_t mBo;
+};
+
+class DrmPresenter {
+ public:
+  DrmPresenter() = default;
+  ~DrmPresenter() = default;
+
+  DrmPresenter(const DrmPresenter&) = delete;
+  DrmPresenter& operator=(const DrmPresenter&) = delete;
+
+  DrmPresenter(DrmPresenter&&) = delete;
+  DrmPresenter& operator=(DrmPresenter&&) = delete;
+
+  using HotplugCallback = std::function<void(
+      bool /*connected*/, uint32_t /*id*/, uint32_t /*width*/,
+      uint32_t /*height*/, uint32_t /*dpiX*/, uint32_t /*dpiY*/,
+      uint32_t /*refreshRate*/)>;
+
+  bool init(const HotplugCallback& cb);
+
+  uint32_t refreshRate() const { return mConnectors[0].mRefreshRateAsInteger; }
+
+  HWC2::Error flushToDisplay(int display, hwc_drm_bo_t& fb, int* outSyncFd);
+
+  std::optional<std::vector<uint8_t>> getEdid(uint32_t id);
+
+ private:
+  // Grant visibility for getDrmFB and clearDrmFB to DrmBuffer.
+  friend class DrmBuffer;
+  int getDrmFB(hwc_drm_bo_t& bo);
+  int clearDrmFB(hwc_drm_bo_t& bo);
+
+  // Grant visibility for handleHotplug to DrmEventListener.
+  bool handleHotplug();
+
+  bool initDrmElementsLocked();
+  void resetDrmElementsLocked();
+
+  // Drm device.
+  android::base::unique_fd mFd;
+
+  HotplugCallback mHotplugCallback;
+
+  // Protects access to the below drm structs.
+  android::base::guest::ReadWriteLock mStateMutex;
+
+  struct DrmPlane {
+    uint32_t mId = -1;
+    uint32_t mCrtcPropertyId = -1;
+    uint32_t mFbPropertyId = -1;
+    uint32_t mCrtcXPropertyId = -1;
+    uint32_t mCrtcYPropertyId = -1;
+    uint32_t mCrtcWPropertyId = -1;
+    uint32_t mCrtcHPropertyId = -1;
+    uint32_t mSrcXPropertyId = -1;
+    uint32_t mSrcYPropertyId = -1;
+    uint32_t mSrcWPropertyId = -1;
+    uint32_t mSrcHPropertyId = -1;
+    uint32_t mTypePropertyId = -1;
+    uint64_t mType = -1;
+  };
+  std::map<uint32_t, DrmPlane> mPlanes;
+
+  struct DrmCrtc {
+    uint32_t mId = -1;
+    uint32_t mActivePropertyId = -1;
+    uint32_t mModePropertyId = -1;
+    uint32_t mFencePropertyId = -1;
+    uint32_t mPlaneId = -1;
+
+    bool mDidSetCrtc = false;
+  };
+  std::vector<DrmCrtc> mCrtcs;
+
+  struct DrmConnector {
+    uint32_t mId = -1;
+    uint32_t mCrtcPropertyId = -1;
+    drmModeModeInfo mMode;
+    int32_t dpiX;
+    int32_t dpiY;
+    drmModeConnection connection;
+    uint32_t mModeBlobId = 0;
+    float mRefreshRateAsFloat;
+    uint32_t mRefreshRateAsInteger;
+    uint64_t mEdidBlobId = -1;
+  };
+  std::vector<DrmConnector> mConnectors;
+
+  class DrmEventListener : public Thread {
+   public:
+    DrmEventListener(DrmPresenter& presenter);
+    virtual ~DrmEventListener();
+
+    bool init();
+
+   private:
+    bool threadLoop() final;
+    void eventThreadLoop();
+    void processHotplug(uint64_t timestamp);
+
+    DrmPresenter& mPresenter;
+    android::base::unique_fd mEventFd;
+    int mMaxFd;
+    fd_set mMonitoredFds;
+  };
+  android::sp<DrmEventListener> mDrmEventListener;
+};
+
+}  // namespace android
+
+#endif
diff --git a/system/hwc2/EmuHWC2.cpp b/system/hwc2/EmuHWC2.cpp
deleted file mode 100644
index 72399c7..0000000
--- a/system/hwc2/EmuHWC2.cpp
+++ /dev/null
@@ -1,1783 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "EmuHWC2.h"
-//#define LOG_NDEBUG 0
-//#define LOG_NNDEBUG 0
-#undef LOG_TAG
-#define LOG_TAG "EmuHWC2"
-
-#include <errno.h>
-#include <cutils/properties.h>
-#include <log/log.h>
-#include <sync/sync.h>
-
-#include <EGL/egl.h>
-#include <EGL/eglext.h>
-#include <ui/GraphicBuffer.h>
-#include <ui/GraphicBufferAllocator.h>
-
-#include "../egl/goldfish_sync.h"
-
-#include "ThreadInfo.h"
-
-#if defined(LOG_NNDEBUG) && LOG_NNDEBUG == 0
-#define ALOGVV ALOGV
-#else
-#define ALOGVV(...) ((void)0)
-#endif
-
-template <typename PFN, typename T>
-static hwc2_function_pointer_t asFP(T function)
-{
-    static_assert(std::is_same<PFN, T>::value, "Incompatible function pointer");
-    return reinterpret_cast<hwc2_function_pointer_t>(function);
-}
-
-static HostConnection *sHostCon = nullptr;
-
-static HostConnection* createOrGetHostConnection() {
-    if (!sHostCon) {
-        sHostCon = HostConnection::createUnique();
-    }
-    return sHostCon;
-}
-
-#define DEFINE_AND_VALIDATE_HOST_CONNECTION \
-    HostConnection *hostCon = createOrGetHostConnection(); \
-    if (!hostCon) { \
-        ALOGE("EmuHWC2: Failed to get host connection\n"); \
-        return Error::NoResources; \
-    } \
-    ExtendedRCEncoderContext *rcEnc = hostCon->rcEncoder(); \
-    if (!rcEnc) { \
-        ALOGE("EmuHWC2: Failed to get renderControl encoder context\n"); \
-        return Error::NoResources; \
-    }
-
-using namespace HWC2;
-
-namespace android {
-
-EmuHWC2::EmuHWC2()
-  : mStateMutex()
-{
-    common.tag = HARDWARE_DEVICE_TAG;
-    common.version = HWC_DEVICE_API_VERSION_2_0;
-    common.close = closeHook;
-    getCapabilities = getCapabilitiesHook;
-    getFunction = getFunctionHook;
-    populateCapabilities();
-    initDisplayParameters();
-}
-
-Error EmuHWC2::initDisplayParameters() {
-    DEFINE_AND_VALIDATE_HOST_CONNECTION
-    hostCon->lock();
-
-    mDisplayWidth = rcEnc->rcGetFBParam(rcEnc, FB_WIDTH);
-    mDisplayHeight = rcEnc->rcGetFBParam(rcEnc, FB_HEIGHT);
-    mDisplayDpiX = rcEnc->rcGetFBParam(rcEnc, FB_XDPI);
-    mDisplayDpiY = rcEnc->rcGetFBParam(rcEnc, FB_YDPI);
-
-    hostCon->unlock();
-
-    return HWC2::Error::None;
-}
-
-void EmuHWC2::doGetCapabilities(uint32_t* outCount, int32_t* outCapabilities) {
-    if (outCapabilities == nullptr) {
-        *outCount = mCapabilities.size();
-        return;
-    }
-
-    auto capabilityIter = mCapabilities.cbegin();
-    for (size_t i = 0; i < *outCount; ++i) {
-        if (capabilityIter == mCapabilities.cend()) {
-            return;
-        }
-        outCapabilities[i] = static_cast<int32_t>(*capabilityIter);
-        ++capabilityIter;
-    }
-}
-
-hwc2_function_pointer_t EmuHWC2::doGetFunction(
-        FunctionDescriptor descriptor) {
-    switch(descriptor) {
-        case FunctionDescriptor::CreateVirtualDisplay:
-            return asFP<HWC2_PFN_CREATE_VIRTUAL_DISPLAY>(
-                    createVirtualDisplayHook);
-        case FunctionDescriptor::DestroyVirtualDisplay:
-            return asFP<HWC2_PFN_DESTROY_VIRTUAL_DISPLAY>(
-                    destroyVirtualDisplayHook);
-        case FunctionDescriptor::Dump:
-            return asFP<HWC2_PFN_DUMP>(dumpHook);
-        case FunctionDescriptor::GetMaxVirtualDisplayCount:
-            return asFP<HWC2_PFN_GET_MAX_VIRTUAL_DISPLAY_COUNT>(
-                    getMaxVirtualDisplayCountHook);
-        case FunctionDescriptor::RegisterCallback:
-            return asFP<HWC2_PFN_REGISTER_CALLBACK>(registerCallbackHook);
-
-            // Display functions
-        case FunctionDescriptor::AcceptDisplayChanges:
-            return asFP<HWC2_PFN_ACCEPT_DISPLAY_CHANGES>(
-                    displayHook<decltype(&Display::acceptChanges),
-                    &Display::acceptChanges>);
-        case FunctionDescriptor::CreateLayer:
-            return asFP<HWC2_PFN_CREATE_LAYER>(
-                    displayHook<decltype(&Display::createLayer),
-                    &Display::createLayer, hwc2_layer_t*>);
-        case FunctionDescriptor::DestroyLayer:
-            return asFP<HWC2_PFN_DESTROY_LAYER>(
-                    displayHook<decltype(&Display::destroyLayer),
-                    &Display::destroyLayer, hwc2_layer_t>);
-        case FunctionDescriptor::GetActiveConfig:
-            return asFP<HWC2_PFN_GET_ACTIVE_CONFIG>(
-                    displayHook<decltype(&Display::getActiveConfig),
-                    &Display::getActiveConfig, hwc2_config_t*>);
-        case FunctionDescriptor::GetChangedCompositionTypes:
-            return asFP<HWC2_PFN_GET_CHANGED_COMPOSITION_TYPES>(
-                    displayHook<decltype(&Display::getChangedCompositionTypes),
-                    &Display::getChangedCompositionTypes, uint32_t*,
-                    hwc2_layer_t*, int32_t*>);
-        case FunctionDescriptor::GetColorModes:
-            return asFP<HWC2_PFN_GET_COLOR_MODES>(
-                    displayHook<decltype(&Display::getColorModes),
-                    &Display::getColorModes, uint32_t*, int32_t*>);
-        case FunctionDescriptor::GetDisplayAttribute:
-            return asFP<HWC2_PFN_GET_DISPLAY_ATTRIBUTE>(
-                    displayHook<decltype(&Display::getDisplayAttribute),
-                    &Display::getDisplayAttribute, hwc2_config_t,
-                    int32_t, int32_t*>);
-        case FunctionDescriptor::GetDisplayConfigs:
-            return asFP<HWC2_PFN_GET_DISPLAY_CONFIGS>(
-                    displayHook<decltype(&Display::getConfigs),
-                    &Display::getConfigs, uint32_t*, hwc2_config_t*>);
-        case FunctionDescriptor::GetDisplayName:
-            return asFP<HWC2_PFN_GET_DISPLAY_NAME>(
-                    displayHook<decltype(&Display::getName),
-                    &Display::getName, uint32_t*, char*>);
-        case FunctionDescriptor::GetDisplayRequests:
-            return asFP<HWC2_PFN_GET_DISPLAY_REQUESTS>(
-                    displayHook<decltype(&Display::getRequests),
-                    &Display::getRequests, int32_t*, uint32_t*, hwc2_layer_t*,
-                    int32_t*>);
-        case FunctionDescriptor::GetDisplayType:
-            return asFP<HWC2_PFN_GET_DISPLAY_TYPE>(
-                    displayHook<decltype(&Display::getType),
-                    &Display::getType, int32_t*>);
-        case FunctionDescriptor::GetDozeSupport:
-            return asFP<HWC2_PFN_GET_DOZE_SUPPORT>(
-                    displayHook<decltype(&Display::getDozeSupport),
-                    &Display::getDozeSupport, int32_t*>);
-        case FunctionDescriptor::GetHdrCapabilities:
-            return asFP<HWC2_PFN_GET_HDR_CAPABILITIES>(
-                    displayHook<decltype(&Display::getHdrCapabilities),
-                    &Display::getHdrCapabilities, uint32_t*, int32_t*, float*,
-                    float*, float*>);
-        case FunctionDescriptor::GetReleaseFences:
-            return asFP<HWC2_PFN_GET_RELEASE_FENCES>(
-                    displayHook<decltype(&Display::getReleaseFences),
-                    &Display::getReleaseFences, uint32_t*, hwc2_layer_t*,
-                    int32_t*>);
-        case FunctionDescriptor::PresentDisplay:
-            return asFP<HWC2_PFN_PRESENT_DISPLAY>(
-                    displayHook<decltype(&Display::present),
-                    &Display::present, int32_t*>);
-        case FunctionDescriptor::SetActiveConfig:
-            return asFP<HWC2_PFN_SET_ACTIVE_CONFIG>(
-                    displayHook<decltype(&Display::setActiveConfig),
-                    &Display::setActiveConfig, hwc2_config_t>);
-        case FunctionDescriptor::SetClientTarget:
-            return asFP<HWC2_PFN_SET_CLIENT_TARGET>(
-                    displayHook<decltype(&Display::setClientTarget),
-                    &Display::setClientTarget, buffer_handle_t, int32_t,
-                    int32_t, hwc_region_t>);
-        case FunctionDescriptor::SetColorMode:
-            return asFP<HWC2_PFN_SET_COLOR_MODE>(
-                    displayHook<decltype(&Display::setColorMode),
-                    &Display::setColorMode, int32_t>);
-        case FunctionDescriptor::SetColorTransform:
-            return asFP<HWC2_PFN_SET_COLOR_TRANSFORM>(
-                    displayHook<decltype(&Display::setColorTransform),
-                    &Display::setColorTransform, const float*, int32_t>);
-        case FunctionDescriptor::SetOutputBuffer:
-            return asFP<HWC2_PFN_SET_OUTPUT_BUFFER>(
-                    displayHook<decltype(&Display::setOutputBuffer),
-                    &Display::setOutputBuffer, buffer_handle_t, int32_t>);
-        case FunctionDescriptor::SetPowerMode:
-            return asFP<HWC2_PFN_SET_POWER_MODE>(
-                    displayHook<decltype(&Display::setPowerMode),
-                    &Display::setPowerMode, int32_t>);
-        case FunctionDescriptor::SetVsyncEnabled:
-            return asFP<HWC2_PFN_SET_VSYNC_ENABLED>(
-                    displayHook<decltype(&Display::setVsyncEnabled),
-                    &Display::setVsyncEnabled, int32_t>);
-        case FunctionDescriptor::ValidateDisplay:
-            return asFP<HWC2_PFN_VALIDATE_DISPLAY>(
-                    displayHook<decltype(&Display::validate),
-                    &Display::validate, uint32_t*, uint32_t*>);
-        case FunctionDescriptor::GetClientTargetSupport:
-            return asFP<HWC2_PFN_GET_CLIENT_TARGET_SUPPORT>(
-                    displayHook<decltype(&Display::getClientTargetSupport),
-                    &Display::getClientTargetSupport, uint32_t, uint32_t,
-                                                      int32_t, int32_t>);
-        // 2.3 required functions
-        case FunctionDescriptor::GetDisplayIdentificationData:
-            return asFP<HWC2_PFN_GET_DISPLAY_IDENTIFICATION_DATA>(
-                    displayHook<decltype(&Display::getDisplayIdentificationData),
-                    &Display::getDisplayIdentificationData, uint8_t*, uint32_t*, uint8_t*>);
-        case FunctionDescriptor::GetDisplayCapabilities:
-            return asFP<HWC2_PFN_GET_DISPLAY_CAPABILITIES>(
-                    displayHook<decltype(&Display::getDisplayCapabilities),
-                    &Display::getDisplayCapabilities, uint32_t*, uint32_t*>);
-        case FunctionDescriptor::GetDisplayBrightnessSupport:
-            return asFP<HWC2_PFN_GET_DISPLAY_BRIGHTNESS_SUPPORT>(
-                    displayHook<decltype(&Display::getDisplayBrightnessSupport),
-                    &Display::getDisplayBrightnessSupport, bool*>);
-        case FunctionDescriptor::SetDisplayBrightness:
-            return asFP<HWC2_PFN_SET_DISPLAY_BRIGHTNESS>(
-                    displayHook<decltype(&Display::setDisplayBrightness),
-                    &Display::setDisplayBrightness, float>);
-        // Layer functions
-        case FunctionDescriptor::SetCursorPosition:
-            return asFP<HWC2_PFN_SET_CURSOR_POSITION>(
-                    layerHook<decltype(&Layer::setCursorPosition),
-                    &Layer::setCursorPosition, int32_t, int32_t>);
-        case FunctionDescriptor::SetLayerBuffer:
-            return asFP<HWC2_PFN_SET_LAYER_BUFFER>(
-                    layerHook<decltype(&Layer::setBuffer), &Layer::setBuffer,
-                    buffer_handle_t, int32_t>);
-        case FunctionDescriptor::SetLayerSurfaceDamage:
-            return asFP<HWC2_PFN_SET_LAYER_SURFACE_DAMAGE>(
-                    layerHook<decltype(&Layer::setSurfaceDamage),
-                    &Layer::setSurfaceDamage, hwc_region_t>);
-
-        // Layer state functions
-        case FunctionDescriptor::SetLayerBlendMode:
-            return asFP<HWC2_PFN_SET_LAYER_BLEND_MODE>(
-                    layerHook<decltype(&Layer::setBlendMode),
-                    &Layer::setBlendMode, int32_t>);
-        case FunctionDescriptor::SetLayerColor:
-            return asFP<HWC2_PFN_SET_LAYER_COLOR>(
-                    layerHook<decltype(&Layer::setColor), &Layer::setColor,
-                    hwc_color_t>);
-        case FunctionDescriptor::SetLayerCompositionType:
-            return asFP<HWC2_PFN_SET_LAYER_COMPOSITION_TYPE>(
-                    layerHook<decltype(&Layer::setCompositionType),
-                    &Layer::setCompositionType, int32_t>);
-        case FunctionDescriptor::SetLayerDataspace:
-            return asFP<HWC2_PFN_SET_LAYER_DATASPACE>(
-                    layerHook<decltype(&Layer::setDataspace),
-                    &Layer::setDataspace, int32_t>);
-        case FunctionDescriptor::SetLayerDisplayFrame:
-            return asFP<HWC2_PFN_SET_LAYER_DISPLAY_FRAME>(
-                    layerHook<decltype(&Layer::setDisplayFrame),
-                    &Layer::setDisplayFrame, hwc_rect_t>);
-        case FunctionDescriptor::SetLayerPlaneAlpha:
-            return asFP<HWC2_PFN_SET_LAYER_PLANE_ALPHA>(
-                    layerHook<decltype(&Layer::setPlaneAlpha),
-                    &Layer::setPlaneAlpha, float>);
-        case FunctionDescriptor::SetLayerSidebandStream:
-            return asFP<HWC2_PFN_SET_LAYER_SIDEBAND_STREAM>(
-                    layerHook<decltype(&Layer::setSidebandStream),
-                    &Layer::setSidebandStream, const native_handle_t*>);
-        case FunctionDescriptor::SetLayerSourceCrop:
-            return asFP<HWC2_PFN_SET_LAYER_SOURCE_CROP>(
-                    layerHook<decltype(&Layer::setSourceCrop),
-                    &Layer::setSourceCrop, hwc_frect_t>);
-        case FunctionDescriptor::SetLayerTransform:
-            return asFP<HWC2_PFN_SET_LAYER_TRANSFORM>(
-                    layerHook<decltype(&Layer::setTransform),
-                    &Layer::setTransform, int32_t>);
-        case FunctionDescriptor::SetLayerVisibleRegion:
-            return asFP<HWC2_PFN_SET_LAYER_VISIBLE_REGION>(
-                    layerHook<decltype(&Layer::setVisibleRegion),
-                    &Layer::setVisibleRegion, hwc_region_t>);
-        case FunctionDescriptor::SetLayerZOrder:
-            return asFP<HWC2_PFN_SET_LAYER_Z_ORDER>(
-                    displayHook<decltype(&Display::updateLayerZ),
-                    &Display::updateLayerZ, hwc2_layer_t, uint32_t>);
-
-        default:
-            ALOGE("doGetFunction: Unknown function descriptor: %d (%s)",
-                    static_cast<int32_t>(descriptor),
-                    to_string(descriptor).c_str());
-            return nullptr;
-    }
-}
-
-
-// Device functions
-
-Error EmuHWC2::createVirtualDisplay(uint32_t /*width*/, uint32_t /*height*/,
-        int32_t* /*format*/, hwc2_display_t* /*outDisplay*/) {
-    ALOGVV("%s", __FUNCTION__);
-    //TODO: VirtualDisplay support
-    return Error::None;
-}
-
-Error EmuHWC2::destroyVirtualDisplay(hwc2_display_t /*displayId*/) {
-    ALOGVV("%s", __FUNCTION__);
-    //TODO: VirtualDisplay support
-    return Error::None;
-}
-
-void EmuHWC2::dump(uint32_t* /*outSize*/, char* /*outBuffer*/) {
-    ALOGVV("%s", __FUNCTION__);
-    //TODO:
-    return;
-}
-
-uint32_t EmuHWC2::getMaxVirtualDisplayCount() {
-    ALOGVV("%s", __FUNCTION__);
-    //TODO: VirtualDisplay support
-    return 0;
-}
-
-static bool isValid(Callback descriptor) {
-    switch (descriptor) {
-        case Callback::Hotplug: // Fall-through
-        case Callback::Refresh: // Fall-through
-        case Callback::Vsync: return true;
-        default: return false;
-    }
-}
-
-Error EmuHWC2::registerCallback(Callback descriptor,
-        hwc2_callback_data_t callbackData, hwc2_function_pointer_t pointer) {
-    ALOGVV("%s", __FUNCTION__);
-    if (!isValid(descriptor)) {
-        ALOGE("registerCallback: Unkown function descriptor: %d",
-                static_cast<int32_t>(descriptor));
-        return Error::BadParameter;
-    }
-    ALOGV("registerCallback(%s, %p, %p)", to_string(descriptor).c_str(),
-            callbackData, pointer);
-
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    if (pointer != nullptr) {
-        mCallbacks[descriptor] = {callbackData, pointer};
-    }
-    else {
-        ALOGV("unregisterCallback(%s)", to_string(descriptor).c_str());
-        mCallbacks.erase(descriptor);
-        return Error::None;
-    }
-
-    // Callback without the state lock held
-    if (descriptor == Callback::Hotplug) {
-        lock.unlock();
-        auto hotplug = reinterpret_cast<HWC2_PFN_VSYNC>(pointer);
-        for (const auto& iter : mDisplays) {
-            hotplug(callbackData, iter.first, static_cast<int32_t>(Connection::Connected));
-        }
-    }
-
-    return Error::None;
-}
-
-const native_handle_t* EmuHWC2::allocateDisplayColorBuffer() {
-    const uint32_t layerCount = 1;
-    const uint64_t graphicBufferId = 0; // not used
-
-    buffer_handle_t h;
-    uint32_t stride;
-
-    if (GraphicBufferAllocator::get().allocate(
-        mDisplayWidth, mDisplayHeight,
-        PIXEL_FORMAT_RGBA_8888,
-        layerCount,
-        (GraphicBuffer::USAGE_HW_COMPOSER | GraphicBuffer::USAGE_HW_RENDER),
-        &h, &stride,
-        graphicBufferId, "EmuHWC2") == OK) {
-        return static_cast<const native_handle_t*>(h);
-    } else {
-        return nullptr;
-    }
-}
-
-void EmuHWC2::freeDisplayColorBuffer(const native_handle_t* h) {
-    GraphicBufferAllocator::get().free(h);
-}
-
-// Display functions
-
-#define VSYNC_PERIOD_PROP "ro.kernel.qemu.vsync"
-
-static int getVsyncPeriodFromProperty() {
-    char displaysValue[PROPERTY_VALUE_MAX] = "";
-    property_get(VSYNC_PERIOD_PROP, displaysValue, "");
-    bool isValid = displaysValue[0] != '\0';
-
-    if (!isValid) return 60;
-
-    long vsyncPeriodParsed = strtol(displaysValue, 0, 10);
-
-    // On failure, strtol returns 0. Also, there's no reason to have 0
-    // as the vsync period.
-    if (!vsyncPeriodParsed) return 60;
-
-    return static_cast<int>(vsyncPeriodParsed);
-}
-
-std::atomic<hwc2_display_t> EmuHWC2::Display::sNextId(0);
-
-EmuHWC2::Display::Display(EmuHWC2& device, DisplayType type)
-  : mDevice(device),
-    mId(sNextId++),
-    mHostDisplayId(0),
-    mName(),
-    mType(type),
-    mPowerMode(PowerMode::Off),
-    mVsyncEnabled(Vsync::Invalid),
-    mVsyncPeriod(1000*1000*1000/getVsyncPeriodFromProperty()), // vsync is 60 hz
-    mVsyncThread(*this),
-    mClientTarget(),
-    mChanges(),
-    mLayers(),
-    mReleaseLayerIds(),
-    mReleaseFences(),
-    mConfigs(),
-    mActiveConfig(nullptr),
-    mColorModes(),
-    mSetColorTransform(false),
-    mStateMutex() {
-        mVsyncThread.run("", -19 /* ANDROID_PRIORITY_URGENT_AUDIO */);
-        mTargetCb = device.allocateDisplayColorBuffer();
-}
-
-EmuHWC2::Display::~Display() {
-    mDevice.freeDisplayColorBuffer(mTargetCb);
-}
-
-Error EmuHWC2::Display::acceptChanges() {
-    ALOGVV("%s: displayId %u", __FUNCTION__, (uint32_t)mId);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    if (!mChanges) {
-        ALOGW("%s: displayId %u acceptChanges failed, not validated",
-              __FUNCTION__, (uint32_t)mId);
-        return Error::NotValidated;
-    }
-
-
-    for (auto& change : mChanges->getTypeChanges()) {
-        auto layerId = change.first;
-        auto type = change.second;
-        if (mDevice.mLayers.count(layerId) == 0) {
-            // This should never happen but somehow does.
-            ALOGW("Cannot accept change for unknown layer %u",
-                  (uint32_t)layerId);
-            continue;
-        }
-        auto layer = mDevice.mLayers[layerId];
-        layer->setCompositionType((int32_t)type);
-    }
-
-    mChanges->clearTypeChanges();
-    return Error::None;
-}
-
-Error EmuHWC2::Display::createLayer(hwc2_layer_t* outLayerId) {
-    ALOGVV("%s", __FUNCTION__);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    auto layer = *mLayers.emplace(std::make_shared<Layer>(*this));
-    mDevice.mLayers.emplace(std::make_pair(layer->getId(), layer));
-    *outLayerId = layer->getId();
-    ALOGV("%s: Display %u created layer %u", __FUNCTION__, (uint32_t)mId,
-         (uint32_t)(*outLayerId));
-    return Error::None;
-}
-
-Error EmuHWC2::Display::destroyLayer(hwc2_layer_t layerId) {
-    ALOGVV("%s", __FUNCTION__);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    const auto mapLayer = mDevice.mLayers.find(layerId);
-    if (mapLayer == mDevice.mLayers.end()) {
-        ALOGW("%s failed: no such layer, displayId %u layerId %u",
-             __FUNCTION__, (uint32_t)mId, (uint32_t)layerId);
-        return Error::BadLayer;
-    }
-    const auto layer = mapLayer->second;
-    mDevice.mLayers.erase(mapLayer);
-    const auto zRange = mLayers.equal_range(layer);
-    for (auto current = zRange.first; current != zRange.second; ++current) {
-        if (**current == *layer) {
-            current = mLayers.erase(current);
-            break;
-        }
-    }
-    ALOGV("%s: displayId %d layerId %d", __FUNCTION__, (uint32_t)mId,
-         (uint32_t)layerId);
-    return Error::None;
-}
-
-Error EmuHWC2::Display::getActiveConfig(hwc2_config_t* outConfig) {
-    ALOGVV("%s", __FUNCTION__);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    if (!mActiveConfig) {
-        ALOGW("%s: displayId %d %s", __FUNCTION__, (uint32_t)mId,
-                to_string(Error::BadConfig).c_str());
-        return Error::BadConfig;
-    }
-    auto configId = mActiveConfig->getId();
-    ALOGV("%s: displayId %d configId %d", __FUNCTION__,
-          (uint32_t)mId, (uint32_t)configId);
-    *outConfig = configId;
-    return Error::None;
-}
-
-Error EmuHWC2::Display::getDisplayAttribute(hwc2_config_t configId,
-        int32_t attribute, int32_t* outValue) {
-    ALOGVV("%s", __FUNCTION__);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    if (configId > mConfigs.size() || !mConfigs[configId]->isOnDisplay(*this)) {
-        ALOGW("%s: bad config (%u %u)", __FUNCTION__, (uint32_t)mId, configId);
-        return Error::BadConfig;
-    }
-    *outValue = mConfigs[configId]->getAttribute((Attribute)attribute);
-    ALOGV("%s: (%d %d) %s --> %d", __FUNCTION__,
-          (uint32_t)mId, (uint32_t)configId,
-          to_string((Attribute)attribute).c_str(), *outValue);
-    return Error::None;
-}
-
-Error EmuHWC2::Display::getChangedCompositionTypes(
-        uint32_t* outNumElements, hwc2_layer_t* outLayers, int32_t* outTypes) {
-    ALOGVV("%s", __FUNCTION__);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    if (!mChanges) {
-        ALOGW("display %u getChangedCompositionTypes failed: not validated",
-                (uint32_t)mId);
-        return Error::NotValidated;
-    }
-
-    if ((outLayers == nullptr) || (outTypes == nullptr)) {
-        *outNumElements = mChanges->getTypeChanges().size();
-        return Error::None;
-    }
-
-    uint32_t numWritten = 0;
-    for (const auto& element : mChanges->getTypeChanges()) {
-        if (numWritten == *outNumElements) {
-            break;
-        }
-        auto layerId = element.first;
-        auto intType = static_cast<int32_t>(element.second);
-        ALOGV("%s: Adding layer %u %s", __FUNCTION__, (uint32_t)layerId,
-                to_string(element.second).c_str());
-        outLayers[numWritten] = layerId;
-        outTypes[numWritten] = intType;
-        ++numWritten;
-    }
-    *outNumElements = numWritten;
-    return Error::None;
-}
-
-Error EmuHWC2::Display::getColorModes(uint32_t* outNumModes,
-        int32_t* outModes) {
-    ALOGVV("%s", __FUNCTION__);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    if (!outModes) {
-        *outNumModes = mColorModes.size();
-        return Error::None;
-    }
-
-    // we only support HAL_COLOR_MODE_NATIVE so far
-    uint32_t numModes = std::min(*outNumModes,
-            static_cast<uint32_t>(mColorModes.size()));
-    std::copy_n(mColorModes.cbegin(), numModes, outModes);
-    *outNumModes = numModes;
-    return Error::None;
-}
-
-Error EmuHWC2::Display::getConfigs(uint32_t* outNumConfigs,
-        hwc2_config_t* outConfigs) {
-    ALOGVV("%s", __FUNCTION__);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    if (!outConfigs) {
-        *outNumConfigs = mConfigs.size();
-        return Error::None;
-    }
-    uint32_t numWritten = 0;
-    for (const auto config : mConfigs) {
-        if (numWritten == *outNumConfigs) {
-            break;
-        }
-        outConfigs[numWritten] = config->getId();
-        ++numWritten;
-    }
-    *outNumConfigs = numWritten;
-    return Error::None;
-}
-
-Error EmuHWC2::Display::getDozeSupport(int32_t* outSupport) {
-    ALOGVV("%s", __FUNCTION__);
-    // We don't support so far
-    *outSupport = 0;
-    return Error::None;
-}
-
-Error EmuHWC2::Display::getHdrCapabilities(uint32_t* outNumTypes,
-        int32_t* /*outTypes*/, float* /*outMaxLuminance*/,
-        float* /*outMaxAverageLuminance*/, float* /*outMinLuminance*/) {
-    ALOGVV("%s", __FUNCTION__);
-    // We don't support so far
-    *outNumTypes = 0;
-    return Error::None;
-}
-
-Error EmuHWC2::Display::getName(uint32_t* outSize, char* outName) {
-    ALOGVV("%s", __FUNCTION__);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    if (!outName) {
-        *outSize = mName.size();
-        return Error::None;
-    }
-    auto numCopied = mName.copy(outName, *outSize);
-    *outSize = numCopied;
-    return Error::None;
-}
-
-Error EmuHWC2::Display::getReleaseFences(uint32_t* outNumElements,
-        hwc2_layer_t* outLayers, int32_t* outFences) {
-    ALOGVV("%s", __FUNCTION__);
-
-    *outNumElements = mReleaseLayerIds.size();
-
-    ALOGVV("%s. Got %u elements", __FUNCTION__, *outNumElements);
-
-    if (*outNumElements && outLayers) {
-        ALOGVV("%s. export release layers", __FUNCTION__);
-        memcpy(outLayers, mReleaseLayerIds.data(),
-               sizeof(hwc2_layer_t) * (*outNumElements));
-    }
-
-    if (*outNumElements && outFences) {
-        ALOGVV("%s. export release fences", __FUNCTION__);
-        memcpy(outFences, mReleaseFences.data(),
-               sizeof(int32_t) * (*outNumElements));
-    }
-
-    return Error::None;
-}
-
-Error EmuHWC2::Display::getRequests(int32_t* outDisplayRequests,
-        uint32_t* outNumElements, hwc2_layer_t* outLayers,
-        int32_t* outLayerRequests) {
-    ALOGVV("%s", __FUNCTION__);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    if (!mChanges) {
-        return Error::NotValidated;
-    }
-
-    if (outLayers == nullptr || outLayerRequests == nullptr) {
-        *outNumElements = mChanges->getNumLayerRequests();
-        return Error::None;
-    }
-
-    //TODO
-    // Display requests (HWC2::DisplayRequest) are not supported so far:
-    *outDisplayRequests = 0;
-
-    uint32_t numWritten = 0;
-    for (const auto& request : mChanges->getLayerRequests()) {
-        if (numWritten == *outNumElements) {
-            break;
-        }
-        outLayers[numWritten] = request.first;
-        outLayerRequests[numWritten] = static_cast<int32_t>(request.second);
-        ++numWritten;
-    }
-
-    return Error::None;
-}
-
-Error EmuHWC2::Display::getType(int32_t* outType) {
-    ALOGVV("%s", __FUNCTION__);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    *outType = (int32_t)mType;
-    return Error::None;
-}
-
-Error EmuHWC2::Display::present(int32_t* outRetireFence) {
-    ALOGVV("%s", __FUNCTION__);
-
-    *outRetireFence = -1;
-
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    if (!mChanges || (mChanges->getNumTypes() > 0)) {
-        ALOGE("%s display(%u) set failed: not validated", __FUNCTION__,
-              (uint32_t)mId);
-        return Error::NotValidated;
-    }
-    mChanges.reset();
-
-    DEFINE_AND_VALIDATE_HOST_CONNECTION
-    hostCon->lock();
-    bool hostCompositionV1 = rcEnc->hasHostCompositionV1();
-    bool hostCompositionV2 = rcEnc->hasHostCompositionV2();
-    hostCon->unlock();
-
-    // if we supports v2, then discard v1
-    if (hostCompositionV2) {
-        hostCompositionV1 = false;
-    }
-
-    if (hostCompositionV2 || hostCompositionV1) {
-        uint32_t numLayer = 0;
-        for (auto layer: mLayers) {
-            if (layer->getCompositionType() == Composition::Device ||
-                layer->getCompositionType() == Composition::SolidColor) {
-                numLayer++;
-            }
-        }
-
-        ALOGVV("present %d layers total %u layers",
-              numLayer, (uint32_t)mLayers.size());
-
-        mReleaseLayerIds.clear();
-        mReleaseFences.clear();
-
-        if (numLayer == 0) {
-            ALOGW("No layers, exit, buffer %p", mClientTarget.getBuffer());
-            if (mClientTarget.getBuffer()) {
-                post(hostCon, rcEnc, mClientTarget.getBuffer());
-                *outRetireFence = mClientTarget.getFence();
-            }
-            return Error::None;
-        }
-
-        if (hostCompositionV1) {
-            if (mComposeMsg == nullptr || mComposeMsg->getLayerCnt() < numLayer) {
-                mComposeMsg.reset(new ComposeMsg(numLayer));
-            }
-        } else {
-            if (mComposeMsg_v2 == nullptr || mComposeMsg_v2->getLayerCnt() < numLayer) {
-                mComposeMsg_v2.reset(new ComposeMsg_v2(numLayer));
-            }
-        }
-
-        // Handle the composition
-        ComposeDevice* p;
-        ComposeDevice_v2* p2;
-        ComposeLayer* l;
-
-        if (hostCompositionV1) {
-            p = mComposeMsg->get();
-            l = p->layer;
-        } else {
-            p2 = mComposeMsg_v2->get();
-            l = p2->layer;
-        }
-
-        for (auto layer: mLayers) {
-            if (layer->getCompositionType() != Composition::Device &&
-                layer->getCompositionType() != Composition::SolidColor) {
-                ALOGE("%s: Unsupported composition types %d layer %u",
-                      __FUNCTION__, layer->getCompositionType(),
-                      (uint32_t)layer->getId());
-                continue;
-            }
-            // send layer composition command to host
-            if (layer->getCompositionType() == Composition::Device) {
-                int fence = layer->getLayerBuffer().getFence();
-                mReleaseLayerIds.push_back(layer->getId());
-                if (fence != -1) {
-                    int err = sync_wait(fence, 3000);
-                    if (err < 0 && errno == ETIME) {
-                        ALOGE("%s waited on fence %d for 3000 ms",
-                            __FUNCTION__, fence);
-                    }
-                    close(fence);
-                }
-                else {
-                    ALOGV("%s: acquire fence not set for layer %u",
-                          __FUNCTION__, (uint32_t)layer->getId());
-                }
-                const native_handle_t *cb =
-                    layer->getLayerBuffer().getBuffer();
-                if (cb != nullptr) {
-                    l->cbHandle = hostCon->grallocHelper()->getHostHandle(cb);
-                }
-                else {
-                    ALOGE("%s null buffer for layer %d", __FUNCTION__,
-                          (uint32_t)layer->getId());
-                }
-            }
-            else {
-                // solidcolor has no buffer
-                l->cbHandle = 0;
-            }
-            l->composeMode = (hwc2_composition_t)layer->getCompositionType();
-            l->displayFrame = layer->getDisplayFrame();
-            l->crop = layer->getSourceCrop();
-            l->blendMode = layer->getBlendMode();
-            l->alpha = layer->getPlaneAlpha();
-            l->color = layer->getColor();
-            l->transform = layer->getTransform();
-            ALOGV("   cb %d blendmode %d alpha %f %d %d %d %d z %d"
-                  " composeMode %d, transform %d",
-                  l->cbHandle, l->blendMode, l->alpha,
-                  l->displayFrame.left, l->displayFrame.top,
-                  l->displayFrame.right, l->displayFrame.bottom,
-                  layer->getZ(), l->composeMode, l->transform);
-            l++;
-        }
-        if (hostCompositionV1) {
-            p->version = 1;
-            p->targetHandle = hostCon->grallocHelper()->getHostHandle(mTargetCb);
-            p->numLayers = numLayer;
-        } else {
-            p2->version = 2;
-            p2->displayId = mHostDisplayId;
-            p2->targetHandle = hostCon->grallocHelper()->getHostHandle(mTargetCb);
-            p2->numLayers = numLayer;
-        }
-
-        hostCon->lock();
-        if (hostCompositionV1) {
-            rcEnc->rcCompose(rcEnc,
-                             sizeof(ComposeDevice) + numLayer * sizeof(ComposeLayer),
-                             (void *)p);
-        } else {
-            rcEnc->rcCompose(rcEnc,
-                             sizeof(ComposeDevice_v2) + numLayer * sizeof(ComposeLayer),
-                             (void *)p2);
-        }
-
-        hostCon->unlock();
-
-        // Send a retire fence and use it as the release fence for all layers,
-        // since media expects it
-        EGLint attribs[] = { EGL_SYNC_NATIVE_FENCE_ANDROID, EGL_NO_NATIVE_FENCE_FD_ANDROID };
-
-        uint64_t sync_handle, thread_handle;
-        int retire_fd;
-
-        hostCon->lock();
-        rcEnc->rcCreateSyncKHR(rcEnc, EGL_SYNC_NATIVE_FENCE_ANDROID,
-                attribs, 2 * sizeof(EGLint), true /* destroy when signaled */,
-                &sync_handle, &thread_handle);
-        hostCon->unlock();
-
-        goldfish_sync_queue_work(mSyncDeviceFd,
-                sync_handle, thread_handle, &retire_fd);
-
-        for (size_t i = 0; i < mReleaseLayerIds.size(); ++i) {
-            mReleaseFences.push_back(dup(retire_fd));
-        }
-
-        *outRetireFence = dup(retire_fd);
-        close(retire_fd);
-        hostCon->lock();
-        rcEnc->rcDestroySyncKHR(rcEnc, sync_handle);
-        hostCon->unlock();
-    } else {
-        // we set all layers Composition::Client, so do nothing.
-        post(hostCon, rcEnc, mClientTarget.getBuffer());
-        *outRetireFence = mClientTarget.getFence();
-        ALOGV("%s fallback to post, returns outRetireFence %d",
-              __FUNCTION__, *outRetireFence);
-    }
-
-    return Error::None;
-}
-
-Error EmuHWC2::Display::setActiveConfig(hwc2_config_t configId) {
-    ALOGVV("%s %u", __FUNCTION__, (uint32_t)configId);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    if (configId > mConfigs.size() || !mConfigs[configId]->isOnDisplay(*this)) {
-        ALOGW("%s: bad config (%u %u)", __FUNCTION__, (uint32_t)mId,
-              (uint32_t)configId);
-        return Error::BadConfig;
-    }
-    auto config = mConfigs[configId];
-    if (config == mActiveConfig) {
-        return Error::None;
-    }
-
-    mActiveConfig = config;
-    return Error::None;
-}
-
-Error EmuHWC2::Display::setClientTarget(buffer_handle_t target,
-        int32_t acquireFence, int32_t /*dataspace*/, hwc_region_t /*damage*/) {
-    ALOGVV("%s", __FUNCTION__);
-
-    std::unique_lock<std::mutex> lock(mStateMutex);
-    mClientTarget.setBuffer(target);
-    mClientTarget.setFence(acquireFence);
-    return Error::None;
-}
-
-Error EmuHWC2::Display::setColorMode(int32_t intMode) {
-    ALOGVV("%s %d", __FUNCTION__, intMode);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    auto mode = static_cast<android_color_mode_t>(intMode);
-    ALOGV("%s: (display %u mode %d)", __FUNCTION__, (uint32_t)mId, intMode);
-    if (mode == mActiveColorMode) {
-        return Error::None;
-    }
-    if (mColorModes.count(mode) == 0) {
-        ALOGE("%s: display %d Mode %d not found in mColorModes",
-             __FUNCTION__, (uint32_t)mId, intMode);
-        return Error::Unsupported;
-    }
-    mActiveColorMode = mode;
-    return Error::None;
-}
-
-Error EmuHWC2::Display::setColorTransform(const float* /*matrix*/,
-                                          int32_t hint) {
-    ALOGVV("%s hint %d", __FUNCTION__, hint);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-    //we force client composition if this is set
-    if (hint == 0 ) {
-        mSetColorTransform = false;
-    }
-    else {
-        mSetColorTransform = true;
-    }
-    return Error::None;
-}
-
-Error EmuHWC2::Display::setOutputBuffer(buffer_handle_t /*buffer*/,
-        int32_t /*releaseFence*/) {
-    ALOGVV("%s", __FUNCTION__);
-    //TODO: for virtual display
-    return Error::None;
-}
-
-static bool isValid(PowerMode mode) {
-    switch (mode) {
-        case PowerMode::Off: // Fall-through
-        case PowerMode::DozeSuspend: // Fall-through
-        case PowerMode::Doze: // Fall-through
-        case PowerMode::On: return true;
-        default: return false;
-    }
-}
-
-Error EmuHWC2::Display::setPowerMode(int32_t intMode) {
-    ALOGVV("%s", __FUNCTION__);
-    // Emulator always set screen ON
-    PowerMode mode = static_cast<PowerMode>(intMode);
-    if (!isValid(mode)) {
-        return Error::BadParameter;
-    }
-    if (mode == mPowerMode) {
-        return Error::None;
-    }
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    ALOGV("%s: (display %u mode %s)", __FUNCTION__,
-          (uint32_t)mId, to_string(mode).c_str());
-    mPowerMode = mode;
-    return Error::None;
-}
-
-static bool isValid(Vsync enable) {
-    switch (enable) {
-        case Vsync::Enable: // Fall-through
-        case Vsync::Disable: return true;
-        case Vsync::Invalid: return false;
-    }
-}
-
-Error EmuHWC2::Display::setVsyncEnabled(int32_t intEnable) {
-    ALOGVV("%s %d", __FUNCTION__, intEnable);
-    Vsync enable = static_cast<Vsync>(intEnable);
-    if (!isValid(enable)) {
-        return Error::BadParameter;
-    }
-    if (enable == mVsyncEnabled) {
-        return Error::None;
-    }
-
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    mVsyncEnabled = enable;
-    return Error::None;
-}
-
-Error EmuHWC2::Display::validate(uint32_t* outNumTypes,
-        uint32_t* outNumRequests) {
-    ALOGVV("%s", __FUNCTION__);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    if (!mChanges) {
-        mChanges.reset(new Changes);
-        DEFINE_AND_VALIDATE_HOST_CONNECTION
-        hostCon->lock();
-        bool hostCompositionV1 = rcEnc->hasHostCompositionV1();
-        bool hostCompositionV2 = rcEnc->hasHostCompositionV2();
-        hostCon->unlock();
-
-        if (hostCompositionV1 || hostCompositionV2) {
-            // Support Device and SolidColor, otherwise, fallback all layers
-            // to Client
-            bool fallBack = false;
-            for (auto& layer : mLayers) {
-                if (layer->getCompositionType() == Composition::Invalid) {
-                    // Log error for unused layers, layer leak?
-                    ALOGE("%s layer %u CompositionType(%d) not set",
-                          __FUNCTION__, (uint32_t)layer->getId(),
-                          layer->getCompositionType());
-                    continue;
-                }
-                if (layer->getCompositionType() == Composition::Client ||
-                    layer->getCompositionType() == Composition::Cursor ||
-                    layer->getCompositionType() == Composition::Sideband) {
-                    ALOGW("%s: layer %u CompositionType %d, fallback", __FUNCTION__,
-                         (uint32_t)layer->getId(), layer->getCompositionType());
-                    fallBack = true;
-                    break;
-                }
-            }
-            if (mSetColorTransform) {
-                fallBack = true;
-            }
-            if (fallBack) {
-                for (auto& layer : mLayers) {
-                    if (layer->getCompositionType() == Composition::Invalid) {
-                        continue;
-                    }
-                    if (layer->getCompositionType() != Composition::Client) {
-                        mChanges->addTypeChange(layer->getId(),
-                                                Composition::Client);
-                    }
-                }
-            }
-       }
-       else {
-            for (auto& layer : mLayers) {
-                if (layer->getCompositionType() != Composition::Client) {
-                    mChanges->addTypeChange(layer->getId(),
-                                            Composition::Client);
-                }
-            }
-        }
-    }
-    else {
-        ALOGE("Validate was called more than once!");
-    }
-
-    *outNumTypes = mChanges->getNumTypes();
-    *outNumRequests = mChanges->getNumLayerRequests();
-    ALOGV("%s: displayId %u types %u, requests %u", __FUNCTION__,
-          (uint32_t)mId, *outNumTypes, *outNumRequests);
-    return *outNumTypes > 0 ? Error::HasChanges : Error::None;
-}
-
-Error EmuHWC2::Display::updateLayerZ(hwc2_layer_t layerId, uint32_t z) {
-    ALOGVV("%s", __FUNCTION__);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    const auto mapLayer = mDevice.mLayers.find(layerId);
-    if (mapLayer == mDevice.mLayers.end()) {
-        ALOGE("%s failed to find layer %u", __FUNCTION__, (uint32_t)mId);
-        return Error::BadLayer;
-    }
-
-    const auto layer = mapLayer->second;
-    const auto zRange = mLayers.equal_range(layer);
-    bool layerOnDisplay = false;
-    for (auto current = zRange.first; current != zRange.second; ++current) {
-        if (**current == *layer) {
-            if ((*current)->getZ() == z) {
-                // Don't change anything if the Z hasn't changed
-                return Error::None;
-            }
-            current = mLayers.erase(current);
-            layerOnDisplay = true;
-            break;
-        }
-    }
-
-    if (!layerOnDisplay) {
-        ALOGE("%s failed to find layer %u on display", __FUNCTION__,
-              (uint32_t)mId);
-        return Error::BadLayer;
-    }
-
-    layer->setZ(z);
-    mLayers.emplace(std::move(layer));
-    return Error::None;
-}
-
-Error EmuHWC2::Display::getClientTargetSupport(uint32_t width, uint32_t height,
-                                      int32_t format, int32_t dataspace){
-    ALOGVV("%s", __FUNCTION__);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    if (mActiveConfig == nullptr) {
-        return Error::Unsupported;
-    }
-
-    if (width == (uint32_t)mActiveConfig->getAttribute(Attribute::Width) &&
-        height == (uint32_t)mActiveConfig->getAttribute(Attribute::Height) &&
-        format == HAL_PIXEL_FORMAT_RGBA_8888 &&
-        dataspace == HAL_DATASPACE_UNKNOWN) {
-        return Error::None;
-    }
-
-    return Error::None;
-}
-
-// thess EDIDs are carefully generated according to the EDID spec version 1.3, more info
-// can be found from the following file:
-//   frameworks/native/services/surfaceflinger/DisplayHardware/DisplayIdentification.cpp
-// approved pnp ids can be found here: https://uefi.org/pnp_id_list
-// pnp id: GGL, name: EMU_display_0, last byte is checksum
-// display id is local:8141603649153536
-static const uint8_t sEDID0[] = {
-    0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x1c, 0xec, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x1b, 0x10, 0x01, 0x03, 0x80, 0x50, 0x2d, 0x78, 0x0a, 0x0d, 0xc9, 0xa0, 0x57, 0x47, 0x98, 0x27,
-    0x12, 0x48, 0x4c, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
-    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
-    0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc,
-    0x00, 0x45, 0x4d, 0x55, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x30, 0x00, 0x4b
-};
-
-// pnp id: GGL, name: EMU_display_1
-// display id is local:8140900251843329
-static const uint8_t sEDID1[] = {
-    0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x1c, 0xec, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x1b, 0x10, 0x01, 0x03, 0x80, 0x50, 0x2d, 0x78, 0x0a, 0x0d, 0xc9, 0xa0, 0x57, 0x47, 0x98, 0x27,
-    0x12, 0x48, 0x4c, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
-    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
-    0x54, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc,
-    0x00, 0x45, 0x4d, 0x55, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x31, 0x00, 0x3b
-};
-
-// pnp id: GGL, name: EMU_display_2
-// display id is local:8140940453066754
-static const uint8_t sEDID2[] = {
-    0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x1c, 0xec, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00,
-    0x1b, 0x10, 0x01, 0x03, 0x80, 0x50, 0x2d, 0x78, 0x0a, 0x0d, 0xc9, 0xa0, 0x57, 0x47, 0x98, 0x27,
-    0x12, 0x48, 0x4c, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
-    0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
-    0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc,
-    0x00, 0x45, 0x4d, 0x55, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x32, 0x00, 0x49
-};
-
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
-
-Error EmuHWC2::Display::getDisplayIdentificationData(uint8_t* outPort,
-        uint32_t* outDataSize, uint8_t* outData) {
-    ALOGVV("%s DisplayId %u", __FUNCTION__, (uint32_t)mId);
-    if (outPort == nullptr || outDataSize == nullptr)
-        return Error::BadParameter;
-
-    uint32_t len = std::min(*outDataSize, (uint32_t)ARRAY_SIZE(sEDID0));
-    if (outData != nullptr && len < (uint32_t)ARRAY_SIZE(sEDID0)) {
-        ALOGW("%s DisplayId %u, small buffer size: %u is specified",
-                __FUNCTION__, (uint32_t)mId, len);
-    }
-    *outDataSize = ARRAY_SIZE(sEDID0);
-    switch (mId) {
-        case 0:
-            *outPort = 0;
-            if (outData)
-                memcpy(outData, sEDID0, len);
-            break;
-
-        case 1:
-            *outPort = 1;
-            if (outData)
-                memcpy(outData, sEDID1, len);
-            break;
-
-        case 2:
-            *outPort = 2;
-            if (outData)
-                memcpy(outData, sEDID2, len);
-            break;
-
-        default:
-            *outPort = (uint8_t)mId;
-            if (outData) {
-                memcpy(outData, sEDID2, len);
-                uint32_t size = ARRAY_SIZE(sEDID0);
-                // change the name to EMU_display_<mID>
-                // note the 3rd char from back is the number, _0, _1, _2, etc.
-                if (len >= size - 2)
-                    outData[size-3] = '0' + (uint8_t)mId;
-                if (len >= size) {
-                    // update the last byte, which is checksum byte
-                    uint8_t checksum = -(uint8_t)std::accumulate(
-                            outData, outData + size - 1, static_cast<uint8_t>(0));
-                    outData[size - 1] = checksum;
-                }
-            }
-            break;
-    }
-
-    return Error::None;
-}
-
-Error EmuHWC2::Display::getDisplayCapabilities(uint32_t* outNumCapabilities,
-        uint32_t* outCapabilities) {
-    if (outNumCapabilities == nullptr) {
-        return Error::None;
-    }
-
-    bool brightness_support = true;
-    bool doze_support = true;
-
-    uint32_t count = 1  + static_cast<uint32_t>(doze_support) + (brightness_support ? 1 : 0);
-    int index = 0;
-    if (outCapabilities != nullptr && (*outNumCapabilities >= count)) {
-        outCapabilities[index++] = HWC2_DISPLAY_CAPABILITY_SKIP_CLIENT_COLOR_TRANSFORM;
-        if (doze_support) {
-            outCapabilities[index++] = HWC2_DISPLAY_CAPABILITY_DOZE;
-        }
-        if (brightness_support) {
-            outCapabilities[index++] = HWC2_DISPLAY_CAPABILITY_BRIGHTNESS;
-       }
-    }
-
-    *outNumCapabilities = count;
-    return Error::None;
-}
-
-Error EmuHWC2::Display::getDisplayBrightnessSupport(bool *out_support) {
-    *out_support = false;
-    return Error::None;
-}
-
-Error EmuHWC2::Display::setDisplayBrightness(float brightness) {
-    ALOGW("TODO: setDisplayBrightness() is not implemented yet: brightness=%f", brightness);
-    return Error::None;
-}
-
-int EmuHWC2::Display::populatePrimaryConfigs(int width, int height, int dpiX, int dpiY) {
-    ALOGVV("%s DisplayId %u", __FUNCTION__, (uint32_t)mId);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    auto newConfig = std::make_shared<Config>(*this);
-    // vsync is 60 hz;
-    newConfig->setAttribute(Attribute::VsyncPeriod, mVsyncPeriod);
-    newConfig->setAttribute(Attribute::Width, width);
-    newConfig->setAttribute(Attribute::Height, height);
-    newConfig->setAttribute(Attribute::DpiX, dpiX * 1000);
-    newConfig->setAttribute(Attribute::DpiY, dpiY * 1000);
-
-    newConfig->setId(static_cast<hwc2_config_t>(mConfigs.size()));
-    ALOGV("Found new config %d: %s", (uint32_t)newConfig->getId(),
-            newConfig->toString().c_str());
-    mConfigs.emplace_back(std::move(newConfig));
-
-    // Only have single config so far, it is activeConfig
-    mActiveConfig = mConfigs[0];
-    mActiveColorMode = HAL_COLOR_MODE_NATIVE;
-    mColorModes.emplace((android_color_mode_t)HAL_COLOR_MODE_NATIVE);
-
-    mSyncDeviceFd = goldfish_sync_open();
-
-    return 0;
-}
-
-void EmuHWC2::Display::post(HostConnection *hostCon,
-                            ExtendedRCEncoderContext *rcEnc,
-                            buffer_handle_t h) {
-    assert(cb && "native_handle_t::from(h) failed");
-
-    hostCon->lock();
-    rcEnc->rcFBPost(rcEnc, hostCon->grallocHelper()->getHostHandle(h));
-    hostCon->flush();
-    hostCon->unlock();
-}
-
-HWC2::Error EmuHWC2::Display::populateSecondaryConfigs(uint32_t width, uint32_t height,
-        uint32_t dpi, uint32_t idx) {
-    ALOGVV("%s DisplayId %u, width %u, height %u, dpi %u",
-            __FUNCTION__, (uint32_t)mId, width, height, dpi);
-    std::unique_lock<std::mutex> lock(mStateMutex);
-
-    auto newConfig = std::make_shared<Config>(*this);
-    // vsync is 60 hz;
-    newConfig->setAttribute(Attribute::VsyncPeriod, mVsyncPeriod);
-    newConfig->setAttribute(Attribute::Width, width);
-    newConfig->setAttribute(Attribute::Height, height);
-    newConfig->setAttribute(Attribute::DpiX, dpi*1000);
-    newConfig->setAttribute(Attribute::DpiY, dpi*1000);
-
-    newConfig->setId(static_cast<hwc2_config_t>(mConfigs.size()));
-    ALOGV("Found new secondary config %d: %s", (uint32_t)newConfig->getId(),
-            newConfig->toString().c_str());
-    mConfigs.emplace_back(std::move(newConfig));
-
-    // we need to reset these values after populatePrimaryConfigs()
-    mActiveConfig = mConfigs[0];
-    mActiveColorMode = HAL_COLOR_MODE_NATIVE;
-    mColorModes.emplace((android_color_mode_t)HAL_COLOR_MODE_NATIVE);
-
-    uint32_t displayId = hostDisplayIdStart + idx;
-    DEFINE_AND_VALIDATE_HOST_CONNECTION
-
-    hostCon->lock();
-    rcEnc->rcDestroyDisplay(rcEnc, displayId);
-    rcEnc->rcCreateDisplay(rcEnc, &displayId);
-    rcEnc->rcSetDisplayPose(rcEnc, displayId, -1, -1, width, height);
-    hostCon->unlock();
-
-    if (displayId != hostDisplayIdStart + idx) {
-        ALOGE("Something wrong with host displayId allocation, want %d "
-              "allocated %d", hostDisplayIdStart + idx, displayId);
-    }
-    mHostDisplayId = displayId;
-    ALOGVV("%s: mHostDisplayId=%d", __FUNCTION__, mHostDisplayId);
-
-    return HWC2::Error::None;
-}
-
-
-// Config functions
-
-void EmuHWC2::Display::Config::setAttribute(Attribute attribute,
-       int32_t value) {
-    mAttributes[attribute] = value;
-}
-
-int32_t EmuHWC2::Display::Config::getAttribute(Attribute attribute) const {
-    if (mAttributes.count(attribute) == 0) {
-        return -1;
-    }
-    return mAttributes.at(attribute);
-}
-
-std::string EmuHWC2::Display::Config::toString() const {
-    std::string output;
-
-    const size_t BUFFER_SIZE = 100;
-    char buffer[BUFFER_SIZE] = {};
-    auto writtenBytes = snprintf(buffer, BUFFER_SIZE,
-            "%u x %u", mAttributes.at(HWC2::Attribute::Width),
-            mAttributes.at(HWC2::Attribute::Height));
-    output.append(buffer, writtenBytes);
-
-    if (mAttributes.count(HWC2::Attribute::VsyncPeriod) != 0) {
-        std::memset(buffer, 0, BUFFER_SIZE);
-        writtenBytes = snprintf(buffer, BUFFER_SIZE, " @ %.1f Hz",
-                1e9 / mAttributes.at(HWC2::Attribute::VsyncPeriod));
-        output.append(buffer, writtenBytes);
-    }
-
-    if (mAttributes.count(HWC2::Attribute::DpiX) != 0 &&
-            mAttributes.at(HWC2::Attribute::DpiX) != -1) {
-        std::memset(buffer, 0, BUFFER_SIZE);
-        writtenBytes = snprintf(buffer, BUFFER_SIZE,
-                ", DPI: %.1f x %.1f",
-                mAttributes.at(HWC2::Attribute::DpiX) / 1000.0f,
-                mAttributes.at(HWC2::Attribute::DpiY) / 1000.0f);
-        output.append(buffer, writtenBytes);
-    }
-
-    return output;
-}
-
-// VsyncThread function
-bool EmuHWC2::Display::VsyncThread::threadLoop() {
-    struct timespec rt;
-    if (clock_gettime(CLOCK_MONOTONIC, &rt) == -1) {
-        ALOGE("%s: error in vsync thread clock_gettime: %s",
-              __FUNCTION__, strerror(errno));
-        return true;
-    }
-    const int logInterval = 60;
-    int64_t lastLogged = rt.tv_sec;
-    int sent = 0;
-    int lastSent = 0;
-    bool vsyncEnabled = false;
-
-    struct timespec wait_time;
-    wait_time.tv_sec = 0;
-    wait_time.tv_nsec = mDisplay.mVsyncPeriod;
-    const int64_t kOneRefreshNs = mDisplay.mVsyncPeriod;
-    const int64_t kOneSecondNs = 1000ULL * 1000ULL * 1000ULL;
-    int64_t lastTimeNs = -1;
-    int64_t phasedWaitNs = 0;
-    int64_t currentNs = 0;
-
-    while (true) {
-        clock_gettime(CLOCK_MONOTONIC, &rt);
-        currentNs = rt.tv_nsec + rt.tv_sec * kOneSecondNs;
-
-        if (lastTimeNs < 0) {
-            phasedWaitNs = currentNs + kOneRefreshNs;
-        } else {
-            phasedWaitNs = kOneRefreshNs *
-                (( currentNs - lastTimeNs) / kOneRefreshNs + 1) +
-                lastTimeNs;
-        }
-
-        wait_time.tv_sec = phasedWaitNs / kOneSecondNs;
-        wait_time.tv_nsec = phasedWaitNs - wait_time.tv_sec * kOneSecondNs;
-
-        int ret;
-        do {
-            ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &wait_time, NULL);
-        } while (ret == -1 && errno == EINTR);
-
-        lastTimeNs = phasedWaitNs;
-
-        std::unique_lock<std::mutex> lock(mDisplay.mStateMutex);
-        vsyncEnabled = (mDisplay.mVsyncEnabled == Vsync::Enable);
-        lock.unlock();
-
-        if (!vsyncEnabled) {
-            continue;
-        }
-
-        lock.lock();
-        const auto& callbackInfo = mDisplay.mDevice.mCallbacks[Callback::Vsync];
-        auto vsync = reinterpret_cast<HWC2_PFN_VSYNC>(callbackInfo.pointer);
-        lock.unlock();
-
-        if (vsync) {
-            vsync(callbackInfo.data, mDisplay.mId, lastTimeNs);
-        }
-
-        if (rt.tv_sec - lastLogged >= logInterval) {
-            ALOGVV("sent %d syncs in %ds", sent - lastSent, rt.tv_sec - lastLogged);
-            lastLogged = rt.tv_sec;
-            lastSent = sent;
-        }
-        ++sent;
-    }
-    return false;
-}
-
-
-// Layer functions
-bool EmuHWC2::SortLayersByZ::operator()(const std::shared_ptr<Layer>& lhs,
-        const std::shared_ptr<Layer>& rhs) const {
-    return lhs->getZ() < rhs->getZ();
-}
-
-std::atomic<hwc2_layer_t> EmuHWC2::Layer::sNextId(1);
-
-EmuHWC2::Layer::Layer(Display& display)
-  : mId(sNextId++),
-    mDisplay(display),
-    mBuffer(),
-    mSurfaceDamage(),
-    mBlendMode(BlendMode::None),
-    mColor({0, 0, 0, 0}),
-    mCompositionType(Composition::Invalid),
-    mDisplayFrame({0, 0, -1, -1}),
-    mPlaneAlpha(0.0f),
-    mSidebandStream(nullptr),
-    mSourceCrop({0.0f, 0.0f, -1.0f, -1.0f}),
-    mTransform(Transform::None),
-    mVisibleRegion(),
-    mZ(0)
-    {}
-
-Error EmuHWC2::Layer::setBuffer(buffer_handle_t buffer,
-        int32_t acquireFence) {
-    ALOGVV("%s: Setting acquireFence %d for layer %u", __FUNCTION__,
-          acquireFence, (uint32_t)mId);
-    mBuffer.setBuffer(buffer);
-    mBuffer.setFence(acquireFence);
-    return Error::None;
-}
-
-Error EmuHWC2::Layer::setCursorPosition(int32_t /*x*/,
-                                        int32_t /*y*/) {
-    ALOGVV("%s layer %u", __FUNCTION__, (uint32_t)mId);
-    if (mCompositionType != Composition::Cursor) {
-        ALOGE("%s: CompositionType not Cursor type", __FUNCTION__);
-        return Error::BadLayer;
-    }
-   //TODO
-    return Error::None;
-}
-
-Error EmuHWC2::Layer::setSurfaceDamage(hwc_region_t /*damage*/) {
-    // Emulator redraw whole layer per frame, so ignore this.
-    ALOGVV("%s", __FUNCTION__);
-    return Error::None;
-}
-
-// Layer state functions
-
-Error EmuHWC2::Layer::setBlendMode(int32_t mode) {
-    ALOGVV("%s %d for layer %u", __FUNCTION__, mode, (uint32_t)mId);
-    mBlendMode = static_cast<BlendMode>(mode);
-    return Error::None;
-}
-
-Error EmuHWC2::Layer::setColor(hwc_color_t color) {
-    ALOGVV("%s layer %u %d", __FUNCTION__, (uint32_t)mId, color);
-    mColor = color;
-    return Error::None;
-}
-
-Error EmuHWC2::Layer::setCompositionType(int32_t type) {
-    ALOGVV("%s layer %u %u", __FUNCTION__, (uint32_t)mId, type);
-    mCompositionType = static_cast<Composition>(type);
-    return Error::None;
-}
-
-Error EmuHWC2::Layer::setDataspace(int32_t) {
-    ALOGVV("%s", __FUNCTION__);
-    return Error::None;
-}
-
-Error EmuHWC2::Layer::setDisplayFrame(hwc_rect_t frame) {
-    ALOGVV("%s layer %u", __FUNCTION__, (uint32_t)mId);
-    mDisplayFrame = frame;
-    return Error::None;
-}
-
-Error EmuHWC2::Layer::setPlaneAlpha(float alpha) {
-    ALOGVV("%s layer %u %f", __FUNCTION__, (uint32_t)mId, alpha);
-    mPlaneAlpha = alpha;
-    return Error::None;
-}
-
-Error EmuHWC2::Layer::setSidebandStream(const native_handle_t* stream) {
-    ALOGVV("%s layer %u", __FUNCTION__, (uint32_t)mId);
-    mSidebandStream = stream;
-    return Error::None;
-}
-
-Error EmuHWC2::Layer::setSourceCrop(hwc_frect_t crop) {
-    ALOGVV("%s layer %u", __FUNCTION__, (uint32_t)mId);
-    mSourceCrop = crop;
-    return Error::None;
-}
-
-Error EmuHWC2::Layer::setTransform(int32_t transform) {
-    ALOGVV("%s layer %u", __FUNCTION__, (uint32_t)mId);
-    mTransform = static_cast<Transform>(transform);
-    return Error::None;
-}
-
-static bool compareRects(const hwc_rect_t& rect1, const hwc_rect_t& rect2) {
-    return rect1.left == rect2.left &&
-            rect1.right == rect2.right &&
-            rect1.top == rect2.top &&
-            rect1.bottom == rect2.bottom;
-}
-
-Error EmuHWC2::Layer::setVisibleRegion(hwc_region_t visible) {
-    ALOGVV("%s", __FUNCTION__);
-    if ((getNumVisibleRegions() != visible.numRects) ||
-        !std::equal(mVisibleRegion.begin(), mVisibleRegion.end(), visible.rects,
-                    compareRects)) {
-        mVisibleRegion.resize(visible.numRects);
-        std::copy_n(visible.rects, visible.numRects, mVisibleRegion.begin());
-    }
-    return Error::None;
-}
-
-Error EmuHWC2::Layer::setZ(uint32_t z) {
-    ALOGVV("%s layer %u %d", __FUNCTION__, (uint32_t)mId, z);
-    mZ = z;
-    return Error::None;
-}
-
-// Adaptor Helpers
-
-void EmuHWC2::populateCapabilities() {
-    //TODO: add Capabilities
-    // support virtualDisplay
-    // support sideBandStream
-    // support backGroundColor
-    // we should not set this for HWC2, TODO: remove
-    // mCapabilities.insert(Capability::PresentFenceIsNotReliable);
-}
-
-int EmuHWC2::populatePrimary() {
-    int ret = 0;
-    auto display = std::make_shared<Display>(*this, HWC2::DisplayType::Physical);
-    ret = display->populatePrimaryConfigs(mDisplayWidth, mDisplayHeight,
-                                          mDisplayDpiX, mDisplayDpiY);
-    if (ret != 0) {
-        return ret;
-    }
-    mDisplays.emplace(display->getId(), std::move(display));
-    return ret;
-}
-
-// Note "hwservicemanager." is used to avoid selinux issue
-#define EXTERANL_DISPLAY_PROP "hwservicemanager.external.displays"
-
-// return 0 for successful, 1 if no external displays are specified
-// return < 0 if failed
-int EmuHWC2::populateSecondaryDisplays() {
-    // this guest property, hwservicemanager.external.displays,
-    // specifies multi-display info, with comma (,) as separator
-    // each display has the following info:
-    //   physicalId,width,height,dpi,flags
-    // serveral displays can be provided, e.g., following has 2 displays:
-    // setprop hwservicemanager.external.displays 1,1200,800,120,0,2,1200,800,120,0
-    std::vector<uint64_t> values;
-    char displaysValue[PROPERTY_VALUE_MAX] = "";
-    property_get(EXTERANL_DISPLAY_PROP, displaysValue, "");
-    bool isValid = displaysValue[0] != '\0';
-    if (isValid) {
-        char *p = displaysValue;
-        while (*p) {
-            if (!isdigit(*p) && *p != ',' && *p != ' ') {
-                isValid = false;
-                break;
-            }
-            p ++;
-        }
-        if (!isValid) {
-            ALOGE("Invalid syntax for the value of system prop: %s", EXTERANL_DISPLAY_PROP);
-        }
-    }
-    if (!isValid) {
-        // no external displays are specified
-        return 1;
-    }
-    // parse all int values to a vector
-    std::istringstream stream(displaysValue);
-    for (uint64_t id; stream >> id;) {
-        values.push_back(id);
-        if (stream.peek() == ',')
-            stream.ignore();
-    }
-    // each display has 5 values
-    if ((values.size() % 5) != 0) {
-        ALOGE("%s: invalid value for system property: %s", __FUNCTION__, EXTERANL_DISPLAY_PROP);
-        return -1;
-    }
-    uint32_t idx = 0;
-    while (!values.empty()) {
-        // uint64_t physicalId = values[0];
-        uint32_t width = values[1];
-        uint32_t height = values[2];
-        uint32_t dpi = values[3];
-        // uint32_t flags = values[4];
-        values.erase(values.begin(), values.begin() + 5);
-
-        Error ret = Error::None;
-        auto display = std::make_shared<Display>(*this, HWC2::DisplayType::Physical);
-        ret = display->populateSecondaryConfigs(width, height, dpi, idx++);
-        if (ret != Error::None) {
-            return -2;
-        }
-        mDisplays.emplace(display->getId(), std::move(display));
-    }
-    return 0;
-}
-
-EmuHWC2::Display* EmuHWC2::getDisplay(hwc2_display_t id) {
-    auto display = mDisplays.find(id);
-    if (display == mDisplays.end()) {
-        ALOGE("Failed to get display for id=%d", (uint32_t)id);
-        return nullptr;
-    }
-    return display->second.get();
-}
-
-std::tuple<EmuHWC2::Layer*, Error> EmuHWC2::getLayer(
-        hwc2_display_t displayId, hwc2_layer_t layerId) {
-    auto display = getDisplay(displayId);
-    if (!display) {
-        ALOGE("%s: Fail to find display %d", __FUNCTION__, (uint32_t)displayId);
-        return std::make_tuple(static_cast<Layer*>(nullptr), Error::BadDisplay);
-    }
-
-    auto layerEntry = mLayers.find(layerId);
-    if (layerEntry == mLayers.end()) {
-        ALOGE("%s: Fail to find layer %d", __FUNCTION__, (uint32_t)layerId);
-        return std::make_tuple(static_cast<Layer*>(nullptr), Error::BadLayer);
-    }
-
-    auto layer = layerEntry->second;
-    if (layer->getDisplay().getId() != displayId) {
-        ALOGE("%s: layer %d not belongs to display %d", __FUNCTION__,
-              (uint32_t)layerId, (uint32_t)displayId);
-        return std::make_tuple(static_cast<Layer*>(nullptr), Error::BadLayer);
-    }
-    return std::make_tuple(layer.get(), Error::None);
-}
-
-static int hwc2DevOpen(const struct hw_module_t *module, const char *name,
-        struct hw_device_t **dev) {
-    ALOGVV("%s ", __FUNCTION__);
-    if (strcmp(name, HWC_HARDWARE_COMPOSER)) {
-        ALOGE("Invalid module name- %s", name);
-        return -EINVAL;
-    }
-
-    EmuHWC2* ctx = new EmuHWC2();
-    if (!ctx) {
-        ALOGE("Failed to allocate EmuHWC2");
-        return -ENOMEM;
-    }
-    int ret = ctx->populatePrimary();
-    if (ret != 0) {
-        ALOGE("Failed to populate primary display");
-        return ret;
-    }
-
-    ret = ctx->populateSecondaryDisplays();
-    if (ret < 0) {
-        ALOGE("Failed to populate secondary displays");
-        return ret;
-    }
-
-    ctx->common.module = const_cast<hw_module_t *>(module);
-    *dev = &ctx->common;
-    return 0;
-}
-}
-
-static struct hw_module_methods_t hwc2_module_methods = {
-    .open = android::hwc2DevOpen
-};
-
-hw_module_t HAL_MODULE_INFO_SYM = {
-    .tag = HARDWARE_MODULE_TAG,
-    .version_major = 2,
-    .version_minor = 0,
-    .id = HWC_HARDWARE_MODULE_ID,
-    .name = "goldfish HWC2 module",
-    .author = "The Android Open Source Project",
-    .methods = &hwc2_module_methods,
-    .dso = NULL,
-    .reserved = {0},
-};
diff --git a/system/hwc2/EmuHWC2.h b/system/hwc2/EmuHWC2.h
deleted file mode 100644
index 07218b6..0000000
--- a/system/hwc2/EmuHWC2.h
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HW_EMU_HWC2_H
-#define ANDROID_HW_EMU_HWC2_H
-
-#define HWC2_INCLUDE_STRINGIFICATION
-#define HWC2_USE_CPP11
-#include <hardware/hwcomposer2.h>
-#undef HWC2_INCLUDE_STRINGIFICATION
-#undef HWC2_USE_CPP11
-#include <utils/Thread.h>
-
-#include <android-base/unique_fd.h>
-#include <atomic>
-#include <map>
-#include <memory>
-#include <mutex>
-#include <numeric>
-#include <sstream>
-#include <vector>
-#include <unordered_set>
-#include <unordered_map>
-#include <set>
-
-#include <cutils/native_handle.h>
-
-#include "HostConnection.h"
-
-namespace android {
-
-class EmuHWC2 : public hwc2_device_t {
-public:
-    EmuHWC2();
-    int populatePrimary();
-    int populateSecondaryDisplays();
-
-private:
-    static inline EmuHWC2* getHWC2(hwc2_device_t* device) {
-        return static_cast<EmuHWC2*>(device);
-    }
-
-    static int closeHook(hw_device_t* device) {
-        EmuHWC2 *ctx = reinterpret_cast<EmuHWC2*>(device);
-        delete ctx;
-        return 0;
-    }
-
-    // getCapabilities
-    void doGetCapabilities(uint32_t* outCount, int32_t* outCapabilities);
-    static void getCapabilitiesHook(hwc2_device_t* device, uint32_t* outCount,
-                                int32_t* outCapabilities) {
-        getHWC2(device)->doGetCapabilities(outCount, outCapabilities);
-    }
-
-    // getFunction
-    hwc2_function_pointer_t doGetFunction(HWC2::FunctionDescriptor descriptor);
-    static hwc2_function_pointer_t getFunctionHook(hwc2_device_t* device,
-            int32_t desc) {
-        auto descriptor = static_cast<HWC2::FunctionDescriptor>(desc);
-        return getHWC2(device)->doGetFunction(descriptor);
-    }
-
-    // Device functions
-    HWC2::Error createVirtualDisplay(uint32_t width, uint32_t height,
-            int32_t* format, hwc2_display_t* outDisplay);
-    static int32_t createVirtualDisplayHook(hwc2_device_t* device,
-            uint32_t width, uint32_t height, int32_t* format,
-            hwc2_display_t* outDisplay) {
-        auto error = getHWC2(device)->createVirtualDisplay(width, height,
-                format, outDisplay);
-        return static_cast<int32_t>(error);
-    }
-
-    HWC2::Error destroyVirtualDisplay(hwc2_display_t display);
-    static int32_t destroyVirtualDisplayHook(hwc2_device_t* device,
-            hwc2_display_t display) {
-        auto error = getHWC2(device)->destroyVirtualDisplay(display);
-        return static_cast<int32_t>(error);
-    }
-
-    std::string mDumpString;
-    void dump(uint32_t* outSize, char* outBuffer);
-    static void dumpHook(hwc2_device_t* device, uint32_t* outSize,
-            char* outBuffer) {
-        getHWC2(device)->dump(outSize, outBuffer);
-    }
-
-    uint32_t getMaxVirtualDisplayCount();
-    static uint32_t getMaxVirtualDisplayCountHook(hwc2_device_t* device) {
-        return getHWC2(device)->getMaxVirtualDisplayCount();
-    }
-
-    HWC2::Error registerCallback(HWC2::Callback descriptor,
-            hwc2_callback_data_t callbackData, hwc2_function_pointer_t pointer);
-    static int32_t registerCallbackHook(hwc2_device_t* device,
-            int32_t intDesc, hwc2_callback_data_t callbackData,
-            hwc2_function_pointer_t pointer) {
-        auto descriptor = static_cast<HWC2::Callback>(intDesc);
-        auto error = getHWC2(device)->registerCallback(descriptor,
-                callbackData, pointer);
-        return static_cast<int32_t>(error);
-    }
-
-    class Layer;
-    class SortLayersByZ {
-    public:
-        bool operator()(const std::shared_ptr<Layer>& lhs,
-                    const std::shared_ptr<Layer>& rhs) const;
-    };
-
-    // SurfaceFlinger sets the ColorBuffer and its Fence handler for each
-    // layer. This class is a container for these two.
-    class FencedBuffer {
-        public:
-            FencedBuffer() : mBuffer(nullptr) {}
-
-            void setBuffer(buffer_handle_t buffer) { mBuffer = buffer; }
-            void setFence(int fenceFd) {
-                mFence = std::make_shared<base::unique_fd>(fenceFd);
-            }
-
-            buffer_handle_t getBuffer() const { return mBuffer; }
-            int getFence() const { return mFence ? dup(mFence->get()) : -1; }
-
-        private:
-            buffer_handle_t mBuffer;
-            std::shared_ptr<base::unique_fd> mFence;
-    };
-
-    typedef struct compose_layer {
-        uint32_t cbHandle;
-        hwc2_composition_t composeMode;
-        hwc_rect_t displayFrame;
-        hwc_frect_t crop;
-        int32_t blendMode;
-        float alpha;
-        hwc_color_t color;
-        hwc_transform_t transform;
-    } ComposeLayer;
-    typedef struct compose_device {
-        uint32_t version;
-        uint32_t targetHandle;
-        uint32_t numLayers;
-        struct compose_layer layer[0];
-    } ComposeDevice;
-    typedef struct compose_device_v2 {
-        uint32_t version;
-        uint32_t displayId;
-        uint32_t targetHandle;
-        uint32_t numLayers;
-        struct compose_layer layer[0];
-    } ComposeDevice_v2;
-
-    class ComposeMsg {
-    public:
-        ComposeMsg(uint32_t layerCnt = 0) :
-          mData(sizeof(ComposeDevice) + layerCnt * sizeof(ComposeLayer))
-        {
-            mComposeDevice = reinterpret_cast<ComposeDevice*>(mData.data());
-            mLayerCnt = layerCnt;
-        }
-
-        ComposeDevice* get() { return mComposeDevice; }
-
-        uint32_t getLayerCnt() { return mLayerCnt; }
-
-    private:
-        std::vector<uint8_t> mData;
-        uint32_t mLayerCnt;
-        ComposeDevice* mComposeDevice;
-    };
-
-    class ComposeMsg_v2 {
-    public:
-        ComposeMsg_v2(uint32_t layerCnt = 0) :
-          mData(sizeof(ComposeDevice_v2) + layerCnt * sizeof(ComposeLayer))
-        {
-            mComposeDevice = reinterpret_cast<ComposeDevice_v2*>(mData.data());
-            mLayerCnt = layerCnt;
-        }
-
-        ComposeDevice_v2* get() { return mComposeDevice; }
-
-        uint32_t getLayerCnt() { return mLayerCnt; }
-
-    private:
-        std::vector<uint8_t> mData;
-        uint32_t mLayerCnt;
-        ComposeDevice_v2* mComposeDevice;
-    };
-
-    class Display {
-    public:
-        Display(EmuHWC2& device, HWC2::DisplayType type);
-        ~Display();
-        hwc2_display_t getId() const {return mId;}
-
-        // HWC2 Display functions
-        HWC2::Error acceptChanges();
-        HWC2::Error createLayer(hwc2_layer_t* outLayerId);
-        HWC2::Error destroyLayer(hwc2_layer_t layerId);
-        HWC2::Error getActiveConfig(hwc2_config_t* outConfigId);
-        HWC2::Error getDisplayAttribute(hwc2_config_t configId,
-                int32_t attribute, int32_t* outValue);
-        HWC2::Error getChangedCompositionTypes(uint32_t* outNumElements,
-                hwc2_layer_t* outLayers, int32_t* outTypes);
-        HWC2::Error getColorModes(uint32_t* outNumModes, int32_t* outModes);
-        HWC2::Error getConfigs(uint32_t* outNumConfigs,
-                hwc2_config_t* outConfigIds);
-        HWC2::Error getDozeSupport(int32_t* outSupport);
-        HWC2::Error getHdrCapabilities(uint32_t* outNumTypes,
-                int32_t* outTypes, float* outMaxLuminance,
-                float* outMaxAverageLuminance, float* outMinLuminance);
-        HWC2::Error getName(uint32_t* outSize, char* outName);
-        HWC2::Error getReleaseFences(uint32_t* outNumElements,
-                hwc2_layer_t* outLayers, int32_t* outFences);
-        HWC2::Error getRequests(int32_t* outDisplayRequests,
-                uint32_t* outNumElements, hwc2_layer_t* outLayers,
-                int32_t* outLayerRequests);
-        HWC2::Error getType(int32_t* outType);
-        HWC2::Error present(int32_t* outRetireFence);
-        HWC2::Error setActiveConfig(hwc2_config_t configId);
-        HWC2::Error setClientTarget(buffer_handle_t target,
-                int32_t acquireFence, int32_t dataspace,
-                hwc_region_t damage);
-        HWC2::Error setColorMode(int32_t mode);
-        HWC2::Error setColorTransform(const float* matrix,
-                int32_t hint);
-        HWC2::Error setOutputBuffer(buffer_handle_t buffer,
-                int32_t releaseFence);
-        HWC2::Error setPowerMode(int32_t mode);
-        HWC2::Error setVsyncEnabled(int32_t enabled);
-        HWC2::Error validate(uint32_t* outNumTypes,
-                uint32_t* outNumRequests);
-        HWC2::Error updateLayerZ(hwc2_layer_t layerId, uint32_t z);
-        HWC2::Error getClientTargetSupport(uint32_t width, uint32_t height,
-                 int32_t format, int32_t dataspace);
-        // 2.3 required functions
-        HWC2::Error getDisplayIdentificationData(uint8_t* outPort,
-                 uint32_t* outDataSize, uint8_t* outData);
-        HWC2::Error getDisplayCapabilities(uint32_t* outNumCapabilities,
-                 uint32_t* outCapabilities);
-        HWC2::Error getDisplayBrightnessSupport(bool *out_support);
-        HWC2::Error setDisplayBrightness(float brightness);
-
-        // Read configs from PRIMARY Display
-        int populatePrimaryConfigs(int width, int height, int dpiX, int dpiY);
-        HWC2::Error populateSecondaryConfigs(uint32_t width, uint32_t height,
-                 uint32_t dpi, uint32_t idx);
-
-    private:
-        void post(HostConnection *hostCon, ExtendedRCEncoderContext *rcEnc,
-                  buffer_handle_t h);
-
-        class Config {
-        public:
-            Config(Display& display)
-              : mDisplay(display),
-                mId(0),
-                mAttributes() {}
-
-            bool isOnDisplay(const Display& display) const {
-                return display.getId() == mDisplay.getId();
-            }
-            void setAttribute(HWC2::Attribute attribute, int32_t value);
-            int32_t getAttribute(HWC2::Attribute attribute) const;
-            void setId(hwc2_config_t id) {mId = id; }
-            hwc2_config_t getId() const {return mId; }
-            std::string toString() const;
-
-        private:
-            Display& mDisplay;
-            hwc2_config_t mId;
-            std::unordered_map<HWC2::Attribute, int32_t> mAttributes;
-        };
-
-        // Stores changes requested from the device upon calling prepare().
-        // Handles change request to:
-        //   - Layer composition type.
-        //   - Layer hints.
-        class Changes {
-            public:
-                uint32_t getNumTypes() const {
-                    return static_cast<uint32_t>(mTypeChanges.size());
-                }
-
-                uint32_t getNumLayerRequests() const {
-                    return static_cast<uint32_t>(mLayerRequests.size());
-                }
-
-                const std::unordered_map<hwc2_layer_t, HWC2::Composition>&
-                        getTypeChanges() const {
-                    return mTypeChanges;
-                }
-
-                const std::unordered_map<hwc2_layer_t, HWC2::LayerRequest>&
-                        getLayerRequests() const {
-                    return mLayerRequests;
-                }
-
-                void addTypeChange(hwc2_layer_t layerId,
-                        HWC2::Composition type) {
-                    mTypeChanges.insert({layerId, type});
-                }
-
-                void clearTypeChanges() { mTypeChanges.clear(); }
-
-                void addLayerRequest(hwc2_layer_t layerId,
-                        HWC2::LayerRequest request) {
-                    mLayerRequests.insert({layerId, request});
-                }
-
-            private:
-                std::unordered_map<hwc2_layer_t, HWC2::Composition>
-                        mTypeChanges;
-                std::unordered_map<hwc2_layer_t, HWC2::LayerRequest>
-                        mLayerRequests;
-        };
-
-        // Generate sw vsync signal
-        class VsyncThread : public Thread {
-        public:
-            VsyncThread(Display& display)
-              : mDisplay(display) {}
-            virtual ~VsyncThread() {}
-        private:
-            Display& mDisplay;
-            bool threadLoop() final;
-        };
-
-    private:
-        EmuHWC2& mDevice;
-        // Display ID generator.
-        static std::atomic<hwc2_display_t> sNextId;
-        static const uint32_t hostDisplayIdStart = 6;
-        const hwc2_display_t mId;
-        // emulator side displayId
-        uint32_t mHostDisplayId;
-        std::string mName;
-        HWC2::DisplayType mType;
-        HWC2::PowerMode mPowerMode;
-        HWC2::Vsync mVsyncEnabled;
-        uint32_t mVsyncPeriod;
-        VsyncThread mVsyncThread;
-        FencedBuffer mClientTarget;
-        // Will only be non-null after the Display has been validated and
-        // before it has been presented
-        std::unique_ptr<Changes> mChanges;
-        // All layers this Display is aware of.
-        std::multiset<std::shared_ptr<Layer>, SortLayersByZ> mLayers;
-        std::vector<hwc2_display_t> mReleaseLayerIds;
-        std::vector<int32_t> mReleaseFences;
-        std::vector<std::shared_ptr<Config>> mConfigs;
-        std::shared_ptr<const Config> mActiveConfig;
-        std::set<android_color_mode_t> mColorModes;
-        android_color_mode_t mActiveColorMode;
-        bool mSetColorTransform;
-        // The state of this display should only be modified from
-        // SurfaceFlinger's main loop, with the exception of when dump is
-        // called. To prevent a bad state from crashing us during a dump
-        // call, all public calls into Display must acquire this mutex.
-        mutable std::mutex mStateMutex;
-        std::unique_ptr<ComposeMsg> mComposeMsg;
-        std::unique_ptr<ComposeMsg_v2> mComposeMsg_v2;
-        int mSyncDeviceFd;
-        const native_handle_t* mTargetCb;
-    };
-
-    template<typename MF, MF memFunc, typename ...Args>
-    static int32_t displayHook(hwc2_device_t* device, hwc2_display_t displayId,
-            Args... args) {
-        auto display = getHWC2(device)->getDisplay(displayId);
-        if (!display) {
-            return static_cast<int32_t>(HWC2::Error::BadDisplay);
-        }
-        auto error = ((*display).*memFunc)(std::forward<Args>(args)...);
-        return static_cast<int32_t>(error);
-    }
-
-    class Layer {
-    public:
-        explicit Layer(Display& display);
-        Display& getDisplay() const {return mDisplay;}
-        hwc2_layer_t getId() const {return mId;}
-        bool operator==(const Layer& other) { return mId == other.mId; }
-        bool operator!=(const Layer& other) { return !(*this == other); }
-
-        // HWC2 Layer functions
-        HWC2::Error setBuffer(buffer_handle_t buffer, int32_t acquireFence);
-        HWC2::Error setCursorPosition(int32_t x, int32_t y);
-        HWC2::Error setSurfaceDamage(hwc_region_t damage);
-
-        // HWC2 Layer state functions
-        HWC2::Error setBlendMode(int32_t mode);
-        HWC2::Error setColor(hwc_color_t color);
-        HWC2::Error setCompositionType(int32_t type);
-        HWC2::Error setDataspace(int32_t dataspace);
-        HWC2::Error setDisplayFrame(hwc_rect_t frame);
-        HWC2::Error setPlaneAlpha(float alpha);
-        HWC2::Error setSidebandStream(const native_handle_t* stream);
-        HWC2::Error setSourceCrop(hwc_frect_t crop);
-        HWC2::Error setTransform(int32_t transform);
-        HWC2::Error setVisibleRegion(hwc_region_t visible);
-        HWC2::Error setZ(uint32_t z);
-
-        HWC2::Composition getCompositionType() const {
-            return mCompositionType;
-        }
-        hwc_color_t getColor() {return mColor; }
-        uint32_t getZ() {return mZ; }
-        std::size_t getNumVisibleRegions() {return mVisibleRegion.size(); }
-        FencedBuffer& getLayerBuffer() {return mBuffer; }
-        int32_t getBlendMode() {return (int32_t)mBlendMode; }
-        float getPlaneAlpha() {return mPlaneAlpha; }
-        hwc_frect_t getSourceCrop() {return mSourceCrop; }
-        hwc_rect_t getDisplayFrame() {return mDisplayFrame; }
-        hwc_transform_t getTransform() {return (hwc_transform_t)mTransform; }
-    private:
-        static std::atomic<hwc2_layer_t> sNextId;
-        const hwc2_layer_t mId;
-        Display& mDisplay;
-        FencedBuffer mBuffer;
-        std::vector<hwc_rect_t> mSurfaceDamage;
-
-        HWC2::BlendMode mBlendMode;
-        hwc_color_t mColor;
-        HWC2::Composition mCompositionType;
-        hwc_rect_t mDisplayFrame;
-        float mPlaneAlpha;
-        const native_handle_t* mSidebandStream;
-        hwc_frect_t mSourceCrop;
-        HWC2::Transform mTransform;
-        std::vector<hwc_rect_t> mVisibleRegion;
-        uint32_t mZ;
-    };
-
-    template <typename MF, MF memFunc, typename ...Args>
-    static int32_t layerHook(hwc2_device_t* device, hwc2_display_t displayId,
-            hwc2_layer_t layerId, Args... args) {
-        auto result = getHWC2(device)->getLayer(displayId, layerId);
-        auto error = std::get<HWC2::Error>(result);
-        if (error == HWC2::Error::None) {
-            auto layer = std::get<Layer*>(result);
-            error = ((*layer).*memFunc)(std::forward<Args>(args)...);
-        }
-        return static_cast<int32_t>(error);
-    }
-
-    // helpers
-    void populateCapabilities();
-    Display* getDisplay(hwc2_display_t id);
-    std::tuple<Layer*, HWC2::Error> getLayer(hwc2_display_t displayId,
-            hwc2_layer_t layerId);
-
-    HWC2::Error initDisplayParameters();
-    const native_handle_t* allocateDisplayColorBuffer();
-    void freeDisplayColorBuffer(const native_handle_t* h);
-
-    std::unordered_set<HWC2::Capability> mCapabilities;
-
-    // These are potentially accessed from multiple threads, and are protected
-    // by this mutex.
-    std::mutex mStateMutex;
-
-    struct CallbackInfo {
-        hwc2_callback_data_t data;
-        hwc2_function_pointer_t pointer;
-    };
-    std::unordered_map<HWC2::Callback, CallbackInfo> mCallbacks;
-
-    // use map so displays can be pluged in by order of ID, 0, 1, 2, 3, etc.
-    std::map<hwc2_display_t, std::shared_ptr<Display>> mDisplays;
-    std::unordered_map<hwc2_layer_t, std::shared_ptr<Layer>> mLayers;
-
-    int mDisplayWidth;
-    int mDisplayHeight;
-    int mDisplayDpiX;
-    int mDisplayDpiY;
-};
-
-}
-#endif
diff --git a/system/hwc2/FencedBuffer.h b/system/hwc2/FencedBuffer.h
new file mode 100644
index 0000000..dcbd094
--- /dev/null
+++ b/system/hwc2/FencedBuffer.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HWC_FENCEDBUFFER_H
+#define ANDROID_HWC_FENCEDBUFFER_H
+
+#include <android-base/unique_fd.h>
+#include <cutils/native_handle.h>
+
+namespace android {
+
+class FencedBuffer {
+ public:
+  FencedBuffer() : mBuffer(nullptr) {}
+
+  void setBuffer(buffer_handle_t buffer) { mBuffer = buffer; }
+  void setFence(int fenceFd) {
+    mFence = std::make_shared<base::unique_fd>(fenceFd);
+  }
+
+  buffer_handle_t getBuffer() const { return mBuffer; }
+  int getFence() const { return mFence ? dup(mFence->get()) : -1; }
+
+ private:
+  buffer_handle_t mBuffer;
+  std::shared_ptr<android::base::unique_fd> mFence;
+};
+
+}  // namespace android
+
+#endif
\ No newline at end of file
diff --git a/system/hwc2/Gralloc.cpp b/system/hwc2/Gralloc.cpp
new file mode 100644
index 0000000..ec33240
--- /dev/null
+++ b/system/hwc2/Gralloc.cpp
@@ -0,0 +1,493 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Gralloc.h"
+
+#include <aidl/android/hardware/graphics/common/PlaneLayoutComponent.h>
+#include <aidl/android/hardware/graphics/common/PlaneLayoutComponentType.h>
+#include <drm_fourcc.h>
+#include <gralloctypes/Gralloc4.h>
+#include <hidl/ServiceManagement.h>
+#include <log/log.h>
+
+#include <algorithm>
+
+#include "Drm.h"
+
+using aidl::android::hardware::graphics::common::PlaneLayout;
+using aidl::android::hardware::graphics::common::PlaneLayoutComponent;
+using aidl::android::hardware::graphics::common::PlaneLayoutComponentType;
+using android::hardware::hidl_handle;
+using android::hardware::hidl_vec;
+using android::hardware::graphics::common::V1_2::BufferUsage;
+using android::hardware::graphics::mapper::V4_0::Error;
+using android::hardware::graphics::mapper::V4_0::IMapper;
+using MetadataType =
+    android::hardware::graphics::mapper::V4_0::IMapper::MetadataType;
+
+namespace android {
+
+Gralloc::Gralloc() {
+  android::hardware::preloadPassthroughService<IMapper>();
+
+  gralloc4_ = IMapper::getService();
+  if (gralloc4_ != nullptr) {
+    ALOGE("%s using Gralloc4.", __FUNCTION__);
+    return;
+  }
+  ALOGE("%s Gralloc4 not available.", __FUNCTION__);
+
+  ALOGE("%s No Grallocs available!", __FUNCTION__);
+}
+
+Error Gralloc::GetMetadata(buffer_handle_t buffer, MetadataType type,
+                           hidl_vec<uint8_t>* metadata) {
+  if (gralloc4_ == nullptr) {
+    ALOGE("%s Gralloc4 not available.", __FUNCTION__);
+    return Error::NO_RESOURCES;
+  }
+
+  if (metadata == nullptr) {
+    return Error::BAD_VALUE;
+  }
+
+  Error error = Error::NONE;
+
+  auto native_handle = const_cast<native_handle_t*>(buffer);
+
+  auto ret =
+      gralloc4_->get(native_handle, type,
+                     [&](const auto& get_error, const auto& get_metadata) {
+                       error = get_error;
+                       *metadata = get_metadata;
+                     });
+
+  if (!ret.isOk()) {
+    error = Error::NO_RESOURCES;
+  }
+
+  if (error != Error::NONE) {
+    ALOGE("%s failed to get metadata %s", __FUNCTION__, type.name.c_str());
+  }
+  return error;
+}
+
+std::optional<uint32_t> Gralloc::GetWidth(buffer_handle_t buffer) {
+  if (gralloc4_ == nullptr) {
+    ALOGE("%s Gralloc4 not available.", __FUNCTION__);
+    return std::nullopt;
+  }
+
+  hidl_vec<uint8_t> encoded_width;
+
+  Error error = GetMetadata(buffer, android::gralloc4::MetadataType_Width,
+                            &encoded_width);
+  if (error != Error::NONE) {
+    return std::nullopt;
+  }
+
+  uint64_t width = 0;
+  android::gralloc4::decodeWidth(encoded_width, &width);
+  return static_cast<uint32_t>(width);
+}
+
+std::optional<uint32_t> Gralloc::GetHeight(buffer_handle_t buffer) {
+  if (gralloc4_ == nullptr) {
+    ALOGE("%s Gralloc4 not available.", __FUNCTION__);
+    return std::nullopt;
+  }
+
+  hidl_vec<uint8_t> encoded_height;
+
+  Error error = GetMetadata(buffer, android::gralloc4::MetadataType_Height,
+                            &encoded_height);
+  if (error != Error::NONE) {
+    return std::nullopt;
+  }
+
+  uint64_t height = 0;
+  android::gralloc4::decodeHeight(encoded_height, &height);
+  return static_cast<uint32_t>(height);
+}
+
+std::optional<uint32_t> Gralloc::GetDrmFormat(buffer_handle_t buffer) {
+  if (gralloc4_ == nullptr) {
+    ALOGE("%s Gralloc4 not available.", __FUNCTION__);
+    return std::nullopt;
+  }
+
+  hidl_vec<uint8_t> encoded_format;
+
+  Error error =
+      GetMetadata(buffer, android::gralloc4::MetadataType_PixelFormatFourCC,
+                  &encoded_format);
+  if (error != Error::NONE) {
+    return std::nullopt;
+  }
+
+  uint32_t format = 0;
+  android::gralloc4::decodePixelFormatFourCC(encoded_format, &format);
+  return static_cast<uint32_t>(format);
+}
+
+std::optional<std::vector<PlaneLayout>> Gralloc::GetPlaneLayouts(
+    buffer_handle_t buffer) {
+  if (gralloc4_ == nullptr) {
+    ALOGE("%s Gralloc4 not available.", __FUNCTION__);
+    return std::nullopt;
+  }
+
+  hidl_vec<uint8_t> encoded_layouts;
+
+  Error error = GetMetadata(
+      buffer, android::gralloc4::MetadataType_PlaneLayouts, &encoded_layouts);
+  if (error != Error::NONE) {
+    return std::nullopt;
+  }
+
+  std::vector<PlaneLayout> plane_layouts;
+  android::gralloc4::decodePlaneLayouts(encoded_layouts, &plane_layouts);
+  return plane_layouts;
+}
+
+std::optional<uint32_t> Gralloc::GetMonoPlanarStrideBytes(
+    buffer_handle_t buffer) {
+  if (gralloc4_ == nullptr) {
+    ALOGE("%s Gralloc4 not available.", __FUNCTION__);
+    return std::nullopt;
+  }
+
+  auto plane_layouts_opt = GetPlaneLayouts(buffer);
+  if (!plane_layouts_opt) {
+    return std::nullopt;
+  }
+
+  std::vector<PlaneLayout>& plane_layouts = *plane_layouts_opt;
+  if (plane_layouts.size() != 1) {
+    return std::nullopt;
+  }
+
+  return static_cast<uint32_t>(plane_layouts[0].strideInBytes);
+}
+
+std::optional<GrallocBuffer> Gralloc::Import(buffer_handle_t buffer) {
+  if (gralloc4_ == nullptr) {
+    ALOGE("%s Gralloc4 not available.", __FUNCTION__);
+    return std::nullopt;
+  }
+
+  buffer_handle_t imported_buffer;
+
+  Error error;
+  auto ret =
+      gralloc4_->importBuffer(buffer, [&](const auto& err, const auto& buf) {
+        error = err;
+        if (err == Error::NONE) {
+          imported_buffer = static_cast<buffer_handle_t>(buf);
+        }
+      });
+
+  if (!ret.isOk() || error != Error::NONE) {
+    ALOGE("%s failed to import buffer", __FUNCTION__);
+    return std::nullopt;
+  }
+  return GrallocBuffer(this, imported_buffer);
+}
+
+void Gralloc::Release(buffer_handle_t buffer) {
+  if (gralloc4_ == nullptr) {
+    ALOGE("%s Gralloc4 not available.", __FUNCTION__);
+    return;
+  }
+
+  auto native_buffer = const_cast<native_handle_t*>(buffer);
+  auto ret = gralloc4_->freeBuffer(native_buffer);
+
+  if (!ret.isOk()) {
+    ALOGE("%s failed to release buffer", __FUNCTION__);
+  }
+}
+
+std::optional<void*> Gralloc::Lock(buffer_handle_t buffer) {
+  if (gralloc4_ == nullptr) {
+    ALOGE("%s Gralloc4 not available.", __FUNCTION__);
+    return std::nullopt;
+  }
+
+  auto native_buffer = const_cast<native_handle_t*>(buffer);
+
+  const auto buffer_usage = static_cast<uint64_t>(BufferUsage::CPU_READ_OFTEN |
+                                                  BufferUsage::CPU_WRITE_OFTEN);
+
+  auto width_opt = GetWidth(buffer);
+  if (!width_opt) {
+    return std::nullopt;
+  }
+
+  auto height_opt = GetHeight(buffer);
+  if (!height_opt) {
+    return std::nullopt;
+  }
+
+  IMapper::Rect buffer_region;
+  buffer_region.left = 0;
+  buffer_region.top = 0;
+  buffer_region.width = *width_opt;
+  buffer_region.height = *height_opt;
+
+  // Empty fence, lock immedietly.
+  hidl_handle fence;
+
+  Error error = Error::NONE;
+  void* data = nullptr;
+
+  auto ret =
+      gralloc4_->lock(native_buffer, buffer_usage, buffer_region, fence,
+                      [&](const auto& lock_error, const auto& lock_data) {
+                        error = lock_error;
+                        if (lock_error == Error::NONE) {
+                          data = lock_data;
+                        }
+                      });
+
+  if (!ret.isOk()) {
+    error = Error::NO_RESOURCES;
+  }
+
+  if (error != Error::NONE) {
+    ALOGE("%s failed to lock buffer", __FUNCTION__);
+    return std::nullopt;
+  }
+
+  return data;
+}
+
+std::optional<android_ycbcr> Gralloc::LockYCbCr(buffer_handle_t buffer) {
+  if (gralloc4_ == nullptr) {
+    ALOGE("%s Gralloc4 not available.", __FUNCTION__);
+    return std::nullopt;
+  }
+
+  auto format_opt = GetDrmFormat(buffer);
+  if (!format_opt) {
+    ALOGE("%s failed to check format of buffer", __FUNCTION__);
+    return std::nullopt;
+  }
+
+  if (*format_opt != DRM_FORMAT_NV12 && *format_opt != DRM_FORMAT_NV21 &&
+      *format_opt != DRM_FORMAT_YVU420) {
+    ALOGE("%s called on non-ycbcr buffer", __FUNCTION__);
+    return std::nullopt;
+  }
+
+  auto lock_opt = Lock(buffer);
+  if (!lock_opt) {
+    ALOGE("%s failed to lock buffer", __FUNCTION__);
+    return std::nullopt;
+  }
+
+  auto plane_layouts_opt = GetPlaneLayouts(buffer);
+  if (!plane_layouts_opt) {
+    ALOGE("%s failed to get plane layouts", __FUNCTION__);
+    return std::nullopt;
+  }
+
+  android_ycbcr buffer_ycbcr;
+  buffer_ycbcr.y = nullptr;
+  buffer_ycbcr.cb = nullptr;
+  buffer_ycbcr.cr = nullptr;
+  buffer_ycbcr.ystride = 0;
+  buffer_ycbcr.cstride = 0;
+  buffer_ycbcr.chroma_step = 0;
+
+  for (const auto& plane_layout : *plane_layouts_opt) {
+    for (const auto& plane_layout_component : plane_layout.components) {
+      const auto& type = plane_layout_component.type;
+
+      if (!android::gralloc4::isStandardPlaneLayoutComponentType(type)) {
+        continue;
+      }
+
+      auto* component_data = reinterpret_cast<uint8_t*>(*lock_opt) +
+                             plane_layout.offsetInBytes +
+                             plane_layout_component.offsetInBits / 8;
+
+      switch (static_cast<PlaneLayoutComponentType>(type.value)) {
+        case PlaneLayoutComponentType::Y:
+          buffer_ycbcr.y = component_data;
+          buffer_ycbcr.ystride = plane_layout.strideInBytes;
+          break;
+        case PlaneLayoutComponentType::CB:
+          buffer_ycbcr.cb = component_data;
+          buffer_ycbcr.cstride = plane_layout.strideInBytes;
+          buffer_ycbcr.chroma_step = plane_layout.sampleIncrementInBits / 8;
+          break;
+        case PlaneLayoutComponentType::CR:
+          buffer_ycbcr.cr = component_data;
+          buffer_ycbcr.cstride = plane_layout.strideInBytes;
+          buffer_ycbcr.chroma_step = plane_layout.sampleIncrementInBits / 8;
+          break;
+        default:
+          break;
+      }
+    }
+  }
+
+  return buffer_ycbcr;
+}
+
+void Gralloc::Unlock(buffer_handle_t buffer) {
+  if (gralloc4_ == nullptr) {
+    ALOGE("%s Gralloc4 not available.", __FUNCTION__);
+    return;
+  }
+
+  auto native_handle = const_cast<native_handle_t*>(buffer);
+
+  Error error = Error::NONE;
+  auto ret = gralloc4_->unlock(
+      native_handle,
+      [&](const auto& unlock_error, const auto&) { error = unlock_error; });
+
+  if (!ret.isOk()) {
+    error = Error::NO_RESOURCES;
+  }
+
+  if (error != Error::NONE) {
+    ALOGE("%s failed to unlock buffer", __FUNCTION__);
+  }
+}
+
+GrallocBuffer::GrallocBuffer(Gralloc* gralloc, buffer_handle_t buffer)
+    : gralloc_(gralloc), buffer_(buffer) {}
+
+GrallocBuffer::~GrallocBuffer() { Release(); }
+
+GrallocBuffer::GrallocBuffer(GrallocBuffer&& rhs) { *this = std::move(rhs); }
+
+GrallocBuffer& GrallocBuffer::operator=(GrallocBuffer&& rhs) {
+  gralloc_ = rhs.gralloc_;
+  buffer_ = rhs.buffer_;
+  rhs.gralloc_ = nullptr;
+  rhs.buffer_ = nullptr;
+  return *this;
+}
+
+void GrallocBuffer::Release() {
+  if (gralloc_ && buffer_) {
+    gralloc_->Release(buffer_);
+    gralloc_ = nullptr;
+    buffer_ = nullptr;
+  }
+}
+
+std::optional<GrallocBufferView> GrallocBuffer::Lock() {
+  if (gralloc_ && buffer_) {
+    auto format_opt = GetDrmFormat();
+    if (!format_opt) {
+      ALOGE("%s failed to check format of buffer", __FUNCTION__);
+      return std::nullopt;
+    }
+    if (*format_opt != DRM_FORMAT_NV12 && *format_opt != DRM_FORMAT_NV21 &&
+        *format_opt != DRM_FORMAT_YVU420) {
+      auto locked_opt = gralloc_->Lock(buffer_);
+      if (!locked_opt) {
+        return std::nullopt;
+      }
+      return GrallocBufferView(this, *locked_opt);
+    } else {
+      auto locked_ycbcr_opt = gralloc_->LockYCbCr(buffer_);
+      if (!locked_ycbcr_opt) {
+        ALOGE("%s failed to lock ycbcr buffer", __FUNCTION__);
+        return std::nullopt;
+      }
+      return GrallocBufferView(this, *locked_ycbcr_opt);
+    }
+  }
+  return std::nullopt;
+}
+
+void GrallocBuffer::Unlock() {
+  if (gralloc_ && buffer_) {
+    gralloc_->Unlock(buffer_);
+  }
+}
+
+std::optional<uint32_t> GrallocBuffer::GetWidth() {
+  if (gralloc_ && buffer_) {
+    return gralloc_->GetWidth(buffer_);
+  }
+  return std::nullopt;
+}
+
+std::optional<uint32_t> GrallocBuffer::GetHeight() {
+  if (gralloc_ && buffer_) {
+    return gralloc_->GetHeight(buffer_);
+  }
+  return std::nullopt;
+}
+
+std::optional<uint32_t> GrallocBuffer::GetDrmFormat() {
+  if (gralloc_ && buffer_) {
+    return gralloc_->GetDrmFormat(buffer_);
+  }
+  return std::nullopt;
+}
+
+std::optional<std::vector<PlaneLayout>> GrallocBuffer::GetPlaneLayouts() {
+  if (gralloc_ && buffer_) {
+    return gralloc_->GetPlaneLayouts(buffer_);
+  }
+  return std::nullopt;
+}
+
+std::optional<uint32_t> GrallocBuffer::GetMonoPlanarStrideBytes() {
+  if (gralloc_ && buffer_) {
+    return gralloc_->GetMonoPlanarStrideBytes(buffer_);
+  }
+  return std::nullopt;
+}
+
+GrallocBufferView::GrallocBufferView(GrallocBuffer* buffer, void* raw)
+    : gralloc_buffer_(buffer), locked_(raw) {}
+
+GrallocBufferView::GrallocBufferView(GrallocBuffer* buffer, android_ycbcr raw)
+    : gralloc_buffer_(buffer), locked_ycbcr_(raw) {}
+
+GrallocBufferView::~GrallocBufferView() {
+  if (gralloc_buffer_) {
+    gralloc_buffer_->Unlock();
+  }
+}
+
+GrallocBufferView::GrallocBufferView(GrallocBufferView&& rhs) {
+  *this = std::move(rhs);
+}
+
+GrallocBufferView& GrallocBufferView::operator=(GrallocBufferView&& rhs) {
+  std::swap(gralloc_buffer_, rhs.gralloc_buffer_);
+  std::swap(locked_, rhs.locked_);
+  std::swap(locked_ycbcr_, rhs.locked_ycbcr_);
+  return *this;
+}
+
+const std::optional<void*> GrallocBufferView::Get() const { return locked_; }
+
+const std::optional<android_ycbcr>& GrallocBufferView::GetYCbCr() const {
+  return locked_ycbcr_;
+}
+
+}  // namespace android
\ No newline at end of file
diff --git a/system/hwc2/Gralloc.h b/system/hwc2/Gralloc.h
new file mode 100644
index 0000000..9cb5153
--- /dev/null
+++ b/system/hwc2/Gralloc.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HWC_GRALLOC_H
+#define ANDROID_HWC_GRALLOC_H
+
+#include <aidl/android/hardware/graphics/common/PlaneLayout.h>
+#include <android/hardware/graphics/mapper/4.0/IMapper.h>
+#include <hardware/gralloc.h>
+#include <system/graphics.h>
+#include <utils/StrongPointer.h>
+
+#include <memory>
+#include <optional>
+#include <vector>
+
+namespace android {
+
+class Gralloc;
+class GrallocBuffer;
+
+// An RAII object that will Unlock() a GrallocBuffer upon destruction.
+class GrallocBufferView {
+ public:
+  virtual ~GrallocBufferView();
+
+  GrallocBufferView(const GrallocBufferView& rhs) = delete;
+  GrallocBufferView& operator=(const GrallocBufferView& rhs) = delete;
+
+  GrallocBufferView(GrallocBufferView&& rhs);
+  GrallocBufferView& operator=(GrallocBufferView&& rhs);
+
+  const std::optional<void*> Get() const;
+
+  const std::optional<android_ycbcr>& GetYCbCr() const;
+
+ private:
+  friend class GrallocBuffer;
+  GrallocBufferView(GrallocBuffer* buffer, void* raw);
+  GrallocBufferView(GrallocBuffer* buffer, android_ycbcr raw);
+
+  // The GrallocBuffer that should be unlocked upon destruction of this object.
+  GrallocBuffer* gralloc_buffer_ = nullptr;
+
+  std::optional<void*> locked_;
+  std::optional<android_ycbcr> locked_ycbcr_;
+};
+
+// A gralloc 4.0 buffer that has been imported in the current process and
+// that will be released upon destruction. Users must ensure that the Gralloc
+// instance that this buffer is created with out lives this buffer.
+class GrallocBuffer {
+ public:
+  GrallocBuffer(Gralloc* gralloc, buffer_handle_t buffer);
+  virtual ~GrallocBuffer();
+
+  GrallocBuffer(const GrallocBuffer& rhs) = delete;
+  GrallocBuffer& operator=(const GrallocBuffer& rhs) = delete;
+
+  GrallocBuffer(GrallocBuffer&& rhs);
+  GrallocBuffer& operator=(GrallocBuffer&& rhs);
+
+  // Locks the buffer for reading and returns a view if successful.
+  std::optional<GrallocBufferView> Lock();
+
+  std::optional<uint32_t> GetWidth();
+  std::optional<uint32_t> GetHeight();
+  std::optional<uint32_t> GetDrmFormat();
+
+  // Returns the stride of the buffer if it is a single plane buffer or fails
+  // and returns nullopt if the buffer is for a multi plane buffer.
+  std::optional<uint32_t> GetMonoPlanarStrideBytes();
+
+  std::optional<
+      std::vector<aidl::android::hardware::graphics::common::PlaneLayout>>
+  GetPlaneLayouts();
+
+ private:
+  // Internal visibility for Unlock().
+  friend class GrallocBufferView;
+
+  // Unlocks the buffer from reading.
+  void Unlock();
+
+  void Release();
+
+  Gralloc* gralloc_ = nullptr;
+  buffer_handle_t buffer_ = nullptr;
+};
+
+class Gralloc {
+ public:
+  Gralloc();
+  virtual ~Gralloc() = default;
+
+  // Imports the given buffer handle into the current process and returns an
+  // imported buffer which can be used for reading. Users must ensure that the
+  // Gralloc instance outlives any GrallocBuffers.
+  std::optional<GrallocBuffer> Import(buffer_handle_t buffer);
+
+ private:
+  // The below functions are made avaialble only to GrallocBuffer so that
+  // users only call gralloc functions on *imported* buffers.
+  friend class GrallocBuffer;
+
+  // See GrallocBuffer::Release.
+  void Release(buffer_handle_t buffer);
+
+  // See GrallocBuffer::Lock.
+  std::optional<void*> Lock(buffer_handle_t buffer);
+
+  // See GrallocBuffer::LockYCbCr.
+  std::optional<android_ycbcr> LockYCbCr(buffer_handle_t buffer);
+
+  // See GrallocBuffer::Unlock.
+  void Unlock(buffer_handle_t buffer);
+
+  // See GrallocBuffer::GetWidth.
+  std::optional<uint32_t> GetWidth(buffer_handle_t buffer);
+
+  // See GrallocBuffer::GetHeight.
+  std::optional<uint32_t> GetHeight(buffer_handle_t buffer);
+
+  // See GrallocBuffer::GetDrmFormat.
+  std::optional<uint32_t> GetDrmFormat(buffer_handle_t buffer);
+
+  // See GrallocBuffer::GetPlaneLayouts.
+  std::optional<
+      std::vector<aidl::android::hardware::graphics::common::PlaneLayout>>
+  GetPlaneLayouts(buffer_handle_t buffer);
+
+  // Returns the stride of the buffer if it is a single plane buffer or fails
+  // and returns nullopt if the buffer is for a multi plane buffer.
+  std::optional<uint32_t> GetMonoPlanarStrideBytes(buffer_handle_t);
+
+  // See GrallocBuffer::GetMetadata.
+  android::hardware::graphics::mapper::V4_0::Error GetMetadata(
+      buffer_handle_t buffer,
+      android::hardware::graphics::mapper::V4_0::IMapper::MetadataType type,
+      android::hardware::hidl_vec<uint8_t>* metadata);
+
+  android::sp<android::hardware::graphics::mapper::V4_0::IMapper> gralloc4_;
+};
+
+}  // namespace android
+
+#endif
\ No newline at end of file
diff --git a/system/hwc2/GuestComposer.cpp b/system/hwc2/GuestComposer.cpp
new file mode 100644
index 0000000..33ad32e
--- /dev/null
+++ b/system/hwc2/GuestComposer.cpp
@@ -0,0 +1,1097 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "GuestComposer.h"
+
+#include <android-base/parseint.h>
+#include <android-base/properties.h>
+#include <android-base/strings.h>
+#include <device_config_shared.h>
+#include <drm_fourcc.h>
+#include <libyuv.h>
+#include <ui/GraphicBuffer.h>
+#include <ui/GraphicBufferAllocator.h>
+#include <ui/GraphicBufferMapper.h>
+
+#include "Device.h"
+#include "Display.h"
+#include "Drm.h"
+#include "Layer.h"
+
+namespace android {
+namespace {
+
+uint64_t AlignToPower2(uint64_t val, uint8_t align_log) {
+  uint64_t align = 1ULL << align_log;
+  return ((val + (align - 1)) / align) * align;
+}
+
+bool LayerNeedsScaling(const Layer& layer) {
+  hwc_rect_t crop = layer.getSourceCropInt();
+  hwc_rect_t frame = layer.getDisplayFrame();
+
+  int fromW = crop.right - crop.left;
+  int fromH = crop.bottom - crop.top;
+  int toW = frame.right - frame.left;
+  int toH = frame.bottom - frame.top;
+
+  bool not_rot_scale = fromW != toW || fromH != toH;
+  bool rot_scale = fromW != toH || fromH != toW;
+
+  bool needs_rot = layer.getTransform() & HAL_TRANSFORM_ROT_90;
+
+  return needs_rot ? rot_scale : not_rot_scale;
+}
+
+bool LayerNeedsBlending(const Layer& layer) {
+  return layer.getBlendMode() != HWC2::BlendMode::None;
+}
+
+bool LayerNeedsAttenuation(const Layer& layer) {
+  return layer.getBlendMode() == HWC2::BlendMode::Coverage;
+}
+
+struct BufferSpec;
+typedef int (*ConverterFunction)(const BufferSpec& src, const BufferSpec& dst,
+                                 bool v_flip);
+int DoCopy(const BufferSpec& src, const BufferSpec& dst, bool vFlip);
+int ConvertFromRGB565(const BufferSpec& src, const BufferSpec& dst, bool vFlip);
+int ConvertFromYV12(const BufferSpec& src, const BufferSpec& dst, bool vFlip);
+
+ConverterFunction GetConverterForDrmFormat(uint32_t drmFormat) {
+  switch (drmFormat) {
+    case DRM_FORMAT_ABGR8888:
+    case DRM_FORMAT_XBGR8888:
+      return &DoCopy;
+    case DRM_FORMAT_RGB565:
+      return &ConvertFromRGB565;
+    case DRM_FORMAT_YVU420:
+      return &ConvertFromYV12;
+  }
+  ALOGW("Unsupported drm format: %d(%s), returning null converter", drmFormat,
+        GetDrmFormatString(drmFormat));
+  return nullptr;
+}
+
+bool IsDrmFormatSupported(uint32_t drmFormat) {
+  return GetConverterForDrmFormat(drmFormat) != nullptr;
+}
+
+// Libyuv's convert functions only allow the combination of any rotation
+// (multiple of 90 degrees) and a vertical flip, but not horizontal flips.
+// Surfaceflinger's transformations are expressed in terms of a vertical flip,
+// a horizontal flip and/or a single 90 degrees clockwise rotation (see
+// NATIVE_WINDOW_TRANSFORM_HINT documentation on system/window.h for more
+// insight). The following code allows to turn a horizontal flip into a 180
+// degrees rotation and a vertical flip.
+libyuv::RotationMode GetRotationFromTransform(uint32_t transform) {
+  uint32_t rotation =
+      (transform & HAL_TRANSFORM_ROT_90) ? 1 : 0;          // 1 * ROT90 bit
+  rotation += (transform & HAL_TRANSFORM_FLIP_H) ? 2 : 0;  // 2 * VFLIP bit
+  return static_cast<libyuv::RotationMode>(90 * rotation);
+}
+
+bool GetVFlipFromTransform(uint32_t transform) {
+  // vertical flip xor horizontal flip
+  return ((transform & HAL_TRANSFORM_FLIP_V) >> 1) ^
+         (transform & HAL_TRANSFORM_FLIP_H);
+}
+
+struct BufferSpec {
+  uint8_t* buffer;
+  std::optional<android_ycbcr> buffer_ycbcr;
+  int width;
+  int height;
+  int cropX;
+  int cropY;
+  int cropWidth;
+  int cropHeight;
+  uint32_t drmFormat;
+  int strideBytes;
+  int sampleBytes;
+
+  BufferSpec(uint8_t* buffer, std::optional<android_ycbcr> buffer_ycbcr,
+             int width, int height, int cropX, int cropY, int cropWidth,
+             int cropHeight, uint32_t drmFormat, int strideBytes,
+             int sampleBytes)
+      : buffer(buffer),
+        buffer_ycbcr(buffer_ycbcr),
+        width(width),
+        height(height),
+        cropX(cropX),
+        cropY(cropY),
+        cropWidth(cropWidth),
+        cropHeight(cropHeight),
+        drmFormat(drmFormat),
+        strideBytes(strideBytes),
+        sampleBytes(sampleBytes) {}
+
+  BufferSpec(uint8_t* buffer, int width, int height, int strideBytes)
+      : BufferSpec(buffer,
+                   /*buffer_ycbcr=*/std::nullopt, width, height,
+                   /*cropX=*/0,
+                   /*cropY=*/0,
+                   /*cropWidth=*/width,
+                   /*cropHeight=*/height,
+                   /*drmFormat=*/DRM_FORMAT_ABGR8888, strideBytes,
+                   /*sampleBytes=*/4) {}
+};
+
+int ConvertFromRGB565(const BufferSpec& src, const BufferSpec& dst,
+                      bool vFlip) {
+  // Point to the upper left corner of the crop rectangle
+  uint8_t* srcBuffer =
+      src.buffer + src.cropY * src.strideBytes + src.cropX * src.sampleBytes;
+  uint8_t* dstBuffer =
+      dst.buffer + dst.cropY * dst.strideBytes + dst.cropX * dst.sampleBytes;
+
+  int width = src.cropWidth;
+  int height = src.cropHeight;
+  if (vFlip) {
+    height = -height;
+  }
+
+  return libyuv::RGB565ToARGB(srcBuffer, src.strideBytes,  //
+                              dstBuffer, dst.strideBytes,  //
+                              width, height);
+}
+
+int ConvertFromYV12(const BufferSpec& src, const BufferSpec& dst, bool vFlip) {
+  // The following calculation of plane offsets and alignments are based on
+  // swiftshader's Sampler::setTextureLevel() implementation
+  // (Renderer/Sampler.cpp:225)
+
+  auto& srcBufferYCbCrOpt = src.buffer_ycbcr;
+  if (!srcBufferYCbCrOpt) {
+    ALOGE("%s called on non ycbcr buffer", __FUNCTION__);
+    return -1;
+  }
+  auto& srcBufferYCbCr = *srcBufferYCbCrOpt;
+
+  // The libyuv::I420ToARGB() function is for tri-planar.
+  if (srcBufferYCbCr.chroma_step != 1) {
+    ALOGE("%s called with bad chroma step", __FUNCTION__);
+    return -1;
+  }
+
+  uint8_t* srcY = reinterpret_cast<uint8_t*>(srcBufferYCbCr.y);
+  int strideY = srcBufferYCbCr.ystride;
+  uint8_t* srcU = reinterpret_cast<uint8_t*>(srcBufferYCbCr.cb);
+  int strideU = srcBufferYCbCr.cstride;
+  uint8_t* srcV = reinterpret_cast<uint8_t*>(srcBufferYCbCr.cr);
+  int strideV = srcBufferYCbCr.cstride;
+
+  // Adjust for crop
+  srcY += src.cropY * strideY + src.cropX;
+  srcV += (src.cropY / 2) * strideV + (src.cropX / 2);
+  srcU += (src.cropY / 2) * strideU + (src.cropX / 2);
+  uint8_t* dstBuffer =
+      dst.buffer + dst.cropY * dst.strideBytes + dst.cropX * dst.sampleBytes;
+
+  int width = dst.cropWidth;
+  int height = dst.cropHeight;
+
+  if (vFlip) {
+    height = -height;
+  }
+
+  // YV12 is the same as I420, with the U and V planes swapped
+  return libyuv::I420ToARGB(srcY, strideY, srcV, strideV, srcU, strideU,
+                            dstBuffer, dst.strideBytes, width, height);
+}
+
+int DoConversion(const BufferSpec& src, const BufferSpec& dst, bool v_flip) {
+  ConverterFunction func = GetConverterForDrmFormat(src.drmFormat);
+  if (!func) {
+    // GetConverterForDrmFormat should've logged the issue for us.
+    return -1;
+  }
+  return func(src, dst, v_flip);
+}
+
+int DoCopy(const BufferSpec& src, const BufferSpec& dst, bool v_flip) {
+  // Point to the upper left corner of the crop rectangle
+  uint8_t* srcBuffer =
+      src.buffer + src.cropY * src.strideBytes + src.cropX * src.sampleBytes;
+  uint8_t* dstBuffer =
+      dst.buffer + dst.cropY * dst.strideBytes + dst.cropX * dst.sampleBytes;
+  int width = src.cropWidth;
+  int height = src.cropHeight;
+
+  if (v_flip) {
+    height = -height;
+  }
+
+  // HAL formats are named based on the order of the pixel components on the
+  // byte stream, while libyuv formats are named based on the order of those
+  // pixel components in an integer written from left to right. So
+  // libyuv::FOURCC_ARGB is equivalent to HAL_PIXEL_FORMAT_BGRA_8888.
+  auto ret = libyuv::ARGBCopy(srcBuffer, src.strideBytes, dstBuffer,
+                              dst.strideBytes, width, height);
+  return ret;
+}
+
+int DoRotation(const BufferSpec& src, const BufferSpec& dst,
+               libyuv::RotationMode rotation, bool v_flip) {
+  // Point to the upper left corner of the crop rectangles
+  uint8_t* srcBuffer =
+      src.buffer + src.cropY * src.strideBytes + src.cropX * src.sampleBytes;
+  uint8_t* dstBuffer =
+      dst.buffer + dst.cropY * dst.strideBytes + dst.cropX * dst.sampleBytes;
+  int width = src.cropWidth;
+  int height = src.cropHeight;
+
+  if (v_flip) {
+    height = -height;
+  }
+
+  return libyuv::ARGBRotate(srcBuffer, src.strideBytes, dstBuffer,
+                            dst.strideBytes, width, height, rotation);
+}
+
+int DoScaling(const BufferSpec& src, const BufferSpec& dst, bool v_flip) {
+  // Point to the upper left corner of the crop rectangles
+  uint8_t* srcBuffer =
+      src.buffer + src.cropY * src.strideBytes + src.cropX * src.sampleBytes;
+  uint8_t* dstBuffer =
+      dst.buffer + dst.cropY * dst.strideBytes + dst.cropX * dst.sampleBytes;
+  int srcWidth = src.cropWidth;
+  int srcHeight = src.cropHeight;
+  int dstWidth = dst.cropWidth;
+  int dstHeight = dst.cropHeight;
+
+  if (v_flip) {
+    srcHeight = -srcHeight;
+  }
+
+  return libyuv::ARGBScale(srcBuffer, src.strideBytes, srcWidth, srcHeight,
+                           dstBuffer, dst.strideBytes, dstWidth, dstHeight,
+                           libyuv::kFilterBilinear);
+}
+
+int DoAttenuation(const BufferSpec& src, const BufferSpec& dst, bool v_flip) {
+  // Point to the upper left corner of the crop rectangles
+  uint8_t* srcBuffer =
+      src.buffer + src.cropY * src.strideBytes + src.cropX * src.sampleBytes;
+  uint8_t* dstBuffer =
+      dst.buffer + dst.cropY * dst.strideBytes + dst.cropX * dst.sampleBytes;
+  int width = dst.cropWidth;
+  int height = dst.cropHeight;
+
+  if (v_flip) {
+    height = -height;
+  }
+
+  return libyuv::ARGBAttenuate(srcBuffer, src.strideBytes, dstBuffer,
+                               dst.strideBytes, width, height);
+}
+
+int DoBlending(const BufferSpec& src, const BufferSpec& dst, bool v_flip) {
+  // Point to the upper left corner of the crop rectangles
+  uint8_t* srcBuffer =
+      src.buffer + src.cropY * src.strideBytes + src.cropX * src.sampleBytes;
+  uint8_t* dstBuffer =
+      dst.buffer + dst.cropY * dst.strideBytes + dst.cropX * dst.sampleBytes;
+  int width = dst.cropWidth;
+  int height = dst.cropHeight;
+
+  if (v_flip) {
+    height = -height;
+  }
+
+  // libyuv's ARGB format is hwcomposer's BGRA format, since blending only cares
+  // for the position of alpha in the pixel and not the position of the colors
+  // this function is perfectly usable.
+  return libyuv::ARGBBlend(srcBuffer, src.strideBytes, dstBuffer,
+                           dst.strideBytes, dstBuffer, dst.strideBytes, width,
+                           height);
+}
+
+std::optional<BufferSpec> GetBufferSpec(GrallocBuffer& buffer,
+                                        GrallocBufferView& bufferView,
+                                        const hwc_rect_t& bufferCrop) {
+  auto bufferFormatOpt = buffer.GetDrmFormat();
+  if (!bufferFormatOpt) {
+    ALOGE("Failed to get gralloc buffer format.");
+    return std::nullopt;
+  }
+  uint32_t bufferFormat = *bufferFormatOpt;
+
+  auto bufferWidthOpt = buffer.GetWidth();
+  if (!bufferWidthOpt) {
+    ALOGE("Failed to get gralloc buffer width.");
+    return std::nullopt;
+  }
+  uint32_t bufferWidth = *bufferWidthOpt;
+
+  auto bufferHeightOpt = buffer.GetHeight();
+  if (!bufferHeightOpt) {
+    ALOGE("Failed to get gralloc buffer height.");
+    return std::nullopt;
+  }
+  uint32_t bufferHeight = *bufferHeightOpt;
+
+  uint8_t* bufferData = nullptr;
+  uint32_t bufferStrideBytes = 0;
+  std::optional<android_ycbcr> bufferYCbCrData;
+
+  if (bufferFormat == DRM_FORMAT_NV12 || bufferFormat == DRM_FORMAT_NV21 ||
+      bufferFormat == DRM_FORMAT_YVU420) {
+    bufferYCbCrData = bufferView.GetYCbCr();
+    if (!bufferYCbCrData) {
+      ALOGE("%s failed to get raw ycbcr from view.", __FUNCTION__);
+      return std::nullopt;
+    }
+  } else {
+    auto bufferDataOpt = bufferView.Get();
+    if (!bufferDataOpt) {
+      ALOGE("%s failed to lock gralloc buffer.", __FUNCTION__);
+      return std::nullopt;
+    }
+    bufferData = reinterpret_cast<uint8_t*>(*bufferDataOpt);
+
+    auto bufferStrideBytesOpt = buffer.GetMonoPlanarStrideBytes();
+    if (!bufferStrideBytesOpt) {
+      ALOGE("%s failed to get plane stride.", __FUNCTION__);
+      return std::nullopt;
+    }
+    bufferStrideBytes = *bufferStrideBytesOpt;
+  }
+
+  return BufferSpec(bufferData, bufferYCbCrData, bufferWidth, bufferHeight,
+                    bufferCrop.left, bufferCrop.top,
+                    bufferCrop.right - bufferCrop.left,
+                    bufferCrop.bottom - bufferCrop.top, bufferFormat,
+                    bufferStrideBytes, GetDrmFormatBytesPerPixel(bufferFormat));
+}
+
+}  // namespace
+
+HWC2::Error GuestComposer::init(const HotplugCallback& cb) {
+  DEBUG_LOG("%s", __FUNCTION__);
+
+  if (!mDrmPresenter.init(cb)) {
+    ALOGE("%s: failed to initialize DrmPresenter", __FUNCTION__);
+    return HWC2::Error::NoResources;
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error GuestComposer::createDisplays(
+    Device* device, const AddDisplayToDeviceFunction& addDisplayToDeviceFn) {
+  DEBUG_LOG("%s", __FUNCTION__);
+
+  HWC2::Error error = HWC2::Error::None;
+
+  std::vector<DisplayConfig> displayConfigs;
+
+  error = getDisplayConfigsFromDeviceConfig(&displayConfigs);
+  if (error != HWC2::Error::None) {
+    ALOGE("%s failed to get display configs from device config", __FUNCTION__);
+    return error;
+  }
+
+  error = getDisplayConfigsFromSystemProp(&displayConfigs);
+  if (error != HWC2::Error::None) {
+    ALOGE("%s failed to get display configs from system prop", __FUNCTION__);
+    return error;
+  }
+  uint32_t id = 0;
+  for (const auto& displayConfig : displayConfigs) {
+    error = createDisplay(device, id, displayConfig.width, displayConfig.height,
+                          displayConfig.dpiX, displayConfig.dpiY,
+                          displayConfig.refreshRateHz, addDisplayToDeviceFn);
+    if (error != HWC2::Error::None) {
+      ALOGE("%s: failed to create display %d", __FUNCTION__, id);
+      return error;
+    }
+
+    ++id;
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error GuestComposer::createDisplay(
+    Device* device, uint32_t id, uint32_t width, uint32_t height, uint32_t dpiX,
+    uint32_t dpiY, uint32_t refreshRateHz,
+    const AddDisplayToDeviceFunction& addDisplayToDeviceFn) {
+  auto display = std::make_unique<Display>(*device, this, id);
+  if (display == nullptr) {
+    ALOGE("%s failed to allocate display", __FUNCTION__);
+    return HWC2::Error::NoResources;
+  }
+
+  auto displayId = display->getId();
+
+  HWC2::Error error = display->init(width, height, dpiX, dpiY, refreshRateHz);
+  if (error != HWC2::Error::None) {
+    ALOGE("%s failed to initialize display:%" PRIu64, __FUNCTION__, displayId);
+    return error;
+  }
+
+  auto it = mDisplayInfos.find(displayId);
+  if (it != mDisplayInfos.end()) {
+    ALOGE("%s: display:%" PRIu64 " already created?", __FUNCTION__, displayId);
+  }
+
+  GuestComposerDisplayInfo& displayInfo = mDisplayInfos[displayId];
+
+  uint32_t bufferStride;
+  buffer_handle_t bufferHandle;
+
+  auto status = GraphicBufferAllocator::get().allocate(
+      width,                   //
+      height,                  //
+      PIXEL_FORMAT_RGBA_8888,  //
+      /*layerCount=*/1,        //
+      GraphicBuffer::USAGE_HW_COMPOSER | GraphicBuffer::USAGE_SW_READ_OFTEN |
+          GraphicBuffer::USAGE_SW_WRITE_OFTEN,  //
+      &bufferHandle,                            //
+      &bufferStride,                            //
+      "RanchuHwc");
+  if (status != OK) {
+    ALOGE("%s failed to allocate composition buffer for display:%" PRIu64,
+          __FUNCTION__, displayId);
+    return HWC2::Error::NoResources;
+  }
+
+  displayInfo.compositionResultBuffer = bufferHandle;
+
+  displayInfo.compositionResultDrmBuffer = std::make_unique<DrmBuffer>(
+      displayInfo.compositionResultBuffer, mDrmPresenter);
+
+  if (displayId == 0) {
+    int flushSyncFd = -1;
+
+    HWC2::Error flushError =
+        displayInfo.compositionResultDrmBuffer->flushToDisplay(displayId,
+                                                               &flushSyncFd);
+    if (flushError != HWC2::Error::None) {
+      ALOGW(
+          "%s: Initial display flush failed. HWComposer assuming that we are "
+          "running in QEMU without a display and disabling presenting.",
+          __FUNCTION__);
+      mPresentDisabled = true;
+    } else {
+      close(flushSyncFd);
+    }
+  }
+
+  error = addDisplayToDeviceFn(std::move(display));
+  if (error != HWC2::Error::None) {
+    ALOGE("%s failed to add display:%" PRIu64, __FUNCTION__, displayId);
+    return error;
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error GuestComposer::onDisplayDestroy(Display* display) {
+  auto displayId = display->getId();
+
+  auto it = mDisplayInfos.find(displayId);
+  if (it == mDisplayInfos.end()) {
+    ALOGE("%s: display:%" PRIu64 " missing display buffers?", __FUNCTION__,
+          displayId);
+    return HWC2::Error::BadDisplay;
+  }
+
+  GuestComposerDisplayInfo& displayInfo = mDisplayInfos[displayId];
+
+  GraphicBufferAllocator::get().free(displayInfo.compositionResultBuffer);
+
+  mDisplayInfos.erase(it);
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error GuestComposer::getDisplayConfigsFromDeviceConfig(
+    std::vector<GuestComposer::DisplayConfig>* configs) {
+  DEBUG_LOG("%s", __FUNCTION__);
+
+  const auto deviceConfig = cuttlefish::GetDeviceConfig();
+  for (const auto& deviceDisplayConfig : deviceConfig.display_config()) {
+    DisplayConfig displayConfig = {
+        .width = deviceDisplayConfig.width(),
+        .height = deviceDisplayConfig.height(),
+        .dpiX = deviceDisplayConfig.dpi(),
+        .dpiY = deviceDisplayConfig.dpi(),
+        .refreshRateHz = deviceDisplayConfig.refresh_rate_hz(),
+    };
+
+    configs->push_back(displayConfig);
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error GuestComposer::getDisplayConfigsFromSystemProp(
+    std::vector<GuestComposer::DisplayConfig>* configs) {
+  DEBUG_LOG("%s", __FUNCTION__);
+
+  static constexpr const char kExternalDisplayProp[] =
+      "hwservicemanager.external.displays";
+
+  const auto propString = android::base::GetProperty(kExternalDisplayProp, "");
+  DEBUG_LOG("%s: prop value is: %s", __FUNCTION__, propString.c_str());
+
+  if (propString.empty()) {
+    return HWC2::Error::None;
+  }
+
+  const std::vector<std::string> propStringParts =
+      android::base::Split(propString, ",");
+  if (propStringParts.size() % 5 != 0) {
+    ALOGE("%s: Invalid syntax for system prop %s which is %s", __FUNCTION__,
+          kExternalDisplayProp, propString.c_str());
+    return HWC2::Error::BadParameter;
+  }
+
+  std::vector<int> propIntParts;
+  for (const std::string& propStringPart : propStringParts) {
+    int propIntPart;
+    if (!android::base::ParseInt(propStringPart, &propIntPart)) {
+      ALOGE("%s: Invalid syntax for system prop %s which is %s", __FUNCTION__,
+            kExternalDisplayProp, propString.c_str());
+      return HWC2::Error::BadParameter;
+    }
+    propIntParts.push_back(propIntPart);
+  }
+
+  while (!propIntParts.empty()) {
+    DisplayConfig display_config = {
+        .width = propIntParts[1],
+        .height = propIntParts[2],
+        .dpiX = propIntParts[3],
+        .dpiY = propIntParts[3],
+        .refreshRateHz = 160,
+    };
+
+    configs->push_back(display_config);
+
+    propIntParts.erase(propIntParts.begin(), propIntParts.begin() + 5);
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error GuestComposer::validateDisplay(
+    Display* display, std::unordered_map<hwc2_layer_t, HWC2::Composition>*
+                          outLayerCompositionChanges) {
+  const auto displayId = display->getId();
+  DEBUG_LOG("%s display:%" PRIu64, __FUNCTION__, displayId);
+
+  const std::vector<Layer*>& layers = display->getOrderedLayers();
+
+  bool fallbackToClientComposition = false;
+  for (Layer* layer : layers) {
+    const auto layerId = layer->getId();
+    const auto layerCompositionType = layer->getCompositionType();
+    const auto layerCompositionTypeString = to_string(layerCompositionType);
+
+    if (layerCompositionType == HWC2::Composition::Invalid) {
+      ALOGE("%s display:%" PRIu64 " layer:%" PRIu64 " has Invalid composition",
+            __FUNCTION__, displayId, layerId);
+      continue;
+    }
+
+    if (layerCompositionType == HWC2::Composition::Client ||
+        layerCompositionType == HWC2::Composition::Cursor ||
+        layerCompositionType == HWC2::Composition::Sideband ||
+        layerCompositionType == HWC2::Composition::SolidColor) {
+      ALOGW("%s: display:%" PRIu64 " layer:%" PRIu64
+            " has composition type %s, falling back to client composition",
+            __FUNCTION__, displayId, layerId,
+            layerCompositionTypeString.c_str());
+      fallbackToClientComposition = true;
+      break;
+    }
+
+    if (!canComposeLayer(layer)) {
+      ALOGW("%s: display:%" PRIu64 " layer:%" PRIu64
+            " composition not supported, falling back to client composition",
+            __FUNCTION__, displayId, layerId);
+      fallbackToClientComposition = true;
+      break;
+    }
+  }
+
+  if (display->hasColorTransform()) {
+    fallbackToClientComposition = true;
+  }
+
+  if (fallbackToClientComposition) {
+    for (Layer* layer : layers) {
+      const auto layerId = layer->getId();
+      const auto layerCompositionType = layer->getCompositionType();
+
+      if (layerCompositionType == HWC2::Composition::Invalid) {
+        continue;
+      }
+      if (layerCompositionType != HWC2::Composition::Client) {
+        DEBUG_LOG("%s display:%" PRIu64 " layer:%" PRIu64
+                  "composition updated to Client",
+                  __FUNCTION__, displayId, layerId);
+        (*outLayerCompositionChanges)[layerId] = HWC2::Composition::Client;
+      }
+    }
+  }
+
+  // We can not draw below a Client (SurfaceFlinger) composed layer. Change all
+  // layers below a Client composed layer to also be Client composed.
+  if (layers.size() > 1) {
+    for (std::size_t layerIndex = layers.size() - 1; layerIndex > 0;
+         layerIndex--) {
+      auto layer = layers[layerIndex];
+      auto layerCompositionType = layer->getCompositionType();
+
+      if (layerCompositionType == HWC2::Composition::Client) {
+        for (std::size_t lowerLayerIndex = 0; lowerLayerIndex < layerIndex;
+             lowerLayerIndex++) {
+          auto lowerLayer = layers[lowerLayerIndex];
+          auto lowerLayerId = lowerLayer->getId();
+          auto lowerLayerCompositionType = lowerLayer->getCompositionType();
+
+          if (lowerLayerCompositionType != HWC2::Composition::Client) {
+            DEBUG_LOG("%s: display:%" PRIu64 " changing layer:%" PRIu64
+                      " to Client because"
+                      "hwcomposer can not draw below the Client composed "
+                      "layer:%" PRIu64,
+                      __FUNCTION__, displayId, lowerLayerId, layer->getId());
+
+            (*outLayerCompositionChanges)[lowerLayerId] =
+                HWC2::Composition::Client;
+          }
+        }
+      }
+    }
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error GuestComposer::presentDisplay(Display* display,
+                                          int32_t* outRetireFence) {
+  const auto displayId = display->getId();
+  DEBUG_LOG("%s display:%" PRIu64, __FUNCTION__, displayId);
+
+  if (displayId != 0) {
+    // TODO(b/171305898): remove after multi-display fully supported.
+    return HWC2::Error::None;
+  }
+
+  if (mPresentDisabled) {
+    return HWC2::Error::None;
+  }
+
+  auto it = mDisplayInfos.find(displayId);
+  if (it == mDisplayInfos.end()) {
+    ALOGE("%s: display:%" PRIu64 " not found", __FUNCTION__, displayId);
+    return HWC2::Error::NoResources;
+  }
+
+  GuestComposerDisplayInfo& displayInfo = it->second;
+
+  if (displayInfo.compositionResultBuffer == nullptr) {
+    ALOGE("%s: display:%" PRIu64 " missing composition result buffer",
+          __FUNCTION__, displayId);
+    return HWC2::Error::NoResources;
+  }
+
+  std::optional<GrallocBuffer> compositionResultBufferOpt =
+      mGralloc.Import(displayInfo.compositionResultBuffer);
+  if (!compositionResultBufferOpt) {
+    ALOGE("%s: display:%" PRIu64 " failed to import buffer", __FUNCTION__,
+          displayId);
+    return HWC2::Error::NoResources;
+  }
+
+  std::optional<uint32_t> compositionResultBufferWidthOpt =
+      compositionResultBufferOpt->GetWidth();
+  if (!compositionResultBufferWidthOpt) {
+    ALOGE("%s: display:%" PRIu64 " failed to query buffer width", __FUNCTION__,
+          displayId);
+    return HWC2::Error::NoResources;
+  }
+
+  std::optional<uint32_t> compositionResultBufferHeightOpt =
+      compositionResultBufferOpt->GetHeight();
+  if (!compositionResultBufferHeightOpt) {
+    ALOGE("%s: display:%" PRIu64 " failed to query buffer height", __FUNCTION__,
+          displayId);
+    return HWC2::Error::NoResources;
+  }
+
+  std::optional<uint32_t> compositionResultBufferStrideOpt =
+      compositionResultBufferOpt->GetMonoPlanarStrideBytes();
+  if (!compositionResultBufferStrideOpt) {
+    ALOGE("%s: display:%" PRIu64 " failed to query buffer stride", __FUNCTION__,
+          displayId);
+    return HWC2::Error::NoResources;
+  }
+
+  std::optional<GrallocBufferView> compositionResultBufferViewOpt =
+      compositionResultBufferOpt->Lock();
+  if (!compositionResultBufferViewOpt) {
+    ALOGE("%s: display:%" PRIu64 " failed to get buffer view", __FUNCTION__,
+          displayId);
+    return HWC2::Error::NoResources;
+  }
+
+  const std::optional<void*> compositionResultBufferDataOpt =
+      compositionResultBufferViewOpt->Get();
+  if (!compositionResultBufferDataOpt) {
+    ALOGE("%s: display:%" PRIu64 " failed to get buffer data", __FUNCTION__,
+          displayId);
+    return HWC2::Error::NoResources;
+  }
+
+  uint32_t compositionResultBufferWidth = *compositionResultBufferWidthOpt;
+  uint32_t compositionResultBufferHeight = *compositionResultBufferHeightOpt;
+  uint32_t compositionResultBufferStride = *compositionResultBufferStrideOpt;
+  uint8_t* compositionResultBufferData =
+      reinterpret_cast<uint8_t*>(*compositionResultBufferDataOpt);
+
+  const std::vector<Layer*>& layers = display->getOrderedLayers();
+
+  const bool noOpComposition = layers.empty();
+  const bool allLayersClientComposed = std::all_of(
+      layers.begin(),  //
+      layers.end(),    //
+      [](const Layer* layer) {
+        return layer->getCompositionType() == HWC2::Composition::Client;
+      });
+
+  if (noOpComposition) {
+    ALOGW("%s: display:%" PRIu64 " empty composition", __FUNCTION__, displayId);
+  } else if (allLayersClientComposed) {
+    auto clientTargetBufferOpt =
+        mGralloc.Import(display->waitAndGetClientTargetBuffer());
+    if (!clientTargetBufferOpt) {
+      ALOGE("%s: failed to import client target buffer.", __FUNCTION__);
+      return HWC2::Error::NoResources;
+    }
+    GrallocBuffer& clientTargetBuffer = *clientTargetBufferOpt;
+
+    auto clientTargetBufferViewOpt = clientTargetBuffer.Lock();
+    if (!clientTargetBufferViewOpt) {
+      ALOGE("%s: failed to lock client target buffer.", __FUNCTION__);
+      return HWC2::Error::NoResources;
+    }
+    GrallocBufferView& clientTargetBufferView = *clientTargetBufferViewOpt;
+
+    auto clientTargetPlaneLayoutsOpt = clientTargetBuffer.GetPlaneLayouts();
+    if (!clientTargetPlaneLayoutsOpt) {
+      ALOGE("Failed to get client target buffer plane layouts.");
+      return HWC2::Error::NoResources;
+    }
+    auto& clientTargetPlaneLayouts = *clientTargetPlaneLayoutsOpt;
+
+    if (clientTargetPlaneLayouts.size() != 1) {
+      ALOGE("Unexpected number of plane layouts for client target buffer.");
+      return HWC2::Error::NoResources;
+    }
+
+    std::size_t clientTargetPlaneSize =
+        clientTargetPlaneLayouts[0].totalSizeInBytes;
+
+    auto clientTargetDataOpt = clientTargetBufferView.Get();
+    if (!clientTargetDataOpt) {
+      ALOGE("%s failed to lock gralloc buffer.", __FUNCTION__);
+      return HWC2::Error::NoResources;
+    }
+    auto* clientTargetData = reinterpret_cast<uint8_t*>(*clientTargetDataOpt);
+
+    std::memcpy(compositionResultBufferData, clientTargetData,
+                clientTargetPlaneSize);
+  } else {
+    for (Layer* layer : layers) {
+      const auto layerId = layer->getId();
+      const auto layerCompositionType = layer->getCompositionType();
+      if (layerCompositionType != HWC2::Composition::Device) {
+        continue;
+      }
+
+      HWC2::Error error = composeLayerInto(layer,                          //
+                                           compositionResultBufferData,    //
+                                           compositionResultBufferWidth,   //
+                                           compositionResultBufferHeight,  //
+                                           compositionResultBufferStride,  //
+                                           4);
+      if (error != HWC2::Error::None) {
+        ALOGE("%s: display:%" PRIu64 " failed to compose layer:%" PRIu64,
+              __FUNCTION__, displayId, layerId);
+        return error;
+      }
+    }
+  }
+
+  DEBUG_LOG("%s display:%" PRIu64 " flushing drm buffer", __FUNCTION__,
+            displayId);
+
+  HWC2::Error error = displayInfo.compositionResultDrmBuffer->flushToDisplay(
+      static_cast<int>(displayId), outRetireFence);
+  if (error != HWC2::Error::None) {
+    ALOGE("%s: display:%" PRIu64 " failed to flush drm buffer" PRIu64,
+          __FUNCTION__, displayId);
+  }
+  return error;
+}
+
+bool GuestComposer::canComposeLayer(Layer* layer) {
+  buffer_handle_t bufferHandle = layer->getBuffer().getBuffer();
+  if (bufferHandle == nullptr) {
+    ALOGW("%s received a layer with a null handle", __FUNCTION__);
+    return false;
+  }
+
+  auto bufferOpt = mGralloc.Import(bufferHandle);
+  if (!bufferOpt) {
+    ALOGE("Failed to import layer buffer.");
+    return false;
+  }
+  GrallocBuffer& buffer = *bufferOpt;
+
+  auto bufferFormatOpt = buffer.GetDrmFormat();
+  if (!bufferFormatOpt) {
+    ALOGE("Failed to get layer buffer format.");
+    return false;
+  }
+  uint32_t bufferFormat = *bufferFormatOpt;
+
+  if (!IsDrmFormatSupported(bufferFormat)) {
+    return false;
+  }
+
+  return true;
+}
+
+HWC2::Error GuestComposer::composeLayerInto(
+    Layer* srcLayer, std::uint8_t* dstBuffer, std::uint32_t dstBufferWidth,
+    std::uint32_t dstBufferHeight, std::uint32_t dstBufferStrideBytes,
+    std::uint32_t dstBufferBytesPerPixel) {
+  libyuv::RotationMode rotation =
+      GetRotationFromTransform(srcLayer->getTransform());
+
+  auto srcBufferOpt = mGralloc.Import(srcLayer->waitAndGetBuffer());
+  if (!srcBufferOpt) {
+    ALOGE("%s: failed to import layer buffer.", __FUNCTION__);
+    return HWC2::Error::NoResources;
+  }
+  GrallocBuffer& srcBuffer = *srcBufferOpt;
+
+  auto srcBufferViewOpt = srcBuffer.Lock();
+  if (!srcBufferViewOpt) {
+    ALOGE("%s: failed to lock import layer buffer.", __FUNCTION__);
+    return HWC2::Error::NoResources;
+  }
+  GrallocBufferView& srcBufferView = *srcBufferViewOpt;
+
+  hwc_rect_t srcLayerCrop = srcLayer->getSourceCropInt();
+  hwc_rect_t srcLayerDisplayFrame = srcLayer->getDisplayFrame();
+
+  auto srcLayerSpecOpt = GetBufferSpec(srcBuffer, srcBufferView, srcLayerCrop);
+  if (!srcLayerSpecOpt) {
+    return HWC2::Error::NoResources;
+  }
+  BufferSpec srcLayerSpec = *srcLayerSpecOpt;
+
+  // TODO(jemoreira): Remove the hardcoded fomat.
+  bool needsConversion = srcLayerSpec.drmFormat != DRM_FORMAT_XBGR8888;
+  bool needsScaling = LayerNeedsScaling(*srcLayer);
+  bool needsRotation = rotation != libyuv::kRotate0;
+  bool needsTranspose = needsRotation && rotation != libyuv::kRotate180;
+  bool needsVFlip = GetVFlipFromTransform(srcLayer->getTransform());
+  bool needsAttenuation = LayerNeedsAttenuation(*srcLayer);
+  bool needsBlending = LayerNeedsBlending(*srcLayer);
+  bool needsCopy = !(needsConversion || needsScaling || needsRotation ||
+                     needsVFlip || needsAttenuation || needsBlending);
+
+  BufferSpec dstLayerSpec(
+      dstBuffer,
+      /*buffer_ycbcr=*/std::nullopt, dstBufferWidth, dstBufferHeight,
+      srcLayerDisplayFrame.left, srcLayerDisplayFrame.top,
+      srcLayerDisplayFrame.right - srcLayerDisplayFrame.left,
+      srcLayerDisplayFrame.bottom - srcLayerDisplayFrame.top,
+      DRM_FORMAT_XBGR8888, dstBufferStrideBytes, dstBufferBytesPerPixel);
+
+  // Add the destination layer to the bottom of the buffer stack
+  std::vector<BufferSpec> dstBufferStack(1, dstLayerSpec);
+
+  // If more than operation is to be performed, a temporary buffer is needed for
+  // each additional operation
+
+  // N operations need N destination buffers, the destination layer (the
+  // framebuffer) is one of them, so only N-1 temporary buffers are needed.
+  // Vertical flip is not taken into account because it can be done together
+  // with any other operation.
+  int neededScratchBuffers = (needsConversion ? 1 : 0) +
+                             (needsScaling ? 1 : 0) + (needsRotation ? 1 : 0) +
+                             (needsAttenuation ? 1 : 0) +
+                             (needsBlending ? 1 : 0) + (needsCopy ? 1 : 0) - 1;
+
+  int mScratchBufferWidth =
+      srcLayerDisplayFrame.right - srcLayerDisplayFrame.left;
+  int mScratchBufferHeight =
+      srcLayerDisplayFrame.bottom - srcLayerDisplayFrame.top;
+  int mScratchBufferStrideBytes =
+      AlignToPower2(mScratchBufferWidth * dstBufferBytesPerPixel, 4);
+  int mScratchBufferSizeBytes =
+      mScratchBufferHeight * mScratchBufferStrideBytes;
+
+  for (int i = 0; i < neededScratchBuffers; i++) {
+    BufferSpec mScratchBufferspec(
+        getRotatingScratchBuffer(mScratchBufferSizeBytes, i),
+        mScratchBufferWidth, mScratchBufferHeight, mScratchBufferStrideBytes);
+    dstBufferStack.push_back(mScratchBufferspec);
+  }
+
+  // Conversion and scaling should always be the first operations, so that every
+  // other operation works on equally sized frames (guaranteed to fit in the
+  // scratch buffers).
+
+  // TODO(jemoreira): We are converting to ARGB as the first step under the
+  // assumption that scaling ARGB is faster than scaling I420 (the most common).
+  // This should be confirmed with testing.
+  if (needsConversion) {
+    BufferSpec& dstBufferSpec = dstBufferStack.back();
+    if (needsScaling || needsTranspose) {
+      // If a rotation or a scaling operation are needed the dimensions at the
+      // top of the buffer stack are wrong (wrong sizes for scaling, swapped
+      // width and height for 90 and 270 rotations).
+      // Make width and height match the crop sizes on the source
+      int srcWidth = srcLayerSpec.cropWidth;
+      int srcHeight = srcLayerSpec.cropHeight;
+      int dst_stride_bytes =
+          AlignToPower2(srcWidth * dstBufferBytesPerPixel, 4);
+      size_t needed_size = dst_stride_bytes * srcHeight;
+      dstBufferSpec.width = srcWidth;
+      dstBufferSpec.height = srcHeight;
+      // Adjust the stride accordingly
+      dstBufferSpec.strideBytes = dst_stride_bytes;
+      // Crop sizes also need to be adjusted
+      dstBufferSpec.cropWidth = srcWidth;
+      dstBufferSpec.cropHeight = srcHeight;
+      // cropX and y are fine at 0, format is already set to match destination
+
+      // In case of a scale, the source frame may be bigger than the default tmp
+      // buffer size
+      dstBufferSpec.buffer = getSpecialScratchBuffer(needed_size);
+    }
+
+    int retval = DoConversion(srcLayerSpec, dstBufferSpec, needsVFlip);
+    if (retval) {
+      ALOGE("Got error code %d from DoConversion function", retval);
+    }
+    needsVFlip = false;
+    srcLayerSpec = dstBufferSpec;
+    dstBufferStack.pop_back();
+  }
+
+  if (needsScaling) {
+    BufferSpec& dstBufferSpec = dstBufferStack.back();
+    if (needsTranspose) {
+      // If a rotation is needed, the temporary buffer has the correct size but
+      // needs to be transposed and have its stride updated accordingly. The
+      // crop sizes also needs to be transposed, but not the x and y since they
+      // are both zero in a temporary buffer (and it is a temporary buffer
+      // because a rotation will be performed next).
+      std::swap(dstBufferSpec.width, dstBufferSpec.height);
+      std::swap(dstBufferSpec.cropWidth, dstBufferSpec.cropHeight);
+      // TODO (jemoreira): Aligment (To align here may cause the needed size to
+      // be bigger than the buffer, so care should be taken)
+      dstBufferSpec.strideBytes = dstBufferSpec.width * dstBufferBytesPerPixel;
+    }
+    int retval = DoScaling(srcLayerSpec, dstBufferSpec, needsVFlip);
+    needsVFlip = false;
+    if (retval) {
+      ALOGE("Got error code %d from DoScaling function", retval);
+    }
+    srcLayerSpec = dstBufferSpec;
+    dstBufferStack.pop_back();
+  }
+
+  if (needsRotation) {
+    int retval =
+        DoRotation(srcLayerSpec, dstBufferStack.back(), rotation, needsVFlip);
+    needsVFlip = false;
+    if (retval) {
+      ALOGE("Got error code %d from DoTransform function", retval);
+    }
+    srcLayerSpec = dstBufferStack.back();
+    dstBufferStack.pop_back();
+  }
+
+  if (needsAttenuation) {
+    int retval = DoAttenuation(srcLayerSpec, dstBufferStack.back(), needsVFlip);
+    needsVFlip = false;
+    if (retval) {
+      ALOGE("Got error code %d from DoBlending function", retval);
+    }
+    srcLayerSpec = dstBufferStack.back();
+    dstBufferStack.pop_back();
+  }
+
+  if (needsCopy) {
+    int retval = DoCopy(srcLayerSpec, dstBufferStack.back(), needsVFlip);
+    needsVFlip = false;
+    if (retval) {
+      ALOGE("Got error code %d from DoBlending function", retval);
+    }
+    srcLayerSpec = dstBufferStack.back();
+    dstBufferStack.pop_back();
+  }
+
+  // Blending (if needed) should always be the last operation, so that it reads
+  // and writes in the destination layer and not some temporary buffer.
+  if (needsBlending) {
+    int retval = DoBlending(srcLayerSpec, dstBufferStack.back(), needsVFlip);
+    needsVFlip = false;
+    if (retval) {
+      ALOGE("Got error code %d from DoBlending function", retval);
+    }
+    // Don't need to assign destination to source in the last one
+    dstBufferStack.pop_back();
+  }
+
+  return HWC2::Error::None;
+}
+
+uint8_t* GuestComposer::getRotatingScratchBuffer(std::size_t neededSize,
+                                                 std::uint32_t order) {
+  static constexpr const int kNumScratchBufferPieces = 2;
+
+  std::size_t totalNeededSize = neededSize * kNumScratchBufferPieces;
+  if (mScratchBuffer.size() < totalNeededSize) {
+    mScratchBuffer.resize(totalNeededSize);
+  }
+
+  std::size_t bufferIndex = order % kNumScratchBufferPieces;
+  std::size_t bufferOffset = bufferIndex * neededSize;
+  return &mScratchBuffer[bufferOffset];
+}
+
+uint8_t* GuestComposer::getSpecialScratchBuffer(size_t neededSize) {
+  if (mSpecialScratchBuffer.size() < neededSize) {
+    mSpecialScratchBuffer.resize(neededSize);
+  }
+
+  return &mSpecialScratchBuffer[0];
+}
+
+}  // namespace android
diff --git a/system/hwc2/GuestComposer.h b/system/hwc2/GuestComposer.h
new file mode 100644
index 0000000..930cbee
--- /dev/null
+++ b/system/hwc2/GuestComposer.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HWC_GUESTCOMPOSER_H
+#define ANDROID_HWC_GUESTCOMPOSER_H
+
+#include "Common.h"
+#include "Composer.h"
+#include "DrmPresenter.h"
+#include "Gralloc.h"
+#include "Layer.h"
+
+namespace android {
+
+class GuestComposer : public Composer {
+ public:
+  GuestComposer() = default;
+
+  GuestComposer(const GuestComposer&) = delete;
+  GuestComposer& operator=(const GuestComposer&) = delete;
+
+  GuestComposer(GuestComposer&&) = delete;
+  GuestComposer& operator=(GuestComposer&&) = delete;
+
+  HWC2::Error init(const HotplugCallback& cb) override;
+
+  HWC2::Error createDisplays(
+      Device* device,
+      const AddDisplayToDeviceFunction& addDisplayToDeviceFn) override;
+
+  HWC2::Error createDisplay(
+      Device* device, uint32_t displayId, uint32_t width, uint32_t height,
+      uint32_t dpiX, uint32_t dpiY, uint32_t refreshRateHz,
+      const AddDisplayToDeviceFunction& addDisplayToDeviceFn) override;
+
+  HWC2::Error onDisplayDestroy(Display*) override;
+
+  HWC2::Error onDisplayClientTargetSet(Display*) override {
+    return HWC2::Error::None;
+  }
+
+  // Determines if this composer can compose the given layers on the given
+  // display and requests changes for layers that can't not be composed.
+  HWC2::Error validateDisplay(
+      Display* display, std::unordered_map<hwc2_layer_t, HWC2::Composition>*
+                            outLayerCompositionChanges) override;
+
+  // Performs the actual composition of layers and presents the composed result
+  // to the display.
+  HWC2::Error presentDisplay(Display* display,
+                             int32_t* outPresentFence) override;
+
+ private:
+  struct DisplayConfig {
+    int width;
+    int height;
+    int dpiX;
+    int dpiY;
+    int refreshRateHz;
+  };
+
+  HWC2::Error getDisplayConfigsFromDeviceConfig(
+      std::vector<DisplayConfig>* configs);
+
+  HWC2::Error getDisplayConfigsFromSystemProp(
+      std::vector<DisplayConfig>* configs);
+
+  // Returns true if the given layer's buffer has supported format.
+  bool canComposeLayer(Layer* layer);
+
+  // Composes the given layer into the given destination buffer.
+  HWC2::Error composeLayerInto(Layer* layer, std::uint8_t* dstBuffer,
+                               std::uint32_t dstBufferWidth,
+                               std::uint32_t dstBufferHeight,
+                               std::uint32_t dstBufferStrideBytes,
+                               std::uint32_t dstBufferBytesPerPixel);
+
+  struct GuestComposerDisplayInfo {
+    // Additional per display buffer for the composition result.
+    buffer_handle_t compositionResultBuffer = nullptr;
+
+    std::unique_ptr<DrmBuffer> compositionResultDrmBuffer;
+  };
+
+  std::unordered_map<hwc2_display_t, GuestComposerDisplayInfo> mDisplayInfos;
+
+  Gralloc mGralloc;
+
+  DrmPresenter mDrmPresenter;
+
+  // Cuttlefish on QEMU does not have a display. Disable presenting to avoid
+  // spamming logcat with DRM commit failures.
+  bool mPresentDisabled = false;
+
+  uint8_t* getRotatingScratchBuffer(std::size_t neededSize,
+                                    std::uint32_t order);
+  uint8_t* getSpecialScratchBuffer(std::size_t neededSize);
+
+  std::vector<uint8_t> mScratchBuffer;
+  std::vector<uint8_t> mSpecialScratchBuffer;
+};
+
+}  // namespace android
+
+#endif
diff --git a/system/hwc2/HostComposer.cpp b/system/hwc2/HostComposer.cpp
new file mode 100644
index 0000000..c55eccd
--- /dev/null
+++ b/system/hwc2/HostComposer.cpp
@@ -0,0 +1,858 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "HostComposer.h"
+
+#include <EGL/egl.h>
+#include <EGL/eglext.h>
+#include <android-base/parseint.h>
+#include <android-base/properties.h>
+#include <android-base/strings.h>
+#include <drm/virtgpu_drm.h>
+#include <poll.h>
+#include <sync/sync.h>
+#include <ui/GraphicBuffer.h>
+#include <ui/GraphicBufferAllocator.h>
+#include <ui/GraphicBufferMapper.h>
+
+#include "../egl/goldfish_sync.h"
+#include "Device.h"
+#include "Display.h"
+
+namespace android {
+namespace {
+
+static int getVsyncHzFromProperty() {
+  static constexpr const auto kVsyncProp = "ro.boot.qemu.vsync";
+
+  const auto vsyncProp = android::base::GetProperty(kVsyncProp, "");
+  DEBUG_LOG("%s: prop value is: %s", __FUNCTION__, vsyncProp.c_str());
+
+  uint64_t vsyncPeriod;
+  if (!android::base::ParseUint(vsyncProp, &vsyncPeriod)) {
+    ALOGE("%s: failed to parse vsync period '%s', returning default 60",
+          __FUNCTION__, vsyncProp.c_str());
+    return 60;
+  }
+
+  return static_cast<int>(vsyncPeriod);
+}
+
+static bool isMinigbmFromProperty() {
+  static constexpr const auto kGrallocProp = "ro.hardware.gralloc";
+
+  const auto grallocProp = android::base::GetProperty(kGrallocProp, "");
+  DEBUG_LOG("%s: prop value is: %s", __FUNCTION__, grallocProp.c_str());
+
+  if (grallocProp == "minigbm") {
+    ALOGD("%s: Using minigbm, in minigbm mode.\n", __FUNCTION__);
+    return true;
+  } else {
+    ALOGD("%s: Is not using minigbm, in goldfish mode.\n", __FUNCTION__);
+    return false;
+  }
+}
+
+#define DEFINE_AND_VALIDATE_HOST_CONNECTION                                   \
+  HostConnection* hostCon = createOrGetHostConnection();                      \
+  if (!hostCon) {                                                             \
+    ALOGE("%s: Failed to get host connection\n", __FUNCTION__);               \
+    return HWC2::Error::NoResources;                                          \
+  }                                                                           \
+  ExtendedRCEncoderContext* rcEnc = hostCon->rcEncoder();                     \
+  if (!rcEnc) {                                                               \
+    ALOGE("%s: Failed to get renderControl encoder context\n", __FUNCTION__); \
+    return HWC2::Error::NoResources;                                          \
+  }
+
+static std::unique_ptr<HostConnection> sHostCon;
+
+static HostConnection* createOrGetHostConnection() {
+  if (!sHostCon) {
+    sHostCon = HostConnection::createUnique();
+  }
+  return sHostCon.get();
+}
+
+typedef struct compose_layer {
+  uint32_t cbHandle;
+  hwc2_composition_t composeMode;
+  hwc_rect_t displayFrame;
+  hwc_frect_t crop;
+  int32_t blendMode;
+  float alpha;
+  hwc_color_t color;
+  hwc_transform_t transform;
+} ComposeLayer;
+
+typedef struct compose_device {
+  uint32_t version;
+  uint32_t targetHandle;
+  uint32_t numLayers;
+  struct compose_layer layer[0];
+} ComposeDevice;
+
+typedef struct compose_device_v2 {
+  uint32_t version;
+  uint32_t displayId;
+  uint32_t targetHandle;
+  uint32_t numLayers;
+  struct compose_layer layer[0];
+} ComposeDevice_v2;
+
+class ComposeMsg {
+ public:
+  ComposeMsg(uint32_t layerCnt = 0)
+      : mData(sizeof(ComposeDevice) + layerCnt * sizeof(ComposeLayer)) {
+    mComposeDevice = reinterpret_cast<ComposeDevice*>(mData.data());
+    mLayerCnt = layerCnt;
+  }
+
+  ComposeDevice* get() { return mComposeDevice; }
+
+  uint32_t getLayerCnt() { return mLayerCnt; }
+
+ private:
+  std::vector<uint8_t> mData;
+  uint32_t mLayerCnt;
+  ComposeDevice* mComposeDevice;
+};
+
+class ComposeMsg_v2 {
+ public:
+  ComposeMsg_v2(uint32_t layerCnt = 0)
+      : mData(sizeof(ComposeDevice_v2) + layerCnt * sizeof(ComposeLayer)) {
+    mComposeDevice = reinterpret_cast<ComposeDevice_v2*>(mData.data());
+    mLayerCnt = layerCnt;
+  }
+
+  ComposeDevice_v2* get() { return mComposeDevice; }
+
+  uint32_t getLayerCnt() { return mLayerCnt; }
+
+ private:
+  std::vector<uint8_t> mData;
+  uint32_t mLayerCnt;
+  ComposeDevice_v2* mComposeDevice;
+};
+
+const native_handle_t* AllocateDisplayColorBuffer(int width, int height) {
+  const uint32_t layerCount = 1;
+  const uint64_t graphicBufferId = 0;  // not used
+  buffer_handle_t h;
+  uint32_t stride;
+
+  if (GraphicBufferAllocator::get().allocate(
+          width, height, PIXEL_FORMAT_RGBA_8888, layerCount,
+          (GraphicBuffer::USAGE_HW_COMPOSER | GraphicBuffer::USAGE_HW_RENDER),
+          &h, &stride, graphicBufferId, "EmuHWC2") == OK) {
+    return static_cast<const native_handle_t*>(h);
+  } else {
+    return nullptr;
+  }
+}
+
+void FreeDisplayColorBuffer(const native_handle_t* h) {
+  GraphicBufferAllocator::get().free(h);
+}
+
+}  // namespace
+
+HWC2::Error HostComposer::init(const HotplugCallback& cb) {
+  mIsMinigbm = isMinigbmFromProperty();
+  if (mIsMinigbm) {
+    if (!mDrmPresenter.init(cb)) {
+      ALOGE("%s: failed to initialize DrmPresenter", __FUNCTION__);
+      return HWC2::Error::NoResources;
+    }
+  } else {
+    mSyncDeviceFd = goldfish_sync_open();
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error HostComposer::createDisplays(
+    Device* device, const AddDisplayToDeviceFunction& addDisplayToDeviceFn) {
+  HWC2::Error error = HWC2::Error::None;
+
+  error = createPrimaryDisplay(device, addDisplayToDeviceFn);
+  if (error != HWC2::Error::None) {
+    ALOGE("%s failed to create primary display", __FUNCTION__);
+    return error;
+  }
+
+  error = createSecondaryDisplays(device, addDisplayToDeviceFn);
+  if (error != HWC2::Error::None) {
+    ALOGE("%s failed to create secondary displays", __FUNCTION__);
+    return error;
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error HostComposer::createPrimaryDisplay(
+    Device* device, const AddDisplayToDeviceFunction& addDisplayToDeviceFn) {
+  HWC2::Error error = HWC2::Error::None;
+
+  DEFINE_AND_VALIDATE_HOST_CONNECTION
+  hostCon->lock();
+  int width = rcEnc->rcGetFBParam(rcEnc, FB_WIDTH);
+  int height = rcEnc->rcGetFBParam(rcEnc, FB_HEIGHT);
+  int dpiX = rcEnc->rcGetFBParam(rcEnc, FB_XDPI);
+  int dpiY = rcEnc->rcGetFBParam(rcEnc, FB_YDPI);
+  hostCon->unlock();
+
+  int refreshRateHz = getVsyncHzFromProperty();
+
+  auto display = std::make_unique<Display>(*device, this, 0);
+  if (display == nullptr) {
+    ALOGE("%s failed to allocate display", __FUNCTION__);
+    return HWC2::Error::NoResources;
+  }
+
+  auto displayId = display->getId();
+
+  error = display->init(width, height, dpiX, dpiY, refreshRateHz);
+  if (error != HWC2::Error::None) {
+    ALOGE("%s failed to initialize display:%" PRIu64, __FUNCTION__, displayId);
+    return error;
+  }
+
+  error = createHostComposerDisplayInfo(display.get(), /*hostDisplayId=*/0);
+  if (error != HWC2::Error::None) {
+    ALOGE("%s failed to initialize host info for display:%" PRIu64,
+          __FUNCTION__, displayId);
+    return error;
+  }
+
+  error = addDisplayToDeviceFn(std::move(display));
+  if (error != HWC2::Error::None) {
+    ALOGE("%s failed to add display:%" PRIu64, __FUNCTION__, displayId);
+    return error;
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error HostComposer::createDisplay(
+    Device* device, uint32_t displayId, uint32_t width, uint32_t height,
+    uint32_t dpiX, uint32_t dpiY, uint32_t refreshRateHz,
+    const AddDisplayToDeviceFunction& addDisplayToDeviceFn) {
+  HWC2::Error error;
+  Display* display = device->getDisplay(displayId);
+  if (display) {
+    ALOGD("%s display %d already existed, then update", __func__, displayId);
+  }
+
+  DEFINE_AND_VALIDATE_HOST_CONNECTION
+  hostCon->lock();
+  if (rcEnc->rcCreateDisplayById(rcEnc, displayId)) {
+    ALOGE("%s host failed to create display %" PRIu32, __func__, displayId);
+    hostCon->unlock();
+    return HWC2::Error::NoResources;
+  }
+  if (rcEnc->rcSetDisplayPoseDpi(rcEnc, displayId, -1, -1, width, height, dpiX/1000)) {
+    ALOGE("%s host failed to set display %" PRIu32, __func__, displayId);
+    hostCon->unlock();
+    return HWC2::Error::NoResources;
+  }
+  hostCon->unlock();
+
+  std::optional<std::vector<uint8_t>> edid;
+  if (mIsMinigbm) {
+    edid = mDrmPresenter.getEdid(displayId);
+  }
+  if (!display) {
+    auto newDisplay = std::make_unique<Display>(*device, this, displayId);
+    if (newDisplay == nullptr) {
+      ALOGE("%s failed to allocate display", __FUNCTION__);
+      return HWC2::Error::NoResources;
+    }
+
+
+    error = newDisplay->init(width, height, dpiX, dpiY, refreshRateHz, edid);
+    if (error != HWC2::Error::None) {
+      ALOGE("%s failed to initialize display:%" PRIu32, __FUNCTION__,
+            displayId);
+      return error;
+    }
+
+    error =
+        createHostComposerDisplayInfo(newDisplay.get(), displayId);
+    if (error != HWC2::Error::None) {
+      ALOGE("%s failed to initialize host info for display:%" PRIu32,
+            __FUNCTION__, displayId);
+      return error;
+    }
+
+    error = addDisplayToDeviceFn(std::move(newDisplay));
+    if (error != HWC2::Error::None) {
+      ALOGE("%s failed to add display:%" PRIu32, __FUNCTION__, displayId);
+      return error;
+    }
+  } else {
+    display->lock();
+    // update display parameters
+    error = display->updateParameters(width, height, dpiX, dpiY,
+                                      refreshRateHz, edid);
+    if (error != HWC2::Error::None) {
+      ALOGE("%s failed to update display:%" PRIu32, __FUNCTION__, displayId);
+      display->unlock();
+      return error;
+    }
+
+    error = createHostComposerDisplayInfo(display, displayId);
+    if (error != HWC2::Error::None) {
+      ALOGE("%s failed to initialize host info for display:%" PRIu32,
+            __FUNCTION__, displayId);
+      display->unlock();
+      return error;
+    }
+    display->unlock();
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error HostComposer::createSecondaryDisplays(
+    Device* device, const AddDisplayToDeviceFunction& addDisplayToDeviceFn) {
+  HWC2::Error error = HWC2::Error::None;
+
+  static constexpr const char kExternalDisplayProp[] =
+      "hwservicemanager.external.displays";
+
+  const auto propString = android::base::GetProperty(kExternalDisplayProp, "");
+  DEBUG_LOG("%s: prop value is: %s", __FUNCTION__, propString.c_str());
+
+  if (propString.empty()) {
+    return HWC2::Error::None;
+  }
+
+  const std::vector<std::string> propStringParts =
+      android::base::Split(propString, ",");
+  if (propStringParts.size() % 5 != 0) {
+    ALOGE("%s: Invalid syntax for system prop %s which is %s", __FUNCTION__,
+          kExternalDisplayProp, propString.c_str());
+    return HWC2::Error::BadParameter;
+  }
+
+  std::vector<int> propIntParts;
+  for (const std::string& propStringPart : propStringParts) {
+    uint64_t propUintPart;
+    if (!android::base::ParseUint(propStringPart, &propUintPart)) {
+      ALOGE("%s: Invalid syntax for system prop %s which is %s", __FUNCTION__,
+            kExternalDisplayProp, propString.c_str());
+      return HWC2::Error::BadParameter;
+    }
+    propIntParts.push_back(static_cast<int>(propUintPart));
+  }
+
+  static constexpr const uint32_t kHostDisplayIdStart = 6;
+
+  uint32_t secondaryDisplayIndex = 1;
+  while (!propIntParts.empty()) {
+    int width = propIntParts[1];
+    int height = propIntParts[2];
+    int dpiX = propIntParts[3];
+    int dpiY = propIntParts[3];
+    int refreshRateHz = 160;
+
+    propIntParts.erase(propIntParts.begin(), propIntParts.begin() + 5);
+
+    uint32_t expectedHostDisplayId =
+        kHostDisplayIdStart + secondaryDisplayIndex - 1;
+    uint32_t actualHostDisplayId = 0;
+
+    DEFINE_AND_VALIDATE_HOST_CONNECTION
+    hostCon->lock();
+    rcEnc->rcDestroyDisplay(rcEnc, expectedHostDisplayId);
+    rcEnc->rcCreateDisplay(rcEnc, &actualHostDisplayId);
+    rcEnc->rcSetDisplayPose(rcEnc, actualHostDisplayId, -1, -1, width, height);
+    hostCon->unlock();
+
+    if (actualHostDisplayId != expectedHostDisplayId) {
+      ALOGE(
+          "Something wrong with host displayId allocation, expected %d "
+          "but received %d",
+          expectedHostDisplayId, actualHostDisplayId);
+    }
+
+    auto display =
+        std::make_unique<Display>(*device, this, secondaryDisplayIndex++);
+    if (display == nullptr) {
+      ALOGE("%s failed to allocate display", __FUNCTION__);
+      return HWC2::Error::NoResources;
+    }
+
+    auto displayId = display->getId();
+
+    error = display->init(width, height, dpiX, dpiY, refreshRateHz);
+    if (error != HWC2::Error::None) {
+      ALOGE("%s failed to initialize display:%" PRIu64, __FUNCTION__,
+            displayId);
+      return error;
+    }
+
+    error = createHostComposerDisplayInfo(display.get(), actualHostDisplayId);
+    if (error != HWC2::Error::None) {
+      ALOGE("%s failed to initialize host info for display:%" PRIu64,
+            __FUNCTION__, displayId);
+      return error;
+    }
+
+    error = addDisplayToDeviceFn(std::move(display));
+    if (error != HWC2::Error::None) {
+      ALOGE("%s failed to add display:%" PRIu64, __FUNCTION__, displayId);
+      return error;
+    }
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error HostComposer::createHostComposerDisplayInfo(
+    Display* display, uint32_t hostDisplayId) {
+  HWC2::Error error = HWC2::Error::None;
+
+  hwc2_display_t displayId = display->getId();
+  hwc2_config_t displayConfigId;
+  int32_t displayWidth;
+  int32_t displayHeight;
+
+  error = display->getActiveConfig(&displayConfigId);
+  if (error != HWC2::Error::None) {
+    ALOGE("%s: display:%" PRIu64 " has no active config", __FUNCTION__,
+          displayId);
+    return error;
+  }
+
+  error = display->getDisplayAttributeEnum(
+      displayConfigId, HWC2::Attribute::Width, &displayWidth);
+  if (error != HWC2::Error::None) {
+    ALOGE("%s: display:%" PRIu64 " failed to get width", __FUNCTION__,
+          displayId);
+    return error;
+  }
+
+  error = display->getDisplayAttributeEnum(
+      displayConfigId, HWC2::Attribute::Height, &displayHeight);
+  if (error != HWC2::Error::None) {
+    ALOGE("%s: display:%" PRIu64 " failed to get height", __FUNCTION__,
+          displayId);
+    return error;
+  }
+
+  auto it = mDisplayInfos.find(displayId);
+  if (it != mDisplayInfos.end()) {
+    ALOGE("%s: display:%" PRIu64 " already created?", __FUNCTION__, displayId);
+  }
+
+  HostComposerDisplayInfo& displayInfo = mDisplayInfos[displayId];
+
+  displayInfo.hostDisplayId = hostDisplayId;
+
+  displayInfo.compositionResultBuffer =
+      AllocateDisplayColorBuffer(displayWidth, displayHeight);
+  if (displayInfo.compositionResultBuffer == nullptr) {
+    ALOGE("%s: display:%" PRIu64 " failed to create target buffer",
+          __FUNCTION__, displayId);
+    return HWC2::Error::NoResources;
+  }
+
+  if (mIsMinigbm) {
+    displayInfo.compositionResultDrmBuffer.reset(
+        new DrmBuffer(displayInfo.compositionResultBuffer, mDrmPresenter));
+
+    uint32_t vsyncPeriod = 1000 * 1000 * 1000 / mDrmPresenter.refreshRate();
+    error = display->setVsyncPeriod(vsyncPeriod);
+    if (error != HWC2::Error::None) {
+      ALOGE("%s: display:%" PRIu64 " failed to set vsync height", __FUNCTION__,
+            displayId);
+      return error;
+    }
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error HostComposer::onDisplayDestroy(Display* display) {
+  hwc2_display_t displayId = display->getId();
+
+  auto it = mDisplayInfos.find(displayId);
+  if (it == mDisplayInfos.end()) {
+    ALOGE("%s: display:%" PRIu64 " missing display buffers?", __FUNCTION__,
+          displayId);
+    return HWC2::Error::BadDisplay;
+  }
+
+  HostComposerDisplayInfo& displayInfo = mDisplayInfos[displayId];
+
+  if (displayId != 0) {
+    DEFINE_AND_VALIDATE_HOST_CONNECTION
+    hostCon->lock();
+    rcEnc->rcDestroyDisplay(rcEnc, displayInfo.hostDisplayId);
+    hostCon->unlock();
+  }
+
+  FreeDisplayColorBuffer(displayInfo.compositionResultBuffer);
+
+  mDisplayInfos.erase(it);
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error HostComposer::onDisplayClientTargetSet(Display* display) {
+  hwc2_display_t displayId = display->getId();
+
+  auto it = mDisplayInfos.find(displayId);
+  if (it == mDisplayInfos.end()) {
+    ALOGE("%s: display:%" PRIu64 " missing display buffers?", __FUNCTION__,
+          displayId);
+    return HWC2::Error::BadDisplay;
+  }
+
+  HostComposerDisplayInfo& displayInfo = mDisplayInfos[displayId];
+
+  if (mIsMinigbm) {
+    FencedBuffer& clientTargetFencedBuffer = display->getClientTarget();
+
+    displayInfo.clientTargetDrmBuffer.reset(
+        new DrmBuffer(clientTargetFencedBuffer.getBuffer(), mDrmPresenter));
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error HostComposer::validateDisplay(
+    Display* display, std::unordered_map<hwc2_layer_t, HWC2::Composition>*
+                          layerCompositionChanges) {
+  DEFINE_AND_VALIDATE_HOST_CONNECTION
+  hostCon->lock();
+  bool hostCompositionV1 = rcEnc->hasHostCompositionV1();
+  bool hostCompositionV2 = rcEnc->hasHostCompositionV2();
+  hostCon->unlock();
+
+  const std::vector<Layer*> layers = display->getOrderedLayers();
+
+  if (hostCompositionV1 || hostCompositionV2) {
+    // Support Device and SolidColor, otherwise, fallback all layers to Client.
+    bool fallBack = false;
+    // TODO: use local var compositiontype, avoid call getCompositionType() many
+    // times
+    for (auto& layer : layers) {
+      if (layer->getCompositionType() == HWC2::Composition::Invalid) {
+        // Log error for unused layers, layer leak?
+        ALOGE("%s layer %u CompositionType(%d) not set", __FUNCTION__,
+              (uint32_t)layer->getId(), layer->getCompositionType());
+        continue;
+      }
+      if (layer->getCompositionType() == HWC2::Composition::Client ||
+          layer->getCompositionType() == HWC2::Composition::Cursor ||
+          layer->getCompositionType() == HWC2::Composition::Sideband) {
+        ALOGW("%s: layer %u CompositionType %d, fallback", __FUNCTION__,
+              (uint32_t)layer->getId(), layer->getCompositionType());
+        fallBack = true;
+        break;
+      }
+    }
+
+    if (display->hasColorTransform()) {
+      fallBack = true;
+    }
+
+    if (fallBack) {
+      for (auto& layer : layers) {
+        if (layer->getCompositionType() == HWC2::Composition::Invalid) {
+          continue;
+        }
+        if (layer->getCompositionType() != HWC2::Composition::Client) {
+          (*layerCompositionChanges)[layer->getId()] =
+              HWC2::Composition::Client;
+        }
+      }
+    }
+  } else {
+    for (auto& layer : layers) {
+      if (layer->getCompositionType() != HWC2::Composition::Client) {
+        (*layerCompositionChanges)[layer->getId()] = HWC2::Composition::Client;
+      }
+    }
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error HostComposer::presentDisplay(Display* display,
+                                         int32_t* outRetireFence) {
+  auto it = mDisplayInfos.find(display->getId());
+  if (it == mDisplayInfos.end()) {
+    ALOGE("%s: failed to find display buffers for display:%" PRIu64,
+          __FUNCTION__, display->getId());
+    return HWC2::Error::BadDisplay;
+  }
+
+  HostComposerDisplayInfo& displayInfo = it->second;
+
+  DEFINE_AND_VALIDATE_HOST_CONNECTION
+  hostCon->lock();
+  bool hostCompositionV1 = rcEnc->hasHostCompositionV1();
+  bool hostCompositionV2 = rcEnc->hasHostCompositionV2();
+  hostCon->unlock();
+
+  // Ff we supports v2, then discard v1
+  if (hostCompositionV2) {
+    hostCompositionV1 = false;
+  }
+
+  const std::vector<Layer*> layers = display->getOrderedLayers();
+  if (hostCompositionV2 || hostCompositionV1) {
+    uint32_t numLayer = 0;
+    for (auto layer : layers) {
+      if (layer->getCompositionType() == HWC2::Composition::Device ||
+          layer->getCompositionType() == HWC2::Composition::SolidColor) {
+        numLayer++;
+      }
+    }
+
+    DEBUG_LOG("%s: presenting display:%" PRIu64 " with %d layers", __FUNCTION__,
+              display->getId(), static_cast<int>(layers.size()));
+
+    display->clearReleaseFencesAndIdsLocked();
+
+    if (numLayer == 0) {
+      ALOGW(
+          "%s display has no layers to compose, flushing client target buffer.",
+          __FUNCTION__);
+
+      FencedBuffer& displayClientTarget = display->getClientTarget();
+      if (displayClientTarget.getBuffer() != nullptr) {
+        if (mIsMinigbm) {
+          int retireFence;
+          displayInfo.clientTargetDrmBuffer->flushToDisplay(display->getId(),
+                                                            &retireFence);
+          *outRetireFence = dup(retireFence);
+          close(retireFence);
+        } else {
+          post(hostCon, rcEnc, displayClientTarget.getBuffer());
+          *outRetireFence = displayClientTarget.getFence();
+        }
+      }
+      return HWC2::Error::None;
+    }
+
+    std::unique_ptr<ComposeMsg> composeMsg;
+    std::unique_ptr<ComposeMsg_v2> composeMsgV2;
+
+    if (hostCompositionV1) {
+      composeMsg.reset(new ComposeMsg(numLayer));
+    } else {
+      composeMsgV2.reset(new ComposeMsg_v2(numLayer));
+    }
+
+    // Handle the composition
+    ComposeDevice* p;
+    ComposeDevice_v2* p2;
+    ComposeLayer* l;
+
+    if (hostCompositionV1) {
+      p = composeMsg->get();
+      l = p->layer;
+    } else {
+      p2 = composeMsgV2->get();
+      l = p2->layer;
+    }
+
+    int releaseLayersCount = 0;
+    for (auto layer : layers) {
+      // TODO: use local var composisitonType to store getCompositionType()
+      if (layer->getCompositionType() != HWC2::Composition::Device &&
+          layer->getCompositionType() != HWC2::Composition::SolidColor) {
+        ALOGE("%s: Unsupported composition types %d layer %u", __FUNCTION__,
+              layer->getCompositionType(), (uint32_t)layer->getId());
+        continue;
+      }
+      // send layer composition command to host
+      if (layer->getCompositionType() == HWC2::Composition::Device) {
+        display->addReleaseLayerLocked(layer->getId());
+        releaseLayersCount++;
+
+        int fence = layer->getBuffer().getFence();
+        if (fence != -1) {
+          int err = sync_wait(fence, 3000);
+          if (err < 0 && errno == ETIME) {
+            ALOGE("%s waited on fence %d for 3000 ms", __FUNCTION__, fence);
+          }
+          close(fence);
+        } else {
+          ALOGV("%s: acquire fence not set for layer %u", __FUNCTION__,
+                (uint32_t)layer->getId());
+        }
+        const native_handle_t* cb = layer->getBuffer().getBuffer();
+        if (cb != nullptr) {
+          l->cbHandle = hostCon->grallocHelper()->getHostHandle(cb);
+        } else {
+          ALOGE("%s null buffer for layer %d", __FUNCTION__,
+                (uint32_t)layer->getId());
+        }
+      } else {
+        // solidcolor has no buffer
+        l->cbHandle = 0;
+      }
+      l->composeMode = (hwc2_composition_t)layer->getCompositionType();
+      l->displayFrame = layer->getDisplayFrame();
+      l->crop = layer->getSourceCrop();
+      l->blendMode = static_cast<int32_t>(layer->getBlendMode());
+      l->alpha = layer->getPlaneAlpha();
+      l->color = layer->getColor();
+      l->transform = layer->getTransform();
+      ALOGV(
+          "   cb %d blendmode %d alpha %f %d %d %d %d z %d"
+          " composeMode %d, transform %d",
+          l->cbHandle, l->blendMode, l->alpha, l->displayFrame.left,
+          l->displayFrame.top, l->displayFrame.right, l->displayFrame.bottom,
+          layer->getZ(), l->composeMode, l->transform);
+      l++;
+    }
+    if (hostCompositionV1) {
+      p->version = 1;
+      p->targetHandle = hostCon->grallocHelper()->getHostHandle(
+          displayInfo.compositionResultBuffer);
+      p->numLayers = numLayer;
+    } else {
+      p2->version = 2;
+      p2->displayId = displayInfo.hostDisplayId;
+      p2->targetHandle = hostCon->grallocHelper()->getHostHandle(
+          displayInfo.compositionResultBuffer);
+      p2->numLayers = numLayer;
+    }
+
+    hostCon->lock();
+    if (rcEnc->hasAsyncFrameCommands()) {
+      if (mIsMinigbm) {
+        if (hostCompositionV1) {
+          rcEnc->rcComposeAsyncWithoutPost(
+              rcEnc, sizeof(ComposeDevice) + numLayer * sizeof(ComposeLayer),
+              (void*)p);
+        } else {
+          rcEnc->rcComposeAsyncWithoutPost(
+              rcEnc, sizeof(ComposeDevice_v2) + numLayer * sizeof(ComposeLayer),
+              (void*)p2);
+        }
+      } else {
+        if (hostCompositionV1) {
+          rcEnc->rcComposeAsync(
+              rcEnc, sizeof(ComposeDevice) + numLayer * sizeof(ComposeLayer),
+              (void*)p);
+        } else {
+          rcEnc->rcComposeAsync(
+              rcEnc, sizeof(ComposeDevice_v2) + numLayer * sizeof(ComposeLayer),
+              (void*)p2);
+        }
+      }
+    } else {
+      if (mIsMinigbm) {
+        if (hostCompositionV1) {
+          rcEnc->rcComposeWithoutPost(
+              rcEnc, sizeof(ComposeDevice) + numLayer * sizeof(ComposeLayer),
+              (void*)p);
+        } else {
+          rcEnc->rcComposeWithoutPost(
+              rcEnc, sizeof(ComposeDevice_v2) + numLayer * sizeof(ComposeLayer),
+              (void*)p2);
+        }
+      } else {
+        if (hostCompositionV1) {
+          rcEnc->rcCompose(
+              rcEnc, sizeof(ComposeDevice) + numLayer * sizeof(ComposeLayer),
+              (void*)p);
+        } else {
+          rcEnc->rcCompose(
+              rcEnc, sizeof(ComposeDevice_v2) + numLayer * sizeof(ComposeLayer),
+              (void*)p2);
+        }
+      }
+    }
+
+    hostCon->unlock();
+
+    // Send a retire fence and use it as the release fence for all layers,
+    // since media expects it
+    EGLint attribs[] = {EGL_SYNC_NATIVE_FENCE_ANDROID,
+                        EGL_NO_NATIVE_FENCE_FD_ANDROID};
+
+    uint64_t sync_handle, thread_handle;
+    int retire_fd;
+
+    hostCon->lock();
+    rcEnc->rcCreateSyncKHR(rcEnc, EGL_SYNC_NATIVE_FENCE_ANDROID, attribs,
+                           2 * sizeof(EGLint), true /* destroy when signaled */,
+                           &sync_handle, &thread_handle);
+    hostCon->unlock();
+
+    if (mIsMinigbm) {
+      displayInfo.compositionResultDrmBuffer->flushToDisplay(display->getId(),
+                                                             &retire_fd);
+    } else {
+      goldfish_sync_queue_work(mSyncDeviceFd, sync_handle, thread_handle,
+                               &retire_fd);
+    }
+
+    for (size_t i = 0; i < releaseLayersCount; ++i) {
+      display->addReleaseFenceLocked(dup(retire_fd));
+    }
+
+    *outRetireFence = dup(retire_fd);
+    close(retire_fd);
+    hostCon->lock();
+    if (rcEnc->hasAsyncFrameCommands()) {
+      rcEnc->rcDestroySyncKHRAsync(rcEnc, sync_handle);
+    } else {
+      rcEnc->rcDestroySyncKHR(rcEnc, sync_handle);
+    }
+    hostCon->unlock();
+
+  } else {
+    // we set all layers Composition::Client, so do nothing.
+    if (mIsMinigbm) {
+      int retireFence;
+      displayInfo.clientTargetDrmBuffer->flushToDisplay(display->getId(),
+                                                        &retireFence);
+      *outRetireFence = dup(retireFence);
+      close(retireFence);
+    } else {
+      FencedBuffer& displayClientTarget = display->getClientTarget();
+      post(hostCon, rcEnc, displayClientTarget.getBuffer());
+      *outRetireFence = displayClientTarget.getFence();
+    }
+    ALOGV("%s fallback to post, returns outRetireFence %d", __FUNCTION__,
+          *outRetireFence);
+  }
+
+  return HWC2::Error::None;
+}
+
+void HostComposer::post(HostConnection* hostCon,
+                        ExtendedRCEncoderContext* rcEnc, buffer_handle_t h) {
+  assert(cb && "native_handle_t::from(h) failed");
+
+  hostCon->lock();
+  rcEnc->rcFBPost(rcEnc, hostCon->grallocHelper()->getHostHandle(h));
+  hostCon->flush();
+  hostCon->unlock();
+}
+
+}  // namespace android
diff --git a/system/hwc2/HostComposer.h b/system/hwc2/HostComposer.h
new file mode 100644
index 0000000..fe4a49f
--- /dev/null
+++ b/system/hwc2/HostComposer.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HWC_HOSTCOMPOSER_H
+#define ANDROID_HWC_HOSTCOMPOSER_H
+
+#include "Common.h"
+#include "Composer.h"
+#include "DrmPresenter.h"
+#include "HostConnection.h"
+
+namespace android {
+
+class HostComposer : public Composer {
+ public:
+  HostComposer() = default;
+
+  HostComposer(const HostComposer&) = delete;
+  HostComposer& operator=(const HostComposer&) = delete;
+
+  HostComposer(HostComposer&&) = delete;
+  HostComposer& operator=(HostComposer&&) = delete;
+
+  HWC2::Error init(const HotplugCallback& cb) override;
+
+  HWC2::Error createDisplays(
+      Device* device,
+      const AddDisplayToDeviceFunction& addDisplayToDeviceFn) override;
+
+  HWC2::Error createDisplay(
+      Device* device, uint32_t displayId, uint32_t width, uint32_t height,
+      uint32_t dpiX, uint32_t dpiY, uint32_t refreshRateHz,
+      const AddDisplayToDeviceFunction& addDisplayToDeviceFn) override;
+
+  HWC2::Error onDisplayDestroy(Display* display) override;
+
+  HWC2::Error onDisplayClientTargetSet(Display* display) override;
+
+  // Determines if this composer can compose the given layers on the given
+  // display and requests changes for layers that can't not be composed.
+  HWC2::Error validateDisplay(
+      Display* display, std::unordered_map<hwc2_layer_t, HWC2::Composition>*
+                            outLayerCompositionChanges) override;
+
+  // Performs the actual composition of layers and presents the composed result
+  // to the display.
+  HWC2::Error presentDisplay(Display* display,
+                             int32_t* outPresentFence) override;
+
+ private:
+  HWC2::Error createPrimaryDisplay(
+      Device* device, const AddDisplayToDeviceFunction& addDisplayToDeviceFn);
+
+  HWC2::Error createSecondaryDisplays(
+      Device* device, const AddDisplayToDeviceFunction& addDisplayToDeviceFn);
+
+  HWC2::Error createHostComposerDisplayInfo(Display* display,
+                                            uint32_t hostDisplayId);
+
+  void post(HostConnection* hostCon, ExtendedRCEncoderContext* rcEnc,
+            buffer_handle_t h);
+
+  bool mIsMinigbm = false;
+
+  int mSyncDeviceFd = -1;
+
+  struct HostComposerDisplayInfo {
+    uint32_t hostDisplayId = 0;
+
+    // Additional per display buffer for the composition result.
+    const native_handle_t* compositionResultBuffer = nullptr;
+
+    // Drm info for the additional composition result buffer.
+    std::unique_ptr<DrmBuffer> compositionResultDrmBuffer;
+
+    // Drm info for the displays client target buffer.
+    std::unique_ptr<DrmBuffer> clientTargetDrmBuffer;
+  };
+
+  std::unordered_map<hwc2_display_t, HostComposerDisplayInfo> mDisplayInfos;
+
+  DrmPresenter mDrmPresenter;
+};
+
+}  // namespace android
+
+#endif
diff --git a/system/hwc2/Layer.cpp b/system/hwc2/Layer.cpp
new file mode 100644
index 0000000..6cdfd33
--- /dev/null
+++ b/system/hwc2/Layer.cpp
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Layer.h"
+
+#include <sync/sync.h>
+
+#include <atomic>
+
+namespace android {
+namespace {
+
+std::atomic<hwc2_layer_t> sNextId{1};
+
+}  // namespace
+
+Layer::Layer() : mId(sNextId++) {}
+
+HWC2::Error Layer::setBuffer(buffer_handle_t buffer, int32_t fence) {
+  DEBUG_LOG("%s: layer:%" PRIu64 " buffer:%p fence:%" PRIu32, __FUNCTION__, mId,
+            buffer, fence);
+  mBuffer.setBuffer(buffer);
+  mBuffer.setFence(fence);
+  return HWC2::Error::None;
+}
+
+buffer_handle_t Layer::waitAndGetBuffer() {
+  DEBUG_LOG("%s layer:%" PRIu64, __FUNCTION__, mId);
+
+  int fence = mBuffer.getFence();
+  if (fence != -1) {
+    int err = sync_wait(fence, 3000);
+    if (err < 0 && errno == ETIME) {
+      ALOGE("%s waited on fence %" PRId32 " for 3000 ms", __FUNCTION__, fence);
+    }
+    close(fence);
+  }
+
+  return mBuffer.getBuffer();
+}
+
+HWC2::Error Layer::setCursorPosition(int32_t /*x*/, int32_t /*y*/) {
+  DEBUG_LOG("%s layer:%" PRIu64, __FUNCTION__, mId);
+
+  if (mCompositionType != HWC2::Composition::Cursor) {
+    ALOGE("%s: CompositionType not Cursor type", __FUNCTION__);
+    return HWC2::Error::BadLayer;
+  }
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error Layer::setSurfaceDamage(hwc_region_t /*damage*/) {
+  DEBUG_LOG("%s layer:%" PRIu64, __FUNCTION__, mId);
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error Layer::setBlendMode(int32_t m) {
+  const auto blendMode = static_cast<HWC2::BlendMode>(m);
+  const auto blendModeString = to_string(blendMode);
+  DEBUG_LOG("%s layer:%" PRIu64 " blend mode:%s", __FUNCTION__, mId,
+            blendModeString.c_str());
+
+  mBlendMode = blendMode;
+  return HWC2::Error::None;
+}
+
+HWC2::BlendMode Layer::getBlendMode() const {
+  const auto blendMode = mBlendMode;
+  const auto blendModeString = to_string(blendMode);
+  DEBUG_LOG("%s layer:%" PRIu64 " blend mode:%s", __FUNCTION__, mId,
+            blendModeString.c_str());
+
+  return blendMode;
+}
+
+HWC2::Error Layer::setColor(hwc_color_t color) {
+  DEBUG_LOG("%s layer:%" PRIu64 " color-r:%d color-g:%d color-b:%d color-a:%d)",
+            __FUNCTION__, mId, color.r, color.g, color.b, color.a);
+
+  mColor = color;
+  return HWC2::Error::None;
+}
+
+hwc_color_t Layer::getColor() const {
+  auto color = mColor;
+  DEBUG_LOG("%s layer:%" PRIu64 " color-r:%d color-g:%d color-b:%d color-a:%d)",
+            __FUNCTION__, mId, color.r, color.g, color.b, color.a);
+
+  return color;
+}
+
+HWC2::Error Layer::setCompositionTypeEnum(HWC2::Composition compositionType) {
+  const auto compositionTypeString = to_string(compositionType);
+  DEBUG_LOG("%s layer:%" PRIu64 " composition type:%s", __FUNCTION__, mId,
+            compositionTypeString.c_str());
+
+  mCompositionType = compositionType;
+  return HWC2::Error::None;
+}
+
+HWC2::Error Layer::setCompositionType(int32_t type) {
+  const auto compositionType = static_cast<HWC2::Composition>(type);
+  return setCompositionTypeEnum(compositionType);
+}
+
+HWC2::Composition Layer::getCompositionType() const {
+  const auto compositionType = mCompositionType;
+  const auto compositionTypeString = to_string(compositionType);
+  DEBUG_LOG("%s layer:%" PRIu64 " composition type:%s", __FUNCTION__, mId,
+            compositionTypeString.c_str());
+
+  return compositionType;
+}
+
+HWC2::Error Layer::setDataspace(int32_t) {
+  DEBUG_LOG("%s layer:%" PRIu64, __FUNCTION__, mId);
+
+  return HWC2::Error::None;
+}
+
+HWC2::Error Layer::setDisplayFrame(hwc_rect_t frame) {
+  DEBUG_LOG("%s layer:%" PRIu64
+            " display frame rect-left:%d rect-top:%d rect-right:%d rect-bot:%d",
+            __FUNCTION__, mId, frame.left, frame.top, frame.right,
+            frame.bottom);
+
+  mDisplayFrame = frame;
+  return HWC2::Error::None;
+}
+
+hwc_rect_t Layer::getDisplayFrame() const {
+  auto frame = mDisplayFrame;
+  DEBUG_LOG("%s layer:%" PRIu64
+            " display frame rect-left:%d rect-top:%d rect-right:%d rect-bot:%d",
+            __FUNCTION__, mId, frame.left, frame.top, frame.right,
+            frame.bottom);
+
+  return frame;
+}
+
+HWC2::Error Layer::setPlaneAlpha(float alpha) {
+  DEBUG_LOG("%s layer:%" PRIu64 "alpha:%f", __FUNCTION__, mId, alpha);
+
+  mPlaneAlpha = alpha;
+  return HWC2::Error::None;
+}
+
+float Layer::getPlaneAlpha() const {
+  auto alpha = mPlaneAlpha;
+  DEBUG_LOG("%s layer:%" PRIu64 "alpha:%f", __FUNCTION__, mId, alpha);
+
+  return alpha;
+}
+
+HWC2::Error Layer::setSidebandStream(const native_handle_t* stream) {
+  DEBUG_LOG("%s layer:%" PRIu64, __FUNCTION__, mId);
+
+  mSidebandStream = stream;
+  return HWC2::Error::None;
+}
+
+HWC2::Error Layer::setSourceCrop(hwc_frect_t crop) {
+  DEBUG_LOG("%s layer:%" PRIu64
+            "crop rect-left:%f rect-top:%f rect-right:%f rect-bot:%f",
+            __FUNCTION__, mId, crop.left, crop.top, crop.right, crop.bottom);
+
+  mSourceCrop = crop;
+  return HWC2::Error::None;
+}
+
+hwc_frect_t Layer::getSourceCrop() const {
+  hwc_frect_t crop = mSourceCrop;
+  DEBUG_LOG("%s layer:%" PRIu64
+            "crop rect-left:%f rect-top:%f rect-right:%f rect-bot:%f",
+            __FUNCTION__, mId, crop.left, crop.top, crop.right, crop.bottom);
+
+  return crop;
+}
+
+hwc_rect_t Layer::getSourceCropInt() const {
+  hwc_rect_t crop = {};
+  crop.left = static_cast<int>(mSourceCrop.left);
+  crop.top = static_cast<int>(mSourceCrop.top);
+  crop.right = static_cast<int>(mSourceCrop.right);
+  crop.bottom = static_cast<int>(mSourceCrop.bottom);
+  DEBUG_LOG("%s layer:%" PRIu64
+            "crop rect-left:%d rect-top:%d rect-right:%d rect-bot:%d",
+            __FUNCTION__, mId, crop.left, crop.top, crop.right, crop.bottom);
+
+  return crop;
+}
+
+HWC2::Error Layer::setTransform(int32_t transform) {
+  const auto transformType = static_cast<HWC2::Transform>(transform);
+  const auto transformTypeString = to_string(transformType);
+  DEBUG_LOG("%s layer:%" PRIu64 " transform:%s", __FUNCTION__, mId,
+            transformTypeString.c_str());
+
+  mTransform = transformType;
+  return HWC2::Error::None;
+}
+
+hwc_transform_t Layer::getTransform() const {
+  const auto transform = mTransform;
+  const auto transformString = to_string(transform);
+  DEBUG_LOG("%s layer:%" PRIu64 " transform:%s", __FUNCTION__, mId,
+            transformString.c_str());
+
+  return static_cast<hwc_transform_t>(transform);
+}
+
+HWC2::Error Layer::setVisibleRegion(hwc_region_t visible) {
+  DEBUG_LOG("%s layer:%" PRIu64, __FUNCTION__, mId);
+
+  mVisibleRegion.resize(visible.numRects);
+  std::copy_n(visible.rects, visible.numRects, mVisibleRegion.data());
+  return HWC2::Error::None;
+}
+
+std::size_t Layer::getNumVisibleRegions() const {
+  std::size_t num = mVisibleRegion.size();
+  DEBUG_LOG("%s layer:%" PRIu64 " number of visible regions: %zu", __FUNCTION__,
+            mId, num);
+
+  return num;
+}
+
+HWC2::Error Layer::setZ(uint32_t z) {
+  DEBUG_LOG("%s layer:%" PRIu64 " z:%d", __FUNCTION__, mId, z);
+
+  mZ = z;
+  return HWC2::Error::None;
+}
+
+uint32_t Layer::getZ() const {
+  uint32_t z = mZ;
+  DEBUG_LOG("%s layer:%" PRIu64 " z:%d", __FUNCTION__, mId, z);
+
+  return z;
+}
+
+}  // namespace android
\ No newline at end of file
diff --git a/system/hwc2/Layer.h b/system/hwc2/Layer.h
new file mode 100644
index 0000000..f576201
--- /dev/null
+++ b/system/hwc2/Layer.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HWC_LAYER_H
+#define ANDROID_HWC_LAYER_H
+
+#include <vector>
+
+#include "Common.h"
+#include "FencedBuffer.h"
+
+namespace android {
+
+class Layer {
+ public:
+  explicit Layer();
+
+  Layer(const Layer&) = delete;
+  Layer& operator=(const Layer&) = delete;
+
+  Layer(Layer&&) = default;
+  Layer& operator=(Layer&&) = default;
+
+  HWC2::Error setBuffer(buffer_handle_t buffer, int32_t acquireFence);
+  FencedBuffer& getBuffer() { return mBuffer; }
+
+  buffer_handle_t waitAndGetBuffer();
+
+  hwc2_layer_t getId() const { return mId; }
+
+  HWC2::Error setCursorPosition(int32_t x, int32_t y);
+
+  HWC2::Error setSurfaceDamage(hwc_region_t damage);
+
+  HWC2::Error setBlendMode(int32_t mode);
+  HWC2::BlendMode getBlendMode() const;
+
+  HWC2::Error setColor(hwc_color_t color);
+  hwc_color_t getColor() const;
+
+  HWC2::Error setCompositionTypeEnum(HWC2::Composition type);
+  HWC2::Error setCompositionType(int32_t type);
+  HWC2::Composition getCompositionType() const;
+
+  HWC2::Error setDataspace(int32_t dataspace);
+
+  HWC2::Error setDisplayFrame(hwc_rect_t frame);
+  hwc_rect_t getDisplayFrame() const;
+
+  HWC2::Error setPlaneAlpha(float alpha);
+  float getPlaneAlpha() const;
+
+  HWC2::Error setSidebandStream(const native_handle_t* stream);
+
+  HWC2::Error setSourceCrop(hwc_frect_t crop);
+  hwc_frect_t getSourceCrop() const;
+  hwc_rect_t getSourceCropInt() const;
+
+  HWC2::Error setTransform(int32_t transform);
+  hwc_transform_t getTransform() const;
+
+  HWC2::Error setVisibleRegion(hwc_region_t visible);
+  std::size_t getNumVisibleRegions() const;
+
+  HWC2::Error setZ(uint32_t z);
+  uint32_t getZ() const;
+
+ private:
+  const hwc2_layer_t mId;
+  FencedBuffer mBuffer;
+  std::vector<hwc_rect_t> mSurfaceDamage;
+  HWC2::BlendMode mBlendMode = HWC2::BlendMode::None;
+  hwc_color_t mColor = {0, 0, 0, 0};
+  HWC2::Composition mCompositionType = HWC2::Composition::Invalid;
+  hwc_rect_t mDisplayFrame = {0, 0, -1, -1};
+  float mPlaneAlpha = 0.0f;
+  const native_handle_t* mSidebandStream = nullptr;
+  hwc_frect_t mSourceCrop = {0.0f, 0.0f, -1.0f, -1.0f};
+  HWC2::Transform mTransform = HWC2::Transform::None;
+  std::vector<hwc_rect_t> mVisibleRegion;
+  uint32_t mZ = 0;
+};
+
+}  // namespace android
+
+#endif
\ No newline at end of file
diff --git a/system/hwc2/drmTest.cpp b/system/hwc2/drmTest.cpp
new file mode 100644
index 0000000..86267da
--- /dev/null
+++ b/system/hwc2/drmTest.cpp
@@ -0,0 +1,290 @@
+#include <errno.h>
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <time.h>
+#include <ui/GraphicBuffer.h>
+#include <ui/GraphicBufferAllocator.h>
+#include <unistd.h>
+#include <xf86drm.h>
+#include <xf86drmMode.h>
+
+#include "HostConnection.h"
+#include "cros_gralloc_handle.h"
+
+using namespace android;
+
+struct buffer_object {
+  uint32_t width;
+  uint32_t height;
+  uint32_t pitch;
+  uint32_t handle;
+  uint32_t size;
+  uint8_t *vaddr;
+  uint32_t fb_id;
+  const native_handle_t *fb;
+};
+
+struct buffer_object buf;
+
+static int modeset_create_fb(int fd, struct buffer_object *bo) {
+  struct drm_mode_create_dumb create = {};
+  struct drm_mode_map_dumb map = {};
+
+  create.width = bo->width;
+  create.height = bo->height;
+  create.bpp = 32;
+  drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
+  printf("create dumb w %d h %d\n", bo->width, bo->height);
+  getchar();
+
+  bo->pitch = create.pitch;
+  bo->size = create.size;
+  bo->handle = create.handle;
+  drmModeAddFB(fd, bo->width, bo->height, 24, 32, bo->pitch, bo->handle,
+               &bo->fb_id);
+  printf("drmModeAddFB\n");
+  getchar();
+
+  map.handle = create.handle;
+  drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
+  printf("map dumb\n");
+  getchar();
+  bo->vaddr = static_cast<unsigned char *>(mmap64(
+      0, create.size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, map.offset));
+  memset(bo->vaddr, 0xff, bo->size);
+  return 0;
+}
+
+static void modeset_destroy_fb(int fd, struct buffer_object *bo) {
+  struct drm_mode_destroy_dumb destroy = {};
+
+  drmModeRmFB(fd, bo->fb_id);
+  munmap(bo->vaddr, bo->size);
+  destroy.handle = bo->handle;
+  drmIoctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
+}
+
+static uint32_t get_property_id(int fd, drmModeObjectProperties *props,
+                                const char *name) {
+  drmModePropertyPtr property;
+  uint32_t i, id = 0;
+
+  /* find property according to the name */
+  for (i = 0; i < props->count_props; i++) {
+    property = drmModeGetProperty(fd, props->props[i]);
+    if (!strcmp(property->name, name)) id = property->prop_id;
+    drmModeFreeProperty(property);
+    if (id) {
+      break;
+    }
+  }
+  return id;
+}
+
+static std::unique_ptr<HostConnection> sHostCon;
+
+static HostConnection *createOrGetHostConnection() {
+  if (!sHostCon) {
+    sHostCon = HostConnection::createUnique();
+  }
+  return sHostCon.get();
+}
+
+#define DEFINE_AND_VALIDATE_HOST_CONNECTION                          \
+  HostConnection *hostCon = createOrGetHostConnection();             \
+  if (!hostCon) {                                                    \
+    ALOGE("drmTest: Failed to get host connection\n");               \
+    return;                                                          \
+  }                                                                  \
+  ExtendedRCEncoderContext *rcEnc = hostCon->rcEncoder();            \
+  if (!rcEnc) {                                                      \
+    ALOGE("drmTest: Failed to get renderControl encoder context\n"); \
+    return;                                                          \
+  }
+
+#include "include/drmhwcgralloc.h"
+void convertBoInfo(buffer_handle_t handle, hwc_drm_bo_t *bo) {
+  cros_gralloc_handle *gr_handle = (cros_gralloc_handle *)handle;
+  if (!gr_handle) return;
+
+  bo->width = gr_handle->width;
+  bo->height = gr_handle->height;
+  bo->hal_format = gr_handle->droid_format;
+  bo->format = gr_handle->format;
+  bo->usage = gr_handle->usage;
+  bo->prime_fds[0] = gr_handle->fds[0];
+  bo->pitches[0] = gr_handle->strides[0];
+  bo->offsets[0] = gr_handle->offsets[0];
+}
+
+void grallocAllocBuffer(int fd, struct buffer_object *bo) {
+  buffer_handle_t h;
+  uint32_t stride;
+
+  if (GraphicBufferAllocator::get().allocate(
+          bo->width, bo->height, android::PIXEL_FORMAT_RGBA_8888, 1,
+          (GraphicBuffer::USAGE_HW_COMPOSER | GraphicBuffer::USAGE_HW_RENDER),
+          &h, &stride, 0, "emulatorDrmTest") == android::OK) {
+    hwc_drm_bo tmp_bo{};
+    convertBoInfo(h, &tmp_bo);
+
+    int ret = drmPrimeFDToHandle(fd, tmp_bo.prime_fds[0], tmp_bo.gem_handles);
+    for (int i = 1; i < HWC_DRM_BO_MAX_PLANES; i++) {
+      tmp_bo.gem_handles[i] = tmp_bo.gem_handles[0];
+    }
+    if (ret) {
+      printf("%s: DRM_IOCTL_PRIME_FD_TO_HANDLE failed: %s (errno %d)\n",
+             __func__, strerror(errno), errno);
+      return;
+    }
+    ret = drmModeAddFB2(fd, tmp_bo.width, tmp_bo.height, tmp_bo.format,
+                        tmp_bo.gem_handles, tmp_bo.pitches, tmp_bo.offsets,
+                        &bo->fb_id, 0);
+
+    printf("allocate buffer\n");
+    DEFINE_AND_VALIDATE_HOST_CONNECTION
+    bo->fb = static_cast<const native_handle_t *>(h);
+    getchar();
+    printf("resource id is %d\n",
+           hostCon->grallocHelper()->getHostHandle(bo->fb));
+  } else {
+    bo->fb = nullptr;
+  }
+}
+
+int main(int argc, char **argv) {
+  int fd;
+  drmModeConnector *conn;
+  drmModeRes *res;
+  drmModePlaneRes *plane_res = nullptr;
+  uint32_t conn_id;
+  uint32_t crtc_id;
+  uint32_t plane_id;
+
+  fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
+
+  int ret = drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 0);
+  if (ret) {
+    printf("fail to set universal plane %d\n", ret);
+  }
+
+  res = drmModeGetResources(fd);
+  crtc_id = res->crtcs[0];
+  conn_id = res->connectors[0];
+
+  plane_res = drmModeGetPlaneResources(fd);
+  plane_id = plane_res->planes[0];
+
+  conn = drmModeGetConnector(fd, conn_id);
+  buf.width = conn->modes[0].hdisplay;
+  buf.height = conn->modes[0].vdisplay;
+  // modeset_create_fb(fd, &buf);
+  grallocAllocBuffer(fd, &buf);
+
+  drmModeSetCrtc(fd, crtc_id, buf.fb_id, 0, 0, &conn_id, 1, &conn->modes[0]);
+  printf("drmModeSetCrtc\n");
+  getchar();
+
+  drmModePageFlip(fd, crtc_id, buf.fb_id, DRM_MODE_PAGE_FLIP_EVENT, &crtc_id);
+  printf("drmModePageFlip\n");
+  getchar();
+
+  //    drmModeSetPlane(fd, plane_id, crtc_id, buf.fb_id, 0, 50, 50, 320, 320,
+  //                    100 << 16, 150 << 16, 320 << 16, 320 << 16);
+  //    printf("drmModeSetPlane\n");
+  //    modeset_destroy_fb(fd, &buf);
+
+  drmModeFreeConnector(conn);
+  drmModeFreeResources(res);
+  close(fd);
+
+  return 0;
+}
+
+int main_atom(int argc, char **argv) {
+  int fd;
+  drmModeConnector *conn = nullptr;
+  drmModeRes *res = nullptr;
+  drmModePlaneRes *plane_res = nullptr;
+  drmModeObjectProperties *props = nullptr;
+  drmModeAtomicReq *req;
+  uint32_t conn_id;
+  uint32_t crtc_id;
+  uint32_t plane_id;
+  uint32_t blob_id;
+  uint32_t property_crtc_id;
+  uint32_t property_mode_id;
+  uint32_t property_active;
+
+  printf("drm available %d\n", drmAvailable());
+  fd = open("/dev/dri/card0", O_RDWR);
+  printf("openg drm fd %d\n", fd);
+
+  int ret = drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
+  if (ret) {
+    printf("fail to set universal plane %d\n", ret);
+  }
+
+  ret = drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1);
+  if (ret) {
+    printf("fail to set atomic operation %d\n", ret);
+  }
+
+  res = drmModeGetResources(fd);
+  if (!res) {
+    printf("error to get drmModeGetResources: %d\n", errno);
+  }
+
+  crtc_id = res->crtcs[0];
+  conn_id = res->connectors[0];
+  plane_res = drmModeGetPlaneResources(fd);
+  plane_id = plane_res->planes[0];
+
+  conn = drmModeGetConnector(fd, conn_id);
+  buf.width = conn->modes[0].hdisplay;
+  buf.height = conn->modes[0].vdisplay;
+  modeset_create_fb(fd, &buf);
+
+  /* get connector properties */
+  props = drmModeObjectGetProperties(fd, conn_id, DRM_MODE_OBJECT_CONNECTOR);
+  property_crtc_id = get_property_id(fd, props, "CRTC_ID");
+  drmModeFreeObjectProperties(props);
+
+  /* get crtc properties */
+  props = drmModeObjectGetProperties(fd, crtc_id, DRM_MODE_OBJECT_CRTC);
+  property_active = get_property_id(fd, props, "ACTIVE");
+  property_mode_id = get_property_id(fd, props, "MODE_ID");
+  drmModeFreeObjectProperties(props);
+
+  /* create blob to store current mode, and return the blob id */
+  drmModeCreatePropertyBlob(fd, &conn->modes[0], sizeof(conn->modes[0]),
+                            &blob_id);
+
+  /* start modeseting */
+  req = drmModeAtomicAlloc();
+  drmModeAtomicAddProperty(req, crtc_id, property_active, 1);
+  drmModeAtomicAddProperty(req, crtc_id, property_mode_id, blob_id);
+  drmModeAtomicAddProperty(req, conn_id, property_crtc_id, crtc_id);
+  drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
+  drmModeAtomicFree(req);
+  printf("drmModeAtomicCommit SetCrtc\n");
+  getchar();
+
+  drmModeSetPlane(fd, plane_id, crtc_id, buf.fb_id, 0, 50, 50, 320, 320, 0, 0,
+                  320 << 16, 320 << 16);
+  printf("drmModeSetPlane\n");
+  getchar();
+
+  modeset_destroy_fb(fd, &buf);
+  drmModeFreeConnector(conn);
+  drmModeFreePlaneResources(plane_res);
+  drmModeFreeResources(res);
+  close(fd);
+
+  return 0;
+}
diff --git a/system/include/GLES/glext.h b/system/include/GLES/glext.h
index 5843d5e..69ac4ee 100644
--- a/system/include/GLES/glext.h
+++ b/system/include/GLES/glext.h
@@ -373,6 +373,10 @@
 #define GL_SRGB_ALPHA_EXT                                       0x8C42
 #define GL_SRGB8_ALPHA8_EXT                                     0x8C43
 #define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT            0x8210
+#define GL_COMPRESSED_SRGB_S3TC_DXT1_EXT                        0x8C4C
+#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT                  0x8C4D
+#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT                  0x8C4E
+#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT                  0x8C4F
 #endif
 
 /* GL_EXT_texture_compression_dxt1 */
@@ -1275,4 +1279,3 @@
 #endif
 
 #endif /* __glext_h_ */
-
diff --git a/system/include/GLES2/gl2ext.h b/system/include/GLES2/gl2ext.h
index 9749f9f..7c7b421 100644
--- a/system/include/GLES2/gl2ext.h
+++ b/system/include/GLES2/gl2ext.h
@@ -1040,6 +1040,11 @@
 #define GL_SRGB_ALPHA_EXT                 0x8C42
 #define GL_SRGB8_ALPHA8_EXT               0x8C43
 #define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING_EXT 0x8210
+#define GL_COMPRESSED_SRGB_S3TC_DXT1_EXT                        0x8C4C
+#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT                  0x8C4D
+#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT                  0x8C4E
+#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT                  0x8C4F
+
 #endif /* GL_EXT_sRGB */
 
 #ifndef GL_EXT_sRGB_write_control
@@ -1282,6 +1287,14 @@
 #endif
 #endif /* GL_EXT_texture_buffer */
 
+#ifndef GL_EXT_texture_compression_bptc
+#define GL_EXT_texture_compression_bptc 1
+#define GL_COMPRESSED_RGBA_BPTC_UNORM_EXT 0x8E8C
+#define GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_EXT 0x8E8D
+#define GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_EXT 0x8E8E
+#define GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_EXT 0x8E8F
+#endif /* GL_EXT_texture_compression_bptc */
+
 #ifndef GL_EXT_texture_compression_dxt1
 #define GL_EXT_texture_compression_dxt1 1
 #define GL_COMPRESSED_RGB_S3TC_DXT1_EXT   0x83F0
diff --git a/system/profiler/Android.mk b/system/profiler/Android.mk
new file mode 100644
index 0000000..f886742
--- /dev/null
+++ b/system/profiler/Android.mk
@@ -0,0 +1,24 @@
+LOCAL_PATH := $(call my-dir)
+
+### profiler ###########################################
+$(call emugl-begin-shared-library,libGoldfishProfiler)
+
+ifeq ($(shell test $(PLATFORM_SDK_VERSION) -ge 30 && echo isApi30OrHigher),isApi30OrHigher)
+    LOCAL_CFLAGS += -DLOG_TAG=\"emuglProfiler\"
+
+    LOCAL_SHARED_LIBRARIES := liblog \
+        libbase
+
+    LOCAL_SRC_FILES := \
+        profiler.cpp \
+        perfetto.cpp
+
+else
+    LOCAL_SRC_FILES := \
+        profiler_stub.cpp
+
+endif
+
+$(call emugl-export,C_INCLUDES,$(LOCAL_PATH))
+
+$(call emugl-end-module)
\ No newline at end of file
diff --git a/system/profiler/CMakeLists.txt b/system/profiler/CMakeLists.txt
new file mode 100644
index 0000000..dd769b5
--- /dev/null
+++ b/system/profiler/CMakeLists.txt
@@ -0,0 +1,10 @@
+# This is an autogenerated file! Do not edit!
+# instead run make from .../device/generic/goldfish-opengl
+# which will re-generate this file.
+android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/profiler/Android.mk" "509a207b833d50fabb0eacdfe4168c395de928e3a84864768669ce1c63aa8553")
+set(GoldfishProfiler_src profiler_stub.cpp)
+android_add_library(TARGET GoldfishProfiler SHARED LICENSE Apache-2.0 SRC profiler_stub.cpp)
+target_include_directories(GoldfishProfiler PRIVATE ${GOLDFISH_DEVICE_ROOT}/system/profiler ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
+target_compile_definitions(GoldfishProfiler PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM")
+target_compile_options(GoldfishProfiler PRIVATE "-fvisibility=default" "-Wno-unused-parameter")
+target_link_libraries(GoldfishProfiler PRIVATE android-emu-shared vulkan_enc gui log _renderControl_enc GLESv2_enc GLESv1_enc OpenglCodecCommon_host cutils utils androidemu PRIVATE gralloc_cb_host GoldfishAddressSpace_host qemupipe_host)
\ No newline at end of file
diff --git a/system/profiler/perfetto.cpp b/system/profiler/perfetto.cpp
new file mode 100644
index 0000000..dd401e1
--- /dev/null
+++ b/system/profiler/perfetto.cpp
@@ -0,0 +1,69071 @@
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is automatically generated by gen_amalgamated. Do not edit.
+
+// gen_amalgamated: predefined macros
+#if !defined(PERFETTO_IMPLEMENTATION)
+#define PERFETTO_IMPLEMENTATION
+#endif
+#if !defined(GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER)
+#define GOOGLE_PROTOBUF_NO_STATIC_INITIALIZER
+#endif
+#if !defined(GOOGLE_PROTOBUF_NO_RTTI)
+#define GOOGLE_PROTOBUF_NO_RTTI
+#endif
+#include "perfetto.h"
+// gen_amalgamated begin source: src/base/ctrl_c_handler.cc
+// gen_amalgamated begin header: include/perfetto/ext/base/ctrl_c_handler.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_CTRL_C_HANDLER_H_
+#define INCLUDE_PERFETTO_EXT_BASE_CTRL_C_HANDLER_H_
+
+namespace perfetto {
+namespace base {
+
+// On Linux/Android/Mac: installs SIGINT + SIGTERM signal handlers.
+// On Windows: installs a SetConsoleCtrlHandler() handler.
+// The passed handler must be async safe.
+using CtrlCHandlerFunction = void (*)();
+void InstallCtrCHandler(CtrlCHandlerFunction);
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_CTRL_C_HANDLER_H_
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/ctrl_c_handler.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <Windows.h>
+#include <io.h>
+#else
+#include <signal.h>
+#include <unistd.h>
+#endif
+
+namespace perfetto {
+namespace base {
+
+namespace {
+CtrlCHandlerFunction g_handler = nullptr;
+}
+
+void InstallCtrCHandler(CtrlCHandlerFunction handler) {
+  PERFETTO_CHECK(g_handler == nullptr);
+  g_handler = handler;
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  auto trampoline = [](DWORD type) -> int {
+    if (type == CTRL_C_EVENT) {
+      g_handler();
+      return true;
+    }
+    return false;
+  };
+  ::SetConsoleCtrlHandler(trampoline, true);
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+  // Setup signal handler.
+  struct sigaction sa {};
+
+// Glibc headers for sa_sigaction trigger this.
+#pragma GCC diagnostic push
+#if defined(__clang__)
+#pragma GCC diagnostic ignored "-Wdisabled-macro-expansion"
+#endif
+  sa.sa_handler = [](int) { g_handler(); };
+  sa.sa_flags = static_cast<decltype(sa.sa_flags)>(SA_RESETHAND | SA_RESTART);
+#pragma GCC diagnostic pop
+  sigaction(SIGINT, &sa, nullptr);
+  sigaction(SIGTERM, &sa, nullptr);
+#else
+  // Do nothing on NaCL and Fuchsia.
+  ignore_result(handler);
+#endif
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/event_fd.cc
+// gen_amalgamated begin header: include/perfetto/ext/base/event_fd.h
+// gen_amalgamated begin header: include/perfetto/base/platform_handle.h
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_BASE_PLATFORM_HANDLE_H_
+#define INCLUDE_PERFETTO_BASE_PLATFORM_HANDLE_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+namespace perfetto {
+namespace base {
+
+// PlatformHandle should be used only for types that are HANDLE(s) in Windows.
+// It should NOT be used to blanket-replace "int fd" in the codebase.
+// Windows has two types of "handles", which, in UNIX-land, both map to int:
+// 1. File handles returned by the posix-compatibility API like _open().
+//    These are just int(s) and should stay such, because all the posix-like API
+//    in Windows.h take an int, not a HANDLE.
+// 2. Handles returned by old-school WINAPI like CreateFile, CreateEvent etc.
+//    These are proper HANDLE(s). PlatformHandle should be used here.
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+// Windows.h typedefs HANDLE to void*. We use void* here to avoid leaking
+// Windows.h through our headers.
+using PlatformHandle = void*;
+
+// On Windows both nullptr and 0xffff... (INVALID_HANDLE_VALUE) are invalid.
+struct PlatformHandleChecker {
+  static inline bool IsValid(PlatformHandle h) {
+    return h && h != reinterpret_cast<PlatformHandle>(-1);
+  }
+};
+#else
+using PlatformHandle = int;
+struct PlatformHandleChecker {
+  static inline bool IsValid(PlatformHandle h) { return h >= 0; }
+};
+#endif
+
+// The definition of this lives in base/file_utils.cc (to avoid creating an
+// extra build edge for a one liner). This is really an alias for close() (UNIX)
+// CloseHandle() (Windows). THe indirection layer is just to avoid leaking
+// system headers like Windows.h through perfetto headers.
+// Thre return value is always UNIX-style: 0 on success, -1 on failure.
+int ClosePlatformHandle(PlatformHandle);
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_BASE_PLATFORM_HANDLE_H_
+// gen_amalgamated begin header: include/perfetto/ext/base/scoped_file.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_SCOPED_FILE_H_
+#define INCLUDE_PERFETTO_EXT_BASE_SCOPED_FILE_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+#include <stdio.h>
+
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <dirent.h>  // For DIR* / opendir().
+#endif
+
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/platform_handle.h"
+
+namespace perfetto {
+namespace base {
+
+namespace internal {
+// Used for the most common cases of ScopedResource where there is only one
+// invalid value.
+template <typename T, T InvalidValue>
+struct DefaultValidityChecker {
+  static bool IsValid(T t) { return t != InvalidValue; }
+};
+}  // namespace internal
+
+// RAII classes for auto-releasing fds and dirs.
+// if T is a pointer type, InvalidValue must be nullptr. Doing otherwise
+// causes weird unexpected behaviors (See https://godbolt.org/z/5nGMW4).
+template <typename T,
+          int (*CloseFunction)(T),
+          T InvalidValue,
+          bool CheckClose = true,
+          class Checker = internal::DefaultValidityChecker<T, InvalidValue>>
+class PERFETTO_EXPORT ScopedResource {
+ public:
+  using ValidityChecker = Checker;
+  static constexpr T kInvalid = InvalidValue;
+
+  explicit ScopedResource(T t = InvalidValue) : t_(t) {}
+  ScopedResource(ScopedResource&& other) noexcept {
+    t_ = other.t_;
+    other.t_ = InvalidValue;
+  }
+  ScopedResource& operator=(ScopedResource&& other) {
+    reset(other.t_);
+    other.t_ = InvalidValue;
+    return *this;
+  }
+  T get() const { return t_; }
+  T operator*() const { return t_; }
+  explicit operator bool() const { return Checker::IsValid(t_); }
+  void reset(T r = InvalidValue) {
+    if (Checker::IsValid(t_)) {
+      int res = CloseFunction(t_);
+      if (CheckClose)
+        PERFETTO_CHECK(res == 0);
+    }
+    t_ = r;
+  }
+  T release() {
+    T t = t_;
+    t_ = InvalidValue;
+    return t;
+  }
+  ~ScopedResource() { reset(InvalidValue); }
+
+ private:
+  ScopedResource(const ScopedResource&) = delete;
+  ScopedResource& operator=(const ScopedResource&) = delete;
+  T t_;
+};
+
+// Declared in file_utils.h. Forward declared to avoid #include cycles.
+int PERFETTO_EXPORT CloseFile(int fd);
+
+// Use this for file resources obtained via open() and similar APIs.
+using ScopedFile = ScopedResource<int, CloseFile, -1>;
+using ScopedFstream = ScopedResource<FILE*, fclose, nullptr>;
+
+// Use this for resources that are HANDLE on Windows. See comments in
+// platform_handle.h
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+using ScopedPlatformHandle = ScopedResource<PlatformHandle,
+                                            ClosePlatformHandle,
+                                            /*InvalidValue=*/nullptr,
+                                            /*CheckClose=*/true,
+                                            PlatformHandleChecker>;
+#else
+// On non-windows systems we alias ScopedPlatformHandle to ScopedFile because
+// they are really the same. This is to allow assignments between the two in
+// Linux-specific code paths that predate ScopedPlatformHandle.
+static_assert(std::is_same<int, PlatformHandle>::value, "");
+using ScopedPlatformHandle = ScopedFile;
+
+// DIR* does not exist on Windows.
+using ScopedDir = ScopedResource<DIR*, closedir, nullptr>;
+#endif
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_SCOPED_FILE_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_EVENT_FD_H_
+#define INCLUDE_PERFETTO_EXT_BASE_EVENT_FD_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/platform_handle.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+
+namespace perfetto {
+namespace base {
+
+// A waitable event that can be used with poll/select.
+// This is really a wrapper around eventfd_create with a pipe-based fallback
+// for other platforms where eventfd is not supported.
+class EventFd {
+ public:
+  EventFd();
+  ~EventFd();
+  EventFd(EventFd&&) noexcept = default;
+  EventFd& operator=(EventFd&&) = default;
+
+  // The non-blocking file descriptor that can be polled to wait for the event.
+  PlatformHandle fd() const { return event_handle_.get(); }
+
+  // Can be called from any thread.
+  void Notify();
+
+  // Can be called from any thread. If more Notify() are queued a Clear() call
+  // can clear all of them (up to 16 per call).
+  void Clear();
+
+ private:
+  // The eventfd, when eventfd is supported, otherwise this is the read end of
+  // the pipe for fallback mode.
+  ScopedPlatformHandle event_handle_;
+
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) &&   \
+    !PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && \
+    !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  // On Mac and other non-Linux UNIX platforms a pipe-based fallback is used.
+  // The write end of the wakeup pipe.
+  ScopedFile write_fd_;
+#endif
+};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_EVENT_FD_H_
+// gen_amalgamated begin header: include/perfetto/ext/base/pipe.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_PIPE_H_
+#define INCLUDE_PERFETTO_EXT_BASE_PIPE_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/platform_handle.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+
+namespace perfetto {
+namespace base {
+
+class Pipe {
+ public:
+  enum Flags {
+    kBothBlock = 0,
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    kBothNonBlock,
+    kRdNonBlock,
+    kWrNonBlock,
+#endif
+  };
+
+  static Pipe Create(Flags = kBothBlock);
+
+  Pipe();
+  Pipe(Pipe&&) noexcept;
+  Pipe& operator=(Pipe&&);
+
+  ScopedPlatformHandle rd;
+  ScopedPlatformHandle wr;
+};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_PIPE_H_
+// gen_amalgamated begin header: include/perfetto/ext/base/utils.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_UTILS_H_
+#define INCLUDE_PERFETTO_EXT_BASE_UTILS_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/types.h>
+
+#include <atomic>
+#include <string>
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+// Even if Windows has errno.h, the all syscall-restart behavior does not apply.
+// Trying to handle EINTR can cause more harm than good if errno is left stale.
+// Chromium does the same.
+#define PERFETTO_EINTR(x) (x)
+#else
+#define PERFETTO_EINTR(x)                                   \
+  ([&] {                                                    \
+    decltype(x) eintr_wrapper_result;                       \
+    do {                                                    \
+      eintr_wrapper_result = (x);                           \
+    } while (eintr_wrapper_result == -1 && errno == EINTR); \
+    return eintr_wrapper_result;                            \
+  }())
+#endif
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+using uid_t = unsigned int;
+#if !PERFETTO_BUILDFLAG(PERFETTO_COMPILER_GCC)
+using pid_t = unsigned int;
+#endif
+#if defined(_WIN64)
+using ssize_t = int64_t;
+#else
+using ssize_t = long;
+#endif
+#endif
+
+namespace perfetto {
+namespace base {
+
+constexpr uid_t kInvalidUid = static_cast<uid_t>(-1);
+constexpr pid_t kInvalidPid = static_cast<pid_t>(-1);
+
+// Do not add new usages of kPageSize, consider using GetSysPageSize() below.
+// TODO(primiano): over time the semantic of kPageSize became too ambiguous.
+// Strictly speaking, this constant is incorrect on some new devices where the
+// page size can be 16K (e.g., crbug.com/1116576). Unfortunately too much code
+// ended up depending on kPageSize for purposes that are not strictly related
+// with the kernel's mm subsystem.
+constexpr size_t kPageSize = 4096;
+
+// Returns the system's page size. Use this when dealing with mmap, madvise and
+// similar mm-related syscalls.
+uint32_t GetSysPageSize();
+
+template <typename T>
+constexpr size_t ArraySize(const T& array) {
+  return sizeof(array) / sizeof(array[0]);
+}
+
+// Function object which invokes 'free' on its parameter, which must be
+// a pointer. Can be used to store malloc-allocated pointers in std::unique_ptr:
+//
+// std::unique_ptr<int, base::FreeDeleter> foo_ptr(
+//     static_cast<int*>(malloc(sizeof(int))));
+struct FreeDeleter {
+  inline void operator()(void* ptr) const { free(ptr); }
+};
+
+template <typename T>
+constexpr T AssumeLittleEndian(T value) {
+#if !PERFETTO_IS_LITTLE_ENDIAN()
+  static_assert(false, "Unimplemented on big-endian archs");
+#endif
+  return value;
+}
+
+// Round up |size| to a multiple of |alignment| (must be a power of two).
+template <size_t alignment>
+constexpr size_t AlignUp(size_t size) {
+  static_assert((alignment & (alignment - 1)) == 0, "alignment must be a pow2");
+  return (size + alignment - 1) & ~(alignment - 1);
+}
+
+inline bool IsAgain(int err) {
+  return err == EAGAIN || err == EWOULDBLOCK;
+}
+
+// setenv(2)-equivalent. Deals with Windows vs Posix discrepancies.
+void SetEnv(const std::string& key, const std::string& value);
+
+// Calls mallopt(M_PURGE, 0) on Android. Does nothing on other platforms.
+// This forces the allocator to release freed memory. This is used to work
+// around various Scudo inefficiencies. See b/170217718.
+void MaybeReleaseAllocatorMemToOS();
+
+// geteuid() on POSIX OSes, returns 0 on Windows (See comment in utils.cc).
+uid_t GetCurrentUserId();
+
+// Forks the process.
+// Parent: prints the PID of the child and exit(0).
+// Child: redirects stdio onto /dev/null and chdirs into .
+void Daemonize();
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_UTILS_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+#include <errno.h>
+#include <stdint.h>
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <Windows.h>
+#include <synchapi.h>
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+#include <sys/eventfd.h>
+#include <unistd.h>
+#else  // Mac, Fuchsia and other non-Linux UNIXes
+#include <unistd.h>
+#endif
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/event_fd.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/pipe.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+
+namespace perfetto {
+namespace base {
+
+EventFd::~EventFd() = default;
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+EventFd::EventFd() {
+  event_handle_.reset(
+      CreateEventA(/*lpEventAttributes=*/nullptr, /*bManualReset=*/true,
+                   /*bInitialState=*/false, /*bInitialState=*/nullptr));
+}
+
+void EventFd::Notify() {
+  if (!SetEvent(event_handle_.get()))  // 0: fail, !0: success, unlike UNIX.
+    PERFETTO_DFATAL("EventFd::Notify()");
+}
+
+void EventFd::Clear() {
+  if (!ResetEvent(event_handle_.get()))  // 0: fail, !0: success, unlike UNIX.
+    PERFETTO_DFATAL("EventFd::Clear()");
+}
+
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+
+EventFd::EventFd() {
+  event_handle_.reset(eventfd(/*initval=*/0, EFD_CLOEXEC | EFD_NONBLOCK));
+  PERFETTO_CHECK(event_handle_);
+}
+
+void EventFd::Notify() {
+  const uint64_t value = 1;
+  ssize_t ret = write(event_handle_.get(), &value, sizeof(value));
+  if (ret <= 0 && errno != EAGAIN)
+    PERFETTO_DFATAL("EventFd::Notify()");
+}
+
+void EventFd::Clear() {
+  uint64_t value;
+  ssize_t ret = read(event_handle_.get(), &value, sizeof(value));
+  if (ret <= 0 && errno != EAGAIN)
+    PERFETTO_DFATAL("EventFd::Clear()");
+}
+
+#else
+
+EventFd::EventFd() {
+  // Make the pipe non-blocking so that we never block the waking thread (either
+  // the main thread or another one) when scheduling a wake-up.
+  Pipe pipe = Pipe::Create(Pipe::kBothNonBlock);
+  event_handle_ = ScopedPlatformHandle(std::move(pipe.rd).release());
+  write_fd_ = std::move(pipe.wr);
+}
+
+void EventFd::Notify() {
+  const uint64_t value = 1;
+  ssize_t ret = write(write_fd_.get(), &value, sizeof(uint8_t));
+  if (ret <= 0 && errno != EAGAIN)
+    PERFETTO_DFATAL("EventFd::Notify()");
+}
+
+void EventFd::Clear() {
+  // Drain the byte(s) written to the wake-up pipe. We can potentially read
+  // more than one byte if several wake-ups have been scheduled.
+  char buffer[16];
+  ssize_t ret = read(event_handle_.get(), &buffer[0], sizeof(buffer));
+  if (ret <= 0 && errno != EAGAIN)
+    PERFETTO_DFATAL("EventFd::Clear()");
+}
+#endif
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/file_utils.cc
+// gen_amalgamated begin header: include/perfetto/ext/base/file_utils.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_FILE_UTILS_H_
+#define INCLUDE_PERFETTO_EXT_BASE_FILE_UTILS_H_
+
+#include <fcntl.h>  // For mode_t & O_RDONLY/RDWR. Exists also on Windows.
+#include <stddef.h>
+
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+
+namespace perfetto {
+namespace base {
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+using FileOpenMode = int;
+#else
+using FileOpenMode = mode_t;
+#endif
+
+constexpr FileOpenMode kFileModeInvalid = static_cast<FileOpenMode>(-1);
+
+bool ReadPlatformHandle(PlatformHandle, std::string* out);
+bool ReadFileDescriptor(int fd, std::string* out);
+bool ReadFileStream(FILE* f, std::string* out);
+bool ReadFile(const std::string& path, std::string* out);
+
+// A wrapper around read(2). It deals with Linux vs Windows includes. It also
+// deals with handling EINTR. Has the same semantics of UNIX's read(2).
+ssize_t Read(int fd, void* dst, size_t dst_size);
+
+// Call write until all data is written or an error is detected.
+//
+// man 2 write:
+//   If a write() is interrupted by a signal handler before any bytes are
+//   written, then the call fails with the error EINTR; if it is
+//   interrupted after at least one byte has been written, the call
+//   succeeds, and returns the number of bytes written.
+ssize_t WriteAll(int fd, const void* buf, size_t count);
+
+ssize_t WriteAllHandle(PlatformHandle, const void* buf, size_t count);
+
+ScopedFile OpenFile(const std::string& path,
+                    int flags,
+                    FileOpenMode = kFileModeInvalid);
+
+// This is an alias for close(). It's to avoid leaking Windows.h in headers.
+// Exported because ScopedFile is used in the /include/ext API by Chromium
+// component builds.
+int PERFETTO_EXPORT CloseFile(int fd);
+
+bool FlushFile(int fd);
+
+// Returns true if mkdir succeeds, false if it fails (see errno in that case).
+bool Mkdir(const std::string& path);
+
+// Calls rmdir() on UNIX, _rmdir() on Windows.
+bool Rmdir(const std::string& path);
+
+// Wrapper around access(path, F_OK).
+bool FileExists(const std::string& path);
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_FILE_UTILS_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
+
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <algorithm>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/platform_handle.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <Windows.h>
+#include <direct.h>
+#include <io.h>
+#else
+#include <dirent.h>
+#include <unistd.h>
+#endif
+
+namespace perfetto {
+namespace base {
+namespace {
+constexpr size_t kBufSize = 2048;
+}
+
+ssize_t Read(int fd, void* dst, size_t dst_size) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  return _read(fd, dst, static_cast<unsigned>(dst_size));
+#else
+  return PERFETTO_EINTR(read(fd, dst, dst_size));
+#endif
+}
+
+bool ReadFileDescriptor(int fd, std::string* out) {
+  // Do not override existing data in string.
+  size_t i = out->size();
+
+  struct stat buf {};
+  if (fstat(fd, &buf) != -1) {
+    if (buf.st_size > 0)
+      out->resize(i + static_cast<size_t>(buf.st_size));
+  }
+
+  ssize_t bytes_read;
+  for (;;) {
+    if (out->size() < i + kBufSize)
+      out->resize(out->size() + kBufSize);
+
+    bytes_read = Read(fd, &((*out)[i]), kBufSize);
+    if (bytes_read > 0) {
+      i += static_cast<size_t>(bytes_read);
+    } else {
+      out->resize(i);
+      return bytes_read == 0;
+    }
+  }
+}
+
+bool ReadPlatformHandle(PlatformHandle h, std::string* out) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  // Do not override existing data in string.
+  size_t i = out->size();
+
+  for (;;) {
+    if (out->size() < i + kBufSize)
+      out->resize(out->size() + kBufSize);
+    DWORD bytes_read = 0;
+    auto res = ::ReadFile(h, &((*out)[i]), kBufSize, &bytes_read, nullptr);
+    if (res && bytes_read > 0) {
+      i += static_cast<size_t>(bytes_read);
+    } else {
+      out->resize(i);
+      const bool is_eof = res && bytes_read == 0;
+      auto err = res ? 0 : GetLastError();
+      // The "Broken pipe" error on Windows is slighly different than Unix:
+      // On Unix: a "broken pipe" error can happen only on the writer side. On
+      // the reader there is no broken pipe, just a EOF.
+      // On windows: the reader also sees a broken pipe error.
+      // Here we normalize on the Unix behavior, treating broken pipe as EOF.
+      return is_eof || err == ERROR_BROKEN_PIPE;
+    }
+  }
+#else
+  return ReadFileDescriptor(h, out);
+#endif
+}
+
+bool ReadFileStream(FILE* f, std::string* out) {
+  return ReadFileDescriptor(fileno(f), out);
+}
+
+bool ReadFile(const std::string& path, std::string* out) {
+  base::ScopedFile fd = base::OpenFile(path, O_RDONLY);
+  if (!fd)
+    return false;
+
+  return ReadFileDescriptor(*fd, out);
+}
+
+ssize_t WriteAll(int fd, const void* buf, size_t count) {
+  size_t written = 0;
+  while (written < count) {
+    // write() on windows takes an unsigned int size.
+    uint32_t bytes_left = static_cast<uint32_t>(
+        std::min(count - written, static_cast<size_t>(UINT32_MAX)));
+    ssize_t wr = PERFETTO_EINTR(
+        write(fd, static_cast<const char*>(buf) + written, bytes_left));
+    if (wr == 0)
+      break;
+    if (wr < 0)
+      return wr;
+    written += static_cast<size_t>(wr);
+  }
+  return static_cast<ssize_t>(written);
+}
+
+ssize_t WriteAllHandle(PlatformHandle h, const void* buf, size_t count) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  DWORD wsize = 0;
+  if (::WriteFile(h, buf, static_cast<DWORD>(count), &wsize, nullptr)) {
+    return wsize;
+  } else {
+    return -1;
+  }
+#else
+  return WriteAll(h, buf, count);
+#endif
+}
+
+bool FlushFile(int fd) {
+  PERFETTO_DCHECK(fd != 0);
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+  return !PERFETTO_EINTR(fdatasync(fd));
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  return !PERFETTO_EINTR(_commit(fd));
+#else
+  return !PERFETTO_EINTR(fsync(fd));
+#endif
+}
+
+bool Mkdir(const std::string& path) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  return _mkdir(path.c_str()) == 0;
+#else
+  return mkdir(path.c_str(), 0755) == 0;
+#endif
+}
+
+bool Rmdir(const std::string& path) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  return _rmdir(path.c_str()) == 0;
+#else
+  return rmdir(path.c_str()) == 0;
+#endif
+}
+
+int CloseFile(int fd) {
+  return close(fd);
+}
+
+ScopedFile OpenFile(const std::string& path, int flags, FileOpenMode mode) {
+  PERFETTO_DCHECK((flags & O_CREAT) == 0 || mode != kFileModeInvalid);
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  // Always use O_BINARY on Windows, to avoid silly EOL translations.
+  ScopedFile fd(_open(path.c_str(), flags | O_BINARY, mode));
+#else
+  // Always open a ScopedFile with O_CLOEXEC so we can safely fork and exec.
+  ScopedFile fd(open(path.c_str(), flags | O_CLOEXEC, mode));
+#endif
+  return fd;
+}
+
+bool FileExists(const std::string& path) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  return _access(path.c_str(), 0) == 0;
+#else
+  return access(path.c_str(), F_OK) == 0;
+#endif
+}
+
+// Declared in base/platform_handle.h.
+int ClosePlatformHandle(PlatformHandle handle) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  // Make the return value UNIX-style.
+  return CloseHandle(handle) ? 0 : -1;
+#else
+  return close(handle);
+#endif
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/getopt_compat.cc
+// gen_amalgamated begin header: include/perfetto/ext/base/getopt_compat.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_GETOPT_COMPAT_H_
+#define INCLUDE_PERFETTO_EXT_BASE_GETOPT_COMPAT_H_
+
+#include <cstddef>  // For std::nullptr_t
+
+// No translation units other than base/getopt.h and getopt_compat_unittest.cc
+// should directly include this file. Use base/getopt.h instead.
+
+namespace perfetto {
+namespace base {
+namespace getopt_compat {
+
+// A tiny getopt() replacement for Windows, which doesn't have <getopt.h>.
+// This implementation is based on the subset of features that we use in the
+// Perfetto codebase. It doesn't even try to deal with the full surface of GNU's
+// getopt().
+// Limitations:
+// - getopt_long_only() is not supported.
+// - optional_argument is not supported. That is extremely subtle and caused us
+//   problems in the past with GNU's getopt.
+// - It does not reorder non-option arguments. It behaves like MacOS getopt, or
+//   GNU's when POSIXLY_CORRECT=1.
+// - Doesn't expose optopt or opterr.
+// - option.flag and longindex are not supported and must be nullptr.
+
+enum {
+  no_argument = 0,
+  required_argument = 1,
+};
+
+struct option {
+  const char* name;
+  int has_arg;
+  std::nullptr_t flag;  // Only nullptr is supported.
+  int val;
+};
+
+extern char* optarg;
+extern int optind;
+
+int getopt_long(int argc,
+                char** argv,
+                const char* shortopts,
+                const option* longopts,
+                std::nullptr_t /*longindex is not supported*/);
+
+int getopt(int argc, char** argv, const char* shortopts);
+
+}  // namespace getopt_compat
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_GETOPT_COMPAT_H_
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/getopt_compat.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+namespace perfetto {
+namespace base {
+namespace getopt_compat {
+
+char* optarg = nullptr;
+int optind = 0;
+
+namespace {
+
+char* nextchar = nullptr;
+
+const option* LookupLongOpt(const std::vector<option>& opts,
+                            const char* name,
+                            size_t len) {
+  for (const option& opt : opts) {
+    if (strncmp(opt.name, name, len) == 0 && strlen(opt.name) == len)
+      return &opt;
+  }
+  return nullptr;
+}
+
+const option* LookupShortOpt(const std::vector<option>& opts, char c) {
+  for (const option& opt : opts) {
+    if (opt.name == nullptr && opt.val == c)
+      return &opt;
+  }
+  return nullptr;
+}
+
+bool ParseOpts(const char* shortopts,
+               const option* longopts,
+               std::vector<option>* res) {
+  // Parse long options first.
+  for (const option* lopt = longopts; lopt && lopt->name; lopt++) {
+    PERFETTO_CHECK(lopt->flag == nullptr);
+    PERFETTO_CHECK(lopt->has_arg == no_argument ||
+                   lopt->has_arg == required_argument);
+    res->emplace_back(*lopt);
+  }
+
+  // Merge short options.
+  for (const char* sopt = shortopts; sopt && *sopt;) {
+    const size_t idx = static_cast<size_t>(sopt - shortopts);
+    char c = *sopt++;
+    bool valid = (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
+                 (c >= '0' && c <= '9');
+    if (!valid) {
+      fprintf(stderr,
+              "Error parsing shortopts. Unexpected char '%c' at offset %zu\n",
+              c, idx);
+      return false;
+    }
+    res->emplace_back();
+    option& opt = res->back();
+    opt.val = c;
+    opt.has_arg = no_argument;
+    if (*sopt == ':') {
+      opt.has_arg = required_argument;
+      ++sopt;
+    }
+  }
+  return true;
+}
+
+}  // namespace
+
+int getopt_long(int argc,
+                char** argv,
+                const char* shortopts,
+                const option* longopts,
+                std::nullptr_t /*longind*/) {
+  std::vector<option> opts;
+  optarg = nullptr;
+
+  if (optind == 0)
+    optind = 1;
+
+  if (optind >= argc)
+    return -1;
+
+  if (!ParseOpts(shortopts, longopts, &opts))
+    return '?';
+
+  char* arg = argv[optind];
+
+  if (!nextchar) {
+    // If |nextchar| is null we are NOT in the middle of a short option and we
+    // should parse the next argv.
+    if (strncmp(arg, "--", 2) == 0 && strlen(arg) > 2) {
+      // A --long option.
+      arg += 2;
+      char* sep = strchr(arg, '=');
+      optind++;
+
+      size_t len = sep ? static_cast<size_t>(sep - arg) : strlen(arg);
+      const option* opt = LookupLongOpt(opts, arg, len);
+      if (!opt) {
+        fprintf(stderr, "unrecognized option '--%s'\n", arg);
+        return '?';
+      }
+
+      if (opt->has_arg == no_argument) {
+        if (sep) {
+          fprintf(stderr, "option '--%s' doesn't allow an argument\n", arg);
+          return '?';
+        } else {
+          return opt->val;
+        }
+      } else if (opt->has_arg == required_argument) {
+        if (sep) {
+          optarg = sep + 1;
+          return opt->val;
+        } else if (optind >= argc) {
+          fprintf(stderr, "option '--%s' requires an argument\n", arg);
+          return '?';
+        } else {
+          optarg = argv[optind++];
+          return opt->val;
+        }
+      }
+      // has_arg must be either |no_argument| or |required_argument|. We
+      // shoulnd't get here unless the check in ParseOpts() has a bug.
+      PERFETTO_CHECK(false);
+    }  // if (arg ~= "--*").
+
+    if (strlen(arg) > 1 && arg[0] == '-' && arg[1] != '-') {
+      // A sequence of short options. Parsing logic continues below.
+      nextchar = &arg[1];
+    }
+  }  // if(!nextchar)
+
+  if (nextchar) {
+    // At this point either:
+    // 1. This is the first char of a sequence of short options, and we fell
+    //    through here from the lines above.
+    // 2. This is the N (>1) char of a sequence of short options, and we got
+    //    here from a new getopt() call to getopt().
+    const char cur_char = *nextchar;
+    PERFETTO_CHECK(cur_char != '\0');
+
+    // Advance the option char in any case, before we start reasoning on them.
+    // if we got to the end of the "-abc" sequence, increment optind so the next
+    // getopt() call resumes from the next argv argument.
+    if (*(++nextchar) == '\0') {
+      nextchar = nullptr;
+      ++optind;
+    }
+
+    const option* opt = LookupShortOpt(opts, cur_char);
+    if (!opt) {
+      fprintf(stderr, "invalid option -- '%c'\n", cur_char);
+      return '?';
+    }
+    if (opt->has_arg == no_argument) {
+      return cur_char;
+    } else if (opt->has_arg == required_argument) {
+      // This is a subtle getopt behavior. Say you call `tar -fx`, there are
+      // two cases:
+      // 1. If 'f' is no_argument then 'x' (and anything else after) is
+      //    interpreted as an independent argument (like `tar -f -x`).
+      // 2. If 'f' is required_argument, than everything else after the 'f'
+      //    is interpreted as the option argument (like `tar -f x`)
+      if (!nextchar) {
+        // Case 1.
+        if (optind >= argc) {
+          fprintf(stderr, "option requires an argument -- '%c'\n", cur_char);
+          return '?';
+        } else {
+          optarg = argv[optind++];
+          return cur_char;
+        }
+      } else {
+        // Case 2.
+        optarg = nextchar;
+        nextchar = nullptr;
+        optind++;
+        return cur_char;
+      }
+    }
+    PERFETTO_CHECK(false);
+  }  // if (nextchar)
+
+  // If we get here, we found the first non-option argument. Stop here.
+
+  if (strcmp(arg, "--") == 0)
+    optind++;
+
+  return -1;
+}
+
+int getopt(int argc, char** argv, const char* shortopts) {
+  return getopt_long(argc, argv, shortopts, nullptr, nullptr);
+}
+
+}  // namespace getopt_compat
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/logging.cc
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+#include <stdarg.h>
+#include <stdio.h>
+
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <unistd.h>  // For isatty()
+#endif
+
+#include <atomic>
+#include <memory>
+
+// gen_amalgamated expanded: #include "perfetto/base/time.h"
+
+namespace perfetto {
+namespace base {
+
+namespace {
+const char kReset[] = "\x1b[0m";
+const char kDefault[] = "\x1b[39m";
+const char kDim[] = "\x1b[2m";
+const char kRed[] = "\x1b[31m";
+const char kBoldGreen[] = "\x1b[1m\x1b[32m";
+const char kLightGray[] = "\x1b[90m";
+
+std::atomic<LogMessageCallback> g_log_callback{};
+
+}  // namespace
+
+void SetLogMessageCallback(LogMessageCallback callback) {
+  g_log_callback.store(callback, std::memory_order_relaxed);
+}
+
+void LogMessage(LogLev level,
+                const char* fname,
+                int line,
+                const char* fmt,
+                ...) {
+  char stack_buf[512];
+  std::unique_ptr<char[]> large_buf;
+  char* log_msg = &stack_buf[0];
+
+  // By default use a stack allocated buffer because most log messages are quite
+  // short. In rare cases they can be larger (e.g. --help). In those cases we
+  // pay the cost of allocating the buffer on the heap.
+  for (size_t max_len = sizeof(stack_buf);;) {
+    va_list args;
+    va_start(args, fmt);
+    int res = vsnprintf(log_msg, max_len, fmt, args);
+    va_end(args);
+
+    // If for any reason the print fails, overwrite the message but still print
+    // it. The code below will attach the filename and line, which is still
+    // useful.
+    if (res < 0) {
+      strncpy(log_msg, "[printf format error]", max_len);
+      break;
+    }
+
+    // if res == max_len, vsnprintf saturated the input buffer. Retry with a
+    // larger buffer in that case (within reasonable limits).
+    if (res < static_cast<int>(max_len) || max_len >= 128 * 1024)
+      break;
+    max_len *= 4;
+    large_buf.reset(new char[max_len]);
+    log_msg = &large_buf[0];
+  }
+
+  LogMessageCallback cb = g_log_callback.load(std::memory_order_relaxed);
+  if (cb) {
+    cb({level, line, fname, log_msg});
+    return;
+  }
+
+  const char* color = kDefault;
+  switch (level) {
+    case kLogDebug:
+      color = kDim;
+      break;
+    case kLogInfo:
+      color = kDefault;
+      break;
+    case kLogImportant:
+      color = kBoldGreen;
+      break;
+    case kLogError:
+      color = kRed;
+      break;
+  }
+
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
+    !PERFETTO_BUILDFLAG(PERFETTO_OS_WASM)
+  static const bool use_colors = isatty(STDERR_FILENO);
+#else
+  static const bool use_colors = false;
+#endif
+
+  // Formats file.cc:line as a space-padded fixed width string. If the file name
+  // |fname| is too long, truncate it on the left-hand side.
+  char line_str[10];
+  size_t line_len =
+      static_cast<size_t>(snprintf(line_str, sizeof(line_str), "%d", line));
+
+  // 24 will be the width of the file.cc:line column in the log event.
+  char file_and_line[24];
+  size_t fname_len = strlen(fname);
+  size_t fname_max = sizeof(file_and_line) - line_len - 2;  // 2 = ':' + '\0'.
+  size_t fname_offset = fname_len <= fname_max ? 0 : fname_len - fname_max;
+  int len = snprintf(file_and_line, sizeof(file_and_line), "%s:%s",
+                     fname + fname_offset, line_str);
+  memset(&file_and_line[len], ' ', sizeof(file_and_line) - size_t(len));
+  file_and_line[sizeof(file_and_line) - 1] = '\0';
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+  // Logcat has already timestamping, don't re-emit it.
+  __android_log_print(ANDROID_LOG_DEBUG + level, "perfetto", "%s %s",
+                      file_and_line, log_msg);
+#endif
+
+  // When printing on stderr, print also the timestamp. We don't really care
+  // about the actual time. We just need some reference clock that can be used
+  // to correlated events across differrent processses (e.g. traced and
+  // traced_probes). The wall time % 1000 is good enough.
+  char timestamp[32];
+  uint32_t t_ms = static_cast<uint32_t>(GetWallTimeMs().count());
+  uint32_t t_sec = t_ms / 1000;
+  t_ms -= t_sec * 1000;
+  t_sec = t_sec % 1000;
+  snprintf(timestamp, sizeof(timestamp), "[%03u.%03u] ", t_sec, t_ms);
+
+  if (use_colors) {
+    fprintf(stderr, "%s%s%s%s %s%s%s\n", kLightGray, timestamp, file_and_line,
+            kReset, color, log_msg, kReset);
+  } else {
+    fprintf(stderr, "%s%s %s\n", timestamp, file_and_line, log_msg);
+  }
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/metatrace.cc
+// gen_amalgamated begin header: include/perfetto/ext/base/metatrace.h
+// gen_amalgamated begin header: include/perfetto/ext/base/metatrace_events.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_METATRACE_EVENTS_H_
+#define INCLUDE_PERFETTO_EXT_BASE_METATRACE_EVENTS_H_
+
+#include <stdint.h>
+
+namespace perfetto {
+namespace metatrace {
+
+enum Tags : uint32_t {
+  TAG_NONE = 0,
+  TAG_ANY = uint32_t(-1),
+  TAG_FTRACE = 1 << 0,
+  TAG_PROC_POLLERS = 1 << 1,
+  TAG_TRACE_WRITER = 1 << 2,
+  TAG_TRACE_SERVICE = 1 << 3,
+  TAG_PRODUCER = 1 << 4,
+};
+
+// The macros below generate matching enums and arrays of string literals.
+// This is to avoid maintaining string maps manually.
+
+// clang-format off
+
+// DO NOT remove or reshuffle items in this list, only append. The ID of these
+// events are an ABI, the trace processor relies on these to open old traces.
+#define PERFETTO_METATRACE_EVENTS(F) \
+  F(EVENT_ZERO_UNUSED), \
+  F(FTRACE_CPU_READER_READ), /*unused*/ \
+  F(FTRACE_DRAIN_CPUS), /*unused*/ \
+  F(FTRACE_UNBLOCK_READERS), /*unused*/ \
+  F(FTRACE_CPU_READ_NONBLOCK), /*unused*/ \
+  F(FTRACE_CPU_READ_BLOCK), /*unused*/ \
+  F(FTRACE_CPU_SPLICE_NONBLOCK), /*unused*/ \
+  F(FTRACE_CPU_SPLICE_BLOCK), /*unused*/ \
+  F(FTRACE_CPU_WAIT_CMD), /*unused*/ \
+  F(FTRACE_CPU_RUN_CYCLE), /*unused*/ \
+  F(FTRACE_CPU_FLUSH), \
+  F(FTRACE_CPU_DRAIN), /*unused*/ \
+  F(READ_SYS_STATS), \
+  F(PS_WRITE_ALL_PROCESSES), \
+  F(PS_ON_PIDS), \
+  F(PS_ON_RENAME_PIDS), \
+  F(PS_WRITE_ALL_PROCESS_STATS), \
+  F(TRACE_WRITER_COMMIT_STARTUP_WRITER_BATCH), \
+  F(FTRACE_READ_TICK), \
+  F(FTRACE_CPU_READ_CYCLE), \
+  F(FTRACE_CPU_READ_BATCH), \
+  F(KALLSYMS_PARSE), \
+  F(PROFILER_READ_TICK), \
+  F(PROFILER_READ_CPU), \
+  F(PROFILER_UNWIND_TICK), \
+  F(PROFILER_UNWIND_SAMPLE), \
+  F(PROFILER_UNWIND_INITIAL_ATTEMPT), \
+  F(PROFILER_UNWIND_ATTEMPT), \
+  F(PROFILER_MAPS_PARSE), \
+  F(PROFILER_MAPS_REPARSE), \
+  F(PROFILER_UNWIND_CACHE_CLEAR)
+
+// Append only, see above.
+//
+// Values that aren't used as counters:
+// * FTRACE_SERVICE_COMMIT_DATA is a bit-packed representation of an event, see
+//   tracing_service_impl.cc for the format.
+// * PROFILER_UNWIND_CURRENT_PID represents the PID that is being unwound.
+//
+#define PERFETTO_METATRACE_COUNTERS(F) \
+  F(COUNTER_ZERO_UNUSED),\
+  F(FTRACE_PAGES_DRAINED), \
+  F(PS_PIDS_SCANNED), \
+  F(TRACE_SERVICE_COMMIT_DATA), \
+  F(PROFILER_UNWIND_QUEUE_SZ), \
+  F(PROFILER_UNWIND_CURRENT_PID)
+
+// clang-format on
+
+#define PERFETTO_METATRACE_IDENTITY(name) name
+#define PERFETTO_METATRACE_TOSTRING(name) #name
+
+enum Events : uint16_t {
+  PERFETTO_METATRACE_EVENTS(PERFETTO_METATRACE_IDENTITY),
+  EVENTS_MAX
+};
+constexpr char const* kEventNames[] = {
+    PERFETTO_METATRACE_EVENTS(PERFETTO_METATRACE_TOSTRING)};
+
+enum Counters : uint16_t {
+  PERFETTO_METATRACE_COUNTERS(PERFETTO_METATRACE_IDENTITY),
+  COUNTERS_MAX
+};
+constexpr char const* kCounterNames[] = {
+    PERFETTO_METATRACE_COUNTERS(PERFETTO_METATRACE_TOSTRING)};
+
+inline void SuppressUnusedVarsInAmalgamatedBuild() {
+  (void)kCounterNames;
+  (void)kEventNames;
+}
+
+}  // namespace metatrace
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_METATRACE_EVENTS_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_METATRACE_H_
+#define INCLUDE_PERFETTO_EXT_BASE_METATRACE_H_
+
+#include <array>
+#include <atomic>
+#include <functional>
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/thread_utils.h"
+// gen_amalgamated expanded: #include "perfetto/base/time.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/metatrace_events.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+
+// A facility to trace execution of the perfetto codebase itself.
+// The meta-tracing framework is organized into three layers:
+//
+// 1. A static ring-buffer in base/ (this file) that supports concurrent writes
+//    and a single reader.
+//    The responsibility of this layer is to store events and counters as
+//    efficiently as possible without re-entering any tracing code.
+//    This is really a static-storage-based ring-buffer based on a POD array.
+//    This layer does NOT deal with serializing the meta-trace buffer.
+//    It posts a task when it's half full and expects something outside of
+//    base/ to drain the ring-buffer and serialize it, eventually writing it
+//    into the trace itself, before it gets 100% full.
+//
+// 2. A class in tracing/core which takes care of serializing the meta-trace
+//    buffer into the trace using a TraceWriter. See metatrace_writer.h .
+//
+// 3. A data source in traced_probes that, when be enabled via the trace config,
+//    injects metatrace events into the trace. See metatrace_data_source.h .
+//
+// The available events and tags are defined in metatrace_events.h .
+
+namespace perfetto {
+
+namespace base {
+class TaskRunner;
+}  // namespace base
+
+namespace metatrace {
+
+// Meta-tracing is organized in "tags" that can be selectively enabled. This is
+// to enable meta-tracing only of one sub-system. This word has one "enabled"
+// bit for each tag. 0 -> meta-tracing off.
+extern std::atomic<uint32_t> g_enabled_tags;
+
+// Time of the Enable() call. Used as a reference for keeping delta timestmaps
+// in Record.
+extern std::atomic<uint64_t> g_enabled_timestamp;
+
+// Enables meta-tracing for one or more tags. Once enabled it will discard any
+// further Enable() calls and return false until disabled,
+// |read_task| is a closure that will be called enqueued |task_runner| when the
+// meta-tracing ring buffer is half full. The task is expected to read the ring
+// buffer using RingBuffer::GetReadIterator() and serialize the contents onto a
+// file or into the trace itself.
+// Must be called on the |task_runner| passed.
+// |task_runner| must have static lifetime.
+bool Enable(std::function<void()> read_task, base::TaskRunner*, uint32_t tags);
+
+// Disables meta-tracing.
+// Must be called on the same |task_runner| as Enable().
+void Disable();
+
+inline uint64_t TraceTimeNowNs() {
+  return static_cast<uint64_t>(base::GetBootTimeNs().count());
+}
+
+// Returns a relaxed view of whether metatracing is enabled for the given tag.
+// Useful for skipping unnecessary argument computation if metatracing is off.
+inline bool IsEnabled(uint32_t tag) {
+  auto enabled_tags = g_enabled_tags.load(std::memory_order_relaxed);
+  return PERFETTO_UNLIKELY((enabled_tags & tag) != 0);
+}
+
+// Holds the data for a metatrace event or counter.
+struct Record {
+  static constexpr uint16_t kTypeMask = 0x8000;
+  static constexpr uint16_t kTypeCounter = 0x8000;
+  static constexpr uint16_t kTypeEvent = 0;
+
+  uint64_t timestamp_ns() const {
+    auto base_ns = g_enabled_timestamp.load(std::memory_order_relaxed);
+    PERFETTO_DCHECK(base_ns);
+    return base_ns + ((static_cast<uint64_t>(timestamp_ns_high) << 32) |
+                      timestamp_ns_low);
+  }
+
+  void set_timestamp(uint64_t ts) {
+    auto t_start = g_enabled_timestamp.load(std::memory_order_relaxed);
+    uint64_t diff = ts - t_start;
+    PERFETTO_DCHECK(diff < (1ull << 48));
+    timestamp_ns_low = static_cast<uint32_t>(diff);
+    timestamp_ns_high = static_cast<uint16_t>(diff >> 32);
+  }
+
+  // We can't just memset() this class because on MSVC std::atomic<> is not
+  // trivially constructible anymore. Also std::atomic<> has a deleted copy
+  // constructor so we cant just do "*this = Record()" either.
+  // See http://bit.ly/339Jlzd .
+  void clear() {
+    this->~Record();
+    new (this) Record();
+  }
+
+  // This field holds the type (counter vs event) in the MSB and event ID (as
+  // defined in metatrace_events.h) in the lowest 15 bits. It is also used also
+  // as a linearization point: this is always written after all the other
+  // fields with a release-store. This is so the reader can determine whether it
+  // can safely process the other event fields after a load-acquire.
+  std::atomic<uint16_t> type_and_id{};
+
+  // Timestamp is stored as a 48-bits value diffed against g_enabled_timestamp.
+  // This gives us 78 hours from Enabled().
+  uint16_t timestamp_ns_high = 0;
+  uint32_t timestamp_ns_low = 0;
+
+  uint32_t thread_id = 0;
+
+  union {
+    // Only one of the two elements can be zero initialized, clang complains
+    // about "initializing multiple members of union" otherwise.
+    uint32_t duration_ns = 0;  // If type == event.
+    int32_t counter_value;     // If type == counter.
+  };
+};
+
+// Hold the meta-tracing data into a statically allocated array.
+// This class uses static storage (as opposite to being a singleton) to:
+// - Have the guarantee of always valid storage, so that meta-tracing can be
+//   safely used in any part of the codebase, including base/ itself.
+// - Avoid barriers that thread-safe static locals would require.
+class RingBuffer {
+ public:
+  static constexpr size_t kCapacity = 4096;  // 4096 * 16 bytes = 64K.
+
+  // This iterator is not idempotent and will bump the read index in the buffer
+  // at the end of the reads. There can be only one reader at any time.
+  // Usage: for (auto it = RingBuffer::GetReadIterator(); it; ++it) { it->... }
+  class ReadIterator {
+   public:
+    ReadIterator(ReadIterator&& other) {
+      PERFETTO_DCHECK(other.valid_);
+      cur_ = other.cur_;
+      end_ = other.end_;
+      valid_ = other.valid_;
+      other.valid_ = false;
+    }
+
+    ~ReadIterator() {
+      if (!valid_)
+        return;
+      PERFETTO_DCHECK(cur_ >= RingBuffer::rd_index_);
+      PERFETTO_DCHECK(cur_ <= RingBuffer::wr_index_);
+      RingBuffer::rd_index_.store(cur_, std::memory_order_release);
+    }
+
+    explicit operator bool() const { return cur_ < end_; }
+    const Record* operator->() const { return RingBuffer::At(cur_); }
+    const Record& operator*() const { return *operator->(); }
+
+    // This is for ++it. it++ is deliberately not supported.
+    ReadIterator& operator++() {
+      PERFETTO_DCHECK(cur_ < end_);
+      // Once a record has been read, mark it as free clearing its type_and_id,
+      // so if we encounter it in another read iteration while being written
+      // we know it's not fully written yet.
+      // The memory_order_relaxed below is enough because:
+      // - The reader is single-threaded and doesn't re-read the same records.
+      // - Before starting a read batch, the reader has an acquire barrier on
+      //   |rd_index_|.
+      // - After terminating a read batch, the ~ReadIterator dtor updates the
+      //   |rd_index_| with a release-store.
+      // - Reader and writer are typically kCapacity/2 apart. So unless an
+      //   overrun happens a writer won't reuse a newly released record any time
+      //   soon. If an overrun happens, everything is busted regardless.
+      At(cur_)->type_and_id.store(0, std::memory_order_relaxed);
+      ++cur_;
+      return *this;
+    }
+
+   private:
+    friend class RingBuffer;
+    ReadIterator(uint64_t begin, uint64_t end)
+        : cur_(begin), end_(end), valid_(true) {}
+    ReadIterator& operator=(const ReadIterator&) = delete;
+    ReadIterator(const ReadIterator&) = delete;
+
+    uint64_t cur_;
+    uint64_t end_;
+    bool valid_;
+  };
+
+  static Record* At(uint64_t index) {
+    // Doesn't really have to be pow2, but if not the compiler will emit
+    // arithmetic operations to compute the modulo instead of a bitwise AND.
+    static_assert(!(kCapacity & (kCapacity - 1)), "kCapacity must be pow2");
+    PERFETTO_DCHECK(index >= rd_index_);
+    PERFETTO_DCHECK(index <= wr_index_);
+    return &records_[index % kCapacity];
+  }
+
+  // Must be called on the same task runner passed to Enable()
+  static ReadIterator GetReadIterator() {
+    PERFETTO_DCHECK(RingBuffer::IsOnValidTaskRunner());
+    return ReadIterator(rd_index_.load(std::memory_order_acquire),
+                        wr_index_.load(std::memory_order_acquire));
+  }
+
+  static Record* AppendNewRecord();
+  static void Reset();
+
+  static bool has_overruns() {
+    return has_overruns_.load(std::memory_order_acquire);
+  }
+
+  // Can temporarily return a value >= kCapacity but is eventually consistent.
+  // This would happen in case of overruns until threads hit the --wr_index_
+  // in AppendNewRecord().
+  static uint64_t GetSizeForTesting() {
+    auto wr_index = wr_index_.load(std::memory_order_relaxed);
+    auto rd_index = rd_index_.load(std::memory_order_relaxed);
+    PERFETTO_DCHECK(wr_index >= rd_index);
+    return wr_index - rd_index;
+  }
+
+ private:
+  friend class ReadIterator;
+
+  // Returns true if the caller is on the task runner passed to Enable().
+  // Used only for DCHECKs.
+  static bool IsOnValidTaskRunner();
+
+  static std::array<Record, kCapacity> records_;
+  static std::atomic<bool> read_task_queued_;
+  static std::atomic<uint64_t> wr_index_;
+  static std::atomic<uint64_t> rd_index_;
+  static std::atomic<bool> has_overruns_;
+  static Record bankruptcy_record_;  // Used in case of overruns.
+};
+
+inline void TraceCounter(uint32_t tag, uint16_t id, int32_t value) {
+  // memory_order_relaxed is okay because the storage has static lifetime.
+  // It is safe to accidentally log an event soon after disabling.
+  auto enabled_tags = g_enabled_tags.load(std::memory_order_relaxed);
+  if (PERFETTO_LIKELY((enabled_tags & tag) == 0))
+    return;
+  Record* record = RingBuffer::AppendNewRecord();
+  record->thread_id = static_cast<uint32_t>(base::GetThreadId());
+  record->set_timestamp(TraceTimeNowNs());
+  record->counter_value = value;
+  record->type_and_id.store(Record::kTypeCounter | id,
+                            std::memory_order_release);
+}
+
+class ScopedEvent {
+ public:
+  ScopedEvent(uint32_t tag, uint16_t event_id) {
+    auto enabled_tags = g_enabled_tags.load(std::memory_order_relaxed);
+    if (PERFETTO_LIKELY((enabled_tags & tag) == 0))
+      return;
+    event_id_ = event_id;
+    record_ = RingBuffer::AppendNewRecord();
+    record_->thread_id = static_cast<uint32_t>(base::GetThreadId());
+    record_->set_timestamp(TraceTimeNowNs());
+  }
+
+  ~ScopedEvent() {
+    if (PERFETTO_LIKELY(!record_))
+      return;
+    auto now = TraceTimeNowNs();
+    record_->duration_ns = static_cast<uint32_t>(now - record_->timestamp_ns());
+    record_->type_and_id.store(Record::kTypeEvent | event_id_,
+                               std::memory_order_release);
+  }
+
+ private:
+  Record* record_ = nullptr;
+  uint16_t event_id_ = 0;
+  ScopedEvent(const ScopedEvent&) = delete;
+  ScopedEvent& operator=(const ScopedEvent&) = delete;
+};
+
+// Boilerplate to derive a unique variable name for the event.
+#define PERFETTO_METATRACE_UID2(a, b) a##b
+#define PERFETTO_METATRACE_UID(x) PERFETTO_METATRACE_UID2(metatrace_, x)
+
+#define PERFETTO_METATRACE_SCOPED(TAG, ID)                                \
+  ::perfetto::metatrace::ScopedEvent PERFETTO_METATRACE_UID(__COUNTER__)( \
+      ::perfetto::metatrace::TAG, ::perfetto::metatrace::ID)
+
+#define PERFETTO_METATRACE_COUNTER(TAG, ID, VALUE)                \
+  ::perfetto::metatrace::TraceCounter(::perfetto::metatrace::TAG, \
+                                      ::perfetto::metatrace::ID,  \
+                                      static_cast<int32_t>(VALUE))
+
+}  // namespace metatrace
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_METATRACE_H_
+// gen_amalgamated begin header: include/perfetto/base/task_runner.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_BASE_TASK_RUNNER_H_
+#define INCLUDE_PERFETTO_BASE_TASK_RUNNER_H_
+
+#include <stdint.h>
+
+#include <functional>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/platform_handle.h"
+
+namespace perfetto {
+namespace base {
+
+// A generic interface to allow the library clients to interleave the execution
+// of the tracing internals in their runtime environment.
+// The expectation is that all tasks, which are queued either via PostTask() or
+// AddFileDescriptorWatch(), are executed on the same sequence (either on the
+// same thread, or on a thread pool that gives sequencing guarantees).
+//
+// Tasks are never executed synchronously inside PostTask and there is a full
+// memory barrier between tasks.
+//
+// All methods of this interface can be called from any thread.
+class PERFETTO_EXPORT TaskRunner {
+ public:
+  virtual ~TaskRunner();
+
+  // Schedule a task for immediate execution. Immediate tasks are always
+  // executed in the order they are posted. Can be called from any thread.
+  virtual void PostTask(std::function<void()>) = 0;
+
+  // Schedule a task for execution after |delay_ms|. Note that there is no
+  // strict ordering guarantee between immediate and delayed tasks. Can be
+  // called from any thread.
+  virtual void PostDelayedTask(std::function<void()>, uint32_t delay_ms) = 0;
+
+  // Schedule a task to run when the handle becomes readable. The same handle
+  // can only be monitored by one function. Note that this function only needs
+  // to be implemented on platforms where the built-in ipc framework is used.
+  // Can be called from any thread.
+  // TODO(skyostil): Refactor this out of the shared interface.
+  virtual void AddFileDescriptorWatch(PlatformHandle,
+                                      std::function<void()>) = 0;
+
+  // Remove a previously scheduled watch for the handle. If this is run on the
+  // target thread of this TaskRunner, guarantees that the task registered to
+  // this handle will not be executed after this function call.
+  // Can be called from any thread.
+  virtual void RemoveFileDescriptorWatch(PlatformHandle) = 0;
+
+  // Checks if the current thread is the same thread where the TaskRunner's task
+  // run. This allows single threaded task runners (like the ones used in
+  // perfetto) to inform the caller that anything posted will run on the same
+  // thread/sequence. This can allow some callers to skip PostTask and instead
+  // directly execute the code. Can be called from any thread.
+  virtual bool RunsTasksOnCurrentThread() const = 0;
+};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_BASE_TASK_RUNNER_H_
+// gen_amalgamated begin header: include/perfetto/ext/base/thread_annotations.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_THREAD_ANNOTATIONS_H_
+#define INCLUDE_PERFETTO_EXT_BASE_THREAD_ANNOTATIONS_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+// Windows TSAN doesn't currently support these annotations.
+#if defined(THREAD_SANITIZER) && !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+extern "C" {
+void AnnotateBenignRaceSized(const char* file,
+                             int line,
+                             unsigned long address,
+                             unsigned long size,
+                             const char* description);
+}
+
+#define PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(pointer, size, description)   \
+  AnnotateBenignRaceSized(__FILE__, __LINE__,                             \
+                          reinterpret_cast<unsigned long>(pointer), size, \
+                          description);
+#else  // defined(ADDRESS_SANITIZER)
+#define PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(pointer, size, description)
+#endif  // defined(ADDRESS_SANITIZER)
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_THREAD_ANNOTATIONS_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/metatrace.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/base/time.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/thread_annotations.h"
+
+namespace perfetto {
+namespace metatrace {
+
+std::atomic<uint32_t> g_enabled_tags{0};
+std::atomic<uint64_t> g_enabled_timestamp{0};
+
+// static members
+constexpr size_t RingBuffer::kCapacity;
+std::array<Record, RingBuffer::kCapacity> RingBuffer::records_;
+std::atomic<bool> RingBuffer::read_task_queued_;
+std::atomic<uint64_t> RingBuffer::wr_index_;
+std::atomic<uint64_t> RingBuffer::rd_index_;
+std::atomic<bool> RingBuffer::has_overruns_;
+Record RingBuffer::bankruptcy_record_;
+
+constexpr uint16_t Record::kTypeMask;
+constexpr uint16_t Record::kTypeCounter;
+constexpr uint16_t Record::kTypeEvent;
+
+namespace {
+
+// std::function<> is not trivially de/constructible. This struct wraps it in a
+// heap-allocated struct to avoid static initializers.
+struct Delegate {
+  static Delegate* GetInstance() {
+    static Delegate* instance = new Delegate();
+    return instance;
+  }
+
+  base::TaskRunner* task_runner = nullptr;
+  std::function<void()> read_task;
+};
+
+}  // namespace
+
+bool Enable(std::function<void()> read_task,
+            base::TaskRunner* task_runner,
+            uint32_t tags) {
+  PERFETTO_DCHECK(read_task);
+  PERFETTO_DCHECK(task_runner->RunsTasksOnCurrentThread());
+  if (g_enabled_tags.load(std::memory_order_acquire))
+    return false;
+
+  Delegate* dg = Delegate::GetInstance();
+  dg->task_runner = task_runner;
+  dg->read_task = std::move(read_task);
+  RingBuffer::Reset();
+  g_enabled_timestamp.store(TraceTimeNowNs(), std::memory_order_relaxed);
+  g_enabled_tags.store(tags, std::memory_order_release);
+  return true;
+}
+
+void Disable() {
+  g_enabled_tags.store(0, std::memory_order_release);
+  Delegate* dg = Delegate::GetInstance();
+  PERFETTO_DCHECK(!dg->task_runner ||
+                  dg->task_runner->RunsTasksOnCurrentThread());
+  dg->task_runner = nullptr;
+  dg->read_task = nullptr;
+}
+
+// static
+void RingBuffer::Reset() {
+  bankruptcy_record_.clear();
+  for (Record& record : records_)
+    record.clear();
+  wr_index_ = 0;
+  rd_index_ = 0;
+  has_overruns_ = false;
+  read_task_queued_ = false;
+}
+
+// static
+Record* RingBuffer::AppendNewRecord() {
+  auto wr_index = wr_index_.fetch_add(1, std::memory_order_acq_rel);
+
+  // rd_index can only monotonically increase, we don't care if we read an
+  // older value, we'll just hit the slow-path a bit earlier if it happens.
+  auto rd_index = rd_index_.load(std::memory_order_relaxed);
+
+  PERFETTO_DCHECK(wr_index >= rd_index);
+  auto size = wr_index - rd_index;
+  if (PERFETTO_LIKELY(size < kCapacity / 2))
+    return At(wr_index);
+
+  // Slow-path: Enqueue the read task and handle overruns.
+  bool expected = false;
+  if (RingBuffer::read_task_queued_.compare_exchange_strong(expected, true)) {
+    Delegate* dg = Delegate::GetInstance();
+    if (dg->task_runner) {
+      dg->task_runner->PostTask([] {
+        // Meta-tracing might have been disabled in the meantime.
+        auto read_task = Delegate::GetInstance()->read_task;
+        if (read_task)
+          read_task();
+        RingBuffer::read_task_queued_ = false;
+      });
+    }
+  }
+
+  if (PERFETTO_LIKELY(size < kCapacity))
+    return At(wr_index);
+
+  has_overruns_.store(true, std::memory_order_release);
+  wr_index_.fetch_sub(1, std::memory_order_acq_rel);
+
+  // In the case of overflows, threads will race writing on the same memory
+  // location and TSan will rightly complain. This is fine though because nobody
+  // will read the bankruptcy record and it's designed to contain garbage.
+  PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(&bankruptcy_record_, sizeof(Record),
+                                      "nothing reads bankruptcy_record_")
+  return &bankruptcy_record_;
+}
+
+// static
+bool RingBuffer::IsOnValidTaskRunner() {
+  auto* task_runner = Delegate::GetInstance()->task_runner;
+  return task_runner && task_runner->RunsTasksOnCurrentThread();
+}
+
+}  // namespace metatrace
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/paged_memory.cc
+// gen_amalgamated begin header: include/perfetto/ext/base/paged_memory.h
+// gen_amalgamated begin header: include/perfetto/ext/base/container_annotations.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_CONTAINER_ANNOTATIONS_H_
+#define INCLUDE_PERFETTO_EXT_BASE_CONTAINER_ANNOTATIONS_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+// Windows ASAN doesn't currently support these annotations.
+#if defined(ADDRESS_SANITIZER) && !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
+    !defined(ADDRESS_SANITIZER_WITHOUT_INSTRUMENTATION)
+
+#define ANNOTATE_NEW_BUFFER(buffer, capacity, new_size)                      \
+  if (buffer) {                                                              \
+    __sanitizer_annotate_contiguous_container(buffer, (buffer) + (capacity), \
+                                              (buffer) + (capacity),         \
+                                              (buffer) + (new_size));        \
+  }
+#define ANNOTATE_DELETE_BUFFER(buffer, capacity, old_size)                   \
+  if (buffer) {                                                              \
+    __sanitizer_annotate_contiguous_container(buffer, (buffer) + (capacity), \
+                                              (buffer) + (old_size),         \
+                                              (buffer) + (capacity));        \
+  }
+#define ANNOTATE_CHANGE_SIZE(buffer, capacity, old_size, new_size)           \
+  if (buffer) {                                                              \
+    __sanitizer_annotate_contiguous_container(buffer, (buffer) + (capacity), \
+                                              (buffer) + (old_size),         \
+                                              (buffer) + (new_size));        \
+  }
+#define ANNOTATE_CHANGE_CAPACITY(buffer, old_capacity, buffer_size, \
+                                 new_capacity)                      \
+  ANNOTATE_DELETE_BUFFER(buffer, old_capacity, buffer_size);        \
+  ANNOTATE_NEW_BUFFER(buffer, new_capacity, buffer_size);
+// Annotations require buffers to begin on an 8-byte boundary.
+#else  // defined(ADDRESS_SANITIZER)
+#define ANNOTATE_NEW_BUFFER(buffer, capacity, new_size)
+#define ANNOTATE_DELETE_BUFFER(buffer, capacity, old_size)
+#define ANNOTATE_CHANGE_SIZE(buffer, capacity, old_size, new_size)
+#define ANNOTATE_CHANGE_CAPACITY(buffer, old_capacity, buffer_size, \
+                                 new_capacity)
+#endif  // defined(ADDRESS_SANITIZER)
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_CONTAINER_ANNOTATIONS_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_PAGED_MEMORY_H_
+#define INCLUDE_PERFETTO_EXT_BASE_PAGED_MEMORY_H_
+
+#include <memory>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/container_annotations.h"
+
+// We need to track the committed size on windows and when ASAN is enabled.
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) || defined(ADDRESS_SANITIZER)
+#define TRACK_COMMITTED_SIZE() 1
+#else
+#define TRACK_COMMITTED_SIZE() 0
+#endif
+
+namespace perfetto {
+namespace base {
+
+class PagedMemory {
+ public:
+  // Initializes an invalid PagedMemory pointing to nullptr.
+  PagedMemory();
+
+  ~PagedMemory();
+
+  PagedMemory(PagedMemory&& other) noexcept;
+  PagedMemory& operator=(PagedMemory&& other);
+
+  enum AllocationFlags {
+    // By default, Allocate() crashes if the underlying mmap fails (e.g., if out
+    // of virtual address space). When this flag is provided, an invalid
+    // PagedMemory pointing to nullptr is returned in this case instead.
+    kMayFail = 1 << 0,
+
+    // By default, Allocate() commits the allocated memory immediately. When
+    // this flag is provided, the memory virtual address space may only be
+    // reserved and the user should call EnsureCommitted() before writing to
+    // memory addresses.
+    kDontCommit = 1 << 1,
+  };
+
+  // Allocates |size| bytes using mmap(MAP_ANONYMOUS). The returned memory is
+  // guaranteed to be page-aligned and guaranteed to be zeroed.
+  // For |flags|, see the AllocationFlags enum above.
+  static PagedMemory Allocate(size_t size, int flags = 0);
+
+  // Hint to the OS that the memory range is not needed and can be discarded.
+  // The memory remains accessible and its contents may be retained, or they
+  // may be zeroed. This function may be a NOP on some platforms. Returns true
+  // if implemented.
+  bool AdviseDontNeed(void* p, size_t size);
+
+  // Ensures that at least the first |committed_size| bytes of the allocated
+  // memory region are committed. The implementation may commit memory in larger
+  // chunks above |committed_size|. Crashes if the memory couldn't be committed.
+#if TRACK_COMMITTED_SIZE()
+  void EnsureCommitted(size_t committed_size);
+#else   // TRACK_COMMITTED_SIZE()
+  void EnsureCommitted(size_t /*committed_size*/) {}
+#endif  // TRACK_COMMITTED_SIZE()
+
+  inline void* Get() const noexcept { return p_; }
+  inline bool IsValid() const noexcept { return !!p_; }
+  inline size_t size() const noexcept { return size_; }
+
+ private:
+  PagedMemory(char* p, size_t size);
+
+  PagedMemory(const PagedMemory&) = delete;
+  // Defaulted for implementation of move constructor + assignment.
+  PagedMemory& operator=(const PagedMemory&) = default;
+
+  char* p_ = nullptr;
+
+  // The size originally passed to Allocate(). The actual virtual memory
+  // reservation will be larger due to: (i) guard pages; (ii) rounding up to
+  // the system page size.
+  size_t size_ = 0;
+
+#if TRACK_COMMITTED_SIZE()
+  size_t committed_size_ = 0u;
+#endif  // TRACK_COMMITTED_SIZE()
+};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_PAGED_MEMORY_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/paged_memory.h"
+
+#include <algorithm>
+#include <cmath>
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <Windows.h>
+#else  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <sys/mman.h>
+#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/container_annotations.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+
+namespace perfetto {
+namespace base {
+
+namespace {
+
+#if TRACK_COMMITTED_SIZE()
+constexpr size_t kCommitChunkSize = 4 * 1024 * 1024;  // 4MB
+#endif
+
+size_t RoundUpToSysPageSize(size_t req_size) {
+  const size_t page_size = GetSysPageSize();
+  return (req_size + page_size - 1) & ~(page_size - 1);
+}
+
+size_t GuardSize() {
+  return GetSysPageSize();
+}
+
+}  // namespace
+
+// static
+PagedMemory PagedMemory::Allocate(size_t req_size, int flags) {
+  size_t rounded_up_size = RoundUpToSysPageSize(req_size);
+  PERFETTO_CHECK(rounded_up_size >= req_size);
+  size_t outer_size = rounded_up_size + GuardSize() * 2;
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  void* ptr = VirtualAlloc(nullptr, outer_size, MEM_RESERVE, PAGE_NOACCESS);
+  if (!ptr && (flags & kMayFail))
+    return PagedMemory();
+  PERFETTO_CHECK(ptr);
+  char* usable_region = reinterpret_cast<char*>(ptr) + GuardSize();
+#else   // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  void* ptr = mmap(nullptr, outer_size, PROT_READ | PROT_WRITE,
+                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  if (ptr == MAP_FAILED && (flags & kMayFail))
+    return PagedMemory();
+  PERFETTO_CHECK(ptr && ptr != MAP_FAILED);
+  char* usable_region = reinterpret_cast<char*>(ptr) + GuardSize();
+  int res = mprotect(ptr, GuardSize(), PROT_NONE);
+  res |= mprotect(usable_region + rounded_up_size, GuardSize(), PROT_NONE);
+  PERFETTO_CHECK(res == 0);
+#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+
+  auto memory = PagedMemory(usable_region, req_size);
+#if TRACK_COMMITTED_SIZE()
+  size_t initial_commit = req_size;
+  if (flags & kDontCommit)
+    initial_commit = std::min(initial_commit, kCommitChunkSize);
+  memory.EnsureCommitted(initial_commit);
+#endif  // TRACK_COMMITTED_SIZE()
+  return memory;
+}
+
+PagedMemory::PagedMemory() {}
+
+// clang-format off
+PagedMemory::PagedMemory(char* p, size_t size) : p_(p), size_(size) {
+  ANNOTATE_NEW_BUFFER(p_, size_, committed_size_)
+}
+
+PagedMemory::PagedMemory(PagedMemory&& other) noexcept {
+  *this = other;
+  other.p_ = nullptr;
+}
+// clang-format on
+
+PagedMemory& PagedMemory::operator=(PagedMemory&& other) {
+  this->~PagedMemory();
+  new (this) PagedMemory(std::move(other));
+  return *this;
+}
+
+PagedMemory::~PagedMemory() {
+  if (!p_)
+    return;
+  PERFETTO_CHECK(size_);
+  char* start = p_ - GuardSize();
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  BOOL res = VirtualFree(start, 0, MEM_RELEASE);
+  PERFETTO_CHECK(res != 0);
+#else   // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  const size_t outer_size = RoundUpToSysPageSize(size_) + GuardSize() * 2;
+  int res = munmap(start, outer_size);
+  PERFETTO_CHECK(res == 0);
+#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  ANNOTATE_DELETE_BUFFER(p_, size_, committed_size_)
+}
+
+bool PagedMemory::AdviseDontNeed(void* p, size_t size) {
+  PERFETTO_DCHECK(p_);
+  PERFETTO_DCHECK(p >= p_);
+  PERFETTO_DCHECK(static_cast<char*>(p) + size <= p_ + size_);
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) || PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
+  // Discarding pages on Windows has more CPU cost than is justified for the
+  // possible memory savings.
+  return false;
+#else   // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) ||
+        // PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
+  // http://man7.org/linux/man-pages/man2/madvise.2.html
+  int res = madvise(p, size, MADV_DONTNEED);
+  PERFETTO_DCHECK(res == 0);
+  return true;
+#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) ||
+        // PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
+}
+
+#if TRACK_COMMITTED_SIZE()
+void PagedMemory::EnsureCommitted(size_t committed_size) {
+  PERFETTO_DCHECK(committed_size > 0u);
+  PERFETTO_DCHECK(committed_size <= size_);
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  if (committed_size_ >= committed_size)
+    return;
+  // Rounding up.
+  size_t delta = committed_size - committed_size_;
+  size_t num_additional_chunks =
+      (delta + kCommitChunkSize - 1) / kCommitChunkSize;
+  PERFETTO_DCHECK(num_additional_chunks * kCommitChunkSize >= delta);
+  // Don't commit more than the total size.
+  size_t commit_size = std::min(num_additional_chunks * kCommitChunkSize,
+                                size_ - committed_size_);
+  void* res = VirtualAlloc(p_ + committed_size_, commit_size, MEM_COMMIT,
+                           PAGE_READWRITE);
+  PERFETTO_CHECK(res);
+  ANNOTATE_CHANGE_SIZE(p_, size_, committed_size_,
+                       committed_size_ + commit_size)
+  committed_size_ += commit_size;
+#else   // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  // mmap commits automatically as needed, so we only track here for ASAN.
+  committed_size = std::max(committed_size_, committed_size);
+  ANNOTATE_CHANGE_SIZE(p_, size_, committed_size_, committed_size)
+  committed_size_ = committed_size;
+#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+}
+#endif  // TRACK_COMMITTED_SIZE()
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/periodic_task.cc
+// gen_amalgamated begin header: include/perfetto/ext/base/periodic_task.h
+// gen_amalgamated begin header: include/perfetto/ext/base/thread_checker.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_THREAD_CHECKER_H_
+#define INCLUDE_PERFETTO_EXT_BASE_THREAD_CHECKER_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <pthread.h>
+#endif
+#include <atomic>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+
+namespace perfetto {
+namespace base {
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+using ThreadID = unsigned long;
+#else
+using ThreadID = pthread_t;
+#endif
+
+class PERFETTO_EXPORT ThreadChecker {
+ public:
+  ThreadChecker();
+  ~ThreadChecker();
+  ThreadChecker(const ThreadChecker&);
+  ThreadChecker& operator=(const ThreadChecker&);
+  bool CalledOnValidThread() const PERFETTO_WARN_UNUSED_RESULT;
+  void DetachFromThread();
+
+ private:
+  mutable std::atomic<ThreadID> thread_id_;
+};
+
+#if PERFETTO_DCHECK_IS_ON() && !PERFETTO_BUILDFLAG(PERFETTO_CHROMIUM_BUILD)
+// TODO(primiano) Use Chromium's thread checker in Chromium.
+#define PERFETTO_THREAD_CHECKER(name) base::ThreadChecker name;
+#define PERFETTO_DCHECK_THREAD(name) \
+  PERFETTO_DCHECK((name).CalledOnValidThread())
+#define PERFETTO_DETACH_FROM_THREAD(name) (name).DetachFromThread()
+#else
+#define PERFETTO_THREAD_CHECKER(name)
+#define PERFETTO_DCHECK_THREAD(name)
+#define PERFETTO_DETACH_FROM_THREAD(name)
+#endif  // PERFETTO_DCHECK_IS_ON()
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_THREAD_CHECKER_H_
+// gen_amalgamated begin header: include/perfetto/ext/base/weak_ptr.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_WEAK_PTR_H_
+#define INCLUDE_PERFETTO_EXT_BASE_WEAK_PTR_H_
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
+
+#include <memory>
+
+namespace perfetto {
+namespace base {
+
+// A simple WeakPtr for single-threaded cases.
+// Generally keep the WeakPtrFactory as last fields in classes: it makes the
+// WeakPtr(s) invalidate as first thing in the class dtor.
+// Usage:
+// class MyClass {
+//  MyClass() : weak_factory_(this) {}
+//  WeakPtr<MyClass> GetWeakPtr() { return weak_factory_.GetWeakPtr(); }
+//
+// private:
+//  WeakPtrFactory<MyClass> weak_factory_;
+// }
+//
+// int main() {
+//  std::unique_ptr<MyClass> foo(new MyClass);
+//  auto wptr = foo.GetWeakPtr();
+//  ASSERT_TRUE(wptr);
+//  ASSERT_EQ(foo.get(), wptr->get());
+//  foo.reset();
+//  ASSERT_FALSE(wptr);
+//  ASSERT_EQ(nullptr, wptr->get());
+// }
+
+template <typename T>
+class WeakPtrFactory;  // Forward declaration, defined below.
+
+template <typename T>
+class WeakPtr {
+ public:
+  WeakPtr() {}
+  WeakPtr(const WeakPtr&) = default;
+  WeakPtr& operator=(const WeakPtr&) = default;
+  WeakPtr(WeakPtr&&) = default;
+  WeakPtr& operator=(WeakPtr&&) = default;
+
+  T* get() const {
+    PERFETTO_DCHECK_THREAD(thread_checker);
+    return handle_ ? *handle_.get() : nullptr;
+  }
+  T* operator->() const { return get(); }
+  T& operator*() const { return *get(); }
+
+  explicit operator bool() const { return !!get(); }
+
+ private:
+  friend class WeakPtrFactory<T>;
+  explicit WeakPtr(const std::shared_ptr<T*>& handle) : handle_(handle) {}
+
+  std::shared_ptr<T*> handle_;
+  PERFETTO_THREAD_CHECKER(thread_checker)
+};
+
+template <typename T>
+class WeakPtrFactory {
+ public:
+  explicit WeakPtrFactory(T* owner)
+      : weak_ptr_(std::shared_ptr<T*>(new T* {owner})) {
+    PERFETTO_DCHECK_THREAD(thread_checker);
+  }
+
+  ~WeakPtrFactory() {
+    PERFETTO_DCHECK_THREAD(thread_checker);
+    *(weak_ptr_.handle_.get()) = nullptr;
+  }
+
+  // Can be safely called on any thread, since it simply copies |weak_ptr_|.
+  // Note that any accesses to the returned pointer need to be made on the
+  // thread that created/reset the factory.
+  WeakPtr<T> GetWeakPtr() const { return weak_ptr_; }
+
+  // Reset the factory to a new owner & thread. May only be called before any
+  // weak pointers were passed out. Future weak pointers will be valid on the
+  // calling thread.
+  void Reset(T* owner) {
+    // Reset thread checker to current thread.
+    PERFETTO_DETACH_FROM_THREAD(thread_checker);
+    PERFETTO_DCHECK_THREAD(thread_checker);
+
+    // We should not have passed out any weak pointers yet at this point.
+    PERFETTO_DCHECK(weak_ptr_.handle_.use_count() == 1);
+
+    weak_ptr_ = WeakPtr<T>(std::shared_ptr<T*>(new T* {owner}));
+  }
+
+ private:
+  WeakPtrFactory(const WeakPtrFactory&) = delete;
+  WeakPtrFactory& operator=(const WeakPtrFactory&) = delete;
+
+  WeakPtr<T> weak_ptr_;
+  PERFETTO_THREAD_CHECKER(thread_checker)
+};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_WEAK_PTR_H_
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_PERIODIC_TASK_H_
+#define INCLUDE_PERFETTO_EXT_BASE_PERIODIC_TASK_H_
+
+#include <functional>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
+
+namespace perfetto {
+namespace base {
+
+class TaskRunner;
+
+// A periodic task utility class. It wraps the logic necessary to do periodic
+// tasks using a TaskRunner, taking care of subtleties like ensuring that
+// outstanding tasks are cancelled after reset/dtor.
+// Tasks are aligned on wall time, this is to ensure that when using multiple
+// periodic tasks, they happen at the same time, minimizing wakeups.
+// On Linux/Android it also supports suspend-aware mode (via timerfd). On other
+// operating systems it falls back to PostDelayedTask, which is not
+// suspend-aware.
+// TODO(primiano): this should probably become a periodic timer scheduler, so we
+// can use one FD for everything rather than one FD per task. For now we take
+// the hit of a FD-per-task to keep this low-risk.
+class PeriodicTask {
+ public:
+  explicit PeriodicTask(base::TaskRunner*);
+  ~PeriodicTask();  // Calls Reset().
+
+  struct Args {
+    uint32_t period_ms = 0;
+    std::function<void()> task = nullptr;
+    bool start_first_task_immediately = false;
+    bool use_suspend_aware_timer = false;
+  };
+
+  void Start(Args);
+
+  // Safe to be called multiple times, even without calling Start():
+  void Reset();
+
+  // No copy or move. WeakPtr-wrapped pointers to |this| are posted on the
+  // task runner, this class is not easily movable.
+  PeriodicTask(const PeriodicTask&) = delete;
+  PeriodicTask& operator=(const PeriodicTask&) = delete;
+  PeriodicTask(PeriodicTask&&) = delete;
+  PeriodicTask& operator=(PeriodicTask&&) = delete;
+
+  base::PlatformHandle timer_fd_for_testing() { return *timer_fd_; }
+
+ private:
+  static void RunTaskAndPostNext(base::WeakPtr<PeriodicTask>,
+                                 uint32_t generation);
+  void PostNextTask();
+  void ResetTimerFd();
+
+  base::TaskRunner* const task_runner_;
+  Args args_;
+  uint32_t generation_ = 0;
+  base::ScopedPlatformHandle timer_fd_;
+
+  PERFETTO_THREAD_CHECKER(thread_checker_)
+  base::WeakPtrFactory<PeriodicTask> weak_ptr_factory_;  // Keep last.
+};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_PERIODIC_TASK_H_
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/periodic_task.h"
+
+#include <limits>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/base/time.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
+    (PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && __ANDROID_API__ >= 19)
+#include <sys/timerfd.h>
+#endif
+
+namespace perfetto {
+namespace base {
+
+namespace {
+base::ScopedPlatformHandle CreateTimerFd(uint32_t period_ms) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
+    (PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && __ANDROID_API__ >= 19)
+  base::ScopedPlatformHandle tfd(
+      timerfd_create(CLOCK_BOOTTIME, TFD_CLOEXEC | TFD_NONBLOCK));
+  // The initial phase, aligned on wall clock.
+  uint32_t phase_ms =
+      period_ms -
+      static_cast<uint32_t>(base::GetBootTimeNs().count() % period_ms);
+  struct itimerspec its {};
+  // The "1 +" is to make sure that we never pass a zero it_value in the
+  // unlikely case of phase_ms being 0. That would cause the timer to be
+  // considered disarmed by timerfd_settime.
+  its.it_value.tv_sec = static_cast<time_t>(phase_ms / 1000u);
+  its.it_value.tv_nsec = 1 + static_cast<long>((phase_ms % 1000u) * 1000000u);
+  its.it_interval.tv_sec = static_cast<time_t>(period_ms / 1000u);
+  its.it_interval.tv_nsec = static_cast<long>((period_ms % 1000u) * 1000000u);
+  if (timerfd_settime(*tfd, 0, &its, nullptr) < 0)
+    return base::ScopedPlatformHandle();
+  return tfd;
+#else
+  base::ignore_result(period_ms);
+  return base::ScopedPlatformHandle();
+#endif
+}
+}  // namespace
+
+PeriodicTask::PeriodicTask(base::TaskRunner* task_runner)
+    : task_runner_(task_runner), weak_ptr_factory_(this) {}
+
+PeriodicTask::~PeriodicTask() {
+  Reset();
+}
+
+void PeriodicTask::Start(Args args) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  Reset();
+  if (args.period_ms == 0 || !args.task) {
+    PERFETTO_DCHECK(args.period_ms > 0);
+    PERFETTO_DCHECK(args.task);
+    return;
+  }
+  args_ = std::move(args);
+  if (args_.use_suspend_aware_timer) {
+    timer_fd_ = CreateTimerFd(args_.period_ms);
+    if (timer_fd_) {
+      auto weak_this = weak_ptr_factory_.GetWeakPtr();
+      task_runner_->AddFileDescriptorWatch(
+          *timer_fd_,
+          std::bind(PeriodicTask::RunTaskAndPostNext, weak_this, generation_));
+    } else {
+      PERFETTO_DPLOG("timerfd not supported, falling back on PostDelayedTask");
+    }
+  }  // if (use_suspend_aware_timer).
+
+  if (!timer_fd_)
+    PostNextTask();
+
+  if (args_.start_first_task_immediately)
+    args_.task();
+}
+
+void PeriodicTask::PostNextTask() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DCHECK(args_.period_ms > 0);
+  PERFETTO_DCHECK(!timer_fd_);
+  uint32_t delay_ms =
+      args_.period_ms -
+      static_cast<uint32_t>(base::GetWallTimeMs().count() % args_.period_ms);
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostDelayedTask(
+      std::bind(PeriodicTask::RunTaskAndPostNext, weak_this, generation_),
+      delay_ms);
+}
+
+// static
+// This function can be called in two ways (both from the TaskRunner):
+// 1. When using a timerfd, this task is registered as a FD watch.
+// 2. When using PostDelayedTask, this is the task posted on the TaskRunner.
+void PeriodicTask::RunTaskAndPostNext(base::WeakPtr<PeriodicTask> thiz,
+                                      uint32_t generation) {
+  if (!thiz || !thiz->args_.task || generation != thiz->generation_)
+    return;  // Destroyed or Reset() in the meanwhile.
+  PERFETTO_DCHECK_THREAD(thiz->thread_checker_);
+  if (thiz->timer_fd_) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    PERFETTO_FATAL("timerfd for periodic tasks unsupported on Windows");
+#else
+    // If we are using a timerfd there is no need to repeatedly call
+    // PostDelayedTask(). The kernel will wakeup the timer fd periodically. We
+    // just need to read() it.
+    uint64_t ignored = 0;
+    errno = 0;
+    auto rsize = base::Read(*thiz->timer_fd_, &ignored, sizeof(&ignored));
+    if (rsize != sizeof(uint64_t)) {
+      if (errno == EAGAIN)
+        return;  // A spurious wakeup. Rare, but can happen, just ignore.
+      PERFETTO_PLOG("read(timerfd) failed, falling back on PostDelayedTask");
+      thiz->ResetTimerFd();
+    }
+#endif
+  }
+  // The repetition of the if() is to deal with the ResetTimerFd() case above.
+  if (!thiz->timer_fd_) {
+    thiz->PostNextTask();
+  }
+  // Create a copy of the task in the unlikely event that the task ends up
+  // up destroying the PeriodicTask object or calling Reset() on it. That would
+  // cause a reset of the args_.task itself, which would invalidate the task
+  // bind state while we are invoking it.
+  auto task = thiz->args_.task;
+  task();
+}
+
+void PeriodicTask::Reset() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  ++generation_;
+  args_ = Args();
+  PERFETTO_DCHECK(!args_.task);
+  ResetTimerFd();
+}
+
+void PeriodicTask::ResetTimerFd() {
+  if (!timer_fd_)
+    return;
+  task_runner_->RemoveFileDescriptorWatch(*timer_fd_);
+  timer_fd_.reset();
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/pipe.cc
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/pipe.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+#include <fcntl.h>  // For O_BINARY (Windows) and F_SETxx (UNIX)
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <Windows.h>
+#include <namedpipeapi.h>
+#else
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+namespace perfetto {
+namespace base {
+
+Pipe::Pipe() = default;
+Pipe::Pipe(Pipe&&) noexcept = default;
+Pipe& Pipe::operator=(Pipe&&) = default;
+
+Pipe Pipe::Create(Flags flags) {
+  PlatformHandle fds[2];
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  PERFETTO_CHECK(::CreatePipe(&fds[0], &fds[1], /*lpPipeAttributes=*/nullptr,
+                              0 /*default size*/));
+#else
+  PERFETTO_CHECK(pipe(fds) == 0);
+  PERFETTO_CHECK(fcntl(fds[0], F_SETFD, FD_CLOEXEC) == 0);
+  PERFETTO_CHECK(fcntl(fds[1], F_SETFD, FD_CLOEXEC) == 0);
+#endif
+  Pipe p;
+  p.rd.reset(fds[0]);
+  p.wr.reset(fds[1]);
+
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  if (flags == kBothNonBlock || flags == kRdNonBlock) {
+    int cur_flags = fcntl(*p.rd, F_GETFL, 0);
+    PERFETTO_CHECK(cur_flags >= 0);
+    PERFETTO_CHECK(fcntl(*p.rd, F_SETFL, cur_flags | O_NONBLOCK) == 0);
+  }
+
+  if (flags == kBothNonBlock || flags == kWrNonBlock) {
+    int cur_flags = fcntl(*p.wr, F_GETFL, 0);
+    PERFETTO_CHECK(cur_flags >= 0);
+    PERFETTO_CHECK(fcntl(*p.wr, F_SETFL, cur_flags | O_NONBLOCK) == 0);
+  }
+#else
+  PERFETTO_CHECK(flags == kBothBlock);
+#endif
+  return p;
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/status.cc
+// gen_amalgamated begin header: include/perfetto/base/status.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_BASE_STATUS_H_
+#define INCLUDE_PERFETTO_BASE_STATUS_H_
+
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+namespace perfetto {
+namespace base {
+
+// Represents either the success or the failure message of a function.
+// This can used as the return type of functions which would usually return an
+// bool for success or int for errno but also wants to add some string context
+// (ususally for logging).
+class PERFETTO_EXPORT Status {
+ public:
+  Status() : ok_(true) {}
+  explicit Status(std::string msg) : ok_(false), message_(std::move(msg)) {
+    PERFETTO_CHECK(!message_.empty());
+  }
+
+  // Copy operations.
+  Status(const Status&) = default;
+  Status& operator=(const Status&) = default;
+
+  // Move operations. The moved-from state is valid but unspecified.
+  Status(Status&&) noexcept = default;
+  Status& operator=(Status&&) = default;
+
+  bool ok() const { return ok_; }
+
+  // When ok() is false this returns the error message. Returns the empty string
+  // otherwise.
+  const std::string& message() const { return message_; }
+  const char* c_message() const { return message_.c_str(); }
+
+ private:
+  bool ok_ = false;
+  std::string message_;
+};
+
+// Returns a status object which represents the Ok status.
+inline Status OkStatus() {
+  return Status();
+}
+
+PERFETTO_PRINTF_FORMAT(1, 2) Status ErrStatus(const char* format, ...);
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_BASE_STATUS_H_
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/base/status.h"
+
+#include <stdarg.h>
+
+namespace perfetto {
+namespace base {
+
+Status ErrStatus(const char* format, ...) {
+  char buffer[1024];
+  va_list ap;
+  va_start(ap, format);
+  vsnprintf(buffer, sizeof(buffer), format, ap);
+  va_end(ap);
+  Status status(buffer);
+  return status;
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/string_splitter.cc
+// gen_amalgamated begin header: include/perfetto/ext/base/string_splitter.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_STRING_SPLITTER_H_
+#define INCLUDE_PERFETTO_EXT_BASE_STRING_SPLITTER_H_
+
+#include <string>
+
+namespace perfetto {
+namespace base {
+
+// C++ version of strtok(). Splits a string without making copies or any heap
+// allocations. Destructs the original string passed in input.
+// Supports the special case of using \0 as a delimiter.
+// The token returned in output are valid as long as the input string is valid.
+class StringSplitter {
+ public:
+  // Can take ownership of the string if passed via std::move(), e.g.:
+  // StringSplitter(std::move(str), '\n');
+  StringSplitter(std::string, char delimiter);
+
+  // Splits a C-string. The input string will be forcefully null-terminated (so
+  // str[size - 1] should be == '\0' or the last char will be truncated).
+  StringSplitter(char* str, size_t size, char delimiter);
+
+  // Splits the current token from an outer StringSplitter instance. This is to
+  // chain splitters as follows:
+  // for (base::StringSplitter lines(x, '\n'); ss.Next();)
+  //   for (base::StringSplitter words(&lines, ' '); words.Next();)
+  StringSplitter(StringSplitter*, char delimiter);
+
+  // Returns true if a token is found (in which case it will be stored in
+  // cur_token()), false if no more tokens are found.
+  bool Next();
+
+  // Returns the current token iff last call to Next() returned true. In this
+  // case it guarantees that the returned string is always null terminated.
+  // In all other cases (before the 1st call to Next() and after Next() returns
+  // false) returns nullptr.
+  char* cur_token() { return cur_; }
+
+  // Returns the length of the current token (excluding the null terminator).
+  size_t cur_token_size() const { return cur_size_; }
+
+ private:
+  StringSplitter(const StringSplitter&) = delete;
+  StringSplitter& operator=(const StringSplitter&) = delete;
+  void Initialize(char* str, size_t size);
+
+  std::string str_;
+  char* cur_;
+  size_t cur_size_;
+  char* next_;
+  char* end_;  // STL-style, points one past the last char.
+  const char delimiter_;
+};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_STRING_SPLITTER_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/string_splitter.h"
+
+#include <utility>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+namespace perfetto {
+namespace base {
+
+StringSplitter::StringSplitter(std::string str, char delimiter)
+    : str_(std::move(str)), delimiter_(delimiter) {
+  // It's legal to access str[str.size()] in C++11 (it always returns \0),
+  // hence the +1 (which becomes just size() after the -1 in Initialize()).
+  Initialize(&str_[0], str_.size() + 1);
+}
+
+StringSplitter::StringSplitter(char* str, size_t size, char delimiter)
+    : delimiter_(delimiter) {
+  Initialize(str, size);
+}
+
+StringSplitter::StringSplitter(StringSplitter* outer, char delimiter)
+    : delimiter_(delimiter) {
+  Initialize(outer->cur_token(), outer->cur_token_size() + 1);
+}
+
+void StringSplitter::Initialize(char* str, size_t size) {
+  PERFETTO_DCHECK(!size || str);
+  next_ = str;
+  end_ = str + size;
+  cur_ = nullptr;
+  cur_size_ = 0;
+  if (size)
+    next_[size - 1] = '\0';
+}
+
+bool StringSplitter::Next() {
+  for (; next_ < end_; next_++) {
+    if (*next_ == delimiter_)
+      continue;
+    cur_ = next_;
+    for (;; next_++) {
+      if (*next_ == delimiter_) {
+        cur_size_ = static_cast<size_t>(next_ - cur_);
+        *(next_++) = '\0';
+        break;
+      }
+      if (*next_ == '\0') {
+        cur_size_ = static_cast<size_t>(next_ - cur_);
+        next_ = end_;
+        break;
+      }
+    }
+    if (*cur_)
+      return true;
+    break;
+  }
+  cur_ = nullptr;
+  cur_size_ = 0;
+  return false;
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/string_utils.cc
+// gen_amalgamated begin header: include/perfetto/ext/base/string_utils.h
+// gen_amalgamated begin header: include/perfetto/ext/base/optional.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_OPTIONAL_H_
+#define INCLUDE_PERFETTO_EXT_BASE_OPTIONAL_H_
+
+#include <functional>
+#include <type_traits>
+#include <utility>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+namespace perfetto {
+namespace base {
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/in_place_t
+struct in_place_t {};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/nullopt_t
+struct nullopt_t {
+  constexpr explicit nullopt_t(int) {}
+};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/in_place
+constexpr in_place_t in_place = {};
+
+// Specification:
+// http://en.cppreference.com/w/cpp/utility/optional/nullopt
+constexpr nullopt_t nullopt(0);
+
+// Forward declaration, which is referred by following helpers.
+template <typename T>
+class Optional;
+
+namespace internal {
+
+template <typename T, bool = std::is_trivially_destructible<T>::value>
+struct OptionalStorageBase {
+  // Initializing |empty_| here instead of using default member initializing
+  // to avoid errors in g++ 4.8.
+  constexpr OptionalStorageBase() : empty_('\0') {}
+
+  template <class... Args>
+  constexpr explicit OptionalStorageBase(in_place_t, Args&&... args)
+      : is_populated_(true), value_(std::forward<Args>(args)...) {}
+
+  // When T is not trivially destructible we must call its
+  // destructor before deallocating its memory.
+  // Note that this hides the (implicitly declared) move constructor, which
+  // would be used for constexpr move constructor in OptionalStorage<T>.
+  // It is needed iff T is trivially move constructible. However, the current
+  // is_trivially_{copy,move}_constructible implementation requires
+  // is_trivially_destructible (which looks a bug, cf:
+  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51452 and
+  // http://cplusplus.github.io/LWG/lwg-active.html#2116), so it is not
+  // necessary for this case at the moment. Please see also the destructor
+  // comment in "is_trivially_destructible = true" specialization below.
+  ~OptionalStorageBase() {
+    if (is_populated_)
+      value_.~T();
+  }
+
+  template <class... Args>
+  void Init(Args&&... args) {
+    PERFETTO_DCHECK(!is_populated_);
+    ::new (&value_) T(std::forward<Args>(args)...);
+    is_populated_ = true;
+  }
+
+  bool is_populated_ = false;
+  union {
+    // |empty_| exists so that the union will always be initialized, even when
+    // it doesn't contain a value. Union members must be initialized for the
+    // constructor to be 'constexpr'.
+    char empty_;
+    T value_;
+  };
+};
+
+template <typename T>
+struct OptionalStorageBase<T, true /* trivially destructible */> {
+  // Initializing |empty_| here instead of using default member initializing
+  // to avoid errors in g++ 4.8.
+  constexpr OptionalStorageBase() : empty_('\0') {}
+
+  template <class... Args>
+  constexpr explicit OptionalStorageBase(in_place_t, Args&&... args)
+      : is_populated_(true), value_(std::forward<Args>(args)...) {}
+
+  // When T is trivially destructible (i.e. its destructor does nothing) there
+  // is no need to call it. Implicitly defined destructor is trivial, because
+  // both members (bool and union containing only variants which are trivially
+  // destructible) are trivially destructible.
+  // Explicitly-defaulted destructor is also trivial, but do not use it here,
+  // because it hides the implicit move constructor. It is needed to implement
+  // constexpr move constructor in OptionalStorage iff T is trivially move
+  // constructible. Note that, if T is trivially move constructible, the move
+  // constructor of OptionalStorageBase<T> is also implicitly defined and it is
+  // trivially move constructor. If T is not trivially move constructible,
+  // "not declaring move constructor without destructor declaration" here means
+  // "delete move constructor", which works because any move constructor of
+  // OptionalStorage will not refer to it in that case.
+
+  template <class... Args>
+  void Init(Args&&... args) {
+    PERFETTO_DCHECK(!is_populated_);
+    ::new (&value_) T(std::forward<Args>(args)...);
+    is_populated_ = true;
+  }
+
+  bool is_populated_ = false;
+  union {
+    // |empty_| exists so that the union will always be initialized, even when
+    // it doesn't contain a value. Union members must be initialized for the
+    // constructor to be 'constexpr'.
+    char empty_;
+    T value_;
+  };
+};
+
+// Implement conditional constexpr copy and move constructors. These are
+// constexpr if is_trivially_{copy,move}_constructible<T>::value is true
+// respectively. If each is true, the corresponding constructor is defined as
+// "= default;", which generates a constexpr constructor (In this case,
+// the condition of constexpr-ness is satisfied because the base class also has
+// compiler generated constexpr {copy,move} constructors). Note that
+// placement-new is prohibited in constexpr.
+template <typename T, bool = std::is_trivially_copy_constructible<T>::value>
+struct OptionalStorage : OptionalStorageBase<T> {
+  // This is no trivially {copy,move} constructible case. Other cases are
+  // defined below as specializations.
+
+  // Accessing the members of template base class requires explicit
+  // declaration.
+  using OptionalStorageBase<T>::is_populated_;
+  using OptionalStorageBase<T>::value_;
+  using OptionalStorageBase<T>::Init;
+
+  // Inherit constructors (specifically, the in_place constructor).
+  using OptionalStorageBase<T>::OptionalStorageBase;
+
+  // User defined constructor deletes the default constructor.
+  // Define it explicitly.
+  OptionalStorage() = default;
+
+  OptionalStorage(const OptionalStorage& other) : OptionalStorageBase<T>() {
+    if (other.is_populated_)
+      Init(other.value_);
+  }
+
+  OptionalStorage(OptionalStorage&& other) noexcept(
+      std::is_nothrow_move_constructible<T>::value) {
+    if (other.is_populated_)
+      Init(std::move(other.value_));
+  }
+};
+
+template <typename T>
+struct OptionalStorage<T, true /* trivially copy constructible */>
+    : OptionalStorageBase<T> {
+  using OptionalStorageBase<T>::is_populated_;
+  using OptionalStorageBase<T>::value_;
+  using OptionalStorageBase<T>::Init;
+  using OptionalStorageBase<T>::OptionalStorageBase;
+
+  OptionalStorage() = default;
+  OptionalStorage(const OptionalStorage& other) = default;
+
+  OptionalStorage(OptionalStorage&& other) noexcept(
+      std::is_nothrow_move_constructible<T>::value) {
+    if (other.is_populated_)
+      Init(std::move(other.value_));
+  }
+};
+
+// Base class to support conditionally usable copy-/move- constructors
+// and assign operators.
+template <typename T>
+class OptionalBase {
+  // This class provides implementation rather than public API, so everything
+  // should be hidden. Often we use composition, but we cannot in this case
+  // because of C++ language restriction.
+ protected:
+  constexpr OptionalBase() = default;
+  constexpr OptionalBase(const OptionalBase& other) = default;
+  constexpr OptionalBase(OptionalBase&& other) = default;
+
+  template <class... Args>
+  constexpr explicit OptionalBase(in_place_t, Args&&... args)
+      : storage_(in_place, std::forward<Args>(args)...) {}
+
+  // Implementation of converting constructors.
+  template <typename U>
+  explicit OptionalBase(const OptionalBase<U>& other) {
+    if (other.storage_.is_populated_)
+      storage_.Init(other.storage_.value_);
+  }
+
+  template <typename U>
+  explicit OptionalBase(OptionalBase<U>&& other) {
+    if (other.storage_.is_populated_)
+      storage_.Init(std::move(other.storage_.value_));
+  }
+
+  ~OptionalBase() = default;
+
+  OptionalBase& operator=(const OptionalBase& other) {
+    CopyAssign(other);
+    return *this;
+  }
+
+  OptionalBase& operator=(OptionalBase&& other) noexcept(
+      std::is_nothrow_move_assignable<T>::value&&
+          std::is_nothrow_move_constructible<T>::value) {
+    MoveAssign(std::move(other));
+    return *this;
+  }
+
+  template <typename U>
+  void CopyAssign(const OptionalBase<U>& other) {
+    if (other.storage_.is_populated_)
+      InitOrAssign(other.storage_.value_);
+    else
+      FreeIfNeeded();
+  }
+
+  template <typename U>
+  void MoveAssign(OptionalBase<U>&& other) {
+    if (other.storage_.is_populated_)
+      InitOrAssign(std::move(other.storage_.value_));
+    else
+      FreeIfNeeded();
+  }
+
+  template <typename U>
+  void InitOrAssign(U&& value) {
+    if (storage_.is_populated_)
+      storage_.value_ = std::forward<U>(value);
+    else
+      storage_.Init(std::forward<U>(value));
+  }
+
+  void FreeIfNeeded() {
+    if (!storage_.is_populated_)
+      return;
+    storage_.value_.~T();
+    storage_.is_populated_ = false;
+  }
+
+  // For implementing conversion, allow access to other typed OptionalBase
+  // class.
+  template <typename U>
+  friend class OptionalBase;
+
+  OptionalStorage<T> storage_;
+};
+
+// The following {Copy,Move}{Constructible,Assignable} structs are helpers to
+// implement constructor/assign-operator overloading. Specifically, if T is
+// is not movable but copyable, Optional<T>'s move constructor should not
+// participate in overload resolution. This inheritance trick implements that.
+template <bool is_copy_constructible>
+struct CopyConstructible {};
+
+template <>
+struct CopyConstructible<false> {
+  constexpr CopyConstructible() = default;
+  constexpr CopyConstructible(const CopyConstructible&) = delete;
+  constexpr CopyConstructible(CopyConstructible&&) = default;
+  CopyConstructible& operator=(const CopyConstructible&) = default;
+  CopyConstructible& operator=(CopyConstructible&&) = default;
+};
+
+template <bool is_move_constructible>
+struct MoveConstructible {};
+
+template <>
+struct MoveConstructible<false> {
+  constexpr MoveConstructible() = default;
+  constexpr MoveConstructible(const MoveConstructible&) = default;
+  constexpr MoveConstructible(MoveConstructible&&) = delete;
+  MoveConstructible& operator=(const MoveConstructible&) = default;
+  MoveConstructible& operator=(MoveConstructible&&) = default;
+};
+
+template <bool is_copy_assignable>
+struct CopyAssignable {};
+
+template <>
+struct CopyAssignable<false> {
+  constexpr CopyAssignable() = default;
+  constexpr CopyAssignable(const CopyAssignable&) = default;
+  constexpr CopyAssignable(CopyAssignable&&) = default;
+  CopyAssignable& operator=(const CopyAssignable&) = delete;
+  CopyAssignable& operator=(CopyAssignable&&) = default;
+};
+
+template <bool is_move_assignable>
+struct MoveAssignable {};
+
+template <>
+struct MoveAssignable<false> {
+  constexpr MoveAssignable() = default;
+  constexpr MoveAssignable(const MoveAssignable&) = default;
+  constexpr MoveAssignable(MoveAssignable&&) = default;
+  MoveAssignable& operator=(const MoveAssignable&) = default;
+  MoveAssignable& operator=(MoveAssignable&&) = delete;
+};
+
+// Helper to conditionally enable converting constructors and assign operators.
+template <typename T, typename U>
+struct IsConvertibleFromOptional
+    : std::integral_constant<
+          bool,
+          std::is_constructible<T, Optional<U>&>::value ||
+              std::is_constructible<T, const Optional<U>&>::value ||
+              std::is_constructible<T, Optional<U>&&>::value ||
+              std::is_constructible<T, const Optional<U>&&>::value ||
+              std::is_convertible<Optional<U>&, T>::value ||
+              std::is_convertible<const Optional<U>&, T>::value ||
+              std::is_convertible<Optional<U>&&, T>::value ||
+              std::is_convertible<const Optional<U>&&, T>::value> {};
+
+template <typename T, typename U>
+struct IsAssignableFromOptional
+    : std::integral_constant<
+          bool,
+          IsConvertibleFromOptional<T, U>::value ||
+              std::is_assignable<T&, Optional<U>&>::value ||
+              std::is_assignable<T&, const Optional<U>&>::value ||
+              std::is_assignable<T&, Optional<U>&&>::value ||
+              std::is_assignable<T&, const Optional<U>&&>::value> {};
+
+// Forward compatibility for C++17.
+// Introduce one more deeper nested namespace to avoid leaking using std::swap.
+namespace swappable_impl {
+using std::swap;
+
+struct IsSwappableImpl {
+  // Tests if swap can be called. Check<T&>(0) returns true_type iff swap is
+  // available for T. Otherwise, Check's overload resolution falls back to
+  // Check(...) declared below thanks to SFINAE, so returns false_type.
+  template <typename T>
+  static auto Check(int)
+      -> decltype(swap(std::declval<T>(), std::declval<T>()), std::true_type());
+
+  template <typename T>
+  static std::false_type Check(...);
+};
+}  // namespace swappable_impl
+
+template <typename T>
+struct IsSwappable : decltype(swappable_impl::IsSwappableImpl::Check<T&>(0)) {};
+
+// Forward compatibility for C++20.
+template <typename T>
+using RemoveCvRefT =
+    typename std::remove_cv<typename std::remove_reference<T>::type>::type;
+
+}  // namespace internal
+
+// On Windows, by default, empty-base class optimization does not work,
+// which means even if the base class is empty struct, it still consumes one
+// byte for its body. __declspec(empty_bases) enables the optimization.
+// cf)
+// https://blogs.msdn.microsoft.com/vcblog/2016/03/30/optimizing-the-layout-of-empty-base-classes-in-vs2015-update-2-3/
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
+    !PERFETTO_BUILDFLAG(PERFETTO_COMPILER_GCC)
+#define OPTIONAL_DECLSPEC_EMPTY_BASES __declspec(empty_bases)
+#else
+#define OPTIONAL_DECLSPEC_EMPTY_BASES
+#endif
+
+// base::Optional is a Chromium version of the C++17 optional class:
+// std::optional documentation:
+// http://en.cppreference.com/w/cpp/utility/optional
+// Chromium documentation:
+// https://chromium.googlesource.com/chromium/src/+/master/docs/optional.md
+//
+// These are the differences between the specification and the implementation:
+// - Constructors do not use 'constexpr' as it is a C++14 extension.
+// - 'constexpr' might be missing in some places for reasons specified locally.
+// - No exceptions are thrown, because they are banned from Chromium.
+//   Marked noexcept for only move constructor and move assign operators.
+// - All the non-members are in the 'base' namespace instead of 'std'.
+//
+// Note that T cannot have a constructor T(Optional<T>) etc. Optional<T>
+// PERFETTO_CHECKs T's constructor (specifically via IsConvertibleFromOptional),
+// and in the PERFETTO_CHECK whether T can be constructible from Optional<T>,
+// which is recursive so it does not work. As of Feb 2018, std::optional C++17
+// implementation in both clang and gcc has same limitation. MSVC SFINAE looks
+// to have different behavior, but anyway it reports an error, too.
+//
+// This file is a modified version of optional.h from Chromium at revision
+// 5e71bd454e60511c1293c0c686544aaa76094424. The changes remove C++14/C++17
+// specific code and replace with C++11 counterparts.
+template <typename T>
+class OPTIONAL_DECLSPEC_EMPTY_BASES Optional
+    : public internal::OptionalBase<T>,
+      public internal::CopyConstructible<std::is_copy_constructible<T>::value>,
+      public internal::MoveConstructible<std::is_move_constructible<T>::value>,
+      public internal::CopyAssignable<std::is_copy_constructible<T>::value &&
+                                      std::is_copy_assignable<T>::value>,
+      public internal::MoveAssignable<std::is_move_constructible<T>::value &&
+                                      std::is_move_assignable<T>::value> {
+ public:
+#undef OPTIONAL_DECLSPEC_EMPTY_BASES
+  using value_type = T;
+
+  // Defer default/copy/move constructor implementation to OptionalBase.
+  constexpr Optional() = default;
+  constexpr Optional(const Optional& other) = default;
+  constexpr Optional(Optional&& other) noexcept(
+      std::is_nothrow_move_constructible<T>::value) = default;
+
+  constexpr Optional(nullopt_t) {}  // NOLINT(runtime/explicit)
+
+  // Converting copy constructor. "explicit" only if
+  // std::is_convertible<const U&, T>::value is false. It is implemented by
+  // declaring two almost same constructors, but that condition in enable_if_t
+  // is different, so that either one is chosen, thanks to SFINAE.
+  template <typename U,
+            typename std::enable_if<
+                std::is_constructible<T, const U&>::value &&
+                    !internal::IsConvertibleFromOptional<T, U>::value &&
+                    std::is_convertible<const U&, T>::value,
+                bool>::type = false>
+  Optional(const Optional<U>& other) : internal::OptionalBase<T>(other) {}
+
+  template <typename U,
+            typename std::enable_if<
+                std::is_constructible<T, const U&>::value &&
+                    !internal::IsConvertibleFromOptional<T, U>::value &&
+                    !std::is_convertible<const U&, T>::value,
+                bool>::type = false>
+  explicit Optional(const Optional<U>& other)
+      : internal::OptionalBase<T>(other) {}
+
+  // Converting move constructor. Similar to converting copy constructor,
+  // declaring two (explicit and non-explicit) constructors.
+  template <typename U,
+            typename std::enable_if<
+                std::is_constructible<T, U&&>::value &&
+                    !internal::IsConvertibleFromOptional<T, U>::value &&
+                    std::is_convertible<U&&, T>::value,
+                bool>::type = false>
+  Optional(Optional<U>&& other) : internal::OptionalBase<T>(std::move(other)) {}
+
+  template <typename U,
+            typename std::enable_if<
+                std::is_constructible<T, U&&>::value &&
+                    !internal::IsConvertibleFromOptional<T, U>::value &&
+                    !std::is_convertible<U&&, T>::value,
+                bool>::type = false>
+  explicit Optional(Optional<U>&& other)
+      : internal::OptionalBase<T>(std::move(other)) {}
+
+  template <class... Args>
+  constexpr explicit Optional(in_place_t, Args&&... args)
+      : internal::OptionalBase<T>(in_place, std::forward<Args>(args)...) {}
+
+  template <class U,
+            class... Args,
+            class = typename std::enable_if<
+                std::is_constructible<value_type,
+                                      std::initializer_list<U>&,
+                                      Args...>::value>::type>
+  constexpr explicit Optional(in_place_t,
+                              std::initializer_list<U> il,
+                              Args&&... args)
+      : internal::OptionalBase<T>(in_place, il, std::forward<Args>(args)...) {}
+
+  // Forward value constructor. Similar to converting constructors,
+  // conditionally explicit.
+  template <
+      typename U = value_type,
+      typename std::enable_if<
+          std::is_constructible<T, U&&>::value &&
+              !std::is_same<internal::RemoveCvRefT<U>, in_place_t>::value &&
+              !std::is_same<internal::RemoveCvRefT<U>, Optional<T>>::value &&
+              std::is_convertible<U&&, T>::value,
+          bool>::type = false>
+  constexpr Optional(U&& value)
+      : internal::OptionalBase<T>(in_place, std::forward<U>(value)) {}
+
+  template <
+      typename U = value_type,
+      typename std::enable_if<
+          std::is_constructible<T, U&&>::value &&
+              !std::is_same<internal::RemoveCvRefT<U>, in_place_t>::value &&
+              !std::is_same<internal::RemoveCvRefT<U>, Optional<T>>::value &&
+              !std::is_convertible<U&&, T>::value,
+          bool>::type = false>
+  constexpr explicit Optional(U&& value)
+      : internal::OptionalBase<T>(in_place, std::forward<U>(value)) {}
+
+  ~Optional() = default;
+
+  // Defer copy-/move- assign operator implementation to OptionalBase.
+  Optional& operator=(const Optional& other) = default;
+  Optional& operator=(Optional&& other) noexcept(
+      std::is_nothrow_move_assignable<T>::value&&
+          std::is_nothrow_move_constructible<T>::value) = default;
+
+  Optional& operator=(nullopt_t) {
+    FreeIfNeeded();
+    return *this;
+  }
+
+  // Perfect-forwarded assignment.
+  template <typename U>
+  typename std::enable_if<
+      !std::is_same<internal::RemoveCvRefT<U>, Optional<T>>::value &&
+          std::is_constructible<T, U>::value &&
+          std::is_assignable<T&, U>::value &&
+          (!std::is_scalar<T>::value ||
+           !std::is_same<typename std::decay<U>::type, T>::value),
+      Optional&>::type
+  operator=(U&& value) {
+    InitOrAssign(std::forward<U>(value));
+    return *this;
+  }
+
+  // Copy assign the state of other.
+  template <typename U>
+  typename std::enable_if<!internal::IsAssignableFromOptional<T, U>::value &&
+                              std::is_constructible<T, const U&>::value &&
+                              std::is_assignable<T&, const U&>::value,
+                          Optional&>::type
+  operator=(const Optional<U>& other) {
+    CopyAssign(other);
+    return *this;
+  }
+
+  // Move assign the state of other.
+  template <typename U>
+  typename std::enable_if<!internal::IsAssignableFromOptional<T, U>::value &&
+                              std::is_constructible<T, U>::value &&
+                              std::is_assignable<T&, U>::value,
+                          Optional&>::type
+  operator=(Optional<U>&& other) {
+    MoveAssign(std::move(other));
+    return *this;
+  }
+
+  const T* operator->() const {
+    PERFETTO_DCHECK(storage_.is_populated_);
+    return &storage_.value_;
+  }
+
+  T* operator->() {
+    PERFETTO_DCHECK(storage_.is_populated_);
+    return &storage_.value_;
+  }
+
+  const T& operator*() const& {
+    PERFETTO_DCHECK(storage_.is_populated_);
+    return storage_.value_;
+  }
+
+  T& operator*() & {
+    PERFETTO_DCHECK(storage_.is_populated_);
+    return storage_.value_;
+  }
+
+  const T&& operator*() const&& {
+    PERFETTO_DCHECK(storage_.is_populated_);
+    return std::move(storage_.value_);
+  }
+
+  T&& operator*() && {
+    PERFETTO_DCHECK(storage_.is_populated_);
+    return std::move(storage_.value_);
+  }
+
+  constexpr explicit operator bool() const { return storage_.is_populated_; }
+
+  constexpr bool has_value() const { return storage_.is_populated_; }
+
+  T& value() & {
+    PERFETTO_CHECK(storage_.is_populated_);
+    return storage_.value_;
+  }
+
+  const T& value() const& {
+    PERFETTO_CHECK(storage_.is_populated_);
+    return storage_.value_;
+  }
+
+  T&& value() && {
+    PERFETTO_CHECK(storage_.is_populated_);
+    return std::move(storage_.value_);
+  }
+
+  const T&& value() const&& {
+    PERFETTO_CHECK(storage_.is_populated_);
+    return std::move(storage_.value_);
+  }
+
+  template <class U>
+  constexpr T value_or(U&& default_value) const& {
+    static_assert(std::is_convertible<U, T>::value,
+                  "U must be convertible to T");
+    return storage_.is_populated_
+               ? storage_.value_
+               : static_cast<T>(std::forward<U>(default_value));
+  }
+
+  template <class U>
+  T value_or(U&& default_value) && {
+    static_assert(std::is_convertible<U, T>::value,
+                  "U must be convertible to T");
+    return storage_.is_populated_
+               ? std::move(storage_.value_)
+               : static_cast<T>(std::forward<U>(default_value));
+  }
+
+  void swap(Optional& other) {
+    if (!storage_.is_populated_ && !other.storage_.is_populated_)
+      return;
+
+    if (storage_.is_populated_ != other.storage_.is_populated_) {
+      if (storage_.is_populated_) {
+        other.storage_.Init(std::move(storage_.value_));
+        FreeIfNeeded();
+      } else {
+        storage_.Init(std::move(other.storage_.value_));
+        other.FreeIfNeeded();
+      }
+      return;
+    }
+
+    PERFETTO_DCHECK(storage_.is_populated_ && other.storage_.is_populated_);
+    using std::swap;
+    swap(**this, *other);
+  }
+
+  void reset() { FreeIfNeeded(); }
+
+  template <class... Args>
+  T& emplace(Args&&... args) {
+    FreeIfNeeded();
+    storage_.Init(std::forward<Args>(args)...);
+    return storage_.value_;
+  }
+
+  template <class U, class... Args>
+  typename std::enable_if<
+      std::is_constructible<T, std::initializer_list<U>&, Args&&...>::value,
+      T&>::type
+  emplace(std::initializer_list<U> il, Args&&... args) {
+    FreeIfNeeded();
+    storage_.Init(il, std::forward<Args>(args)...);
+    return storage_.value_;
+  }
+
+ private:
+  // Accessing template base class's protected member needs explicit
+  // declaration to do so.
+  using internal::OptionalBase<T>::CopyAssign;
+  using internal::OptionalBase<T>::FreeIfNeeded;
+  using internal::OptionalBase<T>::InitOrAssign;
+  using internal::OptionalBase<T>::MoveAssign;
+  using internal::OptionalBase<T>::storage_;
+};
+
+// Here after defines comparation operators. The definition follows
+// http://en.cppreference.com/w/cpp/utility/optional/operator_cmp
+// while bool() casting is replaced by has_value() to meet the chromium
+// style guide.
+template <class T, class U>
+bool operator==(const Optional<T>& lhs, const Optional<U>& rhs) {
+  if (lhs.has_value() != rhs.has_value())
+    return false;
+  if (!lhs.has_value())
+    return true;
+  return *lhs == *rhs;
+}
+
+template <class T, class U>
+bool operator!=(const Optional<T>& lhs, const Optional<U>& rhs) {
+  if (lhs.has_value() != rhs.has_value())
+    return true;
+  if (!lhs.has_value())
+    return false;
+  return *lhs != *rhs;
+}
+
+template <class T, class U>
+bool operator<(const Optional<T>& lhs, const Optional<U>& rhs) {
+  if (!rhs.has_value())
+    return false;
+  if (!lhs.has_value())
+    return true;
+  return *lhs < *rhs;
+}
+
+template <class T, class U>
+bool operator<=(const Optional<T>& lhs, const Optional<U>& rhs) {
+  if (!lhs.has_value())
+    return true;
+  if (!rhs.has_value())
+    return false;
+  return *lhs <= *rhs;
+}
+
+template <class T, class U>
+bool operator>(const Optional<T>& lhs, const Optional<U>& rhs) {
+  if (!lhs.has_value())
+    return false;
+  if (!rhs.has_value())
+    return true;
+  return *lhs > *rhs;
+}
+
+template <class T, class U>
+bool operator>=(const Optional<T>& lhs, const Optional<U>& rhs) {
+  if (!rhs.has_value())
+    return true;
+  if (!lhs.has_value())
+    return false;
+  return *lhs >= *rhs;
+}
+
+template <class T>
+constexpr bool operator==(const Optional<T>& opt, nullopt_t) {
+  return !opt;
+}
+
+template <class T>
+constexpr bool operator==(nullopt_t, const Optional<T>& opt) {
+  return !opt;
+}
+
+template <class T>
+constexpr bool operator!=(const Optional<T>& opt, nullopt_t) {
+  return opt.has_value();
+}
+
+template <class T>
+constexpr bool operator!=(nullopt_t, const Optional<T>& opt) {
+  return opt.has_value();
+}
+
+template <class T>
+constexpr bool operator<(const Optional<T>&, nullopt_t) {
+  return false;
+}
+
+template <class T>
+constexpr bool operator<(nullopt_t, const Optional<T>& opt) {
+  return opt.has_value();
+}
+
+template <class T>
+constexpr bool operator<=(const Optional<T>& opt, nullopt_t) {
+  return !opt;
+}
+
+template <class T>
+constexpr bool operator<=(nullopt_t, const Optional<T>&) {
+  return true;
+}
+
+template <class T>
+constexpr bool operator>(const Optional<T>& opt, nullopt_t) {
+  return opt.has_value();
+}
+
+template <class T>
+constexpr bool operator>(nullopt_t, const Optional<T>&) {
+  return false;
+}
+
+template <class T>
+constexpr bool operator>=(const Optional<T>&, nullopt_t) {
+  return true;
+}
+
+template <class T>
+constexpr bool operator>=(nullopt_t, const Optional<T>& opt) {
+  return !opt;
+}
+
+template <class T, class U>
+constexpr bool operator==(const Optional<T>& opt, const U& value) {
+  return opt.has_value() ? *opt == value : false;
+}
+
+template <class T, class U>
+constexpr bool operator==(const U& value, const Optional<T>& opt) {
+  return opt.has_value() ? value == *opt : false;
+}
+
+template <class T, class U>
+constexpr bool operator!=(const Optional<T>& opt, const U& value) {
+  return opt.has_value() ? *opt != value : true;
+}
+
+template <class T, class U>
+constexpr bool operator!=(const U& value, const Optional<T>& opt) {
+  return opt.has_value() ? value != *opt : true;
+}
+
+template <class T, class U>
+constexpr bool operator<(const Optional<T>& opt, const U& value) {
+  return opt.has_value() ? *opt < value : true;
+}
+
+template <class T, class U>
+constexpr bool operator<(const U& value, const Optional<T>& opt) {
+  return opt.has_value() ? value < *opt : false;
+}
+
+template <class T, class U>
+constexpr bool operator<=(const Optional<T>& opt, const U& value) {
+  return opt.has_value() ? *opt <= value : true;
+}
+
+template <class T, class U>
+constexpr bool operator<=(const U& value, const Optional<T>& opt) {
+  return opt.has_value() ? value <= *opt : false;
+}
+
+template <class T, class U>
+constexpr bool operator>(const Optional<T>& opt, const U& value) {
+  return opt.has_value() ? *opt > value : false;
+}
+
+template <class T, class U>
+constexpr bool operator>(const U& value, const Optional<T>& opt) {
+  return opt.has_value() ? value > *opt : true;
+}
+
+template <class T, class U>
+constexpr bool operator>=(const Optional<T>& opt, const U& value) {
+  return opt.has_value() ? *opt >= value : false;
+}
+
+template <class T, class U>
+constexpr bool operator>=(const U& value, const Optional<T>& opt) {
+  return opt.has_value() ? value >= *opt : true;
+}
+
+template <class T>
+constexpr Optional<typename std::decay<T>::type> make_optional(T&& value) {
+  return Optional<typename std::decay<T>::type>(std::forward<T>(value));
+}
+
+template <class T, class... Args>
+constexpr Optional<T> make_optional(Args&&... args) {
+  return Optional<T>(in_place, std::forward<Args>(args)...);
+}
+
+template <class T, class U, class... Args>
+constexpr Optional<T> make_optional(std::initializer_list<U> il,
+                                    Args&&... args) {
+  return Optional<T>(in_place, il, std::forward<Args>(args)...);
+}
+
+// Partial specialization for a function template is not allowed. Also, it is
+// not allowed to add overload function to std namespace, while it is allowed
+// to specialize the template in std. Thus, swap() (kind of) overloading is
+// defined in base namespace, instead.
+template <class T>
+typename std::enable_if<std::is_move_constructible<T>::value &&
+                        internal::IsSwappable<T>::value>::type
+swap(Optional<T>& lhs, Optional<T>& rhs) {
+  lhs.swap(rhs);
+}
+
+}  // namespace base
+}  // namespace perfetto
+
+template <class T>
+struct std::hash<perfetto::base::Optional<T>> {
+  size_t operator()(const perfetto::base::Optional<T>& opt) const {
+    return opt == perfetto::base::nullopt ? 0 : std::hash<T>()(*opt);
+  }
+};
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_OPTIONAL_H_
+// gen_amalgamated begin header: include/perfetto/ext/base/string_view.h
+// gen_amalgamated begin header: include/perfetto/ext/base/hash.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_HASH_H_
+#define INCLUDE_PERFETTO_EXT_BASE_HASH_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <type_traits>
+
+namespace perfetto {
+namespace base {
+
+// A helper class which computes a 64-bit hash of the input data.
+// The algorithm used is FNV-1a as it is fast and easy to implement and has
+// relatively few collisions.
+// WARNING: This hash function should not be used for any cryptographic purpose.
+class Hash {
+ public:
+  // Creates an empty hash object
+  Hash() {}
+
+  // Hashes a numeric value.
+  template <
+      typename T,
+      typename std::enable_if<std::is_arithmetic<T>::value, bool>::type = true>
+  void Update(T data) {
+    Update(reinterpret_cast<const char*>(&data), sizeof(data));
+  }
+
+  // Hashes a byte array.
+  void Update(const char* data, size_t size) {
+    for (size_t i = 0; i < size; i++) {
+      result_ ^= static_cast<uint8_t>(data[i]);
+      result_ *= kFnv1a64Prime;
+    }
+  }
+
+  uint64_t digest() { return result_; }
+
+ private:
+  static constexpr uint64_t kFnv1a64OffsetBasis = 0xcbf29ce484222325;
+  static constexpr uint64_t kFnv1a64Prime = 0x100000001b3;
+
+  uint64_t result_ = kFnv1a64OffsetBasis;
+};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_HASH_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_STRING_VIEW_H_
+#define INCLUDE_PERFETTO_EXT_BASE_STRING_VIEW_H_
+
+#include <string.h>
+
+#include <algorithm>
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/hash.h"
+
+namespace perfetto {
+namespace base {
+
+// A string-like object that refers to a non-owned piece of memory.
+// Strings are internally NOT null terminated.
+class StringView {
+ public:
+  static constexpr size_t npos = static_cast<size_t>(-1);
+
+  StringView() : data_(nullptr), size_(0) {}
+  StringView(const StringView&) = default;
+  StringView& operator=(const StringView&) = default;
+  StringView(const char* data, size_t size) : data_(data), size_(size) {
+    PERFETTO_DCHECK(size == 0 || data != nullptr);
+  }
+
+  // Allow implicit conversion from any class that has a |data| and |size| field
+  // and has the kConvertibleToStringView trait (e.g., protozero::ConstChars).
+  template <typename T, typename = std::enable_if<T::kConvertibleToStringView>>
+  StringView(const T& x) : StringView(x.data, x.size) {
+    PERFETTO_DCHECK(x.size == 0 || x.data != nullptr);
+  }
+
+  // Creates a StringView from a null-terminated C string.
+  // Deliberately not "explicit".
+  StringView(const char* cstr) : data_(cstr), size_(strlen(cstr)) {
+    PERFETTO_DCHECK(cstr != nullptr);
+  }
+
+  // This instead has to be explicit, as creating a StringView out of a
+  // std::string can be subtle.
+  explicit StringView(const std::string& str)
+      : data_(str.data()), size_(str.size()) {}
+
+  bool empty() const { return size_ == 0; }
+  size_t size() const { return size_; }
+  const char* data() const { return data_; }
+  const char* begin() const { return data_; }
+  const char* end() const { return data_ + size_; }
+
+  char at(size_t pos) const {
+    PERFETTO_DCHECK(pos < size_);
+    return data_[pos];
+  }
+
+  size_t find(char c, size_t start_pos = 0) const {
+    for (size_t i = start_pos; i < size_; ++i) {
+      if (data_[i] == c)
+        return i;
+    }
+    return npos;
+  }
+
+  size_t find(const StringView& str, size_t start_pos = 0) const {
+    if (start_pos > size())
+      return npos;
+    auto it = std::search(begin() + start_pos, end(), str.begin(), str.end());
+    size_t pos = static_cast<size_t>(it - begin());
+    return pos + str.size() <= size() ? pos : npos;
+  }
+
+  size_t find(const char* str, size_t start_pos = 0) const {
+    return find(StringView(str), start_pos);
+  }
+
+  size_t rfind(char c) const {
+    for (size_t i = size_; i > 0; --i) {
+      if (data_[i - 1] == c)
+        return i - 1;
+    }
+    return npos;
+  }
+
+  StringView substr(size_t pos, size_t count = npos) const {
+    if (pos >= size_)
+      return StringView("", 0);
+    size_t rcount = std::min(count, size_ - pos);
+    return StringView(data_ + pos, rcount);
+  }
+
+  bool CaseInsensitiveEq(const StringView& other) {
+    if (size() != other.size())
+      return false;
+    if (size() == 0)
+      return true;
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    return _strnicmp(data(), other.data(), size()) == 0;
+#else
+    return strncasecmp(data(), other.data(), size()) == 0;
+#endif
+  }
+
+  std::string ToStdString() const {
+    return data_ == nullptr ? "" : std::string(data_, size_);
+  }
+
+  uint64_t Hash() const {
+    base::Hash hasher;
+    hasher.Update(data_, size_);
+    return hasher.digest();
+  }
+
+ private:
+  const char* data_ = nullptr;
+  size_t size_ = 0;
+};
+
+inline bool operator==(const StringView& x, const StringView& y) {
+  if (x.size() != y.size())
+    return false;
+  if (x.size() == 0)
+    return true;
+  return memcmp(x.data(), y.data(), x.size()) == 0;
+}
+
+inline bool operator!=(const StringView& x, const StringView& y) {
+  return !(x == y);
+}
+
+inline bool operator<(const StringView& x, const StringView& y) {
+  auto size = std::min(x.size(), y.size());
+  if (size == 0)
+    return x.size() < y.size();
+  int result = memcmp(x.data(), y.data(), size);
+  return result < 0 || (result == 0 && x.size() < y.size());
+}
+
+inline bool operator>=(const StringView& x, const StringView& y) {
+  return !(x < y);
+}
+
+inline bool operator>(const StringView& x, const StringView& y) {
+  return y < x;
+}
+
+inline bool operator<=(const StringView& x, const StringView& y) {
+  return !(y < x);
+}
+
+}  // namespace base
+}  // namespace perfetto
+
+template <>
+struct std::hash<::perfetto::base::StringView> {
+  size_t operator()(const ::perfetto::base::StringView& sv) const {
+    return static_cast<size_t>(sv.Hash());
+  }
+};
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_STRING_VIEW_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_STRING_UTILS_H_
+#define INCLUDE_PERFETTO_EXT_BASE_STRING_UTILS_H_
+
+#include <string>
+#include <vector>
+
+#include <inttypes.h>
+#include <stdlib.h>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/optional.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/string_view.h"
+
+namespace perfetto {
+namespace base {
+
+std::string QuoteAndEscapeControlCodes(const std::string& raw);
+
+inline char Lowercase(char c) {
+  return ('A' <= c && c <= 'Z') ? static_cast<char>(c - ('A' - 'a')) : c;
+}
+
+inline char Uppercase(char c) {
+  return ('a' <= c && c <= 'z') ? static_cast<char>(c + ('A' - 'a')) : c;
+}
+
+inline Optional<uint32_t> CStringToUInt32(const char* s, int base = 10) {
+  char* endptr = nullptr;
+  auto value = static_cast<uint32_t>(strtoul(s, &endptr, base));
+  return (*s && !*endptr) ? base::make_optional(value) : base::nullopt;
+}
+
+inline Optional<int32_t> CStringToInt32(const char* s, int base = 10) {
+  char* endptr = nullptr;
+  auto value = static_cast<int32_t>(strtol(s, &endptr, base));
+  return (*s && !*endptr) ? base::make_optional(value) : base::nullopt;
+}
+
+// Note: it saturates to 7fffffffffffffff if parsing a hex number >= 0x8000...
+inline Optional<int64_t> CStringToInt64(const char* s, int base = 10) {
+  char* endptr = nullptr;
+  auto value = static_cast<int64_t>(strtoll(s, &endptr, base));
+  return (*s && !*endptr) ? base::make_optional(value) : base::nullopt;
+}
+
+inline Optional<uint64_t> CStringToUInt64(const char* s, int base = 10) {
+  char* endptr = nullptr;
+  auto value = static_cast<uint64_t>(strtoull(s, &endptr, base));
+  return (*s && !*endptr) ? base::make_optional(value) : base::nullopt;
+}
+
+double StrToD(const char* nptr, char** endptr);
+
+inline Optional<double> CStringToDouble(const char* s) {
+  char* endptr = nullptr;
+  double value = StrToD(s, &endptr);
+  Optional<double> result(base::nullopt);
+  if (*s != '\0' && *endptr == '\0')
+    result = value;
+  return result;
+}
+
+inline Optional<uint32_t> StringToUInt32(const std::string& s, int base = 10) {
+  return CStringToUInt32(s.c_str(), base);
+}
+
+inline Optional<int32_t> StringToInt32(const std::string& s, int base = 10) {
+  return CStringToInt32(s.c_str(), base);
+}
+
+inline Optional<uint64_t> StringToUInt64(const std::string& s, int base = 10) {
+  return CStringToUInt64(s.c_str(), base);
+}
+
+inline Optional<int64_t> StringToInt64(const std::string& s, int base = 10) {
+  return CStringToInt64(s.c_str(), base);
+}
+
+inline Optional<double> StringToDouble(const std::string& s) {
+  return CStringToDouble(s.c_str());
+}
+
+bool StartsWith(const std::string& str, const std::string& prefix);
+bool EndsWith(const std::string& str, const std::string& suffix);
+bool Contains(const std::string& haystack, const std::string& needle);
+bool Contains(const std::string& haystack, char needle);
+size_t Find(const StringView& needle, const StringView& haystack);
+bool CaseInsensitiveEqual(const std::string& first, const std::string& second);
+std::string Join(const std::vector<std::string>& parts,
+                 const std::string& delim);
+std::vector<std::string> SplitString(const std::string& text,
+                                     const std::string& delimiter);
+std::string StripPrefix(const std::string& str, const std::string& prefix);
+std::string StripSuffix(const std::string& str, const std::string& suffix);
+std::string ToLower(const std::string& str);
+std::string ToUpper(const std::string& str);
+std::string StripChars(const std::string& str,
+                       const std::string& chars,
+                       char replacement);
+std::string ToHex(const char* data, size_t size);
+inline std::string ToHex(const std::string& s) {
+  return ToHex(s.c_str(), s.size());
+}
+std::string IntToHexString(uint32_t number);
+std::string Uint64ToHexString(uint64_t number);
+std::string Uint64ToHexStringNoPrefix(uint64_t number);
+std::string ReplaceAll(std::string str,
+                       const std::string& to_replace,
+                       const std::string& replacement);
+std::string TrimLeading(const std::string& str);
+std::string Base64Encode(const void* raw, size_t size);
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_STRING_UTILS_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
+
+#include <inttypes.h>
+#include <locale.h>
+#include <string.h>
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+#include <xlocale.h>
+#endif
+
+#include <algorithm>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+namespace perfetto {
+namespace base {
+namespace {
+constexpr char kBase64Table[] =
+    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+    "abcdefghijklmnopqrstuvwxyz0123456789+/";
+}
+
+// Locale-independant as possible version of strtod.
+double StrToD(const char* nptr, char** endptr) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) ||   \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+  static auto c_locale = newlocale(LC_ALL, "C", nullptr);
+  return strtod_l(nptr, endptr, c_locale);
+#else
+  return strtod(nptr, endptr);
+#endif
+}
+
+std::string QuoteAndEscapeControlCodes(const std::string& raw) {
+  std::string ret;
+  for (auto it = raw.cbegin(); it != raw.cend(); it++) {
+    switch (*it) {
+      case '\\':
+        ret += "\\\\";
+        break;
+      case '"':
+        ret += "\\\"";
+        break;
+      case '/':
+        ret += "\\/";
+        break;
+      case '\b':
+        ret += "\\b";
+        break;
+      case '\f':
+        ret += "\\f";
+        break;
+      case '\n':
+        ret += "\\n";
+        break;
+      case '\r':
+        ret += "\\r";
+        break;
+      case '\t':
+        ret += "\\t";
+        break;
+      default:
+        ret += *it;
+        break;
+    }
+  }
+  return '"' + ret + '"';
+}
+
+bool StartsWith(const std::string& str, const std::string& prefix) {
+  return str.compare(0, prefix.length(), prefix) == 0;
+}
+
+bool EndsWith(const std::string& str, const std::string& suffix) {
+  if (suffix.size() > str.size())
+    return false;
+  return str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0;
+}
+
+bool Contains(const std::string& haystack, const std::string& needle) {
+  return haystack.find(needle) != std::string::npos;
+}
+
+bool Contains(const std::string& haystack, const char needle) {
+  return haystack.find(needle) != std::string::npos;
+}
+
+size_t Find(const StringView& needle, const StringView& haystack) {
+  if (needle.empty())
+    return 0;
+  if (needle.size() > haystack.size())
+    return std::string::npos;
+  for (size_t i = 0; i < haystack.size() - (needle.size() - 1); ++i) {
+    if (strncmp(haystack.data() + i, needle.data(), needle.size()) == 0)
+      return i;
+  }
+  return std::string::npos;
+}
+
+bool CaseInsensitiveEqual(const std::string& first, const std::string& second) {
+  return first.size() == second.size() &&
+         std::equal(
+             first.begin(), first.end(), second.begin(),
+             [](char a, char b) { return Lowercase(a) == Lowercase(b); });
+}
+
+std::string Join(const std::vector<std::string>& parts,
+                 const std::string& delim) {
+  std::string acc;
+  for (size_t i = 0; i < parts.size(); ++i) {
+    acc += parts[i];
+    if (i + 1 != parts.size()) {
+      acc += delim;
+    }
+  }
+  return acc;
+}
+
+std::vector<std::string> SplitString(const std::string& text,
+                                     const std::string& delimiter) {
+  PERFETTO_CHECK(!delimiter.empty());
+
+  std::vector<std::string> output;
+  size_t start = 0;
+  size_t next;
+  for (;;) {
+    next = std::min(text.find(delimiter, start), text.size());
+    if (next > start)
+      output.emplace_back(&text[start], next - start);
+    start = next + delimiter.size();
+    if (start >= text.size())
+      break;
+  }
+  return output;
+}
+
+std::string StripPrefix(const std::string& str, const std::string& prefix) {
+  return StartsWith(str, prefix) ? str.substr(prefix.size()) : str;
+}
+
+std::string StripSuffix(const std::string& str, const std::string& suffix) {
+  return EndsWith(str, suffix) ? str.substr(0, str.size() - suffix.size())
+                               : str;
+}
+
+std::string ToUpper(const std::string& str) {
+  // Don't use toupper(), it depends on the locale.
+  std::string res(str);
+  auto end = res.end();
+  for (auto c = res.begin(); c != end; ++c)
+    *c = Uppercase(*c);
+  return res;
+}
+
+std::string ToLower(const std::string& str) {
+  // Don't use tolower(), it depends on the locale.
+  std::string res(str);
+  auto end = res.end();
+  for (auto c = res.begin(); c != end; ++c)
+    *c = Lowercase(*c);
+  return res;
+}
+
+std::string ToHex(const char* data, size_t size) {
+  std::string hex(2 * size + 1, 'x');
+  for (size_t i = 0; i < size; ++i) {
+    // snprintf prints 3 characters, the two hex digits and a null byte. As we
+    // write left to right, we keep overwriting the nullbytes, except for the
+    // last call to snprintf.
+    snprintf(&(hex[2 * i]), 3, "%02hhx", data[i]);
+  }
+  // Remove the trailing nullbyte produced by the last snprintf.
+  hex.resize(2 * size);
+  return hex;
+}
+
+std::string IntToHexString(uint32_t number) {
+  size_t max_size = 11;  // Max uint32 is 0xFFFFFFFF + 1 for null byte.
+  std::string buf;
+  buf.resize(max_size);
+  auto final_size = snprintf(&buf[0], max_size, "0x%02x", number);
+  PERFETTO_DCHECK(final_size >= 0);
+  buf.resize(static_cast<size_t>(final_size));  // Cuts off the final null byte.
+  return buf;
+}
+
+std::string Uint64ToHexString(uint64_t number) {
+  return "0x" + Uint64ToHexStringNoPrefix(number);
+}
+
+std::string Uint64ToHexStringNoPrefix(uint64_t number) {
+  size_t max_size = 17;  // Max uint64 is FFFFFFFFFFFFFFFF + 1 for null byte.
+  std::string buf;
+  buf.resize(max_size);
+  auto final_size = snprintf(&buf[0], max_size, "%" PRIx64 "", number);
+  PERFETTO_DCHECK(final_size >= 0);
+  buf.resize(static_cast<size_t>(final_size));  // Cuts off the final null byte.
+  return buf;
+}
+
+std::string StripChars(const std::string& str,
+                       const std::string& chars,
+                       char replacement) {
+  std::string res(str);
+  const char* start = res.c_str();
+  const char* remove = chars.c_str();
+  for (const char* c = strpbrk(start, remove); c; c = strpbrk(c + 1, remove))
+    res[static_cast<uintptr_t>(c - start)] = replacement;
+  return res;
+}
+
+std::string ReplaceAll(std::string str,
+                       const std::string& to_replace,
+                       const std::string& replacement) {
+  PERFETTO_CHECK(!to_replace.empty());
+  size_t pos = 0;
+  while ((pos = str.find(to_replace, pos)) != std::string::npos) {
+    str.replace(pos, to_replace.length(), replacement);
+    pos += replacement.length();
+  }
+  return str;
+}
+
+std::string TrimLeading(const std::string& str) {
+  size_t idx = str.find_first_not_of(' ');
+  return idx == std::string::npos ? str : str.substr(idx);
+}
+
+std::string Base64Encode(const void* raw, size_t size) {
+  // The following three cases are based on the tables in the example
+  // section in https://en.wikipedia.org/wiki/Base64. We process three
+  // input bytes at a time, emitting 4 output bytes at a time.
+  const uint8_t* ptr = static_cast<const uint8_t*>(raw);
+  size_t ii = 0;
+
+  std::string out;
+  out.reserve((size + 2) * 4 / 3);
+
+  // While possible, process three input bytes.
+  for (; ii + 3 <= size; ii += 3) {
+    uint32_t twentyfour_bits =
+        (uint32_t(ptr[ii]) << 16) | (uint32_t(ptr[ii + 1]) << 8) | ptr[ii + 2];
+    out.push_back(kBase64Table[(twentyfour_bits >> 18)]);
+    out.push_back(kBase64Table[(twentyfour_bits >> 12) & 0x3f]);
+    out.push_back(kBase64Table[(twentyfour_bits >> 6) & 0x3f]);
+    out.push_back(kBase64Table[twentyfour_bits & 0x3f]);
+  }
+  if (ii + 2 <= size) {  // Process two input bytes.
+    uint32_t twentyfour_bits =
+        (uint32_t(ptr[ii]) << 16) | (uint32_t(ptr[ii + 1]) << 8);
+    out.push_back(kBase64Table[(twentyfour_bits >> 18)]);
+    out.push_back(kBase64Table[(twentyfour_bits >> 12) & 0x3f]);
+    out.push_back(kBase64Table[(twentyfour_bits >> 6) & 0x3f]);
+    out.push_back('=');  // Emit padding.
+    return out;
+  }
+  if (ii + 1 <= size) {  // Process a single input byte.
+    uint32_t twentyfour_bits = (uint32_t(ptr[ii]) << 16);
+    out.push_back(kBase64Table[(twentyfour_bits >> 18)]);
+    out.push_back(kBase64Table[(twentyfour_bits >> 12) & 0x3f]);
+    out.push_back('=');  // Emit padding.
+    out.push_back('=');  // Emit padding.
+  }
+  return out;
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/string_view.cc
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/string_view.h"
+
+namespace perfetto {
+namespace base {
+
+// static
+constexpr size_t StringView::npos;
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/temp_file.cc
+// gen_amalgamated begin header: include/perfetto/ext/base/temp_file.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_TEMP_FILE_H_
+#define INCLUDE_PERFETTO_EXT_BASE_TEMP_FILE_H_
+
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+
+namespace perfetto {
+namespace base {
+
+std::string GetSysTempDir();
+
+class TempFile {
+ public:
+  static TempFile CreateUnlinked();
+  static TempFile Create();
+
+  TempFile(TempFile&&) noexcept;
+  TempFile& operator=(TempFile&&);
+  ~TempFile();
+
+  const std::string& path() const { return path_; }
+  int fd() const { return *fd_; }
+  int operator*() const { return *fd_; }
+
+  // Unlinks the file from the filesystem but keeps the fd() open.
+  // It is safe to call this multiple times.
+  void Unlink();
+
+  // Releases the underlying file descriptor. Will unlink the file from the
+  // filesystem if it was created via CreateUnlinked().
+  ScopedFile ReleaseFD();
+
+ private:
+  TempFile();
+  TempFile(const TempFile&) = delete;
+  TempFile& operator=(const TempFile&) = delete;
+
+  ScopedFile fd_;
+  std::string path_;
+};
+
+class TempDir {
+ public:
+  static TempDir Create();
+
+  TempDir(TempDir&&) noexcept;
+  TempDir& operator=(TempDir&&);
+  ~TempDir();
+
+  const std::string& path() const { return path_; }
+
+ private:
+  TempDir();
+  TempDir(const TempDir&) = delete;
+  TempDir& operator=(const TempDir&) = delete;
+
+  std::string path_;
+};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_TEMP_FILE_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/temp_file.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <Windows.h>
+#include <direct.h>
+#include <fileapi.h>
+#include <io.h>
+#else
+#include <unistd.h>
+#endif
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+namespace {
+std::string GetTempName() {
+  char name[] = "perfetto-XXXXXX";
+  PERFETTO_CHECK(_mktemp_s(name, sizeof(name)) == 0);
+  return name;
+}
+}  // namespace
+#endif
+
+namespace perfetto {
+namespace base {
+
+std::string GetSysTempDir() {
+  const char* tmpdir = nullptr;
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  if ((tmpdir = getenv("TMP")))
+    return tmpdir;
+  if ((tmpdir = getenv("TEMP")))
+    return tmpdir;
+  return "C:\\TEMP";
+#else
+  if ((tmpdir = getenv("TMPDIR")))
+    return base::StripSuffix(tmpdir, "/");
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+  return "/data/local/tmp";
+#else
+  return "/tmp";
+#endif  // !OS_ANDROID
+#endif  // !OS_WIN
+}
+
+// static
+TempFile TempFile::Create() {
+  TempFile temp_file;
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  temp_file.path_ = GetSysTempDir() + "\\" + GetTempName();
+  // Several tests want to read-back the temp file while still open. On Windows,
+  // that requires FILE_SHARE_READ. FILE_SHARE_READ is NOT settable when using
+  // the POSIX-compat equivalent function _open(). Hence the CreateFileA +
+  // _open_osfhandle dance here.
+  HANDLE h =
+      ::CreateFileA(temp_file.path_.c_str(), GENERIC_READ | GENERIC_WRITE,
+                    FILE_SHARE_DELETE | FILE_SHARE_READ, nullptr, CREATE_ALWAYS,
+                    FILE_ATTRIBUTE_TEMPORARY, nullptr);
+  PERFETTO_CHECK(PlatformHandleChecker::IsValid(h));
+  // According to MSDN, when using _open_osfhandle the caller must not call
+  // CloseHandle(). Ownership is moved to the file descriptor, which then needs
+  // to be closed with just with _close().
+  temp_file.fd_.reset(_open_osfhandle(reinterpret_cast<intptr_t>(h), 0));
+#else
+  temp_file.path_ = GetSysTempDir() + "/perfetto-XXXXXXXX";
+  temp_file.fd_.reset(mkstemp(&temp_file.path_[0]));
+#endif
+  if (PERFETTO_UNLIKELY(!temp_file.fd_)) {
+    PERFETTO_FATAL("Could not create temp file %s", temp_file.path_.c_str());
+  }
+  return temp_file;
+}
+
+// static
+TempFile TempFile::CreateUnlinked() {
+  TempFile temp_file = TempFile::Create();
+  temp_file.Unlink();
+  return temp_file;
+}
+
+TempFile::TempFile() = default;
+
+TempFile::~TempFile() {
+  Unlink();
+}
+
+ScopedFile TempFile::ReleaseFD() {
+  Unlink();
+  return std::move(fd_);
+}
+
+void TempFile::Unlink() {
+  if (path_.empty())
+    return;
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  // If the FD is still open DeleteFile will mark the file as pending deletion
+  // and delete it only when the process exists.
+  PERFETTO_CHECK(DeleteFileA(path_.c_str()));
+#else
+  PERFETTO_CHECK(unlink(path_.c_str()) == 0);
+#endif
+  path_.clear();
+}
+
+TempFile::TempFile(TempFile&&) noexcept = default;
+TempFile& TempFile::operator=(TempFile&&) = default;
+
+// static
+TempDir TempDir::Create() {
+  TempDir temp_dir;
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  temp_dir.path_ = GetSysTempDir() + "\\" + GetTempName();
+  PERFETTO_CHECK(_mkdir(temp_dir.path_.c_str()) == 0);
+#else
+  temp_dir.path_ = GetSysTempDir() + "/perfetto-XXXXXXXX";
+  PERFETTO_CHECK(mkdtemp(&temp_dir.path_[0]));
+#endif
+  return temp_dir;
+}
+
+TempDir::TempDir() = default;
+TempDir::TempDir(TempDir&&) noexcept = default;
+TempDir& TempDir::operator=(TempDir&&) = default;
+
+TempDir::~TempDir() {
+  if (path_.empty())
+    return;  // For objects that get std::move()d.
+  PERFETTO_CHECK(Rmdir(path_));
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/thread_checker.cc
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <Windows.h>
+#endif
+
+namespace perfetto {
+namespace base {
+
+namespace {
+constexpr ThreadID kDetached{};
+
+ThreadID CurrentThreadId() {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  return ::GetCurrentThreadId();
+#else
+  return pthread_self();
+#endif
+}
+}  // namespace
+
+ThreadChecker::ThreadChecker() {
+  thread_id_.store(CurrentThreadId());
+}
+
+ThreadChecker::~ThreadChecker() = default;
+
+ThreadChecker::ThreadChecker(const ThreadChecker& other) {
+  thread_id_ = other.thread_id_.load();
+}
+
+ThreadChecker& ThreadChecker::operator=(const ThreadChecker& other) {
+  thread_id_ = other.thread_id_.load();
+  return *this;
+}
+
+bool ThreadChecker::CalledOnValidThread() const {
+  auto self = CurrentThreadId();
+
+  // Will re-attach if previously detached using DetachFromThread().
+  auto prev_value = kDetached;
+  if (thread_id_.compare_exchange_strong(prev_value, self))
+    return true;
+  return prev_value == self;
+}
+
+void ThreadChecker::DetachFromThread() {
+  thread_id_.store(kDetached);
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/time.cc
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/base/time.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <Windows.h>
+#else
+#include <unistd.h>
+#endif
+
+namespace perfetto {
+namespace base {
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+
+TimeNanos GetWallTimeNs() {
+  LARGE_INTEGER freq;
+  ::QueryPerformanceFrequency(&freq);
+  LARGE_INTEGER counter;
+  ::QueryPerformanceCounter(&counter);
+  double elapsed_nanoseconds = (1e9 * static_cast<double>(counter.QuadPart)) /
+                               static_cast<double>(freq.QuadPart);
+  return TimeNanos(static_cast<uint64_t>(elapsed_nanoseconds));
+}
+
+TimeNanos GetThreadCPUTimeNs() {
+  FILETIME dummy, kernel_ftime, user_ftime;
+  ::GetThreadTimes(GetCurrentThread(), &dummy, &dummy, &kernel_ftime,
+                   &user_ftime);
+  uint64_t kernel_time = kernel_ftime.dwHighDateTime * 0x100000000 +
+                         kernel_ftime.dwLowDateTime;
+  uint64_t user_time = user_ftime.dwHighDateTime * 0x100000000 +
+                       user_ftime.dwLowDateTime;
+
+  return TimeNanos((kernel_time + user_time) * 100);
+}
+
+void SleepMicroseconds(unsigned interval_us) {
+  // The Windows Sleep function takes a millisecond count. Round up so that
+  // short sleeps don't turn into a busy wait. Note that the sleep granularity
+  // on Windows can dynamically vary from 1 ms to ~16 ms, so don't count on this
+  // being a short sleep.
+  ::Sleep(static_cast<DWORD>((interval_us + 999) / 1000));
+}
+
+#else  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+
+void SleepMicroseconds(unsigned interval_us) {
+  ::usleep(static_cast<useconds_t>(interval_us));
+}
+
+#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+
+std::string GetTimeFmt(const std::string& fmt) {
+  time_t raw_time;
+  time(&raw_time);
+  struct tm* local_tm;
+  local_tm = localtime(&raw_time);
+  char buf[128];
+  PERFETTO_CHECK(strftime(buf, 80, fmt.c_str(), local_tm) > 0);
+  return buf;
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/utils.cc
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) ||   \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+#include <unistd.h>  // For getpagesize() and geteuid() & fork()
+#endif
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+#include <mach/vm_page_size.h>
+#endif
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+#include <dlfcn.h>
+#include <malloc.h>
+
+#ifdef M_PURGE
+#define PERFETTO_M_PURGE M_PURGE
+#else
+// Only available in in-tree builds and on newer SDKs.
+#define PERFETTO_M_PURGE -101
+#endif
+
+namespace {
+extern "C" {
+using MalloptType = void (*)(int, int);
+}
+}  // namespace
+#endif  // OS_ANDROID
+
+namespace perfetto {
+namespace base {
+
+void MaybeReleaseAllocatorMemToOS() {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+  // mallopt() on Android requires SDK level 26. Many targets and embedders
+  // still depend on a lower SDK level. Given mallopt() is a quite simple API,
+  // use reflection to do this rather than bumping the SDK level for all
+  // embedders. This keeps the behavior of standalone builds aligned with
+  // in-tree builds.
+  static MalloptType mallopt_fn =
+      reinterpret_cast<MalloptType>(dlsym(RTLD_DEFAULT, "mallopt"));
+  if (!mallopt_fn)
+    return;
+  mallopt_fn(PERFETTO_M_PURGE, 0);
+#endif
+}
+
+uint32_t GetSysPageSize() {
+  ignore_result(kPageSize);  // Just to keep the amalgamated build happy.
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+  static std::atomic<uint32_t> page_size{0};
+  // This function might be called in hot paths. Avoid calling getpagesize() all
+  // the times, in many implementations getpagesize() calls sysconf() which is
+  // not cheap.
+  uint32_t cached_value = page_size.load(std::memory_order_relaxed);
+  if (PERFETTO_UNLIKELY(cached_value == 0)) {
+    cached_value = static_cast<uint32_t>(getpagesize());
+    page_size.store(cached_value, std::memory_order_relaxed);
+  }
+  return cached_value;
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+  return static_cast<uint32_t>(vm_page_size);
+#else
+  return 4096;
+#endif
+}
+
+uid_t GetCurrentUserId() {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) ||   \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+  return geteuid();
+#else
+  // TODO(primiano): On Windows we could hash the current user SID and derive a
+  // numeric user id [1]. It is not clear whether we need that. Right now that
+  // would not bring any benefit. Returning 0 unil we can prove we need it.
+  // [1]:https://android-review.googlesource.com/c/platform/external/perfetto/+/1513879/25/src/base/utils.cc
+  return 0;
+#endif
+}
+
+void SetEnv(const std::string& key, const std::string& value) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  PERFETTO_CHECK(::_putenv_s(key.c_str(), value.c_str()) == 0);
+#else
+  PERFETTO_CHECK(::setenv(key.c_str(), value.c_str(), /*overwrite=*/true) == 0);
+#endif
+}
+
+void Daemonize() {
+   #if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) ||   \
+       PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
+       PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+      pid_t pid;
+      switch (pid = fork()) {
+        case -1:
+          PERFETTO_FATAL("fork");
+        case 0: {
+          PERFETTO_CHECK(setsid() != -1);
+          base::ignore_result(chdir("/"));
+          base::ScopedFile null = base::OpenFile("/dev/null", O_RDONLY);
+          PERFETTO_CHECK(null);
+          PERFETTO_CHECK(dup2(*null, STDIN_FILENO) != -1);
+          PERFETTO_CHECK(dup2(*null, STDOUT_FILENO) != -1);
+          PERFETTO_CHECK(dup2(*null, STDERR_FILENO) != -1);
+          // Do not accidentally close stdin/stdout/stderr.
+          if (*null <= 2)
+            null.release();
+          break;
+        }
+        default:
+          printf("%d\n", pid);
+          exit(0);
+      }
+  #else
+    // Avoid -Wunreachable warnings.
+    if (reinterpret_cast<intptr_t>(&Daemonize) != 16)
+      PERFETTO_FATAL("--background is only supported on Linux/Android/Mac");
+  #endif  // OS_WIN
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/uuid.cc
+// gen_amalgamated begin header: include/perfetto/ext/base/uuid.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_UUID_H_
+#define INCLUDE_PERFETTO_EXT_BASE_UUID_H_
+
+#include <array>
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/optional.h"
+
+namespace perfetto {
+namespace base {
+
+class Uuid {
+ public:
+  explicit Uuid(const std::string& s);
+  explicit Uuid(int64_t lsb, int64_t msb);
+  Uuid();
+
+  std::array<uint8_t, 16>* data() { return &data_; }
+  const std::array<uint8_t, 16>* data() const { return &data_; }
+
+  bool operator==(const Uuid& other) const { return data_ == other.data_; }
+
+  bool operator!=(const Uuid& other) const { return !(*this == other); }
+
+  int64_t msb() const {
+    int64_t result;
+    memcpy(&result, data_.data() + 8, 8);
+    return result;
+  }
+
+  int64_t lsb() const {
+    int64_t result;
+    memcpy(&result, data_.data(), 8);
+    return result;
+  }
+
+  void set_lsb_msb(int64_t lsb, int64_t msb) {
+    set_lsb(lsb);
+    set_msb(msb);
+  }
+  void set_msb(int64_t msb) { memcpy(data_.data() + 8, &msb, 8); }
+  void set_lsb(int64_t lsb) { memcpy(data_.data(), &lsb, 8); }
+
+  std::string ToString() const;
+  std::string ToPrettyString() const;
+
+ private:
+  std::array<uint8_t, 16> data_{};
+};
+
+Uuid Uuidv4();
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_UUID_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/uuid.h"
+
+#include <random>
+
+// gen_amalgamated expanded: #include "perfetto/base/time.h"
+
+namespace perfetto {
+namespace base {
+namespace {
+
+constexpr char kHexmap[] = {'0', '1', '2', '3', '4', '5', '6', '7',
+                            '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
+}  // namespace
+
+// See https://www.ietf.org/rfc/rfc4122.txt
+Uuid Uuidv4() {
+  static std::minstd_rand rng(static_cast<uint32_t>(GetBootTimeNs().count()));
+  Uuid uuid;
+  auto& data = *uuid.data();
+
+  for (size_t i = 0; i < 16; ++i)
+    data[i] = static_cast<uint8_t>(rng());
+
+  // version:
+  data[6] = (data[6] & 0x0f) | 0x40;
+  // clock_seq_hi_and_reserved:
+  data[8] = (data[8] & 0x3f) | 0x80;
+
+  return uuid;
+}
+
+Uuid::Uuid() {}
+
+Uuid::Uuid(const std::string& s) {
+  PERFETTO_CHECK(s.size() == data_.size());
+  memcpy(data_.data(), s.data(), s.size());
+}
+
+Uuid::Uuid(int64_t lsb, int64_t msb) {
+  set_lsb_msb(lsb, msb);
+}
+
+std::string Uuid::ToString() const {
+  return std::string(reinterpret_cast<const char*>(data_.data()), data_.size());
+}
+
+std::string Uuid::ToPrettyString() const {
+  std::string s(data_.size() * 2 + 4, '-');
+  // Format is 123e4567-e89b-12d3-a456-426655443322.
+  size_t j = 0;
+  for (size_t i = 0; i < data_.size(); ++i) {
+    if (i == 4 || i == 6 || i == 8 || i == 10)
+      j++;
+    s[2 * i + j] = kHexmap[(data_[data_.size() - i - 1] & 0xf0) >> 4];
+    s[2 * i + 1 + j] = kHexmap[(data_[data_.size() - i - 1] & 0x0f)];
+  }
+  return s;
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/version.cc
+// gen_amalgamated begin header: include/perfetto/ext/base/version.h
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_VERSION_H_
+#define INCLUDE_PERFETTO_EXT_BASE_VERSION_H_
+
+namespace perfetto {
+namespace base {
+
+// The returned pointer is a static string is safe to pass around.
+const char* GetVersionString();
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_VERSION_H_
+// gen_amalgamated begin header: gen/perfetto_version.gen.h
+// Generated by write_version_header.py
+
+#ifndef GEN_PERFETTO_VERSION_GEN_H_
+#define GEN_PERFETTO_VERSION_GEN_H_
+
+#define PERFETTO_VERSION_STRING() "v15.0"
+#define PERFETTO_VERSION_SCM_REVISION() "f69a7701253a1c934595a31c581465a1acb0af80"
+
+#endif  // GEN_PERFETTO_VERSION_GEN_H_
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/version.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+#include <stdio.h>
+
+#if PERFETTO_BUILDFLAG(PERFETTO_VERSION_GEN)
+// gen_amalgamated expanded: #include "perfetto_version.gen.h"
+#else
+#define PERFETTO_VERSION_STRING() "v0.0"
+#define PERFETTO_VERSION_SCM_REVISION() "unknown"
+#endif
+
+namespace perfetto {
+namespace base {
+
+const char* GetVersionString() {
+  static const char* version_str = [] {
+    static constexpr size_t kMaxLen = 256;
+    char* version = new char[kMaxLen + 1];
+    snprintf(version, kMaxLen, "Perfetto %s (%s)", PERFETTO_VERSION_STRING(),
+             PERFETTO_VERSION_SCM_REVISION());
+    return version;
+  }();
+  return version_str;
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/virtual_destructors.cc
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+
+// This translation unit contains the definitions for the destructor of pure
+// virtual interfaces for the current build target. The alternative would be
+// introducing a one-liner .cc file for each pure virtual interface, which is
+// overkill. This is for compliance with -Wweak-vtables.
+
+namespace perfetto {
+namespace base {
+
+TaskRunner::~TaskRunner() = default;
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/waitable_event.cc
+// gen_amalgamated begin header: include/perfetto/ext/base/waitable_event.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_WAITABLE_EVENT_H_
+#define INCLUDE_PERFETTO_EXT_BASE_WAITABLE_EVENT_H_
+
+#include <condition_variable>
+#include <mutex>
+
+namespace perfetto {
+namespace base {
+
+// A waitable event for cross-thread synchronization.
+// All methods on this class can be called from any thread.
+class WaitableEvent {
+ public:
+  WaitableEvent();
+  ~WaitableEvent();
+  WaitableEvent(const WaitableEvent&) = delete;
+  WaitableEvent operator=(const WaitableEvent&) = delete;
+
+  // Synchronously block until the event is notified.
+  void Wait();
+
+  // Signal the event, waking up blocked waiters.
+  void Notify();
+
+ private:
+  std::mutex mutex_;
+  std::condition_variable event_;
+  bool notified_ = false;
+};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_WAITABLE_EVENT_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/waitable_event.h"
+
+namespace perfetto {
+namespace base {
+
+WaitableEvent::WaitableEvent() = default;
+WaitableEvent::~WaitableEvent() = default;
+
+void WaitableEvent::Wait() {
+  std::unique_lock<std::mutex> lock(mutex_);
+  return event_.wait(lock, [this] { return notified_; });
+}
+
+void WaitableEvent::Notify() {
+  std::unique_lock<std::mutex> lock(mutex_);
+  notified_ = true;
+  event_.notify_all();
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/watchdog_posix.cc
+// gen_amalgamated begin header: include/perfetto/ext/base/watchdog.h
+// gen_amalgamated begin header: include/perfetto/ext/base/watchdog_noop.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_NOOP_H_
+#define INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_NOOP_H_
+
+#include <stdint.h>
+
+namespace perfetto {
+namespace base {
+
+class Watchdog {
+ public:
+  class Timer {
+   public:
+    // Define an empty dtor to avoid "unused variable" errors on the call site.
+    Timer() {}
+    Timer(const Timer&) {}
+    ~Timer() {}
+  };
+  static Watchdog* GetInstance() {
+    static Watchdog* watchdog = new Watchdog();
+    return watchdog;
+  }
+  Timer CreateFatalTimer(uint32_t /*ms*/) { return Timer(); }
+  void Start() {}
+  void SetMemoryLimit(uint64_t /*bytes*/, uint32_t /*window_ms*/) {}
+  void SetCpuLimit(uint32_t /*percentage*/, uint32_t /*window_ms*/) {}
+};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_NOOP_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_H_
+#define INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_H_
+
+#include <functional>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+// The POSIX watchdog is only supported on Linux and Android in non-embedder
+// builds.
+#if PERFETTO_BUILDFLAG(PERFETTO_WATCHDOG)
+// gen_amalgamated expanded: #include "perfetto/ext/base/watchdog_posix.h"
+#else
+// gen_amalgamated expanded: #include "perfetto/ext/base/watchdog_noop.h"
+#endif
+
+namespace perfetto {
+namespace base {
+
+// Make the limits more relaxed on desktop, where multi-GB traces are likely.
+// Multi-GB traces can take bursts of cpu time to write into disk at the end of
+// the trace.
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+constexpr uint32_t kWatchdogDefaultCpuLimit = 75;
+constexpr uint32_t kWatchdogDefaultCpuWindow = 5 * 60 * 1000;  // 5 minutes.
+#else
+constexpr uint32_t kWatchdogDefaultCpuLimit = 90;
+constexpr uint32_t kWatchdogDefaultCpuWindow = 10 * 60 * 1000;  // 10 minutes.
+#endif
+
+// The default memory margin we give to our processes. This is used as as a
+// constant to put on top of the trace buffers.
+constexpr uint64_t kWatchdogDefaultMemorySlack = 32 * 1024 * 1024;  // 32 MiB.
+constexpr uint32_t kWatchdogDefaultMemoryWindow = 30 * 1000;  // 30 seconds.
+
+inline void RunTaskWithWatchdogGuard(const std::function<void()>& task) {
+  // Maximum time a single task can take in a TaskRunner before the
+  // program suicides.
+  constexpr int64_t kWatchdogMillis = 30000;  // 30s
+
+  Watchdog::Timer handle =
+      base::Watchdog::GetInstance()->CreateFatalTimer(kWatchdogMillis);
+  task();
+
+  // Suppress unused variable warnings in the client library amalgamated build.
+  (void)kWatchdogDefaultCpuLimit;
+  (void)kWatchdogDefaultCpuWindow;
+  (void)kWatchdogDefaultMemorySlack;
+  (void)kWatchdogDefaultMemoryWindow;
+}
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_WATCHDOG_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/watchdog.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_WATCHDOG)
+
+#include <fcntl.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <stdint.h>
+
+#include <fstream>
+#include <thread>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/thread_utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+
+namespace perfetto {
+namespace base {
+
+namespace {
+
+constexpr uint32_t kDefaultPollingInterval = 30 * 1000;
+
+bool IsMultipleOf(uint32_t number, uint32_t divisor) {
+  return number >= divisor && number % divisor == 0;
+}
+
+double MeanForArray(const uint64_t array[], size_t size) {
+  uint64_t total = 0;
+  for (size_t i = 0; i < size; i++) {
+    total += array[i];
+  }
+  return static_cast<double>(total / size);
+
+}
+
+}  //  namespace
+
+bool ReadProcStat(int fd, ProcStat* out) {
+  char c[512];
+  size_t c_pos = 0;
+  while (c_pos < sizeof(c) - 1) {
+    ssize_t rd = PERFETTO_EINTR(read(fd, c + c_pos, sizeof(c) - c_pos));
+    if (rd < 0) {
+      PERFETTO_ELOG("Failed to read stat file to enforce resource limits.");
+      return false;
+    }
+    if (rd == 0)
+      break;
+    c_pos += static_cast<size_t>(rd);
+  }
+  PERFETTO_CHECK(c_pos < sizeof(c));
+  c[c_pos] = '\0';
+
+  if (sscanf(c,
+             "%*d %*s %*c %*d %*d %*d %*d %*d %*u %*u %*u %*u %*u %lu "
+             "%lu %*d %*d %*d %*d %*d %*d %*u %*u %ld",
+             &out->utime, &out->stime, &out->rss_pages) != 3) {
+    PERFETTO_ELOG("Invalid stat format: %s", c);
+    return false;
+  }
+  return true;
+}
+
+Watchdog::Watchdog(uint32_t polling_interval_ms)
+    : polling_interval_ms_(polling_interval_ms) {}
+
+Watchdog::~Watchdog() {
+  if (!thread_.joinable()) {
+    PERFETTO_DCHECK(!enabled_);
+    return;
+  }
+  PERFETTO_DCHECK(enabled_);
+  enabled_ = false;
+  exit_signal_.notify_one();
+  thread_.join();
+}
+
+Watchdog* Watchdog::GetInstance() {
+  static Watchdog* watchdog = new Watchdog(kDefaultPollingInterval);
+  return watchdog;
+}
+
+Watchdog::Timer Watchdog::CreateFatalTimer(uint32_t ms) {
+  if (!enabled_.load(std::memory_order_relaxed))
+    return Watchdog::Timer(0);
+
+  return Watchdog::Timer(ms);
+}
+
+void Watchdog::Start() {
+  std::lock_guard<std::mutex> guard(mutex_);
+  if (thread_.joinable()) {
+    PERFETTO_DCHECK(enabled_);
+  } else {
+    PERFETTO_DCHECK(!enabled_);
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+    // Kick the thread to start running but only on Android or Linux.
+    enabled_ = true;
+    thread_ = std::thread(&Watchdog::ThreadMain, this);
+#endif
+  }
+}
+
+void Watchdog::SetMemoryLimit(uint64_t bytes, uint32_t window_ms) {
+  // Update the fields under the lock.
+  std::lock_guard<std::mutex> guard(mutex_);
+
+  PERFETTO_CHECK(IsMultipleOf(window_ms, polling_interval_ms_) || bytes == 0);
+
+  size_t size = bytes == 0 ? 0 : window_ms / polling_interval_ms_ + 1;
+  memory_window_bytes_.Reset(size);
+  memory_limit_bytes_ = bytes;
+}
+
+void Watchdog::SetCpuLimit(uint32_t percentage, uint32_t window_ms) {
+  std::lock_guard<std::mutex> guard(mutex_);
+
+  PERFETTO_CHECK(percentage <= 100);
+  PERFETTO_CHECK(IsMultipleOf(window_ms, polling_interval_ms_) ||
+                 percentage == 0);
+
+  size_t size = percentage == 0 ? 0 : window_ms / polling_interval_ms_ + 1;
+  cpu_window_time_ticks_.Reset(size);
+  cpu_limit_percentage_ = percentage;
+}
+
+void Watchdog::ThreadMain() {
+  base::ScopedFile stat_fd(base::OpenFile("/proc/self/stat", O_RDONLY));
+  if (!stat_fd) {
+    PERFETTO_ELOG("Failed to open stat file to enforce resource limits.");
+    return;
+  }
+
+  std::unique_lock<std::mutex> guard(mutex_);
+  for (;;) {
+    exit_signal_.wait_for(guard,
+                          std::chrono::milliseconds(polling_interval_ms_));
+    if (!enabled_)
+      return;
+
+    lseek(stat_fd.get(), 0, SEEK_SET);
+
+    ProcStat stat;
+    if (!ReadProcStat(stat_fd.get(), &stat)) {
+      return;
+    }
+
+    uint64_t cpu_time = stat.utime + stat.stime;
+    uint64_t rss_bytes =
+        static_cast<uint64_t>(stat.rss_pages) * base::GetSysPageSize();
+
+    CheckMemory(rss_bytes);
+    CheckCpu(cpu_time);
+  }
+}
+
+void Watchdog::CheckMemory(uint64_t rss_bytes) {
+  if (memory_limit_bytes_ == 0)
+    return;
+
+  // Add the current stat value to the ring buffer and check that the mean
+  // remains under our threshold.
+  if (memory_window_bytes_.Push(rss_bytes)) {
+    if (memory_window_bytes_.Mean() > static_cast<double>(memory_limit_bytes_)) {
+      PERFETTO_ELOG(
+          "Memory watchdog trigger. Memory window of %f bytes is above the "
+          "%" PRIu64 " bytes limit.",
+          memory_window_bytes_.Mean(), memory_limit_bytes_);
+      kill(getpid(), SIGABRT);
+    }
+  }
+}
+
+void Watchdog::CheckCpu(uint64_t cpu_time) {
+  if (cpu_limit_percentage_ == 0)
+    return;
+
+  // Add the cpu time to the ring buffer.
+  if (cpu_window_time_ticks_.Push(cpu_time)) {
+    // Compute the percentage over the whole window and check that it remains
+    // under the threshold.
+    uint64_t difference_ticks = cpu_window_time_ticks_.NewestWhenFull() -
+                                cpu_window_time_ticks_.OldestWhenFull();
+    double window_interval_ticks =
+        (static_cast<double>(WindowTimeForRingBuffer(cpu_window_time_ticks_)) /
+         1000.0) *
+        static_cast<double>(sysconf(_SC_CLK_TCK));
+    double percentage = static_cast<double>(difference_ticks) /
+                        static_cast<double>(window_interval_ticks) * 100;
+    if (percentage > cpu_limit_percentage_) {
+      PERFETTO_ELOG("CPU watchdog trigger. %f%% CPU use is above the %" PRIu32
+                    "%% CPU limit.",
+                    percentage, cpu_limit_percentage_);
+      kill(getpid(), SIGABRT);
+    }
+  }
+}
+
+uint32_t Watchdog::WindowTimeForRingBuffer(const WindowedInterval& window) {
+  return static_cast<uint32_t>(window.size() - 1) * polling_interval_ms_;
+}
+
+bool Watchdog::WindowedInterval::Push(uint64_t sample) {
+  // Add the sample to the current position in the ring buffer.
+  buffer_[position_] = sample;
+
+  // Update the position with next one circularily.
+  position_ = (position_ + 1) % size_;
+
+  // Set the filled flag the first time we wrap.
+  filled_ = filled_ || position_ == 0;
+  return filled_;
+}
+
+double Watchdog::WindowedInterval::Mean() const {
+  return MeanForArray(buffer_.get(), size_);
+}
+
+void Watchdog::WindowedInterval::Clear() {
+  position_ = 0;
+  buffer_.reset(new uint64_t[size_]());
+}
+
+void Watchdog::WindowedInterval::Reset(size_t new_size) {
+  position_ = 0;
+  size_ = new_size;
+  buffer_.reset(new_size == 0 ? nullptr : new uint64_t[new_size]());
+}
+
+Watchdog::Timer::Timer(uint32_t ms) {
+  if (!ms)
+    return;  // No-op timer created when the watchdog is disabled.
+
+  struct sigevent sev = {};
+  timer_t timerid;
+  sev.sigev_notify = SIGEV_THREAD_ID;
+  sev._sigev_un._tid = base::GetThreadId();
+  sev.sigev_signo = SIGABRT;
+  PERFETTO_CHECK(timer_create(CLOCK_MONOTONIC, &sev, &timerid) != -1);
+  timerid_ = base::make_optional(timerid);
+  struct itimerspec its = {};
+  its.it_value.tv_sec = ms / 1000;
+  its.it_value.tv_nsec = 1000000L * (ms % 1000);
+  PERFETTO_CHECK(timer_settime(timerid_.value(), 0, &its, nullptr) != -1);
+}
+
+Watchdog::Timer::~Timer() {
+  if (timerid_) {
+    timer_delete(timerid_.value());
+  }
+}
+
+Watchdog::Timer::Timer(Timer&& other) noexcept {
+  timerid_ = std::move(other.timerid_);
+  other.timerid_ = base::nullopt;
+}
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // PERFETTO_BUILDFLAG(PERFETTO_WATCHDOG)
+// gen_amalgamated begin source: src/base/thread_task_runner.cc
+// gen_amalgamated begin header: include/perfetto/ext/base/thread_task_runner.h
+// gen_amalgamated begin header: include/perfetto/ext/base/unix_task_runner.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_UNIX_TASK_RUNNER_H_
+#define INCLUDE_PERFETTO_EXT_BASE_UNIX_TASK_RUNNER_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/base/thread_utils.h"
+// gen_amalgamated expanded: #include "perfetto/base/time.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/event_fd.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
+
+#include <chrono>
+#include <deque>
+#include <map>
+#include <mutex>
+#include <vector>
+
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <poll.h>
+#endif
+
+namespace perfetto {
+namespace base {
+
+// Runs a task runner on the current thread.
+//
+// Implementation note: we currently assume (and enforce in debug builds) that
+// Run() is called from the thread that constructed the UnixTaskRunner. This is
+// not strictly necessary, and we could instead track the thread that invokes
+// Run(). However, a related property that *might* be important to enforce is
+// that the destructor runs on the task-running thread. Otherwise, if there are
+// still-pending tasks at the time of destruction, we would destroy those
+// outside of the task thread (which might be unexpected to the caller). On the
+// other hand, the std::function task interface discourages use of any
+// resource-owning tasks (as the callable needs to be copyable), so this might
+// not be important in practice.
+//
+// TODO(rsavitski): consider adding a thread-check in the destructor, after
+// auditing existing usages.
+// TODO(primiano): rename this to TaskRunnerImpl. The "Unix" part is misleading
+// now as it supports also Windows.
+class UnixTaskRunner : public TaskRunner {
+ public:
+  UnixTaskRunner();
+  ~UnixTaskRunner() override;
+
+  // Start executing tasks. Doesn't return until Quit() is called. Run() may be
+  // called multiple times on the same task runner.
+  void Run();
+  void Quit();
+
+  // Checks whether there are any pending immediate tasks to run. Note that
+  // delayed tasks don't count even if they are due to run.
+  bool IsIdleForTesting();
+
+  // TaskRunner implementation:
+  void PostTask(std::function<void()>) override;
+  void PostDelayedTask(std::function<void()>, uint32_t delay_ms) override;
+  void AddFileDescriptorWatch(PlatformHandle, std::function<void()>) override;
+  void RemoveFileDescriptorWatch(PlatformHandle) override;
+  bool RunsTasksOnCurrentThread() const override;
+
+  // Returns true if the task runner is quitting, or has quit and hasn't been
+  // restarted since. Exposed primarily for ThreadTaskRunner, not necessary for
+  // normal use of this class.
+  bool QuitCalled();
+
+ private:
+  void WakeUp();
+  void UpdateWatchTasksLocked();
+  int GetDelayMsToNextTaskLocked() const;
+  void RunImmediateAndDelayedTask();
+  void PostFileDescriptorWatches(uint64_t windows_wait_result);
+  void RunFileDescriptorWatch(PlatformHandle);
+
+  ThreadChecker thread_checker_;
+  PlatformThreadId created_thread_id_ = GetThreadId();
+
+  EventFd event_;
+
+// The array of fds/handles passed to poll(2) / WaitForMultipleObjects().
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  std::vector<PlatformHandle> poll_fds_;
+#else
+  std::vector<struct pollfd> poll_fds_;
+#endif
+
+  // --- Begin lock-protected members ---
+
+  std::mutex lock_;
+
+  std::deque<std::function<void()>> immediate_tasks_;
+  std::multimap<TimeMillis, std::function<void()>> delayed_tasks_;
+  bool quit_ = false;
+
+  struct WatchTask {
+    std::function<void()> callback;
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    // On UNIX systems we make the FD number negative in |poll_fds_| to avoid
+    // polling it again until the queued task runs. On Windows we can't do that.
+    // Instead we keep track of its state here.
+    bool pending = false;
+#else
+    size_t poll_fd_index;  // Index into |poll_fds_|.
+#endif
+  };
+
+  std::map<PlatformHandle, WatchTask> watch_tasks_;
+  bool watch_tasks_changed_ = false;
+
+  // --- End lock-protected members ---
+};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_UNIX_TASK_RUNNER_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_THREAD_TASK_RUNNER_H_
+#define INCLUDE_PERFETTO_EXT_BASE_THREAD_TASK_RUNNER_H_
+
+#include <functional>
+#include <thread>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/unix_task_runner.h"
+
+namespace perfetto {
+namespace base {
+
+// A UnixTaskRunner backed by a dedicated task thread. Shuts down the runner and
+// joins the thread upon destruction. Can be moved to transfer ownership.
+//
+// Guarantees that:
+// * the UnixTaskRunner will be constructed and destructed on the task thread.
+// * the task thread will live for the lifetime of the UnixTaskRunner.
+//
+class PERFETTO_EXPORT ThreadTaskRunner : public TaskRunner {
+ public:
+  static ThreadTaskRunner CreateAndStart(const std::string& name = "") {
+    return ThreadTaskRunner(name);
+  }
+
+  ThreadTaskRunner(const ThreadTaskRunner&) = delete;
+  ThreadTaskRunner& operator=(const ThreadTaskRunner&) = delete;
+
+  ThreadTaskRunner(ThreadTaskRunner&&) noexcept;
+  ThreadTaskRunner& operator=(ThreadTaskRunner&&);
+  ~ThreadTaskRunner() override;
+
+  // Executes the given function on the task runner thread and blocks the caller
+  // thread until the function has run.
+  void PostTaskAndWaitForTesting(std::function<void()>);
+
+  // Can be called from another thread to get the CPU time of the thread the
+  // task-runner is executing on.
+  uint64_t GetThreadCPUTimeNsForTesting();
+
+  // Returns a pointer to the UnixTaskRunner, which is valid for the lifetime of
+  // this ThreadTaskRunner object (unless this object is moved-from, in which
+  // case the pointer remains valid for the lifetime of the new owning
+  // ThreadTaskRunner).
+  //
+  // Warning: do not call Quit() on the returned runner pointer, the termination
+  // should be handled exclusively by this class' destructor.
+  UnixTaskRunner* get() const { return task_runner_; }
+
+  // TaskRunner implementation.
+  // These methods just proxy to the underlying task_runner_.
+  void PostTask(std::function<void()>) override;
+  void PostDelayedTask(std::function<void()>, uint32_t delay_ms) override;
+  void AddFileDescriptorWatch(PlatformHandle, std::function<void()>) override;
+  void RemoveFileDescriptorWatch(PlatformHandle) override;
+  bool RunsTasksOnCurrentThread() const override;
+
+ private:
+  explicit ThreadTaskRunner(const std::string& name);
+  void RunTaskThread(std::function<void(UnixTaskRunner*)> initializer);
+
+  std::thread thread_;
+  std::string name_;
+  UnixTaskRunner* task_runner_ = nullptr;
+};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_THREAD_TASK_RUNNER_H_
+// gen_amalgamated begin header: include/perfetto/ext/base/thread_utils.h
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_THREAD_UTILS_H_
+#define INCLUDE_PERFETTO_EXT_BASE_THREAD_UTILS_H_
+
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) ||   \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+#include <pthread.h>
+#include <string.h>
+#include <algorithm>
+#endif
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+#include <sys/prctl.h>
+#endif
+
+// Internal implementation utils that aren't as widely useful/supported as
+// base/thread_utils.h.
+
+namespace perfetto {
+namespace base {
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) ||   \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+// Sets the "comm" of the calling thread to the first 15 chars of the given
+// string.
+inline bool MaybeSetThreadName(const std::string& name) {
+  char buf[16] = {};
+  size_t sz = std::min(name.size(), static_cast<size_t>(15));
+  strncpy(buf, name.c_str(), sz);
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+  return pthread_setname_np(buf) == 0;
+#else
+  return pthread_setname_np(pthread_self(), buf) == 0;
+#endif
+}
+
+inline bool GetThreadName(std::string& out_result) {
+  char buf[16] = {};
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+  if (prctl(PR_GET_NAME, buf) != 0)
+    return false;
+#else
+  if (pthread_getname_np(pthread_self(), buf, sizeof(buf)) != 0)
+    return false;
+#endif
+  out_result = std::string(buf);
+  return true;
+}
+
+#else
+inline bool MaybeSetThreadName(const std::string&) {
+  return false;
+}
+inline bool GetThreadName(std::string&) {
+  return false;
+}
+#endif
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_THREAD_UTILS_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/thread_task_runner.h"
+
+#include <condition_variable>
+#include <functional>
+#include <mutex>
+#include <thread>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/thread_utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/unix_task_runner.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+#include <sys/prctl.h>
+#endif
+
+namespace perfetto {
+namespace base {
+
+ThreadTaskRunner::ThreadTaskRunner(ThreadTaskRunner&& other) noexcept
+    : thread_(std::move(other.thread_)), task_runner_(other.task_runner_) {
+  other.task_runner_ = nullptr;
+}
+
+ThreadTaskRunner& ThreadTaskRunner::operator=(ThreadTaskRunner&& other) {
+  this->~ThreadTaskRunner();
+  new (this) ThreadTaskRunner(std::move(other));
+  return *this;
+}
+
+ThreadTaskRunner::~ThreadTaskRunner() {
+  if (task_runner_) {
+    PERFETTO_CHECK(!task_runner_->QuitCalled());
+    task_runner_->Quit();
+
+    PERFETTO_DCHECK(thread_.joinable());
+  }
+  if (thread_.joinable())
+    thread_.join();
+}
+
+ThreadTaskRunner::ThreadTaskRunner(const std::string& name) : name_(name) {
+  std::mutex init_lock;
+  std::condition_variable init_cv;
+
+  std::function<void(UnixTaskRunner*)> initializer =
+      [this, &init_lock, &init_cv](UnixTaskRunner* task_runner) {
+        std::lock_guard<std::mutex> lock(init_lock);
+        task_runner_ = task_runner;
+        // Notify while still holding the lock, as init_cv ceases to exist as
+        // soon as the main thread observes a non-null task_runner_, and it can
+        // wake up spuriously (i.e. before the notify if we had unlocked before
+        // notifying).
+        init_cv.notify_one();
+      };
+
+  thread_ = std::thread(&ThreadTaskRunner::RunTaskThread, this,
+                        std::move(initializer));
+
+  std::unique_lock<std::mutex> lock(init_lock);
+  init_cv.wait(lock, [this] { return !!task_runner_; });
+}
+
+void ThreadTaskRunner::RunTaskThread(
+    std::function<void(UnixTaskRunner*)> initializer) {
+  if (!name_.empty()) {
+    base::MaybeSetThreadName(name_);
+  }
+
+  UnixTaskRunner task_runner;
+  task_runner.PostTask(std::bind(std::move(initializer), &task_runner));
+  task_runner.Run();
+}
+
+void ThreadTaskRunner::PostTaskAndWaitForTesting(std::function<void()> fn) {
+  std::mutex mutex;
+  std::condition_variable cv;
+
+  std::unique_lock<std::mutex> lock(mutex);
+  bool done = false;
+  task_runner_->PostTask([&mutex, &cv, &done, &fn] {
+    fn();
+
+    std::lock_guard<std::mutex> inner_lock(mutex);
+    done = true;
+    cv.notify_one();
+  });
+  cv.wait(lock, [&done] { return done; });
+}
+
+uint64_t ThreadTaskRunner::GetThreadCPUTimeNsForTesting() {
+  uint64_t thread_time_ns = 0;
+  PostTaskAndWaitForTesting([&thread_time_ns] {
+    thread_time_ns = static_cast<uint64_t>(base::GetThreadCPUTimeNs().count());
+  });
+  return thread_time_ns;
+}
+
+void ThreadTaskRunner::PostTask(std::function<void()> task) {
+  task_runner_->PostTask(std::move(task));
+}
+
+void ThreadTaskRunner::PostDelayedTask(std::function<void()> task,
+                                       uint32_t delay_ms) {
+  task_runner_->PostDelayedTask(std::move(task), delay_ms);
+}
+
+void ThreadTaskRunner::AddFileDescriptorWatch(
+    PlatformHandle handle,
+    std::function<void()> watch_task) {
+  task_runner_->AddFileDescriptorWatch(handle, std::move(watch_task));
+}
+
+void ThreadTaskRunner::RemoveFileDescriptorWatch(PlatformHandle handle) {
+  task_runner_->RemoveFileDescriptorWatch(handle);
+}
+
+bool ThreadTaskRunner::RunsTasksOnCurrentThread() const {
+  return task_runner_->RunsTasksOnCurrentThread();
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/unix_task_runner.cc
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/unix_task_runner.h"
+
+#include <errno.h>
+#include <stdlib.h>
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <Windows.h>
+#include <synchapi.h>
+#else
+#include <unistd.h>
+#endif
+
+#include <algorithm>
+#include <limits>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/watchdog.h"
+
+namespace perfetto {
+namespace base {
+
+UnixTaskRunner::UnixTaskRunner() {
+  AddFileDescriptorWatch(event_.fd(), [] {
+    // Not reached -- see PostFileDescriptorWatches().
+    PERFETTO_DFATAL("Should be unreachable.");
+  });
+}
+
+UnixTaskRunner::~UnixTaskRunner() = default;
+
+void UnixTaskRunner::WakeUp() {
+  event_.Notify();
+}
+
+void UnixTaskRunner::Run() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  created_thread_id_ = GetThreadId();
+  quit_ = false;
+  for (;;) {
+    int poll_timeout_ms;
+    {
+      std::lock_guard<std::mutex> lock(lock_);
+      if (quit_)
+        return;
+      poll_timeout_ms = GetDelayMsToNextTaskLocked();
+      UpdateWatchTasksLocked();
+    }
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    DWORD timeout =
+        poll_timeout_ms >= 0 ? static_cast<DWORD>(poll_timeout_ms) : INFINITE;
+    DWORD ret =
+        WaitForMultipleObjects(static_cast<DWORD>(poll_fds_.size()),
+                               &poll_fds_[0], /*bWaitAll=*/false, timeout);
+    // Unlike poll(2), WaitForMultipleObjects() returns only *one* handle in the
+    // set, even when >1 is signalled. In order to avoid starvation,
+    // PostFileDescriptorWatches() will WaitForSingleObject() each other handle
+    // to ensure fairness. |ret| here is passed just to avoid an extra
+    // WaitForSingleObject() for the one handle that WaitForMultipleObject()
+    // returned.
+    PostFileDescriptorWatches(ret);
+#else
+    int ret = PERFETTO_EINTR(poll(
+        &poll_fds_[0], static_cast<nfds_t>(poll_fds_.size()), poll_timeout_ms));
+    PERFETTO_CHECK(ret >= 0);
+    PostFileDescriptorWatches(0 /*ignored*/);
+#endif
+
+    // To avoid starvation we always interleave all types of tasks -- immediate,
+    // delayed and file descriptor watches.
+    RunImmediateAndDelayedTask();
+  }
+}
+
+void UnixTaskRunner::Quit() {
+  std::lock_guard<std::mutex> lock(lock_);
+  quit_ = true;
+  WakeUp();
+}
+
+bool UnixTaskRunner::QuitCalled() {
+  std::lock_guard<std::mutex> lock(lock_);
+  return quit_;
+}
+
+bool UnixTaskRunner::IsIdleForTesting() {
+  std::lock_guard<std::mutex> lock(lock_);
+  return immediate_tasks_.empty();
+}
+
+void UnixTaskRunner::UpdateWatchTasksLocked() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  if (!watch_tasks_changed_)
+    return;
+  watch_tasks_changed_ = false;
+#endif
+  poll_fds_.clear();
+  for (auto& it : watch_tasks_) {
+    PlatformHandle handle = it.first;
+    WatchTask& watch_task = it.second;
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    if (!watch_task.pending)
+      poll_fds_.push_back(handle);
+#else
+    watch_task.poll_fd_index = poll_fds_.size();
+    poll_fds_.push_back({handle, POLLIN | POLLHUP, 0});
+#endif
+  }
+}
+
+void UnixTaskRunner::RunImmediateAndDelayedTask() {
+  // If locking overhead becomes an issue, add a separate work queue.
+  std::function<void()> immediate_task;
+  std::function<void()> delayed_task;
+  TimeMillis now = GetWallTimeMs();
+  {
+    std::lock_guard<std::mutex> lock(lock_);
+    if (!immediate_tasks_.empty()) {
+      immediate_task = std::move(immediate_tasks_.front());
+      immediate_tasks_.pop_front();
+    }
+    if (!delayed_tasks_.empty()) {
+      auto it = delayed_tasks_.begin();
+      if (now >= it->first) {
+        delayed_task = std::move(it->second);
+        delayed_tasks_.erase(it);
+      }
+    }
+  }
+
+  errno = 0;
+  if (immediate_task)
+    RunTaskWithWatchdogGuard(immediate_task);
+  errno = 0;
+  if (delayed_task)
+    RunTaskWithWatchdogGuard(delayed_task);
+}
+
+void UnixTaskRunner::PostFileDescriptorWatches(uint64_t windows_wait_result) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  for (size_t i = 0; i < poll_fds_.size(); i++) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    const PlatformHandle handle = poll_fds_[i];
+    // |windows_wait_result| is the result of WaitForMultipleObjects() call. If
+    // one of the objects was signalled, it will have a value between
+    // [0, poll_fds_.size()].
+    if (i != windows_wait_result &&
+        WaitForSingleObject(handle, 0) != WAIT_OBJECT_0) {
+      continue;
+    }
+#else
+    base::ignore_result(windows_wait_result);
+    const PlatformHandle handle = poll_fds_[i].fd;
+    if (!(poll_fds_[i].revents & (POLLIN | POLLHUP)))
+      continue;
+    poll_fds_[i].revents = 0;
+#endif
+
+    // The wake-up event is handled inline to avoid an infinite recursion of
+    // posted tasks.
+    if (handle == event_.fd()) {
+      event_.Clear();
+      continue;
+    }
+
+    // Binding to |this| is safe since we are the only object executing the
+    // task.
+    PostTask(std::bind(&UnixTaskRunner::RunFileDescriptorWatch, this, handle));
+
+    // Flag the task as pending.
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    // On Windows this is done by marking the WatchTask entry as pending. This
+    // is more expensive than Linux as requires rebuilding the |poll_fds_|
+    // vector on each call. There doesn't seem to be a good alternative though.
+    auto it = watch_tasks_.find(handle);
+    PERFETTO_CHECK(it != watch_tasks_.end());
+    PERFETTO_DCHECK(!it->second.pending);
+    it->second.pending = true;
+#else
+    // On UNIX systems instead, we just make the fd negative while its task is
+    // pending. This makes poll(2) ignore the fd.
+    PERFETTO_DCHECK(poll_fds_[i].fd >= 0);
+    poll_fds_[i].fd = -poll_fds_[i].fd;
+#endif
+  }
+}
+
+void UnixTaskRunner::RunFileDescriptorWatch(PlatformHandle fd) {
+  std::function<void()> task;
+  {
+    std::lock_guard<std::mutex> lock(lock_);
+    auto it = watch_tasks_.find(fd);
+    if (it == watch_tasks_.end())
+      return;
+    WatchTask& watch_task = it->second;
+
+    // Make poll(2) pay attention to the fd again. Since another thread may have
+    // updated this watch we need to refresh the set first.
+    UpdateWatchTasksLocked();
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    // On Windows we manually track the presence of outstanding tasks for the
+    // watch. The UpdateWatchTasksLocked() in the Run() loop will re-add the
+    // task to the |poll_fds_| vector.
+    PERFETTO_DCHECK(watch_task.pending);
+    watch_task.pending = false;
+#else
+    size_t fd_index = watch_task.poll_fd_index;
+    PERFETTO_DCHECK(fd_index < poll_fds_.size());
+    PERFETTO_DCHECK(::abs(poll_fds_[fd_index].fd) == fd);
+    poll_fds_[fd_index].fd = fd;
+#endif
+    task = watch_task.callback;
+  }
+  errno = 0;
+  RunTaskWithWatchdogGuard(task);
+}
+
+int UnixTaskRunner::GetDelayMsToNextTaskLocked() const {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!immediate_tasks_.empty())
+    return 0;
+  if (!delayed_tasks_.empty()) {
+    TimeMillis diff = delayed_tasks_.begin()->first - GetWallTimeMs();
+    return std::max(0, static_cast<int>(diff.count()));
+  }
+  return -1;
+}
+
+void UnixTaskRunner::PostTask(std::function<void()> task) {
+  bool was_empty;
+  {
+    std::lock_guard<std::mutex> lock(lock_);
+    was_empty = immediate_tasks_.empty();
+    immediate_tasks_.push_back(std::move(task));
+  }
+  if (was_empty)
+    WakeUp();
+}
+
+void UnixTaskRunner::PostDelayedTask(std::function<void()> task,
+                                     uint32_t delay_ms) {
+  TimeMillis runtime = GetWallTimeMs() + TimeMillis(delay_ms);
+  {
+    std::lock_guard<std::mutex> lock(lock_);
+    delayed_tasks_.insert(std::make_pair(runtime, std::move(task)));
+  }
+  WakeUp();
+}
+
+void UnixTaskRunner::AddFileDescriptorWatch(PlatformHandle fd,
+                                            std::function<void()> task) {
+  PERFETTO_DCHECK(PlatformHandleChecker::IsValid(fd));
+  {
+    std::lock_guard<std::mutex> lock(lock_);
+    PERFETTO_DCHECK(!watch_tasks_.count(fd));
+    WatchTask& watch_task = watch_tasks_[fd];
+    watch_task.callback = std::move(task);
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    watch_task.pending = false;
+#else
+    watch_task.poll_fd_index = SIZE_MAX;
+#endif
+    watch_tasks_changed_ = true;
+  }
+  WakeUp();
+}
+
+void UnixTaskRunner::RemoveFileDescriptorWatch(PlatformHandle fd) {
+  PERFETTO_DCHECK(PlatformHandleChecker::IsValid(fd));
+  {
+    std::lock_guard<std::mutex> lock(lock_);
+    PERFETTO_DCHECK(watch_tasks_.count(fd));
+    watch_tasks_.erase(fd);
+    watch_tasks_changed_ = true;
+  }
+  // No need to schedule a wake-up for this.
+}
+
+bool UnixTaskRunner::RunsTasksOnCurrentThread() const {
+  return GetThreadId() == created_thread_id_;
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/subprocess.cc
+// gen_amalgamated begin header: include/perfetto/ext/base/subprocess.h
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_SUBPROCESS_H_
+#define INCLUDE_PERFETTO_EXT_BASE_SUBPROCESS_H_
+
+#include <condition_variable>
+#include <functional>
+#include <initializer_list>
+#include <mutex>
+#include <string>
+#include <thread>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/platform_handle.h"
+// gen_amalgamated expanded: #include "perfetto/base/proc_utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/event_fd.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/pipe.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+
+namespace perfetto {
+namespace base {
+
+// Handles creation and lifecycle management of subprocesses, taking care of
+// all subtleties involved in handling processes on UNIX.
+// This class allows to deal with macro two use-cases:
+// 1) fork() + exec() equivalent: for spawning a brand new process image.
+//    This happens when |args.exec_cmd| is not empty.
+//    This is safe to use even in a multi-threaded environment.
+// 2) fork(): for spawning a process and running a function.
+//    This happens when |args.posix_entrypoint_for_testing| is not empty.
+//    This is intended only for tests as it is extremely subtle.
+//    This mode must be used with extreme care. Before the entrypoint is
+//    invoked all file descriptors other than stdin/out/err and the ones
+//    specified in |args.preserve_fds| will be closed, to avoid each process
+//    retaining a dupe of other subprocesses pipes. This however means that
+//    any non trivial calls (including logging) must be avoided as they might
+//    refer to FDs that are now closed. The entrypoint should really be used
+//    just to signal a pipe or similar for synchronizing sequencing in tests.
+
+//
+// This class allows to control stdin/out/err pipe redirection and takes care
+// of keeping all the pipes pumped (stdin) / drained (stdout/err), in a similar
+// fashion of python's subprocess.Communicate()
+// stdin: is always piped and closed once the |args.input| buffer is written.
+// stdout/err can be either:
+//   - dup()ed onto the parent process stdout/err.
+//   - redirected onto /dev/null.
+//   - piped onto a buffer (see output() method). There is only one output
+//     buffer in total. If both stdout and stderr are set to kBuffer mode, they
+//     will be merged onto the same. There doesn't seem any use case where they
+//     are needed distinctly.
+//
+// Some caveats worth mentioning:
+// - It always waitpid()s, to avoid leaving zombies around. If the process is
+//   not terminated by the time the destructor is reached, the dtor will
+//   send a SIGKILL and wait for the termination.
+// - After fork()-ing it will close all file descriptors, preserving only
+//   stdin/out/err and the fds listed in |args.preserve_fds|.
+// - On Linux/Android, the child process will be SIGKILL-ed if the calling
+//   thread exists, even if the Subprocess is std::move()-d onto another thread.
+//   This happens by virtue PR_SET_PDEATHSIG, which is used to avoid that
+//   child processes are leaked in the case of a crash of the parent (frequent
+//   in tests). However, the child process might still be leaked if execing
+//   a setuid/setgid binary (see man 2 prctl).
+//
+// Usage:
+// base::Subprocess p({"/bin/cat", "-"});
+// (or equivalently:
+//     base::Subprocess p;
+//     p.args.exec_cmd.push_back("/bin/cat");
+//     p.args.exec_cmd.push_back("-");
+//  )
+// p.args.stdout_mode = base::Subprocess::kBuffer;
+// p.args.stderr_mode = base::Subprocess::kInherit;
+// p.args.input = "stdin contents";
+// p.Call();
+// (or equivalently:
+//     p.Start();
+//     p.Wait();
+// )
+// EXPECT_EQ(p.status(), base::Subprocess::kTerminated);
+// EXPECT_EQ(p.returncode(), 0);
+class Subprocess {
+ public:
+  enum Status {
+    kNotStarted = 0,  // Before calling Start() or Call().
+    kRunning,         // After calling Start(), before Wait().
+    kTerminated,      // The subprocess terminated, either successfully or not.
+                      // This includes crashes or other signals on UNIX.
+  };
+
+  enum OutputMode {
+    kInherit = 0,  // Inherit's the caller process stdout/stderr.
+    kDevNull,      // dup() onto /dev/null
+    kBuffer,       // dup() onto a pipe and move it into the output() buffer.
+    kFd,           // dup() onto the passed args.fd.
+  };
+
+  // Input arguments for configuring the subprocess behavior.
+  struct Args {
+    Args(std::initializer_list<std::string> _cmd = {}) : exec_cmd(_cmd) {}
+    Args(Args&&) noexcept;
+    Args& operator=(Args&&);
+    // If non-empty this will cause an exec() when Start()/Call() are called.
+    std::vector<std::string> exec_cmd;
+
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    // If non-empty, it changes the argv[0] argument passed to exec. If
+    // unset, argv[0] == exec_cmd[0]. This is to handle cases like:
+    // exec_cmd = {"/proc/self/exec"}, argv0: "my_custom_test_override".
+    std::string posix_argv0_override_for_testing;
+
+    // If non-empty this will be invoked on the fork()-ed child process, after
+    // stdin/out/err has been redirected and all other file descriptor are
+    // closed. It is valid to specify both |exec_cmd| AND
+    // |posix_entrypoint_for_testing|. In this case the latter will be invoked
+    // just before the exec() call, but after having closed all fds % stdin/o/e.
+    // This is for synchronization barriers in tests.
+    std::function<void()> posix_entrypoint_for_testing;
+#endif
+
+    // If non-empty, replaces the environment passed to exec().
+    std::vector<std::string> env;
+
+    // The file descriptors in this list will not be closed.
+    std::vector<int> preserve_fds;
+
+    // The data to push in the child process stdin.
+    std::string input;
+
+    OutputMode stdout_mode = kInherit;
+    OutputMode stderr_mode = kInherit;
+
+    base::ScopedPlatformHandle out_fd;
+
+    // Returns " ".join(exec_cmd), quoting arguments.
+    std::string GetCmdString() const;
+  };
+
+  struct ResourceUsage {
+    uint32_t cpu_utime_ms = 0;
+    uint32_t cpu_stime_ms = 0;
+    uint32_t max_rss_kb = 0;
+    uint32_t min_page_faults = 0;
+    uint32_t maj_page_faults = 0;
+    uint32_t vol_ctx_switch = 0;
+    uint32_t invol_ctx_switch = 0;
+
+    uint32_t cpu_time_ms() const { return cpu_utime_ms + cpu_stime_ms; }
+  };
+
+  explicit Subprocess(std::initializer_list<std::string> exec_cmd = {});
+  Subprocess(Subprocess&&) noexcept;
+  Subprocess& operator=(Subprocess&&);
+  ~Subprocess();  // It will KillAndWaitForTermination() if still alive.
+
+  // Starts the subprocess but doesn't wait for its termination. The caller
+  // is expected to either call Wait() or Poll() after this call.
+  void Start();
+
+  // Wait for process termination. Can be called more than once.
+  // Args:
+  //   |timeout_ms| = 0: wait indefinitely.
+  //   |timeout_ms| > 0: wait for at most |timeout_ms|.
+  // Returns:
+  //  True: The process terminated. See status() and returncode().
+  //  False: Timeout reached, the process is still running. In this case the
+  //         process will be left in the kRunning state.
+  bool Wait(int timeout_ms = 0);
+
+  // Equivalent of Start() + Wait();
+  // Returns true if the process exited cleanly with return code 0. False in
+  // any othe case.
+  bool Call(int timeout_ms = 0);
+
+  Status Poll();
+
+  // Sends a signal (SIGKILL if not specified) and wait for process termination.
+  void KillAndWaitForTermination(int sig_num = 0);
+
+  PlatformProcessId pid() const { return s_->pid; }
+
+  // The accessors below are updated only after a call to Poll(), Wait() or
+  // KillAndWaitForTermination().
+  // In most cases you want to call Poll() rather than these accessors.
+
+  Status status() const { return s_->status; }
+  int returncode() const { return s_->returncode; }
+  bool timed_out() const { return s_->timed_out; }
+
+  // This contains both stdout and stderr (if the corresponding _mode ==
+  // kBuffer). It's non-const so the caller can std::move() it.
+  std::string& output() { return s_->output; }
+  const std::string& output() const { return s_->output; }
+
+  const ResourceUsage& posix_rusage() const { return *s_->rusage; }
+
+  Args args;
+
+ private:
+  // The signal/exit code used when killing the process in case of a timeout.
+  static const int kTimeoutSignal;
+
+  Subprocess(const Subprocess&) = delete;
+  Subprocess& operator=(const Subprocess&) = delete;
+
+  // This is to deal robustly with the move operators, without having to
+  // manually maintain member-wise move instructions.
+  struct MovableState {
+    base::Pipe stdin_pipe;
+    base::Pipe stdouterr_pipe;
+    PlatformProcessId pid;
+    Status status = kNotStarted;
+    int returncode = -1;
+    std::string output;  // Stdin+stderr. Only when kBuffer.
+    std::unique_ptr<ResourceUsage> rusage{new ResourceUsage()};
+    bool timed_out = false;
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    std::thread stdouterr_thread;
+    std::thread stdin_thread;
+    ScopedPlatformHandle win_proc_handle;
+    ScopedPlatformHandle win_thread_handle;
+
+    base::EventFd stdouterr_done_event;
+    std::mutex mutex;  // Protects locked_outerr_buf and the two pipes.
+    std::string locked_outerr_buf;
+#else
+    base::Pipe exit_status_pipe;
+    size_t input_written = 0;
+    std::thread waitpid_thread;
+#endif
+  };
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  static void StdinThread(MovableState*, std::string input);
+  static void StdoutErrThread(MovableState*);
+#else
+  void TryPushStdin();
+  void TryReadStdoutAndErr();
+  void TryReadExitStatus();
+  void KillAtMostOnce();
+  bool PollInternal(int poll_timeout_ms);
+#endif
+
+  std::unique_ptr<MovableState> s_;
+};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_SUBPROCESS_H_
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/subprocess.h"
+
+#include <tuple>
+
+// This file contains only the common bits (ctors / dtors / move operators).
+// The rest lives in subprocess_posix.cc and subprocess_windows.cc.
+
+namespace perfetto {
+namespace base {
+
+Subprocess::Args::Args(Args&&) noexcept = default;
+Subprocess::Args& Subprocess::Args::operator=(Args&&) = default;
+
+Subprocess::Subprocess(std::initializer_list<std::string> a)
+    : args(a), s_(new MovableState()) {}
+
+Subprocess::Subprocess(Subprocess&& other) noexcept {
+  static_assert(sizeof(Subprocess) ==
+                    sizeof(std::tuple<std::unique_ptr<MovableState>, Args>),
+                "base::Subprocess' move ctor needs updating");
+  s_ = std::move(other.s_);
+  args = std::move(other.args);
+
+  // Reset the state of the moved-from object.
+  other.s_.reset(new MovableState());
+  other.~Subprocess();
+  new (&other) Subprocess();
+}
+
+Subprocess& Subprocess::operator=(Subprocess&& other) {
+  this->~Subprocess();
+  new (this) Subprocess(std::move(other));
+  return *this;
+}
+
+Subprocess::~Subprocess() {
+  if (s_->status == kRunning)
+    KillAndWaitForTermination();
+}
+
+bool Subprocess::Call(int timeout_ms) {
+  PERFETTO_CHECK(s_->status == kNotStarted);
+  Start();
+
+  if (!Wait(timeout_ms)) {
+    s_->timed_out = true;
+    KillAndWaitForTermination(kTimeoutSignal);
+  }
+  PERFETTO_DCHECK(s_->status != kRunning);
+  return s_->status == kTerminated && s_->returncode == 0;
+}
+
+std::string Subprocess::Args::GetCmdString() const {
+  std::string str;
+  for (size_t i = 0; i < exec_cmd.size(); i++) {
+    str += i > 0 ? " \"" : "";
+    str += exec_cmd[i];
+    str += i > 0 ? "\"" : "";
+  }
+  return str;
+}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/base/subprocess_posix.cc
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/subprocess.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) ||   \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+
+#include <fcntl.h>
+#include <poll.h>
+#include <signal.h>
+#include <stdio.h>
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <algorithm>
+#include <thread>
+#include <tuple>
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+#include <sys/prctl.h>
+#endif
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/time.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+
+// In MacOS this is not defined in any header.
+extern "C" char** environ;
+
+namespace perfetto {
+namespace base {
+
+namespace {
+
+struct ChildProcessArgs {
+  Subprocess::Args* create_args;
+  const char* exec_cmd = nullptr;
+  std::vector<char*> argv;
+  std::vector<char*> env;
+  int stdin_pipe_rd = -1;
+  int stdouterr_pipe_wr = -1;
+};
+
+// Don't add any dynamic allocation in this function. This will be invoked
+// under a fork(), potentially in a state where the allocator lock is held.
+void __attribute__((noreturn)) ChildProcess(ChildProcessArgs* args) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+  // In no case we want a child process to outlive its parent process. This is
+  // relevant for tests, so that a test failure/crash doesn't leave child
+  // processes around that get reparented to init.
+  prctl(PR_SET_PDEATHSIG, SIGKILL);
+#endif
+
+  auto die = [args](const char* err) __attribute__((noreturn)) {
+    base::ignore_result(write(args->stdouterr_pipe_wr, err, strlen(err)));
+    base::ignore_result(write(args->stdouterr_pipe_wr, "\n", 1));
+    // From https://www.gnu.org/software/libc/manual/html_node/Exit-Status.html
+    // "In particular, the value 128 is used to indicate failure to execute
+    // another program in a subprocess. This convention is not universally
+    // obeyed, but it is a good idea to follow it in your programs."
+    _exit(128);
+  };
+
+  auto set_fd_close_on_exec = [&die](int fd, bool close_on_exec) {
+    int flags = fcntl(fd, F_GETFD, 0);
+    if (flags < 0)
+      die("fcntl(F_GETFD) failed");
+    flags = close_on_exec ? (flags | FD_CLOEXEC) : (flags & ~FD_CLOEXEC);
+    if (fcntl(fd, F_SETFD, flags) < 0)
+      die("fcntl(F_SETFD) failed");
+  };
+
+  if (getppid() == 1)
+    die("terminating because parent process died");
+
+  if (dup2(args->stdin_pipe_rd, STDIN_FILENO) == -1)
+    die("Failed to dup2(STDIN)");
+  close(args->stdin_pipe_rd);
+
+  switch (args->create_args->stdout_mode) {
+    case Subprocess::kInherit:
+      break;
+    case Subprocess::kDevNull: {
+      if (dup2(open("/dev/null", O_RDWR), STDOUT_FILENO) == -1)
+        die("Failed to dup2(STDOUT)");
+      break;
+    }
+    case Subprocess::kBuffer:
+      if (dup2(args->stdouterr_pipe_wr, STDOUT_FILENO) == -1)
+        die("Failed to dup2(STDOUT)");
+      break;
+    case Subprocess::kFd:
+      if (dup2(*args->create_args->out_fd, STDOUT_FILENO) == -1)
+        die("Failed to dup2(STDOUT)");
+      break;
+  }
+
+  switch (args->create_args->stderr_mode) {
+    case Subprocess::kInherit:
+      break;
+    case Subprocess::kDevNull: {
+      if (dup2(open("/dev/null", O_RDWR), STDERR_FILENO) == -1)
+        die("Failed to dup2(STDERR)");
+      break;
+    }
+    case Subprocess::kBuffer:
+      if (dup2(args->stdouterr_pipe_wr, STDERR_FILENO) == -1)
+        die("Failed to dup2(STDERR)");
+      break;
+    case Subprocess::kFd:
+      if (dup2(*args->create_args->out_fd, STDERR_FILENO) == -1)
+        die("Failed to dup2(STDERR)");
+      break;
+  }
+
+  // Close all FDs % stdin/out/err and the ones that the client explicitly
+  // asked to retain. The reason for this is twofold:
+  // 1. For exec-only (i.e. entrypoint == empty) cases: it avoids leaking FDs
+  //    that didn't get marked as O_CLOEXEC by accident.
+  // 2. In fork() mode (entrypoint not empty) avoids retaining a dup of eventfds
+  //    that would prevent the parent process to receive EOFs (tests usually use
+  //    pipes as a synchronization mechanism between subprocesses).
+  const auto& preserve_fds = args->create_args->preserve_fds;
+  for (int i = 0; i < 512; i++) {
+    if (i != STDIN_FILENO && i != STDERR_FILENO && i != STDOUT_FILENO &&
+        i != args->stdouterr_pipe_wr &&
+        !std::count(preserve_fds.begin(), preserve_fds.end(), i)) {
+      close(i);
+    }
+  }
+
+  // Clears O_CLOEXEC from stdin/out/err. These are the only FDs that we want
+  // to be preserved after the exec().
+  set_fd_close_on_exec(STDIN_FILENO, false);
+  set_fd_close_on_exec(STDOUT_FILENO, false);
+  set_fd_close_on_exec(STDERR_FILENO, false);
+
+  // If the caller specified a std::function entrypoint, run that first.
+  if (args->create_args->posix_entrypoint_for_testing)
+    args->create_args->posix_entrypoint_for_testing();
+
+  // If the caller specified only an entrypoint, without any args, exit now.
+  // Otherwise proceed with the exec() below.
+  if (!args->exec_cmd)
+    _exit(0);
+
+  // If |args[0]| is a path use execv() (which takes a path), othewise use
+  // exevp(), which uses the shell and follows PATH.
+  if (strchr(args->exec_cmd, '/')) {
+    char** env = args->env.empty() ? environ : args->env.data();
+    execve(args->exec_cmd, args->argv.data(), env);
+  } else {
+    // There is no execvpe() on Mac.
+    if (!args->env.empty())
+      die("A full path is required for |exec_cmd| when setting |env|");
+    execvp(args->exec_cmd, args->argv.data());
+  }
+
+  // Reached only if execv fails.
+  die("execve() failed");
+}
+
+}  // namespace
+
+// static
+const int Subprocess::kTimeoutSignal = SIGKILL;
+
+void Subprocess::Start() {
+  ChildProcessArgs proc_args;
+  proc_args.create_args = &args;
+
+  // Setup argv.
+  if (!args.exec_cmd.empty()) {
+    proc_args.exec_cmd = args.exec_cmd[0].c_str();
+    for (const std::string& arg : args.exec_cmd)
+      proc_args.argv.push_back(const_cast<char*>(arg.c_str()));
+    proc_args.argv.push_back(nullptr);
+
+    if (!args.posix_argv0_override_for_testing.empty()) {
+      proc_args.argv[0] =
+          const_cast<char*>(args.posix_argv0_override_for_testing.c_str());
+    }
+  }
+
+  // Setup env.
+  if (!args.env.empty()) {
+    for (const std::string& str : args.env)
+      proc_args.env.push_back(const_cast<char*>(str.c_str()));
+    proc_args.env.push_back(nullptr);
+  }
+
+  // Setup the pipes for stdin/err redirection.
+  s_->stdin_pipe = base::Pipe::Create(base::Pipe::kWrNonBlock);
+  proc_args.stdin_pipe_rd = *s_->stdin_pipe.rd;
+  s_->stdouterr_pipe = base::Pipe::Create(base::Pipe::kRdNonBlock);
+  proc_args.stdouterr_pipe_wr = *s_->stdouterr_pipe.wr;
+
+  // Spawn the child process that will exec().
+  s_->pid = fork();
+  PERFETTO_CHECK(s_->pid >= 0);
+  if (s_->pid == 0) {
+    // Close the parent-ends of the pipes.
+    s_->stdin_pipe.wr.reset();
+    s_->stdouterr_pipe.rd.reset();
+    ChildProcess(&proc_args);
+    // ChildProcess() doesn't return, not even in case of failures.
+    PERFETTO_FATAL("not reached");
+  }
+
+  s_->status = kRunning;
+
+  // Close the child-end of the pipes.
+  // Deliberately NOT closing the s_->stdin_pipe.rd. This is to avoid crashing
+  // with a SIGPIPE if the process exits without consuming its stdin, while
+  // the parent tries to write() on the other end of the stdin pipe.
+  s_->stdouterr_pipe.wr.reset();
+  proc_args.create_args->out_fd.reset();
+
+  // Spawn a thread that is blocked on waitpid() and writes the termination
+  // status onto a pipe. The problem here is that waipid() doesn't have a
+  // timeout option and can't be passed to poll(). The alternative would be
+  // using a SIGCHLD handler, but anecdotally signal handlers introduce more
+  // problems than what they solve.
+  s_->exit_status_pipe = base::Pipe::Create(base::Pipe::kRdNonBlock);
+
+  // Both ends of the pipe are closed after the thread.join().
+  int pid = s_->pid;
+  int exit_status_pipe_wr = s_->exit_status_pipe.wr.release();
+  auto* rusage = s_->rusage.get();
+  s_->waitpid_thread = std::thread([pid, exit_status_pipe_wr, rusage] {
+    int pid_stat = -1;
+    struct rusage usg {};
+    int wait_res = PERFETTO_EINTR(wait4(pid, &pid_stat, 0, &usg));
+    PERFETTO_CHECK(wait_res == pid);
+
+    auto tv_to_ms = [](const struct timeval& tv) {
+      return static_cast<uint32_t>(tv.tv_sec * 1000 + tv.tv_usec / 1000);
+    };
+    rusage->cpu_utime_ms = tv_to_ms(usg.ru_utime);
+    rusage->cpu_stime_ms = tv_to_ms(usg.ru_stime);
+    rusage->max_rss_kb = static_cast<uint32_t>(usg.ru_maxrss) / 1000;
+    rusage->min_page_faults = static_cast<uint32_t>(usg.ru_minflt);
+    rusage->maj_page_faults = static_cast<uint32_t>(usg.ru_majflt);
+    rusage->vol_ctx_switch = static_cast<uint32_t>(usg.ru_nvcsw);
+    rusage->invol_ctx_switch = static_cast<uint32_t>(usg.ru_nivcsw);
+
+    base::ignore_result(PERFETTO_EINTR(
+        write(exit_status_pipe_wr, &pid_stat, sizeof(pid_stat))));
+    PERFETTO_CHECK(close(exit_status_pipe_wr) == 0 || errno == EINTR);
+  });
+}
+
+Subprocess::Status Subprocess::Poll() {
+  if (s_->status != kRunning)
+    return s_->status;  // Nothing to poll.
+  while (PollInternal(0 /* don't block*/)) {
+  }
+  return s_->status;
+}
+
+// |timeout_ms| semantic:
+//   -1: Block indefinitely.
+//    0: Don't block, return immediately.
+//   >0: Block for at most X ms.
+// Returns:
+//  True: Read at least one fd (so there might be more queued).
+//  False: if all fds reached quiescent (no data to read/write).
+bool Subprocess::PollInternal(int poll_timeout_ms) {
+  struct pollfd fds[3]{};
+  size_t num_fds = 0;
+  if (s_->exit_status_pipe.rd) {
+    fds[num_fds].fd = *s_->exit_status_pipe.rd;
+    fds[num_fds].events = POLLIN;
+    num_fds++;
+  }
+  if (s_->stdouterr_pipe.rd) {
+    fds[num_fds].fd = *s_->stdouterr_pipe.rd;
+    fds[num_fds].events = POLLIN;
+    num_fds++;
+  }
+  if (s_->stdin_pipe.wr) {
+    fds[num_fds].fd = *s_->stdin_pipe.wr;
+    fds[num_fds].events = POLLOUT;
+    num_fds++;
+  }
+
+  if (num_fds == 0)
+    return false;
+
+  auto nfds = static_cast<nfds_t>(num_fds);
+  int poll_res = PERFETTO_EINTR(poll(fds, nfds, poll_timeout_ms));
+  PERFETTO_CHECK(poll_res >= 0);
+
+  TryReadStdoutAndErr();
+  TryPushStdin();
+  TryReadExitStatus();
+
+  return poll_res > 0;
+}
+
+bool Subprocess::Wait(int timeout_ms) {
+  PERFETTO_CHECK(s_->status != kNotStarted);
+
+  // Break out of the loop only after both conditions are satisfied:
+  // - All stdout/stderr data has been read (if kBuffer).
+  // - The process exited.
+  // Note that the two events can happen arbitrary order. After the process
+  // exits, there might be still data in the pipe buffer, which we want to
+  // read fully.
+  //
+  // Instead, don't wait on the stdin to be fully written. The child process
+  // might exit prematurely (or crash). If that happens, we can end up in a
+  // state where the write(stdin_pipe_.wr) will never unblock.
+
+  const int64_t t_start = base::GetWallTimeMs().count();
+  while (s_->exit_status_pipe.rd || s_->stdouterr_pipe.rd) {
+    int poll_timeout_ms = -1;  // Block until a FD is ready.
+    if (timeout_ms > 0) {
+      const int64_t now = GetWallTimeMs().count();
+      poll_timeout_ms = timeout_ms - static_cast<int>(now - t_start);
+      if (poll_timeout_ms <= 0)
+        return false;
+    }
+    PollInternal(poll_timeout_ms);
+  }  // while(...)
+  return true;
+}
+
+void Subprocess::TryReadExitStatus() {
+  if (!s_->exit_status_pipe.rd)
+    return;
+
+  int pid_stat = -1;
+  int64_t rsize = PERFETTO_EINTR(
+      read(*s_->exit_status_pipe.rd, &pid_stat, sizeof(pid_stat)));
+  if (rsize < 0 && errno == EAGAIN)
+    return;
+
+  if (rsize > 0) {
+    PERFETTO_CHECK(rsize == sizeof(pid_stat));
+  } else if (rsize < 0) {
+    PERFETTO_PLOG("Subprocess read(s_->exit_status_pipe) failed");
+  }
+  s_->waitpid_thread.join();
+  s_->exit_status_pipe.rd.reset();
+
+  s_->status = kTerminated;
+  if (WIFEXITED(pid_stat)) {
+    s_->returncode = WEXITSTATUS(pid_stat);
+  } else if (WIFSIGNALED(pid_stat)) {
+    s_->returncode = 128 + WTERMSIG(pid_stat);  // Follow bash convention.
+  } else {
+    PERFETTO_FATAL("waitpid() returned an unexpected value (0x%x)", pid_stat);
+  }
+}
+
+// If the stidn pipe is still open, push input data and close it at the end.
+void Subprocess::TryPushStdin() {
+  if (!s_->stdin_pipe.wr)
+    return;
+
+  PERFETTO_DCHECK(args.input.empty() || s_->input_written < args.input.size());
+  if (!args.input.empty()) {
+    int64_t wsize =
+        PERFETTO_EINTR(write(*s_->stdin_pipe.wr, &args.input[s_->input_written],
+                             args.input.size() - s_->input_written));
+    if (wsize < 0 && errno == EAGAIN)
+      return;
+
+    if (wsize >= 0) {
+      // Whether write() can return 0 is one of the greatest mysteries of UNIX.
+      // Just ignore it.
+      s_->input_written += static_cast<size_t>(wsize);
+    } else {
+      PERFETTO_PLOG("Subprocess write(stdin) failed");
+      s_->stdin_pipe.wr.reset();
+    }
+  }
+  PERFETTO_DCHECK(s_->input_written <= args.input.size());
+  if (s_->input_written == args.input.size())
+    s_->stdin_pipe.wr.reset();  // Close stdin.
+}
+
+void Subprocess::TryReadStdoutAndErr() {
+  if (!s_->stdouterr_pipe.rd)
+    return;
+  char buf[4096];
+  int64_t rsize =
+      PERFETTO_EINTR(read(*s_->stdouterr_pipe.rd, buf, sizeof(buf)));
+  if (rsize < 0 && errno == EAGAIN)
+    return;
+
+  if (rsize > 0) {
+    s_->output.append(buf, static_cast<size_t>(rsize));
+  } else if (rsize == 0 /* EOF */) {
+    s_->stdouterr_pipe.rd.reset();
+  } else {
+    PERFETTO_PLOG("Subprocess read(stdout/err) failed");
+    s_->stdouterr_pipe.rd.reset();
+  }
+}
+
+void Subprocess::KillAndWaitForTermination(int sig_num) {
+  kill(s_->pid, sig_num ? sig_num : SIGKILL);
+  Wait();
+  // TryReadExitStatus must have joined the thread.
+  PERFETTO_DCHECK(!s_->waitpid_thread.joinable());
+}
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // PERFETTO_OS_LINUX || PERFETTO_OS_ANDROID || PERFETTO_OS_APPLE
+// gen_amalgamated begin source: src/base/subprocess_windows.cc
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/subprocess.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+
+#include <stdio.h>
+
+#include <algorithm>
+#include <mutex>
+#include <tuple>
+
+#include <Windows.h>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/time.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/pipe.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+
+namespace perfetto {
+namespace base {
+
+// static
+const int Subprocess::kTimeoutSignal = static_cast<int>(STATUS_TIMEOUT);
+
+void Subprocess::Start() {
+  if (args.exec_cmd.empty()) {
+    PERFETTO_ELOG("Subprocess.exec_cmd cannot be empty on Windows");
+    return;
+  }
+
+  // Quote arguments but only when ambiguous. When quoting, CreateProcess()
+  // assumes that the command is an absolute path and does not search in the
+  // %PATH%. If non quoted, instead, CreateProcess() tries both. This is to
+  // allow Subprocess("cmd.exe", "/c", "shell command").
+  std::string cmd;
+  for (const auto& part : args.exec_cmd) {
+    if (part.find(" ") != std::string::npos) {
+      cmd += "\"" + part + "\" ";
+    } else {
+      cmd += part + " ";
+    }
+  }
+  // Remove trailing space.
+  if (!cmd.empty())
+    cmd.resize(cmd.size() - 1);
+
+  s_->stdin_pipe = Pipe::Create();
+  // Allow the child process to inherit the other end of the pipe.
+  PERFETTO_CHECK(
+      ::SetHandleInformation(*s_->stdin_pipe.rd, HANDLE_FLAG_INHERIT, 1));
+
+  if (args.stderr_mode == kBuffer || args.stdout_mode == kBuffer) {
+    s_->stdouterr_pipe = Pipe::Create();
+    PERFETTO_CHECK(
+        ::SetHandleInformation(*s_->stdouterr_pipe.wr, HANDLE_FLAG_INHERIT, 1));
+  }
+
+  ScopedPlatformHandle nul_handle;
+  if (args.stderr_mode == kDevNull || args.stdout_mode == kDevNull) {
+    nul_handle.reset(::CreateFileA("NUL", GENERIC_WRITE, FILE_SHARE_WRITE,
+                                   nullptr, OPEN_EXISTING,
+                                   FILE_ATTRIBUTE_NORMAL, nullptr));
+    PERFETTO_CHECK(::SetHandleInformation(*nul_handle, HANDLE_FLAG_INHERIT, 1));
+  }
+
+  PROCESS_INFORMATION proc_info{};
+  STARTUPINFOA start_info{};
+  start_info.cb = sizeof(STARTUPINFOA);
+
+  if (args.stderr_mode == kInherit) {
+    start_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE);
+  } else if (args.stderr_mode == kBuffer) {
+    start_info.hStdError = *s_->stdouterr_pipe.wr;
+  } else if (args.stderr_mode == kDevNull) {
+    start_info.hStdError = *nul_handle;
+  } else if (args.stderr_mode == kFd) {
+    PERFETTO_CHECK(
+        ::SetHandleInformation(*args.out_fd, HANDLE_FLAG_INHERIT, 1));
+    start_info.hStdError = *args.out_fd;
+  } else {
+    PERFETTO_CHECK(false);
+  }
+
+  if (args.stdout_mode == kInherit) {
+    start_info.hStdOutput = ::GetStdHandle(STD_OUTPUT_HANDLE);
+  } else if (args.stdout_mode == kBuffer) {
+    start_info.hStdOutput = *s_->stdouterr_pipe.wr;
+  } else if (args.stdout_mode == kDevNull) {
+    start_info.hStdOutput = *nul_handle;
+  } else if (args.stdout_mode == kFd) {
+    PERFETTO_CHECK(
+        ::SetHandleInformation(*args.out_fd, HANDLE_FLAG_INHERIT, 1));
+    start_info.hStdOutput = *args.out_fd;
+  } else {
+    PERFETTO_CHECK(false);
+  }
+
+  start_info.hStdInput = *s_->stdin_pipe.rd;
+  start_info.dwFlags |= STARTF_USESTDHANDLES;
+
+  // Create the child process.
+  bool success =
+      ::CreateProcessA(nullptr,      // App name. Needs to be null to use PATH.
+                       &cmd[0],      // Command line.
+                       nullptr,      // Process security attributes.
+                       nullptr,      // Primary thread security attributes.
+                       true,         // Handles are inherited.
+                       0,            // Flags.
+                       nullptr,      // Use parent's environment.
+                       nullptr,      // Use parent's current directory.
+                       &start_info,  // STARTUPINFO pointer.
+                       &proc_info);  // Receives PROCESS_INFORMATION.
+
+  // Close on our side the pipe ends that we passed to the child process.
+  s_->stdin_pipe.rd.reset();
+  s_->stdouterr_pipe.wr.reset();
+  args.out_fd.reset();
+
+  if (!success) {
+    s_->returncode = ERROR_FILE_NOT_FOUND;
+    s_->status = kTerminated;
+    s_->stdin_pipe.wr.reset();
+    s_->stdouterr_pipe.rd.reset();
+    PERFETTO_ELOG("CreateProcess failed: %lx, cmd: %s", GetLastError(),
+                  &cmd[0]);
+    return;
+  }
+
+  s_->pid = proc_info.dwProcessId;
+  s_->win_proc_handle = ScopedPlatformHandle(proc_info.hProcess);
+  s_->win_thread_handle = ScopedPlatformHandle(proc_info.hThread);
+  s_->status = kRunning;
+
+  MovableState* s = s_.get();
+  s_->stdin_thread = std::thread(&Subprocess::StdinThread, s, args.input);
+
+  if (args.stderr_mode == kBuffer || args.stdout_mode == kBuffer) {
+    PERFETTO_DCHECK(s_->stdouterr_pipe.rd);
+    s_->stdouterr_thread = std::thread(&Subprocess::StdoutErrThread, s);
+  }
+}
+
+// static
+void Subprocess::StdinThread(MovableState* s, std::string input) {
+  size_t input_written = 0;
+  while (input_written < input.size()) {
+    DWORD wsize = 0;
+    if (::WriteFile(*s->stdin_pipe.wr, input.data() + input_written,
+                    static_cast<DWORD>(input.size() - input_written), &wsize,
+                    nullptr)) {
+      input_written += wsize;
+    } else {
+      // ERROR_BROKEN_PIPE is WAI when the child just closes stdin and stops
+      // accepting input.
+      auto err = ::GetLastError();
+      if (err != ERROR_BROKEN_PIPE)
+        PERFETTO_PLOG("Subprocess WriteFile(stdin) failed %lx", err);
+      break;
+    }
+  }  // while(...)
+  std::unique_lock<std::mutex> lock(s->mutex);
+  s->stdin_pipe.wr.reset();
+}
+
+// static
+void Subprocess::StdoutErrThread(MovableState* s) {
+  char buf[4096];
+  for (;;) {
+    DWORD rsize = 0;
+    bool res =
+        ::ReadFile(*s->stdouterr_pipe.rd, buf, sizeof(buf), &rsize, nullptr);
+    if (!res) {
+      auto err = GetLastError();
+      if (err != ERROR_BROKEN_PIPE)
+        PERFETTO_PLOG("Subprocess ReadFile(stdouterr) failed %ld", err);
+    }
+
+    if (rsize > 0) {
+      std::unique_lock<std::mutex> lock(s->mutex);
+      s->locked_outerr_buf.append(buf, static_cast<size_t>(rsize));
+    } else {  // EOF or some error.
+      break;
+    }
+  }  // For(..)
+
+  // Close the stdouterr_pipe. The main loop looks at the pipe closure to
+  // determine whether the stdout/err thread has completed.
+  {
+    std::unique_lock<std::mutex> lock(s->mutex);
+    s->stdouterr_pipe.rd.reset();
+  }
+  s->stdouterr_done_event.Notify();
+}
+
+Subprocess::Status Subprocess::Poll() {
+  if (s_->status != kRunning)
+    return s_->status;  // Nothing to poll.
+  Wait(1 /*ms*/);
+  return s_->status;
+}
+
+bool Subprocess::Wait(int timeout_ms) {
+  PERFETTO_CHECK(s_->status != kNotStarted);
+  const bool wait_forever = timeout_ms == 0;
+  const int64_t wait_start_ms = base::GetWallTimeMs().count();
+
+  // Break out of the loop only after both conditions are satisfied:
+  // - All stdout/stderr data has been read (if kBuffer).
+  // - The process exited.
+  // Note that the two events can happen arbitrary order. After the process
+  // exits, there might be still data in the pipe buffer, which we want to
+  // read fully.
+  // Note also that stdout/err might be "complete" before starting, if neither
+  // is operating in kBuffer mode. In that case we just want to wait for the
+  // process termination.
+  //
+  // Instead, don't wait on the stdin to be fully written. The child process
+  // might exit prematurely (or crash). If that happens, we can end up in a
+  // state where the write(stdin_pipe_.wr) will never unblock.
+  bool stdouterr_complete = false;
+  for (;;) {
+    HANDLE wait_handles[2]{};
+    DWORD num_handles = 0;
+
+    // Check if the process exited.
+    bool process_exited = !s_->win_proc_handle;
+    if (!process_exited) {
+      DWORD exit_code = STILL_ACTIVE;
+      PERFETTO_CHECK(::GetExitCodeProcess(*s_->win_proc_handle, &exit_code));
+      if (exit_code != STILL_ACTIVE) {
+        s_->returncode = static_cast<int>(exit_code);
+        s_->status = kTerminated;
+        s_->win_proc_handle.reset();
+        s_->win_thread_handle.reset();
+        process_exited = true;
+      }
+    } else {
+      PERFETTO_DCHECK(s_->status != kRunning);
+    }
+    if (!process_exited) {
+      wait_handles[num_handles++] = *s_->win_proc_handle;
+    }
+
+    // Check if there is more output and if the stdout/err pipe has been closed.
+    {
+      std::unique_lock<std::mutex> lock(s_->mutex);
+      // Move the output from the internal buffer shared with the
+      // stdouterr_thread to the final buffer exposed to the client.
+      if (!s_->locked_outerr_buf.empty()) {
+        s_->output.append(std::move(s_->locked_outerr_buf));
+        s_->locked_outerr_buf.clear();
+      }
+      stdouterr_complete = !s_->stdouterr_pipe.rd;
+      if (!stdouterr_complete) {
+        wait_handles[num_handles++] = s_->stdouterr_done_event.fd();
+      }
+    }  // lock(s_->mutex)
+
+    if (num_handles == 0) {
+      PERFETTO_DCHECK(process_exited && stdouterr_complete);
+      break;
+    }
+
+    DWORD wait_ms;  // Note: DWORD is unsigned.
+    if (wait_forever) {
+      wait_ms = INFINITE;
+    } else {
+      const int64_t now = GetWallTimeMs().count();
+      const int64_t wait_left_ms = timeout_ms - (now - wait_start_ms);
+      if (wait_left_ms <= 0)
+        return false;  // Timed out
+      wait_ms = static_cast<DWORD>(wait_left_ms);
+    }
+
+    auto wait_res =
+        ::WaitForMultipleObjects(num_handles, wait_handles, false, wait_ms);
+    PERFETTO_CHECK(wait_res != WAIT_FAILED);
+  }
+
+  PERFETTO_DCHECK(!s_->win_proc_handle);
+  PERFETTO_DCHECK(!s_->win_thread_handle);
+
+  if (s_->stdin_thread.joinable())  // Might not exist if CreateProcess failed.
+    s_->stdin_thread.join();
+  if (s_->stdouterr_thread.joinable())
+    s_->stdouterr_thread.join();
+
+  // The stdin pipe is closed by the dedicated stdin thread. However if that is
+  // not started (e.g. because of no redirection) force close it now. Needs to
+  // happen after the join() to be thread safe.
+  s_->stdin_pipe.wr.reset();
+  s_->stdouterr_pipe.rd.reset();
+
+  return true;
+}
+
+void Subprocess::KillAndWaitForTermination(int exit_code) {
+  auto code = exit_code ? static_cast<DWORD>(exit_code) : STATUS_CONTROL_C_EXIT;
+  ::TerminateProcess(*s_->win_proc_handle, code);
+  Wait();
+  // TryReadExitStatus must have joined the threads.
+  PERFETTO_DCHECK(!s_->stdin_thread.joinable());
+  PERFETTO_DCHECK(!s_->stdouterr_thread.joinable());
+}
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // PERFETTO_OS_WIN
+// gen_amalgamated begin source: src/protozero/field.cc
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+#if !PERFETTO_IS_LITTLE_ENDIAN()
+// The memcpy() for fixed32/64 below needs to be adjusted if we want to
+// support big endian CPUs. There doesn't seem to be a compelling need today.
+#error Unimplemented for big endian archs.
+#endif
+
+namespace protozero {
+
+template <typename Container>
+void Field::SerializeAndAppendToInternal(Container* dst) const {
+  namespace pu = proto_utils;
+  size_t initial_size = dst->size();
+  dst->resize(initial_size + pu::kMaxSimpleFieldEncodedSize + size_);
+  uint8_t* start = reinterpret_cast<uint8_t*>(&(*dst)[initial_size]);
+  uint8_t* wptr = start;
+  switch (type_) {
+    case static_cast<int>(pu::ProtoWireType::kVarInt): {
+      wptr = pu::WriteVarInt(pu::MakeTagVarInt(id_), wptr);
+      wptr = pu::WriteVarInt(int_value_, wptr);
+      break;
+    }
+    case static_cast<int>(pu::ProtoWireType::kFixed32): {
+      wptr = pu::WriteVarInt(pu::MakeTagFixed<uint32_t>(id_), wptr);
+      uint32_t value32 = static_cast<uint32_t>(int_value_);
+      memcpy(wptr, &value32, sizeof(value32));
+      wptr += sizeof(uint32_t);
+      break;
+    }
+    case static_cast<int>(pu::ProtoWireType::kFixed64): {
+      wptr = pu::WriteVarInt(pu::MakeTagFixed<uint64_t>(id_), wptr);
+      memcpy(wptr, &int_value_, sizeof(int_value_));
+      wptr += sizeof(uint64_t);
+      break;
+    }
+    case static_cast<int>(pu::ProtoWireType::kLengthDelimited): {
+      ConstBytes payload = as_bytes();
+      wptr = pu::WriteVarInt(pu::MakeTagLengthDelimited(id_), wptr);
+      wptr = pu::WriteVarInt(payload.size, wptr);
+      memcpy(wptr, payload.data, payload.size);
+      wptr += payload.size;
+      break;
+    }
+    default:
+      PERFETTO_FATAL("Unknown field type %u", type_);
+  }
+  size_t written_size = static_cast<size_t>(wptr - start);
+  PERFETTO_DCHECK(written_size > 0 && written_size < pu::kMaxMessageLength);
+  PERFETTO_DCHECK(initial_size + written_size <= dst->size());
+  dst->resize(initial_size + written_size);
+}
+
+void Field::SerializeAndAppendTo(std::string* dst) const {
+  SerializeAndAppendToInternal(dst);
+}
+
+void Field::SerializeAndAppendTo(std::vector<uint8_t>* dst) const {
+  SerializeAndAppendToInternal(dst);
+}
+
+}  // namespace protozero
+// gen_amalgamated begin source: src/protozero/message.cc
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+
+#include <atomic>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message_arena.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
+
+#if !PERFETTO_IS_LITTLE_ENDIAN()
+// The memcpy() for float and double below needs to be adjusted if we want to
+// support big endian CPUs. There doesn't seem to be a compelling need today.
+#error Unimplemented for big endian archs.
+#endif
+
+namespace protozero {
+
+namespace {
+
+#if PERFETTO_DCHECK_IS_ON()
+std::atomic<uint32_t> g_generation;
+#endif
+
+}  // namespace
+
+// Do NOT put any code in the constructor or use default initialization.
+// Use the Reset() method below instead.
+
+// This method is called to initialize both root and nested messages.
+void Message::Reset(ScatteredStreamWriter* stream_writer, MessageArena* arena) {
+// Older versions of libstdcxx don't have is_trivially_constructible.
+#if !defined(__GLIBCXX__) || __GLIBCXX__ >= 20170516
+  static_assert(std::is_trivially_constructible<Message>::value,
+                "Message must be trivially constructible");
+#endif
+
+  static_assert(std::is_trivially_destructible<Message>::value,
+                "Message must be trivially destructible");
+  stream_writer_ = stream_writer;
+  arena_ = arena;
+  size_ = 0;
+  size_field_ = nullptr;
+  size_already_written_ = 0;
+  nested_message_ = nullptr;
+  finalized_ = false;
+#if PERFETTO_DCHECK_IS_ON()
+  handle_ = nullptr;
+  generation_ = g_generation.fetch_add(1, std::memory_order_relaxed);
+#endif
+}
+
+void Message::AppendString(uint32_t field_id, const char* str) {
+  AppendBytes(field_id, str, strlen(str));
+}
+
+void Message::AppendBytes(uint32_t field_id, const void* src, size_t size) {
+  if (nested_message_)
+    EndNestedMessage();
+
+  PERFETTO_DCHECK(size < proto_utils::kMaxMessageLength);
+  // Write the proto preamble (field id, type and length of the field).
+  uint8_t buffer[proto_utils::kMaxSimpleFieldEncodedSize];
+  uint8_t* pos = buffer;
+  pos = proto_utils::WriteVarInt(proto_utils::MakeTagLengthDelimited(field_id),
+                                 pos);
+  pos = proto_utils::WriteVarInt(static_cast<uint32_t>(size), pos);
+  WriteToStream(buffer, pos);
+
+  const uint8_t* src_u8 = reinterpret_cast<const uint8_t*>(src);
+  WriteToStream(src_u8, src_u8 + size);
+}
+
+size_t Message::AppendScatteredBytes(uint32_t field_id,
+                                     ContiguousMemoryRange* ranges,
+                                     size_t num_ranges) {
+  size_t size = 0;
+  for (size_t i = 0; i < num_ranges; ++i) {
+    size += ranges[i].size();
+  }
+
+  PERFETTO_DCHECK(size < proto_utils::kMaxMessageLength);
+
+  uint8_t buffer[proto_utils::kMaxSimpleFieldEncodedSize];
+  uint8_t* pos = buffer;
+  pos = proto_utils::WriteVarInt(proto_utils::MakeTagLengthDelimited(field_id),
+                                 pos);
+  pos = proto_utils::WriteVarInt(static_cast<uint32_t>(size), pos);
+  WriteToStream(buffer, pos);
+
+  for (size_t i = 0; i < num_ranges; ++i) {
+    auto& range = ranges[i];
+    WriteToStream(range.begin, range.end);
+  }
+
+  return size;
+}
+
+uint32_t Message::Finalize() {
+  if (finalized_)
+    return size_;
+
+  if (nested_message_)
+    EndNestedMessage();
+
+  // Write the length of the nested message a posteriori, using a leading-zero
+  // redundant varint encoding.
+  if (size_field_) {
+    PERFETTO_DCHECK(!finalized_);
+    PERFETTO_DCHECK(size_ < proto_utils::kMaxMessageLength);
+    PERFETTO_DCHECK(size_ >= size_already_written_);
+    proto_utils::WriteRedundantVarInt(size_ - size_already_written_,
+                                      size_field_);
+    size_field_ = nullptr;
+  }
+
+  finalized_ = true;
+#if PERFETTO_DCHECK_IS_ON()
+  if (handle_)
+    handle_->reset_message();
+#endif
+
+  return size_;
+}
+
+Message* Message::BeginNestedMessageInternal(uint32_t field_id) {
+  if (nested_message_)
+    EndNestedMessage();
+
+  // Write the proto preamble for the nested message.
+  uint8_t data[proto_utils::kMaxTagEncodedSize];
+  uint8_t* data_end = proto_utils::WriteVarInt(
+      proto_utils::MakeTagLengthDelimited(field_id), data);
+  WriteToStream(data, data_end);
+
+  Message* message = arena_->NewMessage();
+  message->Reset(stream_writer_, arena_);
+
+  // The length of the nested message cannot be known upfront. So right now
+  // just reserve the bytes to encode the size after the nested message is done.
+  message->set_size_field(
+      stream_writer_->ReserveBytes(proto_utils::kMessageLengthFieldSize));
+  size_ += proto_utils::kMessageLengthFieldSize;
+
+  nested_message_ = message;
+  return message;
+}
+
+void Message::EndNestedMessage() {
+  size_ += nested_message_->Finalize();
+  arena_->DeleteLastMessage(nested_message_);
+  nested_message_ = nullptr;
+}
+
+}  // namespace protozero
+// gen_amalgamated begin source: src/protozero/message_arena.cc
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/protozero/message_arena.h"
+
+#include <atomic>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
+
+namespace protozero {
+
+MessageArena::MessageArena() {
+  // The code below assumes that there is always at least one block.
+  blocks_.emplace_front();
+  static_assert(std::alignment_of<decltype(blocks_.back().storage[0])>::value >=
+                    alignof(Message),
+                "MessageArea's storage is not properly aligned");
+}
+
+MessageArena::~MessageArena() = default;
+
+Message* MessageArena::NewMessage() {
+  PERFETTO_DCHECK(!blocks_.empty());  // Should never become empty.
+
+  Block* block = &blocks_.back();
+  if (PERFETTO_UNLIKELY(block->entries >= Block::kCapacity)) {
+    blocks_.emplace_back();
+    block = &blocks_.back();
+  }
+  const auto idx = block->entries++;
+  void* storage = &block->storage[idx];
+  PERFETTO_ASAN_UNPOISON(storage, sizeof(Message));
+  return new (storage) Message();
+}
+
+void MessageArena::DeleteLastMessageInternal() {
+  PERFETTO_DCHECK(!blocks_.empty());  // Should never be empty, see below.
+  Block* block = &blocks_.back();
+  PERFETTO_DCHECK(block->entries > 0);
+
+  // This is the reason why there is no ~Message() call here.
+  // MessageArea::Reset() (see header) also relies on dtor being trivial.
+  static_assert(std::is_trivially_destructible<Message>::value,
+                "Message must be trivially destructible");
+
+  --block->entries;
+  PERFETTO_ASAN_POISON(&block->storage[block->entries], sizeof(Message));
+
+  // Don't remove the first block to avoid malloc/free calls when the root
+  // message is reset. Hitting the allocator all the times is a waste of time.
+  if (block->entries == 0 && blocks_.size() > 1) {
+    blocks_.pop_back();
+  }
+}
+
+}  // namespace protozero
+// gen_amalgamated begin source: src/protozero/message_handle.cc
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
+
+#include <utility>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+
+namespace protozero {
+
+MessageHandleBase::MessageHandleBase(Message* message) : message_(message) {
+#if PERFETTO_DCHECK_IS_ON()
+  generation_ = message_ ? message->generation_ : 0;
+  if (message_)
+    message_->set_handle(this);
+#endif
+}
+
+MessageHandleBase::~MessageHandleBase() {
+  if (message_) {
+#if PERFETTO_DCHECK_IS_ON()
+    PERFETTO_DCHECK(generation_ == message_->generation_);
+#endif
+    FinalizeMessage();
+  }
+}
+
+MessageHandleBase::MessageHandleBase(MessageHandleBase&& other) noexcept {
+  Move(std::move(other));
+}
+
+MessageHandleBase& MessageHandleBase::operator=(MessageHandleBase&& other) {
+  // If the current handle was pointing to a message and is being reset to a new
+  // one, finalize the old message. However, if the other message is the same as
+  // the one we point to, don't finalize.
+  if (message_ && message_ != other.message_)
+    FinalizeMessage();
+  Move(std::move(other));
+  return *this;
+}
+
+void MessageHandleBase::Move(MessageHandleBase&& other) {
+  message_ = other.message_;
+  other.message_ = nullptr;
+#if PERFETTO_DCHECK_IS_ON()
+  if (message_) {
+    generation_ = message_->generation_;
+    message_->set_handle(this);
+  }
+#endif
+}
+
+}  // namespace protozero
+// gen_amalgamated begin source: src/protozero/packed_repeated_fields.cc
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+
+namespace protozero {
+
+// static
+constexpr size_t PackedBufferBase::kOnStackStorageSize;
+
+void PackedBufferBase::GrowSlowpath() {
+  size_t write_off = static_cast<size_t>(write_ptr_ - storage_begin_);
+  size_t old_size = static_cast<size_t>(storage_end_ - storage_begin_);
+  size_t new_size = old_size < 65536 ? (old_size * 2) : (old_size * 3 / 2);
+  new_size = perfetto::base::AlignUp<4096>(new_size);
+  std::unique_ptr<uint8_t[]> new_buf(new uint8_t[new_size]);
+  memcpy(new_buf.get(), storage_begin_, old_size);
+  heap_buf_ = std::move(new_buf);
+  storage_begin_ = heap_buf_.get();
+  storage_end_ = storage_begin_ + new_size;
+  write_ptr_ = storage_begin_ + write_off;
+}
+
+void PackedBufferBase::Reset() {
+  heap_buf_.reset();
+  storage_begin_ = reinterpret_cast<uint8_t*>(&stack_buf_[0]);
+  storage_end_ = reinterpret_cast<uint8_t*>(&stack_buf_[kOnStackStorageSize]);
+  write_ptr_ = storage_begin_;
+}
+
+}  // namespace protozero
+// gen_amalgamated begin source: src/protozero/proto_decoder.cc
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+
+#include <string.h>
+#include <limits>
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace protozero {
+
+using namespace proto_utils;
+
+#if !PERFETTO_IS_LITTLE_ENDIAN()
+#error Unimplemented for big endian archs.
+#endif
+
+namespace {
+
+struct ParseFieldResult {
+  enum ParseResult { kAbort, kSkip, kOk };
+  ParseResult parse_res;
+  const uint8_t* next;
+  Field field;
+};
+
+// Parses one field and returns the field itself and a pointer to the next
+// field to parse. If parsing fails, the returned |next| == |buffer|.
+PERFETTO_ALWAYS_INLINE ParseFieldResult
+ParseOneField(const uint8_t* const buffer, const uint8_t* const end) {
+  ParseFieldResult res{ParseFieldResult::kAbort, buffer, Field{}};
+
+  // The first byte of a proto field is structured as follows:
+  // The least 3 significant bits determine the field type.
+  // The most 5 significant bits determine the field id. If MSB == 1, the
+  // field id continues on the next bytes following the VarInt encoding.
+  const uint8_t kFieldTypeNumBits = 3;
+  const uint64_t kFieldTypeMask = (1 << kFieldTypeNumBits) - 1;  // 0000 0111;
+  const uint8_t* pos = buffer;
+
+  // If we've already hit the end, just return an invalid field.
+  if (PERFETTO_UNLIKELY(pos >= end))
+    return res;
+
+  uint64_t preamble = 0;
+  if (PERFETTO_LIKELY(*pos < 0x80)) {  // Fastpath for fields with ID < 16.
+    preamble = *(pos++);
+  } else {
+    const uint8_t* next = ParseVarInt(pos, end, &preamble);
+    if (PERFETTO_UNLIKELY(pos == next))
+      return res;
+    pos = next;
+  }
+
+  uint32_t field_id = static_cast<uint32_t>(preamble >> kFieldTypeNumBits);
+  if (field_id == 0 || pos >= end)
+    return res;
+
+  auto field_type = static_cast<uint8_t>(preamble & kFieldTypeMask);
+  const uint8_t* new_pos = pos;
+  uint64_t int_value = 0;
+  uint64_t size = 0;
+
+  switch (field_type) {
+    case static_cast<uint8_t>(ProtoWireType::kVarInt): {
+      new_pos = ParseVarInt(pos, end, &int_value);
+
+      // new_pos not being greater than pos means ParseVarInt could not fully
+      // parse the number. This is because we are out of space in the buffer.
+      // Set the id to zero and return but don't update the offset so a future
+      // read can read this field.
+      if (PERFETTO_UNLIKELY(new_pos == pos))
+        return res;
+
+      break;
+    }
+
+    case static_cast<uint8_t>(ProtoWireType::kLengthDelimited): {
+      uint64_t payload_length;
+      new_pos = ParseVarInt(pos, end, &payload_length);
+      if (PERFETTO_UNLIKELY(new_pos == pos))
+        return res;
+
+      // ParseVarInt guarantees that |new_pos| <= |end| when it succeeds;
+      if (payload_length > static_cast<uint64_t>(end - new_pos))
+        return res;
+
+      const uintptr_t payload_start = reinterpret_cast<uintptr_t>(new_pos);
+      int_value = payload_start;
+      size = payload_length;
+      new_pos += payload_length;
+      break;
+    }
+
+    case static_cast<uint8_t>(ProtoWireType::kFixed64): {
+      new_pos = pos + sizeof(uint64_t);
+      if (PERFETTO_UNLIKELY(new_pos > end))
+        return res;
+      memcpy(&int_value, pos, sizeof(uint64_t));
+      break;
+    }
+
+    case static_cast<uint8_t>(ProtoWireType::kFixed32): {
+      new_pos = pos + sizeof(uint32_t);
+      if (PERFETTO_UNLIKELY(new_pos > end))
+        return res;
+      memcpy(&int_value, pos, sizeof(uint32_t));
+      break;
+    }
+
+    default:
+      PERFETTO_DLOG("Invalid proto field type: %u", field_type);
+      return res;
+  }
+
+  res.next = new_pos;
+
+  if (PERFETTO_UNLIKELY(field_id > std::numeric_limits<uint16_t>::max())) {
+    PERFETTO_DLOG("Skipping field %" PRIu32 " because its id > 0xFFFF",
+                  field_id);
+    res.parse_res = ParseFieldResult::kSkip;
+    return res;
+  }
+
+  if (PERFETTO_UNLIKELY(size > proto_utils::kMaxMessageLength)) {
+    PERFETTO_DLOG("Skipping field %" PRIu32 " because it's too big (%" PRIu64
+                  " KB)",
+                  field_id, size / 1024);
+    res.parse_res = ParseFieldResult::kSkip;
+    return res;
+  }
+
+  res.parse_res = ParseFieldResult::kOk;
+  res.field.initialize(static_cast<uint16_t>(field_id), field_type, int_value,
+                       static_cast<uint32_t>(size));
+  return res;
+}
+
+}  // namespace
+
+Field ProtoDecoder::FindField(uint32_t field_id) {
+  Field res{};
+  auto old_position = read_ptr_;
+  read_ptr_ = begin_;
+  for (auto f = ReadField(); f.valid(); f = ReadField()) {
+    if (f.id() == field_id) {
+      res = f;
+      break;
+    }
+  }
+  read_ptr_ = old_position;
+  return res;
+}
+
+PERFETTO_ALWAYS_INLINE
+Field ProtoDecoder::ReadField() {
+  ParseFieldResult res;
+  do {
+    res = ParseOneField(read_ptr_, end_);
+    read_ptr_ = res.next;
+  } while (PERFETTO_UNLIKELY(res.parse_res == ParseFieldResult::kSkip));
+  return res.field;
+}
+
+void TypedProtoDecoderBase::ParseAllFields() {
+  const uint8_t* cur = begin_;
+  ParseFieldResult res;
+  for (;;) {
+    res = ParseOneField(cur, end_);
+    PERFETTO_DCHECK(res.parse_res != ParseFieldResult::kOk || res.next != cur);
+    cur = res.next;
+    if (PERFETTO_UNLIKELY(res.parse_res == ParseFieldResult::kSkip)) {
+      continue;
+    } else if (PERFETTO_UNLIKELY(res.parse_res == ParseFieldResult::kAbort)) {
+      break;
+    }
+    PERFETTO_DCHECK(res.parse_res == ParseFieldResult::kOk);
+    PERFETTO_DCHECK(res.field.valid());
+    auto field_id = res.field.id();
+    if (PERFETTO_UNLIKELY(field_id >= num_fields_))
+      continue;
+
+    Field* fld = &fields_[field_id];
+    if (PERFETTO_LIKELY(!fld->valid())) {
+      // This is the first time we see this field.
+      *fld = std::move(res.field);
+    } else {
+      // Repeated field case.
+      // In this case we need to:
+      // 1. Append the last value of the field to end of the repeated field
+      //    storage.
+      // 2. Replace the default instance at offset |field_id| with the current
+      //    value. This is because in case of repeated field a call to Get(X) is
+      //    supposed to return the last value of X, not the first one.
+      // This is so that the RepeatedFieldIterator will iterate in the right
+      // order, see comments on RepeatedFieldIterator.
+      if (PERFETTO_UNLIKELY(size_ >= capacity_)) {
+        ExpandHeapStorage();
+        // ExpandHeapStorage moves fields_ so we need to update the ptr to fld:
+        fld = &fields_[field_id];
+        PERFETTO_DCHECK(size_ < capacity_);
+      }
+      fields_[size_++] = *fld;
+      *fld = std::move(res.field);
+    }
+  }
+  read_ptr_ = res.next;
+}
+
+void TypedProtoDecoderBase::ExpandHeapStorage() {
+  uint32_t new_capacity = capacity_ * 2;
+  PERFETTO_CHECK(new_capacity > size_);
+  std::unique_ptr<Field[]> new_storage(new Field[new_capacity]);
+
+  static_assert(std::is_trivially_copyable<Field>::value,
+                "Field must be trivially copyable");
+  memcpy(&new_storage[0], fields_, sizeof(Field) * size_);
+
+  heap_storage_ = std::move(new_storage);
+  fields_ = &heap_storage_[0];
+  capacity_ = new_capacity;
+}
+
+}  // namespace protozero
+// gen_amalgamated begin source: src/protozero/scattered_heap_buffer.cc
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+
+#include <algorithm>
+
+namespace protozero {
+
+ScatteredHeapBuffer::Slice::Slice()
+    : buffer_(nullptr), size_(0u), unused_bytes_(0u) {}
+
+ScatteredHeapBuffer::Slice::Slice(size_t size)
+    : buffer_(std::unique_ptr<uint8_t[]>(new uint8_t[size])),
+      size_(size),
+      unused_bytes_(size) {
+  PERFETTO_DCHECK(size);
+  Clear();
+}
+
+ScatteredHeapBuffer::Slice::Slice(Slice&& slice) noexcept = default;
+
+ScatteredHeapBuffer::Slice::~Slice() = default;
+
+ScatteredHeapBuffer::Slice& ScatteredHeapBuffer::Slice::operator=(Slice&&) =
+    default;
+
+void ScatteredHeapBuffer::Slice::Clear() {
+  unused_bytes_ = size_;
+#if PERFETTO_DCHECK_IS_ON()
+  memset(start(), 0xff, size_);
+#endif  // PERFETTO_DCHECK_IS_ON()
+}
+
+ScatteredHeapBuffer::ScatteredHeapBuffer(size_t initial_slice_size_bytes,
+                                         size_t maximum_slice_size_bytes)
+    : next_slice_size_(initial_slice_size_bytes),
+      maximum_slice_size_(maximum_slice_size_bytes) {
+  PERFETTO_DCHECK(next_slice_size_ && maximum_slice_size_);
+  PERFETTO_DCHECK(maximum_slice_size_ >= initial_slice_size_bytes);
+}
+
+ScatteredHeapBuffer::~ScatteredHeapBuffer() = default;
+
+protozero::ContiguousMemoryRange ScatteredHeapBuffer::GetNewBuffer() {
+  PERFETTO_CHECK(writer_);
+  AdjustUsedSizeOfCurrentSlice();
+
+  if (cached_slice_.start()) {
+    slices_.push_back(std::move(cached_slice_));
+    PERFETTO_DCHECK(!cached_slice_.start());
+  } else {
+    slices_.emplace_back(next_slice_size_);
+  }
+  next_slice_size_ = std::min(maximum_slice_size_, next_slice_size_ * 2);
+  return slices_.back().GetTotalRange();
+}
+
+const std::vector<ScatteredHeapBuffer::Slice>&
+ScatteredHeapBuffer::GetSlices() {
+  AdjustUsedSizeOfCurrentSlice();
+  return slices_;
+}
+
+std::vector<uint8_t> ScatteredHeapBuffer::StitchSlices() {
+  size_t stitched_size = 0u;
+  const auto& slices = GetSlices();
+  for (const auto& slice : slices)
+    stitched_size += slice.size() - slice.unused_bytes();
+
+  std::vector<uint8_t> buffer;
+  buffer.reserve(stitched_size);
+  for (const auto& slice : slices) {
+    auto used_range = slice.GetUsedRange();
+    buffer.insert(buffer.end(), used_range.begin, used_range.end);
+  }
+  return buffer;
+}
+
+std::vector<protozero::ContiguousMemoryRange> ScatteredHeapBuffer::GetRanges() {
+  std::vector<protozero::ContiguousMemoryRange> ranges;
+  for (const auto& slice : GetSlices())
+    ranges.push_back(slice.GetUsedRange());
+  return ranges;
+}
+
+void ScatteredHeapBuffer::AdjustUsedSizeOfCurrentSlice() {
+  if (!slices_.empty())
+    slices_.back().set_unused_bytes(writer_->bytes_available());
+}
+
+size_t ScatteredHeapBuffer::GetTotalSize() {
+  size_t total_size = 0;
+  for (auto& slice : slices_) {
+    total_size += slice.size();
+  }
+  return total_size;
+}
+
+void ScatteredHeapBuffer::Reset() {
+  if (slices_.empty())
+    return;
+  cached_slice_ = std::move(slices_.front());
+  cached_slice_.Clear();
+  slices_.clear();
+}
+
+}  // namespace protozero
+// gen_amalgamated begin source: src/protozero/scattered_stream_null_delegate.cc
+// gen_amalgamated begin header: include/perfetto/protozero/scattered_stream_null_delegate.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_SCATTERED_STREAM_NULL_DELEGATE_H_
+#define INCLUDE_PERFETTO_PROTOZERO_SCATTERED_STREAM_NULL_DELEGATE_H_
+
+#include <memory>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/contiguous_memory_range.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_writer.h"
+
+namespace protozero {
+
+class PERFETTO_EXPORT ScatteredStreamWriterNullDelegate
+    : public ScatteredStreamWriter::Delegate {
+ public:
+  explicit ScatteredStreamWriterNullDelegate(size_t chunk_size);
+  ~ScatteredStreamWriterNullDelegate() override;
+
+  // protozero::ScatteredStreamWriter::Delegate implementation.
+  ContiguousMemoryRange GetNewBuffer() override;
+
+ private:
+  const size_t chunk_size_;
+  std::unique_ptr<uint8_t[]> chunk_;
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_SCATTERED_STREAM_NULL_DELEGATE_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_null_delegate.h"
+
+namespace protozero {
+
+// An implementation of ScatteredStreamWriter::Delegate which always returns
+// the same piece of memory.
+// This is used when we need to no-op the writers (e.g. during teardown or in
+// case of resource exhaustion), avoiding that the clients have to deal with
+// nullptr checks.
+ScatteredStreamWriterNullDelegate::ScatteredStreamWriterNullDelegate(
+    size_t chunk_size)
+    : chunk_size_(chunk_size),
+      chunk_(std::unique_ptr<uint8_t[]>(new uint8_t[chunk_size_])) {}
+
+ScatteredStreamWriterNullDelegate::~ScatteredStreamWriterNullDelegate() {}
+
+ContiguousMemoryRange ScatteredStreamWriterNullDelegate::GetNewBuffer() {
+  return {chunk_.get(), chunk_.get() + chunk_size_};
+}
+
+}  // namespace protozero
+// gen_amalgamated begin source: src/protozero/scattered_stream_writer.cc
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_writer.h"
+
+#include <algorithm>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+namespace protozero {
+
+ScatteredStreamWriter::Delegate::~Delegate() {}
+
+ScatteredStreamWriter::ScatteredStreamWriter(Delegate* delegate)
+    : delegate_(delegate),
+      cur_range_({nullptr, nullptr}),
+      write_ptr_(nullptr) {}
+
+ScatteredStreamWriter::~ScatteredStreamWriter() {}
+
+void ScatteredStreamWriter::Reset(ContiguousMemoryRange range) {
+  written_previously_ += static_cast<uint64_t>(write_ptr_ - cur_range_.begin);
+  cur_range_ = range;
+  write_ptr_ = range.begin;
+  PERFETTO_DCHECK(!write_ptr_ || write_ptr_ < cur_range_.end);
+}
+
+void ScatteredStreamWriter::Extend() {
+  Reset(delegate_->GetNewBuffer());
+}
+
+void ScatteredStreamWriter::WriteBytesSlowPath(const uint8_t* src,
+                                               size_t size) {
+  size_t bytes_left = size;
+  while (bytes_left > 0) {
+    if (write_ptr_ >= cur_range_.end)
+      Extend();
+    const size_t burst_size = std::min(bytes_available(), bytes_left);
+    WriteBytesUnsafe(src, burst_size);
+    bytes_left -= burst_size;
+    src += burst_size;
+  }
+}
+
+// TODO(primiano): perf optimization: I suspect that at the end this will always
+// be called with |size| == 4, in which case we might just hardcode it.
+uint8_t* ScatteredStreamWriter::ReserveBytes(size_t size) {
+  if (write_ptr_ + size > cur_range_.end) {
+    // Assume the reservations are always < Delegate::GetNewBuffer().size(),
+    // so that one single call to Extend() will definitely give enough headroom.
+    Extend();
+    PERFETTO_DCHECK(write_ptr_ + size <= cur_range_.end);
+  }
+  uint8_t* begin = write_ptr_;
+  write_ptr_ += size;
+#if PERFETTO_DCHECK_IS_ON()
+  memset(begin, 0, size);
+#endif
+  return begin;
+}
+
+}  // namespace protozero
+// gen_amalgamated begin source: src/protozero/static_buffer.cc
+// gen_amalgamated begin header: include/perfetto/protozero/static_buffer.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_STATIC_BUFFER_H_
+#define INCLUDE_PERFETTO_PROTOZERO_STATIC_BUFFER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/root_message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_writer.h"
+
+namespace protozero {
+
+class Message;
+
+// A simple implementation of ScatteredStreamWriter::Delegate backed by a
+// fixed-size buffer. It doesn't support expansion. The caller needs to ensure
+// to never write more than the size of the buffer. Will CHECK() otherwise.
+class PERFETTO_EXPORT StaticBufferDelegate
+    : public ScatteredStreamWriter::Delegate {
+ public:
+  StaticBufferDelegate(uint8_t* buf, size_t len) : range_{buf, buf + len} {}
+  ~StaticBufferDelegate() override;
+
+  // ScatteredStreamWriter::Delegate implementation.
+  ContiguousMemoryRange GetNewBuffer() override;
+
+  ContiguousMemoryRange const range_;
+  bool get_new_buffer_called_once_ = false;
+};
+
+// Helper function to create protozero messages backed by a fixed-size buffer
+// in one line. You can write:
+//   protozero::Static<protozero::MyMessage> msg(buf.data(), buf.size());
+//   msg->set_stuff(...);
+//   size_t bytes_encoded = msg.Finalize();
+template <typename T /* protozero::Message */>
+class StaticBuffered {
+ public:
+  StaticBuffered(void* buf, size_t len)
+      : delegate_(reinterpret_cast<uint8_t*>(buf), len), writer_(&delegate_) {
+    msg_.Reset(&writer_);
+  }
+
+  // This can't be neither copied nor moved because Message hands out pointers
+  // to itself when creating submessages.
+  StaticBuffered(const StaticBuffered&) = delete;
+  StaticBuffered& operator=(const StaticBuffered&) = delete;
+  StaticBuffered(StaticBuffered&&) = delete;
+  StaticBuffered& operator=(StaticBuffered&&) = delete;
+
+  T* get() { return &msg_; }
+  T* operator->() { return &msg_; }
+
+  // The lack of a size() method is deliberate. It's to prevent that one
+  // accidentally calls size() before Finalize().
+
+  // Returns the number of encoded bytes (<= the size passed in the ctor).
+  size_t Finalize() {
+    msg_.Finalize();
+    return static_cast<size_t>(writer_.write_ptr() - delegate_.range_.begin);
+  }
+
+ private:
+  StaticBufferDelegate delegate_;
+  ScatteredStreamWriter writer_;
+  RootMessage<T> msg_;
+};
+
+// Helper function to create stack-based protozero messages in one line.
+// You can write:
+//   protozero::StackBuffered<protozero::MyMessage, 16> msg;
+//   msg->set_stuff(...);
+//   size_t bytes_encoded = msg.Finalize();
+template <typename T /* protozero::Message */, size_t N>
+class StackBuffered : public StaticBuffered<T> {
+ public:
+  StackBuffered() : StaticBuffered<T>(&buf_[0], N) {}
+
+ private:
+  uint8_t buf_[N];  // Deliberately not initialized.
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_STATIC_BUFFER_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/protozero/static_buffer.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+namespace protozero {
+
+StaticBufferDelegate::~StaticBufferDelegate() = default;
+
+ContiguousMemoryRange StaticBufferDelegate::GetNewBuffer() {
+  if (get_new_buffer_called_once_) {
+    // This is the 2nd time GetNewBuffer is called. The estimate is wrong. We
+    // shouldn't try to grow the buffer after the initial call.
+    PERFETTO_FATAL("Static buffer too small");
+  }
+  get_new_buffer_called_once_ = true;
+  return range_;
+}
+
+}  // namespace protozero
+// gen_amalgamated begin source: src/protozero/virtual_destructors.cc
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+
+namespace protozero {
+
+CppMessageObj::~CppMessageObj() = default;
+
+}  // namespace protozero
+// gen_amalgamated begin source: gen/protos/perfetto/common/android_energy_consumer_descriptor.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/common/android_energy_consumer_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_ENERGY_CONSUMER_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_ENERGY_CONSUMER_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class AndroidEnergyConsumerDescriptor;
+class AndroidEnergyConsumer;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT AndroidEnergyConsumerDescriptor : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kEnergyConsumersFieldNumber = 1,
+  };
+
+  AndroidEnergyConsumerDescriptor();
+  ~AndroidEnergyConsumerDescriptor() override;
+  AndroidEnergyConsumerDescriptor(AndroidEnergyConsumerDescriptor&&) noexcept;
+  AndroidEnergyConsumerDescriptor& operator=(AndroidEnergyConsumerDescriptor&&);
+  AndroidEnergyConsumerDescriptor(const AndroidEnergyConsumerDescriptor&);
+  AndroidEnergyConsumerDescriptor& operator=(const AndroidEnergyConsumerDescriptor&);
+  bool operator==(const AndroidEnergyConsumerDescriptor&) const;
+  bool operator!=(const AndroidEnergyConsumerDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<AndroidEnergyConsumer>& energy_consumers() const { return energy_consumers_; }
+  std::vector<AndroidEnergyConsumer>* mutable_energy_consumers() { return &energy_consumers_; }
+  int energy_consumers_size() const;
+  void clear_energy_consumers();
+  AndroidEnergyConsumer* add_energy_consumers();
+
+ private:
+  std::vector<AndroidEnergyConsumer> energy_consumers_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT AndroidEnergyConsumer : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kEnergyConsumerIdFieldNumber = 1,
+    kOrdinalFieldNumber = 2,
+    kTypeFieldNumber = 3,
+    kNameFieldNumber = 4,
+  };
+
+  AndroidEnergyConsumer();
+  ~AndroidEnergyConsumer() override;
+  AndroidEnergyConsumer(AndroidEnergyConsumer&&) noexcept;
+  AndroidEnergyConsumer& operator=(AndroidEnergyConsumer&&);
+  AndroidEnergyConsumer(const AndroidEnergyConsumer&);
+  AndroidEnergyConsumer& operator=(const AndroidEnergyConsumer&);
+  bool operator==(const AndroidEnergyConsumer&) const;
+  bool operator!=(const AndroidEnergyConsumer& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_energy_consumer_id() const { return _has_field_[1]; }
+  int32_t energy_consumer_id() const { return energy_consumer_id_; }
+  void set_energy_consumer_id(int32_t value) { energy_consumer_id_ = value; _has_field_.set(1); }
+
+  bool has_ordinal() const { return _has_field_[2]; }
+  int32_t ordinal() const { return ordinal_; }
+  void set_ordinal(int32_t value) { ordinal_ = value; _has_field_.set(2); }
+
+  bool has_type() const { return _has_field_[3]; }
+  const std::string& type() const { return type_; }
+  void set_type(const std::string& value) { type_ = value; _has_field_.set(3); }
+
+  bool has_name() const { return _has_field_[4]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(4); }
+
+ private:
+  int32_t energy_consumer_id_{};
+  int32_t ordinal_{};
+  std::string type_{};
+  std::string name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_ENERGY_CONSUMER_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/common/android_energy_consumer_descriptor.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+AndroidEnergyConsumerDescriptor::AndroidEnergyConsumerDescriptor() = default;
+AndroidEnergyConsumerDescriptor::~AndroidEnergyConsumerDescriptor() = default;
+AndroidEnergyConsumerDescriptor::AndroidEnergyConsumerDescriptor(const AndroidEnergyConsumerDescriptor&) = default;
+AndroidEnergyConsumerDescriptor& AndroidEnergyConsumerDescriptor::operator=(const AndroidEnergyConsumerDescriptor&) = default;
+AndroidEnergyConsumerDescriptor::AndroidEnergyConsumerDescriptor(AndroidEnergyConsumerDescriptor&&) noexcept = default;
+AndroidEnergyConsumerDescriptor& AndroidEnergyConsumerDescriptor::operator=(AndroidEnergyConsumerDescriptor&&) = default;
+
+bool AndroidEnergyConsumerDescriptor::operator==(const AndroidEnergyConsumerDescriptor& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && energy_consumers_ == other.energy_consumers_;
+}
+
+int AndroidEnergyConsumerDescriptor::energy_consumers_size() const { return static_cast<int>(energy_consumers_.size()); }
+void AndroidEnergyConsumerDescriptor::clear_energy_consumers() { energy_consumers_.clear(); }
+AndroidEnergyConsumer* AndroidEnergyConsumerDescriptor::add_energy_consumers() { energy_consumers_.emplace_back(); return &energy_consumers_.back(); }
+bool AndroidEnergyConsumerDescriptor::ParseFromArray(const void* raw, size_t size) {
+  energy_consumers_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* energy_consumers */:
+        energy_consumers_.emplace_back();
+        energy_consumers_.back().ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string AndroidEnergyConsumerDescriptor::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> AndroidEnergyConsumerDescriptor::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void AndroidEnergyConsumerDescriptor::Serialize(::protozero::Message* msg) const {
+  // Field 1: energy_consumers
+  for (auto& it : energy_consumers_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+AndroidEnergyConsumer::AndroidEnergyConsumer() = default;
+AndroidEnergyConsumer::~AndroidEnergyConsumer() = default;
+AndroidEnergyConsumer::AndroidEnergyConsumer(const AndroidEnergyConsumer&) = default;
+AndroidEnergyConsumer& AndroidEnergyConsumer::operator=(const AndroidEnergyConsumer&) = default;
+AndroidEnergyConsumer::AndroidEnergyConsumer(AndroidEnergyConsumer&&) noexcept = default;
+AndroidEnergyConsumer& AndroidEnergyConsumer::operator=(AndroidEnergyConsumer&&) = default;
+
+bool AndroidEnergyConsumer::operator==(const AndroidEnergyConsumer& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && energy_consumer_id_ == other.energy_consumer_id_
+   && ordinal_ == other.ordinal_
+   && type_ == other.type_
+   && name_ == other.name_;
+}
+
+bool AndroidEnergyConsumer::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* energy_consumer_id */:
+        field.get(&energy_consumer_id_);
+        break;
+      case 2 /* ordinal */:
+        field.get(&ordinal_);
+        break;
+      case 3 /* type */:
+        field.get(&type_);
+        break;
+      case 4 /* name */:
+        field.get(&name_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string AndroidEnergyConsumer::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> AndroidEnergyConsumer::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void AndroidEnergyConsumer::Serialize(::protozero::Message* msg) const {
+  // Field 1: energy_consumer_id
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, energy_consumer_id_);
+  }
+
+  // Field 2: ordinal
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, ordinal_);
+  }
+
+  // Field 3: type
+  if (_has_field_[3]) {
+    msg->AppendString(3, type_);
+  }
+
+  // Field 4: name
+  if (_has_field_[4]) {
+    msg->AppendString(4, name_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/common/android_log_constants.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/common/android_log_constants.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_LOG_CONSTANTS_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_LOG_CONSTANTS_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum AndroidLogId : int;
+enum AndroidLogPriority : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum AndroidLogId : int {
+  LID_DEFAULT = 0,
+  LID_RADIO = 1,
+  LID_EVENTS = 2,
+  LID_SYSTEM = 3,
+  LID_CRASH = 4,
+  LID_STATS = 5,
+  LID_SECURITY = 6,
+  LID_KERNEL = 7,
+};
+enum AndroidLogPriority : int {
+  PRIO_UNSPECIFIED = 0,
+  PRIO_UNUSED = 1,
+  PRIO_VERBOSE = 2,
+  PRIO_DEBUG = 3,
+  PRIO_INFO = 4,
+  PRIO_WARN = 5,
+  PRIO_ERROR = 6,
+  PRIO_FATAL = 7,
+};
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_LOG_CONSTANTS_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/common/builtin_clock.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/common/builtin_clock.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_BUILTIN_CLOCK_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_BUILTIN_CLOCK_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum BuiltinClock : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum BuiltinClock : int {
+  BUILTIN_CLOCK_UNKNOWN = 0,
+  BUILTIN_CLOCK_REALTIME = 1,
+  BUILTIN_CLOCK_REALTIME_COARSE = 2,
+  BUILTIN_CLOCK_MONOTONIC = 3,
+  BUILTIN_CLOCK_MONOTONIC_COARSE = 4,
+  BUILTIN_CLOCK_MONOTONIC_RAW = 5,
+  BUILTIN_CLOCK_BOOTTIME = 6,
+  BUILTIN_CLOCK_MAX_ID = 63,
+};
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_BUILTIN_CLOCK_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/common/commit_data_request.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/common/commit_data_request.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_COMMIT_DATA_REQUEST_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_COMMIT_DATA_REQUEST_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class CommitDataRequest;
+class CommitDataRequest_ChunkToPatch;
+class CommitDataRequest_ChunkToPatch_Patch;
+class CommitDataRequest_ChunksToMove;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT CommitDataRequest : public ::protozero::CppMessageObj {
+ public:
+  using ChunksToMove = CommitDataRequest_ChunksToMove;
+  using ChunkToPatch = CommitDataRequest_ChunkToPatch;
+  enum FieldNumbers {
+    kChunksToMoveFieldNumber = 1,
+    kChunksToPatchFieldNumber = 2,
+    kFlushRequestIdFieldNumber = 3,
+  };
+
+  CommitDataRequest();
+  ~CommitDataRequest() override;
+  CommitDataRequest(CommitDataRequest&&) noexcept;
+  CommitDataRequest& operator=(CommitDataRequest&&);
+  CommitDataRequest(const CommitDataRequest&);
+  CommitDataRequest& operator=(const CommitDataRequest&);
+  bool operator==(const CommitDataRequest&) const;
+  bool operator!=(const CommitDataRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<CommitDataRequest_ChunksToMove>& chunks_to_move() const { return chunks_to_move_; }
+  std::vector<CommitDataRequest_ChunksToMove>* mutable_chunks_to_move() { return &chunks_to_move_; }
+  int chunks_to_move_size() const;
+  void clear_chunks_to_move();
+  CommitDataRequest_ChunksToMove* add_chunks_to_move();
+
+  const std::vector<CommitDataRequest_ChunkToPatch>& chunks_to_patch() const { return chunks_to_patch_; }
+  std::vector<CommitDataRequest_ChunkToPatch>* mutable_chunks_to_patch() { return &chunks_to_patch_; }
+  int chunks_to_patch_size() const;
+  void clear_chunks_to_patch();
+  CommitDataRequest_ChunkToPatch* add_chunks_to_patch();
+
+  bool has_flush_request_id() const { return _has_field_[3]; }
+  uint64_t flush_request_id() const { return flush_request_id_; }
+  void set_flush_request_id(uint64_t value) { flush_request_id_ = value; _has_field_.set(3); }
+
+ private:
+  std::vector<CommitDataRequest_ChunksToMove> chunks_to_move_;
+  std::vector<CommitDataRequest_ChunkToPatch> chunks_to_patch_;
+  uint64_t flush_request_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT CommitDataRequest_ChunkToPatch : public ::protozero::CppMessageObj {
+ public:
+  using Patch = CommitDataRequest_ChunkToPatch_Patch;
+  enum FieldNumbers {
+    kTargetBufferFieldNumber = 1,
+    kWriterIdFieldNumber = 2,
+    kChunkIdFieldNumber = 3,
+    kPatchesFieldNumber = 4,
+    kHasMorePatchesFieldNumber = 5,
+  };
+
+  CommitDataRequest_ChunkToPatch();
+  ~CommitDataRequest_ChunkToPatch() override;
+  CommitDataRequest_ChunkToPatch(CommitDataRequest_ChunkToPatch&&) noexcept;
+  CommitDataRequest_ChunkToPatch& operator=(CommitDataRequest_ChunkToPatch&&);
+  CommitDataRequest_ChunkToPatch(const CommitDataRequest_ChunkToPatch&);
+  CommitDataRequest_ChunkToPatch& operator=(const CommitDataRequest_ChunkToPatch&);
+  bool operator==(const CommitDataRequest_ChunkToPatch&) const;
+  bool operator!=(const CommitDataRequest_ChunkToPatch& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_target_buffer() const { return _has_field_[1]; }
+  uint32_t target_buffer() const { return target_buffer_; }
+  void set_target_buffer(uint32_t value) { target_buffer_ = value; _has_field_.set(1); }
+
+  bool has_writer_id() const { return _has_field_[2]; }
+  uint32_t writer_id() const { return writer_id_; }
+  void set_writer_id(uint32_t value) { writer_id_ = value; _has_field_.set(2); }
+
+  bool has_chunk_id() const { return _has_field_[3]; }
+  uint32_t chunk_id() const { return chunk_id_; }
+  void set_chunk_id(uint32_t value) { chunk_id_ = value; _has_field_.set(3); }
+
+  const std::vector<CommitDataRequest_ChunkToPatch_Patch>& patches() const { return patches_; }
+  std::vector<CommitDataRequest_ChunkToPatch_Patch>* mutable_patches() { return &patches_; }
+  int patches_size() const;
+  void clear_patches();
+  CommitDataRequest_ChunkToPatch_Patch* add_patches();
+
+  bool has_has_more_patches() const { return _has_field_[5]; }
+  bool has_more_patches() const { return has_more_patches_; }
+  void set_has_more_patches(bool value) { has_more_patches_ = value; _has_field_.set(5); }
+
+ private:
+  uint32_t target_buffer_{};
+  uint32_t writer_id_{};
+  uint32_t chunk_id_{};
+  std::vector<CommitDataRequest_ChunkToPatch_Patch> patches_;
+  bool has_more_patches_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT CommitDataRequest_ChunkToPatch_Patch : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kOffsetFieldNumber = 1,
+    kDataFieldNumber = 2,
+  };
+
+  CommitDataRequest_ChunkToPatch_Patch();
+  ~CommitDataRequest_ChunkToPatch_Patch() override;
+  CommitDataRequest_ChunkToPatch_Patch(CommitDataRequest_ChunkToPatch_Patch&&) noexcept;
+  CommitDataRequest_ChunkToPatch_Patch& operator=(CommitDataRequest_ChunkToPatch_Patch&&);
+  CommitDataRequest_ChunkToPatch_Patch(const CommitDataRequest_ChunkToPatch_Patch&);
+  CommitDataRequest_ChunkToPatch_Patch& operator=(const CommitDataRequest_ChunkToPatch_Patch&);
+  bool operator==(const CommitDataRequest_ChunkToPatch_Patch&) const;
+  bool operator!=(const CommitDataRequest_ChunkToPatch_Patch& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_offset() const { return _has_field_[1]; }
+  uint32_t offset() const { return offset_; }
+  void set_offset(uint32_t value) { offset_ = value; _has_field_.set(1); }
+
+  bool has_data() const { return _has_field_[2]; }
+  const std::string& data() const { return data_; }
+  void set_data(const std::string& value) { data_ = value; _has_field_.set(2); }
+  void set_data(const void* p, size_t s) { data_.assign(reinterpret_cast<const char*>(p), s); _has_field_.set(2); }
+
+ private:
+  uint32_t offset_{};
+  std::string data_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT CommitDataRequest_ChunksToMove : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kPageFieldNumber = 1,
+    kChunkFieldNumber = 2,
+    kTargetBufferFieldNumber = 3,
+  };
+
+  CommitDataRequest_ChunksToMove();
+  ~CommitDataRequest_ChunksToMove() override;
+  CommitDataRequest_ChunksToMove(CommitDataRequest_ChunksToMove&&) noexcept;
+  CommitDataRequest_ChunksToMove& operator=(CommitDataRequest_ChunksToMove&&);
+  CommitDataRequest_ChunksToMove(const CommitDataRequest_ChunksToMove&);
+  CommitDataRequest_ChunksToMove& operator=(const CommitDataRequest_ChunksToMove&);
+  bool operator==(const CommitDataRequest_ChunksToMove&) const;
+  bool operator!=(const CommitDataRequest_ChunksToMove& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_page() const { return _has_field_[1]; }
+  uint32_t page() const { return page_; }
+  void set_page(uint32_t value) { page_ = value; _has_field_.set(1); }
+
+  bool has_chunk() const { return _has_field_[2]; }
+  uint32_t chunk() const { return chunk_; }
+  void set_chunk(uint32_t value) { chunk_ = value; _has_field_.set(2); }
+
+  bool has_target_buffer() const { return _has_field_[3]; }
+  uint32_t target_buffer() const { return target_buffer_; }
+  void set_target_buffer(uint32_t value) { target_buffer_ = value; _has_field_.set(3); }
+
+ private:
+  uint32_t page_{};
+  uint32_t chunk_{};
+  uint32_t target_buffer_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_COMMIT_DATA_REQUEST_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/common/commit_data_request.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+CommitDataRequest::CommitDataRequest() = default;
+CommitDataRequest::~CommitDataRequest() = default;
+CommitDataRequest::CommitDataRequest(const CommitDataRequest&) = default;
+CommitDataRequest& CommitDataRequest::operator=(const CommitDataRequest&) = default;
+CommitDataRequest::CommitDataRequest(CommitDataRequest&&) noexcept = default;
+CommitDataRequest& CommitDataRequest::operator=(CommitDataRequest&&) = default;
+
+bool CommitDataRequest::operator==(const CommitDataRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && chunks_to_move_ == other.chunks_to_move_
+   && chunks_to_patch_ == other.chunks_to_patch_
+   && flush_request_id_ == other.flush_request_id_;
+}
+
+int CommitDataRequest::chunks_to_move_size() const { return static_cast<int>(chunks_to_move_.size()); }
+void CommitDataRequest::clear_chunks_to_move() { chunks_to_move_.clear(); }
+CommitDataRequest_ChunksToMove* CommitDataRequest::add_chunks_to_move() { chunks_to_move_.emplace_back(); return &chunks_to_move_.back(); }
+int CommitDataRequest::chunks_to_patch_size() const { return static_cast<int>(chunks_to_patch_.size()); }
+void CommitDataRequest::clear_chunks_to_patch() { chunks_to_patch_.clear(); }
+CommitDataRequest_ChunkToPatch* CommitDataRequest::add_chunks_to_patch() { chunks_to_patch_.emplace_back(); return &chunks_to_patch_.back(); }
+bool CommitDataRequest::ParseFromArray(const void* raw, size_t size) {
+  chunks_to_move_.clear();
+  chunks_to_patch_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* chunks_to_move */:
+        chunks_to_move_.emplace_back();
+        chunks_to_move_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 2 /* chunks_to_patch */:
+        chunks_to_patch_.emplace_back();
+        chunks_to_patch_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 3 /* flush_request_id */:
+        field.get(&flush_request_id_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string CommitDataRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> CommitDataRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void CommitDataRequest::Serialize(::protozero::Message* msg) const {
+  // Field 1: chunks_to_move
+  for (auto& it : chunks_to_move_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  // Field 2: chunks_to_patch
+  for (auto& it : chunks_to_patch_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
+  }
+
+  // Field 3: flush_request_id
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, flush_request_id_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+CommitDataRequest_ChunkToPatch::CommitDataRequest_ChunkToPatch() = default;
+CommitDataRequest_ChunkToPatch::~CommitDataRequest_ChunkToPatch() = default;
+CommitDataRequest_ChunkToPatch::CommitDataRequest_ChunkToPatch(const CommitDataRequest_ChunkToPatch&) = default;
+CommitDataRequest_ChunkToPatch& CommitDataRequest_ChunkToPatch::operator=(const CommitDataRequest_ChunkToPatch&) = default;
+CommitDataRequest_ChunkToPatch::CommitDataRequest_ChunkToPatch(CommitDataRequest_ChunkToPatch&&) noexcept = default;
+CommitDataRequest_ChunkToPatch& CommitDataRequest_ChunkToPatch::operator=(CommitDataRequest_ChunkToPatch&&) = default;
+
+bool CommitDataRequest_ChunkToPatch::operator==(const CommitDataRequest_ChunkToPatch& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && target_buffer_ == other.target_buffer_
+   && writer_id_ == other.writer_id_
+   && chunk_id_ == other.chunk_id_
+   && patches_ == other.patches_
+   && has_more_patches_ == other.has_more_patches_;
+}
+
+int CommitDataRequest_ChunkToPatch::patches_size() const { return static_cast<int>(patches_.size()); }
+void CommitDataRequest_ChunkToPatch::clear_patches() { patches_.clear(); }
+CommitDataRequest_ChunkToPatch_Patch* CommitDataRequest_ChunkToPatch::add_patches() { patches_.emplace_back(); return &patches_.back(); }
+bool CommitDataRequest_ChunkToPatch::ParseFromArray(const void* raw, size_t size) {
+  patches_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* target_buffer */:
+        field.get(&target_buffer_);
+        break;
+      case 2 /* writer_id */:
+        field.get(&writer_id_);
+        break;
+      case 3 /* chunk_id */:
+        field.get(&chunk_id_);
+        break;
+      case 4 /* patches */:
+        patches_.emplace_back();
+        patches_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 5 /* has_more_patches */:
+        field.get(&has_more_patches_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string CommitDataRequest_ChunkToPatch::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> CommitDataRequest_ChunkToPatch::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void CommitDataRequest_ChunkToPatch::Serialize(::protozero::Message* msg) const {
+  // Field 1: target_buffer
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, target_buffer_);
+  }
+
+  // Field 2: writer_id
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, writer_id_);
+  }
+
+  // Field 3: chunk_id
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, chunk_id_);
+  }
+
+  // Field 4: patches
+  for (auto& it : patches_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
+  }
+
+  // Field 5: has_more_patches
+  if (_has_field_[5]) {
+    msg->AppendTinyVarInt(5, has_more_patches_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+CommitDataRequest_ChunkToPatch_Patch::CommitDataRequest_ChunkToPatch_Patch() = default;
+CommitDataRequest_ChunkToPatch_Patch::~CommitDataRequest_ChunkToPatch_Patch() = default;
+CommitDataRequest_ChunkToPatch_Patch::CommitDataRequest_ChunkToPatch_Patch(const CommitDataRequest_ChunkToPatch_Patch&) = default;
+CommitDataRequest_ChunkToPatch_Patch& CommitDataRequest_ChunkToPatch_Patch::operator=(const CommitDataRequest_ChunkToPatch_Patch&) = default;
+CommitDataRequest_ChunkToPatch_Patch::CommitDataRequest_ChunkToPatch_Patch(CommitDataRequest_ChunkToPatch_Patch&&) noexcept = default;
+CommitDataRequest_ChunkToPatch_Patch& CommitDataRequest_ChunkToPatch_Patch::operator=(CommitDataRequest_ChunkToPatch_Patch&&) = default;
+
+bool CommitDataRequest_ChunkToPatch_Patch::operator==(const CommitDataRequest_ChunkToPatch_Patch& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && offset_ == other.offset_
+   && data_ == other.data_;
+}
+
+bool CommitDataRequest_ChunkToPatch_Patch::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* offset */:
+        field.get(&offset_);
+        break;
+      case 2 /* data */:
+        field.get(&data_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string CommitDataRequest_ChunkToPatch_Patch::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> CommitDataRequest_ChunkToPatch_Patch::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void CommitDataRequest_ChunkToPatch_Patch::Serialize(::protozero::Message* msg) const {
+  // Field 1: offset
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, offset_);
+  }
+
+  // Field 2: data
+  if (_has_field_[2]) {
+    msg->AppendString(2, data_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+CommitDataRequest_ChunksToMove::CommitDataRequest_ChunksToMove() = default;
+CommitDataRequest_ChunksToMove::~CommitDataRequest_ChunksToMove() = default;
+CommitDataRequest_ChunksToMove::CommitDataRequest_ChunksToMove(const CommitDataRequest_ChunksToMove&) = default;
+CommitDataRequest_ChunksToMove& CommitDataRequest_ChunksToMove::operator=(const CommitDataRequest_ChunksToMove&) = default;
+CommitDataRequest_ChunksToMove::CommitDataRequest_ChunksToMove(CommitDataRequest_ChunksToMove&&) noexcept = default;
+CommitDataRequest_ChunksToMove& CommitDataRequest_ChunksToMove::operator=(CommitDataRequest_ChunksToMove&&) = default;
+
+bool CommitDataRequest_ChunksToMove::operator==(const CommitDataRequest_ChunksToMove& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && page_ == other.page_
+   && chunk_ == other.chunk_
+   && target_buffer_ == other.target_buffer_;
+}
+
+bool CommitDataRequest_ChunksToMove::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* page */:
+        field.get(&page_);
+        break;
+      case 2 /* chunk */:
+        field.get(&chunk_);
+        break;
+      case 3 /* target_buffer */:
+        field.get(&target_buffer_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string CommitDataRequest_ChunksToMove::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> CommitDataRequest_ChunksToMove::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void CommitDataRequest_ChunksToMove::Serialize(::protozero::Message* msg) const {
+  // Field 1: page
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, page_);
+  }
+
+  // Field 2: chunk
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, chunk_);
+  }
+
+  // Field 3: target_buffer
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, target_buffer_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/common/data_source_descriptor.gen.cc
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+DataSourceDescriptor::DataSourceDescriptor() = default;
+DataSourceDescriptor::~DataSourceDescriptor() = default;
+DataSourceDescriptor::DataSourceDescriptor(const DataSourceDescriptor&) = default;
+DataSourceDescriptor& DataSourceDescriptor::operator=(const DataSourceDescriptor&) = default;
+DataSourceDescriptor::DataSourceDescriptor(DataSourceDescriptor&&) noexcept = default;
+DataSourceDescriptor& DataSourceDescriptor::operator=(DataSourceDescriptor&&) = default;
+
+bool DataSourceDescriptor::operator==(const DataSourceDescriptor& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && name_ == other.name_
+   && will_notify_on_stop_ == other.will_notify_on_stop_
+   && will_notify_on_start_ == other.will_notify_on_start_
+   && handles_incremental_state_clear_ == other.handles_incremental_state_clear_
+   && gpu_counter_descriptor_ == other.gpu_counter_descriptor_
+   && track_event_descriptor_ == other.track_event_descriptor_;
+}
+
+bool DataSourceDescriptor::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* name */:
+        field.get(&name_);
+        break;
+      case 2 /* will_notify_on_stop */:
+        field.get(&will_notify_on_stop_);
+        break;
+      case 3 /* will_notify_on_start */:
+        field.get(&will_notify_on_start_);
+        break;
+      case 4 /* handles_incremental_state_clear */:
+        field.get(&handles_incremental_state_clear_);
+        break;
+      case 5 /* gpu_counter_descriptor */:
+        gpu_counter_descriptor_ = field.as_std_string();
+        break;
+      case 6 /* track_event_descriptor */:
+        track_event_descriptor_ = field.as_std_string();
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string DataSourceDescriptor::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> DataSourceDescriptor::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void DataSourceDescriptor::Serialize(::protozero::Message* msg) const {
+  // Field 1: name
+  if (_has_field_[1]) {
+    msg->AppendString(1, name_);
+  }
+
+  // Field 2: will_notify_on_stop
+  if (_has_field_[2]) {
+    msg->AppendTinyVarInt(2, will_notify_on_stop_);
+  }
+
+  // Field 3: will_notify_on_start
+  if (_has_field_[3]) {
+    msg->AppendTinyVarInt(3, will_notify_on_start_);
+  }
+
+  // Field 4: handles_incremental_state_clear
+  if (_has_field_[4]) {
+    msg->AppendTinyVarInt(4, handles_incremental_state_clear_);
+  }
+
+  // Field 5: gpu_counter_descriptor
+  if (_has_field_[5]) {
+    msg->AppendString(5, gpu_counter_descriptor_);
+  }
+
+  // Field 6: track_event_descriptor
+  if (_has_field_[6]) {
+    msg->AppendString(6, track_event_descriptor_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/common/descriptor.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/common/descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class OneofOptions;
+class EnumValueDescriptorProto;
+class EnumDescriptorProto;
+class OneofDescriptorProto;
+class FieldDescriptorProto;
+class DescriptorProto;
+class DescriptorProto_ReservedRange;
+class FileDescriptorProto;
+class FileDescriptorSet;
+enum FieldDescriptorProto_Type : int;
+enum FieldDescriptorProto_Label : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum FieldDescriptorProto_Type : int {
+  FieldDescriptorProto_Type_TYPE_DOUBLE = 1,
+  FieldDescriptorProto_Type_TYPE_FLOAT = 2,
+  FieldDescriptorProto_Type_TYPE_INT64 = 3,
+  FieldDescriptorProto_Type_TYPE_UINT64 = 4,
+  FieldDescriptorProto_Type_TYPE_INT32 = 5,
+  FieldDescriptorProto_Type_TYPE_FIXED64 = 6,
+  FieldDescriptorProto_Type_TYPE_FIXED32 = 7,
+  FieldDescriptorProto_Type_TYPE_BOOL = 8,
+  FieldDescriptorProto_Type_TYPE_STRING = 9,
+  FieldDescriptorProto_Type_TYPE_GROUP = 10,
+  FieldDescriptorProto_Type_TYPE_MESSAGE = 11,
+  FieldDescriptorProto_Type_TYPE_BYTES = 12,
+  FieldDescriptorProto_Type_TYPE_UINT32 = 13,
+  FieldDescriptorProto_Type_TYPE_ENUM = 14,
+  FieldDescriptorProto_Type_TYPE_SFIXED32 = 15,
+  FieldDescriptorProto_Type_TYPE_SFIXED64 = 16,
+  FieldDescriptorProto_Type_TYPE_SINT32 = 17,
+  FieldDescriptorProto_Type_TYPE_SINT64 = 18,
+};
+enum FieldDescriptorProto_Label : int {
+  FieldDescriptorProto_Label_LABEL_OPTIONAL = 1,
+  FieldDescriptorProto_Label_LABEL_REQUIRED = 2,
+  FieldDescriptorProto_Label_LABEL_REPEATED = 3,
+};
+
+class PERFETTO_EXPORT OneofOptions : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  OneofOptions();
+  ~OneofOptions() override;
+  OneofOptions(OneofOptions&&) noexcept;
+  OneofOptions& operator=(OneofOptions&&);
+  OneofOptions(const OneofOptions&);
+  OneofOptions& operator=(const OneofOptions&);
+  bool operator==(const OneofOptions&) const;
+  bool operator!=(const OneofOptions& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT EnumValueDescriptorProto : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kNumberFieldNumber = 2,
+  };
+
+  EnumValueDescriptorProto();
+  ~EnumValueDescriptorProto() override;
+  EnumValueDescriptorProto(EnumValueDescriptorProto&&) noexcept;
+  EnumValueDescriptorProto& operator=(EnumValueDescriptorProto&&);
+  EnumValueDescriptorProto(const EnumValueDescriptorProto&);
+  EnumValueDescriptorProto& operator=(const EnumValueDescriptorProto&);
+  bool operator==(const EnumValueDescriptorProto&) const;
+  bool operator!=(const EnumValueDescriptorProto& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_number() const { return _has_field_[2]; }
+  int32_t number() const { return number_; }
+  void set_number(int32_t value) { number_ = value; _has_field_.set(2); }
+
+ private:
+  std::string name_{};
+  int32_t number_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT EnumDescriptorProto : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kValueFieldNumber = 2,
+    kReservedNameFieldNumber = 5,
+  };
+
+  EnumDescriptorProto();
+  ~EnumDescriptorProto() override;
+  EnumDescriptorProto(EnumDescriptorProto&&) noexcept;
+  EnumDescriptorProto& operator=(EnumDescriptorProto&&);
+  EnumDescriptorProto(const EnumDescriptorProto&);
+  EnumDescriptorProto& operator=(const EnumDescriptorProto&);
+  bool operator==(const EnumDescriptorProto&) const;
+  bool operator!=(const EnumDescriptorProto& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  const std::vector<EnumValueDescriptorProto>& value() const { return value_; }
+  std::vector<EnumValueDescriptorProto>* mutable_value() { return &value_; }
+  int value_size() const;
+  void clear_value();
+  EnumValueDescriptorProto* add_value();
+
+  const std::vector<std::string>& reserved_name() const { return reserved_name_; }
+  std::vector<std::string>* mutable_reserved_name() { return &reserved_name_; }
+  int reserved_name_size() const { return static_cast<int>(reserved_name_.size()); }
+  void clear_reserved_name() { reserved_name_.clear(); }
+  void add_reserved_name(std::string value) { reserved_name_.emplace_back(value); }
+  std::string* add_reserved_name() { reserved_name_.emplace_back(); return &reserved_name_.back(); }
+
+ private:
+  std::string name_{};
+  std::vector<EnumValueDescriptorProto> value_;
+  std::vector<std::string> reserved_name_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT OneofDescriptorProto : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kOptionsFieldNumber = 2,
+  };
+
+  OneofDescriptorProto();
+  ~OneofDescriptorProto() override;
+  OneofDescriptorProto(OneofDescriptorProto&&) noexcept;
+  OneofDescriptorProto& operator=(OneofDescriptorProto&&);
+  OneofDescriptorProto(const OneofDescriptorProto&);
+  OneofDescriptorProto& operator=(const OneofDescriptorProto&);
+  bool operator==(const OneofDescriptorProto&) const;
+  bool operator!=(const OneofDescriptorProto& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_options() const { return _has_field_[2]; }
+  const OneofOptions& options() const { return *options_; }
+  OneofOptions* mutable_options() { _has_field_.set(2); return options_.get(); }
+
+ private:
+  std::string name_{};
+  ::protozero::CopyablePtr<OneofOptions> options_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT FieldDescriptorProto : public ::protozero::CppMessageObj {
+ public:
+  using Type = FieldDescriptorProto_Type;
+  static constexpr auto TYPE_DOUBLE = FieldDescriptorProto_Type_TYPE_DOUBLE;
+  static constexpr auto TYPE_FLOAT = FieldDescriptorProto_Type_TYPE_FLOAT;
+  static constexpr auto TYPE_INT64 = FieldDescriptorProto_Type_TYPE_INT64;
+  static constexpr auto TYPE_UINT64 = FieldDescriptorProto_Type_TYPE_UINT64;
+  static constexpr auto TYPE_INT32 = FieldDescriptorProto_Type_TYPE_INT32;
+  static constexpr auto TYPE_FIXED64 = FieldDescriptorProto_Type_TYPE_FIXED64;
+  static constexpr auto TYPE_FIXED32 = FieldDescriptorProto_Type_TYPE_FIXED32;
+  static constexpr auto TYPE_BOOL = FieldDescriptorProto_Type_TYPE_BOOL;
+  static constexpr auto TYPE_STRING = FieldDescriptorProto_Type_TYPE_STRING;
+  static constexpr auto TYPE_GROUP = FieldDescriptorProto_Type_TYPE_GROUP;
+  static constexpr auto TYPE_MESSAGE = FieldDescriptorProto_Type_TYPE_MESSAGE;
+  static constexpr auto TYPE_BYTES = FieldDescriptorProto_Type_TYPE_BYTES;
+  static constexpr auto TYPE_UINT32 = FieldDescriptorProto_Type_TYPE_UINT32;
+  static constexpr auto TYPE_ENUM = FieldDescriptorProto_Type_TYPE_ENUM;
+  static constexpr auto TYPE_SFIXED32 = FieldDescriptorProto_Type_TYPE_SFIXED32;
+  static constexpr auto TYPE_SFIXED64 = FieldDescriptorProto_Type_TYPE_SFIXED64;
+  static constexpr auto TYPE_SINT32 = FieldDescriptorProto_Type_TYPE_SINT32;
+  static constexpr auto TYPE_SINT64 = FieldDescriptorProto_Type_TYPE_SINT64;
+  static constexpr auto Type_MIN = FieldDescriptorProto_Type_TYPE_DOUBLE;
+  static constexpr auto Type_MAX = FieldDescriptorProto_Type_TYPE_SINT64;
+  using Label = FieldDescriptorProto_Label;
+  static constexpr auto LABEL_OPTIONAL = FieldDescriptorProto_Label_LABEL_OPTIONAL;
+  static constexpr auto LABEL_REQUIRED = FieldDescriptorProto_Label_LABEL_REQUIRED;
+  static constexpr auto LABEL_REPEATED = FieldDescriptorProto_Label_LABEL_REPEATED;
+  static constexpr auto Label_MIN = FieldDescriptorProto_Label_LABEL_OPTIONAL;
+  static constexpr auto Label_MAX = FieldDescriptorProto_Label_LABEL_REPEATED;
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kNumberFieldNumber = 3,
+    kLabelFieldNumber = 4,
+    kTypeFieldNumber = 5,
+    kTypeNameFieldNumber = 6,
+    kExtendeeFieldNumber = 2,
+    kDefaultValueFieldNumber = 7,
+    kOneofIndexFieldNumber = 9,
+  };
+
+  FieldDescriptorProto();
+  ~FieldDescriptorProto() override;
+  FieldDescriptorProto(FieldDescriptorProto&&) noexcept;
+  FieldDescriptorProto& operator=(FieldDescriptorProto&&);
+  FieldDescriptorProto(const FieldDescriptorProto&);
+  FieldDescriptorProto& operator=(const FieldDescriptorProto&);
+  bool operator==(const FieldDescriptorProto&) const;
+  bool operator!=(const FieldDescriptorProto& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_number() const { return _has_field_[3]; }
+  int32_t number() const { return number_; }
+  void set_number(int32_t value) { number_ = value; _has_field_.set(3); }
+
+  bool has_label() const { return _has_field_[4]; }
+  FieldDescriptorProto_Label label() const { return label_; }
+  void set_label(FieldDescriptorProto_Label value) { label_ = value; _has_field_.set(4); }
+
+  bool has_type() const { return _has_field_[5]; }
+  FieldDescriptorProto_Type type() const { return type_; }
+  void set_type(FieldDescriptorProto_Type value) { type_ = value; _has_field_.set(5); }
+
+  bool has_type_name() const { return _has_field_[6]; }
+  const std::string& type_name() const { return type_name_; }
+  void set_type_name(const std::string& value) { type_name_ = value; _has_field_.set(6); }
+
+  bool has_extendee() const { return _has_field_[2]; }
+  const std::string& extendee() const { return extendee_; }
+  void set_extendee(const std::string& value) { extendee_ = value; _has_field_.set(2); }
+
+  bool has_default_value() const { return _has_field_[7]; }
+  const std::string& default_value() const { return default_value_; }
+  void set_default_value(const std::string& value) { default_value_ = value; _has_field_.set(7); }
+
+  bool has_oneof_index() const { return _has_field_[9]; }
+  int32_t oneof_index() const { return oneof_index_; }
+  void set_oneof_index(int32_t value) { oneof_index_ = value; _has_field_.set(9); }
+
+ private:
+  std::string name_{};
+  int32_t number_{};
+  FieldDescriptorProto_Label label_{};
+  FieldDescriptorProto_Type type_{};
+  std::string type_name_{};
+  std::string extendee_{};
+  std::string default_value_{};
+  int32_t oneof_index_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<10> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT DescriptorProto : public ::protozero::CppMessageObj {
+ public:
+  using ReservedRange = DescriptorProto_ReservedRange;
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kFieldFieldNumber = 2,
+    kExtensionFieldNumber = 6,
+    kNestedTypeFieldNumber = 3,
+    kEnumTypeFieldNumber = 4,
+    kOneofDeclFieldNumber = 8,
+    kReservedRangeFieldNumber = 9,
+    kReservedNameFieldNumber = 10,
+  };
+
+  DescriptorProto();
+  ~DescriptorProto() override;
+  DescriptorProto(DescriptorProto&&) noexcept;
+  DescriptorProto& operator=(DescriptorProto&&);
+  DescriptorProto(const DescriptorProto&);
+  DescriptorProto& operator=(const DescriptorProto&);
+  bool operator==(const DescriptorProto&) const;
+  bool operator!=(const DescriptorProto& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  const std::vector<FieldDescriptorProto>& field() const { return field_; }
+  std::vector<FieldDescriptorProto>* mutable_field() { return &field_; }
+  int field_size() const;
+  void clear_field();
+  FieldDescriptorProto* add_field();
+
+  const std::vector<FieldDescriptorProto>& extension() const { return extension_; }
+  std::vector<FieldDescriptorProto>* mutable_extension() { return &extension_; }
+  int extension_size() const;
+  void clear_extension();
+  FieldDescriptorProto* add_extension();
+
+  const std::vector<DescriptorProto>& nested_type() const { return nested_type_; }
+  std::vector<DescriptorProto>* mutable_nested_type() { return &nested_type_; }
+  int nested_type_size() const;
+  void clear_nested_type();
+  DescriptorProto* add_nested_type();
+
+  const std::vector<EnumDescriptorProto>& enum_type() const { return enum_type_; }
+  std::vector<EnumDescriptorProto>* mutable_enum_type() { return &enum_type_; }
+  int enum_type_size() const;
+  void clear_enum_type();
+  EnumDescriptorProto* add_enum_type();
+
+  const std::vector<OneofDescriptorProto>& oneof_decl() const { return oneof_decl_; }
+  std::vector<OneofDescriptorProto>* mutable_oneof_decl() { return &oneof_decl_; }
+  int oneof_decl_size() const;
+  void clear_oneof_decl();
+  OneofDescriptorProto* add_oneof_decl();
+
+  const std::vector<DescriptorProto_ReservedRange>& reserved_range() const { return reserved_range_; }
+  std::vector<DescriptorProto_ReservedRange>* mutable_reserved_range() { return &reserved_range_; }
+  int reserved_range_size() const;
+  void clear_reserved_range();
+  DescriptorProto_ReservedRange* add_reserved_range();
+
+  const std::vector<std::string>& reserved_name() const { return reserved_name_; }
+  std::vector<std::string>* mutable_reserved_name() { return &reserved_name_; }
+  int reserved_name_size() const { return static_cast<int>(reserved_name_.size()); }
+  void clear_reserved_name() { reserved_name_.clear(); }
+  void add_reserved_name(std::string value) { reserved_name_.emplace_back(value); }
+  std::string* add_reserved_name() { reserved_name_.emplace_back(); return &reserved_name_.back(); }
+
+ private:
+  std::string name_{};
+  std::vector<FieldDescriptorProto> field_;
+  std::vector<FieldDescriptorProto> extension_;
+  std::vector<DescriptorProto> nested_type_;
+  std::vector<EnumDescriptorProto> enum_type_;
+  std::vector<OneofDescriptorProto> oneof_decl_;
+  std::vector<DescriptorProto_ReservedRange> reserved_range_;
+  std::vector<std::string> reserved_name_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<11> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT DescriptorProto_ReservedRange : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kStartFieldNumber = 1,
+    kEndFieldNumber = 2,
+  };
+
+  DescriptorProto_ReservedRange();
+  ~DescriptorProto_ReservedRange() override;
+  DescriptorProto_ReservedRange(DescriptorProto_ReservedRange&&) noexcept;
+  DescriptorProto_ReservedRange& operator=(DescriptorProto_ReservedRange&&);
+  DescriptorProto_ReservedRange(const DescriptorProto_ReservedRange&);
+  DescriptorProto_ReservedRange& operator=(const DescriptorProto_ReservedRange&);
+  bool operator==(const DescriptorProto_ReservedRange&) const;
+  bool operator!=(const DescriptorProto_ReservedRange& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_start() const { return _has_field_[1]; }
+  int32_t start() const { return start_; }
+  void set_start(int32_t value) { start_ = value; _has_field_.set(1); }
+
+  bool has_end() const { return _has_field_[2]; }
+  int32_t end() const { return end_; }
+  void set_end(int32_t value) { end_ = value; _has_field_.set(2); }
+
+ private:
+  int32_t start_{};
+  int32_t end_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT FileDescriptorProto : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kPackageFieldNumber = 2,
+    kDependencyFieldNumber = 3,
+    kPublicDependencyFieldNumber = 10,
+    kWeakDependencyFieldNumber = 11,
+    kMessageTypeFieldNumber = 4,
+    kEnumTypeFieldNumber = 5,
+    kExtensionFieldNumber = 7,
+  };
+
+  FileDescriptorProto();
+  ~FileDescriptorProto() override;
+  FileDescriptorProto(FileDescriptorProto&&) noexcept;
+  FileDescriptorProto& operator=(FileDescriptorProto&&);
+  FileDescriptorProto(const FileDescriptorProto&);
+  FileDescriptorProto& operator=(const FileDescriptorProto&);
+  bool operator==(const FileDescriptorProto&) const;
+  bool operator!=(const FileDescriptorProto& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_package() const { return _has_field_[2]; }
+  const std::string& package() const { return package_; }
+  void set_package(const std::string& value) { package_ = value; _has_field_.set(2); }
+
+  const std::vector<std::string>& dependency() const { return dependency_; }
+  std::vector<std::string>* mutable_dependency() { return &dependency_; }
+  int dependency_size() const { return static_cast<int>(dependency_.size()); }
+  void clear_dependency() { dependency_.clear(); }
+  void add_dependency(std::string value) { dependency_.emplace_back(value); }
+  std::string* add_dependency() { dependency_.emplace_back(); return &dependency_.back(); }
+
+  const std::vector<int32_t>& public_dependency() const { return public_dependency_; }
+  std::vector<int32_t>* mutable_public_dependency() { return &public_dependency_; }
+  int public_dependency_size() const { return static_cast<int>(public_dependency_.size()); }
+  void clear_public_dependency() { public_dependency_.clear(); }
+  void add_public_dependency(int32_t value) { public_dependency_.emplace_back(value); }
+  int32_t* add_public_dependency() { public_dependency_.emplace_back(); return &public_dependency_.back(); }
+
+  const std::vector<int32_t>& weak_dependency() const { return weak_dependency_; }
+  std::vector<int32_t>* mutable_weak_dependency() { return &weak_dependency_; }
+  int weak_dependency_size() const { return static_cast<int>(weak_dependency_.size()); }
+  void clear_weak_dependency() { weak_dependency_.clear(); }
+  void add_weak_dependency(int32_t value) { weak_dependency_.emplace_back(value); }
+  int32_t* add_weak_dependency() { weak_dependency_.emplace_back(); return &weak_dependency_.back(); }
+
+  const std::vector<DescriptorProto>& message_type() const { return message_type_; }
+  std::vector<DescriptorProto>* mutable_message_type() { return &message_type_; }
+  int message_type_size() const;
+  void clear_message_type();
+  DescriptorProto* add_message_type();
+
+  const std::vector<EnumDescriptorProto>& enum_type() const { return enum_type_; }
+  std::vector<EnumDescriptorProto>* mutable_enum_type() { return &enum_type_; }
+  int enum_type_size() const;
+  void clear_enum_type();
+  EnumDescriptorProto* add_enum_type();
+
+  const std::vector<FieldDescriptorProto>& extension() const { return extension_; }
+  std::vector<FieldDescriptorProto>* mutable_extension() { return &extension_; }
+  int extension_size() const;
+  void clear_extension();
+  FieldDescriptorProto* add_extension();
+
+ private:
+  std::string name_{};
+  std::string package_{};
+  std::vector<std::string> dependency_;
+  std::vector<int32_t> public_dependency_;
+  std::vector<int32_t> weak_dependency_;
+  std::vector<DescriptorProto> message_type_;
+  std::vector<EnumDescriptorProto> enum_type_;
+  std::vector<FieldDescriptorProto> extension_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<12> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT FileDescriptorSet : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kFileFieldNumber = 1,
+  };
+
+  FileDescriptorSet();
+  ~FileDescriptorSet() override;
+  FileDescriptorSet(FileDescriptorSet&&) noexcept;
+  FileDescriptorSet& operator=(FileDescriptorSet&&);
+  FileDescriptorSet(const FileDescriptorSet&);
+  FileDescriptorSet& operator=(const FileDescriptorSet&);
+  bool operator==(const FileDescriptorSet&) const;
+  bool operator!=(const FileDescriptorSet& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<FileDescriptorProto>& file() const { return file_; }
+  std::vector<FileDescriptorProto>* mutable_file() { return &file_; }
+  int file_size() const;
+  void clear_file();
+  FileDescriptorProto* add_file();
+
+ private:
+  std::vector<FileDescriptorProto> file_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/common/descriptor.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+OneofOptions::OneofOptions() = default;
+OneofOptions::~OneofOptions() = default;
+OneofOptions::OneofOptions(const OneofOptions&) = default;
+OneofOptions& OneofOptions::operator=(const OneofOptions&) = default;
+OneofOptions::OneofOptions(OneofOptions&&) noexcept = default;
+OneofOptions& OneofOptions::operator=(OneofOptions&&) = default;
+
+bool OneofOptions::operator==(const OneofOptions& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool OneofOptions::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string OneofOptions::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> OneofOptions::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void OneofOptions::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+EnumValueDescriptorProto::EnumValueDescriptorProto() = default;
+EnumValueDescriptorProto::~EnumValueDescriptorProto() = default;
+EnumValueDescriptorProto::EnumValueDescriptorProto(const EnumValueDescriptorProto&) = default;
+EnumValueDescriptorProto& EnumValueDescriptorProto::operator=(const EnumValueDescriptorProto&) = default;
+EnumValueDescriptorProto::EnumValueDescriptorProto(EnumValueDescriptorProto&&) noexcept = default;
+EnumValueDescriptorProto& EnumValueDescriptorProto::operator=(EnumValueDescriptorProto&&) = default;
+
+bool EnumValueDescriptorProto::operator==(const EnumValueDescriptorProto& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && name_ == other.name_
+   && number_ == other.number_;
+}
+
+bool EnumValueDescriptorProto::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* name */:
+        field.get(&name_);
+        break;
+      case 2 /* number */:
+        field.get(&number_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string EnumValueDescriptorProto::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> EnumValueDescriptorProto::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void EnumValueDescriptorProto::Serialize(::protozero::Message* msg) const {
+  // Field 1: name
+  if (_has_field_[1]) {
+    msg->AppendString(1, name_);
+  }
+
+  // Field 2: number
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, number_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+EnumDescriptorProto::EnumDescriptorProto() = default;
+EnumDescriptorProto::~EnumDescriptorProto() = default;
+EnumDescriptorProto::EnumDescriptorProto(const EnumDescriptorProto&) = default;
+EnumDescriptorProto& EnumDescriptorProto::operator=(const EnumDescriptorProto&) = default;
+EnumDescriptorProto::EnumDescriptorProto(EnumDescriptorProto&&) noexcept = default;
+EnumDescriptorProto& EnumDescriptorProto::operator=(EnumDescriptorProto&&) = default;
+
+bool EnumDescriptorProto::operator==(const EnumDescriptorProto& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && name_ == other.name_
+   && value_ == other.value_
+   && reserved_name_ == other.reserved_name_;
+}
+
+int EnumDescriptorProto::value_size() const { return static_cast<int>(value_.size()); }
+void EnumDescriptorProto::clear_value() { value_.clear(); }
+EnumValueDescriptorProto* EnumDescriptorProto::add_value() { value_.emplace_back(); return &value_.back(); }
+bool EnumDescriptorProto::ParseFromArray(const void* raw, size_t size) {
+  value_.clear();
+  reserved_name_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* name */:
+        field.get(&name_);
+        break;
+      case 2 /* value */:
+        value_.emplace_back();
+        value_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 5 /* reserved_name */:
+        reserved_name_.emplace_back();
+        field.get(&reserved_name_.back());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string EnumDescriptorProto::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> EnumDescriptorProto::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void EnumDescriptorProto::Serialize(::protozero::Message* msg) const {
+  // Field 1: name
+  if (_has_field_[1]) {
+    msg->AppendString(1, name_);
+  }
+
+  // Field 2: value
+  for (auto& it : value_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
+  }
+
+  // Field 5: reserved_name
+  for (auto& it : reserved_name_) {
+    msg->AppendString(5, it);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+OneofDescriptorProto::OneofDescriptorProto() = default;
+OneofDescriptorProto::~OneofDescriptorProto() = default;
+OneofDescriptorProto::OneofDescriptorProto(const OneofDescriptorProto&) = default;
+OneofDescriptorProto& OneofDescriptorProto::operator=(const OneofDescriptorProto&) = default;
+OneofDescriptorProto::OneofDescriptorProto(OneofDescriptorProto&&) noexcept = default;
+OneofDescriptorProto& OneofDescriptorProto::operator=(OneofDescriptorProto&&) = default;
+
+bool OneofDescriptorProto::operator==(const OneofDescriptorProto& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && name_ == other.name_
+   && options_ == other.options_;
+}
+
+bool OneofDescriptorProto::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* name */:
+        field.get(&name_);
+        break;
+      case 2 /* options */:
+        (*options_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string OneofDescriptorProto::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> OneofDescriptorProto::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void OneofDescriptorProto::Serialize(::protozero::Message* msg) const {
+  // Field 1: name
+  if (_has_field_[1]) {
+    msg->AppendString(1, name_);
+  }
+
+  // Field 2: options
+  if (_has_field_[2]) {
+    (*options_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+FieldDescriptorProto::FieldDescriptorProto() = default;
+FieldDescriptorProto::~FieldDescriptorProto() = default;
+FieldDescriptorProto::FieldDescriptorProto(const FieldDescriptorProto&) = default;
+FieldDescriptorProto& FieldDescriptorProto::operator=(const FieldDescriptorProto&) = default;
+FieldDescriptorProto::FieldDescriptorProto(FieldDescriptorProto&&) noexcept = default;
+FieldDescriptorProto& FieldDescriptorProto::operator=(FieldDescriptorProto&&) = default;
+
+bool FieldDescriptorProto::operator==(const FieldDescriptorProto& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && name_ == other.name_
+   && number_ == other.number_
+   && label_ == other.label_
+   && type_ == other.type_
+   && type_name_ == other.type_name_
+   && extendee_ == other.extendee_
+   && default_value_ == other.default_value_
+   && oneof_index_ == other.oneof_index_;
+}
+
+bool FieldDescriptorProto::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* name */:
+        field.get(&name_);
+        break;
+      case 3 /* number */:
+        field.get(&number_);
+        break;
+      case 4 /* label */:
+        field.get(&label_);
+        break;
+      case 5 /* type */:
+        field.get(&type_);
+        break;
+      case 6 /* type_name */:
+        field.get(&type_name_);
+        break;
+      case 2 /* extendee */:
+        field.get(&extendee_);
+        break;
+      case 7 /* default_value */:
+        field.get(&default_value_);
+        break;
+      case 9 /* oneof_index */:
+        field.get(&oneof_index_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string FieldDescriptorProto::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> FieldDescriptorProto::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void FieldDescriptorProto::Serialize(::protozero::Message* msg) const {
+  // Field 1: name
+  if (_has_field_[1]) {
+    msg->AppendString(1, name_);
+  }
+
+  // Field 3: number
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, number_);
+  }
+
+  // Field 4: label
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, label_);
+  }
+
+  // Field 5: type
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, type_);
+  }
+
+  // Field 6: type_name
+  if (_has_field_[6]) {
+    msg->AppendString(6, type_name_);
+  }
+
+  // Field 2: extendee
+  if (_has_field_[2]) {
+    msg->AppendString(2, extendee_);
+  }
+
+  // Field 7: default_value
+  if (_has_field_[7]) {
+    msg->AppendString(7, default_value_);
+  }
+
+  // Field 9: oneof_index
+  if (_has_field_[9]) {
+    msg->AppendVarInt(9, oneof_index_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+DescriptorProto::DescriptorProto() = default;
+DescriptorProto::~DescriptorProto() = default;
+DescriptorProto::DescriptorProto(const DescriptorProto&) = default;
+DescriptorProto& DescriptorProto::operator=(const DescriptorProto&) = default;
+DescriptorProto::DescriptorProto(DescriptorProto&&) noexcept = default;
+DescriptorProto& DescriptorProto::operator=(DescriptorProto&&) = default;
+
+bool DescriptorProto::operator==(const DescriptorProto& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && name_ == other.name_
+   && field_ == other.field_
+   && extension_ == other.extension_
+   && nested_type_ == other.nested_type_
+   && enum_type_ == other.enum_type_
+   && oneof_decl_ == other.oneof_decl_
+   && reserved_range_ == other.reserved_range_
+   && reserved_name_ == other.reserved_name_;
+}
+
+int DescriptorProto::field_size() const { return static_cast<int>(field_.size()); }
+void DescriptorProto::clear_field() { field_.clear(); }
+FieldDescriptorProto* DescriptorProto::add_field() { field_.emplace_back(); return &field_.back(); }
+int DescriptorProto::extension_size() const { return static_cast<int>(extension_.size()); }
+void DescriptorProto::clear_extension() { extension_.clear(); }
+FieldDescriptorProto* DescriptorProto::add_extension() { extension_.emplace_back(); return &extension_.back(); }
+int DescriptorProto::nested_type_size() const { return static_cast<int>(nested_type_.size()); }
+void DescriptorProto::clear_nested_type() { nested_type_.clear(); }
+DescriptorProto* DescriptorProto::add_nested_type() { nested_type_.emplace_back(); return &nested_type_.back(); }
+int DescriptorProto::enum_type_size() const { return static_cast<int>(enum_type_.size()); }
+void DescriptorProto::clear_enum_type() { enum_type_.clear(); }
+EnumDescriptorProto* DescriptorProto::add_enum_type() { enum_type_.emplace_back(); return &enum_type_.back(); }
+int DescriptorProto::oneof_decl_size() const { return static_cast<int>(oneof_decl_.size()); }
+void DescriptorProto::clear_oneof_decl() { oneof_decl_.clear(); }
+OneofDescriptorProto* DescriptorProto::add_oneof_decl() { oneof_decl_.emplace_back(); return &oneof_decl_.back(); }
+int DescriptorProto::reserved_range_size() const { return static_cast<int>(reserved_range_.size()); }
+void DescriptorProto::clear_reserved_range() { reserved_range_.clear(); }
+DescriptorProto_ReservedRange* DescriptorProto::add_reserved_range() { reserved_range_.emplace_back(); return &reserved_range_.back(); }
+bool DescriptorProto::ParseFromArray(const void* raw, size_t size) {
+  field_.clear();
+  extension_.clear();
+  nested_type_.clear();
+  enum_type_.clear();
+  oneof_decl_.clear();
+  reserved_range_.clear();
+  reserved_name_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* name */:
+        field.get(&name_);
+        break;
+      case 2 /* field */:
+        field_.emplace_back();
+        field_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 6 /* extension */:
+        extension_.emplace_back();
+        extension_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 3 /* nested_type */:
+        nested_type_.emplace_back();
+        nested_type_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 4 /* enum_type */:
+        enum_type_.emplace_back();
+        enum_type_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 8 /* oneof_decl */:
+        oneof_decl_.emplace_back();
+        oneof_decl_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 9 /* reserved_range */:
+        reserved_range_.emplace_back();
+        reserved_range_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 10 /* reserved_name */:
+        reserved_name_.emplace_back();
+        field.get(&reserved_name_.back());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string DescriptorProto::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> DescriptorProto::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void DescriptorProto::Serialize(::protozero::Message* msg) const {
+  // Field 1: name
+  if (_has_field_[1]) {
+    msg->AppendString(1, name_);
+  }
+
+  // Field 2: field
+  for (auto& it : field_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
+  }
+
+  // Field 6: extension
+  for (auto& it : extension_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
+  }
+
+  // Field 3: nested_type
+  for (auto& it : nested_type_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
+  }
+
+  // Field 4: enum_type
+  for (auto& it : enum_type_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
+  }
+
+  // Field 8: oneof_decl
+  for (auto& it : oneof_decl_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(8));
+  }
+
+  // Field 9: reserved_range
+  for (auto& it : reserved_range_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(9));
+  }
+
+  // Field 10: reserved_name
+  for (auto& it : reserved_name_) {
+    msg->AppendString(10, it);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+DescriptorProto_ReservedRange::DescriptorProto_ReservedRange() = default;
+DescriptorProto_ReservedRange::~DescriptorProto_ReservedRange() = default;
+DescriptorProto_ReservedRange::DescriptorProto_ReservedRange(const DescriptorProto_ReservedRange&) = default;
+DescriptorProto_ReservedRange& DescriptorProto_ReservedRange::operator=(const DescriptorProto_ReservedRange&) = default;
+DescriptorProto_ReservedRange::DescriptorProto_ReservedRange(DescriptorProto_ReservedRange&&) noexcept = default;
+DescriptorProto_ReservedRange& DescriptorProto_ReservedRange::operator=(DescriptorProto_ReservedRange&&) = default;
+
+bool DescriptorProto_ReservedRange::operator==(const DescriptorProto_ReservedRange& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && start_ == other.start_
+   && end_ == other.end_;
+}
+
+bool DescriptorProto_ReservedRange::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* start */:
+        field.get(&start_);
+        break;
+      case 2 /* end */:
+        field.get(&end_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string DescriptorProto_ReservedRange::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> DescriptorProto_ReservedRange::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void DescriptorProto_ReservedRange::Serialize(::protozero::Message* msg) const {
+  // Field 1: start
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, start_);
+  }
+
+  // Field 2: end
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, end_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+FileDescriptorProto::FileDescriptorProto() = default;
+FileDescriptorProto::~FileDescriptorProto() = default;
+FileDescriptorProto::FileDescriptorProto(const FileDescriptorProto&) = default;
+FileDescriptorProto& FileDescriptorProto::operator=(const FileDescriptorProto&) = default;
+FileDescriptorProto::FileDescriptorProto(FileDescriptorProto&&) noexcept = default;
+FileDescriptorProto& FileDescriptorProto::operator=(FileDescriptorProto&&) = default;
+
+bool FileDescriptorProto::operator==(const FileDescriptorProto& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && name_ == other.name_
+   && package_ == other.package_
+   && dependency_ == other.dependency_
+   && public_dependency_ == other.public_dependency_
+   && weak_dependency_ == other.weak_dependency_
+   && message_type_ == other.message_type_
+   && enum_type_ == other.enum_type_
+   && extension_ == other.extension_;
+}
+
+int FileDescriptorProto::message_type_size() const { return static_cast<int>(message_type_.size()); }
+void FileDescriptorProto::clear_message_type() { message_type_.clear(); }
+DescriptorProto* FileDescriptorProto::add_message_type() { message_type_.emplace_back(); return &message_type_.back(); }
+int FileDescriptorProto::enum_type_size() const { return static_cast<int>(enum_type_.size()); }
+void FileDescriptorProto::clear_enum_type() { enum_type_.clear(); }
+EnumDescriptorProto* FileDescriptorProto::add_enum_type() { enum_type_.emplace_back(); return &enum_type_.back(); }
+int FileDescriptorProto::extension_size() const { return static_cast<int>(extension_.size()); }
+void FileDescriptorProto::clear_extension() { extension_.clear(); }
+FieldDescriptorProto* FileDescriptorProto::add_extension() { extension_.emplace_back(); return &extension_.back(); }
+bool FileDescriptorProto::ParseFromArray(const void* raw, size_t size) {
+  dependency_.clear();
+  public_dependency_.clear();
+  weak_dependency_.clear();
+  message_type_.clear();
+  enum_type_.clear();
+  extension_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* name */:
+        field.get(&name_);
+        break;
+      case 2 /* package */:
+        field.get(&package_);
+        break;
+      case 3 /* dependency */:
+        dependency_.emplace_back();
+        field.get(&dependency_.back());
+        break;
+      case 10 /* public_dependency */:
+        public_dependency_.emplace_back();
+        field.get(&public_dependency_.back());
+        break;
+      case 11 /* weak_dependency */:
+        weak_dependency_.emplace_back();
+        field.get(&weak_dependency_.back());
+        break;
+      case 4 /* message_type */:
+        message_type_.emplace_back();
+        message_type_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 5 /* enum_type */:
+        enum_type_.emplace_back();
+        enum_type_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 7 /* extension */:
+        extension_.emplace_back();
+        extension_.back().ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string FileDescriptorProto::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> FileDescriptorProto::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void FileDescriptorProto::Serialize(::protozero::Message* msg) const {
+  // Field 1: name
+  if (_has_field_[1]) {
+    msg->AppendString(1, name_);
+  }
+
+  // Field 2: package
+  if (_has_field_[2]) {
+    msg->AppendString(2, package_);
+  }
+
+  // Field 3: dependency
+  for (auto& it : dependency_) {
+    msg->AppendString(3, it);
+  }
+
+  // Field 10: public_dependency
+  for (auto& it : public_dependency_) {
+    msg->AppendVarInt(10, it);
+  }
+
+  // Field 11: weak_dependency
+  for (auto& it : weak_dependency_) {
+    msg->AppendVarInt(11, it);
+  }
+
+  // Field 4: message_type
+  for (auto& it : message_type_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
+  }
+
+  // Field 5: enum_type
+  for (auto& it : enum_type_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
+  }
+
+  // Field 7: extension
+  for (auto& it : extension_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(7));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+FileDescriptorSet::FileDescriptorSet() = default;
+FileDescriptorSet::~FileDescriptorSet() = default;
+FileDescriptorSet::FileDescriptorSet(const FileDescriptorSet&) = default;
+FileDescriptorSet& FileDescriptorSet::operator=(const FileDescriptorSet&) = default;
+FileDescriptorSet::FileDescriptorSet(FileDescriptorSet&&) noexcept = default;
+FileDescriptorSet& FileDescriptorSet::operator=(FileDescriptorSet&&) = default;
+
+bool FileDescriptorSet::operator==(const FileDescriptorSet& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && file_ == other.file_;
+}
+
+int FileDescriptorSet::file_size() const { return static_cast<int>(file_.size()); }
+void FileDescriptorSet::clear_file() { file_.clear(); }
+FileDescriptorProto* FileDescriptorSet::add_file() { file_.emplace_back(); return &file_.back(); }
+bool FileDescriptorSet::ParseFromArray(const void* raw, size_t size) {
+  file_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* file */:
+        file_.emplace_back();
+        file_.back().ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string FileDescriptorSet::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> FileDescriptorSet::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void FileDescriptorSet::Serialize(::protozero::Message* msg) const {
+  // Field 1: file
+  for (auto& it : file_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/common/gpu_counter_descriptor.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/common/gpu_counter_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_GPU_COUNTER_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_GPU_COUNTER_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class GpuCounterDescriptor;
+class GpuCounterDescriptor_GpuCounterBlock;
+class GpuCounterDescriptor_GpuCounterSpec;
+enum GpuCounterDescriptor_GpuCounterGroup : int;
+enum GpuCounterDescriptor_MeasureUnit : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum GpuCounterDescriptor_GpuCounterGroup : int {
+  GpuCounterDescriptor_GpuCounterGroup_UNCLASSIFIED = 0,
+  GpuCounterDescriptor_GpuCounterGroup_SYSTEM = 1,
+  GpuCounterDescriptor_GpuCounterGroup_VERTICES = 2,
+  GpuCounterDescriptor_GpuCounterGroup_FRAGMENTS = 3,
+  GpuCounterDescriptor_GpuCounterGroup_PRIMITIVES = 4,
+  GpuCounterDescriptor_GpuCounterGroup_MEMORY = 5,
+  GpuCounterDescriptor_GpuCounterGroup_COMPUTE = 6,
+};
+enum GpuCounterDescriptor_MeasureUnit : int {
+  GpuCounterDescriptor_MeasureUnit_NONE = 0,
+  GpuCounterDescriptor_MeasureUnit_BIT = 1,
+  GpuCounterDescriptor_MeasureUnit_KILOBIT = 2,
+  GpuCounterDescriptor_MeasureUnit_MEGABIT = 3,
+  GpuCounterDescriptor_MeasureUnit_GIGABIT = 4,
+  GpuCounterDescriptor_MeasureUnit_TERABIT = 5,
+  GpuCounterDescriptor_MeasureUnit_PETABIT = 6,
+  GpuCounterDescriptor_MeasureUnit_BYTE = 7,
+  GpuCounterDescriptor_MeasureUnit_KILOBYTE = 8,
+  GpuCounterDescriptor_MeasureUnit_MEGABYTE = 9,
+  GpuCounterDescriptor_MeasureUnit_GIGABYTE = 10,
+  GpuCounterDescriptor_MeasureUnit_TERABYTE = 11,
+  GpuCounterDescriptor_MeasureUnit_PETABYTE = 12,
+  GpuCounterDescriptor_MeasureUnit_HERTZ = 13,
+  GpuCounterDescriptor_MeasureUnit_KILOHERTZ = 14,
+  GpuCounterDescriptor_MeasureUnit_MEGAHERTZ = 15,
+  GpuCounterDescriptor_MeasureUnit_GIGAHERTZ = 16,
+  GpuCounterDescriptor_MeasureUnit_TERAHERTZ = 17,
+  GpuCounterDescriptor_MeasureUnit_PETAHERTZ = 18,
+  GpuCounterDescriptor_MeasureUnit_NANOSECOND = 19,
+  GpuCounterDescriptor_MeasureUnit_MICROSECOND = 20,
+  GpuCounterDescriptor_MeasureUnit_MILLISECOND = 21,
+  GpuCounterDescriptor_MeasureUnit_SECOND = 22,
+  GpuCounterDescriptor_MeasureUnit_MINUTE = 23,
+  GpuCounterDescriptor_MeasureUnit_HOUR = 24,
+  GpuCounterDescriptor_MeasureUnit_VERTEX = 25,
+  GpuCounterDescriptor_MeasureUnit_PIXEL = 26,
+  GpuCounterDescriptor_MeasureUnit_TRIANGLE = 27,
+  GpuCounterDescriptor_MeasureUnit_PRIMITIVE = 38,
+  GpuCounterDescriptor_MeasureUnit_FRAGMENT = 39,
+  GpuCounterDescriptor_MeasureUnit_MILLIWATT = 28,
+  GpuCounterDescriptor_MeasureUnit_WATT = 29,
+  GpuCounterDescriptor_MeasureUnit_KILOWATT = 30,
+  GpuCounterDescriptor_MeasureUnit_JOULE = 31,
+  GpuCounterDescriptor_MeasureUnit_VOLT = 32,
+  GpuCounterDescriptor_MeasureUnit_AMPERE = 33,
+  GpuCounterDescriptor_MeasureUnit_CELSIUS = 34,
+  GpuCounterDescriptor_MeasureUnit_FAHRENHEIT = 35,
+  GpuCounterDescriptor_MeasureUnit_KELVIN = 36,
+  GpuCounterDescriptor_MeasureUnit_PERCENT = 37,
+  GpuCounterDescriptor_MeasureUnit_INSTRUCTION = 40,
+};
+
+class PERFETTO_EXPORT GpuCounterDescriptor : public ::protozero::CppMessageObj {
+ public:
+  using GpuCounterSpec = GpuCounterDescriptor_GpuCounterSpec;
+  using GpuCounterBlock = GpuCounterDescriptor_GpuCounterBlock;
+  using GpuCounterGroup = GpuCounterDescriptor_GpuCounterGroup;
+  static constexpr auto UNCLASSIFIED = GpuCounterDescriptor_GpuCounterGroup_UNCLASSIFIED;
+  static constexpr auto SYSTEM = GpuCounterDescriptor_GpuCounterGroup_SYSTEM;
+  static constexpr auto VERTICES = GpuCounterDescriptor_GpuCounterGroup_VERTICES;
+  static constexpr auto FRAGMENTS = GpuCounterDescriptor_GpuCounterGroup_FRAGMENTS;
+  static constexpr auto PRIMITIVES = GpuCounterDescriptor_GpuCounterGroup_PRIMITIVES;
+  static constexpr auto MEMORY = GpuCounterDescriptor_GpuCounterGroup_MEMORY;
+  static constexpr auto COMPUTE = GpuCounterDescriptor_GpuCounterGroup_COMPUTE;
+  static constexpr auto GpuCounterGroup_MIN = GpuCounterDescriptor_GpuCounterGroup_UNCLASSIFIED;
+  static constexpr auto GpuCounterGroup_MAX = GpuCounterDescriptor_GpuCounterGroup_COMPUTE;
+  using MeasureUnit = GpuCounterDescriptor_MeasureUnit;
+  static constexpr auto NONE = GpuCounterDescriptor_MeasureUnit_NONE;
+  static constexpr auto BIT = GpuCounterDescriptor_MeasureUnit_BIT;
+  static constexpr auto KILOBIT = GpuCounterDescriptor_MeasureUnit_KILOBIT;
+  static constexpr auto MEGABIT = GpuCounterDescriptor_MeasureUnit_MEGABIT;
+  static constexpr auto GIGABIT = GpuCounterDescriptor_MeasureUnit_GIGABIT;
+  static constexpr auto TERABIT = GpuCounterDescriptor_MeasureUnit_TERABIT;
+  static constexpr auto PETABIT = GpuCounterDescriptor_MeasureUnit_PETABIT;
+  static constexpr auto BYTE = GpuCounterDescriptor_MeasureUnit_BYTE;
+  static constexpr auto KILOBYTE = GpuCounterDescriptor_MeasureUnit_KILOBYTE;
+  static constexpr auto MEGABYTE = GpuCounterDescriptor_MeasureUnit_MEGABYTE;
+  static constexpr auto GIGABYTE = GpuCounterDescriptor_MeasureUnit_GIGABYTE;
+  static constexpr auto TERABYTE = GpuCounterDescriptor_MeasureUnit_TERABYTE;
+  static constexpr auto PETABYTE = GpuCounterDescriptor_MeasureUnit_PETABYTE;
+  static constexpr auto HERTZ = GpuCounterDescriptor_MeasureUnit_HERTZ;
+  static constexpr auto KILOHERTZ = GpuCounterDescriptor_MeasureUnit_KILOHERTZ;
+  static constexpr auto MEGAHERTZ = GpuCounterDescriptor_MeasureUnit_MEGAHERTZ;
+  static constexpr auto GIGAHERTZ = GpuCounterDescriptor_MeasureUnit_GIGAHERTZ;
+  static constexpr auto TERAHERTZ = GpuCounterDescriptor_MeasureUnit_TERAHERTZ;
+  static constexpr auto PETAHERTZ = GpuCounterDescriptor_MeasureUnit_PETAHERTZ;
+  static constexpr auto NANOSECOND = GpuCounterDescriptor_MeasureUnit_NANOSECOND;
+  static constexpr auto MICROSECOND = GpuCounterDescriptor_MeasureUnit_MICROSECOND;
+  static constexpr auto MILLISECOND = GpuCounterDescriptor_MeasureUnit_MILLISECOND;
+  static constexpr auto SECOND = GpuCounterDescriptor_MeasureUnit_SECOND;
+  static constexpr auto MINUTE = GpuCounterDescriptor_MeasureUnit_MINUTE;
+  static constexpr auto HOUR = GpuCounterDescriptor_MeasureUnit_HOUR;
+  static constexpr auto VERTEX = GpuCounterDescriptor_MeasureUnit_VERTEX;
+  static constexpr auto PIXEL = GpuCounterDescriptor_MeasureUnit_PIXEL;
+  static constexpr auto TRIANGLE = GpuCounterDescriptor_MeasureUnit_TRIANGLE;
+  static constexpr auto PRIMITIVE = GpuCounterDescriptor_MeasureUnit_PRIMITIVE;
+  static constexpr auto FRAGMENT = GpuCounterDescriptor_MeasureUnit_FRAGMENT;
+  static constexpr auto MILLIWATT = GpuCounterDescriptor_MeasureUnit_MILLIWATT;
+  static constexpr auto WATT = GpuCounterDescriptor_MeasureUnit_WATT;
+  static constexpr auto KILOWATT = GpuCounterDescriptor_MeasureUnit_KILOWATT;
+  static constexpr auto JOULE = GpuCounterDescriptor_MeasureUnit_JOULE;
+  static constexpr auto VOLT = GpuCounterDescriptor_MeasureUnit_VOLT;
+  static constexpr auto AMPERE = GpuCounterDescriptor_MeasureUnit_AMPERE;
+  static constexpr auto CELSIUS = GpuCounterDescriptor_MeasureUnit_CELSIUS;
+  static constexpr auto FAHRENHEIT = GpuCounterDescriptor_MeasureUnit_FAHRENHEIT;
+  static constexpr auto KELVIN = GpuCounterDescriptor_MeasureUnit_KELVIN;
+  static constexpr auto PERCENT = GpuCounterDescriptor_MeasureUnit_PERCENT;
+  static constexpr auto INSTRUCTION = GpuCounterDescriptor_MeasureUnit_INSTRUCTION;
+  static constexpr auto MeasureUnit_MIN = GpuCounterDescriptor_MeasureUnit_NONE;
+  static constexpr auto MeasureUnit_MAX = GpuCounterDescriptor_MeasureUnit_INSTRUCTION;
+  enum FieldNumbers {
+    kSpecsFieldNumber = 1,
+    kBlocksFieldNumber = 2,
+    kMinSamplingPeriodNsFieldNumber = 3,
+    kMaxSamplingPeriodNsFieldNumber = 4,
+    kSupportsInstrumentedSamplingFieldNumber = 5,
+  };
+
+  GpuCounterDescriptor();
+  ~GpuCounterDescriptor() override;
+  GpuCounterDescriptor(GpuCounterDescriptor&&) noexcept;
+  GpuCounterDescriptor& operator=(GpuCounterDescriptor&&);
+  GpuCounterDescriptor(const GpuCounterDescriptor&);
+  GpuCounterDescriptor& operator=(const GpuCounterDescriptor&);
+  bool operator==(const GpuCounterDescriptor&) const;
+  bool operator!=(const GpuCounterDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<GpuCounterDescriptor_GpuCounterSpec>& specs() const { return specs_; }
+  std::vector<GpuCounterDescriptor_GpuCounterSpec>* mutable_specs() { return &specs_; }
+  int specs_size() const;
+  void clear_specs();
+  GpuCounterDescriptor_GpuCounterSpec* add_specs();
+
+  const std::vector<GpuCounterDescriptor_GpuCounterBlock>& blocks() const { return blocks_; }
+  std::vector<GpuCounterDescriptor_GpuCounterBlock>* mutable_blocks() { return &blocks_; }
+  int blocks_size() const;
+  void clear_blocks();
+  GpuCounterDescriptor_GpuCounterBlock* add_blocks();
+
+  bool has_min_sampling_period_ns() const { return _has_field_[3]; }
+  uint64_t min_sampling_period_ns() const { return min_sampling_period_ns_; }
+  void set_min_sampling_period_ns(uint64_t value) { min_sampling_period_ns_ = value; _has_field_.set(3); }
+
+  bool has_max_sampling_period_ns() const { return _has_field_[4]; }
+  uint64_t max_sampling_period_ns() const { return max_sampling_period_ns_; }
+  void set_max_sampling_period_ns(uint64_t value) { max_sampling_period_ns_ = value; _has_field_.set(4); }
+
+  bool has_supports_instrumented_sampling() const { return _has_field_[5]; }
+  bool supports_instrumented_sampling() const { return supports_instrumented_sampling_; }
+  void set_supports_instrumented_sampling(bool value) { supports_instrumented_sampling_ = value; _has_field_.set(5); }
+
+ private:
+  std::vector<GpuCounterDescriptor_GpuCounterSpec> specs_;
+  std::vector<GpuCounterDescriptor_GpuCounterBlock> blocks_;
+  uint64_t min_sampling_period_ns_{};
+  uint64_t max_sampling_period_ns_{};
+  bool supports_instrumented_sampling_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GpuCounterDescriptor_GpuCounterBlock : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kBlockIdFieldNumber = 1,
+    kBlockCapacityFieldNumber = 2,
+    kNameFieldNumber = 3,
+    kDescriptionFieldNumber = 4,
+    kCounterIdsFieldNumber = 5,
+  };
+
+  GpuCounterDescriptor_GpuCounterBlock();
+  ~GpuCounterDescriptor_GpuCounterBlock() override;
+  GpuCounterDescriptor_GpuCounterBlock(GpuCounterDescriptor_GpuCounterBlock&&) noexcept;
+  GpuCounterDescriptor_GpuCounterBlock& operator=(GpuCounterDescriptor_GpuCounterBlock&&);
+  GpuCounterDescriptor_GpuCounterBlock(const GpuCounterDescriptor_GpuCounterBlock&);
+  GpuCounterDescriptor_GpuCounterBlock& operator=(const GpuCounterDescriptor_GpuCounterBlock&);
+  bool operator==(const GpuCounterDescriptor_GpuCounterBlock&) const;
+  bool operator!=(const GpuCounterDescriptor_GpuCounterBlock& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_block_id() const { return _has_field_[1]; }
+  uint32_t block_id() const { return block_id_; }
+  void set_block_id(uint32_t value) { block_id_ = value; _has_field_.set(1); }
+
+  bool has_block_capacity() const { return _has_field_[2]; }
+  uint32_t block_capacity() const { return block_capacity_; }
+  void set_block_capacity(uint32_t value) { block_capacity_ = value; _has_field_.set(2); }
+
+  bool has_name() const { return _has_field_[3]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(3); }
+
+  bool has_description() const { return _has_field_[4]; }
+  const std::string& description() const { return description_; }
+  void set_description(const std::string& value) { description_ = value; _has_field_.set(4); }
+
+  const std::vector<uint32_t>& counter_ids() const { return counter_ids_; }
+  std::vector<uint32_t>* mutable_counter_ids() { return &counter_ids_; }
+  int counter_ids_size() const { return static_cast<int>(counter_ids_.size()); }
+  void clear_counter_ids() { counter_ids_.clear(); }
+  void add_counter_ids(uint32_t value) { counter_ids_.emplace_back(value); }
+  uint32_t* add_counter_ids() { counter_ids_.emplace_back(); return &counter_ids_.back(); }
+
+ private:
+  uint32_t block_id_{};
+  uint32_t block_capacity_{};
+  std::string name_{};
+  std::string description_{};
+  std::vector<uint32_t> counter_ids_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GpuCounterDescriptor_GpuCounterSpec : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kCounterIdFieldNumber = 1,
+    kNameFieldNumber = 2,
+    kDescriptionFieldNumber = 3,
+    kIntPeakValueFieldNumber = 5,
+    kDoublePeakValueFieldNumber = 6,
+    kNumeratorUnitsFieldNumber = 7,
+    kDenominatorUnitsFieldNumber = 8,
+    kSelectByDefaultFieldNumber = 9,
+    kGroupsFieldNumber = 10,
+  };
+
+  GpuCounterDescriptor_GpuCounterSpec();
+  ~GpuCounterDescriptor_GpuCounterSpec() override;
+  GpuCounterDescriptor_GpuCounterSpec(GpuCounterDescriptor_GpuCounterSpec&&) noexcept;
+  GpuCounterDescriptor_GpuCounterSpec& operator=(GpuCounterDescriptor_GpuCounterSpec&&);
+  GpuCounterDescriptor_GpuCounterSpec(const GpuCounterDescriptor_GpuCounterSpec&);
+  GpuCounterDescriptor_GpuCounterSpec& operator=(const GpuCounterDescriptor_GpuCounterSpec&);
+  bool operator==(const GpuCounterDescriptor_GpuCounterSpec&) const;
+  bool operator!=(const GpuCounterDescriptor_GpuCounterSpec& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_counter_id() const { return _has_field_[1]; }
+  uint32_t counter_id() const { return counter_id_; }
+  void set_counter_id(uint32_t value) { counter_id_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+  bool has_description() const { return _has_field_[3]; }
+  const std::string& description() const { return description_; }
+  void set_description(const std::string& value) { description_ = value; _has_field_.set(3); }
+
+  bool has_int_peak_value() const { return _has_field_[5]; }
+  int64_t int_peak_value() const { return int_peak_value_; }
+  void set_int_peak_value(int64_t value) { int_peak_value_ = value; _has_field_.set(5); }
+
+  bool has_double_peak_value() const { return _has_field_[6]; }
+  double double_peak_value() const { return double_peak_value_; }
+  void set_double_peak_value(double value) { double_peak_value_ = value; _has_field_.set(6); }
+
+  const std::vector<GpuCounterDescriptor_MeasureUnit>& numerator_units() const { return numerator_units_; }
+  std::vector<GpuCounterDescriptor_MeasureUnit>* mutable_numerator_units() { return &numerator_units_; }
+  int numerator_units_size() const { return static_cast<int>(numerator_units_.size()); }
+  void clear_numerator_units() { numerator_units_.clear(); }
+  void add_numerator_units(GpuCounterDescriptor_MeasureUnit value) { numerator_units_.emplace_back(value); }
+  GpuCounterDescriptor_MeasureUnit* add_numerator_units() { numerator_units_.emplace_back(); return &numerator_units_.back(); }
+
+  const std::vector<GpuCounterDescriptor_MeasureUnit>& denominator_units() const { return denominator_units_; }
+  std::vector<GpuCounterDescriptor_MeasureUnit>* mutable_denominator_units() { return &denominator_units_; }
+  int denominator_units_size() const { return static_cast<int>(denominator_units_.size()); }
+  void clear_denominator_units() { denominator_units_.clear(); }
+  void add_denominator_units(GpuCounterDescriptor_MeasureUnit value) { denominator_units_.emplace_back(value); }
+  GpuCounterDescriptor_MeasureUnit* add_denominator_units() { denominator_units_.emplace_back(); return &denominator_units_.back(); }
+
+  bool has_select_by_default() const { return _has_field_[9]; }
+  bool select_by_default() const { return select_by_default_; }
+  void set_select_by_default(bool value) { select_by_default_ = value; _has_field_.set(9); }
+
+  const std::vector<GpuCounterDescriptor_GpuCounterGroup>& groups() const { return groups_; }
+  std::vector<GpuCounterDescriptor_GpuCounterGroup>* mutable_groups() { return &groups_; }
+  int groups_size() const { return static_cast<int>(groups_.size()); }
+  void clear_groups() { groups_.clear(); }
+  void add_groups(GpuCounterDescriptor_GpuCounterGroup value) { groups_.emplace_back(value); }
+  GpuCounterDescriptor_GpuCounterGroup* add_groups() { groups_.emplace_back(); return &groups_.back(); }
+
+ private:
+  uint32_t counter_id_{};
+  std::string name_{};
+  std::string description_{};
+  int64_t int_peak_value_{};
+  double double_peak_value_{};
+  std::vector<GpuCounterDescriptor_MeasureUnit> numerator_units_;
+  std::vector<GpuCounterDescriptor_MeasureUnit> denominator_units_;
+  bool select_by_default_{};
+  std::vector<GpuCounterDescriptor_GpuCounterGroup> groups_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<11> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_GPU_COUNTER_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/common/gpu_counter_descriptor.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+GpuCounterDescriptor::GpuCounterDescriptor() = default;
+GpuCounterDescriptor::~GpuCounterDescriptor() = default;
+GpuCounterDescriptor::GpuCounterDescriptor(const GpuCounterDescriptor&) = default;
+GpuCounterDescriptor& GpuCounterDescriptor::operator=(const GpuCounterDescriptor&) = default;
+GpuCounterDescriptor::GpuCounterDescriptor(GpuCounterDescriptor&&) noexcept = default;
+GpuCounterDescriptor& GpuCounterDescriptor::operator=(GpuCounterDescriptor&&) = default;
+
+bool GpuCounterDescriptor::operator==(const GpuCounterDescriptor& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && specs_ == other.specs_
+   && blocks_ == other.blocks_
+   && min_sampling_period_ns_ == other.min_sampling_period_ns_
+   && max_sampling_period_ns_ == other.max_sampling_period_ns_
+   && supports_instrumented_sampling_ == other.supports_instrumented_sampling_;
+}
+
+int GpuCounterDescriptor::specs_size() const { return static_cast<int>(specs_.size()); }
+void GpuCounterDescriptor::clear_specs() { specs_.clear(); }
+GpuCounterDescriptor_GpuCounterSpec* GpuCounterDescriptor::add_specs() { specs_.emplace_back(); return &specs_.back(); }
+int GpuCounterDescriptor::blocks_size() const { return static_cast<int>(blocks_.size()); }
+void GpuCounterDescriptor::clear_blocks() { blocks_.clear(); }
+GpuCounterDescriptor_GpuCounterBlock* GpuCounterDescriptor::add_blocks() { blocks_.emplace_back(); return &blocks_.back(); }
+bool GpuCounterDescriptor::ParseFromArray(const void* raw, size_t size) {
+  specs_.clear();
+  blocks_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* specs */:
+        specs_.emplace_back();
+        specs_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 2 /* blocks */:
+        blocks_.emplace_back();
+        blocks_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 3 /* min_sampling_period_ns */:
+        field.get(&min_sampling_period_ns_);
+        break;
+      case 4 /* max_sampling_period_ns */:
+        field.get(&max_sampling_period_ns_);
+        break;
+      case 5 /* supports_instrumented_sampling */:
+        field.get(&supports_instrumented_sampling_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string GpuCounterDescriptor::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> GpuCounterDescriptor::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void GpuCounterDescriptor::Serialize(::protozero::Message* msg) const {
+  // Field 1: specs
+  for (auto& it : specs_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  // Field 2: blocks
+  for (auto& it : blocks_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
+  }
+
+  // Field 3: min_sampling_period_ns
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, min_sampling_period_ns_);
+  }
+
+  // Field 4: max_sampling_period_ns
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, max_sampling_period_ns_);
+  }
+
+  // Field 5: supports_instrumented_sampling
+  if (_has_field_[5]) {
+    msg->AppendTinyVarInt(5, supports_instrumented_sampling_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+GpuCounterDescriptor_GpuCounterBlock::GpuCounterDescriptor_GpuCounterBlock() = default;
+GpuCounterDescriptor_GpuCounterBlock::~GpuCounterDescriptor_GpuCounterBlock() = default;
+GpuCounterDescriptor_GpuCounterBlock::GpuCounterDescriptor_GpuCounterBlock(const GpuCounterDescriptor_GpuCounterBlock&) = default;
+GpuCounterDescriptor_GpuCounterBlock& GpuCounterDescriptor_GpuCounterBlock::operator=(const GpuCounterDescriptor_GpuCounterBlock&) = default;
+GpuCounterDescriptor_GpuCounterBlock::GpuCounterDescriptor_GpuCounterBlock(GpuCounterDescriptor_GpuCounterBlock&&) noexcept = default;
+GpuCounterDescriptor_GpuCounterBlock& GpuCounterDescriptor_GpuCounterBlock::operator=(GpuCounterDescriptor_GpuCounterBlock&&) = default;
+
+bool GpuCounterDescriptor_GpuCounterBlock::operator==(const GpuCounterDescriptor_GpuCounterBlock& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && block_id_ == other.block_id_
+   && block_capacity_ == other.block_capacity_
+   && name_ == other.name_
+   && description_ == other.description_
+   && counter_ids_ == other.counter_ids_;
+}
+
+bool GpuCounterDescriptor_GpuCounterBlock::ParseFromArray(const void* raw, size_t size) {
+  counter_ids_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* block_id */:
+        field.get(&block_id_);
+        break;
+      case 2 /* block_capacity */:
+        field.get(&block_capacity_);
+        break;
+      case 3 /* name */:
+        field.get(&name_);
+        break;
+      case 4 /* description */:
+        field.get(&description_);
+        break;
+      case 5 /* counter_ids */:
+        counter_ids_.emplace_back();
+        field.get(&counter_ids_.back());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string GpuCounterDescriptor_GpuCounterBlock::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> GpuCounterDescriptor_GpuCounterBlock::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void GpuCounterDescriptor_GpuCounterBlock::Serialize(::protozero::Message* msg) const {
+  // Field 1: block_id
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, block_id_);
+  }
+
+  // Field 2: block_capacity
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, block_capacity_);
+  }
+
+  // Field 3: name
+  if (_has_field_[3]) {
+    msg->AppendString(3, name_);
+  }
+
+  // Field 4: description
+  if (_has_field_[4]) {
+    msg->AppendString(4, description_);
+  }
+
+  // Field 5: counter_ids
+  for (auto& it : counter_ids_) {
+    msg->AppendVarInt(5, it);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+GpuCounterDescriptor_GpuCounterSpec::GpuCounterDescriptor_GpuCounterSpec() = default;
+GpuCounterDescriptor_GpuCounterSpec::~GpuCounterDescriptor_GpuCounterSpec() = default;
+GpuCounterDescriptor_GpuCounterSpec::GpuCounterDescriptor_GpuCounterSpec(const GpuCounterDescriptor_GpuCounterSpec&) = default;
+GpuCounterDescriptor_GpuCounterSpec& GpuCounterDescriptor_GpuCounterSpec::operator=(const GpuCounterDescriptor_GpuCounterSpec&) = default;
+GpuCounterDescriptor_GpuCounterSpec::GpuCounterDescriptor_GpuCounterSpec(GpuCounterDescriptor_GpuCounterSpec&&) noexcept = default;
+GpuCounterDescriptor_GpuCounterSpec& GpuCounterDescriptor_GpuCounterSpec::operator=(GpuCounterDescriptor_GpuCounterSpec&&) = default;
+
+bool GpuCounterDescriptor_GpuCounterSpec::operator==(const GpuCounterDescriptor_GpuCounterSpec& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && counter_id_ == other.counter_id_
+   && name_ == other.name_
+   && description_ == other.description_
+   && int_peak_value_ == other.int_peak_value_
+   && double_peak_value_ == other.double_peak_value_
+   && numerator_units_ == other.numerator_units_
+   && denominator_units_ == other.denominator_units_
+   && select_by_default_ == other.select_by_default_
+   && groups_ == other.groups_;
+}
+
+bool GpuCounterDescriptor_GpuCounterSpec::ParseFromArray(const void* raw, size_t size) {
+  numerator_units_.clear();
+  denominator_units_.clear();
+  groups_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* counter_id */:
+        field.get(&counter_id_);
+        break;
+      case 2 /* name */:
+        field.get(&name_);
+        break;
+      case 3 /* description */:
+        field.get(&description_);
+        break;
+      case 5 /* int_peak_value */:
+        field.get(&int_peak_value_);
+        break;
+      case 6 /* double_peak_value */:
+        field.get(&double_peak_value_);
+        break;
+      case 7 /* numerator_units */:
+        numerator_units_.emplace_back();
+        field.get(&numerator_units_.back());
+        break;
+      case 8 /* denominator_units */:
+        denominator_units_.emplace_back();
+        field.get(&denominator_units_.back());
+        break;
+      case 9 /* select_by_default */:
+        field.get(&select_by_default_);
+        break;
+      case 10 /* groups */:
+        groups_.emplace_back();
+        field.get(&groups_.back());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string GpuCounterDescriptor_GpuCounterSpec::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> GpuCounterDescriptor_GpuCounterSpec::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void GpuCounterDescriptor_GpuCounterSpec::Serialize(::protozero::Message* msg) const {
+  // Field 1: counter_id
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, counter_id_);
+  }
+
+  // Field 2: name
+  if (_has_field_[2]) {
+    msg->AppendString(2, name_);
+  }
+
+  // Field 3: description
+  if (_has_field_[3]) {
+    msg->AppendString(3, description_);
+  }
+
+  // Field 5: int_peak_value
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, int_peak_value_);
+  }
+
+  // Field 6: double_peak_value
+  if (_has_field_[6]) {
+    msg->AppendFixed(6, double_peak_value_);
+  }
+
+  // Field 7: numerator_units
+  for (auto& it : numerator_units_) {
+    msg->AppendVarInt(7, it);
+  }
+
+  // Field 8: denominator_units
+  for (auto& it : denominator_units_) {
+    msg->AppendVarInt(8, it);
+  }
+
+  // Field 9: select_by_default
+  if (_has_field_[9]) {
+    msg->AppendTinyVarInt(9, select_by_default_);
+  }
+
+  // Field 10: groups
+  for (auto& it : groups_) {
+    msg->AppendVarInt(10, it);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/common/interceptor_descriptor.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/common/interceptor_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_INTERCEPTOR_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_INTERCEPTOR_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class InterceptorDescriptor;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT InterceptorDescriptor : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+  };
+
+  InterceptorDescriptor();
+  ~InterceptorDescriptor() override;
+  InterceptorDescriptor(InterceptorDescriptor&&) noexcept;
+  InterceptorDescriptor& operator=(InterceptorDescriptor&&);
+  InterceptorDescriptor(const InterceptorDescriptor&);
+  InterceptorDescriptor& operator=(const InterceptorDescriptor&);
+  bool operator==(const InterceptorDescriptor&) const;
+  bool operator!=(const InterceptorDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+ private:
+  std::string name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_INTERCEPTOR_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/common/interceptor_descriptor.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+InterceptorDescriptor::InterceptorDescriptor() = default;
+InterceptorDescriptor::~InterceptorDescriptor() = default;
+InterceptorDescriptor::InterceptorDescriptor(const InterceptorDescriptor&) = default;
+InterceptorDescriptor& InterceptorDescriptor::operator=(const InterceptorDescriptor&) = default;
+InterceptorDescriptor::InterceptorDescriptor(InterceptorDescriptor&&) noexcept = default;
+InterceptorDescriptor& InterceptorDescriptor::operator=(InterceptorDescriptor&&) = default;
+
+bool InterceptorDescriptor::operator==(const InterceptorDescriptor& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && name_ == other.name_;
+}
+
+bool InterceptorDescriptor::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* name */:
+        field.get(&name_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string InterceptorDescriptor::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> InterceptorDescriptor::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void InterceptorDescriptor::Serialize(::protozero::Message* msg) const {
+  // Field 1: name
+  if (_has_field_[1]) {
+    msg->AppendString(1, name_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/common/observable_events.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/common/observable_events.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_OBSERVABLE_EVENTS_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_OBSERVABLE_EVENTS_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ObservableEvents;
+class ObservableEvents_DataSourceInstanceStateChange;
+enum ObservableEvents_Type : int;
+enum ObservableEvents_DataSourceInstanceState : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ObservableEvents_Type : int {
+  ObservableEvents_Type_TYPE_UNSPECIFIED = 0,
+  ObservableEvents_Type_TYPE_DATA_SOURCES_INSTANCES = 1,
+  ObservableEvents_Type_TYPE_ALL_DATA_SOURCES_STARTED = 2,
+};
+enum ObservableEvents_DataSourceInstanceState : int {
+  ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STOPPED = 1,
+  ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STARTED = 2,
+};
+
+class PERFETTO_EXPORT ObservableEvents : public ::protozero::CppMessageObj {
+ public:
+  using DataSourceInstanceStateChange = ObservableEvents_DataSourceInstanceStateChange;
+  using Type = ObservableEvents_Type;
+  static constexpr auto TYPE_UNSPECIFIED = ObservableEvents_Type_TYPE_UNSPECIFIED;
+  static constexpr auto TYPE_DATA_SOURCES_INSTANCES = ObservableEvents_Type_TYPE_DATA_SOURCES_INSTANCES;
+  static constexpr auto TYPE_ALL_DATA_SOURCES_STARTED = ObservableEvents_Type_TYPE_ALL_DATA_SOURCES_STARTED;
+  static constexpr auto Type_MIN = ObservableEvents_Type_TYPE_UNSPECIFIED;
+  static constexpr auto Type_MAX = ObservableEvents_Type_TYPE_ALL_DATA_SOURCES_STARTED;
+  using DataSourceInstanceState = ObservableEvents_DataSourceInstanceState;
+  static constexpr auto DATA_SOURCE_INSTANCE_STATE_STOPPED = ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STOPPED;
+  static constexpr auto DATA_SOURCE_INSTANCE_STATE_STARTED = ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STARTED;
+  static constexpr auto DataSourceInstanceState_MIN = ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STOPPED;
+  static constexpr auto DataSourceInstanceState_MAX = ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STARTED;
+  enum FieldNumbers {
+    kInstanceStateChangesFieldNumber = 1,
+    kAllDataSourcesStartedFieldNumber = 2,
+  };
+
+  ObservableEvents();
+  ~ObservableEvents() override;
+  ObservableEvents(ObservableEvents&&) noexcept;
+  ObservableEvents& operator=(ObservableEvents&&);
+  ObservableEvents(const ObservableEvents&);
+  ObservableEvents& operator=(const ObservableEvents&);
+  bool operator==(const ObservableEvents&) const;
+  bool operator!=(const ObservableEvents& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<ObservableEvents_DataSourceInstanceStateChange>& instance_state_changes() const { return instance_state_changes_; }
+  std::vector<ObservableEvents_DataSourceInstanceStateChange>* mutable_instance_state_changes() { return &instance_state_changes_; }
+  int instance_state_changes_size() const;
+  void clear_instance_state_changes();
+  ObservableEvents_DataSourceInstanceStateChange* add_instance_state_changes();
+
+  bool has_all_data_sources_started() const { return _has_field_[2]; }
+  bool all_data_sources_started() const { return all_data_sources_started_; }
+  void set_all_data_sources_started(bool value) { all_data_sources_started_ = value; _has_field_.set(2); }
+
+ private:
+  std::vector<ObservableEvents_DataSourceInstanceStateChange> instance_state_changes_;
+  bool all_data_sources_started_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ObservableEvents_DataSourceInstanceStateChange : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kProducerNameFieldNumber = 1,
+    kDataSourceNameFieldNumber = 2,
+    kStateFieldNumber = 3,
+  };
+
+  ObservableEvents_DataSourceInstanceStateChange();
+  ~ObservableEvents_DataSourceInstanceStateChange() override;
+  ObservableEvents_DataSourceInstanceStateChange(ObservableEvents_DataSourceInstanceStateChange&&) noexcept;
+  ObservableEvents_DataSourceInstanceStateChange& operator=(ObservableEvents_DataSourceInstanceStateChange&&);
+  ObservableEvents_DataSourceInstanceStateChange(const ObservableEvents_DataSourceInstanceStateChange&);
+  ObservableEvents_DataSourceInstanceStateChange& operator=(const ObservableEvents_DataSourceInstanceStateChange&);
+  bool operator==(const ObservableEvents_DataSourceInstanceStateChange&) const;
+  bool operator!=(const ObservableEvents_DataSourceInstanceStateChange& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_producer_name() const { return _has_field_[1]; }
+  const std::string& producer_name() const { return producer_name_; }
+  void set_producer_name(const std::string& value) { producer_name_ = value; _has_field_.set(1); }
+
+  bool has_data_source_name() const { return _has_field_[2]; }
+  const std::string& data_source_name() const { return data_source_name_; }
+  void set_data_source_name(const std::string& value) { data_source_name_ = value; _has_field_.set(2); }
+
+  bool has_state() const { return _has_field_[3]; }
+  ObservableEvents_DataSourceInstanceState state() const { return state_; }
+  void set_state(ObservableEvents_DataSourceInstanceState value) { state_ = value; _has_field_.set(3); }
+
+ private:
+  std::string producer_name_{};
+  std::string data_source_name_{};
+  ObservableEvents_DataSourceInstanceState state_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_OBSERVABLE_EVENTS_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ObservableEvents::ObservableEvents() = default;
+ObservableEvents::~ObservableEvents() = default;
+ObservableEvents::ObservableEvents(const ObservableEvents&) = default;
+ObservableEvents& ObservableEvents::operator=(const ObservableEvents&) = default;
+ObservableEvents::ObservableEvents(ObservableEvents&&) noexcept = default;
+ObservableEvents& ObservableEvents::operator=(ObservableEvents&&) = default;
+
+bool ObservableEvents::operator==(const ObservableEvents& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && instance_state_changes_ == other.instance_state_changes_
+   && all_data_sources_started_ == other.all_data_sources_started_;
+}
+
+int ObservableEvents::instance_state_changes_size() const { return static_cast<int>(instance_state_changes_.size()); }
+void ObservableEvents::clear_instance_state_changes() { instance_state_changes_.clear(); }
+ObservableEvents_DataSourceInstanceStateChange* ObservableEvents::add_instance_state_changes() { instance_state_changes_.emplace_back(); return &instance_state_changes_.back(); }
+bool ObservableEvents::ParseFromArray(const void* raw, size_t size) {
+  instance_state_changes_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* instance_state_changes */:
+        instance_state_changes_.emplace_back();
+        instance_state_changes_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 2 /* all_data_sources_started */:
+        field.get(&all_data_sources_started_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ObservableEvents::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ObservableEvents::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ObservableEvents::Serialize(::protozero::Message* msg) const {
+  // Field 1: instance_state_changes
+  for (auto& it : instance_state_changes_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  // Field 2: all_data_sources_started
+  if (_has_field_[2]) {
+    msg->AppendTinyVarInt(2, all_data_sources_started_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+ObservableEvents_DataSourceInstanceStateChange::ObservableEvents_DataSourceInstanceStateChange() = default;
+ObservableEvents_DataSourceInstanceStateChange::~ObservableEvents_DataSourceInstanceStateChange() = default;
+ObservableEvents_DataSourceInstanceStateChange::ObservableEvents_DataSourceInstanceStateChange(const ObservableEvents_DataSourceInstanceStateChange&) = default;
+ObservableEvents_DataSourceInstanceStateChange& ObservableEvents_DataSourceInstanceStateChange::operator=(const ObservableEvents_DataSourceInstanceStateChange&) = default;
+ObservableEvents_DataSourceInstanceStateChange::ObservableEvents_DataSourceInstanceStateChange(ObservableEvents_DataSourceInstanceStateChange&&) noexcept = default;
+ObservableEvents_DataSourceInstanceStateChange& ObservableEvents_DataSourceInstanceStateChange::operator=(ObservableEvents_DataSourceInstanceStateChange&&) = default;
+
+bool ObservableEvents_DataSourceInstanceStateChange::operator==(const ObservableEvents_DataSourceInstanceStateChange& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && producer_name_ == other.producer_name_
+   && data_source_name_ == other.data_source_name_
+   && state_ == other.state_;
+}
+
+bool ObservableEvents_DataSourceInstanceStateChange::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* producer_name */:
+        field.get(&producer_name_);
+        break;
+      case 2 /* data_source_name */:
+        field.get(&data_source_name_);
+        break;
+      case 3 /* state */:
+        field.get(&state_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ObservableEvents_DataSourceInstanceStateChange::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ObservableEvents_DataSourceInstanceStateChange::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ObservableEvents_DataSourceInstanceStateChange::Serialize(::protozero::Message* msg) const {
+  // Field 1: producer_name
+  if (_has_field_[1]) {
+    msg->AppendString(1, producer_name_);
+  }
+
+  // Field 2: data_source_name
+  if (_has_field_[2]) {
+    msg->AppendString(2, data_source_name_);
+  }
+
+  // Field 3: state
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, state_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/common/perf_events.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/common/perf_events.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_PERF_EVENTS_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_PERF_EVENTS_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class PerfEvents;
+class PerfEvents_Tracepoint;
+class PerfEvents_Timebase;
+enum PerfEvents_Counter : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum PerfEvents_Counter : int {
+  PerfEvents_Counter_UNKNOWN_COUNTER = 0,
+  PerfEvents_Counter_SW_CPU_CLOCK = 1,
+  PerfEvents_Counter_SW_PAGE_FAULTS = 2,
+  PerfEvents_Counter_HW_CPU_CYCLES = 10,
+  PerfEvents_Counter_HW_INSTRUCTIONS = 11,
+};
+
+class PERFETTO_EXPORT PerfEvents : public ::protozero::CppMessageObj {
+ public:
+  using Timebase = PerfEvents_Timebase;
+  using Tracepoint = PerfEvents_Tracepoint;
+  using Counter = PerfEvents_Counter;
+  static constexpr auto UNKNOWN_COUNTER = PerfEvents_Counter_UNKNOWN_COUNTER;
+  static constexpr auto SW_CPU_CLOCK = PerfEvents_Counter_SW_CPU_CLOCK;
+  static constexpr auto SW_PAGE_FAULTS = PerfEvents_Counter_SW_PAGE_FAULTS;
+  static constexpr auto HW_CPU_CYCLES = PerfEvents_Counter_HW_CPU_CYCLES;
+  static constexpr auto HW_INSTRUCTIONS = PerfEvents_Counter_HW_INSTRUCTIONS;
+  static constexpr auto Counter_MIN = PerfEvents_Counter_UNKNOWN_COUNTER;
+  static constexpr auto Counter_MAX = PerfEvents_Counter_HW_INSTRUCTIONS;
+  enum FieldNumbers {
+  };
+
+  PerfEvents();
+  ~PerfEvents() override;
+  PerfEvents(PerfEvents&&) noexcept;
+  PerfEvents& operator=(PerfEvents&&);
+  PerfEvents(const PerfEvents&);
+  PerfEvents& operator=(const PerfEvents&);
+  bool operator==(const PerfEvents&) const;
+  bool operator!=(const PerfEvents& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT PerfEvents_Tracepoint : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kFilterFieldNumber = 2,
+  };
+
+  PerfEvents_Tracepoint();
+  ~PerfEvents_Tracepoint() override;
+  PerfEvents_Tracepoint(PerfEvents_Tracepoint&&) noexcept;
+  PerfEvents_Tracepoint& operator=(PerfEvents_Tracepoint&&);
+  PerfEvents_Tracepoint(const PerfEvents_Tracepoint&);
+  PerfEvents_Tracepoint& operator=(const PerfEvents_Tracepoint&);
+  bool operator==(const PerfEvents_Tracepoint&) const;
+  bool operator!=(const PerfEvents_Tracepoint& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_filter() const { return _has_field_[2]; }
+  const std::string& filter() const { return filter_; }
+  void set_filter(const std::string& value) { filter_ = value; _has_field_.set(2); }
+
+ private:
+  std::string name_{};
+  std::string filter_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT PerfEvents_Timebase : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kFrequencyFieldNumber = 2,
+    kPeriodFieldNumber = 1,
+    kCounterFieldNumber = 4,
+    kTracepointFieldNumber = 3,
+  };
+
+  PerfEvents_Timebase();
+  ~PerfEvents_Timebase() override;
+  PerfEvents_Timebase(PerfEvents_Timebase&&) noexcept;
+  PerfEvents_Timebase& operator=(PerfEvents_Timebase&&);
+  PerfEvents_Timebase(const PerfEvents_Timebase&);
+  PerfEvents_Timebase& operator=(const PerfEvents_Timebase&);
+  bool operator==(const PerfEvents_Timebase&) const;
+  bool operator!=(const PerfEvents_Timebase& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_frequency() const { return _has_field_[2]; }
+  uint64_t frequency() const { return frequency_; }
+  void set_frequency(uint64_t value) { frequency_ = value; _has_field_.set(2); }
+
+  bool has_period() const { return _has_field_[1]; }
+  uint64_t period() const { return period_; }
+  void set_period(uint64_t value) { period_ = value; _has_field_.set(1); }
+
+  bool has_counter() const { return _has_field_[4]; }
+  PerfEvents_Counter counter() const { return counter_; }
+  void set_counter(PerfEvents_Counter value) { counter_ = value; _has_field_.set(4); }
+
+  bool has_tracepoint() const { return _has_field_[3]; }
+  const PerfEvents_Tracepoint& tracepoint() const { return *tracepoint_; }
+  PerfEvents_Tracepoint* mutable_tracepoint() { _has_field_.set(3); return tracepoint_.get(); }
+
+ private:
+  uint64_t frequency_{};
+  uint64_t period_{};
+  PerfEvents_Counter counter_{};
+  ::protozero::CopyablePtr<PerfEvents_Tracepoint> tracepoint_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_PERF_EVENTS_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/common/perf_events.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+PerfEvents::PerfEvents() = default;
+PerfEvents::~PerfEvents() = default;
+PerfEvents::PerfEvents(const PerfEvents&) = default;
+PerfEvents& PerfEvents::operator=(const PerfEvents&) = default;
+PerfEvents::PerfEvents(PerfEvents&&) noexcept = default;
+PerfEvents& PerfEvents::operator=(PerfEvents&&) = default;
+
+bool PerfEvents::operator==(const PerfEvents& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool PerfEvents::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string PerfEvents::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> PerfEvents::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void PerfEvents::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+PerfEvents_Tracepoint::PerfEvents_Tracepoint() = default;
+PerfEvents_Tracepoint::~PerfEvents_Tracepoint() = default;
+PerfEvents_Tracepoint::PerfEvents_Tracepoint(const PerfEvents_Tracepoint&) = default;
+PerfEvents_Tracepoint& PerfEvents_Tracepoint::operator=(const PerfEvents_Tracepoint&) = default;
+PerfEvents_Tracepoint::PerfEvents_Tracepoint(PerfEvents_Tracepoint&&) noexcept = default;
+PerfEvents_Tracepoint& PerfEvents_Tracepoint::operator=(PerfEvents_Tracepoint&&) = default;
+
+bool PerfEvents_Tracepoint::operator==(const PerfEvents_Tracepoint& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && name_ == other.name_
+   && filter_ == other.filter_;
+}
+
+bool PerfEvents_Tracepoint::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* name */:
+        field.get(&name_);
+        break;
+      case 2 /* filter */:
+        field.get(&filter_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string PerfEvents_Tracepoint::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> PerfEvents_Tracepoint::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void PerfEvents_Tracepoint::Serialize(::protozero::Message* msg) const {
+  // Field 1: name
+  if (_has_field_[1]) {
+    msg->AppendString(1, name_);
+  }
+
+  // Field 2: filter
+  if (_has_field_[2]) {
+    msg->AppendString(2, filter_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+PerfEvents_Timebase::PerfEvents_Timebase() = default;
+PerfEvents_Timebase::~PerfEvents_Timebase() = default;
+PerfEvents_Timebase::PerfEvents_Timebase(const PerfEvents_Timebase&) = default;
+PerfEvents_Timebase& PerfEvents_Timebase::operator=(const PerfEvents_Timebase&) = default;
+PerfEvents_Timebase::PerfEvents_Timebase(PerfEvents_Timebase&&) noexcept = default;
+PerfEvents_Timebase& PerfEvents_Timebase::operator=(PerfEvents_Timebase&&) = default;
+
+bool PerfEvents_Timebase::operator==(const PerfEvents_Timebase& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && frequency_ == other.frequency_
+   && period_ == other.period_
+   && counter_ == other.counter_
+   && tracepoint_ == other.tracepoint_;
+}
+
+bool PerfEvents_Timebase::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 2 /* frequency */:
+        field.get(&frequency_);
+        break;
+      case 1 /* period */:
+        field.get(&period_);
+        break;
+      case 4 /* counter */:
+        field.get(&counter_);
+        break;
+      case 3 /* tracepoint */:
+        (*tracepoint_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string PerfEvents_Timebase::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> PerfEvents_Timebase::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void PerfEvents_Timebase::Serialize(::protozero::Message* msg) const {
+  // Field 2: frequency
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, frequency_);
+  }
+
+  // Field 1: period
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, period_);
+  }
+
+  // Field 4: counter
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, counter_);
+  }
+
+  // Field 3: tracepoint
+  if (_has_field_[3]) {
+    (*tracepoint_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/common/sys_stats_counters.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/common/sys_stats_counters.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_SYS_STATS_COUNTERS_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_SYS_STATS_COUNTERS_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum MeminfoCounters : int;
+enum VmstatCounters : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum MeminfoCounters : int {
+  MEMINFO_UNSPECIFIED = 0,
+  MEMINFO_MEM_TOTAL = 1,
+  MEMINFO_MEM_FREE = 2,
+  MEMINFO_MEM_AVAILABLE = 3,
+  MEMINFO_BUFFERS = 4,
+  MEMINFO_CACHED = 5,
+  MEMINFO_SWAP_CACHED = 6,
+  MEMINFO_ACTIVE = 7,
+  MEMINFO_INACTIVE = 8,
+  MEMINFO_ACTIVE_ANON = 9,
+  MEMINFO_INACTIVE_ANON = 10,
+  MEMINFO_ACTIVE_FILE = 11,
+  MEMINFO_INACTIVE_FILE = 12,
+  MEMINFO_UNEVICTABLE = 13,
+  MEMINFO_MLOCKED = 14,
+  MEMINFO_SWAP_TOTAL = 15,
+  MEMINFO_SWAP_FREE = 16,
+  MEMINFO_DIRTY = 17,
+  MEMINFO_WRITEBACK = 18,
+  MEMINFO_ANON_PAGES = 19,
+  MEMINFO_MAPPED = 20,
+  MEMINFO_SHMEM = 21,
+  MEMINFO_SLAB = 22,
+  MEMINFO_SLAB_RECLAIMABLE = 23,
+  MEMINFO_SLAB_UNRECLAIMABLE = 24,
+  MEMINFO_KERNEL_STACK = 25,
+  MEMINFO_PAGE_TABLES = 26,
+  MEMINFO_COMMIT_LIMIT = 27,
+  MEMINFO_COMMITED_AS = 28,
+  MEMINFO_VMALLOC_TOTAL = 29,
+  MEMINFO_VMALLOC_USED = 30,
+  MEMINFO_VMALLOC_CHUNK = 31,
+  MEMINFO_CMA_TOTAL = 32,
+  MEMINFO_CMA_FREE = 33,
+};
+enum VmstatCounters : int {
+  VMSTAT_UNSPECIFIED = 0,
+  VMSTAT_NR_FREE_PAGES = 1,
+  VMSTAT_NR_ALLOC_BATCH = 2,
+  VMSTAT_NR_INACTIVE_ANON = 3,
+  VMSTAT_NR_ACTIVE_ANON = 4,
+  VMSTAT_NR_INACTIVE_FILE = 5,
+  VMSTAT_NR_ACTIVE_FILE = 6,
+  VMSTAT_NR_UNEVICTABLE = 7,
+  VMSTAT_NR_MLOCK = 8,
+  VMSTAT_NR_ANON_PAGES = 9,
+  VMSTAT_NR_MAPPED = 10,
+  VMSTAT_NR_FILE_PAGES = 11,
+  VMSTAT_NR_DIRTY = 12,
+  VMSTAT_NR_WRITEBACK = 13,
+  VMSTAT_NR_SLAB_RECLAIMABLE = 14,
+  VMSTAT_NR_SLAB_UNRECLAIMABLE = 15,
+  VMSTAT_NR_PAGE_TABLE_PAGES = 16,
+  VMSTAT_NR_KERNEL_STACK = 17,
+  VMSTAT_NR_OVERHEAD = 18,
+  VMSTAT_NR_UNSTABLE = 19,
+  VMSTAT_NR_BOUNCE = 20,
+  VMSTAT_NR_VMSCAN_WRITE = 21,
+  VMSTAT_NR_VMSCAN_IMMEDIATE_RECLAIM = 22,
+  VMSTAT_NR_WRITEBACK_TEMP = 23,
+  VMSTAT_NR_ISOLATED_ANON = 24,
+  VMSTAT_NR_ISOLATED_FILE = 25,
+  VMSTAT_NR_SHMEM = 26,
+  VMSTAT_NR_DIRTIED = 27,
+  VMSTAT_NR_WRITTEN = 28,
+  VMSTAT_NR_PAGES_SCANNED = 29,
+  VMSTAT_WORKINGSET_REFAULT = 30,
+  VMSTAT_WORKINGSET_ACTIVATE = 31,
+  VMSTAT_WORKINGSET_NODERECLAIM = 32,
+  VMSTAT_NR_ANON_TRANSPARENT_HUGEPAGES = 33,
+  VMSTAT_NR_FREE_CMA = 34,
+  VMSTAT_NR_SWAPCACHE = 35,
+  VMSTAT_NR_DIRTY_THRESHOLD = 36,
+  VMSTAT_NR_DIRTY_BACKGROUND_THRESHOLD = 37,
+  VMSTAT_PGPGIN = 38,
+  VMSTAT_PGPGOUT = 39,
+  VMSTAT_PGPGOUTCLEAN = 40,
+  VMSTAT_PSWPIN = 41,
+  VMSTAT_PSWPOUT = 42,
+  VMSTAT_PGALLOC_DMA = 43,
+  VMSTAT_PGALLOC_NORMAL = 44,
+  VMSTAT_PGALLOC_MOVABLE = 45,
+  VMSTAT_PGFREE = 46,
+  VMSTAT_PGACTIVATE = 47,
+  VMSTAT_PGDEACTIVATE = 48,
+  VMSTAT_PGFAULT = 49,
+  VMSTAT_PGMAJFAULT = 50,
+  VMSTAT_PGREFILL_DMA = 51,
+  VMSTAT_PGREFILL_NORMAL = 52,
+  VMSTAT_PGREFILL_MOVABLE = 53,
+  VMSTAT_PGSTEAL_KSWAPD_DMA = 54,
+  VMSTAT_PGSTEAL_KSWAPD_NORMAL = 55,
+  VMSTAT_PGSTEAL_KSWAPD_MOVABLE = 56,
+  VMSTAT_PGSTEAL_DIRECT_DMA = 57,
+  VMSTAT_PGSTEAL_DIRECT_NORMAL = 58,
+  VMSTAT_PGSTEAL_DIRECT_MOVABLE = 59,
+  VMSTAT_PGSCAN_KSWAPD_DMA = 60,
+  VMSTAT_PGSCAN_KSWAPD_NORMAL = 61,
+  VMSTAT_PGSCAN_KSWAPD_MOVABLE = 62,
+  VMSTAT_PGSCAN_DIRECT_DMA = 63,
+  VMSTAT_PGSCAN_DIRECT_NORMAL = 64,
+  VMSTAT_PGSCAN_DIRECT_MOVABLE = 65,
+  VMSTAT_PGSCAN_DIRECT_THROTTLE = 66,
+  VMSTAT_PGINODESTEAL = 67,
+  VMSTAT_SLABS_SCANNED = 68,
+  VMSTAT_KSWAPD_INODESTEAL = 69,
+  VMSTAT_KSWAPD_LOW_WMARK_HIT_QUICKLY = 70,
+  VMSTAT_KSWAPD_HIGH_WMARK_HIT_QUICKLY = 71,
+  VMSTAT_PAGEOUTRUN = 72,
+  VMSTAT_ALLOCSTALL = 73,
+  VMSTAT_PGROTATED = 74,
+  VMSTAT_DROP_PAGECACHE = 75,
+  VMSTAT_DROP_SLAB = 76,
+  VMSTAT_PGMIGRATE_SUCCESS = 77,
+  VMSTAT_PGMIGRATE_FAIL = 78,
+  VMSTAT_COMPACT_MIGRATE_SCANNED = 79,
+  VMSTAT_COMPACT_FREE_SCANNED = 80,
+  VMSTAT_COMPACT_ISOLATED = 81,
+  VMSTAT_COMPACT_STALL = 82,
+  VMSTAT_COMPACT_FAIL = 83,
+  VMSTAT_COMPACT_SUCCESS = 84,
+  VMSTAT_COMPACT_DAEMON_WAKE = 85,
+  VMSTAT_UNEVICTABLE_PGS_CULLED = 86,
+  VMSTAT_UNEVICTABLE_PGS_SCANNED = 87,
+  VMSTAT_UNEVICTABLE_PGS_RESCUED = 88,
+  VMSTAT_UNEVICTABLE_PGS_MLOCKED = 89,
+  VMSTAT_UNEVICTABLE_PGS_MUNLOCKED = 90,
+  VMSTAT_UNEVICTABLE_PGS_CLEARED = 91,
+  VMSTAT_UNEVICTABLE_PGS_STRANDED = 92,
+  VMSTAT_NR_ZSPAGES = 93,
+  VMSTAT_NR_ION_HEAP = 94,
+  VMSTAT_NR_GPU_HEAP = 95,
+  VMSTAT_ALLOCSTALL_DMA = 96,
+  VMSTAT_ALLOCSTALL_MOVABLE = 97,
+  VMSTAT_ALLOCSTALL_NORMAL = 98,
+  VMSTAT_COMPACT_DAEMON_FREE_SCANNED = 99,
+  VMSTAT_COMPACT_DAEMON_MIGRATE_SCANNED = 100,
+  VMSTAT_NR_FASTRPC = 101,
+  VMSTAT_NR_INDIRECTLY_RECLAIMABLE = 102,
+  VMSTAT_NR_ION_HEAP_POOL = 103,
+  VMSTAT_NR_KERNEL_MISC_RECLAIMABLE = 104,
+  VMSTAT_NR_SHADOW_CALL_STACK_BYTES = 105,
+  VMSTAT_NR_SHMEM_HUGEPAGES = 106,
+  VMSTAT_NR_SHMEM_PMDMAPPED = 107,
+  VMSTAT_NR_UNRECLAIMABLE_PAGES = 108,
+  VMSTAT_NR_ZONE_ACTIVE_ANON = 109,
+  VMSTAT_NR_ZONE_ACTIVE_FILE = 110,
+  VMSTAT_NR_ZONE_INACTIVE_ANON = 111,
+  VMSTAT_NR_ZONE_INACTIVE_FILE = 112,
+  VMSTAT_NR_ZONE_UNEVICTABLE = 113,
+  VMSTAT_NR_ZONE_WRITE_PENDING = 114,
+  VMSTAT_OOM_KILL = 115,
+  VMSTAT_PGLAZYFREE = 116,
+  VMSTAT_PGLAZYFREED = 117,
+  VMSTAT_PGREFILL = 118,
+  VMSTAT_PGSCAN_DIRECT = 119,
+  VMSTAT_PGSCAN_KSWAPD = 120,
+  VMSTAT_PGSKIP_DMA = 121,
+  VMSTAT_PGSKIP_MOVABLE = 122,
+  VMSTAT_PGSKIP_NORMAL = 123,
+  VMSTAT_PGSTEAL_DIRECT = 124,
+  VMSTAT_PGSTEAL_KSWAPD = 125,
+  VMSTAT_SWAP_RA = 126,
+  VMSTAT_SWAP_RA_HIT = 127,
+  VMSTAT_WORKINGSET_RESTORE = 128,
+};
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_SYS_STATS_COUNTERS_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/common/trace_stats.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/common/trace_stats.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACE_STATS_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACE_STATS_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TraceStats;
+class TraceStats_FilterStats;
+class TraceStats_BufferStats;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT TraceStats : public ::protozero::CppMessageObj {
+ public:
+  using BufferStats = TraceStats_BufferStats;
+  using FilterStats = TraceStats_FilterStats;
+  enum FieldNumbers {
+    kBufferStatsFieldNumber = 1,
+    kProducersConnectedFieldNumber = 2,
+    kProducersSeenFieldNumber = 3,
+    kDataSourcesRegisteredFieldNumber = 4,
+    kDataSourcesSeenFieldNumber = 5,
+    kTracingSessionsFieldNumber = 6,
+    kTotalBuffersFieldNumber = 7,
+    kChunksDiscardedFieldNumber = 8,
+    kPatchesDiscardedFieldNumber = 9,
+    kInvalidPacketsFieldNumber = 10,
+    kFilterStatsFieldNumber = 11,
+  };
+
+  TraceStats();
+  ~TraceStats() override;
+  TraceStats(TraceStats&&) noexcept;
+  TraceStats& operator=(TraceStats&&);
+  TraceStats(const TraceStats&);
+  TraceStats& operator=(const TraceStats&);
+  bool operator==(const TraceStats&) const;
+  bool operator!=(const TraceStats& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<TraceStats_BufferStats>& buffer_stats() const { return buffer_stats_; }
+  std::vector<TraceStats_BufferStats>* mutable_buffer_stats() { return &buffer_stats_; }
+  int buffer_stats_size() const;
+  void clear_buffer_stats();
+  TraceStats_BufferStats* add_buffer_stats();
+
+  bool has_producers_connected() const { return _has_field_[2]; }
+  uint32_t producers_connected() const { return producers_connected_; }
+  void set_producers_connected(uint32_t value) { producers_connected_ = value; _has_field_.set(2); }
+
+  bool has_producers_seen() const { return _has_field_[3]; }
+  uint64_t producers_seen() const { return producers_seen_; }
+  void set_producers_seen(uint64_t value) { producers_seen_ = value; _has_field_.set(3); }
+
+  bool has_data_sources_registered() const { return _has_field_[4]; }
+  uint32_t data_sources_registered() const { return data_sources_registered_; }
+  void set_data_sources_registered(uint32_t value) { data_sources_registered_ = value; _has_field_.set(4); }
+
+  bool has_data_sources_seen() const { return _has_field_[5]; }
+  uint64_t data_sources_seen() const { return data_sources_seen_; }
+  void set_data_sources_seen(uint64_t value) { data_sources_seen_ = value; _has_field_.set(5); }
+
+  bool has_tracing_sessions() const { return _has_field_[6]; }
+  uint32_t tracing_sessions() const { return tracing_sessions_; }
+  void set_tracing_sessions(uint32_t value) { tracing_sessions_ = value; _has_field_.set(6); }
+
+  bool has_total_buffers() const { return _has_field_[7]; }
+  uint32_t total_buffers() const { return total_buffers_; }
+  void set_total_buffers(uint32_t value) { total_buffers_ = value; _has_field_.set(7); }
+
+  bool has_chunks_discarded() const { return _has_field_[8]; }
+  uint64_t chunks_discarded() const { return chunks_discarded_; }
+  void set_chunks_discarded(uint64_t value) { chunks_discarded_ = value; _has_field_.set(8); }
+
+  bool has_patches_discarded() const { return _has_field_[9]; }
+  uint64_t patches_discarded() const { return patches_discarded_; }
+  void set_patches_discarded(uint64_t value) { patches_discarded_ = value; _has_field_.set(9); }
+
+  bool has_invalid_packets() const { return _has_field_[10]; }
+  uint64_t invalid_packets() const { return invalid_packets_; }
+  void set_invalid_packets(uint64_t value) { invalid_packets_ = value; _has_field_.set(10); }
+
+  bool has_filter_stats() const { return _has_field_[11]; }
+  const TraceStats_FilterStats& filter_stats() const { return *filter_stats_; }
+  TraceStats_FilterStats* mutable_filter_stats() { _has_field_.set(11); return filter_stats_.get(); }
+
+ private:
+  std::vector<TraceStats_BufferStats> buffer_stats_;
+  uint32_t producers_connected_{};
+  uint64_t producers_seen_{};
+  uint32_t data_sources_registered_{};
+  uint64_t data_sources_seen_{};
+  uint32_t tracing_sessions_{};
+  uint32_t total_buffers_{};
+  uint64_t chunks_discarded_{};
+  uint64_t patches_discarded_{};
+  uint64_t invalid_packets_{};
+  ::protozero::CopyablePtr<TraceStats_FilterStats> filter_stats_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<12> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceStats_FilterStats : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kInputPacketsFieldNumber = 1,
+    kInputBytesFieldNumber = 2,
+    kOutputBytesFieldNumber = 3,
+    kErrorsFieldNumber = 4,
+  };
+
+  TraceStats_FilterStats();
+  ~TraceStats_FilterStats() override;
+  TraceStats_FilterStats(TraceStats_FilterStats&&) noexcept;
+  TraceStats_FilterStats& operator=(TraceStats_FilterStats&&);
+  TraceStats_FilterStats(const TraceStats_FilterStats&);
+  TraceStats_FilterStats& operator=(const TraceStats_FilterStats&);
+  bool operator==(const TraceStats_FilterStats&) const;
+  bool operator!=(const TraceStats_FilterStats& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_input_packets() const { return _has_field_[1]; }
+  uint64_t input_packets() const { return input_packets_; }
+  void set_input_packets(uint64_t value) { input_packets_ = value; _has_field_.set(1); }
+
+  bool has_input_bytes() const { return _has_field_[2]; }
+  uint64_t input_bytes() const { return input_bytes_; }
+  void set_input_bytes(uint64_t value) { input_bytes_ = value; _has_field_.set(2); }
+
+  bool has_output_bytes() const { return _has_field_[3]; }
+  uint64_t output_bytes() const { return output_bytes_; }
+  void set_output_bytes(uint64_t value) { output_bytes_ = value; _has_field_.set(3); }
+
+  bool has_errors() const { return _has_field_[4]; }
+  uint64_t errors() const { return errors_; }
+  void set_errors(uint64_t value) { errors_ = value; _has_field_.set(4); }
+
+ private:
+  uint64_t input_packets_{};
+  uint64_t input_bytes_{};
+  uint64_t output_bytes_{};
+  uint64_t errors_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceStats_BufferStats : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kBufferSizeFieldNumber = 12,
+    kBytesWrittenFieldNumber = 1,
+    kBytesOverwrittenFieldNumber = 13,
+    kBytesReadFieldNumber = 14,
+    kPaddingBytesWrittenFieldNumber = 15,
+    kPaddingBytesClearedFieldNumber = 16,
+    kChunksWrittenFieldNumber = 2,
+    kChunksRewrittenFieldNumber = 10,
+    kChunksOverwrittenFieldNumber = 3,
+    kChunksDiscardedFieldNumber = 18,
+    kChunksReadFieldNumber = 17,
+    kChunksCommittedOutOfOrderFieldNumber = 11,
+    kWriteWrapCountFieldNumber = 4,
+    kPatchesSucceededFieldNumber = 5,
+    kPatchesFailedFieldNumber = 6,
+    kReadaheadsSucceededFieldNumber = 7,
+    kReadaheadsFailedFieldNumber = 8,
+    kAbiViolationsFieldNumber = 9,
+    kTraceWriterPacketLossFieldNumber = 19,
+  };
+
+  TraceStats_BufferStats();
+  ~TraceStats_BufferStats() override;
+  TraceStats_BufferStats(TraceStats_BufferStats&&) noexcept;
+  TraceStats_BufferStats& operator=(TraceStats_BufferStats&&);
+  TraceStats_BufferStats(const TraceStats_BufferStats&);
+  TraceStats_BufferStats& operator=(const TraceStats_BufferStats&);
+  bool operator==(const TraceStats_BufferStats&) const;
+  bool operator!=(const TraceStats_BufferStats& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_buffer_size() const { return _has_field_[12]; }
+  uint64_t buffer_size() const { return buffer_size_; }
+  void set_buffer_size(uint64_t value) { buffer_size_ = value; _has_field_.set(12); }
+
+  bool has_bytes_written() const { return _has_field_[1]; }
+  uint64_t bytes_written() const { return bytes_written_; }
+  void set_bytes_written(uint64_t value) { bytes_written_ = value; _has_field_.set(1); }
+
+  bool has_bytes_overwritten() const { return _has_field_[13]; }
+  uint64_t bytes_overwritten() const { return bytes_overwritten_; }
+  void set_bytes_overwritten(uint64_t value) { bytes_overwritten_ = value; _has_field_.set(13); }
+
+  bool has_bytes_read() const { return _has_field_[14]; }
+  uint64_t bytes_read() const { return bytes_read_; }
+  void set_bytes_read(uint64_t value) { bytes_read_ = value; _has_field_.set(14); }
+
+  bool has_padding_bytes_written() const { return _has_field_[15]; }
+  uint64_t padding_bytes_written() const { return padding_bytes_written_; }
+  void set_padding_bytes_written(uint64_t value) { padding_bytes_written_ = value; _has_field_.set(15); }
+
+  bool has_padding_bytes_cleared() const { return _has_field_[16]; }
+  uint64_t padding_bytes_cleared() const { return padding_bytes_cleared_; }
+  void set_padding_bytes_cleared(uint64_t value) { padding_bytes_cleared_ = value; _has_field_.set(16); }
+
+  bool has_chunks_written() const { return _has_field_[2]; }
+  uint64_t chunks_written() const { return chunks_written_; }
+  void set_chunks_written(uint64_t value) { chunks_written_ = value; _has_field_.set(2); }
+
+  bool has_chunks_rewritten() const { return _has_field_[10]; }
+  uint64_t chunks_rewritten() const { return chunks_rewritten_; }
+  void set_chunks_rewritten(uint64_t value) { chunks_rewritten_ = value; _has_field_.set(10); }
+
+  bool has_chunks_overwritten() const { return _has_field_[3]; }
+  uint64_t chunks_overwritten() const { return chunks_overwritten_; }
+  void set_chunks_overwritten(uint64_t value) { chunks_overwritten_ = value; _has_field_.set(3); }
+
+  bool has_chunks_discarded() const { return _has_field_[18]; }
+  uint64_t chunks_discarded() const { return chunks_discarded_; }
+  void set_chunks_discarded(uint64_t value) { chunks_discarded_ = value; _has_field_.set(18); }
+
+  bool has_chunks_read() const { return _has_field_[17]; }
+  uint64_t chunks_read() const { return chunks_read_; }
+  void set_chunks_read(uint64_t value) { chunks_read_ = value; _has_field_.set(17); }
+
+  bool has_chunks_committed_out_of_order() const { return _has_field_[11]; }
+  uint64_t chunks_committed_out_of_order() const { return chunks_committed_out_of_order_; }
+  void set_chunks_committed_out_of_order(uint64_t value) { chunks_committed_out_of_order_ = value; _has_field_.set(11); }
+
+  bool has_write_wrap_count() const { return _has_field_[4]; }
+  uint64_t write_wrap_count() const { return write_wrap_count_; }
+  void set_write_wrap_count(uint64_t value) { write_wrap_count_ = value; _has_field_.set(4); }
+
+  bool has_patches_succeeded() const { return _has_field_[5]; }
+  uint64_t patches_succeeded() const { return patches_succeeded_; }
+  void set_patches_succeeded(uint64_t value) { patches_succeeded_ = value; _has_field_.set(5); }
+
+  bool has_patches_failed() const { return _has_field_[6]; }
+  uint64_t patches_failed() const { return patches_failed_; }
+  void set_patches_failed(uint64_t value) { patches_failed_ = value; _has_field_.set(6); }
+
+  bool has_readaheads_succeeded() const { return _has_field_[7]; }
+  uint64_t readaheads_succeeded() const { return readaheads_succeeded_; }
+  void set_readaheads_succeeded(uint64_t value) { readaheads_succeeded_ = value; _has_field_.set(7); }
+
+  bool has_readaheads_failed() const { return _has_field_[8]; }
+  uint64_t readaheads_failed() const { return readaheads_failed_; }
+  void set_readaheads_failed(uint64_t value) { readaheads_failed_ = value; _has_field_.set(8); }
+
+  bool has_abi_violations() const { return _has_field_[9]; }
+  uint64_t abi_violations() const { return abi_violations_; }
+  void set_abi_violations(uint64_t value) { abi_violations_ = value; _has_field_.set(9); }
+
+  bool has_trace_writer_packet_loss() const { return _has_field_[19]; }
+  uint64_t trace_writer_packet_loss() const { return trace_writer_packet_loss_; }
+  void set_trace_writer_packet_loss(uint64_t value) { trace_writer_packet_loss_ = value; _has_field_.set(19); }
+
+ private:
+  uint64_t buffer_size_{};
+  uint64_t bytes_written_{};
+  uint64_t bytes_overwritten_{};
+  uint64_t bytes_read_{};
+  uint64_t padding_bytes_written_{};
+  uint64_t padding_bytes_cleared_{};
+  uint64_t chunks_written_{};
+  uint64_t chunks_rewritten_{};
+  uint64_t chunks_overwritten_{};
+  uint64_t chunks_discarded_{};
+  uint64_t chunks_read_{};
+  uint64_t chunks_committed_out_of_order_{};
+  uint64_t write_wrap_count_{};
+  uint64_t patches_succeeded_{};
+  uint64_t patches_failed_{};
+  uint64_t readaheads_succeeded_{};
+  uint64_t readaheads_failed_{};
+  uint64_t abi_violations_{};
+  uint64_t trace_writer_packet_loss_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<20> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACE_STATS_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/common/trace_stats.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+TraceStats::TraceStats() = default;
+TraceStats::~TraceStats() = default;
+TraceStats::TraceStats(const TraceStats&) = default;
+TraceStats& TraceStats::operator=(const TraceStats&) = default;
+TraceStats::TraceStats(TraceStats&&) noexcept = default;
+TraceStats& TraceStats::operator=(TraceStats&&) = default;
+
+bool TraceStats::operator==(const TraceStats& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && buffer_stats_ == other.buffer_stats_
+   && producers_connected_ == other.producers_connected_
+   && producers_seen_ == other.producers_seen_
+   && data_sources_registered_ == other.data_sources_registered_
+   && data_sources_seen_ == other.data_sources_seen_
+   && tracing_sessions_ == other.tracing_sessions_
+   && total_buffers_ == other.total_buffers_
+   && chunks_discarded_ == other.chunks_discarded_
+   && patches_discarded_ == other.patches_discarded_
+   && invalid_packets_ == other.invalid_packets_
+   && filter_stats_ == other.filter_stats_;
+}
+
+int TraceStats::buffer_stats_size() const { return static_cast<int>(buffer_stats_.size()); }
+void TraceStats::clear_buffer_stats() { buffer_stats_.clear(); }
+TraceStats_BufferStats* TraceStats::add_buffer_stats() { buffer_stats_.emplace_back(); return &buffer_stats_.back(); }
+bool TraceStats::ParseFromArray(const void* raw, size_t size) {
+  buffer_stats_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* buffer_stats */:
+        buffer_stats_.emplace_back();
+        buffer_stats_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 2 /* producers_connected */:
+        field.get(&producers_connected_);
+        break;
+      case 3 /* producers_seen */:
+        field.get(&producers_seen_);
+        break;
+      case 4 /* data_sources_registered */:
+        field.get(&data_sources_registered_);
+        break;
+      case 5 /* data_sources_seen */:
+        field.get(&data_sources_seen_);
+        break;
+      case 6 /* tracing_sessions */:
+        field.get(&tracing_sessions_);
+        break;
+      case 7 /* total_buffers */:
+        field.get(&total_buffers_);
+        break;
+      case 8 /* chunks_discarded */:
+        field.get(&chunks_discarded_);
+        break;
+      case 9 /* patches_discarded */:
+        field.get(&patches_discarded_);
+        break;
+      case 10 /* invalid_packets */:
+        field.get(&invalid_packets_);
+        break;
+      case 11 /* filter_stats */:
+        (*filter_stats_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TraceStats::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TraceStats::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TraceStats::Serialize(::protozero::Message* msg) const {
+  // Field 1: buffer_stats
+  for (auto& it : buffer_stats_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  // Field 2: producers_connected
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, producers_connected_);
+  }
+
+  // Field 3: producers_seen
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, producers_seen_);
+  }
+
+  // Field 4: data_sources_registered
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, data_sources_registered_);
+  }
+
+  // Field 5: data_sources_seen
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, data_sources_seen_);
+  }
+
+  // Field 6: tracing_sessions
+  if (_has_field_[6]) {
+    msg->AppendVarInt(6, tracing_sessions_);
+  }
+
+  // Field 7: total_buffers
+  if (_has_field_[7]) {
+    msg->AppendVarInt(7, total_buffers_);
+  }
+
+  // Field 8: chunks_discarded
+  if (_has_field_[8]) {
+    msg->AppendVarInt(8, chunks_discarded_);
+  }
+
+  // Field 9: patches_discarded
+  if (_has_field_[9]) {
+    msg->AppendVarInt(9, patches_discarded_);
+  }
+
+  // Field 10: invalid_packets
+  if (_has_field_[10]) {
+    msg->AppendVarInt(10, invalid_packets_);
+  }
+
+  // Field 11: filter_stats
+  if (_has_field_[11]) {
+    (*filter_stats_).Serialize(msg->BeginNestedMessage<::protozero::Message>(11));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TraceStats_FilterStats::TraceStats_FilterStats() = default;
+TraceStats_FilterStats::~TraceStats_FilterStats() = default;
+TraceStats_FilterStats::TraceStats_FilterStats(const TraceStats_FilterStats&) = default;
+TraceStats_FilterStats& TraceStats_FilterStats::operator=(const TraceStats_FilterStats&) = default;
+TraceStats_FilterStats::TraceStats_FilterStats(TraceStats_FilterStats&&) noexcept = default;
+TraceStats_FilterStats& TraceStats_FilterStats::operator=(TraceStats_FilterStats&&) = default;
+
+bool TraceStats_FilterStats::operator==(const TraceStats_FilterStats& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && input_packets_ == other.input_packets_
+   && input_bytes_ == other.input_bytes_
+   && output_bytes_ == other.output_bytes_
+   && errors_ == other.errors_;
+}
+
+bool TraceStats_FilterStats::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* input_packets */:
+        field.get(&input_packets_);
+        break;
+      case 2 /* input_bytes */:
+        field.get(&input_bytes_);
+        break;
+      case 3 /* output_bytes */:
+        field.get(&output_bytes_);
+        break;
+      case 4 /* errors */:
+        field.get(&errors_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TraceStats_FilterStats::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TraceStats_FilterStats::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TraceStats_FilterStats::Serialize(::protozero::Message* msg) const {
+  // Field 1: input_packets
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, input_packets_);
+  }
+
+  // Field 2: input_bytes
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, input_bytes_);
+  }
+
+  // Field 3: output_bytes
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, output_bytes_);
+  }
+
+  // Field 4: errors
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, errors_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TraceStats_BufferStats::TraceStats_BufferStats() = default;
+TraceStats_BufferStats::~TraceStats_BufferStats() = default;
+TraceStats_BufferStats::TraceStats_BufferStats(const TraceStats_BufferStats&) = default;
+TraceStats_BufferStats& TraceStats_BufferStats::operator=(const TraceStats_BufferStats&) = default;
+TraceStats_BufferStats::TraceStats_BufferStats(TraceStats_BufferStats&&) noexcept = default;
+TraceStats_BufferStats& TraceStats_BufferStats::operator=(TraceStats_BufferStats&&) = default;
+
+bool TraceStats_BufferStats::operator==(const TraceStats_BufferStats& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && buffer_size_ == other.buffer_size_
+   && bytes_written_ == other.bytes_written_
+   && bytes_overwritten_ == other.bytes_overwritten_
+   && bytes_read_ == other.bytes_read_
+   && padding_bytes_written_ == other.padding_bytes_written_
+   && padding_bytes_cleared_ == other.padding_bytes_cleared_
+   && chunks_written_ == other.chunks_written_
+   && chunks_rewritten_ == other.chunks_rewritten_
+   && chunks_overwritten_ == other.chunks_overwritten_
+   && chunks_discarded_ == other.chunks_discarded_
+   && chunks_read_ == other.chunks_read_
+   && chunks_committed_out_of_order_ == other.chunks_committed_out_of_order_
+   && write_wrap_count_ == other.write_wrap_count_
+   && patches_succeeded_ == other.patches_succeeded_
+   && patches_failed_ == other.patches_failed_
+   && readaheads_succeeded_ == other.readaheads_succeeded_
+   && readaheads_failed_ == other.readaheads_failed_
+   && abi_violations_ == other.abi_violations_
+   && trace_writer_packet_loss_ == other.trace_writer_packet_loss_;
+}
+
+bool TraceStats_BufferStats::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 12 /* buffer_size */:
+        field.get(&buffer_size_);
+        break;
+      case 1 /* bytes_written */:
+        field.get(&bytes_written_);
+        break;
+      case 13 /* bytes_overwritten */:
+        field.get(&bytes_overwritten_);
+        break;
+      case 14 /* bytes_read */:
+        field.get(&bytes_read_);
+        break;
+      case 15 /* padding_bytes_written */:
+        field.get(&padding_bytes_written_);
+        break;
+      case 16 /* padding_bytes_cleared */:
+        field.get(&padding_bytes_cleared_);
+        break;
+      case 2 /* chunks_written */:
+        field.get(&chunks_written_);
+        break;
+      case 10 /* chunks_rewritten */:
+        field.get(&chunks_rewritten_);
+        break;
+      case 3 /* chunks_overwritten */:
+        field.get(&chunks_overwritten_);
+        break;
+      case 18 /* chunks_discarded */:
+        field.get(&chunks_discarded_);
+        break;
+      case 17 /* chunks_read */:
+        field.get(&chunks_read_);
+        break;
+      case 11 /* chunks_committed_out_of_order */:
+        field.get(&chunks_committed_out_of_order_);
+        break;
+      case 4 /* write_wrap_count */:
+        field.get(&write_wrap_count_);
+        break;
+      case 5 /* patches_succeeded */:
+        field.get(&patches_succeeded_);
+        break;
+      case 6 /* patches_failed */:
+        field.get(&patches_failed_);
+        break;
+      case 7 /* readaheads_succeeded */:
+        field.get(&readaheads_succeeded_);
+        break;
+      case 8 /* readaheads_failed */:
+        field.get(&readaheads_failed_);
+        break;
+      case 9 /* abi_violations */:
+        field.get(&abi_violations_);
+        break;
+      case 19 /* trace_writer_packet_loss */:
+        field.get(&trace_writer_packet_loss_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TraceStats_BufferStats::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TraceStats_BufferStats::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TraceStats_BufferStats::Serialize(::protozero::Message* msg) const {
+  // Field 12: buffer_size
+  if (_has_field_[12]) {
+    msg->AppendVarInt(12, buffer_size_);
+  }
+
+  // Field 1: bytes_written
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, bytes_written_);
+  }
+
+  // Field 13: bytes_overwritten
+  if (_has_field_[13]) {
+    msg->AppendVarInt(13, bytes_overwritten_);
+  }
+
+  // Field 14: bytes_read
+  if (_has_field_[14]) {
+    msg->AppendVarInt(14, bytes_read_);
+  }
+
+  // Field 15: padding_bytes_written
+  if (_has_field_[15]) {
+    msg->AppendVarInt(15, padding_bytes_written_);
+  }
+
+  // Field 16: padding_bytes_cleared
+  if (_has_field_[16]) {
+    msg->AppendVarInt(16, padding_bytes_cleared_);
+  }
+
+  // Field 2: chunks_written
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, chunks_written_);
+  }
+
+  // Field 10: chunks_rewritten
+  if (_has_field_[10]) {
+    msg->AppendVarInt(10, chunks_rewritten_);
+  }
+
+  // Field 3: chunks_overwritten
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, chunks_overwritten_);
+  }
+
+  // Field 18: chunks_discarded
+  if (_has_field_[18]) {
+    msg->AppendVarInt(18, chunks_discarded_);
+  }
+
+  // Field 17: chunks_read
+  if (_has_field_[17]) {
+    msg->AppendVarInt(17, chunks_read_);
+  }
+
+  // Field 11: chunks_committed_out_of_order
+  if (_has_field_[11]) {
+    msg->AppendVarInt(11, chunks_committed_out_of_order_);
+  }
+
+  // Field 4: write_wrap_count
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, write_wrap_count_);
+  }
+
+  // Field 5: patches_succeeded
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, patches_succeeded_);
+  }
+
+  // Field 6: patches_failed
+  if (_has_field_[6]) {
+    msg->AppendVarInt(6, patches_failed_);
+  }
+
+  // Field 7: readaheads_succeeded
+  if (_has_field_[7]) {
+    msg->AppendVarInt(7, readaheads_succeeded_);
+  }
+
+  // Field 8: readaheads_failed
+  if (_has_field_[8]) {
+    msg->AppendVarInt(8, readaheads_failed_);
+  }
+
+  // Field 9: abi_violations
+  if (_has_field_[9]) {
+    msg->AppendVarInt(9, abi_violations_);
+  }
+
+  // Field 19: trace_writer_packet_loss
+  if (_has_field_[19]) {
+    msg->AppendVarInt(19, trace_writer_packet_loss_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/common/tracing_service_capabilities.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/common/tracing_service_capabilities.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_CAPABILITIES_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_CAPABILITIES_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TracingServiceCapabilities;
+enum ObservableEvents_Type : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT TracingServiceCapabilities : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kHasQueryCapabilitiesFieldNumber = 1,
+    kObservableEventsFieldNumber = 2,
+    kHasTraceConfigOutputPathFieldNumber = 3,
+  };
+
+  TracingServiceCapabilities();
+  ~TracingServiceCapabilities() override;
+  TracingServiceCapabilities(TracingServiceCapabilities&&) noexcept;
+  TracingServiceCapabilities& operator=(TracingServiceCapabilities&&);
+  TracingServiceCapabilities(const TracingServiceCapabilities&);
+  TracingServiceCapabilities& operator=(const TracingServiceCapabilities&);
+  bool operator==(const TracingServiceCapabilities&) const;
+  bool operator!=(const TracingServiceCapabilities& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_has_query_capabilities() const { return _has_field_[1]; }
+  bool has_query_capabilities() const { return has_query_capabilities_; }
+  void set_has_query_capabilities(bool value) { has_query_capabilities_ = value; _has_field_.set(1); }
+
+  const std::vector<ObservableEvents_Type>& observable_events() const { return observable_events_; }
+  std::vector<ObservableEvents_Type>* mutable_observable_events() { return &observable_events_; }
+  int observable_events_size() const { return static_cast<int>(observable_events_.size()); }
+  void clear_observable_events() { observable_events_.clear(); }
+  void add_observable_events(ObservableEvents_Type value) { observable_events_.emplace_back(value); }
+  ObservableEvents_Type* add_observable_events() { observable_events_.emplace_back(); return &observable_events_.back(); }
+
+  bool has_has_trace_config_output_path() const { return _has_field_[3]; }
+  bool has_trace_config_output_path() const { return has_trace_config_output_path_; }
+  void set_has_trace_config_output_path(bool value) { has_trace_config_output_path_ = value; _has_field_.set(3); }
+
+ private:
+  bool has_query_capabilities_{};
+  std::vector<ObservableEvents_Type> observable_events_;
+  bool has_trace_config_output_path_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_CAPABILITIES_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_capabilities.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+TracingServiceCapabilities::TracingServiceCapabilities() = default;
+TracingServiceCapabilities::~TracingServiceCapabilities() = default;
+TracingServiceCapabilities::TracingServiceCapabilities(const TracingServiceCapabilities&) = default;
+TracingServiceCapabilities& TracingServiceCapabilities::operator=(const TracingServiceCapabilities&) = default;
+TracingServiceCapabilities::TracingServiceCapabilities(TracingServiceCapabilities&&) noexcept = default;
+TracingServiceCapabilities& TracingServiceCapabilities::operator=(TracingServiceCapabilities&&) = default;
+
+bool TracingServiceCapabilities::operator==(const TracingServiceCapabilities& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && has_query_capabilities_ == other.has_query_capabilities_
+   && observable_events_ == other.observable_events_
+   && has_trace_config_output_path_ == other.has_trace_config_output_path_;
+}
+
+bool TracingServiceCapabilities::ParseFromArray(const void* raw, size_t size) {
+  observable_events_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* has_query_capabilities */:
+        field.get(&has_query_capabilities_);
+        break;
+      case 2 /* observable_events */:
+        observable_events_.emplace_back();
+        field.get(&observable_events_.back());
+        break;
+      case 3 /* has_trace_config_output_path */:
+        field.get(&has_trace_config_output_path_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TracingServiceCapabilities::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TracingServiceCapabilities::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TracingServiceCapabilities::Serialize(::protozero::Message* msg) const {
+  // Field 1: has_query_capabilities
+  if (_has_field_[1]) {
+    msg->AppendTinyVarInt(1, has_query_capabilities_);
+  }
+
+  // Field 2: observable_events
+  for (auto& it : observable_events_) {
+    msg->AppendVarInt(2, it);
+  }
+
+  // Field 3: has_trace_config_output_path
+  if (_has_field_[3]) {
+    msg->AppendTinyVarInt(3, has_trace_config_output_path_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/common/tracing_service_state.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/common/tracing_service_state.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_STATE_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_STATE_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TracingServiceState;
+class TracingServiceState_DataSource;
+class DataSourceDescriptor;
+class TracingServiceState_Producer;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT TracingServiceState : public ::protozero::CppMessageObj {
+ public:
+  using Producer = TracingServiceState_Producer;
+  using DataSource = TracingServiceState_DataSource;
+  enum FieldNumbers {
+    kProducersFieldNumber = 1,
+    kDataSourcesFieldNumber = 2,
+    kNumSessionsFieldNumber = 3,
+    kNumSessionsStartedFieldNumber = 4,
+    kTracingServiceVersionFieldNumber = 5,
+  };
+
+  TracingServiceState();
+  ~TracingServiceState() override;
+  TracingServiceState(TracingServiceState&&) noexcept;
+  TracingServiceState& operator=(TracingServiceState&&);
+  TracingServiceState(const TracingServiceState&);
+  TracingServiceState& operator=(const TracingServiceState&);
+  bool operator==(const TracingServiceState&) const;
+  bool operator!=(const TracingServiceState& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<TracingServiceState_Producer>& producers() const { return producers_; }
+  std::vector<TracingServiceState_Producer>* mutable_producers() { return &producers_; }
+  int producers_size() const;
+  void clear_producers();
+  TracingServiceState_Producer* add_producers();
+
+  const std::vector<TracingServiceState_DataSource>& data_sources() const { return data_sources_; }
+  std::vector<TracingServiceState_DataSource>* mutable_data_sources() { return &data_sources_; }
+  int data_sources_size() const;
+  void clear_data_sources();
+  TracingServiceState_DataSource* add_data_sources();
+
+  bool has_num_sessions() const { return _has_field_[3]; }
+  int32_t num_sessions() const { return num_sessions_; }
+  void set_num_sessions(int32_t value) { num_sessions_ = value; _has_field_.set(3); }
+
+  bool has_num_sessions_started() const { return _has_field_[4]; }
+  int32_t num_sessions_started() const { return num_sessions_started_; }
+  void set_num_sessions_started(int32_t value) { num_sessions_started_ = value; _has_field_.set(4); }
+
+  bool has_tracing_service_version() const { return _has_field_[5]; }
+  const std::string& tracing_service_version() const { return tracing_service_version_; }
+  void set_tracing_service_version(const std::string& value) { tracing_service_version_ = value; _has_field_.set(5); }
+
+ private:
+  std::vector<TracingServiceState_Producer> producers_;
+  std::vector<TracingServiceState_DataSource> data_sources_;
+  int32_t num_sessions_{};
+  int32_t num_sessions_started_{};
+  std::string tracing_service_version_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TracingServiceState_DataSource : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDsDescriptorFieldNumber = 1,
+    kProducerIdFieldNumber = 2,
+  };
+
+  TracingServiceState_DataSource();
+  ~TracingServiceState_DataSource() override;
+  TracingServiceState_DataSource(TracingServiceState_DataSource&&) noexcept;
+  TracingServiceState_DataSource& operator=(TracingServiceState_DataSource&&);
+  TracingServiceState_DataSource(const TracingServiceState_DataSource&);
+  TracingServiceState_DataSource& operator=(const TracingServiceState_DataSource&);
+  bool operator==(const TracingServiceState_DataSource&) const;
+  bool operator!=(const TracingServiceState_DataSource& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_ds_descriptor() const { return _has_field_[1]; }
+  const DataSourceDescriptor& ds_descriptor() const { return *ds_descriptor_; }
+  DataSourceDescriptor* mutable_ds_descriptor() { _has_field_.set(1); return ds_descriptor_.get(); }
+
+  bool has_producer_id() const { return _has_field_[2]; }
+  int32_t producer_id() const { return producer_id_; }
+  void set_producer_id(int32_t value) { producer_id_ = value; _has_field_.set(2); }
+
+ private:
+  ::protozero::CopyablePtr<DataSourceDescriptor> ds_descriptor_;
+  int32_t producer_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TracingServiceState_Producer : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIdFieldNumber = 1,
+    kNameFieldNumber = 2,
+    kUidFieldNumber = 3,
+    kSdkVersionFieldNumber = 4,
+  };
+
+  TracingServiceState_Producer();
+  ~TracingServiceState_Producer() override;
+  TracingServiceState_Producer(TracingServiceState_Producer&&) noexcept;
+  TracingServiceState_Producer& operator=(TracingServiceState_Producer&&);
+  TracingServiceState_Producer(const TracingServiceState_Producer&);
+  TracingServiceState_Producer& operator=(const TracingServiceState_Producer&);
+  bool operator==(const TracingServiceState_Producer&) const;
+  bool operator!=(const TracingServiceState_Producer& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_id() const { return _has_field_[1]; }
+  int32_t id() const { return id_; }
+  void set_id(int32_t value) { id_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+  bool has_uid() const { return _has_field_[3]; }
+  int32_t uid() const { return uid_; }
+  void set_uid(int32_t value) { uid_ = value; _has_field_.set(3); }
+
+  bool has_sdk_version() const { return _has_field_[4]; }
+  const std::string& sdk_version() const { return sdk_version_; }
+  void set_sdk_version(const std::string& value) { sdk_version_ = value; _has_field_.set(4); }
+
+ private:
+  int32_t id_{};
+  std::string name_{};
+  int32_t uid_{};
+  std::string sdk_version_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_STATE_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/track_event_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACK_EVENT_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACK_EVENT_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TrackEventDescriptor;
+class TrackEventCategory;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT TrackEventDescriptor : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kAvailableCategoriesFieldNumber = 1,
+  };
+
+  TrackEventDescriptor();
+  ~TrackEventDescriptor() override;
+  TrackEventDescriptor(TrackEventDescriptor&&) noexcept;
+  TrackEventDescriptor& operator=(TrackEventDescriptor&&);
+  TrackEventDescriptor(const TrackEventDescriptor&);
+  TrackEventDescriptor& operator=(const TrackEventDescriptor&);
+  bool operator==(const TrackEventDescriptor&) const;
+  bool operator!=(const TrackEventDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<TrackEventCategory>& available_categories() const { return available_categories_; }
+  std::vector<TrackEventCategory>* mutable_available_categories() { return &available_categories_; }
+  int available_categories_size() const;
+  void clear_available_categories();
+  TrackEventCategory* add_available_categories();
+
+ private:
+  std::vector<TrackEventCategory> available_categories_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TrackEventCategory : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kDescriptionFieldNumber = 2,
+    kTagsFieldNumber = 3,
+  };
+
+  TrackEventCategory();
+  ~TrackEventCategory() override;
+  TrackEventCategory(TrackEventCategory&&) noexcept;
+  TrackEventCategory& operator=(TrackEventCategory&&);
+  TrackEventCategory(const TrackEventCategory&);
+  TrackEventCategory& operator=(const TrackEventCategory&);
+  bool operator==(const TrackEventCategory&) const;
+  bool operator!=(const TrackEventCategory& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_description() const { return _has_field_[2]; }
+  const std::string& description() const { return description_; }
+  void set_description(const std::string& value) { description_ = value; _has_field_.set(2); }
+
+  const std::vector<std::string>& tags() const { return tags_; }
+  std::vector<std::string>* mutable_tags() { return &tags_; }
+  int tags_size() const { return static_cast<int>(tags_.size()); }
+  void clear_tags() { tags_.clear(); }
+  void add_tags(std::string value) { tags_.emplace_back(value); }
+  std::string* add_tags() { tags_.emplace_back(); return &tags_.back(); }
+
+ private:
+  std::string name_{};
+  std::string description_{};
+  std::vector<std::string> tags_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACK_EVENT_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_state.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/track_event_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/gpu_counter_descriptor.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+TracingServiceState::TracingServiceState() = default;
+TracingServiceState::~TracingServiceState() = default;
+TracingServiceState::TracingServiceState(const TracingServiceState&) = default;
+TracingServiceState& TracingServiceState::operator=(const TracingServiceState&) = default;
+TracingServiceState::TracingServiceState(TracingServiceState&&) noexcept = default;
+TracingServiceState& TracingServiceState::operator=(TracingServiceState&&) = default;
+
+bool TracingServiceState::operator==(const TracingServiceState& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && producers_ == other.producers_
+   && data_sources_ == other.data_sources_
+   && num_sessions_ == other.num_sessions_
+   && num_sessions_started_ == other.num_sessions_started_
+   && tracing_service_version_ == other.tracing_service_version_;
+}
+
+int TracingServiceState::producers_size() const { return static_cast<int>(producers_.size()); }
+void TracingServiceState::clear_producers() { producers_.clear(); }
+TracingServiceState_Producer* TracingServiceState::add_producers() { producers_.emplace_back(); return &producers_.back(); }
+int TracingServiceState::data_sources_size() const { return static_cast<int>(data_sources_.size()); }
+void TracingServiceState::clear_data_sources() { data_sources_.clear(); }
+TracingServiceState_DataSource* TracingServiceState::add_data_sources() { data_sources_.emplace_back(); return &data_sources_.back(); }
+bool TracingServiceState::ParseFromArray(const void* raw, size_t size) {
+  producers_.clear();
+  data_sources_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* producers */:
+        producers_.emplace_back();
+        producers_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 2 /* data_sources */:
+        data_sources_.emplace_back();
+        data_sources_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 3 /* num_sessions */:
+        field.get(&num_sessions_);
+        break;
+      case 4 /* num_sessions_started */:
+        field.get(&num_sessions_started_);
+        break;
+      case 5 /* tracing_service_version */:
+        field.get(&tracing_service_version_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TracingServiceState::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TracingServiceState::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TracingServiceState::Serialize(::protozero::Message* msg) const {
+  // Field 1: producers
+  for (auto& it : producers_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  // Field 2: data_sources
+  for (auto& it : data_sources_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
+  }
+
+  // Field 3: num_sessions
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, num_sessions_);
+  }
+
+  // Field 4: num_sessions_started
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, num_sessions_started_);
+  }
+
+  // Field 5: tracing_service_version
+  if (_has_field_[5]) {
+    msg->AppendString(5, tracing_service_version_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TracingServiceState_DataSource::TracingServiceState_DataSource() = default;
+TracingServiceState_DataSource::~TracingServiceState_DataSource() = default;
+TracingServiceState_DataSource::TracingServiceState_DataSource(const TracingServiceState_DataSource&) = default;
+TracingServiceState_DataSource& TracingServiceState_DataSource::operator=(const TracingServiceState_DataSource&) = default;
+TracingServiceState_DataSource::TracingServiceState_DataSource(TracingServiceState_DataSource&&) noexcept = default;
+TracingServiceState_DataSource& TracingServiceState_DataSource::operator=(TracingServiceState_DataSource&&) = default;
+
+bool TracingServiceState_DataSource::operator==(const TracingServiceState_DataSource& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && ds_descriptor_ == other.ds_descriptor_
+   && producer_id_ == other.producer_id_;
+}
+
+bool TracingServiceState_DataSource::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* ds_descriptor */:
+        (*ds_descriptor_).ParseFromArray(field.data(), field.size());
+        break;
+      case 2 /* producer_id */:
+        field.get(&producer_id_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TracingServiceState_DataSource::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TracingServiceState_DataSource::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TracingServiceState_DataSource::Serialize(::protozero::Message* msg) const {
+  // Field 1: ds_descriptor
+  if (_has_field_[1]) {
+    (*ds_descriptor_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  // Field 2: producer_id
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, producer_id_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TracingServiceState_Producer::TracingServiceState_Producer() = default;
+TracingServiceState_Producer::~TracingServiceState_Producer() = default;
+TracingServiceState_Producer::TracingServiceState_Producer(const TracingServiceState_Producer&) = default;
+TracingServiceState_Producer& TracingServiceState_Producer::operator=(const TracingServiceState_Producer&) = default;
+TracingServiceState_Producer::TracingServiceState_Producer(TracingServiceState_Producer&&) noexcept = default;
+TracingServiceState_Producer& TracingServiceState_Producer::operator=(TracingServiceState_Producer&&) = default;
+
+bool TracingServiceState_Producer::operator==(const TracingServiceState_Producer& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && id_ == other.id_
+   && name_ == other.name_
+   && uid_ == other.uid_
+   && sdk_version_ == other.sdk_version_;
+}
+
+bool TracingServiceState_Producer::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* id */:
+        field.get(&id_);
+        break;
+      case 2 /* name */:
+        field.get(&name_);
+        break;
+      case 3 /* uid */:
+        field.get(&uid_);
+        break;
+      case 4 /* sdk_version */:
+        field.get(&sdk_version_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TracingServiceState_Producer::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TracingServiceState_Producer::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TracingServiceState_Producer::Serialize(::protozero::Message* msg) const {
+  // Field 1: id
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, id_);
+  }
+
+  // Field 2: name
+  if (_has_field_[2]) {
+    msg->AppendString(2, name_);
+  }
+
+  // Field 3: uid
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, uid_);
+  }
+
+  // Field 4: sdk_version
+  if (_has_field_[4]) {
+    msg->AppendString(4, sdk_version_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/common/track_event_descriptor.gen.cc
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/common/track_event_descriptor.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+TrackEventDescriptor::TrackEventDescriptor() = default;
+TrackEventDescriptor::~TrackEventDescriptor() = default;
+TrackEventDescriptor::TrackEventDescriptor(const TrackEventDescriptor&) = default;
+TrackEventDescriptor& TrackEventDescriptor::operator=(const TrackEventDescriptor&) = default;
+TrackEventDescriptor::TrackEventDescriptor(TrackEventDescriptor&&) noexcept = default;
+TrackEventDescriptor& TrackEventDescriptor::operator=(TrackEventDescriptor&&) = default;
+
+bool TrackEventDescriptor::operator==(const TrackEventDescriptor& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && available_categories_ == other.available_categories_;
+}
+
+int TrackEventDescriptor::available_categories_size() const { return static_cast<int>(available_categories_.size()); }
+void TrackEventDescriptor::clear_available_categories() { available_categories_.clear(); }
+TrackEventCategory* TrackEventDescriptor::add_available_categories() { available_categories_.emplace_back(); return &available_categories_.back(); }
+bool TrackEventDescriptor::ParseFromArray(const void* raw, size_t size) {
+  available_categories_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* available_categories */:
+        available_categories_.emplace_back();
+        available_categories_.back().ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TrackEventDescriptor::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TrackEventDescriptor::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TrackEventDescriptor::Serialize(::protozero::Message* msg) const {
+  // Field 1: available_categories
+  for (auto& it : available_categories_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TrackEventCategory::TrackEventCategory() = default;
+TrackEventCategory::~TrackEventCategory() = default;
+TrackEventCategory::TrackEventCategory(const TrackEventCategory&) = default;
+TrackEventCategory& TrackEventCategory::operator=(const TrackEventCategory&) = default;
+TrackEventCategory::TrackEventCategory(TrackEventCategory&&) noexcept = default;
+TrackEventCategory& TrackEventCategory::operator=(TrackEventCategory&&) = default;
+
+bool TrackEventCategory::operator==(const TrackEventCategory& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && name_ == other.name_
+   && description_ == other.description_
+   && tags_ == other.tags_;
+}
+
+bool TrackEventCategory::ParseFromArray(const void* raw, size_t size) {
+  tags_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* name */:
+        field.get(&name_);
+        break;
+      case 2 /* description */:
+        field.get(&description_);
+        break;
+      case 3 /* tags */:
+        tags_.emplace_back();
+        field.get(&tags_.back());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TrackEventCategory::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TrackEventCategory::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TrackEventCategory::Serialize(::protozero::Message* msg) const {
+  // Field 1: name
+  if (_has_field_[1]) {
+    msg->AppendString(1, name_);
+  }
+
+  // Field 2: description
+  if (_has_field_[2]) {
+    msg->AppendString(2, description_);
+  }
+
+  // Field 3: tags
+  for (auto& it : tags_) {
+    msg->AppendString(3, it);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_log_config.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/android/android_log_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_LOG_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_LOG_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class AndroidLogConfig;
+enum AndroidLogId : int;
+enum AndroidLogPriority : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT AndroidLogConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kLogIdsFieldNumber = 1,
+    kMinPrioFieldNumber = 3,
+    kFilterTagsFieldNumber = 4,
+  };
+
+  AndroidLogConfig();
+  ~AndroidLogConfig() override;
+  AndroidLogConfig(AndroidLogConfig&&) noexcept;
+  AndroidLogConfig& operator=(AndroidLogConfig&&);
+  AndroidLogConfig(const AndroidLogConfig&);
+  AndroidLogConfig& operator=(const AndroidLogConfig&);
+  bool operator==(const AndroidLogConfig&) const;
+  bool operator!=(const AndroidLogConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<AndroidLogId>& log_ids() const { return log_ids_; }
+  std::vector<AndroidLogId>* mutable_log_ids() { return &log_ids_; }
+  int log_ids_size() const { return static_cast<int>(log_ids_.size()); }
+  void clear_log_ids() { log_ids_.clear(); }
+  void add_log_ids(AndroidLogId value) { log_ids_.emplace_back(value); }
+  AndroidLogId* add_log_ids() { log_ids_.emplace_back(); return &log_ids_.back(); }
+
+  bool has_min_prio() const { return _has_field_[3]; }
+  AndroidLogPriority min_prio() const { return min_prio_; }
+  void set_min_prio(AndroidLogPriority value) { min_prio_ = value; _has_field_.set(3); }
+
+  const std::vector<std::string>& filter_tags() const { return filter_tags_; }
+  std::vector<std::string>* mutable_filter_tags() { return &filter_tags_; }
+  int filter_tags_size() const { return static_cast<int>(filter_tags_.size()); }
+  void clear_filter_tags() { filter_tags_.clear(); }
+  void add_filter_tags(std::string value) { filter_tags_.emplace_back(value); }
+  std::string* add_filter_tags() { filter_tags_.emplace_back(); return &filter_tags_.back(); }
+
+ private:
+  std::vector<AndroidLogId> log_ids_;
+  AndroidLogPriority min_prio_{};
+  std::vector<std::string> filter_tags_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_LOG_CONFIG_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_log_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+AndroidLogConfig::AndroidLogConfig() = default;
+AndroidLogConfig::~AndroidLogConfig() = default;
+AndroidLogConfig::AndroidLogConfig(const AndroidLogConfig&) = default;
+AndroidLogConfig& AndroidLogConfig::operator=(const AndroidLogConfig&) = default;
+AndroidLogConfig::AndroidLogConfig(AndroidLogConfig&&) noexcept = default;
+AndroidLogConfig& AndroidLogConfig::operator=(AndroidLogConfig&&) = default;
+
+bool AndroidLogConfig::operator==(const AndroidLogConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && log_ids_ == other.log_ids_
+   && min_prio_ == other.min_prio_
+   && filter_tags_ == other.filter_tags_;
+}
+
+bool AndroidLogConfig::ParseFromArray(const void* raw, size_t size) {
+  log_ids_.clear();
+  filter_tags_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* log_ids */:
+        log_ids_.emplace_back();
+        field.get(&log_ids_.back());
+        break;
+      case 3 /* min_prio */:
+        field.get(&min_prio_);
+        break;
+      case 4 /* filter_tags */:
+        filter_tags_.emplace_back();
+        field.get(&filter_tags_.back());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string AndroidLogConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> AndroidLogConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void AndroidLogConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: log_ids
+  for (auto& it : log_ids_) {
+    msg->AppendVarInt(1, it);
+  }
+
+  // Field 3: min_prio
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, min_prio_);
+  }
+
+  // Field 4: filter_tags
+  for (auto& it : filter_tags_) {
+    msg->AppendString(4, it);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_polled_state_config.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/android/android_polled_state_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_POLLED_STATE_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_POLLED_STATE_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class AndroidPolledStateConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT AndroidPolledStateConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kPollMsFieldNumber = 1,
+  };
+
+  AndroidPolledStateConfig();
+  ~AndroidPolledStateConfig() override;
+  AndroidPolledStateConfig(AndroidPolledStateConfig&&) noexcept;
+  AndroidPolledStateConfig& operator=(AndroidPolledStateConfig&&);
+  AndroidPolledStateConfig(const AndroidPolledStateConfig&);
+  AndroidPolledStateConfig& operator=(const AndroidPolledStateConfig&);
+  bool operator==(const AndroidPolledStateConfig&) const;
+  bool operator!=(const AndroidPolledStateConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_poll_ms() const { return _has_field_[1]; }
+  uint32_t poll_ms() const { return poll_ms_; }
+  void set_poll_ms(uint32_t value) { poll_ms_ = value; _has_field_.set(1); }
+
+ private:
+  uint32_t poll_ms_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_POLLED_STATE_CONFIG_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_polled_state_config.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+AndroidPolledStateConfig::AndroidPolledStateConfig() = default;
+AndroidPolledStateConfig::~AndroidPolledStateConfig() = default;
+AndroidPolledStateConfig::AndroidPolledStateConfig(const AndroidPolledStateConfig&) = default;
+AndroidPolledStateConfig& AndroidPolledStateConfig::operator=(const AndroidPolledStateConfig&) = default;
+AndroidPolledStateConfig::AndroidPolledStateConfig(AndroidPolledStateConfig&&) noexcept = default;
+AndroidPolledStateConfig& AndroidPolledStateConfig::operator=(AndroidPolledStateConfig&&) = default;
+
+bool AndroidPolledStateConfig::operator==(const AndroidPolledStateConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && poll_ms_ == other.poll_ms_;
+}
+
+bool AndroidPolledStateConfig::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* poll_ms */:
+        field.get(&poll_ms_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string AndroidPolledStateConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> AndroidPolledStateConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void AndroidPolledStateConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: poll_ms
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, poll_ms_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/android/packages_list_config.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/android/packages_list_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_PACKAGES_LIST_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_PACKAGES_LIST_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class PackagesListConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT PackagesListConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kPackageNameFilterFieldNumber = 1,
+  };
+
+  PackagesListConfig();
+  ~PackagesListConfig() override;
+  PackagesListConfig(PackagesListConfig&&) noexcept;
+  PackagesListConfig& operator=(PackagesListConfig&&);
+  PackagesListConfig(const PackagesListConfig&);
+  PackagesListConfig& operator=(const PackagesListConfig&);
+  bool operator==(const PackagesListConfig&) const;
+  bool operator!=(const PackagesListConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<std::string>& package_name_filter() const { return package_name_filter_; }
+  std::vector<std::string>* mutable_package_name_filter() { return &package_name_filter_; }
+  int package_name_filter_size() const { return static_cast<int>(package_name_filter_.size()); }
+  void clear_package_name_filter() { package_name_filter_.clear(); }
+  void add_package_name_filter(std::string value) { package_name_filter_.emplace_back(value); }
+  std::string* add_package_name_filter() { package_name_filter_.emplace_back(); return &package_name_filter_.back(); }
+
+ private:
+  std::vector<std::string> package_name_filter_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_PACKAGES_LIST_CONFIG_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/android/packages_list_config.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+PackagesListConfig::PackagesListConfig() = default;
+PackagesListConfig::~PackagesListConfig() = default;
+PackagesListConfig::PackagesListConfig(const PackagesListConfig&) = default;
+PackagesListConfig& PackagesListConfig::operator=(const PackagesListConfig&) = default;
+PackagesListConfig::PackagesListConfig(PackagesListConfig&&) noexcept = default;
+PackagesListConfig& PackagesListConfig::operator=(PackagesListConfig&&) = default;
+
+bool PackagesListConfig::operator==(const PackagesListConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && package_name_filter_ == other.package_name_filter_;
+}
+
+bool PackagesListConfig::ParseFromArray(const void* raw, size_t size) {
+  package_name_filter_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* package_name_filter */:
+        package_name_filter_.emplace_back();
+        field.get(&package_name_filter_.back());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string PackagesListConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> PackagesListConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void PackagesListConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: package_name_filter
+  for (auto& it : package_name_filter_) {
+    msg->AppendString(1, it);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/ftrace/ftrace_config.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/ftrace/ftrace_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_FTRACE_FTRACE_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_FTRACE_FTRACE_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class FtraceConfig;
+class FtraceConfig_CompactSchedConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT FtraceConfig : public ::protozero::CppMessageObj {
+ public:
+  using CompactSchedConfig = FtraceConfig_CompactSchedConfig;
+  enum FieldNumbers {
+    kFtraceEventsFieldNumber = 1,
+    kAtraceCategoriesFieldNumber = 2,
+    kAtraceAppsFieldNumber = 3,
+    kBufferSizeKbFieldNumber = 10,
+    kDrainPeriodMsFieldNumber = 11,
+    kCompactSchedFieldNumber = 12,
+    kSymbolizeKsymsFieldNumber = 13,
+    kInitializeKsymsSynchronouslyForTestingFieldNumber = 14,
+  };
+
+  FtraceConfig();
+  ~FtraceConfig() override;
+  FtraceConfig(FtraceConfig&&) noexcept;
+  FtraceConfig& operator=(FtraceConfig&&);
+  FtraceConfig(const FtraceConfig&);
+  FtraceConfig& operator=(const FtraceConfig&);
+  bool operator==(const FtraceConfig&) const;
+  bool operator!=(const FtraceConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<std::string>& ftrace_events() const { return ftrace_events_; }
+  std::vector<std::string>* mutable_ftrace_events() { return &ftrace_events_; }
+  int ftrace_events_size() const { return static_cast<int>(ftrace_events_.size()); }
+  void clear_ftrace_events() { ftrace_events_.clear(); }
+  void add_ftrace_events(std::string value) { ftrace_events_.emplace_back(value); }
+  std::string* add_ftrace_events() { ftrace_events_.emplace_back(); return &ftrace_events_.back(); }
+
+  const std::vector<std::string>& atrace_categories() const { return atrace_categories_; }
+  std::vector<std::string>* mutable_atrace_categories() { return &atrace_categories_; }
+  int atrace_categories_size() const { return static_cast<int>(atrace_categories_.size()); }
+  void clear_atrace_categories() { atrace_categories_.clear(); }
+  void add_atrace_categories(std::string value) { atrace_categories_.emplace_back(value); }
+  std::string* add_atrace_categories() { atrace_categories_.emplace_back(); return &atrace_categories_.back(); }
+
+  const std::vector<std::string>& atrace_apps() const { return atrace_apps_; }
+  std::vector<std::string>* mutable_atrace_apps() { return &atrace_apps_; }
+  int atrace_apps_size() const { return static_cast<int>(atrace_apps_.size()); }
+  void clear_atrace_apps() { atrace_apps_.clear(); }
+  void add_atrace_apps(std::string value) { atrace_apps_.emplace_back(value); }
+  std::string* add_atrace_apps() { atrace_apps_.emplace_back(); return &atrace_apps_.back(); }
+
+  bool has_buffer_size_kb() const { return _has_field_[10]; }
+  uint32_t buffer_size_kb() const { return buffer_size_kb_; }
+  void set_buffer_size_kb(uint32_t value) { buffer_size_kb_ = value; _has_field_.set(10); }
+
+  bool has_drain_period_ms() const { return _has_field_[11]; }
+  uint32_t drain_period_ms() const { return drain_period_ms_; }
+  void set_drain_period_ms(uint32_t value) { drain_period_ms_ = value; _has_field_.set(11); }
+
+  bool has_compact_sched() const { return _has_field_[12]; }
+  const FtraceConfig_CompactSchedConfig& compact_sched() const { return *compact_sched_; }
+  FtraceConfig_CompactSchedConfig* mutable_compact_sched() { _has_field_.set(12); return compact_sched_.get(); }
+
+  bool has_symbolize_ksyms() const { return _has_field_[13]; }
+  bool symbolize_ksyms() const { return symbolize_ksyms_; }
+  void set_symbolize_ksyms(bool value) { symbolize_ksyms_ = value; _has_field_.set(13); }
+
+  bool has_initialize_ksyms_synchronously_for_testing() const { return _has_field_[14]; }
+  bool initialize_ksyms_synchronously_for_testing() const { return initialize_ksyms_synchronously_for_testing_; }
+  void set_initialize_ksyms_synchronously_for_testing(bool value) { initialize_ksyms_synchronously_for_testing_ = value; _has_field_.set(14); }
+
+ private:
+  std::vector<std::string> ftrace_events_;
+  std::vector<std::string> atrace_categories_;
+  std::vector<std::string> atrace_apps_;
+  uint32_t buffer_size_kb_{};
+  uint32_t drain_period_ms_{};
+  ::protozero::CopyablePtr<FtraceConfig_CompactSchedConfig> compact_sched_;
+  bool symbolize_ksyms_{};
+  bool initialize_ksyms_synchronously_for_testing_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<15> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT FtraceConfig_CompactSchedConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kEnabledFieldNumber = 1,
+  };
+
+  FtraceConfig_CompactSchedConfig();
+  ~FtraceConfig_CompactSchedConfig() override;
+  FtraceConfig_CompactSchedConfig(FtraceConfig_CompactSchedConfig&&) noexcept;
+  FtraceConfig_CompactSchedConfig& operator=(FtraceConfig_CompactSchedConfig&&);
+  FtraceConfig_CompactSchedConfig(const FtraceConfig_CompactSchedConfig&);
+  FtraceConfig_CompactSchedConfig& operator=(const FtraceConfig_CompactSchedConfig&);
+  bool operator==(const FtraceConfig_CompactSchedConfig&) const;
+  bool operator!=(const FtraceConfig_CompactSchedConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_enabled() const { return _has_field_[1]; }
+  bool enabled() const { return enabled_; }
+  void set_enabled(bool value) { enabled_ = value; _has_field_.set(1); }
+
+ private:
+  bool enabled_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_FTRACE_FTRACE_CONFIG_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/ftrace/ftrace_config.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+FtraceConfig::FtraceConfig() = default;
+FtraceConfig::~FtraceConfig() = default;
+FtraceConfig::FtraceConfig(const FtraceConfig&) = default;
+FtraceConfig& FtraceConfig::operator=(const FtraceConfig&) = default;
+FtraceConfig::FtraceConfig(FtraceConfig&&) noexcept = default;
+FtraceConfig& FtraceConfig::operator=(FtraceConfig&&) = default;
+
+bool FtraceConfig::operator==(const FtraceConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && ftrace_events_ == other.ftrace_events_
+   && atrace_categories_ == other.atrace_categories_
+   && atrace_apps_ == other.atrace_apps_
+   && buffer_size_kb_ == other.buffer_size_kb_
+   && drain_period_ms_ == other.drain_period_ms_
+   && compact_sched_ == other.compact_sched_
+   && symbolize_ksyms_ == other.symbolize_ksyms_
+   && initialize_ksyms_synchronously_for_testing_ == other.initialize_ksyms_synchronously_for_testing_;
+}
+
+bool FtraceConfig::ParseFromArray(const void* raw, size_t size) {
+  ftrace_events_.clear();
+  atrace_categories_.clear();
+  atrace_apps_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* ftrace_events */:
+        ftrace_events_.emplace_back();
+        field.get(&ftrace_events_.back());
+        break;
+      case 2 /* atrace_categories */:
+        atrace_categories_.emplace_back();
+        field.get(&atrace_categories_.back());
+        break;
+      case 3 /* atrace_apps */:
+        atrace_apps_.emplace_back();
+        field.get(&atrace_apps_.back());
+        break;
+      case 10 /* buffer_size_kb */:
+        field.get(&buffer_size_kb_);
+        break;
+      case 11 /* drain_period_ms */:
+        field.get(&drain_period_ms_);
+        break;
+      case 12 /* compact_sched */:
+        (*compact_sched_).ParseFromArray(field.data(), field.size());
+        break;
+      case 13 /* symbolize_ksyms */:
+        field.get(&symbolize_ksyms_);
+        break;
+      case 14 /* initialize_ksyms_synchronously_for_testing */:
+        field.get(&initialize_ksyms_synchronously_for_testing_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string FtraceConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> FtraceConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void FtraceConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: ftrace_events
+  for (auto& it : ftrace_events_) {
+    msg->AppendString(1, it);
+  }
+
+  // Field 2: atrace_categories
+  for (auto& it : atrace_categories_) {
+    msg->AppendString(2, it);
+  }
+
+  // Field 3: atrace_apps
+  for (auto& it : atrace_apps_) {
+    msg->AppendString(3, it);
+  }
+
+  // Field 10: buffer_size_kb
+  if (_has_field_[10]) {
+    msg->AppendVarInt(10, buffer_size_kb_);
+  }
+
+  // Field 11: drain_period_ms
+  if (_has_field_[11]) {
+    msg->AppendVarInt(11, drain_period_ms_);
+  }
+
+  // Field 12: compact_sched
+  if (_has_field_[12]) {
+    (*compact_sched_).Serialize(msg->BeginNestedMessage<::protozero::Message>(12));
+  }
+
+  // Field 13: symbolize_ksyms
+  if (_has_field_[13]) {
+    msg->AppendTinyVarInt(13, symbolize_ksyms_);
+  }
+
+  // Field 14: initialize_ksyms_synchronously_for_testing
+  if (_has_field_[14]) {
+    msg->AppendTinyVarInt(14, initialize_ksyms_synchronously_for_testing_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+FtraceConfig_CompactSchedConfig::FtraceConfig_CompactSchedConfig() = default;
+FtraceConfig_CompactSchedConfig::~FtraceConfig_CompactSchedConfig() = default;
+FtraceConfig_CompactSchedConfig::FtraceConfig_CompactSchedConfig(const FtraceConfig_CompactSchedConfig&) = default;
+FtraceConfig_CompactSchedConfig& FtraceConfig_CompactSchedConfig::operator=(const FtraceConfig_CompactSchedConfig&) = default;
+FtraceConfig_CompactSchedConfig::FtraceConfig_CompactSchedConfig(FtraceConfig_CompactSchedConfig&&) noexcept = default;
+FtraceConfig_CompactSchedConfig& FtraceConfig_CompactSchedConfig::operator=(FtraceConfig_CompactSchedConfig&&) = default;
+
+bool FtraceConfig_CompactSchedConfig::operator==(const FtraceConfig_CompactSchedConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && enabled_ == other.enabled_;
+}
+
+bool FtraceConfig_CompactSchedConfig::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* enabled */:
+        field.get(&enabled_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string FtraceConfig_CompactSchedConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> FtraceConfig_CompactSchedConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void FtraceConfig_CompactSchedConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: enabled
+  if (_has_field_[1]) {
+    msg->AppendTinyVarInt(1, enabled_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/gpu/gpu_counter_config.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/gpu/gpu_counter_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_GPU_COUNTER_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_GPU_COUNTER_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class GpuCounterConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT GpuCounterConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kCounterPeriodNsFieldNumber = 1,
+    kCounterIdsFieldNumber = 2,
+    kInstrumentedSamplingFieldNumber = 3,
+    kFixGpuClockFieldNumber = 4,
+  };
+
+  GpuCounterConfig();
+  ~GpuCounterConfig() override;
+  GpuCounterConfig(GpuCounterConfig&&) noexcept;
+  GpuCounterConfig& operator=(GpuCounterConfig&&);
+  GpuCounterConfig(const GpuCounterConfig&);
+  GpuCounterConfig& operator=(const GpuCounterConfig&);
+  bool operator==(const GpuCounterConfig&) const;
+  bool operator!=(const GpuCounterConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_counter_period_ns() const { return _has_field_[1]; }
+  uint64_t counter_period_ns() const { return counter_period_ns_; }
+  void set_counter_period_ns(uint64_t value) { counter_period_ns_ = value; _has_field_.set(1); }
+
+  const std::vector<uint32_t>& counter_ids() const { return counter_ids_; }
+  std::vector<uint32_t>* mutable_counter_ids() { return &counter_ids_; }
+  int counter_ids_size() const { return static_cast<int>(counter_ids_.size()); }
+  void clear_counter_ids() { counter_ids_.clear(); }
+  void add_counter_ids(uint32_t value) { counter_ids_.emplace_back(value); }
+  uint32_t* add_counter_ids() { counter_ids_.emplace_back(); return &counter_ids_.back(); }
+
+  bool has_instrumented_sampling() const { return _has_field_[3]; }
+  bool instrumented_sampling() const { return instrumented_sampling_; }
+  void set_instrumented_sampling(bool value) { instrumented_sampling_ = value; _has_field_.set(3); }
+
+  bool has_fix_gpu_clock() const { return _has_field_[4]; }
+  bool fix_gpu_clock() const { return fix_gpu_clock_; }
+  void set_fix_gpu_clock(bool value) { fix_gpu_clock_ = value; _has_field_.set(4); }
+
+ private:
+  uint64_t counter_period_ns_{};
+  std::vector<uint32_t> counter_ids_;
+  bool instrumented_sampling_{};
+  bool fix_gpu_clock_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_GPU_COUNTER_CONFIG_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_counter_config.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+GpuCounterConfig::GpuCounterConfig() = default;
+GpuCounterConfig::~GpuCounterConfig() = default;
+GpuCounterConfig::GpuCounterConfig(const GpuCounterConfig&) = default;
+GpuCounterConfig& GpuCounterConfig::operator=(const GpuCounterConfig&) = default;
+GpuCounterConfig::GpuCounterConfig(GpuCounterConfig&&) noexcept = default;
+GpuCounterConfig& GpuCounterConfig::operator=(GpuCounterConfig&&) = default;
+
+bool GpuCounterConfig::operator==(const GpuCounterConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && counter_period_ns_ == other.counter_period_ns_
+   && counter_ids_ == other.counter_ids_
+   && instrumented_sampling_ == other.instrumented_sampling_
+   && fix_gpu_clock_ == other.fix_gpu_clock_;
+}
+
+bool GpuCounterConfig::ParseFromArray(const void* raw, size_t size) {
+  counter_ids_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* counter_period_ns */:
+        field.get(&counter_period_ns_);
+        break;
+      case 2 /* counter_ids */:
+        counter_ids_.emplace_back();
+        field.get(&counter_ids_.back());
+        break;
+      case 3 /* instrumented_sampling */:
+        field.get(&instrumented_sampling_);
+        break;
+      case 4 /* fix_gpu_clock */:
+        field.get(&fix_gpu_clock_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string GpuCounterConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> GpuCounterConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void GpuCounterConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: counter_period_ns
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, counter_period_ns_);
+  }
+
+  // Field 2: counter_ids
+  for (auto& it : counter_ids_) {
+    msg->AppendVarInt(2, it);
+  }
+
+  // Field 3: instrumented_sampling
+  if (_has_field_[3]) {
+    msg->AppendTinyVarInt(3, instrumented_sampling_);
+  }
+
+  // Field 4: fix_gpu_clock
+  if (_has_field_[4]) {
+    msg->AppendTinyVarInt(4, fix_gpu_clock_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/gpu/vulkan_memory_config.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/gpu/vulkan_memory_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_VULKAN_MEMORY_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_VULKAN_MEMORY_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class VulkanMemoryConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT VulkanMemoryConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTrackDriverMemoryUsageFieldNumber = 1,
+    kTrackDeviceMemoryUsageFieldNumber = 2,
+  };
+
+  VulkanMemoryConfig();
+  ~VulkanMemoryConfig() override;
+  VulkanMemoryConfig(VulkanMemoryConfig&&) noexcept;
+  VulkanMemoryConfig& operator=(VulkanMemoryConfig&&);
+  VulkanMemoryConfig(const VulkanMemoryConfig&);
+  VulkanMemoryConfig& operator=(const VulkanMemoryConfig&);
+  bool operator==(const VulkanMemoryConfig&) const;
+  bool operator!=(const VulkanMemoryConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_track_driver_memory_usage() const { return _has_field_[1]; }
+  bool track_driver_memory_usage() const { return track_driver_memory_usage_; }
+  void set_track_driver_memory_usage(bool value) { track_driver_memory_usage_ = value; _has_field_.set(1); }
+
+  bool has_track_device_memory_usage() const { return _has_field_[2]; }
+  bool track_device_memory_usage() const { return track_device_memory_usage_; }
+  void set_track_device_memory_usage(bool value) { track_device_memory_usage_ = value; _has_field_.set(2); }
+
+ private:
+  bool track_driver_memory_usage_{};
+  bool track_device_memory_usage_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_VULKAN_MEMORY_CONFIG_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/vulkan_memory_config.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+VulkanMemoryConfig::VulkanMemoryConfig() = default;
+VulkanMemoryConfig::~VulkanMemoryConfig() = default;
+VulkanMemoryConfig::VulkanMemoryConfig(const VulkanMemoryConfig&) = default;
+VulkanMemoryConfig& VulkanMemoryConfig::operator=(const VulkanMemoryConfig&) = default;
+VulkanMemoryConfig::VulkanMemoryConfig(VulkanMemoryConfig&&) noexcept = default;
+VulkanMemoryConfig& VulkanMemoryConfig::operator=(VulkanMemoryConfig&&) = default;
+
+bool VulkanMemoryConfig::operator==(const VulkanMemoryConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && track_driver_memory_usage_ == other.track_driver_memory_usage_
+   && track_device_memory_usage_ == other.track_device_memory_usage_;
+}
+
+bool VulkanMemoryConfig::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* track_driver_memory_usage */:
+        field.get(&track_driver_memory_usage_);
+        break;
+      case 2 /* track_device_memory_usage */:
+        field.get(&track_device_memory_usage_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string VulkanMemoryConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> VulkanMemoryConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void VulkanMemoryConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: track_driver_memory_usage
+  if (_has_field_[1]) {
+    msg->AppendTinyVarInt(1, track_driver_memory_usage_);
+  }
+
+  // Field 2: track_device_memory_usage
+  if (_has_field_[2]) {
+    msg->AppendTinyVarInt(2, track_device_memory_usage_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/inode_file/inode_file_config.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/inode_file/inode_file_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INODE_FILE_INODE_FILE_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INODE_FILE_INODE_FILE_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class InodeFileConfig;
+class InodeFileConfig_MountPointMappingEntry;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT InodeFileConfig : public ::protozero::CppMessageObj {
+ public:
+  using MountPointMappingEntry = InodeFileConfig_MountPointMappingEntry;
+  enum FieldNumbers {
+    kScanIntervalMsFieldNumber = 1,
+    kScanDelayMsFieldNumber = 2,
+    kScanBatchSizeFieldNumber = 3,
+    kDoNotScanFieldNumber = 4,
+    kScanMountPointsFieldNumber = 5,
+    kMountPointMappingFieldNumber = 6,
+  };
+
+  InodeFileConfig();
+  ~InodeFileConfig() override;
+  InodeFileConfig(InodeFileConfig&&) noexcept;
+  InodeFileConfig& operator=(InodeFileConfig&&);
+  InodeFileConfig(const InodeFileConfig&);
+  InodeFileConfig& operator=(const InodeFileConfig&);
+  bool operator==(const InodeFileConfig&) const;
+  bool operator!=(const InodeFileConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_scan_interval_ms() const { return _has_field_[1]; }
+  uint32_t scan_interval_ms() const { return scan_interval_ms_; }
+  void set_scan_interval_ms(uint32_t value) { scan_interval_ms_ = value; _has_field_.set(1); }
+
+  bool has_scan_delay_ms() const { return _has_field_[2]; }
+  uint32_t scan_delay_ms() const { return scan_delay_ms_; }
+  void set_scan_delay_ms(uint32_t value) { scan_delay_ms_ = value; _has_field_.set(2); }
+
+  bool has_scan_batch_size() const { return _has_field_[3]; }
+  uint32_t scan_batch_size() const { return scan_batch_size_; }
+  void set_scan_batch_size(uint32_t value) { scan_batch_size_ = value; _has_field_.set(3); }
+
+  bool has_do_not_scan() const { return _has_field_[4]; }
+  bool do_not_scan() const { return do_not_scan_; }
+  void set_do_not_scan(bool value) { do_not_scan_ = value; _has_field_.set(4); }
+
+  const std::vector<std::string>& scan_mount_points() const { return scan_mount_points_; }
+  std::vector<std::string>* mutable_scan_mount_points() { return &scan_mount_points_; }
+  int scan_mount_points_size() const { return static_cast<int>(scan_mount_points_.size()); }
+  void clear_scan_mount_points() { scan_mount_points_.clear(); }
+  void add_scan_mount_points(std::string value) { scan_mount_points_.emplace_back(value); }
+  std::string* add_scan_mount_points() { scan_mount_points_.emplace_back(); return &scan_mount_points_.back(); }
+
+  const std::vector<InodeFileConfig_MountPointMappingEntry>& mount_point_mapping() const { return mount_point_mapping_; }
+  std::vector<InodeFileConfig_MountPointMappingEntry>* mutable_mount_point_mapping() { return &mount_point_mapping_; }
+  int mount_point_mapping_size() const;
+  void clear_mount_point_mapping();
+  InodeFileConfig_MountPointMappingEntry* add_mount_point_mapping();
+
+ private:
+  uint32_t scan_interval_ms_{};
+  uint32_t scan_delay_ms_{};
+  uint32_t scan_batch_size_{};
+  bool do_not_scan_{};
+  std::vector<std::string> scan_mount_points_;
+  std::vector<InodeFileConfig_MountPointMappingEntry> mount_point_mapping_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<7> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT InodeFileConfig_MountPointMappingEntry : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kMountpointFieldNumber = 1,
+    kScanRootsFieldNumber = 2,
+  };
+
+  InodeFileConfig_MountPointMappingEntry();
+  ~InodeFileConfig_MountPointMappingEntry() override;
+  InodeFileConfig_MountPointMappingEntry(InodeFileConfig_MountPointMappingEntry&&) noexcept;
+  InodeFileConfig_MountPointMappingEntry& operator=(InodeFileConfig_MountPointMappingEntry&&);
+  InodeFileConfig_MountPointMappingEntry(const InodeFileConfig_MountPointMappingEntry&);
+  InodeFileConfig_MountPointMappingEntry& operator=(const InodeFileConfig_MountPointMappingEntry&);
+  bool operator==(const InodeFileConfig_MountPointMappingEntry&) const;
+  bool operator!=(const InodeFileConfig_MountPointMappingEntry& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_mountpoint() const { return _has_field_[1]; }
+  const std::string& mountpoint() const { return mountpoint_; }
+  void set_mountpoint(const std::string& value) { mountpoint_ = value; _has_field_.set(1); }
+
+  const std::vector<std::string>& scan_roots() const { return scan_roots_; }
+  std::vector<std::string>* mutable_scan_roots() { return &scan_roots_; }
+  int scan_roots_size() const { return static_cast<int>(scan_roots_.size()); }
+  void clear_scan_roots() { scan_roots_.clear(); }
+  void add_scan_roots(std::string value) { scan_roots_.emplace_back(value); }
+  std::string* add_scan_roots() { scan_roots_.emplace_back(); return &scan_roots_.back(); }
+
+ private:
+  std::string mountpoint_{};
+  std::vector<std::string> scan_roots_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INODE_FILE_INODE_FILE_CONFIG_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/inode_file/inode_file_config.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+InodeFileConfig::InodeFileConfig() = default;
+InodeFileConfig::~InodeFileConfig() = default;
+InodeFileConfig::InodeFileConfig(const InodeFileConfig&) = default;
+InodeFileConfig& InodeFileConfig::operator=(const InodeFileConfig&) = default;
+InodeFileConfig::InodeFileConfig(InodeFileConfig&&) noexcept = default;
+InodeFileConfig& InodeFileConfig::operator=(InodeFileConfig&&) = default;
+
+bool InodeFileConfig::operator==(const InodeFileConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && scan_interval_ms_ == other.scan_interval_ms_
+   && scan_delay_ms_ == other.scan_delay_ms_
+   && scan_batch_size_ == other.scan_batch_size_
+   && do_not_scan_ == other.do_not_scan_
+   && scan_mount_points_ == other.scan_mount_points_
+   && mount_point_mapping_ == other.mount_point_mapping_;
+}
+
+int InodeFileConfig::mount_point_mapping_size() const { return static_cast<int>(mount_point_mapping_.size()); }
+void InodeFileConfig::clear_mount_point_mapping() { mount_point_mapping_.clear(); }
+InodeFileConfig_MountPointMappingEntry* InodeFileConfig::add_mount_point_mapping() { mount_point_mapping_.emplace_back(); return &mount_point_mapping_.back(); }
+bool InodeFileConfig::ParseFromArray(const void* raw, size_t size) {
+  scan_mount_points_.clear();
+  mount_point_mapping_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* scan_interval_ms */:
+        field.get(&scan_interval_ms_);
+        break;
+      case 2 /* scan_delay_ms */:
+        field.get(&scan_delay_ms_);
+        break;
+      case 3 /* scan_batch_size */:
+        field.get(&scan_batch_size_);
+        break;
+      case 4 /* do_not_scan */:
+        field.get(&do_not_scan_);
+        break;
+      case 5 /* scan_mount_points */:
+        scan_mount_points_.emplace_back();
+        field.get(&scan_mount_points_.back());
+        break;
+      case 6 /* mount_point_mapping */:
+        mount_point_mapping_.emplace_back();
+        mount_point_mapping_.back().ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string InodeFileConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> InodeFileConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void InodeFileConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: scan_interval_ms
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, scan_interval_ms_);
+  }
+
+  // Field 2: scan_delay_ms
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, scan_delay_ms_);
+  }
+
+  // Field 3: scan_batch_size
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, scan_batch_size_);
+  }
+
+  // Field 4: do_not_scan
+  if (_has_field_[4]) {
+    msg->AppendTinyVarInt(4, do_not_scan_);
+  }
+
+  // Field 5: scan_mount_points
+  for (auto& it : scan_mount_points_) {
+    msg->AppendString(5, it);
+  }
+
+  // Field 6: mount_point_mapping
+  for (auto& it : mount_point_mapping_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+InodeFileConfig_MountPointMappingEntry::InodeFileConfig_MountPointMappingEntry() = default;
+InodeFileConfig_MountPointMappingEntry::~InodeFileConfig_MountPointMappingEntry() = default;
+InodeFileConfig_MountPointMappingEntry::InodeFileConfig_MountPointMappingEntry(const InodeFileConfig_MountPointMappingEntry&) = default;
+InodeFileConfig_MountPointMappingEntry& InodeFileConfig_MountPointMappingEntry::operator=(const InodeFileConfig_MountPointMappingEntry&) = default;
+InodeFileConfig_MountPointMappingEntry::InodeFileConfig_MountPointMappingEntry(InodeFileConfig_MountPointMappingEntry&&) noexcept = default;
+InodeFileConfig_MountPointMappingEntry& InodeFileConfig_MountPointMappingEntry::operator=(InodeFileConfig_MountPointMappingEntry&&) = default;
+
+bool InodeFileConfig_MountPointMappingEntry::operator==(const InodeFileConfig_MountPointMappingEntry& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && mountpoint_ == other.mountpoint_
+   && scan_roots_ == other.scan_roots_;
+}
+
+bool InodeFileConfig_MountPointMappingEntry::ParseFromArray(const void* raw, size_t size) {
+  scan_roots_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* mountpoint */:
+        field.get(&mountpoint_);
+        break;
+      case 2 /* scan_roots */:
+        scan_roots_.emplace_back();
+        field.get(&scan_roots_.back());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string InodeFileConfig_MountPointMappingEntry::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> InodeFileConfig_MountPointMappingEntry::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void InodeFileConfig_MountPointMappingEntry::Serialize(::protozero::Message* msg) const {
+  // Field 1: mountpoint
+  if (_has_field_[1]) {
+    msg->AppendString(1, mountpoint_);
+  }
+
+  // Field 2: scan_roots
+  for (auto& it : scan_roots_) {
+    msg->AppendString(2, it);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/interceptors/console_config.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/interceptors/console_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTORS_CONSOLE_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTORS_CONSOLE_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ConsoleConfig;
+enum ConsoleConfig_Output : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ConsoleConfig_Output : int {
+  ConsoleConfig_Output_OUTPUT_UNSPECIFIED = 0,
+  ConsoleConfig_Output_OUTPUT_STDOUT = 1,
+  ConsoleConfig_Output_OUTPUT_STDERR = 2,
+};
+
+class PERFETTO_EXPORT ConsoleConfig : public ::protozero::CppMessageObj {
+ public:
+  using Output = ConsoleConfig_Output;
+  static constexpr auto OUTPUT_UNSPECIFIED = ConsoleConfig_Output_OUTPUT_UNSPECIFIED;
+  static constexpr auto OUTPUT_STDOUT = ConsoleConfig_Output_OUTPUT_STDOUT;
+  static constexpr auto OUTPUT_STDERR = ConsoleConfig_Output_OUTPUT_STDERR;
+  static constexpr auto Output_MIN = ConsoleConfig_Output_OUTPUT_UNSPECIFIED;
+  static constexpr auto Output_MAX = ConsoleConfig_Output_OUTPUT_STDERR;
+  enum FieldNumbers {
+    kOutputFieldNumber = 1,
+    kEnableColorsFieldNumber = 2,
+  };
+
+  ConsoleConfig();
+  ~ConsoleConfig() override;
+  ConsoleConfig(ConsoleConfig&&) noexcept;
+  ConsoleConfig& operator=(ConsoleConfig&&);
+  ConsoleConfig(const ConsoleConfig&);
+  ConsoleConfig& operator=(const ConsoleConfig&);
+  bool operator==(const ConsoleConfig&) const;
+  bool operator!=(const ConsoleConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_output() const { return _has_field_[1]; }
+  ConsoleConfig_Output output() const { return output_; }
+  void set_output(ConsoleConfig_Output value) { output_ = value; _has_field_.set(1); }
+
+  bool has_enable_colors() const { return _has_field_[2]; }
+  bool enable_colors() const { return enable_colors_; }
+  void set_enable_colors(bool value) { enable_colors_ = value; _has_field_.set(2); }
+
+ private:
+  ConsoleConfig_Output output_{};
+  bool enable_colors_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTORS_CONSOLE_CONFIG_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/interceptors/console_config.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ConsoleConfig::ConsoleConfig() = default;
+ConsoleConfig::~ConsoleConfig() = default;
+ConsoleConfig::ConsoleConfig(const ConsoleConfig&) = default;
+ConsoleConfig& ConsoleConfig::operator=(const ConsoleConfig&) = default;
+ConsoleConfig::ConsoleConfig(ConsoleConfig&&) noexcept = default;
+ConsoleConfig& ConsoleConfig::operator=(ConsoleConfig&&) = default;
+
+bool ConsoleConfig::operator==(const ConsoleConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && output_ == other.output_
+   && enable_colors_ == other.enable_colors_;
+}
+
+bool ConsoleConfig::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* output */:
+        field.get(&output_);
+        break;
+      case 2 /* enable_colors */:
+        field.get(&enable_colors_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ConsoleConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ConsoleConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ConsoleConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: output
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, output_);
+  }
+
+  // Field 2: enable_colors
+  if (_has_field_[2]) {
+    msg->AppendTinyVarInt(2, enable_colors_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/power/android_power_config.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/power/android_power_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_POWER_ANDROID_POWER_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_POWER_ANDROID_POWER_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class AndroidPowerConfig;
+enum AndroidPowerConfig_BatteryCounters : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum AndroidPowerConfig_BatteryCounters : int {
+  AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_UNSPECIFIED = 0,
+  AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CHARGE = 1,
+  AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CAPACITY_PERCENT = 2,
+  AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT = 3,
+  AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT_AVG = 4,
+};
+
+class PERFETTO_EXPORT AndroidPowerConfig : public ::protozero::CppMessageObj {
+ public:
+  using BatteryCounters = AndroidPowerConfig_BatteryCounters;
+  static constexpr auto BATTERY_COUNTER_UNSPECIFIED = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_UNSPECIFIED;
+  static constexpr auto BATTERY_COUNTER_CHARGE = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CHARGE;
+  static constexpr auto BATTERY_COUNTER_CAPACITY_PERCENT = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CAPACITY_PERCENT;
+  static constexpr auto BATTERY_COUNTER_CURRENT = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT;
+  static constexpr auto BATTERY_COUNTER_CURRENT_AVG = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT_AVG;
+  static constexpr auto BatteryCounters_MIN = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_UNSPECIFIED;
+  static constexpr auto BatteryCounters_MAX = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT_AVG;
+  enum FieldNumbers {
+    kBatteryPollMsFieldNumber = 1,
+    kBatteryCountersFieldNumber = 2,
+    kCollectPowerRailsFieldNumber = 3,
+    kCollectEnergyEstimationBreakdownFieldNumber = 4,
+  };
+
+  AndroidPowerConfig();
+  ~AndroidPowerConfig() override;
+  AndroidPowerConfig(AndroidPowerConfig&&) noexcept;
+  AndroidPowerConfig& operator=(AndroidPowerConfig&&);
+  AndroidPowerConfig(const AndroidPowerConfig&);
+  AndroidPowerConfig& operator=(const AndroidPowerConfig&);
+  bool operator==(const AndroidPowerConfig&) const;
+  bool operator!=(const AndroidPowerConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_battery_poll_ms() const { return _has_field_[1]; }
+  uint32_t battery_poll_ms() const { return battery_poll_ms_; }
+  void set_battery_poll_ms(uint32_t value) { battery_poll_ms_ = value; _has_field_.set(1); }
+
+  const std::vector<AndroidPowerConfig_BatteryCounters>& battery_counters() const { return battery_counters_; }
+  std::vector<AndroidPowerConfig_BatteryCounters>* mutable_battery_counters() { return &battery_counters_; }
+  int battery_counters_size() const { return static_cast<int>(battery_counters_.size()); }
+  void clear_battery_counters() { battery_counters_.clear(); }
+  void add_battery_counters(AndroidPowerConfig_BatteryCounters value) { battery_counters_.emplace_back(value); }
+  AndroidPowerConfig_BatteryCounters* add_battery_counters() { battery_counters_.emplace_back(); return &battery_counters_.back(); }
+
+  bool has_collect_power_rails() const { return _has_field_[3]; }
+  bool collect_power_rails() const { return collect_power_rails_; }
+  void set_collect_power_rails(bool value) { collect_power_rails_ = value; _has_field_.set(3); }
+
+  bool has_collect_energy_estimation_breakdown() const { return _has_field_[4]; }
+  bool collect_energy_estimation_breakdown() const { return collect_energy_estimation_breakdown_; }
+  void set_collect_energy_estimation_breakdown(bool value) { collect_energy_estimation_breakdown_ = value; _has_field_.set(4); }
+
+ private:
+  uint32_t battery_poll_ms_{};
+  std::vector<AndroidPowerConfig_BatteryCounters> battery_counters_;
+  bool collect_power_rails_{};
+  bool collect_energy_estimation_breakdown_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_POWER_ANDROID_POWER_CONFIG_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/power/android_power_config.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+AndroidPowerConfig::AndroidPowerConfig() = default;
+AndroidPowerConfig::~AndroidPowerConfig() = default;
+AndroidPowerConfig::AndroidPowerConfig(const AndroidPowerConfig&) = default;
+AndroidPowerConfig& AndroidPowerConfig::operator=(const AndroidPowerConfig&) = default;
+AndroidPowerConfig::AndroidPowerConfig(AndroidPowerConfig&&) noexcept = default;
+AndroidPowerConfig& AndroidPowerConfig::operator=(AndroidPowerConfig&&) = default;
+
+bool AndroidPowerConfig::operator==(const AndroidPowerConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && battery_poll_ms_ == other.battery_poll_ms_
+   && battery_counters_ == other.battery_counters_
+   && collect_power_rails_ == other.collect_power_rails_
+   && collect_energy_estimation_breakdown_ == other.collect_energy_estimation_breakdown_;
+}
+
+bool AndroidPowerConfig::ParseFromArray(const void* raw, size_t size) {
+  battery_counters_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* battery_poll_ms */:
+        field.get(&battery_poll_ms_);
+        break;
+      case 2 /* battery_counters */:
+        battery_counters_.emplace_back();
+        field.get(&battery_counters_.back());
+        break;
+      case 3 /* collect_power_rails */:
+        field.get(&collect_power_rails_);
+        break;
+      case 4 /* collect_energy_estimation_breakdown */:
+        field.get(&collect_energy_estimation_breakdown_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string AndroidPowerConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> AndroidPowerConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void AndroidPowerConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: battery_poll_ms
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, battery_poll_ms_);
+  }
+
+  // Field 2: battery_counters
+  for (auto& it : battery_counters_) {
+    msg->AppendVarInt(2, it);
+  }
+
+  // Field 3: collect_power_rails
+  if (_has_field_[3]) {
+    msg->AppendTinyVarInt(3, collect_power_rails_);
+  }
+
+  // Field 4: collect_energy_estimation_breakdown
+  if (_has_field_[4]) {
+    msg->AppendTinyVarInt(4, collect_energy_estimation_breakdown_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/process_stats/process_stats_config.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/process_stats/process_stats_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROCESS_STATS_PROCESS_STATS_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROCESS_STATS_PROCESS_STATS_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ProcessStatsConfig;
+enum ProcessStatsConfig_Quirks : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ProcessStatsConfig_Quirks : int {
+  ProcessStatsConfig_Quirks_QUIRKS_UNSPECIFIED = 0,
+  ProcessStatsConfig_Quirks_DISABLE_INITIAL_DUMP = 1,
+  ProcessStatsConfig_Quirks_DISABLE_ON_DEMAND = 2,
+};
+
+class PERFETTO_EXPORT ProcessStatsConfig : public ::protozero::CppMessageObj {
+ public:
+  using Quirks = ProcessStatsConfig_Quirks;
+  static constexpr auto QUIRKS_UNSPECIFIED = ProcessStatsConfig_Quirks_QUIRKS_UNSPECIFIED;
+  static constexpr auto DISABLE_INITIAL_DUMP = ProcessStatsConfig_Quirks_DISABLE_INITIAL_DUMP;
+  static constexpr auto DISABLE_ON_DEMAND = ProcessStatsConfig_Quirks_DISABLE_ON_DEMAND;
+  static constexpr auto Quirks_MIN = ProcessStatsConfig_Quirks_QUIRKS_UNSPECIFIED;
+  static constexpr auto Quirks_MAX = ProcessStatsConfig_Quirks_DISABLE_ON_DEMAND;
+  enum FieldNumbers {
+    kQuirksFieldNumber = 1,
+    kScanAllProcessesOnStartFieldNumber = 2,
+    kRecordThreadNamesFieldNumber = 3,
+    kProcStatsPollMsFieldNumber = 4,
+    kProcStatsCacheTtlMsFieldNumber = 6,
+    kRecordThreadTimeInStateFieldNumber = 7,
+    kThreadTimeInStateCacheSizeFieldNumber = 8,
+  };
+
+  ProcessStatsConfig();
+  ~ProcessStatsConfig() override;
+  ProcessStatsConfig(ProcessStatsConfig&&) noexcept;
+  ProcessStatsConfig& operator=(ProcessStatsConfig&&);
+  ProcessStatsConfig(const ProcessStatsConfig&);
+  ProcessStatsConfig& operator=(const ProcessStatsConfig&);
+  bool operator==(const ProcessStatsConfig&) const;
+  bool operator!=(const ProcessStatsConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<ProcessStatsConfig_Quirks>& quirks() const { return quirks_; }
+  std::vector<ProcessStatsConfig_Quirks>* mutable_quirks() { return &quirks_; }
+  int quirks_size() const { return static_cast<int>(quirks_.size()); }
+  void clear_quirks() { quirks_.clear(); }
+  void add_quirks(ProcessStatsConfig_Quirks value) { quirks_.emplace_back(value); }
+  ProcessStatsConfig_Quirks* add_quirks() { quirks_.emplace_back(); return &quirks_.back(); }
+
+  bool has_scan_all_processes_on_start() const { return _has_field_[2]; }
+  bool scan_all_processes_on_start() const { return scan_all_processes_on_start_; }
+  void set_scan_all_processes_on_start(bool value) { scan_all_processes_on_start_ = value; _has_field_.set(2); }
+
+  bool has_record_thread_names() const { return _has_field_[3]; }
+  bool record_thread_names() const { return record_thread_names_; }
+  void set_record_thread_names(bool value) { record_thread_names_ = value; _has_field_.set(3); }
+
+  bool has_proc_stats_poll_ms() const { return _has_field_[4]; }
+  uint32_t proc_stats_poll_ms() const { return proc_stats_poll_ms_; }
+  void set_proc_stats_poll_ms(uint32_t value) { proc_stats_poll_ms_ = value; _has_field_.set(4); }
+
+  bool has_proc_stats_cache_ttl_ms() const { return _has_field_[6]; }
+  uint32_t proc_stats_cache_ttl_ms() const { return proc_stats_cache_ttl_ms_; }
+  void set_proc_stats_cache_ttl_ms(uint32_t value) { proc_stats_cache_ttl_ms_ = value; _has_field_.set(6); }
+
+  bool has_record_thread_time_in_state() const { return _has_field_[7]; }
+  bool record_thread_time_in_state() const { return record_thread_time_in_state_; }
+  void set_record_thread_time_in_state(bool value) { record_thread_time_in_state_ = value; _has_field_.set(7); }
+
+  bool has_thread_time_in_state_cache_size() const { return _has_field_[8]; }
+  uint32_t thread_time_in_state_cache_size() const { return thread_time_in_state_cache_size_; }
+  void set_thread_time_in_state_cache_size(uint32_t value) { thread_time_in_state_cache_size_ = value; _has_field_.set(8); }
+
+ private:
+  std::vector<ProcessStatsConfig_Quirks> quirks_;
+  bool scan_all_processes_on_start_{};
+  bool record_thread_names_{};
+  uint32_t proc_stats_poll_ms_{};
+  uint32_t proc_stats_cache_ttl_ms_{};
+  bool record_thread_time_in_state_{};
+  uint32_t thread_time_in_state_cache_size_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<9> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROCESS_STATS_PROCESS_STATS_CONFIG_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/process_stats/process_stats_config.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ProcessStatsConfig::ProcessStatsConfig() = default;
+ProcessStatsConfig::~ProcessStatsConfig() = default;
+ProcessStatsConfig::ProcessStatsConfig(const ProcessStatsConfig&) = default;
+ProcessStatsConfig& ProcessStatsConfig::operator=(const ProcessStatsConfig&) = default;
+ProcessStatsConfig::ProcessStatsConfig(ProcessStatsConfig&&) noexcept = default;
+ProcessStatsConfig& ProcessStatsConfig::operator=(ProcessStatsConfig&&) = default;
+
+bool ProcessStatsConfig::operator==(const ProcessStatsConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && quirks_ == other.quirks_
+   && scan_all_processes_on_start_ == other.scan_all_processes_on_start_
+   && record_thread_names_ == other.record_thread_names_
+   && proc_stats_poll_ms_ == other.proc_stats_poll_ms_
+   && proc_stats_cache_ttl_ms_ == other.proc_stats_cache_ttl_ms_
+   && record_thread_time_in_state_ == other.record_thread_time_in_state_
+   && thread_time_in_state_cache_size_ == other.thread_time_in_state_cache_size_;
+}
+
+bool ProcessStatsConfig::ParseFromArray(const void* raw, size_t size) {
+  quirks_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* quirks */:
+        quirks_.emplace_back();
+        field.get(&quirks_.back());
+        break;
+      case 2 /* scan_all_processes_on_start */:
+        field.get(&scan_all_processes_on_start_);
+        break;
+      case 3 /* record_thread_names */:
+        field.get(&record_thread_names_);
+        break;
+      case 4 /* proc_stats_poll_ms */:
+        field.get(&proc_stats_poll_ms_);
+        break;
+      case 6 /* proc_stats_cache_ttl_ms */:
+        field.get(&proc_stats_cache_ttl_ms_);
+        break;
+      case 7 /* record_thread_time_in_state */:
+        field.get(&record_thread_time_in_state_);
+        break;
+      case 8 /* thread_time_in_state_cache_size */:
+        field.get(&thread_time_in_state_cache_size_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ProcessStatsConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ProcessStatsConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ProcessStatsConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: quirks
+  for (auto& it : quirks_) {
+    msg->AppendVarInt(1, it);
+  }
+
+  // Field 2: scan_all_processes_on_start
+  if (_has_field_[2]) {
+    msg->AppendTinyVarInt(2, scan_all_processes_on_start_);
+  }
+
+  // Field 3: record_thread_names
+  if (_has_field_[3]) {
+    msg->AppendTinyVarInt(3, record_thread_names_);
+  }
+
+  // Field 4: proc_stats_poll_ms
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, proc_stats_poll_ms_);
+  }
+
+  // Field 6: proc_stats_cache_ttl_ms
+  if (_has_field_[6]) {
+    msg->AppendVarInt(6, proc_stats_cache_ttl_ms_);
+  }
+
+  // Field 7: record_thread_time_in_state
+  if (_has_field_[7]) {
+    msg->AppendTinyVarInt(7, record_thread_time_in_state_);
+  }
+
+  // Field 8: thread_time_in_state_cache_size
+  if (_has_field_[8]) {
+    msg->AppendVarInt(8, thread_time_in_state_cache_size_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/heapprofd_config.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/profiling/heapprofd_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_HEAPPROFD_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_HEAPPROFD_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class HeapprofdConfig;
+class HeapprofdConfig_ContinuousDumpConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT HeapprofdConfig : public ::protozero::CppMessageObj {
+ public:
+  using ContinuousDumpConfig = HeapprofdConfig_ContinuousDumpConfig;
+  enum FieldNumbers {
+    kSamplingIntervalBytesFieldNumber = 1,
+    kAdaptiveSamplingShmemThresholdFieldNumber = 24,
+    kAdaptiveSamplingMaxSamplingIntervalBytesFieldNumber = 25,
+    kProcessCmdlineFieldNumber = 2,
+    kPidFieldNumber = 4,
+    kTargetInstalledByFieldNumber = 26,
+    kHeapsFieldNumber = 20,
+    kExcludeHeapsFieldNumber = 27,
+    kStreamAllocationsFieldNumber = 23,
+    kHeapSamplingIntervalsFieldNumber = 22,
+    kAllHeapsFieldNumber = 21,
+    kAllFieldNumber = 5,
+    kMinAnonymousMemoryKbFieldNumber = 15,
+    kMaxHeapprofdMemoryKbFieldNumber = 16,
+    kMaxHeapprofdCpuSecsFieldNumber = 17,
+    kSkipSymbolPrefixFieldNumber = 7,
+    kContinuousDumpConfigFieldNumber = 6,
+    kShmemSizeBytesFieldNumber = 8,
+    kBlockClientFieldNumber = 9,
+    kBlockClientTimeoutUsFieldNumber = 14,
+    kNoStartupFieldNumber = 10,
+    kNoRunningFieldNumber = 11,
+    kDumpAtMaxFieldNumber = 13,
+    kDisableForkTeardownFieldNumber = 18,
+    kDisableVforkDetectionFieldNumber = 19,
+  };
+
+  HeapprofdConfig();
+  ~HeapprofdConfig() override;
+  HeapprofdConfig(HeapprofdConfig&&) noexcept;
+  HeapprofdConfig& operator=(HeapprofdConfig&&);
+  HeapprofdConfig(const HeapprofdConfig&);
+  HeapprofdConfig& operator=(const HeapprofdConfig&);
+  bool operator==(const HeapprofdConfig&) const;
+  bool operator!=(const HeapprofdConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_sampling_interval_bytes() const { return _has_field_[1]; }
+  uint64_t sampling_interval_bytes() const { return sampling_interval_bytes_; }
+  void set_sampling_interval_bytes(uint64_t value) { sampling_interval_bytes_ = value; _has_field_.set(1); }
+
+  bool has_adaptive_sampling_shmem_threshold() const { return _has_field_[24]; }
+  uint64_t adaptive_sampling_shmem_threshold() const { return adaptive_sampling_shmem_threshold_; }
+  void set_adaptive_sampling_shmem_threshold(uint64_t value) { adaptive_sampling_shmem_threshold_ = value; _has_field_.set(24); }
+
+  bool has_adaptive_sampling_max_sampling_interval_bytes() const { return _has_field_[25]; }
+  uint64_t adaptive_sampling_max_sampling_interval_bytes() const { return adaptive_sampling_max_sampling_interval_bytes_; }
+  void set_adaptive_sampling_max_sampling_interval_bytes(uint64_t value) { adaptive_sampling_max_sampling_interval_bytes_ = value; _has_field_.set(25); }
+
+  const std::vector<std::string>& process_cmdline() const { return process_cmdline_; }
+  std::vector<std::string>* mutable_process_cmdline() { return &process_cmdline_; }
+  int process_cmdline_size() const { return static_cast<int>(process_cmdline_.size()); }
+  void clear_process_cmdline() { process_cmdline_.clear(); }
+  void add_process_cmdline(std::string value) { process_cmdline_.emplace_back(value); }
+  std::string* add_process_cmdline() { process_cmdline_.emplace_back(); return &process_cmdline_.back(); }
+
+  const std::vector<uint64_t>& pid() const { return pid_; }
+  std::vector<uint64_t>* mutable_pid() { return &pid_; }
+  int pid_size() const { return static_cast<int>(pid_.size()); }
+  void clear_pid() { pid_.clear(); }
+  void add_pid(uint64_t value) { pid_.emplace_back(value); }
+  uint64_t* add_pid() { pid_.emplace_back(); return &pid_.back(); }
+
+  const std::vector<std::string>& target_installed_by() const { return target_installed_by_; }
+  std::vector<std::string>* mutable_target_installed_by() { return &target_installed_by_; }
+  int target_installed_by_size() const { return static_cast<int>(target_installed_by_.size()); }
+  void clear_target_installed_by() { target_installed_by_.clear(); }
+  void add_target_installed_by(std::string value) { target_installed_by_.emplace_back(value); }
+  std::string* add_target_installed_by() { target_installed_by_.emplace_back(); return &target_installed_by_.back(); }
+
+  const std::vector<std::string>& heaps() const { return heaps_; }
+  std::vector<std::string>* mutable_heaps() { return &heaps_; }
+  int heaps_size() const { return static_cast<int>(heaps_.size()); }
+  void clear_heaps() { heaps_.clear(); }
+  void add_heaps(std::string value) { heaps_.emplace_back(value); }
+  std::string* add_heaps() { heaps_.emplace_back(); return &heaps_.back(); }
+
+  const std::vector<std::string>& exclude_heaps() const { return exclude_heaps_; }
+  std::vector<std::string>* mutable_exclude_heaps() { return &exclude_heaps_; }
+  int exclude_heaps_size() const { return static_cast<int>(exclude_heaps_.size()); }
+  void clear_exclude_heaps() { exclude_heaps_.clear(); }
+  void add_exclude_heaps(std::string value) { exclude_heaps_.emplace_back(value); }
+  std::string* add_exclude_heaps() { exclude_heaps_.emplace_back(); return &exclude_heaps_.back(); }
+
+  bool has_stream_allocations() const { return _has_field_[23]; }
+  bool stream_allocations() const { return stream_allocations_; }
+  void set_stream_allocations(bool value) { stream_allocations_ = value; _has_field_.set(23); }
+
+  const std::vector<uint64_t>& heap_sampling_intervals() const { return heap_sampling_intervals_; }
+  std::vector<uint64_t>* mutable_heap_sampling_intervals() { return &heap_sampling_intervals_; }
+  int heap_sampling_intervals_size() const { return static_cast<int>(heap_sampling_intervals_.size()); }
+  void clear_heap_sampling_intervals() { heap_sampling_intervals_.clear(); }
+  void add_heap_sampling_intervals(uint64_t value) { heap_sampling_intervals_.emplace_back(value); }
+  uint64_t* add_heap_sampling_intervals() { heap_sampling_intervals_.emplace_back(); return &heap_sampling_intervals_.back(); }
+
+  bool has_all_heaps() const { return _has_field_[21]; }
+  bool all_heaps() const { return all_heaps_; }
+  void set_all_heaps(bool value) { all_heaps_ = value; _has_field_.set(21); }
+
+  bool has_all() const { return _has_field_[5]; }
+  bool all() const { return all_; }
+  void set_all(bool value) { all_ = value; _has_field_.set(5); }
+
+  bool has_min_anonymous_memory_kb() const { return _has_field_[15]; }
+  uint32_t min_anonymous_memory_kb() const { return min_anonymous_memory_kb_; }
+  void set_min_anonymous_memory_kb(uint32_t value) { min_anonymous_memory_kb_ = value; _has_field_.set(15); }
+
+  bool has_max_heapprofd_memory_kb() const { return _has_field_[16]; }
+  uint32_t max_heapprofd_memory_kb() const { return max_heapprofd_memory_kb_; }
+  void set_max_heapprofd_memory_kb(uint32_t value) { max_heapprofd_memory_kb_ = value; _has_field_.set(16); }
+
+  bool has_max_heapprofd_cpu_secs() const { return _has_field_[17]; }
+  uint64_t max_heapprofd_cpu_secs() const { return max_heapprofd_cpu_secs_; }
+  void set_max_heapprofd_cpu_secs(uint64_t value) { max_heapprofd_cpu_secs_ = value; _has_field_.set(17); }
+
+  const std::vector<std::string>& skip_symbol_prefix() const { return skip_symbol_prefix_; }
+  std::vector<std::string>* mutable_skip_symbol_prefix() { return &skip_symbol_prefix_; }
+  int skip_symbol_prefix_size() const { return static_cast<int>(skip_symbol_prefix_.size()); }
+  void clear_skip_symbol_prefix() { skip_symbol_prefix_.clear(); }
+  void add_skip_symbol_prefix(std::string value) { skip_symbol_prefix_.emplace_back(value); }
+  std::string* add_skip_symbol_prefix() { skip_symbol_prefix_.emplace_back(); return &skip_symbol_prefix_.back(); }
+
+  bool has_continuous_dump_config() const { return _has_field_[6]; }
+  const HeapprofdConfig_ContinuousDumpConfig& continuous_dump_config() const { return *continuous_dump_config_; }
+  HeapprofdConfig_ContinuousDumpConfig* mutable_continuous_dump_config() { _has_field_.set(6); return continuous_dump_config_.get(); }
+
+  bool has_shmem_size_bytes() const { return _has_field_[8]; }
+  uint64_t shmem_size_bytes() const { return shmem_size_bytes_; }
+  void set_shmem_size_bytes(uint64_t value) { shmem_size_bytes_ = value; _has_field_.set(8); }
+
+  bool has_block_client() const { return _has_field_[9]; }
+  bool block_client() const { return block_client_; }
+  void set_block_client(bool value) { block_client_ = value; _has_field_.set(9); }
+
+  bool has_block_client_timeout_us() const { return _has_field_[14]; }
+  uint32_t block_client_timeout_us() const { return block_client_timeout_us_; }
+  void set_block_client_timeout_us(uint32_t value) { block_client_timeout_us_ = value; _has_field_.set(14); }
+
+  bool has_no_startup() const { return _has_field_[10]; }
+  bool no_startup() const { return no_startup_; }
+  void set_no_startup(bool value) { no_startup_ = value; _has_field_.set(10); }
+
+  bool has_no_running() const { return _has_field_[11]; }
+  bool no_running() const { return no_running_; }
+  void set_no_running(bool value) { no_running_ = value; _has_field_.set(11); }
+
+  bool has_dump_at_max() const { return _has_field_[13]; }
+  bool dump_at_max() const { return dump_at_max_; }
+  void set_dump_at_max(bool value) { dump_at_max_ = value; _has_field_.set(13); }
+
+  bool has_disable_fork_teardown() const { return _has_field_[18]; }
+  bool disable_fork_teardown() const { return disable_fork_teardown_; }
+  void set_disable_fork_teardown(bool value) { disable_fork_teardown_ = value; _has_field_.set(18); }
+
+  bool has_disable_vfork_detection() const { return _has_field_[19]; }
+  bool disable_vfork_detection() const { return disable_vfork_detection_; }
+  void set_disable_vfork_detection(bool value) { disable_vfork_detection_ = value; _has_field_.set(19); }
+
+ private:
+  uint64_t sampling_interval_bytes_{};
+  uint64_t adaptive_sampling_shmem_threshold_{};
+  uint64_t adaptive_sampling_max_sampling_interval_bytes_{};
+  std::vector<std::string> process_cmdline_;
+  std::vector<uint64_t> pid_;
+  std::vector<std::string> target_installed_by_;
+  std::vector<std::string> heaps_;
+  std::vector<std::string> exclude_heaps_;
+  bool stream_allocations_{};
+  std::vector<uint64_t> heap_sampling_intervals_;
+  bool all_heaps_{};
+  bool all_{};
+  uint32_t min_anonymous_memory_kb_{};
+  uint32_t max_heapprofd_memory_kb_{};
+  uint64_t max_heapprofd_cpu_secs_{};
+  std::vector<std::string> skip_symbol_prefix_;
+  ::protozero::CopyablePtr<HeapprofdConfig_ContinuousDumpConfig> continuous_dump_config_;
+  uint64_t shmem_size_bytes_{};
+  bool block_client_{};
+  uint32_t block_client_timeout_us_{};
+  bool no_startup_{};
+  bool no_running_{};
+  bool dump_at_max_{};
+  bool disable_fork_teardown_{};
+  bool disable_vfork_detection_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<28> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT HeapprofdConfig_ContinuousDumpConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDumpPhaseMsFieldNumber = 5,
+    kDumpIntervalMsFieldNumber = 6,
+  };
+
+  HeapprofdConfig_ContinuousDumpConfig();
+  ~HeapprofdConfig_ContinuousDumpConfig() override;
+  HeapprofdConfig_ContinuousDumpConfig(HeapprofdConfig_ContinuousDumpConfig&&) noexcept;
+  HeapprofdConfig_ContinuousDumpConfig& operator=(HeapprofdConfig_ContinuousDumpConfig&&);
+  HeapprofdConfig_ContinuousDumpConfig(const HeapprofdConfig_ContinuousDumpConfig&);
+  HeapprofdConfig_ContinuousDumpConfig& operator=(const HeapprofdConfig_ContinuousDumpConfig&);
+  bool operator==(const HeapprofdConfig_ContinuousDumpConfig&) const;
+  bool operator!=(const HeapprofdConfig_ContinuousDumpConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_dump_phase_ms() const { return _has_field_[5]; }
+  uint32_t dump_phase_ms() const { return dump_phase_ms_; }
+  void set_dump_phase_ms(uint32_t value) { dump_phase_ms_ = value; _has_field_.set(5); }
+
+  bool has_dump_interval_ms() const { return _has_field_[6]; }
+  uint32_t dump_interval_ms() const { return dump_interval_ms_; }
+  void set_dump_interval_ms(uint32_t value) { dump_interval_ms_ = value; _has_field_.set(6); }
+
+ private:
+  uint32_t dump_phase_ms_{};
+  uint32_t dump_interval_ms_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<7> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_HEAPPROFD_CONFIG_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+HeapprofdConfig::HeapprofdConfig() = default;
+HeapprofdConfig::~HeapprofdConfig() = default;
+HeapprofdConfig::HeapprofdConfig(const HeapprofdConfig&) = default;
+HeapprofdConfig& HeapprofdConfig::operator=(const HeapprofdConfig&) = default;
+HeapprofdConfig::HeapprofdConfig(HeapprofdConfig&&) noexcept = default;
+HeapprofdConfig& HeapprofdConfig::operator=(HeapprofdConfig&&) = default;
+
+bool HeapprofdConfig::operator==(const HeapprofdConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && sampling_interval_bytes_ == other.sampling_interval_bytes_
+   && adaptive_sampling_shmem_threshold_ == other.adaptive_sampling_shmem_threshold_
+   && adaptive_sampling_max_sampling_interval_bytes_ == other.adaptive_sampling_max_sampling_interval_bytes_
+   && process_cmdline_ == other.process_cmdline_
+   && pid_ == other.pid_
+   && target_installed_by_ == other.target_installed_by_
+   && heaps_ == other.heaps_
+   && exclude_heaps_ == other.exclude_heaps_
+   && stream_allocations_ == other.stream_allocations_
+   && heap_sampling_intervals_ == other.heap_sampling_intervals_
+   && all_heaps_ == other.all_heaps_
+   && all_ == other.all_
+   && min_anonymous_memory_kb_ == other.min_anonymous_memory_kb_
+   && max_heapprofd_memory_kb_ == other.max_heapprofd_memory_kb_
+   && max_heapprofd_cpu_secs_ == other.max_heapprofd_cpu_secs_
+   && skip_symbol_prefix_ == other.skip_symbol_prefix_
+   && continuous_dump_config_ == other.continuous_dump_config_
+   && shmem_size_bytes_ == other.shmem_size_bytes_
+   && block_client_ == other.block_client_
+   && block_client_timeout_us_ == other.block_client_timeout_us_
+   && no_startup_ == other.no_startup_
+   && no_running_ == other.no_running_
+   && dump_at_max_ == other.dump_at_max_
+   && disable_fork_teardown_ == other.disable_fork_teardown_
+   && disable_vfork_detection_ == other.disable_vfork_detection_;
+}
+
+bool HeapprofdConfig::ParseFromArray(const void* raw, size_t size) {
+  process_cmdline_.clear();
+  pid_.clear();
+  target_installed_by_.clear();
+  heaps_.clear();
+  exclude_heaps_.clear();
+  heap_sampling_intervals_.clear();
+  skip_symbol_prefix_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* sampling_interval_bytes */:
+        field.get(&sampling_interval_bytes_);
+        break;
+      case 24 /* adaptive_sampling_shmem_threshold */:
+        field.get(&adaptive_sampling_shmem_threshold_);
+        break;
+      case 25 /* adaptive_sampling_max_sampling_interval_bytes */:
+        field.get(&adaptive_sampling_max_sampling_interval_bytes_);
+        break;
+      case 2 /* process_cmdline */:
+        process_cmdline_.emplace_back();
+        field.get(&process_cmdline_.back());
+        break;
+      case 4 /* pid */:
+        pid_.emplace_back();
+        field.get(&pid_.back());
+        break;
+      case 26 /* target_installed_by */:
+        target_installed_by_.emplace_back();
+        field.get(&target_installed_by_.back());
+        break;
+      case 20 /* heaps */:
+        heaps_.emplace_back();
+        field.get(&heaps_.back());
+        break;
+      case 27 /* exclude_heaps */:
+        exclude_heaps_.emplace_back();
+        field.get(&exclude_heaps_.back());
+        break;
+      case 23 /* stream_allocations */:
+        field.get(&stream_allocations_);
+        break;
+      case 22 /* heap_sampling_intervals */:
+        heap_sampling_intervals_.emplace_back();
+        field.get(&heap_sampling_intervals_.back());
+        break;
+      case 21 /* all_heaps */:
+        field.get(&all_heaps_);
+        break;
+      case 5 /* all */:
+        field.get(&all_);
+        break;
+      case 15 /* min_anonymous_memory_kb */:
+        field.get(&min_anonymous_memory_kb_);
+        break;
+      case 16 /* max_heapprofd_memory_kb */:
+        field.get(&max_heapprofd_memory_kb_);
+        break;
+      case 17 /* max_heapprofd_cpu_secs */:
+        field.get(&max_heapprofd_cpu_secs_);
+        break;
+      case 7 /* skip_symbol_prefix */:
+        skip_symbol_prefix_.emplace_back();
+        field.get(&skip_symbol_prefix_.back());
+        break;
+      case 6 /* continuous_dump_config */:
+        (*continuous_dump_config_).ParseFromArray(field.data(), field.size());
+        break;
+      case 8 /* shmem_size_bytes */:
+        field.get(&shmem_size_bytes_);
+        break;
+      case 9 /* block_client */:
+        field.get(&block_client_);
+        break;
+      case 14 /* block_client_timeout_us */:
+        field.get(&block_client_timeout_us_);
+        break;
+      case 10 /* no_startup */:
+        field.get(&no_startup_);
+        break;
+      case 11 /* no_running */:
+        field.get(&no_running_);
+        break;
+      case 13 /* dump_at_max */:
+        field.get(&dump_at_max_);
+        break;
+      case 18 /* disable_fork_teardown */:
+        field.get(&disable_fork_teardown_);
+        break;
+      case 19 /* disable_vfork_detection */:
+        field.get(&disable_vfork_detection_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string HeapprofdConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> HeapprofdConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void HeapprofdConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: sampling_interval_bytes
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, sampling_interval_bytes_);
+  }
+
+  // Field 24: adaptive_sampling_shmem_threshold
+  if (_has_field_[24]) {
+    msg->AppendVarInt(24, adaptive_sampling_shmem_threshold_);
+  }
+
+  // Field 25: adaptive_sampling_max_sampling_interval_bytes
+  if (_has_field_[25]) {
+    msg->AppendVarInt(25, adaptive_sampling_max_sampling_interval_bytes_);
+  }
+
+  // Field 2: process_cmdline
+  for (auto& it : process_cmdline_) {
+    msg->AppendString(2, it);
+  }
+
+  // Field 4: pid
+  for (auto& it : pid_) {
+    msg->AppendVarInt(4, it);
+  }
+
+  // Field 26: target_installed_by
+  for (auto& it : target_installed_by_) {
+    msg->AppendString(26, it);
+  }
+
+  // Field 20: heaps
+  for (auto& it : heaps_) {
+    msg->AppendString(20, it);
+  }
+
+  // Field 27: exclude_heaps
+  for (auto& it : exclude_heaps_) {
+    msg->AppendString(27, it);
+  }
+
+  // Field 23: stream_allocations
+  if (_has_field_[23]) {
+    msg->AppendTinyVarInt(23, stream_allocations_);
+  }
+
+  // Field 22: heap_sampling_intervals
+  for (auto& it : heap_sampling_intervals_) {
+    msg->AppendVarInt(22, it);
+  }
+
+  // Field 21: all_heaps
+  if (_has_field_[21]) {
+    msg->AppendTinyVarInt(21, all_heaps_);
+  }
+
+  // Field 5: all
+  if (_has_field_[5]) {
+    msg->AppendTinyVarInt(5, all_);
+  }
+
+  // Field 15: min_anonymous_memory_kb
+  if (_has_field_[15]) {
+    msg->AppendVarInt(15, min_anonymous_memory_kb_);
+  }
+
+  // Field 16: max_heapprofd_memory_kb
+  if (_has_field_[16]) {
+    msg->AppendVarInt(16, max_heapprofd_memory_kb_);
+  }
+
+  // Field 17: max_heapprofd_cpu_secs
+  if (_has_field_[17]) {
+    msg->AppendVarInt(17, max_heapprofd_cpu_secs_);
+  }
+
+  // Field 7: skip_symbol_prefix
+  for (auto& it : skip_symbol_prefix_) {
+    msg->AppendString(7, it);
+  }
+
+  // Field 6: continuous_dump_config
+  if (_has_field_[6]) {
+    (*continuous_dump_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
+  }
+
+  // Field 8: shmem_size_bytes
+  if (_has_field_[8]) {
+    msg->AppendVarInt(8, shmem_size_bytes_);
+  }
+
+  // Field 9: block_client
+  if (_has_field_[9]) {
+    msg->AppendTinyVarInt(9, block_client_);
+  }
+
+  // Field 14: block_client_timeout_us
+  if (_has_field_[14]) {
+    msg->AppendVarInt(14, block_client_timeout_us_);
+  }
+
+  // Field 10: no_startup
+  if (_has_field_[10]) {
+    msg->AppendTinyVarInt(10, no_startup_);
+  }
+
+  // Field 11: no_running
+  if (_has_field_[11]) {
+    msg->AppendTinyVarInt(11, no_running_);
+  }
+
+  // Field 13: dump_at_max
+  if (_has_field_[13]) {
+    msg->AppendTinyVarInt(13, dump_at_max_);
+  }
+
+  // Field 18: disable_fork_teardown
+  if (_has_field_[18]) {
+    msg->AppendTinyVarInt(18, disable_fork_teardown_);
+  }
+
+  // Field 19: disable_vfork_detection
+  if (_has_field_[19]) {
+    msg->AppendTinyVarInt(19, disable_vfork_detection_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+HeapprofdConfig_ContinuousDumpConfig::HeapprofdConfig_ContinuousDumpConfig() = default;
+HeapprofdConfig_ContinuousDumpConfig::~HeapprofdConfig_ContinuousDumpConfig() = default;
+HeapprofdConfig_ContinuousDumpConfig::HeapprofdConfig_ContinuousDumpConfig(const HeapprofdConfig_ContinuousDumpConfig&) = default;
+HeapprofdConfig_ContinuousDumpConfig& HeapprofdConfig_ContinuousDumpConfig::operator=(const HeapprofdConfig_ContinuousDumpConfig&) = default;
+HeapprofdConfig_ContinuousDumpConfig::HeapprofdConfig_ContinuousDumpConfig(HeapprofdConfig_ContinuousDumpConfig&&) noexcept = default;
+HeapprofdConfig_ContinuousDumpConfig& HeapprofdConfig_ContinuousDumpConfig::operator=(HeapprofdConfig_ContinuousDumpConfig&&) = default;
+
+bool HeapprofdConfig_ContinuousDumpConfig::operator==(const HeapprofdConfig_ContinuousDumpConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && dump_phase_ms_ == other.dump_phase_ms_
+   && dump_interval_ms_ == other.dump_interval_ms_;
+}
+
+bool HeapprofdConfig_ContinuousDumpConfig::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 5 /* dump_phase_ms */:
+        field.get(&dump_phase_ms_);
+        break;
+      case 6 /* dump_interval_ms */:
+        field.get(&dump_interval_ms_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string HeapprofdConfig_ContinuousDumpConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> HeapprofdConfig_ContinuousDumpConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void HeapprofdConfig_ContinuousDumpConfig::Serialize(::protozero::Message* msg) const {
+  // Field 5: dump_phase_ms
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, dump_phase_ms_);
+  }
+
+  // Field 6: dump_interval_ms
+  if (_has_field_[6]) {
+    msg->AppendVarInt(6, dump_interval_ms_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/java_hprof_config.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/profiling/java_hprof_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_JAVA_HPROF_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_JAVA_HPROF_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class JavaHprofConfig;
+class JavaHprofConfig_ContinuousDumpConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT JavaHprofConfig : public ::protozero::CppMessageObj {
+ public:
+  using ContinuousDumpConfig = JavaHprofConfig_ContinuousDumpConfig;
+  enum FieldNumbers {
+    kProcessCmdlineFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kTargetInstalledByFieldNumber = 7,
+    kContinuousDumpConfigFieldNumber = 3,
+    kMinAnonymousMemoryKbFieldNumber = 4,
+    kDumpSmapsFieldNumber = 5,
+    kIgnoredTypesFieldNumber = 6,
+  };
+
+  JavaHprofConfig();
+  ~JavaHprofConfig() override;
+  JavaHprofConfig(JavaHprofConfig&&) noexcept;
+  JavaHprofConfig& operator=(JavaHprofConfig&&);
+  JavaHprofConfig(const JavaHprofConfig&);
+  JavaHprofConfig& operator=(const JavaHprofConfig&);
+  bool operator==(const JavaHprofConfig&) const;
+  bool operator!=(const JavaHprofConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<std::string>& process_cmdline() const { return process_cmdline_; }
+  std::vector<std::string>* mutable_process_cmdline() { return &process_cmdline_; }
+  int process_cmdline_size() const { return static_cast<int>(process_cmdline_.size()); }
+  void clear_process_cmdline() { process_cmdline_.clear(); }
+  void add_process_cmdline(std::string value) { process_cmdline_.emplace_back(value); }
+  std::string* add_process_cmdline() { process_cmdline_.emplace_back(); return &process_cmdline_.back(); }
+
+  const std::vector<uint64_t>& pid() const { return pid_; }
+  std::vector<uint64_t>* mutable_pid() { return &pid_; }
+  int pid_size() const { return static_cast<int>(pid_.size()); }
+  void clear_pid() { pid_.clear(); }
+  void add_pid(uint64_t value) { pid_.emplace_back(value); }
+  uint64_t* add_pid() { pid_.emplace_back(); return &pid_.back(); }
+
+  const std::vector<std::string>& target_installed_by() const { return target_installed_by_; }
+  std::vector<std::string>* mutable_target_installed_by() { return &target_installed_by_; }
+  int target_installed_by_size() const { return static_cast<int>(target_installed_by_.size()); }
+  void clear_target_installed_by() { target_installed_by_.clear(); }
+  void add_target_installed_by(std::string value) { target_installed_by_.emplace_back(value); }
+  std::string* add_target_installed_by() { target_installed_by_.emplace_back(); return &target_installed_by_.back(); }
+
+  bool has_continuous_dump_config() const { return _has_field_[3]; }
+  const JavaHprofConfig_ContinuousDumpConfig& continuous_dump_config() const { return *continuous_dump_config_; }
+  JavaHprofConfig_ContinuousDumpConfig* mutable_continuous_dump_config() { _has_field_.set(3); return continuous_dump_config_.get(); }
+
+  bool has_min_anonymous_memory_kb() const { return _has_field_[4]; }
+  uint32_t min_anonymous_memory_kb() const { return min_anonymous_memory_kb_; }
+  void set_min_anonymous_memory_kb(uint32_t value) { min_anonymous_memory_kb_ = value; _has_field_.set(4); }
+
+  bool has_dump_smaps() const { return _has_field_[5]; }
+  bool dump_smaps() const { return dump_smaps_; }
+  void set_dump_smaps(bool value) { dump_smaps_ = value; _has_field_.set(5); }
+
+  const std::vector<std::string>& ignored_types() const { return ignored_types_; }
+  std::vector<std::string>* mutable_ignored_types() { return &ignored_types_; }
+  int ignored_types_size() const { return static_cast<int>(ignored_types_.size()); }
+  void clear_ignored_types() { ignored_types_.clear(); }
+  void add_ignored_types(std::string value) { ignored_types_.emplace_back(value); }
+  std::string* add_ignored_types() { ignored_types_.emplace_back(); return &ignored_types_.back(); }
+
+ private:
+  std::vector<std::string> process_cmdline_;
+  std::vector<uint64_t> pid_;
+  std::vector<std::string> target_installed_by_;
+  ::protozero::CopyablePtr<JavaHprofConfig_ContinuousDumpConfig> continuous_dump_config_;
+  uint32_t min_anonymous_memory_kb_{};
+  bool dump_smaps_{};
+  std::vector<std::string> ignored_types_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<8> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT JavaHprofConfig_ContinuousDumpConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDumpPhaseMsFieldNumber = 1,
+    kDumpIntervalMsFieldNumber = 2,
+  };
+
+  JavaHprofConfig_ContinuousDumpConfig();
+  ~JavaHprofConfig_ContinuousDumpConfig() override;
+  JavaHprofConfig_ContinuousDumpConfig(JavaHprofConfig_ContinuousDumpConfig&&) noexcept;
+  JavaHprofConfig_ContinuousDumpConfig& operator=(JavaHprofConfig_ContinuousDumpConfig&&);
+  JavaHprofConfig_ContinuousDumpConfig(const JavaHprofConfig_ContinuousDumpConfig&);
+  JavaHprofConfig_ContinuousDumpConfig& operator=(const JavaHprofConfig_ContinuousDumpConfig&);
+  bool operator==(const JavaHprofConfig_ContinuousDumpConfig&) const;
+  bool operator!=(const JavaHprofConfig_ContinuousDumpConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_dump_phase_ms() const { return _has_field_[1]; }
+  uint32_t dump_phase_ms() const { return dump_phase_ms_; }
+  void set_dump_phase_ms(uint32_t value) { dump_phase_ms_ = value; _has_field_.set(1); }
+
+  bool has_dump_interval_ms() const { return _has_field_[2]; }
+  uint32_t dump_interval_ms() const { return dump_interval_ms_; }
+  void set_dump_interval_ms(uint32_t value) { dump_interval_ms_ = value; _has_field_.set(2); }
+
+ private:
+  uint32_t dump_phase_ms_{};
+  uint32_t dump_interval_ms_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_JAVA_HPROF_CONFIG_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/java_hprof_config.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+JavaHprofConfig::JavaHprofConfig() = default;
+JavaHprofConfig::~JavaHprofConfig() = default;
+JavaHprofConfig::JavaHprofConfig(const JavaHprofConfig&) = default;
+JavaHprofConfig& JavaHprofConfig::operator=(const JavaHprofConfig&) = default;
+JavaHprofConfig::JavaHprofConfig(JavaHprofConfig&&) noexcept = default;
+JavaHprofConfig& JavaHprofConfig::operator=(JavaHprofConfig&&) = default;
+
+bool JavaHprofConfig::operator==(const JavaHprofConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && process_cmdline_ == other.process_cmdline_
+   && pid_ == other.pid_
+   && target_installed_by_ == other.target_installed_by_
+   && continuous_dump_config_ == other.continuous_dump_config_
+   && min_anonymous_memory_kb_ == other.min_anonymous_memory_kb_
+   && dump_smaps_ == other.dump_smaps_
+   && ignored_types_ == other.ignored_types_;
+}
+
+bool JavaHprofConfig::ParseFromArray(const void* raw, size_t size) {
+  process_cmdline_.clear();
+  pid_.clear();
+  target_installed_by_.clear();
+  ignored_types_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* process_cmdline */:
+        process_cmdline_.emplace_back();
+        field.get(&process_cmdline_.back());
+        break;
+      case 2 /* pid */:
+        pid_.emplace_back();
+        field.get(&pid_.back());
+        break;
+      case 7 /* target_installed_by */:
+        target_installed_by_.emplace_back();
+        field.get(&target_installed_by_.back());
+        break;
+      case 3 /* continuous_dump_config */:
+        (*continuous_dump_config_).ParseFromArray(field.data(), field.size());
+        break;
+      case 4 /* min_anonymous_memory_kb */:
+        field.get(&min_anonymous_memory_kb_);
+        break;
+      case 5 /* dump_smaps */:
+        field.get(&dump_smaps_);
+        break;
+      case 6 /* ignored_types */:
+        ignored_types_.emplace_back();
+        field.get(&ignored_types_.back());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string JavaHprofConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> JavaHprofConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void JavaHprofConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: process_cmdline
+  for (auto& it : process_cmdline_) {
+    msg->AppendString(1, it);
+  }
+
+  // Field 2: pid
+  for (auto& it : pid_) {
+    msg->AppendVarInt(2, it);
+  }
+
+  // Field 7: target_installed_by
+  for (auto& it : target_installed_by_) {
+    msg->AppendString(7, it);
+  }
+
+  // Field 3: continuous_dump_config
+  if (_has_field_[3]) {
+    (*continuous_dump_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
+  }
+
+  // Field 4: min_anonymous_memory_kb
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, min_anonymous_memory_kb_);
+  }
+
+  // Field 5: dump_smaps
+  if (_has_field_[5]) {
+    msg->AppendTinyVarInt(5, dump_smaps_);
+  }
+
+  // Field 6: ignored_types
+  for (auto& it : ignored_types_) {
+    msg->AppendString(6, it);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+JavaHprofConfig_ContinuousDumpConfig::JavaHprofConfig_ContinuousDumpConfig() = default;
+JavaHprofConfig_ContinuousDumpConfig::~JavaHprofConfig_ContinuousDumpConfig() = default;
+JavaHprofConfig_ContinuousDumpConfig::JavaHprofConfig_ContinuousDumpConfig(const JavaHprofConfig_ContinuousDumpConfig&) = default;
+JavaHprofConfig_ContinuousDumpConfig& JavaHprofConfig_ContinuousDumpConfig::operator=(const JavaHprofConfig_ContinuousDumpConfig&) = default;
+JavaHprofConfig_ContinuousDumpConfig::JavaHprofConfig_ContinuousDumpConfig(JavaHprofConfig_ContinuousDumpConfig&&) noexcept = default;
+JavaHprofConfig_ContinuousDumpConfig& JavaHprofConfig_ContinuousDumpConfig::operator=(JavaHprofConfig_ContinuousDumpConfig&&) = default;
+
+bool JavaHprofConfig_ContinuousDumpConfig::operator==(const JavaHprofConfig_ContinuousDumpConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && dump_phase_ms_ == other.dump_phase_ms_
+   && dump_interval_ms_ == other.dump_interval_ms_;
+}
+
+bool JavaHprofConfig_ContinuousDumpConfig::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* dump_phase_ms */:
+        field.get(&dump_phase_ms_);
+        break;
+      case 2 /* dump_interval_ms */:
+        field.get(&dump_interval_ms_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string JavaHprofConfig_ContinuousDumpConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> JavaHprofConfig_ContinuousDumpConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void JavaHprofConfig_ContinuousDumpConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: dump_phase_ms
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, dump_phase_ms_);
+  }
+
+  // Field 2: dump_interval_ms
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, dump_interval_ms_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/perf_event_config.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/profiling/perf_event_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_PERF_EVENT_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_PERF_EVENT_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class PerfEventConfig;
+class PerfEventConfig_CallstackSampling;
+class PerfEventConfig_Scope;
+class PerfEvents_Timebase;
+class PerfEvents_Tracepoint;
+enum PerfEvents_Counter : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT PerfEventConfig : public ::protozero::CppMessageObj {
+ public:
+  using CallstackSampling = PerfEventConfig_CallstackSampling;
+  using Scope = PerfEventConfig_Scope;
+  enum FieldNumbers {
+    kTimebaseFieldNumber = 15,
+    kCallstackSamplingFieldNumber = 16,
+    kRingBufferReadPeriodMsFieldNumber = 8,
+    kRingBufferPagesFieldNumber = 3,
+    kMaxEnqueuedFootprintKbFieldNumber = 17,
+    kMaxDaemonMemoryKbFieldNumber = 13,
+    kRemoteDescriptorTimeoutMsFieldNumber = 9,
+    kUnwindStateClearPeriodMsFieldNumber = 10,
+    kAllCpusFieldNumber = 1,
+    kSamplingFrequencyFieldNumber = 2,
+    kKernelFramesFieldNumber = 12,
+    kTargetPidFieldNumber = 4,
+    kTargetCmdlineFieldNumber = 5,
+    kTargetInstalledByFieldNumber = 18,
+    kExcludePidFieldNumber = 6,
+    kExcludeCmdlineFieldNumber = 7,
+    kAdditionalCmdlineCountFieldNumber = 11,
+  };
+
+  PerfEventConfig();
+  ~PerfEventConfig() override;
+  PerfEventConfig(PerfEventConfig&&) noexcept;
+  PerfEventConfig& operator=(PerfEventConfig&&);
+  PerfEventConfig(const PerfEventConfig&);
+  PerfEventConfig& operator=(const PerfEventConfig&);
+  bool operator==(const PerfEventConfig&) const;
+  bool operator!=(const PerfEventConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_timebase() const { return _has_field_[15]; }
+  const PerfEvents_Timebase& timebase() const { return *timebase_; }
+  PerfEvents_Timebase* mutable_timebase() { _has_field_.set(15); return timebase_.get(); }
+
+  bool has_callstack_sampling() const { return _has_field_[16]; }
+  const PerfEventConfig_CallstackSampling& callstack_sampling() const { return *callstack_sampling_; }
+  PerfEventConfig_CallstackSampling* mutable_callstack_sampling() { _has_field_.set(16); return callstack_sampling_.get(); }
+
+  bool has_ring_buffer_read_period_ms() const { return _has_field_[8]; }
+  uint32_t ring_buffer_read_period_ms() const { return ring_buffer_read_period_ms_; }
+  void set_ring_buffer_read_period_ms(uint32_t value) { ring_buffer_read_period_ms_ = value; _has_field_.set(8); }
+
+  bool has_ring_buffer_pages() const { return _has_field_[3]; }
+  uint32_t ring_buffer_pages() const { return ring_buffer_pages_; }
+  void set_ring_buffer_pages(uint32_t value) { ring_buffer_pages_ = value; _has_field_.set(3); }
+
+  bool has_max_enqueued_footprint_kb() const { return _has_field_[17]; }
+  uint64_t max_enqueued_footprint_kb() const { return max_enqueued_footprint_kb_; }
+  void set_max_enqueued_footprint_kb(uint64_t value) { max_enqueued_footprint_kb_ = value; _has_field_.set(17); }
+
+  bool has_max_daemon_memory_kb() const { return _has_field_[13]; }
+  uint32_t max_daemon_memory_kb() const { return max_daemon_memory_kb_; }
+  void set_max_daemon_memory_kb(uint32_t value) { max_daemon_memory_kb_ = value; _has_field_.set(13); }
+
+  bool has_remote_descriptor_timeout_ms() const { return _has_field_[9]; }
+  uint32_t remote_descriptor_timeout_ms() const { return remote_descriptor_timeout_ms_; }
+  void set_remote_descriptor_timeout_ms(uint32_t value) { remote_descriptor_timeout_ms_ = value; _has_field_.set(9); }
+
+  bool has_unwind_state_clear_period_ms() const { return _has_field_[10]; }
+  uint32_t unwind_state_clear_period_ms() const { return unwind_state_clear_period_ms_; }
+  void set_unwind_state_clear_period_ms(uint32_t value) { unwind_state_clear_period_ms_ = value; _has_field_.set(10); }
+
+  bool has_all_cpus() const { return _has_field_[1]; }
+  bool all_cpus() const { return all_cpus_; }
+  void set_all_cpus(bool value) { all_cpus_ = value; _has_field_.set(1); }
+
+  bool has_sampling_frequency() const { return _has_field_[2]; }
+  uint32_t sampling_frequency() const { return sampling_frequency_; }
+  void set_sampling_frequency(uint32_t value) { sampling_frequency_ = value; _has_field_.set(2); }
+
+  bool has_kernel_frames() const { return _has_field_[12]; }
+  bool kernel_frames() const { return kernel_frames_; }
+  void set_kernel_frames(bool value) { kernel_frames_ = value; _has_field_.set(12); }
+
+  const std::vector<int32_t>& target_pid() const { return target_pid_; }
+  std::vector<int32_t>* mutable_target_pid() { return &target_pid_; }
+  int target_pid_size() const { return static_cast<int>(target_pid_.size()); }
+  void clear_target_pid() { target_pid_.clear(); }
+  void add_target_pid(int32_t value) { target_pid_.emplace_back(value); }
+  int32_t* add_target_pid() { target_pid_.emplace_back(); return &target_pid_.back(); }
+
+  const std::vector<std::string>& target_cmdline() const { return target_cmdline_; }
+  std::vector<std::string>* mutable_target_cmdline() { return &target_cmdline_; }
+  int target_cmdline_size() const { return static_cast<int>(target_cmdline_.size()); }
+  void clear_target_cmdline() { target_cmdline_.clear(); }
+  void add_target_cmdline(std::string value) { target_cmdline_.emplace_back(value); }
+  std::string* add_target_cmdline() { target_cmdline_.emplace_back(); return &target_cmdline_.back(); }
+
+  const std::vector<std::string>& target_installed_by() const { return target_installed_by_; }
+  std::vector<std::string>* mutable_target_installed_by() { return &target_installed_by_; }
+  int target_installed_by_size() const { return static_cast<int>(target_installed_by_.size()); }
+  void clear_target_installed_by() { target_installed_by_.clear(); }
+  void add_target_installed_by(std::string value) { target_installed_by_.emplace_back(value); }
+  std::string* add_target_installed_by() { target_installed_by_.emplace_back(); return &target_installed_by_.back(); }
+
+  const std::vector<int32_t>& exclude_pid() const { return exclude_pid_; }
+  std::vector<int32_t>* mutable_exclude_pid() { return &exclude_pid_; }
+  int exclude_pid_size() const { return static_cast<int>(exclude_pid_.size()); }
+  void clear_exclude_pid() { exclude_pid_.clear(); }
+  void add_exclude_pid(int32_t value) { exclude_pid_.emplace_back(value); }
+  int32_t* add_exclude_pid() { exclude_pid_.emplace_back(); return &exclude_pid_.back(); }
+
+  const std::vector<std::string>& exclude_cmdline() const { return exclude_cmdline_; }
+  std::vector<std::string>* mutable_exclude_cmdline() { return &exclude_cmdline_; }
+  int exclude_cmdline_size() const { return static_cast<int>(exclude_cmdline_.size()); }
+  void clear_exclude_cmdline() { exclude_cmdline_.clear(); }
+  void add_exclude_cmdline(std::string value) { exclude_cmdline_.emplace_back(value); }
+  std::string* add_exclude_cmdline() { exclude_cmdline_.emplace_back(); return &exclude_cmdline_.back(); }
+
+  bool has_additional_cmdline_count() const { return _has_field_[11]; }
+  uint32_t additional_cmdline_count() const { return additional_cmdline_count_; }
+  void set_additional_cmdline_count(uint32_t value) { additional_cmdline_count_ = value; _has_field_.set(11); }
+
+ private:
+  ::protozero::CopyablePtr<PerfEvents_Timebase> timebase_;
+  ::protozero::CopyablePtr<PerfEventConfig_CallstackSampling> callstack_sampling_;
+  uint32_t ring_buffer_read_period_ms_{};
+  uint32_t ring_buffer_pages_{};
+  uint64_t max_enqueued_footprint_kb_{};
+  uint32_t max_daemon_memory_kb_{};
+  uint32_t remote_descriptor_timeout_ms_{};
+  uint32_t unwind_state_clear_period_ms_{};
+  bool all_cpus_{};
+  uint32_t sampling_frequency_{};
+  bool kernel_frames_{};
+  std::vector<int32_t> target_pid_;
+  std::vector<std::string> target_cmdline_;
+  std::vector<std::string> target_installed_by_;
+  std::vector<int32_t> exclude_pid_;
+  std::vector<std::string> exclude_cmdline_;
+  uint32_t additional_cmdline_count_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<19> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT PerfEventConfig_CallstackSampling : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kScopeFieldNumber = 1,
+    kKernelFramesFieldNumber = 2,
+  };
+
+  PerfEventConfig_CallstackSampling();
+  ~PerfEventConfig_CallstackSampling() override;
+  PerfEventConfig_CallstackSampling(PerfEventConfig_CallstackSampling&&) noexcept;
+  PerfEventConfig_CallstackSampling& operator=(PerfEventConfig_CallstackSampling&&);
+  PerfEventConfig_CallstackSampling(const PerfEventConfig_CallstackSampling&);
+  PerfEventConfig_CallstackSampling& operator=(const PerfEventConfig_CallstackSampling&);
+  bool operator==(const PerfEventConfig_CallstackSampling&) const;
+  bool operator!=(const PerfEventConfig_CallstackSampling& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_scope() const { return _has_field_[1]; }
+  const PerfEventConfig_Scope& scope() const { return *scope_; }
+  PerfEventConfig_Scope* mutable_scope() { _has_field_.set(1); return scope_.get(); }
+
+  bool has_kernel_frames() const { return _has_field_[2]; }
+  bool kernel_frames() const { return kernel_frames_; }
+  void set_kernel_frames(bool value) { kernel_frames_ = value; _has_field_.set(2); }
+
+ private:
+  ::protozero::CopyablePtr<PerfEventConfig_Scope> scope_;
+  bool kernel_frames_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT PerfEventConfig_Scope : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTargetPidFieldNumber = 1,
+    kTargetCmdlineFieldNumber = 2,
+    kExcludePidFieldNumber = 3,
+    kExcludeCmdlineFieldNumber = 4,
+    kAdditionalCmdlineCountFieldNumber = 5,
+  };
+
+  PerfEventConfig_Scope();
+  ~PerfEventConfig_Scope() override;
+  PerfEventConfig_Scope(PerfEventConfig_Scope&&) noexcept;
+  PerfEventConfig_Scope& operator=(PerfEventConfig_Scope&&);
+  PerfEventConfig_Scope(const PerfEventConfig_Scope&);
+  PerfEventConfig_Scope& operator=(const PerfEventConfig_Scope&);
+  bool operator==(const PerfEventConfig_Scope&) const;
+  bool operator!=(const PerfEventConfig_Scope& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<int32_t>& target_pid() const { return target_pid_; }
+  std::vector<int32_t>* mutable_target_pid() { return &target_pid_; }
+  int target_pid_size() const { return static_cast<int>(target_pid_.size()); }
+  void clear_target_pid() { target_pid_.clear(); }
+  void add_target_pid(int32_t value) { target_pid_.emplace_back(value); }
+  int32_t* add_target_pid() { target_pid_.emplace_back(); return &target_pid_.back(); }
+
+  const std::vector<std::string>& target_cmdline() const { return target_cmdline_; }
+  std::vector<std::string>* mutable_target_cmdline() { return &target_cmdline_; }
+  int target_cmdline_size() const { return static_cast<int>(target_cmdline_.size()); }
+  void clear_target_cmdline() { target_cmdline_.clear(); }
+  void add_target_cmdline(std::string value) { target_cmdline_.emplace_back(value); }
+  std::string* add_target_cmdline() { target_cmdline_.emplace_back(); return &target_cmdline_.back(); }
+
+  const std::vector<int32_t>& exclude_pid() const { return exclude_pid_; }
+  std::vector<int32_t>* mutable_exclude_pid() { return &exclude_pid_; }
+  int exclude_pid_size() const { return static_cast<int>(exclude_pid_.size()); }
+  void clear_exclude_pid() { exclude_pid_.clear(); }
+  void add_exclude_pid(int32_t value) { exclude_pid_.emplace_back(value); }
+  int32_t* add_exclude_pid() { exclude_pid_.emplace_back(); return &exclude_pid_.back(); }
+
+  const std::vector<std::string>& exclude_cmdline() const { return exclude_cmdline_; }
+  std::vector<std::string>* mutable_exclude_cmdline() { return &exclude_cmdline_; }
+  int exclude_cmdline_size() const { return static_cast<int>(exclude_cmdline_.size()); }
+  void clear_exclude_cmdline() { exclude_cmdline_.clear(); }
+  void add_exclude_cmdline(std::string value) { exclude_cmdline_.emplace_back(value); }
+  std::string* add_exclude_cmdline() { exclude_cmdline_.emplace_back(); return &exclude_cmdline_.back(); }
+
+  bool has_additional_cmdline_count() const { return _has_field_[5]; }
+  uint32_t additional_cmdline_count() const { return additional_cmdline_count_; }
+  void set_additional_cmdline_count(uint32_t value) { additional_cmdline_count_ = value; _has_field_.set(5); }
+
+ private:
+  std::vector<int32_t> target_pid_;
+  std::vector<std::string> target_cmdline_;
+  std::vector<int32_t> exclude_pid_;
+  std::vector<std::string> exclude_cmdline_;
+  uint32_t additional_cmdline_count_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_PERF_EVENT_CONFIG_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/perf_event_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/perf_events.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+PerfEventConfig::PerfEventConfig() = default;
+PerfEventConfig::~PerfEventConfig() = default;
+PerfEventConfig::PerfEventConfig(const PerfEventConfig&) = default;
+PerfEventConfig& PerfEventConfig::operator=(const PerfEventConfig&) = default;
+PerfEventConfig::PerfEventConfig(PerfEventConfig&&) noexcept = default;
+PerfEventConfig& PerfEventConfig::operator=(PerfEventConfig&&) = default;
+
+bool PerfEventConfig::operator==(const PerfEventConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && timebase_ == other.timebase_
+   && callstack_sampling_ == other.callstack_sampling_
+   && ring_buffer_read_period_ms_ == other.ring_buffer_read_period_ms_
+   && ring_buffer_pages_ == other.ring_buffer_pages_
+   && max_enqueued_footprint_kb_ == other.max_enqueued_footprint_kb_
+   && max_daemon_memory_kb_ == other.max_daemon_memory_kb_
+   && remote_descriptor_timeout_ms_ == other.remote_descriptor_timeout_ms_
+   && unwind_state_clear_period_ms_ == other.unwind_state_clear_period_ms_
+   && all_cpus_ == other.all_cpus_
+   && sampling_frequency_ == other.sampling_frequency_
+   && kernel_frames_ == other.kernel_frames_
+   && target_pid_ == other.target_pid_
+   && target_cmdline_ == other.target_cmdline_
+   && target_installed_by_ == other.target_installed_by_
+   && exclude_pid_ == other.exclude_pid_
+   && exclude_cmdline_ == other.exclude_cmdline_
+   && additional_cmdline_count_ == other.additional_cmdline_count_;
+}
+
+bool PerfEventConfig::ParseFromArray(const void* raw, size_t size) {
+  target_pid_.clear();
+  target_cmdline_.clear();
+  target_installed_by_.clear();
+  exclude_pid_.clear();
+  exclude_cmdline_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 15 /* timebase */:
+        (*timebase_).ParseFromArray(field.data(), field.size());
+        break;
+      case 16 /* callstack_sampling */:
+        (*callstack_sampling_).ParseFromArray(field.data(), field.size());
+        break;
+      case 8 /* ring_buffer_read_period_ms */:
+        field.get(&ring_buffer_read_period_ms_);
+        break;
+      case 3 /* ring_buffer_pages */:
+        field.get(&ring_buffer_pages_);
+        break;
+      case 17 /* max_enqueued_footprint_kb */:
+        field.get(&max_enqueued_footprint_kb_);
+        break;
+      case 13 /* max_daemon_memory_kb */:
+        field.get(&max_daemon_memory_kb_);
+        break;
+      case 9 /* remote_descriptor_timeout_ms */:
+        field.get(&remote_descriptor_timeout_ms_);
+        break;
+      case 10 /* unwind_state_clear_period_ms */:
+        field.get(&unwind_state_clear_period_ms_);
+        break;
+      case 1 /* all_cpus */:
+        field.get(&all_cpus_);
+        break;
+      case 2 /* sampling_frequency */:
+        field.get(&sampling_frequency_);
+        break;
+      case 12 /* kernel_frames */:
+        field.get(&kernel_frames_);
+        break;
+      case 4 /* target_pid */:
+        target_pid_.emplace_back();
+        field.get(&target_pid_.back());
+        break;
+      case 5 /* target_cmdline */:
+        target_cmdline_.emplace_back();
+        field.get(&target_cmdline_.back());
+        break;
+      case 18 /* target_installed_by */:
+        target_installed_by_.emplace_back();
+        field.get(&target_installed_by_.back());
+        break;
+      case 6 /* exclude_pid */:
+        exclude_pid_.emplace_back();
+        field.get(&exclude_pid_.back());
+        break;
+      case 7 /* exclude_cmdline */:
+        exclude_cmdline_.emplace_back();
+        field.get(&exclude_cmdline_.back());
+        break;
+      case 11 /* additional_cmdline_count */:
+        field.get(&additional_cmdline_count_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string PerfEventConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> PerfEventConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void PerfEventConfig::Serialize(::protozero::Message* msg) const {
+  // Field 15: timebase
+  if (_has_field_[15]) {
+    (*timebase_).Serialize(msg->BeginNestedMessage<::protozero::Message>(15));
+  }
+
+  // Field 16: callstack_sampling
+  if (_has_field_[16]) {
+    (*callstack_sampling_).Serialize(msg->BeginNestedMessage<::protozero::Message>(16));
+  }
+
+  // Field 8: ring_buffer_read_period_ms
+  if (_has_field_[8]) {
+    msg->AppendVarInt(8, ring_buffer_read_period_ms_);
+  }
+
+  // Field 3: ring_buffer_pages
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, ring_buffer_pages_);
+  }
+
+  // Field 17: max_enqueued_footprint_kb
+  if (_has_field_[17]) {
+    msg->AppendVarInt(17, max_enqueued_footprint_kb_);
+  }
+
+  // Field 13: max_daemon_memory_kb
+  if (_has_field_[13]) {
+    msg->AppendVarInt(13, max_daemon_memory_kb_);
+  }
+
+  // Field 9: remote_descriptor_timeout_ms
+  if (_has_field_[9]) {
+    msg->AppendVarInt(9, remote_descriptor_timeout_ms_);
+  }
+
+  // Field 10: unwind_state_clear_period_ms
+  if (_has_field_[10]) {
+    msg->AppendVarInt(10, unwind_state_clear_period_ms_);
+  }
+
+  // Field 1: all_cpus
+  if (_has_field_[1]) {
+    msg->AppendTinyVarInt(1, all_cpus_);
+  }
+
+  // Field 2: sampling_frequency
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, sampling_frequency_);
+  }
+
+  // Field 12: kernel_frames
+  if (_has_field_[12]) {
+    msg->AppendTinyVarInt(12, kernel_frames_);
+  }
+
+  // Field 4: target_pid
+  for (auto& it : target_pid_) {
+    msg->AppendVarInt(4, it);
+  }
+
+  // Field 5: target_cmdline
+  for (auto& it : target_cmdline_) {
+    msg->AppendString(5, it);
+  }
+
+  // Field 18: target_installed_by
+  for (auto& it : target_installed_by_) {
+    msg->AppendString(18, it);
+  }
+
+  // Field 6: exclude_pid
+  for (auto& it : exclude_pid_) {
+    msg->AppendVarInt(6, it);
+  }
+
+  // Field 7: exclude_cmdline
+  for (auto& it : exclude_cmdline_) {
+    msg->AppendString(7, it);
+  }
+
+  // Field 11: additional_cmdline_count
+  if (_has_field_[11]) {
+    msg->AppendVarInt(11, additional_cmdline_count_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+PerfEventConfig_CallstackSampling::PerfEventConfig_CallstackSampling() = default;
+PerfEventConfig_CallstackSampling::~PerfEventConfig_CallstackSampling() = default;
+PerfEventConfig_CallstackSampling::PerfEventConfig_CallstackSampling(const PerfEventConfig_CallstackSampling&) = default;
+PerfEventConfig_CallstackSampling& PerfEventConfig_CallstackSampling::operator=(const PerfEventConfig_CallstackSampling&) = default;
+PerfEventConfig_CallstackSampling::PerfEventConfig_CallstackSampling(PerfEventConfig_CallstackSampling&&) noexcept = default;
+PerfEventConfig_CallstackSampling& PerfEventConfig_CallstackSampling::operator=(PerfEventConfig_CallstackSampling&&) = default;
+
+bool PerfEventConfig_CallstackSampling::operator==(const PerfEventConfig_CallstackSampling& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && scope_ == other.scope_
+   && kernel_frames_ == other.kernel_frames_;
+}
+
+bool PerfEventConfig_CallstackSampling::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* scope */:
+        (*scope_).ParseFromArray(field.data(), field.size());
+        break;
+      case 2 /* kernel_frames */:
+        field.get(&kernel_frames_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string PerfEventConfig_CallstackSampling::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> PerfEventConfig_CallstackSampling::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void PerfEventConfig_CallstackSampling::Serialize(::protozero::Message* msg) const {
+  // Field 1: scope
+  if (_has_field_[1]) {
+    (*scope_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  // Field 2: kernel_frames
+  if (_has_field_[2]) {
+    msg->AppendTinyVarInt(2, kernel_frames_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+PerfEventConfig_Scope::PerfEventConfig_Scope() = default;
+PerfEventConfig_Scope::~PerfEventConfig_Scope() = default;
+PerfEventConfig_Scope::PerfEventConfig_Scope(const PerfEventConfig_Scope&) = default;
+PerfEventConfig_Scope& PerfEventConfig_Scope::operator=(const PerfEventConfig_Scope&) = default;
+PerfEventConfig_Scope::PerfEventConfig_Scope(PerfEventConfig_Scope&&) noexcept = default;
+PerfEventConfig_Scope& PerfEventConfig_Scope::operator=(PerfEventConfig_Scope&&) = default;
+
+bool PerfEventConfig_Scope::operator==(const PerfEventConfig_Scope& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && target_pid_ == other.target_pid_
+   && target_cmdline_ == other.target_cmdline_
+   && exclude_pid_ == other.exclude_pid_
+   && exclude_cmdline_ == other.exclude_cmdline_
+   && additional_cmdline_count_ == other.additional_cmdline_count_;
+}
+
+bool PerfEventConfig_Scope::ParseFromArray(const void* raw, size_t size) {
+  target_pid_.clear();
+  target_cmdline_.clear();
+  exclude_pid_.clear();
+  exclude_cmdline_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* target_pid */:
+        target_pid_.emplace_back();
+        field.get(&target_pid_.back());
+        break;
+      case 2 /* target_cmdline */:
+        target_cmdline_.emplace_back();
+        field.get(&target_cmdline_.back());
+        break;
+      case 3 /* exclude_pid */:
+        exclude_pid_.emplace_back();
+        field.get(&exclude_pid_.back());
+        break;
+      case 4 /* exclude_cmdline */:
+        exclude_cmdline_.emplace_back();
+        field.get(&exclude_cmdline_.back());
+        break;
+      case 5 /* additional_cmdline_count */:
+        field.get(&additional_cmdline_count_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string PerfEventConfig_Scope::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> PerfEventConfig_Scope::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void PerfEventConfig_Scope::Serialize(::protozero::Message* msg) const {
+  // Field 1: target_pid
+  for (auto& it : target_pid_) {
+    msg->AppendVarInt(1, it);
+  }
+
+  // Field 2: target_cmdline
+  for (auto& it : target_cmdline_) {
+    msg->AppendString(2, it);
+  }
+
+  // Field 3: exclude_pid
+  for (auto& it : exclude_pid_) {
+    msg->AppendVarInt(3, it);
+  }
+
+  // Field 4: exclude_cmdline
+  for (auto& it : exclude_cmdline_) {
+    msg->AppendString(4, it);
+  }
+
+  // Field 5: additional_cmdline_count
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, additional_cmdline_count_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/sys_stats/sys_stats_config.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/sys_stats/sys_stats_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_SYS_STATS_SYS_STATS_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_SYS_STATS_SYS_STATS_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class SysStatsConfig;
+enum SysStatsConfig_StatCounters : int;
+enum MeminfoCounters : int;
+enum VmstatCounters : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum SysStatsConfig_StatCounters : int {
+  SysStatsConfig_StatCounters_STAT_UNSPECIFIED = 0,
+  SysStatsConfig_StatCounters_STAT_CPU_TIMES = 1,
+  SysStatsConfig_StatCounters_STAT_IRQ_COUNTS = 2,
+  SysStatsConfig_StatCounters_STAT_SOFTIRQ_COUNTS = 3,
+  SysStatsConfig_StatCounters_STAT_FORK_COUNT = 4,
+};
+
+class PERFETTO_EXPORT SysStatsConfig : public ::protozero::CppMessageObj {
+ public:
+  using StatCounters = SysStatsConfig_StatCounters;
+  static constexpr auto STAT_UNSPECIFIED = SysStatsConfig_StatCounters_STAT_UNSPECIFIED;
+  static constexpr auto STAT_CPU_TIMES = SysStatsConfig_StatCounters_STAT_CPU_TIMES;
+  static constexpr auto STAT_IRQ_COUNTS = SysStatsConfig_StatCounters_STAT_IRQ_COUNTS;
+  static constexpr auto STAT_SOFTIRQ_COUNTS = SysStatsConfig_StatCounters_STAT_SOFTIRQ_COUNTS;
+  static constexpr auto STAT_FORK_COUNT = SysStatsConfig_StatCounters_STAT_FORK_COUNT;
+  static constexpr auto StatCounters_MIN = SysStatsConfig_StatCounters_STAT_UNSPECIFIED;
+  static constexpr auto StatCounters_MAX = SysStatsConfig_StatCounters_STAT_FORK_COUNT;
+  enum FieldNumbers {
+    kMeminfoPeriodMsFieldNumber = 1,
+    kMeminfoCountersFieldNumber = 2,
+    kVmstatPeriodMsFieldNumber = 3,
+    kVmstatCountersFieldNumber = 4,
+    kStatPeriodMsFieldNumber = 5,
+    kStatCountersFieldNumber = 6,
+    kDevfreqPeriodMsFieldNumber = 7,
+  };
+
+  SysStatsConfig();
+  ~SysStatsConfig() override;
+  SysStatsConfig(SysStatsConfig&&) noexcept;
+  SysStatsConfig& operator=(SysStatsConfig&&);
+  SysStatsConfig(const SysStatsConfig&);
+  SysStatsConfig& operator=(const SysStatsConfig&);
+  bool operator==(const SysStatsConfig&) const;
+  bool operator!=(const SysStatsConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_meminfo_period_ms() const { return _has_field_[1]; }
+  uint32_t meminfo_period_ms() const { return meminfo_period_ms_; }
+  void set_meminfo_period_ms(uint32_t value) { meminfo_period_ms_ = value; _has_field_.set(1); }
+
+  const std::vector<MeminfoCounters>& meminfo_counters() const { return meminfo_counters_; }
+  std::vector<MeminfoCounters>* mutable_meminfo_counters() { return &meminfo_counters_; }
+  int meminfo_counters_size() const { return static_cast<int>(meminfo_counters_.size()); }
+  void clear_meminfo_counters() { meminfo_counters_.clear(); }
+  void add_meminfo_counters(MeminfoCounters value) { meminfo_counters_.emplace_back(value); }
+  MeminfoCounters* add_meminfo_counters() { meminfo_counters_.emplace_back(); return &meminfo_counters_.back(); }
+
+  bool has_vmstat_period_ms() const { return _has_field_[3]; }
+  uint32_t vmstat_period_ms() const { return vmstat_period_ms_; }
+  void set_vmstat_period_ms(uint32_t value) { vmstat_period_ms_ = value; _has_field_.set(3); }
+
+  const std::vector<VmstatCounters>& vmstat_counters() const { return vmstat_counters_; }
+  std::vector<VmstatCounters>* mutable_vmstat_counters() { return &vmstat_counters_; }
+  int vmstat_counters_size() const { return static_cast<int>(vmstat_counters_.size()); }
+  void clear_vmstat_counters() { vmstat_counters_.clear(); }
+  void add_vmstat_counters(VmstatCounters value) { vmstat_counters_.emplace_back(value); }
+  VmstatCounters* add_vmstat_counters() { vmstat_counters_.emplace_back(); return &vmstat_counters_.back(); }
+
+  bool has_stat_period_ms() const { return _has_field_[5]; }
+  uint32_t stat_period_ms() const { return stat_period_ms_; }
+  void set_stat_period_ms(uint32_t value) { stat_period_ms_ = value; _has_field_.set(5); }
+
+  const std::vector<SysStatsConfig_StatCounters>& stat_counters() const { return stat_counters_; }
+  std::vector<SysStatsConfig_StatCounters>* mutable_stat_counters() { return &stat_counters_; }
+  int stat_counters_size() const { return static_cast<int>(stat_counters_.size()); }
+  void clear_stat_counters() { stat_counters_.clear(); }
+  void add_stat_counters(SysStatsConfig_StatCounters value) { stat_counters_.emplace_back(value); }
+  SysStatsConfig_StatCounters* add_stat_counters() { stat_counters_.emplace_back(); return &stat_counters_.back(); }
+
+  bool has_devfreq_period_ms() const { return _has_field_[7]; }
+  uint32_t devfreq_period_ms() const { return devfreq_period_ms_; }
+  void set_devfreq_period_ms(uint32_t value) { devfreq_period_ms_ = value; _has_field_.set(7); }
+
+ private:
+  uint32_t meminfo_period_ms_{};
+  std::vector<MeminfoCounters> meminfo_counters_;
+  uint32_t vmstat_period_ms_{};
+  std::vector<VmstatCounters> vmstat_counters_;
+  uint32_t stat_period_ms_{};
+  std::vector<SysStatsConfig_StatCounters> stat_counters_;
+  uint32_t devfreq_period_ms_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<8> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_SYS_STATS_SYS_STATS_CONFIG_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/sys_stats/sys_stats_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+SysStatsConfig::SysStatsConfig() = default;
+SysStatsConfig::~SysStatsConfig() = default;
+SysStatsConfig::SysStatsConfig(const SysStatsConfig&) = default;
+SysStatsConfig& SysStatsConfig::operator=(const SysStatsConfig&) = default;
+SysStatsConfig::SysStatsConfig(SysStatsConfig&&) noexcept = default;
+SysStatsConfig& SysStatsConfig::operator=(SysStatsConfig&&) = default;
+
+bool SysStatsConfig::operator==(const SysStatsConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && meminfo_period_ms_ == other.meminfo_period_ms_
+   && meminfo_counters_ == other.meminfo_counters_
+   && vmstat_period_ms_ == other.vmstat_period_ms_
+   && vmstat_counters_ == other.vmstat_counters_
+   && stat_period_ms_ == other.stat_period_ms_
+   && stat_counters_ == other.stat_counters_
+   && devfreq_period_ms_ == other.devfreq_period_ms_;
+}
+
+bool SysStatsConfig::ParseFromArray(const void* raw, size_t size) {
+  meminfo_counters_.clear();
+  vmstat_counters_.clear();
+  stat_counters_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* meminfo_period_ms */:
+        field.get(&meminfo_period_ms_);
+        break;
+      case 2 /* meminfo_counters */:
+        meminfo_counters_.emplace_back();
+        field.get(&meminfo_counters_.back());
+        break;
+      case 3 /* vmstat_period_ms */:
+        field.get(&vmstat_period_ms_);
+        break;
+      case 4 /* vmstat_counters */:
+        vmstat_counters_.emplace_back();
+        field.get(&vmstat_counters_.back());
+        break;
+      case 5 /* stat_period_ms */:
+        field.get(&stat_period_ms_);
+        break;
+      case 6 /* stat_counters */:
+        stat_counters_.emplace_back();
+        field.get(&stat_counters_.back());
+        break;
+      case 7 /* devfreq_period_ms */:
+        field.get(&devfreq_period_ms_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string SysStatsConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> SysStatsConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void SysStatsConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: meminfo_period_ms
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, meminfo_period_ms_);
+  }
+
+  // Field 2: meminfo_counters
+  for (auto& it : meminfo_counters_) {
+    msg->AppendVarInt(2, it);
+  }
+
+  // Field 3: vmstat_period_ms
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, vmstat_period_ms_);
+  }
+
+  // Field 4: vmstat_counters
+  for (auto& it : vmstat_counters_) {
+    msg->AppendVarInt(4, it);
+  }
+
+  // Field 5: stat_period_ms
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, stat_period_ms_);
+  }
+
+  // Field 6: stat_counters
+  for (auto& it : stat_counters_) {
+    msg->AppendVarInt(6, it);
+  }
+
+  // Field 7: devfreq_period_ms
+  if (_has_field_[7]) {
+    msg->AppendVarInt(7, devfreq_period_ms_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/track_event/track_event_config.gen.cc
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/track_event/track_event_config.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+TrackEventConfig::TrackEventConfig() = default;
+TrackEventConfig::~TrackEventConfig() = default;
+TrackEventConfig::TrackEventConfig(const TrackEventConfig&) = default;
+TrackEventConfig& TrackEventConfig::operator=(const TrackEventConfig&) = default;
+TrackEventConfig::TrackEventConfig(TrackEventConfig&&) noexcept = default;
+TrackEventConfig& TrackEventConfig::operator=(TrackEventConfig&&) = default;
+
+bool TrackEventConfig::operator==(const TrackEventConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && disabled_categories_ == other.disabled_categories_
+   && enabled_categories_ == other.enabled_categories_
+   && disabled_tags_ == other.disabled_tags_
+   && enabled_tags_ == other.enabled_tags_;
+}
+
+bool TrackEventConfig::ParseFromArray(const void* raw, size_t size) {
+  disabled_categories_.clear();
+  enabled_categories_.clear();
+  disabled_tags_.clear();
+  enabled_tags_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* disabled_categories */:
+        disabled_categories_.emplace_back();
+        field.get(&disabled_categories_.back());
+        break;
+      case 2 /* enabled_categories */:
+        enabled_categories_.emplace_back();
+        field.get(&enabled_categories_.back());
+        break;
+      case 3 /* disabled_tags */:
+        disabled_tags_.emplace_back();
+        field.get(&disabled_tags_.back());
+        break;
+      case 4 /* enabled_tags */:
+        enabled_tags_.emplace_back();
+        field.get(&enabled_tags_.back());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TrackEventConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TrackEventConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TrackEventConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: disabled_categories
+  for (auto& it : disabled_categories_) {
+    msg->AppendString(1, it);
+  }
+
+  // Field 2: enabled_categories
+  for (auto& it : enabled_categories_) {
+    msg->AppendString(2, it);
+  }
+
+  // Field 3: disabled_tags
+  for (auto& it : disabled_tags_) {
+    msg->AppendString(3, it);
+  }
+
+  // Field 4: enabled_tags
+  for (auto& it : enabled_tags_) {
+    msg->AppendString(4, it);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/chrome/chrome_config.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/chrome/chrome_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_CHROME_CHROME_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_CHROME_CHROME_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeConfig;
+enum ChromeConfig_ClientPriority : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeConfig_ClientPriority : int {
+  ChromeConfig_ClientPriority_UNKNOWN = 0,
+  ChromeConfig_ClientPriority_BACKGROUND = 1,
+  ChromeConfig_ClientPriority_USER_INITIATED = 2,
+};
+
+class PERFETTO_EXPORT ChromeConfig : public ::protozero::CppMessageObj {
+ public:
+  using ClientPriority = ChromeConfig_ClientPriority;
+  static constexpr auto UNKNOWN = ChromeConfig_ClientPriority_UNKNOWN;
+  static constexpr auto BACKGROUND = ChromeConfig_ClientPriority_BACKGROUND;
+  static constexpr auto USER_INITIATED = ChromeConfig_ClientPriority_USER_INITIATED;
+  static constexpr auto ClientPriority_MIN = ChromeConfig_ClientPriority_UNKNOWN;
+  static constexpr auto ClientPriority_MAX = ChromeConfig_ClientPriority_USER_INITIATED;
+  enum FieldNumbers {
+    kTraceConfigFieldNumber = 1,
+    kPrivacyFilteringEnabledFieldNumber = 2,
+    kConvertToLegacyJsonFieldNumber = 3,
+    kClientPriorityFieldNumber = 4,
+    kJsonAgentLabelFilterFieldNumber = 5,
+  };
+
+  ChromeConfig();
+  ~ChromeConfig() override;
+  ChromeConfig(ChromeConfig&&) noexcept;
+  ChromeConfig& operator=(ChromeConfig&&);
+  ChromeConfig(const ChromeConfig&);
+  ChromeConfig& operator=(const ChromeConfig&);
+  bool operator==(const ChromeConfig&) const;
+  bool operator!=(const ChromeConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_config() const { return _has_field_[1]; }
+  const std::string& trace_config() const { return trace_config_; }
+  void set_trace_config(const std::string& value) { trace_config_ = value; _has_field_.set(1); }
+
+  bool has_privacy_filtering_enabled() const { return _has_field_[2]; }
+  bool privacy_filtering_enabled() const { return privacy_filtering_enabled_; }
+  void set_privacy_filtering_enabled(bool value) { privacy_filtering_enabled_ = value; _has_field_.set(2); }
+
+  bool has_convert_to_legacy_json() const { return _has_field_[3]; }
+  bool convert_to_legacy_json() const { return convert_to_legacy_json_; }
+  void set_convert_to_legacy_json(bool value) { convert_to_legacy_json_ = value; _has_field_.set(3); }
+
+  bool has_client_priority() const { return _has_field_[4]; }
+  ChromeConfig_ClientPriority client_priority() const { return client_priority_; }
+  void set_client_priority(ChromeConfig_ClientPriority value) { client_priority_ = value; _has_field_.set(4); }
+
+  bool has_json_agent_label_filter() const { return _has_field_[5]; }
+  const std::string& json_agent_label_filter() const { return json_agent_label_filter_; }
+  void set_json_agent_label_filter(const std::string& value) { json_agent_label_filter_ = value; _has_field_.set(5); }
+
+ private:
+  std::string trace_config_{};
+  bool privacy_filtering_enabled_{};
+  bool convert_to_legacy_json_{};
+  ChromeConfig_ClientPriority client_priority_{};
+  std::string json_agent_label_filter_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_CHROME_CHROME_CONFIG_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ChromeConfig::ChromeConfig() = default;
+ChromeConfig::~ChromeConfig() = default;
+ChromeConfig::ChromeConfig(const ChromeConfig&) = default;
+ChromeConfig& ChromeConfig::operator=(const ChromeConfig&) = default;
+ChromeConfig::ChromeConfig(ChromeConfig&&) noexcept = default;
+ChromeConfig& ChromeConfig::operator=(ChromeConfig&&) = default;
+
+bool ChromeConfig::operator==(const ChromeConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && trace_config_ == other.trace_config_
+   && privacy_filtering_enabled_ == other.privacy_filtering_enabled_
+   && convert_to_legacy_json_ == other.convert_to_legacy_json_
+   && client_priority_ == other.client_priority_
+   && json_agent_label_filter_ == other.json_agent_label_filter_;
+}
+
+bool ChromeConfig::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* trace_config */:
+        field.get(&trace_config_);
+        break;
+      case 2 /* privacy_filtering_enabled */:
+        field.get(&privacy_filtering_enabled_);
+        break;
+      case 3 /* convert_to_legacy_json */:
+        field.get(&convert_to_legacy_json_);
+        break;
+      case 4 /* client_priority */:
+        field.get(&client_priority_);
+        break;
+      case 5 /* json_agent_label_filter */:
+        field.get(&json_agent_label_filter_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: trace_config
+  if (_has_field_[1]) {
+    msg->AppendString(1, trace_config_);
+  }
+
+  // Field 2: privacy_filtering_enabled
+  if (_has_field_[2]) {
+    msg->AppendTinyVarInt(2, privacy_filtering_enabled_);
+  }
+
+  // Field 3: convert_to_legacy_json
+  if (_has_field_[3]) {
+    msg->AppendTinyVarInt(3, convert_to_legacy_json_);
+  }
+
+  // Field 4: client_priority
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, client_priority_);
+  }
+
+  // Field 5: json_agent_label_filter
+  if (_has_field_[5]) {
+    msg->AppendString(5, json_agent_label_filter_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/data_source_config.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/test_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TEST_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TEST_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TestConfig;
+class TestConfig_DummyFields;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT TestConfig : public ::protozero::CppMessageObj {
+ public:
+  using DummyFields = TestConfig_DummyFields;
+  enum FieldNumbers {
+    kMessageCountFieldNumber = 1,
+    kMaxMessagesPerSecondFieldNumber = 2,
+    kSeedFieldNumber = 3,
+    kMessageSizeFieldNumber = 4,
+    kSendBatchOnRegisterFieldNumber = 5,
+    kDummyFieldsFieldNumber = 6,
+  };
+
+  TestConfig();
+  ~TestConfig() override;
+  TestConfig(TestConfig&&) noexcept;
+  TestConfig& operator=(TestConfig&&);
+  TestConfig(const TestConfig&);
+  TestConfig& operator=(const TestConfig&);
+  bool operator==(const TestConfig&) const;
+  bool operator!=(const TestConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_message_count() const { return _has_field_[1]; }
+  uint32_t message_count() const { return message_count_; }
+  void set_message_count(uint32_t value) { message_count_ = value; _has_field_.set(1); }
+
+  bool has_max_messages_per_second() const { return _has_field_[2]; }
+  uint32_t max_messages_per_second() const { return max_messages_per_second_; }
+  void set_max_messages_per_second(uint32_t value) { max_messages_per_second_ = value; _has_field_.set(2); }
+
+  bool has_seed() const { return _has_field_[3]; }
+  uint32_t seed() const { return seed_; }
+  void set_seed(uint32_t value) { seed_ = value; _has_field_.set(3); }
+
+  bool has_message_size() const { return _has_field_[4]; }
+  uint32_t message_size() const { return message_size_; }
+  void set_message_size(uint32_t value) { message_size_ = value; _has_field_.set(4); }
+
+  bool has_send_batch_on_register() const { return _has_field_[5]; }
+  bool send_batch_on_register() const { return send_batch_on_register_; }
+  void set_send_batch_on_register(bool value) { send_batch_on_register_ = value; _has_field_.set(5); }
+
+  bool has_dummy_fields() const { return _has_field_[6]; }
+  const TestConfig_DummyFields& dummy_fields() const { return *dummy_fields_; }
+  TestConfig_DummyFields* mutable_dummy_fields() { _has_field_.set(6); return dummy_fields_.get(); }
+
+ private:
+  uint32_t message_count_{};
+  uint32_t max_messages_per_second_{};
+  uint32_t seed_{};
+  uint32_t message_size_{};
+  bool send_batch_on_register_{};
+  ::protozero::CopyablePtr<TestConfig_DummyFields> dummy_fields_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<7> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TestConfig_DummyFields : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kFieldUint32FieldNumber = 1,
+    kFieldInt32FieldNumber = 2,
+    kFieldUint64FieldNumber = 3,
+    kFieldInt64FieldNumber = 4,
+    kFieldFixed64FieldNumber = 5,
+    kFieldSfixed64FieldNumber = 6,
+    kFieldFixed32FieldNumber = 7,
+    kFieldSfixed32FieldNumber = 8,
+    kFieldDoubleFieldNumber = 9,
+    kFieldFloatFieldNumber = 10,
+    kFieldSint64FieldNumber = 11,
+    kFieldSint32FieldNumber = 12,
+    kFieldStringFieldNumber = 13,
+    kFieldBytesFieldNumber = 14,
+  };
+
+  TestConfig_DummyFields();
+  ~TestConfig_DummyFields() override;
+  TestConfig_DummyFields(TestConfig_DummyFields&&) noexcept;
+  TestConfig_DummyFields& operator=(TestConfig_DummyFields&&);
+  TestConfig_DummyFields(const TestConfig_DummyFields&);
+  TestConfig_DummyFields& operator=(const TestConfig_DummyFields&);
+  bool operator==(const TestConfig_DummyFields&) const;
+  bool operator!=(const TestConfig_DummyFields& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_field_uint32() const { return _has_field_[1]; }
+  uint32_t field_uint32() const { return field_uint32_; }
+  void set_field_uint32(uint32_t value) { field_uint32_ = value; _has_field_.set(1); }
+
+  bool has_field_int32() const { return _has_field_[2]; }
+  int32_t field_int32() const { return field_int32_; }
+  void set_field_int32(int32_t value) { field_int32_ = value; _has_field_.set(2); }
+
+  bool has_field_uint64() const { return _has_field_[3]; }
+  uint64_t field_uint64() const { return field_uint64_; }
+  void set_field_uint64(uint64_t value) { field_uint64_ = value; _has_field_.set(3); }
+
+  bool has_field_int64() const { return _has_field_[4]; }
+  int64_t field_int64() const { return field_int64_; }
+  void set_field_int64(int64_t value) { field_int64_ = value; _has_field_.set(4); }
+
+  bool has_field_fixed64() const { return _has_field_[5]; }
+  uint64_t field_fixed64() const { return field_fixed64_; }
+  void set_field_fixed64(uint64_t value) { field_fixed64_ = value; _has_field_.set(5); }
+
+  bool has_field_sfixed64() const { return _has_field_[6]; }
+  int64_t field_sfixed64() const { return field_sfixed64_; }
+  void set_field_sfixed64(int64_t value) { field_sfixed64_ = value; _has_field_.set(6); }
+
+  bool has_field_fixed32() const { return _has_field_[7]; }
+  uint32_t field_fixed32() const { return field_fixed32_; }
+  void set_field_fixed32(uint32_t value) { field_fixed32_ = value; _has_field_.set(7); }
+
+  bool has_field_sfixed32() const { return _has_field_[8]; }
+  int32_t field_sfixed32() const { return field_sfixed32_; }
+  void set_field_sfixed32(int32_t value) { field_sfixed32_ = value; _has_field_.set(8); }
+
+  bool has_field_double() const { return _has_field_[9]; }
+  double field_double() const { return field_double_; }
+  void set_field_double(double value) { field_double_ = value; _has_field_.set(9); }
+
+  bool has_field_float() const { return _has_field_[10]; }
+  float field_float() const { return field_float_; }
+  void set_field_float(float value) { field_float_ = value; _has_field_.set(10); }
+
+  bool has_field_sint64() const { return _has_field_[11]; }
+  int64_t field_sint64() const { return field_sint64_; }
+  void set_field_sint64(int64_t value) { field_sint64_ = value; _has_field_.set(11); }
+
+  bool has_field_sint32() const { return _has_field_[12]; }
+  int32_t field_sint32() const { return field_sint32_; }
+  void set_field_sint32(int32_t value) { field_sint32_ = value; _has_field_.set(12); }
+
+  bool has_field_string() const { return _has_field_[13]; }
+  const std::string& field_string() const { return field_string_; }
+  void set_field_string(const std::string& value) { field_string_ = value; _has_field_.set(13); }
+
+  bool has_field_bytes() const { return _has_field_[14]; }
+  const std::string& field_bytes() const { return field_bytes_; }
+  void set_field_bytes(const std::string& value) { field_bytes_ = value; _has_field_.set(14); }
+  void set_field_bytes(const void* p, size_t s) { field_bytes_.assign(reinterpret_cast<const char*>(p), s); _has_field_.set(14); }
+
+ private:
+  uint32_t field_uint32_{};
+  int32_t field_int32_{};
+  uint64_t field_uint64_{};
+  int64_t field_int64_{};
+  uint64_t field_fixed64_{};
+  int64_t field_sfixed64_{};
+  uint32_t field_fixed32_{};
+  int32_t field_sfixed32_{};
+  double field_double_{};
+  float field_float_{};
+  int64_t field_sint64_{};
+  int32_t field_sint32_{};
+  std::string field_string_{};
+  std::string field_bytes_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<15> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TEST_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/interceptor_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTOR_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTOR_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class InterceptorConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT InterceptorConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kConsoleConfigFieldNumber = 100,
+  };
+
+  InterceptorConfig();
+  ~InterceptorConfig() override;
+  InterceptorConfig(InterceptorConfig&&) noexcept;
+  InterceptorConfig& operator=(InterceptorConfig&&);
+  InterceptorConfig(const InterceptorConfig&);
+  InterceptorConfig& operator=(const InterceptorConfig&);
+  bool operator==(const InterceptorConfig&) const;
+  bool operator!=(const InterceptorConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  const std::string& console_config_raw() const { return console_config_; }
+  void set_console_config_raw(const std::string& raw) { console_config_ = raw; _has_field_.set(100); }
+
+ private:
+  std::string name_{};
+  std::string console_config_;  // [lazy=true]
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<101> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTOR_CONFIG_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/interceptor_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/interceptors/console_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+DataSourceConfig::DataSourceConfig() = default;
+DataSourceConfig::~DataSourceConfig() = default;
+DataSourceConfig::DataSourceConfig(const DataSourceConfig&) = default;
+DataSourceConfig& DataSourceConfig::operator=(const DataSourceConfig&) = default;
+DataSourceConfig::DataSourceConfig(DataSourceConfig&&) noexcept = default;
+DataSourceConfig& DataSourceConfig::operator=(DataSourceConfig&&) = default;
+
+bool DataSourceConfig::operator==(const DataSourceConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && name_ == other.name_
+   && target_buffer_ == other.target_buffer_
+   && trace_duration_ms_ == other.trace_duration_ms_
+   && stop_timeout_ms_ == other.stop_timeout_ms_
+   && enable_extra_guardrails_ == other.enable_extra_guardrails_
+   && session_initiator_ == other.session_initiator_
+   && tracing_session_id_ == other.tracing_session_id_
+   && ftrace_config_ == other.ftrace_config_
+   && inode_file_config_ == other.inode_file_config_
+   && process_stats_config_ == other.process_stats_config_
+   && sys_stats_config_ == other.sys_stats_config_
+   && heapprofd_config_ == other.heapprofd_config_
+   && java_hprof_config_ == other.java_hprof_config_
+   && android_power_config_ == other.android_power_config_
+   && android_log_config_ == other.android_log_config_
+   && gpu_counter_config_ == other.gpu_counter_config_
+   && packages_list_config_ == other.packages_list_config_
+   && perf_event_config_ == other.perf_event_config_
+   && vulkan_memory_config_ == other.vulkan_memory_config_
+   && track_event_config_ == other.track_event_config_
+   && android_polled_state_config_ == other.android_polled_state_config_
+   && chrome_config_ == other.chrome_config_
+   && interceptor_config_ == other.interceptor_config_
+   && legacy_config_ == other.legacy_config_
+   && for_testing_ == other.for_testing_;
+}
+
+bool DataSourceConfig::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* name */:
+        field.get(&name_);
+        break;
+      case 2 /* target_buffer */:
+        field.get(&target_buffer_);
+        break;
+      case 3 /* trace_duration_ms */:
+        field.get(&trace_duration_ms_);
+        break;
+      case 7 /* stop_timeout_ms */:
+        field.get(&stop_timeout_ms_);
+        break;
+      case 6 /* enable_extra_guardrails */:
+        field.get(&enable_extra_guardrails_);
+        break;
+      case 8 /* session_initiator */:
+        field.get(&session_initiator_);
+        break;
+      case 4 /* tracing_session_id */:
+        field.get(&tracing_session_id_);
+        break;
+      case 100 /* ftrace_config */:
+        ftrace_config_ = field.as_std_string();
+        break;
+      case 102 /* inode_file_config */:
+        inode_file_config_ = field.as_std_string();
+        break;
+      case 103 /* process_stats_config */:
+        process_stats_config_ = field.as_std_string();
+        break;
+      case 104 /* sys_stats_config */:
+        sys_stats_config_ = field.as_std_string();
+        break;
+      case 105 /* heapprofd_config */:
+        heapprofd_config_ = field.as_std_string();
+        break;
+      case 110 /* java_hprof_config */:
+        java_hprof_config_ = field.as_std_string();
+        break;
+      case 106 /* android_power_config */:
+        android_power_config_ = field.as_std_string();
+        break;
+      case 107 /* android_log_config */:
+        android_log_config_ = field.as_std_string();
+        break;
+      case 108 /* gpu_counter_config */:
+        gpu_counter_config_ = field.as_std_string();
+        break;
+      case 109 /* packages_list_config */:
+        packages_list_config_ = field.as_std_string();
+        break;
+      case 111 /* perf_event_config */:
+        perf_event_config_ = field.as_std_string();
+        break;
+      case 112 /* vulkan_memory_config */:
+        vulkan_memory_config_ = field.as_std_string();
+        break;
+      case 113 /* track_event_config */:
+        track_event_config_ = field.as_std_string();
+        break;
+      case 114 /* android_polled_state_config */:
+        android_polled_state_config_ = field.as_std_string();
+        break;
+      case 101 /* chrome_config */:
+        (*chrome_config_).ParseFromArray(field.data(), field.size());
+        break;
+      case 115 /* interceptor_config */:
+        (*interceptor_config_).ParseFromArray(field.data(), field.size());
+        break;
+      case 1000 /* legacy_config */:
+        field.get(&legacy_config_);
+        break;
+      case 1001 /* for_testing */:
+        (*for_testing_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string DataSourceConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> DataSourceConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void DataSourceConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: name
+  if (_has_field_[1]) {
+    msg->AppendString(1, name_);
+  }
+
+  // Field 2: target_buffer
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, target_buffer_);
+  }
+
+  // Field 3: trace_duration_ms
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, trace_duration_ms_);
+  }
+
+  // Field 7: stop_timeout_ms
+  if (_has_field_[7]) {
+    msg->AppendVarInt(7, stop_timeout_ms_);
+  }
+
+  // Field 6: enable_extra_guardrails
+  if (_has_field_[6]) {
+    msg->AppendTinyVarInt(6, enable_extra_guardrails_);
+  }
+
+  // Field 8: session_initiator
+  if (_has_field_[8]) {
+    msg->AppendVarInt(8, session_initiator_);
+  }
+
+  // Field 4: tracing_session_id
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, tracing_session_id_);
+  }
+
+  // Field 100: ftrace_config
+  if (_has_field_[100]) {
+    msg->AppendString(100, ftrace_config_);
+  }
+
+  // Field 102: inode_file_config
+  if (_has_field_[102]) {
+    msg->AppendString(102, inode_file_config_);
+  }
+
+  // Field 103: process_stats_config
+  if (_has_field_[103]) {
+    msg->AppendString(103, process_stats_config_);
+  }
+
+  // Field 104: sys_stats_config
+  if (_has_field_[104]) {
+    msg->AppendString(104, sys_stats_config_);
+  }
+
+  // Field 105: heapprofd_config
+  if (_has_field_[105]) {
+    msg->AppendString(105, heapprofd_config_);
+  }
+
+  // Field 110: java_hprof_config
+  if (_has_field_[110]) {
+    msg->AppendString(110, java_hprof_config_);
+  }
+
+  // Field 106: android_power_config
+  if (_has_field_[106]) {
+    msg->AppendString(106, android_power_config_);
+  }
+
+  // Field 107: android_log_config
+  if (_has_field_[107]) {
+    msg->AppendString(107, android_log_config_);
+  }
+
+  // Field 108: gpu_counter_config
+  if (_has_field_[108]) {
+    msg->AppendString(108, gpu_counter_config_);
+  }
+
+  // Field 109: packages_list_config
+  if (_has_field_[109]) {
+    msg->AppendString(109, packages_list_config_);
+  }
+
+  // Field 111: perf_event_config
+  if (_has_field_[111]) {
+    msg->AppendString(111, perf_event_config_);
+  }
+
+  // Field 112: vulkan_memory_config
+  if (_has_field_[112]) {
+    msg->AppendString(112, vulkan_memory_config_);
+  }
+
+  // Field 113: track_event_config
+  if (_has_field_[113]) {
+    msg->AppendString(113, track_event_config_);
+  }
+
+  // Field 114: android_polled_state_config
+  if (_has_field_[114]) {
+    msg->AppendString(114, android_polled_state_config_);
+  }
+
+  // Field 101: chrome_config
+  if (_has_field_[101]) {
+    (*chrome_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(101));
+  }
+
+  // Field 115: interceptor_config
+  if (_has_field_[115]) {
+    (*interceptor_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(115));
+  }
+
+  // Field 1000: legacy_config
+  if (_has_field_[1000]) {
+    msg->AppendString(1000, legacy_config_);
+  }
+
+  // Field 1001: for_testing
+  if (_has_field_[1001]) {
+    (*for_testing_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1001));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/interceptor_config.gen.cc
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/interceptor_config.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+InterceptorConfig::InterceptorConfig() = default;
+InterceptorConfig::~InterceptorConfig() = default;
+InterceptorConfig::InterceptorConfig(const InterceptorConfig&) = default;
+InterceptorConfig& InterceptorConfig::operator=(const InterceptorConfig&) = default;
+InterceptorConfig::InterceptorConfig(InterceptorConfig&&) noexcept = default;
+InterceptorConfig& InterceptorConfig::operator=(InterceptorConfig&&) = default;
+
+bool InterceptorConfig::operator==(const InterceptorConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && name_ == other.name_
+   && console_config_ == other.console_config_;
+}
+
+bool InterceptorConfig::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* name */:
+        field.get(&name_);
+        break;
+      case 100 /* console_config */:
+        console_config_ = field.as_std_string();
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string InterceptorConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> InterceptorConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void InterceptorConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: name
+  if (_has_field_[1]) {
+    msg->AppendString(1, name_);
+  }
+
+  // Field 100: console_config
+  if (_has_field_[100]) {
+    msg->AppendString(100, console_config_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/stress_test_config.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/stress_test_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_STRESS_TEST_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_STRESS_TEST_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class StressTestConfig;
+class StressTestConfig_WriterTiming;
+class TraceConfig;
+class TraceConfig_TraceFilter;
+class TraceConfig_IncidentReportConfig;
+class TraceConfig_IncrementalStateConfig;
+class TraceConfig_TriggerConfig;
+class TraceConfig_TriggerConfig_Trigger;
+class TraceConfig_GuardrailOverrides;
+class TraceConfig_StatsdMetadata;
+class TraceConfig_ProducerConfig;
+class TraceConfig_BuiltinDataSource;
+class TraceConfig_DataSource;
+class DataSourceConfig;
+class TestConfig;
+class TestConfig_DummyFields;
+class InterceptorConfig;
+class ChromeConfig;
+class TraceConfig_BufferConfig;
+enum TraceConfig_LockdownModeOperation : int;
+enum TraceConfig_CompressionType : int;
+enum TraceConfig_StatsdLogging : int;
+enum TraceConfig_TriggerConfig_TriggerMode : int;
+enum BuiltinClock : int;
+enum DataSourceConfig_SessionInitiator : int;
+enum ChromeConfig_ClientPriority : int;
+enum TraceConfig_BufferConfig_FillPolicy : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT StressTestConfig : public ::protozero::CppMessageObj {
+ public:
+  using WriterTiming = StressTestConfig_WriterTiming;
+  enum FieldNumbers {
+    kTraceConfigFieldNumber = 1,
+    kShmemSizeKbFieldNumber = 2,
+    kShmemPageSizeKbFieldNumber = 3,
+    kNumProcessesFieldNumber = 4,
+    kNumThreadsFieldNumber = 5,
+    kMaxEventsFieldNumber = 6,
+    kNestingFieldNumber = 7,
+    kSteadyStateTimingsFieldNumber = 8,
+    kBurstPeriodMsFieldNumber = 9,
+    kBurstDurationMsFieldNumber = 10,
+    kBurstTimingsFieldNumber = 11,
+  };
+
+  StressTestConfig();
+  ~StressTestConfig() override;
+  StressTestConfig(StressTestConfig&&) noexcept;
+  StressTestConfig& operator=(StressTestConfig&&);
+  StressTestConfig(const StressTestConfig&);
+  StressTestConfig& operator=(const StressTestConfig&);
+  bool operator==(const StressTestConfig&) const;
+  bool operator!=(const StressTestConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_config() const { return _has_field_[1]; }
+  const TraceConfig& trace_config() const { return *trace_config_; }
+  TraceConfig* mutable_trace_config() { _has_field_.set(1); return trace_config_.get(); }
+
+  bool has_shmem_size_kb() const { return _has_field_[2]; }
+  uint32_t shmem_size_kb() const { return shmem_size_kb_; }
+  void set_shmem_size_kb(uint32_t value) { shmem_size_kb_ = value; _has_field_.set(2); }
+
+  bool has_shmem_page_size_kb() const { return _has_field_[3]; }
+  uint32_t shmem_page_size_kb() const { return shmem_page_size_kb_; }
+  void set_shmem_page_size_kb(uint32_t value) { shmem_page_size_kb_ = value; _has_field_.set(3); }
+
+  bool has_num_processes() const { return _has_field_[4]; }
+  uint32_t num_processes() const { return num_processes_; }
+  void set_num_processes(uint32_t value) { num_processes_ = value; _has_field_.set(4); }
+
+  bool has_num_threads() const { return _has_field_[5]; }
+  uint32_t num_threads() const { return num_threads_; }
+  void set_num_threads(uint32_t value) { num_threads_ = value; _has_field_.set(5); }
+
+  bool has_max_events() const { return _has_field_[6]; }
+  uint32_t max_events() const { return max_events_; }
+  void set_max_events(uint32_t value) { max_events_ = value; _has_field_.set(6); }
+
+  bool has_nesting() const { return _has_field_[7]; }
+  uint32_t nesting() const { return nesting_; }
+  void set_nesting(uint32_t value) { nesting_ = value; _has_field_.set(7); }
+
+  bool has_steady_state_timings() const { return _has_field_[8]; }
+  const StressTestConfig_WriterTiming& steady_state_timings() const { return *steady_state_timings_; }
+  StressTestConfig_WriterTiming* mutable_steady_state_timings() { _has_field_.set(8); return steady_state_timings_.get(); }
+
+  bool has_burst_period_ms() const { return _has_field_[9]; }
+  uint32_t burst_period_ms() const { return burst_period_ms_; }
+  void set_burst_period_ms(uint32_t value) { burst_period_ms_ = value; _has_field_.set(9); }
+
+  bool has_burst_duration_ms() const { return _has_field_[10]; }
+  uint32_t burst_duration_ms() const { return burst_duration_ms_; }
+  void set_burst_duration_ms(uint32_t value) { burst_duration_ms_ = value; _has_field_.set(10); }
+
+  bool has_burst_timings() const { return _has_field_[11]; }
+  const StressTestConfig_WriterTiming& burst_timings() const { return *burst_timings_; }
+  StressTestConfig_WriterTiming* mutable_burst_timings() { _has_field_.set(11); return burst_timings_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<TraceConfig> trace_config_;
+  uint32_t shmem_size_kb_{};
+  uint32_t shmem_page_size_kb_{};
+  uint32_t num_processes_{};
+  uint32_t num_threads_{};
+  uint32_t max_events_{};
+  uint32_t nesting_{};
+  ::protozero::CopyablePtr<StressTestConfig_WriterTiming> steady_state_timings_;
+  uint32_t burst_period_ms_{};
+  uint32_t burst_duration_ms_{};
+  ::protozero::CopyablePtr<StressTestConfig_WriterTiming> burst_timings_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<12> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT StressTestConfig_WriterTiming : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kPayloadMeanFieldNumber = 1,
+    kPayloadStddevFieldNumber = 2,
+    kRateMeanFieldNumber = 3,
+    kRateStddevFieldNumber = 4,
+    kPayloadWriteTimeMsFieldNumber = 5,
+  };
+
+  StressTestConfig_WriterTiming();
+  ~StressTestConfig_WriterTiming() override;
+  StressTestConfig_WriterTiming(StressTestConfig_WriterTiming&&) noexcept;
+  StressTestConfig_WriterTiming& operator=(StressTestConfig_WriterTiming&&);
+  StressTestConfig_WriterTiming(const StressTestConfig_WriterTiming&);
+  StressTestConfig_WriterTiming& operator=(const StressTestConfig_WriterTiming&);
+  bool operator==(const StressTestConfig_WriterTiming&) const;
+  bool operator!=(const StressTestConfig_WriterTiming& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_payload_mean() const { return _has_field_[1]; }
+  double payload_mean() const { return payload_mean_; }
+  void set_payload_mean(double value) { payload_mean_ = value; _has_field_.set(1); }
+
+  bool has_payload_stddev() const { return _has_field_[2]; }
+  double payload_stddev() const { return payload_stddev_; }
+  void set_payload_stddev(double value) { payload_stddev_ = value; _has_field_.set(2); }
+
+  bool has_rate_mean() const { return _has_field_[3]; }
+  double rate_mean() const { return rate_mean_; }
+  void set_rate_mean(double value) { rate_mean_ = value; _has_field_.set(3); }
+
+  bool has_rate_stddev() const { return _has_field_[4]; }
+  double rate_stddev() const { return rate_stddev_; }
+  void set_rate_stddev(double value) { rate_stddev_ = value; _has_field_.set(4); }
+
+  bool has_payload_write_time_ms() const { return _has_field_[5]; }
+  uint32_t payload_write_time_ms() const { return payload_write_time_ms_; }
+  void set_payload_write_time_ms(uint32_t value) { payload_write_time_ms_ = value; _has_field_.set(5); }
+
+ private:
+  double payload_mean_{};
+  double payload_stddev_{};
+  double rate_mean_{};
+  double rate_stddev_{};
+  uint32_t payload_write_time_ms_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_STRESS_TEST_CONFIG_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/stress_test_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/trace_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/track_event/track_event_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/sys_stats/sys_stats_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/perf_event_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/perf_events.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/java_hprof_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/process_stats/process_stats_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/power/android_power_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/interceptor_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/interceptors/console_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/inode_file/inode_file_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/vulkan_memory_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_counter_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/ftrace/ftrace_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/android/packages_list_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_polled_state_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_log_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+StressTestConfig::StressTestConfig() = default;
+StressTestConfig::~StressTestConfig() = default;
+StressTestConfig::StressTestConfig(const StressTestConfig&) = default;
+StressTestConfig& StressTestConfig::operator=(const StressTestConfig&) = default;
+StressTestConfig::StressTestConfig(StressTestConfig&&) noexcept = default;
+StressTestConfig& StressTestConfig::operator=(StressTestConfig&&) = default;
+
+bool StressTestConfig::operator==(const StressTestConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && trace_config_ == other.trace_config_
+   && shmem_size_kb_ == other.shmem_size_kb_
+   && shmem_page_size_kb_ == other.shmem_page_size_kb_
+   && num_processes_ == other.num_processes_
+   && num_threads_ == other.num_threads_
+   && max_events_ == other.max_events_
+   && nesting_ == other.nesting_
+   && steady_state_timings_ == other.steady_state_timings_
+   && burst_period_ms_ == other.burst_period_ms_
+   && burst_duration_ms_ == other.burst_duration_ms_
+   && burst_timings_ == other.burst_timings_;
+}
+
+bool StressTestConfig::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* trace_config */:
+        (*trace_config_).ParseFromArray(field.data(), field.size());
+        break;
+      case 2 /* shmem_size_kb */:
+        field.get(&shmem_size_kb_);
+        break;
+      case 3 /* shmem_page_size_kb */:
+        field.get(&shmem_page_size_kb_);
+        break;
+      case 4 /* num_processes */:
+        field.get(&num_processes_);
+        break;
+      case 5 /* num_threads */:
+        field.get(&num_threads_);
+        break;
+      case 6 /* max_events */:
+        field.get(&max_events_);
+        break;
+      case 7 /* nesting */:
+        field.get(&nesting_);
+        break;
+      case 8 /* steady_state_timings */:
+        (*steady_state_timings_).ParseFromArray(field.data(), field.size());
+        break;
+      case 9 /* burst_period_ms */:
+        field.get(&burst_period_ms_);
+        break;
+      case 10 /* burst_duration_ms */:
+        field.get(&burst_duration_ms_);
+        break;
+      case 11 /* burst_timings */:
+        (*burst_timings_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string StressTestConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> StressTestConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void StressTestConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: trace_config
+  if (_has_field_[1]) {
+    (*trace_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  // Field 2: shmem_size_kb
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, shmem_size_kb_);
+  }
+
+  // Field 3: shmem_page_size_kb
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, shmem_page_size_kb_);
+  }
+
+  // Field 4: num_processes
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, num_processes_);
+  }
+
+  // Field 5: num_threads
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, num_threads_);
+  }
+
+  // Field 6: max_events
+  if (_has_field_[6]) {
+    msg->AppendVarInt(6, max_events_);
+  }
+
+  // Field 7: nesting
+  if (_has_field_[7]) {
+    msg->AppendVarInt(7, nesting_);
+  }
+
+  // Field 8: steady_state_timings
+  if (_has_field_[8]) {
+    (*steady_state_timings_).Serialize(msg->BeginNestedMessage<::protozero::Message>(8));
+  }
+
+  // Field 9: burst_period_ms
+  if (_has_field_[9]) {
+    msg->AppendVarInt(9, burst_period_ms_);
+  }
+
+  // Field 10: burst_duration_ms
+  if (_has_field_[10]) {
+    msg->AppendVarInt(10, burst_duration_ms_);
+  }
+
+  // Field 11: burst_timings
+  if (_has_field_[11]) {
+    (*burst_timings_).Serialize(msg->BeginNestedMessage<::protozero::Message>(11));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+StressTestConfig_WriterTiming::StressTestConfig_WriterTiming() = default;
+StressTestConfig_WriterTiming::~StressTestConfig_WriterTiming() = default;
+StressTestConfig_WriterTiming::StressTestConfig_WriterTiming(const StressTestConfig_WriterTiming&) = default;
+StressTestConfig_WriterTiming& StressTestConfig_WriterTiming::operator=(const StressTestConfig_WriterTiming&) = default;
+StressTestConfig_WriterTiming::StressTestConfig_WriterTiming(StressTestConfig_WriterTiming&&) noexcept = default;
+StressTestConfig_WriterTiming& StressTestConfig_WriterTiming::operator=(StressTestConfig_WriterTiming&&) = default;
+
+bool StressTestConfig_WriterTiming::operator==(const StressTestConfig_WriterTiming& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && payload_mean_ == other.payload_mean_
+   && payload_stddev_ == other.payload_stddev_
+   && rate_mean_ == other.rate_mean_
+   && rate_stddev_ == other.rate_stddev_
+   && payload_write_time_ms_ == other.payload_write_time_ms_;
+}
+
+bool StressTestConfig_WriterTiming::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* payload_mean */:
+        field.get(&payload_mean_);
+        break;
+      case 2 /* payload_stddev */:
+        field.get(&payload_stddev_);
+        break;
+      case 3 /* rate_mean */:
+        field.get(&rate_mean_);
+        break;
+      case 4 /* rate_stddev */:
+        field.get(&rate_stddev_);
+        break;
+      case 5 /* payload_write_time_ms */:
+        field.get(&payload_write_time_ms_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string StressTestConfig_WriterTiming::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> StressTestConfig_WriterTiming::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void StressTestConfig_WriterTiming::Serialize(::protozero::Message* msg) const {
+  // Field 1: payload_mean
+  if (_has_field_[1]) {
+    msg->AppendFixed(1, payload_mean_);
+  }
+
+  // Field 2: payload_stddev
+  if (_has_field_[2]) {
+    msg->AppendFixed(2, payload_stddev_);
+  }
+
+  // Field 3: rate_mean
+  if (_has_field_[3]) {
+    msg->AppendFixed(3, rate_mean_);
+  }
+
+  // Field 4: rate_stddev
+  if (_has_field_[4]) {
+    msg->AppendFixed(4, rate_stddev_);
+  }
+
+  // Field 5: payload_write_time_ms
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, payload_write_time_ms_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/test_config.gen.cc
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+TestConfig::TestConfig() = default;
+TestConfig::~TestConfig() = default;
+TestConfig::TestConfig(const TestConfig&) = default;
+TestConfig& TestConfig::operator=(const TestConfig&) = default;
+TestConfig::TestConfig(TestConfig&&) noexcept = default;
+TestConfig& TestConfig::operator=(TestConfig&&) = default;
+
+bool TestConfig::operator==(const TestConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && message_count_ == other.message_count_
+   && max_messages_per_second_ == other.max_messages_per_second_
+   && seed_ == other.seed_
+   && message_size_ == other.message_size_
+   && send_batch_on_register_ == other.send_batch_on_register_
+   && dummy_fields_ == other.dummy_fields_;
+}
+
+bool TestConfig::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* message_count */:
+        field.get(&message_count_);
+        break;
+      case 2 /* max_messages_per_second */:
+        field.get(&max_messages_per_second_);
+        break;
+      case 3 /* seed */:
+        field.get(&seed_);
+        break;
+      case 4 /* message_size */:
+        field.get(&message_size_);
+        break;
+      case 5 /* send_batch_on_register */:
+        field.get(&send_batch_on_register_);
+        break;
+      case 6 /* dummy_fields */:
+        (*dummy_fields_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TestConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TestConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TestConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: message_count
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, message_count_);
+  }
+
+  // Field 2: max_messages_per_second
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, max_messages_per_second_);
+  }
+
+  // Field 3: seed
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, seed_);
+  }
+
+  // Field 4: message_size
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, message_size_);
+  }
+
+  // Field 5: send_batch_on_register
+  if (_has_field_[5]) {
+    msg->AppendTinyVarInt(5, send_batch_on_register_);
+  }
+
+  // Field 6: dummy_fields
+  if (_has_field_[6]) {
+    (*dummy_fields_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TestConfig_DummyFields::TestConfig_DummyFields() = default;
+TestConfig_DummyFields::~TestConfig_DummyFields() = default;
+TestConfig_DummyFields::TestConfig_DummyFields(const TestConfig_DummyFields&) = default;
+TestConfig_DummyFields& TestConfig_DummyFields::operator=(const TestConfig_DummyFields&) = default;
+TestConfig_DummyFields::TestConfig_DummyFields(TestConfig_DummyFields&&) noexcept = default;
+TestConfig_DummyFields& TestConfig_DummyFields::operator=(TestConfig_DummyFields&&) = default;
+
+bool TestConfig_DummyFields::operator==(const TestConfig_DummyFields& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && field_uint32_ == other.field_uint32_
+   && field_int32_ == other.field_int32_
+   && field_uint64_ == other.field_uint64_
+   && field_int64_ == other.field_int64_
+   && field_fixed64_ == other.field_fixed64_
+   && field_sfixed64_ == other.field_sfixed64_
+   && field_fixed32_ == other.field_fixed32_
+   && field_sfixed32_ == other.field_sfixed32_
+   && field_double_ == other.field_double_
+   && field_float_ == other.field_float_
+   && field_sint64_ == other.field_sint64_
+   && field_sint32_ == other.field_sint32_
+   && field_string_ == other.field_string_
+   && field_bytes_ == other.field_bytes_;
+}
+
+bool TestConfig_DummyFields::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* field_uint32 */:
+        field.get(&field_uint32_);
+        break;
+      case 2 /* field_int32 */:
+        field.get(&field_int32_);
+        break;
+      case 3 /* field_uint64 */:
+        field.get(&field_uint64_);
+        break;
+      case 4 /* field_int64 */:
+        field.get(&field_int64_);
+        break;
+      case 5 /* field_fixed64 */:
+        field.get(&field_fixed64_);
+        break;
+      case 6 /* field_sfixed64 */:
+        field.get(&field_sfixed64_);
+        break;
+      case 7 /* field_fixed32 */:
+        field.get(&field_fixed32_);
+        break;
+      case 8 /* field_sfixed32 */:
+        field.get(&field_sfixed32_);
+        break;
+      case 9 /* field_double */:
+        field.get(&field_double_);
+        break;
+      case 10 /* field_float */:
+        field.get(&field_float_);
+        break;
+      case 11 /* field_sint64 */:
+        field.get_signed(&field_sint64_);
+        break;
+      case 12 /* field_sint32 */:
+        field.get_signed(&field_sint32_);
+        break;
+      case 13 /* field_string */:
+        field.get(&field_string_);
+        break;
+      case 14 /* field_bytes */:
+        field.get(&field_bytes_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TestConfig_DummyFields::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TestConfig_DummyFields::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TestConfig_DummyFields::Serialize(::protozero::Message* msg) const {
+  // Field 1: field_uint32
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, field_uint32_);
+  }
+
+  // Field 2: field_int32
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, field_int32_);
+  }
+
+  // Field 3: field_uint64
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, field_uint64_);
+  }
+
+  // Field 4: field_int64
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, field_int64_);
+  }
+
+  // Field 5: field_fixed64
+  if (_has_field_[5]) {
+    msg->AppendFixed(5, field_fixed64_);
+  }
+
+  // Field 6: field_sfixed64
+  if (_has_field_[6]) {
+    msg->AppendFixed(6, field_sfixed64_);
+  }
+
+  // Field 7: field_fixed32
+  if (_has_field_[7]) {
+    msg->AppendFixed(7, field_fixed32_);
+  }
+
+  // Field 8: field_sfixed32
+  if (_has_field_[8]) {
+    msg->AppendFixed(8, field_sfixed32_);
+  }
+
+  // Field 9: field_double
+  if (_has_field_[9]) {
+    msg->AppendFixed(9, field_double_);
+  }
+
+  // Field 10: field_float
+  if (_has_field_[10]) {
+    msg->AppendFixed(10, field_float_);
+  }
+
+  // Field 11: field_sint64
+  if (_has_field_[11]) {
+    msg->AppendSignedVarInt(11, field_sint64_);
+  }
+
+  // Field 12: field_sint32
+  if (_has_field_[12]) {
+    msg->AppendSignedVarInt(12, field_sint32_);
+  }
+
+  // Field 13: field_string
+  if (_has_field_[13]) {
+    msg->AppendString(13, field_string_);
+  }
+
+  // Field 14: field_bytes
+  if (_has_field_[14]) {
+    msg->AppendString(14, field_bytes_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/trace_config.gen.cc
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/config/trace_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/track_event/track_event_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/sys_stats/sys_stats_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/perf_event_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/perf_events.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/java_hprof_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/process_stats/process_stats_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/power/android_power_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/interceptor_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/interceptors/console_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/inode_file/inode_file_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/vulkan_memory_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_counter_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/ftrace/ftrace_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/android/packages_list_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_polled_state_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_log_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+TraceConfig::TraceConfig() = default;
+TraceConfig::~TraceConfig() = default;
+TraceConfig::TraceConfig(const TraceConfig&) = default;
+TraceConfig& TraceConfig::operator=(const TraceConfig&) = default;
+TraceConfig::TraceConfig(TraceConfig&&) noexcept = default;
+TraceConfig& TraceConfig::operator=(TraceConfig&&) = default;
+
+bool TraceConfig::operator==(const TraceConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && buffers_ == other.buffers_
+   && data_sources_ == other.data_sources_
+   && builtin_data_sources_ == other.builtin_data_sources_
+   && duration_ms_ == other.duration_ms_
+   && enable_extra_guardrails_ == other.enable_extra_guardrails_
+   && lockdown_mode_ == other.lockdown_mode_
+   && producers_ == other.producers_
+   && statsd_metadata_ == other.statsd_metadata_
+   && write_into_file_ == other.write_into_file_
+   && output_path_ == other.output_path_
+   && file_write_period_ms_ == other.file_write_period_ms_
+   && max_file_size_bytes_ == other.max_file_size_bytes_
+   && guardrail_overrides_ == other.guardrail_overrides_
+   && deferred_start_ == other.deferred_start_
+   && flush_period_ms_ == other.flush_period_ms_
+   && flush_timeout_ms_ == other.flush_timeout_ms_
+   && data_source_stop_timeout_ms_ == other.data_source_stop_timeout_ms_
+   && notify_traceur_ == other.notify_traceur_
+   && bugreport_score_ == other.bugreport_score_
+   && trigger_config_ == other.trigger_config_
+   && activate_triggers_ == other.activate_triggers_
+   && incremental_state_config_ == other.incremental_state_config_
+   && allow_user_build_tracing_ == other.allow_user_build_tracing_
+   && unique_session_name_ == other.unique_session_name_
+   && compression_type_ == other.compression_type_
+   && incident_report_config_ == other.incident_report_config_
+   && statsd_logging_ == other.statsd_logging_
+   && trace_uuid_msb_ == other.trace_uuid_msb_
+   && trace_uuid_lsb_ == other.trace_uuid_lsb_
+   && trace_filter_ == other.trace_filter_;
+}
+
+int TraceConfig::buffers_size() const { return static_cast<int>(buffers_.size()); }
+void TraceConfig::clear_buffers() { buffers_.clear(); }
+TraceConfig_BufferConfig* TraceConfig::add_buffers() { buffers_.emplace_back(); return &buffers_.back(); }
+int TraceConfig::data_sources_size() const { return static_cast<int>(data_sources_.size()); }
+void TraceConfig::clear_data_sources() { data_sources_.clear(); }
+TraceConfig_DataSource* TraceConfig::add_data_sources() { data_sources_.emplace_back(); return &data_sources_.back(); }
+int TraceConfig::producers_size() const { return static_cast<int>(producers_.size()); }
+void TraceConfig::clear_producers() { producers_.clear(); }
+TraceConfig_ProducerConfig* TraceConfig::add_producers() { producers_.emplace_back(); return &producers_.back(); }
+bool TraceConfig::ParseFromArray(const void* raw, size_t size) {
+  buffers_.clear();
+  data_sources_.clear();
+  producers_.clear();
+  activate_triggers_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* buffers */:
+        buffers_.emplace_back();
+        buffers_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 2 /* data_sources */:
+        data_sources_.emplace_back();
+        data_sources_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 20 /* builtin_data_sources */:
+        (*builtin_data_sources_).ParseFromArray(field.data(), field.size());
+        break;
+      case 3 /* duration_ms */:
+        field.get(&duration_ms_);
+        break;
+      case 4 /* enable_extra_guardrails */:
+        field.get(&enable_extra_guardrails_);
+        break;
+      case 5 /* lockdown_mode */:
+        field.get(&lockdown_mode_);
+        break;
+      case 6 /* producers */:
+        producers_.emplace_back();
+        producers_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 7 /* statsd_metadata */:
+        (*statsd_metadata_).ParseFromArray(field.data(), field.size());
+        break;
+      case 8 /* write_into_file */:
+        field.get(&write_into_file_);
+        break;
+      case 29 /* output_path */:
+        field.get(&output_path_);
+        break;
+      case 9 /* file_write_period_ms */:
+        field.get(&file_write_period_ms_);
+        break;
+      case 10 /* max_file_size_bytes */:
+        field.get(&max_file_size_bytes_);
+        break;
+      case 11 /* guardrail_overrides */:
+        (*guardrail_overrides_).ParseFromArray(field.data(), field.size());
+        break;
+      case 12 /* deferred_start */:
+        field.get(&deferred_start_);
+        break;
+      case 13 /* flush_period_ms */:
+        field.get(&flush_period_ms_);
+        break;
+      case 14 /* flush_timeout_ms */:
+        field.get(&flush_timeout_ms_);
+        break;
+      case 23 /* data_source_stop_timeout_ms */:
+        field.get(&data_source_stop_timeout_ms_);
+        break;
+      case 16 /* notify_traceur */:
+        field.get(&notify_traceur_);
+        break;
+      case 30 /* bugreport_score */:
+        field.get(&bugreport_score_);
+        break;
+      case 17 /* trigger_config */:
+        (*trigger_config_).ParseFromArray(field.data(), field.size());
+        break;
+      case 18 /* activate_triggers */:
+        activate_triggers_.emplace_back();
+        field.get(&activate_triggers_.back());
+        break;
+      case 21 /* incremental_state_config */:
+        (*incremental_state_config_).ParseFromArray(field.data(), field.size());
+        break;
+      case 19 /* allow_user_build_tracing */:
+        field.get(&allow_user_build_tracing_);
+        break;
+      case 22 /* unique_session_name */:
+        field.get(&unique_session_name_);
+        break;
+      case 24 /* compression_type */:
+        field.get(&compression_type_);
+        break;
+      case 25 /* incident_report_config */:
+        (*incident_report_config_).ParseFromArray(field.data(), field.size());
+        break;
+      case 31 /* statsd_logging */:
+        field.get(&statsd_logging_);
+        break;
+      case 27 /* trace_uuid_msb */:
+        field.get(&trace_uuid_msb_);
+        break;
+      case 28 /* trace_uuid_lsb */:
+        field.get(&trace_uuid_lsb_);
+        break;
+      case 32 /* trace_filter */:
+        (*trace_filter_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TraceConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TraceConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TraceConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: buffers
+  for (auto& it : buffers_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  // Field 2: data_sources
+  for (auto& it : data_sources_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
+  }
+
+  // Field 20: builtin_data_sources
+  if (_has_field_[20]) {
+    (*builtin_data_sources_).Serialize(msg->BeginNestedMessage<::protozero::Message>(20));
+  }
+
+  // Field 3: duration_ms
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, duration_ms_);
+  }
+
+  // Field 4: enable_extra_guardrails
+  if (_has_field_[4]) {
+    msg->AppendTinyVarInt(4, enable_extra_guardrails_);
+  }
+
+  // Field 5: lockdown_mode
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, lockdown_mode_);
+  }
+
+  // Field 6: producers
+  for (auto& it : producers_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
+  }
+
+  // Field 7: statsd_metadata
+  if (_has_field_[7]) {
+    (*statsd_metadata_).Serialize(msg->BeginNestedMessage<::protozero::Message>(7));
+  }
+
+  // Field 8: write_into_file
+  if (_has_field_[8]) {
+    msg->AppendTinyVarInt(8, write_into_file_);
+  }
+
+  // Field 29: output_path
+  if (_has_field_[29]) {
+    msg->AppendString(29, output_path_);
+  }
+
+  // Field 9: file_write_period_ms
+  if (_has_field_[9]) {
+    msg->AppendVarInt(9, file_write_period_ms_);
+  }
+
+  // Field 10: max_file_size_bytes
+  if (_has_field_[10]) {
+    msg->AppendVarInt(10, max_file_size_bytes_);
+  }
+
+  // Field 11: guardrail_overrides
+  if (_has_field_[11]) {
+    (*guardrail_overrides_).Serialize(msg->BeginNestedMessage<::protozero::Message>(11));
+  }
+
+  // Field 12: deferred_start
+  if (_has_field_[12]) {
+    msg->AppendTinyVarInt(12, deferred_start_);
+  }
+
+  // Field 13: flush_period_ms
+  if (_has_field_[13]) {
+    msg->AppendVarInt(13, flush_period_ms_);
+  }
+
+  // Field 14: flush_timeout_ms
+  if (_has_field_[14]) {
+    msg->AppendVarInt(14, flush_timeout_ms_);
+  }
+
+  // Field 23: data_source_stop_timeout_ms
+  if (_has_field_[23]) {
+    msg->AppendVarInt(23, data_source_stop_timeout_ms_);
+  }
+
+  // Field 16: notify_traceur
+  if (_has_field_[16]) {
+    msg->AppendTinyVarInt(16, notify_traceur_);
+  }
+
+  // Field 30: bugreport_score
+  if (_has_field_[30]) {
+    msg->AppendVarInt(30, bugreport_score_);
+  }
+
+  // Field 17: trigger_config
+  if (_has_field_[17]) {
+    (*trigger_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(17));
+  }
+
+  // Field 18: activate_triggers
+  for (auto& it : activate_triggers_) {
+    msg->AppendString(18, it);
+  }
+
+  // Field 21: incremental_state_config
+  if (_has_field_[21]) {
+    (*incremental_state_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(21));
+  }
+
+  // Field 19: allow_user_build_tracing
+  if (_has_field_[19]) {
+    msg->AppendTinyVarInt(19, allow_user_build_tracing_);
+  }
+
+  // Field 22: unique_session_name
+  if (_has_field_[22]) {
+    msg->AppendString(22, unique_session_name_);
+  }
+
+  // Field 24: compression_type
+  if (_has_field_[24]) {
+    msg->AppendVarInt(24, compression_type_);
+  }
+
+  // Field 25: incident_report_config
+  if (_has_field_[25]) {
+    (*incident_report_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(25));
+  }
+
+  // Field 31: statsd_logging
+  if (_has_field_[31]) {
+    msg->AppendVarInt(31, statsd_logging_);
+  }
+
+  // Field 27: trace_uuid_msb
+  if (_has_field_[27]) {
+    msg->AppendVarInt(27, trace_uuid_msb_);
+  }
+
+  // Field 28: trace_uuid_lsb
+  if (_has_field_[28]) {
+    msg->AppendVarInt(28, trace_uuid_lsb_);
+  }
+
+  // Field 32: trace_filter
+  if (_has_field_[32]) {
+    (*trace_filter_).Serialize(msg->BeginNestedMessage<::protozero::Message>(32));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TraceConfig_TraceFilter::TraceConfig_TraceFilter() = default;
+TraceConfig_TraceFilter::~TraceConfig_TraceFilter() = default;
+TraceConfig_TraceFilter::TraceConfig_TraceFilter(const TraceConfig_TraceFilter&) = default;
+TraceConfig_TraceFilter& TraceConfig_TraceFilter::operator=(const TraceConfig_TraceFilter&) = default;
+TraceConfig_TraceFilter::TraceConfig_TraceFilter(TraceConfig_TraceFilter&&) noexcept = default;
+TraceConfig_TraceFilter& TraceConfig_TraceFilter::operator=(TraceConfig_TraceFilter&&) = default;
+
+bool TraceConfig_TraceFilter::operator==(const TraceConfig_TraceFilter& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && bytecode_ == other.bytecode_;
+}
+
+bool TraceConfig_TraceFilter::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* bytecode */:
+        field.get(&bytecode_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TraceConfig_TraceFilter::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TraceConfig_TraceFilter::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TraceConfig_TraceFilter::Serialize(::protozero::Message* msg) const {
+  // Field 1: bytecode
+  if (_has_field_[1]) {
+    msg->AppendString(1, bytecode_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TraceConfig_IncidentReportConfig::TraceConfig_IncidentReportConfig() = default;
+TraceConfig_IncidentReportConfig::~TraceConfig_IncidentReportConfig() = default;
+TraceConfig_IncidentReportConfig::TraceConfig_IncidentReportConfig(const TraceConfig_IncidentReportConfig&) = default;
+TraceConfig_IncidentReportConfig& TraceConfig_IncidentReportConfig::operator=(const TraceConfig_IncidentReportConfig&) = default;
+TraceConfig_IncidentReportConfig::TraceConfig_IncidentReportConfig(TraceConfig_IncidentReportConfig&&) noexcept = default;
+TraceConfig_IncidentReportConfig& TraceConfig_IncidentReportConfig::operator=(TraceConfig_IncidentReportConfig&&) = default;
+
+bool TraceConfig_IncidentReportConfig::operator==(const TraceConfig_IncidentReportConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && destination_package_ == other.destination_package_
+   && destination_class_ == other.destination_class_
+   && privacy_level_ == other.privacy_level_
+   && skip_incidentd_ == other.skip_incidentd_
+   && skip_dropbox_ == other.skip_dropbox_;
+}
+
+bool TraceConfig_IncidentReportConfig::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* destination_package */:
+        field.get(&destination_package_);
+        break;
+      case 2 /* destination_class */:
+        field.get(&destination_class_);
+        break;
+      case 3 /* privacy_level */:
+        field.get(&privacy_level_);
+        break;
+      case 5 /* skip_incidentd */:
+        field.get(&skip_incidentd_);
+        break;
+      case 4 /* skip_dropbox */:
+        field.get(&skip_dropbox_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TraceConfig_IncidentReportConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TraceConfig_IncidentReportConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TraceConfig_IncidentReportConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: destination_package
+  if (_has_field_[1]) {
+    msg->AppendString(1, destination_package_);
+  }
+
+  // Field 2: destination_class
+  if (_has_field_[2]) {
+    msg->AppendString(2, destination_class_);
+  }
+
+  // Field 3: privacy_level
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, privacy_level_);
+  }
+
+  // Field 5: skip_incidentd
+  if (_has_field_[5]) {
+    msg->AppendTinyVarInt(5, skip_incidentd_);
+  }
+
+  // Field 4: skip_dropbox
+  if (_has_field_[4]) {
+    msg->AppendTinyVarInt(4, skip_dropbox_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TraceConfig_IncrementalStateConfig::TraceConfig_IncrementalStateConfig() = default;
+TraceConfig_IncrementalStateConfig::~TraceConfig_IncrementalStateConfig() = default;
+TraceConfig_IncrementalStateConfig::TraceConfig_IncrementalStateConfig(const TraceConfig_IncrementalStateConfig&) = default;
+TraceConfig_IncrementalStateConfig& TraceConfig_IncrementalStateConfig::operator=(const TraceConfig_IncrementalStateConfig&) = default;
+TraceConfig_IncrementalStateConfig::TraceConfig_IncrementalStateConfig(TraceConfig_IncrementalStateConfig&&) noexcept = default;
+TraceConfig_IncrementalStateConfig& TraceConfig_IncrementalStateConfig::operator=(TraceConfig_IncrementalStateConfig&&) = default;
+
+bool TraceConfig_IncrementalStateConfig::operator==(const TraceConfig_IncrementalStateConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && clear_period_ms_ == other.clear_period_ms_;
+}
+
+bool TraceConfig_IncrementalStateConfig::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* clear_period_ms */:
+        field.get(&clear_period_ms_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TraceConfig_IncrementalStateConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TraceConfig_IncrementalStateConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TraceConfig_IncrementalStateConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: clear_period_ms
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, clear_period_ms_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TraceConfig_TriggerConfig::TraceConfig_TriggerConfig() = default;
+TraceConfig_TriggerConfig::~TraceConfig_TriggerConfig() = default;
+TraceConfig_TriggerConfig::TraceConfig_TriggerConfig(const TraceConfig_TriggerConfig&) = default;
+TraceConfig_TriggerConfig& TraceConfig_TriggerConfig::operator=(const TraceConfig_TriggerConfig&) = default;
+TraceConfig_TriggerConfig::TraceConfig_TriggerConfig(TraceConfig_TriggerConfig&&) noexcept = default;
+TraceConfig_TriggerConfig& TraceConfig_TriggerConfig::operator=(TraceConfig_TriggerConfig&&) = default;
+
+bool TraceConfig_TriggerConfig::operator==(const TraceConfig_TriggerConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && trigger_mode_ == other.trigger_mode_
+   && triggers_ == other.triggers_
+   && trigger_timeout_ms_ == other.trigger_timeout_ms_;
+}
+
+int TraceConfig_TriggerConfig::triggers_size() const { return static_cast<int>(triggers_.size()); }
+void TraceConfig_TriggerConfig::clear_triggers() { triggers_.clear(); }
+TraceConfig_TriggerConfig_Trigger* TraceConfig_TriggerConfig::add_triggers() { triggers_.emplace_back(); return &triggers_.back(); }
+bool TraceConfig_TriggerConfig::ParseFromArray(const void* raw, size_t size) {
+  triggers_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* trigger_mode */:
+        field.get(&trigger_mode_);
+        break;
+      case 2 /* triggers */:
+        triggers_.emplace_back();
+        triggers_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 3 /* trigger_timeout_ms */:
+        field.get(&trigger_timeout_ms_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TraceConfig_TriggerConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TraceConfig_TriggerConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TraceConfig_TriggerConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: trigger_mode
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, trigger_mode_);
+  }
+
+  // Field 2: triggers
+  for (auto& it : triggers_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
+  }
+
+  // Field 3: trigger_timeout_ms
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, trigger_timeout_ms_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TraceConfig_TriggerConfig_Trigger::TraceConfig_TriggerConfig_Trigger() = default;
+TraceConfig_TriggerConfig_Trigger::~TraceConfig_TriggerConfig_Trigger() = default;
+TraceConfig_TriggerConfig_Trigger::TraceConfig_TriggerConfig_Trigger(const TraceConfig_TriggerConfig_Trigger&) = default;
+TraceConfig_TriggerConfig_Trigger& TraceConfig_TriggerConfig_Trigger::operator=(const TraceConfig_TriggerConfig_Trigger&) = default;
+TraceConfig_TriggerConfig_Trigger::TraceConfig_TriggerConfig_Trigger(TraceConfig_TriggerConfig_Trigger&&) noexcept = default;
+TraceConfig_TriggerConfig_Trigger& TraceConfig_TriggerConfig_Trigger::operator=(TraceConfig_TriggerConfig_Trigger&&) = default;
+
+bool TraceConfig_TriggerConfig_Trigger::operator==(const TraceConfig_TriggerConfig_Trigger& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && name_ == other.name_
+   && producer_name_regex_ == other.producer_name_regex_
+   && stop_delay_ms_ == other.stop_delay_ms_
+   && max_per_24_h_ == other.max_per_24_h_
+   && skip_probability_ == other.skip_probability_;
+}
+
+bool TraceConfig_TriggerConfig_Trigger::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* name */:
+        field.get(&name_);
+        break;
+      case 2 /* producer_name_regex */:
+        field.get(&producer_name_regex_);
+        break;
+      case 3 /* stop_delay_ms */:
+        field.get(&stop_delay_ms_);
+        break;
+      case 4 /* max_per_24_h */:
+        field.get(&max_per_24_h_);
+        break;
+      case 5 /* skip_probability */:
+        field.get(&skip_probability_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TraceConfig_TriggerConfig_Trigger::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TraceConfig_TriggerConfig_Trigger::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TraceConfig_TriggerConfig_Trigger::Serialize(::protozero::Message* msg) const {
+  // Field 1: name
+  if (_has_field_[1]) {
+    msg->AppendString(1, name_);
+  }
+
+  // Field 2: producer_name_regex
+  if (_has_field_[2]) {
+    msg->AppendString(2, producer_name_regex_);
+  }
+
+  // Field 3: stop_delay_ms
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, stop_delay_ms_);
+  }
+
+  // Field 4: max_per_24_h
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, max_per_24_h_);
+  }
+
+  // Field 5: skip_probability
+  if (_has_field_[5]) {
+    msg->AppendFixed(5, skip_probability_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TraceConfig_GuardrailOverrides::TraceConfig_GuardrailOverrides() = default;
+TraceConfig_GuardrailOverrides::~TraceConfig_GuardrailOverrides() = default;
+TraceConfig_GuardrailOverrides::TraceConfig_GuardrailOverrides(const TraceConfig_GuardrailOverrides&) = default;
+TraceConfig_GuardrailOverrides& TraceConfig_GuardrailOverrides::operator=(const TraceConfig_GuardrailOverrides&) = default;
+TraceConfig_GuardrailOverrides::TraceConfig_GuardrailOverrides(TraceConfig_GuardrailOverrides&&) noexcept = default;
+TraceConfig_GuardrailOverrides& TraceConfig_GuardrailOverrides::operator=(TraceConfig_GuardrailOverrides&&) = default;
+
+bool TraceConfig_GuardrailOverrides::operator==(const TraceConfig_GuardrailOverrides& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && max_upload_per_day_bytes_ == other.max_upload_per_day_bytes_;
+}
+
+bool TraceConfig_GuardrailOverrides::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* max_upload_per_day_bytes */:
+        field.get(&max_upload_per_day_bytes_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TraceConfig_GuardrailOverrides::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TraceConfig_GuardrailOverrides::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TraceConfig_GuardrailOverrides::Serialize(::protozero::Message* msg) const {
+  // Field 1: max_upload_per_day_bytes
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, max_upload_per_day_bytes_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TraceConfig_StatsdMetadata::TraceConfig_StatsdMetadata() = default;
+TraceConfig_StatsdMetadata::~TraceConfig_StatsdMetadata() = default;
+TraceConfig_StatsdMetadata::TraceConfig_StatsdMetadata(const TraceConfig_StatsdMetadata&) = default;
+TraceConfig_StatsdMetadata& TraceConfig_StatsdMetadata::operator=(const TraceConfig_StatsdMetadata&) = default;
+TraceConfig_StatsdMetadata::TraceConfig_StatsdMetadata(TraceConfig_StatsdMetadata&&) noexcept = default;
+TraceConfig_StatsdMetadata& TraceConfig_StatsdMetadata::operator=(TraceConfig_StatsdMetadata&&) = default;
+
+bool TraceConfig_StatsdMetadata::operator==(const TraceConfig_StatsdMetadata& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && triggering_alert_id_ == other.triggering_alert_id_
+   && triggering_config_uid_ == other.triggering_config_uid_
+   && triggering_config_id_ == other.triggering_config_id_
+   && triggering_subscription_id_ == other.triggering_subscription_id_;
+}
+
+bool TraceConfig_StatsdMetadata::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* triggering_alert_id */:
+        field.get(&triggering_alert_id_);
+        break;
+      case 2 /* triggering_config_uid */:
+        field.get(&triggering_config_uid_);
+        break;
+      case 3 /* triggering_config_id */:
+        field.get(&triggering_config_id_);
+        break;
+      case 4 /* triggering_subscription_id */:
+        field.get(&triggering_subscription_id_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TraceConfig_StatsdMetadata::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TraceConfig_StatsdMetadata::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TraceConfig_StatsdMetadata::Serialize(::protozero::Message* msg) const {
+  // Field 1: triggering_alert_id
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, triggering_alert_id_);
+  }
+
+  // Field 2: triggering_config_uid
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, triggering_config_uid_);
+  }
+
+  // Field 3: triggering_config_id
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, triggering_config_id_);
+  }
+
+  // Field 4: triggering_subscription_id
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, triggering_subscription_id_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TraceConfig_ProducerConfig::TraceConfig_ProducerConfig() = default;
+TraceConfig_ProducerConfig::~TraceConfig_ProducerConfig() = default;
+TraceConfig_ProducerConfig::TraceConfig_ProducerConfig(const TraceConfig_ProducerConfig&) = default;
+TraceConfig_ProducerConfig& TraceConfig_ProducerConfig::operator=(const TraceConfig_ProducerConfig&) = default;
+TraceConfig_ProducerConfig::TraceConfig_ProducerConfig(TraceConfig_ProducerConfig&&) noexcept = default;
+TraceConfig_ProducerConfig& TraceConfig_ProducerConfig::operator=(TraceConfig_ProducerConfig&&) = default;
+
+bool TraceConfig_ProducerConfig::operator==(const TraceConfig_ProducerConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && producer_name_ == other.producer_name_
+   && shm_size_kb_ == other.shm_size_kb_
+   && page_size_kb_ == other.page_size_kb_;
+}
+
+bool TraceConfig_ProducerConfig::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* producer_name */:
+        field.get(&producer_name_);
+        break;
+      case 2 /* shm_size_kb */:
+        field.get(&shm_size_kb_);
+        break;
+      case 3 /* page_size_kb */:
+        field.get(&page_size_kb_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TraceConfig_ProducerConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TraceConfig_ProducerConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TraceConfig_ProducerConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: producer_name
+  if (_has_field_[1]) {
+    msg->AppendString(1, producer_name_);
+  }
+
+  // Field 2: shm_size_kb
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, shm_size_kb_);
+  }
+
+  // Field 3: page_size_kb
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, page_size_kb_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TraceConfig_BuiltinDataSource::TraceConfig_BuiltinDataSource() = default;
+TraceConfig_BuiltinDataSource::~TraceConfig_BuiltinDataSource() = default;
+TraceConfig_BuiltinDataSource::TraceConfig_BuiltinDataSource(const TraceConfig_BuiltinDataSource&) = default;
+TraceConfig_BuiltinDataSource& TraceConfig_BuiltinDataSource::operator=(const TraceConfig_BuiltinDataSource&) = default;
+TraceConfig_BuiltinDataSource::TraceConfig_BuiltinDataSource(TraceConfig_BuiltinDataSource&&) noexcept = default;
+TraceConfig_BuiltinDataSource& TraceConfig_BuiltinDataSource::operator=(TraceConfig_BuiltinDataSource&&) = default;
+
+bool TraceConfig_BuiltinDataSource::operator==(const TraceConfig_BuiltinDataSource& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && disable_clock_snapshotting_ == other.disable_clock_snapshotting_
+   && disable_trace_config_ == other.disable_trace_config_
+   && disable_system_info_ == other.disable_system_info_
+   && disable_service_events_ == other.disable_service_events_
+   && primary_trace_clock_ == other.primary_trace_clock_
+   && snapshot_interval_ms_ == other.snapshot_interval_ms_
+   && prefer_suspend_clock_for_snapshot_ == other.prefer_suspend_clock_for_snapshot_;
+}
+
+bool TraceConfig_BuiltinDataSource::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* disable_clock_snapshotting */:
+        field.get(&disable_clock_snapshotting_);
+        break;
+      case 2 /* disable_trace_config */:
+        field.get(&disable_trace_config_);
+        break;
+      case 3 /* disable_system_info */:
+        field.get(&disable_system_info_);
+        break;
+      case 4 /* disable_service_events */:
+        field.get(&disable_service_events_);
+        break;
+      case 5 /* primary_trace_clock */:
+        field.get(&primary_trace_clock_);
+        break;
+      case 6 /* snapshot_interval_ms */:
+        field.get(&snapshot_interval_ms_);
+        break;
+      case 7 /* prefer_suspend_clock_for_snapshot */:
+        field.get(&prefer_suspend_clock_for_snapshot_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TraceConfig_BuiltinDataSource::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TraceConfig_BuiltinDataSource::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TraceConfig_BuiltinDataSource::Serialize(::protozero::Message* msg) const {
+  // Field 1: disable_clock_snapshotting
+  if (_has_field_[1]) {
+    msg->AppendTinyVarInt(1, disable_clock_snapshotting_);
+  }
+
+  // Field 2: disable_trace_config
+  if (_has_field_[2]) {
+    msg->AppendTinyVarInt(2, disable_trace_config_);
+  }
+
+  // Field 3: disable_system_info
+  if (_has_field_[3]) {
+    msg->AppendTinyVarInt(3, disable_system_info_);
+  }
+
+  // Field 4: disable_service_events
+  if (_has_field_[4]) {
+    msg->AppendTinyVarInt(4, disable_service_events_);
+  }
+
+  // Field 5: primary_trace_clock
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, primary_trace_clock_);
+  }
+
+  // Field 6: snapshot_interval_ms
+  if (_has_field_[6]) {
+    msg->AppendVarInt(6, snapshot_interval_ms_);
+  }
+
+  // Field 7: prefer_suspend_clock_for_snapshot
+  if (_has_field_[7]) {
+    msg->AppendTinyVarInt(7, prefer_suspend_clock_for_snapshot_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TraceConfig_DataSource::TraceConfig_DataSource() = default;
+TraceConfig_DataSource::~TraceConfig_DataSource() = default;
+TraceConfig_DataSource::TraceConfig_DataSource(const TraceConfig_DataSource&) = default;
+TraceConfig_DataSource& TraceConfig_DataSource::operator=(const TraceConfig_DataSource&) = default;
+TraceConfig_DataSource::TraceConfig_DataSource(TraceConfig_DataSource&&) noexcept = default;
+TraceConfig_DataSource& TraceConfig_DataSource::operator=(TraceConfig_DataSource&&) = default;
+
+bool TraceConfig_DataSource::operator==(const TraceConfig_DataSource& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && config_ == other.config_
+   && producer_name_filter_ == other.producer_name_filter_
+   && producer_name_regex_filter_ == other.producer_name_regex_filter_;
+}
+
+bool TraceConfig_DataSource::ParseFromArray(const void* raw, size_t size) {
+  producer_name_filter_.clear();
+  producer_name_regex_filter_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* config */:
+        (*config_).ParseFromArray(field.data(), field.size());
+        break;
+      case 2 /* producer_name_filter */:
+        producer_name_filter_.emplace_back();
+        field.get(&producer_name_filter_.back());
+        break;
+      case 3 /* producer_name_regex_filter */:
+        producer_name_regex_filter_.emplace_back();
+        field.get(&producer_name_regex_filter_.back());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TraceConfig_DataSource::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TraceConfig_DataSource::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TraceConfig_DataSource::Serialize(::protozero::Message* msg) const {
+  // Field 1: config
+  if (_has_field_[1]) {
+    (*config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  // Field 2: producer_name_filter
+  for (auto& it : producer_name_filter_) {
+    msg->AppendString(2, it);
+  }
+
+  // Field 3: producer_name_regex_filter
+  for (auto& it : producer_name_regex_filter_) {
+    msg->AppendString(3, it);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TraceConfig_BufferConfig::TraceConfig_BufferConfig() = default;
+TraceConfig_BufferConfig::~TraceConfig_BufferConfig() = default;
+TraceConfig_BufferConfig::TraceConfig_BufferConfig(const TraceConfig_BufferConfig&) = default;
+TraceConfig_BufferConfig& TraceConfig_BufferConfig::operator=(const TraceConfig_BufferConfig&) = default;
+TraceConfig_BufferConfig::TraceConfig_BufferConfig(TraceConfig_BufferConfig&&) noexcept = default;
+TraceConfig_BufferConfig& TraceConfig_BufferConfig::operator=(TraceConfig_BufferConfig&&) = default;
+
+bool TraceConfig_BufferConfig::operator==(const TraceConfig_BufferConfig& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && size_kb_ == other.size_kb_
+   && fill_policy_ == other.fill_policy_;
+}
+
+bool TraceConfig_BufferConfig::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* size_kb */:
+        field.get(&size_kb_);
+        break;
+      case 4 /* fill_policy */:
+        field.get(&fill_policy_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TraceConfig_BufferConfig::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TraceConfig_BufferConfig::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TraceConfig_BufferConfig::Serialize(::protozero::Message* msg) const {
+  // Field 1: size_kb
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, size_kb_);
+  }
+
+  // Field 4: fill_policy
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, fill_policy_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/common/android_energy_consumer_descriptor.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/common/android_log_constants.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/common/builtin_clock.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/common/commit_data_request.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/common/data_source_descriptor.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/common/descriptor.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/common/gpu_counter_descriptor.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/common/interceptor_descriptor.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/common/observable_events.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/common/perf_events.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/common/sys_stats_counters.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/common/trace_stats.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/common/tracing_service_capabilities.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/common/tracing_service_state.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/common/track_event_descriptor.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/gpu/gpu_counter_event.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/gpu/gpu_log.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/gpu/gpu_render_stage_event.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/gpu/vulkan_api_event.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/gpu/vulkan_memory_event.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/profiling/deobfuscation.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/profiling/heap_graph.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/profiling/profile_common.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/profiling/profile_packet.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/profiling/smaps.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_application_state_info.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_compositor_scheduler_state.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_content_settings_event_info.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_frame_reporter.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_histogram_sample.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_keyed_service.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_latency_info.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_legacy_ipc.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_message_pump.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_mojo_event_info.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_process_descriptor.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_renderer_scheduler_state.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_thread_descriptor.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_user_event.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_window_handle_event_info.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/counter_descriptor.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/debug_annotation.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/log_message.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/process_descriptor.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/source_location.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/task_execution.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/thread_descriptor.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/track_descriptor.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/track_event.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/interned_data/interned_data.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_application_state_info.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_application_state_info.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_APPLICATION_STATE_INFO_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_APPLICATION_STATE_INFO_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeApplicationStateInfo;
+enum ChromeApplicationStateInfo_ChromeApplicationState : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeApplicationStateInfo_ChromeApplicationState : int {
+  ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_UNKNOWN = 0,
+  ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_RUNNING_ACTIVITIES = 1,
+  ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_PAUSED_ACTIVITIES = 2,
+  ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_STOPPED_ACTIVITIES = 3,
+  ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_DESTROYED_ACTIVITIES = 4,
+};
+
+class PERFETTO_EXPORT ChromeApplicationStateInfo : public ::protozero::CppMessageObj {
+ public:
+  using ChromeApplicationState = ChromeApplicationStateInfo_ChromeApplicationState;
+  static constexpr auto APPLICATION_STATE_UNKNOWN = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_UNKNOWN;
+  static constexpr auto APPLICATION_STATE_HAS_RUNNING_ACTIVITIES = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_RUNNING_ACTIVITIES;
+  static constexpr auto APPLICATION_STATE_HAS_PAUSED_ACTIVITIES = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_PAUSED_ACTIVITIES;
+  static constexpr auto APPLICATION_STATE_HAS_STOPPED_ACTIVITIES = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_STOPPED_ACTIVITIES;
+  static constexpr auto APPLICATION_STATE_HAS_DESTROYED_ACTIVITIES = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_DESTROYED_ACTIVITIES;
+  static constexpr auto ChromeApplicationState_MIN = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_UNKNOWN;
+  static constexpr auto ChromeApplicationState_MAX = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_DESTROYED_ACTIVITIES;
+  enum FieldNumbers {
+    kApplicationStateFieldNumber = 1,
+  };
+
+  ChromeApplicationStateInfo();
+  ~ChromeApplicationStateInfo() override;
+  ChromeApplicationStateInfo(ChromeApplicationStateInfo&&) noexcept;
+  ChromeApplicationStateInfo& operator=(ChromeApplicationStateInfo&&);
+  ChromeApplicationStateInfo(const ChromeApplicationStateInfo&);
+  ChromeApplicationStateInfo& operator=(const ChromeApplicationStateInfo&);
+  bool operator==(const ChromeApplicationStateInfo&) const;
+  bool operator!=(const ChromeApplicationStateInfo& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_application_state() const { return _has_field_[1]; }
+  ChromeApplicationStateInfo_ChromeApplicationState application_state() const { return application_state_; }
+  void set_application_state(ChromeApplicationStateInfo_ChromeApplicationState value) { application_state_ = value; _has_field_.set(1); }
+
+ private:
+  ChromeApplicationStateInfo_ChromeApplicationState application_state_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_APPLICATION_STATE_INFO_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_application_state_info.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ChromeApplicationStateInfo::ChromeApplicationStateInfo() = default;
+ChromeApplicationStateInfo::~ChromeApplicationStateInfo() = default;
+ChromeApplicationStateInfo::ChromeApplicationStateInfo(const ChromeApplicationStateInfo&) = default;
+ChromeApplicationStateInfo& ChromeApplicationStateInfo::operator=(const ChromeApplicationStateInfo&) = default;
+ChromeApplicationStateInfo::ChromeApplicationStateInfo(ChromeApplicationStateInfo&&) noexcept = default;
+ChromeApplicationStateInfo& ChromeApplicationStateInfo::operator=(ChromeApplicationStateInfo&&) = default;
+
+bool ChromeApplicationStateInfo::operator==(const ChromeApplicationStateInfo& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && application_state_ == other.application_state_;
+}
+
+bool ChromeApplicationStateInfo::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* application_state */:
+        field.get(&application_state_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeApplicationStateInfo::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeApplicationStateInfo::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeApplicationStateInfo::Serialize(::protozero::Message* msg) const {
+  // Field 1: application_state
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, application_state_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_compositor_scheduler_state.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_compositor_scheduler_state.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_COMPOSITOR_SCHEDULER_STATE_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_COMPOSITOR_SCHEDULER_STATE_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class CompositorTimingHistory;
+class BeginFrameSourceState;
+class BeginFrameArgs;
+class SourceLocation;
+class BeginFrameObserverState;
+class BeginImplFrameArgs;
+class BeginImplFrameArgs_TimestampsInUs;
+class ChromeCompositorStateMachine;
+class ChromeCompositorStateMachine_MinorState;
+class ChromeCompositorStateMachine_MajorState;
+class ChromeCompositorSchedulerState;
+enum ChromeCompositorSchedulerAction : int;
+enum BeginFrameArgs_BeginFrameArgsType : int;
+enum BeginImplFrameArgs_State : int;
+enum ChromeCompositorStateMachine_MinorState_TreePriority : int;
+enum ChromeCompositorStateMachine_MinorState_ScrollHandlerState : int;
+enum ChromeCompositorStateMachine_MajorState_BeginImplFrameState : int;
+enum ChromeCompositorStateMachine_MajorState_BeginMainFrameState : int;
+enum ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState : int;
+enum ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState : int;
+enum ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeCompositorSchedulerAction : int {
+  CC_SCHEDULER_ACTION_UNSPECIFIED = 0,
+  CC_SCHEDULER_ACTION_NONE = 1,
+  CC_SCHEDULER_ACTION_SEND_BEGIN_MAIN_FRAME = 2,
+  CC_SCHEDULER_ACTION_COMMIT = 3,
+  CC_SCHEDULER_ACTION_ACTIVATE_SYNC_TREE = 4,
+  CC_SCHEDULER_ACTION_DRAW_IF_POSSIBLE = 5,
+  CC_SCHEDULER_ACTION_DRAW_FORCED = 6,
+  CC_SCHEDULER_ACTION_DRAW_ABORT = 7,
+  CC_SCHEDULER_ACTION_BEGIN_LAYER_TREE_FRAME_SINK_CREATION = 8,
+  CC_SCHEDULER_ACTION_PREPARE_TILES = 9,
+  CC_SCHEDULER_ACTION_INVALIDATE_LAYER_TREE_FRAME_SINK = 10,
+  CC_SCHEDULER_ACTION_PERFORM_IMPL_SIDE_INVALIDATION = 11,
+  CC_SCHEDULER_ACTION_NOTIFY_BEGIN_MAIN_FRAME_NOT_EXPECTED_UNTIL = 12,
+  CC_SCHEDULER_ACTION_NOTIFY_BEGIN_MAIN_FRAME_NOT_EXPECTED_SOON = 13,
+};
+enum BeginFrameArgs_BeginFrameArgsType : int {
+  BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_UNSPECIFIED = 0,
+  BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_INVALID = 1,
+  BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_NORMAL = 2,
+  BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_MISSED = 3,
+};
+enum BeginImplFrameArgs_State : int {
+  BeginImplFrameArgs_State_BEGIN_FRAME_FINISHED = 0,
+  BeginImplFrameArgs_State_BEGIN_FRAME_USING = 1,
+};
+enum ChromeCompositorStateMachine_MinorState_TreePriority : int {
+  ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_SAME_PRIORITY_FOR_BOTH_TREES = 1,
+  ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_SMOOTHNESS_TAKES_PRIORITY = 2,
+  ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_NEW_CONTENT_TAKES_PRIORITY = 3,
+};
+enum ChromeCompositorStateMachine_MinorState_ScrollHandlerState : int {
+  ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_HANDLER_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_AFFECTS_SCROLL_HANDLER = 1,
+  ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_DOES_NOT_AFFECT_SCROLL_HANDLER = 2,
+};
+enum ChromeCompositorStateMachine_MajorState_BeginImplFrameState : int {
+  ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_IDLE = 1,
+  ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_BEGIN_FRAME = 2,
+  ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_DEADLINE = 3,
+};
+enum ChromeCompositorStateMachine_MajorState_BeginMainFrameState : int {
+  ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_IDLE = 1,
+  ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_SENT = 2,
+  ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_READY_TO_COMMIT = 3,
+};
+enum ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState : int {
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_NONE = 1,
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_ACTIVE = 2,
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_CREATING = 3,
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_COMMIT = 4,
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_ACTIVATION = 5,
+};
+enum ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState : int {
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_IDLE = 1,
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_COMMIT = 2,
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_ACTIVATION = 3,
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_DRAW = 4,
+};
+enum ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode : int {
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_UNSPECIFIED = 0,
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_NONE = 1,
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_IMMEDIATE = 2,
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_REGULAR = 3,
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_LATE = 4,
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_BLOCKED = 5,
+};
+
+class PERFETTO_EXPORT CompositorTimingHistory : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kBeginMainFrameQueueCriticalEstimateDeltaUsFieldNumber = 1,
+    kBeginMainFrameQueueNotCriticalEstimateDeltaUsFieldNumber = 2,
+    kBeginMainFrameStartToReadyToCommitEstimateDeltaUsFieldNumber = 3,
+    kCommitToReadyToActivateEstimateDeltaUsFieldNumber = 4,
+    kPrepareTilesEstimateDeltaUsFieldNumber = 5,
+    kActivateEstimateDeltaUsFieldNumber = 6,
+    kDrawEstimateDeltaUsFieldNumber = 7,
+  };
+
+  CompositorTimingHistory();
+  ~CompositorTimingHistory() override;
+  CompositorTimingHistory(CompositorTimingHistory&&) noexcept;
+  CompositorTimingHistory& operator=(CompositorTimingHistory&&);
+  CompositorTimingHistory(const CompositorTimingHistory&);
+  CompositorTimingHistory& operator=(const CompositorTimingHistory&);
+  bool operator==(const CompositorTimingHistory&) const;
+  bool operator!=(const CompositorTimingHistory& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_begin_main_frame_queue_critical_estimate_delta_us() const { return _has_field_[1]; }
+  int64_t begin_main_frame_queue_critical_estimate_delta_us() const { return begin_main_frame_queue_critical_estimate_delta_us_; }
+  void set_begin_main_frame_queue_critical_estimate_delta_us(int64_t value) { begin_main_frame_queue_critical_estimate_delta_us_ = value; _has_field_.set(1); }
+
+  bool has_begin_main_frame_queue_not_critical_estimate_delta_us() const { return _has_field_[2]; }
+  int64_t begin_main_frame_queue_not_critical_estimate_delta_us() const { return begin_main_frame_queue_not_critical_estimate_delta_us_; }
+  void set_begin_main_frame_queue_not_critical_estimate_delta_us(int64_t value) { begin_main_frame_queue_not_critical_estimate_delta_us_ = value; _has_field_.set(2); }
+
+  bool has_begin_main_frame_start_to_ready_to_commit_estimate_delta_us() const { return _has_field_[3]; }
+  int64_t begin_main_frame_start_to_ready_to_commit_estimate_delta_us() const { return begin_main_frame_start_to_ready_to_commit_estimate_delta_us_; }
+  void set_begin_main_frame_start_to_ready_to_commit_estimate_delta_us(int64_t value) { begin_main_frame_start_to_ready_to_commit_estimate_delta_us_ = value; _has_field_.set(3); }
+
+  bool has_commit_to_ready_to_activate_estimate_delta_us() const { return _has_field_[4]; }
+  int64_t commit_to_ready_to_activate_estimate_delta_us() const { return commit_to_ready_to_activate_estimate_delta_us_; }
+  void set_commit_to_ready_to_activate_estimate_delta_us(int64_t value) { commit_to_ready_to_activate_estimate_delta_us_ = value; _has_field_.set(4); }
+
+  bool has_prepare_tiles_estimate_delta_us() const { return _has_field_[5]; }
+  int64_t prepare_tiles_estimate_delta_us() const { return prepare_tiles_estimate_delta_us_; }
+  void set_prepare_tiles_estimate_delta_us(int64_t value) { prepare_tiles_estimate_delta_us_ = value; _has_field_.set(5); }
+
+  bool has_activate_estimate_delta_us() const { return _has_field_[6]; }
+  int64_t activate_estimate_delta_us() const { return activate_estimate_delta_us_; }
+  void set_activate_estimate_delta_us(int64_t value) { activate_estimate_delta_us_ = value; _has_field_.set(6); }
+
+  bool has_draw_estimate_delta_us() const { return _has_field_[7]; }
+  int64_t draw_estimate_delta_us() const { return draw_estimate_delta_us_; }
+  void set_draw_estimate_delta_us(int64_t value) { draw_estimate_delta_us_ = value; _has_field_.set(7); }
+
+ private:
+  int64_t begin_main_frame_queue_critical_estimate_delta_us_{};
+  int64_t begin_main_frame_queue_not_critical_estimate_delta_us_{};
+  int64_t begin_main_frame_start_to_ready_to_commit_estimate_delta_us_{};
+  int64_t commit_to_ready_to_activate_estimate_delta_us_{};
+  int64_t prepare_tiles_estimate_delta_us_{};
+  int64_t activate_estimate_delta_us_{};
+  int64_t draw_estimate_delta_us_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<8> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT BeginFrameSourceState : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kSourceIdFieldNumber = 1,
+    kPausedFieldNumber = 2,
+    kNumObserversFieldNumber = 3,
+    kLastBeginFrameArgsFieldNumber = 4,
+  };
+
+  BeginFrameSourceState();
+  ~BeginFrameSourceState() override;
+  BeginFrameSourceState(BeginFrameSourceState&&) noexcept;
+  BeginFrameSourceState& operator=(BeginFrameSourceState&&);
+  BeginFrameSourceState(const BeginFrameSourceState&);
+  BeginFrameSourceState& operator=(const BeginFrameSourceState&);
+  bool operator==(const BeginFrameSourceState&) const;
+  bool operator!=(const BeginFrameSourceState& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_source_id() const { return _has_field_[1]; }
+  uint32_t source_id() const { return source_id_; }
+  void set_source_id(uint32_t value) { source_id_ = value; _has_field_.set(1); }
+
+  bool has_paused() const { return _has_field_[2]; }
+  bool paused() const { return paused_; }
+  void set_paused(bool value) { paused_ = value; _has_field_.set(2); }
+
+  bool has_num_observers() const { return _has_field_[3]; }
+  uint32_t num_observers() const { return num_observers_; }
+  void set_num_observers(uint32_t value) { num_observers_ = value; _has_field_.set(3); }
+
+  bool has_last_begin_frame_args() const { return _has_field_[4]; }
+  const BeginFrameArgs& last_begin_frame_args() const { return *last_begin_frame_args_; }
+  BeginFrameArgs* mutable_last_begin_frame_args() { _has_field_.set(4); return last_begin_frame_args_.get(); }
+
+ private:
+  uint32_t source_id_{};
+  bool paused_{};
+  uint32_t num_observers_{};
+  ::protozero::CopyablePtr<BeginFrameArgs> last_begin_frame_args_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT BeginFrameArgs : public ::protozero::CppMessageObj {
+ public:
+  using BeginFrameArgsType = BeginFrameArgs_BeginFrameArgsType;
+  static constexpr auto BEGIN_FRAME_ARGS_TYPE_UNSPECIFIED = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_UNSPECIFIED;
+  static constexpr auto BEGIN_FRAME_ARGS_TYPE_INVALID = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_INVALID;
+  static constexpr auto BEGIN_FRAME_ARGS_TYPE_NORMAL = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_NORMAL;
+  static constexpr auto BEGIN_FRAME_ARGS_TYPE_MISSED = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_MISSED;
+  static constexpr auto BeginFrameArgsType_MIN = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_UNSPECIFIED;
+  static constexpr auto BeginFrameArgsType_MAX = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_MISSED;
+  enum FieldNumbers {
+    kTypeFieldNumber = 1,
+    kSourceIdFieldNumber = 2,
+    kSequenceNumberFieldNumber = 3,
+    kFrameTimeUsFieldNumber = 4,
+    kDeadlineUsFieldNumber = 5,
+    kIntervalDeltaUsFieldNumber = 6,
+    kOnCriticalPathFieldNumber = 7,
+    kAnimateOnlyFieldNumber = 8,
+    kSourceLocationIidFieldNumber = 9,
+    kSourceLocationFieldNumber = 10,
+  };
+
+  BeginFrameArgs();
+  ~BeginFrameArgs() override;
+  BeginFrameArgs(BeginFrameArgs&&) noexcept;
+  BeginFrameArgs& operator=(BeginFrameArgs&&);
+  BeginFrameArgs(const BeginFrameArgs&);
+  BeginFrameArgs& operator=(const BeginFrameArgs&);
+  bool operator==(const BeginFrameArgs&) const;
+  bool operator!=(const BeginFrameArgs& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_type() const { return _has_field_[1]; }
+  BeginFrameArgs_BeginFrameArgsType type() const { return type_; }
+  void set_type(BeginFrameArgs_BeginFrameArgsType value) { type_ = value; _has_field_.set(1); }
+
+  bool has_source_id() const { return _has_field_[2]; }
+  uint64_t source_id() const { return source_id_; }
+  void set_source_id(uint64_t value) { source_id_ = value; _has_field_.set(2); }
+
+  bool has_sequence_number() const { return _has_field_[3]; }
+  uint64_t sequence_number() const { return sequence_number_; }
+  void set_sequence_number(uint64_t value) { sequence_number_ = value; _has_field_.set(3); }
+
+  bool has_frame_time_us() const { return _has_field_[4]; }
+  int64_t frame_time_us() const { return frame_time_us_; }
+  void set_frame_time_us(int64_t value) { frame_time_us_ = value; _has_field_.set(4); }
+
+  bool has_deadline_us() const { return _has_field_[5]; }
+  int64_t deadline_us() const { return deadline_us_; }
+  void set_deadline_us(int64_t value) { deadline_us_ = value; _has_field_.set(5); }
+
+  bool has_interval_delta_us() const { return _has_field_[6]; }
+  int64_t interval_delta_us() const { return interval_delta_us_; }
+  void set_interval_delta_us(int64_t value) { interval_delta_us_ = value; _has_field_.set(6); }
+
+  bool has_on_critical_path() const { return _has_field_[7]; }
+  bool on_critical_path() const { return on_critical_path_; }
+  void set_on_critical_path(bool value) { on_critical_path_ = value; _has_field_.set(7); }
+
+  bool has_animate_only() const { return _has_field_[8]; }
+  bool animate_only() const { return animate_only_; }
+  void set_animate_only(bool value) { animate_only_ = value; _has_field_.set(8); }
+
+  bool has_source_location_iid() const { return _has_field_[9]; }
+  uint64_t source_location_iid() const { return source_location_iid_; }
+  void set_source_location_iid(uint64_t value) { source_location_iid_ = value; _has_field_.set(9); }
+
+  bool has_source_location() const { return _has_field_[10]; }
+  const SourceLocation& source_location() const { return *source_location_; }
+  SourceLocation* mutable_source_location() { _has_field_.set(10); return source_location_.get(); }
+
+ private:
+  BeginFrameArgs_BeginFrameArgsType type_{};
+  uint64_t source_id_{};
+  uint64_t sequence_number_{};
+  int64_t frame_time_us_{};
+  int64_t deadline_us_{};
+  int64_t interval_delta_us_{};
+  bool on_critical_path_{};
+  bool animate_only_{};
+  uint64_t source_location_iid_{};
+  ::protozero::CopyablePtr<SourceLocation> source_location_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<11> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT BeginFrameObserverState : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDroppedBeginFrameArgsFieldNumber = 1,
+    kLastBeginFrameArgsFieldNumber = 2,
+  };
+
+  BeginFrameObserverState();
+  ~BeginFrameObserverState() override;
+  BeginFrameObserverState(BeginFrameObserverState&&) noexcept;
+  BeginFrameObserverState& operator=(BeginFrameObserverState&&);
+  BeginFrameObserverState(const BeginFrameObserverState&);
+  BeginFrameObserverState& operator=(const BeginFrameObserverState&);
+  bool operator==(const BeginFrameObserverState&) const;
+  bool operator!=(const BeginFrameObserverState& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_dropped_begin_frame_args() const { return _has_field_[1]; }
+  int64_t dropped_begin_frame_args() const { return dropped_begin_frame_args_; }
+  void set_dropped_begin_frame_args(int64_t value) { dropped_begin_frame_args_ = value; _has_field_.set(1); }
+
+  bool has_last_begin_frame_args() const { return _has_field_[2]; }
+  const BeginFrameArgs& last_begin_frame_args() const { return *last_begin_frame_args_; }
+  BeginFrameArgs* mutable_last_begin_frame_args() { _has_field_.set(2); return last_begin_frame_args_.get(); }
+
+ private:
+  int64_t dropped_begin_frame_args_{};
+  ::protozero::CopyablePtr<BeginFrameArgs> last_begin_frame_args_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT BeginImplFrameArgs : public ::protozero::CppMessageObj {
+ public:
+  using TimestampsInUs = BeginImplFrameArgs_TimestampsInUs;
+  using State = BeginImplFrameArgs_State;
+  static constexpr auto BEGIN_FRAME_FINISHED = BeginImplFrameArgs_State_BEGIN_FRAME_FINISHED;
+  static constexpr auto BEGIN_FRAME_USING = BeginImplFrameArgs_State_BEGIN_FRAME_USING;
+  static constexpr auto State_MIN = BeginImplFrameArgs_State_BEGIN_FRAME_FINISHED;
+  static constexpr auto State_MAX = BeginImplFrameArgs_State_BEGIN_FRAME_USING;
+  enum FieldNumbers {
+    kUpdatedAtUsFieldNumber = 1,
+    kFinishedAtUsFieldNumber = 2,
+    kStateFieldNumber = 3,
+    kCurrentArgsFieldNumber = 4,
+    kLastArgsFieldNumber = 5,
+    kTimestampsInUsFieldNumber = 6,
+  };
+
+  BeginImplFrameArgs();
+  ~BeginImplFrameArgs() override;
+  BeginImplFrameArgs(BeginImplFrameArgs&&) noexcept;
+  BeginImplFrameArgs& operator=(BeginImplFrameArgs&&);
+  BeginImplFrameArgs(const BeginImplFrameArgs&);
+  BeginImplFrameArgs& operator=(const BeginImplFrameArgs&);
+  bool operator==(const BeginImplFrameArgs&) const;
+  bool operator!=(const BeginImplFrameArgs& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_updated_at_us() const { return _has_field_[1]; }
+  int64_t updated_at_us() const { return updated_at_us_; }
+  void set_updated_at_us(int64_t value) { updated_at_us_ = value; _has_field_.set(1); }
+
+  bool has_finished_at_us() const { return _has_field_[2]; }
+  int64_t finished_at_us() const { return finished_at_us_; }
+  void set_finished_at_us(int64_t value) { finished_at_us_ = value; _has_field_.set(2); }
+
+  bool has_state() const { return _has_field_[3]; }
+  BeginImplFrameArgs_State state() const { return state_; }
+  void set_state(BeginImplFrameArgs_State value) { state_ = value; _has_field_.set(3); }
+
+  bool has_current_args() const { return _has_field_[4]; }
+  const BeginFrameArgs& current_args() const { return *current_args_; }
+  BeginFrameArgs* mutable_current_args() { _has_field_.set(4); return current_args_.get(); }
+
+  bool has_last_args() const { return _has_field_[5]; }
+  const BeginFrameArgs& last_args() const { return *last_args_; }
+  BeginFrameArgs* mutable_last_args() { _has_field_.set(5); return last_args_.get(); }
+
+  bool has_timestamps_in_us() const { return _has_field_[6]; }
+  const BeginImplFrameArgs_TimestampsInUs& timestamps_in_us() const { return *timestamps_in_us_; }
+  BeginImplFrameArgs_TimestampsInUs* mutable_timestamps_in_us() { _has_field_.set(6); return timestamps_in_us_.get(); }
+
+ private:
+  int64_t updated_at_us_{};
+  int64_t finished_at_us_{};
+  BeginImplFrameArgs_State state_{};
+  ::protozero::CopyablePtr<BeginFrameArgs> current_args_;
+  ::protozero::CopyablePtr<BeginFrameArgs> last_args_;
+  ::protozero::CopyablePtr<BeginImplFrameArgs_TimestampsInUs> timestamps_in_us_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<7> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT BeginImplFrameArgs_TimestampsInUs : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIntervalDeltaFieldNumber = 1,
+    kNowToDeadlineDeltaFieldNumber = 2,
+    kFrameTimeToNowDeltaFieldNumber = 3,
+    kFrameTimeToDeadlineDeltaFieldNumber = 4,
+    kNowFieldNumber = 5,
+    kFrameTimeFieldNumber = 6,
+    kDeadlineFieldNumber = 7,
+  };
+
+  BeginImplFrameArgs_TimestampsInUs();
+  ~BeginImplFrameArgs_TimestampsInUs() override;
+  BeginImplFrameArgs_TimestampsInUs(BeginImplFrameArgs_TimestampsInUs&&) noexcept;
+  BeginImplFrameArgs_TimestampsInUs& operator=(BeginImplFrameArgs_TimestampsInUs&&);
+  BeginImplFrameArgs_TimestampsInUs(const BeginImplFrameArgs_TimestampsInUs&);
+  BeginImplFrameArgs_TimestampsInUs& operator=(const BeginImplFrameArgs_TimestampsInUs&);
+  bool operator==(const BeginImplFrameArgs_TimestampsInUs&) const;
+  bool operator!=(const BeginImplFrameArgs_TimestampsInUs& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_interval_delta() const { return _has_field_[1]; }
+  int64_t interval_delta() const { return interval_delta_; }
+  void set_interval_delta(int64_t value) { interval_delta_ = value; _has_field_.set(1); }
+
+  bool has_now_to_deadline_delta() const { return _has_field_[2]; }
+  int64_t now_to_deadline_delta() const { return now_to_deadline_delta_; }
+  void set_now_to_deadline_delta(int64_t value) { now_to_deadline_delta_ = value; _has_field_.set(2); }
+
+  bool has_frame_time_to_now_delta() const { return _has_field_[3]; }
+  int64_t frame_time_to_now_delta() const { return frame_time_to_now_delta_; }
+  void set_frame_time_to_now_delta(int64_t value) { frame_time_to_now_delta_ = value; _has_field_.set(3); }
+
+  bool has_frame_time_to_deadline_delta() const { return _has_field_[4]; }
+  int64_t frame_time_to_deadline_delta() const { return frame_time_to_deadline_delta_; }
+  void set_frame_time_to_deadline_delta(int64_t value) { frame_time_to_deadline_delta_ = value; _has_field_.set(4); }
+
+  bool has_now() const { return _has_field_[5]; }
+  int64_t now() const { return now_; }
+  void set_now(int64_t value) { now_ = value; _has_field_.set(5); }
+
+  bool has_frame_time() const { return _has_field_[6]; }
+  int64_t frame_time() const { return frame_time_; }
+  void set_frame_time(int64_t value) { frame_time_ = value; _has_field_.set(6); }
+
+  bool has_deadline() const { return _has_field_[7]; }
+  int64_t deadline() const { return deadline_; }
+  void set_deadline(int64_t value) { deadline_ = value; _has_field_.set(7); }
+
+ private:
+  int64_t interval_delta_{};
+  int64_t now_to_deadline_delta_{};
+  int64_t frame_time_to_now_delta_{};
+  int64_t frame_time_to_deadline_delta_{};
+  int64_t now_{};
+  int64_t frame_time_{};
+  int64_t deadline_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<8> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ChromeCompositorStateMachine : public ::protozero::CppMessageObj {
+ public:
+  using MajorState = ChromeCompositorStateMachine_MajorState;
+  using MinorState = ChromeCompositorStateMachine_MinorState;
+  enum FieldNumbers {
+    kMajorStateFieldNumber = 1,
+    kMinorStateFieldNumber = 2,
+  };
+
+  ChromeCompositorStateMachine();
+  ~ChromeCompositorStateMachine() override;
+  ChromeCompositorStateMachine(ChromeCompositorStateMachine&&) noexcept;
+  ChromeCompositorStateMachine& operator=(ChromeCompositorStateMachine&&);
+  ChromeCompositorStateMachine(const ChromeCompositorStateMachine&);
+  ChromeCompositorStateMachine& operator=(const ChromeCompositorStateMachine&);
+  bool operator==(const ChromeCompositorStateMachine&) const;
+  bool operator!=(const ChromeCompositorStateMachine& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_major_state() const { return _has_field_[1]; }
+  const ChromeCompositorStateMachine_MajorState& major_state() const { return *major_state_; }
+  ChromeCompositorStateMachine_MajorState* mutable_major_state() { _has_field_.set(1); return major_state_.get(); }
+
+  bool has_minor_state() const { return _has_field_[2]; }
+  const ChromeCompositorStateMachine_MinorState& minor_state() const { return *minor_state_; }
+  ChromeCompositorStateMachine_MinorState* mutable_minor_state() { _has_field_.set(2); return minor_state_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<ChromeCompositorStateMachine_MajorState> major_state_;
+  ::protozero::CopyablePtr<ChromeCompositorStateMachine_MinorState> minor_state_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ChromeCompositorStateMachine_MinorState : public ::protozero::CppMessageObj {
+ public:
+  using TreePriority = ChromeCompositorStateMachine_MinorState_TreePriority;
+  static constexpr auto TREE_PRIORITY_UNSPECIFIED = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_UNSPECIFIED;
+  static constexpr auto TREE_PRIORITY_SAME_PRIORITY_FOR_BOTH_TREES = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_SAME_PRIORITY_FOR_BOTH_TREES;
+  static constexpr auto TREE_PRIORITY_SMOOTHNESS_TAKES_PRIORITY = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_SMOOTHNESS_TAKES_PRIORITY;
+  static constexpr auto TREE_PRIORITY_NEW_CONTENT_TAKES_PRIORITY = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_NEW_CONTENT_TAKES_PRIORITY;
+  static constexpr auto TreePriority_MIN = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_UNSPECIFIED;
+  static constexpr auto TreePriority_MAX = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_NEW_CONTENT_TAKES_PRIORITY;
+  using ScrollHandlerState = ChromeCompositorStateMachine_MinorState_ScrollHandlerState;
+  static constexpr auto SCROLL_HANDLER_UNSPECIFIED = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_HANDLER_UNSPECIFIED;
+  static constexpr auto SCROLL_AFFECTS_SCROLL_HANDLER = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_AFFECTS_SCROLL_HANDLER;
+  static constexpr auto SCROLL_DOES_NOT_AFFECT_SCROLL_HANDLER = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_DOES_NOT_AFFECT_SCROLL_HANDLER;
+  static constexpr auto ScrollHandlerState_MIN = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_HANDLER_UNSPECIFIED;
+  static constexpr auto ScrollHandlerState_MAX = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_DOES_NOT_AFFECT_SCROLL_HANDLER;
+  enum FieldNumbers {
+    kCommitCountFieldNumber = 1,
+    kCurrentFrameNumberFieldNumber = 2,
+    kLastFrameNumberSubmitPerformedFieldNumber = 3,
+    kLastFrameNumberDrawPerformedFieldNumber = 4,
+    kLastFrameNumberBeginMainFrameSentFieldNumber = 5,
+    kDidDrawFieldNumber = 6,
+    kDidSendBeginMainFrameForCurrentFrameFieldNumber = 7,
+    kDidNotifyBeginMainFrameNotExpectedUntilFieldNumber = 8,
+    kDidNotifyBeginMainFrameNotExpectedSoonFieldNumber = 9,
+    kWantsBeginMainFrameNotExpectedFieldNumber = 10,
+    kDidCommitDuringFrameFieldNumber = 11,
+    kDidInvalidateLayerTreeFrameSinkFieldNumber = 12,
+    kDidPerformImplSideInvalidaionFieldNumber = 13,
+    kDidPrepareTilesFieldNumber = 14,
+    kConsecutiveCheckerboardAnimationsFieldNumber = 15,
+    kPendingSubmitFramesFieldNumber = 16,
+    kSubmitFramesWithCurrentLayerTreeFrameSinkFieldNumber = 17,
+    kNeedsRedrawFieldNumber = 18,
+    kNeedsPrepareTilesFieldNumber = 19,
+    kNeedsBeginMainFrameFieldNumber = 20,
+    kNeedsOneBeginImplFrameFieldNumber = 21,
+    kVisibleFieldNumber = 22,
+    kBeginFrameSourcePausedFieldNumber = 23,
+    kCanDrawFieldNumber = 24,
+    kResourcelessDrawFieldNumber = 25,
+    kHasPendingTreeFieldNumber = 26,
+    kPendingTreeIsReadyForActivationFieldNumber = 27,
+    kActiveTreeNeedsFirstDrawFieldNumber = 28,
+    kActiveTreeIsReadyToDrawFieldNumber = 29,
+    kDidCreateAndInitializeFirstLayerTreeFrameSinkFieldNumber = 30,
+    kTreePriorityFieldNumber = 31,
+    kScrollHandlerStateFieldNumber = 32,
+    kCriticalBeginMainFrameToActivateIsFastFieldNumber = 33,
+    kMainThreadMissedLastDeadlineFieldNumber = 34,
+    kSkipNextBeginMainFrameToReduceLatencyFieldNumber = 35,
+    kVideoNeedsBeginFramesFieldNumber = 36,
+    kDeferBeginMainFrameFieldNumber = 37,
+    kLastCommitHadNoUpdatesFieldNumber = 38,
+    kDidDrawInLastFrameFieldNumber = 39,
+    kDidSubmitInLastFrameFieldNumber = 40,
+    kNeedsImplSideInvalidationFieldNumber = 41,
+    kCurrentPendingTreeIsImplSideFieldNumber = 42,
+    kPreviousPendingTreeWasImplSideFieldNumber = 43,
+    kProcessingAnimationWorkletsForActiveTreeFieldNumber = 44,
+    kProcessingAnimationWorkletsForPendingTreeFieldNumber = 45,
+    kProcessingPaintWorkletsForPendingTreeFieldNumber = 46,
+  };
+
+  ChromeCompositorStateMachine_MinorState();
+  ~ChromeCompositorStateMachine_MinorState() override;
+  ChromeCompositorStateMachine_MinorState(ChromeCompositorStateMachine_MinorState&&) noexcept;
+  ChromeCompositorStateMachine_MinorState& operator=(ChromeCompositorStateMachine_MinorState&&);
+  ChromeCompositorStateMachine_MinorState(const ChromeCompositorStateMachine_MinorState&);
+  ChromeCompositorStateMachine_MinorState& operator=(const ChromeCompositorStateMachine_MinorState&);
+  bool operator==(const ChromeCompositorStateMachine_MinorState&) const;
+  bool operator!=(const ChromeCompositorStateMachine_MinorState& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_commit_count() const { return _has_field_[1]; }
+  int32_t commit_count() const { return commit_count_; }
+  void set_commit_count(int32_t value) { commit_count_ = value; _has_field_.set(1); }
+
+  bool has_current_frame_number() const { return _has_field_[2]; }
+  int32_t current_frame_number() const { return current_frame_number_; }
+  void set_current_frame_number(int32_t value) { current_frame_number_ = value; _has_field_.set(2); }
+
+  bool has_last_frame_number_submit_performed() const { return _has_field_[3]; }
+  int32_t last_frame_number_submit_performed() const { return last_frame_number_submit_performed_; }
+  void set_last_frame_number_submit_performed(int32_t value) { last_frame_number_submit_performed_ = value; _has_field_.set(3); }
+
+  bool has_last_frame_number_draw_performed() const { return _has_field_[4]; }
+  int32_t last_frame_number_draw_performed() const { return last_frame_number_draw_performed_; }
+  void set_last_frame_number_draw_performed(int32_t value) { last_frame_number_draw_performed_ = value; _has_field_.set(4); }
+
+  bool has_last_frame_number_begin_main_frame_sent() const { return _has_field_[5]; }
+  int32_t last_frame_number_begin_main_frame_sent() const { return last_frame_number_begin_main_frame_sent_; }
+  void set_last_frame_number_begin_main_frame_sent(int32_t value) { last_frame_number_begin_main_frame_sent_ = value; _has_field_.set(5); }
+
+  bool has_did_draw() const { return _has_field_[6]; }
+  bool did_draw() const { return did_draw_; }
+  void set_did_draw(bool value) { did_draw_ = value; _has_field_.set(6); }
+
+  bool has_did_send_begin_main_frame_for_current_frame() const { return _has_field_[7]; }
+  bool did_send_begin_main_frame_for_current_frame() const { return did_send_begin_main_frame_for_current_frame_; }
+  void set_did_send_begin_main_frame_for_current_frame(bool value) { did_send_begin_main_frame_for_current_frame_ = value; _has_field_.set(7); }
+
+  bool has_did_notify_begin_main_frame_not_expected_until() const { return _has_field_[8]; }
+  bool did_notify_begin_main_frame_not_expected_until() const { return did_notify_begin_main_frame_not_expected_until_; }
+  void set_did_notify_begin_main_frame_not_expected_until(bool value) { did_notify_begin_main_frame_not_expected_until_ = value; _has_field_.set(8); }
+
+  bool has_did_notify_begin_main_frame_not_expected_soon() const { return _has_field_[9]; }
+  bool did_notify_begin_main_frame_not_expected_soon() const { return did_notify_begin_main_frame_not_expected_soon_; }
+  void set_did_notify_begin_main_frame_not_expected_soon(bool value) { did_notify_begin_main_frame_not_expected_soon_ = value; _has_field_.set(9); }
+
+  bool has_wants_begin_main_frame_not_expected() const { return _has_field_[10]; }
+  bool wants_begin_main_frame_not_expected() const { return wants_begin_main_frame_not_expected_; }
+  void set_wants_begin_main_frame_not_expected(bool value) { wants_begin_main_frame_not_expected_ = value; _has_field_.set(10); }
+
+  bool has_did_commit_during_frame() const { return _has_field_[11]; }
+  bool did_commit_during_frame() const { return did_commit_during_frame_; }
+  void set_did_commit_during_frame(bool value) { did_commit_during_frame_ = value; _has_field_.set(11); }
+
+  bool has_did_invalidate_layer_tree_frame_sink() const { return _has_field_[12]; }
+  bool did_invalidate_layer_tree_frame_sink() const { return did_invalidate_layer_tree_frame_sink_; }
+  void set_did_invalidate_layer_tree_frame_sink(bool value) { did_invalidate_layer_tree_frame_sink_ = value; _has_field_.set(12); }
+
+  bool has_did_perform_impl_side_invalidaion() const { return _has_field_[13]; }
+  bool did_perform_impl_side_invalidaion() const { return did_perform_impl_side_invalidaion_; }
+  void set_did_perform_impl_side_invalidaion(bool value) { did_perform_impl_side_invalidaion_ = value; _has_field_.set(13); }
+
+  bool has_did_prepare_tiles() const { return _has_field_[14]; }
+  bool did_prepare_tiles() const { return did_prepare_tiles_; }
+  void set_did_prepare_tiles(bool value) { did_prepare_tiles_ = value; _has_field_.set(14); }
+
+  bool has_consecutive_checkerboard_animations() const { return _has_field_[15]; }
+  int32_t consecutive_checkerboard_animations() const { return consecutive_checkerboard_animations_; }
+  void set_consecutive_checkerboard_animations(int32_t value) { consecutive_checkerboard_animations_ = value; _has_field_.set(15); }
+
+  bool has_pending_submit_frames() const { return _has_field_[16]; }
+  int32_t pending_submit_frames() const { return pending_submit_frames_; }
+  void set_pending_submit_frames(int32_t value) { pending_submit_frames_ = value; _has_field_.set(16); }
+
+  bool has_submit_frames_with_current_layer_tree_frame_sink() const { return _has_field_[17]; }
+  int32_t submit_frames_with_current_layer_tree_frame_sink() const { return submit_frames_with_current_layer_tree_frame_sink_; }
+  void set_submit_frames_with_current_layer_tree_frame_sink(int32_t value) { submit_frames_with_current_layer_tree_frame_sink_ = value; _has_field_.set(17); }
+
+  bool has_needs_redraw() const { return _has_field_[18]; }
+  bool needs_redraw() const { return needs_redraw_; }
+  void set_needs_redraw(bool value) { needs_redraw_ = value; _has_field_.set(18); }
+
+  bool has_needs_prepare_tiles() const { return _has_field_[19]; }
+  bool needs_prepare_tiles() const { return needs_prepare_tiles_; }
+  void set_needs_prepare_tiles(bool value) { needs_prepare_tiles_ = value; _has_field_.set(19); }
+
+  bool has_needs_begin_main_frame() const { return _has_field_[20]; }
+  bool needs_begin_main_frame() const { return needs_begin_main_frame_; }
+  void set_needs_begin_main_frame(bool value) { needs_begin_main_frame_ = value; _has_field_.set(20); }
+
+  bool has_needs_one_begin_impl_frame() const { return _has_field_[21]; }
+  bool needs_one_begin_impl_frame() const { return needs_one_begin_impl_frame_; }
+  void set_needs_one_begin_impl_frame(bool value) { needs_one_begin_impl_frame_ = value; _has_field_.set(21); }
+
+  bool has_visible() const { return _has_field_[22]; }
+  bool visible() const { return visible_; }
+  void set_visible(bool value) { visible_ = value; _has_field_.set(22); }
+
+  bool has_begin_frame_source_paused() const { return _has_field_[23]; }
+  bool begin_frame_source_paused() const { return begin_frame_source_paused_; }
+  void set_begin_frame_source_paused(bool value) { begin_frame_source_paused_ = value; _has_field_.set(23); }
+
+  bool has_can_draw() const { return _has_field_[24]; }
+  bool can_draw() const { return can_draw_; }
+  void set_can_draw(bool value) { can_draw_ = value; _has_field_.set(24); }
+
+  bool has_resourceless_draw() const { return _has_field_[25]; }
+  bool resourceless_draw() const { return resourceless_draw_; }
+  void set_resourceless_draw(bool value) { resourceless_draw_ = value; _has_field_.set(25); }
+
+  bool has_has_pending_tree() const { return _has_field_[26]; }
+  bool has_pending_tree() const { return has_pending_tree_; }
+  void set_has_pending_tree(bool value) { has_pending_tree_ = value; _has_field_.set(26); }
+
+  bool has_pending_tree_is_ready_for_activation() const { return _has_field_[27]; }
+  bool pending_tree_is_ready_for_activation() const { return pending_tree_is_ready_for_activation_; }
+  void set_pending_tree_is_ready_for_activation(bool value) { pending_tree_is_ready_for_activation_ = value; _has_field_.set(27); }
+
+  bool has_active_tree_needs_first_draw() const { return _has_field_[28]; }
+  bool active_tree_needs_first_draw() const { return active_tree_needs_first_draw_; }
+  void set_active_tree_needs_first_draw(bool value) { active_tree_needs_first_draw_ = value; _has_field_.set(28); }
+
+  bool has_active_tree_is_ready_to_draw() const { return _has_field_[29]; }
+  bool active_tree_is_ready_to_draw() const { return active_tree_is_ready_to_draw_; }
+  void set_active_tree_is_ready_to_draw(bool value) { active_tree_is_ready_to_draw_ = value; _has_field_.set(29); }
+
+  bool has_did_create_and_initialize_first_layer_tree_frame_sink() const { return _has_field_[30]; }
+  bool did_create_and_initialize_first_layer_tree_frame_sink() const { return did_create_and_initialize_first_layer_tree_frame_sink_; }
+  void set_did_create_and_initialize_first_layer_tree_frame_sink(bool value) { did_create_and_initialize_first_layer_tree_frame_sink_ = value; _has_field_.set(30); }
+
+  bool has_tree_priority() const { return _has_field_[31]; }
+  ChromeCompositorStateMachine_MinorState_TreePriority tree_priority() const { return tree_priority_; }
+  void set_tree_priority(ChromeCompositorStateMachine_MinorState_TreePriority value) { tree_priority_ = value; _has_field_.set(31); }
+
+  bool has_scroll_handler_state() const { return _has_field_[32]; }
+  ChromeCompositorStateMachine_MinorState_ScrollHandlerState scroll_handler_state() const { return scroll_handler_state_; }
+  void set_scroll_handler_state(ChromeCompositorStateMachine_MinorState_ScrollHandlerState value) { scroll_handler_state_ = value; _has_field_.set(32); }
+
+  bool has_critical_begin_main_frame_to_activate_is_fast() const { return _has_field_[33]; }
+  bool critical_begin_main_frame_to_activate_is_fast() const { return critical_begin_main_frame_to_activate_is_fast_; }
+  void set_critical_begin_main_frame_to_activate_is_fast(bool value) { critical_begin_main_frame_to_activate_is_fast_ = value; _has_field_.set(33); }
+
+  bool has_main_thread_missed_last_deadline() const { return _has_field_[34]; }
+  bool main_thread_missed_last_deadline() const { return main_thread_missed_last_deadline_; }
+  void set_main_thread_missed_last_deadline(bool value) { main_thread_missed_last_deadline_ = value; _has_field_.set(34); }
+
+  bool has_skip_next_begin_main_frame_to_reduce_latency() const { return _has_field_[35]; }
+  bool skip_next_begin_main_frame_to_reduce_latency() const { return skip_next_begin_main_frame_to_reduce_latency_; }
+  void set_skip_next_begin_main_frame_to_reduce_latency(bool value) { skip_next_begin_main_frame_to_reduce_latency_ = value; _has_field_.set(35); }
+
+  bool has_video_needs_begin_frames() const { return _has_field_[36]; }
+  bool video_needs_begin_frames() const { return video_needs_begin_frames_; }
+  void set_video_needs_begin_frames(bool value) { video_needs_begin_frames_ = value; _has_field_.set(36); }
+
+  bool has_defer_begin_main_frame() const { return _has_field_[37]; }
+  bool defer_begin_main_frame() const { return defer_begin_main_frame_; }
+  void set_defer_begin_main_frame(bool value) { defer_begin_main_frame_ = value; _has_field_.set(37); }
+
+  bool has_last_commit_had_no_updates() const { return _has_field_[38]; }
+  bool last_commit_had_no_updates() const { return last_commit_had_no_updates_; }
+  void set_last_commit_had_no_updates(bool value) { last_commit_had_no_updates_ = value; _has_field_.set(38); }
+
+  bool has_did_draw_in_last_frame() const { return _has_field_[39]; }
+  bool did_draw_in_last_frame() const { return did_draw_in_last_frame_; }
+  void set_did_draw_in_last_frame(bool value) { did_draw_in_last_frame_ = value; _has_field_.set(39); }
+
+  bool has_did_submit_in_last_frame() const { return _has_field_[40]; }
+  bool did_submit_in_last_frame() const { return did_submit_in_last_frame_; }
+  void set_did_submit_in_last_frame(bool value) { did_submit_in_last_frame_ = value; _has_field_.set(40); }
+
+  bool has_needs_impl_side_invalidation() const { return _has_field_[41]; }
+  bool needs_impl_side_invalidation() const { return needs_impl_side_invalidation_; }
+  void set_needs_impl_side_invalidation(bool value) { needs_impl_side_invalidation_ = value; _has_field_.set(41); }
+
+  bool has_current_pending_tree_is_impl_side() const { return _has_field_[42]; }
+  bool current_pending_tree_is_impl_side() const { return current_pending_tree_is_impl_side_; }
+  void set_current_pending_tree_is_impl_side(bool value) { current_pending_tree_is_impl_side_ = value; _has_field_.set(42); }
+
+  bool has_previous_pending_tree_was_impl_side() const { return _has_field_[43]; }
+  bool previous_pending_tree_was_impl_side() const { return previous_pending_tree_was_impl_side_; }
+  void set_previous_pending_tree_was_impl_side(bool value) { previous_pending_tree_was_impl_side_ = value; _has_field_.set(43); }
+
+  bool has_processing_animation_worklets_for_active_tree() const { return _has_field_[44]; }
+  bool processing_animation_worklets_for_active_tree() const { return processing_animation_worklets_for_active_tree_; }
+  void set_processing_animation_worklets_for_active_tree(bool value) { processing_animation_worklets_for_active_tree_ = value; _has_field_.set(44); }
+
+  bool has_processing_animation_worklets_for_pending_tree() const { return _has_field_[45]; }
+  bool processing_animation_worklets_for_pending_tree() const { return processing_animation_worklets_for_pending_tree_; }
+  void set_processing_animation_worklets_for_pending_tree(bool value) { processing_animation_worklets_for_pending_tree_ = value; _has_field_.set(45); }
+
+  bool has_processing_paint_worklets_for_pending_tree() const { return _has_field_[46]; }
+  bool processing_paint_worklets_for_pending_tree() const { return processing_paint_worklets_for_pending_tree_; }
+  void set_processing_paint_worklets_for_pending_tree(bool value) { processing_paint_worklets_for_pending_tree_ = value; _has_field_.set(46); }
+
+ private:
+  int32_t commit_count_{};
+  int32_t current_frame_number_{};
+  int32_t last_frame_number_submit_performed_{};
+  int32_t last_frame_number_draw_performed_{};
+  int32_t last_frame_number_begin_main_frame_sent_{};
+  bool did_draw_{};
+  bool did_send_begin_main_frame_for_current_frame_{};
+  bool did_notify_begin_main_frame_not_expected_until_{};
+  bool did_notify_begin_main_frame_not_expected_soon_{};
+  bool wants_begin_main_frame_not_expected_{};
+  bool did_commit_during_frame_{};
+  bool did_invalidate_layer_tree_frame_sink_{};
+  bool did_perform_impl_side_invalidaion_{};
+  bool did_prepare_tiles_{};
+  int32_t consecutive_checkerboard_animations_{};
+  int32_t pending_submit_frames_{};
+  int32_t submit_frames_with_current_layer_tree_frame_sink_{};
+  bool needs_redraw_{};
+  bool needs_prepare_tiles_{};
+  bool needs_begin_main_frame_{};
+  bool needs_one_begin_impl_frame_{};
+  bool visible_{};
+  bool begin_frame_source_paused_{};
+  bool can_draw_{};
+  bool resourceless_draw_{};
+  bool has_pending_tree_{};
+  bool pending_tree_is_ready_for_activation_{};
+  bool active_tree_needs_first_draw_{};
+  bool active_tree_is_ready_to_draw_{};
+  bool did_create_and_initialize_first_layer_tree_frame_sink_{};
+  ChromeCompositorStateMachine_MinorState_TreePriority tree_priority_{};
+  ChromeCompositorStateMachine_MinorState_ScrollHandlerState scroll_handler_state_{};
+  bool critical_begin_main_frame_to_activate_is_fast_{};
+  bool main_thread_missed_last_deadline_{};
+  bool skip_next_begin_main_frame_to_reduce_latency_{};
+  bool video_needs_begin_frames_{};
+  bool defer_begin_main_frame_{};
+  bool last_commit_had_no_updates_{};
+  bool did_draw_in_last_frame_{};
+  bool did_submit_in_last_frame_{};
+  bool needs_impl_side_invalidation_{};
+  bool current_pending_tree_is_impl_side_{};
+  bool previous_pending_tree_was_impl_side_{};
+  bool processing_animation_worklets_for_active_tree_{};
+  bool processing_animation_worklets_for_pending_tree_{};
+  bool processing_paint_worklets_for_pending_tree_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<47> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ChromeCompositorStateMachine_MajorState : public ::protozero::CppMessageObj {
+ public:
+  using BeginImplFrameState = ChromeCompositorStateMachine_MajorState_BeginImplFrameState;
+  static constexpr auto BEGIN_IMPL_FRAME_UNSPECIFIED = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_UNSPECIFIED;
+  static constexpr auto BEGIN_IMPL_FRAME_IDLE = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_IDLE;
+  static constexpr auto BEGIN_IMPL_FRAME_INSIDE_BEGIN_FRAME = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_BEGIN_FRAME;
+  static constexpr auto BEGIN_IMPL_FRAME_INSIDE_DEADLINE = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_DEADLINE;
+  static constexpr auto BeginImplFrameState_MIN = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_UNSPECIFIED;
+  static constexpr auto BeginImplFrameState_MAX = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_DEADLINE;
+  using BeginMainFrameState = ChromeCompositorStateMachine_MajorState_BeginMainFrameState;
+  static constexpr auto BEGIN_MAIN_FRAME_UNSPECIFIED = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_UNSPECIFIED;
+  static constexpr auto BEGIN_MAIN_FRAME_IDLE = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_IDLE;
+  static constexpr auto BEGIN_MAIN_FRAME_SENT = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_SENT;
+  static constexpr auto BEGIN_MAIN_FRAME_READY_TO_COMMIT = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_READY_TO_COMMIT;
+  static constexpr auto BeginMainFrameState_MIN = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_UNSPECIFIED;
+  static constexpr auto BeginMainFrameState_MAX = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_READY_TO_COMMIT;
+  using LayerTreeFrameSinkState = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState;
+  static constexpr auto LAYER_TREE_FRAME_UNSPECIFIED = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_UNSPECIFIED;
+  static constexpr auto LAYER_TREE_FRAME_NONE = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_NONE;
+  static constexpr auto LAYER_TREE_FRAME_ACTIVE = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_ACTIVE;
+  static constexpr auto LAYER_TREE_FRAME_CREATING = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_CREATING;
+  static constexpr auto LAYER_TREE_FRAME_WAITING_FOR_FIRST_COMMIT = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_COMMIT;
+  static constexpr auto LAYER_TREE_FRAME_WAITING_FOR_FIRST_ACTIVATION = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_ACTIVATION;
+  static constexpr auto LayerTreeFrameSinkState_MIN = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_UNSPECIFIED;
+  static constexpr auto LayerTreeFrameSinkState_MAX = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_ACTIVATION;
+  using ForcedRedrawOnTimeoutState = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState;
+  static constexpr auto FORCED_REDRAW_UNSPECIFIED = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_UNSPECIFIED;
+  static constexpr auto FORCED_REDRAW_IDLE = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_IDLE;
+  static constexpr auto FORCED_REDRAW_WAITING_FOR_COMMIT = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_COMMIT;
+  static constexpr auto FORCED_REDRAW_WAITING_FOR_ACTIVATION = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_ACTIVATION;
+  static constexpr auto FORCED_REDRAW_WAITING_FOR_DRAW = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_DRAW;
+  static constexpr auto ForcedRedrawOnTimeoutState_MIN = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_UNSPECIFIED;
+  static constexpr auto ForcedRedrawOnTimeoutState_MAX = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_DRAW;
+  enum FieldNumbers {
+    kNextActionFieldNumber = 1,
+    kBeginImplFrameStateFieldNumber = 2,
+    kBeginMainFrameStateFieldNumber = 3,
+    kLayerTreeFrameSinkStateFieldNumber = 4,
+    kForcedRedrawStateFieldNumber = 5,
+  };
+
+  ChromeCompositorStateMachine_MajorState();
+  ~ChromeCompositorStateMachine_MajorState() override;
+  ChromeCompositorStateMachine_MajorState(ChromeCompositorStateMachine_MajorState&&) noexcept;
+  ChromeCompositorStateMachine_MajorState& operator=(ChromeCompositorStateMachine_MajorState&&);
+  ChromeCompositorStateMachine_MajorState(const ChromeCompositorStateMachine_MajorState&);
+  ChromeCompositorStateMachine_MajorState& operator=(const ChromeCompositorStateMachine_MajorState&);
+  bool operator==(const ChromeCompositorStateMachine_MajorState&) const;
+  bool operator!=(const ChromeCompositorStateMachine_MajorState& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_next_action() const { return _has_field_[1]; }
+  ChromeCompositorSchedulerAction next_action() const { return next_action_; }
+  void set_next_action(ChromeCompositorSchedulerAction value) { next_action_ = value; _has_field_.set(1); }
+
+  bool has_begin_impl_frame_state() const { return _has_field_[2]; }
+  ChromeCompositorStateMachine_MajorState_BeginImplFrameState begin_impl_frame_state() const { return begin_impl_frame_state_; }
+  void set_begin_impl_frame_state(ChromeCompositorStateMachine_MajorState_BeginImplFrameState value) { begin_impl_frame_state_ = value; _has_field_.set(2); }
+
+  bool has_begin_main_frame_state() const { return _has_field_[3]; }
+  ChromeCompositorStateMachine_MajorState_BeginMainFrameState begin_main_frame_state() const { return begin_main_frame_state_; }
+  void set_begin_main_frame_state(ChromeCompositorStateMachine_MajorState_BeginMainFrameState value) { begin_main_frame_state_ = value; _has_field_.set(3); }
+
+  bool has_layer_tree_frame_sink_state() const { return _has_field_[4]; }
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState layer_tree_frame_sink_state() const { return layer_tree_frame_sink_state_; }
+  void set_layer_tree_frame_sink_state(ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState value) { layer_tree_frame_sink_state_ = value; _has_field_.set(4); }
+
+  bool has_forced_redraw_state() const { return _has_field_[5]; }
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState forced_redraw_state() const { return forced_redraw_state_; }
+  void set_forced_redraw_state(ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState value) { forced_redraw_state_ = value; _has_field_.set(5); }
+
+ private:
+  ChromeCompositorSchedulerAction next_action_{};
+  ChromeCompositorStateMachine_MajorState_BeginImplFrameState begin_impl_frame_state_{};
+  ChromeCompositorStateMachine_MajorState_BeginMainFrameState begin_main_frame_state_{};
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState layer_tree_frame_sink_state_{};
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState forced_redraw_state_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ChromeCompositorSchedulerState : public ::protozero::CppMessageObj {
+ public:
+  using BeginImplFrameDeadlineMode = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode;
+  static constexpr auto DEADLINE_MODE_UNSPECIFIED = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_UNSPECIFIED;
+  static constexpr auto DEADLINE_MODE_NONE = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_NONE;
+  static constexpr auto DEADLINE_MODE_IMMEDIATE = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_IMMEDIATE;
+  static constexpr auto DEADLINE_MODE_REGULAR = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_REGULAR;
+  static constexpr auto DEADLINE_MODE_LATE = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_LATE;
+  static constexpr auto DEADLINE_MODE_BLOCKED = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_BLOCKED;
+  static constexpr auto BeginImplFrameDeadlineMode_MIN = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_UNSPECIFIED;
+  static constexpr auto BeginImplFrameDeadlineMode_MAX = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_BLOCKED;
+  enum FieldNumbers {
+    kStateMachineFieldNumber = 1,
+    kObservingBeginFrameSourceFieldNumber = 2,
+    kBeginImplFrameDeadlineTaskFieldNumber = 3,
+    kPendingBeginFrameTaskFieldNumber = 4,
+    kSkippedLastFrameMissedExceededDeadlineFieldNumber = 5,
+    kSkippedLastFrameToReduceLatencyFieldNumber = 6,
+    kInsideActionFieldNumber = 7,
+    kDeadlineModeFieldNumber = 8,
+    kDeadlineUsFieldNumber = 9,
+    kDeadlineScheduledAtUsFieldNumber = 10,
+    kNowUsFieldNumber = 11,
+    kNowToDeadlineDeltaUsFieldNumber = 12,
+    kNowToDeadlineScheduledAtDeltaUsFieldNumber = 13,
+    kBeginImplFrameArgsFieldNumber = 14,
+    kBeginFrameObserverStateFieldNumber = 15,
+    kBeginFrameSourceStateFieldNumber = 16,
+    kCompositorTimingHistoryFieldNumber = 17,
+  };
+
+  ChromeCompositorSchedulerState();
+  ~ChromeCompositorSchedulerState() override;
+  ChromeCompositorSchedulerState(ChromeCompositorSchedulerState&&) noexcept;
+  ChromeCompositorSchedulerState& operator=(ChromeCompositorSchedulerState&&);
+  ChromeCompositorSchedulerState(const ChromeCompositorSchedulerState&);
+  ChromeCompositorSchedulerState& operator=(const ChromeCompositorSchedulerState&);
+  bool operator==(const ChromeCompositorSchedulerState&) const;
+  bool operator!=(const ChromeCompositorSchedulerState& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_state_machine() const { return _has_field_[1]; }
+  const ChromeCompositorStateMachine& state_machine() const { return *state_machine_; }
+  ChromeCompositorStateMachine* mutable_state_machine() { _has_field_.set(1); return state_machine_.get(); }
+
+  bool has_observing_begin_frame_source() const { return _has_field_[2]; }
+  bool observing_begin_frame_source() const { return observing_begin_frame_source_; }
+  void set_observing_begin_frame_source(bool value) { observing_begin_frame_source_ = value; _has_field_.set(2); }
+
+  bool has_begin_impl_frame_deadline_task() const { return _has_field_[3]; }
+  bool begin_impl_frame_deadline_task() const { return begin_impl_frame_deadline_task_; }
+  void set_begin_impl_frame_deadline_task(bool value) { begin_impl_frame_deadline_task_ = value; _has_field_.set(3); }
+
+  bool has_pending_begin_frame_task() const { return _has_field_[4]; }
+  bool pending_begin_frame_task() const { return pending_begin_frame_task_; }
+  void set_pending_begin_frame_task(bool value) { pending_begin_frame_task_ = value; _has_field_.set(4); }
+
+  bool has_skipped_last_frame_missed_exceeded_deadline() const { return _has_field_[5]; }
+  bool skipped_last_frame_missed_exceeded_deadline() const { return skipped_last_frame_missed_exceeded_deadline_; }
+  void set_skipped_last_frame_missed_exceeded_deadline(bool value) { skipped_last_frame_missed_exceeded_deadline_ = value; _has_field_.set(5); }
+
+  bool has_skipped_last_frame_to_reduce_latency() const { return _has_field_[6]; }
+  bool skipped_last_frame_to_reduce_latency() const { return skipped_last_frame_to_reduce_latency_; }
+  void set_skipped_last_frame_to_reduce_latency(bool value) { skipped_last_frame_to_reduce_latency_ = value; _has_field_.set(6); }
+
+  bool has_inside_action() const { return _has_field_[7]; }
+  ChromeCompositorSchedulerAction inside_action() const { return inside_action_; }
+  void set_inside_action(ChromeCompositorSchedulerAction value) { inside_action_ = value; _has_field_.set(7); }
+
+  bool has_deadline_mode() const { return _has_field_[8]; }
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode deadline_mode() const { return deadline_mode_; }
+  void set_deadline_mode(ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode value) { deadline_mode_ = value; _has_field_.set(8); }
+
+  bool has_deadline_us() const { return _has_field_[9]; }
+  int64_t deadline_us() const { return deadline_us_; }
+  void set_deadline_us(int64_t value) { deadline_us_ = value; _has_field_.set(9); }
+
+  bool has_deadline_scheduled_at_us() const { return _has_field_[10]; }
+  int64_t deadline_scheduled_at_us() const { return deadline_scheduled_at_us_; }
+  void set_deadline_scheduled_at_us(int64_t value) { deadline_scheduled_at_us_ = value; _has_field_.set(10); }
+
+  bool has_now_us() const { return _has_field_[11]; }
+  int64_t now_us() const { return now_us_; }
+  void set_now_us(int64_t value) { now_us_ = value; _has_field_.set(11); }
+
+  bool has_now_to_deadline_delta_us() const { return _has_field_[12]; }
+  int64_t now_to_deadline_delta_us() const { return now_to_deadline_delta_us_; }
+  void set_now_to_deadline_delta_us(int64_t value) { now_to_deadline_delta_us_ = value; _has_field_.set(12); }
+
+  bool has_now_to_deadline_scheduled_at_delta_us() const { return _has_field_[13]; }
+  int64_t now_to_deadline_scheduled_at_delta_us() const { return now_to_deadline_scheduled_at_delta_us_; }
+  void set_now_to_deadline_scheduled_at_delta_us(int64_t value) { now_to_deadline_scheduled_at_delta_us_ = value; _has_field_.set(13); }
+
+  bool has_begin_impl_frame_args() const { return _has_field_[14]; }
+  const BeginImplFrameArgs& begin_impl_frame_args() const { return *begin_impl_frame_args_; }
+  BeginImplFrameArgs* mutable_begin_impl_frame_args() { _has_field_.set(14); return begin_impl_frame_args_.get(); }
+
+  bool has_begin_frame_observer_state() const { return _has_field_[15]; }
+  const BeginFrameObserverState& begin_frame_observer_state() const { return *begin_frame_observer_state_; }
+  BeginFrameObserverState* mutable_begin_frame_observer_state() { _has_field_.set(15); return begin_frame_observer_state_.get(); }
+
+  bool has_begin_frame_source_state() const { return _has_field_[16]; }
+  const BeginFrameSourceState& begin_frame_source_state() const { return *begin_frame_source_state_; }
+  BeginFrameSourceState* mutable_begin_frame_source_state() { _has_field_.set(16); return begin_frame_source_state_.get(); }
+
+  bool has_compositor_timing_history() const { return _has_field_[17]; }
+  const CompositorTimingHistory& compositor_timing_history() const { return *compositor_timing_history_; }
+  CompositorTimingHistory* mutable_compositor_timing_history() { _has_field_.set(17); return compositor_timing_history_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<ChromeCompositorStateMachine> state_machine_;
+  bool observing_begin_frame_source_{};
+  bool begin_impl_frame_deadline_task_{};
+  bool pending_begin_frame_task_{};
+  bool skipped_last_frame_missed_exceeded_deadline_{};
+  bool skipped_last_frame_to_reduce_latency_{};
+  ChromeCompositorSchedulerAction inside_action_{};
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode deadline_mode_{};
+  int64_t deadline_us_{};
+  int64_t deadline_scheduled_at_us_{};
+  int64_t now_us_{};
+  int64_t now_to_deadline_delta_us_{};
+  int64_t now_to_deadline_scheduled_at_delta_us_{};
+  ::protozero::CopyablePtr<BeginImplFrameArgs> begin_impl_frame_args_;
+  ::protozero::CopyablePtr<BeginFrameObserverState> begin_frame_observer_state_;
+  ::protozero::CopyablePtr<BeginFrameSourceState> begin_frame_source_state_;
+  ::protozero::CopyablePtr<CompositorTimingHistory> compositor_timing_history_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<18> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_COMPOSITOR_SCHEDULER_STATE_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/source_location.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_SOURCE_LOCATION_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_SOURCE_LOCATION_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class SourceLocation;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT SourceLocation : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIidFieldNumber = 1,
+    kFileNameFieldNumber = 2,
+    kFunctionNameFieldNumber = 3,
+    kLineNumberFieldNumber = 4,
+  };
+
+  SourceLocation();
+  ~SourceLocation() override;
+  SourceLocation(SourceLocation&&) noexcept;
+  SourceLocation& operator=(SourceLocation&&);
+  SourceLocation(const SourceLocation&);
+  SourceLocation& operator=(const SourceLocation&);
+  bool operator==(const SourceLocation&) const;
+  bool operator!=(const SourceLocation& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_iid() const { return _has_field_[1]; }
+  uint64_t iid() const { return iid_; }
+  void set_iid(uint64_t value) { iid_ = value; _has_field_.set(1); }
+
+  bool has_file_name() const { return _has_field_[2]; }
+  const std::string& file_name() const { return file_name_; }
+  void set_file_name(const std::string& value) { file_name_ = value; _has_field_.set(2); }
+
+  bool has_function_name() const { return _has_field_[3]; }
+  const std::string& function_name() const { return function_name_; }
+  void set_function_name(const std::string& value) { function_name_ = value; _has_field_.set(3); }
+
+  bool has_line_number() const { return _has_field_[4]; }
+  uint32_t line_number() const { return line_number_; }
+  void set_line_number(uint32_t value) { line_number_ = value; _has_field_.set(4); }
+
+ private:
+  uint64_t iid_{};
+  std::string file_name_{};
+  std::string function_name_{};
+  uint32_t line_number_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_SOURCE_LOCATION_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_compositor_scheduler_state.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/source_location.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+CompositorTimingHistory::CompositorTimingHistory() = default;
+CompositorTimingHistory::~CompositorTimingHistory() = default;
+CompositorTimingHistory::CompositorTimingHistory(const CompositorTimingHistory&) = default;
+CompositorTimingHistory& CompositorTimingHistory::operator=(const CompositorTimingHistory&) = default;
+CompositorTimingHistory::CompositorTimingHistory(CompositorTimingHistory&&) noexcept = default;
+CompositorTimingHistory& CompositorTimingHistory::operator=(CompositorTimingHistory&&) = default;
+
+bool CompositorTimingHistory::operator==(const CompositorTimingHistory& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && begin_main_frame_queue_critical_estimate_delta_us_ == other.begin_main_frame_queue_critical_estimate_delta_us_
+   && begin_main_frame_queue_not_critical_estimate_delta_us_ == other.begin_main_frame_queue_not_critical_estimate_delta_us_
+   && begin_main_frame_start_to_ready_to_commit_estimate_delta_us_ == other.begin_main_frame_start_to_ready_to_commit_estimate_delta_us_
+   && commit_to_ready_to_activate_estimate_delta_us_ == other.commit_to_ready_to_activate_estimate_delta_us_
+   && prepare_tiles_estimate_delta_us_ == other.prepare_tiles_estimate_delta_us_
+   && activate_estimate_delta_us_ == other.activate_estimate_delta_us_
+   && draw_estimate_delta_us_ == other.draw_estimate_delta_us_;
+}
+
+bool CompositorTimingHistory::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* begin_main_frame_queue_critical_estimate_delta_us */:
+        field.get(&begin_main_frame_queue_critical_estimate_delta_us_);
+        break;
+      case 2 /* begin_main_frame_queue_not_critical_estimate_delta_us */:
+        field.get(&begin_main_frame_queue_not_critical_estimate_delta_us_);
+        break;
+      case 3 /* begin_main_frame_start_to_ready_to_commit_estimate_delta_us */:
+        field.get(&begin_main_frame_start_to_ready_to_commit_estimate_delta_us_);
+        break;
+      case 4 /* commit_to_ready_to_activate_estimate_delta_us */:
+        field.get(&commit_to_ready_to_activate_estimate_delta_us_);
+        break;
+      case 5 /* prepare_tiles_estimate_delta_us */:
+        field.get(&prepare_tiles_estimate_delta_us_);
+        break;
+      case 6 /* activate_estimate_delta_us */:
+        field.get(&activate_estimate_delta_us_);
+        break;
+      case 7 /* draw_estimate_delta_us */:
+        field.get(&draw_estimate_delta_us_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string CompositorTimingHistory::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> CompositorTimingHistory::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void CompositorTimingHistory::Serialize(::protozero::Message* msg) const {
+  // Field 1: begin_main_frame_queue_critical_estimate_delta_us
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, begin_main_frame_queue_critical_estimate_delta_us_);
+  }
+
+  // Field 2: begin_main_frame_queue_not_critical_estimate_delta_us
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, begin_main_frame_queue_not_critical_estimate_delta_us_);
+  }
+
+  // Field 3: begin_main_frame_start_to_ready_to_commit_estimate_delta_us
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, begin_main_frame_start_to_ready_to_commit_estimate_delta_us_);
+  }
+
+  // Field 4: commit_to_ready_to_activate_estimate_delta_us
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, commit_to_ready_to_activate_estimate_delta_us_);
+  }
+
+  // Field 5: prepare_tiles_estimate_delta_us
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, prepare_tiles_estimate_delta_us_);
+  }
+
+  // Field 6: activate_estimate_delta_us
+  if (_has_field_[6]) {
+    msg->AppendVarInt(6, activate_estimate_delta_us_);
+  }
+
+  // Field 7: draw_estimate_delta_us
+  if (_has_field_[7]) {
+    msg->AppendVarInt(7, draw_estimate_delta_us_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+BeginFrameSourceState::BeginFrameSourceState() = default;
+BeginFrameSourceState::~BeginFrameSourceState() = default;
+BeginFrameSourceState::BeginFrameSourceState(const BeginFrameSourceState&) = default;
+BeginFrameSourceState& BeginFrameSourceState::operator=(const BeginFrameSourceState&) = default;
+BeginFrameSourceState::BeginFrameSourceState(BeginFrameSourceState&&) noexcept = default;
+BeginFrameSourceState& BeginFrameSourceState::operator=(BeginFrameSourceState&&) = default;
+
+bool BeginFrameSourceState::operator==(const BeginFrameSourceState& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && source_id_ == other.source_id_
+   && paused_ == other.paused_
+   && num_observers_ == other.num_observers_
+   && last_begin_frame_args_ == other.last_begin_frame_args_;
+}
+
+bool BeginFrameSourceState::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* source_id */:
+        field.get(&source_id_);
+        break;
+      case 2 /* paused */:
+        field.get(&paused_);
+        break;
+      case 3 /* num_observers */:
+        field.get(&num_observers_);
+        break;
+      case 4 /* last_begin_frame_args */:
+        (*last_begin_frame_args_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string BeginFrameSourceState::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> BeginFrameSourceState::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void BeginFrameSourceState::Serialize(::protozero::Message* msg) const {
+  // Field 1: source_id
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, source_id_);
+  }
+
+  // Field 2: paused
+  if (_has_field_[2]) {
+    msg->AppendTinyVarInt(2, paused_);
+  }
+
+  // Field 3: num_observers
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, num_observers_);
+  }
+
+  // Field 4: last_begin_frame_args
+  if (_has_field_[4]) {
+    (*last_begin_frame_args_).Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+BeginFrameArgs::BeginFrameArgs() = default;
+BeginFrameArgs::~BeginFrameArgs() = default;
+BeginFrameArgs::BeginFrameArgs(const BeginFrameArgs&) = default;
+BeginFrameArgs& BeginFrameArgs::operator=(const BeginFrameArgs&) = default;
+BeginFrameArgs::BeginFrameArgs(BeginFrameArgs&&) noexcept = default;
+BeginFrameArgs& BeginFrameArgs::operator=(BeginFrameArgs&&) = default;
+
+bool BeginFrameArgs::operator==(const BeginFrameArgs& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && type_ == other.type_
+   && source_id_ == other.source_id_
+   && sequence_number_ == other.sequence_number_
+   && frame_time_us_ == other.frame_time_us_
+   && deadline_us_ == other.deadline_us_
+   && interval_delta_us_ == other.interval_delta_us_
+   && on_critical_path_ == other.on_critical_path_
+   && animate_only_ == other.animate_only_
+   && source_location_iid_ == other.source_location_iid_
+   && source_location_ == other.source_location_;
+}
+
+bool BeginFrameArgs::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* type */:
+        field.get(&type_);
+        break;
+      case 2 /* source_id */:
+        field.get(&source_id_);
+        break;
+      case 3 /* sequence_number */:
+        field.get(&sequence_number_);
+        break;
+      case 4 /* frame_time_us */:
+        field.get(&frame_time_us_);
+        break;
+      case 5 /* deadline_us */:
+        field.get(&deadline_us_);
+        break;
+      case 6 /* interval_delta_us */:
+        field.get(&interval_delta_us_);
+        break;
+      case 7 /* on_critical_path */:
+        field.get(&on_critical_path_);
+        break;
+      case 8 /* animate_only */:
+        field.get(&animate_only_);
+        break;
+      case 9 /* source_location_iid */:
+        field.get(&source_location_iid_);
+        break;
+      case 10 /* source_location */:
+        (*source_location_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string BeginFrameArgs::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> BeginFrameArgs::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void BeginFrameArgs::Serialize(::protozero::Message* msg) const {
+  // Field 1: type
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, type_);
+  }
+
+  // Field 2: source_id
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, source_id_);
+  }
+
+  // Field 3: sequence_number
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, sequence_number_);
+  }
+
+  // Field 4: frame_time_us
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, frame_time_us_);
+  }
+
+  // Field 5: deadline_us
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, deadline_us_);
+  }
+
+  // Field 6: interval_delta_us
+  if (_has_field_[6]) {
+    msg->AppendVarInt(6, interval_delta_us_);
+  }
+
+  // Field 7: on_critical_path
+  if (_has_field_[7]) {
+    msg->AppendTinyVarInt(7, on_critical_path_);
+  }
+
+  // Field 8: animate_only
+  if (_has_field_[8]) {
+    msg->AppendTinyVarInt(8, animate_only_);
+  }
+
+  // Field 9: source_location_iid
+  if (_has_field_[9]) {
+    msg->AppendVarInt(9, source_location_iid_);
+  }
+
+  // Field 10: source_location
+  if (_has_field_[10]) {
+    (*source_location_).Serialize(msg->BeginNestedMessage<::protozero::Message>(10));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+BeginFrameObserverState::BeginFrameObserverState() = default;
+BeginFrameObserverState::~BeginFrameObserverState() = default;
+BeginFrameObserverState::BeginFrameObserverState(const BeginFrameObserverState&) = default;
+BeginFrameObserverState& BeginFrameObserverState::operator=(const BeginFrameObserverState&) = default;
+BeginFrameObserverState::BeginFrameObserverState(BeginFrameObserverState&&) noexcept = default;
+BeginFrameObserverState& BeginFrameObserverState::operator=(BeginFrameObserverState&&) = default;
+
+bool BeginFrameObserverState::operator==(const BeginFrameObserverState& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && dropped_begin_frame_args_ == other.dropped_begin_frame_args_
+   && last_begin_frame_args_ == other.last_begin_frame_args_;
+}
+
+bool BeginFrameObserverState::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* dropped_begin_frame_args */:
+        field.get(&dropped_begin_frame_args_);
+        break;
+      case 2 /* last_begin_frame_args */:
+        (*last_begin_frame_args_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string BeginFrameObserverState::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> BeginFrameObserverState::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void BeginFrameObserverState::Serialize(::protozero::Message* msg) const {
+  // Field 1: dropped_begin_frame_args
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, dropped_begin_frame_args_);
+  }
+
+  // Field 2: last_begin_frame_args
+  if (_has_field_[2]) {
+    (*last_begin_frame_args_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+BeginImplFrameArgs::BeginImplFrameArgs() = default;
+BeginImplFrameArgs::~BeginImplFrameArgs() = default;
+BeginImplFrameArgs::BeginImplFrameArgs(const BeginImplFrameArgs&) = default;
+BeginImplFrameArgs& BeginImplFrameArgs::operator=(const BeginImplFrameArgs&) = default;
+BeginImplFrameArgs::BeginImplFrameArgs(BeginImplFrameArgs&&) noexcept = default;
+BeginImplFrameArgs& BeginImplFrameArgs::operator=(BeginImplFrameArgs&&) = default;
+
+bool BeginImplFrameArgs::operator==(const BeginImplFrameArgs& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && updated_at_us_ == other.updated_at_us_
+   && finished_at_us_ == other.finished_at_us_
+   && state_ == other.state_
+   && current_args_ == other.current_args_
+   && last_args_ == other.last_args_
+   && timestamps_in_us_ == other.timestamps_in_us_;
+}
+
+bool BeginImplFrameArgs::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* updated_at_us */:
+        field.get(&updated_at_us_);
+        break;
+      case 2 /* finished_at_us */:
+        field.get(&finished_at_us_);
+        break;
+      case 3 /* state */:
+        field.get(&state_);
+        break;
+      case 4 /* current_args */:
+        (*current_args_).ParseFromArray(field.data(), field.size());
+        break;
+      case 5 /* last_args */:
+        (*last_args_).ParseFromArray(field.data(), field.size());
+        break;
+      case 6 /* timestamps_in_us */:
+        (*timestamps_in_us_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string BeginImplFrameArgs::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> BeginImplFrameArgs::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void BeginImplFrameArgs::Serialize(::protozero::Message* msg) const {
+  // Field 1: updated_at_us
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, updated_at_us_);
+  }
+
+  // Field 2: finished_at_us
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, finished_at_us_);
+  }
+
+  // Field 3: state
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, state_);
+  }
+
+  // Field 4: current_args
+  if (_has_field_[4]) {
+    (*current_args_).Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
+  }
+
+  // Field 5: last_args
+  if (_has_field_[5]) {
+    (*last_args_).Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
+  }
+
+  // Field 6: timestamps_in_us
+  if (_has_field_[6]) {
+    (*timestamps_in_us_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+BeginImplFrameArgs_TimestampsInUs::BeginImplFrameArgs_TimestampsInUs() = default;
+BeginImplFrameArgs_TimestampsInUs::~BeginImplFrameArgs_TimestampsInUs() = default;
+BeginImplFrameArgs_TimestampsInUs::BeginImplFrameArgs_TimestampsInUs(const BeginImplFrameArgs_TimestampsInUs&) = default;
+BeginImplFrameArgs_TimestampsInUs& BeginImplFrameArgs_TimestampsInUs::operator=(const BeginImplFrameArgs_TimestampsInUs&) = default;
+BeginImplFrameArgs_TimestampsInUs::BeginImplFrameArgs_TimestampsInUs(BeginImplFrameArgs_TimestampsInUs&&) noexcept = default;
+BeginImplFrameArgs_TimestampsInUs& BeginImplFrameArgs_TimestampsInUs::operator=(BeginImplFrameArgs_TimestampsInUs&&) = default;
+
+bool BeginImplFrameArgs_TimestampsInUs::operator==(const BeginImplFrameArgs_TimestampsInUs& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && interval_delta_ == other.interval_delta_
+   && now_to_deadline_delta_ == other.now_to_deadline_delta_
+   && frame_time_to_now_delta_ == other.frame_time_to_now_delta_
+   && frame_time_to_deadline_delta_ == other.frame_time_to_deadline_delta_
+   && now_ == other.now_
+   && frame_time_ == other.frame_time_
+   && deadline_ == other.deadline_;
+}
+
+bool BeginImplFrameArgs_TimestampsInUs::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* interval_delta */:
+        field.get(&interval_delta_);
+        break;
+      case 2 /* now_to_deadline_delta */:
+        field.get(&now_to_deadline_delta_);
+        break;
+      case 3 /* frame_time_to_now_delta */:
+        field.get(&frame_time_to_now_delta_);
+        break;
+      case 4 /* frame_time_to_deadline_delta */:
+        field.get(&frame_time_to_deadline_delta_);
+        break;
+      case 5 /* now */:
+        field.get(&now_);
+        break;
+      case 6 /* frame_time */:
+        field.get(&frame_time_);
+        break;
+      case 7 /* deadline */:
+        field.get(&deadline_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string BeginImplFrameArgs_TimestampsInUs::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> BeginImplFrameArgs_TimestampsInUs::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void BeginImplFrameArgs_TimestampsInUs::Serialize(::protozero::Message* msg) const {
+  // Field 1: interval_delta
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, interval_delta_);
+  }
+
+  // Field 2: now_to_deadline_delta
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, now_to_deadline_delta_);
+  }
+
+  // Field 3: frame_time_to_now_delta
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, frame_time_to_now_delta_);
+  }
+
+  // Field 4: frame_time_to_deadline_delta
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, frame_time_to_deadline_delta_);
+  }
+
+  // Field 5: now
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, now_);
+  }
+
+  // Field 6: frame_time
+  if (_has_field_[6]) {
+    msg->AppendVarInt(6, frame_time_);
+  }
+
+  // Field 7: deadline
+  if (_has_field_[7]) {
+    msg->AppendVarInt(7, deadline_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+ChromeCompositorStateMachine::ChromeCompositorStateMachine() = default;
+ChromeCompositorStateMachine::~ChromeCompositorStateMachine() = default;
+ChromeCompositorStateMachine::ChromeCompositorStateMachine(const ChromeCompositorStateMachine&) = default;
+ChromeCompositorStateMachine& ChromeCompositorStateMachine::operator=(const ChromeCompositorStateMachine&) = default;
+ChromeCompositorStateMachine::ChromeCompositorStateMachine(ChromeCompositorStateMachine&&) noexcept = default;
+ChromeCompositorStateMachine& ChromeCompositorStateMachine::operator=(ChromeCompositorStateMachine&&) = default;
+
+bool ChromeCompositorStateMachine::operator==(const ChromeCompositorStateMachine& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && major_state_ == other.major_state_
+   && minor_state_ == other.minor_state_;
+}
+
+bool ChromeCompositorStateMachine::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* major_state */:
+        (*major_state_).ParseFromArray(field.data(), field.size());
+        break;
+      case 2 /* minor_state */:
+        (*minor_state_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeCompositorStateMachine::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeCompositorStateMachine::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeCompositorStateMachine::Serialize(::protozero::Message* msg) const {
+  // Field 1: major_state
+  if (_has_field_[1]) {
+    (*major_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  // Field 2: minor_state
+  if (_has_field_[2]) {
+    (*minor_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+ChromeCompositorStateMachine_MinorState::ChromeCompositorStateMachine_MinorState() = default;
+ChromeCompositorStateMachine_MinorState::~ChromeCompositorStateMachine_MinorState() = default;
+ChromeCompositorStateMachine_MinorState::ChromeCompositorStateMachine_MinorState(const ChromeCompositorStateMachine_MinorState&) = default;
+ChromeCompositorStateMachine_MinorState& ChromeCompositorStateMachine_MinorState::operator=(const ChromeCompositorStateMachine_MinorState&) = default;
+ChromeCompositorStateMachine_MinorState::ChromeCompositorStateMachine_MinorState(ChromeCompositorStateMachine_MinorState&&) noexcept = default;
+ChromeCompositorStateMachine_MinorState& ChromeCompositorStateMachine_MinorState::operator=(ChromeCompositorStateMachine_MinorState&&) = default;
+
+bool ChromeCompositorStateMachine_MinorState::operator==(const ChromeCompositorStateMachine_MinorState& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && commit_count_ == other.commit_count_
+   && current_frame_number_ == other.current_frame_number_
+   && last_frame_number_submit_performed_ == other.last_frame_number_submit_performed_
+   && last_frame_number_draw_performed_ == other.last_frame_number_draw_performed_
+   && last_frame_number_begin_main_frame_sent_ == other.last_frame_number_begin_main_frame_sent_
+   && did_draw_ == other.did_draw_
+   && did_send_begin_main_frame_for_current_frame_ == other.did_send_begin_main_frame_for_current_frame_
+   && did_notify_begin_main_frame_not_expected_until_ == other.did_notify_begin_main_frame_not_expected_until_
+   && did_notify_begin_main_frame_not_expected_soon_ == other.did_notify_begin_main_frame_not_expected_soon_
+   && wants_begin_main_frame_not_expected_ == other.wants_begin_main_frame_not_expected_
+   && did_commit_during_frame_ == other.did_commit_during_frame_
+   && did_invalidate_layer_tree_frame_sink_ == other.did_invalidate_layer_tree_frame_sink_
+   && did_perform_impl_side_invalidaion_ == other.did_perform_impl_side_invalidaion_
+   && did_prepare_tiles_ == other.did_prepare_tiles_
+   && consecutive_checkerboard_animations_ == other.consecutive_checkerboard_animations_
+   && pending_submit_frames_ == other.pending_submit_frames_
+   && submit_frames_with_current_layer_tree_frame_sink_ == other.submit_frames_with_current_layer_tree_frame_sink_
+   && needs_redraw_ == other.needs_redraw_
+   && needs_prepare_tiles_ == other.needs_prepare_tiles_
+   && needs_begin_main_frame_ == other.needs_begin_main_frame_
+   && needs_one_begin_impl_frame_ == other.needs_one_begin_impl_frame_
+   && visible_ == other.visible_
+   && begin_frame_source_paused_ == other.begin_frame_source_paused_
+   && can_draw_ == other.can_draw_
+   && resourceless_draw_ == other.resourceless_draw_
+   && has_pending_tree_ == other.has_pending_tree_
+   && pending_tree_is_ready_for_activation_ == other.pending_tree_is_ready_for_activation_
+   && active_tree_needs_first_draw_ == other.active_tree_needs_first_draw_
+   && active_tree_is_ready_to_draw_ == other.active_tree_is_ready_to_draw_
+   && did_create_and_initialize_first_layer_tree_frame_sink_ == other.did_create_and_initialize_first_layer_tree_frame_sink_
+   && tree_priority_ == other.tree_priority_
+   && scroll_handler_state_ == other.scroll_handler_state_
+   && critical_begin_main_frame_to_activate_is_fast_ == other.critical_begin_main_frame_to_activate_is_fast_
+   && main_thread_missed_last_deadline_ == other.main_thread_missed_last_deadline_
+   && skip_next_begin_main_frame_to_reduce_latency_ == other.skip_next_begin_main_frame_to_reduce_latency_
+   && video_needs_begin_frames_ == other.video_needs_begin_frames_
+   && defer_begin_main_frame_ == other.defer_begin_main_frame_
+   && last_commit_had_no_updates_ == other.last_commit_had_no_updates_
+   && did_draw_in_last_frame_ == other.did_draw_in_last_frame_
+   && did_submit_in_last_frame_ == other.did_submit_in_last_frame_
+   && needs_impl_side_invalidation_ == other.needs_impl_side_invalidation_
+   && current_pending_tree_is_impl_side_ == other.current_pending_tree_is_impl_side_
+   && previous_pending_tree_was_impl_side_ == other.previous_pending_tree_was_impl_side_
+   && processing_animation_worklets_for_active_tree_ == other.processing_animation_worklets_for_active_tree_
+   && processing_animation_worklets_for_pending_tree_ == other.processing_animation_worklets_for_pending_tree_
+   && processing_paint_worklets_for_pending_tree_ == other.processing_paint_worklets_for_pending_tree_;
+}
+
+bool ChromeCompositorStateMachine_MinorState::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* commit_count */:
+        field.get(&commit_count_);
+        break;
+      case 2 /* current_frame_number */:
+        field.get(&current_frame_number_);
+        break;
+      case 3 /* last_frame_number_submit_performed */:
+        field.get(&last_frame_number_submit_performed_);
+        break;
+      case 4 /* last_frame_number_draw_performed */:
+        field.get(&last_frame_number_draw_performed_);
+        break;
+      case 5 /* last_frame_number_begin_main_frame_sent */:
+        field.get(&last_frame_number_begin_main_frame_sent_);
+        break;
+      case 6 /* did_draw */:
+        field.get(&did_draw_);
+        break;
+      case 7 /* did_send_begin_main_frame_for_current_frame */:
+        field.get(&did_send_begin_main_frame_for_current_frame_);
+        break;
+      case 8 /* did_notify_begin_main_frame_not_expected_until */:
+        field.get(&did_notify_begin_main_frame_not_expected_until_);
+        break;
+      case 9 /* did_notify_begin_main_frame_not_expected_soon */:
+        field.get(&did_notify_begin_main_frame_not_expected_soon_);
+        break;
+      case 10 /* wants_begin_main_frame_not_expected */:
+        field.get(&wants_begin_main_frame_not_expected_);
+        break;
+      case 11 /* did_commit_during_frame */:
+        field.get(&did_commit_during_frame_);
+        break;
+      case 12 /* did_invalidate_layer_tree_frame_sink */:
+        field.get(&did_invalidate_layer_tree_frame_sink_);
+        break;
+      case 13 /* did_perform_impl_side_invalidaion */:
+        field.get(&did_perform_impl_side_invalidaion_);
+        break;
+      case 14 /* did_prepare_tiles */:
+        field.get(&did_prepare_tiles_);
+        break;
+      case 15 /* consecutive_checkerboard_animations */:
+        field.get(&consecutive_checkerboard_animations_);
+        break;
+      case 16 /* pending_submit_frames */:
+        field.get(&pending_submit_frames_);
+        break;
+      case 17 /* submit_frames_with_current_layer_tree_frame_sink */:
+        field.get(&submit_frames_with_current_layer_tree_frame_sink_);
+        break;
+      case 18 /* needs_redraw */:
+        field.get(&needs_redraw_);
+        break;
+      case 19 /* needs_prepare_tiles */:
+        field.get(&needs_prepare_tiles_);
+        break;
+      case 20 /* needs_begin_main_frame */:
+        field.get(&needs_begin_main_frame_);
+        break;
+      case 21 /* needs_one_begin_impl_frame */:
+        field.get(&needs_one_begin_impl_frame_);
+        break;
+      case 22 /* visible */:
+        field.get(&visible_);
+        break;
+      case 23 /* begin_frame_source_paused */:
+        field.get(&begin_frame_source_paused_);
+        break;
+      case 24 /* can_draw */:
+        field.get(&can_draw_);
+        break;
+      case 25 /* resourceless_draw */:
+        field.get(&resourceless_draw_);
+        break;
+      case 26 /* has_pending_tree */:
+        field.get(&has_pending_tree_);
+        break;
+      case 27 /* pending_tree_is_ready_for_activation */:
+        field.get(&pending_tree_is_ready_for_activation_);
+        break;
+      case 28 /* active_tree_needs_first_draw */:
+        field.get(&active_tree_needs_first_draw_);
+        break;
+      case 29 /* active_tree_is_ready_to_draw */:
+        field.get(&active_tree_is_ready_to_draw_);
+        break;
+      case 30 /* did_create_and_initialize_first_layer_tree_frame_sink */:
+        field.get(&did_create_and_initialize_first_layer_tree_frame_sink_);
+        break;
+      case 31 /* tree_priority */:
+        field.get(&tree_priority_);
+        break;
+      case 32 /* scroll_handler_state */:
+        field.get(&scroll_handler_state_);
+        break;
+      case 33 /* critical_begin_main_frame_to_activate_is_fast */:
+        field.get(&critical_begin_main_frame_to_activate_is_fast_);
+        break;
+      case 34 /* main_thread_missed_last_deadline */:
+        field.get(&main_thread_missed_last_deadline_);
+        break;
+      case 35 /* skip_next_begin_main_frame_to_reduce_latency */:
+        field.get(&skip_next_begin_main_frame_to_reduce_latency_);
+        break;
+      case 36 /* video_needs_begin_frames */:
+        field.get(&video_needs_begin_frames_);
+        break;
+      case 37 /* defer_begin_main_frame */:
+        field.get(&defer_begin_main_frame_);
+        break;
+      case 38 /* last_commit_had_no_updates */:
+        field.get(&last_commit_had_no_updates_);
+        break;
+      case 39 /* did_draw_in_last_frame */:
+        field.get(&did_draw_in_last_frame_);
+        break;
+      case 40 /* did_submit_in_last_frame */:
+        field.get(&did_submit_in_last_frame_);
+        break;
+      case 41 /* needs_impl_side_invalidation */:
+        field.get(&needs_impl_side_invalidation_);
+        break;
+      case 42 /* current_pending_tree_is_impl_side */:
+        field.get(&current_pending_tree_is_impl_side_);
+        break;
+      case 43 /* previous_pending_tree_was_impl_side */:
+        field.get(&previous_pending_tree_was_impl_side_);
+        break;
+      case 44 /* processing_animation_worklets_for_active_tree */:
+        field.get(&processing_animation_worklets_for_active_tree_);
+        break;
+      case 45 /* processing_animation_worklets_for_pending_tree */:
+        field.get(&processing_animation_worklets_for_pending_tree_);
+        break;
+      case 46 /* processing_paint_worklets_for_pending_tree */:
+        field.get(&processing_paint_worklets_for_pending_tree_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeCompositorStateMachine_MinorState::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeCompositorStateMachine_MinorState::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeCompositorStateMachine_MinorState::Serialize(::protozero::Message* msg) const {
+  // Field 1: commit_count
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, commit_count_);
+  }
+
+  // Field 2: current_frame_number
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, current_frame_number_);
+  }
+
+  // Field 3: last_frame_number_submit_performed
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, last_frame_number_submit_performed_);
+  }
+
+  // Field 4: last_frame_number_draw_performed
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, last_frame_number_draw_performed_);
+  }
+
+  // Field 5: last_frame_number_begin_main_frame_sent
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, last_frame_number_begin_main_frame_sent_);
+  }
+
+  // Field 6: did_draw
+  if (_has_field_[6]) {
+    msg->AppendTinyVarInt(6, did_draw_);
+  }
+
+  // Field 7: did_send_begin_main_frame_for_current_frame
+  if (_has_field_[7]) {
+    msg->AppendTinyVarInt(7, did_send_begin_main_frame_for_current_frame_);
+  }
+
+  // Field 8: did_notify_begin_main_frame_not_expected_until
+  if (_has_field_[8]) {
+    msg->AppendTinyVarInt(8, did_notify_begin_main_frame_not_expected_until_);
+  }
+
+  // Field 9: did_notify_begin_main_frame_not_expected_soon
+  if (_has_field_[9]) {
+    msg->AppendTinyVarInt(9, did_notify_begin_main_frame_not_expected_soon_);
+  }
+
+  // Field 10: wants_begin_main_frame_not_expected
+  if (_has_field_[10]) {
+    msg->AppendTinyVarInt(10, wants_begin_main_frame_not_expected_);
+  }
+
+  // Field 11: did_commit_during_frame
+  if (_has_field_[11]) {
+    msg->AppendTinyVarInt(11, did_commit_during_frame_);
+  }
+
+  // Field 12: did_invalidate_layer_tree_frame_sink
+  if (_has_field_[12]) {
+    msg->AppendTinyVarInt(12, did_invalidate_layer_tree_frame_sink_);
+  }
+
+  // Field 13: did_perform_impl_side_invalidaion
+  if (_has_field_[13]) {
+    msg->AppendTinyVarInt(13, did_perform_impl_side_invalidaion_);
+  }
+
+  // Field 14: did_prepare_tiles
+  if (_has_field_[14]) {
+    msg->AppendTinyVarInt(14, did_prepare_tiles_);
+  }
+
+  // Field 15: consecutive_checkerboard_animations
+  if (_has_field_[15]) {
+    msg->AppendVarInt(15, consecutive_checkerboard_animations_);
+  }
+
+  // Field 16: pending_submit_frames
+  if (_has_field_[16]) {
+    msg->AppendVarInt(16, pending_submit_frames_);
+  }
+
+  // Field 17: submit_frames_with_current_layer_tree_frame_sink
+  if (_has_field_[17]) {
+    msg->AppendVarInt(17, submit_frames_with_current_layer_tree_frame_sink_);
+  }
+
+  // Field 18: needs_redraw
+  if (_has_field_[18]) {
+    msg->AppendTinyVarInt(18, needs_redraw_);
+  }
+
+  // Field 19: needs_prepare_tiles
+  if (_has_field_[19]) {
+    msg->AppendTinyVarInt(19, needs_prepare_tiles_);
+  }
+
+  // Field 20: needs_begin_main_frame
+  if (_has_field_[20]) {
+    msg->AppendTinyVarInt(20, needs_begin_main_frame_);
+  }
+
+  // Field 21: needs_one_begin_impl_frame
+  if (_has_field_[21]) {
+    msg->AppendTinyVarInt(21, needs_one_begin_impl_frame_);
+  }
+
+  // Field 22: visible
+  if (_has_field_[22]) {
+    msg->AppendTinyVarInt(22, visible_);
+  }
+
+  // Field 23: begin_frame_source_paused
+  if (_has_field_[23]) {
+    msg->AppendTinyVarInt(23, begin_frame_source_paused_);
+  }
+
+  // Field 24: can_draw
+  if (_has_field_[24]) {
+    msg->AppendTinyVarInt(24, can_draw_);
+  }
+
+  // Field 25: resourceless_draw
+  if (_has_field_[25]) {
+    msg->AppendTinyVarInt(25, resourceless_draw_);
+  }
+
+  // Field 26: has_pending_tree
+  if (_has_field_[26]) {
+    msg->AppendTinyVarInt(26, has_pending_tree_);
+  }
+
+  // Field 27: pending_tree_is_ready_for_activation
+  if (_has_field_[27]) {
+    msg->AppendTinyVarInt(27, pending_tree_is_ready_for_activation_);
+  }
+
+  // Field 28: active_tree_needs_first_draw
+  if (_has_field_[28]) {
+    msg->AppendTinyVarInt(28, active_tree_needs_first_draw_);
+  }
+
+  // Field 29: active_tree_is_ready_to_draw
+  if (_has_field_[29]) {
+    msg->AppendTinyVarInt(29, active_tree_is_ready_to_draw_);
+  }
+
+  // Field 30: did_create_and_initialize_first_layer_tree_frame_sink
+  if (_has_field_[30]) {
+    msg->AppendTinyVarInt(30, did_create_and_initialize_first_layer_tree_frame_sink_);
+  }
+
+  // Field 31: tree_priority
+  if (_has_field_[31]) {
+    msg->AppendVarInt(31, tree_priority_);
+  }
+
+  // Field 32: scroll_handler_state
+  if (_has_field_[32]) {
+    msg->AppendVarInt(32, scroll_handler_state_);
+  }
+
+  // Field 33: critical_begin_main_frame_to_activate_is_fast
+  if (_has_field_[33]) {
+    msg->AppendTinyVarInt(33, critical_begin_main_frame_to_activate_is_fast_);
+  }
+
+  // Field 34: main_thread_missed_last_deadline
+  if (_has_field_[34]) {
+    msg->AppendTinyVarInt(34, main_thread_missed_last_deadline_);
+  }
+
+  // Field 35: skip_next_begin_main_frame_to_reduce_latency
+  if (_has_field_[35]) {
+    msg->AppendTinyVarInt(35, skip_next_begin_main_frame_to_reduce_latency_);
+  }
+
+  // Field 36: video_needs_begin_frames
+  if (_has_field_[36]) {
+    msg->AppendTinyVarInt(36, video_needs_begin_frames_);
+  }
+
+  // Field 37: defer_begin_main_frame
+  if (_has_field_[37]) {
+    msg->AppendTinyVarInt(37, defer_begin_main_frame_);
+  }
+
+  // Field 38: last_commit_had_no_updates
+  if (_has_field_[38]) {
+    msg->AppendTinyVarInt(38, last_commit_had_no_updates_);
+  }
+
+  // Field 39: did_draw_in_last_frame
+  if (_has_field_[39]) {
+    msg->AppendTinyVarInt(39, did_draw_in_last_frame_);
+  }
+
+  // Field 40: did_submit_in_last_frame
+  if (_has_field_[40]) {
+    msg->AppendTinyVarInt(40, did_submit_in_last_frame_);
+  }
+
+  // Field 41: needs_impl_side_invalidation
+  if (_has_field_[41]) {
+    msg->AppendTinyVarInt(41, needs_impl_side_invalidation_);
+  }
+
+  // Field 42: current_pending_tree_is_impl_side
+  if (_has_field_[42]) {
+    msg->AppendTinyVarInt(42, current_pending_tree_is_impl_side_);
+  }
+
+  // Field 43: previous_pending_tree_was_impl_side
+  if (_has_field_[43]) {
+    msg->AppendTinyVarInt(43, previous_pending_tree_was_impl_side_);
+  }
+
+  // Field 44: processing_animation_worklets_for_active_tree
+  if (_has_field_[44]) {
+    msg->AppendTinyVarInt(44, processing_animation_worklets_for_active_tree_);
+  }
+
+  // Field 45: processing_animation_worklets_for_pending_tree
+  if (_has_field_[45]) {
+    msg->AppendTinyVarInt(45, processing_animation_worklets_for_pending_tree_);
+  }
+
+  // Field 46: processing_paint_worklets_for_pending_tree
+  if (_has_field_[46]) {
+    msg->AppendTinyVarInt(46, processing_paint_worklets_for_pending_tree_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+ChromeCompositorStateMachine_MajorState::ChromeCompositorStateMachine_MajorState() = default;
+ChromeCompositorStateMachine_MajorState::~ChromeCompositorStateMachine_MajorState() = default;
+ChromeCompositorStateMachine_MajorState::ChromeCompositorStateMachine_MajorState(const ChromeCompositorStateMachine_MajorState&) = default;
+ChromeCompositorStateMachine_MajorState& ChromeCompositorStateMachine_MajorState::operator=(const ChromeCompositorStateMachine_MajorState&) = default;
+ChromeCompositorStateMachine_MajorState::ChromeCompositorStateMachine_MajorState(ChromeCompositorStateMachine_MajorState&&) noexcept = default;
+ChromeCompositorStateMachine_MajorState& ChromeCompositorStateMachine_MajorState::operator=(ChromeCompositorStateMachine_MajorState&&) = default;
+
+bool ChromeCompositorStateMachine_MajorState::operator==(const ChromeCompositorStateMachine_MajorState& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && next_action_ == other.next_action_
+   && begin_impl_frame_state_ == other.begin_impl_frame_state_
+   && begin_main_frame_state_ == other.begin_main_frame_state_
+   && layer_tree_frame_sink_state_ == other.layer_tree_frame_sink_state_
+   && forced_redraw_state_ == other.forced_redraw_state_;
+}
+
+bool ChromeCompositorStateMachine_MajorState::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* next_action */:
+        field.get(&next_action_);
+        break;
+      case 2 /* begin_impl_frame_state */:
+        field.get(&begin_impl_frame_state_);
+        break;
+      case 3 /* begin_main_frame_state */:
+        field.get(&begin_main_frame_state_);
+        break;
+      case 4 /* layer_tree_frame_sink_state */:
+        field.get(&layer_tree_frame_sink_state_);
+        break;
+      case 5 /* forced_redraw_state */:
+        field.get(&forced_redraw_state_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeCompositorStateMachine_MajorState::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeCompositorStateMachine_MajorState::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeCompositorStateMachine_MajorState::Serialize(::protozero::Message* msg) const {
+  // Field 1: next_action
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, next_action_);
+  }
+
+  // Field 2: begin_impl_frame_state
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, begin_impl_frame_state_);
+  }
+
+  // Field 3: begin_main_frame_state
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, begin_main_frame_state_);
+  }
+
+  // Field 4: layer_tree_frame_sink_state
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, layer_tree_frame_sink_state_);
+  }
+
+  // Field 5: forced_redraw_state
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, forced_redraw_state_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+ChromeCompositorSchedulerState::ChromeCompositorSchedulerState() = default;
+ChromeCompositorSchedulerState::~ChromeCompositorSchedulerState() = default;
+ChromeCompositorSchedulerState::ChromeCompositorSchedulerState(const ChromeCompositorSchedulerState&) = default;
+ChromeCompositorSchedulerState& ChromeCompositorSchedulerState::operator=(const ChromeCompositorSchedulerState&) = default;
+ChromeCompositorSchedulerState::ChromeCompositorSchedulerState(ChromeCompositorSchedulerState&&) noexcept = default;
+ChromeCompositorSchedulerState& ChromeCompositorSchedulerState::operator=(ChromeCompositorSchedulerState&&) = default;
+
+bool ChromeCompositorSchedulerState::operator==(const ChromeCompositorSchedulerState& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && state_machine_ == other.state_machine_
+   && observing_begin_frame_source_ == other.observing_begin_frame_source_
+   && begin_impl_frame_deadline_task_ == other.begin_impl_frame_deadline_task_
+   && pending_begin_frame_task_ == other.pending_begin_frame_task_
+   && skipped_last_frame_missed_exceeded_deadline_ == other.skipped_last_frame_missed_exceeded_deadline_
+   && skipped_last_frame_to_reduce_latency_ == other.skipped_last_frame_to_reduce_latency_
+   && inside_action_ == other.inside_action_
+   && deadline_mode_ == other.deadline_mode_
+   && deadline_us_ == other.deadline_us_
+   && deadline_scheduled_at_us_ == other.deadline_scheduled_at_us_
+   && now_us_ == other.now_us_
+   && now_to_deadline_delta_us_ == other.now_to_deadline_delta_us_
+   && now_to_deadline_scheduled_at_delta_us_ == other.now_to_deadline_scheduled_at_delta_us_
+   && begin_impl_frame_args_ == other.begin_impl_frame_args_
+   && begin_frame_observer_state_ == other.begin_frame_observer_state_
+   && begin_frame_source_state_ == other.begin_frame_source_state_
+   && compositor_timing_history_ == other.compositor_timing_history_;
+}
+
+bool ChromeCompositorSchedulerState::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* state_machine */:
+        (*state_machine_).ParseFromArray(field.data(), field.size());
+        break;
+      case 2 /* observing_begin_frame_source */:
+        field.get(&observing_begin_frame_source_);
+        break;
+      case 3 /* begin_impl_frame_deadline_task */:
+        field.get(&begin_impl_frame_deadline_task_);
+        break;
+      case 4 /* pending_begin_frame_task */:
+        field.get(&pending_begin_frame_task_);
+        break;
+      case 5 /* skipped_last_frame_missed_exceeded_deadline */:
+        field.get(&skipped_last_frame_missed_exceeded_deadline_);
+        break;
+      case 6 /* skipped_last_frame_to_reduce_latency */:
+        field.get(&skipped_last_frame_to_reduce_latency_);
+        break;
+      case 7 /* inside_action */:
+        field.get(&inside_action_);
+        break;
+      case 8 /* deadline_mode */:
+        field.get(&deadline_mode_);
+        break;
+      case 9 /* deadline_us */:
+        field.get(&deadline_us_);
+        break;
+      case 10 /* deadline_scheduled_at_us */:
+        field.get(&deadline_scheduled_at_us_);
+        break;
+      case 11 /* now_us */:
+        field.get(&now_us_);
+        break;
+      case 12 /* now_to_deadline_delta_us */:
+        field.get(&now_to_deadline_delta_us_);
+        break;
+      case 13 /* now_to_deadline_scheduled_at_delta_us */:
+        field.get(&now_to_deadline_scheduled_at_delta_us_);
+        break;
+      case 14 /* begin_impl_frame_args */:
+        (*begin_impl_frame_args_).ParseFromArray(field.data(), field.size());
+        break;
+      case 15 /* begin_frame_observer_state */:
+        (*begin_frame_observer_state_).ParseFromArray(field.data(), field.size());
+        break;
+      case 16 /* begin_frame_source_state */:
+        (*begin_frame_source_state_).ParseFromArray(field.data(), field.size());
+        break;
+      case 17 /* compositor_timing_history */:
+        (*compositor_timing_history_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeCompositorSchedulerState::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeCompositorSchedulerState::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeCompositorSchedulerState::Serialize(::protozero::Message* msg) const {
+  // Field 1: state_machine
+  if (_has_field_[1]) {
+    (*state_machine_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  // Field 2: observing_begin_frame_source
+  if (_has_field_[2]) {
+    msg->AppendTinyVarInt(2, observing_begin_frame_source_);
+  }
+
+  // Field 3: begin_impl_frame_deadline_task
+  if (_has_field_[3]) {
+    msg->AppendTinyVarInt(3, begin_impl_frame_deadline_task_);
+  }
+
+  // Field 4: pending_begin_frame_task
+  if (_has_field_[4]) {
+    msg->AppendTinyVarInt(4, pending_begin_frame_task_);
+  }
+
+  // Field 5: skipped_last_frame_missed_exceeded_deadline
+  if (_has_field_[5]) {
+    msg->AppendTinyVarInt(5, skipped_last_frame_missed_exceeded_deadline_);
+  }
+
+  // Field 6: skipped_last_frame_to_reduce_latency
+  if (_has_field_[6]) {
+    msg->AppendTinyVarInt(6, skipped_last_frame_to_reduce_latency_);
+  }
+
+  // Field 7: inside_action
+  if (_has_field_[7]) {
+    msg->AppendVarInt(7, inside_action_);
+  }
+
+  // Field 8: deadline_mode
+  if (_has_field_[8]) {
+    msg->AppendVarInt(8, deadline_mode_);
+  }
+
+  // Field 9: deadline_us
+  if (_has_field_[9]) {
+    msg->AppendVarInt(9, deadline_us_);
+  }
+
+  // Field 10: deadline_scheduled_at_us
+  if (_has_field_[10]) {
+    msg->AppendVarInt(10, deadline_scheduled_at_us_);
+  }
+
+  // Field 11: now_us
+  if (_has_field_[11]) {
+    msg->AppendVarInt(11, now_us_);
+  }
+
+  // Field 12: now_to_deadline_delta_us
+  if (_has_field_[12]) {
+    msg->AppendVarInt(12, now_to_deadline_delta_us_);
+  }
+
+  // Field 13: now_to_deadline_scheduled_at_delta_us
+  if (_has_field_[13]) {
+    msg->AppendVarInt(13, now_to_deadline_scheduled_at_delta_us_);
+  }
+
+  // Field 14: begin_impl_frame_args
+  if (_has_field_[14]) {
+    (*begin_impl_frame_args_).Serialize(msg->BeginNestedMessage<::protozero::Message>(14));
+  }
+
+  // Field 15: begin_frame_observer_state
+  if (_has_field_[15]) {
+    (*begin_frame_observer_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(15));
+  }
+
+  // Field 16: begin_frame_source_state
+  if (_has_field_[16]) {
+    (*begin_frame_source_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(16));
+  }
+
+  // Field 17: compositor_timing_history
+  if (_has_field_[17]) {
+    (*compositor_timing_history_).Serialize(msg->BeginNestedMessage<::protozero::Message>(17));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_content_settings_event_info.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_content_settings_event_info.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_CONTENT_SETTINGS_EVENT_INFO_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_CONTENT_SETTINGS_EVENT_INFO_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeContentSettingsEventInfo;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT ChromeContentSettingsEventInfo : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNumberOfExceptionsFieldNumber = 1,
+  };
+
+  ChromeContentSettingsEventInfo();
+  ~ChromeContentSettingsEventInfo() override;
+  ChromeContentSettingsEventInfo(ChromeContentSettingsEventInfo&&) noexcept;
+  ChromeContentSettingsEventInfo& operator=(ChromeContentSettingsEventInfo&&);
+  ChromeContentSettingsEventInfo(const ChromeContentSettingsEventInfo&);
+  ChromeContentSettingsEventInfo& operator=(const ChromeContentSettingsEventInfo&);
+  bool operator==(const ChromeContentSettingsEventInfo&) const;
+  bool operator!=(const ChromeContentSettingsEventInfo& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_number_of_exceptions() const { return _has_field_[1]; }
+  uint32_t number_of_exceptions() const { return number_of_exceptions_; }
+  void set_number_of_exceptions(uint32_t value) { number_of_exceptions_ = value; _has_field_.set(1); }
+
+ private:
+  uint32_t number_of_exceptions_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_CONTENT_SETTINGS_EVENT_INFO_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_content_settings_event_info.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ChromeContentSettingsEventInfo::ChromeContentSettingsEventInfo() = default;
+ChromeContentSettingsEventInfo::~ChromeContentSettingsEventInfo() = default;
+ChromeContentSettingsEventInfo::ChromeContentSettingsEventInfo(const ChromeContentSettingsEventInfo&) = default;
+ChromeContentSettingsEventInfo& ChromeContentSettingsEventInfo::operator=(const ChromeContentSettingsEventInfo&) = default;
+ChromeContentSettingsEventInfo::ChromeContentSettingsEventInfo(ChromeContentSettingsEventInfo&&) noexcept = default;
+ChromeContentSettingsEventInfo& ChromeContentSettingsEventInfo::operator=(ChromeContentSettingsEventInfo&&) = default;
+
+bool ChromeContentSettingsEventInfo::operator==(const ChromeContentSettingsEventInfo& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && number_of_exceptions_ == other.number_of_exceptions_;
+}
+
+bool ChromeContentSettingsEventInfo::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* number_of_exceptions */:
+        field.get(&number_of_exceptions_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeContentSettingsEventInfo::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeContentSettingsEventInfo::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeContentSettingsEventInfo::Serialize(::protozero::Message* msg) const {
+  // Field 1: number_of_exceptions
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, number_of_exceptions_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_frame_reporter.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_frame_reporter.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_FRAME_REPORTER_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_FRAME_REPORTER_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeFrameReporter;
+enum ChromeFrameReporter_State : int;
+enum ChromeFrameReporter_FrameDropReason : int;
+enum ChromeFrameReporter_ScrollState : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeFrameReporter_State : int {
+  ChromeFrameReporter_State_STATE_NO_UPDATE_DESIRED = 0,
+  ChromeFrameReporter_State_STATE_PRESENTED_ALL = 1,
+  ChromeFrameReporter_State_STATE_PRESENTED_PARTIAL = 2,
+  ChromeFrameReporter_State_STATE_DROPPED = 3,
+};
+enum ChromeFrameReporter_FrameDropReason : int {
+  ChromeFrameReporter_FrameDropReason_REASON_UNSPECIFIED = 0,
+  ChromeFrameReporter_FrameDropReason_REASON_DISPLAY_COMPOSITOR = 1,
+  ChromeFrameReporter_FrameDropReason_REASON_MAIN_THREAD = 2,
+  ChromeFrameReporter_FrameDropReason_REASON_CLIENT_COMPOSITOR = 3,
+};
+enum ChromeFrameReporter_ScrollState : int {
+  ChromeFrameReporter_ScrollState_SCROLL_NONE = 0,
+  ChromeFrameReporter_ScrollState_SCROLL_MAIN_THREAD = 1,
+  ChromeFrameReporter_ScrollState_SCROLL_COMPOSITOR_THREAD = 2,
+  ChromeFrameReporter_ScrollState_SCROLL_UNKNOWN = 3,
+};
+
+class PERFETTO_EXPORT ChromeFrameReporter : public ::protozero::CppMessageObj {
+ public:
+  using State = ChromeFrameReporter_State;
+  static constexpr auto STATE_NO_UPDATE_DESIRED = ChromeFrameReporter_State_STATE_NO_UPDATE_DESIRED;
+  static constexpr auto STATE_PRESENTED_ALL = ChromeFrameReporter_State_STATE_PRESENTED_ALL;
+  static constexpr auto STATE_PRESENTED_PARTIAL = ChromeFrameReporter_State_STATE_PRESENTED_PARTIAL;
+  static constexpr auto STATE_DROPPED = ChromeFrameReporter_State_STATE_DROPPED;
+  static constexpr auto State_MIN = ChromeFrameReporter_State_STATE_NO_UPDATE_DESIRED;
+  static constexpr auto State_MAX = ChromeFrameReporter_State_STATE_DROPPED;
+  using FrameDropReason = ChromeFrameReporter_FrameDropReason;
+  static constexpr auto REASON_UNSPECIFIED = ChromeFrameReporter_FrameDropReason_REASON_UNSPECIFIED;
+  static constexpr auto REASON_DISPLAY_COMPOSITOR = ChromeFrameReporter_FrameDropReason_REASON_DISPLAY_COMPOSITOR;
+  static constexpr auto REASON_MAIN_THREAD = ChromeFrameReporter_FrameDropReason_REASON_MAIN_THREAD;
+  static constexpr auto REASON_CLIENT_COMPOSITOR = ChromeFrameReporter_FrameDropReason_REASON_CLIENT_COMPOSITOR;
+  static constexpr auto FrameDropReason_MIN = ChromeFrameReporter_FrameDropReason_REASON_UNSPECIFIED;
+  static constexpr auto FrameDropReason_MAX = ChromeFrameReporter_FrameDropReason_REASON_CLIENT_COMPOSITOR;
+  using ScrollState = ChromeFrameReporter_ScrollState;
+  static constexpr auto SCROLL_NONE = ChromeFrameReporter_ScrollState_SCROLL_NONE;
+  static constexpr auto SCROLL_MAIN_THREAD = ChromeFrameReporter_ScrollState_SCROLL_MAIN_THREAD;
+  static constexpr auto SCROLL_COMPOSITOR_THREAD = ChromeFrameReporter_ScrollState_SCROLL_COMPOSITOR_THREAD;
+  static constexpr auto SCROLL_UNKNOWN = ChromeFrameReporter_ScrollState_SCROLL_UNKNOWN;
+  static constexpr auto ScrollState_MIN = ChromeFrameReporter_ScrollState_SCROLL_NONE;
+  static constexpr auto ScrollState_MAX = ChromeFrameReporter_ScrollState_SCROLL_UNKNOWN;
+  enum FieldNumbers {
+    kStateFieldNumber = 1,
+    kReasonFieldNumber = 2,
+    kFrameSourceFieldNumber = 3,
+    kFrameSequenceFieldNumber = 4,
+    kAffectsSmoothnessFieldNumber = 5,
+    kScrollStateFieldNumber = 6,
+    kHasMainAnimationFieldNumber = 7,
+    kHasCompositorAnimationFieldNumber = 8,
+    kHasSmoothInputMainFieldNumber = 9,
+    kHasMissingContentFieldNumber = 10,
+    kLayerTreeHostIdFieldNumber = 11,
+  };
+
+  ChromeFrameReporter();
+  ~ChromeFrameReporter() override;
+  ChromeFrameReporter(ChromeFrameReporter&&) noexcept;
+  ChromeFrameReporter& operator=(ChromeFrameReporter&&);
+  ChromeFrameReporter(const ChromeFrameReporter&);
+  ChromeFrameReporter& operator=(const ChromeFrameReporter&);
+  bool operator==(const ChromeFrameReporter&) const;
+  bool operator!=(const ChromeFrameReporter& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_state() const { return _has_field_[1]; }
+  ChromeFrameReporter_State state() const { return state_; }
+  void set_state(ChromeFrameReporter_State value) { state_ = value; _has_field_.set(1); }
+
+  bool has_reason() const { return _has_field_[2]; }
+  ChromeFrameReporter_FrameDropReason reason() const { return reason_; }
+  void set_reason(ChromeFrameReporter_FrameDropReason value) { reason_ = value; _has_field_.set(2); }
+
+  bool has_frame_source() const { return _has_field_[3]; }
+  uint64_t frame_source() const { return frame_source_; }
+  void set_frame_source(uint64_t value) { frame_source_ = value; _has_field_.set(3); }
+
+  bool has_frame_sequence() const { return _has_field_[4]; }
+  uint64_t frame_sequence() const { return frame_sequence_; }
+  void set_frame_sequence(uint64_t value) { frame_sequence_ = value; _has_field_.set(4); }
+
+  bool has_affects_smoothness() const { return _has_field_[5]; }
+  bool affects_smoothness() const { return affects_smoothness_; }
+  void set_affects_smoothness(bool value) { affects_smoothness_ = value; _has_field_.set(5); }
+
+  bool has_scroll_state() const { return _has_field_[6]; }
+  ChromeFrameReporter_ScrollState scroll_state() const { return scroll_state_; }
+  void set_scroll_state(ChromeFrameReporter_ScrollState value) { scroll_state_ = value; _has_field_.set(6); }
+
+  bool has_has_main_animation() const { return _has_field_[7]; }
+  bool has_main_animation() const { return has_main_animation_; }
+  void set_has_main_animation(bool value) { has_main_animation_ = value; _has_field_.set(7); }
+
+  bool has_has_compositor_animation() const { return _has_field_[8]; }
+  bool has_compositor_animation() const { return has_compositor_animation_; }
+  void set_has_compositor_animation(bool value) { has_compositor_animation_ = value; _has_field_.set(8); }
+
+  bool has_has_smooth_input_main() const { return _has_field_[9]; }
+  bool has_smooth_input_main() const { return has_smooth_input_main_; }
+  void set_has_smooth_input_main(bool value) { has_smooth_input_main_ = value; _has_field_.set(9); }
+
+  bool has_has_missing_content() const { return _has_field_[10]; }
+  bool has_missing_content() const { return has_missing_content_; }
+  void set_has_missing_content(bool value) { has_missing_content_ = value; _has_field_.set(10); }
+
+  bool has_layer_tree_host_id() const { return _has_field_[11]; }
+  uint64_t layer_tree_host_id() const { return layer_tree_host_id_; }
+  void set_layer_tree_host_id(uint64_t value) { layer_tree_host_id_ = value; _has_field_.set(11); }
+
+ private:
+  ChromeFrameReporter_State state_{};
+  ChromeFrameReporter_FrameDropReason reason_{};
+  uint64_t frame_source_{};
+  uint64_t frame_sequence_{};
+  bool affects_smoothness_{};
+  ChromeFrameReporter_ScrollState scroll_state_{};
+  bool has_main_animation_{};
+  bool has_compositor_animation_{};
+  bool has_smooth_input_main_{};
+  bool has_missing_content_{};
+  uint64_t layer_tree_host_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<12> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_FRAME_REPORTER_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_frame_reporter.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ChromeFrameReporter::ChromeFrameReporter() = default;
+ChromeFrameReporter::~ChromeFrameReporter() = default;
+ChromeFrameReporter::ChromeFrameReporter(const ChromeFrameReporter&) = default;
+ChromeFrameReporter& ChromeFrameReporter::operator=(const ChromeFrameReporter&) = default;
+ChromeFrameReporter::ChromeFrameReporter(ChromeFrameReporter&&) noexcept = default;
+ChromeFrameReporter& ChromeFrameReporter::operator=(ChromeFrameReporter&&) = default;
+
+bool ChromeFrameReporter::operator==(const ChromeFrameReporter& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && state_ == other.state_
+   && reason_ == other.reason_
+   && frame_source_ == other.frame_source_
+   && frame_sequence_ == other.frame_sequence_
+   && affects_smoothness_ == other.affects_smoothness_
+   && scroll_state_ == other.scroll_state_
+   && has_main_animation_ == other.has_main_animation_
+   && has_compositor_animation_ == other.has_compositor_animation_
+   && has_smooth_input_main_ == other.has_smooth_input_main_
+   && has_missing_content_ == other.has_missing_content_
+   && layer_tree_host_id_ == other.layer_tree_host_id_;
+}
+
+bool ChromeFrameReporter::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* state */:
+        field.get(&state_);
+        break;
+      case 2 /* reason */:
+        field.get(&reason_);
+        break;
+      case 3 /* frame_source */:
+        field.get(&frame_source_);
+        break;
+      case 4 /* frame_sequence */:
+        field.get(&frame_sequence_);
+        break;
+      case 5 /* affects_smoothness */:
+        field.get(&affects_smoothness_);
+        break;
+      case 6 /* scroll_state */:
+        field.get(&scroll_state_);
+        break;
+      case 7 /* has_main_animation */:
+        field.get(&has_main_animation_);
+        break;
+      case 8 /* has_compositor_animation */:
+        field.get(&has_compositor_animation_);
+        break;
+      case 9 /* has_smooth_input_main */:
+        field.get(&has_smooth_input_main_);
+        break;
+      case 10 /* has_missing_content */:
+        field.get(&has_missing_content_);
+        break;
+      case 11 /* layer_tree_host_id */:
+        field.get(&layer_tree_host_id_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeFrameReporter::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeFrameReporter::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeFrameReporter::Serialize(::protozero::Message* msg) const {
+  // Field 1: state
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, state_);
+  }
+
+  // Field 2: reason
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, reason_);
+  }
+
+  // Field 3: frame_source
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, frame_source_);
+  }
+
+  // Field 4: frame_sequence
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, frame_sequence_);
+  }
+
+  // Field 5: affects_smoothness
+  if (_has_field_[5]) {
+    msg->AppendTinyVarInt(5, affects_smoothness_);
+  }
+
+  // Field 6: scroll_state
+  if (_has_field_[6]) {
+    msg->AppendVarInt(6, scroll_state_);
+  }
+
+  // Field 7: has_main_animation
+  if (_has_field_[7]) {
+    msg->AppendTinyVarInt(7, has_main_animation_);
+  }
+
+  // Field 8: has_compositor_animation
+  if (_has_field_[8]) {
+    msg->AppendTinyVarInt(8, has_compositor_animation_);
+  }
+
+  // Field 9: has_smooth_input_main
+  if (_has_field_[9]) {
+    msg->AppendTinyVarInt(9, has_smooth_input_main_);
+  }
+
+  // Field 10: has_missing_content
+  if (_has_field_[10]) {
+    msg->AppendTinyVarInt(10, has_missing_content_);
+  }
+
+  // Field 11: layer_tree_host_id
+  if (_has_field_[11]) {
+    msg->AppendVarInt(11, layer_tree_host_id_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_histogram_sample.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_histogram_sample.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_HISTOGRAM_SAMPLE_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_HISTOGRAM_SAMPLE_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeHistogramSample;
+class HistogramName;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT ChromeHistogramSample : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameHashFieldNumber = 1,
+    kNameFieldNumber = 2,
+    kSampleFieldNumber = 3,
+    kNameIidFieldNumber = 4,
+  };
+
+  ChromeHistogramSample();
+  ~ChromeHistogramSample() override;
+  ChromeHistogramSample(ChromeHistogramSample&&) noexcept;
+  ChromeHistogramSample& operator=(ChromeHistogramSample&&);
+  ChromeHistogramSample(const ChromeHistogramSample&);
+  ChromeHistogramSample& operator=(const ChromeHistogramSample&);
+  bool operator==(const ChromeHistogramSample&) const;
+  bool operator!=(const ChromeHistogramSample& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name_hash() const { return _has_field_[1]; }
+  uint64_t name_hash() const { return name_hash_; }
+  void set_name_hash(uint64_t value) { name_hash_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+  bool has_sample() const { return _has_field_[3]; }
+  int64_t sample() const { return sample_; }
+  void set_sample(int64_t value) { sample_ = value; _has_field_.set(3); }
+
+  bool has_name_iid() const { return _has_field_[4]; }
+  uint64_t name_iid() const { return name_iid_; }
+  void set_name_iid(uint64_t value) { name_iid_ = value; _has_field_.set(4); }
+
+ private:
+  uint64_t name_hash_{};
+  std::string name_{};
+  int64_t sample_{};
+  uint64_t name_iid_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT HistogramName : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIidFieldNumber = 1,
+    kNameFieldNumber = 2,
+  };
+
+  HistogramName();
+  ~HistogramName() override;
+  HistogramName(HistogramName&&) noexcept;
+  HistogramName& operator=(HistogramName&&);
+  HistogramName(const HistogramName&);
+  HistogramName& operator=(const HistogramName&);
+  bool operator==(const HistogramName&) const;
+  bool operator!=(const HistogramName& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_iid() const { return _has_field_[1]; }
+  uint64_t iid() const { return iid_; }
+  void set_iid(uint64_t value) { iid_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+ private:
+  uint64_t iid_{};
+  std::string name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_HISTOGRAM_SAMPLE_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_histogram_sample.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ChromeHistogramSample::ChromeHistogramSample() = default;
+ChromeHistogramSample::~ChromeHistogramSample() = default;
+ChromeHistogramSample::ChromeHistogramSample(const ChromeHistogramSample&) = default;
+ChromeHistogramSample& ChromeHistogramSample::operator=(const ChromeHistogramSample&) = default;
+ChromeHistogramSample::ChromeHistogramSample(ChromeHistogramSample&&) noexcept = default;
+ChromeHistogramSample& ChromeHistogramSample::operator=(ChromeHistogramSample&&) = default;
+
+bool ChromeHistogramSample::operator==(const ChromeHistogramSample& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && name_hash_ == other.name_hash_
+   && name_ == other.name_
+   && sample_ == other.sample_
+   && name_iid_ == other.name_iid_;
+}
+
+bool ChromeHistogramSample::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* name_hash */:
+        field.get(&name_hash_);
+        break;
+      case 2 /* name */:
+        field.get(&name_);
+        break;
+      case 3 /* sample */:
+        field.get(&sample_);
+        break;
+      case 4 /* name_iid */:
+        field.get(&name_iid_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeHistogramSample::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeHistogramSample::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeHistogramSample::Serialize(::protozero::Message* msg) const {
+  // Field 1: name_hash
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, name_hash_);
+  }
+
+  // Field 2: name
+  if (_has_field_[2]) {
+    msg->AppendString(2, name_);
+  }
+
+  // Field 3: sample
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, sample_);
+  }
+
+  // Field 4: name_iid
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, name_iid_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+HistogramName::HistogramName() = default;
+HistogramName::~HistogramName() = default;
+HistogramName::HistogramName(const HistogramName&) = default;
+HistogramName& HistogramName::operator=(const HistogramName&) = default;
+HistogramName::HistogramName(HistogramName&&) noexcept = default;
+HistogramName& HistogramName::operator=(HistogramName&&) = default;
+
+bool HistogramName::operator==(const HistogramName& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && iid_ == other.iid_
+   && name_ == other.name_;
+}
+
+bool HistogramName::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* iid */:
+        field.get(&iid_);
+        break;
+      case 2 /* name */:
+        field.get(&name_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string HistogramName::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> HistogramName::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void HistogramName::Serialize(::protozero::Message* msg) const {
+  // Field 1: iid
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, iid_);
+  }
+
+  // Field 2: name
+  if (_has_field_[2]) {
+    msg->AppendString(2, name_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_keyed_service.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_keyed_service.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_KEYED_SERVICE_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_KEYED_SERVICE_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeKeyedService;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT ChromeKeyedService : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+  };
+
+  ChromeKeyedService();
+  ~ChromeKeyedService() override;
+  ChromeKeyedService(ChromeKeyedService&&) noexcept;
+  ChromeKeyedService& operator=(ChromeKeyedService&&);
+  ChromeKeyedService(const ChromeKeyedService&);
+  ChromeKeyedService& operator=(const ChromeKeyedService&);
+  bool operator==(const ChromeKeyedService&) const;
+  bool operator!=(const ChromeKeyedService& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+ private:
+  std::string name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_KEYED_SERVICE_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_keyed_service.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ChromeKeyedService::ChromeKeyedService() = default;
+ChromeKeyedService::~ChromeKeyedService() = default;
+ChromeKeyedService::ChromeKeyedService(const ChromeKeyedService&) = default;
+ChromeKeyedService& ChromeKeyedService::operator=(const ChromeKeyedService&) = default;
+ChromeKeyedService::ChromeKeyedService(ChromeKeyedService&&) noexcept = default;
+ChromeKeyedService& ChromeKeyedService::operator=(ChromeKeyedService&&) = default;
+
+bool ChromeKeyedService::operator==(const ChromeKeyedService& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && name_ == other.name_;
+}
+
+bool ChromeKeyedService::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* name */:
+        field.get(&name_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeKeyedService::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeKeyedService::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeKeyedService::Serialize(::protozero::Message* msg) const {
+  // Field 1: name
+  if (_has_field_[1]) {
+    msg->AppendString(1, name_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_latency_info.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_latency_info.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LATENCY_INFO_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LATENCY_INFO_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeLatencyInfo;
+class ChromeLatencyInfo_ComponentInfo;
+enum ChromeLatencyInfo_Step : int;
+enum ChromeLatencyInfo_LatencyComponentType : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeLatencyInfo_Step : int {
+  ChromeLatencyInfo_Step_STEP_UNSPECIFIED = 0,
+  ChromeLatencyInfo_Step_STEP_SEND_INPUT_EVENT_UI = 3,
+  ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_IMPL = 5,
+  ChromeLatencyInfo_Step_STEP_DID_HANDLE_INPUT_AND_OVERSCROLL = 8,
+  ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_MAIN = 4,
+  ChromeLatencyInfo_Step_STEP_MAIN_THREAD_SCROLL_UPDATE = 2,
+  ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_MAIN_COMMIT = 1,
+  ChromeLatencyInfo_Step_STEP_HANDLED_INPUT_EVENT_MAIN_OR_IMPL = 9,
+  ChromeLatencyInfo_Step_STEP_HANDLED_INPUT_EVENT_IMPL = 10,
+  ChromeLatencyInfo_Step_STEP_SWAP_BUFFERS = 6,
+  ChromeLatencyInfo_Step_STEP_DRAW_AND_SWAP = 7,
+  ChromeLatencyInfo_Step_STEP_FINISHED_SWAP_BUFFERS = 11,
+};
+enum ChromeLatencyInfo_LatencyComponentType : int {
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_UNSPECIFIED = 0,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_BEGIN_RWH = 1,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_ORIGINAL = 2,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FIRST_SCROLL_UPDATE_ORIGINAL = 3,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_ORIGINAL = 4,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_UI = 5,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERER_MAIN = 6,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_MAIN = 7,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_IMPL = 8,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_LAST_EVENT = 9,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_ACK_RWH = 10,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERER_SWAP = 11,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_DISPLAY_COMPOSITOR_RECEIVED_FRAME = 12,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_GPU_SWAP_BUFFER = 13,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FRAME_SWAP = 14,
+};
+
+class PERFETTO_EXPORT ChromeLatencyInfo : public ::protozero::CppMessageObj {
+ public:
+  using ComponentInfo = ChromeLatencyInfo_ComponentInfo;
+  using Step = ChromeLatencyInfo_Step;
+  static constexpr auto STEP_UNSPECIFIED = ChromeLatencyInfo_Step_STEP_UNSPECIFIED;
+  static constexpr auto STEP_SEND_INPUT_EVENT_UI = ChromeLatencyInfo_Step_STEP_SEND_INPUT_EVENT_UI;
+  static constexpr auto STEP_HANDLE_INPUT_EVENT_IMPL = ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_IMPL;
+  static constexpr auto STEP_DID_HANDLE_INPUT_AND_OVERSCROLL = ChromeLatencyInfo_Step_STEP_DID_HANDLE_INPUT_AND_OVERSCROLL;
+  static constexpr auto STEP_HANDLE_INPUT_EVENT_MAIN = ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_MAIN;
+  static constexpr auto STEP_MAIN_THREAD_SCROLL_UPDATE = ChromeLatencyInfo_Step_STEP_MAIN_THREAD_SCROLL_UPDATE;
+  static constexpr auto STEP_HANDLE_INPUT_EVENT_MAIN_COMMIT = ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_MAIN_COMMIT;
+  static constexpr auto STEP_HANDLED_INPUT_EVENT_MAIN_OR_IMPL = ChromeLatencyInfo_Step_STEP_HANDLED_INPUT_EVENT_MAIN_OR_IMPL;
+  static constexpr auto STEP_HANDLED_INPUT_EVENT_IMPL = ChromeLatencyInfo_Step_STEP_HANDLED_INPUT_EVENT_IMPL;
+  static constexpr auto STEP_SWAP_BUFFERS = ChromeLatencyInfo_Step_STEP_SWAP_BUFFERS;
+  static constexpr auto STEP_DRAW_AND_SWAP = ChromeLatencyInfo_Step_STEP_DRAW_AND_SWAP;
+  static constexpr auto STEP_FINISHED_SWAP_BUFFERS = ChromeLatencyInfo_Step_STEP_FINISHED_SWAP_BUFFERS;
+  static constexpr auto Step_MIN = ChromeLatencyInfo_Step_STEP_UNSPECIFIED;
+  static constexpr auto Step_MAX = ChromeLatencyInfo_Step_STEP_FINISHED_SWAP_BUFFERS;
+  using LatencyComponentType = ChromeLatencyInfo_LatencyComponentType;
+  static constexpr auto COMPONENT_UNSPECIFIED = ChromeLatencyInfo_LatencyComponentType_COMPONENT_UNSPECIFIED;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_BEGIN_RWH = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_BEGIN_RWH;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_ORIGINAL = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_ORIGINAL;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_FIRST_SCROLL_UPDATE_ORIGINAL = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FIRST_SCROLL_UPDATE_ORIGINAL;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_ORIGINAL = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_ORIGINAL;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_UI = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_UI;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_RENDERER_MAIN = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERER_MAIN;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_MAIN = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_MAIN;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_IMPL = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_IMPL;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_LAST_EVENT = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_LAST_EVENT;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_ACK_RWH = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_ACK_RWH;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_RENDERER_SWAP = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERER_SWAP;
+  static constexpr auto COMPONENT_DISPLAY_COMPOSITOR_RECEIVED_FRAME = ChromeLatencyInfo_LatencyComponentType_COMPONENT_DISPLAY_COMPOSITOR_RECEIVED_FRAME;
+  static constexpr auto COMPONENT_INPUT_EVENT_GPU_SWAP_BUFFER = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_GPU_SWAP_BUFFER;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_FRAME_SWAP = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FRAME_SWAP;
+  static constexpr auto LatencyComponentType_MIN = ChromeLatencyInfo_LatencyComponentType_COMPONENT_UNSPECIFIED;
+  static constexpr auto LatencyComponentType_MAX = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FRAME_SWAP;
+  enum FieldNumbers {
+    kTraceIdFieldNumber = 1,
+    kStepFieldNumber = 2,
+    kFrameTreeNodeIdFieldNumber = 3,
+    kComponentInfoFieldNumber = 4,
+    kIsCoalescedFieldNumber = 5,
+    kGestureScrollIdFieldNumber = 6,
+  };
+
+  ChromeLatencyInfo();
+  ~ChromeLatencyInfo() override;
+  ChromeLatencyInfo(ChromeLatencyInfo&&) noexcept;
+  ChromeLatencyInfo& operator=(ChromeLatencyInfo&&);
+  ChromeLatencyInfo(const ChromeLatencyInfo&);
+  ChromeLatencyInfo& operator=(const ChromeLatencyInfo&);
+  bool operator==(const ChromeLatencyInfo&) const;
+  bool operator!=(const ChromeLatencyInfo& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_id() const { return _has_field_[1]; }
+  int64_t trace_id() const { return trace_id_; }
+  void set_trace_id(int64_t value) { trace_id_ = value; _has_field_.set(1); }
+
+  bool has_step() const { return _has_field_[2]; }
+  ChromeLatencyInfo_Step step() const { return step_; }
+  void set_step(ChromeLatencyInfo_Step value) { step_ = value; _has_field_.set(2); }
+
+  bool has_frame_tree_node_id() const { return _has_field_[3]; }
+  int32_t frame_tree_node_id() const { return frame_tree_node_id_; }
+  void set_frame_tree_node_id(int32_t value) { frame_tree_node_id_ = value; _has_field_.set(3); }
+
+  const std::vector<ChromeLatencyInfo_ComponentInfo>& component_info() const { return component_info_; }
+  std::vector<ChromeLatencyInfo_ComponentInfo>* mutable_component_info() { return &component_info_; }
+  int component_info_size() const;
+  void clear_component_info();
+  ChromeLatencyInfo_ComponentInfo* add_component_info();
+
+  bool has_is_coalesced() const { return _has_field_[5]; }
+  bool is_coalesced() const { return is_coalesced_; }
+  void set_is_coalesced(bool value) { is_coalesced_ = value; _has_field_.set(5); }
+
+  bool has_gesture_scroll_id() const { return _has_field_[6]; }
+  int64_t gesture_scroll_id() const { return gesture_scroll_id_; }
+  void set_gesture_scroll_id(int64_t value) { gesture_scroll_id_ = value; _has_field_.set(6); }
+
+ private:
+  int64_t trace_id_{};
+  ChromeLatencyInfo_Step step_{};
+  int32_t frame_tree_node_id_{};
+  std::vector<ChromeLatencyInfo_ComponentInfo> component_info_;
+  bool is_coalesced_{};
+  int64_t gesture_scroll_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<7> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ChromeLatencyInfo_ComponentInfo : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kComponentTypeFieldNumber = 1,
+    kTimeUsFieldNumber = 2,
+  };
+
+  ChromeLatencyInfo_ComponentInfo();
+  ~ChromeLatencyInfo_ComponentInfo() override;
+  ChromeLatencyInfo_ComponentInfo(ChromeLatencyInfo_ComponentInfo&&) noexcept;
+  ChromeLatencyInfo_ComponentInfo& operator=(ChromeLatencyInfo_ComponentInfo&&);
+  ChromeLatencyInfo_ComponentInfo(const ChromeLatencyInfo_ComponentInfo&);
+  ChromeLatencyInfo_ComponentInfo& operator=(const ChromeLatencyInfo_ComponentInfo&);
+  bool operator==(const ChromeLatencyInfo_ComponentInfo&) const;
+  bool operator!=(const ChromeLatencyInfo_ComponentInfo& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_component_type() const { return _has_field_[1]; }
+  ChromeLatencyInfo_LatencyComponentType component_type() const { return component_type_; }
+  void set_component_type(ChromeLatencyInfo_LatencyComponentType value) { component_type_ = value; _has_field_.set(1); }
+
+  bool has_time_us() const { return _has_field_[2]; }
+  uint64_t time_us() const { return time_us_; }
+  void set_time_us(uint64_t value) { time_us_ = value; _has_field_.set(2); }
+
+ private:
+  ChromeLatencyInfo_LatencyComponentType component_type_{};
+  uint64_t time_us_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LATENCY_INFO_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_latency_info.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ChromeLatencyInfo::ChromeLatencyInfo() = default;
+ChromeLatencyInfo::~ChromeLatencyInfo() = default;
+ChromeLatencyInfo::ChromeLatencyInfo(const ChromeLatencyInfo&) = default;
+ChromeLatencyInfo& ChromeLatencyInfo::operator=(const ChromeLatencyInfo&) = default;
+ChromeLatencyInfo::ChromeLatencyInfo(ChromeLatencyInfo&&) noexcept = default;
+ChromeLatencyInfo& ChromeLatencyInfo::operator=(ChromeLatencyInfo&&) = default;
+
+bool ChromeLatencyInfo::operator==(const ChromeLatencyInfo& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && trace_id_ == other.trace_id_
+   && step_ == other.step_
+   && frame_tree_node_id_ == other.frame_tree_node_id_
+   && component_info_ == other.component_info_
+   && is_coalesced_ == other.is_coalesced_
+   && gesture_scroll_id_ == other.gesture_scroll_id_;
+}
+
+int ChromeLatencyInfo::component_info_size() const { return static_cast<int>(component_info_.size()); }
+void ChromeLatencyInfo::clear_component_info() { component_info_.clear(); }
+ChromeLatencyInfo_ComponentInfo* ChromeLatencyInfo::add_component_info() { component_info_.emplace_back(); return &component_info_.back(); }
+bool ChromeLatencyInfo::ParseFromArray(const void* raw, size_t size) {
+  component_info_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* trace_id */:
+        field.get(&trace_id_);
+        break;
+      case 2 /* step */:
+        field.get(&step_);
+        break;
+      case 3 /* frame_tree_node_id */:
+        field.get(&frame_tree_node_id_);
+        break;
+      case 4 /* component_info */:
+        component_info_.emplace_back();
+        component_info_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 5 /* is_coalesced */:
+        field.get(&is_coalesced_);
+        break;
+      case 6 /* gesture_scroll_id */:
+        field.get(&gesture_scroll_id_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeLatencyInfo::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeLatencyInfo::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeLatencyInfo::Serialize(::protozero::Message* msg) const {
+  // Field 1: trace_id
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, trace_id_);
+  }
+
+  // Field 2: step
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, step_);
+  }
+
+  // Field 3: frame_tree_node_id
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, frame_tree_node_id_);
+  }
+
+  // Field 4: component_info
+  for (auto& it : component_info_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
+  }
+
+  // Field 5: is_coalesced
+  if (_has_field_[5]) {
+    msg->AppendTinyVarInt(5, is_coalesced_);
+  }
+
+  // Field 6: gesture_scroll_id
+  if (_has_field_[6]) {
+    msg->AppendVarInt(6, gesture_scroll_id_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+ChromeLatencyInfo_ComponentInfo::ChromeLatencyInfo_ComponentInfo() = default;
+ChromeLatencyInfo_ComponentInfo::~ChromeLatencyInfo_ComponentInfo() = default;
+ChromeLatencyInfo_ComponentInfo::ChromeLatencyInfo_ComponentInfo(const ChromeLatencyInfo_ComponentInfo&) = default;
+ChromeLatencyInfo_ComponentInfo& ChromeLatencyInfo_ComponentInfo::operator=(const ChromeLatencyInfo_ComponentInfo&) = default;
+ChromeLatencyInfo_ComponentInfo::ChromeLatencyInfo_ComponentInfo(ChromeLatencyInfo_ComponentInfo&&) noexcept = default;
+ChromeLatencyInfo_ComponentInfo& ChromeLatencyInfo_ComponentInfo::operator=(ChromeLatencyInfo_ComponentInfo&&) = default;
+
+bool ChromeLatencyInfo_ComponentInfo::operator==(const ChromeLatencyInfo_ComponentInfo& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && component_type_ == other.component_type_
+   && time_us_ == other.time_us_;
+}
+
+bool ChromeLatencyInfo_ComponentInfo::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* component_type */:
+        field.get(&component_type_);
+        break;
+      case 2 /* time_us */:
+        field.get(&time_us_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeLatencyInfo_ComponentInfo::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeLatencyInfo_ComponentInfo::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeLatencyInfo_ComponentInfo::Serialize(::protozero::Message* msg) const {
+  // Field 1: component_type
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, component_type_);
+  }
+
+  // Field 2: time_us
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, time_us_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_legacy_ipc.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_legacy_ipc.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LEGACY_IPC_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LEGACY_IPC_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeLegacyIpc;
+enum ChromeLegacyIpc_MessageClass : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeLegacyIpc_MessageClass : int {
+  ChromeLegacyIpc_MessageClass_CLASS_UNSPECIFIED = 0,
+  ChromeLegacyIpc_MessageClass_CLASS_AUTOMATION = 1,
+  ChromeLegacyIpc_MessageClass_CLASS_FRAME = 2,
+  ChromeLegacyIpc_MessageClass_CLASS_PAGE = 3,
+  ChromeLegacyIpc_MessageClass_CLASS_VIEW = 4,
+  ChromeLegacyIpc_MessageClass_CLASS_WIDGET = 5,
+  ChromeLegacyIpc_MessageClass_CLASS_INPUT = 6,
+  ChromeLegacyIpc_MessageClass_CLASS_TEST = 7,
+  ChromeLegacyIpc_MessageClass_CLASS_WORKER = 8,
+  ChromeLegacyIpc_MessageClass_CLASS_NACL = 9,
+  ChromeLegacyIpc_MessageClass_CLASS_GPU_CHANNEL = 10,
+  ChromeLegacyIpc_MessageClass_CLASS_MEDIA = 11,
+  ChromeLegacyIpc_MessageClass_CLASS_PPAPI = 12,
+  ChromeLegacyIpc_MessageClass_CLASS_CHROME = 13,
+  ChromeLegacyIpc_MessageClass_CLASS_DRAG = 14,
+  ChromeLegacyIpc_MessageClass_CLASS_PRINT = 15,
+  ChromeLegacyIpc_MessageClass_CLASS_EXTENSION = 16,
+  ChromeLegacyIpc_MessageClass_CLASS_TEXT_INPUT_CLIENT = 17,
+  ChromeLegacyIpc_MessageClass_CLASS_BLINK_TEST = 18,
+  ChromeLegacyIpc_MessageClass_CLASS_ACCESSIBILITY = 19,
+  ChromeLegacyIpc_MessageClass_CLASS_PRERENDER = 20,
+  ChromeLegacyIpc_MessageClass_CLASS_CHROMOTING = 21,
+  ChromeLegacyIpc_MessageClass_CLASS_BROWSER_PLUGIN = 22,
+  ChromeLegacyIpc_MessageClass_CLASS_ANDROID_WEB_VIEW = 23,
+  ChromeLegacyIpc_MessageClass_CLASS_NACL_HOST = 24,
+  ChromeLegacyIpc_MessageClass_CLASS_ENCRYPTED_MEDIA = 25,
+  ChromeLegacyIpc_MessageClass_CLASS_CAST = 26,
+  ChromeLegacyIpc_MessageClass_CLASS_GIN_JAVA_BRIDGE = 27,
+  ChromeLegacyIpc_MessageClass_CLASS_CHROME_UTILITY_PRINTING = 28,
+  ChromeLegacyIpc_MessageClass_CLASS_OZONE_GPU = 29,
+  ChromeLegacyIpc_MessageClass_CLASS_WEB_TEST = 30,
+  ChromeLegacyIpc_MessageClass_CLASS_NETWORK_HINTS = 31,
+  ChromeLegacyIpc_MessageClass_CLASS_EXTENSIONS_GUEST_VIEW = 32,
+  ChromeLegacyIpc_MessageClass_CLASS_GUEST_VIEW = 33,
+  ChromeLegacyIpc_MessageClass_CLASS_MEDIA_PLAYER_DELEGATE = 34,
+  ChromeLegacyIpc_MessageClass_CLASS_EXTENSION_WORKER = 35,
+  ChromeLegacyIpc_MessageClass_CLASS_SUBRESOURCE_FILTER = 36,
+  ChromeLegacyIpc_MessageClass_CLASS_UNFREEZABLE_FRAME = 37,
+};
+
+class PERFETTO_EXPORT ChromeLegacyIpc : public ::protozero::CppMessageObj {
+ public:
+  using MessageClass = ChromeLegacyIpc_MessageClass;
+  static constexpr auto CLASS_UNSPECIFIED = ChromeLegacyIpc_MessageClass_CLASS_UNSPECIFIED;
+  static constexpr auto CLASS_AUTOMATION = ChromeLegacyIpc_MessageClass_CLASS_AUTOMATION;
+  static constexpr auto CLASS_FRAME = ChromeLegacyIpc_MessageClass_CLASS_FRAME;
+  static constexpr auto CLASS_PAGE = ChromeLegacyIpc_MessageClass_CLASS_PAGE;
+  static constexpr auto CLASS_VIEW = ChromeLegacyIpc_MessageClass_CLASS_VIEW;
+  static constexpr auto CLASS_WIDGET = ChromeLegacyIpc_MessageClass_CLASS_WIDGET;
+  static constexpr auto CLASS_INPUT = ChromeLegacyIpc_MessageClass_CLASS_INPUT;
+  static constexpr auto CLASS_TEST = ChromeLegacyIpc_MessageClass_CLASS_TEST;
+  static constexpr auto CLASS_WORKER = ChromeLegacyIpc_MessageClass_CLASS_WORKER;
+  static constexpr auto CLASS_NACL = ChromeLegacyIpc_MessageClass_CLASS_NACL;
+  static constexpr auto CLASS_GPU_CHANNEL = ChromeLegacyIpc_MessageClass_CLASS_GPU_CHANNEL;
+  static constexpr auto CLASS_MEDIA = ChromeLegacyIpc_MessageClass_CLASS_MEDIA;
+  static constexpr auto CLASS_PPAPI = ChromeLegacyIpc_MessageClass_CLASS_PPAPI;
+  static constexpr auto CLASS_CHROME = ChromeLegacyIpc_MessageClass_CLASS_CHROME;
+  static constexpr auto CLASS_DRAG = ChromeLegacyIpc_MessageClass_CLASS_DRAG;
+  static constexpr auto CLASS_PRINT = ChromeLegacyIpc_MessageClass_CLASS_PRINT;
+  static constexpr auto CLASS_EXTENSION = ChromeLegacyIpc_MessageClass_CLASS_EXTENSION;
+  static constexpr auto CLASS_TEXT_INPUT_CLIENT = ChromeLegacyIpc_MessageClass_CLASS_TEXT_INPUT_CLIENT;
+  static constexpr auto CLASS_BLINK_TEST = ChromeLegacyIpc_MessageClass_CLASS_BLINK_TEST;
+  static constexpr auto CLASS_ACCESSIBILITY = ChromeLegacyIpc_MessageClass_CLASS_ACCESSIBILITY;
+  static constexpr auto CLASS_PRERENDER = ChromeLegacyIpc_MessageClass_CLASS_PRERENDER;
+  static constexpr auto CLASS_CHROMOTING = ChromeLegacyIpc_MessageClass_CLASS_CHROMOTING;
+  static constexpr auto CLASS_BROWSER_PLUGIN = ChromeLegacyIpc_MessageClass_CLASS_BROWSER_PLUGIN;
+  static constexpr auto CLASS_ANDROID_WEB_VIEW = ChromeLegacyIpc_MessageClass_CLASS_ANDROID_WEB_VIEW;
+  static constexpr auto CLASS_NACL_HOST = ChromeLegacyIpc_MessageClass_CLASS_NACL_HOST;
+  static constexpr auto CLASS_ENCRYPTED_MEDIA = ChromeLegacyIpc_MessageClass_CLASS_ENCRYPTED_MEDIA;
+  static constexpr auto CLASS_CAST = ChromeLegacyIpc_MessageClass_CLASS_CAST;
+  static constexpr auto CLASS_GIN_JAVA_BRIDGE = ChromeLegacyIpc_MessageClass_CLASS_GIN_JAVA_BRIDGE;
+  static constexpr auto CLASS_CHROME_UTILITY_PRINTING = ChromeLegacyIpc_MessageClass_CLASS_CHROME_UTILITY_PRINTING;
+  static constexpr auto CLASS_OZONE_GPU = ChromeLegacyIpc_MessageClass_CLASS_OZONE_GPU;
+  static constexpr auto CLASS_WEB_TEST = ChromeLegacyIpc_MessageClass_CLASS_WEB_TEST;
+  static constexpr auto CLASS_NETWORK_HINTS = ChromeLegacyIpc_MessageClass_CLASS_NETWORK_HINTS;
+  static constexpr auto CLASS_EXTENSIONS_GUEST_VIEW = ChromeLegacyIpc_MessageClass_CLASS_EXTENSIONS_GUEST_VIEW;
+  static constexpr auto CLASS_GUEST_VIEW = ChromeLegacyIpc_MessageClass_CLASS_GUEST_VIEW;
+  static constexpr auto CLASS_MEDIA_PLAYER_DELEGATE = ChromeLegacyIpc_MessageClass_CLASS_MEDIA_PLAYER_DELEGATE;
+  static constexpr auto CLASS_EXTENSION_WORKER = ChromeLegacyIpc_MessageClass_CLASS_EXTENSION_WORKER;
+  static constexpr auto CLASS_SUBRESOURCE_FILTER = ChromeLegacyIpc_MessageClass_CLASS_SUBRESOURCE_FILTER;
+  static constexpr auto CLASS_UNFREEZABLE_FRAME = ChromeLegacyIpc_MessageClass_CLASS_UNFREEZABLE_FRAME;
+  static constexpr auto MessageClass_MIN = ChromeLegacyIpc_MessageClass_CLASS_UNSPECIFIED;
+  static constexpr auto MessageClass_MAX = ChromeLegacyIpc_MessageClass_CLASS_UNFREEZABLE_FRAME;
+  enum FieldNumbers {
+    kMessageClassFieldNumber = 1,
+    kMessageLineFieldNumber = 2,
+  };
+
+  ChromeLegacyIpc();
+  ~ChromeLegacyIpc() override;
+  ChromeLegacyIpc(ChromeLegacyIpc&&) noexcept;
+  ChromeLegacyIpc& operator=(ChromeLegacyIpc&&);
+  ChromeLegacyIpc(const ChromeLegacyIpc&);
+  ChromeLegacyIpc& operator=(const ChromeLegacyIpc&);
+  bool operator==(const ChromeLegacyIpc&) const;
+  bool operator!=(const ChromeLegacyIpc& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_message_class() const { return _has_field_[1]; }
+  ChromeLegacyIpc_MessageClass message_class() const { return message_class_; }
+  void set_message_class(ChromeLegacyIpc_MessageClass value) { message_class_ = value; _has_field_.set(1); }
+
+  bool has_message_line() const { return _has_field_[2]; }
+  uint32_t message_line() const { return message_line_; }
+  void set_message_line(uint32_t value) { message_line_ = value; _has_field_.set(2); }
+
+ private:
+  ChromeLegacyIpc_MessageClass message_class_{};
+  uint32_t message_line_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LEGACY_IPC_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_legacy_ipc.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ChromeLegacyIpc::ChromeLegacyIpc() = default;
+ChromeLegacyIpc::~ChromeLegacyIpc() = default;
+ChromeLegacyIpc::ChromeLegacyIpc(const ChromeLegacyIpc&) = default;
+ChromeLegacyIpc& ChromeLegacyIpc::operator=(const ChromeLegacyIpc&) = default;
+ChromeLegacyIpc::ChromeLegacyIpc(ChromeLegacyIpc&&) noexcept = default;
+ChromeLegacyIpc& ChromeLegacyIpc::operator=(ChromeLegacyIpc&&) = default;
+
+bool ChromeLegacyIpc::operator==(const ChromeLegacyIpc& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && message_class_ == other.message_class_
+   && message_line_ == other.message_line_;
+}
+
+bool ChromeLegacyIpc::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* message_class */:
+        field.get(&message_class_);
+        break;
+      case 2 /* message_line */:
+        field.get(&message_line_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeLegacyIpc::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeLegacyIpc::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeLegacyIpc::Serialize(::protozero::Message* msg) const {
+  // Field 1: message_class
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, message_class_);
+  }
+
+  // Field 2: message_line
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, message_line_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_message_pump.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_message_pump.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_MESSAGE_PUMP_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_MESSAGE_PUMP_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeMessagePump;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT ChromeMessagePump : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kSentMessagesInQueueFieldNumber = 1,
+    kIoHandlerLocationIidFieldNumber = 2,
+  };
+
+  ChromeMessagePump();
+  ~ChromeMessagePump() override;
+  ChromeMessagePump(ChromeMessagePump&&) noexcept;
+  ChromeMessagePump& operator=(ChromeMessagePump&&);
+  ChromeMessagePump(const ChromeMessagePump&);
+  ChromeMessagePump& operator=(const ChromeMessagePump&);
+  bool operator==(const ChromeMessagePump&) const;
+  bool operator!=(const ChromeMessagePump& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_sent_messages_in_queue() const { return _has_field_[1]; }
+  bool sent_messages_in_queue() const { return sent_messages_in_queue_; }
+  void set_sent_messages_in_queue(bool value) { sent_messages_in_queue_ = value; _has_field_.set(1); }
+
+  bool has_io_handler_location_iid() const { return _has_field_[2]; }
+  uint64_t io_handler_location_iid() const { return io_handler_location_iid_; }
+  void set_io_handler_location_iid(uint64_t value) { io_handler_location_iid_ = value; _has_field_.set(2); }
+
+ private:
+  bool sent_messages_in_queue_{};
+  uint64_t io_handler_location_iid_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_MESSAGE_PUMP_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_message_pump.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ChromeMessagePump::ChromeMessagePump() = default;
+ChromeMessagePump::~ChromeMessagePump() = default;
+ChromeMessagePump::ChromeMessagePump(const ChromeMessagePump&) = default;
+ChromeMessagePump& ChromeMessagePump::operator=(const ChromeMessagePump&) = default;
+ChromeMessagePump::ChromeMessagePump(ChromeMessagePump&&) noexcept = default;
+ChromeMessagePump& ChromeMessagePump::operator=(ChromeMessagePump&&) = default;
+
+bool ChromeMessagePump::operator==(const ChromeMessagePump& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && sent_messages_in_queue_ == other.sent_messages_in_queue_
+   && io_handler_location_iid_ == other.io_handler_location_iid_;
+}
+
+bool ChromeMessagePump::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* sent_messages_in_queue */:
+        field.get(&sent_messages_in_queue_);
+        break;
+      case 2 /* io_handler_location_iid */:
+        field.get(&io_handler_location_iid_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeMessagePump::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeMessagePump::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeMessagePump::Serialize(::protozero::Message* msg) const {
+  // Field 1: sent_messages_in_queue
+  if (_has_field_[1]) {
+    msg->AppendTinyVarInt(1, sent_messages_in_queue_);
+  }
+
+  // Field 2: io_handler_location_iid
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, io_handler_location_iid_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_mojo_event_info.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_mojo_event_info.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_MOJO_EVENT_INFO_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_MOJO_EVENT_INFO_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeMojoEventInfo;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT ChromeMojoEventInfo : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kWatcherNotifyInterfaceTagFieldNumber = 1,
+    kIpcHashFieldNumber = 2,
+    kMojoInterfaceTagFieldNumber = 3,
+  };
+
+  ChromeMojoEventInfo();
+  ~ChromeMojoEventInfo() override;
+  ChromeMojoEventInfo(ChromeMojoEventInfo&&) noexcept;
+  ChromeMojoEventInfo& operator=(ChromeMojoEventInfo&&);
+  ChromeMojoEventInfo(const ChromeMojoEventInfo&);
+  ChromeMojoEventInfo& operator=(const ChromeMojoEventInfo&);
+  bool operator==(const ChromeMojoEventInfo&) const;
+  bool operator!=(const ChromeMojoEventInfo& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_watcher_notify_interface_tag() const { return _has_field_[1]; }
+  const std::string& watcher_notify_interface_tag() const { return watcher_notify_interface_tag_; }
+  void set_watcher_notify_interface_tag(const std::string& value) { watcher_notify_interface_tag_ = value; _has_field_.set(1); }
+
+  bool has_ipc_hash() const { return _has_field_[2]; }
+  uint32_t ipc_hash() const { return ipc_hash_; }
+  void set_ipc_hash(uint32_t value) { ipc_hash_ = value; _has_field_.set(2); }
+
+  bool has_mojo_interface_tag() const { return _has_field_[3]; }
+  const std::string& mojo_interface_tag() const { return mojo_interface_tag_; }
+  void set_mojo_interface_tag(const std::string& value) { mojo_interface_tag_ = value; _has_field_.set(3); }
+
+ private:
+  std::string watcher_notify_interface_tag_{};
+  uint32_t ipc_hash_{};
+  std::string mojo_interface_tag_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_MOJO_EVENT_INFO_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_mojo_event_info.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ChromeMojoEventInfo::ChromeMojoEventInfo() = default;
+ChromeMojoEventInfo::~ChromeMojoEventInfo() = default;
+ChromeMojoEventInfo::ChromeMojoEventInfo(const ChromeMojoEventInfo&) = default;
+ChromeMojoEventInfo& ChromeMojoEventInfo::operator=(const ChromeMojoEventInfo&) = default;
+ChromeMojoEventInfo::ChromeMojoEventInfo(ChromeMojoEventInfo&&) noexcept = default;
+ChromeMojoEventInfo& ChromeMojoEventInfo::operator=(ChromeMojoEventInfo&&) = default;
+
+bool ChromeMojoEventInfo::operator==(const ChromeMojoEventInfo& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && watcher_notify_interface_tag_ == other.watcher_notify_interface_tag_
+   && ipc_hash_ == other.ipc_hash_
+   && mojo_interface_tag_ == other.mojo_interface_tag_;
+}
+
+bool ChromeMojoEventInfo::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* watcher_notify_interface_tag */:
+        field.get(&watcher_notify_interface_tag_);
+        break;
+      case 2 /* ipc_hash */:
+        field.get(&ipc_hash_);
+        break;
+      case 3 /* mojo_interface_tag */:
+        field.get(&mojo_interface_tag_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeMojoEventInfo::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeMojoEventInfo::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeMojoEventInfo::Serialize(::protozero::Message* msg) const {
+  // Field 1: watcher_notify_interface_tag
+  if (_has_field_[1]) {
+    msg->AppendString(1, watcher_notify_interface_tag_);
+  }
+
+  // Field 2: ipc_hash
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, ipc_hash_);
+  }
+
+  // Field 3: mojo_interface_tag
+  if (_has_field_[3]) {
+    msg->AppendString(3, mojo_interface_tag_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_process_descriptor.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_process_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_PROCESS_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_PROCESS_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeProcessDescriptor;
+enum ChromeProcessDescriptor_ProcessType : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeProcessDescriptor_ProcessType : int {
+  ChromeProcessDescriptor_ProcessType_PROCESS_UNSPECIFIED = 0,
+  ChromeProcessDescriptor_ProcessType_PROCESS_BROWSER = 1,
+  ChromeProcessDescriptor_ProcessType_PROCESS_RENDERER = 2,
+  ChromeProcessDescriptor_ProcessType_PROCESS_UTILITY = 3,
+  ChromeProcessDescriptor_ProcessType_PROCESS_ZYGOTE = 4,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SANDBOX_HELPER = 5,
+  ChromeProcessDescriptor_ProcessType_PROCESS_GPU = 6,
+  ChromeProcessDescriptor_ProcessType_PROCESS_PPAPI_PLUGIN = 7,
+  ChromeProcessDescriptor_ProcessType_PROCESS_PPAPI_BROKER = 8,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_NETWORK = 9,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_TRACING = 10,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_STORAGE = 11,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_AUDIO = 12,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_DATA_DECODER = 13,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_UTIL_WIN = 14,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PROXY_RESOLVER = 15,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CDM = 16,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_VIDEO_CAPTURE = 17,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_UNZIPPER = 18,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_MIRRORING = 19,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_FILEPATCHER = 20,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_TTS = 21,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PRINTING = 22,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_QUARANTINE = 23,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CROS_LOCALSEARCH = 24,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CROS_ASSISTANT_AUDIO_DECODER = 25,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_FILEUTIL = 26,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PRINTCOMPOSITOR = 27,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PAINTPREVIEW = 28,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SPEECHRECOGNITION = 29,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_XRDEVICE = 30,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_READICON = 31,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_LANGUAGEDETECTION = 32,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SHARING = 33,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_MEDIAPARSER = 34,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_QRCODEGENERATOR = 35,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PROFILEIMPORT = 36,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_IME = 37,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_RECORDING = 38,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SHAPEDETECTION = 39,
+};
+
+class PERFETTO_EXPORT ChromeProcessDescriptor : public ::protozero::CppMessageObj {
+ public:
+  using ProcessType = ChromeProcessDescriptor_ProcessType;
+  static constexpr auto PROCESS_UNSPECIFIED = ChromeProcessDescriptor_ProcessType_PROCESS_UNSPECIFIED;
+  static constexpr auto PROCESS_BROWSER = ChromeProcessDescriptor_ProcessType_PROCESS_BROWSER;
+  static constexpr auto PROCESS_RENDERER = ChromeProcessDescriptor_ProcessType_PROCESS_RENDERER;
+  static constexpr auto PROCESS_UTILITY = ChromeProcessDescriptor_ProcessType_PROCESS_UTILITY;
+  static constexpr auto PROCESS_ZYGOTE = ChromeProcessDescriptor_ProcessType_PROCESS_ZYGOTE;
+  static constexpr auto PROCESS_SANDBOX_HELPER = ChromeProcessDescriptor_ProcessType_PROCESS_SANDBOX_HELPER;
+  static constexpr auto PROCESS_GPU = ChromeProcessDescriptor_ProcessType_PROCESS_GPU;
+  static constexpr auto PROCESS_PPAPI_PLUGIN = ChromeProcessDescriptor_ProcessType_PROCESS_PPAPI_PLUGIN;
+  static constexpr auto PROCESS_PPAPI_BROKER = ChromeProcessDescriptor_ProcessType_PROCESS_PPAPI_BROKER;
+  static constexpr auto PROCESS_SERVICE_NETWORK = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_NETWORK;
+  static constexpr auto PROCESS_SERVICE_TRACING = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_TRACING;
+  static constexpr auto PROCESS_SERVICE_STORAGE = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_STORAGE;
+  static constexpr auto PROCESS_SERVICE_AUDIO = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_AUDIO;
+  static constexpr auto PROCESS_SERVICE_DATA_DECODER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_DATA_DECODER;
+  static constexpr auto PROCESS_SERVICE_UTIL_WIN = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_UTIL_WIN;
+  static constexpr auto PROCESS_SERVICE_PROXY_RESOLVER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PROXY_RESOLVER;
+  static constexpr auto PROCESS_SERVICE_CDM = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CDM;
+  static constexpr auto PROCESS_SERVICE_VIDEO_CAPTURE = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_VIDEO_CAPTURE;
+  static constexpr auto PROCESS_SERVICE_UNZIPPER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_UNZIPPER;
+  static constexpr auto PROCESS_SERVICE_MIRRORING = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_MIRRORING;
+  static constexpr auto PROCESS_SERVICE_FILEPATCHER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_FILEPATCHER;
+  static constexpr auto PROCESS_SERVICE_TTS = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_TTS;
+  static constexpr auto PROCESS_SERVICE_PRINTING = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PRINTING;
+  static constexpr auto PROCESS_SERVICE_QUARANTINE = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_QUARANTINE;
+  static constexpr auto PROCESS_SERVICE_CROS_LOCALSEARCH = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CROS_LOCALSEARCH;
+  static constexpr auto PROCESS_SERVICE_CROS_ASSISTANT_AUDIO_DECODER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CROS_ASSISTANT_AUDIO_DECODER;
+  static constexpr auto PROCESS_SERVICE_FILEUTIL = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_FILEUTIL;
+  static constexpr auto PROCESS_SERVICE_PRINTCOMPOSITOR = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PRINTCOMPOSITOR;
+  static constexpr auto PROCESS_SERVICE_PAINTPREVIEW = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PAINTPREVIEW;
+  static constexpr auto PROCESS_SERVICE_SPEECHRECOGNITION = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SPEECHRECOGNITION;
+  static constexpr auto PROCESS_SERVICE_XRDEVICE = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_XRDEVICE;
+  static constexpr auto PROCESS_SERVICE_READICON = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_READICON;
+  static constexpr auto PROCESS_SERVICE_LANGUAGEDETECTION = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_LANGUAGEDETECTION;
+  static constexpr auto PROCESS_SERVICE_SHARING = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SHARING;
+  static constexpr auto PROCESS_SERVICE_MEDIAPARSER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_MEDIAPARSER;
+  static constexpr auto PROCESS_SERVICE_QRCODEGENERATOR = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_QRCODEGENERATOR;
+  static constexpr auto PROCESS_SERVICE_PROFILEIMPORT = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PROFILEIMPORT;
+  static constexpr auto PROCESS_SERVICE_IME = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_IME;
+  static constexpr auto PROCESS_SERVICE_RECORDING = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_RECORDING;
+  static constexpr auto PROCESS_SERVICE_SHAPEDETECTION = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SHAPEDETECTION;
+  static constexpr auto ProcessType_MIN = ChromeProcessDescriptor_ProcessType_PROCESS_UNSPECIFIED;
+  static constexpr auto ProcessType_MAX = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SHAPEDETECTION;
+  enum FieldNumbers {
+    kProcessTypeFieldNumber = 1,
+    kProcessPriorityFieldNumber = 2,
+    kLegacySortIndexFieldNumber = 3,
+    kHostAppPackageNameFieldNumber = 4,
+    kCrashTraceIdFieldNumber = 5,
+  };
+
+  ChromeProcessDescriptor();
+  ~ChromeProcessDescriptor() override;
+  ChromeProcessDescriptor(ChromeProcessDescriptor&&) noexcept;
+  ChromeProcessDescriptor& operator=(ChromeProcessDescriptor&&);
+  ChromeProcessDescriptor(const ChromeProcessDescriptor&);
+  ChromeProcessDescriptor& operator=(const ChromeProcessDescriptor&);
+  bool operator==(const ChromeProcessDescriptor&) const;
+  bool operator!=(const ChromeProcessDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_process_type() const { return _has_field_[1]; }
+  ChromeProcessDescriptor_ProcessType process_type() const { return process_type_; }
+  void set_process_type(ChromeProcessDescriptor_ProcessType value) { process_type_ = value; _has_field_.set(1); }
+
+  bool has_process_priority() const { return _has_field_[2]; }
+  int32_t process_priority() const { return process_priority_; }
+  void set_process_priority(int32_t value) { process_priority_ = value; _has_field_.set(2); }
+
+  bool has_legacy_sort_index() const { return _has_field_[3]; }
+  int32_t legacy_sort_index() const { return legacy_sort_index_; }
+  void set_legacy_sort_index(int32_t value) { legacy_sort_index_ = value; _has_field_.set(3); }
+
+  bool has_host_app_package_name() const { return _has_field_[4]; }
+  const std::string& host_app_package_name() const { return host_app_package_name_; }
+  void set_host_app_package_name(const std::string& value) { host_app_package_name_ = value; _has_field_.set(4); }
+
+  bool has_crash_trace_id() const { return _has_field_[5]; }
+  uint64_t crash_trace_id() const { return crash_trace_id_; }
+  void set_crash_trace_id(uint64_t value) { crash_trace_id_ = value; _has_field_.set(5); }
+
+ private:
+  ChromeProcessDescriptor_ProcessType process_type_{};
+  int32_t process_priority_{};
+  int32_t legacy_sort_index_{};
+  std::string host_app_package_name_{};
+  uint64_t crash_trace_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_PROCESS_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_process_descriptor.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ChromeProcessDescriptor::ChromeProcessDescriptor() = default;
+ChromeProcessDescriptor::~ChromeProcessDescriptor() = default;
+ChromeProcessDescriptor::ChromeProcessDescriptor(const ChromeProcessDescriptor&) = default;
+ChromeProcessDescriptor& ChromeProcessDescriptor::operator=(const ChromeProcessDescriptor&) = default;
+ChromeProcessDescriptor::ChromeProcessDescriptor(ChromeProcessDescriptor&&) noexcept = default;
+ChromeProcessDescriptor& ChromeProcessDescriptor::operator=(ChromeProcessDescriptor&&) = default;
+
+bool ChromeProcessDescriptor::operator==(const ChromeProcessDescriptor& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && process_type_ == other.process_type_
+   && process_priority_ == other.process_priority_
+   && legacy_sort_index_ == other.legacy_sort_index_
+   && host_app_package_name_ == other.host_app_package_name_
+   && crash_trace_id_ == other.crash_trace_id_;
+}
+
+bool ChromeProcessDescriptor::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* process_type */:
+        field.get(&process_type_);
+        break;
+      case 2 /* process_priority */:
+        field.get(&process_priority_);
+        break;
+      case 3 /* legacy_sort_index */:
+        field.get(&legacy_sort_index_);
+        break;
+      case 4 /* host_app_package_name */:
+        field.get(&host_app_package_name_);
+        break;
+      case 5 /* crash_trace_id */:
+        field.get(&crash_trace_id_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeProcessDescriptor::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeProcessDescriptor::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeProcessDescriptor::Serialize(::protozero::Message* msg) const {
+  // Field 1: process_type
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, process_type_);
+  }
+
+  // Field 2: process_priority
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, process_priority_);
+  }
+
+  // Field 3: legacy_sort_index
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, legacy_sort_index_);
+  }
+
+  // Field 4: host_app_package_name
+  if (_has_field_[4]) {
+    msg->AppendString(4, host_app_package_name_);
+  }
+
+  // Field 5: crash_trace_id
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, crash_trace_id_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_renderer_scheduler_state.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_renderer_scheduler_state.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_RENDERER_SCHEDULER_STATE_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_RENDERER_SCHEDULER_STATE_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeRendererSchedulerState;
+enum ChromeRAILMode : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeRAILMode : int {
+  RAIL_MODE_NONE = 0,
+  RAIL_MODE_RESPONSE = 1,
+  RAIL_MODE_ANIMATION = 2,
+  RAIL_MODE_IDLE = 3,
+  RAIL_MODE_LOAD = 4,
+};
+
+class PERFETTO_EXPORT ChromeRendererSchedulerState : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kRailModeFieldNumber = 1,
+  };
+
+  ChromeRendererSchedulerState();
+  ~ChromeRendererSchedulerState() override;
+  ChromeRendererSchedulerState(ChromeRendererSchedulerState&&) noexcept;
+  ChromeRendererSchedulerState& operator=(ChromeRendererSchedulerState&&);
+  ChromeRendererSchedulerState(const ChromeRendererSchedulerState&);
+  ChromeRendererSchedulerState& operator=(const ChromeRendererSchedulerState&);
+  bool operator==(const ChromeRendererSchedulerState&) const;
+  bool operator!=(const ChromeRendererSchedulerState& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_rail_mode() const { return _has_field_[1]; }
+  ChromeRAILMode rail_mode() const { return rail_mode_; }
+  void set_rail_mode(ChromeRAILMode value) { rail_mode_ = value; _has_field_.set(1); }
+
+ private:
+  ChromeRAILMode rail_mode_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_RENDERER_SCHEDULER_STATE_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_renderer_scheduler_state.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ChromeRendererSchedulerState::ChromeRendererSchedulerState() = default;
+ChromeRendererSchedulerState::~ChromeRendererSchedulerState() = default;
+ChromeRendererSchedulerState::ChromeRendererSchedulerState(const ChromeRendererSchedulerState&) = default;
+ChromeRendererSchedulerState& ChromeRendererSchedulerState::operator=(const ChromeRendererSchedulerState&) = default;
+ChromeRendererSchedulerState::ChromeRendererSchedulerState(ChromeRendererSchedulerState&&) noexcept = default;
+ChromeRendererSchedulerState& ChromeRendererSchedulerState::operator=(ChromeRendererSchedulerState&&) = default;
+
+bool ChromeRendererSchedulerState::operator==(const ChromeRendererSchedulerState& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && rail_mode_ == other.rail_mode_;
+}
+
+bool ChromeRendererSchedulerState::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* rail_mode */:
+        field.get(&rail_mode_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeRendererSchedulerState::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeRendererSchedulerState::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeRendererSchedulerState::Serialize(::protozero::Message* msg) const {
+  // Field 1: rail_mode
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, rail_mode_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_thread_descriptor.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_thread_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_THREAD_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_THREAD_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeThreadDescriptor;
+enum ChromeThreadDescriptor_ThreadType : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeThreadDescriptor_ThreadType : int {
+  ChromeThreadDescriptor_ThreadType_THREAD_UNSPECIFIED = 0,
+  ChromeThreadDescriptor_ThreadType_THREAD_MAIN = 1,
+  ChromeThreadDescriptor_ThreadType_THREAD_IO = 2,
+  ChromeThreadDescriptor_ThreadType_THREAD_POOL_BG_WORKER = 3,
+  ChromeThreadDescriptor_ThreadType_THREAD_POOL_FG_WORKER = 4,
+  ChromeThreadDescriptor_ThreadType_THREAD_POOL_FG_BLOCKING = 5,
+  ChromeThreadDescriptor_ThreadType_THREAD_POOL_BG_BLOCKING = 6,
+  ChromeThreadDescriptor_ThreadType_THREAD_POOL_SERVICE = 7,
+  ChromeThreadDescriptor_ThreadType_THREAD_COMPOSITOR = 8,
+  ChromeThreadDescriptor_ThreadType_THREAD_VIZ_COMPOSITOR = 9,
+  ChromeThreadDescriptor_ThreadType_THREAD_COMPOSITOR_WORKER = 10,
+  ChromeThreadDescriptor_ThreadType_THREAD_SERVICE_WORKER = 11,
+  ChromeThreadDescriptor_ThreadType_THREAD_NETWORK_SERVICE = 12,
+  ChromeThreadDescriptor_ThreadType_THREAD_CHILD_IO = 13,
+  ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_IO = 14,
+  ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_MAIN = 15,
+  ChromeThreadDescriptor_ThreadType_THREAD_RENDERER_MAIN = 16,
+  ChromeThreadDescriptor_ThreadType_THREAD_UTILITY_MAIN = 17,
+  ChromeThreadDescriptor_ThreadType_THREAD_GPU_MAIN = 18,
+  ChromeThreadDescriptor_ThreadType_THREAD_CACHE_BLOCKFILE = 19,
+  ChromeThreadDescriptor_ThreadType_THREAD_MEDIA = 20,
+  ChromeThreadDescriptor_ThreadType_THREAD_AUDIO_OUTPUTDEVICE = 21,
+  ChromeThreadDescriptor_ThreadType_THREAD_AUDIO_INPUTDEVICE = 22,
+  ChromeThreadDescriptor_ThreadType_THREAD_GPU_MEMORY = 23,
+  ChromeThreadDescriptor_ThreadType_THREAD_GPU_VSYNC = 24,
+  ChromeThreadDescriptor_ThreadType_THREAD_DXA_VIDEODECODER = 25,
+  ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_WATCHDOG = 26,
+  ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_NETWORK = 27,
+  ChromeThreadDescriptor_ThreadType_THREAD_WINDOW_OWNER = 28,
+  ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_SIGNALING = 29,
+  ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_WORKER = 30,
+  ChromeThreadDescriptor_ThreadType_THREAD_PPAPI_MAIN = 31,
+  ChromeThreadDescriptor_ThreadType_THREAD_GPU_WATCHDOG = 32,
+  ChromeThreadDescriptor_ThreadType_THREAD_SWAPPER = 33,
+  ChromeThreadDescriptor_ThreadType_THREAD_GAMEPAD_POLLING = 34,
+  ChromeThreadDescriptor_ThreadType_THREAD_WEBCRYPTO = 35,
+  ChromeThreadDescriptor_ThreadType_THREAD_DATABASE = 36,
+  ChromeThreadDescriptor_ThreadType_THREAD_PROXYRESOLVER = 37,
+  ChromeThreadDescriptor_ThreadType_THREAD_DEVTOOLSADB = 38,
+  ChromeThreadDescriptor_ThreadType_THREAD_NETWORKCONFIGWATCHER = 39,
+  ChromeThreadDescriptor_ThreadType_THREAD_WASAPI_RENDER = 40,
+  ChromeThreadDescriptor_ThreadType_THREAD_MEMORY_INFRA = 50,
+  ChromeThreadDescriptor_ThreadType_THREAD_SAMPLING_PROFILER = 51,
+};
+
+class PERFETTO_EXPORT ChromeThreadDescriptor : public ::protozero::CppMessageObj {
+ public:
+  using ThreadType = ChromeThreadDescriptor_ThreadType;
+  static constexpr auto THREAD_UNSPECIFIED = ChromeThreadDescriptor_ThreadType_THREAD_UNSPECIFIED;
+  static constexpr auto THREAD_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_MAIN;
+  static constexpr auto THREAD_IO = ChromeThreadDescriptor_ThreadType_THREAD_IO;
+  static constexpr auto THREAD_POOL_BG_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_POOL_BG_WORKER;
+  static constexpr auto THREAD_POOL_FG_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_POOL_FG_WORKER;
+  static constexpr auto THREAD_POOL_FG_BLOCKING = ChromeThreadDescriptor_ThreadType_THREAD_POOL_FG_BLOCKING;
+  static constexpr auto THREAD_POOL_BG_BLOCKING = ChromeThreadDescriptor_ThreadType_THREAD_POOL_BG_BLOCKING;
+  static constexpr auto THREAD_POOL_SERVICE = ChromeThreadDescriptor_ThreadType_THREAD_POOL_SERVICE;
+  static constexpr auto THREAD_COMPOSITOR = ChromeThreadDescriptor_ThreadType_THREAD_COMPOSITOR;
+  static constexpr auto THREAD_VIZ_COMPOSITOR = ChromeThreadDescriptor_ThreadType_THREAD_VIZ_COMPOSITOR;
+  static constexpr auto THREAD_COMPOSITOR_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_COMPOSITOR_WORKER;
+  static constexpr auto THREAD_SERVICE_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_SERVICE_WORKER;
+  static constexpr auto THREAD_NETWORK_SERVICE = ChromeThreadDescriptor_ThreadType_THREAD_NETWORK_SERVICE;
+  static constexpr auto THREAD_CHILD_IO = ChromeThreadDescriptor_ThreadType_THREAD_CHILD_IO;
+  static constexpr auto THREAD_BROWSER_IO = ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_IO;
+  static constexpr auto THREAD_BROWSER_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_MAIN;
+  static constexpr auto THREAD_RENDERER_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_RENDERER_MAIN;
+  static constexpr auto THREAD_UTILITY_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_UTILITY_MAIN;
+  static constexpr auto THREAD_GPU_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_GPU_MAIN;
+  static constexpr auto THREAD_CACHE_BLOCKFILE = ChromeThreadDescriptor_ThreadType_THREAD_CACHE_BLOCKFILE;
+  static constexpr auto THREAD_MEDIA = ChromeThreadDescriptor_ThreadType_THREAD_MEDIA;
+  static constexpr auto THREAD_AUDIO_OUTPUTDEVICE = ChromeThreadDescriptor_ThreadType_THREAD_AUDIO_OUTPUTDEVICE;
+  static constexpr auto THREAD_AUDIO_INPUTDEVICE = ChromeThreadDescriptor_ThreadType_THREAD_AUDIO_INPUTDEVICE;
+  static constexpr auto THREAD_GPU_MEMORY = ChromeThreadDescriptor_ThreadType_THREAD_GPU_MEMORY;
+  static constexpr auto THREAD_GPU_VSYNC = ChromeThreadDescriptor_ThreadType_THREAD_GPU_VSYNC;
+  static constexpr auto THREAD_DXA_VIDEODECODER = ChromeThreadDescriptor_ThreadType_THREAD_DXA_VIDEODECODER;
+  static constexpr auto THREAD_BROWSER_WATCHDOG = ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_WATCHDOG;
+  static constexpr auto THREAD_WEBRTC_NETWORK = ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_NETWORK;
+  static constexpr auto THREAD_WINDOW_OWNER = ChromeThreadDescriptor_ThreadType_THREAD_WINDOW_OWNER;
+  static constexpr auto THREAD_WEBRTC_SIGNALING = ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_SIGNALING;
+  static constexpr auto THREAD_WEBRTC_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_WORKER;
+  static constexpr auto THREAD_PPAPI_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_PPAPI_MAIN;
+  static constexpr auto THREAD_GPU_WATCHDOG = ChromeThreadDescriptor_ThreadType_THREAD_GPU_WATCHDOG;
+  static constexpr auto THREAD_SWAPPER = ChromeThreadDescriptor_ThreadType_THREAD_SWAPPER;
+  static constexpr auto THREAD_GAMEPAD_POLLING = ChromeThreadDescriptor_ThreadType_THREAD_GAMEPAD_POLLING;
+  static constexpr auto THREAD_WEBCRYPTO = ChromeThreadDescriptor_ThreadType_THREAD_WEBCRYPTO;
+  static constexpr auto THREAD_DATABASE = ChromeThreadDescriptor_ThreadType_THREAD_DATABASE;
+  static constexpr auto THREAD_PROXYRESOLVER = ChromeThreadDescriptor_ThreadType_THREAD_PROXYRESOLVER;
+  static constexpr auto THREAD_DEVTOOLSADB = ChromeThreadDescriptor_ThreadType_THREAD_DEVTOOLSADB;
+  static constexpr auto THREAD_NETWORKCONFIGWATCHER = ChromeThreadDescriptor_ThreadType_THREAD_NETWORKCONFIGWATCHER;
+  static constexpr auto THREAD_WASAPI_RENDER = ChromeThreadDescriptor_ThreadType_THREAD_WASAPI_RENDER;
+  static constexpr auto THREAD_MEMORY_INFRA = ChromeThreadDescriptor_ThreadType_THREAD_MEMORY_INFRA;
+  static constexpr auto THREAD_SAMPLING_PROFILER = ChromeThreadDescriptor_ThreadType_THREAD_SAMPLING_PROFILER;
+  static constexpr auto ThreadType_MIN = ChromeThreadDescriptor_ThreadType_THREAD_UNSPECIFIED;
+  static constexpr auto ThreadType_MAX = ChromeThreadDescriptor_ThreadType_THREAD_SAMPLING_PROFILER;
+  enum FieldNumbers {
+    kThreadTypeFieldNumber = 1,
+    kLegacySortIndexFieldNumber = 2,
+  };
+
+  ChromeThreadDescriptor();
+  ~ChromeThreadDescriptor() override;
+  ChromeThreadDescriptor(ChromeThreadDescriptor&&) noexcept;
+  ChromeThreadDescriptor& operator=(ChromeThreadDescriptor&&);
+  ChromeThreadDescriptor(const ChromeThreadDescriptor&);
+  ChromeThreadDescriptor& operator=(const ChromeThreadDescriptor&);
+  bool operator==(const ChromeThreadDescriptor&) const;
+  bool operator!=(const ChromeThreadDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_thread_type() const { return _has_field_[1]; }
+  ChromeThreadDescriptor_ThreadType thread_type() const { return thread_type_; }
+  void set_thread_type(ChromeThreadDescriptor_ThreadType value) { thread_type_ = value; _has_field_.set(1); }
+
+  bool has_legacy_sort_index() const { return _has_field_[2]; }
+  int32_t legacy_sort_index() const { return legacy_sort_index_; }
+  void set_legacy_sort_index(int32_t value) { legacy_sort_index_ = value; _has_field_.set(2); }
+
+ private:
+  ChromeThreadDescriptor_ThreadType thread_type_{};
+  int32_t legacy_sort_index_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_THREAD_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_thread_descriptor.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ChromeThreadDescriptor::ChromeThreadDescriptor() = default;
+ChromeThreadDescriptor::~ChromeThreadDescriptor() = default;
+ChromeThreadDescriptor::ChromeThreadDescriptor(const ChromeThreadDescriptor&) = default;
+ChromeThreadDescriptor& ChromeThreadDescriptor::operator=(const ChromeThreadDescriptor&) = default;
+ChromeThreadDescriptor::ChromeThreadDescriptor(ChromeThreadDescriptor&&) noexcept = default;
+ChromeThreadDescriptor& ChromeThreadDescriptor::operator=(ChromeThreadDescriptor&&) = default;
+
+bool ChromeThreadDescriptor::operator==(const ChromeThreadDescriptor& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && thread_type_ == other.thread_type_
+   && legacy_sort_index_ == other.legacy_sort_index_;
+}
+
+bool ChromeThreadDescriptor::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* thread_type */:
+        field.get(&thread_type_);
+        break;
+      case 2 /* legacy_sort_index */:
+        field.get(&legacy_sort_index_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeThreadDescriptor::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeThreadDescriptor::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeThreadDescriptor::Serialize(::protozero::Message* msg) const {
+  // Field 1: thread_type
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, thread_type_);
+  }
+
+  // Field 2: legacy_sort_index
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, legacy_sort_index_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_user_event.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_user_event.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_USER_EVENT_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_USER_EVENT_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeUserEvent;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT ChromeUserEvent : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kActionFieldNumber = 1,
+    kActionHashFieldNumber = 2,
+  };
+
+  ChromeUserEvent();
+  ~ChromeUserEvent() override;
+  ChromeUserEvent(ChromeUserEvent&&) noexcept;
+  ChromeUserEvent& operator=(ChromeUserEvent&&);
+  ChromeUserEvent(const ChromeUserEvent&);
+  ChromeUserEvent& operator=(const ChromeUserEvent&);
+  bool operator==(const ChromeUserEvent&) const;
+  bool operator!=(const ChromeUserEvent& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_action() const { return _has_field_[1]; }
+  const std::string& action() const { return action_; }
+  void set_action(const std::string& value) { action_ = value; _has_field_.set(1); }
+
+  bool has_action_hash() const { return _has_field_[2]; }
+  uint64_t action_hash() const { return action_hash_; }
+  void set_action_hash(uint64_t value) { action_hash_ = value; _has_field_.set(2); }
+
+ private:
+  std::string action_{};
+  uint64_t action_hash_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_USER_EVENT_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_user_event.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ChromeUserEvent::ChromeUserEvent() = default;
+ChromeUserEvent::~ChromeUserEvent() = default;
+ChromeUserEvent::ChromeUserEvent(const ChromeUserEvent&) = default;
+ChromeUserEvent& ChromeUserEvent::operator=(const ChromeUserEvent&) = default;
+ChromeUserEvent::ChromeUserEvent(ChromeUserEvent&&) noexcept = default;
+ChromeUserEvent& ChromeUserEvent::operator=(ChromeUserEvent&&) = default;
+
+bool ChromeUserEvent::operator==(const ChromeUserEvent& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && action_ == other.action_
+   && action_hash_ == other.action_hash_;
+}
+
+bool ChromeUserEvent::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* action */:
+        field.get(&action_);
+        break;
+      case 2 /* action_hash */:
+        field.get(&action_hash_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeUserEvent::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeUserEvent::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeUserEvent::Serialize(::protozero::Message* msg) const {
+  // Field 1: action
+  if (_has_field_[1]) {
+    msg->AppendString(1, action_);
+  }
+
+  // Field 2: action_hash
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, action_hash_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/chrome_window_handle_event_info.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_window_handle_event_info.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_WINDOW_HANDLE_EVENT_INFO_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_WINDOW_HANDLE_EVENT_INFO_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeWindowHandleEventInfo;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT ChromeWindowHandleEventInfo : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDpiFieldNumber = 1,
+    kMessageIdFieldNumber = 2,
+    kHwndPtrFieldNumber = 3,
+  };
+
+  ChromeWindowHandleEventInfo();
+  ~ChromeWindowHandleEventInfo() override;
+  ChromeWindowHandleEventInfo(ChromeWindowHandleEventInfo&&) noexcept;
+  ChromeWindowHandleEventInfo& operator=(ChromeWindowHandleEventInfo&&);
+  ChromeWindowHandleEventInfo(const ChromeWindowHandleEventInfo&);
+  ChromeWindowHandleEventInfo& operator=(const ChromeWindowHandleEventInfo&);
+  bool operator==(const ChromeWindowHandleEventInfo&) const;
+  bool operator!=(const ChromeWindowHandleEventInfo& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_dpi() const { return _has_field_[1]; }
+  uint32_t dpi() const { return dpi_; }
+  void set_dpi(uint32_t value) { dpi_ = value; _has_field_.set(1); }
+
+  bool has_message_id() const { return _has_field_[2]; }
+  uint32_t message_id() const { return message_id_; }
+  void set_message_id(uint32_t value) { message_id_ = value; _has_field_.set(2); }
+
+  bool has_hwnd_ptr() const { return _has_field_[3]; }
+  uint64_t hwnd_ptr() const { return hwnd_ptr_; }
+  void set_hwnd_ptr(uint64_t value) { hwnd_ptr_ = value; _has_field_.set(3); }
+
+ private:
+  uint32_t dpi_{};
+  uint32_t message_id_{};
+  uint64_t hwnd_ptr_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_WINDOW_HANDLE_EVENT_INFO_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_window_handle_event_info.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ChromeWindowHandleEventInfo::ChromeWindowHandleEventInfo() = default;
+ChromeWindowHandleEventInfo::~ChromeWindowHandleEventInfo() = default;
+ChromeWindowHandleEventInfo::ChromeWindowHandleEventInfo(const ChromeWindowHandleEventInfo&) = default;
+ChromeWindowHandleEventInfo& ChromeWindowHandleEventInfo::operator=(const ChromeWindowHandleEventInfo&) = default;
+ChromeWindowHandleEventInfo::ChromeWindowHandleEventInfo(ChromeWindowHandleEventInfo&&) noexcept = default;
+ChromeWindowHandleEventInfo& ChromeWindowHandleEventInfo::operator=(ChromeWindowHandleEventInfo&&) = default;
+
+bool ChromeWindowHandleEventInfo::operator==(const ChromeWindowHandleEventInfo& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && dpi_ == other.dpi_
+   && message_id_ == other.message_id_
+   && hwnd_ptr_ == other.hwnd_ptr_;
+}
+
+bool ChromeWindowHandleEventInfo::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* dpi */:
+        field.get(&dpi_);
+        break;
+      case 2 /* message_id */:
+        field.get(&message_id_);
+        break;
+      case 3 /* hwnd_ptr */:
+        field.get(&hwnd_ptr_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChromeWindowHandleEventInfo::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChromeWindowHandleEventInfo::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChromeWindowHandleEventInfo::Serialize(::protozero::Message* msg) const {
+  // Field 1: dpi
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, dpi_);
+  }
+
+  // Field 2: message_id
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, message_id_);
+  }
+
+  // Field 3: hwnd_ptr
+  if (_has_field_[3]) {
+    msg->AppendFixed(3, hwnd_ptr_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/counter_descriptor.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/counter_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_COUNTER_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_COUNTER_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class CounterDescriptor;
+enum CounterDescriptor_BuiltinCounterType : int;
+enum CounterDescriptor_Unit : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum CounterDescriptor_BuiltinCounterType : int {
+  CounterDescriptor_BuiltinCounterType_COUNTER_UNSPECIFIED = 0,
+  CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_TIME_NS = 1,
+  CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_INSTRUCTION_COUNT = 2,
+};
+enum CounterDescriptor_Unit : int {
+  CounterDescriptor_Unit_UNIT_UNSPECIFIED = 0,
+  CounterDescriptor_Unit_UNIT_TIME_NS = 1,
+  CounterDescriptor_Unit_UNIT_COUNT = 2,
+  CounterDescriptor_Unit_UNIT_SIZE_BYTES = 3,
+};
+
+class PERFETTO_EXPORT CounterDescriptor : public ::protozero::CppMessageObj {
+ public:
+  using BuiltinCounterType = CounterDescriptor_BuiltinCounterType;
+  static constexpr auto COUNTER_UNSPECIFIED = CounterDescriptor_BuiltinCounterType_COUNTER_UNSPECIFIED;
+  static constexpr auto COUNTER_THREAD_TIME_NS = CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_TIME_NS;
+  static constexpr auto COUNTER_THREAD_INSTRUCTION_COUNT = CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_INSTRUCTION_COUNT;
+  static constexpr auto BuiltinCounterType_MIN = CounterDescriptor_BuiltinCounterType_COUNTER_UNSPECIFIED;
+  static constexpr auto BuiltinCounterType_MAX = CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_INSTRUCTION_COUNT;
+  using Unit = CounterDescriptor_Unit;
+  static constexpr auto UNIT_UNSPECIFIED = CounterDescriptor_Unit_UNIT_UNSPECIFIED;
+  static constexpr auto UNIT_TIME_NS = CounterDescriptor_Unit_UNIT_TIME_NS;
+  static constexpr auto UNIT_COUNT = CounterDescriptor_Unit_UNIT_COUNT;
+  static constexpr auto UNIT_SIZE_BYTES = CounterDescriptor_Unit_UNIT_SIZE_BYTES;
+  static constexpr auto Unit_MIN = CounterDescriptor_Unit_UNIT_UNSPECIFIED;
+  static constexpr auto Unit_MAX = CounterDescriptor_Unit_UNIT_SIZE_BYTES;
+  enum FieldNumbers {
+    kTypeFieldNumber = 1,
+    kCategoriesFieldNumber = 2,
+    kUnitFieldNumber = 3,
+    kUnitNameFieldNumber = 6,
+    kUnitMultiplierFieldNumber = 4,
+    kIsIncrementalFieldNumber = 5,
+  };
+
+  CounterDescriptor();
+  ~CounterDescriptor() override;
+  CounterDescriptor(CounterDescriptor&&) noexcept;
+  CounterDescriptor& operator=(CounterDescriptor&&);
+  CounterDescriptor(const CounterDescriptor&);
+  CounterDescriptor& operator=(const CounterDescriptor&);
+  bool operator==(const CounterDescriptor&) const;
+  bool operator!=(const CounterDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_type() const { return _has_field_[1]; }
+  CounterDescriptor_BuiltinCounterType type() const { return type_; }
+  void set_type(CounterDescriptor_BuiltinCounterType value) { type_ = value; _has_field_.set(1); }
+
+  const std::vector<std::string>& categories() const { return categories_; }
+  std::vector<std::string>* mutable_categories() { return &categories_; }
+  int categories_size() const { return static_cast<int>(categories_.size()); }
+  void clear_categories() { categories_.clear(); }
+  void add_categories(std::string value) { categories_.emplace_back(value); }
+  std::string* add_categories() { categories_.emplace_back(); return &categories_.back(); }
+
+  bool has_unit() const { return _has_field_[3]; }
+  CounterDescriptor_Unit unit() const { return unit_; }
+  void set_unit(CounterDescriptor_Unit value) { unit_ = value; _has_field_.set(3); }
+
+  bool has_unit_name() const { return _has_field_[6]; }
+  const std::string& unit_name() const { return unit_name_; }
+  void set_unit_name(const std::string& value) { unit_name_ = value; _has_field_.set(6); }
+
+  bool has_unit_multiplier() const { return _has_field_[4]; }
+  int64_t unit_multiplier() const { return unit_multiplier_; }
+  void set_unit_multiplier(int64_t value) { unit_multiplier_ = value; _has_field_.set(4); }
+
+  bool has_is_incremental() const { return _has_field_[5]; }
+  bool is_incremental() const { return is_incremental_; }
+  void set_is_incremental(bool value) { is_incremental_ = value; _has_field_.set(5); }
+
+ private:
+  CounterDescriptor_BuiltinCounterType type_{};
+  std::vector<std::string> categories_;
+  CounterDescriptor_Unit unit_{};
+  std::string unit_name_{};
+  int64_t unit_multiplier_{};
+  bool is_incremental_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<7> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_COUNTER_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/counter_descriptor.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+CounterDescriptor::CounterDescriptor() = default;
+CounterDescriptor::~CounterDescriptor() = default;
+CounterDescriptor::CounterDescriptor(const CounterDescriptor&) = default;
+CounterDescriptor& CounterDescriptor::operator=(const CounterDescriptor&) = default;
+CounterDescriptor::CounterDescriptor(CounterDescriptor&&) noexcept = default;
+CounterDescriptor& CounterDescriptor::operator=(CounterDescriptor&&) = default;
+
+bool CounterDescriptor::operator==(const CounterDescriptor& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && type_ == other.type_
+   && categories_ == other.categories_
+   && unit_ == other.unit_
+   && unit_name_ == other.unit_name_
+   && unit_multiplier_ == other.unit_multiplier_
+   && is_incremental_ == other.is_incremental_;
+}
+
+bool CounterDescriptor::ParseFromArray(const void* raw, size_t size) {
+  categories_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* type */:
+        field.get(&type_);
+        break;
+      case 2 /* categories */:
+        categories_.emplace_back();
+        field.get(&categories_.back());
+        break;
+      case 3 /* unit */:
+        field.get(&unit_);
+        break;
+      case 6 /* unit_name */:
+        field.get(&unit_name_);
+        break;
+      case 4 /* unit_multiplier */:
+        field.get(&unit_multiplier_);
+        break;
+      case 5 /* is_incremental */:
+        field.get(&is_incremental_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string CounterDescriptor::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> CounterDescriptor::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void CounterDescriptor::Serialize(::protozero::Message* msg) const {
+  // Field 1: type
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, type_);
+  }
+
+  // Field 2: categories
+  for (auto& it : categories_) {
+    msg->AppendString(2, it);
+  }
+
+  // Field 3: unit
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, unit_);
+  }
+
+  // Field 6: unit_name
+  if (_has_field_[6]) {
+    msg->AppendString(6, unit_name_);
+  }
+
+  // Field 4: unit_multiplier
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, unit_multiplier_);
+  }
+
+  // Field 5: is_incremental
+  if (_has_field_[5]) {
+    msg->AppendTinyVarInt(5, is_incremental_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/debug_annotation.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/debug_annotation.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_DEBUG_ANNOTATION_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_DEBUG_ANNOTATION_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class DebugAnnotationName;
+class DebugAnnotation;
+class DebugAnnotation_NestedValue;
+enum DebugAnnotation_NestedValue_NestedType : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum DebugAnnotation_NestedValue_NestedType : int {
+  DebugAnnotation_NestedValue_NestedType_UNSPECIFIED = 0,
+  DebugAnnotation_NestedValue_NestedType_DICT = 1,
+  DebugAnnotation_NestedValue_NestedType_ARRAY = 2,
+};
+
+class PERFETTO_EXPORT DebugAnnotationName : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIidFieldNumber = 1,
+    kNameFieldNumber = 2,
+  };
+
+  DebugAnnotationName();
+  ~DebugAnnotationName() override;
+  DebugAnnotationName(DebugAnnotationName&&) noexcept;
+  DebugAnnotationName& operator=(DebugAnnotationName&&);
+  DebugAnnotationName(const DebugAnnotationName&);
+  DebugAnnotationName& operator=(const DebugAnnotationName&);
+  bool operator==(const DebugAnnotationName&) const;
+  bool operator!=(const DebugAnnotationName& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_iid() const { return _has_field_[1]; }
+  uint64_t iid() const { return iid_; }
+  void set_iid(uint64_t value) { iid_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+ private:
+  uint64_t iid_{};
+  std::string name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT DebugAnnotation : public ::protozero::CppMessageObj {
+ public:
+  using NestedValue = DebugAnnotation_NestedValue;
+  enum FieldNumbers {
+    kNameIidFieldNumber = 1,
+    kNameFieldNumber = 10,
+    kBoolValueFieldNumber = 2,
+    kUintValueFieldNumber = 3,
+    kIntValueFieldNumber = 4,
+    kDoubleValueFieldNumber = 5,
+    kStringValueFieldNumber = 6,
+    kPointerValueFieldNumber = 7,
+    kNestedValueFieldNumber = 8,
+    kLegacyJsonValueFieldNumber = 9,
+    kDictEntriesFieldNumber = 11,
+    kArrayValuesFieldNumber = 12,
+  };
+
+  DebugAnnotation();
+  ~DebugAnnotation() override;
+  DebugAnnotation(DebugAnnotation&&) noexcept;
+  DebugAnnotation& operator=(DebugAnnotation&&);
+  DebugAnnotation(const DebugAnnotation&);
+  DebugAnnotation& operator=(const DebugAnnotation&);
+  bool operator==(const DebugAnnotation&) const;
+  bool operator!=(const DebugAnnotation& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name_iid() const { return _has_field_[1]; }
+  uint64_t name_iid() const { return name_iid_; }
+  void set_name_iid(uint64_t value) { name_iid_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[10]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(10); }
+
+  bool has_bool_value() const { return _has_field_[2]; }
+  bool bool_value() const { return bool_value_; }
+  void set_bool_value(bool value) { bool_value_ = value; _has_field_.set(2); }
+
+  bool has_uint_value() const { return _has_field_[3]; }
+  uint64_t uint_value() const { return uint_value_; }
+  void set_uint_value(uint64_t value) { uint_value_ = value; _has_field_.set(3); }
+
+  bool has_int_value() const { return _has_field_[4]; }
+  int64_t int_value() const { return int_value_; }
+  void set_int_value(int64_t value) { int_value_ = value; _has_field_.set(4); }
+
+  bool has_double_value() const { return _has_field_[5]; }
+  double double_value() const { return double_value_; }
+  void set_double_value(double value) { double_value_ = value; _has_field_.set(5); }
+
+  bool has_string_value() const { return _has_field_[6]; }
+  const std::string& string_value() const { return string_value_; }
+  void set_string_value(const std::string& value) { string_value_ = value; _has_field_.set(6); }
+
+  bool has_pointer_value() const { return _has_field_[7]; }
+  uint64_t pointer_value() const { return pointer_value_; }
+  void set_pointer_value(uint64_t value) { pointer_value_ = value; _has_field_.set(7); }
+
+  bool has_nested_value() const { return _has_field_[8]; }
+  const DebugAnnotation_NestedValue& nested_value() const { return *nested_value_; }
+  DebugAnnotation_NestedValue* mutable_nested_value() { _has_field_.set(8); return nested_value_.get(); }
+
+  bool has_legacy_json_value() const { return _has_field_[9]; }
+  const std::string& legacy_json_value() const { return legacy_json_value_; }
+  void set_legacy_json_value(const std::string& value) { legacy_json_value_ = value; _has_field_.set(9); }
+
+  const std::vector<DebugAnnotation>& dict_entries() const { return dict_entries_; }
+  std::vector<DebugAnnotation>* mutable_dict_entries() { return &dict_entries_; }
+  int dict_entries_size() const;
+  void clear_dict_entries();
+  DebugAnnotation* add_dict_entries();
+
+  const std::vector<DebugAnnotation>& array_values() const { return array_values_; }
+  std::vector<DebugAnnotation>* mutable_array_values() { return &array_values_; }
+  int array_values_size() const;
+  void clear_array_values();
+  DebugAnnotation* add_array_values();
+
+ private:
+  uint64_t name_iid_{};
+  std::string name_{};
+  bool bool_value_{};
+  uint64_t uint_value_{};
+  int64_t int_value_{};
+  double double_value_{};
+  std::string string_value_{};
+  uint64_t pointer_value_{};
+  ::protozero::CopyablePtr<DebugAnnotation_NestedValue> nested_value_;
+  std::string legacy_json_value_{};
+  std::vector<DebugAnnotation> dict_entries_;
+  std::vector<DebugAnnotation> array_values_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<13> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT DebugAnnotation_NestedValue : public ::protozero::CppMessageObj {
+ public:
+  using NestedType = DebugAnnotation_NestedValue_NestedType;
+  static constexpr auto UNSPECIFIED = DebugAnnotation_NestedValue_NestedType_UNSPECIFIED;
+  static constexpr auto DICT = DebugAnnotation_NestedValue_NestedType_DICT;
+  static constexpr auto ARRAY = DebugAnnotation_NestedValue_NestedType_ARRAY;
+  static constexpr auto NestedType_MIN = DebugAnnotation_NestedValue_NestedType_UNSPECIFIED;
+  static constexpr auto NestedType_MAX = DebugAnnotation_NestedValue_NestedType_ARRAY;
+  enum FieldNumbers {
+    kNestedTypeFieldNumber = 1,
+    kDictKeysFieldNumber = 2,
+    kDictValuesFieldNumber = 3,
+    kArrayValuesFieldNumber = 4,
+    kIntValueFieldNumber = 5,
+    kDoubleValueFieldNumber = 6,
+    kBoolValueFieldNumber = 7,
+    kStringValueFieldNumber = 8,
+  };
+
+  DebugAnnotation_NestedValue();
+  ~DebugAnnotation_NestedValue() override;
+  DebugAnnotation_NestedValue(DebugAnnotation_NestedValue&&) noexcept;
+  DebugAnnotation_NestedValue& operator=(DebugAnnotation_NestedValue&&);
+  DebugAnnotation_NestedValue(const DebugAnnotation_NestedValue&);
+  DebugAnnotation_NestedValue& operator=(const DebugAnnotation_NestedValue&);
+  bool operator==(const DebugAnnotation_NestedValue&) const;
+  bool operator!=(const DebugAnnotation_NestedValue& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_nested_type() const { return _has_field_[1]; }
+  DebugAnnotation_NestedValue_NestedType nested_type() const { return nested_type_; }
+  void set_nested_type(DebugAnnotation_NestedValue_NestedType value) { nested_type_ = value; _has_field_.set(1); }
+
+  const std::vector<std::string>& dict_keys() const { return dict_keys_; }
+  std::vector<std::string>* mutable_dict_keys() { return &dict_keys_; }
+  int dict_keys_size() const { return static_cast<int>(dict_keys_.size()); }
+  void clear_dict_keys() { dict_keys_.clear(); }
+  void add_dict_keys(std::string value) { dict_keys_.emplace_back(value); }
+  std::string* add_dict_keys() { dict_keys_.emplace_back(); return &dict_keys_.back(); }
+
+  const std::vector<DebugAnnotation_NestedValue>& dict_values() const { return dict_values_; }
+  std::vector<DebugAnnotation_NestedValue>* mutable_dict_values() { return &dict_values_; }
+  int dict_values_size() const;
+  void clear_dict_values();
+  DebugAnnotation_NestedValue* add_dict_values();
+
+  const std::vector<DebugAnnotation_NestedValue>& array_values() const { return array_values_; }
+  std::vector<DebugAnnotation_NestedValue>* mutable_array_values() { return &array_values_; }
+  int array_values_size() const;
+  void clear_array_values();
+  DebugAnnotation_NestedValue* add_array_values();
+
+  bool has_int_value() const { return _has_field_[5]; }
+  int64_t int_value() const { return int_value_; }
+  void set_int_value(int64_t value) { int_value_ = value; _has_field_.set(5); }
+
+  bool has_double_value() const { return _has_field_[6]; }
+  double double_value() const { return double_value_; }
+  void set_double_value(double value) { double_value_ = value; _has_field_.set(6); }
+
+  bool has_bool_value() const { return _has_field_[7]; }
+  bool bool_value() const { return bool_value_; }
+  void set_bool_value(bool value) { bool_value_ = value; _has_field_.set(7); }
+
+  bool has_string_value() const { return _has_field_[8]; }
+  const std::string& string_value() const { return string_value_; }
+  void set_string_value(const std::string& value) { string_value_ = value; _has_field_.set(8); }
+
+ private:
+  DebugAnnotation_NestedValue_NestedType nested_type_{};
+  std::vector<std::string> dict_keys_;
+  std::vector<DebugAnnotation_NestedValue> dict_values_;
+  std::vector<DebugAnnotation_NestedValue> array_values_;
+  int64_t int_value_{};
+  double double_value_{};
+  bool bool_value_{};
+  std::string string_value_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<9> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_DEBUG_ANNOTATION_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/debug_annotation.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+DebugAnnotationName::DebugAnnotationName() = default;
+DebugAnnotationName::~DebugAnnotationName() = default;
+DebugAnnotationName::DebugAnnotationName(const DebugAnnotationName&) = default;
+DebugAnnotationName& DebugAnnotationName::operator=(const DebugAnnotationName&) = default;
+DebugAnnotationName::DebugAnnotationName(DebugAnnotationName&&) noexcept = default;
+DebugAnnotationName& DebugAnnotationName::operator=(DebugAnnotationName&&) = default;
+
+bool DebugAnnotationName::operator==(const DebugAnnotationName& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && iid_ == other.iid_
+   && name_ == other.name_;
+}
+
+bool DebugAnnotationName::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* iid */:
+        field.get(&iid_);
+        break;
+      case 2 /* name */:
+        field.get(&name_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string DebugAnnotationName::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> DebugAnnotationName::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void DebugAnnotationName::Serialize(::protozero::Message* msg) const {
+  // Field 1: iid
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, iid_);
+  }
+
+  // Field 2: name
+  if (_has_field_[2]) {
+    msg->AppendString(2, name_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+DebugAnnotation::DebugAnnotation() = default;
+DebugAnnotation::~DebugAnnotation() = default;
+DebugAnnotation::DebugAnnotation(const DebugAnnotation&) = default;
+DebugAnnotation& DebugAnnotation::operator=(const DebugAnnotation&) = default;
+DebugAnnotation::DebugAnnotation(DebugAnnotation&&) noexcept = default;
+DebugAnnotation& DebugAnnotation::operator=(DebugAnnotation&&) = default;
+
+bool DebugAnnotation::operator==(const DebugAnnotation& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && name_iid_ == other.name_iid_
+   && name_ == other.name_
+   && bool_value_ == other.bool_value_
+   && uint_value_ == other.uint_value_
+   && int_value_ == other.int_value_
+   && double_value_ == other.double_value_
+   && string_value_ == other.string_value_
+   && pointer_value_ == other.pointer_value_
+   && nested_value_ == other.nested_value_
+   && legacy_json_value_ == other.legacy_json_value_
+   && dict_entries_ == other.dict_entries_
+   && array_values_ == other.array_values_;
+}
+
+int DebugAnnotation::dict_entries_size() const { return static_cast<int>(dict_entries_.size()); }
+void DebugAnnotation::clear_dict_entries() { dict_entries_.clear(); }
+DebugAnnotation* DebugAnnotation::add_dict_entries() { dict_entries_.emplace_back(); return &dict_entries_.back(); }
+int DebugAnnotation::array_values_size() const { return static_cast<int>(array_values_.size()); }
+void DebugAnnotation::clear_array_values() { array_values_.clear(); }
+DebugAnnotation* DebugAnnotation::add_array_values() { array_values_.emplace_back(); return &array_values_.back(); }
+bool DebugAnnotation::ParseFromArray(const void* raw, size_t size) {
+  dict_entries_.clear();
+  array_values_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* name_iid */:
+        field.get(&name_iid_);
+        break;
+      case 10 /* name */:
+        field.get(&name_);
+        break;
+      case 2 /* bool_value */:
+        field.get(&bool_value_);
+        break;
+      case 3 /* uint_value */:
+        field.get(&uint_value_);
+        break;
+      case 4 /* int_value */:
+        field.get(&int_value_);
+        break;
+      case 5 /* double_value */:
+        field.get(&double_value_);
+        break;
+      case 6 /* string_value */:
+        field.get(&string_value_);
+        break;
+      case 7 /* pointer_value */:
+        field.get(&pointer_value_);
+        break;
+      case 8 /* nested_value */:
+        (*nested_value_).ParseFromArray(field.data(), field.size());
+        break;
+      case 9 /* legacy_json_value */:
+        field.get(&legacy_json_value_);
+        break;
+      case 11 /* dict_entries */:
+        dict_entries_.emplace_back();
+        dict_entries_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 12 /* array_values */:
+        array_values_.emplace_back();
+        array_values_.back().ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string DebugAnnotation::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> DebugAnnotation::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void DebugAnnotation::Serialize(::protozero::Message* msg) const {
+  // Field 1: name_iid
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, name_iid_);
+  }
+
+  // Field 10: name
+  if (_has_field_[10]) {
+    msg->AppendString(10, name_);
+  }
+
+  // Field 2: bool_value
+  if (_has_field_[2]) {
+    msg->AppendTinyVarInt(2, bool_value_);
+  }
+
+  // Field 3: uint_value
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, uint_value_);
+  }
+
+  // Field 4: int_value
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, int_value_);
+  }
+
+  // Field 5: double_value
+  if (_has_field_[5]) {
+    msg->AppendFixed(5, double_value_);
+  }
+
+  // Field 6: string_value
+  if (_has_field_[6]) {
+    msg->AppendString(6, string_value_);
+  }
+
+  // Field 7: pointer_value
+  if (_has_field_[7]) {
+    msg->AppendVarInt(7, pointer_value_);
+  }
+
+  // Field 8: nested_value
+  if (_has_field_[8]) {
+    (*nested_value_).Serialize(msg->BeginNestedMessage<::protozero::Message>(8));
+  }
+
+  // Field 9: legacy_json_value
+  if (_has_field_[9]) {
+    msg->AppendString(9, legacy_json_value_);
+  }
+
+  // Field 11: dict_entries
+  for (auto& it : dict_entries_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(11));
+  }
+
+  // Field 12: array_values
+  for (auto& it : array_values_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(12));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+DebugAnnotation_NestedValue::DebugAnnotation_NestedValue() = default;
+DebugAnnotation_NestedValue::~DebugAnnotation_NestedValue() = default;
+DebugAnnotation_NestedValue::DebugAnnotation_NestedValue(const DebugAnnotation_NestedValue&) = default;
+DebugAnnotation_NestedValue& DebugAnnotation_NestedValue::operator=(const DebugAnnotation_NestedValue&) = default;
+DebugAnnotation_NestedValue::DebugAnnotation_NestedValue(DebugAnnotation_NestedValue&&) noexcept = default;
+DebugAnnotation_NestedValue& DebugAnnotation_NestedValue::operator=(DebugAnnotation_NestedValue&&) = default;
+
+bool DebugAnnotation_NestedValue::operator==(const DebugAnnotation_NestedValue& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && nested_type_ == other.nested_type_
+   && dict_keys_ == other.dict_keys_
+   && dict_values_ == other.dict_values_
+   && array_values_ == other.array_values_
+   && int_value_ == other.int_value_
+   && double_value_ == other.double_value_
+   && bool_value_ == other.bool_value_
+   && string_value_ == other.string_value_;
+}
+
+int DebugAnnotation_NestedValue::dict_values_size() const { return static_cast<int>(dict_values_.size()); }
+void DebugAnnotation_NestedValue::clear_dict_values() { dict_values_.clear(); }
+DebugAnnotation_NestedValue* DebugAnnotation_NestedValue::add_dict_values() { dict_values_.emplace_back(); return &dict_values_.back(); }
+int DebugAnnotation_NestedValue::array_values_size() const { return static_cast<int>(array_values_.size()); }
+void DebugAnnotation_NestedValue::clear_array_values() { array_values_.clear(); }
+DebugAnnotation_NestedValue* DebugAnnotation_NestedValue::add_array_values() { array_values_.emplace_back(); return &array_values_.back(); }
+bool DebugAnnotation_NestedValue::ParseFromArray(const void* raw, size_t size) {
+  dict_keys_.clear();
+  dict_values_.clear();
+  array_values_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* nested_type */:
+        field.get(&nested_type_);
+        break;
+      case 2 /* dict_keys */:
+        dict_keys_.emplace_back();
+        field.get(&dict_keys_.back());
+        break;
+      case 3 /* dict_values */:
+        dict_values_.emplace_back();
+        dict_values_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 4 /* array_values */:
+        array_values_.emplace_back();
+        array_values_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 5 /* int_value */:
+        field.get(&int_value_);
+        break;
+      case 6 /* double_value */:
+        field.get(&double_value_);
+        break;
+      case 7 /* bool_value */:
+        field.get(&bool_value_);
+        break;
+      case 8 /* string_value */:
+        field.get(&string_value_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string DebugAnnotation_NestedValue::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> DebugAnnotation_NestedValue::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void DebugAnnotation_NestedValue::Serialize(::protozero::Message* msg) const {
+  // Field 1: nested_type
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, nested_type_);
+  }
+
+  // Field 2: dict_keys
+  for (auto& it : dict_keys_) {
+    msg->AppendString(2, it);
+  }
+
+  // Field 3: dict_values
+  for (auto& it : dict_values_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
+  }
+
+  // Field 4: array_values
+  for (auto& it : array_values_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
+  }
+
+  // Field 5: int_value
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, int_value_);
+  }
+
+  // Field 6: double_value
+  if (_has_field_[6]) {
+    msg->AppendFixed(6, double_value_);
+  }
+
+  // Field 7: bool_value
+  if (_has_field_[7]) {
+    msg->AppendTinyVarInt(7, bool_value_);
+  }
+
+  // Field 8: string_value
+  if (_has_field_[8]) {
+    msg->AppendString(8, string_value_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/log_message.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/log_message.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_LOG_MESSAGE_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_LOG_MESSAGE_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class LogMessageBody;
+class LogMessage;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT LogMessageBody : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIidFieldNumber = 1,
+    kBodyFieldNumber = 2,
+  };
+
+  LogMessageBody();
+  ~LogMessageBody() override;
+  LogMessageBody(LogMessageBody&&) noexcept;
+  LogMessageBody& operator=(LogMessageBody&&);
+  LogMessageBody(const LogMessageBody&);
+  LogMessageBody& operator=(const LogMessageBody&);
+  bool operator==(const LogMessageBody&) const;
+  bool operator!=(const LogMessageBody& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_iid() const { return _has_field_[1]; }
+  uint64_t iid() const { return iid_; }
+  void set_iid(uint64_t value) { iid_ = value; _has_field_.set(1); }
+
+  bool has_body() const { return _has_field_[2]; }
+  const std::string& body() const { return body_; }
+  void set_body(const std::string& value) { body_ = value; _has_field_.set(2); }
+
+ private:
+  uint64_t iid_{};
+  std::string body_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT LogMessage : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kSourceLocationIidFieldNumber = 1,
+    kBodyIidFieldNumber = 2,
+  };
+
+  LogMessage();
+  ~LogMessage() override;
+  LogMessage(LogMessage&&) noexcept;
+  LogMessage& operator=(LogMessage&&);
+  LogMessage(const LogMessage&);
+  LogMessage& operator=(const LogMessage&);
+  bool operator==(const LogMessage&) const;
+  bool operator!=(const LogMessage& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_source_location_iid() const { return _has_field_[1]; }
+  uint64_t source_location_iid() const { return source_location_iid_; }
+  void set_source_location_iid(uint64_t value) { source_location_iid_ = value; _has_field_.set(1); }
+
+  bool has_body_iid() const { return _has_field_[2]; }
+  uint64_t body_iid() const { return body_iid_; }
+  void set_body_iid(uint64_t value) { body_iid_ = value; _has_field_.set(2); }
+
+ private:
+  uint64_t source_location_iid_{};
+  uint64_t body_iid_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_LOG_MESSAGE_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/log_message.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+LogMessageBody::LogMessageBody() = default;
+LogMessageBody::~LogMessageBody() = default;
+LogMessageBody::LogMessageBody(const LogMessageBody&) = default;
+LogMessageBody& LogMessageBody::operator=(const LogMessageBody&) = default;
+LogMessageBody::LogMessageBody(LogMessageBody&&) noexcept = default;
+LogMessageBody& LogMessageBody::operator=(LogMessageBody&&) = default;
+
+bool LogMessageBody::operator==(const LogMessageBody& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && iid_ == other.iid_
+   && body_ == other.body_;
+}
+
+bool LogMessageBody::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* iid */:
+        field.get(&iid_);
+        break;
+      case 2 /* body */:
+        field.get(&body_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string LogMessageBody::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> LogMessageBody::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void LogMessageBody::Serialize(::protozero::Message* msg) const {
+  // Field 1: iid
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, iid_);
+  }
+
+  // Field 2: body
+  if (_has_field_[2]) {
+    msg->AppendString(2, body_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+LogMessage::LogMessage() = default;
+LogMessage::~LogMessage() = default;
+LogMessage::LogMessage(const LogMessage&) = default;
+LogMessage& LogMessage::operator=(const LogMessage&) = default;
+LogMessage::LogMessage(LogMessage&&) noexcept = default;
+LogMessage& LogMessage::operator=(LogMessage&&) = default;
+
+bool LogMessage::operator==(const LogMessage& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && source_location_iid_ == other.source_location_iid_
+   && body_iid_ == other.body_iid_;
+}
+
+bool LogMessage::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* source_location_iid */:
+        field.get(&source_location_iid_);
+        break;
+      case 2 /* body_iid */:
+        field.get(&body_iid_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string LogMessage::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> LogMessage::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void LogMessage::Serialize(::protozero::Message* msg) const {
+  // Field 1: source_location_iid
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, source_location_iid_);
+  }
+
+  // Field 2: body_iid
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, body_iid_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/process_descriptor.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/process_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_PROCESS_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_PROCESS_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ProcessDescriptor;
+enum ProcessDescriptor_ChromeProcessType : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ProcessDescriptor_ChromeProcessType : int {
+  ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED = 0,
+  ProcessDescriptor_ChromeProcessType_PROCESS_BROWSER = 1,
+  ProcessDescriptor_ChromeProcessType_PROCESS_RENDERER = 2,
+  ProcessDescriptor_ChromeProcessType_PROCESS_UTILITY = 3,
+  ProcessDescriptor_ChromeProcessType_PROCESS_ZYGOTE = 4,
+  ProcessDescriptor_ChromeProcessType_PROCESS_SANDBOX_HELPER = 5,
+  ProcessDescriptor_ChromeProcessType_PROCESS_GPU = 6,
+  ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_PLUGIN = 7,
+  ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER = 8,
+};
+
+class PERFETTO_EXPORT ProcessDescriptor : public ::protozero::CppMessageObj {
+ public:
+  using ChromeProcessType = ProcessDescriptor_ChromeProcessType;
+  static constexpr auto PROCESS_UNSPECIFIED = ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED;
+  static constexpr auto PROCESS_BROWSER = ProcessDescriptor_ChromeProcessType_PROCESS_BROWSER;
+  static constexpr auto PROCESS_RENDERER = ProcessDescriptor_ChromeProcessType_PROCESS_RENDERER;
+  static constexpr auto PROCESS_UTILITY = ProcessDescriptor_ChromeProcessType_PROCESS_UTILITY;
+  static constexpr auto PROCESS_ZYGOTE = ProcessDescriptor_ChromeProcessType_PROCESS_ZYGOTE;
+  static constexpr auto PROCESS_SANDBOX_HELPER = ProcessDescriptor_ChromeProcessType_PROCESS_SANDBOX_HELPER;
+  static constexpr auto PROCESS_GPU = ProcessDescriptor_ChromeProcessType_PROCESS_GPU;
+  static constexpr auto PROCESS_PPAPI_PLUGIN = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_PLUGIN;
+  static constexpr auto PROCESS_PPAPI_BROKER = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER;
+  static constexpr auto ChromeProcessType_MIN = ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED;
+  static constexpr auto ChromeProcessType_MAX = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER;
+  enum FieldNumbers {
+    kPidFieldNumber = 1,
+    kCmdlineFieldNumber = 2,
+    kProcessNameFieldNumber = 6,
+    kProcessPriorityFieldNumber = 5,
+    kStartTimestampNsFieldNumber = 7,
+    kChromeProcessTypeFieldNumber = 4,
+    kLegacySortIndexFieldNumber = 3,
+  };
+
+  ProcessDescriptor();
+  ~ProcessDescriptor() override;
+  ProcessDescriptor(ProcessDescriptor&&) noexcept;
+  ProcessDescriptor& operator=(ProcessDescriptor&&);
+  ProcessDescriptor(const ProcessDescriptor&);
+  ProcessDescriptor& operator=(const ProcessDescriptor&);
+  bool operator==(const ProcessDescriptor&) const;
+  bool operator!=(const ProcessDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_pid() const { return _has_field_[1]; }
+  int32_t pid() const { return pid_; }
+  void set_pid(int32_t value) { pid_ = value; _has_field_.set(1); }
+
+  const std::vector<std::string>& cmdline() const { return cmdline_; }
+  std::vector<std::string>* mutable_cmdline() { return &cmdline_; }
+  int cmdline_size() const { return static_cast<int>(cmdline_.size()); }
+  void clear_cmdline() { cmdline_.clear(); }
+  void add_cmdline(std::string value) { cmdline_.emplace_back(value); }
+  std::string* add_cmdline() { cmdline_.emplace_back(); return &cmdline_.back(); }
+
+  bool has_process_name() const { return _has_field_[6]; }
+  const std::string& process_name() const { return process_name_; }
+  void set_process_name(const std::string& value) { process_name_ = value; _has_field_.set(6); }
+
+  bool has_process_priority() const { return _has_field_[5]; }
+  int32_t process_priority() const { return process_priority_; }
+  void set_process_priority(int32_t value) { process_priority_ = value; _has_field_.set(5); }
+
+  bool has_start_timestamp_ns() const { return _has_field_[7]; }
+  int64_t start_timestamp_ns() const { return start_timestamp_ns_; }
+  void set_start_timestamp_ns(int64_t value) { start_timestamp_ns_ = value; _has_field_.set(7); }
+
+  bool has_chrome_process_type() const { return _has_field_[4]; }
+  ProcessDescriptor_ChromeProcessType chrome_process_type() const { return chrome_process_type_; }
+  void set_chrome_process_type(ProcessDescriptor_ChromeProcessType value) { chrome_process_type_ = value; _has_field_.set(4); }
+
+  bool has_legacy_sort_index() const { return _has_field_[3]; }
+  int32_t legacy_sort_index() const { return legacy_sort_index_; }
+  void set_legacy_sort_index(int32_t value) { legacy_sort_index_ = value; _has_field_.set(3); }
+
+ private:
+  int32_t pid_{};
+  std::vector<std::string> cmdline_;
+  std::string process_name_{};
+  int32_t process_priority_{};
+  int64_t start_timestamp_ns_{};
+  ProcessDescriptor_ChromeProcessType chrome_process_type_{};
+  int32_t legacy_sort_index_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<8> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_PROCESS_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/process_descriptor.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ProcessDescriptor::ProcessDescriptor() = default;
+ProcessDescriptor::~ProcessDescriptor() = default;
+ProcessDescriptor::ProcessDescriptor(const ProcessDescriptor&) = default;
+ProcessDescriptor& ProcessDescriptor::operator=(const ProcessDescriptor&) = default;
+ProcessDescriptor::ProcessDescriptor(ProcessDescriptor&&) noexcept = default;
+ProcessDescriptor& ProcessDescriptor::operator=(ProcessDescriptor&&) = default;
+
+bool ProcessDescriptor::operator==(const ProcessDescriptor& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && pid_ == other.pid_
+   && cmdline_ == other.cmdline_
+   && process_name_ == other.process_name_
+   && process_priority_ == other.process_priority_
+   && start_timestamp_ns_ == other.start_timestamp_ns_
+   && chrome_process_type_ == other.chrome_process_type_
+   && legacy_sort_index_ == other.legacy_sort_index_;
+}
+
+bool ProcessDescriptor::ParseFromArray(const void* raw, size_t size) {
+  cmdline_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* pid */:
+        field.get(&pid_);
+        break;
+      case 2 /* cmdline */:
+        cmdline_.emplace_back();
+        field.get(&cmdline_.back());
+        break;
+      case 6 /* process_name */:
+        field.get(&process_name_);
+        break;
+      case 5 /* process_priority */:
+        field.get(&process_priority_);
+        break;
+      case 7 /* start_timestamp_ns */:
+        field.get(&start_timestamp_ns_);
+        break;
+      case 4 /* chrome_process_type */:
+        field.get(&chrome_process_type_);
+        break;
+      case 3 /* legacy_sort_index */:
+        field.get(&legacy_sort_index_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ProcessDescriptor::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ProcessDescriptor::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ProcessDescriptor::Serialize(::protozero::Message* msg) const {
+  // Field 1: pid
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, pid_);
+  }
+
+  // Field 2: cmdline
+  for (auto& it : cmdline_) {
+    msg->AppendString(2, it);
+  }
+
+  // Field 6: process_name
+  if (_has_field_[6]) {
+    msg->AppendString(6, process_name_);
+  }
+
+  // Field 5: process_priority
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, process_priority_);
+  }
+
+  // Field 7: start_timestamp_ns
+  if (_has_field_[7]) {
+    msg->AppendVarInt(7, start_timestamp_ns_);
+  }
+
+  // Field 4: chrome_process_type
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, chrome_process_type_);
+  }
+
+  // Field 3: legacy_sort_index
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, legacy_sort_index_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/source_location.gen.cc
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/source_location.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+SourceLocation::SourceLocation() = default;
+SourceLocation::~SourceLocation() = default;
+SourceLocation::SourceLocation(const SourceLocation&) = default;
+SourceLocation& SourceLocation::operator=(const SourceLocation&) = default;
+SourceLocation::SourceLocation(SourceLocation&&) noexcept = default;
+SourceLocation& SourceLocation::operator=(SourceLocation&&) = default;
+
+bool SourceLocation::operator==(const SourceLocation& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && iid_ == other.iid_
+   && file_name_ == other.file_name_
+   && function_name_ == other.function_name_
+   && line_number_ == other.line_number_;
+}
+
+bool SourceLocation::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* iid */:
+        field.get(&iid_);
+        break;
+      case 2 /* file_name */:
+        field.get(&file_name_);
+        break;
+      case 3 /* function_name */:
+        field.get(&function_name_);
+        break;
+      case 4 /* line_number */:
+        field.get(&line_number_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string SourceLocation::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> SourceLocation::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void SourceLocation::Serialize(::protozero::Message* msg) const {
+  // Field 1: iid
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, iid_);
+  }
+
+  // Field 2: file_name
+  if (_has_field_[2]) {
+    msg->AppendString(2, file_name_);
+  }
+
+  // Field 3: function_name
+  if (_has_field_[3]) {
+    msg->AppendString(3, function_name_);
+  }
+
+  // Field 4: line_number
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, line_number_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/task_execution.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/task_execution.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TASK_EXECUTION_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TASK_EXECUTION_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TaskExecution;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT TaskExecution : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kPostedFromIidFieldNumber = 1,
+  };
+
+  TaskExecution();
+  ~TaskExecution() override;
+  TaskExecution(TaskExecution&&) noexcept;
+  TaskExecution& operator=(TaskExecution&&);
+  TaskExecution(const TaskExecution&);
+  TaskExecution& operator=(const TaskExecution&);
+  bool operator==(const TaskExecution&) const;
+  bool operator!=(const TaskExecution& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_posted_from_iid() const { return _has_field_[1]; }
+  uint64_t posted_from_iid() const { return posted_from_iid_; }
+  void set_posted_from_iid(uint64_t value) { posted_from_iid_ = value; _has_field_.set(1); }
+
+ private:
+  uint64_t posted_from_iid_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TASK_EXECUTION_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/task_execution.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+TaskExecution::TaskExecution() = default;
+TaskExecution::~TaskExecution() = default;
+TaskExecution::TaskExecution(const TaskExecution&) = default;
+TaskExecution& TaskExecution::operator=(const TaskExecution&) = default;
+TaskExecution::TaskExecution(TaskExecution&&) noexcept = default;
+TaskExecution& TaskExecution::operator=(TaskExecution&&) = default;
+
+bool TaskExecution::operator==(const TaskExecution& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && posted_from_iid_ == other.posted_from_iid_;
+}
+
+bool TaskExecution::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* posted_from_iid */:
+        field.get(&posted_from_iid_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TaskExecution::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TaskExecution::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TaskExecution::Serialize(::protozero::Message* msg) const {
+  // Field 1: posted_from_iid
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, posted_from_iid_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/thread_descriptor.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/thread_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_THREAD_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_THREAD_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ThreadDescriptor;
+enum ThreadDescriptor_ChromeThreadType : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ThreadDescriptor_ChromeThreadType : int {
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED = 0,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MAIN = 1,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_IO = 2,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_WORKER = 3,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FG_WORKER = 4,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FB_BLOCKING = 5,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_BLOCKING = 6,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_SERVICE = 7,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR = 8,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_VIZ_COMPOSITOR = 9,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR_WORKER = 10,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SERVICE_WORKER = 11,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MEMORY_INFRA = 50,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER = 51,
+};
+
+class PERFETTO_EXPORT ThreadDescriptor : public ::protozero::CppMessageObj {
+ public:
+  using ChromeThreadType = ThreadDescriptor_ChromeThreadType;
+  static constexpr auto CHROME_THREAD_UNSPECIFIED = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED;
+  static constexpr auto CHROME_THREAD_MAIN = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MAIN;
+  static constexpr auto CHROME_THREAD_IO = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_IO;
+  static constexpr auto CHROME_THREAD_POOL_BG_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_WORKER;
+  static constexpr auto CHROME_THREAD_POOL_FG_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FG_WORKER;
+  static constexpr auto CHROME_THREAD_POOL_FB_BLOCKING = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FB_BLOCKING;
+  static constexpr auto CHROME_THREAD_POOL_BG_BLOCKING = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_BLOCKING;
+  static constexpr auto CHROME_THREAD_POOL_SERVICE = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_SERVICE;
+  static constexpr auto CHROME_THREAD_COMPOSITOR = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR;
+  static constexpr auto CHROME_THREAD_VIZ_COMPOSITOR = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_VIZ_COMPOSITOR;
+  static constexpr auto CHROME_THREAD_COMPOSITOR_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR_WORKER;
+  static constexpr auto CHROME_THREAD_SERVICE_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SERVICE_WORKER;
+  static constexpr auto CHROME_THREAD_MEMORY_INFRA = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MEMORY_INFRA;
+  static constexpr auto CHROME_THREAD_SAMPLING_PROFILER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER;
+  static constexpr auto ChromeThreadType_MIN = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED;
+  static constexpr auto ChromeThreadType_MAX = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER;
+  enum FieldNumbers {
+    kPidFieldNumber = 1,
+    kTidFieldNumber = 2,
+    kThreadNameFieldNumber = 5,
+    kChromeThreadTypeFieldNumber = 4,
+    kReferenceTimestampUsFieldNumber = 6,
+    kReferenceThreadTimeUsFieldNumber = 7,
+    kReferenceThreadInstructionCountFieldNumber = 8,
+    kLegacySortIndexFieldNumber = 3,
+  };
+
+  ThreadDescriptor();
+  ~ThreadDescriptor() override;
+  ThreadDescriptor(ThreadDescriptor&&) noexcept;
+  ThreadDescriptor& operator=(ThreadDescriptor&&);
+  ThreadDescriptor(const ThreadDescriptor&);
+  ThreadDescriptor& operator=(const ThreadDescriptor&);
+  bool operator==(const ThreadDescriptor&) const;
+  bool operator!=(const ThreadDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_pid() const { return _has_field_[1]; }
+  int32_t pid() const { return pid_; }
+  void set_pid(int32_t value) { pid_ = value; _has_field_.set(1); }
+
+  bool has_tid() const { return _has_field_[2]; }
+  int32_t tid() const { return tid_; }
+  void set_tid(int32_t value) { tid_ = value; _has_field_.set(2); }
+
+  bool has_thread_name() const { return _has_field_[5]; }
+  const std::string& thread_name() const { return thread_name_; }
+  void set_thread_name(const std::string& value) { thread_name_ = value; _has_field_.set(5); }
+
+  bool has_chrome_thread_type() const { return _has_field_[4]; }
+  ThreadDescriptor_ChromeThreadType chrome_thread_type() const { return chrome_thread_type_; }
+  void set_chrome_thread_type(ThreadDescriptor_ChromeThreadType value) { chrome_thread_type_ = value; _has_field_.set(4); }
+
+  bool has_reference_timestamp_us() const { return _has_field_[6]; }
+  int64_t reference_timestamp_us() const { return reference_timestamp_us_; }
+  void set_reference_timestamp_us(int64_t value) { reference_timestamp_us_ = value; _has_field_.set(6); }
+
+  bool has_reference_thread_time_us() const { return _has_field_[7]; }
+  int64_t reference_thread_time_us() const { return reference_thread_time_us_; }
+  void set_reference_thread_time_us(int64_t value) { reference_thread_time_us_ = value; _has_field_.set(7); }
+
+  bool has_reference_thread_instruction_count() const { return _has_field_[8]; }
+  int64_t reference_thread_instruction_count() const { return reference_thread_instruction_count_; }
+  void set_reference_thread_instruction_count(int64_t value) { reference_thread_instruction_count_ = value; _has_field_.set(8); }
+
+  bool has_legacy_sort_index() const { return _has_field_[3]; }
+  int32_t legacy_sort_index() const { return legacy_sort_index_; }
+  void set_legacy_sort_index(int32_t value) { legacy_sort_index_ = value; _has_field_.set(3); }
+
+ private:
+  int32_t pid_{};
+  int32_t tid_{};
+  std::string thread_name_{};
+  ThreadDescriptor_ChromeThreadType chrome_thread_type_{};
+  int64_t reference_timestamp_us_{};
+  int64_t reference_thread_time_us_{};
+  int64_t reference_thread_instruction_count_{};
+  int32_t legacy_sort_index_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<9> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_THREAD_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/thread_descriptor.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+ThreadDescriptor::ThreadDescriptor() = default;
+ThreadDescriptor::~ThreadDescriptor() = default;
+ThreadDescriptor::ThreadDescriptor(const ThreadDescriptor&) = default;
+ThreadDescriptor& ThreadDescriptor::operator=(const ThreadDescriptor&) = default;
+ThreadDescriptor::ThreadDescriptor(ThreadDescriptor&&) noexcept = default;
+ThreadDescriptor& ThreadDescriptor::operator=(ThreadDescriptor&&) = default;
+
+bool ThreadDescriptor::operator==(const ThreadDescriptor& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && pid_ == other.pid_
+   && tid_ == other.tid_
+   && thread_name_ == other.thread_name_
+   && chrome_thread_type_ == other.chrome_thread_type_
+   && reference_timestamp_us_ == other.reference_timestamp_us_
+   && reference_thread_time_us_ == other.reference_thread_time_us_
+   && reference_thread_instruction_count_ == other.reference_thread_instruction_count_
+   && legacy_sort_index_ == other.legacy_sort_index_;
+}
+
+bool ThreadDescriptor::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* pid */:
+        field.get(&pid_);
+        break;
+      case 2 /* tid */:
+        field.get(&tid_);
+        break;
+      case 5 /* thread_name */:
+        field.get(&thread_name_);
+        break;
+      case 4 /* chrome_thread_type */:
+        field.get(&chrome_thread_type_);
+        break;
+      case 6 /* reference_timestamp_us */:
+        field.get(&reference_timestamp_us_);
+        break;
+      case 7 /* reference_thread_time_us */:
+        field.get(&reference_thread_time_us_);
+        break;
+      case 8 /* reference_thread_instruction_count */:
+        field.get(&reference_thread_instruction_count_);
+        break;
+      case 3 /* legacy_sort_index */:
+        field.get(&legacy_sort_index_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ThreadDescriptor::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ThreadDescriptor::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ThreadDescriptor::Serialize(::protozero::Message* msg) const {
+  // Field 1: pid
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, pid_);
+  }
+
+  // Field 2: tid
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, tid_);
+  }
+
+  // Field 5: thread_name
+  if (_has_field_[5]) {
+    msg->AppendString(5, thread_name_);
+  }
+
+  // Field 4: chrome_thread_type
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, chrome_thread_type_);
+  }
+
+  // Field 6: reference_timestamp_us
+  if (_has_field_[6]) {
+    msg->AppendVarInt(6, reference_timestamp_us_);
+  }
+
+  // Field 7: reference_thread_time_us
+  if (_has_field_[7]) {
+    msg->AppendVarInt(7, reference_thread_time_us_);
+  }
+
+  // Field 8: reference_thread_instruction_count
+  if (_has_field_[8]) {
+    msg->AppendVarInt(8, reference_thread_instruction_count_);
+  }
+
+  // Field 3: legacy_sort_index
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, legacy_sort_index_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/track_descriptor.gen.cc
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/counter_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/thread_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/process_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_thread_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_process_descriptor.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+TrackDescriptor::TrackDescriptor() = default;
+TrackDescriptor::~TrackDescriptor() = default;
+TrackDescriptor::TrackDescriptor(const TrackDescriptor&) = default;
+TrackDescriptor& TrackDescriptor::operator=(const TrackDescriptor&) = default;
+TrackDescriptor::TrackDescriptor(TrackDescriptor&&) noexcept = default;
+TrackDescriptor& TrackDescriptor::operator=(TrackDescriptor&&) = default;
+
+bool TrackDescriptor::operator==(const TrackDescriptor& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && uuid_ == other.uuid_
+   && parent_uuid_ == other.parent_uuid_
+   && name_ == other.name_
+   && process_ == other.process_
+   && chrome_process_ == other.chrome_process_
+   && thread_ == other.thread_
+   && chrome_thread_ == other.chrome_thread_
+   && counter_ == other.counter_;
+}
+
+bool TrackDescriptor::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* uuid */:
+        field.get(&uuid_);
+        break;
+      case 5 /* parent_uuid */:
+        field.get(&parent_uuid_);
+        break;
+      case 2 /* name */:
+        field.get(&name_);
+        break;
+      case 3 /* process */:
+        (*process_).ParseFromArray(field.data(), field.size());
+        break;
+      case 6 /* chrome_process */:
+        (*chrome_process_).ParseFromArray(field.data(), field.size());
+        break;
+      case 4 /* thread */:
+        (*thread_).ParseFromArray(field.data(), field.size());
+        break;
+      case 7 /* chrome_thread */:
+        (*chrome_thread_).ParseFromArray(field.data(), field.size());
+        break;
+      case 8 /* counter */:
+        (*counter_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TrackDescriptor::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TrackDescriptor::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TrackDescriptor::Serialize(::protozero::Message* msg) const {
+  // Field 1: uuid
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, uuid_);
+  }
+
+  // Field 5: parent_uuid
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, parent_uuid_);
+  }
+
+  // Field 2: name
+  if (_has_field_[2]) {
+    msg->AppendString(2, name_);
+  }
+
+  // Field 3: process
+  if (_has_field_[3]) {
+    (*process_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
+  }
+
+  // Field 6: chrome_process
+  if (_has_field_[6]) {
+    (*chrome_process_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
+  }
+
+  // Field 4: thread
+  if (_has_field_[4]) {
+    (*thread_).Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
+  }
+
+  // Field 7: chrome_thread
+  if (_has_field_[7]) {
+    (*chrome_thread_).Serialize(msg->BeginNestedMessage<::protozero::Message>(7));
+  }
+
+  // Field 8: counter
+  if (_has_field_[8]) {
+    (*counter_).Serialize(msg->BeginNestedMessage<::protozero::Message>(8));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/trace/track_event/track_event.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/track_event.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_EVENT_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_EVENT_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class EventName;
+class EventCategory;
+class TrackEventDefaults;
+class TrackEvent;
+class TrackEvent_LegacyEvent;
+class ChromeMojoEventInfo;
+class ChromeMessagePump;
+class SourceLocation;
+class ChromeContentSettingsEventInfo;
+class ChromeWindowHandleEventInfo;
+class ChromeRendererSchedulerState;
+class ChromeApplicationStateInfo;
+class ChromeFrameReporter;
+class ChromeLatencyInfo;
+class ChromeLatencyInfo_ComponentInfo;
+class ChromeHistogramSample;
+class ChromeLegacyIpc;
+class ChromeKeyedService;
+class ChromeUserEvent;
+class ChromeCompositorSchedulerState;
+class CompositorTimingHistory;
+class BeginFrameSourceState;
+class BeginFrameArgs;
+class BeginFrameObserverState;
+class BeginImplFrameArgs;
+class BeginImplFrameArgs_TimestampsInUs;
+class ChromeCompositorStateMachine;
+class ChromeCompositorStateMachine_MinorState;
+class ChromeCompositorStateMachine_MajorState;
+class LogMessage;
+class TaskExecution;
+class DebugAnnotation;
+class DebugAnnotation_NestedValue;
+enum TrackEvent_Type : int;
+enum TrackEvent_LegacyEvent_FlowDirection : int;
+enum TrackEvent_LegacyEvent_InstantEventScope : int;
+enum ChromeRAILMode : int;
+enum ChromeApplicationStateInfo_ChromeApplicationState : int;
+enum ChromeFrameReporter_State : int;
+enum ChromeFrameReporter_FrameDropReason : int;
+enum ChromeFrameReporter_ScrollState : int;
+enum ChromeLatencyInfo_Step : int;
+enum ChromeLatencyInfo_LatencyComponentType : int;
+enum ChromeLegacyIpc_MessageClass : int;
+enum ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode : int;
+enum ChromeCompositorSchedulerAction : int;
+enum BeginFrameArgs_BeginFrameArgsType : int;
+enum BeginImplFrameArgs_State : int;
+enum ChromeCompositorStateMachine_MinorState_TreePriority : int;
+enum ChromeCompositorStateMachine_MinorState_ScrollHandlerState : int;
+enum ChromeCompositorStateMachine_MajorState_BeginImplFrameState : int;
+enum ChromeCompositorStateMachine_MajorState_BeginMainFrameState : int;
+enum ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState : int;
+enum ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState : int;
+enum DebugAnnotation_NestedValue_NestedType : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum TrackEvent_Type : int {
+  TrackEvent_Type_TYPE_UNSPECIFIED = 0,
+  TrackEvent_Type_TYPE_SLICE_BEGIN = 1,
+  TrackEvent_Type_TYPE_SLICE_END = 2,
+  TrackEvent_Type_TYPE_INSTANT = 3,
+  TrackEvent_Type_TYPE_COUNTER = 4,
+};
+enum TrackEvent_LegacyEvent_FlowDirection : int {
+  TrackEvent_LegacyEvent_FlowDirection_FLOW_UNSPECIFIED = 0,
+  TrackEvent_LegacyEvent_FlowDirection_FLOW_IN = 1,
+  TrackEvent_LegacyEvent_FlowDirection_FLOW_OUT = 2,
+  TrackEvent_LegacyEvent_FlowDirection_FLOW_INOUT = 3,
+};
+enum TrackEvent_LegacyEvent_InstantEventScope : int {
+  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_UNSPECIFIED = 0,
+  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_GLOBAL = 1,
+  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_PROCESS = 2,
+  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_THREAD = 3,
+};
+
+class PERFETTO_EXPORT EventName : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIidFieldNumber = 1,
+    kNameFieldNumber = 2,
+  };
+
+  EventName();
+  ~EventName() override;
+  EventName(EventName&&) noexcept;
+  EventName& operator=(EventName&&);
+  EventName(const EventName&);
+  EventName& operator=(const EventName&);
+  bool operator==(const EventName&) const;
+  bool operator!=(const EventName& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_iid() const { return _has_field_[1]; }
+  uint64_t iid() const { return iid_; }
+  void set_iid(uint64_t value) { iid_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+ private:
+  uint64_t iid_{};
+  std::string name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT EventCategory : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIidFieldNumber = 1,
+    kNameFieldNumber = 2,
+  };
+
+  EventCategory();
+  ~EventCategory() override;
+  EventCategory(EventCategory&&) noexcept;
+  EventCategory& operator=(EventCategory&&);
+  EventCategory(const EventCategory&);
+  EventCategory& operator=(const EventCategory&);
+  bool operator==(const EventCategory&) const;
+  bool operator!=(const EventCategory& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_iid() const { return _has_field_[1]; }
+  uint64_t iid() const { return iid_; }
+  void set_iid(uint64_t value) { iid_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+ private:
+  uint64_t iid_{};
+  std::string name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TrackEventDefaults : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTrackUuidFieldNumber = 11,
+    kExtraCounterTrackUuidsFieldNumber = 31,
+    kExtraDoubleCounterTrackUuidsFieldNumber = 45,
+  };
+
+  TrackEventDefaults();
+  ~TrackEventDefaults() override;
+  TrackEventDefaults(TrackEventDefaults&&) noexcept;
+  TrackEventDefaults& operator=(TrackEventDefaults&&);
+  TrackEventDefaults(const TrackEventDefaults&);
+  TrackEventDefaults& operator=(const TrackEventDefaults&);
+  bool operator==(const TrackEventDefaults&) const;
+  bool operator!=(const TrackEventDefaults& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_track_uuid() const { return _has_field_[11]; }
+  uint64_t track_uuid() const { return track_uuid_; }
+  void set_track_uuid(uint64_t value) { track_uuid_ = value; _has_field_.set(11); }
+
+  const std::vector<uint64_t>& extra_counter_track_uuids() const { return extra_counter_track_uuids_; }
+  std::vector<uint64_t>* mutable_extra_counter_track_uuids() { return &extra_counter_track_uuids_; }
+  int extra_counter_track_uuids_size() const { return static_cast<int>(extra_counter_track_uuids_.size()); }
+  void clear_extra_counter_track_uuids() { extra_counter_track_uuids_.clear(); }
+  void add_extra_counter_track_uuids(uint64_t value) { extra_counter_track_uuids_.emplace_back(value); }
+  uint64_t* add_extra_counter_track_uuids() { extra_counter_track_uuids_.emplace_back(); return &extra_counter_track_uuids_.back(); }
+
+  const std::vector<uint64_t>& extra_double_counter_track_uuids() const { return extra_double_counter_track_uuids_; }
+  std::vector<uint64_t>* mutable_extra_double_counter_track_uuids() { return &extra_double_counter_track_uuids_; }
+  int extra_double_counter_track_uuids_size() const { return static_cast<int>(extra_double_counter_track_uuids_.size()); }
+  void clear_extra_double_counter_track_uuids() { extra_double_counter_track_uuids_.clear(); }
+  void add_extra_double_counter_track_uuids(uint64_t value) { extra_double_counter_track_uuids_.emplace_back(value); }
+  uint64_t* add_extra_double_counter_track_uuids() { extra_double_counter_track_uuids_.emplace_back(); return &extra_double_counter_track_uuids_.back(); }
+
+ private:
+  uint64_t track_uuid_{};
+  std::vector<uint64_t> extra_counter_track_uuids_;
+  std::vector<uint64_t> extra_double_counter_track_uuids_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<46> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TrackEvent : public ::protozero::CppMessageObj {
+ public:
+  using LegacyEvent = TrackEvent_LegacyEvent;
+  using Type = TrackEvent_Type;
+  static constexpr auto TYPE_UNSPECIFIED = TrackEvent_Type_TYPE_UNSPECIFIED;
+  static constexpr auto TYPE_SLICE_BEGIN = TrackEvent_Type_TYPE_SLICE_BEGIN;
+  static constexpr auto TYPE_SLICE_END = TrackEvent_Type_TYPE_SLICE_END;
+  static constexpr auto TYPE_INSTANT = TrackEvent_Type_TYPE_INSTANT;
+  static constexpr auto TYPE_COUNTER = TrackEvent_Type_TYPE_COUNTER;
+  static constexpr auto Type_MIN = TrackEvent_Type_TYPE_UNSPECIFIED;
+  static constexpr auto Type_MAX = TrackEvent_Type_TYPE_COUNTER;
+  enum FieldNumbers {
+    kCategoryIidsFieldNumber = 3,
+    kCategoriesFieldNumber = 22,
+    kNameIidFieldNumber = 10,
+    kNameFieldNumber = 23,
+    kTypeFieldNumber = 9,
+    kTrackUuidFieldNumber = 11,
+    kCounterValueFieldNumber = 30,
+    kDoubleCounterValueFieldNumber = 44,
+    kExtraCounterTrackUuidsFieldNumber = 31,
+    kExtraCounterValuesFieldNumber = 12,
+    kExtraDoubleCounterTrackUuidsFieldNumber = 45,
+    kExtraDoubleCounterValuesFieldNumber = 46,
+    kFlowIdsFieldNumber = 36,
+    kTerminatingFlowIdsFieldNumber = 42,
+    kDebugAnnotationsFieldNumber = 4,
+    kTaskExecutionFieldNumber = 5,
+    kLogMessageFieldNumber = 21,
+    kCcSchedulerStateFieldNumber = 24,
+    kChromeUserEventFieldNumber = 25,
+    kChromeKeyedServiceFieldNumber = 26,
+    kChromeLegacyIpcFieldNumber = 27,
+    kChromeHistogramSampleFieldNumber = 28,
+    kChromeLatencyInfoFieldNumber = 29,
+    kChromeFrameReporterFieldNumber = 32,
+    kChromeApplicationStateInfoFieldNumber = 39,
+    kChromeRendererSchedulerStateFieldNumber = 40,
+    kChromeWindowHandleEventInfoFieldNumber = 41,
+    kChromeContentSettingsEventInfoFieldNumber = 43,
+    kSourceLocationFieldNumber = 33,
+    kSourceLocationIidFieldNumber = 34,
+    kChromeMessagePumpFieldNumber = 35,
+    kChromeMojoEventInfoFieldNumber = 38,
+    kTimestampDeltaUsFieldNumber = 1,
+    kTimestampAbsoluteUsFieldNumber = 16,
+    kThreadTimeDeltaUsFieldNumber = 2,
+    kThreadTimeAbsoluteUsFieldNumber = 17,
+    kThreadInstructionCountDeltaFieldNumber = 8,
+    kThreadInstructionCountAbsoluteFieldNumber = 20,
+    kLegacyEventFieldNumber = 6,
+  };
+
+  TrackEvent();
+  ~TrackEvent() override;
+  TrackEvent(TrackEvent&&) noexcept;
+  TrackEvent& operator=(TrackEvent&&);
+  TrackEvent(const TrackEvent&);
+  TrackEvent& operator=(const TrackEvent&);
+  bool operator==(const TrackEvent&) const;
+  bool operator!=(const TrackEvent& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<uint64_t>& category_iids() const { return category_iids_; }
+  std::vector<uint64_t>* mutable_category_iids() { return &category_iids_; }
+  int category_iids_size() const { return static_cast<int>(category_iids_.size()); }
+  void clear_category_iids() { category_iids_.clear(); }
+  void add_category_iids(uint64_t value) { category_iids_.emplace_back(value); }
+  uint64_t* add_category_iids() { category_iids_.emplace_back(); return &category_iids_.back(); }
+
+  const std::vector<std::string>& categories() const { return categories_; }
+  std::vector<std::string>* mutable_categories() { return &categories_; }
+  int categories_size() const { return static_cast<int>(categories_.size()); }
+  void clear_categories() { categories_.clear(); }
+  void add_categories(std::string value) { categories_.emplace_back(value); }
+  std::string* add_categories() { categories_.emplace_back(); return &categories_.back(); }
+
+  bool has_name_iid() const { return _has_field_[10]; }
+  uint64_t name_iid() const { return name_iid_; }
+  void set_name_iid(uint64_t value) { name_iid_ = value; _has_field_.set(10); }
+
+  bool has_name() const { return _has_field_[23]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(23); }
+
+  bool has_type() const { return _has_field_[9]; }
+  TrackEvent_Type type() const { return type_; }
+  void set_type(TrackEvent_Type value) { type_ = value; _has_field_.set(9); }
+
+  bool has_track_uuid() const { return _has_field_[11]; }
+  uint64_t track_uuid() const { return track_uuid_; }
+  void set_track_uuid(uint64_t value) { track_uuid_ = value; _has_field_.set(11); }
+
+  bool has_counter_value() const { return _has_field_[30]; }
+  int64_t counter_value() const { return counter_value_; }
+  void set_counter_value(int64_t value) { counter_value_ = value; _has_field_.set(30); }
+
+  bool has_double_counter_value() const { return _has_field_[44]; }
+  double double_counter_value() const { return double_counter_value_; }
+  void set_double_counter_value(double value) { double_counter_value_ = value; _has_field_.set(44); }
+
+  const std::vector<uint64_t>& extra_counter_track_uuids() const { return extra_counter_track_uuids_; }
+  std::vector<uint64_t>* mutable_extra_counter_track_uuids() { return &extra_counter_track_uuids_; }
+  int extra_counter_track_uuids_size() const { return static_cast<int>(extra_counter_track_uuids_.size()); }
+  void clear_extra_counter_track_uuids() { extra_counter_track_uuids_.clear(); }
+  void add_extra_counter_track_uuids(uint64_t value) { extra_counter_track_uuids_.emplace_back(value); }
+  uint64_t* add_extra_counter_track_uuids() { extra_counter_track_uuids_.emplace_back(); return &extra_counter_track_uuids_.back(); }
+
+  const std::vector<int64_t>& extra_counter_values() const { return extra_counter_values_; }
+  std::vector<int64_t>* mutable_extra_counter_values() { return &extra_counter_values_; }
+  int extra_counter_values_size() const { return static_cast<int>(extra_counter_values_.size()); }
+  void clear_extra_counter_values() { extra_counter_values_.clear(); }
+  void add_extra_counter_values(int64_t value) { extra_counter_values_.emplace_back(value); }
+  int64_t* add_extra_counter_values() { extra_counter_values_.emplace_back(); return &extra_counter_values_.back(); }
+
+  const std::vector<uint64_t>& extra_double_counter_track_uuids() const { return extra_double_counter_track_uuids_; }
+  std::vector<uint64_t>* mutable_extra_double_counter_track_uuids() { return &extra_double_counter_track_uuids_; }
+  int extra_double_counter_track_uuids_size() const { return static_cast<int>(extra_double_counter_track_uuids_.size()); }
+  void clear_extra_double_counter_track_uuids() { extra_double_counter_track_uuids_.clear(); }
+  void add_extra_double_counter_track_uuids(uint64_t value) { extra_double_counter_track_uuids_.emplace_back(value); }
+  uint64_t* add_extra_double_counter_track_uuids() { extra_double_counter_track_uuids_.emplace_back(); return &extra_double_counter_track_uuids_.back(); }
+
+  const std::vector<double>& extra_double_counter_values() const { return extra_double_counter_values_; }
+  std::vector<double>* mutable_extra_double_counter_values() { return &extra_double_counter_values_; }
+  int extra_double_counter_values_size() const { return static_cast<int>(extra_double_counter_values_.size()); }
+  void clear_extra_double_counter_values() { extra_double_counter_values_.clear(); }
+  void add_extra_double_counter_values(double value) { extra_double_counter_values_.emplace_back(value); }
+  double* add_extra_double_counter_values() { extra_double_counter_values_.emplace_back(); return &extra_double_counter_values_.back(); }
+
+  const std::vector<uint64_t>& flow_ids() const { return flow_ids_; }
+  std::vector<uint64_t>* mutable_flow_ids() { return &flow_ids_; }
+  int flow_ids_size() const { return static_cast<int>(flow_ids_.size()); }
+  void clear_flow_ids() { flow_ids_.clear(); }
+  void add_flow_ids(uint64_t value) { flow_ids_.emplace_back(value); }
+  uint64_t* add_flow_ids() { flow_ids_.emplace_back(); return &flow_ids_.back(); }
+
+  const std::vector<uint64_t>& terminating_flow_ids() const { return terminating_flow_ids_; }
+  std::vector<uint64_t>* mutable_terminating_flow_ids() { return &terminating_flow_ids_; }
+  int terminating_flow_ids_size() const { return static_cast<int>(terminating_flow_ids_.size()); }
+  void clear_terminating_flow_ids() { terminating_flow_ids_.clear(); }
+  void add_terminating_flow_ids(uint64_t value) { terminating_flow_ids_.emplace_back(value); }
+  uint64_t* add_terminating_flow_ids() { terminating_flow_ids_.emplace_back(); return &terminating_flow_ids_.back(); }
+
+  const std::vector<DebugAnnotation>& debug_annotations() const { return debug_annotations_; }
+  std::vector<DebugAnnotation>* mutable_debug_annotations() { return &debug_annotations_; }
+  int debug_annotations_size() const;
+  void clear_debug_annotations();
+  DebugAnnotation* add_debug_annotations();
+
+  bool has_task_execution() const { return _has_field_[5]; }
+  const TaskExecution& task_execution() const { return *task_execution_; }
+  TaskExecution* mutable_task_execution() { _has_field_.set(5); return task_execution_.get(); }
+
+  bool has_log_message() const { return _has_field_[21]; }
+  const LogMessage& log_message() const { return *log_message_; }
+  LogMessage* mutable_log_message() { _has_field_.set(21); return log_message_.get(); }
+
+  bool has_cc_scheduler_state() const { return _has_field_[24]; }
+  const ChromeCompositorSchedulerState& cc_scheduler_state() const { return *cc_scheduler_state_; }
+  ChromeCompositorSchedulerState* mutable_cc_scheduler_state() { _has_field_.set(24); return cc_scheduler_state_.get(); }
+
+  bool has_chrome_user_event() const { return _has_field_[25]; }
+  const ChromeUserEvent& chrome_user_event() const { return *chrome_user_event_; }
+  ChromeUserEvent* mutable_chrome_user_event() { _has_field_.set(25); return chrome_user_event_.get(); }
+
+  bool has_chrome_keyed_service() const { return _has_field_[26]; }
+  const ChromeKeyedService& chrome_keyed_service() const { return *chrome_keyed_service_; }
+  ChromeKeyedService* mutable_chrome_keyed_service() { _has_field_.set(26); return chrome_keyed_service_.get(); }
+
+  bool has_chrome_legacy_ipc() const { return _has_field_[27]; }
+  const ChromeLegacyIpc& chrome_legacy_ipc() const { return *chrome_legacy_ipc_; }
+  ChromeLegacyIpc* mutable_chrome_legacy_ipc() { _has_field_.set(27); return chrome_legacy_ipc_.get(); }
+
+  bool has_chrome_histogram_sample() const { return _has_field_[28]; }
+  const ChromeHistogramSample& chrome_histogram_sample() const { return *chrome_histogram_sample_; }
+  ChromeHistogramSample* mutable_chrome_histogram_sample() { _has_field_.set(28); return chrome_histogram_sample_.get(); }
+
+  bool has_chrome_latency_info() const { return _has_field_[29]; }
+  const ChromeLatencyInfo& chrome_latency_info() const { return *chrome_latency_info_; }
+  ChromeLatencyInfo* mutable_chrome_latency_info() { _has_field_.set(29); return chrome_latency_info_.get(); }
+
+  bool has_chrome_frame_reporter() const { return _has_field_[32]; }
+  const ChromeFrameReporter& chrome_frame_reporter() const { return *chrome_frame_reporter_; }
+  ChromeFrameReporter* mutable_chrome_frame_reporter() { _has_field_.set(32); return chrome_frame_reporter_.get(); }
+
+  bool has_chrome_application_state_info() const { return _has_field_[39]; }
+  const ChromeApplicationStateInfo& chrome_application_state_info() const { return *chrome_application_state_info_; }
+  ChromeApplicationStateInfo* mutable_chrome_application_state_info() { _has_field_.set(39); return chrome_application_state_info_.get(); }
+
+  bool has_chrome_renderer_scheduler_state() const { return _has_field_[40]; }
+  const ChromeRendererSchedulerState& chrome_renderer_scheduler_state() const { return *chrome_renderer_scheduler_state_; }
+  ChromeRendererSchedulerState* mutable_chrome_renderer_scheduler_state() { _has_field_.set(40); return chrome_renderer_scheduler_state_.get(); }
+
+  bool has_chrome_window_handle_event_info() const { return _has_field_[41]; }
+  const ChromeWindowHandleEventInfo& chrome_window_handle_event_info() const { return *chrome_window_handle_event_info_; }
+  ChromeWindowHandleEventInfo* mutable_chrome_window_handle_event_info() { _has_field_.set(41); return chrome_window_handle_event_info_.get(); }
+
+  bool has_chrome_content_settings_event_info() const { return _has_field_[43]; }
+  const ChromeContentSettingsEventInfo& chrome_content_settings_event_info() const { return *chrome_content_settings_event_info_; }
+  ChromeContentSettingsEventInfo* mutable_chrome_content_settings_event_info() { _has_field_.set(43); return chrome_content_settings_event_info_.get(); }
+
+  bool has_source_location() const { return _has_field_[33]; }
+  const SourceLocation& source_location() const { return *source_location_; }
+  SourceLocation* mutable_source_location() { _has_field_.set(33); return source_location_.get(); }
+
+  bool has_source_location_iid() const { return _has_field_[34]; }
+  uint64_t source_location_iid() const { return source_location_iid_; }
+  void set_source_location_iid(uint64_t value) { source_location_iid_ = value; _has_field_.set(34); }
+
+  bool has_chrome_message_pump() const { return _has_field_[35]; }
+  const ChromeMessagePump& chrome_message_pump() const { return *chrome_message_pump_; }
+  ChromeMessagePump* mutable_chrome_message_pump() { _has_field_.set(35); return chrome_message_pump_.get(); }
+
+  bool has_chrome_mojo_event_info() const { return _has_field_[38]; }
+  const ChromeMojoEventInfo& chrome_mojo_event_info() const { return *chrome_mojo_event_info_; }
+  ChromeMojoEventInfo* mutable_chrome_mojo_event_info() { _has_field_.set(38); return chrome_mojo_event_info_.get(); }
+
+  bool has_timestamp_delta_us() const { return _has_field_[1]; }
+  int64_t timestamp_delta_us() const { return timestamp_delta_us_; }
+  void set_timestamp_delta_us(int64_t value) { timestamp_delta_us_ = value; _has_field_.set(1); }
+
+  bool has_timestamp_absolute_us() const { return _has_field_[16]; }
+  int64_t timestamp_absolute_us() const { return timestamp_absolute_us_; }
+  void set_timestamp_absolute_us(int64_t value) { timestamp_absolute_us_ = value; _has_field_.set(16); }
+
+  bool has_thread_time_delta_us() const { return _has_field_[2]; }
+  int64_t thread_time_delta_us() const { return thread_time_delta_us_; }
+  void set_thread_time_delta_us(int64_t value) { thread_time_delta_us_ = value; _has_field_.set(2); }
+
+  bool has_thread_time_absolute_us() const { return _has_field_[17]; }
+  int64_t thread_time_absolute_us() const { return thread_time_absolute_us_; }
+  void set_thread_time_absolute_us(int64_t value) { thread_time_absolute_us_ = value; _has_field_.set(17); }
+
+  bool has_thread_instruction_count_delta() const { return _has_field_[8]; }
+  int64_t thread_instruction_count_delta() const { return thread_instruction_count_delta_; }
+  void set_thread_instruction_count_delta(int64_t value) { thread_instruction_count_delta_ = value; _has_field_.set(8); }
+
+  bool has_thread_instruction_count_absolute() const { return _has_field_[20]; }
+  int64_t thread_instruction_count_absolute() const { return thread_instruction_count_absolute_; }
+  void set_thread_instruction_count_absolute(int64_t value) { thread_instruction_count_absolute_ = value; _has_field_.set(20); }
+
+  bool has_legacy_event() const { return _has_field_[6]; }
+  const TrackEvent_LegacyEvent& legacy_event() const { return *legacy_event_; }
+  TrackEvent_LegacyEvent* mutable_legacy_event() { _has_field_.set(6); return legacy_event_.get(); }
+
+ private:
+  std::vector<uint64_t> category_iids_;
+  std::vector<std::string> categories_;
+  uint64_t name_iid_{};
+  std::string name_{};
+  TrackEvent_Type type_{};
+  uint64_t track_uuid_{};
+  int64_t counter_value_{};
+  double double_counter_value_{};
+  std::vector<uint64_t> extra_counter_track_uuids_;
+  std::vector<int64_t> extra_counter_values_;
+  std::vector<uint64_t> extra_double_counter_track_uuids_;
+  std::vector<double> extra_double_counter_values_;
+  std::vector<uint64_t> flow_ids_;
+  std::vector<uint64_t> terminating_flow_ids_;
+  std::vector<DebugAnnotation> debug_annotations_;
+  ::protozero::CopyablePtr<TaskExecution> task_execution_;
+  ::protozero::CopyablePtr<LogMessage> log_message_;
+  ::protozero::CopyablePtr<ChromeCompositorSchedulerState> cc_scheduler_state_;
+  ::protozero::CopyablePtr<ChromeUserEvent> chrome_user_event_;
+  ::protozero::CopyablePtr<ChromeKeyedService> chrome_keyed_service_;
+  ::protozero::CopyablePtr<ChromeLegacyIpc> chrome_legacy_ipc_;
+  ::protozero::CopyablePtr<ChromeHistogramSample> chrome_histogram_sample_;
+  ::protozero::CopyablePtr<ChromeLatencyInfo> chrome_latency_info_;
+  ::protozero::CopyablePtr<ChromeFrameReporter> chrome_frame_reporter_;
+  ::protozero::CopyablePtr<ChromeApplicationStateInfo> chrome_application_state_info_;
+  ::protozero::CopyablePtr<ChromeRendererSchedulerState> chrome_renderer_scheduler_state_;
+  ::protozero::CopyablePtr<ChromeWindowHandleEventInfo> chrome_window_handle_event_info_;
+  ::protozero::CopyablePtr<ChromeContentSettingsEventInfo> chrome_content_settings_event_info_;
+  ::protozero::CopyablePtr<SourceLocation> source_location_;
+  uint64_t source_location_iid_{};
+  ::protozero::CopyablePtr<ChromeMessagePump> chrome_message_pump_;
+  ::protozero::CopyablePtr<ChromeMojoEventInfo> chrome_mojo_event_info_;
+  int64_t timestamp_delta_us_{};
+  int64_t timestamp_absolute_us_{};
+  int64_t thread_time_delta_us_{};
+  int64_t thread_time_absolute_us_{};
+  int64_t thread_instruction_count_delta_{};
+  int64_t thread_instruction_count_absolute_{};
+  ::protozero::CopyablePtr<TrackEvent_LegacyEvent> legacy_event_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<47> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TrackEvent_LegacyEvent : public ::protozero::CppMessageObj {
+ public:
+  using FlowDirection = TrackEvent_LegacyEvent_FlowDirection;
+  static constexpr auto FLOW_UNSPECIFIED = TrackEvent_LegacyEvent_FlowDirection_FLOW_UNSPECIFIED;
+  static constexpr auto FLOW_IN = TrackEvent_LegacyEvent_FlowDirection_FLOW_IN;
+  static constexpr auto FLOW_OUT = TrackEvent_LegacyEvent_FlowDirection_FLOW_OUT;
+  static constexpr auto FLOW_INOUT = TrackEvent_LegacyEvent_FlowDirection_FLOW_INOUT;
+  static constexpr auto FlowDirection_MIN = TrackEvent_LegacyEvent_FlowDirection_FLOW_UNSPECIFIED;
+  static constexpr auto FlowDirection_MAX = TrackEvent_LegacyEvent_FlowDirection_FLOW_INOUT;
+  using InstantEventScope = TrackEvent_LegacyEvent_InstantEventScope;
+  static constexpr auto SCOPE_UNSPECIFIED = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_UNSPECIFIED;
+  static constexpr auto SCOPE_GLOBAL = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_GLOBAL;
+  static constexpr auto SCOPE_PROCESS = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_PROCESS;
+  static constexpr auto SCOPE_THREAD = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_THREAD;
+  static constexpr auto InstantEventScope_MIN = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_UNSPECIFIED;
+  static constexpr auto InstantEventScope_MAX = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_THREAD;
+  enum FieldNumbers {
+    kNameIidFieldNumber = 1,
+    kPhaseFieldNumber = 2,
+    kDurationUsFieldNumber = 3,
+    kThreadDurationUsFieldNumber = 4,
+    kThreadInstructionDeltaFieldNumber = 15,
+    kUnscopedIdFieldNumber = 6,
+    kLocalIdFieldNumber = 10,
+    kGlobalIdFieldNumber = 11,
+    kIdScopeFieldNumber = 7,
+    kUseAsyncTtsFieldNumber = 9,
+    kBindIdFieldNumber = 8,
+    kBindToEnclosingFieldNumber = 12,
+    kFlowDirectionFieldNumber = 13,
+    kInstantEventScopeFieldNumber = 14,
+    kPidOverrideFieldNumber = 18,
+    kTidOverrideFieldNumber = 19,
+  };
+
+  TrackEvent_LegacyEvent();
+  ~TrackEvent_LegacyEvent() override;
+  TrackEvent_LegacyEvent(TrackEvent_LegacyEvent&&) noexcept;
+  TrackEvent_LegacyEvent& operator=(TrackEvent_LegacyEvent&&);
+  TrackEvent_LegacyEvent(const TrackEvent_LegacyEvent&);
+  TrackEvent_LegacyEvent& operator=(const TrackEvent_LegacyEvent&);
+  bool operator==(const TrackEvent_LegacyEvent&) const;
+  bool operator!=(const TrackEvent_LegacyEvent& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name_iid() const { return _has_field_[1]; }
+  uint64_t name_iid() const { return name_iid_; }
+  void set_name_iid(uint64_t value) { name_iid_ = value; _has_field_.set(1); }
+
+  bool has_phase() const { return _has_field_[2]; }
+  int32_t phase() const { return phase_; }
+  void set_phase(int32_t value) { phase_ = value; _has_field_.set(2); }
+
+  bool has_duration_us() const { return _has_field_[3]; }
+  int64_t duration_us() const { return duration_us_; }
+  void set_duration_us(int64_t value) { duration_us_ = value; _has_field_.set(3); }
+
+  bool has_thread_duration_us() const { return _has_field_[4]; }
+  int64_t thread_duration_us() const { return thread_duration_us_; }
+  void set_thread_duration_us(int64_t value) { thread_duration_us_ = value; _has_field_.set(4); }
+
+  bool has_thread_instruction_delta() const { return _has_field_[15]; }
+  int64_t thread_instruction_delta() const { return thread_instruction_delta_; }
+  void set_thread_instruction_delta(int64_t value) { thread_instruction_delta_ = value; _has_field_.set(15); }
+
+  bool has_unscoped_id() const { return _has_field_[6]; }
+  uint64_t unscoped_id() const { return unscoped_id_; }
+  void set_unscoped_id(uint64_t value) { unscoped_id_ = value; _has_field_.set(6); }
+
+  bool has_local_id() const { return _has_field_[10]; }
+  uint64_t local_id() const { return local_id_; }
+  void set_local_id(uint64_t value) { local_id_ = value; _has_field_.set(10); }
+
+  bool has_global_id() const { return _has_field_[11]; }
+  uint64_t global_id() const { return global_id_; }
+  void set_global_id(uint64_t value) { global_id_ = value; _has_field_.set(11); }
+
+  bool has_id_scope() const { return _has_field_[7]; }
+  const std::string& id_scope() const { return id_scope_; }
+  void set_id_scope(const std::string& value) { id_scope_ = value; _has_field_.set(7); }
+
+  bool has_use_async_tts() const { return _has_field_[9]; }
+  bool use_async_tts() const { return use_async_tts_; }
+  void set_use_async_tts(bool value) { use_async_tts_ = value; _has_field_.set(9); }
+
+  bool has_bind_id() const { return _has_field_[8]; }
+  uint64_t bind_id() const { return bind_id_; }
+  void set_bind_id(uint64_t value) { bind_id_ = value; _has_field_.set(8); }
+
+  bool has_bind_to_enclosing() const { return _has_field_[12]; }
+  bool bind_to_enclosing() const { return bind_to_enclosing_; }
+  void set_bind_to_enclosing(bool value) { bind_to_enclosing_ = value; _has_field_.set(12); }
+
+  bool has_flow_direction() const { return _has_field_[13]; }
+  TrackEvent_LegacyEvent_FlowDirection flow_direction() const { return flow_direction_; }
+  void set_flow_direction(TrackEvent_LegacyEvent_FlowDirection value) { flow_direction_ = value; _has_field_.set(13); }
+
+  bool has_instant_event_scope() const { return _has_field_[14]; }
+  TrackEvent_LegacyEvent_InstantEventScope instant_event_scope() const { return instant_event_scope_; }
+  void set_instant_event_scope(TrackEvent_LegacyEvent_InstantEventScope value) { instant_event_scope_ = value; _has_field_.set(14); }
+
+  bool has_pid_override() const { return _has_field_[18]; }
+  int32_t pid_override() const { return pid_override_; }
+  void set_pid_override(int32_t value) { pid_override_ = value; _has_field_.set(18); }
+
+  bool has_tid_override() const { return _has_field_[19]; }
+  int32_t tid_override() const { return tid_override_; }
+  void set_tid_override(int32_t value) { tid_override_ = value; _has_field_.set(19); }
+
+ private:
+  uint64_t name_iid_{};
+  int32_t phase_{};
+  int64_t duration_us_{};
+  int64_t thread_duration_us_{};
+  int64_t thread_instruction_delta_{};
+  uint64_t unscoped_id_{};
+  uint64_t local_id_{};
+  uint64_t global_id_{};
+  std::string id_scope_{};
+  bool use_async_tts_{};
+  uint64_t bind_id_{};
+  bool bind_to_enclosing_{};
+  TrackEvent_LegacyEvent_FlowDirection flow_direction_{};
+  TrackEvent_LegacyEvent_InstantEventScope instant_event_scope_{};
+  int32_t pid_override_{};
+  int32_t tid_override_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<20> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_EVENT_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_event.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/source_location.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_window_handle_event_info.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_user_event.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_renderer_scheduler_state.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_mojo_event_info.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_message_pump.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_legacy_ipc.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_latency_info.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_keyed_service.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_histogram_sample.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_frame_reporter.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_content_settings_event_info.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_compositor_scheduler_state.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/chrome_application_state_info.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/task_execution.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/log_message.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/debug_annotation.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+EventName::EventName() = default;
+EventName::~EventName() = default;
+EventName::EventName(const EventName&) = default;
+EventName& EventName::operator=(const EventName&) = default;
+EventName::EventName(EventName&&) noexcept = default;
+EventName& EventName::operator=(EventName&&) = default;
+
+bool EventName::operator==(const EventName& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && iid_ == other.iid_
+   && name_ == other.name_;
+}
+
+bool EventName::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* iid */:
+        field.get(&iid_);
+        break;
+      case 2 /* name */:
+        field.get(&name_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string EventName::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> EventName::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void EventName::Serialize(::protozero::Message* msg) const {
+  // Field 1: iid
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, iid_);
+  }
+
+  // Field 2: name
+  if (_has_field_[2]) {
+    msg->AppendString(2, name_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+EventCategory::EventCategory() = default;
+EventCategory::~EventCategory() = default;
+EventCategory::EventCategory(const EventCategory&) = default;
+EventCategory& EventCategory::operator=(const EventCategory&) = default;
+EventCategory::EventCategory(EventCategory&&) noexcept = default;
+EventCategory& EventCategory::operator=(EventCategory&&) = default;
+
+bool EventCategory::operator==(const EventCategory& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && iid_ == other.iid_
+   && name_ == other.name_;
+}
+
+bool EventCategory::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* iid */:
+        field.get(&iid_);
+        break;
+      case 2 /* name */:
+        field.get(&name_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string EventCategory::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> EventCategory::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void EventCategory::Serialize(::protozero::Message* msg) const {
+  // Field 1: iid
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, iid_);
+  }
+
+  // Field 2: name
+  if (_has_field_[2]) {
+    msg->AppendString(2, name_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TrackEventDefaults::TrackEventDefaults() = default;
+TrackEventDefaults::~TrackEventDefaults() = default;
+TrackEventDefaults::TrackEventDefaults(const TrackEventDefaults&) = default;
+TrackEventDefaults& TrackEventDefaults::operator=(const TrackEventDefaults&) = default;
+TrackEventDefaults::TrackEventDefaults(TrackEventDefaults&&) noexcept = default;
+TrackEventDefaults& TrackEventDefaults::operator=(TrackEventDefaults&&) = default;
+
+bool TrackEventDefaults::operator==(const TrackEventDefaults& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && track_uuid_ == other.track_uuid_
+   && extra_counter_track_uuids_ == other.extra_counter_track_uuids_
+   && extra_double_counter_track_uuids_ == other.extra_double_counter_track_uuids_;
+}
+
+bool TrackEventDefaults::ParseFromArray(const void* raw, size_t size) {
+  extra_counter_track_uuids_.clear();
+  extra_double_counter_track_uuids_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 11 /* track_uuid */:
+        field.get(&track_uuid_);
+        break;
+      case 31 /* extra_counter_track_uuids */:
+        extra_counter_track_uuids_.emplace_back();
+        field.get(&extra_counter_track_uuids_.back());
+        break;
+      case 45 /* extra_double_counter_track_uuids */:
+        extra_double_counter_track_uuids_.emplace_back();
+        field.get(&extra_double_counter_track_uuids_.back());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TrackEventDefaults::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TrackEventDefaults::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TrackEventDefaults::Serialize(::protozero::Message* msg) const {
+  // Field 11: track_uuid
+  if (_has_field_[11]) {
+    msg->AppendVarInt(11, track_uuid_);
+  }
+
+  // Field 31: extra_counter_track_uuids
+  for (auto& it : extra_counter_track_uuids_) {
+    msg->AppendVarInt(31, it);
+  }
+
+  // Field 45: extra_double_counter_track_uuids
+  for (auto& it : extra_double_counter_track_uuids_) {
+    msg->AppendVarInt(45, it);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TrackEvent::TrackEvent() = default;
+TrackEvent::~TrackEvent() = default;
+TrackEvent::TrackEvent(const TrackEvent&) = default;
+TrackEvent& TrackEvent::operator=(const TrackEvent&) = default;
+TrackEvent::TrackEvent(TrackEvent&&) noexcept = default;
+TrackEvent& TrackEvent::operator=(TrackEvent&&) = default;
+
+bool TrackEvent::operator==(const TrackEvent& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && category_iids_ == other.category_iids_
+   && categories_ == other.categories_
+   && name_iid_ == other.name_iid_
+   && name_ == other.name_
+   && type_ == other.type_
+   && track_uuid_ == other.track_uuid_
+   && counter_value_ == other.counter_value_
+   && double_counter_value_ == other.double_counter_value_
+   && extra_counter_track_uuids_ == other.extra_counter_track_uuids_
+   && extra_counter_values_ == other.extra_counter_values_
+   && extra_double_counter_track_uuids_ == other.extra_double_counter_track_uuids_
+   && extra_double_counter_values_ == other.extra_double_counter_values_
+   && flow_ids_ == other.flow_ids_
+   && terminating_flow_ids_ == other.terminating_flow_ids_
+   && debug_annotations_ == other.debug_annotations_
+   && task_execution_ == other.task_execution_
+   && log_message_ == other.log_message_
+   && cc_scheduler_state_ == other.cc_scheduler_state_
+   && chrome_user_event_ == other.chrome_user_event_
+   && chrome_keyed_service_ == other.chrome_keyed_service_
+   && chrome_legacy_ipc_ == other.chrome_legacy_ipc_
+   && chrome_histogram_sample_ == other.chrome_histogram_sample_
+   && chrome_latency_info_ == other.chrome_latency_info_
+   && chrome_frame_reporter_ == other.chrome_frame_reporter_
+   && chrome_application_state_info_ == other.chrome_application_state_info_
+   && chrome_renderer_scheduler_state_ == other.chrome_renderer_scheduler_state_
+   && chrome_window_handle_event_info_ == other.chrome_window_handle_event_info_
+   && chrome_content_settings_event_info_ == other.chrome_content_settings_event_info_
+   && source_location_ == other.source_location_
+   && source_location_iid_ == other.source_location_iid_
+   && chrome_message_pump_ == other.chrome_message_pump_
+   && chrome_mojo_event_info_ == other.chrome_mojo_event_info_
+   && timestamp_delta_us_ == other.timestamp_delta_us_
+   && timestamp_absolute_us_ == other.timestamp_absolute_us_
+   && thread_time_delta_us_ == other.thread_time_delta_us_
+   && thread_time_absolute_us_ == other.thread_time_absolute_us_
+   && thread_instruction_count_delta_ == other.thread_instruction_count_delta_
+   && thread_instruction_count_absolute_ == other.thread_instruction_count_absolute_
+   && legacy_event_ == other.legacy_event_;
+}
+
+int TrackEvent::debug_annotations_size() const { return static_cast<int>(debug_annotations_.size()); }
+void TrackEvent::clear_debug_annotations() { debug_annotations_.clear(); }
+DebugAnnotation* TrackEvent::add_debug_annotations() { debug_annotations_.emplace_back(); return &debug_annotations_.back(); }
+bool TrackEvent::ParseFromArray(const void* raw, size_t size) {
+  category_iids_.clear();
+  categories_.clear();
+  extra_counter_track_uuids_.clear();
+  extra_counter_values_.clear();
+  extra_double_counter_track_uuids_.clear();
+  extra_double_counter_values_.clear();
+  flow_ids_.clear();
+  terminating_flow_ids_.clear();
+  debug_annotations_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 3 /* category_iids */:
+        category_iids_.emplace_back();
+        field.get(&category_iids_.back());
+        break;
+      case 22 /* categories */:
+        categories_.emplace_back();
+        field.get(&categories_.back());
+        break;
+      case 10 /* name_iid */:
+        field.get(&name_iid_);
+        break;
+      case 23 /* name */:
+        field.get(&name_);
+        break;
+      case 9 /* type */:
+        field.get(&type_);
+        break;
+      case 11 /* track_uuid */:
+        field.get(&track_uuid_);
+        break;
+      case 30 /* counter_value */:
+        field.get(&counter_value_);
+        break;
+      case 44 /* double_counter_value */:
+        field.get(&double_counter_value_);
+        break;
+      case 31 /* extra_counter_track_uuids */:
+        extra_counter_track_uuids_.emplace_back();
+        field.get(&extra_counter_track_uuids_.back());
+        break;
+      case 12 /* extra_counter_values */:
+        extra_counter_values_.emplace_back();
+        field.get(&extra_counter_values_.back());
+        break;
+      case 45 /* extra_double_counter_track_uuids */:
+        extra_double_counter_track_uuids_.emplace_back();
+        field.get(&extra_double_counter_track_uuids_.back());
+        break;
+      case 46 /* extra_double_counter_values */:
+        extra_double_counter_values_.emplace_back();
+        field.get(&extra_double_counter_values_.back());
+        break;
+      case 36 /* flow_ids */:
+        flow_ids_.emplace_back();
+        field.get(&flow_ids_.back());
+        break;
+      case 42 /* terminating_flow_ids */:
+        terminating_flow_ids_.emplace_back();
+        field.get(&terminating_flow_ids_.back());
+        break;
+      case 4 /* debug_annotations */:
+        debug_annotations_.emplace_back();
+        debug_annotations_.back().ParseFromArray(field.data(), field.size());
+        break;
+      case 5 /* task_execution */:
+        (*task_execution_).ParseFromArray(field.data(), field.size());
+        break;
+      case 21 /* log_message */:
+        (*log_message_).ParseFromArray(field.data(), field.size());
+        break;
+      case 24 /* cc_scheduler_state */:
+        (*cc_scheduler_state_).ParseFromArray(field.data(), field.size());
+        break;
+      case 25 /* chrome_user_event */:
+        (*chrome_user_event_).ParseFromArray(field.data(), field.size());
+        break;
+      case 26 /* chrome_keyed_service */:
+        (*chrome_keyed_service_).ParseFromArray(field.data(), field.size());
+        break;
+      case 27 /* chrome_legacy_ipc */:
+        (*chrome_legacy_ipc_).ParseFromArray(field.data(), field.size());
+        break;
+      case 28 /* chrome_histogram_sample */:
+        (*chrome_histogram_sample_).ParseFromArray(field.data(), field.size());
+        break;
+      case 29 /* chrome_latency_info */:
+        (*chrome_latency_info_).ParseFromArray(field.data(), field.size());
+        break;
+      case 32 /* chrome_frame_reporter */:
+        (*chrome_frame_reporter_).ParseFromArray(field.data(), field.size());
+        break;
+      case 39 /* chrome_application_state_info */:
+        (*chrome_application_state_info_).ParseFromArray(field.data(), field.size());
+        break;
+      case 40 /* chrome_renderer_scheduler_state */:
+        (*chrome_renderer_scheduler_state_).ParseFromArray(field.data(), field.size());
+        break;
+      case 41 /* chrome_window_handle_event_info */:
+        (*chrome_window_handle_event_info_).ParseFromArray(field.data(), field.size());
+        break;
+      case 43 /* chrome_content_settings_event_info */:
+        (*chrome_content_settings_event_info_).ParseFromArray(field.data(), field.size());
+        break;
+      case 33 /* source_location */:
+        (*source_location_).ParseFromArray(field.data(), field.size());
+        break;
+      case 34 /* source_location_iid */:
+        field.get(&source_location_iid_);
+        break;
+      case 35 /* chrome_message_pump */:
+        (*chrome_message_pump_).ParseFromArray(field.data(), field.size());
+        break;
+      case 38 /* chrome_mojo_event_info */:
+        (*chrome_mojo_event_info_).ParseFromArray(field.data(), field.size());
+        break;
+      case 1 /* timestamp_delta_us */:
+        field.get(&timestamp_delta_us_);
+        break;
+      case 16 /* timestamp_absolute_us */:
+        field.get(&timestamp_absolute_us_);
+        break;
+      case 2 /* thread_time_delta_us */:
+        field.get(&thread_time_delta_us_);
+        break;
+      case 17 /* thread_time_absolute_us */:
+        field.get(&thread_time_absolute_us_);
+        break;
+      case 8 /* thread_instruction_count_delta */:
+        field.get(&thread_instruction_count_delta_);
+        break;
+      case 20 /* thread_instruction_count_absolute */:
+        field.get(&thread_instruction_count_absolute_);
+        break;
+      case 6 /* legacy_event */:
+        (*legacy_event_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TrackEvent::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TrackEvent::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TrackEvent::Serialize(::protozero::Message* msg) const {
+  // Field 3: category_iids
+  for (auto& it : category_iids_) {
+    msg->AppendVarInt(3, it);
+  }
+
+  // Field 22: categories
+  for (auto& it : categories_) {
+    msg->AppendString(22, it);
+  }
+
+  // Field 10: name_iid
+  if (_has_field_[10]) {
+    msg->AppendVarInt(10, name_iid_);
+  }
+
+  // Field 23: name
+  if (_has_field_[23]) {
+    msg->AppendString(23, name_);
+  }
+
+  // Field 9: type
+  if (_has_field_[9]) {
+    msg->AppendVarInt(9, type_);
+  }
+
+  // Field 11: track_uuid
+  if (_has_field_[11]) {
+    msg->AppendVarInt(11, track_uuid_);
+  }
+
+  // Field 30: counter_value
+  if (_has_field_[30]) {
+    msg->AppendVarInt(30, counter_value_);
+  }
+
+  // Field 44: double_counter_value
+  if (_has_field_[44]) {
+    msg->AppendFixed(44, double_counter_value_);
+  }
+
+  // Field 31: extra_counter_track_uuids
+  for (auto& it : extra_counter_track_uuids_) {
+    msg->AppendVarInt(31, it);
+  }
+
+  // Field 12: extra_counter_values
+  for (auto& it : extra_counter_values_) {
+    msg->AppendVarInt(12, it);
+  }
+
+  // Field 45: extra_double_counter_track_uuids
+  for (auto& it : extra_double_counter_track_uuids_) {
+    msg->AppendVarInt(45, it);
+  }
+
+  // Field 46: extra_double_counter_values
+  for (auto& it : extra_double_counter_values_) {
+    msg->AppendFixed(46, it);
+  }
+
+  // Field 36: flow_ids
+  for (auto& it : flow_ids_) {
+    msg->AppendVarInt(36, it);
+  }
+
+  // Field 42: terminating_flow_ids
+  for (auto& it : terminating_flow_ids_) {
+    msg->AppendVarInt(42, it);
+  }
+
+  // Field 4: debug_annotations
+  for (auto& it : debug_annotations_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
+  }
+
+  // Field 5: task_execution
+  if (_has_field_[5]) {
+    (*task_execution_).Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
+  }
+
+  // Field 21: log_message
+  if (_has_field_[21]) {
+    (*log_message_).Serialize(msg->BeginNestedMessage<::protozero::Message>(21));
+  }
+
+  // Field 24: cc_scheduler_state
+  if (_has_field_[24]) {
+    (*cc_scheduler_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(24));
+  }
+
+  // Field 25: chrome_user_event
+  if (_has_field_[25]) {
+    (*chrome_user_event_).Serialize(msg->BeginNestedMessage<::protozero::Message>(25));
+  }
+
+  // Field 26: chrome_keyed_service
+  if (_has_field_[26]) {
+    (*chrome_keyed_service_).Serialize(msg->BeginNestedMessage<::protozero::Message>(26));
+  }
+
+  // Field 27: chrome_legacy_ipc
+  if (_has_field_[27]) {
+    (*chrome_legacy_ipc_).Serialize(msg->BeginNestedMessage<::protozero::Message>(27));
+  }
+
+  // Field 28: chrome_histogram_sample
+  if (_has_field_[28]) {
+    (*chrome_histogram_sample_).Serialize(msg->BeginNestedMessage<::protozero::Message>(28));
+  }
+
+  // Field 29: chrome_latency_info
+  if (_has_field_[29]) {
+    (*chrome_latency_info_).Serialize(msg->BeginNestedMessage<::protozero::Message>(29));
+  }
+
+  // Field 32: chrome_frame_reporter
+  if (_has_field_[32]) {
+    (*chrome_frame_reporter_).Serialize(msg->BeginNestedMessage<::protozero::Message>(32));
+  }
+
+  // Field 39: chrome_application_state_info
+  if (_has_field_[39]) {
+    (*chrome_application_state_info_).Serialize(msg->BeginNestedMessage<::protozero::Message>(39));
+  }
+
+  // Field 40: chrome_renderer_scheduler_state
+  if (_has_field_[40]) {
+    (*chrome_renderer_scheduler_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(40));
+  }
+
+  // Field 41: chrome_window_handle_event_info
+  if (_has_field_[41]) {
+    (*chrome_window_handle_event_info_).Serialize(msg->BeginNestedMessage<::protozero::Message>(41));
+  }
+
+  // Field 43: chrome_content_settings_event_info
+  if (_has_field_[43]) {
+    (*chrome_content_settings_event_info_).Serialize(msg->BeginNestedMessage<::protozero::Message>(43));
+  }
+
+  // Field 33: source_location
+  if (_has_field_[33]) {
+    (*source_location_).Serialize(msg->BeginNestedMessage<::protozero::Message>(33));
+  }
+
+  // Field 34: source_location_iid
+  if (_has_field_[34]) {
+    msg->AppendVarInt(34, source_location_iid_);
+  }
+
+  // Field 35: chrome_message_pump
+  if (_has_field_[35]) {
+    (*chrome_message_pump_).Serialize(msg->BeginNestedMessage<::protozero::Message>(35));
+  }
+
+  // Field 38: chrome_mojo_event_info
+  if (_has_field_[38]) {
+    (*chrome_mojo_event_info_).Serialize(msg->BeginNestedMessage<::protozero::Message>(38));
+  }
+
+  // Field 1: timestamp_delta_us
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, timestamp_delta_us_);
+  }
+
+  // Field 16: timestamp_absolute_us
+  if (_has_field_[16]) {
+    msg->AppendVarInt(16, timestamp_absolute_us_);
+  }
+
+  // Field 2: thread_time_delta_us
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, thread_time_delta_us_);
+  }
+
+  // Field 17: thread_time_absolute_us
+  if (_has_field_[17]) {
+    msg->AppendVarInt(17, thread_time_absolute_us_);
+  }
+
+  // Field 8: thread_instruction_count_delta
+  if (_has_field_[8]) {
+    msg->AppendVarInt(8, thread_instruction_count_delta_);
+  }
+
+  // Field 20: thread_instruction_count_absolute
+  if (_has_field_[20]) {
+    msg->AppendVarInt(20, thread_instruction_count_absolute_);
+  }
+
+  // Field 6: legacy_event
+  if (_has_field_[6]) {
+    (*legacy_event_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+TrackEvent_LegacyEvent::TrackEvent_LegacyEvent() = default;
+TrackEvent_LegacyEvent::~TrackEvent_LegacyEvent() = default;
+TrackEvent_LegacyEvent::TrackEvent_LegacyEvent(const TrackEvent_LegacyEvent&) = default;
+TrackEvent_LegacyEvent& TrackEvent_LegacyEvent::operator=(const TrackEvent_LegacyEvent&) = default;
+TrackEvent_LegacyEvent::TrackEvent_LegacyEvent(TrackEvent_LegacyEvent&&) noexcept = default;
+TrackEvent_LegacyEvent& TrackEvent_LegacyEvent::operator=(TrackEvent_LegacyEvent&&) = default;
+
+bool TrackEvent_LegacyEvent::operator==(const TrackEvent_LegacyEvent& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && name_iid_ == other.name_iid_
+   && phase_ == other.phase_
+   && duration_us_ == other.duration_us_
+   && thread_duration_us_ == other.thread_duration_us_
+   && thread_instruction_delta_ == other.thread_instruction_delta_
+   && unscoped_id_ == other.unscoped_id_
+   && local_id_ == other.local_id_
+   && global_id_ == other.global_id_
+   && id_scope_ == other.id_scope_
+   && use_async_tts_ == other.use_async_tts_
+   && bind_id_ == other.bind_id_
+   && bind_to_enclosing_ == other.bind_to_enclosing_
+   && flow_direction_ == other.flow_direction_
+   && instant_event_scope_ == other.instant_event_scope_
+   && pid_override_ == other.pid_override_
+   && tid_override_ == other.tid_override_;
+}
+
+bool TrackEvent_LegacyEvent::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* name_iid */:
+        field.get(&name_iid_);
+        break;
+      case 2 /* phase */:
+        field.get(&phase_);
+        break;
+      case 3 /* duration_us */:
+        field.get(&duration_us_);
+        break;
+      case 4 /* thread_duration_us */:
+        field.get(&thread_duration_us_);
+        break;
+      case 15 /* thread_instruction_delta */:
+        field.get(&thread_instruction_delta_);
+        break;
+      case 6 /* unscoped_id */:
+        field.get(&unscoped_id_);
+        break;
+      case 10 /* local_id */:
+        field.get(&local_id_);
+        break;
+      case 11 /* global_id */:
+        field.get(&global_id_);
+        break;
+      case 7 /* id_scope */:
+        field.get(&id_scope_);
+        break;
+      case 9 /* use_async_tts */:
+        field.get(&use_async_tts_);
+        break;
+      case 8 /* bind_id */:
+        field.get(&bind_id_);
+        break;
+      case 12 /* bind_to_enclosing */:
+        field.get(&bind_to_enclosing_);
+        break;
+      case 13 /* flow_direction */:
+        field.get(&flow_direction_);
+        break;
+      case 14 /* instant_event_scope */:
+        field.get(&instant_event_scope_);
+        break;
+      case 18 /* pid_override */:
+        field.get(&pid_override_);
+        break;
+      case 19 /* tid_override */:
+        field.get(&tid_override_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string TrackEvent_LegacyEvent::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> TrackEvent_LegacyEvent::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void TrackEvent_LegacyEvent::Serialize(::protozero::Message* msg) const {
+  // Field 1: name_iid
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, name_iid_);
+  }
+
+  // Field 2: phase
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, phase_);
+  }
+
+  // Field 3: duration_us
+  if (_has_field_[3]) {
+    msg->AppendVarInt(3, duration_us_);
+  }
+
+  // Field 4: thread_duration_us
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, thread_duration_us_);
+  }
+
+  // Field 15: thread_instruction_delta
+  if (_has_field_[15]) {
+    msg->AppendVarInt(15, thread_instruction_delta_);
+  }
+
+  // Field 6: unscoped_id
+  if (_has_field_[6]) {
+    msg->AppendVarInt(6, unscoped_id_);
+  }
+
+  // Field 10: local_id
+  if (_has_field_[10]) {
+    msg->AppendVarInt(10, local_id_);
+  }
+
+  // Field 11: global_id
+  if (_has_field_[11]) {
+    msg->AppendVarInt(11, global_id_);
+  }
+
+  // Field 7: id_scope
+  if (_has_field_[7]) {
+    msg->AppendString(7, id_scope_);
+  }
+
+  // Field 9: use_async_tts
+  if (_has_field_[9]) {
+    msg->AppendTinyVarInt(9, use_async_tts_);
+  }
+
+  // Field 8: bind_id
+  if (_has_field_[8]) {
+    msg->AppendVarInt(8, bind_id_);
+  }
+
+  // Field 12: bind_to_enclosing
+  if (_has_field_[12]) {
+    msg->AppendTinyVarInt(12, bind_to_enclosing_);
+  }
+
+  // Field 13: flow_direction
+  if (_has_field_[13]) {
+    msg->AppendVarInt(13, flow_direction_);
+  }
+
+  // Field 14: instant_event_scope
+  if (_has_field_[14]) {
+    msg->AppendVarInt(14, instant_event_scope_);
+  }
+
+  // Field 18: pid_override
+  if (_has_field_[18]) {
+    msg->AppendVarInt(18, pid_override_);
+  }
+
+  // Field 19: tid_override
+  if (_has_field_[19]) {
+    msg->AppendVarInt(19, tid_override_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_log_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/android/android_polled_state_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/android/packages_list_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/ftrace/ftrace_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/gpu/gpu_counter_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/gpu/vulkan_memory_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/inode_file/inode_file_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/interceptors/console_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/power/android_power_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/process_stats/process_stats_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/heapprofd_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/java_hprof_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/profiling/perf_event_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/sys_stats/sys_stats_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/track_event/track_event_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/chrome/chrome_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/data_source_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/interceptor_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/stress_test_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/test_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/config/trace_config.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/clock_snapshot.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/trigger.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/system_info.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/android/android_log.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/android/frame_timeline_event.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/android/gpu_mem_event.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/android/graphics_frame_event.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/android/initial_display_state.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/android/packages_list.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/chrome/chrome_benchmark_metadata.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/chrome/chrome_metadata.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/chrome/chrome_trace_event.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/filesystem/inode_file_map.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ftrace_event.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ftrace_event_bundle.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ftrace_stats.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/test_bundle_wrapper.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/generic.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/binder.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/block.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/cgroup.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/clk.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/compaction.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/cpuhp.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/dmabuf_heap.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/dpu.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ext4.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/f2fs.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/fastrpc.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/fence.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/filemap.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ftrace.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/g2d.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/gpu_mem.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/i2c.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ion.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/ipi.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/irq.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/kmem.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/lowmemorykiller.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/mali.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/mdss.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/mm_event.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/oom.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/power.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/raw_syscalls.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/regulator.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/sched.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/scm.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/sde.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/signal.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/sync.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/systrace.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/task.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/thermal.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/vmscan.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ftrace/workqueue.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/perfetto/perfetto_metatrace.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/perfetto/tracing_service_event.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/power/android_energy_estimation_breakdown.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/power/battery_counters.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/power/power_rails.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ps/process_stats.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ps/process_tree.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/sys_stats/sys_stats.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/system_info/cpu_info.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/trace_packet_defaults.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/test_event.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/test_extensions.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/trace_packet.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/trace.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/extension_descriptor.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/memory_graph.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: gen/protos/perfetto/trace/ui_state.pbzero.cc
+// Intentionally empty (crbug.com/998165)
+// gen_amalgamated begin source: src/tracing/trace_writer_base.cc
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
+
+namespace perfetto {
+
+// This destructor needs to be defined in a dedicated translation unit and
+// cannot be merged together with the other ones in virtual_destructors.cc.
+// This is because trace_writer_base.h/cc  is part of a separate target
+// (src/public:common) that is linked also by other part of the codebase.
+
+TraceWriterBase::~TraceWriterBase() = default;
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/core/id_allocator.cc
+// gen_amalgamated begin header: src/tracing/core/id_allocator.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_CORE_ID_ALLOCATOR_H_
+#define SRC_TRACING_CORE_ID_ALLOCATOR_H_
+
+#include <stdint.h>
+
+#include <type_traits>
+#include <vector>
+
+namespace perfetto {
+
+// Handles assigment of IDs (int types) from a fixed-size pool.
+// Zero is not considered a valid ID.
+// The base class takes always a uint32_t and the derived class casts and checks
+// bounds at compile time. This is to avoid bloating code with different
+// instances of the main class for each size.
+class IdAllocatorGeneric {
+ public:
+  // |max_id| is inclusive.
+  explicit IdAllocatorGeneric(uint32_t max_id);
+  ~IdAllocatorGeneric();
+
+  // Returns an ID in the range [1, max_id] or 0 if no more ids are available.
+  uint32_t AllocateGeneric();
+  void FreeGeneric(uint32_t);
+
+  bool IsEmpty() const;
+
+ private:
+  IdAllocatorGeneric(const IdAllocatorGeneric&) = delete;
+  IdAllocatorGeneric& operator=(const IdAllocatorGeneric&) = delete;
+
+  const uint32_t max_id_;
+  uint32_t last_id_ = 0;
+  std::vector<bool> ids_;
+};
+
+template <typename T = uint32_t>
+class IdAllocator : public IdAllocatorGeneric {
+ public:
+  explicit IdAllocator(T end) : IdAllocatorGeneric(end) {
+    static_assert(std::is_integral<T>::value && std::is_unsigned<T>::value,
+                  "T must be an unsigned integer");
+    static_assert(sizeof(T) <= sizeof(uint32_t), "T is too big");
+  }
+
+  T Allocate() { return static_cast<T>(AllocateGeneric()); }
+  void Free(T id) { FreeGeneric(id); }
+};
+
+}  // namespace perfetto
+
+#endif  // SRC_TRACING_CORE_ID_ALLOCATOR_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/tracing/core/id_allocator.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+namespace perfetto {
+
+IdAllocatorGeneric::IdAllocatorGeneric(uint32_t max_id) : max_id_(max_id) {
+  PERFETTO_DCHECK(max_id > 1);
+}
+
+IdAllocatorGeneric::~IdAllocatorGeneric() = default;
+
+uint32_t IdAllocatorGeneric::AllocateGeneric() {
+  for (uint32_t ignored = 1; ignored <= max_id_; ignored++) {
+    last_id_ = last_id_ < max_id_ ? last_id_ + 1 : 1;
+    const auto id = last_id_;
+
+    // 0 is never a valid ID. So if we are looking for |id| == N and there are
+    // N or less elements in the vector, they must necessarily be all < N.
+    // e.g. if |id| == 4 and size() == 4, the vector will contain IDs 0,1,2,3.
+    if (id >= ids_.size()) {
+      ids_.resize(id + 1);
+      ids_[id] = true;
+      return id;
+    }
+
+    if (!ids_[id]) {
+      ids_[id] = true;
+      return id;
+    }
+  }
+  return 0;
+}
+
+void IdAllocatorGeneric::FreeGeneric(uint32_t id) {
+  if (id == 0 || id >= ids_.size() || !ids_[id]) {
+    PERFETTO_DFATAL("Invalid id.");
+    return;
+  }
+  ids_[id] = false;
+}
+
+bool IdAllocatorGeneric::IsEmpty() const {
+  for (const auto id : ids_) {
+    if (id)
+      return false;
+  }
+  return true;
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/core/null_trace_writer.cc
+// gen_amalgamated begin header: src/tracing/core/null_trace_writer.h
+// gen_amalgamated begin header: include/perfetto/ext/tracing/core/trace_writer.h
+// gen_amalgamated begin header: include/perfetto/ext/tracing/core/basic_types.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_BASIC_TYPES_H_
+#define INCLUDE_PERFETTO_EXT_TRACING_CORE_BASIC_TYPES_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+using uid_t = unsigned int;
+#endif
+
+namespace perfetto {
+
+// Unique within the scope of the tracing service.
+using TracingSessionID = uint64_t;
+
+// Unique within the scope of the tracing service.
+using ProducerID = uint16_t;
+
+// Unique within the scope of the tracing service.
+using DataSourceInstanceID = uint64_t;
+
+// Unique within the scope of a Producer.
+using WriterID = uint16_t;
+
+// Unique within the scope of the tracing service.
+using FlushRequestID = uint64_t;
+
+// We need one FD per producer and we are not going to be able to keep > 64k FDs
+// open in the service.
+static constexpr ProducerID kMaxProducerID = static_cast<ProducerID>(-1);
+
+// 1024 Writers per producer seems a resonable bound. This reduces the ability
+// to memory-DoS the service by having to keep track of too many writer IDs.
+static constexpr WriterID kMaxWriterID = static_cast<WriterID>((1 << 10) - 1);
+
+// Unique within the scope of a {ProducerID, WriterID} tuple.
+using ChunkID = uint32_t;
+static constexpr ChunkID kMaxChunkID = static_cast<ChunkID>(-1);
+
+// Unique within the scope of the tracing service.
+using BufferID = uint16_t;
+
+// Target buffer ID for SharedMemoryArbiter. Values up to max uint16_t are
+// equivalent to a bound BufferID. Values above max uint16_t are reservation IDs
+// for the target buffer of a startup trace writer. Reservation IDs will be
+// translated to actual BufferIDs after they are bound by
+// SharedMemoryArbiter::BindStartupTargetBuffer().
+using MaybeUnboundBufferID = uint32_t;
+
+// Keep this in sync with SharedMemoryABI::PageHeader::target_buffer.
+static constexpr BufferID kMaxTraceBufferID = static_cast<BufferID>(-1);
+
+// Unique within the scope of a tracing session.
+using PacketSequenceID = uint32_t;
+// Used for extra packets emitted by the service, such as statistics.
+static constexpr PacketSequenceID kServicePacketSequenceID = 1;
+static constexpr PacketSequenceID kMaxPacketSequenceID =
+    static_cast<PacketSequenceID>(-1);
+
+constexpr uid_t kInvalidUid = static_cast<uid_t>(-1);
+
+constexpr uint32_t kDefaultFlushTimeoutMs = 5000;
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACING_CORE_BASIC_TYPES_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_WRITER_H_
+#define INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_WRITER_H_
+
+#include <functional>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
+
+namespace perfetto {
+
+namespace protos {
+namespace pbzero {
+class TracePacket;
+}  // namespace pbzero
+}  // namespace protos
+
+// This is a single-thread write interface that allows to write protobufs
+// directly into the tracing shared buffer without making any copies.
+// It takes care of acquiring and releasing chunks from the
+// SharedMemoryArbiter and splitting protos over chunks.
+// The idea is that each data source creates one (or more) TraceWriter for each
+// thread it wants to write from. Each TraceWriter will get its own dedicated
+// chunk and will write into the shared buffer without any locking most of the
+// time. Locking will happen only when a chunk is exhausted and a new one is
+// acquired from the arbiter.
+
+// TODO: TraceWriter needs to keep the shared memory buffer alive (refcount?).
+// Otherwise if the shared memory buffer goes away (e.g. the Service crashes)
+// the TraceWriter will keep writing into unmapped memory.
+
+class PERFETTO_EXPORT TraceWriter : public TraceWriterBase {
+ public:
+  using TracePacketHandle =
+      protozero::MessageHandle<protos::pbzero::TracePacket>;
+
+  TraceWriter();
+  ~TraceWriter() override;
+
+  // Returns a handle to the root proto message for the trace. The message will
+  // be finalized either by calling directly handle.Finalize() or by letting the
+  // handle go out of scope. The returned handle can be std::move()'d but cannot
+  // be used after either: (i) the TraceWriter instance is destroyed, (ii) a
+  // subsequence NewTracePacket() call is made on the same TraceWriter instance.
+  // The returned packet handle is always valid, but note that, when using
+  // BufferExhaustedPolicy::kDrop and the SMB is exhausted, it may be assigned
+  // a garbage chunk and any trace data written into it will be lost. For more
+  // details on buffer size choices: https://perfetto.dev/docs/concepts/buffers.
+  TracePacketHandle NewTracePacket() override = 0;
+
+  // Commits the data pending for the current chunk into the shared memory
+  // buffer and sends a CommitDataRequest() to the service. This can be called
+  // only if the handle returned by NewTracePacket() has been destroyed (i.e. we
+  // cannot Flush() while writing a TracePacket).
+  // Note: Flush() also happens implicitly when destroying the TraceWriter.
+  // |callback| is an optional callback. When non-null it will request the
+  // service to ACK the flush and will be invoked after the service has
+  // acknowledged it. The callback might be NEVER INVOKED if the service crashes
+  // or the IPC connection is dropped. The callback should be used only by tests
+  // and best-effort features (logging).
+  // TODO(primiano): right now the |callback| will be called on the IPC thread.
+  // This is fine in the current single-thread scenario, but long-term
+  // trace_writer_impl.cc should be smarter and post it on the right thread.
+  void Flush(std::function<void()> callback = {}) override = 0;
+
+  virtual WriterID writer_id() const = 0;
+
+  // Bytes written since creation. Is not reset when new chunks are acquired.
+  virtual uint64_t written() const override = 0;
+
+ private:
+  TraceWriter(const TraceWriter&) = delete;
+  TraceWriter& operator=(const TraceWriter&) = delete;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_WRITER_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_CORE_NULL_TRACE_WRITER_H_
+#define SRC_TRACING_CORE_NULL_TRACE_WRITER_H_
+
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/root_message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_null_delegate.h"
+
+namespace perfetto {
+
+// A specialization of TraceWriter which no-ops all the writes routing them
+// into a fixed region of memory
+// See //include/perfetto/tracing/core/trace_writer.h for docs.
+class NullTraceWriter : public TraceWriter {
+ public:
+  NullTraceWriter();
+  ~NullTraceWriter() override;
+
+  // TraceWriter implementation. See documentation in trace_writer.h.
+  // TracePacketHandle is defined in trace_writer.h
+  TracePacketHandle NewTracePacket() override;
+  void Flush(std::function<void()> callback = {}) override;
+  WriterID writer_id() const override;
+  uint64_t written() const override;
+
+ private:
+  NullTraceWriter(const NullTraceWriter&) = delete;
+  NullTraceWriter& operator=(const NullTraceWriter&) = delete;
+
+  protozero::ScatteredStreamWriterNullDelegate delegate_;
+  protozero::ScatteredStreamWriter stream_;
+
+  // The packet returned via NewTracePacket(). Its owned by this class,
+  // TracePacketHandle has just a pointer to it.
+  std::unique_ptr<protozero::RootMessage<protos::pbzero::TracePacket>>
+      cur_packet_;
+};
+
+}  // namespace perfetto
+
+#endif  // SRC_TRACING_CORE_NULL_TRACE_WRITER_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/tracing/core/null_trace_writer.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
+
+namespace perfetto {
+
+NullTraceWriter::NullTraceWriter() : delegate_(4096), stream_(&delegate_) {
+  cur_packet_.reset(new protozero::RootMessage<protos::pbzero::TracePacket>());
+  cur_packet_->Finalize();  // To avoid the DCHECK in NewTracePacket().
+}
+
+NullTraceWriter::~NullTraceWriter() {}
+
+void NullTraceWriter::Flush(std::function<void()> callback) {
+  // Flush() cannot be called in the middle of a TracePacket.
+  PERFETTO_CHECK(cur_packet_->is_finalized());
+
+  if (callback)
+    callback();
+}
+
+NullTraceWriter::TracePacketHandle NullTraceWriter::NewTracePacket() {
+  // If we hit this, the caller is calling NewTracePacket() without having
+  // finalized the previous packet.
+  PERFETTO_DCHECK(cur_packet_->is_finalized());
+  cur_packet_->Reset(&stream_);
+  return TraceWriter::TracePacketHandle(cur_packet_.get());
+}
+
+WriterID NullTraceWriter::writer_id() const {
+  return 0;
+}
+
+uint64_t NullTraceWriter::written() const {
+  return 0;
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/core/shared_memory_abi.cc
+// gen_amalgamated begin header: include/perfetto/ext/tracing/core/shared_memory_abi.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ABI_H_
+#define INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ABI_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <array>
+#include <atomic>
+#include <bitset>
+#include <thread>
+#include <type_traits>
+#include <utility>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+
+// This file defines the binary interface of the memory buffers shared between
+// Producer and Service. This is a long-term stable ABI and has to be backwards
+// compatible to deal with mismatching Producer and Service versions.
+//
+// Overview
+// --------
+// SMB := "Shared Memory Buffer".
+// In the most typical case of a multi-process architecture (i.e. Producer and
+// Service are hosted by different processes), a Producer means almost always
+// a "client process producing data" (almost: in some cases a process might host
+// > 1 Producer, if it links two libraries, independent of each other, that both
+// use Perfetto tracing).
+// The Service has one SMB for each Producer.
+// A producer has one or (typically) more data sources. They all share the same
+// SMB.
+// The SMB is a staging area to decouple data sources living in the Producer
+// and allow them to do non-blocking async writes.
+// The SMB is *not* the ultimate logging buffer seen by the Consumer. That one
+// is larger (~MBs) and not shared with Producers.
+// Each SMB is small, typically few KB. Its size is configurable by the producer
+// within a max limit of ~MB (see kMaxShmSize in tracing_service_impl.cc).
+// The SMB is partitioned into fixed-size Page(s). The size of the Pages are
+// determined by each Producer at connection time and cannot be changed.
+// Hence, different producers can have SMB(s) that have a different Page size
+// from each other, but the page size will be constant throughout all the
+// lifetime of the SMB.
+// Page(s) are partitioned by the Producer into variable size Chunk(s):
+//
+// +------------+      +--------------------------+
+// | Producer 1 |  <-> |      SMB 1 [~32K - 1MB]  |
+// +------------+      +--------+--------+--------+
+//                     |  Page  |  Page  |  Page  |
+//                     +--------+--------+--------+
+//                     | Chunk  |        | Chunk  |
+//                     +--------+  Chunk +--------+ <----+
+//                     | Chunk  |        | Chunk  |      |
+//                     +--------+--------+--------+      +---------------------+
+//                                                       |       Service       |
+// +------------+      +--------------------------+      +---------------------+
+// | Producer 2 |  <-> |      SMB 2 [~32K - 1MB]  |     /| large ring buffers  |
+// +------------+      +--------+--------+--------+ <--+ | (100K - several MB) |
+//                     |  Page  |  Page  |  Page  |      +---------------------+
+//                     +--------+--------+--------+
+//                     | Chunk  |        | Chunk  |
+//                     +--------+  Chunk +--------+
+//                     | Chunk  |        | Chunk  |
+//                     +--------+--------+--------+
+//
+// * Sizes of both SMB and ring buffers are purely indicative and decided at
+// configuration time by the Producer (for SMB sizes) and the Consumer (for the
+// final ring buffer size).
+
+// Page
+// ----
+// A page is a portion of the shared memory buffer and defines the granularity
+// of the interaction between the Producer and tracing Service. When scanning
+// the shared memory buffer to determine if something should be moved to the
+// central logging buffers, the Service most of the times looks at and moves
+// whole pages. Similarly, the Producer sends an IPC to invite the Service to
+// drain the shared memory buffer only when a whole page is filled.
+// Having fixed the total SMB size (hence the total memory overhead), the page
+// size is a triangular tradeoff between:
+// 1) IPC traffic: smaller pages -> more IPCs.
+// 2) Producer lock freedom: larger pages -> larger chunks -> data sources can
+//    write more data without needing to swap chunks and synchronize.
+// 3) Risk of write-starving the SMB: larger pages -> higher chance that the
+//    Service won't manage to drain them and the SMB remains full.
+// The page size, on the other side, has no implications on wasted memory due to
+// fragmentations (see Chunk below).
+// The size of the page is chosen by the Service at connection time and stays
+// fixed throughout all the lifetime of the Producer. Different producers (i.e.
+// ~ different client processes) can use different page sizes.
+// The page size must be an integer multiple of 4k (this is to allow VM page
+// stealing optimizations) and obviously has to be an integer divisor of the
+// total SMB size.
+
+// Chunk
+// -----
+// A chunk is a portion of a Page which is written and handled by a Producer.
+// A chunk contains a linear sequence of TracePacket(s) (the root proto).
+// A chunk cannot be written concurrently by two data sources. Protobufs must be
+// encoded as contiguous byte streams and cannot be interleaved. Therefore, on
+// the Producer side, a chunk is almost always owned exclusively by one thread
+// (% extremely peculiar slow-path cases).
+// Chunks are essentially single-writer single-thread lock-free arenas. Locking
+// happens only when a Chunk is full and a new one needs to be acquired.
+// Locking happens only within the scope of a Producer process. There is no
+// inter-process locking. The Producer cannot lock the Service and viceversa.
+// In the worst case, any of the two can starve the SMB, by marking all chunks
+// as either being read or written. But that has the only side effect of
+// losing the trace data.
+// The Producer can decide to partition each page into a number of limited
+// configurations (e.g., 1 page == 1 chunk, 1 page == 2 chunks and so on).
+
+// TracePacket
+// -----------
+// Is the atom of tracing. Putting aside pages and chunks a trace is merely a
+// sequence of TracePacket(s). TracePacket is the root protobuf message.
+// A TracePacket can span across several chunks (hence even across several
+// pages). A TracePacket can therefore be >> chunk size, >> page size and even
+// >> SMB size. The Chunk header carries metadata to deal with the TracePacket
+// splitting case.
+
+// Use only explicitly-sized types below. DO NOT use size_t or any architecture
+// dependent size (e.g. size_t) in the struct fields. This buffer will be read
+// and written by processes that have a different bitness in the same OS.
+// Instead it's fine to assume little-endianess. Big-endian is a dream we are
+// not currently pursuing.
+
+class SharedMemoryABI {
+ public:
+  static constexpr size_t kMinPageSize = 4 * 1024;
+
+  // This is due to Chunk::size being 16 bits.
+  static constexpr size_t kMaxPageSize = 64 * 1024;
+
+  // "14" is the max number that can be encoded in a 32 bit atomic word using
+  // 2 state bits per Chunk and leaving 4 bits for the page layout.
+  // See PageLayout below.
+  static constexpr size_t kMaxChunksPerPage = 14;
+
+  // Each TracePacket in the Chunk is prefixed by a 4 bytes redundant VarInt
+  // (see proto_utils.h) stating its size.
+  static constexpr size_t kPacketHeaderSize = 4;
+
+  // TraceWriter specifies this invalid packet/fragment size to signal to the
+  // service that a packet should be discarded, because the TraceWriter couldn't
+  // write its remaining fragments (e.g. because the SMB was exhausted).
+  static constexpr size_t kPacketSizeDropPacket =
+      protozero::proto_utils::kMaxMessageLength;
+
+  // Chunk states and transitions:
+  //    kChunkFree  <----------------+
+  //         |  (Producer)           |
+  //         V                       |
+  //  kChunkBeingWritten             |
+  //         |  (Producer)           |
+  //         V                       |
+  //  kChunkComplete                 |
+  //         |  (Service)            |
+  //         V                       |
+  //  kChunkBeingRead                |
+  //        |   (Service)            |
+  //        +------------------------+
+  enum ChunkState : uint32_t {
+    // The Chunk is free. The Service shall never touch it, the Producer can
+    // acquire it and transition it into kChunkBeingWritten.
+    kChunkFree = 0,
+
+    // The Chunk is being used by the Producer and is not complete yet.
+    // The Service shall never touch kChunkBeingWritten pages.
+    kChunkBeingWritten = 1,
+
+    // The Service is moving the page into its non-shared ring buffer. The
+    // Producer shall never touch kChunkBeingRead pages.
+    kChunkBeingRead = 2,
+
+    // The Producer is done writing the page and won't touch it again. The
+    // Service can now move it to its non-shared ring buffer.
+    // kAllChunksComplete relies on this being == 3.
+    kChunkComplete = 3,
+  };
+  static constexpr const char* kChunkStateStr[] = {"Free", "BeingWritten",
+                                                   "BeingRead", "Complete"};
+
+  enum PageLayout : uint32_t {
+    // The page is fully free and has not been partitioned yet.
+    kPageNotPartitioned = 0,
+
+    // TODO(primiano): Aligning a chunk @ 16 bytes could allow to use faster
+    // intrinsics based on quad-word moves. Do the math and check what is the
+    // fragmentation loss.
+
+    // align4(X) := the largest integer N s.t. (N % 4) == 0 && N <= X.
+    // 8 == sizeof(PageHeader).
+    kPageDiv1 = 1,   // Only one chunk of size: PAGE_SIZE - 8.
+    kPageDiv2 = 2,   // Two chunks of size: align4((PAGE_SIZE - 8) / 2).
+    kPageDiv4 = 3,   // Four chunks of size: align4((PAGE_SIZE - 8) / 4).
+    kPageDiv7 = 4,   // Seven chunks of size: align4((PAGE_SIZE - 8) / 7).
+    kPageDiv14 = 5,  // Fourteen chunks of size: align4((PAGE_SIZE - 8) / 14).
+
+    // The rationale for 7 and 14 above is to maximize the page usage for the
+    // likely case of |page_size| == 4096:
+    // (((4096 - 8) / 14) % 4) == 0, while (((4096 - 8) / 16 % 4)) == 3. So
+    // Div16 would waste 3 * 16 = 48 bytes per page for chunk alignment gaps.
+
+    kPageDivReserved1 = 6,
+    kPageDivReserved2 = 7,
+    kNumPageLayouts = 8,
+  };
+
+  // Keep this consistent with the PageLayout enum above.
+  static constexpr uint32_t kNumChunksForLayout[] = {0, 1, 2, 4, 7, 14, 0, 0};
+
+  // Layout of a Page.
+  // +===================================================+
+  // | Page header [8 bytes]                             |
+  // | Tells how many chunks there are, how big they are |
+  // | and their state (free, read, write, complete).    |
+  // +===================================================+
+  // +***************************************************+
+  // | Chunk #0 header [8 bytes]                         |
+  // | Tells how many packets there are and whether the  |
+  // | whether the 1st and last ones are fragmented.     |
+  // | Also has a chunk id to reassemble fragments.    |
+  // +***************************************************+
+  // +---------------------------------------------------+
+  // | Packet #0 size [varint, up to 4 bytes]            |
+  // + - - - - - - - - - - - - - - - - - - - - - - - - - +
+  // | Packet #0 payload                                 |
+  // | A TracePacket protobuf message                    |
+  // +---------------------------------------------------+
+  //                         ...
+  // + . . . . . . . . . . . . . . . . . . . . . . . . . +
+  // |      Optional padding to maintain aligment        |
+  // + . . . . . . . . . . . . . . . . . . . . . . . . . +
+  // +---------------------------------------------------+
+  // | Packet #N size [varint, up to 4 bytes]            |
+  // + - - - - - - - - - - - - - - - - - - - - - - - - - +
+  // | Packet #N payload                                 |
+  // | A TracePacket protobuf message                    |
+  // +---------------------------------------------------+
+  //                         ...
+  // +***************************************************+
+  // | Chunk #M header [8 bytes]                         |
+  //                         ...
+
+  // Alignment applies to start offset only. The Chunk size is *not* aligned.
+  static constexpr uint32_t kChunkAlignment = 4;
+  static constexpr uint32_t kChunkShift = 2;
+  static constexpr uint32_t kChunkMask = 0x3;
+  static constexpr uint32_t kLayoutMask = 0x70000000;
+  static constexpr uint32_t kLayoutShift = 28;
+  static constexpr uint32_t kAllChunksMask = 0x0FFFFFFF;
+
+  // This assumes that kChunkComplete == 3.
+  static constexpr uint32_t kAllChunksComplete = 0x0FFFFFFF;
+  static constexpr uint32_t kAllChunksFree = 0;
+  static constexpr size_t kInvalidPageIdx = static_cast<size_t>(-1);
+
+  // There is one page header per page, at the beginning of the page.
+  struct PageHeader {
+    // |layout| bits:
+    // [31] [30:28] [27:26] ... [1:0]
+    //  |      |       |     |    |
+    //  |      |       |     |    +---------- ChunkState[0]
+    //  |      |       |     +--------------- ChunkState[12..1]
+    //  |      |       +--------------------- ChunkState[13]
+    //  |      +----------------------------- PageLayout (0 == page fully free)
+    //  +------------------------------------ Reserved for future use
+    std::atomic<uint32_t> layout;
+
+    // If we'll ever going to use this in the future it might come handy
+    // reviving the kPageBeingPartitioned logic (look in git log, it was there
+    // at some point in the past).
+    uint32_t reserved;
+  };
+
+  // There is one Chunk header per chunk (hence PageLayout per page) at the
+  // beginning of each chunk.
+  struct ChunkHeader {
+    enum Flags : uint8_t {
+      // If set, the first TracePacket in the chunk is partial and continues
+      // from |chunk_id| - 1 (within the same |writer_id|).
+      kFirstPacketContinuesFromPrevChunk = 1 << 0,
+
+      // If set, the last TracePacket in the chunk is partial and continues on
+      // |chunk_id| + 1 (within the same |writer_id|).
+      kLastPacketContinuesOnNextChunk = 1 << 1,
+
+      // If set, the last (fragmented) TracePacket in the chunk has holes (even
+      // if the chunk is marked as kChunkComplete) that need to be patched
+      // out-of-band before the chunk can be read.
+      kChunkNeedsPatching = 1 << 2,
+    };
+
+    struct Packets {
+      // Number of valid TracePacket protobuf messages contained in the chunk.
+      // Each TracePacket is prefixed by its own size. This field is
+      // monotonically updated by the Producer with release store semantic when
+      // the packet at position |count| is started. This last packet may not be
+      // considered complete until |count| is incremented for the subsequent
+      // packet or the chunk is completed.
+      uint16_t count : 10;
+      static constexpr size_t kMaxCount = (1 << 10) - 1;
+
+      // See Flags above.
+      uint16_t flags : 6;
+    };
+
+    // A monotonic counter of the chunk within the scoped of a |writer_id|.
+    // The tuple (ProducerID, WriterID, ChunkID) allows to figure out if two
+    // chunks are contiguous (and hence a trace packets spanning across them can
+    // be glued) or we had some holes due to the ring buffer wrapping.
+    // This is set only when transitioning from kChunkFree to kChunkBeingWritten
+    // and remains unchanged throughout the remaining lifetime of the chunk.
+    std::atomic<uint32_t> chunk_id;
+
+    // ID of the writer, unique within the producer.
+    // Like |chunk_id|, this is set only when transitioning from kChunkFree to
+    // kChunkBeingWritten.
+    std::atomic<uint16_t> writer_id;
+
+    // There is no ProducerID here. The service figures that out from the IPC
+    // channel, which is unspoofable.
+
+    // Updated with release-store semantics.
+    std::atomic<Packets> packets;
+  };
+
+  class Chunk {
+   public:
+    Chunk();  // Constructs an invalid chunk.
+
+    // Chunk is move-only, to document the scope of the Acquire/Release
+    // TryLock operations below.
+    Chunk(const Chunk&) = delete;
+    Chunk operator=(const Chunk&) = delete;
+    Chunk(Chunk&&) noexcept;
+    Chunk& operator=(Chunk&&);
+
+    uint8_t* begin() const { return begin_; }
+    uint8_t* end() const { return begin_ + size_; }
+
+    // Size, including Chunk header.
+    size_t size() const { return size_; }
+
+    // Begin of the first packet (or packet fragment).
+    uint8_t* payload_begin() const { return begin_ + sizeof(ChunkHeader); }
+    size_t payload_size() const {
+      PERFETTO_DCHECK(size_ >= sizeof(ChunkHeader));
+      return size_ - sizeof(ChunkHeader);
+    }
+
+    bool is_valid() const { return begin_ && size_; }
+
+    // Index of the chunk within the page [0..13] (13 comes from kPageDiv14).
+    uint8_t chunk_idx() const { return chunk_idx_; }
+
+    ChunkHeader* header() { return reinterpret_cast<ChunkHeader*>(begin_); }
+
+    uint16_t writer_id() {
+      return header()->writer_id.load(std::memory_order_relaxed);
+    }
+
+    // Returns the count of packets and the flags with acquire-load semantics.
+    std::pair<uint16_t, uint8_t> GetPacketCountAndFlags() {
+      auto packets = header()->packets.load(std::memory_order_acquire);
+      const uint16_t packets_count = packets.count;
+      const uint8_t packets_flags = packets.flags;
+      return std::make_pair(packets_count, packets_flags);
+    }
+
+    // Increases |packets.count| with release semantics (note, however, that the
+    // packet count is incremented *before* starting writing a packet). Returns
+    // the new packet count. The increment is atomic but NOT race-free (i.e. no
+    // CAS). Only the Producer is supposed to perform this increment, and it's
+    // supposed to do that in a thread-safe way (holding a lock). A Chunk cannot
+    // be shared by multiple Producer threads without locking. The packet count
+    // is cleared by TryAcquireChunk(), when passing the new header for the
+    // chunk.
+    uint16_t IncrementPacketCount() {
+      ChunkHeader* chunk_header = header();
+      auto packets = chunk_header->packets.load(std::memory_order_relaxed);
+      packets.count++;
+      chunk_header->packets.store(packets, std::memory_order_release);
+      return packets.count;
+    }
+
+    // Increases |packets.count| to the given |packet_count|, but only if
+    // |packet_count| is larger than the current value of |packets.count|.
+    // Returns the new packet count. Same atomicity guarantees as
+    // IncrementPacketCount().
+    uint16_t IncreasePacketCountTo(uint16_t packet_count) {
+      ChunkHeader* chunk_header = header();
+      auto packets = chunk_header->packets.load(std::memory_order_relaxed);
+      if (packets.count < packet_count)
+        packets.count = packet_count;
+      chunk_header->packets.store(packets, std::memory_order_release);
+      return packets.count;
+    }
+
+    // Flags are cleared by TryAcquireChunk(), by passing the new header for
+    // the chunk, or through ClearNeedsPatchingFlag.
+    void SetFlag(ChunkHeader::Flags flag) {
+      ChunkHeader* chunk_header = header();
+      auto packets = chunk_header->packets.load(std::memory_order_relaxed);
+      packets.flags |= flag;
+      chunk_header->packets.store(packets, std::memory_order_release);
+    }
+
+    // This flag can only be cleared by the producer while it is still holding
+    // on to the chunk - i.e. while the chunk is still in state
+    // ChunkState::kChunkBeingWritten and hasn't been transitioned to
+    // ChunkState::kChunkComplete. This is ok, because the service is oblivious
+    // to the needs patching flag before the chunk is released as complete.
+    void ClearNeedsPatchingFlag() {
+      ChunkHeader* chunk_header = header();
+      auto packets = chunk_header->packets.load(std::memory_order_relaxed);
+      packets.flags &= ~ChunkHeader::kChunkNeedsPatching;
+      chunk_header->packets.store(packets, std::memory_order_release);
+    }
+
+   private:
+    friend class SharedMemoryABI;
+    Chunk(uint8_t* begin, uint16_t size, uint8_t chunk_idx);
+
+    // Don't add extra fields, keep the move operator fast.
+    uint8_t* begin_ = nullptr;
+    uint16_t size_ = 0;
+    uint8_t chunk_idx_ = 0;
+  };
+
+  // Construct an instance from an existing shared memory buffer.
+  SharedMemoryABI(uint8_t* start, size_t size, size_t page_size);
+  SharedMemoryABI();
+
+  void Initialize(uint8_t* start, size_t size, size_t page_size);
+
+  uint8_t* start() const { return start_; }
+  uint8_t* end() const { return start_ + size_; }
+  size_t size() const { return size_; }
+  size_t page_size() const { return page_size_; }
+  size_t num_pages() const { return num_pages_; }
+  bool is_valid() { return num_pages() > 0; }
+
+  uint8_t* page_start(size_t page_idx) {
+    PERFETTO_DCHECK(page_idx < num_pages_);
+    return start_ + page_size_ * page_idx;
+  }
+
+  PageHeader* page_header(size_t page_idx) {
+    return reinterpret_cast<PageHeader*>(page_start(page_idx));
+  }
+
+  // Returns true if the page is fully clear and has not been partitioned yet.
+  // The state of the page can change at any point after this returns (or even
+  // before). The Producer should use this only as a hint to decide out whether
+  // it should TryPartitionPage() or acquire an individual chunk.
+  bool is_page_free(size_t page_idx) {
+    return page_header(page_idx)->layout.load(std::memory_order_relaxed) == 0;
+  }
+
+  // Returns true if all chunks in the page are kChunkComplete. As above, this
+  // is advisory only. The Service is supposed to use this only to decide
+  // whether to TryAcquireAllChunksForReading() or not.
+  bool is_page_complete(size_t page_idx) {
+    auto layout = page_header(page_idx)->layout.load(std::memory_order_relaxed);
+    const uint32_t num_chunks = GetNumChunksForLayout(layout);
+    if (num_chunks == 0)
+      return false;  // Non partitioned pages cannot be complete.
+    return (layout & kAllChunksMask) ==
+           (kAllChunksComplete & ((1 << (num_chunks * kChunkShift)) - 1));
+  }
+
+  // For testing / debugging only.
+  std::string page_header_dbg(size_t page_idx) {
+    uint32_t x = page_header(page_idx)->layout.load(std::memory_order_relaxed);
+    return std::bitset<32>(x).to_string();
+  }
+
+  // Returns the page layout, which is a bitmap that specifies the chunking
+  // layout of the page and each chunk's current state. Reads with an
+  // acquire-load semantic to ensure a producer's writes corresponding to an
+  // update of the layout (e.g. clearing a chunk's header) are observed
+  // consistently.
+  uint32_t GetPageLayout(size_t page_idx) {
+    return page_header(page_idx)->layout.load(std::memory_order_acquire);
+  }
+
+  // Returns a bitmap in which each bit is set if the corresponding Chunk exists
+  // in the page (according to the page layout) and is free. If the page is not
+  // partitioned it returns 0 (as if the page had no free chunks).
+  uint32_t GetFreeChunks(size_t page_idx);
+
+  // Tries to atomically partition a page with the given |layout|. Returns true
+  // if the page was free and has been partitioned with the given |layout|,
+  // false if the page wasn't free anymore by the time we got there.
+  // If succeeds all the chunks are atomically set in the kChunkFree state.
+  bool TryPartitionPage(size_t page_idx, PageLayout layout);
+
+  // Tries to atomically mark a single chunk within the page as
+  // kChunkBeingWritten. Returns an invalid chunk if the page is not partitioned
+  // or the chunk is not in the kChunkFree state. If succeeds sets the chunk
+  // header to |header|.
+  Chunk TryAcquireChunkForWriting(size_t page_idx,
+                                  size_t chunk_idx,
+                                  const ChunkHeader* header) {
+    return TryAcquireChunk(page_idx, chunk_idx, kChunkBeingWritten, header);
+  }
+
+  // Similar to TryAcquireChunkForWriting. Fails if the chunk isn't in the
+  // kChunkComplete state.
+  Chunk TryAcquireChunkForReading(size_t page_idx, size_t chunk_idx) {
+    return TryAcquireChunk(page_idx, chunk_idx, kChunkBeingRead, nullptr);
+  }
+
+  // The caller must have successfully TryAcquireAllChunksForReading() or it
+  // needs to guarantee that the chunk is already in the kChunkBeingWritten
+  // state.
+  Chunk GetChunkUnchecked(size_t page_idx,
+                          uint32_t page_layout,
+                          size_t chunk_idx);
+
+  // Puts a chunk into the kChunkComplete state. Returns the page index.
+  size_t ReleaseChunkAsComplete(Chunk chunk) {
+    return ReleaseChunk(std::move(chunk), kChunkComplete);
+  }
+
+  // Puts a chunk into the kChunkFree state. Returns the page index.
+  size_t ReleaseChunkAsFree(Chunk chunk) {
+    return ReleaseChunk(std::move(chunk), kChunkFree);
+  }
+
+  ChunkState GetChunkState(size_t page_idx, size_t chunk_idx) {
+    PageHeader* phdr = page_header(page_idx);
+    uint32_t layout = phdr->layout.load(std::memory_order_relaxed);
+    return GetChunkStateFromLayout(layout, chunk_idx);
+  }
+
+  std::pair<size_t, size_t> GetPageAndChunkIndex(const Chunk& chunk);
+
+  uint16_t GetChunkSizeForLayout(uint32_t page_layout) const {
+    return chunk_sizes_[(page_layout & kLayoutMask) >> kLayoutShift];
+  }
+
+  static ChunkState GetChunkStateFromLayout(uint32_t page_layout,
+                                            size_t chunk_idx) {
+    return static_cast<ChunkState>((page_layout >> (chunk_idx * kChunkShift)) &
+                                   kChunkMask);
+  }
+
+  static constexpr uint32_t GetNumChunksForLayout(uint32_t page_layout) {
+    return kNumChunksForLayout[(page_layout & kLayoutMask) >> kLayoutShift];
+  }
+
+  // Returns a bitmap in which each bit is set if the corresponding Chunk exists
+  // in the page (according to the page layout) and is not free. If the page is
+  // not partitioned it returns 0 (as if the page had no used chunks). Bit N
+  // corresponds to Chunk N.
+  static uint32_t GetUsedChunks(uint32_t page_layout) {
+    const uint32_t num_chunks = GetNumChunksForLayout(page_layout);
+    uint32_t res = 0;
+    for (uint32_t i = 0; i < num_chunks; i++) {
+      res |= ((page_layout & kChunkMask) != kChunkFree) ? (1 << i) : 0;
+      page_layout >>= kChunkShift;
+    }
+    return res;
+  }
+
+ private:
+  SharedMemoryABI(const SharedMemoryABI&) = delete;
+  SharedMemoryABI& operator=(const SharedMemoryABI&) = delete;
+
+  Chunk TryAcquireChunk(size_t page_idx,
+                        size_t chunk_idx,
+                        ChunkState,
+                        const ChunkHeader*);
+  size_t ReleaseChunk(Chunk chunk, ChunkState);
+
+  uint8_t* start_ = nullptr;
+  size_t size_ = 0;
+  size_t page_size_ = 0;
+  size_t num_pages_ = 0;
+  std::array<uint16_t, kNumPageLayouts> chunk_sizes_;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ABI_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an "AS
+ * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/time.h"
+
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <sys/mman.h>
+#endif
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
+
+namespace perfetto {
+
+namespace {
+
+constexpr int kRetryAttempts = 64;
+
+inline void WaitBeforeNextAttempt(int attempt) {
+  if (attempt < kRetryAttempts / 2) {
+    std::this_thread::yield();
+  } else {
+    base::SleepMicroseconds((unsigned(attempt) / 10) * 1000);
+  }
+}
+
+// Returns the largest 4-bytes aligned chunk size <= |page_size| / |divider|
+// for each divider in PageLayout.
+constexpr size_t GetChunkSize(size_t page_size, size_t divider) {
+  return ((page_size - sizeof(SharedMemoryABI::PageHeader)) / divider) & ~3UL;
+}
+
+// Initializer for the const |chunk_sizes_| array.
+std::array<uint16_t, SharedMemoryABI::kNumPageLayouts> InitChunkSizes(
+    size_t page_size) {
+  static_assert(SharedMemoryABI::kNumPageLayouts ==
+                    base::ArraySize(SharedMemoryABI::kNumChunksForLayout),
+                "kNumPageLayouts out of date");
+  std::array<uint16_t, SharedMemoryABI::kNumPageLayouts> res = {};
+  for (size_t i = 0; i < SharedMemoryABI::kNumPageLayouts; i++) {
+    size_t num_chunks = SharedMemoryABI::kNumChunksForLayout[i];
+    size_t size = num_chunks == 0 ? 0 : GetChunkSize(page_size, num_chunks);
+    PERFETTO_CHECK(size <= std::numeric_limits<uint16_t>::max());
+    res[i] = static_cast<uint16_t>(size);
+  }
+  return res;
+}
+
+inline void ClearChunkHeader(SharedMemoryABI::ChunkHeader* header) {
+  header->writer_id.store(0u, std::memory_order_relaxed);
+  header->chunk_id.store(0u, std::memory_order_relaxed);
+  header->packets.store({}, std::memory_order_release);
+}
+
+}  // namespace
+
+// static
+constexpr uint32_t SharedMemoryABI::kNumChunksForLayout[];
+constexpr const char* SharedMemoryABI::kChunkStateStr[];
+constexpr const size_t SharedMemoryABI::kInvalidPageIdx;
+constexpr const size_t SharedMemoryABI::kMinPageSize;
+constexpr const size_t SharedMemoryABI::kMaxPageSize;
+constexpr const size_t SharedMemoryABI::kPacketSizeDropPacket;
+
+SharedMemoryABI::SharedMemoryABI() = default;
+
+SharedMemoryABI::SharedMemoryABI(uint8_t* start,
+                                 size_t size,
+                                 size_t page_size) {
+  Initialize(start, size, page_size);
+}
+
+void SharedMemoryABI::Initialize(uint8_t* start,
+                                 size_t size,
+                                 size_t page_size) {
+  start_ = start;
+  size_ = size;
+  page_size_ = page_size;
+  num_pages_ = size / page_size;
+  chunk_sizes_ = InitChunkSizes(page_size);
+  static_assert(sizeof(PageHeader) == 8, "PageHeader size");
+  static_assert(sizeof(ChunkHeader) == 8, "ChunkHeader size");
+  static_assert(sizeof(ChunkHeader::chunk_id) == sizeof(ChunkID),
+                "ChunkID size");
+
+  static_assert(sizeof(ChunkHeader::Packets) == 2, "ChunkHeader::Packets size");
+  static_assert(alignof(ChunkHeader) == kChunkAlignment,
+                "ChunkHeader alignment");
+
+  // In theory std::atomic does not guarantee that the underlying type
+  // consists only of the actual atomic word. Theoretically it could have
+  // locks or other state. In practice most implementations just implement
+  // them without extra state. The code below overlays the atomic into the
+  // SMB, hence relies on this implementation detail. This should be fine
+  // pragmatically (Chrome's base makes the same assumption), but let's have a
+  // check for this.
+  static_assert(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t) &&
+                    sizeof(std::atomic<uint16_t>) == sizeof(uint16_t),
+                "Incompatible STL <atomic> implementation");
+
+  // Chec that the kAllChunks(Complete,Free) are consistent with the
+  // ChunkState enum values.
+
+  // These must be zero because rely on zero-initialized memory being
+  // interpreted as "free".
+  static_assert(kChunkFree == 0 && kAllChunksFree == 0,
+                "kChunkFree/kAllChunksFree and must be 0");
+
+  static_assert((kAllChunksComplete & kChunkMask) == kChunkComplete,
+                "kAllChunksComplete out of sync with kChunkComplete");
+
+  // Check the consistency of the kMax... constants.
+  static_assert(sizeof(ChunkHeader::writer_id) == sizeof(WriterID),
+                "WriterID size");
+  ChunkHeader chunk_header{};
+  chunk_header.chunk_id.store(static_cast<uint32_t>(-1));
+  PERFETTO_CHECK(chunk_header.chunk_id.load() == kMaxChunkID);
+
+  chunk_header.writer_id.store(static_cast<uint16_t>(-1));
+  PERFETTO_CHECK(kMaxWriterID <= chunk_header.writer_id.load());
+
+  PERFETTO_CHECK(page_size >= kMinPageSize);
+  PERFETTO_CHECK(page_size <= kMaxPageSize);
+  PERFETTO_CHECK(page_size % kMinPageSize == 0);
+  PERFETTO_CHECK(reinterpret_cast<uintptr_t>(start) % kMinPageSize == 0);
+  PERFETTO_CHECK(size % page_size == 0);
+}
+
+SharedMemoryABI::Chunk SharedMemoryABI::GetChunkUnchecked(size_t page_idx,
+                                                          uint32_t page_layout,
+                                                          size_t chunk_idx) {
+  const size_t num_chunks = GetNumChunksForLayout(page_layout);
+  PERFETTO_DCHECK(chunk_idx < num_chunks);
+  // Compute the chunk virtual address and write it into |chunk|.
+  const uint16_t chunk_size = GetChunkSizeForLayout(page_layout);
+  size_t chunk_offset_in_page = sizeof(PageHeader) + chunk_idx * chunk_size;
+
+  Chunk chunk(page_start(page_idx) + chunk_offset_in_page, chunk_size,
+              static_cast<uint8_t>(chunk_idx));
+  PERFETTO_DCHECK(chunk.end() <= end());
+  return chunk;
+}
+
+SharedMemoryABI::Chunk SharedMemoryABI::TryAcquireChunk(
+    size_t page_idx,
+    size_t chunk_idx,
+    ChunkState desired_chunk_state,
+    const ChunkHeader* header) {
+  PERFETTO_DCHECK(desired_chunk_state == kChunkBeingRead ||
+                  desired_chunk_state == kChunkBeingWritten);
+  PageHeader* phdr = page_header(page_idx);
+  for (int attempt = 0; attempt < kRetryAttempts; attempt++) {
+    uint32_t layout = phdr->layout.load(std::memory_order_acquire);
+    const size_t num_chunks = GetNumChunksForLayout(layout);
+
+    // The page layout has changed (or the page is free).
+    if (chunk_idx >= num_chunks)
+      return Chunk();
+
+    // Verify that the chunk is still in a state that allows the transition to
+    // |desired_chunk_state|. The only allowed transitions are:
+    // 1. kChunkFree -> kChunkBeingWritten (Producer).
+    // 2. kChunkComplete -> kChunkBeingRead (Service).
+    ChunkState expected_chunk_state =
+        desired_chunk_state == kChunkBeingWritten ? kChunkFree : kChunkComplete;
+    auto cur_chunk_state = (layout >> (chunk_idx * kChunkShift)) & kChunkMask;
+    if (cur_chunk_state != expected_chunk_state)
+      return Chunk();
+
+    uint32_t next_layout = layout;
+    next_layout &= ~(kChunkMask << (chunk_idx * kChunkShift));
+    next_layout |= (desired_chunk_state << (chunk_idx * kChunkShift));
+    if (phdr->layout.compare_exchange_strong(layout, next_layout,
+                                             std::memory_order_acq_rel)) {
+      // Compute the chunk virtual address and write it into |chunk|.
+      Chunk chunk = GetChunkUnchecked(page_idx, layout, chunk_idx);
+      if (desired_chunk_state == kChunkBeingWritten) {
+        PERFETTO_DCHECK(header);
+        ChunkHeader* new_header = chunk.header();
+        new_header->writer_id.store(header->writer_id,
+                                    std::memory_order_relaxed);
+        new_header->chunk_id.store(header->chunk_id, std::memory_order_relaxed);
+        new_header->packets.store(header->packets, std::memory_order_release);
+      }
+      return chunk;
+    }
+    WaitBeforeNextAttempt(attempt);
+  }
+  return Chunk();  // All our attempts failed.
+}
+
+bool SharedMemoryABI::TryPartitionPage(size_t page_idx, PageLayout layout) {
+  PERFETTO_DCHECK(layout >= kPageDiv1 && layout <= kPageDiv14);
+  uint32_t expected_layout = 0;  // Free page.
+  uint32_t next_layout = (layout << kLayoutShift) & kLayoutMask;
+  PageHeader* phdr = page_header(page_idx);
+  if (!phdr->layout.compare_exchange_strong(expected_layout, next_layout,
+                                            std::memory_order_acq_rel)) {
+    return false;
+  }
+  return true;
+}
+
+uint32_t SharedMemoryABI::GetFreeChunks(size_t page_idx) {
+  uint32_t layout =
+      page_header(page_idx)->layout.load(std::memory_order_relaxed);
+  const uint32_t num_chunks = GetNumChunksForLayout(layout);
+  uint32_t res = 0;
+  for (uint32_t i = 0; i < num_chunks; i++) {
+    res |= ((layout & kChunkMask) == kChunkFree) ? (1 << i) : 0;
+    layout >>= kChunkShift;
+  }
+  return res;
+}
+
+size_t SharedMemoryABI::ReleaseChunk(Chunk chunk,
+                                     ChunkState desired_chunk_state) {
+  PERFETTO_DCHECK(desired_chunk_state == kChunkComplete ||
+                  desired_chunk_state == kChunkFree);
+
+  size_t page_idx;
+  size_t chunk_idx;
+  std::tie(page_idx, chunk_idx) = GetPageAndChunkIndex(chunk);
+
+  // Reset header fields, so that the service can identify when the chunk's
+  // header has been initialized by the producer.
+  if (desired_chunk_state == kChunkFree)
+    ClearChunkHeader(chunk.header());
+
+  for (int attempt = 0; attempt < kRetryAttempts; attempt++) {
+    PageHeader* phdr = page_header(page_idx);
+    uint32_t layout = phdr->layout.load(std::memory_order_relaxed);
+    const size_t page_chunk_size = GetChunkSizeForLayout(layout);
+
+    // TODO(primiano): this should not be a CHECK, because a malicious producer
+    // could crash us by putting the chunk in an invalid state. This should
+    // gracefully fail. Keep a CHECK until then.
+    PERFETTO_CHECK(chunk.size() == page_chunk_size);
+    const uint32_t chunk_state =
+        ((layout >> (chunk_idx * kChunkShift)) & kChunkMask);
+
+    // Verify that the chunk is still in a state that allows the transition to
+    // |desired_chunk_state|. The only allowed transitions are:
+    // 1. kChunkBeingWritten -> kChunkComplete (Producer).
+    // 2. kChunkBeingRead -> kChunkFree (Service).
+    ChunkState expected_chunk_state;
+    if (desired_chunk_state == kChunkComplete) {
+      expected_chunk_state = kChunkBeingWritten;
+    } else {
+      expected_chunk_state = kChunkBeingRead;
+    }
+
+    // TODO(primiano): should not be a CHECK (same rationale of comment above).
+    PERFETTO_CHECK(chunk_state == expected_chunk_state);
+    uint32_t next_layout = layout;
+    next_layout &= ~(kChunkMask << (chunk_idx * kChunkShift));
+    next_layout |= (desired_chunk_state << (chunk_idx * kChunkShift));
+
+    // If we are freeing a chunk and all the other chunks in the page are free
+    // we should de-partition the page and mark it as clear.
+    if ((next_layout & kAllChunksMask) == kAllChunksFree)
+      next_layout = 0;
+
+    if (phdr->layout.compare_exchange_strong(layout, next_layout,
+                                             std::memory_order_acq_rel)) {
+      return page_idx;
+    }
+    WaitBeforeNextAttempt(attempt);
+  }
+  // Too much contention on this page. Give up. This page will be left pending
+  // forever but there isn't much more we can do at this point.
+  PERFETTO_DFATAL("Too much contention on page.");
+  return kInvalidPageIdx;
+}
+
+SharedMemoryABI::Chunk::Chunk() = default;
+
+SharedMemoryABI::Chunk::Chunk(uint8_t* begin, uint16_t size, uint8_t chunk_idx)
+    : begin_(begin), size_(size), chunk_idx_(chunk_idx) {
+  PERFETTO_CHECK(reinterpret_cast<uintptr_t>(begin) % kChunkAlignment == 0);
+  PERFETTO_CHECK(size > 0);
+}
+
+SharedMemoryABI::Chunk::Chunk(Chunk&& o) noexcept {
+  *this = std::move(o);
+}
+
+SharedMemoryABI::Chunk& SharedMemoryABI::Chunk::operator=(Chunk&& o) {
+  begin_ = o.begin_;
+  size_ = o.size_;
+  chunk_idx_ = o.chunk_idx_;
+  o.begin_ = nullptr;
+  o.size_ = 0;
+  o.chunk_idx_ = 0;
+  return *this;
+}
+
+std::pair<size_t, size_t> SharedMemoryABI::GetPageAndChunkIndex(
+    const Chunk& chunk) {
+  PERFETTO_DCHECK(chunk.is_valid());
+  PERFETTO_DCHECK(chunk.begin() >= start_);
+  PERFETTO_DCHECK(chunk.end() <= start_ + size_);
+
+  // TODO(primiano): The divisions below could be avoided if we cached
+  // |page_shift_|.
+  const uintptr_t rel_addr = static_cast<uintptr_t>(chunk.begin() - start_);
+  const size_t page_idx = rel_addr / page_size_;
+  const size_t offset = rel_addr % page_size_;
+  PERFETTO_DCHECK(offset >= sizeof(PageHeader));
+  PERFETTO_DCHECK(offset % kChunkAlignment == 0);
+  PERFETTO_DCHECK((offset - sizeof(PageHeader)) % chunk.size() == 0);
+  const size_t chunk_idx = (offset - sizeof(PageHeader)) / chunk.size();
+  PERFETTO_DCHECK(chunk_idx < kMaxChunksPerPage);
+  PERFETTO_DCHECK(chunk_idx < GetNumChunksForLayout(GetPageLayout(page_idx)));
+  return std::make_pair(page_idx, chunk_idx);
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/core/shared_memory_arbiter_impl.cc
+// gen_amalgamated begin header: src/tracing/core/shared_memory_arbiter_impl.h
+// gen_amalgamated begin header: include/perfetto/ext/tracing/core/shared_memory_arbiter.h
+// gen_amalgamated begin header: include/perfetto/ext/tracing/core/tracing_service.h
+// gen_amalgamated begin header: include/perfetto/ext/tracing/core/shared_memory.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_H_
+#define INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_H_
+
+#include <stddef.h>
+
+#include <memory>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/platform_handle.h"
+
+namespace perfetto {
+
+// An abstract interface that models the shared memory region shared between
+// Service and Producer. The concrete implementation of this is up to the
+// transport layer. This can be as simple as a malloc()-ed buffer, if both
+// Producer and Service are hosted in the same process, or some posix shared
+// memory for the out-of-process case (see src/unix_rpc).
+// Both this class and the Factory are subclassed by the transport layer, which
+// will attach platform specific fields to it (e.g., a unix file descriptor).
+class PERFETTO_EXPORT SharedMemory {
+ public:
+  class PERFETTO_EXPORT Factory {
+   public:
+    virtual ~Factory();
+    virtual std::unique_ptr<SharedMemory> CreateSharedMemory(size_t) = 0;
+  };
+
+  // The transport layer is expected to tear down the resource associated to
+  // this object region when destroyed.
+  virtual ~SharedMemory();
+
+  virtual void* start() const = 0;
+  virtual size_t size() const = 0;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACING_SERVICE_H_
+#define INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACING_SERVICE_H_
+
+#include <stdint.h>
+
+#include <functional>
+#include <memory>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/buffer_exhausted_policy.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+
+namespace perfetto {
+
+namespace base {
+class TaskRunner;
+}  // namespace base
+
+class Consumer;
+class Producer;
+class SharedMemoryArbiter;
+class TraceWriter;
+
+// Exposed for testing.
+std::string GetBugreportPath();
+
+// TODO: for the moment this assumes that all the calls happen on the same
+// thread/sequence. Not sure this will be the case long term in Chrome.
+
+// The API for the Producer port of the Service.
+// Subclassed by:
+// 1. The tracing_service_impl.cc business logic when returning it in response
+//    to the ConnectProducer() method.
+// 2. The transport layer (e.g., src/ipc) when the producer and
+//    the service don't talk locally but via some IPC mechanism.
+class PERFETTO_EXPORT ProducerEndpoint {
+ public:
+  virtual ~ProducerEndpoint();
+
+  // Called by the Producer to (un)register data sources. Data sources are
+  // identified by their name (i.e. DataSourceDescriptor.name)
+  virtual void RegisterDataSource(const DataSourceDescriptor&) = 0;
+  virtual void UnregisterDataSource(const std::string& name) = 0;
+
+  // Associate the trace writer with the given |writer_id| with
+  // |target_buffer|. The service may use this information to retrieve and
+  // copy uncommitted chunks written by the trace writer into its associated
+  // buffer, e.g. when a producer process crashes or when a flush is
+  // necessary.
+  virtual void RegisterTraceWriter(uint32_t writer_id,
+                                   uint32_t target_buffer) = 0;
+
+  // Remove the association of the trace writer previously created via
+  // RegisterTraceWriter.
+  virtual void UnregisterTraceWriter(uint32_t writer_id) = 0;
+
+  // Called by the Producer to signal that some pages in the shared memory
+  // buffer (shared between Service and Producer) have changed.
+  // When the Producer and the Service are hosted in the same process and
+  // hence potentially live on the same task runner, This method must call
+  // TracingServiceImpl's CommitData synchronously, without any PostTask()s,
+  // if on the same thread. This is to avoid a deadlock where the Producer
+  // exhausts its SMB and stalls waiting for the service to catch up with
+  // reads, but the Service never gets to that because it lives on the same
+  // thread.
+  using CommitDataCallback = std::function<void()>;
+  virtual void CommitData(const CommitDataRequest&,
+                          CommitDataCallback callback = {}) = 0;
+
+  virtual SharedMemory* shared_memory() const = 0;
+
+  // Size of shared memory buffer pages. It's always a multiple of 4K.
+  // See shared_memory_abi.h
+  virtual size_t shared_buffer_page_size_kb() const = 0;
+
+  // Creates a trace writer, which allows to create events, handling the
+  // underying shared memory buffer and signalling to the Service. This method
+  // is thread-safe but the returned object is not. A TraceWriter should be
+  // used only from a single thread, or the caller has to handle sequencing
+  // via a mutex or equivalent. This method can only be called if
+  // TracingService::ConnectProducer was called with |in_process=true|.
+  // Args:
+  // |target_buffer| is the target buffer ID where the data produced by the
+  // writer should be stored by the tracing service. This value is passed
+  // upon creation of the data source (StartDataSource()) in the
+  // DataSourceConfig.target_buffer().
+  virtual std::unique_ptr<TraceWriter> CreateTraceWriter(
+      BufferID target_buffer,
+      BufferExhaustedPolicy buffer_exhausted_policy =
+          BufferExhaustedPolicy::kDefault) = 0;
+
+  // TODO(eseckler): Also expose CreateStartupTraceWriter() ?
+
+  // In some cases you can access the producer's SharedMemoryArbiter (for
+  // example if TracingService::ConnectProducer is called with
+  // |in_process=true|). The SharedMemoryArbiter can be used to create
+  // TraceWriters which is able to directly commit chunks. For the
+  // |in_process=true| case this can be done without going through an IPC layer.
+  virtual SharedMemoryArbiter* MaybeSharedMemoryArbiter() = 0;
+
+  // Whether the service accepted a shared memory buffer provided by the
+  // producer.
+  virtual bool IsShmemProvidedByProducer() const = 0;
+
+  // Called in response to a Producer::Flush(request_id) call after all data
+  // for the flush request has been committed.
+  virtual void NotifyFlushComplete(FlushRequestID) = 0;
+
+  // Called in response to one or more Producer::StartDataSource(),
+  // if the data source registered setting the flag
+  // DataSourceDescriptor.will_notify_on_start.
+  virtual void NotifyDataSourceStarted(DataSourceInstanceID) = 0;
+
+  // Called in response to one or more Producer::StopDataSource(),
+  // if the data source registered setting the flag
+  // DataSourceDescriptor.will_notify_on_stop.
+  virtual void NotifyDataSourceStopped(DataSourceInstanceID) = 0;
+
+  // This informs the service to activate any of these triggers if any tracing
+  // session was waiting for them.
+  virtual void ActivateTriggers(const std::vector<std::string>&) = 0;
+
+  // Emits a synchronization barrier to linearize with the service. When
+  // |callback| is invoked, the caller has the guarantee that the service has
+  // seen and processed all the requests sent by this producer prior to the
+  // Sync() call. Used mainly in tests.
+  virtual void Sync(std::function<void()> callback) = 0;
+};  // class ProducerEndpoint.
+
+// The API for the Consumer port of the Service.
+// Subclassed by:
+// 1. The tracing_service_impl.cc business logic when returning it in response
+// to
+//    the ConnectConsumer() method.
+// 2. The transport layer (e.g., src/ipc) when the consumer and
+//    the service don't talk locally but via some IPC mechanism.
+class PERFETTO_EXPORT ConsumerEndpoint {
+ public:
+  virtual ~ConsumerEndpoint();
+
+  // Enables tracing with the given TraceConfig. The ScopedFile argument is
+  // used only when TraceConfig.write_into_file == true.
+  // If TraceConfig.deferred_start == true data sources are configured via
+  // SetupDataSource() but are not started until StartTracing() is called.
+  // This is to support pre-initialization and fast triggering of traces.
+  // The ScopedFile argument is used only when TraceConfig.write_into_file
+  // == true.
+  virtual void EnableTracing(const TraceConfig&,
+                             base::ScopedFile = base::ScopedFile()) = 0;
+
+  // Update the trace config of an existing tracing session; only a subset
+  // of options can be changed mid-session. Currently the only
+  // supported functionality is expanding the list of producer_name_filters()
+  // (or removing the filter entirely) for existing data sources.
+  virtual void ChangeTraceConfig(const TraceConfig&) = 0;
+
+  // Starts all data sources configured in the trace config. This is used only
+  // after calling EnableTracing() with TraceConfig.deferred_start=true.
+  // It's a no-op if called after a regular EnableTracing(), without setting
+  // deferred_start.
+  virtual void StartTracing() = 0;
+
+  virtual void DisableTracing() = 0;
+
+  // Requests all data sources to flush their data immediately and invokes the
+  // passed callback once all of them have acked the flush (in which case
+  // the callback argument |success| will be true) or |timeout_ms| are elapsed
+  // (in which case |success| will be false).
+  // If |timeout_ms| is 0 the TraceConfig's flush_timeout_ms is used, or,
+  // if that one is not set (or is set to 0), kDefaultFlushTimeoutMs (5s) is
+  // used.
+  using FlushCallback = std::function<void(bool /*success*/)>;
+  virtual void Flush(uint32_t timeout_ms, FlushCallback) = 0;
+
+  // Tracing data will be delivered invoking Consumer::OnTraceData().
+  virtual void ReadBuffers() = 0;
+
+  virtual void FreeBuffers() = 0;
+
+  // Will call OnDetach().
+  virtual void Detach(const std::string& key) = 0;
+
+  // Will call OnAttach().
+  virtual void Attach(const std::string& key) = 0;
+
+  // Will call OnTraceStats().
+  virtual void GetTraceStats() = 0;
+
+  // Start or stop observing events of selected types. |events_mask| specifies
+  // the types of events to observe in a bitmask of ObservableEvents::Type.
+  // To disable observing, pass 0.
+  // Will call OnObservableEvents() repeatedly whenever an event of an enabled
+  // ObservableEventType occurs.
+  // TODO(eseckler): Extend this to support producers & data sources.
+  virtual void ObserveEvents(uint32_t events_mask) = 0;
+
+  // Used to obtain the list of connected data sources and other info about
+  // the tracing service.
+  using QueryServiceStateCallback =
+      std::function<void(bool success, const TracingServiceState&)>;
+  virtual void QueryServiceState(QueryServiceStateCallback) = 0;
+
+  // Used for feature detection. Makes sense only when the consumer and the
+  // service talk over IPC and can be from different versions.
+  using QueryCapabilitiesCallback =
+      std::function<void(const TracingServiceCapabilities&)>;
+  virtual void QueryCapabilities(QueryCapabilitiesCallback) = 0;
+
+  // If any tracing session with TraceConfig.bugreport_score > 0 is running,
+  // this will pick the highest-score one, stop it and save it into a fixed
+  // path (See kBugreportTracePath).
+  // The callback is invoked when the file has been saved, in case of success,
+  // or whenever an error occurs.
+  // Args:
+  // - success: if true, an eligible trace was found and saved into file.
+  //            If false, either there was no eligible trace running or
+  //            something else failed (See |msg|).
+  // - msg: human readable diagnostic messages to debug failures.
+  using SaveTraceForBugreportCallback =
+      std::function<void(bool /*success*/, const std::string& /*msg*/)>;
+  virtual void SaveTraceForBugreport(SaveTraceForBugreportCallback) = 0;
+};  // class ConsumerEndpoint.
+
+// The public API of the tracing Service business logic.
+//
+// Exposed to:
+// 1. The transport layer (e.g., src/unix_rpc/unix_service_host.cc),
+//    which forwards commands received from a remote producer or consumer to
+//    the actual service implementation.
+// 2. Tests.
+//
+// Subclassed by:
+//   The service business logic in src/core/tracing_service_impl.cc.
+class PERFETTO_EXPORT TracingService {
+ public:
+  using ProducerEndpoint = perfetto::ProducerEndpoint;
+  using ConsumerEndpoint = perfetto::ConsumerEndpoint;
+
+  enum class ProducerSMBScrapingMode {
+    // Use service's default setting for SMB scraping. Currently, the default
+    // mode is to disable SMB scraping, but this may change in the future.
+    kDefault,
+
+    // Enable scraping of uncommitted chunks in producers' shared memory
+    // buffers.
+    kEnabled,
+
+    // Disable scraping of uncommitted chunks in producers' shared memory
+    // buffers.
+    kDisabled
+  };
+
+  // Implemented in src/core/tracing_service_impl.cc .
+  static std::unique_ptr<TracingService> CreateInstance(
+      std::unique_ptr<SharedMemory::Factory>,
+      base::TaskRunner*);
+
+  virtual ~TracingService();
+
+  // Connects a Producer instance and obtains a ProducerEndpoint, which is
+  // essentially a 1:1 channel between one Producer and the Service.
+  //
+  // The caller has to guarantee that the passed Producer will be alive as long
+  // as the returned ProducerEndpoint is alive. Both the passed Producer and the
+  // returned ProducerEndpoint must live on the same task runner of the service,
+  // specifically:
+  // 1) The Service will call Producer::* methods on the Service's task runner.
+  // 2) The Producer should call ProducerEndpoint::* methods only on the
+  //    service's task runner, except for ProducerEndpoint::CreateTraceWriter(),
+  //    which can be called on any thread. To disconnect just destroy the
+  //    returned ProducerEndpoint object. It is safe to destroy the Producer
+  //    once the Producer::OnDisconnect() has been invoked.
+  //
+  // |uid| is the trusted user id of the producer process, used by the consumers
+  // for validating the origin of trace data. |shared_memory_size_hint_bytes|
+  // and |shared_memory_page_size_hint_bytes| are optional hints on the size of
+  // the shared memory buffer and its pages. The service can ignore the hints
+  // (e.g., if the hints are unreasonably large or other sizes were configured
+  // in a tracing session's config). |in_process| enables the ProducerEndpoint
+  // to manage its own shared memory and enables use of
+  // |ProducerEndpoint::CreateTraceWriter|.
+  //
+  // The producer can optionally provide a non-null |shm|, which the service
+  // will adopt for the connection to the producer, provided it is correctly
+  // sized. In this case, |shared_memory_page_size_hint_bytes| indicates the
+  // page size used in this SMB. The producer can use this mechanism to record
+  // tracing data to an SMB even before the tracing session is started by the
+  // service. This is used in Chrome to implement startup tracing. If the buffer
+  // is incorrectly sized, the service will discard the SMB and allocate a new
+  // one, provided to the producer via ProducerEndpoint::shared_memory() after
+  // OnTracingSetup(). To verify that the service accepted the SMB, the producer
+  // may check via ProducerEndpoint::IsShmemProvidedByProducer(). If the service
+  // accepted the SMB, the producer can then commit any data that is already in
+  // the SMB after the tracing session was started by the service via
+  // Producer::StartDataSource(). The |shm| will also be rejected when
+  // connecting to a service that is too old (pre Android-11).
+  //
+  // Can return null in the unlikely event that service has too many producers
+  // connected.
+  virtual std::unique_ptr<ProducerEndpoint> ConnectProducer(
+      Producer*,
+      uid_t uid,
+      const std::string& name,
+      size_t shared_memory_size_hint_bytes = 0,
+      bool in_process = false,
+      ProducerSMBScrapingMode smb_scraping_mode =
+          ProducerSMBScrapingMode::kDefault,
+      size_t shared_memory_page_size_hint_bytes = 0,
+      std::unique_ptr<SharedMemory> shm = nullptr,
+      const std::string& sdk_version = {}) = 0;
+
+  // Connects a Consumer instance and obtains a ConsumerEndpoint, which is
+  // essentially a 1:1 channel between one Consumer and the Service.
+  // The caller has to guarantee that the passed Consumer will be alive as long
+  // as the returned ConsumerEndpoint is alive.
+  // To disconnect just destroy the returned ConsumerEndpoint object. It is safe
+  // to destroy the Consumer once the Consumer::OnDisconnect() has been invoked.
+  virtual std::unique_ptr<ConsumerEndpoint> ConnectConsumer(Consumer*,
+                                                            uid_t) = 0;
+
+  // Enable/disable scraping of chunks in the shared memory buffer. If enabled,
+  // the service will copy uncommitted but non-empty chunks from the SMB when
+  // flushing (e.g. to handle unresponsive producers or producers unable to
+  // flush their active chunks), on producer disconnect (e.g. to recover data
+  // from crashed producers), and after disabling a tracing session (e.g. to
+  // gather data from producers that didn't stop their data sources in time).
+  //
+  // This feature is currently used by Chrome.
+  virtual void SetSMBScrapingEnabled(bool enabled) = 0;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACING_SERVICE_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ARBITER_H_
+#define INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ARBITER_H_
+
+#include <stddef.h>
+
+#include <functional>
+#include <memory>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/buffer_exhausted_policy.h"
+
+namespace perfetto {
+
+namespace base {
+class TaskRunner;
+}
+
+class SharedMemory;
+class TraceWriter;
+
+// Used by the Producer-side of the transport layer to vend TraceWriters
+// from the SharedMemory it receives from the Service-side.
+class PERFETTO_EXPORT SharedMemoryArbiter {
+ public:
+  virtual ~SharedMemoryArbiter();
+
+  // Creates a new TraceWriter and assigns it a new WriterID. The WriterID is
+  // written in each chunk header owned by a given TraceWriter and is used by
+  // the Service to reconstruct TracePackets written by the same TraceWriter.
+  // Returns null impl of TraceWriter if all WriterID slots are exhausted. The
+  // writer will commit to the provided |target_buffer|. If the arbiter was
+  // created via CreateUnbound(), only BufferExhaustedPolicy::kDrop is
+  // supported.
+  virtual std::unique_ptr<TraceWriter> CreateTraceWriter(
+      BufferID target_buffer,
+      BufferExhaustedPolicy buffer_exhausted_policy =
+          BufferExhaustedPolicy::kDefault) = 0;
+
+  // Creates a TraceWriter that will commit to the target buffer with the given
+  // reservation ID (creating a new reservation for this ID if none exists yet).
+  // The buffer reservation should be bound to an actual BufferID via
+  // BindStartupTargetBuffer() once the actual BufferID is known. Only supported
+  // if the arbiter was created using CreateUnbound(), and may be called while
+  // the arbiter is unbound.
+  //
+  // While any unbound buffer reservation exists, all commits will be buffered
+  // until all reservations were bound. Thus, until all reservations are bound,
+  // the data written to the SMB will not be consumed by the service - the SMB
+  // size should be chosen with this in mind. Startup writers always use
+  // BufferExhaustedPolicy::kDrop, as we cannot feasibly stall while not
+  // flushing to the service.
+  //
+  // The |target_buffer_reservation_id| should be greater than 0 but can
+  // otherwise be freely chosen by the producer and is only used to translate
+  // packets into the actual buffer id once
+  // BindStartupTargetBuffer(reservation_id) is called. For example, Chrome uses
+  // startup tracing not only for the first, but also subsequent tracing
+  // sessions (to enable tracing in the browser process before it instructs the
+  // tracing service to start tracing asynchronously, minimizing trace data loss
+  // in the meantime), and increments the reservation ID between sessions.
+  // Similarly, if more than a single target buffer per session is required
+  // (e.g. for two different data sources), different reservation IDs should be
+  // chosen for different targets buffers.
+  virtual std::unique_ptr<TraceWriter> CreateStartupTraceWriter(
+      uint16_t target_buffer_reservation_id) = 0;
+
+  // Should only be called on unbound SharedMemoryArbiters. Binds the arbiter to
+  // the provided ProducerEndpoint and TaskRunner. Should be called only once
+  // and on the provided |TaskRunner|. Usually called by the producer (i.e., no
+  // specific data source) once it connects to the service. Both the endpoint
+  // and task runner should remain valid for the remainder of the arbiter's
+  // lifetime.
+  virtual void BindToProducerEndpoint(TracingService::ProducerEndpoint*,
+                                      base::TaskRunner*) = 0;
+
+  // Binds commits from TraceWriters created via CreateStartupTraceWriter() with
+  // the given |target_buffer_reservation_id| to |target_buffer_id|. May only be
+  // called once per |target_buffer_reservation_id|. Should be called on the
+  // arbiter's TaskRunner, and after BindToProducerEndpoint() was called.
+  // Usually, it is called by a specific data source, after it received its
+  // configuration (including the target buffer ID) from the service.
+  virtual void BindStartupTargetBuffer(uint16_t target_buffer_reservation_id,
+                                       BufferID target_buffer_id) = 0;
+
+  // Treat the reservation as resolved to an invalid buffer. Commits for this
+  // reservation will be flushed to the service ASAP. The service will free
+  // committed chunks but otherwise ignore them. The producer can call this
+  // method, for example, if connection to the tracing service failed or the
+  // session was stopped concurrently before the connection was established.
+  virtual void AbortStartupTracingForReservation(
+      uint16_t target_buffer_reservation_id) = 0;
+
+  // Notifies the service that all data for the given FlushRequestID has been
+  // committed in the shared memory buffer. Should only be called while bound.
+  virtual void NotifyFlushComplete(FlushRequestID) = 0;
+
+  // Sets the duration during which commits are batched. Args:
+  // |batch_commits_duration_ms|: The length of the period, during which commits
+  // by all trace writers are accumulated, before being sent to the service.
+  // When the period ends, all accumulated commits are flushed. On the first
+  // commit after the last flush, another delayed flush is scheduled to run in
+  // |batch_commits_duration_ms|. If an immediate flush occurs (via
+  // FlushPendingCommitDataRequests()) during a batching period, any
+  // accumulated commits up to that point will be sent to the service
+  // immediately. And when the batching period ends, the commits that occurred
+  // after the immediate flush will also be sent to the service.
+  //
+  // If the duration has already been set to a non-zero value before this method
+  // is called, and there is already a scheduled flush with the previously-set
+  // duration, the new duration will take effect after the scheduled flush
+  // occurs.
+  //
+  // If |batch_commits_duration_ms| is non-zero, batched data that hasn't been
+  // sent could be lost at the end of a tracing session. To avoid this,
+  // producers should make sure that FlushPendingCommitDataRequests is called
+  // after the last TraceWriter write and before the service has stopped
+  // listening for commits from the tracing session's data sources (i.e.
+  // data sources should stop asynchronously, see
+  // DataSourceDescriptor.will_notify_on_stop=true).
+  virtual void SetBatchCommitsDuration(uint32_t batch_commits_duration_ms) = 0;
+
+  // Called to enable direct producer-side patching of chunks that have not yet
+  // been committed to the service. The return value indicates whether direct
+  // patching was successfully enabled. It will be true if
+  // SharedMemoryArbiter::SetDirectSMBPatchingSupportedByService has been called
+  // and false otherwise.
+  virtual bool EnableDirectSMBPatching() = 0;
+
+  // When the producer and service live in separate processes, this method
+  // should be called if the producer receives an
+  // InitializeConnectionResponse.direct_smb_patching_supported set to true by
+  // the service (see producer_port.proto) .
+  //
+  // In the in-process case, the service will always support direct SMB patching
+  // and this method should always be called.
+  virtual void SetDirectSMBPatchingSupportedByService() = 0;
+
+  // Forces an immediate commit of the completed packets, without waiting for
+  // the next task or for a batching period to end. Should only be called while
+  // bound.
+  virtual void FlushPendingCommitDataRequests(
+      std::function<void()> callback = {}) = 0;
+
+  // Attempts to shut down this arbiter. This function prevents new trace
+  // writers from being created for this this arbiter, but if there are any
+  // existing trace writers, the shutdown cannot proceed and this funtion
+  // returns false. The caller should not delete the arbiter before all of its
+  // associated trace writers have been destroyed and this function returns
+  // true.
+  virtual bool TryShutdown() = 0;
+
+  // Create a bound arbiter instance. Args:
+  // |SharedMemory|: the shared memory buffer to use.
+  // |page_size|: a multiple of 4KB that defines the granularity of tracing
+  // pages. See tradeoff considerations in shared_memory_abi.h.
+  // |ProducerEndpoint|: The service's producer endpoint used e.g. to commit
+  // chunks and register trace writers.
+  // |TaskRunner|: Task runner for perfetto's main thread, which executes the
+  // OnPagesCompleteCallback and IPC calls to the |ProducerEndpoint|.
+  //
+  // Implemented in src/core/shared_memory_arbiter_impl.cc.
+  static std::unique_ptr<SharedMemoryArbiter> CreateInstance(
+      SharedMemory*,
+      size_t page_size,
+      TracingService::ProducerEndpoint*,
+      base::TaskRunner*);
+
+  // Create an unbound arbiter instance, which should later be bound to a
+  // ProducerEndpoint and TaskRunner by calling BindToProducerEndpoint(). The
+  // returned arbiter will ONLY support trace writers with
+  // BufferExhaustedPolicy::kDrop.
+  //
+  // An unbound SharedMemoryArbiter can be used to write to a producer-created
+  // SharedMemory buffer before the producer connects to the tracing service.
+  // The producer can then pass this SMB to the service when it connects (see
+  // TracingService::ConnectProducer).
+  //
+  // To trace into the SMB before the service starts the tracing session, trace
+  // writers can be obtained via CreateStartupTraceWriter() and later associated
+  // with a target buffer via BindStartupTargetBuffer(), once the target buffer
+  // is known.
+  //
+  // Implemented in src/core/shared_memory_arbiter_impl.cc. See CreateInstance()
+  // for comments about the arguments.
+  static std::unique_ptr<SharedMemoryArbiter> CreateUnboundInstance(
+      SharedMemory*,
+      size_t page_size);
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACING_CORE_SHARED_MEMORY_ARBITER_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_CORE_SHARED_MEMORY_ARBITER_IMPL_H_
+#define SRC_TRACING_CORE_SHARED_MEMORY_ARBITER_IMPL_H_
+
+#include <stdint.h>
+
+#include <functional>
+#include <map>
+#include <memory>
+#include <mutex>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+// gen_amalgamated expanded: #include "src/tracing/core/id_allocator.h"
+
+namespace perfetto {
+
+class PatchList;
+class Patch;
+class TraceWriter;
+class TraceWriterImpl;
+
+namespace base {
+class TaskRunner;
+}  // namespace base
+
+// This class handles the shared memory buffer on the producer side. It is used
+// to obtain thread-local chunks and to partition pages from several threads.
+// There is one arbiter instance per Producer.
+// This class is thread-safe and uses locks to do so. Data sources are supposed
+// to interact with this sporadically, only when they run out of space on their
+// current thread-local chunk.
+//
+// When the arbiter is created using CreateUnboundInstance(), the following
+// state transitions are possible:
+//
+//   [ !fully_bound_, !endpoint_, 0 unbound buffer reservations ]
+//       |     |
+//       |     | CreateStartupTraceWriter(buf)
+//       |     |  buffer reservations += buf
+//       |     |
+//       |     |             ----
+//       |     |            |    | CreateStartupTraceWriter(buf)
+//       |     |            |    |  buffer reservations += buf
+//       |     V            |    V
+//       |   [ !fully_bound_, !endpoint_, >=1 unbound buffer reservations ]
+//       |                                                |
+//       |                       BindToProducerEndpoint() |
+//       |                                                |
+//       | BindToProducerEndpoint()                       |
+//       |                                                V
+//       |   [ !fully_bound_, endpoint_, >=1 unbound buffer reservations ]
+//       |   A    |    A                               |     A
+//       |   |    |    |                               |     |
+//       |   |     ----                                |     |
+//       |   |    CreateStartupTraceWriter(buf)        |     |
+//       |   |     buffer reservations += buf          |     |
+//       |   |                                         |     |
+//       |   | CreateStartupTraceWriter(buf)           |     |
+//       |   |  where buf is not yet bound             |     |
+//       |   |  buffer reservations += buf             |     | (yes)
+//       |   |                                         |     |
+//       |   |        BindStartupTargetBuffer(buf, id) |-----
+//       |   |           buffer reservations -= buf    | reservations > 0?
+//       |   |                                         |
+//       |   |                                         | (no)
+//       V   |                                         V
+//       [ fully_bound_, endpoint_, 0 unbound buffer reservations ]
+//          |    A
+//          |    | CreateStartupTraceWriter(buf)
+//          |    |  where buf is already bound
+//           ----
+class SharedMemoryArbiterImpl : public SharedMemoryArbiter {
+ public:
+  // See SharedMemoryArbiter::CreateInstance(). |start|, |size| define the
+  // boundaries of the shared memory buffer. ProducerEndpoint and TaskRunner may
+  // be |nullptr| if created unbound, see
+  // SharedMemoryArbiter::CreateUnboundInstance().
+  SharedMemoryArbiterImpl(void* start,
+                          size_t size,
+                          size_t page_size,
+                          TracingService::ProducerEndpoint*,
+                          base::TaskRunner*);
+
+  // Returns a new Chunk to write tracing data. Depending on the provided
+  // BufferExhaustedPolicy, this may return an invalid chunk if no valid free
+  // chunk could be found in the SMB.
+  SharedMemoryABI::Chunk GetNewChunk(const SharedMemoryABI::ChunkHeader&,
+                                     BufferExhaustedPolicy,
+                                     size_t size_hint = 0);
+
+  // Puts back a Chunk that has been completed and sends a request to the
+  // service to move it to the central tracing buffer. |target_buffer| is the
+  // absolute trace buffer ID where the service should move the chunk onto (the
+  // producer is just to copy back the same number received in the
+  // DataSourceConfig upon the StartDataSource() reques).
+  // PatchList is a pointer to the list of patches for previous chunks. The
+  // first patched entries will be removed from the patched list and sent over
+  // to the service in the same CommitData() IPC request.
+  void ReturnCompletedChunk(SharedMemoryABI::Chunk,
+                            MaybeUnboundBufferID target_buffer,
+                            PatchList*);
+
+  // Send a request to the service to apply completed patches from |patch_list|.
+  // |writer_id| is the ID of the TraceWriter that calls this method,
+  // |target_buffer| is the global trace buffer ID of its target buffer.
+  void SendPatches(WriterID writer_id,
+                   MaybeUnboundBufferID target_buffer,
+                   PatchList* patch_list);
+
+  SharedMemoryABI* shmem_abi_for_testing() { return &shmem_abi_; }
+
+  static void set_default_layout_for_testing(SharedMemoryABI::PageLayout l) {
+    default_page_layout = l;
+  }
+
+  // SharedMemoryArbiter implementation.
+  // See include/perfetto/tracing/core/shared_memory_arbiter.h for comments.
+  std::unique_ptr<TraceWriter> CreateTraceWriter(
+      BufferID target_buffer,
+      BufferExhaustedPolicy = BufferExhaustedPolicy::kDefault) override;
+  std::unique_ptr<TraceWriter> CreateStartupTraceWriter(
+      uint16_t target_buffer_reservation_id) override;
+  void BindToProducerEndpoint(TracingService::ProducerEndpoint*,
+                              base::TaskRunner*) override;
+  void BindStartupTargetBuffer(uint16_t target_buffer_reservation_id,
+                               BufferID target_buffer_id) override;
+  void AbortStartupTracingForReservation(
+      uint16_t target_buffer_reservation_id) override;
+  void NotifyFlushComplete(FlushRequestID) override;
+
+  void SetBatchCommitsDuration(uint32_t batch_commits_duration_ms) override;
+
+  bool EnableDirectSMBPatching() override;
+
+  void SetDirectSMBPatchingSupportedByService() override;
+
+  void FlushPendingCommitDataRequests(
+      std::function<void()> callback = {}) override;
+  bool TryShutdown() override;
+
+  base::TaskRunner* task_runner() const { return task_runner_; }
+  size_t page_size() const { return shmem_abi_.page_size(); }
+  size_t num_pages() const { return shmem_abi_.num_pages(); }
+
+  base::WeakPtr<SharedMemoryArbiterImpl> GetWeakPtr() const {
+    return weak_ptr_factory_.GetWeakPtr();
+  }
+
+ private:
+  friend class TraceWriterImpl;
+  friend class StartupTraceWriterTest;
+  friend class SharedMemoryArbiterImplTest;
+
+  struct TargetBufferReservation {
+    bool resolved = false;
+    BufferID target_buffer = kInvalidBufferId;
+  };
+
+  // Placeholder for the actual target buffer ID of a startup target buffer
+  // reservation ID in |target_buffer_reservations_|.
+  static constexpr BufferID kInvalidBufferId = 0;
+
+  static SharedMemoryABI::PageLayout default_page_layout;
+
+  SharedMemoryArbiterImpl(const SharedMemoryArbiterImpl&) = delete;
+  SharedMemoryArbiterImpl& operator=(const SharedMemoryArbiterImpl&) = delete;
+
+  void UpdateCommitDataRequest(SharedMemoryABI::Chunk chunk,
+                               WriterID writer_id,
+                               MaybeUnboundBufferID target_buffer,
+                               PatchList* patch_list);
+
+  // Search the chunks that are being batched in |commit_data_req_| for a chunk
+  // that needs patching and that matches the provided |writer_id| and
+  // |patch.chunk_id|. If found, apply |patch| to that chunk, and if
+  // |chunk_needs_more_patching| is true, clear the needs patching flag of the
+  // chunk and mark it as complete - to allow the service to read it (and other
+  // chunks after it) during scraping. Returns true if the patch was applied,
+  // false otherwise.
+  //
+  // Note: the caller must be holding |lock_| for the duration of the call.
+  bool TryDirectPatchLocked(WriterID writer_id,
+                            const Patch& patch,
+                            bool chunk_needs_more_patching);
+  std::unique_ptr<TraceWriter> CreateTraceWriterInternal(
+      MaybeUnboundBufferID target_buffer,
+      BufferExhaustedPolicy);
+
+  // Called by the TraceWriter destructor.
+  void ReleaseWriterID(WriterID);
+
+  void BindStartupTargetBufferImpl(std::unique_lock<std::mutex> scoped_lock,
+                                   uint16_t target_buffer_reservation_id,
+                                   BufferID target_buffer_id);
+
+  // If any flush callbacks were queued up while the arbiter or any target
+  // buffer reservation was unbound, this wraps the pending callbacks into a new
+  // std::function and returns it. Otherwise returns an invalid std::function.
+  std::function<void()> TakePendingFlushCallbacksLocked();
+
+  // Replace occurrences of target buffer reservation IDs in |commit_data_req_|
+  // with their respective actual BufferIDs if they were already bound. Returns
+  // true iff all occurrences were replaced.
+  bool ReplaceCommitPlaceholderBufferIdsLocked();
+
+  // Update and return |fully_bound_| based on the arbiter's |pending_writers_|
+  // state.
+  bool UpdateFullyBoundLocked();
+
+  const bool initially_bound_;
+
+  // Only accessed on |task_runner_| after the producer endpoint was bound.
+  TracingService::ProducerEndpoint* producer_endpoint_ = nullptr;
+
+  // --- Begin lock-protected members ---
+
+  std::mutex lock_;
+
+  base::TaskRunner* task_runner_ = nullptr;
+  SharedMemoryABI shmem_abi_;
+  size_t page_idx_ = 0;
+  std::unique_ptr<CommitDataRequest> commit_data_req_;
+  size_t bytes_pending_commit_ = 0;  // SUM(chunk.size() : commit_data_req_).
+  IdAllocator<WriterID> active_writer_ids_;
+  bool did_shutdown_ = false;
+
+  // Whether the arbiter itself and all startup target buffer reservations are
+  // bound. Note that this can become false again later if a new target buffer
+  // reservation is created by calling CreateStartupTraceWriter() with a new
+  // reservation id.
+  bool fully_bound_;
+
+  // IDs of writers and their assigned target buffers that should be registered
+  // with the service after the arbiter and/or their startup target buffer is
+  // bound.
+  std::map<WriterID, MaybeUnboundBufferID> pending_writers_;
+
+  // Callbacks for flush requests issued while the arbiter or a target buffer
+  // reservation was unbound.
+  std::vector<std::function<void()>> pending_flush_callbacks_;
+
+  // See SharedMemoryArbiter::SetBatchCommitsDuration.
+  uint32_t batch_commits_duration_ms_ = 0;
+
+  // See SharedMemoryArbiter::EnableDirectSMBPatching.
+  bool direct_patching_enabled_ = false;
+
+  // See SharedMemoryArbiter::SetDirectSMBPatchingSupportedByService.
+  bool direct_patching_supported_by_service_ = false;
+
+  // Indicates whether we have already scheduled a delayed flush for the
+  // purposes of batching. Set to true at the beginning of a batching period and
+  // cleared at the end of the period. Immediate flushes that happen during a
+  // batching period will empty the |commit_data_req| (triggering an immediate
+  // IPC to the service), but will not clear this flag and the
+  // previously-scheduled delayed flush will still occur at the end of the
+  // batching period.
+  bool delayed_flush_scheduled_ = false;
+
+  // Stores target buffer reservations for writers created via
+  // CreateStartupTraceWriter(). A bound reservation sets
+  // TargetBufferReservation::resolved to true and is associated with the actual
+  // BufferID supplied in BindStartupTargetBuffer().
+  //
+  // TODO(eseckler): Clean up entries from this map. This would probably require
+  // a method in SharedMemoryArbiter that allows a producer to invalidate a
+  // reservation ID.
+  std::map<MaybeUnboundBufferID, TargetBufferReservation>
+      target_buffer_reservations_;
+
+  // --- End lock-protected members ---
+
+  // Keep at the end.
+  base::WeakPtrFactory<SharedMemoryArbiterImpl> weak_ptr_factory_;
+};
+
+}  // namespace perfetto
+
+#endif  // SRC_TRACING_CORE_SHARED_MEMORY_ARBITER_IMPL_H_
+// gen_amalgamated begin header: include/perfetto/ext/tracing/core/commit_data_request.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_COMMIT_DATA_REQUEST_H_
+#define INCLUDE_PERFETTO_EXT_TRACING_CORE_COMMIT_DATA_REQUEST_H_
+
+// Creates the aliases in the ::perfetto namespace, doing things like:
+// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
+// See comments in forward_decls.h for the historical reasons of this
+// indirection layer.
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/common/commit_data_request.gen.h"
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACING_CORE_COMMIT_DATA_REQUEST_H_
+// gen_amalgamated begin header: src/tracing/core/trace_writer_impl.h
+// gen_amalgamated begin header: src/tracing/core/patch_list.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_CORE_PATCH_LIST_H_
+#define SRC_TRACING_CORE_PATCH_LIST_H_
+
+#include <array>
+#include <forward_list>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
+
+namespace perfetto {
+
+// Used to handle the backfilling of the headers (the |size_field|) of nested
+// messages when a proto is fragmented over several chunks. These patches are
+// sent out-of-band to the tracing service, after having returned the initial
+// chunks of the fragment.
+// TODO(crbug.com/904477): Re-disable the move constructors when all usses of
+// this class have been fixed.
+class Patch {
+ public:
+  using PatchContent = std::array<uint8_t, SharedMemoryABI::kPacketHeaderSize>;
+  Patch(ChunkID c, uint16_t o) : chunk_id(c), offset(o) {}
+  Patch(const Patch&) = default;  // For tests.
+
+  const ChunkID chunk_id;
+  const uint16_t offset;
+  PatchContent size_field{};
+
+  // |size_field| contains a varint. Any varint must start with != 0. Even in
+  // the case we want to encode a size == 0, protozero will write a redundant
+  // varint for that, that is [0x80, 0x80, 0x80, 0x00]. So the first byte is 0
+  // iff we never wrote any varint into that.
+  bool is_patched() const { return size_field[0] != 0; }
+
+  // For tests.
+  bool operator==(const Patch& o) const {
+    return chunk_id == o.chunk_id && offset == o.offset &&
+           size_field == o.size_field;
+  }
+
+ private:
+  Patch& operator=(const Patch&) = delete;
+};
+
+// Note: the protozero::Message(s) will take pointers to the |size_field| of
+// these entries. This container must guarantee that the Patch objects are never
+// moved around (i.e. cannot be a vector because of reallocations can change
+// addresses of pre-existing entries).
+class PatchList {
+ public:
+  using ListType = std::forward_list<Patch>;
+  using value_type = ListType::value_type;          // For gtest.
+  using const_iterator = ListType::const_iterator;  // For gtest.
+
+  PatchList() : last_(list_.before_begin()) {}
+
+  Patch* emplace_back(ChunkID chunk_id, uint16_t offset) {
+    PERFETTO_DCHECK(empty() || last_->chunk_id != chunk_id ||
+                    offset >= last_->offset + sizeof(Patch::PatchContent));
+    last_ = list_.emplace_after(last_, chunk_id, offset);
+    return &*last_;
+  }
+
+  void pop_front() {
+    PERFETTO_DCHECK(!list_.empty());
+    list_.pop_front();
+    if (empty())
+      last_ = list_.before_begin();
+  }
+
+  const Patch& front() const {
+    PERFETTO_DCHECK(!list_.empty());
+    return list_.front();
+  }
+
+  const Patch& back() const {
+    PERFETTO_DCHECK(!list_.empty());
+    return *last_;
+  }
+
+  ListType::const_iterator begin() const { return list_.begin(); }
+  ListType::const_iterator end() const { return list_.end(); }
+  bool empty() const { return list_.empty(); }
+
+ private:
+  ListType list_;
+  ListType::iterator last_;
+};
+
+}  // namespace perfetto
+
+#endif  // SRC_TRACING_CORE_PATCH_LIST_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_CORE_TRACE_WRITER_IMPL_H_
+#define SRC_TRACING_CORE_TRACE_WRITER_IMPL_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/proc_utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/root_message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_writer.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/buffer_exhausted_policy.h"
+// gen_amalgamated expanded: #include "src/tracing/core/patch_list.h"
+
+namespace perfetto {
+
+class SharedMemoryArbiterImpl;
+
+// See //include/perfetto/tracing/core/trace_writer.h for docs.
+class TraceWriterImpl : public TraceWriter,
+                        public protozero::ScatteredStreamWriter::Delegate {
+ public:
+  // TracePacketHandle is defined in trace_writer.h
+  TraceWriterImpl(SharedMemoryArbiterImpl*,
+                  WriterID,
+                  MaybeUnboundBufferID buffer_id,
+                  BufferExhaustedPolicy);
+  ~TraceWriterImpl() override;
+
+  // TraceWriter implementation. See documentation in trace_writer.h.
+  TracePacketHandle NewTracePacket() override;
+  void Flush(std::function<void()> callback = {}) override;
+  WriterID writer_id() const override;
+  uint64_t written() const override {
+    return protobuf_stream_writer_.written();
+  }
+
+  void ResetChunkForTesting() { cur_chunk_ = SharedMemoryABI::Chunk(); }
+  bool drop_packets_for_testing() const { return drop_packets_; }
+
+ private:
+  TraceWriterImpl(const TraceWriterImpl&) = delete;
+  TraceWriterImpl& operator=(const TraceWriterImpl&) = delete;
+
+  // ScatteredStreamWriter::Delegate implementation.
+  protozero::ContiguousMemoryRange GetNewBuffer() override;
+
+  // The per-producer arbiter that coordinates access to the shared memory
+  // buffer from several threads.
+  SharedMemoryArbiterImpl* const shmem_arbiter_;
+
+  // ID of the current writer.
+  const WriterID id_;
+
+  // This is copied into the commit request by SharedMemoryArbiter. See comments
+  // in data_source_config.proto for |target_buffer|. If this is a reservation
+  // for a buffer ID in case of a startup trace writer, SharedMemoryArbiterImpl
+  // will also translate the reservation ID to the actual buffer ID.
+  const MaybeUnboundBufferID target_buffer_;
+
+  // Whether GetNewChunk() should stall or return an invalid chunk if the SMB is
+  // exhausted.
+  const BufferExhaustedPolicy buffer_exhausted_policy_;
+
+  // Monotonic (% wrapping) sequence id of the chunk. Together with the WriterID
+  // this allows the Service to reconstruct the linear sequence of packets.
+  ChunkID next_chunk_id_ = 0;
+
+  // The chunk we are holding onto (if any).
+  SharedMemoryABI::Chunk cur_chunk_;
+
+  // Passed to protozero message to write directly into |cur_chunk_|. It
+  // keeps track of the write pointer. It calls us back (GetNewBuffer()) when
+  // |cur_chunk_| is filled.
+  protozero::ScatteredStreamWriter protobuf_stream_writer_;
+
+  // The packet returned via NewTracePacket(). Its owned by this class,
+  // TracePacketHandle has just a pointer to it.
+  std::unique_ptr<protozero::RootMessage<protos::pbzero::TracePacket>>
+      cur_packet_;
+
+  // The start address of |cur_packet_| within |cur_chunk_|. Used to figure out
+  // fragments sizes when a TracePacket write is interrupted by GetNewBuffer().
+  uint8_t* cur_fragment_start_ = nullptr;
+
+  // true if we received a call to GetNewBuffer() after NewTracePacket(),
+  // false if GetNewBuffer() happened during NewTracePacket() prologue, while
+  // starting the TracePacket header.
+  bool fragmenting_packet_ = false;
+
+  // Set to |true| when the current chunk contains the maximum number of packets
+  // a chunk can contain. When this is |true|, the next packet requires starting
+  // a new chunk.
+  bool reached_max_packets_per_chunk_ = false;
+
+  // If we fail to acquire a new chunk when the arbiter operates in
+  // SharedMemory::BufferExhaustedPolicy::kDrop mode, the trace writer enters a
+  // mode in which data is written to a local garbage chunk and dropped.
+  bool drop_packets_ = false;
+
+  // Whether the trace writer should try to acquire a new chunk from the SMB
+  // when the next TracePacket is started because it filled the garbage chunk at
+  // least once since the last attempt.
+  bool retry_new_chunk_after_packet_ = false;
+
+  // Points to the size field of the last packet we wrote to the current chunk.
+  // If the chunk was already returned, this is reset to |nullptr|.
+  uint8_t* last_packet_size_field_ = nullptr;
+
+  // When a packet is fragmented across different chunks, the |size_field| of
+  // the outstanding nested protobuf messages is redirected onto Patch entries
+  // in this list at the time the Chunk is returned (because at that point we
+  // have to release the ownership of the current Chunk). This list will be
+  // later sent out-of-band to the tracing service, who will patch the required
+  // chunks, if they are still around.
+  PatchList patch_list_;
+
+  // PID of the process that created the trace writer. Used for a DCHECK that
+  // aims to detect unsupported process forks while tracing.
+  const base::PlatformProcessId process_id_;
+};
+
+}  // namespace perfetto
+
+#endif  // SRC_TRACING_CORE_TRACE_WRITER_IMPL_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/tracing/core/shared_memory_arbiter_impl.h"
+
+#include <algorithm>
+#include <limits>
+#include <utility>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/base/time.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/commit_data_request.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
+// gen_amalgamated expanded: #include "src/tracing/core/null_trace_writer.h"
+// gen_amalgamated expanded: #include "src/tracing/core/trace_writer_impl.h"
+
+namespace perfetto {
+
+using Chunk = SharedMemoryABI::Chunk;
+
+namespace {
+static_assert(sizeof(BufferID) == sizeof(uint16_t),
+              "The MaybeUnboundBufferID logic requires BufferID not to grow "
+              "above uint16_t.");
+
+MaybeUnboundBufferID MakeTargetBufferIdForReservation(uint16_t reservation_id) {
+  // Reservation IDs are stored in the upper bits.
+  PERFETTO_CHECK(reservation_id > 0);
+  return static_cast<MaybeUnboundBufferID>(reservation_id) << 16;
+}
+
+bool IsReservationTargetBufferId(MaybeUnboundBufferID buffer_id) {
+  return (buffer_id >> 16) > 0;
+}
+}  // namespace
+
+// static
+SharedMemoryABI::PageLayout SharedMemoryArbiterImpl::default_page_layout =
+    SharedMemoryABI::PageLayout::kPageDiv1;
+
+// static
+constexpr BufferID SharedMemoryArbiterImpl::kInvalidBufferId;
+
+// static
+std::unique_ptr<SharedMemoryArbiter> SharedMemoryArbiter::CreateInstance(
+    SharedMemory* shared_memory,
+    size_t page_size,
+    TracingService::ProducerEndpoint* producer_endpoint,
+    base::TaskRunner* task_runner) {
+  return std::unique_ptr<SharedMemoryArbiterImpl>(
+      new SharedMemoryArbiterImpl(shared_memory->start(), shared_memory->size(),
+                                  page_size, producer_endpoint, task_runner));
+}
+
+// static
+std::unique_ptr<SharedMemoryArbiter> SharedMemoryArbiter::CreateUnboundInstance(
+    SharedMemory* shared_memory,
+    size_t page_size) {
+  return std::unique_ptr<SharedMemoryArbiterImpl>(new SharedMemoryArbiterImpl(
+      shared_memory->start(), shared_memory->size(), page_size,
+      /*producer_endpoint=*/nullptr, /*task_runner=*/nullptr));
+}
+
+SharedMemoryArbiterImpl::SharedMemoryArbiterImpl(
+    void* start,
+    size_t size,
+    size_t page_size,
+    TracingService::ProducerEndpoint* producer_endpoint,
+    base::TaskRunner* task_runner)
+    : initially_bound_(task_runner && producer_endpoint),
+      producer_endpoint_(producer_endpoint),
+      task_runner_(task_runner),
+      shmem_abi_(reinterpret_cast<uint8_t*>(start), size, page_size),
+      active_writer_ids_(kMaxWriterID),
+      fully_bound_(initially_bound_),
+      weak_ptr_factory_(this) {}
+
+Chunk SharedMemoryArbiterImpl::GetNewChunk(
+    const SharedMemoryABI::ChunkHeader& header,
+    BufferExhaustedPolicy buffer_exhausted_policy,
+    size_t size_hint) {
+  PERFETTO_DCHECK(size_hint == 0);  // Not implemented yet.
+  // If initially unbound, we do not support stalling. In theory, we could
+  // support stalling for TraceWriters created after the arbiter and startup
+  // buffer reservations were bound, but to avoid raciness between the creation
+  // of startup writers and binding, we categorically forbid kStall mode.
+  PERFETTO_DCHECK(initially_bound_ ||
+                  buffer_exhausted_policy == BufferExhaustedPolicy::kDrop);
+
+  int stall_count = 0;
+  unsigned stall_interval_us = 0;
+  bool task_runner_runs_on_current_thread = false;
+  static const unsigned kMaxStallIntervalUs = 100000;
+  static const int kLogAfterNStalls = 3;
+  static const int kFlushCommitsAfterEveryNStalls = 2;
+  static const int kAssertAtNStalls = 100;
+
+  for (;;) {
+    // TODO(primiano): Probably this lock is not really required and this code
+    // could be rewritten leveraging only the Try* atomic operations in
+    // SharedMemoryABI. But let's not be too adventurous for the moment.
+    {
+      std::unique_lock<std::mutex> scoped_lock(lock_);
+
+      task_runner_runs_on_current_thread =
+          task_runner_ && task_runner_->RunsTasksOnCurrentThread();
+
+      // If more than half of the SMB.size() is filled with completed chunks for
+      // which we haven't notified the service yet (i.e. they are still enqueued
+      // in |commit_data_req_|), force a synchronous CommitDataRequest() even if
+      // we acquire a chunk, to reduce the likeliness of stalling the writer.
+      //
+      // We can only do this if we're writing on the same thread that we access
+      // the producer endpoint on, since we cannot notify the producer endpoint
+      // to commit synchronously on a different thread. Attempting to flush
+      // synchronously on another thread will lead to subtle bugs caused by
+      // out-of-order commit requests (crbug.com/919187#c28).
+      bool should_commit_synchronously =
+          task_runner_runs_on_current_thread &&
+          buffer_exhausted_policy == BufferExhaustedPolicy::kStall &&
+          commit_data_req_ && bytes_pending_commit_ >= shmem_abi_.size() / 2;
+
+      const size_t initial_page_idx = page_idx_;
+      for (size_t i = 0; i < shmem_abi_.num_pages(); i++) {
+        page_idx_ = (initial_page_idx + i) % shmem_abi_.num_pages();
+        bool is_new_page = false;
+
+        // TODO(primiano): make the page layout dynamic.
+        auto layout = SharedMemoryArbiterImpl::default_page_layout;
+
+        if (shmem_abi_.is_page_free(page_idx_)) {
+          // TODO(primiano): Use the |size_hint| here to decide the layout.
+          is_new_page = shmem_abi_.TryPartitionPage(page_idx_, layout);
+        }
+        uint32_t free_chunks;
+        if (is_new_page) {
+          free_chunks = (1 << SharedMemoryABI::kNumChunksForLayout[layout]) - 1;
+        } else {
+          free_chunks = shmem_abi_.GetFreeChunks(page_idx_);
+        }
+
+        for (uint32_t chunk_idx = 0; free_chunks;
+             chunk_idx++, free_chunks >>= 1) {
+          if (!(free_chunks & 1))
+            continue;
+          // We found a free chunk.
+          Chunk chunk = shmem_abi_.TryAcquireChunkForWriting(
+              page_idx_, chunk_idx, &header);
+          if (!chunk.is_valid())
+            continue;
+          if (stall_count > kLogAfterNStalls) {
+            PERFETTO_LOG("Recovered from stall after %d iterations",
+                         stall_count);
+          }
+
+          if (should_commit_synchronously) {
+            // We can't flush while holding the lock.
+            scoped_lock.unlock();
+            FlushPendingCommitDataRequests();
+            return chunk;
+          } else {
+            return chunk;
+          }
+        }
+      }
+    }  // scoped_lock
+
+    if (buffer_exhausted_policy == BufferExhaustedPolicy::kDrop) {
+      PERFETTO_DLOG("Shared memory buffer exhaused, returning invalid Chunk!");
+      return Chunk();
+    }
+
+    PERFETTO_DCHECK(initially_bound_);
+
+    // All chunks are taken (either kBeingWritten by us or kBeingRead by the
+    // Service).
+    if (stall_count++ == kLogAfterNStalls) {
+      PERFETTO_LOG("Shared memory buffer overrun! Stalling");
+    }
+
+    if (stall_count == kAssertAtNStalls) {
+      PERFETTO_FATAL(
+          "Shared memory buffer max stall count exceeded; possible deadlock");
+    }
+
+    // If the IPC thread itself is stalled because the current process has
+    // filled up the SMB, we need to make sure that the service can process and
+    // purge the chunks written by our process, by flushing any pending commit
+    // requests. Because other threads in our process can continue to
+    // concurrently grab, fill and commit any chunks purged by the service, it
+    // is possible that the SMB remains full and the IPC thread remains stalled,
+    // needing to flush the concurrently queued up commits again. This is
+    // particularly likely with in-process perfetto service where the IPC thread
+    // is the service thread. To avoid remaining stalled forever in such a
+    // situation, we attempt to flush periodically after every N stalls.
+    if (stall_count % kFlushCommitsAfterEveryNStalls == 0 &&
+        task_runner_runs_on_current_thread) {
+      // TODO(primiano): sending the IPC synchronously is a temporary workaround
+      // until the backpressure logic in probes_producer is sorted out. Until
+      // then the risk is that we stall the message loop waiting for the tracing
+      // service to consume the shared memory buffer (SMB) and, for this reason,
+      // never run the task that tells the service to purge the SMB. This must
+      // happen iff we are on the IPC thread, not doing this will cause
+      // deadlocks, doing this on the wrong thread causes out-of-order data
+      // commits (crbug.com/919187#c28).
+      FlushPendingCommitDataRequests();
+    } else {
+      base::SleepMicroseconds(stall_interval_us);
+      stall_interval_us =
+          std::min(kMaxStallIntervalUs, (stall_interval_us + 1) * 8);
+    }
+  }
+}
+
+void SharedMemoryArbiterImpl::ReturnCompletedChunk(
+    Chunk chunk,
+    MaybeUnboundBufferID target_buffer,
+    PatchList* patch_list) {
+  PERFETTO_DCHECK(chunk.is_valid());
+  const WriterID writer_id = chunk.writer_id();
+  UpdateCommitDataRequest(std::move(chunk), writer_id, target_buffer,
+                          patch_list);
+}
+
+void SharedMemoryArbiterImpl::SendPatches(WriterID writer_id,
+                                          MaybeUnboundBufferID target_buffer,
+                                          PatchList* patch_list) {
+  PERFETTO_DCHECK(!patch_list->empty() && patch_list->front().is_patched());
+  UpdateCommitDataRequest(Chunk(), writer_id, target_buffer, patch_list);
+}
+
+void SharedMemoryArbiterImpl::UpdateCommitDataRequest(
+    Chunk chunk,
+    WriterID writer_id,
+    MaybeUnboundBufferID target_buffer,
+    PatchList* patch_list) {
+  // Note: chunk will be invalid if the call came from SendPatches().
+  base::TaskRunner* task_runner_to_post_delayed_callback_on = nullptr;
+  // The delay with which the flush will be posted.
+  uint32_t flush_delay_ms = 0;
+  base::WeakPtr<SharedMemoryArbiterImpl> weak_this;
+  {
+    std::lock_guard<std::mutex> scoped_lock(lock_);
+
+    if (!commit_data_req_) {
+      commit_data_req_.reset(new CommitDataRequest());
+
+      // Flushing the commit is only supported while we're |fully_bound_|. If we
+      // aren't, we'll flush when |fully_bound_| is updated.
+      if (fully_bound_ && !delayed_flush_scheduled_) {
+        weak_this = weak_ptr_factory_.GetWeakPtr();
+        task_runner_to_post_delayed_callback_on = task_runner_;
+        flush_delay_ms = batch_commits_duration_ms_;
+        delayed_flush_scheduled_ = true;
+      }
+    }
+
+    // If a valid chunk is specified, return it and attach it to the request.
+    if (chunk.is_valid()) {
+      PERFETTO_DCHECK(chunk.writer_id() == writer_id);
+      uint8_t chunk_idx = chunk.chunk_idx();
+      bytes_pending_commit_ += chunk.size();
+      size_t page_idx;
+      // If the chunk needs patching, it should not be marked as complete yet,
+      // because this would indicate to the service that the producer will not
+      // be writing to it anymore, while the producer might still apply patches
+      // to the chunk later on. In particular, when re-reading (e.g. because of
+      // periodic scraping) a completed chunk, the service expects the flags of
+      // that chunk not to be removed between reads. So, let's say the producer
+      // marked the chunk as complete here and the service then read it for the
+      // first time. If the producer then fully patched the chunk, thus removing
+      // the kChunkNeedsPatching flag, and the service re-read the chunk after
+      // the patching, the service would be thrown off by the removed flag.
+      if (direct_patching_enabled_ &&
+          (chunk.GetPacketCountAndFlags().second &
+           SharedMemoryABI::ChunkHeader::kChunkNeedsPatching)) {
+        page_idx = shmem_abi_.GetPageAndChunkIndex(std::move(chunk)).first;
+      } else {
+        // If the chunk doesn't need patching, we can mark it as complete
+        // immediately. This allows the service to read it in full while
+        // scraping, which would not be the case if the chunk was left in a
+        // kChunkBeingWritten state.
+        page_idx = shmem_abi_.ReleaseChunkAsComplete(std::move(chunk));
+      }
+
+      // DO NOT access |chunk| after this point, it has been std::move()-d
+      // above.
+      CommitDataRequest::ChunksToMove* ctm =
+          commit_data_req_->add_chunks_to_move();
+      ctm->set_page(static_cast<uint32_t>(page_idx));
+      ctm->set_chunk(chunk_idx);
+      ctm->set_target_buffer(target_buffer);
+    }
+
+    // Process the completed patches for previous chunks from the |patch_list|.
+    CommitDataRequest::ChunkToPatch* last_patch_req = nullptr;
+    while (!patch_list->empty() && patch_list->front().is_patched()) {
+      Patch curr_patch = patch_list->front();
+      patch_list->pop_front();
+      // Patches for the same chunk are contiguous in the |patch_list|. So, to
+      // determine if there are any other patches that apply to the chunk that
+      // is being patched, check if the next patch in the |patch_list| applies
+      // to the same chunk.
+      bool chunk_needs_more_patching =
+          !patch_list->empty() &&
+          patch_list->front().chunk_id == curr_patch.chunk_id;
+
+      if (direct_patching_enabled_ &&
+          TryDirectPatchLocked(writer_id, curr_patch,
+                               chunk_needs_more_patching)) {
+        continue;
+      }
+
+      // The chunk that this patch applies to has already been released to the
+      // service, so it cannot be patches here. Add the patch to the commit data
+      // request, so that it can be sent to the service and applied there.
+      if (!last_patch_req ||
+          last_patch_req->chunk_id() != curr_patch.chunk_id) {
+        last_patch_req = commit_data_req_->add_chunks_to_patch();
+        last_patch_req->set_writer_id(writer_id);
+        last_patch_req->set_chunk_id(curr_patch.chunk_id);
+        last_patch_req->set_target_buffer(target_buffer);
+      }
+      auto* patch = last_patch_req->add_patches();
+      patch->set_offset(curr_patch.offset);
+      patch->set_data(&curr_patch.size_field[0], curr_patch.size_field.size());
+    }
+
+    // Patches are enqueued in the |patch_list| in order and are notified to
+    // the service when the chunk is returned. The only case when the current
+    // patch list is incomplete is if there is an unpatched entry at the head of
+    // the |patch_list| that belongs to the same ChunkID as the last one we are
+    // about to send to the service.
+    if (last_patch_req && !patch_list->empty() &&
+        patch_list->front().chunk_id == last_patch_req->chunk_id()) {
+      last_patch_req->set_has_more_patches(true);
+    }
+
+    // If the buffer is filling up or if we are given a patch for a chunk
+    // that was already sent to the service, we don't want to wait for the next
+    // delayed flush to happen and we flush immediately. Otherwise, if we
+    // accumulate the patch and a crash occurs before the patch is sent, the
+    // service will not know of the patch and won't be able to reconstruct the
+    // trace.
+    if (fully_bound_ &&
+        (last_patch_req || bytes_pending_commit_ >= shmem_abi_.size() / 2)) {
+      weak_this = weak_ptr_factory_.GetWeakPtr();
+      task_runner_to_post_delayed_callback_on = task_runner_;
+      flush_delay_ms = 0;
+    }
+  }  // scoped_lock(lock_)
+
+  // We shouldn't post tasks while locked.
+  // |task_runner_to_post_delayed_callback_on| remains valid after unlocking,
+  // because |task_runner_| is never reset.
+  if (task_runner_to_post_delayed_callback_on) {
+    task_runner_to_post_delayed_callback_on->PostDelayedTask(
+        [weak_this] {
+          if (!weak_this)
+            return;
+          {
+            std::lock_guard<std::mutex> scoped_lock(weak_this->lock_);
+            // Clear |delayed_flush_scheduled_|, allowing the next call to
+            // UpdateCommitDataRequest to start another batching period.
+            weak_this->delayed_flush_scheduled_ = false;
+          }
+          weak_this->FlushPendingCommitDataRequests();
+        },
+        flush_delay_ms);
+  }
+}
+
+bool SharedMemoryArbiterImpl::TryDirectPatchLocked(
+    WriterID writer_id,
+    const Patch& patch,
+    bool chunk_needs_more_patching) {
+  // Search the chunks that are being batched in |commit_data_req_| for a chunk
+  // that needs patching and that matches the provided |writer_id| and
+  // |patch.chunk_id|. Iterate |commit_data_req_| in reverse, since
+  // |commit_data_req_| is appended to at the end with newly-returned chunks,
+  // and patches are more likely to apply to chunks that have been returned
+  // recently.
+  SharedMemoryABI::Chunk chunk;
+  bool chunk_found = false;
+  auto& chunks_to_move = commit_data_req_->chunks_to_move();
+  for (auto ctm_it = chunks_to_move.rbegin(); ctm_it != chunks_to_move.rend();
+       ++ctm_it) {
+    uint32_t layout = shmem_abi_.GetPageLayout(ctm_it->page());
+    auto chunk_state =
+        shmem_abi_.GetChunkStateFromLayout(layout, ctm_it->chunk());
+    // Note: the subset of |commit_data_req_| chunks that still need patching is
+    // also the subset of chunks that are still being written to. The rest of
+    // the chunks in |commit_data_req_| do not need patching and have already
+    // been marked as complete.
+    if (chunk_state != SharedMemoryABI::kChunkBeingWritten)
+      continue;
+
+    chunk =
+        shmem_abi_.GetChunkUnchecked(ctm_it->page(), layout, ctm_it->chunk());
+    if (chunk.writer_id() == writer_id &&
+        chunk.header()->chunk_id.load(std::memory_order_relaxed) ==
+            patch.chunk_id) {
+      chunk_found = true;
+      break;
+    }
+  }
+
+  if (!chunk_found) {
+    // The chunk has already been committed to the service and the patch cannot
+    // be applied in the producer.
+    return false;
+  }
+
+  // Apply the patch.
+  size_t page_idx;
+  uint8_t chunk_idx;
+  std::tie(page_idx, chunk_idx) = shmem_abi_.GetPageAndChunkIndex(chunk);
+  PERFETTO_DCHECK(shmem_abi_.GetChunkState(page_idx, chunk_idx) ==
+                  SharedMemoryABI::ChunkState::kChunkBeingWritten);
+  auto chunk_begin = chunk.payload_begin();
+  uint8_t* ptr = chunk_begin + patch.offset;
+  PERFETTO_CHECK(ptr <= chunk.end() - SharedMemoryABI::kPacketHeaderSize);
+  // DCHECK that we are writing into a zero-filled size field and not into
+  // valid data. It relies on ScatteredStreamWriter::ReserveBytes() to
+  // zero-fill reservations in debug builds.
+  const char zero[SharedMemoryABI::kPacketHeaderSize]{};
+  PERFETTO_DCHECK(memcmp(ptr, &zero, SharedMemoryABI::kPacketHeaderSize) == 0);
+
+  memcpy(ptr, &patch.size_field[0], SharedMemoryABI::kPacketHeaderSize);
+
+  if (!chunk_needs_more_patching) {
+    // Mark that the chunk doesn't need more patching and mark it as complete,
+    // as the producer will not write to it anymore. This allows the service to
+    // read the chunk in full while scraping, which would not be the case if the
+    // chunk was left in a kChunkBeingWritten state.
+    chunk.ClearNeedsPatchingFlag();
+    shmem_abi_.ReleaseChunkAsComplete(std::move(chunk));
+  }
+
+  return true;
+}
+
+void SharedMemoryArbiterImpl::SetBatchCommitsDuration(
+    uint32_t batch_commits_duration_ms) {
+  std::lock_guard<std::mutex> scoped_lock(lock_);
+  batch_commits_duration_ms_ = batch_commits_duration_ms;
+}
+
+bool SharedMemoryArbiterImpl::EnableDirectSMBPatching() {
+  std::lock_guard<std::mutex> scoped_lock(lock_);
+  if (!direct_patching_supported_by_service_) {
+    return false;
+  }
+
+  return direct_patching_enabled_ = true;
+}
+
+void SharedMemoryArbiterImpl::SetDirectSMBPatchingSupportedByService() {
+  std::lock_guard<std::mutex> scoped_lock(lock_);
+  direct_patching_supported_by_service_ = true;
+}
+
+// This function is quite subtle. When making changes keep in mind these two
+// challenges:
+// 1) If the producer stalls and we happen to be on the |task_runner_| IPC
+//    thread (or, for in-process cases, on the same thread where
+//    TracingServiceImpl lives), the CommitData() call must be synchronous and
+//    not posted, to avoid deadlocks.
+// 2) When different threads hit this function, we must guarantee that we don't
+//    accidentally make commits out of order. See commit 4e4fe8f56ef and
+//    crbug.com/919187 for more context.
+void SharedMemoryArbiterImpl::FlushPendingCommitDataRequests(
+    std::function<void()> callback) {
+  std::unique_ptr<CommitDataRequest> req;
+  {
+    std::unique_lock<std::mutex> scoped_lock(lock_);
+
+    // Flushing is only supported while |fully_bound_|, and there may still be
+    // unbound startup trace writers. If so, skip the commit for now - it'll be
+    // done when |fully_bound_| is updated.
+    if (!fully_bound_) {
+      if (callback)
+        pending_flush_callbacks_.push_back(callback);
+      return;
+    }
+
+    // May be called by TraceWriterImpl on any thread.
+    base::TaskRunner* task_runner = task_runner_;
+    if (!task_runner->RunsTasksOnCurrentThread()) {
+      // We shouldn't post a task while holding a lock. |task_runner| remains
+      // valid after unlocking, because |task_runner_| is never reset.
+      scoped_lock.unlock();
+
+      auto weak_this = weak_ptr_factory_.GetWeakPtr();
+      task_runner->PostTask([weak_this, callback] {
+        if (weak_this)
+          weak_this->FlushPendingCommitDataRequests(std::move(callback));
+      });
+      return;
+    }
+
+    // |commit_data_req_| could have become a nullptr, for example when a forced
+    // sync flush happens in GetNewChunk().
+    if (commit_data_req_) {
+      // Make sure any placeholder buffer IDs from StartupWriters are replaced
+      // before sending the request.
+      bool all_placeholders_replaced =
+          ReplaceCommitPlaceholderBufferIdsLocked();
+      // We're |fully_bound_|, thus all writers are bound and all placeholders
+      // should have been replaced.
+      PERFETTO_DCHECK(all_placeholders_replaced);
+
+      // In order to allow patching in the producer we delay the kChunkComplete
+      // transition and keep batched chunks in the kChunkBeingWritten state.
+      // Since we are about to notify the service of all batched chunks, it will
+      // not be possible to apply any more patches to them and we need to move
+      // them to kChunkComplete - otherwise the service won't look at them.
+      for (auto& ctm : commit_data_req_->chunks_to_move()) {
+        uint32_t layout = shmem_abi_.GetPageLayout(ctm.page());
+        auto chunk_state =
+            shmem_abi_.GetChunkStateFromLayout(layout, ctm.chunk());
+        // Note: the subset of |commit_data_req_| chunks that still need
+        // patching is also the subset of chunks that are still being written
+        // to. The rest of the chunks in |commit_data_req_| do not need patching
+        // and have already been marked as complete.
+        if (chunk_state != SharedMemoryABI::kChunkBeingWritten)
+          continue;
+
+        SharedMemoryABI::Chunk chunk =
+            shmem_abi_.GetChunkUnchecked(ctm.page(), layout, ctm.chunk());
+        shmem_abi_.ReleaseChunkAsComplete(std::move(chunk));
+      }
+
+      req = std::move(commit_data_req_);
+      bytes_pending_commit_ = 0;
+    }
+  }  // scoped_lock
+
+  if (req) {
+    producer_endpoint_->CommitData(*req, callback);
+  } else if (callback) {
+    // If |req| was nullptr, it means that an enqueued deferred commit was
+    // executed just before this. At this point send an empty commit request
+    // to the service, just to linearize with it and give the guarantee to the
+    // caller that the data has been flushed into the service.
+    producer_endpoint_->CommitData(CommitDataRequest(), std::move(callback));
+  }
+}
+
+bool SharedMemoryArbiterImpl::TryShutdown() {
+  std::lock_guard<std::mutex> scoped_lock(lock_);
+  did_shutdown_ = true;
+  // Shutdown is safe if there are no active trace writers for this arbiter.
+  return active_writer_ids_.IsEmpty();
+}
+
+std::unique_ptr<TraceWriter> SharedMemoryArbiterImpl::CreateTraceWriter(
+    BufferID target_buffer,
+    BufferExhaustedPolicy buffer_exhausted_policy) {
+  PERFETTO_CHECK(target_buffer > 0);
+  return CreateTraceWriterInternal(target_buffer, buffer_exhausted_policy);
+}
+
+std::unique_ptr<TraceWriter> SharedMemoryArbiterImpl::CreateStartupTraceWriter(
+    uint16_t target_buffer_reservation_id) {
+  PERFETTO_CHECK(!initially_bound_);
+  return CreateTraceWriterInternal(
+      MakeTargetBufferIdForReservation(target_buffer_reservation_id),
+      BufferExhaustedPolicy::kDrop);
+}
+
+void SharedMemoryArbiterImpl::BindToProducerEndpoint(
+    TracingService::ProducerEndpoint* producer_endpoint,
+    base::TaskRunner* task_runner) {
+  PERFETTO_DCHECK(producer_endpoint && task_runner);
+  PERFETTO_DCHECK(task_runner->RunsTasksOnCurrentThread());
+  PERFETTO_CHECK(!initially_bound_);
+
+  bool should_flush = false;
+  std::function<void()> flush_callback;
+  {
+    std::lock_guard<std::mutex> scoped_lock(lock_);
+    PERFETTO_CHECK(!fully_bound_);
+    PERFETTO_CHECK(!producer_endpoint_ && !task_runner_);
+
+    producer_endpoint_ = producer_endpoint;
+    task_runner_ = task_runner;
+
+    // Now that we're bound to a task runner, also reset the WeakPtrFactory to
+    // it. Because this code runs on the task runner, the factory's weak
+    // pointers will be valid on it.
+    weak_ptr_factory_.Reset(this);
+
+    // All writers registered so far should be startup trace writers, since
+    // the producer cannot feasibly know the target buffer for any future
+    // session yet.
+    for (const auto& entry : pending_writers_) {
+      PERFETTO_CHECK(IsReservationTargetBufferId(entry.second));
+    }
+
+    // If all buffer reservations are bound, we can flush pending commits.
+    if (UpdateFullyBoundLocked()) {
+      should_flush = true;
+      flush_callback = TakePendingFlushCallbacksLocked();
+    }
+  }  // scoped_lock
+
+  // Attempt to flush any pending commits (and run pending flush callbacks). If
+  // there are none, this will have no effect. If we ended up in a race that
+  // changed |fully_bound_| back to false, the commit will happen once we become
+  // |fully_bound_| again.
+  if (should_flush)
+    FlushPendingCommitDataRequests(flush_callback);
+}
+
+void SharedMemoryArbiterImpl::BindStartupTargetBuffer(
+    uint16_t target_buffer_reservation_id,
+    BufferID target_buffer_id) {
+  PERFETTO_DCHECK(target_buffer_id > 0);
+  PERFETTO_CHECK(!initially_bound_);
+
+  std::unique_lock<std::mutex> scoped_lock(lock_);
+
+  // We should already be bound to an endpoint, but not fully bound.
+  PERFETTO_CHECK(!fully_bound_);
+  PERFETTO_CHECK(producer_endpoint_);
+  PERFETTO_CHECK(task_runner_);
+  PERFETTO_CHECK(task_runner_->RunsTasksOnCurrentThread());
+
+  BindStartupTargetBufferImpl(std::move(scoped_lock),
+                              target_buffer_reservation_id, target_buffer_id);
+}
+
+void SharedMemoryArbiterImpl::AbortStartupTracingForReservation(
+    uint16_t target_buffer_reservation_id) {
+  PERFETTO_CHECK(!initially_bound_);
+
+  std::unique_lock<std::mutex> scoped_lock(lock_);
+
+  // If we are already bound to an arbiter, we may need to flush after aborting
+  // the session, and thus should be running on the arbiter's task runner.
+  if (task_runner_ && !task_runner_->RunsTasksOnCurrentThread()) {
+    // We shouldn't post tasks while locked.
+    auto* task_runner = task_runner_;
+    scoped_lock.unlock();
+
+    auto weak_this = weak_ptr_factory_.GetWeakPtr();
+    task_runner->PostTask([weak_this, target_buffer_reservation_id]() {
+      if (!weak_this)
+        return;
+      weak_this->AbortStartupTracingForReservation(
+          target_buffer_reservation_id);
+    });
+    return;
+  }
+
+  PERFETTO_CHECK(!fully_bound_);
+
+  // Bind the target buffer reservation to an invalid buffer (ID 0), so that
+  // existing commits, as well as future commits (of currently acquired chunks),
+  // will be released as free free by the service but otherwise ignored (i.e.
+  // not copied into any valid target buffer).
+  BindStartupTargetBufferImpl(std::move(scoped_lock),
+                              target_buffer_reservation_id,
+                              /*target_buffer_id=*/kInvalidBufferId);
+}
+
+void SharedMemoryArbiterImpl::BindStartupTargetBufferImpl(
+    std::unique_lock<std::mutex> scoped_lock,
+    uint16_t target_buffer_reservation_id,
+    BufferID target_buffer_id) {
+  // We should already be bound to an endpoint if the target buffer is valid.
+  PERFETTO_DCHECK((producer_endpoint_ && task_runner_) ||
+                  target_buffer_id == kInvalidBufferId);
+
+  MaybeUnboundBufferID reserved_id =
+      MakeTargetBufferIdForReservation(target_buffer_reservation_id);
+
+  bool should_flush = false;
+  std::function<void()> flush_callback;
+  std::vector<std::pair<WriterID, BufferID>> writers_to_register;
+
+  TargetBufferReservation& reservation =
+      target_buffer_reservations_[reserved_id];
+  PERFETTO_CHECK(!reservation.resolved);
+  reservation.resolved = true;
+  reservation.target_buffer = target_buffer_id;
+
+  // Collect trace writers associated with the reservation.
+  for (auto it = pending_writers_.begin(); it != pending_writers_.end();) {
+    if (it->second == reserved_id) {
+      // No need to register writers that have an invalid target buffer.
+      if (target_buffer_id != kInvalidBufferId) {
+        writers_to_register.push_back(
+            std::make_pair(it->first, target_buffer_id));
+      }
+      it = pending_writers_.erase(it);
+    } else {
+      it++;
+    }
+  }
+
+  // If all buffer reservations are bound, we can flush pending commits.
+  if (UpdateFullyBoundLocked()) {
+    should_flush = true;
+    flush_callback = TakePendingFlushCallbacksLocked();
+  }
+
+  scoped_lock.unlock();
+
+  // Register any newly bound trace writers with the service.
+  for (const auto& writer_and_target_buffer : writers_to_register) {
+    producer_endpoint_->RegisterTraceWriter(writer_and_target_buffer.first,
+                                            writer_and_target_buffer.second);
+  }
+
+  // Attempt to flush any pending commits (and run pending flush callbacks). If
+  // there are none, this will have no effect. If we ended up in a race that
+  // changed |fully_bound_| back to false, the commit will happen once we become
+  // |fully_bound_| again.
+  if (should_flush)
+    FlushPendingCommitDataRequests(flush_callback);
+}
+
+std::function<void()>
+SharedMemoryArbiterImpl::TakePendingFlushCallbacksLocked() {
+  if (pending_flush_callbacks_.empty())
+    return std::function<void()>();
+
+  std::vector<std::function<void()>> pending_flush_callbacks;
+  pending_flush_callbacks.swap(pending_flush_callbacks_);
+  // Capture the callback list into the lambda by copy.
+  return [pending_flush_callbacks]() {
+    for (auto& callback : pending_flush_callbacks)
+      callback();
+  };
+}
+
+void SharedMemoryArbiterImpl::NotifyFlushComplete(FlushRequestID req_id) {
+  base::TaskRunner* task_runner_to_commit_on = nullptr;
+
+  {
+    std::lock_guard<std::mutex> scoped_lock(lock_);
+    // If a commit_data_req_ exists it means that somebody else already posted a
+    // FlushPendingCommitDataRequests() task.
+    if (!commit_data_req_) {
+      commit_data_req_.reset(new CommitDataRequest());
+
+      // Flushing the commit is only supported while we're |fully_bound_|. If we
+      // aren't, we'll flush when |fully_bound_| is updated.
+      if (fully_bound_)
+        task_runner_to_commit_on = task_runner_;
+    } else {
+      // If there is another request queued and that also contains is a reply
+      // to a flush request, reply with the highest id.
+      req_id = std::max(req_id, commit_data_req_->flush_request_id());
+    }
+    commit_data_req_->set_flush_request_id(req_id);
+  }  // scoped_lock
+
+  // We shouldn't post tasks while locked. |task_runner_to_commit_on|
+  // remains valid after unlocking, because |task_runner_| is never reset.
+  if (task_runner_to_commit_on) {
+    auto weak_this = weak_ptr_factory_.GetWeakPtr();
+    task_runner_to_commit_on->PostTask([weak_this] {
+      if (weak_this)
+        weak_this->FlushPendingCommitDataRequests();
+    });
+  }
+}
+
+std::unique_ptr<TraceWriter> SharedMemoryArbiterImpl::CreateTraceWriterInternal(
+    MaybeUnboundBufferID target_buffer,
+    BufferExhaustedPolicy buffer_exhausted_policy) {
+  WriterID id;
+  base::TaskRunner* task_runner_to_register_on = nullptr;
+
+  {
+    std::lock_guard<std::mutex> scoped_lock(lock_);
+    if (did_shutdown_)
+      return std::unique_ptr<TraceWriter>(new NullTraceWriter());
+
+    id = active_writer_ids_.Allocate();
+    if (!id)
+      return std::unique_ptr<TraceWriter>(new NullTraceWriter());
+
+    PERFETTO_DCHECK(!pending_writers_.count(id));
+
+    if (IsReservationTargetBufferId(target_buffer)) {
+      // If the reservation is new, mark it as unbound in
+      // |target_buffer_reservations_|. Otherwise, if the reservation was
+      // already bound, choose the bound buffer ID now.
+      auto it_and_inserted = target_buffer_reservations_.insert(
+          {target_buffer, TargetBufferReservation()});
+      if (it_and_inserted.first->second.resolved)
+        target_buffer = it_and_inserted.first->second.target_buffer;
+    }
+
+    if (IsReservationTargetBufferId(target_buffer)) {
+      // The arbiter and/or startup buffer reservations are not bound yet, so
+      // buffer the registration of the writer until after we're bound.
+      pending_writers_[id] = target_buffer;
+
+      // Mark the arbiter as not fully bound, since we now have at least one
+      // unbound trace writer / target buffer reservation.
+      fully_bound_ = false;
+    } else if (target_buffer != kInvalidBufferId) {
+      // Trace writer is bound, so arbiter should be bound to an endpoint, too.
+      PERFETTO_CHECK(producer_endpoint_ && task_runner_);
+      task_runner_to_register_on = task_runner_;
+    }
+  }  // scoped_lock
+
+  // We shouldn't post tasks while locked. |task_runner_to_register_on|
+  // remains valid after unlocking, because |task_runner_| is never reset.
+  if (task_runner_to_register_on) {
+    auto weak_this = weak_ptr_factory_.GetWeakPtr();
+    task_runner_to_register_on->PostTask([weak_this, id, target_buffer] {
+      if (weak_this)
+        weak_this->producer_endpoint_->RegisterTraceWriter(id, target_buffer);
+    });
+  }
+
+  return std::unique_ptr<TraceWriter>(
+      new TraceWriterImpl(this, id, target_buffer, buffer_exhausted_policy));
+}
+
+void SharedMemoryArbiterImpl::ReleaseWriterID(WriterID id) {
+  base::TaskRunner* task_runner = nullptr;
+  {
+    std::lock_guard<std::mutex> scoped_lock(lock_);
+    active_writer_ids_.Free(id);
+
+    auto it = pending_writers_.find(id);
+    if (it != pending_writers_.end()) {
+      // Writer hasn't been bound yet and thus also not yet registered with the
+      // service.
+      pending_writers_.erase(it);
+      return;
+    }
+
+    // A trace writer from an aborted session may be destroyed before the
+    // arbiter is bound to a task runner. In that case, it was never registered
+    // with the service.
+    if (!task_runner_)
+      return;
+
+    task_runner = task_runner_;
+  }  // scoped_lock
+
+  // We shouldn't post tasks while locked. |task_runner| remains valid after
+  // unlocking, because |task_runner_| is never reset.
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  task_runner->PostTask([weak_this, id] {
+    if (weak_this)
+      weak_this->producer_endpoint_->UnregisterTraceWriter(id);
+  });
+}
+
+bool SharedMemoryArbiterImpl::ReplaceCommitPlaceholderBufferIdsLocked() {
+  if (!commit_data_req_)
+    return true;
+
+  bool all_placeholders_replaced = true;
+  for (auto& chunk : *commit_data_req_->mutable_chunks_to_move()) {
+    if (!IsReservationTargetBufferId(chunk.target_buffer()))
+      continue;
+    const auto it = target_buffer_reservations_.find(chunk.target_buffer());
+    PERFETTO_DCHECK(it != target_buffer_reservations_.end());
+    if (!it->second.resolved) {
+      all_placeholders_replaced = false;
+      continue;
+    }
+    chunk.set_target_buffer(it->second.target_buffer);
+  }
+  for (auto& chunk : *commit_data_req_->mutable_chunks_to_patch()) {
+    if (!IsReservationTargetBufferId(chunk.target_buffer()))
+      continue;
+    const auto it = target_buffer_reservations_.find(chunk.target_buffer());
+    PERFETTO_DCHECK(it != target_buffer_reservations_.end());
+    if (!it->second.resolved) {
+      all_placeholders_replaced = false;
+      continue;
+    }
+    chunk.set_target_buffer(it->second.target_buffer);
+  }
+  return all_placeholders_replaced;
+}
+
+bool SharedMemoryArbiterImpl::UpdateFullyBoundLocked() {
+  if (!producer_endpoint_) {
+    PERFETTO_DCHECK(!fully_bound_);
+    return false;
+  }
+  // We're fully bound if all target buffer reservations have a valid associated
+  // BufferID.
+  fully_bound_ = std::none_of(
+      target_buffer_reservations_.begin(), target_buffer_reservations_.end(),
+      [](std::pair<MaybeUnboundBufferID, TargetBufferReservation> entry) {
+        return !entry.second.resolved;
+      });
+  return fully_bound_;
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/core/trace_packet.cc
+// gen_amalgamated begin header: include/perfetto/ext/tracing/core/trace_packet.h
+// gen_amalgamated begin header: include/perfetto/ext/tracing/core/slice.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_SLICE_H_
+#define INCLUDE_PERFETTO_EXT_TRACING_CORE_SLICE_H_
+
+#include <stddef.h>
+#include <string.h>
+
+#include <memory>
+#include <string>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+namespace perfetto {
+
+// A simple wrapper around a virtually contiguous memory range that contains a
+// TracePacket, or just a portion of it.
+struct Slice {
+  Slice() : start(nullptr), size(0) {}
+  Slice(const void* st, size_t sz) : start(st), size(sz) {}
+  Slice(Slice&& other) noexcept = default;
+
+  // Create a Slice which owns |size| bytes of memory.
+  static Slice Allocate(size_t size) {
+    Slice slice;
+    slice.own_data_.reset(new uint8_t[size]);
+    slice.start = &slice.own_data_[0];
+    slice.size = size;
+    return slice;
+  }
+
+  static Slice TakeOwnership(std::unique_ptr<uint8_t[]> buf, size_t size) {
+    Slice slice;
+    slice.own_data_ = std::move(buf);
+    slice.start = &slice.own_data_[0];
+    slice.size = size;
+    return slice;
+  }
+
+  uint8_t* own_data() {
+    PERFETTO_DCHECK(own_data_);
+    return own_data_.get();
+  }
+
+  const void* start;
+  size_t size;
+
+ private:
+  Slice(const Slice&) = delete;
+  void operator=(const Slice&) = delete;
+
+  std::unique_ptr<uint8_t[]> own_data_;
+};
+
+// TODO(primiano): most TracePacket(s) fit in a slice or two. We need something
+// a bit more clever here that has inline capacity for 2 slices and then uses a
+// std::forward_list or a std::vector for the less likely cases.
+using Slices = std::vector<Slice>;
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACING_CORE_SLICE_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_PACKET_H_
+#define INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_PACKET_H_
+
+#include <stddef.h>
+#include <memory>
+#include <tuple>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/slice.h"
+
+namespace perfetto {
+
+// A wrapper around a byte buffer that contains a protobuf-encoded TracePacket
+// (see trace_packet.proto). The TracePacket is decoded only if the Consumer
+// requests that. This is to allow Consumer(s) to just stream the packet over
+// the network or save it to a file without wasting time decoding it and without
+// needing to depend on libprotobuf or the trace_packet.pb.h header.
+// If the packets are saved / streamed and not just consumed locally, consumers
+// should ensure to preserve the unknown fields in the proto. A consumer, in
+// fact, might have an older version .proto which is newer on the producer.
+class PERFETTO_EXPORT TracePacket {
+ public:
+  using const_iterator = Slices::const_iterator;
+
+  // The field id of protos::Trace::packet, static_assert()-ed in the unittest.
+  static constexpr uint32_t kPacketFieldNumber = 1;
+
+  // Maximum size of the preamble returned by GetProtoPreamble().
+  static constexpr size_t kMaxPreambleBytes = 8;
+
+  TracePacket();
+  ~TracePacket();
+  TracePacket(TracePacket&&) noexcept;
+  TracePacket& operator=(TracePacket&&);
+
+  // Accesses all the raw slices in the packet, for saving them to file/network.
+  const Slices& slices() const { return slices_; }
+
+  // Mutator, used only by the service and tests.
+  void AddSlice(Slice);
+
+  // Does not copy / take ownership of the memory of the slice. The TracePacket
+  // will be valid only as long as the original buffer is valid.
+  void AddSlice(const void* start, size_t size);
+
+  // Total size of all slices.
+  size_t size() const { return size_; }
+
+  // Generates a protobuf preamble suitable to represent this packet as a
+  // repeated field within a root trace.proto message.
+  // Returns a pointer to a buffer, owned by this class, containing the preamble
+  // and its size.
+  std::tuple<char*, size_t> GetProtoPreamble();
+
+  // Returns the raw protobuf bytes of the slices, all stitched together into
+  // a string. Only for testing.
+  std::string GetRawBytesForTesting();
+
+ private:
+  TracePacket(const TracePacket&) = delete;
+  TracePacket& operator=(const TracePacket&) = delete;
+
+  Slices slices_;     // Not owned.
+  size_t size_ = 0;   // SUM(slice.size for slice in slices_).
+  char preamble_[kMaxPreambleBytes];  // Deliberately not initialized.
+
+  // Remember to update the move operators and their unittest if adding new
+  // fields. ConsumerIPCClientImpl::OnReadBuffersResponse() relies on
+  // std::move(TracePacket) to clear up the moved-from instance.
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_PACKET_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+
+TracePacket::TracePacket() = default;
+TracePacket::~TracePacket() = default;
+
+TracePacket::TracePacket(TracePacket&& other) noexcept {
+  *this = std::move(other);
+}
+
+TracePacket& TracePacket::operator=(TracePacket&& other) {
+  slices_ = std::move(other.slices_);
+  other.slices_.clear();
+  size_ = other.size_;
+  other.size_ = 0;
+  return *this;
+}
+
+void TracePacket::AddSlice(Slice slice) {
+  size_ += slice.size;
+  slices_.push_back(std::move(slice));
+}
+
+void TracePacket::AddSlice(const void* start, size_t size) {
+  size_ += size;
+  slices_.emplace_back(start, size);
+}
+
+std::tuple<char*, size_t> TracePacket::GetProtoPreamble() {
+  using protozero::proto_utils::MakeTagLengthDelimited;
+  using protozero::proto_utils::WriteVarInt;
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(&preamble_[0]);
+
+  constexpr uint8_t tag = MakeTagLengthDelimited(kPacketFieldNumber);
+  static_assert(tag < 0x80, "TracePacket tag should fit in one byte");
+  *(ptr++) = tag;
+
+  ptr = WriteVarInt(size(), ptr);
+  size_t preamble_size = reinterpret_cast<uintptr_t>(ptr) -
+                         reinterpret_cast<uintptr_t>(&preamble_[0]);
+  PERFETTO_DCHECK(preamble_size <= sizeof(preamble_));
+  return std::make_tuple(&preamble_[0], preamble_size);
+}
+
+std::string TracePacket::GetRawBytesForTesting() {
+  std::string data;
+  data.resize(size());
+  size_t pos = 0;
+  for (const Slice& slice : slices()) {
+    PERFETTO_CHECK(pos + slice.size <= data.size());
+    memcpy(&data[pos], slice.start, slice.size);
+    pos += slice.size;
+  }
+  return data;
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/core/trace_writer_impl.cc
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/tracing/core/trace_writer_impl.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <type_traits>
+#include <utility>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/thread_annotations.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/root_message.h"
+// gen_amalgamated expanded: #include "src/tracing/core/shared_memory_arbiter_impl.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
+
+using protozero::proto_utils::kMessageLengthFieldSize;
+using protozero::proto_utils::WriteRedundantVarInt;
+using ChunkHeader = perfetto::SharedMemoryABI::ChunkHeader;
+
+namespace perfetto {
+
+namespace {
+constexpr size_t kPacketHeaderSize = SharedMemoryABI::kPacketHeaderSize;
+uint8_t g_garbage_chunk[1024];
+}  // namespace
+
+TraceWriterImpl::TraceWriterImpl(SharedMemoryArbiterImpl* shmem_arbiter,
+                                 WriterID id,
+                                 MaybeUnboundBufferID target_buffer,
+                                 BufferExhaustedPolicy buffer_exhausted_policy)
+    : shmem_arbiter_(shmem_arbiter),
+      id_(id),
+      target_buffer_(target_buffer),
+      buffer_exhausted_policy_(buffer_exhausted_policy),
+      protobuf_stream_writer_(this),
+      process_id_(base::GetProcessId()) {
+  // TODO(primiano): we could handle the case of running out of TraceWriterID(s)
+  // more gracefully and always return a no-op TracePacket in NewTracePacket().
+  PERFETTO_CHECK(id_ != 0);
+
+  cur_packet_.reset(new protozero::RootMessage<protos::pbzero::TracePacket>());
+  cur_packet_->Finalize();  // To avoid the DCHECK in NewTracePacket().
+}
+
+TraceWriterImpl::~TraceWriterImpl() {
+  if (cur_chunk_.is_valid()) {
+    cur_packet_->Finalize();
+    Flush();
+  }
+  // This call may cause the shared memory arbiter (and the underlying memory)
+  // to get asynchronously deleted if this was the last trace writer targeting
+  // the arbiter and the arbiter was marked for shutdown.
+  shmem_arbiter_->ReleaseWriterID(id_);
+}
+
+void TraceWriterImpl::Flush(std::function<void()> callback) {
+  // Flush() cannot be called in the middle of a TracePacket.
+  PERFETTO_CHECK(cur_packet_->is_finalized());
+
+  if (cur_chunk_.is_valid()) {
+    shmem_arbiter_->ReturnCompletedChunk(std::move(cur_chunk_), target_buffer_,
+                                         &patch_list_);
+  } else {
+    // When in stall mode, all patches should have been returned with the last
+    // chunk, since the last packet was completed. In drop_packets_ mode, this
+    // may not be the case because the packet may have been fragmenting when
+    // SMB exhaustion occurred and |cur_chunk_| became invalid. In this case,
+    // drop_packets_ should be true.
+    PERFETTO_DCHECK(patch_list_.empty() || drop_packets_);
+  }
+
+  // Always issue the Flush request, even if there is nothing to flush, just
+  // for the sake of getting the callback posted back.
+  shmem_arbiter_->FlushPendingCommitDataRequests(callback);
+  protobuf_stream_writer_.Reset({nullptr, nullptr});
+
+  // |last_packet_size_field_| might have pointed into the chunk we returned.
+  last_packet_size_field_ = nullptr;
+}
+
+TraceWriterImpl::TracePacketHandle TraceWriterImpl::NewTracePacket() {
+  // If we hit this, the caller is calling NewTracePacket() without having
+  // finalized the previous packet.
+  PERFETTO_CHECK(cur_packet_->is_finalized());
+  // If we hit this, this trace writer was created in a different process. This
+  // likely means that the process forked while tracing was active, and the
+  // forked child process tried to emit a trace event. This is not supported, as
+  // it would lead to two processes writing to the same tracing SMB.
+  PERFETTO_DCHECK(process_id_ == base::GetProcessId());
+
+  fragmenting_packet_ = false;
+
+  // Reserve space for the size of the message. Note: this call might re-enter
+  // into this class invoking GetNewBuffer() if there isn't enough space or if
+  // this is the very first call to NewTracePacket().
+  static_assert(kPacketHeaderSize == kMessageLengthFieldSize,
+                "The packet header must match the Message header size");
+
+  bool was_dropping_packets = drop_packets_;
+
+  // It doesn't make sense to begin a packet that is going to fragment
+  // immediately after (8 is just an arbitrary estimation on the minimum size of
+  // a realistic packet).
+  bool chunk_too_full =
+      protobuf_stream_writer_.bytes_available() < kPacketHeaderSize + 8;
+  if (chunk_too_full || reached_max_packets_per_chunk_ ||
+      retry_new_chunk_after_packet_) {
+    protobuf_stream_writer_.Reset(GetNewBuffer());
+  }
+
+  // Send any completed patches to the service to facilitate trace data
+  // recovery by the service. This should only happen when we're completing
+  // the first packet in a chunk which was a continuation from the previous
+  // chunk, i.e. at most once per chunk.
+  if (!patch_list_.empty() && patch_list_.front().is_patched()) {
+    shmem_arbiter_->SendPatches(id_, target_buffer_, &patch_list_);
+  }
+
+  cur_packet_->Reset(&protobuf_stream_writer_);
+  uint8_t* header = protobuf_stream_writer_.ReserveBytes(kPacketHeaderSize);
+  memset(header, 0, kPacketHeaderSize);
+  cur_packet_->set_size_field(header);
+  last_packet_size_field_ = header;
+
+  TracePacketHandle handle(cur_packet_.get());
+  cur_fragment_start_ = protobuf_stream_writer_.write_ptr();
+  fragmenting_packet_ = true;
+
+  if (PERFETTO_LIKELY(!drop_packets_)) {
+    uint16_t new_packet_count = cur_chunk_.IncrementPacketCount();
+    reached_max_packets_per_chunk_ =
+        new_packet_count == ChunkHeader::Packets::kMaxCount;
+
+    if (PERFETTO_UNLIKELY(was_dropping_packets)) {
+      // We've succeeded to get a new chunk from the SMB after we entered
+      // drop_packets_ mode. Record a marker into the new packet to indicate the
+      // data loss.
+      cur_packet_->set_previous_packet_dropped(true);
+    }
+  }
+
+  return handle;
+}
+
+// Called by the Message. We can get here in two cases:
+// 1. In the middle of writing a Message,
+// when |fragmenting_packet_| == true. In this case we want to update the
+// chunk header with a partial packet and start a new partial packet in the
+// new chunk.
+// 2. While calling ReserveBytes() for the packet header in NewTracePacket().
+// In this case |fragmenting_packet_| == false and we just want a new chunk
+// without creating any fragments.
+protozero::ContiguousMemoryRange TraceWriterImpl::GetNewBuffer() {
+  if (fragmenting_packet_ && drop_packets_) {
+    // We can't write the remaining data of the fragmenting packet to a new
+    // chunk, because we have already lost some of its data in the garbage
+    // chunk. Thus, we will wrap around in the garbage chunk, wait until the
+    // current packet was completed, and then attempt to get a new chunk from
+    // the SMB again. Instead, if |drop_packets_| is true and
+    // |fragmenting_packet_| is false, we try to acquire a valid chunk because
+    // the SMB exhaustion might be resolved.
+    retry_new_chunk_after_packet_ = true;
+    return protozero::ContiguousMemoryRange{
+        &g_garbage_chunk[0], &g_garbage_chunk[0] + sizeof(g_garbage_chunk)};
+  }
+
+  // Attempt to grab the next chunk before finalizing the current one, so that
+  // we know whether we need to start dropping packets before writing the
+  // current packet fragment's header.
+  ChunkHeader::Packets packets = {};
+  if (fragmenting_packet_) {
+    packets.count = 1;
+    packets.flags = ChunkHeader::kFirstPacketContinuesFromPrevChunk;
+  }
+
+  // The memory order of the stores below doesn't really matter. This |header|
+  // is just a local temporary object. The GetNewChunk() call below will copy it
+  // into the shared buffer with the proper barriers.
+  ChunkHeader header = {};
+  header.writer_id.store(id_, std::memory_order_relaxed);
+  header.chunk_id.store(next_chunk_id_, std::memory_order_relaxed);
+  header.packets.store(packets, std::memory_order_relaxed);
+
+  SharedMemoryABI::Chunk new_chunk =
+      shmem_arbiter_->GetNewChunk(header, buffer_exhausted_policy_);
+  if (!new_chunk.is_valid()) {
+    // Shared memory buffer exhausted, switch into |drop_packets_| mode. We'll
+    // drop data until the garbage chunk has been filled once and then retry.
+
+    // If we started a packet in one of the previous (valid) chunks, we need to
+    // tell the service to discard it.
+    if (fragmenting_packet_) {
+      // We can only end up here if the previous chunk was a valid chunk,
+      // because we never try to acquire a new chunk in |drop_packets_| mode
+      // while fragmenting.
+      PERFETTO_DCHECK(!drop_packets_);
+
+      // Backfill the last fragment's header with an invalid size (too large),
+      // so that the service's TraceBuffer throws out the incomplete packet.
+      // It'll restart reading from the next chunk we submit.
+      WriteRedundantVarInt(SharedMemoryABI::kPacketSizeDropPacket,
+                           cur_packet_->size_field());
+
+      // Reset the size field, since we should not write the current packet's
+      // size anymore after this.
+      cur_packet_->set_size_field(nullptr);
+
+      // We don't set kLastPacketContinuesOnNextChunk or kChunkNeedsPatching on
+      // the last chunk, because its last fragment will be discarded anyway.
+      // However, the current packet fragment points to a valid |cur_chunk_| and
+      // may have non-finalized nested messages which will continue in the
+      // garbage chunk and currently still point into |cur_chunk_|. As we are
+      // about to return |cur_chunk_|, we need to invalidate the size fields of
+      // those nested messages. Normally we move them in the |patch_list_| (see
+      // below) but in this case, it doesn't make sense to send patches for a
+      // fragment that will be discarded for sure. Thus, we clean up any size
+      // field references into |cur_chunk_|.
+      for (auto* nested_msg = cur_packet_->nested_message(); nested_msg;
+           nested_msg = nested_msg->nested_message()) {
+        uint8_t* const cur_hdr = nested_msg->size_field();
+
+        // If this is false the protozero Message has already been instructed to
+        // write, upon Finalize(), its size into the patch list.
+        bool size_field_points_within_chunk =
+            cur_hdr >= cur_chunk_.payload_begin() &&
+            cur_hdr + kMessageLengthFieldSize <= cur_chunk_.end();
+
+        if (size_field_points_within_chunk)
+          nested_msg->set_size_field(nullptr);
+      }
+    } else if (!drop_packets_ && last_packet_size_field_) {
+      // If we weren't dropping packets before, we should indicate to the
+      // service that we're about to lose data. We do this by invalidating the
+      // size of the last packet in |cur_chunk_|. The service will record
+      // statistics about packets with kPacketSizeDropPacket size.
+      PERFETTO_DCHECK(cur_packet_->is_finalized());
+      PERFETTO_DCHECK(cur_chunk_.is_valid());
+
+      // |last_packet_size_field_| should point within |cur_chunk_|'s payload.
+      PERFETTO_DCHECK(last_packet_size_field_ >= cur_chunk_.payload_begin() &&
+                      last_packet_size_field_ + kMessageLengthFieldSize <=
+                          cur_chunk_.end());
+
+      WriteRedundantVarInt(SharedMemoryABI::kPacketSizeDropPacket,
+                           last_packet_size_field_);
+    }
+
+    if (cur_chunk_.is_valid()) {
+      shmem_arbiter_->ReturnCompletedChunk(std::move(cur_chunk_),
+                                           target_buffer_, &patch_list_);
+    }
+
+    drop_packets_ = true;
+    cur_chunk_ = SharedMemoryABI::Chunk();  // Reset to an invalid chunk.
+    reached_max_packets_per_chunk_ = false;
+    retry_new_chunk_after_packet_ = false;
+    last_packet_size_field_ = nullptr;
+
+    PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(&g_garbage_chunk,
+                                        sizeof(g_garbage_chunk),
+                                        "nobody reads the garbage chunk")
+    return protozero::ContiguousMemoryRange{
+        &g_garbage_chunk[0], &g_garbage_chunk[0] + sizeof(g_garbage_chunk)};
+  }  // if (!new_chunk.is_valid())
+
+  PERFETTO_DCHECK(new_chunk.is_valid());
+
+  if (fragmenting_packet_) {
+    // We should not be fragmenting a packet after we exited drop_packets_ mode,
+    // because we only retry to get a new chunk when a fresh packet is started.
+    PERFETTO_DCHECK(!drop_packets_);
+
+    uint8_t* const wptr = protobuf_stream_writer_.write_ptr();
+    PERFETTO_DCHECK(wptr >= cur_fragment_start_);
+    uint32_t partial_size = static_cast<uint32_t>(wptr - cur_fragment_start_);
+    PERFETTO_DCHECK(partial_size < cur_chunk_.size());
+
+    // Backfill the packet header with the fragment size.
+    PERFETTO_DCHECK(partial_size > 0);
+    cur_packet_->inc_size_already_written(partial_size);
+    cur_chunk_.SetFlag(ChunkHeader::kLastPacketContinuesOnNextChunk);
+    WriteRedundantVarInt(partial_size, cur_packet_->size_field());
+
+    // Descend in the stack of non-finalized nested submessages (if any) and
+    // detour their |size_field| into the |patch_list_|. At this point we have
+    // to release the chunk and they cannot write anymore into that.
+    // TODO(primiano): add tests to cover this logic.
+    bool chunk_needs_patching = false;
+    for (auto* nested_msg = cur_packet_->nested_message(); nested_msg;
+         nested_msg = nested_msg->nested_message()) {
+      uint8_t* const cur_hdr = nested_msg->size_field();
+
+      // If this is false the protozero Message has already been instructed to
+      // write, upon Finalize(), its size into the patch list.
+      bool size_field_points_within_chunk =
+          cur_hdr >= cur_chunk_.payload_begin() &&
+          cur_hdr + kMessageLengthFieldSize <= cur_chunk_.end();
+
+      if (size_field_points_within_chunk) {
+        auto offset =
+            static_cast<uint16_t>(cur_hdr - cur_chunk_.payload_begin());
+        const ChunkID cur_chunk_id =
+            cur_chunk_.header()->chunk_id.load(std::memory_order_relaxed);
+        Patch* patch = patch_list_.emplace_back(cur_chunk_id, offset);
+        nested_msg->set_size_field(&patch->size_field[0]);
+        chunk_needs_patching = true;
+      } else {
+#if PERFETTO_DCHECK_IS_ON()
+        // Ensure that the size field of the message points to an element of the
+        // patch list.
+        auto patch_it = std::find_if(
+            patch_list_.begin(), patch_list_.end(),
+            [cur_hdr](const Patch& p) { return &p.size_field[0] == cur_hdr; });
+        PERFETTO_DCHECK(patch_it != patch_list_.end());
+#endif
+      }
+    }  // for(nested_msg
+
+    if (chunk_needs_patching)
+      cur_chunk_.SetFlag(ChunkHeader::kChunkNeedsPatching);
+  }  // if(fragmenting_packet)
+
+  if (cur_chunk_.is_valid()) {
+    // ReturnCompletedChunk will consume the first patched entries from
+    // |patch_list_| and shrink it.
+    shmem_arbiter_->ReturnCompletedChunk(std::move(cur_chunk_), target_buffer_,
+                                         &patch_list_);
+  }
+
+  // Switch to the new chunk.
+  drop_packets_ = false;
+  reached_max_packets_per_chunk_ = false;
+  retry_new_chunk_after_packet_ = false;
+  next_chunk_id_++;
+  cur_chunk_ = std::move(new_chunk);
+  last_packet_size_field_ = nullptr;
+
+  uint8_t* payload_begin = cur_chunk_.payload_begin();
+  if (fragmenting_packet_) {
+    cur_packet_->set_size_field(payload_begin);
+    last_packet_size_field_ = payload_begin;
+    memset(payload_begin, 0, kPacketHeaderSize);
+    payload_begin += kPacketHeaderSize;
+    cur_fragment_start_ = payload_begin;
+  }
+
+  return protozero::ContiguousMemoryRange{payload_begin, cur_chunk_.end()};
+}
+
+WriterID TraceWriterImpl::writer_id() const {
+  return id_;
+}
+
+// Base class definitions.
+TraceWriter::TraceWriter() = default;
+TraceWriter::~TraceWriter() = default;
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/core/virtual_destructors.cc
+// gen_amalgamated begin header: include/perfetto/ext/tracing/core/consumer.h
+// gen_amalgamated begin header: include/perfetto/ext/tracing/core/observable_events.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_OBSERVABLE_EVENTS_H_
+#define INCLUDE_PERFETTO_EXT_TRACING_CORE_OBSERVABLE_EVENTS_H_
+
+// Creates the aliases in the ::perfetto namespace, doing things like:
+// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
+// See comments in forward_decls.h for the historical reasons of this
+// indirection layer.
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACING_CORE_OBSERVABLE_EVENTS_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_CONSUMER_H_
+#define INCLUDE_PERFETTO_EXT_TRACING_CORE_CONSUMER_H_
+
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/observable_events.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+namespace perfetto {
+
+class TracePacket;
+
+class PERFETTO_EXPORT Consumer {
+ public:
+  virtual ~Consumer();
+
+  // Called by Service (or more typically by the transport layer, on behalf of
+  // the remote Service), once the Consumer <> Service connection has been
+  // established.
+  virtual void OnConnect() = 0;
+
+  // Called by the Service or by the transport layer if the connection with the
+  // service drops, either voluntarily (e.g., by destroying the ConsumerEndpoint
+  // obtained through Service::ConnectConsumer()) or involuntarily (e.g., if the
+  // Service process crashes).
+  virtual void OnDisconnect() = 0;
+
+  // Called by the Service after the tracing session has ended. This can happen
+  // for a variety of reasons:
+  // - The consumer explicitly called DisableTracing()
+  // - The TraceConfig's |duration_ms| has been reached.
+  // - The TraceConfig's |max_file_size_bytes| has been reached.
+  // - An error occurred while trying to enable tracing. In this case |error|
+  //   is non-empty.
+  virtual void OnTracingDisabled(const std::string& error) = 0;
+
+  // Called back by the Service (or transport layer) after invoking
+  // TracingService::ConsumerEndpoint::ReadBuffers(). This function can be
+  // called more than once. Each invocation can carry one or more
+  // TracePacket(s). Upon the last call, |has_more| is set to true (i.e.
+  // |has_more| is a !EOF).
+  virtual void OnTraceData(std::vector<TracePacket>, bool has_more) = 0;
+
+  // Called back by the Service (or transport layer) after invoking
+  // TracingService::ConsumerEndpoint::Detach().
+  // The consumer can disconnect at this point and the trace session will keep
+  // on going. A new consumer can later re-attach passing back the same |key|
+  // passed to Detach(), but only if the two requests come from the same uid.
+  virtual void OnDetach(bool success) = 0;
+
+  // Called back by the Service (or transport layer) after invoking
+  // TracingService::ConsumerEndpoint::Attach().
+  virtual void OnAttach(bool success, const TraceConfig&) = 0;
+
+  // Called back by the Service (or transport layer) after invoking
+  // TracingService::ConsumerEndpoint::GetTraceStats().
+  virtual void OnTraceStats(bool success, const TraceStats&) = 0;
+
+  // Called back by the Service (or transport layer) after invoking
+  // TracingService::ConsumerEndpoint::ObserveEvents() whenever one or more
+  // ObservableEvents of enabled event types occur.
+  virtual void OnObservableEvents(const ObservableEvents&) = 0;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACING_CORE_CONSUMER_H_
+// gen_amalgamated begin header: include/perfetto/ext/tracing/core/producer.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_PRODUCER_H_
+#define INCLUDE_PERFETTO_EXT_TRACING_CORE_PRODUCER_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+namespace perfetto {
+
+class SharedMemory;
+
+// A Producer is an entity that connects to the write-only port of the Service
+// and exposes the ability to produce performance data on-demand. The lifecycle
+// of a Producer is as follows:
+// 1. The producer connects to the service and advertises its data sources
+//    (e.g., the ability to get kernel ftraces, to list process stats).
+// 2. The service acknowledges the connection and sends over the SharedMemory
+//    region that will be used to exchange data (together with the signalling
+//    API TracingService::ProducerEndpoint::OnPageAcquired()/OnPageReleased()).
+// 3. At some point later on, the Service asks the Producer to turn on some of
+//    the previously registered data sources, together with some configuration
+//    parameters. This happens via the StartDataSource() callback.
+// 4. In response to that the Producer will spawn an instance of the given data
+//    source and inject its data into the shared memory buffer (obtained during
+//    OnConnect).
+// This interface is subclassed by:
+//  1. The actual producer code in the clients e.g., the ftrace reader process.
+//  2. The transport layer when interposing RPC between service and producers.
+class PERFETTO_EXPORT Producer {
+ public:
+  virtual ~Producer();
+
+  // Called by Service (or more typically by the transport layer, on behalf of
+  // the remote Service), once the Producer <> Service connection has been
+  // established.
+  virtual void OnConnect() = 0;
+
+  // Called by the Service or by the transport layer if the connection with the
+  // service drops, either voluntarily (e.g., by destroying the ProducerEndpoint
+  // obtained through Service::ConnectProducer()) or involuntarily (e.g., if the
+  // Service process crashes).
+  // The Producer is expected to tear down all its data sources if this happens.
+  // Once this call returns it is possible to safely destroy the Producer
+  // instance.
+  virtual void OnDisconnect() = 0;
+
+  // Called by the Service after OnConnect but before the first DataSource is
+  // created. Can be used for any setup required before tracing begins.
+  virtual void OnTracingSetup() = 0;
+
+  // The lifecycle methods below are always called in the following sequence:
+  // SetupDataSource  -> StartDataSource -> StopDataSource.
+  // Or, in the edge case where a trace is aborted immediately:
+  // SetupDataSource  -> StopDataSource.
+  // The Setup+Start call sequence is always guaranateed, regardless of the
+  // TraceConfig.deferred_start flags.
+  // Called by the Service to configure one of the data sources previously
+  // registered through TracingService::ProducerEndpoint::RegisterDataSource().
+  // This method is always called before StartDataSource. There is always a
+  // SetupDataSource() call before each StartDataSource() call.
+  // Args:
+  // - DataSourceInstanceID is an identifier chosen by the Service that should
+  //   be assigned to the newly created data source instance. It is used to
+  //   match the StopDataSource() request below.
+  // - DataSourceConfig is the configuration for the new data source (e.g.,
+  //   tells which trace categories to enable).
+  virtual void SetupDataSource(DataSourceInstanceID,
+                               const DataSourceConfig&) = 0;
+
+  // Called by the Service to turn on one of the data sources previously
+  // registered through TracingService::ProducerEndpoint::RegisterDataSource()
+  // and initialized through SetupDataSource().
+  // Both arguments are guaranteed to be identical to the ones passed to the
+  // prior SetupDataSource() call.
+  virtual void StartDataSource(DataSourceInstanceID,
+                               const DataSourceConfig&) = 0;
+
+  // Called by the Service to shut down an existing data source instance.
+  virtual void StopDataSource(DataSourceInstanceID) = 0;
+
+  // Called by the service to request the Producer to commit the data of the
+  // given data sources and return their chunks into the shared memory buffer.
+  // The Producer is expected to invoke NotifyFlushComplete(FlushRequestID) on
+  // the Service after the data has been committed. The producer has to either
+  // reply to the flush requests in order, or can just reply to the latest one
+  // Upon seeing a NotifyFlushComplete(N), the service will assume that all
+  // flushes < N have also been committed.
+  virtual void Flush(FlushRequestID,
+                     const DataSourceInstanceID* data_source_ids,
+                     size_t num_data_sources) = 0;
+
+  // Called by the service to instruct the given data sources to stop referring
+  // to any trace contents emitted so far. The intent is that after processing
+  // this call, the rest of the trace should be parsable even if all of the
+  // packets emitted so far have been lost (for example due to ring buffer
+  // overwrites).
+  //
+  // Called only for Producers with active data sources that have opted in by
+  // setting |handles_incremental_state_clear| in their DataSourceDescriptor.
+  //
+  // The way this call is handled is up to the individual Producer
+  // implementation. Some might wish to emit invalidation markers in the trace
+  // (see TracePacket.incremental_state_cleared for an existing field), and
+  // handle them when parsing the trace.
+  virtual void ClearIncrementalState(
+      const DataSourceInstanceID* data_source_ids,
+      size_t num_data_sources) = 0;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACING_CORE_PRODUCER_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/consumer.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/producer.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
+
+// This translation unit contains the definitions for the destructor of pure
+// virtual interfaces for the current build target. The alternative would be
+// introducing a one-liner .cc file for each pure virtual interface, which is
+// overkill. This is for compliance with -Wweak-vtables.
+
+namespace perfetto {
+
+Consumer::~Consumer() = default;
+Producer::~Producer() = default;
+TracingService::~TracingService() = default;
+ConsumerEndpoint::~ConsumerEndpoint() = default;
+ProducerEndpoint::~ProducerEndpoint() = default;
+SharedMemory::~SharedMemory() = default;
+SharedMemory::Factory::~Factory() = default;
+SharedMemoryArbiter::~SharedMemoryArbiter() = default;
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/console_interceptor.cc
+// gen_amalgamated begin header: gen/protos/perfetto/config/interceptors/console_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTORS_CONSOLE_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTORS_CONSOLE_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum ConsoleConfig_Output : int32_t;
+
+enum ConsoleConfig_Output : int32_t {
+  ConsoleConfig_Output_OUTPUT_UNSPECIFIED = 0,
+  ConsoleConfig_Output_OUTPUT_STDOUT = 1,
+  ConsoleConfig_Output_OUTPUT_STDERR = 2,
+};
+
+const ConsoleConfig_Output ConsoleConfig_Output_MIN = ConsoleConfig_Output_OUTPUT_UNSPECIFIED;
+const ConsoleConfig_Output ConsoleConfig_Output_MAX = ConsoleConfig_Output_OUTPUT_STDERR;
+
+class ConsoleConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ConsoleConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ConsoleConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ConsoleConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_output() const { return at<1>().valid(); }
+  int32_t output() const { return at<1>().as_int32(); }
+  bool has_enable_colors() const { return at<2>().valid(); }
+  bool enable_colors() const { return at<2>().as_bool(); }
+};
+
+class ConsoleConfig : public ::protozero::Message {
+ public:
+  using Decoder = ConsoleConfig_Decoder;
+  enum : int32_t {
+    kOutputFieldNumber = 1,
+    kEnableColorsFieldNumber = 2,
+  };
+  using Output = ::perfetto::protos::pbzero::ConsoleConfig_Output;
+  static const Output OUTPUT_UNSPECIFIED = ConsoleConfig_Output_OUTPUT_UNSPECIFIED;
+  static const Output OUTPUT_STDOUT = ConsoleConfig_Output_OUTPUT_STDOUT;
+  static const Output OUTPUT_STDERR = ConsoleConfig_Output_OUTPUT_STDERR;
+
+  using FieldMetadata_Output =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ConsoleConfig_Output,
+      ConsoleConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Output kOutput() { return {}; }
+  void set_output(::perfetto::protos::pbzero::ConsoleConfig_Output value) {
+    static constexpr uint32_t field_id = FieldMetadata_Output::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EnableColors =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ConsoleConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EnableColors kEnableColors() { return {}; }
+  void set_enable_colors(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_EnableColors::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/trace_packet_defaults.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACE_PACKET_DEFAULTS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACE_PACKET_DEFAULTS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class PerfSampleDefaults;
+class TrackEventDefaults;
+
+class TracePacketDefaults_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/58, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TracePacketDefaults_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TracePacketDefaults_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TracePacketDefaults_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_timestamp_clock_id() const { return at<58>().valid(); }
+  uint32_t timestamp_clock_id() const { return at<58>().as_uint32(); }
+  bool has_track_event_defaults() const { return at<11>().valid(); }
+  ::protozero::ConstBytes track_event_defaults() const { return at<11>().as_bytes(); }
+  bool has_perf_sample_defaults() const { return at<12>().valid(); }
+  ::protozero::ConstBytes perf_sample_defaults() const { return at<12>().as_bytes(); }
+};
+
+class TracePacketDefaults : public ::protozero::Message {
+ public:
+  using Decoder = TracePacketDefaults_Decoder;
+  enum : int32_t {
+    kTimestampClockIdFieldNumber = 58,
+    kTrackEventDefaultsFieldNumber = 11,
+    kPerfSampleDefaultsFieldNumber = 12,
+  };
+
+  using FieldMetadata_TimestampClockId =
+    ::protozero::proto_utils::FieldMetadata<
+      58,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TracePacketDefaults>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TimestampClockId kTimestampClockId() { return {}; }
+  void set_timestamp_clock_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TimestampClockId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TrackEventDefaults =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TrackEventDefaults,
+      TracePacketDefaults>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrackEventDefaults kTrackEventDefaults() { return {}; }
+  template <typename T = TrackEventDefaults> T* set_track_event_defaults() {
+    return BeginNestedMessage<T>(11);
+  }
+
+
+  using FieldMetadata_PerfSampleDefaults =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PerfSampleDefaults,
+      TracePacketDefaults>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PerfSampleDefaults kPerfSampleDefaults() { return {}; }
+  template <typename T = PerfSampleDefaults> T* set_perf_sample_defaults() {
+    return BeginNestedMessage<T>(12);
+  }
+
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/process_descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_PROCESS_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_PROCESS_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum ProcessDescriptor_ChromeProcessType : int32_t;
+
+enum ProcessDescriptor_ChromeProcessType : int32_t {
+  ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED = 0,
+  ProcessDescriptor_ChromeProcessType_PROCESS_BROWSER = 1,
+  ProcessDescriptor_ChromeProcessType_PROCESS_RENDERER = 2,
+  ProcessDescriptor_ChromeProcessType_PROCESS_UTILITY = 3,
+  ProcessDescriptor_ChromeProcessType_PROCESS_ZYGOTE = 4,
+  ProcessDescriptor_ChromeProcessType_PROCESS_SANDBOX_HELPER = 5,
+  ProcessDescriptor_ChromeProcessType_PROCESS_GPU = 6,
+  ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_PLUGIN = 7,
+  ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER = 8,
+};
+
+const ProcessDescriptor_ChromeProcessType ProcessDescriptor_ChromeProcessType_MIN = ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED;
+const ProcessDescriptor_ChromeProcessType ProcessDescriptor_ChromeProcessType_MAX = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER;
+
+class ProcessDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ProcessDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ProcessDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ProcessDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  int32_t pid() const { return at<1>().as_int32(); }
+  bool has_cmdline() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> cmdline() const { return GetRepeated<::protozero::ConstChars>(2); }
+  bool has_process_name() const { return at<6>().valid(); }
+  ::protozero::ConstChars process_name() const { return at<6>().as_string(); }
+  bool has_process_priority() const { return at<5>().valid(); }
+  int32_t process_priority() const { return at<5>().as_int32(); }
+  bool has_start_timestamp_ns() const { return at<7>().valid(); }
+  int64_t start_timestamp_ns() const { return at<7>().as_int64(); }
+  bool has_chrome_process_type() const { return at<4>().valid(); }
+  int32_t chrome_process_type() const { return at<4>().as_int32(); }
+  bool has_legacy_sort_index() const { return at<3>().valid(); }
+  int32_t legacy_sort_index() const { return at<3>().as_int32(); }
+};
+
+class ProcessDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = ProcessDescriptor_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kCmdlineFieldNumber = 2,
+    kProcessNameFieldNumber = 6,
+    kProcessPriorityFieldNumber = 5,
+    kStartTimestampNsFieldNumber = 7,
+    kChromeProcessTypeFieldNumber = 4,
+    kLegacySortIndexFieldNumber = 3,
+  };
+  using ChromeProcessType = ::perfetto::protos::pbzero::ProcessDescriptor_ChromeProcessType;
+  static const ChromeProcessType PROCESS_UNSPECIFIED = ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED;
+  static const ChromeProcessType PROCESS_BROWSER = ProcessDescriptor_ChromeProcessType_PROCESS_BROWSER;
+  static const ChromeProcessType PROCESS_RENDERER = ProcessDescriptor_ChromeProcessType_PROCESS_RENDERER;
+  static const ChromeProcessType PROCESS_UTILITY = ProcessDescriptor_ChromeProcessType_PROCESS_UTILITY;
+  static const ChromeProcessType PROCESS_ZYGOTE = ProcessDescriptor_ChromeProcessType_PROCESS_ZYGOTE;
+  static const ChromeProcessType PROCESS_SANDBOX_HELPER = ProcessDescriptor_ChromeProcessType_PROCESS_SANDBOX_HELPER;
+  static const ChromeProcessType PROCESS_GPU = ProcessDescriptor_ChromeProcessType_PROCESS_GPU;
+  static const ChromeProcessType PROCESS_PPAPI_PLUGIN = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_PLUGIN;
+  static const ChromeProcessType PROCESS_PPAPI_BROKER = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER;
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cmdline =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cmdline kCmdline() { return {}; }
+  void add_cmdline(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Cmdline::kFieldId, data, size);
+  }
+  void add_cmdline(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cmdline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProcessName =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessName kProcessName() { return {}; }
+  void set_process_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ProcessName::kFieldId, data, size);
+  }
+  void set_process_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProcessName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProcessPriority =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessPriority kProcessPriority() { return {}; }
+  void set_process_priority(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProcessPriority::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StartTimestampNs =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StartTimestampNs kStartTimestampNs() { return {}; }
+  void set_start_timestamp_ns(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_StartTimestampNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChromeProcessType =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ProcessDescriptor_ChromeProcessType,
+      ProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeProcessType kChromeProcessType() { return {}; }
+  void set_chrome_process_type(::perfetto::protos::pbzero::ProcessDescriptor_ChromeProcessType value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChromeProcessType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LegacySortIndex =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LegacySortIndex kLegacySortIndex() { return {}; }
+  void set_legacy_sort_index(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LegacySortIndex::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/thread_descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_THREAD_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_THREAD_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum ThreadDescriptor_ChromeThreadType : int32_t;
+
+enum ThreadDescriptor_ChromeThreadType : int32_t {
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED = 0,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MAIN = 1,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_IO = 2,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_WORKER = 3,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FG_WORKER = 4,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FB_BLOCKING = 5,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_BLOCKING = 6,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_SERVICE = 7,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR = 8,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_VIZ_COMPOSITOR = 9,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR_WORKER = 10,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SERVICE_WORKER = 11,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MEMORY_INFRA = 50,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER = 51,
+};
+
+const ThreadDescriptor_ChromeThreadType ThreadDescriptor_ChromeThreadType_MIN = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED;
+const ThreadDescriptor_ChromeThreadType ThreadDescriptor_ChromeThreadType_MAX = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER;
+
+class ThreadDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ThreadDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ThreadDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ThreadDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  int32_t pid() const { return at<1>().as_int32(); }
+  bool has_tid() const { return at<2>().valid(); }
+  int32_t tid() const { return at<2>().as_int32(); }
+  bool has_thread_name() const { return at<5>().valid(); }
+  ::protozero::ConstChars thread_name() const { return at<5>().as_string(); }
+  bool has_chrome_thread_type() const { return at<4>().valid(); }
+  int32_t chrome_thread_type() const { return at<4>().as_int32(); }
+  bool has_reference_timestamp_us() const { return at<6>().valid(); }
+  int64_t reference_timestamp_us() const { return at<6>().as_int64(); }
+  bool has_reference_thread_time_us() const { return at<7>().valid(); }
+  int64_t reference_thread_time_us() const { return at<7>().as_int64(); }
+  bool has_reference_thread_instruction_count() const { return at<8>().valid(); }
+  int64_t reference_thread_instruction_count() const { return at<8>().as_int64(); }
+  bool has_legacy_sort_index() const { return at<3>().valid(); }
+  int32_t legacy_sort_index() const { return at<3>().as_int32(); }
+};
+
+class ThreadDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = ThreadDescriptor_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kTidFieldNumber = 2,
+    kThreadNameFieldNumber = 5,
+    kChromeThreadTypeFieldNumber = 4,
+    kReferenceTimestampUsFieldNumber = 6,
+    kReferenceThreadTimeUsFieldNumber = 7,
+    kReferenceThreadInstructionCountFieldNumber = 8,
+    kLegacySortIndexFieldNumber = 3,
+  };
+  using ChromeThreadType = ::perfetto::protos::pbzero::ThreadDescriptor_ChromeThreadType;
+  static const ChromeThreadType CHROME_THREAD_UNSPECIFIED = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED;
+  static const ChromeThreadType CHROME_THREAD_MAIN = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MAIN;
+  static const ChromeThreadType CHROME_THREAD_IO = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_IO;
+  static const ChromeThreadType CHROME_THREAD_POOL_BG_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_WORKER;
+  static const ChromeThreadType CHROME_THREAD_POOL_FG_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FG_WORKER;
+  static const ChromeThreadType CHROME_THREAD_POOL_FB_BLOCKING = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FB_BLOCKING;
+  static const ChromeThreadType CHROME_THREAD_POOL_BG_BLOCKING = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_BLOCKING;
+  static const ChromeThreadType CHROME_THREAD_POOL_SERVICE = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_SERVICE;
+  static const ChromeThreadType CHROME_THREAD_COMPOSITOR = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR;
+  static const ChromeThreadType CHROME_THREAD_VIZ_COMPOSITOR = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_VIZ_COMPOSITOR;
+  static const ChromeThreadType CHROME_THREAD_COMPOSITOR_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR_WORKER;
+  static const ChromeThreadType CHROME_THREAD_SERVICE_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SERVICE_WORKER;
+  static const ChromeThreadType CHROME_THREAD_MEMORY_INFRA = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MEMORY_INFRA;
+  static const ChromeThreadType CHROME_THREAD_SAMPLING_PROFILER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER;
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Tid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tid kTid() { return {}; }
+  void set_tid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadName =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadName kThreadName() { return {}; }
+  void set_thread_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ThreadName::kFieldId, data, size);
+  }
+  void set_thread_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChromeThreadType =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ThreadDescriptor_ChromeThreadType,
+      ThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeThreadType kChromeThreadType() { return {}; }
+  void set_chrome_thread_type(::perfetto::protos::pbzero::ThreadDescriptor_ChromeThreadType value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChromeThreadType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReferenceTimestampUs =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReferenceTimestampUs kReferenceTimestampUs() { return {}; }
+  void set_reference_timestamp_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReferenceTimestampUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReferenceThreadTimeUs =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReferenceThreadTimeUs kReferenceThreadTimeUs() { return {}; }
+  void set_reference_thread_time_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReferenceThreadTimeUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReferenceThreadInstructionCount =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReferenceThreadInstructionCount kReferenceThreadInstructionCount() { return {}; }
+  void set_reference_thread_instruction_count(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReferenceThreadInstructionCount::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LegacySortIndex =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LegacySortIndex kLegacySortIndex() { return {}; }
+  void set_legacy_sort_index(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LegacySortIndex::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/console_interceptor.h"
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/hash.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/optional.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_internal.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/common/interceptor_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/interceptor_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/interceptors/console_config.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/interned_data/interned_data.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet_defaults.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/process_descriptor.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/thread_descriptor.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_descriptor.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_event.pbzero.h"
+
+#include <algorithm>
+#include <cmath>
+#include <tuple>
+
+namespace perfetto {
+
+// sRGB color.
+struct ConsoleColor {
+  uint8_t r;
+  uint8_t g;
+  uint8_t b;
+};
+
+namespace {
+
+int g_output_fd_for_testing;
+
+// Google Turbo colormap.
+constexpr std::array<ConsoleColor, 16> kTurboColors = {{
+    ConsoleColor{0x30, 0x12, 0x3b},
+    ConsoleColor{0x40, 0x40, 0xa1},
+    ConsoleColor{0x46, 0x6b, 0xe3},
+    ConsoleColor{0x41, 0x93, 0xfe},
+    ConsoleColor{0x28, 0xbb, 0xeb},
+    ConsoleColor{0x17, 0xdc, 0xc2},
+    ConsoleColor{0x32, 0xf1, 0x97},
+    ConsoleColor{0x6d, 0xfd, 0x62},
+    ConsoleColor{0xa4, 0xfc, 0x3b},
+    ConsoleColor{0xcd, 0xeb, 0x34},
+    ConsoleColor{0xed, 0xcf, 0x39},
+    ConsoleColor{0xfd, 0xab, 0x33},
+    ConsoleColor{0xfa, 0x7d, 0x20},
+    ConsoleColor{0xea, 0x50, 0x0d},
+    ConsoleColor{0xd0, 0x2f, 0x04},
+    ConsoleColor{0xa9, 0x15, 0x01},
+}};
+
+constexpr size_t kHueBits = 4;
+constexpr uint32_t kMaxHue = kTurboColors.size() << kHueBits;
+constexpr uint8_t kLightness = 128u;
+constexpr ConsoleColor kWhiteColor{0xff, 0xff, 0xff};
+
+const char kDim[] = "\x1b[90m";
+const char kDefault[] = "\x1b[39m";
+const char kReset[] = "\x1b[0m";
+
+#define FMT_RGB_SET "\x1b[38;2;%d;%d;%dm"
+#define FMT_RGB_SET_BG "\x1b[48;2;%d;%d;%dm"
+
+ConsoleColor Mix(ConsoleColor a, ConsoleColor b, uint8_t ratio) {
+  return {
+      static_cast<uint8_t>(a.r + (((b.r - a.r) * ratio) >> 8)),
+      static_cast<uint8_t>(a.g + (((b.g - a.g) * ratio) >> 8)),
+      static_cast<uint8_t>(a.b + (((b.b - a.b) * ratio) >> 8)),
+  };
+}
+
+ConsoleColor HueToRGB(uint32_t hue) {
+  PERFETTO_DCHECK(hue < kMaxHue);
+  uint32_t c1 = hue >> kHueBits;
+  uint32_t c2 =
+      std::min(static_cast<uint32_t>(kTurboColors.size() - 1), c1 + 1u);
+  uint32_t ratio = hue & ((1 << kHueBits) - 1);
+  return Mix(kTurboColors[c1], kTurboColors[c2],
+             static_cast<uint8_t>(ratio | (ratio << kHueBits)));
+}
+
+uint32_t CounterToHue(uint32_t counter) {
+  // We split the hue space into 8 segments, reversing the order of bits so
+  // successive counter values will be far from each other.
+  uint32_t reversed =
+      ((counter & 0x7) >> 2) | ((counter & 0x3)) | ((counter & 0x1) << 2);
+  return reversed * kMaxHue / 8;
+}
+
+}  // namespace
+
+class ConsoleInterceptor::Delegate : public TrackEventStateTracker::Delegate {
+ public:
+  explicit Delegate(InterceptorContext&);
+  ~Delegate() override;
+
+  TrackEventStateTracker::SessionState* GetSessionState() override;
+  void OnTrackUpdated(TrackEventStateTracker::Track&) override;
+  void OnTrackEvent(const TrackEventStateTracker::Track&,
+                    const TrackEventStateTracker::ParsedTrackEvent&) override;
+
+ private:
+  using SelfHandle = LockedHandle<ConsoleInterceptor>;
+
+  InterceptorContext& context_;
+  base::Optional<SelfHandle> locked_self_;
+};
+
+ConsoleInterceptor::~ConsoleInterceptor() = default;
+
+ConsoleInterceptor::ThreadLocalState::ThreadLocalState(
+    ThreadLocalStateArgs& args) {
+  if (auto self = args.GetInterceptorLocked()) {
+    start_time_ns = self->start_time_ns_;
+    use_colors = self->use_colors_;
+    fd = self->fd_;
+  }
+}
+
+ConsoleInterceptor::ThreadLocalState::~ThreadLocalState() = default;
+
+ConsoleInterceptor::Delegate::Delegate(InterceptorContext& context)
+    : context_(context) {}
+ConsoleInterceptor::Delegate::~Delegate() = default;
+
+TrackEventStateTracker::SessionState*
+ConsoleInterceptor::Delegate::GetSessionState() {
+  // When the session state is retrieved for the first time, it is cached (and
+  // kept locked) until we return from OnTracePacket. This avoids having to lock
+  // and unlock the instance multiple times per invocation.
+  if (locked_self_.has_value())
+    return &locked_self_.value()->session_state_;
+  locked_self_ =
+      base::make_optional<SelfHandle>(context_.GetInterceptorLocked());
+  return &locked_self_.value()->session_state_;
+}
+
+void ConsoleInterceptor::Delegate::OnTrackUpdated(
+    TrackEventStateTracker::Track& track) {
+  auto track_color = HueToRGB(CounterToHue(track.index));
+  std::array<char, 16> title;
+  if (!track.name.empty()) {
+    snprintf(title.data(), title.size(), "%s", track.name.c_str());
+  } else if (track.pid && track.tid) {
+    snprintf(title.data(), title.size(), "%u:%u",
+             static_cast<uint32_t>(track.pid),
+             static_cast<uint32_t>(track.tid));
+  } else if (track.pid) {
+    snprintf(title.data(), title.size(), "%" PRId64, track.pid);
+  } else {
+    snprintf(title.data(), title.size(), "%" PRIu64, track.uuid);
+  }
+  int title_width = static_cast<int>(title.size());
+
+  auto& tls = context_.GetThreadLocalState();
+  std::array<char, 128> message_prefix{};
+  ssize_t written = 0;
+  if (tls.use_colors) {
+    written = snprintf(message_prefix.data(), message_prefix.size(),
+                       FMT_RGB_SET_BG " %s%s %-*.*s", track_color.r,
+                       track_color.g, track_color.b, kReset, kDim, title_width,
+                       title_width, title.data());
+  } else {
+    written = snprintf(message_prefix.data(), message_prefix.size(), "%-*.*s",
+                       title_width + 2, title_width, title.data());
+  }
+  if (written < 0)
+    written = message_prefix.size();
+  track.user_data.assign(message_prefix.begin(),
+                         message_prefix.begin() + written);
+}
+
+void ConsoleInterceptor::Delegate::OnTrackEvent(
+    const TrackEventStateTracker::Track& track,
+    const TrackEventStateTracker::ParsedTrackEvent& event) {
+  // Start printing.
+  auto& tls = context_.GetThreadLocalState();
+  tls.buffer_pos = 0;
+
+  // Print timestamp and track identifier.
+  SetColor(context_, kDim);
+  Printf(context_, "[%7.3lf] %.*s",
+         static_cast<double>(event.timestamp_ns - tls.start_time_ns) / 1e9,
+         static_cast<int>(track.user_data.size()), track.user_data.data());
+
+  // Print category.
+  Printf(context_, "%-5.*s ",
+         std::min(5, static_cast<int>(event.category.size)),
+         event.category.data);
+
+  // Print stack depth.
+  for (size_t i = 0; i < event.stack_depth; i++) {
+    Printf(context_, "-  ");
+  }
+
+  // Print slice name.
+  auto slice_color = HueToRGB(event.name_hash % kMaxHue);
+  auto highlight_color = Mix(slice_color, kWhiteColor, kLightness);
+  if (event.track_event.type() == protos::pbzero::TrackEvent::TYPE_SLICE_END) {
+    SetColor(context_, kDefault);
+    Printf(context_, "} ");
+  }
+  SetColor(context_, highlight_color);
+  Printf(context_, "%.*s", static_cast<int>(event.name.size), event.name.data);
+  SetColor(context_, kReset);
+  if (event.track_event.type() ==
+      protos::pbzero::TrackEvent::TYPE_SLICE_BEGIN) {
+    SetColor(context_, kDefault);
+    Printf(context_, " {");
+  }
+
+  // Print annotations.
+  if (event.track_event.has_debug_annotations()) {
+    PrintDebugAnnotations(context_, event.track_event, slice_color,
+                          highlight_color);
+  }
+
+  // TODO(skyostil): Print typed arguments.
+
+  // Print duration for longer events.
+  constexpr uint64_t kNsPerMillisecond = 1000000u;
+  if (event.duration_ns >= 10 * kNsPerMillisecond) {
+    SetColor(context_, kDim);
+    Printf(context_, " +%" PRIu64 "ms", event.duration_ns / kNsPerMillisecond);
+  }
+  SetColor(context_, kReset);
+  Printf(context_, "\n");
+}
+
+// static
+void ConsoleInterceptor::Register() {
+  perfetto::protos::gen::InterceptorDescriptor desc;
+  desc.set_name("console");
+  Interceptor<ConsoleInterceptor>::Register(desc);
+}
+
+// static
+void ConsoleInterceptor::SetOutputFdForTesting(int fd) {
+  g_output_fd_for_testing = fd;
+}
+
+void ConsoleInterceptor::OnSetup(const SetupArgs& args) {
+  int fd = STDOUT_FILENO;
+  if (g_output_fd_for_testing)
+    fd = g_output_fd_for_testing;
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
+    !PERFETTO_BUILDFLAG(PERFETTO_OS_WASM)
+  bool use_colors = isatty(fd);
+#else
+  bool use_colors = false;
+#endif
+  protos::pbzero::ConsoleConfig::Decoder config(
+      args.config.interceptor_config().console_config_raw());
+  if (config.has_enable_colors())
+    use_colors = config.enable_colors();
+  if (config.output() == protos::pbzero::ConsoleConfig::OUTPUT_STDOUT) {
+    fd = STDOUT_FILENO;
+  } else if (config.output() == protos::pbzero::ConsoleConfig::OUTPUT_STDERR) {
+    fd = STDERR_FILENO;
+  }
+  fd_ = fd;
+  use_colors_ = use_colors;
+}
+
+void ConsoleInterceptor::OnStart(const StartArgs&) {
+  start_time_ns_ = internal::TrackEventInternal::GetTimeNs();
+}
+
+void ConsoleInterceptor::OnStop(const StopArgs&) {}
+
+// static
+void ConsoleInterceptor::OnTracePacket(InterceptorContext context) {
+  {
+    auto& tls = context.GetThreadLocalState();
+    Delegate delegate(context);
+    perfetto::protos::pbzero::TracePacket::Decoder packet(
+        context.packet_data.data, context.packet_data.size);
+    TrackEventStateTracker::ProcessTracePacket(delegate, tls.sequence_state,
+                                               packet);
+  }  // (Potential) lock scope for session state.
+  Flush(context);
+}
+
+// static
+void ConsoleInterceptor::Printf(InterceptorContext& context,
+                                const char* format,
+                                ...) {
+  auto& tls = context.GetThreadLocalState();
+  ssize_t remaining = static_cast<ssize_t>(tls.message_buffer.size()) -
+                      static_cast<ssize_t>(tls.buffer_pos);
+  int written = 0;
+  if (remaining > 0) {
+    va_list args;
+    va_start(args, format);
+    written = vsnprintf(&tls.message_buffer[tls.buffer_pos],
+                        static_cast<size_t>(remaining), format, args);
+    PERFETTO_DCHECK(written >= 0);
+    va_end(args);
+  }
+
+  // In case of buffer overflow, flush to the fd and write the latest message to
+  // it directly instead.
+  if (remaining <= 0 || written > remaining) {
+    FILE* output = (tls.fd == STDOUT_FILENO) ? stdout : stderr;
+    if (g_output_fd_for_testing) {
+      output = fdopen(dup(g_output_fd_for_testing), "w");
+    }
+    Flush(context);
+    va_list args;
+    va_start(args, format);
+    vfprintf(output, format, args);
+    va_end(args);
+    if (g_output_fd_for_testing) {
+      fclose(output);
+    }
+  } else if (written > 0) {
+    tls.buffer_pos += static_cast<size_t>(written);
+  }
+}
+
+// static
+void ConsoleInterceptor::Flush(InterceptorContext& context) {
+  auto& tls = context.GetThreadLocalState();
+  ssize_t res = base::WriteAll(tls.fd, &tls.message_buffer[0], tls.buffer_pos);
+  PERFETTO_DCHECK(res == static_cast<ssize_t>(tls.buffer_pos));
+  tls.buffer_pos = 0;
+}
+
+// static
+void ConsoleInterceptor::SetColor(InterceptorContext& context,
+                                  const ConsoleColor& color) {
+  auto& tls = context.GetThreadLocalState();
+  if (!tls.use_colors)
+    return;
+  Printf(context, FMT_RGB_SET, color.r, color.g, color.b);
+}
+
+// static
+void ConsoleInterceptor::SetColor(InterceptorContext& context,
+                                  const char* color) {
+  auto& tls = context.GetThreadLocalState();
+  if (!tls.use_colors)
+    return;
+  Printf(context, "%s", color);
+}
+
+// static
+void ConsoleInterceptor::PrintDebugAnnotations(
+    InterceptorContext& context,
+    const protos::pbzero::TrackEvent_Decoder& track_event,
+    const ConsoleColor& slice_color,
+    const ConsoleColor& highlight_color) {
+  SetColor(context, slice_color);
+  Printf(context, "(");
+
+  bool is_first = true;
+  for (auto it = track_event.debug_annotations(); it; it++) {
+    perfetto::protos::pbzero::DebugAnnotation::Decoder annotation(*it);
+    SetColor(context, slice_color);
+    if (!is_first)
+      Printf(context, ", ");
+
+    PrintDebugAnnotationName(context, annotation);
+    Printf(context, ":");
+
+    SetColor(context, highlight_color);
+    PrintDebugAnnotationValue(context, annotation);
+
+    is_first = false;
+  }
+  SetColor(context, slice_color);
+  Printf(context, ")");
+}
+
+// static
+void ConsoleInterceptor::PrintDebugAnnotationName(
+    InterceptorContext& context,
+    const perfetto::protos::pbzero::DebugAnnotation::Decoder& annotation) {
+  auto& tls = context.GetThreadLocalState();
+  protozero::ConstChars name{};
+  if (annotation.name_iid()) {
+    name.data =
+        tls.sequence_state.debug_annotation_names[annotation.name_iid()].data();
+    name.size =
+        tls.sequence_state.debug_annotation_names[annotation.name_iid()].size();
+  } else if (annotation.has_name()) {
+    name.data = annotation.name().data;
+    name.size = annotation.name().size;
+  }
+  Printf(context, "%.*s", static_cast<int>(name.size), name.data);
+}
+
+// static
+void ConsoleInterceptor::PrintDebugAnnotationValue(
+    InterceptorContext& context,
+    const perfetto::protos::pbzero::DebugAnnotation::Decoder& annotation) {
+  if (annotation.has_bool_value()) {
+    Printf(context, "%s", annotation.bool_value() ? "true" : "false");
+  } else if (annotation.has_uint_value()) {
+    Printf(context, "%" PRIu64, annotation.uint_value());
+  } else if (annotation.has_int_value()) {
+    Printf(context, "%" PRId64, annotation.int_value());
+  } else if (annotation.has_double_value()) {
+    Printf(context, "%f", annotation.double_value());
+  } else if (annotation.has_string_value()) {
+    Printf(context, "%.*s", static_cast<int>(annotation.string_value().size),
+           annotation.string_value().data);
+  } else if (annotation.has_pointer_value()) {
+    Printf(context, "%p", reinterpret_cast<void*>(annotation.pointer_value()));
+  } else if (annotation.has_legacy_json_value()) {
+    Printf(context, "%.*s",
+           static_cast<int>(annotation.legacy_json_value().size),
+           annotation.legacy_json_value().data);
+  } else if (annotation.has_dict_entries()) {
+    Printf(context, "{");
+    bool is_first = true;
+    for (auto it = annotation.dict_entries(); it; ++it) {
+      if (!is_first)
+        Printf(context, ", ");
+      perfetto::protos::pbzero::DebugAnnotation::Decoder key_value(*it);
+      PrintDebugAnnotationName(context, key_value);
+      Printf(context, ":");
+      PrintDebugAnnotationValue(context, key_value);
+      is_first = false;
+    }
+    Printf(context, "}");
+  } else if (annotation.has_array_values()) {
+    Printf(context, "[");
+    bool is_first = true;
+    for (auto it = annotation.array_values(); it; ++it) {
+      if (!is_first)
+        Printf(context, ", ");
+      perfetto::protos::pbzero::DebugAnnotation::Decoder key_value(*it);
+      PrintDebugAnnotationValue(context, key_value);
+      is_first = false;
+    }
+    Printf(context, "]");
+  } else {
+    Printf(context, "{}");
+  }
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/data_source.cc
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/data_source.h"
+
+namespace perfetto {
+
+DataSourceBase::StopArgs::~StopArgs() = default;
+DataSourceBase::~DataSourceBase() = default;
+void DataSourceBase::OnSetup(const SetupArgs&) {}
+void DataSourceBase::OnStart(const StartArgs&) {}
+void DataSourceBase::OnStop(const StopArgs&) {}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/debug_annotation.cc
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/debug_annotation.h"
+
+// gen_amalgamated expanded: #include "perfetto/tracing/traced_value.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h"
+
+namespace perfetto {
+
+DebugAnnotation::~DebugAnnotation() = default;
+
+void DebugAnnotation::WriteIntoTracedValue(TracedValue context) const {
+  Add(context.context_);
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/event_context.cc
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/event_context.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/trace/interned_data/interned_data.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_event.pbzero.h"
+
+namespace perfetto {
+
+EventContext::EventContext(
+    EventContext::TracePacketHandle trace_packet,
+    internal::TrackEventIncrementalState* incremental_state)
+    : trace_packet_(std::move(trace_packet)),
+      event_(trace_packet_->set_track_event()),
+      incremental_state_(incremental_state) {}
+
+EventContext::~EventContext() {
+  if (!trace_packet_)
+    return;
+
+  // When the track event is finalized (i.e., the context is destroyed), we
+  // should flush any newly seen interned data to the trace. The data has
+  // earlier been written to a heap allocated protobuf message
+  // (|serialized_interned_data|). Here we just need to flush it to the main
+  // trace.
+  auto& serialized_interned_data = incremental_state_->serialized_interned_data;
+  if (PERFETTO_LIKELY(serialized_interned_data.empty()))
+    return;
+
+  auto ranges = serialized_interned_data.GetRanges();
+  trace_packet_->AppendScatteredBytes(
+      perfetto::protos::pbzero::TracePacket::kInternedDataFieldNumber,
+      &ranges[0], ranges.size());
+
+  // Reset the message but keep one buffer allocated for future use.
+  serialized_interned_data.Reset();
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/interceptor.cc
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/interceptor.h"
+
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_muxer.h"
+
+namespace perfetto {
+
+InterceptorBase::~InterceptorBase() = default;
+InterceptorBase::ThreadLocalState::~ThreadLocalState() = default;
+
+// static
+void InterceptorBase::RegisterImpl(
+    const InterceptorDescriptor& descriptor,
+    std::function<std::unique_ptr<InterceptorBase>()> factory,
+    InterceptorBase::TLSFactory tls_factory,
+    InterceptorBase::TracePacketCallback on_trace_packet) {
+  auto* tracing_impl = internal::TracingMuxer::Get();
+  tracing_impl->RegisterInterceptor(descriptor, factory, tls_factory,
+                                    on_trace_packet);
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/internal/checked_scope.cc
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/checked_scope.h"
+
+#include <utility>
+
+namespace perfetto {
+namespace internal {
+
+#if PERFETTO_DCHECK_IS_ON()
+CheckedScope::CheckedScope(CheckedScope* parent_scope)
+    : parent_scope_(parent_scope) {
+  if (parent_scope_) {
+    PERFETTO_DCHECK(parent_scope_->is_active());
+    parent_scope_->set_is_active(false);
+  }
+}
+
+CheckedScope::~CheckedScope() {
+  Reset();
+}
+
+void CheckedScope::Reset() {
+  if (!is_active_) {
+    // The only case when inactive scope could be destroyed is when Reset() was
+    // called explicitly or the contents of the object were moved away.
+    PERFETTO_DCHECK(deleted_);
+    return;
+  }
+  is_active_ = false;
+  deleted_ = true;
+  if (parent_scope_)
+    parent_scope_->set_is_active(true);
+}
+
+CheckedScope::CheckedScope(CheckedScope&& other) {
+  *this = std::move(other);
+}
+
+CheckedScope& CheckedScope::operator=(CheckedScope&& other) {
+  is_active_ = other.is_active_;
+  parent_scope_ = other.parent_scope_;
+  deleted_ = other.deleted_;
+
+  other.is_active_ = false;
+  other.parent_scope_ = nullptr;
+  other.deleted_ = true;
+
+  return *this;
+}
+#endif
+
+}  // namespace internal
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/internal/interceptor_trace_writer.cc
+// gen_amalgamated begin header: include/perfetto/tracing/internal/interceptor_trace_writer.h
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_INTERCEPTOR_TRACE_WRITER_H_
+#define INCLUDE_PERFETTO_TRACING_INTERNAL_INTERCEPTOR_TRACE_WRITER_H_
+
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/interceptor.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
+
+namespace perfetto {
+namespace internal {
+
+// A heap-backed trace writer used to reroute trace packets to an interceptor.
+class InterceptorTraceWriter : public TraceWriterBase {
+ public:
+  InterceptorTraceWriter(std::unique_ptr<InterceptorBase::ThreadLocalState> tls,
+                         InterceptorBase::TracePacketCallback packet_callback,
+                         DataSourceStaticState* static_state,
+                         uint32_t instance_index);
+  ~InterceptorTraceWriter() override;
+
+  // TraceWriterBase implementation.
+  protozero::MessageHandle<protos::pbzero::TracePacket> NewTracePacket()
+      override;
+  void Flush(std::function<void()> callback = {}) override;
+  uint64_t written() const override;
+
+ private:
+  std::unique_ptr<InterceptorBase::ThreadLocalState> tls_;
+  InterceptorBase::TracePacketCallback packet_callback_;
+
+  protozero::HeapBuffered<protos::pbzero::TracePacket> cur_packet_;
+  uint64_t bytes_written_ = 0;
+
+  // Static state of the data source we are intercepting.
+  DataSourceStaticState* const static_state_;
+
+  // Index of the data source tracing session which we are intercepting
+  // (0...kMaxDataSourceInstances - 1). Used to look up this interceptor's
+  // session state (i.e., the Interceptor class instance) in the
+  // DataSourceStaticState::instances array.
+  const uint32_t instance_index_;
+
+  const uint32_t sequence_id_;
+
+  static std::atomic<uint32_t> next_sequence_id_;
+};
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_INTERNAL_INTERCEPTOR_TRACE_WRITER_H_
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/interceptor_trace_writer.h"
+
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
+
+namespace perfetto {
+namespace internal {
+
+// static
+std::atomic<uint32_t> InterceptorTraceWriter::next_sequence_id_{};
+
+InterceptorTraceWriter::InterceptorTraceWriter(
+    std::unique_ptr<InterceptorBase::ThreadLocalState> tls,
+    InterceptorBase::TracePacketCallback packet_callback,
+    DataSourceStaticState* static_state,
+    uint32_t instance_index)
+    : tls_(std::move(tls)),
+      packet_callback_(std::move(packet_callback)),
+      static_state_(static_state),
+      instance_index_(instance_index),
+      sequence_id_(++next_sequence_id_) {}
+
+InterceptorTraceWriter::~InterceptorTraceWriter() = default;
+
+protozero::MessageHandle<protos::pbzero::TracePacket>
+InterceptorTraceWriter::NewTracePacket() {
+  Flush();
+  auto packet = TraceWriter::TracePacketHandle(cur_packet_.get());
+  packet->set_trusted_packet_sequence_id(sequence_id_);
+  return packet;
+}
+
+void InterceptorTraceWriter::Flush(std::function<void()> callback) {
+  if (!cur_packet_.empty()) {
+    InterceptorBase::TracePacketCallbackArgs args{};
+    args.static_state = static_state_;
+    args.instance_index = instance_index_;
+    args.tls = tls_.get();
+
+    const auto& slices = cur_packet_.GetSlices();
+    if (slices.size() == 1) {
+      // Fast path: the current packet fits into a single slice.
+      auto slice_range = slices.begin()->GetUsedRange();
+      args.packet_data = protozero::ConstBytes{
+          slice_range.begin,
+          static_cast<size_t>(slice_range.end - slice_range.begin)};
+      bytes_written_ += static_cast<uint64_t>(args.packet_data.size);
+      packet_callback_(std::move(args));
+    } else {
+      // Fallback: stitch together multiple slices.
+      auto stitched_data = cur_packet_.SerializeAsArray();
+      args.packet_data =
+          protozero::ConstBytes{stitched_data.data(), stitched_data.size()};
+      bytes_written_ += static_cast<uint64_t>(stitched_data.size());
+      packet_callback_(std::move(args));
+    }
+    cur_packet_.Reset();
+  }
+  if (callback)
+    callback();
+}
+
+uint64_t InterceptorTraceWriter::written() const {
+  return bytes_written_;
+}
+
+}  // namespace internal
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/internal/tracing_backend_fake.cc
+// gen_amalgamated begin header: include/perfetto/tracing/internal/tracing_backend_fake.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_TRACING_BACKEND_FAKE_H_
+#define INCLUDE_PERFETTO_TRACING_INTERNAL_TRACING_BACKEND_FAKE_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/tracing_backend.h"
+
+namespace perfetto {
+namespace internal {
+
+// A built-in implementation of TracingBackend that fails any attempt to create
+// a tracing session.
+class PERFETTO_EXPORT TracingBackendFake : public TracingBackend {
+ public:
+  static TracingBackend* GetInstance();
+
+  // TracingBackend implementation.
+  std::unique_ptr<ProducerEndpoint> ConnectProducer(
+      const ConnectProducerArgs&) override;
+  std::unique_ptr<ConsumerEndpoint> ConnectConsumer(
+      const ConnectConsumerArgs&) override;
+
+ private:
+  TracingBackendFake();
+};
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_INTERNAL_TRACING_BACKEND_FAKE_H_
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_backend_fake.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/consumer.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/producer.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
+
+namespace perfetto {
+namespace internal {
+
+namespace {
+
+class UnsupportedProducerEndpoint : public ProducerEndpoint {
+ public:
+  UnsupportedProducerEndpoint(Producer* producer, base::TaskRunner* task_runner)
+      : producer_(producer), task_runner_(task_runner) {
+    // The SDK will attempt to reconnect the producer, so instead we allow it
+    // to connect successfully, but never start any sessions.
+    auto weak_ptr = weak_ptr_factory_.GetWeakPtr();
+    task_runner_->PostTask([weak_ptr] {
+      if (weak_ptr)
+        weak_ptr->producer_->OnConnect();
+    });
+  }
+  ~UnsupportedProducerEndpoint() override { producer_->OnDisconnect(); }
+
+  void RegisterDataSource(const DataSourceDescriptor&) override {}
+  void UnregisterDataSource(const std::string& /*name*/) override {}
+
+  void RegisterTraceWriter(uint32_t /*writer_id*/,
+                           uint32_t /*target_buffer*/) override {}
+  void UnregisterTraceWriter(uint32_t /*writer_id*/) override {}
+
+  void CommitData(const CommitDataRequest&,
+                  CommitDataCallback callback = {}) override {
+    callback();
+  }
+
+  SharedMemory* shared_memory() const override { return nullptr; }
+  size_t shared_buffer_page_size_kb() const override { return 0; }
+
+  std::unique_ptr<TraceWriter> CreateTraceWriter(
+      BufferID /*target_buffer*/,
+      BufferExhaustedPolicy = BufferExhaustedPolicy::kDefault) override {
+    return nullptr;
+  }
+
+  SharedMemoryArbiter* MaybeSharedMemoryArbiter() override { return nullptr; }
+  bool IsShmemProvidedByProducer() const override { return false; }
+
+  void NotifyFlushComplete(FlushRequestID) override {}
+  void NotifyDataSourceStarted(DataSourceInstanceID) override {}
+  void NotifyDataSourceStopped(DataSourceInstanceID) override {}
+  void ActivateTriggers(const std::vector<std::string>&) override {}
+
+  void Sync(std::function<void()> callback) override { callback(); }
+
+ private:
+  Producer* const producer_;
+  base::TaskRunner* const task_runner_;
+  base::WeakPtrFactory<UnsupportedProducerEndpoint> weak_ptr_factory_{
+      this};  // Keep last.
+};
+
+class UnsupportedConsumerEndpoint : public ConsumerEndpoint {
+ public:
+  UnsupportedConsumerEndpoint(Consumer* consumer, base::TaskRunner* task_runner)
+      : consumer_(consumer), task_runner_(task_runner) {
+    // The SDK will not to reconnect the consumer, so we just disconnect it
+    // immediately, which will cancel the tracing session.
+    auto weak_this = weak_ptr_factory_.GetWeakPtr();
+    task_runner_->PostTask([weak_this] {
+      if (weak_this)
+        weak_this->consumer_->OnDisconnect();
+    });
+  }
+  ~UnsupportedConsumerEndpoint() override = default;
+
+  void EnableTracing(const TraceConfig&,
+                     base::ScopedFile = base::ScopedFile()) override {}
+  void ChangeTraceConfig(const TraceConfig&) override {}
+
+  void StartTracing() override {}
+  void DisableTracing() override {}
+
+  void Flush(uint32_t /*timeout_ms*/, FlushCallback callback) override {
+    callback(/*success=*/false);
+  }
+
+  void ReadBuffers() override {}
+  void FreeBuffers() override {}
+
+  void Detach(const std::string& /*key*/) override {}
+  void Attach(const std::string& /*key*/) override {}
+
+  void GetTraceStats() override {}
+  void ObserveEvents(uint32_t /*events_mask*/) override {}
+  void QueryServiceState(QueryServiceStateCallback) override {}
+  void QueryCapabilities(QueryCapabilitiesCallback) override {}
+
+  void SaveTraceForBugreport(SaveTraceForBugreportCallback) override {}
+
+ private:
+  Consumer* const consumer_;
+  base::TaskRunner* const task_runner_;
+  base::WeakPtrFactory<UnsupportedConsumerEndpoint> weak_ptr_factory_{
+      this};  // Keep last.
+};
+
+}  // namespace
+
+// static
+TracingBackend* TracingBackendFake::GetInstance() {
+  static auto* instance = new TracingBackendFake();
+  return instance;
+}
+
+TracingBackendFake::TracingBackendFake() = default;
+
+std::unique_ptr<ProducerEndpoint> TracingBackendFake::ConnectProducer(
+    const ConnectProducerArgs& args) {
+  return std::unique_ptr<ProducerEndpoint>(
+      new UnsupportedProducerEndpoint(args.producer, args.task_runner));
+}
+
+std::unique_ptr<ConsumerEndpoint> TracingBackendFake::ConnectConsumer(
+    const ConnectConsumerArgs& args) {
+  return std::unique_ptr<ConsumerEndpoint>(
+      new UnsupportedConsumerEndpoint(args.consumer, args.task_runner));
+}
+
+}  // namespace internal
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/internal/tracing_muxer_fake.cc
+// gen_amalgamated begin header: src/tracing/internal/tracing_muxer_fake.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_INTERNAL_TRACING_MUXER_FAKE_H_
+#define SRC_TRACING_INTERNAL_TRACING_MUXER_FAKE_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_muxer.h"
+
+namespace perfetto {
+namespace internal {
+
+// An always-fail implementation of TracingMuxer. Before tracing has been
+// initialiazed, all muxer operations will route here and fail with a helpful
+// error message. This is to avoid introducing null checks in
+// performance-critical parts of the codebase.
+class TracingMuxerFake : public TracingMuxer {
+  class FakePlatform : public Platform {
+   public:
+    ~FakePlatform() override;
+    ThreadLocalObject* GetOrCreateThreadLocalObject() override;
+    std::unique_ptr<base::TaskRunner> CreateTaskRunner(
+        const CreateTaskRunnerArgs&) override;
+    std::string GetCurrentProcessName() override;
+
+    static FakePlatform instance;
+  };
+
+ public:
+  TracingMuxerFake() : TracingMuxer(&FakePlatform::instance) {}
+
+  static constexpr TracingMuxerFake* Get() {
+#if PERFETTO_HAS_NO_DESTROY()
+    return &instance;
+#else
+    return nullptr;
+#endif
+  }
+
+  // TracingMuxer implementation.
+  bool RegisterDataSource(const DataSourceDescriptor&,
+                          DataSourceFactory,
+                          DataSourceStaticState*) override;
+  std::unique_ptr<TraceWriterBase> CreateTraceWriter(
+      DataSourceStaticState*,
+      uint32_t data_source_instance_index,
+      DataSourceState*,
+      BufferExhaustedPolicy buffer_exhausted_policy) override;
+  void DestroyStoppedTraceWritersForCurrentThread() override;
+  void RegisterInterceptor(const InterceptorDescriptor&,
+                           InterceptorFactory,
+                           InterceptorBase::TLSFactory,
+                           InterceptorBase::TracePacketCallback) override;
+
+ private:
+  static TracingMuxerFake instance;
+};
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // SRC_TRACING_INTERNAL_TRACING_MUXER_FAKE_H_
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/tracing/internal/tracing_muxer_fake.h"
+
+namespace perfetto {
+namespace internal {
+namespace {
+
+PERFETTO_NORETURN void FailUninitialized() {
+  PERFETTO_FATAL(
+      "Tracing not initialized. Call perfetto::Tracing::Initialize() first.");
+}
+
+}  // namespace
+
+#if PERFETTO_HAS_NO_DESTROY()
+// static
+PERFETTO_NO_DESTROY TracingMuxerFake::FakePlatform
+    TracingMuxerFake::FakePlatform::instance{};
+// static
+PERFETTO_NO_DESTROY TracingMuxerFake TracingMuxerFake::instance{};
+#endif  // PERFETTO_HAS_NO_DESTROY()
+
+TracingMuxerFake::FakePlatform::~FakePlatform() = default;
+
+Platform::ThreadLocalObject*
+TracingMuxerFake::FakePlatform::GetOrCreateThreadLocalObject() {
+  FailUninitialized();
+}
+
+std::unique_ptr<base::TaskRunner>
+TracingMuxerFake::FakePlatform::CreateTaskRunner(const CreateTaskRunnerArgs&) {
+  FailUninitialized();
+}
+
+std::string TracingMuxerFake::FakePlatform::GetCurrentProcessName() {
+  FailUninitialized();
+}
+
+bool TracingMuxerFake::RegisterDataSource(const DataSourceDescriptor&,
+                                          DataSourceFactory,
+                                          DataSourceStaticState*) {
+  FailUninitialized();
+}
+
+std::unique_ptr<TraceWriterBase> TracingMuxerFake::CreateTraceWriter(
+    DataSourceStaticState*,
+    uint32_t,
+    DataSourceState*,
+    BufferExhaustedPolicy) {
+  FailUninitialized();
+}
+
+void TracingMuxerFake::DestroyStoppedTraceWritersForCurrentThread() {
+  FailUninitialized();
+}
+
+void TracingMuxerFake::RegisterInterceptor(
+    const InterceptorDescriptor&,
+    InterceptorFactory,
+    InterceptorBase::TLSFactory,
+    InterceptorBase::TracePacketCallback) {
+  FailUninitialized();
+}
+
+}  // namespace internal
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/internal/tracing_muxer_impl.cc
+// gen_amalgamated begin header: src/tracing/internal/tracing_muxer_impl.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_INTERNAL_TRACING_MUXER_IMPL_H_
+#define SRC_TRACING_INTERNAL_TRACING_MUXER_IMPL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <array>
+#include <atomic>
+#include <bitset>
+#include <list>
+#include <map>
+#include <memory>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/consumer.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/producer.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/trace_config.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_muxer.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/tracing.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/common/interceptor_descriptor.gen.h"
+
+namespace perfetto {
+
+class ConsumerEndpoint;
+class DataSourceBase;
+class ProducerEndpoint;
+class TraceWriterBase;
+class TracingBackend;
+class TracingSession;
+struct TracingInitArgs;
+
+namespace base {
+class TaskRunner;
+}
+
+namespace internal {
+
+struct DataSourceStaticState;
+
+// This class acts as a bridge between the public API and the TracingBackend(s).
+// It exposes a simplified view of the world to the API methods handling all the
+// bookkeeping to map data source instances and trace writers to the various
+// backends. It deals with N data sources, M backends (1 backend == 1 tracing
+// service == 1 producer connection) and T concurrent tracing sessions.
+//
+// Handing data source registration and start/stop flows [producer side]:
+// ----------------------------------------------------------------------
+// 1. The API client subclasses perfetto::DataSource and calls
+//    DataSource::Register<MyDataSource>(). In turn this calls into the
+//    TracingMuxer.
+// 2. The tracing muxer iterates through all the backends (1 backend == 1
+//    service == 1 producer connection) and registers the data source on each
+//    backend.
+// 3. When any (services behind a) backend starts tracing and requests to start
+//    that specific data source, the TracingMuxerImpl constructs a new instance
+//    of MyDataSource and calls the OnStart() method.
+//
+// Controlling trace and retrieving trace data [consumer side]:
+// ------------------------------------------------------------
+// 1. The API client calls Tracing::NewTrace(), returns a RAII TracingSession
+//    object.
+// 2. NewTrace() calls into internal::TracingMuxer(Impl). TracingMuxer
+//    subclasses the TracingSession object (TracingSessionImpl) and returns it.
+// 3. The tracing muxer identifies the backend (according to the args passed to
+//    NewTrace), creates a new Consumer and connects to it.
+// 4. When the API client calls Start()/Stop()/ReadTrace() methods, the
+//    TracingMuxer forwards them to the consumer associated to the
+//    TracingSession. Likewise for callbacks coming from the consumer-side of
+//    the service.
+class TracingMuxerImpl : public TracingMuxer {
+ public:
+  // This is different than TracingSessionID because it's global across all
+  // backends. TracingSessionID is global only within the scope of one service.
+  using TracingSessionGlobalID = uint64_t;
+
+  static void InitializeInstance(const TracingInitArgs&);
+
+  // TracingMuxer implementation.
+  bool RegisterDataSource(const DataSourceDescriptor&,
+                          DataSourceFactory,
+                          DataSourceStaticState*) override;
+  std::unique_ptr<TraceWriterBase> CreateTraceWriter(
+      DataSourceStaticState*,
+      uint32_t data_source_instance_index,
+      DataSourceState*,
+      BufferExhaustedPolicy buffer_exhausted_policy) override;
+  void DestroyStoppedTraceWritersForCurrentThread() override;
+  void RegisterInterceptor(const InterceptorDescriptor&,
+                           InterceptorFactory,
+                           InterceptorBase::TLSFactory,
+                           InterceptorBase::TracePacketCallback) override;
+
+  std::unique_ptr<TracingSession> CreateTracingSession(BackendType);
+
+  // Producer-side bookkeeping methods.
+  void UpdateDataSourcesOnAllBackends();
+  void SetupDataSource(TracingBackendId,
+                       uint32_t backend_connection_id,
+                       DataSourceInstanceID,
+                       const DataSourceConfig&);
+  void StartDataSource(TracingBackendId, DataSourceInstanceID);
+  void StopDataSource_AsyncBegin(TracingBackendId, DataSourceInstanceID);
+  void StopDataSource_AsyncEnd(TracingBackendId, DataSourceInstanceID);
+  void ClearDataSourceIncrementalState(TracingBackendId, DataSourceInstanceID);
+  void SyncProducersForTesting();
+
+  // Consumer-side bookkeeping methods.
+  void SetupTracingSession(TracingSessionGlobalID,
+                           const std::shared_ptr<TraceConfig>&,
+                           base::ScopedFile trace_fd = base::ScopedFile());
+  void StartTracingSession(TracingSessionGlobalID);
+  void ChangeTracingSessionConfig(TracingSessionGlobalID, const TraceConfig&);
+  void StopTracingSession(TracingSessionGlobalID);
+  void DestroyTracingSession(TracingSessionGlobalID);
+  void FlushTracingSession(TracingSessionGlobalID,
+                           uint32_t,
+                           std::function<void(bool)>);
+  void ReadTracingSessionData(
+      TracingSessionGlobalID,
+      std::function<void(TracingSession::ReadTraceCallbackArgs)>);
+  void GetTraceStats(TracingSessionGlobalID,
+                     TracingSession::GetTraceStatsCallback);
+  void QueryServiceState(TracingSessionGlobalID,
+                         TracingSession::QueryServiceStateCallback);
+
+  // Sets the batching period to |batch_commits_duration_ms| on the backends
+  // with type |backend_type|.
+  void SetBatchCommitsDurationForTesting(uint32_t batch_commits_duration_ms,
+                                         BackendType backend_type);
+
+  // Enables direct SMB patching on the backends with type |backend_type| (see
+  // SharedMemoryArbiter::EnableDirectSMBPatching). Returns true if the
+  // operation succeeded for all backends with type |backend_type|, false
+  // otherwise.
+  bool EnableDirectSMBPatchingForTesting(BackendType backend_type);
+
+  void SetMaxProducerReconnectionsForTesting(uint32_t count);
+
+ private:
+  // For each TracingBackend we create and register one ProducerImpl instance.
+  // This talks to the producer-side of the service, gets start/stop requests
+  // from it and routes them to the registered data sources.
+  // One ProducerImpl == one backend == one tracing service.
+  // This class is needed to disambiguate callbacks coming from different
+  // services. TracingMuxerImpl can't directly implement the Producer interface
+  // because the Producer virtual methods don't allow to identify the service.
+  class ProducerImpl : public Producer {
+   public:
+    ProducerImpl(TracingMuxerImpl*,
+                 TracingBackendId,
+                 uint32_t shmem_batch_commits_duration_ms);
+    ~ProducerImpl() override;
+
+    void Initialize(std::unique_ptr<ProducerEndpoint> endpoint);
+    void RegisterDataSource(const DataSourceDescriptor&,
+                            DataSourceFactory,
+                            DataSourceStaticState*);
+
+    // perfetto::Producer implementation.
+    void OnConnect() override;
+    void OnDisconnect() override;
+    void OnTracingSetup() override;
+    void SetupDataSource(DataSourceInstanceID,
+                         const DataSourceConfig&) override;
+    void StartDataSource(DataSourceInstanceID,
+                         const DataSourceConfig&) override;
+    void StopDataSource(DataSourceInstanceID) override;
+    void Flush(FlushRequestID, const DataSourceInstanceID*, size_t) override;
+    void ClearIncrementalState(const DataSourceInstanceID*, size_t) override;
+
+    void SweepDeadServices();
+
+    PERFETTO_THREAD_CHECKER(thread_checker_)
+    TracingMuxerImpl* const muxer_;
+    TracingBackendId const backend_id_;
+    bool connected_ = false;
+    uint32_t connection_id_ = 0;
+
+    const uint32_t shmem_batch_commits_duration_ms_ = 0;
+
+    // Set of data sources that have been actually registered on this producer.
+    // This can be a subset of the global |data_sources_|, because data sources
+    // can register before the producer is fully connected.
+    std::bitset<kMaxDataSources> registered_data_sources_{};
+
+    // A collection of disconnected service endpoints. Since trace writers on
+    // arbitrary threads might continue writing data to disconnected services,
+    // we keep the old services around and periodically try to clean up ones
+    // that no longer have any writers (see SweepDeadServices).
+    std::list<std::shared_ptr<ProducerEndpoint>> dead_services_;
+
+    // The currently active service endpoint is maintained as an atomic shared
+    // pointer so it won't get deleted from underneath threads that are creating
+    // trace writers. At any given time one endpoint can be shared (and thus
+    // kept alive) by the |service_| pointer, an entry in |dead_services_| and
+    // as a pointer on the stack in CreateTraceWriter() (on an arbitrary
+    // thread). The endpoint is never shared outside ProducerImpl itself.
+    //
+    // WARNING: Any *write* access to this variable or any *read* access from a
+    // non-muxer thread must be done through std::atomic_{load,store} to avoid
+    // data races.
+    std::shared_ptr<ProducerEndpoint> service_;  // Keep last.
+  };
+
+  // For each TracingSession created by the API client (Tracing::NewTrace() we
+  // create and register one ConsumerImpl instance.
+  // This talks to the consumer-side of the service, gets end-of-trace and
+  // on-trace-data callbacks and routes them to the API client callbacks.
+  // This class is needed to disambiguate callbacks coming from different
+  // tracing sessions.
+  class ConsumerImpl : public Consumer {
+   public:
+    ConsumerImpl(TracingMuxerImpl*,
+                 BackendType,
+                 TracingBackendId,
+                 TracingSessionGlobalID);
+    ~ConsumerImpl() override;
+
+    void Initialize(std::unique_ptr<ConsumerEndpoint> endpoint);
+
+    // perfetto::Consumer implementation.
+    void OnConnect() override;
+    void OnDisconnect() override;
+    void OnTracingDisabled(const std::string& error) override;
+    void OnTraceData(std::vector<TracePacket>, bool has_more) override;
+    void OnDetach(bool success) override;
+    void OnAttach(bool success, const TraceConfig&) override;
+    void OnTraceStats(bool success, const TraceStats&) override;
+    void OnObservableEvents(const ObservableEvents&) override;
+
+    void NotifyStartComplete();
+    void NotifyError(const TracingError&);
+    void NotifyStopComplete();
+
+    // Will eventually inform the |muxer_| when it is safe to remove |this|.
+    void Disconnect();
+
+    TracingMuxerImpl* const muxer_;
+    BackendType const backend_type_;
+    TracingBackendId const backend_id_;
+    TracingSessionGlobalID const session_id_;
+    bool connected_ = false;
+
+    // This is to handle the case where the Setup call from the API client
+    // arrives before the consumer has connected. In this case we keep around
+    // the config and check if we have it after connection.
+    bool start_pending_ = false;
+
+    // Similarly if the session is stopped before the consumer was connected, we
+    // need to wait until the session has started before stopping it.
+    bool stop_pending_ = false;
+
+    // Similarly we need to buffer a call to get trace statistics if the
+    // consumer wasn't connected yet.
+    bool get_trace_stats_pending_ = false;
+
+    // Whether this session was already stopped. This will happen in response to
+    // Stop{,Blocking}, but also if the service stops the session for us
+    // automatically (e.g., when there are no data sources).
+    bool stopped_ = false;
+
+    // shared_ptr because it's posted across threads. This is to avoid copying
+    // it more than once.
+    std::shared_ptr<TraceConfig> trace_config_;
+    base::ScopedFile trace_fd_;
+
+    // If the API client passes a callback to start, we should invoke this when
+    // NotifyStartComplete() is invoked.
+    std::function<void()> start_complete_callback_;
+
+    // An internal callback used to implement StartBlocking().
+    std::function<void()> blocking_start_complete_callback_;
+
+    // If the API client passes a callback to get notification about the
+    // errors, we should invoke this when NotifyError() is invoked.
+    std::function<void(TracingError)> error_callback_;
+
+    // If the API client passes a callback to stop, we should invoke this when
+    // OnTracingDisabled() is invoked.
+    std::function<void()> stop_complete_callback_;
+
+    // An internal callback used to implement StopBlocking().
+    std::function<void()> blocking_stop_complete_callback_;
+
+    // Callback passed to ReadTrace().
+    std::function<void(TracingSession::ReadTraceCallbackArgs)>
+        read_trace_callback_;
+
+    // Callback passed to GetTraceStats().
+    TracingSession::GetTraceStatsCallback get_trace_stats_callback_;
+
+    // Callback for a pending call to QueryServiceState().
+    TracingSession::QueryServiceStateCallback query_service_state_callback_;
+
+    // The states of all data sources in this tracing session. |true| means the
+    // data source has started tracing.
+    using DataSourceHandle = std::pair<std::string, std::string>;
+    std::map<DataSourceHandle, bool> data_source_states_;
+
+    std::unique_ptr<ConsumerEndpoint> service_;  // Keep before last.
+    PERFETTO_THREAD_CHECKER(thread_checker_)     // Keep last.
+  };
+
+  // This object is returned to API clients when they call
+  // Tracing::CreateTracingSession().
+  class TracingSessionImpl : public TracingSession {
+   public:
+    TracingSessionImpl(TracingMuxerImpl*, TracingSessionGlobalID, BackendType);
+    ~TracingSessionImpl() override;
+    void Setup(const TraceConfig&, int fd) override;
+    void Start() override;
+    void StartBlocking() override;
+    void SetOnStartCallback(std::function<void()>) override;
+    void SetOnErrorCallback(std::function<void(TracingError)>) override;
+    void Stop() override;
+    void StopBlocking() override;
+    void Flush(std::function<void(bool)>, uint32_t timeout_ms) override;
+    void ReadTrace(ReadTraceCallback) override;
+    void SetOnStopCallback(std::function<void()>) override;
+    void GetTraceStats(GetTraceStatsCallback) override;
+    void QueryServiceState(QueryServiceStateCallback) override;
+    void ChangeTraceConfig(const TraceConfig&) override;
+
+   private:
+    TracingMuxerImpl* const muxer_;
+    TracingSessionGlobalID const session_id_;
+    BackendType const backend_type_;
+  };
+
+  struct RegisteredDataSource {
+    DataSourceDescriptor descriptor;
+    DataSourceFactory factory{};
+    DataSourceStaticState* static_state = nullptr;
+  };
+
+  struct RegisteredInterceptor {
+    protos::gen::InterceptorDescriptor descriptor;
+    InterceptorFactory factory{};
+    InterceptorBase::TLSFactory tls_factory{};
+    InterceptorBase::TracePacketCallback packet_callback{};
+  };
+
+  struct RegisteredBackend {
+    // Backends are supposed to have static lifetime.
+    TracingBackend* backend = nullptr;
+    TracingBackendId id = 0;
+    BackendType type{};
+
+    TracingBackend::ConnectProducerArgs producer_conn_args;
+    std::unique_ptr<ProducerImpl> producer;
+
+    // The calling code can request more than one concurrently active tracing
+    // session for the same backend. We need to create one consumer per session.
+    std::vector<std::unique_ptr<ConsumerImpl>> consumers;
+  };
+
+  explicit TracingMuxerImpl(const TracingInitArgs&);
+  void Initialize(const TracingInitArgs& args);
+  ConsumerImpl* FindConsumer(TracingSessionGlobalID session_id);
+  void InitializeConsumer(TracingSessionGlobalID session_id);
+  void OnConsumerDisconnected(ConsumerImpl* consumer);
+  void OnProducerDisconnected(ProducerImpl* producer);
+
+  struct FindDataSourceRes {
+    FindDataSourceRes() = default;
+    FindDataSourceRes(DataSourceStaticState* a, DataSourceState* b, uint32_t c)
+        : static_state(a), internal_state(b), instance_idx(c) {}
+    explicit operator bool() const { return !!internal_state; }
+
+    DataSourceStaticState* static_state = nullptr;
+    DataSourceState* internal_state = nullptr;
+    uint32_t instance_idx = 0;
+  };
+  FindDataSourceRes FindDataSource(TracingBackendId, DataSourceInstanceID);
+
+  std::unique_ptr<base::TaskRunner> task_runner_;
+  std::vector<RegisteredDataSource> data_sources_;
+  std::vector<RegisteredBackend> backends_;
+  std::vector<RegisteredInterceptor> interceptors_;
+  TracingPolicy* policy_ = nullptr;
+
+  std::atomic<TracingSessionGlobalID> next_tracing_session_id_{};
+
+  // Maximum number of times we will try to reconnect producer backend.
+  // Should only be modified for testing purposes.
+  std::atomic<uint32_t> max_producer_reconnections_{100u};
+
+  PERFETTO_THREAD_CHECKER(thread_checker_)
+};
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // SRC_TRACING_INTERNAL_TRACING_MUXER_IMPL_H_
+// gen_amalgamated begin header: include/perfetto/ext/tracing/core/trace_stats.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_STATS_H_
+#define INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_STATS_H_
+
+// Creates the aliases in the ::perfetto namespace, doing things like:
+// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
+// See comments in forward_decls.h for the historical reasons of this
+// indirection layer.
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/common/trace_stats.gen.h"
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACING_CORE_TRACE_STATS_H_
+// gen_amalgamated begin header: include/perfetto/tracing/core/tracing_service_state.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_STATE_H_
+#define INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_STATE_H_
+
+// Creates the aliases in the ::perfetto namespace, doing things like:
+// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
+// See comments in forward_decls.h for the historical reasons of this
+// indirection layer.
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_state.gen.h"
+
+#endif  // INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_STATE_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/tracing/internal/tracing_muxer_impl.h"
+
+#include <algorithm>
+#include <atomic>
+#include <mutex>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/hash.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/waitable_event.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_stats.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/buffer_exhausted_policy.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_config.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/tracing_service_state.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/data_source.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/data_source_internal.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/interceptor_trace_writer.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_backend_fake.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/tracing.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/tracing_backend.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/config/interceptor_config.gen.h"
+
+// gen_amalgamated expanded: #include "src/tracing/internal/tracing_muxer_fake.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <io.h>  // For dup()
+#else
+#include <unistd.h>  // For dup()
+#endif
+
+namespace perfetto {
+namespace internal {
+
+namespace {
+
+// A task runner which prevents calls to DataSource::Trace() while an operation
+// is in progress. Used to guard against unexpected re-entrancy where the
+// user-provided task runner implementation tries to enter a trace point under
+// the hood.
+class NonReentrantTaskRunner : public base::TaskRunner {
+ public:
+  NonReentrantTaskRunner(TracingMuxer* muxer,
+                         std::unique_ptr<base::TaskRunner> task_runner)
+      : muxer_(muxer), task_runner_(std::move(task_runner)) {}
+
+  // base::TaskRunner implementation.
+  void PostTask(std::function<void()> task) override {
+    CallWithGuard([&] { task_runner_->PostTask(std::move(task)); });
+  }
+
+  void PostDelayedTask(std::function<void()> task, uint32_t delay_ms) override {
+    CallWithGuard(
+        [&] { task_runner_->PostDelayedTask(std::move(task), delay_ms); });
+  }
+
+  void AddFileDescriptorWatch(base::PlatformHandle fd,
+                              std::function<void()> callback) override {
+    CallWithGuard(
+        [&] { task_runner_->AddFileDescriptorWatch(fd, std::move(callback)); });
+  }
+
+  void RemoveFileDescriptorWatch(base::PlatformHandle fd) override {
+    CallWithGuard([&] { task_runner_->RemoveFileDescriptorWatch(fd); });
+  }
+
+  bool RunsTasksOnCurrentThread() const override {
+    bool result;
+    CallWithGuard([&] { result = task_runner_->RunsTasksOnCurrentThread(); });
+    return result;
+  }
+
+ private:
+  template <typename T>
+  void CallWithGuard(T lambda) const {
+    auto* root_tls = muxer_->GetOrCreateTracingTLS();
+    if (PERFETTO_UNLIKELY(root_tls->is_in_trace_point)) {
+      lambda();
+      return;
+    }
+    ScopedReentrancyAnnotator scoped_annotator(*root_tls);
+    lambda();
+  }
+
+  TracingMuxer* const muxer_;
+  std::unique_ptr<base::TaskRunner> task_runner_;
+};
+
+class StopArgsImpl : public DataSourceBase::StopArgs {
+ public:
+  std::function<void()> HandleStopAsynchronously() const override {
+    auto closure = std::move(async_stop_closure);
+    async_stop_closure = std::function<void()>();
+    return closure;
+  }
+
+  mutable std::function<void()> async_stop_closure;
+};
+
+uint64_t ComputeConfigHash(const DataSourceConfig& config) {
+  base::Hash hasher;
+  std::string config_bytes = config.SerializeAsString();
+  hasher.Update(config_bytes.data(), config_bytes.size());
+  return hasher.digest();
+}
+
+}  // namespace
+
+// ----- Begin of TracingMuxerImpl::ProducerImpl
+TracingMuxerImpl::ProducerImpl::ProducerImpl(
+    TracingMuxerImpl* muxer,
+    TracingBackendId backend_id,
+    uint32_t shmem_batch_commits_duration_ms)
+    : muxer_(muxer),
+      backend_id_(backend_id),
+      shmem_batch_commits_duration_ms_(shmem_batch_commits_duration_ms) {}
+
+TracingMuxerImpl::ProducerImpl::~ProducerImpl() = default;
+
+void TracingMuxerImpl::ProducerImpl::Initialize(
+    std::unique_ptr<ProducerEndpoint> endpoint) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DCHECK(!connected_);
+  connection_id_++;
+
+  // Adopt the endpoint into a shared pointer so that we can safely share it
+  // across threads that create trace writers. The custom deleter function
+  // ensures that the endpoint is always destroyed on the muxer's thread. (Note
+  // that |task_runner| is assumed to outlive tracing sessions on all threads.)
+  auto* task_runner = muxer_->task_runner_.get();
+  auto deleter = [task_runner](ProducerEndpoint* e) {
+    task_runner->PostTask([e] { delete e; });
+  };
+  std::shared_ptr<ProducerEndpoint> service(endpoint.release(), deleter);
+  // This atomic store is needed because another thread might be concurrently
+  // creating a trace writer using the previous (disconnected) |service_|. See
+  // CreateTraceWriter().
+  std::atomic_store(&service_, std::move(service));
+  // Don't try to use the service here since it may not have connected yet. See
+  // OnConnect().
+}
+
+void TracingMuxerImpl::ProducerImpl::OnConnect() {
+  PERFETTO_DLOG("Producer connected");
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DCHECK(!connected_);
+  connected_ = true;
+  muxer_->UpdateDataSourcesOnAllBackends();
+}
+
+void TracingMuxerImpl::ProducerImpl::OnDisconnect() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  connected_ = false;
+  // Active data sources for this producer will be stopped by
+  // DestroyStoppedTraceWritersForCurrentThread() since the reconnected producer
+  // will have a different connection id (even before it has finished
+  // connecting).
+  registered_data_sources_.reset();
+  // Keep the old service around as a dead connection in case it has active
+  // trace writers. We can't clear |service_| here because other threads may be
+  // concurrently creating new trace writers. The reconnection below will
+  // atomically swap the new service in place of the old one.
+  dead_services_.push_back(service_);
+  // Try reconnecting the producer.
+  muxer_->OnProducerDisconnected(this);
+}
+
+void TracingMuxerImpl::ProducerImpl::OnTracingSetup() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  service_->MaybeSharedMemoryArbiter()->SetBatchCommitsDuration(
+      shmem_batch_commits_duration_ms_);
+}
+
+void TracingMuxerImpl::ProducerImpl::SetupDataSource(
+    DataSourceInstanceID id,
+    const DataSourceConfig& cfg) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  muxer_->SetupDataSource(backend_id_, connection_id_, id, cfg);
+}
+
+void TracingMuxerImpl::ProducerImpl::StartDataSource(DataSourceInstanceID id,
+                                                     const DataSourceConfig&) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  muxer_->StartDataSource(backend_id_, id);
+  service_->NotifyDataSourceStarted(id);
+}
+
+void TracingMuxerImpl::ProducerImpl::StopDataSource(DataSourceInstanceID id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  muxer_->StopDataSource_AsyncBegin(backend_id_, id);
+}
+
+void TracingMuxerImpl::ProducerImpl::Flush(FlushRequestID flush_id,
+                                           const DataSourceInstanceID*,
+                                           size_t) {
+  // Flush is not plumbed for now, we just ack straight away.
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  service_->NotifyFlushComplete(flush_id);
+}
+
+void TracingMuxerImpl::ProducerImpl::ClearIncrementalState(
+    const DataSourceInstanceID* instances,
+    size_t instance_count) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  for (size_t inst_idx = 0; inst_idx < instance_count; inst_idx++) {
+    muxer_->ClearDataSourceIncrementalState(backend_id_, instances[inst_idx]);
+  }
+}
+
+void TracingMuxerImpl::ProducerImpl::SweepDeadServices() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto is_unused = [](const std::shared_ptr<ProducerEndpoint>& endpoint) {
+    auto* arbiter = endpoint->MaybeSharedMemoryArbiter();
+    return !arbiter || arbiter->TryShutdown();
+  };
+  for (auto it = dead_services_.begin(); it != dead_services_.end();) {
+    auto next_it = it;
+    next_it++;
+    if (is_unused(*it)) {
+      dead_services_.erase(it);
+    }
+    it = next_it;
+  }
+}
+
+// ----- End of TracingMuxerImpl::ProducerImpl methods.
+
+// ----- Begin of TracingMuxerImpl::ConsumerImpl
+TracingMuxerImpl::ConsumerImpl::ConsumerImpl(TracingMuxerImpl* muxer,
+                                             BackendType backend_type,
+                                             TracingBackendId backend_id,
+                                             TracingSessionGlobalID session_id)
+    : muxer_(muxer),
+      backend_type_(backend_type),
+      backend_id_(backend_id),
+      session_id_(session_id) {}
+
+TracingMuxerImpl::ConsumerImpl::~ConsumerImpl() = default;
+
+void TracingMuxerImpl::ConsumerImpl::Initialize(
+    std::unique_ptr<ConsumerEndpoint> endpoint) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  service_ = std::move(endpoint);
+  // Don't try to use the service here since it may not have connected yet. See
+  // OnConnect().
+}
+
+void TracingMuxerImpl::ConsumerImpl::OnConnect() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DCHECK(!connected_);
+  connected_ = true;
+
+  // Observe data source instance events so we get notified when tracing starts.
+  service_->ObserveEvents(ObservableEvents::TYPE_DATA_SOURCES_INSTANCES |
+                          ObservableEvents::TYPE_ALL_DATA_SOURCES_STARTED);
+
+  // If the API client configured and started tracing before we connected,
+  // tell the backend about it now.
+  if (trace_config_)
+    muxer_->SetupTracingSession(session_id_, trace_config_);
+  if (start_pending_)
+    muxer_->StartTracingSession(session_id_);
+  if (get_trace_stats_pending_) {
+    auto callback = std::move(get_trace_stats_callback_);
+    get_trace_stats_callback_ = nullptr;
+    muxer_->GetTraceStats(session_id_, std::move(callback));
+  }
+  if (query_service_state_callback_) {
+    auto callback = std::move(query_service_state_callback_);
+    query_service_state_callback_ = nullptr;
+    muxer_->QueryServiceState(session_id_, std::move(callback));
+  }
+  if (stop_pending_)
+    muxer_->StopTracingSession(session_id_);
+}
+
+void TracingMuxerImpl::ConsumerImpl::OnDisconnect() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+  if (!connected_ && backend_type_ == kSystemBackend) {
+    PERFETTO_ELOG(
+        "Unable to connect to the system tracing service as a consumer. On "
+        "Android, use the \"perfetto\" command line tool instead to start "
+        "system-wide tracing sessions");
+  }
+#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+
+  // Notify the client about disconnection.
+  NotifyError(TracingError{TracingError::kDisconnected, "Peer disconnected"});
+
+  // Make sure the client doesn't hang in a blocking start/stop because of the
+  // disconnection.
+  NotifyStartComplete();
+  NotifyStopComplete();
+
+  // It shouldn't be necessary to call StopTracingSession. If we get this call
+  // it means that the service did shutdown before us, so there is no point
+  // trying it to ask it to stop the session. We should just remember to cleanup
+  // the consumer vector.
+  connected_ = false;
+
+  // Notify the muxer that it is safe to destroy |this|. This is needed because
+  // the ConsumerEndpoint stored in |service_| requires that |this| be safe to
+  // access until OnDisconnect() is called.
+  muxer_->OnConsumerDisconnected(this);
+}
+
+void TracingMuxerImpl::ConsumerImpl::Disconnect() {
+  // This is weird and deserves a comment.
+  //
+  // When we called the ConnectConsumer method on the service it returns
+  // us a ConsumerEndpoint which we stored in |service_|, however this
+  // ConsumerEndpoint holds a pointer to the ConsumerImpl pointed to by
+  // |this|. Part of the API contract to TracingService::ConnectConsumer is that
+  // the ConsumerImpl pointer has to be valid until the
+  // ConsumerImpl::OnDisconnect method is called. Therefore we reset the
+  // ConsumerEndpoint |service_|. Eventually this will call
+  // ConsumerImpl::OnDisconnect and we will inform the muxer it is safe to
+  // call the destructor of |this|.
+  service_.reset();
+}
+
+void TracingMuxerImpl::ConsumerImpl::OnTracingDisabled(
+    const std::string& error) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DCHECK(!stopped_);
+  stopped_ = true;
+
+  if (!error.empty())
+    NotifyError(TracingError{TracingError::kTracingFailed, error});
+
+  // If we're still waiting for the start event, fire it now. This may happen if
+  // there are no active data sources in the session.
+  NotifyStartComplete();
+  NotifyStopComplete();
+}
+
+void TracingMuxerImpl::ConsumerImpl::NotifyStartComplete() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (start_complete_callback_) {
+    muxer_->task_runner_->PostTask(std::move(start_complete_callback_));
+    start_complete_callback_ = nullptr;
+  }
+  if (blocking_start_complete_callback_) {
+    muxer_->task_runner_->PostTask(
+        std::move(blocking_start_complete_callback_));
+    blocking_start_complete_callback_ = nullptr;
+  }
+}
+
+void TracingMuxerImpl::ConsumerImpl::NotifyError(const TracingError& error) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (error_callback_) {
+    muxer_->task_runner_->PostTask(
+        std::bind(std::move(error_callback_), error));
+  }
+}
+
+void TracingMuxerImpl::ConsumerImpl::NotifyStopComplete() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (stop_complete_callback_) {
+    muxer_->task_runner_->PostTask(std::move(stop_complete_callback_));
+    stop_complete_callback_ = nullptr;
+  }
+  if (blocking_stop_complete_callback_) {
+    muxer_->task_runner_->PostTask(std::move(blocking_stop_complete_callback_));
+    blocking_stop_complete_callback_ = nullptr;
+  }
+}
+
+void TracingMuxerImpl::ConsumerImpl::OnTraceData(
+    std::vector<TracePacket> packets,
+    bool has_more) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!read_trace_callback_)
+    return;
+
+  size_t capacity = 0;
+  for (const auto& packet : packets) {
+    // 16 is an over-estimation of the proto preamble size
+    capacity += packet.size() + 16;
+  }
+
+  // The shared_ptr is to avoid making a copy of the buffer when PostTask-ing.
+  std::shared_ptr<std::vector<char>> buf(new std::vector<char>());
+  buf->reserve(capacity);
+  for (auto& packet : packets) {
+    char* start;
+    size_t size;
+    std::tie(start, size) = packet.GetProtoPreamble();
+    buf->insert(buf->end(), start, start + size);
+    for (auto& slice : packet.slices()) {
+      const auto* slice_data = reinterpret_cast<const char*>(slice.start);
+      buf->insert(buf->end(), slice_data, slice_data + slice.size);
+    }
+  }
+
+  auto callback = read_trace_callback_;
+  muxer_->task_runner_->PostTask([callback, buf, has_more] {
+    TracingSession::ReadTraceCallbackArgs callback_arg{};
+    callback_arg.data = buf->empty() ? nullptr : &(*buf)[0];
+    callback_arg.size = buf->size();
+    callback_arg.has_more = has_more;
+    callback(callback_arg);
+  });
+
+  if (!has_more)
+    read_trace_callback_ = nullptr;
+}
+
+void TracingMuxerImpl::ConsumerImpl::OnObservableEvents(
+    const ObservableEvents& events) {
+  if (events.instance_state_changes_size()) {
+    for (const auto& state_change : events.instance_state_changes()) {
+      DataSourceHandle handle{state_change.producer_name(),
+                              state_change.data_source_name()};
+      data_source_states_[handle] =
+          state_change.state() ==
+          ObservableEvents::DATA_SOURCE_INSTANCE_STATE_STARTED;
+    }
+  }
+
+  if (events.instance_state_changes_size() ||
+      events.all_data_sources_started()) {
+    // Data sources are first reported as being stopped before starting, so once
+    // all the data sources we know about have started we can declare tracing
+    // begun. In the case where there are no matching data sources for the
+    // session, the service will report the all_data_sources_started() event
+    // without adding any instances (only since Android S / Perfetto v10.0).
+    if (start_complete_callback_ || blocking_start_complete_callback_) {
+      bool all_data_sources_started = std::all_of(
+          data_source_states_.cbegin(), data_source_states_.cend(),
+          [](std::pair<DataSourceHandle, bool> state) { return state.second; });
+      if (all_data_sources_started)
+        NotifyStartComplete();
+    }
+  }
+}
+
+void TracingMuxerImpl::ConsumerImpl::OnTraceStats(
+    bool success,
+    const TraceStats& trace_stats) {
+  if (!get_trace_stats_callback_)
+    return;
+  TracingSession::GetTraceStatsCallbackArgs callback_arg{};
+  callback_arg.success = success;
+  callback_arg.trace_stats_data = trace_stats.SerializeAsArray();
+  muxer_->task_runner_->PostTask(
+      std::bind(std::move(get_trace_stats_callback_), std::move(callback_arg)));
+  get_trace_stats_callback_ = nullptr;
+}
+
+// The callbacks below are not used.
+void TracingMuxerImpl::ConsumerImpl::OnDetach(bool) {}
+void TracingMuxerImpl::ConsumerImpl::OnAttach(bool, const TraceConfig&) {}
+// ----- End of TracingMuxerImpl::ConsumerImpl
+
+// ----- Begin of TracingMuxerImpl::TracingSessionImpl
+
+// TracingSessionImpl is the RAII object returned to API clients when they
+// invoke Tracing::CreateTracingSession. They use it for starting/stopping
+// tracing.
+
+TracingMuxerImpl::TracingSessionImpl::TracingSessionImpl(
+    TracingMuxerImpl* muxer,
+    TracingSessionGlobalID session_id,
+    BackendType backend_type)
+    : muxer_(muxer), session_id_(session_id), backend_type_(backend_type) {}
+
+// Can be destroyed from any thread.
+TracingMuxerImpl::TracingSessionImpl::~TracingSessionImpl() {
+  auto* muxer = muxer_;
+  auto session_id = session_id_;
+  muxer->task_runner_->PostTask(
+      [muxer, session_id] { muxer->DestroyTracingSession(session_id); });
+}
+
+// Can be called from any thread.
+void TracingMuxerImpl::TracingSessionImpl::Setup(const TraceConfig& cfg,
+                                                 int fd) {
+  auto* muxer = muxer_;
+  auto session_id = session_id_;
+  std::shared_ptr<TraceConfig> trace_config(new TraceConfig(cfg));
+  if (fd >= 0) {
+    base::ignore_result(backend_type_);  // For -Wunused in the amalgamation.
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    if (backend_type_ != kInProcessBackend) {
+      PERFETTO_FATAL(
+          "Passing a file descriptor to TracingSession::Setup() is only "
+          "supported with the kInProcessBackend on Windows. Use "
+          "TracingSession::ReadTrace() instead");
+    }
+#endif
+    trace_config->set_write_into_file(true);
+    fd = dup(fd);
+  }
+  muxer->task_runner_->PostTask([muxer, session_id, trace_config, fd] {
+    muxer->SetupTracingSession(session_id, trace_config, base::ScopedFile(fd));
+  });
+}
+
+// Can be called from any thread.
+void TracingMuxerImpl::TracingSessionImpl::Start() {
+  auto* muxer = muxer_;
+  auto session_id = session_id_;
+  muxer->task_runner_->PostTask(
+      [muxer, session_id] { muxer->StartTracingSession(session_id); });
+}
+
+// Can be called from any thread.
+void TracingMuxerImpl::TracingSessionImpl::ChangeTraceConfig(
+    const TraceConfig& cfg) {
+  auto* muxer = muxer_;
+  auto session_id = session_id_;
+  muxer->task_runner_->PostTask([muxer, session_id, cfg] {
+    muxer->ChangeTracingSessionConfig(session_id, cfg);
+  });
+}
+
+// Can be called from any thread except the service thread.
+void TracingMuxerImpl::TracingSessionImpl::StartBlocking() {
+  PERFETTO_DCHECK(!muxer_->task_runner_->RunsTasksOnCurrentThread());
+  auto* muxer = muxer_;
+  auto session_id = session_id_;
+  base::WaitableEvent tracing_started;
+  muxer->task_runner_->PostTask([muxer, session_id, &tracing_started] {
+    auto* consumer = muxer->FindConsumer(session_id);
+    if (!consumer) {
+      // TODO(skyostil): Signal an error to the user.
+      tracing_started.Notify();
+      return;
+    }
+    PERFETTO_DCHECK(!consumer->blocking_start_complete_callback_);
+    consumer->blocking_start_complete_callback_ = [&] {
+      tracing_started.Notify();
+    };
+    muxer->StartTracingSession(session_id);
+  });
+  tracing_started.Wait();
+}
+
+// Can be called from any thread.
+void TracingMuxerImpl::TracingSessionImpl::Flush(
+    std::function<void(bool)> user_callback,
+    uint32_t timeout_ms) {
+  auto* muxer = muxer_;
+  auto session_id = session_id_;
+  muxer->task_runner_->PostTask([muxer, session_id, timeout_ms, user_callback] {
+    auto* consumer = muxer->FindConsumer(session_id);
+    if (!consumer) {
+      std::move(user_callback)(false);
+      return;
+    }
+    muxer->FlushTracingSession(session_id, timeout_ms,
+                               std::move(user_callback));
+  });
+}
+
+// Can be called from any thread.
+void TracingMuxerImpl::TracingSessionImpl::Stop() {
+  auto* muxer = muxer_;
+  auto session_id = session_id_;
+  muxer->task_runner_->PostTask(
+      [muxer, session_id] { muxer->StopTracingSession(session_id); });
+}
+
+// Can be called from any thread except the service thread.
+void TracingMuxerImpl::TracingSessionImpl::StopBlocking() {
+  PERFETTO_DCHECK(!muxer_->task_runner_->RunsTasksOnCurrentThread());
+  auto* muxer = muxer_;
+  auto session_id = session_id_;
+  base::WaitableEvent tracing_stopped;
+  muxer->task_runner_->PostTask([muxer, session_id, &tracing_stopped] {
+    auto* consumer = muxer->FindConsumer(session_id);
+    if (!consumer) {
+      // TODO(skyostil): Signal an error to the user.
+      tracing_stopped.Notify();
+      return;
+    }
+    PERFETTO_DCHECK(!consumer->blocking_stop_complete_callback_);
+    consumer->blocking_stop_complete_callback_ = [&] {
+      tracing_stopped.Notify();
+    };
+    muxer->StopTracingSession(session_id);
+  });
+  tracing_stopped.Wait();
+}
+
+// Can be called from any thread.
+void TracingMuxerImpl::TracingSessionImpl::ReadTrace(ReadTraceCallback cb) {
+  auto* muxer = muxer_;
+  auto session_id = session_id_;
+  muxer->task_runner_->PostTask([muxer, session_id, cb] {
+    muxer->ReadTracingSessionData(session_id, std::move(cb));
+  });
+}
+
+// Can be called from any thread.
+void TracingMuxerImpl::TracingSessionImpl::SetOnStartCallback(
+    std::function<void()> cb) {
+  auto* muxer = muxer_;
+  auto session_id = session_id_;
+  muxer->task_runner_->PostTask([muxer, session_id, cb] {
+    auto* consumer = muxer->FindConsumer(session_id);
+    if (!consumer)
+      return;
+    consumer->start_complete_callback_ = cb;
+  });
+}
+
+// Can be called from any thread
+void TracingMuxerImpl::TracingSessionImpl::SetOnErrorCallback(
+    std::function<void(TracingError)> cb) {
+  auto* muxer = muxer_;
+  auto session_id = session_id_;
+  muxer->task_runner_->PostTask([muxer, session_id, cb] {
+    auto* consumer = muxer->FindConsumer(session_id);
+    if (!consumer) {
+      // Notify the client about concurrent disconnection of the session.
+      if (cb)
+        cb(TracingError{TracingError::kDisconnected, "Peer disconnected"});
+      return;
+    }
+    consumer->error_callback_ = cb;
+  });
+}
+
+// Can be called from any thread.
+void TracingMuxerImpl::TracingSessionImpl::SetOnStopCallback(
+    std::function<void()> cb) {
+  auto* muxer = muxer_;
+  auto session_id = session_id_;
+  muxer->task_runner_->PostTask([muxer, session_id, cb] {
+    auto* consumer = muxer->FindConsumer(session_id);
+    if (!consumer)
+      return;
+    consumer->stop_complete_callback_ = cb;
+  });
+}
+
+// Can be called from any thread.
+void TracingMuxerImpl::TracingSessionImpl::GetTraceStats(
+    GetTraceStatsCallback cb) {
+  auto* muxer = muxer_;
+  auto session_id = session_id_;
+  muxer->task_runner_->PostTask([muxer, session_id, cb] {
+    muxer->GetTraceStats(session_id, std::move(cb));
+  });
+}
+
+// Can be called from any thread.
+void TracingMuxerImpl::TracingSessionImpl::QueryServiceState(
+    QueryServiceStateCallback cb) {
+  auto* muxer = muxer_;
+  auto session_id = session_id_;
+  muxer->task_runner_->PostTask([muxer, session_id, cb] {
+    muxer->QueryServiceState(session_id, std::move(cb));
+  });
+}
+
+// ----- End of TracingMuxerImpl::TracingSessionImpl
+
+// static
+TracingMuxer* TracingMuxer::instance_ = TracingMuxerFake::Get();
+
+// This is called by perfetto::Tracing::Initialize().
+// Can be called on any thread. Typically, but not necessarily, that will be
+// the embedder's main thread.
+TracingMuxerImpl::TracingMuxerImpl(const TracingInitArgs& args)
+    : TracingMuxer(args.platform ? args.platform
+                                 : Platform::GetDefaultPlatform()) {
+  PERFETTO_DETACH_FROM_THREAD(thread_checker_);
+  instance_ = this;
+
+  // Create the thread where muxer, producers and service will live.
+  task_runner_.reset(
+      new NonReentrantTaskRunner(this, platform_->CreateTaskRunner({})));
+
+  // Run the initializer on that thread.
+  task_runner_->PostTask([this, args] { Initialize(args); });
+}
+
+void TracingMuxerImpl::Initialize(const TracingInitArgs& args) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);  // Rebind the thread checker.
+
+  policy_ = args.tracing_policy;
+
+  auto add_backend = [this, &args](TracingBackend* backend, BackendType type) {
+    if (!backend) {
+      // We skip the log in release builds because the *_backend_fake.cc code
+      // has already an ELOG before returning a nullptr.
+      PERFETTO_DLOG("Backend creation failed, type %d", static_cast<int>(type));
+      return;
+    }
+    TracingBackendId backend_id = backends_.size();
+    backends_.emplace_back();
+    RegisteredBackend& rb = backends_.back();
+    rb.backend = backend;
+    rb.id = backend_id;
+    rb.type = type;
+    rb.producer.reset(new ProducerImpl(this, backend_id,
+                                       args.shmem_batch_commits_duration_ms));
+    rb.producer_conn_args.producer = rb.producer.get();
+    rb.producer_conn_args.producer_name = platform_->GetCurrentProcessName();
+    rb.producer_conn_args.task_runner = task_runner_.get();
+    rb.producer_conn_args.shmem_size_hint_bytes =
+        args.shmem_size_hint_kb * 1024;
+    rb.producer_conn_args.shmem_page_size_hint_bytes =
+        args.shmem_page_size_hint_kb * 1024;
+    rb.producer->Initialize(rb.backend->ConnectProducer(rb.producer_conn_args));
+  };
+
+  if (args.backends & kSystemBackend) {
+    PERFETTO_CHECK(args.system_backend_factory_);
+    add_backend(args.system_backend_factory_(), kSystemBackend);
+  }
+
+  if (args.backends & kInProcessBackend) {
+    PERFETTO_CHECK(args.in_process_backend_factory_);
+    add_backend(args.in_process_backend_factory_(), kInProcessBackend);
+  }
+
+  if (args.backends & kCustomBackend) {
+    PERFETTO_CHECK(args.custom_backend);
+    add_backend(args.custom_backend, kCustomBackend);
+  }
+
+  if (args.backends & ~(kSystemBackend | kInProcessBackend | kCustomBackend)) {
+    PERFETTO_FATAL("Unsupported tracing backend type");
+  }
+
+  // Fallback backend for consumer creation for an unsupported backend type.
+  // This backend simply fails any attempt to start a tracing session.
+  // NOTE: This backend instance has to be added last.
+  add_backend(internal::TracingBackendFake::GetInstance(),
+              BackendType::kUnspecifiedBackend);
+}
+
+// Can be called from any thread (but not concurrently).
+bool TracingMuxerImpl::RegisterDataSource(
+    const DataSourceDescriptor& descriptor,
+    DataSourceFactory factory,
+    DataSourceStaticState* static_state) {
+  // Ignore repeated registrations.
+  if (static_state->index != kMaxDataSources)
+    return true;
+
+  static std::atomic<uint32_t> last_id{};
+  uint32_t new_index = last_id++;
+  if (new_index >= kMaxDataSources) {
+    PERFETTO_DLOG(
+        "RegisterDataSource failed: too many data sources already registered");
+    return false;
+  }
+
+  // Initialize the static state.
+  static_assert(sizeof(static_state->instances[0]) >= sizeof(DataSourceState),
+                "instances[] size mismatch");
+  for (size_t i = 0; i < static_state->instances.size(); i++)
+    new (&static_state->instances[i]) DataSourceState{};
+
+  static_state->index = new_index;
+
+  task_runner_->PostTask([this, descriptor, factory, static_state] {
+    data_sources_.emplace_back();
+    RegisteredDataSource& rds = data_sources_.back();
+    rds.descriptor = descriptor;
+    rds.factory = factory;
+    rds.static_state = static_state;
+    UpdateDataSourcesOnAllBackends();
+  });
+  return true;
+}
+
+// Can be called from any thread (but not concurrently).
+void TracingMuxerImpl::RegisterInterceptor(
+    const InterceptorDescriptor& descriptor,
+    InterceptorFactory factory,
+    InterceptorBase::TLSFactory tls_factory,
+    InterceptorBase::TracePacketCallback packet_callback) {
+  task_runner_->PostTask(
+      [this, descriptor, factory, tls_factory, packet_callback] {
+        // Ignore repeated registrations.
+        for (const auto& interceptor : interceptors_) {
+          if (interceptor.descriptor.name() == descriptor.name()) {
+            PERFETTO_DCHECK(interceptor.tls_factory == tls_factory);
+            PERFETTO_DCHECK(interceptor.packet_callback == packet_callback);
+            return;
+          }
+        }
+        // Only allow certain interceptors for now.
+        if (descriptor.name() != "test_interceptor" &&
+            descriptor.name() != "console") {
+          PERFETTO_ELOG(
+              "Interceptors are experimental. If you want to use them, please "
+              "get in touch with the project maintainers "
+              "(https://perfetto.dev/docs/contributing/"
+              "getting-started#community).");
+          return;
+        }
+        interceptors_.emplace_back();
+        RegisteredInterceptor& interceptor = interceptors_.back();
+        interceptor.descriptor = descriptor;
+        interceptor.factory = factory;
+        interceptor.tls_factory = tls_factory;
+        interceptor.packet_callback = packet_callback;
+      });
+}
+
+// Called by the service of one of the backends.
+void TracingMuxerImpl::SetupDataSource(TracingBackendId backend_id,
+                                       uint32_t backend_connection_id,
+                                       DataSourceInstanceID instance_id,
+                                       const DataSourceConfig& cfg) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DLOG("Setting up data source %" PRIu64 " %s", instance_id,
+                cfg.name().c_str());
+  uint64_t config_hash = ComputeConfigHash(cfg);
+
+  for (const auto& rds : data_sources_) {
+    if (rds.descriptor.name() != cfg.name())
+      continue;
+    DataSourceStaticState& static_state = *rds.static_state;
+
+    // If this data source is already active for this exact config, don't start
+    // another instance. This happens when we have several data sources with the
+    // same name, in which case the service sends one SetupDataSource event for
+    // each one. Since we can't map which event maps to which data source, we
+    // ensure each event only starts one data source instance.
+    // TODO(skyostil): Register a unique id with each data source to the service
+    // to disambiguate.
+    bool active_for_config = false;
+    for (uint32_t i = 0; i < kMaxDataSourceInstances; i++) {
+      if (!static_state.TryGet(i))
+        continue;
+      auto* internal_state =
+          reinterpret_cast<DataSourceState*>(&static_state.instances[i]);
+      if (internal_state->backend_id == backend_id &&
+          internal_state->config_hash == config_hash) {
+        active_for_config = true;
+        break;
+      }
+    }
+    if (active_for_config) {
+      PERFETTO_DLOG(
+          "Data source %s is already active with this config, skipping",
+          cfg.name().c_str());
+      continue;
+    }
+
+    for (uint32_t i = 0; i < kMaxDataSourceInstances; i++) {
+      // Find a free slot.
+      if (static_state.TryGet(i))
+        continue;
+
+      auto* internal_state =
+          reinterpret_cast<DataSourceState*>(&static_state.instances[i]);
+      std::lock_guard<std::recursive_mutex> guard(internal_state->lock);
+      static_assert(
+          std::is_same<decltype(internal_state->data_source_instance_id),
+                       DataSourceInstanceID>::value,
+          "data_source_instance_id type mismatch");
+      internal_state->backend_id = backend_id;
+      internal_state->backend_connection_id = backend_connection_id;
+      internal_state->data_source_instance_id = instance_id;
+      internal_state->buffer_id =
+          static_cast<internal::BufferId>(cfg.target_buffer());
+      internal_state->config_hash = config_hash;
+      internal_state->data_source = rds.factory();
+      internal_state->interceptor = nullptr;
+      internal_state->interceptor_id = 0;
+
+      if (cfg.has_interceptor_config()) {
+        for (size_t j = 0; j < interceptors_.size(); j++) {
+          if (cfg.interceptor_config().name() ==
+              interceptors_[j].descriptor.name()) {
+            PERFETTO_DLOG("Intercepting data source %" PRIu64
+                          " \"%s\" into \"%s\"",
+                          instance_id, cfg.name().c_str(),
+                          cfg.interceptor_config().name().c_str());
+            internal_state->interceptor_id = static_cast<uint32_t>(j + 1);
+            internal_state->interceptor = interceptors_[j].factory();
+            internal_state->interceptor->OnSetup({cfg});
+            break;
+          }
+        }
+        if (!internal_state->interceptor_id) {
+          PERFETTO_ELOG("Unknown interceptor configured for data source: %s",
+                        cfg.interceptor_config().name().c_str());
+        }
+      }
+
+      // This must be made at the end. See matching acquire-load in
+      // DataSource::Trace().
+      static_state.valid_instances.fetch_or(1 << i, std::memory_order_release);
+
+      DataSourceBase::SetupArgs setup_args;
+      setup_args.config = &cfg;
+      setup_args.internal_instance_index = i;
+      internal_state->data_source->OnSetup(setup_args);
+      return;
+    }
+    PERFETTO_ELOG(
+        "Maximum number of data source instances exhausted. "
+        "Dropping data source %" PRIu64,
+        instance_id);
+    break;
+  }
+}
+
+// Called by the service of one of the backends.
+void TracingMuxerImpl::StartDataSource(TracingBackendId backend_id,
+                                       DataSourceInstanceID instance_id) {
+  PERFETTO_DLOG("Starting data source %" PRIu64, instance_id);
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+
+  auto ds = FindDataSource(backend_id, instance_id);
+  if (!ds) {
+    PERFETTO_ELOG("Could not find data source to start");
+    return;
+  }
+
+  DataSourceBase::StartArgs start_args{};
+  start_args.internal_instance_index = ds.instance_idx;
+
+  std::lock_guard<std::recursive_mutex> guard(ds.internal_state->lock);
+  if (ds.internal_state->interceptor)
+    ds.internal_state->interceptor->OnStart({});
+  ds.internal_state->trace_lambda_enabled = true;
+  ds.internal_state->data_source->OnStart(start_args);
+}
+
+// Called by the service of one of the backends.
+void TracingMuxerImpl::StopDataSource_AsyncBegin(
+    TracingBackendId backend_id,
+    DataSourceInstanceID instance_id) {
+  PERFETTO_DLOG("Stopping data source %" PRIu64, instance_id);
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+
+  auto ds = FindDataSource(backend_id, instance_id);
+  if (!ds) {
+    PERFETTO_ELOG("Could not find data source to stop");
+    return;
+  }
+
+  StopArgsImpl stop_args{};
+  stop_args.internal_instance_index = ds.instance_idx;
+  stop_args.async_stop_closure = [this, backend_id, instance_id] {
+    // TracingMuxerImpl is long lived, capturing |this| is okay.
+    // The notification closure can be moved out of the StopArgs by the
+    // embedder to handle stop asynchronously. The embedder might then
+    // call the closure on a different thread than the current one, hence
+    // this nested PostTask().
+    task_runner_->PostTask([this, backend_id, instance_id] {
+      StopDataSource_AsyncEnd(backend_id, instance_id);
+    });
+  };
+
+  {
+    std::lock_guard<std::recursive_mutex> guard(ds.internal_state->lock);
+    if (ds.internal_state->interceptor)
+      ds.internal_state->interceptor->OnStop({});
+    ds.internal_state->data_source->OnStop(stop_args);
+  }
+
+  // If the embedder hasn't called StopArgs.HandleStopAsynchronously() run the
+  // async closure here. In theory we could avoid the PostTask and call
+  // straight into CompleteDataSourceAsyncStop(). We keep that to reduce
+  // divergencies between the deferred-stop vs non-deferred-stop code paths.
+  if (stop_args.async_stop_closure)
+    std::move(stop_args.async_stop_closure)();
+}
+
+void TracingMuxerImpl::StopDataSource_AsyncEnd(
+    TracingBackendId backend_id,
+    DataSourceInstanceID instance_id) {
+  PERFETTO_DLOG("Ending async stop of data source %" PRIu64, instance_id);
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+
+  auto ds = FindDataSource(backend_id, instance_id);
+  if (!ds) {
+    PERFETTO_ELOG(
+        "Async stop of data source %" PRIu64
+        " failed. This might be due to calling the async_stop_closure twice.",
+        instance_id);
+    return;
+  }
+
+  const uint32_t mask = ~(1 << ds.instance_idx);
+  ds.static_state->valid_instances.fetch_and(mask, std::memory_order_acq_rel);
+
+  // Take the mutex to prevent that the data source is in the middle of
+  // a Trace() execution where it called GetDataSourceLocked() while we
+  // destroy it.
+  {
+    std::lock_guard<std::recursive_mutex> guard(ds.internal_state->lock);
+    ds.internal_state->trace_lambda_enabled = false;
+    ds.internal_state->data_source.reset();
+  }
+
+  // The other fields of internal_state are deliberately *not* cleared.
+  // See races-related comments of DataSource::Trace().
+
+  TracingMuxer::generation_++;
+
+  // |backends_| is append-only, Backend instances are always valid.
+  PERFETTO_CHECK(backend_id < backends_.size());
+  ProducerImpl* producer = backends_[backend_id].producer.get();
+  if (!producer)
+    return;
+  if (producer->connected_) {
+    // Flush any commits that might have been batched by SharedMemoryArbiter.
+    producer->service_->MaybeSharedMemoryArbiter()
+        ->FlushPendingCommitDataRequests();
+    producer->service_->NotifyDataSourceStopped(instance_id);
+  }
+  producer->SweepDeadServices();
+}
+
+void TracingMuxerImpl::ClearDataSourceIncrementalState(
+    TracingBackendId backend_id,
+    DataSourceInstanceID instance_id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DLOG("Clearing incremental state for data source %" PRIu64,
+                instance_id);
+  auto ds = FindDataSource(backend_id, instance_id);
+  if (!ds) {
+    PERFETTO_ELOG("Could not find data source to clear incremental state for");
+    return;
+  }
+  // Make DataSource::TraceContext::GetIncrementalState() eventually notice that
+  // the incremental state should be cleared.
+  ds.static_state->incremental_state_generation.fetch_add(
+      1, std::memory_order_relaxed);
+}
+
+void TracingMuxerImpl::SyncProducersForTesting() {
+  std::mutex mutex;
+  std::condition_variable cv;
+
+  // IPC-based producers don't report connection errors explicitly for each
+  // command, but instead with an asynchronous callback
+  // (ProducerImpl::OnDisconnected). This means that the sync command below
+  // may have completed but failed to reach the service because of a
+  // disconnection, but we can't tell until the disconnection message comes
+  // through. To guard against this, we run two whole rounds of sync round-trips
+  // before returning; the first one will detect any disconnected producers and
+  // the second one will ensure any reconnections have completed and all data
+  // sources are registered in the service again.
+  for (size_t i = 0; i < 2; i++) {
+    size_t countdown = std::numeric_limits<size_t>::max();
+    task_runner_->PostTask([this, &mutex, &cv, &countdown] {
+      {
+        std::unique_lock<std::mutex> countdown_lock(mutex);
+        countdown = backends_.size();
+      }
+      for (auto& backend : backends_) {
+        auto* producer = backend.producer.get();
+        producer->service_->Sync([&mutex, &cv, &countdown] {
+          std::unique_lock<std::mutex> countdown_lock(mutex);
+          countdown--;
+          cv.notify_one();
+        });
+      }
+    });
+
+    {
+      std::unique_lock<std::mutex> countdown_lock(mutex);
+      cv.wait(countdown_lock, [&countdown] { return !countdown; });
+    }
+  }
+
+  // Check that all producers are indeed connected.
+  bool done = false;
+  bool all_producers_connected = true;
+  task_runner_->PostTask([this, &mutex, &cv, &done, &all_producers_connected] {
+    for (auto& backend : backends_)
+      all_producers_connected &= backend.producer->connected_;
+    std::unique_lock<std::mutex> lock(mutex);
+    done = true;
+    cv.notify_one();
+  });
+
+  {
+    std::unique_lock<std::mutex> lock(mutex);
+    cv.wait(lock, [&done] { return done; });
+  }
+  PERFETTO_DCHECK(all_producers_connected);
+}
+
+void TracingMuxerImpl::DestroyStoppedTraceWritersForCurrentThread() {
+  // Iterate across all possible data source types.
+  auto cur_generation = generation_.load(std::memory_order_acquire);
+  auto* root_tls = GetOrCreateTracingTLS();
+
+  auto destroy_stopped_instances = [](DataSourceThreadLocalState& tls) {
+    // |tls| has a vector of per-data-source-instance thread-local state.
+    DataSourceStaticState* static_state = tls.static_state;
+    if (!static_state)
+      return;  // Slot not used.
+
+    // Iterate across all possible instances for this data source.
+    for (uint32_t inst = 0; inst < kMaxDataSourceInstances; inst++) {
+      DataSourceInstanceThreadLocalState& ds_tls = tls.per_instance[inst];
+      if (!ds_tls.trace_writer)
+        continue;
+
+      DataSourceState* ds_state = static_state->TryGet(inst);
+      if (ds_state && ds_state->backend_id == ds_tls.backend_id &&
+          ds_state->backend_connection_id == ds_tls.backend_connection_id &&
+          ds_state->buffer_id == ds_tls.buffer_id &&
+          ds_state->data_source_instance_id == ds_tls.data_source_instance_id) {
+        continue;
+      }
+
+      // The DataSource instance has been destroyed or recycled.
+      ds_tls.Reset();  // Will also destroy the |ds_tls.trace_writer|.
+    }
+  };
+
+  for (size_t ds_idx = 0; ds_idx < kMaxDataSources; ds_idx++) {
+    // |tls| has a vector of per-data-source-instance thread-local state.
+    DataSourceThreadLocalState& tls = root_tls->data_sources_tls[ds_idx];
+    destroy_stopped_instances(tls);
+  }
+  destroy_stopped_instances(root_tls->track_event_tls);
+  root_tls->generation = cur_generation;
+}
+
+// Called both when a new data source is registered or when a new backend
+// connects. In both cases we want to be sure we reflected the data source
+// registrations on the backends.
+void TracingMuxerImpl::UpdateDataSourcesOnAllBackends() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  for (RegisteredDataSource& rds : data_sources_) {
+    for (RegisteredBackend& backend : backends_) {
+      // We cannot call RegisterDataSource on the backend before it connects.
+      if (!backend.producer->connected_)
+        continue;
+
+      PERFETTO_DCHECK(rds.static_state->index < kMaxDataSources);
+      if (backend.producer->registered_data_sources_.test(
+              rds.static_state->index))
+        continue;
+
+      rds.descriptor.set_will_notify_on_start(true);
+      rds.descriptor.set_will_notify_on_stop(true);
+      rds.descriptor.set_handles_incremental_state_clear(true);
+      backend.producer->service_->RegisterDataSource(rds.descriptor);
+      backend.producer->registered_data_sources_.set(rds.static_state->index);
+    }
+  }
+}
+
+void TracingMuxerImpl::SetupTracingSession(
+    TracingSessionGlobalID session_id,
+    const std::shared_ptr<TraceConfig>& trace_config,
+    base::ScopedFile trace_fd) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_CHECK(!trace_fd || trace_config->write_into_file());
+
+  auto* consumer = FindConsumer(session_id);
+  if (!consumer)
+    return;
+
+  consumer->trace_config_ = trace_config;
+  if (trace_fd)
+    consumer->trace_fd_ = std::move(trace_fd);
+
+  if (!consumer->connected_)
+    return;
+
+  // Only used in the deferred start mode.
+  if (trace_config->deferred_start()) {
+    consumer->service_->EnableTracing(*trace_config,
+                                      std::move(consumer->trace_fd_));
+  }
+}
+
+void TracingMuxerImpl::StartTracingSession(TracingSessionGlobalID session_id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+
+  auto* consumer = FindConsumer(session_id);
+
+  if (!consumer)
+    return;
+
+  if (!consumer->trace_config_) {
+    PERFETTO_ELOG("Must call Setup(config) first");
+    return;
+  }
+
+  if (!consumer->connected_) {
+    consumer->start_pending_ = true;
+    return;
+  }
+
+  consumer->start_pending_ = false;
+  if (consumer->trace_config_->deferred_start()) {
+    consumer->service_->StartTracing();
+  } else {
+    consumer->service_->EnableTracing(*consumer->trace_config_,
+                                      std::move(consumer->trace_fd_));
+  }
+
+  // TODO implement support for the deferred-start + fast-triggering case.
+}
+
+void TracingMuxerImpl::ChangeTracingSessionConfig(
+    TracingSessionGlobalID session_id,
+    const TraceConfig& trace_config) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+
+  auto* consumer = FindConsumer(session_id);
+
+  if (!consumer)
+    return;
+
+  if (!consumer->trace_config_) {
+    // Changing the config is only supported for started sessions.
+    PERFETTO_ELOG("Must call Setup(config) and Start() first");
+    return;
+  }
+
+  consumer->trace_config_ = std::make_shared<TraceConfig>(trace_config);
+  if (consumer->connected_)
+    consumer->service_->ChangeTraceConfig(trace_config);
+}
+
+void TracingMuxerImpl::FlushTracingSession(TracingSessionGlobalID session_id,
+                                           uint32_t timeout_ms,
+                                           std::function<void(bool)> callback) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto* consumer = FindConsumer(session_id);
+  if (!consumer || consumer->start_pending_ || consumer->stop_pending_ ||
+      !consumer->trace_config_) {
+    PERFETTO_ELOG("Flush() can be called only after Start() and before Stop()");
+    std::move(callback)(false);
+    return;
+  }
+
+  consumer->service_->Flush(timeout_ms, std::move(callback));
+}
+
+void TracingMuxerImpl::StopTracingSession(TracingSessionGlobalID session_id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto* consumer = FindConsumer(session_id);
+  if (!consumer)
+    return;
+
+  if (consumer->start_pending_) {
+    // If the session hasn't started yet, wait until it does before stopping.
+    consumer->stop_pending_ = true;
+    return;
+  }
+
+  consumer->stop_pending_ = false;
+  if (consumer->stopped_) {
+    // If the session was already stopped (e.g., it failed to start), don't try
+    // stopping again.
+    consumer->NotifyStopComplete();
+  } else if (!consumer->trace_config_) {
+    PERFETTO_ELOG("Must call Setup(config) and Start() first");
+    return;
+  } else {
+    consumer->service_->DisableTracing();
+  }
+
+  consumer->trace_config_.reset();
+}
+
+void TracingMuxerImpl::DestroyTracingSession(
+    TracingSessionGlobalID session_id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  for (RegisteredBackend& backend : backends_) {
+    // We need to find the consumer (if any) and call Disconnect as we destroy
+    // the tracing session. We can't call Disconnect() inside this for loop
+    // because in the in-process case this will end up to a synchronous call to
+    // OnConsumerDisconnect which will invalidate all the iterators to
+    // |backend.consumers|.
+    ConsumerImpl* consumer = nullptr;
+    for (auto& con : backend.consumers) {
+      if (con->session_id_ == session_id) {
+        consumer = con.get();
+        break;
+      }
+    }
+    if (consumer) {
+      // We broke out of the loop above on the assumption that each backend will
+      // only have a single consumer per session. This DCHECK ensures that
+      // this is the case.
+      PERFETTO_DCHECK(
+          std::count_if(backend.consumers.begin(), backend.consumers.end(),
+                        [session_id](const std::unique_ptr<ConsumerImpl>& con) {
+                          return con->session_id_ == session_id;
+                        }) == 1u);
+      consumer->Disconnect();
+    }
+  }
+}
+
+void TracingMuxerImpl::ReadTracingSessionData(
+    TracingSessionGlobalID session_id,
+    std::function<void(TracingSession::ReadTraceCallbackArgs)> callback) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto* consumer = FindConsumer(session_id);
+  if (!consumer) {
+    // TODO(skyostil): Signal an error to the user.
+    TracingSession::ReadTraceCallbackArgs callback_arg{};
+    callback(callback_arg);
+    return;
+  }
+  PERFETTO_DCHECK(!consumer->read_trace_callback_);
+  consumer->read_trace_callback_ = std::move(callback);
+  consumer->service_->ReadBuffers();
+}
+
+void TracingMuxerImpl::GetTraceStats(
+    TracingSessionGlobalID session_id,
+    TracingSession::GetTraceStatsCallback callback) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto* consumer = FindConsumer(session_id);
+  if (!consumer) {
+    TracingSession::GetTraceStatsCallbackArgs callback_arg{};
+    callback_arg.success = false;
+    callback(std::move(callback_arg));
+    return;
+  }
+  PERFETTO_DCHECK(!consumer->get_trace_stats_callback_);
+  consumer->get_trace_stats_callback_ = std::move(callback);
+  if (!consumer->connected_) {
+    consumer->get_trace_stats_pending_ = true;
+    return;
+  }
+  consumer->get_trace_stats_pending_ = false;
+  consumer->service_->GetTraceStats();
+}
+
+void TracingMuxerImpl::QueryServiceState(
+    TracingSessionGlobalID session_id,
+    TracingSession::QueryServiceStateCallback callback) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto* consumer = FindConsumer(session_id);
+  if (!consumer) {
+    TracingSession::QueryServiceStateCallbackArgs callback_arg{};
+    callback_arg.success = false;
+    callback(std::move(callback_arg));
+    return;
+  }
+  PERFETTO_DCHECK(!consumer->query_service_state_callback_);
+  if (!consumer->connected_) {
+    consumer->query_service_state_callback_ = std::move(callback);
+    return;
+  }
+  auto callback_wrapper = [callback](bool success,
+                                     protos::gen::TracingServiceState state) {
+    TracingSession::QueryServiceStateCallbackArgs callback_arg{};
+    callback_arg.success = success;
+    callback_arg.service_state_data = state.SerializeAsArray();
+    callback(std::move(callback_arg));
+  };
+  consumer->service_->QueryServiceState(std::move(callback_wrapper));
+}
+
+void TracingMuxerImpl::SetBatchCommitsDurationForTesting(
+    uint32_t batch_commits_duration_ms,
+    BackendType backend_type) {
+  for (RegisteredBackend& backend : backends_) {
+    if (backend.producer && backend.producer->connected_ &&
+        backend.type == backend_type) {
+      backend.producer->service_->MaybeSharedMemoryArbiter()
+          ->SetBatchCommitsDuration(batch_commits_duration_ms);
+    }
+  }
+}
+
+bool TracingMuxerImpl::EnableDirectSMBPatchingForTesting(
+    BackendType backend_type) {
+  for (RegisteredBackend& backend : backends_) {
+    if (backend.producer && backend.producer->connected_ &&
+        backend.type == backend_type &&
+        !backend.producer->service_->MaybeSharedMemoryArbiter()
+             ->EnableDirectSMBPatching()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+TracingMuxerImpl::ConsumerImpl* TracingMuxerImpl::FindConsumer(
+    TracingSessionGlobalID session_id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  for (RegisteredBackend& backend : backends_) {
+    for (auto& consumer : backend.consumers) {
+      if (consumer->session_id_ == session_id) {
+        return consumer.get();
+      }
+    }
+  }
+  return nullptr;
+}
+
+void TracingMuxerImpl::InitializeConsumer(TracingSessionGlobalID session_id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+
+  auto* consumer = FindConsumer(session_id);
+  if (!consumer)
+    return;
+
+  TracingBackendId backend_id = consumer->backend_id_;
+  // |backends_| is append-only, Backend instances are always valid.
+  PERFETTO_CHECK(backend_id < backends_.size());
+  RegisteredBackend& backend = backends_[backend_id];
+
+  TracingBackend::ConnectConsumerArgs conn_args;
+  conn_args.consumer = consumer;
+  conn_args.task_runner = task_runner_.get();
+  consumer->Initialize(backend.backend->ConnectConsumer(conn_args));
+}
+
+void TracingMuxerImpl::OnConsumerDisconnected(ConsumerImpl* consumer) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  for (RegisteredBackend& backend : backends_) {
+    auto pred = [consumer](const std::unique_ptr<ConsumerImpl>& con) {
+      return con.get() == consumer;
+    };
+    backend.consumers.erase(std::remove_if(backend.consumers.begin(),
+                                           backend.consumers.end(), pred),
+                            backend.consumers.end());
+  }
+}
+
+void TracingMuxerImpl::SetMaxProducerReconnectionsForTesting(uint32_t count) {
+  max_producer_reconnections_.store(count);
+}
+
+void TracingMuxerImpl::OnProducerDisconnected(ProducerImpl* producer) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  for (RegisteredBackend& backend : backends_) {
+    if (backend.producer.get() != producer)
+      continue;
+    // Try reconnecting the disconnected producer. If the connection succeeds,
+    // all the data sources will be automatically re-registered.
+    if (producer->connection_id_ > max_producer_reconnections_.load()) {
+      // Avoid reconnecting a failing producer too many times. Instead we just
+      // leak the producer instead of trying to avoid further complicating
+      // cross-thread trace writer creation.
+      PERFETTO_ELOG("Producer disconnected too many times; not reconnecting");
+      continue;
+    }
+    backend.producer->Initialize(
+        backend.backend->ConnectProducer(backend.producer_conn_args));
+  }
+
+  // Increment the generation counter to atomically ensure that:
+  // 1. Old trace writers from the severed connection eventually get cleaned up
+  //    by DestroyStoppedTraceWritersForCurrentThread().
+  // 2. No new trace writers can be created for the SharedMemoryArbiter from the
+  //    old connection.
+  TracingMuxer::generation_++;
+}
+
+TracingMuxerImpl::FindDataSourceRes TracingMuxerImpl::FindDataSource(
+    TracingBackendId backend_id,
+    DataSourceInstanceID instance_id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  for (const auto& rds : data_sources_) {
+    DataSourceStaticState* static_state = rds.static_state;
+    for (uint32_t i = 0; i < kMaxDataSourceInstances; i++) {
+      auto* internal_state = static_state->TryGet(i);
+      if (internal_state && internal_state->backend_id == backend_id &&
+          internal_state->data_source_instance_id == instance_id) {
+        return FindDataSourceRes(static_state, internal_state, i);
+      }
+    }
+  }
+  return FindDataSourceRes();
+}
+
+// Can be called from any thread.
+std::unique_ptr<TraceWriterBase> TracingMuxerImpl::CreateTraceWriter(
+    DataSourceStaticState* static_state,
+    uint32_t data_source_instance_index,
+    DataSourceState* data_source,
+    BufferExhaustedPolicy buffer_exhausted_policy) {
+  if (PERFETTO_UNLIKELY(data_source->interceptor_id)) {
+    // If the session is being intercepted, return a heap-backed trace writer
+    // instead. This is safe because all the data given to the interceptor is
+    // either thread-local (|instance_index|), statically allocated
+    // (|static_state|) or constant after initialization (|interceptor|). Access
+    // to the interceptor instance itself through |data_source| is protected by
+    // a statically allocated lock (similarly to the data source instance).
+    auto& interceptor = interceptors_[data_source->interceptor_id - 1];
+    return std::unique_ptr<TraceWriterBase>(new InterceptorTraceWriter(
+        interceptor.tls_factory(static_state, data_source_instance_index),
+        interceptor.packet_callback, static_state, data_source_instance_index));
+  }
+  ProducerImpl* producer = backends_[data_source->backend_id].producer.get();
+  // Atomically load the current service endpoint. We keep the pointer as a
+  // shared pointer on the stack to guard against it from being concurrently
+  // modified on the thread by ProducerImpl::Initialize() swapping in a
+  // reconnected service on the muxer task runner thread.
+  //
+  // The endpoint may also be concurrently modified by SweepDeadServices()
+  // clearing out old disconnected services. We guard against that by
+  // SharedMemoryArbiter keeping track of any outstanding trace writers. After
+  // shutdown has started, the trace writer created below will be a null one
+  // which will drop any written data. See SharedMemoryArbiter::TryShutdown().
+  //
+  // We use an atomic pointer instead of holding a lock because
+  // CreateTraceWriter posts tasks under the hood.
+  std::shared_ptr<ProducerEndpoint> service =
+      std::atomic_load(&producer->service_);
+  return service->CreateTraceWriter(data_source->buffer_id,
+                                    buffer_exhausted_policy);
+}
+
+// This is called via the public API Tracing::NewTrace().
+// Can be called from any thread.
+std::unique_ptr<TracingSession> TracingMuxerImpl::CreateTracingSession(
+    BackendType requested_backend_type) {
+  TracingSessionGlobalID session_id = ++next_tracing_session_id_;
+
+  // |backend_type| can only specify one backend, not an OR-ed mask.
+  PERFETTO_CHECK((requested_backend_type & (requested_backend_type - 1)) == 0);
+
+  // Capturing |this| is fine because the TracingMuxer is a leaky singleton.
+  task_runner_->PostTask([this, requested_backend_type, session_id] {
+    for (RegisteredBackend& backend : backends_) {
+      if (requested_backend_type && backend.type &&
+          backend.type != requested_backend_type) {
+        continue;
+      }
+
+      TracingBackendId backend_id = backend.id;
+
+      // Create the consumer now, even if we have to ask the embedder below, so
+      // that any other tasks executing after this one can find the consumer and
+      // change its pending attributes.
+      backend.consumers.emplace_back(
+          new ConsumerImpl(this, backend.type, backend.id, session_id));
+
+      // The last registered backend in |backends_| is the unsupported backend
+      // without a valid type.
+      if (!backend.type) {
+        PERFETTO_ELOG(
+            "No tracing backend ready for type=%d, consumer will disconnect",
+            requested_backend_type);
+        InitializeConsumer(session_id);
+        return;
+      }
+
+      // Check if the embedder wants to be asked for permission before
+      // connecting the consumer.
+      if (!policy_) {
+        InitializeConsumer(session_id);
+        return;
+      }
+
+      TracingPolicy::ShouldAllowConsumerSessionArgs args;
+      args.backend_type = backend.type;
+      args.result_callback = [this, backend_id, session_id](bool allow) {
+        task_runner_->PostTask([this, backend_id, session_id, allow] {
+          if (allow) {
+            InitializeConsumer(session_id);
+            return;
+          }
+
+          PERFETTO_ELOG(
+              "Consumer session for backend type type=%d forbidden, "
+              "consumer will disconnect",
+              backends_[backend_id].type);
+
+          auto* consumer = FindConsumer(session_id);
+          if (!consumer)
+            return;
+
+          consumer->OnDisconnect();
+        });
+      };
+      policy_->ShouldAllowConsumerSession(args);
+      return;
+    }
+    PERFETTO_DFATAL("Not reached");
+  });
+
+  return std::unique_ptr<TracingSession>(
+      new TracingSessionImpl(this, session_id, requested_backend_type));
+}
+
+void TracingMuxerImpl::InitializeInstance(const TracingInitArgs& args) {
+  if (instance_ != TracingMuxerFake::Get())
+    PERFETTO_FATAL("Tracing already initialized");
+  new TracingMuxerImpl(args);
+}
+
+TracingMuxer::~TracingMuxer() = default;
+
+static_assert(std::is_same<internal::BufferId, BufferID>::value,
+              "public's BufferId and tracing/core's BufferID diverged");
+
+}  // namespace internal
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/internal/track_event_internal.cc
+// gen_amalgamated begin header: include/perfetto/tracing/internal/track_event_interned_fields.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/track_event_interned_data_index.h"
+
+#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_TRACK_EVENT_INTERNED_FIELDS_H_
+#define INCLUDE_PERFETTO_TRACING_INTERNAL_TRACK_EVENT_INTERNED_FIELDS_H_
+
+namespace perfetto {
+namespace internal {
+
+// These helpers are exposed here to allow Chromium-without-client library
+// to share the interning buffers with Perfetto internals (e.g.
+// perfetto::TracedValue implementation).
+
+struct PERFETTO_EXPORT InternedEventCategory
+    : public TrackEventInternedDataIndex<
+          InternedEventCategory,
+          perfetto::protos::pbzero::InternedData::kEventCategoriesFieldNumber,
+          const char*,
+          SmallInternedDataTraits> {
+  ~InternedEventCategory() override;
+
+  static void Add(protos::pbzero::InternedData* interned_data,
+                  size_t iid,
+                  const char* value,
+                  size_t length);
+};
+
+struct PERFETTO_EXPORT InternedEventName
+    : public TrackEventInternedDataIndex<
+          InternedEventName,
+          perfetto::protos::pbzero::InternedData::kEventNamesFieldNumber,
+          const char*,
+          SmallInternedDataTraits> {
+  ~InternedEventName() override;
+
+  static void Add(protos::pbzero::InternedData* interned_data,
+                  size_t iid,
+                  const char* value);
+};
+
+struct PERFETTO_EXPORT InternedDebugAnnotationName
+    : public TrackEventInternedDataIndex<
+          InternedDebugAnnotationName,
+          perfetto::protos::pbzero::InternedData::
+              kDebugAnnotationNamesFieldNumber,
+          const char*,
+          SmallInternedDataTraits> {
+  ~InternedDebugAnnotationName() override;
+
+  static void Add(protos::pbzero::InternedData* interned_data,
+                  size_t iid,
+                  const char* value);
+};
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_INTERNAL_TRACK_EVENT_INTERNED_FIELDS_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/track_event_descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACK_EVENT_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACK_EVENT_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class TrackEventCategory;
+
+class TrackEventDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TrackEventDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TrackEventDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TrackEventDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_available_categories() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> available_categories() const { return GetRepeated<::protozero::ConstBytes>(1); }
+};
+
+class TrackEventDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = TrackEventDescriptor_Decoder;
+  enum : int32_t {
+    kAvailableCategoriesFieldNumber = 1,
+  };
+
+  using FieldMetadata_AvailableCategories =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TrackEventCategory,
+      TrackEventDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AvailableCategories kAvailableCategories() { return {}; }
+  template <typename T = TrackEventCategory> T* add_available_categories() {
+    return BeginNestedMessage<T>(1);
+  }
+
+};
+
+class TrackEventCategory_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TrackEventCategory_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TrackEventCategory_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TrackEventCategory_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_description() const { return at<2>().valid(); }
+  ::protozero::ConstChars description() const { return at<2>().as_string(); }
+  bool has_tags() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> tags() const { return GetRepeated<::protozero::ConstChars>(3); }
+};
+
+class TrackEventCategory : public ::protozero::Message {
+ public:
+  using Decoder = TrackEventCategory_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kDescriptionFieldNumber = 2,
+    kTagsFieldNumber = 3,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackEventCategory>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Description =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackEventCategory>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Description kDescription() { return {}; }
+  void set_description(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Description::kFieldId, data, size);
+  }
+  void set_description(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Description::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Tags =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackEventCategory>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tags kTags() { return {}; }
+  void add_tags(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Tags::kFieldId, data, size);
+  }
+  void add_tags(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_internal.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/proc_utils.h"
+// gen_amalgamated expanded: #include "perfetto/base/thread_utils.h"
+// gen_amalgamated expanded: #include "perfetto/base/time.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_config.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_interned_fields.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/track_event.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/track_event_category_registry.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/track_event_interned_data_index.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/track_event_descriptor.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/interned_data/interned_data.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet_defaults.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_descriptor.pbzero.h"
+
+namespace perfetto {
+
+TrackEventSessionObserver::~TrackEventSessionObserver() = default;
+void TrackEventSessionObserver::OnSetup(const DataSourceBase::SetupArgs&) {}
+void TrackEventSessionObserver::OnStart(const DataSourceBase::StartArgs&) {}
+void TrackEventSessionObserver::OnStop(const DataSourceBase::StopArgs&) {}
+
+namespace internal {
+
+BaseTrackEventInternedDataIndex::~BaseTrackEventInternedDataIndex() = default;
+
+namespace {
+
+std::atomic<perfetto::base::PlatformThreadId> g_main_thread;
+static constexpr const char kLegacySlowPrefix[] = "disabled-by-default-";
+static constexpr const char kSlowTag[] = "slow";
+static constexpr const char kDebugTag[] = "debug";
+
+void ForEachObserver(
+    std::function<bool(TrackEventSessionObserver*&)> callback) {
+  // Session observers, shared by all track event data source instances.
+  static constexpr int kMaxObservers = 8;
+  static std::recursive_mutex* mutex = new std::recursive_mutex{};  // Leaked.
+  static std::array<TrackEventSessionObserver*, kMaxObservers> observers{};
+  std::unique_lock<std::recursive_mutex> lock(*mutex);
+  for (auto& o : observers) {
+    if (!callback(o))
+      break;
+  }
+}
+
+enum class MatchType { kExact, kPattern };
+
+bool NameMatchesPattern(const std::string& pattern,
+                        const std::string& name,
+                        MatchType match_type) {
+  // To avoid pulling in all of std::regex, for now we only support a single "*"
+  // wildcard at the end of the pattern.
+  size_t i = pattern.find('*');
+  if (i != std::string::npos) {
+    PERFETTO_DCHECK(i == pattern.size() - 1);
+    if (match_type != MatchType::kPattern)
+      return false;
+    return name.substr(0, i) == pattern.substr(0, i);
+  }
+  return name == pattern;
+}
+
+bool NameMatchesPatternList(const std::vector<std::string>& patterns,
+                            const std::string& name,
+                            MatchType match_type) {
+  for (const auto& pattern : patterns) {
+    if (NameMatchesPattern(pattern, name, match_type))
+      return true;
+  }
+  return false;
+}
+
+}  // namespace
+
+// static
+const Track TrackEventInternal::kDefaultTrack{};
+
+// static
+std::atomic<int> TrackEventInternal::session_count_{};
+
+// static
+bool TrackEventInternal::Initialize(
+    const TrackEventCategoryRegistry& registry,
+    bool (*register_data_source)(const DataSourceDescriptor&)) {
+  if (!g_main_thread)
+    g_main_thread = perfetto::base::GetThreadId();
+
+  DataSourceDescriptor dsd;
+  dsd.set_name("track_event");
+
+  protozero::HeapBuffered<protos::pbzero::TrackEventDescriptor> ted;
+  for (size_t i = 0; i < registry.category_count(); i++) {
+    auto category = registry.GetCategory(i);
+    // Don't register group categories.
+    if (category->IsGroup())
+      continue;
+    auto cat = ted->add_available_categories();
+    cat->set_name(category->name);
+    if (category->description)
+      cat->set_description(category->description);
+    for (const auto& tag : category->tags) {
+      if (tag)
+        cat->add_tags(tag);
+    }
+    // Disabled-by-default categories get a "slow" tag.
+    if (!strncmp(category->name, kLegacySlowPrefix, strlen(kLegacySlowPrefix)))
+      cat->add_tags(kSlowTag);
+  }
+  dsd.set_track_event_descriptor_raw(ted.SerializeAsString());
+
+  return register_data_source(dsd);
+}
+
+// static
+bool TrackEventInternal::AddSessionObserver(
+    TrackEventSessionObserver* observer) {
+  bool result = false;
+  ForEachObserver([&](TrackEventSessionObserver*& o) {
+    if (!o) {
+      o = observer;
+      result = true;
+      return false;
+    }
+    return true;
+  });
+  return result;
+}
+
+// static
+void TrackEventInternal::RemoveSessionObserver(
+    TrackEventSessionObserver* observer) {
+  ForEachObserver([&](TrackEventSessionObserver*& o) {
+    if (o == observer) {
+      o = nullptr;
+      return false;
+    }
+    return true;
+  });
+}
+
+// static
+void TrackEventInternal::EnableTracing(
+    const TrackEventCategoryRegistry& registry,
+    const protos::gen::TrackEventConfig& config,
+    const DataSourceBase::SetupArgs& args) {
+  for (size_t i = 0; i < registry.category_count(); i++) {
+    if (IsCategoryEnabled(registry, config, *registry.GetCategory(i)))
+      registry.EnableCategoryForInstance(i, args.internal_instance_index);
+  }
+  ForEachObserver([&](TrackEventSessionObserver*& o) {
+    if (o)
+      o->OnSetup(args);
+    return true;
+  });
+}
+
+// static
+void TrackEventInternal::OnStart(const DataSourceBase::StartArgs& args) {
+  session_count_.fetch_add(1);
+  ForEachObserver([&](TrackEventSessionObserver*& o) {
+    if (o)
+      o->OnStart(args);
+    return true;
+  });
+}
+
+// static
+void TrackEventInternal::DisableTracing(
+    const TrackEventCategoryRegistry& registry,
+    const DataSourceBase::StopArgs& args) {
+  ForEachObserver([&](TrackEventSessionObserver*& o) {
+    if (o)
+      o->OnStop(args);
+    return true;
+  });
+  for (size_t i = 0; i < registry.category_count(); i++)
+    registry.DisableCategoryForInstance(i, args.internal_instance_index);
+}
+
+// static
+bool TrackEventInternal::IsCategoryEnabled(
+    const TrackEventCategoryRegistry& registry,
+    const protos::gen::TrackEventConfig& config,
+    const Category& category) {
+  // If this is a group category, check if any of its constituent categories are
+  // enabled. If so, then this one is enabled too.
+  if (category.IsGroup()) {
+    bool result = false;
+    category.ForEachGroupMember([&](const char* member_name, size_t name_size) {
+      for (size_t i = 0; i < registry.category_count(); i++) {
+        const auto ref_category = registry.GetCategory(i);
+        // Groups can't refer to other groups.
+        if (ref_category->IsGroup())
+          continue;
+        // Require an exact match.
+        if (ref_category->name_size() != name_size ||
+            strncmp(ref_category->name, member_name, name_size)) {
+          continue;
+        }
+        if (IsCategoryEnabled(registry, config, *ref_category)) {
+          result = true;
+          // Break ForEachGroupMember() loop.
+          return false;
+        }
+        break;
+      }
+      // No match? Must be a dynamic category.
+      DynamicCategory dyn_category(std::string(member_name, name_size));
+      Category ref_category{Category::FromDynamicCategory(dyn_category)};
+      if (IsCategoryEnabled(registry, config, ref_category)) {
+        result = true;
+        // Break ForEachGroupMember() loop.
+        return false;
+      }
+      // No match found => keep iterating.
+      return true;
+    });
+    return result;
+  }
+
+  auto has_matching_tag = [&](std::function<bool(const char*)> matcher) {
+    for (const auto& tag : category.tags) {
+      if (!tag)
+        break;
+      if (matcher(tag))
+        return true;
+    }
+    // Legacy "disabled-by-default" categories automatically get the "slow" tag.
+    if (!strncmp(category.name, kLegacySlowPrefix, strlen(kLegacySlowPrefix)) &&
+        matcher(kSlowTag)) {
+      return true;
+    }
+    return false;
+  };
+
+  // First try exact matches, then pattern matches.
+  const std::array<MatchType, 2> match_types = {
+      {MatchType::kExact, MatchType::kPattern}};
+  for (auto match_type : match_types) {
+    // 1. Enabled categories.
+    if (NameMatchesPatternList(config.enabled_categories(), category.name,
+                               match_type)) {
+      return true;
+    }
+
+    // 2. Enabled tags.
+    if (has_matching_tag([&](const char* tag) {
+          return NameMatchesPatternList(config.enabled_tags(), tag, match_type);
+        })) {
+      return true;
+    }
+
+    // 3. Disabled categories.
+    if (NameMatchesPatternList(config.disabled_categories(), category.name,
+                               match_type)) {
+      return false;
+    }
+
+    // 4. Disabled tags.
+    if (has_matching_tag([&](const char* tag) {
+          if (config.disabled_tags_size()) {
+            return NameMatchesPatternList(config.disabled_tags(), tag,
+                                          match_type);
+          } else {
+            // The "slow" and "debug" tags are disabled by default.
+            return NameMatchesPattern(kSlowTag, tag, match_type) ||
+                   NameMatchesPattern(kDebugTag, tag, match_type);
+          }
+        })) {
+      return false;
+    }
+  }
+
+  // If nothing matched, enable the category by default.
+  return true;
+}
+
+// static
+uint64_t TrackEventInternal::GetTimeNs() {
+  if (GetClockId() == protos::pbzero::BUILTIN_CLOCK_BOOTTIME)
+    return static_cast<uint64_t>(perfetto::base::GetBootTimeNs().count());
+  PERFETTO_DCHECK(GetClockId() == protos::pbzero::BUILTIN_CLOCK_MONOTONIC);
+  return static_cast<uint64_t>(perfetto::base::GetWallTimeNs().count());
+}
+
+// static
+int TrackEventInternal::GetSessionCount() {
+  return session_count_.load();
+}
+
+// static
+void TrackEventInternal::ResetIncrementalState(TraceWriterBase* trace_writer,
+                                               uint64_t timestamp) {
+  auto default_track = ThreadTrack::Current();
+  {
+    // Mark any incremental state before this point invalid. Also set up
+    // defaults so that we don't need to repeat constant data for each packet.
+    auto packet = NewTracePacket(
+        trace_writer, timestamp,
+        protos::pbzero::TracePacket::SEQ_INCREMENTAL_STATE_CLEARED);
+    auto defaults = packet->set_trace_packet_defaults();
+    defaults->set_timestamp_clock_id(GetClockId());
+
+    // Establish the default track for this event sequence.
+    auto track_defaults = defaults->set_track_event_defaults();
+    track_defaults->set_track_uuid(default_track.uuid);
+  }
+
+  // Every thread should write a descriptor for its default track, because most
+  // trace points won't explicitly reference it.
+  WriteTrackDescriptor(default_track, trace_writer);
+
+  // Additionally the main thread should dump the process descriptor.
+  if (perfetto::base::GetThreadId() == g_main_thread)
+    WriteTrackDescriptor(ProcessTrack::Current(), trace_writer);
+}
+
+// static
+protozero::MessageHandle<protos::pbzero::TracePacket>
+TrackEventInternal::NewTracePacket(TraceWriterBase* trace_writer,
+                                   uint64_t timestamp,
+                                   uint32_t seq_flags) {
+  auto packet = trace_writer->NewTracePacket();
+  packet->set_timestamp(timestamp);
+  // TODO(skyostil): Stop emitting this for every event once the trace
+  // processor understands trace packet defaults.
+  if (GetClockId() != protos::pbzero::BUILTIN_CLOCK_BOOTTIME)
+    packet->set_timestamp_clock_id(GetClockId());
+  packet->set_sequence_flags(seq_flags);
+  return packet;
+}
+
+// static
+EventContext TrackEventInternal::WriteEvent(
+    TraceWriterBase* trace_writer,
+    TrackEventIncrementalState* incr_state,
+    const Category* category,
+    const char* name,
+    perfetto::protos::pbzero::TrackEvent::Type type,
+    uint64_t timestamp) {
+  PERFETTO_DCHECK(g_main_thread);
+  PERFETTO_DCHECK(!incr_state->was_cleared);
+
+  auto packet = NewTracePacket(trace_writer, timestamp);
+  EventContext ctx(std::move(packet), incr_state);
+
+  auto track_event = ctx.event();
+  if (type != protos::pbzero::TrackEvent::TYPE_UNSPECIFIED)
+    track_event->set_type(type);
+
+  // We assume that |category| and |name| point to strings with static lifetime.
+  // This means we can use their addresses as interning keys.
+  // TODO(skyostil): Intern categories at compile time.
+  if (category && type != protos::pbzero::TrackEvent::TYPE_SLICE_END &&
+      type != protos::pbzero::TrackEvent::TYPE_COUNTER) {
+    category->ForEachGroupMember(
+        [&](const char* member_name, size_t name_size) {
+          size_t category_iid =
+              InternedEventCategory::Get(&ctx, member_name, name_size);
+          track_event->add_category_iids(category_iid);
+          return true;
+        });
+  }
+  if (name && type != protos::pbzero::TrackEvent::TYPE_SLICE_END) {
+    size_t name_iid = InternedEventName::Get(&ctx, name);
+    track_event->set_name_iid(name_iid);
+  }
+  return ctx;
+}
+
+// static
+protos::pbzero::DebugAnnotation* TrackEventInternal::AddDebugAnnotation(
+    perfetto::EventContext* event_ctx,
+    const char* name) {
+  auto annotation = event_ctx->event()->add_debug_annotations();
+  annotation->set_name_iid(InternedDebugAnnotationName::Get(event_ctx, name));
+  return annotation;
+}
+
+}  // namespace internal
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/internal/track_event_interned_fields.cc
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_interned_fields.h"
+
+namespace perfetto {
+namespace internal {
+
+InternedEventCategory::~InternedEventCategory() = default;
+
+// static
+void InternedEventCategory::Add(protos::pbzero::InternedData* interned_data,
+                                size_t iid,
+                                const char* value,
+                                size_t length) {
+  auto category = interned_data->add_event_categories();
+  category->set_iid(iid);
+  category->set_name(value, length);
+}
+
+InternedEventName::~InternedEventName() = default;
+
+// static
+void InternedEventName::Add(protos::pbzero::InternedData* interned_data,
+                            size_t iid,
+                            const char* value) {
+  auto name = interned_data->add_event_names();
+  name->set_iid(iid);
+  name->set_name(value);
+}
+
+InternedDebugAnnotationName::~InternedDebugAnnotationName() = default;
+
+// static
+void InternedDebugAnnotationName::Add(
+    protos::pbzero::InternedData* interned_data,
+    size_t iid,
+    const char* value) {
+  auto name = interned_data->add_debug_annotation_names();
+  name->set_iid(iid);
+  name->set_name(value);
+}
+
+}  // namespace internal
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/platform.cc
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/platform.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_tls.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
+
+namespace perfetto {
+
+PlatformThreadLocalObject::~PlatformThreadLocalObject() = default;
+Platform::~Platform() = default;
+
+// static
+std::unique_ptr<PlatformThreadLocalObject>
+PlatformThreadLocalObject::CreateInstance() {
+  return std::unique_ptr<PlatformThreadLocalObject>(new internal::TracingTLS());
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/traced_value.cc
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/traced_value.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/debug_annotation.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h"
+
+namespace perfetto {
+
+namespace internal {
+
+TracedValue CreateTracedValueFromProto(
+    protos::pbzero::DebugAnnotation* context) {
+  return TracedValue::CreateFromProto(context);
+}
+
+}  // namespace internal
+
+// static
+TracedValue TracedValue::CreateFromProto(
+    protos::pbzero::DebugAnnotation* context) {
+  return TracedValue(context, nullptr);
+}
+
+void TracedValue::WriteInt64(int64_t value) && {
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  context_->set_int_value(value);
+}
+
+void TracedValue::WriteUInt64(uint64_t value) && {
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  context_->set_uint_value(value);
+}
+
+void TracedValue::WriteDouble(double value) && {
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  context_->set_double_value(value);
+}
+
+void TracedValue::WriteBoolean(bool value) && {
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  context_->set_bool_value(value);
+}
+
+void TracedValue::WriteString(const char* value) && {
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  context_->set_string_value(value);
+}
+
+void TracedValue::WriteString(const char* value, size_t len) && {
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  context_->set_string_value(value, len);
+}
+
+void TracedValue::WriteString(const std::string& value) && {
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  context_->set_string_value(value);
+}
+
+void TracedValue::WritePointer(const void* value) && {
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  context_->set_pointer_value(reinterpret_cast<uint64_t>(value));
+}
+
+TracedDictionary TracedValue::WriteDictionary() && {
+  // Note: this passes |checked_scope_.is_active_| bit to the parent to be
+  // picked up later by the new TracedDictionary.
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  checked_scope_.Reset();
+
+  PERFETTO_DCHECK(!context_->is_finalized());
+  return TracedDictionary(context_, checked_scope_.parent_scope());
+}
+
+TracedArray TracedValue::WriteArray() && {
+  // Note: this passes |checked_scope_.is_active_| bit to the parent to be
+  // picked up later by the new TracedDictionary.
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  checked_scope_.Reset();
+
+  PERFETTO_DCHECK(!context_->is_finalized());
+  return TracedArray(context_, checked_scope_.parent_scope());
+}
+
+TracedValue TracedArray::AppendItem() {
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  return TracedValue(context_->add_array_values(), &checked_scope_);
+}
+
+TracedDictionary TracedArray::AppendDictionary() {
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  return AppendItem().WriteDictionary();
+}
+
+TracedArray TracedArray::AppendArray() {
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  return AppendItem().WriteArray();
+}
+
+TracedValue TracedDictionary::AddItem(StaticString key) {
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  protos::pbzero::DebugAnnotation* item = context_->add_dict_entries();
+  item->set_name(key.value);
+  return TracedValue(item, &checked_scope_);
+}
+
+TracedValue TracedDictionary::AddItem(DynamicString key) {
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  protos::pbzero::DebugAnnotation* item = context_->add_dict_entries();
+  item->set_name(key.value);
+  return TracedValue(item, &checked_scope_);
+}
+
+TracedDictionary TracedDictionary::AddDictionary(StaticString key) {
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  return AddItem(key).WriteDictionary();
+}
+
+TracedDictionary TracedDictionary::AddDictionary(DynamicString key) {
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  return AddItem(key).WriteDictionary();
+}
+
+TracedArray TracedDictionary::AddArray(StaticString key) {
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  return AddItem(key).WriteArray();
+}
+
+TracedArray TracedDictionary::AddArray(DynamicString key) {
+  PERFETTO_DCHECK(checked_scope_.is_active());
+  return AddItem(key).WriteArray();
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/tracing.cc
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/tracing.h"
+
+#include <atomic>
+#include <condition_variable>
+#include <mutex>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/waitable_event.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_internal.h"
+// gen_amalgamated expanded: #include "src/tracing/internal/tracing_muxer_impl.h"
+
+namespace perfetto {
+namespace {
+bool g_was_initialized = false;
+}
+
+// static
+void Tracing::InitializeInternal(const TracingInitArgs& args) {
+  static TracingInitArgs init_args;
+  if (g_was_initialized) {
+    if (!(init_args == args)) {
+      PERFETTO_ELOG(
+          "Tracing::Initialize() called more than once with different args. "
+          "This is not supported, only the first call will have effect.");
+      PERFETTO_DCHECK(false);
+    }
+    return;
+  }
+
+  // Make sure the headers and implementation files agree on the build config.
+  PERFETTO_CHECK(args.dcheck_is_on_ == PERFETTO_DCHECK_IS_ON());
+  if (args.log_message_callback) {
+    SetLogMessageCallback(args.log_message_callback);
+  }
+  internal::TracingMuxerImpl::InitializeInstance(args);
+  internal::TrackRegistry::InitializeInstance();
+  g_was_initialized = true;
+  init_args = args;
+}
+
+// static
+bool Tracing::IsInitialized() {
+  return g_was_initialized;
+}
+
+//  static
+std::unique_ptr<TracingSession> Tracing::NewTrace(BackendType backend) {
+  return static_cast<internal::TracingMuxerImpl*>(internal::TracingMuxer::Get())
+      ->CreateTracingSession(backend);
+}
+
+// Can be called from any thread.
+bool TracingSession::FlushBlocking(uint32_t timeout_ms) {
+  std::atomic<bool> flush_result;
+  base::WaitableEvent flush_ack;
+
+  // The non blocking Flush() can be called on any thread. It does the PostTask
+  // internally.
+  Flush(
+      [&flush_ack, &flush_result](bool res) {
+        flush_result = res;
+        flush_ack.Notify();
+      },
+      timeout_ms);
+  flush_ack.Wait();
+  return flush_result;
+}
+
+std::vector<char> TracingSession::ReadTraceBlocking() {
+  std::vector<char> raw_trace;
+  std::mutex mutex;
+  std::condition_variable cv;
+
+  bool all_read = false;
+
+  ReadTrace([&mutex, &raw_trace, &all_read, &cv](ReadTraceCallbackArgs cb) {
+    raw_trace.insert(raw_trace.end(), cb.data, cb.data + cb.size);
+    std::unique_lock<std::mutex> lock(mutex);
+    all_read = !cb.has_more;
+    if (all_read)
+      cv.notify_one();
+  });
+
+  {
+    std::unique_lock<std::mutex> lock(mutex);
+    cv.wait(lock, [&all_read] { return all_read; });
+  }
+  return raw_trace;
+}
+
+TracingSession::GetTraceStatsCallbackArgs
+TracingSession::GetTraceStatsBlocking() {
+  std::mutex mutex;
+  std::condition_variable cv;
+  GetTraceStatsCallbackArgs result;
+  bool stats_read = false;
+
+  GetTraceStats(
+      [&mutex, &result, &stats_read, &cv](GetTraceStatsCallbackArgs args) {
+        result = std::move(args);
+        std::unique_lock<std::mutex> lock(mutex);
+        stats_read = true;
+        cv.notify_one();
+      });
+
+  {
+    std::unique_lock<std::mutex> lock(mutex);
+    cv.wait(lock, [&stats_read] { return stats_read; });
+  }
+  return result;
+}
+
+TracingSession::QueryServiceStateCallbackArgs
+TracingSession::QueryServiceStateBlocking() {
+  std::mutex mutex;
+  std::condition_variable cv;
+  QueryServiceStateCallbackArgs result;
+  bool status_read = false;
+
+  QueryServiceState(
+      [&mutex, &result, &status_read, &cv](QueryServiceStateCallbackArgs args) {
+        result = std::move(args);
+        std::unique_lock<std::mutex> lock(mutex);
+        status_read = true;
+        cv.notify_one();
+      });
+
+  {
+    std::unique_lock<std::mutex> lock(mutex);
+    cv.wait(lock, [&status_read] { return status_read; });
+  }
+  return result;
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/tracing_policy.cc
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/tracing_policy.h"
+
+namespace perfetto {
+
+TracingPolicy::~TracingPolicy() = default;
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/track.cc
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/track.h"
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/hash.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/string_splitter.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/thread_utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/uuid.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_data_source.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/counter_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/process_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/process_descriptor.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/thread_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/thread_descriptor.pbzero.h"
+
+namespace perfetto {
+
+// static
+uint64_t Track::process_uuid;
+
+protos::gen::TrackDescriptor Track::Serialize() const {
+  protos::gen::TrackDescriptor desc;
+  desc.set_uuid(uuid);
+  if (parent_uuid)
+    desc.set_parent_uuid(parent_uuid);
+  return desc;
+}
+
+void Track::Serialize(protos::pbzero::TrackDescriptor* desc) const {
+  auto bytes = Serialize().SerializeAsString();
+  desc->AppendRawProtoBytes(bytes.data(), bytes.size());
+}
+
+protos::gen::TrackDescriptor ProcessTrack::Serialize() const {
+  auto desc = Track::Serialize();
+  auto pd = desc.mutable_process();
+  pd->set_pid(static_cast<int32_t>(pid));
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+  std::string cmdline;
+  if (base::ReadFile("/proc/self/cmdline", &cmdline)) {
+    // Since cmdline is a zero-terminated list of arguments, this ends up
+    // writing just the first element, i.e., the process name, into the process
+    // name field.
+    pd->set_process_name(cmdline.c_str());
+    base::StringSplitter splitter(std::move(cmdline), '\0');
+    while (splitter.Next()) {
+      pd->add_cmdline(
+          std::string(splitter.cur_token(), splitter.cur_token_size()));
+    }
+  }
+  // TODO(skyostil): Record command line on Windows and Mac.
+#endif
+  return desc;
+}
+
+void ProcessTrack::Serialize(protos::pbzero::TrackDescriptor* desc) const {
+  auto bytes = Serialize().SerializeAsString();
+  desc->AppendRawProtoBytes(bytes.data(), bytes.size());
+}
+
+protos::gen::TrackDescriptor ThreadTrack::Serialize() const {
+  auto desc = Track::Serialize();
+  auto td = desc.mutable_thread();
+  td->set_pid(static_cast<int32_t>(pid));
+  td->set_tid(static_cast<int32_t>(tid));
+  std::string thread_name;
+  if (base::GetThreadName(thread_name))
+    td->set_thread_name(thread_name);
+  return desc;
+}
+
+void ThreadTrack::Serialize(protos::pbzero::TrackDescriptor* desc) const {
+  auto bytes = Serialize().SerializeAsString();
+  desc->AppendRawProtoBytes(bytes.data(), bytes.size());
+}
+
+protos::gen::TrackDescriptor CounterTrack::Serialize() const {
+  auto desc = Track::Serialize();
+  desc.set_name(name_);
+  auto* counter = desc.mutable_counter();
+  if (category_)
+    counter->add_categories(category_);
+  if (unit_ != perfetto::protos::pbzero::CounterDescriptor::UNIT_UNSPECIFIED)
+    counter->set_unit(static_cast<protos::gen::CounterDescriptor_Unit>(unit_));
+  if (unit_name_)
+    counter->set_unit_name(unit_name_);
+  if (unit_multiplier_ != 1)
+    counter->set_unit_multiplier(unit_multiplier_);
+  if (is_incremental_)
+    counter->set_is_incremental(is_incremental_);
+  return desc;
+}
+
+void CounterTrack::Serialize(protos::pbzero::TrackDescriptor* desc) const {
+  auto bytes = Serialize().SerializeAsString();
+  desc->AppendRawProtoBytes(bytes.data(), bytes.size());
+}
+
+namespace internal {
+namespace {
+
+uint64_t GetProcessStartTime() {
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  std::string stat;
+  if (!base::ReadFile("/proc/self/stat", &stat))
+    return 0u;
+  // The stat file is a single line split into space-separated fields as "pid
+  // (comm) state ppid ...". However because the command name can contain any
+  // characters (including parentheses and spaces), we need to skip past it
+  // before parsing the rest of the fields. To do that, we look for the last
+  // instance of ") " (parentheses followed by space) and parse forward from
+  // that point.
+  size_t comm_end = stat.rfind(") ");
+  if (comm_end == std::string::npos)
+    return 0u;
+  stat = stat.substr(comm_end + strlen(") "));
+  base::StringSplitter splitter(stat, ' ');
+  for (size_t skip = 0; skip < 20; skip++) {
+    if (!splitter.Next())
+      return 0u;
+  }
+  return base::CStringToUInt64(splitter.cur_token()).value_or(0u);
+#else
+  return 0;
+#endif  // !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+}
+
+}  // namespace
+
+// static
+TrackRegistry* TrackRegistry::instance_;
+
+TrackRegistry::TrackRegistry() = default;
+TrackRegistry::~TrackRegistry() = default;
+
+// static
+void TrackRegistry::InitializeInstance() {
+  // TODO(eseckler): Chrome may call this more than once. Once Chrome doesn't
+  // call this directly anymore, bring back DCHECK(!instance_) instead.
+  if (instance_)
+    return;
+  instance_ = new TrackRegistry();
+
+  // Use the process start time + pid as the unique identifier for this process.
+  // This ensures that if there are two independent copies of the Perfetto SDK
+  // in the same process (e.g., one in the app and another in a system
+  // framework), events emitted by each will be consistently interleaved on
+  // common thread and process tracks.
+  if (uint64_t start_time = GetProcessStartTime()) {
+    base::Hash hash;
+    hash.Update(start_time);
+    hash.Update(base::GetProcessId());
+    Track::process_uuid = hash.digest();
+  } else {
+    // Fall back to a randomly generated identifier.
+    Track::process_uuid = static_cast<uint64_t>(base::Uuidv4().lsb());
+  }
+}
+
+void TrackRegistry::UpdateTrack(Track track,
+                                const std::string& serialized_desc) {
+  std::lock_guard<std::mutex> lock(mutex_);
+  tracks_[track.uuid] = std::move(serialized_desc);
+}
+
+void TrackRegistry::UpdateTrackImpl(
+    Track track,
+    std::function<void(protos::pbzero::TrackDescriptor*)> fill_function) {
+  constexpr size_t kInitialSliceSize = 32;
+  constexpr size_t kMaximumSliceSize = 4096;
+  protozero::HeapBuffered<protos::pbzero::TrackDescriptor> new_descriptor(
+      kInitialSliceSize, kMaximumSliceSize);
+  fill_function(new_descriptor.get());
+  auto serialized_desc = new_descriptor.SerializeAsString();
+  UpdateTrack(track, serialized_desc);
+}
+
+void TrackRegistry::EraseTrack(Track track) {
+  std::lock_guard<std::mutex> lock(mutex_);
+  tracks_.erase(track.uuid);
+}
+
+// static
+void TrackRegistry::WriteTrackDescriptor(
+    const SerializedTrackDescriptor& desc,
+    protozero::MessageHandle<protos::pbzero::TracePacket> packet) {
+  packet->AppendString(
+      perfetto::protos::pbzero::TracePacket::kTrackDescriptorFieldNumber, desc);
+}
+
+}  // namespace internal
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/track_event_category_registry.cc
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/track_event_category_registry.h"
+
+namespace perfetto {
+
+// static
+Category Category::FromDynamicCategory(const char* name) {
+  if (GetNthNameSize(1, name, name)) {
+    Category group(Group(name));
+    PERFETTO_DCHECK(group.name);
+    return group;
+  }
+  Category category(name);
+  PERFETTO_DCHECK(category.name);
+  return category;
+}
+
+Category Category::FromDynamicCategory(
+    const DynamicCategory& dynamic_category) {
+  return FromDynamicCategory(dynamic_category.name.c_str());
+}
+
+namespace internal {
+
+perfetto::DynamicCategory NullCategory(const perfetto::DynamicCategory&) {
+  return perfetto::DynamicCategory{};
+}
+
+void TrackEventCategoryRegistry::EnableCategoryForInstance(
+    size_t category_index,
+    uint32_t instance_index) const {
+  PERFETTO_DCHECK(instance_index < kMaxDataSourceInstances);
+  PERFETTO_DCHECK(category_index < category_count_);
+  // Matches the acquire_load in DataSource::Trace().
+  state_storage_[category_index].fetch_or(
+      static_cast<uint8_t>(1u << instance_index), std::memory_order_release);
+}
+
+void TrackEventCategoryRegistry::DisableCategoryForInstance(
+    size_t category_index,
+    uint32_t instance_index) const {
+  PERFETTO_DCHECK(instance_index < kMaxDataSourceInstances);
+  PERFETTO_DCHECK(category_index < category_count_);
+  // Matches the acquire_load in DataSource::Trace().
+  state_storage_[category_index].fetch_and(
+      static_cast<uint8_t>(~(1u << instance_index)), std::memory_order_release);
+}
+
+}  // namespace internal
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/track_event_legacy.cc
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/track_event_legacy.h"
+
+// gen_amalgamated expanded: #include "perfetto/tracing/track.h"
+
+namespace perfetto {
+namespace legacy {
+
+template <>
+ThreadTrack ConvertThreadId(const PerfettoLegacyCurrentThreadId&) {
+  // Because of the short-circuit in PERFETTO_INTERNAL_LEGACY_EVENT, we should
+  // never get here.
+  PERFETTO_DCHECK(false);
+  return ThreadTrack::Current();
+}
+
+}  // namespace legacy
+
+namespace internal {
+
+void LegacyTraceId::Write(protos::pbzero::TrackEvent::LegacyEvent* event,
+                          uint32_t event_flags) const {
+  // Legacy flow events always use bind_id.
+  if (event_flags &
+      (legacy::kTraceEventFlagFlowOut | legacy::kTraceEventFlagFlowIn)) {
+    // Flow bind_ids don't have scopes, so we need to mangle in-process ones to
+    // avoid collisions.
+    if (id_flags_ & legacy::kTraceEventFlagHasLocalId) {
+      event->set_bind_id(raw_id_ ^ ProcessTrack::Current().uuid);
+    } else {
+      event->set_bind_id(raw_id_);
+    }
+    return;
+  }
+
+  uint32_t scope_flags = id_flags_ & (legacy::kTraceEventFlagHasId |
+                                      legacy::kTraceEventFlagHasLocalId |
+                                      legacy::kTraceEventFlagHasGlobalId);
+  switch (scope_flags) {
+    case legacy::kTraceEventFlagHasId:
+      event->set_unscoped_id(raw_id_);
+      break;
+    case legacy::kTraceEventFlagHasLocalId:
+      event->set_local_id(raw_id_);
+      break;
+    case legacy::kTraceEventFlagHasGlobalId:
+      event->set_global_id(raw_id_);
+      break;
+  }
+  if (scope_)
+    event->set_id_scope(scope_);
+}
+
+}  // namespace internal
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/track_event_state_tracker.cc
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/track_event_state_tracker.h"
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/hash.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/common/interceptor_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/interned_data/interned_data.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet_defaults.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/process_descriptor.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/thread_descriptor.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_descriptor.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_event.pbzero.h"
+
+namespace perfetto {
+
+TrackEventStateTracker::~TrackEventStateTracker() = default;
+TrackEventStateTracker::Delegate::~Delegate() = default;
+
+// static
+void TrackEventStateTracker::ProcessTracePacket(
+    Delegate& delegate,
+    SequenceState& sequence_state,
+    const protos::pbzero::TracePacket_Decoder& packet) {
+  UpdateIncrementalState(delegate, sequence_state, packet);
+
+  if (!packet.has_track_event())
+    return;
+  perfetto::protos::pbzero::TrackEvent::Decoder track_event(
+      packet.track_event());
+
+  // TODO(skyostil): Support incremental timestamps.
+  uint64_t timestamp = packet.timestamp();
+
+  Track* track = &sequence_state.track;
+  if (track_event.has_track_uuid()) {
+    auto* session_state = delegate.GetSessionState();
+    if (!session_state)
+      return;  // Tracing must have ended.
+    track = &session_state->tracks[track_event.track_uuid()];
+  }
+
+  // We only log the first category of each event.
+  protozero::ConstChars category{};
+  uint64_t category_iid = 0;
+  if (auto iid_it = track_event.category_iids()) {
+    category_iid = *iid_it;
+    category.data = sequence_state.event_categories[category_iid].data();
+    category.size = sequence_state.event_categories[category_iid].size();
+  } else if (auto cat_it = track_event.categories()) {
+    category.data = reinterpret_cast<const char*>(cat_it->data());
+    category.size = cat_it->size();
+  }
+
+  protozero::ConstChars name{};
+  uint64_t name_iid = track_event.name_iid();
+  uint64_t name_hash = 0;
+  uint64_t duration = 0;
+  if (name_iid) {
+    name.data = sequence_state.event_names[name_iid].data();
+    name.size = sequence_state.event_names[name_iid].size();
+  } else if (track_event.has_name()) {
+    name.data = track_event.name().data;
+    name.size = track_event.name().size;
+  }
+
+  if (name.data) {
+    base::Hash hash;
+    hash.Update(name.data, name.size);
+    name_hash = hash.digest();
+  }
+
+  size_t depth = track->stack.size();
+  switch (track_event.type()) {
+    case protos::pbzero::TrackEvent::TYPE_SLICE_BEGIN: {
+      StackFrame frame;
+      frame.timestamp = timestamp;
+      frame.name_hash = name_hash;
+      if (track_event.has_track_uuid()) {
+        frame.name = name.ToStdString();
+        frame.category = category.ToStdString();
+      } else {
+        frame.name_iid = name_iid;
+        frame.category_iid = category_iid;
+      }
+      track->stack.push_back(std::move(frame));
+      break;
+    }
+    case protos::pbzero::TrackEvent::TYPE_SLICE_END:
+      if (!track->stack.empty()) {
+        const auto& prev_frame = track->stack.back();
+        if (prev_frame.name_iid) {
+          name.data = sequence_state.event_names[prev_frame.name_iid].data();
+          name.size = sequence_state.event_names[prev_frame.name_iid].size();
+        } else {
+          name.data = prev_frame.name.data();
+          name.size = prev_frame.name.size();
+        }
+        name_hash = prev_frame.name_hash;
+        if (prev_frame.category_iid) {
+          category.data =
+              sequence_state.event_categories[prev_frame.category_iid].data();
+          category.size =
+              sequence_state.event_categories[prev_frame.category_iid].size();
+        } else {
+          category.data = prev_frame.category.data();
+          category.size = prev_frame.category.size();
+        }
+        duration = timestamp - prev_frame.timestamp;
+        depth--;
+      }
+      break;
+    case protos::pbzero::TrackEvent::TYPE_INSTANT:
+      break;
+    case protos::pbzero::TrackEvent::TYPE_COUNTER:
+    case protos::pbzero::TrackEvent::TYPE_UNSPECIFIED:
+      // TODO(skyostil): Support counters.
+      return;
+  }
+
+  ParsedTrackEvent parsed_event{track_event};
+  parsed_event.timestamp_ns = timestamp;
+  parsed_event.duration_ns = duration;
+  parsed_event.stack_depth = depth;
+  parsed_event.category = category;
+  parsed_event.name = name;
+  parsed_event.name_hash = name_hash;
+  delegate.OnTrackEvent(*track, parsed_event);
+
+  if (track_event.type() == protos::pbzero::TrackEvent::TYPE_SLICE_END &&
+      !track->stack.empty()) {
+    track->stack.pop_back();
+  }
+}
+
+// static
+void TrackEventStateTracker::UpdateIncrementalState(
+    Delegate& delegate,
+    SequenceState& sequence_state,
+    const protos::pbzero::TracePacket_Decoder& packet) {
+#if PERFETTO_DCHECK_IS_ON()
+  if (!sequence_state.sequence_id) {
+    sequence_state.sequence_id = packet.trusted_packet_sequence_id();
+  } else {
+    PERFETTO_DCHECK(sequence_state.sequence_id ==
+                    packet.trusted_packet_sequence_id());
+  }
+#endif
+
+  if (packet.sequence_flags() &
+      perfetto::protos::pbzero::TracePacket::SEQ_INCREMENTAL_STATE_CLEARED) {
+    // Convert any existing event names and categories on the stack to
+    // non-interned strings so we can look up their names even after the
+    // incremental state is gone.
+    for (auto& frame : sequence_state.track.stack) {
+      if (frame.name_iid) {
+        frame.name = sequence_state.event_names[frame.name_iid];
+        frame.name_iid = 0u;
+      }
+      if (frame.category_iid) {
+        frame.category = sequence_state.event_categories[frame.category_iid];
+        frame.category_iid = 0u;
+      }
+    }
+    sequence_state.event_names.clear();
+    sequence_state.event_categories.clear();
+    sequence_state.debug_annotation_names.clear();
+    sequence_state.track.uuid = 0u;
+    sequence_state.track.index = 0u;
+  }
+  if (packet.has_interned_data()) {
+    perfetto::protos::pbzero::InternedData::Decoder interned_data(
+        packet.interned_data());
+    for (auto it = interned_data.event_names(); it; it++) {
+      perfetto::protos::pbzero::EventName::Decoder entry(*it);
+      sequence_state.event_names[entry.iid()] = entry.name().ToStdString();
+    }
+    for (auto it = interned_data.event_categories(); it; it++) {
+      perfetto::protos::pbzero::EventCategory::Decoder entry(*it);
+      sequence_state.event_categories[entry.iid()] = entry.name().ToStdString();
+    }
+    for (auto it = interned_data.debug_annotation_names(); it; it++) {
+      perfetto::protos::pbzero::DebugAnnotationName::Decoder entry(*it);
+      sequence_state.debug_annotation_names[entry.iid()] =
+          entry.name().ToStdString();
+    }
+  }
+  if (packet.has_trace_packet_defaults()) {
+    perfetto::protos::pbzero::TracePacketDefaults::Decoder defaults(
+        packet.trace_packet_defaults());
+    if (defaults.has_track_event_defaults()) {
+      perfetto::protos::pbzero::TrackEventDefaults::Decoder
+          track_event_defaults(defaults.track_event_defaults());
+      sequence_state.track.uuid = track_event_defaults.track_uuid();
+    }
+  }
+  if (packet.has_track_descriptor()) {
+    perfetto::protos::pbzero::TrackDescriptor::Decoder track_descriptor(
+        packet.track_descriptor());
+    auto* session_state = delegate.GetSessionState();
+    auto& track = session_state->tracks[track_descriptor.uuid()];
+    if (!track.index)
+      track.index = static_cast<uint32_t>(session_state->tracks.size() + 1);
+    track.uuid = track_descriptor.uuid();
+
+    track.name = track_descriptor.name().ToStdString();
+    track.pid = 0;
+    track.tid = 0;
+    if (track_descriptor.has_process()) {
+      perfetto::protos::pbzero::ProcessDescriptor::Decoder process(
+          track_descriptor.process());
+      track.pid = process.pid();
+      if (track.name.empty())
+        track.name = process.process_name().ToStdString();
+    } else if (track_descriptor.has_thread()) {
+      perfetto::protos::pbzero::ThreadDescriptor::Decoder thread(
+          track_descriptor.thread());
+      track.pid = thread.pid();
+      track.tid = thread.tid();
+      if (track.name.empty())
+        track.name = thread.thread_name().ToStdString();
+    }
+    delegate.OnTrackUpdated(track);
+
+    // Mirror properties to the default track of the sequence. Note that
+    // this does not catch updates to the default track written through other
+    // sequences.
+    if (track.uuid == sequence_state.track.uuid) {
+      sequence_state.track.index = track.index;
+      sequence_state.track.name = track.name;
+      sequence_state.track.pid = track.pid;
+      sequence_state.track.tid = track.tid;
+      sequence_state.track.user_data = track.user_data;
+    }
+  }
+}
+
+TrackEventStateTracker::ParsedTrackEvent::ParsedTrackEvent(
+    const perfetto::protos::pbzero::TrackEvent::Decoder& track_event_)
+    : track_event(track_event_) {}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/virtual_destructors.cc
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_tls.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/tracing.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/tracing_backend.h"
+
+// This translation unit contains the definitions for the destructor of pure
+// virtual interfaces for the src/public:public target. The alternative would be
+// introducing a one-liner .cc file for each pure virtual interface, which is
+// overkill. This is for compliance with -Wweak-vtables.
+
+namespace perfetto {
+namespace internal {
+
+TracingTLS::~TracingTLS() {
+  // Avoid entering trace points while the thread is being torn down.
+  is_in_trace_point = true;
+}
+
+}  // namespace internal
+
+TracingBackend::~TracingBackend() = default;
+TracingSession::~TracingSession() = default;
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/android_stats/statsd_logging_helper.cc
+// gen_amalgamated begin header: src/android_stats/statsd_logging_helper.h
+// gen_amalgamated begin header: src/android_stats/perfetto_atoms.h
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_ANDROID_STATS_PERFETTO_ATOMS_H_
+#define SRC_ANDROID_STATS_PERFETTO_ATOMS_H_
+
+namespace perfetto {
+
+// This must match the values of the PerfettoUploadEvent enum in:
+// frameworks/proto_logging/stats/atoms.proto
+enum class PerfettoStatsdAtom {
+  kUndefined = 0,
+
+  // Checkpoints inside perfetto_cmd before tracing is finished.
+  kTraceBegin = 1,
+  kBackgroundTraceBegin = 2,
+  kOnConnect = 3,
+
+  // Guardrails inside perfetto_cmd before tracing is finished.
+  kOnTimeout = 16,
+  kCmdUserBuildTracingNotAllowed = 43,
+  kCmdFailedToInitGuardrailState = 44,
+  kCmdInvalidGuardrailState = 45,
+  kCmdHitUploadLimit = 46,
+
+  // Checkpoints inside traced.
+  kTracedEnableTracing = 37,
+  kTracedStartTracing = 38,
+  kTracedDisableTracing = 39,
+  kTracedNotifyTracingDisabled = 40,
+
+  // Trigger checkpoints inside traced.
+  // These atoms are special because, along with the UUID,
+  // they log the trigger name.
+  kTracedTriggerStartTracing = 41,
+  kTracedTriggerStopTracing = 42,
+
+  // Guardrails inside traced.
+  kTracedEnableTracingExistingTraceSession = 18,
+  kTracedEnableTracingTooLongTrace = 19,
+  kTracedEnableTracingInvalidTriggerTimeout = 20,
+  kTracedEnableTracingDurationWithTrigger = 21,
+  kTracedEnableTracingStopTracingWriteIntoFile = 22,
+  kTracedEnableTracingDuplicateTriggerName = 23,
+  kTracedEnableTracingInvalidDeferredStart = 24,
+  kTracedEnableTracingInvalidBufferSize = 25,
+  kTracedEnableTracingBufferSizeTooLarge = 26,
+  kTracedEnableTracingTooManyBuffers = 27,
+  kTracedEnableTracingDuplicateSessionName = 28,
+  kTracedEnableTracingSessionNameTooRecent = 29,
+  kTracedEnableTracingTooManySessionsForUid = 30,
+  kTracedEnableTracingTooManyConcurrentSessions = 31,
+  kTracedEnableTracingInvalidFdOutputFile = 32,
+  kTracedEnableTracingFailedToCreateFile = 33,
+  kTracedEnableTracingOom = 34,
+  kTracedEnableTracingUnknown = 35,
+  kTracedStartTracingInvalidSessionState = 36,
+  kTracedEnableTracingInvalidFilter = 47,
+
+  // Checkpoints inside perfetto_cmd after tracing has finished.
+  kOnTracingDisabled = 4,
+  kUploadIncidentBegin = 8,
+  kFinalizeTraceAndExit = 11,
+  kNotUploadingEmptyTrace = 17,
+
+  // Guardrails inside perfetto_cmd after tracing has finished.
+  kUploadIncidentFailure = 10,
+
+  // Deprecated as "success" is misleading; it simply means we were
+  // able to communicate with incidentd. Will be removed once
+  // incidentd is properly instrumented.
+  kUploadIncidentSuccess = 9,
+
+  // Deprecated as has the potential to be too spammy. Will be
+  // replaced with a whole new atom proto which uses a count metric
+  // instead of the event metric used for this proto.
+  kTriggerBegin = 12,
+  kTriggerSuccess = 13,
+  kTriggerFailure = 14,
+
+  // Deprecated as too coarse grained to be useful. Will be replaced
+  // with better broken down atoms as we do with traced.
+  kHitGuardrails = 15,
+
+  // Contained status of Dropbox uploads. Removed as Perfetto no
+  // longer supports uploading traces using Dropbox.
+  // reserved 5, 6, 7;
+};
+
+// This must match the values of the PerfettoTrigger::TriggerType enum in:
+// frameworks/base/cmds/statsd/src/atoms.proto
+enum PerfettoTriggerAtom {
+  kUndefined = 0,
+
+  kCmdTrigger = 1,
+  kCmdTriggerFail = 2,
+
+  kTriggerPerfettoTrigger = 3,
+  kTriggerPerfettoTriggerFail = 4,
+
+  kTracedLimitProbability = 5,
+  kTracedLimitMaxPer24h = 6,
+
+  kProbesProducerTrigger = 7,
+  kProbesProducerTriggerFail = 8,
+};
+
+}  // namespace perfetto
+
+#endif  // SRC_ANDROID_STATS_PERFETTO_ATOMS_H_
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_ANDROID_STATS_STATSD_LOGGING_HELPER_H_
+#define SRC_ANDROID_STATS_STATSD_LOGGING_HELPER_H_
+
+#include <stdint.h>
+#include <string>
+#include <vector>
+
+// gen_amalgamated expanded: #include "src/android_stats/perfetto_atoms.h"
+
+namespace perfetto {
+namespace android_stats {
+
+// Functions in this file are only active on built in the Android
+// tree. On other platforms (including Android standalone and Chromium
+// on Android) these functions are a noop.
+
+// Logs the upload event to statsd if built in the Android tree.
+void MaybeLogUploadEvent(PerfettoStatsdAtom atom,
+                         int64_t uuid_lsb,
+                         int64_t uuid_msb,
+                         const std::string& trigger_name = "");
+
+// Logs the trigger events to statsd if built in the Android tree.
+void MaybeLogTriggerEvent(PerfettoTriggerAtom atom, const std::string& trigger);
+
+// Logs the trigger events to statsd if built in the Android tree.
+void MaybeLogTriggerEvents(PerfettoTriggerAtom atom,
+                           const std::vector<std::string>& triggers);
+
+}  // namespace android_stats
+}  // namespace perfetto
+
+#endif  // SRC_ANDROID_STATS_STATSD_LOGGING_HELPER_H_
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/android_stats/statsd_logging_helper.h"
+
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && \
+    PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
+// gen_amalgamated expanded: #include "src/android_internal/lazy_library_loader.h"  // nogncheck
+// gen_amalgamated expanded: #include "src/android_internal/statsd_logging.h"       // nogncheck
+#endif
+
+namespace perfetto {
+namespace android_stats {
+
+// Make sure we don't accidentally log on non-Android tree build. Note that even
+// removing this ifdef still doesn't make uploads work on OS_ANDROID.
+// PERFETTO_LAZY_LOAD will return a nullptr on non-Android and non-in-tree
+// builds as libperfetto_android_internal will not be available.
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && \
+    PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
+
+void MaybeLogUploadEvent(PerfettoStatsdAtom atom,
+                         int64_t uuid_lsb,
+                         int64_t uuid_msb,
+                         const std::string& trigger_name) {
+  PERFETTO_LAZY_LOAD(android_internal::StatsdLogUploadEvent, log_event_fn);
+  if (log_event_fn) {
+    log_event_fn(atom, uuid_lsb, uuid_msb, trigger_name.c_str());
+  }
+}
+
+void MaybeLogTriggerEvent(PerfettoTriggerAtom atom,
+                          const std::string& trigger_name) {
+  PERFETTO_LAZY_LOAD(android_internal::StatsdLogTriggerEvent, log_event_fn);
+  if (log_event_fn) {
+    log_event_fn(atom, trigger_name.c_str());
+  }
+}
+
+void MaybeLogTriggerEvents(PerfettoTriggerAtom atom,
+                           const std::vector<std::string>& triggers) {
+  PERFETTO_LAZY_LOAD(android_internal::StatsdLogTriggerEvent, log_event_fn);
+  if (log_event_fn) {
+    for (const std::string& trigger_name : triggers) {
+      log_event_fn(atom, trigger_name.c_str());
+    }
+  }
+}
+
+#else
+void MaybeLogUploadEvent(PerfettoStatsdAtom,
+                         int64_t,
+                         int64_t,
+                         const std::string&) {}
+void MaybeLogTriggerEvent(PerfettoTriggerAtom, const std::string&) {}
+void MaybeLogTriggerEvents(PerfettoTriggerAtom,
+                           const std::vector<std::string>&) {}
+#endif
+
+}  // namespace android_stats
+}  // namespace perfetto
+// gen_amalgamated begin source: src/protozero/filtering/filter_bytecode_parser.cc
+// gen_amalgamated begin header: src/protozero/filtering/filter_bytecode_parser.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_PROTOZERO_FILTERING_FILTER_BYTECODE_PARSER_H_
+#define SRC_PROTOZERO_FILTERING_FILTER_BYTECODE_PARSER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+namespace protozero {
+
+// Loads the proto-encoded bytecode in memory and allows fast lookups for tuples
+// (msg_index, field_id) to tell if a given field should be allowed or not and,
+// in the case of nested fields, what is the next message index to recurse into.
+// This class does two things:
+// 1. Expands the array of varint from the proto into a vector<uint32_t>. This
+//    is to avoid performing varint decoding on every lookup, at the cost of
+//    some extra memory (2KB-4KB). Note that the expanded vector is not just a
+//    1:1 copy of the proto one (more below). This is to avoid O(Fields) linear
+//    lookup complexity.
+// 2. Creates an index of offsets to remember the start word for each message.
+//    This is so we can jump to O(1) to the N-th message when recursing into a
+//    nested fields, without having to scan and find the (N-1)-th END_OF_MESSAGE
+//    marker.
+// Overall lookups are O(1) for field ids < 128 (kDirectlyIndexLimit) and O(N),
+// with N being the number of allowed field ranges for other fields.
+// See comments around |word_| below for the structure of the word vector.
+class FilterBytecodeParser {
+ public:
+  // Result of a Query() operation
+  struct QueryResult {
+    bool allowed;  // Whether the field is allowed at all or no.
+
+    // If |allowed|==true && simple_field()==false, this tells the message index
+    // of the nested field that should be used when recursing in the parser.
+    uint32_t nested_msg_index;
+
+    // If |allowed|==true, specifies if the field is of a simple type (varint,
+    // fixed32/64, string or byte) or a nested field that needs recursion.
+    // In the latter case the caller is expected to use |nested_msg_index| for
+    // the next Query() calls.
+    bool simple_field() const { return nested_msg_index == kSimpleField; }
+  };
+
+  // Loads a filter. The filter data consists of a sequence of varints which
+  // contains the filter opcodes and a final checksum.
+  bool Load(const void* filter_data, size_t len);
+
+  // Checks wheter a given field is allowed or not.
+  // msg_index = 0 is the index of the root message, where all queries should
+  // start from (typically perfetto.protos.Trace).
+  QueryResult Query(uint32_t msg_index, uint32_t field_id);
+
+  void Reset();
+  void set_suppress_logs_for_fuzzer(bool x) { suppress_logs_for_fuzzer_ = x; }
+
+ private:
+  static constexpr uint32_t kDirectlyIndexLimit = 128;
+  static constexpr uint32_t kAllowed = 1u << 31u;
+  static constexpr uint32_t kSimpleField = 0x7fffffff;
+
+  bool LoadInternal(const uint8_t* filter_data, size_t len);
+
+  // The state of all fields for all messages is stored in one contiguous array.
+  // This is to avoid memory fragmentation and allocator overhead.
+  // We expect a high number of messages (hundreds), but each message is small.
+  // For each message we store two sets of uint32:
+  // 1. A set of "directly indexed" fields, for field ids < 128.
+  // 2. The remainder is a set of ranges.
+  // So each message descriptor consists of a sequence of words as follows:
+  //
+  // [0] -> how many directly indexed fields are stored next (up to 128)
+  //
+  // [1..N] -> One word per field id (See "field state" below).
+  //
+  // [N + 1] -> Start of field id range 1
+  // [N + 2] -> End of field id range 1 (exclusive, STL-style).
+  // [N + 3] -> Field state for fields in range 1 (below)
+  //
+  // [N + 4] -> Start of field id range 2
+  // [N + 5] -> End of field id range 2 (exclusive, STL-style).
+  // [N + 6] -> Field state for fields in range 2 (below)
+
+  // The "field state" word is as follows:
+  // Bit 31: 0 if the field is disallowed, 1 if allowed.
+  //         Only directly indexed fields can be 0 (it doesn't make sense to add
+  //         a range and then say "btw it's NOT allowed".. don't add it then.
+  //         0 is only used for filling gaps in the directly indexed bucket.
+  // Bits [30..0] (only when MSB == allowed):
+  //  0x7fffffff: The field is "simple" (varint, fixed32/64, string, bytes) and
+  //      can be directly passed through in output. No recursion is needed.
+  //  [0, 7ffffffe]: The field is a nested submessage. The value is the index
+  //     that must be passed as first argument to the next Query() calls.
+  //     Note that the message index is purely a monotonic counter in the
+  //     filter bytecode, has no proto-equivalent match (unlike field ids).
+  std::vector<uint32_t> words_;
+
+  // One entry for each message index stored in the filter plus a sentinel at
+  // the end. Maps each message index to the offset in |words_| where the the
+  // Nth message start.
+  // message_offset_.size() - 2 == the max message id that can be parsed.
+  std::vector<uint32_t> message_offset_;
+
+  bool suppress_logs_for_fuzzer_ = false;
+};
+
+}  // namespace protozero
+
+#endif  // SRC_PROTOZERO_FILTERING_FILTER_BYTECODE_PARSER_H_
+// gen_amalgamated begin header: src/protozero/filtering/filter_bytecode_common.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_PROTOZERO_FILTERING_FILTER_BYTECODE_COMMON_H_
+#define SRC_PROTOZERO_FILTERING_FILTER_BYTECODE_COMMON_H_
+
+#include <stdint.h>
+
+namespace protozero {
+
+enum FilterOpcode : uint32_t {
+  // The immediate value is 0 in this case.
+  kFilterOpcode_EndOfMessage = 0,
+
+  // The immediate value is the id of the allowed field.
+  kFilterOpcode_SimpleField = 1,
+
+  // The immediate value is the start of the range. The next word (without
+  // any shifting) is the length of the range.
+  kFilterOpcode_SimpleFieldRange = 2,
+
+  // The immediate value is the id of the allowed field. The next word
+  // (without any shifting) is the index of the filter that should be used to
+  // recurse into the nested message.
+  kFilterOpcode_NestedField = 3,
+};
+}  // namespace protozero
+
+#endif  // SRC_PROTOZERO_FILTERING_FILTER_BYTECODE_COMMON_H_
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/protozero/filtering/filter_bytecode_parser.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/hash.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+// gen_amalgamated expanded: #include "src/protozero/filtering/filter_bytecode_common.h"
+
+namespace protozero {
+
+void FilterBytecodeParser::Reset() {
+  bool suppress = suppress_logs_for_fuzzer_;
+  *this = FilterBytecodeParser();
+  suppress_logs_for_fuzzer_ = suppress;
+}
+
+bool FilterBytecodeParser::Load(const void* filter_data, size_t len) {
+  Reset();
+  bool res = LoadInternal(static_cast<const uint8_t*>(filter_data), len);
+  // If load fails, don't leave the parser in a half broken state.
+  if (!res)
+    Reset();
+  return res;
+}
+
+bool FilterBytecodeParser::LoadInternal(const uint8_t* bytecode_data,
+                                        size_t len) {
+  // First unpack the varints into a plain uint32 vector, so it's easy to
+  // iterate through them and look ahead.
+  std::vector<uint32_t> words;
+  bool packed_parse_err = false;
+  words.reserve(len);  // An overestimation, but avoids reallocations.
+  using BytecodeDecoder =
+      PackedRepeatedFieldIterator<proto_utils::ProtoWireType::kVarInt,
+                                  uint32_t>;
+  for (BytecodeDecoder it(bytecode_data, len, &packed_parse_err); it; ++it)
+    words.emplace_back(*it);
+
+  if (packed_parse_err || words.empty())
+    return false;
+
+  perfetto::base::Hash hasher;
+  for (size_t i = 0; i < words.size() - 1; ++i)
+    hasher.Update(words[i]);
+
+  uint32_t expected_csum = static_cast<uint32_t>(hasher.digest());
+  if (expected_csum != words.back()) {
+    if (!suppress_logs_for_fuzzer_) {
+      PERFETTO_ELOG("Filter bytecode checksum failed. Expected: %x, actual: %x",
+                    expected_csum, words.back());
+    }
+    return false;
+  }
+
+  words.pop_back();  // Pop the checksum.
+
+  // Temporay storage for each message. Cleared on every END_OF_MESSAGE.
+  std::vector<uint32_t> direct_indexed_fields;
+  std::vector<uint32_t> ranges;
+  uint32_t max_msg_index = 0;
+
+  auto add_directly_indexed_field = [&](uint32_t field_id, uint32_t msg_id) {
+    PERFETTO_DCHECK(field_id > 0 && field_id < kDirectlyIndexLimit);
+    direct_indexed_fields.resize(std::max(direct_indexed_fields.size(),
+                                          static_cast<size_t>(field_id) + 1));
+    direct_indexed_fields[field_id] = kAllowed | msg_id;
+  };
+
+  auto add_range = [&](uint32_t id_start, uint32_t id_end, uint32_t msg_id) {
+    PERFETTO_DCHECK(id_end > id_start);
+    PERFETTO_DCHECK(id_start >= kDirectlyIndexLimit);
+    ranges.emplace_back(id_start);
+    ranges.emplace_back(id_end);
+    ranges.emplace_back(kAllowed | msg_id);
+  };
+
+  for (size_t i = 0; i < words.size(); ++i) {
+    const uint32_t word = words[i];
+    const bool has_next_word = i < words.size() - 1;
+    const uint32_t opcode = word & 0x7u;
+    const uint32_t field_id = word >> 3;
+
+    if (field_id == 0 && opcode != kFilterOpcode_EndOfMessage) {
+      PERFETTO_DLOG("bytecode error @ word %zu, invalid field id (0)", i);
+      return false;
+    }
+
+    if (opcode == kFilterOpcode_SimpleField ||
+        opcode == kFilterOpcode_NestedField) {
+      // Field words are organized as follow:
+      // MSB: 1 if allowed, 0 if not allowed.
+      // Remaining bits:
+      //   Message index in the case of nested (non-simple) messages.
+      //   0x7f..f in the case of simple messages.
+      uint32_t msg_id;
+      if (opcode == kFilterOpcode_SimpleField) {
+        msg_id = kSimpleField;
+      } else {  // FILTER_OPCODE_NESTED_FIELD
+        // The next word in the bytecode contains the message index.
+        if (!has_next_word) {
+          PERFETTO_DLOG("bytecode error @ word %zu: unterminated nested field",
+                        i);
+          return false;
+        }
+        msg_id = words[++i];
+        max_msg_index = std::max(max_msg_index, msg_id);
+      }
+
+      if (field_id < kDirectlyIndexLimit) {
+        add_directly_indexed_field(field_id, msg_id);
+      } else {
+        // In the case of a large field id (rare) we waste an extra word and
+        // represent it as a range. Doesn't make sense to introduce extra
+        // complexity to deal with rare cases like this.
+        add_range(field_id, field_id + 1, msg_id);
+      }
+    } else if (opcode == kFilterOpcode_SimpleFieldRange) {
+      if (!has_next_word) {
+        PERFETTO_DLOG("bytecode error @ word %zu: unterminated range", i);
+        return false;
+      }
+      const uint32_t range_len = words[++i];
+      const uint32_t range_end = field_id + range_len;  // STL-style, excl.
+      uint32_t id = field_id;
+
+      // Here's the subtle complexity: at the bytecode level, we don't know
+      // anything about the kDirectlyIndexLimit. It is legit to define a range
+      // that spans across the direct-indexing threshold (e.g. 126-132). In that
+      // case we want to add all the elements < the indexing to the O(1) bucket
+      // and add only the remaining range as a non-indexed range.
+      for (; id < range_end && id < kDirectlyIndexLimit; ++id)
+        add_directly_indexed_field(id, kAllowed | kSimpleField);
+      PERFETTO_DCHECK(id >= kDirectlyIndexLimit || id == range_end);
+      if (id < range_end)
+        add_range(id, range_end, kSimpleField);
+    } else if (opcode == kFilterOpcode_EndOfMessage) {
+      // For each message append:
+      // 1. The "header" word telling how many directly indexed fields there
+      //    are.
+      // 2. The words for the directly indexed fields (id < 128).
+      // 3. The rest of the fields, encoded as ranges.
+      // Also update the |message_offset_| index to remember the word offset for
+      // the current message.
+      message_offset_.emplace_back(static_cast<uint32_t>(words_.size()));
+      words_.emplace_back(static_cast<uint32_t>(direct_indexed_fields.size()));
+      words_.insert(words_.end(), direct_indexed_fields.begin(),
+                    direct_indexed_fields.end());
+      words_.insert(words_.end(), ranges.begin(), ranges.end());
+      direct_indexed_fields.clear();
+      ranges.clear();
+    } else {
+      PERFETTO_DLOG("bytecode error @ word %zu: invalid opcode (%x)", i, word);
+      return false;
+    }
+  }  // (for word in bytecode).
+
+  if (max_msg_index > 0 && max_msg_index >= message_offset_.size()) {
+    PERFETTO_DLOG(
+        "bytecode error: a message index (%u) is out of range "
+        "(num_messages=%zu)",
+        max_msg_index, message_offset_.size());
+    return false;
+  }
+
+  // Add a final entry to |message_offset_| so we can tell where the last
+  // message ends without an extra branch in the Query() hotpath.
+  message_offset_.emplace_back(static_cast<uint32_t>(words_.size()));
+
+  return true;
+}
+
+FilterBytecodeParser::QueryResult FilterBytecodeParser::Query(
+    uint32_t msg_index,
+    uint32_t field_id) {
+  FilterBytecodeParser::QueryResult res{false, 0u};
+  if (static_cast<uint64_t>(msg_index) + 1 >=
+      static_cast<uint64_t>(message_offset_.size())) {
+    return res;
+  }
+  const uint32_t start_offset = message_offset_[msg_index];
+  // These are DCHECKs and not just CHECKS because the |words_| is populated
+  // by the LoadInternal call above. These cannot be violated with a malformed
+  // bytecode.
+  PERFETTO_DCHECK(start_offset < words_.size());
+  const uint32_t* word = &words_[start_offset];
+  const uint32_t end_off = message_offset_[msg_index + 1];
+  const uint32_t* const end = words_.data() + end_off;
+  PERFETTO_DCHECK(end > word && end <= words_.data() + words_.size());
+  const uint32_t num_directly_indexed = *(word++);
+  PERFETTO_DCHECK(num_directly_indexed <= kDirectlyIndexLimit);
+  PERFETTO_DCHECK(word + num_directly_indexed <= end);
+  uint32_t field_state = 0;
+  if (PERFETTO_LIKELY(field_id < num_directly_indexed)) {
+    PERFETTO_DCHECK(&word[field_id] < end);
+    field_state = word[field_id];
+  } else {
+    for (word = word + num_directly_indexed; word + 2 < end;) {
+      const uint32_t range_start = *(word++);
+      const uint32_t range_end = *(word++);
+      const uint32_t range_state = *(word++);
+      if (field_id >= range_start && field_id < range_end) {
+        field_state = range_state;
+        break;
+      }
+    }  // for (word in ranges)
+  }    // if (field_id >= num_directly_indexed)
+
+  res.allowed = (field_state & kAllowed) != 0;
+  res.nested_msg_index = field_state & ~kAllowed;
+  PERFETTO_DCHECK(res.simple_field() ||
+                  res.nested_msg_index < message_offset_.size() - 1);
+  return res;
+}
+
+}  // namespace protozero
+// gen_amalgamated begin source: src/protozero/filtering/message_filter.cc
+// gen_amalgamated begin header: src/protozero/filtering/message_filter.h
+// gen_amalgamated begin header: src/protozero/filtering/message_tokenizer.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_PROTOZERO_FILTERING_MESSAGE_TOKENIZER_H_
+#define SRC_PROTOZERO_FILTERING_MESSAGE_TOKENIZER_H_
+
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace protozero {
+
+// A helper class for schema-less tokenizing of protobuf messages.
+// This class takes a stream of proto-encoded bytes, pushed one by one in input
+// via Push(octet), and returns a stream of tokens (each Push() call can return
+// 0 or 1 token).
+// A "token" contains metadata about a field, specifically: its ID, its wire
+// type and:
+//  - For varint and fixed32/64 fields: its payload.
+//  - For string and bytes fields: the length of its payload.
+//    In this case the caller is supposed to "eat" those N bytes before calling
+//    Push() again.
+// Note that this class cannot differentiate between a string/bytes field or
+// a submessage, because they are encoded in the same way. The caller is
+// supposed to know whether a field can be recursed into by just keep calling
+// Push() or is a string that should be skipped.
+// This is inline to allow the compiler to see through the Push method and
+// avoid a function call for each byte.
+class MessageTokenizer {
+ public:
+  struct Token {
+    uint32_t field_id;  // 0 == not valid.
+    proto_utils::ProtoWireType type;
+
+    // For kLengthDelimited, |value| represent the length of the payload.
+    uint64_t value;
+
+    inline bool valid() const { return field_id != 0; }
+    bool operator==(const Token& o) const {
+      return field_id == o.field_id && type == o.type && value == o.value;
+    }
+  };
+
+  // Pushes a byte in input and returns a token, only when getting to the last
+  // byte of each field. Specifically:
+  // - For varint and fixed32 fields, the Token is returned after the last byte
+  //   of the numeric payload is pushed.
+  // - For length-delimited fields, this returns after the last byte of the
+  //   length is pushed (i.e. right before the payload starts). The caller is
+  //   expected to either skip the next |value| bytes (in the case of a string
+  //   or bytes fields) or keep calling Push, in the case of a submessage.
+  inline Token Push(uint8_t octet) {
+    using protozero::proto_utils::ProtoWireType;
+
+    // Parsing a fixed32/64 field is the only case where we don't have to do
+    // any varint decoding. This is why this block is before the remaining
+    // switch statement below (all the rest is a varint).
+    if (PERFETTO_UNLIKELY(state_ == kFixedIntValue)) {
+      PERFETTO_DCHECK(fixed_int_bits_ == 32 || fixed_int_bits_ == 64);
+      fixed_int_value_ |= static_cast<uint64_t>(octet) << fixed_int_shift_;
+      fixed_int_shift_ += 8;
+      if (fixed_int_shift_ < fixed_int_bits_)
+        return Token{};  // Intermediate byte of a fixed32/64.
+      auto wire_type = fixed_int_bits_ == 32 ? ProtoWireType::kFixed32
+                                             : ProtoWireType::kFixed64;
+      uint64_t fixed_int_value = fixed_int_value_;
+      fixed_int_value_ = fixed_int_shift_ = fixed_int_bits_ = 0;
+      state_ = kFieldPreamble;
+      return Token{field_id_, wire_type, fixed_int_value};
+    }
+
+    // At this point either we are: (i) parsing a field preamble; (ii) parsing a
+    // varint field paylod; (iii) parsing the length of a length-delimited
+    // field. In all cases, we need to decode a varint before proceeding.
+    varint_ |= static_cast<uint64_t>(octet & 0x7F) << varint_shift_;
+    if (octet & 0x80) {
+      varint_shift_ += 7;
+      if (PERFETTO_UNLIKELY(varint_shift_ >= 64)) {
+        varint_shift_ = 0;
+        state_ = kInvalidVarInt;
+      }
+      return Token{};  // Still parsing a varint.
+    }
+
+    uint64_t varint = varint_;
+    varint_ = 0;
+    varint_shift_ = 0;
+
+    switch (state_) {
+      case kFieldPreamble: {
+        auto field_type = static_cast<uint32_t>(varint & 7u);  // 7 = 0..0111
+        field_id_ = static_cast<uint32_t>(varint >> 3);
+
+        // The field type is legit, now check it's well formed and within
+        // boundaries.
+        if (field_type == static_cast<uint32_t>(ProtoWireType::kVarInt)) {
+          state_ = kVarIntValue;
+        } else if (field_type ==
+                       static_cast<uint32_t>(ProtoWireType::kFixed32) ||
+                   field_type ==
+                       static_cast<uint32_t>(ProtoWireType::kFixed64)) {
+          state_ = kFixedIntValue;
+          fixed_int_shift_ = 0;
+          fixed_int_value_ = 0;
+          fixed_int_bits_ =
+              field_type == static_cast<uint32_t>(ProtoWireType::kFixed32) ? 32
+                                                                           : 64;
+        } else if (field_type ==
+                   static_cast<uint32_t>(ProtoWireType::kLengthDelimited)) {
+          state_ = kLenDelimited;
+        } else {
+          state_ = kInvalidFieldType;
+        }
+        return Token{};
+      }
+
+      case kVarIntValue: {
+        // Return the varint field payload and go back to the next field.
+        state_ = kFieldPreamble;
+        return Token{field_id_, ProtoWireType::kVarInt, varint};
+      }
+
+      case kLenDelimited: {
+        const auto payload_len = varint;
+        if (payload_len > protozero::proto_utils::kMaxMessageLength) {
+          state_ = kMessageTooBig;
+          return Token{};
+        }
+        state_ = kFieldPreamble;
+        // At this point the caller is expected to consume the next
+        // |payload_len| bytes.
+        return Token{field_id_, ProtoWireType::kLengthDelimited, payload_len};
+      }
+
+      case kFixedIntValue:
+        // Unreacheable because of the if before the switch.
+        PERFETTO_DCHECK(false);
+        break;
+
+      // Unrecoverable error states.
+      case kInvalidFieldType:
+      case kMessageTooBig:
+      case kInvalidVarInt:
+        break;
+    }  // switch(state_)
+
+    return Token{};  // Keep GCC happy.
+  }
+
+  // Returns true if the tokenizer FSM has reached quiescence (i.e. if we are
+  // NOT in the middle of parsing a field).
+  bool idle() const {
+    return state_ == kFieldPreamble && varint_shift_ == 0 &&
+           fixed_int_shift_ == 0;
+  }
+
+  // Only for reporting parser errors in the trace.
+  uint32_t state() const { return static_cast<uint32_t>(state_); }
+
+ private:
+  enum State {
+    kFieldPreamble = 0,  // Parsing the varint for the field preamble.
+    kVarIntValue = 1,    // Parsing the payload of a varint field.
+    kFixedIntValue = 2,  // Parsing the payload of a fixed32/64 field.
+    kLenDelimited = 3,   // Parsing the length of a length-delimited field.
+
+    // Unrecoverable error states:
+    kInvalidFieldType = 4,  // Encountered an invalid field type.
+    kMessageTooBig = 5,     // Size of the length delimited message was too big.
+    kInvalidVarInt = 6,     // Varint larger than 64 bits.
+  };
+
+  State state_ = kFieldPreamble;
+  uint32_t field_id_ = 0;
+  uint64_t varint_ = 0;
+  uint32_t varint_shift_ = 0;
+  uint32_t fixed_int_shift_ = 0;
+  uint32_t fixed_int_bits_ = 0;
+  uint64_t fixed_int_value_ = 0;
+};
+
+}  // namespace protozero
+
+#endif  // SRC_PROTOZERO_FILTERING_MESSAGE_TOKENIZER_H_
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_PROTOZERO_FILTERING_MESSAGE_FILTER_H_
+#define SRC_PROTOZERO_FILTERING_MESSAGE_FILTER_H_
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+
+// gen_amalgamated expanded: #include "src/protozero/filtering/filter_bytecode_parser.h"
+// gen_amalgamated expanded: #include "src/protozero/filtering/message_tokenizer.h"
+
+namespace protozero {
+
+// A class to filter binary-encoded proto messages using an allow-list of field
+// ids, also known as "filter bytecode". The filter determines which fields are
+// allowed to be passed through in output and strips all the other fields.
+// See go/trace-filtering for full design.
+// This class takes in input:
+// 1) The filter bytecode, loaded once via the LoadFilterBytecode() method.
+// 2) A proto-encoded binary message. The message doesn't have to be contiguous,
+//    it can be passed as an array of arbitrarily chunked fragments.
+// The FilterMessage*() method returns in output a proto message, stripping out
+// all unknown fields. If the input is malformed (e.g., unknown proto field wire
+// types, lengths out of bound) the whole filtering failed and the |error| flag
+// of the FilteredMessage object is set to true.
+// The filtering operation is based on rewriting a copy of the message into a
+// self-allocated buffer, which is then returned in the output. The input buffer
+// is NOT altered.
+// Note also that the process of rewriting the protos gets rid of most redundant
+// varint encoding (if present). So even if all fields are allow-listed, the
+// output might NOT be bitwise identical to the input (but it will be
+// semantically equivalent).
+// Furthermore the enable_field_usage_tracking() method allows to keep track of
+// a histogram of allowed / denied fields. It slows down filtering and is
+// intended only on host tools.
+class MessageFilter {
+ public:
+  MessageFilter();
+  ~MessageFilter();
+
+  struct InputSlice {
+    const void* data;
+    size_t len;
+  };
+
+  struct FilteredMessage {
+    FilteredMessage(std::unique_ptr<uint8_t[]> d, size_t s)
+        : data(std::move(d)), size(s) {}
+    std::unique_ptr<uint8_t[]> data;
+    size_t size;  // The used bytes in |data|. This is <= sizeof(data).
+    bool error = false;
+  };
+
+  // Loads the filter bytecode that will be used to filter any subsequent
+  // message. Must be called before the first call to FilterMessage*().
+  // |filter_data| must point to a byte buffer for a proto-encoded ProtoFilter
+  // message (see proto_filter.proto).
+  bool LoadFilterBytecode(const void* filter_data, size_t len);
+
+  // This affects the filter starting point of the subsequent FilterMessage*()
+  // calls. By default the filtering process starts from the message @ index 0,
+  // the root message passed to proto_filter when generating the bytecode
+  // (in typical tracing use-cases, this is perfetto.protos.Trace). However, the
+  // caller (TracingServiceImpl) might want to filter packets from the 2nd level
+  // (perfetto.protos.TracePacket) because the root level is pre-pended after
+  // the fact. This call allows to change the root message for the filter.
+  // The argument |field_ids| is an array of proto field ids and determines the
+  // path to the new root. For instance, in the case of [1,2,3] SetFilterRoot
+  // will identify the sub-message for the field "root.1.2.3" and use that.
+  // In order for this to succeed all the fields in the path must be allowed
+  // in the filter and must be a nested message type.
+  bool SetFilterRoot(const uint32_t* field_ids, size_t num_fields);
+
+  // Takes an input message, fragmented in arbitrary slices, and returns a
+  // filtered message in output.
+  FilteredMessage FilterMessageFragments(const InputSlice*, size_t num_slices);
+
+  // Helper for tests, where the input is a contiguous buffer.
+  FilteredMessage FilterMessage(const void* data, size_t len) {
+    InputSlice slice{data, len};
+    return FilterMessageFragments(&slice, 1);
+  }
+
+  // When enabled returns a map of "field path" to "usage counter".
+  // The key (std::string) is a binary buffer (i.e. NOT an ASCII/UTF-8 string)
+  // which contains a varint for each field. Consider the following:
+  // message Root { Sub1 f1 = 1; };
+  // message Sub1 { Sub2 f2 = 7;}
+  // message Sub2 { string f3 = 5; }
+  // The field .f1.f2.f3 will be encoded as \x01\0x07\x05.
+  // The value is the number of times that field has been encountered. If the
+  // field is not allow-listed in the bytecode (the field is stripped in output)
+  // the count will be negative.
+  void enable_field_usage_tracking(bool x) { track_field_usage_ = x; }
+  const std::unordered_map<std::string, int32_t>& field_usage() const {
+    return field_usage_;
+  }
+
+  // Exposed only for DCHECKS in TracingServiceImpl.
+  uint32_t root_msg_index() { return root_msg_index_; }
+
+ private:
+  // This is called by FilterMessageFragments().
+  // Inlining allows the compiler turn the per-byte call/return into a for loop,
+  // while, at the same time, keeping the code easy to read and reason about.
+  // It gives a 20-25% speedup (265ms vs 215ms for a 25MB trace).
+  void FilterOneByte(uint8_t octet) PERFETTO_ALWAYS_INLINE;
+
+  // No-inline because this is a slowpath (only when usage tracking is enabled).
+  void IncrementCurrentFieldUsage(uint32_t field_id,
+                                  bool allowed) PERFETTO_NO_INLINE;
+
+  // Gets into an error state which swallows all the input and emits no output.
+  void SetUnrecoverableErrorState();
+
+  // We keep track of the the nest of messages in a stack. Each StackState
+  // object corresponds to a level of nesting in the proto message structure.
+  // Every time a new field of type len-delimited that has a corresponding
+  // sub-message in the bytecode is encountered, a new StackState is pushed in
+  // |stack_|. stack_[0] is a sentinel to prevent over-popping without adding
+  // extra branches in the fastpath.
+  // |stack_|. stack_[1] is the state of the root message.
+  struct StackState {
+    uint32_t in_bytes = 0;  // Number of input bytes processed.
+
+    // When |in_bytes| reaches this value, the current state should be popped.
+    // This is set when recursing into nested submessages. This is 0 only for
+    // stack_[0] (we don't know the size of the root message upfront).
+    uint32_t in_bytes_limit = 0;
+
+    // This is set when a len-delimited message is encountered, either a string
+    // or a nested submessage that is NOT allow-listed in the bytecode.
+    // This causes input bytes to be consumed without being parsed from the
+    // input stream. If |passthrough_eaten_bytes| == true, they will be copied
+    // as-is in output (e.g. in the case of an allowed string/bytes field).
+    uint32_t eat_next_bytes = 0;
+
+    // Keeps tracks of the stream_writer output counter (out_.written()) then
+    // the StackState is pushed. This is used to work out, when popping, how
+    // many bytes have been written for the current submessage.
+    uint32_t out_bytes_written_at_start = 0;
+
+    uint32_t field_id = 0;   // The proto field id for the current message.
+    uint32_t msg_index = 0;  // The index of the message filter in the bytecode.
+
+    // This is a pointer to the proto preamble for the current submessage
+    // (it's nullptr for stack_[0] and non-null elsewhere). This will be filled
+    // with the actual size of the message (out_.written() -
+    // |out_bytes_written_at_start|) when finishing (popping) the message.
+    // This must be filled using WriteRedundantVarint(). Note that the
+    // |size_field_len| is variable and depends on the actual length of the
+    // input message. If the output message has roughly the same size of the
+    // input message, the length will not be redundant.
+    // In other words: the length of the field is reserved when the submessage
+    // starts. At that point we know the upper-bound for the output message
+    // (a filtered submessage can be <= the original one, but not >). So we
+    // reserve as many bytes it takes to write the input length in varint.
+    // Then, when the message is finalized and we know the actual output size
+    // we backfill the field.
+    // Consider the example of a submessage where the input size = 130 (>127,
+    // 2 varint bytes) and the output is 120 bytes. The length will be 2 bytes
+    // wide even though could have been encoded with just one byte.
+    uint8_t* size_field = nullptr;
+    uint32_t size_field_len = 0;
+
+    // When true the next |eat_next_bytes| are copied as-is in output.
+    // It seems that keeping this field at the end rather than next to
+    // |eat_next_bytes| makes the filter a little (but measurably) faster.
+    // (likely something related with struct layout vs cache sizes).
+    bool passthrough_eaten_bytes = false;
+  };
+
+  uint32_t out_written() { return static_cast<uint32_t>(out_ - &out_buf_[0]); }
+
+  std::unique_ptr<uint8_t[]> out_buf_;
+  uint8_t* out_ = nullptr;
+  uint8_t* out_end_ = nullptr;
+  uint32_t root_msg_index_ = 0;
+
+  FilterBytecodeParser filter_;
+  MessageTokenizer tokenizer_;
+  std::vector<StackState> stack_;
+
+  bool error_ = false;
+  bool track_field_usage_ = false;
+  std::unordered_map<std::string, int32_t> field_usage_;
+};
+
+}  // namespace protozero
+
+#endif  // SRC_PROTOZERO_FILTERING_MESSAGE_FILTER_H_
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/protozero/filtering/message_filter.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace protozero {
+
+namespace {
+
+// Inline helpers to append proto fields in output. They are the equivalent of
+// the protozero::Message::AppendXXX() fields but don't require building and
+// maintaining a full protozero::Message object or dealing with scattered
+// output slices.
+// All these functions assume there is enough space in the output buffer, which
+// should be always the case assuming that we don't end up generating more
+// output than input.
+
+inline void AppendVarInt(uint32_t field_id, uint64_t value, uint8_t** out) {
+  *out = proto_utils::WriteVarInt(proto_utils::MakeTagVarInt(field_id), *out);
+  *out = proto_utils::WriteVarInt(value, *out);
+}
+
+// For fixed32 / fixed64.
+template <typename INT_T /* uint32_t | uint64_t*/>
+inline void AppendFixed(uint32_t field_id, INT_T value, uint8_t** out) {
+  *out = proto_utils::WriteVarInt(proto_utils::MakeTagFixed<INT_T>(field_id),
+                                  *out);
+  memcpy(*out, &value, sizeof(value));
+  *out += sizeof(value);
+}
+
+// For length-delimited (string, bytes) fields. Note: this function appends only
+// the proto preamble and the varint field that states the length of the payload
+// not the payload itself.
+// In the case of submessages, the caller needs to re-write the length at the
+// end in the in the returned memory area.
+// The problem here is that, because of filtering, the length of a submessage
+// might be < original length (the original length is still an upper-bound).
+// Returns a pair with: (1) the pointer where the final length should be written
+// into, (2) the length of the size field.
+// The caller must write a redundant varint to match the original size (i.e.
+// needs to use WriteRedundantVarInt()).
+inline std::pair<uint8_t*, uint32_t> AppendLenDelim(uint32_t field_id,
+                                                    uint32_t len,
+                                                    uint8_t** out) {
+  *out = proto_utils::WriteVarInt(proto_utils::MakeTagLengthDelimited(field_id),
+                                  *out);
+  uint8_t* size_field_start = *out;
+  *out = proto_utils::WriteVarInt(len, *out);
+  const size_t size_field_len = static_cast<size_t>(*out - size_field_start);
+  return std::make_pair(size_field_start, size_field_len);
+}
+}  // namespace
+
+MessageFilter::MessageFilter() {
+  // Push a state on the stack for the implicit root message.
+  stack_.emplace_back();
+}
+
+MessageFilter::~MessageFilter() = default;
+
+bool MessageFilter::LoadFilterBytecode(const void* filter_data, size_t len) {
+  return filter_.Load(filter_data, len);
+}
+
+bool MessageFilter::SetFilterRoot(const uint32_t* field_ids,
+                                  size_t num_fields) {
+  uint32_t root_msg_idx = 0;
+  for (const uint32_t* it = field_ids; it < field_ids + num_fields; ++it) {
+    uint32_t field_id = *it;
+    auto res = filter_.Query(root_msg_idx, field_id);
+    if (!res.allowed || res.simple_field())
+      return false;
+    root_msg_idx = res.nested_msg_index;
+  }
+  root_msg_index_ = root_msg_idx;
+  return true;
+}
+
+MessageFilter::FilteredMessage MessageFilter::FilterMessageFragments(
+    const InputSlice* slices,
+    size_t num_slices) {
+  // First compute the upper bound for the output. The filtered message cannot
+  // be > the original message.
+  uint32_t total_len = 0;
+  for (size_t i = 0; i < num_slices; ++i)
+    total_len += slices[i].len;
+  out_buf_.reset(new uint8_t[total_len]);
+  out_ = out_buf_.get();
+  out_end_ = out_ + total_len;
+
+  // Reset the parser state.
+  tokenizer_ = MessageTokenizer();
+  error_ = false;
+  stack_.clear();
+  stack_.resize(2);
+  // stack_[0] is a sentinel and should never be hit in nominal cases. If we
+  // end up there we will just keep consuming the input stream and detecting
+  // at the end, without hurting the fastpath.
+  stack_[0].in_bytes_limit = UINT32_MAX;
+  stack_[0].eat_next_bytes = UINT32_MAX;
+  // stack_[1] is the actual root message.
+  stack_[1].in_bytes_limit = total_len;
+  stack_[1].msg_index = root_msg_index_;
+
+  // Process the input data and write the output.
+  for (size_t slice_idx = 0; slice_idx < num_slices; ++slice_idx) {
+    const InputSlice& slice = slices[slice_idx];
+    const uint8_t* data = static_cast<const uint8_t*>(slice.data);
+    for (size_t i = 0; i < slice.len; ++i)
+      FilterOneByte(data[i]);
+  }
+
+  // Construct the output object.
+  PERFETTO_CHECK(out_ >= out_buf_.get() && out_ <= out_end_);
+  auto used_size = static_cast<size_t>(out_ - out_buf_.get());
+  FilteredMessage res{std::move(out_buf_), used_size};
+  res.error = error_;
+  if (stack_.size() != 1 || !tokenizer_.idle() ||
+      stack_[0].in_bytes != total_len) {
+    res.error = true;
+  }
+  return res;
+}
+
+void MessageFilter::FilterOneByte(uint8_t octet) {
+  PERFETTO_DCHECK(!stack_.empty());
+
+  auto* state = &stack_.back();
+  StackState next_state{};
+  bool push_next_state = false;
+
+  if (state->eat_next_bytes > 0) {
+    // This is the case where the previous tokenizer_.Push() call returned a
+    // length delimited message which is NOT a submessage (a string or a bytes
+    // field). We just want to consume it, and pass it through in output
+    // if the field was allowed.
+    --state->eat_next_bytes;
+    if (state->passthrough_eaten_bytes)
+      *(out_++) = octet;
+  } else {
+    MessageTokenizer::Token token = tokenizer_.Push(octet);
+    // |token| will not be valid() in most cases and this is WAI. When pushing
+    // a varint field, only the last byte yields a token, all the other bytes
+    // return an invalid token, they just update the internal tokenizer state.
+    if (token.valid()) {
+      auto filter = filter_.Query(state->msg_index, token.field_id);
+      switch (token.type) {
+        case proto_utils::ProtoWireType::kVarInt:
+          if (filter.allowed && filter.simple_field())
+            AppendVarInt(token.field_id, token.value, &out_);
+          break;
+        case proto_utils::ProtoWireType::kFixed32:
+          if (filter.allowed && filter.simple_field())
+            AppendFixed(token.field_id, static_cast<uint32_t>(token.value),
+                        &out_);
+          break;
+        case proto_utils::ProtoWireType::kFixed64:
+          if (filter.allowed && filter.simple_field())
+            AppendFixed(token.field_id, static_cast<uint64_t>(token.value),
+                        &out_);
+          break;
+        case proto_utils::ProtoWireType::kLengthDelimited:
+          // Here we have two cases:
+          // A. A simple string/bytes field: we just want to consume the next
+          //    bytes (the string payload), optionally passing them through in
+          //    output if the field is allowed.
+          // B. This is a nested submessage. In this case we want to recurse and
+          //    push a new state on the stack.
+          // Note that we can't tell the difference between a
+          // "non-allowed string" and a "non-allowed submessage". But it doesn't
+          // matter because in both cases we just want to skip the next N bytes.
+          const auto submessage_len = static_cast<uint32_t>(token.value);
+          auto in_bytes_left = state->in_bytes_limit - state->in_bytes - 1;
+          if (PERFETTO_UNLIKELY(submessage_len > in_bytes_left)) {
+            // This is a malicious / malformed string/bytes/submessage that
+            // claims to be larger than the outer message that contains it.
+            return SetUnrecoverableErrorState();
+          }
+
+          if (filter.allowed && !filter.simple_field() && submessage_len > 0) {
+            // submessage_len == 0 is the edge case of a message with a 0-len
+            // (but present) submessage. In this case, if allowed, we don't want
+            // to push any further state (doing so would desync the FSM) but we
+            // still want to emit it.
+            // At this point |submessage_len| is only an upper bound. The
+            // final message written in output can be <= the one in input,
+            // only some of its fields might be allowed (also remember that
+            // this class implicitly removes redundancy varint encoding of
+            // len-delimited field lengths). The final length varint (the
+            // return value of AppendLenDelim()) will be filled when popping
+            // from |stack_|.
+            auto size_field =
+                AppendLenDelim(token.field_id, submessage_len, &out_);
+            push_next_state = true;
+            next_state.field_id = token.field_id;
+            next_state.msg_index = filter.nested_msg_index;
+            next_state.in_bytes_limit = submessage_len;
+            next_state.size_field = size_field.first;
+            next_state.size_field_len = size_field.second;
+            next_state.out_bytes_written_at_start = out_written();
+          } else {
+            // A string or bytes field, or a 0 length submessage.
+            state->eat_next_bytes = submessage_len;
+            state->passthrough_eaten_bytes = filter.allowed;
+            if (filter.allowed)
+              AppendLenDelim(token.field_id, submessage_len, &out_);
+          }
+          break;
+      }  // switch(type)
+
+      if (PERFETTO_UNLIKELY(track_field_usage_)) {
+        IncrementCurrentFieldUsage(token.field_id, filter.allowed);
+      }
+    }  // if (token.valid)
+  }    // if (eat_next_bytes == 0)
+
+  ++state->in_bytes;
+  while (state->in_bytes >= state->in_bytes_limit) {
+    PERFETTO_DCHECK(state->in_bytes == state->in_bytes_limit);
+    push_next_state = false;
+
+    // We can't possibly write more than we read.
+    const uint32_t msg_bytes_written = static_cast<uint32_t>(
+        out_written() - state->out_bytes_written_at_start);
+    PERFETTO_DCHECK(msg_bytes_written <= state->in_bytes_limit);
+
+    // Backfill the length field of the
+    proto_utils::WriteRedundantVarInt(msg_bytes_written, state->size_field,
+                                      state->size_field_len);
+
+    const uint32_t in_bytes_processes_for_last_msg = state->in_bytes;
+    stack_.pop_back();
+    PERFETTO_CHECK(!stack_.empty());
+    state = &stack_.back();
+    state->in_bytes += in_bytes_processes_for_last_msg;
+    if (PERFETTO_UNLIKELY(!tokenizer_.idle())) {
+      // If we hit this case, it means that we got to the end of a submessage
+      // while decoding a field. We can't recover from this and we don't want to
+      // propagate a broken sub-message.
+      return SetUnrecoverableErrorState();
+    }
+  }
+
+  if (push_next_state) {
+    PERFETTO_DCHECK(tokenizer_.idle());
+    stack_.emplace_back(std::move(next_state));
+    state = &stack_.back();
+  }
+}
+
+void MessageFilter::SetUnrecoverableErrorState() {
+  error_ = true;
+  stack_.clear();
+  stack_.resize(1);
+  auto& state = stack_[0];
+  state.eat_next_bytes = UINT32_MAX;
+  state.in_bytes_limit = UINT32_MAX;
+  state.passthrough_eaten_bytes = false;
+  out_ = out_buf_.get();  // Reset the write pointer.
+}
+
+void MessageFilter::IncrementCurrentFieldUsage(uint32_t field_id,
+                                               bool allowed) {
+  // Slowpath. Used mainly in offline tools and tests to workout used fields in
+  // a proto.
+  PERFETTO_DCHECK(track_field_usage_);
+
+  // Field path contains a concatenation of varints, one for each nesting level.
+  // e.g. y in message Root { Sub x = 2; }; message Sub { SubSub y = 7; }
+  // is encoded as [varint(2) + varint(7)].
+  // We use varint to take the most out of SSO (small string opt). In most cases
+  // the path will fit in the on-stack 22 bytes, requiring no heap.
+  std::string field_path;
+
+  auto append_field_id = [&field_path](uint32_t id) {
+    uint8_t buf[10];
+    uint8_t* end = proto_utils::WriteVarInt(id, buf);
+    field_path.append(reinterpret_cast<char*>(buf),
+                      static_cast<size_t>(end - buf));
+  };
+
+  // Append all the ancestors IDs from the state stack.
+  // The first entry of the stack has always ID 0 and we skip it (we don't know
+  // the ID of the root message itself).
+  PERFETTO_DCHECK(stack_.size() >= 2 && stack_[1].field_id == 0);
+  for (size_t i = 2; i < stack_.size(); ++i)
+    append_field_id(stack_[i].field_id);
+  // Append the id of the field in the current message.
+  append_field_id(field_id);
+  field_usage_[field_path] += allowed ? 1 : -1;
+}
+
+}  // namespace protozero
+// gen_amalgamated begin source: src/tracing/core/metatrace_writer.cc
+// gen_amalgamated begin header: src/tracing/core/metatrace_writer.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_CORE_METATRACE_WRITER_H_
+#define SRC_TRACING_CORE_METATRACE_WRITER_H_
+
+#include <functional>
+#include <memory>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/metatrace.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
+
+namespace perfetto {
+
+namespace base {
+class TaskRunner;
+}
+
+class TraceWriter;
+
+// Complements the base::metatrace infrastructure.
+// It hooks a callback to metatrace::Enable() and writes metatrace events into
+// a TraceWriter whenever the metatrace ring buffer is half full.
+// It is safe to create and attempt to start multiple instances of this class,
+// however only the first one will succeed because the metatrace framework
+// doesn't support multiple instances.
+// This class is defined here (instead of directly in src/probes/) so it can
+// be reused by other components (e.g. heapprofd).
+class MetatraceWriter {
+ public:
+  static constexpr char kDataSourceName[] = "perfetto.metatrace";
+
+  MetatraceWriter();
+  ~MetatraceWriter();
+
+  MetatraceWriter(const MetatraceWriter&) = delete;
+  MetatraceWriter& operator=(const MetatraceWriter&) = delete;
+  MetatraceWriter(MetatraceWriter&&) = delete;
+  MetatraceWriter& operator=(MetatraceWriter&&) = delete;
+
+  void Enable(base::TaskRunner*, std::unique_ptr<TraceWriter>, uint32_t tags);
+  void Disable();
+  void WriteAllAndFlushTraceWriter(std::function<void()> callback);
+
+ private:
+  void WriteAllAvailableEvents();
+
+  bool started_ = false;
+  base::TaskRunner* task_runner_ = nullptr;
+  std::unique_ptr<TraceWriter> trace_writer_;
+  PERFETTO_THREAD_CHECKER(thread_checker_)
+  base::WeakPtrFactory<MetatraceWriter> weak_ptr_factory_;  // Keep last.
+};
+
+}  // namespace perfetto
+
+#endif  // SRC_TRACING_CORE_METATRACE_WRITER_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/perfetto/perfetto_metatrace.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PERFETTO_PERFETTO_METATRACE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PERFETTO_PERFETTO_METATRACE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class PerfettoMetatrace_Arg;
+
+class PerfettoMetatrace_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/9, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  PerfettoMetatrace_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PerfettoMetatrace_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PerfettoMetatrace_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_event_id() const { return at<1>().valid(); }
+  uint32_t event_id() const { return at<1>().as_uint32(); }
+  bool has_counter_id() const { return at<2>().valid(); }
+  uint32_t counter_id() const { return at<2>().as_uint32(); }
+  bool has_event_name() const { return at<8>().valid(); }
+  ::protozero::ConstChars event_name() const { return at<8>().as_string(); }
+  bool has_counter_name() const { return at<9>().valid(); }
+  ::protozero::ConstChars counter_name() const { return at<9>().as_string(); }
+  bool has_event_duration_ns() const { return at<3>().valid(); }
+  uint32_t event_duration_ns() const { return at<3>().as_uint32(); }
+  bool has_counter_value() const { return at<4>().valid(); }
+  int32_t counter_value() const { return at<4>().as_int32(); }
+  bool has_thread_id() const { return at<5>().valid(); }
+  uint32_t thread_id() const { return at<5>().as_uint32(); }
+  bool has_has_overruns() const { return at<6>().valid(); }
+  bool has_overruns() const { return at<6>().as_bool(); }
+  bool has_args() const { return at<7>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> args() const { return GetRepeated<::protozero::ConstBytes>(7); }
+};
+
+class PerfettoMetatrace : public ::protozero::Message {
+ public:
+  using Decoder = PerfettoMetatrace_Decoder;
+  enum : int32_t {
+    kEventIdFieldNumber = 1,
+    kCounterIdFieldNumber = 2,
+    kEventNameFieldNumber = 8,
+    kCounterNameFieldNumber = 9,
+    kEventDurationNsFieldNumber = 3,
+    kCounterValueFieldNumber = 4,
+    kThreadIdFieldNumber = 5,
+    kHasOverrunsFieldNumber = 6,
+    kArgsFieldNumber = 7,
+  };
+  using Arg = ::perfetto::protos::pbzero::PerfettoMetatrace_Arg;
+
+  using FieldMetadata_EventId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EventId kEventId() { return {}; }
+  void set_event_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EventId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CounterId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CounterId kCounterId() { return {}; }
+  void set_counter_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CounterId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EventName =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EventName kEventName() { return {}; }
+  void set_event_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_EventName::kFieldId, data, size);
+  }
+  void set_event_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_EventName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CounterName =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CounterName kCounterName() { return {}; }
+  void set_counter_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_CounterName::kFieldId, data, size);
+  }
+  void set_counter_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_CounterName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EventDurationNs =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EventDurationNs kEventDurationNs() { return {}; }
+  void set_event_duration_ns(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EventDurationNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CounterValue =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CounterValue kCounterValue() { return {}; }
+  void set_counter_value(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CounterValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadId =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadId kThreadId() { return {}; }
+  void set_thread_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HasOverruns =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HasOverruns kHasOverruns() { return {}; }
+  void set_has_overruns(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_HasOverruns::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Args =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PerfettoMetatrace_Arg,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Args kArgs() { return {}; }
+  template <typename T = PerfettoMetatrace_Arg> T* add_args() {
+    return BeginNestedMessage<T>(7);
+  }
+
+};
+
+class PerfettoMetatrace_Arg_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  PerfettoMetatrace_Arg_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PerfettoMetatrace_Arg_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PerfettoMetatrace_Arg_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_key() const { return at<1>().valid(); }
+  ::protozero::ConstChars key() const { return at<1>().as_string(); }
+  bool has_value() const { return at<2>().valid(); }
+  ::protozero::ConstChars value() const { return at<2>().as_string(); }
+};
+
+class PerfettoMetatrace_Arg : public ::protozero::Message {
+ public:
+  using Decoder = PerfettoMetatrace_Arg_Decoder;
+  enum : int32_t {
+    kKeyFieldNumber = 1,
+    kValueFieldNumber = 2,
+  };
+
+  using FieldMetadata_Key =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PerfettoMetatrace_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Key kKey() { return {}; }
+  void set_key(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Key::kFieldId, data, size);
+  }
+  void set_key(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Key::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Value =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PerfettoMetatrace_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Value kValue() { return {}; }
+  void set_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Value::kFieldId, data, size);
+  }
+  void set_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Value::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/tracing/core/metatrace_writer.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/trace/perfetto/perfetto_metatrace.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
+
+namespace perfetto {
+
+// static
+constexpr char MetatraceWriter::kDataSourceName[];
+
+MetatraceWriter::MetatraceWriter() : weak_ptr_factory_(this) {}
+
+MetatraceWriter::~MetatraceWriter() {
+  Disable();
+}
+
+void MetatraceWriter::Enable(base::TaskRunner* task_runner,
+                             std::unique_ptr<TraceWriter> trace_writer,
+                             uint32_t tags) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (started_) {
+    PERFETTO_DFATAL_OR_ELOG("Metatrace already started from this instance");
+    return;
+  }
+  task_runner_ = task_runner;
+  trace_writer_ = std::move(trace_writer);
+  auto weak_ptr = weak_ptr_factory_.GetWeakPtr();
+  bool enabled = metatrace::Enable(
+      [weak_ptr] {
+        if (weak_ptr)
+          weak_ptr->WriteAllAvailableEvents();
+      },
+      task_runner, tags);
+  if (!enabled)
+    return;
+  started_ = true;
+}
+
+void MetatraceWriter::Disable() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!started_)
+    return;
+  metatrace::Disable();
+  started_ = false;
+  trace_writer_.reset();
+}
+
+void MetatraceWriter::WriteAllAvailableEvents() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!started_)
+    return;
+  for (auto it = metatrace::RingBuffer::GetReadIterator(); it; ++it) {
+    auto type_and_id = it->type_and_id.load(std::memory_order_acquire);
+    if (type_and_id == 0)
+      break;  // Stop at the first incomplete event.
+
+    auto packet = trace_writer_->NewTracePacket();
+    packet->set_timestamp(it->timestamp_ns());
+    auto* evt = packet->set_perfetto_metatrace();
+    uint16_t type = type_and_id & metatrace::Record::kTypeMask;
+    uint16_t id = type_and_id & ~metatrace::Record::kTypeMask;
+    if (type == metatrace::Record::kTypeCounter) {
+      evt->set_counter_id(id);
+      evt->set_counter_value(it->counter_value);
+    } else {
+      evt->set_event_id(id);
+      evt->set_event_duration_ns(it->duration_ns);
+    }
+
+    evt->set_thread_id(static_cast<uint32_t>(it->thread_id));
+
+    if (metatrace::RingBuffer::has_overruns())
+      evt->set_has_overruns(true);
+  }
+  // The |it| destructor will automatically update the read index position in
+  // the meta-trace ring buffer.
+}
+
+void MetatraceWriter::WriteAllAndFlushTraceWriter(
+    std::function<void()> callback) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!started_)
+    return;
+  WriteAllAvailableEvents();
+  trace_writer_->Flush(std::move(callback));
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/core/packet_stream_validator.cc
+// gen_amalgamated begin header: src/tracing/core/packet_stream_validator.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_CORE_PACKET_STREAM_VALIDATOR_H_
+#define SRC_TRACING_CORE_PACKET_STREAM_VALIDATOR_H_
+
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/slice.h"
+
+namespace perfetto {
+
+// Checks that the stream of trace packets sent by the producer is well formed.
+// This includes:
+//
+// - Checking that the packets are not truncated.
+// - There are no dangling bytes left over in the packets.
+// - Any trusted fields (e.g., uid) are not set.
+//
+// Note that we only validate top-level fields in the trace proto; sub-messages
+// are simply skipped.
+class PacketStreamValidator {
+ public:
+  PacketStreamValidator() = delete;
+
+  static bool Validate(const Slices&);
+};
+
+}  // namespace perfetto
+
+#endif  // SRC_TRACING_CORE_PACKET_STREAM_VALIDATOR_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/tracing/core/packet_stream_validator.h"
+
+#include <inttypes.h>
+#include <stddef.h>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
+
+namespace perfetto {
+
+namespace {
+
+using protozero::proto_utils::ProtoWireType;
+
+const uint32_t kReservedFieldIds[] = {
+    protos::pbzero::TracePacket::kTrustedUidFieldNumber,
+    protos::pbzero::TracePacket::kTrustedPacketSequenceIdFieldNumber,
+    protos::pbzero::TracePacket::kTraceConfigFieldNumber,
+    protos::pbzero::TracePacket::kTraceStatsFieldNumber,
+    protos::pbzero::TracePacket::kCompressedPacketsFieldNumber,
+    protos::pbzero::TracePacket::kSynchronizationMarkerFieldNumber,
+};
+
+// This translation unit is quite subtle and perf-sensitive. Remember to check
+// BM_PacketStreamValidator in perfetto_benchmarks when making changes.
+
+// Checks that a packet, spread over several slices, is well-formed and doesn't
+// contain reserved top-level fields.
+// The checking logic is based on a state-machine that skips the fields' payload
+// and operates as follows:
+//              +-------------------------------+ <-------------------------+
+// +----------> | Read field preamble (varint)  | <----------------------+  |
+// |            +-------------------------------+                        |  |
+// |              |              |            |                          |  |
+// |       <Varint>        <Fixed 32/64>     <Length-delimited field>    |  |
+// |          V                  |                      V                |  |
+// |  +------------------+       |               +--------------+        |  |
+// |  | Read field value |       |               | Read length  |        |  |
+// |  | (another varint) |       |               |   (varint)   |        |  |
+// |  +------------------+       |               +--------------+        |  |
+// |           |                 V                      V                |  |
+// +-----------+        +----------------+     +-----------------+       |  |
+//                      | Skip 4/8 Bytes |     | Skip $len Bytes |-------+  |
+//                      +----------------+     +-----------------+          |
+//                               |                                          |
+//                               +------------------------------------------+
+class ProtoFieldParserFSM {
+ public:
+  // This method effectively continuously parses varints (either for the field
+  // preamble or the payload or the submessage length) and tells the caller
+  // (the Validate() method) how many bytes to skip until the next field.
+  size_t Push(uint8_t octet) {
+    varint_ |= static_cast<uint64_t>(octet & 0x7F) << varint_shift_;
+    if (octet & 0x80) {
+      varint_shift_ += 7;
+      if (varint_shift_ >= 64) {
+        // Do not invoke UB on next call.
+        varint_shift_ = 0;
+        state_ = kInvalidVarInt;
+      }
+      return 0;
+    }
+    uint64_t varint = varint_;
+    varint_ = 0;
+    varint_shift_ = 0;
+
+    switch (state_) {
+      case kFieldPreamble: {
+        uint64_t field_type = varint & 7;  // 7 = 0..0111
+        auto field_id = static_cast<uint32_t>(varint >> 3);
+        // Check if the field id is reserved, go into an error state if it is.
+        for (size_t i = 0; i < base::ArraySize(kReservedFieldIds); ++i) {
+          if (field_id == kReservedFieldIds[i]) {
+            state_ = kWroteReservedField;
+            return 0;
+          }
+        }
+        // The field type is legit, now check it's well formed and within
+        // boundaries.
+        if (field_type == static_cast<uint64_t>(ProtoWireType::kVarInt)) {
+          state_ = kVarIntValue;
+        } else if (field_type ==
+                   static_cast<uint64_t>(ProtoWireType::kFixed32)) {
+          return 4;
+        } else if (field_type ==
+                   static_cast<uint64_t>(ProtoWireType::kFixed64)) {
+          return 8;
+        } else if (field_type ==
+                   static_cast<uint64_t>(ProtoWireType::kLengthDelimited)) {
+          state_ = kLenDelimitedLen;
+        } else {
+          state_ = kUnknownFieldType;
+        }
+        return 0;
+      }
+
+      case kVarIntValue: {
+        // Consume the int field payload and go back to the next field.
+        state_ = kFieldPreamble;
+        return 0;
+      }
+
+      case kLenDelimitedLen: {
+        if (varint > protozero::proto_utils::kMaxMessageLength) {
+          state_ = kMessageTooBig;
+          return 0;
+        }
+        state_ = kFieldPreamble;
+        return static_cast<size_t>(varint);
+      }
+
+      case kWroteReservedField:
+      case kUnknownFieldType:
+      case kMessageTooBig:
+      case kInvalidVarInt:
+        // Persistent error states.
+        return 0;
+
+    }          // switch(state_)
+    return 0;  // To keep GCC happy.
+  }
+
+  // Queried at the end of the all payload. A message is well-formed only
+  // if the FSM is back to the state where it should parse the next field and
+  // hasn't started parsing any preamble.
+  bool valid() const { return state_ == kFieldPreamble && varint_shift_ == 0; }
+  int state() const { return static_cast<int>(state_); }
+
+ private:
+  enum State {
+    kFieldPreamble = 0,  // Parsing the varint for the field preamble.
+    kVarIntValue,        // Parsing the varint value for the field payload.
+    kLenDelimitedLen,    // Parsing the length of the length-delimited field.
+
+    // Error states:
+    kWroteReservedField,  // Tried to set a reserved field id.
+    kUnknownFieldType,    // Encountered an invalid field type.
+    kMessageTooBig,       // Size of the length delimited message was too big.
+    kInvalidVarInt,       // VarInt larger than 64 bits.
+  };
+
+  State state_ = kFieldPreamble;
+  uint64_t varint_ = 0;
+  uint32_t varint_shift_ = 0;
+};
+
+}  // namespace
+
+// static
+bool PacketStreamValidator::Validate(const Slices& slices) {
+  ProtoFieldParserFSM parser;
+  size_t skip_bytes = 0;
+  for (const Slice& slice : slices) {
+    for (size_t i = 0; i < slice.size;) {
+      const size_t skip_bytes_cur_slice = std::min(skip_bytes, slice.size - i);
+      if (skip_bytes_cur_slice > 0) {
+        i += skip_bytes_cur_slice;
+        skip_bytes -= skip_bytes_cur_slice;
+      } else {
+        uint8_t octet = *(reinterpret_cast<const uint8_t*>(slice.start) + i);
+        skip_bytes = parser.Push(octet);
+        i++;
+      }
+    }
+  }
+  if (skip_bytes == 0 && parser.valid())
+    return true;
+
+  PERFETTO_DLOG("Packet validation error (state %d, skip = %zu)",
+                parser.state(), skip_bytes);
+  return false;
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/core/trace_buffer.cc
+// gen_amalgamated begin header: src/tracing/core/trace_buffer.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_CORE_TRACE_BUFFER_H_
+#define SRC_TRACING_CORE_TRACE_BUFFER_H_
+
+#include <stdint.h>
+#include <string.h>
+
+#include <array>
+#include <limits>
+#include <map>
+#include <tuple>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/paged_memory.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/thread_annotations.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/slice.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_stats.h"
+
+namespace perfetto {
+
+class TracePacket;
+
+// The main buffer, owned by the tracing service, where all the trace data is
+// ultimately stored into. The service will own several instances of this class,
+// at least one per active consumer (as defined in the |buffers| section of
+// trace_config.proto) and will copy chunks from the producer's shared memory
+// buffers into here when a CommitData IPC is received.
+//
+// Writing into the buffer
+// -----------------------
+// Data is copied from the SMB(s) using CopyChunkUntrusted(). The buffer will
+// hence contain data coming from different producers and different writer
+// sequences, more specifically:
+// - The service receives data by several producer(s), identified by their ID.
+// - Each producer writes several sequences identified by the same WriterID.
+//   (they correspond to TraceWriter instances in the producer).
+// - Each Writer writes, in order, several chunks.
+// - Each chunk contains zero, one, or more TracePacket(s), or even just
+//   fragments of packets (when they span across several chunks).
+//
+// So at any point in time, the buffer will contain a variable number of logical
+// sequences identified by the {ProducerID, WriterID} tuple. Any given chunk
+// will only contain packets (or fragments) belonging to the same sequence.
+//
+// The buffer operates by default as a ring buffer.
+// It has two overwrite policies:
+//  1. kOverwrite (default): if the write pointer reaches the read pointer, old
+//     unread chunks will be overwritten by new chunks.
+//  2. kDiscard: if the write pointer reaches the read pointer, unread chunks
+//     are preserved and the new chunks are discarded. Any future write becomes
+//     a no-op, even if the reader manages to fully catch up. This is because
+//     once a chunk is discarded, the sequence of packets is broken and trying
+//     to recover would be too hard (also due to the fact that, at the same
+//     time, we allow out-of-order commits and chunk re-writes).
+//
+// Chunks are (over)written in the same order of the CopyChunkUntrusted() calls.
+// When overwriting old content, entire chunks are overwritten or clobbered.
+// The buffer never leaves a partial chunk around. Chunks' payload is copied
+// as-is, but their header is not and is repacked in order to keep the
+// ProducerID around.
+//
+// Chunks are stored in the buffer next to each other. Each chunk is prefixed by
+// an inline header (ChunkRecord), which contains most of the fields of the
+// SharedMemoryABI ChunkHeader + the ProducerID + the size of the payload.
+// It's a conventional binary object stream essentially, where each ChunkRecord
+// tells where it ends and hence where to find the next one, like this:
+//
+//          .-------------------------. 16 byte boundary
+//          | ChunkRecord:   16 bytes |
+//          | - chunk id:     4 bytes |
+//          | - producer id:  2 bytes |
+//          | - writer id:    2 bytes |
+//          | - #fragments:   2 bytes |
+//    +-----+ - record size:  2 bytes |
+//    |     | - flags+pad:    4 bytes |
+//    |     +-------------------------+
+//    |     |                         |
+//    |     :     Chunk payload       :
+//    |     |                         |
+//    |     +-------------------------+
+//    |     |    Optional padding     |
+//    +---> +-------------------------+ 16 byte boundary
+//          |      ChunkRecord        |
+//          :                         :
+// Chunks stored in the buffer are always rounded up to 16 bytes (that is
+// sizeof(ChunkRecord)), in order to avoid further inner fragmentation.
+// Special "padding" chunks can be put in the buffer, e.g. in the case when we
+// try to write a chunk of size N while the write pointer is at the end of the
+// buffer, but the write pointer is < N bytes from the end (and hence needs to
+// wrap over).
+// Because of this, the buffer is self-describing: the contents of the buffer
+// can be reconstructed by just looking at the buffer content (this will be
+// quite useful in future to recover the buffer from crash reports).
+//
+// However, in order to keep some operations (patching and reading) fast, a
+// lookaside index is maintained (in |index_|), keeping each chunk in the buffer
+// indexed by their {ProducerID, WriterID, ChunkID} tuple.
+//
+// Patching data out-of-band
+// -------------------------
+// This buffer also supports patching chunks' payload out-of-band, after they
+// have been stored. This is to allow producers to backfill the "size" fields
+// of the protos that spawn across several chunks, when the previous chunks are
+// returned to the service. The MaybePatchChunkContents() deals with the fact
+// that a chunk might have been lost (because of wrapping) by the time the OOB
+// IPC comes.
+//
+// Reading from the buffer
+// -----------------------
+// This class supports one reader only (the consumer). Reads are NOT idempotent
+// as they move the read cursors around. Reading back the buffer is the most
+// conceptually complex part. The ReadNextTracePacket() method operates with
+// whole packet granularity. Packets are returned only when all their fragments
+// are available.
+// This class takes care of:
+// - Gluing packets within the same sequence, even if they are not stored
+//   adjacently in the buffer.
+// - Re-ordering chunks within a sequence (using the ChunkID, which wraps).
+// - Detecting holes in packet fragments (because of loss of chunks).
+// Reads guarantee that packets for the same sequence are read in FIFO order
+// (according to their ChunkID), but don't give any guarantee about the read
+// order of packets from different sequences, see comments in
+// ReadNextTracePacket() below.
+class TraceBuffer {
+ public:
+  static const size_t InlineChunkHeaderSize;  // For test/fake_packet.{cc,h}.
+
+  // See comment in the header above.
+  enum OverwritePolicy { kOverwrite, kDiscard };
+
+  // Argument for out-of-band patches applied through TryPatchChunkContents().
+  struct Patch {
+    // From SharedMemoryABI::kPacketHeaderSize.
+    static constexpr size_t kSize = 4;
+
+    size_t offset_untrusted;
+    std::array<uint8_t, kSize> data;
+  };
+
+  // Identifiers that are constant for a packet sequence.
+  struct PacketSequenceProperties {
+    ProducerID producer_id_trusted;
+    uid_t producer_uid_trusted;
+    WriterID writer_id;
+  };
+
+  // Can return nullptr if the memory allocation fails.
+  static std::unique_ptr<TraceBuffer> Create(size_t size_in_bytes,
+                                             OverwritePolicy = kOverwrite);
+
+  ~TraceBuffer();
+
+  // Copies a Chunk from a producer Shared Memory Buffer into the trace buffer.
+  // |src| points to the first packet in the SharedMemoryABI's chunk shared with
+  // an untrusted producer. "untrusted" here means: the producer might be
+  // malicious and might change |src| concurrently while we read it (internally
+  // this method memcpy()-s first the chunk before processing it). None of the
+  // arguments should be trusted, unless otherwise stated. We can trust that
+  // |src| points to a valid memory area, but not its contents.
+  //
+  // This method may be called multiple times for the same chunk. In this case,
+  // the original chunk's payload will be overridden and its number of fragments
+  // and flags adjusted to match |num_fragments| and |chunk_flags|. The service
+  // may use this to insert partial chunks (|chunk_complete = false|) before the
+  // producer has committed them.
+  //
+  // If |chunk_complete| is |false|, the TraceBuffer will only consider the
+  // first |num_fragments - 1| packets to be complete, since the producer may
+  // not have finished writing the latest packet. Reading from a sequence will
+  // also not progress past any incomplete chunks until they were rewritten with
+  // |chunk_complete = true|, e.g. after a producer's commit.
+  //
+  // TODO(eseckler): Pass in a PacketStreamProperties instead of individual IDs.
+  void CopyChunkUntrusted(ProducerID producer_id_trusted,
+                          uid_t producer_uid_trusted,
+                          WriterID writer_id,
+                          ChunkID chunk_id,
+                          uint16_t num_fragments,
+                          uint8_t chunk_flags,
+                          bool chunk_complete,
+                          const uint8_t* src,
+                          size_t size);
+  // Applies a batch of |patches| to the given chunk, if the given chunk is
+  // still in the buffer. Does nothing if the given ChunkID is gone.
+  // Returns true if the chunk has been found and patched, false otherwise.
+  // |other_patches_pending| is used to determine whether this is the only
+  // batch of patches for the chunk or there is more.
+  // If |other_patches_pending| == false, the chunk is marked as ready to be
+  // consumed. If true, the state of the chunk is not altered.
+  //
+  // Note: If the producer is batching commits (see shared_memory_arbiter.h), it
+  // will also attempt to do patching locally. Namely, if nested messages are
+  // completed while the chunk on which they started is being batched (i.e.
+  // before it has been committed to the service), the producer will apply the
+  // respective patches to the batched chunk. These patches will not be sent to
+  // the service - i.e. only the patches that the producer did not manage to
+  // apply before committing the chunk will be applied here.
+  bool TryPatchChunkContents(ProducerID,
+                             WriterID,
+                             ChunkID,
+                             const Patch* patches,
+                             size_t patches_size,
+                             bool other_patches_pending);
+
+  // To read the contents of the buffer the caller needs to:
+  //   BeginRead()
+  //   while (ReadNextTracePacket(packet_fragments)) { ... }
+  // No other calls to any other method should be interleaved between
+  // BeginRead() and ReadNextTracePacket().
+  // Reads in the TraceBuffer are NOT idempotent.
+  void BeginRead();
+
+  // Returns the next packet in the buffer, if any, and the producer_id,
+  // producer_uid, and writer_id of the producer/writer that wrote it (as passed
+  // in the CopyChunkUntrusted() call). Returns false if no packets can be read
+  // at this point. If a packet was read successfully,
+  // |previous_packet_on_sequence_dropped| is set to |true| if the previous
+  // packet on the sequence was dropped from the buffer before it could be read
+  // (e.g. because its chunk was overridden due to the ring buffer wrapping or
+  // due to an ABI violation), and to |false| otherwise.
+  //
+  // This function returns only complete packets. Specifically:
+  // When there is at least one complete packet in the buffer, this function
+  // returns true and populates the TracePacket argument with the boundaries of
+  // each fragment for one packet.
+  // TracePacket will have at least one slice when this function returns true.
+  // When there are no whole packets eligible to read (e.g. we are still missing
+  // fragments) this function returns false.
+  // This function guarantees also that packets for a given
+  // {ProducerID, WriterID} are read in FIFO order.
+  // This function does not guarantee any ordering w.r.t. packets belonging to
+  // different WriterID(s). For instance, given the following packets copied
+  // into the buffer:
+  //   {ProducerID: 1, WriterID: 1}: P1 P2 P3
+  //   {ProducerID: 1, WriterID: 2}: P4 P5 P6
+  //   {ProducerID: 2, WriterID: 1}: P7 P8 P9
+  // The following read sequence is possible:
+  //   P1, P4, P7, P2, P3, P5, P8, P9, P6
+  // But the following is guaranteed to NOT happen:
+  //   P1, P5, P7, P4 (P4 cannot come after P5)
+  bool ReadNextTracePacket(TracePacket*,
+                           PacketSequenceProperties* sequence_properties,
+                           bool* previous_packet_on_sequence_dropped);
+
+  const TraceStats::BufferStats& stats() const { return stats_; }
+  size_t size() const { return size_; }
+
+ private:
+  friend class TraceBufferTest;
+
+  // ChunkRecord is a Chunk header stored inline in the |data_| buffer, before
+  // the chunk payload (the packets' data). The |data_| buffer looks like this:
+  // +---------------+------------------++---------------+-----------------+
+  // | ChunkRecord 1 | Chunk payload 1  || ChunkRecord 2 | Chunk payload 2 | ...
+  // +---------------+------------------++---------------+-----------------+
+  // Most of the ChunkRecord fields are copied from SharedMemoryABI::ChunkHeader
+  // (the chunk header used in the shared memory buffers).
+  // A ChunkRecord can be a special "padding" record. In this case its payload
+  // should be ignored and the record should be just skipped.
+  //
+  // Full page move optimization:
+  // This struct has to be exactly (sizeof(PageHeader) + sizeof(ChunkHeader))
+  // (from shared_memory_abi.h) to allow full page move optimizations
+  // (TODO(primiano): not implemented yet). In the special case of moving a full
+  // 4k page that contains only one chunk, in fact, we can just ask the kernel
+  // to move the full SHM page (see SPLICE_F_{GIFT,MOVE}) and overlay the
+  // ChunkRecord on top of the moved SMB's header (page + chunk header).
+  // This special requirement is covered by static_assert(s) in the .cc file.
+  struct ChunkRecord {
+    explicit ChunkRecord(size_t sz) : flags{0}, is_padding{0} {
+      PERFETTO_DCHECK(sz >= sizeof(ChunkRecord) &&
+                      sz % sizeof(ChunkRecord) == 0 && sz <= kMaxSize);
+      size = static_cast<decltype(size)>(sz);
+    }
+
+    bool is_valid() const { return size != 0; }
+
+    // Keep this structure packed and exactly 16 bytes (128 bits) big.
+
+    // [32 bits] Monotonic counter within the same writer_id.
+    ChunkID chunk_id = 0;
+
+    // [16 bits] ID of the Producer from which the Chunk was copied from.
+    ProducerID producer_id = 0;
+
+    // [16 bits] Unique per Producer (but not within the service).
+    // If writer_id == kWriterIdPadding the record should just be skipped.
+    WriterID writer_id = 0;
+
+    // Number of fragments contained in the chunk.
+    uint16_t num_fragments = 0;
+
+    // Size in bytes, including sizeof(ChunkRecord) itself.
+    uint16_t size;
+
+    uint8_t flags : 6;  // See SharedMemoryABI::ChunkHeader::flags.
+    uint8_t is_padding : 1;
+    uint8_t unused_flag : 1;
+
+    // Not strictly needed, can be reused for more fields in the future. But
+    // right now helps to spot chunks in hex dumps.
+    char unused[3] = {'C', 'H', 'U'};
+
+    static constexpr size_t kMaxSize =
+        std::numeric_limits<decltype(size)>::max();
+  };
+
+  // Lookaside index entry. This serves two purposes:
+  // 1) Allow a fast lookup of ChunkRecord by their ID (the tuple
+  //   {ProducerID, WriterID, ChunkID}). This is used when applying out-of-band
+  //   patches to the contents of the chunks after they have been copied into
+  //   the TraceBuffer.
+  // 2) keep the chunks ordered by their ID. This is used when reading back.
+  // 3) Keep metadata about the status of the chunk, e.g. whether the contents
+  //    have been read already and should be skipped in a future read pass.
+  // This struct should not have any field that is essential for reconstructing
+  // the contents of the buffer from a crash dump.
+  struct ChunkMeta {
+    // Key used for sorting in the map.
+    struct Key {
+      Key(ProducerID p, WriterID w, ChunkID c)
+          : producer_id{p}, writer_id{w}, chunk_id{c} {}
+
+      explicit Key(const ChunkRecord& cr)
+          : Key(cr.producer_id, cr.writer_id, cr.chunk_id) {}
+
+      // Note that this sorting doesn't keep into account the fact that ChunkID
+      // will wrap over at some point. The extra logic in SequenceIterator deals
+      // with that.
+      bool operator<(const Key& other) const {
+        return std::tie(producer_id, writer_id, chunk_id) <
+               std::tie(other.producer_id, other.writer_id, other.chunk_id);
+      }
+
+      bool operator==(const Key& other) const {
+        return std::tie(producer_id, writer_id, chunk_id) ==
+               std::tie(other.producer_id, other.writer_id, other.chunk_id);
+      }
+
+      bool operator!=(const Key& other) const { return !(*this == other); }
+
+      // These fields should match at all times the corresponding fields in
+      // the |chunk_record|. They are copied here purely for efficiency to avoid
+      // dereferencing the buffer all the time.
+      ProducerID producer_id;
+      WriterID writer_id;
+      ChunkID chunk_id;
+    };
+
+    enum IndexFlags : uint8_t {
+      // If set, the chunk state was kChunkComplete at the time it was copied.
+      // If unset, the chunk was still kChunkBeingWritten while copied. When
+      // reading from the chunk's sequence, the sequence will not advance past
+      // this chunk until this flag is set.
+      kComplete = 1 << 0,
+
+      // If set, we skipped the last packet that we read from this chunk e.g.
+      // because we it was a continuation from a previous chunk that was dropped
+      // or due to an ABI violation.
+      kLastReadPacketSkipped = 1 << 1
+    };
+
+    ChunkMeta(ChunkRecord* r, uint16_t p, bool complete, uint8_t f, uid_t u)
+        : chunk_record{r}, trusted_uid{u}, flags{f}, num_fragments{p} {
+      if (complete)
+        index_flags = kComplete;
+    }
+
+    bool is_complete() const { return index_flags & kComplete; }
+
+    void set_complete(bool complete) {
+      if (complete) {
+        index_flags |= kComplete;
+      } else {
+        index_flags &= ~kComplete;
+      }
+    }
+
+    bool last_read_packet_skipped() const {
+      return index_flags & kLastReadPacketSkipped;
+    }
+
+    void set_last_read_packet_skipped(bool skipped) {
+      if (skipped) {
+        index_flags |= kLastReadPacketSkipped;
+      } else {
+        index_flags &= ~kLastReadPacketSkipped;
+      }
+    }
+
+    ChunkRecord* const chunk_record;  // Addr of ChunkRecord within |data_|.
+    const uid_t trusted_uid;          // uid of the producer.
+
+    // Flags set by TraceBuffer to track the state of the chunk in the index.
+    uint8_t index_flags = 0;
+
+    // Correspond to |chunk_record->flags| and |chunk_record->num_fragments|.
+    // Copied here for performance reasons (avoids having to dereference
+    // |chunk_record| while iterating over ChunkMeta) and to aid debugging in
+    // case the buffer gets corrupted.
+    uint8_t flags = 0;           // See SharedMemoryABI::ChunkHeader::flags.
+    uint16_t num_fragments = 0;  // Total number of packet fragments.
+
+    uint16_t num_fragments_read = 0;  // Number of fragments already read.
+
+    // The start offset of the next fragment (the |num_fragments_read|-th) to be
+    // read. This is the offset in bytes from the beginning of the ChunkRecord's
+    // payload (the 1st fragment starts at |chunk_record| +
+    // sizeof(ChunkRecord)).
+    uint16_t cur_fragment_offset = 0;
+  };
+
+  using ChunkMap = std::map<ChunkMeta::Key, ChunkMeta>;
+
+  // Allows to iterate over a sub-sequence of |index_| for all keys belonging to
+  // the same {ProducerID,WriterID}. Furthermore takes into account the wrapping
+  // of ChunkID. Instances are valid only as long as the |index_| is not altered
+  // (can be used safely only between adjacent ReadNextTracePacket() calls).
+  // The order of the iteration will proceed in the following order:
+  // |wrapping_id| + 1 -> |seq_end|, |seq_begin| -> |wrapping_id|.
+  // Practical example:
+  // - Assume that kMaxChunkID == 7
+  // - Assume that we have all 8 chunks in the range (0..7).
+  // - Hence, |seq_begin| == c0, |seq_end| == c7
+  // - Assume |wrapping_id| = 4 (c4 is the last chunk copied over
+  //   through a CopyChunkUntrusted()).
+  // The resulting iteration order will be: c5, c6, c7, c0, c1, c2, c3, c4.
+  struct SequenceIterator {
+    // Points to the 1st key (the one with the numerically min ChunkID).
+    ChunkMap::iterator seq_begin;
+
+    // Points one past the last key (the one with the numerically max ChunkID).
+    ChunkMap::iterator seq_end;
+
+    // Current iterator, always >= seq_begin && <= seq_end.
+    ChunkMap::iterator cur;
+
+    // The latest ChunkID written. Determines the start/end of the sequence.
+    ChunkID wrapping_id;
+
+    bool is_valid() const { return cur != seq_end; }
+
+    ProducerID producer_id() const {
+      PERFETTO_DCHECK(is_valid());
+      return cur->first.producer_id;
+    }
+
+    WriterID writer_id() const {
+      PERFETTO_DCHECK(is_valid());
+      return cur->first.writer_id;
+    }
+
+    ChunkID chunk_id() const {
+      PERFETTO_DCHECK(is_valid());
+      return cur->first.chunk_id;
+    }
+
+    ChunkMeta& operator*() {
+      PERFETTO_DCHECK(is_valid());
+      return cur->second;
+    }
+
+    // Moves |cur| to the next chunk in the index.
+    // is_valid() will become false after calling this, if this was the last
+    // entry of the sequence.
+    void MoveNext();
+
+    void MoveToEnd() { cur = seq_end; }
+  };
+
+  enum class ReadAheadResult {
+    kSucceededReturnSlices,
+    kFailedMoveToNextSequence,
+    kFailedStayOnSameSequence,
+  };
+
+  enum class ReadPacketResult {
+    kSucceeded,
+    kFailedInvalidPacket,
+    kFailedEmptyPacket,
+  };
+
+  explicit TraceBuffer(OverwritePolicy);
+  TraceBuffer(const TraceBuffer&) = delete;
+  TraceBuffer& operator=(const TraceBuffer&) = delete;
+
+  bool Initialize(size_t size);
+
+  // Returns an object that allows to iterate over chunks in the |index_| that
+  // have the same {ProducerID, WriterID} of
+  // |seq_begin.first.{producer,writer}_id|. |seq_begin| must be an iterator to
+  // the first entry in the |index_| that has a different {ProducerID, WriterID}
+  // from the previous one. It is valid for |seq_begin| to be == index_.end()
+  // (i.e. if the index is empty). The iteration takes care of ChunkID wrapping,
+  // by using |last_chunk_id_|.
+  SequenceIterator GetReadIterForSequence(ChunkMap::iterator seq_begin);
+
+  // Used as a last resort when a buffer corruption is detected.
+  void ClearContentsAndResetRWCursors();
+
+  // Adds a padding record of the given size (must be a multiple of
+  // sizeof(ChunkRecord)).
+  void AddPaddingRecord(size_t);
+
+  // Look for contiguous fragment of the same packet starting from |read_iter_|.
+  // If a contiguous packet is found, all the fragments are pushed into
+  // TracePacket and the function returns kSucceededReturnSlices. If not, the
+  // function returns either kFailedMoveToNextSequence or
+  // kFailedStayOnSameSequence, telling the caller to continue looking for
+  // packets.
+  ReadAheadResult ReadAhead(TracePacket*);
+
+  // Deletes (by marking the record invalid and removing form the index) all
+  // chunks from |wptr_| to |wptr_| + |bytes_to_clear|.
+  // Returns:
+  //   * The size of the gap left between the next valid Chunk and the end of
+  //     the deletion range.
+  //   * 0 if no next valid chunk exists (if the buffer is still zeroed).
+  //   * -1 if the buffer |overwrite_policy_| == kDiscard and the deletion would
+  //     cause unread chunks to be overwritten. In this case the buffer is left
+  //     untouched.
+  // Graphically, assume the initial situation is the following (|wptr_| = 10).
+  // |0        |10 (wptr_)       |30       |40                 |60
+  // +---------+-----------------+---------+-------------------+---------+
+  // | Chunk 1 | Chunk 2         | Chunk 3 | Chunk 4           | Chunk 5 |
+  // +---------+-----------------+---------+-------------------+---------+
+  //           |_________Deletion range_______|~~return value~~|
+  //
+  // A call to DeleteNextChunksFor(32) will remove chunks 2,3,4 and return 18
+  // (60 - 42), the distance between chunk 5 and the end of the deletion range.
+  ssize_t DeleteNextChunksFor(size_t bytes_to_clear);
+
+  // Decodes the boundaries of the next packet (or a fragment) pointed by
+  // ChunkMeta and pushes that into |TracePacket|. It also increments the
+  // |num_fragments_read| counter.
+  // TracePacket can be nullptr, in which case the read state is still advanced.
+  // When TracePacket is not nullptr, ProducerID must also be not null and will
+  // be updated with the ProducerID that originally wrote the chunk.
+  ReadPacketResult ReadNextPacketInChunk(ChunkMeta*, TracePacket*);
+
+  void DcheckIsAlignedAndWithinBounds(const uint8_t* ptr) const {
+    PERFETTO_DCHECK(ptr >= begin() && ptr <= end() - sizeof(ChunkRecord));
+    PERFETTO_DCHECK(
+        (reinterpret_cast<uintptr_t>(ptr) & (alignof(ChunkRecord) - 1)) == 0);
+  }
+
+  ChunkRecord* GetChunkRecordAt(uint8_t* ptr) {
+    DcheckIsAlignedAndWithinBounds(ptr);
+    // We may be accessing a new (empty) record.
+    data_.EnsureCommitted(
+        static_cast<size_t>(ptr + sizeof(ChunkRecord) - begin()));
+    return reinterpret_cast<ChunkRecord*>(ptr);
+  }
+
+  void DiscardWrite();
+
+  // |src| can be nullptr (in which case |size| must be ==
+  // record.size - sizeof(ChunkRecord)), for the case of writing a padding
+  // record. |wptr_| is NOT advanced by this function, the caller must do that.
+  void WriteChunkRecord(uint8_t* wptr,
+                        const ChunkRecord& record,
+                        const uint8_t* src,
+                        size_t size) {
+    // Note: |record.size| will be slightly bigger than |size| because of the
+    // ChunkRecord header and rounding, to ensure that all ChunkRecord(s) are
+    // multiple of sizeof(ChunkRecord). The invariant is:
+    // record.size >= |size| + sizeof(ChunkRecord) (== if no rounding).
+    PERFETTO_DCHECK(size <= ChunkRecord::kMaxSize);
+    PERFETTO_DCHECK(record.size >= sizeof(record));
+    PERFETTO_DCHECK(record.size % sizeof(record) == 0);
+    PERFETTO_DCHECK(record.size >= size + sizeof(record));
+    PERFETTO_CHECK(record.size <= size_to_end());
+    DcheckIsAlignedAndWithinBounds(wptr);
+
+    // We may be writing to this area for the first time.
+    data_.EnsureCommitted(static_cast<size_t>(wptr + record.size - begin()));
+
+    // Deliberately not a *D*CHECK.
+    PERFETTO_CHECK(wptr + sizeof(record) + size <= end());
+    memcpy(wptr, &record, sizeof(record));
+    if (PERFETTO_LIKELY(src)) {
+      // If the producer modifies the data in the shared memory buffer while we
+      // are copying it to the central buffer, TSAN will (rightfully) flag that
+      // as a race. However the entire purpose of copying the data into the
+      // central buffer is that we can validate it without worrying that the
+      // producer changes it from under our feet, so this race is benign. The
+      // alternative would be to try computing which part of the buffer is safe
+      // to read (assuming a well-behaving client), but the risk of introducing
+      // a bug that way outweighs the benefit.
+      PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(
+          src, size, "Benign race when copying chunk from shared memory.")
+      memcpy(wptr + sizeof(record), src, size);
+    } else {
+      PERFETTO_DCHECK(size == record.size - sizeof(record));
+    }
+    const size_t rounding_size = record.size - sizeof(record) - size;
+    memset(wptr + sizeof(record) + size, 0, rounding_size);
+  }
+
+  uint8_t* begin() const { return reinterpret_cast<uint8_t*>(data_.Get()); }
+  uint8_t* end() const { return begin() + size_; }
+  size_t size_to_end() const { return static_cast<size_t>(end() - wptr_); }
+
+  base::PagedMemory data_;
+  size_t size_ = 0;            // Size in bytes of |data_|.
+  size_t max_chunk_size_ = 0;  // Max size in bytes allowed for a chunk.
+  uint8_t* wptr_ = nullptr;    // Write pointer.
+
+  // An index that keeps track of the positions and metadata of each
+  // ChunkRecord.
+  ChunkMap index_;
+
+  // Read iterator used for ReadNext(). It is reset by calling BeginRead().
+  // It becomes invalid after any call to methods that alters the |index_|.
+  SequenceIterator read_iter_;
+
+  // See comments at the top of the file.
+  OverwritePolicy overwrite_policy_ = kOverwrite;
+
+  // Only used when |overwrite_policy_ == kDiscard|. This is set the first time
+  // a write fails because it would overwrite unread chunks.
+  bool discard_writes_ = false;
+
+  // Keeps track of the highest ChunkID written for a given sequence, taking
+  // into account a potential overflow of ChunkIDs. In the case of overflow,
+  // stores the highest ChunkID written since the overflow.
+  //
+  // TODO(primiano): should clean up keys from this map. Right now it grows
+  // without bounds (although realistically is not a problem unless we have too
+  // many producers/writers within the same trace session).
+  std::map<std::pair<ProducerID, WriterID>, ChunkID> last_chunk_id_written_;
+
+  // Statistics about buffer usage.
+  TraceStats::BufferStats stats_;
+
+#if PERFETTO_DCHECK_IS_ON()
+  bool changed_since_last_read_ = false;
+#endif
+
+  // When true disable some DCHECKs that have been put in place to detect
+  // bugs in the producers. This is for tests that feed malicious inputs and
+  // hence mimic a buggy producer.
+  bool suppress_client_dchecks_for_testing_ = false;
+};
+
+}  // namespace perfetto
+
+#endif  // SRC_TRACING_CORE_TRACE_BUFFER_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/tracing/core/trace_buffer.h"
+
+#include <limits>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+#define TRACE_BUFFER_VERBOSE_LOGGING() 0  // Set to 1 when debugging unittests.
+#if TRACE_BUFFER_VERBOSE_LOGGING()
+#define TRACE_BUFFER_DLOG PERFETTO_DLOG
+namespace {
+constexpr char kHexDigits[] = "0123456789abcdef";
+std::string HexDump(const uint8_t* src, size_t size) {
+  std::string buf;
+  buf.reserve(4096 * 4);
+  char line[64];
+  char* c = line;
+  for (size_t i = 0; i < size; i++) {
+    *c++ = kHexDigits[(src[i] >> 4) & 0x0f];
+    *c++ = kHexDigits[(src[i] >> 0) & 0x0f];
+    if (i % 16 == 15) {
+      buf.append("\n");
+      buf.append(line);
+      c = line;
+    }
+  }
+  return buf;
+}
+}  // namespace
+#else
+#define TRACE_BUFFER_DLOG(...) void()
+#endif
+
+namespace perfetto {
+
+namespace {
+constexpr uint8_t kFirstPacketContinuesFromPrevChunk =
+    SharedMemoryABI::ChunkHeader::kFirstPacketContinuesFromPrevChunk;
+constexpr uint8_t kLastPacketContinuesOnNextChunk =
+    SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk;
+constexpr uint8_t kChunkNeedsPatching =
+    SharedMemoryABI::ChunkHeader::kChunkNeedsPatching;
+}  // namespace.
+
+constexpr size_t TraceBuffer::ChunkRecord::kMaxSize;
+constexpr size_t TraceBuffer::InlineChunkHeaderSize = sizeof(ChunkRecord);
+
+// static
+std::unique_ptr<TraceBuffer> TraceBuffer::Create(size_t size_in_bytes,
+                                                 OverwritePolicy pol) {
+  std::unique_ptr<TraceBuffer> trace_buffer(new TraceBuffer(pol));
+  if (!trace_buffer->Initialize(size_in_bytes))
+    return nullptr;
+  return trace_buffer;
+}
+
+TraceBuffer::TraceBuffer(OverwritePolicy pol) : overwrite_policy_(pol) {
+  // See comments in ChunkRecord for the rationale of this.
+  static_assert(sizeof(ChunkRecord) == sizeof(SharedMemoryABI::PageHeader) +
+                                           sizeof(SharedMemoryABI::ChunkHeader),
+                "ChunkRecord out of sync with the layout of SharedMemoryABI");
+}
+
+TraceBuffer::~TraceBuffer() = default;
+
+bool TraceBuffer::Initialize(size_t size) {
+  static_assert(
+      SharedMemoryABI::kMinPageSize % sizeof(ChunkRecord) == 0,
+      "sizeof(ChunkRecord) must be an integer divider of a page size");
+  data_ = base::PagedMemory::Allocate(
+      size, base::PagedMemory::kMayFail | base::PagedMemory::kDontCommit);
+  if (!data_.IsValid()) {
+    PERFETTO_ELOG("Trace buffer allocation failed (size: %zu)", size);
+    return false;
+  }
+  size_ = size;
+  stats_.set_buffer_size(size);
+  max_chunk_size_ = std::min(size, ChunkRecord::kMaxSize);
+  wptr_ = begin();
+  index_.clear();
+  last_chunk_id_written_.clear();
+  read_iter_ = GetReadIterForSequence(index_.end());
+  return true;
+}
+
+// Note: |src| points to a shmem region that is shared with the producer. Assume
+// that the producer is malicious and will change the content of |src|
+// while we execute here. Don't do any processing on it other than memcpy().
+void TraceBuffer::CopyChunkUntrusted(ProducerID producer_id_trusted,
+                                     uid_t producer_uid_trusted,
+                                     WriterID writer_id,
+                                     ChunkID chunk_id,
+                                     uint16_t num_fragments,
+                                     uint8_t chunk_flags,
+                                     bool chunk_complete,
+                                     const uint8_t* src,
+                                     size_t size) {
+  // |record_size| = |size| + sizeof(ChunkRecord), rounded up to avoid to end
+  // up in a fragmented state where size_to_end() < sizeof(ChunkRecord).
+  const size_t record_size =
+      base::AlignUp<sizeof(ChunkRecord)>(size + sizeof(ChunkRecord));
+  if (PERFETTO_UNLIKELY(record_size > max_chunk_size_)) {
+    stats_.set_abi_violations(stats_.abi_violations() + 1);
+    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
+    return;
+  }
+
+  TRACE_BUFFER_DLOG("CopyChunk @ %lu, size=%zu", wptr_ - begin(), record_size);
+
+#if PERFETTO_DCHECK_IS_ON()
+  changed_since_last_read_ = true;
+#endif
+
+  // If the chunk hasn't been completed, we should only consider the first
+  // |num_fragments - 1| packets complete. For simplicity, we simply disregard
+  // the last one when we copy the chunk.
+  if (PERFETTO_UNLIKELY(!chunk_complete)) {
+    if (num_fragments > 0) {
+      num_fragments--;
+      // These flags should only affect the last packet in the chunk. We clear
+      // them, so that TraceBuffer is able to look at the remaining packets in
+      // this chunk.
+      chunk_flags &= ~kLastPacketContinuesOnNextChunk;
+      chunk_flags &= ~kChunkNeedsPatching;
+    }
+  }
+
+  ChunkRecord record(record_size);
+  record.producer_id = producer_id_trusted;
+  record.chunk_id = chunk_id;
+  record.writer_id = writer_id;
+  record.num_fragments = num_fragments;
+  record.flags = chunk_flags;
+  ChunkMeta::Key key(record);
+
+  // Check whether we have already copied the same chunk previously. This may
+  // happen if the service scrapes chunks in a potentially incomplete state
+  // before receiving commit requests for them from the producer. Note that the
+  // service may scrape and thus override chunks in arbitrary order since the
+  // chunks aren't ordered in the SMB.
+  const auto it = index_.find(key);
+  if (PERFETTO_UNLIKELY(it != index_.end())) {
+    ChunkMeta* record_meta = &it->second;
+    ChunkRecord* prev = record_meta->chunk_record;
+
+    // Verify that the old chunk's metadata corresponds to the new one.
+    // Overridden chunks should never change size, since the page layout is
+    // fixed per writer. The number of fragments should also never decrease and
+    // flags should not be removed.
+    if (PERFETTO_UNLIKELY(ChunkMeta::Key(*prev) != key ||
+                          prev->size != record_size ||
+                          prev->num_fragments > num_fragments ||
+                          (prev->flags & chunk_flags) != prev->flags)) {
+      stats_.set_abi_violations(stats_.abi_violations() + 1);
+      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
+      return;
+    }
+
+    // If this chunk was previously copied with the same number of fragments and
+    // the number didn't change, there's no need to copy it again. If the
+    // previous chunk was complete already, this should always be the case.
+    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_ ||
+                    !record_meta->is_complete() ||
+                    (chunk_complete && prev->num_fragments == num_fragments));
+    if (prev->num_fragments == num_fragments) {
+      TRACE_BUFFER_DLOG("  skipping recommit of identical chunk");
+      return;
+    }
+
+    // If we've already started reading from chunk N+1 following this chunk N,
+    // don't override chunk N. Otherwise we may end up reading a packet from
+    // chunk N after having read from chunk N+1, thereby violating sequential
+    // read of packets. This shouldn't happen if the producer is well-behaved,
+    // because it shouldn't start chunk N+1 before completing chunk N.
+    ChunkMeta::Key subsequent_key = key;
+    static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
+                  "ChunkID wraps");
+    subsequent_key.chunk_id++;
+    const auto subsequent_it = index_.find(subsequent_key);
+    if (subsequent_it != index_.end() &&
+        subsequent_it->second.num_fragments_read > 0) {
+      stats_.set_abi_violations(stats_.abi_violations() + 1);
+      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
+      return;
+    }
+
+    // We should not have read past the last packet.
+    if (record_meta->num_fragments_read > prev->num_fragments) {
+      PERFETTO_ELOG(
+          "TraceBuffer read too many fragments from an incomplete chunk");
+      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
+      return;
+    }
+
+    uint8_t* wptr = reinterpret_cast<uint8_t*>(prev);
+    TRACE_BUFFER_DLOG("  overriding chunk @ %lu, size=%zu", wptr - begin(),
+                      record_size);
+
+    // Update chunk meta data stored in the index, as it may have changed.
+    record_meta->num_fragments = num_fragments;
+    record_meta->flags = chunk_flags;
+    record_meta->set_complete(chunk_complete);
+
+    // Override the ChunkRecord contents at the original |wptr|.
+    TRACE_BUFFER_DLOG("  copying @ [%lu - %lu] %zu", wptr - begin(),
+                      uintptr_t(wptr - begin()) + record_size, record_size);
+    WriteChunkRecord(wptr, record, src, size);
+    TRACE_BUFFER_DLOG("Chunk raw: %s", HexDump(wptr, record_size).c_str());
+    stats_.set_chunks_rewritten(stats_.chunks_rewritten() + 1);
+    return;
+  }
+
+  if (PERFETTO_UNLIKELY(discard_writes_))
+    return DiscardWrite();
+
+  // If there isn't enough room from the given write position. Write a padding
+  // record to clear the end of the buffer and wrap back.
+  const size_t cached_size_to_end = size_to_end();
+  if (PERFETTO_UNLIKELY(record_size > cached_size_to_end)) {
+    ssize_t res = DeleteNextChunksFor(cached_size_to_end);
+    if (res == -1)
+      return DiscardWrite();
+    PERFETTO_DCHECK(static_cast<size_t>(res) <= cached_size_to_end);
+    AddPaddingRecord(cached_size_to_end);
+    wptr_ = begin();
+    stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
+    PERFETTO_DCHECK(size_to_end() >= record_size);
+  }
+
+  // At this point either |wptr_| points to an untouched part of the buffer
+  // (i.e. *wptr_ == 0) or we are about to overwrite one or more ChunkRecord(s).
+  // In the latter case we need to first figure out where the next valid
+  // ChunkRecord is (if it exists) and add padding between the new record.
+  // Example ((w) == write cursor):
+  //
+  // Initial state (wtpr_ == 0):
+  // |0 (w)    |10               |30                  |50
+  // +---------+-----------------+--------------------+--------------------+
+  // | Chunk 1 | Chunk 2         | Chunk 3            | Chunk 4            |
+  // +---------+-----------------+--------------------+--------------------+
+  //
+  // Let's assume we now want now write a 5th Chunk of size == 35. The final
+  // state should look like this:
+  // |0                                |35 (w)         |50
+  // +---------------------------------+---------------+--------------------+
+  // | Chunk 5                         | Padding Chunk | Chunk 4            |
+  // +---------------------------------+---------------+--------------------+
+
+  // Deletes all chunks from |wptr_| to |wptr_| + |record_size|.
+  ssize_t del_res = DeleteNextChunksFor(record_size);
+  if (del_res == -1)
+    return DiscardWrite();
+  size_t padding_size = static_cast<size_t>(del_res);
+
+  // Now first insert the new chunk. At the end, if necessary, add the padding.
+  stats_.set_chunks_written(stats_.chunks_written() + 1);
+  stats_.set_bytes_written(stats_.bytes_written() + record_size);
+  auto it_and_inserted = index_.emplace(
+      key, ChunkMeta(GetChunkRecordAt(wptr_), num_fragments, chunk_complete,
+                     chunk_flags, producer_uid_trusted));
+  PERFETTO_DCHECK(it_and_inserted.second);
+  TRACE_BUFFER_DLOG("  copying @ [%lu - %lu] %zu", wptr_ - begin(),
+                    uintptr_t(wptr_ - begin()) + record_size, record_size);
+  WriteChunkRecord(wptr_, record, src, size);
+  TRACE_BUFFER_DLOG("Chunk raw: %s", HexDump(wptr_, record_size).c_str());
+  wptr_ += record_size;
+  if (wptr_ >= end()) {
+    PERFETTO_DCHECK(padding_size == 0);
+    wptr_ = begin();
+    stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
+  }
+  DcheckIsAlignedAndWithinBounds(wptr_);
+
+  // Chunks may be received out of order, so only update last_chunk_id if the
+  // new chunk_id is larger. But take into account overflows by only selecting
+  // the new ID if its distance to the latest ID is smaller than half the number
+  // space.
+  //
+  // This accounts for both the case where the new ID has just overflown and
+  // last_chunk_id be updated even though it's smaller (e.g. |chunk_id| = 1 and
+  // |last_chunk_id| = kMaxChunkId; chunk_id - last_chunk_id = 0) and the case
+  // where the new ID is an out-of-order ID right after an overflow and
+  // last_chunk_id shouldn't be updated even though it's larger (e.g. |chunk_id|
+  // = kMaxChunkId and |last_chunk_id| = 1; chunk_id - last_chunk_id =
+  // kMaxChunkId - 1).
+  auto producer_and_writer_id = std::make_pair(producer_id_trusted, writer_id);
+  ChunkID& last_chunk_id = last_chunk_id_written_[producer_and_writer_id];
+  static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
+                "This code assumes that ChunkID wraps at kMaxChunkID");
+  if (chunk_id - last_chunk_id < kMaxChunkID / 2) {
+    last_chunk_id = chunk_id;
+  } else {
+    stats_.set_chunks_committed_out_of_order(
+        stats_.chunks_committed_out_of_order() + 1);
+  }
+
+  if (padding_size)
+    AddPaddingRecord(padding_size);
+}
+
+ssize_t TraceBuffer::DeleteNextChunksFor(size_t bytes_to_clear) {
+  PERFETTO_CHECK(!discard_writes_);
+
+  // Find the position of the first chunk which begins at or after
+  // (|wptr_| + |bytes|). Note that such a chunk might not exist and we might
+  // either reach the end of the buffer or a zeroed region of the buffer.
+  uint8_t* next_chunk_ptr = wptr_;
+  uint8_t* search_end = wptr_ + bytes_to_clear;
+  TRACE_BUFFER_DLOG("Delete [%zu %zu]", wptr_ - begin(), search_end - begin());
+  DcheckIsAlignedAndWithinBounds(wptr_);
+  PERFETTO_DCHECK(search_end <= end());
+  std::vector<ChunkMap::iterator> index_delete;
+  uint64_t chunks_overwritten = stats_.chunks_overwritten();
+  uint64_t bytes_overwritten = stats_.bytes_overwritten();
+  uint64_t padding_bytes_cleared = stats_.padding_bytes_cleared();
+  while (next_chunk_ptr < search_end) {
+    const ChunkRecord& next_chunk = *GetChunkRecordAt(next_chunk_ptr);
+    TRACE_BUFFER_DLOG(
+        "  scanning chunk [%zu %zu] (valid=%d)", next_chunk_ptr - begin(),
+        next_chunk_ptr - begin() + next_chunk.size, next_chunk.is_valid());
+
+    // We just reached the untouched part of the buffer, it's going to be all
+    // zeroes from here to end().
+    // Optimization: if during Initialize() we fill the buffer with padding
+    // records we could get rid of this branch.
+    if (PERFETTO_UNLIKELY(!next_chunk.is_valid())) {
+      // This should happen only at the first iteration. The zeroed area can
+      // only begin precisely at the |wptr_|, not after. Otherwise it means that
+      // we wrapped but screwed up the ChunkRecord chain.
+      PERFETTO_DCHECK(next_chunk_ptr == wptr_);
+      return 0;
+    }
+
+    // Remove |next_chunk| from the index, unless it's a padding record (padding
+    // records are not part of the index).
+    if (PERFETTO_LIKELY(!next_chunk.is_padding)) {
+      ChunkMeta::Key key(next_chunk);
+      auto it = index_.find(key);
+      bool will_remove = false;
+      if (PERFETTO_LIKELY(it != index_.end())) {
+        const ChunkMeta& meta = it->second;
+        if (PERFETTO_UNLIKELY(meta.num_fragments_read < meta.num_fragments)) {
+          if (overwrite_policy_ == kDiscard)
+            return -1;
+          chunks_overwritten++;
+          bytes_overwritten += next_chunk.size;
+        }
+        index_delete.push_back(it);
+        will_remove = true;
+      }
+      TRACE_BUFFER_DLOG(
+          "  del index {%" PRIu32 ",%" PRIu32 ",%u} @ [%lu - %lu] %d",
+          key.producer_id, key.writer_id, key.chunk_id,
+          next_chunk_ptr - begin(), next_chunk_ptr - begin() + next_chunk.size,
+          will_remove);
+      PERFETTO_DCHECK(will_remove);
+    } else {
+      padding_bytes_cleared += next_chunk.size;
+    }
+
+    next_chunk_ptr += next_chunk.size;
+
+    // We should never hit this, unless we managed to screw up while writing
+    // to the buffer and breaking the ChunkRecord(s) chain.
+    // TODO(primiano): Write more meaningful logging with the status of the
+    // buffer, to get more actionable bugs in case we hit this.
+    PERFETTO_CHECK(next_chunk_ptr <= end());
+  }
+
+  // Remove from the index.
+  for (auto it : index_delete) {
+    index_.erase(it);
+  }
+  stats_.set_chunks_overwritten(chunks_overwritten);
+  stats_.set_bytes_overwritten(bytes_overwritten);
+  stats_.set_padding_bytes_cleared(padding_bytes_cleared);
+
+  PERFETTO_DCHECK(next_chunk_ptr >= search_end && next_chunk_ptr <= end());
+  return static_cast<ssize_t>(next_chunk_ptr - search_end);
+}
+
+void TraceBuffer::AddPaddingRecord(size_t size) {
+  PERFETTO_DCHECK(size >= sizeof(ChunkRecord) && size <= ChunkRecord::kMaxSize);
+  ChunkRecord record(size);
+  record.is_padding = 1;
+  TRACE_BUFFER_DLOG("AddPaddingRecord @ [%lu - %lu] %zu", wptr_ - begin(),
+                    uintptr_t(wptr_ - begin()) + size, size);
+  WriteChunkRecord(wptr_, record, nullptr, size - sizeof(ChunkRecord));
+  stats_.set_padding_bytes_written(stats_.padding_bytes_written() + size);
+  // |wptr_| is deliberately not advanced when writing a padding record.
+}
+
+bool TraceBuffer::TryPatchChunkContents(ProducerID producer_id,
+                                        WriterID writer_id,
+                                        ChunkID chunk_id,
+                                        const Patch* patches,
+                                        size_t patches_size,
+                                        bool other_patches_pending) {
+  ChunkMeta::Key key(producer_id, writer_id, chunk_id);
+  auto it = index_.find(key);
+  if (it == index_.end()) {
+    stats_.set_patches_failed(stats_.patches_failed() + 1);
+    return false;
+  }
+  ChunkMeta& chunk_meta = it->second;
+
+  // Check that the index is consistent with the actual ProducerID/WriterID
+  // stored in the ChunkRecord.
+  PERFETTO_DCHECK(ChunkMeta::Key(*chunk_meta.chunk_record) == key);
+  uint8_t* chunk_begin = reinterpret_cast<uint8_t*>(chunk_meta.chunk_record);
+  PERFETTO_DCHECK(chunk_begin >= begin());
+  uint8_t* chunk_end = chunk_begin + chunk_meta.chunk_record->size;
+  PERFETTO_DCHECK(chunk_end <= end());
+
+  static_assert(Patch::kSize == SharedMemoryABI::kPacketHeaderSize,
+                "Patch::kSize out of sync with SharedMemoryABI");
+
+  for (size_t i = 0; i < patches_size; i++) {
+    uint8_t* ptr =
+        chunk_begin + sizeof(ChunkRecord) + patches[i].offset_untrusted;
+    TRACE_BUFFER_DLOG("PatchChunk {%" PRIu32 ",%" PRIu32
+                      ",%u} size=%zu @ %zu with {%02x %02x %02x %02x} cur "
+                      "{%02x %02x %02x %02x}",
+                      producer_id, writer_id, chunk_id, chunk_end - chunk_begin,
+                      patches[i].offset_untrusted, patches[i].data[0],
+                      patches[i].data[1], patches[i].data[2],
+                      patches[i].data[3], ptr[0], ptr[1], ptr[2], ptr[3]);
+    if (ptr < chunk_begin + sizeof(ChunkRecord) ||
+        ptr > chunk_end - Patch::kSize) {
+      // Either the IPC was so slow and in the meantime the writer managed to
+      // wrap over |chunk_id| or the producer sent a malicious IPC.
+      stats_.set_patches_failed(stats_.patches_failed() + 1);
+      return false;
+    }
+
+    // DCHECK that we are writing into a zero-filled size field and not into
+    // valid data. It relies on ScatteredStreamWriter::ReserveBytes() to
+    // zero-fill reservations in debug builds.
+    char zero[Patch::kSize]{};
+    PERFETTO_DCHECK(memcmp(ptr, &zero, Patch::kSize) == 0);
+
+    memcpy(ptr, &patches[i].data[0], Patch::kSize);
+  }
+  TRACE_BUFFER_DLOG(
+      "Chunk raw (after patch): %s",
+      HexDump(chunk_begin, chunk_meta.chunk_record->size).c_str());
+
+  stats_.set_patches_succeeded(stats_.patches_succeeded() + patches_size);
+  if (!other_patches_pending) {
+    chunk_meta.flags &= ~kChunkNeedsPatching;
+    chunk_meta.chunk_record->flags = chunk_meta.flags;
+  }
+  return true;
+}
+
+void TraceBuffer::BeginRead() {
+  read_iter_ = GetReadIterForSequence(index_.begin());
+#if PERFETTO_DCHECK_IS_ON()
+  changed_since_last_read_ = false;
+#endif
+}
+
+TraceBuffer::SequenceIterator TraceBuffer::GetReadIterForSequence(
+    ChunkMap::iterator seq_begin) {
+  SequenceIterator iter;
+  iter.seq_begin = seq_begin;
+  if (seq_begin == index_.end()) {
+    iter.cur = iter.seq_end = index_.end();
+    return iter;
+  }
+
+#if PERFETTO_DCHECK_IS_ON()
+  // Either |seq_begin| is == index_.begin() or the item immediately before must
+  // belong to a different {ProducerID, WriterID} sequence.
+  if (seq_begin != index_.begin() && seq_begin != index_.end()) {
+    auto prev_it = seq_begin;
+    prev_it--;
+    PERFETTO_DCHECK(
+        seq_begin == index_.begin() ||
+        std::tie(prev_it->first.producer_id, prev_it->first.writer_id) <
+            std::tie(seq_begin->first.producer_id, seq_begin->first.writer_id));
+  }
+#endif
+
+  // Find the first entry that has a greater {ProducerID, WriterID} (or just
+  // index_.end() if we reached the end).
+  ChunkMeta::Key key = seq_begin->first;  // Deliberate copy.
+  key.chunk_id = kMaxChunkID;
+  iter.seq_end = index_.upper_bound(key);
+  PERFETTO_DCHECK(iter.seq_begin != iter.seq_end);
+
+  // Now find the first entry between [seq_begin, seq_end) that is
+  // > last_chunk_id_written_. This is where we the sequence will start (see
+  // notes about wrapping of IDs in the header).
+  auto producer_and_writer_id = std::make_pair(key.producer_id, key.writer_id);
+  PERFETTO_DCHECK(last_chunk_id_written_.count(producer_and_writer_id));
+  iter.wrapping_id = last_chunk_id_written_[producer_and_writer_id];
+  key.chunk_id = iter.wrapping_id;
+  iter.cur = index_.upper_bound(key);
+  if (iter.cur == iter.seq_end)
+    iter.cur = iter.seq_begin;
+  return iter;
+}
+
+void TraceBuffer::SequenceIterator::MoveNext() {
+  // Stop iterating when we reach the end of the sequence.
+  // Note: |seq_begin| might be == |seq_end|.
+  if (cur == seq_end || cur->first.chunk_id == wrapping_id) {
+    cur = seq_end;
+    return;
+  }
+
+  // If the current chunk wasn't completed yet, we shouldn't advance past it as
+  // it may be rewritten with additional packets.
+  if (!cur->second.is_complete()) {
+    cur = seq_end;
+    return;
+  }
+
+  ChunkID last_chunk_id = cur->first.chunk_id;
+  if (++cur == seq_end)
+    cur = seq_begin;
+
+  // There may be a missing chunk in the sequence of chunks, in which case the
+  // next chunk's ID won't follow the last one's. If so, skip the rest of the
+  // sequence. We'll return to it later once the hole is filled.
+  if (last_chunk_id + 1 != cur->first.chunk_id)
+    cur = seq_end;
+}
+
+bool TraceBuffer::ReadNextTracePacket(
+    TracePacket* packet,
+    PacketSequenceProperties* sequence_properties,
+    bool* previous_packet_on_sequence_dropped) {
+  // Note: MoveNext() moves only within the next chunk within the same
+  // {ProducerID, WriterID} sequence. Here we want to:
+  // - return the next patched+complete packet in the current sequence, if any.
+  // - return the first patched+complete packet in the next sequence, if any.
+  // - return false if none of the above is found.
+  TRACE_BUFFER_DLOG("ReadNextTracePacket()");
+
+  // Just in case we forget to initialize these below.
+  *sequence_properties = {0, kInvalidUid, 0};
+  *previous_packet_on_sequence_dropped = false;
+
+  // At the start of each sequence iteration, we consider the last read packet
+  // dropped. While iterating over the chunks in the sequence, we update this
+  // flag based on our knowledge about the last packet that was read from each
+  // chunk (|last_read_packet_skipped| in ChunkMeta).
+  bool previous_packet_dropped = true;
+
+#if PERFETTO_DCHECK_IS_ON()
+  PERFETTO_DCHECK(!changed_since_last_read_);
+#endif
+  for (;; read_iter_.MoveNext()) {
+    if (PERFETTO_UNLIKELY(!read_iter_.is_valid())) {
+      // We ran out of chunks in the current {ProducerID, WriterID} sequence or
+      // we just reached the index_.end().
+
+      if (PERFETTO_UNLIKELY(read_iter_.seq_end == index_.end()))
+        return false;
+
+      // We reached the end of sequence, move to the next one.
+      // Note: ++read_iter_.seq_end might become index_.end(), but
+      // GetReadIterForSequence() knows how to deal with that.
+      read_iter_ = GetReadIterForSequence(read_iter_.seq_end);
+      PERFETTO_DCHECK(read_iter_.is_valid() && read_iter_.cur != index_.end());
+      previous_packet_dropped = true;
+    }
+
+    ChunkMeta* chunk_meta = &*read_iter_;
+
+    // If the chunk has holes that are awaiting to be patched out-of-band,
+    // skip the current sequence and move to the next one.
+    if (chunk_meta->flags & kChunkNeedsPatching) {
+      read_iter_.MoveToEnd();
+      continue;
+    }
+
+    const ProducerID trusted_producer_id = read_iter_.producer_id();
+    const WriterID writer_id = read_iter_.writer_id();
+    const uid_t trusted_uid = chunk_meta->trusted_uid;
+
+    // At this point we have a chunk in |chunk_meta| that has not been fully
+    // read. We don't know yet whether we have enough data to read the full
+    // packet (in the case it's fragmented over several chunks) and we are about
+    // to find that out. Specifically:
+    // A) If the first fragment is unread and is a fragment continuing from a
+    //    previous chunk, it means we have missed the previous ChunkID. In
+    //    fact, if this wasn't the case, a previous call to ReadNext() shouldn't
+    //    have moved the cursor to this chunk.
+    // B) Any fragment > 0 && < last is always readable. By definition an inner
+    //    packet is never fragmented and hence doesn't require neither stitching
+    //    nor any out-of-band patching. The same applies to the last packet
+    //    iff it doesn't continue on the next chunk.
+    // C) If the last packet (which might be also the only packet in the chunk)
+    //    is a fragment and continues on the next chunk, we peek at the next
+    //    chunks and, if we have all of them, mark as read and move the cursor.
+    //
+    // +---------------+   +-------------------+  +---------------+
+    // | ChunkID: 1    |   | ChunkID: 2        |  | ChunkID: 3    |
+    // |---------------+   +-------------------+  +---------------+
+    // | Packet 1      |   |                   |  | ... Packet 3  |
+    // | Packet 2      |   | ... Packet 3  ... |  | Packet 4      |
+    // | Packet 3  ... |   |                   |  | Packet 5 ...  |
+    // +---------------+   +-------------------+  +---------------+
+
+    PERFETTO_DCHECK(chunk_meta->num_fragments_read <=
+                    chunk_meta->num_fragments);
+
+    // If we didn't read any packets from this chunk, the last packet was from
+    // the previous chunk we iterated over; so don't update
+    // |previous_packet_dropped| in this case.
+    if (chunk_meta->num_fragments_read > 0)
+      previous_packet_dropped = chunk_meta->last_read_packet_skipped();
+
+    while (chunk_meta->num_fragments_read < chunk_meta->num_fragments) {
+      enum { kSkip = 0, kReadOnePacket, kTryReadAhead } action;
+      if (chunk_meta->num_fragments_read == 0) {
+        if (chunk_meta->flags & kFirstPacketContinuesFromPrevChunk) {
+          action = kSkip;  // Case A.
+        } else if (chunk_meta->num_fragments == 1 &&
+                   (chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
+          action = kTryReadAhead;  // Case C.
+        } else {
+          action = kReadOnePacket;  // Case B.
+        }
+      } else if (chunk_meta->num_fragments_read <
+                     chunk_meta->num_fragments - 1 ||
+                 !(chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
+        action = kReadOnePacket;  // Case B.
+      } else {
+        action = kTryReadAhead;  // Case C.
+      }
+
+      TRACE_BUFFER_DLOG("  chunk %u, packet %hu of %hu, action=%d",
+                        read_iter_.chunk_id(), chunk_meta->num_fragments_read,
+                        chunk_meta->num_fragments, action);
+
+      if (action == kSkip) {
+        // This fragment will be skipped forever, not just in this ReadPacket()
+        // iteration. This happens by virtue of ReadNextPacketInChunk()
+        // incrementing the |num_fragments_read| and marking the fragment as
+        // read even if we didn't really.
+        ReadNextPacketInChunk(chunk_meta, nullptr);
+        chunk_meta->set_last_read_packet_skipped(true);
+        previous_packet_dropped = true;
+        continue;
+      }
+
+      if (action == kReadOnePacket) {
+        // The easy peasy case B.
+        ReadPacketResult result = ReadNextPacketInChunk(chunk_meta, packet);
+
+        if (PERFETTO_LIKELY(result == ReadPacketResult::kSucceeded)) {
+          *sequence_properties = {trusted_producer_id, trusted_uid, writer_id};
+          *previous_packet_on_sequence_dropped = previous_packet_dropped;
+          return true;
+        } else if (result == ReadPacketResult::kFailedEmptyPacket) {
+          // We can ignore and skip empty packets.
+          PERFETTO_DCHECK(packet->slices().empty());
+          continue;
+        }
+
+        // In extremely rare cases (producer bugged / malicious) the chunk might
+        // contain an invalid fragment. In such case we don't want to stall the
+        // sequence but just skip the chunk and move on. ReadNextPacketInChunk()
+        // marks the chunk as fully read, so we don't attempt to read from it
+        // again in a future call to ReadBuffers(). It also already records an
+        // abi violation for this.
+        PERFETTO_DCHECK(result == ReadPacketResult::kFailedInvalidPacket);
+        chunk_meta->set_last_read_packet_skipped(true);
+        previous_packet_dropped = true;
+        break;
+      }
+
+      PERFETTO_DCHECK(action == kTryReadAhead);
+      ReadAheadResult ra_res = ReadAhead(packet);
+      if (ra_res == ReadAheadResult::kSucceededReturnSlices) {
+        stats_.set_readaheads_succeeded(stats_.readaheads_succeeded() + 1);
+        *sequence_properties = {trusted_producer_id, trusted_uid, writer_id};
+        *previous_packet_on_sequence_dropped = previous_packet_dropped;
+        return true;
+      }
+
+      if (ra_res == ReadAheadResult::kFailedMoveToNextSequence) {
+        // readahead didn't find a contiguous packet sequence. We'll try again
+        // on the next ReadPacket() call.
+        stats_.set_readaheads_failed(stats_.readaheads_failed() + 1);
+
+        // TODO(primiano): optimization: this MoveToEnd() is the reason why
+        // MoveNext() (that is called in the outer for(;;MoveNext)) needs to
+        // deal gracefully with the case of |cur|==|seq_end|. Maybe we can do
+        // something to avoid that check by reshuffling the code here?
+        read_iter_.MoveToEnd();
+
+        // This break will go back to beginning of the for(;;MoveNext()). That
+        // will move to the next sequence because we set the read iterator to
+        // its end.
+        break;
+      }
+
+      PERFETTO_DCHECK(ra_res == ReadAheadResult::kFailedStayOnSameSequence);
+
+      // In this case ReadAhead() might advance |read_iter_|, so we need to
+      // re-cache the |chunk_meta| pointer to point to the current chunk.
+      chunk_meta = &*read_iter_;
+      chunk_meta->set_last_read_packet_skipped(true);
+      previous_packet_dropped = true;
+    }  // while(...)  [iterate over packet fragments for the current chunk].
+  }    // for(;;MoveNext()) [iterate over chunks].
+}
+
+TraceBuffer::ReadAheadResult TraceBuffer::ReadAhead(TracePacket* packet) {
+  static_assert(static_cast<ChunkID>(kMaxChunkID + 1) == 0,
+                "relying on kMaxChunkID to wrap naturally");
+  TRACE_BUFFER_DLOG(" readahead start @ chunk %u", read_iter_.chunk_id());
+  ChunkID next_chunk_id = read_iter_.chunk_id() + 1;
+  SequenceIterator it = read_iter_;
+  for (it.MoveNext(); it.is_valid(); it.MoveNext(), next_chunk_id++) {
+    // We should stay within the same sequence while iterating here.
+    PERFETTO_DCHECK(it.producer_id() == read_iter_.producer_id() &&
+                    it.writer_id() == read_iter_.writer_id());
+
+    TRACE_BUFFER_DLOG("   expected chunk ID: %u, actual ID: %u", next_chunk_id,
+                      it.chunk_id());
+
+    if (PERFETTO_UNLIKELY((*it).num_fragments == 0))
+      continue;
+
+    // If we miss the next chunk, stop looking in the current sequence and
+    // try another sequence. This chunk might come in the near future.
+    // The second condition is the edge case of a buggy/malicious
+    // producer. The ChunkID is contiguous but its flags don't make sense.
+    if (it.chunk_id() != next_chunk_id ||
+        PERFETTO_UNLIKELY(
+            !((*it).flags & kFirstPacketContinuesFromPrevChunk))) {
+      return ReadAheadResult::kFailedMoveToNextSequence;
+    }
+
+    // If the chunk is contiguous but has not been patched yet move to the next
+    // sequence and try coming back here on the next ReadNextTracePacket() call.
+    // TODO(primiano): add a test to cover this, it's a subtle case.
+    if ((*it).flags & kChunkNeedsPatching)
+      return ReadAheadResult::kFailedMoveToNextSequence;
+
+    // This is the case of an intermediate chunk which contains only one
+    // fragment which continues on the next chunk. This is the case for large
+    // packets, e.g.: [Packet0, Packet1(0)] [Packet1(1)] [Packet1(2), ...]
+    // (Packet1(X) := fragment X of Packet1).
+    if ((*it).num_fragments == 1 &&
+        ((*it).flags & kLastPacketContinuesOnNextChunk)) {
+      continue;
+    }
+
+    // We made it! We got all fragments for the packet without holes.
+    TRACE_BUFFER_DLOG("  readahead success @ chunk %u", it.chunk_id());
+    PERFETTO_DCHECK(((*it).num_fragments == 1 &&
+                     !((*it).flags & kLastPacketContinuesOnNextChunk)) ||
+                    (*it).num_fragments > 1);
+
+    // Now let's re-iterate over the [read_iter_, it] sequence and mark
+    // all the fragments as read.
+    bool packet_corruption = false;
+    for (;;) {
+      PERFETTO_DCHECK(read_iter_.is_valid());
+      TRACE_BUFFER_DLOG("    commit chunk %u", read_iter_.chunk_id());
+      if (PERFETTO_LIKELY((*read_iter_).num_fragments > 0)) {
+        // In the unlikely case of a corrupted packet (corrupted or empty
+        // fragment), invalidate the all stitching and move on to the next chunk
+        // in the same sequence, if any.
+        packet_corruption |= ReadNextPacketInChunk(&*read_iter_, packet) ==
+                             ReadPacketResult::kFailedInvalidPacket;
+      }
+      if (read_iter_.cur == it.cur)
+        break;
+      read_iter_.MoveNext();
+    }  // for(;;)
+    PERFETTO_DCHECK(read_iter_.cur == it.cur);
+
+    if (PERFETTO_UNLIKELY(packet_corruption)) {
+      // ReadNextPacketInChunk() already records an abi violation for this case.
+      *packet = TracePacket();  // clear.
+      return ReadAheadResult::kFailedStayOnSameSequence;
+    }
+
+    return ReadAheadResult::kSucceededReturnSlices;
+  }  // for(it...)  [readahead loop]
+  return ReadAheadResult::kFailedMoveToNextSequence;
+}
+
+TraceBuffer::ReadPacketResult TraceBuffer::ReadNextPacketInChunk(
+    ChunkMeta* chunk_meta,
+    TracePacket* packet) {
+  PERFETTO_DCHECK(chunk_meta->num_fragments_read < chunk_meta->num_fragments);
+  PERFETTO_DCHECK(!(chunk_meta->flags & kChunkNeedsPatching));
+
+  const uint8_t* record_begin =
+      reinterpret_cast<const uint8_t*>(chunk_meta->chunk_record);
+  const uint8_t* record_end = record_begin + chunk_meta->chunk_record->size;
+  const uint8_t* packets_begin = record_begin + sizeof(ChunkRecord);
+  const uint8_t* packet_begin = packets_begin + chunk_meta->cur_fragment_offset;
+
+  if (PERFETTO_UNLIKELY(packet_begin < packets_begin ||
+                        packet_begin >= record_end)) {
+    // The producer has a bug or is malicious and did declare that the chunk
+    // contains more packets beyond its boundaries.
+    stats_.set_abi_violations(stats_.abi_violations() + 1);
+    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
+    chunk_meta->cur_fragment_offset = 0;
+    chunk_meta->num_fragments_read = chunk_meta->num_fragments;
+    if (PERFETTO_LIKELY(chunk_meta->is_complete())) {
+      stats_.set_chunks_read(stats_.chunks_read() + 1);
+      stats_.set_bytes_read(stats_.bytes_read() +
+                            chunk_meta->chunk_record->size);
+    }
+    return ReadPacketResult::kFailedInvalidPacket;
+  }
+
+  // A packet (or a fragment) starts with a varint stating its size, followed
+  // by its content. The varint shouldn't be larger than 4 bytes (just in case
+  // the producer is using a redundant encoding)
+  uint64_t packet_size = 0;
+  const uint8_t* header_end =
+      std::min(packet_begin + protozero::proto_utils::kMessageLengthFieldSize,
+               record_end);
+  const uint8_t* packet_data = protozero::proto_utils::ParseVarInt(
+      packet_begin, header_end, &packet_size);
+
+  const uint8_t* next_packet = packet_data + packet_size;
+  if (PERFETTO_UNLIKELY(next_packet <= packet_begin ||
+                        next_packet > record_end)) {
+    // In BufferExhaustedPolicy::kDrop mode, TraceWriter may abort a fragmented
+    // packet by writing an invalid size in the last fragment's header. We
+    // should handle this case without recording an ABI violation (since Android
+    // R).
+    if (packet_size != SharedMemoryABI::kPacketSizeDropPacket) {
+      stats_.set_abi_violations(stats_.abi_violations() + 1);
+      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
+    } else {
+      stats_.set_trace_writer_packet_loss(stats_.trace_writer_packet_loss() +
+                                          1);
+    }
+    chunk_meta->cur_fragment_offset = 0;
+    chunk_meta->num_fragments_read = chunk_meta->num_fragments;
+    if (PERFETTO_LIKELY(chunk_meta->is_complete())) {
+      stats_.set_chunks_read(stats_.chunks_read() + 1);
+      stats_.set_bytes_read(stats_.bytes_read() +
+                            chunk_meta->chunk_record->size);
+    }
+    return ReadPacketResult::kFailedInvalidPacket;
+  }
+
+  chunk_meta->cur_fragment_offset =
+      static_cast<uint16_t>(next_packet - packets_begin);
+  chunk_meta->num_fragments_read++;
+
+  if (PERFETTO_UNLIKELY(chunk_meta->num_fragments_read ==
+                            chunk_meta->num_fragments &&
+                        chunk_meta->is_complete())) {
+    stats_.set_chunks_read(stats_.chunks_read() + 1);
+    stats_.set_bytes_read(stats_.bytes_read() + chunk_meta->chunk_record->size);
+  } else {
+    // We have at least one more packet to parse. It should be within the chunk.
+    if (chunk_meta->cur_fragment_offset + sizeof(ChunkRecord) >=
+        chunk_meta->chunk_record->size) {
+      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
+    }
+  }
+
+  chunk_meta->set_last_read_packet_skipped(false);
+
+  if (PERFETTO_UNLIKELY(packet_size == 0))
+    return ReadPacketResult::kFailedEmptyPacket;
+
+  if (PERFETTO_LIKELY(packet))
+    packet->AddSlice(packet_data, static_cast<size_t>(packet_size));
+
+  return ReadPacketResult::kSucceeded;
+}
+
+void TraceBuffer::DiscardWrite() {
+  PERFETTO_DCHECK(overwrite_policy_ == kDiscard);
+  discard_writes_ = true;
+  stats_.set_chunks_discarded(stats_.chunks_discarded() + 1);
+  TRACE_BUFFER_DLOG("  discarding write");
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/core/tracing_service_impl.cc
+// gen_amalgamated begin header: src/tracing/core/tracing_service_impl.h
+// gen_amalgamated begin header: include/perfetto/ext/base/circular_queue.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_CIRCULAR_QUEUE_H_
+#define INCLUDE_PERFETTO_EXT_BASE_CIRCULAR_QUEUE_H_
+
+#include <stdint.h>
+#include <iterator>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+
+namespace perfetto {
+namespace base {
+
+// CircularQueue is a push-back-only / pop-front-only queue with the following
+// characteristics:
+// - The storage is based on a flat circular buffer. Beginning and end wrap
+//   as necessary, to keep pushes and pops O(1) as long as capacity expansion is
+//   not required.
+// - Capacity is automatically expanded like in a std::vector. Expansion has a
+//   O(N) cost.
+// - It allows random access, allowing in-place std::sort.
+// - Iterators are not stable. Mutating the container invalidates all iterators.
+// - It doesn't bother with const-correctness.
+//
+// Implementation details:
+// Internally, |begin|, |end| and iterators use 64-bit monotonic indexes, which
+// are incremented as if the queue was backed by unlimited storage.
+// Even assuming that elements are inserted and removed every nanosecond, 64 bit
+// is enough for 584 years.
+// Wrapping happens only when addressing elements in the underlying circular
+// storage. This limits the complexity and avoiding dealing with modular
+// arithmetic all over the places.
+template <class T>
+class CircularQueue {
+ public:
+  class Iterator {
+   public:
+    using difference_type = ptrdiff_t;
+    using value_type = T;
+    using pointer = T*;
+    using reference = T&;
+    using iterator_category = std::random_access_iterator_tag;
+
+    Iterator(CircularQueue* queue, uint64_t pos, uint32_t generation)
+        : queue_(queue),
+          pos_(pos)
+#if PERFETTO_DCHECK_IS_ON()
+          ,
+          generation_(generation)
+#endif
+    {
+      ignore_result(generation);
+    }
+
+    Iterator(const Iterator&) noexcept = default;
+    Iterator& operator=(const Iterator&) noexcept = default;
+    Iterator(Iterator&&) noexcept = default;
+    Iterator& operator=(Iterator&&) noexcept = default;
+
+    T* operator->() const {
+#if PERFETTO_DCHECK_IS_ON()
+      PERFETTO_DCHECK(generation_ == queue_->generation());
+#endif
+      return queue_->Get(pos_);
+    }
+
+    T& operator*() const { return *(operator->()); }
+
+    value_type& operator[](difference_type i) { return *(*this + i); }
+
+    Iterator& operator++() {
+      Add(1);
+      return *this;
+    }
+
+    Iterator operator++(int) {
+      Iterator ret = *this;
+      Add(1);
+      return ret;
+    }
+
+    Iterator& operator--() {
+      Add(-1);
+      return *this;
+    }
+
+    Iterator operator--(int) {
+      Iterator ret = *this;
+      Add(-1);
+      return ret;
+    }
+
+    friend Iterator operator+(const Iterator& iter, difference_type offset) {
+      Iterator ret = iter;
+      ret.Add(offset);
+      return ret;
+    }
+
+    Iterator& operator+=(difference_type offset) {
+      Add(offset);
+      return *this;
+    }
+
+    friend Iterator operator-(const Iterator& iter, difference_type offset) {
+      Iterator ret = iter;
+      ret.Add(-offset);
+      return ret;
+    }
+
+    Iterator& operator-=(difference_type offset) {
+      Add(-offset);
+      return *this;
+    }
+
+    friend ptrdiff_t operator-(const Iterator& lhs, const Iterator& rhs) {
+      return static_cast<ptrdiff_t>(lhs.pos_) -
+             static_cast<ptrdiff_t>(rhs.pos_);
+    }
+
+    friend bool operator==(const Iterator& lhs, const Iterator& rhs) {
+      return lhs.pos_ == rhs.pos_;
+    }
+
+    friend bool operator!=(const Iterator& lhs, const Iterator& rhs) {
+      return lhs.pos_ != rhs.pos_;
+    }
+
+    friend bool operator<(const Iterator& lhs, const Iterator& rhs) {
+      return lhs.pos_ < rhs.pos_;
+    }
+
+    friend bool operator<=(const Iterator& lhs, const Iterator& rhs) {
+      return lhs.pos_ <= rhs.pos_;
+    }
+
+    friend bool operator>(const Iterator& lhs, const Iterator& rhs) {
+      return lhs.pos_ > rhs.pos_;
+    }
+
+    friend bool operator>=(const Iterator& lhs, const Iterator& rhs) {
+      return lhs.pos_ >= rhs.pos_;
+    }
+
+   private:
+    inline void Add(difference_type offset) {
+      pos_ = static_cast<uint64_t>(static_cast<difference_type>(pos_) + offset);
+      PERFETTO_DCHECK(pos_ <= queue_->end_);
+    }
+
+    CircularQueue* queue_;
+    uint64_t pos_;
+
+#if PERFETTO_DCHECK_IS_ON()
+    uint32_t generation_;
+#endif
+  };
+
+  CircularQueue(size_t initial_capacity = 1024) { Grow(initial_capacity); }
+
+  CircularQueue(CircularQueue&& other) noexcept {
+    // Copy all fields using the (private) default copy assignment operator.
+    *this = other;
+    increment_generation();
+    new (&other) CircularQueue();  // Reset the old queue so it's still usable.
+  }
+
+  CircularQueue& operator=(CircularQueue&& other) {
+    this->~CircularQueue();                      // Destroy the current state.
+    new (this) CircularQueue(std::move(other));  // Use the move ctor above.
+    return *this;
+  }
+
+  ~CircularQueue() {
+    if (!entries_) {
+      PERFETTO_DCHECK(empty());
+      return;
+    }
+    clear();  // Invoke destructors on all alive entries.
+    PERFETTO_DCHECK(empty());
+    free(entries_);
+  }
+
+  template <typename... Args>
+  void emplace_back(Args&&... args) {
+    increment_generation();
+    if (PERFETTO_UNLIKELY(size() >= capacity_))
+      Grow();
+    T* slot = Get(end_++);
+    new (slot) T(std::forward<Args>(args)...);
+  }
+
+  void erase_front(size_t n) {
+    increment_generation();
+    for (; n && (begin_ < end_); --n) {
+      Get(begin_)->~T();
+      begin_++;  // This needs to be its own statement, Get() checks begin_.
+    }
+  }
+
+  void pop_front() { erase_front(1); }
+
+  void clear() { erase_front(size()); }
+
+  T& at(size_t idx) {
+    PERFETTO_DCHECK(idx < size());
+    return *Get(begin_ + idx);
+  }
+
+  Iterator begin() { return Iterator(this, begin_, generation()); }
+  Iterator end() { return Iterator(this, end_, generation()); }
+  T& front() { return *begin(); }
+  T& back() { return *(end() - 1); }
+
+  bool empty() const { return size() == 0; }
+
+  size_t size() const {
+    PERFETTO_DCHECK(end_ - begin_ <= capacity_);
+    return static_cast<size_t>(end_ - begin_);
+  }
+
+  size_t capacity() const { return capacity_; }
+
+#if PERFETTO_DCHECK_IS_ON()
+  uint32_t generation() const { return generation_; }
+  void increment_generation() { ++generation_; }
+#else
+  uint32_t generation() const { return 0; }
+  void increment_generation() {}
+#endif
+
+ private:
+  CircularQueue(const CircularQueue&) = delete;
+  CircularQueue& operator=(const CircularQueue&) = default;
+
+  void Grow(size_t new_capacity = 0) {
+    // Capacity must be always a power of two. This allows Get() to use a simple
+    // bitwise-AND for handling the wrapping instead of a full division.
+    new_capacity = new_capacity ? new_capacity : capacity_ * 2;
+    PERFETTO_CHECK((new_capacity & (new_capacity - 1)) == 0);  // Must be pow2.
+
+    // On 32-bit systems this might hit the 4GB wall and overflow. We can't do
+    // anything other than crash in this case.
+    PERFETTO_CHECK(new_capacity > capacity_);
+    size_t malloc_size = new_capacity * sizeof(T);
+    PERFETTO_CHECK(malloc_size > new_capacity);
+    auto* new_vec = static_cast<T*>(malloc(malloc_size));
+
+    // Move all elements in the expanded array.
+    size_t new_size = 0;
+    for (uint64_t i = begin_; i < end_; i++)
+      new (&new_vec[new_size++]) T(std::move(*Get(i)));  // Placement move ctor.
+
+    // Even if all the elements are std::move()-d and likely empty, we are still
+    // required to call the dtor for them.
+    for (uint64_t i = begin_; i < end_; i++)
+      Get(i)->~T();
+    free(entries_);  // It's fine to free(nullptr) (for the ctor call case).
+
+    begin_ = 0;
+    end_ = new_size;
+    capacity_ = new_capacity;
+    entries_ = new_vec;
+  }
+
+  inline T* Get(uint64_t pos) {
+    PERFETTO_DCHECK(pos >= begin_ && pos < end_);
+    PERFETTO_DCHECK((capacity_ & (capacity_ - 1)) == 0);  // Must be a pow2.
+    auto index = static_cast<size_t>(pos & (capacity_ - 1));
+    return &entries_[index];
+  }
+
+  // Underlying storage. It's raw malloc-ed rather than being a unique_ptr<T[]>
+  // to allow having uninitialized entries inside it.
+  T* entries_ = nullptr;
+  size_t capacity_ = 0;  // Number of allocated slots (NOT bytes) in |entries_|.
+
+  // The |begin_| and |end_| indexes are monotonic and never wrap. Modular arith
+  // is used only when dereferencing entries in the vector.
+  uint64_t begin_ = 0;
+  uint64_t end_ = 0;
+
+// Generation is used in debug builds only for checking iterator validity.
+#if PERFETTO_DCHECK_IS_ON()
+  uint32_t generation_ = 0;
+#endif
+};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_CIRCULAR_QUEUE_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_CORE_TRACING_SERVICE_IMPL_H_
+#define SRC_TRACING_CORE_TRACING_SERVICE_IMPL_H_
+
+#include <algorithm>
+#include <functional>
+#include <map>
+#include <memory>
+#include <mutex>
+#include <random>
+#include <set>
+#include <utility>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/status.h"
+// gen_amalgamated expanded: #include "perfetto/base/time.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/circular_queue.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/optional.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/periodic_task.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/commit_data_request.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/observable_events.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_stats.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_config.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/trace_config.h"
+// gen_amalgamated expanded: #include "src/android_stats/perfetto_atoms.h"
+// gen_amalgamated expanded: #include "src/tracing/core/id_allocator.h"
+
+namespace protozero {
+class MessageFilter;
+}
+
+namespace perfetto {
+
+namespace base {
+class TaskRunner;
+}  // namespace base
+
+class Consumer;
+class Producer;
+class SharedMemory;
+class SharedMemoryArbiterImpl;
+class TraceBuffer;
+class TracePacket;
+
+// The tracing service business logic.
+class TracingServiceImpl : public TracingService {
+ private:
+  struct DataSourceInstance;
+
+ public:
+  static constexpr size_t kDefaultShmPageSize = 4096ul;
+  static constexpr size_t kDefaultShmSize = 256 * 1024ul;
+  static constexpr size_t kMaxShmSize = 32 * 1024 * 1024ul;
+  static constexpr uint32_t kDataSourceStopTimeoutMs = 5000;
+  static constexpr uint8_t kSyncMarker[] = {0x82, 0x47, 0x7a, 0x76, 0xb2, 0x8d,
+                                            0x42, 0xba, 0x81, 0xdc, 0x33, 0x32,
+                                            0x6d, 0x57, 0xa0, 0x79};
+
+  // The implementation behind the service endpoint exposed to each producer.
+  class ProducerEndpointImpl : public TracingService::ProducerEndpoint {
+   public:
+    ProducerEndpointImpl(ProducerID,
+                         uid_t uid,
+                         TracingServiceImpl*,
+                         base::TaskRunner*,
+                         Producer*,
+                         const std::string& producer_name,
+                         const std::string& sdk_version,
+                         bool in_process,
+                         bool smb_scraping_enabled);
+    ~ProducerEndpointImpl() override;
+
+    // TracingService::ProducerEndpoint implementation.
+    void RegisterDataSource(const DataSourceDescriptor&) override;
+    void UnregisterDataSource(const std::string& name) override;
+    void RegisterTraceWriter(uint32_t writer_id,
+                             uint32_t target_buffer) override;
+    void UnregisterTraceWriter(uint32_t writer_id) override;
+    void CommitData(const CommitDataRequest&, CommitDataCallback) override;
+    void SetupSharedMemory(std::unique_ptr<SharedMemory>,
+                           size_t page_size_bytes,
+                           bool provided_by_producer);
+    std::unique_ptr<TraceWriter> CreateTraceWriter(
+        BufferID,
+        BufferExhaustedPolicy) override;
+    SharedMemoryArbiter* MaybeSharedMemoryArbiter() override;
+    bool IsShmemProvidedByProducer() const override;
+    void NotifyFlushComplete(FlushRequestID) override;
+    void NotifyDataSourceStarted(DataSourceInstanceID) override;
+    void NotifyDataSourceStopped(DataSourceInstanceID) override;
+    SharedMemory* shared_memory() const override;
+    size_t shared_buffer_page_size_kb() const override;
+    void ActivateTriggers(const std::vector<std::string>&) override;
+    void Sync(std::function<void()> callback) override;
+
+    void OnTracingSetup();
+    void SetupDataSource(DataSourceInstanceID, const DataSourceConfig&);
+    void StartDataSource(DataSourceInstanceID, const DataSourceConfig&);
+    void StopDataSource(DataSourceInstanceID);
+    void Flush(FlushRequestID, const std::vector<DataSourceInstanceID>&);
+    void OnFreeBuffers(const std::vector<BufferID>& target_buffers);
+    void ClearIncrementalState(const std::vector<DataSourceInstanceID>&);
+
+    bool is_allowed_target_buffer(BufferID buffer_id) const {
+      return allowed_target_buffers_.count(buffer_id);
+    }
+
+    base::Optional<BufferID> buffer_id_for_writer(WriterID writer_id) const {
+      const auto it = writers_.find(writer_id);
+      if (it != writers_.end())
+        return it->second;
+      return base::nullopt;
+    }
+
+    uid_t uid() const { return uid_; }
+
+   private:
+    friend class TracingServiceImpl;
+    friend class TracingServiceImplTest;
+    friend class TracingIntegrationTest;
+    ProducerEndpointImpl(const ProducerEndpointImpl&) = delete;
+    ProducerEndpointImpl& operator=(const ProducerEndpointImpl&) = delete;
+
+    ProducerID const id_;
+    const uid_t uid_;
+    TracingServiceImpl* const service_;
+    base::TaskRunner* const task_runner_;
+    Producer* producer_;
+    std::unique_ptr<SharedMemory> shared_memory_;
+    size_t shared_buffer_page_size_kb_ = 0;
+    SharedMemoryABI shmem_abi_;
+    size_t shmem_size_hint_bytes_ = 0;
+    size_t shmem_page_size_hint_bytes_ = 0;
+    bool is_shmem_provided_by_producer_ = false;
+    const std::string name_;
+    std::string sdk_version_;
+    bool in_process_;
+    bool smb_scraping_enabled_;
+
+    // Set of the global target_buffer IDs that the producer is configured to
+    // write into in any active tracing session.
+    std::set<BufferID> allowed_target_buffers_;
+
+    // Maps registered TraceWriter IDs to their target buffers as registered by
+    // the producer. Note that producers aren't required to register their
+    // writers, so we may see commits of chunks with WriterIDs that aren't
+    // contained in this map. However, if a producer does register a writer, the
+    // service will prevent the writer from writing into any other buffer than
+    // the one associated with it here. The BufferIDs stored in this map are
+    // untrusted, so need to be verified against |allowed_target_buffers_|
+    // before use.
+    std::map<WriterID, BufferID> writers_;
+
+    // This is used only in in-process configurations.
+    // SharedMemoryArbiterImpl methods themselves are thread-safe.
+    std::unique_ptr<SharedMemoryArbiterImpl> inproc_shmem_arbiter_;
+
+    PERFETTO_THREAD_CHECKER(thread_checker_)
+    base::WeakPtrFactory<ProducerEndpointImpl> weak_ptr_factory_;  // Keep last.
+  };
+
+  // The implementation behind the service endpoint exposed to each consumer.
+  class ConsumerEndpointImpl : public TracingService::ConsumerEndpoint {
+   public:
+    ConsumerEndpointImpl(TracingServiceImpl*,
+                         base::TaskRunner*,
+                         Consumer*,
+                         uid_t uid);
+    ~ConsumerEndpointImpl() override;
+
+    void NotifyOnTracingDisabled(const std::string& error);
+
+    // TracingService::ConsumerEndpoint implementation.
+    void EnableTracing(const TraceConfig&, base::ScopedFile) override;
+    void ChangeTraceConfig(const TraceConfig& cfg) override;
+    void StartTracing() override;
+    void DisableTracing() override;
+    void ReadBuffers() override;
+    void FreeBuffers() override;
+    void Flush(uint32_t timeout_ms, FlushCallback) override;
+    void Detach(const std::string& key) override;
+    void Attach(const std::string& key) override;
+    void GetTraceStats() override;
+    void ObserveEvents(uint32_t enabled_event_types) override;
+    void QueryServiceState(QueryServiceStateCallback) override;
+    void QueryCapabilities(QueryCapabilitiesCallback) override;
+    void SaveTraceForBugreport(SaveTraceForBugreportCallback) override;
+
+    // Will queue a task to notify the consumer about the state change.
+    void OnDataSourceInstanceStateChange(const ProducerEndpointImpl&,
+                                         const DataSourceInstance&);
+    void OnAllDataSourcesStarted();
+
+   private:
+    friend class TracingServiceImpl;
+    ConsumerEndpointImpl(const ConsumerEndpointImpl&) = delete;
+    ConsumerEndpointImpl& operator=(const ConsumerEndpointImpl&) = delete;
+
+    // Returns a pointer to an ObservableEvents object that the caller can fill
+    // and schedules a task to send the ObservableEvents to the consumer.
+    ObservableEvents* AddObservableEvents();
+
+    base::TaskRunner* const task_runner_;
+    TracingServiceImpl* const service_;
+    Consumer* const consumer_;
+    uid_t const uid_;
+    TracingSessionID tracing_session_id_ = 0;
+
+    // Whether the consumer is interested in DataSourceInstance state change
+    // events.
+    uint32_t observable_events_mask_ = 0;
+
+    // ObservableEvents that will be sent to the consumer. If set, a task to
+    // flush the events to the consumer has been queued.
+    std::unique_ptr<ObservableEvents> observable_events_;
+
+    PERFETTO_THREAD_CHECKER(thread_checker_)
+    base::WeakPtrFactory<ConsumerEndpointImpl> weak_ptr_factory_;  // Keep last.
+  };
+
+  explicit TracingServiceImpl(std::unique_ptr<SharedMemory::Factory>,
+                              base::TaskRunner*);
+  ~TracingServiceImpl() override;
+
+  // Called by ProducerEndpointImpl.
+  void DisconnectProducer(ProducerID);
+  void RegisterDataSource(ProducerID, const DataSourceDescriptor&);
+  void UnregisterDataSource(ProducerID, const std::string& name);
+  void CopyProducerPageIntoLogBuffer(ProducerID,
+                                     uid_t,
+                                     WriterID,
+                                     ChunkID,
+                                     BufferID,
+                                     uint16_t num_fragments,
+                                     uint8_t chunk_flags,
+                                     bool chunk_complete,
+                                     const uint8_t* src,
+                                     size_t size);
+  void ApplyChunkPatches(ProducerID,
+                         const std::vector<CommitDataRequest::ChunkToPatch>&);
+  void NotifyFlushDoneForProducer(ProducerID, FlushRequestID);
+  void NotifyDataSourceStarted(ProducerID, const DataSourceInstanceID);
+  void NotifyDataSourceStopped(ProducerID, const DataSourceInstanceID);
+  void ActivateTriggers(ProducerID, const std::vector<std::string>& triggers);
+
+  // Called by ConsumerEndpointImpl.
+  bool DetachConsumer(ConsumerEndpointImpl*, const std::string& key);
+  bool AttachConsumer(ConsumerEndpointImpl*, const std::string& key);
+  void DisconnectConsumer(ConsumerEndpointImpl*);
+  base::Status EnableTracing(ConsumerEndpointImpl*,
+                             const TraceConfig&,
+                             base::ScopedFile);
+  void ChangeTraceConfig(ConsumerEndpointImpl*, const TraceConfig&);
+
+  base::Status StartTracing(TracingSessionID);
+  void DisableTracing(TracingSessionID, bool disable_immediately = false);
+  void Flush(TracingSessionID tsid,
+             uint32_t timeout_ms,
+             ConsumerEndpoint::FlushCallback);
+  void FlushAndDisableTracing(TracingSessionID);
+  bool ReadBuffers(TracingSessionID, ConsumerEndpointImpl*);
+  void FreeBuffers(TracingSessionID);
+
+  // Service implementation.
+  std::unique_ptr<TracingService::ProducerEndpoint> ConnectProducer(
+      Producer*,
+      uid_t uid,
+      const std::string& producer_name,
+      size_t shared_memory_size_hint_bytes = 0,
+      bool in_process = false,
+      ProducerSMBScrapingMode smb_scraping_mode =
+          ProducerSMBScrapingMode::kDefault,
+      size_t shared_memory_page_size_hint_bytes = 0,
+      std::unique_ptr<SharedMemory> shm = nullptr,
+      const std::string& sdk_version = {}) override;
+
+  std::unique_ptr<TracingService::ConsumerEndpoint> ConnectConsumer(
+      Consumer*,
+      uid_t) override;
+
+  // Set whether SMB scraping should be enabled by default or not. Producers can
+  // override this setting for their own SMBs.
+  void SetSMBScrapingEnabled(bool enabled) override {
+    smb_scraping_enabled_ = enabled;
+  }
+
+  // Exposed mainly for testing.
+  size_t num_producers() const { return producers_.size(); }
+  ProducerEndpointImpl* GetProducer(ProducerID) const;
+
+ private:
+  friend class TracingServiceImplTest;
+  friend class TracingIntegrationTest;
+
+  static constexpr int64_t kOneDayInNs = 24ll * 60 * 60 * 1000 * 1000 * 1000;
+
+  struct TriggerHistory {
+    int64_t timestamp_ns;
+    uint64_t name_hash;
+
+    bool operator<(const TriggerHistory& other) const {
+      return timestamp_ns < other.timestamp_ns;
+    }
+  };
+
+  struct RegisteredDataSource {
+    ProducerID producer_id;
+    DataSourceDescriptor descriptor;
+  };
+
+  // Represents an active data source for a tracing session.
+  struct DataSourceInstance {
+    DataSourceInstance(DataSourceInstanceID id,
+                       const DataSourceConfig& cfg,
+                       const std::string& ds_name,
+                       bool notify_on_start,
+                       bool notify_on_stop,
+                       bool handles_incremental_state_invalidation)
+        : instance_id(id),
+          config(cfg),
+          data_source_name(ds_name),
+          will_notify_on_start(notify_on_start),
+          will_notify_on_stop(notify_on_stop),
+          handles_incremental_state_clear(
+              handles_incremental_state_invalidation) {}
+    DataSourceInstance(const DataSourceInstance&) = delete;
+    DataSourceInstance& operator=(const DataSourceInstance&) = delete;
+
+    DataSourceInstanceID instance_id;
+    DataSourceConfig config;
+    std::string data_source_name;
+    bool will_notify_on_start;
+    bool will_notify_on_stop;
+    bool handles_incremental_state_clear;
+
+    enum DataSourceInstanceState {
+      CONFIGURED,
+      STARTING,
+      STARTED,
+      STOPPING,
+      STOPPED
+    };
+    DataSourceInstanceState state = CONFIGURED;
+  };
+
+  struct PendingFlush {
+    std::set<ProducerID> producers;
+    ConsumerEndpoint::FlushCallback callback;
+    explicit PendingFlush(decltype(callback) cb) : callback(std::move(cb)) {}
+  };
+
+  // Holds the state of a tracing session. A tracing session is uniquely bound
+  // a specific Consumer. Each Consumer can own one or more sessions.
+  struct TracingSession {
+    enum State {
+      DISABLED = 0,
+      CONFIGURED,
+      STARTED,
+      DISABLING_WAITING_STOP_ACKS
+    };
+
+    TracingSession(TracingSessionID,
+                   ConsumerEndpointImpl*,
+                   const TraceConfig&,
+                   base::TaskRunner*);
+    TracingSession(TracingSession&&) = delete;
+    TracingSession& operator=(TracingSession&&) = delete;
+
+    size_t num_buffers() const { return buffers_index.size(); }
+
+    uint32_t delay_to_next_write_period_ms() const {
+      PERFETTO_DCHECK(write_period_ms > 0);
+      return write_period_ms -
+             static_cast<uint32_t>(base::GetWallTimeMs().count() %
+                                   write_period_ms);
+    }
+
+    uint32_t flush_timeout_ms() {
+      uint32_t timeout_ms = config.flush_timeout_ms();
+      return timeout_ms ? timeout_ms : kDefaultFlushTimeoutMs;
+    }
+
+    uint32_t data_source_stop_timeout_ms() {
+      uint32_t timeout_ms = config.data_source_stop_timeout_ms();
+      return timeout_ms ? timeout_ms : kDataSourceStopTimeoutMs;
+    }
+
+    PacketSequenceID GetPacketSequenceID(ProducerID producer_id,
+                                         WriterID writer_id) {
+      auto key = std::make_pair(producer_id, writer_id);
+      auto it = packet_sequence_ids.find(key);
+      if (it != packet_sequence_ids.end())
+        return it->second;
+      // We shouldn't run out of sequence IDs (producer ID is 16 bit, writer IDs
+      // are limited to 1024).
+      static_assert(kMaxPacketSequenceID > kMaxProducerID * kMaxWriterID,
+                    "PacketSequenceID value space doesn't cover service "
+                    "sequence ID and all producer/writer ID combinations!");
+      PERFETTO_DCHECK(last_packet_sequence_id < kMaxPacketSequenceID);
+      PacketSequenceID sequence_id = ++last_packet_sequence_id;
+      packet_sequence_ids[key] = sequence_id;
+      return sequence_id;
+    }
+
+    DataSourceInstance* GetDataSourceInstance(
+        ProducerID producer_id,
+        DataSourceInstanceID instance_id) {
+      for (auto& inst_kv : data_source_instances) {
+        if (inst_kv.first != producer_id ||
+            inst_kv.second.instance_id != instance_id) {
+          continue;
+        }
+        return &inst_kv.second;
+      }
+      return nullptr;
+    }
+
+    bool AllDataSourceInstancesStarted() {
+      return std::all_of(
+          data_source_instances.begin(), data_source_instances.end(),
+          [](decltype(data_source_instances)::const_reference x) {
+            return x.second.state == DataSourceInstance::STARTED;
+          });
+    }
+
+    bool AllDataSourceInstancesStopped() {
+      return std::all_of(
+          data_source_instances.begin(), data_source_instances.end(),
+          [](decltype(data_source_instances)::const_reference x) {
+            return x.second.state == DataSourceInstance::STOPPED;
+          });
+    }
+
+    const TracingSessionID id;
+
+    // The consumer that started the session.
+    // Can be nullptr if the consumer detached from the session.
+    ConsumerEndpointImpl* consumer_maybe_null;
+
+    // Unix uid of the consumer. This is valid even after the consumer detaches
+    // and does not change for the entire duration of the session. It is used to
+    // prevent that a consumer re-attaches to a session from a different uid.
+    uid_t const consumer_uid;
+
+    // The list of triggers this session received while alive and the time they
+    // were received at. This is used to insert 'fake' packets back to the
+    // consumer so they can tell when some event happened. The order matches the
+    // order they were received.
+    struct TriggerInfo {
+      uint64_t boot_time_ns;
+      std::string trigger_name;
+      std::string producer_name;
+      uid_t producer_uid;
+    };
+    std::vector<TriggerInfo> received_triggers;
+
+    // The trace config provided by the Consumer when calling
+    // EnableTracing(), plus any updates performed by ChangeTraceConfig.
+    TraceConfig config;
+
+    // List of data source instances that have been enabled on the various
+    // producers for this tracing session.
+    // TODO(rsavitski): at the time of writing, the map structure is unused
+    // (even when the calling code has a key). This is also an opportunity to
+    // consider an alternative data type, e.g. a map of vectors.
+    std::multimap<ProducerID, DataSourceInstance> data_source_instances;
+
+    // For each Flush(N) request, keeps track of the set of producers for which
+    // we are still awaiting a NotifyFlushComplete(N) ack.
+    std::map<FlushRequestID, PendingFlush> pending_flushes;
+
+    // Maps a per-trace-session buffer index into the corresponding global
+    // BufferID (shared namespace amongst all consumers). This vector has as
+    // many entries as |config.buffers_size()|.
+    std::vector<BufferID> buffers_index;
+
+    std::map<std::pair<ProducerID, WriterID>, PacketSequenceID>
+        packet_sequence_ids;
+    PacketSequenceID last_packet_sequence_id = kServicePacketSequenceID;
+
+    // Whether we should emit the trace stats next time we reach EOF while
+    // performing ReadBuffers.
+    bool should_emit_stats = false;
+
+    // Whether we should emit the sync marker the next time ReadBuffers() is
+    // called.
+    bool should_emit_sync_marker = false;
+
+    // Whether we mirrored the trace config back to the trace output yet.
+    bool did_emit_config = false;
+
+    // Whether we put the system info into the trace output yet.
+    bool did_emit_system_info = false;
+
+    // The number of received triggers we've emitted into the trace output.
+    size_t num_triggers_emitted_into_trace = 0;
+
+    // Packets that failed validation of the TrustedPacket.
+    uint64_t invalid_packets = 0;
+
+    // Set to true on the first call to MaybeNotifyAllDataSourcesStarted().
+    bool did_notify_all_data_source_started = false;
+
+    // Stores all lifecycle events of a particular type (i.e. associated with a
+    // single field id in the TracingServiceEvent proto).
+    struct LifecycleEvent {
+      LifecycleEvent(uint32_t f_id, uint32_t m_size = 1)
+          : field_id(f_id), max_size(m_size), timestamps(m_size) {}
+
+      // The field id of the event in the TracingServiceEvent proto.
+      uint32_t field_id;
+
+      // Stores the max size of |timestamps|. Set to 1 by default (in
+      // the constructor) but can be overriden in TraceSession constructor
+      // if a larger size is required.
+      uint32_t max_size;
+
+      // Stores the timestamps emitted for each event type (in nanoseconds).
+      // Emitted into the trace and cleared when the consumer next calls
+      // ReadBuffers.
+      base::CircularQueue<int64_t> timestamps;
+    };
+    std::vector<LifecycleEvent> lifecycle_events;
+
+    using ClockSnapshotData =
+        std::vector<std::pair<uint32_t /*clock_id*/, uint64_t /*ts*/>>;
+
+    // Initial clock snapshot, captured at trace start time (when state goes to
+    // TracingSession::STARTED). Emitted into the trace when the consumer first
+    // calls ReadBuffers().
+    ClockSnapshotData initial_clock_snapshot;
+
+    // Stores clock snapshots to emit into the trace as a ring buffer. This
+    // buffer is populated both periodically and when lifecycle events happen
+    // but only when significant clock drift is detected. Emitted into the trace
+    // and cleared when the consumer next calls ReadBuffers().
+    base::CircularQueue<ClockSnapshotData> clock_snapshot_ring_buffer;
+
+    State state = DISABLED;
+
+    // If the consumer detached the session, this variable defines the key used
+    // for identifying the session later when reattaching.
+    std::string detach_key;
+
+    // This is set when the Consumer calls sets |write_into_file| == true in the
+    // TraceConfig. In this case this represents the file we should stream the
+    // trace packets into, rather than returning it to the consumer via
+    // OnTraceData().
+    base::ScopedFile write_into_file;
+    uint32_t write_period_ms = 0;
+    uint64_t max_file_size_bytes = 0;
+    uint64_t bytes_written_into_file = 0;
+
+    // Set when using SaveTraceForBugreport(). This callback will be called
+    // when the tracing session ends and the data has been saved into the file.
+    std::function<void()> on_disable_callback_for_bugreport;
+    bool seized_for_bugreport = false;
+
+    // Periodic task for snapshotting service events (e.g. clocks, sync markers
+    // etc)
+    base::PeriodicTask snapshot_periodic_task;
+
+    // When non-NULL the packets should be post-processed using the filter.
+    std::unique_ptr<protozero::MessageFilter> trace_filter;
+    uint64_t filter_input_packets = 0;
+    uint64_t filter_input_bytes = 0;
+    uint64_t filter_output_bytes = 0;
+    uint64_t filter_errors = 0;
+  };
+
+  TracingServiceImpl(const TracingServiceImpl&) = delete;
+  TracingServiceImpl& operator=(const TracingServiceImpl&) = delete;
+
+  DataSourceInstance* SetupDataSource(const TraceConfig::DataSource&,
+                                      const TraceConfig::ProducerConfig&,
+                                      const RegisteredDataSource&,
+                                      TracingSession*);
+
+  // Returns the next available ProducerID that is not in |producers_|.
+  ProducerID GetNextProducerID();
+
+  // Returns a pointer to the |tracing_sessions_| entry or nullptr if the
+  // session doesn't exists.
+  TracingSession* GetTracingSession(TracingSessionID);
+
+  // Returns a pointer to the |tracing_sessions_| entry, matching the given
+  // uid and detach key, or nullptr if no such session exists.
+  TracingSession* GetDetachedSession(uid_t, const std::string& key);
+
+  // Update the memory guard rail by using the latest information from the
+  // shared memory and trace buffers.
+  void UpdateMemoryGuardrail();
+
+  void StartDataSourceInstance(ProducerEndpointImpl*,
+                               TracingSession*,
+                               DataSourceInstance*);
+  void StopDataSourceInstance(ProducerEndpointImpl*,
+                              TracingSession*,
+                              DataSourceInstance*,
+                              bool disable_immediately);
+  void PeriodicSnapshotTask(TracingSessionID);
+  void MaybeSnapshotClocksIntoRingBuffer(TracingSession*);
+  bool SnapshotClocks(TracingSession::ClockSnapshotData*);
+  void SnapshotLifecyleEvent(TracingSession*,
+                             uint32_t field_id,
+                             bool snapshot_clocks);
+  void EmitClockSnapshot(TracingSession*,
+                         TracingSession::ClockSnapshotData,
+                         std::vector<TracePacket>*);
+  void EmitSyncMarker(std::vector<TracePacket>*);
+  void EmitStats(TracingSession*, std::vector<TracePacket>*);
+  TraceStats GetTraceStats(TracingSession*);
+  void EmitLifecycleEvents(TracingSession*, std::vector<TracePacket>*);
+  void EmitSeizedForBugreportLifecycleEvent(std::vector<TracePacket>*);
+  void MaybeEmitTraceConfig(TracingSession*, std::vector<TracePacket>*);
+  void MaybeEmitSystemInfo(TracingSession*, std::vector<TracePacket>*);
+  void MaybeEmitReceivedTriggers(TracingSession*, std::vector<TracePacket>*);
+  void MaybeNotifyAllDataSourcesStarted(TracingSession*);
+  bool MaybeSaveTraceForBugreport(std::function<void()> callback);
+  void OnFlushTimeout(TracingSessionID, FlushRequestID);
+  void OnDisableTracingTimeout(TracingSessionID);
+  void DisableTracingNotifyConsumerAndFlushFile(TracingSession*);
+  void PeriodicFlushTask(TracingSessionID, bool post_next_only);
+  void CompleteFlush(TracingSessionID tsid,
+                     ConsumerEndpoint::FlushCallback callback,
+                     bool success);
+  void ScrapeSharedMemoryBuffers(TracingSession*, ProducerEndpointImpl*);
+  void PeriodicClearIncrementalStateTask(TracingSessionID, bool post_next_only);
+  TraceBuffer* GetBufferByID(BufferID);
+  void OnStartTriggersTimeout(TracingSessionID tsid);
+  void MaybeLogUploadEvent(const TraceConfig&,
+                           PerfettoStatsdAtom atom,
+                           const std::string& trigger_name = "");
+  void MaybeLogTriggerEvent(const TraceConfig&,
+                            PerfettoTriggerAtom atom,
+                            const std::string& trigger_name);
+  size_t PurgeExpiredAndCountTriggerInWindow(int64_t now_ns,
+                                             uint64_t trigger_name_hash);
+
+  base::TaskRunner* const task_runner_;
+  std::unique_ptr<SharedMemory::Factory> shm_factory_;
+  ProducerID last_producer_id_ = 0;
+  DataSourceInstanceID last_data_source_instance_id_ = 0;
+  TracingSessionID last_tracing_session_id_ = 0;
+  FlushRequestID last_flush_request_id_ = 0;
+  uid_t uid_ = 0;
+
+  // Buffer IDs are global across all consumers (because a Producer can produce
+  // data for more than one trace session, hence more than one consumer).
+  IdAllocator<BufferID> buffer_ids_;
+
+  std::multimap<std::string /*name*/, RegisteredDataSource> data_sources_;
+  std::map<ProducerID, ProducerEndpointImpl*> producers_;
+  std::set<ConsumerEndpointImpl*> consumers_;
+  std::map<TracingSessionID, TracingSession> tracing_sessions_;
+  std::map<BufferID, std::unique_ptr<TraceBuffer>> buffers_;
+  std::map<std::string, int64_t> session_to_last_trace_s_;
+
+  // Contains timestamps of triggers.
+  // The queue is sorted by timestamp and invocations older than
+  // |trigger_window_ns_| are purged when a trigger happens.
+  base::CircularQueue<TriggerHistory> trigger_history_;
+
+  bool smb_scraping_enabled_ = false;
+  bool lockdown_mode_ = false;
+  uint32_t min_write_period_ms_ = 100;       // Overridable for testing.
+  int64_t trigger_window_ns_ = kOneDayInNs;  // Overridable for testing.
+
+  std::minstd_rand trigger_probability_rand_;
+  std::uniform_real_distribution<> trigger_probability_dist_;
+  double trigger_rnd_override_for_testing_ = 0;  // Overridable for testing.
+
+  uint8_t sync_marker_packet_[32];  // Lazily initialized.
+  size_t sync_marker_packet_size_ = 0;
+
+  // Stats.
+  uint64_t chunks_discarded_ = 0;
+  uint64_t patches_discarded_ = 0;
+
+  PERFETTO_THREAD_CHECKER(thread_checker_)
+
+  base::WeakPtrFactory<TracingServiceImpl>
+      weak_ptr_factory_;  // Keep at the end.
+};
+
+}  // namespace perfetto
+
+#endif  // SRC_TRACING_CORE_TRACING_SERVICE_IMPL_H_
+// gen_amalgamated begin header: include/perfetto/tracing/core/tracing_service_capabilities.h
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_CAPABILITIES_H_
+#define INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_CAPABILITIES_H_
+
+// Creates the aliases in the ::perfetto namespace, doing things like:
+// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
+// See comments in forward_decls.h for the historical reasons of this
+// indirection layer.
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_capabilities.gen.h"
+
+#endif  // INCLUDE_PERFETTO_TRACING_CORE_TRACING_SERVICE_CAPABILITIES_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/trace_stats.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACE_STATS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACE_STATS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class TraceStats_BufferStats;
+class TraceStats_FilterStats;
+
+class TraceStats_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/11, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TraceStats_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceStats_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceStats_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_buffer_stats() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> buffer_stats() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_producers_connected() const { return at<2>().valid(); }
+  uint32_t producers_connected() const { return at<2>().as_uint32(); }
+  bool has_producers_seen() const { return at<3>().valid(); }
+  uint64_t producers_seen() const { return at<3>().as_uint64(); }
+  bool has_data_sources_registered() const { return at<4>().valid(); }
+  uint32_t data_sources_registered() const { return at<4>().as_uint32(); }
+  bool has_data_sources_seen() const { return at<5>().valid(); }
+  uint64_t data_sources_seen() const { return at<5>().as_uint64(); }
+  bool has_tracing_sessions() const { return at<6>().valid(); }
+  uint32_t tracing_sessions() const { return at<6>().as_uint32(); }
+  bool has_total_buffers() const { return at<7>().valid(); }
+  uint32_t total_buffers() const { return at<7>().as_uint32(); }
+  bool has_chunks_discarded() const { return at<8>().valid(); }
+  uint64_t chunks_discarded() const { return at<8>().as_uint64(); }
+  bool has_patches_discarded() const { return at<9>().valid(); }
+  uint64_t patches_discarded() const { return at<9>().as_uint64(); }
+  bool has_invalid_packets() const { return at<10>().valid(); }
+  uint64_t invalid_packets() const { return at<10>().as_uint64(); }
+  bool has_filter_stats() const { return at<11>().valid(); }
+  ::protozero::ConstBytes filter_stats() const { return at<11>().as_bytes(); }
+};
+
+class TraceStats : public ::protozero::Message {
+ public:
+  using Decoder = TraceStats_Decoder;
+  enum : int32_t {
+    kBufferStatsFieldNumber = 1,
+    kProducersConnectedFieldNumber = 2,
+    kProducersSeenFieldNumber = 3,
+    kDataSourcesRegisteredFieldNumber = 4,
+    kDataSourcesSeenFieldNumber = 5,
+    kTracingSessionsFieldNumber = 6,
+    kTotalBuffersFieldNumber = 7,
+    kChunksDiscardedFieldNumber = 8,
+    kPatchesDiscardedFieldNumber = 9,
+    kInvalidPacketsFieldNumber = 10,
+    kFilterStatsFieldNumber = 11,
+  };
+  using BufferStats = ::perfetto::protos::pbzero::TraceStats_BufferStats;
+  using FilterStats = ::perfetto::protos::pbzero::TraceStats_FilterStats;
+
+  using FieldMetadata_BufferStats =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceStats_BufferStats,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BufferStats kBufferStats() { return {}; }
+  template <typename T = TraceStats_BufferStats> T* add_buffer_stats() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_ProducersConnected =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProducersConnected kProducersConnected() { return {}; }
+  void set_producers_connected(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProducersConnected::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProducersSeen =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProducersSeen kProducersSeen() { return {}; }
+  void set_producers_seen(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProducersSeen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DataSourcesRegistered =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DataSourcesRegistered kDataSourcesRegistered() { return {}; }
+  void set_data_sources_registered(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DataSourcesRegistered::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DataSourcesSeen =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DataSourcesSeen kDataSourcesSeen() { return {}; }
+  void set_data_sources_seen(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DataSourcesSeen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TracingSessions =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TracingSessions kTracingSessions() { return {}; }
+  void set_tracing_sessions(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TracingSessions::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TotalBuffers =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TotalBuffers kTotalBuffers() { return {}; }
+  void set_total_buffers(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TotalBuffers::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChunksDiscarded =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunksDiscarded kChunksDiscarded() { return {}; }
+  void set_chunks_discarded(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChunksDiscarded::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PatchesDiscarded =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PatchesDiscarded kPatchesDiscarded() { return {}; }
+  void set_patches_discarded(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PatchesDiscarded::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_InvalidPackets =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InvalidPackets kInvalidPackets() { return {}; }
+  void set_invalid_packets(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_InvalidPackets::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FilterStats =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceStats_FilterStats,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FilterStats kFilterStats() { return {}; }
+  template <typename T = TraceStats_FilterStats> T* set_filter_stats() {
+    return BeginNestedMessage<T>(11);
+  }
+
+};
+
+class TraceStats_FilterStats_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceStats_FilterStats_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceStats_FilterStats_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceStats_FilterStats_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_input_packets() const { return at<1>().valid(); }
+  uint64_t input_packets() const { return at<1>().as_uint64(); }
+  bool has_input_bytes() const { return at<2>().valid(); }
+  uint64_t input_bytes() const { return at<2>().as_uint64(); }
+  bool has_output_bytes() const { return at<3>().valid(); }
+  uint64_t output_bytes() const { return at<3>().as_uint64(); }
+  bool has_errors() const { return at<4>().valid(); }
+  uint64_t errors() const { return at<4>().as_uint64(); }
+};
+
+class TraceStats_FilterStats : public ::protozero::Message {
+ public:
+  using Decoder = TraceStats_FilterStats_Decoder;
+  enum : int32_t {
+    kInputPacketsFieldNumber = 1,
+    kInputBytesFieldNumber = 2,
+    kOutputBytesFieldNumber = 3,
+    kErrorsFieldNumber = 4,
+  };
+
+  using FieldMetadata_InputPackets =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_FilterStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InputPackets kInputPackets() { return {}; }
+  void set_input_packets(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_InputPackets::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_InputBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_FilterStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InputBytes kInputBytes() { return {}; }
+  void set_input_bytes(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_InputBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OutputBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_FilterStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OutputBytes kOutputBytes() { return {}; }
+  void set_output_bytes(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OutputBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Errors =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_FilterStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Errors kErrors() { return {}; }
+  void set_errors(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Errors::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceStats_BufferStats_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/19, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceStats_BufferStats_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceStats_BufferStats_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceStats_BufferStats_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_buffer_size() const { return at<12>().valid(); }
+  uint64_t buffer_size() const { return at<12>().as_uint64(); }
+  bool has_bytes_written() const { return at<1>().valid(); }
+  uint64_t bytes_written() const { return at<1>().as_uint64(); }
+  bool has_bytes_overwritten() const { return at<13>().valid(); }
+  uint64_t bytes_overwritten() const { return at<13>().as_uint64(); }
+  bool has_bytes_read() const { return at<14>().valid(); }
+  uint64_t bytes_read() const { return at<14>().as_uint64(); }
+  bool has_padding_bytes_written() const { return at<15>().valid(); }
+  uint64_t padding_bytes_written() const { return at<15>().as_uint64(); }
+  bool has_padding_bytes_cleared() const { return at<16>().valid(); }
+  uint64_t padding_bytes_cleared() const { return at<16>().as_uint64(); }
+  bool has_chunks_written() const { return at<2>().valid(); }
+  uint64_t chunks_written() const { return at<2>().as_uint64(); }
+  bool has_chunks_rewritten() const { return at<10>().valid(); }
+  uint64_t chunks_rewritten() const { return at<10>().as_uint64(); }
+  bool has_chunks_overwritten() const { return at<3>().valid(); }
+  uint64_t chunks_overwritten() const { return at<3>().as_uint64(); }
+  bool has_chunks_discarded() const { return at<18>().valid(); }
+  uint64_t chunks_discarded() const { return at<18>().as_uint64(); }
+  bool has_chunks_read() const { return at<17>().valid(); }
+  uint64_t chunks_read() const { return at<17>().as_uint64(); }
+  bool has_chunks_committed_out_of_order() const { return at<11>().valid(); }
+  uint64_t chunks_committed_out_of_order() const { return at<11>().as_uint64(); }
+  bool has_write_wrap_count() const { return at<4>().valid(); }
+  uint64_t write_wrap_count() const { return at<4>().as_uint64(); }
+  bool has_patches_succeeded() const { return at<5>().valid(); }
+  uint64_t patches_succeeded() const { return at<5>().as_uint64(); }
+  bool has_patches_failed() const { return at<6>().valid(); }
+  uint64_t patches_failed() const { return at<6>().as_uint64(); }
+  bool has_readaheads_succeeded() const { return at<7>().valid(); }
+  uint64_t readaheads_succeeded() const { return at<7>().as_uint64(); }
+  bool has_readaheads_failed() const { return at<8>().valid(); }
+  uint64_t readaheads_failed() const { return at<8>().as_uint64(); }
+  bool has_abi_violations() const { return at<9>().valid(); }
+  uint64_t abi_violations() const { return at<9>().as_uint64(); }
+  bool has_trace_writer_packet_loss() const { return at<19>().valid(); }
+  uint64_t trace_writer_packet_loss() const { return at<19>().as_uint64(); }
+};
+
+class TraceStats_BufferStats : public ::protozero::Message {
+ public:
+  using Decoder = TraceStats_BufferStats_Decoder;
+  enum : int32_t {
+    kBufferSizeFieldNumber = 12,
+    kBytesWrittenFieldNumber = 1,
+    kBytesOverwrittenFieldNumber = 13,
+    kBytesReadFieldNumber = 14,
+    kPaddingBytesWrittenFieldNumber = 15,
+    kPaddingBytesClearedFieldNumber = 16,
+    kChunksWrittenFieldNumber = 2,
+    kChunksRewrittenFieldNumber = 10,
+    kChunksOverwrittenFieldNumber = 3,
+    kChunksDiscardedFieldNumber = 18,
+    kChunksReadFieldNumber = 17,
+    kChunksCommittedOutOfOrderFieldNumber = 11,
+    kWriteWrapCountFieldNumber = 4,
+    kPatchesSucceededFieldNumber = 5,
+    kPatchesFailedFieldNumber = 6,
+    kReadaheadsSucceededFieldNumber = 7,
+    kReadaheadsFailedFieldNumber = 8,
+    kAbiViolationsFieldNumber = 9,
+    kTraceWriterPacketLossFieldNumber = 19,
+  };
+
+  using FieldMetadata_BufferSize =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BufferSize kBufferSize() { return {}; }
+  void set_buffer_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BufferSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BytesWritten =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BytesWritten kBytesWritten() { return {}; }
+  void set_bytes_written(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BytesWritten::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BytesOverwritten =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BytesOverwritten kBytesOverwritten() { return {}; }
+  void set_bytes_overwritten(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BytesOverwritten::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BytesRead =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BytesRead kBytesRead() { return {}; }
+  void set_bytes_read(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BytesRead::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PaddingBytesWritten =
+    ::protozero::proto_utils::FieldMetadata<
+      15,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PaddingBytesWritten kPaddingBytesWritten() { return {}; }
+  void set_padding_bytes_written(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PaddingBytesWritten::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PaddingBytesCleared =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PaddingBytesCleared kPaddingBytesCleared() { return {}; }
+  void set_padding_bytes_cleared(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PaddingBytesCleared::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChunksWritten =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunksWritten kChunksWritten() { return {}; }
+  void set_chunks_written(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChunksWritten::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChunksRewritten =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunksRewritten kChunksRewritten() { return {}; }
+  void set_chunks_rewritten(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChunksRewritten::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChunksOverwritten =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunksOverwritten kChunksOverwritten() { return {}; }
+  void set_chunks_overwritten(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChunksOverwritten::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChunksDiscarded =
+    ::protozero::proto_utils::FieldMetadata<
+      18,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunksDiscarded kChunksDiscarded() { return {}; }
+  void set_chunks_discarded(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChunksDiscarded::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChunksRead =
+    ::protozero::proto_utils::FieldMetadata<
+      17,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunksRead kChunksRead() { return {}; }
+  void set_chunks_read(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChunksRead::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChunksCommittedOutOfOrder =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunksCommittedOutOfOrder kChunksCommittedOutOfOrder() { return {}; }
+  void set_chunks_committed_out_of_order(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChunksCommittedOutOfOrder::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_WriteWrapCount =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WriteWrapCount kWriteWrapCount() { return {}; }
+  void set_write_wrap_count(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_WriteWrapCount::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PatchesSucceeded =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PatchesSucceeded kPatchesSucceeded() { return {}; }
+  void set_patches_succeeded(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PatchesSucceeded::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PatchesFailed =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PatchesFailed kPatchesFailed() { return {}; }
+  void set_patches_failed(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PatchesFailed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReadaheadsSucceeded =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReadaheadsSucceeded kReadaheadsSucceeded() { return {}; }
+  void set_readaheads_succeeded(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReadaheadsSucceeded::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReadaheadsFailed =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReadaheadsFailed kReadaheadsFailed() { return {}; }
+  void set_readaheads_failed(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReadaheadsFailed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AbiViolations =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AbiViolations kAbiViolations() { return {}; }
+  void set_abi_violations(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AbiViolations::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TraceWriterPacketLoss =
+    ::protozero::proto_utils::FieldMetadata<
+      19,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceWriterPacketLoss kTraceWriterPacketLoss() { return {}; }
+  void set_trace_writer_packet_loss(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TraceWriterPacketLoss::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/trace_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACE_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACE_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class DataSourceConfig;
+class TraceConfig_BufferConfig;
+class TraceConfig_BuiltinDataSource;
+class TraceConfig_DataSource;
+class TraceConfig_GuardrailOverrides;
+class TraceConfig_IncidentReportConfig;
+class TraceConfig_IncrementalStateConfig;
+class TraceConfig_ProducerConfig;
+class TraceConfig_StatsdMetadata;
+class TraceConfig_TraceFilter;
+class TraceConfig_TriggerConfig;
+class TraceConfig_TriggerConfig_Trigger;
+enum BuiltinClock : int32_t;
+enum TraceConfig_BufferConfig_FillPolicy : int32_t;
+enum TraceConfig_CompressionType : int32_t;
+enum TraceConfig_LockdownModeOperation : int32_t;
+enum TraceConfig_StatsdLogging : int32_t;
+enum TraceConfig_TriggerConfig_TriggerMode : int32_t;
+
+enum TraceConfig_LockdownModeOperation : int32_t {
+  TraceConfig_LockdownModeOperation_LOCKDOWN_UNCHANGED = 0,
+  TraceConfig_LockdownModeOperation_LOCKDOWN_CLEAR = 1,
+  TraceConfig_LockdownModeOperation_LOCKDOWN_SET = 2,
+};
+
+const TraceConfig_LockdownModeOperation TraceConfig_LockdownModeOperation_MIN = TraceConfig_LockdownModeOperation_LOCKDOWN_UNCHANGED;
+const TraceConfig_LockdownModeOperation TraceConfig_LockdownModeOperation_MAX = TraceConfig_LockdownModeOperation_LOCKDOWN_SET;
+
+enum TraceConfig_CompressionType : int32_t {
+  TraceConfig_CompressionType_COMPRESSION_TYPE_UNSPECIFIED = 0,
+  TraceConfig_CompressionType_COMPRESSION_TYPE_DEFLATE = 1,
+};
+
+const TraceConfig_CompressionType TraceConfig_CompressionType_MIN = TraceConfig_CompressionType_COMPRESSION_TYPE_UNSPECIFIED;
+const TraceConfig_CompressionType TraceConfig_CompressionType_MAX = TraceConfig_CompressionType_COMPRESSION_TYPE_DEFLATE;
+
+enum TraceConfig_StatsdLogging : int32_t {
+  TraceConfig_StatsdLogging_STATSD_LOGGING_UNSPECIFIED = 0,
+  TraceConfig_StatsdLogging_STATSD_LOGGING_ENABLED = 1,
+  TraceConfig_StatsdLogging_STATSD_LOGGING_DISABLED = 2,
+};
+
+const TraceConfig_StatsdLogging TraceConfig_StatsdLogging_MIN = TraceConfig_StatsdLogging_STATSD_LOGGING_UNSPECIFIED;
+const TraceConfig_StatsdLogging TraceConfig_StatsdLogging_MAX = TraceConfig_StatsdLogging_STATSD_LOGGING_DISABLED;
+
+enum TraceConfig_TriggerConfig_TriggerMode : int32_t {
+  TraceConfig_TriggerConfig_TriggerMode_UNSPECIFIED = 0,
+  TraceConfig_TriggerConfig_TriggerMode_START_TRACING = 1,
+  TraceConfig_TriggerConfig_TriggerMode_STOP_TRACING = 2,
+};
+
+const TraceConfig_TriggerConfig_TriggerMode TraceConfig_TriggerConfig_TriggerMode_MIN = TraceConfig_TriggerConfig_TriggerMode_UNSPECIFIED;
+const TraceConfig_TriggerConfig_TriggerMode TraceConfig_TriggerConfig_TriggerMode_MAX = TraceConfig_TriggerConfig_TriggerMode_STOP_TRACING;
+
+enum TraceConfig_BufferConfig_FillPolicy : int32_t {
+  TraceConfig_BufferConfig_FillPolicy_UNSPECIFIED = 0,
+  TraceConfig_BufferConfig_FillPolicy_RING_BUFFER = 1,
+  TraceConfig_BufferConfig_FillPolicy_DISCARD = 2,
+};
+
+const TraceConfig_BufferConfig_FillPolicy TraceConfig_BufferConfig_FillPolicy_MIN = TraceConfig_BufferConfig_FillPolicy_UNSPECIFIED;
+const TraceConfig_BufferConfig_FillPolicy TraceConfig_BufferConfig_FillPolicy_MAX = TraceConfig_BufferConfig_FillPolicy_DISCARD;
+
+class TraceConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/32, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TraceConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_buffers() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> buffers() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_data_sources() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> data_sources() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_builtin_data_sources() const { return at<20>().valid(); }
+  ::protozero::ConstBytes builtin_data_sources() const { return at<20>().as_bytes(); }
+  bool has_duration_ms() const { return at<3>().valid(); }
+  uint32_t duration_ms() const { return at<3>().as_uint32(); }
+  bool has_enable_extra_guardrails() const { return at<4>().valid(); }
+  bool enable_extra_guardrails() const { return at<4>().as_bool(); }
+  bool has_lockdown_mode() const { return at<5>().valid(); }
+  int32_t lockdown_mode() const { return at<5>().as_int32(); }
+  bool has_producers() const { return at<6>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> producers() const { return GetRepeated<::protozero::ConstBytes>(6); }
+  bool has_statsd_metadata() const { return at<7>().valid(); }
+  ::protozero::ConstBytes statsd_metadata() const { return at<7>().as_bytes(); }
+  bool has_write_into_file() const { return at<8>().valid(); }
+  bool write_into_file() const { return at<8>().as_bool(); }
+  bool has_output_path() const { return at<29>().valid(); }
+  ::protozero::ConstChars output_path() const { return at<29>().as_string(); }
+  bool has_file_write_period_ms() const { return at<9>().valid(); }
+  uint32_t file_write_period_ms() const { return at<9>().as_uint32(); }
+  bool has_max_file_size_bytes() const { return at<10>().valid(); }
+  uint64_t max_file_size_bytes() const { return at<10>().as_uint64(); }
+  bool has_guardrail_overrides() const { return at<11>().valid(); }
+  ::protozero::ConstBytes guardrail_overrides() const { return at<11>().as_bytes(); }
+  bool has_deferred_start() const { return at<12>().valid(); }
+  bool deferred_start() const { return at<12>().as_bool(); }
+  bool has_flush_period_ms() const { return at<13>().valid(); }
+  uint32_t flush_period_ms() const { return at<13>().as_uint32(); }
+  bool has_flush_timeout_ms() const { return at<14>().valid(); }
+  uint32_t flush_timeout_ms() const { return at<14>().as_uint32(); }
+  bool has_data_source_stop_timeout_ms() const { return at<23>().valid(); }
+  uint32_t data_source_stop_timeout_ms() const { return at<23>().as_uint32(); }
+  bool has_notify_traceur() const { return at<16>().valid(); }
+  bool notify_traceur() const { return at<16>().as_bool(); }
+  bool has_bugreport_score() const { return at<30>().valid(); }
+  int32_t bugreport_score() const { return at<30>().as_int32(); }
+  bool has_trigger_config() const { return at<17>().valid(); }
+  ::protozero::ConstBytes trigger_config() const { return at<17>().as_bytes(); }
+  bool has_activate_triggers() const { return at<18>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> activate_triggers() const { return GetRepeated<::protozero::ConstChars>(18); }
+  bool has_incremental_state_config() const { return at<21>().valid(); }
+  ::protozero::ConstBytes incremental_state_config() const { return at<21>().as_bytes(); }
+  bool has_allow_user_build_tracing() const { return at<19>().valid(); }
+  bool allow_user_build_tracing() const { return at<19>().as_bool(); }
+  bool has_unique_session_name() const { return at<22>().valid(); }
+  ::protozero::ConstChars unique_session_name() const { return at<22>().as_string(); }
+  bool has_compression_type() const { return at<24>().valid(); }
+  int32_t compression_type() const { return at<24>().as_int32(); }
+  bool has_incident_report_config() const { return at<25>().valid(); }
+  ::protozero::ConstBytes incident_report_config() const { return at<25>().as_bytes(); }
+  bool has_statsd_logging() const { return at<31>().valid(); }
+  int32_t statsd_logging() const { return at<31>().as_int32(); }
+  bool has_trace_uuid_msb() const { return at<27>().valid(); }
+  int64_t trace_uuid_msb() const { return at<27>().as_int64(); }
+  bool has_trace_uuid_lsb() const { return at<28>().valid(); }
+  int64_t trace_uuid_lsb() const { return at<28>().as_int64(); }
+  bool has_trace_filter() const { return at<32>().valid(); }
+  ::protozero::ConstBytes trace_filter() const { return at<32>().as_bytes(); }
+};
+
+class TraceConfig : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_Decoder;
+  enum : int32_t {
+    kBuffersFieldNumber = 1,
+    kDataSourcesFieldNumber = 2,
+    kBuiltinDataSourcesFieldNumber = 20,
+    kDurationMsFieldNumber = 3,
+    kEnableExtraGuardrailsFieldNumber = 4,
+    kLockdownModeFieldNumber = 5,
+    kProducersFieldNumber = 6,
+    kStatsdMetadataFieldNumber = 7,
+    kWriteIntoFileFieldNumber = 8,
+    kOutputPathFieldNumber = 29,
+    kFileWritePeriodMsFieldNumber = 9,
+    kMaxFileSizeBytesFieldNumber = 10,
+    kGuardrailOverridesFieldNumber = 11,
+    kDeferredStartFieldNumber = 12,
+    kFlushPeriodMsFieldNumber = 13,
+    kFlushTimeoutMsFieldNumber = 14,
+    kDataSourceStopTimeoutMsFieldNumber = 23,
+    kNotifyTraceurFieldNumber = 16,
+    kBugreportScoreFieldNumber = 30,
+    kTriggerConfigFieldNumber = 17,
+    kActivateTriggersFieldNumber = 18,
+    kIncrementalStateConfigFieldNumber = 21,
+    kAllowUserBuildTracingFieldNumber = 19,
+    kUniqueSessionNameFieldNumber = 22,
+    kCompressionTypeFieldNumber = 24,
+    kIncidentReportConfigFieldNumber = 25,
+    kStatsdLoggingFieldNumber = 31,
+    kTraceUuidMsbFieldNumber = 27,
+    kTraceUuidLsbFieldNumber = 28,
+    kTraceFilterFieldNumber = 32,
+  };
+  using BufferConfig = ::perfetto::protos::pbzero::TraceConfig_BufferConfig;
+  using DataSource = ::perfetto::protos::pbzero::TraceConfig_DataSource;
+  using BuiltinDataSource = ::perfetto::protos::pbzero::TraceConfig_BuiltinDataSource;
+  using ProducerConfig = ::perfetto::protos::pbzero::TraceConfig_ProducerConfig;
+  using StatsdMetadata = ::perfetto::protos::pbzero::TraceConfig_StatsdMetadata;
+  using GuardrailOverrides = ::perfetto::protos::pbzero::TraceConfig_GuardrailOverrides;
+  using TriggerConfig = ::perfetto::protos::pbzero::TraceConfig_TriggerConfig;
+  using IncrementalStateConfig = ::perfetto::protos::pbzero::TraceConfig_IncrementalStateConfig;
+  using IncidentReportConfig = ::perfetto::protos::pbzero::TraceConfig_IncidentReportConfig;
+  using TraceFilter = ::perfetto::protos::pbzero::TraceConfig_TraceFilter;
+  using LockdownModeOperation = ::perfetto::protos::pbzero::TraceConfig_LockdownModeOperation;
+  using CompressionType = ::perfetto::protos::pbzero::TraceConfig_CompressionType;
+  using StatsdLogging = ::perfetto::protos::pbzero::TraceConfig_StatsdLogging;
+  static const LockdownModeOperation LOCKDOWN_UNCHANGED = TraceConfig_LockdownModeOperation_LOCKDOWN_UNCHANGED;
+  static const LockdownModeOperation LOCKDOWN_CLEAR = TraceConfig_LockdownModeOperation_LOCKDOWN_CLEAR;
+  static const LockdownModeOperation LOCKDOWN_SET = TraceConfig_LockdownModeOperation_LOCKDOWN_SET;
+  static const CompressionType COMPRESSION_TYPE_UNSPECIFIED = TraceConfig_CompressionType_COMPRESSION_TYPE_UNSPECIFIED;
+  static const CompressionType COMPRESSION_TYPE_DEFLATE = TraceConfig_CompressionType_COMPRESSION_TYPE_DEFLATE;
+  static const StatsdLogging STATSD_LOGGING_UNSPECIFIED = TraceConfig_StatsdLogging_STATSD_LOGGING_UNSPECIFIED;
+  static const StatsdLogging STATSD_LOGGING_ENABLED = TraceConfig_StatsdLogging_STATSD_LOGGING_ENABLED;
+  static const StatsdLogging STATSD_LOGGING_DISABLED = TraceConfig_StatsdLogging_STATSD_LOGGING_DISABLED;
+
+  using FieldMetadata_Buffers =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_BufferConfig,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Buffers kBuffers() { return {}; }
+  template <typename T = TraceConfig_BufferConfig> T* add_buffers() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_DataSources =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_DataSource,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DataSources kDataSources() { return {}; }
+  template <typename T = TraceConfig_DataSource> T* add_data_sources() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_BuiltinDataSources =
+    ::protozero::proto_utils::FieldMetadata<
+      20,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_BuiltinDataSource,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BuiltinDataSources kBuiltinDataSources() { return {}; }
+  template <typename T = TraceConfig_BuiltinDataSource> T* set_builtin_data_sources() {
+    return BeginNestedMessage<T>(20);
+  }
+
+
+  using FieldMetadata_DurationMs =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DurationMs kDurationMs() { return {}; }
+  void set_duration_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DurationMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EnableExtraGuardrails =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EnableExtraGuardrails kEnableExtraGuardrails() { return {}; }
+  void set_enable_extra_guardrails(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_EnableExtraGuardrails::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LockdownMode =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::TraceConfig_LockdownModeOperation,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LockdownMode kLockdownMode() { return {}; }
+  void set_lockdown_mode(::perfetto::protos::pbzero::TraceConfig_LockdownModeOperation value) {
+    static constexpr uint32_t field_id = FieldMetadata_LockdownMode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Producers =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_ProducerConfig,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Producers kProducers() { return {}; }
+  template <typename T = TraceConfig_ProducerConfig> T* add_producers() {
+    return BeginNestedMessage<T>(6);
+  }
+
+
+  using FieldMetadata_StatsdMetadata =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_StatsdMetadata,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StatsdMetadata kStatsdMetadata() { return {}; }
+  template <typename T = TraceConfig_StatsdMetadata> T* set_statsd_metadata() {
+    return BeginNestedMessage<T>(7);
+  }
+
+
+  using FieldMetadata_WriteIntoFile =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WriteIntoFile kWriteIntoFile() { return {}; }
+  void set_write_into_file(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_WriteIntoFile::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OutputPath =
+    ::protozero::proto_utils::FieldMetadata<
+      29,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OutputPath kOutputPath() { return {}; }
+  void set_output_path(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_OutputPath::kFieldId, data, size);
+  }
+  void set_output_path(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_OutputPath::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FileWritePeriodMs =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FileWritePeriodMs kFileWritePeriodMs() { return {}; }
+  void set_file_write_period_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FileWritePeriodMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MaxFileSizeBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MaxFileSizeBytes kMaxFileSizeBytes() { return {}; }
+  void set_max_file_size_bytes(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MaxFileSizeBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GuardrailOverrides =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_GuardrailOverrides,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GuardrailOverrides kGuardrailOverrides() { return {}; }
+  template <typename T = TraceConfig_GuardrailOverrides> T* set_guardrail_overrides() {
+    return BeginNestedMessage<T>(11);
+  }
+
+
+  using FieldMetadata_DeferredStart =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DeferredStart kDeferredStart() { return {}; }
+  void set_deferred_start(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DeferredStart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FlushPeriodMs =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FlushPeriodMs kFlushPeriodMs() { return {}; }
+  void set_flush_period_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FlushPeriodMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FlushTimeoutMs =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FlushTimeoutMs kFlushTimeoutMs() { return {}; }
+  void set_flush_timeout_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FlushTimeoutMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DataSourceStopTimeoutMs =
+    ::protozero::proto_utils::FieldMetadata<
+      23,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DataSourceStopTimeoutMs kDataSourceStopTimeoutMs() { return {}; }
+  void set_data_source_stop_timeout_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DataSourceStopTimeoutMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NotifyTraceur =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NotifyTraceur kNotifyTraceur() { return {}; }
+  void set_notify_traceur(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_NotifyTraceur::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BugreportScore =
+    ::protozero::proto_utils::FieldMetadata<
+      30,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BugreportScore kBugreportScore() { return {}; }
+  void set_bugreport_score(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BugreportScore::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TriggerConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      17,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_TriggerConfig,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggerConfig kTriggerConfig() { return {}; }
+  template <typename T = TraceConfig_TriggerConfig> T* set_trigger_config() {
+    return BeginNestedMessage<T>(17);
+  }
+
+
+  using FieldMetadata_ActivateTriggers =
+    ::protozero::proto_utils::FieldMetadata<
+      18,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ActivateTriggers kActivateTriggers() { return {}; }
+  void add_activate_triggers(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ActivateTriggers::kFieldId, data, size);
+  }
+  void add_activate_triggers(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ActivateTriggers::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IncrementalStateConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      21,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_IncrementalStateConfig,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IncrementalStateConfig kIncrementalStateConfig() { return {}; }
+  template <typename T = TraceConfig_IncrementalStateConfig> T* set_incremental_state_config() {
+    return BeginNestedMessage<T>(21);
+  }
+
+
+  using FieldMetadata_AllowUserBuildTracing =
+    ::protozero::proto_utils::FieldMetadata<
+      19,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllowUserBuildTracing kAllowUserBuildTracing() { return {}; }
+  void set_allow_user_build_tracing(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_AllowUserBuildTracing::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UniqueSessionName =
+    ::protozero::proto_utils::FieldMetadata<
+      22,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UniqueSessionName kUniqueSessionName() { return {}; }
+  void set_unique_session_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_UniqueSessionName::kFieldId, data, size);
+  }
+  void set_unique_session_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_UniqueSessionName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CompressionType =
+    ::protozero::proto_utils::FieldMetadata<
+      24,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::TraceConfig_CompressionType,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CompressionType kCompressionType() { return {}; }
+  void set_compression_type(::perfetto::protos::pbzero::TraceConfig_CompressionType value) {
+    static constexpr uint32_t field_id = FieldMetadata_CompressionType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IncidentReportConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      25,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_IncidentReportConfig,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IncidentReportConfig kIncidentReportConfig() { return {}; }
+  template <typename T = TraceConfig_IncidentReportConfig> T* set_incident_report_config() {
+    return BeginNestedMessage<T>(25);
+  }
+
+
+  using FieldMetadata_StatsdLogging =
+    ::protozero::proto_utils::FieldMetadata<
+      31,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::TraceConfig_StatsdLogging,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StatsdLogging kStatsdLogging() { return {}; }
+  void set_statsd_logging(::perfetto::protos::pbzero::TraceConfig_StatsdLogging value) {
+    static constexpr uint32_t field_id = FieldMetadata_StatsdLogging::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TraceUuidMsb =
+    ::protozero::proto_utils::FieldMetadata<
+      27,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceUuidMsb kTraceUuidMsb() { return {}; }
+  void set_trace_uuid_msb(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TraceUuidMsb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TraceUuidLsb =
+    ::protozero::proto_utils::FieldMetadata<
+      28,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceUuidLsb kTraceUuidLsb() { return {}; }
+  void set_trace_uuid_lsb(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TraceUuidLsb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TraceFilter =
+    ::protozero::proto_utils::FieldMetadata<
+      32,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_TraceFilter,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceFilter kTraceFilter() { return {}; }
+  template <typename T = TraceConfig_TraceFilter> T* set_trace_filter() {
+    return BeginNestedMessage<T>(32);
+  }
+
+};
+
+class TraceConfig_TraceFilter_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_TraceFilter_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_TraceFilter_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_TraceFilter_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_bytecode() const { return at<1>().valid(); }
+  ::protozero::ConstBytes bytecode() const { return at<1>().as_bytes(); }
+};
+
+class TraceConfig_TraceFilter : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_TraceFilter_Decoder;
+  enum : int32_t {
+    kBytecodeFieldNumber = 1,
+  };
+
+  using FieldMetadata_Bytecode =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBytes,
+      std::string,
+      TraceConfig_TraceFilter>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Bytecode kBytecode() { return {}; }
+  void set_bytecode(const uint8_t* data, size_t size) {
+    AppendBytes(FieldMetadata_Bytecode::kFieldId, data, size);
+  }
+  void set_bytecode(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Bytecode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBytes>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_IncidentReportConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_IncidentReportConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_IncidentReportConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_IncidentReportConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_destination_package() const { return at<1>().valid(); }
+  ::protozero::ConstChars destination_package() const { return at<1>().as_string(); }
+  bool has_destination_class() const { return at<2>().valid(); }
+  ::protozero::ConstChars destination_class() const { return at<2>().as_string(); }
+  bool has_privacy_level() const { return at<3>().valid(); }
+  int32_t privacy_level() const { return at<3>().as_int32(); }
+  bool has_skip_incidentd() const { return at<5>().valid(); }
+  bool skip_incidentd() const { return at<5>().as_bool(); }
+  bool has_skip_dropbox() const { return at<4>().valid(); }
+  bool skip_dropbox() const { return at<4>().as_bool(); }
+};
+
+class TraceConfig_IncidentReportConfig : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_IncidentReportConfig_Decoder;
+  enum : int32_t {
+    kDestinationPackageFieldNumber = 1,
+    kDestinationClassFieldNumber = 2,
+    kPrivacyLevelFieldNumber = 3,
+    kSkipIncidentdFieldNumber = 5,
+    kSkipDropboxFieldNumber = 4,
+  };
+
+  using FieldMetadata_DestinationPackage =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig_IncidentReportConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DestinationPackage kDestinationPackage() { return {}; }
+  void set_destination_package(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_DestinationPackage::kFieldId, data, size);
+  }
+  void set_destination_package(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_DestinationPackage::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DestinationClass =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig_IncidentReportConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DestinationClass kDestinationClass() { return {}; }
+  void set_destination_class(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_DestinationClass::kFieldId, data, size);
+  }
+  void set_destination_class(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_DestinationClass::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PrivacyLevel =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TraceConfig_IncidentReportConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PrivacyLevel kPrivacyLevel() { return {}; }
+  void set_privacy_level(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PrivacyLevel::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SkipIncidentd =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig_IncidentReportConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SkipIncidentd kSkipIncidentd() { return {}; }
+  void set_skip_incidentd(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_SkipIncidentd::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SkipDropbox =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig_IncidentReportConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SkipDropbox kSkipDropbox() { return {}; }
+  void set_skip_dropbox(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_SkipDropbox::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_IncrementalStateConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_IncrementalStateConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_IncrementalStateConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_IncrementalStateConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_clear_period_ms() const { return at<1>().valid(); }
+  uint32_t clear_period_ms() const { return at<1>().as_uint32(); }
+};
+
+class TraceConfig_IncrementalStateConfig : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_IncrementalStateConfig_Decoder;
+  enum : int32_t {
+    kClearPeriodMsFieldNumber = 1,
+  };
+
+  using FieldMetadata_ClearPeriodMs =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig_IncrementalStateConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClearPeriodMs kClearPeriodMs() { return {}; }
+  void set_clear_period_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ClearPeriodMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_TriggerConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TraceConfig_TriggerConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_TriggerConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_TriggerConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_trigger_mode() const { return at<1>().valid(); }
+  int32_t trigger_mode() const { return at<1>().as_int32(); }
+  bool has_triggers() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> triggers() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_trigger_timeout_ms() const { return at<3>().valid(); }
+  uint32_t trigger_timeout_ms() const { return at<3>().as_uint32(); }
+};
+
+class TraceConfig_TriggerConfig : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_TriggerConfig_Decoder;
+  enum : int32_t {
+    kTriggerModeFieldNumber = 1,
+    kTriggersFieldNumber = 2,
+    kTriggerTimeoutMsFieldNumber = 3,
+  };
+  using Trigger = ::perfetto::protos::pbzero::TraceConfig_TriggerConfig_Trigger;
+  using TriggerMode = ::perfetto::protos::pbzero::TraceConfig_TriggerConfig_TriggerMode;
+  static const TriggerMode UNSPECIFIED = TraceConfig_TriggerConfig_TriggerMode_UNSPECIFIED;
+  static const TriggerMode START_TRACING = TraceConfig_TriggerConfig_TriggerMode_START_TRACING;
+  static const TriggerMode STOP_TRACING = TraceConfig_TriggerConfig_TriggerMode_STOP_TRACING;
+
+  using FieldMetadata_TriggerMode =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::TraceConfig_TriggerConfig_TriggerMode,
+      TraceConfig_TriggerConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggerMode kTriggerMode() { return {}; }
+  void set_trigger_mode(::perfetto::protos::pbzero::TraceConfig_TriggerConfig_TriggerMode value) {
+    static constexpr uint32_t field_id = FieldMetadata_TriggerMode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Triggers =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_TriggerConfig_Trigger,
+      TraceConfig_TriggerConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Triggers kTriggers() { return {}; }
+  template <typename T = TraceConfig_TriggerConfig_Trigger> T* add_triggers() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_TriggerTimeoutMs =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig_TriggerConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggerTimeoutMs kTriggerTimeoutMs() { return {}; }
+  void set_trigger_timeout_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TriggerTimeoutMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_TriggerConfig_Trigger_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_TriggerConfig_Trigger_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_TriggerConfig_Trigger_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_TriggerConfig_Trigger_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_producer_name_regex() const { return at<2>().valid(); }
+  ::protozero::ConstChars producer_name_regex() const { return at<2>().as_string(); }
+  bool has_stop_delay_ms() const { return at<3>().valid(); }
+  uint32_t stop_delay_ms() const { return at<3>().as_uint32(); }
+  bool has_max_per_24_h() const { return at<4>().valid(); }
+  uint32_t max_per_24_h() const { return at<4>().as_uint32(); }
+  bool has_skip_probability() const { return at<5>().valid(); }
+  double skip_probability() const { return at<5>().as_double(); }
+};
+
+class TraceConfig_TriggerConfig_Trigger : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_TriggerConfig_Trigger_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kProducerNameRegexFieldNumber = 2,
+    kStopDelayMsFieldNumber = 3,
+    kMaxPer24HFieldNumber = 4,
+    kSkipProbabilityFieldNumber = 5,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig_TriggerConfig_Trigger>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProducerNameRegex =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig_TriggerConfig_Trigger>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProducerNameRegex kProducerNameRegex() { return {}; }
+  void set_producer_name_regex(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ProducerNameRegex::kFieldId, data, size);
+  }
+  void set_producer_name_regex(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProducerNameRegex::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StopDelayMs =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig_TriggerConfig_Trigger>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StopDelayMs kStopDelayMs() { return {}; }
+  void set_stop_delay_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_StopDelayMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MaxPer24H =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig_TriggerConfig_Trigger>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MaxPer24H kMaxPer24H() { return {}; }
+  void set_max_per_24_h(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MaxPer24H::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SkipProbability =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      TraceConfig_TriggerConfig_Trigger>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SkipProbability kSkipProbability() { return {}; }
+  void set_skip_probability(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_SkipProbability::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_GuardrailOverrides_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_GuardrailOverrides_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_GuardrailOverrides_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_GuardrailOverrides_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_max_upload_per_day_bytes() const { return at<1>().valid(); }
+  uint64_t max_upload_per_day_bytes() const { return at<1>().as_uint64(); }
+};
+
+class TraceConfig_GuardrailOverrides : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_GuardrailOverrides_Decoder;
+  enum : int32_t {
+    kMaxUploadPerDayBytesFieldNumber = 1,
+  };
+
+  using FieldMetadata_MaxUploadPerDayBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceConfig_GuardrailOverrides>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MaxUploadPerDayBytes kMaxUploadPerDayBytes() { return {}; }
+  void set_max_upload_per_day_bytes(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MaxUploadPerDayBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_StatsdMetadata_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_StatsdMetadata_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_StatsdMetadata_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_StatsdMetadata_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_triggering_alert_id() const { return at<1>().valid(); }
+  int64_t triggering_alert_id() const { return at<1>().as_int64(); }
+  bool has_triggering_config_uid() const { return at<2>().valid(); }
+  int32_t triggering_config_uid() const { return at<2>().as_int32(); }
+  bool has_triggering_config_id() const { return at<3>().valid(); }
+  int64_t triggering_config_id() const { return at<3>().as_int64(); }
+  bool has_triggering_subscription_id() const { return at<4>().valid(); }
+  int64_t triggering_subscription_id() const { return at<4>().as_int64(); }
+};
+
+class TraceConfig_StatsdMetadata : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_StatsdMetadata_Decoder;
+  enum : int32_t {
+    kTriggeringAlertIdFieldNumber = 1,
+    kTriggeringConfigUidFieldNumber = 2,
+    kTriggeringConfigIdFieldNumber = 3,
+    kTriggeringSubscriptionIdFieldNumber = 4,
+  };
+
+  using FieldMetadata_TriggeringAlertId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TraceConfig_StatsdMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggeringAlertId kTriggeringAlertId() { return {}; }
+  void set_triggering_alert_id(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TriggeringAlertId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TriggeringConfigUid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TraceConfig_StatsdMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggeringConfigUid kTriggeringConfigUid() { return {}; }
+  void set_triggering_config_uid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TriggeringConfigUid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TriggeringConfigId =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TraceConfig_StatsdMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggeringConfigId kTriggeringConfigId() { return {}; }
+  void set_triggering_config_id(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TriggeringConfigId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TriggeringSubscriptionId =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TraceConfig_StatsdMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggeringSubscriptionId kTriggeringSubscriptionId() { return {}; }
+  void set_triggering_subscription_id(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TriggeringSubscriptionId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_ProducerConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_ProducerConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_ProducerConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_ProducerConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_producer_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars producer_name() const { return at<1>().as_string(); }
+  bool has_shm_size_kb() const { return at<2>().valid(); }
+  uint32_t shm_size_kb() const { return at<2>().as_uint32(); }
+  bool has_page_size_kb() const { return at<3>().valid(); }
+  uint32_t page_size_kb() const { return at<3>().as_uint32(); }
+};
+
+class TraceConfig_ProducerConfig : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_ProducerConfig_Decoder;
+  enum : int32_t {
+    kProducerNameFieldNumber = 1,
+    kShmSizeKbFieldNumber = 2,
+    kPageSizeKbFieldNumber = 3,
+  };
+
+  using FieldMetadata_ProducerName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig_ProducerConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProducerName kProducerName() { return {}; }
+  void set_producer_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ProducerName::kFieldId, data, size);
+  }
+  void set_producer_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProducerName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ShmSizeKb =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig_ProducerConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ShmSizeKb kShmSizeKb() { return {}; }
+  void set_shm_size_kb(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ShmSizeKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PageSizeKb =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig_ProducerConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PageSizeKb kPageSizeKb() { return {}; }
+  void set_page_size_kb(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PageSizeKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_BuiltinDataSource_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_BuiltinDataSource_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_BuiltinDataSource_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_BuiltinDataSource_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_disable_clock_snapshotting() const { return at<1>().valid(); }
+  bool disable_clock_snapshotting() const { return at<1>().as_bool(); }
+  bool has_disable_trace_config() const { return at<2>().valid(); }
+  bool disable_trace_config() const { return at<2>().as_bool(); }
+  bool has_disable_system_info() const { return at<3>().valid(); }
+  bool disable_system_info() const { return at<3>().as_bool(); }
+  bool has_disable_service_events() const { return at<4>().valid(); }
+  bool disable_service_events() const { return at<4>().as_bool(); }
+  bool has_primary_trace_clock() const { return at<5>().valid(); }
+  int32_t primary_trace_clock() const { return at<5>().as_int32(); }
+  bool has_snapshot_interval_ms() const { return at<6>().valid(); }
+  uint32_t snapshot_interval_ms() const { return at<6>().as_uint32(); }
+  bool has_prefer_suspend_clock_for_snapshot() const { return at<7>().valid(); }
+  bool prefer_suspend_clock_for_snapshot() const { return at<7>().as_bool(); }
+};
+
+class TraceConfig_BuiltinDataSource : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_BuiltinDataSource_Decoder;
+  enum : int32_t {
+    kDisableClockSnapshottingFieldNumber = 1,
+    kDisableTraceConfigFieldNumber = 2,
+    kDisableSystemInfoFieldNumber = 3,
+    kDisableServiceEventsFieldNumber = 4,
+    kPrimaryTraceClockFieldNumber = 5,
+    kSnapshotIntervalMsFieldNumber = 6,
+    kPreferSuspendClockForSnapshotFieldNumber = 7,
+  };
+
+  using FieldMetadata_DisableClockSnapshotting =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig_BuiltinDataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DisableClockSnapshotting kDisableClockSnapshotting() { return {}; }
+  void set_disable_clock_snapshotting(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DisableClockSnapshotting::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DisableTraceConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig_BuiltinDataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DisableTraceConfig kDisableTraceConfig() { return {}; }
+  void set_disable_trace_config(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DisableTraceConfig::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DisableSystemInfo =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig_BuiltinDataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DisableSystemInfo kDisableSystemInfo() { return {}; }
+  void set_disable_system_info(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DisableSystemInfo::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DisableServiceEvents =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig_BuiltinDataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DisableServiceEvents kDisableServiceEvents() { return {}; }
+  void set_disable_service_events(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DisableServiceEvents::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PrimaryTraceClock =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::BuiltinClock,
+      TraceConfig_BuiltinDataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PrimaryTraceClock kPrimaryTraceClock() { return {}; }
+  void set_primary_trace_clock(::perfetto::protos::pbzero::BuiltinClock value) {
+    static constexpr uint32_t field_id = FieldMetadata_PrimaryTraceClock::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SnapshotIntervalMs =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig_BuiltinDataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SnapshotIntervalMs kSnapshotIntervalMs() { return {}; }
+  void set_snapshot_interval_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SnapshotIntervalMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PreferSuspendClockForSnapshot =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig_BuiltinDataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PreferSuspendClockForSnapshot kPreferSuspendClockForSnapshot() { return {}; }
+  void set_prefer_suspend_clock_for_snapshot(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_PreferSuspendClockForSnapshot::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_DataSource_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TraceConfig_DataSource_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_DataSource_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_DataSource_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_config() const { return at<1>().valid(); }
+  ::protozero::ConstBytes config() const { return at<1>().as_bytes(); }
+  bool has_producer_name_filter() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> producer_name_filter() const { return GetRepeated<::protozero::ConstChars>(2); }
+  bool has_producer_name_regex_filter() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> producer_name_regex_filter() const { return GetRepeated<::protozero::ConstChars>(3); }
+};
+
+class TraceConfig_DataSource : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_DataSource_Decoder;
+  enum : int32_t {
+    kConfigFieldNumber = 1,
+    kProducerNameFilterFieldNumber = 2,
+    kProducerNameRegexFilterFieldNumber = 3,
+  };
+
+  using FieldMetadata_Config =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DataSourceConfig,
+      TraceConfig_DataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Config kConfig() { return {}; }
+  template <typename T = DataSourceConfig> T* set_config() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_ProducerNameFilter =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig_DataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProducerNameFilter kProducerNameFilter() { return {}; }
+  void add_producer_name_filter(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ProducerNameFilter::kFieldId, data, size);
+  }
+  void add_producer_name_filter(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProducerNameFilter::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProducerNameRegexFilter =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig_DataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProducerNameRegexFilter kProducerNameRegexFilter() { return {}; }
+  void add_producer_name_regex_filter(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ProducerNameRegexFilter::kFieldId, data, size);
+  }
+  void add_producer_name_regex_filter(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProducerNameRegexFilter::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_BufferConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_BufferConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_BufferConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_BufferConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_size_kb() const { return at<1>().valid(); }
+  uint32_t size_kb() const { return at<1>().as_uint32(); }
+  bool has_fill_policy() const { return at<4>().valid(); }
+  int32_t fill_policy() const { return at<4>().as_int32(); }
+};
+
+class TraceConfig_BufferConfig : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_BufferConfig_Decoder;
+  enum : int32_t {
+    kSizeKbFieldNumber = 1,
+    kFillPolicyFieldNumber = 4,
+  };
+  using FillPolicy = ::perfetto::protos::pbzero::TraceConfig_BufferConfig_FillPolicy;
+  static const FillPolicy UNSPECIFIED = TraceConfig_BufferConfig_FillPolicy_UNSPECIFIED;
+  static const FillPolicy RING_BUFFER = TraceConfig_BufferConfig_FillPolicy_RING_BUFFER;
+  static const FillPolicy DISCARD = TraceConfig_BufferConfig_FillPolicy_DISCARD;
+
+  using FieldMetadata_SizeKb =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig_BufferConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SizeKb kSizeKb() { return {}; }
+  void set_size_kb(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SizeKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FillPolicy =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::TraceConfig_BufferConfig_FillPolicy,
+      TraceConfig_BufferConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FillPolicy kFillPolicy() { return {}; }
+  void set_fill_policy(::perfetto::protos::pbzero::TraceConfig_BufferConfig_FillPolicy value) {
+    static constexpr uint32_t field_id = FieldMetadata_FillPolicy::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/clock_snapshot.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_CLOCK_SNAPSHOT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_CLOCK_SNAPSHOT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class ClockSnapshot_Clock;
+enum BuiltinClock : int32_t;
+
+enum ClockSnapshot_Clock_BuiltinClocks : int32_t {
+  ClockSnapshot_Clock_BuiltinClocks_UNKNOWN = 0,
+  ClockSnapshot_Clock_BuiltinClocks_REALTIME = 1,
+  ClockSnapshot_Clock_BuiltinClocks_REALTIME_COARSE = 2,
+  ClockSnapshot_Clock_BuiltinClocks_MONOTONIC = 3,
+  ClockSnapshot_Clock_BuiltinClocks_MONOTONIC_COARSE = 4,
+  ClockSnapshot_Clock_BuiltinClocks_MONOTONIC_RAW = 5,
+  ClockSnapshot_Clock_BuiltinClocks_BOOTTIME = 6,
+  ClockSnapshot_Clock_BuiltinClocks_BUILTIN_CLOCK_MAX_ID = 63,
+};
+
+const ClockSnapshot_Clock_BuiltinClocks ClockSnapshot_Clock_BuiltinClocks_MIN = ClockSnapshot_Clock_BuiltinClocks_UNKNOWN;
+const ClockSnapshot_Clock_BuiltinClocks ClockSnapshot_Clock_BuiltinClocks_MAX = ClockSnapshot_Clock_BuiltinClocks_BUILTIN_CLOCK_MAX_ID;
+
+class ClockSnapshot_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ClockSnapshot_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ClockSnapshot_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ClockSnapshot_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_clocks() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> clocks() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_primary_trace_clock() const { return at<2>().valid(); }
+  int32_t primary_trace_clock() const { return at<2>().as_int32(); }
+};
+
+class ClockSnapshot : public ::protozero::Message {
+ public:
+  using Decoder = ClockSnapshot_Decoder;
+  enum : int32_t {
+    kClocksFieldNumber = 1,
+    kPrimaryTraceClockFieldNumber = 2,
+  };
+  using Clock = ::perfetto::protos::pbzero::ClockSnapshot_Clock;
+
+  using FieldMetadata_Clocks =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ClockSnapshot_Clock,
+      ClockSnapshot>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Clocks kClocks() { return {}; }
+  template <typename T = ClockSnapshot_Clock> T* add_clocks() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_PrimaryTraceClock =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::BuiltinClock,
+      ClockSnapshot>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PrimaryTraceClock kPrimaryTraceClock() { return {}; }
+  void set_primary_trace_clock(::perfetto::protos::pbzero::BuiltinClock value) {
+    static constexpr uint32_t field_id = FieldMetadata_PrimaryTraceClock::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ClockSnapshot_Clock_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ClockSnapshot_Clock_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ClockSnapshot_Clock_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ClockSnapshot_Clock_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_clock_id() const { return at<1>().valid(); }
+  uint32_t clock_id() const { return at<1>().as_uint32(); }
+  bool has_timestamp() const { return at<2>().valid(); }
+  uint64_t timestamp() const { return at<2>().as_uint64(); }
+  bool has_is_incremental() const { return at<3>().valid(); }
+  bool is_incremental() const { return at<3>().as_bool(); }
+  bool has_unit_multiplier_ns() const { return at<4>().valid(); }
+  uint64_t unit_multiplier_ns() const { return at<4>().as_uint64(); }
+};
+
+class ClockSnapshot_Clock : public ::protozero::Message {
+ public:
+  using Decoder = ClockSnapshot_Clock_Decoder;
+  enum : int32_t {
+    kClockIdFieldNumber = 1,
+    kTimestampFieldNumber = 2,
+    kIsIncrementalFieldNumber = 3,
+    kUnitMultiplierNsFieldNumber = 4,
+  };
+  using BuiltinClocks = ::perfetto::protos::pbzero::ClockSnapshot_Clock_BuiltinClocks;
+  static const BuiltinClocks UNKNOWN = ClockSnapshot_Clock_BuiltinClocks_UNKNOWN;
+  static const BuiltinClocks REALTIME = ClockSnapshot_Clock_BuiltinClocks_REALTIME;
+  static const BuiltinClocks REALTIME_COARSE = ClockSnapshot_Clock_BuiltinClocks_REALTIME_COARSE;
+  static const BuiltinClocks MONOTONIC = ClockSnapshot_Clock_BuiltinClocks_MONOTONIC;
+  static const BuiltinClocks MONOTONIC_COARSE = ClockSnapshot_Clock_BuiltinClocks_MONOTONIC_COARSE;
+  static const BuiltinClocks MONOTONIC_RAW = ClockSnapshot_Clock_BuiltinClocks_MONOTONIC_RAW;
+  static const BuiltinClocks BOOTTIME = ClockSnapshot_Clock_BuiltinClocks_BOOTTIME;
+  static const BuiltinClocks BUILTIN_CLOCK_MAX_ID = ClockSnapshot_Clock_BuiltinClocks_BUILTIN_CLOCK_MAX_ID;
+
+  using FieldMetadata_ClockId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ClockSnapshot_Clock>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClockId kClockId() { return {}; }
+  void set_clock_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ClockId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Timestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ClockSnapshot_Clock>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Timestamp kTimestamp() { return {}; }
+  void set_timestamp(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Timestamp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IsIncremental =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ClockSnapshot_Clock>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IsIncremental kIsIncremental() { return {}; }
+  void set_is_incremental(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_IsIncremental::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UnitMultiplierNs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ClockSnapshot_Clock>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UnitMultiplierNs kUnitMultiplierNs() { return {}; }
+  void set_unit_multiplier_ns(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UnitMultiplierNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/perfetto/tracing_service_event.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PERFETTO_TRACING_SERVICE_EVENT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PERFETTO_TRACING_SERVICE_EVENT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class TracingServiceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TracingServiceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TracingServiceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TracingServiceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_tracing_started() const { return at<2>().valid(); }
+  bool tracing_started() const { return at<2>().as_bool(); }
+  bool has_all_data_sources_started() const { return at<1>().valid(); }
+  bool all_data_sources_started() const { return at<1>().as_bool(); }
+  bool has_all_data_sources_flushed() const { return at<3>().valid(); }
+  bool all_data_sources_flushed() const { return at<3>().as_bool(); }
+  bool has_read_tracing_buffers_completed() const { return at<4>().valid(); }
+  bool read_tracing_buffers_completed() const { return at<4>().as_bool(); }
+  bool has_tracing_disabled() const { return at<5>().valid(); }
+  bool tracing_disabled() const { return at<5>().as_bool(); }
+  bool has_seized_for_bugreport() const { return at<6>().valid(); }
+  bool seized_for_bugreport() const { return at<6>().as_bool(); }
+};
+
+class TracingServiceEvent : public ::protozero::Message {
+ public:
+  using Decoder = TracingServiceEvent_Decoder;
+  enum : int32_t {
+    kTracingStartedFieldNumber = 2,
+    kAllDataSourcesStartedFieldNumber = 1,
+    kAllDataSourcesFlushedFieldNumber = 3,
+    kReadTracingBuffersCompletedFieldNumber = 4,
+    kTracingDisabledFieldNumber = 5,
+    kSeizedForBugreportFieldNumber = 6,
+  };
+
+  using FieldMetadata_TracingStarted =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracingServiceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TracingStarted kTracingStarted() { return {}; }
+  void set_tracing_started(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_TracingStarted::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AllDataSourcesStarted =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracingServiceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllDataSourcesStarted kAllDataSourcesStarted() { return {}; }
+  void set_all_data_sources_started(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_AllDataSourcesStarted::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AllDataSourcesFlushed =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracingServiceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllDataSourcesFlushed kAllDataSourcesFlushed() { return {}; }
+  void set_all_data_sources_flushed(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_AllDataSourcesFlushed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReadTracingBuffersCompleted =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracingServiceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReadTracingBuffersCompleted kReadTracingBuffersCompleted() { return {}; }
+  void set_read_tracing_buffers_completed(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReadTracingBuffersCompleted::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TracingDisabled =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracingServiceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TracingDisabled kTracingDisabled() { return {}; }
+  void set_tracing_disabled(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_TracingDisabled::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SeizedForBugreport =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracingServiceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SeizedForBugreport kSeizedForBugreport() { return {}; }
+  void set_seized_for_bugreport(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_SeizedForBugreport::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/system_info.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_SYSTEM_INFO_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_SYSTEM_INFO_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class Utsname;
+
+class SystemInfo_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SystemInfo_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SystemInfo_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SystemInfo_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_utsname() const { return at<1>().valid(); }
+  ::protozero::ConstBytes utsname() const { return at<1>().as_bytes(); }
+  bool has_android_build_fingerprint() const { return at<2>().valid(); }
+  ::protozero::ConstChars android_build_fingerprint() const { return at<2>().as_string(); }
+  bool has_hz() const { return at<3>().valid(); }
+  int64_t hz() const { return at<3>().as_int64(); }
+  bool has_tracing_service_version() const { return at<4>().valid(); }
+  ::protozero::ConstChars tracing_service_version() const { return at<4>().as_string(); }
+};
+
+class SystemInfo : public ::protozero::Message {
+ public:
+  using Decoder = SystemInfo_Decoder;
+  enum : int32_t {
+    kUtsnameFieldNumber = 1,
+    kAndroidBuildFingerprintFieldNumber = 2,
+    kHzFieldNumber = 3,
+    kTracingServiceVersionFieldNumber = 4,
+  };
+
+  using FieldMetadata_Utsname =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Utsname,
+      SystemInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Utsname kUtsname() { return {}; }
+  template <typename T = Utsname> T* set_utsname() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_AndroidBuildFingerprint =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SystemInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AndroidBuildFingerprint kAndroidBuildFingerprint() { return {}; }
+  void set_android_build_fingerprint(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_AndroidBuildFingerprint::kFieldId, data, size);
+  }
+  void set_android_build_fingerprint(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_AndroidBuildFingerprint::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Hz =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      SystemInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Hz kHz() { return {}; }
+  void set_hz(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Hz::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TracingServiceVersion =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SystemInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TracingServiceVersion kTracingServiceVersion() { return {}; }
+  void set_tracing_service_version(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_TracingServiceVersion::kFieldId, data, size);
+  }
+  void set_tracing_service_version(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_TracingServiceVersion::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Utsname_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Utsname_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Utsname_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Utsname_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_sysname() const { return at<1>().valid(); }
+  ::protozero::ConstChars sysname() const { return at<1>().as_string(); }
+  bool has_version() const { return at<2>().valid(); }
+  ::protozero::ConstChars version() const { return at<2>().as_string(); }
+  bool has_release() const { return at<3>().valid(); }
+  ::protozero::ConstChars release() const { return at<3>().as_string(); }
+  bool has_machine() const { return at<4>().valid(); }
+  ::protozero::ConstChars machine() const { return at<4>().as_string(); }
+};
+
+class Utsname : public ::protozero::Message {
+ public:
+  using Decoder = Utsname_Decoder;
+  enum : int32_t {
+    kSysnameFieldNumber = 1,
+    kVersionFieldNumber = 2,
+    kReleaseFieldNumber = 3,
+    kMachineFieldNumber = 4,
+  };
+
+  using FieldMetadata_Sysname =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      Utsname>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sysname kSysname() { return {}; }
+  void set_sysname(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Sysname::kFieldId, data, size);
+  }
+  void set_sysname(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sysname::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Version =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      Utsname>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Version kVersion() { return {}; }
+  void set_version(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Version::kFieldId, data, size);
+  }
+  void set_version(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Version::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Release =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      Utsname>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Release kRelease() { return {}; }
+  void set_release(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Release::kFieldId, data, size);
+  }
+  void set_release(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Release::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Machine =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      Utsname>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Machine kMachine() { return {}; }
+  void set_machine(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Machine::kFieldId, data, size);
+  }
+  void set_machine(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Machine::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/trigger.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRIGGER_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRIGGER_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class Trigger_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Trigger_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Trigger_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Trigger_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_trigger_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars trigger_name() const { return at<1>().as_string(); }
+  bool has_producer_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars producer_name() const { return at<2>().as_string(); }
+  bool has_trusted_producer_uid() const { return at<3>().valid(); }
+  int32_t trusted_producer_uid() const { return at<3>().as_int32(); }
+};
+
+class Trigger : public ::protozero::Message {
+ public:
+  using Decoder = Trigger_Decoder;
+  enum : int32_t {
+    kTriggerNameFieldNumber = 1,
+    kProducerNameFieldNumber = 2,
+    kTrustedProducerUidFieldNumber = 3,
+  };
+
+  using FieldMetadata_TriggerName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      Trigger>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggerName kTriggerName() { return {}; }
+  void set_trigger_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_TriggerName::kFieldId, data, size);
+  }
+  void set_trigger_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_TriggerName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProducerName =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      Trigger>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProducerName kProducerName() { return {}; }
+  void set_producer_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ProducerName::kFieldId, data, size);
+  }
+  void set_producer_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProducerName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TrustedProducerUid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Trigger>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrustedProducerUid kTrustedProducerUid() { return {}; }
+  void set_trusted_producer_uid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TrustedProducerUid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/tracing/core/tracing_service_impl.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <string.h>
+#include <regex>
+#include <unordered_set>
+
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
+    !PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
+#include <sys/uio.h>
+#include <sys/utsname.h>
+#include <unistd.h>
+#endif
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+#include <sys/system_properties.h>
+#if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
+// gen_amalgamated expanded: #include "src/android_internal/lazy_library_loader.h"    // nogncheck
+// gen_amalgamated expanded: #include "src/android_internal/tracing_service_proxy.h"  // nogncheck
+#endif  // PERFETTO_ANDROID_BUILD
+#endif  // PERFETTO_OS_ANDROID
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) ||   \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+#define PERFETTO_HAS_CHMOD
+#include <sys/stat.h>
+#endif
+
+#include <algorithm>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/status.h"
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/metatrace.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/temp_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/version.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/watchdog.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/consumer.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/observable_events.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/producer.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/static_buffer.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/tracing_service_capabilities.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/tracing_service_state.h"
+// gen_amalgamated expanded: #include "src/android_stats/statsd_logging_helper.h"
+// gen_amalgamated expanded: #include "src/protozero/filtering/message_filter.h"
+// gen_amalgamated expanded: #include "src/tracing/core/packet_stream_validator.h"
+// gen_amalgamated expanded: #include "src/tracing/core/shared_memory_arbiter_impl.h"
+// gen_amalgamated expanded: #include "src/tracing/core/trace_buffer.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/trace_stats.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/trace_config.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/clock_snapshot.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/perfetto/tracing_service_event.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/system_info.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/trigger.pbzero.h"
+
+// General note: this class must assume that Producers are malicious and will
+// try to crash / exploit this class. We can trust pointers because they come
+// from the IPC layer, but we should never assume that that the producer calls
+// come in the right order or their arguments are sane / within bounds.
+
+// This is a macro because we want the call-site line number for the ELOG.
+#define PERFETTO_SVC_ERR(...) \
+  (PERFETTO_ELOG(__VA_ARGS__), ::perfetto::base::ErrStatus(__VA_ARGS__))
+
+namespace perfetto {
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && \
+    PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
+// These are the only SELinux approved dir for trace files that are created
+// directly by traced.
+const char* kTraceDirBasePath = "/data/misc/perfetto-traces/";
+const char* kAndroidProductionBugreportTracePath =
+    "/data/misc/perfetto-traces/bugreport/systrace.pftrace";
+#endif
+
+namespace {
+constexpr int kMaxBuffersPerConsumer = 128;
+constexpr uint32_t kDefaultSnapshotsIntervalMs = 10 * 1000;
+constexpr int kDefaultWriteIntoFilePeriodMs = 5000;
+constexpr int kMaxConcurrentTracingSessions = 15;
+constexpr int kMaxConcurrentTracingSessionsPerUid = 5;
+constexpr int kMaxConcurrentTracingSessionsForStatsdUid = 10;
+constexpr int64_t kMinSecondsBetweenTracesGuardrail = 5 * 60;
+
+constexpr uint32_t kMillisPerHour = 3600000;
+constexpr uint32_t kMillisPerDay = kMillisPerHour * 24;
+constexpr uint32_t kMaxTracingDurationMillis = 7 * 24 * kMillisPerHour;
+
+// These apply only if enable_extra_guardrails is true.
+constexpr uint32_t kGuardrailsMaxTracingBufferSizeKb = 128 * 1024;
+constexpr uint32_t kGuardrailsMaxTracingDurationMillis = 24 * kMillisPerHour;
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) || PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
+struct iovec {
+  void* iov_base;  // Address
+  size_t iov_len;  // Block size
+};
+
+// Simple implementation of writev. Note that this does not give the atomicity
+// guarantees of a real writev, but we don't depend on these (we aren't writing
+// to the same file from another thread).
+ssize_t writev(int fd, const struct iovec* iov, int iovcnt) {
+  ssize_t total_size = 0;
+  for (int i = 0; i < iovcnt; ++i) {
+    ssize_t current_size = base::WriteAll(fd, iov[i].iov_base, iov[i].iov_len);
+    if (current_size != static_cast<ssize_t>(iov[i].iov_len))
+      return -1;
+    total_size += current_size;
+  }
+  return total_size;
+}
+
+#define IOV_MAX 1024  // Linux compatible limit.
+
+#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) ||
+        // PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
+
+// Partially encodes a CommitDataRequest in an int32 for the purposes of
+// metatracing. Note that it encodes only the bottom 10 bits of the producer id
+// (which is technically 16 bits wide).
+//
+// Format (by bit range):
+// [   31 ][         30 ][             29:20 ][            19:10 ][        9:0]
+// [unused][has flush id][num chunks to patch][num chunks to move][producer id]
+static int32_t EncodeCommitDataRequest(ProducerID producer_id,
+                                       const CommitDataRequest& req_untrusted) {
+  uint32_t cmov = static_cast<uint32_t>(req_untrusted.chunks_to_move_size());
+  uint32_t cpatch = static_cast<uint32_t>(req_untrusted.chunks_to_patch_size());
+  uint32_t has_flush_id = req_untrusted.flush_request_id() != 0;
+
+  uint32_t mask = (1 << 10) - 1;
+  uint32_t acc = 0;
+  acc |= has_flush_id << 30;
+  acc |= (cpatch & mask) << 20;
+  acc |= (cmov & mask) << 10;
+  acc |= (producer_id & mask);
+  return static_cast<int32_t>(acc);
+}
+
+void SerializeAndAppendPacket(std::vector<TracePacket>* packets,
+                              std::vector<uint8_t> packet) {
+  Slice slice = Slice::Allocate(packet.size());
+  memcpy(slice.own_data(), packet.data(), packet.size());
+  packets->emplace_back();
+  packets->back().AddSlice(std::move(slice));
+}
+
+std::tuple<size_t /*shm_size*/, size_t /*page_size*/> EnsureValidShmSizes(
+    size_t shm_size,
+    size_t page_size) {
+  // Theoretically the max page size supported by the ABI is 64KB.
+  // However, the current implementation of TraceBuffer (the non-shared
+  // userspace buffer where the service copies data) supports at most
+  // 32K. Setting 64K "works" from the producer<>consumer viewpoint
+  // but then causes the data to be discarded when copying it into
+  // TraceBuffer.
+  constexpr size_t kMaxPageSize = 32 * 1024;
+  static_assert(kMaxPageSize <= SharedMemoryABI::kMaxPageSize, "");
+
+  if (page_size == 0)
+    page_size = TracingServiceImpl::kDefaultShmPageSize;
+  if (shm_size == 0)
+    shm_size = TracingServiceImpl::kDefaultShmSize;
+
+  page_size = std::min<size_t>(page_size, kMaxPageSize);
+  shm_size = std::min<size_t>(shm_size, TracingServiceImpl::kMaxShmSize);
+
+  // The tracing page size has to be multiple of 4K. On some systems (e.g. Mac
+  // on Arm64) the system page size can be larger (e.g., 16K). That doesn't
+  // matter here, because the tracing page size is just a logical partitioning
+  // and does not have any dependencies on kernel mm syscalls (read: it's fine
+  // to have trace page sizes of 4K on a system where the kernel page size is
+  // 16K).
+  bool page_size_is_valid = page_size >= SharedMemoryABI::kMinPageSize;
+  page_size_is_valid &= page_size % SharedMemoryABI::kMinPageSize == 0;
+
+  // Only allow power of two numbers of pages, i.e. 1, 2, 4, 8 pages.
+  size_t num_pages = page_size / SharedMemoryABI::kMinPageSize;
+  page_size_is_valid &= (num_pages & (num_pages - 1)) == 0;
+
+  if (!page_size_is_valid || shm_size < page_size ||
+      shm_size % page_size != 0) {
+    return std::make_tuple(TracingServiceImpl::kDefaultShmSize,
+                           TracingServiceImpl::kDefaultShmPageSize);
+  }
+  return std::make_tuple(shm_size, page_size);
+}
+
+bool NameMatchesFilter(const std::string& name,
+                       const std::vector<std::string>& name_filter,
+                       const std::vector<std::string>& name_regex_filter) {
+  bool filter_is_set = !name_filter.empty() || !name_regex_filter.empty();
+  if (!filter_is_set)
+    return true;
+  bool filter_matches = std::find(name_filter.begin(), name_filter.end(),
+                                  name) != name_filter.end();
+  bool filter_regex_matches =
+      std::find_if(name_regex_filter.begin(), name_regex_filter.end(),
+                   [&](const std::string& regex) {
+                     return std::regex_match(
+                         name, std::regex(regex, std::regex::extended));
+                   }) != name_regex_filter.end();
+  return filter_matches || filter_regex_matches;
+}
+
+// Used when:
+// 1. TraceConfig.write_into_file == true and output_path is not empty.
+// 2. Calling SaveTraceForBugreport(), from perfetto --save-for-bugreport.
+base::ScopedFile CreateTraceFile(const std::string& path, bool overwrite) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && \
+    PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
+  // This is NOT trying to preserve any security property, SELinux does that.
+  // It just improves the actionability of the error when people try to save the
+  // trace in a location that is not SELinux-allowed (a generic "permission
+  // denied" vs "don't put it here, put it there").
+  if (!base::StartsWith(path, kTraceDirBasePath)) {
+    PERFETTO_ELOG("Invalid output_path %s. On Android it must be within %s.",
+                  path.c_str(), kTraceDirBasePath);
+    return base::ScopedFile();
+  }
+#endif
+  // O_CREAT | O_EXCL will fail if the file exists already.
+  const int flags = O_RDWR | O_CREAT | (overwrite ? O_TRUNC : O_EXCL);
+  auto fd = base::OpenFile(path, flags, 0600);
+  if (fd) {
+#if defined(PERFETTO_HAS_CHMOD)
+    // Passing 0644 directly above won't work because of umask.
+    PERFETTO_CHECK(fchmod(*fd, 0644) == 0);
+#endif
+  } else {
+    PERFETTO_PLOG("Failed to create %s", path.c_str());
+  }
+  return fd;
+}
+
+std::string GetBugreportTmpPath() {
+  return GetBugreportPath() + ".tmp";
+}
+
+bool ShouldLogEvent(const TraceConfig& cfg) {
+  switch (cfg.statsd_logging()) {
+    case TraceConfig::STATSD_LOGGING_ENABLED:
+      return true;
+    case TraceConfig::STATSD_LOGGING_DISABLED:
+      return false;
+    case TraceConfig::STATSD_LOGGING_UNSPECIFIED:
+      // For backward compatibility with older versions of perfetto_cmd.
+      return cfg.enable_extra_guardrails();
+  }
+  PERFETTO_FATAL("For GCC");
+}
+
+}  // namespace
+
+// These constants instead are defined in the header because are used by tests.
+constexpr size_t TracingServiceImpl::kDefaultShmSize;
+constexpr size_t TracingServiceImpl::kDefaultShmPageSize;
+
+constexpr size_t TracingServiceImpl::kMaxShmSize;
+constexpr uint32_t TracingServiceImpl::kDataSourceStopTimeoutMs;
+constexpr uint8_t TracingServiceImpl::kSyncMarker[];
+
+std::string GetBugreportPath() {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) && \
+    PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
+  return kAndroidProductionBugreportTracePath;
+#else
+  // Only for tests, SaveTraceForBugreport is not used on other OSes.
+  return base::GetSysTempDir() + "/bugreport.pftrace";
+#endif
+}
+
+// static
+std::unique_ptr<TracingService> TracingService::CreateInstance(
+    std::unique_ptr<SharedMemory::Factory> shm_factory,
+    base::TaskRunner* task_runner) {
+  return std::unique_ptr<TracingService>(
+      new TracingServiceImpl(std::move(shm_factory), task_runner));
+}
+
+TracingServiceImpl::TracingServiceImpl(
+    std::unique_ptr<SharedMemory::Factory> shm_factory,
+    base::TaskRunner* task_runner)
+    : task_runner_(task_runner),
+      shm_factory_(std::move(shm_factory)),
+      uid_(base::GetCurrentUserId()),
+      buffer_ids_(kMaxTraceBufferID),
+      trigger_probability_rand_(
+          static_cast<uint32_t>(base::GetWallTimeNs().count())),
+      weak_ptr_factory_(this) {
+  PERFETTO_DCHECK(task_runner_);
+}
+
+TracingServiceImpl::~TracingServiceImpl() {
+  // TODO(fmayer): handle teardown of all Producer.
+}
+
+std::unique_ptr<TracingService::ProducerEndpoint>
+TracingServiceImpl::ConnectProducer(Producer* producer,
+                                    uid_t uid,
+                                    const std::string& producer_name,
+                                    size_t shared_memory_size_hint_bytes,
+                                    bool in_process,
+                                    ProducerSMBScrapingMode smb_scraping_mode,
+                                    size_t shared_memory_page_size_hint_bytes,
+                                    std::unique_ptr<SharedMemory> shm,
+                                    const std::string& sdk_version) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+
+  if (lockdown_mode_ && uid != base::GetCurrentUserId()) {
+    PERFETTO_DLOG("Lockdown mode. Rejecting producer with UID %ld",
+                  static_cast<unsigned long>(uid));
+    return nullptr;
+  }
+
+  if (producers_.size() >= kMaxProducerID) {
+    PERFETTO_DFATAL("Too many producers.");
+    return nullptr;
+  }
+  const ProducerID id = GetNextProducerID();
+  PERFETTO_DLOG("Producer %" PRIu16 " connected", id);
+
+  bool smb_scraping_enabled = smb_scraping_enabled_;
+  switch (smb_scraping_mode) {
+    case ProducerSMBScrapingMode::kDefault:
+      break;
+    case ProducerSMBScrapingMode::kEnabled:
+      smb_scraping_enabled = true;
+      break;
+    case ProducerSMBScrapingMode::kDisabled:
+      smb_scraping_enabled = false;
+      break;
+  }
+
+  std::unique_ptr<ProducerEndpointImpl> endpoint(new ProducerEndpointImpl(
+      id, uid, this, task_runner_, producer, producer_name, sdk_version,
+      in_process, smb_scraping_enabled));
+  auto it_and_inserted = producers_.emplace(id, endpoint.get());
+  PERFETTO_DCHECK(it_and_inserted.second);
+  endpoint->shmem_size_hint_bytes_ = shared_memory_size_hint_bytes;
+  endpoint->shmem_page_size_hint_bytes_ = shared_memory_page_size_hint_bytes;
+
+  // Producer::OnConnect() should run before Producer::OnTracingSetup(). The
+  // latter may be posted by SetupSharedMemory() below, so post OnConnect() now.
+  auto weak_ptr = endpoint->weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostTask([weak_ptr] {
+    if (weak_ptr)
+      weak_ptr->producer_->OnConnect();
+  });
+
+  if (shm) {
+    // The producer supplied an SMB. This is used only by Chrome; in the most
+    // common cases the SMB is created by the service and passed via
+    // OnTracingSetup(). Verify that it is correctly sized before we attempt to
+    // use it. The transport layer has to verify the integrity of the SMB (e.g.
+    // ensure that the producer can't resize if after the fact).
+    size_t shm_size, page_size;
+    std::tie(shm_size, page_size) =
+        EnsureValidShmSizes(shm->size(), endpoint->shmem_page_size_hint_bytes_);
+    if (shm_size == shm->size() &&
+        page_size == endpoint->shmem_page_size_hint_bytes_) {
+      PERFETTO_DLOG(
+          "Adopting producer-provided SMB of %zu kB for producer \"%s\"",
+          shm_size / 1024, endpoint->name_.c_str());
+      endpoint->SetupSharedMemory(std::move(shm), page_size,
+                                  /*provided_by_producer=*/true);
+    } else {
+      PERFETTO_LOG(
+          "Discarding incorrectly sized producer-provided SMB for producer "
+          "\"%s\", falling back to service-provided SMB. Requested sizes: %zu "
+          "B total, %zu B page size; suggested corrected sizes: %zu B total, "
+          "%zu B page size",
+          endpoint->name_.c_str(), shm->size(),
+          endpoint->shmem_page_size_hint_bytes_, shm_size, page_size);
+      shm.reset();
+    }
+  }
+
+  return std::unique_ptr<ProducerEndpoint>(std::move(endpoint));
+}
+
+void TracingServiceImpl::DisconnectProducer(ProducerID id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DLOG("Producer %" PRIu16 " disconnected", id);
+  PERFETTO_DCHECK(producers_.count(id));
+
+  // Scrape remaining chunks for this producer to ensure we don't lose data.
+  if (auto* producer = GetProducer(id)) {
+    for (auto& session_id_and_session : tracing_sessions_)
+      ScrapeSharedMemoryBuffers(&session_id_and_session.second, producer);
+  }
+
+  for (auto it = data_sources_.begin(); it != data_sources_.end();) {
+    auto next = it;
+    next++;
+    if (it->second.producer_id == id)
+      UnregisterDataSource(id, it->second.descriptor.name());
+    it = next;
+  }
+
+  producers_.erase(id);
+  UpdateMemoryGuardrail();
+}
+
+TracingServiceImpl::ProducerEndpointImpl* TracingServiceImpl::GetProducer(
+    ProducerID id) const {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto it = producers_.find(id);
+  if (it == producers_.end())
+    return nullptr;
+  return it->second;
+}
+
+std::unique_ptr<TracingService::ConsumerEndpoint>
+TracingServiceImpl::ConnectConsumer(Consumer* consumer, uid_t uid) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DLOG("Consumer %p connected from UID %" PRIu64,
+                reinterpret_cast<void*>(consumer), static_cast<uint64_t>(uid));
+  std::unique_ptr<ConsumerEndpointImpl> endpoint(
+      new ConsumerEndpointImpl(this, task_runner_, consumer, uid));
+  auto it_and_inserted = consumers_.emplace(endpoint.get());
+  PERFETTO_DCHECK(it_and_inserted.second);
+  // Consumer might go away before we're able to send the connect notification,
+  // if that is the case just bail out.
+  auto weak_ptr = endpoint->weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostTask([weak_ptr] {
+    if (weak_ptr)
+      weak_ptr->consumer_->OnConnect();
+  });
+  return std::unique_ptr<ConsumerEndpoint>(std::move(endpoint));
+}
+
+void TracingServiceImpl::DisconnectConsumer(ConsumerEndpointImpl* consumer) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DLOG("Consumer %p disconnected", reinterpret_cast<void*>(consumer));
+  PERFETTO_DCHECK(consumers_.count(consumer));
+
+  // TODO(primiano) : Check that this is safe (what happens if there are
+  // ReadBuffers() calls posted in the meantime? They need to become noop).
+  if (consumer->tracing_session_id_)
+    FreeBuffers(consumer->tracing_session_id_);  // Will also DisableTracing().
+  consumers_.erase(consumer);
+
+  // At this point no more pointers to |consumer| should be around.
+  PERFETTO_DCHECK(!std::any_of(
+      tracing_sessions_.begin(), tracing_sessions_.end(),
+      [consumer](const std::pair<const TracingSessionID, TracingSession>& kv) {
+        return kv.second.consumer_maybe_null == consumer;
+      }));
+}
+
+bool TracingServiceImpl::DetachConsumer(ConsumerEndpointImpl* consumer,
+                                        const std::string& key) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DLOG("Consumer %p detached", reinterpret_cast<void*>(consumer));
+  PERFETTO_DCHECK(consumers_.count(consumer));
+
+  TracingSessionID tsid = consumer->tracing_session_id_;
+  TracingSession* tracing_session;
+  if (!tsid || !(tracing_session = GetTracingSession(tsid)))
+    return false;
+
+  if (GetDetachedSession(consumer->uid_, key)) {
+    PERFETTO_ELOG("Another session has been detached with the same key \"%s\"",
+                  key.c_str());
+    return false;
+  }
+
+  PERFETTO_DCHECK(tracing_session->consumer_maybe_null == consumer);
+  tracing_session->consumer_maybe_null = nullptr;
+  tracing_session->detach_key = key;
+  consumer->tracing_session_id_ = 0;
+  return true;
+}
+
+bool TracingServiceImpl::AttachConsumer(ConsumerEndpointImpl* consumer,
+                                        const std::string& key) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DLOG("Consumer %p attaching to session %s",
+                reinterpret_cast<void*>(consumer), key.c_str());
+  PERFETTO_DCHECK(consumers_.count(consumer));
+
+  if (consumer->tracing_session_id_) {
+    PERFETTO_ELOG(
+        "Cannot reattach consumer to session %s"
+        " while it already attached tracing session ID %" PRIu64,
+        key.c_str(), consumer->tracing_session_id_);
+    return false;
+  }
+
+  auto* tracing_session = GetDetachedSession(consumer->uid_, key);
+  if (!tracing_session) {
+    PERFETTO_ELOG(
+        "Failed to attach consumer, session '%s' not found for uid %d",
+        key.c_str(), static_cast<int>(consumer->uid_));
+    return false;
+  }
+
+  consumer->tracing_session_id_ = tracing_session->id;
+  tracing_session->consumer_maybe_null = consumer;
+  tracing_session->detach_key.clear();
+  return true;
+}
+
+base::Status TracingServiceImpl::EnableTracing(ConsumerEndpointImpl* consumer,
+                                               const TraceConfig& cfg,
+                                               base::ScopedFile fd) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DLOG("Enabling tracing for consumer %p",
+                reinterpret_cast<void*>(consumer));
+  MaybeLogUploadEvent(cfg, PerfettoStatsdAtom::kTracedEnableTracing);
+  if (cfg.lockdown_mode() == TraceConfig::LOCKDOWN_SET)
+    lockdown_mode_ = true;
+  if (cfg.lockdown_mode() == TraceConfig::LOCKDOWN_CLEAR)
+    lockdown_mode_ = false;
+
+  // Scope |tracing_session| to this block to prevent accidental use of a null
+  // pointer later in this function.
+  {
+    TracingSession* tracing_session =
+        GetTracingSession(consumer->tracing_session_id_);
+    if (tracing_session) {
+      MaybeLogUploadEvent(
+          cfg, PerfettoStatsdAtom::kTracedEnableTracingExistingTraceSession);
+      return PERFETTO_SVC_ERR(
+          "A Consumer is trying to EnableTracing() but another tracing "
+          "session is already active (forgot a call to FreeBuffers() ?)");
+    }
+  }
+
+  const uint32_t max_duration_ms = cfg.enable_extra_guardrails()
+                                       ? kGuardrailsMaxTracingDurationMillis
+                                       : kMaxTracingDurationMillis;
+  if (cfg.duration_ms() > max_duration_ms) {
+    MaybeLogUploadEvent(cfg,
+                        PerfettoStatsdAtom::kTracedEnableTracingTooLongTrace);
+    return PERFETTO_SVC_ERR("Requested too long trace (%" PRIu32
+                            "ms  > %" PRIu32 " ms)",
+                            cfg.duration_ms(), max_duration_ms);
+  }
+
+  const bool has_trigger_config = cfg.trigger_config().trigger_mode() !=
+                                  TraceConfig::TriggerConfig::UNSPECIFIED;
+  if (has_trigger_config && (cfg.trigger_config().trigger_timeout_ms() == 0 ||
+                             cfg.trigger_config().trigger_timeout_ms() >
+                                 kGuardrailsMaxTracingDurationMillis)) {
+    MaybeLogUploadEvent(
+        cfg, PerfettoStatsdAtom::kTracedEnableTracingInvalidTriggerTimeout);
+    return PERFETTO_SVC_ERR(
+        "Traces with START_TRACING triggers must provide a positive "
+        "trigger_timeout_ms < 7 days (received %" PRIu32 "ms)",
+        cfg.trigger_config().trigger_timeout_ms());
+  }
+
+  if (has_trigger_config && cfg.duration_ms() != 0) {
+    MaybeLogUploadEvent(
+        cfg, PerfettoStatsdAtom::kTracedEnableTracingDurationWithTrigger);
+    return PERFETTO_SVC_ERR(
+        "duration_ms was set, this must not be set for traces with triggers.");
+  }
+
+  if (cfg.trigger_config().trigger_mode() ==
+          TraceConfig::TriggerConfig::STOP_TRACING &&
+      cfg.write_into_file()) {
+    // We don't support this usecase because there are subtle assumptions which
+    // break around TracingServiceEvents and windowed sorting (i.e. if we don't
+    // drain the events in ReadBuffers because we are waiting for STOP_TRACING,
+    // we can end up queueing up a lot of TracingServiceEvents and emitting them
+    // wildy out of order breaking windowed sorting in trace processor).
+    MaybeLogUploadEvent(
+        cfg, PerfettoStatsdAtom::kTracedEnableTracingStopTracingWriteIntoFile);
+    return PERFETTO_SVC_ERR(
+        "Specifying trigger mode STOP_TRACING and write_into_file together is "
+        "unsupported");
+  }
+
+  std::unordered_set<std::string> triggers;
+  for (const auto& trigger : cfg.trigger_config().triggers()) {
+    if (!triggers.insert(trigger.name()).second) {
+      MaybeLogUploadEvent(
+          cfg, PerfettoStatsdAtom::kTracedEnableTracingDuplicateTriggerName);
+      return PERFETTO_SVC_ERR("Duplicate trigger name: %s",
+                              trigger.name().c_str());
+    }
+  }
+
+  if (cfg.enable_extra_guardrails()) {
+    if (cfg.deferred_start()) {
+      MaybeLogUploadEvent(
+          cfg, PerfettoStatsdAtom::kTracedEnableTracingInvalidDeferredStart);
+      return PERFETTO_SVC_ERR(
+          "deferred_start=true is not supported in unsupervised traces");
+    }
+    uint64_t buf_size_sum = 0;
+    for (const auto& buf : cfg.buffers()) {
+      if (buf.size_kb() % 4 != 0) {
+        MaybeLogUploadEvent(
+            cfg, PerfettoStatsdAtom::kTracedEnableTracingInvalidBufferSize);
+        return PERFETTO_SVC_ERR(
+            "buffers.size_kb must be a multiple of 4, got %" PRIu32,
+            buf.size_kb());
+      }
+      buf_size_sum += buf.size_kb();
+    }
+    if (buf_size_sum > kGuardrailsMaxTracingBufferSizeKb) {
+      MaybeLogUploadEvent(
+          cfg, PerfettoStatsdAtom::kTracedEnableTracingBufferSizeTooLarge);
+      return PERFETTO_SVC_ERR("Requested too large trace buffer (%" PRIu64
+                              "kB  > %" PRIu32 " kB)",
+                              buf_size_sum, kGuardrailsMaxTracingBufferSizeKb);
+    }
+  }
+
+  if (cfg.buffers_size() > kMaxBuffersPerConsumer) {
+    MaybeLogUploadEvent(cfg,
+                        PerfettoStatsdAtom::kTracedEnableTracingTooManyBuffers);
+    return PERFETTO_SVC_ERR("Too many buffers configured (%d)",
+                            cfg.buffers_size());
+  }
+
+  if (!cfg.unique_session_name().empty()) {
+    const std::string& name = cfg.unique_session_name();
+    for (auto& kv : tracing_sessions_) {
+      if (kv.second.config.unique_session_name() == name) {
+        MaybeLogUploadEvent(
+            cfg, PerfettoStatsdAtom::kTracedEnableTracingDuplicateSessionName);
+        static const char fmt[] =
+            "A trace with this unique session name (%s) already exists";
+        // This happens frequently, don't make it an "E"LOG.
+        PERFETTO_LOG(fmt, name.c_str());
+        return base::ErrStatus(fmt, name.c_str());
+      }
+    }
+  }
+
+  if (cfg.enable_extra_guardrails()) {
+    // unique_session_name can be empty
+    const std::string& name = cfg.unique_session_name();
+    int64_t now_s = base::GetBootTimeS().count();
+
+    // Remove any entries where the time limit has passed so this map doesn't
+    // grow indefinitely:
+    std::map<std::string, int64_t>& sessions = session_to_last_trace_s_;
+    for (auto it = sessions.cbegin(); it != sessions.cend();) {
+      if (now_s - it->second > kMinSecondsBetweenTracesGuardrail) {
+        it = sessions.erase(it);
+      } else {
+        ++it;
+      }
+    }
+
+    int64_t& previous_s = session_to_last_trace_s_[name];
+    if (previous_s == 0) {
+      previous_s = now_s;
+    } else {
+      MaybeLogUploadEvent(
+          cfg, PerfettoStatsdAtom::kTracedEnableTracingSessionNameTooRecent);
+      return PERFETTO_SVC_ERR(
+          "A trace with unique session name \"%s\" began less than %" PRId64
+          "s ago (%" PRId64 "s)",
+          name.c_str(), kMinSecondsBetweenTracesGuardrail, now_s - previous_s);
+    }
+  }
+
+  const int sessions_for_uid = static_cast<int>(std::count_if(
+      tracing_sessions_.begin(), tracing_sessions_.end(),
+      [consumer](const decltype(tracing_sessions_)::value_type& s) {
+        return s.second.consumer_uid == consumer->uid_;
+      }));
+
+  int per_uid_limit = kMaxConcurrentTracingSessionsPerUid;
+  if (consumer->uid_ == 1066 /* AID_STATSD*/) {
+    per_uid_limit = kMaxConcurrentTracingSessionsForStatsdUid;
+  }
+  if (sessions_for_uid >= per_uid_limit) {
+    MaybeLogUploadEvent(
+        cfg, PerfettoStatsdAtom::kTracedEnableTracingTooManySessionsForUid);
+    return PERFETTO_SVC_ERR(
+        "Too many concurrent tracing sesions (%d) for uid %d limit is %d",
+        sessions_for_uid, static_cast<int>(consumer->uid_), per_uid_limit);
+  }
+
+  // TODO(primiano): This is a workaround to prevent that a producer gets stuck
+  // in a state where it stalls by design by having more TraceWriterImpl
+  // instances than free pages in the buffer. This is really a bug in
+  // trace_probes and the way it handles stalls in the shmem buffer.
+  if (tracing_sessions_.size() >= kMaxConcurrentTracingSessions) {
+    MaybeLogUploadEvent(
+        cfg, PerfettoStatsdAtom::kTracedEnableTracingTooManyConcurrentSessions);
+    return PERFETTO_SVC_ERR("Too many concurrent tracing sesions (%zu)",
+                            tracing_sessions_.size());
+  }
+
+  // If the trace config provides a filter bytecode, setup the filter now.
+  // If the filter loading fails, abort the tracing session rather than running
+  // unfiltered.
+  std::unique_ptr<protozero::MessageFilter> trace_filter;
+  if (cfg.has_trace_filter()) {
+    const auto& filt = cfg.trace_filter();
+    const std::string& bytecode = filt.bytecode();
+    trace_filter.reset(new protozero::MessageFilter());
+    if (!trace_filter->LoadFilterBytecode(bytecode.data(), bytecode.size())) {
+      MaybeLogUploadEvent(
+          cfg, PerfettoStatsdAtom::kTracedEnableTracingInvalidFilter);
+      return PERFETTO_SVC_ERR("Trace filter bytecode invalid, aborting");
+    }
+    // The filter is created using perfetto.protos.Trace as root message
+    // (because that makes it possible to play around with the `proto_filter`
+    // tool on actual traces). Here in the service, however, we deal with
+    // perfetto.protos.TracePacket(s), which are one level down (Trace.packet).
+    // The IPC client (or the write_into_filte logic in here) are responsible
+    // for pre-pending the packet preamble (See GetProtoPreamble() calls), but
+    // the preamble is not there at ReadBuffer time. Hence we change the root of
+    // the filtering to start at the Trace.packet level.
+    uint32_t packet_field_id = TracePacket::kPacketFieldNumber;
+    if (!trace_filter->SetFilterRoot(&packet_field_id, 1)) {
+      MaybeLogUploadEvent(
+          cfg, PerfettoStatsdAtom::kTracedEnableTracingInvalidFilter);
+      return PERFETTO_SVC_ERR("Failed to set filter root.");
+    }
+  }
+
+  const TracingSessionID tsid = ++last_tracing_session_id_;
+  TracingSession* tracing_session =
+      &tracing_sessions_
+           .emplace(std::piecewise_construct, std::forward_as_tuple(tsid),
+                    std::forward_as_tuple(tsid, consumer, cfg, task_runner_))
+           .first->second;
+
+  if (trace_filter)
+    tracing_session->trace_filter = std::move(trace_filter);
+
+  if (cfg.write_into_file()) {
+    if (!fd ^ !cfg.output_path().empty()) {
+      tracing_sessions_.erase(tsid);
+      MaybeLogUploadEvent(
+          tracing_session->config,
+          PerfettoStatsdAtom::kTracedEnableTracingInvalidFdOutputFile);
+      return PERFETTO_SVC_ERR(
+          "When write_into_file==true either a FD needs to be passed or "
+          "output_path must be populated (but not both)");
+    }
+    if (!cfg.output_path().empty()) {
+      fd = CreateTraceFile(cfg.output_path(), /*overwrite=*/false);
+      if (!fd) {
+        MaybeLogUploadEvent(
+            tracing_session->config,
+            PerfettoStatsdAtom::kTracedEnableTracingFailedToCreateFile);
+        tracing_sessions_.erase(tsid);
+        return PERFETTO_SVC_ERR("Failed to create the trace file %s",
+                                cfg.output_path().c_str());
+      }
+    }
+    tracing_session->write_into_file = std::move(fd);
+    uint32_t write_period_ms = cfg.file_write_period_ms();
+    if (write_period_ms == 0)
+      write_period_ms = kDefaultWriteIntoFilePeriodMs;
+    if (write_period_ms < min_write_period_ms_)
+      write_period_ms = min_write_period_ms_;
+    tracing_session->write_period_ms = write_period_ms;
+    tracing_session->max_file_size_bytes = cfg.max_file_size_bytes();
+    tracing_session->bytes_written_into_file = 0;
+  }
+
+  // Initialize the log buffers.
+  bool did_allocate_all_buffers = true;
+
+  // Allocate the trace buffers. Also create a map to translate a consumer
+  // relative index (TraceConfig.DataSourceConfig.target_buffer) into the
+  // corresponding BufferID, which is a global ID namespace for the service and
+  // all producers.
+  size_t total_buf_size_kb = 0;
+  const size_t num_buffers = static_cast<size_t>(cfg.buffers_size());
+  tracing_session->buffers_index.reserve(num_buffers);
+  for (size_t i = 0; i < num_buffers; i++) {
+    const TraceConfig::BufferConfig& buffer_cfg = cfg.buffers()[i];
+    BufferID global_id = buffer_ids_.Allocate();
+    if (!global_id) {
+      did_allocate_all_buffers = false;  // We ran out of IDs.
+      break;
+    }
+    tracing_session->buffers_index.push_back(global_id);
+    const size_t buf_size_bytes = buffer_cfg.size_kb() * 1024u;
+    total_buf_size_kb += buffer_cfg.size_kb();
+    TraceBuffer::OverwritePolicy policy =
+        buffer_cfg.fill_policy() == TraceConfig::BufferConfig::DISCARD
+            ? TraceBuffer::kDiscard
+            : TraceBuffer::kOverwrite;
+    auto it_and_inserted = buffers_.emplace(
+        global_id, TraceBuffer::Create(buf_size_bytes, policy));
+    PERFETTO_DCHECK(it_and_inserted.second);  // buffers_.count(global_id) == 0.
+    std::unique_ptr<TraceBuffer>& trace_buffer = it_and_inserted.first->second;
+    if (!trace_buffer) {
+      did_allocate_all_buffers = false;
+      break;
+    }
+  }
+
+  UpdateMemoryGuardrail();
+
+  // This can happen if either:
+  // - All the kMaxTraceBufferID slots are taken.
+  // - OOM, or, more relistically, we exhausted virtual memory.
+  // In any case, free all the previously allocated buffers and abort.
+  // TODO(fmayer): add a test to cover this case, this is quite subtle.
+  if (!did_allocate_all_buffers) {
+    for (BufferID global_id : tracing_session->buffers_index) {
+      buffer_ids_.Free(global_id);
+      buffers_.erase(global_id);
+    }
+    tracing_sessions_.erase(tsid);
+    MaybeLogUploadEvent(tracing_session->config,
+                        PerfettoStatsdAtom::kTracedEnableTracingOom);
+    return PERFETTO_SVC_ERR(
+        "Failed to allocate tracing buffers: OOM or too many buffers");
+  }
+
+  consumer->tracing_session_id_ = tsid;
+
+  // Setup the data sources on the producers without starting them.
+  for (const TraceConfig::DataSource& cfg_data_source : cfg.data_sources()) {
+    // Scan all the registered data sources with a matching name.
+    auto range = data_sources_.equal_range(cfg_data_source.config().name());
+    for (auto it = range.first; it != range.second; it++) {
+      TraceConfig::ProducerConfig producer_config;
+      for (auto& config : cfg.producers()) {
+        if (GetProducer(it->second.producer_id)->name_ ==
+            config.producer_name()) {
+          producer_config = config;
+          break;
+        }
+      }
+      SetupDataSource(cfg_data_source, producer_config, it->second,
+                      tracing_session);
+    }
+  }
+
+  bool has_start_trigger = false;
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  switch (cfg.trigger_config().trigger_mode()) {
+    case TraceConfig::TriggerConfig::UNSPECIFIED:
+      // no triggers are specified so this isn't a trace that is using triggers.
+      PERFETTO_DCHECK(!has_trigger_config);
+      break;
+    case TraceConfig::TriggerConfig::START_TRACING:
+      // For traces which use START_TRACE triggers we need to ensure that the
+      // tracing session will be cleaned up when it times out.
+      has_start_trigger = true;
+      task_runner_->PostDelayedTask(
+          [weak_this, tsid]() {
+            if (weak_this)
+              weak_this->OnStartTriggersTimeout(tsid);
+          },
+          cfg.trigger_config().trigger_timeout_ms());
+      break;
+    case TraceConfig::TriggerConfig::STOP_TRACING:
+      // Update the tracing_session's duration_ms to ensure that if no trigger
+      // is received the session will end and be cleaned up equal to the
+      // timeout.
+      //
+      // TODO(nuskos): Refactor this so that rather then modifying the config we
+      // have a field we look at on the tracing_session.
+      tracing_session->config.set_duration_ms(
+          cfg.trigger_config().trigger_timeout_ms());
+      break;
+  }
+
+  tracing_session->state = TracingSession::CONFIGURED;
+  PERFETTO_LOG(
+      "Configured tracing session %" PRIu64
+      ", #sources:%zu, duration:%d ms, #buffers:%d, total "
+      "buffer size:%zu KB, total sessions:%zu, uid:%d session name: \"%s\"",
+      tsid, cfg.data_sources().size(), tracing_session->config.duration_ms(),
+      cfg.buffers_size(), total_buf_size_kb, tracing_sessions_.size(),
+      static_cast<unsigned int>(consumer->uid_),
+      cfg.unique_session_name().c_str());
+
+  // Start the data sources, unless this is a case of early setup + fast
+  // triggering, either through TraceConfig.deferred_start or
+  // TraceConfig.trigger_config(). If both are specified which ever one occurs
+  // first will initiate the trace.
+  if (!cfg.deferred_start() && !has_start_trigger)
+    return StartTracing(tsid);
+
+  return base::OkStatus();
+}
+
+void TracingServiceImpl::ChangeTraceConfig(ConsumerEndpointImpl* consumer,
+                                           const TraceConfig& updated_cfg) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  TracingSession* tracing_session =
+      GetTracingSession(consumer->tracing_session_id_);
+  PERFETTO_DCHECK(tracing_session);
+
+  if ((tracing_session->state != TracingSession::STARTED) &&
+      (tracing_session->state != TracingSession::CONFIGURED)) {
+    PERFETTO_ELOG(
+        "ChangeTraceConfig() was called for a tracing session which isn't "
+        "running.");
+    return;
+  }
+
+  // We only support updating producer_name_{,regex}_filter (and pass-through
+  // configs) for now; null out any changeable fields and make sure the rest are
+  // identical.
+  TraceConfig new_config_copy(updated_cfg);
+  for (auto& ds_cfg : *new_config_copy.mutable_data_sources()) {
+    ds_cfg.clear_producer_name_filter();
+    ds_cfg.clear_producer_name_regex_filter();
+  }
+
+  TraceConfig current_config_copy(tracing_session->config);
+  for (auto& ds_cfg : *current_config_copy.mutable_data_sources()) {
+    ds_cfg.clear_producer_name_filter();
+    ds_cfg.clear_producer_name_regex_filter();
+  }
+
+  if (new_config_copy != current_config_copy) {
+    PERFETTO_LOG(
+        "ChangeTraceConfig() was called with a config containing unsupported "
+        "changes; only adding to the producer_name_{,regex}_filter is "
+        "currently supported and will have an effect.");
+  }
+
+  for (TraceConfig::DataSource& cfg_data_source :
+       *tracing_session->config.mutable_data_sources()) {
+    // Find the updated producer_filter in the new config.
+    std::vector<std::string> new_producer_name_filter;
+    std::vector<std::string> new_producer_name_regex_filter;
+    bool found_data_source = false;
+    for (const auto& it : updated_cfg.data_sources()) {
+      if (cfg_data_source.config().name() == it.config().name()) {
+        new_producer_name_filter = it.producer_name_filter();
+        new_producer_name_regex_filter = it.producer_name_regex_filter();
+        found_data_source = true;
+        break;
+      }
+    }
+
+    // Bail out if data source not present in the new config.
+    if (!found_data_source) {
+      PERFETTO_ELOG(
+          "ChangeTraceConfig() called without a current data source also "
+          "present in the new config: %s",
+          cfg_data_source.config().name().c_str());
+      continue;
+    }
+
+    // TODO(oysteine): Just replacing the filter means that if
+    // there are any filter entries which were present in the original config,
+    // but removed from the config passed to ChangeTraceConfig, any matching
+    // producers will keep producing but newly added producers after this
+    // point will never start.
+    *cfg_data_source.mutable_producer_name_filter() = new_producer_name_filter;
+    *cfg_data_source.mutable_producer_name_regex_filter() =
+        new_producer_name_regex_filter;
+
+    // Scan all the registered data sources with a matching name.
+    auto range = data_sources_.equal_range(cfg_data_source.config().name());
+    for (auto it = range.first; it != range.second; it++) {
+      ProducerEndpointImpl* producer = GetProducer(it->second.producer_id);
+      PERFETTO_DCHECK(producer);
+
+      // Check if the producer name of this data source is present
+      // in the name filters. We currently only support new filters, not
+      // removing old ones.
+      if (!NameMatchesFilter(producer->name_, new_producer_name_filter,
+                             new_producer_name_regex_filter)) {
+        continue;
+      }
+
+      bool already_setup = false;
+      auto& ds_instances = tracing_session->data_source_instances;
+      for (auto instance_it = ds_instances.begin();
+           instance_it != ds_instances.end(); ++instance_it) {
+        if (instance_it->first == it->second.producer_id &&
+            instance_it->second.data_source_name ==
+                cfg_data_source.config().name()) {
+          already_setup = true;
+          break;
+        }
+      }
+
+      if (already_setup)
+        continue;
+
+      // If it wasn't previously setup, set it up now.
+      // (The per-producer config is optional).
+      TraceConfig::ProducerConfig producer_config;
+      for (auto& config : tracing_session->config.producers()) {
+        if (producer->name_ == config.producer_name()) {
+          producer_config = config;
+          break;
+        }
+      }
+
+      DataSourceInstance* ds_inst = SetupDataSource(
+          cfg_data_source, producer_config, it->second, tracing_session);
+
+      if (ds_inst && tracing_session->state == TracingSession::STARTED)
+        StartDataSourceInstance(producer, tracing_session, ds_inst);
+    }
+  }
+}
+
+base::Status TracingServiceImpl::StartTracing(TracingSessionID tsid) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  TracingSession* tracing_session = GetTracingSession(tsid);
+  if (!tracing_session) {
+    return PERFETTO_SVC_ERR(
+        "StartTracing() failed, invalid session ID %" PRIu64, tsid);
+  }
+
+  MaybeLogUploadEvent(tracing_session->config,
+                      PerfettoStatsdAtom::kTracedStartTracing);
+
+  if (tracing_session->state != TracingSession::CONFIGURED) {
+    MaybeLogUploadEvent(
+        tracing_session->config,
+        PerfettoStatsdAtom::kTracedStartTracingInvalidSessionState);
+    return PERFETTO_SVC_ERR("StartTracing() failed, invalid session state: %d",
+                            tracing_session->state);
+  }
+
+  tracing_session->state = TracingSession::STARTED;
+
+  // We store the start of trace snapshot separately as it's important to make
+  // sure we can interpret all the data in the trace and storing it in the ring
+  // buffer means it could be overwritten by a later snapshot.
+  if (!tracing_session->config.builtin_data_sources()
+           .disable_clock_snapshotting()) {
+    SnapshotClocks(&tracing_session->initial_clock_snapshot);
+  }
+
+  // We don't snapshot the clocks here because we just did this above.
+  SnapshotLifecyleEvent(
+      tracing_session,
+      protos::pbzero::TracingServiceEvent::kTracingStartedFieldNumber,
+      false /* snapshot_clocks */);
+
+  // Periodically snapshot clocks, stats, sync markers while the trace is
+  // active. The snapshots are emitted on the future ReadBuffers() calls, which
+  // means that:
+  //  (a) If we're streaming to a file (or to a consumer) while tracing, we
+  //      write snapshots periodically into the trace.
+  //  (b) If ReadBuffers() is only called after tracing ends, we emit the latest
+  //      snapshot into the trace. For clock snapshots, we keep track of the
+  //      snapshot recorded at the beginning of the session
+  //      (initial_clock_snapshot above), as well as the most recent sampled
+  //      snapshots that showed significant new drift between different clocks.
+  //      The latter clock snapshots are sampled periodically and at lifecycle
+  //      events.
+  base::PeriodicTask::Args snapshot_task_args;
+  snapshot_task_args.start_first_task_immediately = true;
+  snapshot_task_args.use_suspend_aware_timer =
+      tracing_session->config.builtin_data_sources()
+          .prefer_suspend_clock_for_snapshot();
+  snapshot_task_args.task = [weak_this, tsid] {
+    if (weak_this)
+      weak_this->PeriodicSnapshotTask(tsid);
+  };
+  snapshot_task_args.period_ms =
+      tracing_session->config.builtin_data_sources().snapshot_interval_ms();
+  if (!snapshot_task_args.period_ms)
+    snapshot_task_args.period_ms = kDefaultSnapshotsIntervalMs;
+  tracing_session->snapshot_periodic_task.Start(snapshot_task_args);
+
+  // Trigger delayed task if the trace is time limited.
+  const uint32_t trace_duration_ms = tracing_session->config.duration_ms();
+  if (trace_duration_ms > 0) {
+    task_runner_->PostDelayedTask(
+        [weak_this, tsid] {
+          // Skip entirely the flush if the trace session doesn't exist anymore.
+          // This is to prevent misleading error messages to be logged.
+          if (!weak_this)
+            return;
+          auto* tracing_session_ptr = weak_this->GetTracingSession(tsid);
+          if (!tracing_session_ptr)
+            return;
+          // If this trace was using STOP_TRACING triggers and we've seen
+          // one, then the trigger overrides the normal timeout. In this
+          // case we just return and let the other task clean up this trace.
+          if (tracing_session_ptr->config.trigger_config().trigger_mode() ==
+                  TraceConfig::TriggerConfig::STOP_TRACING &&
+              !tracing_session_ptr->received_triggers.empty())
+            return;
+          // In all other cases (START_TRACING or no triggers) we flush
+          // after |trace_duration_ms| unconditionally.
+          weak_this->FlushAndDisableTracing(tsid);
+        },
+        trace_duration_ms);
+  }
+
+  // Start the periodic drain tasks if we should to save the trace into a file.
+  if (tracing_session->config.write_into_file()) {
+    task_runner_->PostDelayedTask(
+        [weak_this, tsid] {
+          if (weak_this)
+            weak_this->ReadBuffers(tsid, nullptr);
+        },
+        tracing_session->delay_to_next_write_period_ms());
+  }
+
+  // Start the periodic flush tasks if the config specified a flush period.
+  if (tracing_session->config.flush_period_ms())
+    PeriodicFlushTask(tsid, /*post_next_only=*/true);
+
+  // Start the periodic incremental state clear tasks if the config specified a
+  // period.
+  if (tracing_session->config.incremental_state_config().clear_period_ms()) {
+    PeriodicClearIncrementalStateTask(tsid, /*post_next_only=*/true);
+  }
+
+  for (auto& kv : tracing_session->data_source_instances) {
+    ProducerID producer_id = kv.first;
+    DataSourceInstance& data_source = kv.second;
+    ProducerEndpointImpl* producer = GetProducer(producer_id);
+    if (!producer) {
+      PERFETTO_DFATAL("Producer does not exist.");
+      continue;
+    }
+    StartDataSourceInstance(producer, tracing_session, &data_source);
+  }
+
+  MaybeNotifyAllDataSourcesStarted(tracing_session);
+  return base::OkStatus();
+}
+
+void TracingServiceImpl::StartDataSourceInstance(
+    ProducerEndpointImpl* producer,
+    TracingSession* tracing_session,
+    TracingServiceImpl::DataSourceInstance* instance) {
+  PERFETTO_DCHECK(instance->state == DataSourceInstance::CONFIGURED);
+  if (instance->will_notify_on_start) {
+    instance->state = DataSourceInstance::STARTING;
+  } else {
+    instance->state = DataSourceInstance::STARTED;
+  }
+  if (tracing_session->consumer_maybe_null) {
+    tracing_session->consumer_maybe_null->OnDataSourceInstanceStateChange(
+        *producer, *instance);
+  }
+  producer->StartDataSource(instance->instance_id, instance->config);
+
+  // If all data sources are started, notify the consumer.
+  if (instance->state == DataSourceInstance::STARTED)
+    MaybeNotifyAllDataSourcesStarted(tracing_session);
+}
+
+// DisableTracing just stops the data sources but doesn't free up any buffer.
+// This is to allow the consumer to freeze the buffers (by stopping the trace)
+// and then drain the buffers. The actual teardown of the TracingSession happens
+// in FreeBuffers().
+void TracingServiceImpl::DisableTracing(TracingSessionID tsid,
+                                        bool disable_immediately) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  TracingSession* tracing_session = GetTracingSession(tsid);
+  if (!tracing_session) {
+    // Can happen if the consumer calls this before EnableTracing() or after
+    // FreeBuffers().
+    PERFETTO_DLOG("DisableTracing() failed, invalid session ID %" PRIu64, tsid);
+    return;
+  }
+
+  MaybeLogUploadEvent(tracing_session->config,
+                      PerfettoStatsdAtom::kTracedDisableTracing);
+
+  switch (tracing_session->state) {
+    // Spurious call to DisableTracing() while already disabled, nothing to do.
+    case TracingSession::DISABLED:
+      PERFETTO_DCHECK(tracing_session->AllDataSourceInstancesStopped());
+      return;
+
+    // This is either:
+    // A) The case of a graceful DisableTracing() call followed by a call to
+    //    FreeBuffers(), iff |disable_immediately| == true. In this case we want
+    //    to forcefully transition in the disabled state without waiting for the
+    //    outstanding acks because the buffers are going to be destroyed soon.
+    // B) A spurious call, iff |disable_immediately| == false, in which case
+    //    there is nothing to do.
+    case TracingSession::DISABLING_WAITING_STOP_ACKS:
+      PERFETTO_DCHECK(!tracing_session->AllDataSourceInstancesStopped());
+      if (disable_immediately)
+        DisableTracingNotifyConsumerAndFlushFile(tracing_session);
+      return;
+
+    // Continues below.
+    case TracingSession::CONFIGURED:
+      // If the session didn't even start there is no need to orchestrate a
+      // graceful stop of data sources.
+      disable_immediately = true;
+      break;
+
+    // This is the nominal case, continues below.
+    case TracingSession::STARTED:
+      break;
+  }
+
+  for (auto& data_source_inst : tracing_session->data_source_instances) {
+    const ProducerID producer_id = data_source_inst.first;
+    DataSourceInstance& instance = data_source_inst.second;
+    ProducerEndpointImpl* producer = GetProducer(producer_id);
+    PERFETTO_DCHECK(producer);
+    PERFETTO_DCHECK(instance.state == DataSourceInstance::CONFIGURED ||
+                    instance.state == DataSourceInstance::STARTING ||
+                    instance.state == DataSourceInstance::STARTED);
+    StopDataSourceInstance(producer, tracing_session, &instance,
+                           disable_immediately);
+  }
+
+  // If the periodic task is running, we can stop the periodic snapshot timer
+  // here instead of waiting until FreeBuffers to prevent useless snapshots
+  // which won't be read.
+  tracing_session->snapshot_periodic_task.Reset();
+
+  // Either this request is flagged with |disable_immediately| or there are no
+  // data sources that are requesting a final handshake. In both cases just mark
+  // the session as disabled immediately, notify the consumer and flush the
+  // trace file (if used).
+  if (tracing_session->AllDataSourceInstancesStopped())
+    return DisableTracingNotifyConsumerAndFlushFile(tracing_session);
+
+  tracing_session->state = TracingSession::DISABLING_WAITING_STOP_ACKS;
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostDelayedTask(
+      [weak_this, tsid] {
+        if (weak_this)
+          weak_this->OnDisableTracingTimeout(tsid);
+      },
+      tracing_session->data_source_stop_timeout_ms());
+
+  // Deliberately NOT removing the session from |tracing_session_|, it's still
+  // needed to call ReadBuffers(). FreeBuffers() will erase() the session.
+}
+
+void TracingServiceImpl::NotifyDataSourceStarted(
+    ProducerID producer_id,
+    DataSourceInstanceID instance_id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  for (auto& kv : tracing_sessions_) {
+    TracingSession& tracing_session = kv.second;
+    DataSourceInstance* instance =
+        tracing_session.GetDataSourceInstance(producer_id, instance_id);
+
+    if (!instance)
+      continue;
+
+    // If the tracing session was already stopped, ignore this notification.
+    if (tracing_session.state != TracingSession::STARTED)
+      continue;
+
+    if (instance->state != DataSourceInstance::STARTING) {
+      PERFETTO_ELOG("Started data source instance in incorrect state: %d",
+                    instance->state);
+      continue;
+    }
+
+    instance->state = DataSourceInstance::STARTED;
+
+    ProducerEndpointImpl* producer = GetProducer(producer_id);
+    PERFETTO_DCHECK(producer);
+    if (tracing_session.consumer_maybe_null) {
+      tracing_session.consumer_maybe_null->OnDataSourceInstanceStateChange(
+          *producer, *instance);
+    }
+
+    // If all data sources are started, notify the consumer.
+    MaybeNotifyAllDataSourcesStarted(&tracing_session);
+  }  // for (tracing_session)
+}
+
+void TracingServiceImpl::MaybeNotifyAllDataSourcesStarted(
+    TracingSession* tracing_session) {
+  if (!tracing_session->consumer_maybe_null)
+    return;
+
+  if (!tracing_session->AllDataSourceInstancesStarted())
+    return;
+
+  // In some rare cases, we can get in this state more than once. Consider the
+  // following scenario: 3 data sources are registered -> trace starts ->
+  // all 3 data sources ack -> OnAllDataSourcesStarted() is called.
+  // Imagine now that a 4th data source registers while the trace is ongoing.
+  // This would hit the AllDataSourceInstancesStarted() condition again.
+  // In this case, however, we don't want to re-notify the consumer again.
+  // That would be unexpected (even if, perhaps, technically correct) and
+  // trigger bugs in the consumer.
+  if (tracing_session->did_notify_all_data_source_started)
+    return;
+
+  PERFETTO_DLOG("All data sources started");
+
+  SnapshotLifecyleEvent(
+      tracing_session,
+      protos::pbzero::TracingServiceEvent::kAllDataSourcesStartedFieldNumber,
+      true /* snapshot_clocks */);
+
+  tracing_session->did_notify_all_data_source_started = true;
+  tracing_session->consumer_maybe_null->OnAllDataSourcesStarted();
+}
+
+void TracingServiceImpl::NotifyDataSourceStopped(
+    ProducerID producer_id,
+    DataSourceInstanceID instance_id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  for (auto& kv : tracing_sessions_) {
+    TracingSession& tracing_session = kv.second;
+    DataSourceInstance* instance =
+        tracing_session.GetDataSourceInstance(producer_id, instance_id);
+
+    if (!instance)
+      continue;
+
+    if (instance->state != DataSourceInstance::STOPPING) {
+      PERFETTO_ELOG("Stopped data source instance in incorrect state: %d",
+                    instance->state);
+      continue;
+    }
+
+    instance->state = DataSourceInstance::STOPPED;
+
+    ProducerEndpointImpl* producer = GetProducer(producer_id);
+    PERFETTO_DCHECK(producer);
+    if (tracing_session.consumer_maybe_null) {
+      tracing_session.consumer_maybe_null->OnDataSourceInstanceStateChange(
+          *producer, *instance);
+    }
+
+    if (!tracing_session.AllDataSourceInstancesStopped())
+      continue;
+
+    if (tracing_session.state != TracingSession::DISABLING_WAITING_STOP_ACKS)
+      continue;
+
+    // All data sources acked the termination.
+    DisableTracingNotifyConsumerAndFlushFile(&tracing_session);
+  }  // for (tracing_session)
+}
+
+void TracingServiceImpl::ActivateTriggers(
+    ProducerID producer_id,
+    const std::vector<std::string>& triggers) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto* producer = GetProducer(producer_id);
+  PERFETTO_DCHECK(producer);
+
+  int64_t now_ns = base::GetBootTimeNs().count();
+  for (const auto& trigger_name : triggers) {
+    PERFETTO_DLOG("Received ActivateTriggers request for \"%s\"",
+                  trigger_name.c_str());
+    base::Hash hash;
+    hash.Update(trigger_name.c_str(), trigger_name.size());
+
+    uint64_t trigger_name_hash = hash.digest();
+    size_t count_in_window =
+        PurgeExpiredAndCountTriggerInWindow(now_ns, trigger_name_hash);
+
+    bool trigger_applied = false;
+    for (auto& id_and_tracing_session : tracing_sessions_) {
+      auto& tracing_session = id_and_tracing_session.second;
+      TracingSessionID tsid = id_and_tracing_session.first;
+      auto iter = std::find_if(
+          tracing_session.config.trigger_config().triggers().begin(),
+          tracing_session.config.trigger_config().triggers().end(),
+          [&trigger_name](const TraceConfig::TriggerConfig::Trigger& trigger) {
+            return trigger.name() == trigger_name;
+          });
+      if (iter == tracing_session.config.trigger_config().triggers().end()) {
+        continue;
+      }
+
+      // If this trigger requires a certain producer to have sent it
+      // (non-empty producer_name()) ensure the producer who sent this trigger
+      // matches.
+      if (!iter->producer_name_regex().empty() &&
+          !std::regex_match(
+              producer->name_,
+              std::regex(iter->producer_name_regex(), std::regex::extended))) {
+        continue;
+      }
+
+      // Use a random number between 0 and 1 to check if we should allow this
+      // trigger through or not.
+      double trigger_rnd =
+          trigger_rnd_override_for_testing_ > 0
+              ? trigger_rnd_override_for_testing_
+              : trigger_probability_dist_(trigger_probability_rand_);
+      PERFETTO_DCHECK(trigger_rnd >= 0 && trigger_rnd < 1);
+      if (trigger_rnd < iter->skip_probability()) {
+        MaybeLogTriggerEvent(tracing_session.config,
+                             PerfettoTriggerAtom::kTracedLimitProbability,
+                             trigger_name);
+        continue;
+      }
+
+      // If we already triggered more times than the limit, silently ignore
+      // this trigger.
+      if (iter->max_per_24_h() > 0 && count_in_window >= iter->max_per_24_h()) {
+        MaybeLogTriggerEvent(tracing_session.config,
+                             PerfettoTriggerAtom::kTracedLimitMaxPer24h,
+                             trigger_name);
+        continue;
+      }
+      trigger_applied = true;
+
+      const bool triggers_already_received =
+          !tracing_session.received_triggers.empty();
+      tracing_session.received_triggers.push_back(
+          {static_cast<uint64_t>(now_ns), iter->name(), producer->name_,
+           producer->uid_});
+      auto weak_this = weak_ptr_factory_.GetWeakPtr();
+      switch (tracing_session.config.trigger_config().trigger_mode()) {
+        case TraceConfig::TriggerConfig::START_TRACING:
+          // If the session has already been triggered and moved past
+          // CONFIGURED then we don't need to repeat StartTracing. This would
+          // work fine (StartTracing would return false) but would add error
+          // logs.
+          if (tracing_session.state != TracingSession::CONFIGURED)
+            break;
+
+          PERFETTO_DLOG("Triggering '%s' on tracing session %" PRIu64
+                        " with duration of %" PRIu32 "ms.",
+                        iter->name().c_str(), tsid, iter->stop_delay_ms());
+          MaybeLogUploadEvent(tracing_session.config,
+                              PerfettoStatsdAtom::kTracedTriggerStartTracing,
+                              iter->name());
+
+          // We override the trace duration to be the trigger's requested
+          // value, this ensures that the trace will end after this amount
+          // of time has passed.
+          tracing_session.config.set_duration_ms(iter->stop_delay_ms());
+          StartTracing(tsid);
+          break;
+        case TraceConfig::TriggerConfig::STOP_TRACING:
+          // Only stop the trace once to avoid confusing log messages. I.E.
+          // when we've already hit the first trigger we've already Posted the
+          // task to FlushAndDisable. So all future triggers will just break
+          // out.
+          if (triggers_already_received)
+            break;
+
+          PERFETTO_DLOG("Triggering '%s' on tracing session %" PRIu64
+                        " with duration of %" PRIu32 "ms.",
+                        iter->name().c_str(), tsid, iter->stop_delay_ms());
+          MaybeLogUploadEvent(tracing_session.config,
+                              PerfettoStatsdAtom::kTracedTriggerStopTracing,
+                              iter->name());
+
+          // Now that we've seen a trigger we need to stop, flush, and disable
+          // this session after the configured |stop_delay_ms|.
+          task_runner_->PostDelayedTask(
+              [weak_this, tsid] {
+                // Skip entirely the flush if the trace session doesn't exist
+                // anymore. This is to prevent misleading error messages to be
+                // logged.
+                if (weak_this && weak_this->GetTracingSession(tsid))
+                  weak_this->FlushAndDisableTracing(tsid);
+              },
+              // If this trigger is zero this will immediately executable and
+              // will happen shortly.
+              iter->stop_delay_ms());
+          break;
+        case TraceConfig::TriggerConfig::UNSPECIFIED:
+          PERFETTO_ELOG("Trigger activated but trigger mode unspecified.");
+          break;
+      }
+    }  // for (.. : tracing_sessions_)
+
+    if (trigger_applied) {
+      trigger_history_.emplace_back(TriggerHistory{now_ns, trigger_name_hash});
+    }
+  }
+}
+
+// Always invoked kDataSourceStopTimeoutMs after DisableTracing(). In nominal
+// conditions all data sources should have acked the stop and this will early
+// out.
+void TracingServiceImpl::OnDisableTracingTimeout(TracingSessionID tsid) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  TracingSession* tracing_session = GetTracingSession(tsid);
+  if (!tracing_session ||
+      tracing_session->state != TracingSession::DISABLING_WAITING_STOP_ACKS) {
+    return;  // Tracing session was successfully disabled.
+  }
+
+  PERFETTO_ILOG("Timeout while waiting for ACKs for tracing session %" PRIu64,
+                tsid);
+  PERFETTO_DCHECK(!tracing_session->AllDataSourceInstancesStopped());
+  DisableTracingNotifyConsumerAndFlushFile(tracing_session);
+}
+
+void TracingServiceImpl::DisableTracingNotifyConsumerAndFlushFile(
+    TracingSession* tracing_session) {
+  PERFETTO_DCHECK(tracing_session->state != TracingSession::DISABLED);
+  for (auto& inst_kv : tracing_session->data_source_instances) {
+    if (inst_kv.second.state == DataSourceInstance::STOPPED)
+      continue;
+    inst_kv.second.state = DataSourceInstance::STOPPED;
+    ProducerEndpointImpl* producer = GetProducer(inst_kv.first);
+    PERFETTO_DCHECK(producer);
+    if (tracing_session->consumer_maybe_null) {
+      tracing_session->consumer_maybe_null->OnDataSourceInstanceStateChange(
+          *producer, inst_kv.second);
+    }
+  }
+  tracing_session->state = TracingSession::DISABLED;
+
+  // Scrape any remaining chunks that weren't flushed by the producers.
+  for (auto& producer_id_and_producer : producers_)
+    ScrapeSharedMemoryBuffers(tracing_session, producer_id_and_producer.second);
+
+  SnapshotLifecyleEvent(
+      tracing_session,
+      protos::pbzero::TracingServiceEvent::kTracingDisabledFieldNumber,
+      true /* snapshot_clocks */);
+
+  if (tracing_session->write_into_file) {
+    tracing_session->write_period_ms = 0;
+    ReadBuffers(tracing_session->id, nullptr);
+  }
+
+  if (tracing_session->on_disable_callback_for_bugreport) {
+    std::move(tracing_session->on_disable_callback_for_bugreport)();
+    tracing_session->on_disable_callback_for_bugreport = nullptr;
+  }
+
+  MaybeLogUploadEvent(tracing_session->config,
+                      PerfettoStatsdAtom::kTracedNotifyTracingDisabled);
+
+  if (tracing_session->consumer_maybe_null)
+    tracing_session->consumer_maybe_null->NotifyOnTracingDisabled("");
+}
+
+void TracingServiceImpl::Flush(TracingSessionID tsid,
+                               uint32_t timeout_ms,
+                               ConsumerEndpoint::FlushCallback callback) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  TracingSession* tracing_session = GetTracingSession(tsid);
+  if (!tracing_session) {
+    PERFETTO_DLOG("Flush() failed, invalid session ID %" PRIu64, tsid);
+    return;
+  }
+
+  if (!timeout_ms)
+    timeout_ms = tracing_session->flush_timeout_ms();
+
+  if (tracing_session->pending_flushes.size() > 1000) {
+    PERFETTO_ELOG("Too many flushes (%zu) pending for the tracing session",
+                  tracing_session->pending_flushes.size());
+    callback(false);
+    return;
+  }
+
+  FlushRequestID flush_request_id = ++last_flush_request_id_;
+  PendingFlush& pending_flush =
+      tracing_session->pending_flushes
+          .emplace_hint(tracing_session->pending_flushes.end(),
+                        flush_request_id, PendingFlush(std::move(callback)))
+          ->second;
+
+  // Send a flush request to each producer involved in the tracing session. In
+  // order to issue a flush request we have to build a map of all data source
+  // instance ids enabled for each producer.
+  std::map<ProducerID, std::vector<DataSourceInstanceID>> flush_map;
+  for (const auto& data_source_inst : tracing_session->data_source_instances) {
+    const ProducerID producer_id = data_source_inst.first;
+    const DataSourceInstanceID ds_inst_id = data_source_inst.second.instance_id;
+    flush_map[producer_id].push_back(ds_inst_id);
+  }
+
+  for (const auto& kv : flush_map) {
+    ProducerID producer_id = kv.first;
+    ProducerEndpointImpl* producer = GetProducer(producer_id);
+    const std::vector<DataSourceInstanceID>& data_sources = kv.second;
+    producer->Flush(flush_request_id, data_sources);
+    pending_flush.producers.insert(producer_id);
+  }
+
+  // If there are no producers to flush (realistically this happens only in
+  // some tests) fire OnFlushTimeout() straight away, without waiting.
+  if (flush_map.empty())
+    timeout_ms = 0;
+
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostDelayedTask(
+      [weak_this, tsid, flush_request_id] {
+        if (weak_this)
+          weak_this->OnFlushTimeout(tsid, flush_request_id);
+      },
+      timeout_ms);
+}
+
+void TracingServiceImpl::NotifyFlushDoneForProducer(
+    ProducerID producer_id,
+    FlushRequestID flush_request_id) {
+  for (auto& kv : tracing_sessions_) {
+    // Remove all pending flushes <= |flush_request_id| for |producer_id|.
+    auto& pending_flushes = kv.second.pending_flushes;
+    auto end_it = pending_flushes.upper_bound(flush_request_id);
+    for (auto it = pending_flushes.begin(); it != end_it;) {
+      PendingFlush& pending_flush = it->second;
+      pending_flush.producers.erase(producer_id);
+      if (pending_flush.producers.empty()) {
+        auto weak_this = weak_ptr_factory_.GetWeakPtr();
+        TracingSessionID tsid = kv.first;
+        auto callback = std::move(pending_flush.callback);
+        task_runner_->PostTask([weak_this, tsid, callback]() {
+          if (weak_this) {
+            weak_this->CompleteFlush(tsid, std::move(callback),
+                                     /*success=*/true);
+          }
+        });
+        it = pending_flushes.erase(it);
+      } else {
+        it++;
+      }
+    }  // for (pending_flushes)
+  }    // for (tracing_session)
+}
+
+void TracingServiceImpl::OnFlushTimeout(TracingSessionID tsid,
+                                        FlushRequestID flush_request_id) {
+  TracingSession* tracing_session = GetTracingSession(tsid);
+  if (!tracing_session)
+    return;
+  auto it = tracing_session->pending_flushes.find(flush_request_id);
+  if (it == tracing_session->pending_flushes.end())
+    return;  // Nominal case: flush was completed and acked on time.
+
+  // If there were no producers to flush, consider it a success.
+  bool success = it->second.producers.empty();
+
+  auto callback = std::move(it->second.callback);
+  tracing_session->pending_flushes.erase(it);
+  CompleteFlush(tsid, std::move(callback), success);
+}
+
+void TracingServiceImpl::CompleteFlush(TracingSessionID tsid,
+                                       ConsumerEndpoint::FlushCallback callback,
+                                       bool success) {
+  TracingSession* tracing_session = GetTracingSession(tsid);
+  if (!tracing_session) {
+    callback(false);
+    return;
+  }
+  // Producers may not have been able to flush all their data, even if they
+  // indicated flush completion. If possible, also collect uncommitted chunks
+  // to make sure we have everything they wrote so far.
+  for (auto& producer_id_and_producer : producers_) {
+    ScrapeSharedMemoryBuffers(tracing_session, producer_id_and_producer.second);
+  }
+  SnapshotLifecyleEvent(
+      tracing_session,
+      protos::pbzero::TracingServiceEvent::kAllDataSourcesFlushedFieldNumber,
+      true /* snapshot_clocks */);
+  callback(success);
+}
+
+void TracingServiceImpl::ScrapeSharedMemoryBuffers(
+    TracingSession* tracing_session,
+    ProducerEndpointImpl* producer) {
+  if (!producer->smb_scraping_enabled_)
+    return;
+
+  // Can't copy chunks if we don't know about any trace writers.
+  if (producer->writers_.empty())
+    return;
+
+  // Performance optimization: On flush or session disconnect, this method is
+  // called for each producer. If the producer doesn't participate in the
+  // session, there's no need to scape its chunks right now. We can tell if a
+  // producer participates in the session by checking if the producer is allowed
+  // to write into the session's log buffers.
+  const auto& session_buffers = tracing_session->buffers_index;
+  bool producer_in_session =
+      std::any_of(session_buffers.begin(), session_buffers.end(),
+                  [producer](BufferID buffer_id) {
+                    return producer->allowed_target_buffers_.count(buffer_id);
+                  });
+  if (!producer_in_session)
+    return;
+
+  PERFETTO_DLOG("Scraping SMB for producer %" PRIu16, producer->id_);
+
+  // Find and copy any uncommitted chunks from the SMB.
+  //
+  // In nominal conditions, the page layout of the used SMB pages should never
+  // change because the service is the only one who is supposed to modify used
+  // pages (to make them free again).
+  //
+  // However, the code here needs to deal with the case of a malicious producer
+  // altering the SMB in unpredictable ways. Thankfully the SMB size is
+  // immutable, so a chunk will always point to some valid memory, even if the
+  // producer alters the intended layout and chunk header concurrently.
+  // Ultimately a malicious producer altering the SMB's chunk layout while we
+  // are iterating in this function is not any different from the case of a
+  // malicious producer asking to commit a chunk made of random data, which is
+  // something this class has to deal with regardless.
+  //
+  // The only legitimate mutations that can happen from sane producers,
+  // concurrently to this function, are:
+  //   A. free pages being partitioned,
+  //   B. free chunks being migrated to kChunkBeingWritten,
+  //   C. kChunkBeingWritten chunks being migrated to kChunkCompleted.
+
+  SharedMemoryABI* abi = &producer->shmem_abi_;
+  // num_pages() is immutable after the SMB is initialized and cannot be changed
+  // even by a producer even if malicious.
+  for (size_t page_idx = 0; page_idx < abi->num_pages(); page_idx++) {
+    uint32_t layout = abi->GetPageLayout(page_idx);
+
+    uint32_t used_chunks = abi->GetUsedChunks(layout);  // Returns a bitmap.
+    // Skip empty pages.
+    if (used_chunks == 0)
+      continue;
+
+    // Scrape the chunks that are currently used. These should be either in
+    // state kChunkBeingWritten or kChunkComplete.
+    for (uint32_t chunk_idx = 0; used_chunks; chunk_idx++, used_chunks >>= 1) {
+      if (!(used_chunks & 1))
+        continue;
+
+      SharedMemoryABI::ChunkState state =
+          SharedMemoryABI::GetChunkStateFromLayout(layout, chunk_idx);
+      PERFETTO_DCHECK(state == SharedMemoryABI::kChunkBeingWritten ||
+                      state == SharedMemoryABI::kChunkComplete);
+      bool chunk_complete = state == SharedMemoryABI::kChunkComplete;
+
+      SharedMemoryABI::Chunk chunk =
+          abi->GetChunkUnchecked(page_idx, layout, chunk_idx);
+
+      uint16_t packet_count;
+      uint8_t flags;
+      // GetPacketCountAndFlags has acquire_load semantics.
+      std::tie(packet_count, flags) = chunk.GetPacketCountAndFlags();
+
+      // It only makes sense to copy an incomplete chunk if there's at least
+      // one full packet available. (The producer may not have completed the
+      // last packet in it yet, so we need at least 2.)
+      if (!chunk_complete && packet_count < 2)
+        continue;
+
+      // At this point, it is safe to access the remaining header fields of
+      // the chunk. Even if the chunk was only just transferred from
+      // kChunkFree into kChunkBeingWritten state, the header should be
+      // written completely once the packet count increased above 1 (it was
+      // reset to 0 by the service when the chunk was freed).
+
+      WriterID writer_id = chunk.writer_id();
+      base::Optional<BufferID> target_buffer_id =
+          producer->buffer_id_for_writer(writer_id);
+
+      // We can only scrape this chunk if we know which log buffer to copy it
+      // into.
+      if (!target_buffer_id)
+        continue;
+
+      // Skip chunks that don't belong to the requested tracing session.
+      bool target_buffer_belongs_to_session =
+          std::find(session_buffers.begin(), session_buffers.end(),
+                    *target_buffer_id) != session_buffers.end();
+      if (!target_buffer_belongs_to_session)
+        continue;
+
+      uint32_t chunk_id =
+          chunk.header()->chunk_id.load(std::memory_order_relaxed);
+
+      CopyProducerPageIntoLogBuffer(
+          producer->id_, producer->uid_, writer_id, chunk_id, *target_buffer_id,
+          packet_count, flags, chunk_complete, chunk.payload_begin(),
+          chunk.payload_size());
+    }
+  }
+}
+
+void TracingServiceImpl::FlushAndDisableTracing(TracingSessionID tsid) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DLOG("Triggering final flush for %" PRIu64, tsid);
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  Flush(tsid, 0, [weak_this, tsid](bool success) {
+    // This was a DLOG up to Jun 2021 (v16, Android S).
+    PERFETTO_LOG("FlushAndDisableTracing(%" PRIu64 ") done, success=%d", tsid,
+                 success);
+    if (!weak_this)
+      return;
+    TracingSession* session = weak_this->GetTracingSession(tsid);
+    if (session->consumer_maybe_null) {
+      // If the consumer is still attached, just disable the session but give it
+      // a chance to read the contents.
+      weak_this->DisableTracing(tsid);
+    } else {
+      // If the consumer detached, destroy the session. If the consumer did
+      // start the session in long-tracing mode, the service will have saved
+      // the contents to the passed file. If not, the contents will be
+      // destroyed.
+      weak_this->FreeBuffers(tsid);
+    }
+  });
+}
+
+void TracingServiceImpl::PeriodicFlushTask(TracingSessionID tsid,
+                                           bool post_next_only) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  TracingSession* tracing_session = GetTracingSession(tsid);
+  if (!tracing_session || tracing_session->state != TracingSession::STARTED)
+    return;
+
+  uint32_t flush_period_ms = tracing_session->config.flush_period_ms();
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostDelayedTask(
+      [weak_this, tsid] {
+        if (weak_this)
+          weak_this->PeriodicFlushTask(tsid, /*post_next_only=*/false);
+      },
+      flush_period_ms - static_cast<uint32_t>(base::GetWallTimeMs().count() %
+                                              flush_period_ms));
+
+  if (post_next_only)
+    return;
+
+  PERFETTO_DLOG("Triggering periodic flush for trace session %" PRIu64, tsid);
+  Flush(tsid, 0, [](bool success) {
+    if (!success)
+      PERFETTO_ELOG("Periodic flush timed out");
+  });
+}
+
+void TracingServiceImpl::PeriodicClearIncrementalStateTask(
+    TracingSessionID tsid,
+    bool post_next_only) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  TracingSession* tracing_session = GetTracingSession(tsid);
+  if (!tracing_session || tracing_session->state != TracingSession::STARTED)
+    return;
+
+  uint32_t clear_period_ms =
+      tracing_session->config.incremental_state_config().clear_period_ms();
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostDelayedTask(
+      [weak_this, tsid] {
+        if (weak_this)
+          weak_this->PeriodicClearIncrementalStateTask(
+              tsid, /*post_next_only=*/false);
+      },
+      clear_period_ms - static_cast<uint32_t>(base::GetWallTimeMs().count() %
+                                              clear_period_ms));
+
+  if (post_next_only)
+    return;
+
+  PERFETTO_DLOG(
+      "Performing periodic incremental state clear for trace session %" PRIu64,
+      tsid);
+
+  // Queue the IPCs to producers with active data sources that opted in.
+  std::map<ProducerID, std::vector<DataSourceInstanceID>> clear_map;
+  for (const auto& kv : tracing_session->data_source_instances) {
+    ProducerID producer_id = kv.first;
+    const DataSourceInstance& data_source = kv.second;
+    if (data_source.handles_incremental_state_clear)
+      clear_map[producer_id].push_back(data_source.instance_id);
+  }
+
+  for (const auto& kv : clear_map) {
+    ProducerID producer_id = kv.first;
+    const std::vector<DataSourceInstanceID>& data_sources = kv.second;
+    ProducerEndpointImpl* producer = GetProducer(producer_id);
+    if (!producer) {
+      PERFETTO_DFATAL("Producer does not exist.");
+      continue;
+    }
+    producer->ClearIncrementalState(data_sources);
+  }
+}
+
+// Note: when this is called to write into a file passed when starting tracing
+// |consumer| will be == nullptr (as opposite to the case of a consumer asking
+// to send the trace data back over IPC).
+bool TracingServiceImpl::ReadBuffers(TracingSessionID tsid,
+                                     ConsumerEndpointImpl* consumer) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  TracingSession* tracing_session = GetTracingSession(tsid);
+  if (!tracing_session) {
+    // This will be hit systematically from the PostDelayedTask when directly
+    // writing into the file (in which case consumer == nullptr). Suppress the
+    // log in this case as it's just spam.
+    if (consumer) {
+      PERFETTO_DLOG("Cannot ReadBuffers(): no tracing session is active");
+    }
+    return false;
+  }
+
+  // When a tracing session is waiting for a trigger it is considered empty. If
+  // a tracing session finishes and moves into DISABLED without ever receiving a
+  // trigger the trace should never return any data. This includes the synthetic
+  // packets like TraceConfig and Clock snapshots. So we bail out early and let
+  // the consumer know there is no data.
+  if (!tracing_session->config.trigger_config().triggers().empty() &&
+      tracing_session->received_triggers.empty() &&
+      !tracing_session->seized_for_bugreport) {
+    PERFETTO_DLOG(
+        "ReadBuffers(): tracing session has not received a trigger yet.");
+    return false;
+  }
+
+  // This can happen if the file is closed by a previous task because it reaches
+  // |max_file_size_bytes|.
+  if (!tracing_session->write_into_file && !consumer)
+    return false;
+
+  if (tracing_session->write_into_file && consumer) {
+    // If the consumer enabled tracing and asked to save the contents into the
+    // passed file makes little sense to also try to read the buffers over IPC,
+    // as that would just steal data from the periodic draining task.
+    PERFETTO_ELOG("Consumer trying to read from write_into_file session.");
+    return false;
+  }
+
+  std::vector<TracePacket> packets;
+  packets.reserve(1024);  // Just an educated guess to avoid trivial expansions.
+
+  // If a bugreport request happened and the trace was stolen for that, give
+  // an empty trace with a clear signal to the consumer. This deals only with
+  // the case of readback-from-IPC. A similar code-path deals with the
+  // write_into_file case in MaybeSaveTraceForBugreport().
+  if (tracing_session->seized_for_bugreport && consumer) {
+    if (!tracing_session->config.builtin_data_sources()
+             .disable_service_events()) {
+      EmitSeizedForBugreportLifecycleEvent(&packets);
+    }
+    EmitLifecycleEvents(tracing_session, &packets);
+    consumer->consumer_->OnTraceData(std::move(packets), /*has_more=*/false);
+    return true;
+  }
+
+  if (!tracing_session->initial_clock_snapshot.empty()) {
+    EmitClockSnapshot(tracing_session,
+                      std::move(tracing_session->initial_clock_snapshot),
+                      &packets);
+  }
+
+  for (auto& snapshot : tracing_session->clock_snapshot_ring_buffer) {
+    PERFETTO_DCHECK(!snapshot.empty());
+    EmitClockSnapshot(tracing_session, std::move(snapshot), &packets);
+  }
+  tracing_session->clock_snapshot_ring_buffer.clear();
+
+  if (tracing_session->should_emit_sync_marker) {
+    EmitSyncMarker(&packets);
+    tracing_session->should_emit_sync_marker = false;
+  }
+
+  if (!tracing_session->config.builtin_data_sources().disable_trace_config()) {
+    MaybeEmitTraceConfig(tracing_session, &packets);
+    MaybeEmitReceivedTriggers(tracing_session, &packets);
+  }
+  if (!tracing_session->config.builtin_data_sources().disable_system_info())
+    MaybeEmitSystemInfo(tracing_session, &packets);
+
+  // Note that in the proto comment, we guarantee that the tracing_started
+  // lifecycle event will be emitted before any data packets so make sure to
+  // keep this before reading the tracing buffers.
+  if (!tracing_session->config.builtin_data_sources().disable_service_events())
+    EmitLifecycleEvents(tracing_session, &packets);
+
+  size_t packets_bytes = 0;  // SUM(slice.size() for each slice in |packets|).
+  size_t total_slices = 0;   // SUM(#slices in |packets|).
+
+  // Add up size for packets added by the Maybe* calls above.
+  for (const TracePacket& packet : packets) {
+    packets_bytes += packet.size();
+    total_slices += packet.slices().size();
+  }
+
+  // This is a rough threshold to determine how much to read from the buffer in
+  // each task. This is to avoid executing a single huge sending task for too
+  // long and risk to hit the watchdog. This is *not* an upper bound: we just
+  // stop accumulating new packets and PostTask *after* we cross this threshold.
+  // This constant essentially balances the PostTask and IPC overhead vs the
+  // responsiveness of the service. An extremely small value will cause one IPC
+  // and one PostTask for each slice but will keep the service extremely
+  // responsive. An extremely large value will batch the send for the full
+  // buffer in one large task, will hit the blocking send() once the socket
+  // buffers are full and hang the service for a bit (until the consumer
+  // catches up).
+  static constexpr size_t kApproxBytesPerTask = 32768;
+  bool did_hit_threshold = false;
+
+  // TODO(primiano): Extend the ReadBuffers API to allow reading only some
+  // buffers, not all of them in one go.
+  for (size_t buf_idx = 0;
+       buf_idx < tracing_session->num_buffers() && !did_hit_threshold;
+       buf_idx++) {
+    auto tbuf_iter = buffers_.find(tracing_session->buffers_index[buf_idx]);
+    if (tbuf_iter == buffers_.end()) {
+      PERFETTO_DFATAL("Buffer not found.");
+      continue;
+    }
+    TraceBuffer& tbuf = *tbuf_iter->second;
+    tbuf.BeginRead();
+    while (!did_hit_threshold) {
+      TracePacket packet;
+      TraceBuffer::PacketSequenceProperties sequence_properties{};
+      bool previous_packet_dropped;
+      if (!tbuf.ReadNextTracePacket(&packet, &sequence_properties,
+                                    &previous_packet_dropped)) {
+        break;
+      }
+      PERFETTO_DCHECK(sequence_properties.producer_id_trusted != 0);
+      PERFETTO_DCHECK(sequence_properties.writer_id != 0);
+      PERFETTO_DCHECK(sequence_properties.producer_uid_trusted != kInvalidUid);
+      PERFETTO_DCHECK(packet.size() > 0);
+      if (!PacketStreamValidator::Validate(packet.slices())) {
+        tracing_session->invalid_packets++;
+        PERFETTO_DLOG("Dropping invalid packet");
+        continue;
+      }
+
+      // Append a slice with the trusted field data. This can't be spoofed
+      // because above we validated that the existing slices don't contain any
+      // trusted fields. For added safety we append instead of prepending
+      // because according to protobuf semantics, if the same field is
+      // encountered multiple times the last instance takes priority. Note that
+      // truncated packets are also rejected, so the producer can't give us a
+      // partial packet (e.g., a truncated string) which only becomes valid when
+      // the trusted data is appended here.
+      Slice slice = Slice::Allocate(32);
+      protozero::StaticBuffered<protos::pbzero::TracePacket> trusted_packet(
+          slice.own_data(), slice.size);
+      trusted_packet->set_trusted_uid(
+          static_cast<int32_t>(sequence_properties.producer_uid_trusted));
+      trusted_packet->set_trusted_packet_sequence_id(
+          tracing_session->GetPacketSequenceID(
+              sequence_properties.producer_id_trusted,
+              sequence_properties.writer_id));
+      if (previous_packet_dropped)
+        trusted_packet->set_previous_packet_dropped(previous_packet_dropped);
+      slice.size = trusted_packet.Finalize();
+      packet.AddSlice(std::move(slice));
+
+      // Append the packet (inclusive of the trusted uid) to |packets|.
+      packets_bytes += packet.size();
+      total_slices += packet.slices().size();
+      did_hit_threshold = packets_bytes >= kApproxBytesPerTask &&
+                          !tracing_session->write_into_file;
+      packets.emplace_back(std::move(packet));
+    }  // for(packets...)
+  }    // for(buffers...)
+
+  const bool has_more = did_hit_threshold;
+
+  size_t prev_packets_size = packets.size();
+  if (!tracing_session->config.builtin_data_sources()
+           .disable_service_events()) {
+    // We don't bother snapshotting clocks here because we wouldn't be able to
+    // emit it and we shouldn't have significant drift from the last snapshot in
+    // any case.
+    SnapshotLifecyleEvent(tracing_session,
+                          protos::pbzero::TracingServiceEvent::
+                              kReadTracingBuffersCompletedFieldNumber,
+                          false /* snapshot_clocks */);
+    EmitLifecycleEvents(tracing_session, &packets);
+  }
+
+  // Only emit the stats when there is no more trace data is available to read.
+  // That way, any problems that occur while reading from the buffers are
+  // reflected in the emitted stats. This is particularly important for use
+  // cases where ReadBuffers is only ever called after the tracing session is
+  // stopped.
+  if (!has_more && tracing_session->should_emit_stats) {
+    EmitStats(tracing_session, &packets);
+    tracing_session->should_emit_stats = false;
+  }
+
+  // Add sizes of packets emitted by the EmitLifecycleEvents + EmitStats.
+  for (size_t i = prev_packets_size; i < packets.size(); ++i) {
+    packets_bytes += packets[i].size();
+    total_slices += packets[i].slices().size();
+  }
+
+  // +-------------------------------------------------------------------------+
+  // | NO MORE CHANGES TO |packets| AFTER THIS POINT.                          |
+  // +-------------------------------------------------------------------------+
+
+  // If the tracing session specified a filter, run all packets through the
+  // filter and replace them with the filter results.
+  // The process below mantains the cardinality of input packets. Even if an
+  // entire packet is filtered out, we emit a zero-sized TracePacket proto. That
+  // makes debugging and reasoning about the trace stats easier.
+  // This place swaps the contents of each |packets| entry in place.
+  if (tracing_session->trace_filter) {
+    auto& trace_filter = *tracing_session->trace_filter;
+    // The filter root shoud be reset from protos.Trace to protos.TracePacket
+    // by the earlier call to SetFilterRoot() in EnableTracing().
+    PERFETTO_DCHECK(trace_filter.root_msg_index() != 0);
+    std::vector<protozero::MessageFilter::InputSlice> filter_input;
+    for (auto it = packets.begin(); it != packets.end(); ++it) {
+      const auto& packet_slices = it->slices();
+      filter_input.clear();
+      filter_input.resize(packet_slices.size());
+      ++tracing_session->filter_input_packets;
+      tracing_session->filter_input_bytes += it->size();
+      for (size_t i = 0; i < packet_slices.size(); ++i)
+        filter_input[i] = {packet_slices[i].start, packet_slices[i].size};
+      auto filtered_packet = trace_filter.FilterMessageFragments(
+          &filter_input[0], filter_input.size());
+
+      // Replace the packet in-place with the filtered one (unless failed).
+      *it = TracePacket();
+      if (filtered_packet.error) {
+        ++tracing_session->filter_errors;
+        PERFETTO_DLOG("Trace packet filtering failed @ packet %" PRIu64,
+                      tracing_session->filter_input_packets);
+        continue;
+      }
+      tracing_session->filter_output_bytes += filtered_packet.size;
+      it->AddSlice(Slice::TakeOwnership(std::move(filtered_packet.data),
+                                        filtered_packet.size));
+
+    }  // for (packet)
+  }    // if (trace_filter)
+
+  // If the caller asked us to write into a file by setting
+  // |write_into_file| == true in the trace config, drain the packets read
+  // (if any) into the given file descriptor.
+  if (tracing_session->write_into_file) {
+    const uint64_t max_size = tracing_session->max_file_size_bytes
+                                  ? tracing_session->max_file_size_bytes
+                                  : std::numeric_limits<size_t>::max();
+
+    // When writing into a file, the file should look like a root trace.proto
+    // message. Each packet should be prepended with a proto preamble stating
+    // its field id (within trace.proto) and size. Hence the addition below.
+    const size_t max_iovecs = total_slices + packets.size();
+
+    size_t num_iovecs = 0;
+    bool stop_writing_into_file = tracing_session->write_period_ms == 0;
+    std::unique_ptr<struct iovec[]> iovecs(new struct iovec[max_iovecs]);
+    size_t num_iovecs_at_last_packet = 0;
+    uint64_t bytes_about_to_be_written = 0;
+    for (TracePacket& packet : packets) {
+      std::tie(iovecs[num_iovecs].iov_base, iovecs[num_iovecs].iov_len) =
+          packet.GetProtoPreamble();
+      bytes_about_to_be_written += iovecs[num_iovecs].iov_len;
+      num_iovecs++;
+      for (const Slice& slice : packet.slices()) {
+        // writev() doesn't change the passed pointer. However, struct iovec
+        // take a non-const ptr because it's the same struct used by readv().
+        // Hence the const_cast here.
+        char* start = static_cast<char*>(const_cast<void*>(slice.start));
+        bytes_about_to_be_written += slice.size;
+        iovecs[num_iovecs++] = {start, slice.size};
+      }
+
+      if (tracing_session->bytes_written_into_file +
+              bytes_about_to_be_written >=
+          max_size) {
+        stop_writing_into_file = true;
+        num_iovecs = num_iovecs_at_last_packet;
+        break;
+      }
+
+      num_iovecs_at_last_packet = num_iovecs;
+    }
+    PERFETTO_DCHECK(num_iovecs <= max_iovecs);
+    int fd = *tracing_session->write_into_file;
+
+    uint64_t total_wr_size = 0;
+
+    // writev() can take at most IOV_MAX entries per call. Batch them.
+    constexpr size_t kIOVMax = IOV_MAX;
+    for (size_t i = 0; i < num_iovecs; i += kIOVMax) {
+      int iov_batch_size = static_cast<int>(std::min(num_iovecs - i, kIOVMax));
+      ssize_t wr_size = PERFETTO_EINTR(writev(fd, &iovecs[i], iov_batch_size));
+      if (wr_size <= 0) {
+        PERFETTO_PLOG("writev() failed");
+        stop_writing_into_file = true;
+        break;
+      }
+      total_wr_size += static_cast<size_t>(wr_size);
+    }
+
+    tracing_session->bytes_written_into_file += total_wr_size;
+
+    PERFETTO_DLOG("Draining into file, written: %" PRIu64 " KB, stop: %d",
+                  (total_wr_size + 1023) / 1024, stop_writing_into_file);
+    if (stop_writing_into_file) {
+      // Ensure all data was written to the file before we close it.
+      base::FlushFile(fd);
+      tracing_session->write_into_file.reset();
+      tracing_session->write_period_ms = 0;
+      if (tracing_session->state == TracingSession::STARTED)
+        DisableTracing(tsid);
+      return true;
+    }
+
+    auto weak_this = weak_ptr_factory_.GetWeakPtr();
+    task_runner_->PostDelayedTask(
+        [weak_this, tsid] {
+          if (weak_this)
+            weak_this->ReadBuffers(tsid, nullptr);
+        },
+        tracing_session->delay_to_next_write_period_ms());
+    return true;
+  }  // if (tracing_session->write_into_file)
+
+  if (has_more) {
+    auto weak_consumer = consumer->weak_ptr_factory_.GetWeakPtr();
+    auto weak_this = weak_ptr_factory_.GetWeakPtr();
+    task_runner_->PostTask([weak_this, weak_consumer, tsid] {
+      if (!weak_this || !weak_consumer)
+        return;
+      weak_this->ReadBuffers(tsid, weak_consumer.get());
+    });
+  }
+
+  // Keep this as tail call, just in case the consumer re-enters.
+  consumer->consumer_->OnTraceData(std::move(packets), has_more);
+  return true;
+}
+
+void TracingServiceImpl::FreeBuffers(TracingSessionID tsid) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DLOG("Freeing buffers for session %" PRIu64, tsid);
+  TracingSession* tracing_session = GetTracingSession(tsid);
+  if (!tracing_session) {
+    PERFETTO_DLOG("FreeBuffers() failed, invalid session ID %" PRIu64, tsid);
+    return;  // TODO(primiano): signal failure?
+  }
+  DisableTracing(tsid, /*disable_immediately=*/true);
+
+  PERFETTO_DCHECK(tracing_session->AllDataSourceInstancesStopped());
+  tracing_session->data_source_instances.clear();
+
+  for (auto& producer_entry : producers_) {
+    ProducerEndpointImpl* producer = producer_entry.second;
+    producer->OnFreeBuffers(tracing_session->buffers_index);
+  }
+
+  for (BufferID buffer_id : tracing_session->buffers_index) {
+    buffer_ids_.Free(buffer_id);
+    PERFETTO_DCHECK(buffers_.count(buffer_id) == 1);
+    buffers_.erase(buffer_id);
+  }
+  bool notify_traceur = tracing_session->config.notify_traceur();
+  bool is_long_trace =
+      (tracing_session->config.write_into_file() &&
+       tracing_session->config.file_write_period_ms() < kMillisPerDay);
+  bool seized_for_bugreport = tracing_session->seized_for_bugreport;
+  tracing_sessions_.erase(tsid);
+  tracing_session = nullptr;
+  UpdateMemoryGuardrail();
+
+  PERFETTO_LOG("Tracing session %" PRIu64 " ended, total sessions:%zu", tsid,
+               tracing_sessions_.size());
+
+#if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD) && \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+  if (notify_traceur && (seized_for_bugreport || is_long_trace)) {
+    PERFETTO_LAZY_LOAD(android_internal::NotifyTraceSessionEnded, notify_fn);
+    if (!notify_fn || !notify_fn(seized_for_bugreport))
+      PERFETTO_ELOG("Failed to notify Traceur long tracing has ended");
+  }
+#else
+  base::ignore_result(notify_traceur);
+  base::ignore_result(is_long_trace);
+  base::ignore_result(seized_for_bugreport);
+#endif
+}
+
+void TracingServiceImpl::RegisterDataSource(ProducerID producer_id,
+                                            const DataSourceDescriptor& desc) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DLOG("Producer %" PRIu16 " registered data source \"%s\"",
+                producer_id, desc.name().c_str());
+
+  PERFETTO_DCHECK(!desc.name().empty());
+  auto reg_ds = data_sources_.emplace(desc.name(),
+                                      RegisteredDataSource{producer_id, desc});
+
+  // If there are existing tracing sessions, we need to check if the new
+  // data source is enabled by any of them.
+  if (tracing_sessions_.empty())
+    return;
+
+  ProducerEndpointImpl* producer = GetProducer(producer_id);
+  if (!producer) {
+    PERFETTO_DFATAL("Producer not found.");
+    return;
+  }
+
+  for (auto& iter : tracing_sessions_) {
+    TracingSession& tracing_session = iter.second;
+    if (tracing_session.state != TracingSession::STARTED &&
+        tracing_session.state != TracingSession::CONFIGURED) {
+      continue;
+    }
+
+    TraceConfig::ProducerConfig producer_config;
+    for (auto& config : tracing_session.config.producers()) {
+      if (producer->name_ == config.producer_name()) {
+        producer_config = config;
+        break;
+      }
+    }
+    for (const TraceConfig::DataSource& cfg_data_source :
+         tracing_session.config.data_sources()) {
+      if (cfg_data_source.config().name() != desc.name())
+        continue;
+      DataSourceInstance* ds_inst = SetupDataSource(
+          cfg_data_source, producer_config, reg_ds->second, &tracing_session);
+      if (ds_inst && tracing_session.state == TracingSession::STARTED)
+        StartDataSourceInstance(producer, &tracing_session, ds_inst);
+    }
+  }
+}
+
+void TracingServiceImpl::StopDataSourceInstance(ProducerEndpointImpl* producer,
+                                                TracingSession* tracing_session,
+                                                DataSourceInstance* instance,
+                                                bool disable_immediately) {
+  const DataSourceInstanceID ds_inst_id = instance->instance_id;
+  if (instance->will_notify_on_stop && !disable_immediately) {
+    instance->state = DataSourceInstance::STOPPING;
+  } else {
+    instance->state = DataSourceInstance::STOPPED;
+  }
+  if (tracing_session->consumer_maybe_null) {
+    tracing_session->consumer_maybe_null->OnDataSourceInstanceStateChange(
+        *producer, *instance);
+  }
+  producer->StopDataSource(ds_inst_id);
+}
+
+void TracingServiceImpl::UnregisterDataSource(ProducerID producer_id,
+                                              const std::string& name) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DLOG("Producer %" PRIu16 " unregistered data source \"%s\"",
+                producer_id, name.c_str());
+  PERFETTO_CHECK(producer_id);
+  ProducerEndpointImpl* producer = GetProducer(producer_id);
+  PERFETTO_DCHECK(producer);
+  for (auto& kv : tracing_sessions_) {
+    auto& ds_instances = kv.second.data_source_instances;
+    bool removed = false;
+    for (auto it = ds_instances.begin(); it != ds_instances.end();) {
+      if (it->first == producer_id && it->second.data_source_name == name) {
+        DataSourceInstanceID ds_inst_id = it->second.instance_id;
+        if (it->second.state != DataSourceInstance::STOPPED) {
+          if (it->second.state != DataSourceInstance::STOPPING) {
+            StopDataSourceInstance(producer, &kv.second, &it->second,
+                                   /* disable_immediately = */ false);
+          }
+
+          // Mark the instance as stopped immediately, since we are
+          // unregistering it below.
+          //
+          //  The StopDataSourceInstance above might have set the state to
+          //  STOPPING so this condition isn't an else.
+          if (it->second.state == DataSourceInstance::STOPPING)
+            NotifyDataSourceStopped(producer_id, ds_inst_id);
+        }
+        it = ds_instances.erase(it);
+        removed = true;
+      } else {
+        ++it;
+      }
+    }  // for (data_source_instances)
+    if (removed)
+      MaybeNotifyAllDataSourcesStarted(&kv.second);
+  }  // for (tracing_session)
+
+  for (auto it = data_sources_.begin(); it != data_sources_.end(); ++it) {
+    if (it->second.producer_id == producer_id &&
+        it->second.descriptor.name() == name) {
+      data_sources_.erase(it);
+      return;
+    }
+  }
+
+  PERFETTO_DFATAL(
+      "Tried to unregister a non-existent data source \"%s\" for "
+      "producer %" PRIu16,
+      name.c_str(), producer_id);
+}
+
+TracingServiceImpl::DataSourceInstance* TracingServiceImpl::SetupDataSource(
+    const TraceConfig::DataSource& cfg_data_source,
+    const TraceConfig::ProducerConfig& producer_config,
+    const RegisteredDataSource& data_source,
+    TracingSession* tracing_session) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  ProducerEndpointImpl* producer = GetProducer(data_source.producer_id);
+  PERFETTO_DCHECK(producer);
+  // An existing producer that is not ftrace could have registered itself as
+  // ftrace, we must not enable it in that case.
+  if (lockdown_mode_ && producer->uid_ != uid_) {
+    PERFETTO_DLOG("Lockdown mode: not enabling producer %hu", producer->id_);
+    return nullptr;
+  }
+  // TODO(primiano): Add tests for registration ordering (data sources vs
+  // consumers).
+  if (!NameMatchesFilter(producer->name_,
+                         cfg_data_source.producer_name_filter(),
+                         cfg_data_source.producer_name_regex_filter())) {
+    PERFETTO_DLOG("Data source: %s is filtered out for producer: %s",
+                  cfg_data_source.config().name().c_str(),
+                  producer->name_.c_str());
+    return nullptr;
+  }
+
+  auto relative_buffer_id = cfg_data_source.config().target_buffer();
+  if (relative_buffer_id >= tracing_session->num_buffers()) {
+    PERFETTO_LOG(
+        "The TraceConfig for DataSource %s specified a target_buffer out of "
+        "bound (%d). Skipping it.",
+        cfg_data_source.config().name().c_str(), relative_buffer_id);
+    return nullptr;
+  }
+
+  // Create a copy of the DataSourceConfig specified in the trace config. This
+  // will be passed to the producer after translating the |target_buffer| id.
+  // The |target_buffer| parameter passed by the consumer in the trace config is
+  // relative to the buffers declared in the same trace config. This has to be
+  // translated to the global BufferID before passing it to the producers, which
+  // don't know anything about tracing sessions and consumers.
+
+  DataSourceInstanceID inst_id = ++last_data_source_instance_id_;
+  auto insert_iter = tracing_session->data_source_instances.emplace(
+      std::piecewise_construct,  //
+      std::forward_as_tuple(producer->id_),
+      std::forward_as_tuple(
+          inst_id,
+          cfg_data_source.config(),  //  Deliberate copy.
+          data_source.descriptor.name(),
+          data_source.descriptor.will_notify_on_start(),
+          data_source.descriptor.will_notify_on_stop(),
+          data_source.descriptor.handles_incremental_state_clear()));
+  DataSourceInstance* ds_instance = &insert_iter->second;
+
+  // New data source instance starts out in CONFIGURED state.
+  if (tracing_session->consumer_maybe_null) {
+    tracing_session->consumer_maybe_null->OnDataSourceInstanceStateChange(
+        *producer, *ds_instance);
+  }
+
+  DataSourceConfig& ds_config = ds_instance->config;
+  ds_config.set_trace_duration_ms(tracing_session->config.duration_ms());
+  ds_config.set_stop_timeout_ms(tracing_session->data_source_stop_timeout_ms());
+  ds_config.set_enable_extra_guardrails(
+      tracing_session->config.enable_extra_guardrails());
+  if (tracing_session->consumer_uid == 1066 /* AID_STATSD */ &&
+      tracing_session->config.statsd_metadata().triggering_config_uid() !=
+          2000 /* AID_SHELL */
+      && tracing_session->config.statsd_metadata().triggering_config_uid() !=
+             0 /* AID_ROOT */) {
+    // StatsD can be triggered either by shell, root or an app that has DUMP and
+    // USAGE_STATS permission. When triggered by shell or root, we do not want
+    // to consider the trace a trusted system trace, as it was initiated by the
+    // user. Otherwise, it has to come from an app with DUMP and
+    // PACKAGE_USAGE_STATS, which has to be preinstalled and trusted by the
+    // system.
+    // Check for shell / root: https://bit.ly/3b7oZNi
+    // Check for DUMP or PACKAGE_USAGE_STATS: https://bit.ly/3ep0NrR
+    ds_config.set_session_initiator(
+        DataSourceConfig::SESSION_INITIATOR_TRUSTED_SYSTEM);
+  } else {
+    // Unset in case the consumer set it.
+    // We need to be able to trust this field.
+    ds_config.set_session_initiator(
+        DataSourceConfig::SESSION_INITIATOR_UNSPECIFIED);
+  }
+  ds_config.set_tracing_session_id(tracing_session->id);
+  BufferID global_id = tracing_session->buffers_index[relative_buffer_id];
+  PERFETTO_DCHECK(global_id);
+  ds_config.set_target_buffer(global_id);
+
+  PERFETTO_DLOG("Setting up data source %s with target buffer %" PRIu16,
+                ds_config.name().c_str(), global_id);
+  if (!producer->shared_memory()) {
+    // Determine the SMB page size. Must be an integer multiple of 4k.
+    // As for the SMB size below, the decision tree is as follows:
+    // 1. Give priority to what is defined in the trace config.
+    // 2. If unset give priority to the hint passed by the producer.
+    // 3. Keep within bounds and ensure it's a multiple of 4k.
+    size_t page_size = producer_config.page_size_kb() * 1024;
+    if (page_size == 0)
+      page_size = producer->shmem_page_size_hint_bytes_;
+
+    // Determine the SMB size. Must be an integer multiple of the SMB page size.
+    // The decision tree is as follows:
+    // 1. Give priority to what defined in the trace config.
+    // 2. If unset give priority to the hint passed by the producer.
+    // 3. Keep within bounds and ensure it's a multiple of the page size.
+    size_t shm_size = producer_config.shm_size_kb() * 1024;
+    if (shm_size == 0)
+      shm_size = producer->shmem_size_hint_bytes_;
+
+    auto valid_sizes = EnsureValidShmSizes(shm_size, page_size);
+    if (valid_sizes != std::tie(shm_size, page_size)) {
+      PERFETTO_DLOG(
+          "Invalid configured SMB sizes: shm_size %zu page_size %zu. Falling "
+          "back to shm_size %zu page_size %zu.",
+          shm_size, page_size, std::get<0>(valid_sizes),
+          std::get<1>(valid_sizes));
+    }
+    std::tie(shm_size, page_size) = valid_sizes;
+
+    // TODO(primiano): right now Create() will suicide in case of OOM if the
+    // mmap fails. We should instead gracefully fail the request and tell the
+    // client to go away.
+    PERFETTO_DLOG("Creating SMB of %zu KB for producer \"%s\"", shm_size / 1024,
+                  producer->name_.c_str());
+    auto shared_memory = shm_factory_->CreateSharedMemory(shm_size);
+    producer->SetupSharedMemory(std::move(shared_memory), page_size,
+                                /*provided_by_producer=*/false);
+  }
+  producer->SetupDataSource(inst_id, ds_config);
+  return ds_instance;
+}
+
+// Note: all the fields % *_trusted ones are untrusted, as in, the Producer
+// might be lying / returning garbage contents. |src| and |size| can be trusted
+// in terms of being a valid pointer, but not the contents.
+void TracingServiceImpl::CopyProducerPageIntoLogBuffer(
+    ProducerID producer_id_trusted,
+    uid_t producer_uid_trusted,
+    WriterID writer_id,
+    ChunkID chunk_id,
+    BufferID buffer_id,
+    uint16_t num_fragments,
+    uint8_t chunk_flags,
+    bool chunk_complete,
+    const uint8_t* src,
+    size_t size) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+
+  ProducerEndpointImpl* producer = GetProducer(producer_id_trusted);
+  if (!producer) {
+    PERFETTO_DFATAL("Producer not found.");
+    chunks_discarded_++;
+    return;
+  }
+
+  TraceBuffer* buf = GetBufferByID(buffer_id);
+  if (!buf) {
+    PERFETTO_DLOG("Could not find target buffer %" PRIu16
+                  " for producer %" PRIu16,
+                  buffer_id, producer_id_trusted);
+    chunks_discarded_++;
+    return;
+  }
+
+  // Verify that the producer is actually allowed to write into the target
+  // buffer specified in the request. This prevents a malicious producer from
+  // injecting data into a log buffer that belongs to a tracing session the
+  // producer is not part of.
+  if (!producer->is_allowed_target_buffer(buffer_id)) {
+    PERFETTO_ELOG("Producer %" PRIu16
+                  " tried to write into forbidden target buffer %" PRIu16,
+                  producer_id_trusted, buffer_id);
+    PERFETTO_DFATAL("Forbidden target buffer");
+    chunks_discarded_++;
+    return;
+  }
+
+  // If the writer was registered by the producer, it should only write into the
+  // buffer it was registered with.
+  base::Optional<BufferID> associated_buffer =
+      producer->buffer_id_for_writer(writer_id);
+  if (associated_buffer && *associated_buffer != buffer_id) {
+    PERFETTO_ELOG("Writer %" PRIu16 " of producer %" PRIu16
+                  " was registered to write into target buffer %" PRIu16
+                  ", but tried to write into buffer %" PRIu16,
+                  writer_id, producer_id_trusted, *associated_buffer,
+                  buffer_id);
+    PERFETTO_DFATAL("Wrong target buffer");
+    chunks_discarded_++;
+    return;
+  }
+
+  buf->CopyChunkUntrusted(producer_id_trusted, producer_uid_trusted, writer_id,
+                          chunk_id, num_fragments, chunk_flags, chunk_complete,
+                          src, size);
+}
+
+void TracingServiceImpl::ApplyChunkPatches(
+    ProducerID producer_id_trusted,
+    const std::vector<CommitDataRequest::ChunkToPatch>& chunks_to_patch) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+
+  for (const auto& chunk : chunks_to_patch) {
+    const ChunkID chunk_id = static_cast<ChunkID>(chunk.chunk_id());
+    const WriterID writer_id = static_cast<WriterID>(chunk.writer_id());
+    TraceBuffer* buf =
+        GetBufferByID(static_cast<BufferID>(chunk.target_buffer()));
+    static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
+                  "Add a '|| chunk_id > kMaxChunkID' below if this fails");
+    if (!writer_id || writer_id > kMaxWriterID || !buf) {
+      // This can genuinely happen when the trace is stopped. The producers
+      // might see the stop signal with some delay and try to keep sending
+      // patches left soon after.
+      PERFETTO_DLOG(
+          "Received invalid chunks_to_patch request from Producer: %" PRIu16
+          ", BufferID: %" PRIu32 " ChunkdID: %" PRIu32 " WriterID: %" PRIu16,
+          producer_id_trusted, chunk.target_buffer(), chunk_id, writer_id);
+      patches_discarded_ += static_cast<uint64_t>(chunk.patches_size());
+      continue;
+    }
+
+    // Note, there's no need to validate that the producer is allowed to write
+    // to the specified buffer ID (or that it's the correct buffer ID for a
+    // registered TraceWriter). That's because TraceBuffer uses the producer ID
+    // and writer ID to look up the chunk to patch. If the producer specifies an
+    // incorrect buffer, this lookup will fail and TraceBuffer will ignore the
+    // patches. Because the producer ID is trusted, there's also no way for a
+    // malicious producer to patch another producer's data.
+
+    // Speculate on the fact that there are going to be a limited amount of
+    // patches per request, so we can allocate the |patches| array on the stack.
+    std::array<TraceBuffer::Patch, 1024> patches;  // Uninitialized.
+    if (chunk.patches().size() > patches.size()) {
+      PERFETTO_ELOG("Too many patches (%zu) batched in the same request",
+                    patches.size());
+      PERFETTO_DFATAL("Too many patches");
+      patches_discarded_ += static_cast<uint64_t>(chunk.patches_size());
+      continue;
+    }
+
+    size_t i = 0;
+    for (const auto& patch : chunk.patches()) {
+      const std::string& patch_data = patch.data();
+      if (patch_data.size() != patches[i].data.size()) {
+        PERFETTO_ELOG("Received patch from producer: %" PRIu16
+                      " of unexpected size %zu",
+                      producer_id_trusted, patch_data.size());
+        patches_discarded_++;
+        continue;
+      }
+      patches[i].offset_untrusted = patch.offset();
+      memcpy(&patches[i].data[0], patch_data.data(), patches[i].data.size());
+      i++;
+    }
+    buf->TryPatchChunkContents(producer_id_trusted, writer_id, chunk_id,
+                               &patches[0], i, chunk.has_more_patches());
+  }
+}
+
+TracingServiceImpl::TracingSession* TracingServiceImpl::GetDetachedSession(
+    uid_t uid,
+    const std::string& key) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  for (auto& kv : tracing_sessions_) {
+    TracingSession* session = &kv.second;
+    if (session->consumer_uid == uid && session->detach_key == key) {
+      PERFETTO_DCHECK(session->consumer_maybe_null == nullptr);
+      return session;
+    }
+  }
+  return nullptr;
+}
+
+TracingServiceImpl::TracingSession* TracingServiceImpl::GetTracingSession(
+    TracingSessionID tsid) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto it = tsid ? tracing_sessions_.find(tsid) : tracing_sessions_.end();
+  if (it == tracing_sessions_.end())
+    return nullptr;
+  return &it->second;
+}
+
+ProducerID TracingServiceImpl::GetNextProducerID() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_CHECK(producers_.size() < kMaxProducerID);
+  do {
+    ++last_producer_id_;
+  } while (producers_.count(last_producer_id_) || last_producer_id_ == 0);
+  PERFETTO_DCHECK(last_producer_id_ > 0 && last_producer_id_ <= kMaxProducerID);
+  return last_producer_id_;
+}
+
+TraceBuffer* TracingServiceImpl::GetBufferByID(BufferID buffer_id) {
+  auto buf_iter = buffers_.find(buffer_id);
+  if (buf_iter == buffers_.end())
+    return nullptr;
+  return &*buf_iter->second;
+}
+
+void TracingServiceImpl::OnStartTriggersTimeout(TracingSessionID tsid) {
+  // Skip entirely the flush if the trace session doesn't exist anymore.
+  // This is to prevent misleading error messages to be logged.
+  //
+  // if the trace has started from the trigger we rely on
+  // the |stop_delay_ms| from the trigger so don't flush and
+  // disable if we've moved beyond a CONFIGURED state
+  auto* tracing_session_ptr = GetTracingSession(tsid);
+  if (tracing_session_ptr &&
+      tracing_session_ptr->state == TracingSession::CONFIGURED) {
+    PERFETTO_DLOG("Disabling TracingSession %" PRIu64
+                  " since no triggers activated.",
+                  tsid);
+    // No data should be returned from ReadBuffers() regardless of if we
+    // call FreeBuffers() or DisableTracing(). This is because in
+    // STOP_TRACING we need this promise in either case, and using
+    // DisableTracing() allows a graceful shutdown. Consumers can follow
+    // their normal path and check the buffers through ReadBuffers() and
+    // the code won't hang because the tracing session will still be
+    // alive just disabled.
+    DisableTracing(tsid);
+  }
+}
+
+void TracingServiceImpl::UpdateMemoryGuardrail() {
+#if PERFETTO_BUILDFLAG(PERFETTO_WATCHDOG)
+  uint64_t total_buffer_bytes = 0;
+
+  // Sum up all the shared memory buffers.
+  for (const auto& id_to_producer : producers_) {
+    if (id_to_producer.second->shared_memory())
+      total_buffer_bytes += id_to_producer.second->shared_memory()->size();
+  }
+
+  // Sum up all the trace buffers.
+  for (const auto& id_to_buffer : buffers_) {
+    total_buffer_bytes += id_to_buffer.second->size();
+  }
+
+  // Set the guard rail to 32MB + the sum of all the buffers over a 30 second
+  // interval.
+  uint64_t guardrail = base::kWatchdogDefaultMemorySlack + total_buffer_bytes;
+  base::Watchdog::GetInstance()->SetMemoryLimit(guardrail, 30 * 1000);
+#endif
+}
+
+void TracingServiceImpl::PeriodicSnapshotTask(TracingSessionID tsid) {
+  auto* tracing_session = GetTracingSession(tsid);
+  if (!tracing_session)
+    return;
+  if (tracing_session->state != TracingSession::STARTED)
+    return;
+  tracing_session->should_emit_sync_marker = true;
+  tracing_session->should_emit_stats = true;
+  MaybeSnapshotClocksIntoRingBuffer(tracing_session);
+}
+
+void TracingServiceImpl::SnapshotLifecyleEvent(TracingSession* tracing_session,
+                                               uint32_t field_id,
+                                               bool snapshot_clocks) {
+  // field_id should be an id of a field in TracingServiceEvent.
+  auto& lifecycle_events = tracing_session->lifecycle_events;
+  auto event_it =
+      std::find_if(lifecycle_events.begin(), lifecycle_events.end(),
+                   [field_id](const TracingSession::LifecycleEvent& event) {
+                     return event.field_id == field_id;
+                   });
+
+  TracingSession::LifecycleEvent* event;
+  if (event_it == lifecycle_events.end()) {
+    lifecycle_events.emplace_back(field_id);
+    event = &lifecycle_events.back();
+  } else {
+    event = &*event_it;
+  }
+
+  // Snapshot the clocks before capturing the timestamp for the event so we can
+  // use this snapshot to resolve the event timestamp if necessary.
+  if (snapshot_clocks)
+    MaybeSnapshotClocksIntoRingBuffer(tracing_session);
+
+  // Erase before emplacing to prevent a unncessary doubling of memory if
+  // not needed.
+  if (event->timestamps.size() >= event->max_size) {
+    event->timestamps.erase_front(1 + event->timestamps.size() -
+                                  event->max_size);
+  }
+  event->timestamps.emplace_back(base::GetBootTimeNs().count());
+}
+
+void TracingServiceImpl::MaybeSnapshotClocksIntoRingBuffer(
+    TracingSession* tracing_session) {
+  if (tracing_session->config.builtin_data_sources()
+          .disable_clock_snapshotting()) {
+    return;
+  }
+
+  // We are making an explicit copy of the latest snapshot (if it exists)
+  // because SnapshotClocks reads this data and computes the drift based on its
+  // content. If the clock drift is high enough, it will update the contents of
+  // |snapshot| and return true. Otherwise, it will return false.
+  TracingSession::ClockSnapshotData snapshot =
+      tracing_session->clock_snapshot_ring_buffer.empty()
+          ? TracingSession::ClockSnapshotData()
+          : tracing_session->clock_snapshot_ring_buffer.back();
+  bool did_update = SnapshotClocks(&snapshot);
+  if (did_update) {
+    // This means clocks drifted enough since last snapshot. See the comment
+    // in SnapshotClocks.
+    auto* snapshot_buffer = &tracing_session->clock_snapshot_ring_buffer;
+
+    // Erase before emplacing to prevent a unncessary doubling of memory if
+    // not needed.
+    static constexpr uint32_t kClockSnapshotRingBufferSize = 16;
+    if (snapshot_buffer->size() >= kClockSnapshotRingBufferSize) {
+      snapshot_buffer->erase_front(1 + snapshot_buffer->size() -
+                                   kClockSnapshotRingBufferSize);
+    }
+    snapshot_buffer->emplace_back(std::move(snapshot));
+  }
+}
+
+// Returns true when the data in |snapshot_data| is updated with the new state
+// of the clocks and false otherwise.
+bool TracingServiceImpl::SnapshotClocks(
+    TracingSession::ClockSnapshotData* snapshot_data) {
+  // Minimum drift that justifies replacing a prior clock snapshot that hasn't
+  // been emitted into the trace yet (see comment below).
+  static constexpr int64_t kSignificantDriftNs = 10 * 1000 * 1000;  // 10 ms
+
+  TracingSession::ClockSnapshotData new_snapshot_data;
+
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE) && \
+    !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) &&   \
+    !PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
+  struct {
+    clockid_t id;
+    protos::pbzero::BuiltinClock type;
+    struct timespec ts;
+  } clocks[] = {
+      {CLOCK_BOOTTIME, protos::pbzero::BUILTIN_CLOCK_BOOTTIME, {0, 0}},
+      {CLOCK_REALTIME_COARSE,
+       protos::pbzero::BUILTIN_CLOCK_REALTIME_COARSE,
+       {0, 0}},
+      {CLOCK_MONOTONIC_COARSE,
+       protos::pbzero::BUILTIN_CLOCK_MONOTONIC_COARSE,
+       {0, 0}},
+      {CLOCK_REALTIME, protos::pbzero::BUILTIN_CLOCK_REALTIME, {0, 0}},
+      {CLOCK_MONOTONIC, protos::pbzero::BUILTIN_CLOCK_MONOTONIC, {0, 0}},
+      {CLOCK_MONOTONIC_RAW,
+       protos::pbzero::BUILTIN_CLOCK_MONOTONIC_RAW,
+       {0, 0}},
+  };
+  // First snapshot all the clocks as atomically as we can.
+  for (auto& clock : clocks) {
+    if (clock_gettime(clock.id, &clock.ts) == -1)
+      PERFETTO_DLOG("clock_gettime failed for clock %d", clock.id);
+  }
+  for (auto& clock : clocks) {
+    new_snapshot_data.push_back(std::make_pair(
+        static_cast<uint32_t>(clock.type),
+        static_cast<uint64_t>(base::FromPosixTimespec(clock.ts).count())));
+  }
+#else  // OS_APPLE || OS_WIN && OS_NACL
+  auto wall_time_ns = static_cast<uint64_t>(base::GetWallTimeNs().count());
+  // The default trace clock is boot time, so we always need to emit a path to
+  // it. However since we don't actually have a boot time source on these
+  // platforms, pretend that wall time equals boot time.
+  new_snapshot_data.push_back(
+      std::make_pair(protos::pbzero::BUILTIN_CLOCK_BOOTTIME, wall_time_ns));
+  new_snapshot_data.push_back(
+      std::make_pair(protos::pbzero::BUILTIN_CLOCK_MONOTONIC, wall_time_ns));
+#endif
+
+  // If we're about to update a session's latest clock snapshot that hasn't been
+  // emitted into the trace yet, check whether the clocks have drifted enough to
+  // warrant overriding the current snapshot values. The older snapshot would be
+  // valid for a larger part of the currently buffered trace data because the
+  // clock sync protocol in trace processor uses the latest clock <= timestamp
+  // to translate times (see https://perfetto.dev/docs/concepts/clock-sync), so
+  // we try to keep it if we can.
+  if (!snapshot_data->empty()) {
+    PERFETTO_DCHECK(snapshot_data->size() == new_snapshot_data.size());
+    PERFETTO_DCHECK((*snapshot_data)[0].first ==
+                    protos::gen::BUILTIN_CLOCK_BOOTTIME);
+
+    bool update_snapshot = false;
+    uint64_t old_boot_ns = (*snapshot_data)[0].second;
+    uint64_t new_boot_ns = new_snapshot_data[0].second;
+    int64_t boot_diff =
+        static_cast<int64_t>(new_boot_ns) - static_cast<int64_t>(old_boot_ns);
+
+    for (size_t i = 1; i < snapshot_data->size(); i++) {
+      uint64_t old_ns = (*snapshot_data)[i].second;
+      uint64_t new_ns = new_snapshot_data[i].second;
+
+      int64_t diff =
+          static_cast<int64_t>(new_ns) - static_cast<int64_t>(old_ns);
+
+      // Compare the boottime delta against the delta of this clock.
+      if (std::abs(boot_diff - diff) >= kSignificantDriftNs) {
+        update_snapshot = true;
+        break;
+      }
+    }
+    if (!update_snapshot)
+      return false;
+    snapshot_data->clear();
+  }
+
+  *snapshot_data = std::move(new_snapshot_data);
+  return true;
+}
+
+void TracingServiceImpl::EmitClockSnapshot(
+    TracingSession* tracing_session,
+    TracingSession::ClockSnapshotData snapshot_data,
+    std::vector<TracePacket>* packets) {
+  PERFETTO_DCHECK(!tracing_session->config.builtin_data_sources()
+                       .disable_clock_snapshotting());
+
+  protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
+  auto* snapshot = packet->set_clock_snapshot();
+
+  protos::gen::BuiltinClock trace_clock =
+      tracing_session->config.builtin_data_sources().primary_trace_clock();
+  if (!trace_clock)
+    trace_clock = protos::gen::BUILTIN_CLOCK_BOOTTIME;
+  snapshot->set_primary_trace_clock(
+      static_cast<protos::pbzero::BuiltinClock>(trace_clock));
+
+  for (auto& clock_id_and_ts : snapshot_data) {
+    auto* c = snapshot->add_clocks();
+    c->set_clock_id(clock_id_and_ts.first);
+    c->set_timestamp(clock_id_and_ts.second);
+  }
+
+  packet->set_trusted_uid(static_cast<int32_t>(uid_));
+  packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
+  SerializeAndAppendPacket(packets, packet.SerializeAsArray());
+}
+
+void TracingServiceImpl::EmitSyncMarker(std::vector<TracePacket>* packets) {
+  // The sync marks are used to tokenize large traces efficiently.
+  // See description in trace_packet.proto.
+  if (sync_marker_packet_size_ == 0) {
+    // The marker ABI expects that the marker is written after the uid.
+    // Protozero guarantees that fields are written in the same order of the
+    // calls. The ResynchronizeTraceStreamUsingSyncMarker test verifies the ABI.
+    protozero::StaticBuffered<protos::pbzero::TracePacket> packet(
+        &sync_marker_packet_[0], sizeof(sync_marker_packet_));
+    packet->set_trusted_uid(static_cast<int32_t>(uid_));
+    packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
+
+    // Keep this last.
+    packet->set_synchronization_marker(kSyncMarker, sizeof(kSyncMarker));
+    sync_marker_packet_size_ = packet.Finalize();
+  }
+  packets->emplace_back();
+  packets->back().AddSlice(&sync_marker_packet_[0], sync_marker_packet_size_);
+}
+
+void TracingServiceImpl::EmitStats(TracingSession* tracing_session,
+                                   std::vector<TracePacket>* packets) {
+  protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
+  packet->set_trusted_uid(static_cast<int32_t>(uid_));
+  packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
+  GetTraceStats(tracing_session).Serialize(packet->set_trace_stats());
+  SerializeAndAppendPacket(packets, packet.SerializeAsArray());
+}
+
+TraceStats TracingServiceImpl::GetTraceStats(TracingSession* tracing_session) {
+  TraceStats trace_stats;
+  trace_stats.set_producers_connected(static_cast<uint32_t>(producers_.size()));
+  trace_stats.set_producers_seen(last_producer_id_);
+  trace_stats.set_data_sources_registered(
+      static_cast<uint32_t>(data_sources_.size()));
+  trace_stats.set_data_sources_seen(last_data_source_instance_id_);
+  trace_stats.set_tracing_sessions(
+      static_cast<uint32_t>(tracing_sessions_.size()));
+  trace_stats.set_total_buffers(static_cast<uint32_t>(buffers_.size()));
+  trace_stats.set_chunks_discarded(chunks_discarded_);
+  trace_stats.set_patches_discarded(patches_discarded_);
+  trace_stats.set_invalid_packets(tracing_session->invalid_packets);
+
+  if (tracing_session->trace_filter) {
+    auto* filt_stats = trace_stats.mutable_filter_stats();
+    filt_stats->set_input_packets(tracing_session->filter_input_packets);
+    filt_stats->set_input_bytes(tracing_session->filter_input_bytes);
+    filt_stats->set_output_bytes(tracing_session->filter_output_bytes);
+    filt_stats->set_errors(tracing_session->filter_errors);
+  }
+
+  for (BufferID buf_id : tracing_session->buffers_index) {
+    TraceBuffer* buf = GetBufferByID(buf_id);
+    if (!buf) {
+      PERFETTO_DFATAL("Buffer not found.");
+      continue;
+    }
+    *trace_stats.add_buffer_stats() = buf->stats();
+  }  // for (buf in session).
+  return trace_stats;
+}
+
+void TracingServiceImpl::MaybeEmitTraceConfig(
+    TracingSession* tracing_session,
+    std::vector<TracePacket>* packets) {
+  if (tracing_session->did_emit_config)
+    return;
+  tracing_session->did_emit_config = true;
+  protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
+  packet->set_trusted_uid(static_cast<int32_t>(uid_));
+  packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
+  tracing_session->config.Serialize(packet->set_trace_config());
+  SerializeAndAppendPacket(packets, packet.SerializeAsArray());
+}
+
+void TracingServiceImpl::MaybeEmitSystemInfo(
+    TracingSession* tracing_session,
+    std::vector<TracePacket>* packets) {
+  if (tracing_session->did_emit_system_info)
+    return;
+  tracing_session->did_emit_system_info = true;
+  protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
+  auto* info = packet->set_system_info();
+  info->set_tracing_service_version(base::GetVersionString());
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
+    !PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
+  struct utsname uname_info;
+  if (uname(&uname_info) == 0) {
+    auto* utsname_info = info->set_utsname();
+    utsname_info->set_sysname(uname_info.sysname);
+    utsname_info->set_version(uname_info.version);
+    utsname_info->set_machine(uname_info.machine);
+    utsname_info->set_release(uname_info.release);
+  }
+#endif  // !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+  char value[PROP_VALUE_MAX];
+  if (__system_property_get("ro.build.fingerprint", value)) {
+    info->set_android_build_fingerprint(value);
+  } else {
+    PERFETTO_ELOG("Unable to read ro.build.fingerprint");
+  }
+  info->set_hz(sysconf(_SC_CLK_TCK));
+#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+  packet->set_trusted_uid(static_cast<int32_t>(uid_));
+  packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
+  SerializeAndAppendPacket(packets, packet.SerializeAsArray());
+}
+
+void TracingServiceImpl::EmitLifecycleEvents(
+    TracingSession* tracing_session,
+    std::vector<TracePacket>* packets) {
+  using TimestampedPacket =
+      std::pair<int64_t /* ts */, std::vector<uint8_t> /* serialized packet */>;
+
+  std::vector<TimestampedPacket> timestamped_packets;
+  for (auto& event : tracing_session->lifecycle_events) {
+    for (int64_t ts : event.timestamps) {
+      protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
+      packet->set_timestamp(static_cast<uint64_t>(ts));
+      packet->set_trusted_uid(static_cast<int32_t>(uid_));
+      packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
+
+      auto* service_event = packet->set_service_event();
+      service_event->AppendVarInt(event.field_id, 1);
+      timestamped_packets.emplace_back(ts, packet.SerializeAsArray());
+    }
+    event.timestamps.clear();
+  }
+
+  // We sort by timestamp here to ensure that the "sequence" of lifecycle
+  // packets has monotonic timestamps like other sequences in the trace.
+  // Note that these events could still be out of order with respect to other
+  // events on the service packet sequence (e.g. trigger received packets).
+  std::sort(timestamped_packets.begin(), timestamped_packets.end(),
+            [](const TimestampedPacket& a, const TimestampedPacket& b) {
+              return a.first < b.first;
+            });
+
+  for (const auto& pair : timestamped_packets)
+    SerializeAndAppendPacket(packets, std::move(pair.second));
+}
+
+void TracingServiceImpl::EmitSeizedForBugreportLifecycleEvent(
+    std::vector<TracePacket>* packets) {
+  protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
+  packet->set_timestamp(static_cast<uint64_t>(base::GetBootTimeNs().count()));
+  packet->set_trusted_uid(static_cast<int32_t>(uid_));
+  packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
+  auto* service_event = packet->set_service_event();
+  service_event->AppendVarInt(
+      protos::pbzero::TracingServiceEvent::kSeizedForBugreportFieldNumber, 1);
+  SerializeAndAppendPacket(packets, packet.SerializeAsArray());
+}
+
+void TracingServiceImpl::MaybeEmitReceivedTriggers(
+    TracingSession* tracing_session,
+    std::vector<TracePacket>* packets) {
+  PERFETTO_DCHECK(tracing_session->num_triggers_emitted_into_trace <=
+                  tracing_session->received_triggers.size());
+  for (size_t i = tracing_session->num_triggers_emitted_into_trace;
+       i < tracing_session->received_triggers.size(); ++i) {
+    const auto& info = tracing_session->received_triggers[i];
+    protozero::HeapBuffered<protos::pbzero::TracePacket> packet;
+    auto* trigger = packet->set_trigger();
+    trigger->set_trigger_name(info.trigger_name);
+    trigger->set_producer_name(info.producer_name);
+    trigger->set_trusted_producer_uid(static_cast<int32_t>(info.producer_uid));
+
+    packet->set_timestamp(info.boot_time_ns);
+    packet->set_trusted_uid(static_cast<int32_t>(uid_));
+    packet->set_trusted_packet_sequence_id(kServicePacketSequenceID);
+    SerializeAndAppendPacket(packets, packet.SerializeAsArray());
+    ++tracing_session->num_triggers_emitted_into_trace;
+  }
+}
+
+bool TracingServiceImpl::MaybeSaveTraceForBugreport(
+    std::function<void()> callback) {
+  TracingSession* max_session = nullptr;
+  TracingSessionID max_tsid = 0;
+  for (auto& session_id_and_session : tracing_sessions_) {
+    auto& session = session_id_and_session.second;
+    const int32_t score = session.config.bugreport_score();
+    // Exclude sessions with 0 (or below) score. By default tracing sessions
+    // should NOT be eligible to be attached to bugreports.
+    if (score <= 0 || session.state != TracingSession::STARTED)
+      continue;
+
+    // Also don't try to steal long traces with write_into_file if their content
+    // has been already partially written into a file, as we would get partial
+    // traces on both sides. We can't just copy the original file into the
+    // bugreport because the file could be too big (GBs) for bugreports.
+    // The only case where it's legit to steal traces with write_into_file, is
+    // when the consumer specified a very large write_period_ms (e.g. 24h),
+    // meaning that this is effectively a ring-buffer trace. Traceur (the
+    // Android System Tracing app), which uses --detach, does this to have a
+    // consistent invocation path for long-traces and ring-buffer-mode traces.
+    if (session.write_into_file && session.bytes_written_into_file > 0)
+      continue;
+
+    // If we are already in the process of finalizing another trace for
+    // bugreport, don't even start another one, as they would try to write onto
+    // the same file.
+    if (session.on_disable_callback_for_bugreport)
+      return false;
+
+    if (!max_session || score > max_session->config.bugreport_score()) {
+      max_session = &session;
+      max_tsid = session_id_and_session.first;
+    }
+  }
+
+  // No eligible trace found.
+  if (!max_session)
+    return false;
+
+  PERFETTO_LOG("Seizing trace for bugreport. tsid:%" PRIu64
+               " state:%d wf:%d score:%d name:\"%s\"",
+               max_tsid, max_session->state, !!max_session->write_into_file,
+               max_session->config.bugreport_score(),
+               max_session->config.unique_session_name().c_str());
+
+  auto br_fd = CreateTraceFile(GetBugreportTmpPath(), /*overwrite=*/true);
+  if (!br_fd)
+    return false;
+
+  if (max_session->write_into_file) {
+    auto fd = *max_session->write_into_file;
+    // If we are stealing a write_into_file session, add a marker that explains
+    // why the trace has been stolen rather than creating an empty file. This is
+    // only for write_into_file traces. A similar code path deals with the case
+    // of reading-back a seized trace from IPC in ReadBuffers().
+    if (!max_session->config.builtin_data_sources().disable_service_events()) {
+      std::vector<TracePacket> packets;
+      EmitSeizedForBugreportLifecycleEvent(&packets);
+      for (auto& packet : packets) {
+        char* preamble;
+        size_t preamble_size = 0;
+        std::tie(preamble, preamble_size) = packet.GetProtoPreamble();
+        base::WriteAll(fd, preamble, preamble_size);
+        for (const Slice& slice : packet.slices()) {
+          base::WriteAll(fd, slice.start, slice.size);
+        }
+      }  // for (packets)
+    }    // if (!disable_service_events())
+  }      // if (max_session->write_into_file)
+  max_session->write_into_file = std::move(br_fd);
+  max_session->on_disable_callback_for_bugreport = std::move(callback);
+  max_session->seized_for_bugreport = true;
+
+  // Post a task to avoid that early FlushAndDisableTracing() failures invoke
+  // the callback before we return. That would re-enter in a weird way the
+  // callstack of the calling ConsumerEndpointImpl::SaveTraceForBugreport().
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostTask([weak_this, max_tsid] {
+    if (weak_this)
+      weak_this->FlushAndDisableTracing(max_tsid);
+  });
+  return true;
+}
+
+void TracingServiceImpl::MaybeLogUploadEvent(const TraceConfig& cfg,
+                                             PerfettoStatsdAtom atom,
+                                             const std::string& trigger_name) {
+  if (!ShouldLogEvent(cfg))
+    return;
+
+  // If the UUID is not set for some reason, don't log anything.
+  if (cfg.trace_uuid_lsb() == 0 && cfg.trace_uuid_msb() == 0)
+    return;
+
+  android_stats::MaybeLogUploadEvent(atom, cfg.trace_uuid_lsb(),
+                                     cfg.trace_uuid_msb(), trigger_name);
+}
+
+void TracingServiceImpl::MaybeLogTriggerEvent(const TraceConfig& cfg,
+                                              PerfettoTriggerAtom atom,
+                                              const std::string& trigger_name) {
+  if (!ShouldLogEvent(cfg))
+    return;
+  android_stats::MaybeLogTriggerEvent(atom, trigger_name);
+}
+
+size_t TracingServiceImpl::PurgeExpiredAndCountTriggerInWindow(
+    int64_t now_ns,
+    uint64_t trigger_name_hash) {
+  PERFETTO_DCHECK(
+      std::is_sorted(trigger_history_.begin(), trigger_history_.end()));
+  size_t remove_count = 0;
+  size_t trigger_count = 0;
+  for (const TriggerHistory& h : trigger_history_) {
+    if (h.timestamp_ns < now_ns - trigger_window_ns_) {
+      remove_count++;
+    } else if (h.name_hash == trigger_name_hash) {
+      trigger_count++;
+    }
+  }
+  trigger_history_.erase_front(remove_count);
+  return trigger_count;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// TracingServiceImpl::ConsumerEndpointImpl implementation
+////////////////////////////////////////////////////////////////////////////////
+
+TracingServiceImpl::ConsumerEndpointImpl::ConsumerEndpointImpl(
+    TracingServiceImpl* service,
+    base::TaskRunner* task_runner,
+    Consumer* consumer,
+    uid_t uid)
+    : task_runner_(task_runner),
+      service_(service),
+      consumer_(consumer),
+      uid_(uid),
+      weak_ptr_factory_(this) {}
+
+TracingServiceImpl::ConsumerEndpointImpl::~ConsumerEndpointImpl() {
+  service_->DisconnectConsumer(this);
+  consumer_->OnDisconnect();
+}
+
+void TracingServiceImpl::ConsumerEndpointImpl::NotifyOnTracingDisabled(
+    const std::string& error) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostTask([weak_this, error /* deliberate copy */] {
+    if (weak_this)
+      weak_this->consumer_->OnTracingDisabled(error);
+  });
+}
+
+void TracingServiceImpl::ConsumerEndpointImpl::EnableTracing(
+    const TraceConfig& cfg,
+    base::ScopedFile fd) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto status = service_->EnableTracing(this, cfg, std::move(fd));
+  if (!status.ok())
+    NotifyOnTracingDisabled(status.message());
+}
+
+void TracingServiceImpl::ConsumerEndpointImpl::ChangeTraceConfig(
+    const TraceConfig& cfg) {
+  if (!tracing_session_id_) {
+    PERFETTO_LOG(
+        "Consumer called ChangeTraceConfig() but tracing was "
+        "not active");
+    return;
+  }
+  service_->ChangeTraceConfig(this, cfg);
+}
+
+void TracingServiceImpl::ConsumerEndpointImpl::StartTracing() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!tracing_session_id_) {
+    PERFETTO_LOG("Consumer called StartTracing() but tracing was not active");
+    return;
+  }
+  service_->StartTracing(tracing_session_id_);
+}
+
+void TracingServiceImpl::ConsumerEndpointImpl::DisableTracing() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!tracing_session_id_) {
+    PERFETTO_LOG("Consumer called DisableTracing() but tracing was not active");
+    return;
+  }
+  service_->DisableTracing(tracing_session_id_);
+}
+
+void TracingServiceImpl::ConsumerEndpointImpl::ReadBuffers() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!tracing_session_id_) {
+    PERFETTO_LOG("Consumer called ReadBuffers() but tracing was not active");
+    consumer_->OnTraceData({}, /* has_more = */ false);
+    return;
+  }
+  if (!service_->ReadBuffers(tracing_session_id_, this)) {
+    consumer_->OnTraceData({}, /* has_more = */ false);
+  }
+}
+
+void TracingServiceImpl::ConsumerEndpointImpl::FreeBuffers() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!tracing_session_id_) {
+    PERFETTO_LOG("Consumer called FreeBuffers() but tracing was not active");
+    return;
+  }
+  service_->FreeBuffers(tracing_session_id_);
+  tracing_session_id_ = 0;
+}
+
+void TracingServiceImpl::ConsumerEndpointImpl::Flush(uint32_t timeout_ms,
+                                                     FlushCallback callback) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!tracing_session_id_) {
+    PERFETTO_LOG("Consumer called Flush() but tracing was not active");
+    return;
+  }
+  service_->Flush(tracing_session_id_, timeout_ms, callback);
+}
+
+void TracingServiceImpl::ConsumerEndpointImpl::Detach(const std::string& key) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  bool success = service_->DetachConsumer(this, key);
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostTask([weak_this, success] {
+    if (weak_this)
+      weak_this->consumer_->OnDetach(success);
+  });
+}
+
+void TracingServiceImpl::ConsumerEndpointImpl::Attach(const std::string& key) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  bool success = service_->AttachConsumer(this, key);
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostTask([weak_this, success] {
+    if (!weak_this)
+      return;
+    Consumer* consumer = weak_this->consumer_;
+    TracingSession* session =
+        weak_this->service_->GetTracingSession(weak_this->tracing_session_id_);
+    if (!session) {
+      consumer->OnAttach(false, TraceConfig());
+      return;
+    }
+    consumer->OnAttach(success, session->config);
+  });
+}
+
+void TracingServiceImpl::ConsumerEndpointImpl::GetTraceStats() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  bool success = false;
+  TraceStats stats;
+  TracingSession* session = service_->GetTracingSession(tracing_session_id_);
+  if (session) {
+    success = true;
+    stats = service_->GetTraceStats(session);
+  }
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostTask([weak_this, success, stats] {
+    if (weak_this)
+      weak_this->consumer_->OnTraceStats(success, stats);
+  });
+}
+
+void TracingServiceImpl::ConsumerEndpointImpl::ObserveEvents(
+    uint32_t events_mask) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  observable_events_mask_ = events_mask;
+  TracingSession* session = service_->GetTracingSession(tracing_session_id_);
+  if (!session)
+    return;
+
+  if (observable_events_mask_ & ObservableEvents::TYPE_DATA_SOURCES_INSTANCES) {
+    // Issue initial states.
+    for (const auto& kv : session->data_source_instances) {
+      ProducerEndpointImpl* producer = service_->GetProducer(kv.first);
+      PERFETTO_DCHECK(producer);
+      OnDataSourceInstanceStateChange(*producer, kv.second);
+    }
+  }
+
+  // If the ObserveEvents() call happens after data sources have acked already
+  // notify immediately.
+  if (observable_events_mask_ &
+      ObservableEvents::TYPE_ALL_DATA_SOURCES_STARTED) {
+    service_->MaybeNotifyAllDataSourcesStarted(session);
+  }
+}
+
+void TracingServiceImpl::ConsumerEndpointImpl::OnDataSourceInstanceStateChange(
+    const ProducerEndpointImpl& producer,
+    const DataSourceInstance& instance) {
+  if (!(observable_events_mask_ &
+        ObservableEvents::TYPE_DATA_SOURCES_INSTANCES)) {
+    return;
+  }
+
+  if (instance.state != DataSourceInstance::CONFIGURED &&
+      instance.state != DataSourceInstance::STARTED &&
+      instance.state != DataSourceInstance::STOPPED) {
+    return;
+  }
+
+  auto* observable_events = AddObservableEvents();
+  auto* change = observable_events->add_instance_state_changes();
+  change->set_producer_name(producer.name_);
+  change->set_data_source_name(instance.data_source_name);
+  if (instance.state == DataSourceInstance::STARTED) {
+    change->set_state(ObservableEvents::DATA_SOURCE_INSTANCE_STATE_STARTED);
+  } else {
+    change->set_state(ObservableEvents::DATA_SOURCE_INSTANCE_STATE_STOPPED);
+  }
+}
+
+void TracingServiceImpl::ConsumerEndpointImpl::OnAllDataSourcesStarted() {
+  if (!(observable_events_mask_ &
+        ObservableEvents::TYPE_ALL_DATA_SOURCES_STARTED)) {
+    return;
+  }
+  auto* observable_events = AddObservableEvents();
+  observable_events->set_all_data_sources_started(true);
+}
+
+ObservableEvents*
+TracingServiceImpl::ConsumerEndpointImpl::AddObservableEvents() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!observable_events_) {
+    observable_events_.reset(new ObservableEvents());
+    auto weak_this = weak_ptr_factory_.GetWeakPtr();
+    task_runner_->PostTask([weak_this] {
+      if (!weak_this)
+        return;
+
+      // Move into a temporary to allow reentrancy in OnObservableEvents.
+      auto observable_events = std::move(weak_this->observable_events_);
+      weak_this->consumer_->OnObservableEvents(*observable_events);
+    });
+  }
+  return observable_events_.get();
+}
+
+void TracingServiceImpl::ConsumerEndpointImpl::QueryServiceState(
+    QueryServiceStateCallback callback) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  TracingServiceState svc_state;
+
+  const auto& sessions = service_->tracing_sessions_;
+  svc_state.set_tracing_service_version(base::GetVersionString());
+  svc_state.set_num_sessions(static_cast<int>(sessions.size()));
+
+  int num_started = 0;
+  for (const auto& kv : sessions)
+    num_started += kv.second.state == TracingSession::State::STARTED ? 1 : 0;
+  svc_state.set_num_sessions_started(static_cast<int>(num_started));
+
+  for (const auto& kv : service_->producers_) {
+    auto* producer = svc_state.add_producers();
+    producer->set_id(static_cast<int>(kv.first));
+    producer->set_name(kv.second->name_);
+    producer->set_sdk_version(kv.second->sdk_version_);
+    producer->set_uid(static_cast<int32_t>(producer->uid()));
+  }
+
+  for (const auto& kv : service_->data_sources_) {
+    const auto& registered_data_source = kv.second;
+    auto* data_source = svc_state.add_data_sources();
+    *data_source->mutable_ds_descriptor() = registered_data_source.descriptor;
+    data_source->set_producer_id(
+        static_cast<int>(registered_data_source.producer_id));
+  }
+  callback(/*success=*/true, svc_state);
+}
+
+void TracingServiceImpl::ConsumerEndpointImpl::QueryCapabilities(
+    QueryCapabilitiesCallback callback) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  TracingServiceCapabilities caps;
+  caps.set_has_query_capabilities(true);
+  caps.set_has_trace_config_output_path(true);
+  caps.add_observable_events(ObservableEvents::TYPE_DATA_SOURCES_INSTANCES);
+  caps.add_observable_events(ObservableEvents::TYPE_ALL_DATA_SOURCES_STARTED);
+  static_assert(ObservableEvents::Type_MAX ==
+                    ObservableEvents::TYPE_ALL_DATA_SOURCES_STARTED,
+                "");
+  callback(caps);
+}
+
+void TracingServiceImpl::ConsumerEndpointImpl::SaveTraceForBugreport(
+    SaveTraceForBugreportCallback consumer_callback) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto on_complete_callback = [consumer_callback] {
+    if (rename(GetBugreportTmpPath().c_str(), GetBugreportPath().c_str())) {
+      consumer_callback(false, "rename(" + GetBugreportTmpPath() + ", " +
+                                   GetBugreportPath() + ") failed (" +
+                                   strerror(errno) + ")");
+    } else {
+      consumer_callback(true, GetBugreportPath());
+    }
+  };
+  if (!service_->MaybeSaveTraceForBugreport(std::move(on_complete_callback))) {
+    consumer_callback(false,
+                      "No trace with TraceConfig.bugreport_score > 0 eligible "
+                      "for bug reporting was found");
+  }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// TracingServiceImpl::ProducerEndpointImpl implementation
+////////////////////////////////////////////////////////////////////////////////
+
+TracingServiceImpl::ProducerEndpointImpl::ProducerEndpointImpl(
+    ProducerID id,
+    uid_t uid,
+    TracingServiceImpl* service,
+    base::TaskRunner* task_runner,
+    Producer* producer,
+    const std::string& producer_name,
+    const std::string& sdk_version,
+    bool in_process,
+    bool smb_scraping_enabled)
+    : id_(id),
+      uid_(uid),
+      service_(service),
+      task_runner_(task_runner),
+      producer_(producer),
+      name_(producer_name),
+      sdk_version_(sdk_version),
+      in_process_(in_process),
+      smb_scraping_enabled_(smb_scraping_enabled),
+      weak_ptr_factory_(this) {}
+
+TracingServiceImpl::ProducerEndpointImpl::~ProducerEndpointImpl() {
+  service_->DisconnectProducer(id_);
+  producer_->OnDisconnect();
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::RegisterDataSource(
+    const DataSourceDescriptor& desc) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (desc.name().empty()) {
+    PERFETTO_DLOG("Received RegisterDataSource() with empty name");
+    return;
+  }
+
+  service_->RegisterDataSource(id_, desc);
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::UnregisterDataSource(
+    const std::string& name) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  service_->UnregisterDataSource(id_, name);
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::RegisterTraceWriter(
+    uint32_t writer_id,
+    uint32_t target_buffer) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  writers_[static_cast<WriterID>(writer_id)] =
+      static_cast<BufferID>(target_buffer);
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::UnregisterTraceWriter(
+    uint32_t writer_id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  writers_.erase(static_cast<WriterID>(writer_id));
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::CommitData(
+    const CommitDataRequest& req_untrusted,
+    CommitDataCallback callback) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+
+  if (metatrace::IsEnabled(metatrace::TAG_TRACE_SERVICE)) {
+    PERFETTO_METATRACE_COUNTER(TAG_TRACE_SERVICE, TRACE_SERVICE_COMMIT_DATA,
+                               EncodeCommitDataRequest(id_, req_untrusted));
+  }
+
+  if (!shared_memory_) {
+    PERFETTO_DLOG(
+        "Attempted to commit data before the shared memory was allocated.");
+    return;
+  }
+  PERFETTO_DCHECK(shmem_abi_.is_valid());
+  for (const auto& entry : req_untrusted.chunks_to_move()) {
+    const uint32_t page_idx = entry.page();
+    if (page_idx >= shmem_abi_.num_pages())
+      continue;  // A buggy or malicious producer.
+
+    SharedMemoryABI::Chunk chunk =
+        shmem_abi_.TryAcquireChunkForReading(page_idx, entry.chunk());
+    if (!chunk.is_valid()) {
+      PERFETTO_DLOG("Asked to move chunk %d:%d, but it's not complete",
+                    entry.page(), entry.chunk());
+      continue;
+    }
+
+    // TryAcquireChunkForReading() has load-acquire semantics. Once acquired,
+    // the ABI contract expects the producer to not touch the chunk anymore
+    // (until the service marks that as free). This is why all the reads below
+    // are just memory_order_relaxed. Also, the code here assumes that all this
+    // data can be malicious and just gives up if anything is malformed.
+    BufferID buffer_id = static_cast<BufferID>(entry.target_buffer());
+    const SharedMemoryABI::ChunkHeader& chunk_header = *chunk.header();
+    WriterID writer_id = chunk_header.writer_id.load(std::memory_order_relaxed);
+    ChunkID chunk_id = chunk_header.chunk_id.load(std::memory_order_relaxed);
+    auto packets = chunk_header.packets.load(std::memory_order_relaxed);
+    uint16_t num_fragments = packets.count;
+    uint8_t chunk_flags = packets.flags;
+
+    service_->CopyProducerPageIntoLogBuffer(
+        id_, uid_, writer_id, chunk_id, buffer_id, num_fragments, chunk_flags,
+        /*chunk_complete=*/true, chunk.payload_begin(), chunk.payload_size());
+
+    // This one has release-store semantics.
+    shmem_abi_.ReleaseChunkAsFree(std::move(chunk));
+  }  // for(chunks_to_move)
+
+  service_->ApplyChunkPatches(id_, req_untrusted.chunks_to_patch());
+
+  if (req_untrusted.flush_request_id()) {
+    service_->NotifyFlushDoneForProducer(id_, req_untrusted.flush_request_id());
+  }
+
+  // Keep this invocation last. ProducerIPCService::CommitData() relies on this
+  // callback being invoked within the same callstack and not posted. If this
+  // changes, the code there needs to be changed accordingly.
+  if (callback)
+    callback();
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::SetupSharedMemory(
+    std::unique_ptr<SharedMemory> shared_memory,
+    size_t page_size_bytes,
+    bool provided_by_producer) {
+  PERFETTO_DCHECK(!shared_memory_ && !shmem_abi_.is_valid());
+  PERFETTO_DCHECK(page_size_bytes % 1024 == 0);
+
+  shared_memory_ = std::move(shared_memory);
+  shared_buffer_page_size_kb_ = page_size_bytes / 1024;
+  is_shmem_provided_by_producer_ = provided_by_producer;
+
+  shmem_abi_.Initialize(reinterpret_cast<uint8_t*>(shared_memory_->start()),
+                        shared_memory_->size(),
+                        shared_buffer_page_size_kb() * 1024);
+  if (in_process_) {
+    inproc_shmem_arbiter_.reset(new SharedMemoryArbiterImpl(
+        shared_memory_->start(), shared_memory_->size(),
+        shared_buffer_page_size_kb_ * 1024, this, task_runner_));
+    inproc_shmem_arbiter_->SetDirectSMBPatchingSupportedByService();
+  }
+
+  OnTracingSetup();
+  service_->UpdateMemoryGuardrail();
+}
+
+SharedMemory* TracingServiceImpl::ProducerEndpointImpl::shared_memory() const {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  return shared_memory_.get();
+}
+
+size_t TracingServiceImpl::ProducerEndpointImpl::shared_buffer_page_size_kb()
+    const {
+  return shared_buffer_page_size_kb_;
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::ActivateTriggers(
+    const std::vector<std::string>& triggers) {
+  service_->ActivateTriggers(id_, triggers);
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::StopDataSource(
+    DataSourceInstanceID ds_inst_id) {
+  // TODO(primiano): When we'll support tearing down the SMB, at this point we
+  // should send the Producer a TearDownTracing if all its data sources have
+  // been disabled (see b/77532839 and aosp/655179 PS1).
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostTask([weak_this, ds_inst_id] {
+    if (weak_this)
+      weak_this->producer_->StopDataSource(ds_inst_id);
+  });
+}
+
+SharedMemoryArbiter*
+TracingServiceImpl::ProducerEndpointImpl::MaybeSharedMemoryArbiter() {
+  if (!inproc_shmem_arbiter_) {
+    PERFETTO_FATAL(
+        "The in-process SharedMemoryArbiter can only be used when "
+        "CreateProducer has been called with in_process=true and after tracing "
+        "has started.");
+  }
+
+  PERFETTO_DCHECK(in_process_);
+  return inproc_shmem_arbiter_.get();
+}
+
+bool TracingServiceImpl::ProducerEndpointImpl::IsShmemProvidedByProducer()
+    const {
+  return is_shmem_provided_by_producer_;
+}
+
+// Can be called on any thread.
+std::unique_ptr<TraceWriter>
+TracingServiceImpl::ProducerEndpointImpl::CreateTraceWriter(
+    BufferID buf_id,
+    BufferExhaustedPolicy buffer_exhausted_policy) {
+  PERFETTO_DCHECK(MaybeSharedMemoryArbiter());
+  return MaybeSharedMemoryArbiter()->CreateTraceWriter(buf_id,
+                                                       buffer_exhausted_policy);
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::NotifyFlushComplete(
+    FlushRequestID id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DCHECK(MaybeSharedMemoryArbiter());
+  return MaybeSharedMemoryArbiter()->NotifyFlushComplete(id);
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::OnTracingSetup() {
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostTask([weak_this] {
+    if (weak_this)
+      weak_this->producer_->OnTracingSetup();
+  });
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::Flush(
+    FlushRequestID flush_request_id,
+    const std::vector<DataSourceInstanceID>& data_sources) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostTask([weak_this, flush_request_id, data_sources] {
+    if (weak_this) {
+      weak_this->producer_->Flush(flush_request_id, data_sources.data(),
+                                  data_sources.size());
+    }
+  });
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::SetupDataSource(
+    DataSourceInstanceID ds_id,
+    const DataSourceConfig& config) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  allowed_target_buffers_.insert(static_cast<BufferID>(config.target_buffer()));
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostTask([weak_this, ds_id, config] {
+    if (weak_this)
+      weak_this->producer_->SetupDataSource(ds_id, std::move(config));
+  });
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::StartDataSource(
+    DataSourceInstanceID ds_id,
+    const DataSourceConfig& config) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostTask([weak_this, ds_id, config] {
+    if (weak_this)
+      weak_this->producer_->StartDataSource(ds_id, std::move(config));
+  });
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::NotifyDataSourceStarted(
+    DataSourceInstanceID data_source_id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  service_->NotifyDataSourceStarted(id_, data_source_id);
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::NotifyDataSourceStopped(
+    DataSourceInstanceID data_source_id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  service_->NotifyDataSourceStopped(id_, data_source_id);
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::OnFreeBuffers(
+    const std::vector<BufferID>& target_buffers) {
+  if (allowed_target_buffers_.empty())
+    return;
+  for (BufferID buffer : target_buffers)
+    allowed_target_buffers_.erase(buffer);
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::ClearIncrementalState(
+    const std::vector<DataSourceInstanceID>& data_sources) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostTask([weak_this, data_sources] {
+    if (weak_this) {
+      weak_this->producer_->ClearIncrementalState(data_sources.data(),
+                                                  data_sources.size());
+    }
+  });
+}
+
+void TracingServiceImpl::ProducerEndpointImpl::Sync(
+    std::function<void()> callback) {
+  task_runner_->PostTask(callback);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// TracingServiceImpl::TracingSession implementation
+////////////////////////////////////////////////////////////////////////////////
+
+TracingServiceImpl::TracingSession::TracingSession(
+    TracingSessionID session_id,
+    ConsumerEndpointImpl* consumer,
+    const TraceConfig& new_config,
+    base::TaskRunner* task_runner)
+    : id(session_id),
+      consumer_maybe_null(consumer),
+      consumer_uid(consumer->uid_),
+      config(new_config),
+      snapshot_periodic_task(task_runner) {
+  // all_data_sources_flushed is special because we store up to 64 events of
+  // this type. Other events will go through the default case in
+  // SnapshotLifecycleEvent() where they will be given a max history of 1.
+  lifecycle_events.emplace_back(
+      protos::pbzero::TracingServiceEvent::kAllDataSourcesFlushedFieldNumber,
+      64 /* max_size */);
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/internal/in_process_tracing_backend.cc
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/in_process_tracing_backend.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/paged_memory.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
+
+// TODO(primiano): When the in-process backend is used, we should never end up
+// in a situation where the thread where the TracingService and Producer live
+// writes a packet and hence can get into the GetNewChunk() stall.
+// This would happen only if the API client code calls Trace() from one of the
+// callbacks it receives (e.g. OnStart(), OnStop()). We should either cause a
+// hard crash or ignore traces from that thread if that happens, because it
+// will deadlock (the Service will never free up the SMB because won't ever get
+// to run the task).
+
+namespace perfetto {
+namespace internal {
+
+namespace {
+
+class InProcessShm : public SharedMemory {
+ public:
+  explicit InProcessShm(size_t size);
+  ~InProcessShm() override;
+  void* start() const override;
+  size_t size() const override;
+
+ private:
+  base::PagedMemory mem_;
+};
+
+class InProcessShmFactory : public SharedMemory::Factory {
+ public:
+  ~InProcessShmFactory() override;
+  std::unique_ptr<SharedMemory> CreateSharedMemory(size_t) override;
+};
+
+InProcessShm::~InProcessShm() = default;
+
+InProcessShm::InProcessShm(size_t size)
+    : mem_(base::PagedMemory::Allocate(size)) {}
+
+void* InProcessShm::start() const {
+  return mem_.Get();
+}
+
+size_t InProcessShm::size() const {
+  return mem_.size();
+}
+
+InProcessShmFactory::~InProcessShmFactory() = default;
+std::unique_ptr<SharedMemory> InProcessShmFactory::CreateSharedMemory(
+    size_t size) {
+  return std::unique_ptr<SharedMemory>(new InProcessShm(size));
+}
+
+}  // namespace
+
+// static
+TracingBackend* InProcessTracingBackend::GetInstance() {
+  static auto* instance = new InProcessTracingBackend();
+  return instance;
+}
+
+InProcessTracingBackend::InProcessTracingBackend() {}
+
+std::unique_ptr<ProducerEndpoint> InProcessTracingBackend::ConnectProducer(
+    const ConnectProducerArgs& args) {
+  PERFETTO_DCHECK(args.task_runner->RunsTasksOnCurrentThread());
+
+  // This should never happen as we can have at most one in-process backend.
+  if (service_)
+    PERFETTO_FATAL("InProcessTracingBackend initialized twice");
+
+  return GetOrCreateService(args.task_runner)
+      ->ConnectProducer(args.producer, /*uid=*/0, args.producer_name,
+                        args.shmem_size_hint_bytes,
+                        /*in_process=*/true,
+                        TracingService::ProducerSMBScrapingMode::kEnabled,
+                        args.shmem_page_size_hint_bytes);
+}
+
+std::unique_ptr<ConsumerEndpoint> InProcessTracingBackend::ConnectConsumer(
+    const ConnectConsumerArgs& args) {
+  return GetOrCreateService(args.task_runner)
+      ->ConnectConsumer(args.consumer, /*uid=*/0);
+}
+
+TracingService* InProcessTracingBackend::GetOrCreateService(
+    base::TaskRunner* task_runner) {
+  if (!service_) {
+    std::unique_ptr<InProcessShmFactory> shm(new InProcessShmFactory());
+    service_ = TracingService::CreateInstance(std::move(shm), task_runner);
+    service_->SetSMBScrapingEnabled(true);
+  }
+  return service_.get();
+}
+
+}  // namespace internal
+}  // namespace perfetto
+// gen_amalgamated begin source: gen/protos/perfetto/ipc/consumer_port.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/ipc/consumer_port.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class SaveTraceForBugreportResponse;
+class SaveTraceForBugreportRequest;
+class QueryCapabilitiesResponse;
+class TracingServiceCapabilities;
+class QueryCapabilitiesRequest;
+class QueryServiceStateResponse;
+class TracingServiceState;
+class TracingServiceState_DataSource;
+class DataSourceDescriptor;
+class TracingServiceState_Producer;
+class QueryServiceStateRequest;
+class ObserveEventsResponse;
+class ObservableEvents;
+class ObservableEvents_DataSourceInstanceStateChange;
+class ObserveEventsRequest;
+class GetTraceStatsResponse;
+class TraceStats;
+class TraceStats_FilterStats;
+class TraceStats_BufferStats;
+class GetTraceStatsRequest;
+class AttachResponse;
+class TraceConfig;
+class TraceConfig_TraceFilter;
+class TraceConfig_IncidentReportConfig;
+class TraceConfig_IncrementalStateConfig;
+class TraceConfig_TriggerConfig;
+class TraceConfig_TriggerConfig_Trigger;
+class TraceConfig_GuardrailOverrides;
+class TraceConfig_StatsdMetadata;
+class TraceConfig_ProducerConfig;
+class TraceConfig_BuiltinDataSource;
+class TraceConfig_DataSource;
+class DataSourceConfig;
+class TestConfig;
+class TestConfig_DummyFields;
+class InterceptorConfig;
+class ChromeConfig;
+class TraceConfig_BufferConfig;
+class AttachRequest;
+class DetachResponse;
+class DetachRequest;
+class FlushResponse;
+class FlushRequest;
+class FreeBuffersResponse;
+class FreeBuffersRequest;
+class ReadBuffersResponse;
+class ReadBuffersResponse_Slice;
+class ReadBuffersRequest;
+class DisableTracingResponse;
+class DisableTracingRequest;
+class ChangeTraceConfigResponse;
+class ChangeTraceConfigRequest;
+class StartTracingResponse;
+class StartTracingRequest;
+class EnableTracingResponse;
+class EnableTracingRequest;
+enum ObservableEvents_Type : int;
+enum ObservableEvents_DataSourceInstanceState : int;
+enum TraceConfig_LockdownModeOperation : int;
+enum TraceConfig_CompressionType : int;
+enum TraceConfig_StatsdLogging : int;
+enum TraceConfig_TriggerConfig_TriggerMode : int;
+enum BuiltinClock : int;
+enum DataSourceConfig_SessionInitiator : int;
+enum ChromeConfig_ClientPriority : int;
+enum TraceConfig_BufferConfig_FillPolicy : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT SaveTraceForBugreportResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kSuccessFieldNumber = 1,
+    kMsgFieldNumber = 2,
+  };
+
+  SaveTraceForBugreportResponse();
+  ~SaveTraceForBugreportResponse() override;
+  SaveTraceForBugreportResponse(SaveTraceForBugreportResponse&&) noexcept;
+  SaveTraceForBugreportResponse& operator=(SaveTraceForBugreportResponse&&);
+  SaveTraceForBugreportResponse(const SaveTraceForBugreportResponse&);
+  SaveTraceForBugreportResponse& operator=(const SaveTraceForBugreportResponse&);
+  bool operator==(const SaveTraceForBugreportResponse&) const;
+  bool operator!=(const SaveTraceForBugreportResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_success() const { return _has_field_[1]; }
+  bool success() const { return success_; }
+  void set_success(bool value) { success_ = value; _has_field_.set(1); }
+
+  bool has_msg() const { return _has_field_[2]; }
+  const std::string& msg() const { return msg_; }
+  void set_msg(const std::string& value) { msg_ = value; _has_field_.set(2); }
+
+ private:
+  bool success_{};
+  std::string msg_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT SaveTraceForBugreportRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  SaveTraceForBugreportRequest();
+  ~SaveTraceForBugreportRequest() override;
+  SaveTraceForBugreportRequest(SaveTraceForBugreportRequest&&) noexcept;
+  SaveTraceForBugreportRequest& operator=(SaveTraceForBugreportRequest&&);
+  SaveTraceForBugreportRequest(const SaveTraceForBugreportRequest&);
+  SaveTraceForBugreportRequest& operator=(const SaveTraceForBugreportRequest&);
+  bool operator==(const SaveTraceForBugreportRequest&) const;
+  bool operator!=(const SaveTraceForBugreportRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT QueryCapabilitiesResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kCapabilitiesFieldNumber = 1,
+  };
+
+  QueryCapabilitiesResponse();
+  ~QueryCapabilitiesResponse() override;
+  QueryCapabilitiesResponse(QueryCapabilitiesResponse&&) noexcept;
+  QueryCapabilitiesResponse& operator=(QueryCapabilitiesResponse&&);
+  QueryCapabilitiesResponse(const QueryCapabilitiesResponse&);
+  QueryCapabilitiesResponse& operator=(const QueryCapabilitiesResponse&);
+  bool operator==(const QueryCapabilitiesResponse&) const;
+  bool operator!=(const QueryCapabilitiesResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_capabilities() const { return _has_field_[1]; }
+  const TracingServiceCapabilities& capabilities() const { return *capabilities_; }
+  TracingServiceCapabilities* mutable_capabilities() { _has_field_.set(1); return capabilities_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<TracingServiceCapabilities> capabilities_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT QueryCapabilitiesRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  QueryCapabilitiesRequest();
+  ~QueryCapabilitiesRequest() override;
+  QueryCapabilitiesRequest(QueryCapabilitiesRequest&&) noexcept;
+  QueryCapabilitiesRequest& operator=(QueryCapabilitiesRequest&&);
+  QueryCapabilitiesRequest(const QueryCapabilitiesRequest&);
+  QueryCapabilitiesRequest& operator=(const QueryCapabilitiesRequest&);
+  bool operator==(const QueryCapabilitiesRequest&) const;
+  bool operator!=(const QueryCapabilitiesRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT QueryServiceStateResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kServiceStateFieldNumber = 1,
+  };
+
+  QueryServiceStateResponse();
+  ~QueryServiceStateResponse() override;
+  QueryServiceStateResponse(QueryServiceStateResponse&&) noexcept;
+  QueryServiceStateResponse& operator=(QueryServiceStateResponse&&);
+  QueryServiceStateResponse(const QueryServiceStateResponse&);
+  QueryServiceStateResponse& operator=(const QueryServiceStateResponse&);
+  bool operator==(const QueryServiceStateResponse&) const;
+  bool operator!=(const QueryServiceStateResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_service_state() const { return _has_field_[1]; }
+  const TracingServiceState& service_state() const { return *service_state_; }
+  TracingServiceState* mutable_service_state() { _has_field_.set(1); return service_state_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<TracingServiceState> service_state_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT QueryServiceStateRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  QueryServiceStateRequest();
+  ~QueryServiceStateRequest() override;
+  QueryServiceStateRequest(QueryServiceStateRequest&&) noexcept;
+  QueryServiceStateRequest& operator=(QueryServiceStateRequest&&);
+  QueryServiceStateRequest(const QueryServiceStateRequest&);
+  QueryServiceStateRequest& operator=(const QueryServiceStateRequest&);
+  bool operator==(const QueryServiceStateRequest&) const;
+  bool operator!=(const QueryServiceStateRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ObserveEventsResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kEventsFieldNumber = 1,
+  };
+
+  ObserveEventsResponse();
+  ~ObserveEventsResponse() override;
+  ObserveEventsResponse(ObserveEventsResponse&&) noexcept;
+  ObserveEventsResponse& operator=(ObserveEventsResponse&&);
+  ObserveEventsResponse(const ObserveEventsResponse&);
+  ObserveEventsResponse& operator=(const ObserveEventsResponse&);
+  bool operator==(const ObserveEventsResponse&) const;
+  bool operator!=(const ObserveEventsResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_events() const { return _has_field_[1]; }
+  const ObservableEvents& events() const { return *events_; }
+  ObservableEvents* mutable_events() { _has_field_.set(1); return events_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<ObservableEvents> events_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ObserveEventsRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kEventsToObserveFieldNumber = 1,
+  };
+
+  ObserveEventsRequest();
+  ~ObserveEventsRequest() override;
+  ObserveEventsRequest(ObserveEventsRequest&&) noexcept;
+  ObserveEventsRequest& operator=(ObserveEventsRequest&&);
+  ObserveEventsRequest(const ObserveEventsRequest&);
+  ObserveEventsRequest& operator=(const ObserveEventsRequest&);
+  bool operator==(const ObserveEventsRequest&) const;
+  bool operator!=(const ObserveEventsRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<ObservableEvents_Type>& events_to_observe() const { return events_to_observe_; }
+  std::vector<ObservableEvents_Type>* mutable_events_to_observe() { return &events_to_observe_; }
+  int events_to_observe_size() const { return static_cast<int>(events_to_observe_.size()); }
+  void clear_events_to_observe() { events_to_observe_.clear(); }
+  void add_events_to_observe(ObservableEvents_Type value) { events_to_observe_.emplace_back(value); }
+  ObservableEvents_Type* add_events_to_observe() { events_to_observe_.emplace_back(); return &events_to_observe_.back(); }
+
+ private:
+  std::vector<ObservableEvents_Type> events_to_observe_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetTraceStatsResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTraceStatsFieldNumber = 1,
+  };
+
+  GetTraceStatsResponse();
+  ~GetTraceStatsResponse() override;
+  GetTraceStatsResponse(GetTraceStatsResponse&&) noexcept;
+  GetTraceStatsResponse& operator=(GetTraceStatsResponse&&);
+  GetTraceStatsResponse(const GetTraceStatsResponse&);
+  GetTraceStatsResponse& operator=(const GetTraceStatsResponse&);
+  bool operator==(const GetTraceStatsResponse&) const;
+  bool operator!=(const GetTraceStatsResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_stats() const { return _has_field_[1]; }
+  const TraceStats& trace_stats() const { return *trace_stats_; }
+  TraceStats* mutable_trace_stats() { _has_field_.set(1); return trace_stats_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<TraceStats> trace_stats_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetTraceStatsRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  GetTraceStatsRequest();
+  ~GetTraceStatsRequest() override;
+  GetTraceStatsRequest(GetTraceStatsRequest&&) noexcept;
+  GetTraceStatsRequest& operator=(GetTraceStatsRequest&&);
+  GetTraceStatsRequest(const GetTraceStatsRequest&);
+  GetTraceStatsRequest& operator=(const GetTraceStatsRequest&);
+  bool operator==(const GetTraceStatsRequest&) const;
+  bool operator!=(const GetTraceStatsRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT AttachResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTraceConfigFieldNumber = 1,
+  };
+
+  AttachResponse();
+  ~AttachResponse() override;
+  AttachResponse(AttachResponse&&) noexcept;
+  AttachResponse& operator=(AttachResponse&&);
+  AttachResponse(const AttachResponse&);
+  AttachResponse& operator=(const AttachResponse&);
+  bool operator==(const AttachResponse&) const;
+  bool operator!=(const AttachResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_config() const { return _has_field_[1]; }
+  const TraceConfig& trace_config() const { return *trace_config_; }
+  TraceConfig* mutable_trace_config() { _has_field_.set(1); return trace_config_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<TraceConfig> trace_config_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT AttachRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kKeyFieldNumber = 1,
+  };
+
+  AttachRequest();
+  ~AttachRequest() override;
+  AttachRequest(AttachRequest&&) noexcept;
+  AttachRequest& operator=(AttachRequest&&);
+  AttachRequest(const AttachRequest&);
+  AttachRequest& operator=(const AttachRequest&);
+  bool operator==(const AttachRequest&) const;
+  bool operator!=(const AttachRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_key() const { return _has_field_[1]; }
+  const std::string& key() const { return key_; }
+  void set_key(const std::string& value) { key_ = value; _has_field_.set(1); }
+
+ private:
+  std::string key_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT DetachResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  DetachResponse();
+  ~DetachResponse() override;
+  DetachResponse(DetachResponse&&) noexcept;
+  DetachResponse& operator=(DetachResponse&&);
+  DetachResponse(const DetachResponse&);
+  DetachResponse& operator=(const DetachResponse&);
+  bool operator==(const DetachResponse&) const;
+  bool operator!=(const DetachResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT DetachRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kKeyFieldNumber = 1,
+  };
+
+  DetachRequest();
+  ~DetachRequest() override;
+  DetachRequest(DetachRequest&&) noexcept;
+  DetachRequest& operator=(DetachRequest&&);
+  DetachRequest(const DetachRequest&);
+  DetachRequest& operator=(const DetachRequest&);
+  bool operator==(const DetachRequest&) const;
+  bool operator!=(const DetachRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_key() const { return _has_field_[1]; }
+  const std::string& key() const { return key_; }
+  void set_key(const std::string& value) { key_ = value; _has_field_.set(1); }
+
+ private:
+  std::string key_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT FlushResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  FlushResponse();
+  ~FlushResponse() override;
+  FlushResponse(FlushResponse&&) noexcept;
+  FlushResponse& operator=(FlushResponse&&);
+  FlushResponse(const FlushResponse&);
+  FlushResponse& operator=(const FlushResponse&);
+  bool operator==(const FlushResponse&) const;
+  bool operator!=(const FlushResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT FlushRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTimeoutMsFieldNumber = 1,
+  };
+
+  FlushRequest();
+  ~FlushRequest() override;
+  FlushRequest(FlushRequest&&) noexcept;
+  FlushRequest& operator=(FlushRequest&&);
+  FlushRequest(const FlushRequest&);
+  FlushRequest& operator=(const FlushRequest&);
+  bool operator==(const FlushRequest&) const;
+  bool operator!=(const FlushRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_timeout_ms() const { return _has_field_[1]; }
+  uint32_t timeout_ms() const { return timeout_ms_; }
+  void set_timeout_ms(uint32_t value) { timeout_ms_ = value; _has_field_.set(1); }
+
+ private:
+  uint32_t timeout_ms_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT FreeBuffersResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  FreeBuffersResponse();
+  ~FreeBuffersResponse() override;
+  FreeBuffersResponse(FreeBuffersResponse&&) noexcept;
+  FreeBuffersResponse& operator=(FreeBuffersResponse&&);
+  FreeBuffersResponse(const FreeBuffersResponse&);
+  FreeBuffersResponse& operator=(const FreeBuffersResponse&);
+  bool operator==(const FreeBuffersResponse&) const;
+  bool operator!=(const FreeBuffersResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT FreeBuffersRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kBufferIdsFieldNumber = 1,
+  };
+
+  FreeBuffersRequest();
+  ~FreeBuffersRequest() override;
+  FreeBuffersRequest(FreeBuffersRequest&&) noexcept;
+  FreeBuffersRequest& operator=(FreeBuffersRequest&&);
+  FreeBuffersRequest(const FreeBuffersRequest&);
+  FreeBuffersRequest& operator=(const FreeBuffersRequest&);
+  bool operator==(const FreeBuffersRequest&) const;
+  bool operator!=(const FreeBuffersRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<uint32_t>& buffer_ids() const { return buffer_ids_; }
+  std::vector<uint32_t>* mutable_buffer_ids() { return &buffer_ids_; }
+  int buffer_ids_size() const { return static_cast<int>(buffer_ids_.size()); }
+  void clear_buffer_ids() { buffer_ids_.clear(); }
+  void add_buffer_ids(uint32_t value) { buffer_ids_.emplace_back(value); }
+  uint32_t* add_buffer_ids() { buffer_ids_.emplace_back(); return &buffer_ids_.back(); }
+
+ private:
+  std::vector<uint32_t> buffer_ids_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ReadBuffersResponse : public ::protozero::CppMessageObj {
+ public:
+  using Slice = ReadBuffersResponse_Slice;
+  enum FieldNumbers {
+    kSlicesFieldNumber = 2,
+  };
+
+  ReadBuffersResponse();
+  ~ReadBuffersResponse() override;
+  ReadBuffersResponse(ReadBuffersResponse&&) noexcept;
+  ReadBuffersResponse& operator=(ReadBuffersResponse&&);
+  ReadBuffersResponse(const ReadBuffersResponse&);
+  ReadBuffersResponse& operator=(const ReadBuffersResponse&);
+  bool operator==(const ReadBuffersResponse&) const;
+  bool operator!=(const ReadBuffersResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<ReadBuffersResponse_Slice>& slices() const { return slices_; }
+  std::vector<ReadBuffersResponse_Slice>* mutable_slices() { return &slices_; }
+  int slices_size() const;
+  void clear_slices();
+  ReadBuffersResponse_Slice* add_slices();
+
+ private:
+  std::vector<ReadBuffersResponse_Slice> slices_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ReadBuffersResponse_Slice : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDataFieldNumber = 1,
+    kLastSliceForPacketFieldNumber = 2,
+  };
+
+  ReadBuffersResponse_Slice();
+  ~ReadBuffersResponse_Slice() override;
+  ReadBuffersResponse_Slice(ReadBuffersResponse_Slice&&) noexcept;
+  ReadBuffersResponse_Slice& operator=(ReadBuffersResponse_Slice&&);
+  ReadBuffersResponse_Slice(const ReadBuffersResponse_Slice&);
+  ReadBuffersResponse_Slice& operator=(const ReadBuffersResponse_Slice&);
+  bool operator==(const ReadBuffersResponse_Slice&) const;
+  bool operator!=(const ReadBuffersResponse_Slice& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_data() const { return _has_field_[1]; }
+  const std::string& data() const { return data_; }
+  void set_data(const std::string& value) { data_ = value; _has_field_.set(1); }
+  void set_data(const void* p, size_t s) { data_.assign(reinterpret_cast<const char*>(p), s); _has_field_.set(1); }
+
+  bool has_last_slice_for_packet() const { return _has_field_[2]; }
+  bool last_slice_for_packet() const { return last_slice_for_packet_; }
+  void set_last_slice_for_packet(bool value) { last_slice_for_packet_ = value; _has_field_.set(2); }
+
+ private:
+  std::string data_{};
+  bool last_slice_for_packet_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ReadBuffersRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  ReadBuffersRequest();
+  ~ReadBuffersRequest() override;
+  ReadBuffersRequest(ReadBuffersRequest&&) noexcept;
+  ReadBuffersRequest& operator=(ReadBuffersRequest&&);
+  ReadBuffersRequest(const ReadBuffersRequest&);
+  ReadBuffersRequest& operator=(const ReadBuffersRequest&);
+  bool operator==(const ReadBuffersRequest&) const;
+  bool operator!=(const ReadBuffersRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT DisableTracingResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  DisableTracingResponse();
+  ~DisableTracingResponse() override;
+  DisableTracingResponse(DisableTracingResponse&&) noexcept;
+  DisableTracingResponse& operator=(DisableTracingResponse&&);
+  DisableTracingResponse(const DisableTracingResponse&);
+  DisableTracingResponse& operator=(const DisableTracingResponse&);
+  bool operator==(const DisableTracingResponse&) const;
+  bool operator!=(const DisableTracingResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT DisableTracingRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  DisableTracingRequest();
+  ~DisableTracingRequest() override;
+  DisableTracingRequest(DisableTracingRequest&&) noexcept;
+  DisableTracingRequest& operator=(DisableTracingRequest&&);
+  DisableTracingRequest(const DisableTracingRequest&);
+  DisableTracingRequest& operator=(const DisableTracingRequest&);
+  bool operator==(const DisableTracingRequest&) const;
+  bool operator!=(const DisableTracingRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ChangeTraceConfigResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  ChangeTraceConfigResponse();
+  ~ChangeTraceConfigResponse() override;
+  ChangeTraceConfigResponse(ChangeTraceConfigResponse&&) noexcept;
+  ChangeTraceConfigResponse& operator=(ChangeTraceConfigResponse&&);
+  ChangeTraceConfigResponse(const ChangeTraceConfigResponse&);
+  ChangeTraceConfigResponse& operator=(const ChangeTraceConfigResponse&);
+  bool operator==(const ChangeTraceConfigResponse&) const;
+  bool operator!=(const ChangeTraceConfigResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ChangeTraceConfigRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTraceConfigFieldNumber = 1,
+  };
+
+  ChangeTraceConfigRequest();
+  ~ChangeTraceConfigRequest() override;
+  ChangeTraceConfigRequest(ChangeTraceConfigRequest&&) noexcept;
+  ChangeTraceConfigRequest& operator=(ChangeTraceConfigRequest&&);
+  ChangeTraceConfigRequest(const ChangeTraceConfigRequest&);
+  ChangeTraceConfigRequest& operator=(const ChangeTraceConfigRequest&);
+  bool operator==(const ChangeTraceConfigRequest&) const;
+  bool operator!=(const ChangeTraceConfigRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_config() const { return _has_field_[1]; }
+  const TraceConfig& trace_config() const { return *trace_config_; }
+  TraceConfig* mutable_trace_config() { _has_field_.set(1); return trace_config_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<TraceConfig> trace_config_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT StartTracingResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  StartTracingResponse();
+  ~StartTracingResponse() override;
+  StartTracingResponse(StartTracingResponse&&) noexcept;
+  StartTracingResponse& operator=(StartTracingResponse&&);
+  StartTracingResponse(const StartTracingResponse&);
+  StartTracingResponse& operator=(const StartTracingResponse&);
+  bool operator==(const StartTracingResponse&) const;
+  bool operator!=(const StartTracingResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT StartTracingRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  StartTracingRequest();
+  ~StartTracingRequest() override;
+  StartTracingRequest(StartTracingRequest&&) noexcept;
+  StartTracingRequest& operator=(StartTracingRequest&&);
+  StartTracingRequest(const StartTracingRequest&);
+  StartTracingRequest& operator=(const StartTracingRequest&);
+  bool operator==(const StartTracingRequest&) const;
+  bool operator!=(const StartTracingRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT EnableTracingResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDisabledFieldNumber = 1,
+    kErrorFieldNumber = 3,
+  };
+
+  EnableTracingResponse();
+  ~EnableTracingResponse() override;
+  EnableTracingResponse(EnableTracingResponse&&) noexcept;
+  EnableTracingResponse& operator=(EnableTracingResponse&&);
+  EnableTracingResponse(const EnableTracingResponse&);
+  EnableTracingResponse& operator=(const EnableTracingResponse&);
+  bool operator==(const EnableTracingResponse&) const;
+  bool operator!=(const EnableTracingResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_disabled() const { return _has_field_[1]; }
+  bool disabled() const { return disabled_; }
+  void set_disabled(bool value) { disabled_ = value; _has_field_.set(1); }
+
+  bool has_error() const { return _has_field_[3]; }
+  const std::string& error() const { return error_; }
+  void set_error(const std::string& value) { error_ = value; _has_field_.set(3); }
+
+ private:
+  bool disabled_{};
+  std::string error_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT EnableTracingRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTraceConfigFieldNumber = 1,
+    kAttachNotificationOnlyFieldNumber = 2,
+  };
+
+  EnableTracingRequest();
+  ~EnableTracingRequest() override;
+  EnableTracingRequest(EnableTracingRequest&&) noexcept;
+  EnableTracingRequest& operator=(EnableTracingRequest&&);
+  EnableTracingRequest(const EnableTracingRequest&);
+  EnableTracingRequest& operator=(const EnableTracingRequest&);
+  bool operator==(const EnableTracingRequest&) const;
+  bool operator!=(const EnableTracingRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_config() const { return _has_field_[1]; }
+  const TraceConfig& trace_config() const { return *trace_config_; }
+  TraceConfig* mutable_trace_config() { _has_field_.set(1); return trace_config_.get(); }
+
+  bool has_attach_notification_only() const { return _has_field_[2]; }
+  bool attach_notification_only() const { return attach_notification_only_; }
+  void set_attach_notification_only(bool value) { attach_notification_only_ = value; _has_field_.set(2); }
+
+ private:
+  ::protozero::CopyablePtr<TraceConfig> trace_config_;
+  bool attach_notification_only_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/ipc/consumer_port.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/trace_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/track_event/track_event_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/sys_stats/sys_stats_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/perf_event_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/perf_events.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/java_hprof_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/process_stats/process_stats_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/power/android_power_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/interceptor_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/interceptors/console_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/inode_file/inode_file_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/vulkan_memory_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_counter_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/ftrace/ftrace_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/android/packages_list_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_polled_state_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_log_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/trace_stats.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_capabilities.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_state.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/track_event_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/gpu_counter_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+SaveTraceForBugreportResponse::SaveTraceForBugreportResponse() = default;
+SaveTraceForBugreportResponse::~SaveTraceForBugreportResponse() = default;
+SaveTraceForBugreportResponse::SaveTraceForBugreportResponse(const SaveTraceForBugreportResponse&) = default;
+SaveTraceForBugreportResponse& SaveTraceForBugreportResponse::operator=(const SaveTraceForBugreportResponse&) = default;
+SaveTraceForBugreportResponse::SaveTraceForBugreportResponse(SaveTraceForBugreportResponse&&) noexcept = default;
+SaveTraceForBugreportResponse& SaveTraceForBugreportResponse::operator=(SaveTraceForBugreportResponse&&) = default;
+
+bool SaveTraceForBugreportResponse::operator==(const SaveTraceForBugreportResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && success_ == other.success_
+   && msg_ == other.msg_;
+}
+
+bool SaveTraceForBugreportResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* success */:
+        field.get(&success_);
+        break;
+      case 2 /* msg */:
+        field.get(&msg_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string SaveTraceForBugreportResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> SaveTraceForBugreportResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void SaveTraceForBugreportResponse::Serialize(::protozero::Message* msg) const {
+  // Field 1: success
+  if (_has_field_[1]) {
+    msg->AppendTinyVarInt(1, success_);
+  }
+
+  // Field 2: msg
+  if (_has_field_[2]) {
+    msg->AppendString(2, msg_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+SaveTraceForBugreportRequest::SaveTraceForBugreportRequest() = default;
+SaveTraceForBugreportRequest::~SaveTraceForBugreportRequest() = default;
+SaveTraceForBugreportRequest::SaveTraceForBugreportRequest(const SaveTraceForBugreportRequest&) = default;
+SaveTraceForBugreportRequest& SaveTraceForBugreportRequest::operator=(const SaveTraceForBugreportRequest&) = default;
+SaveTraceForBugreportRequest::SaveTraceForBugreportRequest(SaveTraceForBugreportRequest&&) noexcept = default;
+SaveTraceForBugreportRequest& SaveTraceForBugreportRequest::operator=(SaveTraceForBugreportRequest&&) = default;
+
+bool SaveTraceForBugreportRequest::operator==(const SaveTraceForBugreportRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool SaveTraceForBugreportRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string SaveTraceForBugreportRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> SaveTraceForBugreportRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void SaveTraceForBugreportRequest::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+QueryCapabilitiesResponse::QueryCapabilitiesResponse() = default;
+QueryCapabilitiesResponse::~QueryCapabilitiesResponse() = default;
+QueryCapabilitiesResponse::QueryCapabilitiesResponse(const QueryCapabilitiesResponse&) = default;
+QueryCapabilitiesResponse& QueryCapabilitiesResponse::operator=(const QueryCapabilitiesResponse&) = default;
+QueryCapabilitiesResponse::QueryCapabilitiesResponse(QueryCapabilitiesResponse&&) noexcept = default;
+QueryCapabilitiesResponse& QueryCapabilitiesResponse::operator=(QueryCapabilitiesResponse&&) = default;
+
+bool QueryCapabilitiesResponse::operator==(const QueryCapabilitiesResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && capabilities_ == other.capabilities_;
+}
+
+bool QueryCapabilitiesResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* capabilities */:
+        (*capabilities_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string QueryCapabilitiesResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> QueryCapabilitiesResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void QueryCapabilitiesResponse::Serialize(::protozero::Message* msg) const {
+  // Field 1: capabilities
+  if (_has_field_[1]) {
+    (*capabilities_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+QueryCapabilitiesRequest::QueryCapabilitiesRequest() = default;
+QueryCapabilitiesRequest::~QueryCapabilitiesRequest() = default;
+QueryCapabilitiesRequest::QueryCapabilitiesRequest(const QueryCapabilitiesRequest&) = default;
+QueryCapabilitiesRequest& QueryCapabilitiesRequest::operator=(const QueryCapabilitiesRequest&) = default;
+QueryCapabilitiesRequest::QueryCapabilitiesRequest(QueryCapabilitiesRequest&&) noexcept = default;
+QueryCapabilitiesRequest& QueryCapabilitiesRequest::operator=(QueryCapabilitiesRequest&&) = default;
+
+bool QueryCapabilitiesRequest::operator==(const QueryCapabilitiesRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool QueryCapabilitiesRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string QueryCapabilitiesRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> QueryCapabilitiesRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void QueryCapabilitiesRequest::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+QueryServiceStateResponse::QueryServiceStateResponse() = default;
+QueryServiceStateResponse::~QueryServiceStateResponse() = default;
+QueryServiceStateResponse::QueryServiceStateResponse(const QueryServiceStateResponse&) = default;
+QueryServiceStateResponse& QueryServiceStateResponse::operator=(const QueryServiceStateResponse&) = default;
+QueryServiceStateResponse::QueryServiceStateResponse(QueryServiceStateResponse&&) noexcept = default;
+QueryServiceStateResponse& QueryServiceStateResponse::operator=(QueryServiceStateResponse&&) = default;
+
+bool QueryServiceStateResponse::operator==(const QueryServiceStateResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && service_state_ == other.service_state_;
+}
+
+bool QueryServiceStateResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* service_state */:
+        (*service_state_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string QueryServiceStateResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> QueryServiceStateResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void QueryServiceStateResponse::Serialize(::protozero::Message* msg) const {
+  // Field 1: service_state
+  if (_has_field_[1]) {
+    (*service_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+QueryServiceStateRequest::QueryServiceStateRequest() = default;
+QueryServiceStateRequest::~QueryServiceStateRequest() = default;
+QueryServiceStateRequest::QueryServiceStateRequest(const QueryServiceStateRequest&) = default;
+QueryServiceStateRequest& QueryServiceStateRequest::operator=(const QueryServiceStateRequest&) = default;
+QueryServiceStateRequest::QueryServiceStateRequest(QueryServiceStateRequest&&) noexcept = default;
+QueryServiceStateRequest& QueryServiceStateRequest::operator=(QueryServiceStateRequest&&) = default;
+
+bool QueryServiceStateRequest::operator==(const QueryServiceStateRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool QueryServiceStateRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string QueryServiceStateRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> QueryServiceStateRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void QueryServiceStateRequest::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+ObserveEventsResponse::ObserveEventsResponse() = default;
+ObserveEventsResponse::~ObserveEventsResponse() = default;
+ObserveEventsResponse::ObserveEventsResponse(const ObserveEventsResponse&) = default;
+ObserveEventsResponse& ObserveEventsResponse::operator=(const ObserveEventsResponse&) = default;
+ObserveEventsResponse::ObserveEventsResponse(ObserveEventsResponse&&) noexcept = default;
+ObserveEventsResponse& ObserveEventsResponse::operator=(ObserveEventsResponse&&) = default;
+
+bool ObserveEventsResponse::operator==(const ObserveEventsResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && events_ == other.events_;
+}
+
+bool ObserveEventsResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* events */:
+        (*events_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ObserveEventsResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ObserveEventsResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ObserveEventsResponse::Serialize(::protozero::Message* msg) const {
+  // Field 1: events
+  if (_has_field_[1]) {
+    (*events_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+ObserveEventsRequest::ObserveEventsRequest() = default;
+ObserveEventsRequest::~ObserveEventsRequest() = default;
+ObserveEventsRequest::ObserveEventsRequest(const ObserveEventsRequest&) = default;
+ObserveEventsRequest& ObserveEventsRequest::operator=(const ObserveEventsRequest&) = default;
+ObserveEventsRequest::ObserveEventsRequest(ObserveEventsRequest&&) noexcept = default;
+ObserveEventsRequest& ObserveEventsRequest::operator=(ObserveEventsRequest&&) = default;
+
+bool ObserveEventsRequest::operator==(const ObserveEventsRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && events_to_observe_ == other.events_to_observe_;
+}
+
+bool ObserveEventsRequest::ParseFromArray(const void* raw, size_t size) {
+  events_to_observe_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* events_to_observe */:
+        events_to_observe_.emplace_back();
+        field.get(&events_to_observe_.back());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ObserveEventsRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ObserveEventsRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ObserveEventsRequest::Serialize(::protozero::Message* msg) const {
+  // Field 1: events_to_observe
+  for (auto& it : events_to_observe_) {
+    msg->AppendVarInt(1, it);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+GetTraceStatsResponse::GetTraceStatsResponse() = default;
+GetTraceStatsResponse::~GetTraceStatsResponse() = default;
+GetTraceStatsResponse::GetTraceStatsResponse(const GetTraceStatsResponse&) = default;
+GetTraceStatsResponse& GetTraceStatsResponse::operator=(const GetTraceStatsResponse&) = default;
+GetTraceStatsResponse::GetTraceStatsResponse(GetTraceStatsResponse&&) noexcept = default;
+GetTraceStatsResponse& GetTraceStatsResponse::operator=(GetTraceStatsResponse&&) = default;
+
+bool GetTraceStatsResponse::operator==(const GetTraceStatsResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && trace_stats_ == other.trace_stats_;
+}
+
+bool GetTraceStatsResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* trace_stats */:
+        (*trace_stats_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string GetTraceStatsResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> GetTraceStatsResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void GetTraceStatsResponse::Serialize(::protozero::Message* msg) const {
+  // Field 1: trace_stats
+  if (_has_field_[1]) {
+    (*trace_stats_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+GetTraceStatsRequest::GetTraceStatsRequest() = default;
+GetTraceStatsRequest::~GetTraceStatsRequest() = default;
+GetTraceStatsRequest::GetTraceStatsRequest(const GetTraceStatsRequest&) = default;
+GetTraceStatsRequest& GetTraceStatsRequest::operator=(const GetTraceStatsRequest&) = default;
+GetTraceStatsRequest::GetTraceStatsRequest(GetTraceStatsRequest&&) noexcept = default;
+GetTraceStatsRequest& GetTraceStatsRequest::operator=(GetTraceStatsRequest&&) = default;
+
+bool GetTraceStatsRequest::operator==(const GetTraceStatsRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool GetTraceStatsRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string GetTraceStatsRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> GetTraceStatsRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void GetTraceStatsRequest::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+AttachResponse::AttachResponse() = default;
+AttachResponse::~AttachResponse() = default;
+AttachResponse::AttachResponse(const AttachResponse&) = default;
+AttachResponse& AttachResponse::operator=(const AttachResponse&) = default;
+AttachResponse::AttachResponse(AttachResponse&&) noexcept = default;
+AttachResponse& AttachResponse::operator=(AttachResponse&&) = default;
+
+bool AttachResponse::operator==(const AttachResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && trace_config_ == other.trace_config_;
+}
+
+bool AttachResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* trace_config */:
+        (*trace_config_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string AttachResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> AttachResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void AttachResponse::Serialize(::protozero::Message* msg) const {
+  // Field 1: trace_config
+  if (_has_field_[1]) {
+    (*trace_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+AttachRequest::AttachRequest() = default;
+AttachRequest::~AttachRequest() = default;
+AttachRequest::AttachRequest(const AttachRequest&) = default;
+AttachRequest& AttachRequest::operator=(const AttachRequest&) = default;
+AttachRequest::AttachRequest(AttachRequest&&) noexcept = default;
+AttachRequest& AttachRequest::operator=(AttachRequest&&) = default;
+
+bool AttachRequest::operator==(const AttachRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && key_ == other.key_;
+}
+
+bool AttachRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* key */:
+        field.get(&key_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string AttachRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> AttachRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void AttachRequest::Serialize(::protozero::Message* msg) const {
+  // Field 1: key
+  if (_has_field_[1]) {
+    msg->AppendString(1, key_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+DetachResponse::DetachResponse() = default;
+DetachResponse::~DetachResponse() = default;
+DetachResponse::DetachResponse(const DetachResponse&) = default;
+DetachResponse& DetachResponse::operator=(const DetachResponse&) = default;
+DetachResponse::DetachResponse(DetachResponse&&) noexcept = default;
+DetachResponse& DetachResponse::operator=(DetachResponse&&) = default;
+
+bool DetachResponse::operator==(const DetachResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool DetachResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string DetachResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> DetachResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void DetachResponse::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+DetachRequest::DetachRequest() = default;
+DetachRequest::~DetachRequest() = default;
+DetachRequest::DetachRequest(const DetachRequest&) = default;
+DetachRequest& DetachRequest::operator=(const DetachRequest&) = default;
+DetachRequest::DetachRequest(DetachRequest&&) noexcept = default;
+DetachRequest& DetachRequest::operator=(DetachRequest&&) = default;
+
+bool DetachRequest::operator==(const DetachRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && key_ == other.key_;
+}
+
+bool DetachRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* key */:
+        field.get(&key_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string DetachRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> DetachRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void DetachRequest::Serialize(::protozero::Message* msg) const {
+  // Field 1: key
+  if (_has_field_[1]) {
+    msg->AppendString(1, key_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+FlushResponse::FlushResponse() = default;
+FlushResponse::~FlushResponse() = default;
+FlushResponse::FlushResponse(const FlushResponse&) = default;
+FlushResponse& FlushResponse::operator=(const FlushResponse&) = default;
+FlushResponse::FlushResponse(FlushResponse&&) noexcept = default;
+FlushResponse& FlushResponse::operator=(FlushResponse&&) = default;
+
+bool FlushResponse::operator==(const FlushResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool FlushResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string FlushResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> FlushResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void FlushResponse::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+FlushRequest::FlushRequest() = default;
+FlushRequest::~FlushRequest() = default;
+FlushRequest::FlushRequest(const FlushRequest&) = default;
+FlushRequest& FlushRequest::operator=(const FlushRequest&) = default;
+FlushRequest::FlushRequest(FlushRequest&&) noexcept = default;
+FlushRequest& FlushRequest::operator=(FlushRequest&&) = default;
+
+bool FlushRequest::operator==(const FlushRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && timeout_ms_ == other.timeout_ms_;
+}
+
+bool FlushRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* timeout_ms */:
+        field.get(&timeout_ms_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string FlushRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> FlushRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void FlushRequest::Serialize(::protozero::Message* msg) const {
+  // Field 1: timeout_ms
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, timeout_ms_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+FreeBuffersResponse::FreeBuffersResponse() = default;
+FreeBuffersResponse::~FreeBuffersResponse() = default;
+FreeBuffersResponse::FreeBuffersResponse(const FreeBuffersResponse&) = default;
+FreeBuffersResponse& FreeBuffersResponse::operator=(const FreeBuffersResponse&) = default;
+FreeBuffersResponse::FreeBuffersResponse(FreeBuffersResponse&&) noexcept = default;
+FreeBuffersResponse& FreeBuffersResponse::operator=(FreeBuffersResponse&&) = default;
+
+bool FreeBuffersResponse::operator==(const FreeBuffersResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool FreeBuffersResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string FreeBuffersResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> FreeBuffersResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void FreeBuffersResponse::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+FreeBuffersRequest::FreeBuffersRequest() = default;
+FreeBuffersRequest::~FreeBuffersRequest() = default;
+FreeBuffersRequest::FreeBuffersRequest(const FreeBuffersRequest&) = default;
+FreeBuffersRequest& FreeBuffersRequest::operator=(const FreeBuffersRequest&) = default;
+FreeBuffersRequest::FreeBuffersRequest(FreeBuffersRequest&&) noexcept = default;
+FreeBuffersRequest& FreeBuffersRequest::operator=(FreeBuffersRequest&&) = default;
+
+bool FreeBuffersRequest::operator==(const FreeBuffersRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && buffer_ids_ == other.buffer_ids_;
+}
+
+bool FreeBuffersRequest::ParseFromArray(const void* raw, size_t size) {
+  buffer_ids_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* buffer_ids */:
+        buffer_ids_.emplace_back();
+        field.get(&buffer_ids_.back());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string FreeBuffersRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> FreeBuffersRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void FreeBuffersRequest::Serialize(::protozero::Message* msg) const {
+  // Field 1: buffer_ids
+  for (auto& it : buffer_ids_) {
+    msg->AppendVarInt(1, it);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+ReadBuffersResponse::ReadBuffersResponse() = default;
+ReadBuffersResponse::~ReadBuffersResponse() = default;
+ReadBuffersResponse::ReadBuffersResponse(const ReadBuffersResponse&) = default;
+ReadBuffersResponse& ReadBuffersResponse::operator=(const ReadBuffersResponse&) = default;
+ReadBuffersResponse::ReadBuffersResponse(ReadBuffersResponse&&) noexcept = default;
+ReadBuffersResponse& ReadBuffersResponse::operator=(ReadBuffersResponse&&) = default;
+
+bool ReadBuffersResponse::operator==(const ReadBuffersResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && slices_ == other.slices_;
+}
+
+int ReadBuffersResponse::slices_size() const { return static_cast<int>(slices_.size()); }
+void ReadBuffersResponse::clear_slices() { slices_.clear(); }
+ReadBuffersResponse_Slice* ReadBuffersResponse::add_slices() { slices_.emplace_back(); return &slices_.back(); }
+bool ReadBuffersResponse::ParseFromArray(const void* raw, size_t size) {
+  slices_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 2 /* slices */:
+        slices_.emplace_back();
+        slices_.back().ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ReadBuffersResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ReadBuffersResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ReadBuffersResponse::Serialize(::protozero::Message* msg) const {
+  // Field 2: slices
+  for (auto& it : slices_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+ReadBuffersResponse_Slice::ReadBuffersResponse_Slice() = default;
+ReadBuffersResponse_Slice::~ReadBuffersResponse_Slice() = default;
+ReadBuffersResponse_Slice::ReadBuffersResponse_Slice(const ReadBuffersResponse_Slice&) = default;
+ReadBuffersResponse_Slice& ReadBuffersResponse_Slice::operator=(const ReadBuffersResponse_Slice&) = default;
+ReadBuffersResponse_Slice::ReadBuffersResponse_Slice(ReadBuffersResponse_Slice&&) noexcept = default;
+ReadBuffersResponse_Slice& ReadBuffersResponse_Slice::operator=(ReadBuffersResponse_Slice&&) = default;
+
+bool ReadBuffersResponse_Slice::operator==(const ReadBuffersResponse_Slice& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && data_ == other.data_
+   && last_slice_for_packet_ == other.last_slice_for_packet_;
+}
+
+bool ReadBuffersResponse_Slice::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* data */:
+        field.get(&data_);
+        break;
+      case 2 /* last_slice_for_packet */:
+        field.get(&last_slice_for_packet_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ReadBuffersResponse_Slice::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ReadBuffersResponse_Slice::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ReadBuffersResponse_Slice::Serialize(::protozero::Message* msg) const {
+  // Field 1: data
+  if (_has_field_[1]) {
+    msg->AppendString(1, data_);
+  }
+
+  // Field 2: last_slice_for_packet
+  if (_has_field_[2]) {
+    msg->AppendTinyVarInt(2, last_slice_for_packet_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+ReadBuffersRequest::ReadBuffersRequest() = default;
+ReadBuffersRequest::~ReadBuffersRequest() = default;
+ReadBuffersRequest::ReadBuffersRequest(const ReadBuffersRequest&) = default;
+ReadBuffersRequest& ReadBuffersRequest::operator=(const ReadBuffersRequest&) = default;
+ReadBuffersRequest::ReadBuffersRequest(ReadBuffersRequest&&) noexcept = default;
+ReadBuffersRequest& ReadBuffersRequest::operator=(ReadBuffersRequest&&) = default;
+
+bool ReadBuffersRequest::operator==(const ReadBuffersRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool ReadBuffersRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ReadBuffersRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ReadBuffersRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ReadBuffersRequest::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+DisableTracingResponse::DisableTracingResponse() = default;
+DisableTracingResponse::~DisableTracingResponse() = default;
+DisableTracingResponse::DisableTracingResponse(const DisableTracingResponse&) = default;
+DisableTracingResponse& DisableTracingResponse::operator=(const DisableTracingResponse&) = default;
+DisableTracingResponse::DisableTracingResponse(DisableTracingResponse&&) noexcept = default;
+DisableTracingResponse& DisableTracingResponse::operator=(DisableTracingResponse&&) = default;
+
+bool DisableTracingResponse::operator==(const DisableTracingResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool DisableTracingResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string DisableTracingResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> DisableTracingResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void DisableTracingResponse::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+DisableTracingRequest::DisableTracingRequest() = default;
+DisableTracingRequest::~DisableTracingRequest() = default;
+DisableTracingRequest::DisableTracingRequest(const DisableTracingRequest&) = default;
+DisableTracingRequest& DisableTracingRequest::operator=(const DisableTracingRequest&) = default;
+DisableTracingRequest::DisableTracingRequest(DisableTracingRequest&&) noexcept = default;
+DisableTracingRequest& DisableTracingRequest::operator=(DisableTracingRequest&&) = default;
+
+bool DisableTracingRequest::operator==(const DisableTracingRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool DisableTracingRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string DisableTracingRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> DisableTracingRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void DisableTracingRequest::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+ChangeTraceConfigResponse::ChangeTraceConfigResponse() = default;
+ChangeTraceConfigResponse::~ChangeTraceConfigResponse() = default;
+ChangeTraceConfigResponse::ChangeTraceConfigResponse(const ChangeTraceConfigResponse&) = default;
+ChangeTraceConfigResponse& ChangeTraceConfigResponse::operator=(const ChangeTraceConfigResponse&) = default;
+ChangeTraceConfigResponse::ChangeTraceConfigResponse(ChangeTraceConfigResponse&&) noexcept = default;
+ChangeTraceConfigResponse& ChangeTraceConfigResponse::operator=(ChangeTraceConfigResponse&&) = default;
+
+bool ChangeTraceConfigResponse::operator==(const ChangeTraceConfigResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool ChangeTraceConfigResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChangeTraceConfigResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChangeTraceConfigResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChangeTraceConfigResponse::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+ChangeTraceConfigRequest::ChangeTraceConfigRequest() = default;
+ChangeTraceConfigRequest::~ChangeTraceConfigRequest() = default;
+ChangeTraceConfigRequest::ChangeTraceConfigRequest(const ChangeTraceConfigRequest&) = default;
+ChangeTraceConfigRequest& ChangeTraceConfigRequest::operator=(const ChangeTraceConfigRequest&) = default;
+ChangeTraceConfigRequest::ChangeTraceConfigRequest(ChangeTraceConfigRequest&&) noexcept = default;
+ChangeTraceConfigRequest& ChangeTraceConfigRequest::operator=(ChangeTraceConfigRequest&&) = default;
+
+bool ChangeTraceConfigRequest::operator==(const ChangeTraceConfigRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && trace_config_ == other.trace_config_;
+}
+
+bool ChangeTraceConfigRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* trace_config */:
+        (*trace_config_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ChangeTraceConfigRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ChangeTraceConfigRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ChangeTraceConfigRequest::Serialize(::protozero::Message* msg) const {
+  // Field 1: trace_config
+  if (_has_field_[1]) {
+    (*trace_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+StartTracingResponse::StartTracingResponse() = default;
+StartTracingResponse::~StartTracingResponse() = default;
+StartTracingResponse::StartTracingResponse(const StartTracingResponse&) = default;
+StartTracingResponse& StartTracingResponse::operator=(const StartTracingResponse&) = default;
+StartTracingResponse::StartTracingResponse(StartTracingResponse&&) noexcept = default;
+StartTracingResponse& StartTracingResponse::operator=(StartTracingResponse&&) = default;
+
+bool StartTracingResponse::operator==(const StartTracingResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool StartTracingResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string StartTracingResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> StartTracingResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void StartTracingResponse::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+StartTracingRequest::StartTracingRequest() = default;
+StartTracingRequest::~StartTracingRequest() = default;
+StartTracingRequest::StartTracingRequest(const StartTracingRequest&) = default;
+StartTracingRequest& StartTracingRequest::operator=(const StartTracingRequest&) = default;
+StartTracingRequest::StartTracingRequest(StartTracingRequest&&) noexcept = default;
+StartTracingRequest& StartTracingRequest::operator=(StartTracingRequest&&) = default;
+
+bool StartTracingRequest::operator==(const StartTracingRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool StartTracingRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string StartTracingRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> StartTracingRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void StartTracingRequest::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+EnableTracingResponse::EnableTracingResponse() = default;
+EnableTracingResponse::~EnableTracingResponse() = default;
+EnableTracingResponse::EnableTracingResponse(const EnableTracingResponse&) = default;
+EnableTracingResponse& EnableTracingResponse::operator=(const EnableTracingResponse&) = default;
+EnableTracingResponse::EnableTracingResponse(EnableTracingResponse&&) noexcept = default;
+EnableTracingResponse& EnableTracingResponse::operator=(EnableTracingResponse&&) = default;
+
+bool EnableTracingResponse::operator==(const EnableTracingResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && disabled_ == other.disabled_
+   && error_ == other.error_;
+}
+
+bool EnableTracingResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* disabled */:
+        field.get(&disabled_);
+        break;
+      case 3 /* error */:
+        field.get(&error_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string EnableTracingResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> EnableTracingResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void EnableTracingResponse::Serialize(::protozero::Message* msg) const {
+  // Field 1: disabled
+  if (_has_field_[1]) {
+    msg->AppendTinyVarInt(1, disabled_);
+  }
+
+  // Field 3: error
+  if (_has_field_[3]) {
+    msg->AppendString(3, error_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+EnableTracingRequest::EnableTracingRequest() = default;
+EnableTracingRequest::~EnableTracingRequest() = default;
+EnableTracingRequest::EnableTracingRequest(const EnableTracingRequest&) = default;
+EnableTracingRequest& EnableTracingRequest::operator=(const EnableTracingRequest&) = default;
+EnableTracingRequest::EnableTracingRequest(EnableTracingRequest&&) noexcept = default;
+EnableTracingRequest& EnableTracingRequest::operator=(EnableTracingRequest&&) = default;
+
+bool EnableTracingRequest::operator==(const EnableTracingRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && trace_config_ == other.trace_config_
+   && attach_notification_only_ == other.attach_notification_only_;
+}
+
+bool EnableTracingRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* trace_config */:
+        (*trace_config_).ParseFromArray(field.data(), field.size());
+        break;
+      case 2 /* attach_notification_only */:
+        field.get(&attach_notification_only_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string EnableTracingRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> EnableTracingRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void EnableTracingRequest::Serialize(::protozero::Message* msg) const {
+  // Field 1: trace_config
+  if (_has_field_[1]) {
+    (*trace_config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  // Field 2: attach_notification_only
+  if (_has_field_[2]) {
+    msg->AppendTinyVarInt(2, attach_notification_only_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/ipc/producer_port.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/ipc/producer_port.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class SyncResponse;
+class SyncRequest;
+class GetAsyncCommandResponse;
+class GetAsyncCommandResponse_ClearIncrementalState;
+class GetAsyncCommandResponse_Flush;
+class GetAsyncCommandResponse_StopDataSource;
+class GetAsyncCommandResponse_StartDataSource;
+class DataSourceConfig;
+class TestConfig;
+class TestConfig_DummyFields;
+class InterceptorConfig;
+class ChromeConfig;
+class GetAsyncCommandResponse_SetupDataSource;
+class GetAsyncCommandResponse_SetupTracing;
+class GetAsyncCommandRequest;
+class ActivateTriggersResponse;
+class ActivateTriggersRequest;
+class NotifyDataSourceStoppedResponse;
+class NotifyDataSourceStoppedRequest;
+class NotifyDataSourceStartedResponse;
+class NotifyDataSourceStartedRequest;
+class CommitDataResponse;
+class UnregisterTraceWriterResponse;
+class UnregisterTraceWriterRequest;
+class RegisterTraceWriterResponse;
+class RegisterTraceWriterRequest;
+class UnregisterDataSourceResponse;
+class UnregisterDataSourceRequest;
+class RegisterDataSourceResponse;
+class RegisterDataSourceRequest;
+class DataSourceDescriptor;
+class InitializeConnectionResponse;
+class InitializeConnectionRequest;
+enum DataSourceConfig_SessionInitiator : int;
+enum ChromeConfig_ClientPriority : int;
+enum InitializeConnectionRequest_ProducerSMBScrapingMode : int;
+enum InitializeConnectionRequest_ProducerBuildFlags : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum InitializeConnectionRequest_ProducerSMBScrapingMode : int {
+  InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_UNSPECIFIED = 0,
+  InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_ENABLED = 1,
+  InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_DISABLED = 2,
+};
+enum InitializeConnectionRequest_ProducerBuildFlags : int {
+  InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_UNSPECIFIED = 0,
+  InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_DCHECKS_ON = 1,
+  InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_DCHECKS_OFF = 2,
+};
+
+class PERFETTO_EXPORT SyncResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  SyncResponse();
+  ~SyncResponse() override;
+  SyncResponse(SyncResponse&&) noexcept;
+  SyncResponse& operator=(SyncResponse&&);
+  SyncResponse(const SyncResponse&);
+  SyncResponse& operator=(const SyncResponse&);
+  bool operator==(const SyncResponse&) const;
+  bool operator!=(const SyncResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT SyncRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  SyncRequest();
+  ~SyncRequest() override;
+  SyncRequest(SyncRequest&&) noexcept;
+  SyncRequest& operator=(SyncRequest&&);
+  SyncRequest(const SyncRequest&);
+  SyncRequest& operator=(const SyncRequest&);
+  bool operator==(const SyncRequest&) const;
+  bool operator!=(const SyncRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetAsyncCommandResponse : public ::protozero::CppMessageObj {
+ public:
+  using SetupDataSource = GetAsyncCommandResponse_SetupDataSource;
+  using StartDataSource = GetAsyncCommandResponse_StartDataSource;
+  using StopDataSource = GetAsyncCommandResponse_StopDataSource;
+  using SetupTracing = GetAsyncCommandResponse_SetupTracing;
+  using Flush = GetAsyncCommandResponse_Flush;
+  using ClearIncrementalState = GetAsyncCommandResponse_ClearIncrementalState;
+  enum FieldNumbers {
+    kSetupTracingFieldNumber = 3,
+    kSetupDataSourceFieldNumber = 6,
+    kStartDataSourceFieldNumber = 1,
+    kStopDataSourceFieldNumber = 2,
+    kFlushFieldNumber = 5,
+    kClearIncrementalStateFieldNumber = 7,
+  };
+
+  GetAsyncCommandResponse();
+  ~GetAsyncCommandResponse() override;
+  GetAsyncCommandResponse(GetAsyncCommandResponse&&) noexcept;
+  GetAsyncCommandResponse& operator=(GetAsyncCommandResponse&&);
+  GetAsyncCommandResponse(const GetAsyncCommandResponse&);
+  GetAsyncCommandResponse& operator=(const GetAsyncCommandResponse&);
+  bool operator==(const GetAsyncCommandResponse&) const;
+  bool operator!=(const GetAsyncCommandResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_setup_tracing() const { return _has_field_[3]; }
+  const GetAsyncCommandResponse_SetupTracing& setup_tracing() const { return *setup_tracing_; }
+  GetAsyncCommandResponse_SetupTracing* mutable_setup_tracing() { _has_field_.set(3); return setup_tracing_.get(); }
+
+  bool has_setup_data_source() const { return _has_field_[6]; }
+  const GetAsyncCommandResponse_SetupDataSource& setup_data_source() const { return *setup_data_source_; }
+  GetAsyncCommandResponse_SetupDataSource* mutable_setup_data_source() { _has_field_.set(6); return setup_data_source_.get(); }
+
+  bool has_start_data_source() const { return _has_field_[1]; }
+  const GetAsyncCommandResponse_StartDataSource& start_data_source() const { return *start_data_source_; }
+  GetAsyncCommandResponse_StartDataSource* mutable_start_data_source() { _has_field_.set(1); return start_data_source_.get(); }
+
+  bool has_stop_data_source() const { return _has_field_[2]; }
+  const GetAsyncCommandResponse_StopDataSource& stop_data_source() const { return *stop_data_source_; }
+  GetAsyncCommandResponse_StopDataSource* mutable_stop_data_source() { _has_field_.set(2); return stop_data_source_.get(); }
+
+  bool has_flush() const { return _has_field_[5]; }
+  const GetAsyncCommandResponse_Flush& flush() const { return *flush_; }
+  GetAsyncCommandResponse_Flush* mutable_flush() { _has_field_.set(5); return flush_.get(); }
+
+  bool has_clear_incremental_state() const { return _has_field_[7]; }
+  const GetAsyncCommandResponse_ClearIncrementalState& clear_incremental_state() const { return *clear_incremental_state_; }
+  GetAsyncCommandResponse_ClearIncrementalState* mutable_clear_incremental_state() { _has_field_.set(7); return clear_incremental_state_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<GetAsyncCommandResponse_SetupTracing> setup_tracing_;
+  ::protozero::CopyablePtr<GetAsyncCommandResponse_SetupDataSource> setup_data_source_;
+  ::protozero::CopyablePtr<GetAsyncCommandResponse_StartDataSource> start_data_source_;
+  ::protozero::CopyablePtr<GetAsyncCommandResponse_StopDataSource> stop_data_source_;
+  ::protozero::CopyablePtr<GetAsyncCommandResponse_Flush> flush_;
+  ::protozero::CopyablePtr<GetAsyncCommandResponse_ClearIncrementalState> clear_incremental_state_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<8> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetAsyncCommandResponse_ClearIncrementalState : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDataSourceIdsFieldNumber = 1,
+  };
+
+  GetAsyncCommandResponse_ClearIncrementalState();
+  ~GetAsyncCommandResponse_ClearIncrementalState() override;
+  GetAsyncCommandResponse_ClearIncrementalState(GetAsyncCommandResponse_ClearIncrementalState&&) noexcept;
+  GetAsyncCommandResponse_ClearIncrementalState& operator=(GetAsyncCommandResponse_ClearIncrementalState&&);
+  GetAsyncCommandResponse_ClearIncrementalState(const GetAsyncCommandResponse_ClearIncrementalState&);
+  GetAsyncCommandResponse_ClearIncrementalState& operator=(const GetAsyncCommandResponse_ClearIncrementalState&);
+  bool operator==(const GetAsyncCommandResponse_ClearIncrementalState&) const;
+  bool operator!=(const GetAsyncCommandResponse_ClearIncrementalState& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<uint64_t>& data_source_ids() const { return data_source_ids_; }
+  std::vector<uint64_t>* mutable_data_source_ids() { return &data_source_ids_; }
+  int data_source_ids_size() const { return static_cast<int>(data_source_ids_.size()); }
+  void clear_data_source_ids() { data_source_ids_.clear(); }
+  void add_data_source_ids(uint64_t value) { data_source_ids_.emplace_back(value); }
+  uint64_t* add_data_source_ids() { data_source_ids_.emplace_back(); return &data_source_ids_.back(); }
+
+ private:
+  std::vector<uint64_t> data_source_ids_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetAsyncCommandResponse_Flush : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDataSourceIdsFieldNumber = 1,
+    kRequestIdFieldNumber = 2,
+  };
+
+  GetAsyncCommandResponse_Flush();
+  ~GetAsyncCommandResponse_Flush() override;
+  GetAsyncCommandResponse_Flush(GetAsyncCommandResponse_Flush&&) noexcept;
+  GetAsyncCommandResponse_Flush& operator=(GetAsyncCommandResponse_Flush&&);
+  GetAsyncCommandResponse_Flush(const GetAsyncCommandResponse_Flush&);
+  GetAsyncCommandResponse_Flush& operator=(const GetAsyncCommandResponse_Flush&);
+  bool operator==(const GetAsyncCommandResponse_Flush&) const;
+  bool operator!=(const GetAsyncCommandResponse_Flush& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<uint64_t>& data_source_ids() const { return data_source_ids_; }
+  std::vector<uint64_t>* mutable_data_source_ids() { return &data_source_ids_; }
+  int data_source_ids_size() const { return static_cast<int>(data_source_ids_.size()); }
+  void clear_data_source_ids() { data_source_ids_.clear(); }
+  void add_data_source_ids(uint64_t value) { data_source_ids_.emplace_back(value); }
+  uint64_t* add_data_source_ids() { data_source_ids_.emplace_back(); return &data_source_ids_.back(); }
+
+  bool has_request_id() const { return _has_field_[2]; }
+  uint64_t request_id() const { return request_id_; }
+  void set_request_id(uint64_t value) { request_id_ = value; _has_field_.set(2); }
+
+ private:
+  std::vector<uint64_t> data_source_ids_;
+  uint64_t request_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetAsyncCommandResponse_StopDataSource : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kInstanceIdFieldNumber = 1,
+  };
+
+  GetAsyncCommandResponse_StopDataSource();
+  ~GetAsyncCommandResponse_StopDataSource() override;
+  GetAsyncCommandResponse_StopDataSource(GetAsyncCommandResponse_StopDataSource&&) noexcept;
+  GetAsyncCommandResponse_StopDataSource& operator=(GetAsyncCommandResponse_StopDataSource&&);
+  GetAsyncCommandResponse_StopDataSource(const GetAsyncCommandResponse_StopDataSource&);
+  GetAsyncCommandResponse_StopDataSource& operator=(const GetAsyncCommandResponse_StopDataSource&);
+  bool operator==(const GetAsyncCommandResponse_StopDataSource&) const;
+  bool operator!=(const GetAsyncCommandResponse_StopDataSource& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_instance_id() const { return _has_field_[1]; }
+  uint64_t instance_id() const { return instance_id_; }
+  void set_instance_id(uint64_t value) { instance_id_ = value; _has_field_.set(1); }
+
+ private:
+  uint64_t instance_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetAsyncCommandResponse_StartDataSource : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNewInstanceIdFieldNumber = 1,
+    kConfigFieldNumber = 2,
+  };
+
+  GetAsyncCommandResponse_StartDataSource();
+  ~GetAsyncCommandResponse_StartDataSource() override;
+  GetAsyncCommandResponse_StartDataSource(GetAsyncCommandResponse_StartDataSource&&) noexcept;
+  GetAsyncCommandResponse_StartDataSource& operator=(GetAsyncCommandResponse_StartDataSource&&);
+  GetAsyncCommandResponse_StartDataSource(const GetAsyncCommandResponse_StartDataSource&);
+  GetAsyncCommandResponse_StartDataSource& operator=(const GetAsyncCommandResponse_StartDataSource&);
+  bool operator==(const GetAsyncCommandResponse_StartDataSource&) const;
+  bool operator!=(const GetAsyncCommandResponse_StartDataSource& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_new_instance_id() const { return _has_field_[1]; }
+  uint64_t new_instance_id() const { return new_instance_id_; }
+  void set_new_instance_id(uint64_t value) { new_instance_id_ = value; _has_field_.set(1); }
+
+  bool has_config() const { return _has_field_[2]; }
+  const DataSourceConfig& config() const { return *config_; }
+  DataSourceConfig* mutable_config() { _has_field_.set(2); return config_.get(); }
+
+ private:
+  uint64_t new_instance_id_{};
+  ::protozero::CopyablePtr<DataSourceConfig> config_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetAsyncCommandResponse_SetupDataSource : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNewInstanceIdFieldNumber = 1,
+    kConfigFieldNumber = 2,
+  };
+
+  GetAsyncCommandResponse_SetupDataSource();
+  ~GetAsyncCommandResponse_SetupDataSource() override;
+  GetAsyncCommandResponse_SetupDataSource(GetAsyncCommandResponse_SetupDataSource&&) noexcept;
+  GetAsyncCommandResponse_SetupDataSource& operator=(GetAsyncCommandResponse_SetupDataSource&&);
+  GetAsyncCommandResponse_SetupDataSource(const GetAsyncCommandResponse_SetupDataSource&);
+  GetAsyncCommandResponse_SetupDataSource& operator=(const GetAsyncCommandResponse_SetupDataSource&);
+  bool operator==(const GetAsyncCommandResponse_SetupDataSource&) const;
+  bool operator!=(const GetAsyncCommandResponse_SetupDataSource& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_new_instance_id() const { return _has_field_[1]; }
+  uint64_t new_instance_id() const { return new_instance_id_; }
+  void set_new_instance_id(uint64_t value) { new_instance_id_ = value; _has_field_.set(1); }
+
+  bool has_config() const { return _has_field_[2]; }
+  const DataSourceConfig& config() const { return *config_; }
+  DataSourceConfig* mutable_config() { _has_field_.set(2); return config_.get(); }
+
+ private:
+  uint64_t new_instance_id_{};
+  ::protozero::CopyablePtr<DataSourceConfig> config_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetAsyncCommandResponse_SetupTracing : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kSharedBufferPageSizeKbFieldNumber = 1,
+  };
+
+  GetAsyncCommandResponse_SetupTracing();
+  ~GetAsyncCommandResponse_SetupTracing() override;
+  GetAsyncCommandResponse_SetupTracing(GetAsyncCommandResponse_SetupTracing&&) noexcept;
+  GetAsyncCommandResponse_SetupTracing& operator=(GetAsyncCommandResponse_SetupTracing&&);
+  GetAsyncCommandResponse_SetupTracing(const GetAsyncCommandResponse_SetupTracing&);
+  GetAsyncCommandResponse_SetupTracing& operator=(const GetAsyncCommandResponse_SetupTracing&);
+  bool operator==(const GetAsyncCommandResponse_SetupTracing&) const;
+  bool operator!=(const GetAsyncCommandResponse_SetupTracing& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_shared_buffer_page_size_kb() const { return _has_field_[1]; }
+  uint32_t shared_buffer_page_size_kb() const { return shared_buffer_page_size_kb_; }
+  void set_shared_buffer_page_size_kb(uint32_t value) { shared_buffer_page_size_kb_ = value; _has_field_.set(1); }
+
+ private:
+  uint32_t shared_buffer_page_size_kb_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetAsyncCommandRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  GetAsyncCommandRequest();
+  ~GetAsyncCommandRequest() override;
+  GetAsyncCommandRequest(GetAsyncCommandRequest&&) noexcept;
+  GetAsyncCommandRequest& operator=(GetAsyncCommandRequest&&);
+  GetAsyncCommandRequest(const GetAsyncCommandRequest&);
+  GetAsyncCommandRequest& operator=(const GetAsyncCommandRequest&);
+  bool operator==(const GetAsyncCommandRequest&) const;
+  bool operator!=(const GetAsyncCommandRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ActivateTriggersResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  ActivateTriggersResponse();
+  ~ActivateTriggersResponse() override;
+  ActivateTriggersResponse(ActivateTriggersResponse&&) noexcept;
+  ActivateTriggersResponse& operator=(ActivateTriggersResponse&&);
+  ActivateTriggersResponse(const ActivateTriggersResponse&);
+  ActivateTriggersResponse& operator=(const ActivateTriggersResponse&);
+  bool operator==(const ActivateTriggersResponse&) const;
+  bool operator!=(const ActivateTriggersResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ActivateTriggersRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTriggerNamesFieldNumber = 1,
+  };
+
+  ActivateTriggersRequest();
+  ~ActivateTriggersRequest() override;
+  ActivateTriggersRequest(ActivateTriggersRequest&&) noexcept;
+  ActivateTriggersRequest& operator=(ActivateTriggersRequest&&);
+  ActivateTriggersRequest(const ActivateTriggersRequest&);
+  ActivateTriggersRequest& operator=(const ActivateTriggersRequest&);
+  bool operator==(const ActivateTriggersRequest&) const;
+  bool operator!=(const ActivateTriggersRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<std::string>& trigger_names() const { return trigger_names_; }
+  std::vector<std::string>* mutable_trigger_names() { return &trigger_names_; }
+  int trigger_names_size() const { return static_cast<int>(trigger_names_.size()); }
+  void clear_trigger_names() { trigger_names_.clear(); }
+  void add_trigger_names(std::string value) { trigger_names_.emplace_back(value); }
+  std::string* add_trigger_names() { trigger_names_.emplace_back(); return &trigger_names_.back(); }
+
+ private:
+  std::vector<std::string> trigger_names_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT NotifyDataSourceStoppedResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  NotifyDataSourceStoppedResponse();
+  ~NotifyDataSourceStoppedResponse() override;
+  NotifyDataSourceStoppedResponse(NotifyDataSourceStoppedResponse&&) noexcept;
+  NotifyDataSourceStoppedResponse& operator=(NotifyDataSourceStoppedResponse&&);
+  NotifyDataSourceStoppedResponse(const NotifyDataSourceStoppedResponse&);
+  NotifyDataSourceStoppedResponse& operator=(const NotifyDataSourceStoppedResponse&);
+  bool operator==(const NotifyDataSourceStoppedResponse&) const;
+  bool operator!=(const NotifyDataSourceStoppedResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT NotifyDataSourceStoppedRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDataSourceIdFieldNumber = 1,
+  };
+
+  NotifyDataSourceStoppedRequest();
+  ~NotifyDataSourceStoppedRequest() override;
+  NotifyDataSourceStoppedRequest(NotifyDataSourceStoppedRequest&&) noexcept;
+  NotifyDataSourceStoppedRequest& operator=(NotifyDataSourceStoppedRequest&&);
+  NotifyDataSourceStoppedRequest(const NotifyDataSourceStoppedRequest&);
+  NotifyDataSourceStoppedRequest& operator=(const NotifyDataSourceStoppedRequest&);
+  bool operator==(const NotifyDataSourceStoppedRequest&) const;
+  bool operator!=(const NotifyDataSourceStoppedRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_data_source_id() const { return _has_field_[1]; }
+  uint64_t data_source_id() const { return data_source_id_; }
+  void set_data_source_id(uint64_t value) { data_source_id_ = value; _has_field_.set(1); }
+
+ private:
+  uint64_t data_source_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT NotifyDataSourceStartedResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  NotifyDataSourceStartedResponse();
+  ~NotifyDataSourceStartedResponse() override;
+  NotifyDataSourceStartedResponse(NotifyDataSourceStartedResponse&&) noexcept;
+  NotifyDataSourceStartedResponse& operator=(NotifyDataSourceStartedResponse&&);
+  NotifyDataSourceStartedResponse(const NotifyDataSourceStartedResponse&);
+  NotifyDataSourceStartedResponse& operator=(const NotifyDataSourceStartedResponse&);
+  bool operator==(const NotifyDataSourceStartedResponse&) const;
+  bool operator!=(const NotifyDataSourceStartedResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT NotifyDataSourceStartedRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDataSourceIdFieldNumber = 1,
+  };
+
+  NotifyDataSourceStartedRequest();
+  ~NotifyDataSourceStartedRequest() override;
+  NotifyDataSourceStartedRequest(NotifyDataSourceStartedRequest&&) noexcept;
+  NotifyDataSourceStartedRequest& operator=(NotifyDataSourceStartedRequest&&);
+  NotifyDataSourceStartedRequest(const NotifyDataSourceStartedRequest&);
+  NotifyDataSourceStartedRequest& operator=(const NotifyDataSourceStartedRequest&);
+  bool operator==(const NotifyDataSourceStartedRequest&) const;
+  bool operator!=(const NotifyDataSourceStartedRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_data_source_id() const { return _has_field_[1]; }
+  uint64_t data_source_id() const { return data_source_id_; }
+  void set_data_source_id(uint64_t value) { data_source_id_ = value; _has_field_.set(1); }
+
+ private:
+  uint64_t data_source_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT CommitDataResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  CommitDataResponse();
+  ~CommitDataResponse() override;
+  CommitDataResponse(CommitDataResponse&&) noexcept;
+  CommitDataResponse& operator=(CommitDataResponse&&);
+  CommitDataResponse(const CommitDataResponse&);
+  CommitDataResponse& operator=(const CommitDataResponse&);
+  bool operator==(const CommitDataResponse&) const;
+  bool operator!=(const CommitDataResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT UnregisterTraceWriterResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  UnregisterTraceWriterResponse();
+  ~UnregisterTraceWriterResponse() override;
+  UnregisterTraceWriterResponse(UnregisterTraceWriterResponse&&) noexcept;
+  UnregisterTraceWriterResponse& operator=(UnregisterTraceWriterResponse&&);
+  UnregisterTraceWriterResponse(const UnregisterTraceWriterResponse&);
+  UnregisterTraceWriterResponse& operator=(const UnregisterTraceWriterResponse&);
+  bool operator==(const UnregisterTraceWriterResponse&) const;
+  bool operator!=(const UnregisterTraceWriterResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT UnregisterTraceWriterRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTraceWriterIdFieldNumber = 1,
+  };
+
+  UnregisterTraceWriterRequest();
+  ~UnregisterTraceWriterRequest() override;
+  UnregisterTraceWriterRequest(UnregisterTraceWriterRequest&&) noexcept;
+  UnregisterTraceWriterRequest& operator=(UnregisterTraceWriterRequest&&);
+  UnregisterTraceWriterRequest(const UnregisterTraceWriterRequest&);
+  UnregisterTraceWriterRequest& operator=(const UnregisterTraceWriterRequest&);
+  bool operator==(const UnregisterTraceWriterRequest&) const;
+  bool operator!=(const UnregisterTraceWriterRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_writer_id() const { return _has_field_[1]; }
+  uint32_t trace_writer_id() const { return trace_writer_id_; }
+  void set_trace_writer_id(uint32_t value) { trace_writer_id_ = value; _has_field_.set(1); }
+
+ private:
+  uint32_t trace_writer_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT RegisterTraceWriterResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  RegisterTraceWriterResponse();
+  ~RegisterTraceWriterResponse() override;
+  RegisterTraceWriterResponse(RegisterTraceWriterResponse&&) noexcept;
+  RegisterTraceWriterResponse& operator=(RegisterTraceWriterResponse&&);
+  RegisterTraceWriterResponse(const RegisterTraceWriterResponse&);
+  RegisterTraceWriterResponse& operator=(const RegisterTraceWriterResponse&);
+  bool operator==(const RegisterTraceWriterResponse&) const;
+  bool operator!=(const RegisterTraceWriterResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT RegisterTraceWriterRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTraceWriterIdFieldNumber = 1,
+    kTargetBufferFieldNumber = 2,
+  };
+
+  RegisterTraceWriterRequest();
+  ~RegisterTraceWriterRequest() override;
+  RegisterTraceWriterRequest(RegisterTraceWriterRequest&&) noexcept;
+  RegisterTraceWriterRequest& operator=(RegisterTraceWriterRequest&&);
+  RegisterTraceWriterRequest(const RegisterTraceWriterRequest&);
+  RegisterTraceWriterRequest& operator=(const RegisterTraceWriterRequest&);
+  bool operator==(const RegisterTraceWriterRequest&) const;
+  bool operator!=(const RegisterTraceWriterRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_writer_id() const { return _has_field_[1]; }
+  uint32_t trace_writer_id() const { return trace_writer_id_; }
+  void set_trace_writer_id(uint32_t value) { trace_writer_id_ = value; _has_field_.set(1); }
+
+  bool has_target_buffer() const { return _has_field_[2]; }
+  uint32_t target_buffer() const { return target_buffer_; }
+  void set_target_buffer(uint32_t value) { target_buffer_ = value; _has_field_.set(2); }
+
+ private:
+  uint32_t trace_writer_id_{};
+  uint32_t target_buffer_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT UnregisterDataSourceResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  UnregisterDataSourceResponse();
+  ~UnregisterDataSourceResponse() override;
+  UnregisterDataSourceResponse(UnregisterDataSourceResponse&&) noexcept;
+  UnregisterDataSourceResponse& operator=(UnregisterDataSourceResponse&&);
+  UnregisterDataSourceResponse(const UnregisterDataSourceResponse&);
+  UnregisterDataSourceResponse& operator=(const UnregisterDataSourceResponse&);
+  bool operator==(const UnregisterDataSourceResponse&) const;
+  bool operator!=(const UnregisterDataSourceResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT UnregisterDataSourceRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDataSourceNameFieldNumber = 1,
+  };
+
+  UnregisterDataSourceRequest();
+  ~UnregisterDataSourceRequest() override;
+  UnregisterDataSourceRequest(UnregisterDataSourceRequest&&) noexcept;
+  UnregisterDataSourceRequest& operator=(UnregisterDataSourceRequest&&);
+  UnregisterDataSourceRequest(const UnregisterDataSourceRequest&);
+  UnregisterDataSourceRequest& operator=(const UnregisterDataSourceRequest&);
+  bool operator==(const UnregisterDataSourceRequest&) const;
+  bool operator!=(const UnregisterDataSourceRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_data_source_name() const { return _has_field_[1]; }
+  const std::string& data_source_name() const { return data_source_name_; }
+  void set_data_source_name(const std::string& value) { data_source_name_ = value; _has_field_.set(1); }
+
+ private:
+  std::string data_source_name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT RegisterDataSourceResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kErrorFieldNumber = 1,
+  };
+
+  RegisterDataSourceResponse();
+  ~RegisterDataSourceResponse() override;
+  RegisterDataSourceResponse(RegisterDataSourceResponse&&) noexcept;
+  RegisterDataSourceResponse& operator=(RegisterDataSourceResponse&&);
+  RegisterDataSourceResponse(const RegisterDataSourceResponse&);
+  RegisterDataSourceResponse& operator=(const RegisterDataSourceResponse&);
+  bool operator==(const RegisterDataSourceResponse&) const;
+  bool operator!=(const RegisterDataSourceResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_error() const { return _has_field_[1]; }
+  const std::string& error() const { return error_; }
+  void set_error(const std::string& value) { error_ = value; _has_field_.set(1); }
+
+ private:
+  std::string error_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT RegisterDataSourceRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDataSourceDescriptorFieldNumber = 1,
+  };
+
+  RegisterDataSourceRequest();
+  ~RegisterDataSourceRequest() override;
+  RegisterDataSourceRequest(RegisterDataSourceRequest&&) noexcept;
+  RegisterDataSourceRequest& operator=(RegisterDataSourceRequest&&);
+  RegisterDataSourceRequest(const RegisterDataSourceRequest&);
+  RegisterDataSourceRequest& operator=(const RegisterDataSourceRequest&);
+  bool operator==(const RegisterDataSourceRequest&) const;
+  bool operator!=(const RegisterDataSourceRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_data_source_descriptor() const { return _has_field_[1]; }
+  const DataSourceDescriptor& data_source_descriptor() const { return *data_source_descriptor_; }
+  DataSourceDescriptor* mutable_data_source_descriptor() { _has_field_.set(1); return data_source_descriptor_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<DataSourceDescriptor> data_source_descriptor_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT InitializeConnectionResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kUsingShmemProvidedByProducerFieldNumber = 1,
+    kDirectSmbPatchingSupportedFieldNumber = 2,
+  };
+
+  InitializeConnectionResponse();
+  ~InitializeConnectionResponse() override;
+  InitializeConnectionResponse(InitializeConnectionResponse&&) noexcept;
+  InitializeConnectionResponse& operator=(InitializeConnectionResponse&&);
+  InitializeConnectionResponse(const InitializeConnectionResponse&);
+  InitializeConnectionResponse& operator=(const InitializeConnectionResponse&);
+  bool operator==(const InitializeConnectionResponse&) const;
+  bool operator!=(const InitializeConnectionResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_using_shmem_provided_by_producer() const { return _has_field_[1]; }
+  bool using_shmem_provided_by_producer() const { return using_shmem_provided_by_producer_; }
+  void set_using_shmem_provided_by_producer(bool value) { using_shmem_provided_by_producer_ = value; _has_field_.set(1); }
+
+  bool has_direct_smb_patching_supported() const { return _has_field_[2]; }
+  bool direct_smb_patching_supported() const { return direct_smb_patching_supported_; }
+  void set_direct_smb_patching_supported(bool value) { direct_smb_patching_supported_ = value; _has_field_.set(2); }
+
+ private:
+  bool using_shmem_provided_by_producer_{};
+  bool direct_smb_patching_supported_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT InitializeConnectionRequest : public ::protozero::CppMessageObj {
+ public:
+  using ProducerSMBScrapingMode = InitializeConnectionRequest_ProducerSMBScrapingMode;
+  static constexpr auto SMB_SCRAPING_UNSPECIFIED = InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_UNSPECIFIED;
+  static constexpr auto SMB_SCRAPING_ENABLED = InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_ENABLED;
+  static constexpr auto SMB_SCRAPING_DISABLED = InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_DISABLED;
+  static constexpr auto ProducerSMBScrapingMode_MIN = InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_UNSPECIFIED;
+  static constexpr auto ProducerSMBScrapingMode_MAX = InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_DISABLED;
+  using ProducerBuildFlags = InitializeConnectionRequest_ProducerBuildFlags;
+  static constexpr auto BUILD_FLAGS_UNSPECIFIED = InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_UNSPECIFIED;
+  static constexpr auto BUILD_FLAGS_DCHECKS_ON = InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_DCHECKS_ON;
+  static constexpr auto BUILD_FLAGS_DCHECKS_OFF = InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_DCHECKS_OFF;
+  static constexpr auto ProducerBuildFlags_MIN = InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_UNSPECIFIED;
+  static constexpr auto ProducerBuildFlags_MAX = InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_DCHECKS_OFF;
+  enum FieldNumbers {
+    kSharedMemoryPageSizeHintBytesFieldNumber = 1,
+    kSharedMemorySizeHintBytesFieldNumber = 2,
+    kProducerNameFieldNumber = 3,
+    kSmbScrapingModeFieldNumber = 4,
+    kBuildFlagsFieldNumber = 5,
+    kProducerProvidedShmemFieldNumber = 6,
+    kSdkVersionFieldNumber = 8,
+  };
+
+  InitializeConnectionRequest();
+  ~InitializeConnectionRequest() override;
+  InitializeConnectionRequest(InitializeConnectionRequest&&) noexcept;
+  InitializeConnectionRequest& operator=(InitializeConnectionRequest&&);
+  InitializeConnectionRequest(const InitializeConnectionRequest&);
+  InitializeConnectionRequest& operator=(const InitializeConnectionRequest&);
+  bool operator==(const InitializeConnectionRequest&) const;
+  bool operator!=(const InitializeConnectionRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_shared_memory_page_size_hint_bytes() const { return _has_field_[1]; }
+  uint32_t shared_memory_page_size_hint_bytes() const { return shared_memory_page_size_hint_bytes_; }
+  void set_shared_memory_page_size_hint_bytes(uint32_t value) { shared_memory_page_size_hint_bytes_ = value; _has_field_.set(1); }
+
+  bool has_shared_memory_size_hint_bytes() const { return _has_field_[2]; }
+  uint32_t shared_memory_size_hint_bytes() const { return shared_memory_size_hint_bytes_; }
+  void set_shared_memory_size_hint_bytes(uint32_t value) { shared_memory_size_hint_bytes_ = value; _has_field_.set(2); }
+
+  bool has_producer_name() const { return _has_field_[3]; }
+  const std::string& producer_name() const { return producer_name_; }
+  void set_producer_name(const std::string& value) { producer_name_ = value; _has_field_.set(3); }
+
+  bool has_smb_scraping_mode() const { return _has_field_[4]; }
+  InitializeConnectionRequest_ProducerSMBScrapingMode smb_scraping_mode() const { return smb_scraping_mode_; }
+  void set_smb_scraping_mode(InitializeConnectionRequest_ProducerSMBScrapingMode value) { smb_scraping_mode_ = value; _has_field_.set(4); }
+
+  bool has_build_flags() const { return _has_field_[5]; }
+  InitializeConnectionRequest_ProducerBuildFlags build_flags() const { return build_flags_; }
+  void set_build_flags(InitializeConnectionRequest_ProducerBuildFlags value) { build_flags_ = value; _has_field_.set(5); }
+
+  bool has_producer_provided_shmem() const { return _has_field_[6]; }
+  bool producer_provided_shmem() const { return producer_provided_shmem_; }
+  void set_producer_provided_shmem(bool value) { producer_provided_shmem_ = value; _has_field_.set(6); }
+
+  bool has_sdk_version() const { return _has_field_[8]; }
+  const std::string& sdk_version() const { return sdk_version_; }
+  void set_sdk_version(const std::string& value) { sdk_version_ = value; _has_field_.set(8); }
+
+ private:
+  uint32_t shared_memory_page_size_hint_bytes_{};
+  uint32_t shared_memory_size_hint_bytes_{};
+  std::string producer_name_{};
+  InitializeConnectionRequest_ProducerSMBScrapingMode smb_scraping_mode_{};
+  InitializeConnectionRequest_ProducerBuildFlags build_flags_{};
+  bool producer_provided_shmem_{};
+  std::string sdk_version_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<9> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/ipc/producer_port.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/track_event_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/gpu_counter_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/track_event/track_event_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/test_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/sys_stats/sys_stats_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/sys_stats_counters.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/perf_event_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/perf_events.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/java_hprof_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/process_stats/process_stats_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/power/android_power_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/interceptor_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/interceptors/console_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/inode_file/inode_file_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/vulkan_memory_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/gpu/gpu_counter_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/ftrace/ftrace_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/chrome/chrome_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/android/packages_list_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_polled_state_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/android/android_log_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/android_log_constants.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/commit_data_request.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+SyncResponse::SyncResponse() = default;
+SyncResponse::~SyncResponse() = default;
+SyncResponse::SyncResponse(const SyncResponse&) = default;
+SyncResponse& SyncResponse::operator=(const SyncResponse&) = default;
+SyncResponse::SyncResponse(SyncResponse&&) noexcept = default;
+SyncResponse& SyncResponse::operator=(SyncResponse&&) = default;
+
+bool SyncResponse::operator==(const SyncResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool SyncResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string SyncResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> SyncResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void SyncResponse::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+SyncRequest::SyncRequest() = default;
+SyncRequest::~SyncRequest() = default;
+SyncRequest::SyncRequest(const SyncRequest&) = default;
+SyncRequest& SyncRequest::operator=(const SyncRequest&) = default;
+SyncRequest::SyncRequest(SyncRequest&&) noexcept = default;
+SyncRequest& SyncRequest::operator=(SyncRequest&&) = default;
+
+bool SyncRequest::operator==(const SyncRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool SyncRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string SyncRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> SyncRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void SyncRequest::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+GetAsyncCommandResponse::GetAsyncCommandResponse() = default;
+GetAsyncCommandResponse::~GetAsyncCommandResponse() = default;
+GetAsyncCommandResponse::GetAsyncCommandResponse(const GetAsyncCommandResponse&) = default;
+GetAsyncCommandResponse& GetAsyncCommandResponse::operator=(const GetAsyncCommandResponse&) = default;
+GetAsyncCommandResponse::GetAsyncCommandResponse(GetAsyncCommandResponse&&) noexcept = default;
+GetAsyncCommandResponse& GetAsyncCommandResponse::operator=(GetAsyncCommandResponse&&) = default;
+
+bool GetAsyncCommandResponse::operator==(const GetAsyncCommandResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && setup_tracing_ == other.setup_tracing_
+   && setup_data_source_ == other.setup_data_source_
+   && start_data_source_ == other.start_data_source_
+   && stop_data_source_ == other.stop_data_source_
+   && flush_ == other.flush_
+   && clear_incremental_state_ == other.clear_incremental_state_;
+}
+
+bool GetAsyncCommandResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 3 /* setup_tracing */:
+        (*setup_tracing_).ParseFromArray(field.data(), field.size());
+        break;
+      case 6 /* setup_data_source */:
+        (*setup_data_source_).ParseFromArray(field.data(), field.size());
+        break;
+      case 1 /* start_data_source */:
+        (*start_data_source_).ParseFromArray(field.data(), field.size());
+        break;
+      case 2 /* stop_data_source */:
+        (*stop_data_source_).ParseFromArray(field.data(), field.size());
+        break;
+      case 5 /* flush */:
+        (*flush_).ParseFromArray(field.data(), field.size());
+        break;
+      case 7 /* clear_incremental_state */:
+        (*clear_incremental_state_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string GetAsyncCommandResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> GetAsyncCommandResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void GetAsyncCommandResponse::Serialize(::protozero::Message* msg) const {
+  // Field 3: setup_tracing
+  if (_has_field_[3]) {
+    (*setup_tracing_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
+  }
+
+  // Field 6: setup_data_source
+  if (_has_field_[6]) {
+    (*setup_data_source_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
+  }
+
+  // Field 1: start_data_source
+  if (_has_field_[1]) {
+    (*start_data_source_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  // Field 2: stop_data_source
+  if (_has_field_[2]) {
+    (*stop_data_source_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
+  }
+
+  // Field 5: flush
+  if (_has_field_[5]) {
+    (*flush_).Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
+  }
+
+  // Field 7: clear_incremental_state
+  if (_has_field_[7]) {
+    (*clear_incremental_state_).Serialize(msg->BeginNestedMessage<::protozero::Message>(7));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+GetAsyncCommandResponse_ClearIncrementalState::GetAsyncCommandResponse_ClearIncrementalState() = default;
+GetAsyncCommandResponse_ClearIncrementalState::~GetAsyncCommandResponse_ClearIncrementalState() = default;
+GetAsyncCommandResponse_ClearIncrementalState::GetAsyncCommandResponse_ClearIncrementalState(const GetAsyncCommandResponse_ClearIncrementalState&) = default;
+GetAsyncCommandResponse_ClearIncrementalState& GetAsyncCommandResponse_ClearIncrementalState::operator=(const GetAsyncCommandResponse_ClearIncrementalState&) = default;
+GetAsyncCommandResponse_ClearIncrementalState::GetAsyncCommandResponse_ClearIncrementalState(GetAsyncCommandResponse_ClearIncrementalState&&) noexcept = default;
+GetAsyncCommandResponse_ClearIncrementalState& GetAsyncCommandResponse_ClearIncrementalState::operator=(GetAsyncCommandResponse_ClearIncrementalState&&) = default;
+
+bool GetAsyncCommandResponse_ClearIncrementalState::operator==(const GetAsyncCommandResponse_ClearIncrementalState& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && data_source_ids_ == other.data_source_ids_;
+}
+
+bool GetAsyncCommandResponse_ClearIncrementalState::ParseFromArray(const void* raw, size_t size) {
+  data_source_ids_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* data_source_ids */:
+        data_source_ids_.emplace_back();
+        field.get(&data_source_ids_.back());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string GetAsyncCommandResponse_ClearIncrementalState::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> GetAsyncCommandResponse_ClearIncrementalState::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void GetAsyncCommandResponse_ClearIncrementalState::Serialize(::protozero::Message* msg) const {
+  // Field 1: data_source_ids
+  for (auto& it : data_source_ids_) {
+    msg->AppendVarInt(1, it);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+GetAsyncCommandResponse_Flush::GetAsyncCommandResponse_Flush() = default;
+GetAsyncCommandResponse_Flush::~GetAsyncCommandResponse_Flush() = default;
+GetAsyncCommandResponse_Flush::GetAsyncCommandResponse_Flush(const GetAsyncCommandResponse_Flush&) = default;
+GetAsyncCommandResponse_Flush& GetAsyncCommandResponse_Flush::operator=(const GetAsyncCommandResponse_Flush&) = default;
+GetAsyncCommandResponse_Flush::GetAsyncCommandResponse_Flush(GetAsyncCommandResponse_Flush&&) noexcept = default;
+GetAsyncCommandResponse_Flush& GetAsyncCommandResponse_Flush::operator=(GetAsyncCommandResponse_Flush&&) = default;
+
+bool GetAsyncCommandResponse_Flush::operator==(const GetAsyncCommandResponse_Flush& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && data_source_ids_ == other.data_source_ids_
+   && request_id_ == other.request_id_;
+}
+
+bool GetAsyncCommandResponse_Flush::ParseFromArray(const void* raw, size_t size) {
+  data_source_ids_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* data_source_ids */:
+        data_source_ids_.emplace_back();
+        field.get(&data_source_ids_.back());
+        break;
+      case 2 /* request_id */:
+        field.get(&request_id_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string GetAsyncCommandResponse_Flush::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> GetAsyncCommandResponse_Flush::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void GetAsyncCommandResponse_Flush::Serialize(::protozero::Message* msg) const {
+  // Field 1: data_source_ids
+  for (auto& it : data_source_ids_) {
+    msg->AppendVarInt(1, it);
+  }
+
+  // Field 2: request_id
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, request_id_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+GetAsyncCommandResponse_StopDataSource::GetAsyncCommandResponse_StopDataSource() = default;
+GetAsyncCommandResponse_StopDataSource::~GetAsyncCommandResponse_StopDataSource() = default;
+GetAsyncCommandResponse_StopDataSource::GetAsyncCommandResponse_StopDataSource(const GetAsyncCommandResponse_StopDataSource&) = default;
+GetAsyncCommandResponse_StopDataSource& GetAsyncCommandResponse_StopDataSource::operator=(const GetAsyncCommandResponse_StopDataSource&) = default;
+GetAsyncCommandResponse_StopDataSource::GetAsyncCommandResponse_StopDataSource(GetAsyncCommandResponse_StopDataSource&&) noexcept = default;
+GetAsyncCommandResponse_StopDataSource& GetAsyncCommandResponse_StopDataSource::operator=(GetAsyncCommandResponse_StopDataSource&&) = default;
+
+bool GetAsyncCommandResponse_StopDataSource::operator==(const GetAsyncCommandResponse_StopDataSource& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && instance_id_ == other.instance_id_;
+}
+
+bool GetAsyncCommandResponse_StopDataSource::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* instance_id */:
+        field.get(&instance_id_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string GetAsyncCommandResponse_StopDataSource::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> GetAsyncCommandResponse_StopDataSource::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void GetAsyncCommandResponse_StopDataSource::Serialize(::protozero::Message* msg) const {
+  // Field 1: instance_id
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, instance_id_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+GetAsyncCommandResponse_StartDataSource::GetAsyncCommandResponse_StartDataSource() = default;
+GetAsyncCommandResponse_StartDataSource::~GetAsyncCommandResponse_StartDataSource() = default;
+GetAsyncCommandResponse_StartDataSource::GetAsyncCommandResponse_StartDataSource(const GetAsyncCommandResponse_StartDataSource&) = default;
+GetAsyncCommandResponse_StartDataSource& GetAsyncCommandResponse_StartDataSource::operator=(const GetAsyncCommandResponse_StartDataSource&) = default;
+GetAsyncCommandResponse_StartDataSource::GetAsyncCommandResponse_StartDataSource(GetAsyncCommandResponse_StartDataSource&&) noexcept = default;
+GetAsyncCommandResponse_StartDataSource& GetAsyncCommandResponse_StartDataSource::operator=(GetAsyncCommandResponse_StartDataSource&&) = default;
+
+bool GetAsyncCommandResponse_StartDataSource::operator==(const GetAsyncCommandResponse_StartDataSource& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && new_instance_id_ == other.new_instance_id_
+   && config_ == other.config_;
+}
+
+bool GetAsyncCommandResponse_StartDataSource::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* new_instance_id */:
+        field.get(&new_instance_id_);
+        break;
+      case 2 /* config */:
+        (*config_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string GetAsyncCommandResponse_StartDataSource::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> GetAsyncCommandResponse_StartDataSource::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void GetAsyncCommandResponse_StartDataSource::Serialize(::protozero::Message* msg) const {
+  // Field 1: new_instance_id
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, new_instance_id_);
+  }
+
+  // Field 2: config
+  if (_has_field_[2]) {
+    (*config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+GetAsyncCommandResponse_SetupDataSource::GetAsyncCommandResponse_SetupDataSource() = default;
+GetAsyncCommandResponse_SetupDataSource::~GetAsyncCommandResponse_SetupDataSource() = default;
+GetAsyncCommandResponse_SetupDataSource::GetAsyncCommandResponse_SetupDataSource(const GetAsyncCommandResponse_SetupDataSource&) = default;
+GetAsyncCommandResponse_SetupDataSource& GetAsyncCommandResponse_SetupDataSource::operator=(const GetAsyncCommandResponse_SetupDataSource&) = default;
+GetAsyncCommandResponse_SetupDataSource::GetAsyncCommandResponse_SetupDataSource(GetAsyncCommandResponse_SetupDataSource&&) noexcept = default;
+GetAsyncCommandResponse_SetupDataSource& GetAsyncCommandResponse_SetupDataSource::operator=(GetAsyncCommandResponse_SetupDataSource&&) = default;
+
+bool GetAsyncCommandResponse_SetupDataSource::operator==(const GetAsyncCommandResponse_SetupDataSource& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && new_instance_id_ == other.new_instance_id_
+   && config_ == other.config_;
+}
+
+bool GetAsyncCommandResponse_SetupDataSource::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* new_instance_id */:
+        field.get(&new_instance_id_);
+        break;
+      case 2 /* config */:
+        (*config_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string GetAsyncCommandResponse_SetupDataSource::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> GetAsyncCommandResponse_SetupDataSource::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void GetAsyncCommandResponse_SetupDataSource::Serialize(::protozero::Message* msg) const {
+  // Field 1: new_instance_id
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, new_instance_id_);
+  }
+
+  // Field 2: config
+  if (_has_field_[2]) {
+    (*config_).Serialize(msg->BeginNestedMessage<::protozero::Message>(2));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+GetAsyncCommandResponse_SetupTracing::GetAsyncCommandResponse_SetupTracing() = default;
+GetAsyncCommandResponse_SetupTracing::~GetAsyncCommandResponse_SetupTracing() = default;
+GetAsyncCommandResponse_SetupTracing::GetAsyncCommandResponse_SetupTracing(const GetAsyncCommandResponse_SetupTracing&) = default;
+GetAsyncCommandResponse_SetupTracing& GetAsyncCommandResponse_SetupTracing::operator=(const GetAsyncCommandResponse_SetupTracing&) = default;
+GetAsyncCommandResponse_SetupTracing::GetAsyncCommandResponse_SetupTracing(GetAsyncCommandResponse_SetupTracing&&) noexcept = default;
+GetAsyncCommandResponse_SetupTracing& GetAsyncCommandResponse_SetupTracing::operator=(GetAsyncCommandResponse_SetupTracing&&) = default;
+
+bool GetAsyncCommandResponse_SetupTracing::operator==(const GetAsyncCommandResponse_SetupTracing& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && shared_buffer_page_size_kb_ == other.shared_buffer_page_size_kb_;
+}
+
+bool GetAsyncCommandResponse_SetupTracing::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* shared_buffer_page_size_kb */:
+        field.get(&shared_buffer_page_size_kb_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string GetAsyncCommandResponse_SetupTracing::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> GetAsyncCommandResponse_SetupTracing::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void GetAsyncCommandResponse_SetupTracing::Serialize(::protozero::Message* msg) const {
+  // Field 1: shared_buffer_page_size_kb
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, shared_buffer_page_size_kb_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+GetAsyncCommandRequest::GetAsyncCommandRequest() = default;
+GetAsyncCommandRequest::~GetAsyncCommandRequest() = default;
+GetAsyncCommandRequest::GetAsyncCommandRequest(const GetAsyncCommandRequest&) = default;
+GetAsyncCommandRequest& GetAsyncCommandRequest::operator=(const GetAsyncCommandRequest&) = default;
+GetAsyncCommandRequest::GetAsyncCommandRequest(GetAsyncCommandRequest&&) noexcept = default;
+GetAsyncCommandRequest& GetAsyncCommandRequest::operator=(GetAsyncCommandRequest&&) = default;
+
+bool GetAsyncCommandRequest::operator==(const GetAsyncCommandRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool GetAsyncCommandRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string GetAsyncCommandRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> GetAsyncCommandRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void GetAsyncCommandRequest::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+ActivateTriggersResponse::ActivateTriggersResponse() = default;
+ActivateTriggersResponse::~ActivateTriggersResponse() = default;
+ActivateTriggersResponse::ActivateTriggersResponse(const ActivateTriggersResponse&) = default;
+ActivateTriggersResponse& ActivateTriggersResponse::operator=(const ActivateTriggersResponse&) = default;
+ActivateTriggersResponse::ActivateTriggersResponse(ActivateTriggersResponse&&) noexcept = default;
+ActivateTriggersResponse& ActivateTriggersResponse::operator=(ActivateTriggersResponse&&) = default;
+
+bool ActivateTriggersResponse::operator==(const ActivateTriggersResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool ActivateTriggersResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ActivateTriggersResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ActivateTriggersResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ActivateTriggersResponse::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+ActivateTriggersRequest::ActivateTriggersRequest() = default;
+ActivateTriggersRequest::~ActivateTriggersRequest() = default;
+ActivateTriggersRequest::ActivateTriggersRequest(const ActivateTriggersRequest&) = default;
+ActivateTriggersRequest& ActivateTriggersRequest::operator=(const ActivateTriggersRequest&) = default;
+ActivateTriggersRequest::ActivateTriggersRequest(ActivateTriggersRequest&&) noexcept = default;
+ActivateTriggersRequest& ActivateTriggersRequest::operator=(ActivateTriggersRequest&&) = default;
+
+bool ActivateTriggersRequest::operator==(const ActivateTriggersRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && trigger_names_ == other.trigger_names_;
+}
+
+bool ActivateTriggersRequest::ParseFromArray(const void* raw, size_t size) {
+  trigger_names_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* trigger_names */:
+        trigger_names_.emplace_back();
+        field.get(&trigger_names_.back());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string ActivateTriggersRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> ActivateTriggersRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void ActivateTriggersRequest::Serialize(::protozero::Message* msg) const {
+  // Field 1: trigger_names
+  for (auto& it : trigger_names_) {
+    msg->AppendString(1, it);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+NotifyDataSourceStoppedResponse::NotifyDataSourceStoppedResponse() = default;
+NotifyDataSourceStoppedResponse::~NotifyDataSourceStoppedResponse() = default;
+NotifyDataSourceStoppedResponse::NotifyDataSourceStoppedResponse(const NotifyDataSourceStoppedResponse&) = default;
+NotifyDataSourceStoppedResponse& NotifyDataSourceStoppedResponse::operator=(const NotifyDataSourceStoppedResponse&) = default;
+NotifyDataSourceStoppedResponse::NotifyDataSourceStoppedResponse(NotifyDataSourceStoppedResponse&&) noexcept = default;
+NotifyDataSourceStoppedResponse& NotifyDataSourceStoppedResponse::operator=(NotifyDataSourceStoppedResponse&&) = default;
+
+bool NotifyDataSourceStoppedResponse::operator==(const NotifyDataSourceStoppedResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool NotifyDataSourceStoppedResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string NotifyDataSourceStoppedResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> NotifyDataSourceStoppedResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void NotifyDataSourceStoppedResponse::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+NotifyDataSourceStoppedRequest::NotifyDataSourceStoppedRequest() = default;
+NotifyDataSourceStoppedRequest::~NotifyDataSourceStoppedRequest() = default;
+NotifyDataSourceStoppedRequest::NotifyDataSourceStoppedRequest(const NotifyDataSourceStoppedRequest&) = default;
+NotifyDataSourceStoppedRequest& NotifyDataSourceStoppedRequest::operator=(const NotifyDataSourceStoppedRequest&) = default;
+NotifyDataSourceStoppedRequest::NotifyDataSourceStoppedRequest(NotifyDataSourceStoppedRequest&&) noexcept = default;
+NotifyDataSourceStoppedRequest& NotifyDataSourceStoppedRequest::operator=(NotifyDataSourceStoppedRequest&&) = default;
+
+bool NotifyDataSourceStoppedRequest::operator==(const NotifyDataSourceStoppedRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && data_source_id_ == other.data_source_id_;
+}
+
+bool NotifyDataSourceStoppedRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* data_source_id */:
+        field.get(&data_source_id_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string NotifyDataSourceStoppedRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> NotifyDataSourceStoppedRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void NotifyDataSourceStoppedRequest::Serialize(::protozero::Message* msg) const {
+  // Field 1: data_source_id
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, data_source_id_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+NotifyDataSourceStartedResponse::NotifyDataSourceStartedResponse() = default;
+NotifyDataSourceStartedResponse::~NotifyDataSourceStartedResponse() = default;
+NotifyDataSourceStartedResponse::NotifyDataSourceStartedResponse(const NotifyDataSourceStartedResponse&) = default;
+NotifyDataSourceStartedResponse& NotifyDataSourceStartedResponse::operator=(const NotifyDataSourceStartedResponse&) = default;
+NotifyDataSourceStartedResponse::NotifyDataSourceStartedResponse(NotifyDataSourceStartedResponse&&) noexcept = default;
+NotifyDataSourceStartedResponse& NotifyDataSourceStartedResponse::operator=(NotifyDataSourceStartedResponse&&) = default;
+
+bool NotifyDataSourceStartedResponse::operator==(const NotifyDataSourceStartedResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool NotifyDataSourceStartedResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string NotifyDataSourceStartedResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> NotifyDataSourceStartedResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void NotifyDataSourceStartedResponse::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+NotifyDataSourceStartedRequest::NotifyDataSourceStartedRequest() = default;
+NotifyDataSourceStartedRequest::~NotifyDataSourceStartedRequest() = default;
+NotifyDataSourceStartedRequest::NotifyDataSourceStartedRequest(const NotifyDataSourceStartedRequest&) = default;
+NotifyDataSourceStartedRequest& NotifyDataSourceStartedRequest::operator=(const NotifyDataSourceStartedRequest&) = default;
+NotifyDataSourceStartedRequest::NotifyDataSourceStartedRequest(NotifyDataSourceStartedRequest&&) noexcept = default;
+NotifyDataSourceStartedRequest& NotifyDataSourceStartedRequest::operator=(NotifyDataSourceStartedRequest&&) = default;
+
+bool NotifyDataSourceStartedRequest::operator==(const NotifyDataSourceStartedRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && data_source_id_ == other.data_source_id_;
+}
+
+bool NotifyDataSourceStartedRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* data_source_id */:
+        field.get(&data_source_id_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string NotifyDataSourceStartedRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> NotifyDataSourceStartedRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void NotifyDataSourceStartedRequest::Serialize(::protozero::Message* msg) const {
+  // Field 1: data_source_id
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, data_source_id_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+CommitDataResponse::CommitDataResponse() = default;
+CommitDataResponse::~CommitDataResponse() = default;
+CommitDataResponse::CommitDataResponse(const CommitDataResponse&) = default;
+CommitDataResponse& CommitDataResponse::operator=(const CommitDataResponse&) = default;
+CommitDataResponse::CommitDataResponse(CommitDataResponse&&) noexcept = default;
+CommitDataResponse& CommitDataResponse::operator=(CommitDataResponse&&) = default;
+
+bool CommitDataResponse::operator==(const CommitDataResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool CommitDataResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string CommitDataResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> CommitDataResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void CommitDataResponse::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+UnregisterTraceWriterResponse::UnregisterTraceWriterResponse() = default;
+UnregisterTraceWriterResponse::~UnregisterTraceWriterResponse() = default;
+UnregisterTraceWriterResponse::UnregisterTraceWriterResponse(const UnregisterTraceWriterResponse&) = default;
+UnregisterTraceWriterResponse& UnregisterTraceWriterResponse::operator=(const UnregisterTraceWriterResponse&) = default;
+UnregisterTraceWriterResponse::UnregisterTraceWriterResponse(UnregisterTraceWriterResponse&&) noexcept = default;
+UnregisterTraceWriterResponse& UnregisterTraceWriterResponse::operator=(UnregisterTraceWriterResponse&&) = default;
+
+bool UnregisterTraceWriterResponse::operator==(const UnregisterTraceWriterResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool UnregisterTraceWriterResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string UnregisterTraceWriterResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> UnregisterTraceWriterResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void UnregisterTraceWriterResponse::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+UnregisterTraceWriterRequest::UnregisterTraceWriterRequest() = default;
+UnregisterTraceWriterRequest::~UnregisterTraceWriterRequest() = default;
+UnregisterTraceWriterRequest::UnregisterTraceWriterRequest(const UnregisterTraceWriterRequest&) = default;
+UnregisterTraceWriterRequest& UnregisterTraceWriterRequest::operator=(const UnregisterTraceWriterRequest&) = default;
+UnregisterTraceWriterRequest::UnregisterTraceWriterRequest(UnregisterTraceWriterRequest&&) noexcept = default;
+UnregisterTraceWriterRequest& UnregisterTraceWriterRequest::operator=(UnregisterTraceWriterRequest&&) = default;
+
+bool UnregisterTraceWriterRequest::operator==(const UnregisterTraceWriterRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && trace_writer_id_ == other.trace_writer_id_;
+}
+
+bool UnregisterTraceWriterRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* trace_writer_id */:
+        field.get(&trace_writer_id_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string UnregisterTraceWriterRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> UnregisterTraceWriterRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void UnregisterTraceWriterRequest::Serialize(::protozero::Message* msg) const {
+  // Field 1: trace_writer_id
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, trace_writer_id_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+RegisterTraceWriterResponse::RegisterTraceWriterResponse() = default;
+RegisterTraceWriterResponse::~RegisterTraceWriterResponse() = default;
+RegisterTraceWriterResponse::RegisterTraceWriterResponse(const RegisterTraceWriterResponse&) = default;
+RegisterTraceWriterResponse& RegisterTraceWriterResponse::operator=(const RegisterTraceWriterResponse&) = default;
+RegisterTraceWriterResponse::RegisterTraceWriterResponse(RegisterTraceWriterResponse&&) noexcept = default;
+RegisterTraceWriterResponse& RegisterTraceWriterResponse::operator=(RegisterTraceWriterResponse&&) = default;
+
+bool RegisterTraceWriterResponse::operator==(const RegisterTraceWriterResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool RegisterTraceWriterResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string RegisterTraceWriterResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> RegisterTraceWriterResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void RegisterTraceWriterResponse::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+RegisterTraceWriterRequest::RegisterTraceWriterRequest() = default;
+RegisterTraceWriterRequest::~RegisterTraceWriterRequest() = default;
+RegisterTraceWriterRequest::RegisterTraceWriterRequest(const RegisterTraceWriterRequest&) = default;
+RegisterTraceWriterRequest& RegisterTraceWriterRequest::operator=(const RegisterTraceWriterRequest&) = default;
+RegisterTraceWriterRequest::RegisterTraceWriterRequest(RegisterTraceWriterRequest&&) noexcept = default;
+RegisterTraceWriterRequest& RegisterTraceWriterRequest::operator=(RegisterTraceWriterRequest&&) = default;
+
+bool RegisterTraceWriterRequest::operator==(const RegisterTraceWriterRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && trace_writer_id_ == other.trace_writer_id_
+   && target_buffer_ == other.target_buffer_;
+}
+
+bool RegisterTraceWriterRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* trace_writer_id */:
+        field.get(&trace_writer_id_);
+        break;
+      case 2 /* target_buffer */:
+        field.get(&target_buffer_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string RegisterTraceWriterRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> RegisterTraceWriterRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void RegisterTraceWriterRequest::Serialize(::protozero::Message* msg) const {
+  // Field 1: trace_writer_id
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, trace_writer_id_);
+  }
+
+  // Field 2: target_buffer
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, target_buffer_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+UnregisterDataSourceResponse::UnregisterDataSourceResponse() = default;
+UnregisterDataSourceResponse::~UnregisterDataSourceResponse() = default;
+UnregisterDataSourceResponse::UnregisterDataSourceResponse(const UnregisterDataSourceResponse&) = default;
+UnregisterDataSourceResponse& UnregisterDataSourceResponse::operator=(const UnregisterDataSourceResponse&) = default;
+UnregisterDataSourceResponse::UnregisterDataSourceResponse(UnregisterDataSourceResponse&&) noexcept = default;
+UnregisterDataSourceResponse& UnregisterDataSourceResponse::operator=(UnregisterDataSourceResponse&&) = default;
+
+bool UnregisterDataSourceResponse::operator==(const UnregisterDataSourceResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_;
+}
+
+bool UnregisterDataSourceResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string UnregisterDataSourceResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> UnregisterDataSourceResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void UnregisterDataSourceResponse::Serialize(::protozero::Message* msg) const {
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+UnregisterDataSourceRequest::UnregisterDataSourceRequest() = default;
+UnregisterDataSourceRequest::~UnregisterDataSourceRequest() = default;
+UnregisterDataSourceRequest::UnregisterDataSourceRequest(const UnregisterDataSourceRequest&) = default;
+UnregisterDataSourceRequest& UnregisterDataSourceRequest::operator=(const UnregisterDataSourceRequest&) = default;
+UnregisterDataSourceRequest::UnregisterDataSourceRequest(UnregisterDataSourceRequest&&) noexcept = default;
+UnregisterDataSourceRequest& UnregisterDataSourceRequest::operator=(UnregisterDataSourceRequest&&) = default;
+
+bool UnregisterDataSourceRequest::operator==(const UnregisterDataSourceRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && data_source_name_ == other.data_source_name_;
+}
+
+bool UnregisterDataSourceRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* data_source_name */:
+        field.get(&data_source_name_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string UnregisterDataSourceRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> UnregisterDataSourceRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void UnregisterDataSourceRequest::Serialize(::protozero::Message* msg) const {
+  // Field 1: data_source_name
+  if (_has_field_[1]) {
+    msg->AppendString(1, data_source_name_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+RegisterDataSourceResponse::RegisterDataSourceResponse() = default;
+RegisterDataSourceResponse::~RegisterDataSourceResponse() = default;
+RegisterDataSourceResponse::RegisterDataSourceResponse(const RegisterDataSourceResponse&) = default;
+RegisterDataSourceResponse& RegisterDataSourceResponse::operator=(const RegisterDataSourceResponse&) = default;
+RegisterDataSourceResponse::RegisterDataSourceResponse(RegisterDataSourceResponse&&) noexcept = default;
+RegisterDataSourceResponse& RegisterDataSourceResponse::operator=(RegisterDataSourceResponse&&) = default;
+
+bool RegisterDataSourceResponse::operator==(const RegisterDataSourceResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && error_ == other.error_;
+}
+
+bool RegisterDataSourceResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* error */:
+        field.get(&error_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string RegisterDataSourceResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> RegisterDataSourceResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void RegisterDataSourceResponse::Serialize(::protozero::Message* msg) const {
+  // Field 1: error
+  if (_has_field_[1]) {
+    msg->AppendString(1, error_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+RegisterDataSourceRequest::RegisterDataSourceRequest() = default;
+RegisterDataSourceRequest::~RegisterDataSourceRequest() = default;
+RegisterDataSourceRequest::RegisterDataSourceRequest(const RegisterDataSourceRequest&) = default;
+RegisterDataSourceRequest& RegisterDataSourceRequest::operator=(const RegisterDataSourceRequest&) = default;
+RegisterDataSourceRequest::RegisterDataSourceRequest(RegisterDataSourceRequest&&) noexcept = default;
+RegisterDataSourceRequest& RegisterDataSourceRequest::operator=(RegisterDataSourceRequest&&) = default;
+
+bool RegisterDataSourceRequest::operator==(const RegisterDataSourceRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && data_source_descriptor_ == other.data_source_descriptor_;
+}
+
+bool RegisterDataSourceRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* data_source_descriptor */:
+        (*data_source_descriptor_).ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string RegisterDataSourceRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> RegisterDataSourceRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void RegisterDataSourceRequest::Serialize(::protozero::Message* msg) const {
+  // Field 1: data_source_descriptor
+  if (_has_field_[1]) {
+    (*data_source_descriptor_).Serialize(msg->BeginNestedMessage<::protozero::Message>(1));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+InitializeConnectionResponse::InitializeConnectionResponse() = default;
+InitializeConnectionResponse::~InitializeConnectionResponse() = default;
+InitializeConnectionResponse::InitializeConnectionResponse(const InitializeConnectionResponse&) = default;
+InitializeConnectionResponse& InitializeConnectionResponse::operator=(const InitializeConnectionResponse&) = default;
+InitializeConnectionResponse::InitializeConnectionResponse(InitializeConnectionResponse&&) noexcept = default;
+InitializeConnectionResponse& InitializeConnectionResponse::operator=(InitializeConnectionResponse&&) = default;
+
+bool InitializeConnectionResponse::operator==(const InitializeConnectionResponse& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && using_shmem_provided_by_producer_ == other.using_shmem_provided_by_producer_
+   && direct_smb_patching_supported_ == other.direct_smb_patching_supported_;
+}
+
+bool InitializeConnectionResponse::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* using_shmem_provided_by_producer */:
+        field.get(&using_shmem_provided_by_producer_);
+        break;
+      case 2 /* direct_smb_patching_supported */:
+        field.get(&direct_smb_patching_supported_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string InitializeConnectionResponse::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> InitializeConnectionResponse::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void InitializeConnectionResponse::Serialize(::protozero::Message* msg) const {
+  // Field 1: using_shmem_provided_by_producer
+  if (_has_field_[1]) {
+    msg->AppendTinyVarInt(1, using_shmem_provided_by_producer_);
+  }
+
+  // Field 2: direct_smb_patching_supported
+  if (_has_field_[2]) {
+    msg->AppendTinyVarInt(2, direct_smb_patching_supported_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+InitializeConnectionRequest::InitializeConnectionRequest() = default;
+InitializeConnectionRequest::~InitializeConnectionRequest() = default;
+InitializeConnectionRequest::InitializeConnectionRequest(const InitializeConnectionRequest&) = default;
+InitializeConnectionRequest& InitializeConnectionRequest::operator=(const InitializeConnectionRequest&) = default;
+InitializeConnectionRequest::InitializeConnectionRequest(InitializeConnectionRequest&&) noexcept = default;
+InitializeConnectionRequest& InitializeConnectionRequest::operator=(InitializeConnectionRequest&&) = default;
+
+bool InitializeConnectionRequest::operator==(const InitializeConnectionRequest& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && shared_memory_page_size_hint_bytes_ == other.shared_memory_page_size_hint_bytes_
+   && shared_memory_size_hint_bytes_ == other.shared_memory_size_hint_bytes_
+   && producer_name_ == other.producer_name_
+   && smb_scraping_mode_ == other.smb_scraping_mode_
+   && build_flags_ == other.build_flags_
+   && producer_provided_shmem_ == other.producer_provided_shmem_
+   && sdk_version_ == other.sdk_version_;
+}
+
+bool InitializeConnectionRequest::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* shared_memory_page_size_hint_bytes */:
+        field.get(&shared_memory_page_size_hint_bytes_);
+        break;
+      case 2 /* shared_memory_size_hint_bytes */:
+        field.get(&shared_memory_size_hint_bytes_);
+        break;
+      case 3 /* producer_name */:
+        field.get(&producer_name_);
+        break;
+      case 4 /* smb_scraping_mode */:
+        field.get(&smb_scraping_mode_);
+        break;
+      case 5 /* build_flags */:
+        field.get(&build_flags_);
+        break;
+      case 6 /* producer_provided_shmem */:
+        field.get(&producer_provided_shmem_);
+        break;
+      case 8 /* sdk_version */:
+        field.get(&sdk_version_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string InitializeConnectionRequest::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> InitializeConnectionRequest::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void InitializeConnectionRequest::Serialize(::protozero::Message* msg) const {
+  // Field 1: shared_memory_page_size_hint_bytes
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, shared_memory_page_size_hint_bytes_);
+  }
+
+  // Field 2: shared_memory_size_hint_bytes
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, shared_memory_size_hint_bytes_);
+  }
+
+  // Field 3: producer_name
+  if (_has_field_[3]) {
+    msg->AppendString(3, producer_name_);
+  }
+
+  // Field 4: smb_scraping_mode
+  if (_has_field_[4]) {
+    msg->AppendVarInt(4, smb_scraping_mode_);
+  }
+
+  // Field 5: build_flags
+  if (_has_field_[5]) {
+    msg->AppendVarInt(5, build_flags_);
+  }
+
+  // Field 6: producer_provided_shmem
+  if (_has_field_[6]) {
+    msg->AppendTinyVarInt(6, producer_provided_shmem_);
+  }
+
+  // Field 8: sdk_version
+  if (_has_field_[8]) {
+    msg->AppendString(8, sdk_version_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: gen/protos/perfetto/ipc/wire_protocol.gen.cc
+// gen_amalgamated begin header: gen/protos/perfetto/ipc/wire_protocol.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_WIRE_PROTOCOL_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_WIRE_PROTOCOL_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class IPCFrame;
+class IPCFrame_RequestError;
+class IPCFrame_InvokeMethodReply;
+class IPCFrame_InvokeMethod;
+class IPCFrame_BindServiceReply;
+class IPCFrame_BindServiceReply_MethodInfo;
+class IPCFrame_BindService;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT IPCFrame : public ::protozero::CppMessageObj {
+ public:
+  using BindService = IPCFrame_BindService;
+  using BindServiceReply = IPCFrame_BindServiceReply;
+  using InvokeMethod = IPCFrame_InvokeMethod;
+  using InvokeMethodReply = IPCFrame_InvokeMethodReply;
+  using RequestError = IPCFrame_RequestError;
+  enum FieldNumbers {
+    kRequestIdFieldNumber = 2,
+    kMsgBindServiceFieldNumber = 3,
+    kMsgBindServiceReplyFieldNumber = 4,
+    kMsgInvokeMethodFieldNumber = 5,
+    kMsgInvokeMethodReplyFieldNumber = 6,
+    kMsgRequestErrorFieldNumber = 7,
+    kDataForTestingFieldNumber = 1,
+  };
+
+  IPCFrame();
+  ~IPCFrame() override;
+  IPCFrame(IPCFrame&&) noexcept;
+  IPCFrame& operator=(IPCFrame&&);
+  IPCFrame(const IPCFrame&);
+  IPCFrame& operator=(const IPCFrame&);
+  bool operator==(const IPCFrame&) const;
+  bool operator!=(const IPCFrame& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_request_id() const { return _has_field_[2]; }
+  uint64_t request_id() const { return request_id_; }
+  void set_request_id(uint64_t value) { request_id_ = value; _has_field_.set(2); }
+
+  bool has_msg_bind_service() const { return _has_field_[3]; }
+  const IPCFrame_BindService& msg_bind_service() const { return *msg_bind_service_; }
+  IPCFrame_BindService* mutable_msg_bind_service() { _has_field_.set(3); return msg_bind_service_.get(); }
+
+  bool has_msg_bind_service_reply() const { return _has_field_[4]; }
+  const IPCFrame_BindServiceReply& msg_bind_service_reply() const { return *msg_bind_service_reply_; }
+  IPCFrame_BindServiceReply* mutable_msg_bind_service_reply() { _has_field_.set(4); return msg_bind_service_reply_.get(); }
+
+  bool has_msg_invoke_method() const { return _has_field_[5]; }
+  const IPCFrame_InvokeMethod& msg_invoke_method() const { return *msg_invoke_method_; }
+  IPCFrame_InvokeMethod* mutable_msg_invoke_method() { _has_field_.set(5); return msg_invoke_method_.get(); }
+
+  bool has_msg_invoke_method_reply() const { return _has_field_[6]; }
+  const IPCFrame_InvokeMethodReply& msg_invoke_method_reply() const { return *msg_invoke_method_reply_; }
+  IPCFrame_InvokeMethodReply* mutable_msg_invoke_method_reply() { _has_field_.set(6); return msg_invoke_method_reply_.get(); }
+
+  bool has_msg_request_error() const { return _has_field_[7]; }
+  const IPCFrame_RequestError& msg_request_error() const { return *msg_request_error_; }
+  IPCFrame_RequestError* mutable_msg_request_error() { _has_field_.set(7); return msg_request_error_.get(); }
+
+  const std::vector<std::string>& data_for_testing() const { return data_for_testing_; }
+  std::vector<std::string>* mutable_data_for_testing() { return &data_for_testing_; }
+  int data_for_testing_size() const { return static_cast<int>(data_for_testing_.size()); }
+  void clear_data_for_testing() { data_for_testing_.clear(); }
+  void add_data_for_testing(std::string value) { data_for_testing_.emplace_back(value); }
+  std::string* add_data_for_testing() { data_for_testing_.emplace_back(); return &data_for_testing_.back(); }
+
+ private:
+  uint64_t request_id_{};
+  ::protozero::CopyablePtr<IPCFrame_BindService> msg_bind_service_;
+  ::protozero::CopyablePtr<IPCFrame_BindServiceReply> msg_bind_service_reply_;
+  ::protozero::CopyablePtr<IPCFrame_InvokeMethod> msg_invoke_method_;
+  ::protozero::CopyablePtr<IPCFrame_InvokeMethodReply> msg_invoke_method_reply_;
+  ::protozero::CopyablePtr<IPCFrame_RequestError> msg_request_error_;
+  std::vector<std::string> data_for_testing_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<8> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT IPCFrame_RequestError : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kErrorFieldNumber = 1,
+  };
+
+  IPCFrame_RequestError();
+  ~IPCFrame_RequestError() override;
+  IPCFrame_RequestError(IPCFrame_RequestError&&) noexcept;
+  IPCFrame_RequestError& operator=(IPCFrame_RequestError&&);
+  IPCFrame_RequestError(const IPCFrame_RequestError&);
+  IPCFrame_RequestError& operator=(const IPCFrame_RequestError&);
+  bool operator==(const IPCFrame_RequestError&) const;
+  bool operator!=(const IPCFrame_RequestError& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_error() const { return _has_field_[1]; }
+  const std::string& error() const { return error_; }
+  void set_error(const std::string& value) { error_ = value; _has_field_.set(1); }
+
+ private:
+  std::string error_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT IPCFrame_InvokeMethodReply : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kSuccessFieldNumber = 1,
+    kHasMoreFieldNumber = 2,
+    kReplyProtoFieldNumber = 3,
+  };
+
+  IPCFrame_InvokeMethodReply();
+  ~IPCFrame_InvokeMethodReply() override;
+  IPCFrame_InvokeMethodReply(IPCFrame_InvokeMethodReply&&) noexcept;
+  IPCFrame_InvokeMethodReply& operator=(IPCFrame_InvokeMethodReply&&);
+  IPCFrame_InvokeMethodReply(const IPCFrame_InvokeMethodReply&);
+  IPCFrame_InvokeMethodReply& operator=(const IPCFrame_InvokeMethodReply&);
+  bool operator==(const IPCFrame_InvokeMethodReply&) const;
+  bool operator!=(const IPCFrame_InvokeMethodReply& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_success() const { return _has_field_[1]; }
+  bool success() const { return success_; }
+  void set_success(bool value) { success_ = value; _has_field_.set(1); }
+
+  bool has_has_more() const { return _has_field_[2]; }
+  bool has_more() const { return has_more_; }
+  void set_has_more(bool value) { has_more_ = value; _has_field_.set(2); }
+
+  bool has_reply_proto() const { return _has_field_[3]; }
+  const std::string& reply_proto() const { return reply_proto_; }
+  void set_reply_proto(const std::string& value) { reply_proto_ = value; _has_field_.set(3); }
+  void set_reply_proto(const void* p, size_t s) { reply_proto_.assign(reinterpret_cast<const char*>(p), s); _has_field_.set(3); }
+
+ private:
+  bool success_{};
+  bool has_more_{};
+  std::string reply_proto_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT IPCFrame_InvokeMethod : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kServiceIdFieldNumber = 1,
+    kMethodIdFieldNumber = 2,
+    kArgsProtoFieldNumber = 3,
+    kDropReplyFieldNumber = 4,
+  };
+
+  IPCFrame_InvokeMethod();
+  ~IPCFrame_InvokeMethod() override;
+  IPCFrame_InvokeMethod(IPCFrame_InvokeMethod&&) noexcept;
+  IPCFrame_InvokeMethod& operator=(IPCFrame_InvokeMethod&&);
+  IPCFrame_InvokeMethod(const IPCFrame_InvokeMethod&);
+  IPCFrame_InvokeMethod& operator=(const IPCFrame_InvokeMethod&);
+  bool operator==(const IPCFrame_InvokeMethod&) const;
+  bool operator!=(const IPCFrame_InvokeMethod& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_service_id() const { return _has_field_[1]; }
+  uint32_t service_id() const { return service_id_; }
+  void set_service_id(uint32_t value) { service_id_ = value; _has_field_.set(1); }
+
+  bool has_method_id() const { return _has_field_[2]; }
+  uint32_t method_id() const { return method_id_; }
+  void set_method_id(uint32_t value) { method_id_ = value; _has_field_.set(2); }
+
+  bool has_args_proto() const { return _has_field_[3]; }
+  const std::string& args_proto() const { return args_proto_; }
+  void set_args_proto(const std::string& value) { args_proto_ = value; _has_field_.set(3); }
+  void set_args_proto(const void* p, size_t s) { args_proto_.assign(reinterpret_cast<const char*>(p), s); _has_field_.set(3); }
+
+  bool has_drop_reply() const { return _has_field_[4]; }
+  bool drop_reply() const { return drop_reply_; }
+  void set_drop_reply(bool value) { drop_reply_ = value; _has_field_.set(4); }
+
+ private:
+  uint32_t service_id_{};
+  uint32_t method_id_{};
+  std::string args_proto_{};
+  bool drop_reply_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT IPCFrame_BindServiceReply : public ::protozero::CppMessageObj {
+ public:
+  using MethodInfo = IPCFrame_BindServiceReply_MethodInfo;
+  enum FieldNumbers {
+    kSuccessFieldNumber = 1,
+    kServiceIdFieldNumber = 2,
+    kMethodsFieldNumber = 3,
+  };
+
+  IPCFrame_BindServiceReply();
+  ~IPCFrame_BindServiceReply() override;
+  IPCFrame_BindServiceReply(IPCFrame_BindServiceReply&&) noexcept;
+  IPCFrame_BindServiceReply& operator=(IPCFrame_BindServiceReply&&);
+  IPCFrame_BindServiceReply(const IPCFrame_BindServiceReply&);
+  IPCFrame_BindServiceReply& operator=(const IPCFrame_BindServiceReply&);
+  bool operator==(const IPCFrame_BindServiceReply&) const;
+  bool operator!=(const IPCFrame_BindServiceReply& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_success() const { return _has_field_[1]; }
+  bool success() const { return success_; }
+  void set_success(bool value) { success_ = value; _has_field_.set(1); }
+
+  bool has_service_id() const { return _has_field_[2]; }
+  uint32_t service_id() const { return service_id_; }
+  void set_service_id(uint32_t value) { service_id_ = value; _has_field_.set(2); }
+
+  const std::vector<IPCFrame_BindServiceReply_MethodInfo>& methods() const { return methods_; }
+  std::vector<IPCFrame_BindServiceReply_MethodInfo>* mutable_methods() { return &methods_; }
+  int methods_size() const;
+  void clear_methods();
+  IPCFrame_BindServiceReply_MethodInfo* add_methods();
+
+ private:
+  bool success_{};
+  uint32_t service_id_{};
+  std::vector<IPCFrame_BindServiceReply_MethodInfo> methods_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT IPCFrame_BindServiceReply_MethodInfo : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIdFieldNumber = 1,
+    kNameFieldNumber = 2,
+  };
+
+  IPCFrame_BindServiceReply_MethodInfo();
+  ~IPCFrame_BindServiceReply_MethodInfo() override;
+  IPCFrame_BindServiceReply_MethodInfo(IPCFrame_BindServiceReply_MethodInfo&&) noexcept;
+  IPCFrame_BindServiceReply_MethodInfo& operator=(IPCFrame_BindServiceReply_MethodInfo&&);
+  IPCFrame_BindServiceReply_MethodInfo(const IPCFrame_BindServiceReply_MethodInfo&);
+  IPCFrame_BindServiceReply_MethodInfo& operator=(const IPCFrame_BindServiceReply_MethodInfo&);
+  bool operator==(const IPCFrame_BindServiceReply_MethodInfo&) const;
+  bool operator!=(const IPCFrame_BindServiceReply_MethodInfo& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_id() const { return _has_field_[1]; }
+  uint32_t id() const { return id_; }
+  void set_id(uint32_t value) { id_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+ private:
+  uint32_t id_{};
+  std::string name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT IPCFrame_BindService : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kServiceNameFieldNumber = 1,
+  };
+
+  IPCFrame_BindService();
+  ~IPCFrame_BindService() override;
+  IPCFrame_BindService(IPCFrame_BindService&&) noexcept;
+  IPCFrame_BindService& operator=(IPCFrame_BindService&&);
+  IPCFrame_BindService(const IPCFrame_BindService&);
+  IPCFrame_BindService& operator=(const IPCFrame_BindService&);
+  bool operator==(const IPCFrame_BindService&) const;
+  bool operator!=(const IPCFrame_BindService& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_service_name() const { return _has_field_[1]; }
+  const std::string& service_name() const { return service_name_; }
+  void set_service_name(const std::string& value) { service_name_ = value; _has_field_.set(1); }
+
+ private:
+  std::string service_name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_WIRE_PROTOCOL_PROTO_CPP_H_
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wfloat-equal"
+#endif
+// gen_amalgamated expanded: #include "protos/perfetto/ipc/wire_protocol.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+IPCFrame::IPCFrame() = default;
+IPCFrame::~IPCFrame() = default;
+IPCFrame::IPCFrame(const IPCFrame&) = default;
+IPCFrame& IPCFrame::operator=(const IPCFrame&) = default;
+IPCFrame::IPCFrame(IPCFrame&&) noexcept = default;
+IPCFrame& IPCFrame::operator=(IPCFrame&&) = default;
+
+bool IPCFrame::operator==(const IPCFrame& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && request_id_ == other.request_id_
+   && msg_bind_service_ == other.msg_bind_service_
+   && msg_bind_service_reply_ == other.msg_bind_service_reply_
+   && msg_invoke_method_ == other.msg_invoke_method_
+   && msg_invoke_method_reply_ == other.msg_invoke_method_reply_
+   && msg_request_error_ == other.msg_request_error_
+   && data_for_testing_ == other.data_for_testing_;
+}
+
+bool IPCFrame::ParseFromArray(const void* raw, size_t size) {
+  data_for_testing_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 2 /* request_id */:
+        field.get(&request_id_);
+        break;
+      case 3 /* msg_bind_service */:
+        (*msg_bind_service_).ParseFromArray(field.data(), field.size());
+        break;
+      case 4 /* msg_bind_service_reply */:
+        (*msg_bind_service_reply_).ParseFromArray(field.data(), field.size());
+        break;
+      case 5 /* msg_invoke_method */:
+        (*msg_invoke_method_).ParseFromArray(field.data(), field.size());
+        break;
+      case 6 /* msg_invoke_method_reply */:
+        (*msg_invoke_method_reply_).ParseFromArray(field.data(), field.size());
+        break;
+      case 7 /* msg_request_error */:
+        (*msg_request_error_).ParseFromArray(field.data(), field.size());
+        break;
+      case 1 /* data_for_testing */:
+        data_for_testing_.emplace_back();
+        field.get(&data_for_testing_.back());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string IPCFrame::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> IPCFrame::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void IPCFrame::Serialize(::protozero::Message* msg) const {
+  // Field 2: request_id
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, request_id_);
+  }
+
+  // Field 3: msg_bind_service
+  if (_has_field_[3]) {
+    (*msg_bind_service_).Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
+  }
+
+  // Field 4: msg_bind_service_reply
+  if (_has_field_[4]) {
+    (*msg_bind_service_reply_).Serialize(msg->BeginNestedMessage<::protozero::Message>(4));
+  }
+
+  // Field 5: msg_invoke_method
+  if (_has_field_[5]) {
+    (*msg_invoke_method_).Serialize(msg->BeginNestedMessage<::protozero::Message>(5));
+  }
+
+  // Field 6: msg_invoke_method_reply
+  if (_has_field_[6]) {
+    (*msg_invoke_method_reply_).Serialize(msg->BeginNestedMessage<::protozero::Message>(6));
+  }
+
+  // Field 7: msg_request_error
+  if (_has_field_[7]) {
+    (*msg_request_error_).Serialize(msg->BeginNestedMessage<::protozero::Message>(7));
+  }
+
+  // Field 1: data_for_testing
+  for (auto& it : data_for_testing_) {
+    msg->AppendString(1, it);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+IPCFrame_RequestError::IPCFrame_RequestError() = default;
+IPCFrame_RequestError::~IPCFrame_RequestError() = default;
+IPCFrame_RequestError::IPCFrame_RequestError(const IPCFrame_RequestError&) = default;
+IPCFrame_RequestError& IPCFrame_RequestError::operator=(const IPCFrame_RequestError&) = default;
+IPCFrame_RequestError::IPCFrame_RequestError(IPCFrame_RequestError&&) noexcept = default;
+IPCFrame_RequestError& IPCFrame_RequestError::operator=(IPCFrame_RequestError&&) = default;
+
+bool IPCFrame_RequestError::operator==(const IPCFrame_RequestError& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && error_ == other.error_;
+}
+
+bool IPCFrame_RequestError::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* error */:
+        field.get(&error_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string IPCFrame_RequestError::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> IPCFrame_RequestError::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void IPCFrame_RequestError::Serialize(::protozero::Message* msg) const {
+  // Field 1: error
+  if (_has_field_[1]) {
+    msg->AppendString(1, error_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+IPCFrame_InvokeMethodReply::IPCFrame_InvokeMethodReply() = default;
+IPCFrame_InvokeMethodReply::~IPCFrame_InvokeMethodReply() = default;
+IPCFrame_InvokeMethodReply::IPCFrame_InvokeMethodReply(const IPCFrame_InvokeMethodReply&) = default;
+IPCFrame_InvokeMethodReply& IPCFrame_InvokeMethodReply::operator=(const IPCFrame_InvokeMethodReply&) = default;
+IPCFrame_InvokeMethodReply::IPCFrame_InvokeMethodReply(IPCFrame_InvokeMethodReply&&) noexcept = default;
+IPCFrame_InvokeMethodReply& IPCFrame_InvokeMethodReply::operator=(IPCFrame_InvokeMethodReply&&) = default;
+
+bool IPCFrame_InvokeMethodReply::operator==(const IPCFrame_InvokeMethodReply& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && success_ == other.success_
+   && has_more_ == other.has_more_
+   && reply_proto_ == other.reply_proto_;
+}
+
+bool IPCFrame_InvokeMethodReply::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* success */:
+        field.get(&success_);
+        break;
+      case 2 /* has_more */:
+        field.get(&has_more_);
+        break;
+      case 3 /* reply_proto */:
+        field.get(&reply_proto_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string IPCFrame_InvokeMethodReply::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> IPCFrame_InvokeMethodReply::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void IPCFrame_InvokeMethodReply::Serialize(::protozero::Message* msg) const {
+  // Field 1: success
+  if (_has_field_[1]) {
+    msg->AppendTinyVarInt(1, success_);
+  }
+
+  // Field 2: has_more
+  if (_has_field_[2]) {
+    msg->AppendTinyVarInt(2, has_more_);
+  }
+
+  // Field 3: reply_proto
+  if (_has_field_[3]) {
+    msg->AppendString(3, reply_proto_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+IPCFrame_InvokeMethod::IPCFrame_InvokeMethod() = default;
+IPCFrame_InvokeMethod::~IPCFrame_InvokeMethod() = default;
+IPCFrame_InvokeMethod::IPCFrame_InvokeMethod(const IPCFrame_InvokeMethod&) = default;
+IPCFrame_InvokeMethod& IPCFrame_InvokeMethod::operator=(const IPCFrame_InvokeMethod&) = default;
+IPCFrame_InvokeMethod::IPCFrame_InvokeMethod(IPCFrame_InvokeMethod&&) noexcept = default;
+IPCFrame_InvokeMethod& IPCFrame_InvokeMethod::operator=(IPCFrame_InvokeMethod&&) = default;
+
+bool IPCFrame_InvokeMethod::operator==(const IPCFrame_InvokeMethod& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && service_id_ == other.service_id_
+   && method_id_ == other.method_id_
+   && args_proto_ == other.args_proto_
+   && drop_reply_ == other.drop_reply_;
+}
+
+bool IPCFrame_InvokeMethod::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* service_id */:
+        field.get(&service_id_);
+        break;
+      case 2 /* method_id */:
+        field.get(&method_id_);
+        break;
+      case 3 /* args_proto */:
+        field.get(&args_proto_);
+        break;
+      case 4 /* drop_reply */:
+        field.get(&drop_reply_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string IPCFrame_InvokeMethod::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> IPCFrame_InvokeMethod::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void IPCFrame_InvokeMethod::Serialize(::protozero::Message* msg) const {
+  // Field 1: service_id
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, service_id_);
+  }
+
+  // Field 2: method_id
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, method_id_);
+  }
+
+  // Field 3: args_proto
+  if (_has_field_[3]) {
+    msg->AppendString(3, args_proto_);
+  }
+
+  // Field 4: drop_reply
+  if (_has_field_[4]) {
+    msg->AppendTinyVarInt(4, drop_reply_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+IPCFrame_BindServiceReply::IPCFrame_BindServiceReply() = default;
+IPCFrame_BindServiceReply::~IPCFrame_BindServiceReply() = default;
+IPCFrame_BindServiceReply::IPCFrame_BindServiceReply(const IPCFrame_BindServiceReply&) = default;
+IPCFrame_BindServiceReply& IPCFrame_BindServiceReply::operator=(const IPCFrame_BindServiceReply&) = default;
+IPCFrame_BindServiceReply::IPCFrame_BindServiceReply(IPCFrame_BindServiceReply&&) noexcept = default;
+IPCFrame_BindServiceReply& IPCFrame_BindServiceReply::operator=(IPCFrame_BindServiceReply&&) = default;
+
+bool IPCFrame_BindServiceReply::operator==(const IPCFrame_BindServiceReply& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && success_ == other.success_
+   && service_id_ == other.service_id_
+   && methods_ == other.methods_;
+}
+
+int IPCFrame_BindServiceReply::methods_size() const { return static_cast<int>(methods_.size()); }
+void IPCFrame_BindServiceReply::clear_methods() { methods_.clear(); }
+IPCFrame_BindServiceReply_MethodInfo* IPCFrame_BindServiceReply::add_methods() { methods_.emplace_back(); return &methods_.back(); }
+bool IPCFrame_BindServiceReply::ParseFromArray(const void* raw, size_t size) {
+  methods_.clear();
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* success */:
+        field.get(&success_);
+        break;
+      case 2 /* service_id */:
+        field.get(&service_id_);
+        break;
+      case 3 /* methods */:
+        methods_.emplace_back();
+        methods_.back().ParseFromArray(field.data(), field.size());
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string IPCFrame_BindServiceReply::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> IPCFrame_BindServiceReply::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void IPCFrame_BindServiceReply::Serialize(::protozero::Message* msg) const {
+  // Field 1: success
+  if (_has_field_[1]) {
+    msg->AppendTinyVarInt(1, success_);
+  }
+
+  // Field 2: service_id
+  if (_has_field_[2]) {
+    msg->AppendVarInt(2, service_id_);
+  }
+
+  // Field 3: methods
+  for (auto& it : methods_) {
+    it.Serialize(msg->BeginNestedMessage<::protozero::Message>(3));
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+IPCFrame_BindServiceReply_MethodInfo::IPCFrame_BindServiceReply_MethodInfo() = default;
+IPCFrame_BindServiceReply_MethodInfo::~IPCFrame_BindServiceReply_MethodInfo() = default;
+IPCFrame_BindServiceReply_MethodInfo::IPCFrame_BindServiceReply_MethodInfo(const IPCFrame_BindServiceReply_MethodInfo&) = default;
+IPCFrame_BindServiceReply_MethodInfo& IPCFrame_BindServiceReply_MethodInfo::operator=(const IPCFrame_BindServiceReply_MethodInfo&) = default;
+IPCFrame_BindServiceReply_MethodInfo::IPCFrame_BindServiceReply_MethodInfo(IPCFrame_BindServiceReply_MethodInfo&&) noexcept = default;
+IPCFrame_BindServiceReply_MethodInfo& IPCFrame_BindServiceReply_MethodInfo::operator=(IPCFrame_BindServiceReply_MethodInfo&&) = default;
+
+bool IPCFrame_BindServiceReply_MethodInfo::operator==(const IPCFrame_BindServiceReply_MethodInfo& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && id_ == other.id_
+   && name_ == other.name_;
+}
+
+bool IPCFrame_BindServiceReply_MethodInfo::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* id */:
+        field.get(&id_);
+        break;
+      case 2 /* name */:
+        field.get(&name_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string IPCFrame_BindServiceReply_MethodInfo::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> IPCFrame_BindServiceReply_MethodInfo::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void IPCFrame_BindServiceReply_MethodInfo::Serialize(::protozero::Message* msg) const {
+  // Field 1: id
+  if (_has_field_[1]) {
+    msg->AppendVarInt(1, id_);
+  }
+
+  // Field 2: name
+  if (_has_field_[2]) {
+    msg->AppendString(2, name_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+
+IPCFrame_BindService::IPCFrame_BindService() = default;
+IPCFrame_BindService::~IPCFrame_BindService() = default;
+IPCFrame_BindService::IPCFrame_BindService(const IPCFrame_BindService&) = default;
+IPCFrame_BindService& IPCFrame_BindService::operator=(const IPCFrame_BindService&) = default;
+IPCFrame_BindService::IPCFrame_BindService(IPCFrame_BindService&&) noexcept = default;
+IPCFrame_BindService& IPCFrame_BindService::operator=(IPCFrame_BindService&&) = default;
+
+bool IPCFrame_BindService::operator==(const IPCFrame_BindService& other) const {
+  return unknown_fields_ == other.unknown_fields_
+   && service_name_ == other.service_name_;
+}
+
+bool IPCFrame_BindService::ParseFromArray(const void* raw, size_t size) {
+  unknown_fields_.clear();
+  bool packed_error = false;
+
+  ::protozero::ProtoDecoder dec(raw, size);
+  for (auto field = dec.ReadField(); field.valid(); field = dec.ReadField()) {
+    if (field.id() < _has_field_.size()) {
+      _has_field_.set(field.id());
+    }
+    switch (field.id()) {
+      case 1 /* service_name */:
+        field.get(&service_name_);
+        break;
+      default:
+        field.SerializeAndAppendTo(&unknown_fields_);
+        break;
+    }
+  }
+  return !packed_error && !dec.bytes_left();
+}
+
+std::string IPCFrame_BindService::SerializeAsString() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsString();
+}
+
+std::vector<uint8_t> IPCFrame_BindService::SerializeAsArray() const {
+  ::protozero::HeapBuffered<::protozero::Message> msg;
+  Serialize(msg.get());
+  return msg.SerializeAsArray();
+}
+
+void IPCFrame_BindService::Serialize(::protozero::Message* msg) const {
+  // Field 1: service_name
+  if (_has_field_[1]) {
+    msg->AppendString(1, service_name_);
+  }
+
+  msg->AppendRawProtoBytes(unknown_fields_.data(), unknown_fields_.size());
+}
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+// gen_amalgamated begin source: src/base/unix_socket.cc
+// gen_amalgamated begin header: include/perfetto/ext/base/unix_socket.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_BASE_UNIX_SOCKET_H_
+#define INCLUDE_PERFETTO_EXT_BASE_UNIX_SOCKET_H_
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <memory>
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
+
+struct msghdr;
+
+namespace perfetto {
+namespace base {
+
+// Define the SocketHandle and ScopedSocketHandle types.
+// On POSIX OSes, a SocketHandle is really just an int (a file descriptor).
+// On Windows, sockets are have their own type (SOCKET) which is neither a
+// HANDLE nor an int. However Windows SOCKET(s) can have a event HANDLE attached
+// to them (which in Perfetto is a PlatformHandle), and that can be used in
+// WaitForMultipleObjects, hence in base::TaskRunner.AddFileDescriptorWatch().
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+// uintptr_t really reads as SOCKET here (Windows headers typedef to that).
+// As usual we don't just use SOCKET here to avoid leaking Windows.h includes
+// in our headers.
+using SocketHandle = uintptr_t;  // SOCKET
+int CloseSocket(SocketHandle);   // A wrapper around ::closesocket().
+using ScopedSocketHandle =
+    ScopedResource<SocketHandle, CloseSocket, static_cast<SocketHandle>(-1)>;
+#else
+using SocketHandle = int;
+using ScopedSocketHandle = ScopedFile;
+#endif
+
+class TaskRunner;
+
+// Use arbitrarily high values to avoid that some code accidentally ends up
+// assuming that these enum values match the sysroot's SOCK_xxx defines rather
+// than using GetSockType() / GetSockFamily().
+enum class SockType { kStream = 100, kDgram, kSeqPacket };
+enum class SockFamily { kUnix = 200, kInet, kInet6 };
+
+// Controls the getsockopt(SO_PEERCRED) behavior, which allows to obtain the
+// peer credentials.
+enum class SockPeerCredMode {
+  // Obtain the peer credentials immediatley after connection and cache them.
+  kReadOnConnect = 0,
+
+  // Don't read peer credentials at all. Calls to peer_uid()/peer_pid() will
+  // hit a DCHECK and return kInvalidUid/Pid in release builds.
+  kIgnore = 1,
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  kDefault = kIgnore,
+#else
+  kDefault = kReadOnConnect,
+#endif
+};
+
+// UnixSocketRaw is a basic wrapper around sockets. It exposes wrapper
+// methods that take care of most common pitfalls (e.g., marking fd as
+// O_CLOEXEC, avoiding SIGPIPE, properly handling partial writes). It is used as
+// a building block for the more sophisticated UnixSocket class which depends
+// on base::TaskRunner.
+class UnixSocketRaw {
+ public:
+  // Creates a new unconnected unix socket.
+  static UnixSocketRaw CreateMayFail(SockFamily family, SockType type);
+
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  // Crates a pair of connected sockets.
+  static std::pair<UnixSocketRaw, UnixSocketRaw> CreatePairPosix(SockFamily,
+                                                                 SockType);
+#endif
+
+  // Creates an uninitialized unix socket.
+  UnixSocketRaw();
+
+  // Creates a unix socket adopting an existing file descriptor. This is
+  // typically used to inherit fds from init via environment variables.
+  UnixSocketRaw(ScopedSocketHandle, SockFamily, SockType);
+
+  ~UnixSocketRaw() = default;
+  UnixSocketRaw(UnixSocketRaw&&) noexcept = default;
+  UnixSocketRaw& operator=(UnixSocketRaw&&) = default;
+
+  bool Bind(const std::string& socket_name);
+  bool Listen();
+  bool Connect(const std::string& socket_name);
+  bool SetTxTimeout(uint32_t timeout_ms);
+  bool SetRxTimeout(uint32_t timeout_ms);
+  void Shutdown();
+  void SetBlocking(bool);
+  void DcheckIsBlocking(bool expected) const;  // No-op on release and Win.
+  void RetainOnExec();
+  SockType type() const { return type_; }
+  SockFamily family() const { return family_; }
+  SocketHandle fd() const { return *fd_; }
+  explicit operator bool() const { return !!fd_; }
+
+  // This is the handle that passed to TaskRunner.AddFileDescriptorWatch().
+  // On UNIX this is just the socket FD. On Windows, we need to create a
+  // dedicated event object.
+  PlatformHandle watch_handle() const {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    return *event_handle_;
+#else
+    return *fd_;
+#endif
+  }
+
+  ScopedSocketHandle ReleaseFd() { return std::move(fd_); }
+
+  // |send_fds| and |num_fds| are ignored on Windows.
+  ssize_t Send(const void* msg,
+               size_t len,
+               const int* send_fds = nullptr,
+               size_t num_fds = 0);
+
+  // |fd_vec| and |max_files| are ignored on Windows.
+  ssize_t Receive(void* msg,
+                  size_t len,
+                  ScopedFile* fd_vec = nullptr,
+                  size_t max_files = 0);
+
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  // UNIX-specific helpers to deal with SCM_RIGHTS.
+
+  // Re-enter sendmsg until all the data has been sent or an error occurs.
+  // TODO(fmayer): Figure out how to do timeouts here for heapprofd.
+  ssize_t SendMsgAllPosix(struct msghdr* msg);
+
+  // Exposed for testing only.
+  // Update msghdr so subsequent sendmsg will send data that remains after n
+  // bytes have already been sent.
+  static void ShiftMsgHdrPosix(size_t n, struct msghdr* msg);
+#endif
+
+ private:
+  UnixSocketRaw(SockFamily, SockType);
+
+  UnixSocketRaw(const UnixSocketRaw&) = delete;
+  UnixSocketRaw& operator=(const UnixSocketRaw&) = delete;
+
+  ScopedSocketHandle fd_;
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  ScopedPlatformHandle event_handle_;
+#endif
+  SockFamily family_ = SockFamily::kUnix;
+  SockType type_ = SockType::kStream;
+};
+
+// A non-blocking UNIX domain socket. Allows also to transfer file descriptors.
+// None of the methods in this class are blocking.
+// The main design goal is making strong guarantees on the EventListener
+// callbacks, in order to avoid ending in some undefined state.
+// In case of any error it will aggressively just shut down the socket and
+// notify the failure with OnConnect(false) or OnDisconnect() depending on the
+// state of the socket (see below).
+// EventListener callbacks stop happening as soon as the instance is destroyed.
+//
+// Lifecycle of a client socket:
+//
+//                           Connect()
+//                               |
+//            +------------------+------------------+
+//            | (success)                           | (failure or Shutdown())
+//            V                                     V
+//     OnConnect(true)                         OnConnect(false)
+//            |
+//            V
+//    OnDataAvailable()
+//            |
+//            V
+//     OnDisconnect()  (failure or shutdown)
+//
+//
+// Lifecycle of a server socket:
+//
+//                          Listen()  --> returns false in case of errors.
+//                             |
+//                             V
+//              OnNewIncomingConnection(new_socket)
+//
+//          (|new_socket| inherits the same EventListener)
+//                             |
+//                             V
+//                     OnDataAvailable()
+//                             | (failure or Shutdown())
+//                             V
+//                       OnDisconnect()
+class PERFETTO_EXPORT UnixSocket {
+ public:
+  class EventListener {
+   public:
+    virtual ~EventListener();
+
+    // After Listen().
+    virtual void OnNewIncomingConnection(
+        UnixSocket* self,
+        std::unique_ptr<UnixSocket> new_connection);
+
+    // After Connect(), whether successful or not.
+    virtual void OnConnect(UnixSocket* self, bool connected);
+
+    // After a successful Connect() or OnNewIncomingConnection(). Either the
+    // other endpoint did disconnect or some other error happened.
+    virtual void OnDisconnect(UnixSocket* self);
+
+    // Whenever there is data available to Receive(). Note that spurious FD
+    // watch events are possible, so it is possible that Receive() soon after
+    // OnDataAvailable() returns 0 (just ignore those).
+    virtual void OnDataAvailable(UnixSocket* self);
+  };
+
+  enum class State {
+    kDisconnected = 0,  // Failed connection, peer disconnection or Shutdown().
+    kConnecting,  // Soon after Connect(), before it either succeeds or fails.
+    kConnected,   // After a successful Connect().
+    kListening    // After Listen(), until Shutdown().
+  };
+
+  // Creates a socket and starts listening. If SockFamily::kUnix and
+  // |socket_name| starts with a '@', an abstract UNIX dmoain socket will be
+  // created instead of a filesystem-linked UNIX socket (Linux/Android only).
+  // If SockFamily::kInet, |socket_name| is host:port (e.g., "1.2.3.4:8000").
+  // If SockFamily::kInet6, |socket_name| is [host]:port (e.g., "[::1]:8000").
+  // Returns nullptr if the socket creation or bind fails. If listening fails,
+  // (e.g. if another socket with the same name is already listening) the
+  // returned socket will have is_listening() == false.
+  static std::unique_ptr<UnixSocket> Listen(const std::string& socket_name,
+                                            EventListener*,
+                                            TaskRunner*,
+                                            SockFamily,
+                                            SockType);
+
+  // Attaches to a pre-existing socket. The socket must have been created in
+  // SOCK_STREAM mode and the caller must have called bind() on it.
+  static std::unique_ptr<UnixSocket> Listen(ScopedSocketHandle,
+                                            EventListener*,
+                                            TaskRunner*,
+                                            SockFamily,
+                                            SockType);
+
+  // Creates a Unix domain socket and connects to the listening endpoint.
+  // Returns always an instance. EventListener::OnConnect(bool success) will
+  // be called always, whether the connection succeeded or not.
+  static std::unique_ptr<UnixSocket> Connect(
+      const std::string& socket_name,
+      EventListener*,
+      TaskRunner*,
+      SockFamily,
+      SockType,
+      SockPeerCredMode = SockPeerCredMode::kDefault);
+
+  // Constructs a UnixSocket using the given connected socket.
+  static std::unique_ptr<UnixSocket> AdoptConnected(
+      ScopedSocketHandle,
+      EventListener*,
+      TaskRunner*,
+      SockFamily,
+      SockType,
+      SockPeerCredMode = SockPeerCredMode::kDefault);
+
+  UnixSocket(const UnixSocket&) = delete;
+  UnixSocket& operator=(const UnixSocket&) = delete;
+  // Cannot be easily moved because of tasks from the FileDescriptorWatch.
+  UnixSocket(UnixSocket&&) = delete;
+  UnixSocket& operator=(UnixSocket&&) = delete;
+
+  // This class gives the hard guarantee that no callback is called on the
+  // passed EventListener immediately after the object has been destroyed.
+  // Any queued callback will be silently dropped.
+  ~UnixSocket();
+
+  // Shuts down the current connection, if any. If the socket was Listen()-ing,
+  // stops listening. The socket goes back to kNotInitialized state, so it can
+  // be reused with Listen() or Connect().
+  void Shutdown(bool notify);
+
+  void SetTxTimeout(uint32_t timeout_ms) {
+    PERFETTO_CHECK(sock_raw_.SetTxTimeout(timeout_ms));
+  }
+  void SetRxTimeout(uint32_t timeout_ms) {
+    PERFETTO_CHECK(sock_raw_.SetRxTimeout(timeout_ms));
+  }
+  // Returns true is the message was queued, false if there was no space in the
+  // output buffer, in which case the client should retry or give up.
+  // If any other error happens the socket will be shutdown and
+  // EventListener::OnDisconnect() will be called.
+  // If the socket is not connected, Send() will just return false.
+  // Does not append a null string terminator to msg in any case.
+  bool Send(const void* msg, size_t len, const int* send_fds, size_t num_fds);
+
+  inline bool Send(const void* msg, size_t len, int send_fd = -1) {
+    if (send_fd != -1)
+      return Send(msg, len, &send_fd, 1);
+    return Send(msg, len, nullptr, 0);
+  }
+
+  inline bool Send(const std::string& msg) {
+    return Send(msg.c_str(), msg.size() + 1, -1);
+  }
+
+  // Returns the number of bytes (<= |len|) written in |msg| or 0 if there
+  // is no data in the buffer to read or an error occurs (in which case a
+  // EventListener::OnDisconnect() will follow).
+  // If the ScopedFile pointer is not null and a FD is received, it moves the
+  // received FD into that. If a FD is received but the ScopedFile pointer is
+  // null, the FD will be automatically closed.
+  size_t Receive(void* msg, size_t len, ScopedFile*, size_t max_files = 1);
+
+  inline size_t Receive(void* msg, size_t len) {
+    return Receive(msg, len, nullptr, 0);
+  }
+
+  // Only for tests. This is slower than Receive() as it requires a heap
+  // allocation and a copy for the std::string. Guarantees that the returned
+  // string is null terminated even if the underlying message sent by the peer
+  // is not.
+  std::string ReceiveString(size_t max_length = 1024);
+
+  bool is_connected() const { return state_ == State::kConnected; }
+  bool is_listening() const { return state_ == State::kListening; }
+  SocketHandle fd() const { return sock_raw_.fd(); }
+
+  // User ID of the peer, as returned by the kernel. If the client disconnects
+  // and the socket goes into the kDisconnected state, it retains the uid of
+  // the last peer.
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  uid_t peer_uid_posix(bool skip_check_for_testing = false) const {
+    PERFETTO_DCHECK((!is_listening() && peer_uid_ != kInvalidUid) ||
+                    skip_check_for_testing);
+
+    return peer_uid_;
+  }
+#endif
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+  // Process ID of the peer, as returned by the kernel. If the client
+  // disconnects and the socket goes into the kDisconnected state, it
+  // retains the pid of the last peer.
+  //
+  // This is only available on Linux / Android.
+  pid_t peer_pid_linux(bool skip_check_for_testing = false) const {
+    PERFETTO_DCHECK((!is_listening() && peer_pid_ != kInvalidPid) ||
+                    skip_check_for_testing);
+    return peer_pid_;
+  }
+#endif
+
+  // This makes the UnixSocket unusable.
+  UnixSocketRaw ReleaseSocket();
+
+ private:
+  UnixSocket(EventListener*,
+             TaskRunner*,
+             SockFamily,
+             SockType,
+             SockPeerCredMode);
+  UnixSocket(EventListener*,
+             TaskRunner*,
+             ScopedSocketHandle,
+             State,
+             SockFamily,
+             SockType,
+             SockPeerCredMode);
+
+  // Called once by the corresponding public static factory methods.
+  void DoConnect(const std::string& socket_name);
+
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  void ReadPeerCredentialsPosix();
+#endif
+
+  void OnEvent();
+  void NotifyConnectionState(bool success);
+
+  UnixSocketRaw sock_raw_;
+  State state_ = State::kDisconnected;
+  SockPeerCredMode peer_cred_mode_ = SockPeerCredMode::kDefault;
+
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  uid_t peer_uid_ = kInvalidUid;
+#endif
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+  pid_t peer_pid_ = kInvalidPid;
+#endif
+  EventListener* const event_listener_;
+  TaskRunner* const task_runner_;
+  WeakPtrFactory<UnixSocket> weak_ptr_factory_;  // Keep last.
+};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_BASE_UNIX_SOCKET_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+// The include order matters on these three Windows header groups.
+#include <Windows.h>
+
+#include <WS2tcpip.h>
+#include <WinSock2.h>
+
+#include <afunix.h>
+#else
+#include <netdb.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <unistd.h>
+#endif
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+#include <sys/ucred.h>
+#endif
+
+#include <algorithm>
+#include <memory>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/string_utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+
+namespace perfetto {
+namespace base {
+
+// The CMSG_* macros use NULL instead of nullptr.
+// Note: MSVC doesn't have #pragma GCC diagnostic, hence the if __GNUC__.
+#if defined(__GNUC__) && !PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wzero-as-null-pointer-constant"
+#endif
+
+namespace {
+
+// MSG_NOSIGNAL is not supported on Mac OS X, but in that case the socket is
+// created with SO_NOSIGPIPE (See InitializeSocket()).
+// On Windows this does't apply as signals don't exist.
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+constexpr int kNoSigPipe = 0;
+#else
+constexpr int kNoSigPipe = MSG_NOSIGNAL;
+#endif
+
+// Android takes an int instead of socklen_t for the control buffer size.
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+using CBufLenType = size_t;
+#else
+using CBufLenType = socklen_t;
+#endif
+
+// A wrapper around variable-size sockaddr structs.
+// This is solving the following problem: when calling connect() or bind(), the
+// caller needs to take care to allocate the right struct (sockaddr_un for
+// AF_UNIX, sockaddr_in for AF_INET).   Those structs have different sizes and,
+// more importantly, are bigger than the base struct sockaddr.
+struct SockaddrAny {
+  SockaddrAny() : size() {}
+  SockaddrAny(const void* addr, socklen_t sz)
+      : data(new char[static_cast<size_t>(sz)]), size(sz) {
+    memcpy(data.get(), addr, static_cast<size_t>(size));
+  }
+
+  const struct sockaddr* addr() const {
+    return reinterpret_cast<const struct sockaddr*>(data.get());
+  }
+
+  std::unique_ptr<char[]> data;
+  socklen_t size;
+};
+
+inline int GetSockFamily(SockFamily family) {
+  switch (family) {
+    case SockFamily::kUnix:
+      return AF_UNIX;
+    case SockFamily::kInet:
+      return AF_INET;
+    case SockFamily::kInet6:
+      return AF_INET6;
+  }
+  PERFETTO_CHECK(false);  // For GCC.
+}
+
+inline int GetSockType(SockType type) {
+#ifdef SOCK_CLOEXEC
+  constexpr int kSockCloExec = SOCK_CLOEXEC;
+#else
+  constexpr int kSockCloExec = 0;
+#endif
+  switch (type) {
+    case SockType::kStream:
+      return SOCK_STREAM | kSockCloExec;
+    case SockType::kDgram:
+      return SOCK_DGRAM | kSockCloExec;
+    case SockType::kSeqPacket:
+      return SOCK_SEQPACKET | kSockCloExec;
+  }
+  PERFETTO_CHECK(false);  // For GCC.
+}
+
+SockaddrAny MakeSockAddr(SockFamily family, const std::string& socket_name) {
+  switch (family) {
+    case SockFamily::kUnix: {
+      struct sockaddr_un saddr {};
+      const size_t name_len = socket_name.size();
+      if (name_len >= sizeof(saddr.sun_path)) {
+        errno = ENAMETOOLONG;
+        return SockaddrAny();
+      }
+      memcpy(saddr.sun_path, socket_name.data(), name_len);
+      if (saddr.sun_path[0] == '@') {
+        saddr.sun_path[0] = '\0';
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+        // The MSDN blog claims that abstract (non-filesystem based) AF_UNIX
+        // socket are supported, but that doesn't seem true.
+        PERFETTO_ELOG(
+            "Abstract AF_UNIX sockets are not supported on Windows, see "
+            "https://github.com/microsoft/WSL/issues/4240");
+        return SockaddrAny{};
+#endif
+      }
+      saddr.sun_family = AF_UNIX;
+      auto size = static_cast<socklen_t>(
+          __builtin_offsetof(sockaddr_un, sun_path) + name_len + 1);
+      PERFETTO_CHECK(static_cast<size_t>(size) <= sizeof(saddr));
+      return SockaddrAny(&saddr, size);
+    }
+    case SockFamily::kInet: {
+      auto parts = SplitString(socket_name, ":");
+      PERFETTO_CHECK(parts.size() == 2);
+      struct addrinfo* addr_info = nullptr;
+      struct addrinfo hints {};
+      hints.ai_family = AF_INET;
+      PERFETTO_CHECK(getaddrinfo(parts[0].c_str(), parts[1].c_str(), &hints,
+                                 &addr_info) == 0);
+      PERFETTO_CHECK(addr_info->ai_family == AF_INET);
+      SockaddrAny res(addr_info->ai_addr,
+                      static_cast<socklen_t>(addr_info->ai_addrlen));
+      freeaddrinfo(addr_info);
+      return res;
+    }
+    case SockFamily::kInet6: {
+      auto parts = SplitString(socket_name, "]");
+      PERFETTO_CHECK(parts.size() == 2);
+      auto address = SplitString(parts[0], "[");
+      PERFETTO_CHECK(address.size() == 1);
+      auto port = SplitString(parts[1], ":");
+      PERFETTO_CHECK(port.size() == 1);
+      struct addrinfo* addr_info = nullptr;
+      struct addrinfo hints {};
+      hints.ai_family = AF_INET6;
+      PERFETTO_CHECK(getaddrinfo(address[0].c_str(), port[0].c_str(), &hints,
+                                 &addr_info) == 0);
+      PERFETTO_CHECK(addr_info->ai_family == AF_INET6);
+      SockaddrAny res(addr_info->ai_addr,
+                      static_cast<socklen_t>(addr_info->ai_addrlen));
+      freeaddrinfo(addr_info);
+      return res;
+    }
+  }
+  PERFETTO_CHECK(false);  // For GCC.
+}
+
+ScopedSocketHandle CreateSocketHandle(SockFamily family, SockType type) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  static bool init_winsock_once = [] {
+    WSADATA ignored{};
+    return WSAStartup(MAKEWORD(2, 2), &ignored) == 0;
+  }();
+  PERFETTO_CHECK(init_winsock_once);
+#endif
+  return ScopedSocketHandle(
+      socket(GetSockFamily(family), GetSockType(type), 0));
+}
+
+}  // namespace
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+int CloseSocket(SocketHandle s) {
+  return ::closesocket(s);
+}
+#endif
+
+// +-----------------------+
+// | UnixSocketRaw methods |
+// +-----------------------+
+
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+// static
+void UnixSocketRaw::ShiftMsgHdrPosix(size_t n, struct msghdr* msg) {
+  using LenType = decltype(msg->msg_iovlen);  // Mac and Linux don't agree.
+  for (LenType i = 0; i < msg->msg_iovlen; ++i) {
+    struct iovec* vec = &msg->msg_iov[i];
+    if (n < vec->iov_len) {
+      // We sent a part of this iovec.
+      vec->iov_base = reinterpret_cast<char*>(vec->iov_base) + n;
+      vec->iov_len -= n;
+      msg->msg_iov = vec;
+      msg->msg_iovlen -= i;
+      return;
+    }
+    // We sent the whole iovec.
+    n -= vec->iov_len;
+  }
+  // We sent all the iovecs.
+  PERFETTO_CHECK(n == 0);
+  msg->msg_iovlen = 0;
+  msg->msg_iov = nullptr;
+}
+
+// static
+std::pair<UnixSocketRaw, UnixSocketRaw> UnixSocketRaw::CreatePairPosix(
+    SockFamily family,
+    SockType type) {
+  int fds[2];
+  if (socketpair(GetSockFamily(family), GetSockType(type), 0, fds) != 0)
+    return std::make_pair(UnixSocketRaw(), UnixSocketRaw());
+
+  return std::make_pair(UnixSocketRaw(ScopedFile(fds[0]), family, type),
+                        UnixSocketRaw(ScopedFile(fds[1]), family, type));
+}
+#endif
+
+// static
+UnixSocketRaw UnixSocketRaw::CreateMayFail(SockFamily family, SockType type) {
+  auto fd = CreateSocketHandle(family, type);
+  if (!fd)
+    return UnixSocketRaw();
+  return UnixSocketRaw(std::move(fd), family, type);
+}
+
+UnixSocketRaw::UnixSocketRaw() = default;
+
+UnixSocketRaw::UnixSocketRaw(SockFamily family, SockType type)
+    : UnixSocketRaw(CreateSocketHandle(family, type), family, type) {}
+
+UnixSocketRaw::UnixSocketRaw(ScopedSocketHandle fd,
+                             SockFamily family,
+                             SockType type)
+    : fd_(std::move(fd)), family_(family), type_(type) {
+  PERFETTO_CHECK(fd_);
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+  const int no_sigpipe = 1;
+  setsockopt(*fd_, SOL_SOCKET, SO_NOSIGPIPE, &no_sigpipe, sizeof(no_sigpipe));
+#endif
+
+  if (family == SockFamily::kInet || family == SockFamily::kInet6) {
+    int flag = 1;
+    // The reinterpret_cast<const char*> is needed for Windows, where the 4th
+    // arg is a const char* (on other POSIX system is a const void*).
+    PERFETTO_CHECK(!setsockopt(*fd_, SOL_SOCKET, SO_REUSEADDR,
+                               reinterpret_cast<const char*>(&flag),
+                               sizeof(flag)));
+    flag = 1;
+    // Disable Nagle's algorithm, optimize for low-latency.
+    // See https://github.com/google/perfetto/issues/70.
+    setsockopt(*fd_, IPPROTO_TCP, TCP_NODELAY,
+               reinterpret_cast<const char*>(&flag), sizeof(flag));
+  }
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  // We use one event handle for all socket events, to stay consistent to what
+  // we do on UNIX with the base::TaskRunner's poll().
+  event_handle_.reset(WSACreateEvent());
+  PERFETTO_CHECK(event_handle_);
+#else
+  // There is no reason why a socket should outlive the process in case of
+  // exec() by default, this is just working around a broken unix design.
+  int fcntl_res = fcntl(*fd_, F_SETFD, FD_CLOEXEC);
+  PERFETTO_CHECK(fcntl_res == 0);
+#endif
+}
+
+void UnixSocketRaw::SetBlocking(bool is_blocking) {
+  PERFETTO_DCHECK(fd_);
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  unsigned long flag = is_blocking ? 0 : 1;  // FIONBIO has reverse logic.
+  if (is_blocking) {
+    // When switching between non-blocking -> blocking mode, we need to reset
+    // the event handle registration, otherwise the call will fail.
+    PERFETTO_CHECK(WSAEventSelect(*fd_, *event_handle_, 0) == 0);
+  }
+  PERFETTO_CHECK(ioctlsocket(*fd_, static_cast<long>(FIONBIO), &flag) == 0);
+  if (!is_blocking) {
+    PERFETTO_CHECK(
+        WSAEventSelect(*fd_, *event_handle_,
+                       FD_ACCEPT | FD_CONNECT | FD_READ | FD_CLOSE) == 0);
+  }
+#else
+  int flags = fcntl(*fd_, F_GETFL, 0);
+  if (!is_blocking) {
+    flags |= O_NONBLOCK;
+  } else {
+    flags &= ~static_cast<int>(O_NONBLOCK);
+  }
+  int fcntl_res = fcntl(*fd_, F_SETFL, flags);
+  PERFETTO_CHECK(fcntl_res == 0);
+#endif
+}
+
+void UnixSocketRaw::RetainOnExec() {
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  PERFETTO_DCHECK(fd_);
+  int flags = fcntl(*fd_, F_GETFD, 0);
+  flags &= ~static_cast<int>(FD_CLOEXEC);
+  int fcntl_res = fcntl(*fd_, F_SETFD, flags);
+  PERFETTO_CHECK(fcntl_res == 0);
+#endif
+}
+
+void UnixSocketRaw::DcheckIsBlocking(bool expected) const {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  ignore_result(expected);
+#else
+  PERFETTO_DCHECK(fd_);
+  bool is_blocking = (fcntl(*fd_, F_GETFL, 0) & O_NONBLOCK) == 0;
+  PERFETTO_DCHECK(is_blocking == expected);
+#endif
+}
+
+bool UnixSocketRaw::Bind(const std::string& socket_name) {
+  PERFETTO_DCHECK(fd_);
+  SockaddrAny addr = MakeSockAddr(family_, socket_name);
+  if (addr.size == 0)
+    return false;
+
+  if (bind(*fd_, addr.addr(), addr.size)) {
+    PERFETTO_DPLOG("bind(%s)", socket_name.c_str());
+    return false;
+  }
+
+  return true;
+}
+
+bool UnixSocketRaw::Listen() {
+  PERFETTO_DCHECK(fd_);
+  PERFETTO_DCHECK(type_ == SockType::kStream || type_ == SockType::kSeqPacket);
+  return listen(*fd_, SOMAXCONN) == 0;
+}
+
+bool UnixSocketRaw::Connect(const std::string& socket_name) {
+  PERFETTO_DCHECK(fd_);
+  SockaddrAny addr = MakeSockAddr(family_, socket_name);
+  if (addr.size == 0)
+    return false;
+
+  int res = PERFETTO_EINTR(connect(*fd_, addr.addr(), addr.size));
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  bool continue_async = WSAGetLastError() == WSAEWOULDBLOCK;
+#else
+  bool continue_async = errno == EINPROGRESS;
+#endif
+  if (res && !continue_async)
+    return false;
+
+  return true;
+}
+
+void UnixSocketRaw::Shutdown() {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  // Somebody felt very strongly about the naming of this constant.
+  shutdown(*fd_, SD_BOTH);
+#else
+  shutdown(*fd_, SHUT_RDWR);
+#endif
+  fd_.reset();
+}
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+
+ssize_t UnixSocketRaw::Send(const void* msg,
+                            size_t len,
+                            const int* /*send_fds*/,
+                            size_t num_fds) {
+  PERFETTO_DCHECK(num_fds == 0);
+  return sendto(*fd_, static_cast<const char*>(msg), static_cast<int>(len), 0,
+                nullptr, 0);
+}
+
+ssize_t UnixSocketRaw::Receive(void* msg,
+                               size_t len,
+                               ScopedFile* /*fd_vec*/,
+                               size_t /*max_files*/) {
+  return recv(*fd_, static_cast<char*>(msg), static_cast<int>(len), 0);
+}
+
+#else
+// For the interested reader, Linux kernel dive to verify this is not only a
+// theoretical possibility: sock_stream_sendmsg, if sock_alloc_send_pskb returns
+// NULL [1] (which it does when it gets interrupted [2]), returns early with the
+// amount of bytes already sent.
+//
+// [1]:
+// https://elixir.bootlin.com/linux/v4.18.10/source/net/unix/af_unix.c#L1872
+// [2]: https://elixir.bootlin.com/linux/v4.18.10/source/net/core/sock.c#L2101
+ssize_t UnixSocketRaw::SendMsgAllPosix(struct msghdr* msg) {
+  // This does not make sense on non-blocking sockets.
+  PERFETTO_DCHECK(fd_);
+
+  ssize_t total_sent = 0;
+  while (msg->msg_iov) {
+    ssize_t sent = PERFETTO_EINTR(sendmsg(*fd_, msg, kNoSigPipe));
+    if (sent <= 0) {
+      if (sent == -1 && IsAgain(errno))
+        return total_sent;
+      return sent;
+    }
+    total_sent += sent;
+    ShiftMsgHdrPosix(static_cast<size_t>(sent), msg);
+    // Only send the ancillary data with the first sendmsg call.
+    msg->msg_control = nullptr;
+    msg->msg_controllen = 0;
+  }
+  return total_sent;
+}
+
+ssize_t UnixSocketRaw::Send(const void* msg,
+                            size_t len,
+                            const int* send_fds,
+                            size_t num_fds) {
+  PERFETTO_DCHECK(fd_);
+  msghdr msg_hdr = {};
+  iovec iov = {const_cast<void*>(msg), len};
+  msg_hdr.msg_iov = &iov;
+  msg_hdr.msg_iovlen = 1;
+  alignas(cmsghdr) char control_buf[256];
+
+  if (num_fds > 0) {
+    const auto raw_ctl_data_sz = num_fds * sizeof(int);
+    const CBufLenType control_buf_len =
+        static_cast<CBufLenType>(CMSG_SPACE(raw_ctl_data_sz));
+    PERFETTO_CHECK(control_buf_len <= sizeof(control_buf));
+    memset(control_buf, 0, sizeof(control_buf));
+    msg_hdr.msg_control = control_buf;
+    msg_hdr.msg_controllen = control_buf_len;  // used by CMSG_FIRSTHDR
+    struct cmsghdr* cmsg = CMSG_FIRSTHDR(&msg_hdr);
+    cmsg->cmsg_level = SOL_SOCKET;
+    cmsg->cmsg_type = SCM_RIGHTS;
+    cmsg->cmsg_len = static_cast<CBufLenType>(CMSG_LEN(raw_ctl_data_sz));
+    memcpy(CMSG_DATA(cmsg), send_fds, num_fds * sizeof(int));
+    // note: if we were to send multiple cmsghdr structures, then
+    // msg_hdr.msg_controllen would need to be adjusted, see "man 3 cmsg".
+  }
+
+  return SendMsgAllPosix(&msg_hdr);
+}
+
+ssize_t UnixSocketRaw::Receive(void* msg,
+                               size_t len,
+                               ScopedFile* fd_vec,
+                               size_t max_files) {
+  PERFETTO_DCHECK(fd_);
+  msghdr msg_hdr = {};
+  iovec iov = {msg, len};
+  msg_hdr.msg_iov = &iov;
+  msg_hdr.msg_iovlen = 1;
+  alignas(cmsghdr) char control_buf[256];
+
+  if (max_files > 0) {
+    msg_hdr.msg_control = control_buf;
+    msg_hdr.msg_controllen =
+        static_cast<CBufLenType>(CMSG_SPACE(max_files * sizeof(int)));
+    PERFETTO_CHECK(msg_hdr.msg_controllen <= sizeof(control_buf));
+  }
+  const ssize_t sz = PERFETTO_EINTR(recvmsg(*fd_, &msg_hdr, 0));
+  if (sz <= 0) {
+    return sz;
+  }
+  PERFETTO_CHECK(static_cast<size_t>(sz) <= len);
+
+  int* fds = nullptr;
+  uint32_t fds_len = 0;
+
+  if (max_files > 0) {
+    for (cmsghdr* cmsg = CMSG_FIRSTHDR(&msg_hdr); cmsg;
+         cmsg = CMSG_NXTHDR(&msg_hdr, cmsg)) {
+      const size_t payload_len = cmsg->cmsg_len - CMSG_LEN(0);
+      if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
+        PERFETTO_DCHECK(payload_len % sizeof(int) == 0u);
+        PERFETTO_CHECK(fds == nullptr);
+        fds = reinterpret_cast<int*>(CMSG_DATA(cmsg));
+        fds_len = static_cast<uint32_t>(payload_len / sizeof(int));
+      }
+    }
+  }
+
+  if (msg_hdr.msg_flags & MSG_TRUNC || msg_hdr.msg_flags & MSG_CTRUNC) {
+    for (size_t i = 0; fds && i < fds_len; ++i)
+      close(fds[i]);
+    PERFETTO_ELOG(
+        "Socket message truncated. This might be due to a SELinux denial on "
+        "fd:use.");
+    errno = EMSGSIZE;
+    return -1;
+  }
+
+  for (size_t i = 0; fds && i < fds_len; ++i) {
+    if (i < max_files)
+      fd_vec[i].reset(fds[i]);
+    else
+      close(fds[i]);
+  }
+
+  return sz;
+}
+#endif  // OS_WIN
+
+bool UnixSocketRaw::SetTxTimeout(uint32_t timeout_ms) {
+  PERFETTO_DCHECK(fd_);
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  DWORD timeout = timeout_ms;
+#else
+  struct timeval timeout {};
+  uint32_t timeout_sec = timeout_ms / 1000;
+  timeout.tv_sec = static_cast<decltype(timeout.tv_sec)>(timeout_sec);
+  timeout.tv_usec = static_cast<decltype(timeout.tv_usec)>(
+      (timeout_ms - (timeout_sec * 1000)) * 1000);
+#endif
+  return setsockopt(*fd_, SOL_SOCKET, SO_SNDTIMEO,
+                    reinterpret_cast<const char*>(&timeout),
+                    sizeof(timeout)) == 0;
+}
+
+bool UnixSocketRaw::SetRxTimeout(uint32_t timeout_ms) {
+  PERFETTO_DCHECK(fd_);
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  DWORD timeout = timeout_ms;
+#else
+  struct timeval timeout {};
+  uint32_t timeout_sec = timeout_ms / 1000;
+  timeout.tv_sec = static_cast<decltype(timeout.tv_sec)>(timeout_sec);
+  timeout.tv_usec = static_cast<decltype(timeout.tv_usec)>(
+      (timeout_ms - (timeout_sec * 1000)) * 1000);
+#endif
+  return setsockopt(*fd_, SOL_SOCKET, SO_RCVTIMEO,
+                    reinterpret_cast<const char*>(&timeout),
+                    sizeof(timeout)) == 0;
+}
+
+#if defined(__GNUC__) && !PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+#pragma GCC diagnostic pop
+#endif
+
+// +--------------------+
+// | UnixSocket methods |
+// +--------------------+
+
+// TODO(primiano): Add ThreadChecker to methods of this class.
+
+// static
+std::unique_ptr<UnixSocket> UnixSocket::Listen(const std::string& socket_name,
+                                               EventListener* event_listener,
+                                               TaskRunner* task_runner,
+                                               SockFamily sock_family,
+                                               SockType sock_type) {
+  auto sock_raw = UnixSocketRaw::CreateMayFail(sock_family, sock_type);
+  if (!sock_raw || !sock_raw.Bind(socket_name))
+    return nullptr;
+
+  // Forward the call to the Listen() overload below.
+  return Listen(sock_raw.ReleaseFd(), event_listener, task_runner, sock_family,
+                sock_type);
+}
+
+// static
+std::unique_ptr<UnixSocket> UnixSocket::Listen(ScopedSocketHandle fd,
+                                               EventListener* event_listener,
+                                               TaskRunner* task_runner,
+                                               SockFamily sock_family,
+                                               SockType sock_type) {
+  return std::unique_ptr<UnixSocket>(new UnixSocket(
+      event_listener, task_runner, std::move(fd), State::kListening,
+      sock_family, sock_type, SockPeerCredMode::kDefault));
+}
+
+// static
+std::unique_ptr<UnixSocket> UnixSocket::Connect(
+    const std::string& socket_name,
+    EventListener* event_listener,
+    TaskRunner* task_runner,
+    SockFamily sock_family,
+    SockType sock_type,
+    SockPeerCredMode peer_cred_mode) {
+  std::unique_ptr<UnixSocket> sock(new UnixSocket(
+      event_listener, task_runner, sock_family, sock_type, peer_cred_mode));
+  sock->DoConnect(socket_name);
+  return sock;
+}
+
+// static
+std::unique_ptr<UnixSocket> UnixSocket::AdoptConnected(
+    ScopedSocketHandle fd,
+    EventListener* event_listener,
+    TaskRunner* task_runner,
+    SockFamily sock_family,
+    SockType sock_type,
+    SockPeerCredMode peer_cred_mode) {
+  return std::unique_ptr<UnixSocket>(new UnixSocket(
+      event_listener, task_runner, std::move(fd), State::kConnected,
+      sock_family, sock_type, peer_cred_mode));
+}
+
+UnixSocket::UnixSocket(EventListener* event_listener,
+                       TaskRunner* task_runner,
+                       SockFamily sock_family,
+                       SockType sock_type,
+                       SockPeerCredMode peer_cred_mode)
+    : UnixSocket(event_listener,
+                 task_runner,
+                 ScopedSocketHandle(),
+                 State::kDisconnected,
+                 sock_family,
+                 sock_type,
+                 peer_cred_mode) {}
+
+UnixSocket::UnixSocket(EventListener* event_listener,
+                       TaskRunner* task_runner,
+                       ScopedSocketHandle adopt_fd,
+                       State adopt_state,
+                       SockFamily sock_family,
+                       SockType sock_type,
+                       SockPeerCredMode peer_cred_mode)
+    : peer_cred_mode_(peer_cred_mode),
+      event_listener_(event_listener),
+      task_runner_(task_runner),
+      weak_ptr_factory_(this) {
+  state_ = State::kDisconnected;
+  if (adopt_state == State::kDisconnected) {
+    PERFETTO_DCHECK(!adopt_fd);
+    sock_raw_ = UnixSocketRaw::CreateMayFail(sock_family, sock_type);
+    if (!sock_raw_)
+      return;
+  } else if (adopt_state == State::kConnected) {
+    PERFETTO_DCHECK(adopt_fd);
+    sock_raw_ = UnixSocketRaw(std::move(adopt_fd), sock_family, sock_type);
+    state_ = State::kConnected;
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    if (peer_cred_mode_ == SockPeerCredMode::kReadOnConnect)
+      ReadPeerCredentialsPosix();
+#endif
+  } else if (adopt_state == State::kListening) {
+    // We get here from Listen().
+
+    // |adopt_fd| might genuinely be invalid if the bind() failed.
+    if (!adopt_fd)
+      return;
+
+    sock_raw_ = UnixSocketRaw(std::move(adopt_fd), sock_family, sock_type);
+    if (!sock_raw_.Listen()) {
+      PERFETTO_DPLOG("listen() failed");
+      return;
+    }
+    state_ = State::kListening;
+  } else {
+    PERFETTO_FATAL("Unexpected adopt_state");  // Unfeasible.
+  }
+
+  PERFETTO_CHECK(sock_raw_);
+
+  sock_raw_.SetBlocking(false);
+
+  WeakPtr<UnixSocket> weak_ptr = weak_ptr_factory_.GetWeakPtr();
+
+  task_runner_->AddFileDescriptorWatch(sock_raw_.watch_handle(), [weak_ptr] {
+    if (weak_ptr)
+      weak_ptr->OnEvent();
+  });
+}
+
+UnixSocket::~UnixSocket() {
+  // The implicit dtor of |weak_ptr_factory_| will no-op pending callbacks.
+  Shutdown(true);
+}
+
+UnixSocketRaw UnixSocket::ReleaseSocket() {
+  // This will invalidate any pending calls to OnEvent.
+  state_ = State::kDisconnected;
+  if (sock_raw_)
+    task_runner_->RemoveFileDescriptorWatch(sock_raw_.watch_handle());
+
+  return std::move(sock_raw_);
+}
+
+// Called only by the Connect() static constructor.
+void UnixSocket::DoConnect(const std::string& socket_name) {
+  PERFETTO_DCHECK(state_ == State::kDisconnected);
+
+  // This is the only thing that can gracefully fail in the ctor.
+  if (!sock_raw_)
+    return NotifyConnectionState(false);
+
+  if (!sock_raw_.Connect(socket_name))
+    return NotifyConnectionState(false);
+
+  // At this point either connect() succeeded or started asynchronously
+  // (errno = EINPROGRESS).
+  state_ = State::kConnecting;
+
+  // Even if the socket is non-blocking, connecting to a UNIX socket can be
+  // acknowledged straight away rather than returning EINPROGRESS.
+  // The decision here is to deal with the two cases uniformly, at the cost of
+  // delaying the straight-away-connect() case by one task, to avoid depending
+  // on implementation details of UNIX socket on the various OSes.
+  // Posting the OnEvent() below emulates a wakeup of the FD watch. OnEvent(),
+  // which knows how to deal with spurious wakeups, will poll the SO_ERROR and
+  // evolve, if necessary, the state into either kConnected or kDisconnected.
+  WeakPtr<UnixSocket> weak_ptr = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostTask([weak_ptr] {
+    if (weak_ptr)
+      weak_ptr->OnEvent();
+  });
+}
+
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+void UnixSocket::ReadPeerCredentialsPosix() {
+  // Peer credentials are supported only on AF_UNIX sockets.
+  if (sock_raw_.family() != SockFamily::kUnix)
+    return;
+  PERFETTO_CHECK(peer_cred_mode_ != SockPeerCredMode::kIgnore);
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+  struct ucred user_cred;
+  socklen_t len = sizeof(user_cred);
+  int fd = sock_raw_.fd();
+  int res = getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &user_cred, &len);
+  PERFETTO_CHECK(res == 0);
+  peer_uid_ = user_cred.uid;
+  peer_pid_ = user_cred.pid;
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+  struct xucred user_cred;
+  socklen_t len = sizeof(user_cred);
+  int res = getsockopt(sock_raw_.fd(), 0, LOCAL_PEERCRED, &user_cred, &len);
+  PERFETTO_CHECK(res == 0 && user_cred.cr_version == XUCRED_VERSION);
+  peer_uid_ = static_cast<uid_t>(user_cred.cr_uid);
+  // There is no pid in the LOCAL_PEERCREDS for MacOS / FreeBSD.
+#endif
+}
+#endif  // !OS_WIN
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+void UnixSocket::OnEvent() {
+  WSANETWORKEVENTS evts{};
+  PERFETTO_CHECK(WSAEnumNetworkEvents(sock_raw_.fd(), sock_raw_.watch_handle(),
+                                      &evts) == 0);
+  if (state_ == State::kDisconnected)
+    return;  // Some spurious event, typically queued just before Shutdown().
+
+  if (state_ == State::kConnecting && (evts.lNetworkEvents & FD_CONNECT)) {
+    PERFETTO_DCHECK(sock_raw_);
+    int err = evts.iErrorCode[FD_CONNECT_BIT];
+    if (err) {
+      PERFETTO_DPLOG("Connection error: %d", err);
+      Shutdown(false);
+      event_listener_->OnConnect(this, false /* connected */);
+      return;
+    }
+
+    // kReadOnConnect is not supported on Windows.
+    PERFETTO_DCHECK(peer_cred_mode_ != SockPeerCredMode::kReadOnConnect);
+    state_ = State::kConnected;
+    event_listener_->OnConnect(this, true /* connected */);
+  }
+
+  // This is deliberately NOT an else-if. When a client socket connects and
+  // there is already data queued, the following will happen within the same
+  // OnEvent() call:
+  // 1. The block above will transition kConnecting -> kConnected.
+  // 2. This block will cause an OnDataAvailable() call.
+  // Unlike UNIX, where poll() keeps signalling the event until the client
+  // does a recv(), Windows is more picky and stops signalling the event until
+  // the next call to recv() is made. In other words, in Windows we cannot
+  // miss an OnDataAvailable() call or the event pump will stop.
+  if (state_ == State::kConnected) {
+    if (evts.lNetworkEvents & FD_READ) {
+      event_listener_->OnDataAvailable(this);
+      // TODO(primiano): I am very conflicted here. Because of the behavior
+      // described above, if the event listener doesn't do a Recv() call in
+      // the OnDataAvailable() callback, WinSock won't notify the event ever
+      // again. On one side, I don't see any reason why a client should decide
+      // to not do a Recv() in OnDataAvailable. On the other side, the
+      // behavior here diverges from UNIX, where OnDataAvailable() would be
+      // re-posted immediately. In both cases, not doing a Recv() in
+      // OnDataAvailable, leads to something bad (getting stuck on Windows,
+      // getting in a hot loop on Linux), so doesn't feel we should worry too
+      // much about this. If we wanted to keep the behavrior consistent, here
+      // we should do something like: `if (sock_raw_)
+      // sock_raw_.SetBlocking(false)` (Note that the socket might be closed
+      // by the time we come back here, hence the if part).
+      return;
+    }
+    // Could read EOF and disconnect here.
+    if (evts.lNetworkEvents & FD_CLOSE) {
+      Shutdown(true);
+      return;
+    }
+  }
+
+  // New incoming connection.
+  if (state_ == State::kListening && (evts.lNetworkEvents & FD_ACCEPT)) {
+    // There could be more than one incoming connection behind each FD watch
+    // notification. Drain'em all.
+    for (;;) {
+      // Note: right now we don't need the remote endpoint, hence we pass
+      // nullptr to |addr| and |addrlen|. If we ever need to do so, be
+      // extremely careful. Windows' WinSock API will happily write more than
+      // |addrlen| (hence corrupt the stack) if the |addr| argument passed is
+      // not big enough (e.g. passing a struct sockaddr_in to a AF_UNIX
+      // socket, where sizeof(sockaddr_un) is >> sizef(sockaddr_in)). It seems
+      // a Windows / CRT bug in the AF_UNIX implementation.
+      ScopedSocketHandle new_fd(accept(sock_raw_.fd(), nullptr, nullptr));
+      if (!new_fd)
+        return;
+      std::unique_ptr<UnixSocket> new_sock(new UnixSocket(
+          event_listener_, task_runner_, std::move(new_fd), State::kConnected,
+          sock_raw_.family(), sock_raw_.type(), peer_cred_mode_));
+      event_listener_->OnNewIncomingConnection(this, std::move(new_sock));
+    }
+  }
+}
+#else
+void UnixSocket::OnEvent() {
+  if (state_ == State::kDisconnected)
+    return;  // Some spurious event, typically queued just before Shutdown().
+
+  if (state_ == State::kConnected)
+    return event_listener_->OnDataAvailable(this);
+
+  if (state_ == State::kConnecting) {
+    PERFETTO_DCHECK(sock_raw_);
+    int sock_err = EINVAL;
+    socklen_t err_len = sizeof(sock_err);
+    int res =
+        getsockopt(sock_raw_.fd(), SOL_SOCKET, SO_ERROR, &sock_err, &err_len);
+
+    if (res == 0 && sock_err == EINPROGRESS)
+      return;  // Not connected yet, just a spurious FD watch wakeup.
+    if (res == 0 && sock_err == 0) {
+      if (peer_cred_mode_ == SockPeerCredMode::kReadOnConnect)
+        ReadPeerCredentialsPosix();
+      state_ = State::kConnected;
+      return event_listener_->OnConnect(this, true /* connected */);
+    }
+    PERFETTO_DLOG("Connection error: %s", strerror(sock_err));
+    Shutdown(false);
+    return event_listener_->OnConnect(this, false /* connected */);
+  }
+
+  // New incoming connection.
+  if (state_ == State::kListening) {
+    // There could be more than one incoming connection behind each FD watch
+    // notification. Drain'em all.
+    for (;;) {
+      ScopedFile new_fd(
+          PERFETTO_EINTR(accept(sock_raw_.fd(), nullptr, nullptr)));
+      if (!new_fd)
+        return;
+      std::unique_ptr<UnixSocket> new_sock(new UnixSocket(
+          event_listener_, task_runner_, std::move(new_fd), State::kConnected,
+          sock_raw_.family(), sock_raw_.type(), peer_cred_mode_));
+      event_listener_->OnNewIncomingConnection(this, std::move(new_sock));
+    }
+  }
+}
+#endif
+
+bool UnixSocket::Send(const void* msg,
+                      size_t len,
+                      const int* send_fds,
+                      size_t num_fds) {
+  if (state_ != State::kConnected) {
+    errno = ENOTCONN;
+    return false;
+  }
+
+  sock_raw_.SetBlocking(true);
+  const ssize_t sz = sock_raw_.Send(msg, len, send_fds, num_fds);
+  sock_raw_.SetBlocking(false);
+
+  if (sz == static_cast<ssize_t>(len)) {
+    return true;
+  }
+
+  // If we ever decide to support non-blocking sends again, here we should
+  // watch for both EAGAIN and EWOULDBLOCK (see base::IsAgain()).
+
+  // If sendmsg() succeeds but the returned size is >= 0 and < |len| it means
+  // that the endpoint disconnected in the middle of the read, and we managed
+  // to send only a portion of the buffer.
+  // If sz < 0, either the other endpoint disconnected (ECONNRESET) or some
+  // other error happened. In both cases we should just give up.
+  PERFETTO_DPLOG("sendmsg() failed");
+  Shutdown(true);
+  return false;
+}
+
+void UnixSocket::Shutdown(bool notify) {
+  WeakPtr<UnixSocket> weak_ptr = weak_ptr_factory_.GetWeakPtr();
+  if (notify) {
+    if (state_ == State::kConnected) {
+      task_runner_->PostTask([weak_ptr] {
+        if (weak_ptr)
+          weak_ptr->event_listener_->OnDisconnect(weak_ptr.get());
+      });
+    } else if (state_ == State::kConnecting) {
+      task_runner_->PostTask([weak_ptr] {
+        if (weak_ptr)
+          weak_ptr->event_listener_->OnConnect(weak_ptr.get(), false);
+      });
+    }
+  }
+
+  if (sock_raw_) {
+    task_runner_->RemoveFileDescriptorWatch(sock_raw_.watch_handle());
+    sock_raw_.Shutdown();
+  }
+  state_ = State::kDisconnected;
+}
+
+size_t UnixSocket::Receive(void* msg,
+                           size_t len,
+                           ScopedFile* fd_vec,
+                           size_t max_files) {
+  if (state_ != State::kConnected)
+    return 0;
+
+  const ssize_t sz = sock_raw_.Receive(msg, len, fd_vec, max_files);
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  bool async_would_block = WSAGetLastError() == WSAEWOULDBLOCK;
+#else
+  bool async_would_block = IsAgain(errno);
+#endif
+  if (sz < 0 && async_would_block)
+    return 0;
+
+  if (sz <= 0) {
+    Shutdown(true);
+    return 0;
+  }
+  PERFETTO_CHECK(static_cast<size_t>(sz) <= len);
+  return static_cast<size_t>(sz);
+}
+
+std::string UnixSocket::ReceiveString(size_t max_length) {
+  std::unique_ptr<char[]> buf(new char[max_length + 1]);
+  size_t rsize = Receive(buf.get(), max_length);
+  PERFETTO_CHECK(rsize <= max_length);
+  buf[rsize] = '\0';
+  return std::string(buf.get());
+}
+
+void UnixSocket::NotifyConnectionState(bool success) {
+  if (!success)
+    Shutdown(false);
+
+  WeakPtr<UnixSocket> weak_ptr = weak_ptr_factory_.GetWeakPtr();
+  task_runner_->PostTask([weak_ptr, success] {
+    if (weak_ptr)
+      weak_ptr->event_listener_->OnConnect(weak_ptr.get(), success);
+  });
+}
+
+UnixSocket::EventListener::~EventListener() {}
+void UnixSocket::EventListener::OnNewIncomingConnection(
+    UnixSocket*,
+    std::unique_ptr<UnixSocket>) {}
+void UnixSocket::EventListener::OnConnect(UnixSocket*, bool) {}
+void UnixSocket::EventListener::OnDisconnect(UnixSocket*) {}
+void UnixSocket::EventListener::OnDataAvailable(UnixSocket*) {}
+
+}  // namespace base
+}  // namespace perfetto
+// gen_amalgamated begin source: src/ipc/buffered_frame_deserializer.cc
+// gen_amalgamated begin header: src/ipc/buffered_frame_deserializer.h
+// gen_amalgamated begin header: include/perfetto/ext/ipc/basic_types.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_IPC_BASIC_TYPES_H_
+#define INCLUDE_PERFETTO_EXT_IPC_BASIC_TYPES_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+
+namespace perfetto {
+namespace ipc {
+
+using ProtoMessage = ::protozero::CppMessageObj;
+using ServiceID = uint32_t;
+using MethodID = uint32_t;
+using ClientID = uint64_t;
+using RequestID = uint64_t;
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+// AF_UNIX on Windows is supported only on Windows 10 from build 17063.
+// Also it doesn't bring major advantages compared to a TCP socket.
+// See go/perfetto-win .
+constexpr bool kUseTCPSocket = true;
+#else
+// On Android, Linux, Mac use a AF_UNIX socket.
+constexpr bool kUseTCPSocket = false;
+#endif
+
+// This determines the maximum size allowed for an IPC message. Trying to send
+// or receive a larger message will hit DCHECK(s) and auto-disconnect.
+constexpr size_t kIPCBufferSize = 128 * 1024;
+
+constexpr uid_t kInvalidUid = static_cast<uid_t>(-1);
+
+}  // namespace ipc
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_IPC_BASIC_TYPES_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_IPC_BUFFERED_FRAME_DESERIALIZER_H_
+#define SRC_IPC_BUFFERED_FRAME_DESERIALIZER_H_
+
+#include <stddef.h>
+
+#include <list>
+#include <memory>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/paged_memory.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
+
+namespace perfetto {
+
+namespace protos {
+namespace gen {
+class IPCFrame;
+}  // namespace gen
+}  // namespace protos
+
+namespace ipc {
+
+using Frame = ::perfetto::protos::gen::IPCFrame;
+
+// Deserializes incoming frames, taking care of buffering and tokenization.
+// Used by both host and client to decode incoming frames.
+//
+// Which problem does it solve?
+// ----------------------------
+// The wire protocol is as follows:
+// [32-bit frame size][proto-encoded Frame], e.g:
+// [06 00 00 00][00 11 22 33 44 55 66]
+// [02 00 00 00][AA BB]
+// [04 00 00 00][CC DD EE FF]
+// However, given that the socket works in SOCK_STREAM mode, the recv() calls
+// might see the following:
+// 06 00 00
+// 00 00 11 22 33 44 55
+// 66 02 00 00 00 ...
+// This class takes care of buffering efficiently the data received, without
+// making any assumption on how the incoming data will be chunked by the socket.
+// For instance, it is possible that a recv() doesn't produce any frame (because
+// it received only a part of the frame) or produces more than one frame.
+//
+// Usage
+// -----
+// Both host and client use this as follows:
+//
+// auto buf = rpc_frame_decoder.BeginReceive();
+// size_t rsize = socket.recv(buf.first, buf.second);
+// rpc_frame_decoder.EndReceive(rsize);
+// while (Frame frame = rpc_frame_decoder.PopNextFrame()) {
+//   ... process |frame|
+// }
+//
+// Design goals:
+// -------------
+// - Optimize for the realistic case of each recv() receiving one or more
+//   whole frames. In this case no memmove is performed.
+// - Guarantee that frames lay in a virtually contiguous memory area.
+//   This allows to use the protobuf-lite deserialization API (scattered
+//   deserialization is supported only by libprotobuf-full).
+// - Put a hard boundary to the size of the incoming buffer. This is to prevent
+//   that a malicious sends an abnormally large frame and OOMs us.
+// - Simplicity: just use a linear mmap region. No reallocations or scattering.
+//   Takes care of madvise()-ing unused memory.
+
+class BufferedFrameDeserializer {
+ public:
+  struct ReceiveBuffer {
+    char* data;
+    size_t size;
+  };
+
+  // |max_capacity| is overridable only for tests.
+  explicit BufferedFrameDeserializer(size_t max_capacity = kIPCBufferSize);
+  ~BufferedFrameDeserializer();
+
+  // This function doesn't really belong here as it does Serialization, unlike
+  // the rest of this class. However it is so small and has so many dependencies
+  // in common that doesn't justify having its own class.
+  static std::string Serialize(const Frame&);
+
+  // Returns a buffer that can be passed to recv(). The buffer is deliberately
+  // not initialized.
+  ReceiveBuffer BeginReceive();
+
+  // Must be called soon after BeginReceive().
+  // |recv_size| is the number of valid bytes that have been written into the
+  // buffer previously returned by BeginReceive() (the return value of recv()).
+  // Returns false if a header > |max_capacity| is received, in which case the
+  // caller is expected to shutdown the socket and terminate the ipc.
+  bool EndReceive(size_t recv_size) PERFETTO_WARN_UNUSED_RESULT;
+
+  // Decodes and returns the next decoded frame in the buffer if any, nullptr
+  // if no further frames have been decoded.
+  std::unique_ptr<Frame> PopNextFrame();
+
+  size_t capacity() const { return capacity_; }
+  size_t size() const { return size_; }
+
+ private:
+  BufferedFrameDeserializer(const BufferedFrameDeserializer&) = delete;
+  BufferedFrameDeserializer& operator=(const BufferedFrameDeserializer&) =
+      delete;
+
+  // If a valid frame is decoded it is added to |decoded_frames_|.
+  void DecodeFrame(const char*, size_t);
+
+  char* buf() { return reinterpret_cast<char*>(buf_.Get()); }
+
+  base::PagedMemory buf_;
+  const size_t capacity_ = 0;  // sizeof(|buf_|).
+
+  // THe number of bytes in |buf_| that contain valid data (as a result of
+  // EndReceive()). This is always <= |capacity_|.
+  size_t size_ = 0;
+
+  std::list<std::unique_ptr<Frame>> decoded_frames_;
+};
+
+}  // namespace ipc
+}  // namespace perfetto
+
+#endif  // SRC_IPC_BUFFERED_FRAME_DESERIALIZER_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/ipc/buffered_frame_deserializer.h"
+
+#include <inttypes.h>
+
+#include <algorithm>
+#include <type_traits>
+#include <utility>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/ipc/wire_protocol.gen.h"
+
+namespace perfetto {
+namespace ipc {
+
+namespace {
+
+// The header is just the number of bytes of the Frame protobuf message.
+constexpr size_t kHeaderSize = sizeof(uint32_t);
+}  // namespace
+
+BufferedFrameDeserializer::BufferedFrameDeserializer(size_t max_capacity)
+    : capacity_(max_capacity) {
+  PERFETTO_CHECK(max_capacity % base::GetSysPageSize() == 0);
+  PERFETTO_CHECK(max_capacity >= base::GetSysPageSize());
+}
+
+BufferedFrameDeserializer::~BufferedFrameDeserializer() = default;
+
+BufferedFrameDeserializer::ReceiveBuffer
+BufferedFrameDeserializer::BeginReceive() {
+  // Upon the first recv initialize the buffer to the max message size but
+  // release the physical memory for all but the first page. The kernel will
+  // automatically give us physical pages back as soon as we page-fault on them.
+  if (!buf_.IsValid()) {
+    PERFETTO_DCHECK(size_ == 0);
+    // TODO(eseckler): Don't commit all of the buffer at once on Windows.
+    buf_ = base::PagedMemory::Allocate(capacity_);
+
+    // Surely we are going to use at least the first page, but we may not need
+    // the rest for a bit.
+    const auto page_size = base::GetSysPageSize();
+    buf_.AdviseDontNeed(buf() + page_size, capacity_ - page_size);
+  }
+
+  PERFETTO_CHECK(capacity_ > size_);
+  return ReceiveBuffer{buf() + size_, capacity_ - size_};
+}
+
+bool BufferedFrameDeserializer::EndReceive(size_t recv_size) {
+  const auto page_size = base::GetSysPageSize();
+  PERFETTO_CHECK(recv_size + size_ <= capacity_);
+  size_ += recv_size;
+
+  // At this point the contents buf_ can contain:
+  // A) Only a fragment of the header (the size of the frame). E.g.,
+  //    03 00 00 (the header is 4 bytes, one is missing).
+  //
+  // B) A header and a part of the frame. E.g.,
+  //     05 00 00 00         11 22 33
+  //    [ header, size=5 ]  [ Partial frame ]
+  //
+  // C) One or more complete header+frame. E.g.,
+  //     05 00 00 00         11 22 33 44 55   03 00 00 00        AA BB CC
+  //    [ header, size=5 ]  [ Whole frame ]  [ header, size=3 ] [ Whole frame ]
+  //
+  // D) Some complete header+frame(s) and a partial header or frame (C + A/B).
+  //
+  // C Is the more likely case and the one we are optimizing for. A, B, D can
+  // happen because of the streaming nature of the socket.
+  // The invariant of this function is that, when it returns, buf_ is either
+  // empty (we drained all the complete frames) or starts with the header of the
+  // next, still incomplete, frame.
+
+  size_t consumed_size = 0;
+  for (;;) {
+    if (size_ < consumed_size + kHeaderSize)
+      break;  // Case A, not enough data to read even the header.
+
+    // Read the header into |payload_size|.
+    uint32_t payload_size = 0;
+    const char* rd_ptr = buf() + consumed_size;
+    memcpy(base::AssumeLittleEndian(&payload_size), rd_ptr, kHeaderSize);
+
+    // Saturate the |payload_size| to prevent overflows. The > capacity_ check
+    // below will abort the parsing.
+    size_t next_frame_size =
+        std::min(static_cast<size_t>(payload_size), capacity_);
+    next_frame_size += kHeaderSize;
+    rd_ptr += kHeaderSize;
+
+    if (size_ < consumed_size + next_frame_size) {
+      // Case B. We got the header but not the whole frame.
+      if (next_frame_size > capacity_) {
+        // The caller is expected to shut down the socket and give up at this
+        // point. If it doesn't do that and insists going on at some point it
+        // will hit the capacity check in BeginReceive().
+        PERFETTO_LOG("IPC Frame too large (size %zu)", next_frame_size);
+        return false;
+      }
+      break;
+    }
+
+    // Case C. We got at least one header and whole frame.
+    DecodeFrame(rd_ptr, payload_size);
+    consumed_size += next_frame_size;
+  }
+
+  PERFETTO_DCHECK(consumed_size <= size_);
+  if (consumed_size > 0) {
+    // Shift out the consumed data from the buffer. In the typical case (C)
+    // there is nothing to shift really, just setting size_ = 0 is enough.
+    // Shifting is only for the (unlikely) case D.
+    size_ -= consumed_size;
+    if (size_ > 0) {
+      // Case D. We consumed some frames but there is a leftover at the end of
+      // the buffer. Shift out the consumed bytes, so that on the next round
+      // |buf_| starts with the header of the next unconsumed frame.
+      const char* move_begin = buf() + consumed_size;
+      PERFETTO_CHECK(move_begin > buf());
+      PERFETTO_CHECK(move_begin + size_ <= buf() + capacity_);
+      memmove(buf(), move_begin, size_);
+    }
+    // If we just finished decoding a large frame that used more than one page,
+    // release the extra memory in the buffer. Large frames should be quite
+    // rare.
+    if (consumed_size > page_size) {
+      size_t size_rounded_up = (size_ / page_size + 1) * page_size;
+      if (size_rounded_up < capacity_) {
+        char* madvise_begin = buf() + size_rounded_up;
+        const size_t madvise_size = capacity_ - size_rounded_up;
+        PERFETTO_CHECK(madvise_begin > buf() + size_);
+        PERFETTO_CHECK(madvise_begin + madvise_size <= buf() + capacity_);
+        buf_.AdviseDontNeed(madvise_begin, madvise_size);
+      }
+    }
+  }
+  // At this point |size_| == 0 for case C, > 0 for cases A, B, D.
+  return true;
+}
+
+std::unique_ptr<Frame> BufferedFrameDeserializer::PopNextFrame() {
+  if (decoded_frames_.empty())
+    return nullptr;
+  std::unique_ptr<Frame> frame = std::move(decoded_frames_.front());
+  decoded_frames_.pop_front();
+  return frame;
+}
+
+void BufferedFrameDeserializer::DecodeFrame(const char* data, size_t size) {
+  if (size == 0)
+    return;
+  std::unique_ptr<Frame> frame(new Frame);
+  if (frame->ParseFromArray(data, size))
+    decoded_frames_.push_back(std::move(frame));
+}
+
+// static
+std::string BufferedFrameDeserializer::Serialize(const Frame& frame) {
+  std::vector<uint8_t> payload = frame.SerializeAsArray();
+  const uint32_t payload_size = static_cast<uint32_t>(payload.size());
+  std::string buf;
+  buf.resize(kHeaderSize + payload_size);
+  memcpy(&buf[0], base::AssumeLittleEndian(&payload_size), kHeaderSize);
+  memcpy(&buf[kHeaderSize], payload.data(), payload.size());
+  return buf;
+}
+
+}  // namespace ipc
+}  // namespace perfetto
+// gen_amalgamated begin source: src/ipc/deferred.cc
+// gen_amalgamated begin header: include/perfetto/ext/ipc/deferred.h
+// gen_amalgamated begin header: include/perfetto/ext/ipc/async_result.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_IPC_ASYNC_RESULT_H_
+#define INCLUDE_PERFETTO_EXT_IPC_ASYNC_RESULT_H_
+
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
+
+namespace perfetto {
+namespace ipc {
+
+// Wraps the result of an asynchronous invocation. This is the equivalent of a
+// std::pair<unique_ptr<T>, bool> with syntactic sugar. It is used as callback
+// argument by Deferred<T>. T is a ProtoMessage subclass (i.e. generated .pb.h).
+template <typename T>
+class AsyncResult {
+ public:
+  static AsyncResult Create() {
+    return AsyncResult(std::unique_ptr<T>(new T()));
+  }
+
+  AsyncResult(std::unique_ptr<T> msg = nullptr,
+              bool has_more = false,
+              int fd = -1)
+      : msg_(std::move(msg)), has_more_(has_more), fd_(fd) {
+    static_assert(std::is_base_of<ProtoMessage, T>::value, "T->ProtoMessage");
+  }
+  AsyncResult(AsyncResult&&) noexcept = default;
+  AsyncResult& operator=(AsyncResult&&) = default;
+
+  bool success() const { return !!msg_; }
+  explicit operator bool() const { return success(); }
+
+  bool has_more() const { return has_more_; }
+  void set_has_more(bool has_more) { has_more_ = has_more; }
+
+  void set_msg(std::unique_ptr<T> msg) { msg_ = std::move(msg); }
+  T* release_msg() { return msg_.release(); }
+  T* operator->() { return msg_.get(); }
+  T& operator*() { return *msg_; }
+
+  void set_fd(int fd) { fd_ = fd; }
+  int fd() const { return fd_; }
+
+ private:
+  std::unique_ptr<T> msg_;
+  bool has_more_ = false;
+
+  // Optional. Only for messages that convey a file descriptor, for sharing
+  // memory across processes.
+  int fd_ = -1;
+};
+
+}  // namespace ipc
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_IPC_ASYNC_RESULT_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_IPC_DEFERRED_H_
+#define INCLUDE_PERFETTO_EXT_IPC_DEFERRED_H_
+
+#include <functional>
+#include <memory>
+#include <utility>
+
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/async_result.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
+
+namespace perfetto {
+namespace ipc {
+
+// This class is a wrapper for a callback handling async results.
+// The problem this is solving is the following: For each result argument of the
+// methods generated from the .proto file:
+// - The client wants to see something on which it can Bind() a callback, which
+//   is invoked asynchronously once reply is received from the host.
+// - The host wants to expose something to user code that implements the IPC
+//   methods to allow them to provide an asynchronous reply back to the client.
+//   Eventually even more than once, for the case streaming replies.
+//
+// In both cases we want to make sure that callbacks don't get lost along the
+// way. To address this, this class will automatically reject the callbacks
+// if they are not resolved at destructor time (or the object is std::move()'d).
+//
+// The client is supposed to use this class as follows:
+//   class GreeterProxy {
+//      void SayHello(const HelloRequest&, Deferred<HelloReply> reply)
+//   }
+//  ...
+//  Deferred<HelloReply> reply;
+//  reply.Bind([] (AsyncResult<HelloReply> reply) {
+//    std::cout << reply.success() ? reply->message : "failure";
+//  });
+//  host_proxy_instance.SayHello(req, std::move(reply));
+//
+// The host instead is supposed to use this as follows:
+//   class GreeterImpl : public Greeter {
+//     void SayHello(const HelloRequest& req, Deferred<HelloReply> reply) {
+//        AsyncResult<HelloReply> reply = AsyncResult<HelloReply>::Create();
+//        reply->set_greeting("Hello " + req.name)
+//        reply.Resolve(std::move(reply));
+//     }
+//   }
+// Or for more complex cases, the deferred object can be std::move()'d outside
+// and the reply can continue asynchronously later.
+
+template <typename T>
+class Deferred;
+
+class DeferredBase {
+ public:
+  explicit DeferredBase(
+      std::function<void(AsyncResult<ProtoMessage>)> callback = nullptr);
+
+  template <typename T>
+  explicit DeferredBase(Deferred<T> other)
+      : callback_(std::move(other.callback_)) {}
+
+  ~DeferredBase();
+  DeferredBase(DeferredBase&&) noexcept;
+  DeferredBase& operator=(DeferredBase&&);
+  void Bind(std::function<void(AsyncResult<ProtoMessage>)> callback);
+  bool IsBound() const;
+  void Resolve(AsyncResult<ProtoMessage>);
+  void Reject();
+
+ protected:
+  template <typename T>
+  friend class Deferred;
+  void Move(DeferredBase&);
+
+  std::function<void(AsyncResult<ProtoMessage>)> callback_;
+};
+
+template <typename T>  // T : ProtoMessage subclass
+class Deferred : public DeferredBase {
+ public:
+  explicit Deferred(std::function<void(AsyncResult<T>)> callback = nullptr) {
+    Bind(std::move(callback));
+  }
+
+  // This move constructor (and the similar one in DeferredBase) is meant to be
+  // called only by the autogenerated code. The caller has to guarantee that the
+  // moved-from and moved-to types match. The behavior is otherwise undefined.
+  explicit Deferred(DeferredBase&& other) {
+    callback_ = std::move(other.callback_);
+    other.callback_ = nullptr;
+  }
+
+  void Bind(std::function<void(AsyncResult<T>)> callback) {
+    if (!callback)
+      return;
+
+    // Here we need a callback adapter to downcast the callback to a generic
+    // callback that takes an AsyncResult<ProtoMessage>, so that it can be
+    // stored in the base class |callback_|.
+    auto callback_adapter = [callback](
+                                AsyncResult<ProtoMessage> async_result_base) {
+      // Upcast the async_result from <ProtoMessage> -> <T : ProtoMessage>.
+      static_assert(std::is_base_of<ProtoMessage, T>::value, "T:ProtoMessage");
+      AsyncResult<T> async_result(
+          std::unique_ptr<T>(static_cast<T*>(async_result_base.release_msg())),
+          async_result_base.has_more(), async_result_base.fd());
+      callback(std::move(async_result));
+    };
+    DeferredBase::Bind(callback_adapter);
+  }
+
+  // If no more messages are expected, |callback_| is released.
+  void Resolve(AsyncResult<T> async_result) {
+    // Convert the |async_result| to the generic base one (T -> ProtoMessage).
+    AsyncResult<ProtoMessage> async_result_base(
+        std::unique_ptr<ProtoMessage>(async_result.release_msg()),
+        async_result.has_more(), async_result.fd());
+    DeferredBase::Resolve(std::move(async_result_base));
+  }
+};
+
+}  // namespace ipc
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_IPC_DEFERRED_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+
+namespace perfetto {
+namespace ipc {
+
+DeferredBase::DeferredBase(
+    std::function<void(AsyncResult<ProtoMessage>)> callback)
+    : callback_(std::move(callback)) {}
+
+DeferredBase::~DeferredBase() {
+  if (callback_)
+    Reject();
+}
+
+// Can't just use "= default" here because the default move operator for
+// std::function doesn't necessarily swap and hence can leave a copy of the
+// bind state around, which is undesirable.
+DeferredBase::DeferredBase(DeferredBase&& other) noexcept {
+  Move(other);
+}
+
+DeferredBase& DeferredBase::operator=(DeferredBase&& other) {
+  if (callback_)
+    Reject();
+  Move(other);
+  return *this;
+}
+
+void DeferredBase::Move(DeferredBase& other) {
+  callback_ = std::move(other.callback_);
+  other.callback_ = nullptr;
+}
+
+void DeferredBase::Bind(
+    std::function<void(AsyncResult<ProtoMessage>)> callback) {
+  callback_ = std::move(callback);
+}
+
+bool DeferredBase::IsBound() const {
+  return !!callback_;
+}
+
+void DeferredBase::Resolve(AsyncResult<ProtoMessage> async_result) {
+  if (!callback_) {
+    PERFETTO_DFATAL("No callback set.");
+    return;
+  }
+  bool has_more = async_result.has_more();
+  callback_(std::move(async_result));
+  if (!has_more)
+    callback_ = nullptr;
+}
+
+// Resolves with a nullptr |msg_|, signalling failure to |callback_|.
+void DeferredBase::Reject() {
+  Resolve(AsyncResult<ProtoMessage>());
+}
+
+}  // namespace ipc
+}  // namespace perfetto
+// gen_amalgamated begin source: src/ipc/virtual_destructors.cc
+// gen_amalgamated begin header: include/perfetto/ext/ipc/client.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_IPC_CLIENT_H_
+#define INCLUDE_PERFETTO_EXT_IPC_CLIENT_H_
+
+#include <functional>
+#include <memory>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
+
+namespace perfetto {
+
+namespace base {
+class TaskRunner;
+}  // namespace base
+
+namespace ipc {
+class ServiceProxy;
+
+// The client-side class that talks to the host over the socket and multiplexes
+// requests coming from the various autogenerated ServiceProxy stubs.
+// This is meant to be used by the user code as follows:
+// auto client = Client::CreateInstance("socket_name", task_runner);
+// std::unique_ptr<GreeterService> svc(new GreeterService());
+// client.BindService(svc);
+// svc.OnConnect([] () {
+//    svc.SayHello(..., ...);
+// });
+class Client {
+ public:
+  // struct ConnArgs is used for creating a client in 2 connection modes:
+  // 1. Connect using a socket name with the option to retry the connection on
+  //    connection failure.
+  // 2. Adopt a connected socket.
+  struct ConnArgs {
+    ConnArgs(const char* sock_name, bool sock_retry)
+        : socket_name(sock_name), retry(sock_retry) {}
+    explicit ConnArgs(base::ScopedSocketHandle sock_fd)
+        : socket_fd(std::move(sock_fd)) {}
+
+    // Disallow copy. Only supports move.
+    ConnArgs(const ConnArgs& other) = delete;
+    ConnArgs(ConnArgs&& other) = default;
+
+    base::ScopedSocketHandle socket_fd;
+    const char* socket_name = nullptr;
+    bool retry = false;  // Only for connecting with |socket_name|.
+  };
+
+  static std::unique_ptr<Client> CreateInstance(ConnArgs, base::TaskRunner*);
+  virtual ~Client();
+
+  virtual void BindService(base::WeakPtr<ServiceProxy>) = 0;
+
+  // There is no need to call this method explicitly. Destroying the
+  // ServiceProxy instance is sufficient and will automatically unbind it. This
+  // method is exposed only for the ServiceProxy destructor.
+  virtual void UnbindService(ServiceID) = 0;
+
+  // Returns (with move semantics) the last file descriptor received on the IPC
+  // channel. No buffering is performed: if a service sends two file descriptors
+  // and the caller doesn't read them immediately, the first one will be
+  // automatically closed when the second is received (and will hit a DCHECK in
+  // debug builds).
+  virtual base::ScopedFile TakeReceivedFD() = 0;
+};
+
+}  // namespace ipc
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_IPC_CLIENT_H_
+// gen_amalgamated begin header: include/perfetto/ext/ipc/host.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_IPC_HOST_H_
+#define INCLUDE_PERFETTO_EXT_IPC_HOST_H_
+
+#include <memory>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
+
+namespace perfetto {
+
+namespace base {
+class TaskRunner;
+}  // namespace base
+
+namespace ipc {
+
+class Service;
+
+// The host-side of the IPC layer. This class acts as a registry and request
+// dispatcher. It listen on the UnixSocket |socket_name| for incoming requests
+// (coming Client instances) and dispatches their requests to the various
+// Services exposed.
+class Host {
+ public:
+  // Creates an instance and starts listening on the given |socket_name|.
+  // Returns nullptr if listening on the socket fails.
+  static std::unique_ptr<Host> CreateInstance(const char* socket_name,
+                                              base::TaskRunner*);
+
+  // Like the above but takes a file descriptor to a pre-bound unix socket.
+  // Returns nullptr if listening on the socket fails.
+  static std::unique_ptr<Host> CreateInstance(base::ScopedSocketHandle,
+                                              base::TaskRunner*);
+
+  virtual ~Host();
+
+  // Registers a new service and makes it available to remote IPC peers.
+  // All the exposed Service instances will be destroyed when destroying the
+  // Host instance if ExposeService succeeds and returns true, or immediately
+  // after the call in case of failure.
+  // Returns true if the register has been successfully registered, false in
+  // case of errors (e.g., another service with the same name is already
+  // registered).
+  virtual bool ExposeService(std::unique_ptr<Service>) = 0;
+};
+
+}  // namespace ipc
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_IPC_HOST_H_
+// gen_amalgamated begin header: include/perfetto/ext/ipc/service.h
+// gen_amalgamated begin header: include/perfetto/ext/ipc/client_info.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_IPC_CLIENT_INFO_H_
+#define INCLUDE_PERFETTO_EXT_IPC_CLIENT_INFO_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
+
+namespace perfetto {
+namespace ipc {
+
+// Passed to Service(s) to identify remote clients.
+class ClientInfo {
+ public:
+  ClientInfo() = default;
+  ClientInfo(ClientID client_id, uid_t uid)
+      : client_id_(client_id), uid_(uid) {}
+
+  bool operator==(const ClientInfo& other) const {
+    return (client_id_ == other.client_id_ && uid_ == other.uid_);
+  }
+  bool operator!=(const ClientInfo& other) const { return !(*this == other); }
+
+  // For map<> and other sorted containers.
+  bool operator<(const ClientInfo& other) const {
+    PERFETTO_DCHECK(client_id_ != other.client_id_ || *this == other);
+    return client_id_ < other.client_id_;
+  }
+
+  bool is_valid() const { return client_id_ != 0; }
+
+  // A monotonic counter.
+  ClientID client_id() const { return client_id_; }
+
+  // Posix User ID. Comes from the kernel, can be trusted.
+  uid_t uid() const { return uid_; }
+
+ private:
+  ClientID client_id_ = 0;
+  uid_t uid_ = kInvalidUid;
+};
+
+}  // namespace ipc
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_IPC_CLIENT_INFO_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_IPC_SERVICE_H_
+#define INCLUDE_PERFETTO_EXT_IPC_SERVICE_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/client_info.h"
+
+namespace perfetto {
+namespace ipc {
+
+class ServiceDescriptor;
+
+// The base class for all the autogenerated host-side service interfaces.
+class Service {
+ public:
+  virtual ~Service();
+
+  // Overridden by the auto-generated class. Provides the list of methods and
+  // the protobuf (de)serialization functions for their arguments.
+  virtual const ServiceDescriptor& GetDescriptor() = 0;
+
+  // Invoked when a remote client disconnects. Use client_info() to obtain
+  // details about the client that disconnected.
+  virtual void OnClientDisconnected() {}
+
+  // Returns the ClientInfo for the current IPC request. Returns an invalid
+  // ClientInfo if called outside the scope of an IPC method.
+  const ClientInfo& client_info() {
+    PERFETTO_DCHECK(client_info_.is_valid());
+    return client_info_;
+  }
+
+  base::ScopedFile TakeReceivedFD() {
+    if (received_fd_)
+      return std::move(*received_fd_);
+    return base::ScopedFile();
+  }
+
+ private:
+  friend class HostImpl;
+  ClientInfo client_info_;
+  // This is a pointer because the received fd needs to remain owned by the
+  // ClientConnection, as we will provide it to all method invocations
+  // for that client until one of them calls Service::TakeReceivedFD.
+  //
+  // Different clients might have sent different FDs so this cannot be owned
+  // here.
+  //
+  // Note that this means that there can always only be one outstanding
+  // invocation per client that supplies an FD and the client needs to
+  // wait for this one to return before calling another one.
+  base::ScopedFile* received_fd_;
+};
+
+}  // namespace ipc
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_IPC_SERVICE_H_
+// gen_amalgamated begin header: include/perfetto/ext/ipc/service_proxy.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_IPC_SERVICE_PROXY_H_
+#define INCLUDE_PERFETTO_EXT_IPC_SERVICE_PROXY_H_
+
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
+
+#include <assert.h>
+
+#include <functional>
+#include <map>
+#include <memory>
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
+
+namespace perfetto {
+namespace ipc {
+
+class Client;
+class ServiceDescriptor;
+
+// The base class for the client-side autogenerated stubs that forward method
+// invocations to the host. All the methods of this class are meant to be called
+// only by the autogenerated code.
+class PERFETTO_EXPORT ServiceProxy {
+ public:
+  class EventListener {
+   public:
+    virtual ~EventListener();
+
+    // Called once after Client::BindService() if the ServiceProxy has been
+    // successfully bound to the host. It is possible to start sending IPC
+    // requests soon after this.
+    virtual void OnConnect() {}
+
+    // Called if the connection fails to be established or drops after having
+    // been established.
+    virtual void OnDisconnect() {}
+  };
+
+  // Guarantees that no callback will happen after this object has been
+  // destroyed. The caller has to guarantee that the |event_listener| stays
+  // alive at least as long as the ServiceProxy instance.
+  explicit ServiceProxy(EventListener*);
+  virtual ~ServiceProxy();
+
+  void InitializeBinding(base::WeakPtr<Client>,
+                         ServiceID,
+                         std::map<std::string, MethodID>);
+
+  // Called by the IPC methods in the autogenerated classes.
+  void BeginInvoke(const std::string& method_name,
+                   const ProtoMessage& request,
+                   DeferredBase reply,
+                   int fd = -1);
+
+  // Called by ClientImpl.
+  // |reply_args| == nullptr means request failure.
+  void EndInvoke(RequestID,
+                 std::unique_ptr<ProtoMessage> reply_arg,
+                 bool has_more);
+
+  // Called by ClientImpl.
+  void OnConnect(bool success);
+  void OnDisconnect();
+  bool connected() const { return service_id_ != 0; }
+
+  base::WeakPtr<ServiceProxy> GetWeakPtr() const;
+
+  // Implemented by the autogenerated class.
+  virtual const ServiceDescriptor& GetDescriptor() = 0;
+
+ private:
+  base::WeakPtr<Client> client_;
+  ServiceID service_id_ = 0;
+  std::map<std::string, MethodID> remote_method_ids_;
+  std::map<RequestID, DeferredBase> pending_callbacks_;
+  EventListener* const event_listener_;
+  base::WeakPtrFactory<ServiceProxy> weak_ptr_factory_;  // Keep last.
+};
+
+}  // namespace ipc
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_IPC_SERVICE_PROXY_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/client.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/host.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
+
+// This translation unit contains the definitions for the destructor of pure
+// virtual interfaces for the current build target. The alternative would be
+// introducing a one-liner .cc file for each pure virtual interface, which is
+// overkill. This is for compliance with -Wweak-vtables.
+
+namespace perfetto {
+namespace ipc {
+
+Client::~Client() = default;
+Host::~Host() = default;
+Service::~Service() = default;
+ServiceProxy::EventListener::~EventListener() = default;
+
+}  // namespace ipc
+}  // namespace perfetto
+// gen_amalgamated begin source: src/ipc/client_impl.cc
+// gen_amalgamated begin header: src/ipc/client_impl.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_IPC_CLIENT_IMPL_H_
+#define SRC_IPC_CLIENT_IMPL_H_
+
+#include <list>
+#include <map>
+#include <memory>
+
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/client.h"
+// gen_amalgamated expanded: #include "src/ipc/buffered_frame_deserializer.h"
+
+namespace perfetto {
+
+namespace protos {
+namespace gen {
+class IPCFrame_BindServiceReply;
+class IPCFrame_InvokeMethodReply;
+}  // namespace gen
+}  // namespace protos
+
+namespace base {
+class TaskRunner;
+}  // namespace base
+
+namespace ipc {
+
+class ServiceDescriptor;
+
+class ClientImpl : public Client, public base::UnixSocket::EventListener {
+ public:
+  ClientImpl(ConnArgs, base::TaskRunner*);
+  ~ClientImpl() override;
+
+  // Client implementation.
+  void BindService(base::WeakPtr<ServiceProxy>) override;
+  void UnbindService(ServiceID) override;
+  base::ScopedFile TakeReceivedFD() override;
+
+  // base::UnixSocket::EventListener implementation.
+  void OnConnect(base::UnixSocket*, bool connected) override;
+  void OnDisconnect(base::UnixSocket*) override;
+  void OnDataAvailable(base::UnixSocket*) override;
+
+  RequestID BeginInvoke(ServiceID,
+                        const std::string& method_name,
+                        MethodID remote_method_id,
+                        const ProtoMessage& method_args,
+                        bool drop_reply,
+                        base::WeakPtr<ServiceProxy>,
+                        int fd = -1);
+
+  base::UnixSocket* GetUnixSocketForTesting() { return sock_.get(); }
+
+ private:
+  struct QueuedRequest {
+    QueuedRequest();
+    int type = 0;  // From Frame::msg_case(), see wire_protocol.proto.
+    RequestID request_id = 0;
+    base::WeakPtr<ServiceProxy> service_proxy;
+
+    // Only for type == kMsgInvokeMethod.
+    std::string method_name;
+  };
+
+  ClientImpl(const ClientImpl&) = delete;
+  ClientImpl& operator=(const ClientImpl&) = delete;
+
+  void TryConnect();
+  bool SendFrame(const Frame&, int fd = -1);
+  void OnFrameReceived(const Frame&);
+  void OnBindServiceReply(QueuedRequest,
+                          const protos::gen::IPCFrame_BindServiceReply&);
+  void OnInvokeMethodReply(QueuedRequest,
+                           const protos::gen::IPCFrame_InvokeMethodReply&);
+
+  bool invoking_method_reply_ = false;
+  const char* socket_name_ = nullptr;
+  bool socket_retry_ = false;
+  uint32_t socket_backoff_ms_ = 0;
+  std::unique_ptr<base::UnixSocket> sock_;
+  base::TaskRunner* const task_runner_;
+  RequestID last_request_id_ = 0;
+  BufferedFrameDeserializer frame_deserializer_;
+  base::ScopedFile received_fd_;
+  std::map<RequestID, QueuedRequest> queued_requests_;
+  std::map<ServiceID, base::WeakPtr<ServiceProxy>> service_bindings_;
+
+  // Queue of calls to BindService() that happened before the socket connected.
+  std::list<base::WeakPtr<ServiceProxy>> queued_bindings_;
+
+  base::WeakPtrFactory<Client> weak_ptr_factory_;  // Keep last.
+};
+
+}  // namespace ipc
+}  // namespace perfetto
+
+#endif  // SRC_IPC_CLIENT_IMPL_H_
+// gen_amalgamated begin header: include/perfetto/ext/ipc/service_descriptor.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_IPC_SERVICE_DESCRIPTOR_H_
+#define INCLUDE_PERFETTO_EXT_IPC_SERVICE_DESCRIPTOR_H_
+
+#include <functional>
+#include <string>
+#include <utility>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
+
+namespace perfetto {
+namespace ipc {
+
+class Service;
+
+// This is a pure data structure which holds factory methods and strings for the
+// services and their methods that get generated in the .h/.cc files.
+// Each autogenerated class has a GetDescriptor() method that returns one
+// instance of these and allows both client and hosts to map service and method
+// names to IDs and provide function pointers to the protobuf decoder fuctions.
+class ServiceDescriptor {
+ public:
+  struct Method {
+    const char* name;
+
+    // DecoderFunc is pointer to a function that takes a string in input
+    // containing protobuf encoded data and returns a decoded protobuf message.
+    using DecoderFunc = std::unique_ptr<ProtoMessage> (*)(const std::string&);
+
+    // Function pointer to decode the request argument of the method.
+    DecoderFunc request_proto_decoder;
+
+    // Function pointer to decoded the reply argument of the method.
+    DecoderFunc reply_proto_decoder;
+
+    // Function pointer that dispatches the generic request to the corresponding
+    // method implementation.
+    using InvokerFunc = void (*)(Service*,
+                                 const ProtoMessage& /* request_args */,
+                                 DeferredBase /* deferred_reply */);
+    InvokerFunc invoker;
+  };
+
+  const char* service_name = nullptr;
+
+  // Note that methods order is not stable. Client and Host might have different
+  // method indexes, depending on their versions. The Client can't just rely
+  // on the indexes and has to keep a [string -> remote index] translation map.
+  std::vector<Method> methods;
+};
+
+}  // namespace ipc
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_IPC_SERVICE_DESCRIPTOR_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/ipc/client_impl.h"
+
+#include <fcntl.h>
+#include <inttypes.h>
+
+#include <utility>
+
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_descriptor.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/ipc/wire_protocol.gen.h"
+
+// TODO(primiano): Add ThreadChecker everywhere.
+
+// TODO(primiano): Add timeouts.
+
+namespace perfetto {
+namespace ipc {
+
+namespace {
+constexpr base::SockFamily kClientSockFamily =
+    kUseTCPSocket ? base::SockFamily::kInet : base::SockFamily::kUnix;
+}  // namespace
+
+// static
+std::unique_ptr<Client> Client::CreateInstance(ConnArgs conn_args,
+                                               base::TaskRunner* task_runner) {
+  std::unique_ptr<Client> client(
+      new ClientImpl(std::move(conn_args), task_runner));
+  return client;
+}
+
+ClientImpl::ClientImpl(ConnArgs conn_args, base::TaskRunner* task_runner)
+    : socket_name_(conn_args.socket_name),
+      socket_retry_(conn_args.retry),
+      task_runner_(task_runner),
+      weak_ptr_factory_(this) {
+  if (conn_args.socket_fd) {
+    // Create the client using a connected socket. This code path will never hit
+    // OnConnect().
+    sock_ = base::UnixSocket::AdoptConnected(
+        std::move(conn_args.socket_fd), this, task_runner_, kClientSockFamily,
+        base::SockType::kStream, base::SockPeerCredMode::kIgnore);
+  } else {
+    // Connect using the socket name.
+    TryConnect();
+  }
+}
+
+ClientImpl::~ClientImpl() {
+  // Ensure we are not destroyed in the middle of invoking a reply.
+  PERFETTO_DCHECK(!invoking_method_reply_);
+  OnDisconnect(
+      nullptr);  // The base::UnixSocket* ptr is not used in OnDisconnect().
+}
+
+void ClientImpl::TryConnect() {
+  PERFETTO_DCHECK(socket_name_);
+  sock_ = base::UnixSocket::Connect(socket_name_, this, task_runner_,
+                                    kClientSockFamily, base::SockType::kStream,
+                                    base::SockPeerCredMode::kIgnore);
+}
+
+void ClientImpl::BindService(base::WeakPtr<ServiceProxy> service_proxy) {
+  if (!service_proxy)
+    return;
+  if (!sock_->is_connected()) {
+    queued_bindings_.emplace_back(service_proxy);
+    return;
+  }
+  RequestID request_id = ++last_request_id_;
+  Frame frame;
+  frame.set_request_id(request_id);
+  Frame::BindService* req = frame.mutable_msg_bind_service();
+  const char* const service_name = service_proxy->GetDescriptor().service_name;
+  req->set_service_name(service_name);
+  if (!SendFrame(frame)) {
+    PERFETTO_DLOG("BindService(%s) failed", service_name);
+    return service_proxy->OnConnect(false /* success */);
+  }
+  QueuedRequest qr;
+  qr.type = Frame::kMsgBindServiceFieldNumber;
+  qr.request_id = request_id;
+  qr.service_proxy = service_proxy;
+  queued_requests_.emplace(request_id, std::move(qr));
+}
+
+void ClientImpl::UnbindService(ServiceID service_id) {
+  service_bindings_.erase(service_id);
+}
+
+RequestID ClientImpl::BeginInvoke(ServiceID service_id,
+                                  const std::string& method_name,
+                                  MethodID remote_method_id,
+                                  const ProtoMessage& method_args,
+                                  bool drop_reply,
+                                  base::WeakPtr<ServiceProxy> service_proxy,
+                                  int fd) {
+  RequestID request_id = ++last_request_id_;
+  Frame frame;
+  frame.set_request_id(request_id);
+  Frame::InvokeMethod* req = frame.mutable_msg_invoke_method();
+  req->set_service_id(service_id);
+  req->set_method_id(remote_method_id);
+  req->set_drop_reply(drop_reply);
+  req->set_args_proto(method_args.SerializeAsString());
+  if (!SendFrame(frame, fd)) {
+    PERFETTO_DLOG("BeginInvoke() failed while sending the frame");
+    return 0;
+  }
+  if (drop_reply)
+    return 0;
+  QueuedRequest qr;
+  qr.type = Frame::kMsgInvokeMethodFieldNumber;
+  qr.request_id = request_id;
+  qr.method_name = method_name;
+  qr.service_proxy = std::move(service_proxy);
+  queued_requests_.emplace(request_id, std::move(qr));
+  return request_id;
+}
+
+bool ClientImpl::SendFrame(const Frame& frame, int fd) {
+  // Serialize the frame into protobuf, add the size header, and send it.
+  std::string buf = BufferedFrameDeserializer::Serialize(frame);
+
+  // TODO(primiano): this should do non-blocking I/O. But then what if the
+  // socket buffer is full? We might want to either drop the request or throttle
+  // the send and PostTask the reply later? Right now we are making Send()
+  // blocking as a workaround. Propagate bakpressure to the caller instead.
+  bool res = sock_->Send(buf.data(), buf.size(), fd);
+  PERFETTO_CHECK(res || !sock_->is_connected());
+  return res;
+}
+
+void ClientImpl::OnConnect(base::UnixSocket*, bool connected) {
+  if (!connected && socket_retry_) {
+    socket_backoff_ms_ =
+        (socket_backoff_ms_ < 10000) ? socket_backoff_ms_ + 1000 : 30000;
+    PERFETTO_DLOG(
+        "Connection to traced's UNIX socket failed, retrying in %u seconds",
+        socket_backoff_ms_ / 1000);
+    auto weak_this = weak_ptr_factory_.GetWeakPtr();
+    task_runner_->PostDelayedTask(
+        [weak_this] {
+          if (weak_this)
+            static_cast<ClientImpl&>(*weak_this).TryConnect();
+        },
+        socket_backoff_ms_);
+    return;
+  }
+
+  // Drain the BindService() calls that were queued before establishing the
+  // connection with the host. Note that if we got disconnected, the call to
+  // OnConnect below might delete |this|, so move everything on the stack first.
+  auto queued_bindings = std::move(queued_bindings_);
+  queued_bindings_.clear();
+  for (base::WeakPtr<ServiceProxy>& service_proxy : queued_bindings) {
+    if (connected) {
+      BindService(service_proxy);
+    } else if (service_proxy) {
+      service_proxy->OnConnect(false /* success */);
+    }
+  }
+  // Don't access |this| below here.
+}
+
+void ClientImpl::OnDisconnect(base::UnixSocket*) {
+  for (const auto& it : service_bindings_) {
+    base::WeakPtr<ServiceProxy> service_proxy = it.second;
+    task_runner_->PostTask([service_proxy] {
+      if (service_proxy)
+        service_proxy->OnDisconnect();
+    });
+  }
+  service_bindings_.clear();
+  queued_bindings_.clear();
+}
+
+void ClientImpl::OnDataAvailable(base::UnixSocket*) {
+  size_t rsize;
+  do {
+    auto buf = frame_deserializer_.BeginReceive();
+    base::ScopedFile fd;
+    rsize = sock_->Receive(buf.data, buf.size, &fd);
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    PERFETTO_DCHECK(!fd);
+#else
+    if (fd) {
+      PERFETTO_DCHECK(!received_fd_);
+      int res = fcntl(*fd, F_SETFD, FD_CLOEXEC);
+      PERFETTO_DCHECK(res == 0);
+      received_fd_ = std::move(fd);
+    }
+#endif
+    if (!frame_deserializer_.EndReceive(rsize)) {
+      // The endpoint tried to send a frame that is way too large.
+      return sock_->Shutdown(true);  // In turn will trigger an OnDisconnect().
+      // TODO(fmayer): check this.
+    }
+  } while (rsize > 0);
+
+  while (std::unique_ptr<Frame> frame = frame_deserializer_.PopNextFrame())
+    OnFrameReceived(*frame);
+}
+
+void ClientImpl::OnFrameReceived(const Frame& frame) {
+  auto queued_requests_it = queued_requests_.find(frame.request_id());
+  if (queued_requests_it == queued_requests_.end()) {
+    PERFETTO_DLOG("OnFrameReceived(): got invalid request_id=%" PRIu64,
+                  static_cast<uint64_t>(frame.request_id()));
+    return;
+  }
+  QueuedRequest req = std::move(queued_requests_it->second);
+  queued_requests_.erase(queued_requests_it);
+
+  if (req.type == Frame::kMsgBindServiceFieldNumber &&
+      frame.has_msg_bind_service_reply()) {
+    return OnBindServiceReply(std::move(req), frame.msg_bind_service_reply());
+  }
+  if (req.type == Frame::kMsgInvokeMethodFieldNumber &&
+      frame.has_msg_invoke_method_reply()) {
+    return OnInvokeMethodReply(std::move(req), frame.msg_invoke_method_reply());
+  }
+  if (frame.has_msg_request_error()) {
+    PERFETTO_DLOG("Host error: %s", frame.msg_request_error().error().c_str());
+    return;
+  }
+
+  PERFETTO_DLOG(
+      "OnFrameReceived() request type=%d, received unknown frame in reply to "
+      "request_id=%" PRIu64,
+      req.type, static_cast<uint64_t>(frame.request_id()));
+}
+
+void ClientImpl::OnBindServiceReply(QueuedRequest req,
+                                    const Frame::BindServiceReply& reply) {
+  base::WeakPtr<ServiceProxy>& service_proxy = req.service_proxy;
+  if (!service_proxy)
+    return;
+  const char* svc_name = service_proxy->GetDescriptor().service_name;
+  if (!reply.success()) {
+    PERFETTO_DLOG("BindService(): unknown service_name=\"%s\"", svc_name);
+    return service_proxy->OnConnect(false /* success */);
+  }
+
+  auto prev_service = service_bindings_.find(reply.service_id());
+  if (prev_service != service_bindings_.end() && prev_service->second.get()) {
+    PERFETTO_DLOG(
+        "BindService(): Trying to bind service \"%s\" but another service "
+        "named \"%s\" is already bound with the same ID.",
+        svc_name, prev_service->second->GetDescriptor().service_name);
+    return service_proxy->OnConnect(false /* success */);
+  }
+
+  // Build the method [name] -> [remote_id] map.
+  std::map<std::string, MethodID> methods;
+  for (const auto& method : reply.methods()) {
+    if (method.name().empty() || method.id() <= 0) {
+      PERFETTO_DLOG("OnBindServiceReply(): invalid method \"%s\" -> %" PRIu64,
+                    method.name().c_str(), static_cast<uint64_t>(method.id()));
+      continue;
+    }
+    methods[method.name()] = method.id();
+  }
+  service_proxy->InitializeBinding(weak_ptr_factory_.GetWeakPtr(),
+                                   reply.service_id(), std::move(methods));
+  service_bindings_[reply.service_id()] = service_proxy;
+  service_proxy->OnConnect(true /* success */);
+}
+
+void ClientImpl::OnInvokeMethodReply(QueuedRequest req,
+                                     const Frame::InvokeMethodReply& reply) {
+  base::WeakPtr<ServiceProxy> service_proxy = req.service_proxy;
+  if (!service_proxy)
+    return;
+  std::unique_ptr<ProtoMessage> decoded_reply;
+  if (reply.success()) {
+    // If this becomes a hotspot, optimize by maintaining a dedicated hashtable.
+    for (const auto& method : service_proxy->GetDescriptor().methods) {
+      if (req.method_name == method.name) {
+        decoded_reply = method.reply_proto_decoder(reply.reply_proto());
+        break;
+      }
+    }
+  }
+  const RequestID request_id = req.request_id;
+  invoking_method_reply_ = true;
+  service_proxy->EndInvoke(request_id, std::move(decoded_reply),
+                           reply.has_more());
+  invoking_method_reply_ = false;
+
+  // If this is a streaming method and future replies will be resolved, put back
+  // the |req| with the callback into the set of active requests.
+  if (reply.has_more())
+    queued_requests_.emplace(request_id, std::move(req));
+}
+
+ClientImpl::QueuedRequest::QueuedRequest() = default;
+
+base::ScopedFile ClientImpl::TakeReceivedFD() {
+  return std::move(received_fd_);
+}
+
+}  // namespace ipc
+}  // namespace perfetto
+// gen_amalgamated begin source: src/ipc/service_proxy.cc
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
+
+#include <utility>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_descriptor.h"
+// gen_amalgamated expanded: #include "src/ipc/client_impl.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/ipc/wire_protocol.gen.h"
+
+namespace perfetto {
+namespace ipc {
+
+ServiceProxy::ServiceProxy(EventListener* event_listener)
+    : event_listener_(event_listener), weak_ptr_factory_(this) {}
+
+ServiceProxy::~ServiceProxy() {
+  if (client_ && connected())
+    client_->UnbindService(service_id_);
+}
+
+void ServiceProxy::InitializeBinding(
+    base::WeakPtr<Client> client,
+    ServiceID service_id,
+    std::map<std::string, MethodID> remote_method_ids) {
+  client_ = std::move(client);
+  service_id_ = service_id;
+  remote_method_ids_ = std::move(remote_method_ids);
+}
+
+void ServiceProxy::BeginInvoke(const std::string& method_name,
+                               const ProtoMessage& request,
+                               DeferredBase reply,
+                               int fd) {
+  // |reply| will auto-resolve if it gets out of scope early.
+  if (!connected()) {
+    PERFETTO_DFATAL("Not connected.");
+    return;
+  }
+  if (!client_)
+    return;  // The Client object has been destroyed in the meantime.
+
+  auto remote_method_it = remote_method_ids_.find(method_name);
+  RequestID request_id = 0;
+  const bool drop_reply = !reply.IsBound();
+  if (remote_method_it != remote_method_ids_.end()) {
+    request_id =
+        static_cast<ClientImpl*>(client_.get())
+            ->BeginInvoke(service_id_, method_name, remote_method_it->second,
+                          request, drop_reply, weak_ptr_factory_.GetWeakPtr(),
+                          fd);
+  } else {
+    PERFETTO_DLOG("Cannot find method \"%s\" on the host", method_name.c_str());
+  }
+
+  // When passing |drop_reply| == true, the returned |request_id| should be 0.
+  PERFETTO_DCHECK(!drop_reply || !request_id);
+
+  if (!request_id)
+    return;
+  PERFETTO_DCHECK(pending_callbacks_.count(request_id) == 0);
+  pending_callbacks_.emplace(request_id, std::move(reply));
+}
+
+void ServiceProxy::EndInvoke(RequestID request_id,
+                             std::unique_ptr<ProtoMessage> result,
+                             bool has_more) {
+  auto callback_it = pending_callbacks_.find(request_id);
+  if (callback_it == pending_callbacks_.end()) {
+    // Either we are getting a reply for a method we never invoked, or we are
+    // getting a reply to a method marked drop_reply (that has been invoked
+    // without binding any callback in the Defererd response object).
+    PERFETTO_DFATAL("Unexpected reply received.");
+    return;
+  }
+  DeferredBase& reply_callback = callback_it->second;
+  AsyncResult<ProtoMessage> reply(std::move(result), has_more);
+  reply_callback.Resolve(std::move(reply));
+  if (!has_more)
+    pending_callbacks_.erase(callback_it);
+}
+
+void ServiceProxy::OnConnect(bool success) {
+  if (success) {
+    PERFETTO_DCHECK(service_id_);
+    return event_listener_->OnConnect();
+  }
+  return event_listener_->OnDisconnect();
+}
+
+void ServiceProxy::OnDisconnect() {
+  pending_callbacks_.clear();  // Will Reject() all the pending callbacks.
+  event_listener_->OnDisconnect();
+}
+
+base::WeakPtr<ServiceProxy> ServiceProxy::GetWeakPtr() const {
+  return weak_ptr_factory_.GetWeakPtr();
+}
+
+}  // namespace ipc
+}  // namespace perfetto
+// gen_amalgamated begin source: src/ipc/host_impl.cc
+// gen_amalgamated begin header: src/ipc/host_impl.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_IPC_HOST_IMPL_H_
+#define SRC_IPC_HOST_IMPL_H_
+
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/host.h"
+// gen_amalgamated expanded: #include "src/ipc/buffered_frame_deserializer.h"
+
+namespace perfetto {
+namespace ipc {
+
+class HostImpl : public Host, public base::UnixSocket::EventListener {
+ public:
+  HostImpl(const char* socket_name, base::TaskRunner*);
+  HostImpl(base::ScopedSocketHandle, base::TaskRunner*);
+  ~HostImpl() override;
+
+  // Host implementation.
+  bool ExposeService(std::unique_ptr<Service>) override;
+
+  // base::UnixSocket::EventListener implementation.
+  void OnNewIncomingConnection(base::UnixSocket*,
+                               std::unique_ptr<base::UnixSocket>) override;
+  void OnDisconnect(base::UnixSocket*) override;
+  void OnDataAvailable(base::UnixSocket*) override;
+
+  const base::UnixSocket* sock() const { return sock_.get(); }
+
+ private:
+  // Owns the per-client receive buffer (BufferedFrameDeserializer).
+  struct ClientConnection {
+    ~ClientConnection();
+    ClientID id;
+    std::unique_ptr<base::UnixSocket> sock;
+    BufferedFrameDeserializer frame_deserializer;
+    base::ScopedFile received_fd;
+  };
+  struct ExposedService {
+    ExposedService(ServiceID, const std::string&, std::unique_ptr<Service>);
+    ~ExposedService();
+    ExposedService(ExposedService&&) noexcept;
+    ExposedService& operator=(ExposedService&&);
+
+    ServiceID id;
+    std::string name;
+    std::unique_ptr<Service> instance;
+  };
+
+  HostImpl(const HostImpl&) = delete;
+  HostImpl& operator=(const HostImpl&) = delete;
+
+  bool Initialize(const char* socket_name);
+  void OnReceivedFrame(ClientConnection*, const Frame&);
+  void OnBindService(ClientConnection*, const Frame&);
+  void OnInvokeMethod(ClientConnection*, const Frame&);
+  void ReplyToMethodInvocation(ClientID, RequestID, AsyncResult<ProtoMessage>);
+  const ExposedService* GetServiceByName(const std::string&);
+
+  static void SendFrame(ClientConnection*, const Frame&, int fd = -1);
+
+  base::TaskRunner* const task_runner_;
+  std::map<ServiceID, ExposedService> services_;
+  std::unique_ptr<base::UnixSocket> sock_;  // The listening socket.
+  std::map<ClientID, std::unique_ptr<ClientConnection>> clients_;
+  std::map<base::UnixSocket*, ClientConnection*> clients_by_socket_;
+  ServiceID last_service_id_ = 0;
+  ClientID last_client_id_ = 0;
+  PERFETTO_THREAD_CHECKER(thread_checker_)
+  base::WeakPtrFactory<HostImpl> weak_ptr_factory_;  // Keep last.
+};
+
+}  // namespace ipc
+}  // namespace perfetto
+
+#endif  // SRC_IPC_HOST_IMPL_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/ipc/host_impl.h"
+
+#include <inttypes.h>
+
+#include <algorithm>
+#include <utility>
+
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_descriptor.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/ipc/wire_protocol.gen.h"
+
+// TODO(primiano): put limits on #connections/uid and req. queue (b/69093705).
+
+namespace perfetto {
+namespace ipc {
+
+namespace {
+
+constexpr base::SockFamily kHostSockFamily =
+    kUseTCPSocket ? base::SockFamily::kInet : base::SockFamily::kUnix;
+
+uid_t GetPosixPeerUid(base::UnixSocket* sock) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  base::ignore_result(sock);
+  // Unsupported. Must be != kInvalidUid or the PacketValidator will fail.
+  return 0;
+#else
+  return sock->peer_uid_posix();
+#endif
+}
+
+}  // namespace
+
+// static
+std::unique_ptr<Host> Host::CreateInstance(const char* socket_name,
+                                           base::TaskRunner* task_runner) {
+  std::unique_ptr<HostImpl> host(new HostImpl(socket_name, task_runner));
+  if (!host->sock() || !host->sock()->is_listening())
+    return nullptr;
+  return std::unique_ptr<Host>(std::move(host));
+}
+
+// static
+std::unique_ptr<Host> Host::CreateInstance(base::ScopedSocketHandle socket_fd,
+                                           base::TaskRunner* task_runner) {
+  std::unique_ptr<HostImpl> host(
+      new HostImpl(std::move(socket_fd), task_runner));
+  if (!host->sock() || !host->sock()->is_listening())
+    return nullptr;
+  return std::unique_ptr<Host>(std::move(host));
+}
+
+HostImpl::HostImpl(base::ScopedSocketHandle socket_fd,
+                   base::TaskRunner* task_runner)
+    : task_runner_(task_runner), weak_ptr_factory_(this) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  sock_ = base::UnixSocket::Listen(std::move(socket_fd), this, task_runner_,
+                                   kHostSockFamily, base::SockType::kStream);
+}
+
+HostImpl::HostImpl(const char* socket_name, base::TaskRunner* task_runner)
+    : task_runner_(task_runner), weak_ptr_factory_(this) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  sock_ = base::UnixSocket::Listen(socket_name, this, task_runner_,
+                                   kHostSockFamily, base::SockType::kStream);
+  if (!sock_) {
+    PERFETTO_PLOG("Failed to create %s", socket_name);
+  }
+}
+
+HostImpl::~HostImpl() = default;
+
+bool HostImpl::ExposeService(std::unique_ptr<Service> service) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  const std::string& service_name = service->GetDescriptor().service_name;
+  if (GetServiceByName(service_name)) {
+    PERFETTO_DLOG("Duplicate ExposeService(): %s", service_name.c_str());
+    return false;
+  }
+  ServiceID sid = ++last_service_id_;
+  ExposedService exposed_service(sid, service_name, std::move(service));
+  services_.emplace(sid, std::move(exposed_service));
+  return true;
+}
+
+void HostImpl::OnNewIncomingConnection(
+    base::UnixSocket*,
+    std::unique_ptr<base::UnixSocket> new_conn) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  std::unique_ptr<ClientConnection> client(new ClientConnection());
+  ClientID client_id = ++last_client_id_;
+  clients_by_socket_[new_conn.get()] = client.get();
+  client->id = client_id;
+  client->sock = std::move(new_conn);
+  // Watchdog is 30 seconds, so set the socket timeout to 10 seconds.
+  client->sock->SetTxTimeout(10000);
+  clients_[client_id] = std::move(client);
+}
+
+void HostImpl::OnDataAvailable(base::UnixSocket* sock) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto it = clients_by_socket_.find(sock);
+  if (it == clients_by_socket_.end())
+    return;
+  ClientConnection* client = it->second;
+  BufferedFrameDeserializer& frame_deserializer = client->frame_deserializer;
+
+  size_t rsize;
+  do {
+    auto buf = frame_deserializer.BeginReceive();
+    base::ScopedFile fd;
+    rsize = client->sock->Receive(buf.data, buf.size, &fd);
+    if (fd) {
+      PERFETTO_DCHECK(!client->received_fd);
+      client->received_fd = std::move(fd);
+    }
+    if (!frame_deserializer.EndReceive(rsize))
+      return OnDisconnect(client->sock.get());
+  } while (rsize > 0);
+
+  for (;;) {
+    std::unique_ptr<Frame> frame = frame_deserializer.PopNextFrame();
+    if (!frame)
+      break;
+    OnReceivedFrame(client, *frame);
+  }
+}
+
+void HostImpl::OnReceivedFrame(ClientConnection* client,
+                               const Frame& req_frame) {
+  if (req_frame.has_msg_bind_service())
+    return OnBindService(client, req_frame);
+  if (req_frame.has_msg_invoke_method())
+    return OnInvokeMethod(client, req_frame);
+
+  PERFETTO_DLOG("Received invalid RPC frame from client %" PRIu64, client->id);
+  Frame reply_frame;
+  reply_frame.set_request_id(req_frame.request_id());
+  reply_frame.mutable_msg_request_error()->set_error("unknown request");
+  SendFrame(client, reply_frame);
+}
+
+void HostImpl::OnBindService(ClientConnection* client, const Frame& req_frame) {
+  // Binding a service doesn't do anything major. It just returns back the
+  // service id and its method map.
+  const Frame::BindService& req = req_frame.msg_bind_service();
+  Frame reply_frame;
+  reply_frame.set_request_id(req_frame.request_id());
+  auto* reply = reply_frame.mutable_msg_bind_service_reply();
+  const ExposedService* service = GetServiceByName(req.service_name());
+  if (service) {
+    reply->set_success(true);
+    reply->set_service_id(service->id);
+    uint32_t method_id = 1;  // method ids start at index 1.
+    for (const auto& desc_method : service->instance->GetDescriptor().methods) {
+      Frame::BindServiceReply::MethodInfo* method_info = reply->add_methods();
+      method_info->set_name(desc_method.name);
+      method_info->set_id(method_id++);
+    }
+  }
+  SendFrame(client, reply_frame);
+}
+
+void HostImpl::OnInvokeMethod(ClientConnection* client,
+                              const Frame& req_frame) {
+  const Frame::InvokeMethod& req = req_frame.msg_invoke_method();
+  Frame reply_frame;
+  RequestID request_id = req_frame.request_id();
+  reply_frame.set_request_id(request_id);
+  reply_frame.mutable_msg_invoke_method_reply()->set_success(false);
+  auto svc_it = services_.find(req.service_id());
+  if (svc_it == services_.end())
+    return SendFrame(client, reply_frame);  // |success| == false by default.
+
+  Service* service = svc_it->second.instance.get();
+  const ServiceDescriptor& svc = service->GetDescriptor();
+  const auto& methods = svc.methods;
+  const uint32_t method_id = req.method_id();
+  if (method_id == 0 || method_id > methods.size())
+    return SendFrame(client, reply_frame);
+
+  const ServiceDescriptor::Method& method = methods[method_id - 1];
+  std::unique_ptr<ProtoMessage> decoded_req_args(
+      method.request_proto_decoder(req.args_proto()));
+  if (!decoded_req_args)
+    return SendFrame(client, reply_frame);
+
+  Deferred<ProtoMessage> deferred_reply;
+  base::WeakPtr<HostImpl> host_weak_ptr = weak_ptr_factory_.GetWeakPtr();
+  ClientID client_id = client->id;
+
+  if (!req.drop_reply()) {
+    deferred_reply.Bind([host_weak_ptr, client_id,
+                         request_id](AsyncResult<ProtoMessage> reply) {
+      if (!host_weak_ptr)
+        return;  // The reply came too late, the HostImpl has gone.
+      host_weak_ptr->ReplyToMethodInvocation(client_id, request_id,
+                                             std::move(reply));
+    });
+  }
+
+  service->client_info_ =
+      ClientInfo(client->id, GetPosixPeerUid(client->sock.get()));
+  service->received_fd_ = &client->received_fd;
+  method.invoker(service, *decoded_req_args, std::move(deferred_reply));
+  service->received_fd_ = nullptr;
+  service->client_info_ = ClientInfo();
+}
+
+void HostImpl::ReplyToMethodInvocation(ClientID client_id,
+                                       RequestID request_id,
+                                       AsyncResult<ProtoMessage> reply) {
+  auto client_iter = clients_.find(client_id);
+  if (client_iter == clients_.end())
+    return;  // client has disconnected by the time we got the async reply.
+
+  ClientConnection* client = client_iter->second.get();
+  Frame reply_frame;
+  reply_frame.set_request_id(request_id);
+
+  // TODO(fmayer): add a test to guarantee that the reply is consumed within the
+  // same call stack and not kept around. ConsumerIPCService::OnTraceData()
+  // relies on this behavior.
+  auto* reply_frame_data = reply_frame.mutable_msg_invoke_method_reply();
+  reply_frame_data->set_has_more(reply.has_more());
+  if (reply.success()) {
+    std::string reply_proto = reply->SerializeAsString();
+    reply_frame_data->set_reply_proto(reply_proto);
+    reply_frame_data->set_success(true);
+  }
+  SendFrame(client, reply_frame, reply.fd());
+}
+
+// static
+void HostImpl::SendFrame(ClientConnection* client, const Frame& frame, int fd) {
+  std::string buf = BufferedFrameDeserializer::Serialize(frame);
+
+  // When a new Client connects in OnNewClientConnection we set a timeout on
+  // Send (see call to SetTxTimeout).
+  //
+  // The old behaviour was to do a blocking I/O call, which caused crashes from
+  // misbehaving producers (see b/169051440).
+  bool res = client->sock->Send(buf.data(), buf.size(), fd);
+  // If we timeout |res| will be false, but the UnixSocket will have called
+  // UnixSocket::ShutDown() and thus |is_connected()| is false.
+  PERFETTO_CHECK(res || !client->sock->is_connected());
+}
+
+void HostImpl::OnDisconnect(base::UnixSocket* sock) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  auto it = clients_by_socket_.find(sock);
+  if (it == clients_by_socket_.end())
+    return;
+  ClientID client_id = it->second->id;
+
+  ClientInfo client_info(client_id, GetPosixPeerUid(sock));
+  clients_by_socket_.erase(it);
+  PERFETTO_DCHECK(clients_.count(client_id));
+  clients_.erase(client_id);
+
+  for (const auto& service_it : services_) {
+    Service& service = *service_it.second.instance;
+    service.client_info_ = client_info;
+    service.OnClientDisconnected();
+    service.client_info_ = ClientInfo();
+  }
+}
+
+const HostImpl::ExposedService* HostImpl::GetServiceByName(
+    const std::string& name) {
+  // This could be optimized by using another map<name,ServiceID>. However this
+  // is used only by Bind/ExposeService that are quite rare (once per client
+  // connection and once per service instance), not worth it.
+  for (const auto& it : services_) {
+    if (it.second.name == name)
+      return &it.second;
+  }
+  return nullptr;
+}
+
+HostImpl::ExposedService::ExposedService(ServiceID id_,
+                                         const std::string& name_,
+                                         std::unique_ptr<Service> instance_)
+    : id(id_), name(name_), instance(std::move(instance_)) {}
+
+HostImpl::ExposedService::ExposedService(ExposedService&&) noexcept = default;
+HostImpl::ExposedService& HostImpl::ExposedService::operator=(
+    HostImpl::ExposedService&&) = default;
+HostImpl::ExposedService::~ExposedService() = default;
+
+HostImpl::ClientConnection::~ClientConnection() = default;
+
+}  // namespace ipc
+}  // namespace perfetto
+// gen_amalgamated begin source: gen/protos/perfetto/ipc/consumer_port.ipc.cc
+// gen_amalgamated begin header: gen/protos/perfetto/ipc/consumer_port.ipc.h
+// DO NOT EDIT. Autogenerated by Perfetto IPC
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_H_
+
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_descriptor.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/ipc/consumer_port.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/observable_events.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_state.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/tracing_service_capabilities.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/trace_stats.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/trace_config.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class ConsumerPort : public ::perfetto::ipc::Service {
+ private:
+  static ::perfetto::ipc::ServiceDescriptor* NewDescriptor();
+
+ public:
+  ~ConsumerPort() override;
+
+  static const ::perfetto::ipc::ServiceDescriptor& GetDescriptorStatic();
+
+  // Service implementation.
+  const ::perfetto::ipc::ServiceDescriptor& GetDescriptor() override;
+
+  // Methods from the .proto file
+  using DeferredEnableTracingResponse = ::perfetto::ipc::Deferred<EnableTracingResponse>;
+  virtual void EnableTracing(const EnableTracingRequest&, DeferredEnableTracingResponse) = 0;
+
+  using DeferredDisableTracingResponse = ::perfetto::ipc::Deferred<DisableTracingResponse>;
+  virtual void DisableTracing(const DisableTracingRequest&, DeferredDisableTracingResponse) = 0;
+
+  using DeferredReadBuffersResponse = ::perfetto::ipc::Deferred<ReadBuffersResponse>;
+  virtual void ReadBuffers(const ReadBuffersRequest&, DeferredReadBuffersResponse) = 0;
+
+  using DeferredFreeBuffersResponse = ::perfetto::ipc::Deferred<FreeBuffersResponse>;
+  virtual void FreeBuffers(const FreeBuffersRequest&, DeferredFreeBuffersResponse) = 0;
+
+  using DeferredFlushResponse = ::perfetto::ipc::Deferred<FlushResponse>;
+  virtual void Flush(const FlushRequest&, DeferredFlushResponse) = 0;
+
+  using DeferredStartTracingResponse = ::perfetto::ipc::Deferred<StartTracingResponse>;
+  virtual void StartTracing(const StartTracingRequest&, DeferredStartTracingResponse) = 0;
+
+  using DeferredChangeTraceConfigResponse = ::perfetto::ipc::Deferred<ChangeTraceConfigResponse>;
+  virtual void ChangeTraceConfig(const ChangeTraceConfigRequest&, DeferredChangeTraceConfigResponse) = 0;
+
+  using DeferredDetachResponse = ::perfetto::ipc::Deferred<DetachResponse>;
+  virtual void Detach(const DetachRequest&, DeferredDetachResponse) = 0;
+
+  using DeferredAttachResponse = ::perfetto::ipc::Deferred<AttachResponse>;
+  virtual void Attach(const AttachRequest&, DeferredAttachResponse) = 0;
+
+  using DeferredGetTraceStatsResponse = ::perfetto::ipc::Deferred<GetTraceStatsResponse>;
+  virtual void GetTraceStats(const GetTraceStatsRequest&, DeferredGetTraceStatsResponse) = 0;
+
+  using DeferredObserveEventsResponse = ::perfetto::ipc::Deferred<ObserveEventsResponse>;
+  virtual void ObserveEvents(const ObserveEventsRequest&, DeferredObserveEventsResponse) = 0;
+
+  using DeferredQueryServiceStateResponse = ::perfetto::ipc::Deferred<QueryServiceStateResponse>;
+  virtual void QueryServiceState(const QueryServiceStateRequest&, DeferredQueryServiceStateResponse) = 0;
+
+  using DeferredQueryCapabilitiesResponse = ::perfetto::ipc::Deferred<QueryCapabilitiesResponse>;
+  virtual void QueryCapabilities(const QueryCapabilitiesRequest&, DeferredQueryCapabilitiesResponse) = 0;
+
+  using DeferredSaveTraceForBugreportResponse = ::perfetto::ipc::Deferred<SaveTraceForBugreportResponse>;
+  virtual void SaveTraceForBugreport(const SaveTraceForBugreportRequest&, DeferredSaveTraceForBugreportResponse) = 0;
+
+};
+
+
+class ConsumerPortProxy : public ::perfetto::ipc::ServiceProxy {
+ public:
+   explicit ConsumerPortProxy(::perfetto::ipc::ServiceProxy::EventListener*);
+   ~ConsumerPortProxy() override;
+
+  // ServiceProxy implementation.
+  const ::perfetto::ipc::ServiceDescriptor& GetDescriptor() override;
+
+  // Methods from the .proto file
+  using DeferredEnableTracingResponse = ::perfetto::ipc::Deferred<EnableTracingResponse>;
+  void EnableTracing(const EnableTracingRequest&, DeferredEnableTracingResponse, int fd = -1);
+
+  using DeferredDisableTracingResponse = ::perfetto::ipc::Deferred<DisableTracingResponse>;
+  void DisableTracing(const DisableTracingRequest&, DeferredDisableTracingResponse, int fd = -1);
+
+  using DeferredReadBuffersResponse = ::perfetto::ipc::Deferred<ReadBuffersResponse>;
+  void ReadBuffers(const ReadBuffersRequest&, DeferredReadBuffersResponse, int fd = -1);
+
+  using DeferredFreeBuffersResponse = ::perfetto::ipc::Deferred<FreeBuffersResponse>;
+  void FreeBuffers(const FreeBuffersRequest&, DeferredFreeBuffersResponse, int fd = -1);
+
+  using DeferredFlushResponse = ::perfetto::ipc::Deferred<FlushResponse>;
+  void Flush(const FlushRequest&, DeferredFlushResponse, int fd = -1);
+
+  using DeferredStartTracingResponse = ::perfetto::ipc::Deferred<StartTracingResponse>;
+  void StartTracing(const StartTracingRequest&, DeferredStartTracingResponse, int fd = -1);
+
+  using DeferredChangeTraceConfigResponse = ::perfetto::ipc::Deferred<ChangeTraceConfigResponse>;
+  void ChangeTraceConfig(const ChangeTraceConfigRequest&, DeferredChangeTraceConfigResponse, int fd = -1);
+
+  using DeferredDetachResponse = ::perfetto::ipc::Deferred<DetachResponse>;
+  void Detach(const DetachRequest&, DeferredDetachResponse, int fd = -1);
+
+  using DeferredAttachResponse = ::perfetto::ipc::Deferred<AttachResponse>;
+  void Attach(const AttachRequest&, DeferredAttachResponse, int fd = -1);
+
+  using DeferredGetTraceStatsResponse = ::perfetto::ipc::Deferred<GetTraceStatsResponse>;
+  void GetTraceStats(const GetTraceStatsRequest&, DeferredGetTraceStatsResponse, int fd = -1);
+
+  using DeferredObserveEventsResponse = ::perfetto::ipc::Deferred<ObserveEventsResponse>;
+  void ObserveEvents(const ObserveEventsRequest&, DeferredObserveEventsResponse, int fd = -1);
+
+  using DeferredQueryServiceStateResponse = ::perfetto::ipc::Deferred<QueryServiceStateResponse>;
+  void QueryServiceState(const QueryServiceStateRequest&, DeferredQueryServiceStateResponse, int fd = -1);
+
+  using DeferredQueryCapabilitiesResponse = ::perfetto::ipc::Deferred<QueryCapabilitiesResponse>;
+  void QueryCapabilities(const QueryCapabilitiesRequest&, DeferredQueryCapabilitiesResponse, int fd = -1);
+
+  using DeferredSaveTraceForBugreportResponse = ::perfetto::ipc::Deferred<SaveTraceForBugreportResponse>;
+  void SaveTraceForBugreport(const SaveTraceForBugreportRequest&, DeferredSaveTraceForBugreportResponse, int fd = -1);
+
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_H_
+// gen_amalgamated begin header: include/perfetto/ext/ipc/codegen_helpers.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This file is only meant to be included in autogenerated .cc files.
+
+#ifndef INCLUDE_PERFETTO_EXT_IPC_CODEGEN_HELPERS_H_
+#define INCLUDE_PERFETTO_EXT_IPC_CODEGEN_HELPERS_H_
+
+#include <memory>
+
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
+
+// A templated protobuf message decoder. Returns nullptr in case of failure.
+template <typename T>
+::std::unique_ptr<::perfetto::ipc::ProtoMessage> _IPC_Decoder(
+    const std::string& proto_data) {
+  ::std::unique_ptr<::perfetto::ipc::ProtoMessage> msg(new T());
+  if (msg->ParseFromString(proto_data))
+    return msg;
+  return nullptr;
+}
+
+// Templated method dispatcher. Used to obtain a function pointer to a given
+// IPC method (Method) of a given service (TSvc) that can be invoked by the
+// host-side machinery starting from a generic Service pointer and a generic
+// ProtoMessage request argument.
+template <typename TSvc,    // Type of the actual Service subclass.
+          typename TReq,    // Type of the request argument.
+          typename TReply,  // Type of the reply argument.
+          void (TSvc::*Method)(const TReq&, ::perfetto::ipc::Deferred<TReply>)>
+void _IPC_Invoker(::perfetto::ipc::Service* s,
+                  const ::perfetto::ipc::ProtoMessage& req,
+                  ::perfetto::ipc::DeferredBase reply) {
+  (*static_cast<TSvc*>(s).*Method)(
+      static_cast<const TReq&>(req),
+      ::perfetto::ipc::Deferred<TReply>(::std::move(reply)));
+}
+
+#endif  // INCLUDE_PERFETTO_EXT_IPC_CODEGEN_HELPERS_H_
+// DO NOT EDIT. Autogenerated by Perfetto IPC
+// gen_amalgamated expanded: #include "protos/perfetto/ipc/consumer_port.ipc.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/codegen_helpers.h"
+
+#include <memory>
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+::perfetto::ipc::ServiceDescriptor* ConsumerPort::NewDescriptor() {
+  auto* desc = new ::perfetto::ipc::ServiceDescriptor();
+  desc->service_name = "ConsumerPort";
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "EnableTracing",
+     &_IPC_Decoder<EnableTracingRequest>,
+     &_IPC_Decoder<EnableTracingResponse>,
+     &_IPC_Invoker<ConsumerPort, EnableTracingRequest, EnableTracingResponse, &ConsumerPort::EnableTracing>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "DisableTracing",
+     &_IPC_Decoder<DisableTracingRequest>,
+     &_IPC_Decoder<DisableTracingResponse>,
+     &_IPC_Invoker<ConsumerPort, DisableTracingRequest, DisableTracingResponse, &ConsumerPort::DisableTracing>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "ReadBuffers",
+     &_IPC_Decoder<ReadBuffersRequest>,
+     &_IPC_Decoder<ReadBuffersResponse>,
+     &_IPC_Invoker<ConsumerPort, ReadBuffersRequest, ReadBuffersResponse, &ConsumerPort::ReadBuffers>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "FreeBuffers",
+     &_IPC_Decoder<FreeBuffersRequest>,
+     &_IPC_Decoder<FreeBuffersResponse>,
+     &_IPC_Invoker<ConsumerPort, FreeBuffersRequest, FreeBuffersResponse, &ConsumerPort::FreeBuffers>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "Flush",
+     &_IPC_Decoder<FlushRequest>,
+     &_IPC_Decoder<FlushResponse>,
+     &_IPC_Invoker<ConsumerPort, FlushRequest, FlushResponse, &ConsumerPort::Flush>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "StartTracing",
+     &_IPC_Decoder<StartTracingRequest>,
+     &_IPC_Decoder<StartTracingResponse>,
+     &_IPC_Invoker<ConsumerPort, StartTracingRequest, StartTracingResponse, &ConsumerPort::StartTracing>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "ChangeTraceConfig",
+     &_IPC_Decoder<ChangeTraceConfigRequest>,
+     &_IPC_Decoder<ChangeTraceConfigResponse>,
+     &_IPC_Invoker<ConsumerPort, ChangeTraceConfigRequest, ChangeTraceConfigResponse, &ConsumerPort::ChangeTraceConfig>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "Detach",
+     &_IPC_Decoder<DetachRequest>,
+     &_IPC_Decoder<DetachResponse>,
+     &_IPC_Invoker<ConsumerPort, DetachRequest, DetachResponse, &ConsumerPort::Detach>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "Attach",
+     &_IPC_Decoder<AttachRequest>,
+     &_IPC_Decoder<AttachResponse>,
+     &_IPC_Invoker<ConsumerPort, AttachRequest, AttachResponse, &ConsumerPort::Attach>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "GetTraceStats",
+     &_IPC_Decoder<GetTraceStatsRequest>,
+     &_IPC_Decoder<GetTraceStatsResponse>,
+     &_IPC_Invoker<ConsumerPort, GetTraceStatsRequest, GetTraceStatsResponse, &ConsumerPort::GetTraceStats>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "ObserveEvents",
+     &_IPC_Decoder<ObserveEventsRequest>,
+     &_IPC_Decoder<ObserveEventsResponse>,
+     &_IPC_Invoker<ConsumerPort, ObserveEventsRequest, ObserveEventsResponse, &ConsumerPort::ObserveEvents>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "QueryServiceState",
+     &_IPC_Decoder<QueryServiceStateRequest>,
+     &_IPC_Decoder<QueryServiceStateResponse>,
+     &_IPC_Invoker<ConsumerPort, QueryServiceStateRequest, QueryServiceStateResponse, &ConsumerPort::QueryServiceState>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "QueryCapabilities",
+     &_IPC_Decoder<QueryCapabilitiesRequest>,
+     &_IPC_Decoder<QueryCapabilitiesResponse>,
+     &_IPC_Invoker<ConsumerPort, QueryCapabilitiesRequest, QueryCapabilitiesResponse, &ConsumerPort::QueryCapabilities>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "SaveTraceForBugreport",
+     &_IPC_Decoder<SaveTraceForBugreportRequest>,
+     &_IPC_Decoder<SaveTraceForBugreportResponse>,
+     &_IPC_Invoker<ConsumerPort, SaveTraceForBugreportRequest, SaveTraceForBugreportResponse, &ConsumerPort::SaveTraceForBugreport>});
+  desc->methods.shrink_to_fit();
+  return desc;
+}
+
+
+const ::perfetto::ipc::ServiceDescriptor& ConsumerPort::GetDescriptorStatic() {
+  static auto* instance = NewDescriptor();
+  return *instance;
+}
+
+// Host-side definitions.
+ConsumerPort::~ConsumerPort() = default;
+
+const ::perfetto::ipc::ServiceDescriptor& ConsumerPort::GetDescriptor() {
+  return GetDescriptorStatic();
+}
+
+// Client-side definitions.
+ConsumerPortProxy::ConsumerPortProxy(::perfetto::ipc::ServiceProxy::EventListener* event_listener)
+    : ::perfetto::ipc::ServiceProxy(event_listener) {}
+
+ConsumerPortProxy::~ConsumerPortProxy() = default;
+
+const ::perfetto::ipc::ServiceDescriptor& ConsumerPortProxy::GetDescriptor() {
+  return ConsumerPort::GetDescriptorStatic();
+}
+
+void ConsumerPortProxy::EnableTracing(const EnableTracingRequest& request, DeferredEnableTracingResponse reply, int fd) {
+  BeginInvoke("EnableTracing", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ConsumerPortProxy::DisableTracing(const DisableTracingRequest& request, DeferredDisableTracingResponse reply, int fd) {
+  BeginInvoke("DisableTracing", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ConsumerPortProxy::ReadBuffers(const ReadBuffersRequest& request, DeferredReadBuffersResponse reply, int fd) {
+  BeginInvoke("ReadBuffers", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ConsumerPortProxy::FreeBuffers(const FreeBuffersRequest& request, DeferredFreeBuffersResponse reply, int fd) {
+  BeginInvoke("FreeBuffers", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ConsumerPortProxy::Flush(const FlushRequest& request, DeferredFlushResponse reply, int fd) {
+  BeginInvoke("Flush", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ConsumerPortProxy::StartTracing(const StartTracingRequest& request, DeferredStartTracingResponse reply, int fd) {
+  BeginInvoke("StartTracing", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ConsumerPortProxy::ChangeTraceConfig(const ChangeTraceConfigRequest& request, DeferredChangeTraceConfigResponse reply, int fd) {
+  BeginInvoke("ChangeTraceConfig", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ConsumerPortProxy::Detach(const DetachRequest& request, DeferredDetachResponse reply, int fd) {
+  BeginInvoke("Detach", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ConsumerPortProxy::Attach(const AttachRequest& request, DeferredAttachResponse reply, int fd) {
+  BeginInvoke("Attach", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ConsumerPortProxy::GetTraceStats(const GetTraceStatsRequest& request, DeferredGetTraceStatsResponse reply, int fd) {
+  BeginInvoke("GetTraceStats", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ConsumerPortProxy::ObserveEvents(const ObserveEventsRequest& request, DeferredObserveEventsResponse reply, int fd) {
+  BeginInvoke("ObserveEvents", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ConsumerPortProxy::QueryServiceState(const QueryServiceStateRequest& request, DeferredQueryServiceStateResponse reply, int fd) {
+  BeginInvoke("QueryServiceState", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ConsumerPortProxy::QueryCapabilities(const QueryCapabilitiesRequest& request, DeferredQueryCapabilitiesResponse reply, int fd) {
+  BeginInvoke("QueryCapabilities", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ConsumerPortProxy::SaveTraceForBugreport(const SaveTraceForBugreportRequest& request, DeferredSaveTraceForBugreportResponse reply, int fd) {
+  BeginInvoke("SaveTraceForBugreport", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+// gen_amalgamated begin source: gen/protos/perfetto/ipc/producer_port.ipc.cc
+// gen_amalgamated begin header: gen/protos/perfetto/ipc/producer_port.ipc.h
+// DO NOT EDIT. Autogenerated by Perfetto IPC
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_H_
+
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/deferred.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_descriptor.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/ipc/producer_port.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/commit_data_request.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class ProducerPort : public ::perfetto::ipc::Service {
+ private:
+  static ::perfetto::ipc::ServiceDescriptor* NewDescriptor();
+
+ public:
+  ~ProducerPort() override;
+
+  static const ::perfetto::ipc::ServiceDescriptor& GetDescriptorStatic();
+
+  // Service implementation.
+  const ::perfetto::ipc::ServiceDescriptor& GetDescriptor() override;
+
+  // Methods from the .proto file
+  using DeferredInitializeConnectionResponse = ::perfetto::ipc::Deferred<InitializeConnectionResponse>;
+  virtual void InitializeConnection(const InitializeConnectionRequest&, DeferredInitializeConnectionResponse) = 0;
+
+  using DeferredRegisterDataSourceResponse = ::perfetto::ipc::Deferred<RegisterDataSourceResponse>;
+  virtual void RegisterDataSource(const RegisterDataSourceRequest&, DeferredRegisterDataSourceResponse) = 0;
+
+  using DeferredUnregisterDataSourceResponse = ::perfetto::ipc::Deferred<UnregisterDataSourceResponse>;
+  virtual void UnregisterDataSource(const UnregisterDataSourceRequest&, DeferredUnregisterDataSourceResponse) = 0;
+
+  using DeferredCommitDataResponse = ::perfetto::ipc::Deferred<CommitDataResponse>;
+  virtual void CommitData(const CommitDataRequest&, DeferredCommitDataResponse) = 0;
+
+  using DeferredGetAsyncCommandResponse = ::perfetto::ipc::Deferred<GetAsyncCommandResponse>;
+  virtual void GetAsyncCommand(const GetAsyncCommandRequest&, DeferredGetAsyncCommandResponse) = 0;
+
+  using DeferredRegisterTraceWriterResponse = ::perfetto::ipc::Deferred<RegisterTraceWriterResponse>;
+  virtual void RegisterTraceWriter(const RegisterTraceWriterRequest&, DeferredRegisterTraceWriterResponse) = 0;
+
+  using DeferredUnregisterTraceWriterResponse = ::perfetto::ipc::Deferred<UnregisterTraceWriterResponse>;
+  virtual void UnregisterTraceWriter(const UnregisterTraceWriterRequest&, DeferredUnregisterTraceWriterResponse) = 0;
+
+  using DeferredNotifyDataSourceStartedResponse = ::perfetto::ipc::Deferred<NotifyDataSourceStartedResponse>;
+  virtual void NotifyDataSourceStarted(const NotifyDataSourceStartedRequest&, DeferredNotifyDataSourceStartedResponse) = 0;
+
+  using DeferredNotifyDataSourceStoppedResponse = ::perfetto::ipc::Deferred<NotifyDataSourceStoppedResponse>;
+  virtual void NotifyDataSourceStopped(const NotifyDataSourceStoppedRequest&, DeferredNotifyDataSourceStoppedResponse) = 0;
+
+  using DeferredActivateTriggersResponse = ::perfetto::ipc::Deferred<ActivateTriggersResponse>;
+  virtual void ActivateTriggers(const ActivateTriggersRequest&, DeferredActivateTriggersResponse) = 0;
+
+  using DeferredSyncResponse = ::perfetto::ipc::Deferred<SyncResponse>;
+  virtual void Sync(const SyncRequest&, DeferredSyncResponse) = 0;
+
+};
+
+
+class ProducerPortProxy : public ::perfetto::ipc::ServiceProxy {
+ public:
+   explicit ProducerPortProxy(::perfetto::ipc::ServiceProxy::EventListener*);
+   ~ProducerPortProxy() override;
+
+  // ServiceProxy implementation.
+  const ::perfetto::ipc::ServiceDescriptor& GetDescriptor() override;
+
+  // Methods from the .proto file
+  using DeferredInitializeConnectionResponse = ::perfetto::ipc::Deferred<InitializeConnectionResponse>;
+  void InitializeConnection(const InitializeConnectionRequest&, DeferredInitializeConnectionResponse, int fd = -1);
+
+  using DeferredRegisterDataSourceResponse = ::perfetto::ipc::Deferred<RegisterDataSourceResponse>;
+  void RegisterDataSource(const RegisterDataSourceRequest&, DeferredRegisterDataSourceResponse, int fd = -1);
+
+  using DeferredUnregisterDataSourceResponse = ::perfetto::ipc::Deferred<UnregisterDataSourceResponse>;
+  void UnregisterDataSource(const UnregisterDataSourceRequest&, DeferredUnregisterDataSourceResponse, int fd = -1);
+
+  using DeferredCommitDataResponse = ::perfetto::ipc::Deferred<CommitDataResponse>;
+  void CommitData(const CommitDataRequest&, DeferredCommitDataResponse, int fd = -1);
+
+  using DeferredGetAsyncCommandResponse = ::perfetto::ipc::Deferred<GetAsyncCommandResponse>;
+  void GetAsyncCommand(const GetAsyncCommandRequest&, DeferredGetAsyncCommandResponse, int fd = -1);
+
+  using DeferredRegisterTraceWriterResponse = ::perfetto::ipc::Deferred<RegisterTraceWriterResponse>;
+  void RegisterTraceWriter(const RegisterTraceWriterRequest&, DeferredRegisterTraceWriterResponse, int fd = -1);
+
+  using DeferredUnregisterTraceWriterResponse = ::perfetto::ipc::Deferred<UnregisterTraceWriterResponse>;
+  void UnregisterTraceWriter(const UnregisterTraceWriterRequest&, DeferredUnregisterTraceWriterResponse, int fd = -1);
+
+  using DeferredNotifyDataSourceStartedResponse = ::perfetto::ipc::Deferred<NotifyDataSourceStartedResponse>;
+  void NotifyDataSourceStarted(const NotifyDataSourceStartedRequest&, DeferredNotifyDataSourceStartedResponse, int fd = -1);
+
+  using DeferredNotifyDataSourceStoppedResponse = ::perfetto::ipc::Deferred<NotifyDataSourceStoppedResponse>;
+  void NotifyDataSourceStopped(const NotifyDataSourceStoppedRequest&, DeferredNotifyDataSourceStoppedResponse, int fd = -1);
+
+  using DeferredActivateTriggersResponse = ::perfetto::ipc::Deferred<ActivateTriggersResponse>;
+  void ActivateTriggers(const ActivateTriggersRequest&, DeferredActivateTriggersResponse, int fd = -1);
+
+  using DeferredSyncResponse = ::perfetto::ipc::Deferred<SyncResponse>;
+  void Sync(const SyncRequest&, DeferredSyncResponse, int fd = -1);
+
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_H_
+// DO NOT EDIT. Autogenerated by Perfetto IPC
+// gen_amalgamated expanded: #include "protos/perfetto/ipc/producer_port.ipc.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/codegen_helpers.h"
+
+#include <memory>
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+::perfetto::ipc::ServiceDescriptor* ProducerPort::NewDescriptor() {
+  auto* desc = new ::perfetto::ipc::ServiceDescriptor();
+  desc->service_name = "ProducerPort";
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "InitializeConnection",
+     &_IPC_Decoder<InitializeConnectionRequest>,
+     &_IPC_Decoder<InitializeConnectionResponse>,
+     &_IPC_Invoker<ProducerPort, InitializeConnectionRequest, InitializeConnectionResponse, &ProducerPort::InitializeConnection>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "RegisterDataSource",
+     &_IPC_Decoder<RegisterDataSourceRequest>,
+     &_IPC_Decoder<RegisterDataSourceResponse>,
+     &_IPC_Invoker<ProducerPort, RegisterDataSourceRequest, RegisterDataSourceResponse, &ProducerPort::RegisterDataSource>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "UnregisterDataSource",
+     &_IPC_Decoder<UnregisterDataSourceRequest>,
+     &_IPC_Decoder<UnregisterDataSourceResponse>,
+     &_IPC_Invoker<ProducerPort, UnregisterDataSourceRequest, UnregisterDataSourceResponse, &ProducerPort::UnregisterDataSource>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "CommitData",
+     &_IPC_Decoder<CommitDataRequest>,
+     &_IPC_Decoder<CommitDataResponse>,
+     &_IPC_Invoker<ProducerPort, CommitDataRequest, CommitDataResponse, &ProducerPort::CommitData>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "GetAsyncCommand",
+     &_IPC_Decoder<GetAsyncCommandRequest>,
+     &_IPC_Decoder<GetAsyncCommandResponse>,
+     &_IPC_Invoker<ProducerPort, GetAsyncCommandRequest, GetAsyncCommandResponse, &ProducerPort::GetAsyncCommand>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "RegisterTraceWriter",
+     &_IPC_Decoder<RegisterTraceWriterRequest>,
+     &_IPC_Decoder<RegisterTraceWriterResponse>,
+     &_IPC_Invoker<ProducerPort, RegisterTraceWriterRequest, RegisterTraceWriterResponse, &ProducerPort::RegisterTraceWriter>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "UnregisterTraceWriter",
+     &_IPC_Decoder<UnregisterTraceWriterRequest>,
+     &_IPC_Decoder<UnregisterTraceWriterResponse>,
+     &_IPC_Invoker<ProducerPort, UnregisterTraceWriterRequest, UnregisterTraceWriterResponse, &ProducerPort::UnregisterTraceWriter>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "NotifyDataSourceStarted",
+     &_IPC_Decoder<NotifyDataSourceStartedRequest>,
+     &_IPC_Decoder<NotifyDataSourceStartedResponse>,
+     &_IPC_Invoker<ProducerPort, NotifyDataSourceStartedRequest, NotifyDataSourceStartedResponse, &ProducerPort::NotifyDataSourceStarted>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "NotifyDataSourceStopped",
+     &_IPC_Decoder<NotifyDataSourceStoppedRequest>,
+     &_IPC_Decoder<NotifyDataSourceStoppedResponse>,
+     &_IPC_Invoker<ProducerPort, NotifyDataSourceStoppedRequest, NotifyDataSourceStoppedResponse, &ProducerPort::NotifyDataSourceStopped>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "ActivateTriggers",
+     &_IPC_Decoder<ActivateTriggersRequest>,
+     &_IPC_Decoder<ActivateTriggersResponse>,
+     &_IPC_Invoker<ProducerPort, ActivateTriggersRequest, ActivateTriggersResponse, &ProducerPort::ActivateTriggers>});
+
+  desc->methods.emplace_back(::perfetto::ipc::ServiceDescriptor::Method{
+     "Sync",
+     &_IPC_Decoder<SyncRequest>,
+     &_IPC_Decoder<SyncResponse>,
+     &_IPC_Invoker<ProducerPort, SyncRequest, SyncResponse, &ProducerPort::Sync>});
+  desc->methods.shrink_to_fit();
+  return desc;
+}
+
+
+const ::perfetto::ipc::ServiceDescriptor& ProducerPort::GetDescriptorStatic() {
+  static auto* instance = NewDescriptor();
+  return *instance;
+}
+
+// Host-side definitions.
+ProducerPort::~ProducerPort() = default;
+
+const ::perfetto::ipc::ServiceDescriptor& ProducerPort::GetDescriptor() {
+  return GetDescriptorStatic();
+}
+
+// Client-side definitions.
+ProducerPortProxy::ProducerPortProxy(::perfetto::ipc::ServiceProxy::EventListener* event_listener)
+    : ::perfetto::ipc::ServiceProxy(event_listener) {}
+
+ProducerPortProxy::~ProducerPortProxy() = default;
+
+const ::perfetto::ipc::ServiceDescriptor& ProducerPortProxy::GetDescriptor() {
+  return ProducerPort::GetDescriptorStatic();
+}
+
+void ProducerPortProxy::InitializeConnection(const InitializeConnectionRequest& request, DeferredInitializeConnectionResponse reply, int fd) {
+  BeginInvoke("InitializeConnection", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ProducerPortProxy::RegisterDataSource(const RegisterDataSourceRequest& request, DeferredRegisterDataSourceResponse reply, int fd) {
+  BeginInvoke("RegisterDataSource", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ProducerPortProxy::UnregisterDataSource(const UnregisterDataSourceRequest& request, DeferredUnregisterDataSourceResponse reply, int fd) {
+  BeginInvoke("UnregisterDataSource", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ProducerPortProxy::CommitData(const CommitDataRequest& request, DeferredCommitDataResponse reply, int fd) {
+  BeginInvoke("CommitData", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ProducerPortProxy::GetAsyncCommand(const GetAsyncCommandRequest& request, DeferredGetAsyncCommandResponse reply, int fd) {
+  BeginInvoke("GetAsyncCommand", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ProducerPortProxy::RegisterTraceWriter(const RegisterTraceWriterRequest& request, DeferredRegisterTraceWriterResponse reply, int fd) {
+  BeginInvoke("RegisterTraceWriter", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ProducerPortProxy::UnregisterTraceWriter(const UnregisterTraceWriterRequest& request, DeferredUnregisterTraceWriterResponse reply, int fd) {
+  BeginInvoke("UnregisterTraceWriter", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ProducerPortProxy::NotifyDataSourceStarted(const NotifyDataSourceStartedRequest& request, DeferredNotifyDataSourceStartedResponse reply, int fd) {
+  BeginInvoke("NotifyDataSourceStarted", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ProducerPortProxy::NotifyDataSourceStopped(const NotifyDataSourceStoppedRequest& request, DeferredNotifyDataSourceStoppedResponse reply, int fd) {
+  BeginInvoke("NotifyDataSourceStopped", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ProducerPortProxy::ActivateTriggers(const ActivateTriggersRequest& request, DeferredActivateTriggersResponse reply, int fd) {
+  BeginInvoke("ActivateTriggers", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+
+void ProducerPortProxy::Sync(const SyncRequest& request, DeferredSyncResponse reply, int fd) {
+  BeginInvoke("Sync", request, ::perfetto::ipc::DeferredBase(std::move(reply)),
+              fd);
+}
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+// gen_amalgamated begin source: src/tracing/ipc/default_socket.cc
+// gen_amalgamated begin header: include/perfetto/ext/tracing/ipc/default_socket.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACING_IPC_DEFAULT_SOCKET_H_
+#define INCLUDE_PERFETTO_EXT_TRACING_IPC_DEFAULT_SOCKET_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+
+PERFETTO_EXPORT const char* GetConsumerSocket();
+PERFETTO_EXPORT const char* GetProducerSocket();
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACING_IPC_DEFAULT_SOCKET_H_
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/default_socket.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+
+namespace perfetto {
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+// On non-Android platforms, check /run/perfetto/ before using /tmp/ as the
+// socket base directory.
+namespace {
+const char* kRunPerfettoBaseDir = "/run/perfetto/";
+
+bool UseRunPerfettoBaseDir() {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) ||   \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+  // Note that the trailing / in |kRunPerfettoBaseDir| ensures we are checking
+  // against a directory, not a file.
+  int res = PERFETTO_EINTR(access(kRunPerfettoBaseDir, X_OK));
+  if (!res)
+    return true;
+
+  // If the path doesn't exist (ENOENT), fail silently to the caller. Otherwise,
+  // fail with an explicit error message.
+  if (errno != ENOENT) {
+    PERFETTO_PLOG("%s exists but cannot be accessed. Falling back on /tmp/ ",
+                  kRunPerfettoBaseDir);
+  }
+  return false;
+#else
+  return false;
+#endif
+}
+
+}  // anonymous namespace
+#endif  // !PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+
+static_assert(kInvalidUid == ipc::kInvalidUid, "kInvalidUid mismatching");
+
+const char* GetProducerSocket() {
+  const char* name = getenv("PERFETTO_PRODUCER_SOCK_NAME");
+  if (name == nullptr) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    name = "127.0.0.1:32278";
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+    name = "/dev/socket/traced_producer";
+#else
+    // Use /run/perfetto if it exists. Then fallback to /tmp.
+    static const char* producer_socket =
+        UseRunPerfettoBaseDir() ? "/run/perfetto/traced-producer.sock"
+                                : "/tmp/perfetto-producer";
+    name = producer_socket;
+#endif
+  }
+  return name;
+}
+
+const char* GetConsumerSocket() {
+  const char* name = getenv("PERFETTO_CONSUMER_SOCK_NAME");
+  if (name == nullptr) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    name = "127.0.0.1:32279";
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+    name = "/dev/socket/traced_consumer";
+#else
+    // Use /run/perfetto if it exists. Then fallback to /tmp.
+    static const char* consumer_socket =
+        UseRunPerfettoBaseDir() ? "/run/perfetto/traced-consumer.sock"
+                                : "/tmp/perfetto-consumer";
+    name = consumer_socket;
+#endif
+  }
+  return name;
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/ipc/memfd.cc
+// gen_amalgamated begin header: src/tracing/ipc/memfd.h
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_IPC_MEMFD_H_
+#define SRC_TRACING_IPC_MEMFD_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+
+// Some android build bots use a sysroot that doesn't support memfd when
+// compiling for the host, so we define the flags we need ourselves.
+
+// from memfd.h
+#ifndef MFD_CLOEXEC
+#define MFD_CLOEXEC 0x0001U
+#define MFD_ALLOW_SEALING 0x0002U
+#endif
+
+// from fcntl.h
+#ifndef F_ADD_SEALS
+#define F_ADD_SEALS 1033
+#define F_GET_SEALS 1034
+#define F_SEAL_SEAL 0x0001
+#define F_SEAL_SHRINK 0x0002
+#define F_SEAL_GROW 0x0004
+#define F_SEAL_WRITE 0x0008
+#endif
+
+namespace perfetto {
+
+// Whether the operating system supports memfd.
+bool HasMemfdSupport();
+
+// Call memfd(2) if available on platform and return the fd as result. This call
+// also makes a kernel version check for safety on older kernels (b/116769556).
+// Returns an invalid ScopedFile on failure.
+base::ScopedFile CreateMemfd(const char* name, unsigned int flags);
+
+}  // namespace perfetto
+
+#endif  // SRC_TRACING_IPC_MEMFD_H_
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/tracing/ipc/memfd.h"
+
+#include <errno.h>
+
+#define PERFETTO_MEMFD_ENABLED()             \
+  PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
+      PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX)
+
+#if PERFETTO_MEMFD_ENABLED()
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/syscall.h>
+#include <sys/utsname.h>
+#include <unistd.h>
+
+// Some android build bots use a sysroot that doesn't support memfd when
+// compiling for the host, so we redefine it if necessary.
+#if !defined(__NR_memfd_create)
+#if defined(__x86_64__)
+#define __NR_memfd_create 319
+#elif defined(__i386__)
+#define __NR_memfd_create 356
+#elif defined(__aarch64__)
+#define __NR_memfd_create 279
+#elif defined(__arm__)
+#define __NR_memfd_create 385
+#else
+#error "unsupported sysroot without memfd support"
+#endif
+#endif  // !defined(__NR_memfd_create)
+
+namespace perfetto {
+bool HasMemfdSupport() {
+  static bool kSupportsMemfd = [] {
+    // Check kernel version supports memfd_create(). Some older kernels segfault
+    // executing memfd_create() rather than returning ENOSYS (b/116769556).
+    static constexpr int kRequiredMajor = 3;
+    static constexpr int kRequiredMinor = 17;
+    struct utsname uts;
+    int major, minor;
+    if (uname(&uts) == 0 && strcmp(uts.sysname, "Linux") == 0 &&
+        sscanf(uts.release, "%d.%d", &major, &minor) == 2 &&
+        ((major < kRequiredMajor ||
+          (major == kRequiredMajor && minor < kRequiredMinor)))) {
+      return false;
+    }
+
+    base::ScopedFile fd;
+    fd.reset(static_cast<int>(syscall(__NR_memfd_create, "perfetto_shmem",
+                                      MFD_CLOEXEC | MFD_ALLOW_SEALING)));
+    return !!fd;
+  }();
+  return kSupportsMemfd;
+}
+
+base::ScopedFile CreateMemfd(const char* name, unsigned int flags) {
+  if (!HasMemfdSupport()) {
+    errno = ENOSYS;
+    return base::ScopedFile();
+  }
+  return base::ScopedFile(
+      static_cast<int>(syscall(__NR_memfd_create, name, flags)));
+}
+}  // namespace perfetto
+
+#else  // PERFETTO_MEMFD_ENABLED()
+
+namespace perfetto {
+bool HasMemfdSupport() {
+  return false;
+}
+base::ScopedFile CreateMemfd(const char*, unsigned int) {
+  errno = ENOSYS;
+  return base::ScopedFile();
+}
+}  // namespace perfetto
+
+#endif  // PERFETTO_MEMFD_ENABLED()
+// gen_amalgamated begin source: src/tracing/ipc/posix_shared_memory.cc
+// gen_amalgamated begin header: src/tracing/ipc/posix_shared_memory.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_IPC_POSIX_SHARED_MEMORY_H_
+#define SRC_TRACING_IPC_POSIX_SHARED_MEMORY_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) ||   \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+
+#include <stddef.h>
+
+#include <memory>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
+
+namespace perfetto {
+
+// Implements the SharedMemory and its factory for the posix-based transport.
+class PosixSharedMemory : public SharedMemory {
+ public:
+  class Factory : public SharedMemory::Factory {
+   public:
+    ~Factory() override;
+    std::unique_ptr<SharedMemory> CreateSharedMemory(size_t) override;
+  };
+
+  // Create a brand new SHM region.
+  static std::unique_ptr<PosixSharedMemory> Create(size_t size);
+
+  // Mmaps a file descriptor to an existing SHM region. If
+  // |require_seals_if_supported| is true and the system supports
+  // memfd_create(), the FD is required to be a sealed memfd with F_SEAL_SEAL,
+  // F_SEAL_GROW, and F_SEAL_SHRINK seals set (otherwise, nullptr is returned).
+  // May also return nullptr if mapping fails for another reason (e.g. OOM).
+  static std::unique_ptr<PosixSharedMemory> AttachToFd(
+      base::ScopedFile,
+      bool require_seals_if_supported = true);
+
+  ~PosixSharedMemory() override;
+
+  int fd() const { return fd_.get(); }
+
+  // SharedMemory implementation.
+  void* start() const override { return start_; }
+  size_t size() const override { return size_; }
+
+ private:
+  static std::unique_ptr<PosixSharedMemory> MapFD(base::ScopedFile, size_t);
+
+  PosixSharedMemory(void* start, size_t size, base::ScopedFile);
+  PosixSharedMemory(const PosixSharedMemory&) = delete;
+  PosixSharedMemory& operator=(const PosixSharedMemory&) = delete;
+
+  void* const start_;
+  const size_t size_;
+  base::ScopedFile fd_;
+};
+
+}  // namespace perfetto
+
+#endif  // OS_LINUX || OS_ANDROID || OS_APPLE
+#endif  // SRC_TRACING_IPC_POSIX_SHARED_MEMORY_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/tracing/ipc/posix_shared_memory.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) ||   \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+
+#include <fcntl.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <memory>
+#include <utility>
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/temp_file.h"
+// gen_amalgamated expanded: #include "src/tracing/ipc/memfd.h"
+
+namespace perfetto {
+
+namespace {
+int kFileSeals = F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL;
+}  // namespace
+
+// static
+std::unique_ptr<PosixSharedMemory> PosixSharedMemory::Create(size_t size) {
+  base::ScopedFile fd =
+      CreateMemfd("perfetto_shmem", MFD_CLOEXEC | MFD_ALLOW_SEALING);
+  bool is_memfd = !!fd;
+
+  // In-tree builds only allow mem_fd, so we can inspect the seals to verify the
+  // fd is appropriately sealed. We'll crash in the PERFETTO_CHECK(fd) below if
+  // memfd_create failed.
+#if !PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
+  if (!fd) {
+    // TODO: if this fails on Android we should fall back on ashmem.
+    PERFETTO_DPLOG("memfd_create() failed");
+    fd = base::TempFile::CreateUnlinked().ReleaseFD();
+  }
+#endif
+
+  PERFETTO_CHECK(fd);
+  int res = ftruncate(fd.get(), static_cast<off_t>(size));
+  PERFETTO_CHECK(res == 0);
+
+  if (is_memfd) {
+    // When memfd is supported, file seals should be, too.
+    res = fcntl(*fd, F_ADD_SEALS, kFileSeals);
+    PERFETTO_DCHECK(res == 0);
+  }
+
+  return MapFD(std::move(fd), size);
+}
+
+// static
+std::unique_ptr<PosixSharedMemory> PosixSharedMemory::AttachToFd(
+    base::ScopedFile fd,
+    bool require_seals_if_supported) {
+  bool requires_seals = require_seals_if_supported;
+
+#if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
+  // In-tree kernels all support memfd.
+  PERFETTO_CHECK(HasMemfdSupport());
+#else
+  // In out-of-tree builds, we only require seals if the kernel supports memfd.
+  if (requires_seals)
+    requires_seals = HasMemfdSupport();
+#endif
+
+  if (requires_seals) {
+    // If the system supports memfd, we require a sealed memfd.
+    int res = fcntl(*fd, F_GET_SEALS);
+    if (res == -1 || (res & kFileSeals) != kFileSeals) {
+      PERFETTO_PLOG("Couldn't verify file seals on shmem FD");
+      return nullptr;
+    }
+  }
+
+  struct stat stat_buf = {};
+  int res = fstat(fd.get(), &stat_buf);
+  PERFETTO_CHECK(res == 0 && stat_buf.st_size > 0);
+  return MapFD(std::move(fd), static_cast<size_t>(stat_buf.st_size));
+}
+
+// static
+std::unique_ptr<PosixSharedMemory> PosixSharedMemory::MapFD(base::ScopedFile fd,
+                                                            size_t size) {
+  PERFETTO_DCHECK(fd);
+  PERFETTO_DCHECK(size > 0);
+  void* start =
+      mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0);
+  PERFETTO_CHECK(start != MAP_FAILED);
+  return std::unique_ptr<PosixSharedMemory>(
+      new PosixSharedMemory(start, size, std::move(fd)));
+}
+
+PosixSharedMemory::PosixSharedMemory(void* start,
+                                     size_t size,
+                                     base::ScopedFile fd)
+    : start_(start), size_(size), fd_(std::move(fd)) {}
+
+PosixSharedMemory::~PosixSharedMemory() {
+  munmap(start(), size());
+}
+
+PosixSharedMemory::Factory::~Factory() {}
+
+std::unique_ptr<SharedMemory> PosixSharedMemory::Factory::CreateSharedMemory(
+    size_t size) {
+  return PosixSharedMemory::Create(size);
+}
+
+}  // namespace perfetto
+
+#endif  // OS_LINUX || OS_ANDROID || OS_APPLE
+// gen_amalgamated begin source: src/tracing/ipc/consumer/consumer_ipc_client_impl.cc
+// gen_amalgamated begin header: src/tracing/ipc/consumer/consumer_ipc_client_impl.h
+// gen_amalgamated begin header: include/perfetto/ext/tracing/ipc/consumer_ipc_client.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACING_IPC_CONSUMER_IPC_CLIENT_H_
+#define INCLUDE_PERFETTO_EXT_TRACING_IPC_CONSUMER_IPC_CLIENT_H_
+
+#include <memory>
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
+
+namespace perfetto {
+
+class Consumer;
+
+// Allows to connect to a remote Service through a UNIX domain socket.
+// Exposed to:
+//   Consumer(s) of the tracing library.
+// Implemented in:
+//   src/tracing/ipc/consumer/consumer_ipc_client_impl.cc
+class PERFETTO_EXPORT ConsumerIPCClient {
+ public:
+  // Connects to the producer port of the Service listening on the given
+  // |service_sock_name|. If the connection is successful, the OnConnect()
+  // method will be invoked asynchronously on the passed Consumer interface.
+  // If the connection fails, OnDisconnect() will be invoked instead.
+  // The returned ConsumerEndpoint serves also to delimit the scope of the
+  // callbacks invoked on the Consumer interface: no more Consumer callbacks are
+  // invoked immediately after its destruction and any pending callback will be
+  // dropped.
+  static std::unique_ptr<TracingService::ConsumerEndpoint>
+  Connect(const char* service_sock_name, Consumer*, base::TaskRunner*);
+
+ protected:
+  ConsumerIPCClient() = delete;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACING_IPC_CONSUMER_IPC_CLIENT_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_IPC_CONSUMER_CONSUMER_IPC_CLIENT_IMPL_H_
+#define SRC_TRACING_IPC_CONSUMER_CONSUMER_IPC_CLIENT_IMPL_H_
+
+#include <stdint.h>
+
+#include <list>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/consumer_ipc_client.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/ipc/consumer_port.ipc.h"
+
+namespace perfetto {
+
+namespace base {
+class TaskRunner;
+}  // namespace base
+
+namespace ipc {
+class Client;
+}  // namespace ipc
+
+class Consumer;
+
+// Exposes a Service endpoint to Consumer(s), proxying all requests through a
+// IPC channel to the remote Service. This class is the glue layer between the
+// generic Service interface exposed to the clients of the library and the
+// actual IPC transport.
+class ConsumerIPCClientImpl : public TracingService::ConsumerEndpoint,
+                              public ipc::ServiceProxy::EventListener {
+ public:
+  ConsumerIPCClientImpl(const char* service_sock_name,
+                        Consumer*,
+                        base::TaskRunner*);
+  ~ConsumerIPCClientImpl() override;
+
+  // TracingService::ConsumerEndpoint implementation.
+  // These methods are invoked by the actual Consumer(s) code by clients of the
+  // tracing library, which know nothing about the IPC transport.
+  void EnableTracing(const TraceConfig&, base::ScopedFile) override;
+  void StartTracing() override;
+  void ChangeTraceConfig(const TraceConfig&) override;
+  void DisableTracing() override;
+  void ReadBuffers() override;
+  void FreeBuffers() override;
+  void Flush(uint32_t timeout_ms, FlushCallback) override;
+  void Detach(const std::string& key) override;
+  void Attach(const std::string& key) override;
+  void GetTraceStats() override;
+  void ObserveEvents(uint32_t enabled_event_types) override;
+  void QueryServiceState(QueryServiceStateCallback) override;
+  void QueryCapabilities(QueryCapabilitiesCallback) override;
+  void SaveTraceForBugreport(SaveTraceForBugreportCallback) override;
+
+  // ipc::ServiceProxy::EventListener implementation.
+  // These methods are invoked by the IPC layer, which knows nothing about
+  // tracing, consumers and consumers.
+  void OnConnect() override;
+  void OnDisconnect() override;
+
+ private:
+  struct PendingQueryServiceRequest {
+    QueryServiceStateCallback callback;
+
+    // All the replies will be appended here until |has_more| == false.
+    std::vector<uint8_t> merged_resp;
+  };
+
+  // List because we need stable iterators.
+  using PendingQueryServiceRequests = std::list<PendingQueryServiceRequest>;
+
+  void OnReadBuffersResponse(
+      ipc::AsyncResult<protos::gen::ReadBuffersResponse>);
+  void OnEnableTracingResponse(
+      ipc::AsyncResult<protos::gen::EnableTracingResponse>);
+  void OnQueryServiceStateResponse(
+      ipc::AsyncResult<protos::gen::QueryServiceStateResponse>,
+      PendingQueryServiceRequests::iterator);
+
+  // TODO(primiano): think to dtor order, do we rely on any specific sequence?
+  Consumer* const consumer_;
+
+  // The object that owns the client socket and takes care of IPC traffic.
+  std::unique_ptr<ipc::Client> ipc_channel_;
+
+  // The proxy interface for the consumer port of the service. It is bound
+  // to |ipc_channel_| and (de)serializes method invocations over the wire.
+  protos::gen::ConsumerPortProxy consumer_port_;
+
+  bool connected_ = false;
+
+  PendingQueryServiceRequests pending_query_svc_reqs_;
+
+  // When a packet is too big to fit into a ReadBuffersResponse IPC, the service
+  // will chunk it into several IPCs, each containing few slices of the packet
+  // (a packet's slice is always guaranteed to be << kIPCBufferSize). When
+  // chunking happens this field accumulates the slices received until the
+  // one with |last_slice_for_packet| == true is received.
+  TracePacket partial_packet_;
+
+  // Keep last.
+  base::WeakPtrFactory<ConsumerIPCClientImpl> weak_ptr_factory_;
+};
+
+}  // namespace perfetto
+
+#endif  // SRC_TRACING_IPC_CONSUMER_CONSUMER_IPC_CLIENT_IMPL_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/tracing/ipc/consumer/consumer_ipc_client_impl.h"
+
+#include <inttypes.h>
+#include <string.h>
+
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/client.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/consumer.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/observable_events.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_stats.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/trace_config.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/tracing_service_state.h"
+
+// TODO(fmayer): Add a test to check to what happens when ConsumerIPCClientImpl
+// gets destroyed w.r.t. the Consumer pointer. Also think to lifetime of the
+// Consumer* during the callbacks.
+
+namespace perfetto {
+
+// static. (Declared in include/tracing/ipc/consumer_ipc_client.h).
+std::unique_ptr<TracingService::ConsumerEndpoint> ConsumerIPCClient::Connect(
+    const char* service_sock_name,
+    Consumer* consumer,
+    base::TaskRunner* task_runner) {
+  return std::unique_ptr<TracingService::ConsumerEndpoint>(
+      new ConsumerIPCClientImpl(service_sock_name, consumer, task_runner));
+}
+
+ConsumerIPCClientImpl::ConsumerIPCClientImpl(const char* service_sock_name,
+                                             Consumer* consumer,
+                                             base::TaskRunner* task_runner)
+    : consumer_(consumer),
+      ipc_channel_(
+          ipc::Client::CreateInstance({service_sock_name, /*sock_retry=*/false},
+                                      task_runner)),
+      consumer_port_(this /* event_listener */),
+      weak_ptr_factory_(this) {
+  ipc_channel_->BindService(consumer_port_.GetWeakPtr());
+}
+
+ConsumerIPCClientImpl::~ConsumerIPCClientImpl() = default;
+
+// Called by the IPC layer if the BindService() succeeds.
+void ConsumerIPCClientImpl::OnConnect() {
+  connected_ = true;
+  consumer_->OnConnect();
+}
+
+void ConsumerIPCClientImpl::OnDisconnect() {
+  PERFETTO_DLOG("Tracing service connection failure");
+  connected_ = false;
+  consumer_->OnDisconnect();  // Note: may delete |this|.
+}
+
+void ConsumerIPCClientImpl::EnableTracing(const TraceConfig& trace_config,
+                                          base::ScopedFile fd) {
+  if (!connected_) {
+    PERFETTO_DLOG("Cannot EnableTracing(), not connected to tracing service");
+    return;
+  }
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+  if (fd) {
+    consumer_->OnTracingDisabled(
+        "Passing FDs for write_into_file is not supported on Windows");
+    return;
+  }
+#endif
+
+  protos::gen::EnableTracingRequest req;
+  *req.mutable_trace_config() = trace_config;
+  ipc::Deferred<protos::gen::EnableTracingResponse> async_response;
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  async_response.Bind(
+      [weak_this](
+          ipc::AsyncResult<protos::gen::EnableTracingResponse> response) {
+        if (weak_this)
+          weak_this->OnEnableTracingResponse(std::move(response));
+      });
+
+  // |fd| will be closed when this function returns, but it's fine because the
+  // IPC layer dup()'s it when sending the IPC.
+  consumer_port_.EnableTracing(req, std::move(async_response), *fd);
+}
+
+void ConsumerIPCClientImpl::ChangeTraceConfig(const TraceConfig& trace_config) {
+  if (!connected_) {
+    PERFETTO_DLOG(
+        "Cannot ChangeTraceConfig(), not connected to tracing service");
+    return;
+  }
+
+  ipc::Deferred<protos::gen::ChangeTraceConfigResponse> async_response;
+  async_response.Bind(
+      [](ipc::AsyncResult<protos::gen::ChangeTraceConfigResponse> response) {
+        if (!response)
+          PERFETTO_DLOG("ChangeTraceConfig() failed");
+      });
+  protos::gen::ChangeTraceConfigRequest req;
+  *req.mutable_trace_config() = trace_config;
+  consumer_port_.ChangeTraceConfig(req, std::move(async_response));
+}
+
+void ConsumerIPCClientImpl::StartTracing() {
+  if (!connected_) {
+    PERFETTO_DLOG("Cannot StartTracing(), not connected to tracing service");
+    return;
+  }
+
+  ipc::Deferred<protos::gen::StartTracingResponse> async_response;
+  async_response.Bind(
+      [](ipc::AsyncResult<protos::gen::StartTracingResponse> response) {
+        if (!response)
+          PERFETTO_DLOG("StartTracing() failed");
+      });
+  protos::gen::StartTracingRequest req;
+  consumer_port_.StartTracing(req, std::move(async_response));
+}
+
+void ConsumerIPCClientImpl::DisableTracing() {
+  if (!connected_) {
+    PERFETTO_DLOG("Cannot DisableTracing(), not connected to tracing service");
+    return;
+  }
+
+  ipc::Deferred<protos::gen::DisableTracingResponse> async_response;
+  async_response.Bind(
+      [](ipc::AsyncResult<protos::gen::DisableTracingResponse> response) {
+        if (!response)
+          PERFETTO_DLOG("DisableTracing() failed");
+      });
+  consumer_port_.DisableTracing(protos::gen::DisableTracingRequest(),
+                                std::move(async_response));
+}
+
+void ConsumerIPCClientImpl::ReadBuffers() {
+  if (!connected_) {
+    PERFETTO_DLOG("Cannot ReadBuffers(), not connected to tracing service");
+    return;
+  }
+
+  ipc::Deferred<protos::gen::ReadBuffersResponse> async_response;
+
+  // The IPC layer guarantees that callbacks are destroyed after this object
+  // is destroyed (by virtue of destroying the |consumer_port_|). In turn the
+  // contract of this class expects the caller to not destroy the Consumer class
+  // before having destroyed this class. Hence binding |this| here is safe.
+  async_response.Bind(
+      [this](ipc::AsyncResult<protos::gen::ReadBuffersResponse> response) {
+        OnReadBuffersResponse(std::move(response));
+      });
+  consumer_port_.ReadBuffers(protos::gen::ReadBuffersRequest(),
+                             std::move(async_response));
+}
+
+void ConsumerIPCClientImpl::OnReadBuffersResponse(
+    ipc::AsyncResult<protos::gen::ReadBuffersResponse> response) {
+  if (!response) {
+    PERFETTO_DLOG("ReadBuffers() failed");
+    return;
+  }
+  std::vector<TracePacket> trace_packets;
+  for (auto& resp_slice : response->slices()) {
+    const std::string& slice_data = resp_slice.data();
+    Slice slice = Slice::Allocate(slice_data.size());
+    memcpy(slice.own_data(), slice_data.data(), slice.size);
+    partial_packet_.AddSlice(std::move(slice));
+    if (resp_slice.last_slice_for_packet())
+      trace_packets.emplace_back(std::move(partial_packet_));
+  }
+  if (!trace_packets.empty() || !response.has_more())
+    consumer_->OnTraceData(std::move(trace_packets), response.has_more());
+}
+
+void ConsumerIPCClientImpl::OnEnableTracingResponse(
+    ipc::AsyncResult<protos::gen::EnableTracingResponse> response) {
+  std::string error;
+  // |response| might be empty when the request gets rejected (if the connection
+  // with the service is dropped all outstanding requests are auto-rejected).
+  if (!response) {
+    error =
+        "EnableTracing IPC request rejected. This is likely due to a loss of "
+        "the traced connection";
+  } else {
+    error = response->error();
+  }
+  if (!response || response->disabled())
+    consumer_->OnTracingDisabled(error);
+}
+
+void ConsumerIPCClientImpl::FreeBuffers() {
+  if (!connected_) {
+    PERFETTO_DLOG("Cannot FreeBuffers(), not connected to tracing service");
+    return;
+  }
+
+  protos::gen::FreeBuffersRequest req;
+  ipc::Deferred<protos::gen::FreeBuffersResponse> async_response;
+  async_response.Bind(
+      [](ipc::AsyncResult<protos::gen::FreeBuffersResponse> response) {
+        if (!response)
+          PERFETTO_DLOG("FreeBuffers() failed");
+      });
+  consumer_port_.FreeBuffers(req, std::move(async_response));
+}
+
+void ConsumerIPCClientImpl::Flush(uint32_t timeout_ms, FlushCallback callback) {
+  if (!connected_) {
+    PERFETTO_DLOG("Cannot Flush(), not connected to tracing service");
+    return callback(/*success=*/false);
+  }
+
+  protos::gen::FlushRequest req;
+  req.set_timeout_ms(static_cast<uint32_t>(timeout_ms));
+  ipc::Deferred<protos::gen::FlushResponse> async_response;
+  async_response.Bind(
+      [callback](ipc::AsyncResult<protos::gen::FlushResponse> response) {
+        callback(!!response);
+      });
+  consumer_port_.Flush(req, std::move(async_response));
+}
+
+void ConsumerIPCClientImpl::Detach(const std::string& key) {
+  if (!connected_) {
+    PERFETTO_DLOG("Cannot Detach(), not connected to tracing service");
+    return;
+  }
+
+  protos::gen::DetachRequest req;
+  req.set_key(key);
+  ipc::Deferred<protos::gen::DetachResponse> async_response;
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+
+  async_response.Bind(
+      [weak_this](ipc::AsyncResult<protos::gen::DetachResponse> response) {
+        if (weak_this)
+          weak_this->consumer_->OnDetach(!!response);
+      });
+  consumer_port_.Detach(req, std::move(async_response));
+}
+
+void ConsumerIPCClientImpl::Attach(const std::string& key) {
+  if (!connected_) {
+    PERFETTO_DLOG("Cannot Attach(), not connected to tracing service");
+    return;
+  }
+
+  {
+    protos::gen::AttachRequest req;
+    req.set_key(key);
+    ipc::Deferred<protos::gen::AttachResponse> async_response;
+    auto weak_this = weak_ptr_factory_.GetWeakPtr();
+
+    async_response.Bind(
+        [weak_this](ipc::AsyncResult<protos::gen::AttachResponse> response) {
+          if (!weak_this)
+            return;
+          if (!response) {
+            weak_this->consumer_->OnAttach(/*success=*/false, TraceConfig());
+            return;
+          }
+          const TraceConfig& trace_config = response->trace_config();
+
+          // If attached succesfully, also attach to the end-of-trace
+          // notificaton callback, via EnableTracing(attach_notification_only).
+          protos::gen::EnableTracingRequest enable_req;
+          enable_req.set_attach_notification_only(true);
+          ipc::Deferred<protos::gen::EnableTracingResponse> enable_resp;
+          enable_resp.Bind(
+              [weak_this](
+                  ipc::AsyncResult<protos::gen::EnableTracingResponse> resp) {
+                if (weak_this)
+                  weak_this->OnEnableTracingResponse(std::move(resp));
+              });
+          weak_this->consumer_port_.EnableTracing(enable_req,
+                                                  std::move(enable_resp));
+
+          weak_this->consumer_->OnAttach(/*success=*/true, trace_config);
+        });
+    consumer_port_.Attach(req, std::move(async_response));
+  }
+}
+
+void ConsumerIPCClientImpl::GetTraceStats() {
+  if (!connected_) {
+    PERFETTO_DLOG("Cannot GetTraceStats(), not connected to tracing service");
+    return;
+  }
+
+  protos::gen::GetTraceStatsRequest req;
+  ipc::Deferred<protos::gen::GetTraceStatsResponse> async_response;
+
+  // The IPC layer guarantees that callbacks are destroyed after this object
+  // is destroyed (by virtue of destroying the |consumer_port_|). In turn the
+  // contract of this class expects the caller to not destroy the Consumer class
+  // before having destroyed this class. Hence binding |this| here is safe.
+  async_response.Bind(
+      [this](ipc::AsyncResult<protos::gen::GetTraceStatsResponse> response) {
+        if (!response) {
+          consumer_->OnTraceStats(/*success=*/false, TraceStats());
+          return;
+        }
+        consumer_->OnTraceStats(/*success=*/true, response->trace_stats());
+      });
+  consumer_port_.GetTraceStats(req, std::move(async_response));
+}
+
+void ConsumerIPCClientImpl::ObserveEvents(uint32_t enabled_event_types) {
+  if (!connected_) {
+    PERFETTO_DLOG("Cannot ObserveEvents(), not connected to tracing service");
+    return;
+  }
+
+  protos::gen::ObserveEventsRequest req;
+  for (uint32_t i = 0; i < 32; i++) {
+    const uint32_t event_id = 1u << i;
+    if (enabled_event_types & event_id)
+      req.add_events_to_observe(static_cast<ObservableEvents::Type>(event_id));
+  }
+
+  ipc::Deferred<protos::gen::ObserveEventsResponse> async_response;
+  // The IPC layer guarantees that callbacks are destroyed after this object
+  // is destroyed (by virtue of destroying the |consumer_port_|). In turn the
+  // contract of this class expects the caller to not destroy the Consumer class
+  // before having destroyed this class. Hence binding |this| here is safe.
+  async_response.Bind(
+      [this](ipc::AsyncResult<protos::gen::ObserveEventsResponse> response) {
+        // Skip empty response, which the service sends to close the stream.
+        if (!response.has_more()) {
+          PERFETTO_DCHECK(!response.success());
+          return;
+        }
+        consumer_->OnObservableEvents(response->events());
+      });
+  consumer_port_.ObserveEvents(req, std::move(async_response));
+}
+
+void ConsumerIPCClientImpl::QueryServiceState(
+    QueryServiceStateCallback callback) {
+  if (!connected_) {
+    PERFETTO_DLOG(
+        "Cannot QueryServiceState(), not connected to tracing service");
+    return;
+  }
+
+  auto it = pending_query_svc_reqs_.insert(pending_query_svc_reqs_.end(),
+                                           {std::move(callback), {}});
+  protos::gen::QueryServiceStateRequest req;
+  ipc::Deferred<protos::gen::QueryServiceStateResponse> async_response;
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  async_response.Bind(
+      [weak_this,
+       it](ipc::AsyncResult<protos::gen::QueryServiceStateResponse> response) {
+        if (weak_this)
+          weak_this->OnQueryServiceStateResponse(std::move(response), it);
+      });
+  consumer_port_.QueryServiceState(req, std::move(async_response));
+}
+
+void ConsumerIPCClientImpl::OnQueryServiceStateResponse(
+    ipc::AsyncResult<protos::gen::QueryServiceStateResponse> response,
+    PendingQueryServiceRequests::iterator req_it) {
+  PERFETTO_DCHECK(req_it->callback);
+
+  if (!response) {
+    auto callback = std::move(req_it->callback);
+    pending_query_svc_reqs_.erase(req_it);
+    callback(false, TracingServiceState());
+    return;
+  }
+
+  // The QueryServiceState response can be split in several chunks if the
+  // service has several data sources. The client is supposed to merge all the
+  // replies. The easiest way to achieve this is to re-serialize the partial
+  // response and then re-decode the merged result in one shot.
+  std::vector<uint8_t>& merged_resp = req_it->merged_resp;
+  std::vector<uint8_t> part = response->service_state().SerializeAsArray();
+  merged_resp.insert(merged_resp.end(), part.begin(), part.end());
+
+  if (response.has_more())
+    return;
+
+  // All replies have been received. Decode the merged result and reply to the
+  // callback.
+  protos::gen::TracingServiceState svc_state;
+  bool ok = svc_state.ParseFromArray(merged_resp.data(), merged_resp.size());
+  if (!ok)
+    PERFETTO_ELOG("Failed to decode merged QueryServiceStateResponse");
+  auto callback = std::move(req_it->callback);
+  pending_query_svc_reqs_.erase(req_it);
+  callback(ok, std::move(svc_state));
+}
+
+void ConsumerIPCClientImpl::QueryCapabilities(
+    QueryCapabilitiesCallback callback) {
+  if (!connected_) {
+    PERFETTO_DLOG(
+        "Cannot QueryCapabilities(), not connected to tracing service");
+    return;
+  }
+
+  protos::gen::QueryCapabilitiesRequest req;
+  ipc::Deferred<protos::gen::QueryCapabilitiesResponse> async_response;
+  async_response.Bind(
+      [callback](
+          ipc::AsyncResult<protos::gen::QueryCapabilitiesResponse> response) {
+        if (!response) {
+          // If the IPC fails, we are talking to an older version of the service
+          // that didn't support QueryCapabilities at all. In this case return
+          // an empty capabilities message.
+          callback(TracingServiceCapabilities());
+        } else {
+          callback(response->capabilities());
+        }
+      });
+  consumer_port_.QueryCapabilities(req, std::move(async_response));
+}
+
+void ConsumerIPCClientImpl::SaveTraceForBugreport(
+    SaveTraceForBugreportCallback callback) {
+  if (!connected_) {
+    PERFETTO_DLOG(
+        "Cannot SaveTraceForBugreport(), not connected to tracing service");
+    return;
+  }
+
+  protos::gen::SaveTraceForBugreportRequest req;
+  ipc::Deferred<protos::gen::SaveTraceForBugreportResponse> async_response;
+  async_response.Bind(
+      [callback](ipc::AsyncResult<protos::gen::SaveTraceForBugreportResponse>
+                     response) {
+        if (!response) {
+          // If the IPC fails, we are talking to an older version of the service
+          // that didn't support SaveTraceForBugreport at all.
+          callback(
+              false,
+              "The tracing service doesn't support SaveTraceForBugreport()");
+        } else {
+          callback(response->success(), response->msg());
+        }
+      });
+  consumer_port_.SaveTraceForBugreport(req, std::move(async_response));
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/ipc/producer/producer_ipc_client_impl.cc
+// gen_amalgamated begin header: src/tracing/ipc/producer/producer_ipc_client_impl.h
+// gen_amalgamated begin header: include/perfetto/ext/tracing/ipc/producer_ipc_client.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACING_IPC_PRODUCER_IPC_CLIENT_H_
+#define INCLUDE_PERFETTO_EXT_TRACING_IPC_PRODUCER_IPC_CLIENT_H_
+
+#include <memory>
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/client.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
+
+namespace perfetto {
+
+class Producer;
+
+// Allows to connect to a remote Service through a UNIX domain socket.
+// Exposed to:
+//   Producer(s) of the tracing library.
+// Implemented in:
+//   src/tracing/ipc/producer/producer_ipc_client_impl.cc
+class PERFETTO_EXPORT ProducerIPCClient {
+ public:
+  enum class ConnectionFlags {
+    // Fails immediately with OnConnect(false) if the service connection cannot
+    // be established.
+    kDefault = 0,
+
+    // Keeps retrying with exponential backoff indefinitely. The caller will
+    // never see an OnConnect(false).
+    kRetryIfUnreachable = 1,
+  };
+
+  // Connects to the producer port of the Service listening on the given
+  // |service_sock_name|. If the connection is successful, the OnConnect()
+  // method will be invoked asynchronously on the passed Producer interface. If
+  // the connection fails, OnDisconnect() will be invoked instead. The returned
+  // ProducerEndpoint serves also to delimit the scope of the callbacks invoked
+  // on the Producer interface: no more Producer callbacks are invoked
+  // immediately after its destruction and any pending callback will be dropped.
+  // To provide a producer-allocated shared memory buffer, both |shm| and
+  // |shm_arbiter| should be set. |shm_arbiter| should be an unbound
+  // SharedMemoryArbiter instance. When |shm| and |shm_arbiter| are provided,
+  // the service will attempt to adopt the provided SMB. If this fails, the
+  // ProducerEndpoint will disconnect, but the SMB and arbiter will remain valid
+  // until the client is destroyed.
+  //
+  // TODO(eseckler): Support adoption failure more gracefully.
+  // TODO(primiano): move all the existing use cases to the Connect(ConnArgs)
+  // below. Also move the functionality of ConnectionFlags into ConnArgs.
+  static std::unique_ptr<TracingService::ProducerEndpoint> Connect(
+      const char* service_sock_name,
+      Producer*,
+      const std::string& producer_name,
+      base::TaskRunner*,
+      TracingService::ProducerSMBScrapingMode smb_scraping_mode =
+          TracingService::ProducerSMBScrapingMode::kDefault,
+      size_t shared_memory_size_hint_bytes = 0,
+      size_t shared_memory_page_size_hint_bytes = 0,
+      std::unique_ptr<SharedMemory> shm = nullptr,
+      std::unique_ptr<SharedMemoryArbiter> shm_arbiter = nullptr,
+      ConnectionFlags = ConnectionFlags::kDefault);
+
+  // Overload of Connect() to support adopting a connected socket using
+  // ipc::Client::ConnArgs.
+  static std::unique_ptr<TracingService::ProducerEndpoint> Connect(
+      ipc::Client::ConnArgs,
+      Producer*,
+      const std::string& producer_name,
+      base::TaskRunner*,
+      TracingService::ProducerSMBScrapingMode smb_scraping_mode =
+          TracingService::ProducerSMBScrapingMode::kDefault,
+      size_t shared_memory_size_hint_bytes = 0,
+      size_t shared_memory_page_size_hint_bytes = 0,
+      std::unique_ptr<SharedMemory> shm = nullptr,
+      std::unique_ptr<SharedMemoryArbiter> shm_arbiter = nullptr);
+
+ protected:
+  ProducerIPCClient() = delete;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACING_IPC_PRODUCER_IPC_CLIENT_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_IPC_PRODUCER_PRODUCER_IPC_CLIENT_IMPL_H_
+#define SRC_TRACING_IPC_PRODUCER_PRODUCER_IPC_CLIENT_IMPL_H_
+
+#include <stdint.h>
+
+#include <set>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/thread_checker.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/client.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service_proxy.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/producer_ipc_client.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/ipc/producer_port.ipc.h"
+
+namespace perfetto {
+
+namespace base {
+class TaskRunner;
+}  // namespace base
+
+class Producer;
+class SharedMemoryArbiter;
+
+// Exposes a Service endpoint to Producer(s), proxying all requests through a
+// IPC channel to the remote Service. This class is the glue layer between the
+// generic Service interface exposed to the clients of the library and the
+// actual IPC transport.
+class ProducerIPCClientImpl : public TracingService::ProducerEndpoint,
+                              public ipc::ServiceProxy::EventListener {
+ public:
+  ProducerIPCClientImpl(ipc::Client::ConnArgs,
+                        Producer*,
+                        const std::string& producer_name,
+                        base::TaskRunner*,
+                        TracingService::ProducerSMBScrapingMode,
+                        size_t shared_memory_size_hint_bytes,
+                        size_t shared_memory_page_size_hint_bytes,
+                        std::unique_ptr<SharedMemory> shm,
+                        std::unique_ptr<SharedMemoryArbiter> shm_arbiter);
+  ~ProducerIPCClientImpl() override;
+
+  // TracingService::ProducerEndpoint implementation.
+  // These methods are invoked by the actual Producer(s) code by clients of the
+  // tracing library, which know nothing about the IPC transport.
+  void RegisterDataSource(const DataSourceDescriptor&) override;
+  void UnregisterDataSource(const std::string& name) override;
+  void RegisterTraceWriter(uint32_t writer_id, uint32_t target_buffer) override;
+  void UnregisterTraceWriter(uint32_t writer_id) override;
+  void CommitData(const CommitDataRequest&, CommitDataCallback) override;
+  void NotifyDataSourceStarted(DataSourceInstanceID) override;
+  void NotifyDataSourceStopped(DataSourceInstanceID) override;
+  void ActivateTriggers(const std::vector<std::string>&) override;
+  void Sync(std::function<void()> callback) override;
+
+  std::unique_ptr<TraceWriter> CreateTraceWriter(
+      BufferID target_buffer,
+      BufferExhaustedPolicy) override;
+  SharedMemoryArbiter* MaybeSharedMemoryArbiter() override;
+  bool IsShmemProvidedByProducer() const override;
+  void NotifyFlushComplete(FlushRequestID) override;
+  SharedMemory* shared_memory() const override;
+  size_t shared_buffer_page_size_kb() const override;
+
+  // ipc::ServiceProxy::EventListener implementation.
+  // These methods are invoked by the IPC layer, which knows nothing about
+  // tracing, producers and consumers.
+  void OnConnect() override;
+  void OnDisconnect() override;
+
+  ipc::Client* GetClientForTesting() { return ipc_channel_.get(); }
+
+ private:
+  // Invoked soon after having established the connection with the service.
+  void OnConnectionInitialized(bool connection_succeeded,
+                               bool using_shmem_provided_by_producer,
+                               bool direct_smb_patching_supported);
+
+  // Invoked when the remote Service sends an IPC to tell us to do something
+  // (e.g. start/stop a data source).
+  void OnServiceRequest(const protos::gen::GetAsyncCommandResponse&);
+
+  // TODO think to destruction order, do we rely on any specific dtor sequence?
+  Producer* const producer_;
+  base::TaskRunner* const task_runner_;
+
+  // The object that owns the client socket and takes care of IPC traffic.
+  std::unique_ptr<ipc::Client> ipc_channel_;
+
+  // The proxy interface for the producer port of the service. It is bound
+  // to |ipc_channel_| and (de)serializes method invocations over the wire.
+  protos::gen::ProducerPortProxy producer_port_;
+
+  std::unique_ptr<SharedMemory> shared_memory_;
+  std::unique_ptr<SharedMemoryArbiter> shared_memory_arbiter_;
+  size_t shared_buffer_page_size_kb_ = 0;
+  std::set<DataSourceInstanceID> data_sources_setup_;
+  bool connected_ = false;
+  std::string const name_;
+  size_t shared_memory_page_size_hint_bytes_ = 0;
+  size_t shared_memory_size_hint_bytes_ = 0;
+  TracingService::ProducerSMBScrapingMode const smb_scraping_mode_;
+  bool is_shmem_provided_by_producer_ = false;
+  bool direct_smb_patching_supported_ = false;
+  std::vector<std::function<void()>> pending_sync_reqs_;
+  PERFETTO_THREAD_CHECKER(thread_checker_)
+};
+
+}  // namespace perfetto
+
+#endif  // SRC_TRACING_IPC_PRODUCER_PRODUCER_IPC_CLIENT_IMPL_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/tracing/ipc/producer/producer_ipc_client_impl.h"
+
+#include <inttypes.h>
+#include <string.h>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/version.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/client.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/commit_data_request.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/producer.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_arbiter.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_writer.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_config.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/trace_config.h"
+// gen_amalgamated expanded: #include "src/tracing/ipc/posix_shared_memory.h"
+
+// TODO(fmayer): think to what happens when ProducerIPCClientImpl gets destroyed
+// w.r.t. the Producer pointer. Also think to lifetime of the Producer* during
+// the callbacks.
+
+namespace perfetto {
+
+// static. (Declared in include/tracing/ipc/producer_ipc_client.h).
+std::unique_ptr<TracingService::ProducerEndpoint> ProducerIPCClient::Connect(
+    const char* service_sock_name,
+    Producer* producer,
+    const std::string& producer_name,
+    base::TaskRunner* task_runner,
+    TracingService::ProducerSMBScrapingMode smb_scraping_mode,
+    size_t shared_memory_size_hint_bytes,
+    size_t shared_memory_page_size_hint_bytes,
+    std::unique_ptr<SharedMemory> shm,
+    std::unique_ptr<SharedMemoryArbiter> shm_arbiter,
+    ConnectionFlags conn_flags) {
+  return std::unique_ptr<TracingService::ProducerEndpoint>(
+      new ProducerIPCClientImpl(
+          {service_sock_name,
+           conn_flags ==
+               ProducerIPCClient::ConnectionFlags::kRetryIfUnreachable},
+          producer, producer_name, task_runner, smb_scraping_mode,
+          shared_memory_size_hint_bytes, shared_memory_page_size_hint_bytes,
+          std::move(shm), std::move(shm_arbiter)));
+}
+
+// static. (Declared in include/tracing/ipc/producer_ipc_client.h).
+std::unique_ptr<TracingService::ProducerEndpoint> ProducerIPCClient::Connect(
+    ipc::Client::ConnArgs conn_args,
+    Producer* producer,
+    const std::string& producer_name,
+    base::TaskRunner* task_runner,
+    TracingService::ProducerSMBScrapingMode smb_scraping_mode,
+    size_t shared_memory_size_hint_bytes,
+    size_t shared_memory_page_size_hint_bytes,
+    std::unique_ptr<SharedMemory> shm,
+    std::unique_ptr<SharedMemoryArbiter> shm_arbiter) {
+  return std::unique_ptr<TracingService::ProducerEndpoint>(
+      new ProducerIPCClientImpl(std::move(conn_args), producer, producer_name,
+                                task_runner, smb_scraping_mode,
+                                shared_memory_size_hint_bytes,
+                                shared_memory_page_size_hint_bytes,
+                                std::move(shm), std::move(shm_arbiter)));
+}
+
+ProducerIPCClientImpl::ProducerIPCClientImpl(
+    ipc::Client::ConnArgs conn_args,
+    Producer* producer,
+    const std::string& producer_name,
+    base::TaskRunner* task_runner,
+    TracingService::ProducerSMBScrapingMode smb_scraping_mode,
+    size_t shared_memory_size_hint_bytes,
+    size_t shared_memory_page_size_hint_bytes,
+    std::unique_ptr<SharedMemory> shm,
+    std::unique_ptr<SharedMemoryArbiter> shm_arbiter)
+    : producer_(producer),
+      task_runner_(task_runner),
+      ipc_channel_(
+          ipc::Client::CreateInstance(std::move(conn_args), task_runner)),
+      producer_port_(this /* event_listener */),
+      shared_memory_(std::move(shm)),
+      shared_memory_arbiter_(std::move(shm_arbiter)),
+      name_(producer_name),
+      shared_memory_page_size_hint_bytes_(shared_memory_page_size_hint_bytes),
+      shared_memory_size_hint_bytes_(shared_memory_size_hint_bytes),
+      smb_scraping_mode_(smb_scraping_mode) {
+  // Check for producer-provided SMB (used by Chrome for startup tracing).
+  if (shared_memory_) {
+    // We also expect a valid (unbound) arbiter. Bind it to this endpoint now.
+    PERFETTO_CHECK(shared_memory_arbiter_);
+    shared_memory_arbiter_->BindToProducerEndpoint(this, task_runner_);
+
+    // If the service accepts our SMB, then it must match our requested page
+    // layout. The protocol doesn't allow the service to change the size and
+    // layout when the SMB is provided by the producer.
+    shared_buffer_page_size_kb_ = shared_memory_page_size_hint_bytes_ / 1024;
+  }
+
+  ipc_channel_->BindService(producer_port_.GetWeakPtr());
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+}
+
+ProducerIPCClientImpl::~ProducerIPCClientImpl() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+}
+
+// Called by the IPC layer if the BindService() succeeds.
+void ProducerIPCClientImpl::OnConnect() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  connected_ = true;
+
+  // The IPC layer guarantees that any outstanding callback will be dropped on
+  // the floor if producer_port_ is destroyed between the request and the reply.
+  // Binding |this| is hence safe.
+  ipc::Deferred<protos::gen::InitializeConnectionResponse> on_init;
+  on_init.Bind(
+      [this](ipc::AsyncResult<protos::gen::InitializeConnectionResponse> resp) {
+        OnConnectionInitialized(
+            resp.success(),
+            resp.success() ? resp->using_shmem_provided_by_producer() : false,
+            resp.success() ? resp->direct_smb_patching_supported() : false);
+      });
+  protos::gen::InitializeConnectionRequest req;
+  req.set_producer_name(name_);
+  req.set_shared_memory_size_hint_bytes(
+      static_cast<uint32_t>(shared_memory_size_hint_bytes_));
+  req.set_shared_memory_page_size_hint_bytes(
+      static_cast<uint32_t>(shared_memory_page_size_hint_bytes_));
+  switch (smb_scraping_mode_) {
+    case TracingService::ProducerSMBScrapingMode::kDefault:
+      // No need to set the mode, it defaults to use the service default if
+      // unspecified.
+      break;
+    case TracingService::ProducerSMBScrapingMode::kEnabled:
+      req.set_smb_scraping_mode(
+          protos::gen::InitializeConnectionRequest::SMB_SCRAPING_ENABLED);
+      break;
+    case TracingService::ProducerSMBScrapingMode::kDisabled:
+      req.set_smb_scraping_mode(
+          protos::gen::InitializeConnectionRequest::SMB_SCRAPING_DISABLED);
+      break;
+  }
+
+  int shm_fd = -1;
+  if (shared_memory_) {
+    shm_fd = static_cast<PosixSharedMemory*>(shared_memory_.get())->fd();
+    req.set_producer_provided_shmem(true);
+  }
+
+#if PERFETTO_DCHECK_IS_ON()
+  req.set_build_flags(
+      protos::gen::InitializeConnectionRequest::BUILD_FLAGS_DCHECKS_ON);
+#else
+  req.set_build_flags(
+      protos::gen::InitializeConnectionRequest::BUILD_FLAGS_DCHECKS_OFF);
+#endif
+  req.set_sdk_version(base::GetVersionString());
+  producer_port_.InitializeConnection(req, std::move(on_init), shm_fd);
+
+  // Create the back channel to receive commands from the Service.
+  ipc::Deferred<protos::gen::GetAsyncCommandResponse> on_cmd;
+  on_cmd.Bind(
+      [this](ipc::AsyncResult<protos::gen::GetAsyncCommandResponse> resp) {
+        if (!resp)
+          return;  // The IPC channel was closed and |resp| was auto-rejected.
+        OnServiceRequest(*resp);
+      });
+  producer_port_.GetAsyncCommand(protos::gen::GetAsyncCommandRequest(),
+                                 std::move(on_cmd));
+
+  // If there are pending Sync() requests, send them now.
+  for (const auto& pending_sync : pending_sync_reqs_)
+    Sync(std::move(pending_sync));
+  pending_sync_reqs_.clear();
+}
+
+void ProducerIPCClientImpl::OnDisconnect() {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  PERFETTO_DLOG("Tracing service connection failure");
+  connected_ = false;
+  data_sources_setup_.clear();
+  producer_->OnDisconnect();  // Note: may delete |this|.
+}
+
+void ProducerIPCClientImpl::OnConnectionInitialized(
+    bool connection_succeeded,
+    bool using_shmem_provided_by_producer,
+    bool direct_smb_patching_supported) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  // If connection_succeeded == false, the OnDisconnect() call will follow next
+  // and there we'll notify the |producer_|. TODO: add a test for this.
+  if (!connection_succeeded)
+    return;
+  is_shmem_provided_by_producer_ = using_shmem_provided_by_producer;
+  direct_smb_patching_supported_ = direct_smb_patching_supported;
+  producer_->OnConnect();
+
+  // Bail out if the service failed to adopt our producer-allocated SMB.
+  // TODO(eseckler): Handle adoption failure more gracefully.
+  if (shared_memory_ && !is_shmem_provided_by_producer_) {
+    PERFETTO_DLOG("Service failed adopt producer-provided SMB, disconnecting.");
+    ipc_channel_.reset();
+    return;
+  }
+}
+
+void ProducerIPCClientImpl::OnServiceRequest(
+    const protos::gen::GetAsyncCommandResponse& cmd) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+
+  // This message is sent only when connecting to a service running Android Q+.
+  // See comment below in kStartDataSource.
+  if (cmd.has_setup_data_source()) {
+    const auto& req = cmd.setup_data_source();
+    const DataSourceInstanceID dsid = req.new_instance_id();
+    data_sources_setup_.insert(dsid);
+    producer_->SetupDataSource(dsid, req.config());
+    return;
+  }
+
+  if (cmd.has_start_data_source()) {
+    const auto& req = cmd.start_data_source();
+    const DataSourceInstanceID dsid = req.new_instance_id();
+    const DataSourceConfig& cfg = req.config();
+    if (!data_sources_setup_.count(dsid)) {
+      // When connecting with an older (Android P) service, the service will not
+      // send a SetupDataSource message. We synthesize it here in that case.
+      producer_->SetupDataSource(dsid, cfg);
+    }
+    producer_->StartDataSource(dsid, cfg);
+    return;
+  }
+
+  if (cmd.has_stop_data_source()) {
+    const DataSourceInstanceID dsid = cmd.stop_data_source().instance_id();
+    producer_->StopDataSource(dsid);
+    data_sources_setup_.erase(dsid);
+    return;
+  }
+
+  if (cmd.has_setup_tracing()) {
+    base::ScopedFile shmem_fd = ipc_channel_->TakeReceivedFD();
+    if (shmem_fd) {
+      // This is the nominal case used in most configurations, where the service
+      // provides the SMB.
+      PERFETTO_CHECK(!is_shmem_provided_by_producer_ && !shared_memory_);
+      // TODO(primiano): handle mmap failure in case of OOM.
+      shared_memory_ =
+          PosixSharedMemory::AttachToFd(std::move(shmem_fd),
+                                        /*require_seals_if_supported=*/false);
+      shared_buffer_page_size_kb_ =
+          cmd.setup_tracing().shared_buffer_page_size_kb();
+      shared_memory_arbiter_ = SharedMemoryArbiter::CreateInstance(
+          shared_memory_.get(), shared_buffer_page_size_kb_ * 1024, this,
+          task_runner_);
+      if (direct_smb_patching_supported_)
+        shared_memory_arbiter_->SetDirectSMBPatchingSupportedByService();
+    } else {
+      // Producer-provided SMB (used by Chrome for startup tracing).
+      PERFETTO_CHECK(is_shmem_provided_by_producer_ && shared_memory_ &&
+                     shared_memory_arbiter_);
+    }
+    producer_->OnTracingSetup();
+    return;
+  }
+
+  if (cmd.has_flush()) {
+    // This cast boilerplate is required only because protobuf uses its own
+    // uint64 and not stdint's uint64_t. On some 64 bit archs they differ on the
+    // type (long vs long long) even though they have the same size.
+    const auto* data_source_ids = cmd.flush().data_source_ids().data();
+    static_assert(sizeof(data_source_ids[0]) == sizeof(DataSourceInstanceID),
+                  "data_source_ids should be 64-bit");
+    producer_->Flush(
+        cmd.flush().request_id(),
+        reinterpret_cast<const DataSourceInstanceID*>(data_source_ids),
+        static_cast<size_t>(cmd.flush().data_source_ids().size()));
+    return;
+  }
+
+  if (cmd.has_clear_incremental_state()) {
+    const auto* data_source_ids =
+        cmd.clear_incremental_state().data_source_ids().data();
+    static_assert(sizeof(data_source_ids[0]) == sizeof(DataSourceInstanceID),
+                  "data_source_ids should be 64-bit");
+    producer_->ClearIncrementalState(
+        reinterpret_cast<const DataSourceInstanceID*>(data_source_ids),
+        static_cast<size_t>(
+            cmd.clear_incremental_state().data_source_ids().size()));
+    return;
+  }
+
+  PERFETTO_DFATAL("Unknown async request received from tracing service");
+}
+
+void ProducerIPCClientImpl::RegisterDataSource(
+    const DataSourceDescriptor& descriptor) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!connected_) {
+    PERFETTO_DLOG(
+        "Cannot RegisterDataSource(), not connected to tracing service");
+  }
+  protos::gen::RegisterDataSourceRequest req;
+  *req.mutable_data_source_descriptor() = descriptor;
+  ipc::Deferred<protos::gen::RegisterDataSourceResponse> async_response;
+  async_response.Bind(
+      [](ipc::AsyncResult<protos::gen::RegisterDataSourceResponse> response) {
+        if (!response)
+          PERFETTO_DLOG("RegisterDataSource() failed: connection reset");
+      });
+  producer_port_.RegisterDataSource(req, std::move(async_response));
+}
+
+void ProducerIPCClientImpl::UnregisterDataSource(const std::string& name) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!connected_) {
+    PERFETTO_DLOG(
+        "Cannot UnregisterDataSource(), not connected to tracing service");
+    return;
+  }
+  protos::gen::UnregisterDataSourceRequest req;
+  req.set_data_source_name(name);
+  producer_port_.UnregisterDataSource(
+      req, ipc::Deferred<protos::gen::UnregisterDataSourceResponse>());
+}
+
+void ProducerIPCClientImpl::RegisterTraceWriter(uint32_t writer_id,
+                                                uint32_t target_buffer) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!connected_) {
+    PERFETTO_DLOG(
+        "Cannot RegisterTraceWriter(), not connected to tracing service");
+    return;
+  }
+  protos::gen::RegisterTraceWriterRequest req;
+  req.set_trace_writer_id(writer_id);
+  req.set_target_buffer(target_buffer);
+  producer_port_.RegisterTraceWriter(
+      req, ipc::Deferred<protos::gen::RegisterTraceWriterResponse>());
+}
+
+void ProducerIPCClientImpl::UnregisterTraceWriter(uint32_t writer_id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!connected_) {
+    PERFETTO_DLOG(
+        "Cannot UnregisterTraceWriter(), not connected to tracing service");
+    return;
+  }
+  protos::gen::UnregisterTraceWriterRequest req;
+  req.set_trace_writer_id(writer_id);
+  producer_port_.UnregisterTraceWriter(
+      req, ipc::Deferred<protos::gen::UnregisterTraceWriterResponse>());
+}
+
+void ProducerIPCClientImpl::CommitData(const CommitDataRequest& req,
+                                       CommitDataCallback callback) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!connected_) {
+    PERFETTO_DLOG("Cannot CommitData(), not connected to tracing service");
+    return;
+  }
+  ipc::Deferred<protos::gen::CommitDataResponse> async_response;
+  // TODO(primiano): add a test that destroys ProducerIPCClientImpl soon after
+  // this call and checks that the callback is dropped.
+  if (callback) {
+    async_response.Bind(
+        [callback](ipc::AsyncResult<protos::gen::CommitDataResponse> response) {
+          if (!response) {
+            PERFETTO_DLOG("CommitData() failed: connection reset");
+            return;
+          }
+          callback();
+        });
+  }
+  producer_port_.CommitData(req, std::move(async_response));
+}
+
+void ProducerIPCClientImpl::NotifyDataSourceStarted(DataSourceInstanceID id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!connected_) {
+    PERFETTO_DLOG(
+        "Cannot NotifyDataSourceStarted(), not connected to tracing service");
+    return;
+  }
+  protos::gen::NotifyDataSourceStartedRequest req;
+  req.set_data_source_id(id);
+  producer_port_.NotifyDataSourceStarted(
+      req, ipc::Deferred<protos::gen::NotifyDataSourceStartedResponse>());
+}
+
+void ProducerIPCClientImpl::NotifyDataSourceStopped(DataSourceInstanceID id) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!connected_) {
+    PERFETTO_DLOG(
+        "Cannot NotifyDataSourceStopped(), not connected to tracing service");
+    return;
+  }
+  protos::gen::NotifyDataSourceStoppedRequest req;
+  req.set_data_source_id(id);
+  producer_port_.NotifyDataSourceStopped(
+      req, ipc::Deferred<protos::gen::NotifyDataSourceStoppedResponse>());
+}
+
+void ProducerIPCClientImpl::ActivateTriggers(
+    const std::vector<std::string>& triggers) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!connected_) {
+    PERFETTO_DLOG(
+        "Cannot ActivateTriggers(), not connected to tracing service");
+    return;
+  }
+  protos::gen::ActivateTriggersRequest proto_req;
+  for (const auto& name : triggers) {
+    *proto_req.add_trigger_names() = name;
+  }
+  producer_port_.ActivateTriggers(
+      proto_req, ipc::Deferred<protos::gen::ActivateTriggersResponse>());
+}
+
+void ProducerIPCClientImpl::Sync(std::function<void()> callback) {
+  PERFETTO_DCHECK_THREAD(thread_checker_);
+  if (!connected_) {
+    pending_sync_reqs_.emplace_back(std::move(callback));
+    return;
+  }
+  ipc::Deferred<protos::gen::SyncResponse> resp;
+  resp.Bind([callback](ipc::AsyncResult<protos::gen::SyncResponse>) {
+    // Here we ACK the callback even if the service replies with a failure
+    // (i.e. the service is too old and doesn't understand Sync()). In that
+    // case the service has still seen the request, the IPC roundtrip is
+    // still a (weaker) linearization fence.
+    callback();
+  });
+  producer_port_.Sync(protos::gen::SyncRequest(), std::move(resp));
+}
+
+std::unique_ptr<TraceWriter> ProducerIPCClientImpl::CreateTraceWriter(
+    BufferID target_buffer,
+    BufferExhaustedPolicy buffer_exhausted_policy) {
+  // This method can be called by different threads. |shared_memory_arbiter_| is
+  // thread-safe but be aware of accessing any other state in this function.
+  return shared_memory_arbiter_->CreateTraceWriter(target_buffer,
+                                                   buffer_exhausted_policy);
+}
+
+SharedMemoryArbiter* ProducerIPCClientImpl::MaybeSharedMemoryArbiter() {
+  return shared_memory_arbiter_.get();
+}
+
+bool ProducerIPCClientImpl::IsShmemProvidedByProducer() const {
+  return is_shmem_provided_by_producer_;
+}
+
+void ProducerIPCClientImpl::NotifyFlushComplete(FlushRequestID req_id) {
+  return shared_memory_arbiter_->NotifyFlushComplete(req_id);
+}
+
+SharedMemory* ProducerIPCClientImpl::shared_memory() const {
+  return shared_memory_.get();
+}
+
+size_t ProducerIPCClientImpl::shared_buffer_page_size_kb() const {
+  return shared_buffer_page_size_kb_;
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/ipc/service/consumer_ipc_service.cc
+// gen_amalgamated begin header: src/tracing/ipc/service/consumer_ipc_service.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_IPC_SERVICE_CONSUMER_IPC_SERVICE_H_
+#define SRC_TRACING_IPC_SERVICE_CONSUMER_IPC_SERVICE_H_
+
+#include <list>
+#include <map>
+#include <memory>
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/consumer.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+// gen_amalgamated expanded: #include "protos/perfetto/ipc/consumer_port.ipc.h"
+
+namespace perfetto {
+
+namespace ipc {
+class Host;
+}  // namespace ipc
+
+// Implements the Consumer port of the IPC service. This class proxies requests
+// and responses between the core service logic (|svc_|) and remote Consumer(s)
+// on the IPC socket, through the methods overriddden from ConsumerPort.
+class ConsumerIPCService : public protos::gen::ConsumerPort {
+ public:
+  explicit ConsumerIPCService(TracingService* core_service);
+  ~ConsumerIPCService() override;
+
+  // ConsumerPort implementation (from .proto IPC definition).
+  void EnableTracing(const protos::gen::EnableTracingRequest&,
+                     DeferredEnableTracingResponse) override;
+  void StartTracing(const protos::gen::StartTracingRequest&,
+                    DeferredStartTracingResponse) override;
+  void ChangeTraceConfig(const protos::gen::ChangeTraceConfigRequest&,
+                         DeferredChangeTraceConfigResponse) override;
+  void DisableTracing(const protos::gen::DisableTracingRequest&,
+                      DeferredDisableTracingResponse) override;
+  void ReadBuffers(const protos::gen::ReadBuffersRequest&,
+                   DeferredReadBuffersResponse) override;
+  void FreeBuffers(const protos::gen::FreeBuffersRequest&,
+                   DeferredFreeBuffersResponse) override;
+  void Flush(const protos::gen::FlushRequest&, DeferredFlushResponse) override;
+  void Detach(const protos::gen::DetachRequest&,
+              DeferredDetachResponse) override;
+  void Attach(const protos::gen::AttachRequest&,
+              DeferredAttachResponse) override;
+  void GetTraceStats(const protos::gen::GetTraceStatsRequest&,
+                     DeferredGetTraceStatsResponse) override;
+  void ObserveEvents(const protos::gen::ObserveEventsRequest&,
+                     DeferredObserveEventsResponse) override;
+  void QueryServiceState(const protos::gen::QueryServiceStateRequest&,
+                         DeferredQueryServiceStateResponse) override;
+  void QueryCapabilities(const protos::gen::QueryCapabilitiesRequest&,
+                         DeferredQueryCapabilitiesResponse) override;
+  void SaveTraceForBugreport(const protos::gen::SaveTraceForBugreportRequest&,
+                             DeferredSaveTraceForBugreportResponse) override;
+  void OnClientDisconnected() override;
+
+ private:
+  // Acts like a Consumer with the core Service business logic (which doesn't
+  // know anything about the remote transport), but all it does is proxying
+  // methods to the remote Consumer on the other side of the IPC channel.
+  class RemoteConsumer : public Consumer {
+   public:
+    RemoteConsumer();
+    ~RemoteConsumer() override;
+
+    // These methods are called by the |core_service_| business logic. There is
+    // no connection here, these methods are posted straight away.
+    void OnConnect() override;
+    void OnDisconnect() override;
+    void OnTracingDisabled(const std::string& error) override;
+    void OnTraceData(std::vector<TracePacket>, bool has_more) override;
+    void OnDetach(bool) override;
+    void OnAttach(bool, const TraceConfig&) override;
+    void OnTraceStats(bool, const TraceStats&) override;
+    void OnObservableEvents(const ObservableEvents&) override;
+
+    void CloseObserveEventsResponseStream();
+
+    // The interface obtained from the core service business logic through
+    // TracingService::ConnectConsumer(this). This allows to invoke methods for
+    // a specific Consumer on the Service business logic.
+    std::unique_ptr<TracingService::ConsumerEndpoint> service_endpoint;
+
+    // After ReadBuffers() is invoked, this binds the async callback that
+    // allows to stream trace packets back to the client.
+    DeferredReadBuffersResponse read_buffers_response;
+
+    // After EnableTracing() is invoked, this binds the async callback that
+    // allows to send the OnTracingDisabled notification.
+    DeferredEnableTracingResponse enable_tracing_response;
+
+    // After Detach() is invoked, this binds the async callback that allows to
+    // send the session id to the consumer.
+    DeferredDetachResponse detach_response;
+
+    // As above, but for the Attach() case.
+    DeferredAttachResponse attach_response;
+
+    // As above, but for GetTraceStats().
+    DeferredGetTraceStatsResponse get_trace_stats_response;
+
+    // After ObserveEvents() is invoked, this binds the async callback that
+    // allows to stream ObservableEvents back to the client.
+    DeferredObserveEventsResponse observe_events_response;
+  };
+
+  // This has to be a container that doesn't invalidate iterators.
+  using PendingFlushResponses = std::list<DeferredFlushResponse>;
+  using PendingQuerySvcResponses = std::list<DeferredQueryServiceStateResponse>;
+  using PendingQueryCapabilitiesResponses =
+      std::list<DeferredQueryCapabilitiesResponse>;
+  using PendingSaveTraceForBugreportResponses =
+      std::list<DeferredSaveTraceForBugreportResponse>;
+
+  ConsumerIPCService(const ConsumerIPCService&) = delete;
+  ConsumerIPCService& operator=(const ConsumerIPCService&) = delete;
+
+  // Returns the ConsumerEndpoint in the core business logic that corresponds to
+  // the current IPC request.
+  RemoteConsumer* GetConsumerForCurrentRequest();
+
+  void OnFlushCallback(bool success, PendingFlushResponses::iterator);
+  void OnQueryServiceCallback(bool success,
+                              const TracingServiceState&,
+                              PendingQuerySvcResponses::iterator);
+  void OnQueryCapabilitiesCallback(const TracingServiceCapabilities&,
+                                   PendingQueryCapabilitiesResponses::iterator);
+  void OnSaveTraceForBugreportCallback(
+      bool success,
+      const std::string& msg,
+      PendingSaveTraceForBugreportResponses::iterator);
+
+  TracingService* const core_service_;
+
+  // Maps IPC clients to ConsumerEndpoint instances registered on the
+  // |core_service_| business logic.
+  std::map<ipc::ClientID, std::unique_ptr<RemoteConsumer>> consumers_;
+
+  PendingFlushResponses pending_flush_responses_;
+  PendingQuerySvcResponses pending_query_service_responses_;
+  PendingQueryCapabilitiesResponses pending_query_capabilities_responses_;
+  PendingSaveTraceForBugreportResponses pending_bugreport_responses_;
+
+  base::WeakPtrFactory<ConsumerIPCService> weak_ptr_factory_;  // Keep last.
+};
+
+}  // namespace perfetto
+
+#endif  // SRC_TRACING_IPC_SERVICE_CONSUMER_IPC_SERVICE_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/tracing/ipc/service/consumer_ipc_service.h"
+
+#include <inttypes.h>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/host.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/shared_memory_abi.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/slice.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_packet.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/trace_stats.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/trace_config.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/tracing_service_capabilities.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/tracing_service_state.h"
+
+namespace perfetto {
+
+ConsumerIPCService::ConsumerIPCService(TracingService* core_service)
+    : core_service_(core_service), weak_ptr_factory_(this) {}
+
+ConsumerIPCService::~ConsumerIPCService() = default;
+
+ConsumerIPCService::RemoteConsumer*
+ConsumerIPCService::GetConsumerForCurrentRequest() {
+  const ipc::ClientID ipc_client_id = ipc::Service::client_info().client_id();
+  const uid_t uid = ipc::Service::client_info().uid();
+  PERFETTO_CHECK(ipc_client_id);
+  auto it = consumers_.find(ipc_client_id);
+  if (it == consumers_.end()) {
+    auto* remote_consumer = new RemoteConsumer();
+    consumers_[ipc_client_id].reset(remote_consumer);
+    remote_consumer->service_endpoint =
+        core_service_->ConnectConsumer(remote_consumer, uid);
+    return remote_consumer;
+  }
+  return it->second.get();
+}
+
+// Called by the IPC layer.
+void ConsumerIPCService::OnClientDisconnected() {
+  ipc::ClientID client_id = ipc::Service::client_info().client_id();
+  consumers_.erase(client_id);
+}
+
+// Called by the IPC layer.
+void ConsumerIPCService::EnableTracing(
+    const protos::gen::EnableTracingRequest& req,
+    DeferredEnableTracingResponse resp) {
+  RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
+  if (req.attach_notification_only()) {
+    remote_consumer->enable_tracing_response = std::move(resp);
+    return;
+  }
+  const TraceConfig& trace_config = req.trace_config();
+  base::ScopedFile fd;
+  if (trace_config.write_into_file() && trace_config.output_path().empty())
+    fd = ipc::Service::TakeReceivedFD();
+  remote_consumer->service_endpoint->EnableTracing(trace_config, std::move(fd));
+  remote_consumer->enable_tracing_response = std::move(resp);
+}
+
+// Called by the IPC layer.
+void ConsumerIPCService::StartTracing(const protos::gen::StartTracingRequest&,
+                                      DeferredStartTracingResponse resp) {
+  RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
+  remote_consumer->service_endpoint->StartTracing();
+  resp.Resolve(ipc::AsyncResult<protos::gen::StartTracingResponse>::Create());
+}
+
+// Called by the IPC layer.
+void ConsumerIPCService::ChangeTraceConfig(
+    const protos::gen::ChangeTraceConfigRequest& req,
+    DeferredChangeTraceConfigResponse resp) {
+  RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
+  remote_consumer->service_endpoint->ChangeTraceConfig(req.trace_config());
+  resp.Resolve(
+      ipc::AsyncResult<protos::gen::ChangeTraceConfigResponse>::Create());
+}
+
+// Called by the IPC layer.
+void ConsumerIPCService::DisableTracing(
+    const protos::gen::DisableTracingRequest&,
+    DeferredDisableTracingResponse resp) {
+  GetConsumerForCurrentRequest()->service_endpoint->DisableTracing();
+  resp.Resolve(ipc::AsyncResult<protos::gen::DisableTracingResponse>::Create());
+}
+
+// Called by the IPC layer.
+void ConsumerIPCService::ReadBuffers(const protos::gen::ReadBuffersRequest&,
+                                     DeferredReadBuffersResponse resp) {
+  RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
+  remote_consumer->read_buffers_response = std::move(resp);
+  remote_consumer->service_endpoint->ReadBuffers();
+}
+
+// Called by the IPC layer.
+void ConsumerIPCService::FreeBuffers(const protos::gen::FreeBuffersRequest&,
+                                     DeferredFreeBuffersResponse resp) {
+  GetConsumerForCurrentRequest()->service_endpoint->FreeBuffers();
+  resp.Resolve(ipc::AsyncResult<protos::gen::FreeBuffersResponse>::Create());
+}
+
+// Called by the IPC layer.
+void ConsumerIPCService::Flush(const protos::gen::FlushRequest& req,
+                               DeferredFlushResponse resp) {
+  auto it = pending_flush_responses_.insert(pending_flush_responses_.end(),
+                                            std::move(resp));
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  auto callback = [weak_this, it](bool success) {
+    if (weak_this)
+      weak_this->OnFlushCallback(success, std::move(it));
+  };
+  GetConsumerForCurrentRequest()->service_endpoint->Flush(req.timeout_ms(),
+                                                          std::move(callback));
+}
+
+// Called by the IPC layer.
+void ConsumerIPCService::Detach(const protos::gen::DetachRequest& req,
+                                DeferredDetachResponse resp) {
+  // OnDetach() will resolve the |detach_response|.
+  RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
+  remote_consumer->detach_response = std::move(resp);
+  remote_consumer->service_endpoint->Detach(req.key());
+}
+
+// Called by the IPC layer.
+void ConsumerIPCService::Attach(const protos::gen::AttachRequest& req,
+                                DeferredAttachResponse resp) {
+  // OnAttach() will resolve the |attach_response|.
+  RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
+  remote_consumer->attach_response = std::move(resp);
+  remote_consumer->service_endpoint->Attach(req.key());
+}
+
+// Called by the IPC layer.
+void ConsumerIPCService::GetTraceStats(const protos::gen::GetTraceStatsRequest&,
+                                       DeferredGetTraceStatsResponse resp) {
+  // OnTraceStats() will resolve the |get_trace_stats_response|.
+  RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
+  remote_consumer->get_trace_stats_response = std::move(resp);
+  remote_consumer->service_endpoint->GetTraceStats();
+}
+
+// Called by the IPC layer.
+void ConsumerIPCService::ObserveEvents(
+    const protos::gen::ObserveEventsRequest& req,
+    DeferredObserveEventsResponse resp) {
+  RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
+
+  // If there's a prior stream, close it so that client can clean it up.
+  remote_consumer->CloseObserveEventsResponseStream();
+
+  remote_consumer->observe_events_response = std::move(resp);
+
+  uint32_t events_mask = 0;
+  for (const auto& type : req.events_to_observe()) {
+    events_mask |= static_cast<uint32_t>(type);
+  }
+  remote_consumer->service_endpoint->ObserveEvents(events_mask);
+
+  // If no events are to be observed, close the stream immediately so that the
+  // client can clean up.
+  if (events_mask == 0)
+    remote_consumer->CloseObserveEventsResponseStream();
+}
+
+// Called by the IPC layer.
+void ConsumerIPCService::QueryServiceState(
+    const protos::gen::QueryServiceStateRequest&,
+    DeferredQueryServiceStateResponse resp) {
+  RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
+  auto it = pending_query_service_responses_.insert(
+      pending_query_service_responses_.end(), std::move(resp));
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  auto callback = [weak_this, it](bool success,
+                                  const TracingServiceState& svc_state) {
+    if (weak_this)
+      weak_this->OnQueryServiceCallback(success, svc_state, std::move(it));
+  };
+  remote_consumer->service_endpoint->QueryServiceState(callback);
+}
+
+// Called by the service in response to service_endpoint->QueryServiceState().
+void ConsumerIPCService::OnQueryServiceCallback(
+    bool success,
+    const TracingServiceState& svc_state,
+    PendingQuerySvcResponses::iterator pending_response_it) {
+  DeferredQueryServiceStateResponse response(std::move(*pending_response_it));
+  pending_query_service_responses_.erase(pending_response_it);
+  if (!success) {
+    response.Reject();
+    return;
+  }
+
+  // The TracingServiceState object might be too big to fit into a single IPC
+  // message because it contains the DataSourceDescriptor of each data source.
+  // Here we split it in chunks to fit in the IPC limit, observing the
+  // following rule: each chunk must be invididually a valid TracingServiceState
+  // message; all the chunks concatenated together must form the original
+  // message. This is to deal with the legacy API that was just sending one
+  // whole message (failing in presence of too many data sources, b/153142114).
+  // The message is split as follows: we take the whole TracingServiceState,
+  // take out the data sources section (which is a top-level repeated field)
+  // and re-add them one-by-one. If, in the process of appending, the IPC msg
+  // size is reached, a new chunk is created. This assumes that the rest of
+  // TracingServiceState fits in one IPC message and each DataSourceDescriptor
+  // fits in the worst case in a dedicated message (which is true, because
+  // otherwise the RegisterDataSource() which passes the descriptor in the first
+  // place would fail).
+
+  std::vector<uint8_t> chunked_reply;
+
+  // Transmits the current chunk and starts a new one.
+  bool sent_eof = false;
+  auto send_chunked_reply = [&chunked_reply, &response,
+                             &sent_eof](bool has_more) {
+    PERFETTO_CHECK(!sent_eof);
+    sent_eof = !has_more;
+    auto resp =
+        ipc::AsyncResult<protos::gen::QueryServiceStateResponse>::Create();
+    resp.set_has_more(has_more);
+    PERFETTO_CHECK(resp->mutable_service_state()->ParseFromArray(
+        chunked_reply.data(), chunked_reply.size()));
+    chunked_reply.clear();
+    response.Resolve(std::move(resp));
+  };
+
+  // Create a copy of the whole response and cut away the data_sources section.
+  protos::gen::TracingServiceState svc_state_copy = svc_state;
+  auto data_sources = std::move(*svc_state_copy.mutable_data_sources());
+  chunked_reply = svc_state_copy.SerializeAsArray();
+
+  // Now re-add them fitting within the IPC message limits (- some margin for
+  // the outer IPC frame).
+  constexpr size_t kMaxMsgSize = ipc::kIPCBufferSize - 128;
+  for (const auto& data_source : data_sources) {
+    protos::gen::TracingServiceState tmp;
+    tmp.mutable_data_sources()->emplace_back(std::move(data_source));
+    std::vector<uint8_t> chunk = tmp.SerializeAsArray();
+    if (chunked_reply.size() + chunk.size() < kMaxMsgSize) {
+      chunked_reply.insert(chunked_reply.end(), chunk.begin(), chunk.end());
+    } else {
+      send_chunked_reply(/*has_more=*/true);
+      chunked_reply = std::move(chunk);
+    }
+  }
+
+  PERFETTO_DCHECK(!chunked_reply.empty());
+  send_chunked_reply(/*has_more=*/false);
+  PERFETTO_CHECK(sent_eof);
+}
+
+// Called by the service in response to a service_endpoint->Flush() request.
+void ConsumerIPCService::OnFlushCallback(
+    bool success,
+    PendingFlushResponses::iterator pending_response_it) {
+  DeferredFlushResponse response(std::move(*pending_response_it));
+  pending_flush_responses_.erase(pending_response_it);
+  if (success) {
+    response.Resolve(ipc::AsyncResult<protos::gen::FlushResponse>::Create());
+  } else {
+    response.Reject();
+  }
+}
+
+void ConsumerIPCService::QueryCapabilities(
+    const protos::gen::QueryCapabilitiesRequest&,
+    DeferredQueryCapabilitiesResponse resp) {
+  RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
+  auto it = pending_query_capabilities_responses_.insert(
+      pending_query_capabilities_responses_.end(), std::move(resp));
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  auto callback = [weak_this, it](const TracingServiceCapabilities& caps) {
+    if (weak_this)
+      weak_this->OnQueryCapabilitiesCallback(caps, std::move(it));
+  };
+  remote_consumer->service_endpoint->QueryCapabilities(callback);
+}
+
+// Called by the service in response to service_endpoint->QueryCapabilities().
+void ConsumerIPCService::OnQueryCapabilitiesCallback(
+    const TracingServiceCapabilities& caps,
+    PendingQueryCapabilitiesResponses::iterator pending_response_it) {
+  DeferredQueryCapabilitiesResponse response(std::move(*pending_response_it));
+  pending_query_capabilities_responses_.erase(pending_response_it);
+  auto resp =
+      ipc::AsyncResult<protos::gen::QueryCapabilitiesResponse>::Create();
+  *resp->mutable_capabilities() = caps;
+  response.Resolve(std::move(resp));
+}
+
+void ConsumerIPCService::SaveTraceForBugreport(
+    const protos::gen::SaveTraceForBugreportRequest&,
+    DeferredSaveTraceForBugreportResponse resp) {
+  RemoteConsumer* remote_consumer = GetConsumerForCurrentRequest();
+  auto it = pending_bugreport_responses_.insert(
+      pending_bugreport_responses_.end(), std::move(resp));
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  auto callback = [weak_this, it](bool success, const std::string& msg) {
+    if (weak_this)
+      weak_this->OnSaveTraceForBugreportCallback(success, msg, std::move(it));
+  };
+  remote_consumer->service_endpoint->SaveTraceForBugreport(callback);
+}
+
+// Called by the service in response to
+// service_endpoint->SaveTraceForBugreport().
+void ConsumerIPCService::OnSaveTraceForBugreportCallback(
+    bool success,
+    const std::string& msg,
+    PendingSaveTraceForBugreportResponses::iterator pending_response_it) {
+  DeferredSaveTraceForBugreportResponse response(
+      std::move(*pending_response_it));
+  pending_bugreport_responses_.erase(pending_response_it);
+  auto resp =
+      ipc::AsyncResult<protos::gen::SaveTraceForBugreportResponse>::Create();
+  resp->set_success(success);
+  resp->set_msg(msg);
+  response.Resolve(std::move(resp));
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// RemoteConsumer methods
+////////////////////////////////////////////////////////////////////////////////
+
+ConsumerIPCService::RemoteConsumer::RemoteConsumer() = default;
+ConsumerIPCService::RemoteConsumer::~RemoteConsumer() = default;
+
+// Invoked by the |core_service_| business logic after the ConnectConsumer()
+// call. There is nothing to do here, we really expected the ConnectConsumer()
+// to just work in the local case.
+void ConsumerIPCService::RemoteConsumer::OnConnect() {}
+
+// Invoked by the |core_service_| business logic after we destroy the
+// |service_endpoint| (in the RemoteConsumer dtor).
+void ConsumerIPCService::RemoteConsumer::OnDisconnect() {}
+
+void ConsumerIPCService::RemoteConsumer::OnTracingDisabled(
+    const std::string& error) {
+  if (enable_tracing_response.IsBound()) {
+    auto result =
+        ipc::AsyncResult<protos::gen::EnableTracingResponse>::Create();
+    result->set_disabled(true);
+    if (!error.empty())
+      result->set_error(error);
+    enable_tracing_response.Resolve(std::move(result));
+  }
+}
+
+void ConsumerIPCService::RemoteConsumer::OnTraceData(
+    std::vector<TracePacket> trace_packets,
+    bool has_more) {
+  if (!read_buffers_response.IsBound())
+    return;
+
+  auto result = ipc::AsyncResult<protos::gen::ReadBuffersResponse>::Create();
+
+  // A TracePacket might be too big to fit into a single IPC message (max
+  // kIPCBufferSize). However a TracePacket is made of slices and each slice
+  // is way smaller than kIPCBufferSize (a slice size is effectively bounded by
+  // the max chunk size of the SharedMemoryABI). When sending a TracePacket,
+  // if its slices don't fit within one IPC, chunk them over several contiguous
+  // IPCs using the |last_slice_for_packet| for glueing on the other side.
+  static_assert(ipc::kIPCBufferSize >= SharedMemoryABI::kMaxPageSize * 2,
+                "kIPCBufferSize too small given the max possible slice size");
+
+  auto send_ipc_reply = [this, &result](bool more) {
+    result.set_has_more(more);
+    read_buffers_response.Resolve(std::move(result));
+    result = ipc::AsyncResult<protos::gen::ReadBuffersResponse>::Create();
+  };
+
+  size_t approx_reply_size = 0;
+  for (const TracePacket& trace_packet : trace_packets) {
+    size_t num_slices_left_for_packet = trace_packet.slices().size();
+    for (const Slice& slice : trace_packet.slices()) {
+      // Check if this slice would cause the IPC to overflow its max size and,
+      // if that is the case, split the IPCs. The "16" and "64" below are
+      // over-estimations of, respectively:
+      // 16: the preamble that prefixes each slice (there are 2 x size fields
+      //     in the proto + the |last_slice_for_packet| bool).
+      // 64: the overhead of the IPC InvokeMethodReply + wire_protocol's frame.
+      // If these estimations are wrong, BufferedFrameDeserializer::Serialize()
+      // will hit a DCHECK anyways.
+      const size_t approx_slice_size = slice.size + 16;
+      if (approx_reply_size + approx_slice_size > ipc::kIPCBufferSize - 64) {
+        // If we hit this CHECK we got a single slice that is > kIPCBufferSize.
+        PERFETTO_CHECK(result->slices_size() > 0);
+        send_ipc_reply(/*has_more=*/true);
+        approx_reply_size = 0;
+      }
+      approx_reply_size += approx_slice_size;
+
+      auto* res_slice = result->add_slices();
+      res_slice->set_last_slice_for_packet(--num_slices_left_for_packet == 0);
+      res_slice->set_data(slice.start, slice.size);
+    }
+  }
+  send_ipc_reply(has_more);
+}
+
+void ConsumerIPCService::RemoteConsumer::OnDetach(bool success) {
+  if (!success) {
+    std::move(detach_response).Reject();
+    return;
+  }
+  auto resp = ipc::AsyncResult<protos::gen::DetachResponse>::Create();
+  std::move(detach_response).Resolve(std::move(resp));
+}
+
+void ConsumerIPCService::RemoteConsumer::OnAttach(
+    bool success,
+    const TraceConfig& trace_config) {
+  if (!success) {
+    std::move(attach_response).Reject();
+    return;
+  }
+  auto response = ipc::AsyncResult<protos::gen::AttachResponse>::Create();
+  *response->mutable_trace_config() = trace_config;
+  std::move(attach_response).Resolve(std::move(response));
+}
+
+void ConsumerIPCService::RemoteConsumer::OnTraceStats(bool success,
+                                                      const TraceStats& stats) {
+  if (!success) {
+    std::move(get_trace_stats_response).Reject();
+    return;
+  }
+  auto response =
+      ipc::AsyncResult<protos::gen::GetTraceStatsResponse>::Create();
+  *response->mutable_trace_stats() = stats;
+  std::move(get_trace_stats_response).Resolve(std::move(response));
+}
+
+void ConsumerIPCService::RemoteConsumer::OnObservableEvents(
+    const ObservableEvents& events) {
+  if (!observe_events_response.IsBound())
+    return;
+
+  auto result = ipc::AsyncResult<protos::gen::ObserveEventsResponse>::Create();
+  result.set_has_more(true);
+  *result->mutable_events() = events;
+  observe_events_response.Resolve(std::move(result));
+}
+
+void ConsumerIPCService::RemoteConsumer::CloseObserveEventsResponseStream() {
+  if (!observe_events_response.IsBound())
+    return;
+
+  auto result = ipc::AsyncResult<protos::gen::ObserveEventsResponse>::Create();
+  result.set_has_more(false);
+  observe_events_response.Resolve(std::move(result));
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/ipc/service/producer_ipc_service.cc
+// gen_amalgamated begin header: src/tracing/ipc/service/producer_ipc_service.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_IPC_SERVICE_PRODUCER_IPC_SERVICE_H_
+#define SRC_TRACING_IPC_SERVICE_PRODUCER_IPC_SERVICE_H_
+
+#include <list>
+#include <map>
+#include <memory>
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/weak_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/producer.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/ipc/producer_port.ipc.h"
+
+namespace perfetto {
+
+namespace ipc {
+class Host;
+}  // namespace ipc
+
+// Implements the Producer port of the IPC service. This class proxies requests
+// and responses between the core service logic (|svc_|) and remote Producer(s)
+// on the IPC socket, through the methods overriddden from ProducerPort.
+class ProducerIPCService : public protos::gen::ProducerPort {
+ public:
+  explicit ProducerIPCService(TracingService* core_service);
+  ~ProducerIPCService() override;
+
+  // ProducerPort implementation (from .proto IPC definition).
+  void InitializeConnection(const protos::gen::InitializeConnectionRequest&,
+                            DeferredInitializeConnectionResponse) override;
+  void RegisterDataSource(const protos::gen::RegisterDataSourceRequest&,
+                          DeferredRegisterDataSourceResponse) override;
+  void UnregisterDataSource(const protos::gen::UnregisterDataSourceRequest&,
+                            DeferredUnregisterDataSourceResponse) override;
+  void RegisterTraceWriter(const protos::gen::RegisterTraceWriterRequest&,
+                           DeferredRegisterTraceWriterResponse) override;
+  void UnregisterTraceWriter(const protos::gen::UnregisterTraceWriterRequest&,
+                             DeferredUnregisterTraceWriterResponse) override;
+  void CommitData(const protos::gen::CommitDataRequest&,
+                  DeferredCommitDataResponse) override;
+  void NotifyDataSourceStarted(
+      const protos::gen::NotifyDataSourceStartedRequest&,
+      DeferredNotifyDataSourceStartedResponse) override;
+  void NotifyDataSourceStopped(
+      const protos::gen::NotifyDataSourceStoppedRequest&,
+      DeferredNotifyDataSourceStoppedResponse) override;
+  void ActivateTriggers(const protos::gen::ActivateTriggersRequest&,
+                        DeferredActivateTriggersResponse) override;
+
+  void GetAsyncCommand(const protos::gen::GetAsyncCommandRequest&,
+                       DeferredGetAsyncCommandResponse) override;
+  void Sync(const protos::gen::SyncRequest&, DeferredSyncResponse) override;
+  void OnClientDisconnected() override;
+
+ private:
+  // Acts like a Producer with the core Service business logic (which doesn't
+  // know anything about the remote transport), but all it does is proxying
+  // methods to the remote Producer on the other side of the IPC channel.
+  class RemoteProducer : public Producer {
+   public:
+    RemoteProducer();
+    ~RemoteProducer() override;
+
+    // These methods are called by the |core_service_| business logic. There is
+    // no connection here, these methods are posted straight away.
+    void OnConnect() override;
+    void OnDisconnect() override;
+    void SetupDataSource(DataSourceInstanceID,
+                         const DataSourceConfig&) override;
+    void StartDataSource(DataSourceInstanceID,
+                         const DataSourceConfig&) override;
+    void StopDataSource(DataSourceInstanceID) override;
+    void OnTracingSetup() override;
+    void Flush(FlushRequestID,
+               const DataSourceInstanceID* data_source_ids,
+               size_t num_data_sources) override;
+
+    void ClearIncrementalState(const DataSourceInstanceID* data_source_ids,
+                               size_t num_data_sources) override;
+
+    void SendSetupTracing();
+
+    // The interface obtained from the core service business logic through
+    // Service::ConnectProducer(this). This allows to invoke methods for a
+    // specific Producer on the Service business logic.
+    std::unique_ptr<TracingService::ProducerEndpoint> service_endpoint;
+
+    // The back-channel (based on a never ending stream request) that allows us
+    // to send asynchronous commands to the remote Producer (e.g. start/stop a
+    // data source).
+    DeferredGetAsyncCommandResponse async_producer_commands;
+
+    // Set if the service calls OnTracingSetup() before the
+    // |async_producer_commands| was bound by the service. In this case, we
+    // forward the SetupTracing command when it is bound later.
+    bool send_setup_tracing_on_async_commands_bound = false;
+  };
+
+  ProducerIPCService(const ProducerIPCService&) = delete;
+  ProducerIPCService& operator=(const ProducerIPCService&) = delete;
+
+  // Returns the ProducerEndpoint in the core business logic that corresponds to
+  // the current IPC request.
+  RemoteProducer* GetProducerForCurrentRequest();
+
+  TracingService* const core_service_;
+
+  // Maps IPC clients to ProducerEndpoint instances registered on the
+  // |core_service_| business logic.
+  std::map<ipc::ClientID, std::unique_ptr<RemoteProducer>> producers_;
+
+  // List because pointers need to be stable.
+  std::list<DeferredSyncResponse> pending_syncs_;
+
+  base::WeakPtrFactory<ProducerIPCService> weak_ptr_factory_;  // Keep last.
+};
+
+}  // namespace perfetto
+
+#endif  // SRC_TRACING_IPC_SERVICE_PRODUCER_IPC_SERVICE_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/tracing/ipc/service/producer_ipc_service.h"
+
+#include <inttypes.h>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/host.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/service.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/commit_data_request.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_config.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
+// gen_amalgamated expanded: #include "src/tracing/ipc/posix_shared_memory.h"
+
+// The remote Producer(s) are not trusted. All the methods from the ProducerPort
+// IPC layer (e.g. RegisterDataSource()) must assume that the remote Producer is
+// compromised.
+
+namespace perfetto {
+
+ProducerIPCService::ProducerIPCService(TracingService* core_service)
+    : core_service_(core_service), weak_ptr_factory_(this) {}
+
+ProducerIPCService::~ProducerIPCService() = default;
+
+ProducerIPCService::RemoteProducer*
+ProducerIPCService::GetProducerForCurrentRequest() {
+  const ipc::ClientID ipc_client_id = ipc::Service::client_info().client_id();
+  PERFETTO_CHECK(ipc_client_id);
+  auto it = producers_.find(ipc_client_id);
+  if (it == producers_.end())
+    return nullptr;
+  return it->second.get();
+}
+
+// Called by the remote Producer through the IPC channel soon after connecting.
+void ProducerIPCService::InitializeConnection(
+    const protos::gen::InitializeConnectionRequest& req,
+    DeferredInitializeConnectionResponse response) {
+  const auto& client_info = ipc::Service::client_info();
+  const ipc::ClientID ipc_client_id = client_info.client_id();
+  PERFETTO_CHECK(ipc_client_id);
+
+  if (producers_.count(ipc_client_id) > 0) {
+    PERFETTO_DLOG(
+        "The remote Producer is trying to re-initialize the connection");
+    return response.Reject();
+  }
+
+  // Create a new entry.
+  std::unique_ptr<RemoteProducer> producer(new RemoteProducer());
+
+  TracingService::ProducerSMBScrapingMode smb_scraping_mode =
+      TracingService::ProducerSMBScrapingMode::kDefault;
+  switch (req.smb_scraping_mode()) {
+    case protos::gen::InitializeConnectionRequest::SMB_SCRAPING_UNSPECIFIED:
+      break;
+    case protos::gen::InitializeConnectionRequest::SMB_SCRAPING_DISABLED:
+      smb_scraping_mode = TracingService::ProducerSMBScrapingMode::kDisabled;
+      break;
+    case protos::gen::InitializeConnectionRequest::SMB_SCRAPING_ENABLED:
+      smb_scraping_mode = TracingService::ProducerSMBScrapingMode::kEnabled;
+      break;
+  }
+
+#if PERFETTO_DCHECK_IS_ON()
+  if (req.build_flags() ==
+      protos::gen::InitializeConnectionRequest::BUILD_FLAGS_DCHECKS_OFF) {
+    PERFETTO_LOG(
+        "The producer is built with NDEBUG but the service binary was built "
+        "with the DEBUG flag. This will likely cause crashes.");
+    // The other way round (DEBUG producer with NDEBUG service) is expected to
+    // work.
+  }
+#endif
+
+  // If the producer provided an SMB, tell the service to attempt to adopt it.
+  std::unique_ptr<SharedMemory> shmem;
+  if (req.producer_provided_shmem()) {
+    base::ScopedFile shmem_fd = ipc::Service::TakeReceivedFD();
+    if (shmem_fd) {
+      shmem = PosixSharedMemory::AttachToFd(
+          std::move(shmem_fd), /*require_seals_if_supported=*/true);
+      if (!shmem) {
+        PERFETTO_ELOG(
+            "Couldn't map producer-provided SMB, falling back to "
+            "service-provided SMB");
+      }
+    } else {
+      PERFETTO_DLOG(
+          "InitializeConnectionRequest's producer_provided_shmem flag is set "
+          "but the producer didn't provide an FD");
+    }
+  }
+
+  // ConnectProducer will call OnConnect() on the next task.
+  producer->service_endpoint = core_service_->ConnectProducer(
+      producer.get(), client_info.uid(), req.producer_name(),
+      req.shared_memory_size_hint_bytes(),
+      /*in_process=*/false, smb_scraping_mode,
+      req.shared_memory_page_size_hint_bytes(), std::move(shmem),
+      req.sdk_version());
+
+  // Could happen if the service has too many producers connected.
+  if (!producer->service_endpoint) {
+    response.Reject();
+    return;
+  }
+
+  bool using_producer_shmem =
+      producer->service_endpoint->IsShmemProvidedByProducer();
+
+  producers_.emplace(ipc_client_id, std::move(producer));
+  // Because of the std::move() |producer| is invalid after this point.
+
+  auto async_res =
+      ipc::AsyncResult<protos::gen::InitializeConnectionResponse>::Create();
+  async_res->set_using_shmem_provided_by_producer(using_producer_shmem);
+  async_res->set_direct_smb_patching_supported(true);
+  response.Resolve(std::move(async_res));
+}
+
+// Called by the remote Producer through the IPC channel.
+void ProducerIPCService::RegisterDataSource(
+    const protos::gen::RegisterDataSourceRequest& req,
+    DeferredRegisterDataSourceResponse response) {
+  RemoteProducer* producer = GetProducerForCurrentRequest();
+  if (!producer) {
+    PERFETTO_DLOG(
+        "Producer invoked RegisterDataSource() before InitializeConnection()");
+    if (response.IsBound())
+      response.Reject();
+    return;
+  }
+
+  const DataSourceDescriptor& dsd = req.data_source_descriptor();
+  GetProducerForCurrentRequest()->service_endpoint->RegisterDataSource(dsd);
+
+  // RegisterDataSource doesn't expect any meaningful response.
+  if (response.IsBound()) {
+    response.Resolve(
+        ipc::AsyncResult<protos::gen::RegisterDataSourceResponse>::Create());
+  }
+}
+
+// Called by the IPC layer.
+void ProducerIPCService::OnClientDisconnected() {
+  ipc::ClientID client_id = ipc::Service::client_info().client_id();
+  PERFETTO_DLOG("Client %" PRIu64 " disconnected", client_id);
+  producers_.erase(client_id);
+}
+
+// TODO(fmayer): test what happens if we receive the following tasks, in order:
+// RegisterDataSource, UnregisterDataSource, OnDataSourceRegistered.
+// which essentially means that the client posted back to back a
+// ReqisterDataSource and UnregisterDataSource speculating on the next id.
+// Called by the remote Service through the IPC channel.
+void ProducerIPCService::UnregisterDataSource(
+    const protos::gen::UnregisterDataSourceRequest& req,
+    DeferredUnregisterDataSourceResponse response) {
+  RemoteProducer* producer = GetProducerForCurrentRequest();
+  if (!producer) {
+    PERFETTO_DLOG(
+        "Producer invoked UnregisterDataSource() before "
+        "InitializeConnection()");
+    if (response.IsBound())
+      response.Reject();
+    return;
+  }
+  producer->service_endpoint->UnregisterDataSource(req.data_source_name());
+
+  // UnregisterDataSource doesn't expect any meaningful response.
+  if (response.IsBound()) {
+    response.Resolve(
+        ipc::AsyncResult<protos::gen::UnregisterDataSourceResponse>::Create());
+  }
+}
+
+void ProducerIPCService::RegisterTraceWriter(
+    const protos::gen::RegisterTraceWriterRequest& req,
+    DeferredRegisterTraceWriterResponse response) {
+  RemoteProducer* producer = GetProducerForCurrentRequest();
+  if (!producer) {
+    PERFETTO_DLOG(
+        "Producer invoked RegisterTraceWriter() before "
+        "InitializeConnection()");
+    if (response.IsBound())
+      response.Reject();
+    return;
+  }
+  producer->service_endpoint->RegisterTraceWriter(req.trace_writer_id(),
+                                                  req.target_buffer());
+
+  // RegisterTraceWriter doesn't expect any meaningful response.
+  if (response.IsBound()) {
+    response.Resolve(
+        ipc::AsyncResult<protos::gen::RegisterTraceWriterResponse>::Create());
+  }
+}
+
+void ProducerIPCService::UnregisterTraceWriter(
+    const protos::gen::UnregisterTraceWriterRequest& req,
+    DeferredUnregisterTraceWriterResponse response) {
+  RemoteProducer* producer = GetProducerForCurrentRequest();
+  if (!producer) {
+    PERFETTO_DLOG(
+        "Producer invoked UnregisterTraceWriter() before "
+        "InitializeConnection()");
+    if (response.IsBound())
+      response.Reject();
+    return;
+  }
+  producer->service_endpoint->UnregisterTraceWriter(req.trace_writer_id());
+
+  // UnregisterTraceWriter doesn't expect any meaningful response.
+  if (response.IsBound()) {
+    response.Resolve(
+        ipc::AsyncResult<protos::gen::UnregisterTraceWriterResponse>::Create());
+  }
+}
+
+void ProducerIPCService::CommitData(const protos::gen::CommitDataRequest& req,
+                                    DeferredCommitDataResponse resp) {
+  RemoteProducer* producer = GetProducerForCurrentRequest();
+  if (!producer) {
+    PERFETTO_DLOG(
+        "Producer invoked CommitData() before InitializeConnection()");
+    if (resp.IsBound())
+      resp.Reject();
+    return;
+  }
+
+  // We don't want to send a response if the client didn't attach a callback to
+  // the original request. Doing so would generate unnecessary wakeups and
+  // context switches.
+  std::function<void()> callback;
+  if (resp.IsBound()) {
+    // Capturing |resp| by reference here speculates on the fact that
+    // CommitData() in tracing_service_impl.cc invokes the passed callback
+    // inline, without posting it. If that assumption changes this code needs to
+    // wrap the response in a shared_ptr (C+11 lambdas don't support move) and
+    // use a weak ptr in the caller.
+    callback = [&resp] {
+      resp.Resolve(ipc::AsyncResult<protos::gen::CommitDataResponse>::Create());
+    };
+  }
+  producer->service_endpoint->CommitData(req, callback);
+}
+
+void ProducerIPCService::NotifyDataSourceStarted(
+    const protos::gen::NotifyDataSourceStartedRequest& request,
+    DeferredNotifyDataSourceStartedResponse response) {
+  RemoteProducer* producer = GetProducerForCurrentRequest();
+  if (!producer) {
+    PERFETTO_DLOG(
+        "Producer invoked NotifyDataSourceStarted() before "
+        "InitializeConnection()");
+    if (response.IsBound())
+      response.Reject();
+    return;
+  }
+  producer->service_endpoint->NotifyDataSourceStarted(request.data_source_id());
+
+  // NotifyDataSourceStopped shouldn't expect any meaningful response, avoid
+  // a useless IPC in that case.
+  if (response.IsBound()) {
+    response.Resolve(ipc::AsyncResult<
+                     protos::gen::NotifyDataSourceStartedResponse>::Create());
+  }
+}
+
+void ProducerIPCService::NotifyDataSourceStopped(
+    const protos::gen::NotifyDataSourceStoppedRequest& request,
+    DeferredNotifyDataSourceStoppedResponse response) {
+  RemoteProducer* producer = GetProducerForCurrentRequest();
+  if (!producer) {
+    PERFETTO_DLOG(
+        "Producer invoked NotifyDataSourceStopped() before "
+        "InitializeConnection()");
+    if (response.IsBound())
+      response.Reject();
+    return;
+  }
+  producer->service_endpoint->NotifyDataSourceStopped(request.data_source_id());
+
+  // NotifyDataSourceStopped shouldn't expect any meaningful response, avoid
+  // a useless IPC in that case.
+  if (response.IsBound()) {
+    response.Resolve(ipc::AsyncResult<
+                     protos::gen::NotifyDataSourceStoppedResponse>::Create());
+  }
+}
+
+void ProducerIPCService::ActivateTriggers(
+    const protos::gen::ActivateTriggersRequest& proto_req,
+    DeferredActivateTriggersResponse resp) {
+  RemoteProducer* producer = GetProducerForCurrentRequest();
+  if (!producer) {
+    PERFETTO_DLOG(
+        "Producer invoked ActivateTriggers() before InitializeConnection()");
+    if (resp.IsBound())
+      resp.Reject();
+    return;
+  }
+  std::vector<std::string> triggers;
+  for (const auto& name : proto_req.trigger_names()) {
+    triggers.push_back(name);
+  }
+  producer->service_endpoint->ActivateTriggers(triggers);
+  // ActivateTriggers shouldn't expect any meaningful response, avoid
+  // a useless IPC in that case.
+  if (resp.IsBound()) {
+    resp.Resolve(
+        ipc::AsyncResult<protos::gen::ActivateTriggersResponse>::Create());
+  }
+}
+
+void ProducerIPCService::GetAsyncCommand(
+    const protos::gen::GetAsyncCommandRequest&,
+    DeferredGetAsyncCommandResponse response) {
+  RemoteProducer* producer = GetProducerForCurrentRequest();
+  if (!producer) {
+    PERFETTO_DLOG(
+        "Producer invoked GetAsyncCommand() before "
+        "InitializeConnection()");
+    return response.Reject();
+  }
+  // Keep the back channel open, without ever resolving the ipc::Deferred fully,
+  // to send async commands to the RemoteProducer (e.g., starting/stopping a
+  // data source).
+  producer->async_producer_commands = std::move(response);
+
+  // Service may already have issued the OnTracingSetup() event, in which case
+  // we should forward it to the producer now.
+  if (producer->send_setup_tracing_on_async_commands_bound)
+    producer->SendSetupTracing();
+}
+
+void ProducerIPCService::Sync(const protos::gen::SyncRequest&,
+                              DeferredSyncResponse resp) {
+  RemoteProducer* producer = GetProducerForCurrentRequest();
+  if (!producer) {
+    PERFETTO_DLOG("Producer invoked Sync() before InitializeConnection()");
+    return resp.Reject();
+  }
+  auto weak_this = weak_ptr_factory_.GetWeakPtr();
+  auto resp_it = pending_syncs_.insert(pending_syncs_.end(), std::move(resp));
+  auto callback = [weak_this, resp_it]() {
+    if (!weak_this)
+      return;
+    auto pending_resp = std::move(*resp_it);
+    weak_this->pending_syncs_.erase(resp_it);
+    pending_resp.Resolve(ipc::AsyncResult<protos::gen::SyncResponse>::Create());
+  };
+  producer->service_endpoint->Sync(callback);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// RemoteProducer methods
+////////////////////////////////////////////////////////////////////////////////
+
+ProducerIPCService::RemoteProducer::RemoteProducer() = default;
+ProducerIPCService::RemoteProducer::~RemoteProducer() = default;
+
+// Invoked by the |core_service_| business logic after the ConnectProducer()
+// call. There is nothing to do here, we really expected the ConnectProducer()
+// to just work in the local case.
+void ProducerIPCService::RemoteProducer::OnConnect() {}
+
+// Invoked by the |core_service_| business logic after we destroy the
+// |service_endpoint| (in the RemoteProducer dtor).
+void ProducerIPCService::RemoteProducer::OnDisconnect() {}
+
+// Invoked by the |core_service_| business logic when it wants to create a new
+// data source.
+void ProducerIPCService::RemoteProducer::SetupDataSource(
+    DataSourceInstanceID dsid,
+    const DataSourceConfig& cfg) {
+  if (!async_producer_commands.IsBound()) {
+    PERFETTO_DLOG(
+        "The Service tried to create a new data source but the remote Producer "
+        "has not yet initialized the connection");
+    return;
+  }
+  auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
+  cmd.set_has_more(true);
+  cmd->mutable_setup_data_source()->set_new_instance_id(dsid);
+  *cmd->mutable_setup_data_source()->mutable_config() = cfg;
+  async_producer_commands.Resolve(std::move(cmd));
+}
+
+// Invoked by the |core_service_| business logic when it wants to start a new
+// data source.
+void ProducerIPCService::RemoteProducer::StartDataSource(
+    DataSourceInstanceID dsid,
+    const DataSourceConfig& cfg) {
+  if (!async_producer_commands.IsBound()) {
+    PERFETTO_DLOG(
+        "The Service tried to start a new data source but the remote Producer "
+        "has not yet initialized the connection");
+    return;
+  }
+  auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
+  cmd.set_has_more(true);
+  cmd->mutable_start_data_source()->set_new_instance_id(dsid);
+  *cmd->mutable_start_data_source()->mutable_config() = cfg;
+  async_producer_commands.Resolve(std::move(cmd));
+}
+
+void ProducerIPCService::RemoteProducer::StopDataSource(
+    DataSourceInstanceID dsid) {
+  if (!async_producer_commands.IsBound()) {
+    PERFETTO_DLOG(
+        "The Service tried to stop a data source but the remote Producer "
+        "has not yet initialized the connection");
+    return;
+  }
+  auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
+  cmd.set_has_more(true);
+  cmd->mutable_stop_data_source()->set_instance_id(dsid);
+  async_producer_commands.Resolve(std::move(cmd));
+}
+
+void ProducerIPCService::RemoteProducer::OnTracingSetup() {
+  if (!async_producer_commands.IsBound()) {
+    // Service may call this before the producer issued GetAsyncCommand.
+    send_setup_tracing_on_async_commands_bound = true;
+    return;
+  }
+  SendSetupTracing();
+}
+
+void ProducerIPCService::RemoteProducer::SendSetupTracing() {
+  PERFETTO_CHECK(async_producer_commands.IsBound());
+  PERFETTO_CHECK(service_endpoint->shared_memory());
+  auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
+  cmd.set_has_more(true);
+  auto setup_tracing = cmd->mutable_setup_tracing();
+  if (!service_endpoint->IsShmemProvidedByProducer()) {
+    // Nominal case (% Chrome): service provides SMB.
+    setup_tracing->set_shared_buffer_page_size_kb(
+        static_cast<uint32_t>(service_endpoint->shared_buffer_page_size_kb()));
+    const int shm_fd =
+        static_cast<PosixSharedMemory*>(service_endpoint->shared_memory())
+            ->fd();
+    cmd.set_fd(shm_fd);
+  }
+  async_producer_commands.Resolve(std::move(cmd));
+}
+
+void ProducerIPCService::RemoteProducer::Flush(
+    FlushRequestID flush_request_id,
+    const DataSourceInstanceID* data_source_ids,
+    size_t num_data_sources) {
+  if (!async_producer_commands.IsBound()) {
+    PERFETTO_DLOG(
+        "The Service tried to request a flush but the remote Producer has not "
+        "yet initialized the connection");
+    return;
+  }
+  auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
+  cmd.set_has_more(true);
+  for (size_t i = 0; i < num_data_sources; i++)
+    cmd->mutable_flush()->add_data_source_ids(data_source_ids[i]);
+  cmd->mutable_flush()->set_request_id(flush_request_id);
+  async_producer_commands.Resolve(std::move(cmd));
+}
+
+void ProducerIPCService::RemoteProducer::ClearIncrementalState(
+    const DataSourceInstanceID* data_source_ids,
+    size_t num_data_sources) {
+  if (!async_producer_commands.IsBound()) {
+    PERFETTO_DLOG(
+        "The Service tried to request an incremental state invalidation, but "
+        "the remote Producer has not yet initialized the connection");
+    return;
+  }
+  auto cmd = ipc::AsyncResult<protos::gen::GetAsyncCommandResponse>::Create();
+  cmd.set_has_more(true);
+  for (size_t i = 0; i < num_data_sources; i++)
+    cmd->mutable_clear_incremental_state()->add_data_source_ids(
+        data_source_ids[i]);
+  async_producer_commands.Resolve(std::move(cmd));
+}
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/ipc/service/service_ipc_host_impl.cc
+// gen_amalgamated begin header: src/tracing/ipc/service/service_ipc_host_impl.h
+// gen_amalgamated begin header: include/perfetto/ext/tracing/ipc/service_ipc_host.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_EXT_TRACING_IPC_SERVICE_IPC_HOST_H_
+#define INCLUDE_PERFETTO_EXT_TRACING_IPC_SERVICE_IPC_HOST_H_
+
+#include <memory>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/scoped_file.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/unix_socket.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/basic_types.h"
+
+namespace perfetto {
+namespace base {
+class TaskRunner;
+}  // namespace base.
+
+class TracingService;
+
+// Creates an instance of the service (business logic + UNIX socket transport).
+// Exposed to:
+//   The code in the tracing client that will host the service e.g., traced.
+// Implemented in:
+//   src/tracing/ipc/service/service_ipc_host_impl.cc
+class PERFETTO_EXPORT ServiceIPCHost {
+ public:
+  static std::unique_ptr<ServiceIPCHost> CreateInstance(base::TaskRunner*);
+  virtual ~ServiceIPCHost();
+
+  // Start listening on the Producer & Consumer ports. Returns false in case of
+  // failure (e.g., something else is listening on |socket_name|).
+  virtual bool Start(const char* producer_socket_name,
+                     const char* consumer_socket_name) = 0;
+
+  // Like the above, but takes two file descriptors to already bound sockets.
+  // This is used when building as part of the Android tree, where init opens
+  // and binds the socket beore exec()-ing us.
+  virtual bool Start(base::ScopedSocketHandle producer_socket_fd,
+                     base::ScopedSocketHandle consumer_socket_fd) = 0;
+
+  virtual TracingService* service() const = 0;
+
+ protected:
+  ServiceIPCHost();
+
+ private:
+  ServiceIPCHost(const ServiceIPCHost&) = delete;
+  ServiceIPCHost& operator=(const ServiceIPCHost&) = delete;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_EXT_TRACING_IPC_SERVICE_IPC_HOST_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_TRACING_IPC_SERVICE_SERVICE_IPC_HOST_IMPL_H_
+#define SRC_TRACING_IPC_SERVICE_SERVICE_IPC_HOST_IMPL_H_
+
+#include <memory>
+
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/service_ipc_host.h"
+
+namespace perfetto {
+
+namespace ipc {
+class Host;
+}
+
+// The implementation of the IPC host for the tracing service. This class does
+// very few things: it mostly initializes the IPC transport. The actual
+// implementation of the IPC <> Service business logic glue lives in
+// producer_ipc_service.cc and consumer_ipc_service.cc.
+class ServiceIPCHostImpl : public ServiceIPCHost {
+ public:
+  ServiceIPCHostImpl(base::TaskRunner*);
+  ~ServiceIPCHostImpl() override;
+
+  // ServiceIPCHost implementation.
+  bool Start(const char* producer_socket_name,
+             const char* consumer_socket_name) override;
+  bool Start(base::ScopedSocketHandle producer_socket_fd,
+             base::ScopedSocketHandle consumer_socket_fd) override;
+
+  TracingService* service() const override;
+
+ private:
+  bool DoStart();
+  void Shutdown();
+
+  base::TaskRunner* const task_runner_;
+  std::unique_ptr<TracingService> svc_;  // The service business logic.
+
+  // The IPC host that listens on the Producer socket. It owns the
+  // PosixServiceProducerPort instance which deals with all producers' IPC(s).
+  std::unique_ptr<ipc::Host> producer_ipc_port_;
+
+  // As above, but for the Consumer port.
+  std::unique_ptr<ipc::Host> consumer_ipc_port_;
+};
+
+}  // namespace perfetto
+
+#endif  // SRC_TRACING_IPC_SERVICE_SERVICE_IPC_HOST_IMPL_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "src/tracing/ipc/service/service_ipc_host_impl.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/ext/ipc/host.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
+// gen_amalgamated expanded: #include "src/tracing/ipc/posix_shared_memory.h"
+// gen_amalgamated expanded: #include "src/tracing/ipc/service/consumer_ipc_service.h"
+// gen_amalgamated expanded: #include "src/tracing/ipc/service/producer_ipc_service.h"
+
+namespace perfetto {
+
+// TODO(fmayer): implement per-uid connection limit (b/69093705).
+
+// Implements the publicly exposed factory method declared in
+// include/tracing/posix_ipc/posix_service_host.h.
+std::unique_ptr<ServiceIPCHost> ServiceIPCHost::CreateInstance(
+    base::TaskRunner* task_runner) {
+  return std::unique_ptr<ServiceIPCHost>(new ServiceIPCHostImpl(task_runner));
+}
+
+ServiceIPCHostImpl::ServiceIPCHostImpl(base::TaskRunner* task_runner)
+    : task_runner_(task_runner) {}
+
+ServiceIPCHostImpl::~ServiceIPCHostImpl() {}
+
+bool ServiceIPCHostImpl::Start(const char* producer_socket_name,
+                               const char* consumer_socket_name) {
+  PERFETTO_CHECK(!svc_);  // Check if already started.
+
+  // Initialize the IPC transport.
+  producer_ipc_port_ =
+      ipc::Host::CreateInstance(producer_socket_name, task_runner_);
+  consumer_ipc_port_ =
+      ipc::Host::CreateInstance(consumer_socket_name, task_runner_);
+  return DoStart();
+}
+
+bool ServiceIPCHostImpl::Start(base::ScopedSocketHandle producer_socket_fd,
+                               base::ScopedSocketHandle consumer_socket_fd) {
+  PERFETTO_CHECK(!svc_);  // Check if already started.
+
+  // Initialize the IPC transport.
+  producer_ipc_port_ =
+      ipc::Host::CreateInstance(std::move(producer_socket_fd), task_runner_);
+  consumer_ipc_port_ =
+      ipc::Host::CreateInstance(std::move(consumer_socket_fd), task_runner_);
+  return DoStart();
+}
+
+bool ServiceIPCHostImpl::DoStart() {
+  // Create and initialize the platform-independent tracing business logic.
+  std::unique_ptr<SharedMemory::Factory> shm_factory(
+      new PosixSharedMemory::Factory());
+  svc_ = TracingService::CreateInstance(std::move(shm_factory), task_runner_);
+
+  if (!producer_ipc_port_ || !consumer_ipc_port_) {
+    Shutdown();
+    return false;
+  }
+
+  // TODO(fmayer): add a test that destroyes the ServiceIPCHostImpl soon after
+  // Start() and checks that no spurious callbacks are issued.
+  bool producer_service_exposed = producer_ipc_port_->ExposeService(
+      std::unique_ptr<ipc::Service>(new ProducerIPCService(svc_.get())));
+  PERFETTO_CHECK(producer_service_exposed);
+
+  bool consumer_service_exposed = consumer_ipc_port_->ExposeService(
+      std::unique_ptr<ipc::Service>(new ConsumerIPCService(svc_.get())));
+  PERFETTO_CHECK(consumer_service_exposed);
+
+  return true;
+}
+
+TracingService* ServiceIPCHostImpl::service() const {
+  return svc_.get();
+}
+
+void ServiceIPCHostImpl::Shutdown() {
+  // TODO(primiano): add a test that causes the Shutdown() and checks that no
+  // spurious callbacks are issued.
+  producer_ipc_port_.reset();
+  consumer_ipc_port_.reset();
+  svc_.reset();
+}
+
+// Definitions for the base class ctor/dtor.
+ServiceIPCHost::ServiceIPCHost() = default;
+ServiceIPCHost::~ServiceIPCHost() = default;
+
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/internal/system_tracing_backend.cc
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/system_tracing_backend.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/base/task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/core/tracing_service.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/consumer_ipc_client.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/default_socket.h"
+// gen_amalgamated expanded: #include "perfetto/ext/tracing/ipc/producer_ipc_client.h"
+
+namespace perfetto {
+namespace internal {
+
+// static
+TracingBackend* SystemTracingBackend::GetInstance() {
+  static auto* instance = new SystemTracingBackend();
+  return instance;
+}
+
+SystemTracingBackend::SystemTracingBackend() {}
+
+std::unique_ptr<ProducerEndpoint> SystemTracingBackend::ConnectProducer(
+    const ConnectProducerArgs& args) {
+  PERFETTO_DCHECK(args.task_runner->RunsTasksOnCurrentThread());
+
+  auto endpoint = ProducerIPCClient::Connect(
+      GetProducerSocket(), args.producer, args.producer_name, args.task_runner,
+      TracingService::ProducerSMBScrapingMode::kEnabled,
+      args.shmem_size_hint_bytes, args.shmem_page_size_hint_bytes, nullptr,
+      nullptr, ProducerIPCClient::ConnectionFlags::kRetryIfUnreachable);
+  PERFETTO_CHECK(endpoint);
+  return endpoint;
+}
+
+std::unique_ptr<ConsumerEndpoint> SystemTracingBackend::ConnectConsumer(
+    const ConnectConsumerArgs& args) {
+  auto endpoint = ConsumerIPCClient::Connect(GetConsumerSocket(), args.consumer,
+                                             args.task_runner);
+  PERFETTO_CHECK(endpoint);
+  return endpoint;
+}
+
+}  // namespace internal
+}  // namespace perfetto
+// gen_amalgamated begin source: src/tracing/platform_posix.cc
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) ||   \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+
+// gen_amalgamated expanded: #include "perfetto/ext/base/file_utils.h"
+// gen_amalgamated expanded: #include "perfetto/ext/base/thread_task_runner.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_tls.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/platform.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
+
+#include <pthread.h>
+#include <stdlib.h>
+
+namespace perfetto {
+
+namespace {
+
+class PlatformPosix : public Platform {
+ public:
+  PlatformPosix();
+  ~PlatformPosix() override;
+
+  ThreadLocalObject* GetOrCreateThreadLocalObject() override;
+  std::unique_ptr<base::TaskRunner> CreateTaskRunner(
+      const CreateTaskRunnerArgs&) override;
+  std::string GetCurrentProcessName() override;
+
+ private:
+  pthread_key_t tls_key_{};
+};
+
+using ThreadLocalObject = Platform::ThreadLocalObject;
+
+PlatformPosix::PlatformPosix() {
+  auto tls_dtor = [](void* obj) {
+    delete static_cast<ThreadLocalObject*>(obj);
+  };
+  PERFETTO_CHECK(pthread_key_create(&tls_key_, tls_dtor) == 0);
+}
+
+PlatformPosix::~PlatformPosix() {
+  pthread_key_delete(tls_key_);
+}
+
+ThreadLocalObject* PlatformPosix::GetOrCreateThreadLocalObject() {
+  // In chromium this should be implemented using base::ThreadLocalStorage.
+  auto tls = static_cast<ThreadLocalObject*>(pthread_getspecific(tls_key_));
+  if (!tls) {
+    tls = ThreadLocalObject::CreateInstance().release();
+    pthread_setspecific(tls_key_, tls);
+  }
+  return tls;
+}
+
+std::unique_ptr<base::TaskRunner> PlatformPosix::CreateTaskRunner(
+    const CreateTaskRunnerArgs&) {
+  return std::unique_ptr<base::TaskRunner>(
+      new base::ThreadTaskRunner(base::ThreadTaskRunner::CreateAndStart()));
+}
+
+std::string PlatformPosix::GetCurrentProcessName() {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+  std::string cmdline;
+  base::ReadFile("/proc/self/cmdline", &cmdline);
+  return cmdline.substr(0, cmdline.find('\0'));
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+  return std::string(getprogname());
+#else
+  return "unknown_producer";
+#endif
+}
+
+}  // namespace
+
+// static
+Platform* Platform::GetDefaultPlatform() {
+  static PlatformPosix* instance = new PlatformPosix();
+  return instance;
+}
+
+}  // namespace perfetto
+#endif  // OS_LINUX || OS_ANDROID || OS_APPLE
+
diff --git a/system/profiler/perfetto.h b/system/profiler/perfetto.h
new file mode 100644
index 0000000..d66b3ef
--- /dev/null
+++ b/system/profiler/perfetto.h
@@ -0,0 +1,132001 @@
+// Copyright (C) 2019 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is automatically generated by gen_amalgamated. Do not edit.
+
+// gen_amalgamated begin header: include/perfetto/tracing.h
+// gen_amalgamated begin header: include/perfetto/tracing/buffer_exhausted_policy.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_BUFFER_EXHAUSTED_POLICY_H_
+#define INCLUDE_PERFETTO_TRACING_BUFFER_EXHAUSTED_POLICY_H_
+
+namespace perfetto {
+
+// Determines how SharedMemoryArbiterImpl::GetNewChunk() behaves when no free
+// chunks are available.
+enum class BufferExhaustedPolicy {
+  // SharedMemoryArbiterImpl::GetNewChunk() will stall if no free SMB chunk is
+  // available and wait for the tracing service to free one. Note that this
+  // requires that messages the arbiter sends to the tracing service (from any
+  // TraceWriter thread) will be received by it, even if all TraceWriter threads
+  // are stalled.
+  kStall,
+
+  // SharedMemoryArbiterImpl::GetNewChunk() will return an invalid chunk if no
+  // free SMB chunk is available. In this case, the TraceWriter will fall back
+  // to a garbage chunk and drop written data until acquiring a future chunk
+  // succeeds again.
+  kDrop,
+
+  // TODO(eseckler): Switch to kDrop by default and change the Android code to
+  // explicitly request kStall instead.
+  kDefault = kStall
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_BUFFER_EXHAUSTED_POLICY_H_
+// gen_amalgamated begin header: include/perfetto/tracing/console_interceptor.h
+// gen_amalgamated begin header: include/perfetto/base/compiler.h
+// gen_amalgamated begin header: include/perfetto/base/build_config.h
+// gen_amalgamated begin header: gen/build_config/perfetto_build_flags.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Generated by write_buildflag_header.py
+
+// fix_include_guards: off
+#ifndef GEN_BUILD_CONFIG_PERFETTO_BUILD_FLAGS_H_
+#define GEN_BUILD_CONFIG_PERFETTO_BUILD_FLAGS_H_
+
+// clang-format off
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_ANDROID_BUILD() (0)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_CHROMIUM_BUILD() (0)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_STANDALONE_BUILD() (0)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_START_DAEMONS() (1)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_IPC() (1)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_WATCHDOG() (0)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_COMPONENT_BUILD() (0)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_FORCE_DLOG_ON() (0)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_FORCE_DLOG_OFF() (0)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_VERBOSE_LOGS() (1)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_VERSION_GEN() (1)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_TP_PERCENTILE() (0)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_TP_LINENOISE() (0)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_TP_HTTPD() (0)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_TP_JSON() (1)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_LOCAL_SYMBOLIZER() (0)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_ZLIB() (1)
+
+// clang-format on
+#endif  // GEN_BUILD_CONFIG_PERFETTO_BUILD_FLAGS_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_BASE_BUILD_CONFIG_H_
+#define INCLUDE_PERFETTO_BASE_BUILD_CONFIG_H_
+
+// Allows to define build flags that give a compiler error if the header that
+// defined the flag is not included, instead of silently ignoring the #if block.
+#define PERFETTO_BUILDFLAG_CAT_INDIRECT(a, b) a##b
+#define PERFETTO_BUILDFLAG_CAT(a, b) PERFETTO_BUILDFLAG_CAT_INDIRECT(a, b)
+#define PERFETTO_BUILDFLAG(flag) \
+  (PERFETTO_BUILDFLAG_CAT(PERFETTO_BUILDFLAG_DEFINE_, flag)())
+
+#if defined(__ANDROID__)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_ANDROID() 1
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_LINUX() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_WIN() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_APPLE() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_MAC() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_IOS() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_WASM() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_FUCHSIA() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_NACL() 0
+#elif defined(__APPLE__)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_ANDROID() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_APPLE() 1
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_LINUX() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_WIN() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_WASM() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_FUCHSIA() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_NACL() 0
+// Include TARGET_OS_IPHONE when on __APPLE__ systems.
+#include <TargetConditionals.h>
+#if defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_MAC() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_IOS() 1
+#else
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_MAC() 1
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_IOS() 0
+#endif
+#elif defined(__linux__)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_ANDROID() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_LINUX() 1
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_WIN() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_APPLE() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_MAC() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_IOS() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_WASM() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_FUCHSIA() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_NACL() 0
+#elif defined(_WIN32)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_ANDROID() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_LINUX() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_WIN() 1
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_APPLE() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_MAC() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_IOS() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_WASM() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_FUCHSIA() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_NACL() 0
+#elif defined(__EMSCRIPTEN__)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_ANDROID() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_LINUX() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_WIN() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_APPLE() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_MAC() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_IOS() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_WASM() 1
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_FUCHSIA() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_NACL() 0
+#elif defined(__Fuchsia__)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_ANDROID() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_APPLE() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_MAC() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_IOS() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_LINUX() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_WIN() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_WASM() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_FUCHSIA() 1
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_NACL() 0
+#elif defined(__native_client__)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_ANDROID() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_LINUX() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_WIN() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_APPLE() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_MAC() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_IOS() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_WASM() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_FUCHSIA() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_OS_NACL() 1
+#else
+#error OS not supported (see build_config.h)
+#endif
+
+#if defined(__clang__)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_COMPILER_CLANG() 1
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_COMPILER_GCC() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_COMPILER_MSVC() 0
+#elif defined(__GNUC__) // Careful: Clang also defines this!
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_COMPILER_CLANG() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_COMPILER_GCC() 1
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_COMPILER_MSVC() 0
+#elif defined(_MSC_VER)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_COMPILER_CLANG() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_COMPILER_GCC() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_COMPILER_MSVC() 1
+#else
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_COMPILER_CLANG() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_COMPILER_GCC() 0
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_COMPILER_MSVC() 0
+#endif
+
+#if defined(PERFETTO_BUILD_WITH_ANDROID_USERDEBUG)
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_ANDROID_USERDEBUG_BUILD() 1
+#else
+#define PERFETTO_BUILDFLAG_DEFINE_PERFETTO_ANDROID_USERDEBUG_BUILD() 0
+#endif
+
+// perfetto_build_flags.h contains the tweakable build flags defined via GN.
+// - In GN builds (e.g., standalone, chromium, v8) this file is generated at
+//   build time via the gen_rule //gn/gen_buildflags.
+// - In Android in-tree builds, this file is generated by tools/gen_android_bp
+//   and checked in into include/perfetto/base/build_configs/android_tree/. The
+//   default cflags add this path to the default include path.
+// - Similarly, in bazel builds, this file is generated by tools/gen_bazel and
+//   checked in into include/perfetto/base/build_configs/bazel/.
+// - In amaglamated builds, this file is generated by tools/gen_amalgamated and
+//   added to the amalgamated headers.
+// gen_amalgamated expanded: #include "perfetto_build_flags.h"  // no-include-violation-check
+
+#endif  // INCLUDE_PERFETTO_BASE_BUILD_CONFIG_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_BASE_COMPILER_H_
+#define INCLUDE_PERFETTO_BASE_COMPILER_H_
+
+#include <stddef.h>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+// __has_attribute is supported only by clang and recent versions of GCC.
+// Add a layer to wrap the __has_attribute macro.
+#if defined(__has_attribute)
+#define PERFETTO_HAS_ATTRIBUTE(x) __has_attribute(x)
+#else
+#define PERFETTO_HAS_ATTRIBUTE(x) 0
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#define PERFETTO_LIKELY(_x) __builtin_expect(!!(_x), 1)
+#define PERFETTO_UNLIKELY(_x) __builtin_expect(!!(_x), 0)
+#else
+#define PERFETTO_LIKELY(_x) (_x)
+#define PERFETTO_UNLIKELY(_x) (_x)
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#define PERFETTO_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#else
+#define PERFETTO_WARN_UNUSED_RESULT
+#endif
+
+#if defined(__clang__)
+#define PERFETTO_ALWAYS_INLINE __attribute__((__always_inline__))
+#define PERFETTO_NO_INLINE __attribute__((__noinline__))
+#else
+// GCC is too pedantic and often fails with the error:
+// "always_inline function might not be inlinable"
+#define PERFETTO_ALWAYS_INLINE
+#define PERFETTO_NO_INLINE
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#define PERFETTO_NORETURN __attribute__((__noreturn__))
+#else
+#define PERFETTO_NORETURN __declspec(noreturn)
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#define PERFETTO_DEBUG_FUNCTION_IDENTIFIER() __PRETTY_FUNCTION__
+#elif defined(_MSC_VER)
+#define PERFETTO_DEBUG_FUNCTION_IDENTIFIER() __FUNCSIG__
+#else
+#define PERFETTO_DEBUG_FUNCTION_IDENTIFIER() \
+  static_assert(false, "Not implemented for this compiler")
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#define PERFETTO_PRINTF_FORMAT(x, y) \
+  __attribute__((__format__(__printf__, x, y)))
+#else
+#define PERFETTO_PRINTF_FORMAT(x, y)
+#endif
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_IOS)
+// TODO(b/158814068): For iOS builds, thread_local is only supported since iOS
+// 8. We'd have to use pthread for thread local data instead here. For now, just
+// define it to nothing since we don't support running perfetto or the client
+// lib on iOS right now.
+#define PERFETTO_THREAD_LOCAL
+#else
+#define PERFETTO_THREAD_LOCAL thread_local
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#define PERFETTO_POPCOUNT(x) __builtin_popcountll(x)
+#else
+#include <intrin.h>
+#define PERFETTO_POPCOUNT(x) __popcnt64(x)
+#endif
+
+#if defined(__clang__)
+#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
+extern "C" void __asan_poison_memory_region(void const volatile*, size_t);
+extern "C" void __asan_unpoison_memory_region(void const volatile*, size_t);
+#define PERFETTO_ASAN_POISON(a, s) __asan_poison_memory_region((a), (s))
+#define PERFETTO_ASAN_UNPOISON(a, s) __asan_unpoison_memory_region((a), (s))
+#else
+#define PERFETTO_ASAN_POISON(addr, size)
+#define PERFETTO_ASAN_UNPOISON(addr, size)
+#endif  // __has_feature(address_sanitizer)
+#else
+#define PERFETTO_ASAN_POISON(addr, size)
+#define PERFETTO_ASAN_UNPOISON(addr, size)
+#endif  // __clang__
+
+#if defined(__GNUC__) || defined(__clang__)
+#define PERFETTO_IS_LITTLE_ENDIAN() __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#else
+// Assume all MSVC targets are little endian.
+#define PERFETTO_IS_LITTLE_ENDIAN() 1
+#endif
+
+// This is used for exporting xxxMain() symbols (e.g., PerfettoCmdMain,
+// ProbesMain) from libperfetto.so when the GN arg monolithic_binaries = false.
+#if defined(__GNUC__) || defined(__clang__)
+#define PERFETTO_EXPORT_ENTRYPOINT __attribute__((visibility("default")))
+#else
+// TODO(primiano): on Windows this should be a pair of dllexport/dllimport. But
+// that requires a -DXXX_IMPLEMENTATION depending on whether we are on the
+// impl-site or call-site. Right now it's not worth the trouble as we
+// force-export the xxxMain() symbols only on Android, where we pack all the
+// code for N binaries into one .so to save binary size. On Windows we support
+// only monolithic binaries, as they are easier to deal with.
+#define PERFETTO_EXPORT_ENTRYPOINT
+#endif
+
+// Disables thread safety analysis for functions where the compiler can't
+// accurate figure out which locks are being held.
+#if defined(__clang__)
+#define PERFETTO_NO_THREAD_SAFETY_ANALYSIS \
+  __attribute__((no_thread_safety_analysis))
+#else
+#define PERFETTO_NO_THREAD_SAFETY_ANALYSIS
+#endif
+
+// Avoid calling the exit-time destructor on an object with static lifetime.
+#if PERFETTO_HAS_ATTRIBUTE(no_destroy)
+#define PERFETTO_HAS_NO_DESTROY() 1
+#define PERFETTO_NO_DESTROY __attribute__((no_destroy))
+#else
+#define PERFETTO_HAS_NO_DESTROY() 0
+#define PERFETTO_NO_DESTROY
+#endif
+
+namespace perfetto {
+namespace base {
+
+template <typename... T>
+inline void ignore_result(const T&...) {}
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_BASE_COMPILER_H_
+// gen_amalgamated begin header: include/perfetto/base/logging.h
+// gen_amalgamated begin header: include/perfetto/base/export.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_BASE_EXPORT_H_
+#define INCLUDE_PERFETTO_BASE_EXPORT_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_COMPONENT_BUILD)
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+
+#if defined(PERFETTO_IMPLEMENTATION)
+#define PERFETTO_EXPORT __declspec(dllexport)
+#else
+#define PERFETTO_EXPORT __declspec(dllimport)
+#endif
+
+#else  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+
+#if defined(PERFETTO_IMPLEMENTATION)
+#define PERFETTO_EXPORT __attribute__((visibility("default")))
+#else
+#define PERFETTO_EXPORT
+#endif
+
+#endif  // PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+
+#else  // !PERFETTO_BUILDFLAG(PERFETTO_COMPONENT_BUILD)
+
+#define PERFETTO_EXPORT
+
+#endif  // PERFETTO_BUILDFLAG(PERFETTO_COMPONENT_BUILD)
+
+#endif  // INCLUDE_PERFETTO_BASE_EXPORT_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_BASE_LOGGING_H_
+#define INCLUDE_PERFETTO_BASE_LOGGING_H_
+
+#include <errno.h>
+#include <string.h>  // For strerror.
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+#if defined(__GNUC__) || defined(__clang__)
+// Ignore GCC warning about a missing argument for a variadic macro parameter.
+#pragma GCC system_header
+#endif
+
+// TODO(primiano): move this to base/build_config.h, turn into
+// PERFETTO_BUILDFLAG(DCHECK_IS_ON) and update call sites to use that instead.
+#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
+#define PERFETTO_DCHECK_IS_ON() 0
+#else
+#define PERFETTO_DCHECK_IS_ON() 1
+#endif
+
+#if PERFETTO_BUILDFLAG(PERFETTO_FORCE_DLOG_ON)
+#define PERFETTO_DLOG_IS_ON() 1
+#elif PERFETTO_BUILDFLAG(PERFETTO_FORCE_DLOG_OFF)
+#define PERFETTO_DLOG_IS_ON() 0
+#else
+#define PERFETTO_DLOG_IS_ON() PERFETTO_DCHECK_IS_ON()
+#endif
+
+#if defined(PERFETTO_ANDROID_ASYNC_SAFE_LOG)
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID) || \
+    !PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
+#error "Async-safe logging is limited to Android tree builds"
+#endif
+// For binaries which need a very lightweight logging implementation.
+// Note that this header is incompatible with android/log.h.
+#include <async_safe/log.h>
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+// Normal android logging.
+#include <android/log.h>
+#endif
+
+namespace perfetto {
+namespace base {
+
+// Constexpr functions to extract basename(__FILE__), e.g.: ../foo/f.c -> f.c .
+constexpr const char* StrEnd(const char* s) {
+  return *s ? StrEnd(s + 1) : s;
+}
+
+constexpr const char* BasenameRecursive(const char* s,
+                                        const char* begin,
+                                        const char* end) {
+  return (*s == '/' && s < end)
+             ? (s + 1)
+             : ((s > begin) ? BasenameRecursive(s - 1, begin, end) : s);
+}
+
+constexpr const char* Basename(const char* str) {
+  return BasenameRecursive(StrEnd(str), str, StrEnd(str));
+}
+
+enum LogLev { kLogDebug = 0, kLogInfo, kLogImportant, kLogError };
+
+struct LogMessageCallbackArgs {
+  LogLev level;
+  int line;
+  const char* filename;
+  const char* message;
+};
+
+using LogMessageCallback = void (*)(LogMessageCallbackArgs);
+
+// This is not thread safe and must be called before using tracing from other
+// threads.
+PERFETTO_EXPORT void SetLogMessageCallback(LogMessageCallback callback);
+
+PERFETTO_EXPORT void LogMessage(LogLev,
+                                const char* fname,
+                                int line,
+                                const char* fmt,
+                                ...) PERFETTO_PRINTF_FORMAT(4, 5);
+
+#if defined(PERFETTO_ANDROID_ASYNC_SAFE_LOG)
+#define PERFETTO_XLOG(level, fmt, ...)                                        \
+  do {                                                                        \
+    async_safe_format_log((ANDROID_LOG_DEBUG + level), "perfetto",            \
+                          "%s:%d " fmt, ::perfetto::base::Basename(__FILE__), \
+                          __LINE__, ##__VA_ARGS__);                           \
+  } while (0)
+#elif defined(PERFETTO_DISABLE_LOG)
+#define PERFETTO_XLOG(...) ::perfetto::base::ignore_result(__VA_ARGS__)
+#else
+#define PERFETTO_XLOG(level, fmt, ...)                                      \
+  ::perfetto::base::LogMessage(level, ::perfetto::base::Basename(__FILE__), \
+                               __LINE__, fmt, ##__VA_ARGS__)
+#endif
+
+#if defined(_MSC_VER)
+#define PERFETTO_IMMEDIATE_CRASH() \
+  do {                             \
+    __debugbreak();                \
+    __assume(0);                   \
+  } while (0)
+#else
+#define PERFETTO_IMMEDIATE_CRASH() \
+  do {                             \
+    __builtin_trap();              \
+    __builtin_unreachable();       \
+  } while (0)
+#endif
+
+#if PERFETTO_BUILDFLAG(PERFETTO_VERBOSE_LOGS)
+#define PERFETTO_LOG(fmt, ...) \
+  PERFETTO_XLOG(::perfetto::base::kLogInfo, fmt, ##__VA_ARGS__)
+#else  // PERFETTO_BUILDFLAG(PERFETTO_VERBOSE_LOGS)
+#define PERFETTO_LOG(...) ::perfetto::base::ignore_result(__VA_ARGS__)
+#endif  // PERFETTO_BUILDFLAG(PERFETTO_VERBOSE_LOGS)
+
+#define PERFETTO_ILOG(fmt, ...) \
+  PERFETTO_XLOG(::perfetto::base::kLogImportant, fmt, ##__VA_ARGS__)
+#define PERFETTO_ELOG(fmt, ...) \
+  PERFETTO_XLOG(::perfetto::base::kLogError, fmt, ##__VA_ARGS__)
+#define PERFETTO_FATAL(fmt, ...)       \
+  do {                                 \
+    PERFETTO_PLOG(fmt, ##__VA_ARGS__); \
+    PERFETTO_IMMEDIATE_CRASH();        \
+  } while (0)
+
+#if defined(__GNUC__) || defined(__clang__)
+#define PERFETTO_PLOG(x, ...) \
+  PERFETTO_ELOG(x " (errno: %d, %s)", ##__VA_ARGS__, errno, strerror(errno))
+#else
+// MSVC expands __VA_ARGS__ in a different order. Give up, not worth it.
+#define PERFETTO_PLOG PERFETTO_ELOG
+#endif
+
+#define PERFETTO_CHECK(x)                            \
+  do {                                               \
+    if (PERFETTO_UNLIKELY(!(x))) {                   \
+      PERFETTO_PLOG("%s", "PERFETTO_CHECK(" #x ")"); \
+      PERFETTO_IMMEDIATE_CRASH();                    \
+    }                                                \
+  } while (0)
+
+#if PERFETTO_DLOG_IS_ON()
+
+#define PERFETTO_DLOG(fmt, ...) \
+  PERFETTO_XLOG(::perfetto::base::kLogDebug, fmt, ##__VA_ARGS__)
+
+#if defined(__GNUC__) || defined(__clang__)
+#define PERFETTO_DPLOG(x, ...) \
+  PERFETTO_DLOG(x " (errno: %d, %s)", ##__VA_ARGS__, errno, strerror(errno))
+#else
+// MSVC expands __VA_ARGS__ in a different order. Give up, not worth it.
+#define PERFETTO_DPLOG PERFETTO_DLOG
+#endif
+
+#else  // PERFETTO_DLOG_IS_ON()
+
+#define PERFETTO_DLOG(...) ::perfetto::base::ignore_result(__VA_ARGS__)
+#define PERFETTO_DPLOG(...) ::perfetto::base::ignore_result(__VA_ARGS__)
+
+#endif  // PERFETTO_DLOG_IS_ON()
+
+#if PERFETTO_DCHECK_IS_ON()
+
+#define PERFETTO_DCHECK(x) PERFETTO_CHECK(x)
+#define PERFETTO_DFATAL(...) PERFETTO_FATAL(__VA_ARGS__)
+#define PERFETTO_DFATAL_OR_ELOG(...) PERFETTO_DFATAL(__VA_ARGS__)
+
+#else  // PERFETTO_DCHECK_IS_ON()
+
+#define PERFETTO_DCHECK(x) \
+  do {                     \
+  } while (false && (x))
+
+#define PERFETTO_DFATAL(...) ::perfetto::base::ignore_result(__VA_ARGS__)
+#define PERFETTO_DFATAL_OR_ELOG(...) PERFETTO_ELOG(__VA_ARGS__)
+
+#endif  // PERFETTO_DCHECK_IS_ON()
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_BASE_LOGGING_H_
+// gen_amalgamated begin header: include/perfetto/tracing/interceptor.h
+// gen_amalgamated begin header: include/perfetto/protozero/field.h
+// gen_amalgamated begin header: include/perfetto/protozero/contiguous_memory_range.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_CONTIGUOUS_MEMORY_RANGE_H_
+#define INCLUDE_PERFETTO_PROTOZERO_CONTIGUOUS_MEMORY_RANGE_H_
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+
+namespace protozero {
+
+// Keep this struct trivially constructible (no ctors, no default initializers).
+struct ContiguousMemoryRange {
+  uint8_t* begin;
+  uint8_t* end;  // STL style: one byte past the end of the buffer.
+
+  inline bool is_valid() const { return begin != nullptr; }
+  inline void reset() { begin = nullptr; }
+  inline size_t size() const { return static_cast<size_t>(end - begin); }
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_CONTIGUOUS_MEMORY_RANGE_H_
+// gen_amalgamated begin header: include/perfetto/protozero/proto_utils.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_PROTO_UTILS_H_
+#define INCLUDE_PERFETTO_PROTOZERO_PROTO_UTILS_H_
+
+#include <inttypes.h>
+#include <stddef.h>
+
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+namespace protozero {
+namespace proto_utils {
+
+// See https://developers.google.com/protocol-buffers/docs/encoding wire types.
+// This is a type encoded into the proto that provides just enough info to
+// find the length of the following value.
+enum class ProtoWireType : uint32_t {
+  kVarInt = 0,
+  kFixed64 = 1,
+  kLengthDelimited = 2,
+  kFixed32 = 5,
+};
+
+// This is the type defined in the proto for each field. This information
+// is used to decide the translation strategy when writing the trace.
+enum class ProtoSchemaType {
+  kUnknown = 0,
+  kDouble,
+  kFloat,
+  kInt64,
+  kUint64,
+  kInt32,
+  kFixed64,
+  kFixed32,
+  kBool,
+  kString,
+  kGroup,  // Deprecated (proto2 only)
+  kMessage,
+  kBytes,
+  kUint32,
+  kEnum,
+  kSfixed32,
+  kSfixed64,
+  kSint32,
+  kSint64,
+};
+
+inline const char* ProtoSchemaToString(ProtoSchemaType v) {
+  switch (v) {
+    case ProtoSchemaType::kUnknown:
+      return "unknown";
+    case ProtoSchemaType::kDouble:
+      return "double";
+    case ProtoSchemaType::kFloat:
+      return "float";
+    case ProtoSchemaType::kInt64:
+      return "int64";
+    case ProtoSchemaType::kUint64:
+      return "uint64";
+    case ProtoSchemaType::kInt32:
+      return "int32";
+    case ProtoSchemaType::kFixed64:
+      return "fixed64";
+    case ProtoSchemaType::kFixed32:
+      return "fixed32";
+    case ProtoSchemaType::kBool:
+      return "bool";
+    case ProtoSchemaType::kString:
+      return "string";
+    case ProtoSchemaType::kGroup:
+      return "group";
+    case ProtoSchemaType::kMessage:
+      return "message";
+    case ProtoSchemaType::kBytes:
+      return "bytes";
+    case ProtoSchemaType::kUint32:
+      return "uint32";
+    case ProtoSchemaType::kEnum:
+      return "enum";
+    case ProtoSchemaType::kSfixed32:
+      return "sfixed32";
+    case ProtoSchemaType::kSfixed64:
+      return "sfixed64";
+    case ProtoSchemaType::kSint32:
+      return "sint32";
+    case ProtoSchemaType::kSint64:
+      return "sint64";
+  }
+  // For gcc:
+  PERFETTO_DCHECK(false);
+  return "";
+}
+
+// Maximum message size supported: 256 MiB (4 x 7-bit due to varint encoding).
+constexpr size_t kMessageLengthFieldSize = 4;
+constexpr size_t kMaxMessageLength = (1u << (kMessageLengthFieldSize * 7)) - 1;
+
+// Field tag is encoded as 32-bit varint (5 bytes at most).
+// Largest value of simple (not length-delimited) field is 64-bit varint
+// (10 bytes at most). 15 bytes buffer is enough to store a simple field.
+constexpr size_t kMaxTagEncodedSize = 5;
+constexpr size_t kMaxSimpleFieldEncodedSize = kMaxTagEncodedSize + 10;
+
+// Proto types: (int|uint|sint)(32|64), bool, enum.
+constexpr uint32_t MakeTagVarInt(uint32_t field_id) {
+  return (field_id << 3) | static_cast<uint32_t>(ProtoWireType::kVarInt);
+}
+
+// Proto types: fixed64, sfixed64, fixed32, sfixed32, double, float.
+template <typename T>
+constexpr uint32_t MakeTagFixed(uint32_t field_id) {
+  static_assert(sizeof(T) == 8 || sizeof(T) == 4, "Value must be 4 or 8 bytes");
+  return (field_id << 3) |
+         static_cast<uint32_t>((sizeof(T) == 8 ? ProtoWireType::kFixed64
+                                               : ProtoWireType::kFixed32));
+}
+
+// Proto types: string, bytes, embedded messages.
+constexpr uint32_t MakeTagLengthDelimited(uint32_t field_id) {
+  return (field_id << 3) |
+         static_cast<uint32_t>(ProtoWireType::kLengthDelimited);
+}
+
+// Proto types: sint64, sint32.
+template <typename T>
+inline typename std::make_unsigned<T>::type ZigZagEncode(T value) {
+  using UnsignedType = typename std::make_unsigned<T>::type;
+
+  // Right-shift of negative values is implementation specific.
+  // Assert the implementation does what we expect, which is that shifting any
+  // positive value by sizeof(T) * 8 - 1 gives an all 0 bitmap, and a negative
+  // value gives and all 1 bitmap.
+  constexpr uint64_t kUnsignedZero = 0u;
+  constexpr int64_t kNegativeOne = -1;
+  constexpr int64_t kPositiveOne = 1;
+  static_assert(static_cast<uint64_t>(kNegativeOne >> 63) == ~kUnsignedZero,
+                "implementation does not support assumed rightshift");
+  static_assert(static_cast<uint64_t>(kPositiveOne >> 63) == kUnsignedZero,
+                "implementation does not support assumed rightshift");
+
+  return (static_cast<UnsignedType>(value) << 1) ^
+         static_cast<UnsignedType>(value >> (sizeof(T) * 8 - 1));
+}
+
+// Proto types: sint64, sint32.
+template <typename T>
+inline typename std::make_signed<T>::type ZigZagDecode(T value) {
+  using UnsignedType = typename std::make_unsigned<T>::type;
+  using SignedType = typename std::make_signed<T>::type;
+  auto u_value = static_cast<UnsignedType>(value);
+  auto mask = static_cast<UnsignedType>(-static_cast<SignedType>(u_value & 1));
+  return static_cast<SignedType>((u_value >> 1) ^ mask);
+}
+
+template <typename T>
+inline uint8_t* WriteVarInt(T value, uint8_t* target) {
+  // If value is <= 0 we must first sign extend to int64_t (see [1]).
+  // Finally we always cast to an unsigned value to to avoid arithmetic
+  // (sign expanding) shifts in the while loop.
+  // [1]: "If you use int32 or int64 as the type for a negative number, the
+  // resulting varint is always ten bytes long".
+  // - developers.google.com/protocol-buffers/docs/encoding
+  // So for each input type we do the following casts:
+  // uintX_t -> uintX_t -> uintX_t
+  // int8_t  -> int64_t -> uint64_t
+  // int16_t -> int64_t -> uint64_t
+  // int32_t -> int64_t -> uint64_t
+  // int64_t -> int64_t -> uint64_t
+  using MaybeExtendedType =
+      typename std::conditional<std::is_unsigned<T>::value, T, int64_t>::type;
+  using UnsignedType = typename std::make_unsigned<MaybeExtendedType>::type;
+
+  MaybeExtendedType extended_value = static_cast<MaybeExtendedType>(value);
+  UnsignedType unsigned_value = static_cast<UnsignedType>(extended_value);
+
+  while (unsigned_value >= 0x80) {
+    *target++ = static_cast<uint8_t>(unsigned_value) | 0x80;
+    unsigned_value >>= 7;
+  }
+  *target = static_cast<uint8_t>(unsigned_value);
+  return target + 1;
+}
+
+// Writes a fixed-size redundant encoding of the given |value|. This is
+// used to backfill fixed-size reservations for the length field using a
+// non-canonical varint encoding (e.g. \x81\x80\x80\x00 instead of \x01).
+// See https://github.com/google/protobuf/issues/1530.
+// This is used mainly in two cases:
+// 1) At trace writing time, when starting a nested messages. The size of a
+//    nested message is not known until all its field have been written.
+//    |kMessageLengthFieldSize| bytes are reserved to encode the size field and
+//    backfilled at the end.
+// 2) When rewriting a message at trace filtering time, in protozero/filtering.
+//    At that point we know only the upper bound of the length (a filtered
+//    message is <= the original one) and we backfill after the message has been
+//    filtered.
+inline void WriteRedundantVarInt(uint32_t value,
+                                 uint8_t* buf,
+                                 size_t size = kMessageLengthFieldSize) {
+  for (size_t i = 0; i < size; ++i) {
+    const uint8_t msb = (i < size - 1) ? 0x80 : 0;
+    buf[i] = static_cast<uint8_t>(value) | msb;
+    value >>= 7;
+  }
+}
+
+template <uint32_t field_id>
+void StaticAssertSingleBytePreamble() {
+  static_assert(field_id < 16,
+                "Proto field id too big to fit in a single byte preamble");
+}
+
+// Parses a VarInt from the encoded buffer [start, end). |end| is STL-style and
+// points one byte past the end of buffer.
+// The parsed int value is stored in the output arg |value|. Returns a pointer
+// to the next unconsumed byte (so start < retval <= end) or |start| if the
+// VarInt could not be fully parsed because there was not enough space in the
+// buffer.
+inline const uint8_t* ParseVarInt(const uint8_t* start,
+                                  const uint8_t* end,
+                                  uint64_t* out_value) {
+  const uint8_t* pos = start;
+  uint64_t value = 0;
+  for (uint32_t shift = 0; pos < end && shift < 64u; shift += 7) {
+    // Cache *pos into |cur_byte| to prevent that the compiler dereferences the
+    // pointer twice (here and in the if() below) due to char* aliasing rules.
+    uint8_t cur_byte = *pos++;
+    value |= static_cast<uint64_t>(cur_byte & 0x7f) << shift;
+    if ((cur_byte & 0x80) == 0) {
+      // In valid cases we get here.
+      *out_value = value;
+      return pos;
+    }
+  }
+  *out_value = 0;
+  return start;
+}
+
+enum class RepetitionType {
+  kNotRepeated,
+  kRepeatedPacked,
+  kRepeatedNotPacked,
+};
+
+// Provide a common base struct for all templated FieldMetadata types to allow
+// simple checks if a given type is a FieldMetadata or not.
+struct FieldMetadataBase {
+  constexpr FieldMetadataBase() = default;
+};
+
+template <uint32_t field_id,
+          RepetitionType repetition_type,
+          ProtoSchemaType proto_schema_type,
+          typename CppFieldType,
+          typename MessageType>
+struct FieldMetadata : public FieldMetadataBase {
+  constexpr FieldMetadata() = default;
+
+  static constexpr int kFieldId = field_id;
+  // Whether this field is repeated, packed (repeated [packed-true]) or not
+  // (optional).
+  static constexpr RepetitionType kRepetitionType = repetition_type;
+  // Proto type of this field (e.g. int64, fixed32 or nested message).
+  static constexpr ProtoSchemaType kProtoFieldType = proto_schema_type;
+  // C++ type of this field (for nested messages - C++ protozero class).
+  using cpp_field_type = CppFieldType;
+  // Protozero message which this field belongs to.
+  using message_type = MessageType;
+};
+
+namespace internal {
+
+// Ideally we would create variables of FieldMetadata<...> type directly,
+// but before C++17's support for constexpr inline variables arrive, we have to
+// actually use pointers to inline functions instead to avoid having to define
+// symbols in *.pbzero.cc files.
+//
+// Note: protozero bindings will generate Message::kFieldName variable and which
+// can then be passed to TRACE_EVENT macro for inline writing of typed messages.
+// The fact that the former can be passed to the latter is a part of the stable
+// API, while the particular type is not and users should not rely on it.
+template <typename T>
+using FieldMetadataHelper = T (*)(void);
+
+}  // namespace internal
+}  // namespace proto_utils
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_PROTO_UTILS_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_FIELD_H_
+#define INCLUDE_PERFETTO_PROTOZERO_FIELD_H_
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/contiguous_memory_range.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace protozero {
+
+struct ConstBytes {
+  std::string ToStdString() const {
+    return std::string(reinterpret_cast<const char*>(data), size);
+  }
+
+  const uint8_t* data;
+  size_t size;
+};
+
+struct ConstChars {
+  // Allow implicit conversion to perfetto's base::StringView without depending
+  // on perfetto/base or viceversa.
+  static constexpr bool kConvertibleToStringView = true;
+  std::string ToStdString() const { return std::string(data, size); }
+
+  const char* data;
+  size_t size;
+};
+
+// A protobuf field decoded by the protozero proto decoders. It exposes
+// convenience accessors with minimal debug checks.
+// This class is used both by the iterator-based ProtoDecoder and by the
+// one-shot TypedProtoDecoder.
+// If the field is not valid the accessors consistently return zero-integers or
+// null strings.
+class Field {
+ public:
+  bool valid() const { return id_ != 0; }
+  uint16_t id() const { return id_; }
+  explicit operator bool() const { return valid(); }
+
+  proto_utils::ProtoWireType type() const {
+    auto res = static_cast<proto_utils::ProtoWireType>(type_);
+    PERFETTO_DCHECK(res == proto_utils::ProtoWireType::kVarInt ||
+                    res == proto_utils::ProtoWireType::kLengthDelimited ||
+                    res == proto_utils::ProtoWireType::kFixed32 ||
+                    res == proto_utils::ProtoWireType::kFixed64);
+    return res;
+  }
+
+  bool as_bool() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kVarInt);
+    return static_cast<bool>(int_value_);
+  }
+
+  uint32_t as_uint32() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kVarInt ||
+                    type() == proto_utils::ProtoWireType::kFixed32);
+    return static_cast<uint32_t>(int_value_);
+  }
+
+  int32_t as_int32() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kVarInt ||
+                    type() == proto_utils::ProtoWireType::kFixed32);
+    return static_cast<int32_t>(int_value_);
+  }
+
+  int32_t as_sint32() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kVarInt);
+    return proto_utils::ZigZagDecode(static_cast<uint32_t>(int_value_));
+  }
+
+  uint64_t as_uint64() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kVarInt ||
+                    type() == proto_utils::ProtoWireType::kFixed32 ||
+                    type() == proto_utils::ProtoWireType::kFixed64);
+    return int_value_;
+  }
+
+  int64_t as_int64() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kVarInt ||
+                    type() == proto_utils::ProtoWireType::kFixed32 ||
+                    type() == proto_utils::ProtoWireType::kFixed64);
+    return static_cast<int64_t>(int_value_);
+  }
+
+  int64_t as_sint64() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kVarInt);
+    return proto_utils::ZigZagDecode(static_cast<uint64_t>(int_value_));
+  }
+
+  float as_float() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kFixed32);
+    float res;
+    uint32_t value32 = static_cast<uint32_t>(int_value_);
+    memcpy(&res, &value32, sizeof(res));
+    return res;
+  }
+
+  double as_double() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kFixed64);
+    double res;
+    memcpy(&res, &int_value_, sizeof(res));
+    return res;
+  }
+
+  ConstChars as_string() const {
+    PERFETTO_DCHECK(!valid() ||
+                    type() == proto_utils::ProtoWireType::kLengthDelimited);
+    return ConstChars{reinterpret_cast<const char*>(data()), size_};
+  }
+
+  std::string as_std_string() const { return as_string().ToStdString(); }
+
+  ConstBytes as_bytes() const {
+    PERFETTO_DCHECK(!valid() ||
+                    type() == proto_utils::ProtoWireType::kLengthDelimited);
+    return ConstBytes{data(), size_};
+  }
+
+  const uint8_t* data() const {
+    PERFETTO_DCHECK(!valid() ||
+                    type() == proto_utils::ProtoWireType::kLengthDelimited);
+    return reinterpret_cast<const uint8_t*>(int_value_);
+  }
+
+  size_t size() const {
+    PERFETTO_DCHECK(!valid() ||
+                    type() == proto_utils::ProtoWireType::kLengthDelimited);
+    return size_;
+  }
+
+  uint64_t raw_int_value() const { return int_value_; }
+
+  void initialize(uint16_t id,
+                  uint8_t type,
+                  uint64_t int_value,
+                  uint32_t size) {
+    id_ = id;
+    type_ = type;
+    int_value_ = int_value;
+    size_ = size;
+  }
+
+  // For use with templates. This is used by RepeatedFieldIterator::operator*().
+  void get(bool* val) const { *val = as_bool(); }
+  void get(uint32_t* val) const { *val = as_uint32(); }
+  void get(int32_t* val) const { *val = as_int32(); }
+  void get(uint64_t* val) const { *val = as_uint64(); }
+  void get(int64_t* val) const { *val = as_int64(); }
+  void get(float* val) const { *val = as_float(); }
+  void get(double* val) const { *val = as_double(); }
+  void get(std::string* val) const { *val = as_std_string(); }
+  void get(ConstChars* val) const { *val = as_string(); }
+  void get(ConstBytes* val) const { *val = as_bytes(); }
+  void get_signed(int32_t* val) const { *val = as_sint32(); }
+  void get_signed(int64_t* val) const { *val = as_sint64(); }
+
+  // For enum types.
+  template <typename T,
+            typename = typename std::enable_if<std::is_enum<T>::value, T>::type>
+  void get(T* val) const {
+    *val = static_cast<T>(as_int32());
+  }
+
+  // Serializes the field back into a proto-encoded byte stream and appends it
+  // to |dst|. |dst| is resized accordingly.
+  void SerializeAndAppendTo(std::string* dst) const;
+
+  // Serializes the field back into a proto-encoded byte stream and appends it
+  // to |dst|. |dst| is resized accordingly.
+  void SerializeAndAppendTo(std::vector<uint8_t>* dst) const;
+
+ private:
+  template <typename Container>
+  void SerializeAndAppendToInternal(Container* dst) const;
+
+  // Fields are deliberately not initialized to keep the class trivially
+  // constructible. It makes a large perf difference for ProtoDecoder.
+
+  uint64_t int_value_;  // In kLengthDelimited this contains the data() addr.
+  uint32_t size_;       // Only valid when when type == kLengthDelimited.
+  uint16_t id_;         // Proto field ordinal.
+  uint8_t type_;        // proto_utils::ProtoWireType.
+};
+
+// The Field struct is used in a lot of perf-sensitive contexts.
+static_assert(sizeof(Field) == 16, "Field struct too big");
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_FIELD_H_
+// gen_amalgamated begin header: include/perfetto/tracing/core/forward_decls.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_CORE_FORWARD_DECLS_H_
+#define INCLUDE_PERFETTO_TRACING_CORE_FORWARD_DECLS_H_
+
+// Forward declares classes that are generated at build-time from protos.
+// First of all, why are we forward declaring at all?
+//  1. Chromium diverges from the Google style guide on this, because forward
+//     declarations typically make build times faster, and that's a desirable
+//     property for a large and complex codebase.
+//  2. Adding #include to build-time-generated headers from headers typically
+//     creates subtle build errors that are hard to spot in GN. This is because
+//     once a standard header (say foo.h) has an #include "protos/foo.gen.h",
+//     the build target that depends on foo.h needs to depend on the genrule
+//     that generates foo.gen.h. This is achievable using public_deps in GN but
+//     is not testable / enforceable, hence too easy to get wrong.
+
+// Historically the classes below used to be generated from the corresponding
+// .proto(s) at CL *check-in* time (!= build time) in the ::perfetto namespace.
+// Nowadays we have code everywhere that assume the right class is
+// ::perfetto::TraceConfig or the like. Back then other headers could just
+// forward declared ::perfetto::TraceConfig. These days, the real class is
+// ::perfetto::protos::gen::TraceConfig and core/trace_config.h aliases that as
+// using ::perfetto::TraceConfig = ::perfetto::protos::gen::TraceConfig.
+// In C++ one cannot forward declare a type alias (but only the aliased type).
+// Hence this header, which should be used every time one wants to forward
+// declare classes like TraceConfig.
+
+// The overall plan is that, when one of the classes below is needed:
+// The .h file includes this file.
+// The .cc file includes perfetto/tracing/core/trace_config.h (or equiv). That
+// header will pull the full declaration from trace_config.gen.h and will also
+// setup the alias in the ::perfetto namespace.
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class ChromeConfig;
+class CommitDataRequest;
+class DataSourceConfig;
+class DataSourceDescriptor;
+class ObservableEvents;
+class TraceConfig;
+class TraceStats;
+class TracingServiceCapabilities;
+class TracingServiceState;
+
+}  // namespace gen
+}  // namespace protos
+
+using ChromeConfig = ::perfetto::protos::gen::ChromeConfig;
+using CommitDataRequest = ::perfetto::protos::gen::CommitDataRequest;
+using DataSourceConfig = ::perfetto::protos::gen::DataSourceConfig;
+using DataSourceDescriptor = ::perfetto::protos::gen::DataSourceDescriptor;
+using ObservableEvents = ::perfetto::protos::gen::ObservableEvents;
+using TraceConfig = ::perfetto::protos::gen::TraceConfig;
+using TraceStats = ::perfetto::protos::gen::TraceStats;
+using TracingServiceCapabilities =
+    ::perfetto::protos::gen::TracingServiceCapabilities;
+using TracingServiceState = ::perfetto::protos::gen::TracingServiceState;
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_CORE_FORWARD_DECLS_H_
+// gen_amalgamated begin header: include/perfetto/tracing/internal/basic_types.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_BASIC_TYPES_H_
+#define INCLUDE_PERFETTO_TRACING_INTERNAL_BASIC_TYPES_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace perfetto {
+namespace internal {
+
+// A static_assert in tracing_muxer_impl.cc guarantees that this stays in sync
+// with the definition in tracing/core/basic_types.h
+using BufferId = uint16_t;
+
+// This is a direct index in the TracingMuxer::backends_ vector.
+// Backends are only added and never removed.
+using TracingBackendId = size_t;
+
+// Max numbers of data sources that can be registered in a process.
+constexpr size_t kMaxDataSources = 32;
+
+// Max instances for each data source type. This typically matches the
+// "max number of concurrent tracing sessions". However remember that a data
+// source can be instantiated more than once within one tracing session by
+// creating two entries for it in the trace config.
+constexpr size_t kMaxDataSourceInstances = 8;
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_INTERNAL_BASIC_TYPES_H_
+// gen_amalgamated begin header: include/perfetto/tracing/internal/data_source_internal.h
+// gen_amalgamated begin header: include/perfetto/tracing/trace_writer_base.h
+// gen_amalgamated begin header: include/perfetto/protozero/message_handle.h
+// gen_amalgamated begin header: include/perfetto/protozero/message.h
+// gen_amalgamated begin header: include/perfetto/protozero/scattered_stream_writer.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_SCATTERED_STREAM_WRITER_H_
+#define INCLUDE_PERFETTO_PROTOZERO_SCATTERED_STREAM_WRITER_H_
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/contiguous_memory_range.h"
+
+namespace protozero {
+
+// This class deals with the following problem: append-only proto messages want
+// to write a stream of bytes, without caring about the implementation of the
+// underlying buffer (which concretely will be either the trace ring buffer
+// or a heap-allocated buffer). The main deal is: proto messages don't know in
+// advance what their size will be.
+// Due to the tracing buffer being split into fixed-size chunks, on some
+// occasions, these writes need to be spread over two (or more) non-contiguous
+// chunks of memory. Similarly, when the buffer is backed by the heap, we want
+// to avoid realloc() calls, as they might cause a full copy of the contents
+// of the buffer.
+// The purpose of this class is to abstract away the non-contiguous write logic.
+// This class knows how to deal with writes as long as they fall in the same
+// ContiguousMemoryRange and defers the chunk-chaining logic to the Delegate.
+class PERFETTO_EXPORT ScatteredStreamWriter {
+ public:
+  class PERFETTO_EXPORT Delegate {
+   public:
+    virtual ~Delegate();
+    virtual ContiguousMemoryRange GetNewBuffer() = 0;
+  };
+
+  explicit ScatteredStreamWriter(Delegate* delegate);
+  ~ScatteredStreamWriter();
+
+  inline void WriteByte(uint8_t value) {
+    if (write_ptr_ >= cur_range_.end)
+      Extend();
+    *write_ptr_++ = value;
+  }
+
+  // Assumes that the caller checked that there is enough headroom.
+  // TODO(primiano): perf optimization, this is a tracing hot path. The
+  // compiler can make strong optimization on memcpy if the size arg is a
+  // constexpr. Make a templated variant of this for fixed-size writes.
+  // TODO(primiano): restrict / noalias might also help.
+  inline void WriteBytesUnsafe(const uint8_t* src, size_t size) {
+    uint8_t* const end = write_ptr_ + size;
+    assert(end <= cur_range_.end);
+    memcpy(write_ptr_, src, size);
+    write_ptr_ = end;
+  }
+
+  inline void WriteBytes(const uint8_t* src, size_t size) {
+    uint8_t* const end = write_ptr_ + size;
+    if (PERFETTO_LIKELY(end <= cur_range_.end))
+      return WriteBytesUnsafe(src, size);
+    WriteBytesSlowPath(src, size);
+  }
+
+  void WriteBytesSlowPath(const uint8_t* src, size_t size);
+
+  // Reserves a fixed amount of bytes to be backfilled later. The reserved range
+  // is guaranteed to be contiguous and not span across chunks. |size| has to be
+  // <= than the size of a new buffer returned by the Delegate::GetNewBuffer().
+  uint8_t* ReserveBytes(size_t size);
+
+  // Fast (but unsafe) version of the above. The caller must have previously
+  // checked that there are at least |size| contiguous bytes available.
+  // Returns only the start pointer of the reservation.
+  uint8_t* ReserveBytesUnsafe(size_t size) {
+    uint8_t* begin = write_ptr_;
+    write_ptr_ += size;
+    assert(write_ptr_ <= cur_range_.end);
+    return begin;
+  }
+
+  // Resets the buffer boundaries and the write pointer to the given |range|.
+  // Subsequent WriteByte(s) will write into |range|.
+  void Reset(ContiguousMemoryRange range);
+
+  // Number of contiguous free bytes in |cur_range_| that can be written without
+  // requesting a new buffer.
+  size_t bytes_available() const {
+    return static_cast<size_t>(cur_range_.end - write_ptr_);
+  }
+
+  uint8_t* write_ptr() const { return write_ptr_; }
+
+  uint64_t written() const {
+    return written_previously_ +
+           static_cast<uint64_t>(write_ptr_ - cur_range_.begin);
+  }
+
+ private:
+  ScatteredStreamWriter(const ScatteredStreamWriter&) = delete;
+  ScatteredStreamWriter& operator=(const ScatteredStreamWriter&) = delete;
+
+  void Extend();
+
+  Delegate* const delegate_;
+  ContiguousMemoryRange cur_range_;
+  uint8_t* write_ptr_;
+  uint64_t written_previously_ = 0;
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_SCATTERED_STREAM_WRITER_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_MESSAGE_H_
+#define INCLUDE_PERFETTO_PROTOZERO_MESSAGE_H_
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/contiguous_memory_range.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_writer.h"
+
+namespace perfetto {
+namespace shm_fuzz {
+class FakeProducer;
+}  // namespace shm_fuzz
+}  // namespace perfetto
+
+namespace protozero {
+
+class MessageArena;
+class MessageHandleBase;
+
+// Base class extended by the proto C++ stubs generated by the ProtoZero
+// compiler. This class provides the minimal runtime required to support
+// append-only operations and is designed for performance. None of the methods
+// require any dynamic memory allocation, unless more than 16 nested messages
+// are created via BeginNestedMessage() calls.
+class PERFETTO_EXPORT Message {
+ public:
+  friend class MessageHandleBase;
+
+  // The ctor is deliberately a no-op to avoid forwarding args from all
+  // subclasses. The real initialization is performed by Reset().
+  // Nested messages are allocated via placement new by MessageArena and
+  // implictly destroyed when the RootMessage's arena goes away. This is
+  // fine as long as all the fields are PODs, which is checked by the
+  // static_assert()s in the Reset() method.
+  Message() = default;
+
+  // Clears up the state, allowing the message to be reused as a fresh one.
+  void Reset(ScatteredStreamWriter*, MessageArena*);
+
+  // Commits all the changes to the buffer (backfills the size field of this and
+  // all nested messages) and seals the message. Returns the size of the message
+  // (and all nested sub-messages), without taking into account any chunking.
+  // Finalize is idempotent and can be called several times w/o side effects.
+  uint32_t Finalize();
+
+  // Optional. If is_valid() == true, the corresponding memory region (its
+  // length == proto_utils::kMessageLengthFieldSize) is backfilled with the size
+  // of this message (minus |size_already_written| below). This is the mechanism
+  // used by messages to backfill their corresponding size field in the parent
+  // message.
+  uint8_t* size_field() const { return size_field_; }
+  void set_size_field(uint8_t* size_field) { size_field_ = size_field; }
+
+  // This is to deal with case of backfilling the size of a root (non-nested)
+  // message which is split into multiple chunks. Upon finalization only the
+  // partial size that lies in the last chunk has to be backfilled.
+  void inc_size_already_written(uint32_t sz) { size_already_written_ += sz; }
+
+  Message* nested_message() { return nested_message_; }
+
+  bool is_finalized() const { return finalized_; }
+
+#if PERFETTO_DCHECK_IS_ON()
+  void set_handle(MessageHandleBase* handle) { handle_ = handle; }
+#endif
+
+  // Proto types: uint64, uint32, int64, int32, bool, enum.
+  template <typename T>
+  void AppendVarInt(uint32_t field_id, T value) {
+    if (nested_message_)
+      EndNestedMessage();
+
+    uint8_t buffer[proto_utils::kMaxSimpleFieldEncodedSize];
+    uint8_t* pos = buffer;
+
+    pos = proto_utils::WriteVarInt(proto_utils::MakeTagVarInt(field_id), pos);
+    // WriteVarInt encodes signed values in two's complement form.
+    pos = proto_utils::WriteVarInt(value, pos);
+    WriteToStream(buffer, pos);
+  }
+
+  // Proto types: sint64, sint32.
+  template <typename T>
+  void AppendSignedVarInt(uint32_t field_id, T value) {
+    AppendVarInt(field_id, proto_utils::ZigZagEncode(value));
+  }
+
+  // Proto types: bool, enum (small).
+  // Faster version of AppendVarInt for tiny numbers.
+  void AppendTinyVarInt(uint32_t field_id, int32_t value) {
+    PERFETTO_DCHECK(0 <= value && value < 0x80);
+    if (nested_message_)
+      EndNestedMessage();
+
+    uint8_t buffer[proto_utils::kMaxSimpleFieldEncodedSize];
+    uint8_t* pos = buffer;
+    // MakeTagVarInt gets super optimized here for constexpr.
+    pos = proto_utils::WriteVarInt(proto_utils::MakeTagVarInt(field_id), pos);
+    *pos++ = static_cast<uint8_t>(value);
+    WriteToStream(buffer, pos);
+  }
+
+  // Proto types: fixed64, sfixed64, fixed32, sfixed32, double, float.
+  template <typename T>
+  void AppendFixed(uint32_t field_id, T value) {
+    if (nested_message_)
+      EndNestedMessage();
+
+    uint8_t buffer[proto_utils::kMaxSimpleFieldEncodedSize];
+    uint8_t* pos = buffer;
+
+    pos = proto_utils::WriteVarInt(proto_utils::MakeTagFixed<T>(field_id), pos);
+    memcpy(pos, &value, sizeof(T));
+    pos += sizeof(T);
+    // TODO: Optimize memcpy performance, see http://crbug.com/624311 .
+    WriteToStream(buffer, pos);
+  }
+
+  void AppendString(uint32_t field_id, const char* str);
+
+  void AppendString(uint32_t field_id, const std::string& str) {
+    AppendBytes(field_id, str.data(), str.size());
+  }
+
+  void AppendBytes(uint32_t field_id, const void* value, size_t size);
+
+  // Append raw bytes for a field, using the supplied |ranges| to
+  // copy from |num_ranges| individual buffers.
+  size_t AppendScatteredBytes(uint32_t field_id,
+                              ContiguousMemoryRange* ranges,
+                              size_t num_ranges);
+
+  // Begins a nested message. The returned object is owned by the MessageArena
+  // of the root message. The nested message ends either when Finalize() is
+  // called or when any other Append* method is called in the parent class.
+  // The template argument T is supposed to be a stub class auto generated from
+  // a .proto, hence a subclass of Message.
+  template <class T>
+  T* BeginNestedMessage(uint32_t field_id) {
+    // This is to prevent subclasses (which should be autogenerated, though), to
+    // introduce extra state fields (which wouldn't be initialized by Reset()).
+    static_assert(std::is_base_of<Message, T>::value,
+                  "T must be a subclass of Message");
+    static_assert(sizeof(T) == sizeof(Message),
+                  "Message subclasses cannot introduce extra state.");
+    return static_cast<T*>(BeginNestedMessageInternal(field_id));
+  }
+
+  // Gives read-only access to the underlying stream_writer. This is used only
+  // by few internals to query the state of the underlying buffer. It is almost
+  // always a bad idea to poke at the stream_writer() internals.
+  const ScatteredStreamWriter* stream_writer() const { return stream_writer_; }
+
+  // Appends some raw bytes to the message. The use-case for this is preserving
+  // unknown fields in the decode -> re-encode path of xxx.gen.cc classes
+  // generated by the cppgen_plugin.cc.
+  // The caller needs to guarantee that the appended data is properly
+  // proto-encoded and each field has a proto preamble.
+  void AppendRawProtoBytes(const void* data, size_t size) {
+    const uint8_t* src = reinterpret_cast<const uint8_t*>(data);
+    WriteToStream(src, src + size);
+  }
+
+ private:
+  Message(const Message&) = delete;
+  Message& operator=(const Message&) = delete;
+
+  Message* BeginNestedMessageInternal(uint32_t field_id);
+
+  // Called by Finalize and Append* methods.
+  void EndNestedMessage();
+
+  void WriteToStream(const uint8_t* src_begin, const uint8_t* src_end) {
+    PERFETTO_DCHECK(!finalized_);
+    PERFETTO_DCHECK(src_begin <= src_end);
+    const uint32_t size = static_cast<uint32_t>(src_end - src_begin);
+    stream_writer_->WriteBytes(src_begin, size);
+    size_ += size;
+  }
+
+  // Only POD fields are allowed. This class's dtor is never called.
+  // See the comment on the static_assert in the corresponding .cc file.
+
+  // The stream writer interface used for the serialization.
+  ScatteredStreamWriter* stream_writer_;
+
+  // The storage used to allocate nested Message objects.
+  // This is owned by RootMessage<T>.
+  MessageArena* arena_;
+
+  // Pointer to the last child message created through BeginNestedMessage(), if
+  // any, nullptr otherwise. There is no need to keep track of more than one
+  // message per nesting level as the proto-zero API contract mandates that
+  // nested fields can be filled only in a stacked fashion. In other words,
+  // nested messages are finalized and sealed when any other field is set in the
+  // parent message (or the parent message itself is finalized) and cannot be
+  // accessed anymore afterwards.
+  Message* nested_message_;
+
+  // [optional] Pointer to a non-aligned pre-reserved var-int slot of
+  // kMessageLengthFieldSize bytes. When set, the Finalize() method will write
+  // the size of proto-encoded message in the pointed memory region.
+  uint8_t* size_field_;
+
+  // Keeps track of the size of the current message.
+  uint32_t size_;
+
+  // See comment for inc_size_already_written().
+  uint32_t size_already_written_;
+
+  // When true, no more changes to the message are allowed. This is to DCHECK
+  // attempts of writing to a message which has been Finalize()-d.
+  bool finalized_;
+
+#if PERFETTO_DCHECK_IS_ON()
+  // Current generation of message. Incremented on Reset.
+  // Used to detect stale handles.
+  uint32_t generation_;
+
+  MessageHandleBase* handle_;
+#endif
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_MESSAGE_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_MESSAGE_HANDLE_H_
+#define INCLUDE_PERFETTO_PROTOZERO_MESSAGE_HANDLE_H_
+
+#include <functional>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+
+namespace protozero {
+
+class Message;
+
+// MessageHandle allows to decouple the lifetime of a proto message
+// from the underlying storage. It gives the following guarantees:
+// - The underlying message is finalized (if still alive) if the handle goes
+//   out of scope.
+// - In Debug / DCHECK_ALWAYS_ON builds, the handle becomes null once the
+//   message is finalized. This is to enforce the append-only API. For instance
+//   when adding two repeated messages, the addition of the 2nd one forces
+//   the finalization of the first.
+// Think about this as a WeakPtr<Message> which calls
+// Message::Finalize() when going out of scope.
+
+class PERFETTO_EXPORT MessageHandleBase {
+ public:
+  ~MessageHandleBase();
+
+  // Move-only type.
+  MessageHandleBase(MessageHandleBase&&) noexcept;
+  MessageHandleBase& operator=(MessageHandleBase&&);
+  explicit operator bool() const {
+#if PERFETTO_DCHECK_IS_ON()
+    PERFETTO_DCHECK(!message_ || generation_ == message_->generation_);
+#endif
+    return !!message_;
+  }
+
+ protected:
+  explicit MessageHandleBase(Message* = nullptr);
+  Message* operator->() const {
+#if PERFETTO_DCHECK_IS_ON()
+    PERFETTO_DCHECK(!message_ || generation_ == message_->generation_);
+#endif
+    return message_;
+  }
+  Message& operator*() const { return *(operator->()); }
+
+ private:
+  friend class Message;
+  MessageHandleBase(const MessageHandleBase&) = delete;
+  MessageHandleBase& operator=(const MessageHandleBase&) = delete;
+
+  void reset_message() {
+    // This is called by Message::Finalize().
+    PERFETTO_DCHECK(message_->is_finalized());
+    message_ = nullptr;
+  }
+
+  void Move(MessageHandleBase&&);
+
+  void FinalizeMessage() { message_->Finalize(); }
+
+  Message* message_;
+#if PERFETTO_DCHECK_IS_ON()
+  uint32_t generation_;
+#endif
+};
+
+template <typename T>
+class MessageHandle : public MessageHandleBase {
+ public:
+  MessageHandle() : MessageHandle(nullptr) {}
+  explicit MessageHandle(T* message) : MessageHandleBase(message) {}
+
+  explicit operator bool() const { return MessageHandleBase::operator bool(); }
+
+  T& operator*() const {
+    return static_cast<T&>(MessageHandleBase::operator*());
+  }
+
+  T* operator->() const {
+    return static_cast<T*>(MessageHandleBase::operator->());
+  }
+
+  T* get() const { return static_cast<T*>(MessageHandleBase::operator->()); }
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_MESSAGE_HANDLE_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_TRACE_WRITER_BASE_H_
+#define INCLUDE_PERFETTO_TRACING_TRACE_WRITER_BASE_H_
+
+// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
+
+namespace perfetto {
+
+namespace protos {
+namespace pbzero {
+class TracePacket;
+}  // namespace pbzero
+}  // namespace protos
+
+// The bare-minimum subset of the TraceWriter interface that is exposed as a
+// fully public API.
+// See comments in /include/perfetto/ext/tracing/core/trace_writer.h.
+class TraceWriterBase {
+ public:
+  virtual ~TraceWriterBase();
+
+  virtual protozero::MessageHandle<protos::pbzero::TracePacket>
+  NewTracePacket() = 0;
+
+  virtual void Flush(std::function<void()> callback = {}) = 0;
+  virtual uint64_t written() const = 0;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_TRACE_WRITER_BASE_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_DATA_SOURCE_INTERNAL_H_
+#define INCLUDE_PERFETTO_TRACING_INTERNAL_DATA_SOURCE_INTERNAL_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <array>
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <mutex>
+
+// No perfetto headers (other than tracing/api and protozero) should be here.
+// gen_amalgamated expanded: #include "perfetto/tracing/buffer_exhausted_policy.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
+
+namespace perfetto {
+
+class DataSourceBase;
+class InterceptorBase;
+class TraceWriterBase;
+
+namespace internal {
+
+class TracingTLS;
+
+// This maintains the internal state of a data source instance that is used only
+// to implement the tracing mechanics and is not exposed to the API client.
+// There is one of these object per DataSource instance (up to
+// kMaxDataSourceInstances).
+struct DataSourceState {
+  // This boolean flag determines whether the DataSource::Trace() method should
+  // do something or be a no-op. This flag doesn't give the full guarantee
+  // that tracing data will be visible in the trace, it just makes it so that
+  // the client attemps writing trace data and interacting with the service.
+  // For instance, when a tracing session ends the service will reject data
+  // commits that arrive too late even if the producer hasn't received the stop
+  // IPC message.
+  // This flag is set right before calling OnStart() and cleared right before
+  // calling OnStop(), unless using HandleStopAsynchronously() (see comments
+  // in data_source.h).
+  // Keep this flag as the first field. This allows the compiler to directly
+  // dereference the DataSourceState* pointer in the trace fast-path without
+  // doing extra pointr arithmetic.
+  bool trace_lambda_enabled = false;
+
+  // The central buffer id that all TraceWriter(s) created by this data source
+  // must target.
+  BufferId buffer_id = 0;
+
+  // The index within TracingMuxerImpl.backends_. Practically it allows to
+  // lookup the Producer object, and hence the IPC channel, for this data
+  // source.
+  TracingBackendId backend_id = 0;
+
+  // Each backend may connect to the tracing service multiple times if a
+  // disconnection occurs. This counter is used to uniquely identify each
+  // connection so that trace writers don't get reused across connections.
+  uint32_t backend_connection_id = 0;
+
+  // The instance id as assigned by the tracing service. Note that because a
+  // process can be connected to >1 services, this ID is not globally unique but
+  // is only unique within the scope of its backend.
+  // Only the tuple (backend_id, data_source_instance_id) is globally unique.
+  uint64_t data_source_instance_id = 0;
+
+  // A hash of the trace config used by this instance. This is used to
+  // de-duplicate instances for data sources with identical names (e.g., track
+  // event).
+  uint64_t config_hash = 0;
+
+  // If this data source is being intercepted (see Interceptor), this field
+  // contains the non-zero id of a registered interceptor which should receive
+  // trace packets for this session. Note: interceptor id 1 refers to the first
+  // element of TracingMuxerImpl::interceptors_ with successive numbers using
+  // the following slots.
+  uint32_t interceptor_id = 0;
+
+  // This lock is not held to implement Trace() and it's used only if the trace
+  // code wants to access its own data source state.
+  // This is to prevent that accessing the data source on an arbitrary embedder
+  // thread races with the internal IPC thread destroying the data source
+  // because of a end-of-tracing notification from the service.
+  // This lock is also used to protect access to a possible interceptor for this
+  // data source session.
+  std::recursive_mutex lock;
+  std::unique_ptr<DataSourceBase> data_source;
+  std::unique_ptr<InterceptorBase> interceptor;
+};
+
+// This is to allow lazy-initialization and avoid static initializers and
+// at-exit destructors. All the entries are initialized via placement-new when
+// DataSource::Register() is called, see TracingMuxerImpl::RegisterDataSource().
+struct DataSourceStateStorage {
+  alignas(DataSourceState) char storage[sizeof(DataSourceState)]{};
+};
+
+// Per-DataSource-type global state.
+struct DataSourceStaticState {
+  // Unique index of the data source, assigned at registration time.
+  uint32_t index = kMaxDataSources;
+
+  // A bitmap that tells about the validity of each |instances| entry. When the
+  // i-th bit of the bitmap it's set, instances[i] is valid.
+  std::atomic<uint32_t> valid_instances{};
+  std::array<DataSourceStateStorage, kMaxDataSourceInstances> instances{};
+
+  // Incremented whenever incremental state should be reset for any instance of
+  // this data source.
+  std::atomic<uint32_t> incremental_state_generation{};
+
+  // Can be used with a cached |valid_instances| bitmap.
+  DataSourceState* TryGetCached(uint32_t cached_bitmap, size_t n) {
+    return cached_bitmap & (1 << n)
+               ? reinterpret_cast<DataSourceState*>(&instances[n])
+               : nullptr;
+  }
+
+  DataSourceState* TryGet(size_t n) {
+    return TryGetCached(valid_instances.load(std::memory_order_acquire), n);
+  }
+
+  void CompilerAsserts() {
+    static_assert(sizeof(valid_instances.load()) * 8 >= kMaxDataSourceInstances,
+                  "kMaxDataSourceInstances too high");
+  }
+};
+
+// Per-DataSource-instance thread-local state.
+struct DataSourceInstanceThreadLocalState {
+  using IncrementalStatePointer = std::unique_ptr<void, void (*)(void*)>;
+
+  void Reset() {
+    trace_writer.reset();
+    incremental_state.reset();
+    backend_id = 0;
+    backend_connection_id = 0;
+    buffer_id = 0;
+    data_source_instance_id = 0;
+    incremental_state_generation = 0;
+    is_intercepted = false;
+  }
+
+  std::unique_ptr<TraceWriterBase> trace_writer;
+  IncrementalStatePointer incremental_state = {nullptr, [](void*) {}};
+  uint32_t incremental_state_generation;
+  TracingBackendId backend_id;
+  uint32_t backend_connection_id;
+  BufferId buffer_id;
+  uint64_t data_source_instance_id;
+  bool is_intercepted;
+};
+
+// Per-DataSource-type thread-local state.
+struct DataSourceThreadLocalState {
+  DataSourceStaticState* static_state = nullptr;
+
+  // Pointer to the parent tls object that holds us. Used to retrieve the
+  // generation, which is per-global-TLS and not per data-source.
+  TracingTLS* root_tls = nullptr;
+
+  // One entry per each data source instance.
+  std::array<DataSourceInstanceThreadLocalState, kMaxDataSourceInstances>
+      per_instance{};
+};
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_INTERNAL_DATA_SOURCE_INTERNAL_H_
+// gen_amalgamated begin header: include/perfetto/tracing/locked_handle.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_LOCKED_HANDLE_H_
+#define INCLUDE_PERFETTO_TRACING_LOCKED_HANDLE_H_
+
+#include <mutex>
+
+namespace perfetto {
+
+// This is used for GetDataSourceLocked(), in the (rare) case where the
+// tracing code wants to access the state of its data source from the Trace()
+// method.
+template <typename T>
+class LockedHandle {
+ public:
+  LockedHandle(std::recursive_mutex* mtx, T* obj) : lock_(*mtx), obj_(obj) {}
+  LockedHandle() = default;  // For the invalid case.
+  LockedHandle(LockedHandle&&) = default;
+  LockedHandle& operator=(LockedHandle&&) = default;
+
+  bool valid() const { return obj_; }
+  explicit operator bool() const { return valid(); }
+
+  T* operator->() {
+    assert(valid());
+    return obj_;
+  }
+
+  T& operator*() { return *(this->operator->()); }
+
+ private:
+  std::unique_lock<std::recursive_mutex> lock_;
+  T* obj_ = nullptr;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_LOCKED_HANDLE_H_
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_INTERCEPTOR_H_
+#define INCLUDE_PERFETTO_TRACING_INTERCEPTOR_H_
+
+// An interceptor is used to redirect trace packets written by a data source
+// into a custom backend instead of the normal Perfetto tracing service. For
+// example, the console interceptor prints all trace packets to the console as
+// they are generated. Another potential use is exporting trace data to another
+// tracing service such as Android ATrace or Windows ETW.
+//
+// An interceptor is defined by subclassing the perfetto::Interceptor template:
+//
+// class MyInterceptor : public perfetto::Interceptor<MyInterceptor> {
+//  public:
+//   ~MyInterceptor() override = default;
+//
+//   // This function is called for each intercepted trace packet. |context|
+//   // contains information about the trace packet as well as other state
+//   // tracked by the interceptor (e.g., see ThreadLocalState).
+//   //
+//   // Intercepted trace data is provided in the form of serialized protobuf
+//   // bytes, accessed through the |context.packet_data| field.
+//   //
+//   // Warning: this function can be called on any thread at any time. See
+//   // below for how to safely access shared interceptor data from here.
+//   static void OnTracePacket(InterceptorContext context) {
+//     perfetto::protos::pbzero::TracePacket::Decoder packet(
+//         context.packet_data.data, context.packet_data.size);
+//     // ... Write |packet| to the desired destination ...
+//   }
+// };
+//
+// An interceptor should be registered before any tracing sessions are started.
+// Note that the interceptor also needs to be activated through the trace config
+// as shown below.
+//
+//   perfetto::InterceptorDescriptor desc;
+//   desc.set_name("my_interceptor");
+//   MyInterceptor::Register(desc);
+//
+// Finally, an interceptor is enabled through the trace config like this:
+//
+//   perfetto::TraceConfig cfg;
+//   auto* ds_cfg = cfg.add_data_sources()->mutable_config();
+//   ds_cfg->set_name("data_source_to_intercept");   // e.g. "track_event"
+//   ds_cfg->mutable_interceptor_config()->set_name("my_interceptor");
+//
+// Once an interceptor is enabled, all data from the affected data sources is
+// sent to the interceptor instead of the main tracing buffer.
+//
+// Interceptor state
+// =================
+//
+// Besides the serialized trace packet data, the |OnTracePacket| interceptor
+// function can access three other types of state:
+//
+// 1. Global state: this is no different from a normal static function, but care
+//    must be taken because |OnTracePacket| can be called concurrently on any
+//    thread at any time.
+//
+// 2. Per-data source instance state: since the interceptor class is
+//    automatically instantiated for each intercepted data source, its fields
+//    can be used to store per-instance data such as the trace config. This data
+//    can be maintained through the OnSetup/OnStart/OnStop callbacks:
+//
+//    class MyInterceptor : public perfetto::Interceptor<MyInterceptor> {
+//     public:
+//      void OnSetup(const SetupArgs& args) override {
+//        enable_foo_ = args.config.interceptor_config().enable_foo();
+//      }
+//
+//      bool enable_foo_{};
+//    };
+//
+//    In the interceptor function this data must be accessed through a scoped
+//    lock for safety:
+//
+//    class MyInterceptor : public perfetto::Interceptor<MyInterceptor> {
+//      ...
+//      static void OnTracePacket(InterceptorContext context) {
+//        auto my_interceptor = context.GetInterceptorLocked();
+//        if (my_interceptor) {
+//           // Access fields of MyInterceptor here.
+//           if (my_interceptor->enable_foo_) { ... }
+//        }
+//        ...
+//      }
+//    };
+//
+//    Since accessing this data involves holding a lock, it should be done
+//    sparingly.
+//
+// 3. Per-thread/TraceWriter state: many data sources use interning to avoid
+//    repeating common data in the trace. Since the interning dictionaries are
+//    typically kept individually for each TraceWriter sequence (i.e., per
+//    thread), an interceptor can declare a data structure with lifetime
+//    matching the TraceWriter:
+//
+//    class MyInterceptor : public perfetto::Interceptor<MyInterceptor> {
+//     public:
+//      struct ThreadLocalState
+//          : public perfetto::InterceptorBase::ThreadLocalState {
+//        ThreadLocalState(ThreadLocalStateArgs&) override = default;
+//        ~ThreadLocalState() override = default;
+//
+//        std::map<size_t, std::string> event_names;
+//      };
+//    };
+//
+//    This per-thread state can then be accessed and maintained in
+//    |OnTracePacket| like this:
+//
+//    class MyInterceptor : public perfetto::Interceptor<MyInterceptor> {
+//      ...
+//      static void OnTracePacket(InterceptorContext context) {
+//        // Updating interned data.
+//        auto& tls = context.GetThreadLocalState();
+//        if (parsed_packet.sequence_flags() & perfetto::protos::pbzero::
+//                TracePacket::SEQ_INCREMENTAL_STATE_CLEARED) {
+//          tls.event_names.clear();
+//        }
+//        for (const auto& entry : parsed_packet.interned_data().event_names())
+//          tls.event_names[entry.iid()] = entry.name();
+//
+//        // Looking up interned data.
+//        if (parsed_packet.has_track_event()) {
+//          size_t name_iid = parsed_packet.track_event().name_iid();
+//          const std::string& event_name = tls.event_names[name_iid];
+//        }
+//        ...
+//      }
+//    };
+//
+
+#include <functional>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/data_source_internal.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/locked_handle.h"
+
+namespace {
+class MockTracingMuxer;
+}
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class DataSourceConfig;
+class InterceptorDescriptor;
+}  // namespace gen
+}  // namespace protos
+
+using protos::gen::InterceptorDescriptor;
+
+namespace internal {
+class InterceptorTraceWriter;
+class TracingMuxer;
+class TracingMuxerFake;
+class TracingMuxerImpl;
+}  // namespace internal
+
+// A virtual base class for interceptors. Users should derive from the templated
+// subclass below instead of this one.
+class PERFETTO_EXPORT InterceptorBase {
+ public:
+  virtual ~InterceptorBase();
+
+  // A virtual base class for thread-local state needed by the interceptor.
+  // To define your own state, subclass this with the same name in the
+  // interceptor class. A reference to the state can then be looked up through
+  // context.GetThreadLocalState() in the trace packet interceptor function.
+  class ThreadLocalState {
+   public:
+    virtual ~ThreadLocalState();
+  };
+
+  struct SetupArgs {
+    const DataSourceConfig& config;
+  };
+  struct StartArgs {};
+  struct StopArgs {};
+
+  // Called when an intercepted data source is set up. Both the interceptor's
+  // and the data source's configuration is available in
+  // |SetupArgs|. Called on an internal Perfetto service thread, but not
+  // concurrently.
+  virtual void OnSetup(const SetupArgs&) {}
+
+  // Called when an intercepted data source starts. Called on an internal
+  // Perfetto service thread, but not concurrently.
+  virtual void OnStart(const StartArgs&) {}
+
+  // Called when an intercepted data source stops. Called on an internal
+  // Perfetto service thread, but not concurrently.
+  virtual void OnStop(const StopArgs&) {}
+
+ private:
+  friend class internal::InterceptorTraceWriter;
+  friend class internal::TracingMuxer;
+  friend class internal::TracingMuxerFake;
+  friend class internal::TracingMuxerImpl;
+  friend MockTracingMuxer;
+  template <class T>
+  friend class Interceptor;
+
+  // Data passed from DataSource::Trace() into the interceptor.
+  struct TracePacketCallbackArgs {
+    internal::DataSourceStaticState* static_state;
+    uint32_t instance_index;
+    protozero::ConstBytes packet_data;
+    ThreadLocalState* tls;
+  };
+
+  // These callback functions are defined as stateless to avoid accidentally
+  // introducing cross-thread data races.
+  using TLSFactory = std::unique_ptr<ThreadLocalState> (*)(
+      internal::DataSourceStaticState*,
+      uint32_t data_source_instance_index);
+  using TracePacketCallback = void (*)(TracePacketCallbackArgs);
+
+  static void RegisterImpl(
+      const InterceptorDescriptor& descriptor,
+      std::function<std::unique_ptr<InterceptorBase>()> factory,
+      InterceptorBase::TLSFactory tls_factory,
+      InterceptorBase::TracePacketCallback on_trace_packet);
+};
+
+// Templated interceptor instantiation. See above for usage.
+template <class InterceptorType>
+class PERFETTO_EXPORT Interceptor : public InterceptorBase {
+ public:
+  // A context object provided to the ThreadLocalState constructor. Provides
+  // access to the per-instance interceptor object.
+  class ThreadLocalStateArgs {
+   public:
+    ~ThreadLocalStateArgs() = default;
+
+    // Return a locked reference to the interceptor session. The session object
+    // will remain valid as long as the returned handle is in scope.
+    LockedHandle<InterceptorType> GetInterceptorLocked() {
+      auto* internal_state = static_state_->TryGet(data_source_instance_index_);
+      if (!internal_state)
+        return LockedHandle<InterceptorType>();
+      return LockedHandle<InterceptorType>(
+          &internal_state->lock,
+          static_cast<InterceptorType*>(internal_state->interceptor.get()));
+    }
+
+   private:
+    friend class Interceptor<InterceptorType>;
+    friend class InterceptorContext;
+    friend class TracingMuxerImpl;
+
+    ThreadLocalStateArgs(internal::DataSourceStaticState* static_state,
+                         uint32_t data_source_instance_index)
+        : static_state_(static_state),
+          data_source_instance_index_(data_source_instance_index) {}
+
+    internal::DataSourceStaticState* const static_state_;
+    const uint32_t data_source_instance_index_;
+  };
+
+  // A context object provided to each call into |OnTracePacket|. Contains the
+  // intercepted serialized trace packet data.
+  class InterceptorContext {
+   public:
+    InterceptorContext(InterceptorContext&&) noexcept = default;
+    ~InterceptorContext() = default;
+
+    // Return a locked reference to the interceptor session. The session object
+    // will remain valid as long as the returned handle is in scope.
+    LockedHandle<InterceptorType> GetInterceptorLocked() {
+      return tls_args_.GetInterceptorLocked();
+    }
+
+    // Return the thread-local state for this interceptor. See
+    // InterceptorBase::ThreadLocalState.
+    typename InterceptorType::ThreadLocalState& GetThreadLocalState() {
+      return static_cast<typename InterceptorType::ThreadLocalState&>(*tls_);
+    }
+
+    // A buffer containing the serialized TracePacket protocol buffer message.
+    // This memory is only valid during the call to OnTracePacket.
+    protozero::ConstBytes packet_data;
+
+   private:
+    friend class Interceptor<InterceptorType>;
+    InterceptorContext(TracePacketCallbackArgs args)
+        : packet_data(args.packet_data),
+          tls_args_(args.static_state, args.instance_index),
+          tls_(args.tls) {}
+    InterceptorContext(const InterceptorContext&) = delete;
+    InterceptorContext& operator=(const InterceptorContext&) = delete;
+
+    ThreadLocalStateArgs tls_args_;
+    InterceptorBase::ThreadLocalState* const tls_;
+  };
+
+  // Register the interceptor for use in tracing sessions.
+  // The optional |constructor_args| will be passed to the interceptor when it
+  // is constructed.
+  template <class... Args>
+  static void Register(const InterceptorDescriptor& descriptor,
+                       const Args&... constructor_args) {
+    auto factory = [constructor_args...]() {
+      return std::unique_ptr<InterceptorBase>(
+          new InterceptorType(constructor_args...));
+    };
+    auto tls_factory = [](internal::DataSourceStaticState* static_state,
+                          uint32_t data_source_instance_index) {
+      // Don't bother allocating TLS state unless the interceptor is actually
+      // using it.
+      if (std::is_same<typename InterceptorType::ThreadLocalState,
+                       InterceptorBase::ThreadLocalState>::value) {
+        return std::unique_ptr<InterceptorBase::ThreadLocalState>(nullptr);
+      }
+      ThreadLocalStateArgs args(static_state, data_source_instance_index);
+      return std::unique_ptr<InterceptorBase::ThreadLocalState>(
+          new typename InterceptorType::ThreadLocalState(args));
+    };
+    auto on_trace_packet = [](TracePacketCallbackArgs args) {
+      InterceptorType::OnTracePacket(InterceptorContext(std::move(args)));
+    };
+    RegisterImpl(descriptor, std::move(factory), std::move(tls_factory),
+                 std::move(on_trace_packet));
+  }
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_INTERCEPTOR_H_
+// gen_amalgamated begin header: include/perfetto/tracing/track_event_state_tracker.h
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/track_event.pbzero.h
+// gen_amalgamated begin header: include/perfetto/protozero/field_writer.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_FIELD_WRITER_H_
+#define INCLUDE_PERFETTO_PROTOZERO_FIELD_WRITER_H_
+
+namespace protozero {
+namespace internal {
+
+template <proto_utils::ProtoSchemaType proto_schema_type>
+struct FieldWriter {
+  static_assert(proto_schema_type != proto_utils::ProtoSchemaType::kMessage,
+                "FieldWriter can't be used with nested messages");
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kDouble> {
+  inline static void Append(Message& message, uint32_t field_id, double value) {
+    message.AppendFixed(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kFloat> {
+  inline static void Append(Message& message, uint32_t field_id, float value) {
+    message.AppendFixed(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kBool> {
+  inline static void Append(Message& message, uint32_t field_id, bool value) {
+    message.AppendTinyVarInt(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kInt32> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            int32_t value) {
+    message.AppendVarInt(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kInt64> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            int64_t value) {
+    message.AppendVarInt(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kUint32> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            uint32_t value) {
+    message.AppendVarInt(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kUint64> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            uint64_t value) {
+    message.AppendVarInt(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kSint32> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            int32_t value) {
+    message.AppendSignedVarInt(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kSint64> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            int64_t value) {
+    message.AppendSignedVarInt(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kFixed32> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            uint32_t value) {
+    message.AppendFixed(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kFixed64> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            uint64_t value) {
+    message.AppendFixed(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kSfixed32> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            int32_t value) {
+    message.AppendFixed(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kSfixed64> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            int64_t value) {
+    message.AppendFixed(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kEnum> {
+  template <typename EnumType>
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            EnumType value) {
+    message.AppendVarInt(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kString> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            const char* data,
+                            size_t size) {
+    message.AppendBytes(field_id, data, size);
+  }
+
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            const std::string& value) {
+    message.AppendBytes(field_id, value.data(), value.size());
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kBytes> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            const uint8_t* data,
+                            size_t size) {
+    message.AppendBytes(field_id, data, size);
+  }
+
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            const std::string& value) {
+    message.AppendBytes(field_id, value.data(), value.size());
+  }
+};
+
+}  // namespace internal
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_FIELD_WRITER_H_
+// gen_amalgamated begin header: include/perfetto/protozero/packed_repeated_fields.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_PACKED_REPEATED_FIELDS_H_
+#define INCLUDE_PERFETTO_PROTOZERO_PACKED_REPEATED_FIELDS_H_
+
+#include <stdint.h>
+
+#include <array>
+#include <memory>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace protozero {
+
+// This file contains classes used when encoding packed repeated fields.
+// To encode such a field, the caller is first expected to accumulate all of the
+// values in one of the following types (depending on the wire type of the
+// individual elements), defined below:
+// * protozero::PackedVarInt
+// * protozero::PackedFixedSizeInt</*element_type=*/ uint32_t>
+// Then that buffer is passed to the protozero-generated setters as an argument.
+// After calling the setter, the buffer can be destroyed.
+//
+// An example of encoding a packed field:
+//   protozero::HeapBuffered<protozero::Message> msg;
+//   protozero::PackedVarInt buf;
+//   buf.Append(42);
+//   buf.Append(-1);
+//   msg->set_fieldname(buf);
+//   msg.SerializeAsString();
+
+class PackedBufferBase {
+ public:
+  PackedBufferBase() { Reset(); }
+
+  // Copy or move is disabled due to pointers to stack addresses.
+  PackedBufferBase(const PackedBufferBase&) = delete;
+  PackedBufferBase(PackedBufferBase&&) = delete;
+  PackedBufferBase& operator=(const PackedBufferBase&) = delete;
+  PackedBufferBase& operator=(PackedBufferBase&&) = delete;
+
+  void Reset();
+
+  const uint8_t* data() const { return storage_begin_; }
+
+  size_t size() const {
+    return static_cast<size_t>(write_ptr_ - storage_begin_);
+  }
+
+ protected:
+  void GrowIfNeeded() {
+    PERFETTO_DCHECK(write_ptr_ >= storage_begin_ && write_ptr_ <= storage_end_);
+    if (PERFETTO_UNLIKELY(write_ptr_ + kMaxElementSize > storage_end_)) {
+      GrowSlowpath();
+    }
+  }
+
+  void GrowSlowpath();
+
+  // max(uint64_t varint encoding, biggest fixed type (uint64)).
+  static constexpr size_t kMaxElementSize = 10;
+
+  // So sizeof(this) == 8k.
+  static constexpr size_t kOnStackStorageSize = 8192 - 32;
+
+  uint8_t* storage_begin_;
+  uint8_t* storage_end_;
+  uint8_t* write_ptr_;
+  std::unique_ptr<uint8_t[]> heap_buf_;
+  alignas(uint64_t) uint8_t stack_buf_[kOnStackStorageSize];
+};
+
+class PackedVarInt : public PackedBufferBase {
+ public:
+  template <typename T>
+  void Append(T value) {
+    GrowIfNeeded();
+    write_ptr_ = proto_utils::WriteVarInt(value, write_ptr_);
+  }
+};
+
+template <typename T /* e.g. uint32_t for Fixed32 */>
+class PackedFixedSizeInt : public PackedBufferBase {
+ public:
+  void Append(T value) {
+    static_assert(sizeof(T) == 4 || sizeof(T) == 8,
+                  "PackedFixedSizeInt should be used only with 32/64-bit ints");
+    static_assert(sizeof(T) <= kMaxElementSize,
+                  "kMaxElementSize needs to be updated");
+    GrowIfNeeded();
+    PERFETTO_DCHECK(reinterpret_cast<size_t>(write_ptr_) % alignof(T) == 0);
+    memcpy(reinterpret_cast<T*>(write_ptr_), &value, sizeof(T));
+    write_ptr_ += sizeof(T);
+  }
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_PACKED_REPEATED_FIELDS_H_
+// gen_amalgamated begin header: include/perfetto/protozero/proto_decoder.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_PROTO_DECODER_H_
+#define INCLUDE_PERFETTO_PROTOZERO_PROTO_DECODER_H_
+
+#include <stdint.h>
+#include <array>
+#include <memory>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/field.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace protozero {
+
+// A generic protobuf decoder. Doesn't require any knowledge about the proto
+// schema. It tokenizes fields, retrieves their ID and type and exposes
+// accessors to retrieve its values.
+// It does NOT recurse in nested submessages, instead it just computes their
+// boundaries, recursion is left to the caller.
+// This class is designed to be used in perf-sensitive contexts. It does not
+// allocate and does not perform any proto semantic checks (e.g. repeated /
+// required / optional). It's supposedly safe wrt out-of-bounds memory accesses
+// (see proto_decoder_fuzzer.cc).
+// This class serves also as a building block for TypedProtoDecoder, used when
+// the schema is known at compile time.
+class PERFETTO_EXPORT ProtoDecoder {
+ public:
+  // Creates a ProtoDecoder using the given |buffer| with size |length| bytes.
+  ProtoDecoder(const void* buffer, size_t length)
+      : begin_(reinterpret_cast<const uint8_t*>(buffer)),
+        end_(begin_ + length),
+        read_ptr_(begin_) {}
+  ProtoDecoder(const std::string& str) : ProtoDecoder(str.data(), str.size()) {}
+  ProtoDecoder(const ConstBytes& cb) : ProtoDecoder(cb.data, cb.size) {}
+
+  // Reads the next field from the buffer and advances the read cursor. If a
+  // full field cannot be read, the returned Field will be invalid (i.e.
+  // field.valid() == false).
+  Field ReadField();
+
+  // Finds the first field with the given id. Doesn't affect the read cursor.
+  Field FindField(uint32_t field_id);
+
+  // Resets the read cursor to the start of the buffer.
+  void Reset() { read_ptr_ = begin_; }
+
+  // Resets the read cursor to the given position (must be within the buffer).
+  void Reset(const uint8_t* pos) {
+    PERFETTO_DCHECK(pos >= begin_ && pos < end_);
+    read_ptr_ = pos;
+  }
+
+  // Returns the position of read cursor, relative to the start of the buffer.
+  size_t read_offset() const { return static_cast<size_t>(read_ptr_ - begin_); }
+
+  size_t bytes_left() const {
+    PERFETTO_DCHECK(read_ptr_ <= end_);
+    return static_cast<size_t>(end_ - read_ptr_);
+  }
+
+  const uint8_t* begin() const { return begin_; }
+  const uint8_t* end() const { return end_; }
+
+ protected:
+  const uint8_t* const begin_;
+  const uint8_t* const end_;
+  const uint8_t* read_ptr_ = nullptr;
+};
+
+// An iterator-like class used to iterate through repeated fields. Used by
+// TypedProtoDecoder. The iteration sequence is a bit counter-intuitive due to
+// the fact that fields_[field_id] holds the *last* value of the field, not the
+// first, but the remaining storage holds repeated fields in FIFO order.
+// Assume that we push the 10,11,12 into a repeated field with ID=1.
+//
+// Decoder memory layout:  [  fields storage  ] [ repeated fields storage ]
+// 1st iteration:           10
+// 2nd iteration:           11                   10
+// 3rd iteration:           12                   10 11
+//
+// We start the iteration @ fields_[num_fields], which is the start of the
+// repeated fields storage, proceed until the end and lastly jump @ fields_[id].
+template <typename T>
+class RepeatedFieldIterator {
+ public:
+  RepeatedFieldIterator(uint32_t field_id,
+                        const Field* begin,
+                        const Field* end,
+                        const Field* last)
+      : field_id_(field_id), iter_(begin), end_(end), last_(last) {
+    FindNextMatchingId();
+  }
+
+  // Constructs an invalid iterator.
+  RepeatedFieldIterator()
+      : field_id_(0u), iter_(nullptr), end_(nullptr), last_(nullptr) {}
+
+  explicit operator bool() const { return iter_ != end_; }
+  const Field& field() const { return *iter_; }
+
+  T operator*() const {
+    T val{};
+    iter_->get(&val);
+    return val;
+  }
+  const Field* operator->() const { return iter_; }
+
+  RepeatedFieldIterator& operator++() {
+    PERFETTO_DCHECK(iter_ != end_);
+    if (iter_ == last_) {
+      iter_ = end_;
+      return *this;
+    }
+    ++iter_;
+    FindNextMatchingId();
+    return *this;
+  }
+
+  RepeatedFieldIterator operator++(int) {
+    PERFETTO_DCHECK(iter_ != end_);
+    RepeatedFieldIterator it(*this);
+    ++(*this);
+    return it;
+  }
+
+ private:
+  void FindNextMatchingId() {
+    PERFETTO_DCHECK(iter_ != last_);
+    for (; iter_ != end_; ++iter_) {
+      if (iter_->id() == field_id_)
+        return;
+    }
+    iter_ = last_->valid() ? last_ : end_;
+  }
+
+  uint32_t field_id_;
+
+  // Initially points to the beginning of the repeated field storage, then is
+  // incremented as we call operator++().
+  const Field* iter_;
+
+  // Always points to fields_[size_], i.e. past the end of the storage.
+  const Field* end_;
+
+  // Always points to fields_[field_id].
+  const Field* last_;
+};
+
+// As RepeatedFieldIterator, but allows iterating over a packed repeated field
+// (which will be initially stored as a single length-delimited field).
+// See |GetPackedRepeatedField| for details.
+//
+// Assumes little endianness, and that the input buffers are well formed -
+// containing an exact multiple of encoded elements.
+template <proto_utils::ProtoWireType wire_type, typename CppType>
+class PackedRepeatedFieldIterator {
+ public:
+  PackedRepeatedFieldIterator(const uint8_t* data_begin,
+                              size_t size,
+                              bool* parse_error_ptr)
+      : data_end_(data_begin ? data_begin + size : nullptr),
+        read_ptr_(data_begin),
+        parse_error_(parse_error_ptr) {
+    using proto_utils::ProtoWireType;
+    static_assert(wire_type == ProtoWireType::kVarInt ||
+                      wire_type == ProtoWireType::kFixed32 ||
+                      wire_type == ProtoWireType::kFixed64,
+                  "invalid type");
+
+    PERFETTO_DCHECK(parse_error_ptr);
+
+    // Either the field is unset (and there are no data pointer), or the field
+    // is set with a zero length payload. Mark the iterator as invalid in both
+    // cases.
+    if (size == 0) {
+      curr_value_valid_ = false;
+      return;
+    }
+
+    if ((wire_type == ProtoWireType::kFixed32 && (size % 4) != 0) ||
+        (wire_type == ProtoWireType::kFixed64 && (size % 8) != 0)) {
+      *parse_error_ = true;
+      curr_value_valid_ = false;
+      return;
+    }
+
+    ++(*this);
+  }
+
+  const CppType operator*() const { return curr_value_; }
+  explicit operator bool() const { return curr_value_valid_; }
+
+  PackedRepeatedFieldIterator& operator++() {
+    using proto_utils::ProtoWireType;
+
+    if (PERFETTO_UNLIKELY(!curr_value_valid_))
+      return *this;
+
+    if (PERFETTO_UNLIKELY(read_ptr_ == data_end_)) {
+      curr_value_valid_ = false;
+      return *this;
+    }
+
+    if (wire_type == ProtoWireType::kVarInt) {
+      uint64_t new_value = 0;
+      const uint8_t* new_pos =
+          proto_utils::ParseVarInt(read_ptr_, data_end_, &new_value);
+
+      if (PERFETTO_UNLIKELY(new_pos == read_ptr_)) {
+        // Failed to decode the varint (probably incomplete buffer).
+        *parse_error_ = true;
+        curr_value_valid_ = false;
+      } else {
+        read_ptr_ = new_pos;
+        curr_value_ = static_cast<CppType>(new_value);
+      }
+    } else {  // kFixed32 or kFixed64
+      constexpr size_t kStep = wire_type == ProtoWireType::kFixed32 ? 4 : 8;
+
+      // NB: the raw buffer is not guaranteed to be aligned, so neither are
+      // these copies.
+      memcpy(&curr_value_, read_ptr_, sizeof(CppType));
+      read_ptr_ += kStep;
+    }
+
+    return *this;
+  }
+
+  PackedRepeatedFieldIterator operator++(int) {
+    PackedRepeatedFieldIterator it(*this);
+    ++(*this);
+    return it;
+  }
+
+ private:
+  // Might be null if the backing proto field isn't set.
+  const uint8_t* const data_end_;
+
+  // The iterator looks ahead by an element, so |curr_value| holds the value
+  // to be returned when the caller dereferences the iterator, and |read_ptr_|
+  // points at the start of the next element to be decoded.
+  // |read_ptr_| might be null if the backing proto field isn't set.
+  const uint8_t* read_ptr_;
+  CppType curr_value_ = 0;
+
+  // Set to false once we've exhausted the iterator, or encountered an error.
+  bool curr_value_valid_ = true;
+
+  // Where to set parsing errors, supplied by the caller.
+  bool* const parse_error_;
+};
+
+// This decoder loads all fields upfront, without recursing in nested messages.
+// It is used as a base class for typed decoders generated by the pbzero plugin.
+// The split between TypedProtoDecoderBase and TypedProtoDecoder<> is to have
+// unique definition of functions like ParseAllFields() and ExpandHeapStorage().
+// The storage (either on-stack or on-heap) for this class is organized as
+// follows:
+// |-------------------------- fields_ ----------------------|
+// [ field 0 (invalid) ] [ fields 1 .. N ] [ repeated fields ]
+//                                        ^                  ^
+//                                        num_fields_        size_
+class PERFETTO_EXPORT TypedProtoDecoderBase : public ProtoDecoder {
+ public:
+  // If the field |id| is known at compile time, prefer the templated
+  // specialization at<kFieldNumber>().
+  const Field& Get(uint32_t id) const {
+    return PERFETTO_LIKELY(id < num_fields_) ? fields_[id] : fields_[0];
+  }
+
+  // Returns an object that allows to iterate over all instances of a repeated
+  // field given its id. Example usage:
+  //   for (auto it = decoder.GetRepeated<int32_t>(N); it; ++it) { ... }
+  template <typename T>
+  RepeatedFieldIterator<T> GetRepeated(uint32_t field_id) const {
+    return RepeatedFieldIterator<T>(field_id, &fields_[num_fields_],
+                                    &fields_[size_], &fields_[field_id]);
+  }
+
+  // Returns an objects that allows to iterate over all entries of a packed
+  // repeated field given its id and type. The |wire_type| is necessary for
+  // decoding the packed field, the |cpp_type| is for convenience & stronger
+  // typing.
+  //
+  // The caller must also supply a pointer to a bool that is set to true if the
+  // packed buffer is found to be malformed while iterating (so you need to
+  // exhaust the iterator if you want to check the full extent of the buffer).
+  //
+  // Note that unlike standard protobuf parsers, protozero does not allow
+  // treating of packed repeated fields as non-packed and vice-versa (therefore
+  // not making the packed option forwards and backwards compatible). So
+  // the caller needs to use the right accessor for correct results.
+  template <proto_utils::ProtoWireType wire_type, typename cpp_type>
+  PackedRepeatedFieldIterator<wire_type, cpp_type> GetPackedRepeated(
+      uint32_t field_id,
+      bool* parse_error_location) const {
+    const Field& field = Get(field_id);
+    if (field.valid()) {
+      return PackedRepeatedFieldIterator<wire_type, cpp_type>(
+          field.data(), field.size(), parse_error_location);
+    } else {
+      return PackedRepeatedFieldIterator<wire_type, cpp_type>(
+          nullptr, 0, parse_error_location);
+    }
+  }
+
+ protected:
+  TypedProtoDecoderBase(Field* storage,
+                        uint32_t num_fields,
+                        uint32_t capacity,
+                        const uint8_t* buffer,
+                        size_t length)
+      : ProtoDecoder(buffer, length),
+        fields_(storage),
+        num_fields_(num_fields),
+        size_(num_fields),
+        capacity_(capacity) {
+    // The reason why Field needs to be trivially de/constructible is to avoid
+    // implicit initializers on all the ~1000 entries. We need it to initialize
+    // only on the first |max_field_id| fields, the remaining capacity doesn't
+    // require initialization.
+    static_assert(std::is_trivially_constructible<Field>::value &&
+                      std::is_trivially_destructible<Field>::value &&
+                      std::is_trivial<Field>::value,
+                  "Field must be a trivial aggregate type");
+    memset(fields_, 0, sizeof(Field) * num_fields_);
+  }
+
+  void ParseAllFields();
+
+  // Called when the default on-stack storage is exhausted and new repeated
+  // fields need to be pushed.
+  void ExpandHeapStorage();
+
+  // Used only in presence of a large number of repeated fields, when the
+  // default on-stack storage is exhausted.
+  std::unique_ptr<Field[]> heap_storage_;
+
+  // Points to the storage, either on-stack (default, provided by the template
+  // specialization) or |heap_storage_| after ExpandHeapStorage() is called, in
+  // case of a large number of repeated fields.
+  Field* fields_;
+
+  // Number of fields without accounting repeated storage. This is equal to
+  // MAX_FIELD_ID + 1 (to account for the invalid 0th field).
+  // This value is always <= size_ (and hence <= capacity);
+  uint32_t num_fields_;
+
+  // Number of active |fields_| entries. This is initially equal to the highest
+  // number of fields for the message (num_fields_ == MAX_FIELD_ID + 1) and can
+  // grow up to |capacity_| in the case of repeated fields.
+  uint32_t size_;
+
+  // Initially equal to kFieldsCapacity of the TypedProtoDecoder
+  // specialization. Can grow when falling back on heap-based storage, in which
+  // case it represents the size (#fields with each entry of a repeated field
+  // counted individually) of the |heap_storage_| array.
+  uint32_t capacity_;
+};
+
+// Template class instantiated by the auto-generated decoder classes declared in
+// xxx.pbzero.h files.
+template <int MAX_FIELD_ID, bool HAS_NONPACKED_REPEATED_FIELDS>
+class TypedProtoDecoder : public TypedProtoDecoderBase {
+ public:
+  TypedProtoDecoder(const uint8_t* buffer, size_t length)
+      : TypedProtoDecoderBase(on_stack_storage_,
+                              /*num_fields=*/MAX_FIELD_ID + 1,
+                              kCapacity,
+                              buffer,
+                              length) {
+    static_assert(MAX_FIELD_ID <= kMaxDecoderFieldId, "Field ordinal too high");
+    TypedProtoDecoderBase::ParseAllFields();
+  }
+
+  template <uint32_t FIELD_ID>
+  const Field& at() const {
+    static_assert(FIELD_ID <= MAX_FIELD_ID, "FIELD_ID > MAX_FIELD_ID");
+    return fields_[FIELD_ID];
+  }
+
+  TypedProtoDecoder(TypedProtoDecoder&& other) noexcept
+      : TypedProtoDecoderBase(std::move(other)) {
+    // If the moved-from decoder was using on-stack storage, we need to update
+    // our pointer to point to this decoder's on-stack storage.
+    if (fields_ == other.on_stack_storage_) {
+      fields_ = on_stack_storage_;
+      memcpy(on_stack_storage_, other.on_stack_storage_,
+             sizeof(on_stack_storage_));
+    }
+  }
+
+ private:
+  // In the case of non-repeated fields, this constant defines the highest field
+  // id we are able to decode. This is to limit the on-stack storage.
+  // In the case of repeated fields, this constant defines the max number of
+  // repeated fields that we'll be able to store before falling back on the
+  // heap. Keep this value in sync with the one in protozero_generator.cc.
+  static constexpr size_t kMaxDecoderFieldId = 999;
+
+  // If we the message has no repeated fields we need at most N Field entries
+  // in the on-stack storage, where N is the highest field id.
+  // Otherwise we need some room to store repeated fields.
+  static constexpr size_t kCapacity =
+      1 + (HAS_NONPACKED_REPEATED_FIELDS ? kMaxDecoderFieldId : MAX_FIELD_ID);
+
+  Field on_stack_storage_[kCapacity];
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_PROTO_DECODER_H_
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_EVENT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_EVENT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class ChromeApplicationStateInfo;
+class ChromeCompositorSchedulerState;
+class ChromeContentSettingsEventInfo;
+class ChromeFrameReporter;
+class ChromeHistogramSample;
+class ChromeKeyedService;
+class ChromeLatencyInfo;
+class ChromeLegacyIpc;
+class ChromeMessagePump;
+class ChromeMojoEventInfo;
+class ChromeRendererSchedulerState;
+class ChromeUserEvent;
+class ChromeWindowHandleEventInfo;
+class DebugAnnotation;
+class LogMessage;
+class SourceLocation;
+class TaskExecution;
+class TrackEvent_LegacyEvent;
+enum TrackEvent_LegacyEvent_FlowDirection : int32_t;
+enum TrackEvent_LegacyEvent_InstantEventScope : int32_t;
+enum TrackEvent_Type : int32_t;
+
+enum TrackEvent_Type : int32_t {
+  TrackEvent_Type_TYPE_UNSPECIFIED = 0,
+  TrackEvent_Type_TYPE_SLICE_BEGIN = 1,
+  TrackEvent_Type_TYPE_SLICE_END = 2,
+  TrackEvent_Type_TYPE_INSTANT = 3,
+  TrackEvent_Type_TYPE_COUNTER = 4,
+};
+
+const TrackEvent_Type TrackEvent_Type_MIN = TrackEvent_Type_TYPE_UNSPECIFIED;
+const TrackEvent_Type TrackEvent_Type_MAX = TrackEvent_Type_TYPE_COUNTER;
+
+enum TrackEvent_LegacyEvent_FlowDirection : int32_t {
+  TrackEvent_LegacyEvent_FlowDirection_FLOW_UNSPECIFIED = 0,
+  TrackEvent_LegacyEvent_FlowDirection_FLOW_IN = 1,
+  TrackEvent_LegacyEvent_FlowDirection_FLOW_OUT = 2,
+  TrackEvent_LegacyEvent_FlowDirection_FLOW_INOUT = 3,
+};
+
+const TrackEvent_LegacyEvent_FlowDirection TrackEvent_LegacyEvent_FlowDirection_MIN = TrackEvent_LegacyEvent_FlowDirection_FLOW_UNSPECIFIED;
+const TrackEvent_LegacyEvent_FlowDirection TrackEvent_LegacyEvent_FlowDirection_MAX = TrackEvent_LegacyEvent_FlowDirection_FLOW_INOUT;
+
+enum TrackEvent_LegacyEvent_InstantEventScope : int32_t {
+  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_UNSPECIFIED = 0,
+  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_GLOBAL = 1,
+  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_PROCESS = 2,
+  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_THREAD = 3,
+};
+
+const TrackEvent_LegacyEvent_InstantEventScope TrackEvent_LegacyEvent_InstantEventScope_MIN = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_UNSPECIFIED;
+const TrackEvent_LegacyEvent_InstantEventScope TrackEvent_LegacyEvent_InstantEventScope_MAX = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_THREAD;
+
+class EventName_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  EventName_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit EventName_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit EventName_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_iid() const { return at<1>().valid(); }
+  uint64_t iid() const { return at<1>().as_uint64(); }
+  bool has_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars name() const { return at<2>().as_string(); }
+};
+
+class EventName : public ::protozero::Message {
+ public:
+  using Decoder = EventName_Decoder;
+  enum : int32_t {
+    kIidFieldNumber = 1,
+    kNameFieldNumber = 2,
+  };
+
+  using FieldMetadata_Iid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      EventName>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Iid kIid() { return {}; }
+  void set_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Iid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      EventName>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class EventCategory_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  EventCategory_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit EventCategory_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit EventCategory_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_iid() const { return at<1>().valid(); }
+  uint64_t iid() const { return at<1>().as_uint64(); }
+  bool has_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars name() const { return at<2>().as_string(); }
+};
+
+class EventCategory : public ::protozero::Message {
+ public:
+  using Decoder = EventCategory_Decoder;
+  enum : int32_t {
+    kIidFieldNumber = 1,
+    kNameFieldNumber = 2,
+  };
+
+  using FieldMetadata_Iid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      EventCategory>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Iid kIid() { return {}; }
+  void set_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Iid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      EventCategory>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TrackEventDefaults_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/45, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TrackEventDefaults_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TrackEventDefaults_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TrackEventDefaults_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_track_uuid() const { return at<11>().valid(); }
+  uint64_t track_uuid() const { return at<11>().as_uint64(); }
+  bool has_extra_counter_track_uuids() const { return at<31>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> extra_counter_track_uuids() const { return GetRepeated<uint64_t>(31); }
+  bool has_extra_double_counter_track_uuids() const { return at<45>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> extra_double_counter_track_uuids() const { return GetRepeated<uint64_t>(45); }
+};
+
+class TrackEventDefaults : public ::protozero::Message {
+ public:
+  using Decoder = TrackEventDefaults_Decoder;
+  enum : int32_t {
+    kTrackUuidFieldNumber = 11,
+    kExtraCounterTrackUuidsFieldNumber = 31,
+    kExtraDoubleCounterTrackUuidsFieldNumber = 45,
+  };
+
+  using FieldMetadata_TrackUuid =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEventDefaults>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrackUuid kTrackUuid() { return {}; }
+  void set_track_uuid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TrackUuid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExtraCounterTrackUuids =
+    ::protozero::proto_utils::FieldMetadata<
+      31,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEventDefaults>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExtraCounterTrackUuids kExtraCounterTrackUuids() { return {}; }
+  void add_extra_counter_track_uuids(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExtraCounterTrackUuids::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExtraDoubleCounterTrackUuids =
+    ::protozero::proto_utils::FieldMetadata<
+      45,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEventDefaults>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExtraDoubleCounterTrackUuids kExtraDoubleCounterTrackUuids() { return {}; }
+  void add_extra_double_counter_track_uuids(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExtraDoubleCounterTrackUuids::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TrackEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/46, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TrackEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TrackEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TrackEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_category_iids() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> category_iids() const { return GetRepeated<uint64_t>(3); }
+  bool has_categories() const { return at<22>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> categories() const { return GetRepeated<::protozero::ConstChars>(22); }
+  bool has_name_iid() const { return at<10>().valid(); }
+  uint64_t name_iid() const { return at<10>().as_uint64(); }
+  bool has_name() const { return at<23>().valid(); }
+  ::protozero::ConstChars name() const { return at<23>().as_string(); }
+  bool has_type() const { return at<9>().valid(); }
+  int32_t type() const { return at<9>().as_int32(); }
+  bool has_track_uuid() const { return at<11>().valid(); }
+  uint64_t track_uuid() const { return at<11>().as_uint64(); }
+  bool has_counter_value() const { return at<30>().valid(); }
+  int64_t counter_value() const { return at<30>().as_int64(); }
+  bool has_double_counter_value() const { return at<44>().valid(); }
+  double double_counter_value() const { return at<44>().as_double(); }
+  bool has_extra_counter_track_uuids() const { return at<31>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> extra_counter_track_uuids() const { return GetRepeated<uint64_t>(31); }
+  bool has_extra_counter_values() const { return at<12>().valid(); }
+  ::protozero::RepeatedFieldIterator<int64_t> extra_counter_values() const { return GetRepeated<int64_t>(12); }
+  bool has_extra_double_counter_track_uuids() const { return at<45>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> extra_double_counter_track_uuids() const { return GetRepeated<uint64_t>(45); }
+  bool has_extra_double_counter_values() const { return at<46>().valid(); }
+  ::protozero::RepeatedFieldIterator<double> extra_double_counter_values() const { return GetRepeated<double>(46); }
+  bool has_flow_ids() const { return at<36>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> flow_ids() const { return GetRepeated<uint64_t>(36); }
+  bool has_terminating_flow_ids() const { return at<42>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> terminating_flow_ids() const { return GetRepeated<uint64_t>(42); }
+  bool has_debug_annotations() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> debug_annotations() const { return GetRepeated<::protozero::ConstBytes>(4); }
+  bool has_task_execution() const { return at<5>().valid(); }
+  ::protozero::ConstBytes task_execution() const { return at<5>().as_bytes(); }
+  bool has_log_message() const { return at<21>().valid(); }
+  ::protozero::ConstBytes log_message() const { return at<21>().as_bytes(); }
+  bool has_cc_scheduler_state() const { return at<24>().valid(); }
+  ::protozero::ConstBytes cc_scheduler_state() const { return at<24>().as_bytes(); }
+  bool has_chrome_user_event() const { return at<25>().valid(); }
+  ::protozero::ConstBytes chrome_user_event() const { return at<25>().as_bytes(); }
+  bool has_chrome_keyed_service() const { return at<26>().valid(); }
+  ::protozero::ConstBytes chrome_keyed_service() const { return at<26>().as_bytes(); }
+  bool has_chrome_legacy_ipc() const { return at<27>().valid(); }
+  ::protozero::ConstBytes chrome_legacy_ipc() const { return at<27>().as_bytes(); }
+  bool has_chrome_histogram_sample() const { return at<28>().valid(); }
+  ::protozero::ConstBytes chrome_histogram_sample() const { return at<28>().as_bytes(); }
+  bool has_chrome_latency_info() const { return at<29>().valid(); }
+  ::protozero::ConstBytes chrome_latency_info() const { return at<29>().as_bytes(); }
+  bool has_chrome_frame_reporter() const { return at<32>().valid(); }
+  ::protozero::ConstBytes chrome_frame_reporter() const { return at<32>().as_bytes(); }
+  bool has_chrome_application_state_info() const { return at<39>().valid(); }
+  ::protozero::ConstBytes chrome_application_state_info() const { return at<39>().as_bytes(); }
+  bool has_chrome_renderer_scheduler_state() const { return at<40>().valid(); }
+  ::protozero::ConstBytes chrome_renderer_scheduler_state() const { return at<40>().as_bytes(); }
+  bool has_chrome_window_handle_event_info() const { return at<41>().valid(); }
+  ::protozero::ConstBytes chrome_window_handle_event_info() const { return at<41>().as_bytes(); }
+  bool has_chrome_content_settings_event_info() const { return at<43>().valid(); }
+  ::protozero::ConstBytes chrome_content_settings_event_info() const { return at<43>().as_bytes(); }
+  bool has_source_location() const { return at<33>().valid(); }
+  ::protozero::ConstBytes source_location() const { return at<33>().as_bytes(); }
+  bool has_source_location_iid() const { return at<34>().valid(); }
+  uint64_t source_location_iid() const { return at<34>().as_uint64(); }
+  bool has_chrome_message_pump() const { return at<35>().valid(); }
+  ::protozero::ConstBytes chrome_message_pump() const { return at<35>().as_bytes(); }
+  bool has_chrome_mojo_event_info() const { return at<38>().valid(); }
+  ::protozero::ConstBytes chrome_mojo_event_info() const { return at<38>().as_bytes(); }
+  bool has_timestamp_delta_us() const { return at<1>().valid(); }
+  int64_t timestamp_delta_us() const { return at<1>().as_int64(); }
+  bool has_timestamp_absolute_us() const { return at<16>().valid(); }
+  int64_t timestamp_absolute_us() const { return at<16>().as_int64(); }
+  bool has_thread_time_delta_us() const { return at<2>().valid(); }
+  int64_t thread_time_delta_us() const { return at<2>().as_int64(); }
+  bool has_thread_time_absolute_us() const { return at<17>().valid(); }
+  int64_t thread_time_absolute_us() const { return at<17>().as_int64(); }
+  bool has_thread_instruction_count_delta() const { return at<8>().valid(); }
+  int64_t thread_instruction_count_delta() const { return at<8>().as_int64(); }
+  bool has_thread_instruction_count_absolute() const { return at<20>().valid(); }
+  int64_t thread_instruction_count_absolute() const { return at<20>().as_int64(); }
+  bool has_legacy_event() const { return at<6>().valid(); }
+  ::protozero::ConstBytes legacy_event() const { return at<6>().as_bytes(); }
+};
+
+class TrackEvent : public ::protozero::Message {
+ public:
+  using Decoder = TrackEvent_Decoder;
+  enum : int32_t {
+    kCategoryIidsFieldNumber = 3,
+    kCategoriesFieldNumber = 22,
+    kNameIidFieldNumber = 10,
+    kNameFieldNumber = 23,
+    kTypeFieldNumber = 9,
+    kTrackUuidFieldNumber = 11,
+    kCounterValueFieldNumber = 30,
+    kDoubleCounterValueFieldNumber = 44,
+    kExtraCounterTrackUuidsFieldNumber = 31,
+    kExtraCounterValuesFieldNumber = 12,
+    kExtraDoubleCounterTrackUuidsFieldNumber = 45,
+    kExtraDoubleCounterValuesFieldNumber = 46,
+    kFlowIdsFieldNumber = 36,
+    kTerminatingFlowIdsFieldNumber = 42,
+    kDebugAnnotationsFieldNumber = 4,
+    kTaskExecutionFieldNumber = 5,
+    kLogMessageFieldNumber = 21,
+    kCcSchedulerStateFieldNumber = 24,
+    kChromeUserEventFieldNumber = 25,
+    kChromeKeyedServiceFieldNumber = 26,
+    kChromeLegacyIpcFieldNumber = 27,
+    kChromeHistogramSampleFieldNumber = 28,
+    kChromeLatencyInfoFieldNumber = 29,
+    kChromeFrameReporterFieldNumber = 32,
+    kChromeApplicationStateInfoFieldNumber = 39,
+    kChromeRendererSchedulerStateFieldNumber = 40,
+    kChromeWindowHandleEventInfoFieldNumber = 41,
+    kChromeContentSettingsEventInfoFieldNumber = 43,
+    kSourceLocationFieldNumber = 33,
+    kSourceLocationIidFieldNumber = 34,
+    kChromeMessagePumpFieldNumber = 35,
+    kChromeMojoEventInfoFieldNumber = 38,
+    kTimestampDeltaUsFieldNumber = 1,
+    kTimestampAbsoluteUsFieldNumber = 16,
+    kThreadTimeDeltaUsFieldNumber = 2,
+    kThreadTimeAbsoluteUsFieldNumber = 17,
+    kThreadInstructionCountDeltaFieldNumber = 8,
+    kThreadInstructionCountAbsoluteFieldNumber = 20,
+    kLegacyEventFieldNumber = 6,
+  };
+  using LegacyEvent = ::perfetto::protos::pbzero::TrackEvent_LegacyEvent;
+  using Type = ::perfetto::protos::pbzero::TrackEvent_Type;
+  static const Type TYPE_UNSPECIFIED = TrackEvent_Type_TYPE_UNSPECIFIED;
+  static const Type TYPE_SLICE_BEGIN = TrackEvent_Type_TYPE_SLICE_BEGIN;
+  static const Type TYPE_SLICE_END = TrackEvent_Type_TYPE_SLICE_END;
+  static const Type TYPE_INSTANT = TrackEvent_Type_TYPE_INSTANT;
+  static const Type TYPE_COUNTER = TrackEvent_Type_TYPE_COUNTER;
+
+  using FieldMetadata_CategoryIids =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CategoryIids kCategoryIids() { return {}; }
+  void add_category_iids(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CategoryIids::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Categories =
+    ::protozero::proto_utils::FieldMetadata<
+      22,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Categories kCategories() { return {}; }
+  void add_categories(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Categories::kFieldId, data, size);
+  }
+  void add_categories(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Categories::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NameIid =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NameIid kNameIid() { return {}; }
+  void set_name_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NameIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      23,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::TrackEvent_Type,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(::perfetto::protos::pbzero::TrackEvent_Type value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TrackUuid =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrackUuid kTrackUuid() { return {}; }
+  void set_track_uuid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TrackUuid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CounterValue =
+    ::protozero::proto_utils::FieldMetadata<
+      30,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CounterValue kCounterValue() { return {}; }
+  void set_counter_value(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CounterValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DoubleCounterValue =
+    ::protozero::proto_utils::FieldMetadata<
+      44,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DoubleCounterValue kDoubleCounterValue() { return {}; }
+  void set_double_counter_value(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_DoubleCounterValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExtraCounterTrackUuids =
+    ::protozero::proto_utils::FieldMetadata<
+      31,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExtraCounterTrackUuids kExtraCounterTrackUuids() { return {}; }
+  void add_extra_counter_track_uuids(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExtraCounterTrackUuids::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExtraCounterValues =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExtraCounterValues kExtraCounterValues() { return {}; }
+  void add_extra_counter_values(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExtraCounterValues::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExtraDoubleCounterTrackUuids =
+    ::protozero::proto_utils::FieldMetadata<
+      45,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExtraDoubleCounterTrackUuids kExtraDoubleCounterTrackUuids() { return {}; }
+  void add_extra_double_counter_track_uuids(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExtraDoubleCounterTrackUuids::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExtraDoubleCounterValues =
+    ::protozero::proto_utils::FieldMetadata<
+      46,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExtraDoubleCounterValues kExtraDoubleCounterValues() { return {}; }
+  void add_extra_double_counter_values(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExtraDoubleCounterValues::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FlowIds =
+    ::protozero::proto_utils::FieldMetadata<
+      36,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FlowIds kFlowIds() { return {}; }
+  void add_flow_ids(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FlowIds::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TerminatingFlowIds =
+    ::protozero::proto_utils::FieldMetadata<
+      42,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TerminatingFlowIds kTerminatingFlowIds() { return {}; }
+  void add_terminating_flow_ids(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TerminatingFlowIds::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DebugAnnotations =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DebugAnnotation,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DebugAnnotations kDebugAnnotations() { return {}; }
+  template <typename T = DebugAnnotation> T* add_debug_annotations() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_TaskExecution =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TaskExecution,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TaskExecution kTaskExecution() { return {}; }
+  template <typename T = TaskExecution> T* set_task_execution() {
+    return BeginNestedMessage<T>(5);
+  }
+
+
+  using FieldMetadata_LogMessage =
+    ::protozero::proto_utils::FieldMetadata<
+      21,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      LogMessage,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LogMessage kLogMessage() { return {}; }
+  template <typename T = LogMessage> T* set_log_message() {
+    return BeginNestedMessage<T>(21);
+  }
+
+
+  using FieldMetadata_CcSchedulerState =
+    ::protozero::proto_utils::FieldMetadata<
+      24,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeCompositorSchedulerState,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CcSchedulerState kCcSchedulerState() { return {}; }
+  template <typename T = ChromeCompositorSchedulerState> T* set_cc_scheduler_state() {
+    return BeginNestedMessage<T>(24);
+  }
+
+
+  using FieldMetadata_ChromeUserEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      25,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeUserEvent,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeUserEvent kChromeUserEvent() { return {}; }
+  template <typename T = ChromeUserEvent> T* set_chrome_user_event() {
+    return BeginNestedMessage<T>(25);
+  }
+
+
+  using FieldMetadata_ChromeKeyedService =
+    ::protozero::proto_utils::FieldMetadata<
+      26,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeKeyedService,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeKeyedService kChromeKeyedService() { return {}; }
+  template <typename T = ChromeKeyedService> T* set_chrome_keyed_service() {
+    return BeginNestedMessage<T>(26);
+  }
+
+
+  using FieldMetadata_ChromeLegacyIpc =
+    ::protozero::proto_utils::FieldMetadata<
+      27,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeLegacyIpc,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeLegacyIpc kChromeLegacyIpc() { return {}; }
+  template <typename T = ChromeLegacyIpc> T* set_chrome_legacy_ipc() {
+    return BeginNestedMessage<T>(27);
+  }
+
+
+  using FieldMetadata_ChromeHistogramSample =
+    ::protozero::proto_utils::FieldMetadata<
+      28,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeHistogramSample,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeHistogramSample kChromeHistogramSample() { return {}; }
+  template <typename T = ChromeHistogramSample> T* set_chrome_histogram_sample() {
+    return BeginNestedMessage<T>(28);
+  }
+
+
+  using FieldMetadata_ChromeLatencyInfo =
+    ::protozero::proto_utils::FieldMetadata<
+      29,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeLatencyInfo,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeLatencyInfo kChromeLatencyInfo() { return {}; }
+  template <typename T = ChromeLatencyInfo> T* set_chrome_latency_info() {
+    return BeginNestedMessage<T>(29);
+  }
+
+
+  using FieldMetadata_ChromeFrameReporter =
+    ::protozero::proto_utils::FieldMetadata<
+      32,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeFrameReporter,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeFrameReporter kChromeFrameReporter() { return {}; }
+  template <typename T = ChromeFrameReporter> T* set_chrome_frame_reporter() {
+    return BeginNestedMessage<T>(32);
+  }
+
+
+  using FieldMetadata_ChromeApplicationStateInfo =
+    ::protozero::proto_utils::FieldMetadata<
+      39,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeApplicationStateInfo,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeApplicationStateInfo kChromeApplicationStateInfo() { return {}; }
+  template <typename T = ChromeApplicationStateInfo> T* set_chrome_application_state_info() {
+    return BeginNestedMessage<T>(39);
+  }
+
+
+  using FieldMetadata_ChromeRendererSchedulerState =
+    ::protozero::proto_utils::FieldMetadata<
+      40,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeRendererSchedulerState,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeRendererSchedulerState kChromeRendererSchedulerState() { return {}; }
+  template <typename T = ChromeRendererSchedulerState> T* set_chrome_renderer_scheduler_state() {
+    return BeginNestedMessage<T>(40);
+  }
+
+
+  using FieldMetadata_ChromeWindowHandleEventInfo =
+    ::protozero::proto_utils::FieldMetadata<
+      41,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeWindowHandleEventInfo,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeWindowHandleEventInfo kChromeWindowHandleEventInfo() { return {}; }
+  template <typename T = ChromeWindowHandleEventInfo> T* set_chrome_window_handle_event_info() {
+    return BeginNestedMessage<T>(41);
+  }
+
+
+  using FieldMetadata_ChromeContentSettingsEventInfo =
+    ::protozero::proto_utils::FieldMetadata<
+      43,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeContentSettingsEventInfo,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeContentSettingsEventInfo kChromeContentSettingsEventInfo() { return {}; }
+  template <typename T = ChromeContentSettingsEventInfo> T* set_chrome_content_settings_event_info() {
+    return BeginNestedMessage<T>(43);
+  }
+
+
+  using FieldMetadata_SourceLocation =
+    ::protozero::proto_utils::FieldMetadata<
+      33,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SourceLocation,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SourceLocation kSourceLocation() { return {}; }
+  template <typename T = SourceLocation> T* set_source_location() {
+    return BeginNestedMessage<T>(33);
+  }
+
+
+  using FieldMetadata_SourceLocationIid =
+    ::protozero::proto_utils::FieldMetadata<
+      34,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SourceLocationIid kSourceLocationIid() { return {}; }
+  void set_source_location_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SourceLocationIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChromeMessagePump =
+    ::protozero::proto_utils::FieldMetadata<
+      35,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeMessagePump,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeMessagePump kChromeMessagePump() { return {}; }
+  template <typename T = ChromeMessagePump> T* set_chrome_message_pump() {
+    return BeginNestedMessage<T>(35);
+  }
+
+
+  using FieldMetadata_ChromeMojoEventInfo =
+    ::protozero::proto_utils::FieldMetadata<
+      38,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeMojoEventInfo,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeMojoEventInfo kChromeMojoEventInfo() { return {}; }
+  template <typename T = ChromeMojoEventInfo> T* set_chrome_mojo_event_info() {
+    return BeginNestedMessage<T>(38);
+  }
+
+
+  using FieldMetadata_TimestampDeltaUs =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TimestampDeltaUs kTimestampDeltaUs() { return {}; }
+  void set_timestamp_delta_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TimestampDeltaUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TimestampAbsoluteUs =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TimestampAbsoluteUs kTimestampAbsoluteUs() { return {}; }
+  void set_timestamp_absolute_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TimestampAbsoluteUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadTimeDeltaUs =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadTimeDeltaUs kThreadTimeDeltaUs() { return {}; }
+  void set_thread_time_delta_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadTimeDeltaUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadTimeAbsoluteUs =
+    ::protozero::proto_utils::FieldMetadata<
+      17,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadTimeAbsoluteUs kThreadTimeAbsoluteUs() { return {}; }
+  void set_thread_time_absolute_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadTimeAbsoluteUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadInstructionCountDelta =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadInstructionCountDelta kThreadInstructionCountDelta() { return {}; }
+  void set_thread_instruction_count_delta(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadInstructionCountDelta::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadInstructionCountAbsolute =
+    ::protozero::proto_utils::FieldMetadata<
+      20,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadInstructionCountAbsolute kThreadInstructionCountAbsolute() { return {}; }
+  void set_thread_instruction_count_absolute(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadInstructionCountAbsolute::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LegacyEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TrackEvent_LegacyEvent,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LegacyEvent kLegacyEvent() { return {}; }
+  template <typename T = TrackEvent_LegacyEvent> T* set_legacy_event() {
+    return BeginNestedMessage<T>(6);
+  }
+
+};
+
+class TrackEvent_LegacyEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/19, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TrackEvent_LegacyEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TrackEvent_LegacyEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TrackEvent_LegacyEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name_iid() const { return at<1>().valid(); }
+  uint64_t name_iid() const { return at<1>().as_uint64(); }
+  bool has_phase() const { return at<2>().valid(); }
+  int32_t phase() const { return at<2>().as_int32(); }
+  bool has_duration_us() const { return at<3>().valid(); }
+  int64_t duration_us() const { return at<3>().as_int64(); }
+  bool has_thread_duration_us() const { return at<4>().valid(); }
+  int64_t thread_duration_us() const { return at<4>().as_int64(); }
+  bool has_thread_instruction_delta() const { return at<15>().valid(); }
+  int64_t thread_instruction_delta() const { return at<15>().as_int64(); }
+  bool has_unscoped_id() const { return at<6>().valid(); }
+  uint64_t unscoped_id() const { return at<6>().as_uint64(); }
+  bool has_local_id() const { return at<10>().valid(); }
+  uint64_t local_id() const { return at<10>().as_uint64(); }
+  bool has_global_id() const { return at<11>().valid(); }
+  uint64_t global_id() const { return at<11>().as_uint64(); }
+  bool has_id_scope() const { return at<7>().valid(); }
+  ::protozero::ConstChars id_scope() const { return at<7>().as_string(); }
+  bool has_use_async_tts() const { return at<9>().valid(); }
+  bool use_async_tts() const { return at<9>().as_bool(); }
+  bool has_bind_id() const { return at<8>().valid(); }
+  uint64_t bind_id() const { return at<8>().as_uint64(); }
+  bool has_bind_to_enclosing() const { return at<12>().valid(); }
+  bool bind_to_enclosing() const { return at<12>().as_bool(); }
+  bool has_flow_direction() const { return at<13>().valid(); }
+  int32_t flow_direction() const { return at<13>().as_int32(); }
+  bool has_instant_event_scope() const { return at<14>().valid(); }
+  int32_t instant_event_scope() const { return at<14>().as_int32(); }
+  bool has_pid_override() const { return at<18>().valid(); }
+  int32_t pid_override() const { return at<18>().as_int32(); }
+  bool has_tid_override() const { return at<19>().valid(); }
+  int32_t tid_override() const { return at<19>().as_int32(); }
+};
+
+class TrackEvent_LegacyEvent : public ::protozero::Message {
+ public:
+  using Decoder = TrackEvent_LegacyEvent_Decoder;
+  enum : int32_t {
+    kNameIidFieldNumber = 1,
+    kPhaseFieldNumber = 2,
+    kDurationUsFieldNumber = 3,
+    kThreadDurationUsFieldNumber = 4,
+    kThreadInstructionDeltaFieldNumber = 15,
+    kUnscopedIdFieldNumber = 6,
+    kLocalIdFieldNumber = 10,
+    kGlobalIdFieldNumber = 11,
+    kIdScopeFieldNumber = 7,
+    kUseAsyncTtsFieldNumber = 9,
+    kBindIdFieldNumber = 8,
+    kBindToEnclosingFieldNumber = 12,
+    kFlowDirectionFieldNumber = 13,
+    kInstantEventScopeFieldNumber = 14,
+    kPidOverrideFieldNumber = 18,
+    kTidOverrideFieldNumber = 19,
+  };
+  using FlowDirection = ::perfetto::protos::pbzero::TrackEvent_LegacyEvent_FlowDirection;
+  using InstantEventScope = ::perfetto::protos::pbzero::TrackEvent_LegacyEvent_InstantEventScope;
+  static const FlowDirection FLOW_UNSPECIFIED = TrackEvent_LegacyEvent_FlowDirection_FLOW_UNSPECIFIED;
+  static const FlowDirection FLOW_IN = TrackEvent_LegacyEvent_FlowDirection_FLOW_IN;
+  static const FlowDirection FLOW_OUT = TrackEvent_LegacyEvent_FlowDirection_FLOW_OUT;
+  static const FlowDirection FLOW_INOUT = TrackEvent_LegacyEvent_FlowDirection_FLOW_INOUT;
+  static const InstantEventScope SCOPE_UNSPECIFIED = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_UNSPECIFIED;
+  static const InstantEventScope SCOPE_GLOBAL = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_GLOBAL;
+  static const InstantEventScope SCOPE_PROCESS = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_PROCESS;
+  static const InstantEventScope SCOPE_THREAD = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_THREAD;
+
+  using FieldMetadata_NameIid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NameIid kNameIid() { return {}; }
+  void set_name_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NameIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Phase =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Phase kPhase() { return {}; }
+  void set_phase(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Phase::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DurationUs =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DurationUs kDurationUs() { return {}; }
+  void set_duration_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DurationUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadDurationUs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadDurationUs kThreadDurationUs() { return {}; }
+  void set_thread_duration_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadDurationUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadInstructionDelta =
+    ::protozero::proto_utils::FieldMetadata<
+      15,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadInstructionDelta kThreadInstructionDelta() { return {}; }
+  void set_thread_instruction_delta(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadInstructionDelta::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UnscopedId =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UnscopedId kUnscopedId() { return {}; }
+  void set_unscoped_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UnscopedId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LocalId =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LocalId kLocalId() { return {}; }
+  void set_local_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LocalId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GlobalId =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GlobalId kGlobalId() { return {}; }
+  void set_global_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GlobalId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IdScope =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IdScope kIdScope() { return {}; }
+  void set_id_scope(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_IdScope::kFieldId, data, size);
+  }
+  void set_id_scope(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_IdScope::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UseAsyncTts =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UseAsyncTts kUseAsyncTts() { return {}; }
+  void set_use_async_tts(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_UseAsyncTts::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BindId =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BindId kBindId() { return {}; }
+  void set_bind_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BindId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BindToEnclosing =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BindToEnclosing kBindToEnclosing() { return {}; }
+  void set_bind_to_enclosing(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_BindToEnclosing::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FlowDirection =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::TrackEvent_LegacyEvent_FlowDirection,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FlowDirection kFlowDirection() { return {}; }
+  void set_flow_direction(::perfetto::protos::pbzero::TrackEvent_LegacyEvent_FlowDirection value) {
+    static constexpr uint32_t field_id = FieldMetadata_FlowDirection::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_InstantEventScope =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::TrackEvent_LegacyEvent_InstantEventScope,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InstantEventScope kInstantEventScope() { return {}; }
+  void set_instant_event_scope(::perfetto::protos::pbzero::TrackEvent_LegacyEvent_InstantEventScope value) {
+    static constexpr uint32_t field_id = FieldMetadata_InstantEventScope::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PidOverride =
+    ::protozero::proto_utils::FieldMetadata<
+      18,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PidOverride kPidOverride() { return {}; }
+  void set_pid_override(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PidOverride::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TidOverride =
+    ::protozero::proto_utils::FieldMetadata<
+      19,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TidOverride kTidOverride() { return {}; }
+  void set_tid_override(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TidOverride::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_TRACK_EVENT_STATE_TRACKER_H_
+#define INCLUDE_PERFETTO_TRACING_TRACK_EVENT_STATE_TRACKER_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_event.pbzero.h"
+
+#include <map>
+#include <string>
+#include <vector>
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+class TracePacket_Decoder;
+class TrackEvent;
+class TrackEvent_Decoder;
+}  // namespace pbzero
+}  // namespace protos
+
+// A helper for keeping track of incremental state when intercepting track
+// events.
+class PERFETTO_EXPORT TrackEventStateTracker {
+ public:
+  ~TrackEventStateTracker();
+
+  struct StackFrame {
+    uint64_t timestamp{};
+
+    // Only one of |name| and |name_iid| will be set.
+    std::string name;
+    uint64_t name_iid{};
+    uint64_t name_hash{};
+
+    // Only one of |category| and |category_iid| will be set.
+    std::string category;
+    uint64_t category_iid{};
+  };
+
+  struct Track {
+    uint64_t uuid{};
+    uint32_t index{};  // Ordinal number for the track in the tracing session.
+
+    std::string name;
+    int64_t pid{};
+    int64_t tid{};
+
+    // Opaque user data associated with the track.
+    std::vector<uint8_t> user_data;
+
+    // Stack of opened slices on this track.
+    std::vector<StackFrame> stack;
+  };
+
+  // State for a single trace writer sequence (typically a single thread).
+  struct SequenceState {
+    // Trace packet sequence defaults.
+    Track track;
+
+    // Interned state.
+#if PERFETTO_DCHECK_IS_ON()
+    uint32_t sequence_id{};
+#endif
+    std::map<uint64_t /*iid*/, std::string> event_names;
+    std::map<uint64_t /*iid*/, std::string> event_categories;
+    std::map<uint64_t /*iid*/, std::string> debug_annotation_names;
+  };
+
+  // State for the entire tracing session. Shared by all trace writer sequences
+  // participating in the session.
+  struct SessionState {
+    // Non-thread-bound tracks.
+    std::map<uint64_t /*uuid*/, Track> tracks;
+  };
+
+  // Represents a single decoded track event (without arguments).
+  struct ParsedTrackEvent {
+    explicit ParsedTrackEvent(
+        const perfetto::protos::pbzero::TrackEvent::Decoder&);
+
+    // Underlying event.
+    const perfetto::protos::pbzero::TrackEvent::Decoder& track_event;
+
+    // Event metadata.
+    uint64_t timestamp_ns{};
+    uint64_t duration_ns{};
+
+    size_t stack_depth{};
+
+    protozero::ConstChars category{};
+    protozero::ConstChars name{};
+    uint64_t name_hash{};
+  };
+
+  // Interface used by the tracker to access tracing session and sequence state
+  // and to report parsed track events.
+  class Delegate {
+   public:
+    virtual ~Delegate();
+
+    // Called to retrieve the session-global state shared by all sequences. The
+    // returned pointer must remain valid (locked) throughout the call to
+    // |ProcessTracePacket|.
+    virtual SessionState* GetSessionState() = 0;
+
+    // Called when the metadata (e.g., name) for a track changes. |Track| can be
+    // modified by the callback to attach user data.
+    virtual void OnTrackUpdated(Track&) = 0;
+
+    // If the packet given to |ProcessTracePacket| contains a track event, this
+    // method is called to report the properties of that event. Note that memory
+    // pointers in |TrackEvent| will only be valid during this call.
+    virtual void OnTrackEvent(const Track&, const ParsedTrackEvent&) = 0;
+  };
+
+  // Process a single trace packet, reporting any contained track event back via
+  // the delegate interface. |SequenceState| must correspond to the sequence
+  // that was used to write the packet.
+  static void ProcessTracePacket(Delegate&,
+                                 SequenceState&,
+                                 const protos::pbzero::TracePacket_Decoder&);
+
+ private:
+  static void UpdateIncrementalState(
+      Delegate&,
+      SequenceState&,
+      const protos::pbzero::TracePacket_Decoder&);
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_TRACK_EVENT_STATE_TRACKER_H_
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_CONSOLE_INTERCEPTOR_H_
+#define INCLUDE_PERFETTO_TRACING_CONSOLE_INTERCEPTOR_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/interceptor.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/track_event_state_tracker.h"
+
+#include <stdarg.h>
+
+#include <functional>
+#include <map>
+#include <vector>
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <io.h>
+#else
+#include <unistd.h>
+#endif
+
+#if defined(__GNUC__) || defined(__clang__)
+#define PERFETTO_PRINTF_ATTR \
+  __attribute__((format(printf, /*format_index=*/2, /*first_to_check=*/3)))
+#else
+#define PERFETTO_PRINTF_ATTR
+#endif
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && !defined(STDOUT_FILENO)
+#define STDOUT_FILENO 1
+#define STDERR_FILENO 2
+#endif
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+class DebugAnnotation_Decoder;
+class TracePacket_Decoder;
+class TrackEvent_Decoder;
+}  // namespace pbzero
+}  // namespace protos
+
+struct ConsoleColor;
+
+class PERFETTO_EXPORT ConsoleInterceptor
+    : public Interceptor<ConsoleInterceptor> {
+ public:
+  ~ConsoleInterceptor() override;
+
+  static void Register();
+  static void OnTracePacket(InterceptorContext context);
+
+  static void SetOutputFdForTesting(int fd);
+
+  void OnSetup(const SetupArgs&) override;
+  void OnStart(const StartArgs&) override;
+  void OnStop(const StopArgs&) override;
+
+  struct ThreadLocalState : public InterceptorBase::ThreadLocalState {
+    ThreadLocalState(ThreadLocalStateArgs&);
+    ~ThreadLocalState() override;
+
+    // Destination file. Assumed to stay valid until the program ends (i.e., is
+    // stderr or stdout).
+    int fd{};
+    bool use_colors{};
+
+    // Messages up to this length are buffered and written atomically. If a
+    // message is longer, it will be printed with multiple writes.
+    std::array<char, 1024> message_buffer{};
+    size_t buffer_pos{};
+
+    // We only support a single trace writer sequence per thread, so the
+    // sequence state is stored in TLS.
+    TrackEventStateTracker::SequenceState sequence_state;
+    uint64_t start_time_ns{};
+  };
+
+ private:
+  class Delegate;
+
+  // Appends a formatted message to |message_buffer_| or directly to the output
+  // file if the buffer is full.
+  static void Printf(InterceptorContext& context,
+                     const char* format,
+                     ...) PERFETTO_PRINTF_ATTR;
+  static void Flush(InterceptorContext& context);
+  static void SetColor(InterceptorContext& context, const ConsoleColor&);
+  static void SetColor(InterceptorContext& context, const char*);
+
+  static void PrintDebugAnnotations(InterceptorContext&,
+                                    const protos::pbzero::TrackEvent_Decoder&,
+                                    const ConsoleColor& slice_color,
+                                    const ConsoleColor& highlight_color);
+  static void PrintDebugAnnotationName(
+      InterceptorContext&,
+      const perfetto::protos::pbzero::DebugAnnotation_Decoder& annotation);
+  static void PrintDebugAnnotationValue(
+      InterceptorContext&,
+      const perfetto::protos::pbzero::DebugAnnotation_Decoder& annotation);
+
+  int fd_ = STDOUT_FILENO;
+  bool use_colors_ = true;
+
+  TrackEventStateTracker::SessionState session_state_;
+  uint64_t start_time_ns_{};
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_CONSOLE_INTERCEPTOR_H_
+// gen_amalgamated begin header: include/perfetto/tracing/core/data_source_config.h
+// gen_amalgamated begin header: gen/protos/perfetto/config/data_source_config.gen.h
+// gen_amalgamated begin header: include/perfetto/protozero/cpp_message_obj.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_CPP_MESSAGE_OBJ_H_
+#define INCLUDE_PERFETTO_PROTOZERO_CPP_MESSAGE_OBJ_H_
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace protozero {
+
+// Base class for generated .gen.h classes, which are full C++ objects that
+// support both ser and deserialization (but are not zero-copy).
+// This is only used by the "cpp" targets not the "pbzero" ones.
+class PERFETTO_EXPORT CppMessageObj {
+ public:
+  virtual ~CppMessageObj();
+  virtual std::string SerializeAsString() const = 0;
+  virtual std::vector<uint8_t> SerializeAsArray() const = 0;
+  virtual bool ParseFromArray(const void*, size_t) = 0;
+
+  bool ParseFromString(const std::string& str) {
+    return ParseFromArray(str.data(), str.size());
+  }
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_CPP_MESSAGE_OBJ_H_
+// gen_amalgamated begin header: include/perfetto/protozero/copyable_ptr.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_COPYABLE_PTR_H_
+#define INCLUDE_PERFETTO_PROTOZERO_COPYABLE_PTR_H_
+
+#include <memory>
+
+namespace protozero {
+
+// This class is essentially a std::vector<T> of fixed size = 1.
+// It's a pointer wrapper with deep copying and deep equality comparison.
+// At all effects this wrapper behaves like the underlying T, with the exception
+// of the heap indirection.
+// Conversely to a std::unique_ptr, the pointer will be always valid, never
+// null. The problem it solves is the following: when generating C++ classes
+// from proto files, we want to keep each header hermetic (i.e. not #include
+// headers of dependent types). As such we can't directly instantiate T
+// field members but we can instead rely on pointers, so only the .cc file needs
+// to see the actual definition of T. If the generated classes were move-only we
+// could just use a unique_ptr there. But they aren't, hence this wrapper.
+// Converesely to unique_ptr, this wrapper:
+// - Default constructs the T instance in its constructor.
+// - Implements deep comparison in operator== instead of pointer comparison.
+template <typename T>
+class CopyablePtr {
+ public:
+  CopyablePtr() : ptr_(new T()) {}
+  ~CopyablePtr() = default;
+
+  // Copy operators.
+  CopyablePtr(const CopyablePtr& other) : ptr_(new T(*other.ptr_)) {}
+  CopyablePtr& operator=(const CopyablePtr& other) {
+    *ptr_ = *other.ptr_;
+    return *this;
+  }
+
+  // Move operators.
+  CopyablePtr(CopyablePtr&& other) noexcept : ptr_(std::move(other.ptr_)) {
+    other.ptr_.reset(new T());
+  }
+
+  CopyablePtr& operator=(CopyablePtr&& other) {
+    ptr_ = std::move(other.ptr_);
+    other.ptr_.reset(new T());
+    return *this;
+  }
+
+  T* get() { return ptr_.get(); }
+  const T* get() const { return ptr_.get(); }
+
+  T* operator->() { return ptr_.get(); }
+  const T* operator->() const { return ptr_.get(); }
+
+  T& operator*() { return *ptr_; }
+  const T& operator*() const { return *ptr_; }
+
+  friend bool operator==(const CopyablePtr& lhs, const CopyablePtr& rhs) {
+    return *lhs == *rhs;
+  }
+
+  friend bool operator!=(const CopyablePtr& lhs, const CopyablePtr& rhs) {
+    // In theory the underlying type might have a special operator!=
+    // implementation which is not just !(x == y). Respect that.
+    return *lhs != *rhs;
+  }
+
+ private:
+  std::unique_ptr<T> ptr_;
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_COPYABLE_PTR_H_
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_DATA_SOURCE_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_DATA_SOURCE_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class DataSourceConfig;
+class TestConfig;
+class TestConfig_DummyFields;
+class InterceptorConfig;
+class ChromeConfig;
+enum DataSourceConfig_SessionInitiator : int;
+enum ChromeConfig_ClientPriority : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum DataSourceConfig_SessionInitiator : int {
+  DataSourceConfig_SessionInitiator_SESSION_INITIATOR_UNSPECIFIED = 0,
+  DataSourceConfig_SessionInitiator_SESSION_INITIATOR_TRUSTED_SYSTEM = 1,
+};
+
+class PERFETTO_EXPORT DataSourceConfig : public ::protozero::CppMessageObj {
+ public:
+  using SessionInitiator = DataSourceConfig_SessionInitiator;
+  static constexpr auto SESSION_INITIATOR_UNSPECIFIED = DataSourceConfig_SessionInitiator_SESSION_INITIATOR_UNSPECIFIED;
+  static constexpr auto SESSION_INITIATOR_TRUSTED_SYSTEM = DataSourceConfig_SessionInitiator_SESSION_INITIATOR_TRUSTED_SYSTEM;
+  static constexpr auto SessionInitiator_MIN = DataSourceConfig_SessionInitiator_SESSION_INITIATOR_UNSPECIFIED;
+  static constexpr auto SessionInitiator_MAX = DataSourceConfig_SessionInitiator_SESSION_INITIATOR_TRUSTED_SYSTEM;
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kTargetBufferFieldNumber = 2,
+    kTraceDurationMsFieldNumber = 3,
+    kStopTimeoutMsFieldNumber = 7,
+    kEnableExtraGuardrailsFieldNumber = 6,
+    kSessionInitiatorFieldNumber = 8,
+    kTracingSessionIdFieldNumber = 4,
+    kFtraceConfigFieldNumber = 100,
+    kInodeFileConfigFieldNumber = 102,
+    kProcessStatsConfigFieldNumber = 103,
+    kSysStatsConfigFieldNumber = 104,
+    kHeapprofdConfigFieldNumber = 105,
+    kJavaHprofConfigFieldNumber = 110,
+    kAndroidPowerConfigFieldNumber = 106,
+    kAndroidLogConfigFieldNumber = 107,
+    kGpuCounterConfigFieldNumber = 108,
+    kPackagesListConfigFieldNumber = 109,
+    kPerfEventConfigFieldNumber = 111,
+    kVulkanMemoryConfigFieldNumber = 112,
+    kTrackEventConfigFieldNumber = 113,
+    kAndroidPolledStateConfigFieldNumber = 114,
+    kChromeConfigFieldNumber = 101,
+    kInterceptorConfigFieldNumber = 115,
+    kLegacyConfigFieldNumber = 1000,
+    kForTestingFieldNumber = 1001,
+  };
+
+  DataSourceConfig();
+  ~DataSourceConfig() override;
+  DataSourceConfig(DataSourceConfig&&) noexcept;
+  DataSourceConfig& operator=(DataSourceConfig&&);
+  DataSourceConfig(const DataSourceConfig&);
+  DataSourceConfig& operator=(const DataSourceConfig&);
+  bool operator==(const DataSourceConfig&) const;
+  bool operator!=(const DataSourceConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_target_buffer() const { return _has_field_[2]; }
+  uint32_t target_buffer() const { return target_buffer_; }
+  void set_target_buffer(uint32_t value) { target_buffer_ = value; _has_field_.set(2); }
+
+  bool has_trace_duration_ms() const { return _has_field_[3]; }
+  uint32_t trace_duration_ms() const { return trace_duration_ms_; }
+  void set_trace_duration_ms(uint32_t value) { trace_duration_ms_ = value; _has_field_.set(3); }
+
+  bool has_stop_timeout_ms() const { return _has_field_[7]; }
+  uint32_t stop_timeout_ms() const { return stop_timeout_ms_; }
+  void set_stop_timeout_ms(uint32_t value) { stop_timeout_ms_ = value; _has_field_.set(7); }
+
+  bool has_enable_extra_guardrails() const { return _has_field_[6]; }
+  bool enable_extra_guardrails() const { return enable_extra_guardrails_; }
+  void set_enable_extra_guardrails(bool value) { enable_extra_guardrails_ = value; _has_field_.set(6); }
+
+  bool has_session_initiator() const { return _has_field_[8]; }
+  DataSourceConfig_SessionInitiator session_initiator() const { return session_initiator_; }
+  void set_session_initiator(DataSourceConfig_SessionInitiator value) { session_initiator_ = value; _has_field_.set(8); }
+
+  bool has_tracing_session_id() const { return _has_field_[4]; }
+  uint64_t tracing_session_id() const { return tracing_session_id_; }
+  void set_tracing_session_id(uint64_t value) { tracing_session_id_ = value; _has_field_.set(4); }
+
+  const std::string& ftrace_config_raw() const { return ftrace_config_; }
+  void set_ftrace_config_raw(const std::string& raw) { ftrace_config_ = raw; _has_field_.set(100); }
+
+  const std::string& inode_file_config_raw() const { return inode_file_config_; }
+  void set_inode_file_config_raw(const std::string& raw) { inode_file_config_ = raw; _has_field_.set(102); }
+
+  const std::string& process_stats_config_raw() const { return process_stats_config_; }
+  void set_process_stats_config_raw(const std::string& raw) { process_stats_config_ = raw; _has_field_.set(103); }
+
+  const std::string& sys_stats_config_raw() const { return sys_stats_config_; }
+  void set_sys_stats_config_raw(const std::string& raw) { sys_stats_config_ = raw; _has_field_.set(104); }
+
+  const std::string& heapprofd_config_raw() const { return heapprofd_config_; }
+  void set_heapprofd_config_raw(const std::string& raw) { heapprofd_config_ = raw; _has_field_.set(105); }
+
+  const std::string& java_hprof_config_raw() const { return java_hprof_config_; }
+  void set_java_hprof_config_raw(const std::string& raw) { java_hprof_config_ = raw; _has_field_.set(110); }
+
+  const std::string& android_power_config_raw() const { return android_power_config_; }
+  void set_android_power_config_raw(const std::string& raw) { android_power_config_ = raw; _has_field_.set(106); }
+
+  const std::string& android_log_config_raw() const { return android_log_config_; }
+  void set_android_log_config_raw(const std::string& raw) { android_log_config_ = raw; _has_field_.set(107); }
+
+  const std::string& gpu_counter_config_raw() const { return gpu_counter_config_; }
+  void set_gpu_counter_config_raw(const std::string& raw) { gpu_counter_config_ = raw; _has_field_.set(108); }
+
+  const std::string& packages_list_config_raw() const { return packages_list_config_; }
+  void set_packages_list_config_raw(const std::string& raw) { packages_list_config_ = raw; _has_field_.set(109); }
+
+  const std::string& perf_event_config_raw() const { return perf_event_config_; }
+  void set_perf_event_config_raw(const std::string& raw) { perf_event_config_ = raw; _has_field_.set(111); }
+
+  const std::string& vulkan_memory_config_raw() const { return vulkan_memory_config_; }
+  void set_vulkan_memory_config_raw(const std::string& raw) { vulkan_memory_config_ = raw; _has_field_.set(112); }
+
+  const std::string& track_event_config_raw() const { return track_event_config_; }
+  void set_track_event_config_raw(const std::string& raw) { track_event_config_ = raw; _has_field_.set(113); }
+
+  const std::string& android_polled_state_config_raw() const { return android_polled_state_config_; }
+  void set_android_polled_state_config_raw(const std::string& raw) { android_polled_state_config_ = raw; _has_field_.set(114); }
+
+  bool has_chrome_config() const { return _has_field_[101]; }
+  const ChromeConfig& chrome_config() const { return *chrome_config_; }
+  ChromeConfig* mutable_chrome_config() { _has_field_.set(101); return chrome_config_.get(); }
+
+  bool has_interceptor_config() const { return _has_field_[115]; }
+  const InterceptorConfig& interceptor_config() const { return *interceptor_config_; }
+  InterceptorConfig* mutable_interceptor_config() { _has_field_.set(115); return interceptor_config_.get(); }
+
+  bool has_legacy_config() const { return _has_field_[1000]; }
+  const std::string& legacy_config() const { return legacy_config_; }
+  void set_legacy_config(const std::string& value) { legacy_config_ = value; _has_field_.set(1000); }
+
+  bool has_for_testing() const { return _has_field_[1001]; }
+  const TestConfig& for_testing() const { return *for_testing_; }
+  TestConfig* mutable_for_testing() { _has_field_.set(1001); return for_testing_.get(); }
+
+ private:
+  std::string name_{};
+  uint32_t target_buffer_{};
+  uint32_t trace_duration_ms_{};
+  uint32_t stop_timeout_ms_{};
+  bool enable_extra_guardrails_{};
+  DataSourceConfig_SessionInitiator session_initiator_{};
+  uint64_t tracing_session_id_{};
+  std::string ftrace_config_;  // [lazy=true]
+  std::string inode_file_config_;  // [lazy=true]
+  std::string process_stats_config_;  // [lazy=true]
+  std::string sys_stats_config_;  // [lazy=true]
+  std::string heapprofd_config_;  // [lazy=true]
+  std::string java_hprof_config_;  // [lazy=true]
+  std::string android_power_config_;  // [lazy=true]
+  std::string android_log_config_;  // [lazy=true]
+  std::string gpu_counter_config_;  // [lazy=true]
+  std::string packages_list_config_;  // [lazy=true]
+  std::string perf_event_config_;  // [lazy=true]
+  std::string vulkan_memory_config_;  // [lazy=true]
+  std::string track_event_config_;  // [lazy=true]
+  std::string android_polled_state_config_;  // [lazy=true]
+  ::protozero::CopyablePtr<ChromeConfig> chrome_config_;
+  ::protozero::CopyablePtr<InterceptorConfig> interceptor_config_;
+  std::string legacy_config_{};
+  ::protozero::CopyablePtr<TestConfig> for_testing_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<1002> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_DATA_SOURCE_CONFIG_PROTO_CPP_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_CORE_DATA_SOURCE_CONFIG_H_
+#define INCLUDE_PERFETTO_TRACING_CORE_DATA_SOURCE_CONFIG_H_
+
+// Creates the aliases in the ::perfetto namespace, doing things like:
+// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
+// See comments in forward_decls.h for the historical reasons of this
+// indirection layer.
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/config/data_source_config.gen.h"
+
+#endif  // INCLUDE_PERFETTO_TRACING_CORE_DATA_SOURCE_CONFIG_H_
+// gen_amalgamated begin header: include/perfetto/tracing/core/data_source_descriptor.h
+// gen_amalgamated begin header: gen/protos/perfetto/common/data_source_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DATA_SOURCE_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DATA_SOURCE_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class DataSourceDescriptor;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT DataSourceDescriptor : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kWillNotifyOnStopFieldNumber = 2,
+    kWillNotifyOnStartFieldNumber = 3,
+    kHandlesIncrementalStateClearFieldNumber = 4,
+    kGpuCounterDescriptorFieldNumber = 5,
+    kTrackEventDescriptorFieldNumber = 6,
+  };
+
+  DataSourceDescriptor();
+  ~DataSourceDescriptor() override;
+  DataSourceDescriptor(DataSourceDescriptor&&) noexcept;
+  DataSourceDescriptor& operator=(DataSourceDescriptor&&);
+  DataSourceDescriptor(const DataSourceDescriptor&);
+  DataSourceDescriptor& operator=(const DataSourceDescriptor&);
+  bool operator==(const DataSourceDescriptor&) const;
+  bool operator!=(const DataSourceDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_will_notify_on_stop() const { return _has_field_[2]; }
+  bool will_notify_on_stop() const { return will_notify_on_stop_; }
+  void set_will_notify_on_stop(bool value) { will_notify_on_stop_ = value; _has_field_.set(2); }
+
+  bool has_will_notify_on_start() const { return _has_field_[3]; }
+  bool will_notify_on_start() const { return will_notify_on_start_; }
+  void set_will_notify_on_start(bool value) { will_notify_on_start_ = value; _has_field_.set(3); }
+
+  bool has_handles_incremental_state_clear() const { return _has_field_[4]; }
+  bool handles_incremental_state_clear() const { return handles_incremental_state_clear_; }
+  void set_handles_incremental_state_clear(bool value) { handles_incremental_state_clear_ = value; _has_field_.set(4); }
+
+  const std::string& gpu_counter_descriptor_raw() const { return gpu_counter_descriptor_; }
+  void set_gpu_counter_descriptor_raw(const std::string& raw) { gpu_counter_descriptor_ = raw; _has_field_.set(5); }
+
+  const std::string& track_event_descriptor_raw() const { return track_event_descriptor_; }
+  void set_track_event_descriptor_raw(const std::string& raw) { track_event_descriptor_ = raw; _has_field_.set(6); }
+
+ private:
+  std::string name_{};
+  bool will_notify_on_stop_{};
+  bool will_notify_on_start_{};
+  bool handles_incremental_state_clear_{};
+  std::string gpu_counter_descriptor_;  // [lazy=true]
+  std::string track_event_descriptor_;  // [lazy=true]
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<7> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DATA_SOURCE_DESCRIPTOR_PROTO_CPP_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_CORE_DATA_SOURCE_DESCRIPTOR_H_
+#define INCLUDE_PERFETTO_TRACING_CORE_DATA_SOURCE_DESCRIPTOR_H_
+
+// Creates the aliases in the ::perfetto namespace, doing things like:
+// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
+// See comments in forward_decls.h for the historical reasons of this
+// indirection layer.
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/common/data_source_descriptor.gen.h"
+
+#endif  // INCLUDE_PERFETTO_TRACING_CORE_DATA_SOURCE_DESCRIPTOR_H_
+// gen_amalgamated begin header: include/perfetto/tracing/core/trace_config.h
+// gen_amalgamated begin header: gen/protos/perfetto/config/trace_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACE_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACE_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TraceConfig;
+class TraceConfig_TraceFilter;
+class TraceConfig_IncidentReportConfig;
+class TraceConfig_IncrementalStateConfig;
+class TraceConfig_TriggerConfig;
+class TraceConfig_TriggerConfig_Trigger;
+class TraceConfig_GuardrailOverrides;
+class TraceConfig_StatsdMetadata;
+class TraceConfig_ProducerConfig;
+class TraceConfig_BuiltinDataSource;
+class TraceConfig_DataSource;
+class DataSourceConfig;
+class TestConfig;
+class TestConfig_DummyFields;
+class InterceptorConfig;
+class ChromeConfig;
+class TraceConfig_BufferConfig;
+enum TraceConfig_LockdownModeOperation : int;
+enum TraceConfig_CompressionType : int;
+enum TraceConfig_StatsdLogging : int;
+enum TraceConfig_TriggerConfig_TriggerMode : int;
+enum BuiltinClock : int;
+enum DataSourceConfig_SessionInitiator : int;
+enum ChromeConfig_ClientPriority : int;
+enum TraceConfig_BufferConfig_FillPolicy : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum TraceConfig_LockdownModeOperation : int {
+  TraceConfig_LockdownModeOperation_LOCKDOWN_UNCHANGED = 0,
+  TraceConfig_LockdownModeOperation_LOCKDOWN_CLEAR = 1,
+  TraceConfig_LockdownModeOperation_LOCKDOWN_SET = 2,
+};
+enum TraceConfig_CompressionType : int {
+  TraceConfig_CompressionType_COMPRESSION_TYPE_UNSPECIFIED = 0,
+  TraceConfig_CompressionType_COMPRESSION_TYPE_DEFLATE = 1,
+};
+enum TraceConfig_StatsdLogging : int {
+  TraceConfig_StatsdLogging_STATSD_LOGGING_UNSPECIFIED = 0,
+  TraceConfig_StatsdLogging_STATSD_LOGGING_ENABLED = 1,
+  TraceConfig_StatsdLogging_STATSD_LOGGING_DISABLED = 2,
+};
+enum TraceConfig_TriggerConfig_TriggerMode : int {
+  TraceConfig_TriggerConfig_TriggerMode_UNSPECIFIED = 0,
+  TraceConfig_TriggerConfig_TriggerMode_START_TRACING = 1,
+  TraceConfig_TriggerConfig_TriggerMode_STOP_TRACING = 2,
+};
+enum TraceConfig_BufferConfig_FillPolicy : int {
+  TraceConfig_BufferConfig_FillPolicy_UNSPECIFIED = 0,
+  TraceConfig_BufferConfig_FillPolicy_RING_BUFFER = 1,
+  TraceConfig_BufferConfig_FillPolicy_DISCARD = 2,
+};
+
+class PERFETTO_EXPORT TraceConfig : public ::protozero::CppMessageObj {
+ public:
+  using BufferConfig = TraceConfig_BufferConfig;
+  using DataSource = TraceConfig_DataSource;
+  using BuiltinDataSource = TraceConfig_BuiltinDataSource;
+  using ProducerConfig = TraceConfig_ProducerConfig;
+  using StatsdMetadata = TraceConfig_StatsdMetadata;
+  using GuardrailOverrides = TraceConfig_GuardrailOverrides;
+  using TriggerConfig = TraceConfig_TriggerConfig;
+  using IncrementalStateConfig = TraceConfig_IncrementalStateConfig;
+  using IncidentReportConfig = TraceConfig_IncidentReportConfig;
+  using TraceFilter = TraceConfig_TraceFilter;
+  using LockdownModeOperation = TraceConfig_LockdownModeOperation;
+  static constexpr auto LOCKDOWN_UNCHANGED = TraceConfig_LockdownModeOperation_LOCKDOWN_UNCHANGED;
+  static constexpr auto LOCKDOWN_CLEAR = TraceConfig_LockdownModeOperation_LOCKDOWN_CLEAR;
+  static constexpr auto LOCKDOWN_SET = TraceConfig_LockdownModeOperation_LOCKDOWN_SET;
+  static constexpr auto LockdownModeOperation_MIN = TraceConfig_LockdownModeOperation_LOCKDOWN_UNCHANGED;
+  static constexpr auto LockdownModeOperation_MAX = TraceConfig_LockdownModeOperation_LOCKDOWN_SET;
+  using CompressionType = TraceConfig_CompressionType;
+  static constexpr auto COMPRESSION_TYPE_UNSPECIFIED = TraceConfig_CompressionType_COMPRESSION_TYPE_UNSPECIFIED;
+  static constexpr auto COMPRESSION_TYPE_DEFLATE = TraceConfig_CompressionType_COMPRESSION_TYPE_DEFLATE;
+  static constexpr auto CompressionType_MIN = TraceConfig_CompressionType_COMPRESSION_TYPE_UNSPECIFIED;
+  static constexpr auto CompressionType_MAX = TraceConfig_CompressionType_COMPRESSION_TYPE_DEFLATE;
+  using StatsdLogging = TraceConfig_StatsdLogging;
+  static constexpr auto STATSD_LOGGING_UNSPECIFIED = TraceConfig_StatsdLogging_STATSD_LOGGING_UNSPECIFIED;
+  static constexpr auto STATSD_LOGGING_ENABLED = TraceConfig_StatsdLogging_STATSD_LOGGING_ENABLED;
+  static constexpr auto STATSD_LOGGING_DISABLED = TraceConfig_StatsdLogging_STATSD_LOGGING_DISABLED;
+  static constexpr auto StatsdLogging_MIN = TraceConfig_StatsdLogging_STATSD_LOGGING_UNSPECIFIED;
+  static constexpr auto StatsdLogging_MAX = TraceConfig_StatsdLogging_STATSD_LOGGING_DISABLED;
+  enum FieldNumbers {
+    kBuffersFieldNumber = 1,
+    kDataSourcesFieldNumber = 2,
+    kBuiltinDataSourcesFieldNumber = 20,
+    kDurationMsFieldNumber = 3,
+    kEnableExtraGuardrailsFieldNumber = 4,
+    kLockdownModeFieldNumber = 5,
+    kProducersFieldNumber = 6,
+    kStatsdMetadataFieldNumber = 7,
+    kWriteIntoFileFieldNumber = 8,
+    kOutputPathFieldNumber = 29,
+    kFileWritePeriodMsFieldNumber = 9,
+    kMaxFileSizeBytesFieldNumber = 10,
+    kGuardrailOverridesFieldNumber = 11,
+    kDeferredStartFieldNumber = 12,
+    kFlushPeriodMsFieldNumber = 13,
+    kFlushTimeoutMsFieldNumber = 14,
+    kDataSourceStopTimeoutMsFieldNumber = 23,
+    kNotifyTraceurFieldNumber = 16,
+    kBugreportScoreFieldNumber = 30,
+    kTriggerConfigFieldNumber = 17,
+    kActivateTriggersFieldNumber = 18,
+    kIncrementalStateConfigFieldNumber = 21,
+    kAllowUserBuildTracingFieldNumber = 19,
+    kUniqueSessionNameFieldNumber = 22,
+    kCompressionTypeFieldNumber = 24,
+    kIncidentReportConfigFieldNumber = 25,
+    kStatsdLoggingFieldNumber = 31,
+    kTraceUuidMsbFieldNumber = 27,
+    kTraceUuidLsbFieldNumber = 28,
+    kTraceFilterFieldNumber = 32,
+  };
+
+  TraceConfig();
+  ~TraceConfig() override;
+  TraceConfig(TraceConfig&&) noexcept;
+  TraceConfig& operator=(TraceConfig&&);
+  TraceConfig(const TraceConfig&);
+  TraceConfig& operator=(const TraceConfig&);
+  bool operator==(const TraceConfig&) const;
+  bool operator!=(const TraceConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<TraceConfig_BufferConfig>& buffers() const { return buffers_; }
+  std::vector<TraceConfig_BufferConfig>* mutable_buffers() { return &buffers_; }
+  int buffers_size() const;
+  void clear_buffers();
+  TraceConfig_BufferConfig* add_buffers();
+
+  const std::vector<TraceConfig_DataSource>& data_sources() const { return data_sources_; }
+  std::vector<TraceConfig_DataSource>* mutable_data_sources() { return &data_sources_; }
+  int data_sources_size() const;
+  void clear_data_sources();
+  TraceConfig_DataSource* add_data_sources();
+
+  bool has_builtin_data_sources() const { return _has_field_[20]; }
+  const TraceConfig_BuiltinDataSource& builtin_data_sources() const { return *builtin_data_sources_; }
+  TraceConfig_BuiltinDataSource* mutable_builtin_data_sources() { _has_field_.set(20); return builtin_data_sources_.get(); }
+
+  bool has_duration_ms() const { return _has_field_[3]; }
+  uint32_t duration_ms() const { return duration_ms_; }
+  void set_duration_ms(uint32_t value) { duration_ms_ = value; _has_field_.set(3); }
+
+  bool has_enable_extra_guardrails() const { return _has_field_[4]; }
+  bool enable_extra_guardrails() const { return enable_extra_guardrails_; }
+  void set_enable_extra_guardrails(bool value) { enable_extra_guardrails_ = value; _has_field_.set(4); }
+
+  bool has_lockdown_mode() const { return _has_field_[5]; }
+  TraceConfig_LockdownModeOperation lockdown_mode() const { return lockdown_mode_; }
+  void set_lockdown_mode(TraceConfig_LockdownModeOperation value) { lockdown_mode_ = value; _has_field_.set(5); }
+
+  const std::vector<TraceConfig_ProducerConfig>& producers() const { return producers_; }
+  std::vector<TraceConfig_ProducerConfig>* mutable_producers() { return &producers_; }
+  int producers_size() const;
+  void clear_producers();
+  TraceConfig_ProducerConfig* add_producers();
+
+  bool has_statsd_metadata() const { return _has_field_[7]; }
+  const TraceConfig_StatsdMetadata& statsd_metadata() const { return *statsd_metadata_; }
+  TraceConfig_StatsdMetadata* mutable_statsd_metadata() { _has_field_.set(7); return statsd_metadata_.get(); }
+
+  bool has_write_into_file() const { return _has_field_[8]; }
+  bool write_into_file() const { return write_into_file_; }
+  void set_write_into_file(bool value) { write_into_file_ = value; _has_field_.set(8); }
+
+  bool has_output_path() const { return _has_field_[29]; }
+  const std::string& output_path() const { return output_path_; }
+  void set_output_path(const std::string& value) { output_path_ = value; _has_field_.set(29); }
+
+  bool has_file_write_period_ms() const { return _has_field_[9]; }
+  uint32_t file_write_period_ms() const { return file_write_period_ms_; }
+  void set_file_write_period_ms(uint32_t value) { file_write_period_ms_ = value; _has_field_.set(9); }
+
+  bool has_max_file_size_bytes() const { return _has_field_[10]; }
+  uint64_t max_file_size_bytes() const { return max_file_size_bytes_; }
+  void set_max_file_size_bytes(uint64_t value) { max_file_size_bytes_ = value; _has_field_.set(10); }
+
+  bool has_guardrail_overrides() const { return _has_field_[11]; }
+  const TraceConfig_GuardrailOverrides& guardrail_overrides() const { return *guardrail_overrides_; }
+  TraceConfig_GuardrailOverrides* mutable_guardrail_overrides() { _has_field_.set(11); return guardrail_overrides_.get(); }
+
+  bool has_deferred_start() const { return _has_field_[12]; }
+  bool deferred_start() const { return deferred_start_; }
+  void set_deferred_start(bool value) { deferred_start_ = value; _has_field_.set(12); }
+
+  bool has_flush_period_ms() const { return _has_field_[13]; }
+  uint32_t flush_period_ms() const { return flush_period_ms_; }
+  void set_flush_period_ms(uint32_t value) { flush_period_ms_ = value; _has_field_.set(13); }
+
+  bool has_flush_timeout_ms() const { return _has_field_[14]; }
+  uint32_t flush_timeout_ms() const { return flush_timeout_ms_; }
+  void set_flush_timeout_ms(uint32_t value) { flush_timeout_ms_ = value; _has_field_.set(14); }
+
+  bool has_data_source_stop_timeout_ms() const { return _has_field_[23]; }
+  uint32_t data_source_stop_timeout_ms() const { return data_source_stop_timeout_ms_; }
+  void set_data_source_stop_timeout_ms(uint32_t value) { data_source_stop_timeout_ms_ = value; _has_field_.set(23); }
+
+  bool has_notify_traceur() const { return _has_field_[16]; }
+  bool notify_traceur() const { return notify_traceur_; }
+  void set_notify_traceur(bool value) { notify_traceur_ = value; _has_field_.set(16); }
+
+  bool has_bugreport_score() const { return _has_field_[30]; }
+  int32_t bugreport_score() const { return bugreport_score_; }
+  void set_bugreport_score(int32_t value) { bugreport_score_ = value; _has_field_.set(30); }
+
+  bool has_trigger_config() const { return _has_field_[17]; }
+  const TraceConfig_TriggerConfig& trigger_config() const { return *trigger_config_; }
+  TraceConfig_TriggerConfig* mutable_trigger_config() { _has_field_.set(17); return trigger_config_.get(); }
+
+  const std::vector<std::string>& activate_triggers() const { return activate_triggers_; }
+  std::vector<std::string>* mutable_activate_triggers() { return &activate_triggers_; }
+  int activate_triggers_size() const { return static_cast<int>(activate_triggers_.size()); }
+  void clear_activate_triggers() { activate_triggers_.clear(); }
+  void add_activate_triggers(std::string value) { activate_triggers_.emplace_back(value); }
+  std::string* add_activate_triggers() { activate_triggers_.emplace_back(); return &activate_triggers_.back(); }
+
+  bool has_incremental_state_config() const { return _has_field_[21]; }
+  const TraceConfig_IncrementalStateConfig& incremental_state_config() const { return *incremental_state_config_; }
+  TraceConfig_IncrementalStateConfig* mutable_incremental_state_config() { _has_field_.set(21); return incremental_state_config_.get(); }
+
+  bool has_allow_user_build_tracing() const { return _has_field_[19]; }
+  bool allow_user_build_tracing() const { return allow_user_build_tracing_; }
+  void set_allow_user_build_tracing(bool value) { allow_user_build_tracing_ = value; _has_field_.set(19); }
+
+  bool has_unique_session_name() const { return _has_field_[22]; }
+  const std::string& unique_session_name() const { return unique_session_name_; }
+  void set_unique_session_name(const std::string& value) { unique_session_name_ = value; _has_field_.set(22); }
+
+  bool has_compression_type() const { return _has_field_[24]; }
+  TraceConfig_CompressionType compression_type() const { return compression_type_; }
+  void set_compression_type(TraceConfig_CompressionType value) { compression_type_ = value; _has_field_.set(24); }
+
+  bool has_incident_report_config() const { return _has_field_[25]; }
+  const TraceConfig_IncidentReportConfig& incident_report_config() const { return *incident_report_config_; }
+  TraceConfig_IncidentReportConfig* mutable_incident_report_config() { _has_field_.set(25); return incident_report_config_.get(); }
+
+  bool has_statsd_logging() const { return _has_field_[31]; }
+  TraceConfig_StatsdLogging statsd_logging() const { return statsd_logging_; }
+  void set_statsd_logging(TraceConfig_StatsdLogging value) { statsd_logging_ = value; _has_field_.set(31); }
+
+  bool has_trace_uuid_msb() const { return _has_field_[27]; }
+  int64_t trace_uuid_msb() const { return trace_uuid_msb_; }
+  void set_trace_uuid_msb(int64_t value) { trace_uuid_msb_ = value; _has_field_.set(27); }
+
+  bool has_trace_uuid_lsb() const { return _has_field_[28]; }
+  int64_t trace_uuid_lsb() const { return trace_uuid_lsb_; }
+  void set_trace_uuid_lsb(int64_t value) { trace_uuid_lsb_ = value; _has_field_.set(28); }
+
+  bool has_trace_filter() const { return _has_field_[32]; }
+  const TraceConfig_TraceFilter& trace_filter() const { return *trace_filter_; }
+  TraceConfig_TraceFilter* mutable_trace_filter() { _has_field_.set(32); return trace_filter_.get(); }
+
+ private:
+  std::vector<TraceConfig_BufferConfig> buffers_;
+  std::vector<TraceConfig_DataSource> data_sources_;
+  ::protozero::CopyablePtr<TraceConfig_BuiltinDataSource> builtin_data_sources_;
+  uint32_t duration_ms_{};
+  bool enable_extra_guardrails_{};
+  TraceConfig_LockdownModeOperation lockdown_mode_{};
+  std::vector<TraceConfig_ProducerConfig> producers_;
+  ::protozero::CopyablePtr<TraceConfig_StatsdMetadata> statsd_metadata_;
+  bool write_into_file_{};
+  std::string output_path_{};
+  uint32_t file_write_period_ms_{};
+  uint64_t max_file_size_bytes_{};
+  ::protozero::CopyablePtr<TraceConfig_GuardrailOverrides> guardrail_overrides_;
+  bool deferred_start_{};
+  uint32_t flush_period_ms_{};
+  uint32_t flush_timeout_ms_{};
+  uint32_t data_source_stop_timeout_ms_{};
+  bool notify_traceur_{};
+  int32_t bugreport_score_{};
+  ::protozero::CopyablePtr<TraceConfig_TriggerConfig> trigger_config_;
+  std::vector<std::string> activate_triggers_;
+  ::protozero::CopyablePtr<TraceConfig_IncrementalStateConfig> incremental_state_config_;
+  bool allow_user_build_tracing_{};
+  std::string unique_session_name_{};
+  TraceConfig_CompressionType compression_type_{};
+  ::protozero::CopyablePtr<TraceConfig_IncidentReportConfig> incident_report_config_;
+  TraceConfig_StatsdLogging statsd_logging_{};
+  int64_t trace_uuid_msb_{};
+  int64_t trace_uuid_lsb_{};
+  ::protozero::CopyablePtr<TraceConfig_TraceFilter> trace_filter_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<33> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_TraceFilter : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kBytecodeFieldNumber = 1,
+  };
+
+  TraceConfig_TraceFilter();
+  ~TraceConfig_TraceFilter() override;
+  TraceConfig_TraceFilter(TraceConfig_TraceFilter&&) noexcept;
+  TraceConfig_TraceFilter& operator=(TraceConfig_TraceFilter&&);
+  TraceConfig_TraceFilter(const TraceConfig_TraceFilter&);
+  TraceConfig_TraceFilter& operator=(const TraceConfig_TraceFilter&);
+  bool operator==(const TraceConfig_TraceFilter&) const;
+  bool operator!=(const TraceConfig_TraceFilter& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_bytecode() const { return _has_field_[1]; }
+  const std::string& bytecode() const { return bytecode_; }
+  void set_bytecode(const std::string& value) { bytecode_ = value; _has_field_.set(1); }
+  void set_bytecode(const void* p, size_t s) { bytecode_.assign(reinterpret_cast<const char*>(p), s); _has_field_.set(1); }
+
+ private:
+  std::string bytecode_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_IncidentReportConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDestinationPackageFieldNumber = 1,
+    kDestinationClassFieldNumber = 2,
+    kPrivacyLevelFieldNumber = 3,
+    kSkipIncidentdFieldNumber = 5,
+    kSkipDropboxFieldNumber = 4,
+  };
+
+  TraceConfig_IncidentReportConfig();
+  ~TraceConfig_IncidentReportConfig() override;
+  TraceConfig_IncidentReportConfig(TraceConfig_IncidentReportConfig&&) noexcept;
+  TraceConfig_IncidentReportConfig& operator=(TraceConfig_IncidentReportConfig&&);
+  TraceConfig_IncidentReportConfig(const TraceConfig_IncidentReportConfig&);
+  TraceConfig_IncidentReportConfig& operator=(const TraceConfig_IncidentReportConfig&);
+  bool operator==(const TraceConfig_IncidentReportConfig&) const;
+  bool operator!=(const TraceConfig_IncidentReportConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_destination_package() const { return _has_field_[1]; }
+  const std::string& destination_package() const { return destination_package_; }
+  void set_destination_package(const std::string& value) { destination_package_ = value; _has_field_.set(1); }
+
+  bool has_destination_class() const { return _has_field_[2]; }
+  const std::string& destination_class() const { return destination_class_; }
+  void set_destination_class(const std::string& value) { destination_class_ = value; _has_field_.set(2); }
+
+  bool has_privacy_level() const { return _has_field_[3]; }
+  int32_t privacy_level() const { return privacy_level_; }
+  void set_privacy_level(int32_t value) { privacy_level_ = value; _has_field_.set(3); }
+
+  bool has_skip_incidentd() const { return _has_field_[5]; }
+  bool skip_incidentd() const { return skip_incidentd_; }
+  void set_skip_incidentd(bool value) { skip_incidentd_ = value; _has_field_.set(5); }
+
+  bool has_skip_dropbox() const { return _has_field_[4]; }
+  bool skip_dropbox() const { return skip_dropbox_; }
+  void set_skip_dropbox(bool value) { skip_dropbox_ = value; _has_field_.set(4); }
+
+ private:
+  std::string destination_package_{};
+  std::string destination_class_{};
+  int32_t privacy_level_{};
+  bool skip_incidentd_{};
+  bool skip_dropbox_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_IncrementalStateConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kClearPeriodMsFieldNumber = 1,
+  };
+
+  TraceConfig_IncrementalStateConfig();
+  ~TraceConfig_IncrementalStateConfig() override;
+  TraceConfig_IncrementalStateConfig(TraceConfig_IncrementalStateConfig&&) noexcept;
+  TraceConfig_IncrementalStateConfig& operator=(TraceConfig_IncrementalStateConfig&&);
+  TraceConfig_IncrementalStateConfig(const TraceConfig_IncrementalStateConfig&);
+  TraceConfig_IncrementalStateConfig& operator=(const TraceConfig_IncrementalStateConfig&);
+  bool operator==(const TraceConfig_IncrementalStateConfig&) const;
+  bool operator!=(const TraceConfig_IncrementalStateConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_clear_period_ms() const { return _has_field_[1]; }
+  uint32_t clear_period_ms() const { return clear_period_ms_; }
+  void set_clear_period_ms(uint32_t value) { clear_period_ms_ = value; _has_field_.set(1); }
+
+ private:
+  uint32_t clear_period_ms_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_TriggerConfig : public ::protozero::CppMessageObj {
+ public:
+  using Trigger = TraceConfig_TriggerConfig_Trigger;
+  using TriggerMode = TraceConfig_TriggerConfig_TriggerMode;
+  static constexpr auto UNSPECIFIED = TraceConfig_TriggerConfig_TriggerMode_UNSPECIFIED;
+  static constexpr auto START_TRACING = TraceConfig_TriggerConfig_TriggerMode_START_TRACING;
+  static constexpr auto STOP_TRACING = TraceConfig_TriggerConfig_TriggerMode_STOP_TRACING;
+  static constexpr auto TriggerMode_MIN = TraceConfig_TriggerConfig_TriggerMode_UNSPECIFIED;
+  static constexpr auto TriggerMode_MAX = TraceConfig_TriggerConfig_TriggerMode_STOP_TRACING;
+  enum FieldNumbers {
+    kTriggerModeFieldNumber = 1,
+    kTriggersFieldNumber = 2,
+    kTriggerTimeoutMsFieldNumber = 3,
+  };
+
+  TraceConfig_TriggerConfig();
+  ~TraceConfig_TriggerConfig() override;
+  TraceConfig_TriggerConfig(TraceConfig_TriggerConfig&&) noexcept;
+  TraceConfig_TriggerConfig& operator=(TraceConfig_TriggerConfig&&);
+  TraceConfig_TriggerConfig(const TraceConfig_TriggerConfig&);
+  TraceConfig_TriggerConfig& operator=(const TraceConfig_TriggerConfig&);
+  bool operator==(const TraceConfig_TriggerConfig&) const;
+  bool operator!=(const TraceConfig_TriggerConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trigger_mode() const { return _has_field_[1]; }
+  TraceConfig_TriggerConfig_TriggerMode trigger_mode() const { return trigger_mode_; }
+  void set_trigger_mode(TraceConfig_TriggerConfig_TriggerMode value) { trigger_mode_ = value; _has_field_.set(1); }
+
+  const std::vector<TraceConfig_TriggerConfig_Trigger>& triggers() const { return triggers_; }
+  std::vector<TraceConfig_TriggerConfig_Trigger>* mutable_triggers() { return &triggers_; }
+  int triggers_size() const;
+  void clear_triggers();
+  TraceConfig_TriggerConfig_Trigger* add_triggers();
+
+  bool has_trigger_timeout_ms() const { return _has_field_[3]; }
+  uint32_t trigger_timeout_ms() const { return trigger_timeout_ms_; }
+  void set_trigger_timeout_ms(uint32_t value) { trigger_timeout_ms_ = value; _has_field_.set(3); }
+
+ private:
+  TraceConfig_TriggerConfig_TriggerMode trigger_mode_{};
+  std::vector<TraceConfig_TriggerConfig_Trigger> triggers_;
+  uint32_t trigger_timeout_ms_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_TriggerConfig_Trigger : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kProducerNameRegexFieldNumber = 2,
+    kStopDelayMsFieldNumber = 3,
+    kMaxPer24HFieldNumber = 4,
+    kSkipProbabilityFieldNumber = 5,
+  };
+
+  TraceConfig_TriggerConfig_Trigger();
+  ~TraceConfig_TriggerConfig_Trigger() override;
+  TraceConfig_TriggerConfig_Trigger(TraceConfig_TriggerConfig_Trigger&&) noexcept;
+  TraceConfig_TriggerConfig_Trigger& operator=(TraceConfig_TriggerConfig_Trigger&&);
+  TraceConfig_TriggerConfig_Trigger(const TraceConfig_TriggerConfig_Trigger&);
+  TraceConfig_TriggerConfig_Trigger& operator=(const TraceConfig_TriggerConfig_Trigger&);
+  bool operator==(const TraceConfig_TriggerConfig_Trigger&) const;
+  bool operator!=(const TraceConfig_TriggerConfig_Trigger& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_producer_name_regex() const { return _has_field_[2]; }
+  const std::string& producer_name_regex() const { return producer_name_regex_; }
+  void set_producer_name_regex(const std::string& value) { producer_name_regex_ = value; _has_field_.set(2); }
+
+  bool has_stop_delay_ms() const { return _has_field_[3]; }
+  uint32_t stop_delay_ms() const { return stop_delay_ms_; }
+  void set_stop_delay_ms(uint32_t value) { stop_delay_ms_ = value; _has_field_.set(3); }
+
+  bool has_max_per_24_h() const { return _has_field_[4]; }
+  uint32_t max_per_24_h() const { return max_per_24_h_; }
+  void set_max_per_24_h(uint32_t value) { max_per_24_h_ = value; _has_field_.set(4); }
+
+  bool has_skip_probability() const { return _has_field_[5]; }
+  double skip_probability() const { return skip_probability_; }
+  void set_skip_probability(double value) { skip_probability_ = value; _has_field_.set(5); }
+
+ private:
+  std::string name_{};
+  std::string producer_name_regex_{};
+  uint32_t stop_delay_ms_{};
+  uint32_t max_per_24_h_{};
+  double skip_probability_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_GuardrailOverrides : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kMaxUploadPerDayBytesFieldNumber = 1,
+  };
+
+  TraceConfig_GuardrailOverrides();
+  ~TraceConfig_GuardrailOverrides() override;
+  TraceConfig_GuardrailOverrides(TraceConfig_GuardrailOverrides&&) noexcept;
+  TraceConfig_GuardrailOverrides& operator=(TraceConfig_GuardrailOverrides&&);
+  TraceConfig_GuardrailOverrides(const TraceConfig_GuardrailOverrides&);
+  TraceConfig_GuardrailOverrides& operator=(const TraceConfig_GuardrailOverrides&);
+  bool operator==(const TraceConfig_GuardrailOverrides&) const;
+  bool operator!=(const TraceConfig_GuardrailOverrides& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_max_upload_per_day_bytes() const { return _has_field_[1]; }
+  uint64_t max_upload_per_day_bytes() const { return max_upload_per_day_bytes_; }
+  void set_max_upload_per_day_bytes(uint64_t value) { max_upload_per_day_bytes_ = value; _has_field_.set(1); }
+
+ private:
+  uint64_t max_upload_per_day_bytes_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_StatsdMetadata : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTriggeringAlertIdFieldNumber = 1,
+    kTriggeringConfigUidFieldNumber = 2,
+    kTriggeringConfigIdFieldNumber = 3,
+    kTriggeringSubscriptionIdFieldNumber = 4,
+  };
+
+  TraceConfig_StatsdMetadata();
+  ~TraceConfig_StatsdMetadata() override;
+  TraceConfig_StatsdMetadata(TraceConfig_StatsdMetadata&&) noexcept;
+  TraceConfig_StatsdMetadata& operator=(TraceConfig_StatsdMetadata&&);
+  TraceConfig_StatsdMetadata(const TraceConfig_StatsdMetadata&);
+  TraceConfig_StatsdMetadata& operator=(const TraceConfig_StatsdMetadata&);
+  bool operator==(const TraceConfig_StatsdMetadata&) const;
+  bool operator!=(const TraceConfig_StatsdMetadata& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_triggering_alert_id() const { return _has_field_[1]; }
+  int64_t triggering_alert_id() const { return triggering_alert_id_; }
+  void set_triggering_alert_id(int64_t value) { triggering_alert_id_ = value; _has_field_.set(1); }
+
+  bool has_triggering_config_uid() const { return _has_field_[2]; }
+  int32_t triggering_config_uid() const { return triggering_config_uid_; }
+  void set_triggering_config_uid(int32_t value) { triggering_config_uid_ = value; _has_field_.set(2); }
+
+  bool has_triggering_config_id() const { return _has_field_[3]; }
+  int64_t triggering_config_id() const { return triggering_config_id_; }
+  void set_triggering_config_id(int64_t value) { triggering_config_id_ = value; _has_field_.set(3); }
+
+  bool has_triggering_subscription_id() const { return _has_field_[4]; }
+  int64_t triggering_subscription_id() const { return triggering_subscription_id_; }
+  void set_triggering_subscription_id(int64_t value) { triggering_subscription_id_ = value; _has_field_.set(4); }
+
+ private:
+  int64_t triggering_alert_id_{};
+  int32_t triggering_config_uid_{};
+  int64_t triggering_config_id_{};
+  int64_t triggering_subscription_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_ProducerConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kProducerNameFieldNumber = 1,
+    kShmSizeKbFieldNumber = 2,
+    kPageSizeKbFieldNumber = 3,
+  };
+
+  TraceConfig_ProducerConfig();
+  ~TraceConfig_ProducerConfig() override;
+  TraceConfig_ProducerConfig(TraceConfig_ProducerConfig&&) noexcept;
+  TraceConfig_ProducerConfig& operator=(TraceConfig_ProducerConfig&&);
+  TraceConfig_ProducerConfig(const TraceConfig_ProducerConfig&);
+  TraceConfig_ProducerConfig& operator=(const TraceConfig_ProducerConfig&);
+  bool operator==(const TraceConfig_ProducerConfig&) const;
+  bool operator!=(const TraceConfig_ProducerConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_producer_name() const { return _has_field_[1]; }
+  const std::string& producer_name() const { return producer_name_; }
+  void set_producer_name(const std::string& value) { producer_name_ = value; _has_field_.set(1); }
+
+  bool has_shm_size_kb() const { return _has_field_[2]; }
+  uint32_t shm_size_kb() const { return shm_size_kb_; }
+  void set_shm_size_kb(uint32_t value) { shm_size_kb_ = value; _has_field_.set(2); }
+
+  bool has_page_size_kb() const { return _has_field_[3]; }
+  uint32_t page_size_kb() const { return page_size_kb_; }
+  void set_page_size_kb(uint32_t value) { page_size_kb_ = value; _has_field_.set(3); }
+
+ private:
+  std::string producer_name_{};
+  uint32_t shm_size_kb_{};
+  uint32_t page_size_kb_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_BuiltinDataSource : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDisableClockSnapshottingFieldNumber = 1,
+    kDisableTraceConfigFieldNumber = 2,
+    kDisableSystemInfoFieldNumber = 3,
+    kDisableServiceEventsFieldNumber = 4,
+    kPrimaryTraceClockFieldNumber = 5,
+    kSnapshotIntervalMsFieldNumber = 6,
+    kPreferSuspendClockForSnapshotFieldNumber = 7,
+  };
+
+  TraceConfig_BuiltinDataSource();
+  ~TraceConfig_BuiltinDataSource() override;
+  TraceConfig_BuiltinDataSource(TraceConfig_BuiltinDataSource&&) noexcept;
+  TraceConfig_BuiltinDataSource& operator=(TraceConfig_BuiltinDataSource&&);
+  TraceConfig_BuiltinDataSource(const TraceConfig_BuiltinDataSource&);
+  TraceConfig_BuiltinDataSource& operator=(const TraceConfig_BuiltinDataSource&);
+  bool operator==(const TraceConfig_BuiltinDataSource&) const;
+  bool operator!=(const TraceConfig_BuiltinDataSource& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_disable_clock_snapshotting() const { return _has_field_[1]; }
+  bool disable_clock_snapshotting() const { return disable_clock_snapshotting_; }
+  void set_disable_clock_snapshotting(bool value) { disable_clock_snapshotting_ = value; _has_field_.set(1); }
+
+  bool has_disable_trace_config() const { return _has_field_[2]; }
+  bool disable_trace_config() const { return disable_trace_config_; }
+  void set_disable_trace_config(bool value) { disable_trace_config_ = value; _has_field_.set(2); }
+
+  bool has_disable_system_info() const { return _has_field_[3]; }
+  bool disable_system_info() const { return disable_system_info_; }
+  void set_disable_system_info(bool value) { disable_system_info_ = value; _has_field_.set(3); }
+
+  bool has_disable_service_events() const { return _has_field_[4]; }
+  bool disable_service_events() const { return disable_service_events_; }
+  void set_disable_service_events(bool value) { disable_service_events_ = value; _has_field_.set(4); }
+
+  bool has_primary_trace_clock() const { return _has_field_[5]; }
+  BuiltinClock primary_trace_clock() const { return primary_trace_clock_; }
+  void set_primary_trace_clock(BuiltinClock value) { primary_trace_clock_ = value; _has_field_.set(5); }
+
+  bool has_snapshot_interval_ms() const { return _has_field_[6]; }
+  uint32_t snapshot_interval_ms() const { return snapshot_interval_ms_; }
+  void set_snapshot_interval_ms(uint32_t value) { snapshot_interval_ms_ = value; _has_field_.set(6); }
+
+  bool has_prefer_suspend_clock_for_snapshot() const { return _has_field_[7]; }
+  bool prefer_suspend_clock_for_snapshot() const { return prefer_suspend_clock_for_snapshot_; }
+  void set_prefer_suspend_clock_for_snapshot(bool value) { prefer_suspend_clock_for_snapshot_ = value; _has_field_.set(7); }
+
+ private:
+  bool disable_clock_snapshotting_{};
+  bool disable_trace_config_{};
+  bool disable_system_info_{};
+  bool disable_service_events_{};
+  BuiltinClock primary_trace_clock_{};
+  uint32_t snapshot_interval_ms_{};
+  bool prefer_suspend_clock_for_snapshot_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<8> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_DataSource : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kConfigFieldNumber = 1,
+    kProducerNameFilterFieldNumber = 2,
+    kProducerNameRegexFilterFieldNumber = 3,
+  };
+
+  TraceConfig_DataSource();
+  ~TraceConfig_DataSource() override;
+  TraceConfig_DataSource(TraceConfig_DataSource&&) noexcept;
+  TraceConfig_DataSource& operator=(TraceConfig_DataSource&&);
+  TraceConfig_DataSource(const TraceConfig_DataSource&);
+  TraceConfig_DataSource& operator=(const TraceConfig_DataSource&);
+  bool operator==(const TraceConfig_DataSource&) const;
+  bool operator!=(const TraceConfig_DataSource& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_config() const { return _has_field_[1]; }
+  const DataSourceConfig& config() const { return *config_; }
+  DataSourceConfig* mutable_config() { _has_field_.set(1); return config_.get(); }
+
+  const std::vector<std::string>& producer_name_filter() const { return producer_name_filter_; }
+  std::vector<std::string>* mutable_producer_name_filter() { return &producer_name_filter_; }
+  int producer_name_filter_size() const { return static_cast<int>(producer_name_filter_.size()); }
+  void clear_producer_name_filter() { producer_name_filter_.clear(); }
+  void add_producer_name_filter(std::string value) { producer_name_filter_.emplace_back(value); }
+  std::string* add_producer_name_filter() { producer_name_filter_.emplace_back(); return &producer_name_filter_.back(); }
+
+  const std::vector<std::string>& producer_name_regex_filter() const { return producer_name_regex_filter_; }
+  std::vector<std::string>* mutable_producer_name_regex_filter() { return &producer_name_regex_filter_; }
+  int producer_name_regex_filter_size() const { return static_cast<int>(producer_name_regex_filter_.size()); }
+  void clear_producer_name_regex_filter() { producer_name_regex_filter_.clear(); }
+  void add_producer_name_regex_filter(std::string value) { producer_name_regex_filter_.emplace_back(value); }
+  std::string* add_producer_name_regex_filter() { producer_name_regex_filter_.emplace_back(); return &producer_name_regex_filter_.back(); }
+
+ private:
+  ::protozero::CopyablePtr<DataSourceConfig> config_;
+  std::vector<std::string> producer_name_filter_;
+  std::vector<std::string> producer_name_regex_filter_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_BufferConfig : public ::protozero::CppMessageObj {
+ public:
+  using FillPolicy = TraceConfig_BufferConfig_FillPolicy;
+  static constexpr auto UNSPECIFIED = TraceConfig_BufferConfig_FillPolicy_UNSPECIFIED;
+  static constexpr auto RING_BUFFER = TraceConfig_BufferConfig_FillPolicy_RING_BUFFER;
+  static constexpr auto DISCARD = TraceConfig_BufferConfig_FillPolicy_DISCARD;
+  static constexpr auto FillPolicy_MIN = TraceConfig_BufferConfig_FillPolicy_UNSPECIFIED;
+  static constexpr auto FillPolicy_MAX = TraceConfig_BufferConfig_FillPolicy_DISCARD;
+  enum FieldNumbers {
+    kSizeKbFieldNumber = 1,
+    kFillPolicyFieldNumber = 4,
+  };
+
+  TraceConfig_BufferConfig();
+  ~TraceConfig_BufferConfig() override;
+  TraceConfig_BufferConfig(TraceConfig_BufferConfig&&) noexcept;
+  TraceConfig_BufferConfig& operator=(TraceConfig_BufferConfig&&);
+  TraceConfig_BufferConfig(const TraceConfig_BufferConfig&);
+  TraceConfig_BufferConfig& operator=(const TraceConfig_BufferConfig&);
+  bool operator==(const TraceConfig_BufferConfig&) const;
+  bool operator!=(const TraceConfig_BufferConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_size_kb() const { return _has_field_[1]; }
+  uint32_t size_kb() const { return size_kb_; }
+  void set_size_kb(uint32_t value) { size_kb_ = value; _has_field_.set(1); }
+
+  bool has_fill_policy() const { return _has_field_[4]; }
+  TraceConfig_BufferConfig_FillPolicy fill_policy() const { return fill_policy_; }
+  void set_fill_policy(TraceConfig_BufferConfig_FillPolicy value) { fill_policy_ = value; _has_field_.set(4); }
+
+ private:
+  uint32_t size_kb_{};
+  TraceConfig_BufferConfig_FillPolicy fill_policy_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACE_CONFIG_PROTO_CPP_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_CORE_TRACE_CONFIG_H_
+#define INCLUDE_PERFETTO_TRACING_CORE_TRACE_CONFIG_H_
+
+// Creates the aliases in the ::perfetto namespace, doing things like:
+// using ::perfetto::Foo = ::perfetto::protos::gen::Foo.
+// See comments in forward_decls.h for the historical reasons of this
+// indirection layer.
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/config/trace_config.gen.h"
+
+#endif  // INCLUDE_PERFETTO_TRACING_CORE_TRACE_CONFIG_H_
+// gen_amalgamated begin header: include/perfetto/tracing/data_source.h
+// gen_amalgamated begin header: include/perfetto/tracing/internal/tracing_muxer.h
+// gen_amalgamated begin header: include/perfetto/tracing/internal/tracing_tls.h
+// gen_amalgamated begin header: include/perfetto/tracing/platform.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_PLATFORM_H_
+#define INCLUDE_PERFETTO_TRACING_PLATFORM_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <functional>
+#include <memory>
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+
+namespace base {
+class TaskRunner;
+}  // namespace base
+
+// This abstract class is used to abstract dependencies on platform-specific
+// primitives that cannot be implemented by the perfetto codebase and must be
+// provided or overridden by the embedder.
+// This is, for instance, for cases where we want to use some particular
+// base:: class in Chrome and provide instead POSIX fallbacks for other
+// embedders.
+
+// Base class for thread-local objects. This is to get a basic object vtable and
+// delegate destruction to the embedder. See Platform::CreateThreadLocalObject.
+class PERFETTO_EXPORT PlatformThreadLocalObject {
+ public:
+  // Implemented by perfetto internal code. The embedder must call this when
+  // implementing GetOrCreateThreadLocalObject() to create an instance for the
+  // first time on each thread.
+  static std::unique_ptr<PlatformThreadLocalObject> CreateInstance();
+  virtual ~PlatformThreadLocalObject();
+};
+
+class PERFETTO_EXPORT Platform {
+ public:
+  // Embedders can use this unless they have custom needs (e.g. Chrome wanting
+  // to use its own base class for TLS).
+  static Platform* GetDefaultPlatform();
+
+  virtual ~Platform();
+
+  // Creates a thread-local object. The embedder must:
+  // - Create an instance per-thread calling ThreadLocalObject::CreateInstance.
+  // - Own the lifetime of the returned object as long as the thread is alive.
+  // - Destroy it when the thread exits.
+  // Perfetto requires only one thread-local object overall (obviously, one
+  // instance per-thread) from the embedder.
+  using ThreadLocalObject = ::perfetto::PlatformThreadLocalObject;
+  virtual ThreadLocalObject* GetOrCreateThreadLocalObject() = 0;
+
+  // Creates a sequenced task runner. The easiest implementation is to create
+  // a new thread (e.g. use base::ThreadTaskRunner) but this can also be
+  // implemented in some more clever way (e.g. using chromiums's scheduler).
+  struct CreateTaskRunnerArgs {};
+  virtual std::unique_ptr<base::TaskRunner> CreateTaskRunner(
+      const CreateTaskRunnerArgs&) = 0;
+
+  // Used to derive the producer name. Mostly relevant when using the
+  // kSystemBackend mode. It can be an arbitrary string when using the
+  // in-process mode.
+  virtual std::string GetCurrentProcessName() = 0;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_PLATFORM_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_TRACING_TLS_H_
+#define INCLUDE_PERFETTO_TRACING_INTERNAL_TRACING_TLS_H_
+
+#include <array>
+#include <memory>
+
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/data_source_internal.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/platform.h"
+
+namespace perfetto {
+
+class TraceWriterBase;
+
+namespace internal {
+
+// Organization of the thread-local storage
+// ----------------------------------------
+// First of all, remember the cardinality of the problem: at any point in time
+// there are M data sources registered (i.e. number of subclasses of DataSource)
+// and up to N concurrent instances for each data source, so up to M * N total
+// data source instances around.
+// Each data source instance can be accessed by T threads (no upper bound).
+// We can safely put hard limits both to M and N (i.e. say that we support at
+// most 32 data source types per process and up to 8 concurrent instances).
+//
+// We want to make it so from the Platform viewpoint, we use only one global
+// TLS object, so T instances in total, one per thread, regardless of M and N.
+// This allows to deal with at-thread-exit destruction only in one place, rather
+// than N, M or M * N.
+//
+// Visually:
+//                     [    Thread 1   ] [    Thread 2   ] [    Thread T   ]
+//                     +---------------+ +---------------+ +---------------+
+// Data source Foo     |               | |               | |               |
+//  Instance 1         |     TLS       | |     TLS       | |     TLS       |
+//  Instance 2         |    Object     | |    Object     | |    Object     |
+//  Instance 3         |               | |               | |               |
+//                     |               | |               | |               |
+// Data source Bar     |               | |               | |               |
+//  Instance 1         |               | |               | |               |
+//  Instance 2         |               | |               | |               |
+//                     +---------------+ +---------------+ +---------------+
+//
+// Each TLS Object is organized as an array of M DataSourceThreadLocalState.
+// Each DSTLS itself is an array of up to N per-instance objects.
+// The only per-instance object for now is the TraceWriter.
+// So for each data source, for each instance, for each thread we keep one
+// TraceWriter.
+// The lookup is O(1): Given the TLS object, the TraceWriter is just tls[M][N].
+class TracingTLS : public Platform::ThreadLocalObject {
+ public:
+  ~TracingTLS() override;
+
+  // This is checked against TraceMuxerImpl's global generation counter to
+  // handle destruction of TraceWriter(s) that belong to data sources that
+  // have been stopped. When the two numbers diverge, a scan of all the
+  // thread-local TraceWriter(s) is issued.
+  uint32_t generation = 0;
+
+  // This flag is true while this thread is inside a trace point for any data
+  // source or in other delicate parts of the tracing machinery during which we
+  // should not try to trace. Used to prevent unexpected re-entrancy.
+  bool is_in_trace_point = false;
+
+  // By default all data source instances have independent thread-local state
+  // (see above).
+  std::array<DataSourceThreadLocalState, kMaxDataSources> data_sources_tls{};
+
+  // Track event data sources, however, share the same thread-local state in
+  // order to be able to share trace writers and interning state across all
+  // track event categories.
+  DataSourceThreadLocalState track_event_tls{};
+};
+
+struct ScopedReentrancyAnnotator {
+  ScopedReentrancyAnnotator(TracingTLS& root_tls) : root_tls_(root_tls) {
+    PERFETTO_DCHECK(!root_tls_.is_in_trace_point);
+    root_tls_.is_in_trace_point = true;
+  }
+  ~ScopedReentrancyAnnotator() { root_tls_.is_in_trace_point = false; }
+
+ private:
+  TracingTLS& root_tls_;
+};
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_INTERNAL_TRACING_TLS_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_TRACING_MUXER_H_
+#define INCLUDE_PERFETTO_TRACING_INTERNAL_TRACING_MUXER_H_
+
+#include <atomic>
+#include <memory>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/interceptor.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_tls.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/platform.h"
+namespace perfetto {
+
+class DataSourceBase;
+class TraceWriterBase;
+struct TracingInitArgs;
+class TracingSession;
+
+namespace internal {
+
+struct DataSourceStaticState;
+
+// This class acts as a bridge between the public API methods and the
+// TracingBackend(s). It exposes a simplified view of the world to the API
+// methods, so that they don't have to care about the multiplicity of backends.
+// It handles all the bookkeeping to map data source instances and trace writers
+// to the various backends.
+// See tracing_muxer_impl.h for the full picture. This class contains only the
+// fewer fields and methods that need to be exposed to public/ headers. Fields
+// and methods that are required to implement them should go into
+// src/tracing/internal/tracing_muxer_impl.h instead: that one can pull in
+// perfetto headers outside of public, this one cannot.
+class PERFETTO_EXPORT TracingMuxer {
+ public:
+  static TracingMuxer* Get() { return instance_; }
+
+  virtual ~TracingMuxer();
+
+  TracingTLS* GetOrCreateTracingTLS() {
+    return static_cast<TracingTLS*>(platform_->GetOrCreateThreadLocalObject());
+  }
+
+  // This method can fail and return false if trying to register more than
+  // kMaxDataSources types.
+  using DataSourceFactory = std::function<std::unique_ptr<DataSourceBase>()>;
+  virtual bool RegisterDataSource(const DataSourceDescriptor&,
+                                  DataSourceFactory,
+                                  DataSourceStaticState*) = 0;
+
+  // It identifies the right backend and forwards the call to it.
+  // The returned TraceWriter must be used within the same sequence (for most
+  // projects this means "same thread"). Alternatively the client needs to take
+  // care of using synchronization primitives to prevent concurrent accesses.
+  virtual std::unique_ptr<TraceWriterBase> CreateTraceWriter(
+      DataSourceStaticState*,
+      uint32_t data_source_instance_index,
+      DataSourceState*,
+      BufferExhaustedPolicy buffer_exhausted_policy) = 0;
+
+  virtual void DestroyStoppedTraceWritersForCurrentThread() = 0;
+
+  uint32_t generation(std::memory_order ord) { return generation_.load(ord); }
+
+  using InterceptorFactory = std::function<std::unique_ptr<InterceptorBase>()>;
+  virtual void RegisterInterceptor(const InterceptorDescriptor&,
+                                   InterceptorFactory,
+                                   InterceptorBase::TLSFactory,
+                                   InterceptorBase::TracePacketCallback) = 0;
+
+ protected:
+  explicit TracingMuxer(Platform* platform) : platform_(platform) {}
+
+  static TracingMuxer* instance_;
+  Platform* const platform_ = nullptr;
+
+  // Incremented every time a data source is destroyed. See tracing_tls.h.
+  std::atomic<uint32_t> generation_{};
+};
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_INTERNAL_TRACING_MUXER_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/trace_packet.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACE_PACKET_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACE_PACKET_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class AndroidEnergyEstimationBreakdown;
+class AndroidLogPacket;
+class BatteryCounters;
+class ChromeBenchmarkMetadata;
+class ChromeEventBundle;
+class ChromeMetadataPacket;
+class ClockSnapshot;
+class CpuInfo;
+class DeobfuscationMapping;
+class ExtensionDescriptor;
+class FrameTimelineEvent;
+class FtraceEventBundle;
+class FtraceStats;
+class GpuCounterEvent;
+class GpuLog;
+class GpuMemTotalEvent;
+class GpuRenderStageEvent;
+class GraphicsFrameEvent;
+class HeapGraph;
+class InitialDisplayState;
+class InodeFileMap;
+class InternedData;
+class MemoryTrackerSnapshot;
+class ModuleSymbols;
+class PackagesList;
+class PerfSample;
+class PerfettoMetatrace;
+class PowerRails;
+class ProcessDescriptor;
+class ProcessStats;
+class ProcessTree;
+class ProfilePacket;
+class ProfiledFrameSymbols;
+class SmapsPacket;
+class StreamingAllocation;
+class StreamingFree;
+class StreamingProfilePacket;
+class SysStats;
+class SystemInfo;
+class TestEvent;
+class ThreadDescriptor;
+class TraceConfig;
+class TracePacketDefaults;
+class TraceStats;
+class TracingServiceEvent;
+class TrackDescriptor;
+class TrackEvent;
+class Trigger;
+class UiState;
+class VulkanApiEvent;
+class VulkanMemoryEvent;
+
+enum TracePacket_SequenceFlags : int32_t {
+  TracePacket_SequenceFlags_SEQ_UNSPECIFIED = 0,
+  TracePacket_SequenceFlags_SEQ_INCREMENTAL_STATE_CLEARED = 1,
+  TracePacket_SequenceFlags_SEQ_NEEDS_INCREMENTAL_STATE = 2,
+};
+
+const TracePacket_SequenceFlags TracePacket_SequenceFlags_MIN = TracePacket_SequenceFlags_SEQ_UNSPECIFIED;
+const TracePacket_SequenceFlags TracePacket_SequenceFlags_MAX = TracePacket_SequenceFlags_SEQ_NEEDS_INCREMENTAL_STATE;
+
+class TracePacket_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/900, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TracePacket_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TracePacket_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TracePacket_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_timestamp() const { return at<8>().valid(); }
+  uint64_t timestamp() const { return at<8>().as_uint64(); }
+  bool has_timestamp_clock_id() const { return at<58>().valid(); }
+  uint32_t timestamp_clock_id() const { return at<58>().as_uint32(); }
+  bool has_process_tree() const { return at<2>().valid(); }
+  ::protozero::ConstBytes process_tree() const { return at<2>().as_bytes(); }
+  bool has_process_stats() const { return at<9>().valid(); }
+  ::protozero::ConstBytes process_stats() const { return at<9>().as_bytes(); }
+  bool has_inode_file_map() const { return at<4>().valid(); }
+  ::protozero::ConstBytes inode_file_map() const { return at<4>().as_bytes(); }
+  bool has_chrome_events() const { return at<5>().valid(); }
+  ::protozero::ConstBytes chrome_events() const { return at<5>().as_bytes(); }
+  bool has_clock_snapshot() const { return at<6>().valid(); }
+  ::protozero::ConstBytes clock_snapshot() const { return at<6>().as_bytes(); }
+  bool has_sys_stats() const { return at<7>().valid(); }
+  ::protozero::ConstBytes sys_stats() const { return at<7>().as_bytes(); }
+  bool has_track_event() const { return at<11>().valid(); }
+  ::protozero::ConstBytes track_event() const { return at<11>().as_bytes(); }
+  bool has_trace_config() const { return at<33>().valid(); }
+  ::protozero::ConstBytes trace_config() const { return at<33>().as_bytes(); }
+  bool has_ftrace_stats() const { return at<34>().valid(); }
+  ::protozero::ConstBytes ftrace_stats() const { return at<34>().as_bytes(); }
+  bool has_trace_stats() const { return at<35>().valid(); }
+  ::protozero::ConstBytes trace_stats() const { return at<35>().as_bytes(); }
+  bool has_profile_packet() const { return at<37>().valid(); }
+  ::protozero::ConstBytes profile_packet() const { return at<37>().as_bytes(); }
+  bool has_streaming_allocation() const { return at<74>().valid(); }
+  ::protozero::ConstBytes streaming_allocation() const { return at<74>().as_bytes(); }
+  bool has_streaming_free() const { return at<75>().valid(); }
+  ::protozero::ConstBytes streaming_free() const { return at<75>().as_bytes(); }
+  bool has_battery() const { return at<38>().valid(); }
+  ::protozero::ConstBytes battery() const { return at<38>().as_bytes(); }
+  bool has_power_rails() const { return at<40>().valid(); }
+  ::protozero::ConstBytes power_rails() const { return at<40>().as_bytes(); }
+  bool has_android_log() const { return at<39>().valid(); }
+  ::protozero::ConstBytes android_log() const { return at<39>().as_bytes(); }
+  bool has_system_info() const { return at<45>().valid(); }
+  ::protozero::ConstBytes system_info() const { return at<45>().as_bytes(); }
+  bool has_trigger() const { return at<46>().valid(); }
+  ::protozero::ConstBytes trigger() const { return at<46>().as_bytes(); }
+  bool has_packages_list() const { return at<47>().valid(); }
+  ::protozero::ConstBytes packages_list() const { return at<47>().as_bytes(); }
+  bool has_chrome_benchmark_metadata() const { return at<48>().valid(); }
+  ::protozero::ConstBytes chrome_benchmark_metadata() const { return at<48>().as_bytes(); }
+  bool has_perfetto_metatrace() const { return at<49>().valid(); }
+  ::protozero::ConstBytes perfetto_metatrace() const { return at<49>().as_bytes(); }
+  bool has_chrome_metadata() const { return at<51>().valid(); }
+  ::protozero::ConstBytes chrome_metadata() const { return at<51>().as_bytes(); }
+  bool has_gpu_counter_event() const { return at<52>().valid(); }
+  ::protozero::ConstBytes gpu_counter_event() const { return at<52>().as_bytes(); }
+  bool has_gpu_render_stage_event() const { return at<53>().valid(); }
+  ::protozero::ConstBytes gpu_render_stage_event() const { return at<53>().as_bytes(); }
+  bool has_streaming_profile_packet() const { return at<54>().valid(); }
+  ::protozero::ConstBytes streaming_profile_packet() const { return at<54>().as_bytes(); }
+  bool has_heap_graph() const { return at<56>().valid(); }
+  ::protozero::ConstBytes heap_graph() const { return at<56>().as_bytes(); }
+  bool has_graphics_frame_event() const { return at<57>().valid(); }
+  ::protozero::ConstBytes graphics_frame_event() const { return at<57>().as_bytes(); }
+  bool has_vulkan_memory_event() const { return at<62>().valid(); }
+  ::protozero::ConstBytes vulkan_memory_event() const { return at<62>().as_bytes(); }
+  bool has_gpu_log() const { return at<63>().valid(); }
+  ::protozero::ConstBytes gpu_log() const { return at<63>().as_bytes(); }
+  bool has_vulkan_api_event() const { return at<65>().valid(); }
+  ::protozero::ConstBytes vulkan_api_event() const { return at<65>().as_bytes(); }
+  bool has_perf_sample() const { return at<66>().valid(); }
+  ::protozero::ConstBytes perf_sample() const { return at<66>().as_bytes(); }
+  bool has_cpu_info() const { return at<67>().valid(); }
+  ::protozero::ConstBytes cpu_info() const { return at<67>().as_bytes(); }
+  bool has_smaps_packet() const { return at<68>().valid(); }
+  ::protozero::ConstBytes smaps_packet() const { return at<68>().as_bytes(); }
+  bool has_service_event() const { return at<69>().valid(); }
+  ::protozero::ConstBytes service_event() const { return at<69>().as_bytes(); }
+  bool has_initial_display_state() const { return at<70>().valid(); }
+  ::protozero::ConstBytes initial_display_state() const { return at<70>().as_bytes(); }
+  bool has_gpu_mem_total_event() const { return at<71>().valid(); }
+  ::protozero::ConstBytes gpu_mem_total_event() const { return at<71>().as_bytes(); }
+  bool has_memory_tracker_snapshot() const { return at<73>().valid(); }
+  ::protozero::ConstBytes memory_tracker_snapshot() const { return at<73>().as_bytes(); }
+  bool has_frame_timeline_event() const { return at<76>().valid(); }
+  ::protozero::ConstBytes frame_timeline_event() const { return at<76>().as_bytes(); }
+  bool has_android_energy_estimation_breakdown() const { return at<77>().valid(); }
+  ::protozero::ConstBytes android_energy_estimation_breakdown() const { return at<77>().as_bytes(); }
+  bool has_ui_state() const { return at<78>().valid(); }
+  ::protozero::ConstBytes ui_state() const { return at<78>().as_bytes(); }
+  bool has_profiled_frame_symbols() const { return at<55>().valid(); }
+  ::protozero::ConstBytes profiled_frame_symbols() const { return at<55>().as_bytes(); }
+  bool has_module_symbols() const { return at<61>().valid(); }
+  ::protozero::ConstBytes module_symbols() const { return at<61>().as_bytes(); }
+  bool has_deobfuscation_mapping() const { return at<64>().valid(); }
+  ::protozero::ConstBytes deobfuscation_mapping() const { return at<64>().as_bytes(); }
+  bool has_track_descriptor() const { return at<60>().valid(); }
+  ::protozero::ConstBytes track_descriptor() const { return at<60>().as_bytes(); }
+  bool has_process_descriptor() const { return at<43>().valid(); }
+  ::protozero::ConstBytes process_descriptor() const { return at<43>().as_bytes(); }
+  bool has_thread_descriptor() const { return at<44>().valid(); }
+  ::protozero::ConstBytes thread_descriptor() const { return at<44>().as_bytes(); }
+  bool has_ftrace_events() const { return at<1>().valid(); }
+  ::protozero::ConstBytes ftrace_events() const { return at<1>().as_bytes(); }
+  bool has_synchronization_marker() const { return at<36>().valid(); }
+  ::protozero::ConstBytes synchronization_marker() const { return at<36>().as_bytes(); }
+  bool has_compressed_packets() const { return at<50>().valid(); }
+  ::protozero::ConstBytes compressed_packets() const { return at<50>().as_bytes(); }
+  bool has_extension_descriptor() const { return at<72>().valid(); }
+  ::protozero::ConstBytes extension_descriptor() const { return at<72>().as_bytes(); }
+  bool has_for_testing() const { return at<900>().valid(); }
+  ::protozero::ConstBytes for_testing() const { return at<900>().as_bytes(); }
+  bool has_trusted_uid() const { return at<3>().valid(); }
+  int32_t trusted_uid() const { return at<3>().as_int32(); }
+  bool has_trusted_packet_sequence_id() const { return at<10>().valid(); }
+  uint32_t trusted_packet_sequence_id() const { return at<10>().as_uint32(); }
+  bool has_interned_data() const { return at<12>().valid(); }
+  ::protozero::ConstBytes interned_data() const { return at<12>().as_bytes(); }
+  bool has_sequence_flags() const { return at<13>().valid(); }
+  uint32_t sequence_flags() const { return at<13>().as_uint32(); }
+  bool has_incremental_state_cleared() const { return at<41>().valid(); }
+  bool incremental_state_cleared() const { return at<41>().as_bool(); }
+  bool has_trace_packet_defaults() const { return at<59>().valid(); }
+  ::protozero::ConstBytes trace_packet_defaults() const { return at<59>().as_bytes(); }
+  bool has_previous_packet_dropped() const { return at<42>().valid(); }
+  bool previous_packet_dropped() const { return at<42>().as_bool(); }
+};
+
+class TracePacket : public ::protozero::Message {
+ public:
+  using Decoder = TracePacket_Decoder;
+  enum : int32_t {
+    kTimestampFieldNumber = 8,
+    kTimestampClockIdFieldNumber = 58,
+    kProcessTreeFieldNumber = 2,
+    kProcessStatsFieldNumber = 9,
+    kInodeFileMapFieldNumber = 4,
+    kChromeEventsFieldNumber = 5,
+    kClockSnapshotFieldNumber = 6,
+    kSysStatsFieldNumber = 7,
+    kTrackEventFieldNumber = 11,
+    kTraceConfigFieldNumber = 33,
+    kFtraceStatsFieldNumber = 34,
+    kTraceStatsFieldNumber = 35,
+    kProfilePacketFieldNumber = 37,
+    kStreamingAllocationFieldNumber = 74,
+    kStreamingFreeFieldNumber = 75,
+    kBatteryFieldNumber = 38,
+    kPowerRailsFieldNumber = 40,
+    kAndroidLogFieldNumber = 39,
+    kSystemInfoFieldNumber = 45,
+    kTriggerFieldNumber = 46,
+    kPackagesListFieldNumber = 47,
+    kChromeBenchmarkMetadataFieldNumber = 48,
+    kPerfettoMetatraceFieldNumber = 49,
+    kChromeMetadataFieldNumber = 51,
+    kGpuCounterEventFieldNumber = 52,
+    kGpuRenderStageEventFieldNumber = 53,
+    kStreamingProfilePacketFieldNumber = 54,
+    kHeapGraphFieldNumber = 56,
+    kGraphicsFrameEventFieldNumber = 57,
+    kVulkanMemoryEventFieldNumber = 62,
+    kGpuLogFieldNumber = 63,
+    kVulkanApiEventFieldNumber = 65,
+    kPerfSampleFieldNumber = 66,
+    kCpuInfoFieldNumber = 67,
+    kSmapsPacketFieldNumber = 68,
+    kServiceEventFieldNumber = 69,
+    kInitialDisplayStateFieldNumber = 70,
+    kGpuMemTotalEventFieldNumber = 71,
+    kMemoryTrackerSnapshotFieldNumber = 73,
+    kFrameTimelineEventFieldNumber = 76,
+    kAndroidEnergyEstimationBreakdownFieldNumber = 77,
+    kUiStateFieldNumber = 78,
+    kProfiledFrameSymbolsFieldNumber = 55,
+    kModuleSymbolsFieldNumber = 61,
+    kDeobfuscationMappingFieldNumber = 64,
+    kTrackDescriptorFieldNumber = 60,
+    kProcessDescriptorFieldNumber = 43,
+    kThreadDescriptorFieldNumber = 44,
+    kFtraceEventsFieldNumber = 1,
+    kSynchronizationMarkerFieldNumber = 36,
+    kCompressedPacketsFieldNumber = 50,
+    kExtensionDescriptorFieldNumber = 72,
+    kForTestingFieldNumber = 900,
+    kTrustedUidFieldNumber = 3,
+    kTrustedPacketSequenceIdFieldNumber = 10,
+    kInternedDataFieldNumber = 12,
+    kSequenceFlagsFieldNumber = 13,
+    kIncrementalStateClearedFieldNumber = 41,
+    kTracePacketDefaultsFieldNumber = 59,
+    kPreviousPacketDroppedFieldNumber = 42,
+  };
+  using SequenceFlags = ::perfetto::protos::pbzero::TracePacket_SequenceFlags;
+  static const SequenceFlags SEQ_UNSPECIFIED = TracePacket_SequenceFlags_SEQ_UNSPECIFIED;
+  static const SequenceFlags SEQ_INCREMENTAL_STATE_CLEARED = TracePacket_SequenceFlags_SEQ_INCREMENTAL_STATE_CLEARED;
+  static const SequenceFlags SEQ_NEEDS_INCREMENTAL_STATE = TracePacket_SequenceFlags_SEQ_NEEDS_INCREMENTAL_STATE;
+
+  using FieldMetadata_Timestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Timestamp kTimestamp() { return {}; }
+  void set_timestamp(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Timestamp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TimestampClockId =
+    ::protozero::proto_utils::FieldMetadata<
+      58,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TimestampClockId kTimestampClockId() { return {}; }
+  void set_timestamp_clock_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TimestampClockId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProcessTree =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProcessTree,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessTree kProcessTree() { return {}; }
+  template <typename T = ProcessTree> T* set_process_tree() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_ProcessStats =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProcessStats,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessStats kProcessStats() { return {}; }
+  template <typename T = ProcessStats> T* set_process_stats() {
+    return BeginNestedMessage<T>(9);
+  }
+
+
+  using FieldMetadata_InodeFileMap =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InodeFileMap,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InodeFileMap kInodeFileMap() { return {}; }
+  template <typename T = InodeFileMap> T* set_inode_file_map() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_ChromeEvents =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeEventBundle,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeEvents kChromeEvents() { return {}; }
+  template <typename T = ChromeEventBundle> T* set_chrome_events() {
+    return BeginNestedMessage<T>(5);
+  }
+
+
+  using FieldMetadata_ClockSnapshot =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ClockSnapshot,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClockSnapshot kClockSnapshot() { return {}; }
+  template <typename T = ClockSnapshot> T* set_clock_snapshot() {
+    return BeginNestedMessage<T>(6);
+  }
+
+
+  using FieldMetadata_SysStats =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SysStats,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SysStats kSysStats() { return {}; }
+  template <typename T = SysStats> T* set_sys_stats() {
+    return BeginNestedMessage<T>(7);
+  }
+
+
+  using FieldMetadata_TrackEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TrackEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrackEvent kTrackEvent() { return {}; }
+  template <typename T = TrackEvent> T* set_track_event() {
+    return BeginNestedMessage<T>(11);
+  }
+
+
+  using FieldMetadata_TraceConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      33,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceConfig kTraceConfig() { return {}; }
+  template <typename T = TraceConfig> T* set_trace_config() {
+    return BeginNestedMessage<T>(33);
+  }
+
+
+  using FieldMetadata_FtraceStats =
+    ::protozero::proto_utils::FieldMetadata<
+      34,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FtraceStats,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FtraceStats kFtraceStats() { return {}; }
+  template <typename T = FtraceStats> T* set_ftrace_stats() {
+    return BeginNestedMessage<T>(34);
+  }
+
+
+  using FieldMetadata_TraceStats =
+    ::protozero::proto_utils::FieldMetadata<
+      35,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceStats,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceStats kTraceStats() { return {}; }
+  template <typename T = TraceStats> T* set_trace_stats() {
+    return BeginNestedMessage<T>(35);
+  }
+
+
+  using FieldMetadata_ProfilePacket =
+    ::protozero::proto_utils::FieldMetadata<
+      37,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProfilePacket,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProfilePacket kProfilePacket() { return {}; }
+  template <typename T = ProfilePacket> T* set_profile_packet() {
+    return BeginNestedMessage<T>(37);
+  }
+
+
+  using FieldMetadata_StreamingAllocation =
+    ::protozero::proto_utils::FieldMetadata<
+      74,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      StreamingAllocation,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StreamingAllocation kStreamingAllocation() { return {}; }
+  template <typename T = StreamingAllocation> T* set_streaming_allocation() {
+    return BeginNestedMessage<T>(74);
+  }
+
+
+  using FieldMetadata_StreamingFree =
+    ::protozero::proto_utils::FieldMetadata<
+      75,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      StreamingFree,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StreamingFree kStreamingFree() { return {}; }
+  template <typename T = StreamingFree> T* set_streaming_free() {
+    return BeginNestedMessage<T>(75);
+  }
+
+
+  using FieldMetadata_Battery =
+    ::protozero::proto_utils::FieldMetadata<
+      38,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BatteryCounters,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Battery kBattery() { return {}; }
+  template <typename T = BatteryCounters> T* set_battery() {
+    return BeginNestedMessage<T>(38);
+  }
+
+
+  using FieldMetadata_PowerRails =
+    ::protozero::proto_utils::FieldMetadata<
+      40,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PowerRails,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PowerRails kPowerRails() { return {}; }
+  template <typename T = PowerRails> T* set_power_rails() {
+    return BeginNestedMessage<T>(40);
+  }
+
+
+  using FieldMetadata_AndroidLog =
+    ::protozero::proto_utils::FieldMetadata<
+      39,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AndroidLogPacket,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AndroidLog kAndroidLog() { return {}; }
+  template <typename T = AndroidLogPacket> T* set_android_log() {
+    return BeginNestedMessage<T>(39);
+  }
+
+
+  using FieldMetadata_SystemInfo =
+    ::protozero::proto_utils::FieldMetadata<
+      45,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SystemInfo,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SystemInfo kSystemInfo() { return {}; }
+  template <typename T = SystemInfo> T* set_system_info() {
+    return BeginNestedMessage<T>(45);
+  }
+
+
+  using FieldMetadata_Trigger =
+    ::protozero::proto_utils::FieldMetadata<
+      46,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Trigger,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Trigger kTrigger() { return {}; }
+  template <typename T = Trigger> T* set_trigger() {
+    return BeginNestedMessage<T>(46);
+  }
+
+
+  using FieldMetadata_PackagesList =
+    ::protozero::proto_utils::FieldMetadata<
+      47,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PackagesList,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PackagesList kPackagesList() { return {}; }
+  template <typename T = PackagesList> T* set_packages_list() {
+    return BeginNestedMessage<T>(47);
+  }
+
+
+  using FieldMetadata_ChromeBenchmarkMetadata =
+    ::protozero::proto_utils::FieldMetadata<
+      48,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeBenchmarkMetadata,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeBenchmarkMetadata kChromeBenchmarkMetadata() { return {}; }
+  template <typename T = ChromeBenchmarkMetadata> T* set_chrome_benchmark_metadata() {
+    return BeginNestedMessage<T>(48);
+  }
+
+
+  using FieldMetadata_PerfettoMetatrace =
+    ::protozero::proto_utils::FieldMetadata<
+      49,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PerfettoMetatrace,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PerfettoMetatrace kPerfettoMetatrace() { return {}; }
+  template <typename T = PerfettoMetatrace> T* set_perfetto_metatrace() {
+    return BeginNestedMessage<T>(49);
+  }
+
+
+  using FieldMetadata_ChromeMetadata =
+    ::protozero::proto_utils::FieldMetadata<
+      51,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeMetadataPacket,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeMetadata kChromeMetadata() { return {}; }
+  template <typename T = ChromeMetadataPacket> T* set_chrome_metadata() {
+    return BeginNestedMessage<T>(51);
+  }
+
+
+  using FieldMetadata_GpuCounterEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      52,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuCounterEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuCounterEvent kGpuCounterEvent() { return {}; }
+  template <typename T = GpuCounterEvent> T* set_gpu_counter_event() {
+    return BeginNestedMessage<T>(52);
+  }
+
+
+  using FieldMetadata_GpuRenderStageEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      53,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuRenderStageEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuRenderStageEvent kGpuRenderStageEvent() { return {}; }
+  template <typename T = GpuRenderStageEvent> T* set_gpu_render_stage_event() {
+    return BeginNestedMessage<T>(53);
+  }
+
+
+  using FieldMetadata_StreamingProfilePacket =
+    ::protozero::proto_utils::FieldMetadata<
+      54,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      StreamingProfilePacket,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StreamingProfilePacket kStreamingProfilePacket() { return {}; }
+  template <typename T = StreamingProfilePacket> T* set_streaming_profile_packet() {
+    return BeginNestedMessage<T>(54);
+  }
+
+
+  using FieldMetadata_HeapGraph =
+    ::protozero::proto_utils::FieldMetadata<
+      56,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      HeapGraph,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapGraph kHeapGraph() { return {}; }
+  template <typename T = HeapGraph> T* set_heap_graph() {
+    return BeginNestedMessage<T>(56);
+  }
+
+
+  using FieldMetadata_GraphicsFrameEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      57,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GraphicsFrameEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GraphicsFrameEvent kGraphicsFrameEvent() { return {}; }
+  template <typename T = GraphicsFrameEvent> T* set_graphics_frame_event() {
+    return BeginNestedMessage<T>(57);
+  }
+
+
+  using FieldMetadata_VulkanMemoryEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      62,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      VulkanMemoryEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VulkanMemoryEvent kVulkanMemoryEvent() { return {}; }
+  template <typename T = VulkanMemoryEvent> T* set_vulkan_memory_event() {
+    return BeginNestedMessage<T>(62);
+  }
+
+
+  using FieldMetadata_GpuLog =
+    ::protozero::proto_utils::FieldMetadata<
+      63,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuLog,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuLog kGpuLog() { return {}; }
+  template <typename T = GpuLog> T* set_gpu_log() {
+    return BeginNestedMessage<T>(63);
+  }
+
+
+  using FieldMetadata_VulkanApiEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      65,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      VulkanApiEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VulkanApiEvent kVulkanApiEvent() { return {}; }
+  template <typename T = VulkanApiEvent> T* set_vulkan_api_event() {
+    return BeginNestedMessage<T>(65);
+  }
+
+
+  using FieldMetadata_PerfSample =
+    ::protozero::proto_utils::FieldMetadata<
+      66,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PerfSample,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PerfSample kPerfSample() { return {}; }
+  template <typename T = PerfSample> T* set_perf_sample() {
+    return BeginNestedMessage<T>(66);
+  }
+
+
+  using FieldMetadata_CpuInfo =
+    ::protozero::proto_utils::FieldMetadata<
+      67,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CpuInfo,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuInfo kCpuInfo() { return {}; }
+  template <typename T = CpuInfo> T* set_cpu_info() {
+    return BeginNestedMessage<T>(67);
+  }
+
+
+  using FieldMetadata_SmapsPacket =
+    ::protozero::proto_utils::FieldMetadata<
+      68,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SmapsPacket,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SmapsPacket kSmapsPacket() { return {}; }
+  template <typename T = SmapsPacket> T* set_smaps_packet() {
+    return BeginNestedMessage<T>(68);
+  }
+
+
+  using FieldMetadata_ServiceEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      69,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TracingServiceEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ServiceEvent kServiceEvent() { return {}; }
+  template <typename T = TracingServiceEvent> T* set_service_event() {
+    return BeginNestedMessage<T>(69);
+  }
+
+
+  using FieldMetadata_InitialDisplayState =
+    ::protozero::proto_utils::FieldMetadata<
+      70,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InitialDisplayState,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InitialDisplayState kInitialDisplayState() { return {}; }
+  template <typename T = InitialDisplayState> T* set_initial_display_state() {
+    return BeginNestedMessage<T>(70);
+  }
+
+
+  using FieldMetadata_GpuMemTotalEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      71,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuMemTotalEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuMemTotalEvent kGpuMemTotalEvent() { return {}; }
+  template <typename T = GpuMemTotalEvent> T* set_gpu_mem_total_event() {
+    return BeginNestedMessage<T>(71);
+  }
+
+
+  using FieldMetadata_MemoryTrackerSnapshot =
+    ::protozero::proto_utils::FieldMetadata<
+      73,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MemoryTrackerSnapshot,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MemoryTrackerSnapshot kMemoryTrackerSnapshot() { return {}; }
+  template <typename T = MemoryTrackerSnapshot> T* set_memory_tracker_snapshot() {
+    return BeginNestedMessage<T>(73);
+  }
+
+
+  using FieldMetadata_FrameTimelineEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      76,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FrameTimelineEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FrameTimelineEvent kFrameTimelineEvent() { return {}; }
+  template <typename T = FrameTimelineEvent> T* set_frame_timeline_event() {
+    return BeginNestedMessage<T>(76);
+  }
+
+
+  using FieldMetadata_AndroidEnergyEstimationBreakdown =
+    ::protozero::proto_utils::FieldMetadata<
+      77,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AndroidEnergyEstimationBreakdown,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AndroidEnergyEstimationBreakdown kAndroidEnergyEstimationBreakdown() { return {}; }
+  template <typename T = AndroidEnergyEstimationBreakdown> T* set_android_energy_estimation_breakdown() {
+    return BeginNestedMessage<T>(77);
+  }
+
+
+  using FieldMetadata_UiState =
+    ::protozero::proto_utils::FieldMetadata<
+      78,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      UiState,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UiState kUiState() { return {}; }
+  template <typename T = UiState> T* set_ui_state() {
+    return BeginNestedMessage<T>(78);
+  }
+
+
+  using FieldMetadata_ProfiledFrameSymbols =
+    ::protozero::proto_utils::FieldMetadata<
+      55,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProfiledFrameSymbols,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProfiledFrameSymbols kProfiledFrameSymbols() { return {}; }
+  template <typename T = ProfiledFrameSymbols> T* set_profiled_frame_symbols() {
+    return BeginNestedMessage<T>(55);
+  }
+
+
+  using FieldMetadata_ModuleSymbols =
+    ::protozero::proto_utils::FieldMetadata<
+      61,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ModuleSymbols,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ModuleSymbols kModuleSymbols() { return {}; }
+  template <typename T = ModuleSymbols> T* set_module_symbols() {
+    return BeginNestedMessage<T>(61);
+  }
+
+
+  using FieldMetadata_DeobfuscationMapping =
+    ::protozero::proto_utils::FieldMetadata<
+      64,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DeobfuscationMapping,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DeobfuscationMapping kDeobfuscationMapping() { return {}; }
+  template <typename T = DeobfuscationMapping> T* set_deobfuscation_mapping() {
+    return BeginNestedMessage<T>(64);
+  }
+
+
+  using FieldMetadata_TrackDescriptor =
+    ::protozero::proto_utils::FieldMetadata<
+      60,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TrackDescriptor,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrackDescriptor kTrackDescriptor() { return {}; }
+  template <typename T = TrackDescriptor> T* set_track_descriptor() {
+    return BeginNestedMessage<T>(60);
+  }
+
+
+  using FieldMetadata_ProcessDescriptor =
+    ::protozero::proto_utils::FieldMetadata<
+      43,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProcessDescriptor,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessDescriptor kProcessDescriptor() { return {}; }
+  template <typename T = ProcessDescriptor> T* set_process_descriptor() {
+    return BeginNestedMessage<T>(43);
+  }
+
+
+  using FieldMetadata_ThreadDescriptor =
+    ::protozero::proto_utils::FieldMetadata<
+      44,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ThreadDescriptor,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadDescriptor kThreadDescriptor() { return {}; }
+  template <typename T = ThreadDescriptor> T* set_thread_descriptor() {
+    return BeginNestedMessage<T>(44);
+  }
+
+
+  using FieldMetadata_FtraceEvents =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FtraceEventBundle,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FtraceEvents kFtraceEvents() { return {}; }
+  template <typename T = FtraceEventBundle> T* set_ftrace_events() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_SynchronizationMarker =
+    ::protozero::proto_utils::FieldMetadata<
+      36,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBytes,
+      std::string,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SynchronizationMarker kSynchronizationMarker() { return {}; }
+  void set_synchronization_marker(const uint8_t* data, size_t size) {
+    AppendBytes(FieldMetadata_SynchronizationMarker::kFieldId, data, size);
+  }
+  void set_synchronization_marker(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_SynchronizationMarker::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBytes>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CompressedPackets =
+    ::protozero::proto_utils::FieldMetadata<
+      50,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBytes,
+      std::string,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CompressedPackets kCompressedPackets() { return {}; }
+  void set_compressed_packets(const uint8_t* data, size_t size) {
+    AppendBytes(FieldMetadata_CompressedPackets::kFieldId, data, size);
+  }
+  void set_compressed_packets(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_CompressedPackets::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBytes>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExtensionDescriptor =
+    ::protozero::proto_utils::FieldMetadata<
+      72,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ExtensionDescriptor,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExtensionDescriptor kExtensionDescriptor() { return {}; }
+  template <typename T = ExtensionDescriptor> T* set_extension_descriptor() {
+    return BeginNestedMessage<T>(72);
+  }
+
+
+  using FieldMetadata_ForTesting =
+    ::protozero::proto_utils::FieldMetadata<
+      900,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TestEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ForTesting kForTesting() { return {}; }
+  template <typename T = TestEvent> T* set_for_testing() {
+    return BeginNestedMessage<T>(900);
+  }
+
+
+  using FieldMetadata_TrustedUid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrustedUid kTrustedUid() { return {}; }
+  void set_trusted_uid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TrustedUid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TrustedPacketSequenceId =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrustedPacketSequenceId kTrustedPacketSequenceId() { return {}; }
+  void set_trusted_packet_sequence_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TrustedPacketSequenceId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_InternedData =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedData,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InternedData kInternedData() { return {}; }
+  template <typename T = InternedData> T* set_interned_data() {
+    return BeginNestedMessage<T>(12);
+  }
+
+
+  using FieldMetadata_SequenceFlags =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SequenceFlags kSequenceFlags() { return {}; }
+  void set_sequence_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SequenceFlags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IncrementalStateCleared =
+    ::protozero::proto_utils::FieldMetadata<
+      41,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IncrementalStateCleared kIncrementalStateCleared() { return {}; }
+  void set_incremental_state_cleared(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_IncrementalStateCleared::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TracePacketDefaults =
+    ::protozero::proto_utils::FieldMetadata<
+      59,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TracePacketDefaults,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TracePacketDefaults kTracePacketDefaults() { return {}; }
+  template <typename T = TracePacketDefaults> T* set_trace_packet_defaults() {
+    return BeginNestedMessage<T>(59);
+  }
+
+
+  using FieldMetadata_PreviousPacketDropped =
+    ::protozero::proto_utils::FieldMetadata<
+      42,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PreviousPacketDropped kPreviousPacketDropped() { return {}; }
+  void set_previous_packet_dropped(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_PreviousPacketDropped::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_DATA_SOURCE_H_
+#define INCLUDE_PERFETTO_TRACING_DATA_SOURCE_H_
+
+// This header contains the key class (DataSource) that a producer app should
+// override in order to create a custom data source that gets tracing Start/Stop
+// notifications and emits tracing data.
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <array>
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <mutex>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/buffer_exhausted_policy.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/basic_types.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/data_source_internal.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/tracing_muxer.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/locked_handle.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
+
+// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
+
+// PERFETTO_COMPONENT_EXPORT is used to mark symbols in Perfetto's headers
+// (typically templates) that are defined by the user outside of Perfetto and
+// should be made visible outside the current module. (e.g., in Chrome's
+// component build).
+#if !defined(PERFETTO_COMPONENT_EXPORT)
+#define PERFETTO_COMPONENT_EXPORT
+#endif
+
+namespace perfetto {
+namespace internal {
+class TracingMuxerImpl;
+class TrackEventCategoryRegistry;
+template <typename, const internal::TrackEventCategoryRegistry*>
+class TrackEventDataSource;
+}  // namespace internal
+
+// Base class with the virtual methods to get start/stop notifications.
+// Embedders are supposed to derive the templated version below, not this one.
+class PERFETTO_EXPORT DataSourceBase {
+ public:
+  virtual ~DataSourceBase();
+
+  // TODO(primiano): change the const& args below to be pointers instead. It
+  // makes it more awkward to handle output arguments and require mutable(s).
+  // This requires synchronizing a breaking API change for existing embedders.
+
+  // OnSetup() is invoked when tracing is configured. In most cases this happens
+  // just before starting the trace. In the case of deferred start (see
+  // deferred_start in trace_config.proto) start might happen later.
+  class SetupArgs {
+   public:
+    // This is valid only within the scope of the OnSetup() call and must not
+    // be retained.
+    const DataSourceConfig* config = nullptr;
+
+    // The index of this data source instance (0..kMaxDataSourceInstances - 1).
+    uint32_t internal_instance_index = 0;
+  };
+  virtual void OnSetup(const SetupArgs&);
+
+  class StartArgs {
+   public:
+    // The index of this data source instance (0..kMaxDataSourceInstances - 1).
+    uint32_t internal_instance_index = 0;
+  };
+  virtual void OnStart(const StartArgs&);
+
+  class StopArgs {
+   public:
+    virtual ~StopArgs();
+
+    // HandleAsynchronously() can optionally be called to defer the tracing
+    // session stop and write tracing data just before stopping.
+    // This function returns a closure that must be invoked after the last
+    // trace events have been emitted. The returned closure can be called from
+    // any thread. The caller also needs to explicitly call TraceContext.Flush()
+    // from the last Trace() lambda invocation because no other implicit flushes
+    // will happen after the stop signal.
+    // When this function is called, the tracing service will defer the stop of
+    // the tracing session until the returned closure is invoked.
+    // However, the caller cannot hang onto this closure for too long. The
+    // tracing service will forcefully stop the tracing session without waiting
+    // for pending producers after TraceConfig.data_source_stop_timeout_ms
+    // (default: 5s, can be overridden by Consumers when starting a trace).
+    // If the closure is called after this timeout an error will be logged and
+    // the trace data emitted will not be present in the trace. No other
+    // functional side effects (e.g. crashes or corruptions) will happen. In
+    // other words, it is fine to accidentally hold onto this closure for too
+    // long but, if that happens, some tracing data will be lost.
+    virtual std::function<void()> HandleStopAsynchronously() const = 0;
+
+    // The index of this data source instance (0..kMaxDataSourceInstances - 1).
+    uint32_t internal_instance_index = 0;
+  };
+  virtual void OnStop(const StopArgs&);
+};
+
+struct DefaultDataSourceTraits {
+  // |IncrementalStateType| can optionally be used store custom per-sequence
+  // incremental data (e.g., interning tables). It should have a Clear() method
+  // for when incremental state needs to be cleared. See
+  // TraceContext::GetIncrementalState().
+  using IncrementalStateType = void;
+
+  // Allows overriding what type of thread-local state configuration the data
+  // source uses. By default every data source gets independent thread-local
+  // state, which means every instance uses separate trace writers and
+  // incremental state even on the same thread. Some data sources (most notably
+  // the track event data source) want to share trace writers and incremental
+  // state on the same thread.
+  static internal::DataSourceThreadLocalState* GetDataSourceTLS(
+      internal::DataSourceStaticState* static_state,
+      internal::TracingTLS* root_tls) {
+    auto* ds_tls = &root_tls->data_sources_tls[static_state->index];
+    // The per-type TLS is either zero-initialized or must have been initialized
+    // for this specific data source type.
+    assert(!ds_tls->static_state ||
+           ds_tls->static_state->index == static_state->index);
+    return ds_tls;
+  }
+};
+
+// Templated base class meant to be derived by embedders to create a custom data
+// source. DataSourceType must be the type of the derived class itself, e.g.:
+// class MyDataSource : public DataSourceBase<MyDataSource> {...}.
+//
+// |DataSourceTraits| allows customizing the behavior of the data source. See
+// |DefaultDataSourceTraits|.
+template <typename DataSourceType,
+          typename DataSourceTraits = DefaultDataSourceTraits>
+class DataSource : public DataSourceBase {
+  struct DefaultTracePointTraits;
+
+ public:
+  // The BufferExhaustedPolicy to use for TraceWriters of this DataSource.
+  // Override this in your DataSource class to change the default, which is to
+  // drop data on shared memory overruns.
+  constexpr static BufferExhaustedPolicy kBufferExhaustedPolicy =
+      BufferExhaustedPolicy::kDrop;
+
+  // Argument passed to the lambda function passed to Trace() (below).
+  class TraceContext {
+   public:
+    using TracePacketHandle =
+        ::protozero::MessageHandle<::perfetto::protos::pbzero::TracePacket>;
+
+    TraceContext(TraceContext&&) noexcept = default;
+    ~TraceContext() {
+      // If the data source is being intercepted, flush the trace writer after
+      // each trace point to make sure the interceptor sees the data right away.
+      if (PERFETTO_UNLIKELY(tls_inst_->is_intercepted))
+        Flush();
+    }
+
+    TracePacketHandle NewTracePacket() {
+      return tls_inst_->trace_writer->NewTracePacket();
+    }
+
+    // Forces a commit of the thread-local tracing data written so far to the
+    // service. This is almost never required (tracing data is periodically
+    // committed as trace pages are filled up) and has a non-negligible
+    // performance hit (requires an IPC + refresh of the current thread-local
+    // chunk). The only case when this should be used is when handling OnStop()
+    // asynchronously, to ensure sure that the data is committed before the
+    // Stop timeout expires.
+    // The TracePacketHandle obtained by the last NewTracePacket() call must be
+    // finalized before calling Flush() (either implicitly by going out of scope
+    // or by explicitly calling Finalize()).
+    // |cb| is an optional callback. When non-null it will request the
+    // service to ACK the flush and will be invoked on an internal thread after
+    // the service has  acknowledged it. The callback might be NEVER INVOKED if
+    // the service crashes or the IPC connection is dropped.
+    void Flush(std::function<void()> cb = {}) {
+      tls_inst_->trace_writer->Flush(cb);
+    }
+
+    // Returns the number of bytes written on the current thread by the current
+    // data-source since its creation.
+    // This can be useful for splitting protos that might grow very large.
+    uint64_t written() { return tls_inst_->trace_writer->written(); }
+
+    // Returns a RAII handle to access the data source instance, guaranteeing
+    // that it won't be deleted on another thread (because of trace stopping)
+    // while accessing it from within the Trace() lambda.
+    // The returned handle can be invalid (nullptr) if tracing is stopped
+    // immediately before calling this. The caller is supposed to check for its
+    // validity before using it. After checking, the handle is guaranteed to
+    // remain valid until the handle goes out of scope.
+    LockedHandle<DataSourceType> GetDataSourceLocked() {
+      auto* internal_state = static_state_.TryGet(instance_index_);
+      if (!internal_state)
+        return LockedHandle<DataSourceType>();
+      return LockedHandle<DataSourceType>(
+          &internal_state->lock,
+          static_cast<DataSourceType*>(internal_state->data_source.get()));
+    }
+
+    typename DataSourceTraits::IncrementalStateType* GetIncrementalState() {
+      // Recreate incremental state data if it has been reset by the service.
+      if (tls_inst_->incremental_state_generation !=
+          static_state_.incremental_state_generation.load(
+              std::memory_order_relaxed)) {
+        tls_inst_->incremental_state.reset();
+        CreateIncrementalState(tls_inst_);
+      }
+      return reinterpret_cast<typename DataSourceTraits::IncrementalStateType*>(
+          tls_inst_->incremental_state.get());
+    }
+
+   private:
+    friend class DataSource;
+    template <typename, const internal::TrackEventCategoryRegistry*>
+    friend class internal::TrackEventDataSource;
+    TraceContext(internal::DataSourceInstanceThreadLocalState* tls_inst,
+                 uint32_t instance_index)
+        : tls_inst_(tls_inst), instance_index_(instance_index) {}
+    TraceContext(const TraceContext&) = delete;
+    TraceContext& operator=(const TraceContext&) = delete;
+
+    internal::DataSourceInstanceThreadLocalState* const tls_inst_;
+    uint32_t const instance_index_;
+  };
+
+  // The main tracing method. Tracing code should call this passing a lambda as
+  // argument, with the following signature: void(TraceContext).
+  // The lambda will be called synchronously (i.e., always before Trace()
+  // returns) only if tracing is enabled and the data source has been enabled in
+  // the tracing config.
+  // The lambda can be called more than once per Trace() call, in the case of
+  // concurrent tracing sessions (or even if the data source is instantiated
+  // twice within the same trace config).
+  template <typename Lambda>
+  static void Trace(Lambda tracing_fn) {
+    CallIfEnabled<DefaultTracePointTraits>([&tracing_fn](uint32_t instances) {
+      TraceWithInstances<DefaultTracePointTraits>(instances,
+                                                  std::move(tracing_fn));
+    });
+  }
+
+  // An efficient trace point guard for checking if this data source is active.
+  // |callback| is a function which will only be called if there are active
+  // instances. It is given an instance state parameter, which should be passed
+  // to TraceWithInstances() to actually record trace data.
+  template <typename Traits = DefaultTracePointTraits, typename Callback>
+  static void CallIfEnabled(Callback callback,
+                            typename Traits::TracePointData trace_point_data =
+                                {}) PERFETTO_ALWAYS_INLINE {
+    // |instances| is a per-class bitmap that tells:
+    // 1. If the data source is enabled at all.
+    // 2. The index of the slot within |static_state_| that holds the instance
+    //    state. In turn this allows to map the data source to the tracing
+    //    session and buffers.
+    // memory_order_relaxed is okay because:
+    // - |instances| is re-read with an acquire barrier below if this succeeds.
+    // - The code between this point and the acquire-load is based on static
+    //    storage which has indefinite lifetime.
+    uint32_t instances = Traits::GetActiveInstances(trace_point_data)
+                             ->load(std::memory_order_relaxed);
+
+    // This is the tracing fast-path. Bail out immediately if tracing is not
+    // enabled (or tracing is enabled but not for this data source).
+    if (PERFETTO_LIKELY(!instances))
+      return;
+    callback(instances);
+  }
+
+  // The "lower half" of a trace point which actually performs tracing after
+  // this data source has been determined to be active.
+  // |instances| must be the instance state value retrieved through
+  // CallIfEnabled().
+  // |tracing_fn| will be called to record trace data as in Trace().
+  //
+  // |trace_point_data| is an optional parameter given to |Traits::
+  // GetActiveInstances| to make it possible to use custom storage for
+  // the data source enabled state. This is, for example, used by TrackEvent to
+  // implement per-tracing category enabled states.
+  //
+  // TODO(primiano): all the stuff below should be outlined from the trace
+  // point. Or at least we should have some compile-time traits like
+  // kOptimizeBinarySize / kOptimizeTracingLatency.
+  template <typename Traits = DefaultTracePointTraits, typename Lambda>
+  static void TraceWithInstances(
+      uint32_t instances,
+      Lambda tracing_fn,
+      typename Traits::TracePointData trace_point_data = {}) {
+    PERFETTO_DCHECK(instances);
+    constexpr auto kMaxDataSourceInstances = internal::kMaxDataSourceInstances;
+
+    // See tracing_muxer.h for the structure of the TLS.
+    auto* tracing_impl = internal::TracingMuxer::Get();
+    if (PERFETTO_UNLIKELY(!tls_state_))
+      tls_state_ = GetOrCreateDataSourceTLS(&static_state_);
+
+    // Avoid re-entering the trace point recursively.
+    if (PERFETTO_UNLIKELY(tls_state_->root_tls->is_in_trace_point))
+      return;
+    internal::ScopedReentrancyAnnotator scoped_annotator(*tls_state_->root_tls);
+
+    // TracingTLS::generation is a global monotonic counter that is incremented
+    // every time a tracing session is stopped. We use that as a signal to force
+    // a slow-path garbage collection of all the trace writers for the current
+    // thread and to destroy the ones that belong to tracing sessions that have
+    // ended. This is to avoid having too many TraceWriter instances alive, each
+    // holding onto one chunk of the shared memory buffer.
+    // Rationale why memory_order_relaxed should be fine:
+    // - The TraceWriter object that we use is always constructed and destructed
+    //   on the current thread. There is no risk of accessing a half-initialized
+    //   TraceWriter (which would be really bad).
+    // - In the worst case, in the case of a race on the generation check, we
+    //   might end up using a TraceWriter for the same data source that belongs
+    //   to a stopped session. This is not really wrong, as we don't give any
+    //   guarantee on the global atomicity of the stop. In the worst case the
+    //   service will reject the data commit if this arrives too late.
+
+    if (PERFETTO_UNLIKELY(
+            tls_state_->root_tls->generation !=
+            tracing_impl->generation(std::memory_order_relaxed))) {
+      // Will update root_tls->generation.
+      tracing_impl->DestroyStoppedTraceWritersForCurrentThread();
+    }
+
+    for (uint32_t i = 0; i < kMaxDataSourceInstances; i++) {
+      internal::DataSourceState* instance_state =
+          static_state_.TryGetCached(instances, i);
+      if (!instance_state)
+        continue;
+
+      // Even if we passed the check above, the DataSourceInstance might be
+      // still destroyed concurrently while this code runs. The code below is
+      // designed to deal with such race, as follows:
+      // - We don't access the user-defined data source instance state. The only
+      //   bits of state we use are |backend_id| and |buffer_id|.
+      // - Beyond those two integers, we access only the TraceWriter here. The
+      //   TraceWriter is always safe because it lives on the TLS.
+      // - |instance_state| is backed by static storage, so the pointer is
+      //   always valid, even after the data source instance is destroyed.
+      // - In the case of a race-on-destruction, we'll still see the latest
+      //   backend_id and buffer_id and in the worst case keep trying writing
+      //   into the tracing shared memory buffer after stopped. But this isn't
+      //   really any worse than the case of the stop IPC being delayed by the
+      //   kernel scheduler. The tracing service is robust against data commit
+      //   attemps made after tracing is stopped.
+      // There is a theoretical race that would case the wrong behavior w.r.t
+      // writing data in the wrong buffer, but it's so rare that we ignore it:
+      // if the data source is stopped and started kMaxDataSourceInstances
+      // times (so that the same id is recycled) while we are in this function,
+      // we might end up reusing the old data source's backend_id and buffer_id
+      // for the new one, because we don't see the generation change past this
+      // point. But stopping and starting tracing (even once) takes so much
+      // handshaking to make this extremely unrealistic.
+
+      auto& tls_inst = tls_state_->per_instance[i];
+      if (PERFETTO_UNLIKELY(!tls_inst.trace_writer)) {
+        // Here we need an acquire barrier, which matches the release-store made
+        // by TracingMuxerImpl::SetupDataSource(), to ensure that the backend_id
+        // and buffer_id are consistent.
+        instances = Traits::GetActiveInstances(trace_point_data)
+                        ->load(std::memory_order_acquire);
+        instance_state = static_state_.TryGetCached(instances, i);
+        if (!instance_state || !instance_state->trace_lambda_enabled)
+          continue;
+        tls_inst.backend_id = instance_state->backend_id;
+        tls_inst.backend_connection_id = instance_state->backend_connection_id;
+        tls_inst.buffer_id = instance_state->buffer_id;
+        tls_inst.data_source_instance_id =
+            instance_state->data_source_instance_id;
+        tls_inst.is_intercepted = instance_state->interceptor_id != 0;
+        tls_inst.trace_writer = tracing_impl->CreateTraceWriter(
+            &static_state_, i, instance_state,
+            DataSourceType::kBufferExhaustedPolicy);
+        CreateIncrementalState(&tls_inst);
+
+        // Even in the case of out-of-IDs, SharedMemoryArbiterImpl returns a
+        // NullTraceWriter. The returned pointer should never be null.
+        assert(tls_inst.trace_writer);
+      }
+
+      tracing_fn(TraceContext(&tls_inst, i));
+    }
+  }
+
+  // Registers the data source on all tracing backends, including ones that
+  // connect after the registration. Doing so enables the data source to receive
+  // Setup/Start/Stop notifications and makes the Trace() method work when
+  // tracing is enabled and the data source is selected.
+  // This must be called after Tracing::Initialize().
+  // Can return false to signal failure if attemping to register more than
+  // kMaxDataSources (32) data sources types or if tracing hasn't been
+  // initialized.
+  // The optional |constructor_args| will be passed to the data source when it
+  // is constructed.
+  template <class... Args>
+  static bool Register(const DataSourceDescriptor& descriptor,
+                       const Args&... constructor_args) {
+    // Silences -Wunused-variable warning in case the trace method is not used
+    // by the translation unit that declares the data source.
+    (void)static_state_;
+    (void)tls_state_;
+
+    auto factory = [constructor_args...]() {
+      return std::unique_ptr<DataSourceBase>(
+          new DataSourceType(constructor_args...));
+    };
+    auto* tracing_impl = internal::TracingMuxer::Get();
+    return tracing_impl->RegisterDataSource(descriptor, factory,
+                                            &static_state_);
+  }
+
+ private:
+  // Traits for customizing the behavior of a specific trace point.
+  struct DefaultTracePointTraits {
+    // By default, every call to DataSource::Trace() will record trace events
+    // for every active instance of that data source. A single trace point can,
+    // however, use a custom set of enable flags for more fine grained control
+    // of when that trace point is active.
+    //
+    // DANGER: when doing this, the data source must use the appropriate memory
+    // fences when changing the state of the bitmap.
+    //
+    // |TraceWithInstances| may be optionally given an additional parameter for
+    // looking up the enable flags. That parameter is passed as |TracePointData|
+    // to |GetActiveInstances|. This is, for example, used by TrackEvent to
+    // implement per-category enabled states.
+    struct TracePointData {};
+    static constexpr std::atomic<uint32_t>* GetActiveInstances(TracePointData) {
+      return &static_state_.valid_instances;
+    }
+  };
+
+  // Create the user provided incremental state in the given thread-local
+  // storage. Note: The second parameter here is used to specialize the case
+  // where there is no incremental state type.
+  template <typename T>
+  static void CreateIncrementalStateImpl(
+      internal::DataSourceInstanceThreadLocalState* tls_inst,
+      const T*) {
+    PERFETTO_DCHECK(!tls_inst->incremental_state);
+    tls_inst->incremental_state_generation =
+        static_state_.incremental_state_generation.load(
+            std::memory_order_relaxed);
+    tls_inst->incremental_state =
+        internal::DataSourceInstanceThreadLocalState::IncrementalStatePointer(
+            reinterpret_cast<void*>(new T()),
+            [](void* p) { delete reinterpret_cast<T*>(p); });
+  }
+
+  static void CreateIncrementalStateImpl(
+      internal::DataSourceInstanceThreadLocalState*,
+      const void*) {}
+
+  static void CreateIncrementalState(
+      internal::DataSourceInstanceThreadLocalState* tls_inst) {
+    CreateIncrementalStateImpl(
+        tls_inst,
+        static_cast<typename DataSourceTraits::IncrementalStateType*>(nullptr));
+  }
+
+  // Note that the returned object is one per-thread per-data-source-type, NOT
+  // per data-source *instance*.
+  static internal::DataSourceThreadLocalState* GetOrCreateDataSourceTLS(
+      internal::DataSourceStaticState* static_state) {
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_IOS)
+    PERFETTO_FATAL("Data source TLS not supported on iOS, see b/158814068");
+#endif
+    auto* tracing_impl = internal::TracingMuxer::Get();
+    internal::TracingTLS* root_tls = tracing_impl->GetOrCreateTracingTLS();
+    internal::DataSourceThreadLocalState* ds_tls =
+        DataSourceTraits::GetDataSourceTLS(static_state, root_tls);
+    // We keep re-initializing as the initialization is idempotent and not worth
+    // the code for extra checks.
+    ds_tls->static_state = static_state;
+    assert(!ds_tls->root_tls || ds_tls->root_tls == root_tls);
+    ds_tls->root_tls = root_tls;
+    return ds_tls;
+  }
+
+  // Static state. Accessed by the static Trace() method fastpaths.
+  static internal::DataSourceStaticState static_state_;
+
+  // This TLS object is a cached raw pointer and has deliberately no destructor.
+  // The Platform implementation is supposed to create and manage the lifetime
+  // of the Platform::ThreadLocalObject and take care of destroying it.
+  // This is because non-POD thread_local variables have subtleties (global
+  // destructors) that we need to defer to the embedder. In chromium's platform
+  // implementation, for instance, the tls slot is implemented using
+  // chromium's base::ThreadLocalStorage.
+  static PERFETTO_THREAD_LOCAL internal::DataSourceThreadLocalState* tls_state_;
+};
+
+// static
+template <typename T, typename D>
+internal::DataSourceStaticState DataSource<T, D>::static_state_;
+// static
+template <typename T, typename D>
+PERFETTO_THREAD_LOCAL internal::DataSourceThreadLocalState*
+    DataSource<T, D>::tls_state_;
+
+}  // namespace perfetto
+
+// If placed at the end of a macro declaration, eats the semicolon at the end of
+// the macro invocation (e.g., "MACRO(...);") to avoid warnings about extra
+// semicolons.
+#define PERFETTO_INTERNAL_SWALLOW_SEMICOLON() \
+  extern int perfetto_internal_unused
+
+// This macro must be used once for each data source next to the data source's
+// declaration.
+#define PERFETTO_DECLARE_DATA_SOURCE_STATIC_MEMBERS(...)              \
+  template <>                                                         \
+  PERFETTO_COMPONENT_EXPORT perfetto::internal::DataSourceStaticState \
+      perfetto::DataSource<__VA_ARGS__>::static_state_;               \
+  template <>                                                         \
+  PERFETTO_COMPONENT_EXPORT PERFETTO_THREAD_LOCAL                     \
+      perfetto::internal::DataSourceThreadLocalState*                 \
+          perfetto::DataSource<__VA_ARGS__>::tls_state_
+
+// MSVC has a bug where explicit template member specialization declarations
+// can't have thread_local as the storage class specifier. The generated code
+// seems correct without the specifier, so drop it until the bug gets fixed.
+// See https://developercommunity2.visualstudio.com/t/Unable-to-specialize-
+// static-thread_local/1302689.
+#if PERFETTO_BUILDFLAG(PERFETTO_COMPILER_MSVC)
+#define PERFETTO_TEMPLATE_THREAD_LOCAL
+#else
+#define PERFETTO_TEMPLATE_THREAD_LOCAL PERFETTO_THREAD_LOCAL
+#endif
+
+// This macro must be used once for each data source in one source file to
+// allocate static storage for the data source's static state.
+//
+// Note: if MSVC fails with a C2086 (redefinition) error here, use the
+// permissive- flag to enable standards-compliant mode. See
+// https://developercommunity.visualstudio.com/content/problem/319447/
+// explicit-specialization-of-static-data-member-inco.html.
+#define PERFETTO_DEFINE_DATA_SOURCE_STATIC_MEMBERS(...)               \
+  template <>                                                         \
+  PERFETTO_COMPONENT_EXPORT perfetto::internal::DataSourceStaticState \
+      perfetto::DataSource<__VA_ARGS__>::static_state_{};             \
+  template <>                                                         \
+  PERFETTO_COMPONENT_EXPORT PERFETTO_TEMPLATE_THREAD_LOCAL            \
+      perfetto::internal::DataSourceThreadLocalState*                 \
+          perfetto::DataSource<__VA_ARGS__>::tls_state_ = nullptr
+
+#endif  // INCLUDE_PERFETTO_TRACING_DATA_SOURCE_H_
+// gen_amalgamated begin header: include/perfetto/tracing/tracing.h
+// gen_amalgamated begin header: include/perfetto/tracing/backend_type.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_BACKEND_TYPE_H_
+#define INCLUDE_PERFETTO_TRACING_BACKEND_TYPE_H_
+
+#include <stdint.h>
+
+namespace perfetto {
+
+enum BackendType : uint32_t {
+  kUnspecifiedBackend = 0,
+
+  // Connects to a previously-initialized perfetto tracing backend for
+  // in-process. If the in-process backend has not been previously initialized
+  // it will do so and create the tracing service on a dedicated thread.
+  kInProcessBackend = 1 << 0,
+
+  // Connects to the system tracing service (e.g. on Linux/Android/Mac uses a
+  // named UNIX socket).
+  kSystemBackend = 1 << 1,
+
+  // Used to provide a custom IPC transport to connect to the service.
+  // TracingInitArgs::custom_backend must be non-null and point to an
+  // indefinitely lived instance.
+  kCustomBackend = 1 << 2,
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_BACKEND_TYPE_H_
+// gen_amalgamated begin header: include/perfetto/tracing/internal/in_process_tracing_backend.h
+// gen_amalgamated begin header: include/perfetto/tracing/tracing_backend.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_TRACING_BACKEND_H_
+#define INCLUDE_PERFETTO_TRACING_TRACING_BACKEND_H_
+
+#include <memory>
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+// The embedder can (but doesn't have to) extend the TracingBackend class and
+// pass as an argument to Tracing::Initialize(kCustomBackend) to override the
+// way to reach the service. This is for peculiar cases where the embedder has
+// a multi-process architecture and wants to override the IPC transport. The
+// real use-case for this at the time of writing is chromium (+ Mojo IPC).
+// Extending this class requires depending on the full set of perfetto headers
+// (not just /public/). Contact the team before doing so as the non-public
+// headers are not guaranteed to be API stable.
+
+namespace perfetto {
+
+namespace base {
+class TaskRunner;
+}
+
+// These classes are declared in headers outside of /public/.
+class Consumer;
+class ConsumerEndpoint;
+class Producer;
+class ProducerEndpoint;
+
+class PERFETTO_EXPORT TracingBackend {
+ public:
+  virtual ~TracingBackend();
+
+  // Connects a Producer instance and obtains a ProducerEndpoint, which is
+  // essentially a 1:1 channel between one Producer and the Service.
+  // To disconnect just destroy the returned endpoint object. It is safe to
+  // destroy the Producer once Producer::OnDisconnect() has been invoked.
+  struct ConnectProducerArgs {
+    std::string producer_name;
+
+    // The Producer object that will receive calls like Start/StopDataSource().
+    // The caller has to guarantee that this object is valid as long as the
+    // returned ProducerEndpoint is alive.
+    Producer* producer = nullptr;
+
+    // The task runner where the Producer methods will be called onto.
+    // The caller has to guarantee that the passed TaskRunner is valid as long
+    // as the returned ProducerEndpoint is alive.
+    ::perfetto::base::TaskRunner* task_runner = nullptr;
+
+    // These get propagated from TracingInitArgs and are optionally provided by
+    // the client when calling Tracing::Initialize().
+    uint32_t shmem_size_hint_bytes = 0;
+    uint32_t shmem_page_size_hint_bytes = 0;
+  };
+
+  virtual std::unique_ptr<ProducerEndpoint> ConnectProducer(
+      const ConnectProducerArgs&) = 0;
+
+  // As above, for the Consumer-side.
+  struct ConnectConsumerArgs {
+    // The Consumer object that will receive calls like OnTracingDisabled(),
+    // OnTraceData().
+    Consumer* consumer{};
+
+    // The task runner where the Consumer methods will be called onto.
+    ::perfetto::base::TaskRunner* task_runner{};
+  };
+  virtual std::unique_ptr<ConsumerEndpoint> ConnectConsumer(
+      const ConnectConsumerArgs&) = 0;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_TRACING_BACKEND_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_IN_PROCESS_TRACING_BACKEND_H_
+#define INCLUDE_PERFETTO_TRACING_INTERNAL_IN_PROCESS_TRACING_BACKEND_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/tracing_backend.h"
+
+namespace perfetto {
+
+namespace base {
+class TaskRunner;
+}
+
+class Producer;
+class TracingService;
+
+namespace internal {
+
+// A built-in implementation of TracingBackend that creates a tracing service
+// instance in-process. Instantiated when the embedder calls
+// Tracing::Initialize(kInProcessBackend). Solves most in-app-only tracing
+// use-cases.
+class PERFETTO_EXPORT InProcessTracingBackend : public TracingBackend {
+ public:
+  static TracingBackend* GetInstance();
+
+  // TracingBackend implementation.
+  std::unique_ptr<ProducerEndpoint> ConnectProducer(
+      const ConnectProducerArgs&) override;
+  std::unique_ptr<ConsumerEndpoint> ConnectConsumer(
+      const ConnectConsumerArgs&) override;
+
+ private:
+  InProcessTracingBackend();
+  TracingService* GetOrCreateService(base::TaskRunner*);
+
+  std::unique_ptr<TracingService> service_;
+};
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_INTERNAL_IN_PROCESS_TRACING_BACKEND_H_
+// gen_amalgamated begin header: include/perfetto/tracing/internal/system_tracing_backend.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_SYSTEM_TRACING_BACKEND_H_
+#define INCLUDE_PERFETTO_TRACING_INTERNAL_SYSTEM_TRACING_BACKEND_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/tracing_backend.h"
+
+namespace perfetto {
+
+namespace base {
+class TaskRunner;
+}
+
+class Producer;
+
+// A built-in implementation of TracingBackend that connects to the system
+// tracing daemon (traced) via a UNIX socket using the perfetto built-in
+// proto-based IPC mechanism. Instantiated when the embedder calls
+// Tracing::Initialize(kSystemBackend). It allows to get app-traces fused
+// together with system traces, useful to correlate on the timeline system
+// events (e.g. scheduling slices from the kernel) with in-app events.
+namespace internal {
+class PERFETTO_EXPORT SystemTracingBackend : public TracingBackend {
+ public:
+  static TracingBackend* GetInstance();
+
+  // TracingBackend implementation.
+  std::unique_ptr<ProducerEndpoint> ConnectProducer(
+      const ConnectProducerArgs&) override;
+  std::unique_ptr<ConsumerEndpoint> ConnectConsumer(
+      const ConnectConsumerArgs&) override;
+
+ private:
+  SystemTracingBackend();
+};
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_INTERNAL_SYSTEM_TRACING_BACKEND_H_
+// gen_amalgamated begin header: include/perfetto/tracing/tracing_policy.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_TRACING_POLICY_H_
+#define INCLUDE_PERFETTO_TRACING_TRACING_POLICY_H_
+
+#include <functional>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/backend_type.h"
+
+namespace perfetto {
+
+// Applies policy decisions, such as allowing or denying connections, when
+// certain tracing SDK events occur. All methods are called on an internal
+// perfetto thread.
+class PERFETTO_EXPORT TracingPolicy {
+ public:
+  virtual ~TracingPolicy();
+
+  // Called when the current process attempts to connect a new consumer to the
+  // backend of |backend_type| to check if the connection should be allowed. Its
+  // implementation should execute |result_callback| with the result of the
+  // check (synchronuosly or asynchronously on any thread). If the result is
+  // false, the consumer connection is aborted. Chrome uses this to restrict
+  // creating (system) tracing sessions based on an enterprise policy.
+  struct ShouldAllowConsumerSessionArgs {
+    BackendType backend_type;
+    std::function<void(bool /*allow*/)> result_callback;
+  };
+  virtual void ShouldAllowConsumerSession(
+      const ShouldAllowConsumerSessionArgs&) = 0;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_TRACING_POLICY_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_TRACING_H_
+#define INCLUDE_PERFETTO_TRACING_TRACING_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/backend_type.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/in_process_tracing_backend.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/system_tracing_backend.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/tracing_policy.h"
+
+namespace perfetto {
+
+namespace internal {
+class TracingMuxerImpl;
+}
+
+class TracingBackend;
+class Platform;
+class TracingSession;  // Declared below.
+
+struct TracingError {
+  enum ErrorCode : uint32_t {
+    // Peer disconnection.
+    kDisconnected = 1,
+
+    // The Start() method failed. This is typically because errors in the passed
+    // TraceConfig. More details are available in |message|.
+    kTracingFailed = 2,
+  };
+
+  ErrorCode code;
+  std::string message;
+
+  TracingError(ErrorCode cd, std::string msg)
+      : code(cd), message(std::move(msg)) {
+    PERFETTO_CHECK(!message.empty());
+  }
+};
+
+using LogLev = ::perfetto::base::LogLev;
+using LogMessageCallbackArgs = ::perfetto::base::LogMessageCallbackArgs;
+using LogMessageCallback = ::perfetto::base::LogMessageCallback;
+
+struct TracingInitArgs {
+  uint32_t backends = 0;                     // One or more BackendTypes.
+  TracingBackend* custom_backend = nullptr;  // [Optional].
+
+  // [Optional] Platform implementation. It allows the embedder to take control
+  // of platform-specific bits like thread creation and TLS slot handling. If
+  // not set it will use Platform::GetDefaultPlatform().
+  Platform* platform = nullptr;
+
+  // [Optional] Tune the size of the shared memory buffer between the current
+  // process and the service backend(s). This is a trade-off between memory
+  // footprint and the ability to sustain bursts of trace writes (see comments
+  // in shared_memory_abi.h).
+  // If set, the value must be a multiple of 4KB. The value can be ignored if
+  // larger than kMaxShmSize (32MB) or not a multiple of 4KB.
+  uint32_t shmem_size_hint_kb = 0;
+
+  // [Optional] Specifies the preferred size of each page in the shmem buffer.
+  // This is a trade-off between IPC overhead and fragmentation/efficiency of
+  // the shmem buffer in presence of multiple writer threads.
+  // Must be one of [4, 8, 16, 32].
+  uint32_t shmem_page_size_hint_kb = 0;
+
+  // [Optional] The length of the period during which shared-memory-buffer
+  // chunks that have been filled with data are accumulated (batched) on the
+  // producer side, before the service is notified of them over an out-of-band
+  // IPC call. If, while this period lasts, the shared memory buffer gets too
+  // full, the IPC call will be sent immediately. The value of this parameter is
+  // a trade-off between IPC traffic overhead and the ability to sustain bursts
+  // of trace writes. The higher the value, the more chunks will be batched and
+  // the less buffer space will be available to hide the latency of the service,
+  // and vice versa. For more details, see the SetBatchCommitsDuration method in
+  // shared_memory_arbiter.h.
+  //
+  // Note: With the default value of 0ms, batching still happens but with a zero
+  // delay, i.e. commits will be sent to the service at the next opportunity.
+  uint32_t shmem_batch_commits_duration_ms = 0;
+
+  // [Optional] If set, the policy object is notified when certain SDK events
+  // occur and may apply policy decisions, such as denying connections. The
+  // embedder is responsible for ensuring the object remains alive for the
+  // lifetime of the process.
+  TracingPolicy* tracing_policy = nullptr;
+
+  // [Optional] If set, log messages generated by perfetto are passed to this
+  // callback instead of being logged directly.
+  LogMessageCallback log_message_callback = nullptr;
+
+ protected:
+  friend class Tracing;
+  friend class internal::TracingMuxerImpl;
+
+  // Used only by the DCHECK in tracing.cc, to check that the config is the
+  // same in case of re-initialization.
+  bool operator==(const TracingInitArgs& other) const {
+    return std::tie(backends, custom_backend, platform, shmem_size_hint_kb,
+                    shmem_page_size_hint_kb, in_process_backend_factory_,
+                    system_backend_factory_, dcheck_is_on_) ==
+           std::tie(other.backends, other.custom_backend, other.platform,
+                    other.shmem_size_hint_kb, other.shmem_page_size_hint_kb,
+                    other.in_process_backend_factory_,
+                    other.system_backend_factory_, other.dcheck_is_on_);
+  }
+
+  using BackendFactoryFunction = TracingBackend* (*)();
+  BackendFactoryFunction in_process_backend_factory_ = nullptr;
+  BackendFactoryFunction system_backend_factory_ = nullptr;
+  bool dcheck_is_on_ = PERFETTO_DCHECK_IS_ON();
+};
+
+// The entry-point for using perfetto.
+class PERFETTO_EXPORT Tracing {
+ public:
+  // Initializes Perfetto with the given backends in the calling process and/or
+  // with a user-provided backend. No-op if called more than once.
+  static inline void Initialize(const TracingInitArgs& args)
+      PERFETTO_ALWAYS_INLINE {
+    TracingInitArgs args_copy(args);
+    // This code is inlined to allow dead-code elimination for unused backends.
+    // This saves ~200 KB when not using the in-process backend (b/148198993).
+    // The logic behind it is the following:
+    // Nothing other than the code below references the two GetInstance()
+    // methods. From a linker-graph viewpoint, those GetInstance() pull in many
+    // other pieces of the codebase (e.g. InProcessTracingBackend pulls the
+    // whole TracingServiceImpl, SystemTracingBackend pulls the whole //ipc
+    // layer). Due to the inline, the compiler can see through the code and
+    // realize that some branches are always not taken. When that happens, no
+    // reference to the backends' GetInstance() is emitted and that allows the
+    // linker GC to get rid of the entire set of dependencies.
+    if (args.backends & kInProcessBackend) {
+      args_copy.in_process_backend_factory_ =
+          &internal::InProcessTracingBackend::GetInstance;
+    }
+    if (args.backends & kSystemBackend) {
+      args_copy.system_backend_factory_ =
+          &internal::SystemTracingBackend::GetInstance;
+    }
+    InitializeInternal(args_copy);
+  }
+
+  // Checks if tracing has been initialized by calling |Initialize|.
+  static bool IsInitialized();
+
+  // Start a new tracing session using the given tracing backend. Use
+  // |kUnspecifiedBackend| to select an available backend automatically.
+  // For the moment this can be used only when initializing tracing in
+  // kInProcess mode. For the system mode use the 'bin/perfetto' cmdline client.
+  static std::unique_ptr<TracingSession> NewTrace(
+      BackendType = kUnspecifiedBackend);
+
+ private:
+  static void InitializeInternal(const TracingInitArgs&);
+
+  Tracing() = delete;
+};
+
+class PERFETTO_EXPORT TracingSession {
+ public:
+  virtual ~TracingSession();
+
+  // Configure the session passing the trace config.
+  // If a writable file handle is given through |fd|, the trace will
+  // automatically written to that file. Otherwise you should call ReadTrace()
+  // to retrieve the trace data. This call does not take ownership of |fd|.
+  // TODO(primiano): add an error callback.
+  virtual void Setup(const TraceConfig&, int fd = -1) = 0;
+
+  // Enable tracing asynchronously. Use SetOnStartCallback() to get a
+  // notification when the session has fully started.
+  virtual void Start() = 0;
+
+  // Enable tracing and block until tracing has started. Note that if data
+  // sources are registered after this call was initiated, the call may return
+  // before the additional data sources have started. Also, if other producers
+  // (e.g., with system-wide tracing) have registered data sources without start
+  // notification support, this call may return before those data sources have
+  // started.
+  virtual void StartBlocking() = 0;
+
+  // This callback will be invoked when all data sources have acknowledged that
+  // tracing has started. This callback will be invoked on an internal perfetto
+  // thread.
+  virtual void SetOnStartCallback(std::function<void()>) = 0;
+
+  // This callback can be used to get a notification when some error occured
+  // (e.g., peer disconnection). Error type will be passed as an argument. This
+  // callback will be invoked on an internal perfetto thread.
+  virtual void SetOnErrorCallback(std::function<void(TracingError)>) = 0;
+
+  // Issues a flush request, asking all data sources to ack the request, within
+  // the specified timeout. A "flush" is a fence to ensure visibility of data in
+  // the async tracing pipeline. It guarantees that all data written before the
+  // Flush() call will be visible in the trace buffer and hence by the
+  // ReadTrace() / ReadTraceBlocking() methods.
+  // Args:
+  //  callback: will be invoked on an internal perfetto thread when all data
+  //    sources have acked, or the timeout is reached. The bool argument
+  //    will be true if all data sources acked within the timeout, false if
+  //    the timeout was hit or some other error occurred (e.g. the tracing
+  //    session wasn't started or ended).
+  //  timeout_ms: how much time the service will wait for data source acks. If
+  //    0, the global timeout specified in the TraceConfig (flush_timeout_ms)
+  //    will be used. If flush_timeout_ms is also unspecified, a default value
+  //    of 5s will be used.
+  // Known issues:
+  //    Because flushing is still based on service-side scraping, the very last
+  //    trace packet for each data source thread will not be visible. Fixing
+  //    this requires either propagating the Flush() to the data sources or
+  //    changing the order of atomic operations in the service (b/162206162).
+  //    Until then, a workaround is to make sure to call
+  //    DataSource::Trace([](TraceContext ctx) { ctx.Flush(); }) just before
+  //    stopping, on each thread where DataSource::Trace has been previously
+  //    called.
+  virtual void Flush(std::function<void(bool)>, uint32_t timeout_ms = 0) = 0;
+
+  // Blocking version of Flush(). Waits until all data sources have acked and
+  // returns the success/failure status.
+  bool FlushBlocking(uint32_t timeout_ms = 0);
+
+  // Disable tracing asynchronously.
+  // Use SetOnStopCallback() to get a notification when the tracing session is
+  // fully stopped and all data sources have acked.
+  virtual void Stop() = 0;
+
+  // Disable tracing and block until tracing has stopped.
+  virtual void StopBlocking() = 0;
+
+  // This callback will be invoked when tracing is disabled.
+  // This can happen either when explicitly calling TracingSession.Stop() or
+  // when the trace reaches its |duration_ms| time limit.
+  // This callback will be invoked on an internal perfetto thread.
+  virtual void SetOnStopCallback(std::function<void()>) = 0;
+
+  // Changes the TraceConfig for an active tracing session. The session must
+  // have been configured and started before. Note that the tracing service
+  // only supports changing a subset of TraceConfig fields,
+  // see ConsumerEndpoint::ChangeTraceConfig().
+  virtual void ChangeTraceConfig(const TraceConfig&) = 0;
+
+  // Struct passed as argument to the callback passed to ReadTrace().
+  // [data, size] is guaranteed to contain 1 or more full trace packets, which
+  // can be decoded using trace.proto. No partial or truncated packets are
+  // exposed. If the trace is empty this returns a zero-sized nullptr with
+  // |has_more| == true to signal EOF.
+  // This callback will be invoked on an internal perfetto thread.
+  struct ReadTraceCallbackArgs {
+    const char* data = nullptr;
+    size_t size = 0;
+
+    // When false, this will be the last invocation of the callback for this
+    // read cycle.
+    bool has_more = false;
+  };
+
+  // Reads back the trace data (raw protobuf-encoded bytes) asynchronously.
+  // Can be called at any point during the trace, typically but not necessarily,
+  // after stopping. If this is called before the end of the trace (i.e. before
+  // Stop() / StopBlocking()), in almost all cases you need to call
+  // Flush() / FlushBlocking() before Read(). This is to guarantee that tracing
+  // data in-flight in the data sources is committed into the tracing buffers
+  // before reading them.
+  // Reading the trace data is a destructive operation w.r.t. contents of the
+  // trace buffer and is not idempotent.
+  // A single ReadTrace() call can yield >1 callback invocations, until
+  // |has_more| is false.
+  using ReadTraceCallback = std::function<void(ReadTraceCallbackArgs)>;
+  virtual void ReadTrace(ReadTraceCallback) = 0;
+
+  // Synchronous version of ReadTrace(). It blocks the calling thread until all
+  // the trace contents are read. This is slow and inefficient (involves more
+  // copies) and is mainly intended for testing.
+  std::vector<char> ReadTraceBlocking();
+
+  // Struct passed as an argument to the callback for GetTraceStats(). Contains
+  // statistics about the tracing session.
+  struct GetTraceStatsCallbackArgs {
+    // Whether or not querying statistics succeeded.
+    bool success = false;
+    // Serialized TraceStats protobuf message. To decode:
+    //
+    //   perfetto::protos::gen::TraceStats trace_stats;
+    //   trace_stats.ParseFromArray(args.trace_stats_data.data(),
+    //                              args.trace_stats_data.size());
+    //
+    std::vector<uint8_t> trace_stats_data;
+  };
+
+  // Requests a snapshot of statistical data for this tracing session. Only one
+  // query may be active at a time. This callback will be invoked on an internal
+  // perfetto thread.
+  using GetTraceStatsCallback = std::function<void(GetTraceStatsCallbackArgs)>;
+  virtual void GetTraceStats(GetTraceStatsCallback) = 0;
+
+  // Synchronous version of GetTraceStats() for convenience.
+  GetTraceStatsCallbackArgs GetTraceStatsBlocking();
+
+  // Struct passed as an argument to the callback for QueryServiceState().
+  // Contains information about registered data sources.
+  struct QueryServiceStateCallbackArgs {
+    // Whether or not getting the service state succeeded.
+    bool success = false;
+    // Serialized TracingServiceState protobuf message. To decode:
+    //
+    //   perfetto::protos::gen::TracingServiceState state;
+    //   state.ParseFromArray(args.service_state_data.data(),
+    //                        args.service_state_data.size());
+    //
+    std::vector<uint8_t> service_state_data;
+  };
+
+  // Requests a snapshot of the tracing service state for this session. Only one
+  // request per session may be active at a time. This callback will be invoked
+  // on an internal perfetto thread.
+  using QueryServiceStateCallback =
+      std::function<void(QueryServiceStateCallbackArgs)>;
+  virtual void QueryServiceState(QueryServiceStateCallback) = 0;
+
+  // Synchronous version of QueryServiceState() for convenience.
+  QueryServiceStateCallbackArgs QueryServiceStateBlocking();
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_TRACING_H_
+// gen_amalgamated begin header: include/perfetto/tracing/track_event.h
+// gen_amalgamated begin header: include/perfetto/base/time.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_BASE_TIME_H_
+#define INCLUDE_PERFETTO_BASE_TIME_H_
+
+#include <time.h>
+
+#include <chrono>
+#include <string>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+#include <mach/mach_init.h>
+#include <mach/mach_port.h>
+#include <mach/mach_time.h>
+#include <mach/thread_act.h>
+#endif
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WASM)
+#include <emscripten/emscripten.h>
+#endif
+
+namespace perfetto {
+namespace base {
+
+using TimeSeconds = std::chrono::seconds;
+using TimeMillis = std::chrono::milliseconds;
+using TimeNanos = std::chrono::nanoseconds;
+
+inline TimeNanos FromPosixTimespec(const struct timespec& ts) {
+  return TimeNanos(ts.tv_sec * 1000000000LL + ts.tv_nsec);
+}
+
+void SleepMicroseconds(unsigned interval_us);
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+
+TimeNanos GetWallTimeNs();
+TimeNanos GetThreadCPUTimeNs();
+
+// TODO: Clock that counts time during suspend is not implemented on Windows.
+inline TimeNanos GetBootTimeNs() {
+  return GetWallTimeNs();
+}
+
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+
+inline TimeNanos GetWallTimeNs() {
+  auto init_time_factor = []() -> uint64_t {
+    mach_timebase_info_data_t timebase_info;
+    mach_timebase_info(&timebase_info);
+    return timebase_info.numer / timebase_info.denom;
+  };
+
+  static uint64_t monotonic_timebase_factor = init_time_factor();
+  return TimeNanos(mach_absolute_time() * monotonic_timebase_factor);
+}
+
+// TODO: Clock that counts time during suspend is not implemented on Mac.
+inline TimeNanos GetBootTimeNs() {
+  return GetWallTimeNs();
+}
+
+inline TimeNanos GetThreadCPUTimeNs() {
+  mach_port_t this_thread = mach_thread_self();
+  mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
+  thread_basic_info_data_t info{};
+  kern_return_t kr =
+      thread_info(this_thread, THREAD_BASIC_INFO,
+                  reinterpret_cast<thread_info_t>(&info), &count);
+  mach_port_deallocate(mach_task_self(), this_thread);
+
+  if (kr != KERN_SUCCESS) {
+    PERFETTO_DFATAL("Failed to get CPU time.");
+    return TimeNanos(0);
+  }
+  return TimeNanos(info.user_time.seconds * 1000000000LL +
+                   info.user_time.microseconds * 1000LL +
+                   info.system_time.seconds * 1000000000LL +
+                   info.system_time.microseconds * 1000LL);
+}
+
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_WASM)
+
+inline TimeNanos GetWallTimeNs() {
+  return TimeNanos(static_cast<uint64_t>(emscripten_get_now()) * 1000000);
+}
+
+inline TimeNanos GetThreadCPUTimeNs() {
+  return TimeNanos(0);
+}
+
+// TODO: Clock that counts time during suspend is not implemented on WASM.
+inline TimeNanos GetBootTimeNs() {
+  return GetWallTimeNs();
+}
+
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
+
+// Tracing time doesn't need to work on NaCl since its going away shortly. We
+// just need to compile on it. The only function NaCl could support is
+// GetWallTimeNs(), but to prevent false hope we leave it unimplemented.
+
+inline TimeNanos GetWallTimeNs() {
+  return TimeNanos(0);
+}
+
+inline TimeNanos GetThreadCPUTimeNs() {
+  return TimeNanos(0);
+}
+
+inline TimeNanos GetBootTimeNs() {
+  return TimeNanos(0);
+}
+
+#else  // posix
+
+constexpr clockid_t kWallTimeClockSource = CLOCK_MONOTONIC;
+
+inline TimeNanos GetTimeInternalNs(clockid_t clk_id) {
+  struct timespec ts = {};
+  PERFETTO_CHECK(clock_gettime(clk_id, &ts) == 0);
+  return FromPosixTimespec(ts);
+}
+
+// Return ns from boot. Conversely to GetWallTimeNs, this clock counts also time
+// during suspend (when supported).
+inline TimeNanos GetBootTimeNs() {
+  // Determine if CLOCK_BOOTTIME is available on the first call.
+  static const clockid_t kBootTimeClockSource = [] {
+    struct timespec ts = {};
+    int res = clock_gettime(CLOCK_BOOTTIME, &ts);
+    return res == 0 ? CLOCK_BOOTTIME : kWallTimeClockSource;
+  }();
+  return GetTimeInternalNs(kBootTimeClockSource);
+}
+
+inline TimeNanos GetWallTimeNs() {
+  return GetTimeInternalNs(kWallTimeClockSource);
+}
+
+inline TimeNanos GetThreadCPUTimeNs() {
+  return GetTimeInternalNs(CLOCK_THREAD_CPUTIME_ID);
+}
+#endif
+
+inline TimeSeconds GetBootTimeS() {
+  return std::chrono::duration_cast<TimeSeconds>(GetBootTimeNs());
+}
+
+inline TimeMillis GetWallTimeMs() {
+  return std::chrono::duration_cast<TimeMillis>(GetWallTimeNs());
+}
+
+inline TimeSeconds GetWallTimeS() {
+  return std::chrono::duration_cast<TimeSeconds>(GetWallTimeNs());
+}
+
+inline struct timespec ToPosixTimespec(TimeMillis time) {
+  struct timespec ts {};
+  const long time_s = static_cast<long>(time.count() / 1000);
+  ts.tv_sec = time_s;
+  ts.tv_nsec = (static_cast<long>(time.count()) - time_s * 1000L) * 1000000L;
+  return ts;
+}
+
+std::string GetTimeFmt(const std::string& fmt);
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_BASE_TIME_H_
+// gen_amalgamated begin header: include/perfetto/tracing/internal/track_event_data_source.h
+// gen_amalgamated begin header: include/perfetto/base/template_util.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_BASE_TEMPLATE_UTIL_H_
+#define INCLUDE_PERFETTO_BASE_TEMPLATE_UTIL_H_
+
+#include <cstddef>
+#include <type_traits>
+
+namespace perfetto {
+namespace base {
+
+// Helper to express preferences in an overload set. If more than one overload
+// is available for a given set of parameters the overload with the higher
+// priority will be chosen.
+template <size_t I>
+struct priority_tag : priority_tag<I - 1> {};
+
+template <>
+struct priority_tag<0> {};
+
+// enable_if_t is an implementation of std::enable_if_t from C++14.
+//
+// Specification:
+// https://en.cppreference.com/w/cpp/types/enable_if
+template <bool B, class T = void>
+using enable_if_t = typename std::enable_if<B, T>::type;
+
+// decay_t is an implementation of std::decay_t from C++14.
+//
+// Specification:
+// https://en.cppreference.com/w/cpp/types/decay
+template <class T>
+using decay_t = typename std::decay<T>::type;
+
+// remove_cvref is an implementation of std::remove_cvref from
+// C++20.
+//
+// Specification:
+// https://en.cppreference.com/w/cpp/types/remove_cvref
+
+template <class T>
+struct remove_cvref {
+  using type = typename std::remove_cv<typename std::remove_cv<
+      typename std::remove_reference<T>::type>::type>::type;
+};
+template <class T>
+using remove_cvref_t = typename remove_cvref<T>::type;
+
+// Check if a given type is a specialization of a given template:
+// is_specialization<T, std::vector>::value.
+
+template <typename Type, template <typename...> class Template>
+struct is_specialization : std::false_type {};
+
+template <template <typename...> class Ref, typename... Args>
+struct is_specialization<Ref<Args...>, Ref> : std::true_type {};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_BASE_TEMPLATE_UTIL_H_
+// gen_amalgamated begin header: include/perfetto/tracing/event_context.h
+// gen_amalgamated begin header: include/perfetto/tracing/internal/track_event_internal.h
+// gen_amalgamated begin header: include/perfetto/base/flat_set.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_BASE_FLAT_SET_H_
+#define INCLUDE_PERFETTO_BASE_FLAT_SET_H_
+
+#include <algorithm>
+#include <vector>
+
+// A vector-based set::set-like container.
+// It's more cache friendly than std::*set and performs for cases where:
+// 1. A high number of dupes is expected (e.g. pid tracking in ftrace).
+// 2. The working set is small (hundreds of elements).
+
+// Performance characteristics (for uniformly random insertion order):
+// - For smaller insertions (up to ~500), it outperforms both std::set<int> and
+//   std::unordered_set<int> by ~3x.
+// - Up until 4k insertions, it is always faster than std::set<int>.
+// - unordered_set<int> is faster with more than 2k insertions.
+// - unordered_set, however, it's less memory efficient and has more caveats
+//   (see chromium's base/containers/README.md).
+//
+// See flat_set_benchmark.cc and the charts in go/perfetto-int-set-benchmark.
+
+namespace perfetto {
+namespace base {
+
+template <typename T>
+class FlatSet {
+ public:
+  using value_type = T;
+  using const_pointer = const T*;
+  using iterator = typename std::vector<T>::iterator;
+  using const_iterator = typename std::vector<T>::const_iterator;
+
+  FlatSet() = default;
+
+  // Mainly for tests. Deliberately not marked as "explicit".
+  FlatSet(std::initializer_list<T> initial) : entries_(initial) {
+    std::sort(entries_.begin(), entries_.end());
+    entries_.erase(std::unique(entries_.begin(), entries_.end()),
+                   entries_.end());
+  }
+
+  const_iterator find(T value) const {
+    auto entries_end = entries_.end();
+    auto it = std::lower_bound(entries_.begin(), entries_end, value);
+    return (it != entries_end && *it == value) ? it : entries_end;
+  }
+
+  size_t count(T value) const { return find(value) == entries_.end() ? 0 : 1; }
+
+  std::pair<iterator, bool> insert(T value) {
+    auto entries_end = entries_.end();
+    auto it = std::lower_bound(entries_.begin(), entries_end, value);
+    if (it != entries_end && *it == value)
+      return std::make_pair(it, false);
+    // If the value is not found |it| is either end() or the next item strictly
+    // greater than |value|. In both cases we want to insert just before that.
+    it = entries_.insert(it, std::move(value));
+    return std::make_pair(it, true);
+  }
+
+  size_t erase(T value) {
+    auto it = find(value);
+    if (it == entries_.end())
+      return 0;
+    entries_.erase(it);
+    return 1;
+  }
+
+  void clear() { entries_.clear(); }
+
+  bool empty() const { return entries_.empty(); }
+  void reserve(size_t n) { entries_.reserve(n); }
+  size_t size() const { return entries_.size(); }
+  const_iterator begin() const { return entries_.begin(); }
+  const_iterator end() const { return entries_.end(); }
+
+ private:
+  std::vector<T> entries_;
+};
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_BASE_FLAT_SET_H_
+// gen_amalgamated begin header: include/perfetto/protozero/scattered_heap_buffer.h
+// gen_amalgamated begin header: include/perfetto/protozero/root_message.h
+// gen_amalgamated begin header: include/perfetto/protozero/message_arena.h
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_MESSAGE_ARENA_H_
+#define INCLUDE_PERFETTO_PROTOZERO_MESSAGE_ARENA_H_
+
+#include <stdint.h>
+
+#include <list>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+
+namespace protozero {
+
+class Message;
+
+// Object allocator for fixed-sized protozero::Message objects.
+// It's a simple bump-pointer allocator which leverages the stack-alike
+// usage pattern of protozero nested messages. It avoids hitting the system
+// allocator in most cases, by reusing the same block, and falls back on
+// allocating new blocks only when using deeply nested messages (which are
+// extremely rare).
+// This is used by RootMessage<T> to handle the storage for root-level messages.
+class PERFETTO_EXPORT MessageArena {
+ public:
+  MessageArena();
+  ~MessageArena();
+
+  // Strictly no copies or moves as this is used to hand out pointers.
+  MessageArena(const MessageArena&) = delete;
+  MessageArena& operator=(const MessageArena&) = delete;
+  MessageArena(MessageArena&&) = delete;
+  MessageArena& operator=(MessageArena&&) = delete;
+
+  // Allocates a new Message object.
+  Message* NewMessage();
+
+  // Deletes the last message allocated. The |msg| argument is used only for
+  // DCHECKs, it MUST be the pointer obtained by the last NewMessage() call.
+  void DeleteLastMessage(Message* msg) {
+    PERFETTO_DCHECK(!blocks_.empty() && blocks_.back().entries > 0);
+    PERFETTO_DCHECK(&blocks_.back().storage[blocks_.back().entries - 1] ==
+                    static_cast<void*>(msg));
+    DeleteLastMessageInternal();
+  }
+
+  // Resets the state of the arena, clearing up all but one block. This is used
+  // to avoid leaking outstanding unfinished sub-messages while recycling the
+  // RootMessage object (this is extremely rare due to the RAII scoped handles
+  // but could happen if some client does some overly clever std::move() trick).
+  void Reset() {
+    PERFETTO_DCHECK(!blocks_.empty());
+    blocks_.resize(1);
+    auto& block = blocks_.back();
+    block.entries = 0;
+    PERFETTO_ASAN_POISON(block.storage, sizeof(block.storage));
+  }
+
+ private:
+  void DeleteLastMessageInternal();
+
+  struct Block {
+    static constexpr size_t kCapacity = 16;
+
+    Block() { PERFETTO_ASAN_POISON(storage, sizeof(storage)); }
+
+    std::aligned_storage<sizeof(Message), alignof(Message)>::type
+        storage[kCapacity];
+    uint32_t entries = 0;  // # Message entries used (<= kCapacity).
+  };
+
+  // blocks are used to hand out pointers and must not be moved. Hence why
+  // std::list rather than std::vector.
+  std::list<Block> blocks_;
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_MESSAGE_ARENA_H_
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_ROOT_MESSAGE_H_
+#define INCLUDE_PERFETTO_PROTOZERO_ROOT_MESSAGE_H_
+
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message_arena.h"
+
+namespace protozero {
+
+// Helper class to hand out messages using the default MessageArena.
+// Usage:
+// RootMessage<perfetto::protos::zero::MyMessage> msg;
+// msg.Reset(stream_writer);
+// msg.set_foo(...);
+// auto* nested = msg.set_nested();
+template <typename T = Message>
+class RootMessage : public T {
+ public:
+  RootMessage() { T::Reset(nullptr, &root_arena_); }
+
+  // Disallow copy and move.
+  RootMessage(const RootMessage&) = delete;
+  RootMessage& operator=(const RootMessage&) = delete;
+  RootMessage(RootMessage&&) = delete;
+  RootMessage& operator=(RootMessage&&) = delete;
+
+  void Reset(ScatteredStreamWriter* writer) {
+    root_arena_.Reset();
+    Message::Reset(writer, &root_arena_);
+  }
+
+ private:
+  MessageArena root_arena_;
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_ROOT_MESSAGE_H_
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_SCATTERED_HEAP_BUFFER_H_
+#define INCLUDE_PERFETTO_PROTOZERO_SCATTERED_HEAP_BUFFER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/root_message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_writer.h"
+
+namespace protozero {
+
+class Message;
+
+class PERFETTO_EXPORT ScatteredHeapBuffer
+    : public protozero::ScatteredStreamWriter::Delegate {
+ public:
+  class PERFETTO_EXPORT Slice {
+   public:
+    Slice();
+    explicit Slice(size_t size);
+    Slice(Slice&& slice) noexcept;
+    ~Slice();
+    Slice& operator=(Slice&&);
+
+    inline protozero::ContiguousMemoryRange GetTotalRange() const {
+      return {buffer_.get(), buffer_.get() + size_};
+    }
+
+    inline protozero::ContiguousMemoryRange GetUsedRange() const {
+      return {buffer_.get(), buffer_.get() + size_ - unused_bytes_};
+    }
+
+    uint8_t* start() const { return buffer_.get(); }
+    size_t size() const { return size_; }
+    size_t unused_bytes() const { return unused_bytes_; }
+    void set_unused_bytes(size_t unused_bytes) {
+      PERFETTO_DCHECK(unused_bytes_ <= size_);
+      unused_bytes_ = unused_bytes;
+    }
+
+    void Clear();
+
+   private:
+    std::unique_ptr<uint8_t[]> buffer_;
+    size_t size_;
+    size_t unused_bytes_;
+  };
+
+  ScatteredHeapBuffer(size_t initial_slice_size_bytes = 128,
+                      size_t maximum_slice_size_bytes = 128 * 1024);
+  ~ScatteredHeapBuffer() override;
+
+  // protozero::ScatteredStreamWriter::Delegate implementation.
+  protozero::ContiguousMemoryRange GetNewBuffer() override;
+
+  // Return the slices backing this buffer, adjusted for the number of bytes the
+  // writer has written.
+  const std::vector<Slice>& GetSlices();
+
+  // Stitch all the slices into a single contiguous buffer.
+  std::vector<uint8_t> StitchSlices();
+
+  // Note that the returned ranges point back to this buffer and thus cannot
+  // outlive it.
+  std::vector<protozero::ContiguousMemoryRange> GetRanges();
+
+  // Note that size of the last slice isn't updated to reflect the number of
+  // bytes written by the trace writer.
+  const std::vector<Slice>& slices() const { return slices_; }
+
+  void set_writer(protozero::ScatteredStreamWriter* writer) {
+    writer_ = writer;
+  }
+
+  // Update unused_bytes() of the current |Slice| based on the writer's state.
+  void AdjustUsedSizeOfCurrentSlice();
+
+  // Returns the total size the slices occupy in heap memory (including unused).
+  size_t GetTotalSize();
+
+  // Reset the contents of this buffer but retain one slice allocation (if it
+  // exists) to be reused for future writes.
+  void Reset();
+
+ private:
+  size_t next_slice_size_;
+  const size_t maximum_slice_size_;
+  protozero::ScatteredStreamWriter* writer_ = nullptr;
+  std::vector<Slice> slices_;
+
+  // Used to keep an allocated slice around after this buffer is reset.
+  Slice cached_slice_;
+};
+
+// Helper function to create heap-based protozero messages in one line.
+// Useful when manually serializing a protozero message (primarily in
+// tests/utilities). So instead of the following:
+//   protozero::MyMessage msg;
+//   protozero::ScatteredHeapBuffer shb;
+//   protozero::ScatteredStreamWriter writer(&shb);
+//   shb.set_writer(&writer);
+//   msg.Reset(&writer);
+//   ...
+// You can write:
+//   protozero::HeapBuffered<protozero::MyMessage> msg;
+//   msg->set_stuff(...);
+//   msg.SerializeAsString();
+template <typename T = ::protozero::Message>
+class HeapBuffered {
+ public:
+  HeapBuffered() : HeapBuffered(4096, 4096) {}
+  HeapBuffered(size_t initial_slice_size_bytes, size_t maximum_slice_size_bytes)
+      : shb_(initial_slice_size_bytes, maximum_slice_size_bytes),
+        writer_(&shb_) {
+    shb_.set_writer(&writer_);
+    msg_.Reset(&writer_);
+  }
+
+  // This can't be neither copied nor moved because Message hands out pointers
+  // to itself when creating submessages.
+  HeapBuffered(const HeapBuffered&) = delete;
+  HeapBuffered& operator=(const HeapBuffered&) = delete;
+  HeapBuffered(HeapBuffered&&) = delete;
+  HeapBuffered& operator=(HeapBuffered&&) = delete;
+
+  T* get() { return &msg_; }
+  T* operator->() { return &msg_; }
+
+  bool empty() const { return shb_.slices().empty(); }
+
+  std::vector<uint8_t> SerializeAsArray() {
+    msg_.Finalize();
+    return shb_.StitchSlices();
+  }
+
+  std::string SerializeAsString() {
+    auto vec = SerializeAsArray();
+    return std::string(reinterpret_cast<const char*>(vec.data()), vec.size());
+  }
+
+  std::vector<protozero::ContiguousMemoryRange> GetRanges() {
+    msg_.Finalize();
+    return shb_.GetRanges();
+  }
+
+  const std::vector<ScatteredHeapBuffer::Slice>& GetSlices() {
+    msg_.Finalize();
+    return shb_.GetSlices();
+  }
+
+  void Reset() {
+    shb_.Reset();
+    writer_.Reset(protozero::ContiguousMemoryRange{});
+    msg_.Reset(&writer_);
+    PERFETTO_DCHECK(empty());
+  }
+
+ private:
+  ScatteredHeapBuffer shb_;
+  ScatteredStreamWriter writer_;
+  RootMessage<T> msg_;
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_SCATTERED_HEAP_BUFFER_H_
+// gen_amalgamated begin header: include/perfetto/tracing/debug_annotation.h
+// gen_amalgamated begin header: include/perfetto/tracing/traced_value_forward.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_TRACED_VALUE_FORWARD_H_
+#define INCLUDE_PERFETTO_TRACING_TRACED_VALUE_FORWARD_H_
+
+namespace perfetto {
+
+class TracedValue;
+class TracedArray;
+class TracedDictionary;
+
+template <typename T>
+void WriteIntoTracedValue(TracedValue context, T&& value);
+
+template <typename T, class = void>
+struct TraceFormatTraits;
+
+// Write support checker to allow it to be used when matching.
+//
+// Intended to be used for types like smart pointers, who should support
+// AsTracedValueInto only iff their inner type supports being written into
+// a TracedValue.
+//
+// template <typename T>
+// class SmartPtr {
+//   ...
+//
+//   typename check_traced_value_support<T, void>::value
+//   AsTracedValueInto(perfetto::TracedValue context) const {
+//      WriteIntoTracedValue(std::move(context), *ptr_);
+//   }
+// };
+template <typename T, typename ResultType = void, class = void>
+struct check_traced_value_support;
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_TRACED_VALUE_FORWARD_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/debug_annotation.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_DEBUG_ANNOTATION_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_DEBUG_ANNOTATION_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class DebugAnnotation;
+class DebugAnnotation_NestedValue;
+enum DebugAnnotation_NestedValue_NestedType : int32_t;
+
+enum DebugAnnotation_NestedValue_NestedType : int32_t {
+  DebugAnnotation_NestedValue_NestedType_UNSPECIFIED = 0,
+  DebugAnnotation_NestedValue_NestedType_DICT = 1,
+  DebugAnnotation_NestedValue_NestedType_ARRAY = 2,
+};
+
+const DebugAnnotation_NestedValue_NestedType DebugAnnotation_NestedValue_NestedType_MIN = DebugAnnotation_NestedValue_NestedType_UNSPECIFIED;
+const DebugAnnotation_NestedValue_NestedType DebugAnnotation_NestedValue_NestedType_MAX = DebugAnnotation_NestedValue_NestedType_ARRAY;
+
+class DebugAnnotationName_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  DebugAnnotationName_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit DebugAnnotationName_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit DebugAnnotationName_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_iid() const { return at<1>().valid(); }
+  uint64_t iid() const { return at<1>().as_uint64(); }
+  bool has_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars name() const { return at<2>().as_string(); }
+};
+
+class DebugAnnotationName : public ::protozero::Message {
+ public:
+  using Decoder = DebugAnnotationName_Decoder;
+  enum : int32_t {
+    kIidFieldNumber = 1,
+    kNameFieldNumber = 2,
+  };
+
+  using FieldMetadata_Iid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      DebugAnnotationName>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Iid kIid() { return {}; }
+  void set_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Iid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DebugAnnotationName>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class DebugAnnotation_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/12, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  DebugAnnotation_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit DebugAnnotation_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit DebugAnnotation_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name_iid() const { return at<1>().valid(); }
+  uint64_t name_iid() const { return at<1>().as_uint64(); }
+  bool has_name() const { return at<10>().valid(); }
+  ::protozero::ConstChars name() const { return at<10>().as_string(); }
+  bool has_bool_value() const { return at<2>().valid(); }
+  bool bool_value() const { return at<2>().as_bool(); }
+  bool has_uint_value() const { return at<3>().valid(); }
+  uint64_t uint_value() const { return at<3>().as_uint64(); }
+  bool has_int_value() const { return at<4>().valid(); }
+  int64_t int_value() const { return at<4>().as_int64(); }
+  bool has_double_value() const { return at<5>().valid(); }
+  double double_value() const { return at<5>().as_double(); }
+  bool has_string_value() const { return at<6>().valid(); }
+  ::protozero::ConstChars string_value() const { return at<6>().as_string(); }
+  bool has_pointer_value() const { return at<7>().valid(); }
+  uint64_t pointer_value() const { return at<7>().as_uint64(); }
+  bool has_nested_value() const { return at<8>().valid(); }
+  ::protozero::ConstBytes nested_value() const { return at<8>().as_bytes(); }
+  bool has_legacy_json_value() const { return at<9>().valid(); }
+  ::protozero::ConstChars legacy_json_value() const { return at<9>().as_string(); }
+  bool has_dict_entries() const { return at<11>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> dict_entries() const { return GetRepeated<::protozero::ConstBytes>(11); }
+  bool has_array_values() const { return at<12>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> array_values() const { return GetRepeated<::protozero::ConstBytes>(12); }
+};
+
+class DebugAnnotation : public ::protozero::Message {
+ public:
+  using Decoder = DebugAnnotation_Decoder;
+  enum : int32_t {
+    kNameIidFieldNumber = 1,
+    kNameFieldNumber = 10,
+    kBoolValueFieldNumber = 2,
+    kUintValueFieldNumber = 3,
+    kIntValueFieldNumber = 4,
+    kDoubleValueFieldNumber = 5,
+    kStringValueFieldNumber = 6,
+    kPointerValueFieldNumber = 7,
+    kNestedValueFieldNumber = 8,
+    kLegacyJsonValueFieldNumber = 9,
+    kDictEntriesFieldNumber = 11,
+    kArrayValuesFieldNumber = 12,
+  };
+  using NestedValue = ::perfetto::protos::pbzero::DebugAnnotation_NestedValue;
+
+  using FieldMetadata_NameIid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NameIid kNameIid() { return {}; }
+  void set_name_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NameIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BoolValue =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BoolValue kBoolValue() { return {}; }
+  void set_bool_value(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_BoolValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UintValue =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UintValue kUintValue() { return {}; }
+  void set_uint_value(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UintValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IntValue =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IntValue kIntValue() { return {}; }
+  void set_int_value(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IntValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DoubleValue =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DoubleValue kDoubleValue() { return {}; }
+  void set_double_value(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_DoubleValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StringValue =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StringValue kStringValue() { return {}; }
+  void set_string_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_StringValue::kFieldId, data, size);
+  }
+  void set_string_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_StringValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PointerValue =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PointerValue kPointerValue() { return {}; }
+  void set_pointer_value(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PointerValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NestedValue =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DebugAnnotation_NestedValue,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NestedValue kNestedValue() { return {}; }
+  template <typename T = DebugAnnotation_NestedValue> T* set_nested_value() {
+    return BeginNestedMessage<T>(8);
+  }
+
+
+  using FieldMetadata_LegacyJsonValue =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LegacyJsonValue kLegacyJsonValue() { return {}; }
+  void set_legacy_json_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_LegacyJsonValue::kFieldId, data, size);
+  }
+  void set_legacy_json_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_LegacyJsonValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DictEntries =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DebugAnnotation,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DictEntries kDictEntries() { return {}; }
+  template <typename T = DebugAnnotation> T* add_dict_entries() {
+    return BeginNestedMessage<T>(11);
+  }
+
+
+  using FieldMetadata_ArrayValues =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DebugAnnotation,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ArrayValues kArrayValues() { return {}; }
+  template <typename T = DebugAnnotation> T* add_array_values() {
+    return BeginNestedMessage<T>(12);
+  }
+
+};
+
+class DebugAnnotation_NestedValue_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  DebugAnnotation_NestedValue_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit DebugAnnotation_NestedValue_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit DebugAnnotation_NestedValue_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_nested_type() const { return at<1>().valid(); }
+  int32_t nested_type() const { return at<1>().as_int32(); }
+  bool has_dict_keys() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> dict_keys() const { return GetRepeated<::protozero::ConstChars>(2); }
+  bool has_dict_values() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> dict_values() const { return GetRepeated<::protozero::ConstBytes>(3); }
+  bool has_array_values() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> array_values() const { return GetRepeated<::protozero::ConstBytes>(4); }
+  bool has_int_value() const { return at<5>().valid(); }
+  int64_t int_value() const { return at<5>().as_int64(); }
+  bool has_double_value() const { return at<6>().valid(); }
+  double double_value() const { return at<6>().as_double(); }
+  bool has_bool_value() const { return at<7>().valid(); }
+  bool bool_value() const { return at<7>().as_bool(); }
+  bool has_string_value() const { return at<8>().valid(); }
+  ::protozero::ConstChars string_value() const { return at<8>().as_string(); }
+};
+
+class DebugAnnotation_NestedValue : public ::protozero::Message {
+ public:
+  using Decoder = DebugAnnotation_NestedValue_Decoder;
+  enum : int32_t {
+    kNestedTypeFieldNumber = 1,
+    kDictKeysFieldNumber = 2,
+    kDictValuesFieldNumber = 3,
+    kArrayValuesFieldNumber = 4,
+    kIntValueFieldNumber = 5,
+    kDoubleValueFieldNumber = 6,
+    kBoolValueFieldNumber = 7,
+    kStringValueFieldNumber = 8,
+  };
+  using NestedType = ::perfetto::protos::pbzero::DebugAnnotation_NestedValue_NestedType;
+  static const NestedType UNSPECIFIED = DebugAnnotation_NestedValue_NestedType_UNSPECIFIED;
+  static const NestedType DICT = DebugAnnotation_NestedValue_NestedType_DICT;
+  static const NestedType ARRAY = DebugAnnotation_NestedValue_NestedType_ARRAY;
+
+  using FieldMetadata_NestedType =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::DebugAnnotation_NestedValue_NestedType,
+      DebugAnnotation_NestedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NestedType kNestedType() { return {}; }
+  void set_nested_type(::perfetto::protos::pbzero::DebugAnnotation_NestedValue_NestedType value) {
+    static constexpr uint32_t field_id = FieldMetadata_NestedType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DictKeys =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DebugAnnotation_NestedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DictKeys kDictKeys() { return {}; }
+  void add_dict_keys(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_DictKeys::kFieldId, data, size);
+  }
+  void add_dict_keys(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_DictKeys::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DictValues =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DebugAnnotation_NestedValue,
+      DebugAnnotation_NestedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DictValues kDictValues() { return {}; }
+  template <typename T = DebugAnnotation_NestedValue> T* add_dict_values() {
+    return BeginNestedMessage<T>(3);
+  }
+
+
+  using FieldMetadata_ArrayValues =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DebugAnnotation_NestedValue,
+      DebugAnnotation_NestedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ArrayValues kArrayValues() { return {}; }
+  template <typename T = DebugAnnotation_NestedValue> T* add_array_values() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_IntValue =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      DebugAnnotation_NestedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IntValue kIntValue() { return {}; }
+  void set_int_value(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IntValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DoubleValue =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      DebugAnnotation_NestedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DoubleValue kDoubleValue() { return {}; }
+  void set_double_value(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_DoubleValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BoolValue =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      DebugAnnotation_NestedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BoolValue kBoolValue() { return {}; }
+  void set_bool_value(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_BoolValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StringValue =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DebugAnnotation_NestedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StringValue kStringValue() { return {}; }
+  void set_string_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_StringValue::kFieldId, data, size);
+  }
+  void set_string_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_StringValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_DEBUG_ANNOTATION_H_
+#define INCLUDE_PERFETTO_TRACING_DEBUG_ANNOTATION_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/traced_value_forward.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h"
+
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+namespace {
+// std::underlying_type can't be used with non-enum types, so we need this
+// indirection.
+template <typename T, bool = std::is_enum<T>::value>
+struct safe_underlying_type {
+  using type = typename std::underlying_type<T>::type;
+};
+
+template <typename T>
+struct safe_underlying_type<T, false> {
+  using type = T;
+};
+}  // namespace
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+class DebugAnnotation;
+}  // namespace pbzero
+}  // namespace protos
+
+// A base class for custom track event debug annotations.
+class PERFETTO_EXPORT DebugAnnotation {
+ public:
+  DebugAnnotation() = default;
+  virtual ~DebugAnnotation();
+
+  // Called to write the contents of the debug annotation into the trace.
+  virtual void Add(protos::pbzero::DebugAnnotation*) const = 0;
+
+  void WriteIntoTracedValue(TracedValue context) const;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_DEBUG_ANNOTATION_H_
+// gen_amalgamated begin header: include/perfetto/tracing/traced_value.h
+// gen_amalgamated begin header: include/perfetto/tracing/internal/checked_scope.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_CHECKED_SCOPE_H_
+#define INCLUDE_PERFETTO_TRACING_INTERNAL_CHECKED_SCOPE_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+namespace perfetto {
+namespace internal {
+
+#if PERFETTO_DCHECK_IS_ON()
+
+// Checker to ensure that despite multiple scopes being present, only the active
+// one is being accessed. Rules:
+// - Only an active scope can create inner scopes. When this happens, it stops
+// being active and the inner scope becomes active instead.
+// - Only an active scope can be destroyed. When this happens, its parent scope
+// becomes active.
+class PERFETTO_EXPORT CheckedScope {
+ public:
+  explicit CheckedScope(CheckedScope* parent_scope);
+  ~CheckedScope();
+  CheckedScope(CheckedScope&&);
+  CheckedScope& operator=(CheckedScope&&);
+  CheckedScope(const CheckedScope&) = delete;
+  CheckedScope& operator=(const CheckedScope&) = delete;
+
+  void Reset();
+
+  CheckedScope* parent_scope() const { return parent_scope_; }
+  bool is_active() const { return is_active_; }
+
+ private:
+  void set_is_active(bool is_active) { is_active_ = is_active; }
+
+  bool is_active_ = true;
+  CheckedScope* parent_scope_;
+
+  bool deleted_ = false;
+};
+
+#else
+
+// Dummy for cases when DCHECK is not enabled. Methods are marked constexpr to
+// ensure that the compiler can inline and optimise them away.
+class CheckedScope {
+ public:
+  inline explicit CheckedScope(CheckedScope*) {}
+  inline ~CheckedScope() {}
+
+  inline void Reset() {}
+
+  inline CheckedScope* parent_scope() const { return nullptr; }
+  inline bool is_active() const { return true; }
+};
+
+#endif  // PERFETTO_DCHECK_IS_ON()
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_INTERNAL_CHECKED_SCOPE_H_
+// gen_amalgamated begin header: include/perfetto/tracing/string_helpers.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_STRING_HELPERS_H_
+#define INCLUDE_PERFETTO_TRACING_STRING_HELPERS_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+#include <cstddef>
+#include <string>
+
+namespace perfetto {
+
+// A wrapper for marking strings that can't be determined to be static at build
+// time, but are in fact static.
+class PERFETTO_EXPORT StaticString {
+ public:
+  // Implicit constructor for string literals.
+  template <size_t N>
+  constexpr StaticString(const char (&str)[N]) : value(str) {}
+
+  // Implicit constructor for null strings.
+  constexpr StaticString(std::nullptr_t) : value(nullptr) {}
+
+  constexpr explicit StaticString(const char* str) : value(str) {}
+
+  const char* value;
+};
+
+namespace internal {
+
+// Ensure that |string| is a static constant string.
+//
+// If you get a compiler failure here, you are most likely trying to use
+// TRACE_EVENT with a dynamic event name. There are two ways to fix this:
+//
+// 1) If the event name is actually dynamic (e.g., std::string), write it into
+//    the event manually:
+//
+//      TRACE_EVENT("category", nullptr, [&](perfetto::EventContext ctx) {
+//        ctx.event()->set_name(dynamic_name);
+//      });
+//
+// 2) If the name is static, but the pointer is computed at runtime, wrap it
+//    with perfetto::StaticString:
+//
+//      TRACE_EVENT("category", perfetto::StaticString{name});
+//
+//    DANGER: Using perfetto::StaticString with strings whose contents change
+//            dynamically can cause silent trace data corruption.
+//
+constexpr const char* GetStaticString(StaticString string) {
+  return string.value;
+}
+
+}  // namespace internal
+
+// A explicit wrapper for marking strings as dynamic to ensure that perfetto
+// doesn't try to cache the pointer value.
+class PERFETTO_EXPORT DynamicString {
+ public:
+  explicit DynamicString(const std::string& str)
+      : value(str.data()), length(str.length()) {}
+  explicit DynamicString(const char* str) : value(str), length(strlen(str)) {}
+  DynamicString(const char* str, size_t len) : value(str), length(len) {}
+
+  const char* value;
+  size_t length;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_STRING_HELPERS_H_
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_TRACED_VALUE_H_
+#define INCLUDE_PERFETTO_TRACING_TRACED_VALUE_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/template_util.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/checked_scope.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/string_helpers.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/traced_value_forward.h"
+
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+namespace perfetto {
+
+namespace protos {
+namespace pbzero {
+class DebugAnnotation;
+}
+}  // namespace protos
+
+class DebugAnnotation;
+
+// These classes provide a JSON-inspired way to write structed data into traces.
+//
+// Each TracedValue can be consumed exactly once to write a value into a trace
+// using one of the Write* methods.
+//
+// Write* methods fall into two categories:
+// - Primitive types (int, string, bool, double, etc): they just write the
+//   provided value, consuming the TracedValue in the process.
+// - Complex types (arrays and dicts): they consume the TracedValue and
+//   return a corresponding scoped object (TracedArray or TracedDictionary).
+//   This scope then can be used to write multiple items into the container:
+//   TracedArray::AppendItem and TracedDictionary::AddItem return a new
+//   TracedValue which then can be used to write an element of the
+//   dictionary or array.
+//
+// To define how a custom class should be written into the trace, users should
+// define one of the two following functions:
+// - Foo::WriteIntoTrace(TracedValue) const
+//   (preferred for code which depends on perfetto directly)
+// - perfetto::TraceFormatTraits<T>::WriteIntoTrace(
+//       TracedValue, const T&);
+//   (should be used if T is defined in a library which doesn't know anything
+//   about tracing).
+//
+//
+// After definiting a conversion method, the object can be used directly as a
+// TRACE_EVENT argument:
+//
+// Foo foo;
+// TRACE_EVENT("cat", "Event", "arg", foo);
+//
+// Examples:
+//
+// TRACE_EVENT("cat", "event", "params", [&](perfetto::TracedValue context)
+// {
+//   auto dict = std::move(context).WriteDictionary();
+//   dict->Add("param1", param1);
+//   dict->Add("param2", param2);
+//   ...
+//   dict->Add("paramN", paramN);
+//
+//   {
+//     auto inner_array = dict->AddArray("inner");
+//     inner_array->Append(value1);
+//     inner_array->Append(value2);
+//   }
+// });
+//
+// template <typename T>
+// TraceFormatTraits<std::optional<T>>::WriteIntoTrace(
+//    TracedValue context, const std::optional<T>& value) {
+//  if (!value) {
+//    std::move(context).WritePointer(nullptr);
+//    return;
+//  }
+//  perfetto::WriteIntoTrace(std::move(context), *value);
+// }
+//
+// template <typename T>
+// TraceFormatTraits<std::vector<T>>::WriteIntoTrace(
+//    TracedValue context, const std::array<T>& value) {
+//  auto array = std::move(context).WriteArray();
+//  for (const auto& item: value) {
+//    array_scope.Append(item);
+//  }
+// }
+//
+// class Foo {
+//   void WriteIntoTrace(TracedValue context) const {
+//     auto dict = std::move(context).WriteDictionary();
+//     dict->Set("key", 42);
+//     dict->Set("foo", "bar");
+//     dict->Set("member", member_);
+//   }
+// }
+namespace internal {
+PERFETTO_EXPORT TracedValue
+CreateTracedValueFromProto(protos::pbzero::DebugAnnotation*);
+}
+
+class PERFETTO_EXPORT TracedValue {
+ public:
+  TracedValue(const TracedValue&) = delete;
+  TracedValue& operator=(const TracedValue&) = delete;
+  TracedValue& operator=(TracedValue&&) = delete;
+  TracedValue(TracedValue&&) = default;
+  ~TracedValue() = default;
+
+  // TracedValue represents a context into which a single value can be written
+  // (either by writing it directly for primitive types, or by creating a
+  // TracedArray or TracedDictionary for the complex types). This is enforced
+  // by allowing Write* methods to be called only on rvalue references.
+
+  void WriteInt64(int64_t value) &&;
+  void WriteUInt64(uint64_t value) &&;
+  void WriteDouble(double value) &&;
+  void WriteBoolean(bool value) &&;
+  void WriteString(const char*) &&;
+  void WriteString(const char*, size_t len) &&;
+  void WriteString(const std::string&) &&;
+  void WritePointer(const void* value) &&;
+
+  // Rules for writing nested dictionaries and arrays:
+  // - Only one scope (TracedArray, TracedDictionary or TracedValue) can be
+  // active at the same time. It's only allowed to call methods on the active
+  // scope.
+  // - When a scope creates a nested scope, the new scope becomes active.
+  // - When a scope is destroyed, it's parent scope becomes active again.
+  //
+  // Typically users will have to create a scope only at the beginning of a
+  // conversion function and this scope should be destroyed at the end of it.
+  // TracedArray::Append and TracedDictionary::Add create, write and complete
+  // inner scopes automatically.
+
+  // Scope which allows multiple values to be appended.
+  TracedArray WriteArray() && PERFETTO_WARN_UNUSED_RESULT;
+
+  // Scope which allows multiple key-value pairs to be added.
+  TracedDictionary WriteDictionary() && PERFETTO_WARN_UNUSED_RESULT;
+
+ private:
+  friend class TracedArray;
+  friend class TracedDictionary;
+  friend TracedValue internal::CreateTracedValueFromProto(
+      protos::pbzero::DebugAnnotation*);
+
+  static TracedValue CreateFromProto(protos::pbzero::DebugAnnotation*);
+
+  inline explicit TracedValue(protos::pbzero::DebugAnnotation* context,
+                              internal::CheckedScope* parent_scope)
+      : context_(context), checked_scope_(parent_scope) {}
+
+  // Temporary support for perfetto::DebugAnnotation C++ class before it's going
+  // to be replaced by TracedValue.
+  // TODO(altimin): Convert v8 to use TracedValue directly and delete it.
+  friend class DebugAnnotation;
+
+  protos::pbzero::DebugAnnotation* const context_ = nullptr;
+
+  internal::CheckedScope checked_scope_;
+};
+
+class PERFETTO_EXPORT TracedArray {
+ public:
+  TracedArray(const TracedArray&) = delete;
+  TracedArray& operator=(const TracedArray&) = delete;
+  TracedArray& operator=(TracedArray&&) = delete;
+  TracedArray(TracedArray&&) = default;
+  ~TracedArray() = default;
+
+  TracedValue AppendItem();
+
+  template <typename T>
+  void Append(T&& value) {
+    WriteIntoTracedValue(AppendItem(), std::forward<T>(value));
+  }
+
+  TracedDictionary AppendDictionary() PERFETTO_WARN_UNUSED_RESULT;
+  TracedArray AppendArray();
+
+ private:
+  friend class TracedValue;
+
+  inline explicit TracedArray(protos::pbzero::DebugAnnotation* context,
+                              internal::CheckedScope* parent_scope)
+      : context_(context), checked_scope_(parent_scope) {}
+
+  protos::pbzero::DebugAnnotation* context_;
+
+  internal::CheckedScope checked_scope_;
+};
+
+class PERFETTO_EXPORT TracedDictionary {
+ public:
+  TracedDictionary(const TracedDictionary&) = delete;
+  TracedDictionary& operator=(const TracedDictionary&) = delete;
+  TracedDictionary& operator=(TracedDictionary&&) = delete;
+  TracedDictionary(TracedDictionary&&) = default;
+  ~TracedDictionary() = default;
+
+  // There are two paths for writing dictionary keys: fast path for writing
+  // compile-time const, whose pointer is remains valid during the entire
+  // runtime of the program and the slow path for dynamic strings, which need to
+  // be copied.
+  // In the most common case, a string literal can be passed to `Add`/`AddItem`.
+  // In other cases, either StaticString or DynamicString declarations are
+  // needed.
+
+  TracedValue AddItem(StaticString key);
+  TracedValue AddItem(DynamicString key);
+
+  template <typename T>
+  void Add(StaticString key, T&& value) {
+    WriteIntoTracedValue(AddItem(key), std::forward<T>(value));
+  }
+
+  template <typename T>
+  void Add(DynamicString key, T&& value) {
+    WriteIntoTracedValue(AddItem(key), std::forward<T>(value));
+  }
+
+  TracedDictionary AddDictionary(StaticString key);
+  TracedDictionary AddDictionary(DynamicString key);
+  TracedArray AddArray(StaticString key);
+  TracedArray AddArray(DynamicString key);
+
+ private:
+  friend class TracedValue;
+
+  inline explicit TracedDictionary(protos::pbzero::DebugAnnotation* context,
+                                   internal::CheckedScope* parent_scope)
+      : context_(context), checked_scope_(parent_scope) {}
+
+  protos::pbzero::DebugAnnotation* context_;
+
+  internal::CheckedScope checked_scope_;
+};
+
+namespace internal {
+
+// SFINAE helpers for finding a right overload to convert a given class to
+// trace-friendly form, ordered from most to least preferred.
+
+constexpr int kMaxWriteImplPriority = 4;
+
+// If T has WriteIntoTracedValue member function, call it.
+template <typename T>
+decltype(std::declval<T>().WriteIntoTracedValue(std::declval<TracedValue>()),
+         void())
+WriteImpl(base::priority_tag<4>, TracedValue context, T&& value) {
+  value.WriteIntoTracedValue(std::move(context));
+}
+
+// If T has WriteIntoTrace member function, call it.
+template <typename T>
+decltype(std::declval<T>().WriteIntoTrace(std::declval<TracedValue>()), void())
+WriteImpl(base::priority_tag<4>, TracedValue context, T&& value) {
+  value.WriteIntoTrace(std::move(context));
+}
+
+// If perfetto::TraceFormatTraits<T>::WriteIntoTracedValue(TracedValue, const
+// T&) is available, use it.
+template <typename T>
+decltype(TraceFormatTraits<base::remove_cvref_t<T>>::WriteIntoTracedValue(
+             std::declval<TracedValue>(),
+             std::declval<T>()),
+         void())
+WriteImpl(base::priority_tag<3>, TracedValue context, T&& value) {
+  TraceFormatTraits<base::remove_cvref_t<T>>::WriteIntoTracedValue(
+      std::move(context), std::forward<T>(value));
+}
+
+// If perfetto::TraceFormatTraits<T>::WriteIntoTrace(TracedValue, const T&)
+// is available, use it.
+template <typename T>
+decltype(TraceFormatTraits<base::remove_cvref_t<T>>::WriteIntoTrace(
+             std::declval<TracedValue>(),
+             std::declval<T>()),
+         void())
+WriteImpl(base::priority_tag<3>, TracedValue context, T&& value) {
+  TraceFormatTraits<base::remove_cvref_t<T>>::WriteIntoTrace(
+      std::move(context), std::forward<T>(value));
+}
+
+// If T has operator(), which takes TracedValue, use it.
+// Very useful for lambda resolutions.
+template <typename T>
+decltype(std::declval<T>()(std::declval<TracedValue>()), void())
+WriteImpl(base::priority_tag<2>, TracedValue context, T&& value) {
+  std::forward<T>(value)(std::move(context));
+}
+
+// If T is a container and its elements have tracing support, use it.
+//
+// Note: a reference to T should be passed to std::begin, otherwise
+// for non-reference types const T& will be passed to std::begin, losing
+// support for non-const WriteIntoTracedValue methods.
+template <typename T>
+typename check_traced_value_support<
+    decltype(*std::begin(std::declval<T&>()))>::type
+WriteImpl(base::priority_tag<1>, TracedValue context, T&& value) {
+  auto array = std::move(context).WriteArray();
+  for (auto&& item : value) {
+    array.Append(item);
+  }
+}
+
+// std::underlying_type can't be used with non-enum types, so we need this
+// indirection.
+template <typename T, bool = std::is_enum<T>::value>
+struct safe_underlying_type {
+  using type = typename std::underlying_type<T>::type;
+};
+
+template <typename T>
+struct safe_underlying_type<T, false> {
+  using type = T;
+};
+
+template <typename T>
+struct is_incomplete_type {
+  static constexpr bool value = sizeof(T) != 0;
+};
+
+// sizeof is not available for const char[], but it's still not considered to be
+// an incomplete type for our purposes as the size can be determined at runtime
+// due to strings being null-terminated.
+template <>
+struct is_incomplete_type<const char[]> {
+  static constexpr bool value = true;
+};
+
+}  // namespace internal
+
+// Helper template to determine if a given type can be passed to
+// perfetto::WriteIntoTracedValue. These templates will fail to resolve if the
+// class does not have it support, so they are useful in SFINAE and in producing
+// helpful compiler results.
+template <typename T, class Result = void>
+using check_traced_value_support_t = decltype(
+    internal::WriteImpl(
+        std::declval<base::priority_tag<internal::kMaxWriteImplPriority>>(),
+        std::declval<TracedValue>(),
+        std::declval<T>()),
+    std::declval<Result>());
+
+// check_traced_value_support<T, V>::type is defined (and equal to V) iff T
+// supports being passed to WriteIntoTracedValue. See the comment in
+// traced_value_forward.h for more details.
+template <typename T, class Result>
+struct check_traced_value_support<T,
+                                  Result,
+                                  check_traced_value_support_t<T, Result>> {
+  static_assert(
+      internal::is_incomplete_type<T>::value,
+      "perfetto::TracedValue should not be used with incomplete types");
+
+  static constexpr bool value = true;
+  using type = Result;
+};
+
+namespace internal {
+
+// Helper class to check if a given type can be passed to
+// perfetto::WriteIntoTracedValue. This template will always resolve (with
+// |value| being set to either true or false depending on presence of the
+// support, so this macro is useful in the situation when you want to e.g. OR
+// the result with some other conditions.
+//
+// In this case, compiler will not give you the full deduction chain, so, for
+// example, use check_traced_value_support for writing positive static_asserts
+// and has_traced_value_support for writing negative.
+template <typename T>
+class has_traced_value_support {
+  using Yes = char[1];
+  using No = char[2];
+
+  template <typename V>
+  static Yes& check_support(check_traced_value_support_t<V, int>);
+  template <typename V>
+  static No& check_support(...);
+
+ public:
+  static constexpr bool value = sizeof(Yes) == sizeof(check_support<T>(0));
+};
+
+}  // namespace internal
+
+template <typename T>
+void WriteIntoTracedValue(TracedValue context, T&& value) {
+  // TODO(altimin): Add a URL to documentation and a list of common failure
+  // patterns.
+  static_assert(
+      internal::has_traced_value_support<T>::value,
+      "The provided type (passed to TRACE_EVENT argument / TracedArray::Append "
+      "/ TracedDictionary::Add) does not support being written in a trace "
+      "format. Please see the comment in traced_value.h for more details.");
+
+  // Should be kept in sync with check_traced_value_support_t!
+  internal::WriteImpl(base::priority_tag<internal::kMaxWriteImplPriority>(),
+                      std::move(context), std::forward<T>(value));
+}
+
+// Helpers to write a given value into TracedValue even if the given type
+// doesn't support conversion (in which case the provided fallback should be
+// used). Useful for automatically generating conversions for autogenerated
+// code, but otherwise shouldn't be used as non-autogenerated code is expected
+// to define WriteIntoTracedValue convertor.
+// See WriteWithFallback test in traced_value_unittest.cc for a concrete
+// example.
+template <typename T>
+typename std::enable_if<internal::has_traced_value_support<T>::value>::type
+WriteIntoTracedValueWithFallback(TracedValue context,
+                                 T&& value,
+                                 const std::string&) {
+  WriteIntoTracedValue(std::move(context), std::forward<T>(value));
+}
+
+template <typename T>
+typename std::enable_if<!internal::has_traced_value_support<T>::value>::type
+WriteIntoTracedValueWithFallback(TracedValue context,
+                                 T&&,
+                                 const std::string& fallback) {
+  std::move(context).WriteString(fallback);
+}
+
+// TraceFormatTraits implementations for primitive types.
+
+// Specialisation for signed integer types (note: it excludes enums, which have
+// their own explicit specialisation).
+template <typename T>
+struct TraceFormatTraits<
+    T,
+    typename std::enable_if<std::is_integral<T>::value &&
+                            !std::is_same<T, bool>::value &&
+                            std::is_signed<T>::value>::type> {
+  inline static void WriteIntoTrace(TracedValue context, T value) {
+    std::move(context).WriteInt64(value);
+  }
+};
+
+// Specialisation for unsigned integer types (note: it excludes enums, which
+// have their own explicit specialisation).
+template <typename T>
+struct TraceFormatTraits<
+    T,
+    typename std::enable_if<std::is_integral<T>::value &&
+                            !std::is_same<T, bool>::value &&
+                            std::is_unsigned<T>::value>::type> {
+  inline static void WriteIntoTrace(TracedValue context, T value) {
+    std::move(context).WriteUInt64(value);
+  }
+};
+
+// Specialisation for bools.
+template <>
+struct TraceFormatTraits<bool> {
+  inline static void WriteIntoTrace(TracedValue context, bool value) {
+    std::move(context).WriteBoolean(value);
+  }
+};
+
+// Specialisation for floating point values.
+template <typename T>
+struct TraceFormatTraits<
+    T,
+    typename std::enable_if<std::is_floating_point<T>::value>::type> {
+  inline static void WriteIntoTrace(TracedValue context, T value) {
+    std::move(context).WriteDouble(static_cast<double>(value));
+  }
+};
+
+// Specialisation for signed enums.
+template <typename T>
+struct TraceFormatTraits<
+    T,
+    typename std::enable_if<
+        std::is_enum<T>::value &&
+        std::is_signed<
+            typename internal::safe_underlying_type<T>::type>::value>::type> {
+  inline static void WriteIntoTrace(TracedValue context, T value) {
+    std::move(context).WriteInt64(static_cast<int64_t>(value));
+  }
+};
+
+// Specialisation for unsigned enums.
+template <typename T>
+struct TraceFormatTraits<
+    T,
+    typename std::enable_if<
+        std::is_enum<T>::value &&
+        std::is_unsigned<
+            typename internal::safe_underlying_type<T>::type>::value>::type> {
+  inline static void WriteIntoTrace(TracedValue context, T value) {
+    std::move(context).WriteUInt64(static_cast<uint64_t>(value));
+  }
+};
+
+// Specialisations for C-style strings.
+template <>
+struct TraceFormatTraits<const char*> {
+  inline static void WriteIntoTrace(TracedValue context, const char* value) {
+    std::move(context).WriteString(value);
+  }
+};
+
+template <>
+struct TraceFormatTraits<char[]> {
+  inline static void WriteIntoTrace(TracedValue context, const char value[]) {
+    std::move(context).WriteString(value);
+  }
+};
+
+template <size_t N>
+struct TraceFormatTraits<char[N]> {
+  inline static void WriteIntoTrace(TracedValue context, const char value[N]) {
+    std::move(context).WriteString(value);
+  }
+};
+
+// Specialisation for C++ strings.
+template <>
+struct TraceFormatTraits<std::string> {
+  inline static void WriteIntoTrace(TracedValue context,
+                                    const std::string& value) {
+    std::move(context).WriteString(value);
+  }
+};
+
+// Specialisation for (const) void*, which writes the pointer value.
+template <>
+struct TraceFormatTraits<void*> {
+  inline static void WriteIntoTrace(TracedValue context, void* value) {
+    std::move(context).WritePointer(value);
+  }
+};
+
+template <>
+struct TraceFormatTraits<const void*> {
+  inline static void WriteIntoTrace(TracedValue context, const void* value) {
+    std::move(context).WritePointer(value);
+  }
+};
+
+// Specialisation for std::unique_ptr<>, which writes either nullptr or the
+// object it points to.
+template <typename T>
+struct TraceFormatTraits<std::unique_ptr<T>, check_traced_value_support_t<T>> {
+  inline static void WriteIntoTrace(TracedValue context,
+                                    const std::unique_ptr<T>& value) {
+    ::perfetto::WriteIntoTracedValue(std::move(context), value.get());
+  }
+};
+
+// Specialisation for raw pointer, which writes either nullptr or the object it
+// points to.
+template <typename T>
+struct TraceFormatTraits<T*, check_traced_value_support_t<T>> {
+  inline static void WriteIntoTrace(TracedValue context, T* value) {
+    if (!value) {
+      std::move(context).WritePointer(nullptr);
+      return;
+    }
+    ::perfetto::WriteIntoTracedValue(std::move(context), *value);
+  }
+};
+
+// Specialisation for nullptr.
+template <>
+struct TraceFormatTraits<std::nullptr_t> {
+  inline static void WriteIntoTrace(TracedValue context, std::nullptr_t) {
+    std::move(context).WritePointer(nullptr);
+  }
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_TRACED_VALUE_H_
+// gen_amalgamated begin header: include/perfetto/tracing/track.h
+// gen_amalgamated begin header: include/perfetto/base/proc_utils.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_BASE_PROC_UTILS_H_
+#define INCLUDE_PERFETTO_BASE_PROC_UTILS_H_
+
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <Windows.h>
+#include <processthreadsapi.h>
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
+#include <zircon/process.h>
+#include <zircon/types.h>
+#else
+#include <unistd.h>
+#endif
+
+namespace perfetto {
+namespace base {
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
+using PlatformProcessId = zx_handle_t;
+inline PlatformProcessId GetProcessId() {
+  return zx_process_self();
+}
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+using PlatformProcessId = uint64_t;
+inline PlatformProcessId GetProcessId() {
+  return static_cast<uint64_t>(GetCurrentProcessId());
+}
+#else
+using PlatformProcessId = pid_t;
+inline PlatformProcessId GetProcessId() {
+  return getpid();
+}
+#endif
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_BASE_PROC_UTILS_H_
+// gen_amalgamated begin header: include/perfetto/base/thread_utils.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_BASE_THREAD_UTILS_H_
+#define INCLUDE_PERFETTO_BASE_THREAD_UTILS_H_
+
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/base/build_config.h"
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+#include <Windows.h>
+#include <processthreadsapi.h>
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
+#include <zircon/process.h>
+#include <zircon/types.h>
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
+    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <unistd.h>
+#else
+#include <pthread.h>
+#endif
+
+namespace perfetto {
+namespace base {
+
+#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
+using PlatformThreadId = pid_t;
+inline PlatformThreadId GetThreadId() {
+  return gettid();
+}
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX)
+using PlatformThreadId = pid_t;
+inline PlatformThreadId GetThreadId() {
+  return static_cast<pid_t>(syscall(__NR_gettid));
+}
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
+using PlatformThreadId = zx_handle_t;
+inline PlatformThreadId GetThreadId() {
+  return zx_thread_self();
+}
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE)
+using PlatformThreadId = uint64_t;
+inline PlatformThreadId GetThreadId() {
+  uint64_t tid;
+  pthread_threadid_np(nullptr, &tid);
+  return tid;
+}
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+using PlatformThreadId = uint64_t;
+inline PlatformThreadId GetThreadId() {
+  return static_cast<uint64_t>(GetCurrentThreadId());
+}
+#elif PERFETTO_BUILDFLAG(PERFETTO_OS_NACL)
+using PlatformThreadId = pid_t;
+inline PlatformThreadId GetThreadId() {
+  return reinterpret_cast<int32_t>(pthread_self());
+}
+#else  // Default to pthreads in case no OS is set.
+using PlatformThreadId = pthread_t;
+inline PlatformThreadId GetThreadId() {
+  return pthread_self();
+}
+#endif
+
+}  // namespace base
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_BASE_THREAD_UTILS_H_
+// gen_amalgamated begin header: include/perfetto/tracing/internal/compile_time_hash.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_COMPILE_TIME_HASH_H_
+#define INCLUDE_PERFETTO_TRACING_INTERNAL_COMPILE_TIME_HASH_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace perfetto {
+namespace internal {
+
+// A helper class which computes a 64-bit hash of the input data at compile
+// time. The algorithm used is FNV-1a as it is fast and easy to implement and
+// has relatively few collisions.
+// WARNING: This hash function should not be used for any cryptographic purpose.
+class CompileTimeHash {
+ public:
+  // Creates an empty hash object
+  constexpr inline CompileTimeHash() {}
+
+  // Hashes a byte array.
+  constexpr inline CompileTimeHash Update(const char* data, size_t size) const {
+    return CompileTimeHash(HashRecursively(kFnv1a64OffsetBasis, data, size));
+  }
+
+  constexpr inline uint64_t digest() const { return result_; }
+
+ private:
+  constexpr inline CompileTimeHash(uint64_t result) : result_(result) {}
+
+  static constexpr inline uint64_t HashRecursively(uint64_t value,
+                                                   const char* data,
+                                                   size_t size) {
+    return !size ? value
+                 : HashRecursively(
+                       (value ^ static_cast<uint8_t>(*data)) * kFnv1a64Prime,
+                       data + 1, size - 1);
+  }
+
+  static constexpr uint64_t kFnv1a64OffsetBasis = 0xcbf29ce484222325;
+  static constexpr uint64_t kFnv1a64Prime = 0x100000001b3;
+
+  uint64_t result_ = kFnv1a64OffsetBasis;
+};
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_INTERNAL_COMPILE_TIME_HASH_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/counter_descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_COUNTER_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_COUNTER_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum CounterDescriptor_BuiltinCounterType : int32_t;
+enum CounterDescriptor_Unit : int32_t;
+
+enum CounterDescriptor_BuiltinCounterType : int32_t {
+  CounterDescriptor_BuiltinCounterType_COUNTER_UNSPECIFIED = 0,
+  CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_TIME_NS = 1,
+  CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_INSTRUCTION_COUNT = 2,
+};
+
+const CounterDescriptor_BuiltinCounterType CounterDescriptor_BuiltinCounterType_MIN = CounterDescriptor_BuiltinCounterType_COUNTER_UNSPECIFIED;
+const CounterDescriptor_BuiltinCounterType CounterDescriptor_BuiltinCounterType_MAX = CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_INSTRUCTION_COUNT;
+
+enum CounterDescriptor_Unit : int32_t {
+  CounterDescriptor_Unit_UNIT_UNSPECIFIED = 0,
+  CounterDescriptor_Unit_UNIT_TIME_NS = 1,
+  CounterDescriptor_Unit_UNIT_COUNT = 2,
+  CounterDescriptor_Unit_UNIT_SIZE_BYTES = 3,
+};
+
+const CounterDescriptor_Unit CounterDescriptor_Unit_MIN = CounterDescriptor_Unit_UNIT_UNSPECIFIED;
+const CounterDescriptor_Unit CounterDescriptor_Unit_MAX = CounterDescriptor_Unit_UNIT_SIZE_BYTES;
+
+class CounterDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  CounterDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CounterDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CounterDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_type() const { return at<1>().valid(); }
+  int32_t type() const { return at<1>().as_int32(); }
+  bool has_categories() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> categories() const { return GetRepeated<::protozero::ConstChars>(2); }
+  bool has_unit() const { return at<3>().valid(); }
+  int32_t unit() const { return at<3>().as_int32(); }
+  bool has_unit_name() const { return at<6>().valid(); }
+  ::protozero::ConstChars unit_name() const { return at<6>().as_string(); }
+  bool has_unit_multiplier() const { return at<4>().valid(); }
+  int64_t unit_multiplier() const { return at<4>().as_int64(); }
+  bool has_is_incremental() const { return at<5>().valid(); }
+  bool is_incremental() const { return at<5>().as_bool(); }
+};
+
+class CounterDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = CounterDescriptor_Decoder;
+  enum : int32_t {
+    kTypeFieldNumber = 1,
+    kCategoriesFieldNumber = 2,
+    kUnitFieldNumber = 3,
+    kUnitNameFieldNumber = 6,
+    kUnitMultiplierFieldNumber = 4,
+    kIsIncrementalFieldNumber = 5,
+  };
+  using BuiltinCounterType = ::perfetto::protos::pbzero::CounterDescriptor_BuiltinCounterType;
+  using Unit = ::perfetto::protos::pbzero::CounterDescriptor_Unit;
+  static const BuiltinCounterType COUNTER_UNSPECIFIED = CounterDescriptor_BuiltinCounterType_COUNTER_UNSPECIFIED;
+  static const BuiltinCounterType COUNTER_THREAD_TIME_NS = CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_TIME_NS;
+  static const BuiltinCounterType COUNTER_THREAD_INSTRUCTION_COUNT = CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_INSTRUCTION_COUNT;
+  static const Unit UNIT_UNSPECIFIED = CounterDescriptor_Unit_UNIT_UNSPECIFIED;
+  static const Unit UNIT_TIME_NS = CounterDescriptor_Unit_UNIT_TIME_NS;
+  static const Unit UNIT_COUNT = CounterDescriptor_Unit_UNIT_COUNT;
+  static const Unit UNIT_SIZE_BYTES = CounterDescriptor_Unit_UNIT_SIZE_BYTES;
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::CounterDescriptor_BuiltinCounterType,
+      CounterDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(::perfetto::protos::pbzero::CounterDescriptor_BuiltinCounterType value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Categories =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      CounterDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Categories kCategories() { return {}; }
+  void add_categories(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Categories::kFieldId, data, size);
+  }
+  void add_categories(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Categories::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Unit =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::CounterDescriptor_Unit,
+      CounterDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Unit kUnit() { return {}; }
+  void set_unit(::perfetto::protos::pbzero::CounterDescriptor_Unit value) {
+    static constexpr uint32_t field_id = FieldMetadata_Unit::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UnitName =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      CounterDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UnitName kUnitName() { return {}; }
+  void set_unit_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_UnitName::kFieldId, data, size);
+  }
+  void set_unit_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_UnitName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UnitMultiplier =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      CounterDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UnitMultiplier kUnitMultiplier() { return {}; }
+  void set_unit_multiplier(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UnitMultiplier::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IsIncremental =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      CounterDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IsIncremental kIsIncremental() { return {}; }
+  void set_is_incremental(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_IsIncremental::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/track_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TrackDescriptor;
+class CounterDescriptor;
+class ChromeThreadDescriptor;
+class ThreadDescriptor;
+class ChromeProcessDescriptor;
+class ProcessDescriptor;
+enum CounterDescriptor_BuiltinCounterType : int;
+enum CounterDescriptor_Unit : int;
+enum ChromeThreadDescriptor_ThreadType : int;
+enum ThreadDescriptor_ChromeThreadType : int;
+enum ChromeProcessDescriptor_ProcessType : int;
+enum ProcessDescriptor_ChromeProcessType : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT TrackDescriptor : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kUuidFieldNumber = 1,
+    kParentUuidFieldNumber = 5,
+    kNameFieldNumber = 2,
+    kProcessFieldNumber = 3,
+    kChromeProcessFieldNumber = 6,
+    kThreadFieldNumber = 4,
+    kChromeThreadFieldNumber = 7,
+    kCounterFieldNumber = 8,
+  };
+
+  TrackDescriptor();
+  ~TrackDescriptor() override;
+  TrackDescriptor(TrackDescriptor&&) noexcept;
+  TrackDescriptor& operator=(TrackDescriptor&&);
+  TrackDescriptor(const TrackDescriptor&);
+  TrackDescriptor& operator=(const TrackDescriptor&);
+  bool operator==(const TrackDescriptor&) const;
+  bool operator!=(const TrackDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_uuid() const { return _has_field_[1]; }
+  uint64_t uuid() const { return uuid_; }
+  void set_uuid(uint64_t value) { uuid_ = value; _has_field_.set(1); }
+
+  bool has_parent_uuid() const { return _has_field_[5]; }
+  uint64_t parent_uuid() const { return parent_uuid_; }
+  void set_parent_uuid(uint64_t value) { parent_uuid_ = value; _has_field_.set(5); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+  bool has_process() const { return _has_field_[3]; }
+  const ProcessDescriptor& process() const { return *process_; }
+  ProcessDescriptor* mutable_process() { _has_field_.set(3); return process_.get(); }
+
+  bool has_chrome_process() const { return _has_field_[6]; }
+  const ChromeProcessDescriptor& chrome_process() const { return *chrome_process_; }
+  ChromeProcessDescriptor* mutable_chrome_process() { _has_field_.set(6); return chrome_process_.get(); }
+
+  bool has_thread() const { return _has_field_[4]; }
+  const ThreadDescriptor& thread() const { return *thread_; }
+  ThreadDescriptor* mutable_thread() { _has_field_.set(4); return thread_.get(); }
+
+  bool has_chrome_thread() const { return _has_field_[7]; }
+  const ChromeThreadDescriptor& chrome_thread() const { return *chrome_thread_; }
+  ChromeThreadDescriptor* mutable_chrome_thread() { _has_field_.set(7); return chrome_thread_.get(); }
+
+  bool has_counter() const { return _has_field_[8]; }
+  const CounterDescriptor& counter() const { return *counter_; }
+  CounterDescriptor* mutable_counter() { _has_field_.set(8); return counter_.get(); }
+
+ private:
+  uint64_t uuid_{};
+  uint64_t parent_uuid_{};
+  std::string name_{};
+  ::protozero::CopyablePtr<ProcessDescriptor> process_;
+  ::protozero::CopyablePtr<ChromeProcessDescriptor> chrome_process_;
+  ::protozero::CopyablePtr<ThreadDescriptor> thread_;
+  ::protozero::CopyablePtr<ChromeThreadDescriptor> chrome_thread_;
+  ::protozero::CopyablePtr<CounterDescriptor> counter_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<9> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/track_descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class ChromeProcessDescriptor;
+class ChromeThreadDescriptor;
+class CounterDescriptor;
+class ProcessDescriptor;
+class ThreadDescriptor;
+
+class TrackDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TrackDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TrackDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TrackDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_uuid() const { return at<1>().valid(); }
+  uint64_t uuid() const { return at<1>().as_uint64(); }
+  bool has_parent_uuid() const { return at<5>().valid(); }
+  uint64_t parent_uuid() const { return at<5>().as_uint64(); }
+  bool has_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars name() const { return at<2>().as_string(); }
+  bool has_process() const { return at<3>().valid(); }
+  ::protozero::ConstBytes process() const { return at<3>().as_bytes(); }
+  bool has_chrome_process() const { return at<6>().valid(); }
+  ::protozero::ConstBytes chrome_process() const { return at<6>().as_bytes(); }
+  bool has_thread() const { return at<4>().valid(); }
+  ::protozero::ConstBytes thread() const { return at<4>().as_bytes(); }
+  bool has_chrome_thread() const { return at<7>().valid(); }
+  ::protozero::ConstBytes chrome_thread() const { return at<7>().as_bytes(); }
+  bool has_counter() const { return at<8>().valid(); }
+  ::protozero::ConstBytes counter() const { return at<8>().as_bytes(); }
+};
+
+class TrackDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = TrackDescriptor_Decoder;
+  enum : int32_t {
+    kUuidFieldNumber = 1,
+    kParentUuidFieldNumber = 5,
+    kNameFieldNumber = 2,
+    kProcessFieldNumber = 3,
+    kChromeProcessFieldNumber = 6,
+    kThreadFieldNumber = 4,
+    kChromeThreadFieldNumber = 7,
+    kCounterFieldNumber = 8,
+  };
+
+  using FieldMetadata_Uuid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Uuid kUuid() { return {}; }
+  void set_uuid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Uuid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ParentUuid =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ParentUuid kParentUuid() { return {}; }
+  void set_parent_uuid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ParentUuid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Process =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProcessDescriptor,
+      TrackDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Process kProcess() { return {}; }
+  template <typename T = ProcessDescriptor> T* set_process() {
+    return BeginNestedMessage<T>(3);
+  }
+
+
+  using FieldMetadata_ChromeProcess =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeProcessDescriptor,
+      TrackDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeProcess kChromeProcess() { return {}; }
+  template <typename T = ChromeProcessDescriptor> T* set_chrome_process() {
+    return BeginNestedMessage<T>(6);
+  }
+
+
+  using FieldMetadata_Thread =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ThreadDescriptor,
+      TrackDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Thread kThread() { return {}; }
+  template <typename T = ThreadDescriptor> T* set_thread() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_ChromeThread =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeThreadDescriptor,
+      TrackDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeThread kChromeThread() { return {}; }
+  template <typename T = ChromeThreadDescriptor> T* set_chrome_thread() {
+    return BeginNestedMessage<T>(7);
+  }
+
+
+  using FieldMetadata_Counter =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CounterDescriptor,
+      TrackDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Counter kCounter() { return {}; }
+  template <typename T = CounterDescriptor> T* set_counter() {
+    return BeginNestedMessage<T>(8);
+  }
+
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_TRACK_H_
+#define INCLUDE_PERFETTO_TRACING_TRACK_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/proc_utils.h"
+// gen_amalgamated expanded: #include "perfetto/base/thread_utils.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/compile_time_hash.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/counter_descriptor.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_descriptor.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_descriptor.pbzero.h"
+
+#include <stdint.h>
+#include <map>
+#include <mutex>
+
+namespace perfetto {
+namespace internal {
+class TrackRegistry;
+}
+
+// Track events are recorded on a timeline track, which maintains the relative
+// time ordering of all events on that track. Each thread has its own default
+// track (ThreadTrack), which is by default where all track events are written.
+// Thread tracks are grouped under their hosting process (ProcessTrack).
+
+// Events which aren't strictly scoped to a thread or a process, or don't
+// correspond to synchronous code execution on a thread can use a custom
+// track (Track, ThreadTrack or ProcessTrack). A Track object can also
+// optionally be parented to a thread or a process.
+//
+// A track is represented by a uuid, which must be unique across the entire
+// recorded trace.
+//
+// For example, to record an event that begins and ends on different threads,
+// use a matching id to tie the begin and end events together:
+//
+//   TRACE_EVENT_BEGIN("category", "AsyncEvent", perfetto::Track(8086));
+//   ...
+//   TRACE_EVENT_END("category", perfetto::Track(8086));
+//
+// Tracks can also be annotated with metadata:
+//
+//   auto desc = track.Serialize();
+//   desc.set_name("MyTrack");
+//   perfetto::TrackEvent::SetTrackDescriptor(track, desc);
+//
+// Threads and processes can also be named in a similar way, e.g.:
+//
+//   auto desc = perfetto::ProcessTrack::Current().Serialize();
+//   desc.mutable_process()->set_process_name("MyProcess");
+//   perfetto::TrackEvent::SetTrackDescriptor(
+//       perfetto::ProcessTrack::Current(), desc);
+//
+// The metadata remains valid between tracing sessions. To free up data for a
+// track, call EraseTrackDescriptor:
+//
+//   perfetto::TrackEvent::EraseTrackDescriptor(track);
+//
+struct PERFETTO_EXPORT Track {
+  const uint64_t uuid;
+  const uint64_t parent_uuid;
+  constexpr Track() : uuid(0), parent_uuid(0) {}
+
+  // Construct a track with identifier |id|, optionally parented under |parent|.
+  // If no parent is specified, the track's parent is the current process's
+  // track.
+  //
+  // To minimize the chances for accidental id collisions across processes, the
+  // track's effective uuid is generated by xorring |id| with a random,
+  // per-process cookie.
+  explicit constexpr Track(uint64_t id, Track parent = MakeProcessTrack())
+      : uuid(id ^ parent.uuid), parent_uuid(parent.uuid) {}
+
+  explicit operator bool() const { return uuid; }
+  void Serialize(protos::pbzero::TrackDescriptor*) const;
+  protos::gen::TrackDescriptor Serialize() const;
+
+  // Construct a global track with identifier |id|.
+  //
+  // Beware: the globally unique |id| should be chosen carefully to avoid
+  // accidental clashes with track identifiers emitted by other producers.
+  static Track Global(uint64_t id) { return Track(id, Track()); }
+
+  // Construct a track using |ptr| as identifier.
+  static Track FromPointer(const void* ptr, Track parent = MakeProcessTrack()) {
+    // Using pointers as global TrackIds isn't supported as pointers are
+    // per-proccess and the same pointer value can be used in different
+    // processes.
+    PERFETTO_DCHECK(parent.uuid != Track().uuid);
+
+    return Track(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(ptr)),
+                 parent);
+  }
+
+ protected:
+  constexpr Track(uint64_t uuid_, uint64_t parent_uuid_)
+      : uuid(uuid_), parent_uuid(parent_uuid_) {}
+
+  static Track MakeThreadTrack(base::PlatformThreadId tid) {
+    // If tid were 0 here (which is an invalid tid), we would create a thread
+    // track with a uuid that conflicts with the corresponding ProcessTrack.
+    PERFETTO_DCHECK(tid != 0);
+    return Track(static_cast<uint64_t>(tid), MakeProcessTrack());
+  }
+
+  static Track MakeProcessTrack() { return Track(process_uuid, Track()); }
+
+  static constexpr inline uint64_t CompileTimeHash(const char* string) {
+    return internal::CompileTimeHash()
+        .Update(string, static_cast<size_t>(base::StrEnd(string) - string))
+        .digest();
+  }
+
+ private:
+  friend class internal::TrackRegistry;
+  static uint64_t process_uuid;
+};
+
+// A process track represents events that describe the state of the entire
+// application (e.g., counter events). Currently a ProcessTrack can only
+// represent the current process.
+struct PERFETTO_EXPORT ProcessTrack : public Track {
+  const base::PlatformProcessId pid;
+
+  static ProcessTrack Current() { return ProcessTrack(); }
+
+  void Serialize(protos::pbzero::TrackDescriptor*) const;
+  protos::gen::TrackDescriptor Serialize() const;
+
+ private:
+  ProcessTrack() : Track(MakeProcessTrack()), pid(base::GetProcessId()) {}
+};
+
+// A thread track is associated with a specific thread of execution. Currently
+// only threads in the current process can be referenced.
+struct PERFETTO_EXPORT ThreadTrack : public Track {
+  const base::PlatformProcessId pid;
+  const base::PlatformThreadId tid;
+
+  static ThreadTrack Current() { return ThreadTrack(base::GetThreadId()); }
+
+  // Represents a thread in the current process.
+  static ThreadTrack ForThread(base::PlatformThreadId tid_) {
+    return ThreadTrack(tid_);
+  }
+
+  void Serialize(protos::pbzero::TrackDescriptor*) const;
+  protos::gen::TrackDescriptor Serialize() const;
+
+ private:
+  explicit ThreadTrack(base::PlatformThreadId tid_)
+      : Track(MakeThreadTrack(tid_)),
+        pid(ProcessTrack::Current().pid),
+        tid(tid_) {}
+};
+
+// A track for recording counter values with the TRACE_COUNTER macro. Counter
+// tracks can optionally be given units and other metadata. See
+// /protos/perfetto/trace/track_event/counter_descriptor.proto for details.
+class CounterTrack : public Track {
+  // A random value mixed into counter track uuids to avoid collisions with
+  // other types of tracks.
+  static constexpr uint64_t kCounterMagic = 0xb1a4a67d7970839eul;
+
+ public:
+  using Unit = perfetto::protos::pbzero::CounterDescriptor::Unit;
+
+  // |name| must be a string with static lifetime.
+  constexpr explicit CounterTrack(const char* name,
+                                  Track parent = MakeProcessTrack())
+      : Track(CompileTimeHash(name) ^ kCounterMagic, parent),
+        name_(name),
+        category_(nullptr) {}
+
+  // |unit_name| is a free-form description of the unit used by this counter. It
+  // must have static lifetime.
+  constexpr CounterTrack(const char* name,
+                         const char* unit_name,
+                         Track parent = MakeProcessTrack())
+      : Track(CompileTimeHash(name) ^ kCounterMagic, parent),
+        name_(name),
+        category_(nullptr),
+        unit_name_(unit_name) {}
+
+  constexpr CounterTrack(const char* name,
+                         Unit unit,
+                         Track parent = MakeProcessTrack())
+      : Track(CompileTimeHash(name) ^ kCounterMagic, parent),
+        name_(name),
+        category_(nullptr),
+        unit_(unit) {}
+
+  static constexpr CounterTrack Global(const char* name,
+                                       const char* unit_name) {
+    return CounterTrack(name, unit_name, Track());
+  }
+
+  static constexpr CounterTrack Global(const char* name, Unit unit) {
+    return CounterTrack(name, unit, Track());
+  }
+
+  static constexpr CounterTrack Global(const char* name) {
+    return Global(name, nullptr);
+  }
+
+  constexpr CounterTrack set_unit(Unit unit) const {
+    return CounterTrack(uuid, parent_uuid, name_, category_, unit, unit_name_,
+                        unit_multiplier_, is_incremental_);
+  }
+
+  constexpr CounterTrack set_unit_name(const char* unit_name) const {
+    return CounterTrack(uuid, parent_uuid, name_, category_, unit_, unit_name,
+                        unit_multiplier_, is_incremental_);
+  }
+
+  constexpr CounterTrack set_unit_multiplier(int64_t unit_multiplier) const {
+    return CounterTrack(uuid, parent_uuid, name_, category_, unit_, unit_name_,
+                        unit_multiplier, is_incremental_);
+  }
+
+  constexpr CounterTrack set_category(const char* category) const {
+    return CounterTrack(uuid, parent_uuid, name_, category, unit_, unit_name_,
+                        unit_multiplier_, is_incremental_);
+  }
+
+  void Serialize(protos::pbzero::TrackDescriptor*) const;
+  protos::gen::TrackDescriptor Serialize() const;
+
+ private:
+  constexpr CounterTrack(uint64_t uuid_,
+                         uint64_t parent_uuid_,
+                         const char* name,
+                         const char* category,
+                         Unit unit,
+                         const char* unit_name,
+                         int64_t unit_multiplier,
+                         bool is_incremental)
+      : Track(uuid_, parent_uuid_),
+        name_(name),
+        category_(category),
+        unit_(unit),
+        unit_name_(unit_name),
+        unit_multiplier_(unit_multiplier),
+        is_incremental_(is_incremental) {}
+
+  // TODO(skyostil): Expose incremental counters once we decide how to manage
+  // their incremental state.
+  constexpr CounterTrack set_is_incremental(bool is_incremental = true) const {
+    return CounterTrack(uuid, parent_uuid, name_, category_, unit_, unit_name_,
+                        unit_multiplier_, is_incremental);
+  }
+
+  const char* const name_;
+  const char* const category_;
+  Unit unit_ = perfetto::protos::pbzero::CounterDescriptor::UNIT_UNSPECIFIED;
+  const char* const unit_name_ = nullptr;
+  int64_t unit_multiplier_ = 1;
+  bool is_incremental_ = false;
+};
+
+namespace internal {
+
+// Keeps a map of uuids to serialized track descriptors and provides a
+// thread-safe way to read and write them. Each trace writer keeps a TLS set of
+// the tracks it has seen (see TrackEventIncrementalState). In the common case,
+// this registry is not consulted (and no locks are taken). However when a new
+// track is seen, this registry is used to write either 1) the default
+// descriptor for that track (see *Track::Serialize) or 2) a serialized
+// descriptor stored in the registry which may have additional metadata (e.g.,
+// track name).
+// TODO(eseckler): Remove PERFETTO_EXPORT once Chromium no longer calls
+// TrackRegistry::InitializeInstance() directly.
+class PERFETTO_EXPORT TrackRegistry {
+ public:
+  using SerializedTrackDescriptor = std::string;
+
+  TrackRegistry();
+  ~TrackRegistry();
+
+  static void InitializeInstance();
+  static TrackRegistry* Get() { return instance_; }
+
+  void EraseTrack(Track);
+
+  // Store metadata for |track| in the registry. |fill_function| is called
+  // synchronously to record additional properties for the track.
+  template <typename TrackType>
+  void UpdateTrack(
+      const TrackType& track,
+      std::function<void(protos::pbzero::TrackDescriptor*)> fill_function) {
+    UpdateTrackImpl(track, [&](protos::pbzero::TrackDescriptor* desc) {
+      track.Serialize(desc);
+      fill_function(desc);
+    });
+  }
+
+  // This variant lets the user supply a serialized track descriptor directly.
+  void UpdateTrack(Track, const std::string& serialized_desc);
+
+  // If |track| exists in the registry, write out the serialized track
+  // descriptor for it into |packet|. Otherwise just the ephemeral track object
+  // is serialized without any additional metadata.
+  template <typename TrackType>
+  void SerializeTrack(
+      const TrackType& track,
+      protozero::MessageHandle<protos::pbzero::TracePacket> packet) {
+    // If the track has extra metadata (recorded with UpdateTrack), it will be
+    // found in the registry. To minimize the time the lock is held, make a copy
+    // of the data held in the registry and write it outside the lock.
+    std::string desc_copy;
+    {
+      std::lock_guard<std::mutex> lock(mutex_);
+      const auto& it = tracks_.find(track.uuid);
+      if (it != tracks_.end()) {
+        desc_copy = it->second;
+        PERFETTO_DCHECK(!desc_copy.empty());
+      }
+    }
+    if (!desc_copy.empty()) {
+      WriteTrackDescriptor(std::move(desc_copy), std::move(packet));
+    } else {
+      // Otherwise we just write the basic descriptor for this type of track
+      // (e.g., just uuid, no name).
+      track.Serialize(packet->set_track_descriptor());
+    }
+  }
+
+  static void WriteTrackDescriptor(
+      const SerializedTrackDescriptor& desc,
+      protozero::MessageHandle<protos::pbzero::TracePacket> packet);
+
+ private:
+  void UpdateTrackImpl(
+      Track,
+      std::function<void(protos::pbzero::TrackDescriptor*)> fill_function);
+
+  std::mutex mutex_;
+  std::map<uint64_t /* uuid */, SerializedTrackDescriptor> tracks_;
+
+  static TrackRegistry* instance_;
+};
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_TRACK_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/builtin_clock.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_BUILTIN_CLOCK_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_BUILTIN_CLOCK_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+enum BuiltinClock : int32_t {
+  BUILTIN_CLOCK_UNKNOWN = 0,
+  BUILTIN_CLOCK_REALTIME = 1,
+  BUILTIN_CLOCK_REALTIME_COARSE = 2,
+  BUILTIN_CLOCK_MONOTONIC = 3,
+  BUILTIN_CLOCK_MONOTONIC_COARSE = 4,
+  BUILTIN_CLOCK_MONOTONIC_RAW = 5,
+  BUILTIN_CLOCK_BOOTTIME = 6,
+  BUILTIN_CLOCK_MAX_ID = 63,
+};
+
+const BuiltinClock BuiltinClock_MIN = BUILTIN_CLOCK_UNKNOWN;
+const BuiltinClock BuiltinClock_MAX = BUILTIN_CLOCK_MAX_ID;
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/interned_data/interned_data.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_INTERNED_DATA_INTERNED_DATA_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_INTERNED_DATA_INTERNED_DATA_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class Callstack;
+class DebugAnnotationName;
+class EventCategory;
+class EventName;
+class Frame;
+class HistogramName;
+class InternedGpuRenderStageSpecification;
+class InternedGraphicsContext;
+class InternedString;
+class LogMessageBody;
+class Mapping;
+class ProfiledFrameSymbols;
+class SourceLocation;
+
+class InternedData_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/26, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  InternedData_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit InternedData_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit InternedData_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_event_categories() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> event_categories() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_event_names() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> event_names() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_debug_annotation_names() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> debug_annotation_names() const { return GetRepeated<::protozero::ConstBytes>(3); }
+  bool has_source_locations() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> source_locations() const { return GetRepeated<::protozero::ConstBytes>(4); }
+  bool has_log_message_body() const { return at<20>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> log_message_body() const { return GetRepeated<::protozero::ConstBytes>(20); }
+  bool has_histogram_names() const { return at<25>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> histogram_names() const { return GetRepeated<::protozero::ConstBytes>(25); }
+  bool has_build_ids() const { return at<16>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> build_ids() const { return GetRepeated<::protozero::ConstBytes>(16); }
+  bool has_mapping_paths() const { return at<17>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> mapping_paths() const { return GetRepeated<::protozero::ConstBytes>(17); }
+  bool has_source_paths() const { return at<18>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> source_paths() const { return GetRepeated<::protozero::ConstBytes>(18); }
+  bool has_function_names() const { return at<5>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> function_names() const { return GetRepeated<::protozero::ConstBytes>(5); }
+  bool has_profiled_frame_symbols() const { return at<21>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> profiled_frame_symbols() const { return GetRepeated<::protozero::ConstBytes>(21); }
+  bool has_mappings() const { return at<19>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> mappings() const { return GetRepeated<::protozero::ConstBytes>(19); }
+  bool has_frames() const { return at<6>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> frames() const { return GetRepeated<::protozero::ConstBytes>(6); }
+  bool has_callstacks() const { return at<7>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> callstacks() const { return GetRepeated<::protozero::ConstBytes>(7); }
+  bool has_vulkan_memory_keys() const { return at<22>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> vulkan_memory_keys() const { return GetRepeated<::protozero::ConstBytes>(22); }
+  bool has_graphics_contexts() const { return at<23>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> graphics_contexts() const { return GetRepeated<::protozero::ConstBytes>(23); }
+  bool has_gpu_specifications() const { return at<24>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> gpu_specifications() const { return GetRepeated<::protozero::ConstBytes>(24); }
+  bool has_kernel_symbols() const { return at<26>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> kernel_symbols() const { return GetRepeated<::protozero::ConstBytes>(26); }
+};
+
+class InternedData : public ::protozero::Message {
+ public:
+  using Decoder = InternedData_Decoder;
+  enum : int32_t {
+    kEventCategoriesFieldNumber = 1,
+    kEventNamesFieldNumber = 2,
+    kDebugAnnotationNamesFieldNumber = 3,
+    kSourceLocationsFieldNumber = 4,
+    kLogMessageBodyFieldNumber = 20,
+    kHistogramNamesFieldNumber = 25,
+    kBuildIdsFieldNumber = 16,
+    kMappingPathsFieldNumber = 17,
+    kSourcePathsFieldNumber = 18,
+    kFunctionNamesFieldNumber = 5,
+    kProfiledFrameSymbolsFieldNumber = 21,
+    kMappingsFieldNumber = 19,
+    kFramesFieldNumber = 6,
+    kCallstacksFieldNumber = 7,
+    kVulkanMemoryKeysFieldNumber = 22,
+    kGraphicsContextsFieldNumber = 23,
+    kGpuSpecificationsFieldNumber = 24,
+    kKernelSymbolsFieldNumber = 26,
+  };
+
+  using FieldMetadata_EventCategories =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      EventCategory,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EventCategories kEventCategories() { return {}; }
+  template <typename T = EventCategory> T* add_event_categories() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_EventNames =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      EventName,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EventNames kEventNames() { return {}; }
+  template <typename T = EventName> T* add_event_names() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_DebugAnnotationNames =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DebugAnnotationName,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DebugAnnotationNames kDebugAnnotationNames() { return {}; }
+  template <typename T = DebugAnnotationName> T* add_debug_annotation_names() {
+    return BeginNestedMessage<T>(3);
+  }
+
+
+  using FieldMetadata_SourceLocations =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SourceLocation,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SourceLocations kSourceLocations() { return {}; }
+  template <typename T = SourceLocation> T* add_source_locations() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_LogMessageBody =
+    ::protozero::proto_utils::FieldMetadata<
+      20,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      LogMessageBody,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LogMessageBody kLogMessageBody() { return {}; }
+  template <typename T = LogMessageBody> T* add_log_message_body() {
+    return BeginNestedMessage<T>(20);
+  }
+
+
+  using FieldMetadata_HistogramNames =
+    ::protozero::proto_utils::FieldMetadata<
+      25,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      HistogramName,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HistogramNames kHistogramNames() { return {}; }
+  template <typename T = HistogramName> T* add_histogram_names() {
+    return BeginNestedMessage<T>(25);
+  }
+
+
+  using FieldMetadata_BuildIds =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedString,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BuildIds kBuildIds() { return {}; }
+  template <typename T = InternedString> T* add_build_ids() {
+    return BeginNestedMessage<T>(16);
+  }
+
+
+  using FieldMetadata_MappingPaths =
+    ::protozero::proto_utils::FieldMetadata<
+      17,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedString,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MappingPaths kMappingPaths() { return {}; }
+  template <typename T = InternedString> T* add_mapping_paths() {
+    return BeginNestedMessage<T>(17);
+  }
+
+
+  using FieldMetadata_SourcePaths =
+    ::protozero::proto_utils::FieldMetadata<
+      18,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedString,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SourcePaths kSourcePaths() { return {}; }
+  template <typename T = InternedString> T* add_source_paths() {
+    return BeginNestedMessage<T>(18);
+  }
+
+
+  using FieldMetadata_FunctionNames =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedString,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FunctionNames kFunctionNames() { return {}; }
+  template <typename T = InternedString> T* add_function_names() {
+    return BeginNestedMessage<T>(5);
+  }
+
+
+  using FieldMetadata_ProfiledFrameSymbols =
+    ::protozero::proto_utils::FieldMetadata<
+      21,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProfiledFrameSymbols,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProfiledFrameSymbols kProfiledFrameSymbols() { return {}; }
+  template <typename T = ProfiledFrameSymbols> T* add_profiled_frame_symbols() {
+    return BeginNestedMessage<T>(21);
+  }
+
+
+  using FieldMetadata_Mappings =
+    ::protozero::proto_utils::FieldMetadata<
+      19,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Mapping,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mappings kMappings() { return {}; }
+  template <typename T = Mapping> T* add_mappings() {
+    return BeginNestedMessage<T>(19);
+  }
+
+
+  using FieldMetadata_Frames =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Frame,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Frames kFrames() { return {}; }
+  template <typename T = Frame> T* add_frames() {
+    return BeginNestedMessage<T>(6);
+  }
+
+
+  using FieldMetadata_Callstacks =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Callstack,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Callstacks kCallstacks() { return {}; }
+  template <typename T = Callstack> T* add_callstacks() {
+    return BeginNestedMessage<T>(7);
+  }
+
+
+  using FieldMetadata_VulkanMemoryKeys =
+    ::protozero::proto_utils::FieldMetadata<
+      22,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedString,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VulkanMemoryKeys kVulkanMemoryKeys() { return {}; }
+  template <typename T = InternedString> T* add_vulkan_memory_keys() {
+    return BeginNestedMessage<T>(22);
+  }
+
+
+  using FieldMetadata_GraphicsContexts =
+    ::protozero::proto_utils::FieldMetadata<
+      23,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedGraphicsContext,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GraphicsContexts kGraphicsContexts() { return {}; }
+  template <typename T = InternedGraphicsContext> T* add_graphics_contexts() {
+    return BeginNestedMessage<T>(23);
+  }
+
+
+  using FieldMetadata_GpuSpecifications =
+    ::protozero::proto_utils::FieldMetadata<
+      24,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedGpuRenderStageSpecification,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuSpecifications kGpuSpecifications() { return {}; }
+  template <typename T = InternedGpuRenderStageSpecification> T* add_gpu_specifications() {
+    return BeginNestedMessage<T>(24);
+  }
+
+
+  using FieldMetadata_KernelSymbols =
+    ::protozero::proto_utils::FieldMetadata<
+      26,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedString,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_KernelSymbols kKernelSymbols() { return {}; }
+  template <typename T = InternedString> T* add_kernel_symbols() {
+    return BeginNestedMessage<T>(26);
+  }
+
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_TRACK_EVENT_INTERNAL_H_
+#define INCLUDE_PERFETTO_TRACING_INTERNAL_TRACK_EVENT_INTERNAL_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/flat_set.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_heap_buffer.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/forward_decls.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/data_source.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/debug_annotation.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/trace_writer_base.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/traced_value.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/track.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/interned_data/interned_data.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_event.pbzero.h"
+
+#include <unordered_map>
+
+namespace perfetto {
+class EventContext;
+class TrackEventSessionObserver;
+struct Category;
+namespace protos {
+namespace gen {
+class TrackEventConfig;
+}  // namespace gen
+namespace pbzero {
+class DebugAnnotation;
+}  // namespace pbzero
+}  // namespace protos
+
+// A callback interface for observing track event tracing sessions starting and
+// stopping. See TrackEvent::{Add,Remove}SessionObserver. Note that all methods
+// will be called on an internal Perfetto thread.
+class PERFETTO_EXPORT TrackEventSessionObserver {
+ public:
+  virtual ~TrackEventSessionObserver();
+  // Called when a track event tracing session is configured. Note tracing isn't
+  // active yet, so track events emitted here won't be recorded. See
+  // DataSourceBase::OnSetup.
+  virtual void OnSetup(const DataSourceBase::SetupArgs&);
+  // Called when a track event tracing session is started. It is possible to
+  // emit track events from this callback.
+  virtual void OnStart(const DataSourceBase::StartArgs&);
+  // Called when a track event tracing session is stopped. It is still possible
+  // to emit track events from this callback.
+  virtual void OnStop(const DataSourceBase::StopArgs&);
+};
+
+namespace internal {
+class TrackEventCategoryRegistry;
+
+class PERFETTO_EXPORT BaseTrackEventInternedDataIndex {
+ public:
+  virtual ~BaseTrackEventInternedDataIndex();
+
+#if PERFETTO_DCHECK_IS_ON()
+  const char* type_id_ = nullptr;
+  const void* add_function_ptr_ = nullptr;
+#endif  // PERFETTO_DCHECK_IS_ON()
+};
+
+struct TrackEventIncrementalState {
+  static constexpr size_t kMaxInternedDataFields = 32;
+
+  bool was_cleared = true;
+
+  // A heap-allocated message for storing newly seen interned data while we are
+  // in the middle of writing a track event. When a track event wants to write
+  // new interned data into the trace, it is first serialized into this message
+  // and then flushed to the real trace in EventContext when the packet ends.
+  // The message is cached here as a part of incremental state so that we can
+  // reuse the underlying buffer allocation for subsequently written interned
+  // data.
+  protozero::HeapBuffered<protos::pbzero::InternedData>
+      serialized_interned_data;
+
+  // In-memory indices for looking up interned data ids.
+  // For each intern-able field (up to a max of 32) we keep a dictionary of
+  // field-value -> interning-key. Depending on the type we either keep the full
+  // value or a hash of it (See track_event_interned_data_index.h)
+  using InternedDataIndex =
+      std::pair</* interned_data.proto field number */ size_t,
+                std::unique_ptr<BaseTrackEventInternedDataIndex>>;
+  std::array<InternedDataIndex, kMaxInternedDataFields> interned_data_indices =
+      {};
+
+  // Track uuids for which we have written descriptors into the trace. If a
+  // trace event uses a track which is not in this set, we'll write out a
+  // descriptor for it.
+  base::FlatSet<uint64_t> seen_tracks;
+
+  // Dynamically registered category names that have been encountered during
+  // this tracing session. The value in the map indicates whether the category
+  // is enabled or disabled.
+  std::unordered_map<std::string, bool> dynamic_categories;
+};
+
+// The backend portion of the track event trace point implemention. Outlined to
+// a separate .cc file so it can be shared by different track event category
+// namespaces.
+class PERFETTO_EXPORT TrackEventInternal {
+ public:
+  static bool Initialize(
+      const TrackEventCategoryRegistry&,
+      bool (*register_data_source)(const DataSourceDescriptor&));
+
+  static bool AddSessionObserver(TrackEventSessionObserver*);
+  static void RemoveSessionObserver(TrackEventSessionObserver*);
+
+  static void EnableTracing(const TrackEventCategoryRegistry& registry,
+                            const protos::gen::TrackEventConfig& config,
+                            const DataSourceBase::SetupArgs&);
+  static void OnStart(const DataSourceBase::StartArgs&);
+  static void DisableTracing(const TrackEventCategoryRegistry& registry,
+                             const DataSourceBase::StopArgs&);
+  static bool IsCategoryEnabled(const TrackEventCategoryRegistry& registry,
+                                const protos::gen::TrackEventConfig& config,
+                                const Category& category);
+
+  static perfetto::EventContext WriteEvent(
+      TraceWriterBase*,
+      TrackEventIncrementalState*,
+      const Category* category,
+      const char* name,
+      perfetto::protos::pbzero::TrackEvent::Type,
+      uint64_t timestamp = GetTimeNs());
+
+  static void ResetIncrementalState(TraceWriterBase*, uint64_t timestamp);
+
+  template <typename T>
+  static void AddDebugAnnotation(perfetto::EventContext* event_ctx,
+                                 const char* name,
+                                 T&& value) {
+    auto annotation = AddDebugAnnotation(event_ctx, name);
+    WriteIntoTracedValue(internal::CreateTracedValueFromProto(annotation),
+                         std::forward<T>(value));
+  }
+
+  // If the given track hasn't been seen by the trace writer yet, write a
+  // descriptor for it into the trace. Doesn't take a lock unless the track
+  // descriptor is new.
+  template <typename TrackType>
+  static void WriteTrackDescriptorIfNeeded(
+      const TrackType& track,
+      TraceWriterBase* trace_writer,
+      TrackEventIncrementalState* incr_state) {
+    auto it_and_inserted = incr_state->seen_tracks.insert(track.uuid);
+    if (PERFETTO_LIKELY(!it_and_inserted.second))
+      return;
+    WriteTrackDescriptor(track, trace_writer);
+  }
+
+  // Unconditionally write a track descriptor into the trace.
+  template <typename TrackType>
+  static void WriteTrackDescriptor(const TrackType& track,
+                                   TraceWriterBase* trace_writer) {
+    TrackRegistry::Get()->SerializeTrack(
+        track, NewTracePacket(trace_writer, GetTimeNs()));
+  }
+
+  // Get the current time in nanoseconds in the trace clock timebase.
+  static uint64_t GetTimeNs();
+
+  // Get the clock used by GetTimeNs().
+  static constexpr protos::pbzero::BuiltinClock GetClockId() {
+#if !PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE) && \
+    !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
+    return protos::pbzero::BUILTIN_CLOCK_BOOTTIME;
+#else
+    return protos::pbzero::BUILTIN_CLOCK_MONOTONIC;
+#endif
+  }
+
+  static int GetSessionCount();
+
+  // Represents the default track for the calling thread.
+  static const Track kDefaultTrack;
+
+ private:
+  static protozero::MessageHandle<protos::pbzero::TracePacket> NewTracePacket(
+      TraceWriterBase*,
+      uint64_t timestamp,
+      uint32_t seq_flags =
+          protos::pbzero::TracePacket::SEQ_NEEDS_INCREMENTAL_STATE);
+  static protos::pbzero::DebugAnnotation* AddDebugAnnotation(
+      perfetto::EventContext*,
+      const char* name);
+
+  static std::atomic<int> session_count_;
+};
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_INTERNAL_TRACK_EVENT_INTERNAL_H_
+// gen_amalgamated begin header: include/perfetto/tracing/traced_proto.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_TRACED_PROTO_H_
+#define INCLUDE_PERFETTO_TRACING_TRACED_PROTO_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/template_util.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+class EventContext;
+
+// A Wrapper around a protozero message to allow C++ classes to specify how it
+// should be serialised into the trace:
+//
+// class Foo {
+//  public:
+//   void WriteIntoTrace(perfetto::TracedProto<pbzero::Foo> message) {
+//     message->set_int_field(int_field_);
+//   }
+// };
+//
+// This class also exposes EventContext, e.g. to enable data interning.
+//
+// NOTE: the functionality below is not ready yet.
+// TODO(altimin): Make the interop below possible.
+// TracedProto also provides a seamless integration with writing untyped
+// values via TracedValue / TracedDictionary / TracedArray:
+//
+// - TracedValue can be converted to a TracedProto, either by calling
+//   TracedValue::WriteProto<T>() or implicitly.
+// - If a proto message has a repeating DebugAnnotation debug_annotations
+//   field, it can be filled using the TracedDictionary obtained from
+//   TracedProto::WriteDebugAnnotations.
+template <typename MessageType>
+class TracedProto {
+ public:
+  TracedProto(const TracedProto&) = delete;
+  TracedProto& operator=(const TracedProto&) = delete;
+  TracedProto& operator=(TracedProto&&) = delete;
+  TracedProto(TracedProto&&) = default;
+  ~TracedProto() = default;
+
+  MessageType* operator->() const { return message_; }
+
+  MessageType* message() { return message_; }
+
+  EventContext& context() const { return context_; }
+
+ private:
+  friend class EventContext;
+
+  TracedProto(MessageType* message, EventContext& context)
+      : message_(message), context_(context) {}
+
+  MessageType* const message_;
+  EventContext& context_;
+};
+
+namespace internal {
+
+// TypedProtoWriter takes the protozero message (TracedProto<MessageType>),
+// field description (FieldMetadata) and value and writes the given value
+// into the given field of the given protozero message.
+//
+// This is primarily used for inline writing of typed messages:
+// TRACE_EVENT(..., pbzero::Message:kField, value);
+//
+// Ideally we would use a function here and not a struct, but passing template
+// arguments directly to the function (e.g. foo<void>()) isn't supported until
+// C++20, so we have to use a helper struct here.
+template <typename FieldMetadata>
+struct TypedProtoWriter {
+ private:
+  using ProtoSchemaType = protozero::proto_utils::ProtoSchemaType;
+  using RepetitionType = protozero::proto_utils::RepetitionType;
+
+  static_assert(FieldMetadata::kRepetitionType !=
+                    RepetitionType::kRepeatedPacked,
+                "writing packed fields isn't supported yet");
+
+ public:
+  // Implementation note: typename Check=void is used to ensure that SFINAE
+  // kicks in and the methods which do not match FieldMetadata do not fail
+  // to compile. std::is_same<Check,void> prevents early evaluation of the
+  // first enable_if_t argument.
+
+  // Simple non-repeated field.
+  template <typename Proto, typename ValueType, typename Check = void>
+  static typename base::enable_if_t<
+      FieldMetadata::kProtoFieldType != ProtoSchemaType::kMessage &&
+      FieldMetadata::kRepetitionType == RepetitionType::kNotRepeated &&
+      std::is_same<Check, void>::value>
+  Write(TracedProto<Proto> context, ValueType&& value) {
+    protozero::internal::FieldWriter<FieldMetadata::kProtoFieldType>::Append(
+        *context.message(), FieldMetadata::kFieldId, value);
+  }
+
+  // Simple repeated non-packed field.
+  template <typename Proto, typename ValueType, typename Check = void>
+  static typename base::enable_if_t<
+      FieldMetadata::kProtoFieldType != ProtoSchemaType::kMessage &&
+      FieldMetadata::kRepetitionType == RepetitionType::kRepeatedNotPacked &&
+      std::is_same<Check, void>::value>
+  Write(TracedProto<Proto> context, ValueType&& value) {
+    for (auto&& item : value) {
+      protozero::internal::FieldWriter<FieldMetadata::kProtoFieldType>::Append(
+          *context.message(), FieldMetadata::kFieldId, item);
+    }
+  }
+
+  // Nested non-repeated field.
+  template <typename Proto, typename ValueType, typename Check = void>
+  static typename base::enable_if_t<
+      FieldMetadata::kProtoFieldType == ProtoSchemaType::kMessage &&
+      FieldMetadata::kRepetitionType == RepetitionType::kNotRepeated &&
+      std::is_same<Check, void>::value>
+  Write(TracedProto<Proto> context, ValueType&& value) {
+    // TODO(altimin): support TraceFormatTraits here.
+    value.WriteIntoTrace(
+        context.context().Wrap(context.message()
+                                   ->template BeginNestedMessage<
+                                       typename FieldMetadata::cpp_field_type>(
+                                       FieldMetadata::kFieldId)));
+  }
+
+  // Nested repeated non-packed field.
+  template <typename Proto, typename ValueType, typename Check = void>
+  static typename base::enable_if_t<
+      FieldMetadata::kProtoFieldType == ProtoSchemaType::kMessage &&
+      FieldMetadata::kRepetitionType == RepetitionType::kRepeatedNotPacked &&
+      std::is_same<Check, void>::value>
+  Write(TracedProto<Proto> context, ValueType&& value) {
+    // TODO(altimin): support TraceFormatTraits here.
+    for (auto&& item : value) {
+      item.WriteIntoTrace(context.context().Wrap(
+          context.message()
+              ->template BeginNestedMessage<
+                  typename FieldMetadata::cpp_field_type>(
+                  FieldMetadata::kFieldId)));
+    }
+  }
+};
+
+}  // namespace internal
+
+template <typename MessageType, typename FieldMetadataType, typename ValueType>
+void WriteIntoTracedProto(
+    TracedProto<MessageType> message,
+    protozero::proto_utils::internal::FieldMetadataHelper<FieldMetadataType>,
+    ValueType&& value) {
+  static_assert(
+      std::is_base_of<protozero::proto_utils::FieldMetadataBase,
+                      FieldMetadataType>::value,
+      "Field name should be a protozero::internal::FieldMetadata<...>");
+  static_assert(
+      std::is_base_of<MessageType,
+                      typename FieldMetadataType::message_type>::value,
+      "Field's parent type should match the context.");
+
+  internal::TypedProtoWriter<FieldMetadataType>::Write(
+      std::move(message), std::forward<ValueType>(value));
+}
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_TRACED_PROTO_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_EVENT_CONTEXT_H_
+#define INCLUDE_PERFETTO_TRACING_EVENT_CONTEXT_H_
+
+// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_internal.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/traced_proto.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/trace_packet.pbzero.h"
+
+namespace perfetto {
+namespace internal {
+class TrackEventInternal;
+}
+
+// Allows adding custom arguments into track events. Example:
+//
+//   TRACE_EVENT_BEGIN("category", "Title",
+//                     [](perfetto::EventContext ctx) {
+//                       auto* dbg = ctx.event()->add_debug_annotations();
+//                       dbg->set_name("name");
+//                       dbg->set_int_value(1234);
+//                     });
+//
+class PERFETTO_EXPORT EventContext {
+ public:
+  EventContext(EventContext&&) = default;
+
+  // For Chromium during the transition phase to the client library.
+  // TODO(eseckler): Remove once Chromium has switched to client lib entirely.
+  explicit EventContext(
+      protos::pbzero::TrackEvent* event,
+      internal::TrackEventIncrementalState* incremental_state = nullptr)
+      : event_(event), incremental_state_(incremental_state) {}
+
+  ~EventContext();
+
+  // Get a TrackEvent message to write typed arguments to.
+  //
+  // event() is a template method to allow callers to specify a subclass of
+  // TrackEvent instead. Those subclasses correspond to TrackEvent message with
+  // application-specific extensions. More information in
+  // design-docs/extensions.md.
+  template <typename EventType = protos::pbzero::TrackEvent>
+  EventType* event() const {
+    // As the method does downcasting, we check that a target subclass does
+    // not add new fields.
+    static_assert(
+        sizeof(EventType) == sizeof(protos::pbzero::TrackEvent),
+        "Event type must be binary-compatible with protos::pbzero::TrackEvent");
+    return static_cast<EventType*>(event_);
+  }
+
+  // Convert a raw pointer to protozero message to TracedProto which captures
+  // the reference to this EventContext.
+  template <typename MessageType>
+  TracedProto<MessageType> Wrap(MessageType* message) {
+    static_assert(std::is_base_of<protozero::Message, MessageType>::value,
+                  "TracedProto can be used only with protozero messages");
+
+    return TracedProto<MessageType>(message, *this);
+  }
+
+ private:
+  template <typename, size_t, typename, typename>
+  friend class TrackEventInternedDataIndex;
+  friend class internal::TrackEventInternal;
+
+  using TracePacketHandle =
+      ::protozero::MessageHandle<protos::pbzero::TracePacket>;
+
+  EventContext(TracePacketHandle, internal::TrackEventIncrementalState*);
+  EventContext(const EventContext&) = delete;
+
+  TracePacketHandle trace_packet_;
+  protos::pbzero::TrackEvent* event_;
+  internal::TrackEventIncrementalState* incremental_state_;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_EVENT_CONTEXT_H_
+// gen_amalgamated begin header: include/perfetto/tracing/internal/write_track_event_args.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_WRITE_TRACK_EVENT_ARGS_H_
+#define INCLUDE_PERFETTO_TRACING_INTERNAL_WRITE_TRACK_EVENT_ARGS_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/event_context.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/traced_proto.h"
+
+namespace perfetto {
+namespace internal {
+
+// Helper function handling filling provided |EventContext| from the provided
+// arguments, which include:
+// - Lambda functions,
+// - Debug annotations.
+//
+// TRACE_EVENT parameters which do not translate to directly writing something
+// into TrackEvent proto (like tracks and timestamps are _not_ covered by this
+// function).
+template <typename... Args, typename TypeCheck = void>
+void WriteTrackEventArgs(EventContext event_context, Args&&... args);
+
+// No arguments means that we don't have to write anything.
+template <>
+PERFETTO_ALWAYS_INLINE inline void WriteTrackEventArgs(EventContext) {}
+
+namespace {
+
+// A template helper for determining whether a type can be used as a track event
+// lambda, i.e., it has the signature "void(EventContext)". This is achieved by
+// checking that we can pass an EventContext value (the inner declval) into a T
+// instance (the outer declval). If this is a valid expression, the result
+// evaluates to sizeof(0), i.e., true.
+// TODO(skyostil): Replace this with std::is_convertible<std::function<...>>
+// once we have C++14.
+template <typename T>
+static constexpr bool IsValidTraceLambdaImpl(
+    typename std::enable_if<static_cast<bool>(
+        sizeof(std::declval<T>()(std::declval<EventContext>()), 0))>::type* =
+        nullptr) {
+  return true;
+}
+
+template <typename T>
+static constexpr bool IsValidTraceLambdaImpl(...) {
+  return false;
+}
+
+template <typename T>
+static constexpr bool IsValidTraceLambda() {
+  return IsValidTraceLambdaImpl<T>(nullptr);
+}
+
+}  // namespace
+
+// Write a lambda.
+// TODO(altimin): At the moment lambda takes EventContext, which is
+// non-copyable, so only one lambda is supported and it has to be the last
+// argument.
+template <typename ArgumentFunction,
+          typename ArgFunctionCheck = typename std::enable_if<
+              IsValidTraceLambda<ArgumentFunction>()>::type>
+PERFETTO_ALWAYS_INLINE void WriteTrackEventArgs(EventContext event_ctx,
+                                                ArgumentFunction arg_function) {
+  arg_function(std::move(event_ctx));
+}
+
+// Write one debug annotation and recursively write the rest of the arguments.
+template <typename ArgValue, typename... Args>
+PERFETTO_ALWAYS_INLINE void WriteTrackEventArgs(EventContext event_ctx,
+                                                const char* arg_name,
+                                                ArgValue&& arg_value,
+                                                Args&&... args) {
+  TrackEventInternal::AddDebugAnnotation(&event_ctx, arg_name,
+                                         std::forward<ArgValue>(arg_value));
+  WriteTrackEventArgs(std::move(event_ctx), std::forward<Args>(args)...);
+}
+
+// Write one typed message and recursively write the rest of the arguments.
+template <typename FieldMetadataType,
+          typename ArgValue,
+          typename... Args,
+          typename Check = base::enable_if_t<
+              std::is_base_of<protozero::proto_utils::FieldMetadataBase,
+                              FieldMetadataType>::value>>
+PERFETTO_ALWAYS_INLINE void WriteTrackEventArgs(
+    EventContext event_ctx,
+    protozero::proto_utils::internal::FieldMetadataHelper<FieldMetadataType>
+        field_name,
+    ArgValue&& arg_value,
+    Args&&... args) {
+  static_assert(
+      std::is_base_of<protos::pbzero::TrackEvent,
+                      typename FieldMetadataType::message_type>::value,
+      "Only fields of TrackEvent (and TrackEvent's extensions) can "
+      "be passed to TRACE_EVENT");
+  WriteIntoTracedProto(
+      event_ctx.Wrap(
+          event_ctx.event<typename FieldMetadataType::message_type>()),
+      field_name, std::forward<ArgValue>(arg_value));
+  WriteTrackEventArgs(std::move(event_ctx), std::forward<Args>(args)...);
+}
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_INTERNAL_WRITE_TRACK_EVENT_ARGS_H_
+// gen_amalgamated begin header: include/perfetto/tracing/track_event_category_registry.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_TRACK_EVENT_CATEGORY_REGISTRY_H_
+#define INCLUDE_PERFETTO_TRACING_TRACK_EVENT_CATEGORY_REGISTRY_H_
+
+// gen_amalgamated expanded: #include "perfetto/tracing/data_source.h"
+
+#include <stddef.h>
+
+#include <atomic>
+#include <utility>
+
+namespace perfetto {
+class DynamicCategory;
+
+// A compile-time representation of a track event category. See
+// PERFETTO_DEFINE_CATEGORIES for registering your own categories.
+struct PERFETTO_EXPORT Category {
+  using Tags = std::array<const char*, 4>;
+
+  const char* const name = nullptr;
+  const char* const description = nullptr;
+  const Tags tags = {};
+
+  constexpr Category(const Category&) = default;
+  constexpr explicit Category(const char* name_)
+      : name(CheckIsValidCategory(name_)),
+        name_sizes_(ComputeNameSizes(name_)) {}
+
+  constexpr Category SetDescription(const char* description_) const {
+    return Category(name, description_, tags, name_sizes_);
+  }
+
+  template <typename... Args>
+  constexpr Category SetTags(Args&&... args) const {
+    return Category(name, description, {std::forward<Args>(args)...},
+                    name_sizes_);
+  }
+
+  // A comma separated list of multiple categories to be used in a single trace
+  // point.
+  static constexpr Category Group(const char* names) {
+    return Category(names, AllowGroup{});
+  }
+
+  // Used for parsing dynamic category groups. Note that |name| and
+  // |DynamicCategory| must outlive the returned object because the category
+  // name isn't copied.
+  static Category FromDynamicCategory(const char* name);
+  static Category FromDynamicCategory(const DynamicCategory&);
+
+  constexpr bool IsGroup() const { return GetNameSize(1) > 0; }
+
+  // Returns the number of character in the category name. Not valid for
+  // category groups.
+  size_t name_size() const {
+    PERFETTO_DCHECK(!IsGroup());
+    return GetNameSize(0);
+  }
+
+  // Iterates over all the members of this category group, or just the name of
+  // the category itself if this isn't a category group. Return false from
+  // |callback| to stop iteration.
+  template <typename T>
+  void ForEachGroupMember(T callback) const {
+    const char* name_ptr = name;
+    size_t i = 0;
+    while (size_t name_size = GetNameSize(i++)) {
+      if (!callback(name_ptr, name_size))
+        break;
+      name_ptr += name_size + 1;
+    }
+  }
+
+ private:
+  static constexpr size_t kMaxGroupSize = 4;
+  using NameSizes = std::array<uint8_t, kMaxGroupSize>;
+
+  constexpr Category(const char* name_,
+                     const char* description_,
+                     Tags tags_,
+                     NameSizes name_sizes)
+      : name(name_),
+        description(description_),
+        tags(tags_),
+        name_sizes_(name_sizes) {}
+
+  enum AllowGroup {};
+  constexpr Category(const char* name_, AllowGroup)
+      : name(CheckIsValidCategoryGroup(name_)),
+        name_sizes_(ComputeNameSizes(name_)) {}
+
+  constexpr size_t GetNameSize(size_t i) const {
+    return i < name_sizes_.size() ? name_sizes_[i] : 0;
+  }
+
+  static constexpr NameSizes ComputeNameSizes(const char* s) {
+    static_assert(kMaxGroupSize == 4, "Unexpected maximum category group size");
+    return NameSizes{{static_cast<uint8_t>(GetNthNameSize(0, s, s)),
+                      static_cast<uint8_t>(GetNthNameSize(1, s, s)),
+                      static_cast<uint8_t>(GetNthNameSize(2, s, s)),
+                      static_cast<uint8_t>(GetNthNameSize(3, s, s))}};
+  }
+
+  static constexpr ptrdiff_t GetNthNameSize(int n,
+                                            const char* start,
+                                            const char* end,
+                                            int counter = 0) {
+    return (!*end || *end == ',')
+               ? ((!*end || counter == n)
+                      ? (counter == n ? end - start : 0)
+                      : GetNthNameSize(n, end + 1, end + 1, counter + 1))
+               : GetNthNameSize(n, start, end + 1, counter);
+  }
+
+  static constexpr const char* CheckIsValidCategory(const char* n) {
+    // We just replace invalid input with a nullptr here; it will trigger a
+    // static assert in TrackEventCategoryRegistry::ValidateCategories().
+    return GetNthNameSize(1, n, n) ? nullptr : n;
+  }
+
+  static constexpr const char* CheckIsValidCategoryGroup(const char* n) {
+    // Same as above: replace invalid input with nullptr.
+    return !GetNthNameSize(1, n, n) || GetNthNameSize(kMaxGroupSize, n, n)
+               ? nullptr
+               : n;
+  }
+
+  // An array of lengths of the different names associated with this category.
+  // If this category doesn't represent a group of multiple categories, only the
+  // first element is non-zero.
+  const NameSizes name_sizes_ = {};
+};
+
+// Dynamically constructed category names should marked as such through this
+// container type to make it less likely for trace points to accidentally start
+// using dynamic categories. Events with dynamic categories will always be
+// slightly more expensive than regular events, so use them sparingly.
+class PERFETTO_EXPORT DynamicCategory final {
+ public:
+  explicit DynamicCategory(const std::string& name_) : name(name_) {}
+  explicit DynamicCategory(const char* name_) : name(name_) {}
+  DynamicCategory() {}
+  ~DynamicCategory() = default;
+
+  const std::string name;
+};
+
+namespace internal {
+
+constexpr const char* NullCategory(const char*) {
+  return nullptr;
+}
+
+perfetto::DynamicCategory NullCategory(const perfetto::DynamicCategory&);
+
+constexpr bool StringMatchesPrefix(const char* str, const char* prefix) {
+  return !*str ? !*prefix
+               : !*prefix ? true
+                          : *str != *prefix
+                                ? false
+                                : StringMatchesPrefix(str + 1, prefix + 1);
+}
+
+constexpr bool IsStringInPrefixList(const char*) {
+  return false;
+}
+
+template <typename... Args>
+constexpr bool IsStringInPrefixList(const char* str,
+                                    const char* prefix,
+                                    Args... args) {
+  return StringMatchesPrefix(str, prefix) ||
+         IsStringInPrefixList(str, std::forward<Args>(args)...);
+}
+
+// Holds all the registered categories for one category namespace. See
+// PERFETTO_DEFINE_CATEGORIES for building the registry.
+class PERFETTO_EXPORT TrackEventCategoryRegistry {
+ public:
+  constexpr TrackEventCategoryRegistry(size_t category_count,
+                                       const Category* categories,
+                                       std::atomic<uint8_t>* state_storage)
+      : categories_(categories),
+        category_count_(category_count),
+        state_storage_(state_storage) {
+    static_assert(
+        sizeof(state_storage[0].load()) * 8 >= kMaxDataSourceInstances,
+        "The category state must have enough bits for all possible data source "
+        "instances");
+  }
+
+  size_t category_count() const { return category_count_; }
+
+  // Returns a category based on its index.
+  const Category* GetCategory(size_t index) const {
+    PERFETTO_DCHECK(index < category_count_);
+    return &categories_[index];
+  }
+
+  // Turn tracing on or off for the given category in a track event data source
+  // instance.
+  void EnableCategoryForInstance(size_t category_index,
+                                 uint32_t instance_index) const;
+  void DisableCategoryForInstance(size_t category_index,
+                                  uint32_t instance_index) const;
+
+  constexpr std::atomic<uint8_t>* GetCategoryState(
+      size_t category_index) const {
+    return &state_storage_[category_index];
+  }
+
+  // --------------------------------------------------------------------------
+  // Trace point support
+  // --------------------------------------------------------------------------
+  //
+  // (The following methods are used by the track event trace point
+  // implementation and typically don't need to be called by other code.)
+
+  // At compile time, turn a category name into an index into the registry.
+  // Returns kInvalidCategoryIndex if the category was not found, or
+  // kDynamicCategoryIndex if |is_dynamic| is true or a DynamicCategory was
+  // passed in.
+  static constexpr size_t kInvalidCategoryIndex = static_cast<size_t>(-1);
+  static constexpr size_t kDynamicCategoryIndex = static_cast<size_t>(-2);
+  constexpr size_t Find(const char* name, bool is_dynamic) const {
+    return CheckIsValidCategoryIndex(FindImpl(name, is_dynamic));
+  }
+
+  constexpr size_t Find(const DynamicCategory&, bool) const {
+    return kDynamicCategoryIndex;
+  }
+
+  constexpr bool ValidateCategories(size_t index = 0) const {
+    return (index == category_count_)
+               ? true
+               : IsValidCategoryName(categories_[index].name)
+                     ? ValidateCategories(index + 1)
+                     : false;
+  }
+
+ private:
+  // TODO(skyostil): Make the compile-time routines nicer with C++14.
+  constexpr size_t FindImpl(const char* name,
+                            bool is_dynamic,
+                            size_t index = 0) const {
+    return is_dynamic ? kDynamicCategoryIndex
+                      : (index == category_count_)
+                            ? kInvalidCategoryIndex
+                            : StringEq(categories_[index].name, name)
+                                  ? index
+                                  : FindImpl(name, false, index + 1);
+  }
+
+  // A compile time helper for checking that a category index is valid.
+  static constexpr size_t CheckIsValidCategoryIndex(size_t index) {
+    // Relies on PERFETTO_CHECK() (and the surrounding lambda) being a
+    // non-constexpr function, which will fail the build if the given |index| is
+    // invalid. The funny formatting here is so that clang shows the comment
+    // below as part of the error message.
+    // clang-format off
+    return index != kInvalidCategoryIndex ? index : \
+        /* Invalid category -- add it to PERFETTO_DEFINE_CATEGORIES(). */ [] {
+        PERFETTO_CHECK(
+            false &&
+            "A track event used an unknown category. Please add it to "
+            "PERFETTO_DEFINE_CATEGORIES().");
+        return kInvalidCategoryIndex;
+      }();
+    // clang-format on
+  }
+
+  static constexpr bool IsValidCategoryName(const char* name) {
+    return (!name || *name == '\"' || *name == '*' || *name == ' ')
+               ? false
+               : *name ? IsValidCategoryName(name + 1) : true;
+  }
+
+  static constexpr bool StringEq(const char* a, const char* b) {
+    return *a != *b ? false
+                    : (!*a || !*b) ? (*a == *b) : StringEq(a + 1, b + 1);
+  }
+
+  const Category* const categories_;
+  const size_t category_count_;
+  std::atomic<uint8_t>* const state_storage_;
+};
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_TRACK_EVENT_CATEGORY_REGISTRY_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/track_event/track_event_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACK_EVENT_TRACK_EVENT_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACK_EVENT_TRACK_EVENT_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TrackEventConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT TrackEventConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDisabledCategoriesFieldNumber = 1,
+    kEnabledCategoriesFieldNumber = 2,
+    kDisabledTagsFieldNumber = 3,
+    kEnabledTagsFieldNumber = 4,
+  };
+
+  TrackEventConfig();
+  ~TrackEventConfig() override;
+  TrackEventConfig(TrackEventConfig&&) noexcept;
+  TrackEventConfig& operator=(TrackEventConfig&&);
+  TrackEventConfig(const TrackEventConfig&);
+  TrackEventConfig& operator=(const TrackEventConfig&);
+  bool operator==(const TrackEventConfig&) const;
+  bool operator!=(const TrackEventConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<std::string>& disabled_categories() const { return disabled_categories_; }
+  std::vector<std::string>* mutable_disabled_categories() { return &disabled_categories_; }
+  int disabled_categories_size() const { return static_cast<int>(disabled_categories_.size()); }
+  void clear_disabled_categories() { disabled_categories_.clear(); }
+  void add_disabled_categories(std::string value) { disabled_categories_.emplace_back(value); }
+  std::string* add_disabled_categories() { disabled_categories_.emplace_back(); return &disabled_categories_.back(); }
+
+  const std::vector<std::string>& enabled_categories() const { return enabled_categories_; }
+  std::vector<std::string>* mutable_enabled_categories() { return &enabled_categories_; }
+  int enabled_categories_size() const { return static_cast<int>(enabled_categories_.size()); }
+  void clear_enabled_categories() { enabled_categories_.clear(); }
+  void add_enabled_categories(std::string value) { enabled_categories_.emplace_back(value); }
+  std::string* add_enabled_categories() { enabled_categories_.emplace_back(); return &enabled_categories_.back(); }
+
+  const std::vector<std::string>& disabled_tags() const { return disabled_tags_; }
+  std::vector<std::string>* mutable_disabled_tags() { return &disabled_tags_; }
+  int disabled_tags_size() const { return static_cast<int>(disabled_tags_.size()); }
+  void clear_disabled_tags() { disabled_tags_.clear(); }
+  void add_disabled_tags(std::string value) { disabled_tags_.emplace_back(value); }
+  std::string* add_disabled_tags() { disabled_tags_.emplace_back(); return &disabled_tags_.back(); }
+
+  const std::vector<std::string>& enabled_tags() const { return enabled_tags_; }
+  std::vector<std::string>* mutable_enabled_tags() { return &enabled_tags_; }
+  int enabled_tags_size() const { return static_cast<int>(enabled_tags_.size()); }
+  void clear_enabled_tags() { enabled_tags_.clear(); }
+  void add_enabled_tags(std::string value) { enabled_tags_.emplace_back(value); }
+  std::string* add_enabled_tags() { enabled_tags_.emplace_back(); return &enabled_tags_.back(); }
+
+ private:
+  std::vector<std::string> disabled_categories_;
+  std::vector<std::string> enabled_categories_;
+  std::vector<std::string> disabled_tags_;
+  std::vector<std::string> enabled_tags_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACK_EVENT_TRACK_EVENT_CONFIG_PROTO_CPP_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_TRACK_EVENT_DATA_SOURCE_H_
+#define INCLUDE_PERFETTO_TRACING_INTERNAL_TRACK_EVENT_DATA_SOURCE_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/template_util.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message_handle.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_config.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/data_source.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/event_context.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_internal.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/write_track_event_args.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/track.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/track_event_category_registry.h"
+// gen_amalgamated expanded: #include "protos/perfetto/common/builtin_clock.pbzero.h"
+// gen_amalgamated expanded: #include "protos/perfetto/config/track_event/track_event_config.gen.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_event.pbzero.h"
+
+#include <type_traits>
+
+namespace perfetto {
+
+struct TraceTimestamp {
+  protos::pbzero::BuiltinClock clock_id;
+  uint64_t nanoseconds;
+};
+
+// This template provides a way to convert an abstract timestamp into the trace
+// clock timebase in nanoseconds. By specialising this template and defining
+// static ConvertTimestampToTraceTimeNs function in it the user can register
+// additional timestamp types. The return value should specify the clock used by
+// the timestamp as well as its value in nanoseconds.
+//
+// The users should see the specialisation for uint64_t below as an example.
+// Note that the specialisation should be defined in perfetto namespace.
+template <typename T>
+struct TraceTimestampTraits;
+
+// A pass-through implementation for raw uint64_t nanosecond timestamps.
+template <>
+struct TraceTimestampTraits<uint64_t> {
+  static inline TraceTimestamp ConvertTimestampToTraceTimeNs(
+      const uint64_t& timestamp) {
+    return {internal::TrackEventInternal::GetClockId(), timestamp};
+  }
+};
+
+namespace internal {
+namespace {
+
+// Checks if |T| is a valid track.
+template <typename T>
+static constexpr bool IsValidTrack() {
+  return std::is_convertible<T, Track>::value;
+}
+
+// Checks if |T| is a valid non-counter track.
+template <typename T>
+static constexpr bool IsValidNormalTrack() {
+  return std::is_convertible<T, Track>::value &&
+         !std::is_convertible<T, CounterTrack>::value;
+}
+
+// Because the user can use arbitrary timestamp types, we can't compare against
+// any known base type here. Instead, we check that a track or a trace lambda
+// isn't being interpreted as a timestamp.
+template <typename T,
+          typename CanBeConvertedToNsCheck = decltype(
+              ::perfetto::TraceTimestampTraits<typename base::remove_cvref_t<
+                  T>>::ConvertTimestampToTraceTimeNs(std::declval<T>())),
+          typename NotTrackCheck =
+              typename std::enable_if<!IsValidNormalTrack<T>()>::type,
+          typename NotLambdaCheck =
+              typename std::enable_if<!IsValidTraceLambda<T>()>::type>
+static constexpr bool IsValidTimestamp() {
+  return true;
+}
+
+}  // namespace
+
+// Traits for dynamic categories.
+template <typename CategoryType>
+struct CategoryTraits {
+  static constexpr bool kIsDynamic = true;
+  static constexpr const Category* GetStaticCategory(
+      const TrackEventCategoryRegistry*,
+      const CategoryType&) {
+    return nullptr;
+  }
+  static size_t GetStaticIndex(const CategoryType&) {
+    PERFETTO_DCHECK(false);  // Not reached.
+    return TrackEventCategoryRegistry::kDynamicCategoryIndex;
+  }
+  static DynamicCategory GetDynamicCategory(const CategoryType& category) {
+    return DynamicCategory{category};
+  }
+};
+
+// Traits for static categories.
+template <>
+struct CategoryTraits<size_t> {
+  static constexpr bool kIsDynamic = false;
+  static const Category* GetStaticCategory(
+      const TrackEventCategoryRegistry* registry,
+      size_t category_index) {
+    return registry->GetCategory(category_index);
+  }
+  static constexpr size_t GetStaticIndex(size_t category_index) {
+    return category_index;
+  }
+  static DynamicCategory GetDynamicCategory(size_t) {
+    PERFETTO_DCHECK(false);  // Not reached.
+    return DynamicCategory();
+  }
+};
+
+struct TrackEventDataSourceTraits : public perfetto::DefaultDataSourceTraits {
+  using IncrementalStateType = TrackEventIncrementalState;
+
+  // Use a one shared TLS slot so that all track event data sources write into
+  // the same sequence and share interning dictionaries.
+  static DataSourceThreadLocalState* GetDataSourceTLS(DataSourceStaticState*,
+                                                      TracingTLS* root_tls) {
+    return &root_tls->track_event_tls;
+  }
+};
+
+// A generic track event data source which is instantiated once per track event
+// category namespace.
+template <typename DataSourceType, const TrackEventCategoryRegistry* Registry>
+class TrackEventDataSource
+    : public DataSource<DataSourceType, TrackEventDataSourceTraits> {
+  using Base = DataSource<DataSourceType, TrackEventDataSourceTraits>;
+
+ public:
+  // Add or remove a session observer for this track event data source. The
+  // observer will be notified about started and stopped tracing sessions.
+  // Returns |true| if the observer was succesfully added (i.e., the maximum
+  // number of observers wasn't exceeded).
+  static bool AddSessionObserver(TrackEventSessionObserver* observer) {
+    return TrackEventInternal::AddSessionObserver(observer);
+  }
+
+  static void RemoveSessionObserver(TrackEventSessionObserver* observer) {
+    TrackEventInternal::RemoveSessionObserver(observer);
+  }
+
+  // DataSource implementation.
+  void OnSetup(const DataSourceBase::SetupArgs& args) override {
+    auto config_raw = args.config->track_event_config_raw();
+    bool ok = config_.ParseFromArray(config_raw.data(), config_raw.size());
+    PERFETTO_DCHECK(ok);
+    TrackEventInternal::EnableTracing(*Registry, config_, args);
+  }
+
+  void OnStart(const DataSourceBase::StartArgs& args) override {
+    TrackEventInternal::OnStart(args);
+  }
+
+  void OnStop(const DataSourceBase::StopArgs& args) override {
+    TrackEventInternal::DisableTracing(*Registry, args);
+  }
+
+  static void Flush() {
+    Base::template Trace([](typename Base::TraceContext ctx) { ctx.Flush(); });
+  }
+
+  // Determine if *any* tracing category is enabled.
+  static bool IsEnabled() {
+    bool enabled = false;
+    Base::template CallIfEnabled(
+        [&](uint32_t /*instances*/) { enabled = true; });
+    return enabled;
+  }
+
+  // Determine if tracing for the given static category is enabled.
+  static bool IsCategoryEnabled(size_t category_index) {
+    return Registry->GetCategoryState(category_index)
+        ->load(std::memory_order_relaxed);
+  }
+
+  // Determine if tracing for the given dynamic category is enabled.
+  static bool IsDynamicCategoryEnabled(
+      const DynamicCategory& dynamic_category) {
+    bool enabled = false;
+    Base::template Trace([&](typename Base::TraceContext ctx) {
+      enabled = IsDynamicCategoryEnabled(&ctx, dynamic_category);
+    });
+    return enabled;
+  }
+
+  // This is the inlined entrypoint for all track event trace points. It tries
+  // to be as lightweight as possible in terms of instructions and aims to
+  // compile down to an unlikely conditional jump to the actual trace writing
+  // function.
+  template <typename Callback>
+  static void CallIfCategoryEnabled(size_t category_index,
+                                    Callback callback) PERFETTO_ALWAYS_INLINE {
+    Base::template CallIfEnabled<CategoryTracePointTraits>(
+        [&callback](uint32_t instances) { callback(instances); },
+        {category_index});
+  }
+
+  // Once we've determined tracing to be enabled for this category, actually
+  // write a trace event onto this thread's default track. Outlined to avoid
+  // bloating code (mostly stack depth) at the actual trace point.
+  //
+  // The following combination of parameters is supported (in the given order):
+  // - Zero or one track,
+  // - Zero or one custom timestamp,
+  // - Arbitrary number of debug annotations.
+  // - Zero or one lambda.
+
+  // Trace point which does not take a track or timestamp.
+  template <typename CategoryType, typename... Arguments>
+  static void TraceForCategory(uint32_t instances,
+                               const CategoryType& category,
+                               const char* event_name,
+                               perfetto::protos::pbzero::TrackEvent::Type type,
+                               Arguments&&... args) PERFETTO_NO_INLINE {
+    TraceForCategoryImpl(instances, category, event_name, type,
+                         TrackEventInternal::kDefaultTrack,
+                         TrackEventInternal::GetTimeNs(),
+                         std::forward<Arguments>(args)...);
+  }
+
+  // Trace point which takes a track, but not timestamp.
+  // NOTE: Here track should be captured using universal reference (TrackType&&)
+  // instead of const TrackType& to ensure that the proper overload is selected
+  // (otherwise the compiler will fail to disambiguate between adding const& and
+  // parsing track as a part of Arguments...).
+  template <typename TrackType,
+            typename CategoryType,
+            typename... Arguments,
+            typename TrackTypeCheck = typename std::enable_if<
+                std::is_convertible<TrackType, Track>::value>::type>
+  static void TraceForCategory(uint32_t instances,
+                               const CategoryType& category,
+                               const char* event_name,
+                               perfetto::protos::pbzero::TrackEvent::Type type,
+                               TrackType&& track,
+                               Arguments&&... args) PERFETTO_NO_INLINE {
+    TraceForCategoryImpl(
+        instances, category, event_name, type, std::forward<TrackType>(track),
+        TrackEventInternal::GetTimeNs(), std::forward<Arguments>(args)...);
+  }
+
+  // Trace point which takes a timestamp, but not track.
+  template <typename CategoryType,
+            typename TimestampType = uint64_t,
+            typename... Arguments,
+            typename TimestampTypeCheck = typename std::enable_if<
+                IsValidTimestamp<TimestampType>()>::type>
+  static void TraceForCategory(uint32_t instances,
+                               const CategoryType& category,
+                               const char* event_name,
+                               perfetto::protos::pbzero::TrackEvent::Type type,
+                               TimestampType&& timestamp,
+                               Arguments&&... args) PERFETTO_NO_INLINE {
+    TraceForCategoryImpl(instances, category, event_name, type,
+                         TrackEventInternal::kDefaultTrack,
+                         std::forward<TimestampType>(timestamp),
+                         std::forward<Arguments>(args)...);
+  }
+
+  // Trace point which takes a timestamp and a track.
+  template <typename TrackType,
+            typename CategoryType,
+            typename TimestampType = uint64_t,
+            typename... Arguments,
+            typename TrackTypeCheck = typename std::enable_if<
+                std::is_convertible<TrackType, Track>::value>::type,
+            typename TimestampTypeCheck = typename std::enable_if<
+                IsValidTimestamp<TimestampType>()>::type>
+  static void TraceForCategory(uint32_t instances,
+                               const CategoryType& category,
+                               const char* event_name,
+                               perfetto::protos::pbzero::TrackEvent::Type type,
+                               TrackType&& track,
+                               TimestampType&& timestamp,
+                               Arguments&&... args) PERFETTO_NO_INLINE {
+    TraceForCategoryImpl(instances, category, event_name, type,
+                         std::forward<TrackType>(track),
+                         std::forward<TimestampType>(timestamp),
+                         std::forward<Arguments>(args)...);
+  }
+
+  // Trace point with with a counter sample.
+  template <typename CategoryType, typename ValueType>
+  static void TraceForCategory(uint32_t instances,
+                               const CategoryType& category,
+                               const char*,
+                               perfetto::protos::pbzero::TrackEvent::Type type,
+                               CounterTrack track,
+                               ValueType value) PERFETTO_ALWAYS_INLINE {
+    PERFETTO_DCHECK(type == perfetto::protos::pbzero::TrackEvent::TYPE_COUNTER);
+    TraceForCategory(instances, category, /*name=*/nullptr, type, track,
+                     TrackEventInternal::GetTimeNs(), value);
+  }
+
+  // Trace point with with a timestamp and a counter sample.
+  template <typename CategoryType,
+            typename TimestampType = uint64_t,
+            typename TimestampTypeCheck = typename std::enable_if<
+                IsValidTimestamp<TimestampType>()>::type,
+            typename ValueType>
+  static void TraceForCategory(uint32_t instances,
+                               const CategoryType& category,
+                               const char*,
+                               perfetto::protos::pbzero::TrackEvent::Type type,
+                               CounterTrack track,
+                               TimestampType timestamp,
+                               ValueType value) PERFETTO_ALWAYS_INLINE {
+    PERFETTO_DCHECK(type == perfetto::protos::pbzero::TrackEvent::TYPE_COUNTER);
+    TraceForCategoryImpl(
+        instances, category, /*name=*/nullptr, type, track, timestamp,
+        [&](EventContext event_ctx) {
+          if (std::is_integral<ValueType>::value) {
+            event_ctx.event()->set_counter_value(static_cast<int64_t>(value));
+          } else {
+            event_ctx.event()->set_double_counter_value(
+                static_cast<double>(value));
+          }
+        });
+  }
+
+  // Initialize the track event library. Should be called before tracing is
+  // enabled.
+  static bool Register() {
+    // Registration is performed out-of-line so users don't need to depend on
+    // DataSourceDescriptor C++ bindings.
+    return TrackEventInternal::Initialize(
+        *Registry,
+        [](const DataSourceDescriptor& dsd) { return Base::Register(dsd); });
+  }
+
+  // Record metadata about different types of timeline tracks. See Track.
+  static void SetTrackDescriptor(const Track& track,
+                                 const protos::gen::TrackDescriptor& desc) {
+    PERFETTO_DCHECK(track.uuid == desc.uuid());
+    TrackRegistry::Get()->UpdateTrack(track, desc.SerializeAsString());
+    Base::template Trace([&](typename Base::TraceContext ctx) {
+      TrackEventInternal::WriteTrackDescriptor(
+          track, ctx.tls_inst_->trace_writer.get());
+    });
+  }
+
+  // DEPRECATED. Only kept for backwards compatibility.
+  static void SetTrackDescriptor(
+      const Track& track,
+      std::function<void(protos::pbzero::TrackDescriptor*)> callback) {
+    SetTrackDescriptorImpl(track, std::move(callback));
+  }
+
+  // DEPRECATED. Only kept for backwards compatibility.
+  static void SetProcessDescriptor(
+      std::function<void(protos::pbzero::TrackDescriptor*)> callback,
+      const ProcessTrack& track = ProcessTrack::Current()) {
+    SetTrackDescriptorImpl(std::move(track), std::move(callback));
+  }
+
+  // DEPRECATED. Only kept for backwards compatibility.
+  static void SetThreadDescriptor(
+      std::function<void(protos::pbzero::TrackDescriptor*)> callback,
+      const ThreadTrack& track = ThreadTrack::Current()) {
+    SetTrackDescriptorImpl(std::move(track), std::move(callback));
+  }
+
+  static void EraseTrackDescriptor(const Track& track) {
+    TrackRegistry::Get()->EraseTrack(track);
+  }
+
+  // Returns the current trace timestamp in nanoseconds. Note the returned
+  // timebase may vary depending on the platform, but will always match the
+  // timestamps recorded by track events (see GetTraceClockId).
+  static uint64_t GetTraceTimeNs() { return TrackEventInternal::GetTimeNs(); }
+
+  // Returns the type of clock used by GetTraceTimeNs().
+  static constexpr protos::pbzero::BuiltinClock GetTraceClockId() {
+    return TrackEventInternal::GetClockId();
+  }
+
+ private:
+  // Each category has its own enabled/disabled state, stored in the category
+  // registry.
+  struct CategoryTracePointTraits {
+    // Each trace point with a static category has an associated category index.
+    struct TracePointData {
+      size_t category_index;
+    };
+    // Called to get the enabled state bitmap of a given category.
+    // |data| is the trace point data structure given to
+    // DataSource::TraceWithInstances.
+    static constexpr std::atomic<uint8_t>* GetActiveInstances(
+        TracePointData data) {
+      return Registry->GetCategoryState(data.category_index);
+    }
+  };
+
+  template <typename CategoryType,
+            typename TrackType = Track,
+            typename TimestampType = uint64_t,
+            typename TimestampTypeCheck = typename std::enable_if<
+                IsValidTimestamp<TimestampType>()>::type,
+            typename TrackTypeCheck =
+                typename std::enable_if<IsValidTrack<TrackType>()>::type,
+            typename... Arguments>
+  static void TraceForCategoryImpl(
+      uint32_t instances,
+      const CategoryType& category,
+      const char* event_name,
+      perfetto::protos::pbzero::TrackEvent::Type type,
+      const TrackType& track,
+      const TimestampType& timestamp,
+      Arguments&&... args) PERFETTO_ALWAYS_INLINE {
+    using CatTraits = CategoryTraits<CategoryType>;
+    const Category* static_category =
+        CatTraits::GetStaticCategory(Registry, category);
+    TraceWithInstances(
+        instances, category, [&](typename Base::TraceContext ctx) {
+          // If this category is dynamic, first check whether it's enabled.
+          if (CatTraits::kIsDynamic &&
+              !IsDynamicCategoryEnabled(
+                  &ctx, CatTraits::GetDynamicCategory(category))) {
+            return;
+          }
+
+          // TODO(skyostil): Support additional clock ids.
+          TraceTimestamp trace_timestamp = ::perfetto::TraceTimestampTraits<
+              TimestampType>::ConvertTimestampToTraceTimeNs(timestamp);
+          PERFETTO_DCHECK(trace_timestamp.clock_id ==
+                          TrackEventInternal::GetClockId());
+
+          // Make sure incremental state is valid.
+          TraceWriterBase* trace_writer = ctx.tls_inst_->trace_writer.get();
+          TrackEventIncrementalState* incr_state = ctx.GetIncrementalState();
+          if (incr_state->was_cleared) {
+            incr_state->was_cleared = false;
+            TrackEventInternal::ResetIncrementalState(
+                trace_writer, trace_timestamp.nanoseconds);
+          }
+
+          // Write the track descriptor before any event on the track.
+          if (track) {
+            TrackEventInternal::WriteTrackDescriptorIfNeeded(
+                track, trace_writer, incr_state);
+          }
+
+          // Write the event itself.
+          {
+            auto event_ctx = TrackEventInternal::WriteEvent(
+                trace_writer, incr_state, static_category, event_name, type,
+                trace_timestamp.nanoseconds);
+            // Write dynamic categories (except for events that don't require
+            // categories). For counter events, the counter name (and optional
+            // category) is stored as part of the track descriptor instead being
+            // recorded with individual events.
+            if (CatTraits::kIsDynamic &&
+                type != protos::pbzero::TrackEvent::TYPE_SLICE_END &&
+                type != protos::pbzero::TrackEvent::TYPE_COUNTER) {
+              DynamicCategory dynamic_category =
+                  CatTraits::GetDynamicCategory(category);
+              Category cat = Category::FromDynamicCategory(dynamic_category);
+              cat.ForEachGroupMember(
+                  [&](const char* member_name, size_t name_size) {
+                    event_ctx.event()->add_categories(member_name, name_size);
+                    return true;
+                  });
+            }
+            if (&track != &TrackEventInternal::kDefaultTrack)
+              event_ctx.event()->set_track_uuid(track.uuid);
+            WriteTrackEventArgs(std::move(event_ctx),
+                                std::forward<Arguments>(args)...);
+          }  // event_ctx
+        });
+  }
+
+  template <typename CategoryType, typename Lambda>
+  static void TraceWithInstances(uint32_t instances,
+                                 const CategoryType& category,
+                                 Lambda lambda) PERFETTO_ALWAYS_INLINE {
+    using CatTraits = CategoryTraits<CategoryType>;
+    if (CatTraits::kIsDynamic) {
+      Base::template TraceWithInstances(instances, std::move(lambda));
+    } else {
+      Base::template TraceWithInstances<CategoryTracePointTraits>(
+          instances, std::move(lambda), {CatTraits::GetStaticIndex(category)});
+    }
+  }
+
+  // Records a track descriptor into the track descriptor registry and, if we
+  // are tracing, also mirrors the descriptor into the trace.
+  template <typename TrackType>
+  static void SetTrackDescriptorImpl(
+      const TrackType& track,
+      std::function<void(protos::pbzero::TrackDescriptor*)> callback) {
+    TrackRegistry::Get()->UpdateTrack(track, std::move(callback));
+    Base::template Trace([&](typename Base::TraceContext ctx) {
+      TrackEventInternal::WriteTrackDescriptor(
+          track, ctx.tls_inst_->trace_writer.get());
+    });
+  }
+
+  // Determines if the given dynamic category is enabled, first by checking the
+  // per-trace writer cache or by falling back to computing it based on the
+  // trace config for the given session.
+  static bool IsDynamicCategoryEnabled(
+      typename Base::TraceContext* ctx,
+      const DynamicCategory& dynamic_category) {
+    auto incr_state = ctx->GetIncrementalState();
+    auto it = incr_state->dynamic_categories.find(dynamic_category.name);
+    if (it == incr_state->dynamic_categories.end()) {
+      // We haven't seen this category before. Let's figure out if it's enabled.
+      // This requires grabbing a lock to read the session's trace config.
+      auto ds = ctx->GetDataSourceLocked();
+      Category category{Category::FromDynamicCategory(dynamic_category)};
+      bool enabled = TrackEventInternal::IsCategoryEnabled(
+          *Registry, ds->config_, category);
+      // TODO(skyostil): Cap the size of |dynamic_categories|.
+      incr_state->dynamic_categories[dynamic_category.name] = enabled;
+      return enabled;
+    }
+    return it->second;
+  }
+
+  // Config for the current tracing session.
+  protos::gen::TrackEventConfig config_;
+};
+
+}  // namespace internal
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_INTERNAL_TRACK_EVENT_DATA_SOURCE_H_
+// gen_amalgamated begin header: include/perfetto/tracing/internal/track_event_macros.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_INTERNAL_TRACK_EVENT_MACROS_H_
+#define INCLUDE_PERFETTO_TRACING_INTERNAL_TRACK_EVENT_MACROS_H_
+
+// This file contains underlying macros for the trace point track event
+// implementation. Perfetto API users typically don't need to use anything here
+// directly.
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_data_source.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/track_event_category_registry.h"
+
+// Ignore GCC warning about a missing argument for a variadic macro parameter.
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC system_header
+#endif
+
+// Defines data structures for backing a category registry.
+//
+// Each category has one enabled/disabled bit per possible data source instance.
+// The bits are packed, i.e., each byte holds the state for instances. To
+// improve cache locality, the bits for each instance are stored separately from
+// the names of the categories:
+//
+//   byte 0                      byte 1
+//   (inst0, inst1, ..., inst7), (inst0, inst1, ..., inst7)
+//
+#define PERFETTO_INTERNAL_DECLARE_CATEGORIES(...)                             \
+  namespace internal {                                                        \
+  constexpr ::perfetto::Category kCategories[] = {__VA_ARGS__};               \
+  constexpr size_t kCategoryCount =                                           \
+      sizeof(kCategories) / sizeof(kCategories[0]);                           \
+  /* The per-instance enable/disable state per category */                    \
+  PERFETTO_COMPONENT_EXPORT extern std::atomic<uint8_t>                       \
+      g_category_state_storage[kCategoryCount];                               \
+  /* The category registry which mediates access to the above structures. */  \
+  /* The registry is used for two purposes: */                                \
+  /**/                                                                        \
+  /*    1) For looking up categories at build (constexpr) time. */            \
+  /*    2) For declaring the per-namespace TrackEvent data source. */         \
+  /**/                                                                        \
+  /* Because usage #1 requires a constexpr type and usage #2 requires an */   \
+  /* extern type (to avoid declaring a type based on a translation-unit */    \
+  /* variable), we need two separate copies of the registry with different */ \
+  /* storage specifiers. */                                                   \
+  /**/                                                                        \
+  /* TODO(skyostil): Unify these using a C++17 inline constexpr variable. */  \
+  constexpr ::perfetto::internal::TrackEventCategoryRegistry                  \
+      kConstExprCategoryRegistry(kCategoryCount,                              \
+                                 &kCategories[0],                             \
+                                 &g_category_state_storage[0]);               \
+  PERFETTO_COMPONENT_EXPORT extern const ::perfetto::internal::               \
+      TrackEventCategoryRegistry kCategoryRegistry;                           \
+  static_assert(kConstExprCategoryRegistry.ValidateCategories(),              \
+                "Invalid category names found");                              \
+  }  // namespace internal
+
+// In a .cc file, declares storage for each category's runtime state.
+#define PERFETTO_INTERNAL_CATEGORY_STORAGE()             \
+  namespace internal {                                   \
+  PERFETTO_COMPONENT_EXPORT std::atomic<uint8_t>         \
+      g_category_state_storage[kCategoryCount];          \
+  PERFETTO_COMPONENT_EXPORT const ::perfetto::internal:: \
+      TrackEventCategoryRegistry kCategoryRegistry(      \
+          kCategoryCount,                                \
+          &kCategories[0],                               \
+          &g_category_state_storage[0]);                 \
+  }  // namespace internal
+
+// Defines the TrackEvent data source for the current track event namespace.
+#define PERFETTO_INTERNAL_DECLARE_TRACK_EVENT_DATA_SOURCE() \
+  struct PERFETTO_COMPONENT_EXPORT TrackEvent               \
+      : public ::perfetto::internal::TrackEventDataSource<  \
+            TrackEvent, &internal::kCategoryRegistry> {}
+
+// At compile time, turns a category name represented by a static string into an
+// index into the current category registry. A build error will be generated if
+// the category hasn't been registered or added to the list of allowed dynamic
+// categories. See PERFETTO_DEFINE_CATEGORIES.
+#define PERFETTO_GET_CATEGORY_INDEX(category)                                  \
+  ::PERFETTO_TRACK_EVENT_NAMESPACE::internal::kConstExprCategoryRegistry.Find( \
+      category,                                                                \
+      ::PERFETTO_TRACK_EVENT_NAMESPACE::internal::IsDynamicCategory(category))
+
+// Generate a unique variable name with a given prefix.
+#define PERFETTO_INTERNAL_CONCAT2(a, b) a##b
+#define PERFETTO_INTERNAL_CONCAT(a, b) PERFETTO_INTERNAL_CONCAT2(a, b)
+#define PERFETTO_UID(prefix) PERFETTO_INTERNAL_CONCAT(prefix, __LINE__)
+
+// Efficiently determines whether tracing is enabled for the given category, and
+// if so, emits one trace event with the given arguments.
+#define PERFETTO_INTERNAL_TRACK_EVENT(category, ...)                           \
+  do {                                                                         \
+    namespace tns = ::PERFETTO_TRACK_EVENT_NAMESPACE;                          \
+    /* Compute the category index outside the lambda to work around a */       \
+    /* GCC 7 bug */                                                            \
+    static constexpr auto PERFETTO_UID(                                        \
+        kCatIndex_ADD_TO_PERFETTO_DEFINE_CATEGORIES_IF_FAILS_) =               \
+        PERFETTO_GET_CATEGORY_INDEX(category);                                 \
+    if (tns::internal::IsDynamicCategory(category)) {                          \
+      tns::TrackEvent::CallIfEnabled(                                          \
+          [&](uint32_t instances) PERFETTO_NO_THREAD_SAFETY_ANALYSIS {         \
+            tns::TrackEvent::TraceForCategory(instances, category,             \
+                                              ##__VA_ARGS__);                  \
+          });                                                                  \
+    } else {                                                                   \
+      tns::TrackEvent::CallIfCategoryEnabled(                                  \
+          PERFETTO_UID(kCatIndex_ADD_TO_PERFETTO_DEFINE_CATEGORIES_IF_FAILS_), \
+          [&](uint32_t instances) PERFETTO_NO_THREAD_SAFETY_ANALYSIS {         \
+            tns::TrackEvent::TraceForCategory(                                 \
+                instances,                                                     \
+                PERFETTO_UID(                                                  \
+                    kCatIndex_ADD_TO_PERFETTO_DEFINE_CATEGORIES_IF_FAILS_),    \
+                ##__VA_ARGS__);                                                \
+          });                                                                  \
+    }                                                                          \
+  } while (false)
+
+#define PERFETTO_INTERNAL_SCOPED_TRACK_EVENT(category, name, ...)             \
+  struct PERFETTO_UID(ScopedEvent) {                                          \
+    struct EventFinalizer {                                                   \
+      /* The parameter is an implementation detail. It allows the          */ \
+      /* anonymous struct to use aggregate initialization to invoke the    */ \
+      /* lambda (which emits the BEGIN event and returns an integer)       */ \
+      /* with the proper reference capture for any                         */ \
+      /* TrackEventArgumentFunction in |__VA_ARGS__|. This is required so  */ \
+      /* that the scoped event is exactly ONE line and can't escape the    */ \
+      /* scope if used in a single line if statement.                      */ \
+      EventFinalizer(...) {}                                                  \
+      ~EventFinalizer() { TRACE_EVENT_END(category); }                        \
+    } finalizer;                                                              \
+  } PERFETTO_UID(scoped_event) {                                              \
+    [&]() {                                                                   \
+      TRACE_EVENT_BEGIN(category, name, ##__VA_ARGS__);                       \
+      return 0;                                                               \
+    }()                                                                       \
+  }
+
+#define PERFETTO_INTERNAL_CATEGORY_ENABLED(category)                         \
+  (::PERFETTO_TRACK_EVENT_NAMESPACE::internal::IsDynamicCategory(category)   \
+       ? ::PERFETTO_TRACK_EVENT_NAMESPACE::TrackEvent::                      \
+             IsDynamicCategoryEnabled(::perfetto::DynamicCategory(category)) \
+       : ::PERFETTO_TRACK_EVENT_NAMESPACE::TrackEvent::IsCategoryEnabled(    \
+             PERFETTO_GET_CATEGORY_INDEX(category)))
+
+#endif  // INCLUDE_PERFETTO_TRACING_INTERNAL_TRACK_EVENT_MACROS_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_TRACK_EVENT_H_
+#define INCLUDE_PERFETTO_TRACING_TRACK_EVENT_H_
+
+// gen_amalgamated expanded: #include "perfetto/base/time.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_data_source.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_internal.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_macros.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/string_helpers.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/track.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/track_event_category_registry.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_event.pbzero.h"
+
+#include <type_traits>
+
+// This file contains a set of macros designed for instrumenting applications
+// with track event trace points. While the underlying TrackEvent API can also
+// be used directly, doing so efficiently requires some care (e.g., to avoid
+// evaluating arguments while tracing is disabled). These types of optimizations
+// are abstracted away by the macros below.
+//
+// ================
+// Quickstart guide
+// ================
+//
+//   To add track events to your application, first define your categories in,
+//   e.g., my_tracing.h:
+//
+//       PERFETTO_DEFINE_CATEGORIES(
+//           perfetto::Category("base"),
+//           perfetto::Category("v8"),
+//           perfetto::Category("cc"));
+//
+//   Then in a single .cc file, e.g., my_tracing.cc:
+//
+//       #include "my_tracing.h"
+//       PERFETTO_TRACK_EVENT_STATIC_STORAGE();
+//
+//   Finally, register track events at startup, after which you can record
+//   events with the TRACE_EVENT macros:
+//
+//       #include "my_tracing.h"
+//
+//       int main() {
+//         perfetto::TrackEvent::Register();
+//
+//         // A basic track event with just a name.
+//         TRACE_EVENT("category", "MyEvent");
+//
+//         // A track event with (up to two) debug annotations.
+//         TRACE_EVENT("category", "MyEvent", "parameter", 42);
+//
+//         // A track event with a strongly typed parameter.
+//         TRACE_EVENT("category", "MyEvent", [](perfetto::EventContext ctx) {
+//           ctx.event()->set_foo(42);
+//           ctx.event()->set_bar(.5f);
+//         });
+//       }
+//
+//  Note that track events must be nested consistently, i.e., the following is
+//  not allowed:
+//
+//    TRACE_EVENT_BEGIN("a", "bar", ...);
+//    TRACE_EVENT_BEGIN("b", "foo", ...);
+//    TRACE_EVENT_END("a");  // "foo" must be closed before "bar".
+//    TRACE_EVENT_END("b");
+//
+// ====================
+// Implementation notes
+// ====================
+//
+// The track event library consists of the following layers and components. The
+// classes the internal namespace shouldn't be considered part of the public
+// API.
+//                    .--------------------------------.
+//               .----|  TRACE_EVENT                   |----.
+//      write   |     |   - App instrumentation point  |     |  write
+//      event   |     '--------------------------------'     |  arguments
+//              V                                            V
+//  .----------------------------------.    .-----------------------------.
+//  | TrackEvent                       |    | EventContext                |
+//  |  - Registry of event categories  |    |  - One track event instance |
+//  '----------------------------------'    '-----------------------------'
+//              |                                            |
+//              |                                            | look up
+//              | is                                         | interning ids
+//              V                                            V
+//  .----------------------------------.    .-----------------------------.
+//  | internal::TrackEventDataSource   |    | TrackEventInternedDataIndex |
+//  | - Perfetto data source           |    | - Corresponds to a field in |
+//  | - Has TrackEventIncrementalState |    |   in interned_data.proto    |
+//  '----------------------------------'    '-----------------------------'
+//              |                  |                         ^
+//              |                  |       owns (1:many)     |
+//              | write event      '-------------------------'
+//              V
+//  .----------------------------------.
+//  | internal::TrackEventInternal     |
+//  | - Outlined code to serialize     |
+//  |   one track event                |
+//  '----------------------------------'
+//
+
+// Each compilation unit can be in exactly one track event namespace,
+// allowing the overall program to use multiple track event data sources and
+// category lists if necessary. Use this macro to select the namespace for the
+// current compilation unit.
+//
+// If the program uses multiple track event namespaces, category & track event
+// registration (see quickstart above) needs to happen for both namespaces
+// separately.
+#ifndef PERFETTO_TRACK_EVENT_NAMESPACE
+#define PERFETTO_TRACK_EVENT_NAMESPACE perfetto
+#endif
+
+// Deprecated; see perfetto::Category().
+#define PERFETTO_CATEGORY(name) \
+  ::perfetto::Category { #name }
+
+// Internal helpers for determining if a given category is defined at build or
+// runtime.
+namespace PERFETTO_TRACK_EVENT_NAMESPACE {
+namespace internal {
+
+// By default no statically defined categories are dynamic, but this can be
+// overridden with PERFETTO_DEFINE_TEST_CATEGORY_PREFIXES.
+template <typename... T>
+constexpr bool IsDynamicCategory(const char*) {
+  return false;
+}
+
+// Explicitly dynamic categories are always dynamic.
+constexpr bool IsDynamicCategory(const ::perfetto::DynamicCategory&) {
+  return true;
+}
+
+}  // namespace internal
+}  // namespace PERFETTO_TRACK_EVENT_NAMESPACE
+
+// Normally all categories are defined statically at build-time (see
+// PERFETTO_DEFINE_CATEGORIES). However, some categories are only used for
+// testing, and we shouldn't publish them to the tracing service or include them
+// in a production binary. Use this macro to define a list of prefixes for these
+// types of categories. Note that trace points using these categories will be
+// slightly less efficient compared to regular trace points.
+#define PERFETTO_DEFINE_TEST_CATEGORY_PREFIXES(...)                       \
+  namespace PERFETTO_TRACK_EVENT_NAMESPACE {                              \
+  namespace internal {                                                    \
+  template <>                                                             \
+  constexpr bool IsDynamicCategory(const char* name) {                    \
+    return ::perfetto::internal::IsStringInPrefixList(name, __VA_ARGS__); \
+  }                                                                       \
+  } /* namespace internal */                                              \
+  } /* namespace PERFETTO_TRACK_EVENT_NAMESPACE */                        \
+  PERFETTO_INTERNAL_SWALLOW_SEMICOLON()
+
+// Register the set of available categories by passing a list of categories to
+// this macro: PERFETTO_CATEGORY(cat1), PERFETTO_CATEGORY(cat2), ...
+#define PERFETTO_DEFINE_CATEGORIES(...)                        \
+  namespace PERFETTO_TRACK_EVENT_NAMESPACE {                   \
+  /* The list of category names */                             \
+  PERFETTO_INTERNAL_DECLARE_CATEGORIES(__VA_ARGS__)            \
+  /* The track event data source for this set of categories */ \
+  PERFETTO_INTERNAL_DECLARE_TRACK_EVENT_DATA_SOURCE();         \
+  } /* namespace PERFETTO_TRACK_EVENT_NAMESPACE */             \
+  PERFETTO_DECLARE_DATA_SOURCE_STATIC_MEMBERS(                 \
+      PERFETTO_TRACK_EVENT_NAMESPACE::TrackEvent,              \
+      perfetto::internal::TrackEventDataSourceTraits)
+
+// Allocate storage for each category by using this macro once per track event
+// namespace.
+#define PERFETTO_TRACK_EVENT_STATIC_STORAGE()      \
+  namespace PERFETTO_TRACK_EVENT_NAMESPACE {       \
+  PERFETTO_INTERNAL_CATEGORY_STORAGE()             \
+  } /* namespace PERFETTO_TRACK_EVENT_NAMESPACE */ \
+  PERFETTO_DEFINE_DATA_SOURCE_STATIC_MEMBERS(      \
+      PERFETTO_TRACK_EVENT_NAMESPACE::TrackEvent,  \
+      perfetto::internal::TrackEventDataSourceTraits)
+
+// Ignore GCC warning about a missing argument for a variadic macro parameter.
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC system_header
+#endif
+
+// Begin a slice under |category| with the title |name|. Both strings must be
+// static constants. The track event is only recorded if |category| is enabled
+// for a tracing session.
+//
+// The slice is thread-scoped (i.e., written to the default track of the current
+// thread) unless overridden with a custom track object (see Track).
+//
+// |name| must be a string with static lifetime (i.e., the same
+// address must not be used for a different event name in the future). If you
+// want to use a dynamically allocated name, do this:
+//
+//  TRACE_EVENT("category", nullptr, [&](perfetto::EventContext ctx) {
+//    ctx.event()->set_name(dynamic_name);
+//  });
+//
+// The following optional arguments can be passed to `TRACE_EVENT` to add extra
+// information to events:
+//
+// TRACE_EVENT("cat", "name"[, track][, timestamp]
+//                          [, "debug_name1", debug_value1]
+//                          [, "debug_name2", debug_value2]
+//                          ...
+//                          [, "debug_nameN", debug_valueN]
+//                          [, lambda]);
+//
+// Some examples of valid combinations:
+//
+// 1. A lambda for writing custom TrackEvent fields:
+//
+//   TRACE_EVENT("category", "Name", [&](perfetto::EventContext ctx) {
+//     ctx.event()->set_custom_value(...);
+//   });
+//
+// 2. A timestamp and a lambda:
+//
+//   TRACE_EVENT("category", "Name", time_in_nanoseconds,
+//       [&](perfetto::EventContext ctx) {
+//     ctx.event()->set_custom_value(...);
+//   });
+//
+//   |time_in_nanoseconds| should be an uint64_t by default. To support custom
+//   timestamp types,
+//   |perfetto::TraceTimestampTraits<T>::ConvertTimestampToTraceTimeNs|
+//   should be defined. See |ConvertTimestampToTraceTimeNs| for more details.
+//
+// 3. Arbitrary number of debug annotations:
+//
+//   TRACE_EVENT("category", "Name", "arg", value);
+//   TRACE_EVENT("category", "Name", "arg", value, "arg2", value2);
+//   TRACE_EVENT("category", "Name", "arg", value, "arg2", value2,
+//                                   "arg3", value3);
+//
+//   See |TracedValue| for recording custom types as debug annotations.
+//
+// 4. Arbitrary number of debug annotations and a lambda:
+//
+//   TRACE_EVENT("category", "Name", "arg", value,
+//       [&](perfetto::EventContext ctx) {
+//     ctx.event()->set_custom_value(...);
+//   });
+//
+// 5. An overridden track:
+//
+//   TRACE_EVENT("category", "Name", perfetto::Track(1234));
+//
+//   See |Track| for other types of tracks which may be used.
+//
+// 6. A track and a lambda:
+//
+//   TRACE_EVENT("category", "Name", perfetto::Track(1234),
+//       [&](perfetto::EventContext ctx) {
+//     ctx.event()->set_custom_value(...);
+//   });
+//
+// 7. A track and a timestamp:
+//
+//   TRACE_EVENT("category", "Name", perfetto::Track(1234),
+//       time_in_nanoseconds);
+//
+// 8. A track, a timestamp and a lambda:
+//
+//   TRACE_EVENT("category", "Name", perfetto::Track(1234),
+//       time_in_nanoseconds, [&](perfetto::EventContext ctx) {
+//     ctx.event()->set_custom_value(...);
+//   });
+//
+// 9. A track and an arbitrary number of debug annotions:
+//
+//   TRACE_EVENT("category", "Name", perfetto::Track(1234),
+//               "arg", value);
+//   TRACE_EVENT("category", "Name", perfetto::Track(1234),
+//               "arg", value, "arg2", value2);
+//
+#define TRACE_EVENT_BEGIN(category, name, ...)               \
+  PERFETTO_INTERNAL_TRACK_EVENT(                             \
+      category, ::perfetto::internal::GetStaticString(name), \
+      ::perfetto::protos::pbzero::TrackEvent::TYPE_SLICE_BEGIN, ##__VA_ARGS__)
+
+// End a slice under |category|.
+#define TRACE_EVENT_END(category, ...) \
+  PERFETTO_INTERNAL_TRACK_EVENT(       \
+      category, /*name=*/nullptr,      \
+      ::perfetto::protos::pbzero::TrackEvent::TYPE_SLICE_END, ##__VA_ARGS__)
+
+// Begin a slice which gets automatically closed when going out of scope.
+#define TRACE_EVENT(category, name, ...) \
+  PERFETTO_INTERNAL_SCOPED_TRACK_EVENT(category, name, ##__VA_ARGS__)
+
+// Emit a slice which has zero duration.
+#define TRACE_EVENT_INSTANT(category, name, ...)             \
+  PERFETTO_INTERNAL_TRACK_EVENT(                             \
+      category, ::perfetto::internal::GetStaticString(name), \
+      ::perfetto::protos::pbzero::TrackEvent::TYPE_INSTANT, ##__VA_ARGS__)
+
+// Efficiently determine if the given static or dynamic trace category or
+// category group is enabled for tracing.
+#define TRACE_EVENT_CATEGORY_ENABLED(category) \
+  PERFETTO_INTERNAL_CATEGORY_ENABLED(category)
+
+// Time-varying numeric data can be recorded with the TRACE_COUNTER macro:
+//
+//   TRACE_COUNTER("cat", counter_track[, timestamp], value);
+//
+// For example, to record a single value for a counter called "MyCounter":
+//
+//   TRACE_COUNTER("category", "MyCounter", 1234.5);
+//
+// This data is displayed as a counter track in the Perfetto UI.
+//
+// Both integer and floating point counter values are supported. Counters can
+// also be annotated with additional information such as units, for example, for
+// tracking the rendering framerate in terms of frames per second or "fps":
+//
+//   TRACE_COUNTER("category", perfetto::CounterTrack("Framerate", "fps"), 120);
+//
+// As another example, a memory counter that records bytes but accepts samples
+// as kilobytes (to reduce trace binary size) can be defined like this:
+//
+//   perfetto::CounterTrack memory_track = perfetto::CounterTrack("Memory")
+//       .set_unit("bytes")
+//       .set_multiplier(1024);
+//   TRACE_COUNTER("category", memory_track, 4 /* = 4096 bytes */);
+//
+// See /protos/perfetto/trace/track_event/counter_descriptor.proto
+// for the full set of attributes for a counter track.
+//
+// To record a counter value at a specific point in time (instead of the current
+// time), you can pass in a custom timestamp:
+//
+//   // First record the current time and counter value.
+//   uint64_t timestamp = perfetto::TrackEvent::GetTraceTimeNs();
+//   int64_t value = 1234;
+//
+//   // Later, emit a sample at that point in time.
+//   TRACE_COUNTER("category", "MyCounter", timestamp, value);
+//
+#define TRACE_COUNTER(category, track, ...)                 \
+  PERFETTO_INTERNAL_TRACK_EVENT(                            \
+      category, /*name=*/nullptr,                           \
+      ::perfetto::protos::pbzero::TrackEvent::TYPE_COUNTER, \
+      ::perfetto::CounterTrack(track), ##__VA_ARGS__)
+
+// TODO(skyostil): Add flow events.
+
+#endif  // INCLUDE_PERFETTO_TRACING_TRACK_EVENT_H_
+// gen_amalgamated begin header: include/perfetto/tracing/track_event_interned_data_index.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_TRACK_EVENT_INTERNED_DATA_INDEX_H_
+#define INCLUDE_PERFETTO_TRACING_TRACK_EVENT_INTERNED_DATA_INDEX_H_
+
+// gen_amalgamated expanded: #include "perfetto/tracing/internal/track_event_internal.h"
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/event_context.h"
+
+#include <map>
+#include <type_traits>
+#include <unordered_map>
+
+// This file has templates for defining your own interned data types to be used
+// with track event. Interned data can be useful for avoiding repeating the same
+// constant data (e.g., strings) throughout the trace.
+//
+// =============
+// Example usage
+// =============
+//
+// First define an interning index for your type. It should map to a specific
+// field of interned_data.proto and define how the interned data is written into
+// that message.
+//
+//   struct MyInternedData
+//       : public perfetto::TrackEventInternedDataIndex<
+//           MyInternedData,
+//           perfetto::protos::pbzero::InternedData::kMyInternedDataFieldNumber,
+//           const char*> {
+//     static void Add(perfetto::protos::pbzero::InternedData* interned_data,
+//                      size_t iid,
+//                      const char* value) {
+//       auto my_data = interned_data->add_my_interned_data();
+//       my_data->set_iid(iid);
+//       my_data->set_value(value);
+//     }
+//   };
+//
+// Next, use your interned data in a trace point as shown below. The interned
+// string will only be emitted the first time the trace point is hit.
+//
+//   TRACE_EVENT_BEGIN(
+//      "category", "Event", [&](perfetto::EventContext ctx) {
+//        auto my_message = ctx.event()->set_my_message();
+//        size_t iid = MyInternedData::Get(&ctx, "Some data");
+//        my_message->set_iid(iid);
+//      });
+//
+
+namespace perfetto {
+
+// By default, the interning index stores a full copy of the interned data. This
+// ensures the same data is always mapped to the same interning id, and there is
+// no danger of collisions. This comes at the cost of memory usage, however, so
+// consider using HashedInternedDataTraits if that may be an issue.
+//
+// This type of index also performs hashing on the stored data for lookups; for
+// types where this isn't necessary (e.g., raw const char*), use
+// SmallInternedDataTraits.
+struct BigInternedDataTraits {
+  template <typename ValueType>
+  class Index {
+   public:
+    bool LookUpOrInsert(size_t* iid, const ValueType& value) {
+      size_t next_id = data_.size() + 1;
+      auto it_and_inserted = data_.insert(std::make_pair(value, next_id));
+      if (!it_and_inserted.second) {
+        *iid = it_and_inserted.first->second;
+        return true;
+      }
+      *iid = next_id;
+      return false;
+    }
+
+   private:
+    std::unordered_map<ValueType, size_t> data_;
+  };
+};
+
+// This type of interning index keeps full copies of interned data without
+// hashing the values. This is a good fit for small types that can be directly
+// used as index keys.
+struct SmallInternedDataTraits {
+  template <typename ValueType>
+  class Index {
+   public:
+    bool LookUpOrInsert(size_t* iid, const ValueType& value) {
+      size_t next_id = data_.size() + 1;
+      auto it_and_inserted = data_.insert(std::make_pair(value, next_id));
+      if (!it_and_inserted.second) {
+        *iid = it_and_inserted.first->second;
+        return true;
+      }
+      *iid = next_id;
+      return false;
+    }
+
+   private:
+    std::map<ValueType, size_t> data_;
+  };
+};
+
+// This type of interning index only stores the hash of the interned values
+// instead of the values themselves. This is more efficient in terms of memory
+// usage, but assumes that there are no hash collisions. If a hash collision
+// occurs, two or more values will be mapped to the same interning id.
+//
+// Note that the given type must have a specialization for std::hash.
+struct HashedInternedDataTraits {
+  template <typename ValueType>
+  class Index {
+   public:
+    bool LookUpOrInsert(size_t* iid, const ValueType& value) {
+      auto key = std::hash<ValueType>()(value);
+      size_t next_id = data_.size() + 1;
+      auto it_and_inserted = data_.insert(std::make_pair(key, next_id));
+      if (!it_and_inserted.second) {
+        *iid = it_and_inserted.first->second;
+        return true;
+      }
+      *iid = next_id;
+      return false;
+    }
+
+   private:
+    std::map<size_t, size_t> data_;
+  };
+};
+
+// A templated base class for an interned data type which corresponds to a field
+// in interned_data.proto.
+//
+// |InternedDataType| must be the type of the subclass.
+// |FieldNumber| is the corresponding protobuf field in InternedData.
+// |ValueType| is the type which is stored in the index. It must be copyable.
+// |Traits| can be used to customize the storage and lookup mechanism.
+//
+// The subclass should define a static method with the following signature for
+// committing interned data together with the interning id |iid| into the trace:
+//
+//   static void Add(perfetto::protos::pbzero::InternedData*,
+//                   size_t iid,
+//                   const ValueType& value);
+//
+template <typename InternedDataType,
+          size_t FieldNumber,
+          typename ValueType,
+          // Avoid unnecessary hashing for pointers by default.
+          typename Traits =
+              typename std::conditional<(std::is_pointer<ValueType>::value),
+                                        SmallInternedDataTraits,
+                                        BigInternedDataTraits>::type>
+class TrackEventInternedDataIndex
+    : public internal::BaseTrackEventInternedDataIndex {
+ public:
+  // Return an interning id for |value|. The returned id can be immediately
+  // written to the trace. The optional |add_args| are passed to the Add()
+  // function.
+  template <typename... Args>
+  static size_t Get(EventContext* ctx,
+                    const ValueType& value,
+                    Args&&... add_args) {
+    // First check if the value exists in the dictionary.
+    auto index_for_field = GetOrCreateIndexForField(ctx->incremental_state_);
+    size_t iid;
+    if (PERFETTO_LIKELY(index_for_field->index_.LookUpOrInsert(&iid, value))) {
+      PERFETTO_DCHECK(iid);
+      return iid;
+    }
+
+    // If not, we need to serialize the definition of the interned value into
+    // the heap buffered message (which is committed to the trace when the
+    // packet ends).
+    PERFETTO_DCHECK(iid);
+    InternedDataType::Add(
+        ctx->incremental_state_->serialized_interned_data.get(), iid,
+        std::move(value), std::forward<Args>(add_args)...);
+    return iid;
+  }
+
+ private:
+  static InternedDataType* GetOrCreateIndexForField(
+      internal::TrackEventIncrementalState* incremental_state) {
+    // Fast path: look for matching field number.
+    for (const auto& entry : incremental_state->interned_data_indices) {
+      if (entry.first == FieldNumber) {
+#if PERFETTO_DCHECK_IS_ON()
+        if (strcmp(PERFETTO_DEBUG_FUNCTION_IDENTIFIER(),
+                   entry.second->type_id_)) {
+          PERFETTO_FATAL(
+              "Interned data accessed under different types! Previous type: "
+              "%s. New type: %s.",
+              entry.second->type_id_, PERFETTO_DEBUG_FUNCTION_IDENTIFIER());
+        }
+        // If an interned data index is defined in an anonymous namespace, we
+        // can end up with multiple copies of it in the same program. Because
+        // they will all share a memory address through TLS, this can lead to
+        // subtle data corruption if all the copies aren't exactly identical.
+        // Try to detect this by checking if the Add() function address remains
+        // constant.
+        if (reinterpret_cast<void*>(&InternedDataType::Add) !=
+            entry.second->add_function_ptr_) {
+          PERFETTO_FATAL(
+              "Inconsistent interned data index. Maybe the index was defined "
+              "in an anonymous namespace in a header or copied to multiple "
+              "files? Duplicate index definitions can lead to memory "
+              "corruption! Type id: %s",
+              entry.second->type_id_);
+        }
+#endif  // PERFETTO_DCHECK_IS_ON()
+        return reinterpret_cast<InternedDataType*>(entry.second.get());
+      }
+    }
+    // No match -- add a new entry for this field.
+    for (auto& entry : incremental_state->interned_data_indices) {
+      if (!entry.first) {
+        entry.first = FieldNumber;
+        entry.second.reset(new InternedDataType());
+#if PERFETTO_DCHECK_IS_ON()
+        entry.second->type_id_ = PERFETTO_DEBUG_FUNCTION_IDENTIFIER();
+        entry.second->add_function_ptr_ =
+            reinterpret_cast<void*>(&InternedDataType::Add);
+#endif  // PERFETTO_DCHECK_IS_ON()
+        return reinterpret_cast<InternedDataType*>(entry.second.get());
+      }
+    }
+    // Out of space in the interned data index table.
+    PERFETTO_CHECK(false);
+  }
+
+  // The actual interning dictionary for this type of interned data. The actual
+  // container type is defined by |Traits|, hence the extra layer of template
+  // indirection here.
+  typename Traits::template Index<ValueType> index_;
+};
+
+}  // namespace perfetto
+
+#endif  // INCLUDE_PERFETTO_TRACING_TRACK_EVENT_INTERNED_DATA_INDEX_H_
+// gen_amalgamated begin header: include/perfetto/tracing/track_event_legacy.h
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_TRACK_EVENT_LEGACY_H_
+#define INCLUDE_PERFETTO_TRACING_TRACK_EVENT_LEGACY_H_
+
+// This file defines a compatibility shim between legacy (Chrome, V8) trace
+// event macros and track events. To avoid accidentally introducing legacy
+// events in new code, the PERFETTO_ENABLE_LEGACY_TRACE_EVENTS macro must be set
+// to 1 activate the compatibility layer.
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/track_event.h"
+
+#include <stdint.h>
+
+#ifndef PERFETTO_ENABLE_LEGACY_TRACE_EVENTS
+#define PERFETTO_ENABLE_LEGACY_TRACE_EVENTS 0
+#endif
+
+// Ignore GCC warning about a missing argument for a variadic macro parameter.
+#if defined(__GNUC__) || defined(__clang__)
+#pragma GCC system_header
+#endif
+
+// ----------------------------------------------------------------------------
+// Constants.
+// ----------------------------------------------------------------------------
+
+namespace perfetto {
+namespace legacy {
+
+enum TraceEventFlag {
+  kTraceEventFlagNone = 0,
+  kTraceEventFlagCopy = 1u << 0,
+  kTraceEventFlagHasId = 1u << 1,
+  kTraceEventFlagScopeOffset = 1u << 2,
+  kTraceEventFlagScopeExtra = 1u << 3,
+  kTraceEventFlagExplicitTimestamp = 1u << 4,
+  kTraceEventFlagAsyncTTS = 1u << 5,
+  kTraceEventFlagBindToEnclosing = 1u << 6,
+  kTraceEventFlagFlowIn = 1u << 7,
+  kTraceEventFlagFlowOut = 1u << 8,
+  kTraceEventFlagHasContextId = 1u << 9,
+  kTraceEventFlagHasProcessId = 1u << 10,
+  kTraceEventFlagHasLocalId = 1u << 11,
+  kTraceEventFlagHasGlobalId = 1u << 12,
+  // TODO(eseckler): Remove once we have native support for typed proto events
+  // in TRACE_EVENT macros.
+  kTraceEventFlagTypedProtoArgs = 1u << 15,
+  kTraceEventFlagJavaStringLiterals = 1u << 16,
+};
+
+enum PerfettoLegacyCurrentThreadId { kCurrentThreadId };
+
+}  // namespace legacy
+}  // namespace perfetto
+
+#if PERFETTO_ENABLE_LEGACY_TRACE_EVENTS
+// The following constants are defined in the global namespace, since they were
+// originally implemented as macros.
+
+// Event phases.
+static constexpr char TRACE_EVENT_PHASE_BEGIN = 'B';
+static constexpr char TRACE_EVENT_PHASE_END = 'E';
+static constexpr char TRACE_EVENT_PHASE_COMPLETE = 'X';
+static constexpr char TRACE_EVENT_PHASE_INSTANT = 'I';
+static constexpr char TRACE_EVENT_PHASE_ASYNC_BEGIN = 'S';
+static constexpr char TRACE_EVENT_PHASE_ASYNC_STEP_INTO = 'T';
+static constexpr char TRACE_EVENT_PHASE_ASYNC_STEP_PAST = 'p';
+static constexpr char TRACE_EVENT_PHASE_ASYNC_END = 'F';
+static constexpr char TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN = 'b';
+static constexpr char TRACE_EVENT_PHASE_NESTABLE_ASYNC_END = 'e';
+static constexpr char TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT = 'n';
+static constexpr char TRACE_EVENT_PHASE_FLOW_BEGIN = 's';
+static constexpr char TRACE_EVENT_PHASE_FLOW_STEP = 't';
+static constexpr char TRACE_EVENT_PHASE_FLOW_END = 'f';
+static constexpr char TRACE_EVENT_PHASE_METADATA = 'M';
+static constexpr char TRACE_EVENT_PHASE_COUNTER = 'C';
+static constexpr char TRACE_EVENT_PHASE_SAMPLE = 'P';
+static constexpr char TRACE_EVENT_PHASE_CREATE_OBJECT = 'N';
+static constexpr char TRACE_EVENT_PHASE_SNAPSHOT_OBJECT = 'O';
+static constexpr char TRACE_EVENT_PHASE_DELETE_OBJECT = 'D';
+static constexpr char TRACE_EVENT_PHASE_MEMORY_DUMP = 'v';
+static constexpr char TRACE_EVENT_PHASE_MARK = 'R';
+static constexpr char TRACE_EVENT_PHASE_CLOCK_SYNC = 'c';
+static constexpr char TRACE_EVENT_PHASE_ENTER_CONTEXT = '(';
+static constexpr char TRACE_EVENT_PHASE_LEAVE_CONTEXT = ')';
+
+// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
+static constexpr uint32_t TRACE_EVENT_FLAG_NONE =
+    perfetto::legacy::kTraceEventFlagNone;
+static constexpr uint32_t TRACE_EVENT_FLAG_COPY =
+    perfetto::legacy::kTraceEventFlagCopy;
+static constexpr uint32_t TRACE_EVENT_FLAG_HAS_ID =
+    perfetto::legacy::kTraceEventFlagHasId;
+static constexpr uint32_t TRACE_EVENT_FLAG_SCOPE_OFFSET =
+    perfetto::legacy::kTraceEventFlagScopeOffset;
+static constexpr uint32_t TRACE_EVENT_FLAG_SCOPE_EXTRA =
+    perfetto::legacy::kTraceEventFlagScopeExtra;
+static constexpr uint32_t TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP =
+    perfetto::legacy::kTraceEventFlagExplicitTimestamp;
+static constexpr uint32_t TRACE_EVENT_FLAG_ASYNC_TTS =
+    perfetto::legacy::kTraceEventFlagAsyncTTS;
+static constexpr uint32_t TRACE_EVENT_FLAG_BIND_TO_ENCLOSING =
+    perfetto::legacy::kTraceEventFlagBindToEnclosing;
+static constexpr uint32_t TRACE_EVENT_FLAG_FLOW_IN =
+    perfetto::legacy::kTraceEventFlagFlowIn;
+static constexpr uint32_t TRACE_EVENT_FLAG_FLOW_OUT =
+    perfetto::legacy::kTraceEventFlagFlowOut;
+static constexpr uint32_t TRACE_EVENT_FLAG_HAS_CONTEXT_ID =
+    perfetto::legacy::kTraceEventFlagHasContextId;
+static constexpr uint32_t TRACE_EVENT_FLAG_HAS_PROCESS_ID =
+    perfetto::legacy::kTraceEventFlagHasProcessId;
+static constexpr uint32_t TRACE_EVENT_FLAG_HAS_LOCAL_ID =
+    perfetto::legacy::kTraceEventFlagHasLocalId;
+static constexpr uint32_t TRACE_EVENT_FLAG_HAS_GLOBAL_ID =
+    perfetto::legacy::kTraceEventFlagHasGlobalId;
+static constexpr uint32_t TRACE_EVENT_FLAG_TYPED_PROTO_ARGS =
+    perfetto::legacy::kTraceEventFlagTypedProtoArgs;
+static constexpr uint32_t TRACE_EVENT_FLAG_JAVA_STRING_LITERALS =
+    perfetto::legacy::kTraceEventFlagJavaStringLiterals;
+
+static constexpr uint32_t TRACE_EVENT_FLAG_SCOPE_MASK =
+    TRACE_EVENT_FLAG_SCOPE_OFFSET | TRACE_EVENT_FLAG_SCOPE_EXTRA;
+
+// Type values for identifying types in the TraceValue union.
+static constexpr uint8_t TRACE_VALUE_TYPE_BOOL = 1;
+static constexpr uint8_t TRACE_VALUE_TYPE_UINT = 2;
+static constexpr uint8_t TRACE_VALUE_TYPE_INT = 3;
+static constexpr uint8_t TRACE_VALUE_TYPE_DOUBLE = 4;
+static constexpr uint8_t TRACE_VALUE_TYPE_POINTER = 5;
+static constexpr uint8_t TRACE_VALUE_TYPE_STRING = 6;
+static constexpr uint8_t TRACE_VALUE_TYPE_COPY_STRING = 7;
+static constexpr uint8_t TRACE_VALUE_TYPE_CONVERTABLE = 8;
+static constexpr uint8_t TRACE_VALUE_TYPE_PROTO = 9;
+
+// Enum reflecting the scope of an INSTANT event. Must fit within
+// TRACE_EVENT_FLAG_SCOPE_MASK.
+static constexpr uint8_t TRACE_EVENT_SCOPE_GLOBAL = 0u << 2;
+static constexpr uint8_t TRACE_EVENT_SCOPE_PROCESS = 1u << 2;
+static constexpr uint8_t TRACE_EVENT_SCOPE_THREAD = 2u << 2;
+
+static constexpr char TRACE_EVENT_SCOPE_NAME_GLOBAL = 'g';
+static constexpr char TRACE_EVENT_SCOPE_NAME_PROCESS = 'p';
+static constexpr char TRACE_EVENT_SCOPE_NAME_THREAD = 't';
+
+static constexpr auto TRACE_EVENT_API_CURRENT_THREAD_ID =
+    perfetto::legacy::kCurrentThreadId;
+
+#endif  // PERFETTO_ENABLE_LEGACY_TRACE_EVENTS
+
+// ----------------------------------------------------------------------------
+// Internal legacy trace point implementation.
+// ----------------------------------------------------------------------------
+
+namespace perfetto {
+namespace legacy {
+
+// The following user-provided adaptors are used to serialize user-defined
+// thread id and time types into track events. For full compatibility, the user
+// should also define the following macros appropriately:
+//
+//   #define TRACE_TIME_TICKS_NOW() ...
+//   #define TRACE_TIME_NOW() ...
+
+// User-provided function to convert an abstract thread id into a thread track.
+template <typename T>
+ThreadTrack ConvertThreadId(const T&);
+
+// Built-in implementation for events referring to the current thread.
+template <>
+ThreadTrack PERFETTO_EXPORT
+ConvertThreadId(const PerfettoLegacyCurrentThreadId&);
+
+}  // namespace legacy
+
+namespace internal {
+
+// LegacyTraceId encapsulates an ID that can either be an integer or pointer.
+class PERFETTO_EXPORT LegacyTraceId {
+ public:
+  // Can be combined with WithScope.
+  class LocalId {
+   public:
+    explicit LocalId(const void* raw_id)
+        : raw_id_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(raw_id))) {}
+    explicit LocalId(uint64_t raw_id) : raw_id_(raw_id) {}
+    uint64_t raw_id() const { return raw_id_; }
+
+   private:
+    uint64_t raw_id_;
+  };
+
+  // Can be combined with WithScope.
+  class GlobalId {
+   public:
+    explicit GlobalId(uint64_t raw_id) : raw_id_(raw_id) {}
+    uint64_t raw_id() const { return raw_id_; }
+
+   private:
+    uint64_t raw_id_;
+  };
+
+  class WithScope {
+   public:
+    WithScope(const char* scope, uint64_t raw_id)
+        : scope_(scope), raw_id_(raw_id) {}
+    WithScope(const char* scope, LocalId local_id)
+        : scope_(scope), raw_id_(local_id.raw_id()) {
+      id_flags_ = legacy::kTraceEventFlagHasLocalId;
+    }
+    WithScope(const char* scope, GlobalId global_id)
+        : scope_(scope), raw_id_(global_id.raw_id()) {
+      id_flags_ = legacy::kTraceEventFlagHasGlobalId;
+    }
+    WithScope(const char* scope, uint64_t prefix, uint64_t raw_id)
+        : scope_(scope), has_prefix_(true), prefix_(prefix), raw_id_(raw_id) {}
+    WithScope(const char* scope, uint64_t prefix, GlobalId global_id)
+        : scope_(scope),
+          has_prefix_(true),
+          prefix_(prefix),
+          raw_id_(global_id.raw_id()) {
+      id_flags_ = legacy::kTraceEventFlagHasGlobalId;
+    }
+    uint64_t raw_id() const { return raw_id_; }
+    const char* scope() const { return scope_; }
+    bool has_prefix() const { return has_prefix_; }
+    uint64_t prefix() const { return prefix_; }
+    uint32_t id_flags() const { return id_flags_; }
+
+   private:
+    const char* scope_ = nullptr;
+    bool has_prefix_ = false;
+    uint64_t prefix_;
+    uint64_t raw_id_;
+    uint32_t id_flags_ = legacy::kTraceEventFlagHasId;
+  };
+
+  explicit LegacyTraceId(const void* raw_id)
+      : raw_id_(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(raw_id))) {
+    id_flags_ = legacy::kTraceEventFlagHasLocalId;
+  }
+  explicit LegacyTraceId(uint64_t raw_id) : raw_id_(raw_id) {}
+  explicit LegacyTraceId(uint32_t raw_id) : raw_id_(raw_id) {}
+  explicit LegacyTraceId(uint16_t raw_id) : raw_id_(raw_id) {}
+  explicit LegacyTraceId(uint8_t raw_id) : raw_id_(raw_id) {}
+  explicit LegacyTraceId(int64_t raw_id)
+      : raw_id_(static_cast<uint64_t>(raw_id)) {}
+  explicit LegacyTraceId(int32_t raw_id)
+      : raw_id_(static_cast<uint64_t>(raw_id)) {}
+  explicit LegacyTraceId(int16_t raw_id)
+      : raw_id_(static_cast<uint64_t>(raw_id)) {}
+  explicit LegacyTraceId(int8_t raw_id)
+      : raw_id_(static_cast<uint64_t>(raw_id)) {}
+  explicit LegacyTraceId(LocalId raw_id) : raw_id_(raw_id.raw_id()) {
+    id_flags_ = legacy::kTraceEventFlagHasLocalId;
+  }
+  explicit LegacyTraceId(GlobalId raw_id) : raw_id_(raw_id.raw_id()) {
+    id_flags_ = legacy::kTraceEventFlagHasGlobalId;
+  }
+  explicit LegacyTraceId(WithScope scoped_id)
+      : scope_(scoped_id.scope()),
+        has_prefix_(scoped_id.has_prefix()),
+        prefix_(scoped_id.prefix()),
+        raw_id_(scoped_id.raw_id()),
+        id_flags_(scoped_id.id_flags()) {}
+
+  uint64_t raw_id() const { return raw_id_; }
+  const char* scope() const { return scope_; }
+  bool has_prefix() const { return has_prefix_; }
+  uint64_t prefix() const { return prefix_; }
+  uint32_t id_flags() const { return id_flags_; }
+
+  void Write(protos::pbzero::TrackEvent::LegacyEvent*,
+             uint32_t event_flags) const;
+
+ private:
+  const char* scope_ = nullptr;
+  bool has_prefix_ = false;
+  uint64_t prefix_;
+  uint64_t raw_id_;
+  uint32_t id_flags_ = legacy::kTraceEventFlagHasId;
+};
+
+}  // namespace internal
+}  // namespace perfetto
+
+#if PERFETTO_ENABLE_LEGACY_TRACE_EVENTS
+
+namespace perfetto {
+namespace internal {
+
+class PERFETTO_EXPORT TrackEventLegacy {
+ public:
+  static constexpr protos::pbzero::TrackEvent::Type PhaseToType(char phase) {
+    // clang-format off
+    return (phase == TRACE_EVENT_PHASE_BEGIN) ?
+               protos::pbzero::TrackEvent::TYPE_SLICE_BEGIN :
+           (phase == TRACE_EVENT_PHASE_END) ?
+               protos::pbzero::TrackEvent::TYPE_SLICE_END :
+           (phase == TRACE_EVENT_PHASE_INSTANT) ?
+               protos::pbzero::TrackEvent::TYPE_INSTANT :
+           protos::pbzero::TrackEvent::TYPE_UNSPECIFIED;
+    // clang-format on
+  }
+
+  // Reduce binary size overhead by outlining most of the code for writing a
+  // legacy trace event.
+  template <typename... Args>
+  static void WriteLegacyEvent(EventContext ctx,
+                               char phase,
+                               uint32_t flags,
+                               Args&&... args) PERFETTO_NO_INLINE {
+    AddDebugAnnotations(&ctx, std::forward<Args>(args)...);
+    if (NeedLegacyFlags(phase, flags)) {
+      auto legacy_event = ctx.event()->set_legacy_event();
+      SetLegacyFlags(legacy_event, phase, flags);
+    }
+  }
+
+  template <typename ThreadIdType, typename... Args>
+  static void WriteLegacyEventWithIdAndTid(EventContext ctx,
+                                           char phase,
+                                           uint32_t flags,
+                                           const LegacyTraceId& id,
+                                           const ThreadIdType& thread_id,
+                                           Args&&... args) PERFETTO_NO_INLINE {
+    //
+    // Overrides to consider:
+    //
+    // 1. If we have an id, we need to write {unscoped,local,global}_id and/or
+    //    bind_id.
+    // 2. If we have a thread id, we need to write track_uuid() or
+    //    {pid,tid}_override if the id represents another process.  The
+    //    conversion from |thread_id| happens in embedder code since the type is
+    //    embedder-specified.
+    // 3. If we have a timestamp, we need to write a different timestamp in the
+    //    trace packet itself and make sure TrackEvent won't write one
+    //    internally. This is already done at the call site.
+    //
+    flags |= id.id_flags();
+    AddDebugAnnotations(&ctx, std::forward<Args>(args)...);
+    if (NeedLegacyFlags(phase, flags)) {
+      auto legacy_event = ctx.event()->set_legacy_event();
+      SetLegacyFlags(legacy_event, phase, flags);
+      if (id.id_flags())
+        id.Write(legacy_event, flags);
+      if (flags & TRACE_EVENT_FLAG_HAS_PROCESS_ID) {
+        // The thread identifier actually represents a process id. Let's set an
+        // override for it.
+        int32_t pid_override =
+            static_cast<int32_t>(legacy::ConvertThreadId(thread_id).tid);
+        legacy_event->set_pid_override(pid_override);
+        legacy_event->set_tid_override(-1);
+      }
+    }
+  }
+
+  // No arguments.
+  static void AddDebugAnnotations(EventContext*) {}
+
+  // One argument.
+  template <typename ArgType>
+  static void AddDebugAnnotations(EventContext* ctx,
+                                  const char* arg_name,
+                                  ArgType&& arg_value) {
+    TrackEventInternal::AddDebugAnnotation(ctx, arg_name, arg_value);
+  }
+
+  // Two arguments.
+  template <typename ArgType, typename ArgType2>
+  static void AddDebugAnnotations(EventContext* ctx,
+                                  const char* arg_name,
+                                  ArgType&& arg_value,
+                                  const char* arg_name2,
+                                  ArgType2&& arg_value2) {
+    TrackEventInternal::AddDebugAnnotation(ctx, arg_name, arg_value);
+    TrackEventInternal::AddDebugAnnotation(ctx, arg_name2, arg_value2);
+  }
+
+ private:
+  static bool NeedLegacyFlags(char phase, uint32_t flags) {
+    if (PhaseToType(phase) == protos::pbzero::TrackEvent::TYPE_UNSPECIFIED)
+      return true;
+    // TODO(skyostil): Implement/deprecate:
+    // - TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP
+    // - TRACE_EVENT_FLAG_HAS_CONTEXT_ID
+    // - TRACE_EVENT_FLAG_TYPED_PROTO_ARGS
+    // - TRACE_EVENT_FLAG_JAVA_STRING_LITERALS
+    return flags &
+           (TRACE_EVENT_FLAG_HAS_ID | TRACE_EVENT_FLAG_HAS_LOCAL_ID |
+            TRACE_EVENT_FLAG_HAS_GLOBAL_ID | TRACE_EVENT_FLAG_ASYNC_TTS |
+            TRACE_EVENT_FLAG_BIND_TO_ENCLOSING | TRACE_EVENT_FLAG_FLOW_IN |
+            TRACE_EVENT_FLAG_FLOW_OUT | TRACE_EVENT_FLAG_HAS_PROCESS_ID);
+  }
+
+  static void SetLegacyFlags(
+      protos::pbzero::TrackEvent::LegacyEvent* legacy_event,
+      char phase,
+      uint32_t flags) {
+    if (PhaseToType(phase) == protos::pbzero::TrackEvent::TYPE_UNSPECIFIED)
+      legacy_event->set_phase(phase);
+    if (flags & TRACE_EVENT_FLAG_ASYNC_TTS)
+      legacy_event->set_use_async_tts(true);
+    if (flags & TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
+      legacy_event->set_bind_to_enclosing(true);
+
+    const auto kFlowIn = TRACE_EVENT_FLAG_FLOW_IN;
+    const auto kFlowOut = TRACE_EVENT_FLAG_FLOW_OUT;
+    const auto kFlowInOut = kFlowIn | kFlowOut;
+    if ((flags & kFlowInOut) == kFlowInOut) {
+      legacy_event->set_flow_direction(
+          protos::pbzero::TrackEvent::LegacyEvent::FLOW_INOUT);
+    } else if (flags & kFlowIn) {
+      legacy_event->set_flow_direction(
+          protos::pbzero::TrackEvent::LegacyEvent::FLOW_IN);
+    } else if (flags & kFlowOut) {
+      legacy_event->set_flow_direction(
+          protos::pbzero::TrackEvent::LegacyEvent::FLOW_OUT);
+    }
+  }
+};
+
+}  // namespace internal
+}  // namespace perfetto
+
+// Implementations for the INTERNAL_* adapter macros used by the trace points
+// below.
+#define PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(phase, category, name, track, \
+                                                ...)                          \
+  PERFETTO_INTERNAL_TRACK_EVENT(                                              \
+      category,                                                               \
+      ::perfetto::internal::GetStaticString(::perfetto::StaticString{name}),  \
+      ::perfetto::internal::TrackEventLegacy::PhaseToType(phase), track,      \
+      ##__VA_ARGS__);
+
+// The main entrypoint for writing unscoped legacy events.  This macro
+// determines the right track to write the event on based on |flags| and
+// |thread_id|.
+#define PERFETTO_INTERNAL_LEGACY_EVENT(phase, category, name, flags,         \
+                                       thread_id, ...)                       \
+  [&]() {                                                                    \
+    constexpr auto& kDefaultTrack =                                          \
+        ::perfetto::internal::TrackEventInternal::kDefaultTrack;             \
+    /* First check the scope for instant events. */                          \
+    if ((phase) == TRACE_EVENT_PHASE_INSTANT) {                              \
+      /* Note: Avoids the need to set LegacyEvent::instant_event_scope. */   \
+      auto scope = (flags)&TRACE_EVENT_FLAG_SCOPE_MASK;                      \
+      switch (scope) {                                                       \
+        case TRACE_EVENT_SCOPE_GLOBAL:                                       \
+          PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(                           \
+              phase, category, name, ::perfetto::Track::Global(0),           \
+              ##__VA_ARGS__);                                                \
+          return;                                                            \
+        case TRACE_EVENT_SCOPE_PROCESS:                                      \
+          PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(                           \
+              phase, category, name, ::perfetto::ProcessTrack::Current(),    \
+              ##__VA_ARGS__);                                                \
+          return;                                                            \
+        default:                                                             \
+        case TRACE_EVENT_SCOPE_THREAD:                                       \
+          /* Fallthrough. */                                                 \
+          break;                                                             \
+      }                                                                      \
+    }                                                                        \
+    /* If an event targets the current thread or another process, write      \
+     * it on the current thread's track. The process override case is        \
+     * handled through |pid_override| in WriteLegacyEvent. */                \
+    if (std::is_same<                                                        \
+            decltype(thread_id),                                             \
+            ::perfetto::legacy::PerfettoLegacyCurrentThreadId>::value ||     \
+        ((flags)&TRACE_EVENT_FLAG_HAS_PROCESS_ID)) {                         \
+      PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(phase, category, name,         \
+                                              kDefaultTrack, ##__VA_ARGS__); \
+    } else {                                                                 \
+      PERFETTO_INTERNAL_LEGACY_EVENT_ON_TRACK(                               \
+          phase, category, name,                                             \
+          ::perfetto::legacy::ConvertThreadId(thread_id), ##__VA_ARGS__);    \
+    }                                                                        \
+  }()
+
+#define INTERNAL_TRACE_EVENT_ADD(phase, category, name, flags, ...)        \
+  PERFETTO_INTERNAL_LEGACY_EVENT(                                          \
+      phase, category, name, flags, ::perfetto::legacy::kCurrentThreadId,  \
+      [&](perfetto::EventContext ctx) PERFETTO_NO_THREAD_SAFETY_ANALYSIS { \
+        using ::perfetto::internal::TrackEventLegacy;                      \
+        TrackEventLegacy::WriteLegacyEvent(std::move(ctx), phase, flags,   \
+                                           ##__VA_ARGS__);                 \
+      })
+
+// PERFETTO_INTERNAL_SCOPED_TRACK_EVENT does not require GetStaticString, as it
+// uses TRACE_EVENT_BEGIN/END internally, which already have this call.
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category, name, ...)               \
+  PERFETTO_INTERNAL_SCOPED_TRACK_EVENT(                                    \
+      category, ::perfetto::StaticString{name},                            \
+      [&](perfetto::EventContext ctx) PERFETTO_NO_THREAD_SAFETY_ANALYSIS { \
+        using ::perfetto::internal::TrackEventLegacy;                      \
+        TrackEventLegacy::AddDebugAnnotations(&ctx, ##__VA_ARGS__);        \
+      })
+
+// PERFETTO_INTERNAL_SCOPED_TRACK_EVENT does not require GetStaticString, as it
+// uses TRACE_EVENT_BEGIN/END internally, which already have this call.
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category, name, bind_id,   \
+                                                  flags, ...)                \
+  PERFETTO_INTERNAL_SCOPED_TRACK_EVENT(                                      \
+      category, ::perfetto::StaticString{name},                              \
+      [&](perfetto::EventContext ctx) PERFETTO_NO_THREAD_SAFETY_ANALYSIS {   \
+        using ::perfetto::internal::TrackEventLegacy;                        \
+        ::perfetto::internal::LegacyTraceId PERFETTO_UID(trace_id){bind_id}; \
+        TrackEventLegacy::WriteLegacyEventWithIdAndTid(                      \
+            std::move(ctx), TRACE_EVENT_PHASE_BEGIN, flags,                  \
+            PERFETTO_UID(trace_id), TRACE_EVENT_API_CURRENT_THREAD_ID,       \
+            ##__VA_ARGS__);                                                  \
+      })
+
+#define INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(phase, category, name,     \
+                                                timestamp, flags, ...)     \
+  PERFETTO_INTERNAL_LEGACY_EVENT(                                          \
+      phase, category, name, flags, ::perfetto::legacy::kCurrentThreadId,  \
+      timestamp,                                                           \
+      [&](perfetto::EventContext ctx) PERFETTO_NO_THREAD_SAFETY_ANALYSIS { \
+        using ::perfetto::internal::TrackEventLegacy;                      \
+        TrackEventLegacy::WriteLegacyEvent(std::move(ctx), phase, flags,   \
+                                           ##__VA_ARGS__);                 \
+      })
+
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                  \
+    phase, category, name, id, thread_id, timestamp, flags, ...)             \
+  PERFETTO_INTERNAL_LEGACY_EVENT(                                            \
+      phase, category, name, flags, thread_id, timestamp,                    \
+      [&](perfetto::EventContext ctx) PERFETTO_NO_THREAD_SAFETY_ANALYSIS {   \
+        using ::perfetto::internal::TrackEventLegacy;                        \
+        ::perfetto::internal::LegacyTraceId PERFETTO_UID(trace_id){id};      \
+        TrackEventLegacy::WriteLegacyEventWithIdAndTid(                      \
+            std::move(ctx), phase, flags, PERFETTO_UID(trace_id), thread_id, \
+            ##__VA_ARGS__);                                                  \
+      })
+
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category, name, id, flags, \
+                                         ...)                              \
+  PERFETTO_INTERNAL_LEGACY_EVENT(                                          \
+      phase, category, name, flags, ::perfetto::legacy::kCurrentThreadId,  \
+      [&](perfetto::EventContext ctx) PERFETTO_NO_THREAD_SAFETY_ANALYSIS { \
+        using ::perfetto::internal::TrackEventLegacy;                      \
+        ::perfetto::internal::LegacyTraceId PERFETTO_UID(trace_id){id};    \
+        TrackEventLegacy::WriteLegacyEventWithIdAndTid(                    \
+            std::move(ctx), phase, flags, PERFETTO_UID(trace_id),          \
+            TRACE_EVENT_API_CURRENT_THREAD_ID, ##__VA_ARGS__);             \
+      })
+
+#define INTERNAL_TRACE_EVENT_METADATA_ADD(category, name, ...)         \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_METADATA, category, name, \
+                           TRACE_EVENT_FLAG_NONE)
+
+// ----------------------------------------------------------------------------
+// Legacy tracing common API (adapted from trace_event_common.h).
+// ----------------------------------------------------------------------------
+
+#define TRACE_DISABLED_BY_DEFAULT(name) "disabled-by-default-" name
+
+// Scoped events.
+#define TRACE_EVENT0(category_group, name) \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name)
+#define TRACE_EVENT_WITH_FLOW0(category_group, name, bind_id, flow_flags)  \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+                                            flow_flags)
+#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val)
+#define TRACE_EVENT_WITH_FLOW1(category_group, name, bind_id, flow_flags,  \
+                               arg1_name, arg1_val)                        \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id, \
+                                            flow_flags, arg1_name, arg1_val)
+#define TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name,   \
+                     arg2_val)                                               \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val, \
+                                  arg2_name, arg2_val)
+#define TRACE_EVENT_WITH_FLOW2(category_group, name, bind_id, flow_flags,    \
+                               arg1_name, arg1_val, arg2_name, arg2_val)     \
+  INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, bind_id,   \
+                                            flow_flags, arg1_name, arg1_val, \
+                                            arg2_name, arg2_val)
+
+// Instant events.
+#define TRACE_EVENT_INSTANT0(category_group, name, scope)                   \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE | scope)
+#define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name,    \
+                           TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val)
+#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+                             arg2_name, arg2_val)                              \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name,    \
+                           TRACE_EVENT_FLAG_NONE | scope, arg1_name, arg1_val, \
+                           arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_INSTANT0(category_group, name, scope)              \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+                           TRACE_EVENT_FLAG_COPY | scope)
+#define TRACE_EVENT_COPY_INSTANT1(category_group, name, scope, arg1_name,   \
+                                  arg1_val)                                 \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+                           TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_INSTANT2(category_group, name, scope, arg1_name,      \
+                                  arg1_val, arg2_name, arg2_val)               \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name,    \
+                           TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val, \
+                           arg2_name, arg2_val)
+#define TRACE_EVENT_INSTANT_WITH_FLAGS0(category_group, name, scope_and_flags) \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name,    \
+                           scope_and_flags)
+#define TRACE_EVENT_INSTANT_WITH_FLAGS1(category_group, name, scope_and_flags, \
+                                        arg1_name, arg1_val)                   \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name,    \
+                           scope_and_flags, arg1_name, arg1_val)
+
+// Instant events with explicit timestamps.
+#define TRACE_EVENT_INSTANT_WITH_TIMESTAMP0(category_group, name, scope,   \
+                                            timestamp)                     \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(TRACE_EVENT_PHASE_INSTANT,       \
+                                          category_group, name, timestamp, \
+                                          TRACE_EVENT_FLAG_NONE | scope)
+
+#define TRACE_EVENT_INSTANT_WITH_TIMESTAMP1(category_group, name, scope,  \
+                                            timestamp, arg_name, arg_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                \
+      TRACE_EVENT_PHASE_INSTANT, category_group, name, timestamp,         \
+      TRACE_EVENT_FLAG_NONE | scope, arg_name, arg_val)
+
+// Begin events.
+#define TRACE_EVENT_BEGIN0(category_group, name)                          \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_BEGIN1(category_group, name, arg1_name, arg1_val)     \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_BEGIN2(category_group, name, arg1_name, arg1_val,     \
+                           arg2_name, arg2_val)                           \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val,    \
+                           arg2_name, arg2_val)
+#define TRACE_EVENT_BEGIN_WITH_FLAGS0(category_group, name, flags) \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, flags)
+#define TRACE_EVENT_BEGIN_WITH_FLAGS1(category_group, name, flags, arg1_name, \
+                                      arg1_val)                               \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name,     \
+                           flags, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN2(category_group, name, arg1_name, arg1_val, \
+                                arg2_name, arg2_val)                       \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name,  \
+                           TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val,     \
+                           arg2_name, arg2_val)
+
+// Begin events with explicit timestamps.
+#define TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
+                                                     thread_id, timestamp)     \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id,      \
+      timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(                \
+    category_group, name, id, thread_id, timestamp)                       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                     \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+      timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP1(                \
+    category_group, name, id, thread_id, timestamp, arg1_name, arg1_val)  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                     \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+      timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP2(                \
+    category_group, name, id, thread_id, timestamp, arg1_name, arg1_val,  \
+    arg2_name, arg2_val)                                                  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                     \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+      timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name,   \
+      arg2_val)
+
+// End events.
+#define TRACE_EVENT_END0(category_group, name)                          \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_END1(category_group, name, arg1_name, arg1_val)     \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_END2(category_group, name, arg1_name, arg1_val, arg2_name, \
+                         arg2_val)                                             \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name,        \
+                           TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val,         \
+                           arg2_name, arg2_val)
+#define TRACE_EVENT_END_WITH_FLAGS0(category_group, name, flags) \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, flags)
+#define TRACE_EVENT_END_WITH_FLAGS1(category_group, name, flags, arg1_name,    \
+                                    arg1_val)                                  \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, flags, \
+                           arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END2(category_group, name, arg1_name, arg1_val, \
+                              arg2_name, arg2_val)                       \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name,  \
+                           TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val,   \
+                           arg2_name, arg2_val)
+
+// Mark events.
+#define TRACE_EVENT_MARK_WITH_TIMESTAMP0(category_group, name, timestamp)  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(TRACE_EVENT_PHASE_MARK,          \
+                                          category_group, name, timestamp, \
+                                          TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_MARK_WITH_TIMESTAMP1(category_group, name, timestamp, \
+                                         arg1_name, arg1_val)             \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                \
+      TRACE_EVENT_PHASE_MARK, category_group, name, timestamp,            \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+
+#define TRACE_EVENT_MARK_WITH_TIMESTAMP2(                                      \
+    category_group, name, timestamp, arg1_name, arg1_val, arg2_name, arg2_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                     \
+      TRACE_EVENT_PHASE_MARK, category_group, name, timestamp,                 \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+#define TRACE_EVENT_COPY_MARK(category_group, name)                      \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_MARK, category_group, name, \
+                           TRACE_EVENT_FLAG_COPY)
+
+#define TRACE_EVENT_COPY_MARK1(category_group, name, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_MARK, category_group, name,  \
+                           TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+
+#define TRACE_EVENT_COPY_MARK_WITH_TIMESTAMP(category_group, name, timestamp) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(TRACE_EVENT_PHASE_MARK,             \
+                                          category_group, name, timestamp,    \
+                                          TRACE_EVENT_FLAG_COPY)
+
+// End events with explicit thread and timestamp.
+#define TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(category_group, name, id, \
+                                                   thread_id, timestamp)     \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                        \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id,      \
+      timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0(                \
+    category_group, name, id, thread_id, timestamp)                     \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                   \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+      timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP1(                 \
+    category_group, name, id, thread_id, timestamp, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                    \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id,  \
+      timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP2(                 \
+    category_group, name, id, thread_id, timestamp, arg1_name, arg1_val, \
+    arg2_name, arg2_val)                                                 \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                    \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id,  \
+      timestamp, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name,  \
+      arg2_val)
+
+// Counters.
+#define TRACE_COUNTER1(category_group, name, value)                         \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE, "value",                  \
+                           static_cast<int>(value))
+#define TRACE_COUNTER_WITH_FLAG1(category_group, name, flag, value)         \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+                           flag, "value", static_cast<int>(value))
+#define TRACE_COPY_COUNTER1(category_group, name, value)                    \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+                           TRACE_EVENT_FLAG_COPY, "value",                  \
+                           static_cast<int>(value))
+#define TRACE_COUNTER2(category_group, name, value1_name, value1_val,       \
+                       value2_name, value2_val)                             \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+                           TRACE_EVENT_FLAG_NONE, value1_name,              \
+                           static_cast<int>(value1_val), value2_name,       \
+                           static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER2(category_group, name, value1_name, value1_val,  \
+                            value2_name, value2_val)                        \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, category_group, name, \
+                           TRACE_EVENT_FLAG_COPY, value1_name,              \
+                           static_cast<int>(value1_val), value2_name,       \
+                           static_cast<int>(value2_val))
+
+// Counters with explicit timestamps.
+#define TRACE_COUNTER_WITH_TIMESTAMP1(category_group, name, timestamp, value) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                    \
+      TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp,             \
+      TRACE_EVENT_FLAG_NONE, "value", static_cast<int>(value))
+
+#define TRACE_COUNTER_WITH_TIMESTAMP2(category_group, name, timestamp,      \
+                                      value1_name, value1_val, value2_name, \
+                                      value2_val)                           \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                  \
+      TRACE_EVENT_PHASE_COUNTER, category_group, name, timestamp,           \
+      TRACE_EVENT_FLAG_NONE, value1_name, static_cast<int>(value1_val),     \
+      value2_name, static_cast<int>(value2_val))
+
+// Counters with ids.
+#define TRACE_COUNTER_ID1(category_group, name, id, value)                    \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_NONE, "value",  \
+                                   static_cast<int>(value))
+#define TRACE_COPY_COUNTER_ID1(category_group, name, id, value)               \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_COPY, "value",  \
+                                   static_cast<int>(value))
+#define TRACE_COUNTER_ID2(category_group, name, id, value1_name, value1_val,  \
+                          value2_name, value2_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_NONE,           \
+                                   value1_name, static_cast<int>(value1_val), \
+                                   value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER_ID2(category_group, name, id, value1_name,         \
+                               value1_val, value2_name, value2_val)           \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, category_group, \
+                                   name, id, TRACE_EVENT_FLAG_COPY,           \
+                                   value1_name, static_cast<int>(value1_val), \
+                                   value2_name, static_cast<int>(value2_val))
+
+// Sampling profiler events.
+#define TRACE_EVENT_SAMPLE_WITH_ID1(category_group, name, id, arg1_name,       \
+                                    arg1_val)                                  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_SAMPLE, category_group,   \
+                                   name, id, TRACE_EVENT_FLAG_NONE, arg1_name, \
+                                   arg1_val)
+
+// Legacy async events.
+#define TRACE_EVENT_ASYNC_BEGIN0(category_group, name, id)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+                                   category_group, name, id,      \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+                                 arg1_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN,     \
+                                   category_group, name, id,          \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+                                 arg1_val, arg2_name, arg2_val)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                   \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,        \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN0(category_group, name, id)   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+                                   category_group, name, id,      \
+                                   TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+                                      arg1_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN,          \
+                                   category_group, name, id,               \
+                                   TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+                                      arg1_val, arg2_name, arg2_val)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                        \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,             \
+      TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_FLAGS0(category_group, name, id, flags) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN,            \
+                                   category_group, name, id, flags)
+
+// Legacy async events with explicit timestamps.
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
+                                                timestamp)                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                     \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,            \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP1(                           \
+    category_group, name, id, timestamp, arg1_name, arg1_val)              \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                      \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,             \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+      arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP2(category_group, name, id,      \
+                                                timestamp, arg1_name,          \
+                                                arg1_val, arg2_name, arg2_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,                 \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE,     \
+      arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, id, \
+                                                     timestamp)                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id,                 \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP_AND_FLAGS0(     \
+    category_group, name, id, timestamp, flags)                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(          \
+      TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, flags)
+
+// Legacy async step into events.
+#define TRACE_EVENT_ASYNC_STEP_INTO0(category_group, name, id, step)  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
+                                   category_group, name, id,          \
+                                   TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_INTO1(category_group, name, id, step, \
+                                     arg1_name, arg1_val)            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                  \
+      TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id,   \
+      TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+
+// Legacy async step into events with timestamps.
+#define TRACE_EVENT_ASYNC_STEP_INTO_WITH_TIMESTAMP0(category_group, name, id, \
+                                                    step, timestamp)          \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                         \
+      TRACE_EVENT_PHASE_ASYNC_STEP_INTO, category_group, name, id,            \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE,    \
+      "step", step)
+
+// Legacy async step past events.
+#define TRACE_EVENT_ASYNC_STEP_PAST0(category_group, name, id, step)  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
+                                   category_group, name, id,          \
+                                   TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_PAST1(category_group, name, id, step, \
+                                     arg1_name, arg1_val)            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                  \
+      TRACE_EVENT_PHASE_ASYNC_STEP_PAST, category_group, name, id,   \
+      TRACE_EVENT_FLAG_NONE, "step", step, arg1_name, arg1_val)
+
+// Legacy async end events.
+#define TRACE_EVENT_ASYNC_END0(category_group, name, id)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+                                   category_group, name, id,    \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END1(category_group, name, id, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END,               \
+                                   category_group, name, id,                  \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END2(category_group, name, id, arg1_name, arg1_val, \
+                               arg2_name, arg2_val)                           \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                           \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                  \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_END0(category_group, name, id)   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+                                   category_group, name, id,    \
+                                   TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_END1(category_group, name, id, arg1_name, \
+                                    arg1_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END,          \
+                                   category_group, name, id,             \
+                                   TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_END2(category_group, name, id, arg1_name, \
+                                    arg1_val, arg2_name, arg2_val)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                      \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,             \
+      TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_ASYNC_END_WITH_FLAGS0(category_group, name, id, flags) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END,            \
+                                   category_group, name, id, flags)
+
+// Legacy async end events with explicit timestamps.
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP0(category_group, name, id, \
+                                              timestamp)                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                   \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,            \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP1(category_group, name, id,       \
+                                              timestamp, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                         \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                  \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE,    \
+      arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP2(category_group, name, id,       \
+                                              timestamp, arg1_name, arg1_val, \
+                                              arg2_name, arg2_val)            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                         \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                  \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE,    \
+      arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_END_WITH_TIMESTAMP0(category_group, name, id, \
+                                                   timestamp)                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                        \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                 \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP_AND_FLAGS0(category_group, name, \
+                                                        id, timestamp, flags) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                         \
+      TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id,                  \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, flags)
+
+// Async events.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(category_group, name, id)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+                                   category_group, name, id,               \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+                                          arg1_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN,     \
+                                   category_group, name, id,                   \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+                                          arg1_val, arg2_name, arg2_val)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                            \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id,        \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP1(                  \
+    category_group, name, id, timestamp, arg1_name, arg1_val)              \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                      \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id,    \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+      arg1_name, arg1_val)
+
+// Async end events.
+#define TRACE_EVENT_NESTABLE_ASYNC_END0(category_group, name, id)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+                                   category_group, name, id,             \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_END1(category_group, name, id, arg1_name, \
+                                        arg1_val)                            \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END,     \
+                                   category_group, name, id,                 \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_END2(category_group, name, id, arg1_name, \
+                                        arg1_val, arg2_name, arg2_val)       \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                          \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id,        \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Async instant events.
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT0(category_group, name, id)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+                                   category_group, name, id,                 \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT1(category_group, name, id,        \
+                                            arg1_name, arg1_val)             \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, \
+                                   category_group, name, id,                 \
+                                   TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT2(                              \
+    category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val)   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                       \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+      TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TTS2(                       \
+    category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                            \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id,        \
+      TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+      arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TTS2(                         \
+    category_group, name, id, arg1_name, arg1_val, arg2_name, arg2_val)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                            \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id,          \
+      TRACE_EVENT_FLAG_ASYNC_TTS | TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+      arg2_name, arg2_val)
+
+// Async events with explicit timestamps.
+#define TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(category_group, name, \
+                                                         id, timestamp)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                          \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id,        \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(category_group, name, \
+                                                       id, timestamp)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                        \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id,        \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP1(                    \
+    category_group, name, id, timestamp, arg1_name, arg1_val)              \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                      \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id,      \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+      arg1_name, arg1_val)
+#define TRACE_EVENT_NESTABLE_ASYNC_INSTANT_WITH_TIMESTAMP0(               \
+    category_group, name, id, timestamp)                                  \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                     \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN0(category_group, name, id)   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \
+                                   category_group, name, id,               \
+                                   TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END0(category_group, name, id)   \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \
+                                   category_group, name, id,             \
+                                   TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0(          \
+    category_group, name, id, timestamp)                                \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                   \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, category_group, name, id, \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END_WITH_TIMESTAMP0(          \
+    category_group, name, id, timestamp)                              \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                 \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id, \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP2(                    \
+    category_group, name, id, timestamp, arg1_name, arg1_val, arg2_name,   \
+    arg2_val)                                                              \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                      \
+      TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, category_group, name, id,      \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+      arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Metadata events.
+#define TRACE_EVENT_METADATA1(category_group, name, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, arg1_name, arg1_val)
+
+// Clock sync events.
+#define TRACE_EVENT_CLOCK_SYNC_RECEIVER(sync_id)                           \
+  INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata",     \
+                           "clock_sync", TRACE_EVENT_FLAG_NONE, "sync_id", \
+                           sync_id)
+#define TRACE_EVENT_CLOCK_SYNC_ISSUER(sync_id, issue_ts, issue_end_ts)        \
+  INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(                                    \
+      TRACE_EVENT_PHASE_CLOCK_SYNC, "__metadata", "clock_sync", issue_end_ts, \
+      TRACE_EVENT_FLAG_NONE, "sync_id", sync_id, "issue_ts", issue_ts)
+
+// Object events.
+#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_CREATE_OBJECT,  \
+                                   category_group, name, id,         \
+                                   TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, \
+                                            snapshot)                 \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                                   \
+      TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, id,    \
+      TRACE_EVENT_FLAG_NONE, "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID_AND_TIMESTAMP(                 \
+    category_group, name, id, timestamp, snapshot)                         \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(                      \
+      TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, category_group, name, id,         \
+      TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE, \
+      "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_DELETE_OBJECT,  \
+                                   category_group, name, id,         \
+                                   TRACE_EVENT_FLAG_NONE)
+
+// Context events.
+#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context)    \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ENTER_CONTEXT, \
+                                   category_group, name, context,   \
+                                   TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context)    \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_LEAVE_CONTEXT, \
+                                   category_group, name, context,   \
+                                   TRACE_EVENT_FLAG_NONE)
+
+// TODO(skyostil): Implement binary-efficient trace events.
+#define TRACE_EVENT_BINARY_EFFICIENT0 TRACE_EVENT0
+#define TRACE_EVENT_BINARY_EFFICIENT1 TRACE_EVENT1
+#define TRACE_EVENT_BINARY_EFFICIENT2 TRACE_EVENT2
+
+// Macro to efficiently determine if a given category group is enabled.
+#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category, ret) \
+  do {                                                    \
+    *ret = TRACE_EVENT_CATEGORY_ENABLED(category);        \
+  } while (0)
+
+// Macro to efficiently determine, through polling, if a new trace has begun.
+#define TRACE_EVENT_IS_NEW_TRACE(ret)                                \
+  do {                                                               \
+    static int PERFETTO_UID(prev) = -1;                              \
+    int PERFETTO_UID(curr) =                                         \
+        ::perfetto::internal::TrackEventInternal::GetSessionCount(); \
+    if (::PERFETTO_TRACK_EVENT_NAMESPACE::TrackEvent::IsEnabled() && \
+        (PERFETTO_UID(prev) != PERFETTO_UID(curr))) {                \
+      *(ret) = true;                                                 \
+      PERFETTO_UID(prev) = PERFETTO_UID(curr);                       \
+    } else {                                                         \
+      *(ret) = false;                                                \
+    }                                                                \
+  } while (0)
+
+// ----------------------------------------------------------------------------
+// Legacy tracing API (adapted from trace_event.h).
+// ----------------------------------------------------------------------------
+
+// We can implement the following subset of the legacy tracing API without
+// involvement from the embedder. APIs such as TRACE_EVENT_API_ADD_TRACE_EVENT
+// are still up to the embedder to define.
+
+#define TRACE_STR_COPY(str) (str)
+
+#define TRACE_ID_WITH_SCOPE(scope, ...) \
+  ::perfetto::internal::LegacyTraceId::WithScope(scope, ##__VA_ARGS__)
+
+// Use this for ids that are unique across processes. This allows different
+// processes to use the same id to refer to the same event.
+#define TRACE_ID_GLOBAL(id) ::perfetto::internal::LegacyTraceId::GlobalId(id)
+
+// Use this for ids that are unique within a single process. This allows
+// different processes to use the same id to refer to different events.
+#define TRACE_ID_LOCAL(id) ::perfetto::internal::LegacyTraceId::LocalId(id)
+
+// Returns a pointer to a uint8_t which indicates whether tracing is enabled for
+// the given category or not. A zero value means tracing is disabled and
+// non-zero indicates at least one tracing session for this category is active.
+// Note that callers should not make any assumptions at what each bit represents
+// in the status byte. Does not support dynamic categories.
+#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category)                \
+  reinterpret_cast<const uint8_t*>(                                         \
+      [&] {                                                                 \
+        static_assert(                                                      \
+            !std::is_same<::perfetto::DynamicCategory,                      \
+                          decltype(category)>::value,                       \
+            "Enabled flag pointers are not supported for dynamic trace "    \
+            "categories.");                                                 \
+      },                                                                    \
+      PERFETTO_TRACK_EVENT_NAMESPACE::internal::kConstExprCategoryRegistry  \
+          .GetCategoryState(                                                \
+              ::PERFETTO_TRACK_EVENT_NAMESPACE::internal::kCategoryRegistry \
+                  .Find(category, /*is_dynamic=*/false)))
+
+// Given a pointer returned by TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED,
+// yields a pointer to the name of the corresponding category group.
+#define TRACE_EVENT_API_GET_CATEGORY_GROUP_NAME(category_enabled_ptr)       \
+  ::PERFETTO_TRACK_EVENT_NAMESPACE::internal::kConstExprCategoryRegistry    \
+      .GetCategory(                                                         \
+          category_enabled_ptr -                                            \
+          reinterpret_cast<const uint8_t*>(                                 \
+              ::PERFETTO_TRACK_EVENT_NAMESPACE::internal::kCategoryRegistry \
+                  .GetCategoryState(0u)))                                   \
+      ->name
+
+#endif  // PERFETTO_ENABLE_LEGACY_TRACE_EVENTS
+
+#endif  // INCLUDE_PERFETTO_TRACING_TRACK_EVENT_LEGACY_H_
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_TRACING_H_
+#define INCLUDE_PERFETTO_TRACING_H_
+
+// This headers wraps all the headers necessary to use the public Perfetto
+// Tracing API. Embedders should preferably use this one header to avoid having
+// to figure out the various set of header required for each class.
+// The only exception to this should be large projects where build time is a
+// concern (e.g. chromium), which migh prefer sticking to strict IWYU.
+
+// gen_amalgamated expanded: #include "perfetto/tracing/buffer_exhausted_policy.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/console_interceptor.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_config.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/data_source_descriptor.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/core/trace_config.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/data_source.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/interceptor.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/platform.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/tracing.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/tracing_backend.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/track_event.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/track_event_interned_data_index.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/track_event_legacy.h"
+// gen_amalgamated expanded: #include "perfetto/tracing/track_event_state_tracker.h"
+
+#endif  // INCLUDE_PERFETTO_TRACING_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/android_energy_consumer_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_ENERGY_CONSUMER_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_ENERGY_CONSUMER_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class AndroidEnergyConsumerDescriptor;
+class AndroidEnergyConsumer;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT AndroidEnergyConsumerDescriptor : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kEnergyConsumersFieldNumber = 1,
+  };
+
+  AndroidEnergyConsumerDescriptor();
+  ~AndroidEnergyConsumerDescriptor() override;
+  AndroidEnergyConsumerDescriptor(AndroidEnergyConsumerDescriptor&&) noexcept;
+  AndroidEnergyConsumerDescriptor& operator=(AndroidEnergyConsumerDescriptor&&);
+  AndroidEnergyConsumerDescriptor(const AndroidEnergyConsumerDescriptor&);
+  AndroidEnergyConsumerDescriptor& operator=(const AndroidEnergyConsumerDescriptor&);
+  bool operator==(const AndroidEnergyConsumerDescriptor&) const;
+  bool operator!=(const AndroidEnergyConsumerDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<AndroidEnergyConsumer>& energy_consumers() const { return energy_consumers_; }
+  std::vector<AndroidEnergyConsumer>* mutable_energy_consumers() { return &energy_consumers_; }
+  int energy_consumers_size() const;
+  void clear_energy_consumers();
+  AndroidEnergyConsumer* add_energy_consumers();
+
+ private:
+  std::vector<AndroidEnergyConsumer> energy_consumers_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT AndroidEnergyConsumer : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kEnergyConsumerIdFieldNumber = 1,
+    kOrdinalFieldNumber = 2,
+    kTypeFieldNumber = 3,
+    kNameFieldNumber = 4,
+  };
+
+  AndroidEnergyConsumer();
+  ~AndroidEnergyConsumer() override;
+  AndroidEnergyConsumer(AndroidEnergyConsumer&&) noexcept;
+  AndroidEnergyConsumer& operator=(AndroidEnergyConsumer&&);
+  AndroidEnergyConsumer(const AndroidEnergyConsumer&);
+  AndroidEnergyConsumer& operator=(const AndroidEnergyConsumer&);
+  bool operator==(const AndroidEnergyConsumer&) const;
+  bool operator!=(const AndroidEnergyConsumer& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_energy_consumer_id() const { return _has_field_[1]; }
+  int32_t energy_consumer_id() const { return energy_consumer_id_; }
+  void set_energy_consumer_id(int32_t value) { energy_consumer_id_ = value; _has_field_.set(1); }
+
+  bool has_ordinal() const { return _has_field_[2]; }
+  int32_t ordinal() const { return ordinal_; }
+  void set_ordinal(int32_t value) { ordinal_ = value; _has_field_.set(2); }
+
+  bool has_type() const { return _has_field_[3]; }
+  const std::string& type() const { return type_; }
+  void set_type(const std::string& value) { type_ = value; _has_field_.set(3); }
+
+  bool has_name() const { return _has_field_[4]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(4); }
+
+ private:
+  int32_t energy_consumer_id_{};
+  int32_t ordinal_{};
+  std::string type_{};
+  std::string name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_ENERGY_CONSUMER_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/android_log_constants.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_LOG_CONSTANTS_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_LOG_CONSTANTS_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum AndroidLogId : int;
+enum AndroidLogPriority : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum AndroidLogId : int {
+  LID_DEFAULT = 0,
+  LID_RADIO = 1,
+  LID_EVENTS = 2,
+  LID_SYSTEM = 3,
+  LID_CRASH = 4,
+  LID_STATS = 5,
+  LID_SECURITY = 6,
+  LID_KERNEL = 7,
+};
+enum AndroidLogPriority : int {
+  PRIO_UNSPECIFIED = 0,
+  PRIO_UNUSED = 1,
+  PRIO_VERBOSE = 2,
+  PRIO_DEBUG = 3,
+  PRIO_INFO = 4,
+  PRIO_WARN = 5,
+  PRIO_ERROR = 6,
+  PRIO_FATAL = 7,
+};
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_LOG_CONSTANTS_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/builtin_clock.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_BUILTIN_CLOCK_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_BUILTIN_CLOCK_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum BuiltinClock : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum BuiltinClock : int {
+  BUILTIN_CLOCK_UNKNOWN = 0,
+  BUILTIN_CLOCK_REALTIME = 1,
+  BUILTIN_CLOCK_REALTIME_COARSE = 2,
+  BUILTIN_CLOCK_MONOTONIC = 3,
+  BUILTIN_CLOCK_MONOTONIC_COARSE = 4,
+  BUILTIN_CLOCK_MONOTONIC_RAW = 5,
+  BUILTIN_CLOCK_BOOTTIME = 6,
+  BUILTIN_CLOCK_MAX_ID = 63,
+};
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_BUILTIN_CLOCK_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/commit_data_request.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_COMMIT_DATA_REQUEST_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_COMMIT_DATA_REQUEST_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class CommitDataRequest;
+class CommitDataRequest_ChunkToPatch;
+class CommitDataRequest_ChunkToPatch_Patch;
+class CommitDataRequest_ChunksToMove;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT CommitDataRequest : public ::protozero::CppMessageObj {
+ public:
+  using ChunksToMove = CommitDataRequest_ChunksToMove;
+  using ChunkToPatch = CommitDataRequest_ChunkToPatch;
+  enum FieldNumbers {
+    kChunksToMoveFieldNumber = 1,
+    kChunksToPatchFieldNumber = 2,
+    kFlushRequestIdFieldNumber = 3,
+  };
+
+  CommitDataRequest();
+  ~CommitDataRequest() override;
+  CommitDataRequest(CommitDataRequest&&) noexcept;
+  CommitDataRequest& operator=(CommitDataRequest&&);
+  CommitDataRequest(const CommitDataRequest&);
+  CommitDataRequest& operator=(const CommitDataRequest&);
+  bool operator==(const CommitDataRequest&) const;
+  bool operator!=(const CommitDataRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<CommitDataRequest_ChunksToMove>& chunks_to_move() const { return chunks_to_move_; }
+  std::vector<CommitDataRequest_ChunksToMove>* mutable_chunks_to_move() { return &chunks_to_move_; }
+  int chunks_to_move_size() const;
+  void clear_chunks_to_move();
+  CommitDataRequest_ChunksToMove* add_chunks_to_move();
+
+  const std::vector<CommitDataRequest_ChunkToPatch>& chunks_to_patch() const { return chunks_to_patch_; }
+  std::vector<CommitDataRequest_ChunkToPatch>* mutable_chunks_to_patch() { return &chunks_to_patch_; }
+  int chunks_to_patch_size() const;
+  void clear_chunks_to_patch();
+  CommitDataRequest_ChunkToPatch* add_chunks_to_patch();
+
+  bool has_flush_request_id() const { return _has_field_[3]; }
+  uint64_t flush_request_id() const { return flush_request_id_; }
+  void set_flush_request_id(uint64_t value) { flush_request_id_ = value; _has_field_.set(3); }
+
+ private:
+  std::vector<CommitDataRequest_ChunksToMove> chunks_to_move_;
+  std::vector<CommitDataRequest_ChunkToPatch> chunks_to_patch_;
+  uint64_t flush_request_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT CommitDataRequest_ChunkToPatch : public ::protozero::CppMessageObj {
+ public:
+  using Patch = CommitDataRequest_ChunkToPatch_Patch;
+  enum FieldNumbers {
+    kTargetBufferFieldNumber = 1,
+    kWriterIdFieldNumber = 2,
+    kChunkIdFieldNumber = 3,
+    kPatchesFieldNumber = 4,
+    kHasMorePatchesFieldNumber = 5,
+  };
+
+  CommitDataRequest_ChunkToPatch();
+  ~CommitDataRequest_ChunkToPatch() override;
+  CommitDataRequest_ChunkToPatch(CommitDataRequest_ChunkToPatch&&) noexcept;
+  CommitDataRequest_ChunkToPatch& operator=(CommitDataRequest_ChunkToPatch&&);
+  CommitDataRequest_ChunkToPatch(const CommitDataRequest_ChunkToPatch&);
+  CommitDataRequest_ChunkToPatch& operator=(const CommitDataRequest_ChunkToPatch&);
+  bool operator==(const CommitDataRequest_ChunkToPatch&) const;
+  bool operator!=(const CommitDataRequest_ChunkToPatch& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_target_buffer() const { return _has_field_[1]; }
+  uint32_t target_buffer() const { return target_buffer_; }
+  void set_target_buffer(uint32_t value) { target_buffer_ = value; _has_field_.set(1); }
+
+  bool has_writer_id() const { return _has_field_[2]; }
+  uint32_t writer_id() const { return writer_id_; }
+  void set_writer_id(uint32_t value) { writer_id_ = value; _has_field_.set(2); }
+
+  bool has_chunk_id() const { return _has_field_[3]; }
+  uint32_t chunk_id() const { return chunk_id_; }
+  void set_chunk_id(uint32_t value) { chunk_id_ = value; _has_field_.set(3); }
+
+  const std::vector<CommitDataRequest_ChunkToPatch_Patch>& patches() const { return patches_; }
+  std::vector<CommitDataRequest_ChunkToPatch_Patch>* mutable_patches() { return &patches_; }
+  int patches_size() const;
+  void clear_patches();
+  CommitDataRequest_ChunkToPatch_Patch* add_patches();
+
+  bool has_has_more_patches() const { return _has_field_[5]; }
+  bool has_more_patches() const { return has_more_patches_; }
+  void set_has_more_patches(bool value) { has_more_patches_ = value; _has_field_.set(5); }
+
+ private:
+  uint32_t target_buffer_{};
+  uint32_t writer_id_{};
+  uint32_t chunk_id_{};
+  std::vector<CommitDataRequest_ChunkToPatch_Patch> patches_;
+  bool has_more_patches_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT CommitDataRequest_ChunkToPatch_Patch : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kOffsetFieldNumber = 1,
+    kDataFieldNumber = 2,
+  };
+
+  CommitDataRequest_ChunkToPatch_Patch();
+  ~CommitDataRequest_ChunkToPatch_Patch() override;
+  CommitDataRequest_ChunkToPatch_Patch(CommitDataRequest_ChunkToPatch_Patch&&) noexcept;
+  CommitDataRequest_ChunkToPatch_Patch& operator=(CommitDataRequest_ChunkToPatch_Patch&&);
+  CommitDataRequest_ChunkToPatch_Patch(const CommitDataRequest_ChunkToPatch_Patch&);
+  CommitDataRequest_ChunkToPatch_Patch& operator=(const CommitDataRequest_ChunkToPatch_Patch&);
+  bool operator==(const CommitDataRequest_ChunkToPatch_Patch&) const;
+  bool operator!=(const CommitDataRequest_ChunkToPatch_Patch& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_offset() const { return _has_field_[1]; }
+  uint32_t offset() const { return offset_; }
+  void set_offset(uint32_t value) { offset_ = value; _has_field_.set(1); }
+
+  bool has_data() const { return _has_field_[2]; }
+  const std::string& data() const { return data_; }
+  void set_data(const std::string& value) { data_ = value; _has_field_.set(2); }
+  void set_data(const void* p, size_t s) { data_.assign(reinterpret_cast<const char*>(p), s); _has_field_.set(2); }
+
+ private:
+  uint32_t offset_{};
+  std::string data_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT CommitDataRequest_ChunksToMove : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kPageFieldNumber = 1,
+    kChunkFieldNumber = 2,
+    kTargetBufferFieldNumber = 3,
+  };
+
+  CommitDataRequest_ChunksToMove();
+  ~CommitDataRequest_ChunksToMove() override;
+  CommitDataRequest_ChunksToMove(CommitDataRequest_ChunksToMove&&) noexcept;
+  CommitDataRequest_ChunksToMove& operator=(CommitDataRequest_ChunksToMove&&);
+  CommitDataRequest_ChunksToMove(const CommitDataRequest_ChunksToMove&);
+  CommitDataRequest_ChunksToMove& operator=(const CommitDataRequest_ChunksToMove&);
+  bool operator==(const CommitDataRequest_ChunksToMove&) const;
+  bool operator!=(const CommitDataRequest_ChunksToMove& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_page() const { return _has_field_[1]; }
+  uint32_t page() const { return page_; }
+  void set_page(uint32_t value) { page_ = value; _has_field_.set(1); }
+
+  bool has_chunk() const { return _has_field_[2]; }
+  uint32_t chunk() const { return chunk_; }
+  void set_chunk(uint32_t value) { chunk_ = value; _has_field_.set(2); }
+
+  bool has_target_buffer() const { return _has_field_[3]; }
+  uint32_t target_buffer() const { return target_buffer_; }
+  void set_target_buffer(uint32_t value) { target_buffer_ = value; _has_field_.set(3); }
+
+ private:
+  uint32_t page_{};
+  uint32_t chunk_{};
+  uint32_t target_buffer_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_COMMIT_DATA_REQUEST_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/data_source_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DATA_SOURCE_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DATA_SOURCE_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class DataSourceDescriptor;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT DataSourceDescriptor : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kWillNotifyOnStopFieldNumber = 2,
+    kWillNotifyOnStartFieldNumber = 3,
+    kHandlesIncrementalStateClearFieldNumber = 4,
+    kGpuCounterDescriptorFieldNumber = 5,
+    kTrackEventDescriptorFieldNumber = 6,
+  };
+
+  DataSourceDescriptor();
+  ~DataSourceDescriptor() override;
+  DataSourceDescriptor(DataSourceDescriptor&&) noexcept;
+  DataSourceDescriptor& operator=(DataSourceDescriptor&&);
+  DataSourceDescriptor(const DataSourceDescriptor&);
+  DataSourceDescriptor& operator=(const DataSourceDescriptor&);
+  bool operator==(const DataSourceDescriptor&) const;
+  bool operator!=(const DataSourceDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_will_notify_on_stop() const { return _has_field_[2]; }
+  bool will_notify_on_stop() const { return will_notify_on_stop_; }
+  void set_will_notify_on_stop(bool value) { will_notify_on_stop_ = value; _has_field_.set(2); }
+
+  bool has_will_notify_on_start() const { return _has_field_[3]; }
+  bool will_notify_on_start() const { return will_notify_on_start_; }
+  void set_will_notify_on_start(bool value) { will_notify_on_start_ = value; _has_field_.set(3); }
+
+  bool has_handles_incremental_state_clear() const { return _has_field_[4]; }
+  bool handles_incremental_state_clear() const { return handles_incremental_state_clear_; }
+  void set_handles_incremental_state_clear(bool value) { handles_incremental_state_clear_ = value; _has_field_.set(4); }
+
+  const std::string& gpu_counter_descriptor_raw() const { return gpu_counter_descriptor_; }
+  void set_gpu_counter_descriptor_raw(const std::string& raw) { gpu_counter_descriptor_ = raw; _has_field_.set(5); }
+
+  const std::string& track_event_descriptor_raw() const { return track_event_descriptor_; }
+  void set_track_event_descriptor_raw(const std::string& raw) { track_event_descriptor_ = raw; _has_field_.set(6); }
+
+ private:
+  std::string name_{};
+  bool will_notify_on_stop_{};
+  bool will_notify_on_start_{};
+  bool handles_incremental_state_clear_{};
+  std::string gpu_counter_descriptor_;  // [lazy=true]
+  std::string track_event_descriptor_;  // [lazy=true]
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<7> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DATA_SOURCE_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class OneofOptions;
+class EnumValueDescriptorProto;
+class EnumDescriptorProto;
+class OneofDescriptorProto;
+class FieldDescriptorProto;
+class DescriptorProto;
+class DescriptorProto_ReservedRange;
+class FileDescriptorProto;
+class FileDescriptorSet;
+enum FieldDescriptorProto_Type : int;
+enum FieldDescriptorProto_Label : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum FieldDescriptorProto_Type : int {
+  FieldDescriptorProto_Type_TYPE_DOUBLE = 1,
+  FieldDescriptorProto_Type_TYPE_FLOAT = 2,
+  FieldDescriptorProto_Type_TYPE_INT64 = 3,
+  FieldDescriptorProto_Type_TYPE_UINT64 = 4,
+  FieldDescriptorProto_Type_TYPE_INT32 = 5,
+  FieldDescriptorProto_Type_TYPE_FIXED64 = 6,
+  FieldDescriptorProto_Type_TYPE_FIXED32 = 7,
+  FieldDescriptorProto_Type_TYPE_BOOL = 8,
+  FieldDescriptorProto_Type_TYPE_STRING = 9,
+  FieldDescriptorProto_Type_TYPE_GROUP = 10,
+  FieldDescriptorProto_Type_TYPE_MESSAGE = 11,
+  FieldDescriptorProto_Type_TYPE_BYTES = 12,
+  FieldDescriptorProto_Type_TYPE_UINT32 = 13,
+  FieldDescriptorProto_Type_TYPE_ENUM = 14,
+  FieldDescriptorProto_Type_TYPE_SFIXED32 = 15,
+  FieldDescriptorProto_Type_TYPE_SFIXED64 = 16,
+  FieldDescriptorProto_Type_TYPE_SINT32 = 17,
+  FieldDescriptorProto_Type_TYPE_SINT64 = 18,
+};
+enum FieldDescriptorProto_Label : int {
+  FieldDescriptorProto_Label_LABEL_OPTIONAL = 1,
+  FieldDescriptorProto_Label_LABEL_REQUIRED = 2,
+  FieldDescriptorProto_Label_LABEL_REPEATED = 3,
+};
+
+class PERFETTO_EXPORT OneofOptions : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  OneofOptions();
+  ~OneofOptions() override;
+  OneofOptions(OneofOptions&&) noexcept;
+  OneofOptions& operator=(OneofOptions&&);
+  OneofOptions(const OneofOptions&);
+  OneofOptions& operator=(const OneofOptions&);
+  bool operator==(const OneofOptions&) const;
+  bool operator!=(const OneofOptions& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT EnumValueDescriptorProto : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kNumberFieldNumber = 2,
+  };
+
+  EnumValueDescriptorProto();
+  ~EnumValueDescriptorProto() override;
+  EnumValueDescriptorProto(EnumValueDescriptorProto&&) noexcept;
+  EnumValueDescriptorProto& operator=(EnumValueDescriptorProto&&);
+  EnumValueDescriptorProto(const EnumValueDescriptorProto&);
+  EnumValueDescriptorProto& operator=(const EnumValueDescriptorProto&);
+  bool operator==(const EnumValueDescriptorProto&) const;
+  bool operator!=(const EnumValueDescriptorProto& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_number() const { return _has_field_[2]; }
+  int32_t number() const { return number_; }
+  void set_number(int32_t value) { number_ = value; _has_field_.set(2); }
+
+ private:
+  std::string name_{};
+  int32_t number_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT EnumDescriptorProto : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kValueFieldNumber = 2,
+    kReservedNameFieldNumber = 5,
+  };
+
+  EnumDescriptorProto();
+  ~EnumDescriptorProto() override;
+  EnumDescriptorProto(EnumDescriptorProto&&) noexcept;
+  EnumDescriptorProto& operator=(EnumDescriptorProto&&);
+  EnumDescriptorProto(const EnumDescriptorProto&);
+  EnumDescriptorProto& operator=(const EnumDescriptorProto&);
+  bool operator==(const EnumDescriptorProto&) const;
+  bool operator!=(const EnumDescriptorProto& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  const std::vector<EnumValueDescriptorProto>& value() const { return value_; }
+  std::vector<EnumValueDescriptorProto>* mutable_value() { return &value_; }
+  int value_size() const;
+  void clear_value();
+  EnumValueDescriptorProto* add_value();
+
+  const std::vector<std::string>& reserved_name() const { return reserved_name_; }
+  std::vector<std::string>* mutable_reserved_name() { return &reserved_name_; }
+  int reserved_name_size() const { return static_cast<int>(reserved_name_.size()); }
+  void clear_reserved_name() { reserved_name_.clear(); }
+  void add_reserved_name(std::string value) { reserved_name_.emplace_back(value); }
+  std::string* add_reserved_name() { reserved_name_.emplace_back(); return &reserved_name_.back(); }
+
+ private:
+  std::string name_{};
+  std::vector<EnumValueDescriptorProto> value_;
+  std::vector<std::string> reserved_name_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT OneofDescriptorProto : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kOptionsFieldNumber = 2,
+  };
+
+  OneofDescriptorProto();
+  ~OneofDescriptorProto() override;
+  OneofDescriptorProto(OneofDescriptorProto&&) noexcept;
+  OneofDescriptorProto& operator=(OneofDescriptorProto&&);
+  OneofDescriptorProto(const OneofDescriptorProto&);
+  OneofDescriptorProto& operator=(const OneofDescriptorProto&);
+  bool operator==(const OneofDescriptorProto&) const;
+  bool operator!=(const OneofDescriptorProto& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_options() const { return _has_field_[2]; }
+  const OneofOptions& options() const { return *options_; }
+  OneofOptions* mutable_options() { _has_field_.set(2); return options_.get(); }
+
+ private:
+  std::string name_{};
+  ::protozero::CopyablePtr<OneofOptions> options_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT FieldDescriptorProto : public ::protozero::CppMessageObj {
+ public:
+  using Type = FieldDescriptorProto_Type;
+  static constexpr auto TYPE_DOUBLE = FieldDescriptorProto_Type_TYPE_DOUBLE;
+  static constexpr auto TYPE_FLOAT = FieldDescriptorProto_Type_TYPE_FLOAT;
+  static constexpr auto TYPE_INT64 = FieldDescriptorProto_Type_TYPE_INT64;
+  static constexpr auto TYPE_UINT64 = FieldDescriptorProto_Type_TYPE_UINT64;
+  static constexpr auto TYPE_INT32 = FieldDescriptorProto_Type_TYPE_INT32;
+  static constexpr auto TYPE_FIXED64 = FieldDescriptorProto_Type_TYPE_FIXED64;
+  static constexpr auto TYPE_FIXED32 = FieldDescriptorProto_Type_TYPE_FIXED32;
+  static constexpr auto TYPE_BOOL = FieldDescriptorProto_Type_TYPE_BOOL;
+  static constexpr auto TYPE_STRING = FieldDescriptorProto_Type_TYPE_STRING;
+  static constexpr auto TYPE_GROUP = FieldDescriptorProto_Type_TYPE_GROUP;
+  static constexpr auto TYPE_MESSAGE = FieldDescriptorProto_Type_TYPE_MESSAGE;
+  static constexpr auto TYPE_BYTES = FieldDescriptorProto_Type_TYPE_BYTES;
+  static constexpr auto TYPE_UINT32 = FieldDescriptorProto_Type_TYPE_UINT32;
+  static constexpr auto TYPE_ENUM = FieldDescriptorProto_Type_TYPE_ENUM;
+  static constexpr auto TYPE_SFIXED32 = FieldDescriptorProto_Type_TYPE_SFIXED32;
+  static constexpr auto TYPE_SFIXED64 = FieldDescriptorProto_Type_TYPE_SFIXED64;
+  static constexpr auto TYPE_SINT32 = FieldDescriptorProto_Type_TYPE_SINT32;
+  static constexpr auto TYPE_SINT64 = FieldDescriptorProto_Type_TYPE_SINT64;
+  static constexpr auto Type_MIN = FieldDescriptorProto_Type_TYPE_DOUBLE;
+  static constexpr auto Type_MAX = FieldDescriptorProto_Type_TYPE_SINT64;
+  using Label = FieldDescriptorProto_Label;
+  static constexpr auto LABEL_OPTIONAL = FieldDescriptorProto_Label_LABEL_OPTIONAL;
+  static constexpr auto LABEL_REQUIRED = FieldDescriptorProto_Label_LABEL_REQUIRED;
+  static constexpr auto LABEL_REPEATED = FieldDescriptorProto_Label_LABEL_REPEATED;
+  static constexpr auto Label_MIN = FieldDescriptorProto_Label_LABEL_OPTIONAL;
+  static constexpr auto Label_MAX = FieldDescriptorProto_Label_LABEL_REPEATED;
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kNumberFieldNumber = 3,
+    kLabelFieldNumber = 4,
+    kTypeFieldNumber = 5,
+    kTypeNameFieldNumber = 6,
+    kExtendeeFieldNumber = 2,
+    kDefaultValueFieldNumber = 7,
+    kOneofIndexFieldNumber = 9,
+  };
+
+  FieldDescriptorProto();
+  ~FieldDescriptorProto() override;
+  FieldDescriptorProto(FieldDescriptorProto&&) noexcept;
+  FieldDescriptorProto& operator=(FieldDescriptorProto&&);
+  FieldDescriptorProto(const FieldDescriptorProto&);
+  FieldDescriptorProto& operator=(const FieldDescriptorProto&);
+  bool operator==(const FieldDescriptorProto&) const;
+  bool operator!=(const FieldDescriptorProto& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_number() const { return _has_field_[3]; }
+  int32_t number() const { return number_; }
+  void set_number(int32_t value) { number_ = value; _has_field_.set(3); }
+
+  bool has_label() const { return _has_field_[4]; }
+  FieldDescriptorProto_Label label() const { return label_; }
+  void set_label(FieldDescriptorProto_Label value) { label_ = value; _has_field_.set(4); }
+
+  bool has_type() const { return _has_field_[5]; }
+  FieldDescriptorProto_Type type() const { return type_; }
+  void set_type(FieldDescriptorProto_Type value) { type_ = value; _has_field_.set(5); }
+
+  bool has_type_name() const { return _has_field_[6]; }
+  const std::string& type_name() const { return type_name_; }
+  void set_type_name(const std::string& value) { type_name_ = value; _has_field_.set(6); }
+
+  bool has_extendee() const { return _has_field_[2]; }
+  const std::string& extendee() const { return extendee_; }
+  void set_extendee(const std::string& value) { extendee_ = value; _has_field_.set(2); }
+
+  bool has_default_value() const { return _has_field_[7]; }
+  const std::string& default_value() const { return default_value_; }
+  void set_default_value(const std::string& value) { default_value_ = value; _has_field_.set(7); }
+
+  bool has_oneof_index() const { return _has_field_[9]; }
+  int32_t oneof_index() const { return oneof_index_; }
+  void set_oneof_index(int32_t value) { oneof_index_ = value; _has_field_.set(9); }
+
+ private:
+  std::string name_{};
+  int32_t number_{};
+  FieldDescriptorProto_Label label_{};
+  FieldDescriptorProto_Type type_{};
+  std::string type_name_{};
+  std::string extendee_{};
+  std::string default_value_{};
+  int32_t oneof_index_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<10> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT DescriptorProto : public ::protozero::CppMessageObj {
+ public:
+  using ReservedRange = DescriptorProto_ReservedRange;
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kFieldFieldNumber = 2,
+    kExtensionFieldNumber = 6,
+    kNestedTypeFieldNumber = 3,
+    kEnumTypeFieldNumber = 4,
+    kOneofDeclFieldNumber = 8,
+    kReservedRangeFieldNumber = 9,
+    kReservedNameFieldNumber = 10,
+  };
+
+  DescriptorProto();
+  ~DescriptorProto() override;
+  DescriptorProto(DescriptorProto&&) noexcept;
+  DescriptorProto& operator=(DescriptorProto&&);
+  DescriptorProto(const DescriptorProto&);
+  DescriptorProto& operator=(const DescriptorProto&);
+  bool operator==(const DescriptorProto&) const;
+  bool operator!=(const DescriptorProto& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  const std::vector<FieldDescriptorProto>& field() const { return field_; }
+  std::vector<FieldDescriptorProto>* mutable_field() { return &field_; }
+  int field_size() const;
+  void clear_field();
+  FieldDescriptorProto* add_field();
+
+  const std::vector<FieldDescriptorProto>& extension() const { return extension_; }
+  std::vector<FieldDescriptorProto>* mutable_extension() { return &extension_; }
+  int extension_size() const;
+  void clear_extension();
+  FieldDescriptorProto* add_extension();
+
+  const std::vector<DescriptorProto>& nested_type() const { return nested_type_; }
+  std::vector<DescriptorProto>* mutable_nested_type() { return &nested_type_; }
+  int nested_type_size() const;
+  void clear_nested_type();
+  DescriptorProto* add_nested_type();
+
+  const std::vector<EnumDescriptorProto>& enum_type() const { return enum_type_; }
+  std::vector<EnumDescriptorProto>* mutable_enum_type() { return &enum_type_; }
+  int enum_type_size() const;
+  void clear_enum_type();
+  EnumDescriptorProto* add_enum_type();
+
+  const std::vector<OneofDescriptorProto>& oneof_decl() const { return oneof_decl_; }
+  std::vector<OneofDescriptorProto>* mutable_oneof_decl() { return &oneof_decl_; }
+  int oneof_decl_size() const;
+  void clear_oneof_decl();
+  OneofDescriptorProto* add_oneof_decl();
+
+  const std::vector<DescriptorProto_ReservedRange>& reserved_range() const { return reserved_range_; }
+  std::vector<DescriptorProto_ReservedRange>* mutable_reserved_range() { return &reserved_range_; }
+  int reserved_range_size() const;
+  void clear_reserved_range();
+  DescriptorProto_ReservedRange* add_reserved_range();
+
+  const std::vector<std::string>& reserved_name() const { return reserved_name_; }
+  std::vector<std::string>* mutable_reserved_name() { return &reserved_name_; }
+  int reserved_name_size() const { return static_cast<int>(reserved_name_.size()); }
+  void clear_reserved_name() { reserved_name_.clear(); }
+  void add_reserved_name(std::string value) { reserved_name_.emplace_back(value); }
+  std::string* add_reserved_name() { reserved_name_.emplace_back(); return &reserved_name_.back(); }
+
+ private:
+  std::string name_{};
+  std::vector<FieldDescriptorProto> field_;
+  std::vector<FieldDescriptorProto> extension_;
+  std::vector<DescriptorProto> nested_type_;
+  std::vector<EnumDescriptorProto> enum_type_;
+  std::vector<OneofDescriptorProto> oneof_decl_;
+  std::vector<DescriptorProto_ReservedRange> reserved_range_;
+  std::vector<std::string> reserved_name_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<11> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT DescriptorProto_ReservedRange : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kStartFieldNumber = 1,
+    kEndFieldNumber = 2,
+  };
+
+  DescriptorProto_ReservedRange();
+  ~DescriptorProto_ReservedRange() override;
+  DescriptorProto_ReservedRange(DescriptorProto_ReservedRange&&) noexcept;
+  DescriptorProto_ReservedRange& operator=(DescriptorProto_ReservedRange&&);
+  DescriptorProto_ReservedRange(const DescriptorProto_ReservedRange&);
+  DescriptorProto_ReservedRange& operator=(const DescriptorProto_ReservedRange&);
+  bool operator==(const DescriptorProto_ReservedRange&) const;
+  bool operator!=(const DescriptorProto_ReservedRange& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_start() const { return _has_field_[1]; }
+  int32_t start() const { return start_; }
+  void set_start(int32_t value) { start_ = value; _has_field_.set(1); }
+
+  bool has_end() const { return _has_field_[2]; }
+  int32_t end() const { return end_; }
+  void set_end(int32_t value) { end_ = value; _has_field_.set(2); }
+
+ private:
+  int32_t start_{};
+  int32_t end_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT FileDescriptorProto : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kPackageFieldNumber = 2,
+    kDependencyFieldNumber = 3,
+    kPublicDependencyFieldNumber = 10,
+    kWeakDependencyFieldNumber = 11,
+    kMessageTypeFieldNumber = 4,
+    kEnumTypeFieldNumber = 5,
+    kExtensionFieldNumber = 7,
+  };
+
+  FileDescriptorProto();
+  ~FileDescriptorProto() override;
+  FileDescriptorProto(FileDescriptorProto&&) noexcept;
+  FileDescriptorProto& operator=(FileDescriptorProto&&);
+  FileDescriptorProto(const FileDescriptorProto&);
+  FileDescriptorProto& operator=(const FileDescriptorProto&);
+  bool operator==(const FileDescriptorProto&) const;
+  bool operator!=(const FileDescriptorProto& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_package() const { return _has_field_[2]; }
+  const std::string& package() const { return package_; }
+  void set_package(const std::string& value) { package_ = value; _has_field_.set(2); }
+
+  const std::vector<std::string>& dependency() const { return dependency_; }
+  std::vector<std::string>* mutable_dependency() { return &dependency_; }
+  int dependency_size() const { return static_cast<int>(dependency_.size()); }
+  void clear_dependency() { dependency_.clear(); }
+  void add_dependency(std::string value) { dependency_.emplace_back(value); }
+  std::string* add_dependency() { dependency_.emplace_back(); return &dependency_.back(); }
+
+  const std::vector<int32_t>& public_dependency() const { return public_dependency_; }
+  std::vector<int32_t>* mutable_public_dependency() { return &public_dependency_; }
+  int public_dependency_size() const { return static_cast<int>(public_dependency_.size()); }
+  void clear_public_dependency() { public_dependency_.clear(); }
+  void add_public_dependency(int32_t value) { public_dependency_.emplace_back(value); }
+  int32_t* add_public_dependency() { public_dependency_.emplace_back(); return &public_dependency_.back(); }
+
+  const std::vector<int32_t>& weak_dependency() const { return weak_dependency_; }
+  std::vector<int32_t>* mutable_weak_dependency() { return &weak_dependency_; }
+  int weak_dependency_size() const { return static_cast<int>(weak_dependency_.size()); }
+  void clear_weak_dependency() { weak_dependency_.clear(); }
+  void add_weak_dependency(int32_t value) { weak_dependency_.emplace_back(value); }
+  int32_t* add_weak_dependency() { weak_dependency_.emplace_back(); return &weak_dependency_.back(); }
+
+  const std::vector<DescriptorProto>& message_type() const { return message_type_; }
+  std::vector<DescriptorProto>* mutable_message_type() { return &message_type_; }
+  int message_type_size() const;
+  void clear_message_type();
+  DescriptorProto* add_message_type();
+
+  const std::vector<EnumDescriptorProto>& enum_type() const { return enum_type_; }
+  std::vector<EnumDescriptorProto>* mutable_enum_type() { return &enum_type_; }
+  int enum_type_size() const;
+  void clear_enum_type();
+  EnumDescriptorProto* add_enum_type();
+
+  const std::vector<FieldDescriptorProto>& extension() const { return extension_; }
+  std::vector<FieldDescriptorProto>* mutable_extension() { return &extension_; }
+  int extension_size() const;
+  void clear_extension();
+  FieldDescriptorProto* add_extension();
+
+ private:
+  std::string name_{};
+  std::string package_{};
+  std::vector<std::string> dependency_;
+  std::vector<int32_t> public_dependency_;
+  std::vector<int32_t> weak_dependency_;
+  std::vector<DescriptorProto> message_type_;
+  std::vector<EnumDescriptorProto> enum_type_;
+  std::vector<FieldDescriptorProto> extension_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<12> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT FileDescriptorSet : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kFileFieldNumber = 1,
+  };
+
+  FileDescriptorSet();
+  ~FileDescriptorSet() override;
+  FileDescriptorSet(FileDescriptorSet&&) noexcept;
+  FileDescriptorSet& operator=(FileDescriptorSet&&);
+  FileDescriptorSet(const FileDescriptorSet&);
+  FileDescriptorSet& operator=(const FileDescriptorSet&);
+  bool operator==(const FileDescriptorSet&) const;
+  bool operator!=(const FileDescriptorSet& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<FileDescriptorProto>& file() const { return file_; }
+  std::vector<FileDescriptorProto>* mutable_file() { return &file_; }
+  int file_size() const;
+  void clear_file();
+  FileDescriptorProto* add_file();
+
+ private:
+  std::vector<FileDescriptorProto> file_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/gpu_counter_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_GPU_COUNTER_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_GPU_COUNTER_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class GpuCounterDescriptor;
+class GpuCounterDescriptor_GpuCounterBlock;
+class GpuCounterDescriptor_GpuCounterSpec;
+enum GpuCounterDescriptor_GpuCounterGroup : int;
+enum GpuCounterDescriptor_MeasureUnit : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum GpuCounterDescriptor_GpuCounterGroup : int {
+  GpuCounterDescriptor_GpuCounterGroup_UNCLASSIFIED = 0,
+  GpuCounterDescriptor_GpuCounterGroup_SYSTEM = 1,
+  GpuCounterDescriptor_GpuCounterGroup_VERTICES = 2,
+  GpuCounterDescriptor_GpuCounterGroup_FRAGMENTS = 3,
+  GpuCounterDescriptor_GpuCounterGroup_PRIMITIVES = 4,
+  GpuCounterDescriptor_GpuCounterGroup_MEMORY = 5,
+  GpuCounterDescriptor_GpuCounterGroup_COMPUTE = 6,
+};
+enum GpuCounterDescriptor_MeasureUnit : int {
+  GpuCounterDescriptor_MeasureUnit_NONE = 0,
+  GpuCounterDescriptor_MeasureUnit_BIT = 1,
+  GpuCounterDescriptor_MeasureUnit_KILOBIT = 2,
+  GpuCounterDescriptor_MeasureUnit_MEGABIT = 3,
+  GpuCounterDescriptor_MeasureUnit_GIGABIT = 4,
+  GpuCounterDescriptor_MeasureUnit_TERABIT = 5,
+  GpuCounterDescriptor_MeasureUnit_PETABIT = 6,
+  GpuCounterDescriptor_MeasureUnit_BYTE = 7,
+  GpuCounterDescriptor_MeasureUnit_KILOBYTE = 8,
+  GpuCounterDescriptor_MeasureUnit_MEGABYTE = 9,
+  GpuCounterDescriptor_MeasureUnit_GIGABYTE = 10,
+  GpuCounterDescriptor_MeasureUnit_TERABYTE = 11,
+  GpuCounterDescriptor_MeasureUnit_PETABYTE = 12,
+  GpuCounterDescriptor_MeasureUnit_HERTZ = 13,
+  GpuCounterDescriptor_MeasureUnit_KILOHERTZ = 14,
+  GpuCounterDescriptor_MeasureUnit_MEGAHERTZ = 15,
+  GpuCounterDescriptor_MeasureUnit_GIGAHERTZ = 16,
+  GpuCounterDescriptor_MeasureUnit_TERAHERTZ = 17,
+  GpuCounterDescriptor_MeasureUnit_PETAHERTZ = 18,
+  GpuCounterDescriptor_MeasureUnit_NANOSECOND = 19,
+  GpuCounterDescriptor_MeasureUnit_MICROSECOND = 20,
+  GpuCounterDescriptor_MeasureUnit_MILLISECOND = 21,
+  GpuCounterDescriptor_MeasureUnit_SECOND = 22,
+  GpuCounterDescriptor_MeasureUnit_MINUTE = 23,
+  GpuCounterDescriptor_MeasureUnit_HOUR = 24,
+  GpuCounterDescriptor_MeasureUnit_VERTEX = 25,
+  GpuCounterDescriptor_MeasureUnit_PIXEL = 26,
+  GpuCounterDescriptor_MeasureUnit_TRIANGLE = 27,
+  GpuCounterDescriptor_MeasureUnit_PRIMITIVE = 38,
+  GpuCounterDescriptor_MeasureUnit_FRAGMENT = 39,
+  GpuCounterDescriptor_MeasureUnit_MILLIWATT = 28,
+  GpuCounterDescriptor_MeasureUnit_WATT = 29,
+  GpuCounterDescriptor_MeasureUnit_KILOWATT = 30,
+  GpuCounterDescriptor_MeasureUnit_JOULE = 31,
+  GpuCounterDescriptor_MeasureUnit_VOLT = 32,
+  GpuCounterDescriptor_MeasureUnit_AMPERE = 33,
+  GpuCounterDescriptor_MeasureUnit_CELSIUS = 34,
+  GpuCounterDescriptor_MeasureUnit_FAHRENHEIT = 35,
+  GpuCounterDescriptor_MeasureUnit_KELVIN = 36,
+  GpuCounterDescriptor_MeasureUnit_PERCENT = 37,
+  GpuCounterDescriptor_MeasureUnit_INSTRUCTION = 40,
+};
+
+class PERFETTO_EXPORT GpuCounterDescriptor : public ::protozero::CppMessageObj {
+ public:
+  using GpuCounterSpec = GpuCounterDescriptor_GpuCounterSpec;
+  using GpuCounterBlock = GpuCounterDescriptor_GpuCounterBlock;
+  using GpuCounterGroup = GpuCounterDescriptor_GpuCounterGroup;
+  static constexpr auto UNCLASSIFIED = GpuCounterDescriptor_GpuCounterGroup_UNCLASSIFIED;
+  static constexpr auto SYSTEM = GpuCounterDescriptor_GpuCounterGroup_SYSTEM;
+  static constexpr auto VERTICES = GpuCounterDescriptor_GpuCounterGroup_VERTICES;
+  static constexpr auto FRAGMENTS = GpuCounterDescriptor_GpuCounterGroup_FRAGMENTS;
+  static constexpr auto PRIMITIVES = GpuCounterDescriptor_GpuCounterGroup_PRIMITIVES;
+  static constexpr auto MEMORY = GpuCounterDescriptor_GpuCounterGroup_MEMORY;
+  static constexpr auto COMPUTE = GpuCounterDescriptor_GpuCounterGroup_COMPUTE;
+  static constexpr auto GpuCounterGroup_MIN = GpuCounterDescriptor_GpuCounterGroup_UNCLASSIFIED;
+  static constexpr auto GpuCounterGroup_MAX = GpuCounterDescriptor_GpuCounterGroup_COMPUTE;
+  using MeasureUnit = GpuCounterDescriptor_MeasureUnit;
+  static constexpr auto NONE = GpuCounterDescriptor_MeasureUnit_NONE;
+  static constexpr auto BIT = GpuCounterDescriptor_MeasureUnit_BIT;
+  static constexpr auto KILOBIT = GpuCounterDescriptor_MeasureUnit_KILOBIT;
+  static constexpr auto MEGABIT = GpuCounterDescriptor_MeasureUnit_MEGABIT;
+  static constexpr auto GIGABIT = GpuCounterDescriptor_MeasureUnit_GIGABIT;
+  static constexpr auto TERABIT = GpuCounterDescriptor_MeasureUnit_TERABIT;
+  static constexpr auto PETABIT = GpuCounterDescriptor_MeasureUnit_PETABIT;
+  static constexpr auto BYTE = GpuCounterDescriptor_MeasureUnit_BYTE;
+  static constexpr auto KILOBYTE = GpuCounterDescriptor_MeasureUnit_KILOBYTE;
+  static constexpr auto MEGABYTE = GpuCounterDescriptor_MeasureUnit_MEGABYTE;
+  static constexpr auto GIGABYTE = GpuCounterDescriptor_MeasureUnit_GIGABYTE;
+  static constexpr auto TERABYTE = GpuCounterDescriptor_MeasureUnit_TERABYTE;
+  static constexpr auto PETABYTE = GpuCounterDescriptor_MeasureUnit_PETABYTE;
+  static constexpr auto HERTZ = GpuCounterDescriptor_MeasureUnit_HERTZ;
+  static constexpr auto KILOHERTZ = GpuCounterDescriptor_MeasureUnit_KILOHERTZ;
+  static constexpr auto MEGAHERTZ = GpuCounterDescriptor_MeasureUnit_MEGAHERTZ;
+  static constexpr auto GIGAHERTZ = GpuCounterDescriptor_MeasureUnit_GIGAHERTZ;
+  static constexpr auto TERAHERTZ = GpuCounterDescriptor_MeasureUnit_TERAHERTZ;
+  static constexpr auto PETAHERTZ = GpuCounterDescriptor_MeasureUnit_PETAHERTZ;
+  static constexpr auto NANOSECOND = GpuCounterDescriptor_MeasureUnit_NANOSECOND;
+  static constexpr auto MICROSECOND = GpuCounterDescriptor_MeasureUnit_MICROSECOND;
+  static constexpr auto MILLISECOND = GpuCounterDescriptor_MeasureUnit_MILLISECOND;
+  static constexpr auto SECOND = GpuCounterDescriptor_MeasureUnit_SECOND;
+  static constexpr auto MINUTE = GpuCounterDescriptor_MeasureUnit_MINUTE;
+  static constexpr auto HOUR = GpuCounterDescriptor_MeasureUnit_HOUR;
+  static constexpr auto VERTEX = GpuCounterDescriptor_MeasureUnit_VERTEX;
+  static constexpr auto PIXEL = GpuCounterDescriptor_MeasureUnit_PIXEL;
+  static constexpr auto TRIANGLE = GpuCounterDescriptor_MeasureUnit_TRIANGLE;
+  static constexpr auto PRIMITIVE = GpuCounterDescriptor_MeasureUnit_PRIMITIVE;
+  static constexpr auto FRAGMENT = GpuCounterDescriptor_MeasureUnit_FRAGMENT;
+  static constexpr auto MILLIWATT = GpuCounterDescriptor_MeasureUnit_MILLIWATT;
+  static constexpr auto WATT = GpuCounterDescriptor_MeasureUnit_WATT;
+  static constexpr auto KILOWATT = GpuCounterDescriptor_MeasureUnit_KILOWATT;
+  static constexpr auto JOULE = GpuCounterDescriptor_MeasureUnit_JOULE;
+  static constexpr auto VOLT = GpuCounterDescriptor_MeasureUnit_VOLT;
+  static constexpr auto AMPERE = GpuCounterDescriptor_MeasureUnit_AMPERE;
+  static constexpr auto CELSIUS = GpuCounterDescriptor_MeasureUnit_CELSIUS;
+  static constexpr auto FAHRENHEIT = GpuCounterDescriptor_MeasureUnit_FAHRENHEIT;
+  static constexpr auto KELVIN = GpuCounterDescriptor_MeasureUnit_KELVIN;
+  static constexpr auto PERCENT = GpuCounterDescriptor_MeasureUnit_PERCENT;
+  static constexpr auto INSTRUCTION = GpuCounterDescriptor_MeasureUnit_INSTRUCTION;
+  static constexpr auto MeasureUnit_MIN = GpuCounterDescriptor_MeasureUnit_NONE;
+  static constexpr auto MeasureUnit_MAX = GpuCounterDescriptor_MeasureUnit_INSTRUCTION;
+  enum FieldNumbers {
+    kSpecsFieldNumber = 1,
+    kBlocksFieldNumber = 2,
+    kMinSamplingPeriodNsFieldNumber = 3,
+    kMaxSamplingPeriodNsFieldNumber = 4,
+    kSupportsInstrumentedSamplingFieldNumber = 5,
+  };
+
+  GpuCounterDescriptor();
+  ~GpuCounterDescriptor() override;
+  GpuCounterDescriptor(GpuCounterDescriptor&&) noexcept;
+  GpuCounterDescriptor& operator=(GpuCounterDescriptor&&);
+  GpuCounterDescriptor(const GpuCounterDescriptor&);
+  GpuCounterDescriptor& operator=(const GpuCounterDescriptor&);
+  bool operator==(const GpuCounterDescriptor&) const;
+  bool operator!=(const GpuCounterDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<GpuCounterDescriptor_GpuCounterSpec>& specs() const { return specs_; }
+  std::vector<GpuCounterDescriptor_GpuCounterSpec>* mutable_specs() { return &specs_; }
+  int specs_size() const;
+  void clear_specs();
+  GpuCounterDescriptor_GpuCounterSpec* add_specs();
+
+  const std::vector<GpuCounterDescriptor_GpuCounterBlock>& blocks() const { return blocks_; }
+  std::vector<GpuCounterDescriptor_GpuCounterBlock>* mutable_blocks() { return &blocks_; }
+  int blocks_size() const;
+  void clear_blocks();
+  GpuCounterDescriptor_GpuCounterBlock* add_blocks();
+
+  bool has_min_sampling_period_ns() const { return _has_field_[3]; }
+  uint64_t min_sampling_period_ns() const { return min_sampling_period_ns_; }
+  void set_min_sampling_period_ns(uint64_t value) { min_sampling_period_ns_ = value; _has_field_.set(3); }
+
+  bool has_max_sampling_period_ns() const { return _has_field_[4]; }
+  uint64_t max_sampling_period_ns() const { return max_sampling_period_ns_; }
+  void set_max_sampling_period_ns(uint64_t value) { max_sampling_period_ns_ = value; _has_field_.set(4); }
+
+  bool has_supports_instrumented_sampling() const { return _has_field_[5]; }
+  bool supports_instrumented_sampling() const { return supports_instrumented_sampling_; }
+  void set_supports_instrumented_sampling(bool value) { supports_instrumented_sampling_ = value; _has_field_.set(5); }
+
+ private:
+  std::vector<GpuCounterDescriptor_GpuCounterSpec> specs_;
+  std::vector<GpuCounterDescriptor_GpuCounterBlock> blocks_;
+  uint64_t min_sampling_period_ns_{};
+  uint64_t max_sampling_period_ns_{};
+  bool supports_instrumented_sampling_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GpuCounterDescriptor_GpuCounterBlock : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kBlockIdFieldNumber = 1,
+    kBlockCapacityFieldNumber = 2,
+    kNameFieldNumber = 3,
+    kDescriptionFieldNumber = 4,
+    kCounterIdsFieldNumber = 5,
+  };
+
+  GpuCounterDescriptor_GpuCounterBlock();
+  ~GpuCounterDescriptor_GpuCounterBlock() override;
+  GpuCounterDescriptor_GpuCounterBlock(GpuCounterDescriptor_GpuCounterBlock&&) noexcept;
+  GpuCounterDescriptor_GpuCounterBlock& operator=(GpuCounterDescriptor_GpuCounterBlock&&);
+  GpuCounterDescriptor_GpuCounterBlock(const GpuCounterDescriptor_GpuCounterBlock&);
+  GpuCounterDescriptor_GpuCounterBlock& operator=(const GpuCounterDescriptor_GpuCounterBlock&);
+  bool operator==(const GpuCounterDescriptor_GpuCounterBlock&) const;
+  bool operator!=(const GpuCounterDescriptor_GpuCounterBlock& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_block_id() const { return _has_field_[1]; }
+  uint32_t block_id() const { return block_id_; }
+  void set_block_id(uint32_t value) { block_id_ = value; _has_field_.set(1); }
+
+  bool has_block_capacity() const { return _has_field_[2]; }
+  uint32_t block_capacity() const { return block_capacity_; }
+  void set_block_capacity(uint32_t value) { block_capacity_ = value; _has_field_.set(2); }
+
+  bool has_name() const { return _has_field_[3]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(3); }
+
+  bool has_description() const { return _has_field_[4]; }
+  const std::string& description() const { return description_; }
+  void set_description(const std::string& value) { description_ = value; _has_field_.set(4); }
+
+  const std::vector<uint32_t>& counter_ids() const { return counter_ids_; }
+  std::vector<uint32_t>* mutable_counter_ids() { return &counter_ids_; }
+  int counter_ids_size() const { return static_cast<int>(counter_ids_.size()); }
+  void clear_counter_ids() { counter_ids_.clear(); }
+  void add_counter_ids(uint32_t value) { counter_ids_.emplace_back(value); }
+  uint32_t* add_counter_ids() { counter_ids_.emplace_back(); return &counter_ids_.back(); }
+
+ private:
+  uint32_t block_id_{};
+  uint32_t block_capacity_{};
+  std::string name_{};
+  std::string description_{};
+  std::vector<uint32_t> counter_ids_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GpuCounterDescriptor_GpuCounterSpec : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kCounterIdFieldNumber = 1,
+    kNameFieldNumber = 2,
+    kDescriptionFieldNumber = 3,
+    kIntPeakValueFieldNumber = 5,
+    kDoublePeakValueFieldNumber = 6,
+    kNumeratorUnitsFieldNumber = 7,
+    kDenominatorUnitsFieldNumber = 8,
+    kSelectByDefaultFieldNumber = 9,
+    kGroupsFieldNumber = 10,
+  };
+
+  GpuCounterDescriptor_GpuCounterSpec();
+  ~GpuCounterDescriptor_GpuCounterSpec() override;
+  GpuCounterDescriptor_GpuCounterSpec(GpuCounterDescriptor_GpuCounterSpec&&) noexcept;
+  GpuCounterDescriptor_GpuCounterSpec& operator=(GpuCounterDescriptor_GpuCounterSpec&&);
+  GpuCounterDescriptor_GpuCounterSpec(const GpuCounterDescriptor_GpuCounterSpec&);
+  GpuCounterDescriptor_GpuCounterSpec& operator=(const GpuCounterDescriptor_GpuCounterSpec&);
+  bool operator==(const GpuCounterDescriptor_GpuCounterSpec&) const;
+  bool operator!=(const GpuCounterDescriptor_GpuCounterSpec& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_counter_id() const { return _has_field_[1]; }
+  uint32_t counter_id() const { return counter_id_; }
+  void set_counter_id(uint32_t value) { counter_id_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+  bool has_description() const { return _has_field_[3]; }
+  const std::string& description() const { return description_; }
+  void set_description(const std::string& value) { description_ = value; _has_field_.set(3); }
+
+  bool has_int_peak_value() const { return _has_field_[5]; }
+  int64_t int_peak_value() const { return int_peak_value_; }
+  void set_int_peak_value(int64_t value) { int_peak_value_ = value; _has_field_.set(5); }
+
+  bool has_double_peak_value() const { return _has_field_[6]; }
+  double double_peak_value() const { return double_peak_value_; }
+  void set_double_peak_value(double value) { double_peak_value_ = value; _has_field_.set(6); }
+
+  const std::vector<GpuCounterDescriptor_MeasureUnit>& numerator_units() const { return numerator_units_; }
+  std::vector<GpuCounterDescriptor_MeasureUnit>* mutable_numerator_units() { return &numerator_units_; }
+  int numerator_units_size() const { return static_cast<int>(numerator_units_.size()); }
+  void clear_numerator_units() { numerator_units_.clear(); }
+  void add_numerator_units(GpuCounterDescriptor_MeasureUnit value) { numerator_units_.emplace_back(value); }
+  GpuCounterDescriptor_MeasureUnit* add_numerator_units() { numerator_units_.emplace_back(); return &numerator_units_.back(); }
+
+  const std::vector<GpuCounterDescriptor_MeasureUnit>& denominator_units() const { return denominator_units_; }
+  std::vector<GpuCounterDescriptor_MeasureUnit>* mutable_denominator_units() { return &denominator_units_; }
+  int denominator_units_size() const { return static_cast<int>(denominator_units_.size()); }
+  void clear_denominator_units() { denominator_units_.clear(); }
+  void add_denominator_units(GpuCounterDescriptor_MeasureUnit value) { denominator_units_.emplace_back(value); }
+  GpuCounterDescriptor_MeasureUnit* add_denominator_units() { denominator_units_.emplace_back(); return &denominator_units_.back(); }
+
+  bool has_select_by_default() const { return _has_field_[9]; }
+  bool select_by_default() const { return select_by_default_; }
+  void set_select_by_default(bool value) { select_by_default_ = value; _has_field_.set(9); }
+
+  const std::vector<GpuCounterDescriptor_GpuCounterGroup>& groups() const { return groups_; }
+  std::vector<GpuCounterDescriptor_GpuCounterGroup>* mutable_groups() { return &groups_; }
+  int groups_size() const { return static_cast<int>(groups_.size()); }
+  void clear_groups() { groups_.clear(); }
+  void add_groups(GpuCounterDescriptor_GpuCounterGroup value) { groups_.emplace_back(value); }
+  GpuCounterDescriptor_GpuCounterGroup* add_groups() { groups_.emplace_back(); return &groups_.back(); }
+
+ private:
+  uint32_t counter_id_{};
+  std::string name_{};
+  std::string description_{};
+  int64_t int_peak_value_{};
+  double double_peak_value_{};
+  std::vector<GpuCounterDescriptor_MeasureUnit> numerator_units_;
+  std::vector<GpuCounterDescriptor_MeasureUnit> denominator_units_;
+  bool select_by_default_{};
+  std::vector<GpuCounterDescriptor_GpuCounterGroup> groups_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<11> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_GPU_COUNTER_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/interceptor_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_INTERCEPTOR_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_INTERCEPTOR_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class InterceptorDescriptor;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT InterceptorDescriptor : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+  };
+
+  InterceptorDescriptor();
+  ~InterceptorDescriptor() override;
+  InterceptorDescriptor(InterceptorDescriptor&&) noexcept;
+  InterceptorDescriptor& operator=(InterceptorDescriptor&&);
+  InterceptorDescriptor(const InterceptorDescriptor&);
+  InterceptorDescriptor& operator=(const InterceptorDescriptor&);
+  bool operator==(const InterceptorDescriptor&) const;
+  bool operator!=(const InterceptorDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+ private:
+  std::string name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_INTERCEPTOR_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/observable_events.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_OBSERVABLE_EVENTS_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_OBSERVABLE_EVENTS_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ObservableEvents;
+class ObservableEvents_DataSourceInstanceStateChange;
+enum ObservableEvents_Type : int;
+enum ObservableEvents_DataSourceInstanceState : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ObservableEvents_Type : int {
+  ObservableEvents_Type_TYPE_UNSPECIFIED = 0,
+  ObservableEvents_Type_TYPE_DATA_SOURCES_INSTANCES = 1,
+  ObservableEvents_Type_TYPE_ALL_DATA_SOURCES_STARTED = 2,
+};
+enum ObservableEvents_DataSourceInstanceState : int {
+  ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STOPPED = 1,
+  ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STARTED = 2,
+};
+
+class PERFETTO_EXPORT ObservableEvents : public ::protozero::CppMessageObj {
+ public:
+  using DataSourceInstanceStateChange = ObservableEvents_DataSourceInstanceStateChange;
+  using Type = ObservableEvents_Type;
+  static constexpr auto TYPE_UNSPECIFIED = ObservableEvents_Type_TYPE_UNSPECIFIED;
+  static constexpr auto TYPE_DATA_SOURCES_INSTANCES = ObservableEvents_Type_TYPE_DATA_SOURCES_INSTANCES;
+  static constexpr auto TYPE_ALL_DATA_SOURCES_STARTED = ObservableEvents_Type_TYPE_ALL_DATA_SOURCES_STARTED;
+  static constexpr auto Type_MIN = ObservableEvents_Type_TYPE_UNSPECIFIED;
+  static constexpr auto Type_MAX = ObservableEvents_Type_TYPE_ALL_DATA_SOURCES_STARTED;
+  using DataSourceInstanceState = ObservableEvents_DataSourceInstanceState;
+  static constexpr auto DATA_SOURCE_INSTANCE_STATE_STOPPED = ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STOPPED;
+  static constexpr auto DATA_SOURCE_INSTANCE_STATE_STARTED = ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STARTED;
+  static constexpr auto DataSourceInstanceState_MIN = ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STOPPED;
+  static constexpr auto DataSourceInstanceState_MAX = ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STARTED;
+  enum FieldNumbers {
+    kInstanceStateChangesFieldNumber = 1,
+    kAllDataSourcesStartedFieldNumber = 2,
+  };
+
+  ObservableEvents();
+  ~ObservableEvents() override;
+  ObservableEvents(ObservableEvents&&) noexcept;
+  ObservableEvents& operator=(ObservableEvents&&);
+  ObservableEvents(const ObservableEvents&);
+  ObservableEvents& operator=(const ObservableEvents&);
+  bool operator==(const ObservableEvents&) const;
+  bool operator!=(const ObservableEvents& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<ObservableEvents_DataSourceInstanceStateChange>& instance_state_changes() const { return instance_state_changes_; }
+  std::vector<ObservableEvents_DataSourceInstanceStateChange>* mutable_instance_state_changes() { return &instance_state_changes_; }
+  int instance_state_changes_size() const;
+  void clear_instance_state_changes();
+  ObservableEvents_DataSourceInstanceStateChange* add_instance_state_changes();
+
+  bool has_all_data_sources_started() const { return _has_field_[2]; }
+  bool all_data_sources_started() const { return all_data_sources_started_; }
+  void set_all_data_sources_started(bool value) { all_data_sources_started_ = value; _has_field_.set(2); }
+
+ private:
+  std::vector<ObservableEvents_DataSourceInstanceStateChange> instance_state_changes_;
+  bool all_data_sources_started_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ObservableEvents_DataSourceInstanceStateChange : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kProducerNameFieldNumber = 1,
+    kDataSourceNameFieldNumber = 2,
+    kStateFieldNumber = 3,
+  };
+
+  ObservableEvents_DataSourceInstanceStateChange();
+  ~ObservableEvents_DataSourceInstanceStateChange() override;
+  ObservableEvents_DataSourceInstanceStateChange(ObservableEvents_DataSourceInstanceStateChange&&) noexcept;
+  ObservableEvents_DataSourceInstanceStateChange& operator=(ObservableEvents_DataSourceInstanceStateChange&&);
+  ObservableEvents_DataSourceInstanceStateChange(const ObservableEvents_DataSourceInstanceStateChange&);
+  ObservableEvents_DataSourceInstanceStateChange& operator=(const ObservableEvents_DataSourceInstanceStateChange&);
+  bool operator==(const ObservableEvents_DataSourceInstanceStateChange&) const;
+  bool operator!=(const ObservableEvents_DataSourceInstanceStateChange& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_producer_name() const { return _has_field_[1]; }
+  const std::string& producer_name() const { return producer_name_; }
+  void set_producer_name(const std::string& value) { producer_name_ = value; _has_field_.set(1); }
+
+  bool has_data_source_name() const { return _has_field_[2]; }
+  const std::string& data_source_name() const { return data_source_name_; }
+  void set_data_source_name(const std::string& value) { data_source_name_ = value; _has_field_.set(2); }
+
+  bool has_state() const { return _has_field_[3]; }
+  ObservableEvents_DataSourceInstanceState state() const { return state_; }
+  void set_state(ObservableEvents_DataSourceInstanceState value) { state_ = value; _has_field_.set(3); }
+
+ private:
+  std::string producer_name_{};
+  std::string data_source_name_{};
+  ObservableEvents_DataSourceInstanceState state_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_OBSERVABLE_EVENTS_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/perf_events.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_PERF_EVENTS_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_PERF_EVENTS_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class PerfEvents;
+class PerfEvents_Tracepoint;
+class PerfEvents_Timebase;
+enum PerfEvents_Counter : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum PerfEvents_Counter : int {
+  PerfEvents_Counter_UNKNOWN_COUNTER = 0,
+  PerfEvents_Counter_SW_CPU_CLOCK = 1,
+  PerfEvents_Counter_SW_PAGE_FAULTS = 2,
+  PerfEvents_Counter_HW_CPU_CYCLES = 10,
+  PerfEvents_Counter_HW_INSTRUCTIONS = 11,
+};
+
+class PERFETTO_EXPORT PerfEvents : public ::protozero::CppMessageObj {
+ public:
+  using Timebase = PerfEvents_Timebase;
+  using Tracepoint = PerfEvents_Tracepoint;
+  using Counter = PerfEvents_Counter;
+  static constexpr auto UNKNOWN_COUNTER = PerfEvents_Counter_UNKNOWN_COUNTER;
+  static constexpr auto SW_CPU_CLOCK = PerfEvents_Counter_SW_CPU_CLOCK;
+  static constexpr auto SW_PAGE_FAULTS = PerfEvents_Counter_SW_PAGE_FAULTS;
+  static constexpr auto HW_CPU_CYCLES = PerfEvents_Counter_HW_CPU_CYCLES;
+  static constexpr auto HW_INSTRUCTIONS = PerfEvents_Counter_HW_INSTRUCTIONS;
+  static constexpr auto Counter_MIN = PerfEvents_Counter_UNKNOWN_COUNTER;
+  static constexpr auto Counter_MAX = PerfEvents_Counter_HW_INSTRUCTIONS;
+  enum FieldNumbers {
+  };
+
+  PerfEvents();
+  ~PerfEvents() override;
+  PerfEvents(PerfEvents&&) noexcept;
+  PerfEvents& operator=(PerfEvents&&);
+  PerfEvents(const PerfEvents&);
+  PerfEvents& operator=(const PerfEvents&);
+  bool operator==(const PerfEvents&) const;
+  bool operator!=(const PerfEvents& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT PerfEvents_Tracepoint : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kFilterFieldNumber = 2,
+  };
+
+  PerfEvents_Tracepoint();
+  ~PerfEvents_Tracepoint() override;
+  PerfEvents_Tracepoint(PerfEvents_Tracepoint&&) noexcept;
+  PerfEvents_Tracepoint& operator=(PerfEvents_Tracepoint&&);
+  PerfEvents_Tracepoint(const PerfEvents_Tracepoint&);
+  PerfEvents_Tracepoint& operator=(const PerfEvents_Tracepoint&);
+  bool operator==(const PerfEvents_Tracepoint&) const;
+  bool operator!=(const PerfEvents_Tracepoint& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_filter() const { return _has_field_[2]; }
+  const std::string& filter() const { return filter_; }
+  void set_filter(const std::string& value) { filter_ = value; _has_field_.set(2); }
+
+ private:
+  std::string name_{};
+  std::string filter_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT PerfEvents_Timebase : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kFrequencyFieldNumber = 2,
+    kPeriodFieldNumber = 1,
+    kCounterFieldNumber = 4,
+    kTracepointFieldNumber = 3,
+  };
+
+  PerfEvents_Timebase();
+  ~PerfEvents_Timebase() override;
+  PerfEvents_Timebase(PerfEvents_Timebase&&) noexcept;
+  PerfEvents_Timebase& operator=(PerfEvents_Timebase&&);
+  PerfEvents_Timebase(const PerfEvents_Timebase&);
+  PerfEvents_Timebase& operator=(const PerfEvents_Timebase&);
+  bool operator==(const PerfEvents_Timebase&) const;
+  bool operator!=(const PerfEvents_Timebase& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_frequency() const { return _has_field_[2]; }
+  uint64_t frequency() const { return frequency_; }
+  void set_frequency(uint64_t value) { frequency_ = value; _has_field_.set(2); }
+
+  bool has_period() const { return _has_field_[1]; }
+  uint64_t period() const { return period_; }
+  void set_period(uint64_t value) { period_ = value; _has_field_.set(1); }
+
+  bool has_counter() const { return _has_field_[4]; }
+  PerfEvents_Counter counter() const { return counter_; }
+  void set_counter(PerfEvents_Counter value) { counter_ = value; _has_field_.set(4); }
+
+  bool has_tracepoint() const { return _has_field_[3]; }
+  const PerfEvents_Tracepoint& tracepoint() const { return *tracepoint_; }
+  PerfEvents_Tracepoint* mutable_tracepoint() { _has_field_.set(3); return tracepoint_.get(); }
+
+ private:
+  uint64_t frequency_{};
+  uint64_t period_{};
+  PerfEvents_Counter counter_{};
+  ::protozero::CopyablePtr<PerfEvents_Tracepoint> tracepoint_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_PERF_EVENTS_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/sys_stats_counters.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_SYS_STATS_COUNTERS_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_SYS_STATS_COUNTERS_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum MeminfoCounters : int;
+enum VmstatCounters : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum MeminfoCounters : int {
+  MEMINFO_UNSPECIFIED = 0,
+  MEMINFO_MEM_TOTAL = 1,
+  MEMINFO_MEM_FREE = 2,
+  MEMINFO_MEM_AVAILABLE = 3,
+  MEMINFO_BUFFERS = 4,
+  MEMINFO_CACHED = 5,
+  MEMINFO_SWAP_CACHED = 6,
+  MEMINFO_ACTIVE = 7,
+  MEMINFO_INACTIVE = 8,
+  MEMINFO_ACTIVE_ANON = 9,
+  MEMINFO_INACTIVE_ANON = 10,
+  MEMINFO_ACTIVE_FILE = 11,
+  MEMINFO_INACTIVE_FILE = 12,
+  MEMINFO_UNEVICTABLE = 13,
+  MEMINFO_MLOCKED = 14,
+  MEMINFO_SWAP_TOTAL = 15,
+  MEMINFO_SWAP_FREE = 16,
+  MEMINFO_DIRTY = 17,
+  MEMINFO_WRITEBACK = 18,
+  MEMINFO_ANON_PAGES = 19,
+  MEMINFO_MAPPED = 20,
+  MEMINFO_SHMEM = 21,
+  MEMINFO_SLAB = 22,
+  MEMINFO_SLAB_RECLAIMABLE = 23,
+  MEMINFO_SLAB_UNRECLAIMABLE = 24,
+  MEMINFO_KERNEL_STACK = 25,
+  MEMINFO_PAGE_TABLES = 26,
+  MEMINFO_COMMIT_LIMIT = 27,
+  MEMINFO_COMMITED_AS = 28,
+  MEMINFO_VMALLOC_TOTAL = 29,
+  MEMINFO_VMALLOC_USED = 30,
+  MEMINFO_VMALLOC_CHUNK = 31,
+  MEMINFO_CMA_TOTAL = 32,
+  MEMINFO_CMA_FREE = 33,
+};
+enum VmstatCounters : int {
+  VMSTAT_UNSPECIFIED = 0,
+  VMSTAT_NR_FREE_PAGES = 1,
+  VMSTAT_NR_ALLOC_BATCH = 2,
+  VMSTAT_NR_INACTIVE_ANON = 3,
+  VMSTAT_NR_ACTIVE_ANON = 4,
+  VMSTAT_NR_INACTIVE_FILE = 5,
+  VMSTAT_NR_ACTIVE_FILE = 6,
+  VMSTAT_NR_UNEVICTABLE = 7,
+  VMSTAT_NR_MLOCK = 8,
+  VMSTAT_NR_ANON_PAGES = 9,
+  VMSTAT_NR_MAPPED = 10,
+  VMSTAT_NR_FILE_PAGES = 11,
+  VMSTAT_NR_DIRTY = 12,
+  VMSTAT_NR_WRITEBACK = 13,
+  VMSTAT_NR_SLAB_RECLAIMABLE = 14,
+  VMSTAT_NR_SLAB_UNRECLAIMABLE = 15,
+  VMSTAT_NR_PAGE_TABLE_PAGES = 16,
+  VMSTAT_NR_KERNEL_STACK = 17,
+  VMSTAT_NR_OVERHEAD = 18,
+  VMSTAT_NR_UNSTABLE = 19,
+  VMSTAT_NR_BOUNCE = 20,
+  VMSTAT_NR_VMSCAN_WRITE = 21,
+  VMSTAT_NR_VMSCAN_IMMEDIATE_RECLAIM = 22,
+  VMSTAT_NR_WRITEBACK_TEMP = 23,
+  VMSTAT_NR_ISOLATED_ANON = 24,
+  VMSTAT_NR_ISOLATED_FILE = 25,
+  VMSTAT_NR_SHMEM = 26,
+  VMSTAT_NR_DIRTIED = 27,
+  VMSTAT_NR_WRITTEN = 28,
+  VMSTAT_NR_PAGES_SCANNED = 29,
+  VMSTAT_WORKINGSET_REFAULT = 30,
+  VMSTAT_WORKINGSET_ACTIVATE = 31,
+  VMSTAT_WORKINGSET_NODERECLAIM = 32,
+  VMSTAT_NR_ANON_TRANSPARENT_HUGEPAGES = 33,
+  VMSTAT_NR_FREE_CMA = 34,
+  VMSTAT_NR_SWAPCACHE = 35,
+  VMSTAT_NR_DIRTY_THRESHOLD = 36,
+  VMSTAT_NR_DIRTY_BACKGROUND_THRESHOLD = 37,
+  VMSTAT_PGPGIN = 38,
+  VMSTAT_PGPGOUT = 39,
+  VMSTAT_PGPGOUTCLEAN = 40,
+  VMSTAT_PSWPIN = 41,
+  VMSTAT_PSWPOUT = 42,
+  VMSTAT_PGALLOC_DMA = 43,
+  VMSTAT_PGALLOC_NORMAL = 44,
+  VMSTAT_PGALLOC_MOVABLE = 45,
+  VMSTAT_PGFREE = 46,
+  VMSTAT_PGACTIVATE = 47,
+  VMSTAT_PGDEACTIVATE = 48,
+  VMSTAT_PGFAULT = 49,
+  VMSTAT_PGMAJFAULT = 50,
+  VMSTAT_PGREFILL_DMA = 51,
+  VMSTAT_PGREFILL_NORMAL = 52,
+  VMSTAT_PGREFILL_MOVABLE = 53,
+  VMSTAT_PGSTEAL_KSWAPD_DMA = 54,
+  VMSTAT_PGSTEAL_KSWAPD_NORMAL = 55,
+  VMSTAT_PGSTEAL_KSWAPD_MOVABLE = 56,
+  VMSTAT_PGSTEAL_DIRECT_DMA = 57,
+  VMSTAT_PGSTEAL_DIRECT_NORMAL = 58,
+  VMSTAT_PGSTEAL_DIRECT_MOVABLE = 59,
+  VMSTAT_PGSCAN_KSWAPD_DMA = 60,
+  VMSTAT_PGSCAN_KSWAPD_NORMAL = 61,
+  VMSTAT_PGSCAN_KSWAPD_MOVABLE = 62,
+  VMSTAT_PGSCAN_DIRECT_DMA = 63,
+  VMSTAT_PGSCAN_DIRECT_NORMAL = 64,
+  VMSTAT_PGSCAN_DIRECT_MOVABLE = 65,
+  VMSTAT_PGSCAN_DIRECT_THROTTLE = 66,
+  VMSTAT_PGINODESTEAL = 67,
+  VMSTAT_SLABS_SCANNED = 68,
+  VMSTAT_KSWAPD_INODESTEAL = 69,
+  VMSTAT_KSWAPD_LOW_WMARK_HIT_QUICKLY = 70,
+  VMSTAT_KSWAPD_HIGH_WMARK_HIT_QUICKLY = 71,
+  VMSTAT_PAGEOUTRUN = 72,
+  VMSTAT_ALLOCSTALL = 73,
+  VMSTAT_PGROTATED = 74,
+  VMSTAT_DROP_PAGECACHE = 75,
+  VMSTAT_DROP_SLAB = 76,
+  VMSTAT_PGMIGRATE_SUCCESS = 77,
+  VMSTAT_PGMIGRATE_FAIL = 78,
+  VMSTAT_COMPACT_MIGRATE_SCANNED = 79,
+  VMSTAT_COMPACT_FREE_SCANNED = 80,
+  VMSTAT_COMPACT_ISOLATED = 81,
+  VMSTAT_COMPACT_STALL = 82,
+  VMSTAT_COMPACT_FAIL = 83,
+  VMSTAT_COMPACT_SUCCESS = 84,
+  VMSTAT_COMPACT_DAEMON_WAKE = 85,
+  VMSTAT_UNEVICTABLE_PGS_CULLED = 86,
+  VMSTAT_UNEVICTABLE_PGS_SCANNED = 87,
+  VMSTAT_UNEVICTABLE_PGS_RESCUED = 88,
+  VMSTAT_UNEVICTABLE_PGS_MLOCKED = 89,
+  VMSTAT_UNEVICTABLE_PGS_MUNLOCKED = 90,
+  VMSTAT_UNEVICTABLE_PGS_CLEARED = 91,
+  VMSTAT_UNEVICTABLE_PGS_STRANDED = 92,
+  VMSTAT_NR_ZSPAGES = 93,
+  VMSTAT_NR_ION_HEAP = 94,
+  VMSTAT_NR_GPU_HEAP = 95,
+  VMSTAT_ALLOCSTALL_DMA = 96,
+  VMSTAT_ALLOCSTALL_MOVABLE = 97,
+  VMSTAT_ALLOCSTALL_NORMAL = 98,
+  VMSTAT_COMPACT_DAEMON_FREE_SCANNED = 99,
+  VMSTAT_COMPACT_DAEMON_MIGRATE_SCANNED = 100,
+  VMSTAT_NR_FASTRPC = 101,
+  VMSTAT_NR_INDIRECTLY_RECLAIMABLE = 102,
+  VMSTAT_NR_ION_HEAP_POOL = 103,
+  VMSTAT_NR_KERNEL_MISC_RECLAIMABLE = 104,
+  VMSTAT_NR_SHADOW_CALL_STACK_BYTES = 105,
+  VMSTAT_NR_SHMEM_HUGEPAGES = 106,
+  VMSTAT_NR_SHMEM_PMDMAPPED = 107,
+  VMSTAT_NR_UNRECLAIMABLE_PAGES = 108,
+  VMSTAT_NR_ZONE_ACTIVE_ANON = 109,
+  VMSTAT_NR_ZONE_ACTIVE_FILE = 110,
+  VMSTAT_NR_ZONE_INACTIVE_ANON = 111,
+  VMSTAT_NR_ZONE_INACTIVE_FILE = 112,
+  VMSTAT_NR_ZONE_UNEVICTABLE = 113,
+  VMSTAT_NR_ZONE_WRITE_PENDING = 114,
+  VMSTAT_OOM_KILL = 115,
+  VMSTAT_PGLAZYFREE = 116,
+  VMSTAT_PGLAZYFREED = 117,
+  VMSTAT_PGREFILL = 118,
+  VMSTAT_PGSCAN_DIRECT = 119,
+  VMSTAT_PGSCAN_KSWAPD = 120,
+  VMSTAT_PGSKIP_DMA = 121,
+  VMSTAT_PGSKIP_MOVABLE = 122,
+  VMSTAT_PGSKIP_NORMAL = 123,
+  VMSTAT_PGSTEAL_DIRECT = 124,
+  VMSTAT_PGSTEAL_KSWAPD = 125,
+  VMSTAT_SWAP_RA = 126,
+  VMSTAT_SWAP_RA_HIT = 127,
+  VMSTAT_WORKINGSET_RESTORE = 128,
+};
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_SYS_STATS_COUNTERS_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/trace_stats.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACE_STATS_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACE_STATS_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TraceStats;
+class TraceStats_FilterStats;
+class TraceStats_BufferStats;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT TraceStats : public ::protozero::CppMessageObj {
+ public:
+  using BufferStats = TraceStats_BufferStats;
+  using FilterStats = TraceStats_FilterStats;
+  enum FieldNumbers {
+    kBufferStatsFieldNumber = 1,
+    kProducersConnectedFieldNumber = 2,
+    kProducersSeenFieldNumber = 3,
+    kDataSourcesRegisteredFieldNumber = 4,
+    kDataSourcesSeenFieldNumber = 5,
+    kTracingSessionsFieldNumber = 6,
+    kTotalBuffersFieldNumber = 7,
+    kChunksDiscardedFieldNumber = 8,
+    kPatchesDiscardedFieldNumber = 9,
+    kInvalidPacketsFieldNumber = 10,
+    kFilterStatsFieldNumber = 11,
+  };
+
+  TraceStats();
+  ~TraceStats() override;
+  TraceStats(TraceStats&&) noexcept;
+  TraceStats& operator=(TraceStats&&);
+  TraceStats(const TraceStats&);
+  TraceStats& operator=(const TraceStats&);
+  bool operator==(const TraceStats&) const;
+  bool operator!=(const TraceStats& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<TraceStats_BufferStats>& buffer_stats() const { return buffer_stats_; }
+  std::vector<TraceStats_BufferStats>* mutable_buffer_stats() { return &buffer_stats_; }
+  int buffer_stats_size() const;
+  void clear_buffer_stats();
+  TraceStats_BufferStats* add_buffer_stats();
+
+  bool has_producers_connected() const { return _has_field_[2]; }
+  uint32_t producers_connected() const { return producers_connected_; }
+  void set_producers_connected(uint32_t value) { producers_connected_ = value; _has_field_.set(2); }
+
+  bool has_producers_seen() const { return _has_field_[3]; }
+  uint64_t producers_seen() const { return producers_seen_; }
+  void set_producers_seen(uint64_t value) { producers_seen_ = value; _has_field_.set(3); }
+
+  bool has_data_sources_registered() const { return _has_field_[4]; }
+  uint32_t data_sources_registered() const { return data_sources_registered_; }
+  void set_data_sources_registered(uint32_t value) { data_sources_registered_ = value; _has_field_.set(4); }
+
+  bool has_data_sources_seen() const { return _has_field_[5]; }
+  uint64_t data_sources_seen() const { return data_sources_seen_; }
+  void set_data_sources_seen(uint64_t value) { data_sources_seen_ = value; _has_field_.set(5); }
+
+  bool has_tracing_sessions() const { return _has_field_[6]; }
+  uint32_t tracing_sessions() const { return tracing_sessions_; }
+  void set_tracing_sessions(uint32_t value) { tracing_sessions_ = value; _has_field_.set(6); }
+
+  bool has_total_buffers() const { return _has_field_[7]; }
+  uint32_t total_buffers() const { return total_buffers_; }
+  void set_total_buffers(uint32_t value) { total_buffers_ = value; _has_field_.set(7); }
+
+  bool has_chunks_discarded() const { return _has_field_[8]; }
+  uint64_t chunks_discarded() const { return chunks_discarded_; }
+  void set_chunks_discarded(uint64_t value) { chunks_discarded_ = value; _has_field_.set(8); }
+
+  bool has_patches_discarded() const { return _has_field_[9]; }
+  uint64_t patches_discarded() const { return patches_discarded_; }
+  void set_patches_discarded(uint64_t value) { patches_discarded_ = value; _has_field_.set(9); }
+
+  bool has_invalid_packets() const { return _has_field_[10]; }
+  uint64_t invalid_packets() const { return invalid_packets_; }
+  void set_invalid_packets(uint64_t value) { invalid_packets_ = value; _has_field_.set(10); }
+
+  bool has_filter_stats() const { return _has_field_[11]; }
+  const TraceStats_FilterStats& filter_stats() const { return *filter_stats_; }
+  TraceStats_FilterStats* mutable_filter_stats() { _has_field_.set(11); return filter_stats_.get(); }
+
+ private:
+  std::vector<TraceStats_BufferStats> buffer_stats_;
+  uint32_t producers_connected_{};
+  uint64_t producers_seen_{};
+  uint32_t data_sources_registered_{};
+  uint64_t data_sources_seen_{};
+  uint32_t tracing_sessions_{};
+  uint32_t total_buffers_{};
+  uint64_t chunks_discarded_{};
+  uint64_t patches_discarded_{};
+  uint64_t invalid_packets_{};
+  ::protozero::CopyablePtr<TraceStats_FilterStats> filter_stats_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<12> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceStats_FilterStats : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kInputPacketsFieldNumber = 1,
+    kInputBytesFieldNumber = 2,
+    kOutputBytesFieldNumber = 3,
+    kErrorsFieldNumber = 4,
+  };
+
+  TraceStats_FilterStats();
+  ~TraceStats_FilterStats() override;
+  TraceStats_FilterStats(TraceStats_FilterStats&&) noexcept;
+  TraceStats_FilterStats& operator=(TraceStats_FilterStats&&);
+  TraceStats_FilterStats(const TraceStats_FilterStats&);
+  TraceStats_FilterStats& operator=(const TraceStats_FilterStats&);
+  bool operator==(const TraceStats_FilterStats&) const;
+  bool operator!=(const TraceStats_FilterStats& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_input_packets() const { return _has_field_[1]; }
+  uint64_t input_packets() const { return input_packets_; }
+  void set_input_packets(uint64_t value) { input_packets_ = value; _has_field_.set(1); }
+
+  bool has_input_bytes() const { return _has_field_[2]; }
+  uint64_t input_bytes() const { return input_bytes_; }
+  void set_input_bytes(uint64_t value) { input_bytes_ = value; _has_field_.set(2); }
+
+  bool has_output_bytes() const { return _has_field_[3]; }
+  uint64_t output_bytes() const { return output_bytes_; }
+  void set_output_bytes(uint64_t value) { output_bytes_ = value; _has_field_.set(3); }
+
+  bool has_errors() const { return _has_field_[4]; }
+  uint64_t errors() const { return errors_; }
+  void set_errors(uint64_t value) { errors_ = value; _has_field_.set(4); }
+
+ private:
+  uint64_t input_packets_{};
+  uint64_t input_bytes_{};
+  uint64_t output_bytes_{};
+  uint64_t errors_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceStats_BufferStats : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kBufferSizeFieldNumber = 12,
+    kBytesWrittenFieldNumber = 1,
+    kBytesOverwrittenFieldNumber = 13,
+    kBytesReadFieldNumber = 14,
+    kPaddingBytesWrittenFieldNumber = 15,
+    kPaddingBytesClearedFieldNumber = 16,
+    kChunksWrittenFieldNumber = 2,
+    kChunksRewrittenFieldNumber = 10,
+    kChunksOverwrittenFieldNumber = 3,
+    kChunksDiscardedFieldNumber = 18,
+    kChunksReadFieldNumber = 17,
+    kChunksCommittedOutOfOrderFieldNumber = 11,
+    kWriteWrapCountFieldNumber = 4,
+    kPatchesSucceededFieldNumber = 5,
+    kPatchesFailedFieldNumber = 6,
+    kReadaheadsSucceededFieldNumber = 7,
+    kReadaheadsFailedFieldNumber = 8,
+    kAbiViolationsFieldNumber = 9,
+    kTraceWriterPacketLossFieldNumber = 19,
+  };
+
+  TraceStats_BufferStats();
+  ~TraceStats_BufferStats() override;
+  TraceStats_BufferStats(TraceStats_BufferStats&&) noexcept;
+  TraceStats_BufferStats& operator=(TraceStats_BufferStats&&);
+  TraceStats_BufferStats(const TraceStats_BufferStats&);
+  TraceStats_BufferStats& operator=(const TraceStats_BufferStats&);
+  bool operator==(const TraceStats_BufferStats&) const;
+  bool operator!=(const TraceStats_BufferStats& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_buffer_size() const { return _has_field_[12]; }
+  uint64_t buffer_size() const { return buffer_size_; }
+  void set_buffer_size(uint64_t value) { buffer_size_ = value; _has_field_.set(12); }
+
+  bool has_bytes_written() const { return _has_field_[1]; }
+  uint64_t bytes_written() const { return bytes_written_; }
+  void set_bytes_written(uint64_t value) { bytes_written_ = value; _has_field_.set(1); }
+
+  bool has_bytes_overwritten() const { return _has_field_[13]; }
+  uint64_t bytes_overwritten() const { return bytes_overwritten_; }
+  void set_bytes_overwritten(uint64_t value) { bytes_overwritten_ = value; _has_field_.set(13); }
+
+  bool has_bytes_read() const { return _has_field_[14]; }
+  uint64_t bytes_read() const { return bytes_read_; }
+  void set_bytes_read(uint64_t value) { bytes_read_ = value; _has_field_.set(14); }
+
+  bool has_padding_bytes_written() const { return _has_field_[15]; }
+  uint64_t padding_bytes_written() const { return padding_bytes_written_; }
+  void set_padding_bytes_written(uint64_t value) { padding_bytes_written_ = value; _has_field_.set(15); }
+
+  bool has_padding_bytes_cleared() const { return _has_field_[16]; }
+  uint64_t padding_bytes_cleared() const { return padding_bytes_cleared_; }
+  void set_padding_bytes_cleared(uint64_t value) { padding_bytes_cleared_ = value; _has_field_.set(16); }
+
+  bool has_chunks_written() const { return _has_field_[2]; }
+  uint64_t chunks_written() const { return chunks_written_; }
+  void set_chunks_written(uint64_t value) { chunks_written_ = value; _has_field_.set(2); }
+
+  bool has_chunks_rewritten() const { return _has_field_[10]; }
+  uint64_t chunks_rewritten() const { return chunks_rewritten_; }
+  void set_chunks_rewritten(uint64_t value) { chunks_rewritten_ = value; _has_field_.set(10); }
+
+  bool has_chunks_overwritten() const { return _has_field_[3]; }
+  uint64_t chunks_overwritten() const { return chunks_overwritten_; }
+  void set_chunks_overwritten(uint64_t value) { chunks_overwritten_ = value; _has_field_.set(3); }
+
+  bool has_chunks_discarded() const { return _has_field_[18]; }
+  uint64_t chunks_discarded() const { return chunks_discarded_; }
+  void set_chunks_discarded(uint64_t value) { chunks_discarded_ = value; _has_field_.set(18); }
+
+  bool has_chunks_read() const { return _has_field_[17]; }
+  uint64_t chunks_read() const { return chunks_read_; }
+  void set_chunks_read(uint64_t value) { chunks_read_ = value; _has_field_.set(17); }
+
+  bool has_chunks_committed_out_of_order() const { return _has_field_[11]; }
+  uint64_t chunks_committed_out_of_order() const { return chunks_committed_out_of_order_; }
+  void set_chunks_committed_out_of_order(uint64_t value) { chunks_committed_out_of_order_ = value; _has_field_.set(11); }
+
+  bool has_write_wrap_count() const { return _has_field_[4]; }
+  uint64_t write_wrap_count() const { return write_wrap_count_; }
+  void set_write_wrap_count(uint64_t value) { write_wrap_count_ = value; _has_field_.set(4); }
+
+  bool has_patches_succeeded() const { return _has_field_[5]; }
+  uint64_t patches_succeeded() const { return patches_succeeded_; }
+  void set_patches_succeeded(uint64_t value) { patches_succeeded_ = value; _has_field_.set(5); }
+
+  bool has_patches_failed() const { return _has_field_[6]; }
+  uint64_t patches_failed() const { return patches_failed_; }
+  void set_patches_failed(uint64_t value) { patches_failed_ = value; _has_field_.set(6); }
+
+  bool has_readaheads_succeeded() const { return _has_field_[7]; }
+  uint64_t readaheads_succeeded() const { return readaheads_succeeded_; }
+  void set_readaheads_succeeded(uint64_t value) { readaheads_succeeded_ = value; _has_field_.set(7); }
+
+  bool has_readaheads_failed() const { return _has_field_[8]; }
+  uint64_t readaheads_failed() const { return readaheads_failed_; }
+  void set_readaheads_failed(uint64_t value) { readaheads_failed_ = value; _has_field_.set(8); }
+
+  bool has_abi_violations() const { return _has_field_[9]; }
+  uint64_t abi_violations() const { return abi_violations_; }
+  void set_abi_violations(uint64_t value) { abi_violations_ = value; _has_field_.set(9); }
+
+  bool has_trace_writer_packet_loss() const { return _has_field_[19]; }
+  uint64_t trace_writer_packet_loss() const { return trace_writer_packet_loss_; }
+  void set_trace_writer_packet_loss(uint64_t value) { trace_writer_packet_loss_ = value; _has_field_.set(19); }
+
+ private:
+  uint64_t buffer_size_{};
+  uint64_t bytes_written_{};
+  uint64_t bytes_overwritten_{};
+  uint64_t bytes_read_{};
+  uint64_t padding_bytes_written_{};
+  uint64_t padding_bytes_cleared_{};
+  uint64_t chunks_written_{};
+  uint64_t chunks_rewritten_{};
+  uint64_t chunks_overwritten_{};
+  uint64_t chunks_discarded_{};
+  uint64_t chunks_read_{};
+  uint64_t chunks_committed_out_of_order_{};
+  uint64_t write_wrap_count_{};
+  uint64_t patches_succeeded_{};
+  uint64_t patches_failed_{};
+  uint64_t readaheads_succeeded_{};
+  uint64_t readaheads_failed_{};
+  uint64_t abi_violations_{};
+  uint64_t trace_writer_packet_loss_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<20> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACE_STATS_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/tracing_service_capabilities.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_CAPABILITIES_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_CAPABILITIES_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TracingServiceCapabilities;
+enum ObservableEvents_Type : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT TracingServiceCapabilities : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kHasQueryCapabilitiesFieldNumber = 1,
+    kObservableEventsFieldNumber = 2,
+    kHasTraceConfigOutputPathFieldNumber = 3,
+  };
+
+  TracingServiceCapabilities();
+  ~TracingServiceCapabilities() override;
+  TracingServiceCapabilities(TracingServiceCapabilities&&) noexcept;
+  TracingServiceCapabilities& operator=(TracingServiceCapabilities&&);
+  TracingServiceCapabilities(const TracingServiceCapabilities&);
+  TracingServiceCapabilities& operator=(const TracingServiceCapabilities&);
+  bool operator==(const TracingServiceCapabilities&) const;
+  bool operator!=(const TracingServiceCapabilities& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_has_query_capabilities() const { return _has_field_[1]; }
+  bool has_query_capabilities() const { return has_query_capabilities_; }
+  void set_has_query_capabilities(bool value) { has_query_capabilities_ = value; _has_field_.set(1); }
+
+  const std::vector<ObservableEvents_Type>& observable_events() const { return observable_events_; }
+  std::vector<ObservableEvents_Type>* mutable_observable_events() { return &observable_events_; }
+  int observable_events_size() const { return static_cast<int>(observable_events_.size()); }
+  void clear_observable_events() { observable_events_.clear(); }
+  void add_observable_events(ObservableEvents_Type value) { observable_events_.emplace_back(value); }
+  ObservableEvents_Type* add_observable_events() { observable_events_.emplace_back(); return &observable_events_.back(); }
+
+  bool has_has_trace_config_output_path() const { return _has_field_[3]; }
+  bool has_trace_config_output_path() const { return has_trace_config_output_path_; }
+  void set_has_trace_config_output_path(bool value) { has_trace_config_output_path_ = value; _has_field_.set(3); }
+
+ private:
+  bool has_query_capabilities_{};
+  std::vector<ObservableEvents_Type> observable_events_;
+  bool has_trace_config_output_path_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_CAPABILITIES_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/tracing_service_state.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_STATE_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_STATE_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TracingServiceState;
+class TracingServiceState_DataSource;
+class DataSourceDescriptor;
+class TracingServiceState_Producer;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT TracingServiceState : public ::protozero::CppMessageObj {
+ public:
+  using Producer = TracingServiceState_Producer;
+  using DataSource = TracingServiceState_DataSource;
+  enum FieldNumbers {
+    kProducersFieldNumber = 1,
+    kDataSourcesFieldNumber = 2,
+    kNumSessionsFieldNumber = 3,
+    kNumSessionsStartedFieldNumber = 4,
+    kTracingServiceVersionFieldNumber = 5,
+  };
+
+  TracingServiceState();
+  ~TracingServiceState() override;
+  TracingServiceState(TracingServiceState&&) noexcept;
+  TracingServiceState& operator=(TracingServiceState&&);
+  TracingServiceState(const TracingServiceState&);
+  TracingServiceState& operator=(const TracingServiceState&);
+  bool operator==(const TracingServiceState&) const;
+  bool operator!=(const TracingServiceState& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<TracingServiceState_Producer>& producers() const { return producers_; }
+  std::vector<TracingServiceState_Producer>* mutable_producers() { return &producers_; }
+  int producers_size() const;
+  void clear_producers();
+  TracingServiceState_Producer* add_producers();
+
+  const std::vector<TracingServiceState_DataSource>& data_sources() const { return data_sources_; }
+  std::vector<TracingServiceState_DataSource>* mutable_data_sources() { return &data_sources_; }
+  int data_sources_size() const;
+  void clear_data_sources();
+  TracingServiceState_DataSource* add_data_sources();
+
+  bool has_num_sessions() const { return _has_field_[3]; }
+  int32_t num_sessions() const { return num_sessions_; }
+  void set_num_sessions(int32_t value) { num_sessions_ = value; _has_field_.set(3); }
+
+  bool has_num_sessions_started() const { return _has_field_[4]; }
+  int32_t num_sessions_started() const { return num_sessions_started_; }
+  void set_num_sessions_started(int32_t value) { num_sessions_started_ = value; _has_field_.set(4); }
+
+  bool has_tracing_service_version() const { return _has_field_[5]; }
+  const std::string& tracing_service_version() const { return tracing_service_version_; }
+  void set_tracing_service_version(const std::string& value) { tracing_service_version_ = value; _has_field_.set(5); }
+
+ private:
+  std::vector<TracingServiceState_Producer> producers_;
+  std::vector<TracingServiceState_DataSource> data_sources_;
+  int32_t num_sessions_{};
+  int32_t num_sessions_started_{};
+  std::string tracing_service_version_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TracingServiceState_DataSource : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDsDescriptorFieldNumber = 1,
+    kProducerIdFieldNumber = 2,
+  };
+
+  TracingServiceState_DataSource();
+  ~TracingServiceState_DataSource() override;
+  TracingServiceState_DataSource(TracingServiceState_DataSource&&) noexcept;
+  TracingServiceState_DataSource& operator=(TracingServiceState_DataSource&&);
+  TracingServiceState_DataSource(const TracingServiceState_DataSource&);
+  TracingServiceState_DataSource& operator=(const TracingServiceState_DataSource&);
+  bool operator==(const TracingServiceState_DataSource&) const;
+  bool operator!=(const TracingServiceState_DataSource& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_ds_descriptor() const { return _has_field_[1]; }
+  const DataSourceDescriptor& ds_descriptor() const { return *ds_descriptor_; }
+  DataSourceDescriptor* mutable_ds_descriptor() { _has_field_.set(1); return ds_descriptor_.get(); }
+
+  bool has_producer_id() const { return _has_field_[2]; }
+  int32_t producer_id() const { return producer_id_; }
+  void set_producer_id(int32_t value) { producer_id_ = value; _has_field_.set(2); }
+
+ private:
+  ::protozero::CopyablePtr<DataSourceDescriptor> ds_descriptor_;
+  int32_t producer_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TracingServiceState_Producer : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIdFieldNumber = 1,
+    kNameFieldNumber = 2,
+    kUidFieldNumber = 3,
+    kSdkVersionFieldNumber = 4,
+  };
+
+  TracingServiceState_Producer();
+  ~TracingServiceState_Producer() override;
+  TracingServiceState_Producer(TracingServiceState_Producer&&) noexcept;
+  TracingServiceState_Producer& operator=(TracingServiceState_Producer&&);
+  TracingServiceState_Producer(const TracingServiceState_Producer&);
+  TracingServiceState_Producer& operator=(const TracingServiceState_Producer&);
+  bool operator==(const TracingServiceState_Producer&) const;
+  bool operator!=(const TracingServiceState_Producer& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_id() const { return _has_field_[1]; }
+  int32_t id() const { return id_; }
+  void set_id(int32_t value) { id_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+  bool has_uid() const { return _has_field_[3]; }
+  int32_t uid() const { return uid_; }
+  void set_uid(int32_t value) { uid_ = value; _has_field_.set(3); }
+
+  bool has_sdk_version() const { return _has_field_[4]; }
+  const std::string& sdk_version() const { return sdk_version_; }
+  void set_sdk_version(const std::string& value) { sdk_version_ = value; _has_field_.set(4); }
+
+ private:
+  int32_t id_{};
+  std::string name_{};
+  int32_t uid_{};
+  std::string sdk_version_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_STATE_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/track_event_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACK_EVENT_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACK_EVENT_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TrackEventDescriptor;
+class TrackEventCategory;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT TrackEventDescriptor : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kAvailableCategoriesFieldNumber = 1,
+  };
+
+  TrackEventDescriptor();
+  ~TrackEventDescriptor() override;
+  TrackEventDescriptor(TrackEventDescriptor&&) noexcept;
+  TrackEventDescriptor& operator=(TrackEventDescriptor&&);
+  TrackEventDescriptor(const TrackEventDescriptor&);
+  TrackEventDescriptor& operator=(const TrackEventDescriptor&);
+  bool operator==(const TrackEventDescriptor&) const;
+  bool operator!=(const TrackEventDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<TrackEventCategory>& available_categories() const { return available_categories_; }
+  std::vector<TrackEventCategory>* mutable_available_categories() { return &available_categories_; }
+  int available_categories_size() const;
+  void clear_available_categories();
+  TrackEventCategory* add_available_categories();
+
+ private:
+  std::vector<TrackEventCategory> available_categories_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TrackEventCategory : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kDescriptionFieldNumber = 2,
+    kTagsFieldNumber = 3,
+  };
+
+  TrackEventCategory();
+  ~TrackEventCategory() override;
+  TrackEventCategory(TrackEventCategory&&) noexcept;
+  TrackEventCategory& operator=(TrackEventCategory&&);
+  TrackEventCategory(const TrackEventCategory&);
+  TrackEventCategory& operator=(const TrackEventCategory&);
+  bool operator==(const TrackEventCategory&) const;
+  bool operator!=(const TrackEventCategory& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_description() const { return _has_field_[2]; }
+  const std::string& description() const { return description_; }
+  void set_description(const std::string& value) { description_ = value; _has_field_.set(2); }
+
+  const std::vector<std::string>& tags() const { return tags_; }
+  std::vector<std::string>* mutable_tags() { return &tags_; }
+  int tags_size() const { return static_cast<int>(tags_.size()); }
+  void clear_tags() { tags_.clear(); }
+  void add_tags(std::string value) { tags_.emplace_back(value); }
+  std::string* add_tags() { tags_.emplace_back(); return &tags_.back(); }
+
+ private:
+  std::string name_{};
+  std::string description_{};
+  std::vector<std::string> tags_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACK_EVENT_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/common/android_energy_consumer_descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_ENERGY_CONSUMER_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_ENERGY_CONSUMER_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class AndroidEnergyConsumer;
+
+class AndroidEnergyConsumerDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  AndroidEnergyConsumerDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AndroidEnergyConsumerDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AndroidEnergyConsumerDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_energy_consumers() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> energy_consumers() const { return GetRepeated<::protozero::ConstBytes>(1); }
+};
+
+class AndroidEnergyConsumerDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = AndroidEnergyConsumerDescriptor_Decoder;
+  enum : int32_t {
+    kEnergyConsumersFieldNumber = 1,
+  };
+
+  using FieldMetadata_EnergyConsumers =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AndroidEnergyConsumer,
+      AndroidEnergyConsumerDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EnergyConsumers kEnergyConsumers() { return {}; }
+  template <typename T = AndroidEnergyConsumer> T* add_energy_consumers() {
+    return BeginNestedMessage<T>(1);
+  }
+
+};
+
+class AndroidEnergyConsumer_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  AndroidEnergyConsumer_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AndroidEnergyConsumer_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AndroidEnergyConsumer_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_energy_consumer_id() const { return at<1>().valid(); }
+  int32_t energy_consumer_id() const { return at<1>().as_int32(); }
+  bool has_ordinal() const { return at<2>().valid(); }
+  int32_t ordinal() const { return at<2>().as_int32(); }
+  bool has_type() const { return at<3>().valid(); }
+  ::protozero::ConstChars type() const { return at<3>().as_string(); }
+  bool has_name() const { return at<4>().valid(); }
+  ::protozero::ConstChars name() const { return at<4>().as_string(); }
+};
+
+class AndroidEnergyConsumer : public ::protozero::Message {
+ public:
+  using Decoder = AndroidEnergyConsumer_Decoder;
+  enum : int32_t {
+    kEnergyConsumerIdFieldNumber = 1,
+    kOrdinalFieldNumber = 2,
+    kTypeFieldNumber = 3,
+    kNameFieldNumber = 4,
+  };
+
+  using FieldMetadata_EnergyConsumerId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      AndroidEnergyConsumer>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EnergyConsumerId kEnergyConsumerId() { return {}; }
+  void set_energy_consumer_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EnergyConsumerId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ordinal =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      AndroidEnergyConsumer>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ordinal kOrdinal() { return {}; }
+  void set_ordinal(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ordinal::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      AndroidEnergyConsumer>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Type::kFieldId, data, size);
+  }
+  void set_type(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      AndroidEnergyConsumer>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/common/android_log_constants.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_LOG_CONSTANTS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_ANDROID_LOG_CONSTANTS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+enum AndroidLogId : int32_t {
+  LID_DEFAULT = 0,
+  LID_RADIO = 1,
+  LID_EVENTS = 2,
+  LID_SYSTEM = 3,
+  LID_CRASH = 4,
+  LID_STATS = 5,
+  LID_SECURITY = 6,
+  LID_KERNEL = 7,
+};
+
+const AndroidLogId AndroidLogId_MIN = LID_DEFAULT;
+const AndroidLogId AndroidLogId_MAX = LID_KERNEL;
+
+enum AndroidLogPriority : int32_t {
+  PRIO_UNSPECIFIED = 0,
+  PRIO_UNUSED = 1,
+  PRIO_VERBOSE = 2,
+  PRIO_DEBUG = 3,
+  PRIO_INFO = 4,
+  PRIO_WARN = 5,
+  PRIO_ERROR = 6,
+  PRIO_FATAL = 7,
+};
+
+const AndroidLogPriority AndroidLogPriority_MIN = PRIO_UNSPECIFIED;
+const AndroidLogPriority AndroidLogPriority_MAX = PRIO_FATAL;
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/common/builtin_clock.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_BUILTIN_CLOCK_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_BUILTIN_CLOCK_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+enum BuiltinClock : int32_t {
+  BUILTIN_CLOCK_UNKNOWN = 0,
+  BUILTIN_CLOCK_REALTIME = 1,
+  BUILTIN_CLOCK_REALTIME_COARSE = 2,
+  BUILTIN_CLOCK_MONOTONIC = 3,
+  BUILTIN_CLOCK_MONOTONIC_COARSE = 4,
+  BUILTIN_CLOCK_MONOTONIC_RAW = 5,
+  BUILTIN_CLOCK_BOOTTIME = 6,
+  BUILTIN_CLOCK_MAX_ID = 63,
+};
+
+const BuiltinClock BuiltinClock_MIN = BUILTIN_CLOCK_UNKNOWN;
+const BuiltinClock BuiltinClock_MAX = BUILTIN_CLOCK_MAX_ID;
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/common/commit_data_request.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_COMMIT_DATA_REQUEST_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_COMMIT_DATA_REQUEST_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class CommitDataRequest_ChunkToPatch;
+class CommitDataRequest_ChunkToPatch_Patch;
+class CommitDataRequest_ChunksToMove;
+
+class CommitDataRequest_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  CommitDataRequest_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CommitDataRequest_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CommitDataRequest_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_chunks_to_move() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> chunks_to_move() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_chunks_to_patch() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> chunks_to_patch() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_flush_request_id() const { return at<3>().valid(); }
+  uint64_t flush_request_id() const { return at<3>().as_uint64(); }
+};
+
+class CommitDataRequest : public ::protozero::Message {
+ public:
+  using Decoder = CommitDataRequest_Decoder;
+  enum : int32_t {
+    kChunksToMoveFieldNumber = 1,
+    kChunksToPatchFieldNumber = 2,
+    kFlushRequestIdFieldNumber = 3,
+  };
+  using ChunksToMove = ::perfetto::protos::pbzero::CommitDataRequest_ChunksToMove;
+  using ChunkToPatch = ::perfetto::protos::pbzero::CommitDataRequest_ChunkToPatch;
+
+  using FieldMetadata_ChunksToMove =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CommitDataRequest_ChunksToMove,
+      CommitDataRequest>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunksToMove kChunksToMove() { return {}; }
+  template <typename T = CommitDataRequest_ChunksToMove> T* add_chunks_to_move() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_ChunksToPatch =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CommitDataRequest_ChunkToPatch,
+      CommitDataRequest>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunksToPatch kChunksToPatch() { return {}; }
+  template <typename T = CommitDataRequest_ChunkToPatch> T* add_chunks_to_patch() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_FlushRequestId =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      CommitDataRequest>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FlushRequestId kFlushRequestId() { return {}; }
+  void set_flush_request_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FlushRequestId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CommitDataRequest_ChunkToPatch_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  CommitDataRequest_ChunkToPatch_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CommitDataRequest_ChunkToPatch_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CommitDataRequest_ChunkToPatch_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_target_buffer() const { return at<1>().valid(); }
+  uint32_t target_buffer() const { return at<1>().as_uint32(); }
+  bool has_writer_id() const { return at<2>().valid(); }
+  uint32_t writer_id() const { return at<2>().as_uint32(); }
+  bool has_chunk_id() const { return at<3>().valid(); }
+  uint32_t chunk_id() const { return at<3>().as_uint32(); }
+  bool has_patches() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> patches() const { return GetRepeated<::protozero::ConstBytes>(4); }
+  bool has_has_more_patches() const { return at<5>().valid(); }
+  bool has_more_patches() const { return at<5>().as_bool(); }
+};
+
+class CommitDataRequest_ChunkToPatch : public ::protozero::Message {
+ public:
+  using Decoder = CommitDataRequest_ChunkToPatch_Decoder;
+  enum : int32_t {
+    kTargetBufferFieldNumber = 1,
+    kWriterIdFieldNumber = 2,
+    kChunkIdFieldNumber = 3,
+    kPatchesFieldNumber = 4,
+    kHasMorePatchesFieldNumber = 5,
+  };
+  using Patch = ::perfetto::protos::pbzero::CommitDataRequest_ChunkToPatch_Patch;
+
+  using FieldMetadata_TargetBuffer =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CommitDataRequest_ChunkToPatch>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TargetBuffer kTargetBuffer() { return {}; }
+  void set_target_buffer(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TargetBuffer::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_WriterId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CommitDataRequest_ChunkToPatch>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WriterId kWriterId() { return {}; }
+  void set_writer_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_WriterId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChunkId =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CommitDataRequest_ChunkToPatch>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunkId kChunkId() { return {}; }
+  void set_chunk_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChunkId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Patches =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CommitDataRequest_ChunkToPatch_Patch,
+      CommitDataRequest_ChunkToPatch>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Patches kPatches() { return {}; }
+  template <typename T = CommitDataRequest_ChunkToPatch_Patch> T* add_patches() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_HasMorePatches =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      CommitDataRequest_ChunkToPatch>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HasMorePatches kHasMorePatches() { return {}; }
+  void set_has_more_patches(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_HasMorePatches::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CommitDataRequest_ChunkToPatch_Patch_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CommitDataRequest_ChunkToPatch_Patch_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CommitDataRequest_ChunkToPatch_Patch_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CommitDataRequest_ChunkToPatch_Patch_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_offset() const { return at<1>().valid(); }
+  uint32_t offset() const { return at<1>().as_uint32(); }
+  bool has_data() const { return at<2>().valid(); }
+  ::protozero::ConstBytes data() const { return at<2>().as_bytes(); }
+};
+
+class CommitDataRequest_ChunkToPatch_Patch : public ::protozero::Message {
+ public:
+  using Decoder = CommitDataRequest_ChunkToPatch_Patch_Decoder;
+  enum : int32_t {
+    kOffsetFieldNumber = 1,
+    kDataFieldNumber = 2,
+  };
+
+  using FieldMetadata_Offset =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CommitDataRequest_ChunkToPatch_Patch>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Offset kOffset() { return {}; }
+  void set_offset(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Offset::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Data =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBytes,
+      std::string,
+      CommitDataRequest_ChunkToPatch_Patch>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Data kData() { return {}; }
+  void set_data(const uint8_t* data, size_t size) {
+    AppendBytes(FieldMetadata_Data::kFieldId, data, size);
+  }
+  void set_data(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Data::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBytes>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CommitDataRequest_ChunksToMove_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CommitDataRequest_ChunksToMove_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CommitDataRequest_ChunksToMove_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CommitDataRequest_ChunksToMove_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_page() const { return at<1>().valid(); }
+  uint32_t page() const { return at<1>().as_uint32(); }
+  bool has_chunk() const { return at<2>().valid(); }
+  uint32_t chunk() const { return at<2>().as_uint32(); }
+  bool has_target_buffer() const { return at<3>().valid(); }
+  uint32_t target_buffer() const { return at<3>().as_uint32(); }
+};
+
+class CommitDataRequest_ChunksToMove : public ::protozero::Message {
+ public:
+  using Decoder = CommitDataRequest_ChunksToMove_Decoder;
+  enum : int32_t {
+    kPageFieldNumber = 1,
+    kChunkFieldNumber = 2,
+    kTargetBufferFieldNumber = 3,
+  };
+
+  using FieldMetadata_Page =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CommitDataRequest_ChunksToMove>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Page kPage() { return {}; }
+  void set_page(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Page::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Chunk =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CommitDataRequest_ChunksToMove>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Chunk kChunk() { return {}; }
+  void set_chunk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Chunk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TargetBuffer =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CommitDataRequest_ChunksToMove>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TargetBuffer kTargetBuffer() { return {}; }
+  void set_target_buffer(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TargetBuffer::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/common/data_source_descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DATA_SOURCE_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DATA_SOURCE_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class GpuCounterDescriptor;
+class TrackEventDescriptor;
+
+class DataSourceDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  DataSourceDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit DataSourceDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit DataSourceDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_will_notify_on_stop() const { return at<2>().valid(); }
+  bool will_notify_on_stop() const { return at<2>().as_bool(); }
+  bool has_will_notify_on_start() const { return at<3>().valid(); }
+  bool will_notify_on_start() const { return at<3>().as_bool(); }
+  bool has_handles_incremental_state_clear() const { return at<4>().valid(); }
+  bool handles_incremental_state_clear() const { return at<4>().as_bool(); }
+  bool has_gpu_counter_descriptor() const { return at<5>().valid(); }
+  ::protozero::ConstBytes gpu_counter_descriptor() const { return at<5>().as_bytes(); }
+  bool has_track_event_descriptor() const { return at<6>().valid(); }
+  ::protozero::ConstBytes track_event_descriptor() const { return at<6>().as_bytes(); }
+};
+
+class DataSourceDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = DataSourceDescriptor_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kWillNotifyOnStopFieldNumber = 2,
+    kWillNotifyOnStartFieldNumber = 3,
+    kHandlesIncrementalStateClearFieldNumber = 4,
+    kGpuCounterDescriptorFieldNumber = 5,
+    kTrackEventDescriptorFieldNumber = 6,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DataSourceDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_WillNotifyOnStop =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      DataSourceDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WillNotifyOnStop kWillNotifyOnStop() { return {}; }
+  void set_will_notify_on_stop(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_WillNotifyOnStop::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_WillNotifyOnStart =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      DataSourceDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WillNotifyOnStart kWillNotifyOnStart() { return {}; }
+  void set_will_notify_on_start(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_WillNotifyOnStart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HandlesIncrementalStateClear =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      DataSourceDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HandlesIncrementalStateClear kHandlesIncrementalStateClear() { return {}; }
+  void set_handles_incremental_state_clear(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_HandlesIncrementalStateClear::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GpuCounterDescriptor =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuCounterDescriptor,
+      DataSourceDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuCounterDescriptor kGpuCounterDescriptor() { return {}; }
+  template <typename T = GpuCounterDescriptor> T* set_gpu_counter_descriptor() {
+    return BeginNestedMessage<T>(5);
+  }
+
+  void set_gpu_counter_descriptor_raw(const std::string& raw) {
+    return AppendBytes(5, raw.data(), raw.size());
+  }
+
+
+  using FieldMetadata_TrackEventDescriptor =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TrackEventDescriptor,
+      DataSourceDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrackEventDescriptor kTrackEventDescriptor() { return {}; }
+  template <typename T = TrackEventDescriptor> T* set_track_event_descriptor() {
+    return BeginNestedMessage<T>(6);
+  }
+
+  void set_track_event_descriptor_raw(const std::string& raw) {
+    return AppendBytes(6, raw.data(), raw.size());
+  }
+
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/common/descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class DescriptorProto;
+class DescriptorProto_ReservedRange;
+class EnumDescriptorProto;
+class EnumValueDescriptorProto;
+class FieldDescriptorProto;
+class FileDescriptorProto;
+class OneofDescriptorProto;
+class OneofOptions;
+enum FieldDescriptorProto_Label : int32_t;
+enum FieldDescriptorProto_Type : int32_t;
+
+enum FieldDescriptorProto_Type : int32_t {
+  FieldDescriptorProto_Type_TYPE_DOUBLE = 1,
+  FieldDescriptorProto_Type_TYPE_FLOAT = 2,
+  FieldDescriptorProto_Type_TYPE_INT64 = 3,
+  FieldDescriptorProto_Type_TYPE_UINT64 = 4,
+  FieldDescriptorProto_Type_TYPE_INT32 = 5,
+  FieldDescriptorProto_Type_TYPE_FIXED64 = 6,
+  FieldDescriptorProto_Type_TYPE_FIXED32 = 7,
+  FieldDescriptorProto_Type_TYPE_BOOL = 8,
+  FieldDescriptorProto_Type_TYPE_STRING = 9,
+  FieldDescriptorProto_Type_TYPE_GROUP = 10,
+  FieldDescriptorProto_Type_TYPE_MESSAGE = 11,
+  FieldDescriptorProto_Type_TYPE_BYTES = 12,
+  FieldDescriptorProto_Type_TYPE_UINT32 = 13,
+  FieldDescriptorProto_Type_TYPE_ENUM = 14,
+  FieldDescriptorProto_Type_TYPE_SFIXED32 = 15,
+  FieldDescriptorProto_Type_TYPE_SFIXED64 = 16,
+  FieldDescriptorProto_Type_TYPE_SINT32 = 17,
+  FieldDescriptorProto_Type_TYPE_SINT64 = 18,
+};
+
+const FieldDescriptorProto_Type FieldDescriptorProto_Type_MIN = FieldDescriptorProto_Type_TYPE_DOUBLE;
+const FieldDescriptorProto_Type FieldDescriptorProto_Type_MAX = FieldDescriptorProto_Type_TYPE_SINT64;
+
+enum FieldDescriptorProto_Label : int32_t {
+  FieldDescriptorProto_Label_LABEL_OPTIONAL = 1,
+  FieldDescriptorProto_Label_LABEL_REQUIRED = 2,
+  FieldDescriptorProto_Label_LABEL_REPEATED = 3,
+};
+
+const FieldDescriptorProto_Label FieldDescriptorProto_Label_MIN = FieldDescriptorProto_Label_LABEL_OPTIONAL;
+const FieldDescriptorProto_Label FieldDescriptorProto_Label_MAX = FieldDescriptorProto_Label_LABEL_REPEATED;
+
+class OneofOptions_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/0, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  OneofOptions_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit OneofOptions_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit OneofOptions_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+};
+
+class OneofOptions : public ::protozero::Message {
+ public:
+  using Decoder = OneofOptions_Decoder;
+};
+
+class EnumValueDescriptorProto_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  EnumValueDescriptorProto_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit EnumValueDescriptorProto_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit EnumValueDescriptorProto_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_number() const { return at<2>().valid(); }
+  int32_t number() const { return at<2>().as_int32(); }
+};
+
+class EnumValueDescriptorProto : public ::protozero::Message {
+ public:
+  using Decoder = EnumValueDescriptorProto_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kNumberFieldNumber = 2,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      EnumValueDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Number =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      EnumValueDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Number kNumber() { return {}; }
+  void set_number(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Number::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class EnumDescriptorProto_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  EnumDescriptorProto_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit EnumDescriptorProto_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit EnumDescriptorProto_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_value() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> value() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_reserved_name() const { return at<5>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> reserved_name() const { return GetRepeated<::protozero::ConstChars>(5); }
+};
+
+class EnumDescriptorProto : public ::protozero::Message {
+ public:
+  using Decoder = EnumDescriptorProto_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kValueFieldNumber = 2,
+    kReservedNameFieldNumber = 5,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      EnumDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Value =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      EnumValueDescriptorProto,
+      EnumDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Value kValue() { return {}; }
+  template <typename T = EnumValueDescriptorProto> T* add_value() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_ReservedName =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      EnumDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReservedName kReservedName() { return {}; }
+  void add_reserved_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ReservedName::kFieldId, data, size);
+  }
+  void add_reserved_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReservedName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class OneofDescriptorProto_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  OneofDescriptorProto_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit OneofDescriptorProto_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit OneofDescriptorProto_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_options() const { return at<2>().valid(); }
+  ::protozero::ConstBytes options() const { return at<2>().as_bytes(); }
+};
+
+class OneofDescriptorProto : public ::protozero::Message {
+ public:
+  using Decoder = OneofDescriptorProto_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kOptionsFieldNumber = 2,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      OneofDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Options =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      OneofOptions,
+      OneofDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Options kOptions() { return {}; }
+  template <typename T = OneofOptions> T* set_options() {
+    return BeginNestedMessage<T>(2);
+  }
+
+};
+
+class FieldDescriptorProto_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/9, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  FieldDescriptorProto_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FieldDescriptorProto_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FieldDescriptorProto_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_number() const { return at<3>().valid(); }
+  int32_t number() const { return at<3>().as_int32(); }
+  bool has_label() const { return at<4>().valid(); }
+  int32_t label() const { return at<4>().as_int32(); }
+  bool has_type() const { return at<5>().valid(); }
+  int32_t type() const { return at<5>().as_int32(); }
+  bool has_type_name() const { return at<6>().valid(); }
+  ::protozero::ConstChars type_name() const { return at<6>().as_string(); }
+  bool has_extendee() const { return at<2>().valid(); }
+  ::protozero::ConstChars extendee() const { return at<2>().as_string(); }
+  bool has_default_value() const { return at<7>().valid(); }
+  ::protozero::ConstChars default_value() const { return at<7>().as_string(); }
+  bool has_oneof_index() const { return at<9>().valid(); }
+  int32_t oneof_index() const { return at<9>().as_int32(); }
+};
+
+class FieldDescriptorProto : public ::protozero::Message {
+ public:
+  using Decoder = FieldDescriptorProto_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kNumberFieldNumber = 3,
+    kLabelFieldNumber = 4,
+    kTypeFieldNumber = 5,
+    kTypeNameFieldNumber = 6,
+    kExtendeeFieldNumber = 2,
+    kDefaultValueFieldNumber = 7,
+    kOneofIndexFieldNumber = 9,
+  };
+  using Type = ::perfetto::protos::pbzero::FieldDescriptorProto_Type;
+  using Label = ::perfetto::protos::pbzero::FieldDescriptorProto_Label;
+  static const Type TYPE_DOUBLE = FieldDescriptorProto_Type_TYPE_DOUBLE;
+  static const Type TYPE_FLOAT = FieldDescriptorProto_Type_TYPE_FLOAT;
+  static const Type TYPE_INT64 = FieldDescriptorProto_Type_TYPE_INT64;
+  static const Type TYPE_UINT64 = FieldDescriptorProto_Type_TYPE_UINT64;
+  static const Type TYPE_INT32 = FieldDescriptorProto_Type_TYPE_INT32;
+  static const Type TYPE_FIXED64 = FieldDescriptorProto_Type_TYPE_FIXED64;
+  static const Type TYPE_FIXED32 = FieldDescriptorProto_Type_TYPE_FIXED32;
+  static const Type TYPE_BOOL = FieldDescriptorProto_Type_TYPE_BOOL;
+  static const Type TYPE_STRING = FieldDescriptorProto_Type_TYPE_STRING;
+  static const Type TYPE_GROUP = FieldDescriptorProto_Type_TYPE_GROUP;
+  static const Type TYPE_MESSAGE = FieldDescriptorProto_Type_TYPE_MESSAGE;
+  static const Type TYPE_BYTES = FieldDescriptorProto_Type_TYPE_BYTES;
+  static const Type TYPE_UINT32 = FieldDescriptorProto_Type_TYPE_UINT32;
+  static const Type TYPE_ENUM = FieldDescriptorProto_Type_TYPE_ENUM;
+  static const Type TYPE_SFIXED32 = FieldDescriptorProto_Type_TYPE_SFIXED32;
+  static const Type TYPE_SFIXED64 = FieldDescriptorProto_Type_TYPE_SFIXED64;
+  static const Type TYPE_SINT32 = FieldDescriptorProto_Type_TYPE_SINT32;
+  static const Type TYPE_SINT64 = FieldDescriptorProto_Type_TYPE_SINT64;
+  static const Label LABEL_OPTIONAL = FieldDescriptorProto_Label_LABEL_OPTIONAL;
+  static const Label LABEL_REQUIRED = FieldDescriptorProto_Label_LABEL_REQUIRED;
+  static const Label LABEL_REPEATED = FieldDescriptorProto_Label_LABEL_REPEATED;
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FieldDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Number =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      FieldDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Number kNumber() { return {}; }
+  void set_number(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Number::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Label =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::FieldDescriptorProto_Label,
+      FieldDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Label kLabel() { return {}; }
+  void set_label(::perfetto::protos::pbzero::FieldDescriptorProto_Label value) {
+    static constexpr uint32_t field_id = FieldMetadata_Label::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::FieldDescriptorProto_Type,
+      FieldDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(::perfetto::protos::pbzero::FieldDescriptorProto_Type value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TypeName =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FieldDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TypeName kTypeName() { return {}; }
+  void set_type_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_TypeName::kFieldId, data, size);
+  }
+  void set_type_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_TypeName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Extendee =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FieldDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Extendee kExtendee() { return {}; }
+  void set_extendee(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Extendee::kFieldId, data, size);
+  }
+  void set_extendee(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Extendee::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DefaultValue =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FieldDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DefaultValue kDefaultValue() { return {}; }
+  void set_default_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_DefaultValue::kFieldId, data, size);
+  }
+  void set_default_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_DefaultValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OneofIndex =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      FieldDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OneofIndex kOneofIndex() { return {}; }
+  void set_oneof_index(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OneofIndex::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class DescriptorProto_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/10, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  DescriptorProto_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit DescriptorProto_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit DescriptorProto_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_field() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> field() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_extension() const { return at<6>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> extension() const { return GetRepeated<::protozero::ConstBytes>(6); }
+  bool has_nested_type() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> nested_type() const { return GetRepeated<::protozero::ConstBytes>(3); }
+  bool has_enum_type() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> enum_type() const { return GetRepeated<::protozero::ConstBytes>(4); }
+  bool has_oneof_decl() const { return at<8>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> oneof_decl() const { return GetRepeated<::protozero::ConstBytes>(8); }
+  bool has_reserved_range() const { return at<9>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> reserved_range() const { return GetRepeated<::protozero::ConstBytes>(9); }
+  bool has_reserved_name() const { return at<10>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> reserved_name() const { return GetRepeated<::protozero::ConstChars>(10); }
+};
+
+class DescriptorProto : public ::protozero::Message {
+ public:
+  using Decoder = DescriptorProto_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kFieldFieldNumber = 2,
+    kExtensionFieldNumber = 6,
+    kNestedTypeFieldNumber = 3,
+    kEnumTypeFieldNumber = 4,
+    kOneofDeclFieldNumber = 8,
+    kReservedRangeFieldNumber = 9,
+    kReservedNameFieldNumber = 10,
+  };
+  using ReservedRange = ::perfetto::protos::pbzero::DescriptorProto_ReservedRange;
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Field =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FieldDescriptorProto,
+      DescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Field kField() { return {}; }
+  template <typename T = FieldDescriptorProto> T* add_field() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_Extension =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FieldDescriptorProto,
+      DescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Extension kExtension() { return {}; }
+  template <typename T = FieldDescriptorProto> T* add_extension() {
+    return BeginNestedMessage<T>(6);
+  }
+
+
+  using FieldMetadata_NestedType =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DescriptorProto,
+      DescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NestedType kNestedType() { return {}; }
+  template <typename T = DescriptorProto> T* add_nested_type() {
+    return BeginNestedMessage<T>(3);
+  }
+
+
+  using FieldMetadata_EnumType =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      EnumDescriptorProto,
+      DescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EnumType kEnumType() { return {}; }
+  template <typename T = EnumDescriptorProto> T* add_enum_type() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_OneofDecl =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      OneofDescriptorProto,
+      DescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OneofDecl kOneofDecl() { return {}; }
+  template <typename T = OneofDescriptorProto> T* add_oneof_decl() {
+    return BeginNestedMessage<T>(8);
+  }
+
+
+  using FieldMetadata_ReservedRange =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DescriptorProto_ReservedRange,
+      DescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReservedRange kReservedRange() { return {}; }
+  template <typename T = DescriptorProto_ReservedRange> T* add_reserved_range() {
+    return BeginNestedMessage<T>(9);
+  }
+
+
+  using FieldMetadata_ReservedName =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReservedName kReservedName() { return {}; }
+  void add_reserved_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ReservedName::kFieldId, data, size);
+  }
+  void add_reserved_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReservedName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class DescriptorProto_ReservedRange_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  DescriptorProto_ReservedRange_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit DescriptorProto_ReservedRange_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit DescriptorProto_ReservedRange_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_start() const { return at<1>().valid(); }
+  int32_t start() const { return at<1>().as_int32(); }
+  bool has_end() const { return at<2>().valid(); }
+  int32_t end() const { return at<2>().as_int32(); }
+};
+
+class DescriptorProto_ReservedRange : public ::protozero::Message {
+ public:
+  using Decoder = DescriptorProto_ReservedRange_Decoder;
+  enum : int32_t {
+    kStartFieldNumber = 1,
+    kEndFieldNumber = 2,
+  };
+
+  using FieldMetadata_Start =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      DescriptorProto_ReservedRange>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Start kStart() { return {}; }
+  void set_start(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Start::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_End =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      DescriptorProto_ReservedRange>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_End kEnd() { return {}; }
+  void set_end(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_End::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class FileDescriptorProto_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/11, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  FileDescriptorProto_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FileDescriptorProto_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FileDescriptorProto_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_package() const { return at<2>().valid(); }
+  ::protozero::ConstChars package() const { return at<2>().as_string(); }
+  bool has_dependency() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> dependency() const { return GetRepeated<::protozero::ConstChars>(3); }
+  bool has_public_dependency() const { return at<10>().valid(); }
+  ::protozero::RepeatedFieldIterator<int32_t> public_dependency() const { return GetRepeated<int32_t>(10); }
+  bool has_weak_dependency() const { return at<11>().valid(); }
+  ::protozero::RepeatedFieldIterator<int32_t> weak_dependency() const { return GetRepeated<int32_t>(11); }
+  bool has_message_type() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> message_type() const { return GetRepeated<::protozero::ConstBytes>(4); }
+  bool has_enum_type() const { return at<5>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> enum_type() const { return GetRepeated<::protozero::ConstBytes>(5); }
+  bool has_extension() const { return at<7>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> extension() const { return GetRepeated<::protozero::ConstBytes>(7); }
+};
+
+class FileDescriptorProto : public ::protozero::Message {
+ public:
+  using Decoder = FileDescriptorProto_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kPackageFieldNumber = 2,
+    kDependencyFieldNumber = 3,
+    kPublicDependencyFieldNumber = 10,
+    kWeakDependencyFieldNumber = 11,
+    kMessageTypeFieldNumber = 4,
+    kEnumTypeFieldNumber = 5,
+    kExtensionFieldNumber = 7,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FileDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Package =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FileDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Package kPackage() { return {}; }
+  void set_package(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Package::kFieldId, data, size);
+  }
+  void set_package(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Package::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Dependency =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FileDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dependency kDependency() { return {}; }
+  void add_dependency(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Dependency::kFieldId, data, size);
+  }
+  void add_dependency(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dependency::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PublicDependency =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      FileDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PublicDependency kPublicDependency() { return {}; }
+  void add_public_dependency(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PublicDependency::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_WeakDependency =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      FileDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WeakDependency kWeakDependency() { return {}; }
+  void add_weak_dependency(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_WeakDependency::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MessageType =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DescriptorProto,
+      FileDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MessageType kMessageType() { return {}; }
+  template <typename T = DescriptorProto> T* add_message_type() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_EnumType =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      EnumDescriptorProto,
+      FileDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EnumType kEnumType() { return {}; }
+  template <typename T = EnumDescriptorProto> T* add_enum_type() {
+    return BeginNestedMessage<T>(5);
+  }
+
+
+  using FieldMetadata_Extension =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FieldDescriptorProto,
+      FileDescriptorProto>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Extension kExtension() { return {}; }
+  template <typename T = FieldDescriptorProto> T* add_extension() {
+    return BeginNestedMessage<T>(7);
+  }
+
+};
+
+class FileDescriptorSet_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  FileDescriptorSet_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FileDescriptorSet_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FileDescriptorSet_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_file() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> file() const { return GetRepeated<::protozero::ConstBytes>(1); }
+};
+
+class FileDescriptorSet : public ::protozero::Message {
+ public:
+  using Decoder = FileDescriptorSet_Decoder;
+  enum : int32_t {
+    kFileFieldNumber = 1,
+  };
+
+  using FieldMetadata_File =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FileDescriptorProto,
+      FileDescriptorSet>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_File kFile() { return {}; }
+  template <typename T = FileDescriptorProto> T* add_file() {
+    return BeginNestedMessage<T>(1);
+  }
+
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/common/gpu_counter_descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_GPU_COUNTER_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_GPU_COUNTER_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class GpuCounterDescriptor_GpuCounterBlock;
+class GpuCounterDescriptor_GpuCounterSpec;
+enum GpuCounterDescriptor_GpuCounterGroup : int32_t;
+enum GpuCounterDescriptor_MeasureUnit : int32_t;
+
+enum GpuCounterDescriptor_GpuCounterGroup : int32_t {
+  GpuCounterDescriptor_GpuCounterGroup_UNCLASSIFIED = 0,
+  GpuCounterDescriptor_GpuCounterGroup_SYSTEM = 1,
+  GpuCounterDescriptor_GpuCounterGroup_VERTICES = 2,
+  GpuCounterDescriptor_GpuCounterGroup_FRAGMENTS = 3,
+  GpuCounterDescriptor_GpuCounterGroup_PRIMITIVES = 4,
+  GpuCounterDescriptor_GpuCounterGroup_MEMORY = 5,
+  GpuCounterDescriptor_GpuCounterGroup_COMPUTE = 6,
+};
+
+const GpuCounterDescriptor_GpuCounterGroup GpuCounterDescriptor_GpuCounterGroup_MIN = GpuCounterDescriptor_GpuCounterGroup_UNCLASSIFIED;
+const GpuCounterDescriptor_GpuCounterGroup GpuCounterDescriptor_GpuCounterGroup_MAX = GpuCounterDescriptor_GpuCounterGroup_COMPUTE;
+
+enum GpuCounterDescriptor_MeasureUnit : int32_t {
+  GpuCounterDescriptor_MeasureUnit_NONE = 0,
+  GpuCounterDescriptor_MeasureUnit_BIT = 1,
+  GpuCounterDescriptor_MeasureUnit_KILOBIT = 2,
+  GpuCounterDescriptor_MeasureUnit_MEGABIT = 3,
+  GpuCounterDescriptor_MeasureUnit_GIGABIT = 4,
+  GpuCounterDescriptor_MeasureUnit_TERABIT = 5,
+  GpuCounterDescriptor_MeasureUnit_PETABIT = 6,
+  GpuCounterDescriptor_MeasureUnit_BYTE = 7,
+  GpuCounterDescriptor_MeasureUnit_KILOBYTE = 8,
+  GpuCounterDescriptor_MeasureUnit_MEGABYTE = 9,
+  GpuCounterDescriptor_MeasureUnit_GIGABYTE = 10,
+  GpuCounterDescriptor_MeasureUnit_TERABYTE = 11,
+  GpuCounterDescriptor_MeasureUnit_PETABYTE = 12,
+  GpuCounterDescriptor_MeasureUnit_HERTZ = 13,
+  GpuCounterDescriptor_MeasureUnit_KILOHERTZ = 14,
+  GpuCounterDescriptor_MeasureUnit_MEGAHERTZ = 15,
+  GpuCounterDescriptor_MeasureUnit_GIGAHERTZ = 16,
+  GpuCounterDescriptor_MeasureUnit_TERAHERTZ = 17,
+  GpuCounterDescriptor_MeasureUnit_PETAHERTZ = 18,
+  GpuCounterDescriptor_MeasureUnit_NANOSECOND = 19,
+  GpuCounterDescriptor_MeasureUnit_MICROSECOND = 20,
+  GpuCounterDescriptor_MeasureUnit_MILLISECOND = 21,
+  GpuCounterDescriptor_MeasureUnit_SECOND = 22,
+  GpuCounterDescriptor_MeasureUnit_MINUTE = 23,
+  GpuCounterDescriptor_MeasureUnit_HOUR = 24,
+  GpuCounterDescriptor_MeasureUnit_VERTEX = 25,
+  GpuCounterDescriptor_MeasureUnit_PIXEL = 26,
+  GpuCounterDescriptor_MeasureUnit_TRIANGLE = 27,
+  GpuCounterDescriptor_MeasureUnit_PRIMITIVE = 38,
+  GpuCounterDescriptor_MeasureUnit_FRAGMENT = 39,
+  GpuCounterDescriptor_MeasureUnit_MILLIWATT = 28,
+  GpuCounterDescriptor_MeasureUnit_WATT = 29,
+  GpuCounterDescriptor_MeasureUnit_KILOWATT = 30,
+  GpuCounterDescriptor_MeasureUnit_JOULE = 31,
+  GpuCounterDescriptor_MeasureUnit_VOLT = 32,
+  GpuCounterDescriptor_MeasureUnit_AMPERE = 33,
+  GpuCounterDescriptor_MeasureUnit_CELSIUS = 34,
+  GpuCounterDescriptor_MeasureUnit_FAHRENHEIT = 35,
+  GpuCounterDescriptor_MeasureUnit_KELVIN = 36,
+  GpuCounterDescriptor_MeasureUnit_PERCENT = 37,
+  GpuCounterDescriptor_MeasureUnit_INSTRUCTION = 40,
+};
+
+const GpuCounterDescriptor_MeasureUnit GpuCounterDescriptor_MeasureUnit_MIN = GpuCounterDescriptor_MeasureUnit_NONE;
+const GpuCounterDescriptor_MeasureUnit GpuCounterDescriptor_MeasureUnit_MAX = GpuCounterDescriptor_MeasureUnit_INSTRUCTION;
+
+class GpuCounterDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  GpuCounterDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GpuCounterDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GpuCounterDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_specs() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> specs() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_blocks() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> blocks() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_min_sampling_period_ns() const { return at<3>().valid(); }
+  uint64_t min_sampling_period_ns() const { return at<3>().as_uint64(); }
+  bool has_max_sampling_period_ns() const { return at<4>().valid(); }
+  uint64_t max_sampling_period_ns() const { return at<4>().as_uint64(); }
+  bool has_supports_instrumented_sampling() const { return at<5>().valid(); }
+  bool supports_instrumented_sampling() const { return at<5>().as_bool(); }
+};
+
+class GpuCounterDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = GpuCounterDescriptor_Decoder;
+  enum : int32_t {
+    kSpecsFieldNumber = 1,
+    kBlocksFieldNumber = 2,
+    kMinSamplingPeriodNsFieldNumber = 3,
+    kMaxSamplingPeriodNsFieldNumber = 4,
+    kSupportsInstrumentedSamplingFieldNumber = 5,
+  };
+  using GpuCounterSpec = ::perfetto::protos::pbzero::GpuCounterDescriptor_GpuCounterSpec;
+  using GpuCounterBlock = ::perfetto::protos::pbzero::GpuCounterDescriptor_GpuCounterBlock;
+  using GpuCounterGroup = ::perfetto::protos::pbzero::GpuCounterDescriptor_GpuCounterGroup;
+  using MeasureUnit = ::perfetto::protos::pbzero::GpuCounterDescriptor_MeasureUnit;
+  static const GpuCounterGroup UNCLASSIFIED = GpuCounterDescriptor_GpuCounterGroup_UNCLASSIFIED;
+  static const GpuCounterGroup SYSTEM = GpuCounterDescriptor_GpuCounterGroup_SYSTEM;
+  static const GpuCounterGroup VERTICES = GpuCounterDescriptor_GpuCounterGroup_VERTICES;
+  static const GpuCounterGroup FRAGMENTS = GpuCounterDescriptor_GpuCounterGroup_FRAGMENTS;
+  static const GpuCounterGroup PRIMITIVES = GpuCounterDescriptor_GpuCounterGroup_PRIMITIVES;
+  static const GpuCounterGroup MEMORY = GpuCounterDescriptor_GpuCounterGroup_MEMORY;
+  static const GpuCounterGroup COMPUTE = GpuCounterDescriptor_GpuCounterGroup_COMPUTE;
+  static const MeasureUnit NONE = GpuCounterDescriptor_MeasureUnit_NONE;
+  static const MeasureUnit BIT = GpuCounterDescriptor_MeasureUnit_BIT;
+  static const MeasureUnit KILOBIT = GpuCounterDescriptor_MeasureUnit_KILOBIT;
+  static const MeasureUnit MEGABIT = GpuCounterDescriptor_MeasureUnit_MEGABIT;
+  static const MeasureUnit GIGABIT = GpuCounterDescriptor_MeasureUnit_GIGABIT;
+  static const MeasureUnit TERABIT = GpuCounterDescriptor_MeasureUnit_TERABIT;
+  static const MeasureUnit PETABIT = GpuCounterDescriptor_MeasureUnit_PETABIT;
+  static const MeasureUnit BYTE = GpuCounterDescriptor_MeasureUnit_BYTE;
+  static const MeasureUnit KILOBYTE = GpuCounterDescriptor_MeasureUnit_KILOBYTE;
+  static const MeasureUnit MEGABYTE = GpuCounterDescriptor_MeasureUnit_MEGABYTE;
+  static const MeasureUnit GIGABYTE = GpuCounterDescriptor_MeasureUnit_GIGABYTE;
+  static const MeasureUnit TERABYTE = GpuCounterDescriptor_MeasureUnit_TERABYTE;
+  static const MeasureUnit PETABYTE = GpuCounterDescriptor_MeasureUnit_PETABYTE;
+  static const MeasureUnit HERTZ = GpuCounterDescriptor_MeasureUnit_HERTZ;
+  static const MeasureUnit KILOHERTZ = GpuCounterDescriptor_MeasureUnit_KILOHERTZ;
+  static const MeasureUnit MEGAHERTZ = GpuCounterDescriptor_MeasureUnit_MEGAHERTZ;
+  static const MeasureUnit GIGAHERTZ = GpuCounterDescriptor_MeasureUnit_GIGAHERTZ;
+  static const MeasureUnit TERAHERTZ = GpuCounterDescriptor_MeasureUnit_TERAHERTZ;
+  static const MeasureUnit PETAHERTZ = GpuCounterDescriptor_MeasureUnit_PETAHERTZ;
+  static const MeasureUnit NANOSECOND = GpuCounterDescriptor_MeasureUnit_NANOSECOND;
+  static const MeasureUnit MICROSECOND = GpuCounterDescriptor_MeasureUnit_MICROSECOND;
+  static const MeasureUnit MILLISECOND = GpuCounterDescriptor_MeasureUnit_MILLISECOND;
+  static const MeasureUnit SECOND = GpuCounterDescriptor_MeasureUnit_SECOND;
+  static const MeasureUnit MINUTE = GpuCounterDescriptor_MeasureUnit_MINUTE;
+  static const MeasureUnit HOUR = GpuCounterDescriptor_MeasureUnit_HOUR;
+  static const MeasureUnit VERTEX = GpuCounterDescriptor_MeasureUnit_VERTEX;
+  static const MeasureUnit PIXEL = GpuCounterDescriptor_MeasureUnit_PIXEL;
+  static const MeasureUnit TRIANGLE = GpuCounterDescriptor_MeasureUnit_TRIANGLE;
+  static const MeasureUnit PRIMITIVE = GpuCounterDescriptor_MeasureUnit_PRIMITIVE;
+  static const MeasureUnit FRAGMENT = GpuCounterDescriptor_MeasureUnit_FRAGMENT;
+  static const MeasureUnit MILLIWATT = GpuCounterDescriptor_MeasureUnit_MILLIWATT;
+  static const MeasureUnit WATT = GpuCounterDescriptor_MeasureUnit_WATT;
+  static const MeasureUnit KILOWATT = GpuCounterDescriptor_MeasureUnit_KILOWATT;
+  static const MeasureUnit JOULE = GpuCounterDescriptor_MeasureUnit_JOULE;
+  static const MeasureUnit VOLT = GpuCounterDescriptor_MeasureUnit_VOLT;
+  static const MeasureUnit AMPERE = GpuCounterDescriptor_MeasureUnit_AMPERE;
+  static const MeasureUnit CELSIUS = GpuCounterDescriptor_MeasureUnit_CELSIUS;
+  static const MeasureUnit FAHRENHEIT = GpuCounterDescriptor_MeasureUnit_FAHRENHEIT;
+  static const MeasureUnit KELVIN = GpuCounterDescriptor_MeasureUnit_KELVIN;
+  static const MeasureUnit PERCENT = GpuCounterDescriptor_MeasureUnit_PERCENT;
+  static const MeasureUnit INSTRUCTION = GpuCounterDescriptor_MeasureUnit_INSTRUCTION;
+
+  using FieldMetadata_Specs =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuCounterDescriptor_GpuCounterSpec,
+      GpuCounterDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Specs kSpecs() { return {}; }
+  template <typename T = GpuCounterDescriptor_GpuCounterSpec> T* add_specs() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_Blocks =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuCounterDescriptor_GpuCounterBlock,
+      GpuCounterDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Blocks kBlocks() { return {}; }
+  template <typename T = GpuCounterDescriptor_GpuCounterBlock> T* add_blocks() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_MinSamplingPeriodNs =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      GpuCounterDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MinSamplingPeriodNs kMinSamplingPeriodNs() { return {}; }
+  void set_min_sampling_period_ns(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MinSamplingPeriodNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MaxSamplingPeriodNs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      GpuCounterDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MaxSamplingPeriodNs kMaxSamplingPeriodNs() { return {}; }
+  void set_max_sampling_period_ns(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MaxSamplingPeriodNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SupportsInstrumentedSampling =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      GpuCounterDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SupportsInstrumentedSampling kSupportsInstrumentedSampling() { return {}; }
+  void set_supports_instrumented_sampling(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_SupportsInstrumentedSampling::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class GpuCounterDescriptor_GpuCounterBlock_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  GpuCounterDescriptor_GpuCounterBlock_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GpuCounterDescriptor_GpuCounterBlock_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GpuCounterDescriptor_GpuCounterBlock_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_block_id() const { return at<1>().valid(); }
+  uint32_t block_id() const { return at<1>().as_uint32(); }
+  bool has_block_capacity() const { return at<2>().valid(); }
+  uint32_t block_capacity() const { return at<2>().as_uint32(); }
+  bool has_name() const { return at<3>().valid(); }
+  ::protozero::ConstChars name() const { return at<3>().as_string(); }
+  bool has_description() const { return at<4>().valid(); }
+  ::protozero::ConstChars description() const { return at<4>().as_string(); }
+  bool has_counter_ids() const { return at<5>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint32_t> counter_ids() const { return GetRepeated<uint32_t>(5); }
+};
+
+class GpuCounterDescriptor_GpuCounterBlock : public ::protozero::Message {
+ public:
+  using Decoder = GpuCounterDescriptor_GpuCounterBlock_Decoder;
+  enum : int32_t {
+    kBlockIdFieldNumber = 1,
+    kBlockCapacityFieldNumber = 2,
+    kNameFieldNumber = 3,
+    kDescriptionFieldNumber = 4,
+    kCounterIdsFieldNumber = 5,
+  };
+
+  using FieldMetadata_BlockId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      GpuCounterDescriptor_GpuCounterBlock>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockId kBlockId() { return {}; }
+  void set_block_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BlockId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BlockCapacity =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      GpuCounterDescriptor_GpuCounterBlock>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockCapacity kBlockCapacity() { return {}; }
+  void set_block_capacity(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BlockCapacity::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      GpuCounterDescriptor_GpuCounterBlock>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Description =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      GpuCounterDescriptor_GpuCounterBlock>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Description kDescription() { return {}; }
+  void set_description(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Description::kFieldId, data, size);
+  }
+  void set_description(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Description::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CounterIds =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      GpuCounterDescriptor_GpuCounterBlock>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CounterIds kCounterIds() { return {}; }
+  void add_counter_ids(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CounterIds::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class GpuCounterDescriptor_GpuCounterSpec_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/10, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  GpuCounterDescriptor_GpuCounterSpec_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GpuCounterDescriptor_GpuCounterSpec_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GpuCounterDescriptor_GpuCounterSpec_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_counter_id() const { return at<1>().valid(); }
+  uint32_t counter_id() const { return at<1>().as_uint32(); }
+  bool has_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars name() const { return at<2>().as_string(); }
+  bool has_description() const { return at<3>().valid(); }
+  ::protozero::ConstChars description() const { return at<3>().as_string(); }
+  bool has_int_peak_value() const { return at<5>().valid(); }
+  int64_t int_peak_value() const { return at<5>().as_int64(); }
+  bool has_double_peak_value() const { return at<6>().valid(); }
+  double double_peak_value() const { return at<6>().as_double(); }
+  bool has_numerator_units() const { return at<7>().valid(); }
+  ::protozero::RepeatedFieldIterator<int32_t> numerator_units() const { return GetRepeated<int32_t>(7); }
+  bool has_denominator_units() const { return at<8>().valid(); }
+  ::protozero::RepeatedFieldIterator<int32_t> denominator_units() const { return GetRepeated<int32_t>(8); }
+  bool has_select_by_default() const { return at<9>().valid(); }
+  bool select_by_default() const { return at<9>().as_bool(); }
+  bool has_groups() const { return at<10>().valid(); }
+  ::protozero::RepeatedFieldIterator<int32_t> groups() const { return GetRepeated<int32_t>(10); }
+};
+
+class GpuCounterDescriptor_GpuCounterSpec : public ::protozero::Message {
+ public:
+  using Decoder = GpuCounterDescriptor_GpuCounterSpec_Decoder;
+  enum : int32_t {
+    kCounterIdFieldNumber = 1,
+    kNameFieldNumber = 2,
+    kDescriptionFieldNumber = 3,
+    kIntPeakValueFieldNumber = 5,
+    kDoublePeakValueFieldNumber = 6,
+    kNumeratorUnitsFieldNumber = 7,
+    kDenominatorUnitsFieldNumber = 8,
+    kSelectByDefaultFieldNumber = 9,
+    kGroupsFieldNumber = 10,
+  };
+
+  using FieldMetadata_CounterId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      GpuCounterDescriptor_GpuCounterSpec>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CounterId kCounterId() { return {}; }
+  void set_counter_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CounterId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      GpuCounterDescriptor_GpuCounterSpec>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Description =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      GpuCounterDescriptor_GpuCounterSpec>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Description kDescription() { return {}; }
+  void set_description(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Description::kFieldId, data, size);
+  }
+  void set_description(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Description::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IntPeakValue =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      GpuCounterDescriptor_GpuCounterSpec>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IntPeakValue kIntPeakValue() { return {}; }
+  void set_int_peak_value(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IntPeakValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DoublePeakValue =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      GpuCounterDescriptor_GpuCounterSpec>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DoublePeakValue kDoublePeakValue() { return {}; }
+  void set_double_peak_value(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_DoublePeakValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NumeratorUnits =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::GpuCounterDescriptor_MeasureUnit,
+      GpuCounterDescriptor_GpuCounterSpec>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NumeratorUnits kNumeratorUnits() { return {}; }
+  void add_numerator_units(::perfetto::protos::pbzero::GpuCounterDescriptor_MeasureUnit value) {
+    static constexpr uint32_t field_id = FieldMetadata_NumeratorUnits::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DenominatorUnits =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::GpuCounterDescriptor_MeasureUnit,
+      GpuCounterDescriptor_GpuCounterSpec>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DenominatorUnits kDenominatorUnits() { return {}; }
+  void add_denominator_units(::perfetto::protos::pbzero::GpuCounterDescriptor_MeasureUnit value) {
+    static constexpr uint32_t field_id = FieldMetadata_DenominatorUnits::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SelectByDefault =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      GpuCounterDescriptor_GpuCounterSpec>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SelectByDefault kSelectByDefault() { return {}; }
+  void set_select_by_default(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_SelectByDefault::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Groups =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::GpuCounterDescriptor_GpuCounterGroup,
+      GpuCounterDescriptor_GpuCounterSpec>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Groups kGroups() { return {}; }
+  void add_groups(::perfetto::protos::pbzero::GpuCounterDescriptor_GpuCounterGroup value) {
+    static constexpr uint32_t field_id = FieldMetadata_Groups::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/common/interceptor_descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_INTERCEPTOR_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_INTERCEPTOR_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class InterceptorDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  InterceptorDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit InterceptorDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit InterceptorDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+};
+
+class InterceptorDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = InterceptorDescriptor_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      InterceptorDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/common/observable_events.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_OBSERVABLE_EVENTS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_OBSERVABLE_EVENTS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class ObservableEvents_DataSourceInstanceStateChange;
+enum ObservableEvents_DataSourceInstanceState : int32_t;
+
+enum ObservableEvents_Type : int32_t {
+  ObservableEvents_Type_TYPE_UNSPECIFIED = 0,
+  ObservableEvents_Type_TYPE_DATA_SOURCES_INSTANCES = 1,
+  ObservableEvents_Type_TYPE_ALL_DATA_SOURCES_STARTED = 2,
+};
+
+const ObservableEvents_Type ObservableEvents_Type_MIN = ObservableEvents_Type_TYPE_UNSPECIFIED;
+const ObservableEvents_Type ObservableEvents_Type_MAX = ObservableEvents_Type_TYPE_ALL_DATA_SOURCES_STARTED;
+
+enum ObservableEvents_DataSourceInstanceState : int32_t {
+  ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STOPPED = 1,
+  ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STARTED = 2,
+};
+
+const ObservableEvents_DataSourceInstanceState ObservableEvents_DataSourceInstanceState_MIN = ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STOPPED;
+const ObservableEvents_DataSourceInstanceState ObservableEvents_DataSourceInstanceState_MAX = ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STARTED;
+
+class ObservableEvents_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ObservableEvents_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ObservableEvents_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ObservableEvents_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_instance_state_changes() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> instance_state_changes() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_all_data_sources_started() const { return at<2>().valid(); }
+  bool all_data_sources_started() const { return at<2>().as_bool(); }
+};
+
+class ObservableEvents : public ::protozero::Message {
+ public:
+  using Decoder = ObservableEvents_Decoder;
+  enum : int32_t {
+    kInstanceStateChangesFieldNumber = 1,
+    kAllDataSourcesStartedFieldNumber = 2,
+  };
+  using DataSourceInstanceStateChange = ::perfetto::protos::pbzero::ObservableEvents_DataSourceInstanceStateChange;
+  using Type = ::perfetto::protos::pbzero::ObservableEvents_Type;
+  using DataSourceInstanceState = ::perfetto::protos::pbzero::ObservableEvents_DataSourceInstanceState;
+  static const Type TYPE_UNSPECIFIED = ObservableEvents_Type_TYPE_UNSPECIFIED;
+  static const Type TYPE_DATA_SOURCES_INSTANCES = ObservableEvents_Type_TYPE_DATA_SOURCES_INSTANCES;
+  static const Type TYPE_ALL_DATA_SOURCES_STARTED = ObservableEvents_Type_TYPE_ALL_DATA_SOURCES_STARTED;
+  static const DataSourceInstanceState DATA_SOURCE_INSTANCE_STATE_STOPPED = ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STOPPED;
+  static const DataSourceInstanceState DATA_SOURCE_INSTANCE_STATE_STARTED = ObservableEvents_DataSourceInstanceState_DATA_SOURCE_INSTANCE_STATE_STARTED;
+
+  using FieldMetadata_InstanceStateChanges =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ObservableEvents_DataSourceInstanceStateChange,
+      ObservableEvents>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InstanceStateChanges kInstanceStateChanges() { return {}; }
+  template <typename T = ObservableEvents_DataSourceInstanceStateChange> T* add_instance_state_changes() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_AllDataSourcesStarted =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ObservableEvents>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllDataSourcesStarted kAllDataSourcesStarted() { return {}; }
+  void set_all_data_sources_started(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_AllDataSourcesStarted::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ObservableEvents_DataSourceInstanceStateChange_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ObservableEvents_DataSourceInstanceStateChange_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ObservableEvents_DataSourceInstanceStateChange_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ObservableEvents_DataSourceInstanceStateChange_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_producer_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars producer_name() const { return at<1>().as_string(); }
+  bool has_data_source_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars data_source_name() const { return at<2>().as_string(); }
+  bool has_state() const { return at<3>().valid(); }
+  int32_t state() const { return at<3>().as_int32(); }
+};
+
+class ObservableEvents_DataSourceInstanceStateChange : public ::protozero::Message {
+ public:
+  using Decoder = ObservableEvents_DataSourceInstanceStateChange_Decoder;
+  enum : int32_t {
+    kProducerNameFieldNumber = 1,
+    kDataSourceNameFieldNumber = 2,
+    kStateFieldNumber = 3,
+  };
+
+  using FieldMetadata_ProducerName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ObservableEvents_DataSourceInstanceStateChange>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProducerName kProducerName() { return {}; }
+  void set_producer_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ProducerName::kFieldId, data, size);
+  }
+  void set_producer_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProducerName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DataSourceName =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ObservableEvents_DataSourceInstanceStateChange>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DataSourceName kDataSourceName() { return {}; }
+  void set_data_source_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_DataSourceName::kFieldId, data, size);
+  }
+  void set_data_source_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_DataSourceName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_State =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ObservableEvents_DataSourceInstanceState,
+      ObservableEvents_DataSourceInstanceStateChange>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_State kState() { return {}; }
+  void set_state(::perfetto::protos::pbzero::ObservableEvents_DataSourceInstanceState value) {
+    static constexpr uint32_t field_id = FieldMetadata_State::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/common/perf_events.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_PERF_EVENTS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_PERF_EVENTS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class PerfEvents_Timebase;
+class PerfEvents_Tracepoint;
+enum PerfEvents_Counter : int32_t;
+
+enum PerfEvents_Counter : int32_t {
+  PerfEvents_Counter_UNKNOWN_COUNTER = 0,
+  PerfEvents_Counter_SW_CPU_CLOCK = 1,
+  PerfEvents_Counter_SW_PAGE_FAULTS = 2,
+  PerfEvents_Counter_HW_CPU_CYCLES = 10,
+  PerfEvents_Counter_HW_INSTRUCTIONS = 11,
+};
+
+const PerfEvents_Counter PerfEvents_Counter_MIN = PerfEvents_Counter_UNKNOWN_COUNTER;
+const PerfEvents_Counter PerfEvents_Counter_MAX = PerfEvents_Counter_HW_INSTRUCTIONS;
+
+class PerfEvents_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/0, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  PerfEvents_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PerfEvents_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PerfEvents_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+};
+
+class PerfEvents : public ::protozero::Message {
+ public:
+  using Decoder = PerfEvents_Decoder;
+  using Timebase = ::perfetto::protos::pbzero::PerfEvents_Timebase;
+  using Tracepoint = ::perfetto::protos::pbzero::PerfEvents_Tracepoint;
+  using Counter = ::perfetto::protos::pbzero::PerfEvents_Counter;
+  static const Counter UNKNOWN_COUNTER = PerfEvents_Counter_UNKNOWN_COUNTER;
+  static const Counter SW_CPU_CLOCK = PerfEvents_Counter_SW_CPU_CLOCK;
+  static const Counter SW_PAGE_FAULTS = PerfEvents_Counter_SW_PAGE_FAULTS;
+  static const Counter HW_CPU_CYCLES = PerfEvents_Counter_HW_CPU_CYCLES;
+  static const Counter HW_INSTRUCTIONS = PerfEvents_Counter_HW_INSTRUCTIONS;
+};
+
+class PerfEvents_Tracepoint_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  PerfEvents_Tracepoint_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PerfEvents_Tracepoint_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PerfEvents_Tracepoint_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_filter() const { return at<2>().valid(); }
+  ::protozero::ConstChars filter() const { return at<2>().as_string(); }
+};
+
+class PerfEvents_Tracepoint : public ::protozero::Message {
+ public:
+  using Decoder = PerfEvents_Tracepoint_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kFilterFieldNumber = 2,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PerfEvents_Tracepoint>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Filter =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PerfEvents_Tracepoint>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Filter kFilter() { return {}; }
+  void set_filter(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Filter::kFieldId, data, size);
+  }
+  void set_filter(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Filter::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class PerfEvents_Timebase_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  PerfEvents_Timebase_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PerfEvents_Timebase_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PerfEvents_Timebase_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_frequency() const { return at<2>().valid(); }
+  uint64_t frequency() const { return at<2>().as_uint64(); }
+  bool has_period() const { return at<1>().valid(); }
+  uint64_t period() const { return at<1>().as_uint64(); }
+  bool has_counter() const { return at<4>().valid(); }
+  int32_t counter() const { return at<4>().as_int32(); }
+  bool has_tracepoint() const { return at<3>().valid(); }
+  ::protozero::ConstBytes tracepoint() const { return at<3>().as_bytes(); }
+};
+
+class PerfEvents_Timebase : public ::protozero::Message {
+ public:
+  using Decoder = PerfEvents_Timebase_Decoder;
+  enum : int32_t {
+    kFrequencyFieldNumber = 2,
+    kPeriodFieldNumber = 1,
+    kCounterFieldNumber = 4,
+    kTracepointFieldNumber = 3,
+  };
+
+  using FieldMetadata_Frequency =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      PerfEvents_Timebase>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Frequency kFrequency() { return {}; }
+  void set_frequency(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Frequency::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Period =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      PerfEvents_Timebase>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Period kPeriod() { return {}; }
+  void set_period(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Period::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Counter =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::PerfEvents_Counter,
+      PerfEvents_Timebase>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Counter kCounter() { return {}; }
+  void set_counter(::perfetto::protos::pbzero::PerfEvents_Counter value) {
+    static constexpr uint32_t field_id = FieldMetadata_Counter::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Tracepoint =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PerfEvents_Tracepoint,
+      PerfEvents_Timebase>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tracepoint kTracepoint() { return {}; }
+  template <typename T = PerfEvents_Tracepoint> T* set_tracepoint() {
+    return BeginNestedMessage<T>(3);
+  }
+
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/common/sys_stats_counters.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_SYS_STATS_COUNTERS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_SYS_STATS_COUNTERS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+enum MeminfoCounters : int32_t {
+  MEMINFO_UNSPECIFIED = 0,
+  MEMINFO_MEM_TOTAL = 1,
+  MEMINFO_MEM_FREE = 2,
+  MEMINFO_MEM_AVAILABLE = 3,
+  MEMINFO_BUFFERS = 4,
+  MEMINFO_CACHED = 5,
+  MEMINFO_SWAP_CACHED = 6,
+  MEMINFO_ACTIVE = 7,
+  MEMINFO_INACTIVE = 8,
+  MEMINFO_ACTIVE_ANON = 9,
+  MEMINFO_INACTIVE_ANON = 10,
+  MEMINFO_ACTIVE_FILE = 11,
+  MEMINFO_INACTIVE_FILE = 12,
+  MEMINFO_UNEVICTABLE = 13,
+  MEMINFO_MLOCKED = 14,
+  MEMINFO_SWAP_TOTAL = 15,
+  MEMINFO_SWAP_FREE = 16,
+  MEMINFO_DIRTY = 17,
+  MEMINFO_WRITEBACK = 18,
+  MEMINFO_ANON_PAGES = 19,
+  MEMINFO_MAPPED = 20,
+  MEMINFO_SHMEM = 21,
+  MEMINFO_SLAB = 22,
+  MEMINFO_SLAB_RECLAIMABLE = 23,
+  MEMINFO_SLAB_UNRECLAIMABLE = 24,
+  MEMINFO_KERNEL_STACK = 25,
+  MEMINFO_PAGE_TABLES = 26,
+  MEMINFO_COMMIT_LIMIT = 27,
+  MEMINFO_COMMITED_AS = 28,
+  MEMINFO_VMALLOC_TOTAL = 29,
+  MEMINFO_VMALLOC_USED = 30,
+  MEMINFO_VMALLOC_CHUNK = 31,
+  MEMINFO_CMA_TOTAL = 32,
+  MEMINFO_CMA_FREE = 33,
+};
+
+const MeminfoCounters MeminfoCounters_MIN = MEMINFO_UNSPECIFIED;
+const MeminfoCounters MeminfoCounters_MAX = MEMINFO_CMA_FREE;
+
+enum VmstatCounters : int32_t {
+  VMSTAT_UNSPECIFIED = 0,
+  VMSTAT_NR_FREE_PAGES = 1,
+  VMSTAT_NR_ALLOC_BATCH = 2,
+  VMSTAT_NR_INACTIVE_ANON = 3,
+  VMSTAT_NR_ACTIVE_ANON = 4,
+  VMSTAT_NR_INACTIVE_FILE = 5,
+  VMSTAT_NR_ACTIVE_FILE = 6,
+  VMSTAT_NR_UNEVICTABLE = 7,
+  VMSTAT_NR_MLOCK = 8,
+  VMSTAT_NR_ANON_PAGES = 9,
+  VMSTAT_NR_MAPPED = 10,
+  VMSTAT_NR_FILE_PAGES = 11,
+  VMSTAT_NR_DIRTY = 12,
+  VMSTAT_NR_WRITEBACK = 13,
+  VMSTAT_NR_SLAB_RECLAIMABLE = 14,
+  VMSTAT_NR_SLAB_UNRECLAIMABLE = 15,
+  VMSTAT_NR_PAGE_TABLE_PAGES = 16,
+  VMSTAT_NR_KERNEL_STACK = 17,
+  VMSTAT_NR_OVERHEAD = 18,
+  VMSTAT_NR_UNSTABLE = 19,
+  VMSTAT_NR_BOUNCE = 20,
+  VMSTAT_NR_VMSCAN_WRITE = 21,
+  VMSTAT_NR_VMSCAN_IMMEDIATE_RECLAIM = 22,
+  VMSTAT_NR_WRITEBACK_TEMP = 23,
+  VMSTAT_NR_ISOLATED_ANON = 24,
+  VMSTAT_NR_ISOLATED_FILE = 25,
+  VMSTAT_NR_SHMEM = 26,
+  VMSTAT_NR_DIRTIED = 27,
+  VMSTAT_NR_WRITTEN = 28,
+  VMSTAT_NR_PAGES_SCANNED = 29,
+  VMSTAT_WORKINGSET_REFAULT = 30,
+  VMSTAT_WORKINGSET_ACTIVATE = 31,
+  VMSTAT_WORKINGSET_NODERECLAIM = 32,
+  VMSTAT_NR_ANON_TRANSPARENT_HUGEPAGES = 33,
+  VMSTAT_NR_FREE_CMA = 34,
+  VMSTAT_NR_SWAPCACHE = 35,
+  VMSTAT_NR_DIRTY_THRESHOLD = 36,
+  VMSTAT_NR_DIRTY_BACKGROUND_THRESHOLD = 37,
+  VMSTAT_PGPGIN = 38,
+  VMSTAT_PGPGOUT = 39,
+  VMSTAT_PGPGOUTCLEAN = 40,
+  VMSTAT_PSWPIN = 41,
+  VMSTAT_PSWPOUT = 42,
+  VMSTAT_PGALLOC_DMA = 43,
+  VMSTAT_PGALLOC_NORMAL = 44,
+  VMSTAT_PGALLOC_MOVABLE = 45,
+  VMSTAT_PGFREE = 46,
+  VMSTAT_PGACTIVATE = 47,
+  VMSTAT_PGDEACTIVATE = 48,
+  VMSTAT_PGFAULT = 49,
+  VMSTAT_PGMAJFAULT = 50,
+  VMSTAT_PGREFILL_DMA = 51,
+  VMSTAT_PGREFILL_NORMAL = 52,
+  VMSTAT_PGREFILL_MOVABLE = 53,
+  VMSTAT_PGSTEAL_KSWAPD_DMA = 54,
+  VMSTAT_PGSTEAL_KSWAPD_NORMAL = 55,
+  VMSTAT_PGSTEAL_KSWAPD_MOVABLE = 56,
+  VMSTAT_PGSTEAL_DIRECT_DMA = 57,
+  VMSTAT_PGSTEAL_DIRECT_NORMAL = 58,
+  VMSTAT_PGSTEAL_DIRECT_MOVABLE = 59,
+  VMSTAT_PGSCAN_KSWAPD_DMA = 60,
+  VMSTAT_PGSCAN_KSWAPD_NORMAL = 61,
+  VMSTAT_PGSCAN_KSWAPD_MOVABLE = 62,
+  VMSTAT_PGSCAN_DIRECT_DMA = 63,
+  VMSTAT_PGSCAN_DIRECT_NORMAL = 64,
+  VMSTAT_PGSCAN_DIRECT_MOVABLE = 65,
+  VMSTAT_PGSCAN_DIRECT_THROTTLE = 66,
+  VMSTAT_PGINODESTEAL = 67,
+  VMSTAT_SLABS_SCANNED = 68,
+  VMSTAT_KSWAPD_INODESTEAL = 69,
+  VMSTAT_KSWAPD_LOW_WMARK_HIT_QUICKLY = 70,
+  VMSTAT_KSWAPD_HIGH_WMARK_HIT_QUICKLY = 71,
+  VMSTAT_PAGEOUTRUN = 72,
+  VMSTAT_ALLOCSTALL = 73,
+  VMSTAT_PGROTATED = 74,
+  VMSTAT_DROP_PAGECACHE = 75,
+  VMSTAT_DROP_SLAB = 76,
+  VMSTAT_PGMIGRATE_SUCCESS = 77,
+  VMSTAT_PGMIGRATE_FAIL = 78,
+  VMSTAT_COMPACT_MIGRATE_SCANNED = 79,
+  VMSTAT_COMPACT_FREE_SCANNED = 80,
+  VMSTAT_COMPACT_ISOLATED = 81,
+  VMSTAT_COMPACT_STALL = 82,
+  VMSTAT_COMPACT_FAIL = 83,
+  VMSTAT_COMPACT_SUCCESS = 84,
+  VMSTAT_COMPACT_DAEMON_WAKE = 85,
+  VMSTAT_UNEVICTABLE_PGS_CULLED = 86,
+  VMSTAT_UNEVICTABLE_PGS_SCANNED = 87,
+  VMSTAT_UNEVICTABLE_PGS_RESCUED = 88,
+  VMSTAT_UNEVICTABLE_PGS_MLOCKED = 89,
+  VMSTAT_UNEVICTABLE_PGS_MUNLOCKED = 90,
+  VMSTAT_UNEVICTABLE_PGS_CLEARED = 91,
+  VMSTAT_UNEVICTABLE_PGS_STRANDED = 92,
+  VMSTAT_NR_ZSPAGES = 93,
+  VMSTAT_NR_ION_HEAP = 94,
+  VMSTAT_NR_GPU_HEAP = 95,
+  VMSTAT_ALLOCSTALL_DMA = 96,
+  VMSTAT_ALLOCSTALL_MOVABLE = 97,
+  VMSTAT_ALLOCSTALL_NORMAL = 98,
+  VMSTAT_COMPACT_DAEMON_FREE_SCANNED = 99,
+  VMSTAT_COMPACT_DAEMON_MIGRATE_SCANNED = 100,
+  VMSTAT_NR_FASTRPC = 101,
+  VMSTAT_NR_INDIRECTLY_RECLAIMABLE = 102,
+  VMSTAT_NR_ION_HEAP_POOL = 103,
+  VMSTAT_NR_KERNEL_MISC_RECLAIMABLE = 104,
+  VMSTAT_NR_SHADOW_CALL_STACK_BYTES = 105,
+  VMSTAT_NR_SHMEM_HUGEPAGES = 106,
+  VMSTAT_NR_SHMEM_PMDMAPPED = 107,
+  VMSTAT_NR_UNRECLAIMABLE_PAGES = 108,
+  VMSTAT_NR_ZONE_ACTIVE_ANON = 109,
+  VMSTAT_NR_ZONE_ACTIVE_FILE = 110,
+  VMSTAT_NR_ZONE_INACTIVE_ANON = 111,
+  VMSTAT_NR_ZONE_INACTIVE_FILE = 112,
+  VMSTAT_NR_ZONE_UNEVICTABLE = 113,
+  VMSTAT_NR_ZONE_WRITE_PENDING = 114,
+  VMSTAT_OOM_KILL = 115,
+  VMSTAT_PGLAZYFREE = 116,
+  VMSTAT_PGLAZYFREED = 117,
+  VMSTAT_PGREFILL = 118,
+  VMSTAT_PGSCAN_DIRECT = 119,
+  VMSTAT_PGSCAN_KSWAPD = 120,
+  VMSTAT_PGSKIP_DMA = 121,
+  VMSTAT_PGSKIP_MOVABLE = 122,
+  VMSTAT_PGSKIP_NORMAL = 123,
+  VMSTAT_PGSTEAL_DIRECT = 124,
+  VMSTAT_PGSTEAL_KSWAPD = 125,
+  VMSTAT_SWAP_RA = 126,
+  VMSTAT_SWAP_RA_HIT = 127,
+  VMSTAT_WORKINGSET_RESTORE = 128,
+};
+
+const VmstatCounters VmstatCounters_MIN = VMSTAT_UNSPECIFIED;
+const VmstatCounters VmstatCounters_MAX = VMSTAT_WORKINGSET_RESTORE;
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/common/trace_stats.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACE_STATS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACE_STATS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class TraceStats_BufferStats;
+class TraceStats_FilterStats;
+
+class TraceStats_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/11, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TraceStats_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceStats_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceStats_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_buffer_stats() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> buffer_stats() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_producers_connected() const { return at<2>().valid(); }
+  uint32_t producers_connected() const { return at<2>().as_uint32(); }
+  bool has_producers_seen() const { return at<3>().valid(); }
+  uint64_t producers_seen() const { return at<3>().as_uint64(); }
+  bool has_data_sources_registered() const { return at<4>().valid(); }
+  uint32_t data_sources_registered() const { return at<4>().as_uint32(); }
+  bool has_data_sources_seen() const { return at<5>().valid(); }
+  uint64_t data_sources_seen() const { return at<5>().as_uint64(); }
+  bool has_tracing_sessions() const { return at<6>().valid(); }
+  uint32_t tracing_sessions() const { return at<6>().as_uint32(); }
+  bool has_total_buffers() const { return at<7>().valid(); }
+  uint32_t total_buffers() const { return at<7>().as_uint32(); }
+  bool has_chunks_discarded() const { return at<8>().valid(); }
+  uint64_t chunks_discarded() const { return at<8>().as_uint64(); }
+  bool has_patches_discarded() const { return at<9>().valid(); }
+  uint64_t patches_discarded() const { return at<9>().as_uint64(); }
+  bool has_invalid_packets() const { return at<10>().valid(); }
+  uint64_t invalid_packets() const { return at<10>().as_uint64(); }
+  bool has_filter_stats() const { return at<11>().valid(); }
+  ::protozero::ConstBytes filter_stats() const { return at<11>().as_bytes(); }
+};
+
+class TraceStats : public ::protozero::Message {
+ public:
+  using Decoder = TraceStats_Decoder;
+  enum : int32_t {
+    kBufferStatsFieldNumber = 1,
+    kProducersConnectedFieldNumber = 2,
+    kProducersSeenFieldNumber = 3,
+    kDataSourcesRegisteredFieldNumber = 4,
+    kDataSourcesSeenFieldNumber = 5,
+    kTracingSessionsFieldNumber = 6,
+    kTotalBuffersFieldNumber = 7,
+    kChunksDiscardedFieldNumber = 8,
+    kPatchesDiscardedFieldNumber = 9,
+    kInvalidPacketsFieldNumber = 10,
+    kFilterStatsFieldNumber = 11,
+  };
+  using BufferStats = ::perfetto::protos::pbzero::TraceStats_BufferStats;
+  using FilterStats = ::perfetto::protos::pbzero::TraceStats_FilterStats;
+
+  using FieldMetadata_BufferStats =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceStats_BufferStats,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BufferStats kBufferStats() { return {}; }
+  template <typename T = TraceStats_BufferStats> T* add_buffer_stats() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_ProducersConnected =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProducersConnected kProducersConnected() { return {}; }
+  void set_producers_connected(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProducersConnected::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProducersSeen =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProducersSeen kProducersSeen() { return {}; }
+  void set_producers_seen(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProducersSeen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DataSourcesRegistered =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DataSourcesRegistered kDataSourcesRegistered() { return {}; }
+  void set_data_sources_registered(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DataSourcesRegistered::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DataSourcesSeen =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DataSourcesSeen kDataSourcesSeen() { return {}; }
+  void set_data_sources_seen(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DataSourcesSeen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TracingSessions =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TracingSessions kTracingSessions() { return {}; }
+  void set_tracing_sessions(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TracingSessions::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TotalBuffers =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TotalBuffers kTotalBuffers() { return {}; }
+  void set_total_buffers(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TotalBuffers::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChunksDiscarded =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunksDiscarded kChunksDiscarded() { return {}; }
+  void set_chunks_discarded(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChunksDiscarded::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PatchesDiscarded =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PatchesDiscarded kPatchesDiscarded() { return {}; }
+  void set_patches_discarded(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PatchesDiscarded::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_InvalidPackets =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InvalidPackets kInvalidPackets() { return {}; }
+  void set_invalid_packets(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_InvalidPackets::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FilterStats =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceStats_FilterStats,
+      TraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FilterStats kFilterStats() { return {}; }
+  template <typename T = TraceStats_FilterStats> T* set_filter_stats() {
+    return BeginNestedMessage<T>(11);
+  }
+
+};
+
+class TraceStats_FilterStats_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceStats_FilterStats_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceStats_FilterStats_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceStats_FilterStats_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_input_packets() const { return at<1>().valid(); }
+  uint64_t input_packets() const { return at<1>().as_uint64(); }
+  bool has_input_bytes() const { return at<2>().valid(); }
+  uint64_t input_bytes() const { return at<2>().as_uint64(); }
+  bool has_output_bytes() const { return at<3>().valid(); }
+  uint64_t output_bytes() const { return at<3>().as_uint64(); }
+  bool has_errors() const { return at<4>().valid(); }
+  uint64_t errors() const { return at<4>().as_uint64(); }
+};
+
+class TraceStats_FilterStats : public ::protozero::Message {
+ public:
+  using Decoder = TraceStats_FilterStats_Decoder;
+  enum : int32_t {
+    kInputPacketsFieldNumber = 1,
+    kInputBytesFieldNumber = 2,
+    kOutputBytesFieldNumber = 3,
+    kErrorsFieldNumber = 4,
+  };
+
+  using FieldMetadata_InputPackets =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_FilterStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InputPackets kInputPackets() { return {}; }
+  void set_input_packets(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_InputPackets::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_InputBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_FilterStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InputBytes kInputBytes() { return {}; }
+  void set_input_bytes(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_InputBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OutputBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_FilterStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OutputBytes kOutputBytes() { return {}; }
+  void set_output_bytes(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OutputBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Errors =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_FilterStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Errors kErrors() { return {}; }
+  void set_errors(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Errors::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceStats_BufferStats_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/19, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceStats_BufferStats_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceStats_BufferStats_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceStats_BufferStats_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_buffer_size() const { return at<12>().valid(); }
+  uint64_t buffer_size() const { return at<12>().as_uint64(); }
+  bool has_bytes_written() const { return at<1>().valid(); }
+  uint64_t bytes_written() const { return at<1>().as_uint64(); }
+  bool has_bytes_overwritten() const { return at<13>().valid(); }
+  uint64_t bytes_overwritten() const { return at<13>().as_uint64(); }
+  bool has_bytes_read() const { return at<14>().valid(); }
+  uint64_t bytes_read() const { return at<14>().as_uint64(); }
+  bool has_padding_bytes_written() const { return at<15>().valid(); }
+  uint64_t padding_bytes_written() const { return at<15>().as_uint64(); }
+  bool has_padding_bytes_cleared() const { return at<16>().valid(); }
+  uint64_t padding_bytes_cleared() const { return at<16>().as_uint64(); }
+  bool has_chunks_written() const { return at<2>().valid(); }
+  uint64_t chunks_written() const { return at<2>().as_uint64(); }
+  bool has_chunks_rewritten() const { return at<10>().valid(); }
+  uint64_t chunks_rewritten() const { return at<10>().as_uint64(); }
+  bool has_chunks_overwritten() const { return at<3>().valid(); }
+  uint64_t chunks_overwritten() const { return at<3>().as_uint64(); }
+  bool has_chunks_discarded() const { return at<18>().valid(); }
+  uint64_t chunks_discarded() const { return at<18>().as_uint64(); }
+  bool has_chunks_read() const { return at<17>().valid(); }
+  uint64_t chunks_read() const { return at<17>().as_uint64(); }
+  bool has_chunks_committed_out_of_order() const { return at<11>().valid(); }
+  uint64_t chunks_committed_out_of_order() const { return at<11>().as_uint64(); }
+  bool has_write_wrap_count() const { return at<4>().valid(); }
+  uint64_t write_wrap_count() const { return at<4>().as_uint64(); }
+  bool has_patches_succeeded() const { return at<5>().valid(); }
+  uint64_t patches_succeeded() const { return at<5>().as_uint64(); }
+  bool has_patches_failed() const { return at<6>().valid(); }
+  uint64_t patches_failed() const { return at<6>().as_uint64(); }
+  bool has_readaheads_succeeded() const { return at<7>().valid(); }
+  uint64_t readaheads_succeeded() const { return at<7>().as_uint64(); }
+  bool has_readaheads_failed() const { return at<8>().valid(); }
+  uint64_t readaheads_failed() const { return at<8>().as_uint64(); }
+  bool has_abi_violations() const { return at<9>().valid(); }
+  uint64_t abi_violations() const { return at<9>().as_uint64(); }
+  bool has_trace_writer_packet_loss() const { return at<19>().valid(); }
+  uint64_t trace_writer_packet_loss() const { return at<19>().as_uint64(); }
+};
+
+class TraceStats_BufferStats : public ::protozero::Message {
+ public:
+  using Decoder = TraceStats_BufferStats_Decoder;
+  enum : int32_t {
+    kBufferSizeFieldNumber = 12,
+    kBytesWrittenFieldNumber = 1,
+    kBytesOverwrittenFieldNumber = 13,
+    kBytesReadFieldNumber = 14,
+    kPaddingBytesWrittenFieldNumber = 15,
+    kPaddingBytesClearedFieldNumber = 16,
+    kChunksWrittenFieldNumber = 2,
+    kChunksRewrittenFieldNumber = 10,
+    kChunksOverwrittenFieldNumber = 3,
+    kChunksDiscardedFieldNumber = 18,
+    kChunksReadFieldNumber = 17,
+    kChunksCommittedOutOfOrderFieldNumber = 11,
+    kWriteWrapCountFieldNumber = 4,
+    kPatchesSucceededFieldNumber = 5,
+    kPatchesFailedFieldNumber = 6,
+    kReadaheadsSucceededFieldNumber = 7,
+    kReadaheadsFailedFieldNumber = 8,
+    kAbiViolationsFieldNumber = 9,
+    kTraceWriterPacketLossFieldNumber = 19,
+  };
+
+  using FieldMetadata_BufferSize =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BufferSize kBufferSize() { return {}; }
+  void set_buffer_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BufferSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BytesWritten =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BytesWritten kBytesWritten() { return {}; }
+  void set_bytes_written(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BytesWritten::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BytesOverwritten =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BytesOverwritten kBytesOverwritten() { return {}; }
+  void set_bytes_overwritten(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BytesOverwritten::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BytesRead =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BytesRead kBytesRead() { return {}; }
+  void set_bytes_read(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BytesRead::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PaddingBytesWritten =
+    ::protozero::proto_utils::FieldMetadata<
+      15,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PaddingBytesWritten kPaddingBytesWritten() { return {}; }
+  void set_padding_bytes_written(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PaddingBytesWritten::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PaddingBytesCleared =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PaddingBytesCleared kPaddingBytesCleared() { return {}; }
+  void set_padding_bytes_cleared(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PaddingBytesCleared::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChunksWritten =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunksWritten kChunksWritten() { return {}; }
+  void set_chunks_written(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChunksWritten::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChunksRewritten =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunksRewritten kChunksRewritten() { return {}; }
+  void set_chunks_rewritten(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChunksRewritten::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChunksOverwritten =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunksOverwritten kChunksOverwritten() { return {}; }
+  void set_chunks_overwritten(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChunksOverwritten::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChunksDiscarded =
+    ::protozero::proto_utils::FieldMetadata<
+      18,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunksDiscarded kChunksDiscarded() { return {}; }
+  void set_chunks_discarded(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChunksDiscarded::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChunksRead =
+    ::protozero::proto_utils::FieldMetadata<
+      17,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunksRead kChunksRead() { return {}; }
+  void set_chunks_read(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChunksRead::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChunksCommittedOutOfOrder =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunksCommittedOutOfOrder kChunksCommittedOutOfOrder() { return {}; }
+  void set_chunks_committed_out_of_order(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChunksCommittedOutOfOrder::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_WriteWrapCount =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WriteWrapCount kWriteWrapCount() { return {}; }
+  void set_write_wrap_count(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_WriteWrapCount::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PatchesSucceeded =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PatchesSucceeded kPatchesSucceeded() { return {}; }
+  void set_patches_succeeded(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PatchesSucceeded::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PatchesFailed =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PatchesFailed kPatchesFailed() { return {}; }
+  void set_patches_failed(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PatchesFailed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReadaheadsSucceeded =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReadaheadsSucceeded kReadaheadsSucceeded() { return {}; }
+  void set_readaheads_succeeded(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReadaheadsSucceeded::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReadaheadsFailed =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReadaheadsFailed kReadaheadsFailed() { return {}; }
+  void set_readaheads_failed(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReadaheadsFailed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AbiViolations =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AbiViolations kAbiViolations() { return {}; }
+  void set_abi_violations(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AbiViolations::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TraceWriterPacketLoss =
+    ::protozero::proto_utils::FieldMetadata<
+      19,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceStats_BufferStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceWriterPacketLoss kTraceWriterPacketLoss() { return {}; }
+  void set_trace_writer_packet_loss(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TraceWriterPacketLoss::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/common/tracing_service_capabilities.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_CAPABILITIES_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_CAPABILITIES_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum ObservableEvents_Type : int32_t;
+
+class TracingServiceCapabilities_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TracingServiceCapabilities_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TracingServiceCapabilities_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TracingServiceCapabilities_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_has_query_capabilities() const { return at<1>().valid(); }
+  bool has_query_capabilities() const { return at<1>().as_bool(); }
+  bool has_observable_events() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<int32_t> observable_events() const { return GetRepeated<int32_t>(2); }
+  bool has_has_trace_config_output_path() const { return at<3>().valid(); }
+  bool has_trace_config_output_path() const { return at<3>().as_bool(); }
+};
+
+class TracingServiceCapabilities : public ::protozero::Message {
+ public:
+  using Decoder = TracingServiceCapabilities_Decoder;
+  enum : int32_t {
+    kHasQueryCapabilitiesFieldNumber = 1,
+    kObservableEventsFieldNumber = 2,
+    kHasTraceConfigOutputPathFieldNumber = 3,
+  };
+
+  using FieldMetadata_HasQueryCapabilities =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracingServiceCapabilities>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HasQueryCapabilities kHasQueryCapabilities() { return {}; }
+  void set_has_query_capabilities(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_HasQueryCapabilities::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ObservableEvents =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ObservableEvents_Type,
+      TracingServiceCapabilities>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ObservableEvents kObservableEvents() { return {}; }
+  void add_observable_events(::perfetto::protos::pbzero::ObservableEvents_Type value) {
+    static constexpr uint32_t field_id = FieldMetadata_ObservableEvents::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HasTraceConfigOutputPath =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracingServiceCapabilities>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HasTraceConfigOutputPath kHasTraceConfigOutputPath() { return {}; }
+  void set_has_trace_config_output_path(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_HasTraceConfigOutputPath::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/common/tracing_service_state.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_STATE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACING_SERVICE_STATE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class DataSourceDescriptor;
+class TracingServiceState_DataSource;
+class TracingServiceState_Producer;
+
+class TracingServiceState_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TracingServiceState_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TracingServiceState_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TracingServiceState_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_producers() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> producers() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_data_sources() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> data_sources() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_num_sessions() const { return at<3>().valid(); }
+  int32_t num_sessions() const { return at<3>().as_int32(); }
+  bool has_num_sessions_started() const { return at<4>().valid(); }
+  int32_t num_sessions_started() const { return at<4>().as_int32(); }
+  bool has_tracing_service_version() const { return at<5>().valid(); }
+  ::protozero::ConstChars tracing_service_version() const { return at<5>().as_string(); }
+};
+
+class TracingServiceState : public ::protozero::Message {
+ public:
+  using Decoder = TracingServiceState_Decoder;
+  enum : int32_t {
+    kProducersFieldNumber = 1,
+    kDataSourcesFieldNumber = 2,
+    kNumSessionsFieldNumber = 3,
+    kNumSessionsStartedFieldNumber = 4,
+    kTracingServiceVersionFieldNumber = 5,
+  };
+  using Producer = ::perfetto::protos::pbzero::TracingServiceState_Producer;
+  using DataSource = ::perfetto::protos::pbzero::TracingServiceState_DataSource;
+
+  using FieldMetadata_Producers =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TracingServiceState_Producer,
+      TracingServiceState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Producers kProducers() { return {}; }
+  template <typename T = TracingServiceState_Producer> T* add_producers() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_DataSources =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TracingServiceState_DataSource,
+      TracingServiceState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DataSources kDataSources() { return {}; }
+  template <typename T = TracingServiceState_DataSource> T* add_data_sources() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_NumSessions =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TracingServiceState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NumSessions kNumSessions() { return {}; }
+  void set_num_sessions(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NumSessions::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NumSessionsStarted =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TracingServiceState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NumSessionsStarted kNumSessionsStarted() { return {}; }
+  void set_num_sessions_started(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NumSessionsStarted::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TracingServiceVersion =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TracingServiceState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TracingServiceVersion kTracingServiceVersion() { return {}; }
+  void set_tracing_service_version(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_TracingServiceVersion::kFieldId, data, size);
+  }
+  void set_tracing_service_version(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_TracingServiceVersion::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TracingServiceState_DataSource_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TracingServiceState_DataSource_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TracingServiceState_DataSource_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TracingServiceState_DataSource_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_ds_descriptor() const { return at<1>().valid(); }
+  ::protozero::ConstBytes ds_descriptor() const { return at<1>().as_bytes(); }
+  bool has_producer_id() const { return at<2>().valid(); }
+  int32_t producer_id() const { return at<2>().as_int32(); }
+};
+
+class TracingServiceState_DataSource : public ::protozero::Message {
+ public:
+  using Decoder = TracingServiceState_DataSource_Decoder;
+  enum : int32_t {
+    kDsDescriptorFieldNumber = 1,
+    kProducerIdFieldNumber = 2,
+  };
+
+  using FieldMetadata_DsDescriptor =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DataSourceDescriptor,
+      TracingServiceState_DataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DsDescriptor kDsDescriptor() { return {}; }
+  template <typename T = DataSourceDescriptor> T* set_ds_descriptor() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_ProducerId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TracingServiceState_DataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProducerId kProducerId() { return {}; }
+  void set_producer_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProducerId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TracingServiceState_Producer_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TracingServiceState_Producer_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TracingServiceState_Producer_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TracingServiceState_Producer_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_id() const { return at<1>().valid(); }
+  int32_t id() const { return at<1>().as_int32(); }
+  bool has_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars name() const { return at<2>().as_string(); }
+  bool has_uid() const { return at<3>().valid(); }
+  int32_t uid() const { return at<3>().as_int32(); }
+  bool has_sdk_version() const { return at<4>().valid(); }
+  ::protozero::ConstChars sdk_version() const { return at<4>().as_string(); }
+};
+
+class TracingServiceState_Producer : public ::protozero::Message {
+ public:
+  using Decoder = TracingServiceState_Producer_Decoder;
+  enum : int32_t {
+    kIdFieldNumber = 1,
+    kNameFieldNumber = 2,
+    kUidFieldNumber = 3,
+    kSdkVersionFieldNumber = 4,
+  };
+
+  using FieldMetadata_Id =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TracingServiceState_Producer>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Id kId() { return {}; }
+  void set_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Id::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TracingServiceState_Producer>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Uid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TracingServiceState_Producer>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Uid kUid() { return {}; }
+  void set_uid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Uid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SdkVersion =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TracingServiceState_Producer>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SdkVersion kSdkVersion() { return {}; }
+  void set_sdk_version(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_SdkVersion::kFieldId, data, size);
+  }
+  void set_sdk_version(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_SdkVersion::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/common/track_event_descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACK_EVENT_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_COMMON_TRACK_EVENT_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class TrackEventCategory;
+
+class TrackEventDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TrackEventDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TrackEventDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TrackEventDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_available_categories() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> available_categories() const { return GetRepeated<::protozero::ConstBytes>(1); }
+};
+
+class TrackEventDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = TrackEventDescriptor_Decoder;
+  enum : int32_t {
+    kAvailableCategoriesFieldNumber = 1,
+  };
+
+  using FieldMetadata_AvailableCategories =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TrackEventCategory,
+      TrackEventDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AvailableCategories kAvailableCategories() { return {}; }
+  template <typename T = TrackEventCategory> T* add_available_categories() {
+    return BeginNestedMessage<T>(1);
+  }
+
+};
+
+class TrackEventCategory_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TrackEventCategory_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TrackEventCategory_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TrackEventCategory_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_description() const { return at<2>().valid(); }
+  ::protozero::ConstChars description() const { return at<2>().as_string(); }
+  bool has_tags() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> tags() const { return GetRepeated<::protozero::ConstChars>(3); }
+};
+
+class TrackEventCategory : public ::protozero::Message {
+ public:
+  using Decoder = TrackEventCategory_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kDescriptionFieldNumber = 2,
+    kTagsFieldNumber = 3,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackEventCategory>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Description =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackEventCategory>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Description kDescription() { return {}; }
+  void set_description(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Description::kFieldId, data, size);
+  }
+  void set_description(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Description::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Tags =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackEventCategory>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tags kTags() { return {}; }
+  void add_tags(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Tags::kFieldId, data, size);
+  }
+  void add_tags(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/track_event/track_event_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACK_EVENT_TRACK_EVENT_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACK_EVENT_TRACK_EVENT_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TrackEventConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT TrackEventConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDisabledCategoriesFieldNumber = 1,
+    kEnabledCategoriesFieldNumber = 2,
+    kDisabledTagsFieldNumber = 3,
+    kEnabledTagsFieldNumber = 4,
+  };
+
+  TrackEventConfig();
+  ~TrackEventConfig() override;
+  TrackEventConfig(TrackEventConfig&&) noexcept;
+  TrackEventConfig& operator=(TrackEventConfig&&);
+  TrackEventConfig(const TrackEventConfig&);
+  TrackEventConfig& operator=(const TrackEventConfig&);
+  bool operator==(const TrackEventConfig&) const;
+  bool operator!=(const TrackEventConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<std::string>& disabled_categories() const { return disabled_categories_; }
+  std::vector<std::string>* mutable_disabled_categories() { return &disabled_categories_; }
+  int disabled_categories_size() const { return static_cast<int>(disabled_categories_.size()); }
+  void clear_disabled_categories() { disabled_categories_.clear(); }
+  void add_disabled_categories(std::string value) { disabled_categories_.emplace_back(value); }
+  std::string* add_disabled_categories() { disabled_categories_.emplace_back(); return &disabled_categories_.back(); }
+
+  const std::vector<std::string>& enabled_categories() const { return enabled_categories_; }
+  std::vector<std::string>* mutable_enabled_categories() { return &enabled_categories_; }
+  int enabled_categories_size() const { return static_cast<int>(enabled_categories_.size()); }
+  void clear_enabled_categories() { enabled_categories_.clear(); }
+  void add_enabled_categories(std::string value) { enabled_categories_.emplace_back(value); }
+  std::string* add_enabled_categories() { enabled_categories_.emplace_back(); return &enabled_categories_.back(); }
+
+  const std::vector<std::string>& disabled_tags() const { return disabled_tags_; }
+  std::vector<std::string>* mutable_disabled_tags() { return &disabled_tags_; }
+  int disabled_tags_size() const { return static_cast<int>(disabled_tags_.size()); }
+  void clear_disabled_tags() { disabled_tags_.clear(); }
+  void add_disabled_tags(std::string value) { disabled_tags_.emplace_back(value); }
+  std::string* add_disabled_tags() { disabled_tags_.emplace_back(); return &disabled_tags_.back(); }
+
+  const std::vector<std::string>& enabled_tags() const { return enabled_tags_; }
+  std::vector<std::string>* mutable_enabled_tags() { return &enabled_tags_; }
+  int enabled_tags_size() const { return static_cast<int>(enabled_tags_.size()); }
+  void clear_enabled_tags() { enabled_tags_.clear(); }
+  void add_enabled_tags(std::string value) { enabled_tags_.emplace_back(value); }
+  std::string* add_enabled_tags() { enabled_tags_.emplace_back(); return &enabled_tags_.back(); }
+
+ private:
+  std::vector<std::string> disabled_categories_;
+  std::vector<std::string> enabled_categories_;
+  std::vector<std::string> disabled_tags_;
+  std::vector<std::string> enabled_tags_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACK_EVENT_TRACK_EVENT_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/android/android_log_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_LOG_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_LOG_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum AndroidLogId : int32_t;
+enum AndroidLogPriority : int32_t;
+
+class AndroidLogConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  AndroidLogConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AndroidLogConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AndroidLogConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_log_ids() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<int32_t> log_ids() const { return GetRepeated<int32_t>(1); }
+  bool has_min_prio() const { return at<3>().valid(); }
+  int32_t min_prio() const { return at<3>().as_int32(); }
+  bool has_filter_tags() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> filter_tags() const { return GetRepeated<::protozero::ConstChars>(4); }
+};
+
+class AndroidLogConfig : public ::protozero::Message {
+ public:
+  using Decoder = AndroidLogConfig_Decoder;
+  enum : int32_t {
+    kLogIdsFieldNumber = 1,
+    kMinPrioFieldNumber = 3,
+    kFilterTagsFieldNumber = 4,
+  };
+
+  using FieldMetadata_LogIds =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::AndroidLogId,
+      AndroidLogConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LogIds kLogIds() { return {}; }
+  void add_log_ids(::perfetto::protos::pbzero::AndroidLogId value) {
+    static constexpr uint32_t field_id = FieldMetadata_LogIds::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MinPrio =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::AndroidLogPriority,
+      AndroidLogConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MinPrio kMinPrio() { return {}; }
+  void set_min_prio(::perfetto::protos::pbzero::AndroidLogPriority value) {
+    static constexpr uint32_t field_id = FieldMetadata_MinPrio::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FilterTags =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      AndroidLogConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FilterTags kFilterTags() { return {}; }
+  void add_filter_tags(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_FilterTags::kFieldId, data, size);
+  }
+  void add_filter_tags(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_FilterTags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/android/android_polled_state_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_POLLED_STATE_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_POLLED_STATE_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class AndroidPolledStateConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  AndroidPolledStateConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AndroidPolledStateConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AndroidPolledStateConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_poll_ms() const { return at<1>().valid(); }
+  uint32_t poll_ms() const { return at<1>().as_uint32(); }
+};
+
+class AndroidPolledStateConfig : public ::protozero::Message {
+ public:
+  using Decoder = AndroidPolledStateConfig_Decoder;
+  enum : int32_t {
+    kPollMsFieldNumber = 1,
+  };
+
+  using FieldMetadata_PollMs =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      AndroidPolledStateConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PollMs kPollMs() { return {}; }
+  void set_poll_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PollMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/android/packages_list_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_PACKAGES_LIST_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_PACKAGES_LIST_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class PackagesListConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  PackagesListConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PackagesListConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PackagesListConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_package_name_filter() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> package_name_filter() const { return GetRepeated<::protozero::ConstChars>(1); }
+};
+
+class PackagesListConfig : public ::protozero::Message {
+ public:
+  using Decoder = PackagesListConfig_Decoder;
+  enum : int32_t {
+    kPackageNameFilterFieldNumber = 1,
+  };
+
+  using FieldMetadata_PackageNameFilter =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PackagesListConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PackageNameFilter kPackageNameFilter() { return {}; }
+  void add_package_name_filter(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_PackageNameFilter::kFieldId, data, size);
+  }
+  void add_package_name_filter(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_PackageNameFilter::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/ftrace/ftrace_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_FTRACE_FTRACE_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_FTRACE_FTRACE_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class FtraceConfig_CompactSchedConfig;
+
+class FtraceConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/14, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  FtraceConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FtraceConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FtraceConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_ftrace_events() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> ftrace_events() const { return GetRepeated<::protozero::ConstChars>(1); }
+  bool has_atrace_categories() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> atrace_categories() const { return GetRepeated<::protozero::ConstChars>(2); }
+  bool has_atrace_apps() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> atrace_apps() const { return GetRepeated<::protozero::ConstChars>(3); }
+  bool has_buffer_size_kb() const { return at<10>().valid(); }
+  uint32_t buffer_size_kb() const { return at<10>().as_uint32(); }
+  bool has_drain_period_ms() const { return at<11>().valid(); }
+  uint32_t drain_period_ms() const { return at<11>().as_uint32(); }
+  bool has_compact_sched() const { return at<12>().valid(); }
+  ::protozero::ConstBytes compact_sched() const { return at<12>().as_bytes(); }
+  bool has_symbolize_ksyms() const { return at<13>().valid(); }
+  bool symbolize_ksyms() const { return at<13>().as_bool(); }
+  bool has_initialize_ksyms_synchronously_for_testing() const { return at<14>().valid(); }
+  bool initialize_ksyms_synchronously_for_testing() const { return at<14>().as_bool(); }
+};
+
+class FtraceConfig : public ::protozero::Message {
+ public:
+  using Decoder = FtraceConfig_Decoder;
+  enum : int32_t {
+    kFtraceEventsFieldNumber = 1,
+    kAtraceCategoriesFieldNumber = 2,
+    kAtraceAppsFieldNumber = 3,
+    kBufferSizeKbFieldNumber = 10,
+    kDrainPeriodMsFieldNumber = 11,
+    kCompactSchedFieldNumber = 12,
+    kSymbolizeKsymsFieldNumber = 13,
+    kInitializeKsymsSynchronouslyForTestingFieldNumber = 14,
+  };
+  using CompactSchedConfig = ::perfetto::protos::pbzero::FtraceConfig_CompactSchedConfig;
+
+  using FieldMetadata_FtraceEvents =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FtraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FtraceEvents kFtraceEvents() { return {}; }
+  void add_ftrace_events(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_FtraceEvents::kFieldId, data, size);
+  }
+  void add_ftrace_events(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_FtraceEvents::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AtraceCategories =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FtraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AtraceCategories kAtraceCategories() { return {}; }
+  void add_atrace_categories(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_AtraceCategories::kFieldId, data, size);
+  }
+  void add_atrace_categories(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_AtraceCategories::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AtraceApps =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FtraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AtraceApps kAtraceApps() { return {}; }
+  void add_atrace_apps(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_AtraceApps::kFieldId, data, size);
+  }
+  void add_atrace_apps(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_AtraceApps::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BufferSizeKb =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      FtraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BufferSizeKb kBufferSizeKb() { return {}; }
+  void set_buffer_size_kb(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BufferSizeKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DrainPeriodMs =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      FtraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DrainPeriodMs kDrainPeriodMs() { return {}; }
+  void set_drain_period_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DrainPeriodMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CompactSched =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FtraceConfig_CompactSchedConfig,
+      FtraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CompactSched kCompactSched() { return {}; }
+  template <typename T = FtraceConfig_CompactSchedConfig> T* set_compact_sched() {
+    return BeginNestedMessage<T>(12);
+  }
+
+
+  using FieldMetadata_SymbolizeKsyms =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      FtraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SymbolizeKsyms kSymbolizeKsyms() { return {}; }
+  void set_symbolize_ksyms(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_SymbolizeKsyms::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_InitializeKsymsSynchronouslyForTesting =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      FtraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InitializeKsymsSynchronouslyForTesting kInitializeKsymsSynchronouslyForTesting() { return {}; }
+  void set_initialize_ksyms_synchronously_for_testing(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_InitializeKsymsSynchronouslyForTesting::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class FtraceConfig_CompactSchedConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  FtraceConfig_CompactSchedConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FtraceConfig_CompactSchedConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FtraceConfig_CompactSchedConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_enabled() const { return at<1>().valid(); }
+  bool enabled() const { return at<1>().as_bool(); }
+};
+
+class FtraceConfig_CompactSchedConfig : public ::protozero::Message {
+ public:
+  using Decoder = FtraceConfig_CompactSchedConfig_Decoder;
+  enum : int32_t {
+    kEnabledFieldNumber = 1,
+  };
+
+  using FieldMetadata_Enabled =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      FtraceConfig_CompactSchedConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Enabled kEnabled() { return {}; }
+  void set_enabled(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_Enabled::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/gpu/gpu_counter_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_GPU_COUNTER_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_GPU_COUNTER_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class GpuCounterConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  GpuCounterConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GpuCounterConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GpuCounterConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_counter_period_ns() const { return at<1>().valid(); }
+  uint64_t counter_period_ns() const { return at<1>().as_uint64(); }
+  bool has_counter_ids() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint32_t> counter_ids() const { return GetRepeated<uint32_t>(2); }
+  bool has_instrumented_sampling() const { return at<3>().valid(); }
+  bool instrumented_sampling() const { return at<3>().as_bool(); }
+  bool has_fix_gpu_clock() const { return at<4>().valid(); }
+  bool fix_gpu_clock() const { return at<4>().as_bool(); }
+};
+
+class GpuCounterConfig : public ::protozero::Message {
+ public:
+  using Decoder = GpuCounterConfig_Decoder;
+  enum : int32_t {
+    kCounterPeriodNsFieldNumber = 1,
+    kCounterIdsFieldNumber = 2,
+    kInstrumentedSamplingFieldNumber = 3,
+    kFixGpuClockFieldNumber = 4,
+  };
+
+  using FieldMetadata_CounterPeriodNs =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      GpuCounterConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CounterPeriodNs kCounterPeriodNs() { return {}; }
+  void set_counter_period_ns(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CounterPeriodNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CounterIds =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      GpuCounterConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CounterIds kCounterIds() { return {}; }
+  void add_counter_ids(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CounterIds::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_InstrumentedSampling =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      GpuCounterConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InstrumentedSampling kInstrumentedSampling() { return {}; }
+  void set_instrumented_sampling(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_InstrumentedSampling::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FixGpuClock =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      GpuCounterConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FixGpuClock kFixGpuClock() { return {}; }
+  void set_fix_gpu_clock(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_FixGpuClock::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/gpu/vulkan_memory_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_VULKAN_MEMORY_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_VULKAN_MEMORY_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class VulkanMemoryConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  VulkanMemoryConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit VulkanMemoryConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit VulkanMemoryConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_track_driver_memory_usage() const { return at<1>().valid(); }
+  bool track_driver_memory_usage() const { return at<1>().as_bool(); }
+  bool has_track_device_memory_usage() const { return at<2>().valid(); }
+  bool track_device_memory_usage() const { return at<2>().as_bool(); }
+};
+
+class VulkanMemoryConfig : public ::protozero::Message {
+ public:
+  using Decoder = VulkanMemoryConfig_Decoder;
+  enum : int32_t {
+    kTrackDriverMemoryUsageFieldNumber = 1,
+    kTrackDeviceMemoryUsageFieldNumber = 2,
+  };
+
+  using FieldMetadata_TrackDriverMemoryUsage =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      VulkanMemoryConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrackDriverMemoryUsage kTrackDriverMemoryUsage() { return {}; }
+  void set_track_driver_memory_usage(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_TrackDriverMemoryUsage::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TrackDeviceMemoryUsage =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      VulkanMemoryConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrackDeviceMemoryUsage kTrackDeviceMemoryUsage() { return {}; }
+  void set_track_device_memory_usage(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_TrackDeviceMemoryUsage::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/inode_file/inode_file_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INODE_FILE_INODE_FILE_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INODE_FILE_INODE_FILE_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class InodeFileConfig_MountPointMappingEntry;
+
+class InodeFileConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  InodeFileConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit InodeFileConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit InodeFileConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_scan_interval_ms() const { return at<1>().valid(); }
+  uint32_t scan_interval_ms() const { return at<1>().as_uint32(); }
+  bool has_scan_delay_ms() const { return at<2>().valid(); }
+  uint32_t scan_delay_ms() const { return at<2>().as_uint32(); }
+  bool has_scan_batch_size() const { return at<3>().valid(); }
+  uint32_t scan_batch_size() const { return at<3>().as_uint32(); }
+  bool has_do_not_scan() const { return at<4>().valid(); }
+  bool do_not_scan() const { return at<4>().as_bool(); }
+  bool has_scan_mount_points() const { return at<5>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> scan_mount_points() const { return GetRepeated<::protozero::ConstChars>(5); }
+  bool has_mount_point_mapping() const { return at<6>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> mount_point_mapping() const { return GetRepeated<::protozero::ConstBytes>(6); }
+};
+
+class InodeFileConfig : public ::protozero::Message {
+ public:
+  using Decoder = InodeFileConfig_Decoder;
+  enum : int32_t {
+    kScanIntervalMsFieldNumber = 1,
+    kScanDelayMsFieldNumber = 2,
+    kScanBatchSizeFieldNumber = 3,
+    kDoNotScanFieldNumber = 4,
+    kScanMountPointsFieldNumber = 5,
+    kMountPointMappingFieldNumber = 6,
+  };
+  using MountPointMappingEntry = ::perfetto::protos::pbzero::InodeFileConfig_MountPointMappingEntry;
+
+  using FieldMetadata_ScanIntervalMs =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      InodeFileConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ScanIntervalMs kScanIntervalMs() { return {}; }
+  void set_scan_interval_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ScanIntervalMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ScanDelayMs =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      InodeFileConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ScanDelayMs kScanDelayMs() { return {}; }
+  void set_scan_delay_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ScanDelayMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ScanBatchSize =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      InodeFileConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ScanBatchSize kScanBatchSize() { return {}; }
+  void set_scan_batch_size(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ScanBatchSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DoNotScan =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      InodeFileConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DoNotScan kDoNotScan() { return {}; }
+  void set_do_not_scan(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DoNotScan::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ScanMountPoints =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      InodeFileConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ScanMountPoints kScanMountPoints() { return {}; }
+  void add_scan_mount_points(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ScanMountPoints::kFieldId, data, size);
+  }
+  void add_scan_mount_points(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ScanMountPoints::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MountPointMapping =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InodeFileConfig_MountPointMappingEntry,
+      InodeFileConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MountPointMapping kMountPointMapping() { return {}; }
+  template <typename T = InodeFileConfig_MountPointMappingEntry> T* add_mount_point_mapping() {
+    return BeginNestedMessage<T>(6);
+  }
+
+};
+
+class InodeFileConfig_MountPointMappingEntry_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  InodeFileConfig_MountPointMappingEntry_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit InodeFileConfig_MountPointMappingEntry_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit InodeFileConfig_MountPointMappingEntry_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_mountpoint() const { return at<1>().valid(); }
+  ::protozero::ConstChars mountpoint() const { return at<1>().as_string(); }
+  bool has_scan_roots() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> scan_roots() const { return GetRepeated<::protozero::ConstChars>(2); }
+};
+
+class InodeFileConfig_MountPointMappingEntry : public ::protozero::Message {
+ public:
+  using Decoder = InodeFileConfig_MountPointMappingEntry_Decoder;
+  enum : int32_t {
+    kMountpointFieldNumber = 1,
+    kScanRootsFieldNumber = 2,
+  };
+
+  using FieldMetadata_Mountpoint =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      InodeFileConfig_MountPointMappingEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mountpoint kMountpoint() { return {}; }
+  void set_mountpoint(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Mountpoint::kFieldId, data, size);
+  }
+  void set_mountpoint(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mountpoint::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ScanRoots =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      InodeFileConfig_MountPointMappingEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ScanRoots kScanRoots() { return {}; }
+  void add_scan_roots(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ScanRoots::kFieldId, data, size);
+  }
+  void add_scan_roots(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ScanRoots::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/interceptors/console_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTORS_CONSOLE_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTORS_CONSOLE_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum ConsoleConfig_Output : int32_t;
+
+enum ConsoleConfig_Output : int32_t {
+  ConsoleConfig_Output_OUTPUT_UNSPECIFIED = 0,
+  ConsoleConfig_Output_OUTPUT_STDOUT = 1,
+  ConsoleConfig_Output_OUTPUT_STDERR = 2,
+};
+
+const ConsoleConfig_Output ConsoleConfig_Output_MIN = ConsoleConfig_Output_OUTPUT_UNSPECIFIED;
+const ConsoleConfig_Output ConsoleConfig_Output_MAX = ConsoleConfig_Output_OUTPUT_STDERR;
+
+class ConsoleConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ConsoleConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ConsoleConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ConsoleConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_output() const { return at<1>().valid(); }
+  int32_t output() const { return at<1>().as_int32(); }
+  bool has_enable_colors() const { return at<2>().valid(); }
+  bool enable_colors() const { return at<2>().as_bool(); }
+};
+
+class ConsoleConfig : public ::protozero::Message {
+ public:
+  using Decoder = ConsoleConfig_Decoder;
+  enum : int32_t {
+    kOutputFieldNumber = 1,
+    kEnableColorsFieldNumber = 2,
+  };
+  using Output = ::perfetto::protos::pbzero::ConsoleConfig_Output;
+  static const Output OUTPUT_UNSPECIFIED = ConsoleConfig_Output_OUTPUT_UNSPECIFIED;
+  static const Output OUTPUT_STDOUT = ConsoleConfig_Output_OUTPUT_STDOUT;
+  static const Output OUTPUT_STDERR = ConsoleConfig_Output_OUTPUT_STDERR;
+
+  using FieldMetadata_Output =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ConsoleConfig_Output,
+      ConsoleConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Output kOutput() { return {}; }
+  void set_output(::perfetto::protos::pbzero::ConsoleConfig_Output value) {
+    static constexpr uint32_t field_id = FieldMetadata_Output::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EnableColors =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ConsoleConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EnableColors kEnableColors() { return {}; }
+  void set_enable_colors(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_EnableColors::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/power/android_power_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_POWER_ANDROID_POWER_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_POWER_ANDROID_POWER_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum AndroidPowerConfig_BatteryCounters : int32_t;
+
+enum AndroidPowerConfig_BatteryCounters : int32_t {
+  AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_UNSPECIFIED = 0,
+  AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CHARGE = 1,
+  AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CAPACITY_PERCENT = 2,
+  AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT = 3,
+  AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT_AVG = 4,
+};
+
+const AndroidPowerConfig_BatteryCounters AndroidPowerConfig_BatteryCounters_MIN = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_UNSPECIFIED;
+const AndroidPowerConfig_BatteryCounters AndroidPowerConfig_BatteryCounters_MAX = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT_AVG;
+
+class AndroidPowerConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  AndroidPowerConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AndroidPowerConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AndroidPowerConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_battery_poll_ms() const { return at<1>().valid(); }
+  uint32_t battery_poll_ms() const { return at<1>().as_uint32(); }
+  bool has_battery_counters() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<int32_t> battery_counters() const { return GetRepeated<int32_t>(2); }
+  bool has_collect_power_rails() const { return at<3>().valid(); }
+  bool collect_power_rails() const { return at<3>().as_bool(); }
+  bool has_collect_energy_estimation_breakdown() const { return at<4>().valid(); }
+  bool collect_energy_estimation_breakdown() const { return at<4>().as_bool(); }
+};
+
+class AndroidPowerConfig : public ::protozero::Message {
+ public:
+  using Decoder = AndroidPowerConfig_Decoder;
+  enum : int32_t {
+    kBatteryPollMsFieldNumber = 1,
+    kBatteryCountersFieldNumber = 2,
+    kCollectPowerRailsFieldNumber = 3,
+    kCollectEnergyEstimationBreakdownFieldNumber = 4,
+  };
+  using BatteryCounters = ::perfetto::protos::pbzero::AndroidPowerConfig_BatteryCounters;
+  static const BatteryCounters BATTERY_COUNTER_UNSPECIFIED = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_UNSPECIFIED;
+  static const BatteryCounters BATTERY_COUNTER_CHARGE = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CHARGE;
+  static const BatteryCounters BATTERY_COUNTER_CAPACITY_PERCENT = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CAPACITY_PERCENT;
+  static const BatteryCounters BATTERY_COUNTER_CURRENT = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT;
+  static const BatteryCounters BATTERY_COUNTER_CURRENT_AVG = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT_AVG;
+
+  using FieldMetadata_BatteryPollMs =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      AndroidPowerConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BatteryPollMs kBatteryPollMs() { return {}; }
+  void set_battery_poll_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BatteryPollMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BatteryCounters =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::AndroidPowerConfig_BatteryCounters,
+      AndroidPowerConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BatteryCounters kBatteryCounters() { return {}; }
+  void add_battery_counters(::perfetto::protos::pbzero::AndroidPowerConfig_BatteryCounters value) {
+    static constexpr uint32_t field_id = FieldMetadata_BatteryCounters::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CollectPowerRails =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      AndroidPowerConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CollectPowerRails kCollectPowerRails() { return {}; }
+  void set_collect_power_rails(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_CollectPowerRails::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CollectEnergyEstimationBreakdown =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      AndroidPowerConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CollectEnergyEstimationBreakdown kCollectEnergyEstimationBreakdown() { return {}; }
+  void set_collect_energy_estimation_breakdown(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_CollectEnergyEstimationBreakdown::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/process_stats/process_stats_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROCESS_STATS_PROCESS_STATS_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROCESS_STATS_PROCESS_STATS_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum ProcessStatsConfig_Quirks : int32_t;
+
+enum ProcessStatsConfig_Quirks : int32_t {
+  ProcessStatsConfig_Quirks_QUIRKS_UNSPECIFIED = 0,
+  ProcessStatsConfig_Quirks_DISABLE_INITIAL_DUMP = 1,
+  ProcessStatsConfig_Quirks_DISABLE_ON_DEMAND = 2,
+};
+
+const ProcessStatsConfig_Quirks ProcessStatsConfig_Quirks_MIN = ProcessStatsConfig_Quirks_QUIRKS_UNSPECIFIED;
+const ProcessStatsConfig_Quirks ProcessStatsConfig_Quirks_MAX = ProcessStatsConfig_Quirks_DISABLE_ON_DEMAND;
+
+class ProcessStatsConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ProcessStatsConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ProcessStatsConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ProcessStatsConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_quirks() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<int32_t> quirks() const { return GetRepeated<int32_t>(1); }
+  bool has_scan_all_processes_on_start() const { return at<2>().valid(); }
+  bool scan_all_processes_on_start() const { return at<2>().as_bool(); }
+  bool has_record_thread_names() const { return at<3>().valid(); }
+  bool record_thread_names() const { return at<3>().as_bool(); }
+  bool has_proc_stats_poll_ms() const { return at<4>().valid(); }
+  uint32_t proc_stats_poll_ms() const { return at<4>().as_uint32(); }
+  bool has_proc_stats_cache_ttl_ms() const { return at<6>().valid(); }
+  uint32_t proc_stats_cache_ttl_ms() const { return at<6>().as_uint32(); }
+  bool has_record_thread_time_in_state() const { return at<7>().valid(); }
+  bool record_thread_time_in_state() const { return at<7>().as_bool(); }
+  bool has_thread_time_in_state_cache_size() const { return at<8>().valid(); }
+  uint32_t thread_time_in_state_cache_size() const { return at<8>().as_uint32(); }
+};
+
+class ProcessStatsConfig : public ::protozero::Message {
+ public:
+  using Decoder = ProcessStatsConfig_Decoder;
+  enum : int32_t {
+    kQuirksFieldNumber = 1,
+    kScanAllProcessesOnStartFieldNumber = 2,
+    kRecordThreadNamesFieldNumber = 3,
+    kProcStatsPollMsFieldNumber = 4,
+    kProcStatsCacheTtlMsFieldNumber = 6,
+    kRecordThreadTimeInStateFieldNumber = 7,
+    kThreadTimeInStateCacheSizeFieldNumber = 8,
+  };
+  using Quirks = ::perfetto::protos::pbzero::ProcessStatsConfig_Quirks;
+  static const Quirks QUIRKS_UNSPECIFIED = ProcessStatsConfig_Quirks_QUIRKS_UNSPECIFIED;
+  static const Quirks DISABLE_INITIAL_DUMP = ProcessStatsConfig_Quirks_DISABLE_INITIAL_DUMP;
+  static const Quirks DISABLE_ON_DEMAND = ProcessStatsConfig_Quirks_DISABLE_ON_DEMAND;
+
+  using FieldMetadata_Quirks =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ProcessStatsConfig_Quirks,
+      ProcessStatsConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Quirks kQuirks() { return {}; }
+  void add_quirks(::perfetto::protos::pbzero::ProcessStatsConfig_Quirks value) {
+    static constexpr uint32_t field_id = FieldMetadata_Quirks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ScanAllProcessesOnStart =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ProcessStatsConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ScanAllProcessesOnStart kScanAllProcessesOnStart() { return {}; }
+  void set_scan_all_processes_on_start(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_ScanAllProcessesOnStart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RecordThreadNames =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ProcessStatsConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RecordThreadNames kRecordThreadNames() { return {}; }
+  void set_record_thread_names(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_RecordThreadNames::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProcStatsPollMs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ProcessStatsConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcStatsPollMs kProcStatsPollMs() { return {}; }
+  void set_proc_stats_poll_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProcStatsPollMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProcStatsCacheTtlMs =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ProcessStatsConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcStatsCacheTtlMs kProcStatsCacheTtlMs() { return {}; }
+  void set_proc_stats_cache_ttl_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProcStatsCacheTtlMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RecordThreadTimeInState =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ProcessStatsConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RecordThreadTimeInState kRecordThreadTimeInState() { return {}; }
+  void set_record_thread_time_in_state(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_RecordThreadTimeInState::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadTimeInStateCacheSize =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ProcessStatsConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadTimeInStateCacheSize kThreadTimeInStateCacheSize() { return {}; }
+  void set_thread_time_in_state_cache_size(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadTimeInStateCacheSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/profiling/heapprofd_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_HEAPPROFD_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_HEAPPROFD_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class HeapprofdConfig_ContinuousDumpConfig;
+
+class HeapprofdConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/27, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  HeapprofdConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit HeapprofdConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit HeapprofdConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_sampling_interval_bytes() const { return at<1>().valid(); }
+  uint64_t sampling_interval_bytes() const { return at<1>().as_uint64(); }
+  bool has_adaptive_sampling_shmem_threshold() const { return at<24>().valid(); }
+  uint64_t adaptive_sampling_shmem_threshold() const { return at<24>().as_uint64(); }
+  bool has_adaptive_sampling_max_sampling_interval_bytes() const { return at<25>().valid(); }
+  uint64_t adaptive_sampling_max_sampling_interval_bytes() const { return at<25>().as_uint64(); }
+  bool has_process_cmdline() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> process_cmdline() const { return GetRepeated<::protozero::ConstChars>(2); }
+  bool has_pid() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> pid() const { return GetRepeated<uint64_t>(4); }
+  bool has_target_installed_by() const { return at<26>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> target_installed_by() const { return GetRepeated<::protozero::ConstChars>(26); }
+  bool has_heaps() const { return at<20>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> heaps() const { return GetRepeated<::protozero::ConstChars>(20); }
+  bool has_exclude_heaps() const { return at<27>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> exclude_heaps() const { return GetRepeated<::protozero::ConstChars>(27); }
+  bool has_stream_allocations() const { return at<23>().valid(); }
+  bool stream_allocations() const { return at<23>().as_bool(); }
+  bool has_heap_sampling_intervals() const { return at<22>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> heap_sampling_intervals() const { return GetRepeated<uint64_t>(22); }
+  bool has_all_heaps() const { return at<21>().valid(); }
+  bool all_heaps() const { return at<21>().as_bool(); }
+  bool has_all() const { return at<5>().valid(); }
+  bool all() const { return at<5>().as_bool(); }
+  bool has_min_anonymous_memory_kb() const { return at<15>().valid(); }
+  uint32_t min_anonymous_memory_kb() const { return at<15>().as_uint32(); }
+  bool has_max_heapprofd_memory_kb() const { return at<16>().valid(); }
+  uint32_t max_heapprofd_memory_kb() const { return at<16>().as_uint32(); }
+  bool has_max_heapprofd_cpu_secs() const { return at<17>().valid(); }
+  uint64_t max_heapprofd_cpu_secs() const { return at<17>().as_uint64(); }
+  bool has_skip_symbol_prefix() const { return at<7>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> skip_symbol_prefix() const { return GetRepeated<::protozero::ConstChars>(7); }
+  bool has_continuous_dump_config() const { return at<6>().valid(); }
+  ::protozero::ConstBytes continuous_dump_config() const { return at<6>().as_bytes(); }
+  bool has_shmem_size_bytes() const { return at<8>().valid(); }
+  uint64_t shmem_size_bytes() const { return at<8>().as_uint64(); }
+  bool has_block_client() const { return at<9>().valid(); }
+  bool block_client() const { return at<9>().as_bool(); }
+  bool has_block_client_timeout_us() const { return at<14>().valid(); }
+  uint32_t block_client_timeout_us() const { return at<14>().as_uint32(); }
+  bool has_no_startup() const { return at<10>().valid(); }
+  bool no_startup() const { return at<10>().as_bool(); }
+  bool has_no_running() const { return at<11>().valid(); }
+  bool no_running() const { return at<11>().as_bool(); }
+  bool has_dump_at_max() const { return at<13>().valid(); }
+  bool dump_at_max() const { return at<13>().as_bool(); }
+  bool has_disable_fork_teardown() const { return at<18>().valid(); }
+  bool disable_fork_teardown() const { return at<18>().as_bool(); }
+  bool has_disable_vfork_detection() const { return at<19>().valid(); }
+  bool disable_vfork_detection() const { return at<19>().as_bool(); }
+};
+
+class HeapprofdConfig : public ::protozero::Message {
+ public:
+  using Decoder = HeapprofdConfig_Decoder;
+  enum : int32_t {
+    kSamplingIntervalBytesFieldNumber = 1,
+    kAdaptiveSamplingShmemThresholdFieldNumber = 24,
+    kAdaptiveSamplingMaxSamplingIntervalBytesFieldNumber = 25,
+    kProcessCmdlineFieldNumber = 2,
+    kPidFieldNumber = 4,
+    kTargetInstalledByFieldNumber = 26,
+    kHeapsFieldNumber = 20,
+    kExcludeHeapsFieldNumber = 27,
+    kStreamAllocationsFieldNumber = 23,
+    kHeapSamplingIntervalsFieldNumber = 22,
+    kAllHeapsFieldNumber = 21,
+    kAllFieldNumber = 5,
+    kMinAnonymousMemoryKbFieldNumber = 15,
+    kMaxHeapprofdMemoryKbFieldNumber = 16,
+    kMaxHeapprofdCpuSecsFieldNumber = 17,
+    kSkipSymbolPrefixFieldNumber = 7,
+    kContinuousDumpConfigFieldNumber = 6,
+    kShmemSizeBytesFieldNumber = 8,
+    kBlockClientFieldNumber = 9,
+    kBlockClientTimeoutUsFieldNumber = 14,
+    kNoStartupFieldNumber = 10,
+    kNoRunningFieldNumber = 11,
+    kDumpAtMaxFieldNumber = 13,
+    kDisableForkTeardownFieldNumber = 18,
+    kDisableVforkDetectionFieldNumber = 19,
+  };
+  using ContinuousDumpConfig = ::perfetto::protos::pbzero::HeapprofdConfig_ContinuousDumpConfig;
+
+  using FieldMetadata_SamplingIntervalBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SamplingIntervalBytes kSamplingIntervalBytes() { return {}; }
+  void set_sampling_interval_bytes(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SamplingIntervalBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AdaptiveSamplingShmemThreshold =
+    ::protozero::proto_utils::FieldMetadata<
+      24,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AdaptiveSamplingShmemThreshold kAdaptiveSamplingShmemThreshold() { return {}; }
+  void set_adaptive_sampling_shmem_threshold(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AdaptiveSamplingShmemThreshold::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AdaptiveSamplingMaxSamplingIntervalBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      25,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AdaptiveSamplingMaxSamplingIntervalBytes kAdaptiveSamplingMaxSamplingIntervalBytes() { return {}; }
+  void set_adaptive_sampling_max_sampling_interval_bytes(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AdaptiveSamplingMaxSamplingIntervalBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProcessCmdline =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessCmdline kProcessCmdline() { return {}; }
+  void add_process_cmdline(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ProcessCmdline::kFieldId, data, size);
+  }
+  void add_process_cmdline(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProcessCmdline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void add_pid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TargetInstalledBy =
+    ::protozero::proto_utils::FieldMetadata<
+      26,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TargetInstalledBy kTargetInstalledBy() { return {}; }
+  void add_target_installed_by(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_TargetInstalledBy::kFieldId, data, size);
+  }
+  void add_target_installed_by(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_TargetInstalledBy::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Heaps =
+    ::protozero::proto_utils::FieldMetadata<
+      20,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Heaps kHeaps() { return {}; }
+  void add_heaps(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Heaps::kFieldId, data, size);
+  }
+  void add_heaps(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Heaps::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExcludeHeaps =
+    ::protozero::proto_utils::FieldMetadata<
+      27,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExcludeHeaps kExcludeHeaps() { return {}; }
+  void add_exclude_heaps(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ExcludeHeaps::kFieldId, data, size);
+  }
+  void add_exclude_heaps(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExcludeHeaps::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StreamAllocations =
+    ::protozero::proto_utils::FieldMetadata<
+      23,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StreamAllocations kStreamAllocations() { return {}; }
+  void set_stream_allocations(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_StreamAllocations::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HeapSamplingIntervals =
+    ::protozero::proto_utils::FieldMetadata<
+      22,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapSamplingIntervals kHeapSamplingIntervals() { return {}; }
+  void add_heap_sampling_intervals(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_HeapSamplingIntervals::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AllHeaps =
+    ::protozero::proto_utils::FieldMetadata<
+      21,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllHeaps kAllHeaps() { return {}; }
+  void set_all_heaps(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_AllHeaps::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_All =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_All kAll() { return {}; }
+  void set_all(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_All::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MinAnonymousMemoryKb =
+    ::protozero::proto_utils::FieldMetadata<
+      15,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MinAnonymousMemoryKb kMinAnonymousMemoryKb() { return {}; }
+  void set_min_anonymous_memory_kb(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MinAnonymousMemoryKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MaxHeapprofdMemoryKb =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MaxHeapprofdMemoryKb kMaxHeapprofdMemoryKb() { return {}; }
+  void set_max_heapprofd_memory_kb(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MaxHeapprofdMemoryKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MaxHeapprofdCpuSecs =
+    ::protozero::proto_utils::FieldMetadata<
+      17,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MaxHeapprofdCpuSecs kMaxHeapprofdCpuSecs() { return {}; }
+  void set_max_heapprofd_cpu_secs(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MaxHeapprofdCpuSecs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SkipSymbolPrefix =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SkipSymbolPrefix kSkipSymbolPrefix() { return {}; }
+  void add_skip_symbol_prefix(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_SkipSymbolPrefix::kFieldId, data, size);
+  }
+  void add_skip_symbol_prefix(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_SkipSymbolPrefix::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ContinuousDumpConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      HeapprofdConfig_ContinuousDumpConfig,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ContinuousDumpConfig kContinuousDumpConfig() { return {}; }
+  template <typename T = HeapprofdConfig_ContinuousDumpConfig> T* set_continuous_dump_config() {
+    return BeginNestedMessage<T>(6);
+  }
+
+
+  using FieldMetadata_ShmemSizeBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ShmemSizeBytes kShmemSizeBytes() { return {}; }
+  void set_shmem_size_bytes(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ShmemSizeBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BlockClient =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockClient kBlockClient() { return {}; }
+  void set_block_client(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_BlockClient::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BlockClientTimeoutUs =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockClientTimeoutUs kBlockClientTimeoutUs() { return {}; }
+  void set_block_client_timeout_us(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BlockClientTimeoutUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NoStartup =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NoStartup kNoStartup() { return {}; }
+  void set_no_startup(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_NoStartup::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NoRunning =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NoRunning kNoRunning() { return {}; }
+  void set_no_running(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_NoRunning::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DumpAtMax =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DumpAtMax kDumpAtMax() { return {}; }
+  void set_dump_at_max(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DumpAtMax::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DisableForkTeardown =
+    ::protozero::proto_utils::FieldMetadata<
+      18,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DisableForkTeardown kDisableForkTeardown() { return {}; }
+  void set_disable_fork_teardown(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DisableForkTeardown::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DisableVforkDetection =
+    ::protozero::proto_utils::FieldMetadata<
+      19,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      HeapprofdConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DisableVforkDetection kDisableVforkDetection() { return {}; }
+  void set_disable_vfork_detection(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DisableVforkDetection::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class HeapprofdConfig_ContinuousDumpConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  HeapprofdConfig_ContinuousDumpConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit HeapprofdConfig_ContinuousDumpConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit HeapprofdConfig_ContinuousDumpConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dump_phase_ms() const { return at<5>().valid(); }
+  uint32_t dump_phase_ms() const { return at<5>().as_uint32(); }
+  bool has_dump_interval_ms() const { return at<6>().valid(); }
+  uint32_t dump_interval_ms() const { return at<6>().as_uint32(); }
+};
+
+class HeapprofdConfig_ContinuousDumpConfig : public ::protozero::Message {
+ public:
+  using Decoder = HeapprofdConfig_ContinuousDumpConfig_Decoder;
+  enum : int32_t {
+    kDumpPhaseMsFieldNumber = 5,
+    kDumpIntervalMsFieldNumber = 6,
+  };
+
+  using FieldMetadata_DumpPhaseMs =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      HeapprofdConfig_ContinuousDumpConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DumpPhaseMs kDumpPhaseMs() { return {}; }
+  void set_dump_phase_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DumpPhaseMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DumpIntervalMs =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      HeapprofdConfig_ContinuousDumpConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DumpIntervalMs kDumpIntervalMs() { return {}; }
+  void set_dump_interval_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DumpIntervalMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/profiling/java_hprof_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_JAVA_HPROF_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_JAVA_HPROF_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class JavaHprofConfig_ContinuousDumpConfig;
+
+class JavaHprofConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  JavaHprofConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit JavaHprofConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit JavaHprofConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_process_cmdline() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> process_cmdline() const { return GetRepeated<::protozero::ConstChars>(1); }
+  bool has_pid() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> pid() const { return GetRepeated<uint64_t>(2); }
+  bool has_target_installed_by() const { return at<7>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> target_installed_by() const { return GetRepeated<::protozero::ConstChars>(7); }
+  bool has_continuous_dump_config() const { return at<3>().valid(); }
+  ::protozero::ConstBytes continuous_dump_config() const { return at<3>().as_bytes(); }
+  bool has_min_anonymous_memory_kb() const { return at<4>().valid(); }
+  uint32_t min_anonymous_memory_kb() const { return at<4>().as_uint32(); }
+  bool has_dump_smaps() const { return at<5>().valid(); }
+  bool dump_smaps() const { return at<5>().as_bool(); }
+  bool has_ignored_types() const { return at<6>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> ignored_types() const { return GetRepeated<::protozero::ConstChars>(6); }
+};
+
+class JavaHprofConfig : public ::protozero::Message {
+ public:
+  using Decoder = JavaHprofConfig_Decoder;
+  enum : int32_t {
+    kProcessCmdlineFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kTargetInstalledByFieldNumber = 7,
+    kContinuousDumpConfigFieldNumber = 3,
+    kMinAnonymousMemoryKbFieldNumber = 4,
+    kDumpSmapsFieldNumber = 5,
+    kIgnoredTypesFieldNumber = 6,
+  };
+  using ContinuousDumpConfig = ::perfetto::protos::pbzero::JavaHprofConfig_ContinuousDumpConfig;
+
+  using FieldMetadata_ProcessCmdline =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      JavaHprofConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessCmdline kProcessCmdline() { return {}; }
+  void add_process_cmdline(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ProcessCmdline::kFieldId, data, size);
+  }
+  void add_process_cmdline(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProcessCmdline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      JavaHprofConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void add_pid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TargetInstalledBy =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      JavaHprofConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TargetInstalledBy kTargetInstalledBy() { return {}; }
+  void add_target_installed_by(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_TargetInstalledBy::kFieldId, data, size);
+  }
+  void add_target_installed_by(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_TargetInstalledBy::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ContinuousDumpConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      JavaHprofConfig_ContinuousDumpConfig,
+      JavaHprofConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ContinuousDumpConfig kContinuousDumpConfig() { return {}; }
+  template <typename T = JavaHprofConfig_ContinuousDumpConfig> T* set_continuous_dump_config() {
+    return BeginNestedMessage<T>(3);
+  }
+
+
+  using FieldMetadata_MinAnonymousMemoryKb =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      JavaHprofConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MinAnonymousMemoryKb kMinAnonymousMemoryKb() { return {}; }
+  void set_min_anonymous_memory_kb(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MinAnonymousMemoryKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DumpSmaps =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      JavaHprofConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DumpSmaps kDumpSmaps() { return {}; }
+  void set_dump_smaps(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DumpSmaps::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IgnoredTypes =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      JavaHprofConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IgnoredTypes kIgnoredTypes() { return {}; }
+  void add_ignored_types(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_IgnoredTypes::kFieldId, data, size);
+  }
+  void add_ignored_types(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_IgnoredTypes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class JavaHprofConfig_ContinuousDumpConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  JavaHprofConfig_ContinuousDumpConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit JavaHprofConfig_ContinuousDumpConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit JavaHprofConfig_ContinuousDumpConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dump_phase_ms() const { return at<1>().valid(); }
+  uint32_t dump_phase_ms() const { return at<1>().as_uint32(); }
+  bool has_dump_interval_ms() const { return at<2>().valid(); }
+  uint32_t dump_interval_ms() const { return at<2>().as_uint32(); }
+};
+
+class JavaHprofConfig_ContinuousDumpConfig : public ::protozero::Message {
+ public:
+  using Decoder = JavaHprofConfig_ContinuousDumpConfig_Decoder;
+  enum : int32_t {
+    kDumpPhaseMsFieldNumber = 1,
+    kDumpIntervalMsFieldNumber = 2,
+  };
+
+  using FieldMetadata_DumpPhaseMs =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      JavaHprofConfig_ContinuousDumpConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DumpPhaseMs kDumpPhaseMs() { return {}; }
+  void set_dump_phase_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DumpPhaseMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DumpIntervalMs =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      JavaHprofConfig_ContinuousDumpConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DumpIntervalMs kDumpIntervalMs() { return {}; }
+  void set_dump_interval_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DumpIntervalMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/profiling/perf_event_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_PERF_EVENT_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_PERF_EVENT_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class PerfEventConfig_CallstackSampling;
+class PerfEventConfig_Scope;
+class PerfEvents_Timebase;
+
+class PerfEventConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/18, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  PerfEventConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PerfEventConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PerfEventConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_timebase() const { return at<15>().valid(); }
+  ::protozero::ConstBytes timebase() const { return at<15>().as_bytes(); }
+  bool has_callstack_sampling() const { return at<16>().valid(); }
+  ::protozero::ConstBytes callstack_sampling() const { return at<16>().as_bytes(); }
+  bool has_ring_buffer_read_period_ms() const { return at<8>().valid(); }
+  uint32_t ring_buffer_read_period_ms() const { return at<8>().as_uint32(); }
+  bool has_ring_buffer_pages() const { return at<3>().valid(); }
+  uint32_t ring_buffer_pages() const { return at<3>().as_uint32(); }
+  bool has_max_enqueued_footprint_kb() const { return at<17>().valid(); }
+  uint64_t max_enqueued_footprint_kb() const { return at<17>().as_uint64(); }
+  bool has_max_daemon_memory_kb() const { return at<13>().valid(); }
+  uint32_t max_daemon_memory_kb() const { return at<13>().as_uint32(); }
+  bool has_remote_descriptor_timeout_ms() const { return at<9>().valid(); }
+  uint32_t remote_descriptor_timeout_ms() const { return at<9>().as_uint32(); }
+  bool has_unwind_state_clear_period_ms() const { return at<10>().valid(); }
+  uint32_t unwind_state_clear_period_ms() const { return at<10>().as_uint32(); }
+  bool has_all_cpus() const { return at<1>().valid(); }
+  bool all_cpus() const { return at<1>().as_bool(); }
+  bool has_sampling_frequency() const { return at<2>().valid(); }
+  uint32_t sampling_frequency() const { return at<2>().as_uint32(); }
+  bool has_kernel_frames() const { return at<12>().valid(); }
+  bool kernel_frames() const { return at<12>().as_bool(); }
+  bool has_target_pid() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<int32_t> target_pid() const { return GetRepeated<int32_t>(4); }
+  bool has_target_cmdline() const { return at<5>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> target_cmdline() const { return GetRepeated<::protozero::ConstChars>(5); }
+  bool has_target_installed_by() const { return at<18>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> target_installed_by() const { return GetRepeated<::protozero::ConstChars>(18); }
+  bool has_exclude_pid() const { return at<6>().valid(); }
+  ::protozero::RepeatedFieldIterator<int32_t> exclude_pid() const { return GetRepeated<int32_t>(6); }
+  bool has_exclude_cmdline() const { return at<7>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> exclude_cmdline() const { return GetRepeated<::protozero::ConstChars>(7); }
+  bool has_additional_cmdline_count() const { return at<11>().valid(); }
+  uint32_t additional_cmdline_count() const { return at<11>().as_uint32(); }
+};
+
+class PerfEventConfig : public ::protozero::Message {
+ public:
+  using Decoder = PerfEventConfig_Decoder;
+  enum : int32_t {
+    kTimebaseFieldNumber = 15,
+    kCallstackSamplingFieldNumber = 16,
+    kRingBufferReadPeriodMsFieldNumber = 8,
+    kRingBufferPagesFieldNumber = 3,
+    kMaxEnqueuedFootprintKbFieldNumber = 17,
+    kMaxDaemonMemoryKbFieldNumber = 13,
+    kRemoteDescriptorTimeoutMsFieldNumber = 9,
+    kUnwindStateClearPeriodMsFieldNumber = 10,
+    kAllCpusFieldNumber = 1,
+    kSamplingFrequencyFieldNumber = 2,
+    kKernelFramesFieldNumber = 12,
+    kTargetPidFieldNumber = 4,
+    kTargetCmdlineFieldNumber = 5,
+    kTargetInstalledByFieldNumber = 18,
+    kExcludePidFieldNumber = 6,
+    kExcludeCmdlineFieldNumber = 7,
+    kAdditionalCmdlineCountFieldNumber = 11,
+  };
+  using CallstackSampling = ::perfetto::protos::pbzero::PerfEventConfig_CallstackSampling;
+  using Scope = ::perfetto::protos::pbzero::PerfEventConfig_Scope;
+
+  using FieldMetadata_Timebase =
+    ::protozero::proto_utils::FieldMetadata<
+      15,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PerfEvents_Timebase,
+      PerfEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Timebase kTimebase() { return {}; }
+  template <typename T = PerfEvents_Timebase> T* set_timebase() {
+    return BeginNestedMessage<T>(15);
+  }
+
+
+  using FieldMetadata_CallstackSampling =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PerfEventConfig_CallstackSampling,
+      PerfEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CallstackSampling kCallstackSampling() { return {}; }
+  template <typename T = PerfEventConfig_CallstackSampling> T* set_callstack_sampling() {
+    return BeginNestedMessage<T>(16);
+  }
+
+
+  using FieldMetadata_RingBufferReadPeriodMs =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RingBufferReadPeriodMs kRingBufferReadPeriodMs() { return {}; }
+  void set_ring_buffer_read_period_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RingBufferReadPeriodMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RingBufferPages =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RingBufferPages kRingBufferPages() { return {}; }
+  void set_ring_buffer_pages(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RingBufferPages::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MaxEnqueuedFootprintKb =
+    ::protozero::proto_utils::FieldMetadata<
+      17,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      PerfEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MaxEnqueuedFootprintKb kMaxEnqueuedFootprintKb() { return {}; }
+  void set_max_enqueued_footprint_kb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MaxEnqueuedFootprintKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MaxDaemonMemoryKb =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MaxDaemonMemoryKb kMaxDaemonMemoryKb() { return {}; }
+  void set_max_daemon_memory_kb(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MaxDaemonMemoryKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RemoteDescriptorTimeoutMs =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RemoteDescriptorTimeoutMs kRemoteDescriptorTimeoutMs() { return {}; }
+  void set_remote_descriptor_timeout_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RemoteDescriptorTimeoutMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UnwindStateClearPeriodMs =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UnwindStateClearPeriodMs kUnwindStateClearPeriodMs() { return {}; }
+  void set_unwind_state_clear_period_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UnwindStateClearPeriodMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AllCpus =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      PerfEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllCpus kAllCpus() { return {}; }
+  void set_all_cpus(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_AllCpus::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SamplingFrequency =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SamplingFrequency kSamplingFrequency() { return {}; }
+  void set_sampling_frequency(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SamplingFrequency::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_KernelFrames =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      PerfEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_KernelFrames kKernelFrames() { return {}; }
+  void set_kernel_frames(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_KernelFrames::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TargetPid =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      PerfEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TargetPid kTargetPid() { return {}; }
+  void add_target_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TargetPid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TargetCmdline =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PerfEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TargetCmdline kTargetCmdline() { return {}; }
+  void add_target_cmdline(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_TargetCmdline::kFieldId, data, size);
+  }
+  void add_target_cmdline(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_TargetCmdline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TargetInstalledBy =
+    ::protozero::proto_utils::FieldMetadata<
+      18,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PerfEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TargetInstalledBy kTargetInstalledBy() { return {}; }
+  void add_target_installed_by(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_TargetInstalledBy::kFieldId, data, size);
+  }
+  void add_target_installed_by(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_TargetInstalledBy::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExcludePid =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      PerfEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExcludePid kExcludePid() { return {}; }
+  void add_exclude_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExcludePid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExcludeCmdline =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PerfEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExcludeCmdline kExcludeCmdline() { return {}; }
+  void add_exclude_cmdline(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ExcludeCmdline::kFieldId, data, size);
+  }
+  void add_exclude_cmdline(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExcludeCmdline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AdditionalCmdlineCount =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AdditionalCmdlineCount kAdditionalCmdlineCount() { return {}; }
+  void set_additional_cmdline_count(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AdditionalCmdlineCount::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class PerfEventConfig_Scope_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  PerfEventConfig_Scope_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PerfEventConfig_Scope_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PerfEventConfig_Scope_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_target_pid() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<int32_t> target_pid() const { return GetRepeated<int32_t>(1); }
+  bool has_target_cmdline() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> target_cmdline() const { return GetRepeated<::protozero::ConstChars>(2); }
+  bool has_exclude_pid() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<int32_t> exclude_pid() const { return GetRepeated<int32_t>(3); }
+  bool has_exclude_cmdline() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> exclude_cmdline() const { return GetRepeated<::protozero::ConstChars>(4); }
+  bool has_additional_cmdline_count() const { return at<5>().valid(); }
+  uint32_t additional_cmdline_count() const { return at<5>().as_uint32(); }
+};
+
+class PerfEventConfig_Scope : public ::protozero::Message {
+ public:
+  using Decoder = PerfEventConfig_Scope_Decoder;
+  enum : int32_t {
+    kTargetPidFieldNumber = 1,
+    kTargetCmdlineFieldNumber = 2,
+    kExcludePidFieldNumber = 3,
+    kExcludeCmdlineFieldNumber = 4,
+    kAdditionalCmdlineCountFieldNumber = 5,
+  };
+
+  using FieldMetadata_TargetPid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      PerfEventConfig_Scope>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TargetPid kTargetPid() { return {}; }
+  void add_target_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TargetPid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TargetCmdline =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PerfEventConfig_Scope>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TargetCmdline kTargetCmdline() { return {}; }
+  void add_target_cmdline(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_TargetCmdline::kFieldId, data, size);
+  }
+  void add_target_cmdline(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_TargetCmdline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExcludePid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      PerfEventConfig_Scope>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExcludePid kExcludePid() { return {}; }
+  void add_exclude_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExcludePid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExcludeCmdline =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PerfEventConfig_Scope>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExcludeCmdline kExcludeCmdline() { return {}; }
+  void add_exclude_cmdline(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ExcludeCmdline::kFieldId, data, size);
+  }
+  void add_exclude_cmdline(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExcludeCmdline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AdditionalCmdlineCount =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfEventConfig_Scope>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AdditionalCmdlineCount kAdditionalCmdlineCount() { return {}; }
+  void set_additional_cmdline_count(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AdditionalCmdlineCount::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class PerfEventConfig_CallstackSampling_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  PerfEventConfig_CallstackSampling_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PerfEventConfig_CallstackSampling_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PerfEventConfig_CallstackSampling_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_scope() const { return at<1>().valid(); }
+  ::protozero::ConstBytes scope() const { return at<1>().as_bytes(); }
+  bool has_kernel_frames() const { return at<2>().valid(); }
+  bool kernel_frames() const { return at<2>().as_bool(); }
+};
+
+class PerfEventConfig_CallstackSampling : public ::protozero::Message {
+ public:
+  using Decoder = PerfEventConfig_CallstackSampling_Decoder;
+  enum : int32_t {
+    kScopeFieldNumber = 1,
+    kKernelFramesFieldNumber = 2,
+  };
+
+  using FieldMetadata_Scope =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PerfEventConfig_Scope,
+      PerfEventConfig_CallstackSampling>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Scope kScope() { return {}; }
+  template <typename T = PerfEventConfig_Scope> T* set_scope() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_KernelFrames =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      PerfEventConfig_CallstackSampling>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_KernelFrames kKernelFrames() { return {}; }
+  void set_kernel_frames(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_KernelFrames::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/sys_stats/sys_stats_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_SYS_STATS_SYS_STATS_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_SYS_STATS_SYS_STATS_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum MeminfoCounters : int32_t;
+enum SysStatsConfig_StatCounters : int32_t;
+enum VmstatCounters : int32_t;
+
+enum SysStatsConfig_StatCounters : int32_t {
+  SysStatsConfig_StatCounters_STAT_UNSPECIFIED = 0,
+  SysStatsConfig_StatCounters_STAT_CPU_TIMES = 1,
+  SysStatsConfig_StatCounters_STAT_IRQ_COUNTS = 2,
+  SysStatsConfig_StatCounters_STAT_SOFTIRQ_COUNTS = 3,
+  SysStatsConfig_StatCounters_STAT_FORK_COUNT = 4,
+};
+
+const SysStatsConfig_StatCounters SysStatsConfig_StatCounters_MIN = SysStatsConfig_StatCounters_STAT_UNSPECIFIED;
+const SysStatsConfig_StatCounters SysStatsConfig_StatCounters_MAX = SysStatsConfig_StatCounters_STAT_FORK_COUNT;
+
+class SysStatsConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  SysStatsConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SysStatsConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SysStatsConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_meminfo_period_ms() const { return at<1>().valid(); }
+  uint32_t meminfo_period_ms() const { return at<1>().as_uint32(); }
+  bool has_meminfo_counters() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<int32_t> meminfo_counters() const { return GetRepeated<int32_t>(2); }
+  bool has_vmstat_period_ms() const { return at<3>().valid(); }
+  uint32_t vmstat_period_ms() const { return at<3>().as_uint32(); }
+  bool has_vmstat_counters() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<int32_t> vmstat_counters() const { return GetRepeated<int32_t>(4); }
+  bool has_stat_period_ms() const { return at<5>().valid(); }
+  uint32_t stat_period_ms() const { return at<5>().as_uint32(); }
+  bool has_stat_counters() const { return at<6>().valid(); }
+  ::protozero::RepeatedFieldIterator<int32_t> stat_counters() const { return GetRepeated<int32_t>(6); }
+  bool has_devfreq_period_ms() const { return at<7>().valid(); }
+  uint32_t devfreq_period_ms() const { return at<7>().as_uint32(); }
+};
+
+class SysStatsConfig : public ::protozero::Message {
+ public:
+  using Decoder = SysStatsConfig_Decoder;
+  enum : int32_t {
+    kMeminfoPeriodMsFieldNumber = 1,
+    kMeminfoCountersFieldNumber = 2,
+    kVmstatPeriodMsFieldNumber = 3,
+    kVmstatCountersFieldNumber = 4,
+    kStatPeriodMsFieldNumber = 5,
+    kStatCountersFieldNumber = 6,
+    kDevfreqPeriodMsFieldNumber = 7,
+  };
+  using StatCounters = ::perfetto::protos::pbzero::SysStatsConfig_StatCounters;
+  static const StatCounters STAT_UNSPECIFIED = SysStatsConfig_StatCounters_STAT_UNSPECIFIED;
+  static const StatCounters STAT_CPU_TIMES = SysStatsConfig_StatCounters_STAT_CPU_TIMES;
+  static const StatCounters STAT_IRQ_COUNTS = SysStatsConfig_StatCounters_STAT_IRQ_COUNTS;
+  static const StatCounters STAT_SOFTIRQ_COUNTS = SysStatsConfig_StatCounters_STAT_SOFTIRQ_COUNTS;
+  static const StatCounters STAT_FORK_COUNT = SysStatsConfig_StatCounters_STAT_FORK_COUNT;
+
+  using FieldMetadata_MeminfoPeriodMs =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SysStatsConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MeminfoPeriodMs kMeminfoPeriodMs() { return {}; }
+  void set_meminfo_period_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MeminfoPeriodMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MeminfoCounters =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::MeminfoCounters,
+      SysStatsConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MeminfoCounters kMeminfoCounters() { return {}; }
+  void add_meminfo_counters(::perfetto::protos::pbzero::MeminfoCounters value) {
+    static constexpr uint32_t field_id = FieldMetadata_MeminfoCounters::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_VmstatPeriodMs =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SysStatsConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VmstatPeriodMs kVmstatPeriodMs() { return {}; }
+  void set_vmstat_period_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_VmstatPeriodMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_VmstatCounters =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::VmstatCounters,
+      SysStatsConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VmstatCounters kVmstatCounters() { return {}; }
+  void add_vmstat_counters(::perfetto::protos::pbzero::VmstatCounters value) {
+    static constexpr uint32_t field_id = FieldMetadata_VmstatCounters::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StatPeriodMs =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SysStatsConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StatPeriodMs kStatPeriodMs() { return {}; }
+  void set_stat_period_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_StatPeriodMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StatCounters =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::SysStatsConfig_StatCounters,
+      SysStatsConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StatCounters kStatCounters() { return {}; }
+  void add_stat_counters(::perfetto::protos::pbzero::SysStatsConfig_StatCounters value) {
+    static constexpr uint32_t field_id = FieldMetadata_StatCounters::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DevfreqPeriodMs =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SysStatsConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DevfreqPeriodMs kDevfreqPeriodMs() { return {}; }
+  void set_devfreq_period_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DevfreqPeriodMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/track_event/track_event_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACK_EVENT_TRACK_EVENT_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACK_EVENT_TRACK_EVENT_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class TrackEventConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TrackEventConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TrackEventConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TrackEventConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_disabled_categories() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> disabled_categories() const { return GetRepeated<::protozero::ConstChars>(1); }
+  bool has_enabled_categories() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> enabled_categories() const { return GetRepeated<::protozero::ConstChars>(2); }
+  bool has_disabled_tags() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> disabled_tags() const { return GetRepeated<::protozero::ConstChars>(3); }
+  bool has_enabled_tags() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> enabled_tags() const { return GetRepeated<::protozero::ConstChars>(4); }
+};
+
+class TrackEventConfig : public ::protozero::Message {
+ public:
+  using Decoder = TrackEventConfig_Decoder;
+  enum : int32_t {
+    kDisabledCategoriesFieldNumber = 1,
+    kEnabledCategoriesFieldNumber = 2,
+    kDisabledTagsFieldNumber = 3,
+    kEnabledTagsFieldNumber = 4,
+  };
+
+  using FieldMetadata_DisabledCategories =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DisabledCategories kDisabledCategories() { return {}; }
+  void add_disabled_categories(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_DisabledCategories::kFieldId, data, size);
+  }
+  void add_disabled_categories(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_DisabledCategories::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EnabledCategories =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EnabledCategories kEnabledCategories() { return {}; }
+  void add_enabled_categories(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_EnabledCategories::kFieldId, data, size);
+  }
+  void add_enabled_categories(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_EnabledCategories::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DisabledTags =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DisabledTags kDisabledTags() { return {}; }
+  void add_disabled_tags(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_DisabledTags::kFieldId, data, size);
+  }
+  void add_disabled_tags(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_DisabledTags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EnabledTags =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackEventConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EnabledTags kEnabledTags() { return {}; }
+  void add_enabled_tags(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_EnabledTags::kFieldId, data, size);
+  }
+  void add_enabled_tags(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_EnabledTags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/chrome/chrome_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_CHROME_CHROME_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_CHROME_CHROME_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum ChromeConfig_ClientPriority : int32_t;
+
+enum ChromeConfig_ClientPriority : int32_t {
+  ChromeConfig_ClientPriority_UNKNOWN = 0,
+  ChromeConfig_ClientPriority_BACKGROUND = 1,
+  ChromeConfig_ClientPriority_USER_INITIATED = 2,
+};
+
+const ChromeConfig_ClientPriority ChromeConfig_ClientPriority_MIN = ChromeConfig_ClientPriority_UNKNOWN;
+const ChromeConfig_ClientPriority ChromeConfig_ClientPriority_MAX = ChromeConfig_ClientPriority_USER_INITIATED;
+
+class ChromeConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_trace_config() const { return at<1>().valid(); }
+  ::protozero::ConstChars trace_config() const { return at<1>().as_string(); }
+  bool has_privacy_filtering_enabled() const { return at<2>().valid(); }
+  bool privacy_filtering_enabled() const { return at<2>().as_bool(); }
+  bool has_convert_to_legacy_json() const { return at<3>().valid(); }
+  bool convert_to_legacy_json() const { return at<3>().as_bool(); }
+  bool has_client_priority() const { return at<4>().valid(); }
+  int32_t client_priority() const { return at<4>().as_int32(); }
+  bool has_json_agent_label_filter() const { return at<5>().valid(); }
+  ::protozero::ConstChars json_agent_label_filter() const { return at<5>().as_string(); }
+};
+
+class ChromeConfig : public ::protozero::Message {
+ public:
+  using Decoder = ChromeConfig_Decoder;
+  enum : int32_t {
+    kTraceConfigFieldNumber = 1,
+    kPrivacyFilteringEnabledFieldNumber = 2,
+    kConvertToLegacyJsonFieldNumber = 3,
+    kClientPriorityFieldNumber = 4,
+    kJsonAgentLabelFilterFieldNumber = 5,
+  };
+  using ClientPriority = ::perfetto::protos::pbzero::ChromeConfig_ClientPriority;
+  static const ClientPriority UNKNOWN = ChromeConfig_ClientPriority_UNKNOWN;
+  static const ClientPriority BACKGROUND = ChromeConfig_ClientPriority_BACKGROUND;
+  static const ClientPriority USER_INITIATED = ChromeConfig_ClientPriority_USER_INITIATED;
+
+  using FieldMetadata_TraceConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceConfig kTraceConfig() { return {}; }
+  void set_trace_config(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_TraceConfig::kFieldId, data, size);
+  }
+  void set_trace_config(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_TraceConfig::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PrivacyFilteringEnabled =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PrivacyFilteringEnabled kPrivacyFilteringEnabled() { return {}; }
+  void set_privacy_filtering_enabled(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_PrivacyFilteringEnabled::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ConvertToLegacyJson =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ConvertToLegacyJson kConvertToLegacyJson() { return {}; }
+  void set_convert_to_legacy_json(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_ConvertToLegacyJson::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ClientPriority =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeConfig_ClientPriority,
+      ChromeConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClientPriority kClientPriority() { return {}; }
+  void set_client_priority(::perfetto::protos::pbzero::ChromeConfig_ClientPriority value) {
+    static constexpr uint32_t field_id = FieldMetadata_ClientPriority::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_JsonAgentLabelFilter =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_JsonAgentLabelFilter kJsonAgentLabelFilter() { return {}; }
+  void set_json_agent_label_filter(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_JsonAgentLabelFilter::kFieldId, data, size);
+  }
+  void set_json_agent_label_filter(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_JsonAgentLabelFilter::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/data_source_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_DATA_SOURCE_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_DATA_SOURCE_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class AndroidLogConfig;
+class AndroidPolledStateConfig;
+class AndroidPowerConfig;
+class ChromeConfig;
+class FtraceConfig;
+class GpuCounterConfig;
+class HeapprofdConfig;
+class InodeFileConfig;
+class InterceptorConfig;
+class JavaHprofConfig;
+class PackagesListConfig;
+class PerfEventConfig;
+class ProcessStatsConfig;
+class SysStatsConfig;
+class TestConfig;
+class TrackEventConfig;
+class VulkanMemoryConfig;
+enum DataSourceConfig_SessionInitiator : int32_t;
+
+enum DataSourceConfig_SessionInitiator : int32_t {
+  DataSourceConfig_SessionInitiator_SESSION_INITIATOR_UNSPECIFIED = 0,
+  DataSourceConfig_SessionInitiator_SESSION_INITIATOR_TRUSTED_SYSTEM = 1,
+};
+
+const DataSourceConfig_SessionInitiator DataSourceConfig_SessionInitiator_MIN = DataSourceConfig_SessionInitiator_SESSION_INITIATOR_UNSPECIFIED;
+const DataSourceConfig_SessionInitiator DataSourceConfig_SessionInitiator_MAX = DataSourceConfig_SessionInitiator_SESSION_INITIATOR_TRUSTED_SYSTEM;
+
+class DataSourceConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/115, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  DataSourceConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit DataSourceConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit DataSourceConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_target_buffer() const { return at<2>().valid(); }
+  uint32_t target_buffer() const { return at<2>().as_uint32(); }
+  bool has_trace_duration_ms() const { return at<3>().valid(); }
+  uint32_t trace_duration_ms() const { return at<3>().as_uint32(); }
+  bool has_stop_timeout_ms() const { return at<7>().valid(); }
+  uint32_t stop_timeout_ms() const { return at<7>().as_uint32(); }
+  bool has_enable_extra_guardrails() const { return at<6>().valid(); }
+  bool enable_extra_guardrails() const { return at<6>().as_bool(); }
+  bool has_session_initiator() const { return at<8>().valid(); }
+  int32_t session_initiator() const { return at<8>().as_int32(); }
+  bool has_tracing_session_id() const { return at<4>().valid(); }
+  uint64_t tracing_session_id() const { return at<4>().as_uint64(); }
+  bool has_ftrace_config() const { return at<100>().valid(); }
+  ::protozero::ConstBytes ftrace_config() const { return at<100>().as_bytes(); }
+  bool has_inode_file_config() const { return at<102>().valid(); }
+  ::protozero::ConstBytes inode_file_config() const { return at<102>().as_bytes(); }
+  bool has_process_stats_config() const { return at<103>().valid(); }
+  ::protozero::ConstBytes process_stats_config() const { return at<103>().as_bytes(); }
+  bool has_sys_stats_config() const { return at<104>().valid(); }
+  ::protozero::ConstBytes sys_stats_config() const { return at<104>().as_bytes(); }
+  bool has_heapprofd_config() const { return at<105>().valid(); }
+  ::protozero::ConstBytes heapprofd_config() const { return at<105>().as_bytes(); }
+  bool has_java_hprof_config() const { return at<110>().valid(); }
+  ::protozero::ConstBytes java_hprof_config() const { return at<110>().as_bytes(); }
+  bool has_android_power_config() const { return at<106>().valid(); }
+  ::protozero::ConstBytes android_power_config() const { return at<106>().as_bytes(); }
+  bool has_android_log_config() const { return at<107>().valid(); }
+  ::protozero::ConstBytes android_log_config() const { return at<107>().as_bytes(); }
+  bool has_gpu_counter_config() const { return at<108>().valid(); }
+  ::protozero::ConstBytes gpu_counter_config() const { return at<108>().as_bytes(); }
+  bool has_packages_list_config() const { return at<109>().valid(); }
+  ::protozero::ConstBytes packages_list_config() const { return at<109>().as_bytes(); }
+  bool has_perf_event_config() const { return at<111>().valid(); }
+  ::protozero::ConstBytes perf_event_config() const { return at<111>().as_bytes(); }
+  bool has_vulkan_memory_config() const { return at<112>().valid(); }
+  ::protozero::ConstBytes vulkan_memory_config() const { return at<112>().as_bytes(); }
+  bool has_track_event_config() const { return at<113>().valid(); }
+  ::protozero::ConstBytes track_event_config() const { return at<113>().as_bytes(); }
+  bool has_android_polled_state_config() const { return at<114>().valid(); }
+  ::protozero::ConstBytes android_polled_state_config() const { return at<114>().as_bytes(); }
+  bool has_chrome_config() const { return at<101>().valid(); }
+  ::protozero::ConstBytes chrome_config() const { return at<101>().as_bytes(); }
+  bool has_interceptor_config() const { return at<115>().valid(); }
+  ::protozero::ConstBytes interceptor_config() const { return at<115>().as_bytes(); }
+  // field legacy_config omitted because its id is too high
+  // field for_testing omitted because its id is too high
+};
+
+class DataSourceConfig : public ::protozero::Message {
+ public:
+  using Decoder = DataSourceConfig_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kTargetBufferFieldNumber = 2,
+    kTraceDurationMsFieldNumber = 3,
+    kStopTimeoutMsFieldNumber = 7,
+    kEnableExtraGuardrailsFieldNumber = 6,
+    kSessionInitiatorFieldNumber = 8,
+    kTracingSessionIdFieldNumber = 4,
+    kFtraceConfigFieldNumber = 100,
+    kInodeFileConfigFieldNumber = 102,
+    kProcessStatsConfigFieldNumber = 103,
+    kSysStatsConfigFieldNumber = 104,
+    kHeapprofdConfigFieldNumber = 105,
+    kJavaHprofConfigFieldNumber = 110,
+    kAndroidPowerConfigFieldNumber = 106,
+    kAndroidLogConfigFieldNumber = 107,
+    kGpuCounterConfigFieldNumber = 108,
+    kPackagesListConfigFieldNumber = 109,
+    kPerfEventConfigFieldNumber = 111,
+    kVulkanMemoryConfigFieldNumber = 112,
+    kTrackEventConfigFieldNumber = 113,
+    kAndroidPolledStateConfigFieldNumber = 114,
+    kChromeConfigFieldNumber = 101,
+    kInterceptorConfigFieldNumber = 115,
+    kLegacyConfigFieldNumber = 1000,
+    kForTestingFieldNumber = 1001,
+  };
+  using SessionInitiator = ::perfetto::protos::pbzero::DataSourceConfig_SessionInitiator;
+  static const SessionInitiator SESSION_INITIATOR_UNSPECIFIED = DataSourceConfig_SessionInitiator_SESSION_INITIATOR_UNSPECIFIED;
+  static const SessionInitiator SESSION_INITIATOR_TRUSTED_SYSTEM = DataSourceConfig_SessionInitiator_SESSION_INITIATOR_TRUSTED_SYSTEM;
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TargetBuffer =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TargetBuffer kTargetBuffer() { return {}; }
+  void set_target_buffer(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TargetBuffer::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TraceDurationMs =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceDurationMs kTraceDurationMs() { return {}; }
+  void set_trace_duration_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TraceDurationMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StopTimeoutMs =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StopTimeoutMs kStopTimeoutMs() { return {}; }
+  void set_stop_timeout_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_StopTimeoutMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EnableExtraGuardrails =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EnableExtraGuardrails kEnableExtraGuardrails() { return {}; }
+  void set_enable_extra_guardrails(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_EnableExtraGuardrails::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SessionInitiator =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::DataSourceConfig_SessionInitiator,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SessionInitiator kSessionInitiator() { return {}; }
+  void set_session_initiator(::perfetto::protos::pbzero::DataSourceConfig_SessionInitiator value) {
+    static constexpr uint32_t field_id = FieldMetadata_SessionInitiator::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TracingSessionId =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TracingSessionId kTracingSessionId() { return {}; }
+  void set_tracing_session_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TracingSessionId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FtraceConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      100,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FtraceConfig,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FtraceConfig kFtraceConfig() { return {}; }
+  template <typename T = FtraceConfig> T* set_ftrace_config() {
+    return BeginNestedMessage<T>(100);
+  }
+
+  void set_ftrace_config_raw(const std::string& raw) {
+    return AppendBytes(100, raw.data(), raw.size());
+  }
+
+
+  using FieldMetadata_InodeFileConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      102,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InodeFileConfig,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InodeFileConfig kInodeFileConfig() { return {}; }
+  template <typename T = InodeFileConfig> T* set_inode_file_config() {
+    return BeginNestedMessage<T>(102);
+  }
+
+  void set_inode_file_config_raw(const std::string& raw) {
+    return AppendBytes(102, raw.data(), raw.size());
+  }
+
+
+  using FieldMetadata_ProcessStatsConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      103,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProcessStatsConfig,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessStatsConfig kProcessStatsConfig() { return {}; }
+  template <typename T = ProcessStatsConfig> T* set_process_stats_config() {
+    return BeginNestedMessage<T>(103);
+  }
+
+  void set_process_stats_config_raw(const std::string& raw) {
+    return AppendBytes(103, raw.data(), raw.size());
+  }
+
+
+  using FieldMetadata_SysStatsConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      104,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SysStatsConfig,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SysStatsConfig kSysStatsConfig() { return {}; }
+  template <typename T = SysStatsConfig> T* set_sys_stats_config() {
+    return BeginNestedMessage<T>(104);
+  }
+
+  void set_sys_stats_config_raw(const std::string& raw) {
+    return AppendBytes(104, raw.data(), raw.size());
+  }
+
+
+  using FieldMetadata_HeapprofdConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      105,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      HeapprofdConfig,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapprofdConfig kHeapprofdConfig() { return {}; }
+  template <typename T = HeapprofdConfig> T* set_heapprofd_config() {
+    return BeginNestedMessage<T>(105);
+  }
+
+  void set_heapprofd_config_raw(const std::string& raw) {
+    return AppendBytes(105, raw.data(), raw.size());
+  }
+
+
+  using FieldMetadata_JavaHprofConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      110,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      JavaHprofConfig,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_JavaHprofConfig kJavaHprofConfig() { return {}; }
+  template <typename T = JavaHprofConfig> T* set_java_hprof_config() {
+    return BeginNestedMessage<T>(110);
+  }
+
+  void set_java_hprof_config_raw(const std::string& raw) {
+    return AppendBytes(110, raw.data(), raw.size());
+  }
+
+
+  using FieldMetadata_AndroidPowerConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      106,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AndroidPowerConfig,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AndroidPowerConfig kAndroidPowerConfig() { return {}; }
+  template <typename T = AndroidPowerConfig> T* set_android_power_config() {
+    return BeginNestedMessage<T>(106);
+  }
+
+  void set_android_power_config_raw(const std::string& raw) {
+    return AppendBytes(106, raw.data(), raw.size());
+  }
+
+
+  using FieldMetadata_AndroidLogConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      107,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AndroidLogConfig,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AndroidLogConfig kAndroidLogConfig() { return {}; }
+  template <typename T = AndroidLogConfig> T* set_android_log_config() {
+    return BeginNestedMessage<T>(107);
+  }
+
+  void set_android_log_config_raw(const std::string& raw) {
+    return AppendBytes(107, raw.data(), raw.size());
+  }
+
+
+  using FieldMetadata_GpuCounterConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      108,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuCounterConfig,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuCounterConfig kGpuCounterConfig() { return {}; }
+  template <typename T = GpuCounterConfig> T* set_gpu_counter_config() {
+    return BeginNestedMessage<T>(108);
+  }
+
+  void set_gpu_counter_config_raw(const std::string& raw) {
+    return AppendBytes(108, raw.data(), raw.size());
+  }
+
+
+  using FieldMetadata_PackagesListConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      109,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PackagesListConfig,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PackagesListConfig kPackagesListConfig() { return {}; }
+  template <typename T = PackagesListConfig> T* set_packages_list_config() {
+    return BeginNestedMessage<T>(109);
+  }
+
+  void set_packages_list_config_raw(const std::string& raw) {
+    return AppendBytes(109, raw.data(), raw.size());
+  }
+
+
+  using FieldMetadata_PerfEventConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      111,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PerfEventConfig,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PerfEventConfig kPerfEventConfig() { return {}; }
+  template <typename T = PerfEventConfig> T* set_perf_event_config() {
+    return BeginNestedMessage<T>(111);
+  }
+
+  void set_perf_event_config_raw(const std::string& raw) {
+    return AppendBytes(111, raw.data(), raw.size());
+  }
+
+
+  using FieldMetadata_VulkanMemoryConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      112,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      VulkanMemoryConfig,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VulkanMemoryConfig kVulkanMemoryConfig() { return {}; }
+  template <typename T = VulkanMemoryConfig> T* set_vulkan_memory_config() {
+    return BeginNestedMessage<T>(112);
+  }
+
+  void set_vulkan_memory_config_raw(const std::string& raw) {
+    return AppendBytes(112, raw.data(), raw.size());
+  }
+
+
+  using FieldMetadata_TrackEventConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      113,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TrackEventConfig,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrackEventConfig kTrackEventConfig() { return {}; }
+  template <typename T = TrackEventConfig> T* set_track_event_config() {
+    return BeginNestedMessage<T>(113);
+  }
+
+  void set_track_event_config_raw(const std::string& raw) {
+    return AppendBytes(113, raw.data(), raw.size());
+  }
+
+
+  using FieldMetadata_AndroidPolledStateConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      114,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AndroidPolledStateConfig,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AndroidPolledStateConfig kAndroidPolledStateConfig() { return {}; }
+  template <typename T = AndroidPolledStateConfig> T* set_android_polled_state_config() {
+    return BeginNestedMessage<T>(114);
+  }
+
+  void set_android_polled_state_config_raw(const std::string& raw) {
+    return AppendBytes(114, raw.data(), raw.size());
+  }
+
+
+  using FieldMetadata_ChromeConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      101,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeConfig,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeConfig kChromeConfig() { return {}; }
+  template <typename T = ChromeConfig> T* set_chrome_config() {
+    return BeginNestedMessage<T>(101);
+  }
+
+
+  using FieldMetadata_InterceptorConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      115,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InterceptorConfig,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InterceptorConfig kInterceptorConfig() { return {}; }
+  template <typename T = InterceptorConfig> T* set_interceptor_config() {
+    return BeginNestedMessage<T>(115);
+  }
+
+
+  using FieldMetadata_LegacyConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      1000,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LegacyConfig kLegacyConfig() { return {}; }
+  void set_legacy_config(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_LegacyConfig::kFieldId, data, size);
+  }
+  void set_legacy_config(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_LegacyConfig::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ForTesting =
+    ::protozero::proto_utils::FieldMetadata<
+      1001,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TestConfig,
+      DataSourceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ForTesting kForTesting() { return {}; }
+  template <typename T = TestConfig> T* set_for_testing() {
+    return BeginNestedMessage<T>(1001);
+  }
+
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/interceptor_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTOR_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTOR_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class ConsoleConfig;
+
+class InterceptorConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/100, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  InterceptorConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit InterceptorConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit InterceptorConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_console_config() const { return at<100>().valid(); }
+  ::protozero::ConstBytes console_config() const { return at<100>().as_bytes(); }
+};
+
+class InterceptorConfig : public ::protozero::Message {
+ public:
+  using Decoder = InterceptorConfig_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kConsoleConfigFieldNumber = 100,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      InterceptorConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ConsoleConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      100,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ConsoleConfig,
+      InterceptorConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ConsoleConfig kConsoleConfig() { return {}; }
+  template <typename T = ConsoleConfig> T* set_console_config() {
+    return BeginNestedMessage<T>(100);
+  }
+
+  void set_console_config_raw(const std::string& raw) {
+    return AppendBytes(100, raw.data(), raw.size());
+  }
+
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/stress_test_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_STRESS_TEST_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_STRESS_TEST_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class StressTestConfig_WriterTiming;
+class TraceConfig;
+
+class StressTestConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/11, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  StressTestConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit StressTestConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit StressTestConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_trace_config() const { return at<1>().valid(); }
+  ::protozero::ConstBytes trace_config() const { return at<1>().as_bytes(); }
+  bool has_shmem_size_kb() const { return at<2>().valid(); }
+  uint32_t shmem_size_kb() const { return at<2>().as_uint32(); }
+  bool has_shmem_page_size_kb() const { return at<3>().valid(); }
+  uint32_t shmem_page_size_kb() const { return at<3>().as_uint32(); }
+  bool has_num_processes() const { return at<4>().valid(); }
+  uint32_t num_processes() const { return at<4>().as_uint32(); }
+  bool has_num_threads() const { return at<5>().valid(); }
+  uint32_t num_threads() const { return at<5>().as_uint32(); }
+  bool has_max_events() const { return at<6>().valid(); }
+  uint32_t max_events() const { return at<6>().as_uint32(); }
+  bool has_nesting() const { return at<7>().valid(); }
+  uint32_t nesting() const { return at<7>().as_uint32(); }
+  bool has_steady_state_timings() const { return at<8>().valid(); }
+  ::protozero::ConstBytes steady_state_timings() const { return at<8>().as_bytes(); }
+  bool has_burst_period_ms() const { return at<9>().valid(); }
+  uint32_t burst_period_ms() const { return at<9>().as_uint32(); }
+  bool has_burst_duration_ms() const { return at<10>().valid(); }
+  uint32_t burst_duration_ms() const { return at<10>().as_uint32(); }
+  bool has_burst_timings() const { return at<11>().valid(); }
+  ::protozero::ConstBytes burst_timings() const { return at<11>().as_bytes(); }
+};
+
+class StressTestConfig : public ::protozero::Message {
+ public:
+  using Decoder = StressTestConfig_Decoder;
+  enum : int32_t {
+    kTraceConfigFieldNumber = 1,
+    kShmemSizeKbFieldNumber = 2,
+    kShmemPageSizeKbFieldNumber = 3,
+    kNumProcessesFieldNumber = 4,
+    kNumThreadsFieldNumber = 5,
+    kMaxEventsFieldNumber = 6,
+    kNestingFieldNumber = 7,
+    kSteadyStateTimingsFieldNumber = 8,
+    kBurstPeriodMsFieldNumber = 9,
+    kBurstDurationMsFieldNumber = 10,
+    kBurstTimingsFieldNumber = 11,
+  };
+  using WriterTiming = ::perfetto::protos::pbzero::StressTestConfig_WriterTiming;
+
+  using FieldMetadata_TraceConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig,
+      StressTestConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceConfig kTraceConfig() { return {}; }
+  template <typename T = TraceConfig> T* set_trace_config() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_ShmemSizeKb =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      StressTestConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ShmemSizeKb kShmemSizeKb() { return {}; }
+  void set_shmem_size_kb(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ShmemSizeKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ShmemPageSizeKb =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      StressTestConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ShmemPageSizeKb kShmemPageSizeKb() { return {}; }
+  void set_shmem_page_size_kb(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ShmemPageSizeKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NumProcesses =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      StressTestConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NumProcesses kNumProcesses() { return {}; }
+  void set_num_processes(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NumProcesses::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NumThreads =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      StressTestConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NumThreads kNumThreads() { return {}; }
+  void set_num_threads(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NumThreads::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MaxEvents =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      StressTestConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MaxEvents kMaxEvents() { return {}; }
+  void set_max_events(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MaxEvents::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Nesting =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      StressTestConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nesting kNesting() { return {}; }
+  void set_nesting(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nesting::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SteadyStateTimings =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      StressTestConfig_WriterTiming,
+      StressTestConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SteadyStateTimings kSteadyStateTimings() { return {}; }
+  template <typename T = StressTestConfig_WriterTiming> T* set_steady_state_timings() {
+    return BeginNestedMessage<T>(8);
+  }
+
+
+  using FieldMetadata_BurstPeriodMs =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      StressTestConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BurstPeriodMs kBurstPeriodMs() { return {}; }
+  void set_burst_period_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BurstPeriodMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BurstDurationMs =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      StressTestConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BurstDurationMs kBurstDurationMs() { return {}; }
+  void set_burst_duration_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BurstDurationMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BurstTimings =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      StressTestConfig_WriterTiming,
+      StressTestConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BurstTimings kBurstTimings() { return {}; }
+  template <typename T = StressTestConfig_WriterTiming> T* set_burst_timings() {
+    return BeginNestedMessage<T>(11);
+  }
+
+};
+
+class StressTestConfig_WriterTiming_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  StressTestConfig_WriterTiming_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit StressTestConfig_WriterTiming_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit StressTestConfig_WriterTiming_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_payload_mean() const { return at<1>().valid(); }
+  double payload_mean() const { return at<1>().as_double(); }
+  bool has_payload_stddev() const { return at<2>().valid(); }
+  double payload_stddev() const { return at<2>().as_double(); }
+  bool has_rate_mean() const { return at<3>().valid(); }
+  double rate_mean() const { return at<3>().as_double(); }
+  bool has_rate_stddev() const { return at<4>().valid(); }
+  double rate_stddev() const { return at<4>().as_double(); }
+  bool has_payload_write_time_ms() const { return at<5>().valid(); }
+  uint32_t payload_write_time_ms() const { return at<5>().as_uint32(); }
+};
+
+class StressTestConfig_WriterTiming : public ::protozero::Message {
+ public:
+  using Decoder = StressTestConfig_WriterTiming_Decoder;
+  enum : int32_t {
+    kPayloadMeanFieldNumber = 1,
+    kPayloadStddevFieldNumber = 2,
+    kRateMeanFieldNumber = 3,
+    kRateStddevFieldNumber = 4,
+    kPayloadWriteTimeMsFieldNumber = 5,
+  };
+
+  using FieldMetadata_PayloadMean =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      StressTestConfig_WriterTiming>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PayloadMean kPayloadMean() { return {}; }
+  void set_payload_mean(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_PayloadMean::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PayloadStddev =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      StressTestConfig_WriterTiming>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PayloadStddev kPayloadStddev() { return {}; }
+  void set_payload_stddev(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_PayloadStddev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RateMean =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      StressTestConfig_WriterTiming>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RateMean kRateMean() { return {}; }
+  void set_rate_mean(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_RateMean::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RateStddev =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      StressTestConfig_WriterTiming>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RateStddev kRateStddev() { return {}; }
+  void set_rate_stddev(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_RateStddev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PayloadWriteTimeMs =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      StressTestConfig_WriterTiming>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PayloadWriteTimeMs kPayloadWriteTimeMs() { return {}; }
+  void set_payload_write_time_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PayloadWriteTimeMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/test_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TEST_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TEST_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class TestConfig_DummyFields;
+
+class TestConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TestConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TestConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TestConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_message_count() const { return at<1>().valid(); }
+  uint32_t message_count() const { return at<1>().as_uint32(); }
+  bool has_max_messages_per_second() const { return at<2>().valid(); }
+  uint32_t max_messages_per_second() const { return at<2>().as_uint32(); }
+  bool has_seed() const { return at<3>().valid(); }
+  uint32_t seed() const { return at<3>().as_uint32(); }
+  bool has_message_size() const { return at<4>().valid(); }
+  uint32_t message_size() const { return at<4>().as_uint32(); }
+  bool has_send_batch_on_register() const { return at<5>().valid(); }
+  bool send_batch_on_register() const { return at<5>().as_bool(); }
+  bool has_dummy_fields() const { return at<6>().valid(); }
+  ::protozero::ConstBytes dummy_fields() const { return at<6>().as_bytes(); }
+};
+
+class TestConfig : public ::protozero::Message {
+ public:
+  using Decoder = TestConfig_Decoder;
+  enum : int32_t {
+    kMessageCountFieldNumber = 1,
+    kMaxMessagesPerSecondFieldNumber = 2,
+    kSeedFieldNumber = 3,
+    kMessageSizeFieldNumber = 4,
+    kSendBatchOnRegisterFieldNumber = 5,
+    kDummyFieldsFieldNumber = 6,
+  };
+  using DummyFields = ::perfetto::protos::pbzero::TestConfig_DummyFields;
+
+  using FieldMetadata_MessageCount =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TestConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MessageCount kMessageCount() { return {}; }
+  void set_message_count(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MessageCount::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MaxMessagesPerSecond =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TestConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MaxMessagesPerSecond kMaxMessagesPerSecond() { return {}; }
+  void set_max_messages_per_second(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MaxMessagesPerSecond::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Seed =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TestConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Seed kSeed() { return {}; }
+  void set_seed(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Seed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MessageSize =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TestConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MessageSize kMessageSize() { return {}; }
+  void set_message_size(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MessageSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SendBatchOnRegister =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TestConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SendBatchOnRegister kSendBatchOnRegister() { return {}; }
+  void set_send_batch_on_register(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_SendBatchOnRegister::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DummyFields =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TestConfig_DummyFields,
+      TestConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DummyFields kDummyFields() { return {}; }
+  template <typename T = TestConfig_DummyFields> T* set_dummy_fields() {
+    return BeginNestedMessage<T>(6);
+  }
+
+};
+
+class TestConfig_DummyFields_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/14, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TestConfig_DummyFields_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TestConfig_DummyFields_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TestConfig_DummyFields_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_field_uint32() const { return at<1>().valid(); }
+  uint32_t field_uint32() const { return at<1>().as_uint32(); }
+  bool has_field_int32() const { return at<2>().valid(); }
+  int32_t field_int32() const { return at<2>().as_int32(); }
+  bool has_field_uint64() const { return at<3>().valid(); }
+  uint64_t field_uint64() const { return at<3>().as_uint64(); }
+  bool has_field_int64() const { return at<4>().valid(); }
+  int64_t field_int64() const { return at<4>().as_int64(); }
+  bool has_field_fixed64() const { return at<5>().valid(); }
+  uint64_t field_fixed64() const { return at<5>().as_uint64(); }
+  bool has_field_sfixed64() const { return at<6>().valid(); }
+  int64_t field_sfixed64() const { return at<6>().as_int64(); }
+  bool has_field_fixed32() const { return at<7>().valid(); }
+  uint32_t field_fixed32() const { return at<7>().as_uint32(); }
+  bool has_field_sfixed32() const { return at<8>().valid(); }
+  int32_t field_sfixed32() const { return at<8>().as_int32(); }
+  bool has_field_double() const { return at<9>().valid(); }
+  double field_double() const { return at<9>().as_double(); }
+  bool has_field_float() const { return at<10>().valid(); }
+  float field_float() const { return at<10>().as_float(); }
+  bool has_field_sint64() const { return at<11>().valid(); }
+  int64_t field_sint64() const { return at<11>().as_int64(); }
+  bool has_field_sint32() const { return at<12>().valid(); }
+  int32_t field_sint32() const { return at<12>().as_int32(); }
+  bool has_field_string() const { return at<13>().valid(); }
+  ::protozero::ConstChars field_string() const { return at<13>().as_string(); }
+  bool has_field_bytes() const { return at<14>().valid(); }
+  ::protozero::ConstBytes field_bytes() const { return at<14>().as_bytes(); }
+};
+
+class TestConfig_DummyFields : public ::protozero::Message {
+ public:
+  using Decoder = TestConfig_DummyFields_Decoder;
+  enum : int32_t {
+    kFieldUint32FieldNumber = 1,
+    kFieldInt32FieldNumber = 2,
+    kFieldUint64FieldNumber = 3,
+    kFieldInt64FieldNumber = 4,
+    kFieldFixed64FieldNumber = 5,
+    kFieldSfixed64FieldNumber = 6,
+    kFieldFixed32FieldNumber = 7,
+    kFieldSfixed32FieldNumber = 8,
+    kFieldDoubleFieldNumber = 9,
+    kFieldFloatFieldNumber = 10,
+    kFieldSint64FieldNumber = 11,
+    kFieldSint32FieldNumber = 12,
+    kFieldStringFieldNumber = 13,
+    kFieldBytesFieldNumber = 14,
+  };
+
+  using FieldMetadata_FieldUint32 =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TestConfig_DummyFields>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FieldUint32 kFieldUint32() { return {}; }
+  void set_field_uint32(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FieldUint32::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FieldInt32 =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TestConfig_DummyFields>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FieldInt32 kFieldInt32() { return {}; }
+  void set_field_int32(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FieldInt32::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FieldUint64 =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TestConfig_DummyFields>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FieldUint64 kFieldUint64() { return {}; }
+  void set_field_uint64(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FieldUint64::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FieldInt64 =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TestConfig_DummyFields>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FieldInt64 kFieldInt64() { return {}; }
+  void set_field_int64(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FieldInt64::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FieldFixed64 =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kFixed64,
+      uint64_t,
+      TestConfig_DummyFields>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FieldFixed64 kFieldFixed64() { return {}; }
+  void set_field_fixed64(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FieldFixed64::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kFixed64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FieldSfixed64 =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kSfixed64,
+      int64_t,
+      TestConfig_DummyFields>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FieldSfixed64 kFieldSfixed64() { return {}; }
+  void set_field_sfixed64(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FieldSfixed64::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kSfixed64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FieldFixed32 =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kFixed32,
+      uint32_t,
+      TestConfig_DummyFields>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FieldFixed32 kFieldFixed32() { return {}; }
+  void set_field_fixed32(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FieldFixed32::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kFixed32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FieldSfixed32 =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kSfixed32,
+      int32_t,
+      TestConfig_DummyFields>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FieldSfixed32 kFieldSfixed32() { return {}; }
+  void set_field_sfixed32(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FieldSfixed32::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kSfixed32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FieldDouble =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      TestConfig_DummyFields>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FieldDouble kFieldDouble() { return {}; }
+  void set_field_double(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_FieldDouble::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FieldFloat =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kFloat,
+      float,
+      TestConfig_DummyFields>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FieldFloat kFieldFloat() { return {}; }
+  void set_field_float(float value) {
+    static constexpr uint32_t field_id = FieldMetadata_FieldFloat::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kFloat>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FieldSint64 =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kSint64,
+      int64_t,
+      TestConfig_DummyFields>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FieldSint64 kFieldSint64() { return {}; }
+  void set_field_sint64(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FieldSint64::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kSint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FieldSint32 =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kSint32,
+      int32_t,
+      TestConfig_DummyFields>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FieldSint32 kFieldSint32() { return {}; }
+  void set_field_sint32(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FieldSint32::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kSint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FieldString =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TestConfig_DummyFields>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FieldString kFieldString() { return {}; }
+  void set_field_string(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_FieldString::kFieldId, data, size);
+  }
+  void set_field_string(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_FieldString::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FieldBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBytes,
+      std::string,
+      TestConfig_DummyFields>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FieldBytes kFieldBytes() { return {}; }
+  void set_field_bytes(const uint8_t* data, size_t size) {
+    AppendBytes(FieldMetadata_FieldBytes::kFieldId, data, size);
+  }
+  void set_field_bytes(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_FieldBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBytes>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/config/trace_config.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACE_CONFIG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACE_CONFIG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class DataSourceConfig;
+class TraceConfig_BufferConfig;
+class TraceConfig_BuiltinDataSource;
+class TraceConfig_DataSource;
+class TraceConfig_GuardrailOverrides;
+class TraceConfig_IncidentReportConfig;
+class TraceConfig_IncrementalStateConfig;
+class TraceConfig_ProducerConfig;
+class TraceConfig_StatsdMetadata;
+class TraceConfig_TraceFilter;
+class TraceConfig_TriggerConfig;
+class TraceConfig_TriggerConfig_Trigger;
+enum BuiltinClock : int32_t;
+enum TraceConfig_BufferConfig_FillPolicy : int32_t;
+enum TraceConfig_CompressionType : int32_t;
+enum TraceConfig_LockdownModeOperation : int32_t;
+enum TraceConfig_StatsdLogging : int32_t;
+enum TraceConfig_TriggerConfig_TriggerMode : int32_t;
+
+enum TraceConfig_LockdownModeOperation : int32_t {
+  TraceConfig_LockdownModeOperation_LOCKDOWN_UNCHANGED = 0,
+  TraceConfig_LockdownModeOperation_LOCKDOWN_CLEAR = 1,
+  TraceConfig_LockdownModeOperation_LOCKDOWN_SET = 2,
+};
+
+const TraceConfig_LockdownModeOperation TraceConfig_LockdownModeOperation_MIN = TraceConfig_LockdownModeOperation_LOCKDOWN_UNCHANGED;
+const TraceConfig_LockdownModeOperation TraceConfig_LockdownModeOperation_MAX = TraceConfig_LockdownModeOperation_LOCKDOWN_SET;
+
+enum TraceConfig_CompressionType : int32_t {
+  TraceConfig_CompressionType_COMPRESSION_TYPE_UNSPECIFIED = 0,
+  TraceConfig_CompressionType_COMPRESSION_TYPE_DEFLATE = 1,
+};
+
+const TraceConfig_CompressionType TraceConfig_CompressionType_MIN = TraceConfig_CompressionType_COMPRESSION_TYPE_UNSPECIFIED;
+const TraceConfig_CompressionType TraceConfig_CompressionType_MAX = TraceConfig_CompressionType_COMPRESSION_TYPE_DEFLATE;
+
+enum TraceConfig_StatsdLogging : int32_t {
+  TraceConfig_StatsdLogging_STATSD_LOGGING_UNSPECIFIED = 0,
+  TraceConfig_StatsdLogging_STATSD_LOGGING_ENABLED = 1,
+  TraceConfig_StatsdLogging_STATSD_LOGGING_DISABLED = 2,
+};
+
+const TraceConfig_StatsdLogging TraceConfig_StatsdLogging_MIN = TraceConfig_StatsdLogging_STATSD_LOGGING_UNSPECIFIED;
+const TraceConfig_StatsdLogging TraceConfig_StatsdLogging_MAX = TraceConfig_StatsdLogging_STATSD_LOGGING_DISABLED;
+
+enum TraceConfig_TriggerConfig_TriggerMode : int32_t {
+  TraceConfig_TriggerConfig_TriggerMode_UNSPECIFIED = 0,
+  TraceConfig_TriggerConfig_TriggerMode_START_TRACING = 1,
+  TraceConfig_TriggerConfig_TriggerMode_STOP_TRACING = 2,
+};
+
+const TraceConfig_TriggerConfig_TriggerMode TraceConfig_TriggerConfig_TriggerMode_MIN = TraceConfig_TriggerConfig_TriggerMode_UNSPECIFIED;
+const TraceConfig_TriggerConfig_TriggerMode TraceConfig_TriggerConfig_TriggerMode_MAX = TraceConfig_TriggerConfig_TriggerMode_STOP_TRACING;
+
+enum TraceConfig_BufferConfig_FillPolicy : int32_t {
+  TraceConfig_BufferConfig_FillPolicy_UNSPECIFIED = 0,
+  TraceConfig_BufferConfig_FillPolicy_RING_BUFFER = 1,
+  TraceConfig_BufferConfig_FillPolicy_DISCARD = 2,
+};
+
+const TraceConfig_BufferConfig_FillPolicy TraceConfig_BufferConfig_FillPolicy_MIN = TraceConfig_BufferConfig_FillPolicy_UNSPECIFIED;
+const TraceConfig_BufferConfig_FillPolicy TraceConfig_BufferConfig_FillPolicy_MAX = TraceConfig_BufferConfig_FillPolicy_DISCARD;
+
+class TraceConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/32, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TraceConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_buffers() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> buffers() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_data_sources() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> data_sources() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_builtin_data_sources() const { return at<20>().valid(); }
+  ::protozero::ConstBytes builtin_data_sources() const { return at<20>().as_bytes(); }
+  bool has_duration_ms() const { return at<3>().valid(); }
+  uint32_t duration_ms() const { return at<3>().as_uint32(); }
+  bool has_enable_extra_guardrails() const { return at<4>().valid(); }
+  bool enable_extra_guardrails() const { return at<4>().as_bool(); }
+  bool has_lockdown_mode() const { return at<5>().valid(); }
+  int32_t lockdown_mode() const { return at<5>().as_int32(); }
+  bool has_producers() const { return at<6>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> producers() const { return GetRepeated<::protozero::ConstBytes>(6); }
+  bool has_statsd_metadata() const { return at<7>().valid(); }
+  ::protozero::ConstBytes statsd_metadata() const { return at<7>().as_bytes(); }
+  bool has_write_into_file() const { return at<8>().valid(); }
+  bool write_into_file() const { return at<8>().as_bool(); }
+  bool has_output_path() const { return at<29>().valid(); }
+  ::protozero::ConstChars output_path() const { return at<29>().as_string(); }
+  bool has_file_write_period_ms() const { return at<9>().valid(); }
+  uint32_t file_write_period_ms() const { return at<9>().as_uint32(); }
+  bool has_max_file_size_bytes() const { return at<10>().valid(); }
+  uint64_t max_file_size_bytes() const { return at<10>().as_uint64(); }
+  bool has_guardrail_overrides() const { return at<11>().valid(); }
+  ::protozero::ConstBytes guardrail_overrides() const { return at<11>().as_bytes(); }
+  bool has_deferred_start() const { return at<12>().valid(); }
+  bool deferred_start() const { return at<12>().as_bool(); }
+  bool has_flush_period_ms() const { return at<13>().valid(); }
+  uint32_t flush_period_ms() const { return at<13>().as_uint32(); }
+  bool has_flush_timeout_ms() const { return at<14>().valid(); }
+  uint32_t flush_timeout_ms() const { return at<14>().as_uint32(); }
+  bool has_data_source_stop_timeout_ms() const { return at<23>().valid(); }
+  uint32_t data_source_stop_timeout_ms() const { return at<23>().as_uint32(); }
+  bool has_notify_traceur() const { return at<16>().valid(); }
+  bool notify_traceur() const { return at<16>().as_bool(); }
+  bool has_bugreport_score() const { return at<30>().valid(); }
+  int32_t bugreport_score() const { return at<30>().as_int32(); }
+  bool has_trigger_config() const { return at<17>().valid(); }
+  ::protozero::ConstBytes trigger_config() const { return at<17>().as_bytes(); }
+  bool has_activate_triggers() const { return at<18>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> activate_triggers() const { return GetRepeated<::protozero::ConstChars>(18); }
+  bool has_incremental_state_config() const { return at<21>().valid(); }
+  ::protozero::ConstBytes incremental_state_config() const { return at<21>().as_bytes(); }
+  bool has_allow_user_build_tracing() const { return at<19>().valid(); }
+  bool allow_user_build_tracing() const { return at<19>().as_bool(); }
+  bool has_unique_session_name() const { return at<22>().valid(); }
+  ::protozero::ConstChars unique_session_name() const { return at<22>().as_string(); }
+  bool has_compression_type() const { return at<24>().valid(); }
+  int32_t compression_type() const { return at<24>().as_int32(); }
+  bool has_incident_report_config() const { return at<25>().valid(); }
+  ::protozero::ConstBytes incident_report_config() const { return at<25>().as_bytes(); }
+  bool has_statsd_logging() const { return at<31>().valid(); }
+  int32_t statsd_logging() const { return at<31>().as_int32(); }
+  bool has_trace_uuid_msb() const { return at<27>().valid(); }
+  int64_t trace_uuid_msb() const { return at<27>().as_int64(); }
+  bool has_trace_uuid_lsb() const { return at<28>().valid(); }
+  int64_t trace_uuid_lsb() const { return at<28>().as_int64(); }
+  bool has_trace_filter() const { return at<32>().valid(); }
+  ::protozero::ConstBytes trace_filter() const { return at<32>().as_bytes(); }
+};
+
+class TraceConfig : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_Decoder;
+  enum : int32_t {
+    kBuffersFieldNumber = 1,
+    kDataSourcesFieldNumber = 2,
+    kBuiltinDataSourcesFieldNumber = 20,
+    kDurationMsFieldNumber = 3,
+    kEnableExtraGuardrailsFieldNumber = 4,
+    kLockdownModeFieldNumber = 5,
+    kProducersFieldNumber = 6,
+    kStatsdMetadataFieldNumber = 7,
+    kWriteIntoFileFieldNumber = 8,
+    kOutputPathFieldNumber = 29,
+    kFileWritePeriodMsFieldNumber = 9,
+    kMaxFileSizeBytesFieldNumber = 10,
+    kGuardrailOverridesFieldNumber = 11,
+    kDeferredStartFieldNumber = 12,
+    kFlushPeriodMsFieldNumber = 13,
+    kFlushTimeoutMsFieldNumber = 14,
+    kDataSourceStopTimeoutMsFieldNumber = 23,
+    kNotifyTraceurFieldNumber = 16,
+    kBugreportScoreFieldNumber = 30,
+    kTriggerConfigFieldNumber = 17,
+    kActivateTriggersFieldNumber = 18,
+    kIncrementalStateConfigFieldNumber = 21,
+    kAllowUserBuildTracingFieldNumber = 19,
+    kUniqueSessionNameFieldNumber = 22,
+    kCompressionTypeFieldNumber = 24,
+    kIncidentReportConfigFieldNumber = 25,
+    kStatsdLoggingFieldNumber = 31,
+    kTraceUuidMsbFieldNumber = 27,
+    kTraceUuidLsbFieldNumber = 28,
+    kTraceFilterFieldNumber = 32,
+  };
+  using BufferConfig = ::perfetto::protos::pbzero::TraceConfig_BufferConfig;
+  using DataSource = ::perfetto::protos::pbzero::TraceConfig_DataSource;
+  using BuiltinDataSource = ::perfetto::protos::pbzero::TraceConfig_BuiltinDataSource;
+  using ProducerConfig = ::perfetto::protos::pbzero::TraceConfig_ProducerConfig;
+  using StatsdMetadata = ::perfetto::protos::pbzero::TraceConfig_StatsdMetadata;
+  using GuardrailOverrides = ::perfetto::protos::pbzero::TraceConfig_GuardrailOverrides;
+  using TriggerConfig = ::perfetto::protos::pbzero::TraceConfig_TriggerConfig;
+  using IncrementalStateConfig = ::perfetto::protos::pbzero::TraceConfig_IncrementalStateConfig;
+  using IncidentReportConfig = ::perfetto::protos::pbzero::TraceConfig_IncidentReportConfig;
+  using TraceFilter = ::perfetto::protos::pbzero::TraceConfig_TraceFilter;
+  using LockdownModeOperation = ::perfetto::protos::pbzero::TraceConfig_LockdownModeOperation;
+  using CompressionType = ::perfetto::protos::pbzero::TraceConfig_CompressionType;
+  using StatsdLogging = ::perfetto::protos::pbzero::TraceConfig_StatsdLogging;
+  static const LockdownModeOperation LOCKDOWN_UNCHANGED = TraceConfig_LockdownModeOperation_LOCKDOWN_UNCHANGED;
+  static const LockdownModeOperation LOCKDOWN_CLEAR = TraceConfig_LockdownModeOperation_LOCKDOWN_CLEAR;
+  static const LockdownModeOperation LOCKDOWN_SET = TraceConfig_LockdownModeOperation_LOCKDOWN_SET;
+  static const CompressionType COMPRESSION_TYPE_UNSPECIFIED = TraceConfig_CompressionType_COMPRESSION_TYPE_UNSPECIFIED;
+  static const CompressionType COMPRESSION_TYPE_DEFLATE = TraceConfig_CompressionType_COMPRESSION_TYPE_DEFLATE;
+  static const StatsdLogging STATSD_LOGGING_UNSPECIFIED = TraceConfig_StatsdLogging_STATSD_LOGGING_UNSPECIFIED;
+  static const StatsdLogging STATSD_LOGGING_ENABLED = TraceConfig_StatsdLogging_STATSD_LOGGING_ENABLED;
+  static const StatsdLogging STATSD_LOGGING_DISABLED = TraceConfig_StatsdLogging_STATSD_LOGGING_DISABLED;
+
+  using FieldMetadata_Buffers =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_BufferConfig,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Buffers kBuffers() { return {}; }
+  template <typename T = TraceConfig_BufferConfig> T* add_buffers() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_DataSources =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_DataSource,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DataSources kDataSources() { return {}; }
+  template <typename T = TraceConfig_DataSource> T* add_data_sources() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_BuiltinDataSources =
+    ::protozero::proto_utils::FieldMetadata<
+      20,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_BuiltinDataSource,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BuiltinDataSources kBuiltinDataSources() { return {}; }
+  template <typename T = TraceConfig_BuiltinDataSource> T* set_builtin_data_sources() {
+    return BeginNestedMessage<T>(20);
+  }
+
+
+  using FieldMetadata_DurationMs =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DurationMs kDurationMs() { return {}; }
+  void set_duration_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DurationMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EnableExtraGuardrails =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EnableExtraGuardrails kEnableExtraGuardrails() { return {}; }
+  void set_enable_extra_guardrails(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_EnableExtraGuardrails::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LockdownMode =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::TraceConfig_LockdownModeOperation,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LockdownMode kLockdownMode() { return {}; }
+  void set_lockdown_mode(::perfetto::protos::pbzero::TraceConfig_LockdownModeOperation value) {
+    static constexpr uint32_t field_id = FieldMetadata_LockdownMode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Producers =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_ProducerConfig,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Producers kProducers() { return {}; }
+  template <typename T = TraceConfig_ProducerConfig> T* add_producers() {
+    return BeginNestedMessage<T>(6);
+  }
+
+
+  using FieldMetadata_StatsdMetadata =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_StatsdMetadata,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StatsdMetadata kStatsdMetadata() { return {}; }
+  template <typename T = TraceConfig_StatsdMetadata> T* set_statsd_metadata() {
+    return BeginNestedMessage<T>(7);
+  }
+
+
+  using FieldMetadata_WriteIntoFile =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WriteIntoFile kWriteIntoFile() { return {}; }
+  void set_write_into_file(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_WriteIntoFile::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OutputPath =
+    ::protozero::proto_utils::FieldMetadata<
+      29,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OutputPath kOutputPath() { return {}; }
+  void set_output_path(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_OutputPath::kFieldId, data, size);
+  }
+  void set_output_path(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_OutputPath::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FileWritePeriodMs =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FileWritePeriodMs kFileWritePeriodMs() { return {}; }
+  void set_file_write_period_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FileWritePeriodMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MaxFileSizeBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MaxFileSizeBytes kMaxFileSizeBytes() { return {}; }
+  void set_max_file_size_bytes(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MaxFileSizeBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GuardrailOverrides =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_GuardrailOverrides,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GuardrailOverrides kGuardrailOverrides() { return {}; }
+  template <typename T = TraceConfig_GuardrailOverrides> T* set_guardrail_overrides() {
+    return BeginNestedMessage<T>(11);
+  }
+
+
+  using FieldMetadata_DeferredStart =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DeferredStart kDeferredStart() { return {}; }
+  void set_deferred_start(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DeferredStart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FlushPeriodMs =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FlushPeriodMs kFlushPeriodMs() { return {}; }
+  void set_flush_period_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FlushPeriodMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FlushTimeoutMs =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FlushTimeoutMs kFlushTimeoutMs() { return {}; }
+  void set_flush_timeout_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FlushTimeoutMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DataSourceStopTimeoutMs =
+    ::protozero::proto_utils::FieldMetadata<
+      23,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DataSourceStopTimeoutMs kDataSourceStopTimeoutMs() { return {}; }
+  void set_data_source_stop_timeout_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DataSourceStopTimeoutMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NotifyTraceur =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NotifyTraceur kNotifyTraceur() { return {}; }
+  void set_notify_traceur(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_NotifyTraceur::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BugreportScore =
+    ::protozero::proto_utils::FieldMetadata<
+      30,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BugreportScore kBugreportScore() { return {}; }
+  void set_bugreport_score(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BugreportScore::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TriggerConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      17,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_TriggerConfig,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggerConfig kTriggerConfig() { return {}; }
+  template <typename T = TraceConfig_TriggerConfig> T* set_trigger_config() {
+    return BeginNestedMessage<T>(17);
+  }
+
+
+  using FieldMetadata_ActivateTriggers =
+    ::protozero::proto_utils::FieldMetadata<
+      18,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ActivateTriggers kActivateTriggers() { return {}; }
+  void add_activate_triggers(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ActivateTriggers::kFieldId, data, size);
+  }
+  void add_activate_triggers(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ActivateTriggers::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IncrementalStateConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      21,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_IncrementalStateConfig,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IncrementalStateConfig kIncrementalStateConfig() { return {}; }
+  template <typename T = TraceConfig_IncrementalStateConfig> T* set_incremental_state_config() {
+    return BeginNestedMessage<T>(21);
+  }
+
+
+  using FieldMetadata_AllowUserBuildTracing =
+    ::protozero::proto_utils::FieldMetadata<
+      19,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllowUserBuildTracing kAllowUserBuildTracing() { return {}; }
+  void set_allow_user_build_tracing(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_AllowUserBuildTracing::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UniqueSessionName =
+    ::protozero::proto_utils::FieldMetadata<
+      22,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UniqueSessionName kUniqueSessionName() { return {}; }
+  void set_unique_session_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_UniqueSessionName::kFieldId, data, size);
+  }
+  void set_unique_session_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_UniqueSessionName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CompressionType =
+    ::protozero::proto_utils::FieldMetadata<
+      24,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::TraceConfig_CompressionType,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CompressionType kCompressionType() { return {}; }
+  void set_compression_type(::perfetto::protos::pbzero::TraceConfig_CompressionType value) {
+    static constexpr uint32_t field_id = FieldMetadata_CompressionType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IncidentReportConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      25,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_IncidentReportConfig,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IncidentReportConfig kIncidentReportConfig() { return {}; }
+  template <typename T = TraceConfig_IncidentReportConfig> T* set_incident_report_config() {
+    return BeginNestedMessage<T>(25);
+  }
+
+
+  using FieldMetadata_StatsdLogging =
+    ::protozero::proto_utils::FieldMetadata<
+      31,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::TraceConfig_StatsdLogging,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StatsdLogging kStatsdLogging() { return {}; }
+  void set_statsd_logging(::perfetto::protos::pbzero::TraceConfig_StatsdLogging value) {
+    static constexpr uint32_t field_id = FieldMetadata_StatsdLogging::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TraceUuidMsb =
+    ::protozero::proto_utils::FieldMetadata<
+      27,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceUuidMsb kTraceUuidMsb() { return {}; }
+  void set_trace_uuid_msb(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TraceUuidMsb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TraceUuidLsb =
+    ::protozero::proto_utils::FieldMetadata<
+      28,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceUuidLsb kTraceUuidLsb() { return {}; }
+  void set_trace_uuid_lsb(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TraceUuidLsb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TraceFilter =
+    ::protozero::proto_utils::FieldMetadata<
+      32,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_TraceFilter,
+      TraceConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceFilter kTraceFilter() { return {}; }
+  template <typename T = TraceConfig_TraceFilter> T* set_trace_filter() {
+    return BeginNestedMessage<T>(32);
+  }
+
+};
+
+class TraceConfig_TraceFilter_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_TraceFilter_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_TraceFilter_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_TraceFilter_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_bytecode() const { return at<1>().valid(); }
+  ::protozero::ConstBytes bytecode() const { return at<1>().as_bytes(); }
+};
+
+class TraceConfig_TraceFilter : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_TraceFilter_Decoder;
+  enum : int32_t {
+    kBytecodeFieldNumber = 1,
+  };
+
+  using FieldMetadata_Bytecode =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBytes,
+      std::string,
+      TraceConfig_TraceFilter>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Bytecode kBytecode() { return {}; }
+  void set_bytecode(const uint8_t* data, size_t size) {
+    AppendBytes(FieldMetadata_Bytecode::kFieldId, data, size);
+  }
+  void set_bytecode(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Bytecode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBytes>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_IncidentReportConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_IncidentReportConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_IncidentReportConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_IncidentReportConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_destination_package() const { return at<1>().valid(); }
+  ::protozero::ConstChars destination_package() const { return at<1>().as_string(); }
+  bool has_destination_class() const { return at<2>().valid(); }
+  ::protozero::ConstChars destination_class() const { return at<2>().as_string(); }
+  bool has_privacy_level() const { return at<3>().valid(); }
+  int32_t privacy_level() const { return at<3>().as_int32(); }
+  bool has_skip_incidentd() const { return at<5>().valid(); }
+  bool skip_incidentd() const { return at<5>().as_bool(); }
+  bool has_skip_dropbox() const { return at<4>().valid(); }
+  bool skip_dropbox() const { return at<4>().as_bool(); }
+};
+
+class TraceConfig_IncidentReportConfig : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_IncidentReportConfig_Decoder;
+  enum : int32_t {
+    kDestinationPackageFieldNumber = 1,
+    kDestinationClassFieldNumber = 2,
+    kPrivacyLevelFieldNumber = 3,
+    kSkipIncidentdFieldNumber = 5,
+    kSkipDropboxFieldNumber = 4,
+  };
+
+  using FieldMetadata_DestinationPackage =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig_IncidentReportConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DestinationPackage kDestinationPackage() { return {}; }
+  void set_destination_package(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_DestinationPackage::kFieldId, data, size);
+  }
+  void set_destination_package(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_DestinationPackage::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DestinationClass =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig_IncidentReportConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DestinationClass kDestinationClass() { return {}; }
+  void set_destination_class(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_DestinationClass::kFieldId, data, size);
+  }
+  void set_destination_class(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_DestinationClass::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PrivacyLevel =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TraceConfig_IncidentReportConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PrivacyLevel kPrivacyLevel() { return {}; }
+  void set_privacy_level(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PrivacyLevel::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SkipIncidentd =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig_IncidentReportConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SkipIncidentd kSkipIncidentd() { return {}; }
+  void set_skip_incidentd(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_SkipIncidentd::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SkipDropbox =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig_IncidentReportConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SkipDropbox kSkipDropbox() { return {}; }
+  void set_skip_dropbox(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_SkipDropbox::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_IncrementalStateConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_IncrementalStateConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_IncrementalStateConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_IncrementalStateConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_clear_period_ms() const { return at<1>().valid(); }
+  uint32_t clear_period_ms() const { return at<1>().as_uint32(); }
+};
+
+class TraceConfig_IncrementalStateConfig : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_IncrementalStateConfig_Decoder;
+  enum : int32_t {
+    kClearPeriodMsFieldNumber = 1,
+  };
+
+  using FieldMetadata_ClearPeriodMs =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig_IncrementalStateConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClearPeriodMs kClearPeriodMs() { return {}; }
+  void set_clear_period_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ClearPeriodMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_TriggerConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TraceConfig_TriggerConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_TriggerConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_TriggerConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_trigger_mode() const { return at<1>().valid(); }
+  int32_t trigger_mode() const { return at<1>().as_int32(); }
+  bool has_triggers() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> triggers() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_trigger_timeout_ms() const { return at<3>().valid(); }
+  uint32_t trigger_timeout_ms() const { return at<3>().as_uint32(); }
+};
+
+class TraceConfig_TriggerConfig : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_TriggerConfig_Decoder;
+  enum : int32_t {
+    kTriggerModeFieldNumber = 1,
+    kTriggersFieldNumber = 2,
+    kTriggerTimeoutMsFieldNumber = 3,
+  };
+  using Trigger = ::perfetto::protos::pbzero::TraceConfig_TriggerConfig_Trigger;
+  using TriggerMode = ::perfetto::protos::pbzero::TraceConfig_TriggerConfig_TriggerMode;
+  static const TriggerMode UNSPECIFIED = TraceConfig_TriggerConfig_TriggerMode_UNSPECIFIED;
+  static const TriggerMode START_TRACING = TraceConfig_TriggerConfig_TriggerMode_START_TRACING;
+  static const TriggerMode STOP_TRACING = TraceConfig_TriggerConfig_TriggerMode_STOP_TRACING;
+
+  using FieldMetadata_TriggerMode =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::TraceConfig_TriggerConfig_TriggerMode,
+      TraceConfig_TriggerConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggerMode kTriggerMode() { return {}; }
+  void set_trigger_mode(::perfetto::protos::pbzero::TraceConfig_TriggerConfig_TriggerMode value) {
+    static constexpr uint32_t field_id = FieldMetadata_TriggerMode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Triggers =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig_TriggerConfig_Trigger,
+      TraceConfig_TriggerConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Triggers kTriggers() { return {}; }
+  template <typename T = TraceConfig_TriggerConfig_Trigger> T* add_triggers() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_TriggerTimeoutMs =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig_TriggerConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggerTimeoutMs kTriggerTimeoutMs() { return {}; }
+  void set_trigger_timeout_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TriggerTimeoutMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_TriggerConfig_Trigger_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_TriggerConfig_Trigger_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_TriggerConfig_Trigger_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_TriggerConfig_Trigger_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_producer_name_regex() const { return at<2>().valid(); }
+  ::protozero::ConstChars producer_name_regex() const { return at<2>().as_string(); }
+  bool has_stop_delay_ms() const { return at<3>().valid(); }
+  uint32_t stop_delay_ms() const { return at<3>().as_uint32(); }
+  bool has_max_per_24_h() const { return at<4>().valid(); }
+  uint32_t max_per_24_h() const { return at<4>().as_uint32(); }
+  bool has_skip_probability() const { return at<5>().valid(); }
+  double skip_probability() const { return at<5>().as_double(); }
+};
+
+class TraceConfig_TriggerConfig_Trigger : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_TriggerConfig_Trigger_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kProducerNameRegexFieldNumber = 2,
+    kStopDelayMsFieldNumber = 3,
+    kMaxPer24HFieldNumber = 4,
+    kSkipProbabilityFieldNumber = 5,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig_TriggerConfig_Trigger>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProducerNameRegex =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig_TriggerConfig_Trigger>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProducerNameRegex kProducerNameRegex() { return {}; }
+  void set_producer_name_regex(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ProducerNameRegex::kFieldId, data, size);
+  }
+  void set_producer_name_regex(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProducerNameRegex::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StopDelayMs =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig_TriggerConfig_Trigger>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StopDelayMs kStopDelayMs() { return {}; }
+  void set_stop_delay_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_StopDelayMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MaxPer24H =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig_TriggerConfig_Trigger>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MaxPer24H kMaxPer24H() { return {}; }
+  void set_max_per_24_h(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MaxPer24H::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SkipProbability =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      TraceConfig_TriggerConfig_Trigger>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SkipProbability kSkipProbability() { return {}; }
+  void set_skip_probability(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_SkipProbability::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_GuardrailOverrides_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_GuardrailOverrides_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_GuardrailOverrides_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_GuardrailOverrides_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_max_upload_per_day_bytes() const { return at<1>().valid(); }
+  uint64_t max_upload_per_day_bytes() const { return at<1>().as_uint64(); }
+};
+
+class TraceConfig_GuardrailOverrides : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_GuardrailOverrides_Decoder;
+  enum : int32_t {
+    kMaxUploadPerDayBytesFieldNumber = 1,
+  };
+
+  using FieldMetadata_MaxUploadPerDayBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TraceConfig_GuardrailOverrides>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MaxUploadPerDayBytes kMaxUploadPerDayBytes() { return {}; }
+  void set_max_upload_per_day_bytes(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MaxUploadPerDayBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_StatsdMetadata_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_StatsdMetadata_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_StatsdMetadata_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_StatsdMetadata_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_triggering_alert_id() const { return at<1>().valid(); }
+  int64_t triggering_alert_id() const { return at<1>().as_int64(); }
+  bool has_triggering_config_uid() const { return at<2>().valid(); }
+  int32_t triggering_config_uid() const { return at<2>().as_int32(); }
+  bool has_triggering_config_id() const { return at<3>().valid(); }
+  int64_t triggering_config_id() const { return at<3>().as_int64(); }
+  bool has_triggering_subscription_id() const { return at<4>().valid(); }
+  int64_t triggering_subscription_id() const { return at<4>().as_int64(); }
+};
+
+class TraceConfig_StatsdMetadata : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_StatsdMetadata_Decoder;
+  enum : int32_t {
+    kTriggeringAlertIdFieldNumber = 1,
+    kTriggeringConfigUidFieldNumber = 2,
+    kTriggeringConfigIdFieldNumber = 3,
+    kTriggeringSubscriptionIdFieldNumber = 4,
+  };
+
+  using FieldMetadata_TriggeringAlertId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TraceConfig_StatsdMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggeringAlertId kTriggeringAlertId() { return {}; }
+  void set_triggering_alert_id(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TriggeringAlertId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TriggeringConfigUid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TraceConfig_StatsdMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggeringConfigUid kTriggeringConfigUid() { return {}; }
+  void set_triggering_config_uid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TriggeringConfigUid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TriggeringConfigId =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TraceConfig_StatsdMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggeringConfigId kTriggeringConfigId() { return {}; }
+  void set_triggering_config_id(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TriggeringConfigId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TriggeringSubscriptionId =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TraceConfig_StatsdMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggeringSubscriptionId kTriggeringSubscriptionId() { return {}; }
+  void set_triggering_subscription_id(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TriggeringSubscriptionId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_ProducerConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_ProducerConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_ProducerConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_ProducerConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_producer_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars producer_name() const { return at<1>().as_string(); }
+  bool has_shm_size_kb() const { return at<2>().valid(); }
+  uint32_t shm_size_kb() const { return at<2>().as_uint32(); }
+  bool has_page_size_kb() const { return at<3>().valid(); }
+  uint32_t page_size_kb() const { return at<3>().as_uint32(); }
+};
+
+class TraceConfig_ProducerConfig : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_ProducerConfig_Decoder;
+  enum : int32_t {
+    kProducerNameFieldNumber = 1,
+    kShmSizeKbFieldNumber = 2,
+    kPageSizeKbFieldNumber = 3,
+  };
+
+  using FieldMetadata_ProducerName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig_ProducerConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProducerName kProducerName() { return {}; }
+  void set_producer_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ProducerName::kFieldId, data, size);
+  }
+  void set_producer_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProducerName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ShmSizeKb =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig_ProducerConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ShmSizeKb kShmSizeKb() { return {}; }
+  void set_shm_size_kb(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ShmSizeKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PageSizeKb =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig_ProducerConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PageSizeKb kPageSizeKb() { return {}; }
+  void set_page_size_kb(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PageSizeKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_BuiltinDataSource_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_BuiltinDataSource_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_BuiltinDataSource_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_BuiltinDataSource_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_disable_clock_snapshotting() const { return at<1>().valid(); }
+  bool disable_clock_snapshotting() const { return at<1>().as_bool(); }
+  bool has_disable_trace_config() const { return at<2>().valid(); }
+  bool disable_trace_config() const { return at<2>().as_bool(); }
+  bool has_disable_system_info() const { return at<3>().valid(); }
+  bool disable_system_info() const { return at<3>().as_bool(); }
+  bool has_disable_service_events() const { return at<4>().valid(); }
+  bool disable_service_events() const { return at<4>().as_bool(); }
+  bool has_primary_trace_clock() const { return at<5>().valid(); }
+  int32_t primary_trace_clock() const { return at<5>().as_int32(); }
+  bool has_snapshot_interval_ms() const { return at<6>().valid(); }
+  uint32_t snapshot_interval_ms() const { return at<6>().as_uint32(); }
+  bool has_prefer_suspend_clock_for_snapshot() const { return at<7>().valid(); }
+  bool prefer_suspend_clock_for_snapshot() const { return at<7>().as_bool(); }
+};
+
+class TraceConfig_BuiltinDataSource : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_BuiltinDataSource_Decoder;
+  enum : int32_t {
+    kDisableClockSnapshottingFieldNumber = 1,
+    kDisableTraceConfigFieldNumber = 2,
+    kDisableSystemInfoFieldNumber = 3,
+    kDisableServiceEventsFieldNumber = 4,
+    kPrimaryTraceClockFieldNumber = 5,
+    kSnapshotIntervalMsFieldNumber = 6,
+    kPreferSuspendClockForSnapshotFieldNumber = 7,
+  };
+
+  using FieldMetadata_DisableClockSnapshotting =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig_BuiltinDataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DisableClockSnapshotting kDisableClockSnapshotting() { return {}; }
+  void set_disable_clock_snapshotting(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DisableClockSnapshotting::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DisableTraceConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig_BuiltinDataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DisableTraceConfig kDisableTraceConfig() { return {}; }
+  void set_disable_trace_config(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DisableTraceConfig::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DisableSystemInfo =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig_BuiltinDataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DisableSystemInfo kDisableSystemInfo() { return {}; }
+  void set_disable_system_info(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DisableSystemInfo::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DisableServiceEvents =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig_BuiltinDataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DisableServiceEvents kDisableServiceEvents() { return {}; }
+  void set_disable_service_events(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DisableServiceEvents::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PrimaryTraceClock =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::BuiltinClock,
+      TraceConfig_BuiltinDataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PrimaryTraceClock kPrimaryTraceClock() { return {}; }
+  void set_primary_trace_clock(::perfetto::protos::pbzero::BuiltinClock value) {
+    static constexpr uint32_t field_id = FieldMetadata_PrimaryTraceClock::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SnapshotIntervalMs =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig_BuiltinDataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SnapshotIntervalMs kSnapshotIntervalMs() { return {}; }
+  void set_snapshot_interval_ms(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SnapshotIntervalMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PreferSuspendClockForSnapshot =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TraceConfig_BuiltinDataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PreferSuspendClockForSnapshot kPreferSuspendClockForSnapshot() { return {}; }
+  void set_prefer_suspend_clock_for_snapshot(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_PreferSuspendClockForSnapshot::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_DataSource_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TraceConfig_DataSource_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_DataSource_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_DataSource_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_config() const { return at<1>().valid(); }
+  ::protozero::ConstBytes config() const { return at<1>().as_bytes(); }
+  bool has_producer_name_filter() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> producer_name_filter() const { return GetRepeated<::protozero::ConstChars>(2); }
+  bool has_producer_name_regex_filter() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> producer_name_regex_filter() const { return GetRepeated<::protozero::ConstChars>(3); }
+};
+
+class TraceConfig_DataSource : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_DataSource_Decoder;
+  enum : int32_t {
+    kConfigFieldNumber = 1,
+    kProducerNameFilterFieldNumber = 2,
+    kProducerNameRegexFilterFieldNumber = 3,
+  };
+
+  using FieldMetadata_Config =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DataSourceConfig,
+      TraceConfig_DataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Config kConfig() { return {}; }
+  template <typename T = DataSourceConfig> T* set_config() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_ProducerNameFilter =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig_DataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProducerNameFilter kProducerNameFilter() { return {}; }
+  void add_producer_name_filter(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ProducerNameFilter::kFieldId, data, size);
+  }
+  void add_producer_name_filter(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProducerNameFilter::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProducerNameRegexFilter =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TraceConfig_DataSource>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProducerNameRegexFilter kProducerNameRegexFilter() { return {}; }
+  void add_producer_name_regex_filter(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ProducerNameRegexFilter::kFieldId, data, size);
+  }
+  void add_producer_name_regex_filter(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProducerNameRegexFilter::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TraceConfig_BufferConfig_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TraceConfig_BufferConfig_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TraceConfig_BufferConfig_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TraceConfig_BufferConfig_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_size_kb() const { return at<1>().valid(); }
+  uint32_t size_kb() const { return at<1>().as_uint32(); }
+  bool has_fill_policy() const { return at<4>().valid(); }
+  int32_t fill_policy() const { return at<4>().as_int32(); }
+};
+
+class TraceConfig_BufferConfig : public ::protozero::Message {
+ public:
+  using Decoder = TraceConfig_BufferConfig_Decoder;
+  enum : int32_t {
+    kSizeKbFieldNumber = 1,
+    kFillPolicyFieldNumber = 4,
+  };
+  using FillPolicy = ::perfetto::protos::pbzero::TraceConfig_BufferConfig_FillPolicy;
+  static const FillPolicy UNSPECIFIED = TraceConfig_BufferConfig_FillPolicy_UNSPECIFIED;
+  static const FillPolicy RING_BUFFER = TraceConfig_BufferConfig_FillPolicy_RING_BUFFER;
+  static const FillPolicy DISCARD = TraceConfig_BufferConfig_FillPolicy_DISCARD;
+
+  using FieldMetadata_SizeKb =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TraceConfig_BufferConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SizeKb kSizeKb() { return {}; }
+  void set_size_kb(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SizeKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FillPolicy =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::TraceConfig_BufferConfig_FillPolicy,
+      TraceConfig_BufferConfig>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FillPolicy kFillPolicy() { return {}; }
+  void set_fill_policy(::perfetto::protos::pbzero::TraceConfig_BufferConfig_FillPolicy value) {
+    static constexpr uint32_t field_id = FieldMetadata_FillPolicy::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/clock_snapshot.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_CLOCK_SNAPSHOT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_CLOCK_SNAPSHOT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class ClockSnapshot_Clock;
+enum BuiltinClock : int32_t;
+
+enum ClockSnapshot_Clock_BuiltinClocks : int32_t {
+  ClockSnapshot_Clock_BuiltinClocks_UNKNOWN = 0,
+  ClockSnapshot_Clock_BuiltinClocks_REALTIME = 1,
+  ClockSnapshot_Clock_BuiltinClocks_REALTIME_COARSE = 2,
+  ClockSnapshot_Clock_BuiltinClocks_MONOTONIC = 3,
+  ClockSnapshot_Clock_BuiltinClocks_MONOTONIC_COARSE = 4,
+  ClockSnapshot_Clock_BuiltinClocks_MONOTONIC_RAW = 5,
+  ClockSnapshot_Clock_BuiltinClocks_BOOTTIME = 6,
+  ClockSnapshot_Clock_BuiltinClocks_BUILTIN_CLOCK_MAX_ID = 63,
+};
+
+const ClockSnapshot_Clock_BuiltinClocks ClockSnapshot_Clock_BuiltinClocks_MIN = ClockSnapshot_Clock_BuiltinClocks_UNKNOWN;
+const ClockSnapshot_Clock_BuiltinClocks ClockSnapshot_Clock_BuiltinClocks_MAX = ClockSnapshot_Clock_BuiltinClocks_BUILTIN_CLOCK_MAX_ID;
+
+class ClockSnapshot_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ClockSnapshot_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ClockSnapshot_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ClockSnapshot_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_clocks() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> clocks() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_primary_trace_clock() const { return at<2>().valid(); }
+  int32_t primary_trace_clock() const { return at<2>().as_int32(); }
+};
+
+class ClockSnapshot : public ::protozero::Message {
+ public:
+  using Decoder = ClockSnapshot_Decoder;
+  enum : int32_t {
+    kClocksFieldNumber = 1,
+    kPrimaryTraceClockFieldNumber = 2,
+  };
+  using Clock = ::perfetto::protos::pbzero::ClockSnapshot_Clock;
+
+  using FieldMetadata_Clocks =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ClockSnapshot_Clock,
+      ClockSnapshot>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Clocks kClocks() { return {}; }
+  template <typename T = ClockSnapshot_Clock> T* add_clocks() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_PrimaryTraceClock =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::BuiltinClock,
+      ClockSnapshot>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PrimaryTraceClock kPrimaryTraceClock() { return {}; }
+  void set_primary_trace_clock(::perfetto::protos::pbzero::BuiltinClock value) {
+    static constexpr uint32_t field_id = FieldMetadata_PrimaryTraceClock::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ClockSnapshot_Clock_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ClockSnapshot_Clock_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ClockSnapshot_Clock_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ClockSnapshot_Clock_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_clock_id() const { return at<1>().valid(); }
+  uint32_t clock_id() const { return at<1>().as_uint32(); }
+  bool has_timestamp() const { return at<2>().valid(); }
+  uint64_t timestamp() const { return at<2>().as_uint64(); }
+  bool has_is_incremental() const { return at<3>().valid(); }
+  bool is_incremental() const { return at<3>().as_bool(); }
+  bool has_unit_multiplier_ns() const { return at<4>().valid(); }
+  uint64_t unit_multiplier_ns() const { return at<4>().as_uint64(); }
+};
+
+class ClockSnapshot_Clock : public ::protozero::Message {
+ public:
+  using Decoder = ClockSnapshot_Clock_Decoder;
+  enum : int32_t {
+    kClockIdFieldNumber = 1,
+    kTimestampFieldNumber = 2,
+    kIsIncrementalFieldNumber = 3,
+    kUnitMultiplierNsFieldNumber = 4,
+  };
+  using BuiltinClocks = ::perfetto::protos::pbzero::ClockSnapshot_Clock_BuiltinClocks;
+  static const BuiltinClocks UNKNOWN = ClockSnapshot_Clock_BuiltinClocks_UNKNOWN;
+  static const BuiltinClocks REALTIME = ClockSnapshot_Clock_BuiltinClocks_REALTIME;
+  static const BuiltinClocks REALTIME_COARSE = ClockSnapshot_Clock_BuiltinClocks_REALTIME_COARSE;
+  static const BuiltinClocks MONOTONIC = ClockSnapshot_Clock_BuiltinClocks_MONOTONIC;
+  static const BuiltinClocks MONOTONIC_COARSE = ClockSnapshot_Clock_BuiltinClocks_MONOTONIC_COARSE;
+  static const BuiltinClocks MONOTONIC_RAW = ClockSnapshot_Clock_BuiltinClocks_MONOTONIC_RAW;
+  static const BuiltinClocks BOOTTIME = ClockSnapshot_Clock_BuiltinClocks_BOOTTIME;
+  static const BuiltinClocks BUILTIN_CLOCK_MAX_ID = ClockSnapshot_Clock_BuiltinClocks_BUILTIN_CLOCK_MAX_ID;
+
+  using FieldMetadata_ClockId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ClockSnapshot_Clock>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClockId kClockId() { return {}; }
+  void set_clock_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ClockId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Timestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ClockSnapshot_Clock>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Timestamp kTimestamp() { return {}; }
+  void set_timestamp(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Timestamp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IsIncremental =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ClockSnapshot_Clock>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IsIncremental kIsIncremental() { return {}; }
+  void set_is_incremental(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_IsIncremental::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UnitMultiplierNs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ClockSnapshot_Clock>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UnitMultiplierNs kUnitMultiplierNs() { return {}; }
+  void set_unit_multiplier_ns(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UnitMultiplierNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/trigger.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRIGGER_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRIGGER_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class Trigger_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Trigger_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Trigger_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Trigger_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_trigger_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars trigger_name() const { return at<1>().as_string(); }
+  bool has_producer_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars producer_name() const { return at<2>().as_string(); }
+  bool has_trusted_producer_uid() const { return at<3>().valid(); }
+  int32_t trusted_producer_uid() const { return at<3>().as_int32(); }
+};
+
+class Trigger : public ::protozero::Message {
+ public:
+  using Decoder = Trigger_Decoder;
+  enum : int32_t {
+    kTriggerNameFieldNumber = 1,
+    kProducerNameFieldNumber = 2,
+    kTrustedProducerUidFieldNumber = 3,
+  };
+
+  using FieldMetadata_TriggerName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      Trigger>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggerName kTriggerName() { return {}; }
+  void set_trigger_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_TriggerName::kFieldId, data, size);
+  }
+  void set_trigger_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_TriggerName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProducerName =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      Trigger>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProducerName kProducerName() { return {}; }
+  void set_producer_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ProducerName::kFieldId, data, size);
+  }
+  void set_producer_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProducerName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TrustedProducerUid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Trigger>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrustedProducerUid kTrustedProducerUid() { return {}; }
+  void set_trusted_producer_uid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TrustedProducerUid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/system_info.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_SYSTEM_INFO_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_SYSTEM_INFO_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class Utsname;
+
+class SystemInfo_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SystemInfo_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SystemInfo_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SystemInfo_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_utsname() const { return at<1>().valid(); }
+  ::protozero::ConstBytes utsname() const { return at<1>().as_bytes(); }
+  bool has_android_build_fingerprint() const { return at<2>().valid(); }
+  ::protozero::ConstChars android_build_fingerprint() const { return at<2>().as_string(); }
+  bool has_hz() const { return at<3>().valid(); }
+  int64_t hz() const { return at<3>().as_int64(); }
+  bool has_tracing_service_version() const { return at<4>().valid(); }
+  ::protozero::ConstChars tracing_service_version() const { return at<4>().as_string(); }
+};
+
+class SystemInfo : public ::protozero::Message {
+ public:
+  using Decoder = SystemInfo_Decoder;
+  enum : int32_t {
+    kUtsnameFieldNumber = 1,
+    kAndroidBuildFingerprintFieldNumber = 2,
+    kHzFieldNumber = 3,
+    kTracingServiceVersionFieldNumber = 4,
+  };
+
+  using FieldMetadata_Utsname =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Utsname,
+      SystemInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Utsname kUtsname() { return {}; }
+  template <typename T = Utsname> T* set_utsname() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_AndroidBuildFingerprint =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SystemInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AndroidBuildFingerprint kAndroidBuildFingerprint() { return {}; }
+  void set_android_build_fingerprint(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_AndroidBuildFingerprint::kFieldId, data, size);
+  }
+  void set_android_build_fingerprint(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_AndroidBuildFingerprint::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Hz =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      SystemInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Hz kHz() { return {}; }
+  void set_hz(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Hz::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TracingServiceVersion =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SystemInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TracingServiceVersion kTracingServiceVersion() { return {}; }
+  void set_tracing_service_version(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_TracingServiceVersion::kFieldId, data, size);
+  }
+  void set_tracing_service_version(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_TracingServiceVersion::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Utsname_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Utsname_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Utsname_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Utsname_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_sysname() const { return at<1>().valid(); }
+  ::protozero::ConstChars sysname() const { return at<1>().as_string(); }
+  bool has_version() const { return at<2>().valid(); }
+  ::protozero::ConstChars version() const { return at<2>().as_string(); }
+  bool has_release() const { return at<3>().valid(); }
+  ::protozero::ConstChars release() const { return at<3>().as_string(); }
+  bool has_machine() const { return at<4>().valid(); }
+  ::protozero::ConstChars machine() const { return at<4>().as_string(); }
+};
+
+class Utsname : public ::protozero::Message {
+ public:
+  using Decoder = Utsname_Decoder;
+  enum : int32_t {
+    kSysnameFieldNumber = 1,
+    kVersionFieldNumber = 2,
+    kReleaseFieldNumber = 3,
+    kMachineFieldNumber = 4,
+  };
+
+  using FieldMetadata_Sysname =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      Utsname>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sysname kSysname() { return {}; }
+  void set_sysname(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Sysname::kFieldId, data, size);
+  }
+  void set_sysname(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sysname::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Version =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      Utsname>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Version kVersion() { return {}; }
+  void set_version(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Version::kFieldId, data, size);
+  }
+  void set_version(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Version::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Release =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      Utsname>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Release kRelease() { return {}; }
+  void set_release(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Release::kFieldId, data, size);
+  }
+  void set_release(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Release::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Machine =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      Utsname>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Machine kMachine() { return {}; }
+  void set_machine(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Machine::kFieldId, data, size);
+  }
+  void set_machine(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Machine::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/android/android_log.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_ANDROID_ANDROID_LOG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_ANDROID_ANDROID_LOG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class AndroidLogPacket_LogEvent;
+class AndroidLogPacket_LogEvent_Arg;
+class AndroidLogPacket_Stats;
+enum AndroidLogId : int32_t;
+enum AndroidLogPriority : int32_t;
+
+class AndroidLogPacket_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  AndroidLogPacket_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AndroidLogPacket_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AndroidLogPacket_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_events() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> events() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_stats() const { return at<2>().valid(); }
+  ::protozero::ConstBytes stats() const { return at<2>().as_bytes(); }
+};
+
+class AndroidLogPacket : public ::protozero::Message {
+ public:
+  using Decoder = AndroidLogPacket_Decoder;
+  enum : int32_t {
+    kEventsFieldNumber = 1,
+    kStatsFieldNumber = 2,
+  };
+  using LogEvent = ::perfetto::protos::pbzero::AndroidLogPacket_LogEvent;
+  using Stats = ::perfetto::protos::pbzero::AndroidLogPacket_Stats;
+
+  using FieldMetadata_Events =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AndroidLogPacket_LogEvent,
+      AndroidLogPacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Events kEvents() { return {}; }
+  template <typename T = AndroidLogPacket_LogEvent> T* add_events() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_Stats =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AndroidLogPacket_Stats,
+      AndroidLogPacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Stats kStats() { return {}; }
+  template <typename T = AndroidLogPacket_Stats> T* set_stats() {
+    return BeginNestedMessage<T>(2);
+  }
+
+};
+
+class AndroidLogPacket_Stats_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  AndroidLogPacket_Stats_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AndroidLogPacket_Stats_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AndroidLogPacket_Stats_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_num_total() const { return at<1>().valid(); }
+  uint64_t num_total() const { return at<1>().as_uint64(); }
+  bool has_num_failed() const { return at<2>().valid(); }
+  uint64_t num_failed() const { return at<2>().as_uint64(); }
+  bool has_num_skipped() const { return at<3>().valid(); }
+  uint64_t num_skipped() const { return at<3>().as_uint64(); }
+};
+
+class AndroidLogPacket_Stats : public ::protozero::Message {
+ public:
+  using Decoder = AndroidLogPacket_Stats_Decoder;
+  enum : int32_t {
+    kNumTotalFieldNumber = 1,
+    kNumFailedFieldNumber = 2,
+    kNumSkippedFieldNumber = 3,
+  };
+
+  using FieldMetadata_NumTotal =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      AndroidLogPacket_Stats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NumTotal kNumTotal() { return {}; }
+  void set_num_total(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NumTotal::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NumFailed =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      AndroidLogPacket_Stats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NumFailed kNumFailed() { return {}; }
+  void set_num_failed(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NumFailed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NumSkipped =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      AndroidLogPacket_Stats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NumSkipped kNumSkipped() { return {}; }
+  void set_num_skipped(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NumSkipped::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class AndroidLogPacket_LogEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/9, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  AndroidLogPacket_LogEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AndroidLogPacket_LogEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AndroidLogPacket_LogEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_log_id() const { return at<1>().valid(); }
+  int32_t log_id() const { return at<1>().as_int32(); }
+  bool has_pid() const { return at<2>().valid(); }
+  int32_t pid() const { return at<2>().as_int32(); }
+  bool has_tid() const { return at<3>().valid(); }
+  int32_t tid() const { return at<3>().as_int32(); }
+  bool has_uid() const { return at<4>().valid(); }
+  int32_t uid() const { return at<4>().as_int32(); }
+  bool has_timestamp() const { return at<5>().valid(); }
+  uint64_t timestamp() const { return at<5>().as_uint64(); }
+  bool has_tag() const { return at<6>().valid(); }
+  ::protozero::ConstChars tag() const { return at<6>().as_string(); }
+  bool has_prio() const { return at<7>().valid(); }
+  int32_t prio() const { return at<7>().as_int32(); }
+  bool has_message() const { return at<8>().valid(); }
+  ::protozero::ConstChars message() const { return at<8>().as_string(); }
+  bool has_args() const { return at<9>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> args() const { return GetRepeated<::protozero::ConstBytes>(9); }
+};
+
+class AndroidLogPacket_LogEvent : public ::protozero::Message {
+ public:
+  using Decoder = AndroidLogPacket_LogEvent_Decoder;
+  enum : int32_t {
+    kLogIdFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kTidFieldNumber = 3,
+    kUidFieldNumber = 4,
+    kTimestampFieldNumber = 5,
+    kTagFieldNumber = 6,
+    kPrioFieldNumber = 7,
+    kMessageFieldNumber = 8,
+    kArgsFieldNumber = 9,
+  };
+  using Arg = ::perfetto::protos::pbzero::AndroidLogPacket_LogEvent_Arg;
+
+  using FieldMetadata_LogId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::AndroidLogId,
+      AndroidLogPacket_LogEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LogId kLogId() { return {}; }
+  void set_log_id(::perfetto::protos::pbzero::AndroidLogId value) {
+    static constexpr uint32_t field_id = FieldMetadata_LogId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      AndroidLogPacket_LogEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Tid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      AndroidLogPacket_LogEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tid kTid() { return {}; }
+  void set_tid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Uid =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      AndroidLogPacket_LogEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Uid kUid() { return {}; }
+  void set_uid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Uid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Timestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      AndroidLogPacket_LogEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Timestamp kTimestamp() { return {}; }
+  void set_timestamp(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Timestamp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Tag =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      AndroidLogPacket_LogEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tag kTag() { return {}; }
+  void set_tag(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Tag::kFieldId, data, size);
+  }
+  void set_tag(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tag::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Prio =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::AndroidLogPriority,
+      AndroidLogPacket_LogEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Prio kPrio() { return {}; }
+  void set_prio(::perfetto::protos::pbzero::AndroidLogPriority value) {
+    static constexpr uint32_t field_id = FieldMetadata_Prio::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Message =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      AndroidLogPacket_LogEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Message kMessage() { return {}; }
+  void set_message(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Message::kFieldId, data, size);
+  }
+  void set_message(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Message::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Args =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AndroidLogPacket_LogEvent_Arg,
+      AndroidLogPacket_LogEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Args kArgs() { return {}; }
+  template <typename T = AndroidLogPacket_LogEvent_Arg> T* add_args() {
+    return BeginNestedMessage<T>(9);
+  }
+
+};
+
+class AndroidLogPacket_LogEvent_Arg_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  AndroidLogPacket_LogEvent_Arg_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AndroidLogPacket_LogEvent_Arg_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AndroidLogPacket_LogEvent_Arg_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_int_value() const { return at<2>().valid(); }
+  int64_t int_value() const { return at<2>().as_int64(); }
+  bool has_float_value() const { return at<3>().valid(); }
+  float float_value() const { return at<3>().as_float(); }
+  bool has_string_value() const { return at<4>().valid(); }
+  ::protozero::ConstChars string_value() const { return at<4>().as_string(); }
+};
+
+class AndroidLogPacket_LogEvent_Arg : public ::protozero::Message {
+ public:
+  using Decoder = AndroidLogPacket_LogEvent_Arg_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kIntValueFieldNumber = 2,
+    kFloatValueFieldNumber = 3,
+    kStringValueFieldNumber = 4,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      AndroidLogPacket_LogEvent_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IntValue =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      AndroidLogPacket_LogEvent_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IntValue kIntValue() { return {}; }
+  void set_int_value(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IntValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FloatValue =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kFloat,
+      float,
+      AndroidLogPacket_LogEvent_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FloatValue kFloatValue() { return {}; }
+  void set_float_value(float value) {
+    static constexpr uint32_t field_id = FieldMetadata_FloatValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kFloat>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StringValue =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      AndroidLogPacket_LogEvent_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StringValue kStringValue() { return {}; }
+  void set_string_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_StringValue::kFieldId, data, size);
+  }
+  void set_string_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_StringValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/android/frame_timeline_event.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_ANDROID_FRAME_TIMELINE_EVENT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_ANDROID_FRAME_TIMELINE_EVENT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class FrameTimelineEvent_ActualDisplayFrameStart;
+class FrameTimelineEvent_ActualSurfaceFrameStart;
+class FrameTimelineEvent_ExpectedDisplayFrameStart;
+class FrameTimelineEvent_ExpectedSurfaceFrameStart;
+class FrameTimelineEvent_FrameEnd;
+enum FrameTimelineEvent_PredictionType : int32_t;
+enum FrameTimelineEvent_PresentType : int32_t;
+
+enum FrameTimelineEvent_JankType : int32_t {
+  FrameTimelineEvent_JankType_JANK_UNSPECIFIED = 0,
+  FrameTimelineEvent_JankType_JANK_NONE = 1,
+  FrameTimelineEvent_JankType_JANK_SF_SCHEDULING = 2,
+  FrameTimelineEvent_JankType_JANK_PREDICTION_ERROR = 4,
+  FrameTimelineEvent_JankType_JANK_DISPLAY_HAL = 8,
+  FrameTimelineEvent_JankType_JANK_SF_CPU_DEADLINE_MISSED = 16,
+  FrameTimelineEvent_JankType_JANK_SF_GPU_DEADLINE_MISSED = 32,
+  FrameTimelineEvent_JankType_JANK_APP_DEADLINE_MISSED = 64,
+  FrameTimelineEvent_JankType_JANK_BUFFER_STUFFING = 128,
+  FrameTimelineEvent_JankType_JANK_UNKNOWN = 256,
+  FrameTimelineEvent_JankType_JANK_SF_STUFFING = 512,
+};
+
+const FrameTimelineEvent_JankType FrameTimelineEvent_JankType_MIN = FrameTimelineEvent_JankType_JANK_UNSPECIFIED;
+const FrameTimelineEvent_JankType FrameTimelineEvent_JankType_MAX = FrameTimelineEvent_JankType_JANK_SF_STUFFING;
+
+enum FrameTimelineEvent_PresentType : int32_t {
+  FrameTimelineEvent_PresentType_PRESENT_UNSPECIFIED = 0,
+  FrameTimelineEvent_PresentType_PRESENT_ON_TIME = 1,
+  FrameTimelineEvent_PresentType_PRESENT_LATE = 2,
+  FrameTimelineEvent_PresentType_PRESENT_EARLY = 3,
+  FrameTimelineEvent_PresentType_PRESENT_DROPPED = 4,
+  FrameTimelineEvent_PresentType_PRESENT_UNKNOWN = 5,
+};
+
+const FrameTimelineEvent_PresentType FrameTimelineEvent_PresentType_MIN = FrameTimelineEvent_PresentType_PRESENT_UNSPECIFIED;
+const FrameTimelineEvent_PresentType FrameTimelineEvent_PresentType_MAX = FrameTimelineEvent_PresentType_PRESENT_UNKNOWN;
+
+enum FrameTimelineEvent_PredictionType : int32_t {
+  FrameTimelineEvent_PredictionType_PREDICTION_UNSPECIFIED = 0,
+  FrameTimelineEvent_PredictionType_PREDICTION_VALID = 1,
+  FrameTimelineEvent_PredictionType_PREDICTION_EXPIRED = 2,
+  FrameTimelineEvent_PredictionType_PREDICTION_UNKNOWN = 3,
+};
+
+const FrameTimelineEvent_PredictionType FrameTimelineEvent_PredictionType_MIN = FrameTimelineEvent_PredictionType_PREDICTION_UNSPECIFIED;
+const FrameTimelineEvent_PredictionType FrameTimelineEvent_PredictionType_MAX = FrameTimelineEvent_PredictionType_PREDICTION_UNKNOWN;
+
+class FrameTimelineEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  FrameTimelineEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FrameTimelineEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FrameTimelineEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_expected_display_frame_start() const { return at<1>().valid(); }
+  ::protozero::ConstBytes expected_display_frame_start() const { return at<1>().as_bytes(); }
+  bool has_actual_display_frame_start() const { return at<2>().valid(); }
+  ::protozero::ConstBytes actual_display_frame_start() const { return at<2>().as_bytes(); }
+  bool has_expected_surface_frame_start() const { return at<3>().valid(); }
+  ::protozero::ConstBytes expected_surface_frame_start() const { return at<3>().as_bytes(); }
+  bool has_actual_surface_frame_start() const { return at<4>().valid(); }
+  ::protozero::ConstBytes actual_surface_frame_start() const { return at<4>().as_bytes(); }
+  bool has_frame_end() const { return at<5>().valid(); }
+  ::protozero::ConstBytes frame_end() const { return at<5>().as_bytes(); }
+};
+
+class FrameTimelineEvent : public ::protozero::Message {
+ public:
+  using Decoder = FrameTimelineEvent_Decoder;
+  enum : int32_t {
+    kExpectedDisplayFrameStartFieldNumber = 1,
+    kActualDisplayFrameStartFieldNumber = 2,
+    kExpectedSurfaceFrameStartFieldNumber = 3,
+    kActualSurfaceFrameStartFieldNumber = 4,
+    kFrameEndFieldNumber = 5,
+  };
+  using ExpectedSurfaceFrameStart = ::perfetto::protos::pbzero::FrameTimelineEvent_ExpectedSurfaceFrameStart;
+  using ActualSurfaceFrameStart = ::perfetto::protos::pbzero::FrameTimelineEvent_ActualSurfaceFrameStart;
+  using ExpectedDisplayFrameStart = ::perfetto::protos::pbzero::FrameTimelineEvent_ExpectedDisplayFrameStart;
+  using ActualDisplayFrameStart = ::perfetto::protos::pbzero::FrameTimelineEvent_ActualDisplayFrameStart;
+  using FrameEnd = ::perfetto::protos::pbzero::FrameTimelineEvent_FrameEnd;
+  using JankType = ::perfetto::protos::pbzero::FrameTimelineEvent_JankType;
+  using PresentType = ::perfetto::protos::pbzero::FrameTimelineEvent_PresentType;
+  using PredictionType = ::perfetto::protos::pbzero::FrameTimelineEvent_PredictionType;
+  static const JankType JANK_UNSPECIFIED = FrameTimelineEvent_JankType_JANK_UNSPECIFIED;
+  static const JankType JANK_NONE = FrameTimelineEvent_JankType_JANK_NONE;
+  static const JankType JANK_SF_SCHEDULING = FrameTimelineEvent_JankType_JANK_SF_SCHEDULING;
+  static const JankType JANK_PREDICTION_ERROR = FrameTimelineEvent_JankType_JANK_PREDICTION_ERROR;
+  static const JankType JANK_DISPLAY_HAL = FrameTimelineEvent_JankType_JANK_DISPLAY_HAL;
+  static const JankType JANK_SF_CPU_DEADLINE_MISSED = FrameTimelineEvent_JankType_JANK_SF_CPU_DEADLINE_MISSED;
+  static const JankType JANK_SF_GPU_DEADLINE_MISSED = FrameTimelineEvent_JankType_JANK_SF_GPU_DEADLINE_MISSED;
+  static const JankType JANK_APP_DEADLINE_MISSED = FrameTimelineEvent_JankType_JANK_APP_DEADLINE_MISSED;
+  static const JankType JANK_BUFFER_STUFFING = FrameTimelineEvent_JankType_JANK_BUFFER_STUFFING;
+  static const JankType JANK_UNKNOWN = FrameTimelineEvent_JankType_JANK_UNKNOWN;
+  static const JankType JANK_SF_STUFFING = FrameTimelineEvent_JankType_JANK_SF_STUFFING;
+  static const PresentType PRESENT_UNSPECIFIED = FrameTimelineEvent_PresentType_PRESENT_UNSPECIFIED;
+  static const PresentType PRESENT_ON_TIME = FrameTimelineEvent_PresentType_PRESENT_ON_TIME;
+  static const PresentType PRESENT_LATE = FrameTimelineEvent_PresentType_PRESENT_LATE;
+  static const PresentType PRESENT_EARLY = FrameTimelineEvent_PresentType_PRESENT_EARLY;
+  static const PresentType PRESENT_DROPPED = FrameTimelineEvent_PresentType_PRESENT_DROPPED;
+  static const PresentType PRESENT_UNKNOWN = FrameTimelineEvent_PresentType_PRESENT_UNKNOWN;
+  static const PredictionType PREDICTION_UNSPECIFIED = FrameTimelineEvent_PredictionType_PREDICTION_UNSPECIFIED;
+  static const PredictionType PREDICTION_VALID = FrameTimelineEvent_PredictionType_PREDICTION_VALID;
+  static const PredictionType PREDICTION_EXPIRED = FrameTimelineEvent_PredictionType_PREDICTION_EXPIRED;
+  static const PredictionType PREDICTION_UNKNOWN = FrameTimelineEvent_PredictionType_PREDICTION_UNKNOWN;
+
+  using FieldMetadata_ExpectedDisplayFrameStart =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FrameTimelineEvent_ExpectedDisplayFrameStart,
+      FrameTimelineEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExpectedDisplayFrameStart kExpectedDisplayFrameStart() { return {}; }
+  template <typename T = FrameTimelineEvent_ExpectedDisplayFrameStart> T* set_expected_display_frame_start() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_ActualDisplayFrameStart =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FrameTimelineEvent_ActualDisplayFrameStart,
+      FrameTimelineEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ActualDisplayFrameStart kActualDisplayFrameStart() { return {}; }
+  template <typename T = FrameTimelineEvent_ActualDisplayFrameStart> T* set_actual_display_frame_start() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_ExpectedSurfaceFrameStart =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FrameTimelineEvent_ExpectedSurfaceFrameStart,
+      FrameTimelineEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExpectedSurfaceFrameStart kExpectedSurfaceFrameStart() { return {}; }
+  template <typename T = FrameTimelineEvent_ExpectedSurfaceFrameStart> T* set_expected_surface_frame_start() {
+    return BeginNestedMessage<T>(3);
+  }
+
+
+  using FieldMetadata_ActualSurfaceFrameStart =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FrameTimelineEvent_ActualSurfaceFrameStart,
+      FrameTimelineEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ActualSurfaceFrameStart kActualSurfaceFrameStart() { return {}; }
+  template <typename T = FrameTimelineEvent_ActualSurfaceFrameStart> T* set_actual_surface_frame_start() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_FrameEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FrameTimelineEvent_FrameEnd,
+      FrameTimelineEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FrameEnd kFrameEnd() { return {}; }
+  template <typename T = FrameTimelineEvent_FrameEnd> T* set_frame_end() {
+    return BeginNestedMessage<T>(5);
+  }
+
+};
+
+class FrameTimelineEvent_FrameEnd_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  FrameTimelineEvent_FrameEnd_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FrameTimelineEvent_FrameEnd_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FrameTimelineEvent_FrameEnd_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_cookie() const { return at<1>().valid(); }
+  int64_t cookie() const { return at<1>().as_int64(); }
+};
+
+class FrameTimelineEvent_FrameEnd : public ::protozero::Message {
+ public:
+  using Decoder = FrameTimelineEvent_FrameEnd_Decoder;
+  enum : int32_t {
+    kCookieFieldNumber = 1,
+  };
+
+  using FieldMetadata_Cookie =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      FrameTimelineEvent_FrameEnd>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cookie kCookie() { return {}; }
+  void set_cookie(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cookie::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class FrameTimelineEvent_ActualDisplayFrameStart_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  FrameTimelineEvent_ActualDisplayFrameStart_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FrameTimelineEvent_ActualDisplayFrameStart_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FrameTimelineEvent_ActualDisplayFrameStart_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_cookie() const { return at<1>().valid(); }
+  int64_t cookie() const { return at<1>().as_int64(); }
+  bool has_token() const { return at<2>().valid(); }
+  int64_t token() const { return at<2>().as_int64(); }
+  bool has_pid() const { return at<3>().valid(); }
+  int32_t pid() const { return at<3>().as_int32(); }
+  bool has_present_type() const { return at<4>().valid(); }
+  int32_t present_type() const { return at<4>().as_int32(); }
+  bool has_on_time_finish() const { return at<5>().valid(); }
+  bool on_time_finish() const { return at<5>().as_bool(); }
+  bool has_gpu_composition() const { return at<6>().valid(); }
+  bool gpu_composition() const { return at<6>().as_bool(); }
+  bool has_jank_type() const { return at<7>().valid(); }
+  int32_t jank_type() const { return at<7>().as_int32(); }
+  bool has_prediction_type() const { return at<8>().valid(); }
+  int32_t prediction_type() const { return at<8>().as_int32(); }
+};
+
+class FrameTimelineEvent_ActualDisplayFrameStart : public ::protozero::Message {
+ public:
+  using Decoder = FrameTimelineEvent_ActualDisplayFrameStart_Decoder;
+  enum : int32_t {
+    kCookieFieldNumber = 1,
+    kTokenFieldNumber = 2,
+    kPidFieldNumber = 3,
+    kPresentTypeFieldNumber = 4,
+    kOnTimeFinishFieldNumber = 5,
+    kGpuCompositionFieldNumber = 6,
+    kJankTypeFieldNumber = 7,
+    kPredictionTypeFieldNumber = 8,
+  };
+
+  using FieldMetadata_Cookie =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      FrameTimelineEvent_ActualDisplayFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cookie kCookie() { return {}; }
+  void set_cookie(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cookie::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Token =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      FrameTimelineEvent_ActualDisplayFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Token kToken() { return {}; }
+  void set_token(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Token::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      FrameTimelineEvent_ActualDisplayFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PresentType =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::FrameTimelineEvent_PresentType,
+      FrameTimelineEvent_ActualDisplayFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PresentType kPresentType() { return {}; }
+  void set_present_type(::perfetto::protos::pbzero::FrameTimelineEvent_PresentType value) {
+    static constexpr uint32_t field_id = FieldMetadata_PresentType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OnTimeFinish =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      FrameTimelineEvent_ActualDisplayFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OnTimeFinish kOnTimeFinish() { return {}; }
+  void set_on_time_finish(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_OnTimeFinish::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GpuComposition =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      FrameTimelineEvent_ActualDisplayFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuComposition kGpuComposition() { return {}; }
+  void set_gpu_composition(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_GpuComposition::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_JankType =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      FrameTimelineEvent_ActualDisplayFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_JankType kJankType() { return {}; }
+  void set_jank_type(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_JankType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PredictionType =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::FrameTimelineEvent_PredictionType,
+      FrameTimelineEvent_ActualDisplayFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PredictionType kPredictionType() { return {}; }
+  void set_prediction_type(::perfetto::protos::pbzero::FrameTimelineEvent_PredictionType value) {
+    static constexpr uint32_t field_id = FieldMetadata_PredictionType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class FrameTimelineEvent_ExpectedDisplayFrameStart_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  FrameTimelineEvent_ExpectedDisplayFrameStart_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FrameTimelineEvent_ExpectedDisplayFrameStart_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FrameTimelineEvent_ExpectedDisplayFrameStart_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_cookie() const { return at<1>().valid(); }
+  int64_t cookie() const { return at<1>().as_int64(); }
+  bool has_token() const { return at<2>().valid(); }
+  int64_t token() const { return at<2>().as_int64(); }
+  bool has_pid() const { return at<3>().valid(); }
+  int32_t pid() const { return at<3>().as_int32(); }
+};
+
+class FrameTimelineEvent_ExpectedDisplayFrameStart : public ::protozero::Message {
+ public:
+  using Decoder = FrameTimelineEvent_ExpectedDisplayFrameStart_Decoder;
+  enum : int32_t {
+    kCookieFieldNumber = 1,
+    kTokenFieldNumber = 2,
+    kPidFieldNumber = 3,
+  };
+
+  using FieldMetadata_Cookie =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      FrameTimelineEvent_ExpectedDisplayFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cookie kCookie() { return {}; }
+  void set_cookie(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cookie::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Token =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      FrameTimelineEvent_ExpectedDisplayFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Token kToken() { return {}; }
+  void set_token(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Token::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      FrameTimelineEvent_ExpectedDisplayFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class FrameTimelineEvent_ActualSurfaceFrameStart_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/11, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  FrameTimelineEvent_ActualSurfaceFrameStart_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FrameTimelineEvent_ActualSurfaceFrameStart_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FrameTimelineEvent_ActualSurfaceFrameStart_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_cookie() const { return at<1>().valid(); }
+  int64_t cookie() const { return at<1>().as_int64(); }
+  bool has_token() const { return at<2>().valid(); }
+  int64_t token() const { return at<2>().as_int64(); }
+  bool has_display_frame_token() const { return at<3>().valid(); }
+  int64_t display_frame_token() const { return at<3>().as_int64(); }
+  bool has_pid() const { return at<4>().valid(); }
+  int32_t pid() const { return at<4>().as_int32(); }
+  bool has_layer_name() const { return at<5>().valid(); }
+  ::protozero::ConstChars layer_name() const { return at<5>().as_string(); }
+  bool has_present_type() const { return at<6>().valid(); }
+  int32_t present_type() const { return at<6>().as_int32(); }
+  bool has_on_time_finish() const { return at<7>().valid(); }
+  bool on_time_finish() const { return at<7>().as_bool(); }
+  bool has_gpu_composition() const { return at<8>().valid(); }
+  bool gpu_composition() const { return at<8>().as_bool(); }
+  bool has_jank_type() const { return at<9>().valid(); }
+  int32_t jank_type() const { return at<9>().as_int32(); }
+  bool has_prediction_type() const { return at<10>().valid(); }
+  int32_t prediction_type() const { return at<10>().as_int32(); }
+  bool has_is_buffer() const { return at<11>().valid(); }
+  bool is_buffer() const { return at<11>().as_bool(); }
+};
+
+class FrameTimelineEvent_ActualSurfaceFrameStart : public ::protozero::Message {
+ public:
+  using Decoder = FrameTimelineEvent_ActualSurfaceFrameStart_Decoder;
+  enum : int32_t {
+    kCookieFieldNumber = 1,
+    kTokenFieldNumber = 2,
+    kDisplayFrameTokenFieldNumber = 3,
+    kPidFieldNumber = 4,
+    kLayerNameFieldNumber = 5,
+    kPresentTypeFieldNumber = 6,
+    kOnTimeFinishFieldNumber = 7,
+    kGpuCompositionFieldNumber = 8,
+    kJankTypeFieldNumber = 9,
+    kPredictionTypeFieldNumber = 10,
+    kIsBufferFieldNumber = 11,
+  };
+
+  using FieldMetadata_Cookie =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      FrameTimelineEvent_ActualSurfaceFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cookie kCookie() { return {}; }
+  void set_cookie(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cookie::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Token =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      FrameTimelineEvent_ActualSurfaceFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Token kToken() { return {}; }
+  void set_token(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Token::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DisplayFrameToken =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      FrameTimelineEvent_ActualSurfaceFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DisplayFrameToken kDisplayFrameToken() { return {}; }
+  void set_display_frame_token(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DisplayFrameToken::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      FrameTimelineEvent_ActualSurfaceFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LayerName =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FrameTimelineEvent_ActualSurfaceFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LayerName kLayerName() { return {}; }
+  void set_layer_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_LayerName::kFieldId, data, size);
+  }
+  void set_layer_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_LayerName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PresentType =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::FrameTimelineEvent_PresentType,
+      FrameTimelineEvent_ActualSurfaceFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PresentType kPresentType() { return {}; }
+  void set_present_type(::perfetto::protos::pbzero::FrameTimelineEvent_PresentType value) {
+    static constexpr uint32_t field_id = FieldMetadata_PresentType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OnTimeFinish =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      FrameTimelineEvent_ActualSurfaceFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OnTimeFinish kOnTimeFinish() { return {}; }
+  void set_on_time_finish(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_OnTimeFinish::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GpuComposition =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      FrameTimelineEvent_ActualSurfaceFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuComposition kGpuComposition() { return {}; }
+  void set_gpu_composition(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_GpuComposition::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_JankType =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      FrameTimelineEvent_ActualSurfaceFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_JankType kJankType() { return {}; }
+  void set_jank_type(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_JankType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PredictionType =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::FrameTimelineEvent_PredictionType,
+      FrameTimelineEvent_ActualSurfaceFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PredictionType kPredictionType() { return {}; }
+  void set_prediction_type(::perfetto::protos::pbzero::FrameTimelineEvent_PredictionType value) {
+    static constexpr uint32_t field_id = FieldMetadata_PredictionType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IsBuffer =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      FrameTimelineEvent_ActualSurfaceFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IsBuffer kIsBuffer() { return {}; }
+  void set_is_buffer(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_IsBuffer::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class FrameTimelineEvent_ExpectedSurfaceFrameStart_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  FrameTimelineEvent_ExpectedSurfaceFrameStart_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FrameTimelineEvent_ExpectedSurfaceFrameStart_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FrameTimelineEvent_ExpectedSurfaceFrameStart_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_cookie() const { return at<1>().valid(); }
+  int64_t cookie() const { return at<1>().as_int64(); }
+  bool has_token() const { return at<2>().valid(); }
+  int64_t token() const { return at<2>().as_int64(); }
+  bool has_display_frame_token() const { return at<3>().valid(); }
+  int64_t display_frame_token() const { return at<3>().as_int64(); }
+  bool has_pid() const { return at<4>().valid(); }
+  int32_t pid() const { return at<4>().as_int32(); }
+  bool has_layer_name() const { return at<5>().valid(); }
+  ::protozero::ConstChars layer_name() const { return at<5>().as_string(); }
+};
+
+class FrameTimelineEvent_ExpectedSurfaceFrameStart : public ::protozero::Message {
+ public:
+  using Decoder = FrameTimelineEvent_ExpectedSurfaceFrameStart_Decoder;
+  enum : int32_t {
+    kCookieFieldNumber = 1,
+    kTokenFieldNumber = 2,
+    kDisplayFrameTokenFieldNumber = 3,
+    kPidFieldNumber = 4,
+    kLayerNameFieldNumber = 5,
+  };
+
+  using FieldMetadata_Cookie =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      FrameTimelineEvent_ExpectedSurfaceFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cookie kCookie() { return {}; }
+  void set_cookie(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cookie::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Token =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      FrameTimelineEvent_ExpectedSurfaceFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Token kToken() { return {}; }
+  void set_token(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Token::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DisplayFrameToken =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      FrameTimelineEvent_ExpectedSurfaceFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DisplayFrameToken kDisplayFrameToken() { return {}; }
+  void set_display_frame_token(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DisplayFrameToken::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      FrameTimelineEvent_ExpectedSurfaceFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LayerName =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FrameTimelineEvent_ExpectedSurfaceFrameStart>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LayerName kLayerName() { return {}; }
+  void set_layer_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_LayerName::kFieldId, data, size);
+  }
+  void set_layer_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_LayerName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/android/gpu_mem_event.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_ANDROID_GPU_MEM_EVENT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_ANDROID_GPU_MEM_EVENT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class GpuMemTotalEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  GpuMemTotalEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GpuMemTotalEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GpuMemTotalEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_gpu_id() const { return at<1>().valid(); }
+  uint32_t gpu_id() const { return at<1>().as_uint32(); }
+  bool has_pid() const { return at<2>().valid(); }
+  uint32_t pid() const { return at<2>().as_uint32(); }
+  bool has_size() const { return at<3>().valid(); }
+  uint64_t size() const { return at<3>().as_uint64(); }
+};
+
+class GpuMemTotalEvent : public ::protozero::Message {
+ public:
+  using Decoder = GpuMemTotalEvent_Decoder;
+  enum : int32_t {
+    kGpuIdFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kSizeFieldNumber = 3,
+  };
+
+  using FieldMetadata_GpuId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      GpuMemTotalEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuId kGpuId() { return {}; }
+  void set_gpu_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GpuId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      GpuMemTotalEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Size =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      GpuMemTotalEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Size kSize() { return {}; }
+  void set_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Size::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/android/graphics_frame_event.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_ANDROID_GRAPHICS_FRAME_EVENT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_ANDROID_GRAPHICS_FRAME_EVENT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class GraphicsFrameEvent_BufferEvent;
+enum GraphicsFrameEvent_BufferEventType : int32_t;
+
+enum GraphicsFrameEvent_BufferEventType : int32_t {
+  GraphicsFrameEvent_BufferEventType_UNSPECIFIED = 0,
+  GraphicsFrameEvent_BufferEventType_DEQUEUE = 1,
+  GraphicsFrameEvent_BufferEventType_QUEUE = 2,
+  GraphicsFrameEvent_BufferEventType_POST = 3,
+  GraphicsFrameEvent_BufferEventType_ACQUIRE_FENCE = 4,
+  GraphicsFrameEvent_BufferEventType_LATCH = 5,
+  GraphicsFrameEvent_BufferEventType_HWC_COMPOSITION_QUEUED = 6,
+  GraphicsFrameEvent_BufferEventType_FALLBACK_COMPOSITION = 7,
+  GraphicsFrameEvent_BufferEventType_PRESENT_FENCE = 8,
+  GraphicsFrameEvent_BufferEventType_RELEASE_FENCE = 9,
+  GraphicsFrameEvent_BufferEventType_MODIFY = 10,
+  GraphicsFrameEvent_BufferEventType_DETACH = 11,
+  GraphicsFrameEvent_BufferEventType_ATTACH = 12,
+  GraphicsFrameEvent_BufferEventType_CANCEL = 13,
+};
+
+const GraphicsFrameEvent_BufferEventType GraphicsFrameEvent_BufferEventType_MIN = GraphicsFrameEvent_BufferEventType_UNSPECIFIED;
+const GraphicsFrameEvent_BufferEventType GraphicsFrameEvent_BufferEventType_MAX = GraphicsFrameEvent_BufferEventType_CANCEL;
+
+class GraphicsFrameEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  GraphicsFrameEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GraphicsFrameEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GraphicsFrameEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_buffer_event() const { return at<1>().valid(); }
+  ::protozero::ConstBytes buffer_event() const { return at<1>().as_bytes(); }
+};
+
+class GraphicsFrameEvent : public ::protozero::Message {
+ public:
+  using Decoder = GraphicsFrameEvent_Decoder;
+  enum : int32_t {
+    kBufferEventFieldNumber = 1,
+  };
+  using BufferEvent = ::perfetto::protos::pbzero::GraphicsFrameEvent_BufferEvent;
+  using BufferEventType = ::perfetto::protos::pbzero::GraphicsFrameEvent_BufferEventType;
+  static const BufferEventType UNSPECIFIED = GraphicsFrameEvent_BufferEventType_UNSPECIFIED;
+  static const BufferEventType DEQUEUE = GraphicsFrameEvent_BufferEventType_DEQUEUE;
+  static const BufferEventType QUEUE = GraphicsFrameEvent_BufferEventType_QUEUE;
+  static const BufferEventType POST = GraphicsFrameEvent_BufferEventType_POST;
+  static const BufferEventType ACQUIRE_FENCE = GraphicsFrameEvent_BufferEventType_ACQUIRE_FENCE;
+  static const BufferEventType LATCH = GraphicsFrameEvent_BufferEventType_LATCH;
+  static const BufferEventType HWC_COMPOSITION_QUEUED = GraphicsFrameEvent_BufferEventType_HWC_COMPOSITION_QUEUED;
+  static const BufferEventType FALLBACK_COMPOSITION = GraphicsFrameEvent_BufferEventType_FALLBACK_COMPOSITION;
+  static const BufferEventType PRESENT_FENCE = GraphicsFrameEvent_BufferEventType_PRESENT_FENCE;
+  static const BufferEventType RELEASE_FENCE = GraphicsFrameEvent_BufferEventType_RELEASE_FENCE;
+  static const BufferEventType MODIFY = GraphicsFrameEvent_BufferEventType_MODIFY;
+  static const BufferEventType DETACH = GraphicsFrameEvent_BufferEventType_DETACH;
+  static const BufferEventType ATTACH = GraphicsFrameEvent_BufferEventType_ATTACH;
+  static const BufferEventType CANCEL = GraphicsFrameEvent_BufferEventType_CANCEL;
+
+  using FieldMetadata_BufferEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GraphicsFrameEvent_BufferEvent,
+      GraphicsFrameEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BufferEvent kBufferEvent() { return {}; }
+  template <typename T = GraphicsFrameEvent_BufferEvent> T* set_buffer_event() {
+    return BeginNestedMessage<T>(1);
+  }
+
+};
+
+class GraphicsFrameEvent_BufferEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  GraphicsFrameEvent_BufferEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GraphicsFrameEvent_BufferEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GraphicsFrameEvent_BufferEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_frame_number() const { return at<1>().valid(); }
+  uint32_t frame_number() const { return at<1>().as_uint32(); }
+  bool has_type() const { return at<2>().valid(); }
+  int32_t type() const { return at<2>().as_int32(); }
+  bool has_layer_name() const { return at<3>().valid(); }
+  ::protozero::ConstChars layer_name() const { return at<3>().as_string(); }
+  bool has_duration_ns() const { return at<4>().valid(); }
+  uint64_t duration_ns() const { return at<4>().as_uint64(); }
+  bool has_buffer_id() const { return at<5>().valid(); }
+  uint32_t buffer_id() const { return at<5>().as_uint32(); }
+};
+
+class GraphicsFrameEvent_BufferEvent : public ::protozero::Message {
+ public:
+  using Decoder = GraphicsFrameEvent_BufferEvent_Decoder;
+  enum : int32_t {
+    kFrameNumberFieldNumber = 1,
+    kTypeFieldNumber = 2,
+    kLayerNameFieldNumber = 3,
+    kDurationNsFieldNumber = 4,
+    kBufferIdFieldNumber = 5,
+  };
+
+  using FieldMetadata_FrameNumber =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      GraphicsFrameEvent_BufferEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FrameNumber kFrameNumber() { return {}; }
+  void set_frame_number(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FrameNumber::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::GraphicsFrameEvent_BufferEventType,
+      GraphicsFrameEvent_BufferEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(::perfetto::protos::pbzero::GraphicsFrameEvent_BufferEventType value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LayerName =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      GraphicsFrameEvent_BufferEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LayerName kLayerName() { return {}; }
+  void set_layer_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_LayerName::kFieldId, data, size);
+  }
+  void set_layer_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_LayerName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DurationNs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      GraphicsFrameEvent_BufferEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DurationNs kDurationNs() { return {}; }
+  void set_duration_ns(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DurationNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BufferId =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      GraphicsFrameEvent_BufferEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BufferId kBufferId() { return {}; }
+  void set_buffer_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BufferId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/android/initial_display_state.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_ANDROID_INITIAL_DISPLAY_STATE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_ANDROID_INITIAL_DISPLAY_STATE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class InitialDisplayState_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  InitialDisplayState_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit InitialDisplayState_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit InitialDisplayState_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_display_state() const { return at<1>().valid(); }
+  int32_t display_state() const { return at<1>().as_int32(); }
+  bool has_brightness() const { return at<2>().valid(); }
+  double brightness() const { return at<2>().as_double(); }
+};
+
+class InitialDisplayState : public ::protozero::Message {
+ public:
+  using Decoder = InitialDisplayState_Decoder;
+  enum : int32_t {
+    kDisplayStateFieldNumber = 1,
+    kBrightnessFieldNumber = 2,
+  };
+
+  using FieldMetadata_DisplayState =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      InitialDisplayState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DisplayState kDisplayState() { return {}; }
+  void set_display_state(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DisplayState::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Brightness =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      InitialDisplayState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Brightness kBrightness() { return {}; }
+  void set_brightness(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_Brightness::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/android/packages_list.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_ANDROID_PACKAGES_LIST_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_ANDROID_PACKAGES_LIST_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class PackagesList_PackageInfo;
+
+class PackagesList_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  PackagesList_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PackagesList_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PackagesList_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_packages() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> packages() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_parse_error() const { return at<2>().valid(); }
+  bool parse_error() const { return at<2>().as_bool(); }
+  bool has_read_error() const { return at<3>().valid(); }
+  bool read_error() const { return at<3>().as_bool(); }
+};
+
+class PackagesList : public ::protozero::Message {
+ public:
+  using Decoder = PackagesList_Decoder;
+  enum : int32_t {
+    kPackagesFieldNumber = 1,
+    kParseErrorFieldNumber = 2,
+    kReadErrorFieldNumber = 3,
+  };
+  using PackageInfo = ::perfetto::protos::pbzero::PackagesList_PackageInfo;
+
+  using FieldMetadata_Packages =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PackagesList_PackageInfo,
+      PackagesList>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Packages kPackages() { return {}; }
+  template <typename T = PackagesList_PackageInfo> T* add_packages() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_ParseError =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      PackagesList>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ParseError kParseError() { return {}; }
+  void set_parse_error(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_ParseError::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReadError =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      PackagesList>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReadError kReadError() { return {}; }
+  void set_read_error(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReadError::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class PackagesList_PackageInfo_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  PackagesList_PackageInfo_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PackagesList_PackageInfo_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PackagesList_PackageInfo_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_uid() const { return at<2>().valid(); }
+  uint64_t uid() const { return at<2>().as_uint64(); }
+  bool has_debuggable() const { return at<3>().valid(); }
+  bool debuggable() const { return at<3>().as_bool(); }
+  bool has_profileable_from_shell() const { return at<4>().valid(); }
+  bool profileable_from_shell() const { return at<4>().as_bool(); }
+  bool has_version_code() const { return at<5>().valid(); }
+  int64_t version_code() const { return at<5>().as_int64(); }
+};
+
+class PackagesList_PackageInfo : public ::protozero::Message {
+ public:
+  using Decoder = PackagesList_PackageInfo_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kUidFieldNumber = 2,
+    kDebuggableFieldNumber = 3,
+    kProfileableFromShellFieldNumber = 4,
+    kVersionCodeFieldNumber = 5,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PackagesList_PackageInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Uid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      PackagesList_PackageInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Uid kUid() { return {}; }
+  void set_uid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Uid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Debuggable =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      PackagesList_PackageInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Debuggable kDebuggable() { return {}; }
+  void set_debuggable(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_Debuggable::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProfileableFromShell =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      PackagesList_PackageInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProfileableFromShell kProfileableFromShell() { return {}; }
+  void set_profileable_from_shell(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProfileableFromShell::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_VersionCode =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      PackagesList_PackageInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VersionCode kVersionCode() { return {}; }
+  void set_version_code(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_VersionCode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/chrome/chrome_benchmark_metadata.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_CHROME_CHROME_BENCHMARK_METADATA_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_CHROME_CHROME_BENCHMARK_METADATA_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class ChromeBenchmarkMetadata_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/9, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ChromeBenchmarkMetadata_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeBenchmarkMetadata_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeBenchmarkMetadata_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_benchmark_start_time_us() const { return at<1>().valid(); }
+  int64_t benchmark_start_time_us() const { return at<1>().as_int64(); }
+  bool has_story_run_time_us() const { return at<2>().valid(); }
+  int64_t story_run_time_us() const { return at<2>().as_int64(); }
+  bool has_benchmark_name() const { return at<3>().valid(); }
+  ::protozero::ConstChars benchmark_name() const { return at<3>().as_string(); }
+  bool has_benchmark_description() const { return at<4>().valid(); }
+  ::protozero::ConstChars benchmark_description() const { return at<4>().as_string(); }
+  bool has_label() const { return at<5>().valid(); }
+  ::protozero::ConstChars label() const { return at<5>().as_string(); }
+  bool has_story_name() const { return at<6>().valid(); }
+  ::protozero::ConstChars story_name() const { return at<6>().as_string(); }
+  bool has_story_tags() const { return at<7>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> story_tags() const { return GetRepeated<::protozero::ConstChars>(7); }
+  bool has_story_run_index() const { return at<8>().valid(); }
+  int32_t story_run_index() const { return at<8>().as_int32(); }
+  bool has_had_failures() const { return at<9>().valid(); }
+  bool had_failures() const { return at<9>().as_bool(); }
+};
+
+class ChromeBenchmarkMetadata : public ::protozero::Message {
+ public:
+  using Decoder = ChromeBenchmarkMetadata_Decoder;
+  enum : int32_t {
+    kBenchmarkStartTimeUsFieldNumber = 1,
+    kStoryRunTimeUsFieldNumber = 2,
+    kBenchmarkNameFieldNumber = 3,
+    kBenchmarkDescriptionFieldNumber = 4,
+    kLabelFieldNumber = 5,
+    kStoryNameFieldNumber = 6,
+    kStoryTagsFieldNumber = 7,
+    kStoryRunIndexFieldNumber = 8,
+    kHadFailuresFieldNumber = 9,
+  };
+
+  using FieldMetadata_BenchmarkStartTimeUs =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ChromeBenchmarkMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BenchmarkStartTimeUs kBenchmarkStartTimeUs() { return {}; }
+  void set_benchmark_start_time_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BenchmarkStartTimeUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StoryRunTimeUs =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ChromeBenchmarkMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StoryRunTimeUs kStoryRunTimeUs() { return {}; }
+  void set_story_run_time_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_StoryRunTimeUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BenchmarkName =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeBenchmarkMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BenchmarkName kBenchmarkName() { return {}; }
+  void set_benchmark_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_BenchmarkName::kFieldId, data, size);
+  }
+  void set_benchmark_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_BenchmarkName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BenchmarkDescription =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeBenchmarkMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BenchmarkDescription kBenchmarkDescription() { return {}; }
+  void set_benchmark_description(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_BenchmarkDescription::kFieldId, data, size);
+  }
+  void set_benchmark_description(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_BenchmarkDescription::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Label =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeBenchmarkMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Label kLabel() { return {}; }
+  void set_label(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Label::kFieldId, data, size);
+  }
+  void set_label(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Label::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StoryName =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeBenchmarkMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StoryName kStoryName() { return {}; }
+  void set_story_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_StoryName::kFieldId, data, size);
+  }
+  void set_story_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_StoryName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StoryTags =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeBenchmarkMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StoryTags kStoryTags() { return {}; }
+  void add_story_tags(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_StoryTags::kFieldId, data, size);
+  }
+  void add_story_tags(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_StoryTags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StoryRunIndex =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeBenchmarkMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StoryRunIndex kStoryRunIndex() { return {}; }
+  void set_story_run_index(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_StoryRunIndex::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HadFailures =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeBenchmarkMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HadFailures kHadFailures() { return {}; }
+  void set_had_failures(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_HadFailures::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/chrome/chrome_metadata.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_CHROME_CHROME_METADATA_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_CHROME_CHROME_METADATA_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class BackgroundTracingMetadata;
+class BackgroundTracingMetadata_TriggerRule;
+class BackgroundTracingMetadata_TriggerRule_HistogramRule;
+class BackgroundTracingMetadata_TriggerRule_NamedRule;
+enum BackgroundTracingMetadata_TriggerRule_NamedRule_EventType : int32_t;
+enum BackgroundTracingMetadata_TriggerRule_TriggerType : int32_t;
+
+enum BackgroundTracingMetadata_TriggerRule_TriggerType : int32_t {
+  BackgroundTracingMetadata_TriggerRule_TriggerType_TRIGGER_UNSPECIFIED = 0,
+  BackgroundTracingMetadata_TriggerRule_TriggerType_MONITOR_AND_DUMP_WHEN_SPECIFIC_HISTOGRAM_AND_VALUE = 1,
+  BackgroundTracingMetadata_TriggerRule_TriggerType_MONITOR_AND_DUMP_WHEN_TRIGGER_NAMED = 2,
+};
+
+const BackgroundTracingMetadata_TriggerRule_TriggerType BackgroundTracingMetadata_TriggerRule_TriggerType_MIN = BackgroundTracingMetadata_TriggerRule_TriggerType_TRIGGER_UNSPECIFIED;
+const BackgroundTracingMetadata_TriggerRule_TriggerType BackgroundTracingMetadata_TriggerRule_TriggerType_MAX = BackgroundTracingMetadata_TriggerRule_TriggerType_MONITOR_AND_DUMP_WHEN_TRIGGER_NAMED;
+
+enum BackgroundTracingMetadata_TriggerRule_NamedRule_EventType : int32_t {
+  BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_UNSPECIFIED = 0,
+  BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_SESSION_RESTORE = 1,
+  BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_NAVIGATION = 2,
+  BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_STARTUP = 3,
+  BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_REACHED_CODE = 4,
+  BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_CONTENT_TRIGGER = 5,
+  BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_TEST_RULE = 1000,
+};
+
+const BackgroundTracingMetadata_TriggerRule_NamedRule_EventType BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_MIN = BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_UNSPECIFIED;
+const BackgroundTracingMetadata_TriggerRule_NamedRule_EventType BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_MAX = BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_TEST_RULE;
+
+class BackgroundTracingMetadata_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  BackgroundTracingMetadata_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BackgroundTracingMetadata_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BackgroundTracingMetadata_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_triggered_rule() const { return at<1>().valid(); }
+  ::protozero::ConstBytes triggered_rule() const { return at<1>().as_bytes(); }
+  bool has_active_rules() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> active_rules() const { return GetRepeated<::protozero::ConstBytes>(2); }
+};
+
+class BackgroundTracingMetadata : public ::protozero::Message {
+ public:
+  using Decoder = BackgroundTracingMetadata_Decoder;
+  enum : int32_t {
+    kTriggeredRuleFieldNumber = 1,
+    kActiveRulesFieldNumber = 2,
+  };
+  using TriggerRule = ::perfetto::protos::pbzero::BackgroundTracingMetadata_TriggerRule;
+
+  using FieldMetadata_TriggeredRule =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BackgroundTracingMetadata_TriggerRule,
+      BackgroundTracingMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggeredRule kTriggeredRule() { return {}; }
+  template <typename T = BackgroundTracingMetadata_TriggerRule> T* set_triggered_rule() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_ActiveRules =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BackgroundTracingMetadata_TriggerRule,
+      BackgroundTracingMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ActiveRules kActiveRules() { return {}; }
+  template <typename T = BackgroundTracingMetadata_TriggerRule> T* add_active_rules() {
+    return BeginNestedMessage<T>(2);
+  }
+
+};
+
+class BackgroundTracingMetadata_TriggerRule_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BackgroundTracingMetadata_TriggerRule_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BackgroundTracingMetadata_TriggerRule_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BackgroundTracingMetadata_TriggerRule_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_trigger_type() const { return at<1>().valid(); }
+  int32_t trigger_type() const { return at<1>().as_int32(); }
+  bool has_histogram_rule() const { return at<2>().valid(); }
+  ::protozero::ConstBytes histogram_rule() const { return at<2>().as_bytes(); }
+  bool has_named_rule() const { return at<3>().valid(); }
+  ::protozero::ConstBytes named_rule() const { return at<3>().as_bytes(); }
+};
+
+class BackgroundTracingMetadata_TriggerRule : public ::protozero::Message {
+ public:
+  using Decoder = BackgroundTracingMetadata_TriggerRule_Decoder;
+  enum : int32_t {
+    kTriggerTypeFieldNumber = 1,
+    kHistogramRuleFieldNumber = 2,
+    kNamedRuleFieldNumber = 3,
+  };
+  using HistogramRule = ::perfetto::protos::pbzero::BackgroundTracingMetadata_TriggerRule_HistogramRule;
+  using NamedRule = ::perfetto::protos::pbzero::BackgroundTracingMetadata_TriggerRule_NamedRule;
+  using TriggerType = ::perfetto::protos::pbzero::BackgroundTracingMetadata_TriggerRule_TriggerType;
+  static const TriggerType TRIGGER_UNSPECIFIED = BackgroundTracingMetadata_TriggerRule_TriggerType_TRIGGER_UNSPECIFIED;
+  static const TriggerType MONITOR_AND_DUMP_WHEN_SPECIFIC_HISTOGRAM_AND_VALUE = BackgroundTracingMetadata_TriggerRule_TriggerType_MONITOR_AND_DUMP_WHEN_SPECIFIC_HISTOGRAM_AND_VALUE;
+  static const TriggerType MONITOR_AND_DUMP_WHEN_TRIGGER_NAMED = BackgroundTracingMetadata_TriggerRule_TriggerType_MONITOR_AND_DUMP_WHEN_TRIGGER_NAMED;
+
+  using FieldMetadata_TriggerType =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::BackgroundTracingMetadata_TriggerRule_TriggerType,
+      BackgroundTracingMetadata_TriggerRule>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TriggerType kTriggerType() { return {}; }
+  void set_trigger_type(::perfetto::protos::pbzero::BackgroundTracingMetadata_TriggerRule_TriggerType value) {
+    static constexpr uint32_t field_id = FieldMetadata_TriggerType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HistogramRule =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BackgroundTracingMetadata_TriggerRule_HistogramRule,
+      BackgroundTracingMetadata_TriggerRule>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HistogramRule kHistogramRule() { return {}; }
+  template <typename T = BackgroundTracingMetadata_TriggerRule_HistogramRule> T* set_histogram_rule() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_NamedRule =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BackgroundTracingMetadata_TriggerRule_NamedRule,
+      BackgroundTracingMetadata_TriggerRule>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NamedRule kNamedRule() { return {}; }
+  template <typename T = BackgroundTracingMetadata_TriggerRule_NamedRule> T* set_named_rule() {
+    return BeginNestedMessage<T>(3);
+  }
+
+};
+
+class BackgroundTracingMetadata_TriggerRule_NamedRule_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BackgroundTracingMetadata_TriggerRule_NamedRule_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BackgroundTracingMetadata_TriggerRule_NamedRule_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BackgroundTracingMetadata_TriggerRule_NamedRule_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_event_type() const { return at<1>().valid(); }
+  int32_t event_type() const { return at<1>().as_int32(); }
+  bool has_content_trigger_name_hash() const { return at<2>().valid(); }
+  uint64_t content_trigger_name_hash() const { return at<2>().as_uint64(); }
+};
+
+class BackgroundTracingMetadata_TriggerRule_NamedRule : public ::protozero::Message {
+ public:
+  using Decoder = BackgroundTracingMetadata_TriggerRule_NamedRule_Decoder;
+  enum : int32_t {
+    kEventTypeFieldNumber = 1,
+    kContentTriggerNameHashFieldNumber = 2,
+  };
+  using EventType = ::perfetto::protos::pbzero::BackgroundTracingMetadata_TriggerRule_NamedRule_EventType;
+  static const EventType UNSPECIFIED = BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_UNSPECIFIED;
+  static const EventType SESSION_RESTORE = BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_SESSION_RESTORE;
+  static const EventType NAVIGATION = BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_NAVIGATION;
+  static const EventType STARTUP = BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_STARTUP;
+  static const EventType REACHED_CODE = BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_REACHED_CODE;
+  static const EventType CONTENT_TRIGGER = BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_CONTENT_TRIGGER;
+  static const EventType TEST_RULE = BackgroundTracingMetadata_TriggerRule_NamedRule_EventType_TEST_RULE;
+
+  using FieldMetadata_EventType =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::BackgroundTracingMetadata_TriggerRule_NamedRule_EventType,
+      BackgroundTracingMetadata_TriggerRule_NamedRule>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EventType kEventType() { return {}; }
+  void set_event_type(::perfetto::protos::pbzero::BackgroundTracingMetadata_TriggerRule_NamedRule_EventType value) {
+    static constexpr uint32_t field_id = FieldMetadata_EventType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ContentTriggerNameHash =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kFixed64,
+      uint64_t,
+      BackgroundTracingMetadata_TriggerRule_NamedRule>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ContentTriggerNameHash kContentTriggerNameHash() { return {}; }
+  void set_content_trigger_name_hash(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ContentTriggerNameHash::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kFixed64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BackgroundTracingMetadata_TriggerRule_HistogramRule_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BackgroundTracingMetadata_TriggerRule_HistogramRule_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BackgroundTracingMetadata_TriggerRule_HistogramRule_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BackgroundTracingMetadata_TriggerRule_HistogramRule_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_histogram_name_hash() const { return at<1>().valid(); }
+  uint64_t histogram_name_hash() const { return at<1>().as_uint64(); }
+  bool has_histogram_min_trigger() const { return at<2>().valid(); }
+  int64_t histogram_min_trigger() const { return at<2>().as_int64(); }
+  bool has_histogram_max_trigger() const { return at<3>().valid(); }
+  int64_t histogram_max_trigger() const { return at<3>().as_int64(); }
+};
+
+class BackgroundTracingMetadata_TriggerRule_HistogramRule : public ::protozero::Message {
+ public:
+  using Decoder = BackgroundTracingMetadata_TriggerRule_HistogramRule_Decoder;
+  enum : int32_t {
+    kHistogramNameHashFieldNumber = 1,
+    kHistogramMinTriggerFieldNumber = 2,
+    kHistogramMaxTriggerFieldNumber = 3,
+  };
+
+  using FieldMetadata_HistogramNameHash =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kFixed64,
+      uint64_t,
+      BackgroundTracingMetadata_TriggerRule_HistogramRule>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HistogramNameHash kHistogramNameHash() { return {}; }
+  void set_histogram_name_hash(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_HistogramNameHash::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kFixed64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HistogramMinTrigger =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BackgroundTracingMetadata_TriggerRule_HistogramRule>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HistogramMinTrigger kHistogramMinTrigger() { return {}; }
+  void set_histogram_min_trigger(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_HistogramMinTrigger::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HistogramMaxTrigger =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BackgroundTracingMetadata_TriggerRule_HistogramRule>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HistogramMaxTrigger kHistogramMaxTrigger() { return {}; }
+  void set_histogram_max_trigger(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_HistogramMaxTrigger::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ChromeMetadataPacket_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeMetadataPacket_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeMetadataPacket_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeMetadataPacket_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_background_tracing_metadata() const { return at<1>().valid(); }
+  ::protozero::ConstBytes background_tracing_metadata() const { return at<1>().as_bytes(); }
+  bool has_chrome_version_code() const { return at<2>().valid(); }
+  int32_t chrome_version_code() const { return at<2>().as_int32(); }
+  bool has_enabled_categories() const { return at<3>().valid(); }
+  ::protozero::ConstChars enabled_categories() const { return at<3>().as_string(); }
+};
+
+class ChromeMetadataPacket : public ::protozero::Message {
+ public:
+  using Decoder = ChromeMetadataPacket_Decoder;
+  enum : int32_t {
+    kBackgroundTracingMetadataFieldNumber = 1,
+    kChromeVersionCodeFieldNumber = 2,
+    kEnabledCategoriesFieldNumber = 3,
+  };
+
+  using FieldMetadata_BackgroundTracingMetadata =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BackgroundTracingMetadata,
+      ChromeMetadataPacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BackgroundTracingMetadata kBackgroundTracingMetadata() { return {}; }
+  template <typename T = BackgroundTracingMetadata> T* set_background_tracing_metadata() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_ChromeVersionCode =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeMetadataPacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeVersionCode kChromeVersionCode() { return {}; }
+  void set_chrome_version_code(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChromeVersionCode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EnabledCategories =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeMetadataPacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EnabledCategories kEnabledCategories() { return {}; }
+  void set_enabled_categories(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_EnabledCategories::kFieldId, data, size);
+  }
+  void set_enabled_categories(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_EnabledCategories::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/chrome/chrome_trace_event.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_CHROME_CHROME_TRACE_EVENT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_CHROME_CHROME_TRACE_EVENT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class ChromeLegacyJsonTrace;
+class ChromeMetadata;
+class ChromeStringTableEntry;
+class ChromeTraceEvent;
+class ChromeTraceEvent_Arg;
+class ChromeTracedValue;
+enum ChromeLegacyJsonTrace_TraceType : int32_t;
+enum ChromeTracedValue_NestedType : int32_t;
+
+enum ChromeLegacyJsonTrace_TraceType : int32_t {
+  ChromeLegacyJsonTrace_TraceType_USER_TRACE = 0,
+  ChromeLegacyJsonTrace_TraceType_SYSTEM_TRACE = 1,
+};
+
+const ChromeLegacyJsonTrace_TraceType ChromeLegacyJsonTrace_TraceType_MIN = ChromeLegacyJsonTrace_TraceType_USER_TRACE;
+const ChromeLegacyJsonTrace_TraceType ChromeLegacyJsonTrace_TraceType_MAX = ChromeLegacyJsonTrace_TraceType_SYSTEM_TRACE;
+
+enum ChromeTracedValue_NestedType : int32_t {
+  ChromeTracedValue_NestedType_DICT = 0,
+  ChromeTracedValue_NestedType_ARRAY = 1,
+};
+
+const ChromeTracedValue_NestedType ChromeTracedValue_NestedType_MIN = ChromeTracedValue_NestedType_DICT;
+const ChromeTracedValue_NestedType ChromeTracedValue_NestedType_MAX = ChromeTracedValue_NestedType_ARRAY;
+
+class ChromeEventBundle_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ChromeEventBundle_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeEventBundle_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeEventBundle_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_trace_events() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> trace_events() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_metadata() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> metadata() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_legacy_ftrace_output() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> legacy_ftrace_output() const { return GetRepeated<::protozero::ConstChars>(4); }
+  bool has_legacy_json_trace() const { return at<5>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> legacy_json_trace() const { return GetRepeated<::protozero::ConstBytes>(5); }
+  bool has_string_table() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> string_table() const { return GetRepeated<::protozero::ConstBytes>(3); }
+};
+
+class ChromeEventBundle : public ::protozero::Message {
+ public:
+  using Decoder = ChromeEventBundle_Decoder;
+  enum : int32_t {
+    kTraceEventsFieldNumber = 1,
+    kMetadataFieldNumber = 2,
+    kLegacyFtraceOutputFieldNumber = 4,
+    kLegacyJsonTraceFieldNumber = 5,
+    kStringTableFieldNumber = 3,
+  };
+
+  using FieldMetadata_TraceEvents =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeTraceEvent,
+      ChromeEventBundle>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceEvents kTraceEvents() { return {}; }
+  template <typename T = ChromeTraceEvent> T* add_trace_events() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_Metadata =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeMetadata,
+      ChromeEventBundle>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Metadata kMetadata() { return {}; }
+  template <typename T = ChromeMetadata> T* add_metadata() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_LegacyFtraceOutput =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeEventBundle>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LegacyFtraceOutput kLegacyFtraceOutput() { return {}; }
+  void add_legacy_ftrace_output(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_LegacyFtraceOutput::kFieldId, data, size);
+  }
+  void add_legacy_ftrace_output(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_LegacyFtraceOutput::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LegacyJsonTrace =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeLegacyJsonTrace,
+      ChromeEventBundle>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LegacyJsonTrace kLegacyJsonTrace() { return {}; }
+  template <typename T = ChromeLegacyJsonTrace> T* add_legacy_json_trace() {
+    return BeginNestedMessage<T>(5);
+  }
+
+
+  using FieldMetadata_StringTable =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeStringTableEntry,
+      ChromeEventBundle>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StringTable kStringTable() { return {}; }
+  template <typename T = ChromeStringTableEntry> T* add_string_table() {
+    return BeginNestedMessage<T>(3);
+  }
+
+};
+
+class ChromeLegacyJsonTrace_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeLegacyJsonTrace_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeLegacyJsonTrace_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeLegacyJsonTrace_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_type() const { return at<1>().valid(); }
+  int32_t type() const { return at<1>().as_int32(); }
+  bool has_data() const { return at<2>().valid(); }
+  ::protozero::ConstChars data() const { return at<2>().as_string(); }
+};
+
+class ChromeLegacyJsonTrace : public ::protozero::Message {
+ public:
+  using Decoder = ChromeLegacyJsonTrace_Decoder;
+  enum : int32_t {
+    kTypeFieldNumber = 1,
+    kDataFieldNumber = 2,
+  };
+  using TraceType = ::perfetto::protos::pbzero::ChromeLegacyJsonTrace_TraceType;
+  static const TraceType USER_TRACE = ChromeLegacyJsonTrace_TraceType_USER_TRACE;
+  static const TraceType SYSTEM_TRACE = ChromeLegacyJsonTrace_TraceType_SYSTEM_TRACE;
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeLegacyJsonTrace_TraceType,
+      ChromeLegacyJsonTrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(::perfetto::protos::pbzero::ChromeLegacyJsonTrace_TraceType value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Data =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeLegacyJsonTrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Data kData() { return {}; }
+  void set_data(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Data::kFieldId, data, size);
+  }
+  void set_data(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Data::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ChromeMetadata_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeMetadata_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeMetadata_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeMetadata_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_string_value() const { return at<2>().valid(); }
+  ::protozero::ConstChars string_value() const { return at<2>().as_string(); }
+  bool has_bool_value() const { return at<3>().valid(); }
+  bool bool_value() const { return at<3>().as_bool(); }
+  bool has_int_value() const { return at<4>().valid(); }
+  int64_t int_value() const { return at<4>().as_int64(); }
+  bool has_json_value() const { return at<5>().valid(); }
+  ::protozero::ConstChars json_value() const { return at<5>().as_string(); }
+};
+
+class ChromeMetadata : public ::protozero::Message {
+ public:
+  using Decoder = ChromeMetadata_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kStringValueFieldNumber = 2,
+    kBoolValueFieldNumber = 3,
+    kIntValueFieldNumber = 4,
+    kJsonValueFieldNumber = 5,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StringValue =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StringValue kStringValue() { return {}; }
+  void set_string_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_StringValue::kFieldId, data, size);
+  }
+  void set_string_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_StringValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BoolValue =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BoolValue kBoolValue() { return {}; }
+  void set_bool_value(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_BoolValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IntValue =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ChromeMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IntValue kIntValue() { return {}; }
+  void set_int_value(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IntValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_JsonValue =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeMetadata>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_JsonValue kJsonValue() { return {}; }
+  void set_json_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_JsonValue::kFieldId, data, size);
+  }
+  void set_json_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_JsonValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ChromeTraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/16, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ChromeTraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeTraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeTraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_timestamp() const { return at<2>().valid(); }
+  int64_t timestamp() const { return at<2>().as_int64(); }
+  bool has_phase() const { return at<3>().valid(); }
+  int32_t phase() const { return at<3>().as_int32(); }
+  bool has_thread_id() const { return at<4>().valid(); }
+  int32_t thread_id() const { return at<4>().as_int32(); }
+  bool has_duration() const { return at<5>().valid(); }
+  int64_t duration() const { return at<5>().as_int64(); }
+  bool has_thread_duration() const { return at<6>().valid(); }
+  int64_t thread_duration() const { return at<6>().as_int64(); }
+  bool has_scope() const { return at<7>().valid(); }
+  ::protozero::ConstChars scope() const { return at<7>().as_string(); }
+  bool has_id() const { return at<8>().valid(); }
+  uint64_t id() const { return at<8>().as_uint64(); }
+  bool has_flags() const { return at<9>().valid(); }
+  uint32_t flags() const { return at<9>().as_uint32(); }
+  bool has_category_group_name() const { return at<10>().valid(); }
+  ::protozero::ConstChars category_group_name() const { return at<10>().as_string(); }
+  bool has_process_id() const { return at<11>().valid(); }
+  int32_t process_id() const { return at<11>().as_int32(); }
+  bool has_thread_timestamp() const { return at<12>().valid(); }
+  int64_t thread_timestamp() const { return at<12>().as_int64(); }
+  bool has_bind_id() const { return at<13>().valid(); }
+  uint64_t bind_id() const { return at<13>().as_uint64(); }
+  bool has_args() const { return at<14>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> args() const { return GetRepeated<::protozero::ConstBytes>(14); }
+  bool has_name_index() const { return at<15>().valid(); }
+  uint32_t name_index() const { return at<15>().as_uint32(); }
+  bool has_category_group_name_index() const { return at<16>().valid(); }
+  uint32_t category_group_name_index() const { return at<16>().as_uint32(); }
+};
+
+class ChromeTraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = ChromeTraceEvent_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kTimestampFieldNumber = 2,
+    kPhaseFieldNumber = 3,
+    kThreadIdFieldNumber = 4,
+    kDurationFieldNumber = 5,
+    kThreadDurationFieldNumber = 6,
+    kScopeFieldNumber = 7,
+    kIdFieldNumber = 8,
+    kFlagsFieldNumber = 9,
+    kCategoryGroupNameFieldNumber = 10,
+    kProcessIdFieldNumber = 11,
+    kThreadTimestampFieldNumber = 12,
+    kBindIdFieldNumber = 13,
+    kArgsFieldNumber = 14,
+    kNameIndexFieldNumber = 15,
+    kCategoryGroupNameIndexFieldNumber = 16,
+  };
+  using Arg = ::perfetto::protos::pbzero::ChromeTraceEvent_Arg;
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeTraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Timestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ChromeTraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Timestamp kTimestamp() { return {}; }
+  void set_timestamp(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Timestamp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Phase =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeTraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Phase kPhase() { return {}; }
+  void set_phase(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Phase::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadId =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeTraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadId kThreadId() { return {}; }
+  void set_thread_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Duration =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ChromeTraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Duration kDuration() { return {}; }
+  void set_duration(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Duration::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadDuration =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ChromeTraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadDuration kThreadDuration() { return {}; }
+  void set_thread_duration(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadDuration::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Scope =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeTraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Scope kScope() { return {}; }
+  void set_scope(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Scope::kFieldId, data, size);
+  }
+  void set_scope(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Scope::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Id =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ChromeTraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Id kId() { return {}; }
+  void set_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Id::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ChromeTraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CategoryGroupName =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeTraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CategoryGroupName kCategoryGroupName() { return {}; }
+  void set_category_group_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_CategoryGroupName::kFieldId, data, size);
+  }
+  void set_category_group_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_CategoryGroupName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProcessId =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeTraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessId kProcessId() { return {}; }
+  void set_process_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProcessId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadTimestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ChromeTraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadTimestamp kThreadTimestamp() { return {}; }
+  void set_thread_timestamp(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadTimestamp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BindId =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ChromeTraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BindId kBindId() { return {}; }
+  void set_bind_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BindId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Args =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeTraceEvent_Arg,
+      ChromeTraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Args kArgs() { return {}; }
+  template <typename T = ChromeTraceEvent_Arg> T* add_args() {
+    return BeginNestedMessage<T>(14);
+  }
+
+
+  using FieldMetadata_NameIndex =
+    ::protozero::proto_utils::FieldMetadata<
+      15,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ChromeTraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NameIndex kNameIndex() { return {}; }
+  void set_name_index(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NameIndex::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CategoryGroupNameIndex =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ChromeTraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CategoryGroupNameIndex kCategoryGroupNameIndex() { return {}; }
+  void set_category_group_name_index(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CategoryGroupNameIndex::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ChromeTraceEvent_Arg_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/10, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeTraceEvent_Arg_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeTraceEvent_Arg_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeTraceEvent_Arg_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_bool_value() const { return at<2>().valid(); }
+  bool bool_value() const { return at<2>().as_bool(); }
+  bool has_uint_value() const { return at<3>().valid(); }
+  uint64_t uint_value() const { return at<3>().as_uint64(); }
+  bool has_int_value() const { return at<4>().valid(); }
+  int64_t int_value() const { return at<4>().as_int64(); }
+  bool has_double_value() const { return at<5>().valid(); }
+  double double_value() const { return at<5>().as_double(); }
+  bool has_string_value() const { return at<6>().valid(); }
+  ::protozero::ConstChars string_value() const { return at<6>().as_string(); }
+  bool has_pointer_value() const { return at<7>().valid(); }
+  uint64_t pointer_value() const { return at<7>().as_uint64(); }
+  bool has_json_value() const { return at<8>().valid(); }
+  ::protozero::ConstChars json_value() const { return at<8>().as_string(); }
+  bool has_traced_value() const { return at<10>().valid(); }
+  ::protozero::ConstBytes traced_value() const { return at<10>().as_bytes(); }
+  bool has_name_index() const { return at<9>().valid(); }
+  uint32_t name_index() const { return at<9>().as_uint32(); }
+};
+
+class ChromeTraceEvent_Arg : public ::protozero::Message {
+ public:
+  using Decoder = ChromeTraceEvent_Arg_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kBoolValueFieldNumber = 2,
+    kUintValueFieldNumber = 3,
+    kIntValueFieldNumber = 4,
+    kDoubleValueFieldNumber = 5,
+    kStringValueFieldNumber = 6,
+    kPointerValueFieldNumber = 7,
+    kJsonValueFieldNumber = 8,
+    kTracedValueFieldNumber = 10,
+    kNameIndexFieldNumber = 9,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeTraceEvent_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BoolValue =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeTraceEvent_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BoolValue kBoolValue() { return {}; }
+  void set_bool_value(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_BoolValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UintValue =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ChromeTraceEvent_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UintValue kUintValue() { return {}; }
+  void set_uint_value(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UintValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IntValue =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ChromeTraceEvent_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IntValue kIntValue() { return {}; }
+  void set_int_value(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IntValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DoubleValue =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      ChromeTraceEvent_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DoubleValue kDoubleValue() { return {}; }
+  void set_double_value(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_DoubleValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StringValue =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeTraceEvent_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StringValue kStringValue() { return {}; }
+  void set_string_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_StringValue::kFieldId, data, size);
+  }
+  void set_string_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_StringValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PointerValue =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ChromeTraceEvent_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PointerValue kPointerValue() { return {}; }
+  void set_pointer_value(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PointerValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_JsonValue =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeTraceEvent_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_JsonValue kJsonValue() { return {}; }
+  void set_json_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_JsonValue::kFieldId, data, size);
+  }
+  void set_json_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_JsonValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TracedValue =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeTracedValue,
+      ChromeTraceEvent_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TracedValue kTracedValue() { return {}; }
+  template <typename T = ChromeTracedValue> T* set_traced_value() {
+    return BeginNestedMessage<T>(10);
+  }
+
+
+  using FieldMetadata_NameIndex =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ChromeTraceEvent_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NameIndex kNameIndex() { return {}; }
+  void set_name_index(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NameIndex::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ChromeStringTableEntry_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeStringTableEntry_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeStringTableEntry_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeStringTableEntry_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_value() const { return at<1>().valid(); }
+  ::protozero::ConstChars value() const { return at<1>().as_string(); }
+  bool has_index() const { return at<2>().valid(); }
+  int32_t index() const { return at<2>().as_int32(); }
+};
+
+class ChromeStringTableEntry : public ::protozero::Message {
+ public:
+  using Decoder = ChromeStringTableEntry_Decoder;
+  enum : int32_t {
+    kValueFieldNumber = 1,
+    kIndexFieldNumber = 2,
+  };
+
+  using FieldMetadata_Value =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeStringTableEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Value kValue() { return {}; }
+  void set_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Value::kFieldId, data, size);
+  }
+  void set_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Value::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Index =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeStringTableEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Index kIndex() { return {}; }
+  void set_index(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Index::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ChromeTracedValue_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ChromeTracedValue_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeTracedValue_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeTracedValue_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_nested_type() const { return at<1>().valid(); }
+  int32_t nested_type() const { return at<1>().as_int32(); }
+  bool has_dict_keys() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> dict_keys() const { return GetRepeated<::protozero::ConstChars>(2); }
+  bool has_dict_values() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> dict_values() const { return GetRepeated<::protozero::ConstBytes>(3); }
+  bool has_array_values() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> array_values() const { return GetRepeated<::protozero::ConstBytes>(4); }
+  bool has_int_value() const { return at<5>().valid(); }
+  int32_t int_value() const { return at<5>().as_int32(); }
+  bool has_double_value() const { return at<6>().valid(); }
+  double double_value() const { return at<6>().as_double(); }
+  bool has_bool_value() const { return at<7>().valid(); }
+  bool bool_value() const { return at<7>().as_bool(); }
+  bool has_string_value() const { return at<8>().valid(); }
+  ::protozero::ConstChars string_value() const { return at<8>().as_string(); }
+};
+
+class ChromeTracedValue : public ::protozero::Message {
+ public:
+  using Decoder = ChromeTracedValue_Decoder;
+  enum : int32_t {
+    kNestedTypeFieldNumber = 1,
+    kDictKeysFieldNumber = 2,
+    kDictValuesFieldNumber = 3,
+    kArrayValuesFieldNumber = 4,
+    kIntValueFieldNumber = 5,
+    kDoubleValueFieldNumber = 6,
+    kBoolValueFieldNumber = 7,
+    kStringValueFieldNumber = 8,
+  };
+  using NestedType = ::perfetto::protos::pbzero::ChromeTracedValue_NestedType;
+  static const NestedType DICT = ChromeTracedValue_NestedType_DICT;
+  static const NestedType ARRAY = ChromeTracedValue_NestedType_ARRAY;
+
+  using FieldMetadata_NestedType =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeTracedValue_NestedType,
+      ChromeTracedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NestedType kNestedType() { return {}; }
+  void set_nested_type(::perfetto::protos::pbzero::ChromeTracedValue_NestedType value) {
+    static constexpr uint32_t field_id = FieldMetadata_NestedType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DictKeys =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeTracedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DictKeys kDictKeys() { return {}; }
+  void add_dict_keys(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_DictKeys::kFieldId, data, size);
+  }
+  void add_dict_keys(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_DictKeys::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DictValues =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeTracedValue,
+      ChromeTracedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DictValues kDictValues() { return {}; }
+  template <typename T = ChromeTracedValue> T* add_dict_values() {
+    return BeginNestedMessage<T>(3);
+  }
+
+
+  using FieldMetadata_ArrayValues =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeTracedValue,
+      ChromeTracedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ArrayValues kArrayValues() { return {}; }
+  template <typename T = ChromeTracedValue> T* add_array_values() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_IntValue =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeTracedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IntValue kIntValue() { return {}; }
+  void set_int_value(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IntValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DoubleValue =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      ChromeTracedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DoubleValue kDoubleValue() { return {}; }
+  void set_double_value(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_DoubleValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BoolValue =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeTracedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BoolValue kBoolValue() { return {}; }
+  void set_bool_value(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_BoolValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StringValue =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeTracedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StringValue kStringValue() { return {}; }
+  void set_string_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_StringValue::kFieldId, data, size);
+  }
+  void set_string_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_StringValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/filesystem/inode_file_map.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FILESYSTEM_INODE_FILE_MAP_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FILESYSTEM_INODE_FILE_MAP_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class InodeFileMap_Entry;
+enum InodeFileMap_Entry_Type : int32_t;
+
+enum InodeFileMap_Entry_Type : int32_t {
+  InodeFileMap_Entry_Type_UNKNOWN = 0,
+  InodeFileMap_Entry_Type_FILE = 1,
+  InodeFileMap_Entry_Type_DIRECTORY = 2,
+};
+
+const InodeFileMap_Entry_Type InodeFileMap_Entry_Type_MIN = InodeFileMap_Entry_Type_UNKNOWN;
+const InodeFileMap_Entry_Type InodeFileMap_Entry_Type_MAX = InodeFileMap_Entry_Type_DIRECTORY;
+
+class InodeFileMap_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  InodeFileMap_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit InodeFileMap_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit InodeFileMap_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_block_device_id() const { return at<1>().valid(); }
+  uint64_t block_device_id() const { return at<1>().as_uint64(); }
+  bool has_mount_points() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> mount_points() const { return GetRepeated<::protozero::ConstChars>(2); }
+  bool has_entries() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> entries() const { return GetRepeated<::protozero::ConstBytes>(3); }
+};
+
+class InodeFileMap : public ::protozero::Message {
+ public:
+  using Decoder = InodeFileMap_Decoder;
+  enum : int32_t {
+    kBlockDeviceIdFieldNumber = 1,
+    kMountPointsFieldNumber = 2,
+    kEntriesFieldNumber = 3,
+  };
+  using Entry = ::perfetto::protos::pbzero::InodeFileMap_Entry;
+
+  using FieldMetadata_BlockDeviceId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      InodeFileMap>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockDeviceId kBlockDeviceId() { return {}; }
+  void set_block_device_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BlockDeviceId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MountPoints =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      InodeFileMap>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MountPoints kMountPoints() { return {}; }
+  void add_mount_points(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_MountPoints::kFieldId, data, size);
+  }
+  void add_mount_points(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_MountPoints::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Entries =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InodeFileMap_Entry,
+      InodeFileMap>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Entries kEntries() { return {}; }
+  template <typename T = InodeFileMap_Entry> T* add_entries() {
+    return BeginNestedMessage<T>(3);
+  }
+
+};
+
+class InodeFileMap_Entry_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  InodeFileMap_Entry_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit InodeFileMap_Entry_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit InodeFileMap_Entry_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_inode_number() const { return at<1>().valid(); }
+  uint64_t inode_number() const { return at<1>().as_uint64(); }
+  bool has_paths() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> paths() const { return GetRepeated<::protozero::ConstChars>(2); }
+  bool has_type() const { return at<3>().valid(); }
+  int32_t type() const { return at<3>().as_int32(); }
+};
+
+class InodeFileMap_Entry : public ::protozero::Message {
+ public:
+  using Decoder = InodeFileMap_Entry_Decoder;
+  enum : int32_t {
+    kInodeNumberFieldNumber = 1,
+    kPathsFieldNumber = 2,
+    kTypeFieldNumber = 3,
+  };
+  using Type = ::perfetto::protos::pbzero::InodeFileMap_Entry_Type;
+  static const Type UNKNOWN = InodeFileMap_Entry_Type_UNKNOWN;
+  static const Type FILE = InodeFileMap_Entry_Type_FILE;
+  static const Type DIRECTORY = InodeFileMap_Entry_Type_DIRECTORY;
+
+  using FieldMetadata_InodeNumber =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      InodeFileMap_Entry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InodeNumber kInodeNumber() { return {}; }
+  void set_inode_number(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_InodeNumber::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Paths =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      InodeFileMap_Entry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Paths kPaths() { return {}; }
+  void add_paths(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Paths::kFieldId, data, size);
+  }
+  void add_paths(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Paths::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::InodeFileMap_Entry_Type,
+      InodeFileMap_Entry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(::perfetto::protos::pbzero::InodeFileMap_Entry_Type value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/ftrace_event.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_FTRACE_EVENT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_FTRACE_EVENT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class AllocPagesIommuEndFtraceEvent;
+class AllocPagesIommuFailFtraceEvent;
+class AllocPagesIommuStartFtraceEvent;
+class AllocPagesSysEndFtraceEvent;
+class AllocPagesSysFailFtraceEvent;
+class AllocPagesSysStartFtraceEvent;
+class BinderLockFtraceEvent;
+class BinderLockedFtraceEvent;
+class BinderSetPriorityFtraceEvent;
+class BinderTransactionAllocBufFtraceEvent;
+class BinderTransactionFtraceEvent;
+class BinderTransactionReceivedFtraceEvent;
+class BinderUnlockFtraceEvent;
+class BlockBioBackmergeFtraceEvent;
+class BlockBioBounceFtraceEvent;
+class BlockBioCompleteFtraceEvent;
+class BlockBioFrontmergeFtraceEvent;
+class BlockBioQueueFtraceEvent;
+class BlockBioRemapFtraceEvent;
+class BlockDirtyBufferFtraceEvent;
+class BlockGetrqFtraceEvent;
+class BlockPlugFtraceEvent;
+class BlockRqAbortFtraceEvent;
+class BlockRqCompleteFtraceEvent;
+class BlockRqInsertFtraceEvent;
+class BlockRqIssueFtraceEvent;
+class BlockRqRemapFtraceEvent;
+class BlockRqRequeueFtraceEvent;
+class BlockSleeprqFtraceEvent;
+class BlockSplitFtraceEvent;
+class BlockTouchBufferFtraceEvent;
+class BlockUnplugFtraceEvent;
+class CdevUpdateFtraceEvent;
+class CgroupAttachTaskFtraceEvent;
+class CgroupDestroyRootFtraceEvent;
+class CgroupMkdirFtraceEvent;
+class CgroupReleaseFtraceEvent;
+class CgroupRemountFtraceEvent;
+class CgroupRenameFtraceEvent;
+class CgroupRmdirFtraceEvent;
+class CgroupSetupRootFtraceEvent;
+class CgroupTransferTasksFtraceEvent;
+class ClkDisableFtraceEvent;
+class ClkEnableFtraceEvent;
+class ClkSetRateFtraceEvent;
+class ClockDisableFtraceEvent;
+class ClockEnableFtraceEvent;
+class ClockSetRateFtraceEvent;
+class CpuFrequencyFtraceEvent;
+class CpuFrequencyLimitsFtraceEvent;
+class CpuIdleFtraceEvent;
+class CpuhpEnterFtraceEvent;
+class CpuhpExitFtraceEvent;
+class CpuhpLatencyFtraceEvent;
+class CpuhpMultiEnterFtraceEvent;
+class CpuhpPauseFtraceEvent;
+class DmaAllocContiguousRetryFtraceEvent;
+class DmaHeapStatFtraceEvent;
+class DpuTracingMarkWriteFtraceEvent;
+class Ext4AllocDaBlocksFtraceEvent;
+class Ext4AllocateBlocksFtraceEvent;
+class Ext4AllocateInodeFtraceEvent;
+class Ext4BeginOrderedTruncateFtraceEvent;
+class Ext4CollapseRangeFtraceEvent;
+class Ext4DaReleaseSpaceFtraceEvent;
+class Ext4DaReserveSpaceFtraceEvent;
+class Ext4DaUpdateReserveSpaceFtraceEvent;
+class Ext4DaWriteBeginFtraceEvent;
+class Ext4DaWriteEndFtraceEvent;
+class Ext4DaWritePagesExtentFtraceEvent;
+class Ext4DaWritePagesFtraceEvent;
+class Ext4DirectIOEnterFtraceEvent;
+class Ext4DirectIOExitFtraceEvent;
+class Ext4DiscardBlocksFtraceEvent;
+class Ext4DiscardPreallocationsFtraceEvent;
+class Ext4DropInodeFtraceEvent;
+class Ext4EsCacheExtentFtraceEvent;
+class Ext4EsFindDelayedExtentRangeEnterFtraceEvent;
+class Ext4EsFindDelayedExtentRangeExitFtraceEvent;
+class Ext4EsInsertExtentFtraceEvent;
+class Ext4EsLookupExtentEnterFtraceEvent;
+class Ext4EsLookupExtentExitFtraceEvent;
+class Ext4EsRemoveExtentFtraceEvent;
+class Ext4EsShrinkCountFtraceEvent;
+class Ext4EsShrinkFtraceEvent;
+class Ext4EsShrinkScanEnterFtraceEvent;
+class Ext4EsShrinkScanExitFtraceEvent;
+class Ext4EvictInodeFtraceEvent;
+class Ext4ExtConvertToInitializedEnterFtraceEvent;
+class Ext4ExtConvertToInitializedFastpathFtraceEvent;
+class Ext4ExtHandleUnwrittenExtentsFtraceEvent;
+class Ext4ExtInCacheFtraceEvent;
+class Ext4ExtLoadExtentFtraceEvent;
+class Ext4ExtMapBlocksEnterFtraceEvent;
+class Ext4ExtMapBlocksExitFtraceEvent;
+class Ext4ExtPutInCacheFtraceEvent;
+class Ext4ExtRemoveSpaceDoneFtraceEvent;
+class Ext4ExtRemoveSpaceFtraceEvent;
+class Ext4ExtRmIdxFtraceEvent;
+class Ext4ExtRmLeafFtraceEvent;
+class Ext4ExtShowExtentFtraceEvent;
+class Ext4FallocateEnterFtraceEvent;
+class Ext4FallocateExitFtraceEvent;
+class Ext4FindDelallocRangeFtraceEvent;
+class Ext4ForgetFtraceEvent;
+class Ext4FreeBlocksFtraceEvent;
+class Ext4FreeInodeFtraceEvent;
+class Ext4GetImpliedClusterAllocExitFtraceEvent;
+class Ext4GetReservedClusterAllocFtraceEvent;
+class Ext4IndMapBlocksEnterFtraceEvent;
+class Ext4IndMapBlocksExitFtraceEvent;
+class Ext4InsertRangeFtraceEvent;
+class Ext4InvalidatepageFtraceEvent;
+class Ext4JournalStartFtraceEvent;
+class Ext4JournalStartReservedFtraceEvent;
+class Ext4JournalledInvalidatepageFtraceEvent;
+class Ext4JournalledWriteEndFtraceEvent;
+class Ext4LoadInodeBitmapFtraceEvent;
+class Ext4LoadInodeFtraceEvent;
+class Ext4MarkInodeDirtyFtraceEvent;
+class Ext4MbBitmapLoadFtraceEvent;
+class Ext4MbBuddyBitmapLoadFtraceEvent;
+class Ext4MbDiscardPreallocationsFtraceEvent;
+class Ext4MbNewGroupPaFtraceEvent;
+class Ext4MbNewInodePaFtraceEvent;
+class Ext4MbReleaseGroupPaFtraceEvent;
+class Ext4MbReleaseInodePaFtraceEvent;
+class Ext4MballocAllocFtraceEvent;
+class Ext4MballocDiscardFtraceEvent;
+class Ext4MballocFreeFtraceEvent;
+class Ext4MballocPreallocFtraceEvent;
+class Ext4OtherInodeUpdateTimeFtraceEvent;
+class Ext4PunchHoleFtraceEvent;
+class Ext4ReadBlockBitmapLoadFtraceEvent;
+class Ext4ReadpageFtraceEvent;
+class Ext4ReleasepageFtraceEvent;
+class Ext4RemoveBlocksFtraceEvent;
+class Ext4RequestBlocksFtraceEvent;
+class Ext4RequestInodeFtraceEvent;
+class Ext4SyncFileEnterFtraceEvent;
+class Ext4SyncFileExitFtraceEvent;
+class Ext4SyncFsFtraceEvent;
+class Ext4TrimAllFreeFtraceEvent;
+class Ext4TrimExtentFtraceEvent;
+class Ext4TruncateEnterFtraceEvent;
+class Ext4TruncateExitFtraceEvent;
+class Ext4UnlinkEnterFtraceEvent;
+class Ext4UnlinkExitFtraceEvent;
+class Ext4WriteBeginFtraceEvent;
+class Ext4WriteEndFtraceEvent;
+class Ext4WritepageFtraceEvent;
+class Ext4WritepagesFtraceEvent;
+class Ext4WritepagesResultFtraceEvent;
+class Ext4ZeroRangeFtraceEvent;
+class F2fsDoSubmitBioFtraceEvent;
+class F2fsEvictInodeFtraceEvent;
+class F2fsFallocateFtraceEvent;
+class F2fsGetDataBlockFtraceEvent;
+class F2fsGetVictimFtraceEvent;
+class F2fsIgetExitFtraceEvent;
+class F2fsIgetFtraceEvent;
+class F2fsNewInodeFtraceEvent;
+class F2fsReadpageFtraceEvent;
+class F2fsReserveNewBlockFtraceEvent;
+class F2fsSetPageDirtyFtraceEvent;
+class F2fsSubmitWritePageFtraceEvent;
+class F2fsSyncFileEnterFtraceEvent;
+class F2fsSyncFileExitFtraceEvent;
+class F2fsSyncFsFtraceEvent;
+class F2fsTruncateBlocksEnterFtraceEvent;
+class F2fsTruncateBlocksExitFtraceEvent;
+class F2fsTruncateDataBlocksRangeFtraceEvent;
+class F2fsTruncateFtraceEvent;
+class F2fsTruncateInodeBlocksEnterFtraceEvent;
+class F2fsTruncateInodeBlocksExitFtraceEvent;
+class F2fsTruncateNodeFtraceEvent;
+class F2fsTruncateNodesEnterFtraceEvent;
+class F2fsTruncateNodesExitFtraceEvent;
+class F2fsTruncatePartialNodesFtraceEvent;
+class F2fsUnlinkEnterFtraceEvent;
+class F2fsUnlinkExitFtraceEvent;
+class F2fsVmPageMkwriteFtraceEvent;
+class F2fsWriteBeginFtraceEvent;
+class F2fsWriteCheckpointFtraceEvent;
+class F2fsWriteEndFtraceEvent;
+class FastrpcDmaStatFtraceEvent;
+class FenceDestroyFtraceEvent;
+class FenceEnableSignalFtraceEvent;
+class FenceInitFtraceEvent;
+class FenceSignaledFtraceEvent;
+class G2dTracingMarkWriteFtraceEvent;
+class GenericFtraceEvent;
+class GpuFrequencyFtraceEvent;
+class GpuMemTotalFtraceEvent;
+class I2cReadFtraceEvent;
+class I2cReplyFtraceEvent;
+class I2cResultFtraceEvent;
+class I2cWriteFtraceEvent;
+class IommuMapRangeFtraceEvent;
+class IommuSecPtblMapRangeEndFtraceEvent;
+class IommuSecPtblMapRangeStartFtraceEvent;
+class IonAllocBufferEndFtraceEvent;
+class IonAllocBufferFailFtraceEvent;
+class IonAllocBufferFallbackFtraceEvent;
+class IonAllocBufferStartFtraceEvent;
+class IonBufferCreateFtraceEvent;
+class IonBufferDestroyFtraceEvent;
+class IonCpAllocRetryFtraceEvent;
+class IonCpSecureBufferEndFtraceEvent;
+class IonCpSecureBufferStartFtraceEvent;
+class IonHeapGrowFtraceEvent;
+class IonHeapShrinkFtraceEvent;
+class IonPrefetchingFtraceEvent;
+class IonSecureCmaAddToPoolEndFtraceEvent;
+class IonSecureCmaAddToPoolStartFtraceEvent;
+class IonSecureCmaAllocateEndFtraceEvent;
+class IonSecureCmaAllocateStartFtraceEvent;
+class IonSecureCmaShrinkPoolEndFtraceEvent;
+class IonSecureCmaShrinkPoolStartFtraceEvent;
+class IonStatFtraceEvent;
+class IpiEntryFtraceEvent;
+class IpiExitFtraceEvent;
+class IpiRaiseFtraceEvent;
+class IrqHandlerEntryFtraceEvent;
+class IrqHandlerExitFtraceEvent;
+class KfreeFtraceEvent;
+class KmallocFtraceEvent;
+class KmallocNodeFtraceEvent;
+class KmemCacheAllocFtraceEvent;
+class KmemCacheAllocNodeFtraceEvent;
+class KmemCacheFreeFtraceEvent;
+class LowmemoryKillFtraceEvent;
+class MaliTracingMarkWriteFtraceEvent;
+class MarkVictimFtraceEvent;
+class MdpCmdKickoffFtraceEvent;
+class MdpCmdPingpongDoneFtraceEvent;
+class MdpCmdReadptrDoneFtraceEvent;
+class MdpCmdReleaseBwFtraceEvent;
+class MdpCmdWaitPingpongFtraceEvent;
+class MdpCommitFtraceEvent;
+class MdpCompareBwFtraceEvent;
+class MdpMisrCrcFtraceEvent;
+class MdpMixerUpdateFtraceEvent;
+class MdpPerfPrefillCalcFtraceEvent;
+class MdpPerfSetOtFtraceEvent;
+class MdpPerfSetPanicLutsFtraceEvent;
+class MdpPerfSetQosLutsFtraceEvent;
+class MdpPerfSetWmLevelsFtraceEvent;
+class MdpPerfUpdateBusFtraceEvent;
+class MdpSsppChangeFtraceEvent;
+class MdpSsppSetFtraceEvent;
+class MdpTraceCounterFtraceEvent;
+class MdpVideoUnderrunDoneFtraceEvent;
+class MigratePagesEndFtraceEvent;
+class MigratePagesStartFtraceEvent;
+class MigrateRetryFtraceEvent;
+class MmCompactionBeginFtraceEvent;
+class MmCompactionDeferCompactionFtraceEvent;
+class MmCompactionDeferResetFtraceEvent;
+class MmCompactionDeferredFtraceEvent;
+class MmCompactionEndFtraceEvent;
+class MmCompactionFinishedFtraceEvent;
+class MmCompactionIsolateFreepagesFtraceEvent;
+class MmCompactionIsolateMigratepagesFtraceEvent;
+class MmCompactionKcompactdSleepFtraceEvent;
+class MmCompactionKcompactdWakeFtraceEvent;
+class MmCompactionMigratepagesFtraceEvent;
+class MmCompactionSuitableFtraceEvent;
+class MmCompactionTryToCompactPagesFtraceEvent;
+class MmCompactionWakeupKcompactdFtraceEvent;
+class MmEventRecordFtraceEvent;
+class MmFilemapAddToPageCacheFtraceEvent;
+class MmFilemapDeleteFromPageCacheFtraceEvent;
+class MmPageAllocExtfragFtraceEvent;
+class MmPageAllocFtraceEvent;
+class MmPageAllocZoneLockedFtraceEvent;
+class MmPageFreeBatchedFtraceEvent;
+class MmPageFreeFtraceEvent;
+class MmPagePcpuDrainFtraceEvent;
+class MmVmscanDirectReclaimBeginFtraceEvent;
+class MmVmscanDirectReclaimEndFtraceEvent;
+class MmVmscanKswapdSleepFtraceEvent;
+class MmVmscanKswapdWakeFtraceEvent;
+class OomScoreAdjUpdateFtraceEvent;
+class PrintFtraceEvent;
+class RegulatorDisableCompleteFtraceEvent;
+class RegulatorDisableFtraceEvent;
+class RegulatorEnableCompleteFtraceEvent;
+class RegulatorEnableDelayFtraceEvent;
+class RegulatorEnableFtraceEvent;
+class RegulatorSetVoltageCompleteFtraceEvent;
+class RegulatorSetVoltageFtraceEvent;
+class RotatorBwAoAsContextFtraceEvent;
+class RssStatFtraceEvent;
+class SchedBlockedReasonFtraceEvent;
+class SchedCpuHotplugFtraceEvent;
+class SchedPiSetprioFtraceEvent;
+class SchedProcessExecFtraceEvent;
+class SchedProcessExitFtraceEvent;
+class SchedProcessForkFtraceEvent;
+class SchedProcessFreeFtraceEvent;
+class SchedProcessHangFtraceEvent;
+class SchedProcessWaitFtraceEvent;
+class SchedSwitchFtraceEvent;
+class SchedWakeupFtraceEvent;
+class SchedWakeupNewFtraceEvent;
+class SchedWakingFtraceEvent;
+class ScmCallEndFtraceEvent;
+class ScmCallStartFtraceEvent;
+class SdeSdeEvtlogFtraceEvent;
+class SdeSdePerfCalcCrtcFtraceEvent;
+class SdeSdePerfCrtcUpdateFtraceEvent;
+class SdeSdePerfSetQosLutsFtraceEvent;
+class SdeSdePerfUpdateBusFtraceEvent;
+class SdeTracingMarkWriteFtraceEvent;
+class SignalDeliverFtraceEvent;
+class SignalGenerateFtraceEvent;
+class SmbusReadFtraceEvent;
+class SmbusReplyFtraceEvent;
+class SmbusResultFtraceEvent;
+class SmbusWriteFtraceEvent;
+class SoftirqEntryFtraceEvent;
+class SoftirqExitFtraceEvent;
+class SoftirqRaiseFtraceEvent;
+class SuspendResumeFtraceEvent;
+class SyncPtFtraceEvent;
+class SyncTimelineFtraceEvent;
+class SyncWaitFtraceEvent;
+class SysEnterFtraceEvent;
+class SysExitFtraceEvent;
+class TaskNewtaskFtraceEvent;
+class TaskRenameFtraceEvent;
+class ThermalTemperatureFtraceEvent;
+class TracingMarkWriteFtraceEvent;
+class WorkqueueActivateWorkFtraceEvent;
+class WorkqueueExecuteEndFtraceEvent;
+class WorkqueueExecuteStartFtraceEvent;
+class WorkqueueQueueWorkFtraceEvent;
+class ZeroFtraceEvent;
+
+class FtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/358, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  FtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_timestamp() const { return at<1>().valid(); }
+  uint64_t timestamp() const { return at<1>().as_uint64(); }
+  bool has_pid() const { return at<2>().valid(); }
+  uint32_t pid() const { return at<2>().as_uint32(); }
+  bool has_print() const { return at<3>().valid(); }
+  ::protozero::ConstBytes print() const { return at<3>().as_bytes(); }
+  bool has_sched_switch() const { return at<4>().valid(); }
+  ::protozero::ConstBytes sched_switch() const { return at<4>().as_bytes(); }
+  bool has_cpu_frequency() const { return at<11>().valid(); }
+  ::protozero::ConstBytes cpu_frequency() const { return at<11>().as_bytes(); }
+  bool has_cpu_frequency_limits() const { return at<12>().valid(); }
+  ::protozero::ConstBytes cpu_frequency_limits() const { return at<12>().as_bytes(); }
+  bool has_cpu_idle() const { return at<13>().valid(); }
+  ::protozero::ConstBytes cpu_idle() const { return at<13>().as_bytes(); }
+  bool has_clock_enable() const { return at<14>().valid(); }
+  ::protozero::ConstBytes clock_enable() const { return at<14>().as_bytes(); }
+  bool has_clock_disable() const { return at<15>().valid(); }
+  ::protozero::ConstBytes clock_disable() const { return at<15>().as_bytes(); }
+  bool has_clock_set_rate() const { return at<16>().valid(); }
+  ::protozero::ConstBytes clock_set_rate() const { return at<16>().as_bytes(); }
+  bool has_sched_wakeup() const { return at<17>().valid(); }
+  ::protozero::ConstBytes sched_wakeup() const { return at<17>().as_bytes(); }
+  bool has_sched_blocked_reason() const { return at<18>().valid(); }
+  ::protozero::ConstBytes sched_blocked_reason() const { return at<18>().as_bytes(); }
+  bool has_sched_cpu_hotplug() const { return at<19>().valid(); }
+  ::protozero::ConstBytes sched_cpu_hotplug() const { return at<19>().as_bytes(); }
+  bool has_sched_waking() const { return at<20>().valid(); }
+  ::protozero::ConstBytes sched_waking() const { return at<20>().as_bytes(); }
+  bool has_ipi_entry() const { return at<21>().valid(); }
+  ::protozero::ConstBytes ipi_entry() const { return at<21>().as_bytes(); }
+  bool has_ipi_exit() const { return at<22>().valid(); }
+  ::protozero::ConstBytes ipi_exit() const { return at<22>().as_bytes(); }
+  bool has_ipi_raise() const { return at<23>().valid(); }
+  ::protozero::ConstBytes ipi_raise() const { return at<23>().as_bytes(); }
+  bool has_softirq_entry() const { return at<24>().valid(); }
+  ::protozero::ConstBytes softirq_entry() const { return at<24>().as_bytes(); }
+  bool has_softirq_exit() const { return at<25>().valid(); }
+  ::protozero::ConstBytes softirq_exit() const { return at<25>().as_bytes(); }
+  bool has_softirq_raise() const { return at<26>().valid(); }
+  ::protozero::ConstBytes softirq_raise() const { return at<26>().as_bytes(); }
+  bool has_i2c_read() const { return at<27>().valid(); }
+  ::protozero::ConstBytes i2c_read() const { return at<27>().as_bytes(); }
+  bool has_i2c_write() const { return at<28>().valid(); }
+  ::protozero::ConstBytes i2c_write() const { return at<28>().as_bytes(); }
+  bool has_i2c_result() const { return at<29>().valid(); }
+  ::protozero::ConstBytes i2c_result() const { return at<29>().as_bytes(); }
+  bool has_i2c_reply() const { return at<30>().valid(); }
+  ::protozero::ConstBytes i2c_reply() const { return at<30>().as_bytes(); }
+  bool has_smbus_read() const { return at<31>().valid(); }
+  ::protozero::ConstBytes smbus_read() const { return at<31>().as_bytes(); }
+  bool has_smbus_write() const { return at<32>().valid(); }
+  ::protozero::ConstBytes smbus_write() const { return at<32>().as_bytes(); }
+  bool has_smbus_result() const { return at<33>().valid(); }
+  ::protozero::ConstBytes smbus_result() const { return at<33>().as_bytes(); }
+  bool has_smbus_reply() const { return at<34>().valid(); }
+  ::protozero::ConstBytes smbus_reply() const { return at<34>().as_bytes(); }
+  bool has_lowmemory_kill() const { return at<35>().valid(); }
+  ::protozero::ConstBytes lowmemory_kill() const { return at<35>().as_bytes(); }
+  bool has_irq_handler_entry() const { return at<36>().valid(); }
+  ::protozero::ConstBytes irq_handler_entry() const { return at<36>().as_bytes(); }
+  bool has_irq_handler_exit() const { return at<37>().valid(); }
+  ::protozero::ConstBytes irq_handler_exit() const { return at<37>().as_bytes(); }
+  bool has_sync_pt() const { return at<38>().valid(); }
+  ::protozero::ConstBytes sync_pt() const { return at<38>().as_bytes(); }
+  bool has_sync_timeline() const { return at<39>().valid(); }
+  ::protozero::ConstBytes sync_timeline() const { return at<39>().as_bytes(); }
+  bool has_sync_wait() const { return at<40>().valid(); }
+  ::protozero::ConstBytes sync_wait() const { return at<40>().as_bytes(); }
+  bool has_ext4_da_write_begin() const { return at<41>().valid(); }
+  ::protozero::ConstBytes ext4_da_write_begin() const { return at<41>().as_bytes(); }
+  bool has_ext4_da_write_end() const { return at<42>().valid(); }
+  ::protozero::ConstBytes ext4_da_write_end() const { return at<42>().as_bytes(); }
+  bool has_ext4_sync_file_enter() const { return at<43>().valid(); }
+  ::protozero::ConstBytes ext4_sync_file_enter() const { return at<43>().as_bytes(); }
+  bool has_ext4_sync_file_exit() const { return at<44>().valid(); }
+  ::protozero::ConstBytes ext4_sync_file_exit() const { return at<44>().as_bytes(); }
+  bool has_block_rq_issue() const { return at<45>().valid(); }
+  ::protozero::ConstBytes block_rq_issue() const { return at<45>().as_bytes(); }
+  bool has_mm_vmscan_direct_reclaim_begin() const { return at<46>().valid(); }
+  ::protozero::ConstBytes mm_vmscan_direct_reclaim_begin() const { return at<46>().as_bytes(); }
+  bool has_mm_vmscan_direct_reclaim_end() const { return at<47>().valid(); }
+  ::protozero::ConstBytes mm_vmscan_direct_reclaim_end() const { return at<47>().as_bytes(); }
+  bool has_mm_vmscan_kswapd_wake() const { return at<48>().valid(); }
+  ::protozero::ConstBytes mm_vmscan_kswapd_wake() const { return at<48>().as_bytes(); }
+  bool has_mm_vmscan_kswapd_sleep() const { return at<49>().valid(); }
+  ::protozero::ConstBytes mm_vmscan_kswapd_sleep() const { return at<49>().as_bytes(); }
+  bool has_binder_transaction() const { return at<50>().valid(); }
+  ::protozero::ConstBytes binder_transaction() const { return at<50>().as_bytes(); }
+  bool has_binder_transaction_received() const { return at<51>().valid(); }
+  ::protozero::ConstBytes binder_transaction_received() const { return at<51>().as_bytes(); }
+  bool has_binder_set_priority() const { return at<52>().valid(); }
+  ::protozero::ConstBytes binder_set_priority() const { return at<52>().as_bytes(); }
+  bool has_binder_lock() const { return at<53>().valid(); }
+  ::protozero::ConstBytes binder_lock() const { return at<53>().as_bytes(); }
+  bool has_binder_locked() const { return at<54>().valid(); }
+  ::protozero::ConstBytes binder_locked() const { return at<54>().as_bytes(); }
+  bool has_binder_unlock() const { return at<55>().valid(); }
+  ::protozero::ConstBytes binder_unlock() const { return at<55>().as_bytes(); }
+  bool has_workqueue_activate_work() const { return at<56>().valid(); }
+  ::protozero::ConstBytes workqueue_activate_work() const { return at<56>().as_bytes(); }
+  bool has_workqueue_execute_end() const { return at<57>().valid(); }
+  ::protozero::ConstBytes workqueue_execute_end() const { return at<57>().as_bytes(); }
+  bool has_workqueue_execute_start() const { return at<58>().valid(); }
+  ::protozero::ConstBytes workqueue_execute_start() const { return at<58>().as_bytes(); }
+  bool has_workqueue_queue_work() const { return at<59>().valid(); }
+  ::protozero::ConstBytes workqueue_queue_work() const { return at<59>().as_bytes(); }
+  bool has_regulator_disable() const { return at<60>().valid(); }
+  ::protozero::ConstBytes regulator_disable() const { return at<60>().as_bytes(); }
+  bool has_regulator_disable_complete() const { return at<61>().valid(); }
+  ::protozero::ConstBytes regulator_disable_complete() const { return at<61>().as_bytes(); }
+  bool has_regulator_enable() const { return at<62>().valid(); }
+  ::protozero::ConstBytes regulator_enable() const { return at<62>().as_bytes(); }
+  bool has_regulator_enable_complete() const { return at<63>().valid(); }
+  ::protozero::ConstBytes regulator_enable_complete() const { return at<63>().as_bytes(); }
+  bool has_regulator_enable_delay() const { return at<64>().valid(); }
+  ::protozero::ConstBytes regulator_enable_delay() const { return at<64>().as_bytes(); }
+  bool has_regulator_set_voltage() const { return at<65>().valid(); }
+  ::protozero::ConstBytes regulator_set_voltage() const { return at<65>().as_bytes(); }
+  bool has_regulator_set_voltage_complete() const { return at<66>().valid(); }
+  ::protozero::ConstBytes regulator_set_voltage_complete() const { return at<66>().as_bytes(); }
+  bool has_cgroup_attach_task() const { return at<67>().valid(); }
+  ::protozero::ConstBytes cgroup_attach_task() const { return at<67>().as_bytes(); }
+  bool has_cgroup_mkdir() const { return at<68>().valid(); }
+  ::protozero::ConstBytes cgroup_mkdir() const { return at<68>().as_bytes(); }
+  bool has_cgroup_remount() const { return at<69>().valid(); }
+  ::protozero::ConstBytes cgroup_remount() const { return at<69>().as_bytes(); }
+  bool has_cgroup_rmdir() const { return at<70>().valid(); }
+  ::protozero::ConstBytes cgroup_rmdir() const { return at<70>().as_bytes(); }
+  bool has_cgroup_transfer_tasks() const { return at<71>().valid(); }
+  ::protozero::ConstBytes cgroup_transfer_tasks() const { return at<71>().as_bytes(); }
+  bool has_cgroup_destroy_root() const { return at<72>().valid(); }
+  ::protozero::ConstBytes cgroup_destroy_root() const { return at<72>().as_bytes(); }
+  bool has_cgroup_release() const { return at<73>().valid(); }
+  ::protozero::ConstBytes cgroup_release() const { return at<73>().as_bytes(); }
+  bool has_cgroup_rename() const { return at<74>().valid(); }
+  ::protozero::ConstBytes cgroup_rename() const { return at<74>().as_bytes(); }
+  bool has_cgroup_setup_root() const { return at<75>().valid(); }
+  ::protozero::ConstBytes cgroup_setup_root() const { return at<75>().as_bytes(); }
+  bool has_mdp_cmd_kickoff() const { return at<76>().valid(); }
+  ::protozero::ConstBytes mdp_cmd_kickoff() const { return at<76>().as_bytes(); }
+  bool has_mdp_commit() const { return at<77>().valid(); }
+  ::protozero::ConstBytes mdp_commit() const { return at<77>().as_bytes(); }
+  bool has_mdp_perf_set_ot() const { return at<78>().valid(); }
+  ::protozero::ConstBytes mdp_perf_set_ot() const { return at<78>().as_bytes(); }
+  bool has_mdp_sspp_change() const { return at<79>().valid(); }
+  ::protozero::ConstBytes mdp_sspp_change() const { return at<79>().as_bytes(); }
+  bool has_tracing_mark_write() const { return at<80>().valid(); }
+  ::protozero::ConstBytes tracing_mark_write() const { return at<80>().as_bytes(); }
+  bool has_mdp_cmd_pingpong_done() const { return at<81>().valid(); }
+  ::protozero::ConstBytes mdp_cmd_pingpong_done() const { return at<81>().as_bytes(); }
+  bool has_mdp_compare_bw() const { return at<82>().valid(); }
+  ::protozero::ConstBytes mdp_compare_bw() const { return at<82>().as_bytes(); }
+  bool has_mdp_perf_set_panic_luts() const { return at<83>().valid(); }
+  ::protozero::ConstBytes mdp_perf_set_panic_luts() const { return at<83>().as_bytes(); }
+  bool has_mdp_sspp_set() const { return at<84>().valid(); }
+  ::protozero::ConstBytes mdp_sspp_set() const { return at<84>().as_bytes(); }
+  bool has_mdp_cmd_readptr_done() const { return at<85>().valid(); }
+  ::protozero::ConstBytes mdp_cmd_readptr_done() const { return at<85>().as_bytes(); }
+  bool has_mdp_misr_crc() const { return at<86>().valid(); }
+  ::protozero::ConstBytes mdp_misr_crc() const { return at<86>().as_bytes(); }
+  bool has_mdp_perf_set_qos_luts() const { return at<87>().valid(); }
+  ::protozero::ConstBytes mdp_perf_set_qos_luts() const { return at<87>().as_bytes(); }
+  bool has_mdp_trace_counter() const { return at<88>().valid(); }
+  ::protozero::ConstBytes mdp_trace_counter() const { return at<88>().as_bytes(); }
+  bool has_mdp_cmd_release_bw() const { return at<89>().valid(); }
+  ::protozero::ConstBytes mdp_cmd_release_bw() const { return at<89>().as_bytes(); }
+  bool has_mdp_mixer_update() const { return at<90>().valid(); }
+  ::protozero::ConstBytes mdp_mixer_update() const { return at<90>().as_bytes(); }
+  bool has_mdp_perf_set_wm_levels() const { return at<91>().valid(); }
+  ::protozero::ConstBytes mdp_perf_set_wm_levels() const { return at<91>().as_bytes(); }
+  bool has_mdp_video_underrun_done() const { return at<92>().valid(); }
+  ::protozero::ConstBytes mdp_video_underrun_done() const { return at<92>().as_bytes(); }
+  bool has_mdp_cmd_wait_pingpong() const { return at<93>().valid(); }
+  ::protozero::ConstBytes mdp_cmd_wait_pingpong() const { return at<93>().as_bytes(); }
+  bool has_mdp_perf_prefill_calc() const { return at<94>().valid(); }
+  ::protozero::ConstBytes mdp_perf_prefill_calc() const { return at<94>().as_bytes(); }
+  bool has_mdp_perf_update_bus() const { return at<95>().valid(); }
+  ::protozero::ConstBytes mdp_perf_update_bus() const { return at<95>().as_bytes(); }
+  bool has_rotator_bw_ao_as_context() const { return at<96>().valid(); }
+  ::protozero::ConstBytes rotator_bw_ao_as_context() const { return at<96>().as_bytes(); }
+  bool has_mm_filemap_add_to_page_cache() const { return at<97>().valid(); }
+  ::protozero::ConstBytes mm_filemap_add_to_page_cache() const { return at<97>().as_bytes(); }
+  bool has_mm_filemap_delete_from_page_cache() const { return at<98>().valid(); }
+  ::protozero::ConstBytes mm_filemap_delete_from_page_cache() const { return at<98>().as_bytes(); }
+  bool has_mm_compaction_begin() const { return at<99>().valid(); }
+  ::protozero::ConstBytes mm_compaction_begin() const { return at<99>().as_bytes(); }
+  bool has_mm_compaction_defer_compaction() const { return at<100>().valid(); }
+  ::protozero::ConstBytes mm_compaction_defer_compaction() const { return at<100>().as_bytes(); }
+  bool has_mm_compaction_deferred() const { return at<101>().valid(); }
+  ::protozero::ConstBytes mm_compaction_deferred() const { return at<101>().as_bytes(); }
+  bool has_mm_compaction_defer_reset() const { return at<102>().valid(); }
+  ::protozero::ConstBytes mm_compaction_defer_reset() const { return at<102>().as_bytes(); }
+  bool has_mm_compaction_end() const { return at<103>().valid(); }
+  ::protozero::ConstBytes mm_compaction_end() const { return at<103>().as_bytes(); }
+  bool has_mm_compaction_finished() const { return at<104>().valid(); }
+  ::protozero::ConstBytes mm_compaction_finished() const { return at<104>().as_bytes(); }
+  bool has_mm_compaction_isolate_freepages() const { return at<105>().valid(); }
+  ::protozero::ConstBytes mm_compaction_isolate_freepages() const { return at<105>().as_bytes(); }
+  bool has_mm_compaction_isolate_migratepages() const { return at<106>().valid(); }
+  ::protozero::ConstBytes mm_compaction_isolate_migratepages() const { return at<106>().as_bytes(); }
+  bool has_mm_compaction_kcompactd_sleep() const { return at<107>().valid(); }
+  ::protozero::ConstBytes mm_compaction_kcompactd_sleep() const { return at<107>().as_bytes(); }
+  bool has_mm_compaction_kcompactd_wake() const { return at<108>().valid(); }
+  ::protozero::ConstBytes mm_compaction_kcompactd_wake() const { return at<108>().as_bytes(); }
+  bool has_mm_compaction_migratepages() const { return at<109>().valid(); }
+  ::protozero::ConstBytes mm_compaction_migratepages() const { return at<109>().as_bytes(); }
+  bool has_mm_compaction_suitable() const { return at<110>().valid(); }
+  ::protozero::ConstBytes mm_compaction_suitable() const { return at<110>().as_bytes(); }
+  bool has_mm_compaction_try_to_compact_pages() const { return at<111>().valid(); }
+  ::protozero::ConstBytes mm_compaction_try_to_compact_pages() const { return at<111>().as_bytes(); }
+  bool has_mm_compaction_wakeup_kcompactd() const { return at<112>().valid(); }
+  ::protozero::ConstBytes mm_compaction_wakeup_kcompactd() const { return at<112>().as_bytes(); }
+  bool has_suspend_resume() const { return at<113>().valid(); }
+  ::protozero::ConstBytes suspend_resume() const { return at<113>().as_bytes(); }
+  bool has_sched_wakeup_new() const { return at<114>().valid(); }
+  ::protozero::ConstBytes sched_wakeup_new() const { return at<114>().as_bytes(); }
+  bool has_block_bio_backmerge() const { return at<115>().valid(); }
+  ::protozero::ConstBytes block_bio_backmerge() const { return at<115>().as_bytes(); }
+  bool has_block_bio_bounce() const { return at<116>().valid(); }
+  ::protozero::ConstBytes block_bio_bounce() const { return at<116>().as_bytes(); }
+  bool has_block_bio_complete() const { return at<117>().valid(); }
+  ::protozero::ConstBytes block_bio_complete() const { return at<117>().as_bytes(); }
+  bool has_block_bio_frontmerge() const { return at<118>().valid(); }
+  ::protozero::ConstBytes block_bio_frontmerge() const { return at<118>().as_bytes(); }
+  bool has_block_bio_queue() const { return at<119>().valid(); }
+  ::protozero::ConstBytes block_bio_queue() const { return at<119>().as_bytes(); }
+  bool has_block_bio_remap() const { return at<120>().valid(); }
+  ::protozero::ConstBytes block_bio_remap() const { return at<120>().as_bytes(); }
+  bool has_block_dirty_buffer() const { return at<121>().valid(); }
+  ::protozero::ConstBytes block_dirty_buffer() const { return at<121>().as_bytes(); }
+  bool has_block_getrq() const { return at<122>().valid(); }
+  ::protozero::ConstBytes block_getrq() const { return at<122>().as_bytes(); }
+  bool has_block_plug() const { return at<123>().valid(); }
+  ::protozero::ConstBytes block_plug() const { return at<123>().as_bytes(); }
+  bool has_block_rq_abort() const { return at<124>().valid(); }
+  ::protozero::ConstBytes block_rq_abort() const { return at<124>().as_bytes(); }
+  bool has_block_rq_complete() const { return at<125>().valid(); }
+  ::protozero::ConstBytes block_rq_complete() const { return at<125>().as_bytes(); }
+  bool has_block_rq_insert() const { return at<126>().valid(); }
+  ::protozero::ConstBytes block_rq_insert() const { return at<126>().as_bytes(); }
+  bool has_block_rq_remap() const { return at<128>().valid(); }
+  ::protozero::ConstBytes block_rq_remap() const { return at<128>().as_bytes(); }
+  bool has_block_rq_requeue() const { return at<129>().valid(); }
+  ::protozero::ConstBytes block_rq_requeue() const { return at<129>().as_bytes(); }
+  bool has_block_sleeprq() const { return at<130>().valid(); }
+  ::protozero::ConstBytes block_sleeprq() const { return at<130>().as_bytes(); }
+  bool has_block_split() const { return at<131>().valid(); }
+  ::protozero::ConstBytes block_split() const { return at<131>().as_bytes(); }
+  bool has_block_touch_buffer() const { return at<132>().valid(); }
+  ::protozero::ConstBytes block_touch_buffer() const { return at<132>().as_bytes(); }
+  bool has_block_unplug() const { return at<133>().valid(); }
+  ::protozero::ConstBytes block_unplug() const { return at<133>().as_bytes(); }
+  bool has_ext4_alloc_da_blocks() const { return at<134>().valid(); }
+  ::protozero::ConstBytes ext4_alloc_da_blocks() const { return at<134>().as_bytes(); }
+  bool has_ext4_allocate_blocks() const { return at<135>().valid(); }
+  ::protozero::ConstBytes ext4_allocate_blocks() const { return at<135>().as_bytes(); }
+  bool has_ext4_allocate_inode() const { return at<136>().valid(); }
+  ::protozero::ConstBytes ext4_allocate_inode() const { return at<136>().as_bytes(); }
+  bool has_ext4_begin_ordered_truncate() const { return at<137>().valid(); }
+  ::protozero::ConstBytes ext4_begin_ordered_truncate() const { return at<137>().as_bytes(); }
+  bool has_ext4_collapse_range() const { return at<138>().valid(); }
+  ::protozero::ConstBytes ext4_collapse_range() const { return at<138>().as_bytes(); }
+  bool has_ext4_da_release_space() const { return at<139>().valid(); }
+  ::protozero::ConstBytes ext4_da_release_space() const { return at<139>().as_bytes(); }
+  bool has_ext4_da_reserve_space() const { return at<140>().valid(); }
+  ::protozero::ConstBytes ext4_da_reserve_space() const { return at<140>().as_bytes(); }
+  bool has_ext4_da_update_reserve_space() const { return at<141>().valid(); }
+  ::protozero::ConstBytes ext4_da_update_reserve_space() const { return at<141>().as_bytes(); }
+  bool has_ext4_da_write_pages() const { return at<142>().valid(); }
+  ::protozero::ConstBytes ext4_da_write_pages() const { return at<142>().as_bytes(); }
+  bool has_ext4_da_write_pages_extent() const { return at<143>().valid(); }
+  ::protozero::ConstBytes ext4_da_write_pages_extent() const { return at<143>().as_bytes(); }
+  bool has_ext4_direct_io_enter() const { return at<144>().valid(); }
+  ::protozero::ConstBytes ext4_direct_io_enter() const { return at<144>().as_bytes(); }
+  bool has_ext4_direct_io_exit() const { return at<145>().valid(); }
+  ::protozero::ConstBytes ext4_direct_io_exit() const { return at<145>().as_bytes(); }
+  bool has_ext4_discard_blocks() const { return at<146>().valid(); }
+  ::protozero::ConstBytes ext4_discard_blocks() const { return at<146>().as_bytes(); }
+  bool has_ext4_discard_preallocations() const { return at<147>().valid(); }
+  ::protozero::ConstBytes ext4_discard_preallocations() const { return at<147>().as_bytes(); }
+  bool has_ext4_drop_inode() const { return at<148>().valid(); }
+  ::protozero::ConstBytes ext4_drop_inode() const { return at<148>().as_bytes(); }
+  bool has_ext4_es_cache_extent() const { return at<149>().valid(); }
+  ::protozero::ConstBytes ext4_es_cache_extent() const { return at<149>().as_bytes(); }
+  bool has_ext4_es_find_delayed_extent_range_enter() const { return at<150>().valid(); }
+  ::protozero::ConstBytes ext4_es_find_delayed_extent_range_enter() const { return at<150>().as_bytes(); }
+  bool has_ext4_es_find_delayed_extent_range_exit() const { return at<151>().valid(); }
+  ::protozero::ConstBytes ext4_es_find_delayed_extent_range_exit() const { return at<151>().as_bytes(); }
+  bool has_ext4_es_insert_extent() const { return at<152>().valid(); }
+  ::protozero::ConstBytes ext4_es_insert_extent() const { return at<152>().as_bytes(); }
+  bool has_ext4_es_lookup_extent_enter() const { return at<153>().valid(); }
+  ::protozero::ConstBytes ext4_es_lookup_extent_enter() const { return at<153>().as_bytes(); }
+  bool has_ext4_es_lookup_extent_exit() const { return at<154>().valid(); }
+  ::protozero::ConstBytes ext4_es_lookup_extent_exit() const { return at<154>().as_bytes(); }
+  bool has_ext4_es_remove_extent() const { return at<155>().valid(); }
+  ::protozero::ConstBytes ext4_es_remove_extent() const { return at<155>().as_bytes(); }
+  bool has_ext4_es_shrink() const { return at<156>().valid(); }
+  ::protozero::ConstBytes ext4_es_shrink() const { return at<156>().as_bytes(); }
+  bool has_ext4_es_shrink_count() const { return at<157>().valid(); }
+  ::protozero::ConstBytes ext4_es_shrink_count() const { return at<157>().as_bytes(); }
+  bool has_ext4_es_shrink_scan_enter() const { return at<158>().valid(); }
+  ::protozero::ConstBytes ext4_es_shrink_scan_enter() const { return at<158>().as_bytes(); }
+  bool has_ext4_es_shrink_scan_exit() const { return at<159>().valid(); }
+  ::protozero::ConstBytes ext4_es_shrink_scan_exit() const { return at<159>().as_bytes(); }
+  bool has_ext4_evict_inode() const { return at<160>().valid(); }
+  ::protozero::ConstBytes ext4_evict_inode() const { return at<160>().as_bytes(); }
+  bool has_ext4_ext_convert_to_initialized_enter() const { return at<161>().valid(); }
+  ::protozero::ConstBytes ext4_ext_convert_to_initialized_enter() const { return at<161>().as_bytes(); }
+  bool has_ext4_ext_convert_to_initialized_fastpath() const { return at<162>().valid(); }
+  ::protozero::ConstBytes ext4_ext_convert_to_initialized_fastpath() const { return at<162>().as_bytes(); }
+  bool has_ext4_ext_handle_unwritten_extents() const { return at<163>().valid(); }
+  ::protozero::ConstBytes ext4_ext_handle_unwritten_extents() const { return at<163>().as_bytes(); }
+  bool has_ext4_ext_in_cache() const { return at<164>().valid(); }
+  ::protozero::ConstBytes ext4_ext_in_cache() const { return at<164>().as_bytes(); }
+  bool has_ext4_ext_load_extent() const { return at<165>().valid(); }
+  ::protozero::ConstBytes ext4_ext_load_extent() const { return at<165>().as_bytes(); }
+  bool has_ext4_ext_map_blocks_enter() const { return at<166>().valid(); }
+  ::protozero::ConstBytes ext4_ext_map_blocks_enter() const { return at<166>().as_bytes(); }
+  bool has_ext4_ext_map_blocks_exit() const { return at<167>().valid(); }
+  ::protozero::ConstBytes ext4_ext_map_blocks_exit() const { return at<167>().as_bytes(); }
+  bool has_ext4_ext_put_in_cache() const { return at<168>().valid(); }
+  ::protozero::ConstBytes ext4_ext_put_in_cache() const { return at<168>().as_bytes(); }
+  bool has_ext4_ext_remove_space() const { return at<169>().valid(); }
+  ::protozero::ConstBytes ext4_ext_remove_space() const { return at<169>().as_bytes(); }
+  bool has_ext4_ext_remove_space_done() const { return at<170>().valid(); }
+  ::protozero::ConstBytes ext4_ext_remove_space_done() const { return at<170>().as_bytes(); }
+  bool has_ext4_ext_rm_idx() const { return at<171>().valid(); }
+  ::protozero::ConstBytes ext4_ext_rm_idx() const { return at<171>().as_bytes(); }
+  bool has_ext4_ext_rm_leaf() const { return at<172>().valid(); }
+  ::protozero::ConstBytes ext4_ext_rm_leaf() const { return at<172>().as_bytes(); }
+  bool has_ext4_ext_show_extent() const { return at<173>().valid(); }
+  ::protozero::ConstBytes ext4_ext_show_extent() const { return at<173>().as_bytes(); }
+  bool has_ext4_fallocate_enter() const { return at<174>().valid(); }
+  ::protozero::ConstBytes ext4_fallocate_enter() const { return at<174>().as_bytes(); }
+  bool has_ext4_fallocate_exit() const { return at<175>().valid(); }
+  ::protozero::ConstBytes ext4_fallocate_exit() const { return at<175>().as_bytes(); }
+  bool has_ext4_find_delalloc_range() const { return at<176>().valid(); }
+  ::protozero::ConstBytes ext4_find_delalloc_range() const { return at<176>().as_bytes(); }
+  bool has_ext4_forget() const { return at<177>().valid(); }
+  ::protozero::ConstBytes ext4_forget() const { return at<177>().as_bytes(); }
+  bool has_ext4_free_blocks() const { return at<178>().valid(); }
+  ::protozero::ConstBytes ext4_free_blocks() const { return at<178>().as_bytes(); }
+  bool has_ext4_free_inode() const { return at<179>().valid(); }
+  ::protozero::ConstBytes ext4_free_inode() const { return at<179>().as_bytes(); }
+  bool has_ext4_get_implied_cluster_alloc_exit() const { return at<180>().valid(); }
+  ::protozero::ConstBytes ext4_get_implied_cluster_alloc_exit() const { return at<180>().as_bytes(); }
+  bool has_ext4_get_reserved_cluster_alloc() const { return at<181>().valid(); }
+  ::protozero::ConstBytes ext4_get_reserved_cluster_alloc() const { return at<181>().as_bytes(); }
+  bool has_ext4_ind_map_blocks_enter() const { return at<182>().valid(); }
+  ::protozero::ConstBytes ext4_ind_map_blocks_enter() const { return at<182>().as_bytes(); }
+  bool has_ext4_ind_map_blocks_exit() const { return at<183>().valid(); }
+  ::protozero::ConstBytes ext4_ind_map_blocks_exit() const { return at<183>().as_bytes(); }
+  bool has_ext4_insert_range() const { return at<184>().valid(); }
+  ::protozero::ConstBytes ext4_insert_range() const { return at<184>().as_bytes(); }
+  bool has_ext4_invalidatepage() const { return at<185>().valid(); }
+  ::protozero::ConstBytes ext4_invalidatepage() const { return at<185>().as_bytes(); }
+  bool has_ext4_journal_start() const { return at<186>().valid(); }
+  ::protozero::ConstBytes ext4_journal_start() const { return at<186>().as_bytes(); }
+  bool has_ext4_journal_start_reserved() const { return at<187>().valid(); }
+  ::protozero::ConstBytes ext4_journal_start_reserved() const { return at<187>().as_bytes(); }
+  bool has_ext4_journalled_invalidatepage() const { return at<188>().valid(); }
+  ::protozero::ConstBytes ext4_journalled_invalidatepage() const { return at<188>().as_bytes(); }
+  bool has_ext4_journalled_write_end() const { return at<189>().valid(); }
+  ::protozero::ConstBytes ext4_journalled_write_end() const { return at<189>().as_bytes(); }
+  bool has_ext4_load_inode() const { return at<190>().valid(); }
+  ::protozero::ConstBytes ext4_load_inode() const { return at<190>().as_bytes(); }
+  bool has_ext4_load_inode_bitmap() const { return at<191>().valid(); }
+  ::protozero::ConstBytes ext4_load_inode_bitmap() const { return at<191>().as_bytes(); }
+  bool has_ext4_mark_inode_dirty() const { return at<192>().valid(); }
+  ::protozero::ConstBytes ext4_mark_inode_dirty() const { return at<192>().as_bytes(); }
+  bool has_ext4_mb_bitmap_load() const { return at<193>().valid(); }
+  ::protozero::ConstBytes ext4_mb_bitmap_load() const { return at<193>().as_bytes(); }
+  bool has_ext4_mb_buddy_bitmap_load() const { return at<194>().valid(); }
+  ::protozero::ConstBytes ext4_mb_buddy_bitmap_load() const { return at<194>().as_bytes(); }
+  bool has_ext4_mb_discard_preallocations() const { return at<195>().valid(); }
+  ::protozero::ConstBytes ext4_mb_discard_preallocations() const { return at<195>().as_bytes(); }
+  bool has_ext4_mb_new_group_pa() const { return at<196>().valid(); }
+  ::protozero::ConstBytes ext4_mb_new_group_pa() const { return at<196>().as_bytes(); }
+  bool has_ext4_mb_new_inode_pa() const { return at<197>().valid(); }
+  ::protozero::ConstBytes ext4_mb_new_inode_pa() const { return at<197>().as_bytes(); }
+  bool has_ext4_mb_release_group_pa() const { return at<198>().valid(); }
+  ::protozero::ConstBytes ext4_mb_release_group_pa() const { return at<198>().as_bytes(); }
+  bool has_ext4_mb_release_inode_pa() const { return at<199>().valid(); }
+  ::protozero::ConstBytes ext4_mb_release_inode_pa() const { return at<199>().as_bytes(); }
+  bool has_ext4_mballoc_alloc() const { return at<200>().valid(); }
+  ::protozero::ConstBytes ext4_mballoc_alloc() const { return at<200>().as_bytes(); }
+  bool has_ext4_mballoc_discard() const { return at<201>().valid(); }
+  ::protozero::ConstBytes ext4_mballoc_discard() const { return at<201>().as_bytes(); }
+  bool has_ext4_mballoc_free() const { return at<202>().valid(); }
+  ::protozero::ConstBytes ext4_mballoc_free() const { return at<202>().as_bytes(); }
+  bool has_ext4_mballoc_prealloc() const { return at<203>().valid(); }
+  ::protozero::ConstBytes ext4_mballoc_prealloc() const { return at<203>().as_bytes(); }
+  bool has_ext4_other_inode_update_time() const { return at<204>().valid(); }
+  ::protozero::ConstBytes ext4_other_inode_update_time() const { return at<204>().as_bytes(); }
+  bool has_ext4_punch_hole() const { return at<205>().valid(); }
+  ::protozero::ConstBytes ext4_punch_hole() const { return at<205>().as_bytes(); }
+  bool has_ext4_read_block_bitmap_load() const { return at<206>().valid(); }
+  ::protozero::ConstBytes ext4_read_block_bitmap_load() const { return at<206>().as_bytes(); }
+  bool has_ext4_readpage() const { return at<207>().valid(); }
+  ::protozero::ConstBytes ext4_readpage() const { return at<207>().as_bytes(); }
+  bool has_ext4_releasepage() const { return at<208>().valid(); }
+  ::protozero::ConstBytes ext4_releasepage() const { return at<208>().as_bytes(); }
+  bool has_ext4_remove_blocks() const { return at<209>().valid(); }
+  ::protozero::ConstBytes ext4_remove_blocks() const { return at<209>().as_bytes(); }
+  bool has_ext4_request_blocks() const { return at<210>().valid(); }
+  ::protozero::ConstBytes ext4_request_blocks() const { return at<210>().as_bytes(); }
+  bool has_ext4_request_inode() const { return at<211>().valid(); }
+  ::protozero::ConstBytes ext4_request_inode() const { return at<211>().as_bytes(); }
+  bool has_ext4_sync_fs() const { return at<212>().valid(); }
+  ::protozero::ConstBytes ext4_sync_fs() const { return at<212>().as_bytes(); }
+  bool has_ext4_trim_all_free() const { return at<213>().valid(); }
+  ::protozero::ConstBytes ext4_trim_all_free() const { return at<213>().as_bytes(); }
+  bool has_ext4_trim_extent() const { return at<214>().valid(); }
+  ::protozero::ConstBytes ext4_trim_extent() const { return at<214>().as_bytes(); }
+  bool has_ext4_truncate_enter() const { return at<215>().valid(); }
+  ::protozero::ConstBytes ext4_truncate_enter() const { return at<215>().as_bytes(); }
+  bool has_ext4_truncate_exit() const { return at<216>().valid(); }
+  ::protozero::ConstBytes ext4_truncate_exit() const { return at<216>().as_bytes(); }
+  bool has_ext4_unlink_enter() const { return at<217>().valid(); }
+  ::protozero::ConstBytes ext4_unlink_enter() const { return at<217>().as_bytes(); }
+  bool has_ext4_unlink_exit() const { return at<218>().valid(); }
+  ::protozero::ConstBytes ext4_unlink_exit() const { return at<218>().as_bytes(); }
+  bool has_ext4_write_begin() const { return at<219>().valid(); }
+  ::protozero::ConstBytes ext4_write_begin() const { return at<219>().as_bytes(); }
+  bool has_ext4_write_end() const { return at<230>().valid(); }
+  ::protozero::ConstBytes ext4_write_end() const { return at<230>().as_bytes(); }
+  bool has_ext4_writepage() const { return at<231>().valid(); }
+  ::protozero::ConstBytes ext4_writepage() const { return at<231>().as_bytes(); }
+  bool has_ext4_writepages() const { return at<232>().valid(); }
+  ::protozero::ConstBytes ext4_writepages() const { return at<232>().as_bytes(); }
+  bool has_ext4_writepages_result() const { return at<233>().valid(); }
+  ::protozero::ConstBytes ext4_writepages_result() const { return at<233>().as_bytes(); }
+  bool has_ext4_zero_range() const { return at<234>().valid(); }
+  ::protozero::ConstBytes ext4_zero_range() const { return at<234>().as_bytes(); }
+  bool has_task_newtask() const { return at<235>().valid(); }
+  ::protozero::ConstBytes task_newtask() const { return at<235>().as_bytes(); }
+  bool has_task_rename() const { return at<236>().valid(); }
+  ::protozero::ConstBytes task_rename() const { return at<236>().as_bytes(); }
+  bool has_sched_process_exec() const { return at<237>().valid(); }
+  ::protozero::ConstBytes sched_process_exec() const { return at<237>().as_bytes(); }
+  bool has_sched_process_exit() const { return at<238>().valid(); }
+  ::protozero::ConstBytes sched_process_exit() const { return at<238>().as_bytes(); }
+  bool has_sched_process_fork() const { return at<239>().valid(); }
+  ::protozero::ConstBytes sched_process_fork() const { return at<239>().as_bytes(); }
+  bool has_sched_process_free() const { return at<240>().valid(); }
+  ::protozero::ConstBytes sched_process_free() const { return at<240>().as_bytes(); }
+  bool has_sched_process_hang() const { return at<241>().valid(); }
+  ::protozero::ConstBytes sched_process_hang() const { return at<241>().as_bytes(); }
+  bool has_sched_process_wait() const { return at<242>().valid(); }
+  ::protozero::ConstBytes sched_process_wait() const { return at<242>().as_bytes(); }
+  bool has_f2fs_do_submit_bio() const { return at<243>().valid(); }
+  ::protozero::ConstBytes f2fs_do_submit_bio() const { return at<243>().as_bytes(); }
+  bool has_f2fs_evict_inode() const { return at<244>().valid(); }
+  ::protozero::ConstBytes f2fs_evict_inode() const { return at<244>().as_bytes(); }
+  bool has_f2fs_fallocate() const { return at<245>().valid(); }
+  ::protozero::ConstBytes f2fs_fallocate() const { return at<245>().as_bytes(); }
+  bool has_f2fs_get_data_block() const { return at<246>().valid(); }
+  ::protozero::ConstBytes f2fs_get_data_block() const { return at<246>().as_bytes(); }
+  bool has_f2fs_get_victim() const { return at<247>().valid(); }
+  ::protozero::ConstBytes f2fs_get_victim() const { return at<247>().as_bytes(); }
+  bool has_f2fs_iget() const { return at<248>().valid(); }
+  ::protozero::ConstBytes f2fs_iget() const { return at<248>().as_bytes(); }
+  bool has_f2fs_iget_exit() const { return at<249>().valid(); }
+  ::protozero::ConstBytes f2fs_iget_exit() const { return at<249>().as_bytes(); }
+  bool has_f2fs_new_inode() const { return at<250>().valid(); }
+  ::protozero::ConstBytes f2fs_new_inode() const { return at<250>().as_bytes(); }
+  bool has_f2fs_readpage() const { return at<251>().valid(); }
+  ::protozero::ConstBytes f2fs_readpage() const { return at<251>().as_bytes(); }
+  bool has_f2fs_reserve_new_block() const { return at<252>().valid(); }
+  ::protozero::ConstBytes f2fs_reserve_new_block() const { return at<252>().as_bytes(); }
+  bool has_f2fs_set_page_dirty() const { return at<253>().valid(); }
+  ::protozero::ConstBytes f2fs_set_page_dirty() const { return at<253>().as_bytes(); }
+  bool has_f2fs_submit_write_page() const { return at<254>().valid(); }
+  ::protozero::ConstBytes f2fs_submit_write_page() const { return at<254>().as_bytes(); }
+  bool has_f2fs_sync_file_enter() const { return at<255>().valid(); }
+  ::protozero::ConstBytes f2fs_sync_file_enter() const { return at<255>().as_bytes(); }
+  bool has_f2fs_sync_file_exit() const { return at<256>().valid(); }
+  ::protozero::ConstBytes f2fs_sync_file_exit() const { return at<256>().as_bytes(); }
+  bool has_f2fs_sync_fs() const { return at<257>().valid(); }
+  ::protozero::ConstBytes f2fs_sync_fs() const { return at<257>().as_bytes(); }
+  bool has_f2fs_truncate() const { return at<258>().valid(); }
+  ::protozero::ConstBytes f2fs_truncate() const { return at<258>().as_bytes(); }
+  bool has_f2fs_truncate_blocks_enter() const { return at<259>().valid(); }
+  ::protozero::ConstBytes f2fs_truncate_blocks_enter() const { return at<259>().as_bytes(); }
+  bool has_f2fs_truncate_blocks_exit() const { return at<260>().valid(); }
+  ::protozero::ConstBytes f2fs_truncate_blocks_exit() const { return at<260>().as_bytes(); }
+  bool has_f2fs_truncate_data_blocks_range() const { return at<261>().valid(); }
+  ::protozero::ConstBytes f2fs_truncate_data_blocks_range() const { return at<261>().as_bytes(); }
+  bool has_f2fs_truncate_inode_blocks_enter() const { return at<262>().valid(); }
+  ::protozero::ConstBytes f2fs_truncate_inode_blocks_enter() const { return at<262>().as_bytes(); }
+  bool has_f2fs_truncate_inode_blocks_exit() const { return at<263>().valid(); }
+  ::protozero::ConstBytes f2fs_truncate_inode_blocks_exit() const { return at<263>().as_bytes(); }
+  bool has_f2fs_truncate_node() const { return at<264>().valid(); }
+  ::protozero::ConstBytes f2fs_truncate_node() const { return at<264>().as_bytes(); }
+  bool has_f2fs_truncate_nodes_enter() const { return at<265>().valid(); }
+  ::protozero::ConstBytes f2fs_truncate_nodes_enter() const { return at<265>().as_bytes(); }
+  bool has_f2fs_truncate_nodes_exit() const { return at<266>().valid(); }
+  ::protozero::ConstBytes f2fs_truncate_nodes_exit() const { return at<266>().as_bytes(); }
+  bool has_f2fs_truncate_partial_nodes() const { return at<267>().valid(); }
+  ::protozero::ConstBytes f2fs_truncate_partial_nodes() const { return at<267>().as_bytes(); }
+  bool has_f2fs_unlink_enter() const { return at<268>().valid(); }
+  ::protozero::ConstBytes f2fs_unlink_enter() const { return at<268>().as_bytes(); }
+  bool has_f2fs_unlink_exit() const { return at<269>().valid(); }
+  ::protozero::ConstBytes f2fs_unlink_exit() const { return at<269>().as_bytes(); }
+  bool has_f2fs_vm_page_mkwrite() const { return at<270>().valid(); }
+  ::protozero::ConstBytes f2fs_vm_page_mkwrite() const { return at<270>().as_bytes(); }
+  bool has_f2fs_write_begin() const { return at<271>().valid(); }
+  ::protozero::ConstBytes f2fs_write_begin() const { return at<271>().as_bytes(); }
+  bool has_f2fs_write_checkpoint() const { return at<272>().valid(); }
+  ::protozero::ConstBytes f2fs_write_checkpoint() const { return at<272>().as_bytes(); }
+  bool has_f2fs_write_end() const { return at<273>().valid(); }
+  ::protozero::ConstBytes f2fs_write_end() const { return at<273>().as_bytes(); }
+  bool has_alloc_pages_iommu_end() const { return at<274>().valid(); }
+  ::protozero::ConstBytes alloc_pages_iommu_end() const { return at<274>().as_bytes(); }
+  bool has_alloc_pages_iommu_fail() const { return at<275>().valid(); }
+  ::protozero::ConstBytes alloc_pages_iommu_fail() const { return at<275>().as_bytes(); }
+  bool has_alloc_pages_iommu_start() const { return at<276>().valid(); }
+  ::protozero::ConstBytes alloc_pages_iommu_start() const { return at<276>().as_bytes(); }
+  bool has_alloc_pages_sys_end() const { return at<277>().valid(); }
+  ::protozero::ConstBytes alloc_pages_sys_end() const { return at<277>().as_bytes(); }
+  bool has_alloc_pages_sys_fail() const { return at<278>().valid(); }
+  ::protozero::ConstBytes alloc_pages_sys_fail() const { return at<278>().as_bytes(); }
+  bool has_alloc_pages_sys_start() const { return at<279>().valid(); }
+  ::protozero::ConstBytes alloc_pages_sys_start() const { return at<279>().as_bytes(); }
+  bool has_dma_alloc_contiguous_retry() const { return at<280>().valid(); }
+  ::protozero::ConstBytes dma_alloc_contiguous_retry() const { return at<280>().as_bytes(); }
+  bool has_iommu_map_range() const { return at<281>().valid(); }
+  ::protozero::ConstBytes iommu_map_range() const { return at<281>().as_bytes(); }
+  bool has_iommu_sec_ptbl_map_range_end() const { return at<282>().valid(); }
+  ::protozero::ConstBytes iommu_sec_ptbl_map_range_end() const { return at<282>().as_bytes(); }
+  bool has_iommu_sec_ptbl_map_range_start() const { return at<283>().valid(); }
+  ::protozero::ConstBytes iommu_sec_ptbl_map_range_start() const { return at<283>().as_bytes(); }
+  bool has_ion_alloc_buffer_end() const { return at<284>().valid(); }
+  ::protozero::ConstBytes ion_alloc_buffer_end() const { return at<284>().as_bytes(); }
+  bool has_ion_alloc_buffer_fail() const { return at<285>().valid(); }
+  ::protozero::ConstBytes ion_alloc_buffer_fail() const { return at<285>().as_bytes(); }
+  bool has_ion_alloc_buffer_fallback() const { return at<286>().valid(); }
+  ::protozero::ConstBytes ion_alloc_buffer_fallback() const { return at<286>().as_bytes(); }
+  bool has_ion_alloc_buffer_start() const { return at<287>().valid(); }
+  ::protozero::ConstBytes ion_alloc_buffer_start() const { return at<287>().as_bytes(); }
+  bool has_ion_cp_alloc_retry() const { return at<288>().valid(); }
+  ::protozero::ConstBytes ion_cp_alloc_retry() const { return at<288>().as_bytes(); }
+  bool has_ion_cp_secure_buffer_end() const { return at<289>().valid(); }
+  ::protozero::ConstBytes ion_cp_secure_buffer_end() const { return at<289>().as_bytes(); }
+  bool has_ion_cp_secure_buffer_start() const { return at<290>().valid(); }
+  ::protozero::ConstBytes ion_cp_secure_buffer_start() const { return at<290>().as_bytes(); }
+  bool has_ion_prefetching() const { return at<291>().valid(); }
+  ::protozero::ConstBytes ion_prefetching() const { return at<291>().as_bytes(); }
+  bool has_ion_secure_cma_add_to_pool_end() const { return at<292>().valid(); }
+  ::protozero::ConstBytes ion_secure_cma_add_to_pool_end() const { return at<292>().as_bytes(); }
+  bool has_ion_secure_cma_add_to_pool_start() const { return at<293>().valid(); }
+  ::protozero::ConstBytes ion_secure_cma_add_to_pool_start() const { return at<293>().as_bytes(); }
+  bool has_ion_secure_cma_allocate_end() const { return at<294>().valid(); }
+  ::protozero::ConstBytes ion_secure_cma_allocate_end() const { return at<294>().as_bytes(); }
+  bool has_ion_secure_cma_allocate_start() const { return at<295>().valid(); }
+  ::protozero::ConstBytes ion_secure_cma_allocate_start() const { return at<295>().as_bytes(); }
+  bool has_ion_secure_cma_shrink_pool_end() const { return at<296>().valid(); }
+  ::protozero::ConstBytes ion_secure_cma_shrink_pool_end() const { return at<296>().as_bytes(); }
+  bool has_ion_secure_cma_shrink_pool_start() const { return at<297>().valid(); }
+  ::protozero::ConstBytes ion_secure_cma_shrink_pool_start() const { return at<297>().as_bytes(); }
+  bool has_kfree() const { return at<298>().valid(); }
+  ::protozero::ConstBytes kfree() const { return at<298>().as_bytes(); }
+  bool has_kmalloc() const { return at<299>().valid(); }
+  ::protozero::ConstBytes kmalloc() const { return at<299>().as_bytes(); }
+  bool has_kmalloc_node() const { return at<300>().valid(); }
+  ::protozero::ConstBytes kmalloc_node() const { return at<300>().as_bytes(); }
+  bool has_kmem_cache_alloc() const { return at<301>().valid(); }
+  ::protozero::ConstBytes kmem_cache_alloc() const { return at<301>().as_bytes(); }
+  bool has_kmem_cache_alloc_node() const { return at<302>().valid(); }
+  ::protozero::ConstBytes kmem_cache_alloc_node() const { return at<302>().as_bytes(); }
+  bool has_kmem_cache_free() const { return at<303>().valid(); }
+  ::protozero::ConstBytes kmem_cache_free() const { return at<303>().as_bytes(); }
+  bool has_migrate_pages_end() const { return at<304>().valid(); }
+  ::protozero::ConstBytes migrate_pages_end() const { return at<304>().as_bytes(); }
+  bool has_migrate_pages_start() const { return at<305>().valid(); }
+  ::protozero::ConstBytes migrate_pages_start() const { return at<305>().as_bytes(); }
+  bool has_migrate_retry() const { return at<306>().valid(); }
+  ::protozero::ConstBytes migrate_retry() const { return at<306>().as_bytes(); }
+  bool has_mm_page_alloc() const { return at<307>().valid(); }
+  ::protozero::ConstBytes mm_page_alloc() const { return at<307>().as_bytes(); }
+  bool has_mm_page_alloc_extfrag() const { return at<308>().valid(); }
+  ::protozero::ConstBytes mm_page_alloc_extfrag() const { return at<308>().as_bytes(); }
+  bool has_mm_page_alloc_zone_locked() const { return at<309>().valid(); }
+  ::protozero::ConstBytes mm_page_alloc_zone_locked() const { return at<309>().as_bytes(); }
+  bool has_mm_page_free() const { return at<310>().valid(); }
+  ::protozero::ConstBytes mm_page_free() const { return at<310>().as_bytes(); }
+  bool has_mm_page_free_batched() const { return at<311>().valid(); }
+  ::protozero::ConstBytes mm_page_free_batched() const { return at<311>().as_bytes(); }
+  bool has_mm_page_pcpu_drain() const { return at<312>().valid(); }
+  ::protozero::ConstBytes mm_page_pcpu_drain() const { return at<312>().as_bytes(); }
+  bool has_rss_stat() const { return at<313>().valid(); }
+  ::protozero::ConstBytes rss_stat() const { return at<313>().as_bytes(); }
+  bool has_ion_heap_shrink() const { return at<314>().valid(); }
+  ::protozero::ConstBytes ion_heap_shrink() const { return at<314>().as_bytes(); }
+  bool has_ion_heap_grow() const { return at<315>().valid(); }
+  ::protozero::ConstBytes ion_heap_grow() const { return at<315>().as_bytes(); }
+  bool has_fence_init() const { return at<316>().valid(); }
+  ::protozero::ConstBytes fence_init() const { return at<316>().as_bytes(); }
+  bool has_fence_destroy() const { return at<317>().valid(); }
+  ::protozero::ConstBytes fence_destroy() const { return at<317>().as_bytes(); }
+  bool has_fence_enable_signal() const { return at<318>().valid(); }
+  ::protozero::ConstBytes fence_enable_signal() const { return at<318>().as_bytes(); }
+  bool has_fence_signaled() const { return at<319>().valid(); }
+  ::protozero::ConstBytes fence_signaled() const { return at<319>().as_bytes(); }
+  bool has_clk_enable() const { return at<320>().valid(); }
+  ::protozero::ConstBytes clk_enable() const { return at<320>().as_bytes(); }
+  bool has_clk_disable() const { return at<321>().valid(); }
+  ::protozero::ConstBytes clk_disable() const { return at<321>().as_bytes(); }
+  bool has_clk_set_rate() const { return at<322>().valid(); }
+  ::protozero::ConstBytes clk_set_rate() const { return at<322>().as_bytes(); }
+  bool has_binder_transaction_alloc_buf() const { return at<323>().valid(); }
+  ::protozero::ConstBytes binder_transaction_alloc_buf() const { return at<323>().as_bytes(); }
+  bool has_signal_deliver() const { return at<324>().valid(); }
+  ::protozero::ConstBytes signal_deliver() const { return at<324>().as_bytes(); }
+  bool has_signal_generate() const { return at<325>().valid(); }
+  ::protozero::ConstBytes signal_generate() const { return at<325>().as_bytes(); }
+  bool has_oom_score_adj_update() const { return at<326>().valid(); }
+  ::protozero::ConstBytes oom_score_adj_update() const { return at<326>().as_bytes(); }
+  bool has_generic() const { return at<327>().valid(); }
+  ::protozero::ConstBytes generic() const { return at<327>().as_bytes(); }
+  bool has_mm_event_record() const { return at<328>().valid(); }
+  ::protozero::ConstBytes mm_event_record() const { return at<328>().as_bytes(); }
+  bool has_sys_enter() const { return at<329>().valid(); }
+  ::protozero::ConstBytes sys_enter() const { return at<329>().as_bytes(); }
+  bool has_sys_exit() const { return at<330>().valid(); }
+  ::protozero::ConstBytes sys_exit() const { return at<330>().as_bytes(); }
+  bool has_zero() const { return at<331>().valid(); }
+  ::protozero::ConstBytes zero() const { return at<331>().as_bytes(); }
+  bool has_gpu_frequency() const { return at<332>().valid(); }
+  ::protozero::ConstBytes gpu_frequency() const { return at<332>().as_bytes(); }
+  bool has_sde_tracing_mark_write() const { return at<333>().valid(); }
+  ::protozero::ConstBytes sde_tracing_mark_write() const { return at<333>().as_bytes(); }
+  bool has_mark_victim() const { return at<334>().valid(); }
+  ::protozero::ConstBytes mark_victim() const { return at<334>().as_bytes(); }
+  bool has_ion_stat() const { return at<335>().valid(); }
+  ::protozero::ConstBytes ion_stat() const { return at<335>().as_bytes(); }
+  bool has_ion_buffer_create() const { return at<336>().valid(); }
+  ::protozero::ConstBytes ion_buffer_create() const { return at<336>().as_bytes(); }
+  bool has_ion_buffer_destroy() const { return at<337>().valid(); }
+  ::protozero::ConstBytes ion_buffer_destroy() const { return at<337>().as_bytes(); }
+  bool has_scm_call_start() const { return at<338>().valid(); }
+  ::protozero::ConstBytes scm_call_start() const { return at<338>().as_bytes(); }
+  bool has_scm_call_end() const { return at<339>().valid(); }
+  ::protozero::ConstBytes scm_call_end() const { return at<339>().as_bytes(); }
+  bool has_gpu_mem_total() const { return at<340>().valid(); }
+  ::protozero::ConstBytes gpu_mem_total() const { return at<340>().as_bytes(); }
+  bool has_thermal_temperature() const { return at<341>().valid(); }
+  ::protozero::ConstBytes thermal_temperature() const { return at<341>().as_bytes(); }
+  bool has_cdev_update() const { return at<342>().valid(); }
+  ::protozero::ConstBytes cdev_update() const { return at<342>().as_bytes(); }
+  bool has_cpuhp_exit() const { return at<343>().valid(); }
+  ::protozero::ConstBytes cpuhp_exit() const { return at<343>().as_bytes(); }
+  bool has_cpuhp_multi_enter() const { return at<344>().valid(); }
+  ::protozero::ConstBytes cpuhp_multi_enter() const { return at<344>().as_bytes(); }
+  bool has_cpuhp_enter() const { return at<345>().valid(); }
+  ::protozero::ConstBytes cpuhp_enter() const { return at<345>().as_bytes(); }
+  bool has_cpuhp_latency() const { return at<346>().valid(); }
+  ::protozero::ConstBytes cpuhp_latency() const { return at<346>().as_bytes(); }
+  bool has_fastrpc_dma_stat() const { return at<347>().valid(); }
+  ::protozero::ConstBytes fastrpc_dma_stat() const { return at<347>().as_bytes(); }
+  bool has_dpu_tracing_mark_write() const { return at<348>().valid(); }
+  ::protozero::ConstBytes dpu_tracing_mark_write() const { return at<348>().as_bytes(); }
+  bool has_g2d_tracing_mark_write() const { return at<349>().valid(); }
+  ::protozero::ConstBytes g2d_tracing_mark_write() const { return at<349>().as_bytes(); }
+  bool has_mali_tracing_mark_write() const { return at<350>().valid(); }
+  ::protozero::ConstBytes mali_tracing_mark_write() const { return at<350>().as_bytes(); }
+  bool has_dma_heap_stat() const { return at<351>().valid(); }
+  ::protozero::ConstBytes dma_heap_stat() const { return at<351>().as_bytes(); }
+  bool has_cpuhp_pause() const { return at<352>().valid(); }
+  ::protozero::ConstBytes cpuhp_pause() const { return at<352>().as_bytes(); }
+  bool has_sched_pi_setprio() const { return at<353>().valid(); }
+  ::protozero::ConstBytes sched_pi_setprio() const { return at<353>().as_bytes(); }
+  bool has_sde_sde_evtlog() const { return at<354>().valid(); }
+  ::protozero::ConstBytes sde_sde_evtlog() const { return at<354>().as_bytes(); }
+  bool has_sde_sde_perf_calc_crtc() const { return at<355>().valid(); }
+  ::protozero::ConstBytes sde_sde_perf_calc_crtc() const { return at<355>().as_bytes(); }
+  bool has_sde_sde_perf_crtc_update() const { return at<356>().valid(); }
+  ::protozero::ConstBytes sde_sde_perf_crtc_update() const { return at<356>().as_bytes(); }
+  bool has_sde_sde_perf_set_qos_luts() const { return at<357>().valid(); }
+  ::protozero::ConstBytes sde_sde_perf_set_qos_luts() const { return at<357>().as_bytes(); }
+  bool has_sde_sde_perf_update_bus() const { return at<358>().valid(); }
+  ::protozero::ConstBytes sde_sde_perf_update_bus() const { return at<358>().as_bytes(); }
+};
+
+class FtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = FtraceEvent_Decoder;
+  enum : int32_t {
+    kTimestampFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kPrintFieldNumber = 3,
+    kSchedSwitchFieldNumber = 4,
+    kCpuFrequencyFieldNumber = 11,
+    kCpuFrequencyLimitsFieldNumber = 12,
+    kCpuIdleFieldNumber = 13,
+    kClockEnableFieldNumber = 14,
+    kClockDisableFieldNumber = 15,
+    kClockSetRateFieldNumber = 16,
+    kSchedWakeupFieldNumber = 17,
+    kSchedBlockedReasonFieldNumber = 18,
+    kSchedCpuHotplugFieldNumber = 19,
+    kSchedWakingFieldNumber = 20,
+    kIpiEntryFieldNumber = 21,
+    kIpiExitFieldNumber = 22,
+    kIpiRaiseFieldNumber = 23,
+    kSoftirqEntryFieldNumber = 24,
+    kSoftirqExitFieldNumber = 25,
+    kSoftirqRaiseFieldNumber = 26,
+    kI2cReadFieldNumber = 27,
+    kI2cWriteFieldNumber = 28,
+    kI2cResultFieldNumber = 29,
+    kI2cReplyFieldNumber = 30,
+    kSmbusReadFieldNumber = 31,
+    kSmbusWriteFieldNumber = 32,
+    kSmbusResultFieldNumber = 33,
+    kSmbusReplyFieldNumber = 34,
+    kLowmemoryKillFieldNumber = 35,
+    kIrqHandlerEntryFieldNumber = 36,
+    kIrqHandlerExitFieldNumber = 37,
+    kSyncPtFieldNumber = 38,
+    kSyncTimelineFieldNumber = 39,
+    kSyncWaitFieldNumber = 40,
+    kExt4DaWriteBeginFieldNumber = 41,
+    kExt4DaWriteEndFieldNumber = 42,
+    kExt4SyncFileEnterFieldNumber = 43,
+    kExt4SyncFileExitFieldNumber = 44,
+    kBlockRqIssueFieldNumber = 45,
+    kMmVmscanDirectReclaimBeginFieldNumber = 46,
+    kMmVmscanDirectReclaimEndFieldNumber = 47,
+    kMmVmscanKswapdWakeFieldNumber = 48,
+    kMmVmscanKswapdSleepFieldNumber = 49,
+    kBinderTransactionFieldNumber = 50,
+    kBinderTransactionReceivedFieldNumber = 51,
+    kBinderSetPriorityFieldNumber = 52,
+    kBinderLockFieldNumber = 53,
+    kBinderLockedFieldNumber = 54,
+    kBinderUnlockFieldNumber = 55,
+    kWorkqueueActivateWorkFieldNumber = 56,
+    kWorkqueueExecuteEndFieldNumber = 57,
+    kWorkqueueExecuteStartFieldNumber = 58,
+    kWorkqueueQueueWorkFieldNumber = 59,
+    kRegulatorDisableFieldNumber = 60,
+    kRegulatorDisableCompleteFieldNumber = 61,
+    kRegulatorEnableFieldNumber = 62,
+    kRegulatorEnableCompleteFieldNumber = 63,
+    kRegulatorEnableDelayFieldNumber = 64,
+    kRegulatorSetVoltageFieldNumber = 65,
+    kRegulatorSetVoltageCompleteFieldNumber = 66,
+    kCgroupAttachTaskFieldNumber = 67,
+    kCgroupMkdirFieldNumber = 68,
+    kCgroupRemountFieldNumber = 69,
+    kCgroupRmdirFieldNumber = 70,
+    kCgroupTransferTasksFieldNumber = 71,
+    kCgroupDestroyRootFieldNumber = 72,
+    kCgroupReleaseFieldNumber = 73,
+    kCgroupRenameFieldNumber = 74,
+    kCgroupSetupRootFieldNumber = 75,
+    kMdpCmdKickoffFieldNumber = 76,
+    kMdpCommitFieldNumber = 77,
+    kMdpPerfSetOtFieldNumber = 78,
+    kMdpSsppChangeFieldNumber = 79,
+    kTracingMarkWriteFieldNumber = 80,
+    kMdpCmdPingpongDoneFieldNumber = 81,
+    kMdpCompareBwFieldNumber = 82,
+    kMdpPerfSetPanicLutsFieldNumber = 83,
+    kMdpSsppSetFieldNumber = 84,
+    kMdpCmdReadptrDoneFieldNumber = 85,
+    kMdpMisrCrcFieldNumber = 86,
+    kMdpPerfSetQosLutsFieldNumber = 87,
+    kMdpTraceCounterFieldNumber = 88,
+    kMdpCmdReleaseBwFieldNumber = 89,
+    kMdpMixerUpdateFieldNumber = 90,
+    kMdpPerfSetWmLevelsFieldNumber = 91,
+    kMdpVideoUnderrunDoneFieldNumber = 92,
+    kMdpCmdWaitPingpongFieldNumber = 93,
+    kMdpPerfPrefillCalcFieldNumber = 94,
+    kMdpPerfUpdateBusFieldNumber = 95,
+    kRotatorBwAoAsContextFieldNumber = 96,
+    kMmFilemapAddToPageCacheFieldNumber = 97,
+    kMmFilemapDeleteFromPageCacheFieldNumber = 98,
+    kMmCompactionBeginFieldNumber = 99,
+    kMmCompactionDeferCompactionFieldNumber = 100,
+    kMmCompactionDeferredFieldNumber = 101,
+    kMmCompactionDeferResetFieldNumber = 102,
+    kMmCompactionEndFieldNumber = 103,
+    kMmCompactionFinishedFieldNumber = 104,
+    kMmCompactionIsolateFreepagesFieldNumber = 105,
+    kMmCompactionIsolateMigratepagesFieldNumber = 106,
+    kMmCompactionKcompactdSleepFieldNumber = 107,
+    kMmCompactionKcompactdWakeFieldNumber = 108,
+    kMmCompactionMigratepagesFieldNumber = 109,
+    kMmCompactionSuitableFieldNumber = 110,
+    kMmCompactionTryToCompactPagesFieldNumber = 111,
+    kMmCompactionWakeupKcompactdFieldNumber = 112,
+    kSuspendResumeFieldNumber = 113,
+    kSchedWakeupNewFieldNumber = 114,
+    kBlockBioBackmergeFieldNumber = 115,
+    kBlockBioBounceFieldNumber = 116,
+    kBlockBioCompleteFieldNumber = 117,
+    kBlockBioFrontmergeFieldNumber = 118,
+    kBlockBioQueueFieldNumber = 119,
+    kBlockBioRemapFieldNumber = 120,
+    kBlockDirtyBufferFieldNumber = 121,
+    kBlockGetrqFieldNumber = 122,
+    kBlockPlugFieldNumber = 123,
+    kBlockRqAbortFieldNumber = 124,
+    kBlockRqCompleteFieldNumber = 125,
+    kBlockRqInsertFieldNumber = 126,
+    kBlockRqRemapFieldNumber = 128,
+    kBlockRqRequeueFieldNumber = 129,
+    kBlockSleeprqFieldNumber = 130,
+    kBlockSplitFieldNumber = 131,
+    kBlockTouchBufferFieldNumber = 132,
+    kBlockUnplugFieldNumber = 133,
+    kExt4AllocDaBlocksFieldNumber = 134,
+    kExt4AllocateBlocksFieldNumber = 135,
+    kExt4AllocateInodeFieldNumber = 136,
+    kExt4BeginOrderedTruncateFieldNumber = 137,
+    kExt4CollapseRangeFieldNumber = 138,
+    kExt4DaReleaseSpaceFieldNumber = 139,
+    kExt4DaReserveSpaceFieldNumber = 140,
+    kExt4DaUpdateReserveSpaceFieldNumber = 141,
+    kExt4DaWritePagesFieldNumber = 142,
+    kExt4DaWritePagesExtentFieldNumber = 143,
+    kExt4DirectIOEnterFieldNumber = 144,
+    kExt4DirectIOExitFieldNumber = 145,
+    kExt4DiscardBlocksFieldNumber = 146,
+    kExt4DiscardPreallocationsFieldNumber = 147,
+    kExt4DropInodeFieldNumber = 148,
+    kExt4EsCacheExtentFieldNumber = 149,
+    kExt4EsFindDelayedExtentRangeEnterFieldNumber = 150,
+    kExt4EsFindDelayedExtentRangeExitFieldNumber = 151,
+    kExt4EsInsertExtentFieldNumber = 152,
+    kExt4EsLookupExtentEnterFieldNumber = 153,
+    kExt4EsLookupExtentExitFieldNumber = 154,
+    kExt4EsRemoveExtentFieldNumber = 155,
+    kExt4EsShrinkFieldNumber = 156,
+    kExt4EsShrinkCountFieldNumber = 157,
+    kExt4EsShrinkScanEnterFieldNumber = 158,
+    kExt4EsShrinkScanExitFieldNumber = 159,
+    kExt4EvictInodeFieldNumber = 160,
+    kExt4ExtConvertToInitializedEnterFieldNumber = 161,
+    kExt4ExtConvertToInitializedFastpathFieldNumber = 162,
+    kExt4ExtHandleUnwrittenExtentsFieldNumber = 163,
+    kExt4ExtInCacheFieldNumber = 164,
+    kExt4ExtLoadExtentFieldNumber = 165,
+    kExt4ExtMapBlocksEnterFieldNumber = 166,
+    kExt4ExtMapBlocksExitFieldNumber = 167,
+    kExt4ExtPutInCacheFieldNumber = 168,
+    kExt4ExtRemoveSpaceFieldNumber = 169,
+    kExt4ExtRemoveSpaceDoneFieldNumber = 170,
+    kExt4ExtRmIdxFieldNumber = 171,
+    kExt4ExtRmLeafFieldNumber = 172,
+    kExt4ExtShowExtentFieldNumber = 173,
+    kExt4FallocateEnterFieldNumber = 174,
+    kExt4FallocateExitFieldNumber = 175,
+    kExt4FindDelallocRangeFieldNumber = 176,
+    kExt4ForgetFieldNumber = 177,
+    kExt4FreeBlocksFieldNumber = 178,
+    kExt4FreeInodeFieldNumber = 179,
+    kExt4GetImpliedClusterAllocExitFieldNumber = 180,
+    kExt4GetReservedClusterAllocFieldNumber = 181,
+    kExt4IndMapBlocksEnterFieldNumber = 182,
+    kExt4IndMapBlocksExitFieldNumber = 183,
+    kExt4InsertRangeFieldNumber = 184,
+    kExt4InvalidatepageFieldNumber = 185,
+    kExt4JournalStartFieldNumber = 186,
+    kExt4JournalStartReservedFieldNumber = 187,
+    kExt4JournalledInvalidatepageFieldNumber = 188,
+    kExt4JournalledWriteEndFieldNumber = 189,
+    kExt4LoadInodeFieldNumber = 190,
+    kExt4LoadInodeBitmapFieldNumber = 191,
+    kExt4MarkInodeDirtyFieldNumber = 192,
+    kExt4MbBitmapLoadFieldNumber = 193,
+    kExt4MbBuddyBitmapLoadFieldNumber = 194,
+    kExt4MbDiscardPreallocationsFieldNumber = 195,
+    kExt4MbNewGroupPaFieldNumber = 196,
+    kExt4MbNewInodePaFieldNumber = 197,
+    kExt4MbReleaseGroupPaFieldNumber = 198,
+    kExt4MbReleaseInodePaFieldNumber = 199,
+    kExt4MballocAllocFieldNumber = 200,
+    kExt4MballocDiscardFieldNumber = 201,
+    kExt4MballocFreeFieldNumber = 202,
+    kExt4MballocPreallocFieldNumber = 203,
+    kExt4OtherInodeUpdateTimeFieldNumber = 204,
+    kExt4PunchHoleFieldNumber = 205,
+    kExt4ReadBlockBitmapLoadFieldNumber = 206,
+    kExt4ReadpageFieldNumber = 207,
+    kExt4ReleasepageFieldNumber = 208,
+    kExt4RemoveBlocksFieldNumber = 209,
+    kExt4RequestBlocksFieldNumber = 210,
+    kExt4RequestInodeFieldNumber = 211,
+    kExt4SyncFsFieldNumber = 212,
+    kExt4TrimAllFreeFieldNumber = 213,
+    kExt4TrimExtentFieldNumber = 214,
+    kExt4TruncateEnterFieldNumber = 215,
+    kExt4TruncateExitFieldNumber = 216,
+    kExt4UnlinkEnterFieldNumber = 217,
+    kExt4UnlinkExitFieldNumber = 218,
+    kExt4WriteBeginFieldNumber = 219,
+    kExt4WriteEndFieldNumber = 230,
+    kExt4WritepageFieldNumber = 231,
+    kExt4WritepagesFieldNumber = 232,
+    kExt4WritepagesResultFieldNumber = 233,
+    kExt4ZeroRangeFieldNumber = 234,
+    kTaskNewtaskFieldNumber = 235,
+    kTaskRenameFieldNumber = 236,
+    kSchedProcessExecFieldNumber = 237,
+    kSchedProcessExitFieldNumber = 238,
+    kSchedProcessForkFieldNumber = 239,
+    kSchedProcessFreeFieldNumber = 240,
+    kSchedProcessHangFieldNumber = 241,
+    kSchedProcessWaitFieldNumber = 242,
+    kF2fsDoSubmitBioFieldNumber = 243,
+    kF2fsEvictInodeFieldNumber = 244,
+    kF2fsFallocateFieldNumber = 245,
+    kF2fsGetDataBlockFieldNumber = 246,
+    kF2fsGetVictimFieldNumber = 247,
+    kF2fsIgetFieldNumber = 248,
+    kF2fsIgetExitFieldNumber = 249,
+    kF2fsNewInodeFieldNumber = 250,
+    kF2fsReadpageFieldNumber = 251,
+    kF2fsReserveNewBlockFieldNumber = 252,
+    kF2fsSetPageDirtyFieldNumber = 253,
+    kF2fsSubmitWritePageFieldNumber = 254,
+    kF2fsSyncFileEnterFieldNumber = 255,
+    kF2fsSyncFileExitFieldNumber = 256,
+    kF2fsSyncFsFieldNumber = 257,
+    kF2fsTruncateFieldNumber = 258,
+    kF2fsTruncateBlocksEnterFieldNumber = 259,
+    kF2fsTruncateBlocksExitFieldNumber = 260,
+    kF2fsTruncateDataBlocksRangeFieldNumber = 261,
+    kF2fsTruncateInodeBlocksEnterFieldNumber = 262,
+    kF2fsTruncateInodeBlocksExitFieldNumber = 263,
+    kF2fsTruncateNodeFieldNumber = 264,
+    kF2fsTruncateNodesEnterFieldNumber = 265,
+    kF2fsTruncateNodesExitFieldNumber = 266,
+    kF2fsTruncatePartialNodesFieldNumber = 267,
+    kF2fsUnlinkEnterFieldNumber = 268,
+    kF2fsUnlinkExitFieldNumber = 269,
+    kF2fsVmPageMkwriteFieldNumber = 270,
+    kF2fsWriteBeginFieldNumber = 271,
+    kF2fsWriteCheckpointFieldNumber = 272,
+    kF2fsWriteEndFieldNumber = 273,
+    kAllocPagesIommuEndFieldNumber = 274,
+    kAllocPagesIommuFailFieldNumber = 275,
+    kAllocPagesIommuStartFieldNumber = 276,
+    kAllocPagesSysEndFieldNumber = 277,
+    kAllocPagesSysFailFieldNumber = 278,
+    kAllocPagesSysStartFieldNumber = 279,
+    kDmaAllocContiguousRetryFieldNumber = 280,
+    kIommuMapRangeFieldNumber = 281,
+    kIommuSecPtblMapRangeEndFieldNumber = 282,
+    kIommuSecPtblMapRangeStartFieldNumber = 283,
+    kIonAllocBufferEndFieldNumber = 284,
+    kIonAllocBufferFailFieldNumber = 285,
+    kIonAllocBufferFallbackFieldNumber = 286,
+    kIonAllocBufferStartFieldNumber = 287,
+    kIonCpAllocRetryFieldNumber = 288,
+    kIonCpSecureBufferEndFieldNumber = 289,
+    kIonCpSecureBufferStartFieldNumber = 290,
+    kIonPrefetchingFieldNumber = 291,
+    kIonSecureCmaAddToPoolEndFieldNumber = 292,
+    kIonSecureCmaAddToPoolStartFieldNumber = 293,
+    kIonSecureCmaAllocateEndFieldNumber = 294,
+    kIonSecureCmaAllocateStartFieldNumber = 295,
+    kIonSecureCmaShrinkPoolEndFieldNumber = 296,
+    kIonSecureCmaShrinkPoolStartFieldNumber = 297,
+    kKfreeFieldNumber = 298,
+    kKmallocFieldNumber = 299,
+    kKmallocNodeFieldNumber = 300,
+    kKmemCacheAllocFieldNumber = 301,
+    kKmemCacheAllocNodeFieldNumber = 302,
+    kKmemCacheFreeFieldNumber = 303,
+    kMigratePagesEndFieldNumber = 304,
+    kMigratePagesStartFieldNumber = 305,
+    kMigrateRetryFieldNumber = 306,
+    kMmPageAllocFieldNumber = 307,
+    kMmPageAllocExtfragFieldNumber = 308,
+    kMmPageAllocZoneLockedFieldNumber = 309,
+    kMmPageFreeFieldNumber = 310,
+    kMmPageFreeBatchedFieldNumber = 311,
+    kMmPagePcpuDrainFieldNumber = 312,
+    kRssStatFieldNumber = 313,
+    kIonHeapShrinkFieldNumber = 314,
+    kIonHeapGrowFieldNumber = 315,
+    kFenceInitFieldNumber = 316,
+    kFenceDestroyFieldNumber = 317,
+    kFenceEnableSignalFieldNumber = 318,
+    kFenceSignaledFieldNumber = 319,
+    kClkEnableFieldNumber = 320,
+    kClkDisableFieldNumber = 321,
+    kClkSetRateFieldNumber = 322,
+    kBinderTransactionAllocBufFieldNumber = 323,
+    kSignalDeliverFieldNumber = 324,
+    kSignalGenerateFieldNumber = 325,
+    kOomScoreAdjUpdateFieldNumber = 326,
+    kGenericFieldNumber = 327,
+    kMmEventRecordFieldNumber = 328,
+    kSysEnterFieldNumber = 329,
+    kSysExitFieldNumber = 330,
+    kZeroFieldNumber = 331,
+    kGpuFrequencyFieldNumber = 332,
+    kSdeTracingMarkWriteFieldNumber = 333,
+    kMarkVictimFieldNumber = 334,
+    kIonStatFieldNumber = 335,
+    kIonBufferCreateFieldNumber = 336,
+    kIonBufferDestroyFieldNumber = 337,
+    kScmCallStartFieldNumber = 338,
+    kScmCallEndFieldNumber = 339,
+    kGpuMemTotalFieldNumber = 340,
+    kThermalTemperatureFieldNumber = 341,
+    kCdevUpdateFieldNumber = 342,
+    kCpuhpExitFieldNumber = 343,
+    kCpuhpMultiEnterFieldNumber = 344,
+    kCpuhpEnterFieldNumber = 345,
+    kCpuhpLatencyFieldNumber = 346,
+    kFastrpcDmaStatFieldNumber = 347,
+    kDpuTracingMarkWriteFieldNumber = 348,
+    kG2dTracingMarkWriteFieldNumber = 349,
+    kMaliTracingMarkWriteFieldNumber = 350,
+    kDmaHeapStatFieldNumber = 351,
+    kCpuhpPauseFieldNumber = 352,
+    kSchedPiSetprioFieldNumber = 353,
+    kSdeSdeEvtlogFieldNumber = 354,
+    kSdeSdePerfCalcCrtcFieldNumber = 355,
+    kSdeSdePerfCrtcUpdateFieldNumber = 356,
+    kSdeSdePerfSetQosLutsFieldNumber = 357,
+    kSdeSdePerfUpdateBusFieldNumber = 358,
+  };
+
+  using FieldMetadata_Timestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Timestamp kTimestamp() { return {}; }
+  void set_timestamp(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Timestamp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Print =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PrintFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Print kPrint() { return {}; }
+  template <typename T = PrintFtraceEvent> T* set_print() {
+    return BeginNestedMessage<T>(3);
+  }
+
+
+  using FieldMetadata_SchedSwitch =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SchedSwitchFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SchedSwitch kSchedSwitch() { return {}; }
+  template <typename T = SchedSwitchFtraceEvent> T* set_sched_switch() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_CpuFrequency =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CpuFrequencyFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuFrequency kCpuFrequency() { return {}; }
+  template <typename T = CpuFrequencyFtraceEvent> T* set_cpu_frequency() {
+    return BeginNestedMessage<T>(11);
+  }
+
+
+  using FieldMetadata_CpuFrequencyLimits =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CpuFrequencyLimitsFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuFrequencyLimits kCpuFrequencyLimits() { return {}; }
+  template <typename T = CpuFrequencyLimitsFtraceEvent> T* set_cpu_frequency_limits() {
+    return BeginNestedMessage<T>(12);
+  }
+
+
+  using FieldMetadata_CpuIdle =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CpuIdleFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuIdle kCpuIdle() { return {}; }
+  template <typename T = CpuIdleFtraceEvent> T* set_cpu_idle() {
+    return BeginNestedMessage<T>(13);
+  }
+
+
+  using FieldMetadata_ClockEnable =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ClockEnableFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClockEnable kClockEnable() { return {}; }
+  template <typename T = ClockEnableFtraceEvent> T* set_clock_enable() {
+    return BeginNestedMessage<T>(14);
+  }
+
+
+  using FieldMetadata_ClockDisable =
+    ::protozero::proto_utils::FieldMetadata<
+      15,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ClockDisableFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClockDisable kClockDisable() { return {}; }
+  template <typename T = ClockDisableFtraceEvent> T* set_clock_disable() {
+    return BeginNestedMessage<T>(15);
+  }
+
+
+  using FieldMetadata_ClockSetRate =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ClockSetRateFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClockSetRate kClockSetRate() { return {}; }
+  template <typename T = ClockSetRateFtraceEvent> T* set_clock_set_rate() {
+    return BeginNestedMessage<T>(16);
+  }
+
+
+  using FieldMetadata_SchedWakeup =
+    ::protozero::proto_utils::FieldMetadata<
+      17,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SchedWakeupFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SchedWakeup kSchedWakeup() { return {}; }
+  template <typename T = SchedWakeupFtraceEvent> T* set_sched_wakeup() {
+    return BeginNestedMessage<T>(17);
+  }
+
+
+  using FieldMetadata_SchedBlockedReason =
+    ::protozero::proto_utils::FieldMetadata<
+      18,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SchedBlockedReasonFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SchedBlockedReason kSchedBlockedReason() { return {}; }
+  template <typename T = SchedBlockedReasonFtraceEvent> T* set_sched_blocked_reason() {
+    return BeginNestedMessage<T>(18);
+  }
+
+
+  using FieldMetadata_SchedCpuHotplug =
+    ::protozero::proto_utils::FieldMetadata<
+      19,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SchedCpuHotplugFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SchedCpuHotplug kSchedCpuHotplug() { return {}; }
+  template <typename T = SchedCpuHotplugFtraceEvent> T* set_sched_cpu_hotplug() {
+    return BeginNestedMessage<T>(19);
+  }
+
+
+  using FieldMetadata_SchedWaking =
+    ::protozero::proto_utils::FieldMetadata<
+      20,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SchedWakingFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SchedWaking kSchedWaking() { return {}; }
+  template <typename T = SchedWakingFtraceEvent> T* set_sched_waking() {
+    return BeginNestedMessage<T>(20);
+  }
+
+
+  using FieldMetadata_IpiEntry =
+    ::protozero::proto_utils::FieldMetadata<
+      21,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IpiEntryFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IpiEntry kIpiEntry() { return {}; }
+  template <typename T = IpiEntryFtraceEvent> T* set_ipi_entry() {
+    return BeginNestedMessage<T>(21);
+  }
+
+
+  using FieldMetadata_IpiExit =
+    ::protozero::proto_utils::FieldMetadata<
+      22,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IpiExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IpiExit kIpiExit() { return {}; }
+  template <typename T = IpiExitFtraceEvent> T* set_ipi_exit() {
+    return BeginNestedMessage<T>(22);
+  }
+
+
+  using FieldMetadata_IpiRaise =
+    ::protozero::proto_utils::FieldMetadata<
+      23,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IpiRaiseFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IpiRaise kIpiRaise() { return {}; }
+  template <typename T = IpiRaiseFtraceEvent> T* set_ipi_raise() {
+    return BeginNestedMessage<T>(23);
+  }
+
+
+  using FieldMetadata_SoftirqEntry =
+    ::protozero::proto_utils::FieldMetadata<
+      24,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SoftirqEntryFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SoftirqEntry kSoftirqEntry() { return {}; }
+  template <typename T = SoftirqEntryFtraceEvent> T* set_softirq_entry() {
+    return BeginNestedMessage<T>(24);
+  }
+
+
+  using FieldMetadata_SoftirqExit =
+    ::protozero::proto_utils::FieldMetadata<
+      25,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SoftirqExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SoftirqExit kSoftirqExit() { return {}; }
+  template <typename T = SoftirqExitFtraceEvent> T* set_softirq_exit() {
+    return BeginNestedMessage<T>(25);
+  }
+
+
+  using FieldMetadata_SoftirqRaise =
+    ::protozero::proto_utils::FieldMetadata<
+      26,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SoftirqRaiseFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SoftirqRaise kSoftirqRaise() { return {}; }
+  template <typename T = SoftirqRaiseFtraceEvent> T* set_softirq_raise() {
+    return BeginNestedMessage<T>(26);
+  }
+
+
+  using FieldMetadata_I2cRead =
+    ::protozero::proto_utils::FieldMetadata<
+      27,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      I2cReadFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_I2cRead kI2cRead() { return {}; }
+  template <typename T = I2cReadFtraceEvent> T* set_i2c_read() {
+    return BeginNestedMessage<T>(27);
+  }
+
+
+  using FieldMetadata_I2cWrite =
+    ::protozero::proto_utils::FieldMetadata<
+      28,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      I2cWriteFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_I2cWrite kI2cWrite() { return {}; }
+  template <typename T = I2cWriteFtraceEvent> T* set_i2c_write() {
+    return BeginNestedMessage<T>(28);
+  }
+
+
+  using FieldMetadata_I2cResult =
+    ::protozero::proto_utils::FieldMetadata<
+      29,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      I2cResultFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_I2cResult kI2cResult() { return {}; }
+  template <typename T = I2cResultFtraceEvent> T* set_i2c_result() {
+    return BeginNestedMessage<T>(29);
+  }
+
+
+  using FieldMetadata_I2cReply =
+    ::protozero::proto_utils::FieldMetadata<
+      30,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      I2cReplyFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_I2cReply kI2cReply() { return {}; }
+  template <typename T = I2cReplyFtraceEvent> T* set_i2c_reply() {
+    return BeginNestedMessage<T>(30);
+  }
+
+
+  using FieldMetadata_SmbusRead =
+    ::protozero::proto_utils::FieldMetadata<
+      31,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SmbusReadFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SmbusRead kSmbusRead() { return {}; }
+  template <typename T = SmbusReadFtraceEvent> T* set_smbus_read() {
+    return BeginNestedMessage<T>(31);
+  }
+
+
+  using FieldMetadata_SmbusWrite =
+    ::protozero::proto_utils::FieldMetadata<
+      32,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SmbusWriteFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SmbusWrite kSmbusWrite() { return {}; }
+  template <typename T = SmbusWriteFtraceEvent> T* set_smbus_write() {
+    return BeginNestedMessage<T>(32);
+  }
+
+
+  using FieldMetadata_SmbusResult =
+    ::protozero::proto_utils::FieldMetadata<
+      33,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SmbusResultFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SmbusResult kSmbusResult() { return {}; }
+  template <typename T = SmbusResultFtraceEvent> T* set_smbus_result() {
+    return BeginNestedMessage<T>(33);
+  }
+
+
+  using FieldMetadata_SmbusReply =
+    ::protozero::proto_utils::FieldMetadata<
+      34,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SmbusReplyFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SmbusReply kSmbusReply() { return {}; }
+  template <typename T = SmbusReplyFtraceEvent> T* set_smbus_reply() {
+    return BeginNestedMessage<T>(34);
+  }
+
+
+  using FieldMetadata_LowmemoryKill =
+    ::protozero::proto_utils::FieldMetadata<
+      35,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      LowmemoryKillFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LowmemoryKill kLowmemoryKill() { return {}; }
+  template <typename T = LowmemoryKillFtraceEvent> T* set_lowmemory_kill() {
+    return BeginNestedMessage<T>(35);
+  }
+
+
+  using FieldMetadata_IrqHandlerEntry =
+    ::protozero::proto_utils::FieldMetadata<
+      36,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IrqHandlerEntryFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IrqHandlerEntry kIrqHandlerEntry() { return {}; }
+  template <typename T = IrqHandlerEntryFtraceEvent> T* set_irq_handler_entry() {
+    return BeginNestedMessage<T>(36);
+  }
+
+
+  using FieldMetadata_IrqHandlerExit =
+    ::protozero::proto_utils::FieldMetadata<
+      37,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IrqHandlerExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IrqHandlerExit kIrqHandlerExit() { return {}; }
+  template <typename T = IrqHandlerExitFtraceEvent> T* set_irq_handler_exit() {
+    return BeginNestedMessage<T>(37);
+  }
+
+
+  using FieldMetadata_SyncPt =
+    ::protozero::proto_utils::FieldMetadata<
+      38,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SyncPtFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SyncPt kSyncPt() { return {}; }
+  template <typename T = SyncPtFtraceEvent> T* set_sync_pt() {
+    return BeginNestedMessage<T>(38);
+  }
+
+
+  using FieldMetadata_SyncTimeline =
+    ::protozero::proto_utils::FieldMetadata<
+      39,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SyncTimelineFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SyncTimeline kSyncTimeline() { return {}; }
+  template <typename T = SyncTimelineFtraceEvent> T* set_sync_timeline() {
+    return BeginNestedMessage<T>(39);
+  }
+
+
+  using FieldMetadata_SyncWait =
+    ::protozero::proto_utils::FieldMetadata<
+      40,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SyncWaitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SyncWait kSyncWait() { return {}; }
+  template <typename T = SyncWaitFtraceEvent> T* set_sync_wait() {
+    return BeginNestedMessage<T>(40);
+  }
+
+
+  using FieldMetadata_Ext4DaWriteBegin =
+    ::protozero::proto_utils::FieldMetadata<
+      41,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4DaWriteBeginFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4DaWriteBegin kExt4DaWriteBegin() { return {}; }
+  template <typename T = Ext4DaWriteBeginFtraceEvent> T* set_ext4_da_write_begin() {
+    return BeginNestedMessage<T>(41);
+  }
+
+
+  using FieldMetadata_Ext4DaWriteEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      42,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4DaWriteEndFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4DaWriteEnd kExt4DaWriteEnd() { return {}; }
+  template <typename T = Ext4DaWriteEndFtraceEvent> T* set_ext4_da_write_end() {
+    return BeginNestedMessage<T>(42);
+  }
+
+
+  using FieldMetadata_Ext4SyncFileEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      43,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4SyncFileEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4SyncFileEnter kExt4SyncFileEnter() { return {}; }
+  template <typename T = Ext4SyncFileEnterFtraceEvent> T* set_ext4_sync_file_enter() {
+    return BeginNestedMessage<T>(43);
+  }
+
+
+  using FieldMetadata_Ext4SyncFileExit =
+    ::protozero::proto_utils::FieldMetadata<
+      44,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4SyncFileExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4SyncFileExit kExt4SyncFileExit() { return {}; }
+  template <typename T = Ext4SyncFileExitFtraceEvent> T* set_ext4_sync_file_exit() {
+    return BeginNestedMessage<T>(44);
+  }
+
+
+  using FieldMetadata_BlockRqIssue =
+    ::protozero::proto_utils::FieldMetadata<
+      45,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockRqIssueFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockRqIssue kBlockRqIssue() { return {}; }
+  template <typename T = BlockRqIssueFtraceEvent> T* set_block_rq_issue() {
+    return BeginNestedMessage<T>(45);
+  }
+
+
+  using FieldMetadata_MmVmscanDirectReclaimBegin =
+    ::protozero::proto_utils::FieldMetadata<
+      46,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmVmscanDirectReclaimBeginFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmVmscanDirectReclaimBegin kMmVmscanDirectReclaimBegin() { return {}; }
+  template <typename T = MmVmscanDirectReclaimBeginFtraceEvent> T* set_mm_vmscan_direct_reclaim_begin() {
+    return BeginNestedMessage<T>(46);
+  }
+
+
+  using FieldMetadata_MmVmscanDirectReclaimEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      47,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmVmscanDirectReclaimEndFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmVmscanDirectReclaimEnd kMmVmscanDirectReclaimEnd() { return {}; }
+  template <typename T = MmVmscanDirectReclaimEndFtraceEvent> T* set_mm_vmscan_direct_reclaim_end() {
+    return BeginNestedMessage<T>(47);
+  }
+
+
+  using FieldMetadata_MmVmscanKswapdWake =
+    ::protozero::proto_utils::FieldMetadata<
+      48,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmVmscanKswapdWakeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmVmscanKswapdWake kMmVmscanKswapdWake() { return {}; }
+  template <typename T = MmVmscanKswapdWakeFtraceEvent> T* set_mm_vmscan_kswapd_wake() {
+    return BeginNestedMessage<T>(48);
+  }
+
+
+  using FieldMetadata_MmVmscanKswapdSleep =
+    ::protozero::proto_utils::FieldMetadata<
+      49,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmVmscanKswapdSleepFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmVmscanKswapdSleep kMmVmscanKswapdSleep() { return {}; }
+  template <typename T = MmVmscanKswapdSleepFtraceEvent> T* set_mm_vmscan_kswapd_sleep() {
+    return BeginNestedMessage<T>(49);
+  }
+
+
+  using FieldMetadata_BinderTransaction =
+    ::protozero::proto_utils::FieldMetadata<
+      50,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BinderTransactionFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BinderTransaction kBinderTransaction() { return {}; }
+  template <typename T = BinderTransactionFtraceEvent> T* set_binder_transaction() {
+    return BeginNestedMessage<T>(50);
+  }
+
+
+  using FieldMetadata_BinderTransactionReceived =
+    ::protozero::proto_utils::FieldMetadata<
+      51,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BinderTransactionReceivedFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BinderTransactionReceived kBinderTransactionReceived() { return {}; }
+  template <typename T = BinderTransactionReceivedFtraceEvent> T* set_binder_transaction_received() {
+    return BeginNestedMessage<T>(51);
+  }
+
+
+  using FieldMetadata_BinderSetPriority =
+    ::protozero::proto_utils::FieldMetadata<
+      52,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BinderSetPriorityFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BinderSetPriority kBinderSetPriority() { return {}; }
+  template <typename T = BinderSetPriorityFtraceEvent> T* set_binder_set_priority() {
+    return BeginNestedMessage<T>(52);
+  }
+
+
+  using FieldMetadata_BinderLock =
+    ::protozero::proto_utils::FieldMetadata<
+      53,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BinderLockFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BinderLock kBinderLock() { return {}; }
+  template <typename T = BinderLockFtraceEvent> T* set_binder_lock() {
+    return BeginNestedMessage<T>(53);
+  }
+
+
+  using FieldMetadata_BinderLocked =
+    ::protozero::proto_utils::FieldMetadata<
+      54,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BinderLockedFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BinderLocked kBinderLocked() { return {}; }
+  template <typename T = BinderLockedFtraceEvent> T* set_binder_locked() {
+    return BeginNestedMessage<T>(54);
+  }
+
+
+  using FieldMetadata_BinderUnlock =
+    ::protozero::proto_utils::FieldMetadata<
+      55,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BinderUnlockFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BinderUnlock kBinderUnlock() { return {}; }
+  template <typename T = BinderUnlockFtraceEvent> T* set_binder_unlock() {
+    return BeginNestedMessage<T>(55);
+  }
+
+
+  using FieldMetadata_WorkqueueActivateWork =
+    ::protozero::proto_utils::FieldMetadata<
+      56,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      WorkqueueActivateWorkFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WorkqueueActivateWork kWorkqueueActivateWork() { return {}; }
+  template <typename T = WorkqueueActivateWorkFtraceEvent> T* set_workqueue_activate_work() {
+    return BeginNestedMessage<T>(56);
+  }
+
+
+  using FieldMetadata_WorkqueueExecuteEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      57,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      WorkqueueExecuteEndFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WorkqueueExecuteEnd kWorkqueueExecuteEnd() { return {}; }
+  template <typename T = WorkqueueExecuteEndFtraceEvent> T* set_workqueue_execute_end() {
+    return BeginNestedMessage<T>(57);
+  }
+
+
+  using FieldMetadata_WorkqueueExecuteStart =
+    ::protozero::proto_utils::FieldMetadata<
+      58,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      WorkqueueExecuteStartFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WorkqueueExecuteStart kWorkqueueExecuteStart() { return {}; }
+  template <typename T = WorkqueueExecuteStartFtraceEvent> T* set_workqueue_execute_start() {
+    return BeginNestedMessage<T>(58);
+  }
+
+
+  using FieldMetadata_WorkqueueQueueWork =
+    ::protozero::proto_utils::FieldMetadata<
+      59,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      WorkqueueQueueWorkFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WorkqueueQueueWork kWorkqueueQueueWork() { return {}; }
+  template <typename T = WorkqueueQueueWorkFtraceEvent> T* set_workqueue_queue_work() {
+    return BeginNestedMessage<T>(59);
+  }
+
+
+  using FieldMetadata_RegulatorDisable =
+    ::protozero::proto_utils::FieldMetadata<
+      60,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      RegulatorDisableFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RegulatorDisable kRegulatorDisable() { return {}; }
+  template <typename T = RegulatorDisableFtraceEvent> T* set_regulator_disable() {
+    return BeginNestedMessage<T>(60);
+  }
+
+
+  using FieldMetadata_RegulatorDisableComplete =
+    ::protozero::proto_utils::FieldMetadata<
+      61,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      RegulatorDisableCompleteFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RegulatorDisableComplete kRegulatorDisableComplete() { return {}; }
+  template <typename T = RegulatorDisableCompleteFtraceEvent> T* set_regulator_disable_complete() {
+    return BeginNestedMessage<T>(61);
+  }
+
+
+  using FieldMetadata_RegulatorEnable =
+    ::protozero::proto_utils::FieldMetadata<
+      62,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      RegulatorEnableFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RegulatorEnable kRegulatorEnable() { return {}; }
+  template <typename T = RegulatorEnableFtraceEvent> T* set_regulator_enable() {
+    return BeginNestedMessage<T>(62);
+  }
+
+
+  using FieldMetadata_RegulatorEnableComplete =
+    ::protozero::proto_utils::FieldMetadata<
+      63,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      RegulatorEnableCompleteFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RegulatorEnableComplete kRegulatorEnableComplete() { return {}; }
+  template <typename T = RegulatorEnableCompleteFtraceEvent> T* set_regulator_enable_complete() {
+    return BeginNestedMessage<T>(63);
+  }
+
+
+  using FieldMetadata_RegulatorEnableDelay =
+    ::protozero::proto_utils::FieldMetadata<
+      64,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      RegulatorEnableDelayFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RegulatorEnableDelay kRegulatorEnableDelay() { return {}; }
+  template <typename T = RegulatorEnableDelayFtraceEvent> T* set_regulator_enable_delay() {
+    return BeginNestedMessage<T>(64);
+  }
+
+
+  using FieldMetadata_RegulatorSetVoltage =
+    ::protozero::proto_utils::FieldMetadata<
+      65,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      RegulatorSetVoltageFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RegulatorSetVoltage kRegulatorSetVoltage() { return {}; }
+  template <typename T = RegulatorSetVoltageFtraceEvent> T* set_regulator_set_voltage() {
+    return BeginNestedMessage<T>(65);
+  }
+
+
+  using FieldMetadata_RegulatorSetVoltageComplete =
+    ::protozero::proto_utils::FieldMetadata<
+      66,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      RegulatorSetVoltageCompleteFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RegulatorSetVoltageComplete kRegulatorSetVoltageComplete() { return {}; }
+  template <typename T = RegulatorSetVoltageCompleteFtraceEvent> T* set_regulator_set_voltage_complete() {
+    return BeginNestedMessage<T>(66);
+  }
+
+
+  using FieldMetadata_CgroupAttachTask =
+    ::protozero::proto_utils::FieldMetadata<
+      67,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CgroupAttachTaskFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CgroupAttachTask kCgroupAttachTask() { return {}; }
+  template <typename T = CgroupAttachTaskFtraceEvent> T* set_cgroup_attach_task() {
+    return BeginNestedMessage<T>(67);
+  }
+
+
+  using FieldMetadata_CgroupMkdir =
+    ::protozero::proto_utils::FieldMetadata<
+      68,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CgroupMkdirFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CgroupMkdir kCgroupMkdir() { return {}; }
+  template <typename T = CgroupMkdirFtraceEvent> T* set_cgroup_mkdir() {
+    return BeginNestedMessage<T>(68);
+  }
+
+
+  using FieldMetadata_CgroupRemount =
+    ::protozero::proto_utils::FieldMetadata<
+      69,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CgroupRemountFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CgroupRemount kCgroupRemount() { return {}; }
+  template <typename T = CgroupRemountFtraceEvent> T* set_cgroup_remount() {
+    return BeginNestedMessage<T>(69);
+  }
+
+
+  using FieldMetadata_CgroupRmdir =
+    ::protozero::proto_utils::FieldMetadata<
+      70,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CgroupRmdirFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CgroupRmdir kCgroupRmdir() { return {}; }
+  template <typename T = CgroupRmdirFtraceEvent> T* set_cgroup_rmdir() {
+    return BeginNestedMessage<T>(70);
+  }
+
+
+  using FieldMetadata_CgroupTransferTasks =
+    ::protozero::proto_utils::FieldMetadata<
+      71,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CgroupTransferTasksFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CgroupTransferTasks kCgroupTransferTasks() { return {}; }
+  template <typename T = CgroupTransferTasksFtraceEvent> T* set_cgroup_transfer_tasks() {
+    return BeginNestedMessage<T>(71);
+  }
+
+
+  using FieldMetadata_CgroupDestroyRoot =
+    ::protozero::proto_utils::FieldMetadata<
+      72,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CgroupDestroyRootFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CgroupDestroyRoot kCgroupDestroyRoot() { return {}; }
+  template <typename T = CgroupDestroyRootFtraceEvent> T* set_cgroup_destroy_root() {
+    return BeginNestedMessage<T>(72);
+  }
+
+
+  using FieldMetadata_CgroupRelease =
+    ::protozero::proto_utils::FieldMetadata<
+      73,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CgroupReleaseFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CgroupRelease kCgroupRelease() { return {}; }
+  template <typename T = CgroupReleaseFtraceEvent> T* set_cgroup_release() {
+    return BeginNestedMessage<T>(73);
+  }
+
+
+  using FieldMetadata_CgroupRename =
+    ::protozero::proto_utils::FieldMetadata<
+      74,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CgroupRenameFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CgroupRename kCgroupRename() { return {}; }
+  template <typename T = CgroupRenameFtraceEvent> T* set_cgroup_rename() {
+    return BeginNestedMessage<T>(74);
+  }
+
+
+  using FieldMetadata_CgroupSetupRoot =
+    ::protozero::proto_utils::FieldMetadata<
+      75,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CgroupSetupRootFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CgroupSetupRoot kCgroupSetupRoot() { return {}; }
+  template <typename T = CgroupSetupRootFtraceEvent> T* set_cgroup_setup_root() {
+    return BeginNestedMessage<T>(75);
+  }
+
+
+  using FieldMetadata_MdpCmdKickoff =
+    ::protozero::proto_utils::FieldMetadata<
+      76,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpCmdKickoffFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpCmdKickoff kMdpCmdKickoff() { return {}; }
+  template <typename T = MdpCmdKickoffFtraceEvent> T* set_mdp_cmd_kickoff() {
+    return BeginNestedMessage<T>(76);
+  }
+
+
+  using FieldMetadata_MdpCommit =
+    ::protozero::proto_utils::FieldMetadata<
+      77,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpCommitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpCommit kMdpCommit() { return {}; }
+  template <typename T = MdpCommitFtraceEvent> T* set_mdp_commit() {
+    return BeginNestedMessage<T>(77);
+  }
+
+
+  using FieldMetadata_MdpPerfSetOt =
+    ::protozero::proto_utils::FieldMetadata<
+      78,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpPerfSetOtFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpPerfSetOt kMdpPerfSetOt() { return {}; }
+  template <typename T = MdpPerfSetOtFtraceEvent> T* set_mdp_perf_set_ot() {
+    return BeginNestedMessage<T>(78);
+  }
+
+
+  using FieldMetadata_MdpSsppChange =
+    ::protozero::proto_utils::FieldMetadata<
+      79,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpSsppChangeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpSsppChange kMdpSsppChange() { return {}; }
+  template <typename T = MdpSsppChangeFtraceEvent> T* set_mdp_sspp_change() {
+    return BeginNestedMessage<T>(79);
+  }
+
+
+  using FieldMetadata_TracingMarkWrite =
+    ::protozero::proto_utils::FieldMetadata<
+      80,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TracingMarkWriteFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TracingMarkWrite kTracingMarkWrite() { return {}; }
+  template <typename T = TracingMarkWriteFtraceEvent> T* set_tracing_mark_write() {
+    return BeginNestedMessage<T>(80);
+  }
+
+
+  using FieldMetadata_MdpCmdPingpongDone =
+    ::protozero::proto_utils::FieldMetadata<
+      81,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpCmdPingpongDoneFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpCmdPingpongDone kMdpCmdPingpongDone() { return {}; }
+  template <typename T = MdpCmdPingpongDoneFtraceEvent> T* set_mdp_cmd_pingpong_done() {
+    return BeginNestedMessage<T>(81);
+  }
+
+
+  using FieldMetadata_MdpCompareBw =
+    ::protozero::proto_utils::FieldMetadata<
+      82,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpCompareBwFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpCompareBw kMdpCompareBw() { return {}; }
+  template <typename T = MdpCompareBwFtraceEvent> T* set_mdp_compare_bw() {
+    return BeginNestedMessage<T>(82);
+  }
+
+
+  using FieldMetadata_MdpPerfSetPanicLuts =
+    ::protozero::proto_utils::FieldMetadata<
+      83,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpPerfSetPanicLutsFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpPerfSetPanicLuts kMdpPerfSetPanicLuts() { return {}; }
+  template <typename T = MdpPerfSetPanicLutsFtraceEvent> T* set_mdp_perf_set_panic_luts() {
+    return BeginNestedMessage<T>(83);
+  }
+
+
+  using FieldMetadata_MdpSsppSet =
+    ::protozero::proto_utils::FieldMetadata<
+      84,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpSsppSetFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpSsppSet kMdpSsppSet() { return {}; }
+  template <typename T = MdpSsppSetFtraceEvent> T* set_mdp_sspp_set() {
+    return BeginNestedMessage<T>(84);
+  }
+
+
+  using FieldMetadata_MdpCmdReadptrDone =
+    ::protozero::proto_utils::FieldMetadata<
+      85,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpCmdReadptrDoneFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpCmdReadptrDone kMdpCmdReadptrDone() { return {}; }
+  template <typename T = MdpCmdReadptrDoneFtraceEvent> T* set_mdp_cmd_readptr_done() {
+    return BeginNestedMessage<T>(85);
+  }
+
+
+  using FieldMetadata_MdpMisrCrc =
+    ::protozero::proto_utils::FieldMetadata<
+      86,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpMisrCrcFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpMisrCrc kMdpMisrCrc() { return {}; }
+  template <typename T = MdpMisrCrcFtraceEvent> T* set_mdp_misr_crc() {
+    return BeginNestedMessage<T>(86);
+  }
+
+
+  using FieldMetadata_MdpPerfSetQosLuts =
+    ::protozero::proto_utils::FieldMetadata<
+      87,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpPerfSetQosLutsFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpPerfSetQosLuts kMdpPerfSetQosLuts() { return {}; }
+  template <typename T = MdpPerfSetQosLutsFtraceEvent> T* set_mdp_perf_set_qos_luts() {
+    return BeginNestedMessage<T>(87);
+  }
+
+
+  using FieldMetadata_MdpTraceCounter =
+    ::protozero::proto_utils::FieldMetadata<
+      88,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpTraceCounterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpTraceCounter kMdpTraceCounter() { return {}; }
+  template <typename T = MdpTraceCounterFtraceEvent> T* set_mdp_trace_counter() {
+    return BeginNestedMessage<T>(88);
+  }
+
+
+  using FieldMetadata_MdpCmdReleaseBw =
+    ::protozero::proto_utils::FieldMetadata<
+      89,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpCmdReleaseBwFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpCmdReleaseBw kMdpCmdReleaseBw() { return {}; }
+  template <typename T = MdpCmdReleaseBwFtraceEvent> T* set_mdp_cmd_release_bw() {
+    return BeginNestedMessage<T>(89);
+  }
+
+
+  using FieldMetadata_MdpMixerUpdate =
+    ::protozero::proto_utils::FieldMetadata<
+      90,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpMixerUpdateFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpMixerUpdate kMdpMixerUpdate() { return {}; }
+  template <typename T = MdpMixerUpdateFtraceEvent> T* set_mdp_mixer_update() {
+    return BeginNestedMessage<T>(90);
+  }
+
+
+  using FieldMetadata_MdpPerfSetWmLevels =
+    ::protozero::proto_utils::FieldMetadata<
+      91,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpPerfSetWmLevelsFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpPerfSetWmLevels kMdpPerfSetWmLevels() { return {}; }
+  template <typename T = MdpPerfSetWmLevelsFtraceEvent> T* set_mdp_perf_set_wm_levels() {
+    return BeginNestedMessage<T>(91);
+  }
+
+
+  using FieldMetadata_MdpVideoUnderrunDone =
+    ::protozero::proto_utils::FieldMetadata<
+      92,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpVideoUnderrunDoneFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpVideoUnderrunDone kMdpVideoUnderrunDone() { return {}; }
+  template <typename T = MdpVideoUnderrunDoneFtraceEvent> T* set_mdp_video_underrun_done() {
+    return BeginNestedMessage<T>(92);
+  }
+
+
+  using FieldMetadata_MdpCmdWaitPingpong =
+    ::protozero::proto_utils::FieldMetadata<
+      93,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpCmdWaitPingpongFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpCmdWaitPingpong kMdpCmdWaitPingpong() { return {}; }
+  template <typename T = MdpCmdWaitPingpongFtraceEvent> T* set_mdp_cmd_wait_pingpong() {
+    return BeginNestedMessage<T>(93);
+  }
+
+
+  using FieldMetadata_MdpPerfPrefillCalc =
+    ::protozero::proto_utils::FieldMetadata<
+      94,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpPerfPrefillCalcFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpPerfPrefillCalc kMdpPerfPrefillCalc() { return {}; }
+  template <typename T = MdpPerfPrefillCalcFtraceEvent> T* set_mdp_perf_prefill_calc() {
+    return BeginNestedMessage<T>(94);
+  }
+
+
+  using FieldMetadata_MdpPerfUpdateBus =
+    ::protozero::proto_utils::FieldMetadata<
+      95,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MdpPerfUpdateBusFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdpPerfUpdateBus kMdpPerfUpdateBus() { return {}; }
+  template <typename T = MdpPerfUpdateBusFtraceEvent> T* set_mdp_perf_update_bus() {
+    return BeginNestedMessage<T>(95);
+  }
+
+
+  using FieldMetadata_RotatorBwAoAsContext =
+    ::protozero::proto_utils::FieldMetadata<
+      96,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      RotatorBwAoAsContextFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RotatorBwAoAsContext kRotatorBwAoAsContext() { return {}; }
+  template <typename T = RotatorBwAoAsContextFtraceEvent> T* set_rotator_bw_ao_as_context() {
+    return BeginNestedMessage<T>(96);
+  }
+
+
+  using FieldMetadata_MmFilemapAddToPageCache =
+    ::protozero::proto_utils::FieldMetadata<
+      97,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmFilemapAddToPageCacheFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmFilemapAddToPageCache kMmFilemapAddToPageCache() { return {}; }
+  template <typename T = MmFilemapAddToPageCacheFtraceEvent> T* set_mm_filemap_add_to_page_cache() {
+    return BeginNestedMessage<T>(97);
+  }
+
+
+  using FieldMetadata_MmFilemapDeleteFromPageCache =
+    ::protozero::proto_utils::FieldMetadata<
+      98,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmFilemapDeleteFromPageCacheFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmFilemapDeleteFromPageCache kMmFilemapDeleteFromPageCache() { return {}; }
+  template <typename T = MmFilemapDeleteFromPageCacheFtraceEvent> T* set_mm_filemap_delete_from_page_cache() {
+    return BeginNestedMessage<T>(98);
+  }
+
+
+  using FieldMetadata_MmCompactionBegin =
+    ::protozero::proto_utils::FieldMetadata<
+      99,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmCompactionBeginFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmCompactionBegin kMmCompactionBegin() { return {}; }
+  template <typename T = MmCompactionBeginFtraceEvent> T* set_mm_compaction_begin() {
+    return BeginNestedMessage<T>(99);
+  }
+
+
+  using FieldMetadata_MmCompactionDeferCompaction =
+    ::protozero::proto_utils::FieldMetadata<
+      100,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmCompactionDeferCompactionFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmCompactionDeferCompaction kMmCompactionDeferCompaction() { return {}; }
+  template <typename T = MmCompactionDeferCompactionFtraceEvent> T* set_mm_compaction_defer_compaction() {
+    return BeginNestedMessage<T>(100);
+  }
+
+
+  using FieldMetadata_MmCompactionDeferred =
+    ::protozero::proto_utils::FieldMetadata<
+      101,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmCompactionDeferredFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmCompactionDeferred kMmCompactionDeferred() { return {}; }
+  template <typename T = MmCompactionDeferredFtraceEvent> T* set_mm_compaction_deferred() {
+    return BeginNestedMessage<T>(101);
+  }
+
+
+  using FieldMetadata_MmCompactionDeferReset =
+    ::protozero::proto_utils::FieldMetadata<
+      102,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmCompactionDeferResetFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmCompactionDeferReset kMmCompactionDeferReset() { return {}; }
+  template <typename T = MmCompactionDeferResetFtraceEvent> T* set_mm_compaction_defer_reset() {
+    return BeginNestedMessage<T>(102);
+  }
+
+
+  using FieldMetadata_MmCompactionEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      103,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmCompactionEndFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmCompactionEnd kMmCompactionEnd() { return {}; }
+  template <typename T = MmCompactionEndFtraceEvent> T* set_mm_compaction_end() {
+    return BeginNestedMessage<T>(103);
+  }
+
+
+  using FieldMetadata_MmCompactionFinished =
+    ::protozero::proto_utils::FieldMetadata<
+      104,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmCompactionFinishedFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmCompactionFinished kMmCompactionFinished() { return {}; }
+  template <typename T = MmCompactionFinishedFtraceEvent> T* set_mm_compaction_finished() {
+    return BeginNestedMessage<T>(104);
+  }
+
+
+  using FieldMetadata_MmCompactionIsolateFreepages =
+    ::protozero::proto_utils::FieldMetadata<
+      105,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmCompactionIsolateFreepagesFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmCompactionIsolateFreepages kMmCompactionIsolateFreepages() { return {}; }
+  template <typename T = MmCompactionIsolateFreepagesFtraceEvent> T* set_mm_compaction_isolate_freepages() {
+    return BeginNestedMessage<T>(105);
+  }
+
+
+  using FieldMetadata_MmCompactionIsolateMigratepages =
+    ::protozero::proto_utils::FieldMetadata<
+      106,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmCompactionIsolateMigratepagesFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmCompactionIsolateMigratepages kMmCompactionIsolateMigratepages() { return {}; }
+  template <typename T = MmCompactionIsolateMigratepagesFtraceEvent> T* set_mm_compaction_isolate_migratepages() {
+    return BeginNestedMessage<T>(106);
+  }
+
+
+  using FieldMetadata_MmCompactionKcompactdSleep =
+    ::protozero::proto_utils::FieldMetadata<
+      107,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmCompactionKcompactdSleepFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmCompactionKcompactdSleep kMmCompactionKcompactdSleep() { return {}; }
+  template <typename T = MmCompactionKcompactdSleepFtraceEvent> T* set_mm_compaction_kcompactd_sleep() {
+    return BeginNestedMessage<T>(107);
+  }
+
+
+  using FieldMetadata_MmCompactionKcompactdWake =
+    ::protozero::proto_utils::FieldMetadata<
+      108,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmCompactionKcompactdWakeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmCompactionKcompactdWake kMmCompactionKcompactdWake() { return {}; }
+  template <typename T = MmCompactionKcompactdWakeFtraceEvent> T* set_mm_compaction_kcompactd_wake() {
+    return BeginNestedMessage<T>(108);
+  }
+
+
+  using FieldMetadata_MmCompactionMigratepages =
+    ::protozero::proto_utils::FieldMetadata<
+      109,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmCompactionMigratepagesFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmCompactionMigratepages kMmCompactionMigratepages() { return {}; }
+  template <typename T = MmCompactionMigratepagesFtraceEvent> T* set_mm_compaction_migratepages() {
+    return BeginNestedMessage<T>(109);
+  }
+
+
+  using FieldMetadata_MmCompactionSuitable =
+    ::protozero::proto_utils::FieldMetadata<
+      110,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmCompactionSuitableFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmCompactionSuitable kMmCompactionSuitable() { return {}; }
+  template <typename T = MmCompactionSuitableFtraceEvent> T* set_mm_compaction_suitable() {
+    return BeginNestedMessage<T>(110);
+  }
+
+
+  using FieldMetadata_MmCompactionTryToCompactPages =
+    ::protozero::proto_utils::FieldMetadata<
+      111,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmCompactionTryToCompactPagesFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmCompactionTryToCompactPages kMmCompactionTryToCompactPages() { return {}; }
+  template <typename T = MmCompactionTryToCompactPagesFtraceEvent> T* set_mm_compaction_try_to_compact_pages() {
+    return BeginNestedMessage<T>(111);
+  }
+
+
+  using FieldMetadata_MmCompactionWakeupKcompactd =
+    ::protozero::proto_utils::FieldMetadata<
+      112,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmCompactionWakeupKcompactdFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmCompactionWakeupKcompactd kMmCompactionWakeupKcompactd() { return {}; }
+  template <typename T = MmCompactionWakeupKcompactdFtraceEvent> T* set_mm_compaction_wakeup_kcompactd() {
+    return BeginNestedMessage<T>(112);
+  }
+
+
+  using FieldMetadata_SuspendResume =
+    ::protozero::proto_utils::FieldMetadata<
+      113,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SuspendResumeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SuspendResume kSuspendResume() { return {}; }
+  template <typename T = SuspendResumeFtraceEvent> T* set_suspend_resume() {
+    return BeginNestedMessage<T>(113);
+  }
+
+
+  using FieldMetadata_SchedWakeupNew =
+    ::protozero::proto_utils::FieldMetadata<
+      114,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SchedWakeupNewFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SchedWakeupNew kSchedWakeupNew() { return {}; }
+  template <typename T = SchedWakeupNewFtraceEvent> T* set_sched_wakeup_new() {
+    return BeginNestedMessage<T>(114);
+  }
+
+
+  using FieldMetadata_BlockBioBackmerge =
+    ::protozero::proto_utils::FieldMetadata<
+      115,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockBioBackmergeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockBioBackmerge kBlockBioBackmerge() { return {}; }
+  template <typename T = BlockBioBackmergeFtraceEvent> T* set_block_bio_backmerge() {
+    return BeginNestedMessage<T>(115);
+  }
+
+
+  using FieldMetadata_BlockBioBounce =
+    ::protozero::proto_utils::FieldMetadata<
+      116,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockBioBounceFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockBioBounce kBlockBioBounce() { return {}; }
+  template <typename T = BlockBioBounceFtraceEvent> T* set_block_bio_bounce() {
+    return BeginNestedMessage<T>(116);
+  }
+
+
+  using FieldMetadata_BlockBioComplete =
+    ::protozero::proto_utils::FieldMetadata<
+      117,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockBioCompleteFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockBioComplete kBlockBioComplete() { return {}; }
+  template <typename T = BlockBioCompleteFtraceEvent> T* set_block_bio_complete() {
+    return BeginNestedMessage<T>(117);
+  }
+
+
+  using FieldMetadata_BlockBioFrontmerge =
+    ::protozero::proto_utils::FieldMetadata<
+      118,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockBioFrontmergeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockBioFrontmerge kBlockBioFrontmerge() { return {}; }
+  template <typename T = BlockBioFrontmergeFtraceEvent> T* set_block_bio_frontmerge() {
+    return BeginNestedMessage<T>(118);
+  }
+
+
+  using FieldMetadata_BlockBioQueue =
+    ::protozero::proto_utils::FieldMetadata<
+      119,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockBioQueueFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockBioQueue kBlockBioQueue() { return {}; }
+  template <typename T = BlockBioQueueFtraceEvent> T* set_block_bio_queue() {
+    return BeginNestedMessage<T>(119);
+  }
+
+
+  using FieldMetadata_BlockBioRemap =
+    ::protozero::proto_utils::FieldMetadata<
+      120,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockBioRemapFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockBioRemap kBlockBioRemap() { return {}; }
+  template <typename T = BlockBioRemapFtraceEvent> T* set_block_bio_remap() {
+    return BeginNestedMessage<T>(120);
+  }
+
+
+  using FieldMetadata_BlockDirtyBuffer =
+    ::protozero::proto_utils::FieldMetadata<
+      121,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockDirtyBufferFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockDirtyBuffer kBlockDirtyBuffer() { return {}; }
+  template <typename T = BlockDirtyBufferFtraceEvent> T* set_block_dirty_buffer() {
+    return BeginNestedMessage<T>(121);
+  }
+
+
+  using FieldMetadata_BlockGetrq =
+    ::protozero::proto_utils::FieldMetadata<
+      122,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockGetrqFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockGetrq kBlockGetrq() { return {}; }
+  template <typename T = BlockGetrqFtraceEvent> T* set_block_getrq() {
+    return BeginNestedMessage<T>(122);
+  }
+
+
+  using FieldMetadata_BlockPlug =
+    ::protozero::proto_utils::FieldMetadata<
+      123,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockPlugFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockPlug kBlockPlug() { return {}; }
+  template <typename T = BlockPlugFtraceEvent> T* set_block_plug() {
+    return BeginNestedMessage<T>(123);
+  }
+
+
+  using FieldMetadata_BlockRqAbort =
+    ::protozero::proto_utils::FieldMetadata<
+      124,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockRqAbortFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockRqAbort kBlockRqAbort() { return {}; }
+  template <typename T = BlockRqAbortFtraceEvent> T* set_block_rq_abort() {
+    return BeginNestedMessage<T>(124);
+  }
+
+
+  using FieldMetadata_BlockRqComplete =
+    ::protozero::proto_utils::FieldMetadata<
+      125,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockRqCompleteFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockRqComplete kBlockRqComplete() { return {}; }
+  template <typename T = BlockRqCompleteFtraceEvent> T* set_block_rq_complete() {
+    return BeginNestedMessage<T>(125);
+  }
+
+
+  using FieldMetadata_BlockRqInsert =
+    ::protozero::proto_utils::FieldMetadata<
+      126,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockRqInsertFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockRqInsert kBlockRqInsert() { return {}; }
+  template <typename T = BlockRqInsertFtraceEvent> T* set_block_rq_insert() {
+    return BeginNestedMessage<T>(126);
+  }
+
+
+  using FieldMetadata_BlockRqRemap =
+    ::protozero::proto_utils::FieldMetadata<
+      128,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockRqRemapFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockRqRemap kBlockRqRemap() { return {}; }
+  template <typename T = BlockRqRemapFtraceEvent> T* set_block_rq_remap() {
+    return BeginNestedMessage<T>(128);
+  }
+
+
+  using FieldMetadata_BlockRqRequeue =
+    ::protozero::proto_utils::FieldMetadata<
+      129,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockRqRequeueFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockRqRequeue kBlockRqRequeue() { return {}; }
+  template <typename T = BlockRqRequeueFtraceEvent> T* set_block_rq_requeue() {
+    return BeginNestedMessage<T>(129);
+  }
+
+
+  using FieldMetadata_BlockSleeprq =
+    ::protozero::proto_utils::FieldMetadata<
+      130,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockSleeprqFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockSleeprq kBlockSleeprq() { return {}; }
+  template <typename T = BlockSleeprqFtraceEvent> T* set_block_sleeprq() {
+    return BeginNestedMessage<T>(130);
+  }
+
+
+  using FieldMetadata_BlockSplit =
+    ::protozero::proto_utils::FieldMetadata<
+      131,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockSplitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockSplit kBlockSplit() { return {}; }
+  template <typename T = BlockSplitFtraceEvent> T* set_block_split() {
+    return BeginNestedMessage<T>(131);
+  }
+
+
+  using FieldMetadata_BlockTouchBuffer =
+    ::protozero::proto_utils::FieldMetadata<
+      132,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockTouchBufferFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockTouchBuffer kBlockTouchBuffer() { return {}; }
+  template <typename T = BlockTouchBufferFtraceEvent> T* set_block_touch_buffer() {
+    return BeginNestedMessage<T>(132);
+  }
+
+
+  using FieldMetadata_BlockUnplug =
+    ::protozero::proto_utils::FieldMetadata<
+      133,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BlockUnplugFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockUnplug kBlockUnplug() { return {}; }
+  template <typename T = BlockUnplugFtraceEvent> T* set_block_unplug() {
+    return BeginNestedMessage<T>(133);
+  }
+
+
+  using FieldMetadata_Ext4AllocDaBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      134,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4AllocDaBlocksFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4AllocDaBlocks kExt4AllocDaBlocks() { return {}; }
+  template <typename T = Ext4AllocDaBlocksFtraceEvent> T* set_ext4_alloc_da_blocks() {
+    return BeginNestedMessage<T>(134);
+  }
+
+
+  using FieldMetadata_Ext4AllocateBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      135,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4AllocateBlocksFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4AllocateBlocks kExt4AllocateBlocks() { return {}; }
+  template <typename T = Ext4AllocateBlocksFtraceEvent> T* set_ext4_allocate_blocks() {
+    return BeginNestedMessage<T>(135);
+  }
+
+
+  using FieldMetadata_Ext4AllocateInode =
+    ::protozero::proto_utils::FieldMetadata<
+      136,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4AllocateInodeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4AllocateInode kExt4AllocateInode() { return {}; }
+  template <typename T = Ext4AllocateInodeFtraceEvent> T* set_ext4_allocate_inode() {
+    return BeginNestedMessage<T>(136);
+  }
+
+
+  using FieldMetadata_Ext4BeginOrderedTruncate =
+    ::protozero::proto_utils::FieldMetadata<
+      137,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4BeginOrderedTruncateFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4BeginOrderedTruncate kExt4BeginOrderedTruncate() { return {}; }
+  template <typename T = Ext4BeginOrderedTruncateFtraceEvent> T* set_ext4_begin_ordered_truncate() {
+    return BeginNestedMessage<T>(137);
+  }
+
+
+  using FieldMetadata_Ext4CollapseRange =
+    ::protozero::proto_utils::FieldMetadata<
+      138,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4CollapseRangeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4CollapseRange kExt4CollapseRange() { return {}; }
+  template <typename T = Ext4CollapseRangeFtraceEvent> T* set_ext4_collapse_range() {
+    return BeginNestedMessage<T>(138);
+  }
+
+
+  using FieldMetadata_Ext4DaReleaseSpace =
+    ::protozero::proto_utils::FieldMetadata<
+      139,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4DaReleaseSpaceFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4DaReleaseSpace kExt4DaReleaseSpace() { return {}; }
+  template <typename T = Ext4DaReleaseSpaceFtraceEvent> T* set_ext4_da_release_space() {
+    return BeginNestedMessage<T>(139);
+  }
+
+
+  using FieldMetadata_Ext4DaReserveSpace =
+    ::protozero::proto_utils::FieldMetadata<
+      140,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4DaReserveSpaceFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4DaReserveSpace kExt4DaReserveSpace() { return {}; }
+  template <typename T = Ext4DaReserveSpaceFtraceEvent> T* set_ext4_da_reserve_space() {
+    return BeginNestedMessage<T>(140);
+  }
+
+
+  using FieldMetadata_Ext4DaUpdateReserveSpace =
+    ::protozero::proto_utils::FieldMetadata<
+      141,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4DaUpdateReserveSpaceFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4DaUpdateReserveSpace kExt4DaUpdateReserveSpace() { return {}; }
+  template <typename T = Ext4DaUpdateReserveSpaceFtraceEvent> T* set_ext4_da_update_reserve_space() {
+    return BeginNestedMessage<T>(141);
+  }
+
+
+  using FieldMetadata_Ext4DaWritePages =
+    ::protozero::proto_utils::FieldMetadata<
+      142,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4DaWritePagesFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4DaWritePages kExt4DaWritePages() { return {}; }
+  template <typename T = Ext4DaWritePagesFtraceEvent> T* set_ext4_da_write_pages() {
+    return BeginNestedMessage<T>(142);
+  }
+
+
+  using FieldMetadata_Ext4DaWritePagesExtent =
+    ::protozero::proto_utils::FieldMetadata<
+      143,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4DaWritePagesExtentFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4DaWritePagesExtent kExt4DaWritePagesExtent() { return {}; }
+  template <typename T = Ext4DaWritePagesExtentFtraceEvent> T* set_ext4_da_write_pages_extent() {
+    return BeginNestedMessage<T>(143);
+  }
+
+
+  using FieldMetadata_Ext4DirectIOEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      144,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4DirectIOEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4DirectIOEnter kExt4DirectIOEnter() { return {}; }
+  template <typename T = Ext4DirectIOEnterFtraceEvent> T* set_ext4_direct_io_enter() {
+    return BeginNestedMessage<T>(144);
+  }
+
+
+  using FieldMetadata_Ext4DirectIOExit =
+    ::protozero::proto_utils::FieldMetadata<
+      145,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4DirectIOExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4DirectIOExit kExt4DirectIOExit() { return {}; }
+  template <typename T = Ext4DirectIOExitFtraceEvent> T* set_ext4_direct_io_exit() {
+    return BeginNestedMessage<T>(145);
+  }
+
+
+  using FieldMetadata_Ext4DiscardBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      146,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4DiscardBlocksFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4DiscardBlocks kExt4DiscardBlocks() { return {}; }
+  template <typename T = Ext4DiscardBlocksFtraceEvent> T* set_ext4_discard_blocks() {
+    return BeginNestedMessage<T>(146);
+  }
+
+
+  using FieldMetadata_Ext4DiscardPreallocations =
+    ::protozero::proto_utils::FieldMetadata<
+      147,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4DiscardPreallocationsFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4DiscardPreallocations kExt4DiscardPreallocations() { return {}; }
+  template <typename T = Ext4DiscardPreallocationsFtraceEvent> T* set_ext4_discard_preallocations() {
+    return BeginNestedMessage<T>(147);
+  }
+
+
+  using FieldMetadata_Ext4DropInode =
+    ::protozero::proto_utils::FieldMetadata<
+      148,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4DropInodeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4DropInode kExt4DropInode() { return {}; }
+  template <typename T = Ext4DropInodeFtraceEvent> T* set_ext4_drop_inode() {
+    return BeginNestedMessage<T>(148);
+  }
+
+
+  using FieldMetadata_Ext4EsCacheExtent =
+    ::protozero::proto_utils::FieldMetadata<
+      149,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4EsCacheExtentFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4EsCacheExtent kExt4EsCacheExtent() { return {}; }
+  template <typename T = Ext4EsCacheExtentFtraceEvent> T* set_ext4_es_cache_extent() {
+    return BeginNestedMessage<T>(149);
+  }
+
+
+  using FieldMetadata_Ext4EsFindDelayedExtentRangeEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      150,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4EsFindDelayedExtentRangeEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4EsFindDelayedExtentRangeEnter kExt4EsFindDelayedExtentRangeEnter() { return {}; }
+  template <typename T = Ext4EsFindDelayedExtentRangeEnterFtraceEvent> T* set_ext4_es_find_delayed_extent_range_enter() {
+    return BeginNestedMessage<T>(150);
+  }
+
+
+  using FieldMetadata_Ext4EsFindDelayedExtentRangeExit =
+    ::protozero::proto_utils::FieldMetadata<
+      151,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4EsFindDelayedExtentRangeExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4EsFindDelayedExtentRangeExit kExt4EsFindDelayedExtentRangeExit() { return {}; }
+  template <typename T = Ext4EsFindDelayedExtentRangeExitFtraceEvent> T* set_ext4_es_find_delayed_extent_range_exit() {
+    return BeginNestedMessage<T>(151);
+  }
+
+
+  using FieldMetadata_Ext4EsInsertExtent =
+    ::protozero::proto_utils::FieldMetadata<
+      152,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4EsInsertExtentFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4EsInsertExtent kExt4EsInsertExtent() { return {}; }
+  template <typename T = Ext4EsInsertExtentFtraceEvent> T* set_ext4_es_insert_extent() {
+    return BeginNestedMessage<T>(152);
+  }
+
+
+  using FieldMetadata_Ext4EsLookupExtentEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      153,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4EsLookupExtentEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4EsLookupExtentEnter kExt4EsLookupExtentEnter() { return {}; }
+  template <typename T = Ext4EsLookupExtentEnterFtraceEvent> T* set_ext4_es_lookup_extent_enter() {
+    return BeginNestedMessage<T>(153);
+  }
+
+
+  using FieldMetadata_Ext4EsLookupExtentExit =
+    ::protozero::proto_utils::FieldMetadata<
+      154,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4EsLookupExtentExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4EsLookupExtentExit kExt4EsLookupExtentExit() { return {}; }
+  template <typename T = Ext4EsLookupExtentExitFtraceEvent> T* set_ext4_es_lookup_extent_exit() {
+    return BeginNestedMessage<T>(154);
+  }
+
+
+  using FieldMetadata_Ext4EsRemoveExtent =
+    ::protozero::proto_utils::FieldMetadata<
+      155,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4EsRemoveExtentFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4EsRemoveExtent kExt4EsRemoveExtent() { return {}; }
+  template <typename T = Ext4EsRemoveExtentFtraceEvent> T* set_ext4_es_remove_extent() {
+    return BeginNestedMessage<T>(155);
+  }
+
+
+  using FieldMetadata_Ext4EsShrink =
+    ::protozero::proto_utils::FieldMetadata<
+      156,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4EsShrinkFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4EsShrink kExt4EsShrink() { return {}; }
+  template <typename T = Ext4EsShrinkFtraceEvent> T* set_ext4_es_shrink() {
+    return BeginNestedMessage<T>(156);
+  }
+
+
+  using FieldMetadata_Ext4EsShrinkCount =
+    ::protozero::proto_utils::FieldMetadata<
+      157,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4EsShrinkCountFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4EsShrinkCount kExt4EsShrinkCount() { return {}; }
+  template <typename T = Ext4EsShrinkCountFtraceEvent> T* set_ext4_es_shrink_count() {
+    return BeginNestedMessage<T>(157);
+  }
+
+
+  using FieldMetadata_Ext4EsShrinkScanEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      158,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4EsShrinkScanEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4EsShrinkScanEnter kExt4EsShrinkScanEnter() { return {}; }
+  template <typename T = Ext4EsShrinkScanEnterFtraceEvent> T* set_ext4_es_shrink_scan_enter() {
+    return BeginNestedMessage<T>(158);
+  }
+
+
+  using FieldMetadata_Ext4EsShrinkScanExit =
+    ::protozero::proto_utils::FieldMetadata<
+      159,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4EsShrinkScanExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4EsShrinkScanExit kExt4EsShrinkScanExit() { return {}; }
+  template <typename T = Ext4EsShrinkScanExitFtraceEvent> T* set_ext4_es_shrink_scan_exit() {
+    return BeginNestedMessage<T>(159);
+  }
+
+
+  using FieldMetadata_Ext4EvictInode =
+    ::protozero::proto_utils::FieldMetadata<
+      160,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4EvictInodeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4EvictInode kExt4EvictInode() { return {}; }
+  template <typename T = Ext4EvictInodeFtraceEvent> T* set_ext4_evict_inode() {
+    return BeginNestedMessage<T>(160);
+  }
+
+
+  using FieldMetadata_Ext4ExtConvertToInitializedEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      161,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ExtConvertToInitializedEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4ExtConvertToInitializedEnter kExt4ExtConvertToInitializedEnter() { return {}; }
+  template <typename T = Ext4ExtConvertToInitializedEnterFtraceEvent> T* set_ext4_ext_convert_to_initialized_enter() {
+    return BeginNestedMessage<T>(161);
+  }
+
+
+  using FieldMetadata_Ext4ExtConvertToInitializedFastpath =
+    ::protozero::proto_utils::FieldMetadata<
+      162,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ExtConvertToInitializedFastpathFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4ExtConvertToInitializedFastpath kExt4ExtConvertToInitializedFastpath() { return {}; }
+  template <typename T = Ext4ExtConvertToInitializedFastpathFtraceEvent> T* set_ext4_ext_convert_to_initialized_fastpath() {
+    return BeginNestedMessage<T>(162);
+  }
+
+
+  using FieldMetadata_Ext4ExtHandleUnwrittenExtents =
+    ::protozero::proto_utils::FieldMetadata<
+      163,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ExtHandleUnwrittenExtentsFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4ExtHandleUnwrittenExtents kExt4ExtHandleUnwrittenExtents() { return {}; }
+  template <typename T = Ext4ExtHandleUnwrittenExtentsFtraceEvent> T* set_ext4_ext_handle_unwritten_extents() {
+    return BeginNestedMessage<T>(163);
+  }
+
+
+  using FieldMetadata_Ext4ExtInCache =
+    ::protozero::proto_utils::FieldMetadata<
+      164,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ExtInCacheFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4ExtInCache kExt4ExtInCache() { return {}; }
+  template <typename T = Ext4ExtInCacheFtraceEvent> T* set_ext4_ext_in_cache() {
+    return BeginNestedMessage<T>(164);
+  }
+
+
+  using FieldMetadata_Ext4ExtLoadExtent =
+    ::protozero::proto_utils::FieldMetadata<
+      165,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ExtLoadExtentFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4ExtLoadExtent kExt4ExtLoadExtent() { return {}; }
+  template <typename T = Ext4ExtLoadExtentFtraceEvent> T* set_ext4_ext_load_extent() {
+    return BeginNestedMessage<T>(165);
+  }
+
+
+  using FieldMetadata_Ext4ExtMapBlocksEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      166,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ExtMapBlocksEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4ExtMapBlocksEnter kExt4ExtMapBlocksEnter() { return {}; }
+  template <typename T = Ext4ExtMapBlocksEnterFtraceEvent> T* set_ext4_ext_map_blocks_enter() {
+    return BeginNestedMessage<T>(166);
+  }
+
+
+  using FieldMetadata_Ext4ExtMapBlocksExit =
+    ::protozero::proto_utils::FieldMetadata<
+      167,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ExtMapBlocksExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4ExtMapBlocksExit kExt4ExtMapBlocksExit() { return {}; }
+  template <typename T = Ext4ExtMapBlocksExitFtraceEvent> T* set_ext4_ext_map_blocks_exit() {
+    return BeginNestedMessage<T>(167);
+  }
+
+
+  using FieldMetadata_Ext4ExtPutInCache =
+    ::protozero::proto_utils::FieldMetadata<
+      168,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ExtPutInCacheFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4ExtPutInCache kExt4ExtPutInCache() { return {}; }
+  template <typename T = Ext4ExtPutInCacheFtraceEvent> T* set_ext4_ext_put_in_cache() {
+    return BeginNestedMessage<T>(168);
+  }
+
+
+  using FieldMetadata_Ext4ExtRemoveSpace =
+    ::protozero::proto_utils::FieldMetadata<
+      169,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ExtRemoveSpaceFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4ExtRemoveSpace kExt4ExtRemoveSpace() { return {}; }
+  template <typename T = Ext4ExtRemoveSpaceFtraceEvent> T* set_ext4_ext_remove_space() {
+    return BeginNestedMessage<T>(169);
+  }
+
+
+  using FieldMetadata_Ext4ExtRemoveSpaceDone =
+    ::protozero::proto_utils::FieldMetadata<
+      170,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ExtRemoveSpaceDoneFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4ExtRemoveSpaceDone kExt4ExtRemoveSpaceDone() { return {}; }
+  template <typename T = Ext4ExtRemoveSpaceDoneFtraceEvent> T* set_ext4_ext_remove_space_done() {
+    return BeginNestedMessage<T>(170);
+  }
+
+
+  using FieldMetadata_Ext4ExtRmIdx =
+    ::protozero::proto_utils::FieldMetadata<
+      171,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ExtRmIdxFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4ExtRmIdx kExt4ExtRmIdx() { return {}; }
+  template <typename T = Ext4ExtRmIdxFtraceEvent> T* set_ext4_ext_rm_idx() {
+    return BeginNestedMessage<T>(171);
+  }
+
+
+  using FieldMetadata_Ext4ExtRmLeaf =
+    ::protozero::proto_utils::FieldMetadata<
+      172,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ExtRmLeafFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4ExtRmLeaf kExt4ExtRmLeaf() { return {}; }
+  template <typename T = Ext4ExtRmLeafFtraceEvent> T* set_ext4_ext_rm_leaf() {
+    return BeginNestedMessage<T>(172);
+  }
+
+
+  using FieldMetadata_Ext4ExtShowExtent =
+    ::protozero::proto_utils::FieldMetadata<
+      173,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ExtShowExtentFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4ExtShowExtent kExt4ExtShowExtent() { return {}; }
+  template <typename T = Ext4ExtShowExtentFtraceEvent> T* set_ext4_ext_show_extent() {
+    return BeginNestedMessage<T>(173);
+  }
+
+
+  using FieldMetadata_Ext4FallocateEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      174,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4FallocateEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4FallocateEnter kExt4FallocateEnter() { return {}; }
+  template <typename T = Ext4FallocateEnterFtraceEvent> T* set_ext4_fallocate_enter() {
+    return BeginNestedMessage<T>(174);
+  }
+
+
+  using FieldMetadata_Ext4FallocateExit =
+    ::protozero::proto_utils::FieldMetadata<
+      175,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4FallocateExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4FallocateExit kExt4FallocateExit() { return {}; }
+  template <typename T = Ext4FallocateExitFtraceEvent> T* set_ext4_fallocate_exit() {
+    return BeginNestedMessage<T>(175);
+  }
+
+
+  using FieldMetadata_Ext4FindDelallocRange =
+    ::protozero::proto_utils::FieldMetadata<
+      176,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4FindDelallocRangeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4FindDelallocRange kExt4FindDelallocRange() { return {}; }
+  template <typename T = Ext4FindDelallocRangeFtraceEvent> T* set_ext4_find_delalloc_range() {
+    return BeginNestedMessage<T>(176);
+  }
+
+
+  using FieldMetadata_Ext4Forget =
+    ::protozero::proto_utils::FieldMetadata<
+      177,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ForgetFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4Forget kExt4Forget() { return {}; }
+  template <typename T = Ext4ForgetFtraceEvent> T* set_ext4_forget() {
+    return BeginNestedMessage<T>(177);
+  }
+
+
+  using FieldMetadata_Ext4FreeBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      178,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4FreeBlocksFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4FreeBlocks kExt4FreeBlocks() { return {}; }
+  template <typename T = Ext4FreeBlocksFtraceEvent> T* set_ext4_free_blocks() {
+    return BeginNestedMessage<T>(178);
+  }
+
+
+  using FieldMetadata_Ext4FreeInode =
+    ::protozero::proto_utils::FieldMetadata<
+      179,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4FreeInodeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4FreeInode kExt4FreeInode() { return {}; }
+  template <typename T = Ext4FreeInodeFtraceEvent> T* set_ext4_free_inode() {
+    return BeginNestedMessage<T>(179);
+  }
+
+
+  using FieldMetadata_Ext4GetImpliedClusterAllocExit =
+    ::protozero::proto_utils::FieldMetadata<
+      180,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4GetImpliedClusterAllocExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4GetImpliedClusterAllocExit kExt4GetImpliedClusterAllocExit() { return {}; }
+  template <typename T = Ext4GetImpliedClusterAllocExitFtraceEvent> T* set_ext4_get_implied_cluster_alloc_exit() {
+    return BeginNestedMessage<T>(180);
+  }
+
+
+  using FieldMetadata_Ext4GetReservedClusterAlloc =
+    ::protozero::proto_utils::FieldMetadata<
+      181,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4GetReservedClusterAllocFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4GetReservedClusterAlloc kExt4GetReservedClusterAlloc() { return {}; }
+  template <typename T = Ext4GetReservedClusterAllocFtraceEvent> T* set_ext4_get_reserved_cluster_alloc() {
+    return BeginNestedMessage<T>(181);
+  }
+
+
+  using FieldMetadata_Ext4IndMapBlocksEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      182,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4IndMapBlocksEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4IndMapBlocksEnter kExt4IndMapBlocksEnter() { return {}; }
+  template <typename T = Ext4IndMapBlocksEnterFtraceEvent> T* set_ext4_ind_map_blocks_enter() {
+    return BeginNestedMessage<T>(182);
+  }
+
+
+  using FieldMetadata_Ext4IndMapBlocksExit =
+    ::protozero::proto_utils::FieldMetadata<
+      183,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4IndMapBlocksExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4IndMapBlocksExit kExt4IndMapBlocksExit() { return {}; }
+  template <typename T = Ext4IndMapBlocksExitFtraceEvent> T* set_ext4_ind_map_blocks_exit() {
+    return BeginNestedMessage<T>(183);
+  }
+
+
+  using FieldMetadata_Ext4InsertRange =
+    ::protozero::proto_utils::FieldMetadata<
+      184,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4InsertRangeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4InsertRange kExt4InsertRange() { return {}; }
+  template <typename T = Ext4InsertRangeFtraceEvent> T* set_ext4_insert_range() {
+    return BeginNestedMessage<T>(184);
+  }
+
+
+  using FieldMetadata_Ext4Invalidatepage =
+    ::protozero::proto_utils::FieldMetadata<
+      185,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4InvalidatepageFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4Invalidatepage kExt4Invalidatepage() { return {}; }
+  template <typename T = Ext4InvalidatepageFtraceEvent> T* set_ext4_invalidatepage() {
+    return BeginNestedMessage<T>(185);
+  }
+
+
+  using FieldMetadata_Ext4JournalStart =
+    ::protozero::proto_utils::FieldMetadata<
+      186,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4JournalStartFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4JournalStart kExt4JournalStart() { return {}; }
+  template <typename T = Ext4JournalStartFtraceEvent> T* set_ext4_journal_start() {
+    return BeginNestedMessage<T>(186);
+  }
+
+
+  using FieldMetadata_Ext4JournalStartReserved =
+    ::protozero::proto_utils::FieldMetadata<
+      187,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4JournalStartReservedFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4JournalStartReserved kExt4JournalStartReserved() { return {}; }
+  template <typename T = Ext4JournalStartReservedFtraceEvent> T* set_ext4_journal_start_reserved() {
+    return BeginNestedMessage<T>(187);
+  }
+
+
+  using FieldMetadata_Ext4JournalledInvalidatepage =
+    ::protozero::proto_utils::FieldMetadata<
+      188,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4JournalledInvalidatepageFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4JournalledInvalidatepage kExt4JournalledInvalidatepage() { return {}; }
+  template <typename T = Ext4JournalledInvalidatepageFtraceEvent> T* set_ext4_journalled_invalidatepage() {
+    return BeginNestedMessage<T>(188);
+  }
+
+
+  using FieldMetadata_Ext4JournalledWriteEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      189,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4JournalledWriteEndFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4JournalledWriteEnd kExt4JournalledWriteEnd() { return {}; }
+  template <typename T = Ext4JournalledWriteEndFtraceEvent> T* set_ext4_journalled_write_end() {
+    return BeginNestedMessage<T>(189);
+  }
+
+
+  using FieldMetadata_Ext4LoadInode =
+    ::protozero::proto_utils::FieldMetadata<
+      190,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4LoadInodeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4LoadInode kExt4LoadInode() { return {}; }
+  template <typename T = Ext4LoadInodeFtraceEvent> T* set_ext4_load_inode() {
+    return BeginNestedMessage<T>(190);
+  }
+
+
+  using FieldMetadata_Ext4LoadInodeBitmap =
+    ::protozero::proto_utils::FieldMetadata<
+      191,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4LoadInodeBitmapFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4LoadInodeBitmap kExt4LoadInodeBitmap() { return {}; }
+  template <typename T = Ext4LoadInodeBitmapFtraceEvent> T* set_ext4_load_inode_bitmap() {
+    return BeginNestedMessage<T>(191);
+  }
+
+
+  using FieldMetadata_Ext4MarkInodeDirty =
+    ::protozero::proto_utils::FieldMetadata<
+      192,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4MarkInodeDirtyFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4MarkInodeDirty kExt4MarkInodeDirty() { return {}; }
+  template <typename T = Ext4MarkInodeDirtyFtraceEvent> T* set_ext4_mark_inode_dirty() {
+    return BeginNestedMessage<T>(192);
+  }
+
+
+  using FieldMetadata_Ext4MbBitmapLoad =
+    ::protozero::proto_utils::FieldMetadata<
+      193,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4MbBitmapLoadFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4MbBitmapLoad kExt4MbBitmapLoad() { return {}; }
+  template <typename T = Ext4MbBitmapLoadFtraceEvent> T* set_ext4_mb_bitmap_load() {
+    return BeginNestedMessage<T>(193);
+  }
+
+
+  using FieldMetadata_Ext4MbBuddyBitmapLoad =
+    ::protozero::proto_utils::FieldMetadata<
+      194,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4MbBuddyBitmapLoadFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4MbBuddyBitmapLoad kExt4MbBuddyBitmapLoad() { return {}; }
+  template <typename T = Ext4MbBuddyBitmapLoadFtraceEvent> T* set_ext4_mb_buddy_bitmap_load() {
+    return BeginNestedMessage<T>(194);
+  }
+
+
+  using FieldMetadata_Ext4MbDiscardPreallocations =
+    ::protozero::proto_utils::FieldMetadata<
+      195,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4MbDiscardPreallocationsFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4MbDiscardPreallocations kExt4MbDiscardPreallocations() { return {}; }
+  template <typename T = Ext4MbDiscardPreallocationsFtraceEvent> T* set_ext4_mb_discard_preallocations() {
+    return BeginNestedMessage<T>(195);
+  }
+
+
+  using FieldMetadata_Ext4MbNewGroupPa =
+    ::protozero::proto_utils::FieldMetadata<
+      196,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4MbNewGroupPaFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4MbNewGroupPa kExt4MbNewGroupPa() { return {}; }
+  template <typename T = Ext4MbNewGroupPaFtraceEvent> T* set_ext4_mb_new_group_pa() {
+    return BeginNestedMessage<T>(196);
+  }
+
+
+  using FieldMetadata_Ext4MbNewInodePa =
+    ::protozero::proto_utils::FieldMetadata<
+      197,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4MbNewInodePaFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4MbNewInodePa kExt4MbNewInodePa() { return {}; }
+  template <typename T = Ext4MbNewInodePaFtraceEvent> T* set_ext4_mb_new_inode_pa() {
+    return BeginNestedMessage<T>(197);
+  }
+
+
+  using FieldMetadata_Ext4MbReleaseGroupPa =
+    ::protozero::proto_utils::FieldMetadata<
+      198,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4MbReleaseGroupPaFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4MbReleaseGroupPa kExt4MbReleaseGroupPa() { return {}; }
+  template <typename T = Ext4MbReleaseGroupPaFtraceEvent> T* set_ext4_mb_release_group_pa() {
+    return BeginNestedMessage<T>(198);
+  }
+
+
+  using FieldMetadata_Ext4MbReleaseInodePa =
+    ::protozero::proto_utils::FieldMetadata<
+      199,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4MbReleaseInodePaFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4MbReleaseInodePa kExt4MbReleaseInodePa() { return {}; }
+  template <typename T = Ext4MbReleaseInodePaFtraceEvent> T* set_ext4_mb_release_inode_pa() {
+    return BeginNestedMessage<T>(199);
+  }
+
+
+  using FieldMetadata_Ext4MballocAlloc =
+    ::protozero::proto_utils::FieldMetadata<
+      200,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4MballocAllocFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4MballocAlloc kExt4MballocAlloc() { return {}; }
+  template <typename T = Ext4MballocAllocFtraceEvent> T* set_ext4_mballoc_alloc() {
+    return BeginNestedMessage<T>(200);
+  }
+
+
+  using FieldMetadata_Ext4MballocDiscard =
+    ::protozero::proto_utils::FieldMetadata<
+      201,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4MballocDiscardFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4MballocDiscard kExt4MballocDiscard() { return {}; }
+  template <typename T = Ext4MballocDiscardFtraceEvent> T* set_ext4_mballoc_discard() {
+    return BeginNestedMessage<T>(201);
+  }
+
+
+  using FieldMetadata_Ext4MballocFree =
+    ::protozero::proto_utils::FieldMetadata<
+      202,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4MballocFreeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4MballocFree kExt4MballocFree() { return {}; }
+  template <typename T = Ext4MballocFreeFtraceEvent> T* set_ext4_mballoc_free() {
+    return BeginNestedMessage<T>(202);
+  }
+
+
+  using FieldMetadata_Ext4MballocPrealloc =
+    ::protozero::proto_utils::FieldMetadata<
+      203,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4MballocPreallocFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4MballocPrealloc kExt4MballocPrealloc() { return {}; }
+  template <typename T = Ext4MballocPreallocFtraceEvent> T* set_ext4_mballoc_prealloc() {
+    return BeginNestedMessage<T>(203);
+  }
+
+
+  using FieldMetadata_Ext4OtherInodeUpdateTime =
+    ::protozero::proto_utils::FieldMetadata<
+      204,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4OtherInodeUpdateTimeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4OtherInodeUpdateTime kExt4OtherInodeUpdateTime() { return {}; }
+  template <typename T = Ext4OtherInodeUpdateTimeFtraceEvent> T* set_ext4_other_inode_update_time() {
+    return BeginNestedMessage<T>(204);
+  }
+
+
+  using FieldMetadata_Ext4PunchHole =
+    ::protozero::proto_utils::FieldMetadata<
+      205,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4PunchHoleFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4PunchHole kExt4PunchHole() { return {}; }
+  template <typename T = Ext4PunchHoleFtraceEvent> T* set_ext4_punch_hole() {
+    return BeginNestedMessage<T>(205);
+  }
+
+
+  using FieldMetadata_Ext4ReadBlockBitmapLoad =
+    ::protozero::proto_utils::FieldMetadata<
+      206,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ReadBlockBitmapLoadFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4ReadBlockBitmapLoad kExt4ReadBlockBitmapLoad() { return {}; }
+  template <typename T = Ext4ReadBlockBitmapLoadFtraceEvent> T* set_ext4_read_block_bitmap_load() {
+    return BeginNestedMessage<T>(206);
+  }
+
+
+  using FieldMetadata_Ext4Readpage =
+    ::protozero::proto_utils::FieldMetadata<
+      207,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ReadpageFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4Readpage kExt4Readpage() { return {}; }
+  template <typename T = Ext4ReadpageFtraceEvent> T* set_ext4_readpage() {
+    return BeginNestedMessage<T>(207);
+  }
+
+
+  using FieldMetadata_Ext4Releasepage =
+    ::protozero::proto_utils::FieldMetadata<
+      208,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ReleasepageFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4Releasepage kExt4Releasepage() { return {}; }
+  template <typename T = Ext4ReleasepageFtraceEvent> T* set_ext4_releasepage() {
+    return BeginNestedMessage<T>(208);
+  }
+
+
+  using FieldMetadata_Ext4RemoveBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      209,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4RemoveBlocksFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4RemoveBlocks kExt4RemoveBlocks() { return {}; }
+  template <typename T = Ext4RemoveBlocksFtraceEvent> T* set_ext4_remove_blocks() {
+    return BeginNestedMessage<T>(209);
+  }
+
+
+  using FieldMetadata_Ext4RequestBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      210,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4RequestBlocksFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4RequestBlocks kExt4RequestBlocks() { return {}; }
+  template <typename T = Ext4RequestBlocksFtraceEvent> T* set_ext4_request_blocks() {
+    return BeginNestedMessage<T>(210);
+  }
+
+
+  using FieldMetadata_Ext4RequestInode =
+    ::protozero::proto_utils::FieldMetadata<
+      211,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4RequestInodeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4RequestInode kExt4RequestInode() { return {}; }
+  template <typename T = Ext4RequestInodeFtraceEvent> T* set_ext4_request_inode() {
+    return BeginNestedMessage<T>(211);
+  }
+
+
+  using FieldMetadata_Ext4SyncFs =
+    ::protozero::proto_utils::FieldMetadata<
+      212,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4SyncFsFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4SyncFs kExt4SyncFs() { return {}; }
+  template <typename T = Ext4SyncFsFtraceEvent> T* set_ext4_sync_fs() {
+    return BeginNestedMessage<T>(212);
+  }
+
+
+  using FieldMetadata_Ext4TrimAllFree =
+    ::protozero::proto_utils::FieldMetadata<
+      213,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4TrimAllFreeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4TrimAllFree kExt4TrimAllFree() { return {}; }
+  template <typename T = Ext4TrimAllFreeFtraceEvent> T* set_ext4_trim_all_free() {
+    return BeginNestedMessage<T>(213);
+  }
+
+
+  using FieldMetadata_Ext4TrimExtent =
+    ::protozero::proto_utils::FieldMetadata<
+      214,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4TrimExtentFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4TrimExtent kExt4TrimExtent() { return {}; }
+  template <typename T = Ext4TrimExtentFtraceEvent> T* set_ext4_trim_extent() {
+    return BeginNestedMessage<T>(214);
+  }
+
+
+  using FieldMetadata_Ext4TruncateEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      215,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4TruncateEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4TruncateEnter kExt4TruncateEnter() { return {}; }
+  template <typename T = Ext4TruncateEnterFtraceEvent> T* set_ext4_truncate_enter() {
+    return BeginNestedMessage<T>(215);
+  }
+
+
+  using FieldMetadata_Ext4TruncateExit =
+    ::protozero::proto_utils::FieldMetadata<
+      216,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4TruncateExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4TruncateExit kExt4TruncateExit() { return {}; }
+  template <typename T = Ext4TruncateExitFtraceEvent> T* set_ext4_truncate_exit() {
+    return BeginNestedMessage<T>(216);
+  }
+
+
+  using FieldMetadata_Ext4UnlinkEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      217,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4UnlinkEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4UnlinkEnter kExt4UnlinkEnter() { return {}; }
+  template <typename T = Ext4UnlinkEnterFtraceEvent> T* set_ext4_unlink_enter() {
+    return BeginNestedMessage<T>(217);
+  }
+
+
+  using FieldMetadata_Ext4UnlinkExit =
+    ::protozero::proto_utils::FieldMetadata<
+      218,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4UnlinkExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4UnlinkExit kExt4UnlinkExit() { return {}; }
+  template <typename T = Ext4UnlinkExitFtraceEvent> T* set_ext4_unlink_exit() {
+    return BeginNestedMessage<T>(218);
+  }
+
+
+  using FieldMetadata_Ext4WriteBegin =
+    ::protozero::proto_utils::FieldMetadata<
+      219,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4WriteBeginFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4WriteBegin kExt4WriteBegin() { return {}; }
+  template <typename T = Ext4WriteBeginFtraceEvent> T* set_ext4_write_begin() {
+    return BeginNestedMessage<T>(219);
+  }
+
+
+  using FieldMetadata_Ext4WriteEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      230,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4WriteEndFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4WriteEnd kExt4WriteEnd() { return {}; }
+  template <typename T = Ext4WriteEndFtraceEvent> T* set_ext4_write_end() {
+    return BeginNestedMessage<T>(230);
+  }
+
+
+  using FieldMetadata_Ext4Writepage =
+    ::protozero::proto_utils::FieldMetadata<
+      231,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4WritepageFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4Writepage kExt4Writepage() { return {}; }
+  template <typename T = Ext4WritepageFtraceEvent> T* set_ext4_writepage() {
+    return BeginNestedMessage<T>(231);
+  }
+
+
+  using FieldMetadata_Ext4Writepages =
+    ::protozero::proto_utils::FieldMetadata<
+      232,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4WritepagesFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4Writepages kExt4Writepages() { return {}; }
+  template <typename T = Ext4WritepagesFtraceEvent> T* set_ext4_writepages() {
+    return BeginNestedMessage<T>(232);
+  }
+
+
+  using FieldMetadata_Ext4WritepagesResult =
+    ::protozero::proto_utils::FieldMetadata<
+      233,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4WritepagesResultFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4WritepagesResult kExt4WritepagesResult() { return {}; }
+  template <typename T = Ext4WritepagesResultFtraceEvent> T* set_ext4_writepages_result() {
+    return BeginNestedMessage<T>(233);
+  }
+
+
+  using FieldMetadata_Ext4ZeroRange =
+    ::protozero::proto_utils::FieldMetadata<
+      234,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Ext4ZeroRangeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ext4ZeroRange kExt4ZeroRange() { return {}; }
+  template <typename T = Ext4ZeroRangeFtraceEvent> T* set_ext4_zero_range() {
+    return BeginNestedMessage<T>(234);
+  }
+
+
+  using FieldMetadata_TaskNewtask =
+    ::protozero::proto_utils::FieldMetadata<
+      235,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TaskNewtaskFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TaskNewtask kTaskNewtask() { return {}; }
+  template <typename T = TaskNewtaskFtraceEvent> T* set_task_newtask() {
+    return BeginNestedMessage<T>(235);
+  }
+
+
+  using FieldMetadata_TaskRename =
+    ::protozero::proto_utils::FieldMetadata<
+      236,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TaskRenameFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TaskRename kTaskRename() { return {}; }
+  template <typename T = TaskRenameFtraceEvent> T* set_task_rename() {
+    return BeginNestedMessage<T>(236);
+  }
+
+
+  using FieldMetadata_SchedProcessExec =
+    ::protozero::proto_utils::FieldMetadata<
+      237,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SchedProcessExecFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SchedProcessExec kSchedProcessExec() { return {}; }
+  template <typename T = SchedProcessExecFtraceEvent> T* set_sched_process_exec() {
+    return BeginNestedMessage<T>(237);
+  }
+
+
+  using FieldMetadata_SchedProcessExit =
+    ::protozero::proto_utils::FieldMetadata<
+      238,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SchedProcessExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SchedProcessExit kSchedProcessExit() { return {}; }
+  template <typename T = SchedProcessExitFtraceEvent> T* set_sched_process_exit() {
+    return BeginNestedMessage<T>(238);
+  }
+
+
+  using FieldMetadata_SchedProcessFork =
+    ::protozero::proto_utils::FieldMetadata<
+      239,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SchedProcessForkFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SchedProcessFork kSchedProcessFork() { return {}; }
+  template <typename T = SchedProcessForkFtraceEvent> T* set_sched_process_fork() {
+    return BeginNestedMessage<T>(239);
+  }
+
+
+  using FieldMetadata_SchedProcessFree =
+    ::protozero::proto_utils::FieldMetadata<
+      240,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SchedProcessFreeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SchedProcessFree kSchedProcessFree() { return {}; }
+  template <typename T = SchedProcessFreeFtraceEvent> T* set_sched_process_free() {
+    return BeginNestedMessage<T>(240);
+  }
+
+
+  using FieldMetadata_SchedProcessHang =
+    ::protozero::proto_utils::FieldMetadata<
+      241,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SchedProcessHangFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SchedProcessHang kSchedProcessHang() { return {}; }
+  template <typename T = SchedProcessHangFtraceEvent> T* set_sched_process_hang() {
+    return BeginNestedMessage<T>(241);
+  }
+
+
+  using FieldMetadata_SchedProcessWait =
+    ::protozero::proto_utils::FieldMetadata<
+      242,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SchedProcessWaitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SchedProcessWait kSchedProcessWait() { return {}; }
+  template <typename T = SchedProcessWaitFtraceEvent> T* set_sched_process_wait() {
+    return BeginNestedMessage<T>(242);
+  }
+
+
+  using FieldMetadata_F2fsDoSubmitBio =
+    ::protozero::proto_utils::FieldMetadata<
+      243,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsDoSubmitBioFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsDoSubmitBio kF2fsDoSubmitBio() { return {}; }
+  template <typename T = F2fsDoSubmitBioFtraceEvent> T* set_f2fs_do_submit_bio() {
+    return BeginNestedMessage<T>(243);
+  }
+
+
+  using FieldMetadata_F2fsEvictInode =
+    ::protozero::proto_utils::FieldMetadata<
+      244,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsEvictInodeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsEvictInode kF2fsEvictInode() { return {}; }
+  template <typename T = F2fsEvictInodeFtraceEvent> T* set_f2fs_evict_inode() {
+    return BeginNestedMessage<T>(244);
+  }
+
+
+  using FieldMetadata_F2fsFallocate =
+    ::protozero::proto_utils::FieldMetadata<
+      245,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsFallocateFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsFallocate kF2fsFallocate() { return {}; }
+  template <typename T = F2fsFallocateFtraceEvent> T* set_f2fs_fallocate() {
+    return BeginNestedMessage<T>(245);
+  }
+
+
+  using FieldMetadata_F2fsGetDataBlock =
+    ::protozero::proto_utils::FieldMetadata<
+      246,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsGetDataBlockFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsGetDataBlock kF2fsGetDataBlock() { return {}; }
+  template <typename T = F2fsGetDataBlockFtraceEvent> T* set_f2fs_get_data_block() {
+    return BeginNestedMessage<T>(246);
+  }
+
+
+  using FieldMetadata_F2fsGetVictim =
+    ::protozero::proto_utils::FieldMetadata<
+      247,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsGetVictimFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsGetVictim kF2fsGetVictim() { return {}; }
+  template <typename T = F2fsGetVictimFtraceEvent> T* set_f2fs_get_victim() {
+    return BeginNestedMessage<T>(247);
+  }
+
+
+  using FieldMetadata_F2fsIget =
+    ::protozero::proto_utils::FieldMetadata<
+      248,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsIgetFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsIget kF2fsIget() { return {}; }
+  template <typename T = F2fsIgetFtraceEvent> T* set_f2fs_iget() {
+    return BeginNestedMessage<T>(248);
+  }
+
+
+  using FieldMetadata_F2fsIgetExit =
+    ::protozero::proto_utils::FieldMetadata<
+      249,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsIgetExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsIgetExit kF2fsIgetExit() { return {}; }
+  template <typename T = F2fsIgetExitFtraceEvent> T* set_f2fs_iget_exit() {
+    return BeginNestedMessage<T>(249);
+  }
+
+
+  using FieldMetadata_F2fsNewInode =
+    ::protozero::proto_utils::FieldMetadata<
+      250,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsNewInodeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsNewInode kF2fsNewInode() { return {}; }
+  template <typename T = F2fsNewInodeFtraceEvent> T* set_f2fs_new_inode() {
+    return BeginNestedMessage<T>(250);
+  }
+
+
+  using FieldMetadata_F2fsReadpage =
+    ::protozero::proto_utils::FieldMetadata<
+      251,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsReadpageFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsReadpage kF2fsReadpage() { return {}; }
+  template <typename T = F2fsReadpageFtraceEvent> T* set_f2fs_readpage() {
+    return BeginNestedMessage<T>(251);
+  }
+
+
+  using FieldMetadata_F2fsReserveNewBlock =
+    ::protozero::proto_utils::FieldMetadata<
+      252,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsReserveNewBlockFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsReserveNewBlock kF2fsReserveNewBlock() { return {}; }
+  template <typename T = F2fsReserveNewBlockFtraceEvent> T* set_f2fs_reserve_new_block() {
+    return BeginNestedMessage<T>(252);
+  }
+
+
+  using FieldMetadata_F2fsSetPageDirty =
+    ::protozero::proto_utils::FieldMetadata<
+      253,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsSetPageDirtyFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsSetPageDirty kF2fsSetPageDirty() { return {}; }
+  template <typename T = F2fsSetPageDirtyFtraceEvent> T* set_f2fs_set_page_dirty() {
+    return BeginNestedMessage<T>(253);
+  }
+
+
+  using FieldMetadata_F2fsSubmitWritePage =
+    ::protozero::proto_utils::FieldMetadata<
+      254,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsSubmitWritePageFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsSubmitWritePage kF2fsSubmitWritePage() { return {}; }
+  template <typename T = F2fsSubmitWritePageFtraceEvent> T* set_f2fs_submit_write_page() {
+    return BeginNestedMessage<T>(254);
+  }
+
+
+  using FieldMetadata_F2fsSyncFileEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      255,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsSyncFileEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsSyncFileEnter kF2fsSyncFileEnter() { return {}; }
+  template <typename T = F2fsSyncFileEnterFtraceEvent> T* set_f2fs_sync_file_enter() {
+    return BeginNestedMessage<T>(255);
+  }
+
+
+  using FieldMetadata_F2fsSyncFileExit =
+    ::protozero::proto_utils::FieldMetadata<
+      256,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsSyncFileExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsSyncFileExit kF2fsSyncFileExit() { return {}; }
+  template <typename T = F2fsSyncFileExitFtraceEvent> T* set_f2fs_sync_file_exit() {
+    return BeginNestedMessage<T>(256);
+  }
+
+
+  using FieldMetadata_F2fsSyncFs =
+    ::protozero::proto_utils::FieldMetadata<
+      257,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsSyncFsFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsSyncFs kF2fsSyncFs() { return {}; }
+  template <typename T = F2fsSyncFsFtraceEvent> T* set_f2fs_sync_fs() {
+    return BeginNestedMessage<T>(257);
+  }
+
+
+  using FieldMetadata_F2fsTruncate =
+    ::protozero::proto_utils::FieldMetadata<
+      258,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsTruncateFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsTruncate kF2fsTruncate() { return {}; }
+  template <typename T = F2fsTruncateFtraceEvent> T* set_f2fs_truncate() {
+    return BeginNestedMessage<T>(258);
+  }
+
+
+  using FieldMetadata_F2fsTruncateBlocksEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      259,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsTruncateBlocksEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsTruncateBlocksEnter kF2fsTruncateBlocksEnter() { return {}; }
+  template <typename T = F2fsTruncateBlocksEnterFtraceEvent> T* set_f2fs_truncate_blocks_enter() {
+    return BeginNestedMessage<T>(259);
+  }
+
+
+  using FieldMetadata_F2fsTruncateBlocksExit =
+    ::protozero::proto_utils::FieldMetadata<
+      260,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsTruncateBlocksExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsTruncateBlocksExit kF2fsTruncateBlocksExit() { return {}; }
+  template <typename T = F2fsTruncateBlocksExitFtraceEvent> T* set_f2fs_truncate_blocks_exit() {
+    return BeginNestedMessage<T>(260);
+  }
+
+
+  using FieldMetadata_F2fsTruncateDataBlocksRange =
+    ::protozero::proto_utils::FieldMetadata<
+      261,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsTruncateDataBlocksRangeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsTruncateDataBlocksRange kF2fsTruncateDataBlocksRange() { return {}; }
+  template <typename T = F2fsTruncateDataBlocksRangeFtraceEvent> T* set_f2fs_truncate_data_blocks_range() {
+    return BeginNestedMessage<T>(261);
+  }
+
+
+  using FieldMetadata_F2fsTruncateInodeBlocksEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      262,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsTruncateInodeBlocksEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsTruncateInodeBlocksEnter kF2fsTruncateInodeBlocksEnter() { return {}; }
+  template <typename T = F2fsTruncateInodeBlocksEnterFtraceEvent> T* set_f2fs_truncate_inode_blocks_enter() {
+    return BeginNestedMessage<T>(262);
+  }
+
+
+  using FieldMetadata_F2fsTruncateInodeBlocksExit =
+    ::protozero::proto_utils::FieldMetadata<
+      263,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsTruncateInodeBlocksExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsTruncateInodeBlocksExit kF2fsTruncateInodeBlocksExit() { return {}; }
+  template <typename T = F2fsTruncateInodeBlocksExitFtraceEvent> T* set_f2fs_truncate_inode_blocks_exit() {
+    return BeginNestedMessage<T>(263);
+  }
+
+
+  using FieldMetadata_F2fsTruncateNode =
+    ::protozero::proto_utils::FieldMetadata<
+      264,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsTruncateNodeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsTruncateNode kF2fsTruncateNode() { return {}; }
+  template <typename T = F2fsTruncateNodeFtraceEvent> T* set_f2fs_truncate_node() {
+    return BeginNestedMessage<T>(264);
+  }
+
+
+  using FieldMetadata_F2fsTruncateNodesEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      265,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsTruncateNodesEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsTruncateNodesEnter kF2fsTruncateNodesEnter() { return {}; }
+  template <typename T = F2fsTruncateNodesEnterFtraceEvent> T* set_f2fs_truncate_nodes_enter() {
+    return BeginNestedMessage<T>(265);
+  }
+
+
+  using FieldMetadata_F2fsTruncateNodesExit =
+    ::protozero::proto_utils::FieldMetadata<
+      266,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsTruncateNodesExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsTruncateNodesExit kF2fsTruncateNodesExit() { return {}; }
+  template <typename T = F2fsTruncateNodesExitFtraceEvent> T* set_f2fs_truncate_nodes_exit() {
+    return BeginNestedMessage<T>(266);
+  }
+
+
+  using FieldMetadata_F2fsTruncatePartialNodes =
+    ::protozero::proto_utils::FieldMetadata<
+      267,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsTruncatePartialNodesFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsTruncatePartialNodes kF2fsTruncatePartialNodes() { return {}; }
+  template <typename T = F2fsTruncatePartialNodesFtraceEvent> T* set_f2fs_truncate_partial_nodes() {
+    return BeginNestedMessage<T>(267);
+  }
+
+
+  using FieldMetadata_F2fsUnlinkEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      268,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsUnlinkEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsUnlinkEnter kF2fsUnlinkEnter() { return {}; }
+  template <typename T = F2fsUnlinkEnterFtraceEvent> T* set_f2fs_unlink_enter() {
+    return BeginNestedMessage<T>(268);
+  }
+
+
+  using FieldMetadata_F2fsUnlinkExit =
+    ::protozero::proto_utils::FieldMetadata<
+      269,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsUnlinkExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsUnlinkExit kF2fsUnlinkExit() { return {}; }
+  template <typename T = F2fsUnlinkExitFtraceEvent> T* set_f2fs_unlink_exit() {
+    return BeginNestedMessage<T>(269);
+  }
+
+
+  using FieldMetadata_F2fsVmPageMkwrite =
+    ::protozero::proto_utils::FieldMetadata<
+      270,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsVmPageMkwriteFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsVmPageMkwrite kF2fsVmPageMkwrite() { return {}; }
+  template <typename T = F2fsVmPageMkwriteFtraceEvent> T* set_f2fs_vm_page_mkwrite() {
+    return BeginNestedMessage<T>(270);
+  }
+
+
+  using FieldMetadata_F2fsWriteBegin =
+    ::protozero::proto_utils::FieldMetadata<
+      271,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsWriteBeginFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsWriteBegin kF2fsWriteBegin() { return {}; }
+  template <typename T = F2fsWriteBeginFtraceEvent> T* set_f2fs_write_begin() {
+    return BeginNestedMessage<T>(271);
+  }
+
+
+  using FieldMetadata_F2fsWriteCheckpoint =
+    ::protozero::proto_utils::FieldMetadata<
+      272,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsWriteCheckpointFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsWriteCheckpoint kF2fsWriteCheckpoint() { return {}; }
+  template <typename T = F2fsWriteCheckpointFtraceEvent> T* set_f2fs_write_checkpoint() {
+    return BeginNestedMessage<T>(272);
+  }
+
+
+  using FieldMetadata_F2fsWriteEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      273,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      F2fsWriteEndFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_F2fsWriteEnd kF2fsWriteEnd() { return {}; }
+  template <typename T = F2fsWriteEndFtraceEvent> T* set_f2fs_write_end() {
+    return BeginNestedMessage<T>(273);
+  }
+
+
+  using FieldMetadata_AllocPagesIommuEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      274,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AllocPagesIommuEndFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllocPagesIommuEnd kAllocPagesIommuEnd() { return {}; }
+  template <typename T = AllocPagesIommuEndFtraceEvent> T* set_alloc_pages_iommu_end() {
+    return BeginNestedMessage<T>(274);
+  }
+
+
+  using FieldMetadata_AllocPagesIommuFail =
+    ::protozero::proto_utils::FieldMetadata<
+      275,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AllocPagesIommuFailFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllocPagesIommuFail kAllocPagesIommuFail() { return {}; }
+  template <typename T = AllocPagesIommuFailFtraceEvent> T* set_alloc_pages_iommu_fail() {
+    return BeginNestedMessage<T>(275);
+  }
+
+
+  using FieldMetadata_AllocPagesIommuStart =
+    ::protozero::proto_utils::FieldMetadata<
+      276,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AllocPagesIommuStartFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllocPagesIommuStart kAllocPagesIommuStart() { return {}; }
+  template <typename T = AllocPagesIommuStartFtraceEvent> T* set_alloc_pages_iommu_start() {
+    return BeginNestedMessage<T>(276);
+  }
+
+
+  using FieldMetadata_AllocPagesSysEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      277,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AllocPagesSysEndFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllocPagesSysEnd kAllocPagesSysEnd() { return {}; }
+  template <typename T = AllocPagesSysEndFtraceEvent> T* set_alloc_pages_sys_end() {
+    return BeginNestedMessage<T>(277);
+  }
+
+
+  using FieldMetadata_AllocPagesSysFail =
+    ::protozero::proto_utils::FieldMetadata<
+      278,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AllocPagesSysFailFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllocPagesSysFail kAllocPagesSysFail() { return {}; }
+  template <typename T = AllocPagesSysFailFtraceEvent> T* set_alloc_pages_sys_fail() {
+    return BeginNestedMessage<T>(278);
+  }
+
+
+  using FieldMetadata_AllocPagesSysStart =
+    ::protozero::proto_utils::FieldMetadata<
+      279,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AllocPagesSysStartFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllocPagesSysStart kAllocPagesSysStart() { return {}; }
+  template <typename T = AllocPagesSysStartFtraceEvent> T* set_alloc_pages_sys_start() {
+    return BeginNestedMessage<T>(279);
+  }
+
+
+  using FieldMetadata_DmaAllocContiguousRetry =
+    ::protozero::proto_utils::FieldMetadata<
+      280,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DmaAllocContiguousRetryFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DmaAllocContiguousRetry kDmaAllocContiguousRetry() { return {}; }
+  template <typename T = DmaAllocContiguousRetryFtraceEvent> T* set_dma_alloc_contiguous_retry() {
+    return BeginNestedMessage<T>(280);
+  }
+
+
+  using FieldMetadata_IommuMapRange =
+    ::protozero::proto_utils::FieldMetadata<
+      281,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IommuMapRangeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IommuMapRange kIommuMapRange() { return {}; }
+  template <typename T = IommuMapRangeFtraceEvent> T* set_iommu_map_range() {
+    return BeginNestedMessage<T>(281);
+  }
+
+
+  using FieldMetadata_IommuSecPtblMapRangeEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      282,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IommuSecPtblMapRangeEndFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IommuSecPtblMapRangeEnd kIommuSecPtblMapRangeEnd() { return {}; }
+  template <typename T = IommuSecPtblMapRangeEndFtraceEvent> T* set_iommu_sec_ptbl_map_range_end() {
+    return BeginNestedMessage<T>(282);
+  }
+
+
+  using FieldMetadata_IommuSecPtblMapRangeStart =
+    ::protozero::proto_utils::FieldMetadata<
+      283,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IommuSecPtblMapRangeStartFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IommuSecPtblMapRangeStart kIommuSecPtblMapRangeStart() { return {}; }
+  template <typename T = IommuSecPtblMapRangeStartFtraceEvent> T* set_iommu_sec_ptbl_map_range_start() {
+    return BeginNestedMessage<T>(283);
+  }
+
+
+  using FieldMetadata_IonAllocBufferEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      284,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonAllocBufferEndFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonAllocBufferEnd kIonAllocBufferEnd() { return {}; }
+  template <typename T = IonAllocBufferEndFtraceEvent> T* set_ion_alloc_buffer_end() {
+    return BeginNestedMessage<T>(284);
+  }
+
+
+  using FieldMetadata_IonAllocBufferFail =
+    ::protozero::proto_utils::FieldMetadata<
+      285,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonAllocBufferFailFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonAllocBufferFail kIonAllocBufferFail() { return {}; }
+  template <typename T = IonAllocBufferFailFtraceEvent> T* set_ion_alloc_buffer_fail() {
+    return BeginNestedMessage<T>(285);
+  }
+
+
+  using FieldMetadata_IonAllocBufferFallback =
+    ::protozero::proto_utils::FieldMetadata<
+      286,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonAllocBufferFallbackFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonAllocBufferFallback kIonAllocBufferFallback() { return {}; }
+  template <typename T = IonAllocBufferFallbackFtraceEvent> T* set_ion_alloc_buffer_fallback() {
+    return BeginNestedMessage<T>(286);
+  }
+
+
+  using FieldMetadata_IonAllocBufferStart =
+    ::protozero::proto_utils::FieldMetadata<
+      287,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonAllocBufferStartFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonAllocBufferStart kIonAllocBufferStart() { return {}; }
+  template <typename T = IonAllocBufferStartFtraceEvent> T* set_ion_alloc_buffer_start() {
+    return BeginNestedMessage<T>(287);
+  }
+
+
+  using FieldMetadata_IonCpAllocRetry =
+    ::protozero::proto_utils::FieldMetadata<
+      288,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonCpAllocRetryFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonCpAllocRetry kIonCpAllocRetry() { return {}; }
+  template <typename T = IonCpAllocRetryFtraceEvent> T* set_ion_cp_alloc_retry() {
+    return BeginNestedMessage<T>(288);
+  }
+
+
+  using FieldMetadata_IonCpSecureBufferEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      289,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonCpSecureBufferEndFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonCpSecureBufferEnd kIonCpSecureBufferEnd() { return {}; }
+  template <typename T = IonCpSecureBufferEndFtraceEvent> T* set_ion_cp_secure_buffer_end() {
+    return BeginNestedMessage<T>(289);
+  }
+
+
+  using FieldMetadata_IonCpSecureBufferStart =
+    ::protozero::proto_utils::FieldMetadata<
+      290,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonCpSecureBufferStartFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonCpSecureBufferStart kIonCpSecureBufferStart() { return {}; }
+  template <typename T = IonCpSecureBufferStartFtraceEvent> T* set_ion_cp_secure_buffer_start() {
+    return BeginNestedMessage<T>(290);
+  }
+
+
+  using FieldMetadata_IonPrefetching =
+    ::protozero::proto_utils::FieldMetadata<
+      291,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonPrefetchingFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonPrefetching kIonPrefetching() { return {}; }
+  template <typename T = IonPrefetchingFtraceEvent> T* set_ion_prefetching() {
+    return BeginNestedMessage<T>(291);
+  }
+
+
+  using FieldMetadata_IonSecureCmaAddToPoolEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      292,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonSecureCmaAddToPoolEndFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonSecureCmaAddToPoolEnd kIonSecureCmaAddToPoolEnd() { return {}; }
+  template <typename T = IonSecureCmaAddToPoolEndFtraceEvent> T* set_ion_secure_cma_add_to_pool_end() {
+    return BeginNestedMessage<T>(292);
+  }
+
+
+  using FieldMetadata_IonSecureCmaAddToPoolStart =
+    ::protozero::proto_utils::FieldMetadata<
+      293,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonSecureCmaAddToPoolStartFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonSecureCmaAddToPoolStart kIonSecureCmaAddToPoolStart() { return {}; }
+  template <typename T = IonSecureCmaAddToPoolStartFtraceEvent> T* set_ion_secure_cma_add_to_pool_start() {
+    return BeginNestedMessage<T>(293);
+  }
+
+
+  using FieldMetadata_IonSecureCmaAllocateEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      294,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonSecureCmaAllocateEndFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonSecureCmaAllocateEnd kIonSecureCmaAllocateEnd() { return {}; }
+  template <typename T = IonSecureCmaAllocateEndFtraceEvent> T* set_ion_secure_cma_allocate_end() {
+    return BeginNestedMessage<T>(294);
+  }
+
+
+  using FieldMetadata_IonSecureCmaAllocateStart =
+    ::protozero::proto_utils::FieldMetadata<
+      295,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonSecureCmaAllocateStartFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonSecureCmaAllocateStart kIonSecureCmaAllocateStart() { return {}; }
+  template <typename T = IonSecureCmaAllocateStartFtraceEvent> T* set_ion_secure_cma_allocate_start() {
+    return BeginNestedMessage<T>(295);
+  }
+
+
+  using FieldMetadata_IonSecureCmaShrinkPoolEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      296,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonSecureCmaShrinkPoolEndFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonSecureCmaShrinkPoolEnd kIonSecureCmaShrinkPoolEnd() { return {}; }
+  template <typename T = IonSecureCmaShrinkPoolEndFtraceEvent> T* set_ion_secure_cma_shrink_pool_end() {
+    return BeginNestedMessage<T>(296);
+  }
+
+
+  using FieldMetadata_IonSecureCmaShrinkPoolStart =
+    ::protozero::proto_utils::FieldMetadata<
+      297,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonSecureCmaShrinkPoolStartFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonSecureCmaShrinkPoolStart kIonSecureCmaShrinkPoolStart() { return {}; }
+  template <typename T = IonSecureCmaShrinkPoolStartFtraceEvent> T* set_ion_secure_cma_shrink_pool_start() {
+    return BeginNestedMessage<T>(297);
+  }
+
+
+  using FieldMetadata_Kfree =
+    ::protozero::proto_utils::FieldMetadata<
+      298,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      KfreeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Kfree kKfree() { return {}; }
+  template <typename T = KfreeFtraceEvent> T* set_kfree() {
+    return BeginNestedMessage<T>(298);
+  }
+
+
+  using FieldMetadata_Kmalloc =
+    ::protozero::proto_utils::FieldMetadata<
+      299,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      KmallocFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Kmalloc kKmalloc() { return {}; }
+  template <typename T = KmallocFtraceEvent> T* set_kmalloc() {
+    return BeginNestedMessage<T>(299);
+  }
+
+
+  using FieldMetadata_KmallocNode =
+    ::protozero::proto_utils::FieldMetadata<
+      300,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      KmallocNodeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_KmallocNode kKmallocNode() { return {}; }
+  template <typename T = KmallocNodeFtraceEvent> T* set_kmalloc_node() {
+    return BeginNestedMessage<T>(300);
+  }
+
+
+  using FieldMetadata_KmemCacheAlloc =
+    ::protozero::proto_utils::FieldMetadata<
+      301,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      KmemCacheAllocFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_KmemCacheAlloc kKmemCacheAlloc() { return {}; }
+  template <typename T = KmemCacheAllocFtraceEvent> T* set_kmem_cache_alloc() {
+    return BeginNestedMessage<T>(301);
+  }
+
+
+  using FieldMetadata_KmemCacheAllocNode =
+    ::protozero::proto_utils::FieldMetadata<
+      302,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      KmemCacheAllocNodeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_KmemCacheAllocNode kKmemCacheAllocNode() { return {}; }
+  template <typename T = KmemCacheAllocNodeFtraceEvent> T* set_kmem_cache_alloc_node() {
+    return BeginNestedMessage<T>(302);
+  }
+
+
+  using FieldMetadata_KmemCacheFree =
+    ::protozero::proto_utils::FieldMetadata<
+      303,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      KmemCacheFreeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_KmemCacheFree kKmemCacheFree() { return {}; }
+  template <typename T = KmemCacheFreeFtraceEvent> T* set_kmem_cache_free() {
+    return BeginNestedMessage<T>(303);
+  }
+
+
+  using FieldMetadata_MigratePagesEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      304,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MigratePagesEndFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MigratePagesEnd kMigratePagesEnd() { return {}; }
+  template <typename T = MigratePagesEndFtraceEvent> T* set_migrate_pages_end() {
+    return BeginNestedMessage<T>(304);
+  }
+
+
+  using FieldMetadata_MigratePagesStart =
+    ::protozero::proto_utils::FieldMetadata<
+      305,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MigratePagesStartFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MigratePagesStart kMigratePagesStart() { return {}; }
+  template <typename T = MigratePagesStartFtraceEvent> T* set_migrate_pages_start() {
+    return BeginNestedMessage<T>(305);
+  }
+
+
+  using FieldMetadata_MigrateRetry =
+    ::protozero::proto_utils::FieldMetadata<
+      306,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MigrateRetryFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MigrateRetry kMigrateRetry() { return {}; }
+  template <typename T = MigrateRetryFtraceEvent> T* set_migrate_retry() {
+    return BeginNestedMessage<T>(306);
+  }
+
+
+  using FieldMetadata_MmPageAlloc =
+    ::protozero::proto_utils::FieldMetadata<
+      307,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmPageAllocFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmPageAlloc kMmPageAlloc() { return {}; }
+  template <typename T = MmPageAllocFtraceEvent> T* set_mm_page_alloc() {
+    return BeginNestedMessage<T>(307);
+  }
+
+
+  using FieldMetadata_MmPageAllocExtfrag =
+    ::protozero::proto_utils::FieldMetadata<
+      308,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmPageAllocExtfragFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmPageAllocExtfrag kMmPageAllocExtfrag() { return {}; }
+  template <typename T = MmPageAllocExtfragFtraceEvent> T* set_mm_page_alloc_extfrag() {
+    return BeginNestedMessage<T>(308);
+  }
+
+
+  using FieldMetadata_MmPageAllocZoneLocked =
+    ::protozero::proto_utils::FieldMetadata<
+      309,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmPageAllocZoneLockedFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmPageAllocZoneLocked kMmPageAllocZoneLocked() { return {}; }
+  template <typename T = MmPageAllocZoneLockedFtraceEvent> T* set_mm_page_alloc_zone_locked() {
+    return BeginNestedMessage<T>(309);
+  }
+
+
+  using FieldMetadata_MmPageFree =
+    ::protozero::proto_utils::FieldMetadata<
+      310,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmPageFreeFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmPageFree kMmPageFree() { return {}; }
+  template <typename T = MmPageFreeFtraceEvent> T* set_mm_page_free() {
+    return BeginNestedMessage<T>(310);
+  }
+
+
+  using FieldMetadata_MmPageFreeBatched =
+    ::protozero::proto_utils::FieldMetadata<
+      311,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmPageFreeBatchedFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmPageFreeBatched kMmPageFreeBatched() { return {}; }
+  template <typename T = MmPageFreeBatchedFtraceEvent> T* set_mm_page_free_batched() {
+    return BeginNestedMessage<T>(311);
+  }
+
+
+  using FieldMetadata_MmPagePcpuDrain =
+    ::protozero::proto_utils::FieldMetadata<
+      312,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmPagePcpuDrainFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmPagePcpuDrain kMmPagePcpuDrain() { return {}; }
+  template <typename T = MmPagePcpuDrainFtraceEvent> T* set_mm_page_pcpu_drain() {
+    return BeginNestedMessage<T>(312);
+  }
+
+
+  using FieldMetadata_RssStat =
+    ::protozero::proto_utils::FieldMetadata<
+      313,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      RssStatFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RssStat kRssStat() { return {}; }
+  template <typename T = RssStatFtraceEvent> T* set_rss_stat() {
+    return BeginNestedMessage<T>(313);
+  }
+
+
+  using FieldMetadata_IonHeapShrink =
+    ::protozero::proto_utils::FieldMetadata<
+      314,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonHeapShrinkFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonHeapShrink kIonHeapShrink() { return {}; }
+  template <typename T = IonHeapShrinkFtraceEvent> T* set_ion_heap_shrink() {
+    return BeginNestedMessage<T>(314);
+  }
+
+
+  using FieldMetadata_IonHeapGrow =
+    ::protozero::proto_utils::FieldMetadata<
+      315,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonHeapGrowFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonHeapGrow kIonHeapGrow() { return {}; }
+  template <typename T = IonHeapGrowFtraceEvent> T* set_ion_heap_grow() {
+    return BeginNestedMessage<T>(315);
+  }
+
+
+  using FieldMetadata_FenceInit =
+    ::protozero::proto_utils::FieldMetadata<
+      316,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FenceInitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FenceInit kFenceInit() { return {}; }
+  template <typename T = FenceInitFtraceEvent> T* set_fence_init() {
+    return BeginNestedMessage<T>(316);
+  }
+
+
+  using FieldMetadata_FenceDestroy =
+    ::protozero::proto_utils::FieldMetadata<
+      317,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FenceDestroyFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FenceDestroy kFenceDestroy() { return {}; }
+  template <typename T = FenceDestroyFtraceEvent> T* set_fence_destroy() {
+    return BeginNestedMessage<T>(317);
+  }
+
+
+  using FieldMetadata_FenceEnableSignal =
+    ::protozero::proto_utils::FieldMetadata<
+      318,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FenceEnableSignalFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FenceEnableSignal kFenceEnableSignal() { return {}; }
+  template <typename T = FenceEnableSignalFtraceEvent> T* set_fence_enable_signal() {
+    return BeginNestedMessage<T>(318);
+  }
+
+
+  using FieldMetadata_FenceSignaled =
+    ::protozero::proto_utils::FieldMetadata<
+      319,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FenceSignaledFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FenceSignaled kFenceSignaled() { return {}; }
+  template <typename T = FenceSignaledFtraceEvent> T* set_fence_signaled() {
+    return BeginNestedMessage<T>(319);
+  }
+
+
+  using FieldMetadata_ClkEnable =
+    ::protozero::proto_utils::FieldMetadata<
+      320,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ClkEnableFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClkEnable kClkEnable() { return {}; }
+  template <typename T = ClkEnableFtraceEvent> T* set_clk_enable() {
+    return BeginNestedMessage<T>(320);
+  }
+
+
+  using FieldMetadata_ClkDisable =
+    ::protozero::proto_utils::FieldMetadata<
+      321,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ClkDisableFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClkDisable kClkDisable() { return {}; }
+  template <typename T = ClkDisableFtraceEvent> T* set_clk_disable() {
+    return BeginNestedMessage<T>(321);
+  }
+
+
+  using FieldMetadata_ClkSetRate =
+    ::protozero::proto_utils::FieldMetadata<
+      322,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ClkSetRateFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClkSetRate kClkSetRate() { return {}; }
+  template <typename T = ClkSetRateFtraceEvent> T* set_clk_set_rate() {
+    return BeginNestedMessage<T>(322);
+  }
+
+
+  using FieldMetadata_BinderTransactionAllocBuf =
+    ::protozero::proto_utils::FieldMetadata<
+      323,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BinderTransactionAllocBufFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BinderTransactionAllocBuf kBinderTransactionAllocBuf() { return {}; }
+  template <typename T = BinderTransactionAllocBufFtraceEvent> T* set_binder_transaction_alloc_buf() {
+    return BeginNestedMessage<T>(323);
+  }
+
+
+  using FieldMetadata_SignalDeliver =
+    ::protozero::proto_utils::FieldMetadata<
+      324,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SignalDeliverFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SignalDeliver kSignalDeliver() { return {}; }
+  template <typename T = SignalDeliverFtraceEvent> T* set_signal_deliver() {
+    return BeginNestedMessage<T>(324);
+  }
+
+
+  using FieldMetadata_SignalGenerate =
+    ::protozero::proto_utils::FieldMetadata<
+      325,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SignalGenerateFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SignalGenerate kSignalGenerate() { return {}; }
+  template <typename T = SignalGenerateFtraceEvent> T* set_signal_generate() {
+    return BeginNestedMessage<T>(325);
+  }
+
+
+  using FieldMetadata_OomScoreAdjUpdate =
+    ::protozero::proto_utils::FieldMetadata<
+      326,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      OomScoreAdjUpdateFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OomScoreAdjUpdate kOomScoreAdjUpdate() { return {}; }
+  template <typename T = OomScoreAdjUpdateFtraceEvent> T* set_oom_score_adj_update() {
+    return BeginNestedMessage<T>(326);
+  }
+
+
+  using FieldMetadata_Generic =
+    ::protozero::proto_utils::FieldMetadata<
+      327,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GenericFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Generic kGeneric() { return {}; }
+  template <typename T = GenericFtraceEvent> T* set_generic() {
+    return BeginNestedMessage<T>(327);
+  }
+
+
+  using FieldMetadata_MmEventRecord =
+    ::protozero::proto_utils::FieldMetadata<
+      328,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MmEventRecordFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmEventRecord kMmEventRecord() { return {}; }
+  template <typename T = MmEventRecordFtraceEvent> T* set_mm_event_record() {
+    return BeginNestedMessage<T>(328);
+  }
+
+
+  using FieldMetadata_SysEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      329,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SysEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SysEnter kSysEnter() { return {}; }
+  template <typename T = SysEnterFtraceEvent> T* set_sys_enter() {
+    return BeginNestedMessage<T>(329);
+  }
+
+
+  using FieldMetadata_SysExit =
+    ::protozero::proto_utils::FieldMetadata<
+      330,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SysExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SysExit kSysExit() { return {}; }
+  template <typename T = SysExitFtraceEvent> T* set_sys_exit() {
+    return BeginNestedMessage<T>(330);
+  }
+
+
+  using FieldMetadata_Zero =
+    ::protozero::proto_utils::FieldMetadata<
+      331,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ZeroFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Zero kZero() { return {}; }
+  template <typename T = ZeroFtraceEvent> T* set_zero() {
+    return BeginNestedMessage<T>(331);
+  }
+
+
+  using FieldMetadata_GpuFrequency =
+    ::protozero::proto_utils::FieldMetadata<
+      332,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuFrequencyFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuFrequency kGpuFrequency() { return {}; }
+  template <typename T = GpuFrequencyFtraceEvent> T* set_gpu_frequency() {
+    return BeginNestedMessage<T>(332);
+  }
+
+
+  using FieldMetadata_SdeTracingMarkWrite =
+    ::protozero::proto_utils::FieldMetadata<
+      333,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SdeTracingMarkWriteFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SdeTracingMarkWrite kSdeTracingMarkWrite() { return {}; }
+  template <typename T = SdeTracingMarkWriteFtraceEvent> T* set_sde_tracing_mark_write() {
+    return BeginNestedMessage<T>(333);
+  }
+
+
+  using FieldMetadata_MarkVictim =
+    ::protozero::proto_utils::FieldMetadata<
+      334,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MarkVictimFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MarkVictim kMarkVictim() { return {}; }
+  template <typename T = MarkVictimFtraceEvent> T* set_mark_victim() {
+    return BeginNestedMessage<T>(334);
+  }
+
+
+  using FieldMetadata_IonStat =
+    ::protozero::proto_utils::FieldMetadata<
+      335,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonStatFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonStat kIonStat() { return {}; }
+  template <typename T = IonStatFtraceEvent> T* set_ion_stat() {
+    return BeginNestedMessage<T>(335);
+  }
+
+
+  using FieldMetadata_IonBufferCreate =
+    ::protozero::proto_utils::FieldMetadata<
+      336,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonBufferCreateFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonBufferCreate kIonBufferCreate() { return {}; }
+  template <typename T = IonBufferCreateFtraceEvent> T* set_ion_buffer_create() {
+    return BeginNestedMessage<T>(336);
+  }
+
+
+  using FieldMetadata_IonBufferDestroy =
+    ::protozero::proto_utils::FieldMetadata<
+      337,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      IonBufferDestroyFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IonBufferDestroy kIonBufferDestroy() { return {}; }
+  template <typename T = IonBufferDestroyFtraceEvent> T* set_ion_buffer_destroy() {
+    return BeginNestedMessage<T>(337);
+  }
+
+
+  using FieldMetadata_ScmCallStart =
+    ::protozero::proto_utils::FieldMetadata<
+      338,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ScmCallStartFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ScmCallStart kScmCallStart() { return {}; }
+  template <typename T = ScmCallStartFtraceEvent> T* set_scm_call_start() {
+    return BeginNestedMessage<T>(338);
+  }
+
+
+  using FieldMetadata_ScmCallEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      339,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ScmCallEndFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ScmCallEnd kScmCallEnd() { return {}; }
+  template <typename T = ScmCallEndFtraceEvent> T* set_scm_call_end() {
+    return BeginNestedMessage<T>(339);
+  }
+
+
+  using FieldMetadata_GpuMemTotal =
+    ::protozero::proto_utils::FieldMetadata<
+      340,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuMemTotalFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuMemTotal kGpuMemTotal() { return {}; }
+  template <typename T = GpuMemTotalFtraceEvent> T* set_gpu_mem_total() {
+    return BeginNestedMessage<T>(340);
+  }
+
+
+  using FieldMetadata_ThermalTemperature =
+    ::protozero::proto_utils::FieldMetadata<
+      341,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ThermalTemperatureFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThermalTemperature kThermalTemperature() { return {}; }
+  template <typename T = ThermalTemperatureFtraceEvent> T* set_thermal_temperature() {
+    return BeginNestedMessage<T>(341);
+  }
+
+
+  using FieldMetadata_CdevUpdate =
+    ::protozero::proto_utils::FieldMetadata<
+      342,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CdevUpdateFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CdevUpdate kCdevUpdate() { return {}; }
+  template <typename T = CdevUpdateFtraceEvent> T* set_cdev_update() {
+    return BeginNestedMessage<T>(342);
+  }
+
+
+  using FieldMetadata_CpuhpExit =
+    ::protozero::proto_utils::FieldMetadata<
+      343,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CpuhpExitFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuhpExit kCpuhpExit() { return {}; }
+  template <typename T = CpuhpExitFtraceEvent> T* set_cpuhp_exit() {
+    return BeginNestedMessage<T>(343);
+  }
+
+
+  using FieldMetadata_CpuhpMultiEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      344,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CpuhpMultiEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuhpMultiEnter kCpuhpMultiEnter() { return {}; }
+  template <typename T = CpuhpMultiEnterFtraceEvent> T* set_cpuhp_multi_enter() {
+    return BeginNestedMessage<T>(344);
+  }
+
+
+  using FieldMetadata_CpuhpEnter =
+    ::protozero::proto_utils::FieldMetadata<
+      345,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CpuhpEnterFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuhpEnter kCpuhpEnter() { return {}; }
+  template <typename T = CpuhpEnterFtraceEvent> T* set_cpuhp_enter() {
+    return BeginNestedMessage<T>(345);
+  }
+
+
+  using FieldMetadata_CpuhpLatency =
+    ::protozero::proto_utils::FieldMetadata<
+      346,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CpuhpLatencyFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuhpLatency kCpuhpLatency() { return {}; }
+  template <typename T = CpuhpLatencyFtraceEvent> T* set_cpuhp_latency() {
+    return BeginNestedMessage<T>(346);
+  }
+
+
+  using FieldMetadata_FastrpcDmaStat =
+    ::protozero::proto_utils::FieldMetadata<
+      347,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FastrpcDmaStatFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FastrpcDmaStat kFastrpcDmaStat() { return {}; }
+  template <typename T = FastrpcDmaStatFtraceEvent> T* set_fastrpc_dma_stat() {
+    return BeginNestedMessage<T>(347);
+  }
+
+
+  using FieldMetadata_DpuTracingMarkWrite =
+    ::protozero::proto_utils::FieldMetadata<
+      348,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DpuTracingMarkWriteFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DpuTracingMarkWrite kDpuTracingMarkWrite() { return {}; }
+  template <typename T = DpuTracingMarkWriteFtraceEvent> T* set_dpu_tracing_mark_write() {
+    return BeginNestedMessage<T>(348);
+  }
+
+
+  using FieldMetadata_G2dTracingMarkWrite =
+    ::protozero::proto_utils::FieldMetadata<
+      349,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      G2dTracingMarkWriteFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_G2dTracingMarkWrite kG2dTracingMarkWrite() { return {}; }
+  template <typename T = G2dTracingMarkWriteFtraceEvent> T* set_g2d_tracing_mark_write() {
+    return BeginNestedMessage<T>(349);
+  }
+
+
+  using FieldMetadata_MaliTracingMarkWrite =
+    ::protozero::proto_utils::FieldMetadata<
+      350,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MaliTracingMarkWriteFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MaliTracingMarkWrite kMaliTracingMarkWrite() { return {}; }
+  template <typename T = MaliTracingMarkWriteFtraceEvent> T* set_mali_tracing_mark_write() {
+    return BeginNestedMessage<T>(350);
+  }
+
+
+  using FieldMetadata_DmaHeapStat =
+    ::protozero::proto_utils::FieldMetadata<
+      351,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DmaHeapStatFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DmaHeapStat kDmaHeapStat() { return {}; }
+  template <typename T = DmaHeapStatFtraceEvent> T* set_dma_heap_stat() {
+    return BeginNestedMessage<T>(351);
+  }
+
+
+  using FieldMetadata_CpuhpPause =
+    ::protozero::proto_utils::FieldMetadata<
+      352,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CpuhpPauseFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuhpPause kCpuhpPause() { return {}; }
+  template <typename T = CpuhpPauseFtraceEvent> T* set_cpuhp_pause() {
+    return BeginNestedMessage<T>(352);
+  }
+
+
+  using FieldMetadata_SchedPiSetprio =
+    ::protozero::proto_utils::FieldMetadata<
+      353,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SchedPiSetprioFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SchedPiSetprio kSchedPiSetprio() { return {}; }
+  template <typename T = SchedPiSetprioFtraceEvent> T* set_sched_pi_setprio() {
+    return BeginNestedMessage<T>(353);
+  }
+
+
+  using FieldMetadata_SdeSdeEvtlog =
+    ::protozero::proto_utils::FieldMetadata<
+      354,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SdeSdeEvtlogFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SdeSdeEvtlog kSdeSdeEvtlog() { return {}; }
+  template <typename T = SdeSdeEvtlogFtraceEvent> T* set_sde_sde_evtlog() {
+    return BeginNestedMessage<T>(354);
+  }
+
+
+  using FieldMetadata_SdeSdePerfCalcCrtc =
+    ::protozero::proto_utils::FieldMetadata<
+      355,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SdeSdePerfCalcCrtcFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SdeSdePerfCalcCrtc kSdeSdePerfCalcCrtc() { return {}; }
+  template <typename T = SdeSdePerfCalcCrtcFtraceEvent> T* set_sde_sde_perf_calc_crtc() {
+    return BeginNestedMessage<T>(355);
+  }
+
+
+  using FieldMetadata_SdeSdePerfCrtcUpdate =
+    ::protozero::proto_utils::FieldMetadata<
+      356,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SdeSdePerfCrtcUpdateFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SdeSdePerfCrtcUpdate kSdeSdePerfCrtcUpdate() { return {}; }
+  template <typename T = SdeSdePerfCrtcUpdateFtraceEvent> T* set_sde_sde_perf_crtc_update() {
+    return BeginNestedMessage<T>(356);
+  }
+
+
+  using FieldMetadata_SdeSdePerfSetQosLuts =
+    ::protozero::proto_utils::FieldMetadata<
+      357,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SdeSdePerfSetQosLutsFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SdeSdePerfSetQosLuts kSdeSdePerfSetQosLuts() { return {}; }
+  template <typename T = SdeSdePerfSetQosLutsFtraceEvent> T* set_sde_sde_perf_set_qos_luts() {
+    return BeginNestedMessage<T>(357);
+  }
+
+
+  using FieldMetadata_SdeSdePerfUpdateBus =
+    ::protozero::proto_utils::FieldMetadata<
+      358,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SdeSdePerfUpdateBusFtraceEvent,
+      FtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SdeSdePerfUpdateBus kSdeSdePerfUpdateBus() { return {}; }
+  template <typename T = SdeSdePerfUpdateBusFtraceEvent> T* set_sde_sde_perf_update_bus() {
+    return BeginNestedMessage<T>(358);
+  }
+
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/ftrace_event_bundle.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_FTRACE_EVENT_BUNDLE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_FTRACE_EVENT_BUNDLE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class FtraceEvent;
+class FtraceEventBundle_CompactSched;
+
+class FtraceEventBundle_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  FtraceEventBundle_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FtraceEventBundle_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FtraceEventBundle_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_cpu() const { return at<1>().valid(); }
+  uint32_t cpu() const { return at<1>().as_uint32(); }
+  bool has_event() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> event() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_lost_events() const { return at<3>().valid(); }
+  bool lost_events() const { return at<3>().as_bool(); }
+  bool has_compact_sched() const { return at<4>().valid(); }
+  ::protozero::ConstBytes compact_sched() const { return at<4>().as_bytes(); }
+};
+
+class FtraceEventBundle : public ::protozero::Message {
+ public:
+  using Decoder = FtraceEventBundle_Decoder;
+  enum : int32_t {
+    kCpuFieldNumber = 1,
+    kEventFieldNumber = 2,
+    kLostEventsFieldNumber = 3,
+    kCompactSchedFieldNumber = 4,
+  };
+  using CompactSched = ::perfetto::protos::pbzero::FtraceEventBundle_CompactSched;
+
+  using FieldMetadata_Cpu =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      FtraceEventBundle>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cpu kCpu() { return {}; }
+  void set_cpu(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cpu::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Event =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FtraceEvent,
+      FtraceEventBundle>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Event kEvent() { return {}; }
+  template <typename T = FtraceEvent> T* add_event() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_LostEvents =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      FtraceEventBundle>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LostEvents kLostEvents() { return {}; }
+  void set_lost_events(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_LostEvents::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CompactSched =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FtraceEventBundle_CompactSched,
+      FtraceEventBundle>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CompactSched kCompactSched() { return {}; }
+  template <typename T = FtraceEventBundle_CompactSched> T* set_compact_sched() {
+    return BeginNestedMessage<T>(4);
+  }
+
+};
+
+class FtraceEventBundle_CompactSched_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/11, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  FtraceEventBundle_CompactSched_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FtraceEventBundle_CompactSched_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FtraceEventBundle_CompactSched_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_intern_table() const { return at<5>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> intern_table() const { return GetRepeated<::protozero::ConstChars>(5); }
+  bool has_switch_timestamp() const { return at<1>().valid(); }
+  ::protozero::PackedRepeatedFieldIterator<::protozero::proto_utils::ProtoWireType::kVarInt, uint64_t> switch_timestamp(bool* parse_error_ptr) const { return GetPackedRepeated<::protozero::proto_utils::ProtoWireType::kVarInt, uint64_t>(1, parse_error_ptr); }
+  bool has_switch_prev_state() const { return at<2>().valid(); }
+  ::protozero::PackedRepeatedFieldIterator<::protozero::proto_utils::ProtoWireType::kVarInt, int64_t> switch_prev_state(bool* parse_error_ptr) const { return GetPackedRepeated<::protozero::proto_utils::ProtoWireType::kVarInt, int64_t>(2, parse_error_ptr); }
+  bool has_switch_next_pid() const { return at<3>().valid(); }
+  ::protozero::PackedRepeatedFieldIterator<::protozero::proto_utils::ProtoWireType::kVarInt, int32_t> switch_next_pid(bool* parse_error_ptr) const { return GetPackedRepeated<::protozero::proto_utils::ProtoWireType::kVarInt, int32_t>(3, parse_error_ptr); }
+  bool has_switch_next_prio() const { return at<4>().valid(); }
+  ::protozero::PackedRepeatedFieldIterator<::protozero::proto_utils::ProtoWireType::kVarInt, int32_t> switch_next_prio(bool* parse_error_ptr) const { return GetPackedRepeated<::protozero::proto_utils::ProtoWireType::kVarInt, int32_t>(4, parse_error_ptr); }
+  bool has_switch_next_comm_index() const { return at<6>().valid(); }
+  ::protozero::PackedRepeatedFieldIterator<::protozero::proto_utils::ProtoWireType::kVarInt, uint32_t> switch_next_comm_index(bool* parse_error_ptr) const { return GetPackedRepeated<::protozero::proto_utils::ProtoWireType::kVarInt, uint32_t>(6, parse_error_ptr); }
+  bool has_waking_timestamp() const { return at<7>().valid(); }
+  ::protozero::PackedRepeatedFieldIterator<::protozero::proto_utils::ProtoWireType::kVarInt, uint64_t> waking_timestamp(bool* parse_error_ptr) const { return GetPackedRepeated<::protozero::proto_utils::ProtoWireType::kVarInt, uint64_t>(7, parse_error_ptr); }
+  bool has_waking_pid() const { return at<8>().valid(); }
+  ::protozero::PackedRepeatedFieldIterator<::protozero::proto_utils::ProtoWireType::kVarInt, int32_t> waking_pid(bool* parse_error_ptr) const { return GetPackedRepeated<::protozero::proto_utils::ProtoWireType::kVarInt, int32_t>(8, parse_error_ptr); }
+  bool has_waking_target_cpu() const { return at<9>().valid(); }
+  ::protozero::PackedRepeatedFieldIterator<::protozero::proto_utils::ProtoWireType::kVarInt, int32_t> waking_target_cpu(bool* parse_error_ptr) const { return GetPackedRepeated<::protozero::proto_utils::ProtoWireType::kVarInt, int32_t>(9, parse_error_ptr); }
+  bool has_waking_prio() const { return at<10>().valid(); }
+  ::protozero::PackedRepeatedFieldIterator<::protozero::proto_utils::ProtoWireType::kVarInt, int32_t> waking_prio(bool* parse_error_ptr) const { return GetPackedRepeated<::protozero::proto_utils::ProtoWireType::kVarInt, int32_t>(10, parse_error_ptr); }
+  bool has_waking_comm_index() const { return at<11>().valid(); }
+  ::protozero::PackedRepeatedFieldIterator<::protozero::proto_utils::ProtoWireType::kVarInt, uint32_t> waking_comm_index(bool* parse_error_ptr) const { return GetPackedRepeated<::protozero::proto_utils::ProtoWireType::kVarInt, uint32_t>(11, parse_error_ptr); }
+};
+
+class FtraceEventBundle_CompactSched : public ::protozero::Message {
+ public:
+  using Decoder = FtraceEventBundle_CompactSched_Decoder;
+  enum : int32_t {
+    kInternTableFieldNumber = 5,
+    kSwitchTimestampFieldNumber = 1,
+    kSwitchPrevStateFieldNumber = 2,
+    kSwitchNextPidFieldNumber = 3,
+    kSwitchNextPrioFieldNumber = 4,
+    kSwitchNextCommIndexFieldNumber = 6,
+    kWakingTimestampFieldNumber = 7,
+    kWakingPidFieldNumber = 8,
+    kWakingTargetCpuFieldNumber = 9,
+    kWakingPrioFieldNumber = 10,
+    kWakingCommIndexFieldNumber = 11,
+  };
+
+  using FieldMetadata_InternTable =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FtraceEventBundle_CompactSched>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InternTable kInternTable() { return {}; }
+  void add_intern_table(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_InternTable::kFieldId, data, size);
+  }
+  void add_intern_table(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_InternTable::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SwitchTimestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      FtraceEventBundle_CompactSched>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SwitchTimestamp kSwitchTimestamp() { return {}; }
+  void set_switch_timestamp(const ::protozero::PackedVarInt& packed_buffer) {
+    AppendBytes(FieldMetadata_SwitchTimestamp::kFieldId, packed_buffer.data(),
+                packed_buffer.size());
+  }
+
+  using FieldMetadata_SwitchPrevState =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      FtraceEventBundle_CompactSched>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SwitchPrevState kSwitchPrevState() { return {}; }
+  void set_switch_prev_state(const ::protozero::PackedVarInt& packed_buffer) {
+    AppendBytes(FieldMetadata_SwitchPrevState::kFieldId, packed_buffer.data(),
+                packed_buffer.size());
+  }
+
+  using FieldMetadata_SwitchNextPid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      FtraceEventBundle_CompactSched>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SwitchNextPid kSwitchNextPid() { return {}; }
+  void set_switch_next_pid(const ::protozero::PackedVarInt& packed_buffer) {
+    AppendBytes(FieldMetadata_SwitchNextPid::kFieldId, packed_buffer.data(),
+                packed_buffer.size());
+  }
+
+  using FieldMetadata_SwitchNextPrio =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      FtraceEventBundle_CompactSched>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SwitchNextPrio kSwitchNextPrio() { return {}; }
+  void set_switch_next_prio(const ::protozero::PackedVarInt& packed_buffer) {
+    AppendBytes(FieldMetadata_SwitchNextPrio::kFieldId, packed_buffer.data(),
+                packed_buffer.size());
+  }
+
+  using FieldMetadata_SwitchNextCommIndex =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kRepeatedPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      FtraceEventBundle_CompactSched>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SwitchNextCommIndex kSwitchNextCommIndex() { return {}; }
+  void set_switch_next_comm_index(const ::protozero::PackedVarInt& packed_buffer) {
+    AppendBytes(FieldMetadata_SwitchNextCommIndex::kFieldId, packed_buffer.data(),
+                packed_buffer.size());
+  }
+
+  using FieldMetadata_WakingTimestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kRepeatedPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      FtraceEventBundle_CompactSched>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WakingTimestamp kWakingTimestamp() { return {}; }
+  void set_waking_timestamp(const ::protozero::PackedVarInt& packed_buffer) {
+    AppendBytes(FieldMetadata_WakingTimestamp::kFieldId, packed_buffer.data(),
+                packed_buffer.size());
+  }
+
+  using FieldMetadata_WakingPid =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kRepeatedPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      FtraceEventBundle_CompactSched>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WakingPid kWakingPid() { return {}; }
+  void set_waking_pid(const ::protozero::PackedVarInt& packed_buffer) {
+    AppendBytes(FieldMetadata_WakingPid::kFieldId, packed_buffer.data(),
+                packed_buffer.size());
+  }
+
+  using FieldMetadata_WakingTargetCpu =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kRepeatedPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      FtraceEventBundle_CompactSched>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WakingTargetCpu kWakingTargetCpu() { return {}; }
+  void set_waking_target_cpu(const ::protozero::PackedVarInt& packed_buffer) {
+    AppendBytes(FieldMetadata_WakingTargetCpu::kFieldId, packed_buffer.data(),
+                packed_buffer.size());
+  }
+
+  using FieldMetadata_WakingPrio =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kRepeatedPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      FtraceEventBundle_CompactSched>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WakingPrio kWakingPrio() { return {}; }
+  void set_waking_prio(const ::protozero::PackedVarInt& packed_buffer) {
+    AppendBytes(FieldMetadata_WakingPrio::kFieldId, packed_buffer.data(),
+                packed_buffer.size());
+  }
+
+  using FieldMetadata_WakingCommIndex =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kRepeatedPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      FtraceEventBundle_CompactSched>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WakingCommIndex kWakingCommIndex() { return {}; }
+  void set_waking_comm_index(const ::protozero::PackedVarInt& packed_buffer) {
+    AppendBytes(FieldMetadata_WakingCommIndex::kFieldId, packed_buffer.data(),
+                packed_buffer.size());
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/ftrace_stats.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_FTRACE_STATS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_FTRACE_STATS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class FtraceCpuStats;
+enum FtraceStats_Phase : int32_t;
+
+enum FtraceStats_Phase : int32_t {
+  FtraceStats_Phase_UNSPECIFIED = 0,
+  FtraceStats_Phase_START_OF_TRACE = 1,
+  FtraceStats_Phase_END_OF_TRACE = 2,
+};
+
+const FtraceStats_Phase FtraceStats_Phase_MIN = FtraceStats_Phase_UNSPECIFIED;
+const FtraceStats_Phase FtraceStats_Phase_MAX = FtraceStats_Phase_END_OF_TRACE;
+
+class FtraceStats_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  FtraceStats_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FtraceStats_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FtraceStats_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_phase() const { return at<1>().valid(); }
+  int32_t phase() const { return at<1>().as_int32(); }
+  bool has_cpu_stats() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> cpu_stats() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_kernel_symbols_parsed() const { return at<3>().valid(); }
+  uint32_t kernel_symbols_parsed() const { return at<3>().as_uint32(); }
+  bool has_kernel_symbols_mem_kb() const { return at<4>().valid(); }
+  uint32_t kernel_symbols_mem_kb() const { return at<4>().as_uint32(); }
+};
+
+class FtraceStats : public ::protozero::Message {
+ public:
+  using Decoder = FtraceStats_Decoder;
+  enum : int32_t {
+    kPhaseFieldNumber = 1,
+    kCpuStatsFieldNumber = 2,
+    kKernelSymbolsParsedFieldNumber = 3,
+    kKernelSymbolsMemKbFieldNumber = 4,
+  };
+  using Phase = ::perfetto::protos::pbzero::FtraceStats_Phase;
+  static const Phase UNSPECIFIED = FtraceStats_Phase_UNSPECIFIED;
+  static const Phase START_OF_TRACE = FtraceStats_Phase_START_OF_TRACE;
+  static const Phase END_OF_TRACE = FtraceStats_Phase_END_OF_TRACE;
+
+  using FieldMetadata_Phase =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::FtraceStats_Phase,
+      FtraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Phase kPhase() { return {}; }
+  void set_phase(::perfetto::protos::pbzero::FtraceStats_Phase value) {
+    static constexpr uint32_t field_id = FieldMetadata_Phase::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CpuStats =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FtraceCpuStats,
+      FtraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuStats kCpuStats() { return {}; }
+  template <typename T = FtraceCpuStats> T* add_cpu_stats() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_KernelSymbolsParsed =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      FtraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_KernelSymbolsParsed kKernelSymbolsParsed() { return {}; }
+  void set_kernel_symbols_parsed(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_KernelSymbolsParsed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_KernelSymbolsMemKb =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      FtraceStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_KernelSymbolsMemKb kKernelSymbolsMemKb() { return {}; }
+  void set_kernel_symbols_mem_kb(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_KernelSymbolsMemKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class FtraceCpuStats_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/9, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  FtraceCpuStats_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FtraceCpuStats_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FtraceCpuStats_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_cpu() const { return at<1>().valid(); }
+  uint64_t cpu() const { return at<1>().as_uint64(); }
+  bool has_entries() const { return at<2>().valid(); }
+  uint64_t entries() const { return at<2>().as_uint64(); }
+  bool has_overrun() const { return at<3>().valid(); }
+  uint64_t overrun() const { return at<3>().as_uint64(); }
+  bool has_commit_overrun() const { return at<4>().valid(); }
+  uint64_t commit_overrun() const { return at<4>().as_uint64(); }
+  bool has_bytes_read() const { return at<5>().valid(); }
+  uint64_t bytes_read() const { return at<5>().as_uint64(); }
+  bool has_oldest_event_ts() const { return at<6>().valid(); }
+  double oldest_event_ts() const { return at<6>().as_double(); }
+  bool has_now_ts() const { return at<7>().valid(); }
+  double now_ts() const { return at<7>().as_double(); }
+  bool has_dropped_events() const { return at<8>().valid(); }
+  uint64_t dropped_events() const { return at<8>().as_uint64(); }
+  bool has_read_events() const { return at<9>().valid(); }
+  uint64_t read_events() const { return at<9>().as_uint64(); }
+};
+
+class FtraceCpuStats : public ::protozero::Message {
+ public:
+  using Decoder = FtraceCpuStats_Decoder;
+  enum : int32_t {
+    kCpuFieldNumber = 1,
+    kEntriesFieldNumber = 2,
+    kOverrunFieldNumber = 3,
+    kCommitOverrunFieldNumber = 4,
+    kBytesReadFieldNumber = 5,
+    kOldestEventTsFieldNumber = 6,
+    kNowTsFieldNumber = 7,
+    kDroppedEventsFieldNumber = 8,
+    kReadEventsFieldNumber = 9,
+  };
+
+  using FieldMetadata_Cpu =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      FtraceCpuStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cpu kCpu() { return {}; }
+  void set_cpu(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cpu::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Entries =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      FtraceCpuStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Entries kEntries() { return {}; }
+  void set_entries(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Entries::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Overrun =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      FtraceCpuStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Overrun kOverrun() { return {}; }
+  void set_overrun(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Overrun::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CommitOverrun =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      FtraceCpuStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CommitOverrun kCommitOverrun() { return {}; }
+  void set_commit_overrun(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CommitOverrun::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BytesRead =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      FtraceCpuStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BytesRead kBytesRead() { return {}; }
+  void set_bytes_read(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BytesRead::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OldestEventTs =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      FtraceCpuStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OldestEventTs kOldestEventTs() { return {}; }
+  void set_oldest_event_ts(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_OldestEventTs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NowTs =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      FtraceCpuStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NowTs kNowTs() { return {}; }
+  void set_now_ts(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_NowTs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DroppedEvents =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      FtraceCpuStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DroppedEvents kDroppedEvents() { return {}; }
+  void set_dropped_events(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DroppedEvents::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReadEvents =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      FtraceCpuStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReadEvents kReadEvents() { return {}; }
+  void set_read_events(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReadEvents::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/test_bundle_wrapper.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_TEST_BUNDLE_WRAPPER_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_TEST_BUNDLE_WRAPPER_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class FtraceEventBundle;
+
+class TestBundleWrapper_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TestBundleWrapper_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TestBundleWrapper_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TestBundleWrapper_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_before() const { return at<1>().valid(); }
+  ::protozero::ConstChars before() const { return at<1>().as_string(); }
+  bool has_bundle() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> bundle() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_after() const { return at<3>().valid(); }
+  ::protozero::ConstChars after() const { return at<3>().as_string(); }
+};
+
+class TestBundleWrapper : public ::protozero::Message {
+ public:
+  using Decoder = TestBundleWrapper_Decoder;
+  enum : int32_t {
+    kBeforeFieldNumber = 1,
+    kBundleFieldNumber = 2,
+    kAfterFieldNumber = 3,
+  };
+
+  using FieldMetadata_Before =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TestBundleWrapper>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Before kBefore() { return {}; }
+  void set_before(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Before::kFieldId, data, size);
+  }
+  void set_before(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Before::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Bundle =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FtraceEventBundle,
+      TestBundleWrapper>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Bundle kBundle() { return {}; }
+  template <typename T = FtraceEventBundle> T* add_bundle() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_After =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TestBundleWrapper>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_After kAfter() { return {}; }
+  void set_after(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_After::kFieldId, data, size);
+  }
+  void set_after(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_After::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/generic.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_GENERIC_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_GENERIC_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class GenericFtraceEvent_Field;
+
+class GenericFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  GenericFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GenericFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GenericFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_event_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars event_name() const { return at<1>().as_string(); }
+  bool has_field() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> field() const { return GetRepeated<::protozero::ConstBytes>(2); }
+};
+
+class GenericFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = GenericFtraceEvent_Decoder;
+  enum : int32_t {
+    kEventNameFieldNumber = 1,
+    kFieldFieldNumber = 2,
+  };
+  using Field = ::perfetto::protos::pbzero::GenericFtraceEvent_Field;
+
+  using FieldMetadata_EventName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      GenericFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EventName kEventName() { return {}; }
+  void set_event_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_EventName::kFieldId, data, size);
+  }
+  void set_event_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_EventName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Field =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GenericFtraceEvent_Field,
+      GenericFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Field kField() { return {}; }
+  template <typename T = GenericFtraceEvent_Field> T* add_field() {
+    return BeginNestedMessage<T>(2);
+  }
+
+};
+
+class GenericFtraceEvent_Field_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  GenericFtraceEvent_Field_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GenericFtraceEvent_Field_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GenericFtraceEvent_Field_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_str_value() const { return at<3>().valid(); }
+  ::protozero::ConstChars str_value() const { return at<3>().as_string(); }
+  bool has_int_value() const { return at<4>().valid(); }
+  int64_t int_value() const { return at<4>().as_int64(); }
+  bool has_uint_value() const { return at<5>().valid(); }
+  uint64_t uint_value() const { return at<5>().as_uint64(); }
+};
+
+class GenericFtraceEvent_Field : public ::protozero::Message {
+ public:
+  using Decoder = GenericFtraceEvent_Field_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kStrValueFieldNumber = 3,
+    kIntValueFieldNumber = 4,
+    kUintValueFieldNumber = 5,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      GenericFtraceEvent_Field>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StrValue =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      GenericFtraceEvent_Field>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StrValue kStrValue() { return {}; }
+  void set_str_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_StrValue::kFieldId, data, size);
+  }
+  void set_str_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_StrValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IntValue =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      GenericFtraceEvent_Field>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IntValue kIntValue() { return {}; }
+  void set_int_value(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IntValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UintValue =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      GenericFtraceEvent_Field>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UintValue kUintValue() { return {}; }
+  void set_uint_value(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UintValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/binder.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_BINDER_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_BINDER_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class BinderTransactionAllocBufFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BinderTransactionAllocBufFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BinderTransactionAllocBufFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BinderTransactionAllocBufFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_data_size() const { return at<1>().valid(); }
+  uint64_t data_size() const { return at<1>().as_uint64(); }
+  bool has_debug_id() const { return at<2>().valid(); }
+  int32_t debug_id() const { return at<2>().as_int32(); }
+  bool has_offsets_size() const { return at<3>().valid(); }
+  uint64_t offsets_size() const { return at<3>().as_uint64(); }
+};
+
+class BinderTransactionAllocBufFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BinderTransactionAllocBufFtraceEvent_Decoder;
+  enum : int32_t {
+    kDataSizeFieldNumber = 1,
+    kDebugIdFieldNumber = 2,
+    kOffsetsSizeFieldNumber = 3,
+  };
+
+  using FieldMetadata_DataSize =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BinderTransactionAllocBufFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DataSize kDataSize() { return {}; }
+  void set_data_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DataSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DebugId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      BinderTransactionAllocBufFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DebugId kDebugId() { return {}; }
+  void set_debug_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DebugId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OffsetsSize =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BinderTransactionAllocBufFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OffsetsSize kOffsetsSize() { return {}; }
+  void set_offsets_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OffsetsSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BinderUnlockFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BinderUnlockFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BinderUnlockFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BinderUnlockFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_tag() const { return at<1>().valid(); }
+  ::protozero::ConstChars tag() const { return at<1>().as_string(); }
+};
+
+class BinderUnlockFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BinderUnlockFtraceEvent_Decoder;
+  enum : int32_t {
+    kTagFieldNumber = 1,
+  };
+
+  using FieldMetadata_Tag =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BinderUnlockFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tag kTag() { return {}; }
+  void set_tag(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Tag::kFieldId, data, size);
+  }
+  void set_tag(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tag::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BinderLockedFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BinderLockedFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BinderLockedFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BinderLockedFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_tag() const { return at<1>().valid(); }
+  ::protozero::ConstChars tag() const { return at<1>().as_string(); }
+};
+
+class BinderLockedFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BinderLockedFtraceEvent_Decoder;
+  enum : int32_t {
+    kTagFieldNumber = 1,
+  };
+
+  using FieldMetadata_Tag =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BinderLockedFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tag kTag() { return {}; }
+  void set_tag(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Tag::kFieldId, data, size);
+  }
+  void set_tag(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tag::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BinderLockFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BinderLockFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BinderLockFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BinderLockFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_tag() const { return at<1>().valid(); }
+  ::protozero::ConstChars tag() const { return at<1>().as_string(); }
+};
+
+class BinderLockFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BinderLockFtraceEvent_Decoder;
+  enum : int32_t {
+    kTagFieldNumber = 1,
+  };
+
+  using FieldMetadata_Tag =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BinderLockFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tag kTag() { return {}; }
+  void set_tag(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Tag::kFieldId, data, size);
+  }
+  void set_tag(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tag::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BinderSetPriorityFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BinderSetPriorityFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BinderSetPriorityFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BinderSetPriorityFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_proc() const { return at<1>().valid(); }
+  int32_t proc() const { return at<1>().as_int32(); }
+  bool has_thread() const { return at<2>().valid(); }
+  int32_t thread() const { return at<2>().as_int32(); }
+  bool has_old_prio() const { return at<3>().valid(); }
+  uint32_t old_prio() const { return at<3>().as_uint32(); }
+  bool has_new_prio() const { return at<4>().valid(); }
+  uint32_t new_prio() const { return at<4>().as_uint32(); }
+  bool has_desired_prio() const { return at<5>().valid(); }
+  uint32_t desired_prio() const { return at<5>().as_uint32(); }
+};
+
+class BinderSetPriorityFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BinderSetPriorityFtraceEvent_Decoder;
+  enum : int32_t {
+    kProcFieldNumber = 1,
+    kThreadFieldNumber = 2,
+    kOldPrioFieldNumber = 3,
+    kNewPrioFieldNumber = 4,
+    kDesiredPrioFieldNumber = 5,
+  };
+
+  using FieldMetadata_Proc =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      BinderSetPriorityFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Proc kProc() { return {}; }
+  void set_proc(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Proc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Thread =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      BinderSetPriorityFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Thread kThread() { return {}; }
+  void set_thread(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Thread::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OldPrio =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BinderSetPriorityFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OldPrio kOldPrio() { return {}; }
+  void set_old_prio(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OldPrio::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NewPrio =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BinderSetPriorityFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NewPrio kNewPrio() { return {}; }
+  void set_new_prio(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NewPrio::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DesiredPrio =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BinderSetPriorityFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DesiredPrio kDesiredPrio() { return {}; }
+  void set_desired_prio(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DesiredPrio::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BinderTransactionReceivedFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BinderTransactionReceivedFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BinderTransactionReceivedFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BinderTransactionReceivedFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_debug_id() const { return at<1>().valid(); }
+  int32_t debug_id() const { return at<1>().as_int32(); }
+};
+
+class BinderTransactionReceivedFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BinderTransactionReceivedFtraceEvent_Decoder;
+  enum : int32_t {
+    kDebugIdFieldNumber = 1,
+  };
+
+  using FieldMetadata_DebugId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      BinderTransactionReceivedFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DebugId kDebugId() { return {}; }
+  void set_debug_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DebugId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BinderTransactionFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BinderTransactionFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BinderTransactionFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BinderTransactionFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_debug_id() const { return at<1>().valid(); }
+  int32_t debug_id() const { return at<1>().as_int32(); }
+  bool has_target_node() const { return at<2>().valid(); }
+  int32_t target_node() const { return at<2>().as_int32(); }
+  bool has_to_proc() const { return at<3>().valid(); }
+  int32_t to_proc() const { return at<3>().as_int32(); }
+  bool has_to_thread() const { return at<4>().valid(); }
+  int32_t to_thread() const { return at<4>().as_int32(); }
+  bool has_reply() const { return at<5>().valid(); }
+  int32_t reply() const { return at<5>().as_int32(); }
+  bool has_code() const { return at<6>().valid(); }
+  uint32_t code() const { return at<6>().as_uint32(); }
+  bool has_flags() const { return at<7>().valid(); }
+  uint32_t flags() const { return at<7>().as_uint32(); }
+};
+
+class BinderTransactionFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BinderTransactionFtraceEvent_Decoder;
+  enum : int32_t {
+    kDebugIdFieldNumber = 1,
+    kTargetNodeFieldNumber = 2,
+    kToProcFieldNumber = 3,
+    kToThreadFieldNumber = 4,
+    kReplyFieldNumber = 5,
+    kCodeFieldNumber = 6,
+    kFlagsFieldNumber = 7,
+  };
+
+  using FieldMetadata_DebugId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      BinderTransactionFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DebugId kDebugId() { return {}; }
+  void set_debug_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DebugId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TargetNode =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      BinderTransactionFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TargetNode kTargetNode() { return {}; }
+  void set_target_node(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TargetNode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ToProc =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      BinderTransactionFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ToProc kToProc() { return {}; }
+  void set_to_proc(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ToProc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ToThread =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      BinderTransactionFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ToThread kToThread() { return {}; }
+  void set_to_thread(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ToThread::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Reply =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      BinderTransactionFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Reply kReply() { return {}; }
+  void set_reply(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Reply::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Code =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BinderTransactionFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Code kCode() { return {}; }
+  void set_code(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Code::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BinderTransactionFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/block.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_BLOCK_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_BLOCK_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class BlockUnplugFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockUnplugFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockUnplugFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockUnplugFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_nr_rq() const { return at<1>().valid(); }
+  int32_t nr_rq() const { return at<1>().as_int32(); }
+  bool has_comm() const { return at<2>().valid(); }
+  ::protozero::ConstChars comm() const { return at<2>().as_string(); }
+};
+
+class BlockUnplugFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockUnplugFtraceEvent_Decoder;
+  enum : int32_t {
+    kNrRqFieldNumber = 1,
+    kCommFieldNumber = 2,
+  };
+
+  using FieldMetadata_NrRq =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      BlockUnplugFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrRq kNrRq() { return {}; }
+  void set_nr_rq(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrRq::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockUnplugFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockTouchBufferFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockTouchBufferFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockTouchBufferFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockTouchBufferFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_sector() const { return at<2>().valid(); }
+  uint64_t sector() const { return at<2>().as_uint64(); }
+  bool has_size() const { return at<3>().valid(); }
+  uint64_t size() const { return at<3>().as_uint64(); }
+};
+
+class BlockTouchBufferFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockTouchBufferFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kSectorFieldNumber = 2,
+    kSizeFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockTouchBufferFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockTouchBufferFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Size =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockTouchBufferFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Size kSize() { return {}; }
+  void set_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Size::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockSplitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockSplitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockSplitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockSplitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_sector() const { return at<2>().valid(); }
+  uint64_t sector() const { return at<2>().as_uint64(); }
+  bool has_new_sector() const { return at<3>().valid(); }
+  uint64_t new_sector() const { return at<3>().as_uint64(); }
+  bool has_rwbs() const { return at<4>().valid(); }
+  ::protozero::ConstChars rwbs() const { return at<4>().as_string(); }
+  bool has_comm() const { return at<5>().valid(); }
+  ::protozero::ConstChars comm() const { return at<5>().as_string(); }
+};
+
+class BlockSplitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockSplitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kSectorFieldNumber = 2,
+    kNewSectorFieldNumber = 3,
+    kRwbsFieldNumber = 4,
+    kCommFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockSplitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockSplitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NewSector =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockSplitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NewSector kNewSector() { return {}; }
+  void set_new_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NewSector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rwbs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockSplitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rwbs kRwbs() { return {}; }
+  void set_rwbs(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Rwbs::kFieldId, data, size);
+  }
+  void set_rwbs(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rwbs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockSplitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockSleeprqFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockSleeprqFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockSleeprqFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockSleeprqFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_sector() const { return at<2>().valid(); }
+  uint64_t sector() const { return at<2>().as_uint64(); }
+  bool has_nr_sector() const { return at<3>().valid(); }
+  uint32_t nr_sector() const { return at<3>().as_uint32(); }
+  bool has_rwbs() const { return at<4>().valid(); }
+  ::protozero::ConstChars rwbs() const { return at<4>().as_string(); }
+  bool has_comm() const { return at<5>().valid(); }
+  ::protozero::ConstChars comm() const { return at<5>().as_string(); }
+};
+
+class BlockSleeprqFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockSleeprqFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kSectorFieldNumber = 2,
+    kNrSectorFieldNumber = 3,
+    kRwbsFieldNumber = 4,
+    kCommFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockSleeprqFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockSleeprqFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrSector =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BlockSleeprqFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrSector kNrSector() { return {}; }
+  void set_nr_sector(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrSector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rwbs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockSleeprqFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rwbs kRwbs() { return {}; }
+  void set_rwbs(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Rwbs::kFieldId, data, size);
+  }
+  void set_rwbs(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rwbs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockSleeprqFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockRqRequeueFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockRqRequeueFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockRqRequeueFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockRqRequeueFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_sector() const { return at<2>().valid(); }
+  uint64_t sector() const { return at<2>().as_uint64(); }
+  bool has_nr_sector() const { return at<3>().valid(); }
+  uint32_t nr_sector() const { return at<3>().as_uint32(); }
+  bool has_errors() const { return at<4>().valid(); }
+  int32_t errors() const { return at<4>().as_int32(); }
+  bool has_rwbs() const { return at<5>().valid(); }
+  ::protozero::ConstChars rwbs() const { return at<5>().as_string(); }
+  bool has_cmd() const { return at<6>().valid(); }
+  ::protozero::ConstChars cmd() const { return at<6>().as_string(); }
+};
+
+class BlockRqRequeueFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockRqRequeueFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kSectorFieldNumber = 2,
+    kNrSectorFieldNumber = 3,
+    kErrorsFieldNumber = 4,
+    kRwbsFieldNumber = 5,
+    kCmdFieldNumber = 6,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockRqRequeueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockRqRequeueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrSector =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BlockRqRequeueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrSector kNrSector() { return {}; }
+  void set_nr_sector(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrSector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Errors =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      BlockRqRequeueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Errors kErrors() { return {}; }
+  void set_errors(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Errors::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rwbs =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockRqRequeueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rwbs kRwbs() { return {}; }
+  void set_rwbs(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Rwbs::kFieldId, data, size);
+  }
+  void set_rwbs(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rwbs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cmd =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockRqRequeueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cmd kCmd() { return {}; }
+  void set_cmd(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Cmd::kFieldId, data, size);
+  }
+  void set_cmd(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cmd::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockRqRemapFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockRqRemapFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockRqRemapFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockRqRemapFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_sector() const { return at<2>().valid(); }
+  uint64_t sector() const { return at<2>().as_uint64(); }
+  bool has_nr_sector() const { return at<3>().valid(); }
+  uint32_t nr_sector() const { return at<3>().as_uint32(); }
+  bool has_old_dev() const { return at<4>().valid(); }
+  uint64_t old_dev() const { return at<4>().as_uint64(); }
+  bool has_old_sector() const { return at<5>().valid(); }
+  uint64_t old_sector() const { return at<5>().as_uint64(); }
+  bool has_nr_bios() const { return at<6>().valid(); }
+  uint32_t nr_bios() const { return at<6>().as_uint32(); }
+  bool has_rwbs() const { return at<7>().valid(); }
+  ::protozero::ConstChars rwbs() const { return at<7>().as_string(); }
+};
+
+class BlockRqRemapFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockRqRemapFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kSectorFieldNumber = 2,
+    kNrSectorFieldNumber = 3,
+    kOldDevFieldNumber = 4,
+    kOldSectorFieldNumber = 5,
+    kNrBiosFieldNumber = 6,
+    kRwbsFieldNumber = 7,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockRqRemapFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockRqRemapFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrSector =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BlockRqRemapFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrSector kNrSector() { return {}; }
+  void set_nr_sector(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrSector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OldDev =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockRqRemapFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OldDev kOldDev() { return {}; }
+  void set_old_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OldDev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OldSector =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockRqRemapFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OldSector kOldSector() { return {}; }
+  void set_old_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OldSector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrBios =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BlockRqRemapFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrBios kNrBios() { return {}; }
+  void set_nr_bios(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrBios::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rwbs =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockRqRemapFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rwbs kRwbs() { return {}; }
+  void set_rwbs(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Rwbs::kFieldId, data, size);
+  }
+  void set_rwbs(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rwbs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockRqInsertFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockRqInsertFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockRqInsertFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockRqInsertFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_sector() const { return at<2>().valid(); }
+  uint64_t sector() const { return at<2>().as_uint64(); }
+  bool has_nr_sector() const { return at<3>().valid(); }
+  uint32_t nr_sector() const { return at<3>().as_uint32(); }
+  bool has_bytes() const { return at<4>().valid(); }
+  uint32_t bytes() const { return at<4>().as_uint32(); }
+  bool has_rwbs() const { return at<5>().valid(); }
+  ::protozero::ConstChars rwbs() const { return at<5>().as_string(); }
+  bool has_comm() const { return at<6>().valid(); }
+  ::protozero::ConstChars comm() const { return at<6>().as_string(); }
+  bool has_cmd() const { return at<7>().valid(); }
+  ::protozero::ConstChars cmd() const { return at<7>().as_string(); }
+};
+
+class BlockRqInsertFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockRqInsertFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kSectorFieldNumber = 2,
+    kNrSectorFieldNumber = 3,
+    kBytesFieldNumber = 4,
+    kRwbsFieldNumber = 5,
+    kCommFieldNumber = 6,
+    kCmdFieldNumber = 7,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockRqInsertFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockRqInsertFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrSector =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BlockRqInsertFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrSector kNrSector() { return {}; }
+  void set_nr_sector(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrSector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Bytes =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BlockRqInsertFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Bytes kBytes() { return {}; }
+  void set_bytes(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Bytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rwbs =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockRqInsertFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rwbs kRwbs() { return {}; }
+  void set_rwbs(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Rwbs::kFieldId, data, size);
+  }
+  void set_rwbs(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rwbs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockRqInsertFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cmd =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockRqInsertFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cmd kCmd() { return {}; }
+  void set_cmd(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Cmd::kFieldId, data, size);
+  }
+  void set_cmd(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cmd::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockRqCompleteFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockRqCompleteFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockRqCompleteFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockRqCompleteFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_sector() const { return at<2>().valid(); }
+  uint64_t sector() const { return at<2>().as_uint64(); }
+  bool has_nr_sector() const { return at<3>().valid(); }
+  uint32_t nr_sector() const { return at<3>().as_uint32(); }
+  bool has_errors() const { return at<4>().valid(); }
+  int32_t errors() const { return at<4>().as_int32(); }
+  bool has_rwbs() const { return at<5>().valid(); }
+  ::protozero::ConstChars rwbs() const { return at<5>().as_string(); }
+  bool has_cmd() const { return at<6>().valid(); }
+  ::protozero::ConstChars cmd() const { return at<6>().as_string(); }
+};
+
+class BlockRqCompleteFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockRqCompleteFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kSectorFieldNumber = 2,
+    kNrSectorFieldNumber = 3,
+    kErrorsFieldNumber = 4,
+    kRwbsFieldNumber = 5,
+    kCmdFieldNumber = 6,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockRqCompleteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockRqCompleteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrSector =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BlockRqCompleteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrSector kNrSector() { return {}; }
+  void set_nr_sector(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrSector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Errors =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      BlockRqCompleteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Errors kErrors() { return {}; }
+  void set_errors(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Errors::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rwbs =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockRqCompleteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rwbs kRwbs() { return {}; }
+  void set_rwbs(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Rwbs::kFieldId, data, size);
+  }
+  void set_rwbs(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rwbs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cmd =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockRqCompleteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cmd kCmd() { return {}; }
+  void set_cmd(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Cmd::kFieldId, data, size);
+  }
+  void set_cmd(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cmd::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockRqAbortFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockRqAbortFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockRqAbortFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockRqAbortFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_sector() const { return at<2>().valid(); }
+  uint64_t sector() const { return at<2>().as_uint64(); }
+  bool has_nr_sector() const { return at<3>().valid(); }
+  uint32_t nr_sector() const { return at<3>().as_uint32(); }
+  bool has_errors() const { return at<4>().valid(); }
+  int32_t errors() const { return at<4>().as_int32(); }
+  bool has_rwbs() const { return at<5>().valid(); }
+  ::protozero::ConstChars rwbs() const { return at<5>().as_string(); }
+  bool has_cmd() const { return at<6>().valid(); }
+  ::protozero::ConstChars cmd() const { return at<6>().as_string(); }
+};
+
+class BlockRqAbortFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockRqAbortFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kSectorFieldNumber = 2,
+    kNrSectorFieldNumber = 3,
+    kErrorsFieldNumber = 4,
+    kRwbsFieldNumber = 5,
+    kCmdFieldNumber = 6,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockRqAbortFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockRqAbortFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrSector =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BlockRqAbortFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrSector kNrSector() { return {}; }
+  void set_nr_sector(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrSector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Errors =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      BlockRqAbortFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Errors kErrors() { return {}; }
+  void set_errors(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Errors::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rwbs =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockRqAbortFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rwbs kRwbs() { return {}; }
+  void set_rwbs(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Rwbs::kFieldId, data, size);
+  }
+  void set_rwbs(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rwbs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cmd =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockRqAbortFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cmd kCmd() { return {}; }
+  void set_cmd(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Cmd::kFieldId, data, size);
+  }
+  void set_cmd(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cmd::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockPlugFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockPlugFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockPlugFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockPlugFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_comm() const { return at<1>().valid(); }
+  ::protozero::ConstChars comm() const { return at<1>().as_string(); }
+};
+
+class BlockPlugFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockPlugFtraceEvent_Decoder;
+  enum : int32_t {
+    kCommFieldNumber = 1,
+  };
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockPlugFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockGetrqFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockGetrqFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockGetrqFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockGetrqFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_sector() const { return at<2>().valid(); }
+  uint64_t sector() const { return at<2>().as_uint64(); }
+  bool has_nr_sector() const { return at<3>().valid(); }
+  uint32_t nr_sector() const { return at<3>().as_uint32(); }
+  bool has_rwbs() const { return at<4>().valid(); }
+  ::protozero::ConstChars rwbs() const { return at<4>().as_string(); }
+  bool has_comm() const { return at<5>().valid(); }
+  ::protozero::ConstChars comm() const { return at<5>().as_string(); }
+};
+
+class BlockGetrqFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockGetrqFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kSectorFieldNumber = 2,
+    kNrSectorFieldNumber = 3,
+    kRwbsFieldNumber = 4,
+    kCommFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockGetrqFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockGetrqFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrSector =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BlockGetrqFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrSector kNrSector() { return {}; }
+  void set_nr_sector(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrSector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rwbs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockGetrqFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rwbs kRwbs() { return {}; }
+  void set_rwbs(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Rwbs::kFieldId, data, size);
+  }
+  void set_rwbs(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rwbs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockGetrqFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockDirtyBufferFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockDirtyBufferFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockDirtyBufferFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockDirtyBufferFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_sector() const { return at<2>().valid(); }
+  uint64_t sector() const { return at<2>().as_uint64(); }
+  bool has_size() const { return at<3>().valid(); }
+  uint64_t size() const { return at<3>().as_uint64(); }
+};
+
+class BlockDirtyBufferFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockDirtyBufferFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kSectorFieldNumber = 2,
+    kSizeFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockDirtyBufferFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockDirtyBufferFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Size =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockDirtyBufferFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Size kSize() { return {}; }
+  void set_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Size::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockBioRemapFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockBioRemapFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockBioRemapFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockBioRemapFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_sector() const { return at<2>().valid(); }
+  uint64_t sector() const { return at<2>().as_uint64(); }
+  bool has_nr_sector() const { return at<3>().valid(); }
+  uint32_t nr_sector() const { return at<3>().as_uint32(); }
+  bool has_old_dev() const { return at<4>().valid(); }
+  uint64_t old_dev() const { return at<4>().as_uint64(); }
+  bool has_old_sector() const { return at<5>().valid(); }
+  uint64_t old_sector() const { return at<5>().as_uint64(); }
+  bool has_rwbs() const { return at<6>().valid(); }
+  ::protozero::ConstChars rwbs() const { return at<6>().as_string(); }
+};
+
+class BlockBioRemapFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockBioRemapFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kSectorFieldNumber = 2,
+    kNrSectorFieldNumber = 3,
+    kOldDevFieldNumber = 4,
+    kOldSectorFieldNumber = 5,
+    kRwbsFieldNumber = 6,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockBioRemapFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockBioRemapFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrSector =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BlockBioRemapFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrSector kNrSector() { return {}; }
+  void set_nr_sector(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrSector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OldDev =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockBioRemapFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OldDev kOldDev() { return {}; }
+  void set_old_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OldDev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OldSector =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockBioRemapFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OldSector kOldSector() { return {}; }
+  void set_old_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OldSector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rwbs =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockBioRemapFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rwbs kRwbs() { return {}; }
+  void set_rwbs(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Rwbs::kFieldId, data, size);
+  }
+  void set_rwbs(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rwbs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockBioQueueFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockBioQueueFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockBioQueueFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockBioQueueFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_sector() const { return at<2>().valid(); }
+  uint64_t sector() const { return at<2>().as_uint64(); }
+  bool has_nr_sector() const { return at<3>().valid(); }
+  uint32_t nr_sector() const { return at<3>().as_uint32(); }
+  bool has_rwbs() const { return at<4>().valid(); }
+  ::protozero::ConstChars rwbs() const { return at<4>().as_string(); }
+  bool has_comm() const { return at<5>().valid(); }
+  ::protozero::ConstChars comm() const { return at<5>().as_string(); }
+};
+
+class BlockBioQueueFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockBioQueueFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kSectorFieldNumber = 2,
+    kNrSectorFieldNumber = 3,
+    kRwbsFieldNumber = 4,
+    kCommFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockBioQueueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockBioQueueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrSector =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BlockBioQueueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrSector kNrSector() { return {}; }
+  void set_nr_sector(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrSector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rwbs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockBioQueueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rwbs kRwbs() { return {}; }
+  void set_rwbs(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Rwbs::kFieldId, data, size);
+  }
+  void set_rwbs(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rwbs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockBioQueueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockBioFrontmergeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockBioFrontmergeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockBioFrontmergeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockBioFrontmergeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_sector() const { return at<2>().valid(); }
+  uint64_t sector() const { return at<2>().as_uint64(); }
+  bool has_nr_sector() const { return at<3>().valid(); }
+  uint32_t nr_sector() const { return at<3>().as_uint32(); }
+  bool has_rwbs() const { return at<4>().valid(); }
+  ::protozero::ConstChars rwbs() const { return at<4>().as_string(); }
+  bool has_comm() const { return at<5>().valid(); }
+  ::protozero::ConstChars comm() const { return at<5>().as_string(); }
+};
+
+class BlockBioFrontmergeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockBioFrontmergeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kSectorFieldNumber = 2,
+    kNrSectorFieldNumber = 3,
+    kRwbsFieldNumber = 4,
+    kCommFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockBioFrontmergeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockBioFrontmergeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrSector =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BlockBioFrontmergeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrSector kNrSector() { return {}; }
+  void set_nr_sector(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrSector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rwbs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockBioFrontmergeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rwbs kRwbs() { return {}; }
+  void set_rwbs(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Rwbs::kFieldId, data, size);
+  }
+  void set_rwbs(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rwbs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockBioFrontmergeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockBioCompleteFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockBioCompleteFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockBioCompleteFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockBioCompleteFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_sector() const { return at<2>().valid(); }
+  uint64_t sector() const { return at<2>().as_uint64(); }
+  bool has_nr_sector() const { return at<3>().valid(); }
+  uint32_t nr_sector() const { return at<3>().as_uint32(); }
+  bool has_error() const { return at<4>().valid(); }
+  int32_t error() const { return at<4>().as_int32(); }
+  bool has_rwbs() const { return at<5>().valid(); }
+  ::protozero::ConstChars rwbs() const { return at<5>().as_string(); }
+};
+
+class BlockBioCompleteFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockBioCompleteFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kSectorFieldNumber = 2,
+    kNrSectorFieldNumber = 3,
+    kErrorFieldNumber = 4,
+    kRwbsFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockBioCompleteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockBioCompleteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrSector =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BlockBioCompleteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrSector kNrSector() { return {}; }
+  void set_nr_sector(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrSector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Error =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      BlockBioCompleteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Error kError() { return {}; }
+  void set_error(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Error::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rwbs =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockBioCompleteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rwbs kRwbs() { return {}; }
+  void set_rwbs(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Rwbs::kFieldId, data, size);
+  }
+  void set_rwbs(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rwbs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockBioBounceFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockBioBounceFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockBioBounceFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockBioBounceFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_sector() const { return at<2>().valid(); }
+  uint64_t sector() const { return at<2>().as_uint64(); }
+  bool has_nr_sector() const { return at<3>().valid(); }
+  uint32_t nr_sector() const { return at<3>().as_uint32(); }
+  bool has_rwbs() const { return at<4>().valid(); }
+  ::protozero::ConstChars rwbs() const { return at<4>().as_string(); }
+  bool has_comm() const { return at<5>().valid(); }
+  ::protozero::ConstChars comm() const { return at<5>().as_string(); }
+};
+
+class BlockBioBounceFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockBioBounceFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kSectorFieldNumber = 2,
+    kNrSectorFieldNumber = 3,
+    kRwbsFieldNumber = 4,
+    kCommFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockBioBounceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockBioBounceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrSector =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BlockBioBounceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrSector kNrSector() { return {}; }
+  void set_nr_sector(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrSector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rwbs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockBioBounceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rwbs kRwbs() { return {}; }
+  void set_rwbs(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Rwbs::kFieldId, data, size);
+  }
+  void set_rwbs(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rwbs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockBioBounceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockBioBackmergeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockBioBackmergeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockBioBackmergeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockBioBackmergeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_sector() const { return at<2>().valid(); }
+  uint64_t sector() const { return at<2>().as_uint64(); }
+  bool has_nr_sector() const { return at<3>().valid(); }
+  uint32_t nr_sector() const { return at<3>().as_uint32(); }
+  bool has_rwbs() const { return at<4>().valid(); }
+  ::protozero::ConstChars rwbs() const { return at<4>().as_string(); }
+  bool has_comm() const { return at<5>().valid(); }
+  ::protozero::ConstChars comm() const { return at<5>().as_string(); }
+};
+
+class BlockBioBackmergeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockBioBackmergeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kSectorFieldNumber = 2,
+    kNrSectorFieldNumber = 3,
+    kRwbsFieldNumber = 4,
+    kCommFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockBioBackmergeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockBioBackmergeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrSector =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BlockBioBackmergeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrSector kNrSector() { return {}; }
+  void set_nr_sector(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrSector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rwbs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockBioBackmergeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rwbs kRwbs() { return {}; }
+  void set_rwbs(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Rwbs::kFieldId, data, size);
+  }
+  void set_rwbs(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rwbs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockBioBackmergeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BlockRqIssueFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BlockRqIssueFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BlockRqIssueFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BlockRqIssueFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_sector() const { return at<2>().valid(); }
+  uint64_t sector() const { return at<2>().as_uint64(); }
+  bool has_nr_sector() const { return at<3>().valid(); }
+  uint32_t nr_sector() const { return at<3>().as_uint32(); }
+  bool has_bytes() const { return at<4>().valid(); }
+  uint32_t bytes() const { return at<4>().as_uint32(); }
+  bool has_rwbs() const { return at<5>().valid(); }
+  ::protozero::ConstChars rwbs() const { return at<5>().as_string(); }
+  bool has_comm() const { return at<6>().valid(); }
+  ::protozero::ConstChars comm() const { return at<6>().as_string(); }
+  bool has_cmd() const { return at<7>().valid(); }
+  ::protozero::ConstChars cmd() const { return at<7>().as_string(); }
+};
+
+class BlockRqIssueFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = BlockRqIssueFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kSectorFieldNumber = 2,
+    kNrSectorFieldNumber = 3,
+    kBytesFieldNumber = 4,
+    kRwbsFieldNumber = 5,
+    kCommFieldNumber = 6,
+    kCmdFieldNumber = 7,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockRqIssueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BlockRqIssueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrSector =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BlockRqIssueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrSector kNrSector() { return {}; }
+  void set_nr_sector(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrSector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Bytes =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BlockRqIssueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Bytes kBytes() { return {}; }
+  void set_bytes(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Bytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rwbs =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockRqIssueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rwbs kRwbs() { return {}; }
+  void set_rwbs(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Rwbs::kFieldId, data, size);
+  }
+  void set_rwbs(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rwbs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockRqIssueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cmd =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      BlockRqIssueFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cmd kCmd() { return {}; }
+  void set_cmd(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Cmd::kFieldId, data, size);
+  }
+  void set_cmd(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cmd::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/cgroup.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_CGROUP_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_CGROUP_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class CgroupSetupRootFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CgroupSetupRootFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CgroupSetupRootFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CgroupSetupRootFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_root() const { return at<1>().valid(); }
+  int32_t root() const { return at<1>().as_int32(); }
+  bool has_ss_mask() const { return at<2>().valid(); }
+  uint32_t ss_mask() const { return at<2>().as_uint32(); }
+  bool has_name() const { return at<3>().valid(); }
+  ::protozero::ConstChars name() const { return at<3>().as_string(); }
+};
+
+class CgroupSetupRootFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CgroupSetupRootFtraceEvent_Decoder;
+  enum : int32_t {
+    kRootFieldNumber = 1,
+    kSsMaskFieldNumber = 2,
+    kNameFieldNumber = 3,
+  };
+
+  using FieldMetadata_Root =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CgroupSetupRootFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Root kRoot() { return {}; }
+  void set_root(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Root::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SsMask =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CgroupSetupRootFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SsMask kSsMask() { return {}; }
+  void set_ss_mask(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SsMask::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      CgroupSetupRootFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CgroupRenameFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CgroupRenameFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CgroupRenameFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CgroupRenameFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_root() const { return at<1>().valid(); }
+  int32_t root() const { return at<1>().as_int32(); }
+  bool has_id() const { return at<2>().valid(); }
+  int32_t id() const { return at<2>().as_int32(); }
+  bool has_cname() const { return at<3>().valid(); }
+  ::protozero::ConstChars cname() const { return at<3>().as_string(); }
+};
+
+class CgroupRenameFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CgroupRenameFtraceEvent_Decoder;
+  enum : int32_t {
+    kRootFieldNumber = 1,
+    kIdFieldNumber = 2,
+    kCnameFieldNumber = 3,
+  };
+
+  using FieldMetadata_Root =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CgroupRenameFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Root kRoot() { return {}; }
+  void set_root(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Root::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Id =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CgroupRenameFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Id kId() { return {}; }
+  void set_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Id::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cname =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      CgroupRenameFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cname kCname() { return {}; }
+  void set_cname(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Cname::kFieldId, data, size);
+  }
+  void set_cname(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cname::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CgroupReleaseFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CgroupReleaseFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CgroupReleaseFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CgroupReleaseFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_root() const { return at<1>().valid(); }
+  int32_t root() const { return at<1>().as_int32(); }
+  bool has_id() const { return at<2>().valid(); }
+  int32_t id() const { return at<2>().as_int32(); }
+  bool has_cname() const { return at<3>().valid(); }
+  ::protozero::ConstChars cname() const { return at<3>().as_string(); }
+};
+
+class CgroupReleaseFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CgroupReleaseFtraceEvent_Decoder;
+  enum : int32_t {
+    kRootFieldNumber = 1,
+    kIdFieldNumber = 2,
+    kCnameFieldNumber = 3,
+  };
+
+  using FieldMetadata_Root =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CgroupReleaseFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Root kRoot() { return {}; }
+  void set_root(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Root::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Id =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CgroupReleaseFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Id kId() { return {}; }
+  void set_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Id::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cname =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      CgroupReleaseFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cname kCname() { return {}; }
+  void set_cname(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Cname::kFieldId, data, size);
+  }
+  void set_cname(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cname::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CgroupDestroyRootFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CgroupDestroyRootFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CgroupDestroyRootFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CgroupDestroyRootFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_root() const { return at<1>().valid(); }
+  int32_t root() const { return at<1>().as_int32(); }
+  bool has_ss_mask() const { return at<2>().valid(); }
+  uint32_t ss_mask() const { return at<2>().as_uint32(); }
+  bool has_name() const { return at<3>().valid(); }
+  ::protozero::ConstChars name() const { return at<3>().as_string(); }
+};
+
+class CgroupDestroyRootFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CgroupDestroyRootFtraceEvent_Decoder;
+  enum : int32_t {
+    kRootFieldNumber = 1,
+    kSsMaskFieldNumber = 2,
+    kNameFieldNumber = 3,
+  };
+
+  using FieldMetadata_Root =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CgroupDestroyRootFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Root kRoot() { return {}; }
+  void set_root(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Root::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SsMask =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CgroupDestroyRootFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SsMask kSsMask() { return {}; }
+  void set_ss_mask(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SsMask::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      CgroupDestroyRootFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CgroupTransferTasksFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CgroupTransferTasksFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CgroupTransferTasksFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CgroupTransferTasksFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dst_root() const { return at<1>().valid(); }
+  int32_t dst_root() const { return at<1>().as_int32(); }
+  bool has_dst_id() const { return at<2>().valid(); }
+  int32_t dst_id() const { return at<2>().as_int32(); }
+  bool has_pid() const { return at<3>().valid(); }
+  int32_t pid() const { return at<3>().as_int32(); }
+  bool has_comm() const { return at<4>().valid(); }
+  ::protozero::ConstChars comm() const { return at<4>().as_string(); }
+  bool has_cname() const { return at<5>().valid(); }
+  ::protozero::ConstChars cname() const { return at<5>().as_string(); }
+};
+
+class CgroupTransferTasksFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CgroupTransferTasksFtraceEvent_Decoder;
+  enum : int32_t {
+    kDstRootFieldNumber = 1,
+    kDstIdFieldNumber = 2,
+    kPidFieldNumber = 3,
+    kCommFieldNumber = 4,
+    kCnameFieldNumber = 5,
+  };
+
+  using FieldMetadata_DstRoot =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CgroupTransferTasksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DstRoot kDstRoot() { return {}; }
+  void set_dst_root(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DstRoot::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DstId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CgroupTransferTasksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DstId kDstId() { return {}; }
+  void set_dst_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DstId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CgroupTransferTasksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      CgroupTransferTasksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cname =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      CgroupTransferTasksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cname kCname() { return {}; }
+  void set_cname(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Cname::kFieldId, data, size);
+  }
+  void set_cname(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cname::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CgroupRmdirFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CgroupRmdirFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CgroupRmdirFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CgroupRmdirFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_root() const { return at<1>().valid(); }
+  int32_t root() const { return at<1>().as_int32(); }
+  bool has_id() const { return at<2>().valid(); }
+  int32_t id() const { return at<2>().as_int32(); }
+  bool has_cname() const { return at<3>().valid(); }
+  ::protozero::ConstChars cname() const { return at<3>().as_string(); }
+};
+
+class CgroupRmdirFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CgroupRmdirFtraceEvent_Decoder;
+  enum : int32_t {
+    kRootFieldNumber = 1,
+    kIdFieldNumber = 2,
+    kCnameFieldNumber = 3,
+  };
+
+  using FieldMetadata_Root =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CgroupRmdirFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Root kRoot() { return {}; }
+  void set_root(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Root::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Id =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CgroupRmdirFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Id kId() { return {}; }
+  void set_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Id::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cname =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      CgroupRmdirFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cname kCname() { return {}; }
+  void set_cname(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Cname::kFieldId, data, size);
+  }
+  void set_cname(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cname::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CgroupRemountFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CgroupRemountFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CgroupRemountFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CgroupRemountFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_root() const { return at<1>().valid(); }
+  int32_t root() const { return at<1>().as_int32(); }
+  bool has_ss_mask() const { return at<2>().valid(); }
+  uint32_t ss_mask() const { return at<2>().as_uint32(); }
+  bool has_name() const { return at<3>().valid(); }
+  ::protozero::ConstChars name() const { return at<3>().as_string(); }
+};
+
+class CgroupRemountFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CgroupRemountFtraceEvent_Decoder;
+  enum : int32_t {
+    kRootFieldNumber = 1,
+    kSsMaskFieldNumber = 2,
+    kNameFieldNumber = 3,
+  };
+
+  using FieldMetadata_Root =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CgroupRemountFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Root kRoot() { return {}; }
+  void set_root(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Root::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SsMask =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CgroupRemountFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SsMask kSsMask() { return {}; }
+  void set_ss_mask(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SsMask::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      CgroupRemountFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CgroupMkdirFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CgroupMkdirFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CgroupMkdirFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CgroupMkdirFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_root() const { return at<1>().valid(); }
+  int32_t root() const { return at<1>().as_int32(); }
+  bool has_id() const { return at<2>().valid(); }
+  int32_t id() const { return at<2>().as_int32(); }
+  bool has_cname() const { return at<3>().valid(); }
+  ::protozero::ConstChars cname() const { return at<3>().as_string(); }
+};
+
+class CgroupMkdirFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CgroupMkdirFtraceEvent_Decoder;
+  enum : int32_t {
+    kRootFieldNumber = 1,
+    kIdFieldNumber = 2,
+    kCnameFieldNumber = 3,
+  };
+
+  using FieldMetadata_Root =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CgroupMkdirFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Root kRoot() { return {}; }
+  void set_root(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Root::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Id =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CgroupMkdirFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Id kId() { return {}; }
+  void set_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Id::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cname =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      CgroupMkdirFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cname kCname() { return {}; }
+  void set_cname(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Cname::kFieldId, data, size);
+  }
+  void set_cname(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cname::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CgroupAttachTaskFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CgroupAttachTaskFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CgroupAttachTaskFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CgroupAttachTaskFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dst_root() const { return at<1>().valid(); }
+  int32_t dst_root() const { return at<1>().as_int32(); }
+  bool has_dst_id() const { return at<2>().valid(); }
+  int32_t dst_id() const { return at<2>().as_int32(); }
+  bool has_pid() const { return at<3>().valid(); }
+  int32_t pid() const { return at<3>().as_int32(); }
+  bool has_comm() const { return at<4>().valid(); }
+  ::protozero::ConstChars comm() const { return at<4>().as_string(); }
+  bool has_cname() const { return at<5>().valid(); }
+  ::protozero::ConstChars cname() const { return at<5>().as_string(); }
+};
+
+class CgroupAttachTaskFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CgroupAttachTaskFtraceEvent_Decoder;
+  enum : int32_t {
+    kDstRootFieldNumber = 1,
+    kDstIdFieldNumber = 2,
+    kPidFieldNumber = 3,
+    kCommFieldNumber = 4,
+    kCnameFieldNumber = 5,
+  };
+
+  using FieldMetadata_DstRoot =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CgroupAttachTaskFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DstRoot kDstRoot() { return {}; }
+  void set_dst_root(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DstRoot::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DstId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CgroupAttachTaskFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DstId kDstId() { return {}; }
+  void set_dst_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DstId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CgroupAttachTaskFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      CgroupAttachTaskFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cname =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      CgroupAttachTaskFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cname kCname() { return {}; }
+  void set_cname(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Cname::kFieldId, data, size);
+  }
+  void set_cname(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cname::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/clk.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_CLK_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_CLK_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class ClkSetRateFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ClkSetRateFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ClkSetRateFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ClkSetRateFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_rate() const { return at<2>().valid(); }
+  uint64_t rate() const { return at<2>().as_uint64(); }
+};
+
+class ClkSetRateFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = ClkSetRateFtraceEvent_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kRateFieldNumber = 2,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ClkSetRateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rate =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ClkSetRateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rate kRate() { return {}; }
+  void set_rate(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rate::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ClkDisableFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ClkDisableFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ClkDisableFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ClkDisableFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+};
+
+class ClkDisableFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = ClkDisableFtraceEvent_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ClkDisableFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ClkEnableFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ClkEnableFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ClkEnableFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ClkEnableFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+};
+
+class ClkEnableFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = ClkEnableFtraceEvent_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ClkEnableFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/compaction.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_COMPACTION_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_COMPACTION_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class MmCompactionWakeupKcompactdFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmCompactionWakeupKcompactdFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmCompactionWakeupKcompactdFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmCompactionWakeupKcompactdFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_nid() const { return at<1>().valid(); }
+  int32_t nid() const { return at<1>().as_int32(); }
+  bool has_order() const { return at<2>().valid(); }
+  int32_t order() const { return at<2>().as_int32(); }
+  bool has_classzone_idx() const { return at<3>().valid(); }
+  uint32_t classzone_idx() const { return at<3>().as_uint32(); }
+};
+
+class MmCompactionWakeupKcompactdFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmCompactionWakeupKcompactdFtraceEvent_Decoder;
+  enum : int32_t {
+    kNidFieldNumber = 1,
+    kOrderFieldNumber = 2,
+    kClasszoneIdxFieldNumber = 3,
+  };
+
+  using FieldMetadata_Nid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionWakeupKcompactdFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nid kNid() { return {}; }
+  void set_nid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionWakeupKcompactdFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ClasszoneIdx =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmCompactionWakeupKcompactdFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClasszoneIdx kClasszoneIdx() { return {}; }
+  void set_classzone_idx(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ClasszoneIdx::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmCompactionTryToCompactPagesFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmCompactionTryToCompactPagesFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmCompactionTryToCompactPagesFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmCompactionTryToCompactPagesFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_order() const { return at<1>().valid(); }
+  int32_t order() const { return at<1>().as_int32(); }
+  bool has_gfp_mask() const { return at<2>().valid(); }
+  uint32_t gfp_mask() const { return at<2>().as_uint32(); }
+  bool has_mode() const { return at<3>().valid(); }
+  uint32_t mode() const { return at<3>().as_uint32(); }
+};
+
+class MmCompactionTryToCompactPagesFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmCompactionTryToCompactPagesFtraceEvent_Decoder;
+  enum : int32_t {
+    kOrderFieldNumber = 1,
+    kGfpMaskFieldNumber = 2,
+    kModeFieldNumber = 3,
+  };
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionTryToCompactPagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GfpMask =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmCompactionTryToCompactPagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GfpMask kGfpMask() { return {}; }
+  void set_gfp_mask(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GfpMask::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmCompactionTryToCompactPagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmCompactionSuitableFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmCompactionSuitableFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmCompactionSuitableFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmCompactionSuitableFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_nid() const { return at<1>().valid(); }
+  int32_t nid() const { return at<1>().as_int32(); }
+  bool has_idx() const { return at<2>().valid(); }
+  uint32_t idx() const { return at<2>().as_uint32(); }
+  bool has_order() const { return at<3>().valid(); }
+  int32_t order() const { return at<3>().as_int32(); }
+  bool has_ret() const { return at<4>().valid(); }
+  int32_t ret() const { return at<4>().as_int32(); }
+};
+
+class MmCompactionSuitableFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmCompactionSuitableFtraceEvent_Decoder;
+  enum : int32_t {
+    kNidFieldNumber = 1,
+    kIdxFieldNumber = 2,
+    kOrderFieldNumber = 3,
+    kRetFieldNumber = 4,
+  };
+
+  using FieldMetadata_Nid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionSuitableFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nid kNid() { return {}; }
+  void set_nid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Idx =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmCompactionSuitableFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Idx kIdx() { return {}; }
+  void set_idx(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Idx::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionSuitableFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionSuitableFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmCompactionMigratepagesFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmCompactionMigratepagesFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmCompactionMigratepagesFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmCompactionMigratepagesFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_nr_migrated() const { return at<1>().valid(); }
+  uint64_t nr_migrated() const { return at<1>().as_uint64(); }
+  bool has_nr_failed() const { return at<2>().valid(); }
+  uint64_t nr_failed() const { return at<2>().as_uint64(); }
+};
+
+class MmCompactionMigratepagesFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmCompactionMigratepagesFtraceEvent_Decoder;
+  enum : int32_t {
+    kNrMigratedFieldNumber = 1,
+    kNrFailedFieldNumber = 2,
+  };
+
+  using FieldMetadata_NrMigrated =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionMigratepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrMigrated kNrMigrated() { return {}; }
+  void set_nr_migrated(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrMigrated::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrFailed =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionMigratepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrFailed kNrFailed() { return {}; }
+  void set_nr_failed(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrFailed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmCompactionKcompactdWakeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmCompactionKcompactdWakeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmCompactionKcompactdWakeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmCompactionKcompactdWakeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_nid() const { return at<1>().valid(); }
+  int32_t nid() const { return at<1>().as_int32(); }
+  bool has_order() const { return at<2>().valid(); }
+  int32_t order() const { return at<2>().as_int32(); }
+  bool has_classzone_idx() const { return at<3>().valid(); }
+  uint32_t classzone_idx() const { return at<3>().as_uint32(); }
+};
+
+class MmCompactionKcompactdWakeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmCompactionKcompactdWakeFtraceEvent_Decoder;
+  enum : int32_t {
+    kNidFieldNumber = 1,
+    kOrderFieldNumber = 2,
+    kClasszoneIdxFieldNumber = 3,
+  };
+
+  using FieldMetadata_Nid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionKcompactdWakeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nid kNid() { return {}; }
+  void set_nid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionKcompactdWakeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ClasszoneIdx =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmCompactionKcompactdWakeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClasszoneIdx kClasszoneIdx() { return {}; }
+  void set_classzone_idx(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ClasszoneIdx::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmCompactionKcompactdSleepFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmCompactionKcompactdSleepFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmCompactionKcompactdSleepFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmCompactionKcompactdSleepFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_nid() const { return at<1>().valid(); }
+  int32_t nid() const { return at<1>().as_int32(); }
+};
+
+class MmCompactionKcompactdSleepFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmCompactionKcompactdSleepFtraceEvent_Decoder;
+  enum : int32_t {
+    kNidFieldNumber = 1,
+  };
+
+  using FieldMetadata_Nid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionKcompactdSleepFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nid kNid() { return {}; }
+  void set_nid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmCompactionIsolateMigratepagesFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmCompactionIsolateMigratepagesFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmCompactionIsolateMigratepagesFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmCompactionIsolateMigratepagesFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_start_pfn() const { return at<1>().valid(); }
+  uint64_t start_pfn() const { return at<1>().as_uint64(); }
+  bool has_end_pfn() const { return at<2>().valid(); }
+  uint64_t end_pfn() const { return at<2>().as_uint64(); }
+  bool has_nr_scanned() const { return at<3>().valid(); }
+  uint64_t nr_scanned() const { return at<3>().as_uint64(); }
+  bool has_nr_taken() const { return at<4>().valid(); }
+  uint64_t nr_taken() const { return at<4>().as_uint64(); }
+};
+
+class MmCompactionIsolateMigratepagesFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmCompactionIsolateMigratepagesFtraceEvent_Decoder;
+  enum : int32_t {
+    kStartPfnFieldNumber = 1,
+    kEndPfnFieldNumber = 2,
+    kNrScannedFieldNumber = 3,
+    kNrTakenFieldNumber = 4,
+  };
+
+  using FieldMetadata_StartPfn =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionIsolateMigratepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StartPfn kStartPfn() { return {}; }
+  void set_start_pfn(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_StartPfn::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EndPfn =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionIsolateMigratepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EndPfn kEndPfn() { return {}; }
+  void set_end_pfn(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EndPfn::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrScanned =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionIsolateMigratepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrScanned kNrScanned() { return {}; }
+  void set_nr_scanned(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrScanned::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrTaken =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionIsolateMigratepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrTaken kNrTaken() { return {}; }
+  void set_nr_taken(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrTaken::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmCompactionIsolateFreepagesFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmCompactionIsolateFreepagesFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmCompactionIsolateFreepagesFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmCompactionIsolateFreepagesFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_start_pfn() const { return at<1>().valid(); }
+  uint64_t start_pfn() const { return at<1>().as_uint64(); }
+  bool has_end_pfn() const { return at<2>().valid(); }
+  uint64_t end_pfn() const { return at<2>().as_uint64(); }
+  bool has_nr_scanned() const { return at<3>().valid(); }
+  uint64_t nr_scanned() const { return at<3>().as_uint64(); }
+  bool has_nr_taken() const { return at<4>().valid(); }
+  uint64_t nr_taken() const { return at<4>().as_uint64(); }
+};
+
+class MmCompactionIsolateFreepagesFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmCompactionIsolateFreepagesFtraceEvent_Decoder;
+  enum : int32_t {
+    kStartPfnFieldNumber = 1,
+    kEndPfnFieldNumber = 2,
+    kNrScannedFieldNumber = 3,
+    kNrTakenFieldNumber = 4,
+  };
+
+  using FieldMetadata_StartPfn =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionIsolateFreepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StartPfn kStartPfn() { return {}; }
+  void set_start_pfn(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_StartPfn::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EndPfn =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionIsolateFreepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EndPfn kEndPfn() { return {}; }
+  void set_end_pfn(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EndPfn::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrScanned =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionIsolateFreepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrScanned kNrScanned() { return {}; }
+  void set_nr_scanned(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrScanned::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrTaken =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionIsolateFreepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrTaken kNrTaken() { return {}; }
+  void set_nr_taken(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrTaken::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmCompactionFinishedFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmCompactionFinishedFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmCompactionFinishedFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmCompactionFinishedFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_nid() const { return at<1>().valid(); }
+  int32_t nid() const { return at<1>().as_int32(); }
+  bool has_idx() const { return at<2>().valid(); }
+  uint32_t idx() const { return at<2>().as_uint32(); }
+  bool has_order() const { return at<3>().valid(); }
+  int32_t order() const { return at<3>().as_int32(); }
+  bool has_ret() const { return at<4>().valid(); }
+  int32_t ret() const { return at<4>().as_int32(); }
+};
+
+class MmCompactionFinishedFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmCompactionFinishedFtraceEvent_Decoder;
+  enum : int32_t {
+    kNidFieldNumber = 1,
+    kIdxFieldNumber = 2,
+    kOrderFieldNumber = 3,
+    kRetFieldNumber = 4,
+  };
+
+  using FieldMetadata_Nid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionFinishedFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nid kNid() { return {}; }
+  void set_nid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Idx =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmCompactionFinishedFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Idx kIdx() { return {}; }
+  void set_idx(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Idx::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionFinishedFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionFinishedFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmCompactionEndFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmCompactionEndFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmCompactionEndFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmCompactionEndFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_zone_start() const { return at<1>().valid(); }
+  uint64_t zone_start() const { return at<1>().as_uint64(); }
+  bool has_migrate_pfn() const { return at<2>().valid(); }
+  uint64_t migrate_pfn() const { return at<2>().as_uint64(); }
+  bool has_free_pfn() const { return at<3>().valid(); }
+  uint64_t free_pfn() const { return at<3>().as_uint64(); }
+  bool has_zone_end() const { return at<4>().valid(); }
+  uint64_t zone_end() const { return at<4>().as_uint64(); }
+  bool has_sync() const { return at<5>().valid(); }
+  uint32_t sync() const { return at<5>().as_uint32(); }
+  bool has_status() const { return at<6>().valid(); }
+  int32_t status() const { return at<6>().as_int32(); }
+};
+
+class MmCompactionEndFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmCompactionEndFtraceEvent_Decoder;
+  enum : int32_t {
+    kZoneStartFieldNumber = 1,
+    kMigratePfnFieldNumber = 2,
+    kFreePfnFieldNumber = 3,
+    kZoneEndFieldNumber = 4,
+    kSyncFieldNumber = 5,
+    kStatusFieldNumber = 6,
+  };
+
+  using FieldMetadata_ZoneStart =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ZoneStart kZoneStart() { return {}; }
+  void set_zone_start(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ZoneStart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MigratePfn =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MigratePfn kMigratePfn() { return {}; }
+  void set_migrate_pfn(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MigratePfn::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FreePfn =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FreePfn kFreePfn() { return {}; }
+  void set_free_pfn(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FreePfn::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ZoneEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ZoneEnd kZoneEnd() { return {}; }
+  void set_zone_end(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ZoneEnd::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sync =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmCompactionEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sync kSync() { return {}; }
+  void set_sync(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sync::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Status =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Status kStatus() { return {}; }
+  void set_status(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Status::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmCompactionDeferResetFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmCompactionDeferResetFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmCompactionDeferResetFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmCompactionDeferResetFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_nid() const { return at<1>().valid(); }
+  int32_t nid() const { return at<1>().as_int32(); }
+  bool has_idx() const { return at<2>().valid(); }
+  uint32_t idx() const { return at<2>().as_uint32(); }
+  bool has_order() const { return at<3>().valid(); }
+  int32_t order() const { return at<3>().as_int32(); }
+  bool has_considered() const { return at<4>().valid(); }
+  uint32_t considered() const { return at<4>().as_uint32(); }
+  bool has_defer_shift() const { return at<5>().valid(); }
+  uint32_t defer_shift() const { return at<5>().as_uint32(); }
+  bool has_order_failed() const { return at<6>().valid(); }
+  int32_t order_failed() const { return at<6>().as_int32(); }
+};
+
+class MmCompactionDeferResetFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmCompactionDeferResetFtraceEvent_Decoder;
+  enum : int32_t {
+    kNidFieldNumber = 1,
+    kIdxFieldNumber = 2,
+    kOrderFieldNumber = 3,
+    kConsideredFieldNumber = 4,
+    kDeferShiftFieldNumber = 5,
+    kOrderFailedFieldNumber = 6,
+  };
+
+  using FieldMetadata_Nid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionDeferResetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nid kNid() { return {}; }
+  void set_nid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Idx =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmCompactionDeferResetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Idx kIdx() { return {}; }
+  void set_idx(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Idx::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionDeferResetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Considered =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmCompactionDeferResetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Considered kConsidered() { return {}; }
+  void set_considered(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Considered::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DeferShift =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmCompactionDeferResetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DeferShift kDeferShift() { return {}; }
+  void set_defer_shift(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DeferShift::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OrderFailed =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionDeferResetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OrderFailed kOrderFailed() { return {}; }
+  void set_order_failed(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OrderFailed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmCompactionDeferredFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmCompactionDeferredFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmCompactionDeferredFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmCompactionDeferredFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_nid() const { return at<1>().valid(); }
+  int32_t nid() const { return at<1>().as_int32(); }
+  bool has_idx() const { return at<2>().valid(); }
+  uint32_t idx() const { return at<2>().as_uint32(); }
+  bool has_order() const { return at<3>().valid(); }
+  int32_t order() const { return at<3>().as_int32(); }
+  bool has_considered() const { return at<4>().valid(); }
+  uint32_t considered() const { return at<4>().as_uint32(); }
+  bool has_defer_shift() const { return at<5>().valid(); }
+  uint32_t defer_shift() const { return at<5>().as_uint32(); }
+  bool has_order_failed() const { return at<6>().valid(); }
+  int32_t order_failed() const { return at<6>().as_int32(); }
+};
+
+class MmCompactionDeferredFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmCompactionDeferredFtraceEvent_Decoder;
+  enum : int32_t {
+    kNidFieldNumber = 1,
+    kIdxFieldNumber = 2,
+    kOrderFieldNumber = 3,
+    kConsideredFieldNumber = 4,
+    kDeferShiftFieldNumber = 5,
+    kOrderFailedFieldNumber = 6,
+  };
+
+  using FieldMetadata_Nid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionDeferredFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nid kNid() { return {}; }
+  void set_nid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Idx =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmCompactionDeferredFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Idx kIdx() { return {}; }
+  void set_idx(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Idx::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionDeferredFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Considered =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmCompactionDeferredFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Considered kConsidered() { return {}; }
+  void set_considered(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Considered::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DeferShift =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmCompactionDeferredFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DeferShift kDeferShift() { return {}; }
+  void set_defer_shift(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DeferShift::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OrderFailed =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionDeferredFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OrderFailed kOrderFailed() { return {}; }
+  void set_order_failed(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OrderFailed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmCompactionDeferCompactionFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmCompactionDeferCompactionFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmCompactionDeferCompactionFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmCompactionDeferCompactionFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_nid() const { return at<1>().valid(); }
+  int32_t nid() const { return at<1>().as_int32(); }
+  bool has_idx() const { return at<2>().valid(); }
+  uint32_t idx() const { return at<2>().as_uint32(); }
+  bool has_order() const { return at<3>().valid(); }
+  int32_t order() const { return at<3>().as_int32(); }
+  bool has_considered() const { return at<4>().valid(); }
+  uint32_t considered() const { return at<4>().as_uint32(); }
+  bool has_defer_shift() const { return at<5>().valid(); }
+  uint32_t defer_shift() const { return at<5>().as_uint32(); }
+  bool has_order_failed() const { return at<6>().valid(); }
+  int32_t order_failed() const { return at<6>().as_int32(); }
+};
+
+class MmCompactionDeferCompactionFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmCompactionDeferCompactionFtraceEvent_Decoder;
+  enum : int32_t {
+    kNidFieldNumber = 1,
+    kIdxFieldNumber = 2,
+    kOrderFieldNumber = 3,
+    kConsideredFieldNumber = 4,
+    kDeferShiftFieldNumber = 5,
+    kOrderFailedFieldNumber = 6,
+  };
+
+  using FieldMetadata_Nid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionDeferCompactionFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nid kNid() { return {}; }
+  void set_nid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Idx =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmCompactionDeferCompactionFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Idx kIdx() { return {}; }
+  void set_idx(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Idx::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionDeferCompactionFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Considered =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmCompactionDeferCompactionFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Considered kConsidered() { return {}; }
+  void set_considered(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Considered::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DeferShift =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmCompactionDeferCompactionFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DeferShift kDeferShift() { return {}; }
+  void set_defer_shift(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DeferShift::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OrderFailed =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmCompactionDeferCompactionFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OrderFailed kOrderFailed() { return {}; }
+  void set_order_failed(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OrderFailed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmCompactionBeginFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmCompactionBeginFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmCompactionBeginFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmCompactionBeginFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_zone_start() const { return at<1>().valid(); }
+  uint64_t zone_start() const { return at<1>().as_uint64(); }
+  bool has_migrate_pfn() const { return at<2>().valid(); }
+  uint64_t migrate_pfn() const { return at<2>().as_uint64(); }
+  bool has_free_pfn() const { return at<3>().valid(); }
+  uint64_t free_pfn() const { return at<3>().as_uint64(); }
+  bool has_zone_end() const { return at<4>().valid(); }
+  uint64_t zone_end() const { return at<4>().as_uint64(); }
+  bool has_sync() const { return at<5>().valid(); }
+  uint32_t sync() const { return at<5>().as_uint32(); }
+};
+
+class MmCompactionBeginFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmCompactionBeginFtraceEvent_Decoder;
+  enum : int32_t {
+    kZoneStartFieldNumber = 1,
+    kMigratePfnFieldNumber = 2,
+    kFreePfnFieldNumber = 3,
+    kZoneEndFieldNumber = 4,
+    kSyncFieldNumber = 5,
+  };
+
+  using FieldMetadata_ZoneStart =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ZoneStart kZoneStart() { return {}; }
+  void set_zone_start(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ZoneStart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MigratePfn =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MigratePfn kMigratePfn() { return {}; }
+  void set_migrate_pfn(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MigratePfn::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FreePfn =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FreePfn kFreePfn() { return {}; }
+  void set_free_pfn(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FreePfn::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ZoneEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmCompactionBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ZoneEnd kZoneEnd() { return {}; }
+  void set_zone_end(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ZoneEnd::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sync =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmCompactionBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sync kSync() { return {}; }
+  void set_sync(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sync::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/cpuhp.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_CPUHP_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_CPUHP_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class CpuhpPauseFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CpuhpPauseFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CpuhpPauseFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CpuhpPauseFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_active_cpus() const { return at<1>().valid(); }
+  uint32_t active_cpus() const { return at<1>().as_uint32(); }
+  bool has_cpus() const { return at<2>().valid(); }
+  uint32_t cpus() const { return at<2>().as_uint32(); }
+  bool has_pause() const { return at<3>().valid(); }
+  uint32_t pause() const { return at<3>().as_uint32(); }
+  bool has_time() const { return at<4>().valid(); }
+  uint32_t time() const { return at<4>().as_uint32(); }
+};
+
+class CpuhpPauseFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CpuhpPauseFtraceEvent_Decoder;
+  enum : int32_t {
+    kActiveCpusFieldNumber = 1,
+    kCpusFieldNumber = 2,
+    kPauseFieldNumber = 3,
+    kTimeFieldNumber = 4,
+  };
+
+  using FieldMetadata_ActiveCpus =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CpuhpPauseFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ActiveCpus kActiveCpus() { return {}; }
+  void set_active_cpus(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ActiveCpus::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cpus =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CpuhpPauseFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cpus kCpus() { return {}; }
+  void set_cpus(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cpus::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pause =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CpuhpPauseFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pause kPause() { return {}; }
+  void set_pause(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pause::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Time =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CpuhpPauseFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Time kTime() { return {}; }
+  void set_time(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Time::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CpuhpLatencyFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CpuhpLatencyFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CpuhpLatencyFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CpuhpLatencyFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_cpu() const { return at<1>().valid(); }
+  uint32_t cpu() const { return at<1>().as_uint32(); }
+  bool has_ret() const { return at<2>().valid(); }
+  int32_t ret() const { return at<2>().as_int32(); }
+  bool has_state() const { return at<3>().valid(); }
+  uint32_t state() const { return at<3>().as_uint32(); }
+  bool has_time() const { return at<4>().valid(); }
+  uint64_t time() const { return at<4>().as_uint64(); }
+};
+
+class CpuhpLatencyFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CpuhpLatencyFtraceEvent_Decoder;
+  enum : int32_t {
+    kCpuFieldNumber = 1,
+    kRetFieldNumber = 2,
+    kStateFieldNumber = 3,
+    kTimeFieldNumber = 4,
+  };
+
+  using FieldMetadata_Cpu =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CpuhpLatencyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cpu kCpu() { return {}; }
+  void set_cpu(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cpu::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CpuhpLatencyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_State =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CpuhpLatencyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_State kState() { return {}; }
+  void set_state(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_State::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Time =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      CpuhpLatencyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Time kTime() { return {}; }
+  void set_time(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Time::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CpuhpEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CpuhpEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CpuhpEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CpuhpEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_cpu() const { return at<1>().valid(); }
+  uint32_t cpu() const { return at<1>().as_uint32(); }
+  bool has_fun() const { return at<2>().valid(); }
+  uint64_t fun() const { return at<2>().as_uint64(); }
+  bool has_idx() const { return at<3>().valid(); }
+  int32_t idx() const { return at<3>().as_int32(); }
+  bool has_target() const { return at<4>().valid(); }
+  int32_t target() const { return at<4>().as_int32(); }
+};
+
+class CpuhpEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CpuhpEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kCpuFieldNumber = 1,
+    kFunFieldNumber = 2,
+    kIdxFieldNumber = 3,
+    kTargetFieldNumber = 4,
+  };
+
+  using FieldMetadata_Cpu =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CpuhpEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cpu kCpu() { return {}; }
+  void set_cpu(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cpu::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Fun =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      CpuhpEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Fun kFun() { return {}; }
+  void set_fun(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Fun::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Idx =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CpuhpEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Idx kIdx() { return {}; }
+  void set_idx(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Idx::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Target =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CpuhpEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Target kTarget() { return {}; }
+  void set_target(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Target::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CpuhpMultiEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CpuhpMultiEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CpuhpMultiEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CpuhpMultiEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_cpu() const { return at<1>().valid(); }
+  uint32_t cpu() const { return at<1>().as_uint32(); }
+  bool has_fun() const { return at<2>().valid(); }
+  uint64_t fun() const { return at<2>().as_uint64(); }
+  bool has_idx() const { return at<3>().valid(); }
+  int32_t idx() const { return at<3>().as_int32(); }
+  bool has_target() const { return at<4>().valid(); }
+  int32_t target() const { return at<4>().as_int32(); }
+};
+
+class CpuhpMultiEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CpuhpMultiEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kCpuFieldNumber = 1,
+    kFunFieldNumber = 2,
+    kIdxFieldNumber = 3,
+    kTargetFieldNumber = 4,
+  };
+
+  using FieldMetadata_Cpu =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CpuhpMultiEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cpu kCpu() { return {}; }
+  void set_cpu(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cpu::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Fun =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      CpuhpMultiEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Fun kFun() { return {}; }
+  void set_fun(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Fun::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Idx =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CpuhpMultiEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Idx kIdx() { return {}; }
+  void set_idx(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Idx::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Target =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CpuhpMultiEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Target kTarget() { return {}; }
+  void set_target(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Target::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CpuhpExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CpuhpExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CpuhpExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CpuhpExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_cpu() const { return at<1>().valid(); }
+  uint32_t cpu() const { return at<1>().as_uint32(); }
+  bool has_idx() const { return at<2>().valid(); }
+  int32_t idx() const { return at<2>().as_int32(); }
+  bool has_ret() const { return at<3>().valid(); }
+  int32_t ret() const { return at<3>().as_int32(); }
+  bool has_state() const { return at<4>().valid(); }
+  int32_t state() const { return at<4>().as_int32(); }
+};
+
+class CpuhpExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CpuhpExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kCpuFieldNumber = 1,
+    kIdxFieldNumber = 2,
+    kRetFieldNumber = 3,
+    kStateFieldNumber = 4,
+  };
+
+  using FieldMetadata_Cpu =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CpuhpExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cpu kCpu() { return {}; }
+  void set_cpu(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cpu::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Idx =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CpuhpExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Idx kIdx() { return {}; }
+  void set_idx(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Idx::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CpuhpExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_State =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      CpuhpExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_State kState() { return {}; }
+  void set_state(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_State::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/dmabuf_heap.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_DMABUF_HEAP_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_DMABUF_HEAP_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class DmaHeapStatFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  DmaHeapStatFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit DmaHeapStatFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit DmaHeapStatFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_inode() const { return at<1>().valid(); }
+  uint64_t inode() const { return at<1>().as_uint64(); }
+  bool has_len() const { return at<2>().valid(); }
+  int64_t len() const { return at<2>().as_int64(); }
+  bool has_total_allocated() const { return at<3>().valid(); }
+  uint64_t total_allocated() const { return at<3>().as_uint64(); }
+};
+
+class DmaHeapStatFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = DmaHeapStatFtraceEvent_Decoder;
+  enum : int32_t {
+    kInodeFieldNumber = 1,
+    kLenFieldNumber = 2,
+    kTotalAllocatedFieldNumber = 3,
+  };
+
+  using FieldMetadata_Inode =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      DmaHeapStatFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Inode kInode() { return {}; }
+  void set_inode(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Inode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      DmaHeapStatFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TotalAllocated =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      DmaHeapStatFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TotalAllocated kTotalAllocated() { return {}; }
+  void set_total_allocated(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TotalAllocated::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/dpu.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_DPU_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_DPU_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class DpuTracingMarkWriteFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  DpuTracingMarkWriteFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit DpuTracingMarkWriteFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit DpuTracingMarkWriteFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  int32_t pid() const { return at<1>().as_int32(); }
+  bool has_trace_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars trace_name() const { return at<2>().as_string(); }
+  bool has_trace_begin() const { return at<3>().valid(); }
+  uint32_t trace_begin() const { return at<3>().as_uint32(); }
+  bool has_name() const { return at<4>().valid(); }
+  ::protozero::ConstChars name() const { return at<4>().as_string(); }
+  bool has_type() const { return at<5>().valid(); }
+  uint32_t type() const { return at<5>().as_uint32(); }
+  bool has_value() const { return at<6>().valid(); }
+  int32_t value() const { return at<6>().as_int32(); }
+};
+
+class DpuTracingMarkWriteFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = DpuTracingMarkWriteFtraceEvent_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kTraceNameFieldNumber = 2,
+    kTraceBeginFieldNumber = 3,
+    kNameFieldNumber = 4,
+    kTypeFieldNumber = 5,
+    kValueFieldNumber = 6,
+  };
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      DpuTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TraceName =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DpuTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceName kTraceName() { return {}; }
+  void set_trace_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_TraceName::kFieldId, data, size);
+  }
+  void set_trace_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_TraceName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TraceBegin =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      DpuTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceBegin kTraceBegin() { return {}; }
+  void set_trace_begin(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TraceBegin::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DpuTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      DpuTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Value =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      DpuTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Value kValue() { return {}; }
+  void set_value(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Value::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/ext4.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_EXT4_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_EXT4_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class Ext4ZeroRangeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ZeroRangeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ZeroRangeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ZeroRangeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_offset() const { return at<3>().valid(); }
+  int64_t offset() const { return at<3>().as_int64(); }
+  bool has_len() const { return at<4>().valid(); }
+  int64_t len() const { return at<4>().as_int64(); }
+  bool has_mode() const { return at<5>().valid(); }
+  int32_t mode() const { return at<5>().as_int32(); }
+};
+
+class Ext4ZeroRangeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ZeroRangeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kOffsetFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kModeFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ZeroRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ZeroRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Offset =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4ZeroRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Offset kOffset() { return {}; }
+  void set_offset(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Offset::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4ZeroRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4ZeroRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4WritepagesResultFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4WritepagesResultFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4WritepagesResultFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4WritepagesResultFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_ret() const { return at<3>().valid(); }
+  int32_t ret() const { return at<3>().as_int32(); }
+  bool has_pages_written() const { return at<4>().valid(); }
+  int32_t pages_written() const { return at<4>().as_int32(); }
+  bool has_pages_skipped() const { return at<5>().valid(); }
+  int64_t pages_skipped() const { return at<5>().as_int64(); }
+  bool has_writeback_index() const { return at<6>().valid(); }
+  uint64_t writeback_index() const { return at<6>().as_uint64(); }
+  bool has_sync_mode() const { return at<7>().valid(); }
+  int32_t sync_mode() const { return at<7>().as_int32(); }
+};
+
+class Ext4WritepagesResultFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4WritepagesResultFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kRetFieldNumber = 3,
+    kPagesWrittenFieldNumber = 4,
+    kPagesSkippedFieldNumber = 5,
+    kWritebackIndexFieldNumber = 6,
+    kSyncModeFieldNumber = 7,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4WritepagesResultFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4WritepagesResultFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4WritepagesResultFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PagesWritten =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4WritepagesResultFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PagesWritten kPagesWritten() { return {}; }
+  void set_pages_written(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PagesWritten::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PagesSkipped =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4WritepagesResultFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PagesSkipped kPagesSkipped() { return {}; }
+  void set_pages_skipped(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PagesSkipped::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_WritebackIndex =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4WritepagesResultFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WritebackIndex kWritebackIndex() { return {}; }
+  void set_writeback_index(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_WritebackIndex::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SyncMode =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4WritepagesResultFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SyncMode kSyncMode() { return {}; }
+  void set_sync_mode(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SyncMode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4WritepagesFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/10, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4WritepagesFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4WritepagesFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4WritepagesFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_nr_to_write() const { return at<3>().valid(); }
+  int64_t nr_to_write() const { return at<3>().as_int64(); }
+  bool has_pages_skipped() const { return at<4>().valid(); }
+  int64_t pages_skipped() const { return at<4>().as_int64(); }
+  bool has_range_start() const { return at<5>().valid(); }
+  int64_t range_start() const { return at<5>().as_int64(); }
+  bool has_range_end() const { return at<6>().valid(); }
+  int64_t range_end() const { return at<6>().as_int64(); }
+  bool has_writeback_index() const { return at<7>().valid(); }
+  uint64_t writeback_index() const { return at<7>().as_uint64(); }
+  bool has_sync_mode() const { return at<8>().valid(); }
+  int32_t sync_mode() const { return at<8>().as_int32(); }
+  bool has_for_kupdate() const { return at<9>().valid(); }
+  uint32_t for_kupdate() const { return at<9>().as_uint32(); }
+  bool has_range_cyclic() const { return at<10>().valid(); }
+  uint32_t range_cyclic() const { return at<10>().as_uint32(); }
+};
+
+class Ext4WritepagesFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4WritepagesFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kNrToWriteFieldNumber = 3,
+    kPagesSkippedFieldNumber = 4,
+    kRangeStartFieldNumber = 5,
+    kRangeEndFieldNumber = 6,
+    kWritebackIndexFieldNumber = 7,
+    kSyncModeFieldNumber = 8,
+    kForKupdateFieldNumber = 9,
+    kRangeCyclicFieldNumber = 10,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4WritepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4WritepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrToWrite =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4WritepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrToWrite kNrToWrite() { return {}; }
+  void set_nr_to_write(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrToWrite::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PagesSkipped =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4WritepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PagesSkipped kPagesSkipped() { return {}; }
+  void set_pages_skipped(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PagesSkipped::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RangeStart =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4WritepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RangeStart kRangeStart() { return {}; }
+  void set_range_start(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RangeStart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RangeEnd =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4WritepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RangeEnd kRangeEnd() { return {}; }
+  void set_range_end(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RangeEnd::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_WritebackIndex =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4WritepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WritebackIndex kWritebackIndex() { return {}; }
+  void set_writeback_index(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_WritebackIndex::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SyncMode =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4WritepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SyncMode kSyncMode() { return {}; }
+  void set_sync_mode(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SyncMode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ForKupdate =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4WritepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ForKupdate kForKupdate() { return {}; }
+  void set_for_kupdate(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ForKupdate::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RangeCyclic =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4WritepagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RangeCyclic kRangeCyclic() { return {}; }
+  void set_range_cyclic(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RangeCyclic::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4WritepageFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4WritepageFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4WritepageFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4WritepageFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_index() const { return at<3>().valid(); }
+  uint64_t index() const { return at<3>().as_uint64(); }
+};
+
+class Ext4WritepageFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4WritepageFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kIndexFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4WritepageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4WritepageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Index =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4WritepageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Index kIndex() { return {}; }
+  void set_index(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Index::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4WriteEndFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4WriteEndFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4WriteEndFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4WriteEndFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pos() const { return at<3>().valid(); }
+  int64_t pos() const { return at<3>().as_int64(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint32_t len() const { return at<4>().as_uint32(); }
+  bool has_copied() const { return at<5>().valid(); }
+  uint32_t copied() const { return at<5>().as_uint32(); }
+};
+
+class Ext4WriteEndFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4WriteEndFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPosFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kCopiedFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4WriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4WriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pos =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4WriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pos kPos() { return {}; }
+  void set_pos(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pos::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4WriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Copied =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4WriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Copied kCopied() { return {}; }
+  void set_copied(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Copied::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4WriteBeginFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4WriteBeginFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4WriteBeginFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4WriteBeginFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pos() const { return at<3>().valid(); }
+  int64_t pos() const { return at<3>().as_int64(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint32_t len() const { return at<4>().as_uint32(); }
+  bool has_flags() const { return at<5>().valid(); }
+  uint32_t flags() const { return at<5>().as_uint32(); }
+};
+
+class Ext4WriteBeginFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4WriteBeginFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPosFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kFlagsFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4WriteBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4WriteBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pos =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4WriteBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pos kPos() { return {}; }
+  void set_pos(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pos::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4WriteBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4WriteBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4UnlinkExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4UnlinkExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4UnlinkExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4UnlinkExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_ret() const { return at<3>().valid(); }
+  int32_t ret() const { return at<3>().as_int32(); }
+};
+
+class Ext4UnlinkExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4UnlinkExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kRetFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4UnlinkExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4UnlinkExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4UnlinkExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4UnlinkEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4UnlinkEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4UnlinkEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4UnlinkEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_parent() const { return at<3>().valid(); }
+  uint64_t parent() const { return at<3>().as_uint64(); }
+  bool has_size() const { return at<4>().valid(); }
+  int64_t size() const { return at<4>().as_int64(); }
+};
+
+class Ext4UnlinkEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4UnlinkEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kParentFieldNumber = 3,
+    kSizeFieldNumber = 4,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4UnlinkEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4UnlinkEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Parent =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4UnlinkEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Parent kParent() { return {}; }
+  void set_parent(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Parent::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Size =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4UnlinkEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Size kSize() { return {}; }
+  void set_size(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Size::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4TruncateExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4TruncateExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4TruncateExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4TruncateExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_blocks() const { return at<3>().valid(); }
+  uint64_t blocks() const { return at<3>().as_uint64(); }
+};
+
+class Ext4TruncateExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4TruncateExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kBlocksFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4TruncateExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4TruncateExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Blocks =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4TruncateExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Blocks kBlocks() { return {}; }
+  void set_blocks(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Blocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4TruncateEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4TruncateEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4TruncateEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4TruncateEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_blocks() const { return at<3>().valid(); }
+  uint64_t blocks() const { return at<3>().as_uint64(); }
+};
+
+class Ext4TruncateEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4TruncateEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kBlocksFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4TruncateEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4TruncateEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Blocks =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4TruncateEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Blocks kBlocks() { return {}; }
+  void set_blocks(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Blocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4TrimExtentFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4TrimExtentFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4TrimExtentFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4TrimExtentFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev_major() const { return at<1>().valid(); }
+  int32_t dev_major() const { return at<1>().as_int32(); }
+  bool has_dev_minor() const { return at<2>().valid(); }
+  int32_t dev_minor() const { return at<2>().as_int32(); }
+  bool has_group() const { return at<3>().valid(); }
+  uint32_t group() const { return at<3>().as_uint32(); }
+  bool has_start() const { return at<4>().valid(); }
+  int32_t start() const { return at<4>().as_int32(); }
+  bool has_len() const { return at<5>().valid(); }
+  int32_t len() const { return at<5>().as_int32(); }
+};
+
+class Ext4TrimExtentFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4TrimExtentFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevMajorFieldNumber = 1,
+    kDevMinorFieldNumber = 2,
+    kGroupFieldNumber = 3,
+    kStartFieldNumber = 4,
+    kLenFieldNumber = 5,
+  };
+
+  using FieldMetadata_DevMajor =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4TrimExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DevMajor kDevMajor() { return {}; }
+  void set_dev_major(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DevMajor::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DevMinor =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4TrimExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DevMinor kDevMinor() { return {}; }
+  void set_dev_minor(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DevMinor::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Group =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4TrimExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Group kGroup() { return {}; }
+  void set_group(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Group::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Start =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4TrimExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Start kStart() { return {}; }
+  void set_start(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Start::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4TrimExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4TrimAllFreeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4TrimAllFreeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4TrimAllFreeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4TrimAllFreeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev_major() const { return at<1>().valid(); }
+  int32_t dev_major() const { return at<1>().as_int32(); }
+  bool has_dev_minor() const { return at<2>().valid(); }
+  int32_t dev_minor() const { return at<2>().as_int32(); }
+  bool has_group() const { return at<3>().valid(); }
+  uint32_t group() const { return at<3>().as_uint32(); }
+  bool has_start() const { return at<4>().valid(); }
+  int32_t start() const { return at<4>().as_int32(); }
+  bool has_len() const { return at<5>().valid(); }
+  int32_t len() const { return at<5>().as_int32(); }
+};
+
+class Ext4TrimAllFreeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4TrimAllFreeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevMajorFieldNumber = 1,
+    kDevMinorFieldNumber = 2,
+    kGroupFieldNumber = 3,
+    kStartFieldNumber = 4,
+    kLenFieldNumber = 5,
+  };
+
+  using FieldMetadata_DevMajor =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4TrimAllFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DevMajor kDevMajor() { return {}; }
+  void set_dev_major(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DevMajor::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DevMinor =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4TrimAllFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DevMinor kDevMinor() { return {}; }
+  void set_dev_minor(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DevMinor::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Group =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4TrimAllFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Group kGroup() { return {}; }
+  void set_group(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Group::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Start =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4TrimAllFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Start kStart() { return {}; }
+  void set_start(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Start::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4TrimAllFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4SyncFsFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4SyncFsFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4SyncFsFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4SyncFsFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_wait() const { return at<2>().valid(); }
+  int32_t wait() const { return at<2>().as_int32(); }
+};
+
+class Ext4SyncFsFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4SyncFsFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kWaitFieldNumber = 2,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4SyncFsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Wait =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4SyncFsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Wait kWait() { return {}; }
+  void set_wait(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Wait::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4RequestInodeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4RequestInodeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4RequestInodeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4RequestInodeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_dir() const { return at<2>().valid(); }
+  uint64_t dir() const { return at<2>().as_uint64(); }
+  bool has_mode() const { return at<3>().valid(); }
+  uint32_t mode() const { return at<3>().as_uint32(); }
+};
+
+class Ext4RequestInodeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4RequestInodeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kDirFieldNumber = 2,
+    kModeFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4RequestInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Dir =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4RequestInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dir kDir() { return {}; }
+  void set_dir(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dir::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4RequestInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4RequestBlocksFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/10, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4RequestBlocksFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4RequestBlocksFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4RequestBlocksFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_len() const { return at<3>().valid(); }
+  uint32_t len() const { return at<3>().as_uint32(); }
+  bool has_logical() const { return at<4>().valid(); }
+  uint32_t logical() const { return at<4>().as_uint32(); }
+  bool has_lleft() const { return at<5>().valid(); }
+  uint32_t lleft() const { return at<5>().as_uint32(); }
+  bool has_lright() const { return at<6>().valid(); }
+  uint32_t lright() const { return at<6>().as_uint32(); }
+  bool has_goal() const { return at<7>().valid(); }
+  uint64_t goal() const { return at<7>().as_uint64(); }
+  bool has_pleft() const { return at<8>().valid(); }
+  uint64_t pleft() const { return at<8>().as_uint64(); }
+  bool has_pright() const { return at<9>().valid(); }
+  uint64_t pright() const { return at<9>().as_uint64(); }
+  bool has_flags() const { return at<10>().valid(); }
+  uint32_t flags() const { return at<10>().as_uint32(); }
+};
+
+class Ext4RequestBlocksFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4RequestBlocksFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kLenFieldNumber = 3,
+    kLogicalFieldNumber = 4,
+    kLleftFieldNumber = 5,
+    kLrightFieldNumber = 6,
+    kGoalFieldNumber = 7,
+    kPleftFieldNumber = 8,
+    kPrightFieldNumber = 9,
+    kFlagsFieldNumber = 10,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4RequestBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4RequestBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4RequestBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Logical =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4RequestBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Logical kLogical() { return {}; }
+  void set_logical(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Logical::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lleft =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4RequestBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lleft kLleft() { return {}; }
+  void set_lleft(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lleft::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lright =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4RequestBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lright kLright() { return {}; }
+  void set_lright(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lright::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Goal =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4RequestBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Goal kGoal() { return {}; }
+  void set_goal(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Goal::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pleft =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4RequestBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pleft kPleft() { return {}; }
+  void set_pleft(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pleft::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pright =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4RequestBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pright kPright() { return {}; }
+  void set_pright(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pright::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4RequestBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4RemoveBlocksFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4RemoveBlocksFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4RemoveBlocksFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4RemoveBlocksFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_from() const { return at<3>().valid(); }
+  uint32_t from() const { return at<3>().as_uint32(); }
+  bool has_to() const { return at<4>().valid(); }
+  uint32_t to() const { return at<4>().as_uint32(); }
+  bool has_partial() const { return at<5>().valid(); }
+  int64_t partial() const { return at<5>().as_int64(); }
+  bool has_ee_pblk() const { return at<6>().valid(); }
+  uint64_t ee_pblk() const { return at<6>().as_uint64(); }
+  bool has_ee_lblk() const { return at<7>().valid(); }
+  uint32_t ee_lblk() const { return at<7>().as_uint32(); }
+  bool has_ee_len() const { return at<8>().valid(); }
+  uint32_t ee_len() const { return at<8>().as_uint32(); }
+};
+
+class Ext4RemoveBlocksFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4RemoveBlocksFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kFromFieldNumber = 3,
+    kToFieldNumber = 4,
+    kPartialFieldNumber = 5,
+    kEePblkFieldNumber = 6,
+    kEeLblkFieldNumber = 7,
+    kEeLenFieldNumber = 8,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4RemoveBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4RemoveBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_From =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4RemoveBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_From kFrom() { return {}; }
+  void set_from(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_From::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_To =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4RemoveBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_To kTo() { return {}; }
+  void set_to(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_To::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Partial =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4RemoveBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Partial kPartial() { return {}; }
+  void set_partial(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Partial::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EePblk =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4RemoveBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EePblk kEePblk() { return {}; }
+  void set_ee_pblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EePblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EeLblk =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4RemoveBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EeLblk kEeLblk() { return {}; }
+  void set_ee_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EeLblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EeLen =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4RemoveBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EeLen kEeLen() { return {}; }
+  void set_ee_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EeLen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4ReleasepageFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ReleasepageFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ReleasepageFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ReleasepageFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_index() const { return at<3>().valid(); }
+  uint64_t index() const { return at<3>().as_uint64(); }
+};
+
+class Ext4ReleasepageFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ReleasepageFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kIndexFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ReleasepageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ReleasepageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Index =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ReleasepageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Index kIndex() { return {}; }
+  void set_index(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Index::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4ReadpageFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ReadpageFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ReadpageFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ReadpageFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_index() const { return at<3>().valid(); }
+  uint64_t index() const { return at<3>().as_uint64(); }
+};
+
+class Ext4ReadpageFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ReadpageFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kIndexFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ReadpageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ReadpageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Index =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ReadpageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Index kIndex() { return {}; }
+  void set_index(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Index::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4ReadBlockBitmapLoadFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ReadBlockBitmapLoadFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ReadBlockBitmapLoadFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ReadBlockBitmapLoadFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_group() const { return at<2>().valid(); }
+  uint32_t group() const { return at<2>().as_uint32(); }
+};
+
+class Ext4ReadBlockBitmapLoadFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ReadBlockBitmapLoadFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kGroupFieldNumber = 2,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ReadBlockBitmapLoadFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Group =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ReadBlockBitmapLoadFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Group kGroup() { return {}; }
+  void set_group(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Group::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4PunchHoleFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4PunchHoleFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4PunchHoleFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4PunchHoleFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_offset() const { return at<3>().valid(); }
+  int64_t offset() const { return at<3>().as_int64(); }
+  bool has_len() const { return at<4>().valid(); }
+  int64_t len() const { return at<4>().as_int64(); }
+  bool has_mode() const { return at<5>().valid(); }
+  int32_t mode() const { return at<5>().as_int32(); }
+};
+
+class Ext4PunchHoleFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4PunchHoleFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kOffsetFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kModeFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4PunchHoleFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4PunchHoleFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Offset =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4PunchHoleFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Offset kOffset() { return {}; }
+  void set_offset(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Offset::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4PunchHoleFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4PunchHoleFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4OtherInodeUpdateTimeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4OtherInodeUpdateTimeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4OtherInodeUpdateTimeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4OtherInodeUpdateTimeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_orig_ino() const { return at<3>().valid(); }
+  uint64_t orig_ino() const { return at<3>().as_uint64(); }
+  bool has_uid() const { return at<4>().valid(); }
+  uint32_t uid() const { return at<4>().as_uint32(); }
+  bool has_gid() const { return at<5>().valid(); }
+  uint32_t gid() const { return at<5>().as_uint32(); }
+  bool has_mode() const { return at<6>().valid(); }
+  uint32_t mode() const { return at<6>().as_uint32(); }
+};
+
+class Ext4OtherInodeUpdateTimeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4OtherInodeUpdateTimeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kOrigInoFieldNumber = 3,
+    kUidFieldNumber = 4,
+    kGidFieldNumber = 5,
+    kModeFieldNumber = 6,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4OtherInodeUpdateTimeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4OtherInodeUpdateTimeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OrigIno =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4OtherInodeUpdateTimeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OrigIno kOrigIno() { return {}; }
+  void set_orig_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OrigIno::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Uid =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4OtherInodeUpdateTimeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Uid kUid() { return {}; }
+  void set_uid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Uid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Gid =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4OtherInodeUpdateTimeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Gid kGid() { return {}; }
+  void set_gid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Gid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4OtherInodeUpdateTimeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4MballocPreallocFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/10, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4MballocPreallocFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4MballocPreallocFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4MballocPreallocFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_orig_logical() const { return at<3>().valid(); }
+  uint32_t orig_logical() const { return at<3>().as_uint32(); }
+  bool has_orig_start() const { return at<4>().valid(); }
+  int32_t orig_start() const { return at<4>().as_int32(); }
+  bool has_orig_group() const { return at<5>().valid(); }
+  uint32_t orig_group() const { return at<5>().as_uint32(); }
+  bool has_orig_len() const { return at<6>().valid(); }
+  int32_t orig_len() const { return at<6>().as_int32(); }
+  bool has_result_logical() const { return at<7>().valid(); }
+  uint32_t result_logical() const { return at<7>().as_uint32(); }
+  bool has_result_start() const { return at<8>().valid(); }
+  int32_t result_start() const { return at<8>().as_int32(); }
+  bool has_result_group() const { return at<9>().valid(); }
+  uint32_t result_group() const { return at<9>().as_uint32(); }
+  bool has_result_len() const { return at<10>().valid(); }
+  int32_t result_len() const { return at<10>().as_int32(); }
+};
+
+class Ext4MballocPreallocFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4MballocPreallocFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kOrigLogicalFieldNumber = 3,
+    kOrigStartFieldNumber = 4,
+    kOrigGroupFieldNumber = 5,
+    kOrigLenFieldNumber = 6,
+    kResultLogicalFieldNumber = 7,
+    kResultStartFieldNumber = 8,
+    kResultGroupFieldNumber = 9,
+    kResultLenFieldNumber = 10,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MballocPreallocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MballocPreallocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OrigLogical =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocPreallocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OrigLogical kOrigLogical() { return {}; }
+  void set_orig_logical(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OrigLogical::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OrigStart =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4MballocPreallocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OrigStart kOrigStart() { return {}; }
+  void set_orig_start(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OrigStart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OrigGroup =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocPreallocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OrigGroup kOrigGroup() { return {}; }
+  void set_orig_group(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OrigGroup::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OrigLen =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4MballocPreallocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OrigLen kOrigLen() { return {}; }
+  void set_orig_len(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OrigLen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ResultLogical =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocPreallocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ResultLogical kResultLogical() { return {}; }
+  void set_result_logical(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ResultLogical::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ResultStart =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4MballocPreallocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ResultStart kResultStart() { return {}; }
+  void set_result_start(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ResultStart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ResultGroup =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocPreallocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ResultGroup kResultGroup() { return {}; }
+  void set_result_group(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ResultGroup::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ResultLen =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4MballocPreallocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ResultLen kResultLen() { return {}; }
+  void set_result_len(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ResultLen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4MballocFreeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4MballocFreeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4MballocFreeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4MballocFreeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_result_start() const { return at<3>().valid(); }
+  int32_t result_start() const { return at<3>().as_int32(); }
+  bool has_result_group() const { return at<4>().valid(); }
+  uint32_t result_group() const { return at<4>().as_uint32(); }
+  bool has_result_len() const { return at<5>().valid(); }
+  int32_t result_len() const { return at<5>().as_int32(); }
+};
+
+class Ext4MballocFreeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4MballocFreeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kResultStartFieldNumber = 3,
+    kResultGroupFieldNumber = 4,
+    kResultLenFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MballocFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MballocFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ResultStart =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4MballocFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ResultStart kResultStart() { return {}; }
+  void set_result_start(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ResultStart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ResultGroup =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ResultGroup kResultGroup() { return {}; }
+  void set_result_group(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ResultGroup::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ResultLen =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4MballocFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ResultLen kResultLen() { return {}; }
+  void set_result_len(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ResultLen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4MballocDiscardFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4MballocDiscardFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4MballocDiscardFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4MballocDiscardFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_result_start() const { return at<3>().valid(); }
+  int32_t result_start() const { return at<3>().as_int32(); }
+  bool has_result_group() const { return at<4>().valid(); }
+  uint32_t result_group() const { return at<4>().as_uint32(); }
+  bool has_result_len() const { return at<5>().valid(); }
+  int32_t result_len() const { return at<5>().as_int32(); }
+};
+
+class Ext4MballocDiscardFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4MballocDiscardFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kResultStartFieldNumber = 3,
+    kResultGroupFieldNumber = 4,
+    kResultLenFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MballocDiscardFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MballocDiscardFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ResultStart =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4MballocDiscardFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ResultStart kResultStart() { return {}; }
+  void set_result_start(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ResultStart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ResultGroup =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocDiscardFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ResultGroup kResultGroup() { return {}; }
+  void set_result_group(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ResultGroup::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ResultLen =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4MballocDiscardFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ResultLen kResultLen() { return {}; }
+  void set_result_len(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ResultLen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4MballocAllocFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/20, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4MballocAllocFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4MballocAllocFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4MballocAllocFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_orig_logical() const { return at<3>().valid(); }
+  uint32_t orig_logical() const { return at<3>().as_uint32(); }
+  bool has_orig_start() const { return at<4>().valid(); }
+  int32_t orig_start() const { return at<4>().as_int32(); }
+  bool has_orig_group() const { return at<5>().valid(); }
+  uint32_t orig_group() const { return at<5>().as_uint32(); }
+  bool has_orig_len() const { return at<6>().valid(); }
+  int32_t orig_len() const { return at<6>().as_int32(); }
+  bool has_goal_logical() const { return at<7>().valid(); }
+  uint32_t goal_logical() const { return at<7>().as_uint32(); }
+  bool has_goal_start() const { return at<8>().valid(); }
+  int32_t goal_start() const { return at<8>().as_int32(); }
+  bool has_goal_group() const { return at<9>().valid(); }
+  uint32_t goal_group() const { return at<9>().as_uint32(); }
+  bool has_goal_len() const { return at<10>().valid(); }
+  int32_t goal_len() const { return at<10>().as_int32(); }
+  bool has_result_logical() const { return at<11>().valid(); }
+  uint32_t result_logical() const { return at<11>().as_uint32(); }
+  bool has_result_start() const { return at<12>().valid(); }
+  int32_t result_start() const { return at<12>().as_int32(); }
+  bool has_result_group() const { return at<13>().valid(); }
+  uint32_t result_group() const { return at<13>().as_uint32(); }
+  bool has_result_len() const { return at<14>().valid(); }
+  int32_t result_len() const { return at<14>().as_int32(); }
+  bool has_found() const { return at<15>().valid(); }
+  uint32_t found() const { return at<15>().as_uint32(); }
+  bool has_groups() const { return at<16>().valid(); }
+  uint32_t groups() const { return at<16>().as_uint32(); }
+  bool has_buddy() const { return at<17>().valid(); }
+  uint32_t buddy() const { return at<17>().as_uint32(); }
+  bool has_flags() const { return at<18>().valid(); }
+  uint32_t flags() const { return at<18>().as_uint32(); }
+  bool has_tail() const { return at<19>().valid(); }
+  uint32_t tail() const { return at<19>().as_uint32(); }
+  bool has_cr() const { return at<20>().valid(); }
+  uint32_t cr() const { return at<20>().as_uint32(); }
+};
+
+class Ext4MballocAllocFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4MballocAllocFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kOrigLogicalFieldNumber = 3,
+    kOrigStartFieldNumber = 4,
+    kOrigGroupFieldNumber = 5,
+    kOrigLenFieldNumber = 6,
+    kGoalLogicalFieldNumber = 7,
+    kGoalStartFieldNumber = 8,
+    kGoalGroupFieldNumber = 9,
+    kGoalLenFieldNumber = 10,
+    kResultLogicalFieldNumber = 11,
+    kResultStartFieldNumber = 12,
+    kResultGroupFieldNumber = 13,
+    kResultLenFieldNumber = 14,
+    kFoundFieldNumber = 15,
+    kGroupsFieldNumber = 16,
+    kBuddyFieldNumber = 17,
+    kFlagsFieldNumber = 18,
+    kTailFieldNumber = 19,
+    kCrFieldNumber = 20,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OrigLogical =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OrigLogical kOrigLogical() { return {}; }
+  void set_orig_logical(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OrigLogical::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OrigStart =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OrigStart kOrigStart() { return {}; }
+  void set_orig_start(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OrigStart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OrigGroup =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OrigGroup kOrigGroup() { return {}; }
+  void set_orig_group(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OrigGroup::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OrigLen =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OrigLen kOrigLen() { return {}; }
+  void set_orig_len(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OrigLen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GoalLogical =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GoalLogical kGoalLogical() { return {}; }
+  void set_goal_logical(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GoalLogical::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GoalStart =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GoalStart kGoalStart() { return {}; }
+  void set_goal_start(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GoalStart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GoalGroup =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GoalGroup kGoalGroup() { return {}; }
+  void set_goal_group(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GoalGroup::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GoalLen =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GoalLen kGoalLen() { return {}; }
+  void set_goal_len(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GoalLen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ResultLogical =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ResultLogical kResultLogical() { return {}; }
+  void set_result_logical(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ResultLogical::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ResultStart =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ResultStart kResultStart() { return {}; }
+  void set_result_start(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ResultStart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ResultGroup =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ResultGroup kResultGroup() { return {}; }
+  void set_result_group(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ResultGroup::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ResultLen =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ResultLen kResultLen() { return {}; }
+  void set_result_len(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ResultLen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Found =
+    ::protozero::proto_utils::FieldMetadata<
+      15,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Found kFound() { return {}; }
+  void set_found(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Found::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Groups =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Groups kGroups() { return {}; }
+  void set_groups(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Groups::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Buddy =
+    ::protozero::proto_utils::FieldMetadata<
+      17,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Buddy kBuddy() { return {}; }
+  void set_buddy(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Buddy::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      18,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Tail =
+    ::protozero::proto_utils::FieldMetadata<
+      19,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tail kTail() { return {}; }
+  void set_tail(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tail::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cr =
+    ::protozero::proto_utils::FieldMetadata<
+      20,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MballocAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cr kCr() { return {}; }
+  void set_cr(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4MbReleaseInodePaFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4MbReleaseInodePaFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4MbReleaseInodePaFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4MbReleaseInodePaFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_block() const { return at<3>().valid(); }
+  uint64_t block() const { return at<3>().as_uint64(); }
+  bool has_count() const { return at<4>().valid(); }
+  uint32_t count() const { return at<4>().as_uint32(); }
+};
+
+class Ext4MbReleaseInodePaFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4MbReleaseInodePaFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kBlockFieldNumber = 3,
+    kCountFieldNumber = 4,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MbReleaseInodePaFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MbReleaseInodePaFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Block =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MbReleaseInodePaFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Block kBlock() { return {}; }
+  void set_block(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Block::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Count =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MbReleaseInodePaFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Count kCount() { return {}; }
+  void set_count(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Count::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4MbReleaseGroupPaFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4MbReleaseGroupPaFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4MbReleaseGroupPaFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4MbReleaseGroupPaFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_pa_pstart() const { return at<2>().valid(); }
+  uint64_t pa_pstart() const { return at<2>().as_uint64(); }
+  bool has_pa_len() const { return at<3>().valid(); }
+  uint32_t pa_len() const { return at<3>().as_uint32(); }
+};
+
+class Ext4MbReleaseGroupPaFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4MbReleaseGroupPaFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kPaPstartFieldNumber = 2,
+    kPaLenFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MbReleaseGroupPaFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PaPstart =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MbReleaseGroupPaFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PaPstart kPaPstart() { return {}; }
+  void set_pa_pstart(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PaPstart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PaLen =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MbReleaseGroupPaFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PaLen kPaLen() { return {}; }
+  void set_pa_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PaLen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4MbNewInodePaFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4MbNewInodePaFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4MbNewInodePaFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4MbNewInodePaFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pa_pstart() const { return at<3>().valid(); }
+  uint64_t pa_pstart() const { return at<3>().as_uint64(); }
+  bool has_pa_lstart() const { return at<4>().valid(); }
+  uint64_t pa_lstart() const { return at<4>().as_uint64(); }
+  bool has_pa_len() const { return at<5>().valid(); }
+  uint32_t pa_len() const { return at<5>().as_uint32(); }
+};
+
+class Ext4MbNewInodePaFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4MbNewInodePaFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPaPstartFieldNumber = 3,
+    kPaLstartFieldNumber = 4,
+    kPaLenFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MbNewInodePaFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MbNewInodePaFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PaPstart =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MbNewInodePaFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PaPstart kPaPstart() { return {}; }
+  void set_pa_pstart(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PaPstart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PaLstart =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MbNewInodePaFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PaLstart kPaLstart() { return {}; }
+  void set_pa_lstart(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PaLstart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PaLen =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MbNewInodePaFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PaLen kPaLen() { return {}; }
+  void set_pa_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PaLen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4MbNewGroupPaFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4MbNewGroupPaFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4MbNewGroupPaFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4MbNewGroupPaFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pa_pstart() const { return at<3>().valid(); }
+  uint64_t pa_pstart() const { return at<3>().as_uint64(); }
+  bool has_pa_lstart() const { return at<4>().valid(); }
+  uint64_t pa_lstart() const { return at<4>().as_uint64(); }
+  bool has_pa_len() const { return at<5>().valid(); }
+  uint32_t pa_len() const { return at<5>().as_uint32(); }
+};
+
+class Ext4MbNewGroupPaFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4MbNewGroupPaFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPaPstartFieldNumber = 3,
+    kPaLstartFieldNumber = 4,
+    kPaLenFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MbNewGroupPaFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MbNewGroupPaFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PaPstart =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MbNewGroupPaFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PaPstart kPaPstart() { return {}; }
+  void set_pa_pstart(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PaPstart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PaLstart =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MbNewGroupPaFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PaLstart kPaLstart() { return {}; }
+  void set_pa_lstart(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PaLstart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PaLen =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MbNewGroupPaFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PaLen kPaLen() { return {}; }
+  void set_pa_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PaLen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4MbDiscardPreallocationsFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4MbDiscardPreallocationsFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4MbDiscardPreallocationsFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4MbDiscardPreallocationsFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_needed() const { return at<2>().valid(); }
+  int32_t needed() const { return at<2>().as_int32(); }
+};
+
+class Ext4MbDiscardPreallocationsFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4MbDiscardPreallocationsFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kNeededFieldNumber = 2,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MbDiscardPreallocationsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Needed =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4MbDiscardPreallocationsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Needed kNeeded() { return {}; }
+  void set_needed(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Needed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4MbBuddyBitmapLoadFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4MbBuddyBitmapLoadFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4MbBuddyBitmapLoadFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4MbBuddyBitmapLoadFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_group() const { return at<2>().valid(); }
+  uint32_t group() const { return at<2>().as_uint32(); }
+};
+
+class Ext4MbBuddyBitmapLoadFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4MbBuddyBitmapLoadFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kGroupFieldNumber = 2,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MbBuddyBitmapLoadFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Group =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MbBuddyBitmapLoadFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Group kGroup() { return {}; }
+  void set_group(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Group::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4MbBitmapLoadFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4MbBitmapLoadFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4MbBitmapLoadFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4MbBitmapLoadFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_group() const { return at<2>().valid(); }
+  uint32_t group() const { return at<2>().as_uint32(); }
+};
+
+class Ext4MbBitmapLoadFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4MbBitmapLoadFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kGroupFieldNumber = 2,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MbBitmapLoadFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Group =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4MbBitmapLoadFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Group kGroup() { return {}; }
+  void set_group(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Group::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4MarkInodeDirtyFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4MarkInodeDirtyFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4MarkInodeDirtyFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4MarkInodeDirtyFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_ip() const { return at<3>().valid(); }
+  uint64_t ip() const { return at<3>().as_uint64(); }
+};
+
+class Ext4MarkInodeDirtyFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4MarkInodeDirtyFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kIpFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MarkInodeDirtyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MarkInodeDirtyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ip =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4MarkInodeDirtyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ip kIp() { return {}; }
+  void set_ip(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ip::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4LoadInodeBitmapFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4LoadInodeBitmapFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4LoadInodeBitmapFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4LoadInodeBitmapFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_group() const { return at<2>().valid(); }
+  uint32_t group() const { return at<2>().as_uint32(); }
+};
+
+class Ext4LoadInodeBitmapFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4LoadInodeBitmapFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kGroupFieldNumber = 2,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4LoadInodeBitmapFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Group =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4LoadInodeBitmapFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Group kGroup() { return {}; }
+  void set_group(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Group::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4LoadInodeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4LoadInodeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4LoadInodeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4LoadInodeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+};
+
+class Ext4LoadInodeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4LoadInodeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4LoadInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4LoadInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4JournalledWriteEndFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4JournalledWriteEndFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4JournalledWriteEndFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4JournalledWriteEndFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pos() const { return at<3>().valid(); }
+  int64_t pos() const { return at<3>().as_int64(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint32_t len() const { return at<4>().as_uint32(); }
+  bool has_copied() const { return at<5>().valid(); }
+  uint32_t copied() const { return at<5>().as_uint32(); }
+};
+
+class Ext4JournalledWriteEndFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4JournalledWriteEndFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPosFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kCopiedFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4JournalledWriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4JournalledWriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pos =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4JournalledWriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pos kPos() { return {}; }
+  void set_pos(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pos::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4JournalledWriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Copied =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4JournalledWriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Copied kCopied() { return {}; }
+  void set_copied(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Copied::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4JournalledInvalidatepageFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4JournalledInvalidatepageFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4JournalledInvalidatepageFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4JournalledInvalidatepageFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_index() const { return at<3>().valid(); }
+  uint64_t index() const { return at<3>().as_uint64(); }
+  bool has_offset() const { return at<4>().valid(); }
+  uint64_t offset() const { return at<4>().as_uint64(); }
+  bool has_length() const { return at<5>().valid(); }
+  uint32_t length() const { return at<5>().as_uint32(); }
+};
+
+class Ext4JournalledInvalidatepageFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4JournalledInvalidatepageFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kIndexFieldNumber = 3,
+    kOffsetFieldNumber = 4,
+    kLengthFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4JournalledInvalidatepageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4JournalledInvalidatepageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Index =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4JournalledInvalidatepageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Index kIndex() { return {}; }
+  void set_index(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Index::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Offset =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4JournalledInvalidatepageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Offset kOffset() { return {}; }
+  void set_offset(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Offset::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Length =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4JournalledInvalidatepageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Length kLength() { return {}; }
+  void set_length(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Length::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4JournalStartReservedFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4JournalStartReservedFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4JournalStartReservedFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4JournalStartReservedFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ip() const { return at<2>().valid(); }
+  uint64_t ip() const { return at<2>().as_uint64(); }
+  bool has_blocks() const { return at<3>().valid(); }
+  int32_t blocks() const { return at<3>().as_int32(); }
+};
+
+class Ext4JournalStartReservedFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4JournalStartReservedFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kIpFieldNumber = 2,
+    kBlocksFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4JournalStartReservedFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ip =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4JournalStartReservedFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ip kIp() { return {}; }
+  void set_ip(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ip::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Blocks =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4JournalStartReservedFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Blocks kBlocks() { return {}; }
+  void set_blocks(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Blocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4JournalStartFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4JournalStartFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4JournalStartFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4JournalStartFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ip() const { return at<2>().valid(); }
+  uint64_t ip() const { return at<2>().as_uint64(); }
+  bool has_blocks() const { return at<3>().valid(); }
+  int32_t blocks() const { return at<3>().as_int32(); }
+  bool has_rsv_blocks() const { return at<4>().valid(); }
+  int32_t rsv_blocks() const { return at<4>().as_int32(); }
+  bool has_nblocks() const { return at<5>().valid(); }
+  int32_t nblocks() const { return at<5>().as_int32(); }
+};
+
+class Ext4JournalStartFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4JournalStartFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kIpFieldNumber = 2,
+    kBlocksFieldNumber = 3,
+    kRsvBlocksFieldNumber = 4,
+    kNblocksFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4JournalStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ip =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4JournalStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ip kIp() { return {}; }
+  void set_ip(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ip::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Blocks =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4JournalStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Blocks kBlocks() { return {}; }
+  void set_blocks(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Blocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RsvBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4JournalStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RsvBlocks kRsvBlocks() { return {}; }
+  void set_rsv_blocks(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RsvBlocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Nblocks =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4JournalStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nblocks kNblocks() { return {}; }
+  void set_nblocks(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nblocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4InvalidatepageFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4InvalidatepageFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4InvalidatepageFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4InvalidatepageFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_index() const { return at<3>().valid(); }
+  uint64_t index() const { return at<3>().as_uint64(); }
+  bool has_offset() const { return at<4>().valid(); }
+  uint64_t offset() const { return at<4>().as_uint64(); }
+  bool has_length() const { return at<5>().valid(); }
+  uint32_t length() const { return at<5>().as_uint32(); }
+};
+
+class Ext4InvalidatepageFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4InvalidatepageFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kIndexFieldNumber = 3,
+    kOffsetFieldNumber = 4,
+    kLengthFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4InvalidatepageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4InvalidatepageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Index =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4InvalidatepageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Index kIndex() { return {}; }
+  void set_index(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Index::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Offset =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4InvalidatepageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Offset kOffset() { return {}; }
+  void set_offset(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Offset::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Length =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4InvalidatepageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Length kLength() { return {}; }
+  void set_length(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Length::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4InsertRangeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4InsertRangeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4InsertRangeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4InsertRangeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_offset() const { return at<3>().valid(); }
+  int64_t offset() const { return at<3>().as_int64(); }
+  bool has_len() const { return at<4>().valid(); }
+  int64_t len() const { return at<4>().as_int64(); }
+};
+
+class Ext4InsertRangeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4InsertRangeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kOffsetFieldNumber = 3,
+    kLenFieldNumber = 4,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4InsertRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4InsertRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Offset =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4InsertRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Offset kOffset() { return {}; }
+  void set_offset(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Offset::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4InsertRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4IndMapBlocksExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4IndMapBlocksExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4IndMapBlocksExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4IndMapBlocksExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_flags() const { return at<3>().valid(); }
+  uint32_t flags() const { return at<3>().as_uint32(); }
+  bool has_pblk() const { return at<4>().valid(); }
+  uint64_t pblk() const { return at<4>().as_uint64(); }
+  bool has_lblk() const { return at<5>().valid(); }
+  uint32_t lblk() const { return at<5>().as_uint32(); }
+  bool has_len() const { return at<6>().valid(); }
+  uint32_t len() const { return at<6>().as_uint32(); }
+  bool has_mflags() const { return at<7>().valid(); }
+  uint32_t mflags() const { return at<7>().as_uint32(); }
+  bool has_ret() const { return at<8>().valid(); }
+  int32_t ret() const { return at<8>().as_int32(); }
+};
+
+class Ext4IndMapBlocksExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4IndMapBlocksExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kFlagsFieldNumber = 3,
+    kPblkFieldNumber = 4,
+    kLblkFieldNumber = 5,
+    kLenFieldNumber = 6,
+    kMflagsFieldNumber = 7,
+    kRetFieldNumber = 8,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4IndMapBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4IndMapBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4IndMapBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pblk =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4IndMapBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pblk kPblk() { return {}; }
+  void set_pblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4IndMapBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4IndMapBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mflags =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4IndMapBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mflags kMflags() { return {}; }
+  void set_mflags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mflags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4IndMapBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4IndMapBlocksEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4IndMapBlocksEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4IndMapBlocksEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4IndMapBlocksEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_lblk() const { return at<3>().valid(); }
+  uint32_t lblk() const { return at<3>().as_uint32(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint32_t len() const { return at<4>().as_uint32(); }
+  bool has_flags() const { return at<5>().valid(); }
+  uint32_t flags() const { return at<5>().as_uint32(); }
+};
+
+class Ext4IndMapBlocksEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4IndMapBlocksEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kLblkFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kFlagsFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4IndMapBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4IndMapBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4IndMapBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4IndMapBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4IndMapBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4GetReservedClusterAllocFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4GetReservedClusterAllocFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4GetReservedClusterAllocFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4GetReservedClusterAllocFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_lblk() const { return at<3>().valid(); }
+  uint32_t lblk() const { return at<3>().as_uint32(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint32_t len() const { return at<4>().as_uint32(); }
+};
+
+class Ext4GetReservedClusterAllocFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4GetReservedClusterAllocFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kLblkFieldNumber = 3,
+    kLenFieldNumber = 4,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4GetReservedClusterAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4GetReservedClusterAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4GetReservedClusterAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4GetReservedClusterAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4GetImpliedClusterAllocExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4GetImpliedClusterAllocExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4GetImpliedClusterAllocExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4GetImpliedClusterAllocExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_flags() const { return at<2>().valid(); }
+  uint32_t flags() const { return at<2>().as_uint32(); }
+  bool has_lblk() const { return at<3>().valid(); }
+  uint32_t lblk() const { return at<3>().as_uint32(); }
+  bool has_pblk() const { return at<4>().valid(); }
+  uint64_t pblk() const { return at<4>().as_uint64(); }
+  bool has_len() const { return at<5>().valid(); }
+  uint32_t len() const { return at<5>().as_uint32(); }
+  bool has_ret() const { return at<6>().valid(); }
+  int32_t ret() const { return at<6>().as_int32(); }
+};
+
+class Ext4GetImpliedClusterAllocExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4GetImpliedClusterAllocExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kFlagsFieldNumber = 2,
+    kLblkFieldNumber = 3,
+    kPblkFieldNumber = 4,
+    kLenFieldNumber = 5,
+    kRetFieldNumber = 6,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4GetImpliedClusterAllocExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4GetImpliedClusterAllocExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4GetImpliedClusterAllocExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pblk =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4GetImpliedClusterAllocExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pblk kPblk() { return {}; }
+  void set_pblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4GetImpliedClusterAllocExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4GetImpliedClusterAllocExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4FreeInodeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4FreeInodeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4FreeInodeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4FreeInodeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_uid() const { return at<3>().valid(); }
+  uint32_t uid() const { return at<3>().as_uint32(); }
+  bool has_gid() const { return at<4>().valid(); }
+  uint32_t gid() const { return at<4>().as_uint32(); }
+  bool has_blocks() const { return at<5>().valid(); }
+  uint64_t blocks() const { return at<5>().as_uint64(); }
+  bool has_mode() const { return at<6>().valid(); }
+  uint32_t mode() const { return at<6>().as_uint32(); }
+};
+
+class Ext4FreeInodeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4FreeInodeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kUidFieldNumber = 3,
+    kGidFieldNumber = 4,
+    kBlocksFieldNumber = 5,
+    kModeFieldNumber = 6,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4FreeInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4FreeInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Uid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4FreeInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Uid kUid() { return {}; }
+  void set_uid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Uid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Gid =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4FreeInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Gid kGid() { return {}; }
+  void set_gid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Gid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Blocks =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4FreeInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Blocks kBlocks() { return {}; }
+  void set_blocks(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Blocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4FreeInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4FreeBlocksFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4FreeBlocksFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4FreeBlocksFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4FreeBlocksFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_block() const { return at<3>().valid(); }
+  uint64_t block() const { return at<3>().as_uint64(); }
+  bool has_count() const { return at<4>().valid(); }
+  uint64_t count() const { return at<4>().as_uint64(); }
+  bool has_flags() const { return at<5>().valid(); }
+  int32_t flags() const { return at<5>().as_int32(); }
+  bool has_mode() const { return at<6>().valid(); }
+  uint32_t mode() const { return at<6>().as_uint32(); }
+};
+
+class Ext4FreeBlocksFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4FreeBlocksFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kBlockFieldNumber = 3,
+    kCountFieldNumber = 4,
+    kFlagsFieldNumber = 5,
+    kModeFieldNumber = 6,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4FreeBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4FreeBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Block =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4FreeBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Block kBlock() { return {}; }
+  void set_block(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Block::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Count =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4FreeBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Count kCount() { return {}; }
+  void set_count(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Count::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4FreeBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4FreeBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4ForgetFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ForgetFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ForgetFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ForgetFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_block() const { return at<3>().valid(); }
+  uint64_t block() const { return at<3>().as_uint64(); }
+  bool has_is_metadata() const { return at<4>().valid(); }
+  int32_t is_metadata() const { return at<4>().as_int32(); }
+  bool has_mode() const { return at<5>().valid(); }
+  uint32_t mode() const { return at<5>().as_uint32(); }
+};
+
+class Ext4ForgetFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ForgetFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kBlockFieldNumber = 3,
+    kIsMetadataFieldNumber = 4,
+    kModeFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ForgetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ForgetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Block =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ForgetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Block kBlock() { return {}; }
+  void set_block(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Block::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IsMetadata =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4ForgetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IsMetadata kIsMetadata() { return {}; }
+  void set_is_metadata(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IsMetadata::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ForgetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4FindDelallocRangeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4FindDelallocRangeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4FindDelallocRangeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4FindDelallocRangeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_from() const { return at<3>().valid(); }
+  uint32_t from() const { return at<3>().as_uint32(); }
+  bool has_to() const { return at<4>().valid(); }
+  uint32_t to() const { return at<4>().as_uint32(); }
+  bool has_reverse() const { return at<5>().valid(); }
+  int32_t reverse() const { return at<5>().as_int32(); }
+  bool has_found() const { return at<6>().valid(); }
+  int32_t found() const { return at<6>().as_int32(); }
+  bool has_found_blk() const { return at<7>().valid(); }
+  uint32_t found_blk() const { return at<7>().as_uint32(); }
+};
+
+class Ext4FindDelallocRangeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4FindDelallocRangeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kFromFieldNumber = 3,
+    kToFieldNumber = 4,
+    kReverseFieldNumber = 5,
+    kFoundFieldNumber = 6,
+    kFoundBlkFieldNumber = 7,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4FindDelallocRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4FindDelallocRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_From =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4FindDelallocRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_From kFrom() { return {}; }
+  void set_from(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_From::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_To =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4FindDelallocRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_To kTo() { return {}; }
+  void set_to(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_To::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Reverse =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4FindDelallocRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Reverse kReverse() { return {}; }
+  void set_reverse(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Reverse::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Found =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4FindDelallocRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Found kFound() { return {}; }
+  void set_found(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Found::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FoundBlk =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4FindDelallocRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FoundBlk kFoundBlk() { return {}; }
+  void set_found_blk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FoundBlk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4FallocateExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4FallocateExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4FallocateExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4FallocateExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pos() const { return at<3>().valid(); }
+  int64_t pos() const { return at<3>().as_int64(); }
+  bool has_blocks() const { return at<4>().valid(); }
+  uint32_t blocks() const { return at<4>().as_uint32(); }
+  bool has_ret() const { return at<5>().valid(); }
+  int32_t ret() const { return at<5>().as_int32(); }
+};
+
+class Ext4FallocateExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4FallocateExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPosFieldNumber = 3,
+    kBlocksFieldNumber = 4,
+    kRetFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4FallocateExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4FallocateExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pos =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4FallocateExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pos kPos() { return {}; }
+  void set_pos(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pos::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Blocks =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4FallocateExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Blocks kBlocks() { return {}; }
+  void set_blocks(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Blocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4FallocateExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4FallocateEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4FallocateEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4FallocateEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4FallocateEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_offset() const { return at<3>().valid(); }
+  int64_t offset() const { return at<3>().as_int64(); }
+  bool has_len() const { return at<4>().valid(); }
+  int64_t len() const { return at<4>().as_int64(); }
+  bool has_mode() const { return at<5>().valid(); }
+  int32_t mode() const { return at<5>().as_int32(); }
+  bool has_pos() const { return at<6>().valid(); }
+  int64_t pos() const { return at<6>().as_int64(); }
+};
+
+class Ext4FallocateEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4FallocateEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kOffsetFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kModeFieldNumber = 5,
+    kPosFieldNumber = 6,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4FallocateEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4FallocateEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Offset =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4FallocateEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Offset kOffset() { return {}; }
+  void set_offset(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Offset::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4FallocateEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4FallocateEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pos =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4FallocateEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pos kPos() { return {}; }
+  void set_pos(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pos::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4ExtShowExtentFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ExtShowExtentFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ExtShowExtentFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ExtShowExtentFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pblk() const { return at<3>().valid(); }
+  uint64_t pblk() const { return at<3>().as_uint64(); }
+  bool has_lblk() const { return at<4>().valid(); }
+  uint32_t lblk() const { return at<4>().as_uint32(); }
+  bool has_len() const { return at<5>().valid(); }
+  uint32_t len() const { return at<5>().as_uint32(); }
+};
+
+class Ext4ExtShowExtentFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ExtShowExtentFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPblkFieldNumber = 3,
+    kLblkFieldNumber = 4,
+    kLenFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtShowExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtShowExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtShowExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pblk kPblk() { return {}; }
+  void set_pblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtShowExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtShowExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4ExtRmLeafFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ExtRmLeafFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ExtRmLeafFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ExtRmLeafFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_partial() const { return at<3>().valid(); }
+  int64_t partial() const { return at<3>().as_int64(); }
+  bool has_start() const { return at<4>().valid(); }
+  uint32_t start() const { return at<4>().as_uint32(); }
+  bool has_ee_lblk() const { return at<5>().valid(); }
+  uint32_t ee_lblk() const { return at<5>().as_uint32(); }
+  bool has_ee_pblk() const { return at<6>().valid(); }
+  uint64_t ee_pblk() const { return at<6>().as_uint64(); }
+  bool has_ee_len() const { return at<7>().valid(); }
+  int32_t ee_len() const { return at<7>().as_int32(); }
+};
+
+class Ext4ExtRmLeafFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ExtRmLeafFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPartialFieldNumber = 3,
+    kStartFieldNumber = 4,
+    kEeLblkFieldNumber = 5,
+    kEePblkFieldNumber = 6,
+    kEeLenFieldNumber = 7,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtRmLeafFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtRmLeafFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Partial =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4ExtRmLeafFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Partial kPartial() { return {}; }
+  void set_partial(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Partial::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Start =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtRmLeafFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Start kStart() { return {}; }
+  void set_start(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Start::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EeLblk =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtRmLeafFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EeLblk kEeLblk() { return {}; }
+  void set_ee_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EeLblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EePblk =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtRmLeafFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EePblk kEePblk() { return {}; }
+  void set_ee_pblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EePblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EeLen =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4ExtRmLeafFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EeLen kEeLen() { return {}; }
+  void set_ee_len(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EeLen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4ExtRmIdxFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ExtRmIdxFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ExtRmIdxFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ExtRmIdxFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pblk() const { return at<3>().valid(); }
+  uint64_t pblk() const { return at<3>().as_uint64(); }
+};
+
+class Ext4ExtRmIdxFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ExtRmIdxFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPblkFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtRmIdxFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtRmIdxFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtRmIdxFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pblk kPblk() { return {}; }
+  void set_pblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4ExtRemoveSpaceDoneFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ExtRemoveSpaceDoneFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ExtRemoveSpaceDoneFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ExtRemoveSpaceDoneFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_start() const { return at<3>().valid(); }
+  uint32_t start() const { return at<3>().as_uint32(); }
+  bool has_end() const { return at<4>().valid(); }
+  uint32_t end() const { return at<4>().as_uint32(); }
+  bool has_depth() const { return at<5>().valid(); }
+  int32_t depth() const { return at<5>().as_int32(); }
+  bool has_partial() const { return at<6>().valid(); }
+  int64_t partial() const { return at<6>().as_int64(); }
+  bool has_eh_entries() const { return at<7>().valid(); }
+  uint32_t eh_entries() const { return at<7>().as_uint32(); }
+};
+
+class Ext4ExtRemoveSpaceDoneFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ExtRemoveSpaceDoneFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kStartFieldNumber = 3,
+    kEndFieldNumber = 4,
+    kDepthFieldNumber = 5,
+    kPartialFieldNumber = 6,
+    kEhEntriesFieldNumber = 7,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtRemoveSpaceDoneFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtRemoveSpaceDoneFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Start =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtRemoveSpaceDoneFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Start kStart() { return {}; }
+  void set_start(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Start::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_End =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtRemoveSpaceDoneFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_End kEnd() { return {}; }
+  void set_end(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_End::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Depth =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4ExtRemoveSpaceDoneFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Depth kDepth() { return {}; }
+  void set_depth(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Depth::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Partial =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4ExtRemoveSpaceDoneFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Partial kPartial() { return {}; }
+  void set_partial(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Partial::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EhEntries =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtRemoveSpaceDoneFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EhEntries kEhEntries() { return {}; }
+  void set_eh_entries(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EhEntries::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4ExtRemoveSpaceFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ExtRemoveSpaceFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ExtRemoveSpaceFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ExtRemoveSpaceFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_start() const { return at<3>().valid(); }
+  uint32_t start() const { return at<3>().as_uint32(); }
+  bool has_end() const { return at<4>().valid(); }
+  uint32_t end() const { return at<4>().as_uint32(); }
+  bool has_depth() const { return at<5>().valid(); }
+  int32_t depth() const { return at<5>().as_int32(); }
+};
+
+class Ext4ExtRemoveSpaceFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ExtRemoveSpaceFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kStartFieldNumber = 3,
+    kEndFieldNumber = 4,
+    kDepthFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtRemoveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtRemoveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Start =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtRemoveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Start kStart() { return {}; }
+  void set_start(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Start::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_End =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtRemoveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_End kEnd() { return {}; }
+  void set_end(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_End::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Depth =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4ExtRemoveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Depth kDepth() { return {}; }
+  void set_depth(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Depth::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4ExtPutInCacheFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ExtPutInCacheFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ExtPutInCacheFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ExtPutInCacheFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_lblk() const { return at<3>().valid(); }
+  uint32_t lblk() const { return at<3>().as_uint32(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint32_t len() const { return at<4>().as_uint32(); }
+  bool has_start() const { return at<5>().valid(); }
+  uint64_t start() const { return at<5>().as_uint64(); }
+};
+
+class Ext4ExtPutInCacheFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ExtPutInCacheFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kLblkFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kStartFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtPutInCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtPutInCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtPutInCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtPutInCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Start =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtPutInCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Start kStart() { return {}; }
+  void set_start(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Start::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4ExtMapBlocksExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ExtMapBlocksExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ExtMapBlocksExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ExtMapBlocksExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_flags() const { return at<3>().valid(); }
+  uint32_t flags() const { return at<3>().as_uint32(); }
+  bool has_pblk() const { return at<4>().valid(); }
+  uint64_t pblk() const { return at<4>().as_uint64(); }
+  bool has_lblk() const { return at<5>().valid(); }
+  uint32_t lblk() const { return at<5>().as_uint32(); }
+  bool has_len() const { return at<6>().valid(); }
+  uint32_t len() const { return at<6>().as_uint32(); }
+  bool has_mflags() const { return at<7>().valid(); }
+  uint32_t mflags() const { return at<7>().as_uint32(); }
+  bool has_ret() const { return at<8>().valid(); }
+  int32_t ret() const { return at<8>().as_int32(); }
+};
+
+class Ext4ExtMapBlocksExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ExtMapBlocksExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kFlagsFieldNumber = 3,
+    kPblkFieldNumber = 4,
+    kLblkFieldNumber = 5,
+    kLenFieldNumber = 6,
+    kMflagsFieldNumber = 7,
+    kRetFieldNumber = 8,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtMapBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtMapBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtMapBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pblk =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtMapBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pblk kPblk() { return {}; }
+  void set_pblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtMapBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtMapBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mflags =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtMapBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mflags kMflags() { return {}; }
+  void set_mflags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mflags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4ExtMapBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4ExtMapBlocksEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ExtMapBlocksEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ExtMapBlocksEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ExtMapBlocksEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_lblk() const { return at<3>().valid(); }
+  uint32_t lblk() const { return at<3>().as_uint32(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint32_t len() const { return at<4>().as_uint32(); }
+  bool has_flags() const { return at<5>().valid(); }
+  uint32_t flags() const { return at<5>().as_uint32(); }
+};
+
+class Ext4ExtMapBlocksEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ExtMapBlocksEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kLblkFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kFlagsFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtMapBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtMapBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtMapBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtMapBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtMapBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4ExtLoadExtentFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ExtLoadExtentFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ExtLoadExtentFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ExtLoadExtentFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pblk() const { return at<3>().valid(); }
+  uint64_t pblk() const { return at<3>().as_uint64(); }
+  bool has_lblk() const { return at<4>().valid(); }
+  uint32_t lblk() const { return at<4>().as_uint32(); }
+};
+
+class Ext4ExtLoadExtentFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ExtLoadExtentFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPblkFieldNumber = 3,
+    kLblkFieldNumber = 4,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtLoadExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtLoadExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtLoadExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pblk kPblk() { return {}; }
+  void set_pblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtLoadExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4ExtInCacheFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ExtInCacheFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ExtInCacheFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ExtInCacheFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_lblk() const { return at<3>().valid(); }
+  uint32_t lblk() const { return at<3>().as_uint32(); }
+  bool has_ret() const { return at<4>().valid(); }
+  int32_t ret() const { return at<4>().as_int32(); }
+};
+
+class Ext4ExtInCacheFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ExtInCacheFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kLblkFieldNumber = 3,
+    kRetFieldNumber = 4,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtInCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtInCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtInCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4ExtInCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4ExtHandleUnwrittenExtentsFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ExtHandleUnwrittenExtentsFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ExtHandleUnwrittenExtentsFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ExtHandleUnwrittenExtentsFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_flags() const { return at<3>().valid(); }
+  int32_t flags() const { return at<3>().as_int32(); }
+  bool has_lblk() const { return at<4>().valid(); }
+  uint32_t lblk() const { return at<4>().as_uint32(); }
+  bool has_pblk() const { return at<5>().valid(); }
+  uint64_t pblk() const { return at<5>().as_uint64(); }
+  bool has_len() const { return at<6>().valid(); }
+  uint32_t len() const { return at<6>().as_uint32(); }
+  bool has_allocated() const { return at<7>().valid(); }
+  uint32_t allocated() const { return at<7>().as_uint32(); }
+  bool has_newblk() const { return at<8>().valid(); }
+  uint64_t newblk() const { return at<8>().as_uint64(); }
+};
+
+class Ext4ExtHandleUnwrittenExtentsFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ExtHandleUnwrittenExtentsFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kFlagsFieldNumber = 3,
+    kLblkFieldNumber = 4,
+    kPblkFieldNumber = 5,
+    kLenFieldNumber = 6,
+    kAllocatedFieldNumber = 7,
+    kNewblkFieldNumber = 8,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtHandleUnwrittenExtentsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtHandleUnwrittenExtentsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4ExtHandleUnwrittenExtentsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtHandleUnwrittenExtentsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pblk =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtHandleUnwrittenExtentsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pblk kPblk() { return {}; }
+  void set_pblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtHandleUnwrittenExtentsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Allocated =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtHandleUnwrittenExtentsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Allocated kAllocated() { return {}; }
+  void set_allocated(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Allocated::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Newblk =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtHandleUnwrittenExtentsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Newblk kNewblk() { return {}; }
+  void set_newblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Newblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4ExtConvertToInitializedFastpathFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/10, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ExtConvertToInitializedFastpathFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ExtConvertToInitializedFastpathFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ExtConvertToInitializedFastpathFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_m_lblk() const { return at<3>().valid(); }
+  uint32_t m_lblk() const { return at<3>().as_uint32(); }
+  bool has_m_len() const { return at<4>().valid(); }
+  uint32_t m_len() const { return at<4>().as_uint32(); }
+  bool has_u_lblk() const { return at<5>().valid(); }
+  uint32_t u_lblk() const { return at<5>().as_uint32(); }
+  bool has_u_len() const { return at<6>().valid(); }
+  uint32_t u_len() const { return at<6>().as_uint32(); }
+  bool has_u_pblk() const { return at<7>().valid(); }
+  uint64_t u_pblk() const { return at<7>().as_uint64(); }
+  bool has_i_lblk() const { return at<8>().valid(); }
+  uint32_t i_lblk() const { return at<8>().as_uint32(); }
+  bool has_i_len() const { return at<9>().valid(); }
+  uint32_t i_len() const { return at<9>().as_uint32(); }
+  bool has_i_pblk() const { return at<10>().valid(); }
+  uint64_t i_pblk() const { return at<10>().as_uint64(); }
+};
+
+class Ext4ExtConvertToInitializedFastpathFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ExtConvertToInitializedFastpathFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kMLblkFieldNumber = 3,
+    kMLenFieldNumber = 4,
+    kULblkFieldNumber = 5,
+    kULenFieldNumber = 6,
+    kUPblkFieldNumber = 7,
+    kILblkFieldNumber = 8,
+    kILenFieldNumber = 9,
+    kIPblkFieldNumber = 10,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtConvertToInitializedFastpathFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtConvertToInitializedFastpathFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MLblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtConvertToInitializedFastpathFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MLblk kMLblk() { return {}; }
+  void set_m_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MLblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MLen =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtConvertToInitializedFastpathFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MLen kMLen() { return {}; }
+  void set_m_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MLen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ULblk =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtConvertToInitializedFastpathFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ULblk kULblk() { return {}; }
+  void set_u_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ULblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ULen =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtConvertToInitializedFastpathFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ULen kULen() { return {}; }
+  void set_u_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ULen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UPblk =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtConvertToInitializedFastpathFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UPblk kUPblk() { return {}; }
+  void set_u_pblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UPblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ILblk =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtConvertToInitializedFastpathFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ILblk kILblk() { return {}; }
+  void set_i_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ILblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ILen =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtConvertToInitializedFastpathFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ILen kILen() { return {}; }
+  void set_i_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ILen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IPblk =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtConvertToInitializedFastpathFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IPblk kIPblk() { return {}; }
+  void set_i_pblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IPblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4ExtConvertToInitializedEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4ExtConvertToInitializedEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4ExtConvertToInitializedEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4ExtConvertToInitializedEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_m_lblk() const { return at<3>().valid(); }
+  uint32_t m_lblk() const { return at<3>().as_uint32(); }
+  bool has_m_len() const { return at<4>().valid(); }
+  uint32_t m_len() const { return at<4>().as_uint32(); }
+  bool has_u_lblk() const { return at<5>().valid(); }
+  uint32_t u_lblk() const { return at<5>().as_uint32(); }
+  bool has_u_len() const { return at<6>().valid(); }
+  uint32_t u_len() const { return at<6>().as_uint32(); }
+  bool has_u_pblk() const { return at<7>().valid(); }
+  uint64_t u_pblk() const { return at<7>().as_uint64(); }
+};
+
+class Ext4ExtConvertToInitializedEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4ExtConvertToInitializedEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kMLblkFieldNumber = 3,
+    kMLenFieldNumber = 4,
+    kULblkFieldNumber = 5,
+    kULenFieldNumber = 6,
+    kUPblkFieldNumber = 7,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtConvertToInitializedEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtConvertToInitializedEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MLblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtConvertToInitializedEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MLblk kMLblk() { return {}; }
+  void set_m_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MLblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MLen =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtConvertToInitializedEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MLen kMLen() { return {}; }
+  void set_m_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MLen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ULblk =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtConvertToInitializedEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ULblk kULblk() { return {}; }
+  void set_u_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ULblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ULen =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4ExtConvertToInitializedEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ULen kULen() { return {}; }
+  void set_u_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ULen::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UPblk =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4ExtConvertToInitializedEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UPblk kUPblk() { return {}; }
+  void set_u_pblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UPblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4EvictInodeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4EvictInodeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4EvictInodeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4EvictInodeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_nlink() const { return at<3>().valid(); }
+  int32_t nlink() const { return at<3>().as_int32(); }
+};
+
+class Ext4EvictInodeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4EvictInodeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kNlinkFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EvictInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EvictInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Nlink =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4EvictInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nlink kNlink() { return {}; }
+  void set_nlink(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nlink::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4EsShrinkScanExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4EsShrinkScanExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4EsShrinkScanExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4EsShrinkScanExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_nr_shrunk() const { return at<2>().valid(); }
+  int32_t nr_shrunk() const { return at<2>().as_int32(); }
+  bool has_cache_cnt() const { return at<3>().valid(); }
+  int32_t cache_cnt() const { return at<3>().as_int32(); }
+};
+
+class Ext4EsShrinkScanExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4EsShrinkScanExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kNrShrunkFieldNumber = 2,
+    kCacheCntFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsShrinkScanExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrShrunk =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4EsShrinkScanExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrShrunk kNrShrunk() { return {}; }
+  void set_nr_shrunk(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrShrunk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CacheCnt =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4EsShrinkScanExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CacheCnt kCacheCnt() { return {}; }
+  void set_cache_cnt(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CacheCnt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4EsShrinkScanEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4EsShrinkScanEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4EsShrinkScanEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4EsShrinkScanEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_nr_to_scan() const { return at<2>().valid(); }
+  int32_t nr_to_scan() const { return at<2>().as_int32(); }
+  bool has_cache_cnt() const { return at<3>().valid(); }
+  int32_t cache_cnt() const { return at<3>().as_int32(); }
+};
+
+class Ext4EsShrinkScanEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4EsShrinkScanEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kNrToScanFieldNumber = 2,
+    kCacheCntFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsShrinkScanEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrToScan =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4EsShrinkScanEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrToScan kNrToScan() { return {}; }
+  void set_nr_to_scan(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrToScan::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CacheCnt =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4EsShrinkScanEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CacheCnt kCacheCnt() { return {}; }
+  void set_cache_cnt(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CacheCnt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4EsShrinkCountFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4EsShrinkCountFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4EsShrinkCountFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4EsShrinkCountFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_nr_to_scan() const { return at<2>().valid(); }
+  int32_t nr_to_scan() const { return at<2>().as_int32(); }
+  bool has_cache_cnt() const { return at<3>().valid(); }
+  int32_t cache_cnt() const { return at<3>().as_int32(); }
+};
+
+class Ext4EsShrinkCountFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4EsShrinkCountFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kNrToScanFieldNumber = 2,
+    kCacheCntFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsShrinkCountFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrToScan =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4EsShrinkCountFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrToScan kNrToScan() { return {}; }
+  void set_nr_to_scan(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrToScan::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CacheCnt =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4EsShrinkCountFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CacheCnt kCacheCnt() { return {}; }
+  void set_cache_cnt(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CacheCnt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4EsShrinkFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4EsShrinkFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4EsShrinkFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4EsShrinkFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_nr_shrunk() const { return at<2>().valid(); }
+  int32_t nr_shrunk() const { return at<2>().as_int32(); }
+  bool has_scan_time() const { return at<3>().valid(); }
+  uint64_t scan_time() const { return at<3>().as_uint64(); }
+  bool has_nr_skipped() const { return at<4>().valid(); }
+  int32_t nr_skipped() const { return at<4>().as_int32(); }
+  bool has_retried() const { return at<5>().valid(); }
+  int32_t retried() const { return at<5>().as_int32(); }
+};
+
+class Ext4EsShrinkFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4EsShrinkFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kNrShrunkFieldNumber = 2,
+    kScanTimeFieldNumber = 3,
+    kNrSkippedFieldNumber = 4,
+    kRetriedFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsShrinkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrShrunk =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4EsShrinkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrShrunk kNrShrunk() { return {}; }
+  void set_nr_shrunk(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrShrunk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ScanTime =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsShrinkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ScanTime kScanTime() { return {}; }
+  void set_scan_time(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ScanTime::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrSkipped =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4EsShrinkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrSkipped kNrSkipped() { return {}; }
+  void set_nr_skipped(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrSkipped::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Retried =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4EsShrinkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Retried kRetried() { return {}; }
+  void set_retried(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Retried::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4EsRemoveExtentFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4EsRemoveExtentFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4EsRemoveExtentFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4EsRemoveExtentFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_lblk() const { return at<3>().valid(); }
+  int64_t lblk() const { return at<3>().as_int64(); }
+  bool has_len() const { return at<4>().valid(); }
+  int64_t len() const { return at<4>().as_int64(); }
+};
+
+class Ext4EsRemoveExtentFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4EsRemoveExtentFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kLblkFieldNumber = 3,
+    kLenFieldNumber = 4,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsRemoveExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsRemoveExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4EsRemoveExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4EsRemoveExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4EsLookupExtentExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4EsLookupExtentExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4EsLookupExtentExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4EsLookupExtentExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_lblk() const { return at<3>().valid(); }
+  uint32_t lblk() const { return at<3>().as_uint32(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint32_t len() const { return at<4>().as_uint32(); }
+  bool has_pblk() const { return at<5>().valid(); }
+  uint64_t pblk() const { return at<5>().as_uint64(); }
+  bool has_status() const { return at<6>().valid(); }
+  uint64_t status() const { return at<6>().as_uint64(); }
+  bool has_found() const { return at<7>().valid(); }
+  int32_t found() const { return at<7>().as_int32(); }
+};
+
+class Ext4EsLookupExtentExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4EsLookupExtentExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kLblkFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kPblkFieldNumber = 5,
+    kStatusFieldNumber = 6,
+    kFoundFieldNumber = 7,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsLookupExtentExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsLookupExtentExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4EsLookupExtentExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4EsLookupExtentExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pblk =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsLookupExtentExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pblk kPblk() { return {}; }
+  void set_pblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Status =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsLookupExtentExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Status kStatus() { return {}; }
+  void set_status(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Status::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Found =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4EsLookupExtentExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Found kFound() { return {}; }
+  void set_found(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Found::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4EsLookupExtentEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4EsLookupExtentEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4EsLookupExtentEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4EsLookupExtentEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_lblk() const { return at<3>().valid(); }
+  uint32_t lblk() const { return at<3>().as_uint32(); }
+};
+
+class Ext4EsLookupExtentEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4EsLookupExtentEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kLblkFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsLookupExtentEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsLookupExtentEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4EsLookupExtentEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4EsInsertExtentFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4EsInsertExtentFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4EsInsertExtentFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4EsInsertExtentFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_lblk() const { return at<3>().valid(); }
+  uint32_t lblk() const { return at<3>().as_uint32(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint32_t len() const { return at<4>().as_uint32(); }
+  bool has_pblk() const { return at<5>().valid(); }
+  uint64_t pblk() const { return at<5>().as_uint64(); }
+  bool has_status() const { return at<6>().valid(); }
+  uint64_t status() const { return at<6>().as_uint64(); }
+};
+
+class Ext4EsInsertExtentFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4EsInsertExtentFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kLblkFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kPblkFieldNumber = 5,
+    kStatusFieldNumber = 6,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsInsertExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsInsertExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4EsInsertExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4EsInsertExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pblk =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsInsertExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pblk kPblk() { return {}; }
+  void set_pblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Status =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsInsertExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Status kStatus() { return {}; }
+  void set_status(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Status::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4EsFindDelayedExtentRangeExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4EsFindDelayedExtentRangeExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4EsFindDelayedExtentRangeExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4EsFindDelayedExtentRangeExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_lblk() const { return at<3>().valid(); }
+  uint32_t lblk() const { return at<3>().as_uint32(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint32_t len() const { return at<4>().as_uint32(); }
+  bool has_pblk() const { return at<5>().valid(); }
+  uint64_t pblk() const { return at<5>().as_uint64(); }
+  bool has_status() const { return at<6>().valid(); }
+  uint64_t status() const { return at<6>().as_uint64(); }
+};
+
+class Ext4EsFindDelayedExtentRangeExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4EsFindDelayedExtentRangeExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kLblkFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kPblkFieldNumber = 5,
+    kStatusFieldNumber = 6,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsFindDelayedExtentRangeExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsFindDelayedExtentRangeExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4EsFindDelayedExtentRangeExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4EsFindDelayedExtentRangeExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pblk =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsFindDelayedExtentRangeExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pblk kPblk() { return {}; }
+  void set_pblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Status =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsFindDelayedExtentRangeExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Status kStatus() { return {}; }
+  void set_status(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Status::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4EsFindDelayedExtentRangeEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4EsFindDelayedExtentRangeEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4EsFindDelayedExtentRangeEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4EsFindDelayedExtentRangeEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_lblk() const { return at<3>().valid(); }
+  uint32_t lblk() const { return at<3>().as_uint32(); }
+};
+
+class Ext4EsFindDelayedExtentRangeEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4EsFindDelayedExtentRangeEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kLblkFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsFindDelayedExtentRangeEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsFindDelayedExtentRangeEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4EsFindDelayedExtentRangeEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4EsCacheExtentFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4EsCacheExtentFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4EsCacheExtentFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4EsCacheExtentFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_lblk() const { return at<3>().valid(); }
+  uint32_t lblk() const { return at<3>().as_uint32(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint32_t len() const { return at<4>().as_uint32(); }
+  bool has_pblk() const { return at<5>().valid(); }
+  uint64_t pblk() const { return at<5>().as_uint64(); }
+  bool has_status() const { return at<6>().valid(); }
+  uint32_t status() const { return at<6>().as_uint32(); }
+};
+
+class Ext4EsCacheExtentFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4EsCacheExtentFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kLblkFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kPblkFieldNumber = 5,
+    kStatusFieldNumber = 6,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsCacheExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsCacheExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4EsCacheExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4EsCacheExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pblk =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4EsCacheExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pblk kPblk() { return {}; }
+  void set_pblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Status =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4EsCacheExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Status kStatus() { return {}; }
+  void set_status(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Status::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4DropInodeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4DropInodeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4DropInodeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4DropInodeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_drop() const { return at<3>().valid(); }
+  int32_t drop() const { return at<3>().as_int32(); }
+};
+
+class Ext4DropInodeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4DropInodeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kDropFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DropInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DropInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Drop =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DropInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Drop kDrop() { return {}; }
+  void set_drop(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Drop::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4DiscardPreallocationsFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4DiscardPreallocationsFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4DiscardPreallocationsFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4DiscardPreallocationsFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+};
+
+class Ext4DiscardPreallocationsFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4DiscardPreallocationsFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DiscardPreallocationsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DiscardPreallocationsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4DiscardBlocksFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4DiscardBlocksFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4DiscardBlocksFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4DiscardBlocksFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_blk() const { return at<2>().valid(); }
+  uint64_t blk() const { return at<2>().as_uint64(); }
+  bool has_count() const { return at<3>().valid(); }
+  uint64_t count() const { return at<3>().as_uint64(); }
+};
+
+class Ext4DiscardBlocksFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4DiscardBlocksFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kBlkFieldNumber = 2,
+    kCountFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DiscardBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Blk =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DiscardBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Blk kBlk() { return {}; }
+  void set_blk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Blk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Count =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DiscardBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Count kCount() { return {}; }
+  void set_count(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Count::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4DirectIOExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4DirectIOExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4DirectIOExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4DirectIOExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pos() const { return at<3>().valid(); }
+  int64_t pos() const { return at<3>().as_int64(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint64_t len() const { return at<4>().as_uint64(); }
+  bool has_rw() const { return at<5>().valid(); }
+  int32_t rw() const { return at<5>().as_int32(); }
+  bool has_ret() const { return at<6>().valid(); }
+  int32_t ret() const { return at<6>().as_int32(); }
+};
+
+class Ext4DirectIOExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4DirectIOExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPosFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kRwFieldNumber = 5,
+    kRetFieldNumber = 6,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DirectIOExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DirectIOExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pos =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4DirectIOExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pos kPos() { return {}; }
+  void set_pos(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pos::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DirectIOExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rw =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DirectIOExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rw kRw() { return {}; }
+  void set_rw(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rw::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DirectIOExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4DirectIOEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4DirectIOEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4DirectIOEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4DirectIOEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pos() const { return at<3>().valid(); }
+  int64_t pos() const { return at<3>().as_int64(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint64_t len() const { return at<4>().as_uint64(); }
+  bool has_rw() const { return at<5>().valid(); }
+  int32_t rw() const { return at<5>().as_int32(); }
+};
+
+class Ext4DirectIOEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4DirectIOEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPosFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kRwFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DirectIOEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DirectIOEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pos =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4DirectIOEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pos kPos() { return {}; }
+  void set_pos(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pos::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DirectIOEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rw =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DirectIOEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rw kRw() { return {}; }
+  void set_rw(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rw::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4DaWritePagesExtentFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4DaWritePagesExtentFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4DaWritePagesExtentFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4DaWritePagesExtentFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_lblk() const { return at<3>().valid(); }
+  uint64_t lblk() const { return at<3>().as_uint64(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint32_t len() const { return at<4>().as_uint32(); }
+  bool has_flags() const { return at<5>().valid(); }
+  uint32_t flags() const { return at<5>().as_uint32(); }
+};
+
+class Ext4DaWritePagesExtentFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4DaWritePagesExtentFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kLblkFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kFlagsFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaWritePagesExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaWritePagesExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lblk =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaWritePagesExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lblk kLblk() { return {}; }
+  void set_lblk(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lblk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4DaWritePagesExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4DaWritePagesExtentFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4DaWritePagesFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/10, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4DaWritePagesFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4DaWritePagesFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4DaWritePagesFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_first_page() const { return at<3>().valid(); }
+  uint64_t first_page() const { return at<3>().as_uint64(); }
+  bool has_nr_to_write() const { return at<4>().valid(); }
+  int64_t nr_to_write() const { return at<4>().as_int64(); }
+  bool has_sync_mode() const { return at<5>().valid(); }
+  int32_t sync_mode() const { return at<5>().as_int32(); }
+  bool has_b_blocknr() const { return at<6>().valid(); }
+  uint64_t b_blocknr() const { return at<6>().as_uint64(); }
+  bool has_b_size() const { return at<7>().valid(); }
+  uint32_t b_size() const { return at<7>().as_uint32(); }
+  bool has_b_state() const { return at<8>().valid(); }
+  uint32_t b_state() const { return at<8>().as_uint32(); }
+  bool has_io_done() const { return at<9>().valid(); }
+  int32_t io_done() const { return at<9>().as_int32(); }
+  bool has_pages_written() const { return at<10>().valid(); }
+  int32_t pages_written() const { return at<10>().as_int32(); }
+};
+
+class Ext4DaWritePagesFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4DaWritePagesFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kFirstPageFieldNumber = 3,
+    kNrToWriteFieldNumber = 4,
+    kSyncModeFieldNumber = 5,
+    kBBlocknrFieldNumber = 6,
+    kBSizeFieldNumber = 7,
+    kBStateFieldNumber = 8,
+    kIoDoneFieldNumber = 9,
+    kPagesWrittenFieldNumber = 10,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaWritePagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaWritePagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FirstPage =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaWritePagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FirstPage kFirstPage() { return {}; }
+  void set_first_page(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FirstPage::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrToWrite =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4DaWritePagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrToWrite kNrToWrite() { return {}; }
+  void set_nr_to_write(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrToWrite::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SyncMode =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DaWritePagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SyncMode kSyncMode() { return {}; }
+  void set_sync_mode(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SyncMode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BBlocknr =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaWritePagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BBlocknr kBBlocknr() { return {}; }
+  void set_b_blocknr(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BBlocknr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BSize =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4DaWritePagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BSize kBSize() { return {}; }
+  void set_b_size(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BState =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4DaWritePagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BState kBState() { return {}; }
+  void set_b_state(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BState::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IoDone =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DaWritePagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IoDone kIoDone() { return {}; }
+  void set_io_done(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IoDone::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PagesWritten =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DaWritePagesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PagesWritten kPagesWritten() { return {}; }
+  void set_pages_written(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PagesWritten::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4DaUpdateReserveSpaceFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/9, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4DaUpdateReserveSpaceFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4DaUpdateReserveSpaceFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4DaUpdateReserveSpaceFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_i_blocks() const { return at<3>().valid(); }
+  uint64_t i_blocks() const { return at<3>().as_uint64(); }
+  bool has_used_blocks() const { return at<4>().valid(); }
+  int32_t used_blocks() const { return at<4>().as_int32(); }
+  bool has_reserved_data_blocks() const { return at<5>().valid(); }
+  int32_t reserved_data_blocks() const { return at<5>().as_int32(); }
+  bool has_reserved_meta_blocks() const { return at<6>().valid(); }
+  int32_t reserved_meta_blocks() const { return at<6>().as_int32(); }
+  bool has_allocated_meta_blocks() const { return at<7>().valid(); }
+  int32_t allocated_meta_blocks() const { return at<7>().as_int32(); }
+  bool has_quota_claim() const { return at<8>().valid(); }
+  int32_t quota_claim() const { return at<8>().as_int32(); }
+  bool has_mode() const { return at<9>().valid(); }
+  uint32_t mode() const { return at<9>().as_uint32(); }
+};
+
+class Ext4DaUpdateReserveSpaceFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4DaUpdateReserveSpaceFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kIBlocksFieldNumber = 3,
+    kUsedBlocksFieldNumber = 4,
+    kReservedDataBlocksFieldNumber = 5,
+    kReservedMetaBlocksFieldNumber = 6,
+    kAllocatedMetaBlocksFieldNumber = 7,
+    kQuotaClaimFieldNumber = 8,
+    kModeFieldNumber = 9,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaUpdateReserveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaUpdateReserveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaUpdateReserveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IBlocks kIBlocks() { return {}; }
+  void set_i_blocks(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IBlocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UsedBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DaUpdateReserveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UsedBlocks kUsedBlocks() { return {}; }
+  void set_used_blocks(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UsedBlocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReservedDataBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DaUpdateReserveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReservedDataBlocks kReservedDataBlocks() { return {}; }
+  void set_reserved_data_blocks(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReservedDataBlocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReservedMetaBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DaUpdateReserveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReservedMetaBlocks kReservedMetaBlocks() { return {}; }
+  void set_reserved_meta_blocks(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReservedMetaBlocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AllocatedMetaBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DaUpdateReserveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllocatedMetaBlocks kAllocatedMetaBlocks() { return {}; }
+  void set_allocated_meta_blocks(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AllocatedMetaBlocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_QuotaClaim =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DaUpdateReserveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_QuotaClaim kQuotaClaim() { return {}; }
+  void set_quota_claim(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_QuotaClaim::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4DaUpdateReserveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4DaReserveSpaceFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4DaReserveSpaceFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4DaReserveSpaceFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4DaReserveSpaceFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_i_blocks() const { return at<3>().valid(); }
+  uint64_t i_blocks() const { return at<3>().as_uint64(); }
+  bool has_reserved_data_blocks() const { return at<4>().valid(); }
+  int32_t reserved_data_blocks() const { return at<4>().as_int32(); }
+  bool has_reserved_meta_blocks() const { return at<5>().valid(); }
+  int32_t reserved_meta_blocks() const { return at<5>().as_int32(); }
+  bool has_mode() const { return at<6>().valid(); }
+  uint32_t mode() const { return at<6>().as_uint32(); }
+  bool has_md_needed() const { return at<7>().valid(); }
+  int32_t md_needed() const { return at<7>().as_int32(); }
+};
+
+class Ext4DaReserveSpaceFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4DaReserveSpaceFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kIBlocksFieldNumber = 3,
+    kReservedDataBlocksFieldNumber = 4,
+    kReservedMetaBlocksFieldNumber = 5,
+    kModeFieldNumber = 6,
+    kMdNeededFieldNumber = 7,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaReserveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaReserveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaReserveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IBlocks kIBlocks() { return {}; }
+  void set_i_blocks(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IBlocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReservedDataBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DaReserveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReservedDataBlocks kReservedDataBlocks() { return {}; }
+  void set_reserved_data_blocks(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReservedDataBlocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReservedMetaBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DaReserveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReservedMetaBlocks kReservedMetaBlocks() { return {}; }
+  void set_reserved_meta_blocks(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReservedMetaBlocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4DaReserveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MdNeeded =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DaReserveSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MdNeeded kMdNeeded() { return {}; }
+  void set_md_needed(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MdNeeded::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4DaReleaseSpaceFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4DaReleaseSpaceFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4DaReleaseSpaceFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4DaReleaseSpaceFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_i_blocks() const { return at<3>().valid(); }
+  uint64_t i_blocks() const { return at<3>().as_uint64(); }
+  bool has_freed_blocks() const { return at<4>().valid(); }
+  int32_t freed_blocks() const { return at<4>().as_int32(); }
+  bool has_reserved_data_blocks() const { return at<5>().valid(); }
+  int32_t reserved_data_blocks() const { return at<5>().as_int32(); }
+  bool has_reserved_meta_blocks() const { return at<6>().valid(); }
+  int32_t reserved_meta_blocks() const { return at<6>().as_int32(); }
+  bool has_allocated_meta_blocks() const { return at<7>().valid(); }
+  int32_t allocated_meta_blocks() const { return at<7>().as_int32(); }
+  bool has_mode() const { return at<8>().valid(); }
+  uint32_t mode() const { return at<8>().as_uint32(); }
+};
+
+class Ext4DaReleaseSpaceFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4DaReleaseSpaceFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kIBlocksFieldNumber = 3,
+    kFreedBlocksFieldNumber = 4,
+    kReservedDataBlocksFieldNumber = 5,
+    kReservedMetaBlocksFieldNumber = 6,
+    kAllocatedMetaBlocksFieldNumber = 7,
+    kModeFieldNumber = 8,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaReleaseSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaReleaseSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaReleaseSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IBlocks kIBlocks() { return {}; }
+  void set_i_blocks(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IBlocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FreedBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DaReleaseSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FreedBlocks kFreedBlocks() { return {}; }
+  void set_freed_blocks(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FreedBlocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReservedDataBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DaReleaseSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReservedDataBlocks kReservedDataBlocks() { return {}; }
+  void set_reserved_data_blocks(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReservedDataBlocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReservedMetaBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DaReleaseSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReservedMetaBlocks kReservedMetaBlocks() { return {}; }
+  void set_reserved_meta_blocks(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReservedMetaBlocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AllocatedMetaBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4DaReleaseSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllocatedMetaBlocks kAllocatedMetaBlocks() { return {}; }
+  void set_allocated_meta_blocks(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AllocatedMetaBlocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4DaReleaseSpaceFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4CollapseRangeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4CollapseRangeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4CollapseRangeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4CollapseRangeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_offset() const { return at<3>().valid(); }
+  int64_t offset() const { return at<3>().as_int64(); }
+  bool has_len() const { return at<4>().valid(); }
+  int64_t len() const { return at<4>().as_int64(); }
+};
+
+class Ext4CollapseRangeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4CollapseRangeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kOffsetFieldNumber = 3,
+    kLenFieldNumber = 4,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4CollapseRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4CollapseRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Offset =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4CollapseRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Offset kOffset() { return {}; }
+  void set_offset(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Offset::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4CollapseRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4BeginOrderedTruncateFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4BeginOrderedTruncateFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4BeginOrderedTruncateFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4BeginOrderedTruncateFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_new_size() const { return at<3>().valid(); }
+  int64_t new_size() const { return at<3>().as_int64(); }
+};
+
+class Ext4BeginOrderedTruncateFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4BeginOrderedTruncateFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kNewSizeFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4BeginOrderedTruncateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4BeginOrderedTruncateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NewSize =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4BeginOrderedTruncateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NewSize kNewSize() { return {}; }
+  void set_new_size(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NewSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4AllocateInodeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4AllocateInodeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4AllocateInodeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4AllocateInodeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_dir() const { return at<3>().valid(); }
+  uint64_t dir() const { return at<3>().as_uint64(); }
+  bool has_mode() const { return at<4>().valid(); }
+  uint32_t mode() const { return at<4>().as_uint32(); }
+};
+
+class Ext4AllocateInodeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4AllocateInodeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kDirFieldNumber = 3,
+    kModeFieldNumber = 4,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4AllocateInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4AllocateInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Dir =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4AllocateInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dir kDir() { return {}; }
+  void set_dir(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dir::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4AllocateInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4AllocateBlocksFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/11, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4AllocateBlocksFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4AllocateBlocksFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4AllocateBlocksFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_block() const { return at<3>().valid(); }
+  uint64_t block() const { return at<3>().as_uint64(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint32_t len() const { return at<4>().as_uint32(); }
+  bool has_logical() const { return at<5>().valid(); }
+  uint32_t logical() const { return at<5>().as_uint32(); }
+  bool has_lleft() const { return at<6>().valid(); }
+  uint32_t lleft() const { return at<6>().as_uint32(); }
+  bool has_lright() const { return at<7>().valid(); }
+  uint32_t lright() const { return at<7>().as_uint32(); }
+  bool has_goal() const { return at<8>().valid(); }
+  uint64_t goal() const { return at<8>().as_uint64(); }
+  bool has_pleft() const { return at<9>().valid(); }
+  uint64_t pleft() const { return at<9>().as_uint64(); }
+  bool has_pright() const { return at<10>().valid(); }
+  uint64_t pright() const { return at<10>().as_uint64(); }
+  bool has_flags() const { return at<11>().valid(); }
+  uint32_t flags() const { return at<11>().as_uint32(); }
+};
+
+class Ext4AllocateBlocksFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4AllocateBlocksFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kBlockFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kLogicalFieldNumber = 5,
+    kLleftFieldNumber = 6,
+    kLrightFieldNumber = 7,
+    kGoalFieldNumber = 8,
+    kPleftFieldNumber = 9,
+    kPrightFieldNumber = 10,
+    kFlagsFieldNumber = 11,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4AllocateBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4AllocateBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Block =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4AllocateBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Block kBlock() { return {}; }
+  void set_block(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Block::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4AllocateBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Logical =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4AllocateBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Logical kLogical() { return {}; }
+  void set_logical(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Logical::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lleft =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4AllocateBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lleft kLleft() { return {}; }
+  void set_lleft(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lleft::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lright =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4AllocateBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lright kLright() { return {}; }
+  void set_lright(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lright::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Goal =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4AllocateBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Goal kGoal() { return {}; }
+  void set_goal(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Goal::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pleft =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4AllocateBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pleft kPleft() { return {}; }
+  void set_pleft(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pleft::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pright =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4AllocateBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pright kPright() { return {}; }
+  void set_pright(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pright::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4AllocateBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4AllocDaBlocksFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4AllocDaBlocksFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4AllocDaBlocksFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4AllocDaBlocksFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_data_blocks() const { return at<3>().valid(); }
+  uint32_t data_blocks() const { return at<3>().as_uint32(); }
+  bool has_meta_blocks() const { return at<4>().valid(); }
+  uint32_t meta_blocks() const { return at<4>().as_uint32(); }
+};
+
+class Ext4AllocDaBlocksFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4AllocDaBlocksFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kDataBlocksFieldNumber = 3,
+    kMetaBlocksFieldNumber = 4,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4AllocDaBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4AllocDaBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DataBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4AllocDaBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DataBlocks kDataBlocks() { return {}; }
+  void set_data_blocks(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DataBlocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MetaBlocks =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4AllocDaBlocksFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MetaBlocks kMetaBlocks() { return {}; }
+  void set_meta_blocks(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MetaBlocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4SyncFileExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4SyncFileExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4SyncFileExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4SyncFileExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_ret() const { return at<3>().valid(); }
+  int32_t ret() const { return at<3>().as_int32(); }
+};
+
+class Ext4SyncFileExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4SyncFileExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kRetFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4SyncFileExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4SyncFileExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4SyncFileExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4SyncFileEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4SyncFileEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4SyncFileEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4SyncFileEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_parent() const { return at<3>().valid(); }
+  uint64_t parent() const { return at<3>().as_uint64(); }
+  bool has_datasync() const { return at<4>().valid(); }
+  int32_t datasync() const { return at<4>().as_int32(); }
+};
+
+class Ext4SyncFileEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4SyncFileEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kParentFieldNumber = 3,
+    kDatasyncFieldNumber = 4,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4SyncFileEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4SyncFileEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Parent =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4SyncFileEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Parent kParent() { return {}; }
+  void set_parent(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Parent::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Datasync =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      Ext4SyncFileEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Datasync kDatasync() { return {}; }
+  void set_datasync(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Datasync::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4DaWriteEndFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4DaWriteEndFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4DaWriteEndFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4DaWriteEndFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pos() const { return at<3>().valid(); }
+  int64_t pos() const { return at<3>().as_int64(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint32_t len() const { return at<4>().as_uint32(); }
+  bool has_copied() const { return at<5>().valid(); }
+  uint32_t copied() const { return at<5>().as_uint32(); }
+};
+
+class Ext4DaWriteEndFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4DaWriteEndFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPosFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kCopiedFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaWriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaWriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pos =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4DaWriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pos kPos() { return {}; }
+  void set_pos(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pos::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4DaWriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Copied =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4DaWriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Copied kCopied() { return {}; }
+  void set_copied(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Copied::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Ext4DaWriteBeginFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Ext4DaWriteBeginFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Ext4DaWriteBeginFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Ext4DaWriteBeginFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pos() const { return at<3>().valid(); }
+  int64_t pos() const { return at<3>().as_int64(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint32_t len() const { return at<4>().as_uint32(); }
+  bool has_flags() const { return at<5>().valid(); }
+  uint32_t flags() const { return at<5>().as_uint32(); }
+};
+
+class Ext4DaWriteBeginFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = Ext4DaWriteBeginFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPosFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kFlagsFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaWriteBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Ext4DaWriteBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pos =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      Ext4DaWriteBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pos kPos() { return {}; }
+  void set_pos(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pos::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4DaWriteBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Ext4DaWriteBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/f2fs.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_F2FS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_F2FS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class F2fsWriteEndFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsWriteEndFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsWriteEndFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsWriteEndFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pos() const { return at<3>().valid(); }
+  int64_t pos() const { return at<3>().as_int64(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint32_t len() const { return at<4>().as_uint32(); }
+  bool has_copied() const { return at<5>().valid(); }
+  uint32_t copied() const { return at<5>().as_uint32(); }
+};
+
+class F2fsWriteEndFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsWriteEndFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPosFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kCopiedFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsWriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsWriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pos =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      F2fsWriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pos kPos() { return {}; }
+  void set_pos(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pos::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsWriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Copied =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsWriteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Copied kCopied() { return {}; }
+  void set_copied(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Copied::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsWriteCheckpointFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsWriteCheckpointFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsWriteCheckpointFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsWriteCheckpointFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_is_umount() const { return at<2>().valid(); }
+  uint32_t is_umount() const { return at<2>().as_uint32(); }
+  bool has_msg() const { return at<3>().valid(); }
+  ::protozero::ConstChars msg() const { return at<3>().as_string(); }
+};
+
+class F2fsWriteCheckpointFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsWriteCheckpointFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kIsUmountFieldNumber = 2,
+    kMsgFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsWriteCheckpointFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IsUmount =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsWriteCheckpointFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IsUmount kIsUmount() { return {}; }
+  void set_is_umount(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IsUmount::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Msg =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      F2fsWriteCheckpointFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Msg kMsg() { return {}; }
+  void set_msg(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Msg::kFieldId, data, size);
+  }
+  void set_msg(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Msg::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsWriteBeginFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsWriteBeginFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsWriteBeginFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsWriteBeginFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pos() const { return at<3>().valid(); }
+  int64_t pos() const { return at<3>().as_int64(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint32_t len() const { return at<4>().as_uint32(); }
+  bool has_flags() const { return at<5>().valid(); }
+  uint32_t flags() const { return at<5>().as_uint32(); }
+};
+
+class F2fsWriteBeginFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsWriteBeginFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPosFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kFlagsFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsWriteBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsWriteBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pos =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      F2fsWriteBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pos kPos() { return {}; }
+  void set_pos(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pos::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsWriteBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsWriteBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsVmPageMkwriteFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsVmPageMkwriteFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsVmPageMkwriteFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsVmPageMkwriteFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_type() const { return at<3>().valid(); }
+  int32_t type() const { return at<3>().as_int32(); }
+  bool has_dir() const { return at<4>().valid(); }
+  int32_t dir() const { return at<4>().as_int32(); }
+  bool has_index() const { return at<5>().valid(); }
+  uint64_t index() const { return at<5>().as_uint64(); }
+  bool has_dirty() const { return at<6>().valid(); }
+  int32_t dirty() const { return at<6>().as_int32(); }
+};
+
+class F2fsVmPageMkwriteFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsVmPageMkwriteFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kTypeFieldNumber = 3,
+    kDirFieldNumber = 4,
+    kIndexFieldNumber = 5,
+    kDirtyFieldNumber = 6,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsVmPageMkwriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsVmPageMkwriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsVmPageMkwriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Dir =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsVmPageMkwriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dir kDir() { return {}; }
+  void set_dir(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dir::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Index =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsVmPageMkwriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Index kIndex() { return {}; }
+  void set_index(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Index::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Dirty =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsVmPageMkwriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dirty kDirty() { return {}; }
+  void set_dirty(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dirty::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsUnlinkExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsUnlinkExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsUnlinkExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsUnlinkExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_ret() const { return at<3>().valid(); }
+  int32_t ret() const { return at<3>().as_int32(); }
+};
+
+class F2fsUnlinkExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsUnlinkExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kRetFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsUnlinkExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsUnlinkExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsUnlinkExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsUnlinkEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsUnlinkEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsUnlinkEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsUnlinkEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_size() const { return at<3>().valid(); }
+  int64_t size() const { return at<3>().as_int64(); }
+  bool has_blocks() const { return at<4>().valid(); }
+  uint64_t blocks() const { return at<4>().as_uint64(); }
+  bool has_name() const { return at<5>().valid(); }
+  ::protozero::ConstChars name() const { return at<5>().as_string(); }
+};
+
+class F2fsUnlinkEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsUnlinkEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kSizeFieldNumber = 3,
+    kBlocksFieldNumber = 4,
+    kNameFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsUnlinkEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsUnlinkEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Size =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      F2fsUnlinkEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Size kSize() { return {}; }
+  void set_size(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Size::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Blocks =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsUnlinkEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Blocks kBlocks() { return {}; }
+  void set_blocks(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Blocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      F2fsUnlinkEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsTruncatePartialNodesFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsTruncatePartialNodesFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsTruncatePartialNodesFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsTruncatePartialNodesFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_nid() const { return at<3>().valid(); }
+  uint32_t nid() const { return at<3>().as_uint32(); }
+  bool has_depth() const { return at<4>().valid(); }
+  int32_t depth() const { return at<4>().as_int32(); }
+  bool has_err() const { return at<5>().valid(); }
+  int32_t err() const { return at<5>().as_int32(); }
+};
+
+class F2fsTruncatePartialNodesFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsTruncatePartialNodesFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kNidFieldNumber = 3,
+    kDepthFieldNumber = 4,
+    kErrFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncatePartialNodesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncatePartialNodesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Nid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsTruncatePartialNodesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nid kNid() { return {}; }
+  void set_nid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Depth =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsTruncatePartialNodesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Depth kDepth() { return {}; }
+  void set_depth(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Depth::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Err =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsTruncatePartialNodesFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Err kErr() { return {}; }
+  void set_err(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Err::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsTruncateNodesExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsTruncateNodesExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsTruncateNodesExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsTruncateNodesExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_ret() const { return at<3>().valid(); }
+  int32_t ret() const { return at<3>().as_int32(); }
+};
+
+class F2fsTruncateNodesExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsTruncateNodesExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kRetFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateNodesExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateNodesExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsTruncateNodesExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsTruncateNodesEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsTruncateNodesEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsTruncateNodesEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsTruncateNodesEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_nid() const { return at<3>().valid(); }
+  uint32_t nid() const { return at<3>().as_uint32(); }
+  bool has_blk_addr() const { return at<4>().valid(); }
+  uint32_t blk_addr() const { return at<4>().as_uint32(); }
+};
+
+class F2fsTruncateNodesEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsTruncateNodesEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kNidFieldNumber = 3,
+    kBlkAddrFieldNumber = 4,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateNodesEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateNodesEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Nid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsTruncateNodesEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nid kNid() { return {}; }
+  void set_nid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BlkAddr =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsTruncateNodesEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlkAddr kBlkAddr() { return {}; }
+  void set_blk_addr(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BlkAddr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsTruncateNodeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsTruncateNodeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsTruncateNodeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsTruncateNodeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_nid() const { return at<3>().valid(); }
+  uint32_t nid() const { return at<3>().as_uint32(); }
+  bool has_blk_addr() const { return at<4>().valid(); }
+  uint32_t blk_addr() const { return at<4>().as_uint32(); }
+};
+
+class F2fsTruncateNodeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsTruncateNodeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kNidFieldNumber = 3,
+    kBlkAddrFieldNumber = 4,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateNodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateNodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Nid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsTruncateNodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nid kNid() { return {}; }
+  void set_nid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BlkAddr =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsTruncateNodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlkAddr kBlkAddr() { return {}; }
+  void set_blk_addr(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BlkAddr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsTruncateInodeBlocksExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsTruncateInodeBlocksExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsTruncateInodeBlocksExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsTruncateInodeBlocksExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_ret() const { return at<3>().valid(); }
+  int32_t ret() const { return at<3>().as_int32(); }
+};
+
+class F2fsTruncateInodeBlocksExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsTruncateInodeBlocksExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kRetFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateInodeBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateInodeBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsTruncateInodeBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsTruncateInodeBlocksEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsTruncateInodeBlocksEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsTruncateInodeBlocksEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsTruncateInodeBlocksEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_size() const { return at<3>().valid(); }
+  int64_t size() const { return at<3>().as_int64(); }
+  bool has_blocks() const { return at<4>().valid(); }
+  uint64_t blocks() const { return at<4>().as_uint64(); }
+  bool has_from() const { return at<5>().valid(); }
+  uint64_t from() const { return at<5>().as_uint64(); }
+};
+
+class F2fsTruncateInodeBlocksEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsTruncateInodeBlocksEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kSizeFieldNumber = 3,
+    kBlocksFieldNumber = 4,
+    kFromFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateInodeBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateInodeBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Size =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      F2fsTruncateInodeBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Size kSize() { return {}; }
+  void set_size(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Size::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Blocks =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateInodeBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Blocks kBlocks() { return {}; }
+  void set_blocks(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Blocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_From =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateInodeBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_From kFrom() { return {}; }
+  void set_from(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_From::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsTruncateDataBlocksRangeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsTruncateDataBlocksRangeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsTruncateDataBlocksRangeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsTruncateDataBlocksRangeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_nid() const { return at<3>().valid(); }
+  uint32_t nid() const { return at<3>().as_uint32(); }
+  bool has_ofs() const { return at<4>().valid(); }
+  uint32_t ofs() const { return at<4>().as_uint32(); }
+  bool has_free() const { return at<5>().valid(); }
+  int32_t free() const { return at<5>().as_int32(); }
+};
+
+class F2fsTruncateDataBlocksRangeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsTruncateDataBlocksRangeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kNidFieldNumber = 3,
+    kOfsFieldNumber = 4,
+    kFreeFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateDataBlocksRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateDataBlocksRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Nid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsTruncateDataBlocksRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nid kNid() { return {}; }
+  void set_nid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ofs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsTruncateDataBlocksRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ofs kOfs() { return {}; }
+  void set_ofs(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ofs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Free =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsTruncateDataBlocksRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Free kFree() { return {}; }
+  void set_free(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Free::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsTruncateBlocksExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsTruncateBlocksExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsTruncateBlocksExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsTruncateBlocksExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_ret() const { return at<3>().valid(); }
+  int32_t ret() const { return at<3>().as_int32(); }
+};
+
+class F2fsTruncateBlocksExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsTruncateBlocksExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kRetFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsTruncateBlocksExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsTruncateBlocksEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsTruncateBlocksEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsTruncateBlocksEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsTruncateBlocksEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_size() const { return at<3>().valid(); }
+  int64_t size() const { return at<3>().as_int64(); }
+  bool has_blocks() const { return at<4>().valid(); }
+  uint64_t blocks() const { return at<4>().as_uint64(); }
+  bool has_from() const { return at<5>().valid(); }
+  uint64_t from() const { return at<5>().as_uint64(); }
+};
+
+class F2fsTruncateBlocksEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsTruncateBlocksEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kSizeFieldNumber = 3,
+    kBlocksFieldNumber = 4,
+    kFromFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Size =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      F2fsTruncateBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Size kSize() { return {}; }
+  void set_size(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Size::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Blocks =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Blocks kBlocks() { return {}; }
+  void set_blocks(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Blocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_From =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateBlocksEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_From kFrom() { return {}; }
+  void set_from(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_From::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsTruncateFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsTruncateFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsTruncateFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsTruncateFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pino() const { return at<3>().valid(); }
+  uint64_t pino() const { return at<3>().as_uint64(); }
+  bool has_mode() const { return at<4>().valid(); }
+  uint32_t mode() const { return at<4>().as_uint32(); }
+  bool has_size() const { return at<5>().valid(); }
+  int64_t size() const { return at<5>().as_int64(); }
+  bool has_nlink() const { return at<6>().valid(); }
+  uint32_t nlink() const { return at<6>().as_uint32(); }
+  bool has_blocks() const { return at<7>().valid(); }
+  uint64_t blocks() const { return at<7>().as_uint64(); }
+  bool has_advise() const { return at<8>().valid(); }
+  uint32_t advise() const { return at<8>().as_uint32(); }
+};
+
+class F2fsTruncateFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsTruncateFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPinoFieldNumber = 3,
+    kModeFieldNumber = 4,
+    kSizeFieldNumber = 5,
+    kNlinkFieldNumber = 6,
+    kBlocksFieldNumber = 7,
+    kAdviseFieldNumber = 8,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pino =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pino kPino() { return {}; }
+  void set_pino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsTruncateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Size =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      F2fsTruncateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Size kSize() { return {}; }
+  void set_size(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Size::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Nlink =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsTruncateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nlink kNlink() { return {}; }
+  void set_nlink(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nlink::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Blocks =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsTruncateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Blocks kBlocks() { return {}; }
+  void set_blocks(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Blocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Advise =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsTruncateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Advise kAdvise() { return {}; }
+  void set_advise(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Advise::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsSyncFsFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsSyncFsFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsSyncFsFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsSyncFsFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_dirty() const { return at<2>().valid(); }
+  int32_t dirty() const { return at<2>().as_int32(); }
+  bool has_wait() const { return at<3>().valid(); }
+  int32_t wait() const { return at<3>().as_int32(); }
+};
+
+class F2fsSyncFsFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsSyncFsFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kDirtyFieldNumber = 2,
+    kWaitFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsSyncFsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Dirty =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsSyncFsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dirty kDirty() { return {}; }
+  void set_dirty(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dirty::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Wait =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsSyncFsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Wait kWait() { return {}; }
+  void set_wait(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Wait::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsSyncFileExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsSyncFileExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsSyncFileExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsSyncFileExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_need_cp() const { return at<3>().valid(); }
+  uint32_t need_cp() const { return at<3>().as_uint32(); }
+  bool has_datasync() const { return at<4>().valid(); }
+  int32_t datasync() const { return at<4>().as_int32(); }
+  bool has_ret() const { return at<5>().valid(); }
+  int32_t ret() const { return at<5>().as_int32(); }
+};
+
+class F2fsSyncFileExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsSyncFileExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kNeedCpFieldNumber = 3,
+    kDatasyncFieldNumber = 4,
+    kRetFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsSyncFileExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsSyncFileExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NeedCp =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsSyncFileExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NeedCp kNeedCp() { return {}; }
+  void set_need_cp(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NeedCp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Datasync =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsSyncFileExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Datasync kDatasync() { return {}; }
+  void set_datasync(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Datasync::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsSyncFileExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsSyncFileEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsSyncFileEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsSyncFileEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsSyncFileEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pino() const { return at<3>().valid(); }
+  uint64_t pino() const { return at<3>().as_uint64(); }
+  bool has_mode() const { return at<4>().valid(); }
+  uint32_t mode() const { return at<4>().as_uint32(); }
+  bool has_size() const { return at<5>().valid(); }
+  int64_t size() const { return at<5>().as_int64(); }
+  bool has_nlink() const { return at<6>().valid(); }
+  uint32_t nlink() const { return at<6>().as_uint32(); }
+  bool has_blocks() const { return at<7>().valid(); }
+  uint64_t blocks() const { return at<7>().as_uint64(); }
+  bool has_advise() const { return at<8>().valid(); }
+  uint32_t advise() const { return at<8>().as_uint32(); }
+};
+
+class F2fsSyncFileEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsSyncFileEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPinoFieldNumber = 3,
+    kModeFieldNumber = 4,
+    kSizeFieldNumber = 5,
+    kNlinkFieldNumber = 6,
+    kBlocksFieldNumber = 7,
+    kAdviseFieldNumber = 8,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsSyncFileEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsSyncFileEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pino =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsSyncFileEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pino kPino() { return {}; }
+  void set_pino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsSyncFileEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Size =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      F2fsSyncFileEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Size kSize() { return {}; }
+  void set_size(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Size::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Nlink =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsSyncFileEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nlink kNlink() { return {}; }
+  void set_nlink(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nlink::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Blocks =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsSyncFileEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Blocks kBlocks() { return {}; }
+  void set_blocks(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Blocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Advise =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsSyncFileEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Advise kAdvise() { return {}; }
+  void set_advise(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Advise::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsSubmitWritePageFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsSubmitWritePageFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsSubmitWritePageFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsSubmitWritePageFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_type() const { return at<3>().valid(); }
+  int32_t type() const { return at<3>().as_int32(); }
+  bool has_index() const { return at<4>().valid(); }
+  uint64_t index() const { return at<4>().as_uint64(); }
+  bool has_block() const { return at<5>().valid(); }
+  uint32_t block() const { return at<5>().as_uint32(); }
+};
+
+class F2fsSubmitWritePageFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsSubmitWritePageFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kTypeFieldNumber = 3,
+    kIndexFieldNumber = 4,
+    kBlockFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsSubmitWritePageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsSubmitWritePageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsSubmitWritePageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Index =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsSubmitWritePageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Index kIndex() { return {}; }
+  void set_index(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Index::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Block =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsSubmitWritePageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Block kBlock() { return {}; }
+  void set_block(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Block::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsSetPageDirtyFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsSetPageDirtyFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsSetPageDirtyFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsSetPageDirtyFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_type() const { return at<3>().valid(); }
+  int32_t type() const { return at<3>().as_int32(); }
+  bool has_dir() const { return at<4>().valid(); }
+  int32_t dir() const { return at<4>().as_int32(); }
+  bool has_index() const { return at<5>().valid(); }
+  uint64_t index() const { return at<5>().as_uint64(); }
+  bool has_dirty() const { return at<6>().valid(); }
+  int32_t dirty() const { return at<6>().as_int32(); }
+};
+
+class F2fsSetPageDirtyFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsSetPageDirtyFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kTypeFieldNumber = 3,
+    kDirFieldNumber = 4,
+    kIndexFieldNumber = 5,
+    kDirtyFieldNumber = 6,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsSetPageDirtyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsSetPageDirtyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsSetPageDirtyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Dir =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsSetPageDirtyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dir kDir() { return {}; }
+  void set_dir(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dir::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Index =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsSetPageDirtyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Index kIndex() { return {}; }
+  void set_index(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Index::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Dirty =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsSetPageDirtyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dirty kDirty() { return {}; }
+  void set_dirty(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dirty::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsReserveNewBlockFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsReserveNewBlockFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsReserveNewBlockFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsReserveNewBlockFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_nid() const { return at<2>().valid(); }
+  uint32_t nid() const { return at<2>().as_uint32(); }
+  bool has_ofs_in_node() const { return at<3>().valid(); }
+  uint32_t ofs_in_node() const { return at<3>().as_uint32(); }
+};
+
+class F2fsReserveNewBlockFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsReserveNewBlockFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kNidFieldNumber = 2,
+    kOfsInNodeFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsReserveNewBlockFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Nid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsReserveNewBlockFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nid kNid() { return {}; }
+  void set_nid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OfsInNode =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsReserveNewBlockFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OfsInNode kOfsInNode() { return {}; }
+  void set_ofs_in_node(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OfsInNode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsReadpageFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsReadpageFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsReadpageFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsReadpageFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_index() const { return at<3>().valid(); }
+  uint64_t index() const { return at<3>().as_uint64(); }
+  bool has_blkaddr() const { return at<4>().valid(); }
+  uint64_t blkaddr() const { return at<4>().as_uint64(); }
+  bool has_type() const { return at<5>().valid(); }
+  int32_t type() const { return at<5>().as_int32(); }
+};
+
+class F2fsReadpageFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsReadpageFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kIndexFieldNumber = 3,
+    kBlkaddrFieldNumber = 4,
+    kTypeFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsReadpageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsReadpageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Index =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsReadpageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Index kIndex() { return {}; }
+  void set_index(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Index::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Blkaddr =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsReadpageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Blkaddr kBlkaddr() { return {}; }
+  void set_blkaddr(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Blkaddr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsReadpageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsNewInodeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsNewInodeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsNewInodeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsNewInodeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_ret() const { return at<3>().valid(); }
+  int32_t ret() const { return at<3>().as_int32(); }
+};
+
+class F2fsNewInodeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsNewInodeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kRetFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsNewInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsNewInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsNewInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsIgetExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsIgetExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsIgetExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsIgetExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_ret() const { return at<3>().valid(); }
+  int32_t ret() const { return at<3>().as_int32(); }
+};
+
+class F2fsIgetExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsIgetExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kRetFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsIgetExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsIgetExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsIgetExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsIgetFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsIgetFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsIgetFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsIgetFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pino() const { return at<3>().valid(); }
+  uint64_t pino() const { return at<3>().as_uint64(); }
+  bool has_mode() const { return at<4>().valid(); }
+  uint32_t mode() const { return at<4>().as_uint32(); }
+  bool has_size() const { return at<5>().valid(); }
+  int64_t size() const { return at<5>().as_int64(); }
+  bool has_nlink() const { return at<6>().valid(); }
+  uint32_t nlink() const { return at<6>().as_uint32(); }
+  bool has_blocks() const { return at<7>().valid(); }
+  uint64_t blocks() const { return at<7>().as_uint64(); }
+  bool has_advise() const { return at<8>().valid(); }
+  uint32_t advise() const { return at<8>().as_uint32(); }
+};
+
+class F2fsIgetFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsIgetFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPinoFieldNumber = 3,
+    kModeFieldNumber = 4,
+    kSizeFieldNumber = 5,
+    kNlinkFieldNumber = 6,
+    kBlocksFieldNumber = 7,
+    kAdviseFieldNumber = 8,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsIgetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsIgetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pino =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsIgetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pino kPino() { return {}; }
+  void set_pino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsIgetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Size =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      F2fsIgetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Size kSize() { return {}; }
+  void set_size(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Size::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Nlink =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsIgetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nlink kNlink() { return {}; }
+  void set_nlink(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nlink::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Blocks =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsIgetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Blocks kBlocks() { return {}; }
+  void set_blocks(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Blocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Advise =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsIgetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Advise kAdvise() { return {}; }
+  void set_advise(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Advise::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsGetVictimFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/10, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsGetVictimFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsGetVictimFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsGetVictimFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_type() const { return at<2>().valid(); }
+  int32_t type() const { return at<2>().as_int32(); }
+  bool has_gc_type() const { return at<3>().valid(); }
+  int32_t gc_type() const { return at<3>().as_int32(); }
+  bool has_alloc_mode() const { return at<4>().valid(); }
+  int32_t alloc_mode() const { return at<4>().as_int32(); }
+  bool has_gc_mode() const { return at<5>().valid(); }
+  int32_t gc_mode() const { return at<5>().as_int32(); }
+  bool has_victim() const { return at<6>().valid(); }
+  uint32_t victim() const { return at<6>().as_uint32(); }
+  bool has_ofs_unit() const { return at<7>().valid(); }
+  uint32_t ofs_unit() const { return at<7>().as_uint32(); }
+  bool has_pre_victim() const { return at<8>().valid(); }
+  uint32_t pre_victim() const { return at<8>().as_uint32(); }
+  bool has_prefree() const { return at<9>().valid(); }
+  uint32_t prefree() const { return at<9>().as_uint32(); }
+  bool has_free() const { return at<10>().valid(); }
+  uint32_t free() const { return at<10>().as_uint32(); }
+};
+
+class F2fsGetVictimFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsGetVictimFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kTypeFieldNumber = 2,
+    kGcTypeFieldNumber = 3,
+    kAllocModeFieldNumber = 4,
+    kGcModeFieldNumber = 5,
+    kVictimFieldNumber = 6,
+    kOfsUnitFieldNumber = 7,
+    kPreVictimFieldNumber = 8,
+    kPrefreeFieldNumber = 9,
+    kFreeFieldNumber = 10,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsGetVictimFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsGetVictimFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GcType =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsGetVictimFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GcType kGcType() { return {}; }
+  void set_gc_type(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GcType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AllocMode =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsGetVictimFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllocMode kAllocMode() { return {}; }
+  void set_alloc_mode(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AllocMode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GcMode =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsGetVictimFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GcMode kGcMode() { return {}; }
+  void set_gc_mode(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GcMode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Victim =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsGetVictimFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Victim kVictim() { return {}; }
+  void set_victim(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Victim::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OfsUnit =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsGetVictimFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OfsUnit kOfsUnit() { return {}; }
+  void set_ofs_unit(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OfsUnit::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PreVictim =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsGetVictimFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PreVictim kPreVictim() { return {}; }
+  void set_pre_victim(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PreVictim::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Prefree =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsGetVictimFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Prefree kPrefree() { return {}; }
+  void set_prefree(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Prefree::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Free =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsGetVictimFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Free kFree() { return {}; }
+  void set_free(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Free::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsGetDataBlockFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsGetDataBlockFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsGetDataBlockFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsGetDataBlockFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_iblock() const { return at<3>().valid(); }
+  uint64_t iblock() const { return at<3>().as_uint64(); }
+  bool has_bh_start() const { return at<4>().valid(); }
+  uint64_t bh_start() const { return at<4>().as_uint64(); }
+  bool has_bh_size() const { return at<5>().valid(); }
+  uint64_t bh_size() const { return at<5>().as_uint64(); }
+  bool has_ret() const { return at<6>().valid(); }
+  int32_t ret() const { return at<6>().as_int32(); }
+};
+
+class F2fsGetDataBlockFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsGetDataBlockFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kIblockFieldNumber = 3,
+    kBhStartFieldNumber = 4,
+    kBhSizeFieldNumber = 5,
+    kRetFieldNumber = 6,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsGetDataBlockFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsGetDataBlockFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Iblock =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsGetDataBlockFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Iblock kIblock() { return {}; }
+  void set_iblock(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Iblock::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BhStart =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsGetDataBlockFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BhStart kBhStart() { return {}; }
+  void set_bh_start(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BhStart::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BhSize =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsGetDataBlockFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BhSize kBhSize() { return {}; }
+  void set_bh_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BhSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsGetDataBlockFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsFallocateFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsFallocateFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsFallocateFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsFallocateFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_mode() const { return at<3>().valid(); }
+  int32_t mode() const { return at<3>().as_int32(); }
+  bool has_offset() const { return at<4>().valid(); }
+  int64_t offset() const { return at<4>().as_int64(); }
+  bool has_len() const { return at<5>().valid(); }
+  int64_t len() const { return at<5>().as_int64(); }
+  bool has_size() const { return at<6>().valid(); }
+  int64_t size() const { return at<6>().as_int64(); }
+  bool has_blocks() const { return at<7>().valid(); }
+  uint64_t blocks() const { return at<7>().as_uint64(); }
+  bool has_ret() const { return at<8>().valid(); }
+  int32_t ret() const { return at<8>().as_int32(); }
+};
+
+class F2fsFallocateFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsFallocateFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kModeFieldNumber = 3,
+    kOffsetFieldNumber = 4,
+    kLenFieldNumber = 5,
+    kSizeFieldNumber = 6,
+    kBlocksFieldNumber = 7,
+    kRetFieldNumber = 8,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsFallocateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsFallocateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsFallocateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Offset =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      F2fsFallocateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Offset kOffset() { return {}; }
+  void set_offset(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Offset::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      F2fsFallocateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Size =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      F2fsFallocateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Size kSize() { return {}; }
+  void set_size(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Size::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Blocks =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsFallocateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Blocks kBlocks() { return {}; }
+  void set_blocks(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Blocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsFallocateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsEvictInodeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsEvictInodeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsEvictInodeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsEvictInodeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_ino() const { return at<2>().valid(); }
+  uint64_t ino() const { return at<2>().as_uint64(); }
+  bool has_pino() const { return at<3>().valid(); }
+  uint64_t pino() const { return at<3>().as_uint64(); }
+  bool has_mode() const { return at<4>().valid(); }
+  uint32_t mode() const { return at<4>().as_uint32(); }
+  bool has_size() const { return at<5>().valid(); }
+  int64_t size() const { return at<5>().as_int64(); }
+  bool has_nlink() const { return at<6>().valid(); }
+  uint32_t nlink() const { return at<6>().as_uint32(); }
+  bool has_blocks() const { return at<7>().valid(); }
+  uint64_t blocks() const { return at<7>().as_uint64(); }
+  bool has_advise() const { return at<8>().valid(); }
+  uint32_t advise() const { return at<8>().as_uint32(); }
+};
+
+class F2fsEvictInodeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsEvictInodeFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kInoFieldNumber = 2,
+    kPinoFieldNumber = 3,
+    kModeFieldNumber = 4,
+    kSizeFieldNumber = 5,
+    kNlinkFieldNumber = 6,
+    kBlocksFieldNumber = 7,
+    kAdviseFieldNumber = 8,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsEvictInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ino =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsEvictInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ino kIno() { return {}; }
+  void set_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pino =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsEvictInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pino kPino() { return {}; }
+  void set_pino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pino::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsEvictInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Size =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      F2fsEvictInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Size kSize() { return {}; }
+  void set_size(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Size::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Nlink =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsEvictInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nlink kNlink() { return {}; }
+  void set_nlink(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nlink::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Blocks =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsEvictInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Blocks kBlocks() { return {}; }
+  void set_blocks(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Blocks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Advise =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsEvictInodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Advise kAdvise() { return {}; }
+  void set_advise(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Advise::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class F2fsDoSubmitBioFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  F2fsDoSubmitBioFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit F2fsDoSubmitBioFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit F2fsDoSubmitBioFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dev() const { return at<1>().valid(); }
+  uint64_t dev() const { return at<1>().as_uint64(); }
+  bool has_btype() const { return at<2>().valid(); }
+  int32_t btype() const { return at<2>().as_int32(); }
+  bool has_sync() const { return at<3>().valid(); }
+  uint32_t sync() const { return at<3>().as_uint32(); }
+  bool has_sector() const { return at<4>().valid(); }
+  uint64_t sector() const { return at<4>().as_uint64(); }
+  bool has_size() const { return at<5>().valid(); }
+  uint32_t size() const { return at<5>().as_uint32(); }
+};
+
+class F2fsDoSubmitBioFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = F2fsDoSubmitBioFtraceEvent_Decoder;
+  enum : int32_t {
+    kDevFieldNumber = 1,
+    kBtypeFieldNumber = 2,
+    kSyncFieldNumber = 3,
+    kSectorFieldNumber = 4,
+    kSizeFieldNumber = 5,
+  };
+
+  using FieldMetadata_Dev =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsDoSubmitBioFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dev kDev() { return {}; }
+  void set_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Btype =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      F2fsDoSubmitBioFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Btype kBtype() { return {}; }
+  void set_btype(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Btype::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sync =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsDoSubmitBioFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sync kSync() { return {}; }
+  void set_sync(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sync::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sector =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      F2fsDoSubmitBioFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sector kSector() { return {}; }
+  void set_sector(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sector::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Size =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      F2fsDoSubmitBioFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Size kSize() { return {}; }
+  void set_size(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Size::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/fastrpc.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_FASTRPC_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_FASTRPC_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class FastrpcDmaStatFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  FastrpcDmaStatFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FastrpcDmaStatFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FastrpcDmaStatFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_cid() const { return at<1>().valid(); }
+  int32_t cid() const { return at<1>().as_int32(); }
+  bool has_len() const { return at<2>().valid(); }
+  int64_t len() const { return at<2>().as_int64(); }
+  bool has_total_allocated() const { return at<3>().valid(); }
+  uint64_t total_allocated() const { return at<3>().as_uint64(); }
+};
+
+class FastrpcDmaStatFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = FastrpcDmaStatFtraceEvent_Decoder;
+  enum : int32_t {
+    kCidFieldNumber = 1,
+    kLenFieldNumber = 2,
+    kTotalAllocatedFieldNumber = 3,
+  };
+
+  using FieldMetadata_Cid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      FastrpcDmaStatFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cid kCid() { return {}; }
+  void set_cid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      FastrpcDmaStatFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TotalAllocated =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      FastrpcDmaStatFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TotalAllocated kTotalAllocated() { return {}; }
+  void set_total_allocated(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TotalAllocated::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/fence.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_FENCE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_FENCE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class FenceSignaledFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  FenceSignaledFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FenceSignaledFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FenceSignaledFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_context() const { return at<1>().valid(); }
+  uint32_t context() const { return at<1>().as_uint32(); }
+  bool has_driver() const { return at<2>().valid(); }
+  ::protozero::ConstChars driver() const { return at<2>().as_string(); }
+  bool has_seqno() const { return at<3>().valid(); }
+  uint32_t seqno() const { return at<3>().as_uint32(); }
+  bool has_timeline() const { return at<4>().valid(); }
+  ::protozero::ConstChars timeline() const { return at<4>().as_string(); }
+};
+
+class FenceSignaledFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = FenceSignaledFtraceEvent_Decoder;
+  enum : int32_t {
+    kContextFieldNumber = 1,
+    kDriverFieldNumber = 2,
+    kSeqnoFieldNumber = 3,
+    kTimelineFieldNumber = 4,
+  };
+
+  using FieldMetadata_Context =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      FenceSignaledFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Context kContext() { return {}; }
+  void set_context(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Context::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Driver =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FenceSignaledFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Driver kDriver() { return {}; }
+  void set_driver(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Driver::kFieldId, data, size);
+  }
+  void set_driver(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Driver::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Seqno =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      FenceSignaledFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Seqno kSeqno() { return {}; }
+  void set_seqno(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Seqno::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Timeline =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FenceSignaledFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Timeline kTimeline() { return {}; }
+  void set_timeline(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Timeline::kFieldId, data, size);
+  }
+  void set_timeline(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Timeline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class FenceEnableSignalFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  FenceEnableSignalFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FenceEnableSignalFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FenceEnableSignalFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_context() const { return at<1>().valid(); }
+  uint32_t context() const { return at<1>().as_uint32(); }
+  bool has_driver() const { return at<2>().valid(); }
+  ::protozero::ConstChars driver() const { return at<2>().as_string(); }
+  bool has_seqno() const { return at<3>().valid(); }
+  uint32_t seqno() const { return at<3>().as_uint32(); }
+  bool has_timeline() const { return at<4>().valid(); }
+  ::protozero::ConstChars timeline() const { return at<4>().as_string(); }
+};
+
+class FenceEnableSignalFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = FenceEnableSignalFtraceEvent_Decoder;
+  enum : int32_t {
+    kContextFieldNumber = 1,
+    kDriverFieldNumber = 2,
+    kSeqnoFieldNumber = 3,
+    kTimelineFieldNumber = 4,
+  };
+
+  using FieldMetadata_Context =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      FenceEnableSignalFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Context kContext() { return {}; }
+  void set_context(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Context::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Driver =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FenceEnableSignalFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Driver kDriver() { return {}; }
+  void set_driver(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Driver::kFieldId, data, size);
+  }
+  void set_driver(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Driver::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Seqno =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      FenceEnableSignalFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Seqno kSeqno() { return {}; }
+  void set_seqno(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Seqno::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Timeline =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FenceEnableSignalFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Timeline kTimeline() { return {}; }
+  void set_timeline(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Timeline::kFieldId, data, size);
+  }
+  void set_timeline(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Timeline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class FenceDestroyFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  FenceDestroyFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FenceDestroyFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FenceDestroyFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_context() const { return at<1>().valid(); }
+  uint32_t context() const { return at<1>().as_uint32(); }
+  bool has_driver() const { return at<2>().valid(); }
+  ::protozero::ConstChars driver() const { return at<2>().as_string(); }
+  bool has_seqno() const { return at<3>().valid(); }
+  uint32_t seqno() const { return at<3>().as_uint32(); }
+  bool has_timeline() const { return at<4>().valid(); }
+  ::protozero::ConstChars timeline() const { return at<4>().as_string(); }
+};
+
+class FenceDestroyFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = FenceDestroyFtraceEvent_Decoder;
+  enum : int32_t {
+    kContextFieldNumber = 1,
+    kDriverFieldNumber = 2,
+    kSeqnoFieldNumber = 3,
+    kTimelineFieldNumber = 4,
+  };
+
+  using FieldMetadata_Context =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      FenceDestroyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Context kContext() { return {}; }
+  void set_context(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Context::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Driver =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FenceDestroyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Driver kDriver() { return {}; }
+  void set_driver(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Driver::kFieldId, data, size);
+  }
+  void set_driver(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Driver::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Seqno =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      FenceDestroyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Seqno kSeqno() { return {}; }
+  void set_seqno(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Seqno::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Timeline =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FenceDestroyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Timeline kTimeline() { return {}; }
+  void set_timeline(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Timeline::kFieldId, data, size);
+  }
+  void set_timeline(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Timeline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class FenceInitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  FenceInitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit FenceInitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit FenceInitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_context() const { return at<1>().valid(); }
+  uint32_t context() const { return at<1>().as_uint32(); }
+  bool has_driver() const { return at<2>().valid(); }
+  ::protozero::ConstChars driver() const { return at<2>().as_string(); }
+  bool has_seqno() const { return at<3>().valid(); }
+  uint32_t seqno() const { return at<3>().as_uint32(); }
+  bool has_timeline() const { return at<4>().valid(); }
+  ::protozero::ConstChars timeline() const { return at<4>().as_string(); }
+};
+
+class FenceInitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = FenceInitFtraceEvent_Decoder;
+  enum : int32_t {
+    kContextFieldNumber = 1,
+    kDriverFieldNumber = 2,
+    kSeqnoFieldNumber = 3,
+    kTimelineFieldNumber = 4,
+  };
+
+  using FieldMetadata_Context =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      FenceInitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Context kContext() { return {}; }
+  void set_context(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Context::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Driver =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FenceInitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Driver kDriver() { return {}; }
+  void set_driver(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Driver::kFieldId, data, size);
+  }
+  void set_driver(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Driver::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Seqno =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      FenceInitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Seqno kSeqno() { return {}; }
+  void set_seqno(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Seqno::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Timeline =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      FenceInitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Timeline kTimeline() { return {}; }
+  void set_timeline(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Timeline::kFieldId, data, size);
+  }
+  void set_timeline(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Timeline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/filemap.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_FILEMAP_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_FILEMAP_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class MmFilemapDeleteFromPageCacheFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmFilemapDeleteFromPageCacheFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmFilemapDeleteFromPageCacheFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmFilemapDeleteFromPageCacheFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pfn() const { return at<1>().valid(); }
+  uint64_t pfn() const { return at<1>().as_uint64(); }
+  bool has_i_ino() const { return at<2>().valid(); }
+  uint64_t i_ino() const { return at<2>().as_uint64(); }
+  bool has_index() const { return at<3>().valid(); }
+  uint64_t index() const { return at<3>().as_uint64(); }
+  bool has_s_dev() const { return at<4>().valid(); }
+  uint64_t s_dev() const { return at<4>().as_uint64(); }
+  bool has_page() const { return at<5>().valid(); }
+  uint64_t page() const { return at<5>().as_uint64(); }
+};
+
+class MmFilemapDeleteFromPageCacheFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmFilemapDeleteFromPageCacheFtraceEvent_Decoder;
+  enum : int32_t {
+    kPfnFieldNumber = 1,
+    kIInoFieldNumber = 2,
+    kIndexFieldNumber = 3,
+    kSDevFieldNumber = 4,
+    kPageFieldNumber = 5,
+  };
+
+  using FieldMetadata_Pfn =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmFilemapDeleteFromPageCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pfn kPfn() { return {}; }
+  void set_pfn(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pfn::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IIno =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmFilemapDeleteFromPageCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IIno kIIno() { return {}; }
+  void set_i_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IIno::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Index =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmFilemapDeleteFromPageCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Index kIndex() { return {}; }
+  void set_index(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Index::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SDev =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmFilemapDeleteFromPageCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SDev kSDev() { return {}; }
+  void set_s_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SDev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Page =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmFilemapDeleteFromPageCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Page kPage() { return {}; }
+  void set_page(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Page::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmFilemapAddToPageCacheFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmFilemapAddToPageCacheFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmFilemapAddToPageCacheFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmFilemapAddToPageCacheFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pfn() const { return at<1>().valid(); }
+  uint64_t pfn() const { return at<1>().as_uint64(); }
+  bool has_i_ino() const { return at<2>().valid(); }
+  uint64_t i_ino() const { return at<2>().as_uint64(); }
+  bool has_index() const { return at<3>().valid(); }
+  uint64_t index() const { return at<3>().as_uint64(); }
+  bool has_s_dev() const { return at<4>().valid(); }
+  uint64_t s_dev() const { return at<4>().as_uint64(); }
+  bool has_page() const { return at<5>().valid(); }
+  uint64_t page() const { return at<5>().as_uint64(); }
+};
+
+class MmFilemapAddToPageCacheFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmFilemapAddToPageCacheFtraceEvent_Decoder;
+  enum : int32_t {
+    kPfnFieldNumber = 1,
+    kIInoFieldNumber = 2,
+    kIndexFieldNumber = 3,
+    kSDevFieldNumber = 4,
+    kPageFieldNumber = 5,
+  };
+
+  using FieldMetadata_Pfn =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmFilemapAddToPageCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pfn kPfn() { return {}; }
+  void set_pfn(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pfn::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IIno =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmFilemapAddToPageCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IIno kIIno() { return {}; }
+  void set_i_ino(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IIno::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Index =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmFilemapAddToPageCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Index kIndex() { return {}; }
+  void set_index(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Index::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SDev =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmFilemapAddToPageCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SDev kSDev() { return {}; }
+  void set_s_dev(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SDev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Page =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmFilemapAddToPageCacheFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Page kPage() { return {}; }
+  void set_page(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Page::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/ftrace.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_FTRACE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_FTRACE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class PrintFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  PrintFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PrintFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PrintFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_ip() const { return at<1>().valid(); }
+  uint64_t ip() const { return at<1>().as_uint64(); }
+  bool has_buf() const { return at<2>().valid(); }
+  ::protozero::ConstChars buf() const { return at<2>().as_string(); }
+};
+
+class PrintFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = PrintFtraceEvent_Decoder;
+  enum : int32_t {
+    kIpFieldNumber = 1,
+    kBufFieldNumber = 2,
+  };
+
+  using FieldMetadata_Ip =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      PrintFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ip kIp() { return {}; }
+  void set_ip(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ip::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Buf =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PrintFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Buf kBuf() { return {}; }
+  void set_buf(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Buf::kFieldId, data, size);
+  }
+  void set_buf(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Buf::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/g2d.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_G2D_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_G2D_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class G2dTracingMarkWriteFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  G2dTracingMarkWriteFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit G2dTracingMarkWriteFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit G2dTracingMarkWriteFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  int32_t pid() const { return at<1>().as_int32(); }
+  bool has_name() const { return at<4>().valid(); }
+  ::protozero::ConstChars name() const { return at<4>().as_string(); }
+  bool has_type() const { return at<5>().valid(); }
+  uint32_t type() const { return at<5>().as_uint32(); }
+  bool has_value() const { return at<6>().valid(); }
+  int32_t value() const { return at<6>().as_int32(); }
+};
+
+class G2dTracingMarkWriteFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = G2dTracingMarkWriteFtraceEvent_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kNameFieldNumber = 4,
+    kTypeFieldNumber = 5,
+    kValueFieldNumber = 6,
+  };
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      G2dTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      G2dTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      G2dTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Value =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      G2dTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Value kValue() { return {}; }
+  void set_value(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Value::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/gpu_mem.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_GPU_MEM_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_GPU_MEM_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class GpuMemTotalFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  GpuMemTotalFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GpuMemTotalFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GpuMemTotalFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_gpu_id() const { return at<1>().valid(); }
+  uint32_t gpu_id() const { return at<1>().as_uint32(); }
+  bool has_pid() const { return at<2>().valid(); }
+  uint32_t pid() const { return at<2>().as_uint32(); }
+  bool has_size() const { return at<3>().valid(); }
+  uint64_t size() const { return at<3>().as_uint64(); }
+};
+
+class GpuMemTotalFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = GpuMemTotalFtraceEvent_Decoder;
+  enum : int32_t {
+    kGpuIdFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kSizeFieldNumber = 3,
+  };
+
+  using FieldMetadata_GpuId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      GpuMemTotalFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuId kGpuId() { return {}; }
+  void set_gpu_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GpuId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      GpuMemTotalFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Size =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      GpuMemTotalFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Size kSize() { return {}; }
+  void set_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Size::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/i2c.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_I2C_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_I2C_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class SmbusReplyFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SmbusReplyFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SmbusReplyFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SmbusReplyFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_adapter_nr() const { return at<1>().valid(); }
+  int32_t adapter_nr() const { return at<1>().as_int32(); }
+  bool has_addr() const { return at<2>().valid(); }
+  uint32_t addr() const { return at<2>().as_uint32(); }
+  bool has_flags() const { return at<3>().valid(); }
+  uint32_t flags() const { return at<3>().as_uint32(); }
+  bool has_command() const { return at<4>().valid(); }
+  uint32_t command() const { return at<4>().as_uint32(); }
+  bool has_len() const { return at<5>().valid(); }
+  uint32_t len() const { return at<5>().as_uint32(); }
+  bool has_protocol() const { return at<6>().valid(); }
+  uint32_t protocol() const { return at<6>().as_uint32(); }
+};
+
+class SmbusReplyFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SmbusReplyFtraceEvent_Decoder;
+  enum : int32_t {
+    kAdapterNrFieldNumber = 1,
+    kAddrFieldNumber = 2,
+    kFlagsFieldNumber = 3,
+    kCommandFieldNumber = 4,
+    kLenFieldNumber = 5,
+    kProtocolFieldNumber = 6,
+  };
+
+  using FieldMetadata_AdapterNr =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SmbusReplyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AdapterNr kAdapterNr() { return {}; }
+  void set_adapter_nr(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AdapterNr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Addr =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusReplyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Addr kAddr() { return {}; }
+  void set_addr(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Addr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusReplyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Command =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusReplyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Command kCommand() { return {}; }
+  void set_command(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Command::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusReplyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Protocol =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusReplyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Protocol kProtocol() { return {}; }
+  void set_protocol(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Protocol::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SmbusResultFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SmbusResultFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SmbusResultFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SmbusResultFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_adapter_nr() const { return at<1>().valid(); }
+  int32_t adapter_nr() const { return at<1>().as_int32(); }
+  bool has_addr() const { return at<2>().valid(); }
+  uint32_t addr() const { return at<2>().as_uint32(); }
+  bool has_flags() const { return at<3>().valid(); }
+  uint32_t flags() const { return at<3>().as_uint32(); }
+  bool has_read_write() const { return at<4>().valid(); }
+  uint32_t read_write() const { return at<4>().as_uint32(); }
+  bool has_command() const { return at<5>().valid(); }
+  uint32_t command() const { return at<5>().as_uint32(); }
+  bool has_res() const { return at<6>().valid(); }
+  int32_t res() const { return at<6>().as_int32(); }
+  bool has_protocol() const { return at<7>().valid(); }
+  uint32_t protocol() const { return at<7>().as_uint32(); }
+};
+
+class SmbusResultFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SmbusResultFtraceEvent_Decoder;
+  enum : int32_t {
+    kAdapterNrFieldNumber = 1,
+    kAddrFieldNumber = 2,
+    kFlagsFieldNumber = 3,
+    kReadWriteFieldNumber = 4,
+    kCommandFieldNumber = 5,
+    kResFieldNumber = 6,
+    kProtocolFieldNumber = 7,
+  };
+
+  using FieldMetadata_AdapterNr =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SmbusResultFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AdapterNr kAdapterNr() { return {}; }
+  void set_adapter_nr(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AdapterNr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Addr =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusResultFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Addr kAddr() { return {}; }
+  void set_addr(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Addr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusResultFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReadWrite =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusResultFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReadWrite kReadWrite() { return {}; }
+  void set_read_write(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReadWrite::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Command =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusResultFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Command kCommand() { return {}; }
+  void set_command(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Command::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Res =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SmbusResultFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Res kRes() { return {}; }
+  void set_res(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Res::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Protocol =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusResultFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Protocol kProtocol() { return {}; }
+  void set_protocol(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Protocol::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SmbusWriteFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SmbusWriteFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SmbusWriteFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SmbusWriteFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_adapter_nr() const { return at<1>().valid(); }
+  int32_t adapter_nr() const { return at<1>().as_int32(); }
+  bool has_addr() const { return at<2>().valid(); }
+  uint32_t addr() const { return at<2>().as_uint32(); }
+  bool has_flags() const { return at<3>().valid(); }
+  uint32_t flags() const { return at<3>().as_uint32(); }
+  bool has_command() const { return at<4>().valid(); }
+  uint32_t command() const { return at<4>().as_uint32(); }
+  bool has_len() const { return at<5>().valid(); }
+  uint32_t len() const { return at<5>().as_uint32(); }
+  bool has_protocol() const { return at<6>().valid(); }
+  uint32_t protocol() const { return at<6>().as_uint32(); }
+};
+
+class SmbusWriteFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SmbusWriteFtraceEvent_Decoder;
+  enum : int32_t {
+    kAdapterNrFieldNumber = 1,
+    kAddrFieldNumber = 2,
+    kFlagsFieldNumber = 3,
+    kCommandFieldNumber = 4,
+    kLenFieldNumber = 5,
+    kProtocolFieldNumber = 6,
+  };
+
+  using FieldMetadata_AdapterNr =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SmbusWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AdapterNr kAdapterNr() { return {}; }
+  void set_adapter_nr(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AdapterNr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Addr =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Addr kAddr() { return {}; }
+  void set_addr(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Addr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Command =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Command kCommand() { return {}; }
+  void set_command(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Command::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Protocol =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Protocol kProtocol() { return {}; }
+  void set_protocol(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Protocol::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SmbusReadFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SmbusReadFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SmbusReadFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SmbusReadFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_adapter_nr() const { return at<1>().valid(); }
+  int32_t adapter_nr() const { return at<1>().as_int32(); }
+  bool has_flags() const { return at<2>().valid(); }
+  uint32_t flags() const { return at<2>().as_uint32(); }
+  bool has_addr() const { return at<3>().valid(); }
+  uint32_t addr() const { return at<3>().as_uint32(); }
+  bool has_command() const { return at<4>().valid(); }
+  uint32_t command() const { return at<4>().as_uint32(); }
+  bool has_protocol() const { return at<5>().valid(); }
+  uint32_t protocol() const { return at<5>().as_uint32(); }
+};
+
+class SmbusReadFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SmbusReadFtraceEvent_Decoder;
+  enum : int32_t {
+    kAdapterNrFieldNumber = 1,
+    kFlagsFieldNumber = 2,
+    kAddrFieldNumber = 3,
+    kCommandFieldNumber = 4,
+    kProtocolFieldNumber = 5,
+  };
+
+  using FieldMetadata_AdapterNr =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SmbusReadFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AdapterNr kAdapterNr() { return {}; }
+  void set_adapter_nr(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AdapterNr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusReadFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Addr =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusReadFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Addr kAddr() { return {}; }
+  void set_addr(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Addr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Command =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusReadFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Command kCommand() { return {}; }
+  void set_command(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Command::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Protocol =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmbusReadFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Protocol kProtocol() { return {}; }
+  void set_protocol(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Protocol::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class I2cReplyFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  I2cReplyFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit I2cReplyFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit I2cReplyFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_adapter_nr() const { return at<1>().valid(); }
+  int32_t adapter_nr() const { return at<1>().as_int32(); }
+  bool has_msg_nr() const { return at<2>().valid(); }
+  uint32_t msg_nr() const { return at<2>().as_uint32(); }
+  bool has_addr() const { return at<3>().valid(); }
+  uint32_t addr() const { return at<3>().as_uint32(); }
+  bool has_flags() const { return at<4>().valid(); }
+  uint32_t flags() const { return at<4>().as_uint32(); }
+  bool has_len() const { return at<5>().valid(); }
+  uint32_t len() const { return at<5>().as_uint32(); }
+  bool has_buf() const { return at<6>().valid(); }
+  uint32_t buf() const { return at<6>().as_uint32(); }
+};
+
+class I2cReplyFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = I2cReplyFtraceEvent_Decoder;
+  enum : int32_t {
+    kAdapterNrFieldNumber = 1,
+    kMsgNrFieldNumber = 2,
+    kAddrFieldNumber = 3,
+    kFlagsFieldNumber = 4,
+    kLenFieldNumber = 5,
+    kBufFieldNumber = 6,
+  };
+
+  using FieldMetadata_AdapterNr =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      I2cReplyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AdapterNr kAdapterNr() { return {}; }
+  void set_adapter_nr(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AdapterNr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MsgNr =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      I2cReplyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MsgNr kMsgNr() { return {}; }
+  void set_msg_nr(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MsgNr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Addr =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      I2cReplyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Addr kAddr() { return {}; }
+  void set_addr(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Addr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      I2cReplyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      I2cReplyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Buf =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      I2cReplyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Buf kBuf() { return {}; }
+  void set_buf(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Buf::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class I2cResultFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  I2cResultFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit I2cResultFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit I2cResultFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_adapter_nr() const { return at<1>().valid(); }
+  int32_t adapter_nr() const { return at<1>().as_int32(); }
+  bool has_nr_msgs() const { return at<2>().valid(); }
+  uint32_t nr_msgs() const { return at<2>().as_uint32(); }
+  bool has_ret() const { return at<3>().valid(); }
+  int32_t ret() const { return at<3>().as_int32(); }
+};
+
+class I2cResultFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = I2cResultFtraceEvent_Decoder;
+  enum : int32_t {
+    kAdapterNrFieldNumber = 1,
+    kNrMsgsFieldNumber = 2,
+    kRetFieldNumber = 3,
+  };
+
+  using FieldMetadata_AdapterNr =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      I2cResultFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AdapterNr kAdapterNr() { return {}; }
+  void set_adapter_nr(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AdapterNr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NrMsgs =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      I2cResultFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrMsgs kNrMsgs() { return {}; }
+  void set_nr_msgs(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrMsgs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      I2cResultFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class I2cWriteFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  I2cWriteFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit I2cWriteFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit I2cWriteFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_adapter_nr() const { return at<1>().valid(); }
+  int32_t adapter_nr() const { return at<1>().as_int32(); }
+  bool has_msg_nr() const { return at<2>().valid(); }
+  uint32_t msg_nr() const { return at<2>().as_uint32(); }
+  bool has_addr() const { return at<3>().valid(); }
+  uint32_t addr() const { return at<3>().as_uint32(); }
+  bool has_flags() const { return at<4>().valid(); }
+  uint32_t flags() const { return at<4>().as_uint32(); }
+  bool has_len() const { return at<5>().valid(); }
+  uint32_t len() const { return at<5>().as_uint32(); }
+  bool has_buf() const { return at<6>().valid(); }
+  uint32_t buf() const { return at<6>().as_uint32(); }
+};
+
+class I2cWriteFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = I2cWriteFtraceEvent_Decoder;
+  enum : int32_t {
+    kAdapterNrFieldNumber = 1,
+    kMsgNrFieldNumber = 2,
+    kAddrFieldNumber = 3,
+    kFlagsFieldNumber = 4,
+    kLenFieldNumber = 5,
+    kBufFieldNumber = 6,
+  };
+
+  using FieldMetadata_AdapterNr =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      I2cWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AdapterNr kAdapterNr() { return {}; }
+  void set_adapter_nr(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AdapterNr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MsgNr =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      I2cWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MsgNr kMsgNr() { return {}; }
+  void set_msg_nr(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MsgNr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Addr =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      I2cWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Addr kAddr() { return {}; }
+  void set_addr(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Addr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      I2cWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      I2cWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Buf =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      I2cWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Buf kBuf() { return {}; }
+  void set_buf(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Buf::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class I2cReadFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  I2cReadFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit I2cReadFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit I2cReadFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_adapter_nr() const { return at<1>().valid(); }
+  int32_t adapter_nr() const { return at<1>().as_int32(); }
+  bool has_msg_nr() const { return at<2>().valid(); }
+  uint32_t msg_nr() const { return at<2>().as_uint32(); }
+  bool has_addr() const { return at<3>().valid(); }
+  uint32_t addr() const { return at<3>().as_uint32(); }
+  bool has_flags() const { return at<4>().valid(); }
+  uint32_t flags() const { return at<4>().as_uint32(); }
+  bool has_len() const { return at<5>().valid(); }
+  uint32_t len() const { return at<5>().as_uint32(); }
+};
+
+class I2cReadFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = I2cReadFtraceEvent_Decoder;
+  enum : int32_t {
+    kAdapterNrFieldNumber = 1,
+    kMsgNrFieldNumber = 2,
+    kAddrFieldNumber = 3,
+    kFlagsFieldNumber = 4,
+    kLenFieldNumber = 5,
+  };
+
+  using FieldMetadata_AdapterNr =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      I2cReadFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AdapterNr kAdapterNr() { return {}; }
+  void set_adapter_nr(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AdapterNr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MsgNr =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      I2cReadFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MsgNr kMsgNr() { return {}; }
+  void set_msg_nr(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MsgNr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Addr =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      I2cReadFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Addr kAddr() { return {}; }
+  void set_addr(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Addr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      I2cReadFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      I2cReadFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/ion.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_ION_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_ION_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class IonStatFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonStatFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonStatFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonStatFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_buffer_id() const { return at<1>().valid(); }
+  uint32_t buffer_id() const { return at<1>().as_uint32(); }
+  bool has_len() const { return at<2>().valid(); }
+  int64_t len() const { return at<2>().as_int64(); }
+  bool has_total_allocated() const { return at<3>().valid(); }
+  uint64_t total_allocated() const { return at<3>().as_uint64(); }
+};
+
+class IonStatFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonStatFtraceEvent_Decoder;
+  enum : int32_t {
+    kBufferIdFieldNumber = 1,
+    kLenFieldNumber = 2,
+    kTotalAllocatedFieldNumber = 3,
+  };
+
+  using FieldMetadata_BufferId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      IonStatFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BufferId kBufferId() { return {}; }
+  void set_buffer_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BufferId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      IonStatFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TotalAllocated =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonStatFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TotalAllocated kTotalAllocated() { return {}; }
+  void set_total_allocated(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TotalAllocated::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/ipi.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_IPI_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_IPI_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class IpiRaiseFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IpiRaiseFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IpiRaiseFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IpiRaiseFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_target_cpus() const { return at<1>().valid(); }
+  uint32_t target_cpus() const { return at<1>().as_uint32(); }
+  bool has_reason() const { return at<2>().valid(); }
+  ::protozero::ConstChars reason() const { return at<2>().as_string(); }
+};
+
+class IpiRaiseFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IpiRaiseFtraceEvent_Decoder;
+  enum : int32_t {
+    kTargetCpusFieldNumber = 1,
+    kReasonFieldNumber = 2,
+  };
+
+  using FieldMetadata_TargetCpus =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      IpiRaiseFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TargetCpus kTargetCpus() { return {}; }
+  void set_target_cpus(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TargetCpus::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Reason =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IpiRaiseFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Reason kReason() { return {}; }
+  void set_reason(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Reason::kFieldId, data, size);
+  }
+  void set_reason(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Reason::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IpiExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IpiExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IpiExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IpiExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_reason() const { return at<1>().valid(); }
+  ::protozero::ConstChars reason() const { return at<1>().as_string(); }
+};
+
+class IpiExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IpiExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kReasonFieldNumber = 1,
+  };
+
+  using FieldMetadata_Reason =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IpiExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Reason kReason() { return {}; }
+  void set_reason(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Reason::kFieldId, data, size);
+  }
+  void set_reason(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Reason::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IpiEntryFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IpiEntryFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IpiEntryFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IpiEntryFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_reason() const { return at<1>().valid(); }
+  ::protozero::ConstChars reason() const { return at<1>().as_string(); }
+};
+
+class IpiEntryFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IpiEntryFtraceEvent_Decoder;
+  enum : int32_t {
+    kReasonFieldNumber = 1,
+  };
+
+  using FieldMetadata_Reason =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IpiEntryFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Reason kReason() { return {}; }
+  void set_reason(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Reason::kFieldId, data, size);
+  }
+  void set_reason(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Reason::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/irq.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_IRQ_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_IRQ_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class IrqHandlerExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IrqHandlerExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IrqHandlerExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IrqHandlerExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_irq() const { return at<1>().valid(); }
+  int32_t irq() const { return at<1>().as_int32(); }
+  bool has_ret() const { return at<2>().valid(); }
+  int32_t ret() const { return at<2>().as_int32(); }
+};
+
+class IrqHandlerExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IrqHandlerExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kIrqFieldNumber = 1,
+    kRetFieldNumber = 2,
+  };
+
+  using FieldMetadata_Irq =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      IrqHandlerExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Irq kIrq() { return {}; }
+  void set_irq(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Irq::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      IrqHandlerExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IrqHandlerEntryFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IrqHandlerEntryFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IrqHandlerEntryFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IrqHandlerEntryFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_irq() const { return at<1>().valid(); }
+  int32_t irq() const { return at<1>().as_int32(); }
+  bool has_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars name() const { return at<2>().as_string(); }
+  bool has_handler() const { return at<3>().valid(); }
+  uint32_t handler() const { return at<3>().as_uint32(); }
+};
+
+class IrqHandlerEntryFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IrqHandlerEntryFtraceEvent_Decoder;
+  enum : int32_t {
+    kIrqFieldNumber = 1,
+    kNameFieldNumber = 2,
+    kHandlerFieldNumber = 3,
+  };
+
+  using FieldMetadata_Irq =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      IrqHandlerEntryFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Irq kIrq() { return {}; }
+  void set_irq(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Irq::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IrqHandlerEntryFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Handler =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      IrqHandlerEntryFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Handler kHandler() { return {}; }
+  void set_handler(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Handler::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SoftirqRaiseFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SoftirqRaiseFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SoftirqRaiseFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SoftirqRaiseFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_vec() const { return at<1>().valid(); }
+  uint32_t vec() const { return at<1>().as_uint32(); }
+};
+
+class SoftirqRaiseFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SoftirqRaiseFtraceEvent_Decoder;
+  enum : int32_t {
+    kVecFieldNumber = 1,
+  };
+
+  using FieldMetadata_Vec =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SoftirqRaiseFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Vec kVec() { return {}; }
+  void set_vec(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Vec::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SoftirqExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SoftirqExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SoftirqExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SoftirqExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_vec() const { return at<1>().valid(); }
+  uint32_t vec() const { return at<1>().as_uint32(); }
+};
+
+class SoftirqExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SoftirqExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kVecFieldNumber = 1,
+  };
+
+  using FieldMetadata_Vec =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SoftirqExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Vec kVec() { return {}; }
+  void set_vec(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Vec::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SoftirqEntryFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SoftirqEntryFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SoftirqEntryFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SoftirqEntryFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_vec() const { return at<1>().valid(); }
+  uint32_t vec() const { return at<1>().as_uint32(); }
+};
+
+class SoftirqEntryFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SoftirqEntryFtraceEvent_Decoder;
+  enum : int32_t {
+    kVecFieldNumber = 1,
+  };
+
+  using FieldMetadata_Vec =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SoftirqEntryFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Vec kVec() { return {}; }
+  void set_vec(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Vec::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/kmem.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_KMEM_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_KMEM_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class IonBufferDestroyFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonBufferDestroyFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonBufferDestroyFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonBufferDestroyFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_addr() const { return at<1>().valid(); }
+  uint64_t addr() const { return at<1>().as_uint64(); }
+  bool has_len() const { return at<2>().valid(); }
+  uint64_t len() const { return at<2>().as_uint64(); }
+};
+
+class IonBufferDestroyFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonBufferDestroyFtraceEvent_Decoder;
+  enum : int32_t {
+    kAddrFieldNumber = 1,
+    kLenFieldNumber = 2,
+  };
+
+  using FieldMetadata_Addr =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonBufferDestroyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Addr kAddr() { return {}; }
+  void set_addr(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Addr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonBufferDestroyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IonBufferCreateFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonBufferCreateFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonBufferCreateFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonBufferCreateFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_addr() const { return at<1>().valid(); }
+  uint64_t addr() const { return at<1>().as_uint64(); }
+  bool has_len() const { return at<2>().valid(); }
+  uint64_t len() const { return at<2>().as_uint64(); }
+};
+
+class IonBufferCreateFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonBufferCreateFtraceEvent_Decoder;
+  enum : int32_t {
+    kAddrFieldNumber = 1,
+    kLenFieldNumber = 2,
+  };
+
+  using FieldMetadata_Addr =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonBufferCreateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Addr kAddr() { return {}; }
+  void set_addr(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Addr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonBufferCreateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IonHeapGrowFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonHeapGrowFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonHeapGrowFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonHeapGrowFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_heap_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars heap_name() const { return at<1>().as_string(); }
+  bool has_len() const { return at<2>().valid(); }
+  uint64_t len() const { return at<2>().as_uint64(); }
+  bool has_total_allocated() const { return at<3>().valid(); }
+  int64_t total_allocated() const { return at<3>().as_int64(); }
+};
+
+class IonHeapGrowFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonHeapGrowFtraceEvent_Decoder;
+  enum : int32_t {
+    kHeapNameFieldNumber = 1,
+    kLenFieldNumber = 2,
+    kTotalAllocatedFieldNumber = 3,
+  };
+
+  using FieldMetadata_HeapName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IonHeapGrowFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapName kHeapName() { return {}; }
+  void set_heap_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_HeapName::kFieldId, data, size);
+  }
+  void set_heap_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_HeapName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonHeapGrowFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TotalAllocated =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      IonHeapGrowFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TotalAllocated kTotalAllocated() { return {}; }
+  void set_total_allocated(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TotalAllocated::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IonHeapShrinkFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonHeapShrinkFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonHeapShrinkFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonHeapShrinkFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_heap_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars heap_name() const { return at<1>().as_string(); }
+  bool has_len() const { return at<2>().valid(); }
+  uint64_t len() const { return at<2>().as_uint64(); }
+  bool has_total_allocated() const { return at<3>().valid(); }
+  int64_t total_allocated() const { return at<3>().as_int64(); }
+};
+
+class IonHeapShrinkFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonHeapShrinkFtraceEvent_Decoder;
+  enum : int32_t {
+    kHeapNameFieldNumber = 1,
+    kLenFieldNumber = 2,
+    kTotalAllocatedFieldNumber = 3,
+  };
+
+  using FieldMetadata_HeapName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IonHeapShrinkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapName kHeapName() { return {}; }
+  void set_heap_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_HeapName::kFieldId, data, size);
+  }
+  void set_heap_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_HeapName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonHeapShrinkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TotalAllocated =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      IonHeapShrinkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TotalAllocated kTotalAllocated() { return {}; }
+  void set_total_allocated(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TotalAllocated::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class RssStatFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  RssStatFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit RssStatFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit RssStatFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_member() const { return at<1>().valid(); }
+  int32_t member() const { return at<1>().as_int32(); }
+  bool has_size() const { return at<2>().valid(); }
+  int64_t size() const { return at<2>().as_int64(); }
+  bool has_curr() const { return at<3>().valid(); }
+  uint32_t curr() const { return at<3>().as_uint32(); }
+  bool has_mm_id() const { return at<4>().valid(); }
+  uint32_t mm_id() const { return at<4>().as_uint32(); }
+};
+
+class RssStatFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = RssStatFtraceEvent_Decoder;
+  enum : int32_t {
+    kMemberFieldNumber = 1,
+    kSizeFieldNumber = 2,
+    kCurrFieldNumber = 3,
+    kMmIdFieldNumber = 4,
+  };
+
+  using FieldMetadata_Member =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      RssStatFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Member kMember() { return {}; }
+  void set_member(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Member::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Size =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      RssStatFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Size kSize() { return {}; }
+  void set_size(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Size::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Curr =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      RssStatFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Curr kCurr() { return {}; }
+  void set_curr(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Curr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MmId =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      RssStatFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MmId kMmId() { return {}; }
+  void set_mm_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MmId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmPagePcpuDrainFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmPagePcpuDrainFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmPagePcpuDrainFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmPagePcpuDrainFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_migratetype() const { return at<1>().valid(); }
+  int32_t migratetype() const { return at<1>().as_int32(); }
+  bool has_order() const { return at<2>().valid(); }
+  uint32_t order() const { return at<2>().as_uint32(); }
+  bool has_page() const { return at<3>().valid(); }
+  uint64_t page() const { return at<3>().as_uint64(); }
+  bool has_pfn() const { return at<4>().valid(); }
+  uint64_t pfn() const { return at<4>().as_uint64(); }
+};
+
+class MmPagePcpuDrainFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmPagePcpuDrainFtraceEvent_Decoder;
+  enum : int32_t {
+    kMigratetypeFieldNumber = 1,
+    kOrderFieldNumber = 2,
+    kPageFieldNumber = 3,
+    kPfnFieldNumber = 4,
+  };
+
+  using FieldMetadata_Migratetype =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmPagePcpuDrainFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Migratetype kMigratetype() { return {}; }
+  void set_migratetype(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Migratetype::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmPagePcpuDrainFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Page =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmPagePcpuDrainFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Page kPage() { return {}; }
+  void set_page(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Page::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pfn =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmPagePcpuDrainFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pfn kPfn() { return {}; }
+  void set_pfn(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pfn::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmPageFreeBatchedFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmPageFreeBatchedFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmPageFreeBatchedFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmPageFreeBatchedFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_cold() const { return at<1>().valid(); }
+  int32_t cold() const { return at<1>().as_int32(); }
+  bool has_page() const { return at<2>().valid(); }
+  uint64_t page() const { return at<2>().as_uint64(); }
+  bool has_pfn() const { return at<3>().valid(); }
+  uint64_t pfn() const { return at<3>().as_uint64(); }
+};
+
+class MmPageFreeBatchedFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmPageFreeBatchedFtraceEvent_Decoder;
+  enum : int32_t {
+    kColdFieldNumber = 1,
+    kPageFieldNumber = 2,
+    kPfnFieldNumber = 3,
+  };
+
+  using FieldMetadata_Cold =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmPageFreeBatchedFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cold kCold() { return {}; }
+  void set_cold(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cold::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Page =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmPageFreeBatchedFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Page kPage() { return {}; }
+  void set_page(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Page::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pfn =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmPageFreeBatchedFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pfn kPfn() { return {}; }
+  void set_pfn(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pfn::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmPageFreeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmPageFreeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmPageFreeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmPageFreeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_order() const { return at<1>().valid(); }
+  uint32_t order() const { return at<1>().as_uint32(); }
+  bool has_page() const { return at<2>().valid(); }
+  uint64_t page() const { return at<2>().as_uint64(); }
+  bool has_pfn() const { return at<3>().valid(); }
+  uint64_t pfn() const { return at<3>().as_uint64(); }
+};
+
+class MmPageFreeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmPageFreeFtraceEvent_Decoder;
+  enum : int32_t {
+    kOrderFieldNumber = 1,
+    kPageFieldNumber = 2,
+    kPfnFieldNumber = 3,
+  };
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmPageFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Page =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmPageFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Page kPage() { return {}; }
+  void set_page(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Page::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pfn =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmPageFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pfn kPfn() { return {}; }
+  void set_pfn(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pfn::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmPageAllocZoneLockedFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmPageAllocZoneLockedFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmPageAllocZoneLockedFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmPageAllocZoneLockedFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_migratetype() const { return at<1>().valid(); }
+  int32_t migratetype() const { return at<1>().as_int32(); }
+  bool has_order() const { return at<2>().valid(); }
+  uint32_t order() const { return at<2>().as_uint32(); }
+  bool has_page() const { return at<3>().valid(); }
+  uint64_t page() const { return at<3>().as_uint64(); }
+  bool has_pfn() const { return at<4>().valid(); }
+  uint64_t pfn() const { return at<4>().as_uint64(); }
+};
+
+class MmPageAllocZoneLockedFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmPageAllocZoneLockedFtraceEvent_Decoder;
+  enum : int32_t {
+    kMigratetypeFieldNumber = 1,
+    kOrderFieldNumber = 2,
+    kPageFieldNumber = 3,
+    kPfnFieldNumber = 4,
+  };
+
+  using FieldMetadata_Migratetype =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmPageAllocZoneLockedFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Migratetype kMigratetype() { return {}; }
+  void set_migratetype(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Migratetype::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmPageAllocZoneLockedFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Page =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmPageAllocZoneLockedFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Page kPage() { return {}; }
+  void set_page(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Page::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pfn =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmPageAllocZoneLockedFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pfn kPfn() { return {}; }
+  void set_pfn(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pfn::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmPageAllocExtfragFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmPageAllocExtfragFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmPageAllocExtfragFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmPageAllocExtfragFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_alloc_migratetype() const { return at<1>().valid(); }
+  int32_t alloc_migratetype() const { return at<1>().as_int32(); }
+  bool has_alloc_order() const { return at<2>().valid(); }
+  int32_t alloc_order() const { return at<2>().as_int32(); }
+  bool has_fallback_migratetype() const { return at<3>().valid(); }
+  int32_t fallback_migratetype() const { return at<3>().as_int32(); }
+  bool has_fallback_order() const { return at<4>().valid(); }
+  int32_t fallback_order() const { return at<4>().as_int32(); }
+  bool has_page() const { return at<5>().valid(); }
+  uint64_t page() const { return at<5>().as_uint64(); }
+  bool has_change_ownership() const { return at<6>().valid(); }
+  int32_t change_ownership() const { return at<6>().as_int32(); }
+  bool has_pfn() const { return at<7>().valid(); }
+  uint64_t pfn() const { return at<7>().as_uint64(); }
+};
+
+class MmPageAllocExtfragFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmPageAllocExtfragFtraceEvent_Decoder;
+  enum : int32_t {
+    kAllocMigratetypeFieldNumber = 1,
+    kAllocOrderFieldNumber = 2,
+    kFallbackMigratetypeFieldNumber = 3,
+    kFallbackOrderFieldNumber = 4,
+    kPageFieldNumber = 5,
+    kChangeOwnershipFieldNumber = 6,
+    kPfnFieldNumber = 7,
+  };
+
+  using FieldMetadata_AllocMigratetype =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmPageAllocExtfragFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllocMigratetype kAllocMigratetype() { return {}; }
+  void set_alloc_migratetype(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AllocMigratetype::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AllocOrder =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmPageAllocExtfragFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllocOrder kAllocOrder() { return {}; }
+  void set_alloc_order(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AllocOrder::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FallbackMigratetype =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmPageAllocExtfragFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FallbackMigratetype kFallbackMigratetype() { return {}; }
+  void set_fallback_migratetype(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FallbackMigratetype::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FallbackOrder =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmPageAllocExtfragFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FallbackOrder kFallbackOrder() { return {}; }
+  void set_fallback_order(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FallbackOrder::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Page =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmPageAllocExtfragFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Page kPage() { return {}; }
+  void set_page(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Page::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChangeOwnership =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmPageAllocExtfragFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChangeOwnership kChangeOwnership() { return {}; }
+  void set_change_ownership(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChangeOwnership::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pfn =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmPageAllocExtfragFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pfn kPfn() { return {}; }
+  void set_pfn(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pfn::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmPageAllocFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmPageAllocFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmPageAllocFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmPageAllocFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_gfp_flags() const { return at<1>().valid(); }
+  uint32_t gfp_flags() const { return at<1>().as_uint32(); }
+  bool has_migratetype() const { return at<2>().valid(); }
+  int32_t migratetype() const { return at<2>().as_int32(); }
+  bool has_order() const { return at<3>().valid(); }
+  uint32_t order() const { return at<3>().as_uint32(); }
+  bool has_page() const { return at<4>().valid(); }
+  uint64_t page() const { return at<4>().as_uint64(); }
+  bool has_pfn() const { return at<5>().valid(); }
+  uint64_t pfn() const { return at<5>().as_uint64(); }
+};
+
+class MmPageAllocFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmPageAllocFtraceEvent_Decoder;
+  enum : int32_t {
+    kGfpFlagsFieldNumber = 1,
+    kMigratetypeFieldNumber = 2,
+    kOrderFieldNumber = 3,
+    kPageFieldNumber = 4,
+    kPfnFieldNumber = 5,
+  };
+
+  using FieldMetadata_GfpFlags =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmPageAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GfpFlags kGfpFlags() { return {}; }
+  void set_gfp_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GfpFlags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Migratetype =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmPageAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Migratetype kMigratetype() { return {}; }
+  void set_migratetype(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Migratetype::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmPageAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Page =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmPageAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Page kPage() { return {}; }
+  void set_page(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Page::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pfn =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmPageAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pfn kPfn() { return {}; }
+  void set_pfn(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pfn::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MigrateRetryFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MigrateRetryFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MigrateRetryFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MigrateRetryFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_tries() const { return at<1>().valid(); }
+  int32_t tries() const { return at<1>().as_int32(); }
+};
+
+class MigrateRetryFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MigrateRetryFtraceEvent_Decoder;
+  enum : int32_t {
+    kTriesFieldNumber = 1,
+  };
+
+  using FieldMetadata_Tries =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MigrateRetryFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tries kTries() { return {}; }
+  void set_tries(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tries::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MigratePagesStartFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MigratePagesStartFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MigratePagesStartFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MigratePagesStartFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_mode() const { return at<1>().valid(); }
+  int32_t mode() const { return at<1>().as_int32(); }
+};
+
+class MigratePagesStartFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MigratePagesStartFtraceEvent_Decoder;
+  enum : int32_t {
+    kModeFieldNumber = 1,
+  };
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MigratePagesStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MigratePagesEndFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MigratePagesEndFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MigratePagesEndFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MigratePagesEndFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_mode() const { return at<1>().valid(); }
+  int32_t mode() const { return at<1>().as_int32(); }
+};
+
+class MigratePagesEndFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MigratePagesEndFtraceEvent_Decoder;
+  enum : int32_t {
+    kModeFieldNumber = 1,
+  };
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MigratePagesEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class KmemCacheFreeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  KmemCacheFreeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit KmemCacheFreeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit KmemCacheFreeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_call_site() const { return at<1>().valid(); }
+  uint64_t call_site() const { return at<1>().as_uint64(); }
+  bool has_ptr() const { return at<2>().valid(); }
+  uint64_t ptr() const { return at<2>().as_uint64(); }
+};
+
+class KmemCacheFreeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = KmemCacheFreeFtraceEvent_Decoder;
+  enum : int32_t {
+    kCallSiteFieldNumber = 1,
+    kPtrFieldNumber = 2,
+  };
+
+  using FieldMetadata_CallSite =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmemCacheFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CallSite kCallSite() { return {}; }
+  void set_call_site(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CallSite::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ptr =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmemCacheFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ptr kPtr() { return {}; }
+  void set_ptr(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ptr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class KmemCacheAllocNodeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  KmemCacheAllocNodeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit KmemCacheAllocNodeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit KmemCacheAllocNodeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_bytes_alloc() const { return at<1>().valid(); }
+  uint64_t bytes_alloc() const { return at<1>().as_uint64(); }
+  bool has_bytes_req() const { return at<2>().valid(); }
+  uint64_t bytes_req() const { return at<2>().as_uint64(); }
+  bool has_call_site() const { return at<3>().valid(); }
+  uint64_t call_site() const { return at<3>().as_uint64(); }
+  bool has_gfp_flags() const { return at<4>().valid(); }
+  uint32_t gfp_flags() const { return at<4>().as_uint32(); }
+  bool has_node() const { return at<5>().valid(); }
+  int32_t node() const { return at<5>().as_int32(); }
+  bool has_ptr() const { return at<6>().valid(); }
+  uint64_t ptr() const { return at<6>().as_uint64(); }
+};
+
+class KmemCacheAllocNodeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = KmemCacheAllocNodeFtraceEvent_Decoder;
+  enum : int32_t {
+    kBytesAllocFieldNumber = 1,
+    kBytesReqFieldNumber = 2,
+    kCallSiteFieldNumber = 3,
+    kGfpFlagsFieldNumber = 4,
+    kNodeFieldNumber = 5,
+    kPtrFieldNumber = 6,
+  };
+
+  using FieldMetadata_BytesAlloc =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmemCacheAllocNodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BytesAlloc kBytesAlloc() { return {}; }
+  void set_bytes_alloc(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BytesAlloc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BytesReq =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmemCacheAllocNodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BytesReq kBytesReq() { return {}; }
+  void set_bytes_req(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BytesReq::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CallSite =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmemCacheAllocNodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CallSite kCallSite() { return {}; }
+  void set_call_site(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CallSite::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GfpFlags =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      KmemCacheAllocNodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GfpFlags kGfpFlags() { return {}; }
+  void set_gfp_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GfpFlags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Node =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      KmemCacheAllocNodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Node kNode() { return {}; }
+  void set_node(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Node::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ptr =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmemCacheAllocNodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ptr kPtr() { return {}; }
+  void set_ptr(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ptr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class KmemCacheAllocFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  KmemCacheAllocFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit KmemCacheAllocFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit KmemCacheAllocFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_bytes_alloc() const { return at<1>().valid(); }
+  uint64_t bytes_alloc() const { return at<1>().as_uint64(); }
+  bool has_bytes_req() const { return at<2>().valid(); }
+  uint64_t bytes_req() const { return at<2>().as_uint64(); }
+  bool has_call_site() const { return at<3>().valid(); }
+  uint64_t call_site() const { return at<3>().as_uint64(); }
+  bool has_gfp_flags() const { return at<4>().valid(); }
+  uint32_t gfp_flags() const { return at<4>().as_uint32(); }
+  bool has_ptr() const { return at<5>().valid(); }
+  uint64_t ptr() const { return at<5>().as_uint64(); }
+};
+
+class KmemCacheAllocFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = KmemCacheAllocFtraceEvent_Decoder;
+  enum : int32_t {
+    kBytesAllocFieldNumber = 1,
+    kBytesReqFieldNumber = 2,
+    kCallSiteFieldNumber = 3,
+    kGfpFlagsFieldNumber = 4,
+    kPtrFieldNumber = 5,
+  };
+
+  using FieldMetadata_BytesAlloc =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmemCacheAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BytesAlloc kBytesAlloc() { return {}; }
+  void set_bytes_alloc(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BytesAlloc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BytesReq =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmemCacheAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BytesReq kBytesReq() { return {}; }
+  void set_bytes_req(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BytesReq::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CallSite =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmemCacheAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CallSite kCallSite() { return {}; }
+  void set_call_site(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CallSite::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GfpFlags =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      KmemCacheAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GfpFlags kGfpFlags() { return {}; }
+  void set_gfp_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GfpFlags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ptr =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmemCacheAllocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ptr kPtr() { return {}; }
+  void set_ptr(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ptr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class KmallocNodeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  KmallocNodeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit KmallocNodeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit KmallocNodeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_bytes_alloc() const { return at<1>().valid(); }
+  uint64_t bytes_alloc() const { return at<1>().as_uint64(); }
+  bool has_bytes_req() const { return at<2>().valid(); }
+  uint64_t bytes_req() const { return at<2>().as_uint64(); }
+  bool has_call_site() const { return at<3>().valid(); }
+  uint64_t call_site() const { return at<3>().as_uint64(); }
+  bool has_gfp_flags() const { return at<4>().valid(); }
+  uint32_t gfp_flags() const { return at<4>().as_uint32(); }
+  bool has_node() const { return at<5>().valid(); }
+  int32_t node() const { return at<5>().as_int32(); }
+  bool has_ptr() const { return at<6>().valid(); }
+  uint64_t ptr() const { return at<6>().as_uint64(); }
+};
+
+class KmallocNodeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = KmallocNodeFtraceEvent_Decoder;
+  enum : int32_t {
+    kBytesAllocFieldNumber = 1,
+    kBytesReqFieldNumber = 2,
+    kCallSiteFieldNumber = 3,
+    kGfpFlagsFieldNumber = 4,
+    kNodeFieldNumber = 5,
+    kPtrFieldNumber = 6,
+  };
+
+  using FieldMetadata_BytesAlloc =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmallocNodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BytesAlloc kBytesAlloc() { return {}; }
+  void set_bytes_alloc(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BytesAlloc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BytesReq =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmallocNodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BytesReq kBytesReq() { return {}; }
+  void set_bytes_req(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BytesReq::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CallSite =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmallocNodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CallSite kCallSite() { return {}; }
+  void set_call_site(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CallSite::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GfpFlags =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      KmallocNodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GfpFlags kGfpFlags() { return {}; }
+  void set_gfp_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GfpFlags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Node =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      KmallocNodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Node kNode() { return {}; }
+  void set_node(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Node::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ptr =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmallocNodeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ptr kPtr() { return {}; }
+  void set_ptr(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ptr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class KmallocFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  KmallocFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit KmallocFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit KmallocFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_bytes_alloc() const { return at<1>().valid(); }
+  uint64_t bytes_alloc() const { return at<1>().as_uint64(); }
+  bool has_bytes_req() const { return at<2>().valid(); }
+  uint64_t bytes_req() const { return at<2>().as_uint64(); }
+  bool has_call_site() const { return at<3>().valid(); }
+  uint64_t call_site() const { return at<3>().as_uint64(); }
+  bool has_gfp_flags() const { return at<4>().valid(); }
+  uint32_t gfp_flags() const { return at<4>().as_uint32(); }
+  bool has_ptr() const { return at<5>().valid(); }
+  uint64_t ptr() const { return at<5>().as_uint64(); }
+};
+
+class KmallocFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = KmallocFtraceEvent_Decoder;
+  enum : int32_t {
+    kBytesAllocFieldNumber = 1,
+    kBytesReqFieldNumber = 2,
+    kCallSiteFieldNumber = 3,
+    kGfpFlagsFieldNumber = 4,
+    kPtrFieldNumber = 5,
+  };
+
+  using FieldMetadata_BytesAlloc =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmallocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BytesAlloc kBytesAlloc() { return {}; }
+  void set_bytes_alloc(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BytesAlloc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BytesReq =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmallocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BytesReq kBytesReq() { return {}; }
+  void set_bytes_req(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BytesReq::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CallSite =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmallocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CallSite kCallSite() { return {}; }
+  void set_call_site(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CallSite::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GfpFlags =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      KmallocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GfpFlags kGfpFlags() { return {}; }
+  void set_gfp_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GfpFlags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ptr =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KmallocFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ptr kPtr() { return {}; }
+  void set_ptr(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ptr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class KfreeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  KfreeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit KfreeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit KfreeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_call_site() const { return at<1>().valid(); }
+  uint64_t call_site() const { return at<1>().as_uint64(); }
+  bool has_ptr() const { return at<2>().valid(); }
+  uint64_t ptr() const { return at<2>().as_uint64(); }
+};
+
+class KfreeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = KfreeFtraceEvent_Decoder;
+  enum : int32_t {
+    kCallSiteFieldNumber = 1,
+    kPtrFieldNumber = 2,
+  };
+
+  using FieldMetadata_CallSite =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KfreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CallSite kCallSite() { return {}; }
+  void set_call_site(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CallSite::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ptr =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      KfreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ptr kPtr() { return {}; }
+  void set_ptr(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ptr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IonSecureCmaShrinkPoolStartFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonSecureCmaShrinkPoolStartFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonSecureCmaShrinkPoolStartFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonSecureCmaShrinkPoolStartFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_drained_size() const { return at<1>().valid(); }
+  uint64_t drained_size() const { return at<1>().as_uint64(); }
+  bool has_skipped_size() const { return at<2>().valid(); }
+  uint64_t skipped_size() const { return at<2>().as_uint64(); }
+};
+
+class IonSecureCmaShrinkPoolStartFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonSecureCmaShrinkPoolStartFtraceEvent_Decoder;
+  enum : int32_t {
+    kDrainedSizeFieldNumber = 1,
+    kSkippedSizeFieldNumber = 2,
+  };
+
+  using FieldMetadata_DrainedSize =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonSecureCmaShrinkPoolStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DrainedSize kDrainedSize() { return {}; }
+  void set_drained_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DrainedSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SkippedSize =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonSecureCmaShrinkPoolStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SkippedSize kSkippedSize() { return {}; }
+  void set_skipped_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SkippedSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IonSecureCmaShrinkPoolEndFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonSecureCmaShrinkPoolEndFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonSecureCmaShrinkPoolEndFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonSecureCmaShrinkPoolEndFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_drained_size() const { return at<1>().valid(); }
+  uint64_t drained_size() const { return at<1>().as_uint64(); }
+  bool has_skipped_size() const { return at<2>().valid(); }
+  uint64_t skipped_size() const { return at<2>().as_uint64(); }
+};
+
+class IonSecureCmaShrinkPoolEndFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonSecureCmaShrinkPoolEndFtraceEvent_Decoder;
+  enum : int32_t {
+    kDrainedSizeFieldNumber = 1,
+    kSkippedSizeFieldNumber = 2,
+  };
+
+  using FieldMetadata_DrainedSize =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonSecureCmaShrinkPoolEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DrainedSize kDrainedSize() { return {}; }
+  void set_drained_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DrainedSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SkippedSize =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonSecureCmaShrinkPoolEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SkippedSize kSkippedSize() { return {}; }
+  void set_skipped_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SkippedSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IonSecureCmaAllocateStartFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonSecureCmaAllocateStartFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonSecureCmaAllocateStartFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonSecureCmaAllocateStartFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_align() const { return at<1>().valid(); }
+  uint64_t align() const { return at<1>().as_uint64(); }
+  bool has_flags() const { return at<2>().valid(); }
+  uint64_t flags() const { return at<2>().as_uint64(); }
+  bool has_heap_name() const { return at<3>().valid(); }
+  ::protozero::ConstChars heap_name() const { return at<3>().as_string(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint64_t len() const { return at<4>().as_uint64(); }
+};
+
+class IonSecureCmaAllocateStartFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonSecureCmaAllocateStartFtraceEvent_Decoder;
+  enum : int32_t {
+    kAlignFieldNumber = 1,
+    kFlagsFieldNumber = 2,
+    kHeapNameFieldNumber = 3,
+    kLenFieldNumber = 4,
+  };
+
+  using FieldMetadata_Align =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonSecureCmaAllocateStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Align kAlign() { return {}; }
+  void set_align(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Align::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonSecureCmaAllocateStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HeapName =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IonSecureCmaAllocateStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapName kHeapName() { return {}; }
+  void set_heap_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_HeapName::kFieldId, data, size);
+  }
+  void set_heap_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_HeapName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonSecureCmaAllocateStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IonSecureCmaAllocateEndFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonSecureCmaAllocateEndFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonSecureCmaAllocateEndFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonSecureCmaAllocateEndFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_align() const { return at<1>().valid(); }
+  uint64_t align() const { return at<1>().as_uint64(); }
+  bool has_flags() const { return at<2>().valid(); }
+  uint64_t flags() const { return at<2>().as_uint64(); }
+  bool has_heap_name() const { return at<3>().valid(); }
+  ::protozero::ConstChars heap_name() const { return at<3>().as_string(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint64_t len() const { return at<4>().as_uint64(); }
+};
+
+class IonSecureCmaAllocateEndFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonSecureCmaAllocateEndFtraceEvent_Decoder;
+  enum : int32_t {
+    kAlignFieldNumber = 1,
+    kFlagsFieldNumber = 2,
+    kHeapNameFieldNumber = 3,
+    kLenFieldNumber = 4,
+  };
+
+  using FieldMetadata_Align =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonSecureCmaAllocateEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Align kAlign() { return {}; }
+  void set_align(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Align::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonSecureCmaAllocateEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HeapName =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IonSecureCmaAllocateEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapName kHeapName() { return {}; }
+  void set_heap_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_HeapName::kFieldId, data, size);
+  }
+  void set_heap_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_HeapName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonSecureCmaAllocateEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IonSecureCmaAddToPoolStartFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonSecureCmaAddToPoolStartFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonSecureCmaAddToPoolStartFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonSecureCmaAddToPoolStartFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_is_prefetch() const { return at<1>().valid(); }
+  uint32_t is_prefetch() const { return at<1>().as_uint32(); }
+  bool has_len() const { return at<2>().valid(); }
+  uint64_t len() const { return at<2>().as_uint64(); }
+  bool has_pool_total() const { return at<3>().valid(); }
+  int32_t pool_total() const { return at<3>().as_int32(); }
+};
+
+class IonSecureCmaAddToPoolStartFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonSecureCmaAddToPoolStartFtraceEvent_Decoder;
+  enum : int32_t {
+    kIsPrefetchFieldNumber = 1,
+    kLenFieldNumber = 2,
+    kPoolTotalFieldNumber = 3,
+  };
+
+  using FieldMetadata_IsPrefetch =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      IonSecureCmaAddToPoolStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IsPrefetch kIsPrefetch() { return {}; }
+  void set_is_prefetch(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IsPrefetch::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonSecureCmaAddToPoolStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PoolTotal =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      IonSecureCmaAddToPoolStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PoolTotal kPoolTotal() { return {}; }
+  void set_pool_total(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PoolTotal::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IonSecureCmaAddToPoolEndFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonSecureCmaAddToPoolEndFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonSecureCmaAddToPoolEndFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonSecureCmaAddToPoolEndFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_is_prefetch() const { return at<1>().valid(); }
+  uint32_t is_prefetch() const { return at<1>().as_uint32(); }
+  bool has_len() const { return at<2>().valid(); }
+  uint64_t len() const { return at<2>().as_uint64(); }
+  bool has_pool_total() const { return at<3>().valid(); }
+  int32_t pool_total() const { return at<3>().as_int32(); }
+};
+
+class IonSecureCmaAddToPoolEndFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonSecureCmaAddToPoolEndFtraceEvent_Decoder;
+  enum : int32_t {
+    kIsPrefetchFieldNumber = 1,
+    kLenFieldNumber = 2,
+    kPoolTotalFieldNumber = 3,
+  };
+
+  using FieldMetadata_IsPrefetch =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      IonSecureCmaAddToPoolEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IsPrefetch kIsPrefetch() { return {}; }
+  void set_is_prefetch(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IsPrefetch::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonSecureCmaAddToPoolEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PoolTotal =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      IonSecureCmaAddToPoolEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PoolTotal kPoolTotal() { return {}; }
+  void set_pool_total(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PoolTotal::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IonPrefetchingFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonPrefetchingFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonPrefetchingFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonPrefetchingFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_len() const { return at<1>().valid(); }
+  uint64_t len() const { return at<1>().as_uint64(); }
+};
+
+class IonPrefetchingFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonPrefetchingFtraceEvent_Decoder;
+  enum : int32_t {
+    kLenFieldNumber = 1,
+  };
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonPrefetchingFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IonCpSecureBufferStartFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonCpSecureBufferStartFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonCpSecureBufferStartFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonCpSecureBufferStartFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_align() const { return at<1>().valid(); }
+  uint64_t align() const { return at<1>().as_uint64(); }
+  bool has_flags() const { return at<2>().valid(); }
+  uint64_t flags() const { return at<2>().as_uint64(); }
+  bool has_heap_name() const { return at<3>().valid(); }
+  ::protozero::ConstChars heap_name() const { return at<3>().as_string(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint64_t len() const { return at<4>().as_uint64(); }
+};
+
+class IonCpSecureBufferStartFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonCpSecureBufferStartFtraceEvent_Decoder;
+  enum : int32_t {
+    kAlignFieldNumber = 1,
+    kFlagsFieldNumber = 2,
+    kHeapNameFieldNumber = 3,
+    kLenFieldNumber = 4,
+  };
+
+  using FieldMetadata_Align =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonCpSecureBufferStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Align kAlign() { return {}; }
+  void set_align(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Align::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonCpSecureBufferStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HeapName =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IonCpSecureBufferStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapName kHeapName() { return {}; }
+  void set_heap_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_HeapName::kFieldId, data, size);
+  }
+  void set_heap_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_HeapName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonCpSecureBufferStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IonCpSecureBufferEndFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonCpSecureBufferEndFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonCpSecureBufferEndFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonCpSecureBufferEndFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_align() const { return at<1>().valid(); }
+  uint64_t align() const { return at<1>().as_uint64(); }
+  bool has_flags() const { return at<2>().valid(); }
+  uint64_t flags() const { return at<2>().as_uint64(); }
+  bool has_heap_name() const { return at<3>().valid(); }
+  ::protozero::ConstChars heap_name() const { return at<3>().as_string(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint64_t len() const { return at<4>().as_uint64(); }
+};
+
+class IonCpSecureBufferEndFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonCpSecureBufferEndFtraceEvent_Decoder;
+  enum : int32_t {
+    kAlignFieldNumber = 1,
+    kFlagsFieldNumber = 2,
+    kHeapNameFieldNumber = 3,
+    kLenFieldNumber = 4,
+  };
+
+  using FieldMetadata_Align =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonCpSecureBufferEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Align kAlign() { return {}; }
+  void set_align(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Align::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonCpSecureBufferEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HeapName =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IonCpSecureBufferEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapName kHeapName() { return {}; }
+  void set_heap_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_HeapName::kFieldId, data, size);
+  }
+  void set_heap_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_HeapName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonCpSecureBufferEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IonCpAllocRetryFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonCpAllocRetryFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonCpAllocRetryFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonCpAllocRetryFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_tries() const { return at<1>().valid(); }
+  int32_t tries() const { return at<1>().as_int32(); }
+};
+
+class IonCpAllocRetryFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonCpAllocRetryFtraceEvent_Decoder;
+  enum : int32_t {
+    kTriesFieldNumber = 1,
+  };
+
+  using FieldMetadata_Tries =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      IonCpAllocRetryFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tries kTries() { return {}; }
+  void set_tries(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tries::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IonAllocBufferStartFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonAllocBufferStartFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonAllocBufferStartFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonAllocBufferStartFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_client_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars client_name() const { return at<1>().as_string(); }
+  bool has_flags() const { return at<2>().valid(); }
+  uint32_t flags() const { return at<2>().as_uint32(); }
+  bool has_heap_name() const { return at<3>().valid(); }
+  ::protozero::ConstChars heap_name() const { return at<3>().as_string(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint64_t len() const { return at<4>().as_uint64(); }
+  bool has_mask() const { return at<5>().valid(); }
+  uint32_t mask() const { return at<5>().as_uint32(); }
+};
+
+class IonAllocBufferStartFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonAllocBufferStartFtraceEvent_Decoder;
+  enum : int32_t {
+    kClientNameFieldNumber = 1,
+    kFlagsFieldNumber = 2,
+    kHeapNameFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kMaskFieldNumber = 5,
+  };
+
+  using FieldMetadata_ClientName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IonAllocBufferStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClientName kClientName() { return {}; }
+  void set_client_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ClientName::kFieldId, data, size);
+  }
+  void set_client_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ClientName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      IonAllocBufferStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HeapName =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IonAllocBufferStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapName kHeapName() { return {}; }
+  void set_heap_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_HeapName::kFieldId, data, size);
+  }
+  void set_heap_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_HeapName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonAllocBufferStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mask =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      IonAllocBufferStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mask kMask() { return {}; }
+  void set_mask(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mask::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IonAllocBufferFallbackFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonAllocBufferFallbackFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonAllocBufferFallbackFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonAllocBufferFallbackFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_client_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars client_name() const { return at<1>().as_string(); }
+  bool has_error() const { return at<2>().valid(); }
+  int64_t error() const { return at<2>().as_int64(); }
+  bool has_flags() const { return at<3>().valid(); }
+  uint32_t flags() const { return at<3>().as_uint32(); }
+  bool has_heap_name() const { return at<4>().valid(); }
+  ::protozero::ConstChars heap_name() const { return at<4>().as_string(); }
+  bool has_len() const { return at<5>().valid(); }
+  uint64_t len() const { return at<5>().as_uint64(); }
+  bool has_mask() const { return at<6>().valid(); }
+  uint32_t mask() const { return at<6>().as_uint32(); }
+};
+
+class IonAllocBufferFallbackFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonAllocBufferFallbackFtraceEvent_Decoder;
+  enum : int32_t {
+    kClientNameFieldNumber = 1,
+    kErrorFieldNumber = 2,
+    kFlagsFieldNumber = 3,
+    kHeapNameFieldNumber = 4,
+    kLenFieldNumber = 5,
+    kMaskFieldNumber = 6,
+  };
+
+  using FieldMetadata_ClientName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IonAllocBufferFallbackFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClientName kClientName() { return {}; }
+  void set_client_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ClientName::kFieldId, data, size);
+  }
+  void set_client_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ClientName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Error =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      IonAllocBufferFallbackFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Error kError() { return {}; }
+  void set_error(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Error::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      IonAllocBufferFallbackFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HeapName =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IonAllocBufferFallbackFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapName kHeapName() { return {}; }
+  void set_heap_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_HeapName::kFieldId, data, size);
+  }
+  void set_heap_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_HeapName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonAllocBufferFallbackFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mask =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      IonAllocBufferFallbackFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mask kMask() { return {}; }
+  void set_mask(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mask::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IonAllocBufferFailFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonAllocBufferFailFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonAllocBufferFailFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonAllocBufferFailFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_client_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars client_name() const { return at<1>().as_string(); }
+  bool has_error() const { return at<2>().valid(); }
+  int64_t error() const { return at<2>().as_int64(); }
+  bool has_flags() const { return at<3>().valid(); }
+  uint32_t flags() const { return at<3>().as_uint32(); }
+  bool has_heap_name() const { return at<4>().valid(); }
+  ::protozero::ConstChars heap_name() const { return at<4>().as_string(); }
+  bool has_len() const { return at<5>().valid(); }
+  uint64_t len() const { return at<5>().as_uint64(); }
+  bool has_mask() const { return at<6>().valid(); }
+  uint32_t mask() const { return at<6>().as_uint32(); }
+};
+
+class IonAllocBufferFailFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonAllocBufferFailFtraceEvent_Decoder;
+  enum : int32_t {
+    kClientNameFieldNumber = 1,
+    kErrorFieldNumber = 2,
+    kFlagsFieldNumber = 3,
+    kHeapNameFieldNumber = 4,
+    kLenFieldNumber = 5,
+    kMaskFieldNumber = 6,
+  };
+
+  using FieldMetadata_ClientName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IonAllocBufferFailFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClientName kClientName() { return {}; }
+  void set_client_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ClientName::kFieldId, data, size);
+  }
+  void set_client_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ClientName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Error =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      IonAllocBufferFailFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Error kError() { return {}; }
+  void set_error(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Error::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      IonAllocBufferFailFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HeapName =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IonAllocBufferFailFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapName kHeapName() { return {}; }
+  void set_heap_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_HeapName::kFieldId, data, size);
+  }
+  void set_heap_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_HeapName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonAllocBufferFailFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mask =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      IonAllocBufferFailFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mask kMask() { return {}; }
+  void set_mask(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mask::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IonAllocBufferEndFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IonAllocBufferEndFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IonAllocBufferEndFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IonAllocBufferEndFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_client_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars client_name() const { return at<1>().as_string(); }
+  bool has_flags() const { return at<2>().valid(); }
+  uint32_t flags() const { return at<2>().as_uint32(); }
+  bool has_heap_name() const { return at<3>().valid(); }
+  ::protozero::ConstChars heap_name() const { return at<3>().as_string(); }
+  bool has_len() const { return at<4>().valid(); }
+  uint64_t len() const { return at<4>().as_uint64(); }
+  bool has_mask() const { return at<5>().valid(); }
+  uint32_t mask() const { return at<5>().as_uint32(); }
+};
+
+class IonAllocBufferEndFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IonAllocBufferEndFtraceEvent_Decoder;
+  enum : int32_t {
+    kClientNameFieldNumber = 1,
+    kFlagsFieldNumber = 2,
+    kHeapNameFieldNumber = 3,
+    kLenFieldNumber = 4,
+    kMaskFieldNumber = 5,
+  };
+
+  using FieldMetadata_ClientName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IonAllocBufferEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClientName kClientName() { return {}; }
+  void set_client_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ClientName::kFieldId, data, size);
+  }
+  void set_client_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ClientName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      IonAllocBufferEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HeapName =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      IonAllocBufferEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapName kHeapName() { return {}; }
+  void set_heap_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_HeapName::kFieldId, data, size);
+  }
+  void set_heap_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_HeapName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IonAllocBufferEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mask =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      IonAllocBufferEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mask kMask() { return {}; }
+  void set_mask(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mask::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IommuSecPtblMapRangeStartFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IommuSecPtblMapRangeStartFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IommuSecPtblMapRangeStartFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IommuSecPtblMapRangeStartFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_len() const { return at<1>().valid(); }
+  uint64_t len() const { return at<1>().as_uint64(); }
+  bool has_num() const { return at<2>().valid(); }
+  int32_t num() const { return at<2>().as_int32(); }
+  bool has_pa() const { return at<3>().valid(); }
+  uint32_t pa() const { return at<3>().as_uint32(); }
+  bool has_sec_id() const { return at<4>().valid(); }
+  int32_t sec_id() const { return at<4>().as_int32(); }
+  bool has_va() const { return at<5>().valid(); }
+  uint64_t va() const { return at<5>().as_uint64(); }
+};
+
+class IommuSecPtblMapRangeStartFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IommuSecPtblMapRangeStartFtraceEvent_Decoder;
+  enum : int32_t {
+    kLenFieldNumber = 1,
+    kNumFieldNumber = 2,
+    kPaFieldNumber = 3,
+    kSecIdFieldNumber = 4,
+    kVaFieldNumber = 5,
+  };
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IommuSecPtblMapRangeStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Num =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      IommuSecPtblMapRangeStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Num kNum() { return {}; }
+  void set_num(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Num::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pa =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      IommuSecPtblMapRangeStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pa kPa() { return {}; }
+  void set_pa(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pa::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SecId =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      IommuSecPtblMapRangeStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SecId kSecId() { return {}; }
+  void set_sec_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SecId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Va =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IommuSecPtblMapRangeStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Va kVa() { return {}; }
+  void set_va(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Va::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IommuSecPtblMapRangeEndFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IommuSecPtblMapRangeEndFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IommuSecPtblMapRangeEndFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IommuSecPtblMapRangeEndFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_len() const { return at<1>().valid(); }
+  uint64_t len() const { return at<1>().as_uint64(); }
+  bool has_num() const { return at<2>().valid(); }
+  int32_t num() const { return at<2>().as_int32(); }
+  bool has_pa() const { return at<3>().valid(); }
+  uint32_t pa() const { return at<3>().as_uint32(); }
+  bool has_sec_id() const { return at<4>().valid(); }
+  int32_t sec_id() const { return at<4>().as_int32(); }
+  bool has_va() const { return at<5>().valid(); }
+  uint64_t va() const { return at<5>().as_uint64(); }
+};
+
+class IommuSecPtblMapRangeEndFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IommuSecPtblMapRangeEndFtraceEvent_Decoder;
+  enum : int32_t {
+    kLenFieldNumber = 1,
+    kNumFieldNumber = 2,
+    kPaFieldNumber = 3,
+    kSecIdFieldNumber = 4,
+    kVaFieldNumber = 5,
+  };
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IommuSecPtblMapRangeEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Num =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      IommuSecPtblMapRangeEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Num kNum() { return {}; }
+  void set_num(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Num::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pa =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      IommuSecPtblMapRangeEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pa kPa() { return {}; }
+  void set_pa(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pa::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SecId =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      IommuSecPtblMapRangeEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SecId kSecId() { return {}; }
+  void set_sec_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SecId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Va =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IommuSecPtblMapRangeEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Va kVa() { return {}; }
+  void set_va(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Va::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class IommuMapRangeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  IommuMapRangeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit IommuMapRangeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit IommuMapRangeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_chunk_size() const { return at<1>().valid(); }
+  uint64_t chunk_size() const { return at<1>().as_uint64(); }
+  bool has_len() const { return at<2>().valid(); }
+  uint64_t len() const { return at<2>().as_uint64(); }
+  bool has_pa() const { return at<3>().valid(); }
+  uint64_t pa() const { return at<3>().as_uint64(); }
+  bool has_va() const { return at<4>().valid(); }
+  uint64_t va() const { return at<4>().as_uint64(); }
+};
+
+class IommuMapRangeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = IommuMapRangeFtraceEvent_Decoder;
+  enum : int32_t {
+    kChunkSizeFieldNumber = 1,
+    kLenFieldNumber = 2,
+    kPaFieldNumber = 3,
+    kVaFieldNumber = 4,
+  };
+
+  using FieldMetadata_ChunkSize =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IommuMapRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChunkSize kChunkSize() { return {}; }
+  void set_chunk_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChunkSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Len =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IommuMapRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Len kLen() { return {}; }
+  void set_len(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Len::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pa =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IommuMapRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pa kPa() { return {}; }
+  void set_pa(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pa::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Va =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      IommuMapRangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Va kVa() { return {}; }
+  void set_va(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Va::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class DmaAllocContiguousRetryFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  DmaAllocContiguousRetryFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit DmaAllocContiguousRetryFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit DmaAllocContiguousRetryFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_tries() const { return at<1>().valid(); }
+  int32_t tries() const { return at<1>().as_int32(); }
+};
+
+class DmaAllocContiguousRetryFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = DmaAllocContiguousRetryFtraceEvent_Decoder;
+  enum : int32_t {
+    kTriesFieldNumber = 1,
+  };
+
+  using FieldMetadata_Tries =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      DmaAllocContiguousRetryFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tries kTries() { return {}; }
+  void set_tries(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tries::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class AllocPagesSysStartFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  AllocPagesSysStartFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AllocPagesSysStartFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AllocPagesSysStartFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_gfp_flags() const { return at<1>().valid(); }
+  uint32_t gfp_flags() const { return at<1>().as_uint32(); }
+  bool has_order() const { return at<2>().valid(); }
+  uint32_t order() const { return at<2>().as_uint32(); }
+};
+
+class AllocPagesSysStartFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = AllocPagesSysStartFtraceEvent_Decoder;
+  enum : int32_t {
+    kGfpFlagsFieldNumber = 1,
+    kOrderFieldNumber = 2,
+  };
+
+  using FieldMetadata_GfpFlags =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      AllocPagesSysStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GfpFlags kGfpFlags() { return {}; }
+  void set_gfp_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GfpFlags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      AllocPagesSysStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class AllocPagesSysFailFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  AllocPagesSysFailFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AllocPagesSysFailFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AllocPagesSysFailFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_gfp_flags() const { return at<1>().valid(); }
+  uint32_t gfp_flags() const { return at<1>().as_uint32(); }
+  bool has_order() const { return at<2>().valid(); }
+  uint32_t order() const { return at<2>().as_uint32(); }
+};
+
+class AllocPagesSysFailFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = AllocPagesSysFailFtraceEvent_Decoder;
+  enum : int32_t {
+    kGfpFlagsFieldNumber = 1,
+    kOrderFieldNumber = 2,
+  };
+
+  using FieldMetadata_GfpFlags =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      AllocPagesSysFailFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GfpFlags kGfpFlags() { return {}; }
+  void set_gfp_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GfpFlags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      AllocPagesSysFailFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class AllocPagesSysEndFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  AllocPagesSysEndFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AllocPagesSysEndFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AllocPagesSysEndFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_gfp_flags() const { return at<1>().valid(); }
+  uint32_t gfp_flags() const { return at<1>().as_uint32(); }
+  bool has_order() const { return at<2>().valid(); }
+  uint32_t order() const { return at<2>().as_uint32(); }
+};
+
+class AllocPagesSysEndFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = AllocPagesSysEndFtraceEvent_Decoder;
+  enum : int32_t {
+    kGfpFlagsFieldNumber = 1,
+    kOrderFieldNumber = 2,
+  };
+
+  using FieldMetadata_GfpFlags =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      AllocPagesSysEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GfpFlags kGfpFlags() { return {}; }
+  void set_gfp_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GfpFlags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      AllocPagesSysEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class AllocPagesIommuStartFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  AllocPagesIommuStartFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AllocPagesIommuStartFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AllocPagesIommuStartFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_gfp_flags() const { return at<1>().valid(); }
+  uint32_t gfp_flags() const { return at<1>().as_uint32(); }
+  bool has_order() const { return at<2>().valid(); }
+  uint32_t order() const { return at<2>().as_uint32(); }
+};
+
+class AllocPagesIommuStartFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = AllocPagesIommuStartFtraceEvent_Decoder;
+  enum : int32_t {
+    kGfpFlagsFieldNumber = 1,
+    kOrderFieldNumber = 2,
+  };
+
+  using FieldMetadata_GfpFlags =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      AllocPagesIommuStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GfpFlags kGfpFlags() { return {}; }
+  void set_gfp_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GfpFlags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      AllocPagesIommuStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class AllocPagesIommuFailFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  AllocPagesIommuFailFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AllocPagesIommuFailFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AllocPagesIommuFailFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_gfp_flags() const { return at<1>().valid(); }
+  uint32_t gfp_flags() const { return at<1>().as_uint32(); }
+  bool has_order() const { return at<2>().valid(); }
+  uint32_t order() const { return at<2>().as_uint32(); }
+};
+
+class AllocPagesIommuFailFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = AllocPagesIommuFailFtraceEvent_Decoder;
+  enum : int32_t {
+    kGfpFlagsFieldNumber = 1,
+    kOrderFieldNumber = 2,
+  };
+
+  using FieldMetadata_GfpFlags =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      AllocPagesIommuFailFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GfpFlags kGfpFlags() { return {}; }
+  void set_gfp_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GfpFlags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      AllocPagesIommuFailFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class AllocPagesIommuEndFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  AllocPagesIommuEndFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AllocPagesIommuEndFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AllocPagesIommuEndFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_gfp_flags() const { return at<1>().valid(); }
+  uint32_t gfp_flags() const { return at<1>().as_uint32(); }
+  bool has_order() const { return at<2>().valid(); }
+  uint32_t order() const { return at<2>().as_uint32(); }
+};
+
+class AllocPagesIommuEndFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = AllocPagesIommuEndFtraceEvent_Decoder;
+  enum : int32_t {
+    kGfpFlagsFieldNumber = 1,
+    kOrderFieldNumber = 2,
+  };
+
+  using FieldMetadata_GfpFlags =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      AllocPagesIommuEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GfpFlags kGfpFlags() { return {}; }
+  void set_gfp_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GfpFlags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      AllocPagesIommuEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/lowmemorykiller.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_LOWMEMORYKILLER_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_LOWMEMORYKILLER_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class LowmemoryKillFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  LowmemoryKillFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit LowmemoryKillFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit LowmemoryKillFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_comm() const { return at<1>().valid(); }
+  ::protozero::ConstChars comm() const { return at<1>().as_string(); }
+  bool has_pid() const { return at<2>().valid(); }
+  int32_t pid() const { return at<2>().as_int32(); }
+  bool has_pagecache_size() const { return at<3>().valid(); }
+  int64_t pagecache_size() const { return at<3>().as_int64(); }
+  bool has_pagecache_limit() const { return at<4>().valid(); }
+  int64_t pagecache_limit() const { return at<4>().as_int64(); }
+  bool has_free() const { return at<5>().valid(); }
+  int64_t free() const { return at<5>().as_int64(); }
+};
+
+class LowmemoryKillFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = LowmemoryKillFtraceEvent_Decoder;
+  enum : int32_t {
+    kCommFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kPagecacheSizeFieldNumber = 3,
+    kPagecacheLimitFieldNumber = 4,
+    kFreeFieldNumber = 5,
+  };
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      LowmemoryKillFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      LowmemoryKillFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PagecacheSize =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      LowmemoryKillFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PagecacheSize kPagecacheSize() { return {}; }
+  void set_pagecache_size(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PagecacheSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PagecacheLimit =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      LowmemoryKillFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PagecacheLimit kPagecacheLimit() { return {}; }
+  void set_pagecache_limit(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PagecacheLimit::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Free =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      LowmemoryKillFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Free kFree() { return {}; }
+  void set_free(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Free::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/mali.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_MALI_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_MALI_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class MaliTracingMarkWriteFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MaliTracingMarkWriteFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MaliTracingMarkWriteFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MaliTracingMarkWriteFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_pid() const { return at<2>().valid(); }
+  int32_t pid() const { return at<2>().as_int32(); }
+  bool has_type() const { return at<3>().valid(); }
+  uint32_t type() const { return at<3>().as_uint32(); }
+  bool has_value() const { return at<4>().valid(); }
+  int32_t value() const { return at<4>().as_int32(); }
+};
+
+class MaliTracingMarkWriteFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MaliTracingMarkWriteFtraceEvent_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kTypeFieldNumber = 3,
+    kValueFieldNumber = 4,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      MaliTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MaliTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MaliTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Value =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MaliTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Value kValue() { return {}; }
+  void set_value(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Value::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/mdss.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_MDSS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_MDSS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class RotatorBwAoAsContextFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  RotatorBwAoAsContextFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit RotatorBwAoAsContextFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit RotatorBwAoAsContextFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_state() const { return at<1>().valid(); }
+  uint32_t state() const { return at<1>().as_uint32(); }
+};
+
+class RotatorBwAoAsContextFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = RotatorBwAoAsContextFtraceEvent_Decoder;
+  enum : int32_t {
+    kStateFieldNumber = 1,
+  };
+
+  using FieldMetadata_State =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      RotatorBwAoAsContextFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_State kState() { return {}; }
+  void set_state(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_State::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpPerfUpdateBusFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpPerfUpdateBusFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpPerfUpdateBusFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpPerfUpdateBusFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_client() const { return at<1>().valid(); }
+  int32_t client() const { return at<1>().as_int32(); }
+  bool has_ab_quota() const { return at<2>().valid(); }
+  uint64_t ab_quota() const { return at<2>().as_uint64(); }
+  bool has_ib_quota() const { return at<3>().valid(); }
+  uint64_t ib_quota() const { return at<3>().as_uint64(); }
+};
+
+class MdpPerfUpdateBusFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpPerfUpdateBusFtraceEvent_Decoder;
+  enum : int32_t {
+    kClientFieldNumber = 1,
+    kAbQuotaFieldNumber = 2,
+    kIbQuotaFieldNumber = 3,
+  };
+
+  using FieldMetadata_Client =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MdpPerfUpdateBusFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Client kClient() { return {}; }
+  void set_client(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Client::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AbQuota =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MdpPerfUpdateBusFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AbQuota kAbQuota() { return {}; }
+  void set_ab_quota(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AbQuota::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IbQuota =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MdpPerfUpdateBusFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IbQuota kIbQuota() { return {}; }
+  void set_ib_quota(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IbQuota::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpPerfPrefillCalcFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/10, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpPerfPrefillCalcFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpPerfPrefillCalcFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpPerfPrefillCalcFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pnum() const { return at<1>().valid(); }
+  uint32_t pnum() const { return at<1>().as_uint32(); }
+  bool has_latency_buf() const { return at<2>().valid(); }
+  uint32_t latency_buf() const { return at<2>().as_uint32(); }
+  bool has_ot() const { return at<3>().valid(); }
+  uint32_t ot() const { return at<3>().as_uint32(); }
+  bool has_y_buf() const { return at<4>().valid(); }
+  uint32_t y_buf() const { return at<4>().as_uint32(); }
+  bool has_y_scaler() const { return at<5>().valid(); }
+  uint32_t y_scaler() const { return at<5>().as_uint32(); }
+  bool has_pp_lines() const { return at<6>().valid(); }
+  uint32_t pp_lines() const { return at<6>().as_uint32(); }
+  bool has_pp_bytes() const { return at<7>().valid(); }
+  uint32_t pp_bytes() const { return at<7>().as_uint32(); }
+  bool has_post_sc() const { return at<8>().valid(); }
+  uint32_t post_sc() const { return at<8>().as_uint32(); }
+  bool has_fbc_bytes() const { return at<9>().valid(); }
+  uint32_t fbc_bytes() const { return at<9>().as_uint32(); }
+  bool has_prefill_bytes() const { return at<10>().valid(); }
+  uint32_t prefill_bytes() const { return at<10>().as_uint32(); }
+};
+
+class MdpPerfPrefillCalcFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpPerfPrefillCalcFtraceEvent_Decoder;
+  enum : int32_t {
+    kPnumFieldNumber = 1,
+    kLatencyBufFieldNumber = 2,
+    kOtFieldNumber = 3,
+    kYBufFieldNumber = 4,
+    kYScalerFieldNumber = 5,
+    kPpLinesFieldNumber = 6,
+    kPpBytesFieldNumber = 7,
+    kPostScFieldNumber = 8,
+    kFbcBytesFieldNumber = 9,
+    kPrefillBytesFieldNumber = 10,
+  };
+
+  using FieldMetadata_Pnum =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfPrefillCalcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pnum kPnum() { return {}; }
+  void set_pnum(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pnum::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LatencyBuf =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfPrefillCalcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LatencyBuf kLatencyBuf() { return {}; }
+  void set_latency_buf(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LatencyBuf::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ot =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfPrefillCalcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ot kOt() { return {}; }
+  void set_ot(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ot::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_YBuf =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfPrefillCalcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_YBuf kYBuf() { return {}; }
+  void set_y_buf(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_YBuf::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_YScaler =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfPrefillCalcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_YScaler kYScaler() { return {}; }
+  void set_y_scaler(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_YScaler::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PpLines =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfPrefillCalcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PpLines kPpLines() { return {}; }
+  void set_pp_lines(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PpLines::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PpBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfPrefillCalcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PpBytes kPpBytes() { return {}; }
+  void set_pp_bytes(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PpBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PostSc =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfPrefillCalcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PostSc kPostSc() { return {}; }
+  void set_post_sc(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PostSc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FbcBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfPrefillCalcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FbcBytes kFbcBytes() { return {}; }
+  void set_fbc_bytes(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FbcBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PrefillBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfPrefillCalcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PrefillBytes kPrefillBytes() { return {}; }
+  void set_prefill_bytes(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PrefillBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpCmdWaitPingpongFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpCmdWaitPingpongFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpCmdWaitPingpongFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpCmdWaitPingpongFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_ctl_num() const { return at<1>().valid(); }
+  uint32_t ctl_num() const { return at<1>().as_uint32(); }
+  bool has_kickoff_cnt() const { return at<2>().valid(); }
+  int32_t kickoff_cnt() const { return at<2>().as_int32(); }
+};
+
+class MdpCmdWaitPingpongFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpCmdWaitPingpongFtraceEvent_Decoder;
+  enum : int32_t {
+    kCtlNumFieldNumber = 1,
+    kKickoffCntFieldNumber = 2,
+  };
+
+  using FieldMetadata_CtlNum =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpCmdWaitPingpongFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CtlNum kCtlNum() { return {}; }
+  void set_ctl_num(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CtlNum::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_KickoffCnt =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MdpCmdWaitPingpongFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_KickoffCnt kKickoffCnt() { return {}; }
+  void set_kickoff_cnt(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_KickoffCnt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpVideoUnderrunDoneFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpVideoUnderrunDoneFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpVideoUnderrunDoneFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpVideoUnderrunDoneFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_ctl_num() const { return at<1>().valid(); }
+  uint32_t ctl_num() const { return at<1>().as_uint32(); }
+  bool has_underrun_cnt() const { return at<2>().valid(); }
+  uint32_t underrun_cnt() const { return at<2>().as_uint32(); }
+};
+
+class MdpVideoUnderrunDoneFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpVideoUnderrunDoneFtraceEvent_Decoder;
+  enum : int32_t {
+    kCtlNumFieldNumber = 1,
+    kUnderrunCntFieldNumber = 2,
+  };
+
+  using FieldMetadata_CtlNum =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpVideoUnderrunDoneFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CtlNum kCtlNum() { return {}; }
+  void set_ctl_num(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CtlNum::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UnderrunCnt =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpVideoUnderrunDoneFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UnderrunCnt kUnderrunCnt() { return {}; }
+  void set_underrun_cnt(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UnderrunCnt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpPerfSetWmLevelsFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpPerfSetWmLevelsFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpPerfSetWmLevelsFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpPerfSetWmLevelsFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pnum() const { return at<1>().valid(); }
+  uint32_t pnum() const { return at<1>().as_uint32(); }
+  bool has_use_space() const { return at<2>().valid(); }
+  uint32_t use_space() const { return at<2>().as_uint32(); }
+  bool has_priority_bytes() const { return at<3>().valid(); }
+  uint32_t priority_bytes() const { return at<3>().as_uint32(); }
+  bool has_wm0() const { return at<4>().valid(); }
+  uint32_t wm0() const { return at<4>().as_uint32(); }
+  bool has_wm1() const { return at<5>().valid(); }
+  uint32_t wm1() const { return at<5>().as_uint32(); }
+  bool has_wm2() const { return at<6>().valid(); }
+  uint32_t wm2() const { return at<6>().as_uint32(); }
+  bool has_mb_cnt() const { return at<7>().valid(); }
+  uint32_t mb_cnt() const { return at<7>().as_uint32(); }
+  bool has_mb_size() const { return at<8>().valid(); }
+  uint32_t mb_size() const { return at<8>().as_uint32(); }
+};
+
+class MdpPerfSetWmLevelsFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpPerfSetWmLevelsFtraceEvent_Decoder;
+  enum : int32_t {
+    kPnumFieldNumber = 1,
+    kUseSpaceFieldNumber = 2,
+    kPriorityBytesFieldNumber = 3,
+    kWm0FieldNumber = 4,
+    kWm1FieldNumber = 5,
+    kWm2FieldNumber = 6,
+    kMbCntFieldNumber = 7,
+    kMbSizeFieldNumber = 8,
+  };
+
+  using FieldMetadata_Pnum =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetWmLevelsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pnum kPnum() { return {}; }
+  void set_pnum(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pnum::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UseSpace =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetWmLevelsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UseSpace kUseSpace() { return {}; }
+  void set_use_space(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UseSpace::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PriorityBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetWmLevelsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PriorityBytes kPriorityBytes() { return {}; }
+  void set_priority_bytes(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PriorityBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Wm0 =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetWmLevelsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Wm0 kWm0() { return {}; }
+  void set_wm0(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Wm0::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Wm1 =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetWmLevelsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Wm1 kWm1() { return {}; }
+  void set_wm1(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Wm1::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Wm2 =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetWmLevelsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Wm2 kWm2() { return {}; }
+  void set_wm2(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Wm2::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MbCnt =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetWmLevelsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MbCnt kMbCnt() { return {}; }
+  void set_mb_cnt(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MbCnt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MbSize =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetWmLevelsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MbSize kMbSize() { return {}; }
+  void set_mb_size(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MbSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpMixerUpdateFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpMixerUpdateFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpMixerUpdateFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpMixerUpdateFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_mixer_num() const { return at<1>().valid(); }
+  uint32_t mixer_num() const { return at<1>().as_uint32(); }
+};
+
+class MdpMixerUpdateFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpMixerUpdateFtraceEvent_Decoder;
+  enum : int32_t {
+    kMixerNumFieldNumber = 1,
+  };
+
+  using FieldMetadata_MixerNum =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpMixerUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MixerNum kMixerNum() { return {}; }
+  void set_mixer_num(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MixerNum::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpCmdReleaseBwFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpCmdReleaseBwFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpCmdReleaseBwFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpCmdReleaseBwFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_ctl_num() const { return at<1>().valid(); }
+  uint32_t ctl_num() const { return at<1>().as_uint32(); }
+};
+
+class MdpCmdReleaseBwFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpCmdReleaseBwFtraceEvent_Decoder;
+  enum : int32_t {
+    kCtlNumFieldNumber = 1,
+  };
+
+  using FieldMetadata_CtlNum =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpCmdReleaseBwFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CtlNum kCtlNum() { return {}; }
+  void set_ctl_num(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CtlNum::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpTraceCounterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpTraceCounterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpTraceCounterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpTraceCounterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  int32_t pid() const { return at<1>().as_int32(); }
+  bool has_counter_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars counter_name() const { return at<2>().as_string(); }
+  bool has_value() const { return at<3>().valid(); }
+  int32_t value() const { return at<3>().as_int32(); }
+};
+
+class MdpTraceCounterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpTraceCounterFtraceEvent_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kCounterNameFieldNumber = 2,
+    kValueFieldNumber = 3,
+  };
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MdpTraceCounterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CounterName =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      MdpTraceCounterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CounterName kCounterName() { return {}; }
+  void set_counter_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_CounterName::kFieldId, data, size);
+  }
+  void set_counter_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_CounterName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Value =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MdpTraceCounterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Value kValue() { return {}; }
+  void set_value(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Value::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpPerfSetQosLutsFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpPerfSetQosLutsFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpPerfSetQosLutsFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpPerfSetQosLutsFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pnum() const { return at<1>().valid(); }
+  uint32_t pnum() const { return at<1>().as_uint32(); }
+  bool has_fmt() const { return at<2>().valid(); }
+  uint32_t fmt() const { return at<2>().as_uint32(); }
+  bool has_intf() const { return at<3>().valid(); }
+  uint32_t intf() const { return at<3>().as_uint32(); }
+  bool has_rot() const { return at<4>().valid(); }
+  uint32_t rot() const { return at<4>().as_uint32(); }
+  bool has_fl() const { return at<5>().valid(); }
+  uint32_t fl() const { return at<5>().as_uint32(); }
+  bool has_lut() const { return at<6>().valid(); }
+  uint32_t lut() const { return at<6>().as_uint32(); }
+  bool has_linear() const { return at<7>().valid(); }
+  uint32_t linear() const { return at<7>().as_uint32(); }
+};
+
+class MdpPerfSetQosLutsFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpPerfSetQosLutsFtraceEvent_Decoder;
+  enum : int32_t {
+    kPnumFieldNumber = 1,
+    kFmtFieldNumber = 2,
+    kIntfFieldNumber = 3,
+    kRotFieldNumber = 4,
+    kFlFieldNumber = 5,
+    kLutFieldNumber = 6,
+    kLinearFieldNumber = 7,
+  };
+
+  using FieldMetadata_Pnum =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetQosLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pnum kPnum() { return {}; }
+  void set_pnum(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pnum::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Fmt =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetQosLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Fmt kFmt() { return {}; }
+  void set_fmt(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Fmt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Intf =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetQosLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Intf kIntf() { return {}; }
+  void set_intf(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Intf::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rot =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetQosLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rot kRot() { return {}; }
+  void set_rot(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rot::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Fl =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetQosLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Fl kFl() { return {}; }
+  void set_fl(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Fl::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lut =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetQosLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lut kLut() { return {}; }
+  void set_lut(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lut::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Linear =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetQosLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Linear kLinear() { return {}; }
+  void set_linear(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Linear::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpMisrCrcFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpMisrCrcFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpMisrCrcFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpMisrCrcFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_block_id() const { return at<1>().valid(); }
+  uint32_t block_id() const { return at<1>().as_uint32(); }
+  bool has_vsync_cnt() const { return at<2>().valid(); }
+  uint32_t vsync_cnt() const { return at<2>().as_uint32(); }
+  bool has_crc() const { return at<3>().valid(); }
+  uint32_t crc() const { return at<3>().as_uint32(); }
+};
+
+class MdpMisrCrcFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpMisrCrcFtraceEvent_Decoder;
+  enum : int32_t {
+    kBlockIdFieldNumber = 1,
+    kVsyncCntFieldNumber = 2,
+    kCrcFieldNumber = 3,
+  };
+
+  using FieldMetadata_BlockId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpMisrCrcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BlockId kBlockId() { return {}; }
+  void set_block_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BlockId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_VsyncCnt =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpMisrCrcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VsyncCnt kVsyncCnt() { return {}; }
+  void set_vsync_cnt(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_VsyncCnt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Crc =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpMisrCrcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Crc kCrc() { return {}; }
+  void set_crc(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Crc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpCmdReadptrDoneFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpCmdReadptrDoneFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpCmdReadptrDoneFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpCmdReadptrDoneFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_ctl_num() const { return at<1>().valid(); }
+  uint32_t ctl_num() const { return at<1>().as_uint32(); }
+  bool has_koff_cnt() const { return at<2>().valid(); }
+  int32_t koff_cnt() const { return at<2>().as_int32(); }
+};
+
+class MdpCmdReadptrDoneFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpCmdReadptrDoneFtraceEvent_Decoder;
+  enum : int32_t {
+    kCtlNumFieldNumber = 1,
+    kKoffCntFieldNumber = 2,
+  };
+
+  using FieldMetadata_CtlNum =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpCmdReadptrDoneFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CtlNum kCtlNum() { return {}; }
+  void set_ctl_num(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CtlNum::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_KoffCnt =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MdpCmdReadptrDoneFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_KoffCnt kKoffCnt() { return {}; }
+  void set_koff_cnt(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_KoffCnt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpSsppSetFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/16, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpSsppSetFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpSsppSetFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpSsppSetFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_num() const { return at<1>().valid(); }
+  uint32_t num() const { return at<1>().as_uint32(); }
+  bool has_play_cnt() const { return at<2>().valid(); }
+  uint32_t play_cnt() const { return at<2>().as_uint32(); }
+  bool has_mixer() const { return at<3>().valid(); }
+  uint32_t mixer() const { return at<3>().as_uint32(); }
+  bool has_stage() const { return at<4>().valid(); }
+  uint32_t stage() const { return at<4>().as_uint32(); }
+  bool has_flags() const { return at<5>().valid(); }
+  uint32_t flags() const { return at<5>().as_uint32(); }
+  bool has_format() const { return at<6>().valid(); }
+  uint32_t format() const { return at<6>().as_uint32(); }
+  bool has_img_w() const { return at<7>().valid(); }
+  uint32_t img_w() const { return at<7>().as_uint32(); }
+  bool has_img_h() const { return at<8>().valid(); }
+  uint32_t img_h() const { return at<8>().as_uint32(); }
+  bool has_src_x() const { return at<9>().valid(); }
+  uint32_t src_x() const { return at<9>().as_uint32(); }
+  bool has_src_y() const { return at<10>().valid(); }
+  uint32_t src_y() const { return at<10>().as_uint32(); }
+  bool has_src_w() const { return at<11>().valid(); }
+  uint32_t src_w() const { return at<11>().as_uint32(); }
+  bool has_src_h() const { return at<12>().valid(); }
+  uint32_t src_h() const { return at<12>().as_uint32(); }
+  bool has_dst_x() const { return at<13>().valid(); }
+  uint32_t dst_x() const { return at<13>().as_uint32(); }
+  bool has_dst_y() const { return at<14>().valid(); }
+  uint32_t dst_y() const { return at<14>().as_uint32(); }
+  bool has_dst_w() const { return at<15>().valid(); }
+  uint32_t dst_w() const { return at<15>().as_uint32(); }
+  bool has_dst_h() const { return at<16>().valid(); }
+  uint32_t dst_h() const { return at<16>().as_uint32(); }
+};
+
+class MdpSsppSetFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpSsppSetFtraceEvent_Decoder;
+  enum : int32_t {
+    kNumFieldNumber = 1,
+    kPlayCntFieldNumber = 2,
+    kMixerFieldNumber = 3,
+    kStageFieldNumber = 4,
+    kFlagsFieldNumber = 5,
+    kFormatFieldNumber = 6,
+    kImgWFieldNumber = 7,
+    kImgHFieldNumber = 8,
+    kSrcXFieldNumber = 9,
+    kSrcYFieldNumber = 10,
+    kSrcWFieldNumber = 11,
+    kSrcHFieldNumber = 12,
+    kDstXFieldNumber = 13,
+    kDstYFieldNumber = 14,
+    kDstWFieldNumber = 15,
+    kDstHFieldNumber = 16,
+  };
+
+  using FieldMetadata_Num =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppSetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Num kNum() { return {}; }
+  void set_num(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Num::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PlayCnt =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppSetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PlayCnt kPlayCnt() { return {}; }
+  void set_play_cnt(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PlayCnt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mixer =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppSetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mixer kMixer() { return {}; }
+  void set_mixer(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mixer::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Stage =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppSetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Stage kStage() { return {}; }
+  void set_stage(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Stage::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppSetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Format =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppSetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Format kFormat() { return {}; }
+  void set_format(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Format::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ImgW =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppSetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ImgW kImgW() { return {}; }
+  void set_img_w(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ImgW::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ImgH =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppSetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ImgH kImgH() { return {}; }
+  void set_img_h(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ImgH::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SrcX =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppSetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SrcX kSrcX() { return {}; }
+  void set_src_x(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SrcX::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SrcY =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppSetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SrcY kSrcY() { return {}; }
+  void set_src_y(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SrcY::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SrcW =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppSetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SrcW kSrcW() { return {}; }
+  void set_src_w(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SrcW::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SrcH =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppSetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SrcH kSrcH() { return {}; }
+  void set_src_h(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SrcH::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DstX =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppSetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DstX kDstX() { return {}; }
+  void set_dst_x(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DstX::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DstY =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppSetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DstY kDstY() { return {}; }
+  void set_dst_y(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DstY::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DstW =
+    ::protozero::proto_utils::FieldMetadata<
+      15,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppSetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DstW kDstW() { return {}; }
+  void set_dst_w(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DstW::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DstH =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppSetFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DstH kDstH() { return {}; }
+  void set_dst_h(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DstH::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpPerfSetPanicLutsFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpPerfSetPanicLutsFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpPerfSetPanicLutsFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpPerfSetPanicLutsFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pnum() const { return at<1>().valid(); }
+  uint32_t pnum() const { return at<1>().as_uint32(); }
+  bool has_fmt() const { return at<2>().valid(); }
+  uint32_t fmt() const { return at<2>().as_uint32(); }
+  bool has_mode() const { return at<3>().valid(); }
+  uint32_t mode() const { return at<3>().as_uint32(); }
+  bool has_panic_lut() const { return at<4>().valid(); }
+  uint32_t panic_lut() const { return at<4>().as_uint32(); }
+  bool has_robust_lut() const { return at<5>().valid(); }
+  uint32_t robust_lut() const { return at<5>().as_uint32(); }
+};
+
+class MdpPerfSetPanicLutsFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpPerfSetPanicLutsFtraceEvent_Decoder;
+  enum : int32_t {
+    kPnumFieldNumber = 1,
+    kFmtFieldNumber = 2,
+    kModeFieldNumber = 3,
+    kPanicLutFieldNumber = 4,
+    kRobustLutFieldNumber = 5,
+  };
+
+  using FieldMetadata_Pnum =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetPanicLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pnum kPnum() { return {}; }
+  void set_pnum(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pnum::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Fmt =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetPanicLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Fmt kFmt() { return {}; }
+  void set_fmt(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Fmt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mode =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetPanicLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mode kMode() { return {}; }
+  void set_mode(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PanicLut =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetPanicLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PanicLut kPanicLut() { return {}; }
+  void set_panic_lut(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PanicLut::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RobustLut =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetPanicLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RobustLut kRobustLut() { return {}; }
+  void set_robust_lut(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RobustLut::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpCompareBwFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpCompareBwFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpCompareBwFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpCompareBwFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_new_ab() const { return at<1>().valid(); }
+  uint64_t new_ab() const { return at<1>().as_uint64(); }
+  bool has_new_ib() const { return at<2>().valid(); }
+  uint64_t new_ib() const { return at<2>().as_uint64(); }
+  bool has_new_wb() const { return at<3>().valid(); }
+  uint64_t new_wb() const { return at<3>().as_uint64(); }
+  bool has_old_ab() const { return at<4>().valid(); }
+  uint64_t old_ab() const { return at<4>().as_uint64(); }
+  bool has_old_ib() const { return at<5>().valid(); }
+  uint64_t old_ib() const { return at<5>().as_uint64(); }
+  bool has_old_wb() const { return at<6>().valid(); }
+  uint64_t old_wb() const { return at<6>().as_uint64(); }
+  bool has_params_changed() const { return at<7>().valid(); }
+  uint32_t params_changed() const { return at<7>().as_uint32(); }
+  bool has_update_bw() const { return at<8>().valid(); }
+  uint32_t update_bw() const { return at<8>().as_uint32(); }
+};
+
+class MdpCompareBwFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpCompareBwFtraceEvent_Decoder;
+  enum : int32_t {
+    kNewAbFieldNumber = 1,
+    kNewIbFieldNumber = 2,
+    kNewWbFieldNumber = 3,
+    kOldAbFieldNumber = 4,
+    kOldIbFieldNumber = 5,
+    kOldWbFieldNumber = 6,
+    kParamsChangedFieldNumber = 7,
+    kUpdateBwFieldNumber = 8,
+  };
+
+  using FieldMetadata_NewAb =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MdpCompareBwFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NewAb kNewAb() { return {}; }
+  void set_new_ab(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NewAb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NewIb =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MdpCompareBwFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NewIb kNewIb() { return {}; }
+  void set_new_ib(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NewIb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NewWb =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MdpCompareBwFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NewWb kNewWb() { return {}; }
+  void set_new_wb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NewWb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OldAb =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MdpCompareBwFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OldAb kOldAb() { return {}; }
+  void set_old_ab(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OldAb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OldIb =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MdpCompareBwFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OldIb kOldIb() { return {}; }
+  void set_old_ib(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OldIb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OldWb =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MdpCompareBwFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OldWb kOldWb() { return {}; }
+  void set_old_wb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OldWb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ParamsChanged =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpCompareBwFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ParamsChanged kParamsChanged() { return {}; }
+  void set_params_changed(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ParamsChanged::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UpdateBw =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpCompareBwFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UpdateBw kUpdateBw() { return {}; }
+  void set_update_bw(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UpdateBw::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpCmdPingpongDoneFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpCmdPingpongDoneFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpCmdPingpongDoneFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpCmdPingpongDoneFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_ctl_num() const { return at<1>().valid(); }
+  uint32_t ctl_num() const { return at<1>().as_uint32(); }
+  bool has_intf_num() const { return at<2>().valid(); }
+  uint32_t intf_num() const { return at<2>().as_uint32(); }
+  bool has_pp_num() const { return at<3>().valid(); }
+  uint32_t pp_num() const { return at<3>().as_uint32(); }
+  bool has_koff_cnt() const { return at<4>().valid(); }
+  int32_t koff_cnt() const { return at<4>().as_int32(); }
+};
+
+class MdpCmdPingpongDoneFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpCmdPingpongDoneFtraceEvent_Decoder;
+  enum : int32_t {
+    kCtlNumFieldNumber = 1,
+    kIntfNumFieldNumber = 2,
+    kPpNumFieldNumber = 3,
+    kKoffCntFieldNumber = 4,
+  };
+
+  using FieldMetadata_CtlNum =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpCmdPingpongDoneFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CtlNum kCtlNum() { return {}; }
+  void set_ctl_num(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CtlNum::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IntfNum =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpCmdPingpongDoneFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IntfNum kIntfNum() { return {}; }
+  void set_intf_num(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IntfNum::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PpNum =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpCmdPingpongDoneFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PpNum kPpNum() { return {}; }
+  void set_pp_num(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PpNum::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_KoffCnt =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MdpCmdPingpongDoneFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_KoffCnt kKoffCnt() { return {}; }
+  void set_koff_cnt(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_KoffCnt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TracingMarkWriteFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TracingMarkWriteFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TracingMarkWriteFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TracingMarkWriteFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  int32_t pid() const { return at<1>().as_int32(); }
+  bool has_trace_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars trace_name() const { return at<2>().as_string(); }
+  bool has_trace_begin() const { return at<3>().valid(); }
+  uint32_t trace_begin() const { return at<3>().as_uint32(); }
+};
+
+class TracingMarkWriteFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = TracingMarkWriteFtraceEvent_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kTraceNameFieldNumber = 2,
+    kTraceBeginFieldNumber = 3,
+  };
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TraceName =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceName kTraceName() { return {}; }
+  void set_trace_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_TraceName::kFieldId, data, size);
+  }
+  void set_trace_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_TraceName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TraceBegin =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceBegin kTraceBegin() { return {}; }
+  void set_trace_begin(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TraceBegin::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpSsppChangeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/16, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpSsppChangeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpSsppChangeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpSsppChangeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_num() const { return at<1>().valid(); }
+  uint32_t num() const { return at<1>().as_uint32(); }
+  bool has_play_cnt() const { return at<2>().valid(); }
+  uint32_t play_cnt() const { return at<2>().as_uint32(); }
+  bool has_mixer() const { return at<3>().valid(); }
+  uint32_t mixer() const { return at<3>().as_uint32(); }
+  bool has_stage() const { return at<4>().valid(); }
+  uint32_t stage() const { return at<4>().as_uint32(); }
+  bool has_flags() const { return at<5>().valid(); }
+  uint32_t flags() const { return at<5>().as_uint32(); }
+  bool has_format() const { return at<6>().valid(); }
+  uint32_t format() const { return at<6>().as_uint32(); }
+  bool has_img_w() const { return at<7>().valid(); }
+  uint32_t img_w() const { return at<7>().as_uint32(); }
+  bool has_img_h() const { return at<8>().valid(); }
+  uint32_t img_h() const { return at<8>().as_uint32(); }
+  bool has_src_x() const { return at<9>().valid(); }
+  uint32_t src_x() const { return at<9>().as_uint32(); }
+  bool has_src_y() const { return at<10>().valid(); }
+  uint32_t src_y() const { return at<10>().as_uint32(); }
+  bool has_src_w() const { return at<11>().valid(); }
+  uint32_t src_w() const { return at<11>().as_uint32(); }
+  bool has_src_h() const { return at<12>().valid(); }
+  uint32_t src_h() const { return at<12>().as_uint32(); }
+  bool has_dst_x() const { return at<13>().valid(); }
+  uint32_t dst_x() const { return at<13>().as_uint32(); }
+  bool has_dst_y() const { return at<14>().valid(); }
+  uint32_t dst_y() const { return at<14>().as_uint32(); }
+  bool has_dst_w() const { return at<15>().valid(); }
+  uint32_t dst_w() const { return at<15>().as_uint32(); }
+  bool has_dst_h() const { return at<16>().valid(); }
+  uint32_t dst_h() const { return at<16>().as_uint32(); }
+};
+
+class MdpSsppChangeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpSsppChangeFtraceEvent_Decoder;
+  enum : int32_t {
+    kNumFieldNumber = 1,
+    kPlayCntFieldNumber = 2,
+    kMixerFieldNumber = 3,
+    kStageFieldNumber = 4,
+    kFlagsFieldNumber = 5,
+    kFormatFieldNumber = 6,
+    kImgWFieldNumber = 7,
+    kImgHFieldNumber = 8,
+    kSrcXFieldNumber = 9,
+    kSrcYFieldNumber = 10,
+    kSrcWFieldNumber = 11,
+    kSrcHFieldNumber = 12,
+    kDstXFieldNumber = 13,
+    kDstYFieldNumber = 14,
+    kDstWFieldNumber = 15,
+    kDstHFieldNumber = 16,
+  };
+
+  using FieldMetadata_Num =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppChangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Num kNum() { return {}; }
+  void set_num(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Num::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PlayCnt =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppChangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PlayCnt kPlayCnt() { return {}; }
+  void set_play_cnt(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PlayCnt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Mixer =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppChangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mixer kMixer() { return {}; }
+  void set_mixer(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Mixer::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Stage =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppChangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Stage kStage() { return {}; }
+  void set_stage(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Stage::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Flags =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppChangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flags kFlags() { return {}; }
+  void set_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Format =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppChangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Format kFormat() { return {}; }
+  void set_format(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Format::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ImgW =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppChangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ImgW kImgW() { return {}; }
+  void set_img_w(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ImgW::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ImgH =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppChangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ImgH kImgH() { return {}; }
+  void set_img_h(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ImgH::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SrcX =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppChangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SrcX kSrcX() { return {}; }
+  void set_src_x(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SrcX::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SrcY =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppChangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SrcY kSrcY() { return {}; }
+  void set_src_y(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SrcY::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SrcW =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppChangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SrcW kSrcW() { return {}; }
+  void set_src_w(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SrcW::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SrcH =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppChangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SrcH kSrcH() { return {}; }
+  void set_src_h(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SrcH::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DstX =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppChangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DstX kDstX() { return {}; }
+  void set_dst_x(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DstX::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DstY =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppChangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DstY kDstY() { return {}; }
+  void set_dst_y(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DstY::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DstW =
+    ::protozero::proto_utils::FieldMetadata<
+      15,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppChangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DstW kDstW() { return {}; }
+  void set_dst_w(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DstW::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DstH =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpSsppChangeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DstH kDstH() { return {}; }
+  void set_dst_h(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DstH::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpPerfSetOtFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpPerfSetOtFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpPerfSetOtFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpPerfSetOtFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pnum() const { return at<1>().valid(); }
+  uint32_t pnum() const { return at<1>().as_uint32(); }
+  bool has_xin_id() const { return at<2>().valid(); }
+  uint32_t xin_id() const { return at<2>().as_uint32(); }
+  bool has_rd_lim() const { return at<3>().valid(); }
+  uint32_t rd_lim() const { return at<3>().as_uint32(); }
+  bool has_is_vbif_rt() const { return at<4>().valid(); }
+  uint32_t is_vbif_rt() const { return at<4>().as_uint32(); }
+};
+
+class MdpPerfSetOtFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpPerfSetOtFtraceEvent_Decoder;
+  enum : int32_t {
+    kPnumFieldNumber = 1,
+    kXinIdFieldNumber = 2,
+    kRdLimFieldNumber = 3,
+    kIsVbifRtFieldNumber = 4,
+  };
+
+  using FieldMetadata_Pnum =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetOtFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pnum kPnum() { return {}; }
+  void set_pnum(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pnum::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_XinId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetOtFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_XinId kXinId() { return {}; }
+  void set_xin_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_XinId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RdLim =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetOtFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RdLim kRdLim() { return {}; }
+  void set_rd_lim(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RdLim::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IsVbifRt =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpPerfSetOtFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IsVbifRt kIsVbifRt() { return {}; }
+  void set_is_vbif_rt(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IsVbifRt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpCommitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpCommitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpCommitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpCommitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_num() const { return at<1>().valid(); }
+  uint32_t num() const { return at<1>().as_uint32(); }
+  bool has_play_cnt() const { return at<2>().valid(); }
+  uint32_t play_cnt() const { return at<2>().as_uint32(); }
+  bool has_clk_rate() const { return at<3>().valid(); }
+  uint32_t clk_rate() const { return at<3>().as_uint32(); }
+  bool has_bandwidth() const { return at<4>().valid(); }
+  uint64_t bandwidth() const { return at<4>().as_uint64(); }
+};
+
+class MdpCommitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpCommitFtraceEvent_Decoder;
+  enum : int32_t {
+    kNumFieldNumber = 1,
+    kPlayCntFieldNumber = 2,
+    kClkRateFieldNumber = 3,
+    kBandwidthFieldNumber = 4,
+  };
+
+  using FieldMetadata_Num =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpCommitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Num kNum() { return {}; }
+  void set_num(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Num::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PlayCnt =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpCommitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PlayCnt kPlayCnt() { return {}; }
+  void set_play_cnt(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PlayCnt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ClkRate =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpCommitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClkRate kClkRate() { return {}; }
+  void set_clk_rate(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ClkRate::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Bandwidth =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MdpCommitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Bandwidth kBandwidth() { return {}; }
+  void set_bandwidth(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Bandwidth::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MdpCmdKickoffFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MdpCmdKickoffFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MdpCmdKickoffFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MdpCmdKickoffFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_ctl_num() const { return at<1>().valid(); }
+  uint32_t ctl_num() const { return at<1>().as_uint32(); }
+  bool has_kickoff_cnt() const { return at<2>().valid(); }
+  int32_t kickoff_cnt() const { return at<2>().as_int32(); }
+};
+
+class MdpCmdKickoffFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MdpCmdKickoffFtraceEvent_Decoder;
+  enum : int32_t {
+    kCtlNumFieldNumber = 1,
+    kKickoffCntFieldNumber = 2,
+  };
+
+  using FieldMetadata_CtlNum =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MdpCmdKickoffFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CtlNum kCtlNum() { return {}; }
+  void set_ctl_num(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CtlNum::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_KickoffCnt =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MdpCmdKickoffFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_KickoffCnt kKickoffCnt() { return {}; }
+  void set_kickoff_cnt(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_KickoffCnt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/mm_event.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_MM_EVENT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_MM_EVENT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class MmEventRecordFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmEventRecordFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmEventRecordFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmEventRecordFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_avg_lat() const { return at<1>().valid(); }
+  uint32_t avg_lat() const { return at<1>().as_uint32(); }
+  bool has_count() const { return at<2>().valid(); }
+  uint32_t count() const { return at<2>().as_uint32(); }
+  bool has_max_lat() const { return at<3>().valid(); }
+  uint32_t max_lat() const { return at<3>().as_uint32(); }
+  bool has_type() const { return at<4>().valid(); }
+  uint32_t type() const { return at<4>().as_uint32(); }
+};
+
+class MmEventRecordFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmEventRecordFtraceEvent_Decoder;
+  enum : int32_t {
+    kAvgLatFieldNumber = 1,
+    kCountFieldNumber = 2,
+    kMaxLatFieldNumber = 3,
+    kTypeFieldNumber = 4,
+  };
+
+  using FieldMetadata_AvgLat =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmEventRecordFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AvgLat kAvgLat() { return {}; }
+  void set_avg_lat(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AvgLat::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Count =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmEventRecordFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Count kCount() { return {}; }
+  void set_count(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Count::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MaxLat =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmEventRecordFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MaxLat kMaxLat() { return {}; }
+  void set_max_lat(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MaxLat::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmEventRecordFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/oom.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_OOM_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_OOM_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class MarkVictimFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MarkVictimFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MarkVictimFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MarkVictimFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  int32_t pid() const { return at<1>().as_int32(); }
+};
+
+class MarkVictimFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MarkVictimFtraceEvent_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+  };
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MarkVictimFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class OomScoreAdjUpdateFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  OomScoreAdjUpdateFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit OomScoreAdjUpdateFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit OomScoreAdjUpdateFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_comm() const { return at<1>().valid(); }
+  ::protozero::ConstChars comm() const { return at<1>().as_string(); }
+  bool has_oom_score_adj() const { return at<2>().valid(); }
+  int32_t oom_score_adj() const { return at<2>().as_int32(); }
+  bool has_pid() const { return at<3>().valid(); }
+  int32_t pid() const { return at<3>().as_int32(); }
+};
+
+class OomScoreAdjUpdateFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = OomScoreAdjUpdateFtraceEvent_Decoder;
+  enum : int32_t {
+    kCommFieldNumber = 1,
+    kOomScoreAdjFieldNumber = 2,
+    kPidFieldNumber = 3,
+  };
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      OomScoreAdjUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OomScoreAdj =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      OomScoreAdjUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OomScoreAdj kOomScoreAdj() { return {}; }
+  void set_oom_score_adj(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OomScoreAdj::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      OomScoreAdjUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/power.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_POWER_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_POWER_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class GpuFrequencyFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  GpuFrequencyFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GpuFrequencyFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GpuFrequencyFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_gpu_id() const { return at<1>().valid(); }
+  uint32_t gpu_id() const { return at<1>().as_uint32(); }
+  bool has_state() const { return at<2>().valid(); }
+  uint32_t state() const { return at<2>().as_uint32(); }
+};
+
+class GpuFrequencyFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = GpuFrequencyFtraceEvent_Decoder;
+  enum : int32_t {
+    kGpuIdFieldNumber = 1,
+    kStateFieldNumber = 2,
+  };
+
+  using FieldMetadata_GpuId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      GpuFrequencyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuId kGpuId() { return {}; }
+  void set_gpu_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GpuId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_State =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      GpuFrequencyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_State kState() { return {}; }
+  void set_state(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_State::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SuspendResumeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SuspendResumeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SuspendResumeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SuspendResumeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_action() const { return at<1>().valid(); }
+  ::protozero::ConstChars action() const { return at<1>().as_string(); }
+  bool has_val() const { return at<2>().valid(); }
+  int32_t val() const { return at<2>().as_int32(); }
+  bool has_start() const { return at<3>().valid(); }
+  uint32_t start() const { return at<3>().as_uint32(); }
+};
+
+class SuspendResumeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SuspendResumeFtraceEvent_Decoder;
+  enum : int32_t {
+    kActionFieldNumber = 1,
+    kValFieldNumber = 2,
+    kStartFieldNumber = 3,
+  };
+
+  using FieldMetadata_Action =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SuspendResumeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Action kAction() { return {}; }
+  void set_action(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Action::kFieldId, data, size);
+  }
+  void set_action(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Action::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Val =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SuspendResumeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Val kVal() { return {}; }
+  void set_val(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Val::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Start =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SuspendResumeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Start kStart() { return {}; }
+  void set_start(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Start::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ClockSetRateFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ClockSetRateFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ClockSetRateFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ClockSetRateFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_state() const { return at<2>().valid(); }
+  uint64_t state() const { return at<2>().as_uint64(); }
+  bool has_cpu_id() const { return at<3>().valid(); }
+  uint64_t cpu_id() const { return at<3>().as_uint64(); }
+};
+
+class ClockSetRateFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = ClockSetRateFtraceEvent_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kStateFieldNumber = 2,
+    kCpuIdFieldNumber = 3,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ClockSetRateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_State =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ClockSetRateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_State kState() { return {}; }
+  void set_state(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_State::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CpuId =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ClockSetRateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuId kCpuId() { return {}; }
+  void set_cpu_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CpuId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ClockDisableFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ClockDisableFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ClockDisableFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ClockDisableFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_state() const { return at<2>().valid(); }
+  uint64_t state() const { return at<2>().as_uint64(); }
+  bool has_cpu_id() const { return at<3>().valid(); }
+  uint64_t cpu_id() const { return at<3>().as_uint64(); }
+};
+
+class ClockDisableFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = ClockDisableFtraceEvent_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kStateFieldNumber = 2,
+    kCpuIdFieldNumber = 3,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ClockDisableFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_State =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ClockDisableFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_State kState() { return {}; }
+  void set_state(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_State::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CpuId =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ClockDisableFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuId kCpuId() { return {}; }
+  void set_cpu_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CpuId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ClockEnableFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ClockEnableFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ClockEnableFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ClockEnableFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_state() const { return at<2>().valid(); }
+  uint64_t state() const { return at<2>().as_uint64(); }
+  bool has_cpu_id() const { return at<3>().valid(); }
+  uint64_t cpu_id() const { return at<3>().as_uint64(); }
+};
+
+class ClockEnableFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = ClockEnableFtraceEvent_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kStateFieldNumber = 2,
+    kCpuIdFieldNumber = 3,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ClockEnableFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_State =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ClockEnableFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_State kState() { return {}; }
+  void set_state(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_State::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CpuId =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ClockEnableFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuId kCpuId() { return {}; }
+  void set_cpu_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CpuId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CpuIdleFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CpuIdleFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CpuIdleFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CpuIdleFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_state() const { return at<1>().valid(); }
+  uint32_t state() const { return at<1>().as_uint32(); }
+  bool has_cpu_id() const { return at<2>().valid(); }
+  uint32_t cpu_id() const { return at<2>().as_uint32(); }
+};
+
+class CpuIdleFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CpuIdleFtraceEvent_Decoder;
+  enum : int32_t {
+    kStateFieldNumber = 1,
+    kCpuIdFieldNumber = 2,
+  };
+
+  using FieldMetadata_State =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CpuIdleFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_State kState() { return {}; }
+  void set_state(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_State::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CpuId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CpuIdleFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuId kCpuId() { return {}; }
+  void set_cpu_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CpuId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CpuFrequencyLimitsFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CpuFrequencyLimitsFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CpuFrequencyLimitsFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CpuFrequencyLimitsFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_min_freq() const { return at<1>().valid(); }
+  uint32_t min_freq() const { return at<1>().as_uint32(); }
+  bool has_max_freq() const { return at<2>().valid(); }
+  uint32_t max_freq() const { return at<2>().as_uint32(); }
+  bool has_cpu_id() const { return at<3>().valid(); }
+  uint32_t cpu_id() const { return at<3>().as_uint32(); }
+};
+
+class CpuFrequencyLimitsFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CpuFrequencyLimitsFtraceEvent_Decoder;
+  enum : int32_t {
+    kMinFreqFieldNumber = 1,
+    kMaxFreqFieldNumber = 2,
+    kCpuIdFieldNumber = 3,
+  };
+
+  using FieldMetadata_MinFreq =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CpuFrequencyLimitsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MinFreq kMinFreq() { return {}; }
+  void set_min_freq(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MinFreq::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MaxFreq =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CpuFrequencyLimitsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MaxFreq kMaxFreq() { return {}; }
+  void set_max_freq(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MaxFreq::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CpuId =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CpuFrequencyLimitsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuId kCpuId() { return {}; }
+  void set_cpu_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CpuId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class CpuFrequencyFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CpuFrequencyFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CpuFrequencyFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CpuFrequencyFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_state() const { return at<1>().valid(); }
+  uint32_t state() const { return at<1>().as_uint32(); }
+  bool has_cpu_id() const { return at<2>().valid(); }
+  uint32_t cpu_id() const { return at<2>().as_uint32(); }
+};
+
+class CpuFrequencyFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CpuFrequencyFtraceEvent_Decoder;
+  enum : int32_t {
+    kStateFieldNumber = 1,
+    kCpuIdFieldNumber = 2,
+  };
+
+  using FieldMetadata_State =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CpuFrequencyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_State kState() { return {}; }
+  void set_state(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_State::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CpuId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CpuFrequencyFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuId kCpuId() { return {}; }
+  void set_cpu_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CpuId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/raw_syscalls.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_RAW_SYSCALLS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_RAW_SYSCALLS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class SysExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SysExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SysExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SysExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_id() const { return at<1>().valid(); }
+  int64_t id() const { return at<1>().as_int64(); }
+  bool has_ret() const { return at<2>().valid(); }
+  int64_t ret() const { return at<2>().as_int64(); }
+};
+
+class SysExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SysExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kIdFieldNumber = 1,
+    kRetFieldNumber = 2,
+  };
+
+  using FieldMetadata_Id =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      SysExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Id kId() { return {}; }
+  void set_id(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Id::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ret =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      SysExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ret kRet() { return {}; }
+  void set_ret(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ret::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SysEnterFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SysEnterFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SysEnterFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SysEnterFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_id() const { return at<1>().valid(); }
+  int64_t id() const { return at<1>().as_int64(); }
+};
+
+class SysEnterFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SysEnterFtraceEvent_Decoder;
+  enum : int32_t {
+    kIdFieldNumber = 1,
+  };
+
+  using FieldMetadata_Id =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      SysEnterFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Id kId() { return {}; }
+  void set_id(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Id::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/regulator.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_REGULATOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_REGULATOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class RegulatorSetVoltageCompleteFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  RegulatorSetVoltageCompleteFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit RegulatorSetVoltageCompleteFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit RegulatorSetVoltageCompleteFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_val() const { return at<2>().valid(); }
+  uint32_t val() const { return at<2>().as_uint32(); }
+};
+
+class RegulatorSetVoltageCompleteFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = RegulatorSetVoltageCompleteFtraceEvent_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kValFieldNumber = 2,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      RegulatorSetVoltageCompleteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Val =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      RegulatorSetVoltageCompleteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Val kVal() { return {}; }
+  void set_val(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Val::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class RegulatorSetVoltageFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  RegulatorSetVoltageFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit RegulatorSetVoltageFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit RegulatorSetVoltageFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_min() const { return at<2>().valid(); }
+  int32_t min() const { return at<2>().as_int32(); }
+  bool has_max() const { return at<3>().valid(); }
+  int32_t max() const { return at<3>().as_int32(); }
+};
+
+class RegulatorSetVoltageFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = RegulatorSetVoltageFtraceEvent_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kMinFieldNumber = 2,
+    kMaxFieldNumber = 3,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      RegulatorSetVoltageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Min =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      RegulatorSetVoltageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Min kMin() { return {}; }
+  void set_min(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Min::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Max =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      RegulatorSetVoltageFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Max kMax() { return {}; }
+  void set_max(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Max::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class RegulatorEnableDelayFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  RegulatorEnableDelayFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit RegulatorEnableDelayFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit RegulatorEnableDelayFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+};
+
+class RegulatorEnableDelayFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = RegulatorEnableDelayFtraceEvent_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      RegulatorEnableDelayFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class RegulatorEnableCompleteFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  RegulatorEnableCompleteFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit RegulatorEnableCompleteFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit RegulatorEnableCompleteFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+};
+
+class RegulatorEnableCompleteFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = RegulatorEnableCompleteFtraceEvent_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      RegulatorEnableCompleteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class RegulatorEnableFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  RegulatorEnableFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit RegulatorEnableFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit RegulatorEnableFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+};
+
+class RegulatorEnableFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = RegulatorEnableFtraceEvent_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      RegulatorEnableFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class RegulatorDisableCompleteFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  RegulatorDisableCompleteFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit RegulatorDisableCompleteFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit RegulatorDisableCompleteFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+};
+
+class RegulatorDisableCompleteFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = RegulatorDisableCompleteFtraceEvent_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      RegulatorDisableCompleteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class RegulatorDisableFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  RegulatorDisableFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit RegulatorDisableFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit RegulatorDisableFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+};
+
+class RegulatorDisableFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = RegulatorDisableFtraceEvent_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      RegulatorDisableFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/sched.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_SCHED_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_SCHED_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class SchedPiSetprioFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SchedPiSetprioFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SchedPiSetprioFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SchedPiSetprioFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_comm() const { return at<1>().valid(); }
+  ::protozero::ConstChars comm() const { return at<1>().as_string(); }
+  bool has_newprio() const { return at<2>().valid(); }
+  int32_t newprio() const { return at<2>().as_int32(); }
+  bool has_oldprio() const { return at<3>().valid(); }
+  int32_t oldprio() const { return at<3>().as_int32(); }
+  bool has_pid() const { return at<4>().valid(); }
+  int32_t pid() const { return at<4>().as_int32(); }
+};
+
+class SchedPiSetprioFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SchedPiSetprioFtraceEvent_Decoder;
+  enum : int32_t {
+    kCommFieldNumber = 1,
+    kNewprioFieldNumber = 2,
+    kOldprioFieldNumber = 3,
+    kPidFieldNumber = 4,
+  };
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SchedPiSetprioFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Newprio =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedPiSetprioFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Newprio kNewprio() { return {}; }
+  void set_newprio(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Newprio::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Oldprio =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedPiSetprioFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Oldprio kOldprio() { return {}; }
+  void set_oldprio(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Oldprio::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedPiSetprioFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SchedProcessWaitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SchedProcessWaitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SchedProcessWaitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SchedProcessWaitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_comm() const { return at<1>().valid(); }
+  ::protozero::ConstChars comm() const { return at<1>().as_string(); }
+  bool has_pid() const { return at<2>().valid(); }
+  int32_t pid() const { return at<2>().as_int32(); }
+  bool has_prio() const { return at<3>().valid(); }
+  int32_t prio() const { return at<3>().as_int32(); }
+};
+
+class SchedProcessWaitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SchedProcessWaitFtraceEvent_Decoder;
+  enum : int32_t {
+    kCommFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kPrioFieldNumber = 3,
+  };
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SchedProcessWaitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedProcessWaitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Prio =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedProcessWaitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Prio kPrio() { return {}; }
+  void set_prio(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Prio::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SchedProcessHangFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SchedProcessHangFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SchedProcessHangFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SchedProcessHangFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_comm() const { return at<1>().valid(); }
+  ::protozero::ConstChars comm() const { return at<1>().as_string(); }
+  bool has_pid() const { return at<2>().valid(); }
+  int32_t pid() const { return at<2>().as_int32(); }
+};
+
+class SchedProcessHangFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SchedProcessHangFtraceEvent_Decoder;
+  enum : int32_t {
+    kCommFieldNumber = 1,
+    kPidFieldNumber = 2,
+  };
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SchedProcessHangFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedProcessHangFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SchedProcessFreeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SchedProcessFreeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SchedProcessFreeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SchedProcessFreeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_comm() const { return at<1>().valid(); }
+  ::protozero::ConstChars comm() const { return at<1>().as_string(); }
+  bool has_pid() const { return at<2>().valid(); }
+  int32_t pid() const { return at<2>().as_int32(); }
+  bool has_prio() const { return at<3>().valid(); }
+  int32_t prio() const { return at<3>().as_int32(); }
+};
+
+class SchedProcessFreeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SchedProcessFreeFtraceEvent_Decoder;
+  enum : int32_t {
+    kCommFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kPrioFieldNumber = 3,
+  };
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SchedProcessFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedProcessFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Prio =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedProcessFreeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Prio kPrio() { return {}; }
+  void set_prio(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Prio::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SchedProcessForkFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SchedProcessForkFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SchedProcessForkFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SchedProcessForkFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_parent_comm() const { return at<1>().valid(); }
+  ::protozero::ConstChars parent_comm() const { return at<1>().as_string(); }
+  bool has_parent_pid() const { return at<2>().valid(); }
+  int32_t parent_pid() const { return at<2>().as_int32(); }
+  bool has_child_comm() const { return at<3>().valid(); }
+  ::protozero::ConstChars child_comm() const { return at<3>().as_string(); }
+  bool has_child_pid() const { return at<4>().valid(); }
+  int32_t child_pid() const { return at<4>().as_int32(); }
+};
+
+class SchedProcessForkFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SchedProcessForkFtraceEvent_Decoder;
+  enum : int32_t {
+    kParentCommFieldNumber = 1,
+    kParentPidFieldNumber = 2,
+    kChildCommFieldNumber = 3,
+    kChildPidFieldNumber = 4,
+  };
+
+  using FieldMetadata_ParentComm =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SchedProcessForkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ParentComm kParentComm() { return {}; }
+  void set_parent_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ParentComm::kFieldId, data, size);
+  }
+  void set_parent_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ParentComm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ParentPid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedProcessForkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ParentPid kParentPid() { return {}; }
+  void set_parent_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ParentPid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChildComm =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SchedProcessForkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChildComm kChildComm() { return {}; }
+  void set_child_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ChildComm::kFieldId, data, size);
+  }
+  void set_child_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChildComm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChildPid =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedProcessForkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChildPid kChildPid() { return {}; }
+  void set_child_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChildPid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SchedProcessExitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SchedProcessExitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SchedProcessExitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SchedProcessExitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_comm() const { return at<1>().valid(); }
+  ::protozero::ConstChars comm() const { return at<1>().as_string(); }
+  bool has_pid() const { return at<2>().valid(); }
+  int32_t pid() const { return at<2>().as_int32(); }
+  bool has_tgid() const { return at<3>().valid(); }
+  int32_t tgid() const { return at<3>().as_int32(); }
+  bool has_prio() const { return at<4>().valid(); }
+  int32_t prio() const { return at<4>().as_int32(); }
+};
+
+class SchedProcessExitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SchedProcessExitFtraceEvent_Decoder;
+  enum : int32_t {
+    kCommFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kTgidFieldNumber = 3,
+    kPrioFieldNumber = 4,
+  };
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SchedProcessExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedProcessExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Tgid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedProcessExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tgid kTgid() { return {}; }
+  void set_tgid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tgid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Prio =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedProcessExitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Prio kPrio() { return {}; }
+  void set_prio(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Prio::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SchedProcessExecFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SchedProcessExecFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SchedProcessExecFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SchedProcessExecFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_filename() const { return at<1>().valid(); }
+  ::protozero::ConstChars filename() const { return at<1>().as_string(); }
+  bool has_pid() const { return at<2>().valid(); }
+  int32_t pid() const { return at<2>().as_int32(); }
+  bool has_old_pid() const { return at<3>().valid(); }
+  int32_t old_pid() const { return at<3>().as_int32(); }
+};
+
+class SchedProcessExecFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SchedProcessExecFtraceEvent_Decoder;
+  enum : int32_t {
+    kFilenameFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kOldPidFieldNumber = 3,
+  };
+
+  using FieldMetadata_Filename =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SchedProcessExecFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Filename kFilename() { return {}; }
+  void set_filename(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Filename::kFieldId, data, size);
+  }
+  void set_filename(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Filename::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedProcessExecFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OldPid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedProcessExecFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OldPid kOldPid() { return {}; }
+  void set_old_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OldPid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SchedWakeupNewFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SchedWakeupNewFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SchedWakeupNewFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SchedWakeupNewFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_comm() const { return at<1>().valid(); }
+  ::protozero::ConstChars comm() const { return at<1>().as_string(); }
+  bool has_pid() const { return at<2>().valid(); }
+  int32_t pid() const { return at<2>().as_int32(); }
+  bool has_prio() const { return at<3>().valid(); }
+  int32_t prio() const { return at<3>().as_int32(); }
+  bool has_success() const { return at<4>().valid(); }
+  int32_t success() const { return at<4>().as_int32(); }
+  bool has_target_cpu() const { return at<5>().valid(); }
+  int32_t target_cpu() const { return at<5>().as_int32(); }
+};
+
+class SchedWakeupNewFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SchedWakeupNewFtraceEvent_Decoder;
+  enum : int32_t {
+    kCommFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kPrioFieldNumber = 3,
+    kSuccessFieldNumber = 4,
+    kTargetCpuFieldNumber = 5,
+  };
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SchedWakeupNewFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedWakeupNewFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Prio =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedWakeupNewFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Prio kPrio() { return {}; }
+  void set_prio(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Prio::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Success =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedWakeupNewFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Success kSuccess() { return {}; }
+  void set_success(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Success::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TargetCpu =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedWakeupNewFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TargetCpu kTargetCpu() { return {}; }
+  void set_target_cpu(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TargetCpu::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SchedWakingFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SchedWakingFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SchedWakingFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SchedWakingFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_comm() const { return at<1>().valid(); }
+  ::protozero::ConstChars comm() const { return at<1>().as_string(); }
+  bool has_pid() const { return at<2>().valid(); }
+  int32_t pid() const { return at<2>().as_int32(); }
+  bool has_prio() const { return at<3>().valid(); }
+  int32_t prio() const { return at<3>().as_int32(); }
+  bool has_success() const { return at<4>().valid(); }
+  int32_t success() const { return at<4>().as_int32(); }
+  bool has_target_cpu() const { return at<5>().valid(); }
+  int32_t target_cpu() const { return at<5>().as_int32(); }
+};
+
+class SchedWakingFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SchedWakingFtraceEvent_Decoder;
+  enum : int32_t {
+    kCommFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kPrioFieldNumber = 3,
+    kSuccessFieldNumber = 4,
+    kTargetCpuFieldNumber = 5,
+  };
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SchedWakingFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedWakingFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Prio =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedWakingFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Prio kPrio() { return {}; }
+  void set_prio(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Prio::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Success =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedWakingFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Success kSuccess() { return {}; }
+  void set_success(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Success::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TargetCpu =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedWakingFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TargetCpu kTargetCpu() { return {}; }
+  void set_target_cpu(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TargetCpu::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SchedCpuHotplugFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SchedCpuHotplugFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SchedCpuHotplugFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SchedCpuHotplugFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_affected_cpu() const { return at<1>().valid(); }
+  int32_t affected_cpu() const { return at<1>().as_int32(); }
+  bool has_error() const { return at<2>().valid(); }
+  int32_t error() const { return at<2>().as_int32(); }
+  bool has_status() const { return at<3>().valid(); }
+  int32_t status() const { return at<3>().as_int32(); }
+};
+
+class SchedCpuHotplugFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SchedCpuHotplugFtraceEvent_Decoder;
+  enum : int32_t {
+    kAffectedCpuFieldNumber = 1,
+    kErrorFieldNumber = 2,
+    kStatusFieldNumber = 3,
+  };
+
+  using FieldMetadata_AffectedCpu =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedCpuHotplugFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AffectedCpu kAffectedCpu() { return {}; }
+  void set_affected_cpu(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AffectedCpu::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Error =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedCpuHotplugFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Error kError() { return {}; }
+  void set_error(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Error::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Status =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedCpuHotplugFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Status kStatus() { return {}; }
+  void set_status(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Status::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SchedBlockedReasonFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SchedBlockedReasonFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SchedBlockedReasonFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SchedBlockedReasonFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  int32_t pid() const { return at<1>().as_int32(); }
+  bool has_caller() const { return at<2>().valid(); }
+  uint64_t caller() const { return at<2>().as_uint64(); }
+  bool has_io_wait() const { return at<3>().valid(); }
+  uint32_t io_wait() const { return at<3>().as_uint32(); }
+};
+
+class SchedBlockedReasonFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SchedBlockedReasonFtraceEvent_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kCallerFieldNumber = 2,
+    kIoWaitFieldNumber = 3,
+  };
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedBlockedReasonFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Caller =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SchedBlockedReasonFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Caller kCaller() { return {}; }
+  void set_caller(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Caller::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IoWait =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SchedBlockedReasonFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IoWait kIoWait() { return {}; }
+  void set_io_wait(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IoWait::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SchedWakeupFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SchedWakeupFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SchedWakeupFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SchedWakeupFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_comm() const { return at<1>().valid(); }
+  ::protozero::ConstChars comm() const { return at<1>().as_string(); }
+  bool has_pid() const { return at<2>().valid(); }
+  int32_t pid() const { return at<2>().as_int32(); }
+  bool has_prio() const { return at<3>().valid(); }
+  int32_t prio() const { return at<3>().as_int32(); }
+  bool has_success() const { return at<4>().valid(); }
+  int32_t success() const { return at<4>().as_int32(); }
+  bool has_target_cpu() const { return at<5>().valid(); }
+  int32_t target_cpu() const { return at<5>().as_int32(); }
+};
+
+class SchedWakeupFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SchedWakeupFtraceEvent_Decoder;
+  enum : int32_t {
+    kCommFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kPrioFieldNumber = 3,
+    kSuccessFieldNumber = 4,
+    kTargetCpuFieldNumber = 5,
+  };
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SchedWakeupFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedWakeupFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Prio =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedWakeupFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Prio kPrio() { return {}; }
+  void set_prio(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Prio::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Success =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedWakeupFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Success kSuccess() { return {}; }
+  void set_success(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Success::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TargetCpu =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedWakeupFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TargetCpu kTargetCpu() { return {}; }
+  void set_target_cpu(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TargetCpu::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SchedSwitchFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SchedSwitchFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SchedSwitchFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SchedSwitchFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_prev_comm() const { return at<1>().valid(); }
+  ::protozero::ConstChars prev_comm() const { return at<1>().as_string(); }
+  bool has_prev_pid() const { return at<2>().valid(); }
+  int32_t prev_pid() const { return at<2>().as_int32(); }
+  bool has_prev_prio() const { return at<3>().valid(); }
+  int32_t prev_prio() const { return at<3>().as_int32(); }
+  bool has_prev_state() const { return at<4>().valid(); }
+  int64_t prev_state() const { return at<4>().as_int64(); }
+  bool has_next_comm() const { return at<5>().valid(); }
+  ::protozero::ConstChars next_comm() const { return at<5>().as_string(); }
+  bool has_next_pid() const { return at<6>().valid(); }
+  int32_t next_pid() const { return at<6>().as_int32(); }
+  bool has_next_prio() const { return at<7>().valid(); }
+  int32_t next_prio() const { return at<7>().as_int32(); }
+};
+
+class SchedSwitchFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SchedSwitchFtraceEvent_Decoder;
+  enum : int32_t {
+    kPrevCommFieldNumber = 1,
+    kPrevPidFieldNumber = 2,
+    kPrevPrioFieldNumber = 3,
+    kPrevStateFieldNumber = 4,
+    kNextCommFieldNumber = 5,
+    kNextPidFieldNumber = 6,
+    kNextPrioFieldNumber = 7,
+  };
+
+  using FieldMetadata_PrevComm =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SchedSwitchFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PrevComm kPrevComm() { return {}; }
+  void set_prev_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_PrevComm::kFieldId, data, size);
+  }
+  void set_prev_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_PrevComm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PrevPid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedSwitchFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PrevPid kPrevPid() { return {}; }
+  void set_prev_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PrevPid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PrevPrio =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedSwitchFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PrevPrio kPrevPrio() { return {}; }
+  void set_prev_prio(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PrevPrio::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PrevState =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      SchedSwitchFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PrevState kPrevState() { return {}; }
+  void set_prev_state(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PrevState::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NextComm =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SchedSwitchFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NextComm kNextComm() { return {}; }
+  void set_next_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_NextComm::kFieldId, data, size);
+  }
+  void set_next_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_NextComm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NextPid =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedSwitchFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NextPid kNextPid() { return {}; }
+  void set_next_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NextPid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NextPrio =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SchedSwitchFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NextPrio kNextPrio() { return {}; }
+  void set_next_prio(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NextPrio::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/scm.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_SCM_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_SCM_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class ScmCallEndFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/0, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ScmCallEndFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ScmCallEndFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ScmCallEndFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+};
+
+class ScmCallEndFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = ScmCallEndFtraceEvent_Decoder;
+};
+
+class ScmCallStartFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ScmCallStartFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ScmCallStartFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ScmCallStartFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_arginfo() const { return at<1>().valid(); }
+  uint32_t arginfo() const { return at<1>().as_uint32(); }
+  bool has_x0() const { return at<2>().valid(); }
+  uint64_t x0() const { return at<2>().as_uint64(); }
+  bool has_x5() const { return at<3>().valid(); }
+  uint64_t x5() const { return at<3>().as_uint64(); }
+};
+
+class ScmCallStartFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = ScmCallStartFtraceEvent_Decoder;
+  enum : int32_t {
+    kArginfoFieldNumber = 1,
+    kX0FieldNumber = 2,
+    kX5FieldNumber = 3,
+  };
+
+  using FieldMetadata_Arginfo =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ScmCallStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Arginfo kArginfo() { return {}; }
+  void set_arginfo(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Arginfo::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_X0 =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ScmCallStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_X0 kX0() { return {}; }
+  void set_x0(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_X0::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_X5 =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ScmCallStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_X5 kX5() { return {}; }
+  void set_x5(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_X5::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/sde.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_SDE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_SDE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class SdeSdePerfUpdateBusFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SdeSdePerfUpdateBusFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SdeSdePerfUpdateBusFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SdeSdePerfUpdateBusFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_ab_quota() const { return at<1>().valid(); }
+  uint64_t ab_quota() const { return at<1>().as_uint64(); }
+  bool has_bus_id() const { return at<2>().valid(); }
+  uint32_t bus_id() const { return at<2>().as_uint32(); }
+  bool has_client() const { return at<3>().valid(); }
+  int32_t client() const { return at<3>().as_int32(); }
+  bool has_ib_quota() const { return at<4>().valid(); }
+  uint64_t ib_quota() const { return at<4>().as_uint64(); }
+};
+
+class SdeSdePerfUpdateBusFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SdeSdePerfUpdateBusFtraceEvent_Decoder;
+  enum : int32_t {
+    kAbQuotaFieldNumber = 1,
+    kBusIdFieldNumber = 2,
+    kClientFieldNumber = 3,
+    kIbQuotaFieldNumber = 4,
+  };
+
+  using FieldMetadata_AbQuota =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SdeSdePerfUpdateBusFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AbQuota kAbQuota() { return {}; }
+  void set_ab_quota(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AbQuota::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BusId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SdeSdePerfUpdateBusFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BusId kBusId() { return {}; }
+  void set_bus_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BusId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Client =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SdeSdePerfUpdateBusFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Client kClient() { return {}; }
+  void set_client(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Client::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IbQuota =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SdeSdePerfUpdateBusFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IbQuota kIbQuota() { return {}; }
+  void set_ib_quota(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IbQuota::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SdeSdePerfSetQosLutsFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SdeSdePerfSetQosLutsFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SdeSdePerfSetQosLutsFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SdeSdePerfSetQosLutsFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_fl() const { return at<1>().valid(); }
+  uint32_t fl() const { return at<1>().as_uint32(); }
+  bool has_fmt() const { return at<2>().valid(); }
+  uint32_t fmt() const { return at<2>().as_uint32(); }
+  bool has_lut() const { return at<3>().valid(); }
+  uint64_t lut() const { return at<3>().as_uint64(); }
+  bool has_lut_usage() const { return at<4>().valid(); }
+  uint32_t lut_usage() const { return at<4>().as_uint32(); }
+  bool has_pnum() const { return at<5>().valid(); }
+  uint32_t pnum() const { return at<5>().as_uint32(); }
+  bool has_rt() const { return at<6>().valid(); }
+  uint32_t rt() const { return at<6>().as_uint32(); }
+};
+
+class SdeSdePerfSetQosLutsFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SdeSdePerfSetQosLutsFtraceEvent_Decoder;
+  enum : int32_t {
+    kFlFieldNumber = 1,
+    kFmtFieldNumber = 2,
+    kLutFieldNumber = 3,
+    kLutUsageFieldNumber = 4,
+    kPnumFieldNumber = 5,
+    kRtFieldNumber = 6,
+  };
+
+  using FieldMetadata_Fl =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SdeSdePerfSetQosLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Fl kFl() { return {}; }
+  void set_fl(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Fl::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Fmt =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SdeSdePerfSetQosLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Fmt kFmt() { return {}; }
+  void set_fmt(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Fmt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lut =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SdeSdePerfSetQosLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lut kLut() { return {}; }
+  void set_lut(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Lut::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LutUsage =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SdeSdePerfSetQosLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LutUsage kLutUsage() { return {}; }
+  void set_lut_usage(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LutUsage::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pnum =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SdeSdePerfSetQosLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pnum kPnum() { return {}; }
+  void set_pnum(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pnum::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Rt =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SdeSdePerfSetQosLutsFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Rt kRt() { return {}; }
+  void set_rt(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Rt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SdeSdePerfCrtcUpdateFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/12, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SdeSdePerfCrtcUpdateFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SdeSdePerfCrtcUpdateFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SdeSdePerfCrtcUpdateFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_bw_ctl_ebi() const { return at<1>().valid(); }
+  uint64_t bw_ctl_ebi() const { return at<1>().as_uint64(); }
+  bool has_bw_ctl_llcc() const { return at<2>().valid(); }
+  uint64_t bw_ctl_llcc() const { return at<2>().as_uint64(); }
+  bool has_bw_ctl_mnoc() const { return at<3>().valid(); }
+  uint64_t bw_ctl_mnoc() const { return at<3>().as_uint64(); }
+  bool has_core_clk_rate() const { return at<4>().valid(); }
+  uint32_t core_clk_rate() const { return at<4>().as_uint32(); }
+  bool has_crtc() const { return at<5>().valid(); }
+  uint32_t crtc() const { return at<5>().as_uint32(); }
+  bool has_params() const { return at<6>().valid(); }
+  int32_t params() const { return at<6>().as_int32(); }
+  bool has_per_pipe_ib_ebi() const { return at<7>().valid(); }
+  uint64_t per_pipe_ib_ebi() const { return at<7>().as_uint64(); }
+  bool has_per_pipe_ib_llcc() const { return at<8>().valid(); }
+  uint64_t per_pipe_ib_llcc() const { return at<8>().as_uint64(); }
+  bool has_per_pipe_ib_mnoc() const { return at<9>().valid(); }
+  uint64_t per_pipe_ib_mnoc() const { return at<9>().as_uint64(); }
+  bool has_stop_req() const { return at<10>().valid(); }
+  uint32_t stop_req() const { return at<10>().as_uint32(); }
+  bool has_update_bus() const { return at<11>().valid(); }
+  uint32_t update_bus() const { return at<11>().as_uint32(); }
+  bool has_update_clk() const { return at<12>().valid(); }
+  uint32_t update_clk() const { return at<12>().as_uint32(); }
+};
+
+class SdeSdePerfCrtcUpdateFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SdeSdePerfCrtcUpdateFtraceEvent_Decoder;
+  enum : int32_t {
+    kBwCtlEbiFieldNumber = 1,
+    kBwCtlLlccFieldNumber = 2,
+    kBwCtlMnocFieldNumber = 3,
+    kCoreClkRateFieldNumber = 4,
+    kCrtcFieldNumber = 5,
+    kParamsFieldNumber = 6,
+    kPerPipeIbEbiFieldNumber = 7,
+    kPerPipeIbLlccFieldNumber = 8,
+    kPerPipeIbMnocFieldNumber = 9,
+    kStopReqFieldNumber = 10,
+    kUpdateBusFieldNumber = 11,
+    kUpdateClkFieldNumber = 12,
+  };
+
+  using FieldMetadata_BwCtlEbi =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SdeSdePerfCrtcUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BwCtlEbi kBwCtlEbi() { return {}; }
+  void set_bw_ctl_ebi(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BwCtlEbi::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BwCtlLlcc =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SdeSdePerfCrtcUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BwCtlLlcc kBwCtlLlcc() { return {}; }
+  void set_bw_ctl_llcc(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BwCtlLlcc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BwCtlMnoc =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SdeSdePerfCrtcUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BwCtlMnoc kBwCtlMnoc() { return {}; }
+  void set_bw_ctl_mnoc(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BwCtlMnoc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CoreClkRate =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SdeSdePerfCrtcUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CoreClkRate kCoreClkRate() { return {}; }
+  void set_core_clk_rate(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CoreClkRate::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Crtc =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SdeSdePerfCrtcUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Crtc kCrtc() { return {}; }
+  void set_crtc(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Crtc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Params =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SdeSdePerfCrtcUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Params kParams() { return {}; }
+  void set_params(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Params::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PerPipeIbEbi =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SdeSdePerfCrtcUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PerPipeIbEbi kPerPipeIbEbi() { return {}; }
+  void set_per_pipe_ib_ebi(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PerPipeIbEbi::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PerPipeIbLlcc =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SdeSdePerfCrtcUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PerPipeIbLlcc kPerPipeIbLlcc() { return {}; }
+  void set_per_pipe_ib_llcc(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PerPipeIbLlcc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PerPipeIbMnoc =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SdeSdePerfCrtcUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PerPipeIbMnoc kPerPipeIbMnoc() { return {}; }
+  void set_per_pipe_ib_mnoc(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PerPipeIbMnoc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StopReq =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SdeSdePerfCrtcUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StopReq kStopReq() { return {}; }
+  void set_stop_req(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_StopReq::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UpdateBus =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SdeSdePerfCrtcUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UpdateBus kUpdateBus() { return {}; }
+  void set_update_bus(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UpdateBus::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UpdateClk =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SdeSdePerfCrtcUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UpdateClk kUpdateClk() { return {}; }
+  void set_update_clk(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UpdateClk::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SdeSdePerfCalcCrtcFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SdeSdePerfCalcCrtcFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SdeSdePerfCalcCrtcFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SdeSdePerfCalcCrtcFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_bw_ctl_ebi() const { return at<1>().valid(); }
+  uint64_t bw_ctl_ebi() const { return at<1>().as_uint64(); }
+  bool has_bw_ctl_llcc() const { return at<2>().valid(); }
+  uint64_t bw_ctl_llcc() const { return at<2>().as_uint64(); }
+  bool has_bw_ctl_mnoc() const { return at<3>().valid(); }
+  uint64_t bw_ctl_mnoc() const { return at<3>().as_uint64(); }
+  bool has_core_clk_rate() const { return at<4>().valid(); }
+  uint32_t core_clk_rate() const { return at<4>().as_uint32(); }
+  bool has_crtc() const { return at<5>().valid(); }
+  uint32_t crtc() const { return at<5>().as_uint32(); }
+  bool has_ib_ebi() const { return at<6>().valid(); }
+  uint64_t ib_ebi() const { return at<6>().as_uint64(); }
+  bool has_ib_llcc() const { return at<7>().valid(); }
+  uint64_t ib_llcc() const { return at<7>().as_uint64(); }
+  bool has_ib_mnoc() const { return at<8>().valid(); }
+  uint64_t ib_mnoc() const { return at<8>().as_uint64(); }
+};
+
+class SdeSdePerfCalcCrtcFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SdeSdePerfCalcCrtcFtraceEvent_Decoder;
+  enum : int32_t {
+    kBwCtlEbiFieldNumber = 1,
+    kBwCtlLlccFieldNumber = 2,
+    kBwCtlMnocFieldNumber = 3,
+    kCoreClkRateFieldNumber = 4,
+    kCrtcFieldNumber = 5,
+    kIbEbiFieldNumber = 6,
+    kIbLlccFieldNumber = 7,
+    kIbMnocFieldNumber = 8,
+  };
+
+  using FieldMetadata_BwCtlEbi =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SdeSdePerfCalcCrtcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BwCtlEbi kBwCtlEbi() { return {}; }
+  void set_bw_ctl_ebi(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BwCtlEbi::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BwCtlLlcc =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SdeSdePerfCalcCrtcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BwCtlLlcc kBwCtlLlcc() { return {}; }
+  void set_bw_ctl_llcc(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BwCtlLlcc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BwCtlMnoc =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SdeSdePerfCalcCrtcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BwCtlMnoc kBwCtlMnoc() { return {}; }
+  void set_bw_ctl_mnoc(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BwCtlMnoc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CoreClkRate =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SdeSdePerfCalcCrtcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CoreClkRate kCoreClkRate() { return {}; }
+  void set_core_clk_rate(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CoreClkRate::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Crtc =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SdeSdePerfCalcCrtcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Crtc kCrtc() { return {}; }
+  void set_crtc(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Crtc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IbEbi =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SdeSdePerfCalcCrtcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IbEbi kIbEbi() { return {}; }
+  void set_ib_ebi(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IbEbi::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IbLlcc =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SdeSdePerfCalcCrtcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IbLlcc kIbLlcc() { return {}; }
+  void set_ib_llcc(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IbLlcc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IbMnoc =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SdeSdePerfCalcCrtcFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IbMnoc kIbMnoc() { return {}; }
+  void set_ib_mnoc(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IbMnoc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SdeSdeEvtlogFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SdeSdeEvtlogFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SdeSdeEvtlogFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SdeSdeEvtlogFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_evtlog_tag() const { return at<1>().valid(); }
+  ::protozero::ConstChars evtlog_tag() const { return at<1>().as_string(); }
+  bool has_pid() const { return at<2>().valid(); }
+  int32_t pid() const { return at<2>().as_int32(); }
+  bool has_tag_id() const { return at<3>().valid(); }
+  uint32_t tag_id() const { return at<3>().as_uint32(); }
+};
+
+class SdeSdeEvtlogFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SdeSdeEvtlogFtraceEvent_Decoder;
+  enum : int32_t {
+    kEvtlogTagFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kTagIdFieldNumber = 3,
+  };
+
+  using FieldMetadata_EvtlogTag =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SdeSdeEvtlogFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EvtlogTag kEvtlogTag() { return {}; }
+  void set_evtlog_tag(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_EvtlogTag::kFieldId, data, size);
+  }
+  void set_evtlog_tag(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_EvtlogTag::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SdeSdeEvtlogFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TagId =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SdeSdeEvtlogFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TagId kTagId() { return {}; }
+  void set_tag_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TagId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SdeTracingMarkWriteFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SdeTracingMarkWriteFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SdeTracingMarkWriteFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SdeTracingMarkWriteFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  int32_t pid() const { return at<1>().as_int32(); }
+  bool has_trace_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars trace_name() const { return at<2>().as_string(); }
+  bool has_trace_type() const { return at<3>().valid(); }
+  uint32_t trace_type() const { return at<3>().as_uint32(); }
+  bool has_value() const { return at<4>().valid(); }
+  int32_t value() const { return at<4>().as_int32(); }
+  bool has_trace_begin() const { return at<5>().valid(); }
+  uint32_t trace_begin() const { return at<5>().as_uint32(); }
+};
+
+class SdeTracingMarkWriteFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SdeTracingMarkWriteFtraceEvent_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kTraceNameFieldNumber = 2,
+    kTraceTypeFieldNumber = 3,
+    kValueFieldNumber = 4,
+    kTraceBeginFieldNumber = 5,
+  };
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SdeTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TraceName =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SdeTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceName kTraceName() { return {}; }
+  void set_trace_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_TraceName::kFieldId, data, size);
+  }
+  void set_trace_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_TraceName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TraceType =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SdeTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceType kTraceType() { return {}; }
+  void set_trace_type(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TraceType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Value =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SdeTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Value kValue() { return {}; }
+  void set_value(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Value::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TraceBegin =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SdeTracingMarkWriteFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceBegin kTraceBegin() { return {}; }
+  void set_trace_begin(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TraceBegin::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/signal.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_SIGNAL_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_SIGNAL_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class SignalGenerateFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SignalGenerateFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SignalGenerateFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SignalGenerateFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_code() const { return at<1>().valid(); }
+  int32_t code() const { return at<1>().as_int32(); }
+  bool has_comm() const { return at<2>().valid(); }
+  ::protozero::ConstChars comm() const { return at<2>().as_string(); }
+  bool has_group() const { return at<3>().valid(); }
+  int32_t group() const { return at<3>().as_int32(); }
+  bool has_pid() const { return at<4>().valid(); }
+  int32_t pid() const { return at<4>().as_int32(); }
+  bool has_result() const { return at<5>().valid(); }
+  int32_t result() const { return at<5>().as_int32(); }
+  bool has_sig() const { return at<6>().valid(); }
+  int32_t sig() const { return at<6>().as_int32(); }
+};
+
+class SignalGenerateFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SignalGenerateFtraceEvent_Decoder;
+  enum : int32_t {
+    kCodeFieldNumber = 1,
+    kCommFieldNumber = 2,
+    kGroupFieldNumber = 3,
+    kPidFieldNumber = 4,
+    kResultFieldNumber = 5,
+    kSigFieldNumber = 6,
+  };
+
+  using FieldMetadata_Code =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SignalGenerateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Code kCode() { return {}; }
+  void set_code(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Code::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SignalGenerateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Group =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SignalGenerateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Group kGroup() { return {}; }
+  void set_group(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Group::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SignalGenerateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Result =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SignalGenerateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Result kResult() { return {}; }
+  void set_result(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Result::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sig =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SignalGenerateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sig kSig() { return {}; }
+  void set_sig(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sig::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SignalDeliverFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SignalDeliverFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SignalDeliverFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SignalDeliverFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_code() const { return at<1>().valid(); }
+  int32_t code() const { return at<1>().as_int32(); }
+  bool has_sa_flags() const { return at<2>().valid(); }
+  uint64_t sa_flags() const { return at<2>().as_uint64(); }
+  bool has_sig() const { return at<3>().valid(); }
+  int32_t sig() const { return at<3>().as_int32(); }
+};
+
+class SignalDeliverFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SignalDeliverFtraceEvent_Decoder;
+  enum : int32_t {
+    kCodeFieldNumber = 1,
+    kSaFlagsFieldNumber = 2,
+    kSigFieldNumber = 3,
+  };
+
+  using FieldMetadata_Code =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SignalDeliverFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Code kCode() { return {}; }
+  void set_code(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Code::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SaFlags =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SignalDeliverFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SaFlags kSaFlags() { return {}; }
+  void set_sa_flags(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SaFlags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sig =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SignalDeliverFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sig kSig() { return {}; }
+  void set_sig(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sig::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/sync.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_SYNC_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_SYNC_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class SyncWaitFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SyncWaitFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SyncWaitFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SyncWaitFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_status() const { return at<2>().valid(); }
+  int32_t status() const { return at<2>().as_int32(); }
+  bool has_begin() const { return at<3>().valid(); }
+  uint32_t begin() const { return at<3>().as_uint32(); }
+};
+
+class SyncWaitFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SyncWaitFtraceEvent_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kStatusFieldNumber = 2,
+    kBeginFieldNumber = 3,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SyncWaitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Status =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SyncWaitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Status kStatus() { return {}; }
+  void set_status(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Status::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Begin =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SyncWaitFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Begin kBegin() { return {}; }
+  void set_begin(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Begin::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SyncTimelineFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SyncTimelineFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SyncTimelineFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SyncTimelineFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_value() const { return at<2>().valid(); }
+  ::protozero::ConstChars value() const { return at<2>().as_string(); }
+};
+
+class SyncTimelineFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SyncTimelineFtraceEvent_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kValueFieldNumber = 2,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SyncTimelineFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Value =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SyncTimelineFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Value kValue() { return {}; }
+  void set_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Value::kFieldId, data, size);
+  }
+  void set_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Value::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SyncPtFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SyncPtFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SyncPtFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SyncPtFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_timeline() const { return at<1>().valid(); }
+  ::protozero::ConstChars timeline() const { return at<1>().as_string(); }
+  bool has_value() const { return at<2>().valid(); }
+  ::protozero::ConstChars value() const { return at<2>().as_string(); }
+};
+
+class SyncPtFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = SyncPtFtraceEvent_Decoder;
+  enum : int32_t {
+    kTimelineFieldNumber = 1,
+    kValueFieldNumber = 2,
+  };
+
+  using FieldMetadata_Timeline =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SyncPtFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Timeline kTimeline() { return {}; }
+  void set_timeline(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Timeline::kFieldId, data, size);
+  }
+  void set_timeline(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Timeline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Value =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SyncPtFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Value kValue() { return {}; }
+  void set_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Value::kFieldId, data, size);
+  }
+  void set_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Value::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/systrace.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_SYSTRACE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_SYSTRACE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class ZeroFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ZeroFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ZeroFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ZeroFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_flag() const { return at<1>().valid(); }
+  int32_t flag() const { return at<1>().as_int32(); }
+  bool has_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars name() const { return at<2>().as_string(); }
+  bool has_pid() const { return at<3>().valid(); }
+  int32_t pid() const { return at<3>().as_int32(); }
+  bool has_value() const { return at<4>().valid(); }
+  int64_t value() const { return at<4>().as_int64(); }
+};
+
+class ZeroFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = ZeroFtraceEvent_Decoder;
+  enum : int32_t {
+    kFlagFieldNumber = 1,
+    kNameFieldNumber = 2,
+    kPidFieldNumber = 3,
+    kValueFieldNumber = 4,
+  };
+
+  using FieldMetadata_Flag =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ZeroFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Flag kFlag() { return {}; }
+  void set_flag(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Flag::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ZeroFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ZeroFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Value =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ZeroFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Value kValue() { return {}; }
+  void set_value(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Value::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/task.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_TASK_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_TASK_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class TaskRenameFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TaskRenameFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TaskRenameFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TaskRenameFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  int32_t pid() const { return at<1>().as_int32(); }
+  bool has_oldcomm() const { return at<2>().valid(); }
+  ::protozero::ConstChars oldcomm() const { return at<2>().as_string(); }
+  bool has_newcomm() const { return at<3>().valid(); }
+  ::protozero::ConstChars newcomm() const { return at<3>().as_string(); }
+  bool has_oom_score_adj() const { return at<4>().valid(); }
+  int32_t oom_score_adj() const { return at<4>().as_int32(); }
+};
+
+class TaskRenameFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = TaskRenameFtraceEvent_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kOldcommFieldNumber = 2,
+    kNewcommFieldNumber = 3,
+    kOomScoreAdjFieldNumber = 4,
+  };
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TaskRenameFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Oldcomm =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TaskRenameFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Oldcomm kOldcomm() { return {}; }
+  void set_oldcomm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Oldcomm::kFieldId, data, size);
+  }
+  void set_oldcomm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Oldcomm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Newcomm =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TaskRenameFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Newcomm kNewcomm() { return {}; }
+  void set_newcomm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Newcomm::kFieldId, data, size);
+  }
+  void set_newcomm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Newcomm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OomScoreAdj =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TaskRenameFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OomScoreAdj kOomScoreAdj() { return {}; }
+  void set_oom_score_adj(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OomScoreAdj::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TaskNewtaskFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TaskNewtaskFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TaskNewtaskFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TaskNewtaskFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  int32_t pid() const { return at<1>().as_int32(); }
+  bool has_comm() const { return at<2>().valid(); }
+  ::protozero::ConstChars comm() const { return at<2>().as_string(); }
+  bool has_clone_flags() const { return at<3>().valid(); }
+  uint64_t clone_flags() const { return at<3>().as_uint64(); }
+  bool has_oom_score_adj() const { return at<4>().valid(); }
+  int32_t oom_score_adj() const { return at<4>().as_int32(); }
+};
+
+class TaskNewtaskFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = TaskNewtaskFtraceEvent_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kCommFieldNumber = 2,
+    kCloneFlagsFieldNumber = 3,
+    kOomScoreAdjFieldNumber = 4,
+  };
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TaskNewtaskFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Comm =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TaskNewtaskFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Comm kComm() { return {}; }
+  void set_comm(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Comm::kFieldId, data, size);
+  }
+  void set_comm(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Comm::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CloneFlags =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TaskNewtaskFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CloneFlags kCloneFlags() { return {}; }
+  void set_clone_flags(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CloneFlags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OomScoreAdj =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TaskNewtaskFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OomScoreAdj kOomScoreAdj() { return {}; }
+  void set_oom_score_adj(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OomScoreAdj::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/thermal.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_THERMAL_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_THERMAL_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class CdevUpdateFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CdevUpdateFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CdevUpdateFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CdevUpdateFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_target() const { return at<1>().valid(); }
+  uint64_t target() const { return at<1>().as_uint64(); }
+  bool has_type() const { return at<2>().valid(); }
+  ::protozero::ConstChars type() const { return at<2>().as_string(); }
+};
+
+class CdevUpdateFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = CdevUpdateFtraceEvent_Decoder;
+  enum : int32_t {
+    kTargetFieldNumber = 1,
+    kTypeFieldNumber = 2,
+  };
+
+  using FieldMetadata_Target =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      CdevUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Target kTarget() { return {}; }
+  void set_target(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Target::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      CdevUpdateFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Type::kFieldId, data, size);
+  }
+  void set_type(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ThermalTemperatureFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ThermalTemperatureFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ThermalTemperatureFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ThermalTemperatureFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_id() const { return at<1>().valid(); }
+  int32_t id() const { return at<1>().as_int32(); }
+  bool has_temp() const { return at<2>().valid(); }
+  int32_t temp() const { return at<2>().as_int32(); }
+  bool has_temp_prev() const { return at<3>().valid(); }
+  int32_t temp_prev() const { return at<3>().as_int32(); }
+  bool has_thermal_zone() const { return at<4>().valid(); }
+  ::protozero::ConstChars thermal_zone() const { return at<4>().as_string(); }
+};
+
+class ThermalTemperatureFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = ThermalTemperatureFtraceEvent_Decoder;
+  enum : int32_t {
+    kIdFieldNumber = 1,
+    kTempFieldNumber = 2,
+    kTempPrevFieldNumber = 3,
+    kThermalZoneFieldNumber = 4,
+  };
+
+  using FieldMetadata_Id =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ThermalTemperatureFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Id kId() { return {}; }
+  void set_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Id::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Temp =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ThermalTemperatureFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Temp kTemp() { return {}; }
+  void set_temp(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Temp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TempPrev =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ThermalTemperatureFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TempPrev kTempPrev() { return {}; }
+  void set_temp_prev(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TempPrev::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThermalZone =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ThermalTemperatureFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThermalZone kThermalZone() { return {}; }
+  void set_thermal_zone(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ThermalZone::kFieldId, data, size);
+  }
+  void set_thermal_zone(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThermalZone::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/vmscan.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_VMSCAN_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_VMSCAN_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class MmVmscanKswapdSleepFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmVmscanKswapdSleepFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmVmscanKswapdSleepFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmVmscanKswapdSleepFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_nid() const { return at<1>().valid(); }
+  int32_t nid() const { return at<1>().as_int32(); }
+};
+
+class MmVmscanKswapdSleepFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmVmscanKswapdSleepFtraceEvent_Decoder;
+  enum : int32_t {
+    kNidFieldNumber = 1,
+  };
+
+  using FieldMetadata_Nid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmVmscanKswapdSleepFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nid kNid() { return {}; }
+  void set_nid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmVmscanKswapdWakeFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmVmscanKswapdWakeFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmVmscanKswapdWakeFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmVmscanKswapdWakeFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_nid() const { return at<1>().valid(); }
+  int32_t nid() const { return at<1>().as_int32(); }
+  bool has_order() const { return at<2>().valid(); }
+  int32_t order() const { return at<2>().as_int32(); }
+};
+
+class MmVmscanKswapdWakeFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmVmscanKswapdWakeFtraceEvent_Decoder;
+  enum : int32_t {
+    kNidFieldNumber = 1,
+    kOrderFieldNumber = 2,
+  };
+
+  using FieldMetadata_Nid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmVmscanKswapdWakeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nid kNid() { return {}; }
+  void set_nid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Nid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmVmscanKswapdWakeFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmVmscanDirectReclaimEndFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmVmscanDirectReclaimEndFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmVmscanDirectReclaimEndFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmVmscanDirectReclaimEndFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_nr_reclaimed() const { return at<1>().valid(); }
+  uint64_t nr_reclaimed() const { return at<1>().as_uint64(); }
+};
+
+class MmVmscanDirectReclaimEndFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmVmscanDirectReclaimEndFtraceEvent_Decoder;
+  enum : int32_t {
+    kNrReclaimedFieldNumber = 1,
+  };
+
+  using FieldMetadata_NrReclaimed =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MmVmscanDirectReclaimEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NrReclaimed kNrReclaimed() { return {}; }
+  void set_nr_reclaimed(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NrReclaimed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MmVmscanDirectReclaimBeginFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MmVmscanDirectReclaimBeginFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MmVmscanDirectReclaimBeginFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MmVmscanDirectReclaimBeginFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_order() const { return at<1>().valid(); }
+  int32_t order() const { return at<1>().as_int32(); }
+  bool has_may_writepage() const { return at<2>().valid(); }
+  int32_t may_writepage() const { return at<2>().as_int32(); }
+  bool has_gfp_flags() const { return at<3>().valid(); }
+  uint32_t gfp_flags() const { return at<3>().as_uint32(); }
+};
+
+class MmVmscanDirectReclaimBeginFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = MmVmscanDirectReclaimBeginFtraceEvent_Decoder;
+  enum : int32_t {
+    kOrderFieldNumber = 1,
+    kMayWritepageFieldNumber = 2,
+    kGfpFlagsFieldNumber = 3,
+  };
+
+  using FieldMetadata_Order =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmVmscanDirectReclaimBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Order kOrder() { return {}; }
+  void set_order(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Order::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MayWritepage =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MmVmscanDirectReclaimBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MayWritepage kMayWritepage() { return {}; }
+  void set_may_writepage(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MayWritepage::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GfpFlags =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MmVmscanDirectReclaimBeginFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GfpFlags kGfpFlags() { return {}; }
+  void set_gfp_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GfpFlags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ftrace/workqueue.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_WORKQUEUE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_FTRACE_WORKQUEUE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class WorkqueueQueueWorkFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  WorkqueueQueueWorkFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit WorkqueueQueueWorkFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit WorkqueueQueueWorkFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_work() const { return at<1>().valid(); }
+  uint64_t work() const { return at<1>().as_uint64(); }
+  bool has_function() const { return at<2>().valid(); }
+  uint64_t function() const { return at<2>().as_uint64(); }
+  bool has_workqueue() const { return at<3>().valid(); }
+  uint64_t workqueue() const { return at<3>().as_uint64(); }
+  bool has_req_cpu() const { return at<4>().valid(); }
+  uint32_t req_cpu() const { return at<4>().as_uint32(); }
+  bool has_cpu() const { return at<5>().valid(); }
+  uint32_t cpu() const { return at<5>().as_uint32(); }
+};
+
+class WorkqueueQueueWorkFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = WorkqueueQueueWorkFtraceEvent_Decoder;
+  enum : int32_t {
+    kWorkFieldNumber = 1,
+    kFunctionFieldNumber = 2,
+    kWorkqueueFieldNumber = 3,
+    kReqCpuFieldNumber = 4,
+    kCpuFieldNumber = 5,
+  };
+
+  using FieldMetadata_Work =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      WorkqueueQueueWorkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Work kWork() { return {}; }
+  void set_work(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Work::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Function =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      WorkqueueQueueWorkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Function kFunction() { return {}; }
+  void set_function(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Function::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Workqueue =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      WorkqueueQueueWorkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Workqueue kWorkqueue() { return {}; }
+  void set_workqueue(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Workqueue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReqCpu =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      WorkqueueQueueWorkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReqCpu kReqCpu() { return {}; }
+  void set_req_cpu(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReqCpu::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cpu =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      WorkqueueQueueWorkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cpu kCpu() { return {}; }
+  void set_cpu(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cpu::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class WorkqueueExecuteStartFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  WorkqueueExecuteStartFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit WorkqueueExecuteStartFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit WorkqueueExecuteStartFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_work() const { return at<1>().valid(); }
+  uint64_t work() const { return at<1>().as_uint64(); }
+  bool has_function() const { return at<2>().valid(); }
+  uint64_t function() const { return at<2>().as_uint64(); }
+};
+
+class WorkqueueExecuteStartFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = WorkqueueExecuteStartFtraceEvent_Decoder;
+  enum : int32_t {
+    kWorkFieldNumber = 1,
+    kFunctionFieldNumber = 2,
+  };
+
+  using FieldMetadata_Work =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      WorkqueueExecuteStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Work kWork() { return {}; }
+  void set_work(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Work::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Function =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      WorkqueueExecuteStartFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Function kFunction() { return {}; }
+  void set_function(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Function::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class WorkqueueExecuteEndFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  WorkqueueExecuteEndFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit WorkqueueExecuteEndFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit WorkqueueExecuteEndFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_work() const { return at<1>().valid(); }
+  uint64_t work() const { return at<1>().as_uint64(); }
+};
+
+class WorkqueueExecuteEndFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = WorkqueueExecuteEndFtraceEvent_Decoder;
+  enum : int32_t {
+    kWorkFieldNumber = 1,
+  };
+
+  using FieldMetadata_Work =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      WorkqueueExecuteEndFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Work kWork() { return {}; }
+  void set_work(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Work::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class WorkqueueActivateWorkFtraceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  WorkqueueActivateWorkFtraceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit WorkqueueActivateWorkFtraceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit WorkqueueActivateWorkFtraceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_work() const { return at<1>().valid(); }
+  uint64_t work() const { return at<1>().as_uint64(); }
+};
+
+class WorkqueueActivateWorkFtraceEvent : public ::protozero::Message {
+ public:
+  using Decoder = WorkqueueActivateWorkFtraceEvent_Decoder;
+  enum : int32_t {
+    kWorkFieldNumber = 1,
+  };
+
+  using FieldMetadata_Work =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      WorkqueueActivateWorkFtraceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Work kWork() { return {}; }
+  void set_work(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Work::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/gpu/gpu_counter_event.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_GPU_GPU_COUNTER_EVENT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_GPU_GPU_COUNTER_EVENT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class GpuCounterDescriptor;
+class GpuCounterEvent_GpuCounter;
+
+class GpuCounterEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  GpuCounterEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GpuCounterEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GpuCounterEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_counter_descriptor() const { return at<1>().valid(); }
+  ::protozero::ConstBytes counter_descriptor() const { return at<1>().as_bytes(); }
+  bool has_counters() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> counters() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_gpu_id() const { return at<3>().valid(); }
+  int32_t gpu_id() const { return at<3>().as_int32(); }
+};
+
+class GpuCounterEvent : public ::protozero::Message {
+ public:
+  using Decoder = GpuCounterEvent_Decoder;
+  enum : int32_t {
+    kCounterDescriptorFieldNumber = 1,
+    kCountersFieldNumber = 2,
+    kGpuIdFieldNumber = 3,
+  };
+  using GpuCounter = ::perfetto::protos::pbzero::GpuCounterEvent_GpuCounter;
+
+  using FieldMetadata_CounterDescriptor =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuCounterDescriptor,
+      GpuCounterEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CounterDescriptor kCounterDescriptor() { return {}; }
+  template <typename T = GpuCounterDescriptor> T* set_counter_descriptor() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_Counters =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuCounterEvent_GpuCounter,
+      GpuCounterEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Counters kCounters() { return {}; }
+  template <typename T = GpuCounterEvent_GpuCounter> T* add_counters() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_GpuId =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      GpuCounterEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuId kGpuId() { return {}; }
+  void set_gpu_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GpuId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class GpuCounterEvent_GpuCounter_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  GpuCounterEvent_GpuCounter_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GpuCounterEvent_GpuCounter_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GpuCounterEvent_GpuCounter_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_counter_id() const { return at<1>().valid(); }
+  uint32_t counter_id() const { return at<1>().as_uint32(); }
+  bool has_int_value() const { return at<2>().valid(); }
+  int64_t int_value() const { return at<2>().as_int64(); }
+  bool has_double_value() const { return at<3>().valid(); }
+  double double_value() const { return at<3>().as_double(); }
+};
+
+class GpuCounterEvent_GpuCounter : public ::protozero::Message {
+ public:
+  using Decoder = GpuCounterEvent_GpuCounter_Decoder;
+  enum : int32_t {
+    kCounterIdFieldNumber = 1,
+    kIntValueFieldNumber = 2,
+    kDoubleValueFieldNumber = 3,
+  };
+
+  using FieldMetadata_CounterId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      GpuCounterEvent_GpuCounter>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CounterId kCounterId() { return {}; }
+  void set_counter_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CounterId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IntValue =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      GpuCounterEvent_GpuCounter>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IntValue kIntValue() { return {}; }
+  void set_int_value(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IntValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DoubleValue =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      GpuCounterEvent_GpuCounter>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DoubleValue kDoubleValue() { return {}; }
+  void set_double_value(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_DoubleValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/gpu/gpu_log.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_GPU_GPU_LOG_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_GPU_GPU_LOG_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum GpuLog_Severity : int32_t;
+
+enum GpuLog_Severity : int32_t {
+  GpuLog_Severity_LOG_SEVERITY_UNSPECIFIED = 0,
+  GpuLog_Severity_LOG_SEVERITY_VERBOSE = 1,
+  GpuLog_Severity_LOG_SEVERITY_DEBUG = 2,
+  GpuLog_Severity_LOG_SEVERITY_INFO = 3,
+  GpuLog_Severity_LOG_SEVERITY_WARNING = 4,
+  GpuLog_Severity_LOG_SEVERITY_ERROR = 5,
+};
+
+const GpuLog_Severity GpuLog_Severity_MIN = GpuLog_Severity_LOG_SEVERITY_UNSPECIFIED;
+const GpuLog_Severity GpuLog_Severity_MAX = GpuLog_Severity_LOG_SEVERITY_ERROR;
+
+class GpuLog_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  GpuLog_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GpuLog_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GpuLog_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_severity() const { return at<1>().valid(); }
+  int32_t severity() const { return at<1>().as_int32(); }
+  bool has_tag() const { return at<2>().valid(); }
+  ::protozero::ConstChars tag() const { return at<2>().as_string(); }
+  bool has_log_message() const { return at<3>().valid(); }
+  ::protozero::ConstChars log_message() const { return at<3>().as_string(); }
+};
+
+class GpuLog : public ::protozero::Message {
+ public:
+  using Decoder = GpuLog_Decoder;
+  enum : int32_t {
+    kSeverityFieldNumber = 1,
+    kTagFieldNumber = 2,
+    kLogMessageFieldNumber = 3,
+  };
+  using Severity = ::perfetto::protos::pbzero::GpuLog_Severity;
+  static const Severity LOG_SEVERITY_UNSPECIFIED = GpuLog_Severity_LOG_SEVERITY_UNSPECIFIED;
+  static const Severity LOG_SEVERITY_VERBOSE = GpuLog_Severity_LOG_SEVERITY_VERBOSE;
+  static const Severity LOG_SEVERITY_DEBUG = GpuLog_Severity_LOG_SEVERITY_DEBUG;
+  static const Severity LOG_SEVERITY_INFO = GpuLog_Severity_LOG_SEVERITY_INFO;
+  static const Severity LOG_SEVERITY_WARNING = GpuLog_Severity_LOG_SEVERITY_WARNING;
+  static const Severity LOG_SEVERITY_ERROR = GpuLog_Severity_LOG_SEVERITY_ERROR;
+
+  using FieldMetadata_Severity =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::GpuLog_Severity,
+      GpuLog>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Severity kSeverity() { return {}; }
+  void set_severity(::perfetto::protos::pbzero::GpuLog_Severity value) {
+    static constexpr uint32_t field_id = FieldMetadata_Severity::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Tag =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      GpuLog>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tag kTag() { return {}; }
+  void set_tag(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Tag::kFieldId, data, size);
+  }
+  void set_tag(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tag::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LogMessage =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      GpuLog>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LogMessage kLogMessage() { return {}; }
+  void set_log_message(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_LogMessage::kFieldId, data, size);
+  }
+  void set_log_message(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_LogMessage::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/gpu/gpu_render_stage_event.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_GPU_GPU_RENDER_STAGE_EVENT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_GPU_GPU_RENDER_STAGE_EVENT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class GpuRenderStageEvent_ExtraData;
+class GpuRenderStageEvent_Specifications;
+class GpuRenderStageEvent_Specifications_ContextSpec;
+class GpuRenderStageEvent_Specifications_Description;
+enum InternedGpuRenderStageSpecification_RenderStageCategory : int32_t;
+enum InternedGraphicsContext_Api : int32_t;
+
+enum InternedGpuRenderStageSpecification_RenderStageCategory : int32_t {
+  InternedGpuRenderStageSpecification_RenderStageCategory_OTHER = 0,
+  InternedGpuRenderStageSpecification_RenderStageCategory_GRAPHICS = 1,
+  InternedGpuRenderStageSpecification_RenderStageCategory_COMPUTE = 2,
+};
+
+const InternedGpuRenderStageSpecification_RenderStageCategory InternedGpuRenderStageSpecification_RenderStageCategory_MIN = InternedGpuRenderStageSpecification_RenderStageCategory_OTHER;
+const InternedGpuRenderStageSpecification_RenderStageCategory InternedGpuRenderStageSpecification_RenderStageCategory_MAX = InternedGpuRenderStageSpecification_RenderStageCategory_COMPUTE;
+
+enum InternedGraphicsContext_Api : int32_t {
+  InternedGraphicsContext_Api_UNDEFINED = 0,
+  InternedGraphicsContext_Api_OPEN_GL = 1,
+  InternedGraphicsContext_Api_VULKAN = 2,
+  InternedGraphicsContext_Api_OPEN_CL = 3,
+};
+
+const InternedGraphicsContext_Api InternedGraphicsContext_Api_MIN = InternedGraphicsContext_Api_UNDEFINED;
+const InternedGraphicsContext_Api InternedGraphicsContext_Api_MAX = InternedGraphicsContext_Api_OPEN_CL;
+
+class InternedGpuRenderStageSpecification_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  InternedGpuRenderStageSpecification_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit InternedGpuRenderStageSpecification_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit InternedGpuRenderStageSpecification_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_iid() const { return at<1>().valid(); }
+  uint64_t iid() const { return at<1>().as_uint64(); }
+  bool has_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars name() const { return at<2>().as_string(); }
+  bool has_description() const { return at<3>().valid(); }
+  ::protozero::ConstChars description() const { return at<3>().as_string(); }
+  bool has_category() const { return at<4>().valid(); }
+  int32_t category() const { return at<4>().as_int32(); }
+};
+
+class InternedGpuRenderStageSpecification : public ::protozero::Message {
+ public:
+  using Decoder = InternedGpuRenderStageSpecification_Decoder;
+  enum : int32_t {
+    kIidFieldNumber = 1,
+    kNameFieldNumber = 2,
+    kDescriptionFieldNumber = 3,
+    kCategoryFieldNumber = 4,
+  };
+  using RenderStageCategory = ::perfetto::protos::pbzero::InternedGpuRenderStageSpecification_RenderStageCategory;
+  static const RenderStageCategory OTHER = InternedGpuRenderStageSpecification_RenderStageCategory_OTHER;
+  static const RenderStageCategory GRAPHICS = InternedGpuRenderStageSpecification_RenderStageCategory_GRAPHICS;
+  static const RenderStageCategory COMPUTE = InternedGpuRenderStageSpecification_RenderStageCategory_COMPUTE;
+
+  using FieldMetadata_Iid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      InternedGpuRenderStageSpecification>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Iid kIid() { return {}; }
+  void set_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Iid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      InternedGpuRenderStageSpecification>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Description =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      InternedGpuRenderStageSpecification>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Description kDescription() { return {}; }
+  void set_description(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Description::kFieldId, data, size);
+  }
+  void set_description(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Description::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Category =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::InternedGpuRenderStageSpecification_RenderStageCategory,
+      InternedGpuRenderStageSpecification>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Category kCategory() { return {}; }
+  void set_category(::perfetto::protos::pbzero::InternedGpuRenderStageSpecification_RenderStageCategory value) {
+    static constexpr uint32_t field_id = FieldMetadata_Category::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class InternedGraphicsContext_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  InternedGraphicsContext_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit InternedGraphicsContext_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit InternedGraphicsContext_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_iid() const { return at<1>().valid(); }
+  uint64_t iid() const { return at<1>().as_uint64(); }
+  bool has_pid() const { return at<2>().valid(); }
+  int32_t pid() const { return at<2>().as_int32(); }
+  bool has_api() const { return at<3>().valid(); }
+  int32_t api() const { return at<3>().as_int32(); }
+};
+
+class InternedGraphicsContext : public ::protozero::Message {
+ public:
+  using Decoder = InternedGraphicsContext_Decoder;
+  enum : int32_t {
+    kIidFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kApiFieldNumber = 3,
+  };
+  using Api = ::perfetto::protos::pbzero::InternedGraphicsContext_Api;
+  static const Api UNDEFINED = InternedGraphicsContext_Api_UNDEFINED;
+  static const Api OPEN_GL = InternedGraphicsContext_Api_OPEN_GL;
+  static const Api VULKAN = InternedGraphicsContext_Api_VULKAN;
+  static const Api OPEN_CL = InternedGraphicsContext_Api_OPEN_CL;
+
+  using FieldMetadata_Iid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      InternedGraphicsContext>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Iid kIid() { return {}; }
+  void set_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Iid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      InternedGraphicsContext>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Api =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::InternedGraphicsContext_Api,
+      InternedGraphicsContext>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Api kApi() { return {}; }
+  void set_api(::perfetto::protos::pbzero::InternedGraphicsContext_Api value) {
+    static constexpr uint32_t field_id = FieldMetadata_Api::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class GpuRenderStageEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/15, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  GpuRenderStageEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GpuRenderStageEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GpuRenderStageEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_event_id() const { return at<1>().valid(); }
+  uint64_t event_id() const { return at<1>().as_uint64(); }
+  bool has_duration() const { return at<2>().valid(); }
+  uint64_t duration() const { return at<2>().as_uint64(); }
+  bool has_hw_queue_iid() const { return at<13>().valid(); }
+  uint64_t hw_queue_iid() const { return at<13>().as_uint64(); }
+  bool has_stage_iid() const { return at<14>().valid(); }
+  uint64_t stage_iid() const { return at<14>().as_uint64(); }
+  bool has_gpu_id() const { return at<11>().valid(); }
+  int32_t gpu_id() const { return at<11>().as_int32(); }
+  bool has_context() const { return at<5>().valid(); }
+  uint64_t context() const { return at<5>().as_uint64(); }
+  bool has_render_target_handle() const { return at<8>().valid(); }
+  uint64_t render_target_handle() const { return at<8>().as_uint64(); }
+  bool has_submission_id() const { return at<10>().valid(); }
+  uint32_t submission_id() const { return at<10>().as_uint32(); }
+  bool has_extra_data() const { return at<6>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> extra_data() const { return GetRepeated<::protozero::ConstBytes>(6); }
+  bool has_render_pass_handle() const { return at<9>().valid(); }
+  uint64_t render_pass_handle() const { return at<9>().as_uint64(); }
+  bool has_render_subpass_index_mask() const { return at<15>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> render_subpass_index_mask() const { return GetRepeated<uint64_t>(15); }
+  bool has_command_buffer_handle() const { return at<12>().valid(); }
+  uint64_t command_buffer_handle() const { return at<12>().as_uint64(); }
+  bool has_specifications() const { return at<7>().valid(); }
+  ::protozero::ConstBytes specifications() const { return at<7>().as_bytes(); }
+  bool has_hw_queue_id() const { return at<3>().valid(); }
+  int32_t hw_queue_id() const { return at<3>().as_int32(); }
+  bool has_stage_id() const { return at<4>().valid(); }
+  int32_t stage_id() const { return at<4>().as_int32(); }
+};
+
+class GpuRenderStageEvent : public ::protozero::Message {
+ public:
+  using Decoder = GpuRenderStageEvent_Decoder;
+  enum : int32_t {
+    kEventIdFieldNumber = 1,
+    kDurationFieldNumber = 2,
+    kHwQueueIidFieldNumber = 13,
+    kStageIidFieldNumber = 14,
+    kGpuIdFieldNumber = 11,
+    kContextFieldNumber = 5,
+    kRenderTargetHandleFieldNumber = 8,
+    kSubmissionIdFieldNumber = 10,
+    kExtraDataFieldNumber = 6,
+    kRenderPassHandleFieldNumber = 9,
+    kRenderSubpassIndexMaskFieldNumber = 15,
+    kCommandBufferHandleFieldNumber = 12,
+    kSpecificationsFieldNumber = 7,
+    kHwQueueIdFieldNumber = 3,
+    kStageIdFieldNumber = 4,
+  };
+  using ExtraData = ::perfetto::protos::pbzero::GpuRenderStageEvent_ExtraData;
+  using Specifications = ::perfetto::protos::pbzero::GpuRenderStageEvent_Specifications;
+
+  using FieldMetadata_EventId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      GpuRenderStageEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EventId kEventId() { return {}; }
+  void set_event_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EventId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Duration =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      GpuRenderStageEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Duration kDuration() { return {}; }
+  void set_duration(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Duration::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HwQueueIid =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      GpuRenderStageEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HwQueueIid kHwQueueIid() { return {}; }
+  void set_hw_queue_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_HwQueueIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StageIid =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      GpuRenderStageEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StageIid kStageIid() { return {}; }
+  void set_stage_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_StageIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GpuId =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      GpuRenderStageEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuId kGpuId() { return {}; }
+  void set_gpu_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GpuId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Context =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      GpuRenderStageEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Context kContext() { return {}; }
+  void set_context(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Context::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RenderTargetHandle =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      GpuRenderStageEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RenderTargetHandle kRenderTargetHandle() { return {}; }
+  void set_render_target_handle(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RenderTargetHandle::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SubmissionId =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      GpuRenderStageEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SubmissionId kSubmissionId() { return {}; }
+  void set_submission_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SubmissionId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExtraData =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuRenderStageEvent_ExtraData,
+      GpuRenderStageEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExtraData kExtraData() { return {}; }
+  template <typename T = GpuRenderStageEvent_ExtraData> T* add_extra_data() {
+    return BeginNestedMessage<T>(6);
+  }
+
+
+  using FieldMetadata_RenderPassHandle =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      GpuRenderStageEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RenderPassHandle kRenderPassHandle() { return {}; }
+  void set_render_pass_handle(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RenderPassHandle::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RenderSubpassIndexMask =
+    ::protozero::proto_utils::FieldMetadata<
+      15,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      GpuRenderStageEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RenderSubpassIndexMask kRenderSubpassIndexMask() { return {}; }
+  void add_render_subpass_index_mask(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RenderSubpassIndexMask::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CommandBufferHandle =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      GpuRenderStageEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CommandBufferHandle kCommandBufferHandle() { return {}; }
+  void set_command_buffer_handle(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CommandBufferHandle::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Specifications =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuRenderStageEvent_Specifications,
+      GpuRenderStageEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Specifications kSpecifications() { return {}; }
+  template <typename T = GpuRenderStageEvent_Specifications> T* set_specifications() {
+    return BeginNestedMessage<T>(7);
+  }
+
+
+  using FieldMetadata_HwQueueId =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      GpuRenderStageEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HwQueueId kHwQueueId() { return {}; }
+  void set_hw_queue_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_HwQueueId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StageId =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      GpuRenderStageEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StageId kStageId() { return {}; }
+  void set_stage_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_StageId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class GpuRenderStageEvent_Specifications_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  GpuRenderStageEvent_Specifications_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GpuRenderStageEvent_Specifications_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GpuRenderStageEvent_Specifications_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_context_spec() const { return at<1>().valid(); }
+  ::protozero::ConstBytes context_spec() const { return at<1>().as_bytes(); }
+  bool has_hw_queue() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> hw_queue() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_stage() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> stage() const { return GetRepeated<::protozero::ConstBytes>(3); }
+};
+
+class GpuRenderStageEvent_Specifications : public ::protozero::Message {
+ public:
+  using Decoder = GpuRenderStageEvent_Specifications_Decoder;
+  enum : int32_t {
+    kContextSpecFieldNumber = 1,
+    kHwQueueFieldNumber = 2,
+    kStageFieldNumber = 3,
+  };
+  using ContextSpec = ::perfetto::protos::pbzero::GpuRenderStageEvent_Specifications_ContextSpec;
+  using Description = ::perfetto::protos::pbzero::GpuRenderStageEvent_Specifications_Description;
+
+  using FieldMetadata_ContextSpec =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuRenderStageEvent_Specifications_ContextSpec,
+      GpuRenderStageEvent_Specifications>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ContextSpec kContextSpec() { return {}; }
+  template <typename T = GpuRenderStageEvent_Specifications_ContextSpec> T* set_context_spec() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_HwQueue =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuRenderStageEvent_Specifications_Description,
+      GpuRenderStageEvent_Specifications>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HwQueue kHwQueue() { return {}; }
+  template <typename T = GpuRenderStageEvent_Specifications_Description> T* add_hw_queue() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_Stage =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuRenderStageEvent_Specifications_Description,
+      GpuRenderStageEvent_Specifications>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Stage kStage() { return {}; }
+  template <typename T = GpuRenderStageEvent_Specifications_Description> T* add_stage() {
+    return BeginNestedMessage<T>(3);
+  }
+
+};
+
+class GpuRenderStageEvent_Specifications_Description_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  GpuRenderStageEvent_Specifications_Description_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GpuRenderStageEvent_Specifications_Description_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GpuRenderStageEvent_Specifications_Description_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_description() const { return at<2>().valid(); }
+  ::protozero::ConstChars description() const { return at<2>().as_string(); }
+};
+
+class GpuRenderStageEvent_Specifications_Description : public ::protozero::Message {
+ public:
+  using Decoder = GpuRenderStageEvent_Specifications_Description_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kDescriptionFieldNumber = 2,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      GpuRenderStageEvent_Specifications_Description>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Description =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      GpuRenderStageEvent_Specifications_Description>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Description kDescription() { return {}; }
+  void set_description(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Description::kFieldId, data, size);
+  }
+  void set_description(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Description::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class GpuRenderStageEvent_Specifications_ContextSpec_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  GpuRenderStageEvent_Specifications_ContextSpec_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GpuRenderStageEvent_Specifications_ContextSpec_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GpuRenderStageEvent_Specifications_ContextSpec_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_context() const { return at<1>().valid(); }
+  uint64_t context() const { return at<1>().as_uint64(); }
+  bool has_pid() const { return at<2>().valid(); }
+  int32_t pid() const { return at<2>().as_int32(); }
+};
+
+class GpuRenderStageEvent_Specifications_ContextSpec : public ::protozero::Message {
+ public:
+  using Decoder = GpuRenderStageEvent_Specifications_ContextSpec_Decoder;
+  enum : int32_t {
+    kContextFieldNumber = 1,
+    kPidFieldNumber = 2,
+  };
+
+  using FieldMetadata_Context =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      GpuRenderStageEvent_Specifications_ContextSpec>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Context kContext() { return {}; }
+  void set_context(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Context::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      GpuRenderStageEvent_Specifications_ContextSpec>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class GpuRenderStageEvent_ExtraData_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  GpuRenderStageEvent_ExtraData_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit GpuRenderStageEvent_ExtraData_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit GpuRenderStageEvent_ExtraData_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_value() const { return at<2>().valid(); }
+  ::protozero::ConstChars value() const { return at<2>().as_string(); }
+};
+
+class GpuRenderStageEvent_ExtraData : public ::protozero::Message {
+ public:
+  using Decoder = GpuRenderStageEvent_ExtraData_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kValueFieldNumber = 2,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      GpuRenderStageEvent_ExtraData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Value =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      GpuRenderStageEvent_ExtraData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Value kValue() { return {}; }
+  void set_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Value::kFieldId, data, size);
+  }
+  void set_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Value::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/gpu/vulkan_api_event.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_GPU_VULKAN_API_EVENT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_GPU_VULKAN_API_EVENT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class VulkanApiEvent_VkDebugUtilsObjectName;
+class VulkanApiEvent_VkQueueSubmit;
+
+class VulkanApiEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  VulkanApiEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit VulkanApiEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit VulkanApiEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_vk_debug_utils_object_name() const { return at<1>().valid(); }
+  ::protozero::ConstBytes vk_debug_utils_object_name() const { return at<1>().as_bytes(); }
+  bool has_vk_queue_submit() const { return at<2>().valid(); }
+  ::protozero::ConstBytes vk_queue_submit() const { return at<2>().as_bytes(); }
+};
+
+class VulkanApiEvent : public ::protozero::Message {
+ public:
+  using Decoder = VulkanApiEvent_Decoder;
+  enum : int32_t {
+    kVkDebugUtilsObjectNameFieldNumber = 1,
+    kVkQueueSubmitFieldNumber = 2,
+  };
+  using VkDebugUtilsObjectName = ::perfetto::protos::pbzero::VulkanApiEvent_VkDebugUtilsObjectName;
+  using VkQueueSubmit = ::perfetto::protos::pbzero::VulkanApiEvent_VkQueueSubmit;
+
+  using FieldMetadata_VkDebugUtilsObjectName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      VulkanApiEvent_VkDebugUtilsObjectName,
+      VulkanApiEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VkDebugUtilsObjectName kVkDebugUtilsObjectName() { return {}; }
+  template <typename T = VulkanApiEvent_VkDebugUtilsObjectName> T* set_vk_debug_utils_object_name() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_VkQueueSubmit =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      VulkanApiEvent_VkQueueSubmit,
+      VulkanApiEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VkQueueSubmit kVkQueueSubmit() { return {}; }
+  template <typename T = VulkanApiEvent_VkQueueSubmit> T* set_vk_queue_submit() {
+    return BeginNestedMessage<T>(2);
+  }
+
+};
+
+class VulkanApiEvent_VkQueueSubmit_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  VulkanApiEvent_VkQueueSubmit_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit VulkanApiEvent_VkQueueSubmit_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit VulkanApiEvent_VkQueueSubmit_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_duration_ns() const { return at<1>().valid(); }
+  uint64_t duration_ns() const { return at<1>().as_uint64(); }
+  bool has_pid() const { return at<2>().valid(); }
+  uint32_t pid() const { return at<2>().as_uint32(); }
+  bool has_tid() const { return at<3>().valid(); }
+  uint32_t tid() const { return at<3>().as_uint32(); }
+  bool has_vk_queue() const { return at<4>().valid(); }
+  uint64_t vk_queue() const { return at<4>().as_uint64(); }
+  bool has_vk_command_buffers() const { return at<5>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> vk_command_buffers() const { return GetRepeated<uint64_t>(5); }
+  bool has_submission_id() const { return at<6>().valid(); }
+  uint32_t submission_id() const { return at<6>().as_uint32(); }
+};
+
+class VulkanApiEvent_VkQueueSubmit : public ::protozero::Message {
+ public:
+  using Decoder = VulkanApiEvent_VkQueueSubmit_Decoder;
+  enum : int32_t {
+    kDurationNsFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kTidFieldNumber = 3,
+    kVkQueueFieldNumber = 4,
+    kVkCommandBuffersFieldNumber = 5,
+    kSubmissionIdFieldNumber = 6,
+  };
+
+  using FieldMetadata_DurationNs =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      VulkanApiEvent_VkQueueSubmit>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DurationNs kDurationNs() { return {}; }
+  void set_duration_ns(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DurationNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      VulkanApiEvent_VkQueueSubmit>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Tid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      VulkanApiEvent_VkQueueSubmit>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tid kTid() { return {}; }
+  void set_tid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_VkQueue =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      VulkanApiEvent_VkQueueSubmit>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VkQueue kVkQueue() { return {}; }
+  void set_vk_queue(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_VkQueue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_VkCommandBuffers =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      VulkanApiEvent_VkQueueSubmit>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VkCommandBuffers kVkCommandBuffers() { return {}; }
+  void add_vk_command_buffers(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_VkCommandBuffers::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SubmissionId =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      VulkanApiEvent_VkQueueSubmit>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SubmissionId kSubmissionId() { return {}; }
+  void set_submission_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SubmissionId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class VulkanApiEvent_VkDebugUtilsObjectName_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  VulkanApiEvent_VkDebugUtilsObjectName_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit VulkanApiEvent_VkDebugUtilsObjectName_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit VulkanApiEvent_VkDebugUtilsObjectName_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  uint32_t pid() const { return at<1>().as_uint32(); }
+  bool has_vk_device() const { return at<2>().valid(); }
+  uint64_t vk_device() const { return at<2>().as_uint64(); }
+  bool has_object_type() const { return at<3>().valid(); }
+  int32_t object_type() const { return at<3>().as_int32(); }
+  bool has_object() const { return at<4>().valid(); }
+  uint64_t object() const { return at<4>().as_uint64(); }
+  bool has_object_name() const { return at<5>().valid(); }
+  ::protozero::ConstChars object_name() const { return at<5>().as_string(); }
+};
+
+class VulkanApiEvent_VkDebugUtilsObjectName : public ::protozero::Message {
+ public:
+  using Decoder = VulkanApiEvent_VkDebugUtilsObjectName_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kVkDeviceFieldNumber = 2,
+    kObjectTypeFieldNumber = 3,
+    kObjectFieldNumber = 4,
+    kObjectNameFieldNumber = 5,
+  };
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      VulkanApiEvent_VkDebugUtilsObjectName>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_VkDevice =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      VulkanApiEvent_VkDebugUtilsObjectName>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VkDevice kVkDevice() { return {}; }
+  void set_vk_device(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_VkDevice::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ObjectType =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      VulkanApiEvent_VkDebugUtilsObjectName>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ObjectType kObjectType() { return {}; }
+  void set_object_type(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ObjectType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Object =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      VulkanApiEvent_VkDebugUtilsObjectName>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Object kObject() { return {}; }
+  void set_object(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Object::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ObjectName =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      VulkanApiEvent_VkDebugUtilsObjectName>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ObjectName kObjectName() { return {}; }
+  void set_object_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ObjectName::kFieldId, data, size);
+  }
+  void set_object_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ObjectName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/gpu/vulkan_memory_event.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_GPU_VULKAN_MEMORY_EVENT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_GPU_VULKAN_MEMORY_EVENT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class VulkanMemoryEventAnnotation;
+enum VulkanMemoryEvent_AllocationScope : int32_t;
+enum VulkanMemoryEvent_Operation : int32_t;
+enum VulkanMemoryEvent_Source : int32_t;
+
+enum VulkanMemoryEvent_Source : int32_t {
+  VulkanMemoryEvent_Source_SOURCE_UNSPECIFIED = 0,
+  VulkanMemoryEvent_Source_SOURCE_DRIVER = 1,
+  VulkanMemoryEvent_Source_SOURCE_DEVICE = 2,
+  VulkanMemoryEvent_Source_SOURCE_DEVICE_MEMORY = 3,
+  VulkanMemoryEvent_Source_SOURCE_BUFFER = 4,
+  VulkanMemoryEvent_Source_SOURCE_IMAGE = 5,
+};
+
+const VulkanMemoryEvent_Source VulkanMemoryEvent_Source_MIN = VulkanMemoryEvent_Source_SOURCE_UNSPECIFIED;
+const VulkanMemoryEvent_Source VulkanMemoryEvent_Source_MAX = VulkanMemoryEvent_Source_SOURCE_IMAGE;
+
+enum VulkanMemoryEvent_Operation : int32_t {
+  VulkanMemoryEvent_Operation_OP_UNSPECIFIED = 0,
+  VulkanMemoryEvent_Operation_OP_CREATE = 1,
+  VulkanMemoryEvent_Operation_OP_DESTROY = 2,
+  VulkanMemoryEvent_Operation_OP_BIND = 3,
+  VulkanMemoryEvent_Operation_OP_DESTROY_BOUND = 4,
+  VulkanMemoryEvent_Operation_OP_ANNOTATIONS = 5,
+};
+
+const VulkanMemoryEvent_Operation VulkanMemoryEvent_Operation_MIN = VulkanMemoryEvent_Operation_OP_UNSPECIFIED;
+const VulkanMemoryEvent_Operation VulkanMemoryEvent_Operation_MAX = VulkanMemoryEvent_Operation_OP_ANNOTATIONS;
+
+enum VulkanMemoryEvent_AllocationScope : int32_t {
+  VulkanMemoryEvent_AllocationScope_SCOPE_UNSPECIFIED = 0,
+  VulkanMemoryEvent_AllocationScope_SCOPE_COMMAND = 1,
+  VulkanMemoryEvent_AllocationScope_SCOPE_OBJECT = 2,
+  VulkanMemoryEvent_AllocationScope_SCOPE_CACHE = 3,
+  VulkanMemoryEvent_AllocationScope_SCOPE_DEVICE = 4,
+  VulkanMemoryEvent_AllocationScope_SCOPE_INSTANCE = 5,
+};
+
+const VulkanMemoryEvent_AllocationScope VulkanMemoryEvent_AllocationScope_MIN = VulkanMemoryEvent_AllocationScope_SCOPE_UNSPECIFIED;
+const VulkanMemoryEvent_AllocationScope VulkanMemoryEvent_AllocationScope_MAX = VulkanMemoryEvent_AllocationScope_SCOPE_INSTANCE;
+
+class VulkanMemoryEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/20, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  VulkanMemoryEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit VulkanMemoryEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit VulkanMemoryEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_source() const { return at<1>().valid(); }
+  int32_t source() const { return at<1>().as_int32(); }
+  bool has_operation() const { return at<2>().valid(); }
+  int32_t operation() const { return at<2>().as_int32(); }
+  bool has_timestamp() const { return at<3>().valid(); }
+  int64_t timestamp() const { return at<3>().as_int64(); }
+  bool has_pid() const { return at<4>().valid(); }
+  uint32_t pid() const { return at<4>().as_uint32(); }
+  bool has_memory_address() const { return at<5>().valid(); }
+  uint64_t memory_address() const { return at<5>().as_uint64(); }
+  bool has_memory_size() const { return at<6>().valid(); }
+  uint64_t memory_size() const { return at<6>().as_uint64(); }
+  bool has_caller_iid() const { return at<7>().valid(); }
+  uint64_t caller_iid() const { return at<7>().as_uint64(); }
+  bool has_allocation_scope() const { return at<8>().valid(); }
+  int32_t allocation_scope() const { return at<8>().as_int32(); }
+  bool has_annotations() const { return at<9>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> annotations() const { return GetRepeated<::protozero::ConstBytes>(9); }
+  bool has_device() const { return at<16>().valid(); }
+  uint64_t device() const { return at<16>().as_uint64(); }
+  bool has_device_memory() const { return at<17>().valid(); }
+  uint64_t device_memory() const { return at<17>().as_uint64(); }
+  bool has_memory_type() const { return at<18>().valid(); }
+  uint32_t memory_type() const { return at<18>().as_uint32(); }
+  bool has_heap() const { return at<19>().valid(); }
+  uint32_t heap() const { return at<19>().as_uint32(); }
+  bool has_object_handle() const { return at<20>().valid(); }
+  uint64_t object_handle() const { return at<20>().as_uint64(); }
+};
+
+class VulkanMemoryEvent : public ::protozero::Message {
+ public:
+  using Decoder = VulkanMemoryEvent_Decoder;
+  enum : int32_t {
+    kSourceFieldNumber = 1,
+    kOperationFieldNumber = 2,
+    kTimestampFieldNumber = 3,
+    kPidFieldNumber = 4,
+    kMemoryAddressFieldNumber = 5,
+    kMemorySizeFieldNumber = 6,
+    kCallerIidFieldNumber = 7,
+    kAllocationScopeFieldNumber = 8,
+    kAnnotationsFieldNumber = 9,
+    kDeviceFieldNumber = 16,
+    kDeviceMemoryFieldNumber = 17,
+    kMemoryTypeFieldNumber = 18,
+    kHeapFieldNumber = 19,
+    kObjectHandleFieldNumber = 20,
+  };
+  using Source = ::perfetto::protos::pbzero::VulkanMemoryEvent_Source;
+  using Operation = ::perfetto::protos::pbzero::VulkanMemoryEvent_Operation;
+  using AllocationScope = ::perfetto::protos::pbzero::VulkanMemoryEvent_AllocationScope;
+  static const Source SOURCE_UNSPECIFIED = VulkanMemoryEvent_Source_SOURCE_UNSPECIFIED;
+  static const Source SOURCE_DRIVER = VulkanMemoryEvent_Source_SOURCE_DRIVER;
+  static const Source SOURCE_DEVICE = VulkanMemoryEvent_Source_SOURCE_DEVICE;
+  static const Source SOURCE_DEVICE_MEMORY = VulkanMemoryEvent_Source_SOURCE_DEVICE_MEMORY;
+  static const Source SOURCE_BUFFER = VulkanMemoryEvent_Source_SOURCE_BUFFER;
+  static const Source SOURCE_IMAGE = VulkanMemoryEvent_Source_SOURCE_IMAGE;
+  static const Operation OP_UNSPECIFIED = VulkanMemoryEvent_Operation_OP_UNSPECIFIED;
+  static const Operation OP_CREATE = VulkanMemoryEvent_Operation_OP_CREATE;
+  static const Operation OP_DESTROY = VulkanMemoryEvent_Operation_OP_DESTROY;
+  static const Operation OP_BIND = VulkanMemoryEvent_Operation_OP_BIND;
+  static const Operation OP_DESTROY_BOUND = VulkanMemoryEvent_Operation_OP_DESTROY_BOUND;
+  static const Operation OP_ANNOTATIONS = VulkanMemoryEvent_Operation_OP_ANNOTATIONS;
+  static const AllocationScope SCOPE_UNSPECIFIED = VulkanMemoryEvent_AllocationScope_SCOPE_UNSPECIFIED;
+  static const AllocationScope SCOPE_COMMAND = VulkanMemoryEvent_AllocationScope_SCOPE_COMMAND;
+  static const AllocationScope SCOPE_OBJECT = VulkanMemoryEvent_AllocationScope_SCOPE_OBJECT;
+  static const AllocationScope SCOPE_CACHE = VulkanMemoryEvent_AllocationScope_SCOPE_CACHE;
+  static const AllocationScope SCOPE_DEVICE = VulkanMemoryEvent_AllocationScope_SCOPE_DEVICE;
+  static const AllocationScope SCOPE_INSTANCE = VulkanMemoryEvent_AllocationScope_SCOPE_INSTANCE;
+
+  using FieldMetadata_Source =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::VulkanMemoryEvent_Source,
+      VulkanMemoryEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Source kSource() { return {}; }
+  void set_source(::perfetto::protos::pbzero::VulkanMemoryEvent_Source value) {
+    static constexpr uint32_t field_id = FieldMetadata_Source::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Operation =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::VulkanMemoryEvent_Operation,
+      VulkanMemoryEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Operation kOperation() { return {}; }
+  void set_operation(::perfetto::protos::pbzero::VulkanMemoryEvent_Operation value) {
+    static constexpr uint32_t field_id = FieldMetadata_Operation::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Timestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      VulkanMemoryEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Timestamp kTimestamp() { return {}; }
+  void set_timestamp(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Timestamp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      VulkanMemoryEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MemoryAddress =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kFixed64,
+      uint64_t,
+      VulkanMemoryEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MemoryAddress kMemoryAddress() { return {}; }
+  void set_memory_address(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MemoryAddress::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kFixed64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MemorySize =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      VulkanMemoryEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MemorySize kMemorySize() { return {}; }
+  void set_memory_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MemorySize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CallerIid =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      VulkanMemoryEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CallerIid kCallerIid() { return {}; }
+  void set_caller_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CallerIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AllocationScope =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::VulkanMemoryEvent_AllocationScope,
+      VulkanMemoryEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllocationScope kAllocationScope() { return {}; }
+  void set_allocation_scope(::perfetto::protos::pbzero::VulkanMemoryEvent_AllocationScope value) {
+    static constexpr uint32_t field_id = FieldMetadata_AllocationScope::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Annotations =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      VulkanMemoryEventAnnotation,
+      VulkanMemoryEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Annotations kAnnotations() { return {}; }
+  template <typename T = VulkanMemoryEventAnnotation> T* add_annotations() {
+    return BeginNestedMessage<T>(9);
+  }
+
+
+  using FieldMetadata_Device =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kFixed64,
+      uint64_t,
+      VulkanMemoryEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Device kDevice() { return {}; }
+  void set_device(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Device::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kFixed64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DeviceMemory =
+    ::protozero::proto_utils::FieldMetadata<
+      17,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kFixed64,
+      uint64_t,
+      VulkanMemoryEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DeviceMemory kDeviceMemory() { return {}; }
+  void set_device_memory(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DeviceMemory::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kFixed64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MemoryType =
+    ::protozero::proto_utils::FieldMetadata<
+      18,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      VulkanMemoryEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MemoryType kMemoryType() { return {}; }
+  void set_memory_type(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MemoryType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Heap =
+    ::protozero::proto_utils::FieldMetadata<
+      19,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      VulkanMemoryEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Heap kHeap() { return {}; }
+  void set_heap(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Heap::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ObjectHandle =
+    ::protozero::proto_utils::FieldMetadata<
+      20,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kFixed64,
+      uint64_t,
+      VulkanMemoryEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ObjectHandle kObjectHandle() { return {}; }
+  void set_object_handle(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ObjectHandle::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kFixed64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class VulkanMemoryEventAnnotation_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  VulkanMemoryEventAnnotation_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit VulkanMemoryEventAnnotation_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit VulkanMemoryEventAnnotation_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_key_iid() const { return at<1>().valid(); }
+  uint64_t key_iid() const { return at<1>().as_uint64(); }
+  bool has_int_value() const { return at<2>().valid(); }
+  int64_t int_value() const { return at<2>().as_int64(); }
+  bool has_double_value() const { return at<3>().valid(); }
+  double double_value() const { return at<3>().as_double(); }
+  bool has_string_iid() const { return at<4>().valid(); }
+  uint64_t string_iid() const { return at<4>().as_uint64(); }
+};
+
+class VulkanMemoryEventAnnotation : public ::protozero::Message {
+ public:
+  using Decoder = VulkanMemoryEventAnnotation_Decoder;
+  enum : int32_t {
+    kKeyIidFieldNumber = 1,
+    kIntValueFieldNumber = 2,
+    kDoubleValueFieldNumber = 3,
+    kStringIidFieldNumber = 4,
+  };
+
+  using FieldMetadata_KeyIid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      VulkanMemoryEventAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_KeyIid kKeyIid() { return {}; }
+  void set_key_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_KeyIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IntValue =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      VulkanMemoryEventAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IntValue kIntValue() { return {}; }
+  void set_int_value(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IntValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DoubleValue =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      VulkanMemoryEventAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DoubleValue kDoubleValue() { return {}; }
+  void set_double_value(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_DoubleValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StringIid =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      VulkanMemoryEventAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StringIid kStringIid() { return {}; }
+  void set_string_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_StringIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/profiling/deobfuscation.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PROFILING_DEOBFUSCATION_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PROFILING_DEOBFUSCATION_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class ObfuscatedClass;
+class ObfuscatedMember;
+
+class DeobfuscationMapping_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  DeobfuscationMapping_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit DeobfuscationMapping_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit DeobfuscationMapping_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_package_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars package_name() const { return at<1>().as_string(); }
+  bool has_version_code() const { return at<2>().valid(); }
+  int64_t version_code() const { return at<2>().as_int64(); }
+  bool has_obfuscated_classes() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> obfuscated_classes() const { return GetRepeated<::protozero::ConstBytes>(3); }
+};
+
+class DeobfuscationMapping : public ::protozero::Message {
+ public:
+  using Decoder = DeobfuscationMapping_Decoder;
+  enum : int32_t {
+    kPackageNameFieldNumber = 1,
+    kVersionCodeFieldNumber = 2,
+    kObfuscatedClassesFieldNumber = 3,
+  };
+
+  using FieldMetadata_PackageName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DeobfuscationMapping>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PackageName kPackageName() { return {}; }
+  void set_package_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_PackageName::kFieldId, data, size);
+  }
+  void set_package_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_PackageName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_VersionCode =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      DeobfuscationMapping>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VersionCode kVersionCode() { return {}; }
+  void set_version_code(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_VersionCode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ObfuscatedClasses =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ObfuscatedClass,
+      DeobfuscationMapping>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ObfuscatedClasses kObfuscatedClasses() { return {}; }
+  template <typename T = ObfuscatedClass> T* add_obfuscated_classes() {
+    return BeginNestedMessage<T>(3);
+  }
+
+};
+
+class ObfuscatedClass_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ObfuscatedClass_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ObfuscatedClass_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ObfuscatedClass_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_obfuscated_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars obfuscated_name() const { return at<1>().as_string(); }
+  bool has_deobfuscated_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars deobfuscated_name() const { return at<2>().as_string(); }
+  bool has_obfuscated_members() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> obfuscated_members() const { return GetRepeated<::protozero::ConstBytes>(3); }
+  bool has_obfuscated_methods() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> obfuscated_methods() const { return GetRepeated<::protozero::ConstBytes>(4); }
+};
+
+class ObfuscatedClass : public ::protozero::Message {
+ public:
+  using Decoder = ObfuscatedClass_Decoder;
+  enum : int32_t {
+    kObfuscatedNameFieldNumber = 1,
+    kDeobfuscatedNameFieldNumber = 2,
+    kObfuscatedMembersFieldNumber = 3,
+    kObfuscatedMethodsFieldNumber = 4,
+  };
+
+  using FieldMetadata_ObfuscatedName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ObfuscatedClass>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ObfuscatedName kObfuscatedName() { return {}; }
+  void set_obfuscated_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ObfuscatedName::kFieldId, data, size);
+  }
+  void set_obfuscated_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ObfuscatedName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DeobfuscatedName =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ObfuscatedClass>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DeobfuscatedName kDeobfuscatedName() { return {}; }
+  void set_deobfuscated_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_DeobfuscatedName::kFieldId, data, size);
+  }
+  void set_deobfuscated_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_DeobfuscatedName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ObfuscatedMembers =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ObfuscatedMember,
+      ObfuscatedClass>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ObfuscatedMembers kObfuscatedMembers() { return {}; }
+  template <typename T = ObfuscatedMember> T* add_obfuscated_members() {
+    return BeginNestedMessage<T>(3);
+  }
+
+
+  using FieldMetadata_ObfuscatedMethods =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ObfuscatedMember,
+      ObfuscatedClass>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ObfuscatedMethods kObfuscatedMethods() { return {}; }
+  template <typename T = ObfuscatedMember> T* add_obfuscated_methods() {
+    return BeginNestedMessage<T>(4);
+  }
+
+};
+
+class ObfuscatedMember_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ObfuscatedMember_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ObfuscatedMember_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ObfuscatedMember_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_obfuscated_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars obfuscated_name() const { return at<1>().as_string(); }
+  bool has_deobfuscated_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars deobfuscated_name() const { return at<2>().as_string(); }
+};
+
+class ObfuscatedMember : public ::protozero::Message {
+ public:
+  using Decoder = ObfuscatedMember_Decoder;
+  enum : int32_t {
+    kObfuscatedNameFieldNumber = 1,
+    kDeobfuscatedNameFieldNumber = 2,
+  };
+
+  using FieldMetadata_ObfuscatedName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ObfuscatedMember>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ObfuscatedName kObfuscatedName() { return {}; }
+  void set_obfuscated_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ObfuscatedName::kFieldId, data, size);
+  }
+  void set_obfuscated_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ObfuscatedName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DeobfuscatedName =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ObfuscatedMember>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DeobfuscatedName kDeobfuscatedName() { return {}; }
+  void set_deobfuscated_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_DeobfuscatedName::kFieldId, data, size);
+  }
+  void set_deobfuscated_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_DeobfuscatedName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/profiling/heap_graph.pbzero.h
+// gen_amalgamated begin header: gen/protos/perfetto/trace/profiling/deobfuscation.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PROFILING_DEOBFUSCATION_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PROFILING_DEOBFUSCATION_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class ObfuscatedClass;
+class ObfuscatedMember;
+
+class DeobfuscationMapping_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  DeobfuscationMapping_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit DeobfuscationMapping_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit DeobfuscationMapping_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_package_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars package_name() const { return at<1>().as_string(); }
+  bool has_version_code() const { return at<2>().valid(); }
+  int64_t version_code() const { return at<2>().as_int64(); }
+  bool has_obfuscated_classes() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> obfuscated_classes() const { return GetRepeated<::protozero::ConstBytes>(3); }
+};
+
+class DeobfuscationMapping : public ::protozero::Message {
+ public:
+  using Decoder = DeobfuscationMapping_Decoder;
+  enum : int32_t {
+    kPackageNameFieldNumber = 1,
+    kVersionCodeFieldNumber = 2,
+    kObfuscatedClassesFieldNumber = 3,
+  };
+
+  using FieldMetadata_PackageName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DeobfuscationMapping>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PackageName kPackageName() { return {}; }
+  void set_package_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_PackageName::kFieldId, data, size);
+  }
+  void set_package_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_PackageName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_VersionCode =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      DeobfuscationMapping>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VersionCode kVersionCode() { return {}; }
+  void set_version_code(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_VersionCode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ObfuscatedClasses =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ObfuscatedClass,
+      DeobfuscationMapping>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ObfuscatedClasses kObfuscatedClasses() { return {}; }
+  template <typename T = ObfuscatedClass> T* add_obfuscated_classes() {
+    return BeginNestedMessage<T>(3);
+  }
+
+};
+
+class ObfuscatedClass_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ObfuscatedClass_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ObfuscatedClass_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ObfuscatedClass_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_obfuscated_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars obfuscated_name() const { return at<1>().as_string(); }
+  bool has_deobfuscated_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars deobfuscated_name() const { return at<2>().as_string(); }
+  bool has_obfuscated_members() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> obfuscated_members() const { return GetRepeated<::protozero::ConstBytes>(3); }
+  bool has_obfuscated_methods() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> obfuscated_methods() const { return GetRepeated<::protozero::ConstBytes>(4); }
+};
+
+class ObfuscatedClass : public ::protozero::Message {
+ public:
+  using Decoder = ObfuscatedClass_Decoder;
+  enum : int32_t {
+    kObfuscatedNameFieldNumber = 1,
+    kDeobfuscatedNameFieldNumber = 2,
+    kObfuscatedMembersFieldNumber = 3,
+    kObfuscatedMethodsFieldNumber = 4,
+  };
+
+  using FieldMetadata_ObfuscatedName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ObfuscatedClass>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ObfuscatedName kObfuscatedName() { return {}; }
+  void set_obfuscated_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ObfuscatedName::kFieldId, data, size);
+  }
+  void set_obfuscated_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ObfuscatedName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DeobfuscatedName =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ObfuscatedClass>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DeobfuscatedName kDeobfuscatedName() { return {}; }
+  void set_deobfuscated_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_DeobfuscatedName::kFieldId, data, size);
+  }
+  void set_deobfuscated_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_DeobfuscatedName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ObfuscatedMembers =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ObfuscatedMember,
+      ObfuscatedClass>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ObfuscatedMembers kObfuscatedMembers() { return {}; }
+  template <typename T = ObfuscatedMember> T* add_obfuscated_members() {
+    return BeginNestedMessage<T>(3);
+  }
+
+
+  using FieldMetadata_ObfuscatedMethods =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ObfuscatedMember,
+      ObfuscatedClass>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ObfuscatedMethods kObfuscatedMethods() { return {}; }
+  template <typename T = ObfuscatedMember> T* add_obfuscated_methods() {
+    return BeginNestedMessage<T>(4);
+  }
+
+};
+
+class ObfuscatedMember_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ObfuscatedMember_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ObfuscatedMember_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ObfuscatedMember_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_obfuscated_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars obfuscated_name() const { return at<1>().as_string(); }
+  bool has_deobfuscated_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars deobfuscated_name() const { return at<2>().as_string(); }
+};
+
+class ObfuscatedMember : public ::protozero::Message {
+ public:
+  using Decoder = ObfuscatedMember_Decoder;
+  enum : int32_t {
+    kObfuscatedNameFieldNumber = 1,
+    kDeobfuscatedNameFieldNumber = 2,
+  };
+
+  using FieldMetadata_ObfuscatedName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ObfuscatedMember>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ObfuscatedName kObfuscatedName() { return {}; }
+  void set_obfuscated_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ObfuscatedName::kFieldId, data, size);
+  }
+  void set_obfuscated_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ObfuscatedName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DeobfuscatedName =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ObfuscatedMember>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DeobfuscatedName kDeobfuscatedName() { return {}; }
+  void set_deobfuscated_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_DeobfuscatedName::kFieldId, data, size);
+  }
+  void set_deobfuscated_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_DeobfuscatedName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PROFILING_HEAP_GRAPH_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PROFILING_HEAP_GRAPH_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/profiling/deobfuscation.pbzero.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class HeapGraphObject;
+class HeapGraphRoot;
+class HeapGraphType;
+class InternedString;
+enum HeapGraphRoot_Type : int32_t;
+enum HeapGraphType_Kind : int32_t;
+
+enum HeapGraphType_Kind : int32_t {
+  HeapGraphType_Kind_KIND_UNKNOWN = 0,
+  HeapGraphType_Kind_KIND_NORMAL = 1,
+  HeapGraphType_Kind_KIND_NOREFERENCES = 2,
+  HeapGraphType_Kind_KIND_STRING = 3,
+  HeapGraphType_Kind_KIND_ARRAY = 4,
+  HeapGraphType_Kind_KIND_CLASS = 5,
+  HeapGraphType_Kind_KIND_CLASSLOADER = 6,
+  HeapGraphType_Kind_KIND_DEXCACHE = 7,
+  HeapGraphType_Kind_KIND_SOFT_REFERENCE = 8,
+  HeapGraphType_Kind_KIND_WEAK_REFERENCE = 9,
+  HeapGraphType_Kind_KIND_FINALIZER_REFERENCE = 10,
+  HeapGraphType_Kind_KIND_PHANTOM_REFERENCE = 11,
+};
+
+const HeapGraphType_Kind HeapGraphType_Kind_MIN = HeapGraphType_Kind_KIND_UNKNOWN;
+const HeapGraphType_Kind HeapGraphType_Kind_MAX = HeapGraphType_Kind_KIND_PHANTOM_REFERENCE;
+
+enum HeapGraphRoot_Type : int32_t {
+  HeapGraphRoot_Type_ROOT_UNKNOWN = 0,
+  HeapGraphRoot_Type_ROOT_JNI_GLOBAL = 1,
+  HeapGraphRoot_Type_ROOT_JNI_LOCAL = 2,
+  HeapGraphRoot_Type_ROOT_JAVA_FRAME = 3,
+  HeapGraphRoot_Type_ROOT_NATIVE_STACK = 4,
+  HeapGraphRoot_Type_ROOT_STICKY_CLASS = 5,
+  HeapGraphRoot_Type_ROOT_THREAD_BLOCK = 6,
+  HeapGraphRoot_Type_ROOT_MONITOR_USED = 7,
+  HeapGraphRoot_Type_ROOT_THREAD_OBJECT = 8,
+  HeapGraphRoot_Type_ROOT_INTERNED_STRING = 9,
+  HeapGraphRoot_Type_ROOT_FINALIZING = 10,
+  HeapGraphRoot_Type_ROOT_DEBUGGER = 11,
+  HeapGraphRoot_Type_ROOT_REFERENCE_CLEANUP = 12,
+  HeapGraphRoot_Type_ROOT_VM_INTERNAL = 13,
+  HeapGraphRoot_Type_ROOT_JNI_MONITOR = 14,
+};
+
+const HeapGraphRoot_Type HeapGraphRoot_Type_MIN = HeapGraphRoot_Type_ROOT_UNKNOWN;
+const HeapGraphRoot_Type HeapGraphRoot_Type_MAX = HeapGraphRoot_Type_ROOT_JNI_MONITOR;
+
+class HeapGraph_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/9, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  HeapGraph_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit HeapGraph_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit HeapGraph_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  int32_t pid() const { return at<1>().as_int32(); }
+  bool has_objects() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> objects() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_roots() const { return at<7>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> roots() const { return GetRepeated<::protozero::ConstBytes>(7); }
+  bool has_types() const { return at<9>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> types() const { return GetRepeated<::protozero::ConstBytes>(9); }
+  bool has_field_names() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> field_names() const { return GetRepeated<::protozero::ConstBytes>(4); }
+  bool has_location_names() const { return at<8>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> location_names() const { return GetRepeated<::protozero::ConstBytes>(8); }
+  bool has_continued() const { return at<5>().valid(); }
+  bool continued() const { return at<5>().as_bool(); }
+  bool has_index() const { return at<6>().valid(); }
+  uint64_t index() const { return at<6>().as_uint64(); }
+};
+
+class HeapGraph : public ::protozero::Message {
+ public:
+  using Decoder = HeapGraph_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kObjectsFieldNumber = 2,
+    kRootsFieldNumber = 7,
+    kTypesFieldNumber = 9,
+    kFieldNamesFieldNumber = 4,
+    kLocationNamesFieldNumber = 8,
+    kContinuedFieldNumber = 5,
+    kIndexFieldNumber = 6,
+  };
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      HeapGraph>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Objects =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      HeapGraphObject,
+      HeapGraph>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Objects kObjects() { return {}; }
+  template <typename T = HeapGraphObject> T* add_objects() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_Roots =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      HeapGraphRoot,
+      HeapGraph>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Roots kRoots() { return {}; }
+  template <typename T = HeapGraphRoot> T* add_roots() {
+    return BeginNestedMessage<T>(7);
+  }
+
+
+  using FieldMetadata_Types =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      HeapGraphType,
+      HeapGraph>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Types kTypes() { return {}; }
+  template <typename T = HeapGraphType> T* add_types() {
+    return BeginNestedMessage<T>(9);
+  }
+
+
+  using FieldMetadata_FieldNames =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedString,
+      HeapGraph>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FieldNames kFieldNames() { return {}; }
+  template <typename T = InternedString> T* add_field_names() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_LocationNames =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedString,
+      HeapGraph>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LocationNames kLocationNames() { return {}; }
+  template <typename T = InternedString> T* add_location_names() {
+    return BeginNestedMessage<T>(8);
+  }
+
+
+  using FieldMetadata_Continued =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      HeapGraph>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Continued kContinued() { return {}; }
+  void set_continued(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_Continued::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Index =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapGraph>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Index kIndex() { return {}; }
+  void set_index(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Index::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class HeapGraphObject_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  HeapGraphObject_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit HeapGraphObject_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit HeapGraphObject_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_id() const { return at<1>().valid(); }
+  uint64_t id() const { return at<1>().as_uint64(); }
+  bool has_id_delta() const { return at<7>().valid(); }
+  uint64_t id_delta() const { return at<7>().as_uint64(); }
+  bool has_type_id() const { return at<2>().valid(); }
+  uint64_t type_id() const { return at<2>().as_uint64(); }
+  bool has_self_size() const { return at<3>().valid(); }
+  uint64_t self_size() const { return at<3>().as_uint64(); }
+  bool has_reference_field_id_base() const { return at<6>().valid(); }
+  uint64_t reference_field_id_base() const { return at<6>().as_uint64(); }
+  bool has_reference_field_id() const { return at<4>().valid(); }
+  ::protozero::PackedRepeatedFieldIterator<::protozero::proto_utils::ProtoWireType::kVarInt, uint64_t> reference_field_id(bool* parse_error_ptr) const { return GetPackedRepeated<::protozero::proto_utils::ProtoWireType::kVarInt, uint64_t>(4, parse_error_ptr); }
+  bool has_reference_object_id() const { return at<5>().valid(); }
+  ::protozero::PackedRepeatedFieldIterator<::protozero::proto_utils::ProtoWireType::kVarInt, uint64_t> reference_object_id(bool* parse_error_ptr) const { return GetPackedRepeated<::protozero::proto_utils::ProtoWireType::kVarInt, uint64_t>(5, parse_error_ptr); }
+};
+
+class HeapGraphObject : public ::protozero::Message {
+ public:
+  using Decoder = HeapGraphObject_Decoder;
+  enum : int32_t {
+    kIdFieldNumber = 1,
+    kIdDeltaFieldNumber = 7,
+    kTypeIdFieldNumber = 2,
+    kSelfSizeFieldNumber = 3,
+    kReferenceFieldIdBaseFieldNumber = 6,
+    kReferenceFieldIdFieldNumber = 4,
+    kReferenceObjectIdFieldNumber = 5,
+  };
+
+  using FieldMetadata_Id =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapGraphObject>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Id kId() { return {}; }
+  void set_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Id::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IdDelta =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapGraphObject>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IdDelta kIdDelta() { return {}; }
+  void set_id_delta(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IdDelta::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TypeId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapGraphObject>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TypeId kTypeId() { return {}; }
+  void set_type_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TypeId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SelfSize =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapGraphObject>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SelfSize kSelfSize() { return {}; }
+  void set_self_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SelfSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReferenceFieldIdBase =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapGraphObject>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReferenceFieldIdBase kReferenceFieldIdBase() { return {}; }
+  void set_reference_field_id_base(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReferenceFieldIdBase::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReferenceFieldId =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapGraphObject>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReferenceFieldId kReferenceFieldId() { return {}; }
+  void set_reference_field_id(const ::protozero::PackedVarInt& packed_buffer) {
+    AppendBytes(FieldMetadata_ReferenceFieldId::kFieldId, packed_buffer.data(),
+                packed_buffer.size());
+  }
+
+  using FieldMetadata_ReferenceObjectId =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kRepeatedPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapGraphObject>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReferenceObjectId kReferenceObjectId() { return {}; }
+  void set_reference_object_id(const ::protozero::PackedVarInt& packed_buffer) {
+    AppendBytes(FieldMetadata_ReferenceObjectId::kFieldId, packed_buffer.data(),
+                packed_buffer.size());
+  }
+};
+
+class HeapGraphType_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  HeapGraphType_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit HeapGraphType_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit HeapGraphType_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_id() const { return at<1>().valid(); }
+  uint64_t id() const { return at<1>().as_uint64(); }
+  bool has_location_id() const { return at<2>().valid(); }
+  uint64_t location_id() const { return at<2>().as_uint64(); }
+  bool has_class_name() const { return at<3>().valid(); }
+  ::protozero::ConstChars class_name() const { return at<3>().as_string(); }
+  bool has_object_size() const { return at<4>().valid(); }
+  uint64_t object_size() const { return at<4>().as_uint64(); }
+  bool has_superclass_id() const { return at<5>().valid(); }
+  uint64_t superclass_id() const { return at<5>().as_uint64(); }
+  bool has_reference_field_id() const { return at<6>().valid(); }
+  ::protozero::PackedRepeatedFieldIterator<::protozero::proto_utils::ProtoWireType::kVarInt, uint64_t> reference_field_id(bool* parse_error_ptr) const { return GetPackedRepeated<::protozero::proto_utils::ProtoWireType::kVarInt, uint64_t>(6, parse_error_ptr); }
+  bool has_kind() const { return at<7>().valid(); }
+  int32_t kind() const { return at<7>().as_int32(); }
+  bool has_classloader_id() const { return at<8>().valid(); }
+  uint64_t classloader_id() const { return at<8>().as_uint64(); }
+};
+
+class HeapGraphType : public ::protozero::Message {
+ public:
+  using Decoder = HeapGraphType_Decoder;
+  enum : int32_t {
+    kIdFieldNumber = 1,
+    kLocationIdFieldNumber = 2,
+    kClassNameFieldNumber = 3,
+    kObjectSizeFieldNumber = 4,
+    kSuperclassIdFieldNumber = 5,
+    kReferenceFieldIdFieldNumber = 6,
+    kKindFieldNumber = 7,
+    kClassloaderIdFieldNumber = 8,
+  };
+  using Kind = ::perfetto::protos::pbzero::HeapGraphType_Kind;
+  static const Kind KIND_UNKNOWN = HeapGraphType_Kind_KIND_UNKNOWN;
+  static const Kind KIND_NORMAL = HeapGraphType_Kind_KIND_NORMAL;
+  static const Kind KIND_NOREFERENCES = HeapGraphType_Kind_KIND_NOREFERENCES;
+  static const Kind KIND_STRING = HeapGraphType_Kind_KIND_STRING;
+  static const Kind KIND_ARRAY = HeapGraphType_Kind_KIND_ARRAY;
+  static const Kind KIND_CLASS = HeapGraphType_Kind_KIND_CLASS;
+  static const Kind KIND_CLASSLOADER = HeapGraphType_Kind_KIND_CLASSLOADER;
+  static const Kind KIND_DEXCACHE = HeapGraphType_Kind_KIND_DEXCACHE;
+  static const Kind KIND_SOFT_REFERENCE = HeapGraphType_Kind_KIND_SOFT_REFERENCE;
+  static const Kind KIND_WEAK_REFERENCE = HeapGraphType_Kind_KIND_WEAK_REFERENCE;
+  static const Kind KIND_FINALIZER_REFERENCE = HeapGraphType_Kind_KIND_FINALIZER_REFERENCE;
+  static const Kind KIND_PHANTOM_REFERENCE = HeapGraphType_Kind_KIND_PHANTOM_REFERENCE;
+
+  using FieldMetadata_Id =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapGraphType>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Id kId() { return {}; }
+  void set_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Id::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LocationId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapGraphType>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LocationId kLocationId() { return {}; }
+  void set_location_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LocationId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ClassName =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      HeapGraphType>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClassName kClassName() { return {}; }
+  void set_class_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ClassName::kFieldId, data, size);
+  }
+  void set_class_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ClassName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ObjectSize =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapGraphType>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ObjectSize kObjectSize() { return {}; }
+  void set_object_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ObjectSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SuperclassId =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapGraphType>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SuperclassId kSuperclassId() { return {}; }
+  void set_superclass_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SuperclassId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReferenceFieldId =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kRepeatedPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapGraphType>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReferenceFieldId kReferenceFieldId() { return {}; }
+  void set_reference_field_id(const ::protozero::PackedVarInt& packed_buffer) {
+    AppendBytes(FieldMetadata_ReferenceFieldId::kFieldId, packed_buffer.data(),
+                packed_buffer.size());
+  }
+
+  using FieldMetadata_Kind =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::HeapGraphType_Kind,
+      HeapGraphType>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Kind kKind() { return {}; }
+  void set_kind(::perfetto::protos::pbzero::HeapGraphType_Kind value) {
+    static constexpr uint32_t field_id = FieldMetadata_Kind::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ClassloaderId =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapGraphType>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClassloaderId kClassloaderId() { return {}; }
+  void set_classloader_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ClassloaderId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class HeapGraphRoot_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  HeapGraphRoot_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit HeapGraphRoot_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit HeapGraphRoot_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_object_ids() const { return at<1>().valid(); }
+  ::protozero::PackedRepeatedFieldIterator<::protozero::proto_utils::ProtoWireType::kVarInt, uint64_t> object_ids(bool* parse_error_ptr) const { return GetPackedRepeated<::protozero::proto_utils::ProtoWireType::kVarInt, uint64_t>(1, parse_error_ptr); }
+  bool has_root_type() const { return at<2>().valid(); }
+  int32_t root_type() const { return at<2>().as_int32(); }
+};
+
+class HeapGraphRoot : public ::protozero::Message {
+ public:
+  using Decoder = HeapGraphRoot_Decoder;
+  enum : int32_t {
+    kObjectIdsFieldNumber = 1,
+    kRootTypeFieldNumber = 2,
+  };
+  using Type = ::perfetto::protos::pbzero::HeapGraphRoot_Type;
+  static const Type ROOT_UNKNOWN = HeapGraphRoot_Type_ROOT_UNKNOWN;
+  static const Type ROOT_JNI_GLOBAL = HeapGraphRoot_Type_ROOT_JNI_GLOBAL;
+  static const Type ROOT_JNI_LOCAL = HeapGraphRoot_Type_ROOT_JNI_LOCAL;
+  static const Type ROOT_JAVA_FRAME = HeapGraphRoot_Type_ROOT_JAVA_FRAME;
+  static const Type ROOT_NATIVE_STACK = HeapGraphRoot_Type_ROOT_NATIVE_STACK;
+  static const Type ROOT_STICKY_CLASS = HeapGraphRoot_Type_ROOT_STICKY_CLASS;
+  static const Type ROOT_THREAD_BLOCK = HeapGraphRoot_Type_ROOT_THREAD_BLOCK;
+  static const Type ROOT_MONITOR_USED = HeapGraphRoot_Type_ROOT_MONITOR_USED;
+  static const Type ROOT_THREAD_OBJECT = HeapGraphRoot_Type_ROOT_THREAD_OBJECT;
+  static const Type ROOT_INTERNED_STRING = HeapGraphRoot_Type_ROOT_INTERNED_STRING;
+  static const Type ROOT_FINALIZING = HeapGraphRoot_Type_ROOT_FINALIZING;
+  static const Type ROOT_DEBUGGER = HeapGraphRoot_Type_ROOT_DEBUGGER;
+  static const Type ROOT_REFERENCE_CLEANUP = HeapGraphRoot_Type_ROOT_REFERENCE_CLEANUP;
+  static const Type ROOT_VM_INTERNAL = HeapGraphRoot_Type_ROOT_VM_INTERNAL;
+  static const Type ROOT_JNI_MONITOR = HeapGraphRoot_Type_ROOT_JNI_MONITOR;
+
+  using FieldMetadata_ObjectIds =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HeapGraphRoot>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ObjectIds kObjectIds() { return {}; }
+  void set_object_ids(const ::protozero::PackedVarInt& packed_buffer) {
+    AppendBytes(FieldMetadata_ObjectIds::kFieldId, packed_buffer.data(),
+                packed_buffer.size());
+  }
+
+  using FieldMetadata_RootType =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::HeapGraphRoot_Type,
+      HeapGraphRoot>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RootType kRootType() { return {}; }
+  void set_root_type(::perfetto::protos::pbzero::HeapGraphRoot_Type value) {
+    static constexpr uint32_t field_id = FieldMetadata_RootType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/profiling/profile_common.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PROFILING_PROFILE_COMMON_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PROFILING_PROFILE_COMMON_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class AddressSymbols;
+class Line;
+
+class Callstack_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  Callstack_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Callstack_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Callstack_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_iid() const { return at<1>().valid(); }
+  uint64_t iid() const { return at<1>().as_uint64(); }
+  bool has_frame_ids() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> frame_ids() const { return GetRepeated<uint64_t>(2); }
+};
+
+class Callstack : public ::protozero::Message {
+ public:
+  using Decoder = Callstack_Decoder;
+  enum : int32_t {
+    kIidFieldNumber = 1,
+    kFrameIdsFieldNumber = 2,
+  };
+
+  using FieldMetadata_Iid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Callstack>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Iid kIid() { return {}; }
+  void set_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Iid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FrameIds =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Callstack>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FrameIds kFrameIds() { return {}; }
+  void add_frame_ids(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FrameIds::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Frame_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Frame_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Frame_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Frame_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_iid() const { return at<1>().valid(); }
+  uint64_t iid() const { return at<1>().as_uint64(); }
+  bool has_function_name_id() const { return at<2>().valid(); }
+  uint64_t function_name_id() const { return at<2>().as_uint64(); }
+  bool has_mapping_id() const { return at<3>().valid(); }
+  uint64_t mapping_id() const { return at<3>().as_uint64(); }
+  bool has_rel_pc() const { return at<4>().valid(); }
+  uint64_t rel_pc() const { return at<4>().as_uint64(); }
+};
+
+class Frame : public ::protozero::Message {
+ public:
+  using Decoder = Frame_Decoder;
+  enum : int32_t {
+    kIidFieldNumber = 1,
+    kFunctionNameIdFieldNumber = 2,
+    kMappingIdFieldNumber = 3,
+    kRelPcFieldNumber = 4,
+  };
+
+  using FieldMetadata_Iid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Frame>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Iid kIid() { return {}; }
+  void set_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Iid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FunctionNameId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Frame>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FunctionNameId kFunctionNameId() { return {}; }
+  void set_function_name_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FunctionNameId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MappingId =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Frame>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MappingId kMappingId() { return {}; }
+  void set_mapping_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MappingId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RelPc =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Frame>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RelPc kRelPc() { return {}; }
+  void set_rel_pc(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RelPc::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Mapping_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  Mapping_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Mapping_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Mapping_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_iid() const { return at<1>().valid(); }
+  uint64_t iid() const { return at<1>().as_uint64(); }
+  bool has_build_id() const { return at<2>().valid(); }
+  uint64_t build_id() const { return at<2>().as_uint64(); }
+  bool has_exact_offset() const { return at<8>().valid(); }
+  uint64_t exact_offset() const { return at<8>().as_uint64(); }
+  bool has_start_offset() const { return at<3>().valid(); }
+  uint64_t start_offset() const { return at<3>().as_uint64(); }
+  bool has_start() const { return at<4>().valid(); }
+  uint64_t start() const { return at<4>().as_uint64(); }
+  bool has_end() const { return at<5>().valid(); }
+  uint64_t end() const { return at<5>().as_uint64(); }
+  bool has_load_bias() const { return at<6>().valid(); }
+  uint64_t load_bias() const { return at<6>().as_uint64(); }
+  bool has_path_string_ids() const { return at<7>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> path_string_ids() const { return GetRepeated<uint64_t>(7); }
+};
+
+class Mapping : public ::protozero::Message {
+ public:
+  using Decoder = Mapping_Decoder;
+  enum : int32_t {
+    kIidFieldNumber = 1,
+    kBuildIdFieldNumber = 2,
+    kExactOffsetFieldNumber = 8,
+    kStartOffsetFieldNumber = 3,
+    kStartFieldNumber = 4,
+    kEndFieldNumber = 5,
+    kLoadBiasFieldNumber = 6,
+    kPathStringIdsFieldNumber = 7,
+  };
+
+  using FieldMetadata_Iid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Mapping>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Iid kIid() { return {}; }
+  void set_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Iid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BuildId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Mapping>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BuildId kBuildId() { return {}; }
+  void set_build_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BuildId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExactOffset =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Mapping>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExactOffset kExactOffset() { return {}; }
+  void set_exact_offset(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExactOffset::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StartOffset =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Mapping>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StartOffset kStartOffset() { return {}; }
+  void set_start_offset(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_StartOffset::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Start =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Mapping>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Start kStart() { return {}; }
+  void set_start(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Start::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_End =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Mapping>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_End kEnd() { return {}; }
+  void set_end(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_End::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LoadBias =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Mapping>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LoadBias kLoadBias() { return {}; }
+  void set_load_bias(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LoadBias::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PathStringIds =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      Mapping>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PathStringIds kPathStringIds() { return {}; }
+  void add_path_string_ids(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PathStringIds::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ModuleSymbols_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ModuleSymbols_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ModuleSymbols_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ModuleSymbols_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_path() const { return at<1>().valid(); }
+  ::protozero::ConstChars path() const { return at<1>().as_string(); }
+  bool has_build_id() const { return at<2>().valid(); }
+  ::protozero::ConstChars build_id() const { return at<2>().as_string(); }
+  bool has_address_symbols() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> address_symbols() const { return GetRepeated<::protozero::ConstBytes>(3); }
+};
+
+class ModuleSymbols : public ::protozero::Message {
+ public:
+  using Decoder = ModuleSymbols_Decoder;
+  enum : int32_t {
+    kPathFieldNumber = 1,
+    kBuildIdFieldNumber = 2,
+    kAddressSymbolsFieldNumber = 3,
+  };
+
+  using FieldMetadata_Path =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ModuleSymbols>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Path kPath() { return {}; }
+  void set_path(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Path::kFieldId, data, size);
+  }
+  void set_path(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Path::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BuildId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ModuleSymbols>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BuildId kBuildId() { return {}; }
+  void set_build_id(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_BuildId::kFieldId, data, size);
+  }
+  void set_build_id(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_BuildId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AddressSymbols =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AddressSymbols,
+      ModuleSymbols>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AddressSymbols kAddressSymbols() { return {}; }
+  template <typename T = AddressSymbols> T* add_address_symbols() {
+    return BeginNestedMessage<T>(3);
+  }
+
+};
+
+class AddressSymbols_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  AddressSymbols_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AddressSymbols_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AddressSymbols_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_address() const { return at<1>().valid(); }
+  uint64_t address() const { return at<1>().as_uint64(); }
+  bool has_lines() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> lines() const { return GetRepeated<::protozero::ConstBytes>(2); }
+};
+
+class AddressSymbols : public ::protozero::Message {
+ public:
+  using Decoder = AddressSymbols_Decoder;
+  enum : int32_t {
+    kAddressFieldNumber = 1,
+    kLinesFieldNumber = 2,
+  };
+
+  using FieldMetadata_Address =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      AddressSymbols>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Address kAddress() { return {}; }
+  void set_address(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Address::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Lines =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Line,
+      AddressSymbols>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Lines kLines() { return {}; }
+  template <typename T = Line> T* add_lines() {
+    return BeginNestedMessage<T>(2);
+  }
+
+};
+
+class Line_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Line_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Line_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Line_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_function_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars function_name() const { return at<1>().as_string(); }
+  bool has_source_file_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars source_file_name() const { return at<2>().as_string(); }
+  bool has_line_number() const { return at<3>().valid(); }
+  uint32_t line_number() const { return at<3>().as_uint32(); }
+};
+
+class Line : public ::protozero::Message {
+ public:
+  using Decoder = Line_Decoder;
+  enum : int32_t {
+    kFunctionNameFieldNumber = 1,
+    kSourceFileNameFieldNumber = 2,
+    kLineNumberFieldNumber = 3,
+  };
+
+  using FieldMetadata_FunctionName =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      Line>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FunctionName kFunctionName() { return {}; }
+  void set_function_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_FunctionName::kFieldId, data, size);
+  }
+  void set_function_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_FunctionName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SourceFileName =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      Line>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SourceFileName kSourceFileName() { return {}; }
+  void set_source_file_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_SourceFileName::kFieldId, data, size);
+  }
+  void set_source_file_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_SourceFileName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LineNumber =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      Line>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LineNumber kLineNumber() { return {}; }
+  void set_line_number(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LineNumber::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ProfiledFrameSymbols_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ProfiledFrameSymbols_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ProfiledFrameSymbols_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ProfiledFrameSymbols_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_frame_iid() const { return at<1>().valid(); }
+  uint64_t frame_iid() const { return at<1>().as_uint64(); }
+  bool has_function_name_id() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> function_name_id() const { return GetRepeated<uint64_t>(2); }
+  bool has_file_name_id() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> file_name_id() const { return GetRepeated<uint64_t>(3); }
+  bool has_line_number() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint32_t> line_number() const { return GetRepeated<uint32_t>(4); }
+};
+
+class ProfiledFrameSymbols : public ::protozero::Message {
+ public:
+  using Decoder = ProfiledFrameSymbols_Decoder;
+  enum : int32_t {
+    kFrameIidFieldNumber = 1,
+    kFunctionNameIdFieldNumber = 2,
+    kFileNameIdFieldNumber = 3,
+    kLineNumberFieldNumber = 4,
+  };
+
+  using FieldMetadata_FrameIid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfiledFrameSymbols>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FrameIid kFrameIid() { return {}; }
+  void set_frame_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FrameIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FunctionNameId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfiledFrameSymbols>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FunctionNameId kFunctionNameId() { return {}; }
+  void add_function_name_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FunctionNameId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FileNameId =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfiledFrameSymbols>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FileNameId kFileNameId() { return {}; }
+  void add_file_name_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FileNameId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LineNumber =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ProfiledFrameSymbols>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LineNumber kLineNumber() { return {}; }
+  void add_line_number(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LineNumber::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class InternedString_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  InternedString_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit InternedString_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit InternedString_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_iid() const { return at<1>().valid(); }
+  uint64_t iid() const { return at<1>().as_uint64(); }
+  bool has_str() const { return at<2>().valid(); }
+  ::protozero::ConstBytes str() const { return at<2>().as_bytes(); }
+};
+
+class InternedString : public ::protozero::Message {
+ public:
+  using Decoder = InternedString_Decoder;
+  enum : int32_t {
+    kIidFieldNumber = 1,
+    kStrFieldNumber = 2,
+  };
+
+  using FieldMetadata_Iid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      InternedString>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Iid kIid() { return {}; }
+  void set_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Iid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Str =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBytes,
+      std::string,
+      InternedString>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Str kStr() { return {}; }
+  void set_str(const uint8_t* data, size_t size) {
+    AppendBytes(FieldMetadata_Str::kFieldId, data, size);
+  }
+  void set_str(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Str::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBytes>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/profiling/profile_packet.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PROFILING_PROFILE_PACKET_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PROFILING_PROFILE_PACKET_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class Callstack;
+class Frame;
+class InternedString;
+class Mapping;
+class PerfEvents_Timebase;
+class PerfSample_ProducerEvent;
+class ProfilePacket_HeapSample;
+class ProfilePacket_Histogram;
+class ProfilePacket_Histogram_Bucket;
+class ProfilePacket_ProcessHeapSamples;
+class ProfilePacket_ProcessStats;
+enum PerfSample_ProducerEvent_DataSourceStopReason : int32_t;
+enum PerfSample_SampleSkipReason : int32_t;
+enum ProfilePacket_ProcessHeapSamples_ClientError : int32_t;
+enum Profiling_CpuMode : int32_t;
+enum Profiling_StackUnwindError : int32_t;
+
+enum PerfSample_SampleSkipReason : int32_t {
+  PerfSample_SampleSkipReason_PROFILER_SKIP_UNKNOWN = 0,
+  PerfSample_SampleSkipReason_PROFILER_SKIP_READ_STAGE = 1,
+  PerfSample_SampleSkipReason_PROFILER_SKIP_UNWIND_STAGE = 2,
+  PerfSample_SampleSkipReason_PROFILER_SKIP_UNWIND_ENQUEUE = 3,
+};
+
+const PerfSample_SampleSkipReason PerfSample_SampleSkipReason_MIN = PerfSample_SampleSkipReason_PROFILER_SKIP_UNKNOWN;
+const PerfSample_SampleSkipReason PerfSample_SampleSkipReason_MAX = PerfSample_SampleSkipReason_PROFILER_SKIP_UNWIND_ENQUEUE;
+
+enum PerfSample_ProducerEvent_DataSourceStopReason : int32_t {
+  PerfSample_ProducerEvent_DataSourceStopReason_PROFILER_STOP_UNKNOWN = 0,
+  PerfSample_ProducerEvent_DataSourceStopReason_PROFILER_STOP_GUARDRAIL = 1,
+};
+
+const PerfSample_ProducerEvent_DataSourceStopReason PerfSample_ProducerEvent_DataSourceStopReason_MIN = PerfSample_ProducerEvent_DataSourceStopReason_PROFILER_STOP_UNKNOWN;
+const PerfSample_ProducerEvent_DataSourceStopReason PerfSample_ProducerEvent_DataSourceStopReason_MAX = PerfSample_ProducerEvent_DataSourceStopReason_PROFILER_STOP_GUARDRAIL;
+
+enum Profiling_CpuMode : int32_t {
+  Profiling_CpuMode_MODE_UNKNOWN = 0,
+  Profiling_CpuMode_MODE_KERNEL = 1,
+  Profiling_CpuMode_MODE_USER = 2,
+  Profiling_CpuMode_MODE_HYPERVISOR = 3,
+  Profiling_CpuMode_MODE_GUEST_KERNEL = 4,
+  Profiling_CpuMode_MODE_GUEST_USER = 5,
+};
+
+const Profiling_CpuMode Profiling_CpuMode_MIN = Profiling_CpuMode_MODE_UNKNOWN;
+const Profiling_CpuMode Profiling_CpuMode_MAX = Profiling_CpuMode_MODE_GUEST_USER;
+
+enum Profiling_StackUnwindError : int32_t {
+  Profiling_StackUnwindError_UNWIND_ERROR_UNKNOWN = 0,
+  Profiling_StackUnwindError_UNWIND_ERROR_NONE = 1,
+  Profiling_StackUnwindError_UNWIND_ERROR_MEMORY_INVALID = 2,
+  Profiling_StackUnwindError_UNWIND_ERROR_UNWIND_INFO = 3,
+  Profiling_StackUnwindError_UNWIND_ERROR_UNSUPPORTED = 4,
+  Profiling_StackUnwindError_UNWIND_ERROR_INVALID_MAP = 5,
+  Profiling_StackUnwindError_UNWIND_ERROR_MAX_FRAMES_EXCEEDED = 6,
+  Profiling_StackUnwindError_UNWIND_ERROR_REPEATED_FRAME = 7,
+  Profiling_StackUnwindError_UNWIND_ERROR_INVALID_ELF = 8,
+  Profiling_StackUnwindError_UNWIND_ERROR_SYSTEM_CALL = 9,
+  Profiling_StackUnwindError_UNWIND_ERROR_THREAD_TIMEOUT = 10,
+  Profiling_StackUnwindError_UNWIND_ERROR_THREAD_DOES_NOT_EXIST = 11,
+};
+
+const Profiling_StackUnwindError Profiling_StackUnwindError_MIN = Profiling_StackUnwindError_UNWIND_ERROR_UNKNOWN;
+const Profiling_StackUnwindError Profiling_StackUnwindError_MAX = Profiling_StackUnwindError_UNWIND_ERROR_THREAD_DOES_NOT_EXIST;
+
+enum ProfilePacket_ProcessHeapSamples_ClientError : int32_t {
+  ProfilePacket_ProcessHeapSamples_ClientError_CLIENT_ERROR_NONE = 0,
+  ProfilePacket_ProcessHeapSamples_ClientError_CLIENT_ERROR_HIT_TIMEOUT = 1,
+  ProfilePacket_ProcessHeapSamples_ClientError_CLIENT_ERROR_INVALID_STACK_BOUNDS = 2,
+};
+
+const ProfilePacket_ProcessHeapSamples_ClientError ProfilePacket_ProcessHeapSamples_ClientError_MIN = ProfilePacket_ProcessHeapSamples_ClientError_CLIENT_ERROR_NONE;
+const ProfilePacket_ProcessHeapSamples_ClientError ProfilePacket_ProcessHeapSamples_ClientError_MAX = ProfilePacket_ProcessHeapSamples_ClientError_CLIENT_ERROR_INVALID_STACK_BOUNDS;
+
+class PerfSampleDefaults_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  PerfSampleDefaults_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PerfSampleDefaults_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PerfSampleDefaults_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_timebase() const { return at<1>().valid(); }
+  ::protozero::ConstBytes timebase() const { return at<1>().as_bytes(); }
+};
+
+class PerfSampleDefaults : public ::protozero::Message {
+ public:
+  using Decoder = PerfSampleDefaults_Decoder;
+  enum : int32_t {
+    kTimebaseFieldNumber = 1,
+  };
+
+  using FieldMetadata_Timebase =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PerfEvents_Timebase,
+      PerfSampleDefaults>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Timebase kTimebase() { return {}; }
+  template <typename T = PerfEvents_Timebase> T* set_timebase() {
+    return BeginNestedMessage<T>(1);
+  }
+
+};
+
+class PerfSample_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/19, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  PerfSample_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PerfSample_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PerfSample_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_cpu() const { return at<1>().valid(); }
+  uint32_t cpu() const { return at<1>().as_uint32(); }
+  bool has_pid() const { return at<2>().valid(); }
+  uint32_t pid() const { return at<2>().as_uint32(); }
+  bool has_tid() const { return at<3>().valid(); }
+  uint32_t tid() const { return at<3>().as_uint32(); }
+  bool has_cpu_mode() const { return at<5>().valid(); }
+  int32_t cpu_mode() const { return at<5>().as_int32(); }
+  bool has_timebase_count() const { return at<6>().valid(); }
+  uint64_t timebase_count() const { return at<6>().as_uint64(); }
+  bool has_callstack_iid() const { return at<4>().valid(); }
+  uint64_t callstack_iid() const { return at<4>().as_uint64(); }
+  bool has_unwind_error() const { return at<16>().valid(); }
+  int32_t unwind_error() const { return at<16>().as_int32(); }
+  bool has_kernel_records_lost() const { return at<17>().valid(); }
+  uint64_t kernel_records_lost() const { return at<17>().as_uint64(); }
+  bool has_sample_skipped_reason() const { return at<18>().valid(); }
+  int32_t sample_skipped_reason() const { return at<18>().as_int32(); }
+  bool has_producer_event() const { return at<19>().valid(); }
+  ::protozero::ConstBytes producer_event() const { return at<19>().as_bytes(); }
+};
+
+class PerfSample : public ::protozero::Message {
+ public:
+  using Decoder = PerfSample_Decoder;
+  enum : int32_t {
+    kCpuFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kTidFieldNumber = 3,
+    kCpuModeFieldNumber = 5,
+    kTimebaseCountFieldNumber = 6,
+    kCallstackIidFieldNumber = 4,
+    kUnwindErrorFieldNumber = 16,
+    kKernelRecordsLostFieldNumber = 17,
+    kSampleSkippedReasonFieldNumber = 18,
+    kProducerEventFieldNumber = 19,
+  };
+  using ProducerEvent = ::perfetto::protos::pbzero::PerfSample_ProducerEvent;
+  using SampleSkipReason = ::perfetto::protos::pbzero::PerfSample_SampleSkipReason;
+  static const SampleSkipReason PROFILER_SKIP_UNKNOWN = PerfSample_SampleSkipReason_PROFILER_SKIP_UNKNOWN;
+  static const SampleSkipReason PROFILER_SKIP_READ_STAGE = PerfSample_SampleSkipReason_PROFILER_SKIP_READ_STAGE;
+  static const SampleSkipReason PROFILER_SKIP_UNWIND_STAGE = PerfSample_SampleSkipReason_PROFILER_SKIP_UNWIND_STAGE;
+  static const SampleSkipReason PROFILER_SKIP_UNWIND_ENQUEUE = PerfSample_SampleSkipReason_PROFILER_SKIP_UNWIND_ENQUEUE;
+
+  using FieldMetadata_Cpu =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cpu kCpu() { return {}; }
+  void set_cpu(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cpu::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Tid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tid kTid() { return {}; }
+  void set_tid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CpuMode =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::Profiling_CpuMode,
+      PerfSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuMode kCpuMode() { return {}; }
+  void set_cpu_mode(::perfetto::protos::pbzero::Profiling_CpuMode value) {
+    static constexpr uint32_t field_id = FieldMetadata_CpuMode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TimebaseCount =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      PerfSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TimebaseCount kTimebaseCount() { return {}; }
+  void set_timebase_count(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TimebaseCount::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CallstackIid =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      PerfSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CallstackIid kCallstackIid() { return {}; }
+  void set_callstack_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CallstackIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UnwindError =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::Profiling_StackUnwindError,
+      PerfSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UnwindError kUnwindError() { return {}; }
+  void set_unwind_error(::perfetto::protos::pbzero::Profiling_StackUnwindError value) {
+    static constexpr uint32_t field_id = FieldMetadata_UnwindError::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_KernelRecordsLost =
+    ::protozero::proto_utils::FieldMetadata<
+      17,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      PerfSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_KernelRecordsLost kKernelRecordsLost() { return {}; }
+  void set_kernel_records_lost(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_KernelRecordsLost::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SampleSkippedReason =
+    ::protozero::proto_utils::FieldMetadata<
+      18,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::PerfSample_SampleSkipReason,
+      PerfSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SampleSkippedReason kSampleSkippedReason() { return {}; }
+  void set_sample_skipped_reason(::perfetto::protos::pbzero::PerfSample_SampleSkipReason value) {
+    static constexpr uint32_t field_id = FieldMetadata_SampleSkippedReason::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProducerEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      19,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PerfSample_ProducerEvent,
+      PerfSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProducerEvent kProducerEvent() { return {}; }
+  template <typename T = PerfSample_ProducerEvent> T* set_producer_event() {
+    return BeginNestedMessage<T>(19);
+  }
+
+};
+
+class PerfSample_ProducerEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  PerfSample_ProducerEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PerfSample_ProducerEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PerfSample_ProducerEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_source_stop_reason() const { return at<1>().valid(); }
+  int32_t source_stop_reason() const { return at<1>().as_int32(); }
+};
+
+class PerfSample_ProducerEvent : public ::protozero::Message {
+ public:
+  using Decoder = PerfSample_ProducerEvent_Decoder;
+  enum : int32_t {
+    kSourceStopReasonFieldNumber = 1,
+  };
+  using DataSourceStopReason = ::perfetto::protos::pbzero::PerfSample_ProducerEvent_DataSourceStopReason;
+  static const DataSourceStopReason PROFILER_STOP_UNKNOWN = PerfSample_ProducerEvent_DataSourceStopReason_PROFILER_STOP_UNKNOWN;
+  static const DataSourceStopReason PROFILER_STOP_GUARDRAIL = PerfSample_ProducerEvent_DataSourceStopReason_PROFILER_STOP_GUARDRAIL;
+
+  using FieldMetadata_SourceStopReason =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::PerfSample_ProducerEvent_DataSourceStopReason,
+      PerfSample_ProducerEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SourceStopReason kSourceStopReason() { return {}; }
+  void set_source_stop_reason(::perfetto::protos::pbzero::PerfSample_ProducerEvent_DataSourceStopReason value) {
+    static constexpr uint32_t field_id = FieldMetadata_SourceStopReason::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class Profiling_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/0, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  Profiling_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Profiling_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Profiling_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+};
+
+class Profiling : public ::protozero::Message {
+ public:
+  using Decoder = Profiling_Decoder;
+  using CpuMode = ::perfetto::protos::pbzero::Profiling_CpuMode;
+  using StackUnwindError = ::perfetto::protos::pbzero::Profiling_StackUnwindError;
+  static const CpuMode MODE_UNKNOWN = Profiling_CpuMode_MODE_UNKNOWN;
+  static const CpuMode MODE_KERNEL = Profiling_CpuMode_MODE_KERNEL;
+  static const CpuMode MODE_USER = Profiling_CpuMode_MODE_USER;
+  static const CpuMode MODE_HYPERVISOR = Profiling_CpuMode_MODE_HYPERVISOR;
+  static const CpuMode MODE_GUEST_KERNEL = Profiling_CpuMode_MODE_GUEST_KERNEL;
+  static const CpuMode MODE_GUEST_USER = Profiling_CpuMode_MODE_GUEST_USER;
+  static const StackUnwindError UNWIND_ERROR_UNKNOWN = Profiling_StackUnwindError_UNWIND_ERROR_UNKNOWN;
+  static const StackUnwindError UNWIND_ERROR_NONE = Profiling_StackUnwindError_UNWIND_ERROR_NONE;
+  static const StackUnwindError UNWIND_ERROR_MEMORY_INVALID = Profiling_StackUnwindError_UNWIND_ERROR_MEMORY_INVALID;
+  static const StackUnwindError UNWIND_ERROR_UNWIND_INFO = Profiling_StackUnwindError_UNWIND_ERROR_UNWIND_INFO;
+  static const StackUnwindError UNWIND_ERROR_UNSUPPORTED = Profiling_StackUnwindError_UNWIND_ERROR_UNSUPPORTED;
+  static const StackUnwindError UNWIND_ERROR_INVALID_MAP = Profiling_StackUnwindError_UNWIND_ERROR_INVALID_MAP;
+  static const StackUnwindError UNWIND_ERROR_MAX_FRAMES_EXCEEDED = Profiling_StackUnwindError_UNWIND_ERROR_MAX_FRAMES_EXCEEDED;
+  static const StackUnwindError UNWIND_ERROR_REPEATED_FRAME = Profiling_StackUnwindError_UNWIND_ERROR_REPEATED_FRAME;
+  static const StackUnwindError UNWIND_ERROR_INVALID_ELF = Profiling_StackUnwindError_UNWIND_ERROR_INVALID_ELF;
+  static const StackUnwindError UNWIND_ERROR_SYSTEM_CALL = Profiling_StackUnwindError_UNWIND_ERROR_SYSTEM_CALL;
+  static const StackUnwindError UNWIND_ERROR_THREAD_TIMEOUT = Profiling_StackUnwindError_UNWIND_ERROR_THREAD_TIMEOUT;
+  static const StackUnwindError UNWIND_ERROR_THREAD_DOES_NOT_EXIST = Profiling_StackUnwindError_UNWIND_ERROR_THREAD_DOES_NOT_EXIST;
+};
+
+class StreamingProfilePacket_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  StreamingProfilePacket_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit StreamingProfilePacket_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit StreamingProfilePacket_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_callstack_iid() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> callstack_iid() const { return GetRepeated<uint64_t>(1); }
+  bool has_timestamp_delta_us() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<int64_t> timestamp_delta_us() const { return GetRepeated<int64_t>(2); }
+  bool has_process_priority() const { return at<3>().valid(); }
+  int32_t process_priority() const { return at<3>().as_int32(); }
+};
+
+class StreamingProfilePacket : public ::protozero::Message {
+ public:
+  using Decoder = StreamingProfilePacket_Decoder;
+  enum : int32_t {
+    kCallstackIidFieldNumber = 1,
+    kTimestampDeltaUsFieldNumber = 2,
+    kProcessPriorityFieldNumber = 3,
+  };
+
+  using FieldMetadata_CallstackIid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      StreamingProfilePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CallstackIid kCallstackIid() { return {}; }
+  void add_callstack_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CallstackIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TimestampDeltaUs =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      StreamingProfilePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TimestampDeltaUs kTimestampDeltaUs() { return {}; }
+  void add_timestamp_delta_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TimestampDeltaUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProcessPriority =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      StreamingProfilePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessPriority kProcessPriority() { return {}; }
+  void set_process_priority(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProcessPriority::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class StreamingFree_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  StreamingFree_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit StreamingFree_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit StreamingFree_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_address() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> address() const { return GetRepeated<uint64_t>(1); }
+  bool has_heap_id() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint32_t> heap_id() const { return GetRepeated<uint32_t>(2); }
+  bool has_sequence_number() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> sequence_number() const { return GetRepeated<uint64_t>(3); }
+};
+
+class StreamingFree : public ::protozero::Message {
+ public:
+  using Decoder = StreamingFree_Decoder;
+  enum : int32_t {
+    kAddressFieldNumber = 1,
+    kHeapIdFieldNumber = 2,
+    kSequenceNumberFieldNumber = 3,
+  };
+
+  using FieldMetadata_Address =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      StreamingFree>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Address kAddress() { return {}; }
+  void add_address(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Address::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HeapId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      StreamingFree>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapId kHeapId() { return {}; }
+  void add_heap_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_HeapId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SequenceNumber =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      StreamingFree>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SequenceNumber kSequenceNumber() { return {}; }
+  void add_sequence_number(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SequenceNumber::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class StreamingAllocation_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  StreamingAllocation_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit StreamingAllocation_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit StreamingAllocation_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_address() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> address() const { return GetRepeated<uint64_t>(1); }
+  bool has_size() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> size() const { return GetRepeated<uint64_t>(2); }
+  bool has_sample_size() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> sample_size() const { return GetRepeated<uint64_t>(3); }
+  bool has_clock_monotonic_coarse_timestamp() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> clock_monotonic_coarse_timestamp() const { return GetRepeated<uint64_t>(4); }
+  bool has_heap_id() const { return at<5>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint32_t> heap_id() const { return GetRepeated<uint32_t>(5); }
+  bool has_sequence_number() const { return at<6>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> sequence_number() const { return GetRepeated<uint64_t>(6); }
+};
+
+class StreamingAllocation : public ::protozero::Message {
+ public:
+  using Decoder = StreamingAllocation_Decoder;
+  enum : int32_t {
+    kAddressFieldNumber = 1,
+    kSizeFieldNumber = 2,
+    kSampleSizeFieldNumber = 3,
+    kClockMonotonicCoarseTimestampFieldNumber = 4,
+    kHeapIdFieldNumber = 5,
+    kSequenceNumberFieldNumber = 6,
+  };
+
+  using FieldMetadata_Address =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      StreamingAllocation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Address kAddress() { return {}; }
+  void add_address(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Address::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Size =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      StreamingAllocation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Size kSize() { return {}; }
+  void add_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Size::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SampleSize =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      StreamingAllocation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SampleSize kSampleSize() { return {}; }
+  void add_sample_size(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SampleSize::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ClockMonotonicCoarseTimestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      StreamingAllocation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClockMonotonicCoarseTimestamp kClockMonotonicCoarseTimestamp() { return {}; }
+  void add_clock_monotonic_coarse_timestamp(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ClockMonotonicCoarseTimestamp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HeapId =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      StreamingAllocation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapId kHeapId() { return {}; }
+  void add_heap_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_HeapId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SequenceNumber =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      StreamingAllocation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SequenceNumber kSequenceNumber() { return {}; }
+  void add_sequence_number(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SequenceNumber::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ProfilePacket_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ProfilePacket_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ProfilePacket_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ProfilePacket_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_strings() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> strings() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_mappings() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> mappings() const { return GetRepeated<::protozero::ConstBytes>(4); }
+  bool has_frames() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> frames() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_callstacks() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> callstacks() const { return GetRepeated<::protozero::ConstBytes>(3); }
+  bool has_process_dumps() const { return at<5>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> process_dumps() const { return GetRepeated<::protozero::ConstBytes>(5); }
+  bool has_continued() const { return at<6>().valid(); }
+  bool continued() const { return at<6>().as_bool(); }
+  bool has_index() const { return at<7>().valid(); }
+  uint64_t index() const { return at<7>().as_uint64(); }
+};
+
+class ProfilePacket : public ::protozero::Message {
+ public:
+  using Decoder = ProfilePacket_Decoder;
+  enum : int32_t {
+    kStringsFieldNumber = 1,
+    kMappingsFieldNumber = 4,
+    kFramesFieldNumber = 2,
+    kCallstacksFieldNumber = 3,
+    kProcessDumpsFieldNumber = 5,
+    kContinuedFieldNumber = 6,
+    kIndexFieldNumber = 7,
+  };
+  using HeapSample = ::perfetto::protos::pbzero::ProfilePacket_HeapSample;
+  using Histogram = ::perfetto::protos::pbzero::ProfilePacket_Histogram;
+  using ProcessStats = ::perfetto::protos::pbzero::ProfilePacket_ProcessStats;
+  using ProcessHeapSamples = ::perfetto::protos::pbzero::ProfilePacket_ProcessHeapSamples;
+
+  using FieldMetadata_Strings =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedString,
+      ProfilePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Strings kStrings() { return {}; }
+  template <typename T = InternedString> T* add_strings() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_Mappings =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Mapping,
+      ProfilePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mappings kMappings() { return {}; }
+  template <typename T = Mapping> T* add_mappings() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_Frames =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Frame,
+      ProfilePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Frames kFrames() { return {}; }
+  template <typename T = Frame> T* add_frames() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_Callstacks =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Callstack,
+      ProfilePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Callstacks kCallstacks() { return {}; }
+  template <typename T = Callstack> T* add_callstacks() {
+    return BeginNestedMessage<T>(3);
+  }
+
+
+  using FieldMetadata_ProcessDumps =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProfilePacket_ProcessHeapSamples,
+      ProfilePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessDumps kProcessDumps() { return {}; }
+  template <typename T = ProfilePacket_ProcessHeapSamples> T* add_process_dumps() {
+    return BeginNestedMessage<T>(5);
+  }
+
+
+  using FieldMetadata_Continued =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ProfilePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Continued kContinued() { return {}; }
+  void set_continued(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_Continued::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Index =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Index kIndex() { return {}; }
+  void set_index(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Index::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ProfilePacket_ProcessHeapSamples_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/14, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ProfilePacket_ProcessHeapSamples_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ProfilePacket_ProcessHeapSamples_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ProfilePacket_ProcessHeapSamples_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  uint64_t pid() const { return at<1>().as_uint64(); }
+  bool has_from_startup() const { return at<3>().valid(); }
+  bool from_startup() const { return at<3>().as_bool(); }
+  bool has_rejected_concurrent() const { return at<4>().valid(); }
+  bool rejected_concurrent() const { return at<4>().as_bool(); }
+  bool has_disconnected() const { return at<6>().valid(); }
+  bool disconnected() const { return at<6>().as_bool(); }
+  bool has_buffer_overran() const { return at<7>().valid(); }
+  bool buffer_overran() const { return at<7>().as_bool(); }
+  bool has_client_error() const { return at<14>().valid(); }
+  int32_t client_error() const { return at<14>().as_int32(); }
+  bool has_buffer_corrupted() const { return at<8>().valid(); }
+  bool buffer_corrupted() const { return at<8>().as_bool(); }
+  bool has_hit_guardrail() const { return at<10>().valid(); }
+  bool hit_guardrail() const { return at<10>().as_bool(); }
+  bool has_heap_name() const { return at<11>().valid(); }
+  ::protozero::ConstChars heap_name() const { return at<11>().as_string(); }
+  bool has_sampling_interval_bytes() const { return at<12>().valid(); }
+  uint64_t sampling_interval_bytes() const { return at<12>().as_uint64(); }
+  bool has_orig_sampling_interval_bytes() const { return at<13>().valid(); }
+  uint64_t orig_sampling_interval_bytes() const { return at<13>().as_uint64(); }
+  bool has_timestamp() const { return at<9>().valid(); }
+  uint64_t timestamp() const { return at<9>().as_uint64(); }
+  bool has_stats() const { return at<5>().valid(); }
+  ::protozero::ConstBytes stats() const { return at<5>().as_bytes(); }
+  bool has_samples() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> samples() const { return GetRepeated<::protozero::ConstBytes>(2); }
+};
+
+class ProfilePacket_ProcessHeapSamples : public ::protozero::Message {
+ public:
+  using Decoder = ProfilePacket_ProcessHeapSamples_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kFromStartupFieldNumber = 3,
+    kRejectedConcurrentFieldNumber = 4,
+    kDisconnectedFieldNumber = 6,
+    kBufferOverranFieldNumber = 7,
+    kClientErrorFieldNumber = 14,
+    kBufferCorruptedFieldNumber = 8,
+    kHitGuardrailFieldNumber = 10,
+    kHeapNameFieldNumber = 11,
+    kSamplingIntervalBytesFieldNumber = 12,
+    kOrigSamplingIntervalBytesFieldNumber = 13,
+    kTimestampFieldNumber = 9,
+    kStatsFieldNumber = 5,
+    kSamplesFieldNumber = 2,
+  };
+  using ClientError = ::perfetto::protos::pbzero::ProfilePacket_ProcessHeapSamples_ClientError;
+  static const ClientError CLIENT_ERROR_NONE = ProfilePacket_ProcessHeapSamples_ClientError_CLIENT_ERROR_NONE;
+  static const ClientError CLIENT_ERROR_HIT_TIMEOUT = ProfilePacket_ProcessHeapSamples_ClientError_CLIENT_ERROR_HIT_TIMEOUT;
+  static const ClientError CLIENT_ERROR_INVALID_STACK_BOUNDS = ProfilePacket_ProcessHeapSamples_ClientError_CLIENT_ERROR_INVALID_STACK_BOUNDS;
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_ProcessHeapSamples>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FromStartup =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ProfilePacket_ProcessHeapSamples>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FromStartup kFromStartup() { return {}; }
+  void set_from_startup(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_FromStartup::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RejectedConcurrent =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ProfilePacket_ProcessHeapSamples>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RejectedConcurrent kRejectedConcurrent() { return {}; }
+  void set_rejected_concurrent(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_RejectedConcurrent::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Disconnected =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ProfilePacket_ProcessHeapSamples>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Disconnected kDisconnected() { return {}; }
+  void set_disconnected(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_Disconnected::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BufferOverran =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ProfilePacket_ProcessHeapSamples>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BufferOverran kBufferOverran() { return {}; }
+  void set_buffer_overran(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_BufferOverran::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ClientError =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ProfilePacket_ProcessHeapSamples_ClientError,
+      ProfilePacket_ProcessHeapSamples>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClientError kClientError() { return {}; }
+  void set_client_error(::perfetto::protos::pbzero::ProfilePacket_ProcessHeapSamples_ClientError value) {
+    static constexpr uint32_t field_id = FieldMetadata_ClientError::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BufferCorrupted =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ProfilePacket_ProcessHeapSamples>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BufferCorrupted kBufferCorrupted() { return {}; }
+  void set_buffer_corrupted(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_BufferCorrupted::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HitGuardrail =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ProfilePacket_ProcessHeapSamples>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HitGuardrail kHitGuardrail() { return {}; }
+  void set_hit_guardrail(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_HitGuardrail::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HeapName =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ProfilePacket_ProcessHeapSamples>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapName kHeapName() { return {}; }
+  void set_heap_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_HeapName::kFieldId, data, size);
+  }
+  void set_heap_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_HeapName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SamplingIntervalBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_ProcessHeapSamples>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SamplingIntervalBytes kSamplingIntervalBytes() { return {}; }
+  void set_sampling_interval_bytes(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SamplingIntervalBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OrigSamplingIntervalBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_ProcessHeapSamples>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OrigSamplingIntervalBytes kOrigSamplingIntervalBytes() { return {}; }
+  void set_orig_sampling_interval_bytes(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OrigSamplingIntervalBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Timestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_ProcessHeapSamples>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Timestamp kTimestamp() { return {}; }
+  void set_timestamp(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Timestamp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Stats =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProfilePacket_ProcessStats,
+      ProfilePacket_ProcessHeapSamples>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Stats kStats() { return {}; }
+  template <typename T = ProfilePacket_ProcessStats> T* set_stats() {
+    return BeginNestedMessage<T>(5);
+  }
+
+
+  using FieldMetadata_Samples =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProfilePacket_HeapSample,
+      ProfilePacket_ProcessHeapSamples>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Samples kSamples() { return {}; }
+  template <typename T = ProfilePacket_HeapSample> T* add_samples() {
+    return BeginNestedMessage<T>(2);
+  }
+
+};
+
+class ProfilePacket_ProcessStats_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ProfilePacket_ProcessStats_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ProfilePacket_ProcessStats_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ProfilePacket_ProcessStats_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_unwinding_errors() const { return at<1>().valid(); }
+  uint64_t unwinding_errors() const { return at<1>().as_uint64(); }
+  bool has_heap_samples() const { return at<2>().valid(); }
+  uint64_t heap_samples() const { return at<2>().as_uint64(); }
+  bool has_map_reparses() const { return at<3>().valid(); }
+  uint64_t map_reparses() const { return at<3>().as_uint64(); }
+  bool has_unwinding_time_us() const { return at<4>().valid(); }
+  ::protozero::ConstBytes unwinding_time_us() const { return at<4>().as_bytes(); }
+  bool has_total_unwinding_time_us() const { return at<5>().valid(); }
+  uint64_t total_unwinding_time_us() const { return at<5>().as_uint64(); }
+  bool has_client_spinlock_blocked_us() const { return at<6>().valid(); }
+  uint64_t client_spinlock_blocked_us() const { return at<6>().as_uint64(); }
+};
+
+class ProfilePacket_ProcessStats : public ::protozero::Message {
+ public:
+  using Decoder = ProfilePacket_ProcessStats_Decoder;
+  enum : int32_t {
+    kUnwindingErrorsFieldNumber = 1,
+    kHeapSamplesFieldNumber = 2,
+    kMapReparsesFieldNumber = 3,
+    kUnwindingTimeUsFieldNumber = 4,
+    kTotalUnwindingTimeUsFieldNumber = 5,
+    kClientSpinlockBlockedUsFieldNumber = 6,
+  };
+
+  using FieldMetadata_UnwindingErrors =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_ProcessStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UnwindingErrors kUnwindingErrors() { return {}; }
+  void set_unwinding_errors(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UnwindingErrors::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HeapSamples =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_ProcessStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapSamples kHeapSamples() { return {}; }
+  void set_heap_samples(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_HeapSamples::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MapReparses =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_ProcessStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MapReparses kMapReparses() { return {}; }
+  void set_map_reparses(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MapReparses::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UnwindingTimeUs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProfilePacket_Histogram,
+      ProfilePacket_ProcessStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UnwindingTimeUs kUnwindingTimeUs() { return {}; }
+  template <typename T = ProfilePacket_Histogram> T* set_unwinding_time_us() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_TotalUnwindingTimeUs =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_ProcessStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TotalUnwindingTimeUs kTotalUnwindingTimeUs() { return {}; }
+  void set_total_unwinding_time_us(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TotalUnwindingTimeUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ClientSpinlockBlockedUs =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_ProcessStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClientSpinlockBlockedUs kClientSpinlockBlockedUs() { return {}; }
+  void set_client_spinlock_blocked_us(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ClientSpinlockBlockedUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ProfilePacket_Histogram_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ProfilePacket_Histogram_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ProfilePacket_Histogram_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ProfilePacket_Histogram_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_buckets() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> buckets() const { return GetRepeated<::protozero::ConstBytes>(1); }
+};
+
+class ProfilePacket_Histogram : public ::protozero::Message {
+ public:
+  using Decoder = ProfilePacket_Histogram_Decoder;
+  enum : int32_t {
+    kBucketsFieldNumber = 1,
+  };
+  using Bucket = ::perfetto::protos::pbzero::ProfilePacket_Histogram_Bucket;
+
+  using FieldMetadata_Buckets =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProfilePacket_Histogram_Bucket,
+      ProfilePacket_Histogram>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Buckets kBuckets() { return {}; }
+  template <typename T = ProfilePacket_Histogram_Bucket> T* add_buckets() {
+    return BeginNestedMessage<T>(1);
+  }
+
+};
+
+class ProfilePacket_Histogram_Bucket_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ProfilePacket_Histogram_Bucket_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ProfilePacket_Histogram_Bucket_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ProfilePacket_Histogram_Bucket_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_upper_limit() const { return at<1>().valid(); }
+  uint64_t upper_limit() const { return at<1>().as_uint64(); }
+  bool has_max_bucket() const { return at<2>().valid(); }
+  bool max_bucket() const { return at<2>().as_bool(); }
+  bool has_count() const { return at<3>().valid(); }
+  uint64_t count() const { return at<3>().as_uint64(); }
+};
+
+class ProfilePacket_Histogram_Bucket : public ::protozero::Message {
+ public:
+  using Decoder = ProfilePacket_Histogram_Bucket_Decoder;
+  enum : int32_t {
+    kUpperLimitFieldNumber = 1,
+    kMaxBucketFieldNumber = 2,
+    kCountFieldNumber = 3,
+  };
+
+  using FieldMetadata_UpperLimit =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_Histogram_Bucket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UpperLimit kUpperLimit() { return {}; }
+  void set_upper_limit(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UpperLimit::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MaxBucket =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ProfilePacket_Histogram_Bucket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MaxBucket kMaxBucket() { return {}; }
+  void set_max_bucket(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_MaxBucket::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Count =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_Histogram_Bucket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Count kCount() { return {}; }
+  void set_count(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Count::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ProfilePacket_HeapSample_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/9, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ProfilePacket_HeapSample_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ProfilePacket_HeapSample_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ProfilePacket_HeapSample_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_callstack_id() const { return at<1>().valid(); }
+  uint64_t callstack_id() const { return at<1>().as_uint64(); }
+  bool has_self_allocated() const { return at<2>().valid(); }
+  uint64_t self_allocated() const { return at<2>().as_uint64(); }
+  bool has_self_freed() const { return at<3>().valid(); }
+  uint64_t self_freed() const { return at<3>().as_uint64(); }
+  bool has_self_max() const { return at<8>().valid(); }
+  uint64_t self_max() const { return at<8>().as_uint64(); }
+  bool has_self_max_count() const { return at<9>().valid(); }
+  uint64_t self_max_count() const { return at<9>().as_uint64(); }
+  bool has_timestamp() const { return at<4>().valid(); }
+  uint64_t timestamp() const { return at<4>().as_uint64(); }
+  bool has_alloc_count() const { return at<5>().valid(); }
+  uint64_t alloc_count() const { return at<5>().as_uint64(); }
+  bool has_free_count() const { return at<6>().valid(); }
+  uint64_t free_count() const { return at<6>().as_uint64(); }
+};
+
+class ProfilePacket_HeapSample : public ::protozero::Message {
+ public:
+  using Decoder = ProfilePacket_HeapSample_Decoder;
+  enum : int32_t {
+    kCallstackIdFieldNumber = 1,
+    kSelfAllocatedFieldNumber = 2,
+    kSelfFreedFieldNumber = 3,
+    kSelfMaxFieldNumber = 8,
+    kSelfMaxCountFieldNumber = 9,
+    kTimestampFieldNumber = 4,
+    kAllocCountFieldNumber = 5,
+    kFreeCountFieldNumber = 6,
+  };
+
+  using FieldMetadata_CallstackId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_HeapSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CallstackId kCallstackId() { return {}; }
+  void set_callstack_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CallstackId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SelfAllocated =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_HeapSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SelfAllocated kSelfAllocated() { return {}; }
+  void set_self_allocated(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SelfAllocated::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SelfFreed =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_HeapSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SelfFreed kSelfFreed() { return {}; }
+  void set_self_freed(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SelfFreed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SelfMax =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_HeapSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SelfMax kSelfMax() { return {}; }
+  void set_self_max(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SelfMax::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SelfMaxCount =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_HeapSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SelfMaxCount kSelfMaxCount() { return {}; }
+  void set_self_max_count(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SelfMaxCount::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Timestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_HeapSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Timestamp kTimestamp() { return {}; }
+  void set_timestamp(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Timestamp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AllocCount =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_HeapSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllocCount kAllocCount() { return {}; }
+  void set_alloc_count(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_AllocCount::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FreeCount =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProfilePacket_HeapSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FreeCount kFreeCount() { return {}; }
+  void set_free_count(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FreeCount::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/profiling/smaps.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PROFILING_SMAPS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PROFILING_SMAPS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class SmapsEntry;
+
+class SmapsPacket_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  SmapsPacket_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SmapsPacket_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SmapsPacket_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  uint32_t pid() const { return at<1>().as_uint32(); }
+  bool has_entries() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> entries() const { return GetRepeated<::protozero::ConstBytes>(2); }
+};
+
+class SmapsPacket : public ::protozero::Message {
+ public:
+  using Decoder = SmapsPacket_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kEntriesFieldNumber = 2,
+  };
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmapsPacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Entries =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SmapsEntry,
+      SmapsPacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Entries kEntries() { return {}; }
+  template <typename T = SmapsEntry> T* add_entries() {
+    return BeginNestedMessage<T>(2);
+  }
+
+};
+
+class SmapsEntry_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/15, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SmapsEntry_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SmapsEntry_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SmapsEntry_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_path() const { return at<1>().valid(); }
+  ::protozero::ConstChars path() const { return at<1>().as_string(); }
+  bool has_size_kb() const { return at<2>().valid(); }
+  uint64_t size_kb() const { return at<2>().as_uint64(); }
+  bool has_private_dirty_kb() const { return at<3>().valid(); }
+  uint64_t private_dirty_kb() const { return at<3>().as_uint64(); }
+  bool has_swap_kb() const { return at<4>().valid(); }
+  uint64_t swap_kb() const { return at<4>().as_uint64(); }
+  bool has_file_name() const { return at<5>().valid(); }
+  ::protozero::ConstChars file_name() const { return at<5>().as_string(); }
+  bool has_start_address() const { return at<6>().valid(); }
+  uint64_t start_address() const { return at<6>().as_uint64(); }
+  bool has_module_timestamp() const { return at<7>().valid(); }
+  uint64_t module_timestamp() const { return at<7>().as_uint64(); }
+  bool has_module_debugid() const { return at<8>().valid(); }
+  ::protozero::ConstChars module_debugid() const { return at<8>().as_string(); }
+  bool has_module_debug_path() const { return at<9>().valid(); }
+  ::protozero::ConstChars module_debug_path() const { return at<9>().as_string(); }
+  bool has_protection_flags() const { return at<10>().valid(); }
+  uint32_t protection_flags() const { return at<10>().as_uint32(); }
+  bool has_private_clean_resident_kb() const { return at<11>().valid(); }
+  uint64_t private_clean_resident_kb() const { return at<11>().as_uint64(); }
+  bool has_shared_dirty_resident_kb() const { return at<12>().valid(); }
+  uint64_t shared_dirty_resident_kb() const { return at<12>().as_uint64(); }
+  bool has_shared_clean_resident_kb() const { return at<13>().valid(); }
+  uint64_t shared_clean_resident_kb() const { return at<13>().as_uint64(); }
+  bool has_locked_kb() const { return at<14>().valid(); }
+  uint64_t locked_kb() const { return at<14>().as_uint64(); }
+  bool has_proportional_resident_kb() const { return at<15>().valid(); }
+  uint64_t proportional_resident_kb() const { return at<15>().as_uint64(); }
+};
+
+class SmapsEntry : public ::protozero::Message {
+ public:
+  using Decoder = SmapsEntry_Decoder;
+  enum : int32_t {
+    kPathFieldNumber = 1,
+    kSizeKbFieldNumber = 2,
+    kPrivateDirtyKbFieldNumber = 3,
+    kSwapKbFieldNumber = 4,
+    kFileNameFieldNumber = 5,
+    kStartAddressFieldNumber = 6,
+    kModuleTimestampFieldNumber = 7,
+    kModuleDebugidFieldNumber = 8,
+    kModuleDebugPathFieldNumber = 9,
+    kProtectionFlagsFieldNumber = 10,
+    kPrivateCleanResidentKbFieldNumber = 11,
+    kSharedDirtyResidentKbFieldNumber = 12,
+    kSharedCleanResidentKbFieldNumber = 13,
+    kLockedKbFieldNumber = 14,
+    kProportionalResidentKbFieldNumber = 15,
+  };
+
+  using FieldMetadata_Path =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SmapsEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Path kPath() { return {}; }
+  void set_path(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Path::kFieldId, data, size);
+  }
+  void set_path(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Path::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SizeKb =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SmapsEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SizeKb kSizeKb() { return {}; }
+  void set_size_kb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SizeKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PrivateDirtyKb =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SmapsEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PrivateDirtyKb kPrivateDirtyKb() { return {}; }
+  void set_private_dirty_kb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PrivateDirtyKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SwapKb =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SmapsEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SwapKb kSwapKb() { return {}; }
+  void set_swap_kb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SwapKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FileName =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SmapsEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FileName kFileName() { return {}; }
+  void set_file_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_FileName::kFieldId, data, size);
+  }
+  void set_file_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_FileName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StartAddress =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SmapsEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StartAddress kStartAddress() { return {}; }
+  void set_start_address(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_StartAddress::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ModuleTimestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SmapsEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ModuleTimestamp kModuleTimestamp() { return {}; }
+  void set_module_timestamp(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ModuleTimestamp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ModuleDebugid =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SmapsEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ModuleDebugid kModuleDebugid() { return {}; }
+  void set_module_debugid(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ModuleDebugid::kFieldId, data, size);
+  }
+  void set_module_debugid(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ModuleDebugid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ModuleDebugPath =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SmapsEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ModuleDebugPath kModuleDebugPath() { return {}; }
+  void set_module_debug_path(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ModuleDebugPath::kFieldId, data, size);
+  }
+  void set_module_debug_path(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ModuleDebugPath::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProtectionFlags =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SmapsEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProtectionFlags kProtectionFlags() { return {}; }
+  void set_protection_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProtectionFlags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PrivateCleanResidentKb =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SmapsEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PrivateCleanResidentKb kPrivateCleanResidentKb() { return {}; }
+  void set_private_clean_resident_kb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PrivateCleanResidentKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SharedDirtyResidentKb =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SmapsEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SharedDirtyResidentKb kSharedDirtyResidentKb() { return {}; }
+  void set_shared_dirty_resident_kb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SharedDirtyResidentKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SharedCleanResidentKb =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SmapsEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SharedCleanResidentKb kSharedCleanResidentKb() { return {}; }
+  void set_shared_clean_resident_kb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SharedCleanResidentKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LockedKb =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SmapsEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LockedKb kLockedKb() { return {}; }
+  void set_locked_kb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LockedKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProportionalResidentKb =
+    ::protozero::proto_utils::FieldMetadata<
+      15,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SmapsEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProportionalResidentKb kProportionalResidentKb() { return {}; }
+  void set_proportional_resident_kb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProportionalResidentKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_application_state_info.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_APPLICATION_STATE_INFO_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_APPLICATION_STATE_INFO_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum ChromeApplicationStateInfo_ChromeApplicationState : int32_t;
+
+enum ChromeApplicationStateInfo_ChromeApplicationState : int32_t {
+  ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_UNKNOWN = 0,
+  ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_RUNNING_ACTIVITIES = 1,
+  ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_PAUSED_ACTIVITIES = 2,
+  ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_STOPPED_ACTIVITIES = 3,
+  ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_DESTROYED_ACTIVITIES = 4,
+};
+
+const ChromeApplicationStateInfo_ChromeApplicationState ChromeApplicationStateInfo_ChromeApplicationState_MIN = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_UNKNOWN;
+const ChromeApplicationStateInfo_ChromeApplicationState ChromeApplicationStateInfo_ChromeApplicationState_MAX = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_DESTROYED_ACTIVITIES;
+
+class ChromeApplicationStateInfo_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeApplicationStateInfo_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeApplicationStateInfo_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeApplicationStateInfo_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_application_state() const { return at<1>().valid(); }
+  int32_t application_state() const { return at<1>().as_int32(); }
+};
+
+class ChromeApplicationStateInfo : public ::protozero::Message {
+ public:
+  using Decoder = ChromeApplicationStateInfo_Decoder;
+  enum : int32_t {
+    kApplicationStateFieldNumber = 1,
+  };
+  using ChromeApplicationState = ::perfetto::protos::pbzero::ChromeApplicationStateInfo_ChromeApplicationState;
+  static const ChromeApplicationState APPLICATION_STATE_UNKNOWN = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_UNKNOWN;
+  static const ChromeApplicationState APPLICATION_STATE_HAS_RUNNING_ACTIVITIES = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_RUNNING_ACTIVITIES;
+  static const ChromeApplicationState APPLICATION_STATE_HAS_PAUSED_ACTIVITIES = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_PAUSED_ACTIVITIES;
+  static const ChromeApplicationState APPLICATION_STATE_HAS_STOPPED_ACTIVITIES = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_STOPPED_ACTIVITIES;
+  static const ChromeApplicationState APPLICATION_STATE_HAS_DESTROYED_ACTIVITIES = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_DESTROYED_ACTIVITIES;
+
+  using FieldMetadata_ApplicationState =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeApplicationStateInfo_ChromeApplicationState,
+      ChromeApplicationStateInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ApplicationState kApplicationState() { return {}; }
+  void set_application_state(::perfetto::protos::pbzero::ChromeApplicationStateInfo_ChromeApplicationState value) {
+    static constexpr uint32_t field_id = FieldMetadata_ApplicationState::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_compositor_scheduler_state.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_COMPOSITOR_SCHEDULER_STATE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_COMPOSITOR_SCHEDULER_STATE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class BeginFrameArgs;
+class BeginFrameObserverState;
+class BeginFrameSourceState;
+class BeginImplFrameArgs;
+class BeginImplFrameArgs_TimestampsInUs;
+class ChromeCompositorStateMachine;
+class ChromeCompositorStateMachine_MajorState;
+class ChromeCompositorStateMachine_MinorState;
+class CompositorTimingHistory;
+class SourceLocation;
+enum BeginFrameArgs_BeginFrameArgsType : int32_t;
+enum BeginImplFrameArgs_State : int32_t;
+enum ChromeCompositorSchedulerAction : int32_t;
+enum ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode : int32_t;
+enum ChromeCompositorStateMachine_MajorState_BeginImplFrameState : int32_t;
+enum ChromeCompositorStateMachine_MajorState_BeginMainFrameState : int32_t;
+enum ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState : int32_t;
+enum ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState : int32_t;
+enum ChromeCompositorStateMachine_MinorState_ScrollHandlerState : int32_t;
+enum ChromeCompositorStateMachine_MinorState_TreePriority : int32_t;
+
+enum ChromeCompositorSchedulerAction : int32_t {
+  CC_SCHEDULER_ACTION_UNSPECIFIED = 0,
+  CC_SCHEDULER_ACTION_NONE = 1,
+  CC_SCHEDULER_ACTION_SEND_BEGIN_MAIN_FRAME = 2,
+  CC_SCHEDULER_ACTION_COMMIT = 3,
+  CC_SCHEDULER_ACTION_ACTIVATE_SYNC_TREE = 4,
+  CC_SCHEDULER_ACTION_DRAW_IF_POSSIBLE = 5,
+  CC_SCHEDULER_ACTION_DRAW_FORCED = 6,
+  CC_SCHEDULER_ACTION_DRAW_ABORT = 7,
+  CC_SCHEDULER_ACTION_BEGIN_LAYER_TREE_FRAME_SINK_CREATION = 8,
+  CC_SCHEDULER_ACTION_PREPARE_TILES = 9,
+  CC_SCHEDULER_ACTION_INVALIDATE_LAYER_TREE_FRAME_SINK = 10,
+  CC_SCHEDULER_ACTION_PERFORM_IMPL_SIDE_INVALIDATION = 11,
+  CC_SCHEDULER_ACTION_NOTIFY_BEGIN_MAIN_FRAME_NOT_EXPECTED_UNTIL = 12,
+  CC_SCHEDULER_ACTION_NOTIFY_BEGIN_MAIN_FRAME_NOT_EXPECTED_SOON = 13,
+};
+
+const ChromeCompositorSchedulerAction ChromeCompositorSchedulerAction_MIN = CC_SCHEDULER_ACTION_UNSPECIFIED;
+const ChromeCompositorSchedulerAction ChromeCompositorSchedulerAction_MAX = CC_SCHEDULER_ACTION_NOTIFY_BEGIN_MAIN_FRAME_NOT_EXPECTED_SOON;
+
+enum BeginImplFrameArgs_State : int32_t {
+  BeginImplFrameArgs_State_BEGIN_FRAME_FINISHED = 0,
+  BeginImplFrameArgs_State_BEGIN_FRAME_USING = 1,
+};
+
+const BeginImplFrameArgs_State BeginImplFrameArgs_State_MIN = BeginImplFrameArgs_State_BEGIN_FRAME_FINISHED;
+const BeginImplFrameArgs_State BeginImplFrameArgs_State_MAX = BeginImplFrameArgs_State_BEGIN_FRAME_USING;
+
+enum BeginFrameArgs_BeginFrameArgsType : int32_t {
+  BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_UNSPECIFIED = 0,
+  BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_INVALID = 1,
+  BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_NORMAL = 2,
+  BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_MISSED = 3,
+};
+
+const BeginFrameArgs_BeginFrameArgsType BeginFrameArgs_BeginFrameArgsType_MIN = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_UNSPECIFIED;
+const BeginFrameArgs_BeginFrameArgsType BeginFrameArgs_BeginFrameArgsType_MAX = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_MISSED;
+
+enum ChromeCompositorStateMachine_MinorState_TreePriority : int32_t {
+  ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_SAME_PRIORITY_FOR_BOTH_TREES = 1,
+  ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_SMOOTHNESS_TAKES_PRIORITY = 2,
+  ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_NEW_CONTENT_TAKES_PRIORITY = 3,
+};
+
+const ChromeCompositorStateMachine_MinorState_TreePriority ChromeCompositorStateMachine_MinorState_TreePriority_MIN = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_UNSPECIFIED;
+const ChromeCompositorStateMachine_MinorState_TreePriority ChromeCompositorStateMachine_MinorState_TreePriority_MAX = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_NEW_CONTENT_TAKES_PRIORITY;
+
+enum ChromeCompositorStateMachine_MinorState_ScrollHandlerState : int32_t {
+  ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_HANDLER_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_AFFECTS_SCROLL_HANDLER = 1,
+  ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_DOES_NOT_AFFECT_SCROLL_HANDLER = 2,
+};
+
+const ChromeCompositorStateMachine_MinorState_ScrollHandlerState ChromeCompositorStateMachine_MinorState_ScrollHandlerState_MIN = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_HANDLER_UNSPECIFIED;
+const ChromeCompositorStateMachine_MinorState_ScrollHandlerState ChromeCompositorStateMachine_MinorState_ScrollHandlerState_MAX = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_DOES_NOT_AFFECT_SCROLL_HANDLER;
+
+enum ChromeCompositorStateMachine_MajorState_BeginImplFrameState : int32_t {
+  ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_IDLE = 1,
+  ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_BEGIN_FRAME = 2,
+  ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_DEADLINE = 3,
+};
+
+const ChromeCompositorStateMachine_MajorState_BeginImplFrameState ChromeCompositorStateMachine_MajorState_BeginImplFrameState_MIN = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_UNSPECIFIED;
+const ChromeCompositorStateMachine_MajorState_BeginImplFrameState ChromeCompositorStateMachine_MajorState_BeginImplFrameState_MAX = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_DEADLINE;
+
+enum ChromeCompositorStateMachine_MajorState_BeginMainFrameState : int32_t {
+  ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_IDLE = 1,
+  ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_SENT = 2,
+  ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_READY_TO_COMMIT = 3,
+};
+
+const ChromeCompositorStateMachine_MajorState_BeginMainFrameState ChromeCompositorStateMachine_MajorState_BeginMainFrameState_MIN = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_UNSPECIFIED;
+const ChromeCompositorStateMachine_MajorState_BeginMainFrameState ChromeCompositorStateMachine_MajorState_BeginMainFrameState_MAX = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_READY_TO_COMMIT;
+
+enum ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState : int32_t {
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_NONE = 1,
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_ACTIVE = 2,
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_CREATING = 3,
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_COMMIT = 4,
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_ACTIVATION = 5,
+};
+
+const ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_MIN = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_UNSPECIFIED;
+const ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_MAX = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_ACTIVATION;
+
+enum ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState : int32_t {
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_IDLE = 1,
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_COMMIT = 2,
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_ACTIVATION = 3,
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_DRAW = 4,
+};
+
+const ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_MIN = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_UNSPECIFIED;
+const ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_MAX = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_DRAW;
+
+enum ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode : int32_t {
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_UNSPECIFIED = 0,
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_NONE = 1,
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_IMMEDIATE = 2,
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_REGULAR = 3,
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_LATE = 4,
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_BLOCKED = 5,
+};
+
+const ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_MIN = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_UNSPECIFIED;
+const ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_MAX = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_BLOCKED;
+
+class CompositorTimingHistory_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  CompositorTimingHistory_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CompositorTimingHistory_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CompositorTimingHistory_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_begin_main_frame_queue_critical_estimate_delta_us() const { return at<1>().valid(); }
+  int64_t begin_main_frame_queue_critical_estimate_delta_us() const { return at<1>().as_int64(); }
+  bool has_begin_main_frame_queue_not_critical_estimate_delta_us() const { return at<2>().valid(); }
+  int64_t begin_main_frame_queue_not_critical_estimate_delta_us() const { return at<2>().as_int64(); }
+  bool has_begin_main_frame_start_to_ready_to_commit_estimate_delta_us() const { return at<3>().valid(); }
+  int64_t begin_main_frame_start_to_ready_to_commit_estimate_delta_us() const { return at<3>().as_int64(); }
+  bool has_commit_to_ready_to_activate_estimate_delta_us() const { return at<4>().valid(); }
+  int64_t commit_to_ready_to_activate_estimate_delta_us() const { return at<4>().as_int64(); }
+  bool has_prepare_tiles_estimate_delta_us() const { return at<5>().valid(); }
+  int64_t prepare_tiles_estimate_delta_us() const { return at<5>().as_int64(); }
+  bool has_activate_estimate_delta_us() const { return at<6>().valid(); }
+  int64_t activate_estimate_delta_us() const { return at<6>().as_int64(); }
+  bool has_draw_estimate_delta_us() const { return at<7>().valid(); }
+  int64_t draw_estimate_delta_us() const { return at<7>().as_int64(); }
+};
+
+class CompositorTimingHistory : public ::protozero::Message {
+ public:
+  using Decoder = CompositorTimingHistory_Decoder;
+  enum : int32_t {
+    kBeginMainFrameQueueCriticalEstimateDeltaUsFieldNumber = 1,
+    kBeginMainFrameQueueNotCriticalEstimateDeltaUsFieldNumber = 2,
+    kBeginMainFrameStartToReadyToCommitEstimateDeltaUsFieldNumber = 3,
+    kCommitToReadyToActivateEstimateDeltaUsFieldNumber = 4,
+    kPrepareTilesEstimateDeltaUsFieldNumber = 5,
+    kActivateEstimateDeltaUsFieldNumber = 6,
+    kDrawEstimateDeltaUsFieldNumber = 7,
+  };
+
+  using FieldMetadata_BeginMainFrameQueueCriticalEstimateDeltaUs =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      CompositorTimingHistory>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BeginMainFrameQueueCriticalEstimateDeltaUs kBeginMainFrameQueueCriticalEstimateDeltaUs() { return {}; }
+  void set_begin_main_frame_queue_critical_estimate_delta_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BeginMainFrameQueueCriticalEstimateDeltaUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BeginMainFrameQueueNotCriticalEstimateDeltaUs =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      CompositorTimingHistory>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BeginMainFrameQueueNotCriticalEstimateDeltaUs kBeginMainFrameQueueNotCriticalEstimateDeltaUs() { return {}; }
+  void set_begin_main_frame_queue_not_critical_estimate_delta_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BeginMainFrameQueueNotCriticalEstimateDeltaUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BeginMainFrameStartToReadyToCommitEstimateDeltaUs =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      CompositorTimingHistory>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BeginMainFrameStartToReadyToCommitEstimateDeltaUs kBeginMainFrameStartToReadyToCommitEstimateDeltaUs() { return {}; }
+  void set_begin_main_frame_start_to_ready_to_commit_estimate_delta_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BeginMainFrameStartToReadyToCommitEstimateDeltaUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CommitToReadyToActivateEstimateDeltaUs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      CompositorTimingHistory>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CommitToReadyToActivateEstimateDeltaUs kCommitToReadyToActivateEstimateDeltaUs() { return {}; }
+  void set_commit_to_ready_to_activate_estimate_delta_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CommitToReadyToActivateEstimateDeltaUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PrepareTilesEstimateDeltaUs =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      CompositorTimingHistory>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PrepareTilesEstimateDeltaUs kPrepareTilesEstimateDeltaUs() { return {}; }
+  void set_prepare_tiles_estimate_delta_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PrepareTilesEstimateDeltaUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ActivateEstimateDeltaUs =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      CompositorTimingHistory>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ActivateEstimateDeltaUs kActivateEstimateDeltaUs() { return {}; }
+  void set_activate_estimate_delta_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ActivateEstimateDeltaUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DrawEstimateDeltaUs =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      CompositorTimingHistory>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DrawEstimateDeltaUs kDrawEstimateDeltaUs() { return {}; }
+  void set_draw_estimate_delta_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DrawEstimateDeltaUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BeginFrameSourceState_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BeginFrameSourceState_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BeginFrameSourceState_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BeginFrameSourceState_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_source_id() const { return at<1>().valid(); }
+  uint32_t source_id() const { return at<1>().as_uint32(); }
+  bool has_paused() const { return at<2>().valid(); }
+  bool paused() const { return at<2>().as_bool(); }
+  bool has_num_observers() const { return at<3>().valid(); }
+  uint32_t num_observers() const { return at<3>().as_uint32(); }
+  bool has_last_begin_frame_args() const { return at<4>().valid(); }
+  ::protozero::ConstBytes last_begin_frame_args() const { return at<4>().as_bytes(); }
+};
+
+class BeginFrameSourceState : public ::protozero::Message {
+ public:
+  using Decoder = BeginFrameSourceState_Decoder;
+  enum : int32_t {
+    kSourceIdFieldNumber = 1,
+    kPausedFieldNumber = 2,
+    kNumObserversFieldNumber = 3,
+    kLastBeginFrameArgsFieldNumber = 4,
+  };
+
+  using FieldMetadata_SourceId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BeginFrameSourceState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SourceId kSourceId() { return {}; }
+  void set_source_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SourceId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Paused =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      BeginFrameSourceState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Paused kPaused() { return {}; }
+  void set_paused(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_Paused::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NumObservers =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      BeginFrameSourceState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NumObservers kNumObservers() { return {}; }
+  void set_num_observers(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NumObservers::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LastBeginFrameArgs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BeginFrameArgs,
+      BeginFrameSourceState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LastBeginFrameArgs kLastBeginFrameArgs() { return {}; }
+  template <typename T = BeginFrameArgs> T* set_last_begin_frame_args() {
+    return BeginNestedMessage<T>(4);
+  }
+
+};
+
+class BeginFrameObserverState_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BeginFrameObserverState_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BeginFrameObserverState_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BeginFrameObserverState_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dropped_begin_frame_args() const { return at<1>().valid(); }
+  int64_t dropped_begin_frame_args() const { return at<1>().as_int64(); }
+  bool has_last_begin_frame_args() const { return at<2>().valid(); }
+  ::protozero::ConstBytes last_begin_frame_args() const { return at<2>().as_bytes(); }
+};
+
+class BeginFrameObserverState : public ::protozero::Message {
+ public:
+  using Decoder = BeginFrameObserverState_Decoder;
+  enum : int32_t {
+    kDroppedBeginFrameArgsFieldNumber = 1,
+    kLastBeginFrameArgsFieldNumber = 2,
+  };
+
+  using FieldMetadata_DroppedBeginFrameArgs =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BeginFrameObserverState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DroppedBeginFrameArgs kDroppedBeginFrameArgs() { return {}; }
+  void set_dropped_begin_frame_args(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DroppedBeginFrameArgs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LastBeginFrameArgs =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BeginFrameArgs,
+      BeginFrameObserverState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LastBeginFrameArgs kLastBeginFrameArgs() { return {}; }
+  template <typename T = BeginFrameArgs> T* set_last_begin_frame_args() {
+    return BeginNestedMessage<T>(2);
+  }
+
+};
+
+class BeginImplFrameArgs_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BeginImplFrameArgs_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BeginImplFrameArgs_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BeginImplFrameArgs_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_updated_at_us() const { return at<1>().valid(); }
+  int64_t updated_at_us() const { return at<1>().as_int64(); }
+  bool has_finished_at_us() const { return at<2>().valid(); }
+  int64_t finished_at_us() const { return at<2>().as_int64(); }
+  bool has_state() const { return at<3>().valid(); }
+  int32_t state() const { return at<3>().as_int32(); }
+  bool has_current_args() const { return at<4>().valid(); }
+  ::protozero::ConstBytes current_args() const { return at<4>().as_bytes(); }
+  bool has_last_args() const { return at<5>().valid(); }
+  ::protozero::ConstBytes last_args() const { return at<5>().as_bytes(); }
+  bool has_timestamps_in_us() const { return at<6>().valid(); }
+  ::protozero::ConstBytes timestamps_in_us() const { return at<6>().as_bytes(); }
+};
+
+class BeginImplFrameArgs : public ::protozero::Message {
+ public:
+  using Decoder = BeginImplFrameArgs_Decoder;
+  enum : int32_t {
+    kUpdatedAtUsFieldNumber = 1,
+    kFinishedAtUsFieldNumber = 2,
+    kStateFieldNumber = 3,
+    kCurrentArgsFieldNumber = 4,
+    kLastArgsFieldNumber = 5,
+    kTimestampsInUsFieldNumber = 6,
+  };
+  using TimestampsInUs = ::perfetto::protos::pbzero::BeginImplFrameArgs_TimestampsInUs;
+  using State = ::perfetto::protos::pbzero::BeginImplFrameArgs_State;
+  static const State BEGIN_FRAME_FINISHED = BeginImplFrameArgs_State_BEGIN_FRAME_FINISHED;
+  static const State BEGIN_FRAME_USING = BeginImplFrameArgs_State_BEGIN_FRAME_USING;
+
+  using FieldMetadata_UpdatedAtUs =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BeginImplFrameArgs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UpdatedAtUs kUpdatedAtUs() { return {}; }
+  void set_updated_at_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UpdatedAtUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FinishedAtUs =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BeginImplFrameArgs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FinishedAtUs kFinishedAtUs() { return {}; }
+  void set_finished_at_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FinishedAtUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_State =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::BeginImplFrameArgs_State,
+      BeginImplFrameArgs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_State kState() { return {}; }
+  void set_state(::perfetto::protos::pbzero::BeginImplFrameArgs_State value) {
+    static constexpr uint32_t field_id = FieldMetadata_State::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CurrentArgs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BeginFrameArgs,
+      BeginImplFrameArgs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CurrentArgs kCurrentArgs() { return {}; }
+  template <typename T = BeginFrameArgs> T* set_current_args() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_LastArgs =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BeginFrameArgs,
+      BeginImplFrameArgs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LastArgs kLastArgs() { return {}; }
+  template <typename T = BeginFrameArgs> T* set_last_args() {
+    return BeginNestedMessage<T>(5);
+  }
+
+
+  using FieldMetadata_TimestampsInUs =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BeginImplFrameArgs_TimestampsInUs,
+      BeginImplFrameArgs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TimestampsInUs kTimestampsInUs() { return {}; }
+  template <typename T = BeginImplFrameArgs_TimestampsInUs> T* set_timestamps_in_us() {
+    return BeginNestedMessage<T>(6);
+  }
+
+};
+
+class BeginImplFrameArgs_TimestampsInUs_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BeginImplFrameArgs_TimestampsInUs_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BeginImplFrameArgs_TimestampsInUs_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BeginImplFrameArgs_TimestampsInUs_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_interval_delta() const { return at<1>().valid(); }
+  int64_t interval_delta() const { return at<1>().as_int64(); }
+  bool has_now_to_deadline_delta() const { return at<2>().valid(); }
+  int64_t now_to_deadline_delta() const { return at<2>().as_int64(); }
+  bool has_frame_time_to_now_delta() const { return at<3>().valid(); }
+  int64_t frame_time_to_now_delta() const { return at<3>().as_int64(); }
+  bool has_frame_time_to_deadline_delta() const { return at<4>().valid(); }
+  int64_t frame_time_to_deadline_delta() const { return at<4>().as_int64(); }
+  bool has_now() const { return at<5>().valid(); }
+  int64_t now() const { return at<5>().as_int64(); }
+  bool has_frame_time() const { return at<6>().valid(); }
+  int64_t frame_time() const { return at<6>().as_int64(); }
+  bool has_deadline() const { return at<7>().valid(); }
+  int64_t deadline() const { return at<7>().as_int64(); }
+};
+
+class BeginImplFrameArgs_TimestampsInUs : public ::protozero::Message {
+ public:
+  using Decoder = BeginImplFrameArgs_TimestampsInUs_Decoder;
+  enum : int32_t {
+    kIntervalDeltaFieldNumber = 1,
+    kNowToDeadlineDeltaFieldNumber = 2,
+    kFrameTimeToNowDeltaFieldNumber = 3,
+    kFrameTimeToDeadlineDeltaFieldNumber = 4,
+    kNowFieldNumber = 5,
+    kFrameTimeFieldNumber = 6,
+    kDeadlineFieldNumber = 7,
+  };
+
+  using FieldMetadata_IntervalDelta =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BeginImplFrameArgs_TimestampsInUs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IntervalDelta kIntervalDelta() { return {}; }
+  void set_interval_delta(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IntervalDelta::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NowToDeadlineDelta =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BeginImplFrameArgs_TimestampsInUs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NowToDeadlineDelta kNowToDeadlineDelta() { return {}; }
+  void set_now_to_deadline_delta(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NowToDeadlineDelta::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FrameTimeToNowDelta =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BeginImplFrameArgs_TimestampsInUs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FrameTimeToNowDelta kFrameTimeToNowDelta() { return {}; }
+  void set_frame_time_to_now_delta(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FrameTimeToNowDelta::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FrameTimeToDeadlineDelta =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BeginImplFrameArgs_TimestampsInUs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FrameTimeToDeadlineDelta kFrameTimeToDeadlineDelta() { return {}; }
+  void set_frame_time_to_deadline_delta(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FrameTimeToDeadlineDelta::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Now =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BeginImplFrameArgs_TimestampsInUs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Now kNow() { return {}; }
+  void set_now(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Now::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FrameTime =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BeginImplFrameArgs_TimestampsInUs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FrameTime kFrameTime() { return {}; }
+  void set_frame_time(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FrameTime::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Deadline =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BeginImplFrameArgs_TimestampsInUs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Deadline kDeadline() { return {}; }
+  void set_deadline(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Deadline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class BeginFrameArgs_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/10, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BeginFrameArgs_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BeginFrameArgs_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BeginFrameArgs_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_type() const { return at<1>().valid(); }
+  int32_t type() const { return at<1>().as_int32(); }
+  bool has_source_id() const { return at<2>().valid(); }
+  uint64_t source_id() const { return at<2>().as_uint64(); }
+  bool has_sequence_number() const { return at<3>().valid(); }
+  uint64_t sequence_number() const { return at<3>().as_uint64(); }
+  bool has_frame_time_us() const { return at<4>().valid(); }
+  int64_t frame_time_us() const { return at<4>().as_int64(); }
+  bool has_deadline_us() const { return at<5>().valid(); }
+  int64_t deadline_us() const { return at<5>().as_int64(); }
+  bool has_interval_delta_us() const { return at<6>().valid(); }
+  int64_t interval_delta_us() const { return at<6>().as_int64(); }
+  bool has_on_critical_path() const { return at<7>().valid(); }
+  bool on_critical_path() const { return at<7>().as_bool(); }
+  bool has_animate_only() const { return at<8>().valid(); }
+  bool animate_only() const { return at<8>().as_bool(); }
+  bool has_source_location_iid() const { return at<9>().valid(); }
+  uint64_t source_location_iid() const { return at<9>().as_uint64(); }
+  bool has_source_location() const { return at<10>().valid(); }
+  ::protozero::ConstBytes source_location() const { return at<10>().as_bytes(); }
+};
+
+class BeginFrameArgs : public ::protozero::Message {
+ public:
+  using Decoder = BeginFrameArgs_Decoder;
+  enum : int32_t {
+    kTypeFieldNumber = 1,
+    kSourceIdFieldNumber = 2,
+    kSequenceNumberFieldNumber = 3,
+    kFrameTimeUsFieldNumber = 4,
+    kDeadlineUsFieldNumber = 5,
+    kIntervalDeltaUsFieldNumber = 6,
+    kOnCriticalPathFieldNumber = 7,
+    kAnimateOnlyFieldNumber = 8,
+    kSourceLocationIidFieldNumber = 9,
+    kSourceLocationFieldNumber = 10,
+  };
+  using BeginFrameArgsType = ::perfetto::protos::pbzero::BeginFrameArgs_BeginFrameArgsType;
+  static const BeginFrameArgsType BEGIN_FRAME_ARGS_TYPE_UNSPECIFIED = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_UNSPECIFIED;
+  static const BeginFrameArgsType BEGIN_FRAME_ARGS_TYPE_INVALID = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_INVALID;
+  static const BeginFrameArgsType BEGIN_FRAME_ARGS_TYPE_NORMAL = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_NORMAL;
+  static const BeginFrameArgsType BEGIN_FRAME_ARGS_TYPE_MISSED = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_MISSED;
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::BeginFrameArgs_BeginFrameArgsType,
+      BeginFrameArgs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(::perfetto::protos::pbzero::BeginFrameArgs_BeginFrameArgsType value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SourceId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BeginFrameArgs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SourceId kSourceId() { return {}; }
+  void set_source_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SourceId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SequenceNumber =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BeginFrameArgs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SequenceNumber kSequenceNumber() { return {}; }
+  void set_sequence_number(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SequenceNumber::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FrameTimeUs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BeginFrameArgs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FrameTimeUs kFrameTimeUs() { return {}; }
+  void set_frame_time_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FrameTimeUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DeadlineUs =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BeginFrameArgs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DeadlineUs kDeadlineUs() { return {}; }
+  void set_deadline_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DeadlineUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IntervalDeltaUs =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BeginFrameArgs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IntervalDeltaUs kIntervalDeltaUs() { return {}; }
+  void set_interval_delta_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IntervalDeltaUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OnCriticalPath =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      BeginFrameArgs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OnCriticalPath kOnCriticalPath() { return {}; }
+  void set_on_critical_path(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_OnCriticalPath::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AnimateOnly =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      BeginFrameArgs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AnimateOnly kAnimateOnly() { return {}; }
+  void set_animate_only(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_AnimateOnly::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SourceLocationIid =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      BeginFrameArgs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SourceLocationIid kSourceLocationIid() { return {}; }
+  void set_source_location_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SourceLocationIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SourceLocation =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SourceLocation,
+      BeginFrameArgs>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SourceLocation kSourceLocation() { return {}; }
+  template <typename T = SourceLocation> T* set_source_location() {
+    return BeginNestedMessage<T>(10);
+  }
+
+};
+
+class ChromeCompositorStateMachine_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeCompositorStateMachine_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeCompositorStateMachine_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeCompositorStateMachine_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_major_state() const { return at<1>().valid(); }
+  ::protozero::ConstBytes major_state() const { return at<1>().as_bytes(); }
+  bool has_minor_state() const { return at<2>().valid(); }
+  ::protozero::ConstBytes minor_state() const { return at<2>().as_bytes(); }
+};
+
+class ChromeCompositorStateMachine : public ::protozero::Message {
+ public:
+  using Decoder = ChromeCompositorStateMachine_Decoder;
+  enum : int32_t {
+    kMajorStateFieldNumber = 1,
+    kMinorStateFieldNumber = 2,
+  };
+  using MajorState = ::perfetto::protos::pbzero::ChromeCompositorStateMachine_MajorState;
+  using MinorState = ::perfetto::protos::pbzero::ChromeCompositorStateMachine_MinorState;
+
+  using FieldMetadata_MajorState =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeCompositorStateMachine_MajorState,
+      ChromeCompositorStateMachine>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MajorState kMajorState() { return {}; }
+  template <typename T = ChromeCompositorStateMachine_MajorState> T* set_major_state() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_MinorState =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeCompositorStateMachine_MinorState,
+      ChromeCompositorStateMachine>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MinorState kMinorState() { return {}; }
+  template <typename T = ChromeCompositorStateMachine_MinorState> T* set_minor_state() {
+    return BeginNestedMessage<T>(2);
+  }
+
+};
+
+class ChromeCompositorStateMachine_MinorState_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/46, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeCompositorStateMachine_MinorState_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeCompositorStateMachine_MinorState_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeCompositorStateMachine_MinorState_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_commit_count() const { return at<1>().valid(); }
+  int32_t commit_count() const { return at<1>().as_int32(); }
+  bool has_current_frame_number() const { return at<2>().valid(); }
+  int32_t current_frame_number() const { return at<2>().as_int32(); }
+  bool has_last_frame_number_submit_performed() const { return at<3>().valid(); }
+  int32_t last_frame_number_submit_performed() const { return at<3>().as_int32(); }
+  bool has_last_frame_number_draw_performed() const { return at<4>().valid(); }
+  int32_t last_frame_number_draw_performed() const { return at<4>().as_int32(); }
+  bool has_last_frame_number_begin_main_frame_sent() const { return at<5>().valid(); }
+  int32_t last_frame_number_begin_main_frame_sent() const { return at<5>().as_int32(); }
+  bool has_did_draw() const { return at<6>().valid(); }
+  bool did_draw() const { return at<6>().as_bool(); }
+  bool has_did_send_begin_main_frame_for_current_frame() const { return at<7>().valid(); }
+  bool did_send_begin_main_frame_for_current_frame() const { return at<7>().as_bool(); }
+  bool has_did_notify_begin_main_frame_not_expected_until() const { return at<8>().valid(); }
+  bool did_notify_begin_main_frame_not_expected_until() const { return at<8>().as_bool(); }
+  bool has_did_notify_begin_main_frame_not_expected_soon() const { return at<9>().valid(); }
+  bool did_notify_begin_main_frame_not_expected_soon() const { return at<9>().as_bool(); }
+  bool has_wants_begin_main_frame_not_expected() const { return at<10>().valid(); }
+  bool wants_begin_main_frame_not_expected() const { return at<10>().as_bool(); }
+  bool has_did_commit_during_frame() const { return at<11>().valid(); }
+  bool did_commit_during_frame() const { return at<11>().as_bool(); }
+  bool has_did_invalidate_layer_tree_frame_sink() const { return at<12>().valid(); }
+  bool did_invalidate_layer_tree_frame_sink() const { return at<12>().as_bool(); }
+  bool has_did_perform_impl_side_invalidaion() const { return at<13>().valid(); }
+  bool did_perform_impl_side_invalidaion() const { return at<13>().as_bool(); }
+  bool has_did_prepare_tiles() const { return at<14>().valid(); }
+  bool did_prepare_tiles() const { return at<14>().as_bool(); }
+  bool has_consecutive_checkerboard_animations() const { return at<15>().valid(); }
+  int32_t consecutive_checkerboard_animations() const { return at<15>().as_int32(); }
+  bool has_pending_submit_frames() const { return at<16>().valid(); }
+  int32_t pending_submit_frames() const { return at<16>().as_int32(); }
+  bool has_submit_frames_with_current_layer_tree_frame_sink() const { return at<17>().valid(); }
+  int32_t submit_frames_with_current_layer_tree_frame_sink() const { return at<17>().as_int32(); }
+  bool has_needs_redraw() const { return at<18>().valid(); }
+  bool needs_redraw() const { return at<18>().as_bool(); }
+  bool has_needs_prepare_tiles() const { return at<19>().valid(); }
+  bool needs_prepare_tiles() const { return at<19>().as_bool(); }
+  bool has_needs_begin_main_frame() const { return at<20>().valid(); }
+  bool needs_begin_main_frame() const { return at<20>().as_bool(); }
+  bool has_needs_one_begin_impl_frame() const { return at<21>().valid(); }
+  bool needs_one_begin_impl_frame() const { return at<21>().as_bool(); }
+  bool has_visible() const { return at<22>().valid(); }
+  bool visible() const { return at<22>().as_bool(); }
+  bool has_begin_frame_source_paused() const { return at<23>().valid(); }
+  bool begin_frame_source_paused() const { return at<23>().as_bool(); }
+  bool has_can_draw() const { return at<24>().valid(); }
+  bool can_draw() const { return at<24>().as_bool(); }
+  bool has_resourceless_draw() const { return at<25>().valid(); }
+  bool resourceless_draw() const { return at<25>().as_bool(); }
+  bool has_has_pending_tree() const { return at<26>().valid(); }
+  bool has_pending_tree() const { return at<26>().as_bool(); }
+  bool has_pending_tree_is_ready_for_activation() const { return at<27>().valid(); }
+  bool pending_tree_is_ready_for_activation() const { return at<27>().as_bool(); }
+  bool has_active_tree_needs_first_draw() const { return at<28>().valid(); }
+  bool active_tree_needs_first_draw() const { return at<28>().as_bool(); }
+  bool has_active_tree_is_ready_to_draw() const { return at<29>().valid(); }
+  bool active_tree_is_ready_to_draw() const { return at<29>().as_bool(); }
+  bool has_did_create_and_initialize_first_layer_tree_frame_sink() const { return at<30>().valid(); }
+  bool did_create_and_initialize_first_layer_tree_frame_sink() const { return at<30>().as_bool(); }
+  bool has_tree_priority() const { return at<31>().valid(); }
+  int32_t tree_priority() const { return at<31>().as_int32(); }
+  bool has_scroll_handler_state() const { return at<32>().valid(); }
+  int32_t scroll_handler_state() const { return at<32>().as_int32(); }
+  bool has_critical_begin_main_frame_to_activate_is_fast() const { return at<33>().valid(); }
+  bool critical_begin_main_frame_to_activate_is_fast() const { return at<33>().as_bool(); }
+  bool has_main_thread_missed_last_deadline() const { return at<34>().valid(); }
+  bool main_thread_missed_last_deadline() const { return at<34>().as_bool(); }
+  bool has_skip_next_begin_main_frame_to_reduce_latency() const { return at<35>().valid(); }
+  bool skip_next_begin_main_frame_to_reduce_latency() const { return at<35>().as_bool(); }
+  bool has_video_needs_begin_frames() const { return at<36>().valid(); }
+  bool video_needs_begin_frames() const { return at<36>().as_bool(); }
+  bool has_defer_begin_main_frame() const { return at<37>().valid(); }
+  bool defer_begin_main_frame() const { return at<37>().as_bool(); }
+  bool has_last_commit_had_no_updates() const { return at<38>().valid(); }
+  bool last_commit_had_no_updates() const { return at<38>().as_bool(); }
+  bool has_did_draw_in_last_frame() const { return at<39>().valid(); }
+  bool did_draw_in_last_frame() const { return at<39>().as_bool(); }
+  bool has_did_submit_in_last_frame() const { return at<40>().valid(); }
+  bool did_submit_in_last_frame() const { return at<40>().as_bool(); }
+  bool has_needs_impl_side_invalidation() const { return at<41>().valid(); }
+  bool needs_impl_side_invalidation() const { return at<41>().as_bool(); }
+  bool has_current_pending_tree_is_impl_side() const { return at<42>().valid(); }
+  bool current_pending_tree_is_impl_side() const { return at<42>().as_bool(); }
+  bool has_previous_pending_tree_was_impl_side() const { return at<43>().valid(); }
+  bool previous_pending_tree_was_impl_side() const { return at<43>().as_bool(); }
+  bool has_processing_animation_worklets_for_active_tree() const { return at<44>().valid(); }
+  bool processing_animation_worklets_for_active_tree() const { return at<44>().as_bool(); }
+  bool has_processing_animation_worklets_for_pending_tree() const { return at<45>().valid(); }
+  bool processing_animation_worklets_for_pending_tree() const { return at<45>().as_bool(); }
+  bool has_processing_paint_worklets_for_pending_tree() const { return at<46>().valid(); }
+  bool processing_paint_worklets_for_pending_tree() const { return at<46>().as_bool(); }
+};
+
+class ChromeCompositorStateMachine_MinorState : public ::protozero::Message {
+ public:
+  using Decoder = ChromeCompositorStateMachine_MinorState_Decoder;
+  enum : int32_t {
+    kCommitCountFieldNumber = 1,
+    kCurrentFrameNumberFieldNumber = 2,
+    kLastFrameNumberSubmitPerformedFieldNumber = 3,
+    kLastFrameNumberDrawPerformedFieldNumber = 4,
+    kLastFrameNumberBeginMainFrameSentFieldNumber = 5,
+    kDidDrawFieldNumber = 6,
+    kDidSendBeginMainFrameForCurrentFrameFieldNumber = 7,
+    kDidNotifyBeginMainFrameNotExpectedUntilFieldNumber = 8,
+    kDidNotifyBeginMainFrameNotExpectedSoonFieldNumber = 9,
+    kWantsBeginMainFrameNotExpectedFieldNumber = 10,
+    kDidCommitDuringFrameFieldNumber = 11,
+    kDidInvalidateLayerTreeFrameSinkFieldNumber = 12,
+    kDidPerformImplSideInvalidaionFieldNumber = 13,
+    kDidPrepareTilesFieldNumber = 14,
+    kConsecutiveCheckerboardAnimationsFieldNumber = 15,
+    kPendingSubmitFramesFieldNumber = 16,
+    kSubmitFramesWithCurrentLayerTreeFrameSinkFieldNumber = 17,
+    kNeedsRedrawFieldNumber = 18,
+    kNeedsPrepareTilesFieldNumber = 19,
+    kNeedsBeginMainFrameFieldNumber = 20,
+    kNeedsOneBeginImplFrameFieldNumber = 21,
+    kVisibleFieldNumber = 22,
+    kBeginFrameSourcePausedFieldNumber = 23,
+    kCanDrawFieldNumber = 24,
+    kResourcelessDrawFieldNumber = 25,
+    kHasPendingTreeFieldNumber = 26,
+    kPendingTreeIsReadyForActivationFieldNumber = 27,
+    kActiveTreeNeedsFirstDrawFieldNumber = 28,
+    kActiveTreeIsReadyToDrawFieldNumber = 29,
+    kDidCreateAndInitializeFirstLayerTreeFrameSinkFieldNumber = 30,
+    kTreePriorityFieldNumber = 31,
+    kScrollHandlerStateFieldNumber = 32,
+    kCriticalBeginMainFrameToActivateIsFastFieldNumber = 33,
+    kMainThreadMissedLastDeadlineFieldNumber = 34,
+    kSkipNextBeginMainFrameToReduceLatencyFieldNumber = 35,
+    kVideoNeedsBeginFramesFieldNumber = 36,
+    kDeferBeginMainFrameFieldNumber = 37,
+    kLastCommitHadNoUpdatesFieldNumber = 38,
+    kDidDrawInLastFrameFieldNumber = 39,
+    kDidSubmitInLastFrameFieldNumber = 40,
+    kNeedsImplSideInvalidationFieldNumber = 41,
+    kCurrentPendingTreeIsImplSideFieldNumber = 42,
+    kPreviousPendingTreeWasImplSideFieldNumber = 43,
+    kProcessingAnimationWorkletsForActiveTreeFieldNumber = 44,
+    kProcessingAnimationWorkletsForPendingTreeFieldNumber = 45,
+    kProcessingPaintWorkletsForPendingTreeFieldNumber = 46,
+  };
+  using TreePriority = ::perfetto::protos::pbzero::ChromeCompositorStateMachine_MinorState_TreePriority;
+  using ScrollHandlerState = ::perfetto::protos::pbzero::ChromeCompositorStateMachine_MinorState_ScrollHandlerState;
+  static const TreePriority TREE_PRIORITY_UNSPECIFIED = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_UNSPECIFIED;
+  static const TreePriority TREE_PRIORITY_SAME_PRIORITY_FOR_BOTH_TREES = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_SAME_PRIORITY_FOR_BOTH_TREES;
+  static const TreePriority TREE_PRIORITY_SMOOTHNESS_TAKES_PRIORITY = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_SMOOTHNESS_TAKES_PRIORITY;
+  static const TreePriority TREE_PRIORITY_NEW_CONTENT_TAKES_PRIORITY = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_NEW_CONTENT_TAKES_PRIORITY;
+  static const ScrollHandlerState SCROLL_HANDLER_UNSPECIFIED = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_HANDLER_UNSPECIFIED;
+  static const ScrollHandlerState SCROLL_AFFECTS_SCROLL_HANDLER = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_AFFECTS_SCROLL_HANDLER;
+  static const ScrollHandlerState SCROLL_DOES_NOT_AFFECT_SCROLL_HANDLER = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_DOES_NOT_AFFECT_SCROLL_HANDLER;
+
+  using FieldMetadata_CommitCount =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CommitCount kCommitCount() { return {}; }
+  void set_commit_count(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CommitCount::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CurrentFrameNumber =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CurrentFrameNumber kCurrentFrameNumber() { return {}; }
+  void set_current_frame_number(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CurrentFrameNumber::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LastFrameNumberSubmitPerformed =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LastFrameNumberSubmitPerformed kLastFrameNumberSubmitPerformed() { return {}; }
+  void set_last_frame_number_submit_performed(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LastFrameNumberSubmitPerformed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LastFrameNumberDrawPerformed =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LastFrameNumberDrawPerformed kLastFrameNumberDrawPerformed() { return {}; }
+  void set_last_frame_number_draw_performed(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LastFrameNumberDrawPerformed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LastFrameNumberBeginMainFrameSent =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LastFrameNumberBeginMainFrameSent kLastFrameNumberBeginMainFrameSent() { return {}; }
+  void set_last_frame_number_begin_main_frame_sent(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LastFrameNumberBeginMainFrameSent::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DidDraw =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DidDraw kDidDraw() { return {}; }
+  void set_did_draw(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DidDraw::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DidSendBeginMainFrameForCurrentFrame =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DidSendBeginMainFrameForCurrentFrame kDidSendBeginMainFrameForCurrentFrame() { return {}; }
+  void set_did_send_begin_main_frame_for_current_frame(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DidSendBeginMainFrameForCurrentFrame::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DidNotifyBeginMainFrameNotExpectedUntil =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DidNotifyBeginMainFrameNotExpectedUntil kDidNotifyBeginMainFrameNotExpectedUntil() { return {}; }
+  void set_did_notify_begin_main_frame_not_expected_until(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DidNotifyBeginMainFrameNotExpectedUntil::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DidNotifyBeginMainFrameNotExpectedSoon =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DidNotifyBeginMainFrameNotExpectedSoon kDidNotifyBeginMainFrameNotExpectedSoon() { return {}; }
+  void set_did_notify_begin_main_frame_not_expected_soon(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DidNotifyBeginMainFrameNotExpectedSoon::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_WantsBeginMainFrameNotExpected =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WantsBeginMainFrameNotExpected kWantsBeginMainFrameNotExpected() { return {}; }
+  void set_wants_begin_main_frame_not_expected(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_WantsBeginMainFrameNotExpected::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DidCommitDuringFrame =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DidCommitDuringFrame kDidCommitDuringFrame() { return {}; }
+  void set_did_commit_during_frame(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DidCommitDuringFrame::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DidInvalidateLayerTreeFrameSink =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DidInvalidateLayerTreeFrameSink kDidInvalidateLayerTreeFrameSink() { return {}; }
+  void set_did_invalidate_layer_tree_frame_sink(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DidInvalidateLayerTreeFrameSink::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DidPerformImplSideInvalidaion =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DidPerformImplSideInvalidaion kDidPerformImplSideInvalidaion() { return {}; }
+  void set_did_perform_impl_side_invalidaion(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DidPerformImplSideInvalidaion::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DidPrepareTiles =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DidPrepareTiles kDidPrepareTiles() { return {}; }
+  void set_did_prepare_tiles(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DidPrepareTiles::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ConsecutiveCheckerboardAnimations =
+    ::protozero::proto_utils::FieldMetadata<
+      15,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ConsecutiveCheckerboardAnimations kConsecutiveCheckerboardAnimations() { return {}; }
+  void set_consecutive_checkerboard_animations(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ConsecutiveCheckerboardAnimations::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PendingSubmitFrames =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PendingSubmitFrames kPendingSubmitFrames() { return {}; }
+  void set_pending_submit_frames(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PendingSubmitFrames::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SubmitFramesWithCurrentLayerTreeFrameSink =
+    ::protozero::proto_utils::FieldMetadata<
+      17,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SubmitFramesWithCurrentLayerTreeFrameSink kSubmitFramesWithCurrentLayerTreeFrameSink() { return {}; }
+  void set_submit_frames_with_current_layer_tree_frame_sink(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SubmitFramesWithCurrentLayerTreeFrameSink::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NeedsRedraw =
+    ::protozero::proto_utils::FieldMetadata<
+      18,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NeedsRedraw kNeedsRedraw() { return {}; }
+  void set_needs_redraw(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_NeedsRedraw::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NeedsPrepareTiles =
+    ::protozero::proto_utils::FieldMetadata<
+      19,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NeedsPrepareTiles kNeedsPrepareTiles() { return {}; }
+  void set_needs_prepare_tiles(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_NeedsPrepareTiles::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NeedsBeginMainFrame =
+    ::protozero::proto_utils::FieldMetadata<
+      20,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NeedsBeginMainFrame kNeedsBeginMainFrame() { return {}; }
+  void set_needs_begin_main_frame(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_NeedsBeginMainFrame::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NeedsOneBeginImplFrame =
+    ::protozero::proto_utils::FieldMetadata<
+      21,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NeedsOneBeginImplFrame kNeedsOneBeginImplFrame() { return {}; }
+  void set_needs_one_begin_impl_frame(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_NeedsOneBeginImplFrame::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Visible =
+    ::protozero::proto_utils::FieldMetadata<
+      22,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Visible kVisible() { return {}; }
+  void set_visible(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_Visible::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BeginFrameSourcePaused =
+    ::protozero::proto_utils::FieldMetadata<
+      23,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BeginFrameSourcePaused kBeginFrameSourcePaused() { return {}; }
+  void set_begin_frame_source_paused(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_BeginFrameSourcePaused::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CanDraw =
+    ::protozero::proto_utils::FieldMetadata<
+      24,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CanDraw kCanDraw() { return {}; }
+  void set_can_draw(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_CanDraw::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ResourcelessDraw =
+    ::protozero::proto_utils::FieldMetadata<
+      25,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ResourcelessDraw kResourcelessDraw() { return {}; }
+  void set_resourceless_draw(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_ResourcelessDraw::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HasPendingTree =
+    ::protozero::proto_utils::FieldMetadata<
+      26,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HasPendingTree kHasPendingTree() { return {}; }
+  void set_has_pending_tree(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_HasPendingTree::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PendingTreeIsReadyForActivation =
+    ::protozero::proto_utils::FieldMetadata<
+      27,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PendingTreeIsReadyForActivation kPendingTreeIsReadyForActivation() { return {}; }
+  void set_pending_tree_is_ready_for_activation(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_PendingTreeIsReadyForActivation::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ActiveTreeNeedsFirstDraw =
+    ::protozero::proto_utils::FieldMetadata<
+      28,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ActiveTreeNeedsFirstDraw kActiveTreeNeedsFirstDraw() { return {}; }
+  void set_active_tree_needs_first_draw(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_ActiveTreeNeedsFirstDraw::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ActiveTreeIsReadyToDraw =
+    ::protozero::proto_utils::FieldMetadata<
+      29,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ActiveTreeIsReadyToDraw kActiveTreeIsReadyToDraw() { return {}; }
+  void set_active_tree_is_ready_to_draw(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_ActiveTreeIsReadyToDraw::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DidCreateAndInitializeFirstLayerTreeFrameSink =
+    ::protozero::proto_utils::FieldMetadata<
+      30,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DidCreateAndInitializeFirstLayerTreeFrameSink kDidCreateAndInitializeFirstLayerTreeFrameSink() { return {}; }
+  void set_did_create_and_initialize_first_layer_tree_frame_sink(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DidCreateAndInitializeFirstLayerTreeFrameSink::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TreePriority =
+    ::protozero::proto_utils::FieldMetadata<
+      31,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeCompositorStateMachine_MinorState_TreePriority,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TreePriority kTreePriority() { return {}; }
+  void set_tree_priority(::perfetto::protos::pbzero::ChromeCompositorStateMachine_MinorState_TreePriority value) {
+    static constexpr uint32_t field_id = FieldMetadata_TreePriority::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ScrollHandlerState =
+    ::protozero::proto_utils::FieldMetadata<
+      32,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeCompositorStateMachine_MinorState_ScrollHandlerState,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ScrollHandlerState kScrollHandlerState() { return {}; }
+  void set_scroll_handler_state(::perfetto::protos::pbzero::ChromeCompositorStateMachine_MinorState_ScrollHandlerState value) {
+    static constexpr uint32_t field_id = FieldMetadata_ScrollHandlerState::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CriticalBeginMainFrameToActivateIsFast =
+    ::protozero::proto_utils::FieldMetadata<
+      33,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CriticalBeginMainFrameToActivateIsFast kCriticalBeginMainFrameToActivateIsFast() { return {}; }
+  void set_critical_begin_main_frame_to_activate_is_fast(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_CriticalBeginMainFrameToActivateIsFast::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MainThreadMissedLastDeadline =
+    ::protozero::proto_utils::FieldMetadata<
+      34,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MainThreadMissedLastDeadline kMainThreadMissedLastDeadline() { return {}; }
+  void set_main_thread_missed_last_deadline(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_MainThreadMissedLastDeadline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SkipNextBeginMainFrameToReduceLatency =
+    ::protozero::proto_utils::FieldMetadata<
+      35,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SkipNextBeginMainFrameToReduceLatency kSkipNextBeginMainFrameToReduceLatency() { return {}; }
+  void set_skip_next_begin_main_frame_to_reduce_latency(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_SkipNextBeginMainFrameToReduceLatency::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_VideoNeedsBeginFrames =
+    ::protozero::proto_utils::FieldMetadata<
+      36,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VideoNeedsBeginFrames kVideoNeedsBeginFrames() { return {}; }
+  void set_video_needs_begin_frames(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_VideoNeedsBeginFrames::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DeferBeginMainFrame =
+    ::protozero::proto_utils::FieldMetadata<
+      37,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DeferBeginMainFrame kDeferBeginMainFrame() { return {}; }
+  void set_defer_begin_main_frame(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DeferBeginMainFrame::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LastCommitHadNoUpdates =
+    ::protozero::proto_utils::FieldMetadata<
+      38,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LastCommitHadNoUpdates kLastCommitHadNoUpdates() { return {}; }
+  void set_last_commit_had_no_updates(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_LastCommitHadNoUpdates::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DidDrawInLastFrame =
+    ::protozero::proto_utils::FieldMetadata<
+      39,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DidDrawInLastFrame kDidDrawInLastFrame() { return {}; }
+  void set_did_draw_in_last_frame(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DidDrawInLastFrame::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DidSubmitInLastFrame =
+    ::protozero::proto_utils::FieldMetadata<
+      40,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DidSubmitInLastFrame kDidSubmitInLastFrame() { return {}; }
+  void set_did_submit_in_last_frame(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_DidSubmitInLastFrame::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NeedsImplSideInvalidation =
+    ::protozero::proto_utils::FieldMetadata<
+      41,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NeedsImplSideInvalidation kNeedsImplSideInvalidation() { return {}; }
+  void set_needs_impl_side_invalidation(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_NeedsImplSideInvalidation::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CurrentPendingTreeIsImplSide =
+    ::protozero::proto_utils::FieldMetadata<
+      42,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CurrentPendingTreeIsImplSide kCurrentPendingTreeIsImplSide() { return {}; }
+  void set_current_pending_tree_is_impl_side(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_CurrentPendingTreeIsImplSide::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PreviousPendingTreeWasImplSide =
+    ::protozero::proto_utils::FieldMetadata<
+      43,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PreviousPendingTreeWasImplSide kPreviousPendingTreeWasImplSide() { return {}; }
+  void set_previous_pending_tree_was_impl_side(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_PreviousPendingTreeWasImplSide::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProcessingAnimationWorkletsForActiveTree =
+    ::protozero::proto_utils::FieldMetadata<
+      44,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessingAnimationWorkletsForActiveTree kProcessingAnimationWorkletsForActiveTree() { return {}; }
+  void set_processing_animation_worklets_for_active_tree(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProcessingAnimationWorkletsForActiveTree::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProcessingAnimationWorkletsForPendingTree =
+    ::protozero::proto_utils::FieldMetadata<
+      45,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessingAnimationWorkletsForPendingTree kProcessingAnimationWorkletsForPendingTree() { return {}; }
+  void set_processing_animation_worklets_for_pending_tree(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProcessingAnimationWorkletsForPendingTree::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProcessingPaintWorkletsForPendingTree =
+    ::protozero::proto_utils::FieldMetadata<
+      46,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorStateMachine_MinorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessingPaintWorkletsForPendingTree kProcessingPaintWorkletsForPendingTree() { return {}; }
+  void set_processing_paint_worklets_for_pending_tree(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProcessingPaintWorkletsForPendingTree::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ChromeCompositorStateMachine_MajorState_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeCompositorStateMachine_MajorState_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeCompositorStateMachine_MajorState_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeCompositorStateMachine_MajorState_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_next_action() const { return at<1>().valid(); }
+  int32_t next_action() const { return at<1>().as_int32(); }
+  bool has_begin_impl_frame_state() const { return at<2>().valid(); }
+  int32_t begin_impl_frame_state() const { return at<2>().as_int32(); }
+  bool has_begin_main_frame_state() const { return at<3>().valid(); }
+  int32_t begin_main_frame_state() const { return at<3>().as_int32(); }
+  bool has_layer_tree_frame_sink_state() const { return at<4>().valid(); }
+  int32_t layer_tree_frame_sink_state() const { return at<4>().as_int32(); }
+  bool has_forced_redraw_state() const { return at<5>().valid(); }
+  int32_t forced_redraw_state() const { return at<5>().as_int32(); }
+};
+
+class ChromeCompositorStateMachine_MajorState : public ::protozero::Message {
+ public:
+  using Decoder = ChromeCompositorStateMachine_MajorState_Decoder;
+  enum : int32_t {
+    kNextActionFieldNumber = 1,
+    kBeginImplFrameStateFieldNumber = 2,
+    kBeginMainFrameStateFieldNumber = 3,
+    kLayerTreeFrameSinkStateFieldNumber = 4,
+    kForcedRedrawStateFieldNumber = 5,
+  };
+  using BeginImplFrameState = ::perfetto::protos::pbzero::ChromeCompositorStateMachine_MajorState_BeginImplFrameState;
+  using BeginMainFrameState = ::perfetto::protos::pbzero::ChromeCompositorStateMachine_MajorState_BeginMainFrameState;
+  using LayerTreeFrameSinkState = ::perfetto::protos::pbzero::ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState;
+  using ForcedRedrawOnTimeoutState = ::perfetto::protos::pbzero::ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState;
+  static const BeginImplFrameState BEGIN_IMPL_FRAME_UNSPECIFIED = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_UNSPECIFIED;
+  static const BeginImplFrameState BEGIN_IMPL_FRAME_IDLE = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_IDLE;
+  static const BeginImplFrameState BEGIN_IMPL_FRAME_INSIDE_BEGIN_FRAME = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_BEGIN_FRAME;
+  static const BeginImplFrameState BEGIN_IMPL_FRAME_INSIDE_DEADLINE = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_DEADLINE;
+  static const BeginMainFrameState BEGIN_MAIN_FRAME_UNSPECIFIED = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_UNSPECIFIED;
+  static const BeginMainFrameState BEGIN_MAIN_FRAME_IDLE = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_IDLE;
+  static const BeginMainFrameState BEGIN_MAIN_FRAME_SENT = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_SENT;
+  static const BeginMainFrameState BEGIN_MAIN_FRAME_READY_TO_COMMIT = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_READY_TO_COMMIT;
+  static const LayerTreeFrameSinkState LAYER_TREE_FRAME_UNSPECIFIED = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_UNSPECIFIED;
+  static const LayerTreeFrameSinkState LAYER_TREE_FRAME_NONE = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_NONE;
+  static const LayerTreeFrameSinkState LAYER_TREE_FRAME_ACTIVE = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_ACTIVE;
+  static const LayerTreeFrameSinkState LAYER_TREE_FRAME_CREATING = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_CREATING;
+  static const LayerTreeFrameSinkState LAYER_TREE_FRAME_WAITING_FOR_FIRST_COMMIT = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_COMMIT;
+  static const LayerTreeFrameSinkState LAYER_TREE_FRAME_WAITING_FOR_FIRST_ACTIVATION = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_ACTIVATION;
+  static const ForcedRedrawOnTimeoutState FORCED_REDRAW_UNSPECIFIED = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_UNSPECIFIED;
+  static const ForcedRedrawOnTimeoutState FORCED_REDRAW_IDLE = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_IDLE;
+  static const ForcedRedrawOnTimeoutState FORCED_REDRAW_WAITING_FOR_COMMIT = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_COMMIT;
+  static const ForcedRedrawOnTimeoutState FORCED_REDRAW_WAITING_FOR_ACTIVATION = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_ACTIVATION;
+  static const ForcedRedrawOnTimeoutState FORCED_REDRAW_WAITING_FOR_DRAW = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_DRAW;
+
+  using FieldMetadata_NextAction =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeCompositorSchedulerAction,
+      ChromeCompositorStateMachine_MajorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NextAction kNextAction() { return {}; }
+  void set_next_action(::perfetto::protos::pbzero::ChromeCompositorSchedulerAction value) {
+    static constexpr uint32_t field_id = FieldMetadata_NextAction::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BeginImplFrameState =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeCompositorStateMachine_MajorState_BeginImplFrameState,
+      ChromeCompositorStateMachine_MajorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BeginImplFrameState kBeginImplFrameState() { return {}; }
+  void set_begin_impl_frame_state(::perfetto::protos::pbzero::ChromeCompositorStateMachine_MajorState_BeginImplFrameState value) {
+    static constexpr uint32_t field_id = FieldMetadata_BeginImplFrameState::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BeginMainFrameState =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeCompositorStateMachine_MajorState_BeginMainFrameState,
+      ChromeCompositorStateMachine_MajorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BeginMainFrameState kBeginMainFrameState() { return {}; }
+  void set_begin_main_frame_state(::perfetto::protos::pbzero::ChromeCompositorStateMachine_MajorState_BeginMainFrameState value) {
+    static constexpr uint32_t field_id = FieldMetadata_BeginMainFrameState::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LayerTreeFrameSinkState =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState,
+      ChromeCompositorStateMachine_MajorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LayerTreeFrameSinkState kLayerTreeFrameSinkState() { return {}; }
+  void set_layer_tree_frame_sink_state(::perfetto::protos::pbzero::ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState value) {
+    static constexpr uint32_t field_id = FieldMetadata_LayerTreeFrameSinkState::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ForcedRedrawState =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState,
+      ChromeCompositorStateMachine_MajorState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ForcedRedrawState kForcedRedrawState() { return {}; }
+  void set_forced_redraw_state(::perfetto::protos::pbzero::ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState value) {
+    static constexpr uint32_t field_id = FieldMetadata_ForcedRedrawState::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ChromeCompositorSchedulerState_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/17, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeCompositorSchedulerState_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeCompositorSchedulerState_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeCompositorSchedulerState_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_state_machine() const { return at<1>().valid(); }
+  ::protozero::ConstBytes state_machine() const { return at<1>().as_bytes(); }
+  bool has_observing_begin_frame_source() const { return at<2>().valid(); }
+  bool observing_begin_frame_source() const { return at<2>().as_bool(); }
+  bool has_begin_impl_frame_deadline_task() const { return at<3>().valid(); }
+  bool begin_impl_frame_deadline_task() const { return at<3>().as_bool(); }
+  bool has_pending_begin_frame_task() const { return at<4>().valid(); }
+  bool pending_begin_frame_task() const { return at<4>().as_bool(); }
+  bool has_skipped_last_frame_missed_exceeded_deadline() const { return at<5>().valid(); }
+  bool skipped_last_frame_missed_exceeded_deadline() const { return at<5>().as_bool(); }
+  bool has_skipped_last_frame_to_reduce_latency() const { return at<6>().valid(); }
+  bool skipped_last_frame_to_reduce_latency() const { return at<6>().as_bool(); }
+  bool has_inside_action() const { return at<7>().valid(); }
+  int32_t inside_action() const { return at<7>().as_int32(); }
+  bool has_deadline_mode() const { return at<8>().valid(); }
+  int32_t deadline_mode() const { return at<8>().as_int32(); }
+  bool has_deadline_us() const { return at<9>().valid(); }
+  int64_t deadline_us() const { return at<9>().as_int64(); }
+  bool has_deadline_scheduled_at_us() const { return at<10>().valid(); }
+  int64_t deadline_scheduled_at_us() const { return at<10>().as_int64(); }
+  bool has_now_us() const { return at<11>().valid(); }
+  int64_t now_us() const { return at<11>().as_int64(); }
+  bool has_now_to_deadline_delta_us() const { return at<12>().valid(); }
+  int64_t now_to_deadline_delta_us() const { return at<12>().as_int64(); }
+  bool has_now_to_deadline_scheduled_at_delta_us() const { return at<13>().valid(); }
+  int64_t now_to_deadline_scheduled_at_delta_us() const { return at<13>().as_int64(); }
+  bool has_begin_impl_frame_args() const { return at<14>().valid(); }
+  ::protozero::ConstBytes begin_impl_frame_args() const { return at<14>().as_bytes(); }
+  bool has_begin_frame_observer_state() const { return at<15>().valid(); }
+  ::protozero::ConstBytes begin_frame_observer_state() const { return at<15>().as_bytes(); }
+  bool has_begin_frame_source_state() const { return at<16>().valid(); }
+  ::protozero::ConstBytes begin_frame_source_state() const { return at<16>().as_bytes(); }
+  bool has_compositor_timing_history() const { return at<17>().valid(); }
+  ::protozero::ConstBytes compositor_timing_history() const { return at<17>().as_bytes(); }
+};
+
+class ChromeCompositorSchedulerState : public ::protozero::Message {
+ public:
+  using Decoder = ChromeCompositorSchedulerState_Decoder;
+  enum : int32_t {
+    kStateMachineFieldNumber = 1,
+    kObservingBeginFrameSourceFieldNumber = 2,
+    kBeginImplFrameDeadlineTaskFieldNumber = 3,
+    kPendingBeginFrameTaskFieldNumber = 4,
+    kSkippedLastFrameMissedExceededDeadlineFieldNumber = 5,
+    kSkippedLastFrameToReduceLatencyFieldNumber = 6,
+    kInsideActionFieldNumber = 7,
+    kDeadlineModeFieldNumber = 8,
+    kDeadlineUsFieldNumber = 9,
+    kDeadlineScheduledAtUsFieldNumber = 10,
+    kNowUsFieldNumber = 11,
+    kNowToDeadlineDeltaUsFieldNumber = 12,
+    kNowToDeadlineScheduledAtDeltaUsFieldNumber = 13,
+    kBeginImplFrameArgsFieldNumber = 14,
+    kBeginFrameObserverStateFieldNumber = 15,
+    kBeginFrameSourceStateFieldNumber = 16,
+    kCompositorTimingHistoryFieldNumber = 17,
+  };
+  using BeginImplFrameDeadlineMode = ::perfetto::protos::pbzero::ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode;
+  static const BeginImplFrameDeadlineMode DEADLINE_MODE_UNSPECIFIED = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_UNSPECIFIED;
+  static const BeginImplFrameDeadlineMode DEADLINE_MODE_NONE = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_NONE;
+  static const BeginImplFrameDeadlineMode DEADLINE_MODE_IMMEDIATE = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_IMMEDIATE;
+  static const BeginImplFrameDeadlineMode DEADLINE_MODE_REGULAR = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_REGULAR;
+  static const BeginImplFrameDeadlineMode DEADLINE_MODE_LATE = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_LATE;
+  static const BeginImplFrameDeadlineMode DEADLINE_MODE_BLOCKED = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_BLOCKED;
+
+  using FieldMetadata_StateMachine =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeCompositorStateMachine,
+      ChromeCompositorSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StateMachine kStateMachine() { return {}; }
+  template <typename T = ChromeCompositorStateMachine> T* set_state_machine() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_ObservingBeginFrameSource =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ObservingBeginFrameSource kObservingBeginFrameSource() { return {}; }
+  void set_observing_begin_frame_source(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_ObservingBeginFrameSource::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BeginImplFrameDeadlineTask =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BeginImplFrameDeadlineTask kBeginImplFrameDeadlineTask() { return {}; }
+  void set_begin_impl_frame_deadline_task(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_BeginImplFrameDeadlineTask::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PendingBeginFrameTask =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PendingBeginFrameTask kPendingBeginFrameTask() { return {}; }
+  void set_pending_begin_frame_task(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_PendingBeginFrameTask::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SkippedLastFrameMissedExceededDeadline =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SkippedLastFrameMissedExceededDeadline kSkippedLastFrameMissedExceededDeadline() { return {}; }
+  void set_skipped_last_frame_missed_exceeded_deadline(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_SkippedLastFrameMissedExceededDeadline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SkippedLastFrameToReduceLatency =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeCompositorSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SkippedLastFrameToReduceLatency kSkippedLastFrameToReduceLatency() { return {}; }
+  void set_skipped_last_frame_to_reduce_latency(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_SkippedLastFrameToReduceLatency::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_InsideAction =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeCompositorSchedulerAction,
+      ChromeCompositorSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InsideAction kInsideAction() { return {}; }
+  void set_inside_action(::perfetto::protos::pbzero::ChromeCompositorSchedulerAction value) {
+    static constexpr uint32_t field_id = FieldMetadata_InsideAction::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DeadlineMode =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode,
+      ChromeCompositorSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DeadlineMode kDeadlineMode() { return {}; }
+  void set_deadline_mode(::perfetto::protos::pbzero::ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode value) {
+    static constexpr uint32_t field_id = FieldMetadata_DeadlineMode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DeadlineUs =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ChromeCompositorSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DeadlineUs kDeadlineUs() { return {}; }
+  void set_deadline_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DeadlineUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DeadlineScheduledAtUs =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ChromeCompositorSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DeadlineScheduledAtUs kDeadlineScheduledAtUs() { return {}; }
+  void set_deadline_scheduled_at_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DeadlineScheduledAtUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NowUs =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ChromeCompositorSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NowUs kNowUs() { return {}; }
+  void set_now_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NowUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NowToDeadlineDeltaUs =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ChromeCompositorSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NowToDeadlineDeltaUs kNowToDeadlineDeltaUs() { return {}; }
+  void set_now_to_deadline_delta_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NowToDeadlineDeltaUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NowToDeadlineScheduledAtDeltaUs =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ChromeCompositorSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NowToDeadlineScheduledAtDeltaUs kNowToDeadlineScheduledAtDeltaUs() { return {}; }
+  void set_now_to_deadline_scheduled_at_delta_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NowToDeadlineScheduledAtDeltaUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BeginImplFrameArgs =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BeginImplFrameArgs,
+      ChromeCompositorSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BeginImplFrameArgs kBeginImplFrameArgs() { return {}; }
+  template <typename T = BeginImplFrameArgs> T* set_begin_impl_frame_args() {
+    return BeginNestedMessage<T>(14);
+  }
+
+
+  using FieldMetadata_BeginFrameObserverState =
+    ::protozero::proto_utils::FieldMetadata<
+      15,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BeginFrameObserverState,
+      ChromeCompositorSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BeginFrameObserverState kBeginFrameObserverState() { return {}; }
+  template <typename T = BeginFrameObserverState> T* set_begin_frame_observer_state() {
+    return BeginNestedMessage<T>(15);
+  }
+
+
+  using FieldMetadata_BeginFrameSourceState =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BeginFrameSourceState,
+      ChromeCompositorSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BeginFrameSourceState kBeginFrameSourceState() { return {}; }
+  template <typename T = BeginFrameSourceState> T* set_begin_frame_source_state() {
+    return BeginNestedMessage<T>(16);
+  }
+
+
+  using FieldMetadata_CompositorTimingHistory =
+    ::protozero::proto_utils::FieldMetadata<
+      17,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CompositorTimingHistory,
+      ChromeCompositorSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CompositorTimingHistory kCompositorTimingHistory() { return {}; }
+  template <typename T = CompositorTimingHistory> T* set_compositor_timing_history() {
+    return BeginNestedMessage<T>(17);
+  }
+
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_content_settings_event_info.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_CONTENT_SETTINGS_EVENT_INFO_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_CONTENT_SETTINGS_EVENT_INFO_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class ChromeContentSettingsEventInfo_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeContentSettingsEventInfo_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeContentSettingsEventInfo_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeContentSettingsEventInfo_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_number_of_exceptions() const { return at<1>().valid(); }
+  uint32_t number_of_exceptions() const { return at<1>().as_uint32(); }
+};
+
+class ChromeContentSettingsEventInfo : public ::protozero::Message {
+ public:
+  using Decoder = ChromeContentSettingsEventInfo_Decoder;
+  enum : int32_t {
+    kNumberOfExceptionsFieldNumber = 1,
+  };
+
+  using FieldMetadata_NumberOfExceptions =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ChromeContentSettingsEventInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NumberOfExceptions kNumberOfExceptions() { return {}; }
+  void set_number_of_exceptions(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NumberOfExceptions::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_frame_reporter.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_FRAME_REPORTER_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_FRAME_REPORTER_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum ChromeFrameReporter_FrameDropReason : int32_t;
+enum ChromeFrameReporter_ScrollState : int32_t;
+enum ChromeFrameReporter_State : int32_t;
+
+enum ChromeFrameReporter_State : int32_t {
+  ChromeFrameReporter_State_STATE_NO_UPDATE_DESIRED = 0,
+  ChromeFrameReporter_State_STATE_PRESENTED_ALL = 1,
+  ChromeFrameReporter_State_STATE_PRESENTED_PARTIAL = 2,
+  ChromeFrameReporter_State_STATE_DROPPED = 3,
+};
+
+const ChromeFrameReporter_State ChromeFrameReporter_State_MIN = ChromeFrameReporter_State_STATE_NO_UPDATE_DESIRED;
+const ChromeFrameReporter_State ChromeFrameReporter_State_MAX = ChromeFrameReporter_State_STATE_DROPPED;
+
+enum ChromeFrameReporter_FrameDropReason : int32_t {
+  ChromeFrameReporter_FrameDropReason_REASON_UNSPECIFIED = 0,
+  ChromeFrameReporter_FrameDropReason_REASON_DISPLAY_COMPOSITOR = 1,
+  ChromeFrameReporter_FrameDropReason_REASON_MAIN_THREAD = 2,
+  ChromeFrameReporter_FrameDropReason_REASON_CLIENT_COMPOSITOR = 3,
+};
+
+const ChromeFrameReporter_FrameDropReason ChromeFrameReporter_FrameDropReason_MIN = ChromeFrameReporter_FrameDropReason_REASON_UNSPECIFIED;
+const ChromeFrameReporter_FrameDropReason ChromeFrameReporter_FrameDropReason_MAX = ChromeFrameReporter_FrameDropReason_REASON_CLIENT_COMPOSITOR;
+
+enum ChromeFrameReporter_ScrollState : int32_t {
+  ChromeFrameReporter_ScrollState_SCROLL_NONE = 0,
+  ChromeFrameReporter_ScrollState_SCROLL_MAIN_THREAD = 1,
+  ChromeFrameReporter_ScrollState_SCROLL_COMPOSITOR_THREAD = 2,
+  ChromeFrameReporter_ScrollState_SCROLL_UNKNOWN = 3,
+};
+
+const ChromeFrameReporter_ScrollState ChromeFrameReporter_ScrollState_MIN = ChromeFrameReporter_ScrollState_SCROLL_NONE;
+const ChromeFrameReporter_ScrollState ChromeFrameReporter_ScrollState_MAX = ChromeFrameReporter_ScrollState_SCROLL_UNKNOWN;
+
+class ChromeFrameReporter_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/11, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeFrameReporter_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeFrameReporter_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeFrameReporter_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_state() const { return at<1>().valid(); }
+  int32_t state() const { return at<1>().as_int32(); }
+  bool has_reason() const { return at<2>().valid(); }
+  int32_t reason() const { return at<2>().as_int32(); }
+  bool has_frame_source() const { return at<3>().valid(); }
+  uint64_t frame_source() const { return at<3>().as_uint64(); }
+  bool has_frame_sequence() const { return at<4>().valid(); }
+  uint64_t frame_sequence() const { return at<4>().as_uint64(); }
+  bool has_affects_smoothness() const { return at<5>().valid(); }
+  bool affects_smoothness() const { return at<5>().as_bool(); }
+  bool has_scroll_state() const { return at<6>().valid(); }
+  int32_t scroll_state() const { return at<6>().as_int32(); }
+  bool has_has_main_animation() const { return at<7>().valid(); }
+  bool has_main_animation() const { return at<7>().as_bool(); }
+  bool has_has_compositor_animation() const { return at<8>().valid(); }
+  bool has_compositor_animation() const { return at<8>().as_bool(); }
+  bool has_has_smooth_input_main() const { return at<9>().valid(); }
+  bool has_smooth_input_main() const { return at<9>().as_bool(); }
+  bool has_has_missing_content() const { return at<10>().valid(); }
+  bool has_missing_content() const { return at<10>().as_bool(); }
+  bool has_layer_tree_host_id() const { return at<11>().valid(); }
+  uint64_t layer_tree_host_id() const { return at<11>().as_uint64(); }
+};
+
+class ChromeFrameReporter : public ::protozero::Message {
+ public:
+  using Decoder = ChromeFrameReporter_Decoder;
+  enum : int32_t {
+    kStateFieldNumber = 1,
+    kReasonFieldNumber = 2,
+    kFrameSourceFieldNumber = 3,
+    kFrameSequenceFieldNumber = 4,
+    kAffectsSmoothnessFieldNumber = 5,
+    kScrollStateFieldNumber = 6,
+    kHasMainAnimationFieldNumber = 7,
+    kHasCompositorAnimationFieldNumber = 8,
+    kHasSmoothInputMainFieldNumber = 9,
+    kHasMissingContentFieldNumber = 10,
+    kLayerTreeHostIdFieldNumber = 11,
+  };
+  using State = ::perfetto::protos::pbzero::ChromeFrameReporter_State;
+  using FrameDropReason = ::perfetto::protos::pbzero::ChromeFrameReporter_FrameDropReason;
+  using ScrollState = ::perfetto::protos::pbzero::ChromeFrameReporter_ScrollState;
+  static const State STATE_NO_UPDATE_DESIRED = ChromeFrameReporter_State_STATE_NO_UPDATE_DESIRED;
+  static const State STATE_PRESENTED_ALL = ChromeFrameReporter_State_STATE_PRESENTED_ALL;
+  static const State STATE_PRESENTED_PARTIAL = ChromeFrameReporter_State_STATE_PRESENTED_PARTIAL;
+  static const State STATE_DROPPED = ChromeFrameReporter_State_STATE_DROPPED;
+  static const FrameDropReason REASON_UNSPECIFIED = ChromeFrameReporter_FrameDropReason_REASON_UNSPECIFIED;
+  static const FrameDropReason REASON_DISPLAY_COMPOSITOR = ChromeFrameReporter_FrameDropReason_REASON_DISPLAY_COMPOSITOR;
+  static const FrameDropReason REASON_MAIN_THREAD = ChromeFrameReporter_FrameDropReason_REASON_MAIN_THREAD;
+  static const FrameDropReason REASON_CLIENT_COMPOSITOR = ChromeFrameReporter_FrameDropReason_REASON_CLIENT_COMPOSITOR;
+  static const ScrollState SCROLL_NONE = ChromeFrameReporter_ScrollState_SCROLL_NONE;
+  static const ScrollState SCROLL_MAIN_THREAD = ChromeFrameReporter_ScrollState_SCROLL_MAIN_THREAD;
+  static const ScrollState SCROLL_COMPOSITOR_THREAD = ChromeFrameReporter_ScrollState_SCROLL_COMPOSITOR_THREAD;
+  static const ScrollState SCROLL_UNKNOWN = ChromeFrameReporter_ScrollState_SCROLL_UNKNOWN;
+
+  using FieldMetadata_State =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeFrameReporter_State,
+      ChromeFrameReporter>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_State kState() { return {}; }
+  void set_state(::perfetto::protos::pbzero::ChromeFrameReporter_State value) {
+    static constexpr uint32_t field_id = FieldMetadata_State::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Reason =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeFrameReporter_FrameDropReason,
+      ChromeFrameReporter>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Reason kReason() { return {}; }
+  void set_reason(::perfetto::protos::pbzero::ChromeFrameReporter_FrameDropReason value) {
+    static constexpr uint32_t field_id = FieldMetadata_Reason::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FrameSource =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ChromeFrameReporter>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FrameSource kFrameSource() { return {}; }
+  void set_frame_source(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FrameSource::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FrameSequence =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ChromeFrameReporter>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FrameSequence kFrameSequence() { return {}; }
+  void set_frame_sequence(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FrameSequence::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AffectsSmoothness =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeFrameReporter>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AffectsSmoothness kAffectsSmoothness() { return {}; }
+  void set_affects_smoothness(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_AffectsSmoothness::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ScrollState =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeFrameReporter_ScrollState,
+      ChromeFrameReporter>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ScrollState kScrollState() { return {}; }
+  void set_scroll_state(::perfetto::protos::pbzero::ChromeFrameReporter_ScrollState value) {
+    static constexpr uint32_t field_id = FieldMetadata_ScrollState::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HasMainAnimation =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeFrameReporter>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HasMainAnimation kHasMainAnimation() { return {}; }
+  void set_has_main_animation(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_HasMainAnimation::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HasCompositorAnimation =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeFrameReporter>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HasCompositorAnimation kHasCompositorAnimation() { return {}; }
+  void set_has_compositor_animation(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_HasCompositorAnimation::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HasSmoothInputMain =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeFrameReporter>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HasSmoothInputMain kHasSmoothInputMain() { return {}; }
+  void set_has_smooth_input_main(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_HasSmoothInputMain::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HasMissingContent =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeFrameReporter>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HasMissingContent kHasMissingContent() { return {}; }
+  void set_has_missing_content(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_HasMissingContent::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LayerTreeHostId =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ChromeFrameReporter>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LayerTreeHostId kLayerTreeHostId() { return {}; }
+  void set_layer_tree_host_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LayerTreeHostId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_histogram_sample.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_HISTOGRAM_SAMPLE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_HISTOGRAM_SAMPLE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class ChromeHistogramSample_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeHistogramSample_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeHistogramSample_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeHistogramSample_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name_hash() const { return at<1>().valid(); }
+  uint64_t name_hash() const { return at<1>().as_uint64(); }
+  bool has_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars name() const { return at<2>().as_string(); }
+  bool has_sample() const { return at<3>().valid(); }
+  int64_t sample() const { return at<3>().as_int64(); }
+  bool has_name_iid() const { return at<4>().valid(); }
+  uint64_t name_iid() const { return at<4>().as_uint64(); }
+};
+
+class ChromeHistogramSample : public ::protozero::Message {
+ public:
+  using Decoder = ChromeHistogramSample_Decoder;
+  enum : int32_t {
+    kNameHashFieldNumber = 1,
+    kNameFieldNumber = 2,
+    kSampleFieldNumber = 3,
+    kNameIidFieldNumber = 4,
+  };
+
+  using FieldMetadata_NameHash =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ChromeHistogramSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NameHash kNameHash() { return {}; }
+  void set_name_hash(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NameHash::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeHistogramSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Sample =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ChromeHistogramSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Sample kSample() { return {}; }
+  void set_sample(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Sample::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NameIid =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ChromeHistogramSample>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NameIid kNameIid() { return {}; }
+  void set_name_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NameIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class HistogramName_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  HistogramName_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit HistogramName_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit HistogramName_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_iid() const { return at<1>().valid(); }
+  uint64_t iid() const { return at<1>().as_uint64(); }
+  bool has_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars name() const { return at<2>().as_string(); }
+};
+
+class HistogramName : public ::protozero::Message {
+ public:
+  using Decoder = HistogramName_Decoder;
+  enum : int32_t {
+    kIidFieldNumber = 1,
+    kNameFieldNumber = 2,
+  };
+
+  using FieldMetadata_Iid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      HistogramName>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Iid kIid() { return {}; }
+  void set_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Iid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      HistogramName>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_keyed_service.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_KEYED_SERVICE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_KEYED_SERVICE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class ChromeKeyedService_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeKeyedService_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeKeyedService_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeKeyedService_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+};
+
+class ChromeKeyedService : public ::protozero::Message {
+ public:
+  using Decoder = ChromeKeyedService_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+  };
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeKeyedService>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_latency_info.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LATENCY_INFO_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LATENCY_INFO_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class ChromeLatencyInfo_ComponentInfo;
+enum ChromeLatencyInfo_LatencyComponentType : int32_t;
+enum ChromeLatencyInfo_Step : int32_t;
+
+enum ChromeLatencyInfo_Step : int32_t {
+  ChromeLatencyInfo_Step_STEP_UNSPECIFIED = 0,
+  ChromeLatencyInfo_Step_STEP_SEND_INPUT_EVENT_UI = 3,
+  ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_IMPL = 5,
+  ChromeLatencyInfo_Step_STEP_DID_HANDLE_INPUT_AND_OVERSCROLL = 8,
+  ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_MAIN = 4,
+  ChromeLatencyInfo_Step_STEP_MAIN_THREAD_SCROLL_UPDATE = 2,
+  ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_MAIN_COMMIT = 1,
+  ChromeLatencyInfo_Step_STEP_HANDLED_INPUT_EVENT_MAIN_OR_IMPL = 9,
+  ChromeLatencyInfo_Step_STEP_HANDLED_INPUT_EVENT_IMPL = 10,
+  ChromeLatencyInfo_Step_STEP_SWAP_BUFFERS = 6,
+  ChromeLatencyInfo_Step_STEP_DRAW_AND_SWAP = 7,
+  ChromeLatencyInfo_Step_STEP_FINISHED_SWAP_BUFFERS = 11,
+};
+
+const ChromeLatencyInfo_Step ChromeLatencyInfo_Step_MIN = ChromeLatencyInfo_Step_STEP_UNSPECIFIED;
+const ChromeLatencyInfo_Step ChromeLatencyInfo_Step_MAX = ChromeLatencyInfo_Step_STEP_FINISHED_SWAP_BUFFERS;
+
+enum ChromeLatencyInfo_LatencyComponentType : int32_t {
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_UNSPECIFIED = 0,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_BEGIN_RWH = 1,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_ORIGINAL = 2,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FIRST_SCROLL_UPDATE_ORIGINAL = 3,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_ORIGINAL = 4,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_UI = 5,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERER_MAIN = 6,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_MAIN = 7,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_IMPL = 8,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_LAST_EVENT = 9,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_ACK_RWH = 10,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERER_SWAP = 11,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_DISPLAY_COMPOSITOR_RECEIVED_FRAME = 12,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_GPU_SWAP_BUFFER = 13,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FRAME_SWAP = 14,
+};
+
+const ChromeLatencyInfo_LatencyComponentType ChromeLatencyInfo_LatencyComponentType_MIN = ChromeLatencyInfo_LatencyComponentType_COMPONENT_UNSPECIFIED;
+const ChromeLatencyInfo_LatencyComponentType ChromeLatencyInfo_LatencyComponentType_MAX = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FRAME_SWAP;
+
+class ChromeLatencyInfo_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ChromeLatencyInfo_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeLatencyInfo_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeLatencyInfo_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_trace_id() const { return at<1>().valid(); }
+  int64_t trace_id() const { return at<1>().as_int64(); }
+  bool has_step() const { return at<2>().valid(); }
+  int32_t step() const { return at<2>().as_int32(); }
+  bool has_frame_tree_node_id() const { return at<3>().valid(); }
+  int32_t frame_tree_node_id() const { return at<3>().as_int32(); }
+  bool has_component_info() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> component_info() const { return GetRepeated<::protozero::ConstBytes>(4); }
+  bool has_is_coalesced() const { return at<5>().valid(); }
+  bool is_coalesced() const { return at<5>().as_bool(); }
+  bool has_gesture_scroll_id() const { return at<6>().valid(); }
+  int64_t gesture_scroll_id() const { return at<6>().as_int64(); }
+};
+
+class ChromeLatencyInfo : public ::protozero::Message {
+ public:
+  using Decoder = ChromeLatencyInfo_Decoder;
+  enum : int32_t {
+    kTraceIdFieldNumber = 1,
+    kStepFieldNumber = 2,
+    kFrameTreeNodeIdFieldNumber = 3,
+    kComponentInfoFieldNumber = 4,
+    kIsCoalescedFieldNumber = 5,
+    kGestureScrollIdFieldNumber = 6,
+  };
+  using ComponentInfo = ::perfetto::protos::pbzero::ChromeLatencyInfo_ComponentInfo;
+  using Step = ::perfetto::protos::pbzero::ChromeLatencyInfo_Step;
+  using LatencyComponentType = ::perfetto::protos::pbzero::ChromeLatencyInfo_LatencyComponentType;
+  static const Step STEP_UNSPECIFIED = ChromeLatencyInfo_Step_STEP_UNSPECIFIED;
+  static const Step STEP_SEND_INPUT_EVENT_UI = ChromeLatencyInfo_Step_STEP_SEND_INPUT_EVENT_UI;
+  static const Step STEP_HANDLE_INPUT_EVENT_IMPL = ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_IMPL;
+  static const Step STEP_DID_HANDLE_INPUT_AND_OVERSCROLL = ChromeLatencyInfo_Step_STEP_DID_HANDLE_INPUT_AND_OVERSCROLL;
+  static const Step STEP_HANDLE_INPUT_EVENT_MAIN = ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_MAIN;
+  static const Step STEP_MAIN_THREAD_SCROLL_UPDATE = ChromeLatencyInfo_Step_STEP_MAIN_THREAD_SCROLL_UPDATE;
+  static const Step STEP_HANDLE_INPUT_EVENT_MAIN_COMMIT = ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_MAIN_COMMIT;
+  static const Step STEP_HANDLED_INPUT_EVENT_MAIN_OR_IMPL = ChromeLatencyInfo_Step_STEP_HANDLED_INPUT_EVENT_MAIN_OR_IMPL;
+  static const Step STEP_HANDLED_INPUT_EVENT_IMPL = ChromeLatencyInfo_Step_STEP_HANDLED_INPUT_EVENT_IMPL;
+  static const Step STEP_SWAP_BUFFERS = ChromeLatencyInfo_Step_STEP_SWAP_BUFFERS;
+  static const Step STEP_DRAW_AND_SWAP = ChromeLatencyInfo_Step_STEP_DRAW_AND_SWAP;
+  static const Step STEP_FINISHED_SWAP_BUFFERS = ChromeLatencyInfo_Step_STEP_FINISHED_SWAP_BUFFERS;
+  static const LatencyComponentType COMPONENT_UNSPECIFIED = ChromeLatencyInfo_LatencyComponentType_COMPONENT_UNSPECIFIED;
+  static const LatencyComponentType COMPONENT_INPUT_EVENT_LATENCY_BEGIN_RWH = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_BEGIN_RWH;
+  static const LatencyComponentType COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_ORIGINAL = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_ORIGINAL;
+  static const LatencyComponentType COMPONENT_INPUT_EVENT_LATENCY_FIRST_SCROLL_UPDATE_ORIGINAL = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FIRST_SCROLL_UPDATE_ORIGINAL;
+  static const LatencyComponentType COMPONENT_INPUT_EVENT_LATENCY_ORIGINAL = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_ORIGINAL;
+  static const LatencyComponentType COMPONENT_INPUT_EVENT_LATENCY_UI = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_UI;
+  static const LatencyComponentType COMPONENT_INPUT_EVENT_LATENCY_RENDERER_MAIN = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERER_MAIN;
+  static const LatencyComponentType COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_MAIN = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_MAIN;
+  static const LatencyComponentType COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_IMPL = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_IMPL;
+  static const LatencyComponentType COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_LAST_EVENT = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_LAST_EVENT;
+  static const LatencyComponentType COMPONENT_INPUT_EVENT_LATENCY_ACK_RWH = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_ACK_RWH;
+  static const LatencyComponentType COMPONENT_INPUT_EVENT_LATENCY_RENDERER_SWAP = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERER_SWAP;
+  static const LatencyComponentType COMPONENT_DISPLAY_COMPOSITOR_RECEIVED_FRAME = ChromeLatencyInfo_LatencyComponentType_COMPONENT_DISPLAY_COMPOSITOR_RECEIVED_FRAME;
+  static const LatencyComponentType COMPONENT_INPUT_EVENT_GPU_SWAP_BUFFER = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_GPU_SWAP_BUFFER;
+  static const LatencyComponentType COMPONENT_INPUT_EVENT_LATENCY_FRAME_SWAP = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FRAME_SWAP;
+
+  using FieldMetadata_TraceId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ChromeLatencyInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceId kTraceId() { return {}; }
+  void set_trace_id(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TraceId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Step =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeLatencyInfo_Step,
+      ChromeLatencyInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Step kStep() { return {}; }
+  void set_step(::perfetto::protos::pbzero::ChromeLatencyInfo_Step value) {
+    static constexpr uint32_t field_id = FieldMetadata_Step::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FrameTreeNodeId =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeLatencyInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FrameTreeNodeId kFrameTreeNodeId() { return {}; }
+  void set_frame_tree_node_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FrameTreeNodeId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ComponentInfo =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeLatencyInfo_ComponentInfo,
+      ChromeLatencyInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ComponentInfo kComponentInfo() { return {}; }
+  template <typename T = ChromeLatencyInfo_ComponentInfo> T* add_component_info() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_IsCoalesced =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeLatencyInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IsCoalesced kIsCoalesced() { return {}; }
+  void set_is_coalesced(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_IsCoalesced::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GestureScrollId =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ChromeLatencyInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GestureScrollId kGestureScrollId() { return {}; }
+  void set_gesture_scroll_id(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GestureScrollId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ChromeLatencyInfo_ComponentInfo_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeLatencyInfo_ComponentInfo_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeLatencyInfo_ComponentInfo_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeLatencyInfo_ComponentInfo_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_component_type() const { return at<1>().valid(); }
+  int32_t component_type() const { return at<1>().as_int32(); }
+  bool has_time_us() const { return at<2>().valid(); }
+  uint64_t time_us() const { return at<2>().as_uint64(); }
+};
+
+class ChromeLatencyInfo_ComponentInfo : public ::protozero::Message {
+ public:
+  using Decoder = ChromeLatencyInfo_ComponentInfo_Decoder;
+  enum : int32_t {
+    kComponentTypeFieldNumber = 1,
+    kTimeUsFieldNumber = 2,
+  };
+
+  using FieldMetadata_ComponentType =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeLatencyInfo_LatencyComponentType,
+      ChromeLatencyInfo_ComponentInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ComponentType kComponentType() { return {}; }
+  void set_component_type(::perfetto::protos::pbzero::ChromeLatencyInfo_LatencyComponentType value) {
+    static constexpr uint32_t field_id = FieldMetadata_ComponentType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TimeUs =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ChromeLatencyInfo_ComponentInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TimeUs kTimeUs() { return {}; }
+  void set_time_us(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TimeUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_legacy_ipc.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LEGACY_IPC_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LEGACY_IPC_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum ChromeLegacyIpc_MessageClass : int32_t;
+
+enum ChromeLegacyIpc_MessageClass : int32_t {
+  ChromeLegacyIpc_MessageClass_CLASS_UNSPECIFIED = 0,
+  ChromeLegacyIpc_MessageClass_CLASS_AUTOMATION = 1,
+  ChromeLegacyIpc_MessageClass_CLASS_FRAME = 2,
+  ChromeLegacyIpc_MessageClass_CLASS_PAGE = 3,
+  ChromeLegacyIpc_MessageClass_CLASS_VIEW = 4,
+  ChromeLegacyIpc_MessageClass_CLASS_WIDGET = 5,
+  ChromeLegacyIpc_MessageClass_CLASS_INPUT = 6,
+  ChromeLegacyIpc_MessageClass_CLASS_TEST = 7,
+  ChromeLegacyIpc_MessageClass_CLASS_WORKER = 8,
+  ChromeLegacyIpc_MessageClass_CLASS_NACL = 9,
+  ChromeLegacyIpc_MessageClass_CLASS_GPU_CHANNEL = 10,
+  ChromeLegacyIpc_MessageClass_CLASS_MEDIA = 11,
+  ChromeLegacyIpc_MessageClass_CLASS_PPAPI = 12,
+  ChromeLegacyIpc_MessageClass_CLASS_CHROME = 13,
+  ChromeLegacyIpc_MessageClass_CLASS_DRAG = 14,
+  ChromeLegacyIpc_MessageClass_CLASS_PRINT = 15,
+  ChromeLegacyIpc_MessageClass_CLASS_EXTENSION = 16,
+  ChromeLegacyIpc_MessageClass_CLASS_TEXT_INPUT_CLIENT = 17,
+  ChromeLegacyIpc_MessageClass_CLASS_BLINK_TEST = 18,
+  ChromeLegacyIpc_MessageClass_CLASS_ACCESSIBILITY = 19,
+  ChromeLegacyIpc_MessageClass_CLASS_PRERENDER = 20,
+  ChromeLegacyIpc_MessageClass_CLASS_CHROMOTING = 21,
+  ChromeLegacyIpc_MessageClass_CLASS_BROWSER_PLUGIN = 22,
+  ChromeLegacyIpc_MessageClass_CLASS_ANDROID_WEB_VIEW = 23,
+  ChromeLegacyIpc_MessageClass_CLASS_NACL_HOST = 24,
+  ChromeLegacyIpc_MessageClass_CLASS_ENCRYPTED_MEDIA = 25,
+  ChromeLegacyIpc_MessageClass_CLASS_CAST = 26,
+  ChromeLegacyIpc_MessageClass_CLASS_GIN_JAVA_BRIDGE = 27,
+  ChromeLegacyIpc_MessageClass_CLASS_CHROME_UTILITY_PRINTING = 28,
+  ChromeLegacyIpc_MessageClass_CLASS_OZONE_GPU = 29,
+  ChromeLegacyIpc_MessageClass_CLASS_WEB_TEST = 30,
+  ChromeLegacyIpc_MessageClass_CLASS_NETWORK_HINTS = 31,
+  ChromeLegacyIpc_MessageClass_CLASS_EXTENSIONS_GUEST_VIEW = 32,
+  ChromeLegacyIpc_MessageClass_CLASS_GUEST_VIEW = 33,
+  ChromeLegacyIpc_MessageClass_CLASS_MEDIA_PLAYER_DELEGATE = 34,
+  ChromeLegacyIpc_MessageClass_CLASS_EXTENSION_WORKER = 35,
+  ChromeLegacyIpc_MessageClass_CLASS_SUBRESOURCE_FILTER = 36,
+  ChromeLegacyIpc_MessageClass_CLASS_UNFREEZABLE_FRAME = 37,
+};
+
+const ChromeLegacyIpc_MessageClass ChromeLegacyIpc_MessageClass_MIN = ChromeLegacyIpc_MessageClass_CLASS_UNSPECIFIED;
+const ChromeLegacyIpc_MessageClass ChromeLegacyIpc_MessageClass_MAX = ChromeLegacyIpc_MessageClass_CLASS_UNFREEZABLE_FRAME;
+
+class ChromeLegacyIpc_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeLegacyIpc_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeLegacyIpc_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeLegacyIpc_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_message_class() const { return at<1>().valid(); }
+  int32_t message_class() const { return at<1>().as_int32(); }
+  bool has_message_line() const { return at<2>().valid(); }
+  uint32_t message_line() const { return at<2>().as_uint32(); }
+};
+
+class ChromeLegacyIpc : public ::protozero::Message {
+ public:
+  using Decoder = ChromeLegacyIpc_Decoder;
+  enum : int32_t {
+    kMessageClassFieldNumber = 1,
+    kMessageLineFieldNumber = 2,
+  };
+  using MessageClass = ::perfetto::protos::pbzero::ChromeLegacyIpc_MessageClass;
+  static const MessageClass CLASS_UNSPECIFIED = ChromeLegacyIpc_MessageClass_CLASS_UNSPECIFIED;
+  static const MessageClass CLASS_AUTOMATION = ChromeLegacyIpc_MessageClass_CLASS_AUTOMATION;
+  static const MessageClass CLASS_FRAME = ChromeLegacyIpc_MessageClass_CLASS_FRAME;
+  static const MessageClass CLASS_PAGE = ChromeLegacyIpc_MessageClass_CLASS_PAGE;
+  static const MessageClass CLASS_VIEW = ChromeLegacyIpc_MessageClass_CLASS_VIEW;
+  static const MessageClass CLASS_WIDGET = ChromeLegacyIpc_MessageClass_CLASS_WIDGET;
+  static const MessageClass CLASS_INPUT = ChromeLegacyIpc_MessageClass_CLASS_INPUT;
+  static const MessageClass CLASS_TEST = ChromeLegacyIpc_MessageClass_CLASS_TEST;
+  static const MessageClass CLASS_WORKER = ChromeLegacyIpc_MessageClass_CLASS_WORKER;
+  static const MessageClass CLASS_NACL = ChromeLegacyIpc_MessageClass_CLASS_NACL;
+  static const MessageClass CLASS_GPU_CHANNEL = ChromeLegacyIpc_MessageClass_CLASS_GPU_CHANNEL;
+  static const MessageClass CLASS_MEDIA = ChromeLegacyIpc_MessageClass_CLASS_MEDIA;
+  static const MessageClass CLASS_PPAPI = ChromeLegacyIpc_MessageClass_CLASS_PPAPI;
+  static const MessageClass CLASS_CHROME = ChromeLegacyIpc_MessageClass_CLASS_CHROME;
+  static const MessageClass CLASS_DRAG = ChromeLegacyIpc_MessageClass_CLASS_DRAG;
+  static const MessageClass CLASS_PRINT = ChromeLegacyIpc_MessageClass_CLASS_PRINT;
+  static const MessageClass CLASS_EXTENSION = ChromeLegacyIpc_MessageClass_CLASS_EXTENSION;
+  static const MessageClass CLASS_TEXT_INPUT_CLIENT = ChromeLegacyIpc_MessageClass_CLASS_TEXT_INPUT_CLIENT;
+  static const MessageClass CLASS_BLINK_TEST = ChromeLegacyIpc_MessageClass_CLASS_BLINK_TEST;
+  static const MessageClass CLASS_ACCESSIBILITY = ChromeLegacyIpc_MessageClass_CLASS_ACCESSIBILITY;
+  static const MessageClass CLASS_PRERENDER = ChromeLegacyIpc_MessageClass_CLASS_PRERENDER;
+  static const MessageClass CLASS_CHROMOTING = ChromeLegacyIpc_MessageClass_CLASS_CHROMOTING;
+  static const MessageClass CLASS_BROWSER_PLUGIN = ChromeLegacyIpc_MessageClass_CLASS_BROWSER_PLUGIN;
+  static const MessageClass CLASS_ANDROID_WEB_VIEW = ChromeLegacyIpc_MessageClass_CLASS_ANDROID_WEB_VIEW;
+  static const MessageClass CLASS_NACL_HOST = ChromeLegacyIpc_MessageClass_CLASS_NACL_HOST;
+  static const MessageClass CLASS_ENCRYPTED_MEDIA = ChromeLegacyIpc_MessageClass_CLASS_ENCRYPTED_MEDIA;
+  static const MessageClass CLASS_CAST = ChromeLegacyIpc_MessageClass_CLASS_CAST;
+  static const MessageClass CLASS_GIN_JAVA_BRIDGE = ChromeLegacyIpc_MessageClass_CLASS_GIN_JAVA_BRIDGE;
+  static const MessageClass CLASS_CHROME_UTILITY_PRINTING = ChromeLegacyIpc_MessageClass_CLASS_CHROME_UTILITY_PRINTING;
+  static const MessageClass CLASS_OZONE_GPU = ChromeLegacyIpc_MessageClass_CLASS_OZONE_GPU;
+  static const MessageClass CLASS_WEB_TEST = ChromeLegacyIpc_MessageClass_CLASS_WEB_TEST;
+  static const MessageClass CLASS_NETWORK_HINTS = ChromeLegacyIpc_MessageClass_CLASS_NETWORK_HINTS;
+  static const MessageClass CLASS_EXTENSIONS_GUEST_VIEW = ChromeLegacyIpc_MessageClass_CLASS_EXTENSIONS_GUEST_VIEW;
+  static const MessageClass CLASS_GUEST_VIEW = ChromeLegacyIpc_MessageClass_CLASS_GUEST_VIEW;
+  static const MessageClass CLASS_MEDIA_PLAYER_DELEGATE = ChromeLegacyIpc_MessageClass_CLASS_MEDIA_PLAYER_DELEGATE;
+  static const MessageClass CLASS_EXTENSION_WORKER = ChromeLegacyIpc_MessageClass_CLASS_EXTENSION_WORKER;
+  static const MessageClass CLASS_SUBRESOURCE_FILTER = ChromeLegacyIpc_MessageClass_CLASS_SUBRESOURCE_FILTER;
+  static const MessageClass CLASS_UNFREEZABLE_FRAME = ChromeLegacyIpc_MessageClass_CLASS_UNFREEZABLE_FRAME;
+
+  using FieldMetadata_MessageClass =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeLegacyIpc_MessageClass,
+      ChromeLegacyIpc>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MessageClass kMessageClass() { return {}; }
+  void set_message_class(::perfetto::protos::pbzero::ChromeLegacyIpc_MessageClass value) {
+    static constexpr uint32_t field_id = FieldMetadata_MessageClass::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MessageLine =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ChromeLegacyIpc>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MessageLine kMessageLine() { return {}; }
+  void set_message_line(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MessageLine::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_message_pump.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_MESSAGE_PUMP_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_MESSAGE_PUMP_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class ChromeMessagePump_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeMessagePump_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeMessagePump_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeMessagePump_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_sent_messages_in_queue() const { return at<1>().valid(); }
+  bool sent_messages_in_queue() const { return at<1>().as_bool(); }
+  bool has_io_handler_location_iid() const { return at<2>().valid(); }
+  uint64_t io_handler_location_iid() const { return at<2>().as_uint64(); }
+};
+
+class ChromeMessagePump : public ::protozero::Message {
+ public:
+  using Decoder = ChromeMessagePump_Decoder;
+  enum : int32_t {
+    kSentMessagesInQueueFieldNumber = 1,
+    kIoHandlerLocationIidFieldNumber = 2,
+  };
+
+  using FieldMetadata_SentMessagesInQueue =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ChromeMessagePump>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SentMessagesInQueue kSentMessagesInQueue() { return {}; }
+  void set_sent_messages_in_queue(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_SentMessagesInQueue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IoHandlerLocationIid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ChromeMessagePump>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IoHandlerLocationIid kIoHandlerLocationIid() { return {}; }
+  void set_io_handler_location_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IoHandlerLocationIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_mojo_event_info.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_MOJO_EVENT_INFO_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_MOJO_EVENT_INFO_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class ChromeMojoEventInfo_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeMojoEventInfo_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeMojoEventInfo_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeMojoEventInfo_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_watcher_notify_interface_tag() const { return at<1>().valid(); }
+  ::protozero::ConstChars watcher_notify_interface_tag() const { return at<1>().as_string(); }
+  bool has_ipc_hash() const { return at<2>().valid(); }
+  uint32_t ipc_hash() const { return at<2>().as_uint32(); }
+  bool has_mojo_interface_tag() const { return at<3>().valid(); }
+  ::protozero::ConstChars mojo_interface_tag() const { return at<3>().as_string(); }
+};
+
+class ChromeMojoEventInfo : public ::protozero::Message {
+ public:
+  using Decoder = ChromeMojoEventInfo_Decoder;
+  enum : int32_t {
+    kWatcherNotifyInterfaceTagFieldNumber = 1,
+    kIpcHashFieldNumber = 2,
+    kMojoInterfaceTagFieldNumber = 3,
+  };
+
+  using FieldMetadata_WatcherNotifyInterfaceTag =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeMojoEventInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_WatcherNotifyInterfaceTag kWatcherNotifyInterfaceTag() { return {}; }
+  void set_watcher_notify_interface_tag(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_WatcherNotifyInterfaceTag::kFieldId, data, size);
+  }
+  void set_watcher_notify_interface_tag(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_WatcherNotifyInterfaceTag::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IpcHash =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ChromeMojoEventInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IpcHash kIpcHash() { return {}; }
+  void set_ipc_hash(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IpcHash::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MojoInterfaceTag =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeMojoEventInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MojoInterfaceTag kMojoInterfaceTag() { return {}; }
+  void set_mojo_interface_tag(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_MojoInterfaceTag::kFieldId, data, size);
+  }
+  void set_mojo_interface_tag(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_MojoInterfaceTag::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_process_descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_PROCESS_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_PROCESS_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum ChromeProcessDescriptor_ProcessType : int32_t;
+
+enum ChromeProcessDescriptor_ProcessType : int32_t {
+  ChromeProcessDescriptor_ProcessType_PROCESS_UNSPECIFIED = 0,
+  ChromeProcessDescriptor_ProcessType_PROCESS_BROWSER = 1,
+  ChromeProcessDescriptor_ProcessType_PROCESS_RENDERER = 2,
+  ChromeProcessDescriptor_ProcessType_PROCESS_UTILITY = 3,
+  ChromeProcessDescriptor_ProcessType_PROCESS_ZYGOTE = 4,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SANDBOX_HELPER = 5,
+  ChromeProcessDescriptor_ProcessType_PROCESS_GPU = 6,
+  ChromeProcessDescriptor_ProcessType_PROCESS_PPAPI_PLUGIN = 7,
+  ChromeProcessDescriptor_ProcessType_PROCESS_PPAPI_BROKER = 8,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_NETWORK = 9,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_TRACING = 10,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_STORAGE = 11,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_AUDIO = 12,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_DATA_DECODER = 13,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_UTIL_WIN = 14,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PROXY_RESOLVER = 15,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CDM = 16,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_VIDEO_CAPTURE = 17,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_UNZIPPER = 18,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_MIRRORING = 19,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_FILEPATCHER = 20,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_TTS = 21,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PRINTING = 22,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_QUARANTINE = 23,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CROS_LOCALSEARCH = 24,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CROS_ASSISTANT_AUDIO_DECODER = 25,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_FILEUTIL = 26,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PRINTCOMPOSITOR = 27,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PAINTPREVIEW = 28,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SPEECHRECOGNITION = 29,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_XRDEVICE = 30,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_READICON = 31,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_LANGUAGEDETECTION = 32,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SHARING = 33,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_MEDIAPARSER = 34,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_QRCODEGENERATOR = 35,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PROFILEIMPORT = 36,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_IME = 37,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_RECORDING = 38,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SHAPEDETECTION = 39,
+};
+
+const ChromeProcessDescriptor_ProcessType ChromeProcessDescriptor_ProcessType_MIN = ChromeProcessDescriptor_ProcessType_PROCESS_UNSPECIFIED;
+const ChromeProcessDescriptor_ProcessType ChromeProcessDescriptor_ProcessType_MAX = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SHAPEDETECTION;
+
+class ChromeProcessDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeProcessDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeProcessDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeProcessDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_process_type() const { return at<1>().valid(); }
+  int32_t process_type() const { return at<1>().as_int32(); }
+  bool has_process_priority() const { return at<2>().valid(); }
+  int32_t process_priority() const { return at<2>().as_int32(); }
+  bool has_legacy_sort_index() const { return at<3>().valid(); }
+  int32_t legacy_sort_index() const { return at<3>().as_int32(); }
+  bool has_host_app_package_name() const { return at<4>().valid(); }
+  ::protozero::ConstChars host_app_package_name() const { return at<4>().as_string(); }
+  bool has_crash_trace_id() const { return at<5>().valid(); }
+  uint64_t crash_trace_id() const { return at<5>().as_uint64(); }
+};
+
+class ChromeProcessDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = ChromeProcessDescriptor_Decoder;
+  enum : int32_t {
+    kProcessTypeFieldNumber = 1,
+    kProcessPriorityFieldNumber = 2,
+    kLegacySortIndexFieldNumber = 3,
+    kHostAppPackageNameFieldNumber = 4,
+    kCrashTraceIdFieldNumber = 5,
+  };
+  using ProcessType = ::perfetto::protos::pbzero::ChromeProcessDescriptor_ProcessType;
+  static const ProcessType PROCESS_UNSPECIFIED = ChromeProcessDescriptor_ProcessType_PROCESS_UNSPECIFIED;
+  static const ProcessType PROCESS_BROWSER = ChromeProcessDescriptor_ProcessType_PROCESS_BROWSER;
+  static const ProcessType PROCESS_RENDERER = ChromeProcessDescriptor_ProcessType_PROCESS_RENDERER;
+  static const ProcessType PROCESS_UTILITY = ChromeProcessDescriptor_ProcessType_PROCESS_UTILITY;
+  static const ProcessType PROCESS_ZYGOTE = ChromeProcessDescriptor_ProcessType_PROCESS_ZYGOTE;
+  static const ProcessType PROCESS_SANDBOX_HELPER = ChromeProcessDescriptor_ProcessType_PROCESS_SANDBOX_HELPER;
+  static const ProcessType PROCESS_GPU = ChromeProcessDescriptor_ProcessType_PROCESS_GPU;
+  static const ProcessType PROCESS_PPAPI_PLUGIN = ChromeProcessDescriptor_ProcessType_PROCESS_PPAPI_PLUGIN;
+  static const ProcessType PROCESS_PPAPI_BROKER = ChromeProcessDescriptor_ProcessType_PROCESS_PPAPI_BROKER;
+  static const ProcessType PROCESS_SERVICE_NETWORK = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_NETWORK;
+  static const ProcessType PROCESS_SERVICE_TRACING = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_TRACING;
+  static const ProcessType PROCESS_SERVICE_STORAGE = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_STORAGE;
+  static const ProcessType PROCESS_SERVICE_AUDIO = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_AUDIO;
+  static const ProcessType PROCESS_SERVICE_DATA_DECODER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_DATA_DECODER;
+  static const ProcessType PROCESS_SERVICE_UTIL_WIN = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_UTIL_WIN;
+  static const ProcessType PROCESS_SERVICE_PROXY_RESOLVER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PROXY_RESOLVER;
+  static const ProcessType PROCESS_SERVICE_CDM = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CDM;
+  static const ProcessType PROCESS_SERVICE_VIDEO_CAPTURE = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_VIDEO_CAPTURE;
+  static const ProcessType PROCESS_SERVICE_UNZIPPER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_UNZIPPER;
+  static const ProcessType PROCESS_SERVICE_MIRRORING = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_MIRRORING;
+  static const ProcessType PROCESS_SERVICE_FILEPATCHER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_FILEPATCHER;
+  static const ProcessType PROCESS_SERVICE_TTS = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_TTS;
+  static const ProcessType PROCESS_SERVICE_PRINTING = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PRINTING;
+  static const ProcessType PROCESS_SERVICE_QUARANTINE = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_QUARANTINE;
+  static const ProcessType PROCESS_SERVICE_CROS_LOCALSEARCH = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CROS_LOCALSEARCH;
+  static const ProcessType PROCESS_SERVICE_CROS_ASSISTANT_AUDIO_DECODER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CROS_ASSISTANT_AUDIO_DECODER;
+  static const ProcessType PROCESS_SERVICE_FILEUTIL = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_FILEUTIL;
+  static const ProcessType PROCESS_SERVICE_PRINTCOMPOSITOR = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PRINTCOMPOSITOR;
+  static const ProcessType PROCESS_SERVICE_PAINTPREVIEW = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PAINTPREVIEW;
+  static const ProcessType PROCESS_SERVICE_SPEECHRECOGNITION = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SPEECHRECOGNITION;
+  static const ProcessType PROCESS_SERVICE_XRDEVICE = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_XRDEVICE;
+  static const ProcessType PROCESS_SERVICE_READICON = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_READICON;
+  static const ProcessType PROCESS_SERVICE_LANGUAGEDETECTION = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_LANGUAGEDETECTION;
+  static const ProcessType PROCESS_SERVICE_SHARING = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SHARING;
+  static const ProcessType PROCESS_SERVICE_MEDIAPARSER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_MEDIAPARSER;
+  static const ProcessType PROCESS_SERVICE_QRCODEGENERATOR = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_QRCODEGENERATOR;
+  static const ProcessType PROCESS_SERVICE_PROFILEIMPORT = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PROFILEIMPORT;
+  static const ProcessType PROCESS_SERVICE_IME = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_IME;
+  static const ProcessType PROCESS_SERVICE_RECORDING = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_RECORDING;
+  static const ProcessType PROCESS_SERVICE_SHAPEDETECTION = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SHAPEDETECTION;
+
+  using FieldMetadata_ProcessType =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeProcessDescriptor_ProcessType,
+      ChromeProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessType kProcessType() { return {}; }
+  void set_process_type(::perfetto::protos::pbzero::ChromeProcessDescriptor_ProcessType value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProcessType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProcessPriority =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessPriority kProcessPriority() { return {}; }
+  void set_process_priority(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProcessPriority::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LegacySortIndex =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LegacySortIndex kLegacySortIndex() { return {}; }
+  void set_legacy_sort_index(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LegacySortIndex::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HostAppPackageName =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HostAppPackageName kHostAppPackageName() { return {}; }
+  void set_host_app_package_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_HostAppPackageName::kFieldId, data, size);
+  }
+  void set_host_app_package_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_HostAppPackageName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CrashTraceId =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ChromeProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CrashTraceId kCrashTraceId() { return {}; }
+  void set_crash_trace_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CrashTraceId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_renderer_scheduler_state.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_RENDERER_SCHEDULER_STATE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_RENDERER_SCHEDULER_STATE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum ChromeRAILMode : int32_t;
+
+enum ChromeRAILMode : int32_t {
+  RAIL_MODE_NONE = 0,
+  RAIL_MODE_RESPONSE = 1,
+  RAIL_MODE_ANIMATION = 2,
+  RAIL_MODE_IDLE = 3,
+  RAIL_MODE_LOAD = 4,
+};
+
+const ChromeRAILMode ChromeRAILMode_MIN = RAIL_MODE_NONE;
+const ChromeRAILMode ChromeRAILMode_MAX = RAIL_MODE_LOAD;
+
+class ChromeRendererSchedulerState_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeRendererSchedulerState_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeRendererSchedulerState_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeRendererSchedulerState_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_rail_mode() const { return at<1>().valid(); }
+  int32_t rail_mode() const { return at<1>().as_int32(); }
+};
+
+class ChromeRendererSchedulerState : public ::protozero::Message {
+ public:
+  using Decoder = ChromeRendererSchedulerState_Decoder;
+  enum : int32_t {
+    kRailModeFieldNumber = 1,
+  };
+
+  using FieldMetadata_RailMode =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeRAILMode,
+      ChromeRendererSchedulerState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RailMode kRailMode() { return {}; }
+  void set_rail_mode(::perfetto::protos::pbzero::ChromeRAILMode value) {
+    static constexpr uint32_t field_id = FieldMetadata_RailMode::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_thread_descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_THREAD_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_THREAD_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum ChromeThreadDescriptor_ThreadType : int32_t;
+
+enum ChromeThreadDescriptor_ThreadType : int32_t {
+  ChromeThreadDescriptor_ThreadType_THREAD_UNSPECIFIED = 0,
+  ChromeThreadDescriptor_ThreadType_THREAD_MAIN = 1,
+  ChromeThreadDescriptor_ThreadType_THREAD_IO = 2,
+  ChromeThreadDescriptor_ThreadType_THREAD_POOL_BG_WORKER = 3,
+  ChromeThreadDescriptor_ThreadType_THREAD_POOL_FG_WORKER = 4,
+  ChromeThreadDescriptor_ThreadType_THREAD_POOL_FG_BLOCKING = 5,
+  ChromeThreadDescriptor_ThreadType_THREAD_POOL_BG_BLOCKING = 6,
+  ChromeThreadDescriptor_ThreadType_THREAD_POOL_SERVICE = 7,
+  ChromeThreadDescriptor_ThreadType_THREAD_COMPOSITOR = 8,
+  ChromeThreadDescriptor_ThreadType_THREAD_VIZ_COMPOSITOR = 9,
+  ChromeThreadDescriptor_ThreadType_THREAD_COMPOSITOR_WORKER = 10,
+  ChromeThreadDescriptor_ThreadType_THREAD_SERVICE_WORKER = 11,
+  ChromeThreadDescriptor_ThreadType_THREAD_NETWORK_SERVICE = 12,
+  ChromeThreadDescriptor_ThreadType_THREAD_CHILD_IO = 13,
+  ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_IO = 14,
+  ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_MAIN = 15,
+  ChromeThreadDescriptor_ThreadType_THREAD_RENDERER_MAIN = 16,
+  ChromeThreadDescriptor_ThreadType_THREAD_UTILITY_MAIN = 17,
+  ChromeThreadDescriptor_ThreadType_THREAD_GPU_MAIN = 18,
+  ChromeThreadDescriptor_ThreadType_THREAD_CACHE_BLOCKFILE = 19,
+  ChromeThreadDescriptor_ThreadType_THREAD_MEDIA = 20,
+  ChromeThreadDescriptor_ThreadType_THREAD_AUDIO_OUTPUTDEVICE = 21,
+  ChromeThreadDescriptor_ThreadType_THREAD_AUDIO_INPUTDEVICE = 22,
+  ChromeThreadDescriptor_ThreadType_THREAD_GPU_MEMORY = 23,
+  ChromeThreadDescriptor_ThreadType_THREAD_GPU_VSYNC = 24,
+  ChromeThreadDescriptor_ThreadType_THREAD_DXA_VIDEODECODER = 25,
+  ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_WATCHDOG = 26,
+  ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_NETWORK = 27,
+  ChromeThreadDescriptor_ThreadType_THREAD_WINDOW_OWNER = 28,
+  ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_SIGNALING = 29,
+  ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_WORKER = 30,
+  ChromeThreadDescriptor_ThreadType_THREAD_PPAPI_MAIN = 31,
+  ChromeThreadDescriptor_ThreadType_THREAD_GPU_WATCHDOG = 32,
+  ChromeThreadDescriptor_ThreadType_THREAD_SWAPPER = 33,
+  ChromeThreadDescriptor_ThreadType_THREAD_GAMEPAD_POLLING = 34,
+  ChromeThreadDescriptor_ThreadType_THREAD_WEBCRYPTO = 35,
+  ChromeThreadDescriptor_ThreadType_THREAD_DATABASE = 36,
+  ChromeThreadDescriptor_ThreadType_THREAD_PROXYRESOLVER = 37,
+  ChromeThreadDescriptor_ThreadType_THREAD_DEVTOOLSADB = 38,
+  ChromeThreadDescriptor_ThreadType_THREAD_NETWORKCONFIGWATCHER = 39,
+  ChromeThreadDescriptor_ThreadType_THREAD_WASAPI_RENDER = 40,
+  ChromeThreadDescriptor_ThreadType_THREAD_MEMORY_INFRA = 50,
+  ChromeThreadDescriptor_ThreadType_THREAD_SAMPLING_PROFILER = 51,
+};
+
+const ChromeThreadDescriptor_ThreadType ChromeThreadDescriptor_ThreadType_MIN = ChromeThreadDescriptor_ThreadType_THREAD_UNSPECIFIED;
+const ChromeThreadDescriptor_ThreadType ChromeThreadDescriptor_ThreadType_MAX = ChromeThreadDescriptor_ThreadType_THREAD_SAMPLING_PROFILER;
+
+class ChromeThreadDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeThreadDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeThreadDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeThreadDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_thread_type() const { return at<1>().valid(); }
+  int32_t thread_type() const { return at<1>().as_int32(); }
+  bool has_legacy_sort_index() const { return at<2>().valid(); }
+  int32_t legacy_sort_index() const { return at<2>().as_int32(); }
+};
+
+class ChromeThreadDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = ChromeThreadDescriptor_Decoder;
+  enum : int32_t {
+    kThreadTypeFieldNumber = 1,
+    kLegacySortIndexFieldNumber = 2,
+  };
+  using ThreadType = ::perfetto::protos::pbzero::ChromeThreadDescriptor_ThreadType;
+  static const ThreadType THREAD_UNSPECIFIED = ChromeThreadDescriptor_ThreadType_THREAD_UNSPECIFIED;
+  static const ThreadType THREAD_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_MAIN;
+  static const ThreadType THREAD_IO = ChromeThreadDescriptor_ThreadType_THREAD_IO;
+  static const ThreadType THREAD_POOL_BG_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_POOL_BG_WORKER;
+  static const ThreadType THREAD_POOL_FG_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_POOL_FG_WORKER;
+  static const ThreadType THREAD_POOL_FG_BLOCKING = ChromeThreadDescriptor_ThreadType_THREAD_POOL_FG_BLOCKING;
+  static const ThreadType THREAD_POOL_BG_BLOCKING = ChromeThreadDescriptor_ThreadType_THREAD_POOL_BG_BLOCKING;
+  static const ThreadType THREAD_POOL_SERVICE = ChromeThreadDescriptor_ThreadType_THREAD_POOL_SERVICE;
+  static const ThreadType THREAD_COMPOSITOR = ChromeThreadDescriptor_ThreadType_THREAD_COMPOSITOR;
+  static const ThreadType THREAD_VIZ_COMPOSITOR = ChromeThreadDescriptor_ThreadType_THREAD_VIZ_COMPOSITOR;
+  static const ThreadType THREAD_COMPOSITOR_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_COMPOSITOR_WORKER;
+  static const ThreadType THREAD_SERVICE_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_SERVICE_WORKER;
+  static const ThreadType THREAD_NETWORK_SERVICE = ChromeThreadDescriptor_ThreadType_THREAD_NETWORK_SERVICE;
+  static const ThreadType THREAD_CHILD_IO = ChromeThreadDescriptor_ThreadType_THREAD_CHILD_IO;
+  static const ThreadType THREAD_BROWSER_IO = ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_IO;
+  static const ThreadType THREAD_BROWSER_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_MAIN;
+  static const ThreadType THREAD_RENDERER_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_RENDERER_MAIN;
+  static const ThreadType THREAD_UTILITY_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_UTILITY_MAIN;
+  static const ThreadType THREAD_GPU_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_GPU_MAIN;
+  static const ThreadType THREAD_CACHE_BLOCKFILE = ChromeThreadDescriptor_ThreadType_THREAD_CACHE_BLOCKFILE;
+  static const ThreadType THREAD_MEDIA = ChromeThreadDescriptor_ThreadType_THREAD_MEDIA;
+  static const ThreadType THREAD_AUDIO_OUTPUTDEVICE = ChromeThreadDescriptor_ThreadType_THREAD_AUDIO_OUTPUTDEVICE;
+  static const ThreadType THREAD_AUDIO_INPUTDEVICE = ChromeThreadDescriptor_ThreadType_THREAD_AUDIO_INPUTDEVICE;
+  static const ThreadType THREAD_GPU_MEMORY = ChromeThreadDescriptor_ThreadType_THREAD_GPU_MEMORY;
+  static const ThreadType THREAD_GPU_VSYNC = ChromeThreadDescriptor_ThreadType_THREAD_GPU_VSYNC;
+  static const ThreadType THREAD_DXA_VIDEODECODER = ChromeThreadDescriptor_ThreadType_THREAD_DXA_VIDEODECODER;
+  static const ThreadType THREAD_BROWSER_WATCHDOG = ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_WATCHDOG;
+  static const ThreadType THREAD_WEBRTC_NETWORK = ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_NETWORK;
+  static const ThreadType THREAD_WINDOW_OWNER = ChromeThreadDescriptor_ThreadType_THREAD_WINDOW_OWNER;
+  static const ThreadType THREAD_WEBRTC_SIGNALING = ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_SIGNALING;
+  static const ThreadType THREAD_WEBRTC_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_WORKER;
+  static const ThreadType THREAD_PPAPI_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_PPAPI_MAIN;
+  static const ThreadType THREAD_GPU_WATCHDOG = ChromeThreadDescriptor_ThreadType_THREAD_GPU_WATCHDOG;
+  static const ThreadType THREAD_SWAPPER = ChromeThreadDescriptor_ThreadType_THREAD_SWAPPER;
+  static const ThreadType THREAD_GAMEPAD_POLLING = ChromeThreadDescriptor_ThreadType_THREAD_GAMEPAD_POLLING;
+  static const ThreadType THREAD_WEBCRYPTO = ChromeThreadDescriptor_ThreadType_THREAD_WEBCRYPTO;
+  static const ThreadType THREAD_DATABASE = ChromeThreadDescriptor_ThreadType_THREAD_DATABASE;
+  static const ThreadType THREAD_PROXYRESOLVER = ChromeThreadDescriptor_ThreadType_THREAD_PROXYRESOLVER;
+  static const ThreadType THREAD_DEVTOOLSADB = ChromeThreadDescriptor_ThreadType_THREAD_DEVTOOLSADB;
+  static const ThreadType THREAD_NETWORKCONFIGWATCHER = ChromeThreadDescriptor_ThreadType_THREAD_NETWORKCONFIGWATCHER;
+  static const ThreadType THREAD_WASAPI_RENDER = ChromeThreadDescriptor_ThreadType_THREAD_WASAPI_RENDER;
+  static const ThreadType THREAD_MEMORY_INFRA = ChromeThreadDescriptor_ThreadType_THREAD_MEMORY_INFRA;
+  static const ThreadType THREAD_SAMPLING_PROFILER = ChromeThreadDescriptor_ThreadType_THREAD_SAMPLING_PROFILER;
+
+  using FieldMetadata_ThreadType =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ChromeThreadDescriptor_ThreadType,
+      ChromeThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadType kThreadType() { return {}; }
+  void set_thread_type(::perfetto::protos::pbzero::ChromeThreadDescriptor_ThreadType value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LegacySortIndex =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ChromeThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LegacySortIndex kLegacySortIndex() { return {}; }
+  void set_legacy_sort_index(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LegacySortIndex::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_user_event.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_USER_EVENT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_USER_EVENT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class ChromeUserEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeUserEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeUserEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeUserEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_action() const { return at<1>().valid(); }
+  ::protozero::ConstChars action() const { return at<1>().as_string(); }
+  bool has_action_hash() const { return at<2>().valid(); }
+  uint64_t action_hash() const { return at<2>().as_uint64(); }
+};
+
+class ChromeUserEvent : public ::protozero::Message {
+ public:
+  using Decoder = ChromeUserEvent_Decoder;
+  enum : int32_t {
+    kActionFieldNumber = 1,
+    kActionHashFieldNumber = 2,
+  };
+
+  using FieldMetadata_Action =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ChromeUserEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Action kAction() { return {}; }
+  void set_action(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Action::kFieldId, data, size);
+  }
+  void set_action(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Action::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ActionHash =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ChromeUserEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ActionHash kActionHash() { return {}; }
+  void set_action_hash(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ActionHash::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_window_handle_event_info.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_WINDOW_HANDLE_EVENT_INFO_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_WINDOW_HANDLE_EVENT_INFO_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class ChromeWindowHandleEventInfo_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ChromeWindowHandleEventInfo_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ChromeWindowHandleEventInfo_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ChromeWindowHandleEventInfo_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_dpi() const { return at<1>().valid(); }
+  uint32_t dpi() const { return at<1>().as_uint32(); }
+  bool has_message_id() const { return at<2>().valid(); }
+  uint32_t message_id() const { return at<2>().as_uint32(); }
+  bool has_hwnd_ptr() const { return at<3>().valid(); }
+  uint64_t hwnd_ptr() const { return at<3>().as_uint64(); }
+};
+
+class ChromeWindowHandleEventInfo : public ::protozero::Message {
+ public:
+  using Decoder = ChromeWindowHandleEventInfo_Decoder;
+  enum : int32_t {
+    kDpiFieldNumber = 1,
+    kMessageIdFieldNumber = 2,
+    kHwndPtrFieldNumber = 3,
+  };
+
+  using FieldMetadata_Dpi =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ChromeWindowHandleEventInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Dpi kDpi() { return {}; }
+  void set_dpi(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Dpi::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_MessageId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ChromeWindowHandleEventInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MessageId kMessageId() { return {}; }
+  void set_message_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_MessageId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HwndPtr =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kFixed64,
+      uint64_t,
+      ChromeWindowHandleEventInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HwndPtr kHwndPtr() { return {}; }
+  void set_hwnd_ptr(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_HwndPtr::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kFixed64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/counter_descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_COUNTER_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_COUNTER_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum CounterDescriptor_BuiltinCounterType : int32_t;
+enum CounterDescriptor_Unit : int32_t;
+
+enum CounterDescriptor_BuiltinCounterType : int32_t {
+  CounterDescriptor_BuiltinCounterType_COUNTER_UNSPECIFIED = 0,
+  CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_TIME_NS = 1,
+  CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_INSTRUCTION_COUNT = 2,
+};
+
+const CounterDescriptor_BuiltinCounterType CounterDescriptor_BuiltinCounterType_MIN = CounterDescriptor_BuiltinCounterType_COUNTER_UNSPECIFIED;
+const CounterDescriptor_BuiltinCounterType CounterDescriptor_BuiltinCounterType_MAX = CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_INSTRUCTION_COUNT;
+
+enum CounterDescriptor_Unit : int32_t {
+  CounterDescriptor_Unit_UNIT_UNSPECIFIED = 0,
+  CounterDescriptor_Unit_UNIT_TIME_NS = 1,
+  CounterDescriptor_Unit_UNIT_COUNT = 2,
+  CounterDescriptor_Unit_UNIT_SIZE_BYTES = 3,
+};
+
+const CounterDescriptor_Unit CounterDescriptor_Unit_MIN = CounterDescriptor_Unit_UNIT_UNSPECIFIED;
+const CounterDescriptor_Unit CounterDescriptor_Unit_MAX = CounterDescriptor_Unit_UNIT_SIZE_BYTES;
+
+class CounterDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  CounterDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CounterDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CounterDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_type() const { return at<1>().valid(); }
+  int32_t type() const { return at<1>().as_int32(); }
+  bool has_categories() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> categories() const { return GetRepeated<::protozero::ConstChars>(2); }
+  bool has_unit() const { return at<3>().valid(); }
+  int32_t unit() const { return at<3>().as_int32(); }
+  bool has_unit_name() const { return at<6>().valid(); }
+  ::protozero::ConstChars unit_name() const { return at<6>().as_string(); }
+  bool has_unit_multiplier() const { return at<4>().valid(); }
+  int64_t unit_multiplier() const { return at<4>().as_int64(); }
+  bool has_is_incremental() const { return at<5>().valid(); }
+  bool is_incremental() const { return at<5>().as_bool(); }
+};
+
+class CounterDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = CounterDescriptor_Decoder;
+  enum : int32_t {
+    kTypeFieldNumber = 1,
+    kCategoriesFieldNumber = 2,
+    kUnitFieldNumber = 3,
+    kUnitNameFieldNumber = 6,
+    kUnitMultiplierFieldNumber = 4,
+    kIsIncrementalFieldNumber = 5,
+  };
+  using BuiltinCounterType = ::perfetto::protos::pbzero::CounterDescriptor_BuiltinCounterType;
+  using Unit = ::perfetto::protos::pbzero::CounterDescriptor_Unit;
+  static const BuiltinCounterType COUNTER_UNSPECIFIED = CounterDescriptor_BuiltinCounterType_COUNTER_UNSPECIFIED;
+  static const BuiltinCounterType COUNTER_THREAD_TIME_NS = CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_TIME_NS;
+  static const BuiltinCounterType COUNTER_THREAD_INSTRUCTION_COUNT = CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_INSTRUCTION_COUNT;
+  static const Unit UNIT_UNSPECIFIED = CounterDescriptor_Unit_UNIT_UNSPECIFIED;
+  static const Unit UNIT_TIME_NS = CounterDescriptor_Unit_UNIT_TIME_NS;
+  static const Unit UNIT_COUNT = CounterDescriptor_Unit_UNIT_COUNT;
+  static const Unit UNIT_SIZE_BYTES = CounterDescriptor_Unit_UNIT_SIZE_BYTES;
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::CounterDescriptor_BuiltinCounterType,
+      CounterDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(::perfetto::protos::pbzero::CounterDescriptor_BuiltinCounterType value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Categories =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      CounterDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Categories kCategories() { return {}; }
+  void add_categories(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Categories::kFieldId, data, size);
+  }
+  void add_categories(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Categories::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Unit =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::CounterDescriptor_Unit,
+      CounterDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Unit kUnit() { return {}; }
+  void set_unit(::perfetto::protos::pbzero::CounterDescriptor_Unit value) {
+    static constexpr uint32_t field_id = FieldMetadata_Unit::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UnitName =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      CounterDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UnitName kUnitName() { return {}; }
+  void set_unit_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_UnitName::kFieldId, data, size);
+  }
+  void set_unit_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_UnitName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UnitMultiplier =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      CounterDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UnitMultiplier kUnitMultiplier() { return {}; }
+  void set_unit_multiplier(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UnitMultiplier::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IsIncremental =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      CounterDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IsIncremental kIsIncremental() { return {}; }
+  void set_is_incremental(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_IsIncremental::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/debug_annotation.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_DEBUG_ANNOTATION_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_DEBUG_ANNOTATION_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class DebugAnnotation;
+class DebugAnnotation_NestedValue;
+enum DebugAnnotation_NestedValue_NestedType : int32_t;
+
+enum DebugAnnotation_NestedValue_NestedType : int32_t {
+  DebugAnnotation_NestedValue_NestedType_UNSPECIFIED = 0,
+  DebugAnnotation_NestedValue_NestedType_DICT = 1,
+  DebugAnnotation_NestedValue_NestedType_ARRAY = 2,
+};
+
+const DebugAnnotation_NestedValue_NestedType DebugAnnotation_NestedValue_NestedType_MIN = DebugAnnotation_NestedValue_NestedType_UNSPECIFIED;
+const DebugAnnotation_NestedValue_NestedType DebugAnnotation_NestedValue_NestedType_MAX = DebugAnnotation_NestedValue_NestedType_ARRAY;
+
+class DebugAnnotationName_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  DebugAnnotationName_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit DebugAnnotationName_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit DebugAnnotationName_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_iid() const { return at<1>().valid(); }
+  uint64_t iid() const { return at<1>().as_uint64(); }
+  bool has_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars name() const { return at<2>().as_string(); }
+};
+
+class DebugAnnotationName : public ::protozero::Message {
+ public:
+  using Decoder = DebugAnnotationName_Decoder;
+  enum : int32_t {
+    kIidFieldNumber = 1,
+    kNameFieldNumber = 2,
+  };
+
+  using FieldMetadata_Iid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      DebugAnnotationName>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Iid kIid() { return {}; }
+  void set_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Iid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DebugAnnotationName>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class DebugAnnotation_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/12, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  DebugAnnotation_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit DebugAnnotation_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit DebugAnnotation_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name_iid() const { return at<1>().valid(); }
+  uint64_t name_iid() const { return at<1>().as_uint64(); }
+  bool has_name() const { return at<10>().valid(); }
+  ::protozero::ConstChars name() const { return at<10>().as_string(); }
+  bool has_bool_value() const { return at<2>().valid(); }
+  bool bool_value() const { return at<2>().as_bool(); }
+  bool has_uint_value() const { return at<3>().valid(); }
+  uint64_t uint_value() const { return at<3>().as_uint64(); }
+  bool has_int_value() const { return at<4>().valid(); }
+  int64_t int_value() const { return at<4>().as_int64(); }
+  bool has_double_value() const { return at<5>().valid(); }
+  double double_value() const { return at<5>().as_double(); }
+  bool has_string_value() const { return at<6>().valid(); }
+  ::protozero::ConstChars string_value() const { return at<6>().as_string(); }
+  bool has_pointer_value() const { return at<7>().valid(); }
+  uint64_t pointer_value() const { return at<7>().as_uint64(); }
+  bool has_nested_value() const { return at<8>().valid(); }
+  ::protozero::ConstBytes nested_value() const { return at<8>().as_bytes(); }
+  bool has_legacy_json_value() const { return at<9>().valid(); }
+  ::protozero::ConstChars legacy_json_value() const { return at<9>().as_string(); }
+  bool has_dict_entries() const { return at<11>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> dict_entries() const { return GetRepeated<::protozero::ConstBytes>(11); }
+  bool has_array_values() const { return at<12>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> array_values() const { return GetRepeated<::protozero::ConstBytes>(12); }
+};
+
+class DebugAnnotation : public ::protozero::Message {
+ public:
+  using Decoder = DebugAnnotation_Decoder;
+  enum : int32_t {
+    kNameIidFieldNumber = 1,
+    kNameFieldNumber = 10,
+    kBoolValueFieldNumber = 2,
+    kUintValueFieldNumber = 3,
+    kIntValueFieldNumber = 4,
+    kDoubleValueFieldNumber = 5,
+    kStringValueFieldNumber = 6,
+    kPointerValueFieldNumber = 7,
+    kNestedValueFieldNumber = 8,
+    kLegacyJsonValueFieldNumber = 9,
+    kDictEntriesFieldNumber = 11,
+    kArrayValuesFieldNumber = 12,
+  };
+  using NestedValue = ::perfetto::protos::pbzero::DebugAnnotation_NestedValue;
+
+  using FieldMetadata_NameIid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NameIid kNameIid() { return {}; }
+  void set_name_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NameIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BoolValue =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BoolValue kBoolValue() { return {}; }
+  void set_bool_value(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_BoolValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UintValue =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UintValue kUintValue() { return {}; }
+  void set_uint_value(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UintValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IntValue =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IntValue kIntValue() { return {}; }
+  void set_int_value(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IntValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DoubleValue =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DoubleValue kDoubleValue() { return {}; }
+  void set_double_value(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_DoubleValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StringValue =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StringValue kStringValue() { return {}; }
+  void set_string_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_StringValue::kFieldId, data, size);
+  }
+  void set_string_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_StringValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PointerValue =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PointerValue kPointerValue() { return {}; }
+  void set_pointer_value(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PointerValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NestedValue =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DebugAnnotation_NestedValue,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NestedValue kNestedValue() { return {}; }
+  template <typename T = DebugAnnotation_NestedValue> T* set_nested_value() {
+    return BeginNestedMessage<T>(8);
+  }
+
+
+  using FieldMetadata_LegacyJsonValue =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LegacyJsonValue kLegacyJsonValue() { return {}; }
+  void set_legacy_json_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_LegacyJsonValue::kFieldId, data, size);
+  }
+  void set_legacy_json_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_LegacyJsonValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DictEntries =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DebugAnnotation,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DictEntries kDictEntries() { return {}; }
+  template <typename T = DebugAnnotation> T* add_dict_entries() {
+    return BeginNestedMessage<T>(11);
+  }
+
+
+  using FieldMetadata_ArrayValues =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DebugAnnotation,
+      DebugAnnotation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ArrayValues kArrayValues() { return {}; }
+  template <typename T = DebugAnnotation> T* add_array_values() {
+    return BeginNestedMessage<T>(12);
+  }
+
+};
+
+class DebugAnnotation_NestedValue_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  DebugAnnotation_NestedValue_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit DebugAnnotation_NestedValue_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit DebugAnnotation_NestedValue_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_nested_type() const { return at<1>().valid(); }
+  int32_t nested_type() const { return at<1>().as_int32(); }
+  bool has_dict_keys() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> dict_keys() const { return GetRepeated<::protozero::ConstChars>(2); }
+  bool has_dict_values() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> dict_values() const { return GetRepeated<::protozero::ConstBytes>(3); }
+  bool has_array_values() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> array_values() const { return GetRepeated<::protozero::ConstBytes>(4); }
+  bool has_int_value() const { return at<5>().valid(); }
+  int64_t int_value() const { return at<5>().as_int64(); }
+  bool has_double_value() const { return at<6>().valid(); }
+  double double_value() const { return at<6>().as_double(); }
+  bool has_bool_value() const { return at<7>().valid(); }
+  bool bool_value() const { return at<7>().as_bool(); }
+  bool has_string_value() const { return at<8>().valid(); }
+  ::protozero::ConstChars string_value() const { return at<8>().as_string(); }
+};
+
+class DebugAnnotation_NestedValue : public ::protozero::Message {
+ public:
+  using Decoder = DebugAnnotation_NestedValue_Decoder;
+  enum : int32_t {
+    kNestedTypeFieldNumber = 1,
+    kDictKeysFieldNumber = 2,
+    kDictValuesFieldNumber = 3,
+    kArrayValuesFieldNumber = 4,
+    kIntValueFieldNumber = 5,
+    kDoubleValueFieldNumber = 6,
+    kBoolValueFieldNumber = 7,
+    kStringValueFieldNumber = 8,
+  };
+  using NestedType = ::perfetto::protos::pbzero::DebugAnnotation_NestedValue_NestedType;
+  static const NestedType UNSPECIFIED = DebugAnnotation_NestedValue_NestedType_UNSPECIFIED;
+  static const NestedType DICT = DebugAnnotation_NestedValue_NestedType_DICT;
+  static const NestedType ARRAY = DebugAnnotation_NestedValue_NestedType_ARRAY;
+
+  using FieldMetadata_NestedType =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::DebugAnnotation_NestedValue_NestedType,
+      DebugAnnotation_NestedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NestedType kNestedType() { return {}; }
+  void set_nested_type(::perfetto::protos::pbzero::DebugAnnotation_NestedValue_NestedType value) {
+    static constexpr uint32_t field_id = FieldMetadata_NestedType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DictKeys =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DebugAnnotation_NestedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DictKeys kDictKeys() { return {}; }
+  void add_dict_keys(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_DictKeys::kFieldId, data, size);
+  }
+  void add_dict_keys(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_DictKeys::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DictValues =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DebugAnnotation_NestedValue,
+      DebugAnnotation_NestedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DictValues kDictValues() { return {}; }
+  template <typename T = DebugAnnotation_NestedValue> T* add_dict_values() {
+    return BeginNestedMessage<T>(3);
+  }
+
+
+  using FieldMetadata_ArrayValues =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DebugAnnotation_NestedValue,
+      DebugAnnotation_NestedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ArrayValues kArrayValues() { return {}; }
+  template <typename T = DebugAnnotation_NestedValue> T* add_array_values() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_IntValue =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      DebugAnnotation_NestedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IntValue kIntValue() { return {}; }
+  void set_int_value(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IntValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DoubleValue =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      DebugAnnotation_NestedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DoubleValue kDoubleValue() { return {}; }
+  void set_double_value(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_DoubleValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BoolValue =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      DebugAnnotation_NestedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BoolValue kBoolValue() { return {}; }
+  void set_bool_value(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_BoolValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StringValue =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      DebugAnnotation_NestedValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StringValue kStringValue() { return {}; }
+  void set_string_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_StringValue::kFieldId, data, size);
+  }
+  void set_string_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_StringValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/log_message.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_LOG_MESSAGE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_LOG_MESSAGE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class LogMessageBody_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  LogMessageBody_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit LogMessageBody_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit LogMessageBody_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_iid() const { return at<1>().valid(); }
+  uint64_t iid() const { return at<1>().as_uint64(); }
+  bool has_body() const { return at<2>().valid(); }
+  ::protozero::ConstChars body() const { return at<2>().as_string(); }
+};
+
+class LogMessageBody : public ::protozero::Message {
+ public:
+  using Decoder = LogMessageBody_Decoder;
+  enum : int32_t {
+    kIidFieldNumber = 1,
+    kBodyFieldNumber = 2,
+  };
+
+  using FieldMetadata_Iid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      LogMessageBody>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Iid kIid() { return {}; }
+  void set_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Iid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Body =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      LogMessageBody>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Body kBody() { return {}; }
+  void set_body(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Body::kFieldId, data, size);
+  }
+  void set_body(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Body::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class LogMessage_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  LogMessage_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit LogMessage_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit LogMessage_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_source_location_iid() const { return at<1>().valid(); }
+  uint64_t source_location_iid() const { return at<1>().as_uint64(); }
+  bool has_body_iid() const { return at<2>().valid(); }
+  uint64_t body_iid() const { return at<2>().as_uint64(); }
+};
+
+class LogMessage : public ::protozero::Message {
+ public:
+  using Decoder = LogMessage_Decoder;
+  enum : int32_t {
+    kSourceLocationIidFieldNumber = 1,
+    kBodyIidFieldNumber = 2,
+  };
+
+  using FieldMetadata_SourceLocationIid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      LogMessage>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SourceLocationIid kSourceLocationIid() { return {}; }
+  void set_source_location_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SourceLocationIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BodyIid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      LogMessage>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BodyIid kBodyIid() { return {}; }
+  void set_body_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BodyIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/process_descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_PROCESS_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_PROCESS_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum ProcessDescriptor_ChromeProcessType : int32_t;
+
+enum ProcessDescriptor_ChromeProcessType : int32_t {
+  ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED = 0,
+  ProcessDescriptor_ChromeProcessType_PROCESS_BROWSER = 1,
+  ProcessDescriptor_ChromeProcessType_PROCESS_RENDERER = 2,
+  ProcessDescriptor_ChromeProcessType_PROCESS_UTILITY = 3,
+  ProcessDescriptor_ChromeProcessType_PROCESS_ZYGOTE = 4,
+  ProcessDescriptor_ChromeProcessType_PROCESS_SANDBOX_HELPER = 5,
+  ProcessDescriptor_ChromeProcessType_PROCESS_GPU = 6,
+  ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_PLUGIN = 7,
+  ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER = 8,
+};
+
+const ProcessDescriptor_ChromeProcessType ProcessDescriptor_ChromeProcessType_MIN = ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED;
+const ProcessDescriptor_ChromeProcessType ProcessDescriptor_ChromeProcessType_MAX = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER;
+
+class ProcessDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/7, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ProcessDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ProcessDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ProcessDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  int32_t pid() const { return at<1>().as_int32(); }
+  bool has_cmdline() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> cmdline() const { return GetRepeated<::protozero::ConstChars>(2); }
+  bool has_process_name() const { return at<6>().valid(); }
+  ::protozero::ConstChars process_name() const { return at<6>().as_string(); }
+  bool has_process_priority() const { return at<5>().valid(); }
+  int32_t process_priority() const { return at<5>().as_int32(); }
+  bool has_start_timestamp_ns() const { return at<7>().valid(); }
+  int64_t start_timestamp_ns() const { return at<7>().as_int64(); }
+  bool has_chrome_process_type() const { return at<4>().valid(); }
+  int32_t chrome_process_type() const { return at<4>().as_int32(); }
+  bool has_legacy_sort_index() const { return at<3>().valid(); }
+  int32_t legacy_sort_index() const { return at<3>().as_int32(); }
+};
+
+class ProcessDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = ProcessDescriptor_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kCmdlineFieldNumber = 2,
+    kProcessNameFieldNumber = 6,
+    kProcessPriorityFieldNumber = 5,
+    kStartTimestampNsFieldNumber = 7,
+    kChromeProcessTypeFieldNumber = 4,
+    kLegacySortIndexFieldNumber = 3,
+  };
+  using ChromeProcessType = ::perfetto::protos::pbzero::ProcessDescriptor_ChromeProcessType;
+  static const ChromeProcessType PROCESS_UNSPECIFIED = ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED;
+  static const ChromeProcessType PROCESS_BROWSER = ProcessDescriptor_ChromeProcessType_PROCESS_BROWSER;
+  static const ChromeProcessType PROCESS_RENDERER = ProcessDescriptor_ChromeProcessType_PROCESS_RENDERER;
+  static const ChromeProcessType PROCESS_UTILITY = ProcessDescriptor_ChromeProcessType_PROCESS_UTILITY;
+  static const ChromeProcessType PROCESS_ZYGOTE = ProcessDescriptor_ChromeProcessType_PROCESS_ZYGOTE;
+  static const ChromeProcessType PROCESS_SANDBOX_HELPER = ProcessDescriptor_ChromeProcessType_PROCESS_SANDBOX_HELPER;
+  static const ChromeProcessType PROCESS_GPU = ProcessDescriptor_ChromeProcessType_PROCESS_GPU;
+  static const ChromeProcessType PROCESS_PPAPI_PLUGIN = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_PLUGIN;
+  static const ChromeProcessType PROCESS_PPAPI_BROKER = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER;
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cmdline =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cmdline kCmdline() { return {}; }
+  void add_cmdline(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Cmdline::kFieldId, data, size);
+  }
+  void add_cmdline(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cmdline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProcessName =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessName kProcessName() { return {}; }
+  void set_process_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ProcessName::kFieldId, data, size);
+  }
+  void set_process_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProcessName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProcessPriority =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessPriority kProcessPriority() { return {}; }
+  void set_process_priority(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ProcessPriority::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_StartTimestampNs =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StartTimestampNs kStartTimestampNs() { return {}; }
+  void set_start_timestamp_ns(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_StartTimestampNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChromeProcessType =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ProcessDescriptor_ChromeProcessType,
+      ProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeProcessType kChromeProcessType() { return {}; }
+  void set_chrome_process_type(::perfetto::protos::pbzero::ProcessDescriptor_ChromeProcessType value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChromeProcessType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LegacySortIndex =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ProcessDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LegacySortIndex kLegacySortIndex() { return {}; }
+  void set_legacy_sort_index(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LegacySortIndex::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/source_location.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_SOURCE_LOCATION_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_SOURCE_LOCATION_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class SourceLocation_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SourceLocation_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SourceLocation_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SourceLocation_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_iid() const { return at<1>().valid(); }
+  uint64_t iid() const { return at<1>().as_uint64(); }
+  bool has_file_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars file_name() const { return at<2>().as_string(); }
+  bool has_function_name() const { return at<3>().valid(); }
+  ::protozero::ConstChars function_name() const { return at<3>().as_string(); }
+  bool has_line_number() const { return at<4>().valid(); }
+  uint32_t line_number() const { return at<4>().as_uint32(); }
+};
+
+class SourceLocation : public ::protozero::Message {
+ public:
+  using Decoder = SourceLocation_Decoder;
+  enum : int32_t {
+    kIidFieldNumber = 1,
+    kFileNameFieldNumber = 2,
+    kFunctionNameFieldNumber = 3,
+    kLineNumberFieldNumber = 4,
+  };
+
+  using FieldMetadata_Iid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SourceLocation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Iid kIid() { return {}; }
+  void set_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Iid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FileName =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SourceLocation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FileName kFileName() { return {}; }
+  void set_file_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_FileName::kFieldId, data, size);
+  }
+  void set_file_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_FileName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FunctionName =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SourceLocation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FunctionName kFunctionName() { return {}; }
+  void set_function_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_FunctionName::kFieldId, data, size);
+  }
+  void set_function_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_FunctionName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LineNumber =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SourceLocation>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LineNumber kLineNumber() { return {}; }
+  void set_line_number(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LineNumber::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/task_execution.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TASK_EXECUTION_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TASK_EXECUTION_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class TaskExecution_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TaskExecution_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TaskExecution_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TaskExecution_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_posted_from_iid() const { return at<1>().valid(); }
+  uint64_t posted_from_iid() const { return at<1>().as_uint64(); }
+};
+
+class TaskExecution : public ::protozero::Message {
+ public:
+  using Decoder = TaskExecution_Decoder;
+  enum : int32_t {
+    kPostedFromIidFieldNumber = 1,
+  };
+
+  using FieldMetadata_PostedFromIid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TaskExecution>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PostedFromIid kPostedFromIid() { return {}; }
+  void set_posted_from_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PostedFromIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/thread_descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_THREAD_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_THREAD_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+enum ThreadDescriptor_ChromeThreadType : int32_t;
+
+enum ThreadDescriptor_ChromeThreadType : int32_t {
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED = 0,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MAIN = 1,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_IO = 2,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_WORKER = 3,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FG_WORKER = 4,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FB_BLOCKING = 5,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_BLOCKING = 6,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_SERVICE = 7,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR = 8,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_VIZ_COMPOSITOR = 9,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR_WORKER = 10,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SERVICE_WORKER = 11,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MEMORY_INFRA = 50,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER = 51,
+};
+
+const ThreadDescriptor_ChromeThreadType ThreadDescriptor_ChromeThreadType_MIN = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED;
+const ThreadDescriptor_ChromeThreadType ThreadDescriptor_ChromeThreadType_MAX = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER;
+
+class ThreadDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ThreadDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ThreadDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ThreadDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  int32_t pid() const { return at<1>().as_int32(); }
+  bool has_tid() const { return at<2>().valid(); }
+  int32_t tid() const { return at<2>().as_int32(); }
+  bool has_thread_name() const { return at<5>().valid(); }
+  ::protozero::ConstChars thread_name() const { return at<5>().as_string(); }
+  bool has_chrome_thread_type() const { return at<4>().valid(); }
+  int32_t chrome_thread_type() const { return at<4>().as_int32(); }
+  bool has_reference_timestamp_us() const { return at<6>().valid(); }
+  int64_t reference_timestamp_us() const { return at<6>().as_int64(); }
+  bool has_reference_thread_time_us() const { return at<7>().valid(); }
+  int64_t reference_thread_time_us() const { return at<7>().as_int64(); }
+  bool has_reference_thread_instruction_count() const { return at<8>().valid(); }
+  int64_t reference_thread_instruction_count() const { return at<8>().as_int64(); }
+  bool has_legacy_sort_index() const { return at<3>().valid(); }
+  int32_t legacy_sort_index() const { return at<3>().as_int32(); }
+};
+
+class ThreadDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = ThreadDescriptor_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kTidFieldNumber = 2,
+    kThreadNameFieldNumber = 5,
+    kChromeThreadTypeFieldNumber = 4,
+    kReferenceTimestampUsFieldNumber = 6,
+    kReferenceThreadTimeUsFieldNumber = 7,
+    kReferenceThreadInstructionCountFieldNumber = 8,
+    kLegacySortIndexFieldNumber = 3,
+  };
+  using ChromeThreadType = ::perfetto::protos::pbzero::ThreadDescriptor_ChromeThreadType;
+  static const ChromeThreadType CHROME_THREAD_UNSPECIFIED = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED;
+  static const ChromeThreadType CHROME_THREAD_MAIN = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MAIN;
+  static const ChromeThreadType CHROME_THREAD_IO = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_IO;
+  static const ChromeThreadType CHROME_THREAD_POOL_BG_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_WORKER;
+  static const ChromeThreadType CHROME_THREAD_POOL_FG_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FG_WORKER;
+  static const ChromeThreadType CHROME_THREAD_POOL_FB_BLOCKING = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FB_BLOCKING;
+  static const ChromeThreadType CHROME_THREAD_POOL_BG_BLOCKING = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_BLOCKING;
+  static const ChromeThreadType CHROME_THREAD_POOL_SERVICE = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_SERVICE;
+  static const ChromeThreadType CHROME_THREAD_COMPOSITOR = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR;
+  static const ChromeThreadType CHROME_THREAD_VIZ_COMPOSITOR = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_VIZ_COMPOSITOR;
+  static const ChromeThreadType CHROME_THREAD_COMPOSITOR_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR_WORKER;
+  static const ChromeThreadType CHROME_THREAD_SERVICE_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SERVICE_WORKER;
+  static const ChromeThreadType CHROME_THREAD_MEMORY_INFRA = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MEMORY_INFRA;
+  static const ChromeThreadType CHROME_THREAD_SAMPLING_PROFILER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER;
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Tid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tid kTid() { return {}; }
+  void set_tid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadName =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadName kThreadName() { return {}; }
+  void set_thread_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ThreadName::kFieldId, data, size);
+  }
+  void set_thread_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChromeThreadType =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::ThreadDescriptor_ChromeThreadType,
+      ThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeThreadType kChromeThreadType() { return {}; }
+  void set_chrome_thread_type(::perfetto::protos::pbzero::ThreadDescriptor_ChromeThreadType value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChromeThreadType::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReferenceTimestampUs =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReferenceTimestampUs kReferenceTimestampUs() { return {}; }
+  void set_reference_timestamp_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReferenceTimestampUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReferenceThreadTimeUs =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReferenceThreadTimeUs kReferenceThreadTimeUs() { return {}; }
+  void set_reference_thread_time_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReferenceThreadTimeUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReferenceThreadInstructionCount =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReferenceThreadInstructionCount kReferenceThreadInstructionCount() { return {}; }
+  void set_reference_thread_instruction_count(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReferenceThreadInstructionCount::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LegacySortIndex =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ThreadDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LegacySortIndex kLegacySortIndex() { return {}; }
+  void set_legacy_sort_index(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LegacySortIndex::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/track_descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class ChromeProcessDescriptor;
+class ChromeThreadDescriptor;
+class CounterDescriptor;
+class ProcessDescriptor;
+class ThreadDescriptor;
+
+class TrackDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TrackDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TrackDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TrackDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_uuid() const { return at<1>().valid(); }
+  uint64_t uuid() const { return at<1>().as_uint64(); }
+  bool has_parent_uuid() const { return at<5>().valid(); }
+  uint64_t parent_uuid() const { return at<5>().as_uint64(); }
+  bool has_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars name() const { return at<2>().as_string(); }
+  bool has_process() const { return at<3>().valid(); }
+  ::protozero::ConstBytes process() const { return at<3>().as_bytes(); }
+  bool has_chrome_process() const { return at<6>().valid(); }
+  ::protozero::ConstBytes chrome_process() const { return at<6>().as_bytes(); }
+  bool has_thread() const { return at<4>().valid(); }
+  ::protozero::ConstBytes thread() const { return at<4>().as_bytes(); }
+  bool has_chrome_thread() const { return at<7>().valid(); }
+  ::protozero::ConstBytes chrome_thread() const { return at<7>().as_bytes(); }
+  bool has_counter() const { return at<8>().valid(); }
+  ::protozero::ConstBytes counter() const { return at<8>().as_bytes(); }
+};
+
+class TrackDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = TrackDescriptor_Decoder;
+  enum : int32_t {
+    kUuidFieldNumber = 1,
+    kParentUuidFieldNumber = 5,
+    kNameFieldNumber = 2,
+    kProcessFieldNumber = 3,
+    kChromeProcessFieldNumber = 6,
+    kThreadFieldNumber = 4,
+    kChromeThreadFieldNumber = 7,
+    kCounterFieldNumber = 8,
+  };
+
+  using FieldMetadata_Uuid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Uuid kUuid() { return {}; }
+  void set_uuid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Uuid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ParentUuid =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ParentUuid kParentUuid() { return {}; }
+  void set_parent_uuid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ParentUuid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Process =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProcessDescriptor,
+      TrackDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Process kProcess() { return {}; }
+  template <typename T = ProcessDescriptor> T* set_process() {
+    return BeginNestedMessage<T>(3);
+  }
+
+
+  using FieldMetadata_ChromeProcess =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeProcessDescriptor,
+      TrackDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeProcess kChromeProcess() { return {}; }
+  template <typename T = ChromeProcessDescriptor> T* set_chrome_process() {
+    return BeginNestedMessage<T>(6);
+  }
+
+
+  using FieldMetadata_Thread =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ThreadDescriptor,
+      TrackDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Thread kThread() { return {}; }
+  template <typename T = ThreadDescriptor> T* set_thread() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_ChromeThread =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeThreadDescriptor,
+      TrackDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeThread kChromeThread() { return {}; }
+  template <typename T = ChromeThreadDescriptor> T* set_chrome_thread() {
+    return BeginNestedMessage<T>(7);
+  }
+
+
+  using FieldMetadata_Counter =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CounterDescriptor,
+      TrackDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Counter kCounter() { return {}; }
+  template <typename T = CounterDescriptor> T* set_counter() {
+    return BeginNestedMessage<T>(8);
+  }
+
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/track_event.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_EVENT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_EVENT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class ChromeApplicationStateInfo;
+class ChromeCompositorSchedulerState;
+class ChromeContentSettingsEventInfo;
+class ChromeFrameReporter;
+class ChromeHistogramSample;
+class ChromeKeyedService;
+class ChromeLatencyInfo;
+class ChromeLegacyIpc;
+class ChromeMessagePump;
+class ChromeMojoEventInfo;
+class ChromeRendererSchedulerState;
+class ChromeUserEvent;
+class ChromeWindowHandleEventInfo;
+class DebugAnnotation;
+class LogMessage;
+class SourceLocation;
+class TaskExecution;
+class TrackEvent_LegacyEvent;
+enum TrackEvent_LegacyEvent_FlowDirection : int32_t;
+enum TrackEvent_LegacyEvent_InstantEventScope : int32_t;
+enum TrackEvent_Type : int32_t;
+
+enum TrackEvent_Type : int32_t {
+  TrackEvent_Type_TYPE_UNSPECIFIED = 0,
+  TrackEvent_Type_TYPE_SLICE_BEGIN = 1,
+  TrackEvent_Type_TYPE_SLICE_END = 2,
+  TrackEvent_Type_TYPE_INSTANT = 3,
+  TrackEvent_Type_TYPE_COUNTER = 4,
+};
+
+const TrackEvent_Type TrackEvent_Type_MIN = TrackEvent_Type_TYPE_UNSPECIFIED;
+const TrackEvent_Type TrackEvent_Type_MAX = TrackEvent_Type_TYPE_COUNTER;
+
+enum TrackEvent_LegacyEvent_FlowDirection : int32_t {
+  TrackEvent_LegacyEvent_FlowDirection_FLOW_UNSPECIFIED = 0,
+  TrackEvent_LegacyEvent_FlowDirection_FLOW_IN = 1,
+  TrackEvent_LegacyEvent_FlowDirection_FLOW_OUT = 2,
+  TrackEvent_LegacyEvent_FlowDirection_FLOW_INOUT = 3,
+};
+
+const TrackEvent_LegacyEvent_FlowDirection TrackEvent_LegacyEvent_FlowDirection_MIN = TrackEvent_LegacyEvent_FlowDirection_FLOW_UNSPECIFIED;
+const TrackEvent_LegacyEvent_FlowDirection TrackEvent_LegacyEvent_FlowDirection_MAX = TrackEvent_LegacyEvent_FlowDirection_FLOW_INOUT;
+
+enum TrackEvent_LegacyEvent_InstantEventScope : int32_t {
+  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_UNSPECIFIED = 0,
+  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_GLOBAL = 1,
+  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_PROCESS = 2,
+  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_THREAD = 3,
+};
+
+const TrackEvent_LegacyEvent_InstantEventScope TrackEvent_LegacyEvent_InstantEventScope_MIN = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_UNSPECIFIED;
+const TrackEvent_LegacyEvent_InstantEventScope TrackEvent_LegacyEvent_InstantEventScope_MAX = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_THREAD;
+
+class EventName_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  EventName_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit EventName_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit EventName_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_iid() const { return at<1>().valid(); }
+  uint64_t iid() const { return at<1>().as_uint64(); }
+  bool has_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars name() const { return at<2>().as_string(); }
+};
+
+class EventName : public ::protozero::Message {
+ public:
+  using Decoder = EventName_Decoder;
+  enum : int32_t {
+    kIidFieldNumber = 1,
+    kNameFieldNumber = 2,
+  };
+
+  using FieldMetadata_Iid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      EventName>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Iid kIid() { return {}; }
+  void set_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Iid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      EventName>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class EventCategory_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  EventCategory_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit EventCategory_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit EventCategory_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_iid() const { return at<1>().valid(); }
+  uint64_t iid() const { return at<1>().as_uint64(); }
+  bool has_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars name() const { return at<2>().as_string(); }
+};
+
+class EventCategory : public ::protozero::Message {
+ public:
+  using Decoder = EventCategory_Decoder;
+  enum : int32_t {
+    kIidFieldNumber = 1,
+    kNameFieldNumber = 2,
+  };
+
+  using FieldMetadata_Iid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      EventCategory>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Iid kIid() { return {}; }
+  void set_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Iid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      EventCategory>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TrackEventDefaults_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/45, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TrackEventDefaults_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TrackEventDefaults_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TrackEventDefaults_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_track_uuid() const { return at<11>().valid(); }
+  uint64_t track_uuid() const { return at<11>().as_uint64(); }
+  bool has_extra_counter_track_uuids() const { return at<31>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> extra_counter_track_uuids() const { return GetRepeated<uint64_t>(31); }
+  bool has_extra_double_counter_track_uuids() const { return at<45>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> extra_double_counter_track_uuids() const { return GetRepeated<uint64_t>(45); }
+};
+
+class TrackEventDefaults : public ::protozero::Message {
+ public:
+  using Decoder = TrackEventDefaults_Decoder;
+  enum : int32_t {
+    kTrackUuidFieldNumber = 11,
+    kExtraCounterTrackUuidsFieldNumber = 31,
+    kExtraDoubleCounterTrackUuidsFieldNumber = 45,
+  };
+
+  using FieldMetadata_TrackUuid =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEventDefaults>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrackUuid kTrackUuid() { return {}; }
+  void set_track_uuid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TrackUuid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExtraCounterTrackUuids =
+    ::protozero::proto_utils::FieldMetadata<
+      31,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEventDefaults>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExtraCounterTrackUuids kExtraCounterTrackUuids() { return {}; }
+  void add_extra_counter_track_uuids(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExtraCounterTrackUuids::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExtraDoubleCounterTrackUuids =
+    ::protozero::proto_utils::FieldMetadata<
+      45,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEventDefaults>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExtraDoubleCounterTrackUuids kExtraDoubleCounterTrackUuids() { return {}; }
+  void add_extra_double_counter_track_uuids(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExtraDoubleCounterTrackUuids::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TrackEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/46, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TrackEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TrackEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TrackEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_category_iids() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> category_iids() const { return GetRepeated<uint64_t>(3); }
+  bool has_categories() const { return at<22>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> categories() const { return GetRepeated<::protozero::ConstChars>(22); }
+  bool has_name_iid() const { return at<10>().valid(); }
+  uint64_t name_iid() const { return at<10>().as_uint64(); }
+  bool has_name() const { return at<23>().valid(); }
+  ::protozero::ConstChars name() const { return at<23>().as_string(); }
+  bool has_type() const { return at<9>().valid(); }
+  int32_t type() const { return at<9>().as_int32(); }
+  bool has_track_uuid() const { return at<11>().valid(); }
+  uint64_t track_uuid() const { return at<11>().as_uint64(); }
+  bool has_counter_value() const { return at<30>().valid(); }
+  int64_t counter_value() const { return at<30>().as_int64(); }
+  bool has_double_counter_value() const { return at<44>().valid(); }
+  double double_counter_value() const { return at<44>().as_double(); }
+  bool has_extra_counter_track_uuids() const { return at<31>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> extra_counter_track_uuids() const { return GetRepeated<uint64_t>(31); }
+  bool has_extra_counter_values() const { return at<12>().valid(); }
+  ::protozero::RepeatedFieldIterator<int64_t> extra_counter_values() const { return GetRepeated<int64_t>(12); }
+  bool has_extra_double_counter_track_uuids() const { return at<45>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> extra_double_counter_track_uuids() const { return GetRepeated<uint64_t>(45); }
+  bool has_extra_double_counter_values() const { return at<46>().valid(); }
+  ::protozero::RepeatedFieldIterator<double> extra_double_counter_values() const { return GetRepeated<double>(46); }
+  bool has_flow_ids() const { return at<36>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> flow_ids() const { return GetRepeated<uint64_t>(36); }
+  bool has_terminating_flow_ids() const { return at<42>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> terminating_flow_ids() const { return GetRepeated<uint64_t>(42); }
+  bool has_debug_annotations() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> debug_annotations() const { return GetRepeated<::protozero::ConstBytes>(4); }
+  bool has_task_execution() const { return at<5>().valid(); }
+  ::protozero::ConstBytes task_execution() const { return at<5>().as_bytes(); }
+  bool has_log_message() const { return at<21>().valid(); }
+  ::protozero::ConstBytes log_message() const { return at<21>().as_bytes(); }
+  bool has_cc_scheduler_state() const { return at<24>().valid(); }
+  ::protozero::ConstBytes cc_scheduler_state() const { return at<24>().as_bytes(); }
+  bool has_chrome_user_event() const { return at<25>().valid(); }
+  ::protozero::ConstBytes chrome_user_event() const { return at<25>().as_bytes(); }
+  bool has_chrome_keyed_service() const { return at<26>().valid(); }
+  ::protozero::ConstBytes chrome_keyed_service() const { return at<26>().as_bytes(); }
+  bool has_chrome_legacy_ipc() const { return at<27>().valid(); }
+  ::protozero::ConstBytes chrome_legacy_ipc() const { return at<27>().as_bytes(); }
+  bool has_chrome_histogram_sample() const { return at<28>().valid(); }
+  ::protozero::ConstBytes chrome_histogram_sample() const { return at<28>().as_bytes(); }
+  bool has_chrome_latency_info() const { return at<29>().valid(); }
+  ::protozero::ConstBytes chrome_latency_info() const { return at<29>().as_bytes(); }
+  bool has_chrome_frame_reporter() const { return at<32>().valid(); }
+  ::protozero::ConstBytes chrome_frame_reporter() const { return at<32>().as_bytes(); }
+  bool has_chrome_application_state_info() const { return at<39>().valid(); }
+  ::protozero::ConstBytes chrome_application_state_info() const { return at<39>().as_bytes(); }
+  bool has_chrome_renderer_scheduler_state() const { return at<40>().valid(); }
+  ::protozero::ConstBytes chrome_renderer_scheduler_state() const { return at<40>().as_bytes(); }
+  bool has_chrome_window_handle_event_info() const { return at<41>().valid(); }
+  ::protozero::ConstBytes chrome_window_handle_event_info() const { return at<41>().as_bytes(); }
+  bool has_chrome_content_settings_event_info() const { return at<43>().valid(); }
+  ::protozero::ConstBytes chrome_content_settings_event_info() const { return at<43>().as_bytes(); }
+  bool has_source_location() const { return at<33>().valid(); }
+  ::protozero::ConstBytes source_location() const { return at<33>().as_bytes(); }
+  bool has_source_location_iid() const { return at<34>().valid(); }
+  uint64_t source_location_iid() const { return at<34>().as_uint64(); }
+  bool has_chrome_message_pump() const { return at<35>().valid(); }
+  ::protozero::ConstBytes chrome_message_pump() const { return at<35>().as_bytes(); }
+  bool has_chrome_mojo_event_info() const { return at<38>().valid(); }
+  ::protozero::ConstBytes chrome_mojo_event_info() const { return at<38>().as_bytes(); }
+  bool has_timestamp_delta_us() const { return at<1>().valid(); }
+  int64_t timestamp_delta_us() const { return at<1>().as_int64(); }
+  bool has_timestamp_absolute_us() const { return at<16>().valid(); }
+  int64_t timestamp_absolute_us() const { return at<16>().as_int64(); }
+  bool has_thread_time_delta_us() const { return at<2>().valid(); }
+  int64_t thread_time_delta_us() const { return at<2>().as_int64(); }
+  bool has_thread_time_absolute_us() const { return at<17>().valid(); }
+  int64_t thread_time_absolute_us() const { return at<17>().as_int64(); }
+  bool has_thread_instruction_count_delta() const { return at<8>().valid(); }
+  int64_t thread_instruction_count_delta() const { return at<8>().as_int64(); }
+  bool has_thread_instruction_count_absolute() const { return at<20>().valid(); }
+  int64_t thread_instruction_count_absolute() const { return at<20>().as_int64(); }
+  bool has_legacy_event() const { return at<6>().valid(); }
+  ::protozero::ConstBytes legacy_event() const { return at<6>().as_bytes(); }
+};
+
+class TrackEvent : public ::protozero::Message {
+ public:
+  using Decoder = TrackEvent_Decoder;
+  enum : int32_t {
+    kCategoryIidsFieldNumber = 3,
+    kCategoriesFieldNumber = 22,
+    kNameIidFieldNumber = 10,
+    kNameFieldNumber = 23,
+    kTypeFieldNumber = 9,
+    kTrackUuidFieldNumber = 11,
+    kCounterValueFieldNumber = 30,
+    kDoubleCounterValueFieldNumber = 44,
+    kExtraCounterTrackUuidsFieldNumber = 31,
+    kExtraCounterValuesFieldNumber = 12,
+    kExtraDoubleCounterTrackUuidsFieldNumber = 45,
+    kExtraDoubleCounterValuesFieldNumber = 46,
+    kFlowIdsFieldNumber = 36,
+    kTerminatingFlowIdsFieldNumber = 42,
+    kDebugAnnotationsFieldNumber = 4,
+    kTaskExecutionFieldNumber = 5,
+    kLogMessageFieldNumber = 21,
+    kCcSchedulerStateFieldNumber = 24,
+    kChromeUserEventFieldNumber = 25,
+    kChromeKeyedServiceFieldNumber = 26,
+    kChromeLegacyIpcFieldNumber = 27,
+    kChromeHistogramSampleFieldNumber = 28,
+    kChromeLatencyInfoFieldNumber = 29,
+    kChromeFrameReporterFieldNumber = 32,
+    kChromeApplicationStateInfoFieldNumber = 39,
+    kChromeRendererSchedulerStateFieldNumber = 40,
+    kChromeWindowHandleEventInfoFieldNumber = 41,
+    kChromeContentSettingsEventInfoFieldNumber = 43,
+    kSourceLocationFieldNumber = 33,
+    kSourceLocationIidFieldNumber = 34,
+    kChromeMessagePumpFieldNumber = 35,
+    kChromeMojoEventInfoFieldNumber = 38,
+    kTimestampDeltaUsFieldNumber = 1,
+    kTimestampAbsoluteUsFieldNumber = 16,
+    kThreadTimeDeltaUsFieldNumber = 2,
+    kThreadTimeAbsoluteUsFieldNumber = 17,
+    kThreadInstructionCountDeltaFieldNumber = 8,
+    kThreadInstructionCountAbsoluteFieldNumber = 20,
+    kLegacyEventFieldNumber = 6,
+  };
+  using LegacyEvent = ::perfetto::protos::pbzero::TrackEvent_LegacyEvent;
+  using Type = ::perfetto::protos::pbzero::TrackEvent_Type;
+  static const Type TYPE_UNSPECIFIED = TrackEvent_Type_TYPE_UNSPECIFIED;
+  static const Type TYPE_SLICE_BEGIN = TrackEvent_Type_TYPE_SLICE_BEGIN;
+  static const Type TYPE_SLICE_END = TrackEvent_Type_TYPE_SLICE_END;
+  static const Type TYPE_INSTANT = TrackEvent_Type_TYPE_INSTANT;
+  static const Type TYPE_COUNTER = TrackEvent_Type_TYPE_COUNTER;
+
+  using FieldMetadata_CategoryIids =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CategoryIids kCategoryIids() { return {}; }
+  void add_category_iids(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CategoryIids::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Categories =
+    ::protozero::proto_utils::FieldMetadata<
+      22,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Categories kCategories() { return {}; }
+  void add_categories(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Categories::kFieldId, data, size);
+  }
+  void add_categories(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Categories::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NameIid =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NameIid kNameIid() { return {}; }
+  void set_name_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NameIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      23,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Type =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::TrackEvent_Type,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Type kType() { return {}; }
+  void set_type(::perfetto::protos::pbzero::TrackEvent_Type value) {
+    static constexpr uint32_t field_id = FieldMetadata_Type::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TrackUuid =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrackUuid kTrackUuid() { return {}; }
+  void set_track_uuid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TrackUuid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CounterValue =
+    ::protozero::proto_utils::FieldMetadata<
+      30,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CounterValue kCounterValue() { return {}; }
+  void set_counter_value(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CounterValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DoubleCounterValue =
+    ::protozero::proto_utils::FieldMetadata<
+      44,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DoubleCounterValue kDoubleCounterValue() { return {}; }
+  void set_double_counter_value(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_DoubleCounterValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExtraCounterTrackUuids =
+    ::protozero::proto_utils::FieldMetadata<
+      31,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExtraCounterTrackUuids kExtraCounterTrackUuids() { return {}; }
+  void add_extra_counter_track_uuids(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExtraCounterTrackUuids::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExtraCounterValues =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExtraCounterValues kExtraCounterValues() { return {}; }
+  void add_extra_counter_values(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExtraCounterValues::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExtraDoubleCounterTrackUuids =
+    ::protozero::proto_utils::FieldMetadata<
+      45,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExtraDoubleCounterTrackUuids kExtraDoubleCounterTrackUuids() { return {}; }
+  void add_extra_double_counter_track_uuids(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExtraDoubleCounterTrackUuids::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExtraDoubleCounterValues =
+    ::protozero::proto_utils::FieldMetadata<
+      46,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kDouble,
+      double,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExtraDoubleCounterValues kExtraDoubleCounterValues() { return {}; }
+  void add_extra_double_counter_values(double value) {
+    static constexpr uint32_t field_id = FieldMetadata_ExtraDoubleCounterValues::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kDouble>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FlowIds =
+    ::protozero::proto_utils::FieldMetadata<
+      36,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FlowIds kFlowIds() { return {}; }
+  void add_flow_ids(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_FlowIds::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TerminatingFlowIds =
+    ::protozero::proto_utils::FieldMetadata<
+      42,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TerminatingFlowIds kTerminatingFlowIds() { return {}; }
+  void add_terminating_flow_ids(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TerminatingFlowIds::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DebugAnnotations =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DebugAnnotation,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DebugAnnotations kDebugAnnotations() { return {}; }
+  template <typename T = DebugAnnotation> T* add_debug_annotations() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_TaskExecution =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TaskExecution,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TaskExecution kTaskExecution() { return {}; }
+  template <typename T = TaskExecution> T* set_task_execution() {
+    return BeginNestedMessage<T>(5);
+  }
+
+
+  using FieldMetadata_LogMessage =
+    ::protozero::proto_utils::FieldMetadata<
+      21,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      LogMessage,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LogMessage kLogMessage() { return {}; }
+  template <typename T = LogMessage> T* set_log_message() {
+    return BeginNestedMessage<T>(21);
+  }
+
+
+  using FieldMetadata_CcSchedulerState =
+    ::protozero::proto_utils::FieldMetadata<
+      24,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeCompositorSchedulerState,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CcSchedulerState kCcSchedulerState() { return {}; }
+  template <typename T = ChromeCompositorSchedulerState> T* set_cc_scheduler_state() {
+    return BeginNestedMessage<T>(24);
+  }
+
+
+  using FieldMetadata_ChromeUserEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      25,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeUserEvent,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeUserEvent kChromeUserEvent() { return {}; }
+  template <typename T = ChromeUserEvent> T* set_chrome_user_event() {
+    return BeginNestedMessage<T>(25);
+  }
+
+
+  using FieldMetadata_ChromeKeyedService =
+    ::protozero::proto_utils::FieldMetadata<
+      26,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeKeyedService,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeKeyedService kChromeKeyedService() { return {}; }
+  template <typename T = ChromeKeyedService> T* set_chrome_keyed_service() {
+    return BeginNestedMessage<T>(26);
+  }
+
+
+  using FieldMetadata_ChromeLegacyIpc =
+    ::protozero::proto_utils::FieldMetadata<
+      27,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeLegacyIpc,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeLegacyIpc kChromeLegacyIpc() { return {}; }
+  template <typename T = ChromeLegacyIpc> T* set_chrome_legacy_ipc() {
+    return BeginNestedMessage<T>(27);
+  }
+
+
+  using FieldMetadata_ChromeHistogramSample =
+    ::protozero::proto_utils::FieldMetadata<
+      28,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeHistogramSample,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeHistogramSample kChromeHistogramSample() { return {}; }
+  template <typename T = ChromeHistogramSample> T* set_chrome_histogram_sample() {
+    return BeginNestedMessage<T>(28);
+  }
+
+
+  using FieldMetadata_ChromeLatencyInfo =
+    ::protozero::proto_utils::FieldMetadata<
+      29,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeLatencyInfo,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeLatencyInfo kChromeLatencyInfo() { return {}; }
+  template <typename T = ChromeLatencyInfo> T* set_chrome_latency_info() {
+    return BeginNestedMessage<T>(29);
+  }
+
+
+  using FieldMetadata_ChromeFrameReporter =
+    ::protozero::proto_utils::FieldMetadata<
+      32,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeFrameReporter,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeFrameReporter kChromeFrameReporter() { return {}; }
+  template <typename T = ChromeFrameReporter> T* set_chrome_frame_reporter() {
+    return BeginNestedMessage<T>(32);
+  }
+
+
+  using FieldMetadata_ChromeApplicationStateInfo =
+    ::protozero::proto_utils::FieldMetadata<
+      39,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeApplicationStateInfo,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeApplicationStateInfo kChromeApplicationStateInfo() { return {}; }
+  template <typename T = ChromeApplicationStateInfo> T* set_chrome_application_state_info() {
+    return BeginNestedMessage<T>(39);
+  }
+
+
+  using FieldMetadata_ChromeRendererSchedulerState =
+    ::protozero::proto_utils::FieldMetadata<
+      40,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeRendererSchedulerState,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeRendererSchedulerState kChromeRendererSchedulerState() { return {}; }
+  template <typename T = ChromeRendererSchedulerState> T* set_chrome_renderer_scheduler_state() {
+    return BeginNestedMessage<T>(40);
+  }
+
+
+  using FieldMetadata_ChromeWindowHandleEventInfo =
+    ::protozero::proto_utils::FieldMetadata<
+      41,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeWindowHandleEventInfo,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeWindowHandleEventInfo kChromeWindowHandleEventInfo() { return {}; }
+  template <typename T = ChromeWindowHandleEventInfo> T* set_chrome_window_handle_event_info() {
+    return BeginNestedMessage<T>(41);
+  }
+
+
+  using FieldMetadata_ChromeContentSettingsEventInfo =
+    ::protozero::proto_utils::FieldMetadata<
+      43,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeContentSettingsEventInfo,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeContentSettingsEventInfo kChromeContentSettingsEventInfo() { return {}; }
+  template <typename T = ChromeContentSettingsEventInfo> T* set_chrome_content_settings_event_info() {
+    return BeginNestedMessage<T>(43);
+  }
+
+
+  using FieldMetadata_SourceLocation =
+    ::protozero::proto_utils::FieldMetadata<
+      33,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SourceLocation,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SourceLocation kSourceLocation() { return {}; }
+  template <typename T = SourceLocation> T* set_source_location() {
+    return BeginNestedMessage<T>(33);
+  }
+
+
+  using FieldMetadata_SourceLocationIid =
+    ::protozero::proto_utils::FieldMetadata<
+      34,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SourceLocationIid kSourceLocationIid() { return {}; }
+  void set_source_location_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SourceLocationIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChromeMessagePump =
+    ::protozero::proto_utils::FieldMetadata<
+      35,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeMessagePump,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeMessagePump kChromeMessagePump() { return {}; }
+  template <typename T = ChromeMessagePump> T* set_chrome_message_pump() {
+    return BeginNestedMessage<T>(35);
+  }
+
+
+  using FieldMetadata_ChromeMojoEventInfo =
+    ::protozero::proto_utils::FieldMetadata<
+      38,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeMojoEventInfo,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeMojoEventInfo kChromeMojoEventInfo() { return {}; }
+  template <typename T = ChromeMojoEventInfo> T* set_chrome_mojo_event_info() {
+    return BeginNestedMessage<T>(38);
+  }
+
+
+  using FieldMetadata_TimestampDeltaUs =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TimestampDeltaUs kTimestampDeltaUs() { return {}; }
+  void set_timestamp_delta_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TimestampDeltaUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TimestampAbsoluteUs =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TimestampAbsoluteUs kTimestampAbsoluteUs() { return {}; }
+  void set_timestamp_absolute_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TimestampAbsoluteUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadTimeDeltaUs =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadTimeDeltaUs kThreadTimeDeltaUs() { return {}; }
+  void set_thread_time_delta_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadTimeDeltaUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadTimeAbsoluteUs =
+    ::protozero::proto_utils::FieldMetadata<
+      17,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadTimeAbsoluteUs kThreadTimeAbsoluteUs() { return {}; }
+  void set_thread_time_absolute_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadTimeAbsoluteUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadInstructionCountDelta =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadInstructionCountDelta kThreadInstructionCountDelta() { return {}; }
+  void set_thread_instruction_count_delta(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadInstructionCountDelta::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadInstructionCountAbsolute =
+    ::protozero::proto_utils::FieldMetadata<
+      20,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadInstructionCountAbsolute kThreadInstructionCountAbsolute() { return {}; }
+  void set_thread_instruction_count_absolute(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadInstructionCountAbsolute::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LegacyEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TrackEvent_LegacyEvent,
+      TrackEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LegacyEvent kLegacyEvent() { return {}; }
+  template <typename T = TrackEvent_LegacyEvent> T* set_legacy_event() {
+    return BeginNestedMessage<T>(6);
+  }
+
+};
+
+class TrackEvent_LegacyEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/19, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TrackEvent_LegacyEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TrackEvent_LegacyEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TrackEvent_LegacyEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name_iid() const { return at<1>().valid(); }
+  uint64_t name_iid() const { return at<1>().as_uint64(); }
+  bool has_phase() const { return at<2>().valid(); }
+  int32_t phase() const { return at<2>().as_int32(); }
+  bool has_duration_us() const { return at<3>().valid(); }
+  int64_t duration_us() const { return at<3>().as_int64(); }
+  bool has_thread_duration_us() const { return at<4>().valid(); }
+  int64_t thread_duration_us() const { return at<4>().as_int64(); }
+  bool has_thread_instruction_delta() const { return at<15>().valid(); }
+  int64_t thread_instruction_delta() const { return at<15>().as_int64(); }
+  bool has_unscoped_id() const { return at<6>().valid(); }
+  uint64_t unscoped_id() const { return at<6>().as_uint64(); }
+  bool has_local_id() const { return at<10>().valid(); }
+  uint64_t local_id() const { return at<10>().as_uint64(); }
+  bool has_global_id() const { return at<11>().valid(); }
+  uint64_t global_id() const { return at<11>().as_uint64(); }
+  bool has_id_scope() const { return at<7>().valid(); }
+  ::protozero::ConstChars id_scope() const { return at<7>().as_string(); }
+  bool has_use_async_tts() const { return at<9>().valid(); }
+  bool use_async_tts() const { return at<9>().as_bool(); }
+  bool has_bind_id() const { return at<8>().valid(); }
+  uint64_t bind_id() const { return at<8>().as_uint64(); }
+  bool has_bind_to_enclosing() const { return at<12>().valid(); }
+  bool bind_to_enclosing() const { return at<12>().as_bool(); }
+  bool has_flow_direction() const { return at<13>().valid(); }
+  int32_t flow_direction() const { return at<13>().as_int32(); }
+  bool has_instant_event_scope() const { return at<14>().valid(); }
+  int32_t instant_event_scope() const { return at<14>().as_int32(); }
+  bool has_pid_override() const { return at<18>().valid(); }
+  int32_t pid_override() const { return at<18>().as_int32(); }
+  bool has_tid_override() const { return at<19>().valid(); }
+  int32_t tid_override() const { return at<19>().as_int32(); }
+};
+
+class TrackEvent_LegacyEvent : public ::protozero::Message {
+ public:
+  using Decoder = TrackEvent_LegacyEvent_Decoder;
+  enum : int32_t {
+    kNameIidFieldNumber = 1,
+    kPhaseFieldNumber = 2,
+    kDurationUsFieldNumber = 3,
+    kThreadDurationUsFieldNumber = 4,
+    kThreadInstructionDeltaFieldNumber = 15,
+    kUnscopedIdFieldNumber = 6,
+    kLocalIdFieldNumber = 10,
+    kGlobalIdFieldNumber = 11,
+    kIdScopeFieldNumber = 7,
+    kUseAsyncTtsFieldNumber = 9,
+    kBindIdFieldNumber = 8,
+    kBindToEnclosingFieldNumber = 12,
+    kFlowDirectionFieldNumber = 13,
+    kInstantEventScopeFieldNumber = 14,
+    kPidOverrideFieldNumber = 18,
+    kTidOverrideFieldNumber = 19,
+  };
+  using FlowDirection = ::perfetto::protos::pbzero::TrackEvent_LegacyEvent_FlowDirection;
+  using InstantEventScope = ::perfetto::protos::pbzero::TrackEvent_LegacyEvent_InstantEventScope;
+  static const FlowDirection FLOW_UNSPECIFIED = TrackEvent_LegacyEvent_FlowDirection_FLOW_UNSPECIFIED;
+  static const FlowDirection FLOW_IN = TrackEvent_LegacyEvent_FlowDirection_FLOW_IN;
+  static const FlowDirection FLOW_OUT = TrackEvent_LegacyEvent_FlowDirection_FLOW_OUT;
+  static const FlowDirection FLOW_INOUT = TrackEvent_LegacyEvent_FlowDirection_FLOW_INOUT;
+  static const InstantEventScope SCOPE_UNSPECIFIED = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_UNSPECIFIED;
+  static const InstantEventScope SCOPE_GLOBAL = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_GLOBAL;
+  static const InstantEventScope SCOPE_PROCESS = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_PROCESS;
+  static const InstantEventScope SCOPE_THREAD = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_THREAD;
+
+  using FieldMetadata_NameIid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NameIid kNameIid() { return {}; }
+  void set_name_iid(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NameIid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Phase =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Phase kPhase() { return {}; }
+  void set_phase(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Phase::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_DurationUs =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DurationUs kDurationUs() { return {}; }
+  void set_duration_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_DurationUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadDurationUs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadDurationUs kThreadDurationUs() { return {}; }
+  void set_thread_duration_us(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadDurationUs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadInstructionDelta =
+    ::protozero::proto_utils::FieldMetadata<
+      15,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadInstructionDelta kThreadInstructionDelta() { return {}; }
+  void set_thread_instruction_delta(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadInstructionDelta::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UnscopedId =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UnscopedId kUnscopedId() { return {}; }
+  void set_unscoped_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UnscopedId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LocalId =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LocalId kLocalId() { return {}; }
+  void set_local_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_LocalId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_GlobalId =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GlobalId kGlobalId() { return {}; }
+  void set_global_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GlobalId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IdScope =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IdScope kIdScope() { return {}; }
+  void set_id_scope(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_IdScope::kFieldId, data, size);
+  }
+  void set_id_scope(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_IdScope::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UseAsyncTts =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UseAsyncTts kUseAsyncTts() { return {}; }
+  void set_use_async_tts(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_UseAsyncTts::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BindId =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BindId kBindId() { return {}; }
+  void set_bind_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_BindId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_BindToEnclosing =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BindToEnclosing kBindToEnclosing() { return {}; }
+  void set_bind_to_enclosing(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_BindToEnclosing::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_FlowDirection =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::TrackEvent_LegacyEvent_FlowDirection,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FlowDirection kFlowDirection() { return {}; }
+  void set_flow_direction(::perfetto::protos::pbzero::TrackEvent_LegacyEvent_FlowDirection value) {
+    static constexpr uint32_t field_id = FieldMetadata_FlowDirection::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_InstantEventScope =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::TrackEvent_LegacyEvent_InstantEventScope,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InstantEventScope kInstantEventScope() { return {}; }
+  void set_instant_event_scope(::perfetto::protos::pbzero::TrackEvent_LegacyEvent_InstantEventScope value) {
+    static constexpr uint32_t field_id = FieldMetadata_InstantEventScope::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PidOverride =
+    ::protozero::proto_utils::FieldMetadata<
+      18,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PidOverride kPidOverride() { return {}; }
+  void set_pid_override(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_PidOverride::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TidOverride =
+    ::protozero::proto_utils::FieldMetadata<
+      19,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TrackEvent_LegacyEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TidOverride kTidOverride() { return {}; }
+  void set_tid_override(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TidOverride::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/interned_data/interned_data.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_INTERNED_DATA_INTERNED_DATA_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_INTERNED_DATA_INTERNED_DATA_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class Callstack;
+class DebugAnnotationName;
+class EventCategory;
+class EventName;
+class Frame;
+class HistogramName;
+class InternedGpuRenderStageSpecification;
+class InternedGraphicsContext;
+class InternedString;
+class LogMessageBody;
+class Mapping;
+class ProfiledFrameSymbols;
+class SourceLocation;
+
+class InternedData_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/26, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  InternedData_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit InternedData_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit InternedData_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_event_categories() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> event_categories() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_event_names() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> event_names() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_debug_annotation_names() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> debug_annotation_names() const { return GetRepeated<::protozero::ConstBytes>(3); }
+  bool has_source_locations() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> source_locations() const { return GetRepeated<::protozero::ConstBytes>(4); }
+  bool has_log_message_body() const { return at<20>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> log_message_body() const { return GetRepeated<::protozero::ConstBytes>(20); }
+  bool has_histogram_names() const { return at<25>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> histogram_names() const { return GetRepeated<::protozero::ConstBytes>(25); }
+  bool has_build_ids() const { return at<16>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> build_ids() const { return GetRepeated<::protozero::ConstBytes>(16); }
+  bool has_mapping_paths() const { return at<17>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> mapping_paths() const { return GetRepeated<::protozero::ConstBytes>(17); }
+  bool has_source_paths() const { return at<18>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> source_paths() const { return GetRepeated<::protozero::ConstBytes>(18); }
+  bool has_function_names() const { return at<5>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> function_names() const { return GetRepeated<::protozero::ConstBytes>(5); }
+  bool has_profiled_frame_symbols() const { return at<21>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> profiled_frame_symbols() const { return GetRepeated<::protozero::ConstBytes>(21); }
+  bool has_mappings() const { return at<19>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> mappings() const { return GetRepeated<::protozero::ConstBytes>(19); }
+  bool has_frames() const { return at<6>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> frames() const { return GetRepeated<::protozero::ConstBytes>(6); }
+  bool has_callstacks() const { return at<7>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> callstacks() const { return GetRepeated<::protozero::ConstBytes>(7); }
+  bool has_vulkan_memory_keys() const { return at<22>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> vulkan_memory_keys() const { return GetRepeated<::protozero::ConstBytes>(22); }
+  bool has_graphics_contexts() const { return at<23>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> graphics_contexts() const { return GetRepeated<::protozero::ConstBytes>(23); }
+  bool has_gpu_specifications() const { return at<24>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> gpu_specifications() const { return GetRepeated<::protozero::ConstBytes>(24); }
+  bool has_kernel_symbols() const { return at<26>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> kernel_symbols() const { return GetRepeated<::protozero::ConstBytes>(26); }
+};
+
+class InternedData : public ::protozero::Message {
+ public:
+  using Decoder = InternedData_Decoder;
+  enum : int32_t {
+    kEventCategoriesFieldNumber = 1,
+    kEventNamesFieldNumber = 2,
+    kDebugAnnotationNamesFieldNumber = 3,
+    kSourceLocationsFieldNumber = 4,
+    kLogMessageBodyFieldNumber = 20,
+    kHistogramNamesFieldNumber = 25,
+    kBuildIdsFieldNumber = 16,
+    kMappingPathsFieldNumber = 17,
+    kSourcePathsFieldNumber = 18,
+    kFunctionNamesFieldNumber = 5,
+    kProfiledFrameSymbolsFieldNumber = 21,
+    kMappingsFieldNumber = 19,
+    kFramesFieldNumber = 6,
+    kCallstacksFieldNumber = 7,
+    kVulkanMemoryKeysFieldNumber = 22,
+    kGraphicsContextsFieldNumber = 23,
+    kGpuSpecificationsFieldNumber = 24,
+    kKernelSymbolsFieldNumber = 26,
+  };
+
+  using FieldMetadata_EventCategories =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      EventCategory,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EventCategories kEventCategories() { return {}; }
+  template <typename T = EventCategory> T* add_event_categories() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_EventNames =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      EventName,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EventNames kEventNames() { return {}; }
+  template <typename T = EventName> T* add_event_names() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_DebugAnnotationNames =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DebugAnnotationName,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DebugAnnotationNames kDebugAnnotationNames() { return {}; }
+  template <typename T = DebugAnnotationName> T* add_debug_annotation_names() {
+    return BeginNestedMessage<T>(3);
+  }
+
+
+  using FieldMetadata_SourceLocations =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SourceLocation,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SourceLocations kSourceLocations() { return {}; }
+  template <typename T = SourceLocation> T* add_source_locations() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_LogMessageBody =
+    ::protozero::proto_utils::FieldMetadata<
+      20,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      LogMessageBody,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LogMessageBody kLogMessageBody() { return {}; }
+  template <typename T = LogMessageBody> T* add_log_message_body() {
+    return BeginNestedMessage<T>(20);
+  }
+
+
+  using FieldMetadata_HistogramNames =
+    ::protozero::proto_utils::FieldMetadata<
+      25,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      HistogramName,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HistogramNames kHistogramNames() { return {}; }
+  template <typename T = HistogramName> T* add_histogram_names() {
+    return BeginNestedMessage<T>(25);
+  }
+
+
+  using FieldMetadata_BuildIds =
+    ::protozero::proto_utils::FieldMetadata<
+      16,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedString,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_BuildIds kBuildIds() { return {}; }
+  template <typename T = InternedString> T* add_build_ids() {
+    return BeginNestedMessage<T>(16);
+  }
+
+
+  using FieldMetadata_MappingPaths =
+    ::protozero::proto_utils::FieldMetadata<
+      17,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedString,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MappingPaths kMappingPaths() { return {}; }
+  template <typename T = InternedString> T* add_mapping_paths() {
+    return BeginNestedMessage<T>(17);
+  }
+
+
+  using FieldMetadata_SourcePaths =
+    ::protozero::proto_utils::FieldMetadata<
+      18,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedString,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SourcePaths kSourcePaths() { return {}; }
+  template <typename T = InternedString> T* add_source_paths() {
+    return BeginNestedMessage<T>(18);
+  }
+
+
+  using FieldMetadata_FunctionNames =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedString,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FunctionNames kFunctionNames() { return {}; }
+  template <typename T = InternedString> T* add_function_names() {
+    return BeginNestedMessage<T>(5);
+  }
+
+
+  using FieldMetadata_ProfiledFrameSymbols =
+    ::protozero::proto_utils::FieldMetadata<
+      21,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProfiledFrameSymbols,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProfiledFrameSymbols kProfiledFrameSymbols() { return {}; }
+  template <typename T = ProfiledFrameSymbols> T* add_profiled_frame_symbols() {
+    return BeginNestedMessage<T>(21);
+  }
+
+
+  using FieldMetadata_Mappings =
+    ::protozero::proto_utils::FieldMetadata<
+      19,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Mapping,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Mappings kMappings() { return {}; }
+  template <typename T = Mapping> T* add_mappings() {
+    return BeginNestedMessage<T>(19);
+  }
+
+
+  using FieldMetadata_Frames =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Frame,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Frames kFrames() { return {}; }
+  template <typename T = Frame> T* add_frames() {
+    return BeginNestedMessage<T>(6);
+  }
+
+
+  using FieldMetadata_Callstacks =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Callstack,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Callstacks kCallstacks() { return {}; }
+  template <typename T = Callstack> T* add_callstacks() {
+    return BeginNestedMessage<T>(7);
+  }
+
+
+  using FieldMetadata_VulkanMemoryKeys =
+    ::protozero::proto_utils::FieldMetadata<
+      22,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedString,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VulkanMemoryKeys kVulkanMemoryKeys() { return {}; }
+  template <typename T = InternedString> T* add_vulkan_memory_keys() {
+    return BeginNestedMessage<T>(22);
+  }
+
+
+  using FieldMetadata_GraphicsContexts =
+    ::protozero::proto_utils::FieldMetadata<
+      23,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedGraphicsContext,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GraphicsContexts kGraphicsContexts() { return {}; }
+  template <typename T = InternedGraphicsContext> T* add_graphics_contexts() {
+    return BeginNestedMessage<T>(23);
+  }
+
+
+  using FieldMetadata_GpuSpecifications =
+    ::protozero::proto_utils::FieldMetadata<
+      24,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedGpuRenderStageSpecification,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuSpecifications kGpuSpecifications() { return {}; }
+  template <typename T = InternedGpuRenderStageSpecification> T* add_gpu_specifications() {
+    return BeginNestedMessage<T>(24);
+  }
+
+
+  using FieldMetadata_KernelSymbols =
+    ::protozero::proto_utils::FieldMetadata<
+      26,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedString,
+      InternedData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_KernelSymbols kKernelSymbols() { return {}; }
+  template <typename T = InternedString> T* add_kernel_symbols() {
+    return BeginNestedMessage<T>(26);
+  }
+
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/perfetto/perfetto_metatrace.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PERFETTO_PERFETTO_METATRACE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PERFETTO_PERFETTO_METATRACE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class PerfettoMetatrace_Arg;
+
+class PerfettoMetatrace_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/9, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  PerfettoMetatrace_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PerfettoMetatrace_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PerfettoMetatrace_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_event_id() const { return at<1>().valid(); }
+  uint32_t event_id() const { return at<1>().as_uint32(); }
+  bool has_counter_id() const { return at<2>().valid(); }
+  uint32_t counter_id() const { return at<2>().as_uint32(); }
+  bool has_event_name() const { return at<8>().valid(); }
+  ::protozero::ConstChars event_name() const { return at<8>().as_string(); }
+  bool has_counter_name() const { return at<9>().valid(); }
+  ::protozero::ConstChars counter_name() const { return at<9>().as_string(); }
+  bool has_event_duration_ns() const { return at<3>().valid(); }
+  uint32_t event_duration_ns() const { return at<3>().as_uint32(); }
+  bool has_counter_value() const { return at<4>().valid(); }
+  int32_t counter_value() const { return at<4>().as_int32(); }
+  bool has_thread_id() const { return at<5>().valid(); }
+  uint32_t thread_id() const { return at<5>().as_uint32(); }
+  bool has_has_overruns() const { return at<6>().valid(); }
+  bool has_overruns() const { return at<6>().as_bool(); }
+  bool has_args() const { return at<7>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> args() const { return GetRepeated<::protozero::ConstBytes>(7); }
+};
+
+class PerfettoMetatrace : public ::protozero::Message {
+ public:
+  using Decoder = PerfettoMetatrace_Decoder;
+  enum : int32_t {
+    kEventIdFieldNumber = 1,
+    kCounterIdFieldNumber = 2,
+    kEventNameFieldNumber = 8,
+    kCounterNameFieldNumber = 9,
+    kEventDurationNsFieldNumber = 3,
+    kCounterValueFieldNumber = 4,
+    kThreadIdFieldNumber = 5,
+    kHasOverrunsFieldNumber = 6,
+    kArgsFieldNumber = 7,
+  };
+  using Arg = ::perfetto::protos::pbzero::PerfettoMetatrace_Arg;
+
+  using FieldMetadata_EventId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EventId kEventId() { return {}; }
+  void set_event_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EventId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CounterId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CounterId kCounterId() { return {}; }
+  void set_counter_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CounterId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EventName =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EventName kEventName() { return {}; }
+  void set_event_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_EventName::kFieldId, data, size);
+  }
+  void set_event_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_EventName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CounterName =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CounterName kCounterName() { return {}; }
+  void set_counter_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_CounterName::kFieldId, data, size);
+  }
+  void set_counter_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_CounterName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EventDurationNs =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EventDurationNs kEventDurationNs() { return {}; }
+  void set_event_duration_ns(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EventDurationNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CounterValue =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CounterValue kCounterValue() { return {}; }
+  void set_counter_value(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CounterValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadId =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadId kThreadId() { return {}; }
+  void set_thread_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ThreadId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HasOverruns =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HasOverruns kHasOverruns() { return {}; }
+  void set_has_overruns(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_HasOverruns::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Args =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PerfettoMetatrace_Arg,
+      PerfettoMetatrace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Args kArgs() { return {}; }
+  template <typename T = PerfettoMetatrace_Arg> T* add_args() {
+    return BeginNestedMessage<T>(7);
+  }
+
+};
+
+class PerfettoMetatrace_Arg_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  PerfettoMetatrace_Arg_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PerfettoMetatrace_Arg_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PerfettoMetatrace_Arg_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_key() const { return at<1>().valid(); }
+  ::protozero::ConstChars key() const { return at<1>().as_string(); }
+  bool has_value() const { return at<2>().valid(); }
+  ::protozero::ConstChars value() const { return at<2>().as_string(); }
+};
+
+class PerfettoMetatrace_Arg : public ::protozero::Message {
+ public:
+  using Decoder = PerfettoMetatrace_Arg_Decoder;
+  enum : int32_t {
+    kKeyFieldNumber = 1,
+    kValueFieldNumber = 2,
+  };
+
+  using FieldMetadata_Key =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PerfettoMetatrace_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Key kKey() { return {}; }
+  void set_key(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Key::kFieldId, data, size);
+  }
+  void set_key(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Key::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Value =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PerfettoMetatrace_Arg>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Value kValue() { return {}; }
+  void set_value(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Value::kFieldId, data, size);
+  }
+  void set_value(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Value::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/perfetto/tracing_service_event.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PERFETTO_TRACING_SERVICE_EVENT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PERFETTO_TRACING_SERVICE_EVENT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class TracingServiceEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TracingServiceEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TracingServiceEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TracingServiceEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_tracing_started() const { return at<2>().valid(); }
+  bool tracing_started() const { return at<2>().as_bool(); }
+  bool has_all_data_sources_started() const { return at<1>().valid(); }
+  bool all_data_sources_started() const { return at<1>().as_bool(); }
+  bool has_all_data_sources_flushed() const { return at<3>().valid(); }
+  bool all_data_sources_flushed() const { return at<3>().as_bool(); }
+  bool has_read_tracing_buffers_completed() const { return at<4>().valid(); }
+  bool read_tracing_buffers_completed() const { return at<4>().as_bool(); }
+  bool has_tracing_disabled() const { return at<5>().valid(); }
+  bool tracing_disabled() const { return at<5>().as_bool(); }
+  bool has_seized_for_bugreport() const { return at<6>().valid(); }
+  bool seized_for_bugreport() const { return at<6>().as_bool(); }
+};
+
+class TracingServiceEvent : public ::protozero::Message {
+ public:
+  using Decoder = TracingServiceEvent_Decoder;
+  enum : int32_t {
+    kTracingStartedFieldNumber = 2,
+    kAllDataSourcesStartedFieldNumber = 1,
+    kAllDataSourcesFlushedFieldNumber = 3,
+    kReadTracingBuffersCompletedFieldNumber = 4,
+    kTracingDisabledFieldNumber = 5,
+    kSeizedForBugreportFieldNumber = 6,
+  };
+
+  using FieldMetadata_TracingStarted =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracingServiceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TracingStarted kTracingStarted() { return {}; }
+  void set_tracing_started(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_TracingStarted::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AllDataSourcesStarted =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracingServiceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllDataSourcesStarted kAllDataSourcesStarted() { return {}; }
+  void set_all_data_sources_started(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_AllDataSourcesStarted::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AllDataSourcesFlushed =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracingServiceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllDataSourcesFlushed kAllDataSourcesFlushed() { return {}; }
+  void set_all_data_sources_flushed(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_AllDataSourcesFlushed::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ReadTracingBuffersCompleted =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracingServiceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ReadTracingBuffersCompleted kReadTracingBuffersCompleted() { return {}; }
+  void set_read_tracing_buffers_completed(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_ReadTracingBuffersCompleted::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TracingDisabled =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracingServiceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TracingDisabled kTracingDisabled() { return {}; }
+  void set_tracing_disabled(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_TracingDisabled::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SeizedForBugreport =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracingServiceEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SeizedForBugreport kSeizedForBugreport() { return {}; }
+  void set_seized_for_bugreport(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_SeizedForBugreport::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/power/android_energy_estimation_breakdown.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_POWER_ANDROID_ENERGY_ESTIMATION_BREAKDOWN_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_POWER_ANDROID_ENERGY_ESTIMATION_BREAKDOWN_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class AndroidEnergyConsumerDescriptor;
+class AndroidEnergyEstimationBreakdown_EnergyUidBreakdown;
+
+class AndroidEnergyEstimationBreakdown_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  AndroidEnergyEstimationBreakdown_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AndroidEnergyEstimationBreakdown_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AndroidEnergyEstimationBreakdown_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_energy_consumer_descriptor() const { return at<1>().valid(); }
+  ::protozero::ConstBytes energy_consumer_descriptor() const { return at<1>().as_bytes(); }
+  bool has_energy_consumer_id() const { return at<2>().valid(); }
+  int32_t energy_consumer_id() const { return at<2>().as_int32(); }
+  bool has_energy_uws() const { return at<3>().valid(); }
+  int64_t energy_uws() const { return at<3>().as_int64(); }
+  bool has_per_uid_breakdown() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> per_uid_breakdown() const { return GetRepeated<::protozero::ConstBytes>(4); }
+};
+
+class AndroidEnergyEstimationBreakdown : public ::protozero::Message {
+ public:
+  using Decoder = AndroidEnergyEstimationBreakdown_Decoder;
+  enum : int32_t {
+    kEnergyConsumerDescriptorFieldNumber = 1,
+    kEnergyConsumerIdFieldNumber = 2,
+    kEnergyUwsFieldNumber = 3,
+    kPerUidBreakdownFieldNumber = 4,
+  };
+  using EnergyUidBreakdown = ::perfetto::protos::pbzero::AndroidEnergyEstimationBreakdown_EnergyUidBreakdown;
+
+  using FieldMetadata_EnergyConsumerDescriptor =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AndroidEnergyConsumerDescriptor,
+      AndroidEnergyEstimationBreakdown>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EnergyConsumerDescriptor kEnergyConsumerDescriptor() { return {}; }
+  template <typename T = AndroidEnergyConsumerDescriptor> T* set_energy_consumer_descriptor() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_EnergyConsumerId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      AndroidEnergyEstimationBreakdown>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EnergyConsumerId kEnergyConsumerId() { return {}; }
+  void set_energy_consumer_id(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EnergyConsumerId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EnergyUws =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      AndroidEnergyEstimationBreakdown>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EnergyUws kEnergyUws() { return {}; }
+  void set_energy_uws(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EnergyUws::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_PerUidBreakdown =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AndroidEnergyEstimationBreakdown_EnergyUidBreakdown,
+      AndroidEnergyEstimationBreakdown>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PerUidBreakdown kPerUidBreakdown() { return {}; }
+  template <typename T = AndroidEnergyEstimationBreakdown_EnergyUidBreakdown> T* add_per_uid_breakdown() {
+    return BeginNestedMessage<T>(4);
+  }
+
+};
+
+class AndroidEnergyEstimationBreakdown_EnergyUidBreakdown_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  AndroidEnergyEstimationBreakdown_EnergyUidBreakdown_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit AndroidEnergyEstimationBreakdown_EnergyUidBreakdown_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit AndroidEnergyEstimationBreakdown_EnergyUidBreakdown_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_uid() const { return at<1>().valid(); }
+  int32_t uid() const { return at<1>().as_int32(); }
+  bool has_energy_uws() const { return at<2>().valid(); }
+  int64_t energy_uws() const { return at<2>().as_int64(); }
+};
+
+class AndroidEnergyEstimationBreakdown_EnergyUidBreakdown : public ::protozero::Message {
+ public:
+  using Decoder = AndroidEnergyEstimationBreakdown_EnergyUidBreakdown_Decoder;
+  enum : int32_t {
+    kUidFieldNumber = 1,
+    kEnergyUwsFieldNumber = 2,
+  };
+
+  using FieldMetadata_Uid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      AndroidEnergyEstimationBreakdown_EnergyUidBreakdown>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Uid kUid() { return {}; }
+  void set_uid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Uid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_EnergyUws =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      AndroidEnergyEstimationBreakdown_EnergyUidBreakdown>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EnergyUws kEnergyUws() { return {}; }
+  void set_energy_uws(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_EnergyUws::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/power/battery_counters.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_POWER_BATTERY_COUNTERS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_POWER_BATTERY_COUNTERS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class BatteryCounters_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  BatteryCounters_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit BatteryCounters_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit BatteryCounters_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_charge_counter_uah() const { return at<1>().valid(); }
+  int64_t charge_counter_uah() const { return at<1>().as_int64(); }
+  bool has_capacity_percent() const { return at<2>().valid(); }
+  float capacity_percent() const { return at<2>().as_float(); }
+  bool has_current_ua() const { return at<3>().valid(); }
+  int64_t current_ua() const { return at<3>().as_int64(); }
+  bool has_current_avg_ua() const { return at<4>().valid(); }
+  int64_t current_avg_ua() const { return at<4>().as_int64(); }
+};
+
+class BatteryCounters : public ::protozero::Message {
+ public:
+  using Decoder = BatteryCounters_Decoder;
+  enum : int32_t {
+    kChargeCounterUahFieldNumber = 1,
+    kCapacityPercentFieldNumber = 2,
+    kCurrentUaFieldNumber = 3,
+    kCurrentAvgUaFieldNumber = 4,
+  };
+
+  using FieldMetadata_ChargeCounterUah =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BatteryCounters>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChargeCounterUah kChargeCounterUah() { return {}; }
+  void set_charge_counter_uah(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChargeCounterUah::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CapacityPercent =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kFloat,
+      float,
+      BatteryCounters>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CapacityPercent kCapacityPercent() { return {}; }
+  void set_capacity_percent(float value) {
+    static constexpr uint32_t field_id = FieldMetadata_CapacityPercent::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kFloat>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CurrentUa =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BatteryCounters>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CurrentUa kCurrentUa() { return {}; }
+  void set_current_ua(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CurrentUa::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CurrentAvgUa =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      BatteryCounters>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CurrentAvgUa kCurrentAvgUa() { return {}; }
+  void set_current_avg_ua(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CurrentAvgUa::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/power/power_rails.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_POWER_POWER_RAILS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_POWER_POWER_RAILS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class PowerRails_EnergyData;
+class PowerRails_RailDescriptor;
+
+class PowerRails_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  PowerRails_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PowerRails_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PowerRails_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_rail_descriptor() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> rail_descriptor() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_energy_data() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> energy_data() const { return GetRepeated<::protozero::ConstBytes>(2); }
+};
+
+class PowerRails : public ::protozero::Message {
+ public:
+  using Decoder = PowerRails_Decoder;
+  enum : int32_t {
+    kRailDescriptorFieldNumber = 1,
+    kEnergyDataFieldNumber = 2,
+  };
+  using RailDescriptor = ::perfetto::protos::pbzero::PowerRails_RailDescriptor;
+  using EnergyData = ::perfetto::protos::pbzero::PowerRails_EnergyData;
+
+  using FieldMetadata_RailDescriptor =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PowerRails_RailDescriptor,
+      PowerRails>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RailDescriptor kRailDescriptor() { return {}; }
+  template <typename T = PowerRails_RailDescriptor> T* add_rail_descriptor() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_EnergyData =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PowerRails_EnergyData,
+      PowerRails>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_EnergyData kEnergyData() { return {}; }
+  template <typename T = PowerRails_EnergyData> T* add_energy_data() {
+    return BeginNestedMessage<T>(2);
+  }
+
+};
+
+class PowerRails_EnergyData_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  PowerRails_EnergyData_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PowerRails_EnergyData_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PowerRails_EnergyData_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_index() const { return at<1>().valid(); }
+  uint32_t index() const { return at<1>().as_uint32(); }
+  bool has_timestamp_ms() const { return at<2>().valid(); }
+  uint64_t timestamp_ms() const { return at<2>().as_uint64(); }
+  bool has_energy() const { return at<3>().valid(); }
+  uint64_t energy() const { return at<3>().as_uint64(); }
+};
+
+class PowerRails_EnergyData : public ::protozero::Message {
+ public:
+  using Decoder = PowerRails_EnergyData_Decoder;
+  enum : int32_t {
+    kIndexFieldNumber = 1,
+    kTimestampMsFieldNumber = 2,
+    kEnergyFieldNumber = 3,
+  };
+
+  using FieldMetadata_Index =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PowerRails_EnergyData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Index kIndex() { return {}; }
+  void set_index(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Index::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TimestampMs =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      PowerRails_EnergyData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TimestampMs kTimestampMs() { return {}; }
+  void set_timestamp_ms(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TimestampMs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Energy =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      PowerRails_EnergyData>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Energy kEnergy() { return {}; }
+  void set_energy(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Energy::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class PowerRails_RailDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  PowerRails_RailDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit PowerRails_RailDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit PowerRails_RailDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_index() const { return at<1>().valid(); }
+  uint32_t index() const { return at<1>().as_uint32(); }
+  bool has_rail_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars rail_name() const { return at<2>().as_string(); }
+  bool has_subsys_name() const { return at<3>().valid(); }
+  ::protozero::ConstChars subsys_name() const { return at<3>().as_string(); }
+  bool has_sampling_rate() const { return at<4>().valid(); }
+  uint32_t sampling_rate() const { return at<4>().as_uint32(); }
+};
+
+class PowerRails_RailDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = PowerRails_RailDescriptor_Decoder;
+  enum : int32_t {
+    kIndexFieldNumber = 1,
+    kRailNameFieldNumber = 2,
+    kSubsysNameFieldNumber = 3,
+    kSamplingRateFieldNumber = 4,
+  };
+
+  using FieldMetadata_Index =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PowerRails_RailDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Index kIndex() { return {}; }
+  void set_index(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Index::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RailName =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PowerRails_RailDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RailName kRailName() { return {}; }
+  void set_rail_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_RailName::kFieldId, data, size);
+  }
+  void set_rail_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_RailName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SubsysName =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      PowerRails_RailDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SubsysName kSubsysName() { return {}; }
+  void set_subsys_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_SubsysName::kFieldId, data, size);
+  }
+  void set_subsys_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_SubsysName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SamplingRate =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      PowerRails_RailDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SamplingRate kSamplingRate() { return {}; }
+  void set_sampling_rate(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SamplingRate::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ps/process_stats.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PS_PROCESS_STATS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PS_PROCESS_STATS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class ProcessStats_Process;
+class ProcessStats_Thread;
+
+class ProcessStats_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ProcessStats_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ProcessStats_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ProcessStats_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_processes() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> processes() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_collection_end_timestamp() const { return at<2>().valid(); }
+  uint64_t collection_end_timestamp() const { return at<2>().as_uint64(); }
+};
+
+class ProcessStats : public ::protozero::Message {
+ public:
+  using Decoder = ProcessStats_Decoder;
+  enum : int32_t {
+    kProcessesFieldNumber = 1,
+    kCollectionEndTimestampFieldNumber = 2,
+  };
+  using Thread = ::perfetto::protos::pbzero::ProcessStats_Thread;
+  using Process = ::perfetto::protos::pbzero::ProcessStats_Process;
+
+  using FieldMetadata_Processes =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProcessStats_Process,
+      ProcessStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Processes kProcesses() { return {}; }
+  template <typename T = ProcessStats_Process> T* add_processes() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_CollectionEndTimestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProcessStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CollectionEndTimestamp kCollectionEndTimestamp() { return {}; }
+  void set_collection_end_timestamp(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CollectionEndTimestamp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ProcessStats_Process_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/14, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ProcessStats_Process_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ProcessStats_Process_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ProcessStats_Process_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  int32_t pid() const { return at<1>().as_int32(); }
+  bool has_vm_size_kb() const { return at<2>().valid(); }
+  uint64_t vm_size_kb() const { return at<2>().as_uint64(); }
+  bool has_vm_rss_kb() const { return at<3>().valid(); }
+  uint64_t vm_rss_kb() const { return at<3>().as_uint64(); }
+  bool has_rss_anon_kb() const { return at<4>().valid(); }
+  uint64_t rss_anon_kb() const { return at<4>().as_uint64(); }
+  bool has_rss_file_kb() const { return at<5>().valid(); }
+  uint64_t rss_file_kb() const { return at<5>().as_uint64(); }
+  bool has_rss_shmem_kb() const { return at<6>().valid(); }
+  uint64_t rss_shmem_kb() const { return at<6>().as_uint64(); }
+  bool has_vm_swap_kb() const { return at<7>().valid(); }
+  uint64_t vm_swap_kb() const { return at<7>().as_uint64(); }
+  bool has_vm_locked_kb() const { return at<8>().valid(); }
+  uint64_t vm_locked_kb() const { return at<8>().as_uint64(); }
+  bool has_vm_hwm_kb() const { return at<9>().valid(); }
+  uint64_t vm_hwm_kb() const { return at<9>().as_uint64(); }
+  bool has_oom_score_adj() const { return at<10>().valid(); }
+  int64_t oom_score_adj() const { return at<10>().as_int64(); }
+  bool has_threads() const { return at<11>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> threads() const { return GetRepeated<::protozero::ConstBytes>(11); }
+  bool has_is_peak_rss_resettable() const { return at<12>().valid(); }
+  bool is_peak_rss_resettable() const { return at<12>().as_bool(); }
+  bool has_chrome_private_footprint_kb() const { return at<13>().valid(); }
+  uint32_t chrome_private_footprint_kb() const { return at<13>().as_uint32(); }
+  bool has_chrome_peak_resident_set_kb() const { return at<14>().valid(); }
+  uint32_t chrome_peak_resident_set_kb() const { return at<14>().as_uint32(); }
+};
+
+class ProcessStats_Process : public ::protozero::Message {
+ public:
+  using Decoder = ProcessStats_Process_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kVmSizeKbFieldNumber = 2,
+    kVmRssKbFieldNumber = 3,
+    kRssAnonKbFieldNumber = 4,
+    kRssFileKbFieldNumber = 5,
+    kRssShmemKbFieldNumber = 6,
+    kVmSwapKbFieldNumber = 7,
+    kVmLockedKbFieldNumber = 8,
+    kVmHwmKbFieldNumber = 9,
+    kOomScoreAdjFieldNumber = 10,
+    kThreadsFieldNumber = 11,
+    kIsPeakRssResettableFieldNumber = 12,
+    kChromePrivateFootprintKbFieldNumber = 13,
+    kChromePeakResidentSetKbFieldNumber = 14,
+  };
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ProcessStats_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_VmSizeKb =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProcessStats_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VmSizeKb kVmSizeKb() { return {}; }
+  void set_vm_size_kb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_VmSizeKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_VmRssKb =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProcessStats_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VmRssKb kVmRssKb() { return {}; }
+  void set_vm_rss_kb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_VmRssKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RssAnonKb =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProcessStats_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RssAnonKb kRssAnonKb() { return {}; }
+  void set_rss_anon_kb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RssAnonKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RssFileKb =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProcessStats_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RssFileKb kRssFileKb() { return {}; }
+  void set_rss_file_kb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RssFileKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RssShmemKb =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProcessStats_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RssShmemKb kRssShmemKb() { return {}; }
+  void set_rss_shmem_kb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RssShmemKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_VmSwapKb =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProcessStats_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VmSwapKb kVmSwapKb() { return {}; }
+  void set_vm_swap_kb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_VmSwapKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_VmLockedKb =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProcessStats_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VmLockedKb kVmLockedKb() { return {}; }
+  void set_vm_locked_kb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_VmLockedKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_VmHwmKb =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProcessStats_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VmHwmKb kVmHwmKb() { return {}; }
+  void set_vm_hwm_kb(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_VmHwmKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OomScoreAdj =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      ProcessStats_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OomScoreAdj kOomScoreAdj() { return {}; }
+  void set_oom_score_adj(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_OomScoreAdj::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Threads =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProcessStats_Thread,
+      ProcessStats_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Threads kThreads() { return {}; }
+  template <typename T = ProcessStats_Thread> T* add_threads() {
+    return BeginNestedMessage<T>(11);
+  }
+
+
+  using FieldMetadata_IsPeakRssResettable =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ProcessStats_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IsPeakRssResettable kIsPeakRssResettable() { return {}; }
+  void set_is_peak_rss_resettable(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_IsPeakRssResettable::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChromePrivateFootprintKb =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ProcessStats_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromePrivateFootprintKb kChromePrivateFootprintKb() { return {}; }
+  void set_chrome_private_footprint_kb(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChromePrivateFootprintKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ChromePeakResidentSetKb =
+    ::protozero::proto_utils::FieldMetadata<
+      14,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ProcessStats_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromePeakResidentSetKb kChromePeakResidentSetKb() { return {}; }
+  void set_chrome_peak_resident_set_kb(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChromePeakResidentSetKb::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ProcessStats_Thread_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ProcessStats_Thread_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ProcessStats_Thread_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ProcessStats_Thread_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_tid() const { return at<1>().valid(); }
+  int32_t tid() const { return at<1>().as_int32(); }
+  bool has_cpu_freq_indices() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint32_t> cpu_freq_indices() const { return GetRepeated<uint32_t>(2); }
+  bool has_cpu_freq_ticks() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint64_t> cpu_freq_ticks() const { return GetRepeated<uint64_t>(3); }
+  bool has_cpu_freq_full() const { return at<4>().valid(); }
+  bool cpu_freq_full() const { return at<4>().as_bool(); }
+};
+
+class ProcessStats_Thread : public ::protozero::Message {
+ public:
+  using Decoder = ProcessStats_Thread_Decoder;
+  enum : int32_t {
+    kTidFieldNumber = 1,
+    kCpuFreqIndicesFieldNumber = 2,
+    kCpuFreqTicksFieldNumber = 3,
+    kCpuFreqFullFieldNumber = 4,
+  };
+
+  using FieldMetadata_Tid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ProcessStats_Thread>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tid kTid() { return {}; }
+  void set_tid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CpuFreqIndices =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      ProcessStats_Thread>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuFreqIndices kCpuFreqIndices() { return {}; }
+  void add_cpu_freq_indices(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CpuFreqIndices::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CpuFreqTicks =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProcessStats_Thread>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuFreqTicks kCpuFreqTicks() { return {}; }
+  void add_cpu_freq_ticks(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CpuFreqTicks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CpuFreqFull =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      ProcessStats_Thread>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuFreqFull kCpuFreqFull() { return {}; }
+  void set_cpu_freq_full(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_CpuFreqFull::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ps/process_tree.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PS_PROCESS_TREE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_PS_PROCESS_TREE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class ProcessTree_Process;
+class ProcessTree_Thread;
+
+class ProcessTree_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ProcessTree_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ProcessTree_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ProcessTree_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_processes() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> processes() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_threads() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> threads() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_collection_end_timestamp() const { return at<3>().valid(); }
+  uint64_t collection_end_timestamp() const { return at<3>().as_uint64(); }
+};
+
+class ProcessTree : public ::protozero::Message {
+ public:
+  using Decoder = ProcessTree_Decoder;
+  enum : int32_t {
+    kProcessesFieldNumber = 1,
+    kThreadsFieldNumber = 2,
+    kCollectionEndTimestampFieldNumber = 3,
+  };
+  using Thread = ::perfetto::protos::pbzero::ProcessTree_Thread;
+  using Process = ::perfetto::protos::pbzero::ProcessTree_Process;
+
+  using FieldMetadata_Processes =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProcessTree_Process,
+      ProcessTree>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Processes kProcesses() { return {}; }
+  template <typename T = ProcessTree_Process> T* add_processes() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_Threads =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProcessTree_Thread,
+      ProcessTree>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Threads kThreads() { return {}; }
+  template <typename T = ProcessTree_Thread> T* add_threads() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_CollectionEndTimestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      ProcessTree>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CollectionEndTimestamp kCollectionEndTimestamp() { return {}; }
+  void set_collection_end_timestamp(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CollectionEndTimestamp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ProcessTree_Process_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  ProcessTree_Process_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ProcessTree_Process_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ProcessTree_Process_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  int32_t pid() const { return at<1>().as_int32(); }
+  bool has_ppid() const { return at<2>().valid(); }
+  int32_t ppid() const { return at<2>().as_int32(); }
+  bool has_cmdline() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> cmdline() const { return GetRepeated<::protozero::ConstChars>(3); }
+  bool has_threads_deprecated() const { return at<4>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> threads_deprecated() const { return GetRepeated<::protozero::ConstBytes>(4); }
+  bool has_uid() const { return at<5>().valid(); }
+  int32_t uid() const { return at<5>().as_int32(); }
+};
+
+class ProcessTree_Process : public ::protozero::Message {
+ public:
+  using Decoder = ProcessTree_Process_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kPpidFieldNumber = 2,
+    kCmdlineFieldNumber = 3,
+    kThreadsDeprecatedFieldNumber = 4,
+    kUidFieldNumber = 5,
+  };
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ProcessTree_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Ppid =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ProcessTree_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Ppid kPpid() { return {}; }
+  void set_ppid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Ppid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cmdline =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ProcessTree_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cmdline kCmdline() { return {}; }
+  void add_cmdline(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Cmdline::kFieldId, data, size);
+  }
+  void add_cmdline(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cmdline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ThreadsDeprecated =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProcessTree_Thread,
+      ProcessTree_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadsDeprecated kThreadsDeprecated() { return {}; }
+  template <typename T = ProcessTree_Thread> T* add_threads_deprecated() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_Uid =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ProcessTree_Process>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Uid kUid() { return {}; }
+  void set_uid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Uid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class ProcessTree_Thread_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ProcessTree_Thread_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ProcessTree_Thread_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ProcessTree_Thread_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_tid() const { return at<1>().valid(); }
+  int32_t tid() const { return at<1>().as_int32(); }
+  bool has_tgid() const { return at<3>().valid(); }
+  int32_t tgid() const { return at<3>().as_int32(); }
+  bool has_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars name() const { return at<2>().as_string(); }
+};
+
+class ProcessTree_Thread : public ::protozero::Message {
+ public:
+  using Decoder = ProcessTree_Thread_Decoder;
+  enum : int32_t {
+    kTidFieldNumber = 1,
+    kTgidFieldNumber = 3,
+    kNameFieldNumber = 2,
+  };
+
+  using FieldMetadata_Tid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ProcessTree_Thread>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tid kTid() { return {}; }
+  void set_tid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Tgid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      ProcessTree_Thread>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Tgid kTgid() { return {}; }
+  void set_tgid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Tgid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      ProcessTree_Thread>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/sys_stats/sys_stats.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_SYS_STATS_SYS_STATS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_SYS_STATS_SYS_STATS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class SysStats_CpuTimes;
+class SysStats_DevfreqValue;
+class SysStats_InterruptCount;
+class SysStats_MeminfoValue;
+class SysStats_VmstatValue;
+enum MeminfoCounters : int32_t;
+enum VmstatCounters : int32_t;
+
+class SysStats_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/10, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  SysStats_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SysStats_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SysStats_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_meminfo() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> meminfo() const { return GetRepeated<::protozero::ConstBytes>(1); }
+  bool has_vmstat() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> vmstat() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_cpu_stat() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> cpu_stat() const { return GetRepeated<::protozero::ConstBytes>(3); }
+  bool has_num_forks() const { return at<4>().valid(); }
+  uint64_t num_forks() const { return at<4>().as_uint64(); }
+  bool has_num_irq_total() const { return at<5>().valid(); }
+  uint64_t num_irq_total() const { return at<5>().as_uint64(); }
+  bool has_num_irq() const { return at<6>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> num_irq() const { return GetRepeated<::protozero::ConstBytes>(6); }
+  bool has_num_softirq_total() const { return at<7>().valid(); }
+  uint64_t num_softirq_total() const { return at<7>().as_uint64(); }
+  bool has_num_softirq() const { return at<8>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> num_softirq() const { return GetRepeated<::protozero::ConstBytes>(8); }
+  bool has_collection_end_timestamp() const { return at<9>().valid(); }
+  uint64_t collection_end_timestamp() const { return at<9>().as_uint64(); }
+  bool has_devfreq() const { return at<10>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> devfreq() const { return GetRepeated<::protozero::ConstBytes>(10); }
+};
+
+class SysStats : public ::protozero::Message {
+ public:
+  using Decoder = SysStats_Decoder;
+  enum : int32_t {
+    kMeminfoFieldNumber = 1,
+    kVmstatFieldNumber = 2,
+    kCpuStatFieldNumber = 3,
+    kNumForksFieldNumber = 4,
+    kNumIrqTotalFieldNumber = 5,
+    kNumIrqFieldNumber = 6,
+    kNumSoftirqTotalFieldNumber = 7,
+    kNumSoftirqFieldNumber = 8,
+    kCollectionEndTimestampFieldNumber = 9,
+    kDevfreqFieldNumber = 10,
+  };
+  using MeminfoValue = ::perfetto::protos::pbzero::SysStats_MeminfoValue;
+  using VmstatValue = ::perfetto::protos::pbzero::SysStats_VmstatValue;
+  using CpuTimes = ::perfetto::protos::pbzero::SysStats_CpuTimes;
+  using InterruptCount = ::perfetto::protos::pbzero::SysStats_InterruptCount;
+  using DevfreqValue = ::perfetto::protos::pbzero::SysStats_DevfreqValue;
+
+  using FieldMetadata_Meminfo =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SysStats_MeminfoValue,
+      SysStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Meminfo kMeminfo() { return {}; }
+  template <typename T = SysStats_MeminfoValue> T* add_meminfo() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_Vmstat =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SysStats_VmstatValue,
+      SysStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Vmstat kVmstat() { return {}; }
+  template <typename T = SysStats_VmstatValue> T* add_vmstat() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_CpuStat =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SysStats_CpuTimes,
+      SysStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuStat kCpuStat() { return {}; }
+  template <typename T = SysStats_CpuTimes> T* add_cpu_stat() {
+    return BeginNestedMessage<T>(3);
+  }
+
+
+  using FieldMetadata_NumForks =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SysStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NumForks kNumForks() { return {}; }
+  void set_num_forks(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NumForks::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NumIrqTotal =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SysStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NumIrqTotal kNumIrqTotal() { return {}; }
+  void set_num_irq_total(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NumIrqTotal::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NumIrq =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SysStats_InterruptCount,
+      SysStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NumIrq kNumIrq() { return {}; }
+  template <typename T = SysStats_InterruptCount> T* add_num_irq() {
+    return BeginNestedMessage<T>(6);
+  }
+
+
+  using FieldMetadata_NumSoftirqTotal =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SysStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NumSoftirqTotal kNumSoftirqTotal() { return {}; }
+  void set_num_softirq_total(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_NumSoftirqTotal::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NumSoftirq =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SysStats_InterruptCount,
+      SysStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NumSoftirq kNumSoftirq() { return {}; }
+  template <typename T = SysStats_InterruptCount> T* add_num_softirq() {
+    return BeginNestedMessage<T>(8);
+  }
+
+
+  using FieldMetadata_CollectionEndTimestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SysStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CollectionEndTimestamp kCollectionEndTimestamp() { return {}; }
+  void set_collection_end_timestamp(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CollectionEndTimestamp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Devfreq =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SysStats_DevfreqValue,
+      SysStats>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Devfreq kDevfreq() { return {}; }
+  template <typename T = SysStats_DevfreqValue> T* add_devfreq() {
+    return BeginNestedMessage<T>(10);
+  }
+
+};
+
+class SysStats_DevfreqValue_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SysStats_DevfreqValue_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SysStats_DevfreqValue_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SysStats_DevfreqValue_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_key() const { return at<1>().valid(); }
+  ::protozero::ConstChars key() const { return at<1>().as_string(); }
+  bool has_value() const { return at<2>().valid(); }
+  uint64_t value() const { return at<2>().as_uint64(); }
+};
+
+class SysStats_DevfreqValue : public ::protozero::Message {
+ public:
+  using Decoder = SysStats_DevfreqValue_Decoder;
+  enum : int32_t {
+    kKeyFieldNumber = 1,
+    kValueFieldNumber = 2,
+  };
+
+  using FieldMetadata_Key =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      SysStats_DevfreqValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Key kKey() { return {}; }
+  void set_key(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Key::kFieldId, data, size);
+  }
+  void set_key(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Key::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Value =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SysStats_DevfreqValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Value kValue() { return {}; }
+  void set_value(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Value::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SysStats_InterruptCount_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SysStats_InterruptCount_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SysStats_InterruptCount_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SysStats_InterruptCount_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_irq() const { return at<1>().valid(); }
+  int32_t irq() const { return at<1>().as_int32(); }
+  bool has_count() const { return at<2>().valid(); }
+  uint64_t count() const { return at<2>().as_uint64(); }
+};
+
+class SysStats_InterruptCount : public ::protozero::Message {
+ public:
+  using Decoder = SysStats_InterruptCount_Decoder;
+  enum : int32_t {
+    kIrqFieldNumber = 1,
+    kCountFieldNumber = 2,
+  };
+
+  using FieldMetadata_Irq =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      SysStats_InterruptCount>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Irq kIrq() { return {}; }
+  void set_irq(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Irq::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Count =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SysStats_InterruptCount>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Count kCount() { return {}; }
+  void set_count(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Count::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SysStats_CpuTimes_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/8, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SysStats_CpuTimes_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SysStats_CpuTimes_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SysStats_CpuTimes_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_cpu_id() const { return at<1>().valid(); }
+  uint32_t cpu_id() const { return at<1>().as_uint32(); }
+  bool has_user_ns() const { return at<2>().valid(); }
+  uint64_t user_ns() const { return at<2>().as_uint64(); }
+  bool has_user_ice_ns() const { return at<3>().valid(); }
+  uint64_t user_ice_ns() const { return at<3>().as_uint64(); }
+  bool has_system_mode_ns() const { return at<4>().valid(); }
+  uint64_t system_mode_ns() const { return at<4>().as_uint64(); }
+  bool has_idle_ns() const { return at<5>().valid(); }
+  uint64_t idle_ns() const { return at<5>().as_uint64(); }
+  bool has_io_wait_ns() const { return at<6>().valid(); }
+  uint64_t io_wait_ns() const { return at<6>().as_uint64(); }
+  bool has_irq_ns() const { return at<7>().valid(); }
+  uint64_t irq_ns() const { return at<7>().as_uint64(); }
+  bool has_softirq_ns() const { return at<8>().valid(); }
+  uint64_t softirq_ns() const { return at<8>().as_uint64(); }
+};
+
+class SysStats_CpuTimes : public ::protozero::Message {
+ public:
+  using Decoder = SysStats_CpuTimes_Decoder;
+  enum : int32_t {
+    kCpuIdFieldNumber = 1,
+    kUserNsFieldNumber = 2,
+    kUserIceNsFieldNumber = 3,
+    kSystemModeNsFieldNumber = 4,
+    kIdleNsFieldNumber = 5,
+    kIoWaitNsFieldNumber = 6,
+    kIrqNsFieldNumber = 7,
+    kSoftirqNsFieldNumber = 8,
+  };
+
+  using FieldMetadata_CpuId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      SysStats_CpuTimes>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuId kCpuId() { return {}; }
+  void set_cpu_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_CpuId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UserNs =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SysStats_CpuTimes>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UserNs kUserNs() { return {}; }
+  void set_user_ns(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UserNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_UserIceNs =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SysStats_CpuTimes>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UserIceNs kUserIceNs() { return {}; }
+  void set_user_ice_ns(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_UserIceNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SystemModeNs =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SysStats_CpuTimes>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SystemModeNs kSystemModeNs() { return {}; }
+  void set_system_mode_ns(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SystemModeNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IdleNs =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SysStats_CpuTimes>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IdleNs kIdleNs() { return {}; }
+  void set_idle_ns(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IdleNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IoWaitNs =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SysStats_CpuTimes>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IoWaitNs kIoWaitNs() { return {}; }
+  void set_io_wait_ns(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IoWaitNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IrqNs =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SysStats_CpuTimes>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IrqNs kIrqNs() { return {}; }
+  void set_irq_ns(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IrqNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SoftirqNs =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SysStats_CpuTimes>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SoftirqNs kSoftirqNs() { return {}; }
+  void set_softirq_ns(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SoftirqNs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SysStats_VmstatValue_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SysStats_VmstatValue_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SysStats_VmstatValue_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SysStats_VmstatValue_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_key() const { return at<1>().valid(); }
+  int32_t key() const { return at<1>().as_int32(); }
+  bool has_value() const { return at<2>().valid(); }
+  uint64_t value() const { return at<2>().as_uint64(); }
+};
+
+class SysStats_VmstatValue : public ::protozero::Message {
+ public:
+  using Decoder = SysStats_VmstatValue_Decoder;
+  enum : int32_t {
+    kKeyFieldNumber = 1,
+    kValueFieldNumber = 2,
+  };
+
+  using FieldMetadata_Key =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::VmstatCounters,
+      SysStats_VmstatValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Key kKey() { return {}; }
+  void set_key(::perfetto::protos::pbzero::VmstatCounters value) {
+    static constexpr uint32_t field_id = FieldMetadata_Key::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Value =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SysStats_VmstatValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Value kValue() { return {}; }
+  void set_value(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Value::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class SysStats_MeminfoValue_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  SysStats_MeminfoValue_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit SysStats_MeminfoValue_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit SysStats_MeminfoValue_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_key() const { return at<1>().valid(); }
+  int32_t key() const { return at<1>().as_int32(); }
+  bool has_value() const { return at<2>().valid(); }
+  uint64_t value() const { return at<2>().as_uint64(); }
+};
+
+class SysStats_MeminfoValue : public ::protozero::Message {
+ public:
+  using Decoder = SysStats_MeminfoValue_Decoder;
+  enum : int32_t {
+    kKeyFieldNumber = 1,
+    kValueFieldNumber = 2,
+  };
+
+  using FieldMetadata_Key =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::MeminfoCounters,
+      SysStats_MeminfoValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Key kKey() { return {}; }
+  void set_key(::perfetto::protos::pbzero::MeminfoCounters value) {
+    static constexpr uint32_t field_id = FieldMetadata_Key::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Value =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      SysStats_MeminfoValue>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Value kValue() { return {}; }
+  void set_value(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Value::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/system_info/cpu_info.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_SYSTEM_INFO_CPU_INFO_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_SYSTEM_INFO_CPU_INFO_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class CpuInfo_Cpu;
+
+class CpuInfo_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  CpuInfo_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CpuInfo_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CpuInfo_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_cpus() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> cpus() const { return GetRepeated<::protozero::ConstBytes>(1); }
+};
+
+class CpuInfo : public ::protozero::Message {
+ public:
+  using Decoder = CpuInfo_Decoder;
+  enum : int32_t {
+    kCpusFieldNumber = 1,
+  };
+  using Cpu = ::perfetto::protos::pbzero::CpuInfo_Cpu;
+
+  using FieldMetadata_Cpus =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CpuInfo_Cpu,
+      CpuInfo>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cpus kCpus() { return {}; }
+  template <typename T = CpuInfo_Cpu> T* add_cpus() {
+    return BeginNestedMessage<T>(1);
+  }
+
+};
+
+class CpuInfo_Cpu_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  CpuInfo_Cpu_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit CpuInfo_Cpu_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit CpuInfo_Cpu_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_processor() const { return at<1>().valid(); }
+  ::protozero::ConstChars processor() const { return at<1>().as_string(); }
+  bool has_frequencies() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<uint32_t> frequencies() const { return GetRepeated<uint32_t>(2); }
+};
+
+class CpuInfo_Cpu : public ::protozero::Message {
+ public:
+  using Decoder = CpuInfo_Cpu_Decoder;
+  enum : int32_t {
+    kProcessorFieldNumber = 1,
+    kFrequenciesFieldNumber = 2,
+  };
+
+  using FieldMetadata_Processor =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      CpuInfo_Cpu>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Processor kProcessor() { return {}; }
+  void set_processor(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Processor::kFieldId, data, size);
+  }
+  void set_processor(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Processor::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Frequencies =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      CpuInfo_Cpu>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Frequencies kFrequencies() { return {}; }
+  void add_frequencies(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Frequencies::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/trace_packet_defaults.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACE_PACKET_DEFAULTS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACE_PACKET_DEFAULTS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class PerfSampleDefaults;
+class TrackEventDefaults;
+
+class TracePacketDefaults_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/58, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TracePacketDefaults_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TracePacketDefaults_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TracePacketDefaults_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_timestamp_clock_id() const { return at<58>().valid(); }
+  uint32_t timestamp_clock_id() const { return at<58>().as_uint32(); }
+  bool has_track_event_defaults() const { return at<11>().valid(); }
+  ::protozero::ConstBytes track_event_defaults() const { return at<11>().as_bytes(); }
+  bool has_perf_sample_defaults() const { return at<12>().valid(); }
+  ::protozero::ConstBytes perf_sample_defaults() const { return at<12>().as_bytes(); }
+};
+
+class TracePacketDefaults : public ::protozero::Message {
+ public:
+  using Decoder = TracePacketDefaults_Decoder;
+  enum : int32_t {
+    kTimestampClockIdFieldNumber = 58,
+    kTrackEventDefaultsFieldNumber = 11,
+    kPerfSampleDefaultsFieldNumber = 12,
+  };
+
+  using FieldMetadata_TimestampClockId =
+    ::protozero::proto_utils::FieldMetadata<
+      58,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TracePacketDefaults>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TimestampClockId kTimestampClockId() { return {}; }
+  void set_timestamp_clock_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TimestampClockId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TrackEventDefaults =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TrackEventDefaults,
+      TracePacketDefaults>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrackEventDefaults kTrackEventDefaults() { return {}; }
+  template <typename T = TrackEventDefaults> T* set_track_event_defaults() {
+    return BeginNestedMessage<T>(11);
+  }
+
+
+  using FieldMetadata_PerfSampleDefaults =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PerfSampleDefaults,
+      TracePacketDefaults>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PerfSampleDefaults kPerfSampleDefaults() { return {}; }
+  template <typename T = PerfSampleDefaults> T* set_perf_sample_defaults() {
+    return BeginNestedMessage<T>(12);
+  }
+
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/test_event.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TEST_EVENT_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TEST_EVENT_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class TestEvent_TestPayload;
+
+class TestEvent_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TestEvent_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TestEvent_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TestEvent_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_str() const { return at<1>().valid(); }
+  ::protozero::ConstChars str() const { return at<1>().as_string(); }
+  bool has_seq_value() const { return at<2>().valid(); }
+  uint32_t seq_value() const { return at<2>().as_uint32(); }
+  bool has_counter() const { return at<3>().valid(); }
+  uint64_t counter() const { return at<3>().as_uint64(); }
+  bool has_is_last() const { return at<4>().valid(); }
+  bool is_last() const { return at<4>().as_bool(); }
+  bool has_payload() const { return at<5>().valid(); }
+  ::protozero::ConstBytes payload() const { return at<5>().as_bytes(); }
+};
+
+class TestEvent : public ::protozero::Message {
+ public:
+  using Decoder = TestEvent_Decoder;
+  enum : int32_t {
+    kStrFieldNumber = 1,
+    kSeqValueFieldNumber = 2,
+    kCounterFieldNumber = 3,
+    kIsLastFieldNumber = 4,
+    kPayloadFieldNumber = 5,
+  };
+  using TestPayload = ::perfetto::protos::pbzero::TestEvent_TestPayload;
+
+  using FieldMetadata_Str =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TestEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Str kStr() { return {}; }
+  void set_str(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Str::kFieldId, data, size);
+  }
+  void set_str(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Str::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SeqValue =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TestEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SeqValue kSeqValue() { return {}; }
+  void set_seq_value(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SeqValue::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Counter =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TestEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Counter kCounter() { return {}; }
+  void set_counter(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Counter::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IsLast =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TestEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IsLast kIsLast() { return {}; }
+  void set_is_last(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_IsLast::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Payload =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TestEvent_TestPayload,
+      TestEvent>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Payload kPayload() { return {}; }
+  template <typename T = TestEvent_TestPayload> T* set_payload() {
+    return BeginNestedMessage<T>(5);
+  }
+
+};
+
+class TestEvent_TestPayload_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/6, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  TestEvent_TestPayload_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TestEvent_TestPayload_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TestEvent_TestPayload_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_str() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstChars> str() const { return GetRepeated<::protozero::ConstChars>(1); }
+  bool has_nested() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> nested() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_single_string() const { return at<4>().valid(); }
+  ::protozero::ConstChars single_string() const { return at<4>().as_string(); }
+  bool has_single_int() const { return at<5>().valid(); }
+  int32_t single_int() const { return at<5>().as_int32(); }
+  bool has_repeated_ints() const { return at<6>().valid(); }
+  ::protozero::RepeatedFieldIterator<int32_t> repeated_ints() const { return GetRepeated<int32_t>(6); }
+  bool has_remaining_nesting_depth() const { return at<3>().valid(); }
+  uint32_t remaining_nesting_depth() const { return at<3>().as_uint32(); }
+};
+
+class TestEvent_TestPayload : public ::protozero::Message {
+ public:
+  using Decoder = TestEvent_TestPayload_Decoder;
+  enum : int32_t {
+    kStrFieldNumber = 1,
+    kNestedFieldNumber = 2,
+    kSingleStringFieldNumber = 4,
+    kSingleIntFieldNumber = 5,
+    kRepeatedIntsFieldNumber = 6,
+    kRemainingNestingDepthFieldNumber = 3,
+  };
+
+  using FieldMetadata_Str =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TestEvent_TestPayload>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Str kStr() { return {}; }
+  void add_str(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Str::kFieldId, data, size);
+  }
+  void add_str(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Str::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Nested =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TestEvent_TestPayload,
+      TestEvent_TestPayload>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Nested kNested() { return {}; }
+  template <typename T = TestEvent_TestPayload> T* add_nested() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_SingleString =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TestEvent_TestPayload>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SingleString kSingleString() { return {}; }
+  void set_single_string(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_SingleString::kFieldId, data, size);
+  }
+  void set_single_string(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_SingleString::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SingleInt =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TestEvent_TestPayload>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SingleInt kSingleInt() { return {}; }
+  void set_single_int(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SingleInt::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RepeatedInts =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TestEvent_TestPayload>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RepeatedInts kRepeatedInts() { return {}; }
+  void add_repeated_ints(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RepeatedInts::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_RemainingNestingDepth =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TestEvent_TestPayload>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_RemainingNestingDepth kRemainingNestingDepth() { return {}; }
+  void set_remaining_nesting_depth(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_RemainingNestingDepth::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/test_extensions.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TEST_EXTENSIONS_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TEST_EXTENSIONS_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+// gen_amalgamated expanded: #include "protos/perfetto/trace/track_event/track_event.pbzero.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+
+class TestExtensionChild_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TestExtensionChild_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TestExtensionChild_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TestExtensionChild_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_child_field_for_testing() const { return at<1>().valid(); }
+  ::protozero::ConstChars child_field_for_testing() const { return at<1>().as_string(); }
+};
+
+class TestExtensionChild : public ::protozero::Message {
+ public:
+  using Decoder = TestExtensionChild_Decoder;
+  enum : int32_t {
+    kChildFieldForTestingFieldNumber = 1,
+  };
+
+  using FieldMetadata_ChildFieldForTesting =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TestExtensionChild>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChildFieldForTesting kChildFieldForTesting() { return {}; }
+  void set_child_field_for_testing(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ChildFieldForTesting::kFieldId, data, size);
+  }
+  void set_child_field_for_testing(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ChildFieldForTesting::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class TestExtension : public ::perfetto::protos::pbzero::TrackEvent {
+ public:
+
+  using FieldMetadata_StringExtensionForTesting =
+    ::protozero::proto_utils::FieldMetadata<
+      9900,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TestExtension>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StringExtensionForTesting kStringExtensionForTesting() { return {}; }
+  void set_string_extension_for_testing(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_StringExtensionForTesting::kFieldId, data, size);
+  }
+  void set_string_extension_for_testing(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_StringExtensionForTesting::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IntExtensionForTesting =
+    ::protozero::proto_utils::FieldMetadata<
+      9901,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TestExtension>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IntExtensionForTesting kIntExtensionForTesting() { return {}; }
+  void add_int_extension_for_testing(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_IntExtensionForTesting::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_OmittedExtensionForTesting =
+    ::protozero::proto_utils::FieldMetadata<
+      9902,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      TestExtension>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_OmittedExtensionForTesting kOmittedExtensionForTesting() { return {}; }
+  void set_omitted_extension_for_testing(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_OmittedExtensionForTesting::kFieldId, data, size);
+  }
+  void set_omitted_extension_for_testing(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_OmittedExtensionForTesting::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_NestedMessageExtensionForTesting =
+    ::protozero::proto_utils::FieldMetadata<
+      9903,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TestExtensionChild,
+      TestExtension>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_NestedMessageExtensionForTesting kNestedMessageExtensionForTesting() { return {}; }
+  template <typename T = TestExtensionChild> T* set_nested_message_extension_for_testing() {
+    return BeginNestedMessage<T>(9903);
+  }
+
+};
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/trace_packet.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACE_PACKET_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACE_PACKET_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class AndroidEnergyEstimationBreakdown;
+class AndroidLogPacket;
+class BatteryCounters;
+class ChromeBenchmarkMetadata;
+class ChromeEventBundle;
+class ChromeMetadataPacket;
+class ClockSnapshot;
+class CpuInfo;
+class DeobfuscationMapping;
+class ExtensionDescriptor;
+class FrameTimelineEvent;
+class FtraceEventBundle;
+class FtraceStats;
+class GpuCounterEvent;
+class GpuLog;
+class GpuMemTotalEvent;
+class GpuRenderStageEvent;
+class GraphicsFrameEvent;
+class HeapGraph;
+class InitialDisplayState;
+class InodeFileMap;
+class InternedData;
+class MemoryTrackerSnapshot;
+class ModuleSymbols;
+class PackagesList;
+class PerfSample;
+class PerfettoMetatrace;
+class PowerRails;
+class ProcessDescriptor;
+class ProcessStats;
+class ProcessTree;
+class ProfilePacket;
+class ProfiledFrameSymbols;
+class SmapsPacket;
+class StreamingAllocation;
+class StreamingFree;
+class StreamingProfilePacket;
+class SysStats;
+class SystemInfo;
+class TestEvent;
+class ThreadDescriptor;
+class TraceConfig;
+class TracePacketDefaults;
+class TraceStats;
+class TracingServiceEvent;
+class TrackDescriptor;
+class TrackEvent;
+class Trigger;
+class UiState;
+class VulkanApiEvent;
+class VulkanMemoryEvent;
+
+enum TracePacket_SequenceFlags : int32_t {
+  TracePacket_SequenceFlags_SEQ_UNSPECIFIED = 0,
+  TracePacket_SequenceFlags_SEQ_INCREMENTAL_STATE_CLEARED = 1,
+  TracePacket_SequenceFlags_SEQ_NEEDS_INCREMENTAL_STATE = 2,
+};
+
+const TracePacket_SequenceFlags TracePacket_SequenceFlags_MIN = TracePacket_SequenceFlags_SEQ_UNSPECIFIED;
+const TracePacket_SequenceFlags TracePacket_SequenceFlags_MAX = TracePacket_SequenceFlags_SEQ_NEEDS_INCREMENTAL_STATE;
+
+class TracePacket_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/900, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  TracePacket_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit TracePacket_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit TracePacket_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_timestamp() const { return at<8>().valid(); }
+  uint64_t timestamp() const { return at<8>().as_uint64(); }
+  bool has_timestamp_clock_id() const { return at<58>().valid(); }
+  uint32_t timestamp_clock_id() const { return at<58>().as_uint32(); }
+  bool has_process_tree() const { return at<2>().valid(); }
+  ::protozero::ConstBytes process_tree() const { return at<2>().as_bytes(); }
+  bool has_process_stats() const { return at<9>().valid(); }
+  ::protozero::ConstBytes process_stats() const { return at<9>().as_bytes(); }
+  bool has_inode_file_map() const { return at<4>().valid(); }
+  ::protozero::ConstBytes inode_file_map() const { return at<4>().as_bytes(); }
+  bool has_chrome_events() const { return at<5>().valid(); }
+  ::protozero::ConstBytes chrome_events() const { return at<5>().as_bytes(); }
+  bool has_clock_snapshot() const { return at<6>().valid(); }
+  ::protozero::ConstBytes clock_snapshot() const { return at<6>().as_bytes(); }
+  bool has_sys_stats() const { return at<7>().valid(); }
+  ::protozero::ConstBytes sys_stats() const { return at<7>().as_bytes(); }
+  bool has_track_event() const { return at<11>().valid(); }
+  ::protozero::ConstBytes track_event() const { return at<11>().as_bytes(); }
+  bool has_trace_config() const { return at<33>().valid(); }
+  ::protozero::ConstBytes trace_config() const { return at<33>().as_bytes(); }
+  bool has_ftrace_stats() const { return at<34>().valid(); }
+  ::protozero::ConstBytes ftrace_stats() const { return at<34>().as_bytes(); }
+  bool has_trace_stats() const { return at<35>().valid(); }
+  ::protozero::ConstBytes trace_stats() const { return at<35>().as_bytes(); }
+  bool has_profile_packet() const { return at<37>().valid(); }
+  ::protozero::ConstBytes profile_packet() const { return at<37>().as_bytes(); }
+  bool has_streaming_allocation() const { return at<74>().valid(); }
+  ::protozero::ConstBytes streaming_allocation() const { return at<74>().as_bytes(); }
+  bool has_streaming_free() const { return at<75>().valid(); }
+  ::protozero::ConstBytes streaming_free() const { return at<75>().as_bytes(); }
+  bool has_battery() const { return at<38>().valid(); }
+  ::protozero::ConstBytes battery() const { return at<38>().as_bytes(); }
+  bool has_power_rails() const { return at<40>().valid(); }
+  ::protozero::ConstBytes power_rails() const { return at<40>().as_bytes(); }
+  bool has_android_log() const { return at<39>().valid(); }
+  ::protozero::ConstBytes android_log() const { return at<39>().as_bytes(); }
+  bool has_system_info() const { return at<45>().valid(); }
+  ::protozero::ConstBytes system_info() const { return at<45>().as_bytes(); }
+  bool has_trigger() const { return at<46>().valid(); }
+  ::protozero::ConstBytes trigger() const { return at<46>().as_bytes(); }
+  bool has_packages_list() const { return at<47>().valid(); }
+  ::protozero::ConstBytes packages_list() const { return at<47>().as_bytes(); }
+  bool has_chrome_benchmark_metadata() const { return at<48>().valid(); }
+  ::protozero::ConstBytes chrome_benchmark_metadata() const { return at<48>().as_bytes(); }
+  bool has_perfetto_metatrace() const { return at<49>().valid(); }
+  ::protozero::ConstBytes perfetto_metatrace() const { return at<49>().as_bytes(); }
+  bool has_chrome_metadata() const { return at<51>().valid(); }
+  ::protozero::ConstBytes chrome_metadata() const { return at<51>().as_bytes(); }
+  bool has_gpu_counter_event() const { return at<52>().valid(); }
+  ::protozero::ConstBytes gpu_counter_event() const { return at<52>().as_bytes(); }
+  bool has_gpu_render_stage_event() const { return at<53>().valid(); }
+  ::protozero::ConstBytes gpu_render_stage_event() const { return at<53>().as_bytes(); }
+  bool has_streaming_profile_packet() const { return at<54>().valid(); }
+  ::protozero::ConstBytes streaming_profile_packet() const { return at<54>().as_bytes(); }
+  bool has_heap_graph() const { return at<56>().valid(); }
+  ::protozero::ConstBytes heap_graph() const { return at<56>().as_bytes(); }
+  bool has_graphics_frame_event() const { return at<57>().valid(); }
+  ::protozero::ConstBytes graphics_frame_event() const { return at<57>().as_bytes(); }
+  bool has_vulkan_memory_event() const { return at<62>().valid(); }
+  ::protozero::ConstBytes vulkan_memory_event() const { return at<62>().as_bytes(); }
+  bool has_gpu_log() const { return at<63>().valid(); }
+  ::protozero::ConstBytes gpu_log() const { return at<63>().as_bytes(); }
+  bool has_vulkan_api_event() const { return at<65>().valid(); }
+  ::protozero::ConstBytes vulkan_api_event() const { return at<65>().as_bytes(); }
+  bool has_perf_sample() const { return at<66>().valid(); }
+  ::protozero::ConstBytes perf_sample() const { return at<66>().as_bytes(); }
+  bool has_cpu_info() const { return at<67>().valid(); }
+  ::protozero::ConstBytes cpu_info() const { return at<67>().as_bytes(); }
+  bool has_smaps_packet() const { return at<68>().valid(); }
+  ::protozero::ConstBytes smaps_packet() const { return at<68>().as_bytes(); }
+  bool has_service_event() const { return at<69>().valid(); }
+  ::protozero::ConstBytes service_event() const { return at<69>().as_bytes(); }
+  bool has_initial_display_state() const { return at<70>().valid(); }
+  ::protozero::ConstBytes initial_display_state() const { return at<70>().as_bytes(); }
+  bool has_gpu_mem_total_event() const { return at<71>().valid(); }
+  ::protozero::ConstBytes gpu_mem_total_event() const { return at<71>().as_bytes(); }
+  bool has_memory_tracker_snapshot() const { return at<73>().valid(); }
+  ::protozero::ConstBytes memory_tracker_snapshot() const { return at<73>().as_bytes(); }
+  bool has_frame_timeline_event() const { return at<76>().valid(); }
+  ::protozero::ConstBytes frame_timeline_event() const { return at<76>().as_bytes(); }
+  bool has_android_energy_estimation_breakdown() const { return at<77>().valid(); }
+  ::protozero::ConstBytes android_energy_estimation_breakdown() const { return at<77>().as_bytes(); }
+  bool has_ui_state() const { return at<78>().valid(); }
+  ::protozero::ConstBytes ui_state() const { return at<78>().as_bytes(); }
+  bool has_profiled_frame_symbols() const { return at<55>().valid(); }
+  ::protozero::ConstBytes profiled_frame_symbols() const { return at<55>().as_bytes(); }
+  bool has_module_symbols() const { return at<61>().valid(); }
+  ::protozero::ConstBytes module_symbols() const { return at<61>().as_bytes(); }
+  bool has_deobfuscation_mapping() const { return at<64>().valid(); }
+  ::protozero::ConstBytes deobfuscation_mapping() const { return at<64>().as_bytes(); }
+  bool has_track_descriptor() const { return at<60>().valid(); }
+  ::protozero::ConstBytes track_descriptor() const { return at<60>().as_bytes(); }
+  bool has_process_descriptor() const { return at<43>().valid(); }
+  ::protozero::ConstBytes process_descriptor() const { return at<43>().as_bytes(); }
+  bool has_thread_descriptor() const { return at<44>().valid(); }
+  ::protozero::ConstBytes thread_descriptor() const { return at<44>().as_bytes(); }
+  bool has_ftrace_events() const { return at<1>().valid(); }
+  ::protozero::ConstBytes ftrace_events() const { return at<1>().as_bytes(); }
+  bool has_synchronization_marker() const { return at<36>().valid(); }
+  ::protozero::ConstBytes synchronization_marker() const { return at<36>().as_bytes(); }
+  bool has_compressed_packets() const { return at<50>().valid(); }
+  ::protozero::ConstBytes compressed_packets() const { return at<50>().as_bytes(); }
+  bool has_extension_descriptor() const { return at<72>().valid(); }
+  ::protozero::ConstBytes extension_descriptor() const { return at<72>().as_bytes(); }
+  bool has_for_testing() const { return at<900>().valid(); }
+  ::protozero::ConstBytes for_testing() const { return at<900>().as_bytes(); }
+  bool has_trusted_uid() const { return at<3>().valid(); }
+  int32_t trusted_uid() const { return at<3>().as_int32(); }
+  bool has_trusted_packet_sequence_id() const { return at<10>().valid(); }
+  uint32_t trusted_packet_sequence_id() const { return at<10>().as_uint32(); }
+  bool has_interned_data() const { return at<12>().valid(); }
+  ::protozero::ConstBytes interned_data() const { return at<12>().as_bytes(); }
+  bool has_sequence_flags() const { return at<13>().valid(); }
+  uint32_t sequence_flags() const { return at<13>().as_uint32(); }
+  bool has_incremental_state_cleared() const { return at<41>().valid(); }
+  bool incremental_state_cleared() const { return at<41>().as_bool(); }
+  bool has_trace_packet_defaults() const { return at<59>().valid(); }
+  ::protozero::ConstBytes trace_packet_defaults() const { return at<59>().as_bytes(); }
+  bool has_previous_packet_dropped() const { return at<42>().valid(); }
+  bool previous_packet_dropped() const { return at<42>().as_bool(); }
+};
+
+class TracePacket : public ::protozero::Message {
+ public:
+  using Decoder = TracePacket_Decoder;
+  enum : int32_t {
+    kTimestampFieldNumber = 8,
+    kTimestampClockIdFieldNumber = 58,
+    kProcessTreeFieldNumber = 2,
+    kProcessStatsFieldNumber = 9,
+    kInodeFileMapFieldNumber = 4,
+    kChromeEventsFieldNumber = 5,
+    kClockSnapshotFieldNumber = 6,
+    kSysStatsFieldNumber = 7,
+    kTrackEventFieldNumber = 11,
+    kTraceConfigFieldNumber = 33,
+    kFtraceStatsFieldNumber = 34,
+    kTraceStatsFieldNumber = 35,
+    kProfilePacketFieldNumber = 37,
+    kStreamingAllocationFieldNumber = 74,
+    kStreamingFreeFieldNumber = 75,
+    kBatteryFieldNumber = 38,
+    kPowerRailsFieldNumber = 40,
+    kAndroidLogFieldNumber = 39,
+    kSystemInfoFieldNumber = 45,
+    kTriggerFieldNumber = 46,
+    kPackagesListFieldNumber = 47,
+    kChromeBenchmarkMetadataFieldNumber = 48,
+    kPerfettoMetatraceFieldNumber = 49,
+    kChromeMetadataFieldNumber = 51,
+    kGpuCounterEventFieldNumber = 52,
+    kGpuRenderStageEventFieldNumber = 53,
+    kStreamingProfilePacketFieldNumber = 54,
+    kHeapGraphFieldNumber = 56,
+    kGraphicsFrameEventFieldNumber = 57,
+    kVulkanMemoryEventFieldNumber = 62,
+    kGpuLogFieldNumber = 63,
+    kVulkanApiEventFieldNumber = 65,
+    kPerfSampleFieldNumber = 66,
+    kCpuInfoFieldNumber = 67,
+    kSmapsPacketFieldNumber = 68,
+    kServiceEventFieldNumber = 69,
+    kInitialDisplayStateFieldNumber = 70,
+    kGpuMemTotalEventFieldNumber = 71,
+    kMemoryTrackerSnapshotFieldNumber = 73,
+    kFrameTimelineEventFieldNumber = 76,
+    kAndroidEnergyEstimationBreakdownFieldNumber = 77,
+    kUiStateFieldNumber = 78,
+    kProfiledFrameSymbolsFieldNumber = 55,
+    kModuleSymbolsFieldNumber = 61,
+    kDeobfuscationMappingFieldNumber = 64,
+    kTrackDescriptorFieldNumber = 60,
+    kProcessDescriptorFieldNumber = 43,
+    kThreadDescriptorFieldNumber = 44,
+    kFtraceEventsFieldNumber = 1,
+    kSynchronizationMarkerFieldNumber = 36,
+    kCompressedPacketsFieldNumber = 50,
+    kExtensionDescriptorFieldNumber = 72,
+    kForTestingFieldNumber = 900,
+    kTrustedUidFieldNumber = 3,
+    kTrustedPacketSequenceIdFieldNumber = 10,
+    kInternedDataFieldNumber = 12,
+    kSequenceFlagsFieldNumber = 13,
+    kIncrementalStateClearedFieldNumber = 41,
+    kTracePacketDefaultsFieldNumber = 59,
+    kPreviousPacketDroppedFieldNumber = 42,
+  };
+  using SequenceFlags = ::perfetto::protos::pbzero::TracePacket_SequenceFlags;
+  static const SequenceFlags SEQ_UNSPECIFIED = TracePacket_SequenceFlags_SEQ_UNSPECIFIED;
+  static const SequenceFlags SEQ_INCREMENTAL_STATE_CLEARED = TracePacket_SequenceFlags_SEQ_INCREMENTAL_STATE_CLEARED;
+  static const SequenceFlags SEQ_NEEDS_INCREMENTAL_STATE = TracePacket_SequenceFlags_SEQ_NEEDS_INCREMENTAL_STATE;
+
+  using FieldMetadata_Timestamp =
+    ::protozero::proto_utils::FieldMetadata<
+      8,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Timestamp kTimestamp() { return {}; }
+  void set_timestamp(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Timestamp::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TimestampClockId =
+    ::protozero::proto_utils::FieldMetadata<
+      58,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TimestampClockId kTimestampClockId() { return {}; }
+  void set_timestamp_clock_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TimestampClockId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProcessTree =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProcessTree,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessTree kProcessTree() { return {}; }
+  template <typename T = ProcessTree> T* set_process_tree() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_ProcessStats =
+    ::protozero::proto_utils::FieldMetadata<
+      9,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProcessStats,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessStats kProcessStats() { return {}; }
+  template <typename T = ProcessStats> T* set_process_stats() {
+    return BeginNestedMessage<T>(9);
+  }
+
+
+  using FieldMetadata_InodeFileMap =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InodeFileMap,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InodeFileMap kInodeFileMap() { return {}; }
+  template <typename T = InodeFileMap> T* set_inode_file_map() {
+    return BeginNestedMessage<T>(4);
+  }
+
+
+  using FieldMetadata_ChromeEvents =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeEventBundle,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeEvents kChromeEvents() { return {}; }
+  template <typename T = ChromeEventBundle> T* set_chrome_events() {
+    return BeginNestedMessage<T>(5);
+  }
+
+
+  using FieldMetadata_ClockSnapshot =
+    ::protozero::proto_utils::FieldMetadata<
+      6,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ClockSnapshot,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ClockSnapshot kClockSnapshot() { return {}; }
+  template <typename T = ClockSnapshot> T* set_clock_snapshot() {
+    return BeginNestedMessage<T>(6);
+  }
+
+
+  using FieldMetadata_SysStats =
+    ::protozero::proto_utils::FieldMetadata<
+      7,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SysStats,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SysStats kSysStats() { return {}; }
+  template <typename T = SysStats> T* set_sys_stats() {
+    return BeginNestedMessage<T>(7);
+  }
+
+
+  using FieldMetadata_TrackEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      11,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TrackEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrackEvent kTrackEvent() { return {}; }
+  template <typename T = TrackEvent> T* set_track_event() {
+    return BeginNestedMessage<T>(11);
+  }
+
+
+  using FieldMetadata_TraceConfig =
+    ::protozero::proto_utils::FieldMetadata<
+      33,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceConfig,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceConfig kTraceConfig() { return {}; }
+  template <typename T = TraceConfig> T* set_trace_config() {
+    return BeginNestedMessage<T>(33);
+  }
+
+
+  using FieldMetadata_FtraceStats =
+    ::protozero::proto_utils::FieldMetadata<
+      34,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FtraceStats,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FtraceStats kFtraceStats() { return {}; }
+  template <typename T = FtraceStats> T* set_ftrace_stats() {
+    return BeginNestedMessage<T>(34);
+  }
+
+
+  using FieldMetadata_TraceStats =
+    ::protozero::proto_utils::FieldMetadata<
+      35,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TraceStats,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TraceStats kTraceStats() { return {}; }
+  template <typename T = TraceStats> T* set_trace_stats() {
+    return BeginNestedMessage<T>(35);
+  }
+
+
+  using FieldMetadata_ProfilePacket =
+    ::protozero::proto_utils::FieldMetadata<
+      37,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProfilePacket,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProfilePacket kProfilePacket() { return {}; }
+  template <typename T = ProfilePacket> T* set_profile_packet() {
+    return BeginNestedMessage<T>(37);
+  }
+
+
+  using FieldMetadata_StreamingAllocation =
+    ::protozero::proto_utils::FieldMetadata<
+      74,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      StreamingAllocation,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StreamingAllocation kStreamingAllocation() { return {}; }
+  template <typename T = StreamingAllocation> T* set_streaming_allocation() {
+    return BeginNestedMessage<T>(74);
+  }
+
+
+  using FieldMetadata_StreamingFree =
+    ::protozero::proto_utils::FieldMetadata<
+      75,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      StreamingFree,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StreamingFree kStreamingFree() { return {}; }
+  template <typename T = StreamingFree> T* set_streaming_free() {
+    return BeginNestedMessage<T>(75);
+  }
+
+
+  using FieldMetadata_Battery =
+    ::protozero::proto_utils::FieldMetadata<
+      38,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      BatteryCounters,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Battery kBattery() { return {}; }
+  template <typename T = BatteryCounters> T* set_battery() {
+    return BeginNestedMessage<T>(38);
+  }
+
+
+  using FieldMetadata_PowerRails =
+    ::protozero::proto_utils::FieldMetadata<
+      40,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PowerRails,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PowerRails kPowerRails() { return {}; }
+  template <typename T = PowerRails> T* set_power_rails() {
+    return BeginNestedMessage<T>(40);
+  }
+
+
+  using FieldMetadata_AndroidLog =
+    ::protozero::proto_utils::FieldMetadata<
+      39,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AndroidLogPacket,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AndroidLog kAndroidLog() { return {}; }
+  template <typename T = AndroidLogPacket> T* set_android_log() {
+    return BeginNestedMessage<T>(39);
+  }
+
+
+  using FieldMetadata_SystemInfo =
+    ::protozero::proto_utils::FieldMetadata<
+      45,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SystemInfo,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SystemInfo kSystemInfo() { return {}; }
+  template <typename T = SystemInfo> T* set_system_info() {
+    return BeginNestedMessage<T>(45);
+  }
+
+
+  using FieldMetadata_Trigger =
+    ::protozero::proto_utils::FieldMetadata<
+      46,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      Trigger,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Trigger kTrigger() { return {}; }
+  template <typename T = Trigger> T* set_trigger() {
+    return BeginNestedMessage<T>(46);
+  }
+
+
+  using FieldMetadata_PackagesList =
+    ::protozero::proto_utils::FieldMetadata<
+      47,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PackagesList,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PackagesList kPackagesList() { return {}; }
+  template <typename T = PackagesList> T* set_packages_list() {
+    return BeginNestedMessage<T>(47);
+  }
+
+
+  using FieldMetadata_ChromeBenchmarkMetadata =
+    ::protozero::proto_utils::FieldMetadata<
+      48,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeBenchmarkMetadata,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeBenchmarkMetadata kChromeBenchmarkMetadata() { return {}; }
+  template <typename T = ChromeBenchmarkMetadata> T* set_chrome_benchmark_metadata() {
+    return BeginNestedMessage<T>(48);
+  }
+
+
+  using FieldMetadata_PerfettoMetatrace =
+    ::protozero::proto_utils::FieldMetadata<
+      49,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PerfettoMetatrace,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PerfettoMetatrace kPerfettoMetatrace() { return {}; }
+  template <typename T = PerfettoMetatrace> T* set_perfetto_metatrace() {
+    return BeginNestedMessage<T>(49);
+  }
+
+
+  using FieldMetadata_ChromeMetadata =
+    ::protozero::proto_utils::FieldMetadata<
+      51,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ChromeMetadataPacket,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ChromeMetadata kChromeMetadata() { return {}; }
+  template <typename T = ChromeMetadataPacket> T* set_chrome_metadata() {
+    return BeginNestedMessage<T>(51);
+  }
+
+
+  using FieldMetadata_GpuCounterEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      52,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuCounterEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuCounterEvent kGpuCounterEvent() { return {}; }
+  template <typename T = GpuCounterEvent> T* set_gpu_counter_event() {
+    return BeginNestedMessage<T>(52);
+  }
+
+
+  using FieldMetadata_GpuRenderStageEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      53,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuRenderStageEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuRenderStageEvent kGpuRenderStageEvent() { return {}; }
+  template <typename T = GpuRenderStageEvent> T* set_gpu_render_stage_event() {
+    return BeginNestedMessage<T>(53);
+  }
+
+
+  using FieldMetadata_StreamingProfilePacket =
+    ::protozero::proto_utils::FieldMetadata<
+      54,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      StreamingProfilePacket,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_StreamingProfilePacket kStreamingProfilePacket() { return {}; }
+  template <typename T = StreamingProfilePacket> T* set_streaming_profile_packet() {
+    return BeginNestedMessage<T>(54);
+  }
+
+
+  using FieldMetadata_HeapGraph =
+    ::protozero::proto_utils::FieldMetadata<
+      56,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      HeapGraph,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HeapGraph kHeapGraph() { return {}; }
+  template <typename T = HeapGraph> T* set_heap_graph() {
+    return BeginNestedMessage<T>(56);
+  }
+
+
+  using FieldMetadata_GraphicsFrameEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      57,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GraphicsFrameEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GraphicsFrameEvent kGraphicsFrameEvent() { return {}; }
+  template <typename T = GraphicsFrameEvent> T* set_graphics_frame_event() {
+    return BeginNestedMessage<T>(57);
+  }
+
+
+  using FieldMetadata_VulkanMemoryEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      62,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      VulkanMemoryEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VulkanMemoryEvent kVulkanMemoryEvent() { return {}; }
+  template <typename T = VulkanMemoryEvent> T* set_vulkan_memory_event() {
+    return BeginNestedMessage<T>(62);
+  }
+
+
+  using FieldMetadata_GpuLog =
+    ::protozero::proto_utils::FieldMetadata<
+      63,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuLog,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuLog kGpuLog() { return {}; }
+  template <typename T = GpuLog> T* set_gpu_log() {
+    return BeginNestedMessage<T>(63);
+  }
+
+
+  using FieldMetadata_VulkanApiEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      65,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      VulkanApiEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_VulkanApiEvent kVulkanApiEvent() { return {}; }
+  template <typename T = VulkanApiEvent> T* set_vulkan_api_event() {
+    return BeginNestedMessage<T>(65);
+  }
+
+
+  using FieldMetadata_PerfSample =
+    ::protozero::proto_utils::FieldMetadata<
+      66,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      PerfSample,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PerfSample kPerfSample() { return {}; }
+  template <typename T = PerfSample> T* set_perf_sample() {
+    return BeginNestedMessage<T>(66);
+  }
+
+
+  using FieldMetadata_CpuInfo =
+    ::protozero::proto_utils::FieldMetadata<
+      67,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      CpuInfo,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CpuInfo kCpuInfo() { return {}; }
+  template <typename T = CpuInfo> T* set_cpu_info() {
+    return BeginNestedMessage<T>(67);
+  }
+
+
+  using FieldMetadata_SmapsPacket =
+    ::protozero::proto_utils::FieldMetadata<
+      68,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      SmapsPacket,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SmapsPacket kSmapsPacket() { return {}; }
+  template <typename T = SmapsPacket> T* set_smaps_packet() {
+    return BeginNestedMessage<T>(68);
+  }
+
+
+  using FieldMetadata_ServiceEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      69,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TracingServiceEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ServiceEvent kServiceEvent() { return {}; }
+  template <typename T = TracingServiceEvent> T* set_service_event() {
+    return BeginNestedMessage<T>(69);
+  }
+
+
+  using FieldMetadata_InitialDisplayState =
+    ::protozero::proto_utils::FieldMetadata<
+      70,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InitialDisplayState,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InitialDisplayState kInitialDisplayState() { return {}; }
+  template <typename T = InitialDisplayState> T* set_initial_display_state() {
+    return BeginNestedMessage<T>(70);
+  }
+
+
+  using FieldMetadata_GpuMemTotalEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      71,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      GpuMemTotalEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GpuMemTotalEvent kGpuMemTotalEvent() { return {}; }
+  template <typename T = GpuMemTotalEvent> T* set_gpu_mem_total_event() {
+    return BeginNestedMessage<T>(71);
+  }
+
+
+  using FieldMetadata_MemoryTrackerSnapshot =
+    ::protozero::proto_utils::FieldMetadata<
+      73,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MemoryTrackerSnapshot,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MemoryTrackerSnapshot kMemoryTrackerSnapshot() { return {}; }
+  template <typename T = MemoryTrackerSnapshot> T* set_memory_tracker_snapshot() {
+    return BeginNestedMessage<T>(73);
+  }
+
+
+  using FieldMetadata_FrameTimelineEvent =
+    ::protozero::proto_utils::FieldMetadata<
+      76,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FrameTimelineEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FrameTimelineEvent kFrameTimelineEvent() { return {}; }
+  template <typename T = FrameTimelineEvent> T* set_frame_timeline_event() {
+    return BeginNestedMessage<T>(76);
+  }
+
+
+  using FieldMetadata_AndroidEnergyEstimationBreakdown =
+    ::protozero::proto_utils::FieldMetadata<
+      77,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      AndroidEnergyEstimationBreakdown,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AndroidEnergyEstimationBreakdown kAndroidEnergyEstimationBreakdown() { return {}; }
+  template <typename T = AndroidEnergyEstimationBreakdown> T* set_android_energy_estimation_breakdown() {
+    return BeginNestedMessage<T>(77);
+  }
+
+
+  using FieldMetadata_UiState =
+    ::protozero::proto_utils::FieldMetadata<
+      78,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      UiState,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_UiState kUiState() { return {}; }
+  template <typename T = UiState> T* set_ui_state() {
+    return BeginNestedMessage<T>(78);
+  }
+
+
+  using FieldMetadata_ProfiledFrameSymbols =
+    ::protozero::proto_utils::FieldMetadata<
+      55,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProfiledFrameSymbols,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProfiledFrameSymbols kProfiledFrameSymbols() { return {}; }
+  template <typename T = ProfiledFrameSymbols> T* set_profiled_frame_symbols() {
+    return BeginNestedMessage<T>(55);
+  }
+
+
+  using FieldMetadata_ModuleSymbols =
+    ::protozero::proto_utils::FieldMetadata<
+      61,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ModuleSymbols,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ModuleSymbols kModuleSymbols() { return {}; }
+  template <typename T = ModuleSymbols> T* set_module_symbols() {
+    return BeginNestedMessage<T>(61);
+  }
+
+
+  using FieldMetadata_DeobfuscationMapping =
+    ::protozero::proto_utils::FieldMetadata<
+      64,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      DeobfuscationMapping,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_DeobfuscationMapping kDeobfuscationMapping() { return {}; }
+  template <typename T = DeobfuscationMapping> T* set_deobfuscation_mapping() {
+    return BeginNestedMessage<T>(64);
+  }
+
+
+  using FieldMetadata_TrackDescriptor =
+    ::protozero::proto_utils::FieldMetadata<
+      60,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TrackDescriptor,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrackDescriptor kTrackDescriptor() { return {}; }
+  template <typename T = TrackDescriptor> T* set_track_descriptor() {
+    return BeginNestedMessage<T>(60);
+  }
+
+
+  using FieldMetadata_ProcessDescriptor =
+    ::protozero::proto_utils::FieldMetadata<
+      43,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ProcessDescriptor,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessDescriptor kProcessDescriptor() { return {}; }
+  template <typename T = ProcessDescriptor> T* set_process_descriptor() {
+    return BeginNestedMessage<T>(43);
+  }
+
+
+  using FieldMetadata_ThreadDescriptor =
+    ::protozero::proto_utils::FieldMetadata<
+      44,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ThreadDescriptor,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ThreadDescriptor kThreadDescriptor() { return {}; }
+  template <typename T = ThreadDescriptor> T* set_thread_descriptor() {
+    return BeginNestedMessage<T>(44);
+  }
+
+
+  using FieldMetadata_FtraceEvents =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FtraceEventBundle,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_FtraceEvents kFtraceEvents() { return {}; }
+  template <typename T = FtraceEventBundle> T* set_ftrace_events() {
+    return BeginNestedMessage<T>(1);
+  }
+
+
+  using FieldMetadata_SynchronizationMarker =
+    ::protozero::proto_utils::FieldMetadata<
+      36,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBytes,
+      std::string,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SynchronizationMarker kSynchronizationMarker() { return {}; }
+  void set_synchronization_marker(const uint8_t* data, size_t size) {
+    AppendBytes(FieldMetadata_SynchronizationMarker::kFieldId, data, size);
+  }
+  void set_synchronization_marker(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_SynchronizationMarker::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBytes>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_CompressedPackets =
+    ::protozero::proto_utils::FieldMetadata<
+      50,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBytes,
+      std::string,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_CompressedPackets kCompressedPackets() { return {}; }
+  void set_compressed_packets(const uint8_t* data, size_t size) {
+    AppendBytes(FieldMetadata_CompressedPackets::kFieldId, data, size);
+  }
+  void set_compressed_packets(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_CompressedPackets::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBytes>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ExtensionDescriptor =
+    ::protozero::proto_utils::FieldMetadata<
+      72,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      ExtensionDescriptor,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExtensionDescriptor kExtensionDescriptor() { return {}; }
+  template <typename T = ExtensionDescriptor> T* set_extension_descriptor() {
+    return BeginNestedMessage<T>(72);
+  }
+
+
+  using FieldMetadata_ForTesting =
+    ::protozero::proto_utils::FieldMetadata<
+      900,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TestEvent,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ForTesting kForTesting() { return {}; }
+  template <typename T = TestEvent> T* set_for_testing() {
+    return BeginNestedMessage<T>(900);
+  }
+
+
+  using FieldMetadata_TrustedUid =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrustedUid kTrustedUid() { return {}; }
+  void set_trusted_uid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TrustedUid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TrustedPacketSequenceId =
+    ::protozero::proto_utils::FieldMetadata<
+      10,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TrustedPacketSequenceId kTrustedPacketSequenceId() { return {}; }
+  void set_trusted_packet_sequence_id(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TrustedPacketSequenceId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_InternedData =
+    ::protozero::proto_utils::FieldMetadata<
+      12,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      InternedData,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_InternedData kInternedData() { return {}; }
+  template <typename T = InternedData> T* set_interned_data() {
+    return BeginNestedMessage<T>(12);
+  }
+
+
+  using FieldMetadata_SequenceFlags =
+    ::protozero::proto_utils::FieldMetadata<
+      13,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SequenceFlags kSequenceFlags() { return {}; }
+  void set_sequence_flags(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SequenceFlags::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_IncrementalStateCleared =
+    ::protozero::proto_utils::FieldMetadata<
+      41,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_IncrementalStateCleared kIncrementalStateCleared() { return {}; }
+  void set_incremental_state_cleared(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_IncrementalStateCleared::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TracePacketDefaults =
+    ::protozero::proto_utils::FieldMetadata<
+      59,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TracePacketDefaults,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TracePacketDefaults kTracePacketDefaults() { return {}; }
+  template <typename T = TracePacketDefaults> T* set_trace_packet_defaults() {
+    return BeginNestedMessage<T>(59);
+  }
+
+
+  using FieldMetadata_PreviousPacketDropped =
+    ::protozero::proto_utils::FieldMetadata<
+      42,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      TracePacket>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_PreviousPacketDropped kPreviousPacketDropped() { return {}; }
+  void set_previous_packet_dropped(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_PreviousPacketDropped::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/trace.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class TracePacket;
+
+class Trace_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  Trace_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit Trace_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit Trace_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_packet() const { return at<1>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> packet() const { return GetRepeated<::protozero::ConstBytes>(1); }
+};
+
+class Trace : public ::protozero::Message {
+ public:
+  using Decoder = Trace_Decoder;
+  enum : int32_t {
+    kPacketFieldNumber = 1,
+  };
+
+  using FieldMetadata_Packet =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      TracePacket,
+      Trace>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Packet kPacket() { return {}; }
+  template <typename T = TracePacket> T* add_packet() {
+    return BeginNestedMessage<T>(1);
+  }
+
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/extension_descriptor.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_EXTENSION_DESCRIPTOR_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_EXTENSION_DESCRIPTOR_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class FileDescriptorSet;
+
+class ExtensionDescriptor_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/1, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  ExtensionDescriptor_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit ExtensionDescriptor_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit ExtensionDescriptor_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_extension_set() const { return at<1>().valid(); }
+  ::protozero::ConstBytes extension_set() const { return at<1>().as_bytes(); }
+};
+
+class ExtensionDescriptor : public ::protozero::Message {
+ public:
+  using Decoder = ExtensionDescriptor_Decoder;
+  enum : int32_t {
+    kExtensionSetFieldNumber = 1,
+  };
+
+  using FieldMetadata_ExtensionSet =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      FileDescriptorSet,
+      ExtensionDescriptor>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ExtensionSet kExtensionSet() { return {}; }
+  template <typename T = FileDescriptorSet> T* set_extension_set() {
+    return BeginNestedMessage<T>(1);
+  }
+
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/memory_graph.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_MEMORY_GRAPH_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_MEMORY_GRAPH_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class MemoryTrackerSnapshot_ProcessSnapshot;
+class MemoryTrackerSnapshot_ProcessSnapshot_MemoryEdge;
+class MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode;
+class MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry;
+enum MemoryTrackerSnapshot_LevelOfDetail : int32_t;
+enum MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Units : int32_t;
+
+enum MemoryTrackerSnapshot_LevelOfDetail : int32_t {
+  MemoryTrackerSnapshot_LevelOfDetail_DETAIL_FULL = 0,
+  MemoryTrackerSnapshot_LevelOfDetail_DETAIL_LIGHT = 1,
+  MemoryTrackerSnapshot_LevelOfDetail_DETAIL_BACKGROUND = 2,
+};
+
+const MemoryTrackerSnapshot_LevelOfDetail MemoryTrackerSnapshot_LevelOfDetail_MIN = MemoryTrackerSnapshot_LevelOfDetail_DETAIL_FULL;
+const MemoryTrackerSnapshot_LevelOfDetail MemoryTrackerSnapshot_LevelOfDetail_MAX = MemoryTrackerSnapshot_LevelOfDetail_DETAIL_BACKGROUND;
+
+enum MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Units : int32_t {
+  MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Units_UNSPECIFIED = 0,
+  MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Units_BYTES = 1,
+  MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Units_COUNT = 2,
+};
+
+const MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Units MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Units_MIN = MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Units_UNSPECIFIED;
+const MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Units MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Units_MAX = MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Units_COUNT;
+
+class MemoryTrackerSnapshot_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  MemoryTrackerSnapshot_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MemoryTrackerSnapshot_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MemoryTrackerSnapshot_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_global_dump_id() const { return at<1>().valid(); }
+  uint64_t global_dump_id() const { return at<1>().as_uint64(); }
+  bool has_level_of_detail() const { return at<2>().valid(); }
+  int32_t level_of_detail() const { return at<2>().as_int32(); }
+  bool has_process_memory_dumps() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> process_memory_dumps() const { return GetRepeated<::protozero::ConstBytes>(3); }
+};
+
+class MemoryTrackerSnapshot : public ::protozero::Message {
+ public:
+  using Decoder = MemoryTrackerSnapshot_Decoder;
+  enum : int32_t {
+    kGlobalDumpIdFieldNumber = 1,
+    kLevelOfDetailFieldNumber = 2,
+    kProcessMemoryDumpsFieldNumber = 3,
+  };
+  using ProcessSnapshot = ::perfetto::protos::pbzero::MemoryTrackerSnapshot_ProcessSnapshot;
+  using LevelOfDetail = ::perfetto::protos::pbzero::MemoryTrackerSnapshot_LevelOfDetail;
+  static const LevelOfDetail DETAIL_FULL = MemoryTrackerSnapshot_LevelOfDetail_DETAIL_FULL;
+  static const LevelOfDetail DETAIL_LIGHT = MemoryTrackerSnapshot_LevelOfDetail_DETAIL_LIGHT;
+  static const LevelOfDetail DETAIL_BACKGROUND = MemoryTrackerSnapshot_LevelOfDetail_DETAIL_BACKGROUND;
+
+  using FieldMetadata_GlobalDumpId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MemoryTrackerSnapshot>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_GlobalDumpId kGlobalDumpId() { return {}; }
+  void set_global_dump_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_GlobalDumpId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_LevelOfDetail =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::MemoryTrackerSnapshot_LevelOfDetail,
+      MemoryTrackerSnapshot>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_LevelOfDetail kLevelOfDetail() { return {}; }
+  void set_level_of_detail(::perfetto::protos::pbzero::MemoryTrackerSnapshot_LevelOfDetail value) {
+    static constexpr uint32_t field_id = FieldMetadata_LevelOfDetail::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ProcessMemoryDumps =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MemoryTrackerSnapshot_ProcessSnapshot,
+      MemoryTrackerSnapshot>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ProcessMemoryDumps kProcessMemoryDumps() { return {}; }
+  template <typename T = MemoryTrackerSnapshot_ProcessSnapshot> T* add_process_memory_dumps() {
+    return BeginNestedMessage<T>(3);
+  }
+
+};
+
+class MemoryTrackerSnapshot_ProcessSnapshot_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  MemoryTrackerSnapshot_ProcessSnapshot_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MemoryTrackerSnapshot_ProcessSnapshot_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MemoryTrackerSnapshot_ProcessSnapshot_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  int32_t pid() const { return at<1>().as_int32(); }
+  bool has_allocator_dumps() const { return at<2>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> allocator_dumps() const { return GetRepeated<::protozero::ConstBytes>(2); }
+  bool has_memory_edges() const { return at<3>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> memory_edges() const { return GetRepeated<::protozero::ConstBytes>(3); }
+};
+
+class MemoryTrackerSnapshot_ProcessSnapshot : public ::protozero::Message {
+ public:
+  using Decoder = MemoryTrackerSnapshot_ProcessSnapshot_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kAllocatorDumpsFieldNumber = 2,
+    kMemoryEdgesFieldNumber = 3,
+  };
+  using MemoryNode = ::perfetto::protos::pbzero::MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode;
+  using MemoryEdge = ::perfetto::protos::pbzero::MemoryTrackerSnapshot_ProcessSnapshot_MemoryEdge;
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt32,
+      int32_t,
+      MemoryTrackerSnapshot_ProcessSnapshot>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(int32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AllocatorDumps =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode,
+      MemoryTrackerSnapshot_ProcessSnapshot>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AllocatorDumps kAllocatorDumps() { return {}; }
+  template <typename T = MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode> T* add_allocator_dumps() {
+    return BeginNestedMessage<T>(2);
+  }
+
+
+  using FieldMetadata_MemoryEdges =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MemoryTrackerSnapshot_ProcessSnapshot_MemoryEdge,
+      MemoryTrackerSnapshot_ProcessSnapshot>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_MemoryEdges kMemoryEdges() { return {}; }
+  template <typename T = MemoryTrackerSnapshot_ProcessSnapshot_MemoryEdge> T* add_memory_edges() {
+    return BeginNestedMessage<T>(3);
+  }
+
+};
+
+class MemoryTrackerSnapshot_ProcessSnapshot_MemoryEdge_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MemoryTrackerSnapshot_ProcessSnapshot_MemoryEdge_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MemoryTrackerSnapshot_ProcessSnapshot_MemoryEdge_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MemoryTrackerSnapshot_ProcessSnapshot_MemoryEdge_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_source_id() const { return at<1>().valid(); }
+  uint64_t source_id() const { return at<1>().as_uint64(); }
+  bool has_target_id() const { return at<2>().valid(); }
+  uint64_t target_id() const { return at<2>().as_uint64(); }
+  bool has_importance() const { return at<3>().valid(); }
+  uint32_t importance() const { return at<3>().as_uint32(); }
+  bool has_overridable() const { return at<4>().valid(); }
+  bool overridable() const { return at<4>().as_bool(); }
+};
+
+class MemoryTrackerSnapshot_ProcessSnapshot_MemoryEdge : public ::protozero::Message {
+ public:
+  using Decoder = MemoryTrackerSnapshot_ProcessSnapshot_MemoryEdge_Decoder;
+  enum : int32_t {
+    kSourceIdFieldNumber = 1,
+    kTargetIdFieldNumber = 2,
+    kImportanceFieldNumber = 3,
+    kOverridableFieldNumber = 4,
+  };
+
+  using FieldMetadata_SourceId =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MemoryTrackerSnapshot_ProcessSnapshot_MemoryEdge>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SourceId kSourceId() { return {}; }
+  void set_source_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SourceId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TargetId =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MemoryTrackerSnapshot_ProcessSnapshot_MemoryEdge>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TargetId kTargetId() { return {}; }
+  void set_target_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TargetId::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Importance =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      MemoryTrackerSnapshot_ProcessSnapshot_MemoryEdge>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Importance kImportance() { return {}; }
+  void set_importance(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Importance::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Overridable =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      MemoryTrackerSnapshot_ProcessSnapshot_MemoryEdge>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Overridable kOverridable() { return {}; }
+  void set_overridable(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_Overridable::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+};
+
+class MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/5, /*HAS_NONPACKED_REPEATED_FIELDS=*/true> {
+ public:
+  MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_id() const { return at<1>().valid(); }
+  uint64_t id() const { return at<1>().as_uint64(); }
+  bool has_absolute_name() const { return at<2>().valid(); }
+  ::protozero::ConstChars absolute_name() const { return at<2>().as_string(); }
+  bool has_weak() const { return at<3>().valid(); }
+  bool weak() const { return at<3>().as_bool(); }
+  bool has_size_bytes() const { return at<4>().valid(); }
+  uint64_t size_bytes() const { return at<4>().as_uint64(); }
+  bool has_entries() const { return at<5>().valid(); }
+  ::protozero::RepeatedFieldIterator<::protozero::ConstBytes> entries() const { return GetRepeated<::protozero::ConstBytes>(5); }
+};
+
+class MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode : public ::protozero::Message {
+ public:
+  using Decoder = MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_Decoder;
+  enum : int32_t {
+    kIdFieldNumber = 1,
+    kAbsoluteNameFieldNumber = 2,
+    kWeakFieldNumber = 3,
+    kSizeBytesFieldNumber = 4,
+    kEntriesFieldNumber = 5,
+  };
+  using MemoryNodeEntry = ::perfetto::protos::pbzero::MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry;
+
+  using FieldMetadata_Id =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Id kId() { return {}; }
+  void set_id(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Id::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_AbsoluteName =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_AbsoluteName kAbsoluteName() { return {}; }
+  void set_absolute_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_AbsoluteName::kFieldId, data, size);
+  }
+  void set_absolute_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_AbsoluteName::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Weak =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kBool,
+      bool,
+      MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Weak kWeak() { return {}; }
+  void set_weak(bool value) {
+    static constexpr uint32_t field_id = FieldMetadata_Weak::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kBool>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_SizeBytes =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_SizeBytes kSizeBytes() { return {}; }
+  void set_size_bytes(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_SizeBytes::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Entries =
+    ::protozero::proto_utils::FieldMetadata<
+      5,
+      ::protozero::proto_utils::RepetitionType::kRepeatedNotPacked,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry,
+      MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Entries kEntries() { return {}; }
+  template <typename T = MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry> T* add_entries() {
+    return BeginNestedMessage<T>(5);
+  }
+
+};
+
+class MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/4, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_name() const { return at<1>().valid(); }
+  ::protozero::ConstChars name() const { return at<1>().as_string(); }
+  bool has_units() const { return at<2>().valid(); }
+  int32_t units() const { return at<2>().as_int32(); }
+  bool has_value_uint64() const { return at<3>().valid(); }
+  uint64_t value_uint64() const { return at<3>().as_uint64(); }
+  bool has_value_string() const { return at<4>().valid(); }
+  ::protozero::ConstChars value_string() const { return at<4>().as_string(); }
+};
+
+class MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry : public ::protozero::Message {
+ public:
+  using Decoder = MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Decoder;
+  enum : int32_t {
+    kNameFieldNumber = 1,
+    kUnitsFieldNumber = 2,
+    kValueUint64FieldNumber = 3,
+    kValueStringFieldNumber = 4,
+  };
+  using Units = ::perfetto::protos::pbzero::MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Units;
+  static const Units UNSPECIFIED = MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Units_UNSPECIFIED;
+  static const Units BYTES = MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Units_BYTES;
+  static const Units COUNT = MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Units_COUNT;
+
+  using FieldMetadata_Name =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Name kName() { return {}; }
+  void set_name(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Name::kFieldId, data, size);
+  }
+  void set_name(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Name::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Units =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kEnum,
+      ::perfetto::protos::pbzero::MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Units,
+      MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Units kUnits() { return {}; }
+  void set_units(::perfetto::protos::pbzero::MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry_Units value) {
+    static constexpr uint32_t field_id = FieldMetadata_Units::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kEnum>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ValueUint64 =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint64,
+      uint64_t,
+      MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ValueUint64 kValueUint64() { return {}; }
+  void set_value_uint64(uint64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_ValueUint64::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_ValueString =
+    ::protozero::proto_utils::FieldMetadata<
+      4,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      MemoryTrackerSnapshot_ProcessSnapshot_MemoryNode_MemoryNodeEntry>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_ValueString kValueString() { return {}; }
+  void set_value_string(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_ValueString::kFieldId, data, size);
+  }
+  void set_value_string(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_ValueString::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/ui_state.pbzero.h
+// Autogenerated by the ProtoZero compiler plugin. DO NOT EDIT.
+
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_UI_STATE_PROTO_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_UI_STATE_PROTO_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/field_writer.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/packed_repeated_fields.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_decoder.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace perfetto {
+namespace protos {
+namespace pbzero {
+
+class UiState_HighlightProcess;
+
+class UiState_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/3, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  UiState_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit UiState_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit UiState_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_timeline_start_ts() const { return at<1>().valid(); }
+  int64_t timeline_start_ts() const { return at<1>().as_int64(); }
+  bool has_timeline_end_ts() const { return at<2>().valid(); }
+  int64_t timeline_end_ts() const { return at<2>().as_int64(); }
+  bool has_highlight_process() const { return at<3>().valid(); }
+  ::protozero::ConstBytes highlight_process() const { return at<3>().as_bytes(); }
+};
+
+class UiState : public ::protozero::Message {
+ public:
+  using Decoder = UiState_Decoder;
+  enum : int32_t {
+    kTimelineStartTsFieldNumber = 1,
+    kTimelineEndTsFieldNumber = 2,
+    kHighlightProcessFieldNumber = 3,
+  };
+  using HighlightProcess = ::perfetto::protos::pbzero::UiState_HighlightProcess;
+
+  using FieldMetadata_TimelineStartTs =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      UiState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TimelineStartTs kTimelineStartTs() { return {}; }
+  void set_timeline_start_ts(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TimelineStartTs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_TimelineEndTs =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kInt64,
+      int64_t,
+      UiState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_TimelineEndTs kTimelineEndTs() { return {}; }
+  void set_timeline_end_ts(int64_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_TimelineEndTs::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kInt64>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_HighlightProcess =
+    ::protozero::proto_utils::FieldMetadata<
+      3,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kMessage,
+      UiState_HighlightProcess,
+      UiState>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_HighlightProcess kHighlightProcess() { return {}; }
+  template <typename T = UiState_HighlightProcess> T* set_highlight_process() {
+    return BeginNestedMessage<T>(3);
+  }
+
+};
+
+class UiState_HighlightProcess_Decoder : public ::protozero::TypedProtoDecoder</*MAX_FIELD_ID=*/2, /*HAS_NONPACKED_REPEATED_FIELDS=*/false> {
+ public:
+  UiState_HighlightProcess_Decoder(const uint8_t* data, size_t len) : TypedProtoDecoder(data, len) {}
+  explicit UiState_HighlightProcess_Decoder(const std::string& raw) : TypedProtoDecoder(reinterpret_cast<const uint8_t*>(raw.data()), raw.size()) {}
+  explicit UiState_HighlightProcess_Decoder(const ::protozero::ConstBytes& raw) : TypedProtoDecoder(raw.data, raw.size) {}
+  bool has_pid() const { return at<1>().valid(); }
+  uint32_t pid() const { return at<1>().as_uint32(); }
+  bool has_cmdline() const { return at<2>().valid(); }
+  ::protozero::ConstChars cmdline() const { return at<2>().as_string(); }
+};
+
+class UiState_HighlightProcess : public ::protozero::Message {
+ public:
+  using Decoder = UiState_HighlightProcess_Decoder;
+  enum : int32_t {
+    kPidFieldNumber = 1,
+    kCmdlineFieldNumber = 2,
+  };
+
+  using FieldMetadata_Pid =
+    ::protozero::proto_utils::FieldMetadata<
+      1,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kUint32,
+      uint32_t,
+      UiState_HighlightProcess>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Pid kPid() { return {}; }
+  void set_pid(uint32_t value) {
+    static constexpr uint32_t field_id = FieldMetadata_Pid::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kUint32>
+        ::Append(*this, field_id, value);
+  }
+
+  using FieldMetadata_Cmdline =
+    ::protozero::proto_utils::FieldMetadata<
+      2,
+      ::protozero::proto_utils::RepetitionType::kNotRepeated,
+      ::protozero::proto_utils::ProtoSchemaType::kString,
+      std::string,
+      UiState_HighlightProcess>;
+
+  // Ceci n'est pas une pipe.
+  // This is actually a variable of FieldMetadataHelper<FieldMetadata<...>>
+  // type (and users are expected to use it as such, hence kCamelCase name).
+  // It is declared as a function to keep protozero bindings header-only as
+  // inline constexpr variables are not available until C++17 (while inline
+  // functions are).
+  // TODO(altimin): Use inline variable instead after adopting C++17.  
+  static constexpr FieldMetadata_Cmdline kCmdline() { return {}; }
+  void set_cmdline(const char* data, size_t size) {
+    AppendBytes(FieldMetadata_Cmdline::kFieldId, data, size);
+  }
+  void set_cmdline(std::string value) {
+    static constexpr uint32_t field_id = FieldMetadata_Cmdline::kFieldId;
+    // Call the appropriate protozero::Message::Append(field_id, ...)
+    // method based on the type of the field.
+    ::protozero::internal::FieldWriter<
+      ::protozero::proto_utils::ProtoSchemaType::kString>
+        ::Append(*this, field_id, value);
+  }
+};
+
+} // Namespace.
+} // Namespace.
+} // Namespace.
+#endif  // Include guard.
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_application_state_info.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_APPLICATION_STATE_INFO_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_APPLICATION_STATE_INFO_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeApplicationStateInfo;
+enum ChromeApplicationStateInfo_ChromeApplicationState : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeApplicationStateInfo_ChromeApplicationState : int {
+  ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_UNKNOWN = 0,
+  ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_RUNNING_ACTIVITIES = 1,
+  ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_PAUSED_ACTIVITIES = 2,
+  ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_STOPPED_ACTIVITIES = 3,
+  ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_DESTROYED_ACTIVITIES = 4,
+};
+
+class PERFETTO_EXPORT ChromeApplicationStateInfo : public ::protozero::CppMessageObj {
+ public:
+  using ChromeApplicationState = ChromeApplicationStateInfo_ChromeApplicationState;
+  static constexpr auto APPLICATION_STATE_UNKNOWN = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_UNKNOWN;
+  static constexpr auto APPLICATION_STATE_HAS_RUNNING_ACTIVITIES = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_RUNNING_ACTIVITIES;
+  static constexpr auto APPLICATION_STATE_HAS_PAUSED_ACTIVITIES = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_PAUSED_ACTIVITIES;
+  static constexpr auto APPLICATION_STATE_HAS_STOPPED_ACTIVITIES = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_STOPPED_ACTIVITIES;
+  static constexpr auto APPLICATION_STATE_HAS_DESTROYED_ACTIVITIES = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_DESTROYED_ACTIVITIES;
+  static constexpr auto ChromeApplicationState_MIN = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_UNKNOWN;
+  static constexpr auto ChromeApplicationState_MAX = ChromeApplicationStateInfo_ChromeApplicationState_APPLICATION_STATE_HAS_DESTROYED_ACTIVITIES;
+  enum FieldNumbers {
+    kApplicationStateFieldNumber = 1,
+  };
+
+  ChromeApplicationStateInfo();
+  ~ChromeApplicationStateInfo() override;
+  ChromeApplicationStateInfo(ChromeApplicationStateInfo&&) noexcept;
+  ChromeApplicationStateInfo& operator=(ChromeApplicationStateInfo&&);
+  ChromeApplicationStateInfo(const ChromeApplicationStateInfo&);
+  ChromeApplicationStateInfo& operator=(const ChromeApplicationStateInfo&);
+  bool operator==(const ChromeApplicationStateInfo&) const;
+  bool operator!=(const ChromeApplicationStateInfo& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_application_state() const { return _has_field_[1]; }
+  ChromeApplicationStateInfo_ChromeApplicationState application_state() const { return application_state_; }
+  void set_application_state(ChromeApplicationStateInfo_ChromeApplicationState value) { application_state_ = value; _has_field_.set(1); }
+
+ private:
+  ChromeApplicationStateInfo_ChromeApplicationState application_state_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_APPLICATION_STATE_INFO_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_compositor_scheduler_state.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_COMPOSITOR_SCHEDULER_STATE_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_COMPOSITOR_SCHEDULER_STATE_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class CompositorTimingHistory;
+class BeginFrameSourceState;
+class BeginFrameArgs;
+class SourceLocation;
+class BeginFrameObserverState;
+class BeginImplFrameArgs;
+class BeginImplFrameArgs_TimestampsInUs;
+class ChromeCompositorStateMachine;
+class ChromeCompositorStateMachine_MinorState;
+class ChromeCompositorStateMachine_MajorState;
+class ChromeCompositorSchedulerState;
+enum ChromeCompositorSchedulerAction : int;
+enum BeginFrameArgs_BeginFrameArgsType : int;
+enum BeginImplFrameArgs_State : int;
+enum ChromeCompositorStateMachine_MinorState_TreePriority : int;
+enum ChromeCompositorStateMachine_MinorState_ScrollHandlerState : int;
+enum ChromeCompositorStateMachine_MajorState_BeginImplFrameState : int;
+enum ChromeCompositorStateMachine_MajorState_BeginMainFrameState : int;
+enum ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState : int;
+enum ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState : int;
+enum ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeCompositorSchedulerAction : int {
+  CC_SCHEDULER_ACTION_UNSPECIFIED = 0,
+  CC_SCHEDULER_ACTION_NONE = 1,
+  CC_SCHEDULER_ACTION_SEND_BEGIN_MAIN_FRAME = 2,
+  CC_SCHEDULER_ACTION_COMMIT = 3,
+  CC_SCHEDULER_ACTION_ACTIVATE_SYNC_TREE = 4,
+  CC_SCHEDULER_ACTION_DRAW_IF_POSSIBLE = 5,
+  CC_SCHEDULER_ACTION_DRAW_FORCED = 6,
+  CC_SCHEDULER_ACTION_DRAW_ABORT = 7,
+  CC_SCHEDULER_ACTION_BEGIN_LAYER_TREE_FRAME_SINK_CREATION = 8,
+  CC_SCHEDULER_ACTION_PREPARE_TILES = 9,
+  CC_SCHEDULER_ACTION_INVALIDATE_LAYER_TREE_FRAME_SINK = 10,
+  CC_SCHEDULER_ACTION_PERFORM_IMPL_SIDE_INVALIDATION = 11,
+  CC_SCHEDULER_ACTION_NOTIFY_BEGIN_MAIN_FRAME_NOT_EXPECTED_UNTIL = 12,
+  CC_SCHEDULER_ACTION_NOTIFY_BEGIN_MAIN_FRAME_NOT_EXPECTED_SOON = 13,
+};
+enum BeginFrameArgs_BeginFrameArgsType : int {
+  BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_UNSPECIFIED = 0,
+  BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_INVALID = 1,
+  BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_NORMAL = 2,
+  BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_MISSED = 3,
+};
+enum BeginImplFrameArgs_State : int {
+  BeginImplFrameArgs_State_BEGIN_FRAME_FINISHED = 0,
+  BeginImplFrameArgs_State_BEGIN_FRAME_USING = 1,
+};
+enum ChromeCompositorStateMachine_MinorState_TreePriority : int {
+  ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_SAME_PRIORITY_FOR_BOTH_TREES = 1,
+  ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_SMOOTHNESS_TAKES_PRIORITY = 2,
+  ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_NEW_CONTENT_TAKES_PRIORITY = 3,
+};
+enum ChromeCompositorStateMachine_MinorState_ScrollHandlerState : int {
+  ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_HANDLER_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_AFFECTS_SCROLL_HANDLER = 1,
+  ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_DOES_NOT_AFFECT_SCROLL_HANDLER = 2,
+};
+enum ChromeCompositorStateMachine_MajorState_BeginImplFrameState : int {
+  ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_IDLE = 1,
+  ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_BEGIN_FRAME = 2,
+  ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_DEADLINE = 3,
+};
+enum ChromeCompositorStateMachine_MajorState_BeginMainFrameState : int {
+  ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_IDLE = 1,
+  ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_SENT = 2,
+  ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_READY_TO_COMMIT = 3,
+};
+enum ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState : int {
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_NONE = 1,
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_ACTIVE = 2,
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_CREATING = 3,
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_COMMIT = 4,
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_ACTIVATION = 5,
+};
+enum ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState : int {
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_UNSPECIFIED = 0,
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_IDLE = 1,
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_COMMIT = 2,
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_ACTIVATION = 3,
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_DRAW = 4,
+};
+enum ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode : int {
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_UNSPECIFIED = 0,
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_NONE = 1,
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_IMMEDIATE = 2,
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_REGULAR = 3,
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_LATE = 4,
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_BLOCKED = 5,
+};
+
+class PERFETTO_EXPORT CompositorTimingHistory : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kBeginMainFrameQueueCriticalEstimateDeltaUsFieldNumber = 1,
+    kBeginMainFrameQueueNotCriticalEstimateDeltaUsFieldNumber = 2,
+    kBeginMainFrameStartToReadyToCommitEstimateDeltaUsFieldNumber = 3,
+    kCommitToReadyToActivateEstimateDeltaUsFieldNumber = 4,
+    kPrepareTilesEstimateDeltaUsFieldNumber = 5,
+    kActivateEstimateDeltaUsFieldNumber = 6,
+    kDrawEstimateDeltaUsFieldNumber = 7,
+  };
+
+  CompositorTimingHistory();
+  ~CompositorTimingHistory() override;
+  CompositorTimingHistory(CompositorTimingHistory&&) noexcept;
+  CompositorTimingHistory& operator=(CompositorTimingHistory&&);
+  CompositorTimingHistory(const CompositorTimingHistory&);
+  CompositorTimingHistory& operator=(const CompositorTimingHistory&);
+  bool operator==(const CompositorTimingHistory&) const;
+  bool operator!=(const CompositorTimingHistory& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_begin_main_frame_queue_critical_estimate_delta_us() const { return _has_field_[1]; }
+  int64_t begin_main_frame_queue_critical_estimate_delta_us() const { return begin_main_frame_queue_critical_estimate_delta_us_; }
+  void set_begin_main_frame_queue_critical_estimate_delta_us(int64_t value) { begin_main_frame_queue_critical_estimate_delta_us_ = value; _has_field_.set(1); }
+
+  bool has_begin_main_frame_queue_not_critical_estimate_delta_us() const { return _has_field_[2]; }
+  int64_t begin_main_frame_queue_not_critical_estimate_delta_us() const { return begin_main_frame_queue_not_critical_estimate_delta_us_; }
+  void set_begin_main_frame_queue_not_critical_estimate_delta_us(int64_t value) { begin_main_frame_queue_not_critical_estimate_delta_us_ = value; _has_field_.set(2); }
+
+  bool has_begin_main_frame_start_to_ready_to_commit_estimate_delta_us() const { return _has_field_[3]; }
+  int64_t begin_main_frame_start_to_ready_to_commit_estimate_delta_us() const { return begin_main_frame_start_to_ready_to_commit_estimate_delta_us_; }
+  void set_begin_main_frame_start_to_ready_to_commit_estimate_delta_us(int64_t value) { begin_main_frame_start_to_ready_to_commit_estimate_delta_us_ = value; _has_field_.set(3); }
+
+  bool has_commit_to_ready_to_activate_estimate_delta_us() const { return _has_field_[4]; }
+  int64_t commit_to_ready_to_activate_estimate_delta_us() const { return commit_to_ready_to_activate_estimate_delta_us_; }
+  void set_commit_to_ready_to_activate_estimate_delta_us(int64_t value) { commit_to_ready_to_activate_estimate_delta_us_ = value; _has_field_.set(4); }
+
+  bool has_prepare_tiles_estimate_delta_us() const { return _has_field_[5]; }
+  int64_t prepare_tiles_estimate_delta_us() const { return prepare_tiles_estimate_delta_us_; }
+  void set_prepare_tiles_estimate_delta_us(int64_t value) { prepare_tiles_estimate_delta_us_ = value; _has_field_.set(5); }
+
+  bool has_activate_estimate_delta_us() const { return _has_field_[6]; }
+  int64_t activate_estimate_delta_us() const { return activate_estimate_delta_us_; }
+  void set_activate_estimate_delta_us(int64_t value) { activate_estimate_delta_us_ = value; _has_field_.set(6); }
+
+  bool has_draw_estimate_delta_us() const { return _has_field_[7]; }
+  int64_t draw_estimate_delta_us() const { return draw_estimate_delta_us_; }
+  void set_draw_estimate_delta_us(int64_t value) { draw_estimate_delta_us_ = value; _has_field_.set(7); }
+
+ private:
+  int64_t begin_main_frame_queue_critical_estimate_delta_us_{};
+  int64_t begin_main_frame_queue_not_critical_estimate_delta_us_{};
+  int64_t begin_main_frame_start_to_ready_to_commit_estimate_delta_us_{};
+  int64_t commit_to_ready_to_activate_estimate_delta_us_{};
+  int64_t prepare_tiles_estimate_delta_us_{};
+  int64_t activate_estimate_delta_us_{};
+  int64_t draw_estimate_delta_us_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<8> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT BeginFrameSourceState : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kSourceIdFieldNumber = 1,
+    kPausedFieldNumber = 2,
+    kNumObserversFieldNumber = 3,
+    kLastBeginFrameArgsFieldNumber = 4,
+  };
+
+  BeginFrameSourceState();
+  ~BeginFrameSourceState() override;
+  BeginFrameSourceState(BeginFrameSourceState&&) noexcept;
+  BeginFrameSourceState& operator=(BeginFrameSourceState&&);
+  BeginFrameSourceState(const BeginFrameSourceState&);
+  BeginFrameSourceState& operator=(const BeginFrameSourceState&);
+  bool operator==(const BeginFrameSourceState&) const;
+  bool operator!=(const BeginFrameSourceState& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_source_id() const { return _has_field_[1]; }
+  uint32_t source_id() const { return source_id_; }
+  void set_source_id(uint32_t value) { source_id_ = value; _has_field_.set(1); }
+
+  bool has_paused() const { return _has_field_[2]; }
+  bool paused() const { return paused_; }
+  void set_paused(bool value) { paused_ = value; _has_field_.set(2); }
+
+  bool has_num_observers() const { return _has_field_[3]; }
+  uint32_t num_observers() const { return num_observers_; }
+  void set_num_observers(uint32_t value) { num_observers_ = value; _has_field_.set(3); }
+
+  bool has_last_begin_frame_args() const { return _has_field_[4]; }
+  const BeginFrameArgs& last_begin_frame_args() const { return *last_begin_frame_args_; }
+  BeginFrameArgs* mutable_last_begin_frame_args() { _has_field_.set(4); return last_begin_frame_args_.get(); }
+
+ private:
+  uint32_t source_id_{};
+  bool paused_{};
+  uint32_t num_observers_{};
+  ::protozero::CopyablePtr<BeginFrameArgs> last_begin_frame_args_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT BeginFrameArgs : public ::protozero::CppMessageObj {
+ public:
+  using BeginFrameArgsType = BeginFrameArgs_BeginFrameArgsType;
+  static constexpr auto BEGIN_FRAME_ARGS_TYPE_UNSPECIFIED = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_UNSPECIFIED;
+  static constexpr auto BEGIN_FRAME_ARGS_TYPE_INVALID = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_INVALID;
+  static constexpr auto BEGIN_FRAME_ARGS_TYPE_NORMAL = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_NORMAL;
+  static constexpr auto BEGIN_FRAME_ARGS_TYPE_MISSED = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_MISSED;
+  static constexpr auto BeginFrameArgsType_MIN = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_UNSPECIFIED;
+  static constexpr auto BeginFrameArgsType_MAX = BeginFrameArgs_BeginFrameArgsType_BEGIN_FRAME_ARGS_TYPE_MISSED;
+  enum FieldNumbers {
+    kTypeFieldNumber = 1,
+    kSourceIdFieldNumber = 2,
+    kSequenceNumberFieldNumber = 3,
+    kFrameTimeUsFieldNumber = 4,
+    kDeadlineUsFieldNumber = 5,
+    kIntervalDeltaUsFieldNumber = 6,
+    kOnCriticalPathFieldNumber = 7,
+    kAnimateOnlyFieldNumber = 8,
+    kSourceLocationIidFieldNumber = 9,
+    kSourceLocationFieldNumber = 10,
+  };
+
+  BeginFrameArgs();
+  ~BeginFrameArgs() override;
+  BeginFrameArgs(BeginFrameArgs&&) noexcept;
+  BeginFrameArgs& operator=(BeginFrameArgs&&);
+  BeginFrameArgs(const BeginFrameArgs&);
+  BeginFrameArgs& operator=(const BeginFrameArgs&);
+  bool operator==(const BeginFrameArgs&) const;
+  bool operator!=(const BeginFrameArgs& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_type() const { return _has_field_[1]; }
+  BeginFrameArgs_BeginFrameArgsType type() const { return type_; }
+  void set_type(BeginFrameArgs_BeginFrameArgsType value) { type_ = value; _has_field_.set(1); }
+
+  bool has_source_id() const { return _has_field_[2]; }
+  uint64_t source_id() const { return source_id_; }
+  void set_source_id(uint64_t value) { source_id_ = value; _has_field_.set(2); }
+
+  bool has_sequence_number() const { return _has_field_[3]; }
+  uint64_t sequence_number() const { return sequence_number_; }
+  void set_sequence_number(uint64_t value) { sequence_number_ = value; _has_field_.set(3); }
+
+  bool has_frame_time_us() const { return _has_field_[4]; }
+  int64_t frame_time_us() const { return frame_time_us_; }
+  void set_frame_time_us(int64_t value) { frame_time_us_ = value; _has_field_.set(4); }
+
+  bool has_deadline_us() const { return _has_field_[5]; }
+  int64_t deadline_us() const { return deadline_us_; }
+  void set_deadline_us(int64_t value) { deadline_us_ = value; _has_field_.set(5); }
+
+  bool has_interval_delta_us() const { return _has_field_[6]; }
+  int64_t interval_delta_us() const { return interval_delta_us_; }
+  void set_interval_delta_us(int64_t value) { interval_delta_us_ = value; _has_field_.set(6); }
+
+  bool has_on_critical_path() const { return _has_field_[7]; }
+  bool on_critical_path() const { return on_critical_path_; }
+  void set_on_critical_path(bool value) { on_critical_path_ = value; _has_field_.set(7); }
+
+  bool has_animate_only() const { return _has_field_[8]; }
+  bool animate_only() const { return animate_only_; }
+  void set_animate_only(bool value) { animate_only_ = value; _has_field_.set(8); }
+
+  bool has_source_location_iid() const { return _has_field_[9]; }
+  uint64_t source_location_iid() const { return source_location_iid_; }
+  void set_source_location_iid(uint64_t value) { source_location_iid_ = value; _has_field_.set(9); }
+
+  bool has_source_location() const { return _has_field_[10]; }
+  const SourceLocation& source_location() const { return *source_location_; }
+  SourceLocation* mutable_source_location() { _has_field_.set(10); return source_location_.get(); }
+
+ private:
+  BeginFrameArgs_BeginFrameArgsType type_{};
+  uint64_t source_id_{};
+  uint64_t sequence_number_{};
+  int64_t frame_time_us_{};
+  int64_t deadline_us_{};
+  int64_t interval_delta_us_{};
+  bool on_critical_path_{};
+  bool animate_only_{};
+  uint64_t source_location_iid_{};
+  ::protozero::CopyablePtr<SourceLocation> source_location_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<11> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT BeginFrameObserverState : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDroppedBeginFrameArgsFieldNumber = 1,
+    kLastBeginFrameArgsFieldNumber = 2,
+  };
+
+  BeginFrameObserverState();
+  ~BeginFrameObserverState() override;
+  BeginFrameObserverState(BeginFrameObserverState&&) noexcept;
+  BeginFrameObserverState& operator=(BeginFrameObserverState&&);
+  BeginFrameObserverState(const BeginFrameObserverState&);
+  BeginFrameObserverState& operator=(const BeginFrameObserverState&);
+  bool operator==(const BeginFrameObserverState&) const;
+  bool operator!=(const BeginFrameObserverState& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_dropped_begin_frame_args() const { return _has_field_[1]; }
+  int64_t dropped_begin_frame_args() const { return dropped_begin_frame_args_; }
+  void set_dropped_begin_frame_args(int64_t value) { dropped_begin_frame_args_ = value; _has_field_.set(1); }
+
+  bool has_last_begin_frame_args() const { return _has_field_[2]; }
+  const BeginFrameArgs& last_begin_frame_args() const { return *last_begin_frame_args_; }
+  BeginFrameArgs* mutable_last_begin_frame_args() { _has_field_.set(2); return last_begin_frame_args_.get(); }
+
+ private:
+  int64_t dropped_begin_frame_args_{};
+  ::protozero::CopyablePtr<BeginFrameArgs> last_begin_frame_args_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT BeginImplFrameArgs : public ::protozero::CppMessageObj {
+ public:
+  using TimestampsInUs = BeginImplFrameArgs_TimestampsInUs;
+  using State = BeginImplFrameArgs_State;
+  static constexpr auto BEGIN_FRAME_FINISHED = BeginImplFrameArgs_State_BEGIN_FRAME_FINISHED;
+  static constexpr auto BEGIN_FRAME_USING = BeginImplFrameArgs_State_BEGIN_FRAME_USING;
+  static constexpr auto State_MIN = BeginImplFrameArgs_State_BEGIN_FRAME_FINISHED;
+  static constexpr auto State_MAX = BeginImplFrameArgs_State_BEGIN_FRAME_USING;
+  enum FieldNumbers {
+    kUpdatedAtUsFieldNumber = 1,
+    kFinishedAtUsFieldNumber = 2,
+    kStateFieldNumber = 3,
+    kCurrentArgsFieldNumber = 4,
+    kLastArgsFieldNumber = 5,
+    kTimestampsInUsFieldNumber = 6,
+  };
+
+  BeginImplFrameArgs();
+  ~BeginImplFrameArgs() override;
+  BeginImplFrameArgs(BeginImplFrameArgs&&) noexcept;
+  BeginImplFrameArgs& operator=(BeginImplFrameArgs&&);
+  BeginImplFrameArgs(const BeginImplFrameArgs&);
+  BeginImplFrameArgs& operator=(const BeginImplFrameArgs&);
+  bool operator==(const BeginImplFrameArgs&) const;
+  bool operator!=(const BeginImplFrameArgs& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_updated_at_us() const { return _has_field_[1]; }
+  int64_t updated_at_us() const { return updated_at_us_; }
+  void set_updated_at_us(int64_t value) { updated_at_us_ = value; _has_field_.set(1); }
+
+  bool has_finished_at_us() const { return _has_field_[2]; }
+  int64_t finished_at_us() const { return finished_at_us_; }
+  void set_finished_at_us(int64_t value) { finished_at_us_ = value; _has_field_.set(2); }
+
+  bool has_state() const { return _has_field_[3]; }
+  BeginImplFrameArgs_State state() const { return state_; }
+  void set_state(BeginImplFrameArgs_State value) { state_ = value; _has_field_.set(3); }
+
+  bool has_current_args() const { return _has_field_[4]; }
+  const BeginFrameArgs& current_args() const { return *current_args_; }
+  BeginFrameArgs* mutable_current_args() { _has_field_.set(4); return current_args_.get(); }
+
+  bool has_last_args() const { return _has_field_[5]; }
+  const BeginFrameArgs& last_args() const { return *last_args_; }
+  BeginFrameArgs* mutable_last_args() { _has_field_.set(5); return last_args_.get(); }
+
+  bool has_timestamps_in_us() const { return _has_field_[6]; }
+  const BeginImplFrameArgs_TimestampsInUs& timestamps_in_us() const { return *timestamps_in_us_; }
+  BeginImplFrameArgs_TimestampsInUs* mutable_timestamps_in_us() { _has_field_.set(6); return timestamps_in_us_.get(); }
+
+ private:
+  int64_t updated_at_us_{};
+  int64_t finished_at_us_{};
+  BeginImplFrameArgs_State state_{};
+  ::protozero::CopyablePtr<BeginFrameArgs> current_args_;
+  ::protozero::CopyablePtr<BeginFrameArgs> last_args_;
+  ::protozero::CopyablePtr<BeginImplFrameArgs_TimestampsInUs> timestamps_in_us_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<7> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT BeginImplFrameArgs_TimestampsInUs : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIntervalDeltaFieldNumber = 1,
+    kNowToDeadlineDeltaFieldNumber = 2,
+    kFrameTimeToNowDeltaFieldNumber = 3,
+    kFrameTimeToDeadlineDeltaFieldNumber = 4,
+    kNowFieldNumber = 5,
+    kFrameTimeFieldNumber = 6,
+    kDeadlineFieldNumber = 7,
+  };
+
+  BeginImplFrameArgs_TimestampsInUs();
+  ~BeginImplFrameArgs_TimestampsInUs() override;
+  BeginImplFrameArgs_TimestampsInUs(BeginImplFrameArgs_TimestampsInUs&&) noexcept;
+  BeginImplFrameArgs_TimestampsInUs& operator=(BeginImplFrameArgs_TimestampsInUs&&);
+  BeginImplFrameArgs_TimestampsInUs(const BeginImplFrameArgs_TimestampsInUs&);
+  BeginImplFrameArgs_TimestampsInUs& operator=(const BeginImplFrameArgs_TimestampsInUs&);
+  bool operator==(const BeginImplFrameArgs_TimestampsInUs&) const;
+  bool operator!=(const BeginImplFrameArgs_TimestampsInUs& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_interval_delta() const { return _has_field_[1]; }
+  int64_t interval_delta() const { return interval_delta_; }
+  void set_interval_delta(int64_t value) { interval_delta_ = value; _has_field_.set(1); }
+
+  bool has_now_to_deadline_delta() const { return _has_field_[2]; }
+  int64_t now_to_deadline_delta() const { return now_to_deadline_delta_; }
+  void set_now_to_deadline_delta(int64_t value) { now_to_deadline_delta_ = value; _has_field_.set(2); }
+
+  bool has_frame_time_to_now_delta() const { return _has_field_[3]; }
+  int64_t frame_time_to_now_delta() const { return frame_time_to_now_delta_; }
+  void set_frame_time_to_now_delta(int64_t value) { frame_time_to_now_delta_ = value; _has_field_.set(3); }
+
+  bool has_frame_time_to_deadline_delta() const { return _has_field_[4]; }
+  int64_t frame_time_to_deadline_delta() const { return frame_time_to_deadline_delta_; }
+  void set_frame_time_to_deadline_delta(int64_t value) { frame_time_to_deadline_delta_ = value; _has_field_.set(4); }
+
+  bool has_now() const { return _has_field_[5]; }
+  int64_t now() const { return now_; }
+  void set_now(int64_t value) { now_ = value; _has_field_.set(5); }
+
+  bool has_frame_time() const { return _has_field_[6]; }
+  int64_t frame_time() const { return frame_time_; }
+  void set_frame_time(int64_t value) { frame_time_ = value; _has_field_.set(6); }
+
+  bool has_deadline() const { return _has_field_[7]; }
+  int64_t deadline() const { return deadline_; }
+  void set_deadline(int64_t value) { deadline_ = value; _has_field_.set(7); }
+
+ private:
+  int64_t interval_delta_{};
+  int64_t now_to_deadline_delta_{};
+  int64_t frame_time_to_now_delta_{};
+  int64_t frame_time_to_deadline_delta_{};
+  int64_t now_{};
+  int64_t frame_time_{};
+  int64_t deadline_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<8> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ChromeCompositorStateMachine : public ::protozero::CppMessageObj {
+ public:
+  using MajorState = ChromeCompositorStateMachine_MajorState;
+  using MinorState = ChromeCompositorStateMachine_MinorState;
+  enum FieldNumbers {
+    kMajorStateFieldNumber = 1,
+    kMinorStateFieldNumber = 2,
+  };
+
+  ChromeCompositorStateMachine();
+  ~ChromeCompositorStateMachine() override;
+  ChromeCompositorStateMachine(ChromeCompositorStateMachine&&) noexcept;
+  ChromeCompositorStateMachine& operator=(ChromeCompositorStateMachine&&);
+  ChromeCompositorStateMachine(const ChromeCompositorStateMachine&);
+  ChromeCompositorStateMachine& operator=(const ChromeCompositorStateMachine&);
+  bool operator==(const ChromeCompositorStateMachine&) const;
+  bool operator!=(const ChromeCompositorStateMachine& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_major_state() const { return _has_field_[1]; }
+  const ChromeCompositorStateMachine_MajorState& major_state() const { return *major_state_; }
+  ChromeCompositorStateMachine_MajorState* mutable_major_state() { _has_field_.set(1); return major_state_.get(); }
+
+  bool has_minor_state() const { return _has_field_[2]; }
+  const ChromeCompositorStateMachine_MinorState& minor_state() const { return *minor_state_; }
+  ChromeCompositorStateMachine_MinorState* mutable_minor_state() { _has_field_.set(2); return minor_state_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<ChromeCompositorStateMachine_MajorState> major_state_;
+  ::protozero::CopyablePtr<ChromeCompositorStateMachine_MinorState> minor_state_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ChromeCompositorStateMachine_MinorState : public ::protozero::CppMessageObj {
+ public:
+  using TreePriority = ChromeCompositorStateMachine_MinorState_TreePriority;
+  static constexpr auto TREE_PRIORITY_UNSPECIFIED = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_UNSPECIFIED;
+  static constexpr auto TREE_PRIORITY_SAME_PRIORITY_FOR_BOTH_TREES = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_SAME_PRIORITY_FOR_BOTH_TREES;
+  static constexpr auto TREE_PRIORITY_SMOOTHNESS_TAKES_PRIORITY = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_SMOOTHNESS_TAKES_PRIORITY;
+  static constexpr auto TREE_PRIORITY_NEW_CONTENT_TAKES_PRIORITY = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_NEW_CONTENT_TAKES_PRIORITY;
+  static constexpr auto TreePriority_MIN = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_UNSPECIFIED;
+  static constexpr auto TreePriority_MAX = ChromeCompositorStateMachine_MinorState_TreePriority_TREE_PRIORITY_NEW_CONTENT_TAKES_PRIORITY;
+  using ScrollHandlerState = ChromeCompositorStateMachine_MinorState_ScrollHandlerState;
+  static constexpr auto SCROLL_HANDLER_UNSPECIFIED = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_HANDLER_UNSPECIFIED;
+  static constexpr auto SCROLL_AFFECTS_SCROLL_HANDLER = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_AFFECTS_SCROLL_HANDLER;
+  static constexpr auto SCROLL_DOES_NOT_AFFECT_SCROLL_HANDLER = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_DOES_NOT_AFFECT_SCROLL_HANDLER;
+  static constexpr auto ScrollHandlerState_MIN = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_HANDLER_UNSPECIFIED;
+  static constexpr auto ScrollHandlerState_MAX = ChromeCompositorStateMachine_MinorState_ScrollHandlerState_SCROLL_DOES_NOT_AFFECT_SCROLL_HANDLER;
+  enum FieldNumbers {
+    kCommitCountFieldNumber = 1,
+    kCurrentFrameNumberFieldNumber = 2,
+    kLastFrameNumberSubmitPerformedFieldNumber = 3,
+    kLastFrameNumberDrawPerformedFieldNumber = 4,
+    kLastFrameNumberBeginMainFrameSentFieldNumber = 5,
+    kDidDrawFieldNumber = 6,
+    kDidSendBeginMainFrameForCurrentFrameFieldNumber = 7,
+    kDidNotifyBeginMainFrameNotExpectedUntilFieldNumber = 8,
+    kDidNotifyBeginMainFrameNotExpectedSoonFieldNumber = 9,
+    kWantsBeginMainFrameNotExpectedFieldNumber = 10,
+    kDidCommitDuringFrameFieldNumber = 11,
+    kDidInvalidateLayerTreeFrameSinkFieldNumber = 12,
+    kDidPerformImplSideInvalidaionFieldNumber = 13,
+    kDidPrepareTilesFieldNumber = 14,
+    kConsecutiveCheckerboardAnimationsFieldNumber = 15,
+    kPendingSubmitFramesFieldNumber = 16,
+    kSubmitFramesWithCurrentLayerTreeFrameSinkFieldNumber = 17,
+    kNeedsRedrawFieldNumber = 18,
+    kNeedsPrepareTilesFieldNumber = 19,
+    kNeedsBeginMainFrameFieldNumber = 20,
+    kNeedsOneBeginImplFrameFieldNumber = 21,
+    kVisibleFieldNumber = 22,
+    kBeginFrameSourcePausedFieldNumber = 23,
+    kCanDrawFieldNumber = 24,
+    kResourcelessDrawFieldNumber = 25,
+    kHasPendingTreeFieldNumber = 26,
+    kPendingTreeIsReadyForActivationFieldNumber = 27,
+    kActiveTreeNeedsFirstDrawFieldNumber = 28,
+    kActiveTreeIsReadyToDrawFieldNumber = 29,
+    kDidCreateAndInitializeFirstLayerTreeFrameSinkFieldNumber = 30,
+    kTreePriorityFieldNumber = 31,
+    kScrollHandlerStateFieldNumber = 32,
+    kCriticalBeginMainFrameToActivateIsFastFieldNumber = 33,
+    kMainThreadMissedLastDeadlineFieldNumber = 34,
+    kSkipNextBeginMainFrameToReduceLatencyFieldNumber = 35,
+    kVideoNeedsBeginFramesFieldNumber = 36,
+    kDeferBeginMainFrameFieldNumber = 37,
+    kLastCommitHadNoUpdatesFieldNumber = 38,
+    kDidDrawInLastFrameFieldNumber = 39,
+    kDidSubmitInLastFrameFieldNumber = 40,
+    kNeedsImplSideInvalidationFieldNumber = 41,
+    kCurrentPendingTreeIsImplSideFieldNumber = 42,
+    kPreviousPendingTreeWasImplSideFieldNumber = 43,
+    kProcessingAnimationWorkletsForActiveTreeFieldNumber = 44,
+    kProcessingAnimationWorkletsForPendingTreeFieldNumber = 45,
+    kProcessingPaintWorkletsForPendingTreeFieldNumber = 46,
+  };
+
+  ChromeCompositorStateMachine_MinorState();
+  ~ChromeCompositorStateMachine_MinorState() override;
+  ChromeCompositorStateMachine_MinorState(ChromeCompositorStateMachine_MinorState&&) noexcept;
+  ChromeCompositorStateMachine_MinorState& operator=(ChromeCompositorStateMachine_MinorState&&);
+  ChromeCompositorStateMachine_MinorState(const ChromeCompositorStateMachine_MinorState&);
+  ChromeCompositorStateMachine_MinorState& operator=(const ChromeCompositorStateMachine_MinorState&);
+  bool operator==(const ChromeCompositorStateMachine_MinorState&) const;
+  bool operator!=(const ChromeCompositorStateMachine_MinorState& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_commit_count() const { return _has_field_[1]; }
+  int32_t commit_count() const { return commit_count_; }
+  void set_commit_count(int32_t value) { commit_count_ = value; _has_field_.set(1); }
+
+  bool has_current_frame_number() const { return _has_field_[2]; }
+  int32_t current_frame_number() const { return current_frame_number_; }
+  void set_current_frame_number(int32_t value) { current_frame_number_ = value; _has_field_.set(2); }
+
+  bool has_last_frame_number_submit_performed() const { return _has_field_[3]; }
+  int32_t last_frame_number_submit_performed() const { return last_frame_number_submit_performed_; }
+  void set_last_frame_number_submit_performed(int32_t value) { last_frame_number_submit_performed_ = value; _has_field_.set(3); }
+
+  bool has_last_frame_number_draw_performed() const { return _has_field_[4]; }
+  int32_t last_frame_number_draw_performed() const { return last_frame_number_draw_performed_; }
+  void set_last_frame_number_draw_performed(int32_t value) { last_frame_number_draw_performed_ = value; _has_field_.set(4); }
+
+  bool has_last_frame_number_begin_main_frame_sent() const { return _has_field_[5]; }
+  int32_t last_frame_number_begin_main_frame_sent() const { return last_frame_number_begin_main_frame_sent_; }
+  void set_last_frame_number_begin_main_frame_sent(int32_t value) { last_frame_number_begin_main_frame_sent_ = value; _has_field_.set(5); }
+
+  bool has_did_draw() const { return _has_field_[6]; }
+  bool did_draw() const { return did_draw_; }
+  void set_did_draw(bool value) { did_draw_ = value; _has_field_.set(6); }
+
+  bool has_did_send_begin_main_frame_for_current_frame() const { return _has_field_[7]; }
+  bool did_send_begin_main_frame_for_current_frame() const { return did_send_begin_main_frame_for_current_frame_; }
+  void set_did_send_begin_main_frame_for_current_frame(bool value) { did_send_begin_main_frame_for_current_frame_ = value; _has_field_.set(7); }
+
+  bool has_did_notify_begin_main_frame_not_expected_until() const { return _has_field_[8]; }
+  bool did_notify_begin_main_frame_not_expected_until() const { return did_notify_begin_main_frame_not_expected_until_; }
+  void set_did_notify_begin_main_frame_not_expected_until(bool value) { did_notify_begin_main_frame_not_expected_until_ = value; _has_field_.set(8); }
+
+  bool has_did_notify_begin_main_frame_not_expected_soon() const { return _has_field_[9]; }
+  bool did_notify_begin_main_frame_not_expected_soon() const { return did_notify_begin_main_frame_not_expected_soon_; }
+  void set_did_notify_begin_main_frame_not_expected_soon(bool value) { did_notify_begin_main_frame_not_expected_soon_ = value; _has_field_.set(9); }
+
+  bool has_wants_begin_main_frame_not_expected() const { return _has_field_[10]; }
+  bool wants_begin_main_frame_not_expected() const { return wants_begin_main_frame_not_expected_; }
+  void set_wants_begin_main_frame_not_expected(bool value) { wants_begin_main_frame_not_expected_ = value; _has_field_.set(10); }
+
+  bool has_did_commit_during_frame() const { return _has_field_[11]; }
+  bool did_commit_during_frame() const { return did_commit_during_frame_; }
+  void set_did_commit_during_frame(bool value) { did_commit_during_frame_ = value; _has_field_.set(11); }
+
+  bool has_did_invalidate_layer_tree_frame_sink() const { return _has_field_[12]; }
+  bool did_invalidate_layer_tree_frame_sink() const { return did_invalidate_layer_tree_frame_sink_; }
+  void set_did_invalidate_layer_tree_frame_sink(bool value) { did_invalidate_layer_tree_frame_sink_ = value; _has_field_.set(12); }
+
+  bool has_did_perform_impl_side_invalidaion() const { return _has_field_[13]; }
+  bool did_perform_impl_side_invalidaion() const { return did_perform_impl_side_invalidaion_; }
+  void set_did_perform_impl_side_invalidaion(bool value) { did_perform_impl_side_invalidaion_ = value; _has_field_.set(13); }
+
+  bool has_did_prepare_tiles() const { return _has_field_[14]; }
+  bool did_prepare_tiles() const { return did_prepare_tiles_; }
+  void set_did_prepare_tiles(bool value) { did_prepare_tiles_ = value; _has_field_.set(14); }
+
+  bool has_consecutive_checkerboard_animations() const { return _has_field_[15]; }
+  int32_t consecutive_checkerboard_animations() const { return consecutive_checkerboard_animations_; }
+  void set_consecutive_checkerboard_animations(int32_t value) { consecutive_checkerboard_animations_ = value; _has_field_.set(15); }
+
+  bool has_pending_submit_frames() const { return _has_field_[16]; }
+  int32_t pending_submit_frames() const { return pending_submit_frames_; }
+  void set_pending_submit_frames(int32_t value) { pending_submit_frames_ = value; _has_field_.set(16); }
+
+  bool has_submit_frames_with_current_layer_tree_frame_sink() const { return _has_field_[17]; }
+  int32_t submit_frames_with_current_layer_tree_frame_sink() const { return submit_frames_with_current_layer_tree_frame_sink_; }
+  void set_submit_frames_with_current_layer_tree_frame_sink(int32_t value) { submit_frames_with_current_layer_tree_frame_sink_ = value; _has_field_.set(17); }
+
+  bool has_needs_redraw() const { return _has_field_[18]; }
+  bool needs_redraw() const { return needs_redraw_; }
+  void set_needs_redraw(bool value) { needs_redraw_ = value; _has_field_.set(18); }
+
+  bool has_needs_prepare_tiles() const { return _has_field_[19]; }
+  bool needs_prepare_tiles() const { return needs_prepare_tiles_; }
+  void set_needs_prepare_tiles(bool value) { needs_prepare_tiles_ = value; _has_field_.set(19); }
+
+  bool has_needs_begin_main_frame() const { return _has_field_[20]; }
+  bool needs_begin_main_frame() const { return needs_begin_main_frame_; }
+  void set_needs_begin_main_frame(bool value) { needs_begin_main_frame_ = value; _has_field_.set(20); }
+
+  bool has_needs_one_begin_impl_frame() const { return _has_field_[21]; }
+  bool needs_one_begin_impl_frame() const { return needs_one_begin_impl_frame_; }
+  void set_needs_one_begin_impl_frame(bool value) { needs_one_begin_impl_frame_ = value; _has_field_.set(21); }
+
+  bool has_visible() const { return _has_field_[22]; }
+  bool visible() const { return visible_; }
+  void set_visible(bool value) { visible_ = value; _has_field_.set(22); }
+
+  bool has_begin_frame_source_paused() const { return _has_field_[23]; }
+  bool begin_frame_source_paused() const { return begin_frame_source_paused_; }
+  void set_begin_frame_source_paused(bool value) { begin_frame_source_paused_ = value; _has_field_.set(23); }
+
+  bool has_can_draw() const { return _has_field_[24]; }
+  bool can_draw() const { return can_draw_; }
+  void set_can_draw(bool value) { can_draw_ = value; _has_field_.set(24); }
+
+  bool has_resourceless_draw() const { return _has_field_[25]; }
+  bool resourceless_draw() const { return resourceless_draw_; }
+  void set_resourceless_draw(bool value) { resourceless_draw_ = value; _has_field_.set(25); }
+
+  bool has_has_pending_tree() const { return _has_field_[26]; }
+  bool has_pending_tree() const { return has_pending_tree_; }
+  void set_has_pending_tree(bool value) { has_pending_tree_ = value; _has_field_.set(26); }
+
+  bool has_pending_tree_is_ready_for_activation() const { return _has_field_[27]; }
+  bool pending_tree_is_ready_for_activation() const { return pending_tree_is_ready_for_activation_; }
+  void set_pending_tree_is_ready_for_activation(bool value) { pending_tree_is_ready_for_activation_ = value; _has_field_.set(27); }
+
+  bool has_active_tree_needs_first_draw() const { return _has_field_[28]; }
+  bool active_tree_needs_first_draw() const { return active_tree_needs_first_draw_; }
+  void set_active_tree_needs_first_draw(bool value) { active_tree_needs_first_draw_ = value; _has_field_.set(28); }
+
+  bool has_active_tree_is_ready_to_draw() const { return _has_field_[29]; }
+  bool active_tree_is_ready_to_draw() const { return active_tree_is_ready_to_draw_; }
+  void set_active_tree_is_ready_to_draw(bool value) { active_tree_is_ready_to_draw_ = value; _has_field_.set(29); }
+
+  bool has_did_create_and_initialize_first_layer_tree_frame_sink() const { return _has_field_[30]; }
+  bool did_create_and_initialize_first_layer_tree_frame_sink() const { return did_create_and_initialize_first_layer_tree_frame_sink_; }
+  void set_did_create_and_initialize_first_layer_tree_frame_sink(bool value) { did_create_and_initialize_first_layer_tree_frame_sink_ = value; _has_field_.set(30); }
+
+  bool has_tree_priority() const { return _has_field_[31]; }
+  ChromeCompositorStateMachine_MinorState_TreePriority tree_priority() const { return tree_priority_; }
+  void set_tree_priority(ChromeCompositorStateMachine_MinorState_TreePriority value) { tree_priority_ = value; _has_field_.set(31); }
+
+  bool has_scroll_handler_state() const { return _has_field_[32]; }
+  ChromeCompositorStateMachine_MinorState_ScrollHandlerState scroll_handler_state() const { return scroll_handler_state_; }
+  void set_scroll_handler_state(ChromeCompositorStateMachine_MinorState_ScrollHandlerState value) { scroll_handler_state_ = value; _has_field_.set(32); }
+
+  bool has_critical_begin_main_frame_to_activate_is_fast() const { return _has_field_[33]; }
+  bool critical_begin_main_frame_to_activate_is_fast() const { return critical_begin_main_frame_to_activate_is_fast_; }
+  void set_critical_begin_main_frame_to_activate_is_fast(bool value) { critical_begin_main_frame_to_activate_is_fast_ = value; _has_field_.set(33); }
+
+  bool has_main_thread_missed_last_deadline() const { return _has_field_[34]; }
+  bool main_thread_missed_last_deadline() const { return main_thread_missed_last_deadline_; }
+  void set_main_thread_missed_last_deadline(bool value) { main_thread_missed_last_deadline_ = value; _has_field_.set(34); }
+
+  bool has_skip_next_begin_main_frame_to_reduce_latency() const { return _has_field_[35]; }
+  bool skip_next_begin_main_frame_to_reduce_latency() const { return skip_next_begin_main_frame_to_reduce_latency_; }
+  void set_skip_next_begin_main_frame_to_reduce_latency(bool value) { skip_next_begin_main_frame_to_reduce_latency_ = value; _has_field_.set(35); }
+
+  bool has_video_needs_begin_frames() const { return _has_field_[36]; }
+  bool video_needs_begin_frames() const { return video_needs_begin_frames_; }
+  void set_video_needs_begin_frames(bool value) { video_needs_begin_frames_ = value; _has_field_.set(36); }
+
+  bool has_defer_begin_main_frame() const { return _has_field_[37]; }
+  bool defer_begin_main_frame() const { return defer_begin_main_frame_; }
+  void set_defer_begin_main_frame(bool value) { defer_begin_main_frame_ = value; _has_field_.set(37); }
+
+  bool has_last_commit_had_no_updates() const { return _has_field_[38]; }
+  bool last_commit_had_no_updates() const { return last_commit_had_no_updates_; }
+  void set_last_commit_had_no_updates(bool value) { last_commit_had_no_updates_ = value; _has_field_.set(38); }
+
+  bool has_did_draw_in_last_frame() const { return _has_field_[39]; }
+  bool did_draw_in_last_frame() const { return did_draw_in_last_frame_; }
+  void set_did_draw_in_last_frame(bool value) { did_draw_in_last_frame_ = value; _has_field_.set(39); }
+
+  bool has_did_submit_in_last_frame() const { return _has_field_[40]; }
+  bool did_submit_in_last_frame() const { return did_submit_in_last_frame_; }
+  void set_did_submit_in_last_frame(bool value) { did_submit_in_last_frame_ = value; _has_field_.set(40); }
+
+  bool has_needs_impl_side_invalidation() const { return _has_field_[41]; }
+  bool needs_impl_side_invalidation() const { return needs_impl_side_invalidation_; }
+  void set_needs_impl_side_invalidation(bool value) { needs_impl_side_invalidation_ = value; _has_field_.set(41); }
+
+  bool has_current_pending_tree_is_impl_side() const { return _has_field_[42]; }
+  bool current_pending_tree_is_impl_side() const { return current_pending_tree_is_impl_side_; }
+  void set_current_pending_tree_is_impl_side(bool value) { current_pending_tree_is_impl_side_ = value; _has_field_.set(42); }
+
+  bool has_previous_pending_tree_was_impl_side() const { return _has_field_[43]; }
+  bool previous_pending_tree_was_impl_side() const { return previous_pending_tree_was_impl_side_; }
+  void set_previous_pending_tree_was_impl_side(bool value) { previous_pending_tree_was_impl_side_ = value; _has_field_.set(43); }
+
+  bool has_processing_animation_worklets_for_active_tree() const { return _has_field_[44]; }
+  bool processing_animation_worklets_for_active_tree() const { return processing_animation_worklets_for_active_tree_; }
+  void set_processing_animation_worklets_for_active_tree(bool value) { processing_animation_worklets_for_active_tree_ = value; _has_field_.set(44); }
+
+  bool has_processing_animation_worklets_for_pending_tree() const { return _has_field_[45]; }
+  bool processing_animation_worklets_for_pending_tree() const { return processing_animation_worklets_for_pending_tree_; }
+  void set_processing_animation_worklets_for_pending_tree(bool value) { processing_animation_worklets_for_pending_tree_ = value; _has_field_.set(45); }
+
+  bool has_processing_paint_worklets_for_pending_tree() const { return _has_field_[46]; }
+  bool processing_paint_worklets_for_pending_tree() const { return processing_paint_worklets_for_pending_tree_; }
+  void set_processing_paint_worklets_for_pending_tree(bool value) { processing_paint_worklets_for_pending_tree_ = value; _has_field_.set(46); }
+
+ private:
+  int32_t commit_count_{};
+  int32_t current_frame_number_{};
+  int32_t last_frame_number_submit_performed_{};
+  int32_t last_frame_number_draw_performed_{};
+  int32_t last_frame_number_begin_main_frame_sent_{};
+  bool did_draw_{};
+  bool did_send_begin_main_frame_for_current_frame_{};
+  bool did_notify_begin_main_frame_not_expected_until_{};
+  bool did_notify_begin_main_frame_not_expected_soon_{};
+  bool wants_begin_main_frame_not_expected_{};
+  bool did_commit_during_frame_{};
+  bool did_invalidate_layer_tree_frame_sink_{};
+  bool did_perform_impl_side_invalidaion_{};
+  bool did_prepare_tiles_{};
+  int32_t consecutive_checkerboard_animations_{};
+  int32_t pending_submit_frames_{};
+  int32_t submit_frames_with_current_layer_tree_frame_sink_{};
+  bool needs_redraw_{};
+  bool needs_prepare_tiles_{};
+  bool needs_begin_main_frame_{};
+  bool needs_one_begin_impl_frame_{};
+  bool visible_{};
+  bool begin_frame_source_paused_{};
+  bool can_draw_{};
+  bool resourceless_draw_{};
+  bool has_pending_tree_{};
+  bool pending_tree_is_ready_for_activation_{};
+  bool active_tree_needs_first_draw_{};
+  bool active_tree_is_ready_to_draw_{};
+  bool did_create_and_initialize_first_layer_tree_frame_sink_{};
+  ChromeCompositorStateMachine_MinorState_TreePriority tree_priority_{};
+  ChromeCompositorStateMachine_MinorState_ScrollHandlerState scroll_handler_state_{};
+  bool critical_begin_main_frame_to_activate_is_fast_{};
+  bool main_thread_missed_last_deadline_{};
+  bool skip_next_begin_main_frame_to_reduce_latency_{};
+  bool video_needs_begin_frames_{};
+  bool defer_begin_main_frame_{};
+  bool last_commit_had_no_updates_{};
+  bool did_draw_in_last_frame_{};
+  bool did_submit_in_last_frame_{};
+  bool needs_impl_side_invalidation_{};
+  bool current_pending_tree_is_impl_side_{};
+  bool previous_pending_tree_was_impl_side_{};
+  bool processing_animation_worklets_for_active_tree_{};
+  bool processing_animation_worklets_for_pending_tree_{};
+  bool processing_paint_worklets_for_pending_tree_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<47> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ChromeCompositorStateMachine_MajorState : public ::protozero::CppMessageObj {
+ public:
+  using BeginImplFrameState = ChromeCompositorStateMachine_MajorState_BeginImplFrameState;
+  static constexpr auto BEGIN_IMPL_FRAME_UNSPECIFIED = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_UNSPECIFIED;
+  static constexpr auto BEGIN_IMPL_FRAME_IDLE = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_IDLE;
+  static constexpr auto BEGIN_IMPL_FRAME_INSIDE_BEGIN_FRAME = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_BEGIN_FRAME;
+  static constexpr auto BEGIN_IMPL_FRAME_INSIDE_DEADLINE = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_DEADLINE;
+  static constexpr auto BeginImplFrameState_MIN = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_UNSPECIFIED;
+  static constexpr auto BeginImplFrameState_MAX = ChromeCompositorStateMachine_MajorState_BeginImplFrameState_BEGIN_IMPL_FRAME_INSIDE_DEADLINE;
+  using BeginMainFrameState = ChromeCompositorStateMachine_MajorState_BeginMainFrameState;
+  static constexpr auto BEGIN_MAIN_FRAME_UNSPECIFIED = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_UNSPECIFIED;
+  static constexpr auto BEGIN_MAIN_FRAME_IDLE = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_IDLE;
+  static constexpr auto BEGIN_MAIN_FRAME_SENT = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_SENT;
+  static constexpr auto BEGIN_MAIN_FRAME_READY_TO_COMMIT = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_READY_TO_COMMIT;
+  static constexpr auto BeginMainFrameState_MIN = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_UNSPECIFIED;
+  static constexpr auto BeginMainFrameState_MAX = ChromeCompositorStateMachine_MajorState_BeginMainFrameState_BEGIN_MAIN_FRAME_READY_TO_COMMIT;
+  using LayerTreeFrameSinkState = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState;
+  static constexpr auto LAYER_TREE_FRAME_UNSPECIFIED = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_UNSPECIFIED;
+  static constexpr auto LAYER_TREE_FRAME_NONE = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_NONE;
+  static constexpr auto LAYER_TREE_FRAME_ACTIVE = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_ACTIVE;
+  static constexpr auto LAYER_TREE_FRAME_CREATING = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_CREATING;
+  static constexpr auto LAYER_TREE_FRAME_WAITING_FOR_FIRST_COMMIT = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_COMMIT;
+  static constexpr auto LAYER_TREE_FRAME_WAITING_FOR_FIRST_ACTIVATION = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_ACTIVATION;
+  static constexpr auto LayerTreeFrameSinkState_MIN = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_UNSPECIFIED;
+  static constexpr auto LayerTreeFrameSinkState_MAX = ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState_LAYER_TREE_FRAME_WAITING_FOR_FIRST_ACTIVATION;
+  using ForcedRedrawOnTimeoutState = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState;
+  static constexpr auto FORCED_REDRAW_UNSPECIFIED = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_UNSPECIFIED;
+  static constexpr auto FORCED_REDRAW_IDLE = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_IDLE;
+  static constexpr auto FORCED_REDRAW_WAITING_FOR_COMMIT = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_COMMIT;
+  static constexpr auto FORCED_REDRAW_WAITING_FOR_ACTIVATION = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_ACTIVATION;
+  static constexpr auto FORCED_REDRAW_WAITING_FOR_DRAW = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_DRAW;
+  static constexpr auto ForcedRedrawOnTimeoutState_MIN = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_UNSPECIFIED;
+  static constexpr auto ForcedRedrawOnTimeoutState_MAX = ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState_FORCED_REDRAW_WAITING_FOR_DRAW;
+  enum FieldNumbers {
+    kNextActionFieldNumber = 1,
+    kBeginImplFrameStateFieldNumber = 2,
+    kBeginMainFrameStateFieldNumber = 3,
+    kLayerTreeFrameSinkStateFieldNumber = 4,
+    kForcedRedrawStateFieldNumber = 5,
+  };
+
+  ChromeCompositorStateMachine_MajorState();
+  ~ChromeCompositorStateMachine_MajorState() override;
+  ChromeCompositorStateMachine_MajorState(ChromeCompositorStateMachine_MajorState&&) noexcept;
+  ChromeCompositorStateMachine_MajorState& operator=(ChromeCompositorStateMachine_MajorState&&);
+  ChromeCompositorStateMachine_MajorState(const ChromeCompositorStateMachine_MajorState&);
+  ChromeCompositorStateMachine_MajorState& operator=(const ChromeCompositorStateMachine_MajorState&);
+  bool operator==(const ChromeCompositorStateMachine_MajorState&) const;
+  bool operator!=(const ChromeCompositorStateMachine_MajorState& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_next_action() const { return _has_field_[1]; }
+  ChromeCompositorSchedulerAction next_action() const { return next_action_; }
+  void set_next_action(ChromeCompositorSchedulerAction value) { next_action_ = value; _has_field_.set(1); }
+
+  bool has_begin_impl_frame_state() const { return _has_field_[2]; }
+  ChromeCompositorStateMachine_MajorState_BeginImplFrameState begin_impl_frame_state() const { return begin_impl_frame_state_; }
+  void set_begin_impl_frame_state(ChromeCompositorStateMachine_MajorState_BeginImplFrameState value) { begin_impl_frame_state_ = value; _has_field_.set(2); }
+
+  bool has_begin_main_frame_state() const { return _has_field_[3]; }
+  ChromeCompositorStateMachine_MajorState_BeginMainFrameState begin_main_frame_state() const { return begin_main_frame_state_; }
+  void set_begin_main_frame_state(ChromeCompositorStateMachine_MajorState_BeginMainFrameState value) { begin_main_frame_state_ = value; _has_field_.set(3); }
+
+  bool has_layer_tree_frame_sink_state() const { return _has_field_[4]; }
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState layer_tree_frame_sink_state() const { return layer_tree_frame_sink_state_; }
+  void set_layer_tree_frame_sink_state(ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState value) { layer_tree_frame_sink_state_ = value; _has_field_.set(4); }
+
+  bool has_forced_redraw_state() const { return _has_field_[5]; }
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState forced_redraw_state() const { return forced_redraw_state_; }
+  void set_forced_redraw_state(ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState value) { forced_redraw_state_ = value; _has_field_.set(5); }
+
+ private:
+  ChromeCompositorSchedulerAction next_action_{};
+  ChromeCompositorStateMachine_MajorState_BeginImplFrameState begin_impl_frame_state_{};
+  ChromeCompositorStateMachine_MajorState_BeginMainFrameState begin_main_frame_state_{};
+  ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState layer_tree_frame_sink_state_{};
+  ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState forced_redraw_state_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ChromeCompositorSchedulerState : public ::protozero::CppMessageObj {
+ public:
+  using BeginImplFrameDeadlineMode = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode;
+  static constexpr auto DEADLINE_MODE_UNSPECIFIED = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_UNSPECIFIED;
+  static constexpr auto DEADLINE_MODE_NONE = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_NONE;
+  static constexpr auto DEADLINE_MODE_IMMEDIATE = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_IMMEDIATE;
+  static constexpr auto DEADLINE_MODE_REGULAR = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_REGULAR;
+  static constexpr auto DEADLINE_MODE_LATE = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_LATE;
+  static constexpr auto DEADLINE_MODE_BLOCKED = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_BLOCKED;
+  static constexpr auto BeginImplFrameDeadlineMode_MIN = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_UNSPECIFIED;
+  static constexpr auto BeginImplFrameDeadlineMode_MAX = ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode_DEADLINE_MODE_BLOCKED;
+  enum FieldNumbers {
+    kStateMachineFieldNumber = 1,
+    kObservingBeginFrameSourceFieldNumber = 2,
+    kBeginImplFrameDeadlineTaskFieldNumber = 3,
+    kPendingBeginFrameTaskFieldNumber = 4,
+    kSkippedLastFrameMissedExceededDeadlineFieldNumber = 5,
+    kSkippedLastFrameToReduceLatencyFieldNumber = 6,
+    kInsideActionFieldNumber = 7,
+    kDeadlineModeFieldNumber = 8,
+    kDeadlineUsFieldNumber = 9,
+    kDeadlineScheduledAtUsFieldNumber = 10,
+    kNowUsFieldNumber = 11,
+    kNowToDeadlineDeltaUsFieldNumber = 12,
+    kNowToDeadlineScheduledAtDeltaUsFieldNumber = 13,
+    kBeginImplFrameArgsFieldNumber = 14,
+    kBeginFrameObserverStateFieldNumber = 15,
+    kBeginFrameSourceStateFieldNumber = 16,
+    kCompositorTimingHistoryFieldNumber = 17,
+  };
+
+  ChromeCompositorSchedulerState();
+  ~ChromeCompositorSchedulerState() override;
+  ChromeCompositorSchedulerState(ChromeCompositorSchedulerState&&) noexcept;
+  ChromeCompositorSchedulerState& operator=(ChromeCompositorSchedulerState&&);
+  ChromeCompositorSchedulerState(const ChromeCompositorSchedulerState&);
+  ChromeCompositorSchedulerState& operator=(const ChromeCompositorSchedulerState&);
+  bool operator==(const ChromeCompositorSchedulerState&) const;
+  bool operator!=(const ChromeCompositorSchedulerState& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_state_machine() const { return _has_field_[1]; }
+  const ChromeCompositorStateMachine& state_machine() const { return *state_machine_; }
+  ChromeCompositorStateMachine* mutable_state_machine() { _has_field_.set(1); return state_machine_.get(); }
+
+  bool has_observing_begin_frame_source() const { return _has_field_[2]; }
+  bool observing_begin_frame_source() const { return observing_begin_frame_source_; }
+  void set_observing_begin_frame_source(bool value) { observing_begin_frame_source_ = value; _has_field_.set(2); }
+
+  bool has_begin_impl_frame_deadline_task() const { return _has_field_[3]; }
+  bool begin_impl_frame_deadline_task() const { return begin_impl_frame_deadline_task_; }
+  void set_begin_impl_frame_deadline_task(bool value) { begin_impl_frame_deadline_task_ = value; _has_field_.set(3); }
+
+  bool has_pending_begin_frame_task() const { return _has_field_[4]; }
+  bool pending_begin_frame_task() const { return pending_begin_frame_task_; }
+  void set_pending_begin_frame_task(bool value) { pending_begin_frame_task_ = value; _has_field_.set(4); }
+
+  bool has_skipped_last_frame_missed_exceeded_deadline() const { return _has_field_[5]; }
+  bool skipped_last_frame_missed_exceeded_deadline() const { return skipped_last_frame_missed_exceeded_deadline_; }
+  void set_skipped_last_frame_missed_exceeded_deadline(bool value) { skipped_last_frame_missed_exceeded_deadline_ = value; _has_field_.set(5); }
+
+  bool has_skipped_last_frame_to_reduce_latency() const { return _has_field_[6]; }
+  bool skipped_last_frame_to_reduce_latency() const { return skipped_last_frame_to_reduce_latency_; }
+  void set_skipped_last_frame_to_reduce_latency(bool value) { skipped_last_frame_to_reduce_latency_ = value; _has_field_.set(6); }
+
+  bool has_inside_action() const { return _has_field_[7]; }
+  ChromeCompositorSchedulerAction inside_action() const { return inside_action_; }
+  void set_inside_action(ChromeCompositorSchedulerAction value) { inside_action_ = value; _has_field_.set(7); }
+
+  bool has_deadline_mode() const { return _has_field_[8]; }
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode deadline_mode() const { return deadline_mode_; }
+  void set_deadline_mode(ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode value) { deadline_mode_ = value; _has_field_.set(8); }
+
+  bool has_deadline_us() const { return _has_field_[9]; }
+  int64_t deadline_us() const { return deadline_us_; }
+  void set_deadline_us(int64_t value) { deadline_us_ = value; _has_field_.set(9); }
+
+  bool has_deadline_scheduled_at_us() const { return _has_field_[10]; }
+  int64_t deadline_scheduled_at_us() const { return deadline_scheduled_at_us_; }
+  void set_deadline_scheduled_at_us(int64_t value) { deadline_scheduled_at_us_ = value; _has_field_.set(10); }
+
+  bool has_now_us() const { return _has_field_[11]; }
+  int64_t now_us() const { return now_us_; }
+  void set_now_us(int64_t value) { now_us_ = value; _has_field_.set(11); }
+
+  bool has_now_to_deadline_delta_us() const { return _has_field_[12]; }
+  int64_t now_to_deadline_delta_us() const { return now_to_deadline_delta_us_; }
+  void set_now_to_deadline_delta_us(int64_t value) { now_to_deadline_delta_us_ = value; _has_field_.set(12); }
+
+  bool has_now_to_deadline_scheduled_at_delta_us() const { return _has_field_[13]; }
+  int64_t now_to_deadline_scheduled_at_delta_us() const { return now_to_deadline_scheduled_at_delta_us_; }
+  void set_now_to_deadline_scheduled_at_delta_us(int64_t value) { now_to_deadline_scheduled_at_delta_us_ = value; _has_field_.set(13); }
+
+  bool has_begin_impl_frame_args() const { return _has_field_[14]; }
+  const BeginImplFrameArgs& begin_impl_frame_args() const { return *begin_impl_frame_args_; }
+  BeginImplFrameArgs* mutable_begin_impl_frame_args() { _has_field_.set(14); return begin_impl_frame_args_.get(); }
+
+  bool has_begin_frame_observer_state() const { return _has_field_[15]; }
+  const BeginFrameObserverState& begin_frame_observer_state() const { return *begin_frame_observer_state_; }
+  BeginFrameObserverState* mutable_begin_frame_observer_state() { _has_field_.set(15); return begin_frame_observer_state_.get(); }
+
+  bool has_begin_frame_source_state() const { return _has_field_[16]; }
+  const BeginFrameSourceState& begin_frame_source_state() const { return *begin_frame_source_state_; }
+  BeginFrameSourceState* mutable_begin_frame_source_state() { _has_field_.set(16); return begin_frame_source_state_.get(); }
+
+  bool has_compositor_timing_history() const { return _has_field_[17]; }
+  const CompositorTimingHistory& compositor_timing_history() const { return *compositor_timing_history_; }
+  CompositorTimingHistory* mutable_compositor_timing_history() { _has_field_.set(17); return compositor_timing_history_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<ChromeCompositorStateMachine> state_machine_;
+  bool observing_begin_frame_source_{};
+  bool begin_impl_frame_deadline_task_{};
+  bool pending_begin_frame_task_{};
+  bool skipped_last_frame_missed_exceeded_deadline_{};
+  bool skipped_last_frame_to_reduce_latency_{};
+  ChromeCompositorSchedulerAction inside_action_{};
+  ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode deadline_mode_{};
+  int64_t deadline_us_{};
+  int64_t deadline_scheduled_at_us_{};
+  int64_t now_us_{};
+  int64_t now_to_deadline_delta_us_{};
+  int64_t now_to_deadline_scheduled_at_delta_us_{};
+  ::protozero::CopyablePtr<BeginImplFrameArgs> begin_impl_frame_args_;
+  ::protozero::CopyablePtr<BeginFrameObserverState> begin_frame_observer_state_;
+  ::protozero::CopyablePtr<BeginFrameSourceState> begin_frame_source_state_;
+  ::protozero::CopyablePtr<CompositorTimingHistory> compositor_timing_history_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<18> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_COMPOSITOR_SCHEDULER_STATE_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_content_settings_event_info.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_CONTENT_SETTINGS_EVENT_INFO_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_CONTENT_SETTINGS_EVENT_INFO_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeContentSettingsEventInfo;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT ChromeContentSettingsEventInfo : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNumberOfExceptionsFieldNumber = 1,
+  };
+
+  ChromeContentSettingsEventInfo();
+  ~ChromeContentSettingsEventInfo() override;
+  ChromeContentSettingsEventInfo(ChromeContentSettingsEventInfo&&) noexcept;
+  ChromeContentSettingsEventInfo& operator=(ChromeContentSettingsEventInfo&&);
+  ChromeContentSettingsEventInfo(const ChromeContentSettingsEventInfo&);
+  ChromeContentSettingsEventInfo& operator=(const ChromeContentSettingsEventInfo&);
+  bool operator==(const ChromeContentSettingsEventInfo&) const;
+  bool operator!=(const ChromeContentSettingsEventInfo& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_number_of_exceptions() const { return _has_field_[1]; }
+  uint32_t number_of_exceptions() const { return number_of_exceptions_; }
+  void set_number_of_exceptions(uint32_t value) { number_of_exceptions_ = value; _has_field_.set(1); }
+
+ private:
+  uint32_t number_of_exceptions_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_CONTENT_SETTINGS_EVENT_INFO_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_frame_reporter.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_FRAME_REPORTER_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_FRAME_REPORTER_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeFrameReporter;
+enum ChromeFrameReporter_State : int;
+enum ChromeFrameReporter_FrameDropReason : int;
+enum ChromeFrameReporter_ScrollState : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeFrameReporter_State : int {
+  ChromeFrameReporter_State_STATE_NO_UPDATE_DESIRED = 0,
+  ChromeFrameReporter_State_STATE_PRESENTED_ALL = 1,
+  ChromeFrameReporter_State_STATE_PRESENTED_PARTIAL = 2,
+  ChromeFrameReporter_State_STATE_DROPPED = 3,
+};
+enum ChromeFrameReporter_FrameDropReason : int {
+  ChromeFrameReporter_FrameDropReason_REASON_UNSPECIFIED = 0,
+  ChromeFrameReporter_FrameDropReason_REASON_DISPLAY_COMPOSITOR = 1,
+  ChromeFrameReporter_FrameDropReason_REASON_MAIN_THREAD = 2,
+  ChromeFrameReporter_FrameDropReason_REASON_CLIENT_COMPOSITOR = 3,
+};
+enum ChromeFrameReporter_ScrollState : int {
+  ChromeFrameReporter_ScrollState_SCROLL_NONE = 0,
+  ChromeFrameReporter_ScrollState_SCROLL_MAIN_THREAD = 1,
+  ChromeFrameReporter_ScrollState_SCROLL_COMPOSITOR_THREAD = 2,
+  ChromeFrameReporter_ScrollState_SCROLL_UNKNOWN = 3,
+};
+
+class PERFETTO_EXPORT ChromeFrameReporter : public ::protozero::CppMessageObj {
+ public:
+  using State = ChromeFrameReporter_State;
+  static constexpr auto STATE_NO_UPDATE_DESIRED = ChromeFrameReporter_State_STATE_NO_UPDATE_DESIRED;
+  static constexpr auto STATE_PRESENTED_ALL = ChromeFrameReporter_State_STATE_PRESENTED_ALL;
+  static constexpr auto STATE_PRESENTED_PARTIAL = ChromeFrameReporter_State_STATE_PRESENTED_PARTIAL;
+  static constexpr auto STATE_DROPPED = ChromeFrameReporter_State_STATE_DROPPED;
+  static constexpr auto State_MIN = ChromeFrameReporter_State_STATE_NO_UPDATE_DESIRED;
+  static constexpr auto State_MAX = ChromeFrameReporter_State_STATE_DROPPED;
+  using FrameDropReason = ChromeFrameReporter_FrameDropReason;
+  static constexpr auto REASON_UNSPECIFIED = ChromeFrameReporter_FrameDropReason_REASON_UNSPECIFIED;
+  static constexpr auto REASON_DISPLAY_COMPOSITOR = ChromeFrameReporter_FrameDropReason_REASON_DISPLAY_COMPOSITOR;
+  static constexpr auto REASON_MAIN_THREAD = ChromeFrameReporter_FrameDropReason_REASON_MAIN_THREAD;
+  static constexpr auto REASON_CLIENT_COMPOSITOR = ChromeFrameReporter_FrameDropReason_REASON_CLIENT_COMPOSITOR;
+  static constexpr auto FrameDropReason_MIN = ChromeFrameReporter_FrameDropReason_REASON_UNSPECIFIED;
+  static constexpr auto FrameDropReason_MAX = ChromeFrameReporter_FrameDropReason_REASON_CLIENT_COMPOSITOR;
+  using ScrollState = ChromeFrameReporter_ScrollState;
+  static constexpr auto SCROLL_NONE = ChromeFrameReporter_ScrollState_SCROLL_NONE;
+  static constexpr auto SCROLL_MAIN_THREAD = ChromeFrameReporter_ScrollState_SCROLL_MAIN_THREAD;
+  static constexpr auto SCROLL_COMPOSITOR_THREAD = ChromeFrameReporter_ScrollState_SCROLL_COMPOSITOR_THREAD;
+  static constexpr auto SCROLL_UNKNOWN = ChromeFrameReporter_ScrollState_SCROLL_UNKNOWN;
+  static constexpr auto ScrollState_MIN = ChromeFrameReporter_ScrollState_SCROLL_NONE;
+  static constexpr auto ScrollState_MAX = ChromeFrameReporter_ScrollState_SCROLL_UNKNOWN;
+  enum FieldNumbers {
+    kStateFieldNumber = 1,
+    kReasonFieldNumber = 2,
+    kFrameSourceFieldNumber = 3,
+    kFrameSequenceFieldNumber = 4,
+    kAffectsSmoothnessFieldNumber = 5,
+    kScrollStateFieldNumber = 6,
+    kHasMainAnimationFieldNumber = 7,
+    kHasCompositorAnimationFieldNumber = 8,
+    kHasSmoothInputMainFieldNumber = 9,
+    kHasMissingContentFieldNumber = 10,
+    kLayerTreeHostIdFieldNumber = 11,
+  };
+
+  ChromeFrameReporter();
+  ~ChromeFrameReporter() override;
+  ChromeFrameReporter(ChromeFrameReporter&&) noexcept;
+  ChromeFrameReporter& operator=(ChromeFrameReporter&&);
+  ChromeFrameReporter(const ChromeFrameReporter&);
+  ChromeFrameReporter& operator=(const ChromeFrameReporter&);
+  bool operator==(const ChromeFrameReporter&) const;
+  bool operator!=(const ChromeFrameReporter& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_state() const { return _has_field_[1]; }
+  ChromeFrameReporter_State state() const { return state_; }
+  void set_state(ChromeFrameReporter_State value) { state_ = value; _has_field_.set(1); }
+
+  bool has_reason() const { return _has_field_[2]; }
+  ChromeFrameReporter_FrameDropReason reason() const { return reason_; }
+  void set_reason(ChromeFrameReporter_FrameDropReason value) { reason_ = value; _has_field_.set(2); }
+
+  bool has_frame_source() const { return _has_field_[3]; }
+  uint64_t frame_source() const { return frame_source_; }
+  void set_frame_source(uint64_t value) { frame_source_ = value; _has_field_.set(3); }
+
+  bool has_frame_sequence() const { return _has_field_[4]; }
+  uint64_t frame_sequence() const { return frame_sequence_; }
+  void set_frame_sequence(uint64_t value) { frame_sequence_ = value; _has_field_.set(4); }
+
+  bool has_affects_smoothness() const { return _has_field_[5]; }
+  bool affects_smoothness() const { return affects_smoothness_; }
+  void set_affects_smoothness(bool value) { affects_smoothness_ = value; _has_field_.set(5); }
+
+  bool has_scroll_state() const { return _has_field_[6]; }
+  ChromeFrameReporter_ScrollState scroll_state() const { return scroll_state_; }
+  void set_scroll_state(ChromeFrameReporter_ScrollState value) { scroll_state_ = value; _has_field_.set(6); }
+
+  bool has_has_main_animation() const { return _has_field_[7]; }
+  bool has_main_animation() const { return has_main_animation_; }
+  void set_has_main_animation(bool value) { has_main_animation_ = value; _has_field_.set(7); }
+
+  bool has_has_compositor_animation() const { return _has_field_[8]; }
+  bool has_compositor_animation() const { return has_compositor_animation_; }
+  void set_has_compositor_animation(bool value) { has_compositor_animation_ = value; _has_field_.set(8); }
+
+  bool has_has_smooth_input_main() const { return _has_field_[9]; }
+  bool has_smooth_input_main() const { return has_smooth_input_main_; }
+  void set_has_smooth_input_main(bool value) { has_smooth_input_main_ = value; _has_field_.set(9); }
+
+  bool has_has_missing_content() const { return _has_field_[10]; }
+  bool has_missing_content() const { return has_missing_content_; }
+  void set_has_missing_content(bool value) { has_missing_content_ = value; _has_field_.set(10); }
+
+  bool has_layer_tree_host_id() const { return _has_field_[11]; }
+  uint64_t layer_tree_host_id() const { return layer_tree_host_id_; }
+  void set_layer_tree_host_id(uint64_t value) { layer_tree_host_id_ = value; _has_field_.set(11); }
+
+ private:
+  ChromeFrameReporter_State state_{};
+  ChromeFrameReporter_FrameDropReason reason_{};
+  uint64_t frame_source_{};
+  uint64_t frame_sequence_{};
+  bool affects_smoothness_{};
+  ChromeFrameReporter_ScrollState scroll_state_{};
+  bool has_main_animation_{};
+  bool has_compositor_animation_{};
+  bool has_smooth_input_main_{};
+  bool has_missing_content_{};
+  uint64_t layer_tree_host_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<12> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_FRAME_REPORTER_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_histogram_sample.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_HISTOGRAM_SAMPLE_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_HISTOGRAM_SAMPLE_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeHistogramSample;
+class HistogramName;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT ChromeHistogramSample : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameHashFieldNumber = 1,
+    kNameFieldNumber = 2,
+    kSampleFieldNumber = 3,
+    kNameIidFieldNumber = 4,
+  };
+
+  ChromeHistogramSample();
+  ~ChromeHistogramSample() override;
+  ChromeHistogramSample(ChromeHistogramSample&&) noexcept;
+  ChromeHistogramSample& operator=(ChromeHistogramSample&&);
+  ChromeHistogramSample(const ChromeHistogramSample&);
+  ChromeHistogramSample& operator=(const ChromeHistogramSample&);
+  bool operator==(const ChromeHistogramSample&) const;
+  bool operator!=(const ChromeHistogramSample& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name_hash() const { return _has_field_[1]; }
+  uint64_t name_hash() const { return name_hash_; }
+  void set_name_hash(uint64_t value) { name_hash_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+  bool has_sample() const { return _has_field_[3]; }
+  int64_t sample() const { return sample_; }
+  void set_sample(int64_t value) { sample_ = value; _has_field_.set(3); }
+
+  bool has_name_iid() const { return _has_field_[4]; }
+  uint64_t name_iid() const { return name_iid_; }
+  void set_name_iid(uint64_t value) { name_iid_ = value; _has_field_.set(4); }
+
+ private:
+  uint64_t name_hash_{};
+  std::string name_{};
+  int64_t sample_{};
+  uint64_t name_iid_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT HistogramName : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIidFieldNumber = 1,
+    kNameFieldNumber = 2,
+  };
+
+  HistogramName();
+  ~HistogramName() override;
+  HistogramName(HistogramName&&) noexcept;
+  HistogramName& operator=(HistogramName&&);
+  HistogramName(const HistogramName&);
+  HistogramName& operator=(const HistogramName&);
+  bool operator==(const HistogramName&) const;
+  bool operator!=(const HistogramName& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_iid() const { return _has_field_[1]; }
+  uint64_t iid() const { return iid_; }
+  void set_iid(uint64_t value) { iid_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+ private:
+  uint64_t iid_{};
+  std::string name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_HISTOGRAM_SAMPLE_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_keyed_service.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_KEYED_SERVICE_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_KEYED_SERVICE_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeKeyedService;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT ChromeKeyedService : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+  };
+
+  ChromeKeyedService();
+  ~ChromeKeyedService() override;
+  ChromeKeyedService(ChromeKeyedService&&) noexcept;
+  ChromeKeyedService& operator=(ChromeKeyedService&&);
+  ChromeKeyedService(const ChromeKeyedService&);
+  ChromeKeyedService& operator=(const ChromeKeyedService&);
+  bool operator==(const ChromeKeyedService&) const;
+  bool operator!=(const ChromeKeyedService& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+ private:
+  std::string name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_KEYED_SERVICE_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_latency_info.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LATENCY_INFO_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LATENCY_INFO_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeLatencyInfo;
+class ChromeLatencyInfo_ComponentInfo;
+enum ChromeLatencyInfo_Step : int;
+enum ChromeLatencyInfo_LatencyComponentType : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeLatencyInfo_Step : int {
+  ChromeLatencyInfo_Step_STEP_UNSPECIFIED = 0,
+  ChromeLatencyInfo_Step_STEP_SEND_INPUT_EVENT_UI = 3,
+  ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_IMPL = 5,
+  ChromeLatencyInfo_Step_STEP_DID_HANDLE_INPUT_AND_OVERSCROLL = 8,
+  ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_MAIN = 4,
+  ChromeLatencyInfo_Step_STEP_MAIN_THREAD_SCROLL_UPDATE = 2,
+  ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_MAIN_COMMIT = 1,
+  ChromeLatencyInfo_Step_STEP_HANDLED_INPUT_EVENT_MAIN_OR_IMPL = 9,
+  ChromeLatencyInfo_Step_STEP_HANDLED_INPUT_EVENT_IMPL = 10,
+  ChromeLatencyInfo_Step_STEP_SWAP_BUFFERS = 6,
+  ChromeLatencyInfo_Step_STEP_DRAW_AND_SWAP = 7,
+  ChromeLatencyInfo_Step_STEP_FINISHED_SWAP_BUFFERS = 11,
+};
+enum ChromeLatencyInfo_LatencyComponentType : int {
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_UNSPECIFIED = 0,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_BEGIN_RWH = 1,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_ORIGINAL = 2,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FIRST_SCROLL_UPDATE_ORIGINAL = 3,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_ORIGINAL = 4,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_UI = 5,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERER_MAIN = 6,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_MAIN = 7,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_IMPL = 8,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_LAST_EVENT = 9,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_ACK_RWH = 10,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERER_SWAP = 11,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_DISPLAY_COMPOSITOR_RECEIVED_FRAME = 12,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_GPU_SWAP_BUFFER = 13,
+  ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FRAME_SWAP = 14,
+};
+
+class PERFETTO_EXPORT ChromeLatencyInfo : public ::protozero::CppMessageObj {
+ public:
+  using ComponentInfo = ChromeLatencyInfo_ComponentInfo;
+  using Step = ChromeLatencyInfo_Step;
+  static constexpr auto STEP_UNSPECIFIED = ChromeLatencyInfo_Step_STEP_UNSPECIFIED;
+  static constexpr auto STEP_SEND_INPUT_EVENT_UI = ChromeLatencyInfo_Step_STEP_SEND_INPUT_EVENT_UI;
+  static constexpr auto STEP_HANDLE_INPUT_EVENT_IMPL = ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_IMPL;
+  static constexpr auto STEP_DID_HANDLE_INPUT_AND_OVERSCROLL = ChromeLatencyInfo_Step_STEP_DID_HANDLE_INPUT_AND_OVERSCROLL;
+  static constexpr auto STEP_HANDLE_INPUT_EVENT_MAIN = ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_MAIN;
+  static constexpr auto STEP_MAIN_THREAD_SCROLL_UPDATE = ChromeLatencyInfo_Step_STEP_MAIN_THREAD_SCROLL_UPDATE;
+  static constexpr auto STEP_HANDLE_INPUT_EVENT_MAIN_COMMIT = ChromeLatencyInfo_Step_STEP_HANDLE_INPUT_EVENT_MAIN_COMMIT;
+  static constexpr auto STEP_HANDLED_INPUT_EVENT_MAIN_OR_IMPL = ChromeLatencyInfo_Step_STEP_HANDLED_INPUT_EVENT_MAIN_OR_IMPL;
+  static constexpr auto STEP_HANDLED_INPUT_EVENT_IMPL = ChromeLatencyInfo_Step_STEP_HANDLED_INPUT_EVENT_IMPL;
+  static constexpr auto STEP_SWAP_BUFFERS = ChromeLatencyInfo_Step_STEP_SWAP_BUFFERS;
+  static constexpr auto STEP_DRAW_AND_SWAP = ChromeLatencyInfo_Step_STEP_DRAW_AND_SWAP;
+  static constexpr auto STEP_FINISHED_SWAP_BUFFERS = ChromeLatencyInfo_Step_STEP_FINISHED_SWAP_BUFFERS;
+  static constexpr auto Step_MIN = ChromeLatencyInfo_Step_STEP_UNSPECIFIED;
+  static constexpr auto Step_MAX = ChromeLatencyInfo_Step_STEP_FINISHED_SWAP_BUFFERS;
+  using LatencyComponentType = ChromeLatencyInfo_LatencyComponentType;
+  static constexpr auto COMPONENT_UNSPECIFIED = ChromeLatencyInfo_LatencyComponentType_COMPONENT_UNSPECIFIED;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_BEGIN_RWH = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_BEGIN_RWH;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_ORIGINAL = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_ORIGINAL;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_FIRST_SCROLL_UPDATE_ORIGINAL = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FIRST_SCROLL_UPDATE_ORIGINAL;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_ORIGINAL = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_ORIGINAL;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_UI = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_UI;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_RENDERER_MAIN = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERER_MAIN;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_MAIN = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_MAIN;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_IMPL = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERING_SCHEDULED_IMPL;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_LAST_EVENT = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_SCROLL_UPDATE_LAST_EVENT;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_ACK_RWH = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_ACK_RWH;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_RENDERER_SWAP = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_RENDERER_SWAP;
+  static constexpr auto COMPONENT_DISPLAY_COMPOSITOR_RECEIVED_FRAME = ChromeLatencyInfo_LatencyComponentType_COMPONENT_DISPLAY_COMPOSITOR_RECEIVED_FRAME;
+  static constexpr auto COMPONENT_INPUT_EVENT_GPU_SWAP_BUFFER = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_GPU_SWAP_BUFFER;
+  static constexpr auto COMPONENT_INPUT_EVENT_LATENCY_FRAME_SWAP = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FRAME_SWAP;
+  static constexpr auto LatencyComponentType_MIN = ChromeLatencyInfo_LatencyComponentType_COMPONENT_UNSPECIFIED;
+  static constexpr auto LatencyComponentType_MAX = ChromeLatencyInfo_LatencyComponentType_COMPONENT_INPUT_EVENT_LATENCY_FRAME_SWAP;
+  enum FieldNumbers {
+    kTraceIdFieldNumber = 1,
+    kStepFieldNumber = 2,
+    kFrameTreeNodeIdFieldNumber = 3,
+    kComponentInfoFieldNumber = 4,
+    kIsCoalescedFieldNumber = 5,
+    kGestureScrollIdFieldNumber = 6,
+  };
+
+  ChromeLatencyInfo();
+  ~ChromeLatencyInfo() override;
+  ChromeLatencyInfo(ChromeLatencyInfo&&) noexcept;
+  ChromeLatencyInfo& operator=(ChromeLatencyInfo&&);
+  ChromeLatencyInfo(const ChromeLatencyInfo&);
+  ChromeLatencyInfo& operator=(const ChromeLatencyInfo&);
+  bool operator==(const ChromeLatencyInfo&) const;
+  bool operator!=(const ChromeLatencyInfo& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_id() const { return _has_field_[1]; }
+  int64_t trace_id() const { return trace_id_; }
+  void set_trace_id(int64_t value) { trace_id_ = value; _has_field_.set(1); }
+
+  bool has_step() const { return _has_field_[2]; }
+  ChromeLatencyInfo_Step step() const { return step_; }
+  void set_step(ChromeLatencyInfo_Step value) { step_ = value; _has_field_.set(2); }
+
+  bool has_frame_tree_node_id() const { return _has_field_[3]; }
+  int32_t frame_tree_node_id() const { return frame_tree_node_id_; }
+  void set_frame_tree_node_id(int32_t value) { frame_tree_node_id_ = value; _has_field_.set(3); }
+
+  const std::vector<ChromeLatencyInfo_ComponentInfo>& component_info() const { return component_info_; }
+  std::vector<ChromeLatencyInfo_ComponentInfo>* mutable_component_info() { return &component_info_; }
+  int component_info_size() const;
+  void clear_component_info();
+  ChromeLatencyInfo_ComponentInfo* add_component_info();
+
+  bool has_is_coalesced() const { return _has_field_[5]; }
+  bool is_coalesced() const { return is_coalesced_; }
+  void set_is_coalesced(bool value) { is_coalesced_ = value; _has_field_.set(5); }
+
+  bool has_gesture_scroll_id() const { return _has_field_[6]; }
+  int64_t gesture_scroll_id() const { return gesture_scroll_id_; }
+  void set_gesture_scroll_id(int64_t value) { gesture_scroll_id_ = value; _has_field_.set(6); }
+
+ private:
+  int64_t trace_id_{};
+  ChromeLatencyInfo_Step step_{};
+  int32_t frame_tree_node_id_{};
+  std::vector<ChromeLatencyInfo_ComponentInfo> component_info_;
+  bool is_coalesced_{};
+  int64_t gesture_scroll_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<7> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ChromeLatencyInfo_ComponentInfo : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kComponentTypeFieldNumber = 1,
+    kTimeUsFieldNumber = 2,
+  };
+
+  ChromeLatencyInfo_ComponentInfo();
+  ~ChromeLatencyInfo_ComponentInfo() override;
+  ChromeLatencyInfo_ComponentInfo(ChromeLatencyInfo_ComponentInfo&&) noexcept;
+  ChromeLatencyInfo_ComponentInfo& operator=(ChromeLatencyInfo_ComponentInfo&&);
+  ChromeLatencyInfo_ComponentInfo(const ChromeLatencyInfo_ComponentInfo&);
+  ChromeLatencyInfo_ComponentInfo& operator=(const ChromeLatencyInfo_ComponentInfo&);
+  bool operator==(const ChromeLatencyInfo_ComponentInfo&) const;
+  bool operator!=(const ChromeLatencyInfo_ComponentInfo& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_component_type() const { return _has_field_[1]; }
+  ChromeLatencyInfo_LatencyComponentType component_type() const { return component_type_; }
+  void set_component_type(ChromeLatencyInfo_LatencyComponentType value) { component_type_ = value; _has_field_.set(1); }
+
+  bool has_time_us() const { return _has_field_[2]; }
+  uint64_t time_us() const { return time_us_; }
+  void set_time_us(uint64_t value) { time_us_ = value; _has_field_.set(2); }
+
+ private:
+  ChromeLatencyInfo_LatencyComponentType component_type_{};
+  uint64_t time_us_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LATENCY_INFO_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_legacy_ipc.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LEGACY_IPC_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LEGACY_IPC_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeLegacyIpc;
+enum ChromeLegacyIpc_MessageClass : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeLegacyIpc_MessageClass : int {
+  ChromeLegacyIpc_MessageClass_CLASS_UNSPECIFIED = 0,
+  ChromeLegacyIpc_MessageClass_CLASS_AUTOMATION = 1,
+  ChromeLegacyIpc_MessageClass_CLASS_FRAME = 2,
+  ChromeLegacyIpc_MessageClass_CLASS_PAGE = 3,
+  ChromeLegacyIpc_MessageClass_CLASS_VIEW = 4,
+  ChromeLegacyIpc_MessageClass_CLASS_WIDGET = 5,
+  ChromeLegacyIpc_MessageClass_CLASS_INPUT = 6,
+  ChromeLegacyIpc_MessageClass_CLASS_TEST = 7,
+  ChromeLegacyIpc_MessageClass_CLASS_WORKER = 8,
+  ChromeLegacyIpc_MessageClass_CLASS_NACL = 9,
+  ChromeLegacyIpc_MessageClass_CLASS_GPU_CHANNEL = 10,
+  ChromeLegacyIpc_MessageClass_CLASS_MEDIA = 11,
+  ChromeLegacyIpc_MessageClass_CLASS_PPAPI = 12,
+  ChromeLegacyIpc_MessageClass_CLASS_CHROME = 13,
+  ChromeLegacyIpc_MessageClass_CLASS_DRAG = 14,
+  ChromeLegacyIpc_MessageClass_CLASS_PRINT = 15,
+  ChromeLegacyIpc_MessageClass_CLASS_EXTENSION = 16,
+  ChromeLegacyIpc_MessageClass_CLASS_TEXT_INPUT_CLIENT = 17,
+  ChromeLegacyIpc_MessageClass_CLASS_BLINK_TEST = 18,
+  ChromeLegacyIpc_MessageClass_CLASS_ACCESSIBILITY = 19,
+  ChromeLegacyIpc_MessageClass_CLASS_PRERENDER = 20,
+  ChromeLegacyIpc_MessageClass_CLASS_CHROMOTING = 21,
+  ChromeLegacyIpc_MessageClass_CLASS_BROWSER_PLUGIN = 22,
+  ChromeLegacyIpc_MessageClass_CLASS_ANDROID_WEB_VIEW = 23,
+  ChromeLegacyIpc_MessageClass_CLASS_NACL_HOST = 24,
+  ChromeLegacyIpc_MessageClass_CLASS_ENCRYPTED_MEDIA = 25,
+  ChromeLegacyIpc_MessageClass_CLASS_CAST = 26,
+  ChromeLegacyIpc_MessageClass_CLASS_GIN_JAVA_BRIDGE = 27,
+  ChromeLegacyIpc_MessageClass_CLASS_CHROME_UTILITY_PRINTING = 28,
+  ChromeLegacyIpc_MessageClass_CLASS_OZONE_GPU = 29,
+  ChromeLegacyIpc_MessageClass_CLASS_WEB_TEST = 30,
+  ChromeLegacyIpc_MessageClass_CLASS_NETWORK_HINTS = 31,
+  ChromeLegacyIpc_MessageClass_CLASS_EXTENSIONS_GUEST_VIEW = 32,
+  ChromeLegacyIpc_MessageClass_CLASS_GUEST_VIEW = 33,
+  ChromeLegacyIpc_MessageClass_CLASS_MEDIA_PLAYER_DELEGATE = 34,
+  ChromeLegacyIpc_MessageClass_CLASS_EXTENSION_WORKER = 35,
+  ChromeLegacyIpc_MessageClass_CLASS_SUBRESOURCE_FILTER = 36,
+  ChromeLegacyIpc_MessageClass_CLASS_UNFREEZABLE_FRAME = 37,
+};
+
+class PERFETTO_EXPORT ChromeLegacyIpc : public ::protozero::CppMessageObj {
+ public:
+  using MessageClass = ChromeLegacyIpc_MessageClass;
+  static constexpr auto CLASS_UNSPECIFIED = ChromeLegacyIpc_MessageClass_CLASS_UNSPECIFIED;
+  static constexpr auto CLASS_AUTOMATION = ChromeLegacyIpc_MessageClass_CLASS_AUTOMATION;
+  static constexpr auto CLASS_FRAME = ChromeLegacyIpc_MessageClass_CLASS_FRAME;
+  static constexpr auto CLASS_PAGE = ChromeLegacyIpc_MessageClass_CLASS_PAGE;
+  static constexpr auto CLASS_VIEW = ChromeLegacyIpc_MessageClass_CLASS_VIEW;
+  static constexpr auto CLASS_WIDGET = ChromeLegacyIpc_MessageClass_CLASS_WIDGET;
+  static constexpr auto CLASS_INPUT = ChromeLegacyIpc_MessageClass_CLASS_INPUT;
+  static constexpr auto CLASS_TEST = ChromeLegacyIpc_MessageClass_CLASS_TEST;
+  static constexpr auto CLASS_WORKER = ChromeLegacyIpc_MessageClass_CLASS_WORKER;
+  static constexpr auto CLASS_NACL = ChromeLegacyIpc_MessageClass_CLASS_NACL;
+  static constexpr auto CLASS_GPU_CHANNEL = ChromeLegacyIpc_MessageClass_CLASS_GPU_CHANNEL;
+  static constexpr auto CLASS_MEDIA = ChromeLegacyIpc_MessageClass_CLASS_MEDIA;
+  static constexpr auto CLASS_PPAPI = ChromeLegacyIpc_MessageClass_CLASS_PPAPI;
+  static constexpr auto CLASS_CHROME = ChromeLegacyIpc_MessageClass_CLASS_CHROME;
+  static constexpr auto CLASS_DRAG = ChromeLegacyIpc_MessageClass_CLASS_DRAG;
+  static constexpr auto CLASS_PRINT = ChromeLegacyIpc_MessageClass_CLASS_PRINT;
+  static constexpr auto CLASS_EXTENSION = ChromeLegacyIpc_MessageClass_CLASS_EXTENSION;
+  static constexpr auto CLASS_TEXT_INPUT_CLIENT = ChromeLegacyIpc_MessageClass_CLASS_TEXT_INPUT_CLIENT;
+  static constexpr auto CLASS_BLINK_TEST = ChromeLegacyIpc_MessageClass_CLASS_BLINK_TEST;
+  static constexpr auto CLASS_ACCESSIBILITY = ChromeLegacyIpc_MessageClass_CLASS_ACCESSIBILITY;
+  static constexpr auto CLASS_PRERENDER = ChromeLegacyIpc_MessageClass_CLASS_PRERENDER;
+  static constexpr auto CLASS_CHROMOTING = ChromeLegacyIpc_MessageClass_CLASS_CHROMOTING;
+  static constexpr auto CLASS_BROWSER_PLUGIN = ChromeLegacyIpc_MessageClass_CLASS_BROWSER_PLUGIN;
+  static constexpr auto CLASS_ANDROID_WEB_VIEW = ChromeLegacyIpc_MessageClass_CLASS_ANDROID_WEB_VIEW;
+  static constexpr auto CLASS_NACL_HOST = ChromeLegacyIpc_MessageClass_CLASS_NACL_HOST;
+  static constexpr auto CLASS_ENCRYPTED_MEDIA = ChromeLegacyIpc_MessageClass_CLASS_ENCRYPTED_MEDIA;
+  static constexpr auto CLASS_CAST = ChromeLegacyIpc_MessageClass_CLASS_CAST;
+  static constexpr auto CLASS_GIN_JAVA_BRIDGE = ChromeLegacyIpc_MessageClass_CLASS_GIN_JAVA_BRIDGE;
+  static constexpr auto CLASS_CHROME_UTILITY_PRINTING = ChromeLegacyIpc_MessageClass_CLASS_CHROME_UTILITY_PRINTING;
+  static constexpr auto CLASS_OZONE_GPU = ChromeLegacyIpc_MessageClass_CLASS_OZONE_GPU;
+  static constexpr auto CLASS_WEB_TEST = ChromeLegacyIpc_MessageClass_CLASS_WEB_TEST;
+  static constexpr auto CLASS_NETWORK_HINTS = ChromeLegacyIpc_MessageClass_CLASS_NETWORK_HINTS;
+  static constexpr auto CLASS_EXTENSIONS_GUEST_VIEW = ChromeLegacyIpc_MessageClass_CLASS_EXTENSIONS_GUEST_VIEW;
+  static constexpr auto CLASS_GUEST_VIEW = ChromeLegacyIpc_MessageClass_CLASS_GUEST_VIEW;
+  static constexpr auto CLASS_MEDIA_PLAYER_DELEGATE = ChromeLegacyIpc_MessageClass_CLASS_MEDIA_PLAYER_DELEGATE;
+  static constexpr auto CLASS_EXTENSION_WORKER = ChromeLegacyIpc_MessageClass_CLASS_EXTENSION_WORKER;
+  static constexpr auto CLASS_SUBRESOURCE_FILTER = ChromeLegacyIpc_MessageClass_CLASS_SUBRESOURCE_FILTER;
+  static constexpr auto CLASS_UNFREEZABLE_FRAME = ChromeLegacyIpc_MessageClass_CLASS_UNFREEZABLE_FRAME;
+  static constexpr auto MessageClass_MIN = ChromeLegacyIpc_MessageClass_CLASS_UNSPECIFIED;
+  static constexpr auto MessageClass_MAX = ChromeLegacyIpc_MessageClass_CLASS_UNFREEZABLE_FRAME;
+  enum FieldNumbers {
+    kMessageClassFieldNumber = 1,
+    kMessageLineFieldNumber = 2,
+  };
+
+  ChromeLegacyIpc();
+  ~ChromeLegacyIpc() override;
+  ChromeLegacyIpc(ChromeLegacyIpc&&) noexcept;
+  ChromeLegacyIpc& operator=(ChromeLegacyIpc&&);
+  ChromeLegacyIpc(const ChromeLegacyIpc&);
+  ChromeLegacyIpc& operator=(const ChromeLegacyIpc&);
+  bool operator==(const ChromeLegacyIpc&) const;
+  bool operator!=(const ChromeLegacyIpc& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_message_class() const { return _has_field_[1]; }
+  ChromeLegacyIpc_MessageClass message_class() const { return message_class_; }
+  void set_message_class(ChromeLegacyIpc_MessageClass value) { message_class_ = value; _has_field_.set(1); }
+
+  bool has_message_line() const { return _has_field_[2]; }
+  uint32_t message_line() const { return message_line_; }
+  void set_message_line(uint32_t value) { message_line_ = value; _has_field_.set(2); }
+
+ private:
+  ChromeLegacyIpc_MessageClass message_class_{};
+  uint32_t message_line_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_LEGACY_IPC_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_message_pump.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_MESSAGE_PUMP_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_MESSAGE_PUMP_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeMessagePump;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT ChromeMessagePump : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kSentMessagesInQueueFieldNumber = 1,
+    kIoHandlerLocationIidFieldNumber = 2,
+  };
+
+  ChromeMessagePump();
+  ~ChromeMessagePump() override;
+  ChromeMessagePump(ChromeMessagePump&&) noexcept;
+  ChromeMessagePump& operator=(ChromeMessagePump&&);
+  ChromeMessagePump(const ChromeMessagePump&);
+  ChromeMessagePump& operator=(const ChromeMessagePump&);
+  bool operator==(const ChromeMessagePump&) const;
+  bool operator!=(const ChromeMessagePump& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_sent_messages_in_queue() const { return _has_field_[1]; }
+  bool sent_messages_in_queue() const { return sent_messages_in_queue_; }
+  void set_sent_messages_in_queue(bool value) { sent_messages_in_queue_ = value; _has_field_.set(1); }
+
+  bool has_io_handler_location_iid() const { return _has_field_[2]; }
+  uint64_t io_handler_location_iid() const { return io_handler_location_iid_; }
+  void set_io_handler_location_iid(uint64_t value) { io_handler_location_iid_ = value; _has_field_.set(2); }
+
+ private:
+  bool sent_messages_in_queue_{};
+  uint64_t io_handler_location_iid_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_MESSAGE_PUMP_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_mojo_event_info.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_MOJO_EVENT_INFO_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_MOJO_EVENT_INFO_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeMojoEventInfo;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT ChromeMojoEventInfo : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kWatcherNotifyInterfaceTagFieldNumber = 1,
+    kIpcHashFieldNumber = 2,
+    kMojoInterfaceTagFieldNumber = 3,
+  };
+
+  ChromeMojoEventInfo();
+  ~ChromeMojoEventInfo() override;
+  ChromeMojoEventInfo(ChromeMojoEventInfo&&) noexcept;
+  ChromeMojoEventInfo& operator=(ChromeMojoEventInfo&&);
+  ChromeMojoEventInfo(const ChromeMojoEventInfo&);
+  ChromeMojoEventInfo& operator=(const ChromeMojoEventInfo&);
+  bool operator==(const ChromeMojoEventInfo&) const;
+  bool operator!=(const ChromeMojoEventInfo& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_watcher_notify_interface_tag() const { return _has_field_[1]; }
+  const std::string& watcher_notify_interface_tag() const { return watcher_notify_interface_tag_; }
+  void set_watcher_notify_interface_tag(const std::string& value) { watcher_notify_interface_tag_ = value; _has_field_.set(1); }
+
+  bool has_ipc_hash() const { return _has_field_[2]; }
+  uint32_t ipc_hash() const { return ipc_hash_; }
+  void set_ipc_hash(uint32_t value) { ipc_hash_ = value; _has_field_.set(2); }
+
+  bool has_mojo_interface_tag() const { return _has_field_[3]; }
+  const std::string& mojo_interface_tag() const { return mojo_interface_tag_; }
+  void set_mojo_interface_tag(const std::string& value) { mojo_interface_tag_ = value; _has_field_.set(3); }
+
+ private:
+  std::string watcher_notify_interface_tag_{};
+  uint32_t ipc_hash_{};
+  std::string mojo_interface_tag_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_MOJO_EVENT_INFO_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_process_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_PROCESS_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_PROCESS_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeProcessDescriptor;
+enum ChromeProcessDescriptor_ProcessType : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeProcessDescriptor_ProcessType : int {
+  ChromeProcessDescriptor_ProcessType_PROCESS_UNSPECIFIED = 0,
+  ChromeProcessDescriptor_ProcessType_PROCESS_BROWSER = 1,
+  ChromeProcessDescriptor_ProcessType_PROCESS_RENDERER = 2,
+  ChromeProcessDescriptor_ProcessType_PROCESS_UTILITY = 3,
+  ChromeProcessDescriptor_ProcessType_PROCESS_ZYGOTE = 4,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SANDBOX_HELPER = 5,
+  ChromeProcessDescriptor_ProcessType_PROCESS_GPU = 6,
+  ChromeProcessDescriptor_ProcessType_PROCESS_PPAPI_PLUGIN = 7,
+  ChromeProcessDescriptor_ProcessType_PROCESS_PPAPI_BROKER = 8,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_NETWORK = 9,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_TRACING = 10,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_STORAGE = 11,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_AUDIO = 12,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_DATA_DECODER = 13,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_UTIL_WIN = 14,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PROXY_RESOLVER = 15,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CDM = 16,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_VIDEO_CAPTURE = 17,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_UNZIPPER = 18,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_MIRRORING = 19,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_FILEPATCHER = 20,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_TTS = 21,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PRINTING = 22,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_QUARANTINE = 23,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CROS_LOCALSEARCH = 24,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CROS_ASSISTANT_AUDIO_DECODER = 25,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_FILEUTIL = 26,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PRINTCOMPOSITOR = 27,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PAINTPREVIEW = 28,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SPEECHRECOGNITION = 29,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_XRDEVICE = 30,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_READICON = 31,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_LANGUAGEDETECTION = 32,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SHARING = 33,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_MEDIAPARSER = 34,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_QRCODEGENERATOR = 35,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PROFILEIMPORT = 36,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_IME = 37,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_RECORDING = 38,
+  ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SHAPEDETECTION = 39,
+};
+
+class PERFETTO_EXPORT ChromeProcessDescriptor : public ::protozero::CppMessageObj {
+ public:
+  using ProcessType = ChromeProcessDescriptor_ProcessType;
+  static constexpr auto PROCESS_UNSPECIFIED = ChromeProcessDescriptor_ProcessType_PROCESS_UNSPECIFIED;
+  static constexpr auto PROCESS_BROWSER = ChromeProcessDescriptor_ProcessType_PROCESS_BROWSER;
+  static constexpr auto PROCESS_RENDERER = ChromeProcessDescriptor_ProcessType_PROCESS_RENDERER;
+  static constexpr auto PROCESS_UTILITY = ChromeProcessDescriptor_ProcessType_PROCESS_UTILITY;
+  static constexpr auto PROCESS_ZYGOTE = ChromeProcessDescriptor_ProcessType_PROCESS_ZYGOTE;
+  static constexpr auto PROCESS_SANDBOX_HELPER = ChromeProcessDescriptor_ProcessType_PROCESS_SANDBOX_HELPER;
+  static constexpr auto PROCESS_GPU = ChromeProcessDescriptor_ProcessType_PROCESS_GPU;
+  static constexpr auto PROCESS_PPAPI_PLUGIN = ChromeProcessDescriptor_ProcessType_PROCESS_PPAPI_PLUGIN;
+  static constexpr auto PROCESS_PPAPI_BROKER = ChromeProcessDescriptor_ProcessType_PROCESS_PPAPI_BROKER;
+  static constexpr auto PROCESS_SERVICE_NETWORK = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_NETWORK;
+  static constexpr auto PROCESS_SERVICE_TRACING = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_TRACING;
+  static constexpr auto PROCESS_SERVICE_STORAGE = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_STORAGE;
+  static constexpr auto PROCESS_SERVICE_AUDIO = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_AUDIO;
+  static constexpr auto PROCESS_SERVICE_DATA_DECODER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_DATA_DECODER;
+  static constexpr auto PROCESS_SERVICE_UTIL_WIN = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_UTIL_WIN;
+  static constexpr auto PROCESS_SERVICE_PROXY_RESOLVER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PROXY_RESOLVER;
+  static constexpr auto PROCESS_SERVICE_CDM = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CDM;
+  static constexpr auto PROCESS_SERVICE_VIDEO_CAPTURE = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_VIDEO_CAPTURE;
+  static constexpr auto PROCESS_SERVICE_UNZIPPER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_UNZIPPER;
+  static constexpr auto PROCESS_SERVICE_MIRRORING = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_MIRRORING;
+  static constexpr auto PROCESS_SERVICE_FILEPATCHER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_FILEPATCHER;
+  static constexpr auto PROCESS_SERVICE_TTS = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_TTS;
+  static constexpr auto PROCESS_SERVICE_PRINTING = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PRINTING;
+  static constexpr auto PROCESS_SERVICE_QUARANTINE = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_QUARANTINE;
+  static constexpr auto PROCESS_SERVICE_CROS_LOCALSEARCH = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CROS_LOCALSEARCH;
+  static constexpr auto PROCESS_SERVICE_CROS_ASSISTANT_AUDIO_DECODER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_CROS_ASSISTANT_AUDIO_DECODER;
+  static constexpr auto PROCESS_SERVICE_FILEUTIL = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_FILEUTIL;
+  static constexpr auto PROCESS_SERVICE_PRINTCOMPOSITOR = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PRINTCOMPOSITOR;
+  static constexpr auto PROCESS_SERVICE_PAINTPREVIEW = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PAINTPREVIEW;
+  static constexpr auto PROCESS_SERVICE_SPEECHRECOGNITION = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SPEECHRECOGNITION;
+  static constexpr auto PROCESS_SERVICE_XRDEVICE = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_XRDEVICE;
+  static constexpr auto PROCESS_SERVICE_READICON = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_READICON;
+  static constexpr auto PROCESS_SERVICE_LANGUAGEDETECTION = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_LANGUAGEDETECTION;
+  static constexpr auto PROCESS_SERVICE_SHARING = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SHARING;
+  static constexpr auto PROCESS_SERVICE_MEDIAPARSER = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_MEDIAPARSER;
+  static constexpr auto PROCESS_SERVICE_QRCODEGENERATOR = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_QRCODEGENERATOR;
+  static constexpr auto PROCESS_SERVICE_PROFILEIMPORT = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_PROFILEIMPORT;
+  static constexpr auto PROCESS_SERVICE_IME = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_IME;
+  static constexpr auto PROCESS_SERVICE_RECORDING = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_RECORDING;
+  static constexpr auto PROCESS_SERVICE_SHAPEDETECTION = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SHAPEDETECTION;
+  static constexpr auto ProcessType_MIN = ChromeProcessDescriptor_ProcessType_PROCESS_UNSPECIFIED;
+  static constexpr auto ProcessType_MAX = ChromeProcessDescriptor_ProcessType_PROCESS_SERVICE_SHAPEDETECTION;
+  enum FieldNumbers {
+    kProcessTypeFieldNumber = 1,
+    kProcessPriorityFieldNumber = 2,
+    kLegacySortIndexFieldNumber = 3,
+    kHostAppPackageNameFieldNumber = 4,
+    kCrashTraceIdFieldNumber = 5,
+  };
+
+  ChromeProcessDescriptor();
+  ~ChromeProcessDescriptor() override;
+  ChromeProcessDescriptor(ChromeProcessDescriptor&&) noexcept;
+  ChromeProcessDescriptor& operator=(ChromeProcessDescriptor&&);
+  ChromeProcessDescriptor(const ChromeProcessDescriptor&);
+  ChromeProcessDescriptor& operator=(const ChromeProcessDescriptor&);
+  bool operator==(const ChromeProcessDescriptor&) const;
+  bool operator!=(const ChromeProcessDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_process_type() const { return _has_field_[1]; }
+  ChromeProcessDescriptor_ProcessType process_type() const { return process_type_; }
+  void set_process_type(ChromeProcessDescriptor_ProcessType value) { process_type_ = value; _has_field_.set(1); }
+
+  bool has_process_priority() const { return _has_field_[2]; }
+  int32_t process_priority() const { return process_priority_; }
+  void set_process_priority(int32_t value) { process_priority_ = value; _has_field_.set(2); }
+
+  bool has_legacy_sort_index() const { return _has_field_[3]; }
+  int32_t legacy_sort_index() const { return legacy_sort_index_; }
+  void set_legacy_sort_index(int32_t value) { legacy_sort_index_ = value; _has_field_.set(3); }
+
+  bool has_host_app_package_name() const { return _has_field_[4]; }
+  const std::string& host_app_package_name() const { return host_app_package_name_; }
+  void set_host_app_package_name(const std::string& value) { host_app_package_name_ = value; _has_field_.set(4); }
+
+  bool has_crash_trace_id() const { return _has_field_[5]; }
+  uint64_t crash_trace_id() const { return crash_trace_id_; }
+  void set_crash_trace_id(uint64_t value) { crash_trace_id_ = value; _has_field_.set(5); }
+
+ private:
+  ChromeProcessDescriptor_ProcessType process_type_{};
+  int32_t process_priority_{};
+  int32_t legacy_sort_index_{};
+  std::string host_app_package_name_{};
+  uint64_t crash_trace_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_PROCESS_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_renderer_scheduler_state.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_RENDERER_SCHEDULER_STATE_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_RENDERER_SCHEDULER_STATE_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeRendererSchedulerState;
+enum ChromeRAILMode : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeRAILMode : int {
+  RAIL_MODE_NONE = 0,
+  RAIL_MODE_RESPONSE = 1,
+  RAIL_MODE_ANIMATION = 2,
+  RAIL_MODE_IDLE = 3,
+  RAIL_MODE_LOAD = 4,
+};
+
+class PERFETTO_EXPORT ChromeRendererSchedulerState : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kRailModeFieldNumber = 1,
+  };
+
+  ChromeRendererSchedulerState();
+  ~ChromeRendererSchedulerState() override;
+  ChromeRendererSchedulerState(ChromeRendererSchedulerState&&) noexcept;
+  ChromeRendererSchedulerState& operator=(ChromeRendererSchedulerState&&);
+  ChromeRendererSchedulerState(const ChromeRendererSchedulerState&);
+  ChromeRendererSchedulerState& operator=(const ChromeRendererSchedulerState&);
+  bool operator==(const ChromeRendererSchedulerState&) const;
+  bool operator!=(const ChromeRendererSchedulerState& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_rail_mode() const { return _has_field_[1]; }
+  ChromeRAILMode rail_mode() const { return rail_mode_; }
+  void set_rail_mode(ChromeRAILMode value) { rail_mode_ = value; _has_field_.set(1); }
+
+ private:
+  ChromeRAILMode rail_mode_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_RENDERER_SCHEDULER_STATE_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_thread_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_THREAD_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_THREAD_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeThreadDescriptor;
+enum ChromeThreadDescriptor_ThreadType : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeThreadDescriptor_ThreadType : int {
+  ChromeThreadDescriptor_ThreadType_THREAD_UNSPECIFIED = 0,
+  ChromeThreadDescriptor_ThreadType_THREAD_MAIN = 1,
+  ChromeThreadDescriptor_ThreadType_THREAD_IO = 2,
+  ChromeThreadDescriptor_ThreadType_THREAD_POOL_BG_WORKER = 3,
+  ChromeThreadDescriptor_ThreadType_THREAD_POOL_FG_WORKER = 4,
+  ChromeThreadDescriptor_ThreadType_THREAD_POOL_FG_BLOCKING = 5,
+  ChromeThreadDescriptor_ThreadType_THREAD_POOL_BG_BLOCKING = 6,
+  ChromeThreadDescriptor_ThreadType_THREAD_POOL_SERVICE = 7,
+  ChromeThreadDescriptor_ThreadType_THREAD_COMPOSITOR = 8,
+  ChromeThreadDescriptor_ThreadType_THREAD_VIZ_COMPOSITOR = 9,
+  ChromeThreadDescriptor_ThreadType_THREAD_COMPOSITOR_WORKER = 10,
+  ChromeThreadDescriptor_ThreadType_THREAD_SERVICE_WORKER = 11,
+  ChromeThreadDescriptor_ThreadType_THREAD_NETWORK_SERVICE = 12,
+  ChromeThreadDescriptor_ThreadType_THREAD_CHILD_IO = 13,
+  ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_IO = 14,
+  ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_MAIN = 15,
+  ChromeThreadDescriptor_ThreadType_THREAD_RENDERER_MAIN = 16,
+  ChromeThreadDescriptor_ThreadType_THREAD_UTILITY_MAIN = 17,
+  ChromeThreadDescriptor_ThreadType_THREAD_GPU_MAIN = 18,
+  ChromeThreadDescriptor_ThreadType_THREAD_CACHE_BLOCKFILE = 19,
+  ChromeThreadDescriptor_ThreadType_THREAD_MEDIA = 20,
+  ChromeThreadDescriptor_ThreadType_THREAD_AUDIO_OUTPUTDEVICE = 21,
+  ChromeThreadDescriptor_ThreadType_THREAD_AUDIO_INPUTDEVICE = 22,
+  ChromeThreadDescriptor_ThreadType_THREAD_GPU_MEMORY = 23,
+  ChromeThreadDescriptor_ThreadType_THREAD_GPU_VSYNC = 24,
+  ChromeThreadDescriptor_ThreadType_THREAD_DXA_VIDEODECODER = 25,
+  ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_WATCHDOG = 26,
+  ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_NETWORK = 27,
+  ChromeThreadDescriptor_ThreadType_THREAD_WINDOW_OWNER = 28,
+  ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_SIGNALING = 29,
+  ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_WORKER = 30,
+  ChromeThreadDescriptor_ThreadType_THREAD_PPAPI_MAIN = 31,
+  ChromeThreadDescriptor_ThreadType_THREAD_GPU_WATCHDOG = 32,
+  ChromeThreadDescriptor_ThreadType_THREAD_SWAPPER = 33,
+  ChromeThreadDescriptor_ThreadType_THREAD_GAMEPAD_POLLING = 34,
+  ChromeThreadDescriptor_ThreadType_THREAD_WEBCRYPTO = 35,
+  ChromeThreadDescriptor_ThreadType_THREAD_DATABASE = 36,
+  ChromeThreadDescriptor_ThreadType_THREAD_PROXYRESOLVER = 37,
+  ChromeThreadDescriptor_ThreadType_THREAD_DEVTOOLSADB = 38,
+  ChromeThreadDescriptor_ThreadType_THREAD_NETWORKCONFIGWATCHER = 39,
+  ChromeThreadDescriptor_ThreadType_THREAD_WASAPI_RENDER = 40,
+  ChromeThreadDescriptor_ThreadType_THREAD_MEMORY_INFRA = 50,
+  ChromeThreadDescriptor_ThreadType_THREAD_SAMPLING_PROFILER = 51,
+};
+
+class PERFETTO_EXPORT ChromeThreadDescriptor : public ::protozero::CppMessageObj {
+ public:
+  using ThreadType = ChromeThreadDescriptor_ThreadType;
+  static constexpr auto THREAD_UNSPECIFIED = ChromeThreadDescriptor_ThreadType_THREAD_UNSPECIFIED;
+  static constexpr auto THREAD_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_MAIN;
+  static constexpr auto THREAD_IO = ChromeThreadDescriptor_ThreadType_THREAD_IO;
+  static constexpr auto THREAD_POOL_BG_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_POOL_BG_WORKER;
+  static constexpr auto THREAD_POOL_FG_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_POOL_FG_WORKER;
+  static constexpr auto THREAD_POOL_FG_BLOCKING = ChromeThreadDescriptor_ThreadType_THREAD_POOL_FG_BLOCKING;
+  static constexpr auto THREAD_POOL_BG_BLOCKING = ChromeThreadDescriptor_ThreadType_THREAD_POOL_BG_BLOCKING;
+  static constexpr auto THREAD_POOL_SERVICE = ChromeThreadDescriptor_ThreadType_THREAD_POOL_SERVICE;
+  static constexpr auto THREAD_COMPOSITOR = ChromeThreadDescriptor_ThreadType_THREAD_COMPOSITOR;
+  static constexpr auto THREAD_VIZ_COMPOSITOR = ChromeThreadDescriptor_ThreadType_THREAD_VIZ_COMPOSITOR;
+  static constexpr auto THREAD_COMPOSITOR_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_COMPOSITOR_WORKER;
+  static constexpr auto THREAD_SERVICE_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_SERVICE_WORKER;
+  static constexpr auto THREAD_NETWORK_SERVICE = ChromeThreadDescriptor_ThreadType_THREAD_NETWORK_SERVICE;
+  static constexpr auto THREAD_CHILD_IO = ChromeThreadDescriptor_ThreadType_THREAD_CHILD_IO;
+  static constexpr auto THREAD_BROWSER_IO = ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_IO;
+  static constexpr auto THREAD_BROWSER_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_MAIN;
+  static constexpr auto THREAD_RENDERER_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_RENDERER_MAIN;
+  static constexpr auto THREAD_UTILITY_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_UTILITY_MAIN;
+  static constexpr auto THREAD_GPU_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_GPU_MAIN;
+  static constexpr auto THREAD_CACHE_BLOCKFILE = ChromeThreadDescriptor_ThreadType_THREAD_CACHE_BLOCKFILE;
+  static constexpr auto THREAD_MEDIA = ChromeThreadDescriptor_ThreadType_THREAD_MEDIA;
+  static constexpr auto THREAD_AUDIO_OUTPUTDEVICE = ChromeThreadDescriptor_ThreadType_THREAD_AUDIO_OUTPUTDEVICE;
+  static constexpr auto THREAD_AUDIO_INPUTDEVICE = ChromeThreadDescriptor_ThreadType_THREAD_AUDIO_INPUTDEVICE;
+  static constexpr auto THREAD_GPU_MEMORY = ChromeThreadDescriptor_ThreadType_THREAD_GPU_MEMORY;
+  static constexpr auto THREAD_GPU_VSYNC = ChromeThreadDescriptor_ThreadType_THREAD_GPU_VSYNC;
+  static constexpr auto THREAD_DXA_VIDEODECODER = ChromeThreadDescriptor_ThreadType_THREAD_DXA_VIDEODECODER;
+  static constexpr auto THREAD_BROWSER_WATCHDOG = ChromeThreadDescriptor_ThreadType_THREAD_BROWSER_WATCHDOG;
+  static constexpr auto THREAD_WEBRTC_NETWORK = ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_NETWORK;
+  static constexpr auto THREAD_WINDOW_OWNER = ChromeThreadDescriptor_ThreadType_THREAD_WINDOW_OWNER;
+  static constexpr auto THREAD_WEBRTC_SIGNALING = ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_SIGNALING;
+  static constexpr auto THREAD_WEBRTC_WORKER = ChromeThreadDescriptor_ThreadType_THREAD_WEBRTC_WORKER;
+  static constexpr auto THREAD_PPAPI_MAIN = ChromeThreadDescriptor_ThreadType_THREAD_PPAPI_MAIN;
+  static constexpr auto THREAD_GPU_WATCHDOG = ChromeThreadDescriptor_ThreadType_THREAD_GPU_WATCHDOG;
+  static constexpr auto THREAD_SWAPPER = ChromeThreadDescriptor_ThreadType_THREAD_SWAPPER;
+  static constexpr auto THREAD_GAMEPAD_POLLING = ChromeThreadDescriptor_ThreadType_THREAD_GAMEPAD_POLLING;
+  static constexpr auto THREAD_WEBCRYPTO = ChromeThreadDescriptor_ThreadType_THREAD_WEBCRYPTO;
+  static constexpr auto THREAD_DATABASE = ChromeThreadDescriptor_ThreadType_THREAD_DATABASE;
+  static constexpr auto THREAD_PROXYRESOLVER = ChromeThreadDescriptor_ThreadType_THREAD_PROXYRESOLVER;
+  static constexpr auto THREAD_DEVTOOLSADB = ChromeThreadDescriptor_ThreadType_THREAD_DEVTOOLSADB;
+  static constexpr auto THREAD_NETWORKCONFIGWATCHER = ChromeThreadDescriptor_ThreadType_THREAD_NETWORKCONFIGWATCHER;
+  static constexpr auto THREAD_WASAPI_RENDER = ChromeThreadDescriptor_ThreadType_THREAD_WASAPI_RENDER;
+  static constexpr auto THREAD_MEMORY_INFRA = ChromeThreadDescriptor_ThreadType_THREAD_MEMORY_INFRA;
+  static constexpr auto THREAD_SAMPLING_PROFILER = ChromeThreadDescriptor_ThreadType_THREAD_SAMPLING_PROFILER;
+  static constexpr auto ThreadType_MIN = ChromeThreadDescriptor_ThreadType_THREAD_UNSPECIFIED;
+  static constexpr auto ThreadType_MAX = ChromeThreadDescriptor_ThreadType_THREAD_SAMPLING_PROFILER;
+  enum FieldNumbers {
+    kThreadTypeFieldNumber = 1,
+    kLegacySortIndexFieldNumber = 2,
+  };
+
+  ChromeThreadDescriptor();
+  ~ChromeThreadDescriptor() override;
+  ChromeThreadDescriptor(ChromeThreadDescriptor&&) noexcept;
+  ChromeThreadDescriptor& operator=(ChromeThreadDescriptor&&);
+  ChromeThreadDescriptor(const ChromeThreadDescriptor&);
+  ChromeThreadDescriptor& operator=(const ChromeThreadDescriptor&);
+  bool operator==(const ChromeThreadDescriptor&) const;
+  bool operator!=(const ChromeThreadDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_thread_type() const { return _has_field_[1]; }
+  ChromeThreadDescriptor_ThreadType thread_type() const { return thread_type_; }
+  void set_thread_type(ChromeThreadDescriptor_ThreadType value) { thread_type_ = value; _has_field_.set(1); }
+
+  bool has_legacy_sort_index() const { return _has_field_[2]; }
+  int32_t legacy_sort_index() const { return legacy_sort_index_; }
+  void set_legacy_sort_index(int32_t value) { legacy_sort_index_ = value; _has_field_.set(2); }
+
+ private:
+  ChromeThreadDescriptor_ThreadType thread_type_{};
+  int32_t legacy_sort_index_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_THREAD_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_user_event.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_USER_EVENT_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_USER_EVENT_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeUserEvent;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT ChromeUserEvent : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kActionFieldNumber = 1,
+    kActionHashFieldNumber = 2,
+  };
+
+  ChromeUserEvent();
+  ~ChromeUserEvent() override;
+  ChromeUserEvent(ChromeUserEvent&&) noexcept;
+  ChromeUserEvent& operator=(ChromeUserEvent&&);
+  ChromeUserEvent(const ChromeUserEvent&);
+  ChromeUserEvent& operator=(const ChromeUserEvent&);
+  bool operator==(const ChromeUserEvent&) const;
+  bool operator!=(const ChromeUserEvent& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_action() const { return _has_field_[1]; }
+  const std::string& action() const { return action_; }
+  void set_action(const std::string& value) { action_ = value; _has_field_.set(1); }
+
+  bool has_action_hash() const { return _has_field_[2]; }
+  uint64_t action_hash() const { return action_hash_; }
+  void set_action_hash(uint64_t value) { action_hash_ = value; _has_field_.set(2); }
+
+ private:
+  std::string action_{};
+  uint64_t action_hash_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_USER_EVENT_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/chrome_window_handle_event_info.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_WINDOW_HANDLE_EVENT_INFO_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_WINDOW_HANDLE_EVENT_INFO_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeWindowHandleEventInfo;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT ChromeWindowHandleEventInfo : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDpiFieldNumber = 1,
+    kMessageIdFieldNumber = 2,
+    kHwndPtrFieldNumber = 3,
+  };
+
+  ChromeWindowHandleEventInfo();
+  ~ChromeWindowHandleEventInfo() override;
+  ChromeWindowHandleEventInfo(ChromeWindowHandleEventInfo&&) noexcept;
+  ChromeWindowHandleEventInfo& operator=(ChromeWindowHandleEventInfo&&);
+  ChromeWindowHandleEventInfo(const ChromeWindowHandleEventInfo&);
+  ChromeWindowHandleEventInfo& operator=(const ChromeWindowHandleEventInfo&);
+  bool operator==(const ChromeWindowHandleEventInfo&) const;
+  bool operator!=(const ChromeWindowHandleEventInfo& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_dpi() const { return _has_field_[1]; }
+  uint32_t dpi() const { return dpi_; }
+  void set_dpi(uint32_t value) { dpi_ = value; _has_field_.set(1); }
+
+  bool has_message_id() const { return _has_field_[2]; }
+  uint32_t message_id() const { return message_id_; }
+  void set_message_id(uint32_t value) { message_id_ = value; _has_field_.set(2); }
+
+  bool has_hwnd_ptr() const { return _has_field_[3]; }
+  uint64_t hwnd_ptr() const { return hwnd_ptr_; }
+  void set_hwnd_ptr(uint64_t value) { hwnd_ptr_ = value; _has_field_.set(3); }
+
+ private:
+  uint32_t dpi_{};
+  uint32_t message_id_{};
+  uint64_t hwnd_ptr_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_CHROME_WINDOW_HANDLE_EVENT_INFO_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/counter_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_COUNTER_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_COUNTER_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class CounterDescriptor;
+enum CounterDescriptor_BuiltinCounterType : int;
+enum CounterDescriptor_Unit : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum CounterDescriptor_BuiltinCounterType : int {
+  CounterDescriptor_BuiltinCounterType_COUNTER_UNSPECIFIED = 0,
+  CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_TIME_NS = 1,
+  CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_INSTRUCTION_COUNT = 2,
+};
+enum CounterDescriptor_Unit : int {
+  CounterDescriptor_Unit_UNIT_UNSPECIFIED = 0,
+  CounterDescriptor_Unit_UNIT_TIME_NS = 1,
+  CounterDescriptor_Unit_UNIT_COUNT = 2,
+  CounterDescriptor_Unit_UNIT_SIZE_BYTES = 3,
+};
+
+class PERFETTO_EXPORT CounterDescriptor : public ::protozero::CppMessageObj {
+ public:
+  using BuiltinCounterType = CounterDescriptor_BuiltinCounterType;
+  static constexpr auto COUNTER_UNSPECIFIED = CounterDescriptor_BuiltinCounterType_COUNTER_UNSPECIFIED;
+  static constexpr auto COUNTER_THREAD_TIME_NS = CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_TIME_NS;
+  static constexpr auto COUNTER_THREAD_INSTRUCTION_COUNT = CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_INSTRUCTION_COUNT;
+  static constexpr auto BuiltinCounterType_MIN = CounterDescriptor_BuiltinCounterType_COUNTER_UNSPECIFIED;
+  static constexpr auto BuiltinCounterType_MAX = CounterDescriptor_BuiltinCounterType_COUNTER_THREAD_INSTRUCTION_COUNT;
+  using Unit = CounterDescriptor_Unit;
+  static constexpr auto UNIT_UNSPECIFIED = CounterDescriptor_Unit_UNIT_UNSPECIFIED;
+  static constexpr auto UNIT_TIME_NS = CounterDescriptor_Unit_UNIT_TIME_NS;
+  static constexpr auto UNIT_COUNT = CounterDescriptor_Unit_UNIT_COUNT;
+  static constexpr auto UNIT_SIZE_BYTES = CounterDescriptor_Unit_UNIT_SIZE_BYTES;
+  static constexpr auto Unit_MIN = CounterDescriptor_Unit_UNIT_UNSPECIFIED;
+  static constexpr auto Unit_MAX = CounterDescriptor_Unit_UNIT_SIZE_BYTES;
+  enum FieldNumbers {
+    kTypeFieldNumber = 1,
+    kCategoriesFieldNumber = 2,
+    kUnitFieldNumber = 3,
+    kUnitNameFieldNumber = 6,
+    kUnitMultiplierFieldNumber = 4,
+    kIsIncrementalFieldNumber = 5,
+  };
+
+  CounterDescriptor();
+  ~CounterDescriptor() override;
+  CounterDescriptor(CounterDescriptor&&) noexcept;
+  CounterDescriptor& operator=(CounterDescriptor&&);
+  CounterDescriptor(const CounterDescriptor&);
+  CounterDescriptor& operator=(const CounterDescriptor&);
+  bool operator==(const CounterDescriptor&) const;
+  bool operator!=(const CounterDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_type() const { return _has_field_[1]; }
+  CounterDescriptor_BuiltinCounterType type() const { return type_; }
+  void set_type(CounterDescriptor_BuiltinCounterType value) { type_ = value; _has_field_.set(1); }
+
+  const std::vector<std::string>& categories() const { return categories_; }
+  std::vector<std::string>* mutable_categories() { return &categories_; }
+  int categories_size() const { return static_cast<int>(categories_.size()); }
+  void clear_categories() { categories_.clear(); }
+  void add_categories(std::string value) { categories_.emplace_back(value); }
+  std::string* add_categories() { categories_.emplace_back(); return &categories_.back(); }
+
+  bool has_unit() const { return _has_field_[3]; }
+  CounterDescriptor_Unit unit() const { return unit_; }
+  void set_unit(CounterDescriptor_Unit value) { unit_ = value; _has_field_.set(3); }
+
+  bool has_unit_name() const { return _has_field_[6]; }
+  const std::string& unit_name() const { return unit_name_; }
+  void set_unit_name(const std::string& value) { unit_name_ = value; _has_field_.set(6); }
+
+  bool has_unit_multiplier() const { return _has_field_[4]; }
+  int64_t unit_multiplier() const { return unit_multiplier_; }
+  void set_unit_multiplier(int64_t value) { unit_multiplier_ = value; _has_field_.set(4); }
+
+  bool has_is_incremental() const { return _has_field_[5]; }
+  bool is_incremental() const { return is_incremental_; }
+  void set_is_incremental(bool value) { is_incremental_ = value; _has_field_.set(5); }
+
+ private:
+  CounterDescriptor_BuiltinCounterType type_{};
+  std::vector<std::string> categories_;
+  CounterDescriptor_Unit unit_{};
+  std::string unit_name_{};
+  int64_t unit_multiplier_{};
+  bool is_incremental_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<7> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_COUNTER_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/debug_annotation.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_DEBUG_ANNOTATION_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_DEBUG_ANNOTATION_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class DebugAnnotationName;
+class DebugAnnotation;
+class DebugAnnotation_NestedValue;
+enum DebugAnnotation_NestedValue_NestedType : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum DebugAnnotation_NestedValue_NestedType : int {
+  DebugAnnotation_NestedValue_NestedType_UNSPECIFIED = 0,
+  DebugAnnotation_NestedValue_NestedType_DICT = 1,
+  DebugAnnotation_NestedValue_NestedType_ARRAY = 2,
+};
+
+class PERFETTO_EXPORT DebugAnnotationName : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIidFieldNumber = 1,
+    kNameFieldNumber = 2,
+  };
+
+  DebugAnnotationName();
+  ~DebugAnnotationName() override;
+  DebugAnnotationName(DebugAnnotationName&&) noexcept;
+  DebugAnnotationName& operator=(DebugAnnotationName&&);
+  DebugAnnotationName(const DebugAnnotationName&);
+  DebugAnnotationName& operator=(const DebugAnnotationName&);
+  bool operator==(const DebugAnnotationName&) const;
+  bool operator!=(const DebugAnnotationName& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_iid() const { return _has_field_[1]; }
+  uint64_t iid() const { return iid_; }
+  void set_iid(uint64_t value) { iid_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+ private:
+  uint64_t iid_{};
+  std::string name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT DebugAnnotation : public ::protozero::CppMessageObj {
+ public:
+  using NestedValue = DebugAnnotation_NestedValue;
+  enum FieldNumbers {
+    kNameIidFieldNumber = 1,
+    kNameFieldNumber = 10,
+    kBoolValueFieldNumber = 2,
+    kUintValueFieldNumber = 3,
+    kIntValueFieldNumber = 4,
+    kDoubleValueFieldNumber = 5,
+    kStringValueFieldNumber = 6,
+    kPointerValueFieldNumber = 7,
+    kNestedValueFieldNumber = 8,
+    kLegacyJsonValueFieldNumber = 9,
+    kDictEntriesFieldNumber = 11,
+    kArrayValuesFieldNumber = 12,
+  };
+
+  DebugAnnotation();
+  ~DebugAnnotation() override;
+  DebugAnnotation(DebugAnnotation&&) noexcept;
+  DebugAnnotation& operator=(DebugAnnotation&&);
+  DebugAnnotation(const DebugAnnotation&);
+  DebugAnnotation& operator=(const DebugAnnotation&);
+  bool operator==(const DebugAnnotation&) const;
+  bool operator!=(const DebugAnnotation& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name_iid() const { return _has_field_[1]; }
+  uint64_t name_iid() const { return name_iid_; }
+  void set_name_iid(uint64_t value) { name_iid_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[10]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(10); }
+
+  bool has_bool_value() const { return _has_field_[2]; }
+  bool bool_value() const { return bool_value_; }
+  void set_bool_value(bool value) { bool_value_ = value; _has_field_.set(2); }
+
+  bool has_uint_value() const { return _has_field_[3]; }
+  uint64_t uint_value() const { return uint_value_; }
+  void set_uint_value(uint64_t value) { uint_value_ = value; _has_field_.set(3); }
+
+  bool has_int_value() const { return _has_field_[4]; }
+  int64_t int_value() const { return int_value_; }
+  void set_int_value(int64_t value) { int_value_ = value; _has_field_.set(4); }
+
+  bool has_double_value() const { return _has_field_[5]; }
+  double double_value() const { return double_value_; }
+  void set_double_value(double value) { double_value_ = value; _has_field_.set(5); }
+
+  bool has_string_value() const { return _has_field_[6]; }
+  const std::string& string_value() const { return string_value_; }
+  void set_string_value(const std::string& value) { string_value_ = value; _has_field_.set(6); }
+
+  bool has_pointer_value() const { return _has_field_[7]; }
+  uint64_t pointer_value() const { return pointer_value_; }
+  void set_pointer_value(uint64_t value) { pointer_value_ = value; _has_field_.set(7); }
+
+  bool has_nested_value() const { return _has_field_[8]; }
+  const DebugAnnotation_NestedValue& nested_value() const { return *nested_value_; }
+  DebugAnnotation_NestedValue* mutable_nested_value() { _has_field_.set(8); return nested_value_.get(); }
+
+  bool has_legacy_json_value() const { return _has_field_[9]; }
+  const std::string& legacy_json_value() const { return legacy_json_value_; }
+  void set_legacy_json_value(const std::string& value) { legacy_json_value_ = value; _has_field_.set(9); }
+
+  const std::vector<DebugAnnotation>& dict_entries() const { return dict_entries_; }
+  std::vector<DebugAnnotation>* mutable_dict_entries() { return &dict_entries_; }
+  int dict_entries_size() const;
+  void clear_dict_entries();
+  DebugAnnotation* add_dict_entries();
+
+  const std::vector<DebugAnnotation>& array_values() const { return array_values_; }
+  std::vector<DebugAnnotation>* mutable_array_values() { return &array_values_; }
+  int array_values_size() const;
+  void clear_array_values();
+  DebugAnnotation* add_array_values();
+
+ private:
+  uint64_t name_iid_{};
+  std::string name_{};
+  bool bool_value_{};
+  uint64_t uint_value_{};
+  int64_t int_value_{};
+  double double_value_{};
+  std::string string_value_{};
+  uint64_t pointer_value_{};
+  ::protozero::CopyablePtr<DebugAnnotation_NestedValue> nested_value_;
+  std::string legacy_json_value_{};
+  std::vector<DebugAnnotation> dict_entries_;
+  std::vector<DebugAnnotation> array_values_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<13> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT DebugAnnotation_NestedValue : public ::protozero::CppMessageObj {
+ public:
+  using NestedType = DebugAnnotation_NestedValue_NestedType;
+  static constexpr auto UNSPECIFIED = DebugAnnotation_NestedValue_NestedType_UNSPECIFIED;
+  static constexpr auto DICT = DebugAnnotation_NestedValue_NestedType_DICT;
+  static constexpr auto ARRAY = DebugAnnotation_NestedValue_NestedType_ARRAY;
+  static constexpr auto NestedType_MIN = DebugAnnotation_NestedValue_NestedType_UNSPECIFIED;
+  static constexpr auto NestedType_MAX = DebugAnnotation_NestedValue_NestedType_ARRAY;
+  enum FieldNumbers {
+    kNestedTypeFieldNumber = 1,
+    kDictKeysFieldNumber = 2,
+    kDictValuesFieldNumber = 3,
+    kArrayValuesFieldNumber = 4,
+    kIntValueFieldNumber = 5,
+    kDoubleValueFieldNumber = 6,
+    kBoolValueFieldNumber = 7,
+    kStringValueFieldNumber = 8,
+  };
+
+  DebugAnnotation_NestedValue();
+  ~DebugAnnotation_NestedValue() override;
+  DebugAnnotation_NestedValue(DebugAnnotation_NestedValue&&) noexcept;
+  DebugAnnotation_NestedValue& operator=(DebugAnnotation_NestedValue&&);
+  DebugAnnotation_NestedValue(const DebugAnnotation_NestedValue&);
+  DebugAnnotation_NestedValue& operator=(const DebugAnnotation_NestedValue&);
+  bool operator==(const DebugAnnotation_NestedValue&) const;
+  bool operator!=(const DebugAnnotation_NestedValue& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_nested_type() const { return _has_field_[1]; }
+  DebugAnnotation_NestedValue_NestedType nested_type() const { return nested_type_; }
+  void set_nested_type(DebugAnnotation_NestedValue_NestedType value) { nested_type_ = value; _has_field_.set(1); }
+
+  const std::vector<std::string>& dict_keys() const { return dict_keys_; }
+  std::vector<std::string>* mutable_dict_keys() { return &dict_keys_; }
+  int dict_keys_size() const { return static_cast<int>(dict_keys_.size()); }
+  void clear_dict_keys() { dict_keys_.clear(); }
+  void add_dict_keys(std::string value) { dict_keys_.emplace_back(value); }
+  std::string* add_dict_keys() { dict_keys_.emplace_back(); return &dict_keys_.back(); }
+
+  const std::vector<DebugAnnotation_NestedValue>& dict_values() const { return dict_values_; }
+  std::vector<DebugAnnotation_NestedValue>* mutable_dict_values() { return &dict_values_; }
+  int dict_values_size() const;
+  void clear_dict_values();
+  DebugAnnotation_NestedValue* add_dict_values();
+
+  const std::vector<DebugAnnotation_NestedValue>& array_values() const { return array_values_; }
+  std::vector<DebugAnnotation_NestedValue>* mutable_array_values() { return &array_values_; }
+  int array_values_size() const;
+  void clear_array_values();
+  DebugAnnotation_NestedValue* add_array_values();
+
+  bool has_int_value() const { return _has_field_[5]; }
+  int64_t int_value() const { return int_value_; }
+  void set_int_value(int64_t value) { int_value_ = value; _has_field_.set(5); }
+
+  bool has_double_value() const { return _has_field_[6]; }
+  double double_value() const { return double_value_; }
+  void set_double_value(double value) { double_value_ = value; _has_field_.set(6); }
+
+  bool has_bool_value() const { return _has_field_[7]; }
+  bool bool_value() const { return bool_value_; }
+  void set_bool_value(bool value) { bool_value_ = value; _has_field_.set(7); }
+
+  bool has_string_value() const { return _has_field_[8]; }
+  const std::string& string_value() const { return string_value_; }
+  void set_string_value(const std::string& value) { string_value_ = value; _has_field_.set(8); }
+
+ private:
+  DebugAnnotation_NestedValue_NestedType nested_type_{};
+  std::vector<std::string> dict_keys_;
+  std::vector<DebugAnnotation_NestedValue> dict_values_;
+  std::vector<DebugAnnotation_NestedValue> array_values_;
+  int64_t int_value_{};
+  double double_value_{};
+  bool bool_value_{};
+  std::string string_value_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<9> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_DEBUG_ANNOTATION_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/log_message.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_LOG_MESSAGE_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_LOG_MESSAGE_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class LogMessageBody;
+class LogMessage;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT LogMessageBody : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIidFieldNumber = 1,
+    kBodyFieldNumber = 2,
+  };
+
+  LogMessageBody();
+  ~LogMessageBody() override;
+  LogMessageBody(LogMessageBody&&) noexcept;
+  LogMessageBody& operator=(LogMessageBody&&);
+  LogMessageBody(const LogMessageBody&);
+  LogMessageBody& operator=(const LogMessageBody&);
+  bool operator==(const LogMessageBody&) const;
+  bool operator!=(const LogMessageBody& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_iid() const { return _has_field_[1]; }
+  uint64_t iid() const { return iid_; }
+  void set_iid(uint64_t value) { iid_ = value; _has_field_.set(1); }
+
+  bool has_body() const { return _has_field_[2]; }
+  const std::string& body() const { return body_; }
+  void set_body(const std::string& value) { body_ = value; _has_field_.set(2); }
+
+ private:
+  uint64_t iid_{};
+  std::string body_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT LogMessage : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kSourceLocationIidFieldNumber = 1,
+    kBodyIidFieldNumber = 2,
+  };
+
+  LogMessage();
+  ~LogMessage() override;
+  LogMessage(LogMessage&&) noexcept;
+  LogMessage& operator=(LogMessage&&);
+  LogMessage(const LogMessage&);
+  LogMessage& operator=(const LogMessage&);
+  bool operator==(const LogMessage&) const;
+  bool operator!=(const LogMessage& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_source_location_iid() const { return _has_field_[1]; }
+  uint64_t source_location_iid() const { return source_location_iid_; }
+  void set_source_location_iid(uint64_t value) { source_location_iid_ = value; _has_field_.set(1); }
+
+  bool has_body_iid() const { return _has_field_[2]; }
+  uint64_t body_iid() const { return body_iid_; }
+  void set_body_iid(uint64_t value) { body_iid_ = value; _has_field_.set(2); }
+
+ private:
+  uint64_t source_location_iid_{};
+  uint64_t body_iid_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_LOG_MESSAGE_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/process_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_PROCESS_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_PROCESS_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ProcessDescriptor;
+enum ProcessDescriptor_ChromeProcessType : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ProcessDescriptor_ChromeProcessType : int {
+  ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED = 0,
+  ProcessDescriptor_ChromeProcessType_PROCESS_BROWSER = 1,
+  ProcessDescriptor_ChromeProcessType_PROCESS_RENDERER = 2,
+  ProcessDescriptor_ChromeProcessType_PROCESS_UTILITY = 3,
+  ProcessDescriptor_ChromeProcessType_PROCESS_ZYGOTE = 4,
+  ProcessDescriptor_ChromeProcessType_PROCESS_SANDBOX_HELPER = 5,
+  ProcessDescriptor_ChromeProcessType_PROCESS_GPU = 6,
+  ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_PLUGIN = 7,
+  ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER = 8,
+};
+
+class PERFETTO_EXPORT ProcessDescriptor : public ::protozero::CppMessageObj {
+ public:
+  using ChromeProcessType = ProcessDescriptor_ChromeProcessType;
+  static constexpr auto PROCESS_UNSPECIFIED = ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED;
+  static constexpr auto PROCESS_BROWSER = ProcessDescriptor_ChromeProcessType_PROCESS_BROWSER;
+  static constexpr auto PROCESS_RENDERER = ProcessDescriptor_ChromeProcessType_PROCESS_RENDERER;
+  static constexpr auto PROCESS_UTILITY = ProcessDescriptor_ChromeProcessType_PROCESS_UTILITY;
+  static constexpr auto PROCESS_ZYGOTE = ProcessDescriptor_ChromeProcessType_PROCESS_ZYGOTE;
+  static constexpr auto PROCESS_SANDBOX_HELPER = ProcessDescriptor_ChromeProcessType_PROCESS_SANDBOX_HELPER;
+  static constexpr auto PROCESS_GPU = ProcessDescriptor_ChromeProcessType_PROCESS_GPU;
+  static constexpr auto PROCESS_PPAPI_PLUGIN = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_PLUGIN;
+  static constexpr auto PROCESS_PPAPI_BROKER = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER;
+  static constexpr auto ChromeProcessType_MIN = ProcessDescriptor_ChromeProcessType_PROCESS_UNSPECIFIED;
+  static constexpr auto ChromeProcessType_MAX = ProcessDescriptor_ChromeProcessType_PROCESS_PPAPI_BROKER;
+  enum FieldNumbers {
+    kPidFieldNumber = 1,
+    kCmdlineFieldNumber = 2,
+    kProcessNameFieldNumber = 6,
+    kProcessPriorityFieldNumber = 5,
+    kStartTimestampNsFieldNumber = 7,
+    kChromeProcessTypeFieldNumber = 4,
+    kLegacySortIndexFieldNumber = 3,
+  };
+
+  ProcessDescriptor();
+  ~ProcessDescriptor() override;
+  ProcessDescriptor(ProcessDescriptor&&) noexcept;
+  ProcessDescriptor& operator=(ProcessDescriptor&&);
+  ProcessDescriptor(const ProcessDescriptor&);
+  ProcessDescriptor& operator=(const ProcessDescriptor&);
+  bool operator==(const ProcessDescriptor&) const;
+  bool operator!=(const ProcessDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_pid() const { return _has_field_[1]; }
+  int32_t pid() const { return pid_; }
+  void set_pid(int32_t value) { pid_ = value; _has_field_.set(1); }
+
+  const std::vector<std::string>& cmdline() const { return cmdline_; }
+  std::vector<std::string>* mutable_cmdline() { return &cmdline_; }
+  int cmdline_size() const { return static_cast<int>(cmdline_.size()); }
+  void clear_cmdline() { cmdline_.clear(); }
+  void add_cmdline(std::string value) { cmdline_.emplace_back(value); }
+  std::string* add_cmdline() { cmdline_.emplace_back(); return &cmdline_.back(); }
+
+  bool has_process_name() const { return _has_field_[6]; }
+  const std::string& process_name() const { return process_name_; }
+  void set_process_name(const std::string& value) { process_name_ = value; _has_field_.set(6); }
+
+  bool has_process_priority() const { return _has_field_[5]; }
+  int32_t process_priority() const { return process_priority_; }
+  void set_process_priority(int32_t value) { process_priority_ = value; _has_field_.set(5); }
+
+  bool has_start_timestamp_ns() const { return _has_field_[7]; }
+  int64_t start_timestamp_ns() const { return start_timestamp_ns_; }
+  void set_start_timestamp_ns(int64_t value) { start_timestamp_ns_ = value; _has_field_.set(7); }
+
+  bool has_chrome_process_type() const { return _has_field_[4]; }
+  ProcessDescriptor_ChromeProcessType chrome_process_type() const { return chrome_process_type_; }
+  void set_chrome_process_type(ProcessDescriptor_ChromeProcessType value) { chrome_process_type_ = value; _has_field_.set(4); }
+
+  bool has_legacy_sort_index() const { return _has_field_[3]; }
+  int32_t legacy_sort_index() const { return legacy_sort_index_; }
+  void set_legacy_sort_index(int32_t value) { legacy_sort_index_ = value; _has_field_.set(3); }
+
+ private:
+  int32_t pid_{};
+  std::vector<std::string> cmdline_;
+  std::string process_name_{};
+  int32_t process_priority_{};
+  int64_t start_timestamp_ns_{};
+  ProcessDescriptor_ChromeProcessType chrome_process_type_{};
+  int32_t legacy_sort_index_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<8> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_PROCESS_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/source_location.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_SOURCE_LOCATION_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_SOURCE_LOCATION_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class SourceLocation;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT SourceLocation : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIidFieldNumber = 1,
+    kFileNameFieldNumber = 2,
+    kFunctionNameFieldNumber = 3,
+    kLineNumberFieldNumber = 4,
+  };
+
+  SourceLocation();
+  ~SourceLocation() override;
+  SourceLocation(SourceLocation&&) noexcept;
+  SourceLocation& operator=(SourceLocation&&);
+  SourceLocation(const SourceLocation&);
+  SourceLocation& operator=(const SourceLocation&);
+  bool operator==(const SourceLocation&) const;
+  bool operator!=(const SourceLocation& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_iid() const { return _has_field_[1]; }
+  uint64_t iid() const { return iid_; }
+  void set_iid(uint64_t value) { iid_ = value; _has_field_.set(1); }
+
+  bool has_file_name() const { return _has_field_[2]; }
+  const std::string& file_name() const { return file_name_; }
+  void set_file_name(const std::string& value) { file_name_ = value; _has_field_.set(2); }
+
+  bool has_function_name() const { return _has_field_[3]; }
+  const std::string& function_name() const { return function_name_; }
+  void set_function_name(const std::string& value) { function_name_ = value; _has_field_.set(3); }
+
+  bool has_line_number() const { return _has_field_[4]; }
+  uint32_t line_number() const { return line_number_; }
+  void set_line_number(uint32_t value) { line_number_ = value; _has_field_.set(4); }
+
+ private:
+  uint64_t iid_{};
+  std::string file_name_{};
+  std::string function_name_{};
+  uint32_t line_number_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_SOURCE_LOCATION_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/task_execution.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TASK_EXECUTION_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TASK_EXECUTION_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TaskExecution;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT TaskExecution : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kPostedFromIidFieldNumber = 1,
+  };
+
+  TaskExecution();
+  ~TaskExecution() override;
+  TaskExecution(TaskExecution&&) noexcept;
+  TaskExecution& operator=(TaskExecution&&);
+  TaskExecution(const TaskExecution&);
+  TaskExecution& operator=(const TaskExecution&);
+  bool operator==(const TaskExecution&) const;
+  bool operator!=(const TaskExecution& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_posted_from_iid() const { return _has_field_[1]; }
+  uint64_t posted_from_iid() const { return posted_from_iid_; }
+  void set_posted_from_iid(uint64_t value) { posted_from_iid_ = value; _has_field_.set(1); }
+
+ private:
+  uint64_t posted_from_iid_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TASK_EXECUTION_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/thread_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_THREAD_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_THREAD_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ThreadDescriptor;
+enum ThreadDescriptor_ChromeThreadType : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ThreadDescriptor_ChromeThreadType : int {
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED = 0,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MAIN = 1,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_IO = 2,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_WORKER = 3,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FG_WORKER = 4,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FB_BLOCKING = 5,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_BLOCKING = 6,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_SERVICE = 7,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR = 8,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_VIZ_COMPOSITOR = 9,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR_WORKER = 10,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SERVICE_WORKER = 11,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MEMORY_INFRA = 50,
+  ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER = 51,
+};
+
+class PERFETTO_EXPORT ThreadDescriptor : public ::protozero::CppMessageObj {
+ public:
+  using ChromeThreadType = ThreadDescriptor_ChromeThreadType;
+  static constexpr auto CHROME_THREAD_UNSPECIFIED = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED;
+  static constexpr auto CHROME_THREAD_MAIN = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MAIN;
+  static constexpr auto CHROME_THREAD_IO = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_IO;
+  static constexpr auto CHROME_THREAD_POOL_BG_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_WORKER;
+  static constexpr auto CHROME_THREAD_POOL_FG_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FG_WORKER;
+  static constexpr auto CHROME_THREAD_POOL_FB_BLOCKING = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_FB_BLOCKING;
+  static constexpr auto CHROME_THREAD_POOL_BG_BLOCKING = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_BG_BLOCKING;
+  static constexpr auto CHROME_THREAD_POOL_SERVICE = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_POOL_SERVICE;
+  static constexpr auto CHROME_THREAD_COMPOSITOR = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR;
+  static constexpr auto CHROME_THREAD_VIZ_COMPOSITOR = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_VIZ_COMPOSITOR;
+  static constexpr auto CHROME_THREAD_COMPOSITOR_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_COMPOSITOR_WORKER;
+  static constexpr auto CHROME_THREAD_SERVICE_WORKER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SERVICE_WORKER;
+  static constexpr auto CHROME_THREAD_MEMORY_INFRA = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_MEMORY_INFRA;
+  static constexpr auto CHROME_THREAD_SAMPLING_PROFILER = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER;
+  static constexpr auto ChromeThreadType_MIN = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_UNSPECIFIED;
+  static constexpr auto ChromeThreadType_MAX = ThreadDescriptor_ChromeThreadType_CHROME_THREAD_SAMPLING_PROFILER;
+  enum FieldNumbers {
+    kPidFieldNumber = 1,
+    kTidFieldNumber = 2,
+    kThreadNameFieldNumber = 5,
+    kChromeThreadTypeFieldNumber = 4,
+    kReferenceTimestampUsFieldNumber = 6,
+    kReferenceThreadTimeUsFieldNumber = 7,
+    kReferenceThreadInstructionCountFieldNumber = 8,
+    kLegacySortIndexFieldNumber = 3,
+  };
+
+  ThreadDescriptor();
+  ~ThreadDescriptor() override;
+  ThreadDescriptor(ThreadDescriptor&&) noexcept;
+  ThreadDescriptor& operator=(ThreadDescriptor&&);
+  ThreadDescriptor(const ThreadDescriptor&);
+  ThreadDescriptor& operator=(const ThreadDescriptor&);
+  bool operator==(const ThreadDescriptor&) const;
+  bool operator!=(const ThreadDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_pid() const { return _has_field_[1]; }
+  int32_t pid() const { return pid_; }
+  void set_pid(int32_t value) { pid_ = value; _has_field_.set(1); }
+
+  bool has_tid() const { return _has_field_[2]; }
+  int32_t tid() const { return tid_; }
+  void set_tid(int32_t value) { tid_ = value; _has_field_.set(2); }
+
+  bool has_thread_name() const { return _has_field_[5]; }
+  const std::string& thread_name() const { return thread_name_; }
+  void set_thread_name(const std::string& value) { thread_name_ = value; _has_field_.set(5); }
+
+  bool has_chrome_thread_type() const { return _has_field_[4]; }
+  ThreadDescriptor_ChromeThreadType chrome_thread_type() const { return chrome_thread_type_; }
+  void set_chrome_thread_type(ThreadDescriptor_ChromeThreadType value) { chrome_thread_type_ = value; _has_field_.set(4); }
+
+  bool has_reference_timestamp_us() const { return _has_field_[6]; }
+  int64_t reference_timestamp_us() const { return reference_timestamp_us_; }
+  void set_reference_timestamp_us(int64_t value) { reference_timestamp_us_ = value; _has_field_.set(6); }
+
+  bool has_reference_thread_time_us() const { return _has_field_[7]; }
+  int64_t reference_thread_time_us() const { return reference_thread_time_us_; }
+  void set_reference_thread_time_us(int64_t value) { reference_thread_time_us_ = value; _has_field_.set(7); }
+
+  bool has_reference_thread_instruction_count() const { return _has_field_[8]; }
+  int64_t reference_thread_instruction_count() const { return reference_thread_instruction_count_; }
+  void set_reference_thread_instruction_count(int64_t value) { reference_thread_instruction_count_ = value; _has_field_.set(8); }
+
+  bool has_legacy_sort_index() const { return _has_field_[3]; }
+  int32_t legacy_sort_index() const { return legacy_sort_index_; }
+  void set_legacy_sort_index(int32_t value) { legacy_sort_index_ = value; _has_field_.set(3); }
+
+ private:
+  int32_t pid_{};
+  int32_t tid_{};
+  std::string thread_name_{};
+  ThreadDescriptor_ChromeThreadType chrome_thread_type_{};
+  int64_t reference_timestamp_us_{};
+  int64_t reference_thread_time_us_{};
+  int64_t reference_thread_instruction_count_{};
+  int32_t legacy_sort_index_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<9> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_THREAD_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/track_descriptor.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_DESCRIPTOR_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_DESCRIPTOR_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TrackDescriptor;
+class CounterDescriptor;
+class ChromeThreadDescriptor;
+class ThreadDescriptor;
+class ChromeProcessDescriptor;
+class ProcessDescriptor;
+enum CounterDescriptor_BuiltinCounterType : int;
+enum CounterDescriptor_Unit : int;
+enum ChromeThreadDescriptor_ThreadType : int;
+enum ThreadDescriptor_ChromeThreadType : int;
+enum ChromeProcessDescriptor_ProcessType : int;
+enum ProcessDescriptor_ChromeProcessType : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT TrackDescriptor : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kUuidFieldNumber = 1,
+    kParentUuidFieldNumber = 5,
+    kNameFieldNumber = 2,
+    kProcessFieldNumber = 3,
+    kChromeProcessFieldNumber = 6,
+    kThreadFieldNumber = 4,
+    kChromeThreadFieldNumber = 7,
+    kCounterFieldNumber = 8,
+  };
+
+  TrackDescriptor();
+  ~TrackDescriptor() override;
+  TrackDescriptor(TrackDescriptor&&) noexcept;
+  TrackDescriptor& operator=(TrackDescriptor&&);
+  TrackDescriptor(const TrackDescriptor&);
+  TrackDescriptor& operator=(const TrackDescriptor&);
+  bool operator==(const TrackDescriptor&) const;
+  bool operator!=(const TrackDescriptor& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_uuid() const { return _has_field_[1]; }
+  uint64_t uuid() const { return uuid_; }
+  void set_uuid(uint64_t value) { uuid_ = value; _has_field_.set(1); }
+
+  bool has_parent_uuid() const { return _has_field_[5]; }
+  uint64_t parent_uuid() const { return parent_uuid_; }
+  void set_parent_uuid(uint64_t value) { parent_uuid_ = value; _has_field_.set(5); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+  bool has_process() const { return _has_field_[3]; }
+  const ProcessDescriptor& process() const { return *process_; }
+  ProcessDescriptor* mutable_process() { _has_field_.set(3); return process_.get(); }
+
+  bool has_chrome_process() const { return _has_field_[6]; }
+  const ChromeProcessDescriptor& chrome_process() const { return *chrome_process_; }
+  ChromeProcessDescriptor* mutable_chrome_process() { _has_field_.set(6); return chrome_process_.get(); }
+
+  bool has_thread() const { return _has_field_[4]; }
+  const ThreadDescriptor& thread() const { return *thread_; }
+  ThreadDescriptor* mutable_thread() { _has_field_.set(4); return thread_.get(); }
+
+  bool has_chrome_thread() const { return _has_field_[7]; }
+  const ChromeThreadDescriptor& chrome_thread() const { return *chrome_thread_; }
+  ChromeThreadDescriptor* mutable_chrome_thread() { _has_field_.set(7); return chrome_thread_.get(); }
+
+  bool has_counter() const { return _has_field_[8]; }
+  const CounterDescriptor& counter() const { return *counter_; }
+  CounterDescriptor* mutable_counter() { _has_field_.set(8); return counter_.get(); }
+
+ private:
+  uint64_t uuid_{};
+  uint64_t parent_uuid_{};
+  std::string name_{};
+  ::protozero::CopyablePtr<ProcessDescriptor> process_;
+  ::protozero::CopyablePtr<ChromeProcessDescriptor> chrome_process_;
+  ::protozero::CopyablePtr<ThreadDescriptor> thread_;
+  ::protozero::CopyablePtr<ChromeThreadDescriptor> chrome_thread_;
+  ::protozero::CopyablePtr<CounterDescriptor> counter_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<9> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_DESCRIPTOR_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/trace/track_event/track_event.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_EVENT_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_EVENT_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class EventName;
+class EventCategory;
+class TrackEventDefaults;
+class TrackEvent;
+class TrackEvent_LegacyEvent;
+class ChromeMojoEventInfo;
+class ChromeMessagePump;
+class SourceLocation;
+class ChromeContentSettingsEventInfo;
+class ChromeWindowHandleEventInfo;
+class ChromeRendererSchedulerState;
+class ChromeApplicationStateInfo;
+class ChromeFrameReporter;
+class ChromeLatencyInfo;
+class ChromeLatencyInfo_ComponentInfo;
+class ChromeHistogramSample;
+class ChromeLegacyIpc;
+class ChromeKeyedService;
+class ChromeUserEvent;
+class ChromeCompositorSchedulerState;
+class CompositorTimingHistory;
+class BeginFrameSourceState;
+class BeginFrameArgs;
+class BeginFrameObserverState;
+class BeginImplFrameArgs;
+class BeginImplFrameArgs_TimestampsInUs;
+class ChromeCompositorStateMachine;
+class ChromeCompositorStateMachine_MinorState;
+class ChromeCompositorStateMachine_MajorState;
+class LogMessage;
+class TaskExecution;
+class DebugAnnotation;
+class DebugAnnotation_NestedValue;
+enum TrackEvent_Type : int;
+enum TrackEvent_LegacyEvent_FlowDirection : int;
+enum TrackEvent_LegacyEvent_InstantEventScope : int;
+enum ChromeRAILMode : int;
+enum ChromeApplicationStateInfo_ChromeApplicationState : int;
+enum ChromeFrameReporter_State : int;
+enum ChromeFrameReporter_FrameDropReason : int;
+enum ChromeFrameReporter_ScrollState : int;
+enum ChromeLatencyInfo_Step : int;
+enum ChromeLatencyInfo_LatencyComponentType : int;
+enum ChromeLegacyIpc_MessageClass : int;
+enum ChromeCompositorSchedulerState_BeginImplFrameDeadlineMode : int;
+enum ChromeCompositorSchedulerAction : int;
+enum BeginFrameArgs_BeginFrameArgsType : int;
+enum BeginImplFrameArgs_State : int;
+enum ChromeCompositorStateMachine_MinorState_TreePriority : int;
+enum ChromeCompositorStateMachine_MinorState_ScrollHandlerState : int;
+enum ChromeCompositorStateMachine_MajorState_BeginImplFrameState : int;
+enum ChromeCompositorStateMachine_MajorState_BeginMainFrameState : int;
+enum ChromeCompositorStateMachine_MajorState_LayerTreeFrameSinkState : int;
+enum ChromeCompositorStateMachine_MajorState_ForcedRedrawOnTimeoutState : int;
+enum DebugAnnotation_NestedValue_NestedType : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum TrackEvent_Type : int {
+  TrackEvent_Type_TYPE_UNSPECIFIED = 0,
+  TrackEvent_Type_TYPE_SLICE_BEGIN = 1,
+  TrackEvent_Type_TYPE_SLICE_END = 2,
+  TrackEvent_Type_TYPE_INSTANT = 3,
+  TrackEvent_Type_TYPE_COUNTER = 4,
+};
+enum TrackEvent_LegacyEvent_FlowDirection : int {
+  TrackEvent_LegacyEvent_FlowDirection_FLOW_UNSPECIFIED = 0,
+  TrackEvent_LegacyEvent_FlowDirection_FLOW_IN = 1,
+  TrackEvent_LegacyEvent_FlowDirection_FLOW_OUT = 2,
+  TrackEvent_LegacyEvent_FlowDirection_FLOW_INOUT = 3,
+};
+enum TrackEvent_LegacyEvent_InstantEventScope : int {
+  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_UNSPECIFIED = 0,
+  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_GLOBAL = 1,
+  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_PROCESS = 2,
+  TrackEvent_LegacyEvent_InstantEventScope_SCOPE_THREAD = 3,
+};
+
+class PERFETTO_EXPORT EventName : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIidFieldNumber = 1,
+    kNameFieldNumber = 2,
+  };
+
+  EventName();
+  ~EventName() override;
+  EventName(EventName&&) noexcept;
+  EventName& operator=(EventName&&);
+  EventName(const EventName&);
+  EventName& operator=(const EventName&);
+  bool operator==(const EventName&) const;
+  bool operator!=(const EventName& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_iid() const { return _has_field_[1]; }
+  uint64_t iid() const { return iid_; }
+  void set_iid(uint64_t value) { iid_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+ private:
+  uint64_t iid_{};
+  std::string name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT EventCategory : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIidFieldNumber = 1,
+    kNameFieldNumber = 2,
+  };
+
+  EventCategory();
+  ~EventCategory() override;
+  EventCategory(EventCategory&&) noexcept;
+  EventCategory& operator=(EventCategory&&);
+  EventCategory(const EventCategory&);
+  EventCategory& operator=(const EventCategory&);
+  bool operator==(const EventCategory&) const;
+  bool operator!=(const EventCategory& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_iid() const { return _has_field_[1]; }
+  uint64_t iid() const { return iid_; }
+  void set_iid(uint64_t value) { iid_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+ private:
+  uint64_t iid_{};
+  std::string name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TrackEventDefaults : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTrackUuidFieldNumber = 11,
+    kExtraCounterTrackUuidsFieldNumber = 31,
+    kExtraDoubleCounterTrackUuidsFieldNumber = 45,
+  };
+
+  TrackEventDefaults();
+  ~TrackEventDefaults() override;
+  TrackEventDefaults(TrackEventDefaults&&) noexcept;
+  TrackEventDefaults& operator=(TrackEventDefaults&&);
+  TrackEventDefaults(const TrackEventDefaults&);
+  TrackEventDefaults& operator=(const TrackEventDefaults&);
+  bool operator==(const TrackEventDefaults&) const;
+  bool operator!=(const TrackEventDefaults& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_track_uuid() const { return _has_field_[11]; }
+  uint64_t track_uuid() const { return track_uuid_; }
+  void set_track_uuid(uint64_t value) { track_uuid_ = value; _has_field_.set(11); }
+
+  const std::vector<uint64_t>& extra_counter_track_uuids() const { return extra_counter_track_uuids_; }
+  std::vector<uint64_t>* mutable_extra_counter_track_uuids() { return &extra_counter_track_uuids_; }
+  int extra_counter_track_uuids_size() const { return static_cast<int>(extra_counter_track_uuids_.size()); }
+  void clear_extra_counter_track_uuids() { extra_counter_track_uuids_.clear(); }
+  void add_extra_counter_track_uuids(uint64_t value) { extra_counter_track_uuids_.emplace_back(value); }
+  uint64_t* add_extra_counter_track_uuids() { extra_counter_track_uuids_.emplace_back(); return &extra_counter_track_uuids_.back(); }
+
+  const std::vector<uint64_t>& extra_double_counter_track_uuids() const { return extra_double_counter_track_uuids_; }
+  std::vector<uint64_t>* mutable_extra_double_counter_track_uuids() { return &extra_double_counter_track_uuids_; }
+  int extra_double_counter_track_uuids_size() const { return static_cast<int>(extra_double_counter_track_uuids_.size()); }
+  void clear_extra_double_counter_track_uuids() { extra_double_counter_track_uuids_.clear(); }
+  void add_extra_double_counter_track_uuids(uint64_t value) { extra_double_counter_track_uuids_.emplace_back(value); }
+  uint64_t* add_extra_double_counter_track_uuids() { extra_double_counter_track_uuids_.emplace_back(); return &extra_double_counter_track_uuids_.back(); }
+
+ private:
+  uint64_t track_uuid_{};
+  std::vector<uint64_t> extra_counter_track_uuids_;
+  std::vector<uint64_t> extra_double_counter_track_uuids_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<46> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TrackEvent : public ::protozero::CppMessageObj {
+ public:
+  using LegacyEvent = TrackEvent_LegacyEvent;
+  using Type = TrackEvent_Type;
+  static constexpr auto TYPE_UNSPECIFIED = TrackEvent_Type_TYPE_UNSPECIFIED;
+  static constexpr auto TYPE_SLICE_BEGIN = TrackEvent_Type_TYPE_SLICE_BEGIN;
+  static constexpr auto TYPE_SLICE_END = TrackEvent_Type_TYPE_SLICE_END;
+  static constexpr auto TYPE_INSTANT = TrackEvent_Type_TYPE_INSTANT;
+  static constexpr auto TYPE_COUNTER = TrackEvent_Type_TYPE_COUNTER;
+  static constexpr auto Type_MIN = TrackEvent_Type_TYPE_UNSPECIFIED;
+  static constexpr auto Type_MAX = TrackEvent_Type_TYPE_COUNTER;
+  enum FieldNumbers {
+    kCategoryIidsFieldNumber = 3,
+    kCategoriesFieldNumber = 22,
+    kNameIidFieldNumber = 10,
+    kNameFieldNumber = 23,
+    kTypeFieldNumber = 9,
+    kTrackUuidFieldNumber = 11,
+    kCounterValueFieldNumber = 30,
+    kDoubleCounterValueFieldNumber = 44,
+    kExtraCounterTrackUuidsFieldNumber = 31,
+    kExtraCounterValuesFieldNumber = 12,
+    kExtraDoubleCounterTrackUuidsFieldNumber = 45,
+    kExtraDoubleCounterValuesFieldNumber = 46,
+    kFlowIdsFieldNumber = 36,
+    kTerminatingFlowIdsFieldNumber = 42,
+    kDebugAnnotationsFieldNumber = 4,
+    kTaskExecutionFieldNumber = 5,
+    kLogMessageFieldNumber = 21,
+    kCcSchedulerStateFieldNumber = 24,
+    kChromeUserEventFieldNumber = 25,
+    kChromeKeyedServiceFieldNumber = 26,
+    kChromeLegacyIpcFieldNumber = 27,
+    kChromeHistogramSampleFieldNumber = 28,
+    kChromeLatencyInfoFieldNumber = 29,
+    kChromeFrameReporterFieldNumber = 32,
+    kChromeApplicationStateInfoFieldNumber = 39,
+    kChromeRendererSchedulerStateFieldNumber = 40,
+    kChromeWindowHandleEventInfoFieldNumber = 41,
+    kChromeContentSettingsEventInfoFieldNumber = 43,
+    kSourceLocationFieldNumber = 33,
+    kSourceLocationIidFieldNumber = 34,
+    kChromeMessagePumpFieldNumber = 35,
+    kChromeMojoEventInfoFieldNumber = 38,
+    kTimestampDeltaUsFieldNumber = 1,
+    kTimestampAbsoluteUsFieldNumber = 16,
+    kThreadTimeDeltaUsFieldNumber = 2,
+    kThreadTimeAbsoluteUsFieldNumber = 17,
+    kThreadInstructionCountDeltaFieldNumber = 8,
+    kThreadInstructionCountAbsoluteFieldNumber = 20,
+    kLegacyEventFieldNumber = 6,
+  };
+
+  TrackEvent();
+  ~TrackEvent() override;
+  TrackEvent(TrackEvent&&) noexcept;
+  TrackEvent& operator=(TrackEvent&&);
+  TrackEvent(const TrackEvent&);
+  TrackEvent& operator=(const TrackEvent&);
+  bool operator==(const TrackEvent&) const;
+  bool operator!=(const TrackEvent& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<uint64_t>& category_iids() const { return category_iids_; }
+  std::vector<uint64_t>* mutable_category_iids() { return &category_iids_; }
+  int category_iids_size() const { return static_cast<int>(category_iids_.size()); }
+  void clear_category_iids() { category_iids_.clear(); }
+  void add_category_iids(uint64_t value) { category_iids_.emplace_back(value); }
+  uint64_t* add_category_iids() { category_iids_.emplace_back(); return &category_iids_.back(); }
+
+  const std::vector<std::string>& categories() const { return categories_; }
+  std::vector<std::string>* mutable_categories() { return &categories_; }
+  int categories_size() const { return static_cast<int>(categories_.size()); }
+  void clear_categories() { categories_.clear(); }
+  void add_categories(std::string value) { categories_.emplace_back(value); }
+  std::string* add_categories() { categories_.emplace_back(); return &categories_.back(); }
+
+  bool has_name_iid() const { return _has_field_[10]; }
+  uint64_t name_iid() const { return name_iid_; }
+  void set_name_iid(uint64_t value) { name_iid_ = value; _has_field_.set(10); }
+
+  bool has_name() const { return _has_field_[23]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(23); }
+
+  bool has_type() const { return _has_field_[9]; }
+  TrackEvent_Type type() const { return type_; }
+  void set_type(TrackEvent_Type value) { type_ = value; _has_field_.set(9); }
+
+  bool has_track_uuid() const { return _has_field_[11]; }
+  uint64_t track_uuid() const { return track_uuid_; }
+  void set_track_uuid(uint64_t value) { track_uuid_ = value; _has_field_.set(11); }
+
+  bool has_counter_value() const { return _has_field_[30]; }
+  int64_t counter_value() const { return counter_value_; }
+  void set_counter_value(int64_t value) { counter_value_ = value; _has_field_.set(30); }
+
+  bool has_double_counter_value() const { return _has_field_[44]; }
+  double double_counter_value() const { return double_counter_value_; }
+  void set_double_counter_value(double value) { double_counter_value_ = value; _has_field_.set(44); }
+
+  const std::vector<uint64_t>& extra_counter_track_uuids() const { return extra_counter_track_uuids_; }
+  std::vector<uint64_t>* mutable_extra_counter_track_uuids() { return &extra_counter_track_uuids_; }
+  int extra_counter_track_uuids_size() const { return static_cast<int>(extra_counter_track_uuids_.size()); }
+  void clear_extra_counter_track_uuids() { extra_counter_track_uuids_.clear(); }
+  void add_extra_counter_track_uuids(uint64_t value) { extra_counter_track_uuids_.emplace_back(value); }
+  uint64_t* add_extra_counter_track_uuids() { extra_counter_track_uuids_.emplace_back(); return &extra_counter_track_uuids_.back(); }
+
+  const std::vector<int64_t>& extra_counter_values() const { return extra_counter_values_; }
+  std::vector<int64_t>* mutable_extra_counter_values() { return &extra_counter_values_; }
+  int extra_counter_values_size() const { return static_cast<int>(extra_counter_values_.size()); }
+  void clear_extra_counter_values() { extra_counter_values_.clear(); }
+  void add_extra_counter_values(int64_t value) { extra_counter_values_.emplace_back(value); }
+  int64_t* add_extra_counter_values() { extra_counter_values_.emplace_back(); return &extra_counter_values_.back(); }
+
+  const std::vector<uint64_t>& extra_double_counter_track_uuids() const { return extra_double_counter_track_uuids_; }
+  std::vector<uint64_t>* mutable_extra_double_counter_track_uuids() { return &extra_double_counter_track_uuids_; }
+  int extra_double_counter_track_uuids_size() const { return static_cast<int>(extra_double_counter_track_uuids_.size()); }
+  void clear_extra_double_counter_track_uuids() { extra_double_counter_track_uuids_.clear(); }
+  void add_extra_double_counter_track_uuids(uint64_t value) { extra_double_counter_track_uuids_.emplace_back(value); }
+  uint64_t* add_extra_double_counter_track_uuids() { extra_double_counter_track_uuids_.emplace_back(); return &extra_double_counter_track_uuids_.back(); }
+
+  const std::vector<double>& extra_double_counter_values() const { return extra_double_counter_values_; }
+  std::vector<double>* mutable_extra_double_counter_values() { return &extra_double_counter_values_; }
+  int extra_double_counter_values_size() const { return static_cast<int>(extra_double_counter_values_.size()); }
+  void clear_extra_double_counter_values() { extra_double_counter_values_.clear(); }
+  void add_extra_double_counter_values(double value) { extra_double_counter_values_.emplace_back(value); }
+  double* add_extra_double_counter_values() { extra_double_counter_values_.emplace_back(); return &extra_double_counter_values_.back(); }
+
+  const std::vector<uint64_t>& flow_ids() const { return flow_ids_; }
+  std::vector<uint64_t>* mutable_flow_ids() { return &flow_ids_; }
+  int flow_ids_size() const { return static_cast<int>(flow_ids_.size()); }
+  void clear_flow_ids() { flow_ids_.clear(); }
+  void add_flow_ids(uint64_t value) { flow_ids_.emplace_back(value); }
+  uint64_t* add_flow_ids() { flow_ids_.emplace_back(); return &flow_ids_.back(); }
+
+  const std::vector<uint64_t>& terminating_flow_ids() const { return terminating_flow_ids_; }
+  std::vector<uint64_t>* mutable_terminating_flow_ids() { return &terminating_flow_ids_; }
+  int terminating_flow_ids_size() const { return static_cast<int>(terminating_flow_ids_.size()); }
+  void clear_terminating_flow_ids() { terminating_flow_ids_.clear(); }
+  void add_terminating_flow_ids(uint64_t value) { terminating_flow_ids_.emplace_back(value); }
+  uint64_t* add_terminating_flow_ids() { terminating_flow_ids_.emplace_back(); return &terminating_flow_ids_.back(); }
+
+  const std::vector<DebugAnnotation>& debug_annotations() const { return debug_annotations_; }
+  std::vector<DebugAnnotation>* mutable_debug_annotations() { return &debug_annotations_; }
+  int debug_annotations_size() const;
+  void clear_debug_annotations();
+  DebugAnnotation* add_debug_annotations();
+
+  bool has_task_execution() const { return _has_field_[5]; }
+  const TaskExecution& task_execution() const { return *task_execution_; }
+  TaskExecution* mutable_task_execution() { _has_field_.set(5); return task_execution_.get(); }
+
+  bool has_log_message() const { return _has_field_[21]; }
+  const LogMessage& log_message() const { return *log_message_; }
+  LogMessage* mutable_log_message() { _has_field_.set(21); return log_message_.get(); }
+
+  bool has_cc_scheduler_state() const { return _has_field_[24]; }
+  const ChromeCompositorSchedulerState& cc_scheduler_state() const { return *cc_scheduler_state_; }
+  ChromeCompositorSchedulerState* mutable_cc_scheduler_state() { _has_field_.set(24); return cc_scheduler_state_.get(); }
+
+  bool has_chrome_user_event() const { return _has_field_[25]; }
+  const ChromeUserEvent& chrome_user_event() const { return *chrome_user_event_; }
+  ChromeUserEvent* mutable_chrome_user_event() { _has_field_.set(25); return chrome_user_event_.get(); }
+
+  bool has_chrome_keyed_service() const { return _has_field_[26]; }
+  const ChromeKeyedService& chrome_keyed_service() const { return *chrome_keyed_service_; }
+  ChromeKeyedService* mutable_chrome_keyed_service() { _has_field_.set(26); return chrome_keyed_service_.get(); }
+
+  bool has_chrome_legacy_ipc() const { return _has_field_[27]; }
+  const ChromeLegacyIpc& chrome_legacy_ipc() const { return *chrome_legacy_ipc_; }
+  ChromeLegacyIpc* mutable_chrome_legacy_ipc() { _has_field_.set(27); return chrome_legacy_ipc_.get(); }
+
+  bool has_chrome_histogram_sample() const { return _has_field_[28]; }
+  const ChromeHistogramSample& chrome_histogram_sample() const { return *chrome_histogram_sample_; }
+  ChromeHistogramSample* mutable_chrome_histogram_sample() { _has_field_.set(28); return chrome_histogram_sample_.get(); }
+
+  bool has_chrome_latency_info() const { return _has_field_[29]; }
+  const ChromeLatencyInfo& chrome_latency_info() const { return *chrome_latency_info_; }
+  ChromeLatencyInfo* mutable_chrome_latency_info() { _has_field_.set(29); return chrome_latency_info_.get(); }
+
+  bool has_chrome_frame_reporter() const { return _has_field_[32]; }
+  const ChromeFrameReporter& chrome_frame_reporter() const { return *chrome_frame_reporter_; }
+  ChromeFrameReporter* mutable_chrome_frame_reporter() { _has_field_.set(32); return chrome_frame_reporter_.get(); }
+
+  bool has_chrome_application_state_info() const { return _has_field_[39]; }
+  const ChromeApplicationStateInfo& chrome_application_state_info() const { return *chrome_application_state_info_; }
+  ChromeApplicationStateInfo* mutable_chrome_application_state_info() { _has_field_.set(39); return chrome_application_state_info_.get(); }
+
+  bool has_chrome_renderer_scheduler_state() const { return _has_field_[40]; }
+  const ChromeRendererSchedulerState& chrome_renderer_scheduler_state() const { return *chrome_renderer_scheduler_state_; }
+  ChromeRendererSchedulerState* mutable_chrome_renderer_scheduler_state() { _has_field_.set(40); return chrome_renderer_scheduler_state_.get(); }
+
+  bool has_chrome_window_handle_event_info() const { return _has_field_[41]; }
+  const ChromeWindowHandleEventInfo& chrome_window_handle_event_info() const { return *chrome_window_handle_event_info_; }
+  ChromeWindowHandleEventInfo* mutable_chrome_window_handle_event_info() { _has_field_.set(41); return chrome_window_handle_event_info_.get(); }
+
+  bool has_chrome_content_settings_event_info() const { return _has_field_[43]; }
+  const ChromeContentSettingsEventInfo& chrome_content_settings_event_info() const { return *chrome_content_settings_event_info_; }
+  ChromeContentSettingsEventInfo* mutable_chrome_content_settings_event_info() { _has_field_.set(43); return chrome_content_settings_event_info_.get(); }
+
+  bool has_source_location() const { return _has_field_[33]; }
+  const SourceLocation& source_location() const { return *source_location_; }
+  SourceLocation* mutable_source_location() { _has_field_.set(33); return source_location_.get(); }
+
+  bool has_source_location_iid() const { return _has_field_[34]; }
+  uint64_t source_location_iid() const { return source_location_iid_; }
+  void set_source_location_iid(uint64_t value) { source_location_iid_ = value; _has_field_.set(34); }
+
+  bool has_chrome_message_pump() const { return _has_field_[35]; }
+  const ChromeMessagePump& chrome_message_pump() const { return *chrome_message_pump_; }
+  ChromeMessagePump* mutable_chrome_message_pump() { _has_field_.set(35); return chrome_message_pump_.get(); }
+
+  bool has_chrome_mojo_event_info() const { return _has_field_[38]; }
+  const ChromeMojoEventInfo& chrome_mojo_event_info() const { return *chrome_mojo_event_info_; }
+  ChromeMojoEventInfo* mutable_chrome_mojo_event_info() { _has_field_.set(38); return chrome_mojo_event_info_.get(); }
+
+  bool has_timestamp_delta_us() const { return _has_field_[1]; }
+  int64_t timestamp_delta_us() const { return timestamp_delta_us_; }
+  void set_timestamp_delta_us(int64_t value) { timestamp_delta_us_ = value; _has_field_.set(1); }
+
+  bool has_timestamp_absolute_us() const { return _has_field_[16]; }
+  int64_t timestamp_absolute_us() const { return timestamp_absolute_us_; }
+  void set_timestamp_absolute_us(int64_t value) { timestamp_absolute_us_ = value; _has_field_.set(16); }
+
+  bool has_thread_time_delta_us() const { return _has_field_[2]; }
+  int64_t thread_time_delta_us() const { return thread_time_delta_us_; }
+  void set_thread_time_delta_us(int64_t value) { thread_time_delta_us_ = value; _has_field_.set(2); }
+
+  bool has_thread_time_absolute_us() const { return _has_field_[17]; }
+  int64_t thread_time_absolute_us() const { return thread_time_absolute_us_; }
+  void set_thread_time_absolute_us(int64_t value) { thread_time_absolute_us_ = value; _has_field_.set(17); }
+
+  bool has_thread_instruction_count_delta() const { return _has_field_[8]; }
+  int64_t thread_instruction_count_delta() const { return thread_instruction_count_delta_; }
+  void set_thread_instruction_count_delta(int64_t value) { thread_instruction_count_delta_ = value; _has_field_.set(8); }
+
+  bool has_thread_instruction_count_absolute() const { return _has_field_[20]; }
+  int64_t thread_instruction_count_absolute() const { return thread_instruction_count_absolute_; }
+  void set_thread_instruction_count_absolute(int64_t value) { thread_instruction_count_absolute_ = value; _has_field_.set(20); }
+
+  bool has_legacy_event() const { return _has_field_[6]; }
+  const TrackEvent_LegacyEvent& legacy_event() const { return *legacy_event_; }
+  TrackEvent_LegacyEvent* mutable_legacy_event() { _has_field_.set(6); return legacy_event_.get(); }
+
+ private:
+  std::vector<uint64_t> category_iids_;
+  std::vector<std::string> categories_;
+  uint64_t name_iid_{};
+  std::string name_{};
+  TrackEvent_Type type_{};
+  uint64_t track_uuid_{};
+  int64_t counter_value_{};
+  double double_counter_value_{};
+  std::vector<uint64_t> extra_counter_track_uuids_;
+  std::vector<int64_t> extra_counter_values_;
+  std::vector<uint64_t> extra_double_counter_track_uuids_;
+  std::vector<double> extra_double_counter_values_;
+  std::vector<uint64_t> flow_ids_;
+  std::vector<uint64_t> terminating_flow_ids_;
+  std::vector<DebugAnnotation> debug_annotations_;
+  ::protozero::CopyablePtr<TaskExecution> task_execution_;
+  ::protozero::CopyablePtr<LogMessage> log_message_;
+  ::protozero::CopyablePtr<ChromeCompositorSchedulerState> cc_scheduler_state_;
+  ::protozero::CopyablePtr<ChromeUserEvent> chrome_user_event_;
+  ::protozero::CopyablePtr<ChromeKeyedService> chrome_keyed_service_;
+  ::protozero::CopyablePtr<ChromeLegacyIpc> chrome_legacy_ipc_;
+  ::protozero::CopyablePtr<ChromeHistogramSample> chrome_histogram_sample_;
+  ::protozero::CopyablePtr<ChromeLatencyInfo> chrome_latency_info_;
+  ::protozero::CopyablePtr<ChromeFrameReporter> chrome_frame_reporter_;
+  ::protozero::CopyablePtr<ChromeApplicationStateInfo> chrome_application_state_info_;
+  ::protozero::CopyablePtr<ChromeRendererSchedulerState> chrome_renderer_scheduler_state_;
+  ::protozero::CopyablePtr<ChromeWindowHandleEventInfo> chrome_window_handle_event_info_;
+  ::protozero::CopyablePtr<ChromeContentSettingsEventInfo> chrome_content_settings_event_info_;
+  ::protozero::CopyablePtr<SourceLocation> source_location_;
+  uint64_t source_location_iid_{};
+  ::protozero::CopyablePtr<ChromeMessagePump> chrome_message_pump_;
+  ::protozero::CopyablePtr<ChromeMojoEventInfo> chrome_mojo_event_info_;
+  int64_t timestamp_delta_us_{};
+  int64_t timestamp_absolute_us_{};
+  int64_t thread_time_delta_us_{};
+  int64_t thread_time_absolute_us_{};
+  int64_t thread_instruction_count_delta_{};
+  int64_t thread_instruction_count_absolute_{};
+  ::protozero::CopyablePtr<TrackEvent_LegacyEvent> legacy_event_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<47> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TrackEvent_LegacyEvent : public ::protozero::CppMessageObj {
+ public:
+  using FlowDirection = TrackEvent_LegacyEvent_FlowDirection;
+  static constexpr auto FLOW_UNSPECIFIED = TrackEvent_LegacyEvent_FlowDirection_FLOW_UNSPECIFIED;
+  static constexpr auto FLOW_IN = TrackEvent_LegacyEvent_FlowDirection_FLOW_IN;
+  static constexpr auto FLOW_OUT = TrackEvent_LegacyEvent_FlowDirection_FLOW_OUT;
+  static constexpr auto FLOW_INOUT = TrackEvent_LegacyEvent_FlowDirection_FLOW_INOUT;
+  static constexpr auto FlowDirection_MIN = TrackEvent_LegacyEvent_FlowDirection_FLOW_UNSPECIFIED;
+  static constexpr auto FlowDirection_MAX = TrackEvent_LegacyEvent_FlowDirection_FLOW_INOUT;
+  using InstantEventScope = TrackEvent_LegacyEvent_InstantEventScope;
+  static constexpr auto SCOPE_UNSPECIFIED = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_UNSPECIFIED;
+  static constexpr auto SCOPE_GLOBAL = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_GLOBAL;
+  static constexpr auto SCOPE_PROCESS = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_PROCESS;
+  static constexpr auto SCOPE_THREAD = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_THREAD;
+  static constexpr auto InstantEventScope_MIN = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_UNSPECIFIED;
+  static constexpr auto InstantEventScope_MAX = TrackEvent_LegacyEvent_InstantEventScope_SCOPE_THREAD;
+  enum FieldNumbers {
+    kNameIidFieldNumber = 1,
+    kPhaseFieldNumber = 2,
+    kDurationUsFieldNumber = 3,
+    kThreadDurationUsFieldNumber = 4,
+    kThreadInstructionDeltaFieldNumber = 15,
+    kUnscopedIdFieldNumber = 6,
+    kLocalIdFieldNumber = 10,
+    kGlobalIdFieldNumber = 11,
+    kIdScopeFieldNumber = 7,
+    kUseAsyncTtsFieldNumber = 9,
+    kBindIdFieldNumber = 8,
+    kBindToEnclosingFieldNumber = 12,
+    kFlowDirectionFieldNumber = 13,
+    kInstantEventScopeFieldNumber = 14,
+    kPidOverrideFieldNumber = 18,
+    kTidOverrideFieldNumber = 19,
+  };
+
+  TrackEvent_LegacyEvent();
+  ~TrackEvent_LegacyEvent() override;
+  TrackEvent_LegacyEvent(TrackEvent_LegacyEvent&&) noexcept;
+  TrackEvent_LegacyEvent& operator=(TrackEvent_LegacyEvent&&);
+  TrackEvent_LegacyEvent(const TrackEvent_LegacyEvent&);
+  TrackEvent_LegacyEvent& operator=(const TrackEvent_LegacyEvent&);
+  bool operator==(const TrackEvent_LegacyEvent&) const;
+  bool operator!=(const TrackEvent_LegacyEvent& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name_iid() const { return _has_field_[1]; }
+  uint64_t name_iid() const { return name_iid_; }
+  void set_name_iid(uint64_t value) { name_iid_ = value; _has_field_.set(1); }
+
+  bool has_phase() const { return _has_field_[2]; }
+  int32_t phase() const { return phase_; }
+  void set_phase(int32_t value) { phase_ = value; _has_field_.set(2); }
+
+  bool has_duration_us() const { return _has_field_[3]; }
+  int64_t duration_us() const { return duration_us_; }
+  void set_duration_us(int64_t value) { duration_us_ = value; _has_field_.set(3); }
+
+  bool has_thread_duration_us() const { return _has_field_[4]; }
+  int64_t thread_duration_us() const { return thread_duration_us_; }
+  void set_thread_duration_us(int64_t value) { thread_duration_us_ = value; _has_field_.set(4); }
+
+  bool has_thread_instruction_delta() const { return _has_field_[15]; }
+  int64_t thread_instruction_delta() const { return thread_instruction_delta_; }
+  void set_thread_instruction_delta(int64_t value) { thread_instruction_delta_ = value; _has_field_.set(15); }
+
+  bool has_unscoped_id() const { return _has_field_[6]; }
+  uint64_t unscoped_id() const { return unscoped_id_; }
+  void set_unscoped_id(uint64_t value) { unscoped_id_ = value; _has_field_.set(6); }
+
+  bool has_local_id() const { return _has_field_[10]; }
+  uint64_t local_id() const { return local_id_; }
+  void set_local_id(uint64_t value) { local_id_ = value; _has_field_.set(10); }
+
+  bool has_global_id() const { return _has_field_[11]; }
+  uint64_t global_id() const { return global_id_; }
+  void set_global_id(uint64_t value) { global_id_ = value; _has_field_.set(11); }
+
+  bool has_id_scope() const { return _has_field_[7]; }
+  const std::string& id_scope() const { return id_scope_; }
+  void set_id_scope(const std::string& value) { id_scope_ = value; _has_field_.set(7); }
+
+  bool has_use_async_tts() const { return _has_field_[9]; }
+  bool use_async_tts() const { return use_async_tts_; }
+  void set_use_async_tts(bool value) { use_async_tts_ = value; _has_field_.set(9); }
+
+  bool has_bind_id() const { return _has_field_[8]; }
+  uint64_t bind_id() const { return bind_id_; }
+  void set_bind_id(uint64_t value) { bind_id_ = value; _has_field_.set(8); }
+
+  bool has_bind_to_enclosing() const { return _has_field_[12]; }
+  bool bind_to_enclosing() const { return bind_to_enclosing_; }
+  void set_bind_to_enclosing(bool value) { bind_to_enclosing_ = value; _has_field_.set(12); }
+
+  bool has_flow_direction() const { return _has_field_[13]; }
+  TrackEvent_LegacyEvent_FlowDirection flow_direction() const { return flow_direction_; }
+  void set_flow_direction(TrackEvent_LegacyEvent_FlowDirection value) { flow_direction_ = value; _has_field_.set(13); }
+
+  bool has_instant_event_scope() const { return _has_field_[14]; }
+  TrackEvent_LegacyEvent_InstantEventScope instant_event_scope() const { return instant_event_scope_; }
+  void set_instant_event_scope(TrackEvent_LegacyEvent_InstantEventScope value) { instant_event_scope_ = value; _has_field_.set(14); }
+
+  bool has_pid_override() const { return _has_field_[18]; }
+  int32_t pid_override() const { return pid_override_; }
+  void set_pid_override(int32_t value) { pid_override_ = value; _has_field_.set(18); }
+
+  bool has_tid_override() const { return _has_field_[19]; }
+  int32_t tid_override() const { return tid_override_; }
+  void set_tid_override(int32_t value) { tid_override_ = value; _has_field_.set(19); }
+
+ private:
+  uint64_t name_iid_{};
+  int32_t phase_{};
+  int64_t duration_us_{};
+  int64_t thread_duration_us_{};
+  int64_t thread_instruction_delta_{};
+  uint64_t unscoped_id_{};
+  uint64_t local_id_{};
+  uint64_t global_id_{};
+  std::string id_scope_{};
+  bool use_async_tts_{};
+  uint64_t bind_id_{};
+  bool bind_to_enclosing_{};
+  TrackEvent_LegacyEvent_FlowDirection flow_direction_{};
+  TrackEvent_LegacyEvent_InstantEventScope instant_event_scope_{};
+  int32_t pid_override_{};
+  int32_t tid_override_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<20> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_TRACE_TRACK_EVENT_TRACK_EVENT_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/android/android_log_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_LOG_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_LOG_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class AndroidLogConfig;
+enum AndroidLogId : int;
+enum AndroidLogPriority : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT AndroidLogConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kLogIdsFieldNumber = 1,
+    kMinPrioFieldNumber = 3,
+    kFilterTagsFieldNumber = 4,
+  };
+
+  AndroidLogConfig();
+  ~AndroidLogConfig() override;
+  AndroidLogConfig(AndroidLogConfig&&) noexcept;
+  AndroidLogConfig& operator=(AndroidLogConfig&&);
+  AndroidLogConfig(const AndroidLogConfig&);
+  AndroidLogConfig& operator=(const AndroidLogConfig&);
+  bool operator==(const AndroidLogConfig&) const;
+  bool operator!=(const AndroidLogConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<AndroidLogId>& log_ids() const { return log_ids_; }
+  std::vector<AndroidLogId>* mutable_log_ids() { return &log_ids_; }
+  int log_ids_size() const { return static_cast<int>(log_ids_.size()); }
+  void clear_log_ids() { log_ids_.clear(); }
+  void add_log_ids(AndroidLogId value) { log_ids_.emplace_back(value); }
+  AndroidLogId* add_log_ids() { log_ids_.emplace_back(); return &log_ids_.back(); }
+
+  bool has_min_prio() const { return _has_field_[3]; }
+  AndroidLogPriority min_prio() const { return min_prio_; }
+  void set_min_prio(AndroidLogPriority value) { min_prio_ = value; _has_field_.set(3); }
+
+  const std::vector<std::string>& filter_tags() const { return filter_tags_; }
+  std::vector<std::string>* mutable_filter_tags() { return &filter_tags_; }
+  int filter_tags_size() const { return static_cast<int>(filter_tags_.size()); }
+  void clear_filter_tags() { filter_tags_.clear(); }
+  void add_filter_tags(std::string value) { filter_tags_.emplace_back(value); }
+  std::string* add_filter_tags() { filter_tags_.emplace_back(); return &filter_tags_.back(); }
+
+ private:
+  std::vector<AndroidLogId> log_ids_;
+  AndroidLogPriority min_prio_{};
+  std::vector<std::string> filter_tags_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_LOG_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/android/android_polled_state_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_POLLED_STATE_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_POLLED_STATE_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class AndroidPolledStateConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT AndroidPolledStateConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kPollMsFieldNumber = 1,
+  };
+
+  AndroidPolledStateConfig();
+  ~AndroidPolledStateConfig() override;
+  AndroidPolledStateConfig(AndroidPolledStateConfig&&) noexcept;
+  AndroidPolledStateConfig& operator=(AndroidPolledStateConfig&&);
+  AndroidPolledStateConfig(const AndroidPolledStateConfig&);
+  AndroidPolledStateConfig& operator=(const AndroidPolledStateConfig&);
+  bool operator==(const AndroidPolledStateConfig&) const;
+  bool operator!=(const AndroidPolledStateConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_poll_ms() const { return _has_field_[1]; }
+  uint32_t poll_ms() const { return poll_ms_; }
+  void set_poll_ms(uint32_t value) { poll_ms_ = value; _has_field_.set(1); }
+
+ private:
+  uint32_t poll_ms_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_ANDROID_POLLED_STATE_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/android/packages_list_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_PACKAGES_LIST_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_PACKAGES_LIST_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class PackagesListConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT PackagesListConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kPackageNameFilterFieldNumber = 1,
+  };
+
+  PackagesListConfig();
+  ~PackagesListConfig() override;
+  PackagesListConfig(PackagesListConfig&&) noexcept;
+  PackagesListConfig& operator=(PackagesListConfig&&);
+  PackagesListConfig(const PackagesListConfig&);
+  PackagesListConfig& operator=(const PackagesListConfig&);
+  bool operator==(const PackagesListConfig&) const;
+  bool operator!=(const PackagesListConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<std::string>& package_name_filter() const { return package_name_filter_; }
+  std::vector<std::string>* mutable_package_name_filter() { return &package_name_filter_; }
+  int package_name_filter_size() const { return static_cast<int>(package_name_filter_.size()); }
+  void clear_package_name_filter() { package_name_filter_.clear(); }
+  void add_package_name_filter(std::string value) { package_name_filter_.emplace_back(value); }
+  std::string* add_package_name_filter() { package_name_filter_.emplace_back(); return &package_name_filter_.back(); }
+
+ private:
+  std::vector<std::string> package_name_filter_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_ANDROID_PACKAGES_LIST_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/ftrace/ftrace_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_FTRACE_FTRACE_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_FTRACE_FTRACE_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class FtraceConfig;
+class FtraceConfig_CompactSchedConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT FtraceConfig : public ::protozero::CppMessageObj {
+ public:
+  using CompactSchedConfig = FtraceConfig_CompactSchedConfig;
+  enum FieldNumbers {
+    kFtraceEventsFieldNumber = 1,
+    kAtraceCategoriesFieldNumber = 2,
+    kAtraceAppsFieldNumber = 3,
+    kBufferSizeKbFieldNumber = 10,
+    kDrainPeriodMsFieldNumber = 11,
+    kCompactSchedFieldNumber = 12,
+    kSymbolizeKsymsFieldNumber = 13,
+    kInitializeKsymsSynchronouslyForTestingFieldNumber = 14,
+  };
+
+  FtraceConfig();
+  ~FtraceConfig() override;
+  FtraceConfig(FtraceConfig&&) noexcept;
+  FtraceConfig& operator=(FtraceConfig&&);
+  FtraceConfig(const FtraceConfig&);
+  FtraceConfig& operator=(const FtraceConfig&);
+  bool operator==(const FtraceConfig&) const;
+  bool operator!=(const FtraceConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<std::string>& ftrace_events() const { return ftrace_events_; }
+  std::vector<std::string>* mutable_ftrace_events() { return &ftrace_events_; }
+  int ftrace_events_size() const { return static_cast<int>(ftrace_events_.size()); }
+  void clear_ftrace_events() { ftrace_events_.clear(); }
+  void add_ftrace_events(std::string value) { ftrace_events_.emplace_back(value); }
+  std::string* add_ftrace_events() { ftrace_events_.emplace_back(); return &ftrace_events_.back(); }
+
+  const std::vector<std::string>& atrace_categories() const { return atrace_categories_; }
+  std::vector<std::string>* mutable_atrace_categories() { return &atrace_categories_; }
+  int atrace_categories_size() const { return static_cast<int>(atrace_categories_.size()); }
+  void clear_atrace_categories() { atrace_categories_.clear(); }
+  void add_atrace_categories(std::string value) { atrace_categories_.emplace_back(value); }
+  std::string* add_atrace_categories() { atrace_categories_.emplace_back(); return &atrace_categories_.back(); }
+
+  const std::vector<std::string>& atrace_apps() const { return atrace_apps_; }
+  std::vector<std::string>* mutable_atrace_apps() { return &atrace_apps_; }
+  int atrace_apps_size() const { return static_cast<int>(atrace_apps_.size()); }
+  void clear_atrace_apps() { atrace_apps_.clear(); }
+  void add_atrace_apps(std::string value) { atrace_apps_.emplace_back(value); }
+  std::string* add_atrace_apps() { atrace_apps_.emplace_back(); return &atrace_apps_.back(); }
+
+  bool has_buffer_size_kb() const { return _has_field_[10]; }
+  uint32_t buffer_size_kb() const { return buffer_size_kb_; }
+  void set_buffer_size_kb(uint32_t value) { buffer_size_kb_ = value; _has_field_.set(10); }
+
+  bool has_drain_period_ms() const { return _has_field_[11]; }
+  uint32_t drain_period_ms() const { return drain_period_ms_; }
+  void set_drain_period_ms(uint32_t value) { drain_period_ms_ = value; _has_field_.set(11); }
+
+  bool has_compact_sched() const { return _has_field_[12]; }
+  const FtraceConfig_CompactSchedConfig& compact_sched() const { return *compact_sched_; }
+  FtraceConfig_CompactSchedConfig* mutable_compact_sched() { _has_field_.set(12); return compact_sched_.get(); }
+
+  bool has_symbolize_ksyms() const { return _has_field_[13]; }
+  bool symbolize_ksyms() const { return symbolize_ksyms_; }
+  void set_symbolize_ksyms(bool value) { symbolize_ksyms_ = value; _has_field_.set(13); }
+
+  bool has_initialize_ksyms_synchronously_for_testing() const { return _has_field_[14]; }
+  bool initialize_ksyms_synchronously_for_testing() const { return initialize_ksyms_synchronously_for_testing_; }
+  void set_initialize_ksyms_synchronously_for_testing(bool value) { initialize_ksyms_synchronously_for_testing_ = value; _has_field_.set(14); }
+
+ private:
+  std::vector<std::string> ftrace_events_;
+  std::vector<std::string> atrace_categories_;
+  std::vector<std::string> atrace_apps_;
+  uint32_t buffer_size_kb_{};
+  uint32_t drain_period_ms_{};
+  ::protozero::CopyablePtr<FtraceConfig_CompactSchedConfig> compact_sched_;
+  bool symbolize_ksyms_{};
+  bool initialize_ksyms_synchronously_for_testing_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<15> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT FtraceConfig_CompactSchedConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kEnabledFieldNumber = 1,
+  };
+
+  FtraceConfig_CompactSchedConfig();
+  ~FtraceConfig_CompactSchedConfig() override;
+  FtraceConfig_CompactSchedConfig(FtraceConfig_CompactSchedConfig&&) noexcept;
+  FtraceConfig_CompactSchedConfig& operator=(FtraceConfig_CompactSchedConfig&&);
+  FtraceConfig_CompactSchedConfig(const FtraceConfig_CompactSchedConfig&);
+  FtraceConfig_CompactSchedConfig& operator=(const FtraceConfig_CompactSchedConfig&);
+  bool operator==(const FtraceConfig_CompactSchedConfig&) const;
+  bool operator!=(const FtraceConfig_CompactSchedConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_enabled() const { return _has_field_[1]; }
+  bool enabled() const { return enabled_; }
+  void set_enabled(bool value) { enabled_ = value; _has_field_.set(1); }
+
+ private:
+  bool enabled_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_FTRACE_FTRACE_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/gpu/gpu_counter_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_GPU_COUNTER_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_GPU_COUNTER_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class GpuCounterConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT GpuCounterConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kCounterPeriodNsFieldNumber = 1,
+    kCounterIdsFieldNumber = 2,
+    kInstrumentedSamplingFieldNumber = 3,
+    kFixGpuClockFieldNumber = 4,
+  };
+
+  GpuCounterConfig();
+  ~GpuCounterConfig() override;
+  GpuCounterConfig(GpuCounterConfig&&) noexcept;
+  GpuCounterConfig& operator=(GpuCounterConfig&&);
+  GpuCounterConfig(const GpuCounterConfig&);
+  GpuCounterConfig& operator=(const GpuCounterConfig&);
+  bool operator==(const GpuCounterConfig&) const;
+  bool operator!=(const GpuCounterConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_counter_period_ns() const { return _has_field_[1]; }
+  uint64_t counter_period_ns() const { return counter_period_ns_; }
+  void set_counter_period_ns(uint64_t value) { counter_period_ns_ = value; _has_field_.set(1); }
+
+  const std::vector<uint32_t>& counter_ids() const { return counter_ids_; }
+  std::vector<uint32_t>* mutable_counter_ids() { return &counter_ids_; }
+  int counter_ids_size() const { return static_cast<int>(counter_ids_.size()); }
+  void clear_counter_ids() { counter_ids_.clear(); }
+  void add_counter_ids(uint32_t value) { counter_ids_.emplace_back(value); }
+  uint32_t* add_counter_ids() { counter_ids_.emplace_back(); return &counter_ids_.back(); }
+
+  bool has_instrumented_sampling() const { return _has_field_[3]; }
+  bool instrumented_sampling() const { return instrumented_sampling_; }
+  void set_instrumented_sampling(bool value) { instrumented_sampling_ = value; _has_field_.set(3); }
+
+  bool has_fix_gpu_clock() const { return _has_field_[4]; }
+  bool fix_gpu_clock() const { return fix_gpu_clock_; }
+  void set_fix_gpu_clock(bool value) { fix_gpu_clock_ = value; _has_field_.set(4); }
+
+ private:
+  uint64_t counter_period_ns_{};
+  std::vector<uint32_t> counter_ids_;
+  bool instrumented_sampling_{};
+  bool fix_gpu_clock_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_GPU_COUNTER_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/gpu/vulkan_memory_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_VULKAN_MEMORY_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_VULKAN_MEMORY_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class VulkanMemoryConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT VulkanMemoryConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTrackDriverMemoryUsageFieldNumber = 1,
+    kTrackDeviceMemoryUsageFieldNumber = 2,
+  };
+
+  VulkanMemoryConfig();
+  ~VulkanMemoryConfig() override;
+  VulkanMemoryConfig(VulkanMemoryConfig&&) noexcept;
+  VulkanMemoryConfig& operator=(VulkanMemoryConfig&&);
+  VulkanMemoryConfig(const VulkanMemoryConfig&);
+  VulkanMemoryConfig& operator=(const VulkanMemoryConfig&);
+  bool operator==(const VulkanMemoryConfig&) const;
+  bool operator!=(const VulkanMemoryConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_track_driver_memory_usage() const { return _has_field_[1]; }
+  bool track_driver_memory_usage() const { return track_driver_memory_usage_; }
+  void set_track_driver_memory_usage(bool value) { track_driver_memory_usage_ = value; _has_field_.set(1); }
+
+  bool has_track_device_memory_usage() const { return _has_field_[2]; }
+  bool track_device_memory_usage() const { return track_device_memory_usage_; }
+  void set_track_device_memory_usage(bool value) { track_device_memory_usage_ = value; _has_field_.set(2); }
+
+ private:
+  bool track_driver_memory_usage_{};
+  bool track_device_memory_usage_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_GPU_VULKAN_MEMORY_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/inode_file/inode_file_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INODE_FILE_INODE_FILE_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INODE_FILE_INODE_FILE_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class InodeFileConfig;
+class InodeFileConfig_MountPointMappingEntry;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT InodeFileConfig : public ::protozero::CppMessageObj {
+ public:
+  using MountPointMappingEntry = InodeFileConfig_MountPointMappingEntry;
+  enum FieldNumbers {
+    kScanIntervalMsFieldNumber = 1,
+    kScanDelayMsFieldNumber = 2,
+    kScanBatchSizeFieldNumber = 3,
+    kDoNotScanFieldNumber = 4,
+    kScanMountPointsFieldNumber = 5,
+    kMountPointMappingFieldNumber = 6,
+  };
+
+  InodeFileConfig();
+  ~InodeFileConfig() override;
+  InodeFileConfig(InodeFileConfig&&) noexcept;
+  InodeFileConfig& operator=(InodeFileConfig&&);
+  InodeFileConfig(const InodeFileConfig&);
+  InodeFileConfig& operator=(const InodeFileConfig&);
+  bool operator==(const InodeFileConfig&) const;
+  bool operator!=(const InodeFileConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_scan_interval_ms() const { return _has_field_[1]; }
+  uint32_t scan_interval_ms() const { return scan_interval_ms_; }
+  void set_scan_interval_ms(uint32_t value) { scan_interval_ms_ = value; _has_field_.set(1); }
+
+  bool has_scan_delay_ms() const { return _has_field_[2]; }
+  uint32_t scan_delay_ms() const { return scan_delay_ms_; }
+  void set_scan_delay_ms(uint32_t value) { scan_delay_ms_ = value; _has_field_.set(2); }
+
+  bool has_scan_batch_size() const { return _has_field_[3]; }
+  uint32_t scan_batch_size() const { return scan_batch_size_; }
+  void set_scan_batch_size(uint32_t value) { scan_batch_size_ = value; _has_field_.set(3); }
+
+  bool has_do_not_scan() const { return _has_field_[4]; }
+  bool do_not_scan() const { return do_not_scan_; }
+  void set_do_not_scan(bool value) { do_not_scan_ = value; _has_field_.set(4); }
+
+  const std::vector<std::string>& scan_mount_points() const { return scan_mount_points_; }
+  std::vector<std::string>* mutable_scan_mount_points() { return &scan_mount_points_; }
+  int scan_mount_points_size() const { return static_cast<int>(scan_mount_points_.size()); }
+  void clear_scan_mount_points() { scan_mount_points_.clear(); }
+  void add_scan_mount_points(std::string value) { scan_mount_points_.emplace_back(value); }
+  std::string* add_scan_mount_points() { scan_mount_points_.emplace_back(); return &scan_mount_points_.back(); }
+
+  const std::vector<InodeFileConfig_MountPointMappingEntry>& mount_point_mapping() const { return mount_point_mapping_; }
+  std::vector<InodeFileConfig_MountPointMappingEntry>* mutable_mount_point_mapping() { return &mount_point_mapping_; }
+  int mount_point_mapping_size() const;
+  void clear_mount_point_mapping();
+  InodeFileConfig_MountPointMappingEntry* add_mount_point_mapping();
+
+ private:
+  uint32_t scan_interval_ms_{};
+  uint32_t scan_delay_ms_{};
+  uint32_t scan_batch_size_{};
+  bool do_not_scan_{};
+  std::vector<std::string> scan_mount_points_;
+  std::vector<InodeFileConfig_MountPointMappingEntry> mount_point_mapping_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<7> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT InodeFileConfig_MountPointMappingEntry : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kMountpointFieldNumber = 1,
+    kScanRootsFieldNumber = 2,
+  };
+
+  InodeFileConfig_MountPointMappingEntry();
+  ~InodeFileConfig_MountPointMappingEntry() override;
+  InodeFileConfig_MountPointMappingEntry(InodeFileConfig_MountPointMappingEntry&&) noexcept;
+  InodeFileConfig_MountPointMappingEntry& operator=(InodeFileConfig_MountPointMappingEntry&&);
+  InodeFileConfig_MountPointMappingEntry(const InodeFileConfig_MountPointMappingEntry&);
+  InodeFileConfig_MountPointMappingEntry& operator=(const InodeFileConfig_MountPointMappingEntry&);
+  bool operator==(const InodeFileConfig_MountPointMappingEntry&) const;
+  bool operator!=(const InodeFileConfig_MountPointMappingEntry& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_mountpoint() const { return _has_field_[1]; }
+  const std::string& mountpoint() const { return mountpoint_; }
+  void set_mountpoint(const std::string& value) { mountpoint_ = value; _has_field_.set(1); }
+
+  const std::vector<std::string>& scan_roots() const { return scan_roots_; }
+  std::vector<std::string>* mutable_scan_roots() { return &scan_roots_; }
+  int scan_roots_size() const { return static_cast<int>(scan_roots_.size()); }
+  void clear_scan_roots() { scan_roots_.clear(); }
+  void add_scan_roots(std::string value) { scan_roots_.emplace_back(value); }
+  std::string* add_scan_roots() { scan_roots_.emplace_back(); return &scan_roots_.back(); }
+
+ private:
+  std::string mountpoint_{};
+  std::vector<std::string> scan_roots_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INODE_FILE_INODE_FILE_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/interceptors/console_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTORS_CONSOLE_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTORS_CONSOLE_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ConsoleConfig;
+enum ConsoleConfig_Output : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ConsoleConfig_Output : int {
+  ConsoleConfig_Output_OUTPUT_UNSPECIFIED = 0,
+  ConsoleConfig_Output_OUTPUT_STDOUT = 1,
+  ConsoleConfig_Output_OUTPUT_STDERR = 2,
+};
+
+class PERFETTO_EXPORT ConsoleConfig : public ::protozero::CppMessageObj {
+ public:
+  using Output = ConsoleConfig_Output;
+  static constexpr auto OUTPUT_UNSPECIFIED = ConsoleConfig_Output_OUTPUT_UNSPECIFIED;
+  static constexpr auto OUTPUT_STDOUT = ConsoleConfig_Output_OUTPUT_STDOUT;
+  static constexpr auto OUTPUT_STDERR = ConsoleConfig_Output_OUTPUT_STDERR;
+  static constexpr auto Output_MIN = ConsoleConfig_Output_OUTPUT_UNSPECIFIED;
+  static constexpr auto Output_MAX = ConsoleConfig_Output_OUTPUT_STDERR;
+  enum FieldNumbers {
+    kOutputFieldNumber = 1,
+    kEnableColorsFieldNumber = 2,
+  };
+
+  ConsoleConfig();
+  ~ConsoleConfig() override;
+  ConsoleConfig(ConsoleConfig&&) noexcept;
+  ConsoleConfig& operator=(ConsoleConfig&&);
+  ConsoleConfig(const ConsoleConfig&);
+  ConsoleConfig& operator=(const ConsoleConfig&);
+  bool operator==(const ConsoleConfig&) const;
+  bool operator!=(const ConsoleConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_output() const { return _has_field_[1]; }
+  ConsoleConfig_Output output() const { return output_; }
+  void set_output(ConsoleConfig_Output value) { output_ = value; _has_field_.set(1); }
+
+  bool has_enable_colors() const { return _has_field_[2]; }
+  bool enable_colors() const { return enable_colors_; }
+  void set_enable_colors(bool value) { enable_colors_ = value; _has_field_.set(2); }
+
+ private:
+  ConsoleConfig_Output output_{};
+  bool enable_colors_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTORS_CONSOLE_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/power/android_power_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_POWER_ANDROID_POWER_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_POWER_ANDROID_POWER_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class AndroidPowerConfig;
+enum AndroidPowerConfig_BatteryCounters : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum AndroidPowerConfig_BatteryCounters : int {
+  AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_UNSPECIFIED = 0,
+  AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CHARGE = 1,
+  AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CAPACITY_PERCENT = 2,
+  AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT = 3,
+  AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT_AVG = 4,
+};
+
+class PERFETTO_EXPORT AndroidPowerConfig : public ::protozero::CppMessageObj {
+ public:
+  using BatteryCounters = AndroidPowerConfig_BatteryCounters;
+  static constexpr auto BATTERY_COUNTER_UNSPECIFIED = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_UNSPECIFIED;
+  static constexpr auto BATTERY_COUNTER_CHARGE = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CHARGE;
+  static constexpr auto BATTERY_COUNTER_CAPACITY_PERCENT = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CAPACITY_PERCENT;
+  static constexpr auto BATTERY_COUNTER_CURRENT = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT;
+  static constexpr auto BATTERY_COUNTER_CURRENT_AVG = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT_AVG;
+  static constexpr auto BatteryCounters_MIN = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_UNSPECIFIED;
+  static constexpr auto BatteryCounters_MAX = AndroidPowerConfig_BatteryCounters_BATTERY_COUNTER_CURRENT_AVG;
+  enum FieldNumbers {
+    kBatteryPollMsFieldNumber = 1,
+    kBatteryCountersFieldNumber = 2,
+    kCollectPowerRailsFieldNumber = 3,
+    kCollectEnergyEstimationBreakdownFieldNumber = 4,
+  };
+
+  AndroidPowerConfig();
+  ~AndroidPowerConfig() override;
+  AndroidPowerConfig(AndroidPowerConfig&&) noexcept;
+  AndroidPowerConfig& operator=(AndroidPowerConfig&&);
+  AndroidPowerConfig(const AndroidPowerConfig&);
+  AndroidPowerConfig& operator=(const AndroidPowerConfig&);
+  bool operator==(const AndroidPowerConfig&) const;
+  bool operator!=(const AndroidPowerConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_battery_poll_ms() const { return _has_field_[1]; }
+  uint32_t battery_poll_ms() const { return battery_poll_ms_; }
+  void set_battery_poll_ms(uint32_t value) { battery_poll_ms_ = value; _has_field_.set(1); }
+
+  const std::vector<AndroidPowerConfig_BatteryCounters>& battery_counters() const { return battery_counters_; }
+  std::vector<AndroidPowerConfig_BatteryCounters>* mutable_battery_counters() { return &battery_counters_; }
+  int battery_counters_size() const { return static_cast<int>(battery_counters_.size()); }
+  void clear_battery_counters() { battery_counters_.clear(); }
+  void add_battery_counters(AndroidPowerConfig_BatteryCounters value) { battery_counters_.emplace_back(value); }
+  AndroidPowerConfig_BatteryCounters* add_battery_counters() { battery_counters_.emplace_back(); return &battery_counters_.back(); }
+
+  bool has_collect_power_rails() const { return _has_field_[3]; }
+  bool collect_power_rails() const { return collect_power_rails_; }
+  void set_collect_power_rails(bool value) { collect_power_rails_ = value; _has_field_.set(3); }
+
+  bool has_collect_energy_estimation_breakdown() const { return _has_field_[4]; }
+  bool collect_energy_estimation_breakdown() const { return collect_energy_estimation_breakdown_; }
+  void set_collect_energy_estimation_breakdown(bool value) { collect_energy_estimation_breakdown_ = value; _has_field_.set(4); }
+
+ private:
+  uint32_t battery_poll_ms_{};
+  std::vector<AndroidPowerConfig_BatteryCounters> battery_counters_;
+  bool collect_power_rails_{};
+  bool collect_energy_estimation_breakdown_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_POWER_ANDROID_POWER_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/process_stats/process_stats_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROCESS_STATS_PROCESS_STATS_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROCESS_STATS_PROCESS_STATS_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ProcessStatsConfig;
+enum ProcessStatsConfig_Quirks : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ProcessStatsConfig_Quirks : int {
+  ProcessStatsConfig_Quirks_QUIRKS_UNSPECIFIED = 0,
+  ProcessStatsConfig_Quirks_DISABLE_INITIAL_DUMP = 1,
+  ProcessStatsConfig_Quirks_DISABLE_ON_DEMAND = 2,
+};
+
+class PERFETTO_EXPORT ProcessStatsConfig : public ::protozero::CppMessageObj {
+ public:
+  using Quirks = ProcessStatsConfig_Quirks;
+  static constexpr auto QUIRKS_UNSPECIFIED = ProcessStatsConfig_Quirks_QUIRKS_UNSPECIFIED;
+  static constexpr auto DISABLE_INITIAL_DUMP = ProcessStatsConfig_Quirks_DISABLE_INITIAL_DUMP;
+  static constexpr auto DISABLE_ON_DEMAND = ProcessStatsConfig_Quirks_DISABLE_ON_DEMAND;
+  static constexpr auto Quirks_MIN = ProcessStatsConfig_Quirks_QUIRKS_UNSPECIFIED;
+  static constexpr auto Quirks_MAX = ProcessStatsConfig_Quirks_DISABLE_ON_DEMAND;
+  enum FieldNumbers {
+    kQuirksFieldNumber = 1,
+    kScanAllProcessesOnStartFieldNumber = 2,
+    kRecordThreadNamesFieldNumber = 3,
+    kProcStatsPollMsFieldNumber = 4,
+    kProcStatsCacheTtlMsFieldNumber = 6,
+    kRecordThreadTimeInStateFieldNumber = 7,
+    kThreadTimeInStateCacheSizeFieldNumber = 8,
+  };
+
+  ProcessStatsConfig();
+  ~ProcessStatsConfig() override;
+  ProcessStatsConfig(ProcessStatsConfig&&) noexcept;
+  ProcessStatsConfig& operator=(ProcessStatsConfig&&);
+  ProcessStatsConfig(const ProcessStatsConfig&);
+  ProcessStatsConfig& operator=(const ProcessStatsConfig&);
+  bool operator==(const ProcessStatsConfig&) const;
+  bool operator!=(const ProcessStatsConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<ProcessStatsConfig_Quirks>& quirks() const { return quirks_; }
+  std::vector<ProcessStatsConfig_Quirks>* mutable_quirks() { return &quirks_; }
+  int quirks_size() const { return static_cast<int>(quirks_.size()); }
+  void clear_quirks() { quirks_.clear(); }
+  void add_quirks(ProcessStatsConfig_Quirks value) { quirks_.emplace_back(value); }
+  ProcessStatsConfig_Quirks* add_quirks() { quirks_.emplace_back(); return &quirks_.back(); }
+
+  bool has_scan_all_processes_on_start() const { return _has_field_[2]; }
+  bool scan_all_processes_on_start() const { return scan_all_processes_on_start_; }
+  void set_scan_all_processes_on_start(bool value) { scan_all_processes_on_start_ = value; _has_field_.set(2); }
+
+  bool has_record_thread_names() const { return _has_field_[3]; }
+  bool record_thread_names() const { return record_thread_names_; }
+  void set_record_thread_names(bool value) { record_thread_names_ = value; _has_field_.set(3); }
+
+  bool has_proc_stats_poll_ms() const { return _has_field_[4]; }
+  uint32_t proc_stats_poll_ms() const { return proc_stats_poll_ms_; }
+  void set_proc_stats_poll_ms(uint32_t value) { proc_stats_poll_ms_ = value; _has_field_.set(4); }
+
+  bool has_proc_stats_cache_ttl_ms() const { return _has_field_[6]; }
+  uint32_t proc_stats_cache_ttl_ms() const { return proc_stats_cache_ttl_ms_; }
+  void set_proc_stats_cache_ttl_ms(uint32_t value) { proc_stats_cache_ttl_ms_ = value; _has_field_.set(6); }
+
+  bool has_record_thread_time_in_state() const { return _has_field_[7]; }
+  bool record_thread_time_in_state() const { return record_thread_time_in_state_; }
+  void set_record_thread_time_in_state(bool value) { record_thread_time_in_state_ = value; _has_field_.set(7); }
+
+  bool has_thread_time_in_state_cache_size() const { return _has_field_[8]; }
+  uint32_t thread_time_in_state_cache_size() const { return thread_time_in_state_cache_size_; }
+  void set_thread_time_in_state_cache_size(uint32_t value) { thread_time_in_state_cache_size_ = value; _has_field_.set(8); }
+
+ private:
+  std::vector<ProcessStatsConfig_Quirks> quirks_;
+  bool scan_all_processes_on_start_{};
+  bool record_thread_names_{};
+  uint32_t proc_stats_poll_ms_{};
+  uint32_t proc_stats_cache_ttl_ms_{};
+  bool record_thread_time_in_state_{};
+  uint32_t thread_time_in_state_cache_size_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<9> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROCESS_STATS_PROCESS_STATS_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/profiling/heapprofd_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_HEAPPROFD_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_HEAPPROFD_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class HeapprofdConfig;
+class HeapprofdConfig_ContinuousDumpConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT HeapprofdConfig : public ::protozero::CppMessageObj {
+ public:
+  using ContinuousDumpConfig = HeapprofdConfig_ContinuousDumpConfig;
+  enum FieldNumbers {
+    kSamplingIntervalBytesFieldNumber = 1,
+    kAdaptiveSamplingShmemThresholdFieldNumber = 24,
+    kAdaptiveSamplingMaxSamplingIntervalBytesFieldNumber = 25,
+    kProcessCmdlineFieldNumber = 2,
+    kPidFieldNumber = 4,
+    kTargetInstalledByFieldNumber = 26,
+    kHeapsFieldNumber = 20,
+    kExcludeHeapsFieldNumber = 27,
+    kStreamAllocationsFieldNumber = 23,
+    kHeapSamplingIntervalsFieldNumber = 22,
+    kAllHeapsFieldNumber = 21,
+    kAllFieldNumber = 5,
+    kMinAnonymousMemoryKbFieldNumber = 15,
+    kMaxHeapprofdMemoryKbFieldNumber = 16,
+    kMaxHeapprofdCpuSecsFieldNumber = 17,
+    kSkipSymbolPrefixFieldNumber = 7,
+    kContinuousDumpConfigFieldNumber = 6,
+    kShmemSizeBytesFieldNumber = 8,
+    kBlockClientFieldNumber = 9,
+    kBlockClientTimeoutUsFieldNumber = 14,
+    kNoStartupFieldNumber = 10,
+    kNoRunningFieldNumber = 11,
+    kDumpAtMaxFieldNumber = 13,
+    kDisableForkTeardownFieldNumber = 18,
+    kDisableVforkDetectionFieldNumber = 19,
+  };
+
+  HeapprofdConfig();
+  ~HeapprofdConfig() override;
+  HeapprofdConfig(HeapprofdConfig&&) noexcept;
+  HeapprofdConfig& operator=(HeapprofdConfig&&);
+  HeapprofdConfig(const HeapprofdConfig&);
+  HeapprofdConfig& operator=(const HeapprofdConfig&);
+  bool operator==(const HeapprofdConfig&) const;
+  bool operator!=(const HeapprofdConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_sampling_interval_bytes() const { return _has_field_[1]; }
+  uint64_t sampling_interval_bytes() const { return sampling_interval_bytes_; }
+  void set_sampling_interval_bytes(uint64_t value) { sampling_interval_bytes_ = value; _has_field_.set(1); }
+
+  bool has_adaptive_sampling_shmem_threshold() const { return _has_field_[24]; }
+  uint64_t adaptive_sampling_shmem_threshold() const { return adaptive_sampling_shmem_threshold_; }
+  void set_adaptive_sampling_shmem_threshold(uint64_t value) { adaptive_sampling_shmem_threshold_ = value; _has_field_.set(24); }
+
+  bool has_adaptive_sampling_max_sampling_interval_bytes() const { return _has_field_[25]; }
+  uint64_t adaptive_sampling_max_sampling_interval_bytes() const { return adaptive_sampling_max_sampling_interval_bytes_; }
+  void set_adaptive_sampling_max_sampling_interval_bytes(uint64_t value) { adaptive_sampling_max_sampling_interval_bytes_ = value; _has_field_.set(25); }
+
+  const std::vector<std::string>& process_cmdline() const { return process_cmdline_; }
+  std::vector<std::string>* mutable_process_cmdline() { return &process_cmdline_; }
+  int process_cmdline_size() const { return static_cast<int>(process_cmdline_.size()); }
+  void clear_process_cmdline() { process_cmdline_.clear(); }
+  void add_process_cmdline(std::string value) { process_cmdline_.emplace_back(value); }
+  std::string* add_process_cmdline() { process_cmdline_.emplace_back(); return &process_cmdline_.back(); }
+
+  const std::vector<uint64_t>& pid() const { return pid_; }
+  std::vector<uint64_t>* mutable_pid() { return &pid_; }
+  int pid_size() const { return static_cast<int>(pid_.size()); }
+  void clear_pid() { pid_.clear(); }
+  void add_pid(uint64_t value) { pid_.emplace_back(value); }
+  uint64_t* add_pid() { pid_.emplace_back(); return &pid_.back(); }
+
+  const std::vector<std::string>& target_installed_by() const { return target_installed_by_; }
+  std::vector<std::string>* mutable_target_installed_by() { return &target_installed_by_; }
+  int target_installed_by_size() const { return static_cast<int>(target_installed_by_.size()); }
+  void clear_target_installed_by() { target_installed_by_.clear(); }
+  void add_target_installed_by(std::string value) { target_installed_by_.emplace_back(value); }
+  std::string* add_target_installed_by() { target_installed_by_.emplace_back(); return &target_installed_by_.back(); }
+
+  const std::vector<std::string>& heaps() const { return heaps_; }
+  std::vector<std::string>* mutable_heaps() { return &heaps_; }
+  int heaps_size() const { return static_cast<int>(heaps_.size()); }
+  void clear_heaps() { heaps_.clear(); }
+  void add_heaps(std::string value) { heaps_.emplace_back(value); }
+  std::string* add_heaps() { heaps_.emplace_back(); return &heaps_.back(); }
+
+  const std::vector<std::string>& exclude_heaps() const { return exclude_heaps_; }
+  std::vector<std::string>* mutable_exclude_heaps() { return &exclude_heaps_; }
+  int exclude_heaps_size() const { return static_cast<int>(exclude_heaps_.size()); }
+  void clear_exclude_heaps() { exclude_heaps_.clear(); }
+  void add_exclude_heaps(std::string value) { exclude_heaps_.emplace_back(value); }
+  std::string* add_exclude_heaps() { exclude_heaps_.emplace_back(); return &exclude_heaps_.back(); }
+
+  bool has_stream_allocations() const { return _has_field_[23]; }
+  bool stream_allocations() const { return stream_allocations_; }
+  void set_stream_allocations(bool value) { stream_allocations_ = value; _has_field_.set(23); }
+
+  const std::vector<uint64_t>& heap_sampling_intervals() const { return heap_sampling_intervals_; }
+  std::vector<uint64_t>* mutable_heap_sampling_intervals() { return &heap_sampling_intervals_; }
+  int heap_sampling_intervals_size() const { return static_cast<int>(heap_sampling_intervals_.size()); }
+  void clear_heap_sampling_intervals() { heap_sampling_intervals_.clear(); }
+  void add_heap_sampling_intervals(uint64_t value) { heap_sampling_intervals_.emplace_back(value); }
+  uint64_t* add_heap_sampling_intervals() { heap_sampling_intervals_.emplace_back(); return &heap_sampling_intervals_.back(); }
+
+  bool has_all_heaps() const { return _has_field_[21]; }
+  bool all_heaps() const { return all_heaps_; }
+  void set_all_heaps(bool value) { all_heaps_ = value; _has_field_.set(21); }
+
+  bool has_all() const { return _has_field_[5]; }
+  bool all() const { return all_; }
+  void set_all(bool value) { all_ = value; _has_field_.set(5); }
+
+  bool has_min_anonymous_memory_kb() const { return _has_field_[15]; }
+  uint32_t min_anonymous_memory_kb() const { return min_anonymous_memory_kb_; }
+  void set_min_anonymous_memory_kb(uint32_t value) { min_anonymous_memory_kb_ = value; _has_field_.set(15); }
+
+  bool has_max_heapprofd_memory_kb() const { return _has_field_[16]; }
+  uint32_t max_heapprofd_memory_kb() const { return max_heapprofd_memory_kb_; }
+  void set_max_heapprofd_memory_kb(uint32_t value) { max_heapprofd_memory_kb_ = value; _has_field_.set(16); }
+
+  bool has_max_heapprofd_cpu_secs() const { return _has_field_[17]; }
+  uint64_t max_heapprofd_cpu_secs() const { return max_heapprofd_cpu_secs_; }
+  void set_max_heapprofd_cpu_secs(uint64_t value) { max_heapprofd_cpu_secs_ = value; _has_field_.set(17); }
+
+  const std::vector<std::string>& skip_symbol_prefix() const { return skip_symbol_prefix_; }
+  std::vector<std::string>* mutable_skip_symbol_prefix() { return &skip_symbol_prefix_; }
+  int skip_symbol_prefix_size() const { return static_cast<int>(skip_symbol_prefix_.size()); }
+  void clear_skip_symbol_prefix() { skip_symbol_prefix_.clear(); }
+  void add_skip_symbol_prefix(std::string value) { skip_symbol_prefix_.emplace_back(value); }
+  std::string* add_skip_symbol_prefix() { skip_symbol_prefix_.emplace_back(); return &skip_symbol_prefix_.back(); }
+
+  bool has_continuous_dump_config() const { return _has_field_[6]; }
+  const HeapprofdConfig_ContinuousDumpConfig& continuous_dump_config() const { return *continuous_dump_config_; }
+  HeapprofdConfig_ContinuousDumpConfig* mutable_continuous_dump_config() { _has_field_.set(6); return continuous_dump_config_.get(); }
+
+  bool has_shmem_size_bytes() const { return _has_field_[8]; }
+  uint64_t shmem_size_bytes() const { return shmem_size_bytes_; }
+  void set_shmem_size_bytes(uint64_t value) { shmem_size_bytes_ = value; _has_field_.set(8); }
+
+  bool has_block_client() const { return _has_field_[9]; }
+  bool block_client() const { return block_client_; }
+  void set_block_client(bool value) { block_client_ = value; _has_field_.set(9); }
+
+  bool has_block_client_timeout_us() const { return _has_field_[14]; }
+  uint32_t block_client_timeout_us() const { return block_client_timeout_us_; }
+  void set_block_client_timeout_us(uint32_t value) { block_client_timeout_us_ = value; _has_field_.set(14); }
+
+  bool has_no_startup() const { return _has_field_[10]; }
+  bool no_startup() const { return no_startup_; }
+  void set_no_startup(bool value) { no_startup_ = value; _has_field_.set(10); }
+
+  bool has_no_running() const { return _has_field_[11]; }
+  bool no_running() const { return no_running_; }
+  void set_no_running(bool value) { no_running_ = value; _has_field_.set(11); }
+
+  bool has_dump_at_max() const { return _has_field_[13]; }
+  bool dump_at_max() const { return dump_at_max_; }
+  void set_dump_at_max(bool value) { dump_at_max_ = value; _has_field_.set(13); }
+
+  bool has_disable_fork_teardown() const { return _has_field_[18]; }
+  bool disable_fork_teardown() const { return disable_fork_teardown_; }
+  void set_disable_fork_teardown(bool value) { disable_fork_teardown_ = value; _has_field_.set(18); }
+
+  bool has_disable_vfork_detection() const { return _has_field_[19]; }
+  bool disable_vfork_detection() const { return disable_vfork_detection_; }
+  void set_disable_vfork_detection(bool value) { disable_vfork_detection_ = value; _has_field_.set(19); }
+
+ private:
+  uint64_t sampling_interval_bytes_{};
+  uint64_t adaptive_sampling_shmem_threshold_{};
+  uint64_t adaptive_sampling_max_sampling_interval_bytes_{};
+  std::vector<std::string> process_cmdline_;
+  std::vector<uint64_t> pid_;
+  std::vector<std::string> target_installed_by_;
+  std::vector<std::string> heaps_;
+  std::vector<std::string> exclude_heaps_;
+  bool stream_allocations_{};
+  std::vector<uint64_t> heap_sampling_intervals_;
+  bool all_heaps_{};
+  bool all_{};
+  uint32_t min_anonymous_memory_kb_{};
+  uint32_t max_heapprofd_memory_kb_{};
+  uint64_t max_heapprofd_cpu_secs_{};
+  std::vector<std::string> skip_symbol_prefix_;
+  ::protozero::CopyablePtr<HeapprofdConfig_ContinuousDumpConfig> continuous_dump_config_;
+  uint64_t shmem_size_bytes_{};
+  bool block_client_{};
+  uint32_t block_client_timeout_us_{};
+  bool no_startup_{};
+  bool no_running_{};
+  bool dump_at_max_{};
+  bool disable_fork_teardown_{};
+  bool disable_vfork_detection_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<28> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT HeapprofdConfig_ContinuousDumpConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDumpPhaseMsFieldNumber = 5,
+    kDumpIntervalMsFieldNumber = 6,
+  };
+
+  HeapprofdConfig_ContinuousDumpConfig();
+  ~HeapprofdConfig_ContinuousDumpConfig() override;
+  HeapprofdConfig_ContinuousDumpConfig(HeapprofdConfig_ContinuousDumpConfig&&) noexcept;
+  HeapprofdConfig_ContinuousDumpConfig& operator=(HeapprofdConfig_ContinuousDumpConfig&&);
+  HeapprofdConfig_ContinuousDumpConfig(const HeapprofdConfig_ContinuousDumpConfig&);
+  HeapprofdConfig_ContinuousDumpConfig& operator=(const HeapprofdConfig_ContinuousDumpConfig&);
+  bool operator==(const HeapprofdConfig_ContinuousDumpConfig&) const;
+  bool operator!=(const HeapprofdConfig_ContinuousDumpConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_dump_phase_ms() const { return _has_field_[5]; }
+  uint32_t dump_phase_ms() const { return dump_phase_ms_; }
+  void set_dump_phase_ms(uint32_t value) { dump_phase_ms_ = value; _has_field_.set(5); }
+
+  bool has_dump_interval_ms() const { return _has_field_[6]; }
+  uint32_t dump_interval_ms() const { return dump_interval_ms_; }
+  void set_dump_interval_ms(uint32_t value) { dump_interval_ms_ = value; _has_field_.set(6); }
+
+ private:
+  uint32_t dump_phase_ms_{};
+  uint32_t dump_interval_ms_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<7> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_HEAPPROFD_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/profiling/java_hprof_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_JAVA_HPROF_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_JAVA_HPROF_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class JavaHprofConfig;
+class JavaHprofConfig_ContinuousDumpConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT JavaHprofConfig : public ::protozero::CppMessageObj {
+ public:
+  using ContinuousDumpConfig = JavaHprofConfig_ContinuousDumpConfig;
+  enum FieldNumbers {
+    kProcessCmdlineFieldNumber = 1,
+    kPidFieldNumber = 2,
+    kTargetInstalledByFieldNumber = 7,
+    kContinuousDumpConfigFieldNumber = 3,
+    kMinAnonymousMemoryKbFieldNumber = 4,
+    kDumpSmapsFieldNumber = 5,
+    kIgnoredTypesFieldNumber = 6,
+  };
+
+  JavaHprofConfig();
+  ~JavaHprofConfig() override;
+  JavaHprofConfig(JavaHprofConfig&&) noexcept;
+  JavaHprofConfig& operator=(JavaHprofConfig&&);
+  JavaHprofConfig(const JavaHprofConfig&);
+  JavaHprofConfig& operator=(const JavaHprofConfig&);
+  bool operator==(const JavaHprofConfig&) const;
+  bool operator!=(const JavaHprofConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<std::string>& process_cmdline() const { return process_cmdline_; }
+  std::vector<std::string>* mutable_process_cmdline() { return &process_cmdline_; }
+  int process_cmdline_size() const { return static_cast<int>(process_cmdline_.size()); }
+  void clear_process_cmdline() { process_cmdline_.clear(); }
+  void add_process_cmdline(std::string value) { process_cmdline_.emplace_back(value); }
+  std::string* add_process_cmdline() { process_cmdline_.emplace_back(); return &process_cmdline_.back(); }
+
+  const std::vector<uint64_t>& pid() const { return pid_; }
+  std::vector<uint64_t>* mutable_pid() { return &pid_; }
+  int pid_size() const { return static_cast<int>(pid_.size()); }
+  void clear_pid() { pid_.clear(); }
+  void add_pid(uint64_t value) { pid_.emplace_back(value); }
+  uint64_t* add_pid() { pid_.emplace_back(); return &pid_.back(); }
+
+  const std::vector<std::string>& target_installed_by() const { return target_installed_by_; }
+  std::vector<std::string>* mutable_target_installed_by() { return &target_installed_by_; }
+  int target_installed_by_size() const { return static_cast<int>(target_installed_by_.size()); }
+  void clear_target_installed_by() { target_installed_by_.clear(); }
+  void add_target_installed_by(std::string value) { target_installed_by_.emplace_back(value); }
+  std::string* add_target_installed_by() { target_installed_by_.emplace_back(); return &target_installed_by_.back(); }
+
+  bool has_continuous_dump_config() const { return _has_field_[3]; }
+  const JavaHprofConfig_ContinuousDumpConfig& continuous_dump_config() const { return *continuous_dump_config_; }
+  JavaHprofConfig_ContinuousDumpConfig* mutable_continuous_dump_config() { _has_field_.set(3); return continuous_dump_config_.get(); }
+
+  bool has_min_anonymous_memory_kb() const { return _has_field_[4]; }
+  uint32_t min_anonymous_memory_kb() const { return min_anonymous_memory_kb_; }
+  void set_min_anonymous_memory_kb(uint32_t value) { min_anonymous_memory_kb_ = value; _has_field_.set(4); }
+
+  bool has_dump_smaps() const { return _has_field_[5]; }
+  bool dump_smaps() const { return dump_smaps_; }
+  void set_dump_smaps(bool value) { dump_smaps_ = value; _has_field_.set(5); }
+
+  const std::vector<std::string>& ignored_types() const { return ignored_types_; }
+  std::vector<std::string>* mutable_ignored_types() { return &ignored_types_; }
+  int ignored_types_size() const { return static_cast<int>(ignored_types_.size()); }
+  void clear_ignored_types() { ignored_types_.clear(); }
+  void add_ignored_types(std::string value) { ignored_types_.emplace_back(value); }
+  std::string* add_ignored_types() { ignored_types_.emplace_back(); return &ignored_types_.back(); }
+
+ private:
+  std::vector<std::string> process_cmdline_;
+  std::vector<uint64_t> pid_;
+  std::vector<std::string> target_installed_by_;
+  ::protozero::CopyablePtr<JavaHprofConfig_ContinuousDumpConfig> continuous_dump_config_;
+  uint32_t min_anonymous_memory_kb_{};
+  bool dump_smaps_{};
+  std::vector<std::string> ignored_types_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<8> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT JavaHprofConfig_ContinuousDumpConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDumpPhaseMsFieldNumber = 1,
+    kDumpIntervalMsFieldNumber = 2,
+  };
+
+  JavaHprofConfig_ContinuousDumpConfig();
+  ~JavaHprofConfig_ContinuousDumpConfig() override;
+  JavaHprofConfig_ContinuousDumpConfig(JavaHprofConfig_ContinuousDumpConfig&&) noexcept;
+  JavaHprofConfig_ContinuousDumpConfig& operator=(JavaHprofConfig_ContinuousDumpConfig&&);
+  JavaHprofConfig_ContinuousDumpConfig(const JavaHprofConfig_ContinuousDumpConfig&);
+  JavaHprofConfig_ContinuousDumpConfig& operator=(const JavaHprofConfig_ContinuousDumpConfig&);
+  bool operator==(const JavaHprofConfig_ContinuousDumpConfig&) const;
+  bool operator!=(const JavaHprofConfig_ContinuousDumpConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_dump_phase_ms() const { return _has_field_[1]; }
+  uint32_t dump_phase_ms() const { return dump_phase_ms_; }
+  void set_dump_phase_ms(uint32_t value) { dump_phase_ms_ = value; _has_field_.set(1); }
+
+  bool has_dump_interval_ms() const { return _has_field_[2]; }
+  uint32_t dump_interval_ms() const { return dump_interval_ms_; }
+  void set_dump_interval_ms(uint32_t value) { dump_interval_ms_ = value; _has_field_.set(2); }
+
+ private:
+  uint32_t dump_phase_ms_{};
+  uint32_t dump_interval_ms_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_JAVA_HPROF_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/profiling/perf_event_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_PERF_EVENT_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_PERF_EVENT_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class PerfEventConfig;
+class PerfEventConfig_CallstackSampling;
+class PerfEventConfig_Scope;
+class PerfEvents_Timebase;
+class PerfEvents_Tracepoint;
+enum PerfEvents_Counter : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT PerfEventConfig : public ::protozero::CppMessageObj {
+ public:
+  using CallstackSampling = PerfEventConfig_CallstackSampling;
+  using Scope = PerfEventConfig_Scope;
+  enum FieldNumbers {
+    kTimebaseFieldNumber = 15,
+    kCallstackSamplingFieldNumber = 16,
+    kRingBufferReadPeriodMsFieldNumber = 8,
+    kRingBufferPagesFieldNumber = 3,
+    kMaxEnqueuedFootprintKbFieldNumber = 17,
+    kMaxDaemonMemoryKbFieldNumber = 13,
+    kRemoteDescriptorTimeoutMsFieldNumber = 9,
+    kUnwindStateClearPeriodMsFieldNumber = 10,
+    kAllCpusFieldNumber = 1,
+    kSamplingFrequencyFieldNumber = 2,
+    kKernelFramesFieldNumber = 12,
+    kTargetPidFieldNumber = 4,
+    kTargetCmdlineFieldNumber = 5,
+    kTargetInstalledByFieldNumber = 18,
+    kExcludePidFieldNumber = 6,
+    kExcludeCmdlineFieldNumber = 7,
+    kAdditionalCmdlineCountFieldNumber = 11,
+  };
+
+  PerfEventConfig();
+  ~PerfEventConfig() override;
+  PerfEventConfig(PerfEventConfig&&) noexcept;
+  PerfEventConfig& operator=(PerfEventConfig&&);
+  PerfEventConfig(const PerfEventConfig&);
+  PerfEventConfig& operator=(const PerfEventConfig&);
+  bool operator==(const PerfEventConfig&) const;
+  bool operator!=(const PerfEventConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_timebase() const { return _has_field_[15]; }
+  const PerfEvents_Timebase& timebase() const { return *timebase_; }
+  PerfEvents_Timebase* mutable_timebase() { _has_field_.set(15); return timebase_.get(); }
+
+  bool has_callstack_sampling() const { return _has_field_[16]; }
+  const PerfEventConfig_CallstackSampling& callstack_sampling() const { return *callstack_sampling_; }
+  PerfEventConfig_CallstackSampling* mutable_callstack_sampling() { _has_field_.set(16); return callstack_sampling_.get(); }
+
+  bool has_ring_buffer_read_period_ms() const { return _has_field_[8]; }
+  uint32_t ring_buffer_read_period_ms() const { return ring_buffer_read_period_ms_; }
+  void set_ring_buffer_read_period_ms(uint32_t value) { ring_buffer_read_period_ms_ = value; _has_field_.set(8); }
+
+  bool has_ring_buffer_pages() const { return _has_field_[3]; }
+  uint32_t ring_buffer_pages() const { return ring_buffer_pages_; }
+  void set_ring_buffer_pages(uint32_t value) { ring_buffer_pages_ = value; _has_field_.set(3); }
+
+  bool has_max_enqueued_footprint_kb() const { return _has_field_[17]; }
+  uint64_t max_enqueued_footprint_kb() const { return max_enqueued_footprint_kb_; }
+  void set_max_enqueued_footprint_kb(uint64_t value) { max_enqueued_footprint_kb_ = value; _has_field_.set(17); }
+
+  bool has_max_daemon_memory_kb() const { return _has_field_[13]; }
+  uint32_t max_daemon_memory_kb() const { return max_daemon_memory_kb_; }
+  void set_max_daemon_memory_kb(uint32_t value) { max_daemon_memory_kb_ = value; _has_field_.set(13); }
+
+  bool has_remote_descriptor_timeout_ms() const { return _has_field_[9]; }
+  uint32_t remote_descriptor_timeout_ms() const { return remote_descriptor_timeout_ms_; }
+  void set_remote_descriptor_timeout_ms(uint32_t value) { remote_descriptor_timeout_ms_ = value; _has_field_.set(9); }
+
+  bool has_unwind_state_clear_period_ms() const { return _has_field_[10]; }
+  uint32_t unwind_state_clear_period_ms() const { return unwind_state_clear_period_ms_; }
+  void set_unwind_state_clear_period_ms(uint32_t value) { unwind_state_clear_period_ms_ = value; _has_field_.set(10); }
+
+  bool has_all_cpus() const { return _has_field_[1]; }
+  bool all_cpus() const { return all_cpus_; }
+  void set_all_cpus(bool value) { all_cpus_ = value; _has_field_.set(1); }
+
+  bool has_sampling_frequency() const { return _has_field_[2]; }
+  uint32_t sampling_frequency() const { return sampling_frequency_; }
+  void set_sampling_frequency(uint32_t value) { sampling_frequency_ = value; _has_field_.set(2); }
+
+  bool has_kernel_frames() const { return _has_field_[12]; }
+  bool kernel_frames() const { return kernel_frames_; }
+  void set_kernel_frames(bool value) { kernel_frames_ = value; _has_field_.set(12); }
+
+  const std::vector<int32_t>& target_pid() const { return target_pid_; }
+  std::vector<int32_t>* mutable_target_pid() { return &target_pid_; }
+  int target_pid_size() const { return static_cast<int>(target_pid_.size()); }
+  void clear_target_pid() { target_pid_.clear(); }
+  void add_target_pid(int32_t value) { target_pid_.emplace_back(value); }
+  int32_t* add_target_pid() { target_pid_.emplace_back(); return &target_pid_.back(); }
+
+  const std::vector<std::string>& target_cmdline() const { return target_cmdline_; }
+  std::vector<std::string>* mutable_target_cmdline() { return &target_cmdline_; }
+  int target_cmdline_size() const { return static_cast<int>(target_cmdline_.size()); }
+  void clear_target_cmdline() { target_cmdline_.clear(); }
+  void add_target_cmdline(std::string value) { target_cmdline_.emplace_back(value); }
+  std::string* add_target_cmdline() { target_cmdline_.emplace_back(); return &target_cmdline_.back(); }
+
+  const std::vector<std::string>& target_installed_by() const { return target_installed_by_; }
+  std::vector<std::string>* mutable_target_installed_by() { return &target_installed_by_; }
+  int target_installed_by_size() const { return static_cast<int>(target_installed_by_.size()); }
+  void clear_target_installed_by() { target_installed_by_.clear(); }
+  void add_target_installed_by(std::string value) { target_installed_by_.emplace_back(value); }
+  std::string* add_target_installed_by() { target_installed_by_.emplace_back(); return &target_installed_by_.back(); }
+
+  const std::vector<int32_t>& exclude_pid() const { return exclude_pid_; }
+  std::vector<int32_t>* mutable_exclude_pid() { return &exclude_pid_; }
+  int exclude_pid_size() const { return static_cast<int>(exclude_pid_.size()); }
+  void clear_exclude_pid() { exclude_pid_.clear(); }
+  void add_exclude_pid(int32_t value) { exclude_pid_.emplace_back(value); }
+  int32_t* add_exclude_pid() { exclude_pid_.emplace_back(); return &exclude_pid_.back(); }
+
+  const std::vector<std::string>& exclude_cmdline() const { return exclude_cmdline_; }
+  std::vector<std::string>* mutable_exclude_cmdline() { return &exclude_cmdline_; }
+  int exclude_cmdline_size() const { return static_cast<int>(exclude_cmdline_.size()); }
+  void clear_exclude_cmdline() { exclude_cmdline_.clear(); }
+  void add_exclude_cmdline(std::string value) { exclude_cmdline_.emplace_back(value); }
+  std::string* add_exclude_cmdline() { exclude_cmdline_.emplace_back(); return &exclude_cmdline_.back(); }
+
+  bool has_additional_cmdline_count() const { return _has_field_[11]; }
+  uint32_t additional_cmdline_count() const { return additional_cmdline_count_; }
+  void set_additional_cmdline_count(uint32_t value) { additional_cmdline_count_ = value; _has_field_.set(11); }
+
+ private:
+  ::protozero::CopyablePtr<PerfEvents_Timebase> timebase_;
+  ::protozero::CopyablePtr<PerfEventConfig_CallstackSampling> callstack_sampling_;
+  uint32_t ring_buffer_read_period_ms_{};
+  uint32_t ring_buffer_pages_{};
+  uint64_t max_enqueued_footprint_kb_{};
+  uint32_t max_daemon_memory_kb_{};
+  uint32_t remote_descriptor_timeout_ms_{};
+  uint32_t unwind_state_clear_period_ms_{};
+  bool all_cpus_{};
+  uint32_t sampling_frequency_{};
+  bool kernel_frames_{};
+  std::vector<int32_t> target_pid_;
+  std::vector<std::string> target_cmdline_;
+  std::vector<std::string> target_installed_by_;
+  std::vector<int32_t> exclude_pid_;
+  std::vector<std::string> exclude_cmdline_;
+  uint32_t additional_cmdline_count_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<19> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT PerfEventConfig_CallstackSampling : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kScopeFieldNumber = 1,
+    kKernelFramesFieldNumber = 2,
+  };
+
+  PerfEventConfig_CallstackSampling();
+  ~PerfEventConfig_CallstackSampling() override;
+  PerfEventConfig_CallstackSampling(PerfEventConfig_CallstackSampling&&) noexcept;
+  PerfEventConfig_CallstackSampling& operator=(PerfEventConfig_CallstackSampling&&);
+  PerfEventConfig_CallstackSampling(const PerfEventConfig_CallstackSampling&);
+  PerfEventConfig_CallstackSampling& operator=(const PerfEventConfig_CallstackSampling&);
+  bool operator==(const PerfEventConfig_CallstackSampling&) const;
+  bool operator!=(const PerfEventConfig_CallstackSampling& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_scope() const { return _has_field_[1]; }
+  const PerfEventConfig_Scope& scope() const { return *scope_; }
+  PerfEventConfig_Scope* mutable_scope() { _has_field_.set(1); return scope_.get(); }
+
+  bool has_kernel_frames() const { return _has_field_[2]; }
+  bool kernel_frames() const { return kernel_frames_; }
+  void set_kernel_frames(bool value) { kernel_frames_ = value; _has_field_.set(2); }
+
+ private:
+  ::protozero::CopyablePtr<PerfEventConfig_Scope> scope_;
+  bool kernel_frames_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT PerfEventConfig_Scope : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTargetPidFieldNumber = 1,
+    kTargetCmdlineFieldNumber = 2,
+    kExcludePidFieldNumber = 3,
+    kExcludeCmdlineFieldNumber = 4,
+    kAdditionalCmdlineCountFieldNumber = 5,
+  };
+
+  PerfEventConfig_Scope();
+  ~PerfEventConfig_Scope() override;
+  PerfEventConfig_Scope(PerfEventConfig_Scope&&) noexcept;
+  PerfEventConfig_Scope& operator=(PerfEventConfig_Scope&&);
+  PerfEventConfig_Scope(const PerfEventConfig_Scope&);
+  PerfEventConfig_Scope& operator=(const PerfEventConfig_Scope&);
+  bool operator==(const PerfEventConfig_Scope&) const;
+  bool operator!=(const PerfEventConfig_Scope& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<int32_t>& target_pid() const { return target_pid_; }
+  std::vector<int32_t>* mutable_target_pid() { return &target_pid_; }
+  int target_pid_size() const { return static_cast<int>(target_pid_.size()); }
+  void clear_target_pid() { target_pid_.clear(); }
+  void add_target_pid(int32_t value) { target_pid_.emplace_back(value); }
+  int32_t* add_target_pid() { target_pid_.emplace_back(); return &target_pid_.back(); }
+
+  const std::vector<std::string>& target_cmdline() const { return target_cmdline_; }
+  std::vector<std::string>* mutable_target_cmdline() { return &target_cmdline_; }
+  int target_cmdline_size() const { return static_cast<int>(target_cmdline_.size()); }
+  void clear_target_cmdline() { target_cmdline_.clear(); }
+  void add_target_cmdline(std::string value) { target_cmdline_.emplace_back(value); }
+  std::string* add_target_cmdline() { target_cmdline_.emplace_back(); return &target_cmdline_.back(); }
+
+  const std::vector<int32_t>& exclude_pid() const { return exclude_pid_; }
+  std::vector<int32_t>* mutable_exclude_pid() { return &exclude_pid_; }
+  int exclude_pid_size() const { return static_cast<int>(exclude_pid_.size()); }
+  void clear_exclude_pid() { exclude_pid_.clear(); }
+  void add_exclude_pid(int32_t value) { exclude_pid_.emplace_back(value); }
+  int32_t* add_exclude_pid() { exclude_pid_.emplace_back(); return &exclude_pid_.back(); }
+
+  const std::vector<std::string>& exclude_cmdline() const { return exclude_cmdline_; }
+  std::vector<std::string>* mutable_exclude_cmdline() { return &exclude_cmdline_; }
+  int exclude_cmdline_size() const { return static_cast<int>(exclude_cmdline_.size()); }
+  void clear_exclude_cmdline() { exclude_cmdline_.clear(); }
+  void add_exclude_cmdline(std::string value) { exclude_cmdline_.emplace_back(value); }
+  std::string* add_exclude_cmdline() { exclude_cmdline_.emplace_back(); return &exclude_cmdline_.back(); }
+
+  bool has_additional_cmdline_count() const { return _has_field_[5]; }
+  uint32_t additional_cmdline_count() const { return additional_cmdline_count_; }
+  void set_additional_cmdline_count(uint32_t value) { additional_cmdline_count_ = value; _has_field_.set(5); }
+
+ private:
+  std::vector<int32_t> target_pid_;
+  std::vector<std::string> target_cmdline_;
+  std::vector<int32_t> exclude_pid_;
+  std::vector<std::string> exclude_cmdline_;
+  uint32_t additional_cmdline_count_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_PROFILING_PERF_EVENT_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/sys_stats/sys_stats_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_SYS_STATS_SYS_STATS_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_SYS_STATS_SYS_STATS_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class SysStatsConfig;
+enum SysStatsConfig_StatCounters : int;
+enum MeminfoCounters : int;
+enum VmstatCounters : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum SysStatsConfig_StatCounters : int {
+  SysStatsConfig_StatCounters_STAT_UNSPECIFIED = 0,
+  SysStatsConfig_StatCounters_STAT_CPU_TIMES = 1,
+  SysStatsConfig_StatCounters_STAT_IRQ_COUNTS = 2,
+  SysStatsConfig_StatCounters_STAT_SOFTIRQ_COUNTS = 3,
+  SysStatsConfig_StatCounters_STAT_FORK_COUNT = 4,
+};
+
+class PERFETTO_EXPORT SysStatsConfig : public ::protozero::CppMessageObj {
+ public:
+  using StatCounters = SysStatsConfig_StatCounters;
+  static constexpr auto STAT_UNSPECIFIED = SysStatsConfig_StatCounters_STAT_UNSPECIFIED;
+  static constexpr auto STAT_CPU_TIMES = SysStatsConfig_StatCounters_STAT_CPU_TIMES;
+  static constexpr auto STAT_IRQ_COUNTS = SysStatsConfig_StatCounters_STAT_IRQ_COUNTS;
+  static constexpr auto STAT_SOFTIRQ_COUNTS = SysStatsConfig_StatCounters_STAT_SOFTIRQ_COUNTS;
+  static constexpr auto STAT_FORK_COUNT = SysStatsConfig_StatCounters_STAT_FORK_COUNT;
+  static constexpr auto StatCounters_MIN = SysStatsConfig_StatCounters_STAT_UNSPECIFIED;
+  static constexpr auto StatCounters_MAX = SysStatsConfig_StatCounters_STAT_FORK_COUNT;
+  enum FieldNumbers {
+    kMeminfoPeriodMsFieldNumber = 1,
+    kMeminfoCountersFieldNumber = 2,
+    kVmstatPeriodMsFieldNumber = 3,
+    kVmstatCountersFieldNumber = 4,
+    kStatPeriodMsFieldNumber = 5,
+    kStatCountersFieldNumber = 6,
+    kDevfreqPeriodMsFieldNumber = 7,
+  };
+
+  SysStatsConfig();
+  ~SysStatsConfig() override;
+  SysStatsConfig(SysStatsConfig&&) noexcept;
+  SysStatsConfig& operator=(SysStatsConfig&&);
+  SysStatsConfig(const SysStatsConfig&);
+  SysStatsConfig& operator=(const SysStatsConfig&);
+  bool operator==(const SysStatsConfig&) const;
+  bool operator!=(const SysStatsConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_meminfo_period_ms() const { return _has_field_[1]; }
+  uint32_t meminfo_period_ms() const { return meminfo_period_ms_; }
+  void set_meminfo_period_ms(uint32_t value) { meminfo_period_ms_ = value; _has_field_.set(1); }
+
+  const std::vector<MeminfoCounters>& meminfo_counters() const { return meminfo_counters_; }
+  std::vector<MeminfoCounters>* mutable_meminfo_counters() { return &meminfo_counters_; }
+  int meminfo_counters_size() const { return static_cast<int>(meminfo_counters_.size()); }
+  void clear_meminfo_counters() { meminfo_counters_.clear(); }
+  void add_meminfo_counters(MeminfoCounters value) { meminfo_counters_.emplace_back(value); }
+  MeminfoCounters* add_meminfo_counters() { meminfo_counters_.emplace_back(); return &meminfo_counters_.back(); }
+
+  bool has_vmstat_period_ms() const { return _has_field_[3]; }
+  uint32_t vmstat_period_ms() const { return vmstat_period_ms_; }
+  void set_vmstat_period_ms(uint32_t value) { vmstat_period_ms_ = value; _has_field_.set(3); }
+
+  const std::vector<VmstatCounters>& vmstat_counters() const { return vmstat_counters_; }
+  std::vector<VmstatCounters>* mutable_vmstat_counters() { return &vmstat_counters_; }
+  int vmstat_counters_size() const { return static_cast<int>(vmstat_counters_.size()); }
+  void clear_vmstat_counters() { vmstat_counters_.clear(); }
+  void add_vmstat_counters(VmstatCounters value) { vmstat_counters_.emplace_back(value); }
+  VmstatCounters* add_vmstat_counters() { vmstat_counters_.emplace_back(); return &vmstat_counters_.back(); }
+
+  bool has_stat_period_ms() const { return _has_field_[5]; }
+  uint32_t stat_period_ms() const { return stat_period_ms_; }
+  void set_stat_period_ms(uint32_t value) { stat_period_ms_ = value; _has_field_.set(5); }
+
+  const std::vector<SysStatsConfig_StatCounters>& stat_counters() const { return stat_counters_; }
+  std::vector<SysStatsConfig_StatCounters>* mutable_stat_counters() { return &stat_counters_; }
+  int stat_counters_size() const { return static_cast<int>(stat_counters_.size()); }
+  void clear_stat_counters() { stat_counters_.clear(); }
+  void add_stat_counters(SysStatsConfig_StatCounters value) { stat_counters_.emplace_back(value); }
+  SysStatsConfig_StatCounters* add_stat_counters() { stat_counters_.emplace_back(); return &stat_counters_.back(); }
+
+  bool has_devfreq_period_ms() const { return _has_field_[7]; }
+  uint32_t devfreq_period_ms() const { return devfreq_period_ms_; }
+  void set_devfreq_period_ms(uint32_t value) { devfreq_period_ms_ = value; _has_field_.set(7); }
+
+ private:
+  uint32_t meminfo_period_ms_{};
+  std::vector<MeminfoCounters> meminfo_counters_;
+  uint32_t vmstat_period_ms_{};
+  std::vector<VmstatCounters> vmstat_counters_;
+  uint32_t stat_period_ms_{};
+  std::vector<SysStatsConfig_StatCounters> stat_counters_;
+  uint32_t devfreq_period_ms_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<8> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_SYS_STATS_SYS_STATS_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/chrome/chrome_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_CHROME_CHROME_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_CHROME_CHROME_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class ChromeConfig;
+enum ChromeConfig_ClientPriority : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum ChromeConfig_ClientPriority : int {
+  ChromeConfig_ClientPriority_UNKNOWN = 0,
+  ChromeConfig_ClientPriority_BACKGROUND = 1,
+  ChromeConfig_ClientPriority_USER_INITIATED = 2,
+};
+
+class PERFETTO_EXPORT ChromeConfig : public ::protozero::CppMessageObj {
+ public:
+  using ClientPriority = ChromeConfig_ClientPriority;
+  static constexpr auto UNKNOWN = ChromeConfig_ClientPriority_UNKNOWN;
+  static constexpr auto BACKGROUND = ChromeConfig_ClientPriority_BACKGROUND;
+  static constexpr auto USER_INITIATED = ChromeConfig_ClientPriority_USER_INITIATED;
+  static constexpr auto ClientPriority_MIN = ChromeConfig_ClientPriority_UNKNOWN;
+  static constexpr auto ClientPriority_MAX = ChromeConfig_ClientPriority_USER_INITIATED;
+  enum FieldNumbers {
+    kTraceConfigFieldNumber = 1,
+    kPrivacyFilteringEnabledFieldNumber = 2,
+    kConvertToLegacyJsonFieldNumber = 3,
+    kClientPriorityFieldNumber = 4,
+    kJsonAgentLabelFilterFieldNumber = 5,
+  };
+
+  ChromeConfig();
+  ~ChromeConfig() override;
+  ChromeConfig(ChromeConfig&&) noexcept;
+  ChromeConfig& operator=(ChromeConfig&&);
+  ChromeConfig(const ChromeConfig&);
+  ChromeConfig& operator=(const ChromeConfig&);
+  bool operator==(const ChromeConfig&) const;
+  bool operator!=(const ChromeConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_config() const { return _has_field_[1]; }
+  const std::string& trace_config() const { return trace_config_; }
+  void set_trace_config(const std::string& value) { trace_config_ = value; _has_field_.set(1); }
+
+  bool has_privacy_filtering_enabled() const { return _has_field_[2]; }
+  bool privacy_filtering_enabled() const { return privacy_filtering_enabled_; }
+  void set_privacy_filtering_enabled(bool value) { privacy_filtering_enabled_ = value; _has_field_.set(2); }
+
+  bool has_convert_to_legacy_json() const { return _has_field_[3]; }
+  bool convert_to_legacy_json() const { return convert_to_legacy_json_; }
+  void set_convert_to_legacy_json(bool value) { convert_to_legacy_json_ = value; _has_field_.set(3); }
+
+  bool has_client_priority() const { return _has_field_[4]; }
+  ChromeConfig_ClientPriority client_priority() const { return client_priority_; }
+  void set_client_priority(ChromeConfig_ClientPriority value) { client_priority_ = value; _has_field_.set(4); }
+
+  bool has_json_agent_label_filter() const { return _has_field_[5]; }
+  const std::string& json_agent_label_filter() const { return json_agent_label_filter_; }
+  void set_json_agent_label_filter(const std::string& value) { json_agent_label_filter_ = value; _has_field_.set(5); }
+
+ private:
+  std::string trace_config_{};
+  bool privacy_filtering_enabled_{};
+  bool convert_to_legacy_json_{};
+  ChromeConfig_ClientPriority client_priority_{};
+  std::string json_agent_label_filter_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_CHROME_CHROME_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/data_source_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_DATA_SOURCE_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_DATA_SOURCE_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class DataSourceConfig;
+class TestConfig;
+class TestConfig_DummyFields;
+class InterceptorConfig;
+class ChromeConfig;
+enum DataSourceConfig_SessionInitiator : int;
+enum ChromeConfig_ClientPriority : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum DataSourceConfig_SessionInitiator : int {
+  DataSourceConfig_SessionInitiator_SESSION_INITIATOR_UNSPECIFIED = 0,
+  DataSourceConfig_SessionInitiator_SESSION_INITIATOR_TRUSTED_SYSTEM = 1,
+};
+
+class PERFETTO_EXPORT DataSourceConfig : public ::protozero::CppMessageObj {
+ public:
+  using SessionInitiator = DataSourceConfig_SessionInitiator;
+  static constexpr auto SESSION_INITIATOR_UNSPECIFIED = DataSourceConfig_SessionInitiator_SESSION_INITIATOR_UNSPECIFIED;
+  static constexpr auto SESSION_INITIATOR_TRUSTED_SYSTEM = DataSourceConfig_SessionInitiator_SESSION_INITIATOR_TRUSTED_SYSTEM;
+  static constexpr auto SessionInitiator_MIN = DataSourceConfig_SessionInitiator_SESSION_INITIATOR_UNSPECIFIED;
+  static constexpr auto SessionInitiator_MAX = DataSourceConfig_SessionInitiator_SESSION_INITIATOR_TRUSTED_SYSTEM;
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kTargetBufferFieldNumber = 2,
+    kTraceDurationMsFieldNumber = 3,
+    kStopTimeoutMsFieldNumber = 7,
+    kEnableExtraGuardrailsFieldNumber = 6,
+    kSessionInitiatorFieldNumber = 8,
+    kTracingSessionIdFieldNumber = 4,
+    kFtraceConfigFieldNumber = 100,
+    kInodeFileConfigFieldNumber = 102,
+    kProcessStatsConfigFieldNumber = 103,
+    kSysStatsConfigFieldNumber = 104,
+    kHeapprofdConfigFieldNumber = 105,
+    kJavaHprofConfigFieldNumber = 110,
+    kAndroidPowerConfigFieldNumber = 106,
+    kAndroidLogConfigFieldNumber = 107,
+    kGpuCounterConfigFieldNumber = 108,
+    kPackagesListConfigFieldNumber = 109,
+    kPerfEventConfigFieldNumber = 111,
+    kVulkanMemoryConfigFieldNumber = 112,
+    kTrackEventConfigFieldNumber = 113,
+    kAndroidPolledStateConfigFieldNumber = 114,
+    kChromeConfigFieldNumber = 101,
+    kInterceptorConfigFieldNumber = 115,
+    kLegacyConfigFieldNumber = 1000,
+    kForTestingFieldNumber = 1001,
+  };
+
+  DataSourceConfig();
+  ~DataSourceConfig() override;
+  DataSourceConfig(DataSourceConfig&&) noexcept;
+  DataSourceConfig& operator=(DataSourceConfig&&);
+  DataSourceConfig(const DataSourceConfig&);
+  DataSourceConfig& operator=(const DataSourceConfig&);
+  bool operator==(const DataSourceConfig&) const;
+  bool operator!=(const DataSourceConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_target_buffer() const { return _has_field_[2]; }
+  uint32_t target_buffer() const { return target_buffer_; }
+  void set_target_buffer(uint32_t value) { target_buffer_ = value; _has_field_.set(2); }
+
+  bool has_trace_duration_ms() const { return _has_field_[3]; }
+  uint32_t trace_duration_ms() const { return trace_duration_ms_; }
+  void set_trace_duration_ms(uint32_t value) { trace_duration_ms_ = value; _has_field_.set(3); }
+
+  bool has_stop_timeout_ms() const { return _has_field_[7]; }
+  uint32_t stop_timeout_ms() const { return stop_timeout_ms_; }
+  void set_stop_timeout_ms(uint32_t value) { stop_timeout_ms_ = value; _has_field_.set(7); }
+
+  bool has_enable_extra_guardrails() const { return _has_field_[6]; }
+  bool enable_extra_guardrails() const { return enable_extra_guardrails_; }
+  void set_enable_extra_guardrails(bool value) { enable_extra_guardrails_ = value; _has_field_.set(6); }
+
+  bool has_session_initiator() const { return _has_field_[8]; }
+  DataSourceConfig_SessionInitiator session_initiator() const { return session_initiator_; }
+  void set_session_initiator(DataSourceConfig_SessionInitiator value) { session_initiator_ = value; _has_field_.set(8); }
+
+  bool has_tracing_session_id() const { return _has_field_[4]; }
+  uint64_t tracing_session_id() const { return tracing_session_id_; }
+  void set_tracing_session_id(uint64_t value) { tracing_session_id_ = value; _has_field_.set(4); }
+
+  const std::string& ftrace_config_raw() const { return ftrace_config_; }
+  void set_ftrace_config_raw(const std::string& raw) { ftrace_config_ = raw; _has_field_.set(100); }
+
+  const std::string& inode_file_config_raw() const { return inode_file_config_; }
+  void set_inode_file_config_raw(const std::string& raw) { inode_file_config_ = raw; _has_field_.set(102); }
+
+  const std::string& process_stats_config_raw() const { return process_stats_config_; }
+  void set_process_stats_config_raw(const std::string& raw) { process_stats_config_ = raw; _has_field_.set(103); }
+
+  const std::string& sys_stats_config_raw() const { return sys_stats_config_; }
+  void set_sys_stats_config_raw(const std::string& raw) { sys_stats_config_ = raw; _has_field_.set(104); }
+
+  const std::string& heapprofd_config_raw() const { return heapprofd_config_; }
+  void set_heapprofd_config_raw(const std::string& raw) { heapprofd_config_ = raw; _has_field_.set(105); }
+
+  const std::string& java_hprof_config_raw() const { return java_hprof_config_; }
+  void set_java_hprof_config_raw(const std::string& raw) { java_hprof_config_ = raw; _has_field_.set(110); }
+
+  const std::string& android_power_config_raw() const { return android_power_config_; }
+  void set_android_power_config_raw(const std::string& raw) { android_power_config_ = raw; _has_field_.set(106); }
+
+  const std::string& android_log_config_raw() const { return android_log_config_; }
+  void set_android_log_config_raw(const std::string& raw) { android_log_config_ = raw; _has_field_.set(107); }
+
+  const std::string& gpu_counter_config_raw() const { return gpu_counter_config_; }
+  void set_gpu_counter_config_raw(const std::string& raw) { gpu_counter_config_ = raw; _has_field_.set(108); }
+
+  const std::string& packages_list_config_raw() const { return packages_list_config_; }
+  void set_packages_list_config_raw(const std::string& raw) { packages_list_config_ = raw; _has_field_.set(109); }
+
+  const std::string& perf_event_config_raw() const { return perf_event_config_; }
+  void set_perf_event_config_raw(const std::string& raw) { perf_event_config_ = raw; _has_field_.set(111); }
+
+  const std::string& vulkan_memory_config_raw() const { return vulkan_memory_config_; }
+  void set_vulkan_memory_config_raw(const std::string& raw) { vulkan_memory_config_ = raw; _has_field_.set(112); }
+
+  const std::string& track_event_config_raw() const { return track_event_config_; }
+  void set_track_event_config_raw(const std::string& raw) { track_event_config_ = raw; _has_field_.set(113); }
+
+  const std::string& android_polled_state_config_raw() const { return android_polled_state_config_; }
+  void set_android_polled_state_config_raw(const std::string& raw) { android_polled_state_config_ = raw; _has_field_.set(114); }
+
+  bool has_chrome_config() const { return _has_field_[101]; }
+  const ChromeConfig& chrome_config() const { return *chrome_config_; }
+  ChromeConfig* mutable_chrome_config() { _has_field_.set(101); return chrome_config_.get(); }
+
+  bool has_interceptor_config() const { return _has_field_[115]; }
+  const InterceptorConfig& interceptor_config() const { return *interceptor_config_; }
+  InterceptorConfig* mutable_interceptor_config() { _has_field_.set(115); return interceptor_config_.get(); }
+
+  bool has_legacy_config() const { return _has_field_[1000]; }
+  const std::string& legacy_config() const { return legacy_config_; }
+  void set_legacy_config(const std::string& value) { legacy_config_ = value; _has_field_.set(1000); }
+
+  bool has_for_testing() const { return _has_field_[1001]; }
+  const TestConfig& for_testing() const { return *for_testing_; }
+  TestConfig* mutable_for_testing() { _has_field_.set(1001); return for_testing_.get(); }
+
+ private:
+  std::string name_{};
+  uint32_t target_buffer_{};
+  uint32_t trace_duration_ms_{};
+  uint32_t stop_timeout_ms_{};
+  bool enable_extra_guardrails_{};
+  DataSourceConfig_SessionInitiator session_initiator_{};
+  uint64_t tracing_session_id_{};
+  std::string ftrace_config_;  // [lazy=true]
+  std::string inode_file_config_;  // [lazy=true]
+  std::string process_stats_config_;  // [lazy=true]
+  std::string sys_stats_config_;  // [lazy=true]
+  std::string heapprofd_config_;  // [lazy=true]
+  std::string java_hprof_config_;  // [lazy=true]
+  std::string android_power_config_;  // [lazy=true]
+  std::string android_log_config_;  // [lazy=true]
+  std::string gpu_counter_config_;  // [lazy=true]
+  std::string packages_list_config_;  // [lazy=true]
+  std::string perf_event_config_;  // [lazy=true]
+  std::string vulkan_memory_config_;  // [lazy=true]
+  std::string track_event_config_;  // [lazy=true]
+  std::string android_polled_state_config_;  // [lazy=true]
+  ::protozero::CopyablePtr<ChromeConfig> chrome_config_;
+  ::protozero::CopyablePtr<InterceptorConfig> interceptor_config_;
+  std::string legacy_config_{};
+  ::protozero::CopyablePtr<TestConfig> for_testing_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<1002> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_DATA_SOURCE_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/interceptor_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTOR_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTOR_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class InterceptorConfig;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT InterceptorConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kConsoleConfigFieldNumber = 100,
+  };
+
+  InterceptorConfig();
+  ~InterceptorConfig() override;
+  InterceptorConfig(InterceptorConfig&&) noexcept;
+  InterceptorConfig& operator=(InterceptorConfig&&);
+  InterceptorConfig(const InterceptorConfig&);
+  InterceptorConfig& operator=(const InterceptorConfig&);
+  bool operator==(const InterceptorConfig&) const;
+  bool operator!=(const InterceptorConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  const std::string& console_config_raw() const { return console_config_; }
+  void set_console_config_raw(const std::string& raw) { console_config_ = raw; _has_field_.set(100); }
+
+ private:
+  std::string name_{};
+  std::string console_config_;  // [lazy=true]
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<101> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_INTERCEPTOR_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/stress_test_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_STRESS_TEST_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_STRESS_TEST_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class StressTestConfig;
+class StressTestConfig_WriterTiming;
+class TraceConfig;
+class TraceConfig_TraceFilter;
+class TraceConfig_IncidentReportConfig;
+class TraceConfig_IncrementalStateConfig;
+class TraceConfig_TriggerConfig;
+class TraceConfig_TriggerConfig_Trigger;
+class TraceConfig_GuardrailOverrides;
+class TraceConfig_StatsdMetadata;
+class TraceConfig_ProducerConfig;
+class TraceConfig_BuiltinDataSource;
+class TraceConfig_DataSource;
+class DataSourceConfig;
+class TestConfig;
+class TestConfig_DummyFields;
+class InterceptorConfig;
+class ChromeConfig;
+class TraceConfig_BufferConfig;
+enum TraceConfig_LockdownModeOperation : int;
+enum TraceConfig_CompressionType : int;
+enum TraceConfig_StatsdLogging : int;
+enum TraceConfig_TriggerConfig_TriggerMode : int;
+enum BuiltinClock : int;
+enum DataSourceConfig_SessionInitiator : int;
+enum ChromeConfig_ClientPriority : int;
+enum TraceConfig_BufferConfig_FillPolicy : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT StressTestConfig : public ::protozero::CppMessageObj {
+ public:
+  using WriterTiming = StressTestConfig_WriterTiming;
+  enum FieldNumbers {
+    kTraceConfigFieldNumber = 1,
+    kShmemSizeKbFieldNumber = 2,
+    kShmemPageSizeKbFieldNumber = 3,
+    kNumProcessesFieldNumber = 4,
+    kNumThreadsFieldNumber = 5,
+    kMaxEventsFieldNumber = 6,
+    kNestingFieldNumber = 7,
+    kSteadyStateTimingsFieldNumber = 8,
+    kBurstPeriodMsFieldNumber = 9,
+    kBurstDurationMsFieldNumber = 10,
+    kBurstTimingsFieldNumber = 11,
+  };
+
+  StressTestConfig();
+  ~StressTestConfig() override;
+  StressTestConfig(StressTestConfig&&) noexcept;
+  StressTestConfig& operator=(StressTestConfig&&);
+  StressTestConfig(const StressTestConfig&);
+  StressTestConfig& operator=(const StressTestConfig&);
+  bool operator==(const StressTestConfig&) const;
+  bool operator!=(const StressTestConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_config() const { return _has_field_[1]; }
+  const TraceConfig& trace_config() const { return *trace_config_; }
+  TraceConfig* mutable_trace_config() { _has_field_.set(1); return trace_config_.get(); }
+
+  bool has_shmem_size_kb() const { return _has_field_[2]; }
+  uint32_t shmem_size_kb() const { return shmem_size_kb_; }
+  void set_shmem_size_kb(uint32_t value) { shmem_size_kb_ = value; _has_field_.set(2); }
+
+  bool has_shmem_page_size_kb() const { return _has_field_[3]; }
+  uint32_t shmem_page_size_kb() const { return shmem_page_size_kb_; }
+  void set_shmem_page_size_kb(uint32_t value) { shmem_page_size_kb_ = value; _has_field_.set(3); }
+
+  bool has_num_processes() const { return _has_field_[4]; }
+  uint32_t num_processes() const { return num_processes_; }
+  void set_num_processes(uint32_t value) { num_processes_ = value; _has_field_.set(4); }
+
+  bool has_num_threads() const { return _has_field_[5]; }
+  uint32_t num_threads() const { return num_threads_; }
+  void set_num_threads(uint32_t value) { num_threads_ = value; _has_field_.set(5); }
+
+  bool has_max_events() const { return _has_field_[6]; }
+  uint32_t max_events() const { return max_events_; }
+  void set_max_events(uint32_t value) { max_events_ = value; _has_field_.set(6); }
+
+  bool has_nesting() const { return _has_field_[7]; }
+  uint32_t nesting() const { return nesting_; }
+  void set_nesting(uint32_t value) { nesting_ = value; _has_field_.set(7); }
+
+  bool has_steady_state_timings() const { return _has_field_[8]; }
+  const StressTestConfig_WriterTiming& steady_state_timings() const { return *steady_state_timings_; }
+  StressTestConfig_WriterTiming* mutable_steady_state_timings() { _has_field_.set(8); return steady_state_timings_.get(); }
+
+  bool has_burst_period_ms() const { return _has_field_[9]; }
+  uint32_t burst_period_ms() const { return burst_period_ms_; }
+  void set_burst_period_ms(uint32_t value) { burst_period_ms_ = value; _has_field_.set(9); }
+
+  bool has_burst_duration_ms() const { return _has_field_[10]; }
+  uint32_t burst_duration_ms() const { return burst_duration_ms_; }
+  void set_burst_duration_ms(uint32_t value) { burst_duration_ms_ = value; _has_field_.set(10); }
+
+  bool has_burst_timings() const { return _has_field_[11]; }
+  const StressTestConfig_WriterTiming& burst_timings() const { return *burst_timings_; }
+  StressTestConfig_WriterTiming* mutable_burst_timings() { _has_field_.set(11); return burst_timings_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<TraceConfig> trace_config_;
+  uint32_t shmem_size_kb_{};
+  uint32_t shmem_page_size_kb_{};
+  uint32_t num_processes_{};
+  uint32_t num_threads_{};
+  uint32_t max_events_{};
+  uint32_t nesting_{};
+  ::protozero::CopyablePtr<StressTestConfig_WriterTiming> steady_state_timings_;
+  uint32_t burst_period_ms_{};
+  uint32_t burst_duration_ms_{};
+  ::protozero::CopyablePtr<StressTestConfig_WriterTiming> burst_timings_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<12> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT StressTestConfig_WriterTiming : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kPayloadMeanFieldNumber = 1,
+    kPayloadStddevFieldNumber = 2,
+    kRateMeanFieldNumber = 3,
+    kRateStddevFieldNumber = 4,
+    kPayloadWriteTimeMsFieldNumber = 5,
+  };
+
+  StressTestConfig_WriterTiming();
+  ~StressTestConfig_WriterTiming() override;
+  StressTestConfig_WriterTiming(StressTestConfig_WriterTiming&&) noexcept;
+  StressTestConfig_WriterTiming& operator=(StressTestConfig_WriterTiming&&);
+  StressTestConfig_WriterTiming(const StressTestConfig_WriterTiming&);
+  StressTestConfig_WriterTiming& operator=(const StressTestConfig_WriterTiming&);
+  bool operator==(const StressTestConfig_WriterTiming&) const;
+  bool operator!=(const StressTestConfig_WriterTiming& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_payload_mean() const { return _has_field_[1]; }
+  double payload_mean() const { return payload_mean_; }
+  void set_payload_mean(double value) { payload_mean_ = value; _has_field_.set(1); }
+
+  bool has_payload_stddev() const { return _has_field_[2]; }
+  double payload_stddev() const { return payload_stddev_; }
+  void set_payload_stddev(double value) { payload_stddev_ = value; _has_field_.set(2); }
+
+  bool has_rate_mean() const { return _has_field_[3]; }
+  double rate_mean() const { return rate_mean_; }
+  void set_rate_mean(double value) { rate_mean_ = value; _has_field_.set(3); }
+
+  bool has_rate_stddev() const { return _has_field_[4]; }
+  double rate_stddev() const { return rate_stddev_; }
+  void set_rate_stddev(double value) { rate_stddev_ = value; _has_field_.set(4); }
+
+  bool has_payload_write_time_ms() const { return _has_field_[5]; }
+  uint32_t payload_write_time_ms() const { return payload_write_time_ms_; }
+  void set_payload_write_time_ms(uint32_t value) { payload_write_time_ms_ = value; _has_field_.set(5); }
+
+ private:
+  double payload_mean_{};
+  double payload_stddev_{};
+  double rate_mean_{};
+  double rate_stddev_{};
+  uint32_t payload_write_time_ms_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_STRESS_TEST_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/test_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TEST_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TEST_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TestConfig;
+class TestConfig_DummyFields;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT TestConfig : public ::protozero::CppMessageObj {
+ public:
+  using DummyFields = TestConfig_DummyFields;
+  enum FieldNumbers {
+    kMessageCountFieldNumber = 1,
+    kMaxMessagesPerSecondFieldNumber = 2,
+    kSeedFieldNumber = 3,
+    kMessageSizeFieldNumber = 4,
+    kSendBatchOnRegisterFieldNumber = 5,
+    kDummyFieldsFieldNumber = 6,
+  };
+
+  TestConfig();
+  ~TestConfig() override;
+  TestConfig(TestConfig&&) noexcept;
+  TestConfig& operator=(TestConfig&&);
+  TestConfig(const TestConfig&);
+  TestConfig& operator=(const TestConfig&);
+  bool operator==(const TestConfig&) const;
+  bool operator!=(const TestConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_message_count() const { return _has_field_[1]; }
+  uint32_t message_count() const { return message_count_; }
+  void set_message_count(uint32_t value) { message_count_ = value; _has_field_.set(1); }
+
+  bool has_max_messages_per_second() const { return _has_field_[2]; }
+  uint32_t max_messages_per_second() const { return max_messages_per_second_; }
+  void set_max_messages_per_second(uint32_t value) { max_messages_per_second_ = value; _has_field_.set(2); }
+
+  bool has_seed() const { return _has_field_[3]; }
+  uint32_t seed() const { return seed_; }
+  void set_seed(uint32_t value) { seed_ = value; _has_field_.set(3); }
+
+  bool has_message_size() const { return _has_field_[4]; }
+  uint32_t message_size() const { return message_size_; }
+  void set_message_size(uint32_t value) { message_size_ = value; _has_field_.set(4); }
+
+  bool has_send_batch_on_register() const { return _has_field_[5]; }
+  bool send_batch_on_register() const { return send_batch_on_register_; }
+  void set_send_batch_on_register(bool value) { send_batch_on_register_ = value; _has_field_.set(5); }
+
+  bool has_dummy_fields() const { return _has_field_[6]; }
+  const TestConfig_DummyFields& dummy_fields() const { return *dummy_fields_; }
+  TestConfig_DummyFields* mutable_dummy_fields() { _has_field_.set(6); return dummy_fields_.get(); }
+
+ private:
+  uint32_t message_count_{};
+  uint32_t max_messages_per_second_{};
+  uint32_t seed_{};
+  uint32_t message_size_{};
+  bool send_batch_on_register_{};
+  ::protozero::CopyablePtr<TestConfig_DummyFields> dummy_fields_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<7> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TestConfig_DummyFields : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kFieldUint32FieldNumber = 1,
+    kFieldInt32FieldNumber = 2,
+    kFieldUint64FieldNumber = 3,
+    kFieldInt64FieldNumber = 4,
+    kFieldFixed64FieldNumber = 5,
+    kFieldSfixed64FieldNumber = 6,
+    kFieldFixed32FieldNumber = 7,
+    kFieldSfixed32FieldNumber = 8,
+    kFieldDoubleFieldNumber = 9,
+    kFieldFloatFieldNumber = 10,
+    kFieldSint64FieldNumber = 11,
+    kFieldSint32FieldNumber = 12,
+    kFieldStringFieldNumber = 13,
+    kFieldBytesFieldNumber = 14,
+  };
+
+  TestConfig_DummyFields();
+  ~TestConfig_DummyFields() override;
+  TestConfig_DummyFields(TestConfig_DummyFields&&) noexcept;
+  TestConfig_DummyFields& operator=(TestConfig_DummyFields&&);
+  TestConfig_DummyFields(const TestConfig_DummyFields&);
+  TestConfig_DummyFields& operator=(const TestConfig_DummyFields&);
+  bool operator==(const TestConfig_DummyFields&) const;
+  bool operator!=(const TestConfig_DummyFields& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_field_uint32() const { return _has_field_[1]; }
+  uint32_t field_uint32() const { return field_uint32_; }
+  void set_field_uint32(uint32_t value) { field_uint32_ = value; _has_field_.set(1); }
+
+  bool has_field_int32() const { return _has_field_[2]; }
+  int32_t field_int32() const { return field_int32_; }
+  void set_field_int32(int32_t value) { field_int32_ = value; _has_field_.set(2); }
+
+  bool has_field_uint64() const { return _has_field_[3]; }
+  uint64_t field_uint64() const { return field_uint64_; }
+  void set_field_uint64(uint64_t value) { field_uint64_ = value; _has_field_.set(3); }
+
+  bool has_field_int64() const { return _has_field_[4]; }
+  int64_t field_int64() const { return field_int64_; }
+  void set_field_int64(int64_t value) { field_int64_ = value; _has_field_.set(4); }
+
+  bool has_field_fixed64() const { return _has_field_[5]; }
+  uint64_t field_fixed64() const { return field_fixed64_; }
+  void set_field_fixed64(uint64_t value) { field_fixed64_ = value; _has_field_.set(5); }
+
+  bool has_field_sfixed64() const { return _has_field_[6]; }
+  int64_t field_sfixed64() const { return field_sfixed64_; }
+  void set_field_sfixed64(int64_t value) { field_sfixed64_ = value; _has_field_.set(6); }
+
+  bool has_field_fixed32() const { return _has_field_[7]; }
+  uint32_t field_fixed32() const { return field_fixed32_; }
+  void set_field_fixed32(uint32_t value) { field_fixed32_ = value; _has_field_.set(7); }
+
+  bool has_field_sfixed32() const { return _has_field_[8]; }
+  int32_t field_sfixed32() const { return field_sfixed32_; }
+  void set_field_sfixed32(int32_t value) { field_sfixed32_ = value; _has_field_.set(8); }
+
+  bool has_field_double() const { return _has_field_[9]; }
+  double field_double() const { return field_double_; }
+  void set_field_double(double value) { field_double_ = value; _has_field_.set(9); }
+
+  bool has_field_float() const { return _has_field_[10]; }
+  float field_float() const { return field_float_; }
+  void set_field_float(float value) { field_float_ = value; _has_field_.set(10); }
+
+  bool has_field_sint64() const { return _has_field_[11]; }
+  int64_t field_sint64() const { return field_sint64_; }
+  void set_field_sint64(int64_t value) { field_sint64_ = value; _has_field_.set(11); }
+
+  bool has_field_sint32() const { return _has_field_[12]; }
+  int32_t field_sint32() const { return field_sint32_; }
+  void set_field_sint32(int32_t value) { field_sint32_ = value; _has_field_.set(12); }
+
+  bool has_field_string() const { return _has_field_[13]; }
+  const std::string& field_string() const { return field_string_; }
+  void set_field_string(const std::string& value) { field_string_ = value; _has_field_.set(13); }
+
+  bool has_field_bytes() const { return _has_field_[14]; }
+  const std::string& field_bytes() const { return field_bytes_; }
+  void set_field_bytes(const std::string& value) { field_bytes_ = value; _has_field_.set(14); }
+  void set_field_bytes(const void* p, size_t s) { field_bytes_.assign(reinterpret_cast<const char*>(p), s); _has_field_.set(14); }
+
+ private:
+  uint32_t field_uint32_{};
+  int32_t field_int32_{};
+  uint64_t field_uint64_{};
+  int64_t field_int64_{};
+  uint64_t field_fixed64_{};
+  int64_t field_sfixed64_{};
+  uint32_t field_fixed32_{};
+  int32_t field_sfixed32_{};
+  double field_double_{};
+  float field_float_{};
+  int64_t field_sint64_{};
+  int32_t field_sint32_{};
+  std::string field_string_{};
+  std::string field_bytes_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<15> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TEST_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/config/trace_config.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACE_CONFIG_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACE_CONFIG_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class TraceConfig;
+class TraceConfig_TraceFilter;
+class TraceConfig_IncidentReportConfig;
+class TraceConfig_IncrementalStateConfig;
+class TraceConfig_TriggerConfig;
+class TraceConfig_TriggerConfig_Trigger;
+class TraceConfig_GuardrailOverrides;
+class TraceConfig_StatsdMetadata;
+class TraceConfig_ProducerConfig;
+class TraceConfig_BuiltinDataSource;
+class TraceConfig_DataSource;
+class DataSourceConfig;
+class TestConfig;
+class TestConfig_DummyFields;
+class InterceptorConfig;
+class ChromeConfig;
+class TraceConfig_BufferConfig;
+enum TraceConfig_LockdownModeOperation : int;
+enum TraceConfig_CompressionType : int;
+enum TraceConfig_StatsdLogging : int;
+enum TraceConfig_TriggerConfig_TriggerMode : int;
+enum BuiltinClock : int;
+enum DataSourceConfig_SessionInitiator : int;
+enum ChromeConfig_ClientPriority : int;
+enum TraceConfig_BufferConfig_FillPolicy : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum TraceConfig_LockdownModeOperation : int {
+  TraceConfig_LockdownModeOperation_LOCKDOWN_UNCHANGED = 0,
+  TraceConfig_LockdownModeOperation_LOCKDOWN_CLEAR = 1,
+  TraceConfig_LockdownModeOperation_LOCKDOWN_SET = 2,
+};
+enum TraceConfig_CompressionType : int {
+  TraceConfig_CompressionType_COMPRESSION_TYPE_UNSPECIFIED = 0,
+  TraceConfig_CompressionType_COMPRESSION_TYPE_DEFLATE = 1,
+};
+enum TraceConfig_StatsdLogging : int {
+  TraceConfig_StatsdLogging_STATSD_LOGGING_UNSPECIFIED = 0,
+  TraceConfig_StatsdLogging_STATSD_LOGGING_ENABLED = 1,
+  TraceConfig_StatsdLogging_STATSD_LOGGING_DISABLED = 2,
+};
+enum TraceConfig_TriggerConfig_TriggerMode : int {
+  TraceConfig_TriggerConfig_TriggerMode_UNSPECIFIED = 0,
+  TraceConfig_TriggerConfig_TriggerMode_START_TRACING = 1,
+  TraceConfig_TriggerConfig_TriggerMode_STOP_TRACING = 2,
+};
+enum TraceConfig_BufferConfig_FillPolicy : int {
+  TraceConfig_BufferConfig_FillPolicy_UNSPECIFIED = 0,
+  TraceConfig_BufferConfig_FillPolicy_RING_BUFFER = 1,
+  TraceConfig_BufferConfig_FillPolicy_DISCARD = 2,
+};
+
+class PERFETTO_EXPORT TraceConfig : public ::protozero::CppMessageObj {
+ public:
+  using BufferConfig = TraceConfig_BufferConfig;
+  using DataSource = TraceConfig_DataSource;
+  using BuiltinDataSource = TraceConfig_BuiltinDataSource;
+  using ProducerConfig = TraceConfig_ProducerConfig;
+  using StatsdMetadata = TraceConfig_StatsdMetadata;
+  using GuardrailOverrides = TraceConfig_GuardrailOverrides;
+  using TriggerConfig = TraceConfig_TriggerConfig;
+  using IncrementalStateConfig = TraceConfig_IncrementalStateConfig;
+  using IncidentReportConfig = TraceConfig_IncidentReportConfig;
+  using TraceFilter = TraceConfig_TraceFilter;
+  using LockdownModeOperation = TraceConfig_LockdownModeOperation;
+  static constexpr auto LOCKDOWN_UNCHANGED = TraceConfig_LockdownModeOperation_LOCKDOWN_UNCHANGED;
+  static constexpr auto LOCKDOWN_CLEAR = TraceConfig_LockdownModeOperation_LOCKDOWN_CLEAR;
+  static constexpr auto LOCKDOWN_SET = TraceConfig_LockdownModeOperation_LOCKDOWN_SET;
+  static constexpr auto LockdownModeOperation_MIN = TraceConfig_LockdownModeOperation_LOCKDOWN_UNCHANGED;
+  static constexpr auto LockdownModeOperation_MAX = TraceConfig_LockdownModeOperation_LOCKDOWN_SET;
+  using CompressionType = TraceConfig_CompressionType;
+  static constexpr auto COMPRESSION_TYPE_UNSPECIFIED = TraceConfig_CompressionType_COMPRESSION_TYPE_UNSPECIFIED;
+  static constexpr auto COMPRESSION_TYPE_DEFLATE = TraceConfig_CompressionType_COMPRESSION_TYPE_DEFLATE;
+  static constexpr auto CompressionType_MIN = TraceConfig_CompressionType_COMPRESSION_TYPE_UNSPECIFIED;
+  static constexpr auto CompressionType_MAX = TraceConfig_CompressionType_COMPRESSION_TYPE_DEFLATE;
+  using StatsdLogging = TraceConfig_StatsdLogging;
+  static constexpr auto STATSD_LOGGING_UNSPECIFIED = TraceConfig_StatsdLogging_STATSD_LOGGING_UNSPECIFIED;
+  static constexpr auto STATSD_LOGGING_ENABLED = TraceConfig_StatsdLogging_STATSD_LOGGING_ENABLED;
+  static constexpr auto STATSD_LOGGING_DISABLED = TraceConfig_StatsdLogging_STATSD_LOGGING_DISABLED;
+  static constexpr auto StatsdLogging_MIN = TraceConfig_StatsdLogging_STATSD_LOGGING_UNSPECIFIED;
+  static constexpr auto StatsdLogging_MAX = TraceConfig_StatsdLogging_STATSD_LOGGING_DISABLED;
+  enum FieldNumbers {
+    kBuffersFieldNumber = 1,
+    kDataSourcesFieldNumber = 2,
+    kBuiltinDataSourcesFieldNumber = 20,
+    kDurationMsFieldNumber = 3,
+    kEnableExtraGuardrailsFieldNumber = 4,
+    kLockdownModeFieldNumber = 5,
+    kProducersFieldNumber = 6,
+    kStatsdMetadataFieldNumber = 7,
+    kWriteIntoFileFieldNumber = 8,
+    kOutputPathFieldNumber = 29,
+    kFileWritePeriodMsFieldNumber = 9,
+    kMaxFileSizeBytesFieldNumber = 10,
+    kGuardrailOverridesFieldNumber = 11,
+    kDeferredStartFieldNumber = 12,
+    kFlushPeriodMsFieldNumber = 13,
+    kFlushTimeoutMsFieldNumber = 14,
+    kDataSourceStopTimeoutMsFieldNumber = 23,
+    kNotifyTraceurFieldNumber = 16,
+    kBugreportScoreFieldNumber = 30,
+    kTriggerConfigFieldNumber = 17,
+    kActivateTriggersFieldNumber = 18,
+    kIncrementalStateConfigFieldNumber = 21,
+    kAllowUserBuildTracingFieldNumber = 19,
+    kUniqueSessionNameFieldNumber = 22,
+    kCompressionTypeFieldNumber = 24,
+    kIncidentReportConfigFieldNumber = 25,
+    kStatsdLoggingFieldNumber = 31,
+    kTraceUuidMsbFieldNumber = 27,
+    kTraceUuidLsbFieldNumber = 28,
+    kTraceFilterFieldNumber = 32,
+  };
+
+  TraceConfig();
+  ~TraceConfig() override;
+  TraceConfig(TraceConfig&&) noexcept;
+  TraceConfig& operator=(TraceConfig&&);
+  TraceConfig(const TraceConfig&);
+  TraceConfig& operator=(const TraceConfig&);
+  bool operator==(const TraceConfig&) const;
+  bool operator!=(const TraceConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<TraceConfig_BufferConfig>& buffers() const { return buffers_; }
+  std::vector<TraceConfig_BufferConfig>* mutable_buffers() { return &buffers_; }
+  int buffers_size() const;
+  void clear_buffers();
+  TraceConfig_BufferConfig* add_buffers();
+
+  const std::vector<TraceConfig_DataSource>& data_sources() const { return data_sources_; }
+  std::vector<TraceConfig_DataSource>* mutable_data_sources() { return &data_sources_; }
+  int data_sources_size() const;
+  void clear_data_sources();
+  TraceConfig_DataSource* add_data_sources();
+
+  bool has_builtin_data_sources() const { return _has_field_[20]; }
+  const TraceConfig_BuiltinDataSource& builtin_data_sources() const { return *builtin_data_sources_; }
+  TraceConfig_BuiltinDataSource* mutable_builtin_data_sources() { _has_field_.set(20); return builtin_data_sources_.get(); }
+
+  bool has_duration_ms() const { return _has_field_[3]; }
+  uint32_t duration_ms() const { return duration_ms_; }
+  void set_duration_ms(uint32_t value) { duration_ms_ = value; _has_field_.set(3); }
+
+  bool has_enable_extra_guardrails() const { return _has_field_[4]; }
+  bool enable_extra_guardrails() const { return enable_extra_guardrails_; }
+  void set_enable_extra_guardrails(bool value) { enable_extra_guardrails_ = value; _has_field_.set(4); }
+
+  bool has_lockdown_mode() const { return _has_field_[5]; }
+  TraceConfig_LockdownModeOperation lockdown_mode() const { return lockdown_mode_; }
+  void set_lockdown_mode(TraceConfig_LockdownModeOperation value) { lockdown_mode_ = value; _has_field_.set(5); }
+
+  const std::vector<TraceConfig_ProducerConfig>& producers() const { return producers_; }
+  std::vector<TraceConfig_ProducerConfig>* mutable_producers() { return &producers_; }
+  int producers_size() const;
+  void clear_producers();
+  TraceConfig_ProducerConfig* add_producers();
+
+  bool has_statsd_metadata() const { return _has_field_[7]; }
+  const TraceConfig_StatsdMetadata& statsd_metadata() const { return *statsd_metadata_; }
+  TraceConfig_StatsdMetadata* mutable_statsd_metadata() { _has_field_.set(7); return statsd_metadata_.get(); }
+
+  bool has_write_into_file() const { return _has_field_[8]; }
+  bool write_into_file() const { return write_into_file_; }
+  void set_write_into_file(bool value) { write_into_file_ = value; _has_field_.set(8); }
+
+  bool has_output_path() const { return _has_field_[29]; }
+  const std::string& output_path() const { return output_path_; }
+  void set_output_path(const std::string& value) { output_path_ = value; _has_field_.set(29); }
+
+  bool has_file_write_period_ms() const { return _has_field_[9]; }
+  uint32_t file_write_period_ms() const { return file_write_period_ms_; }
+  void set_file_write_period_ms(uint32_t value) { file_write_period_ms_ = value; _has_field_.set(9); }
+
+  bool has_max_file_size_bytes() const { return _has_field_[10]; }
+  uint64_t max_file_size_bytes() const { return max_file_size_bytes_; }
+  void set_max_file_size_bytes(uint64_t value) { max_file_size_bytes_ = value; _has_field_.set(10); }
+
+  bool has_guardrail_overrides() const { return _has_field_[11]; }
+  const TraceConfig_GuardrailOverrides& guardrail_overrides() const { return *guardrail_overrides_; }
+  TraceConfig_GuardrailOverrides* mutable_guardrail_overrides() { _has_field_.set(11); return guardrail_overrides_.get(); }
+
+  bool has_deferred_start() const { return _has_field_[12]; }
+  bool deferred_start() const { return deferred_start_; }
+  void set_deferred_start(bool value) { deferred_start_ = value; _has_field_.set(12); }
+
+  bool has_flush_period_ms() const { return _has_field_[13]; }
+  uint32_t flush_period_ms() const { return flush_period_ms_; }
+  void set_flush_period_ms(uint32_t value) { flush_period_ms_ = value; _has_field_.set(13); }
+
+  bool has_flush_timeout_ms() const { return _has_field_[14]; }
+  uint32_t flush_timeout_ms() const { return flush_timeout_ms_; }
+  void set_flush_timeout_ms(uint32_t value) { flush_timeout_ms_ = value; _has_field_.set(14); }
+
+  bool has_data_source_stop_timeout_ms() const { return _has_field_[23]; }
+  uint32_t data_source_stop_timeout_ms() const { return data_source_stop_timeout_ms_; }
+  void set_data_source_stop_timeout_ms(uint32_t value) { data_source_stop_timeout_ms_ = value; _has_field_.set(23); }
+
+  bool has_notify_traceur() const { return _has_field_[16]; }
+  bool notify_traceur() const { return notify_traceur_; }
+  void set_notify_traceur(bool value) { notify_traceur_ = value; _has_field_.set(16); }
+
+  bool has_bugreport_score() const { return _has_field_[30]; }
+  int32_t bugreport_score() const { return bugreport_score_; }
+  void set_bugreport_score(int32_t value) { bugreport_score_ = value; _has_field_.set(30); }
+
+  bool has_trigger_config() const { return _has_field_[17]; }
+  const TraceConfig_TriggerConfig& trigger_config() const { return *trigger_config_; }
+  TraceConfig_TriggerConfig* mutable_trigger_config() { _has_field_.set(17); return trigger_config_.get(); }
+
+  const std::vector<std::string>& activate_triggers() const { return activate_triggers_; }
+  std::vector<std::string>* mutable_activate_triggers() { return &activate_triggers_; }
+  int activate_triggers_size() const { return static_cast<int>(activate_triggers_.size()); }
+  void clear_activate_triggers() { activate_triggers_.clear(); }
+  void add_activate_triggers(std::string value) { activate_triggers_.emplace_back(value); }
+  std::string* add_activate_triggers() { activate_triggers_.emplace_back(); return &activate_triggers_.back(); }
+
+  bool has_incremental_state_config() const { return _has_field_[21]; }
+  const TraceConfig_IncrementalStateConfig& incremental_state_config() const { return *incremental_state_config_; }
+  TraceConfig_IncrementalStateConfig* mutable_incremental_state_config() { _has_field_.set(21); return incremental_state_config_.get(); }
+
+  bool has_allow_user_build_tracing() const { return _has_field_[19]; }
+  bool allow_user_build_tracing() const { return allow_user_build_tracing_; }
+  void set_allow_user_build_tracing(bool value) { allow_user_build_tracing_ = value; _has_field_.set(19); }
+
+  bool has_unique_session_name() const { return _has_field_[22]; }
+  const std::string& unique_session_name() const { return unique_session_name_; }
+  void set_unique_session_name(const std::string& value) { unique_session_name_ = value; _has_field_.set(22); }
+
+  bool has_compression_type() const { return _has_field_[24]; }
+  TraceConfig_CompressionType compression_type() const { return compression_type_; }
+  void set_compression_type(TraceConfig_CompressionType value) { compression_type_ = value; _has_field_.set(24); }
+
+  bool has_incident_report_config() const { return _has_field_[25]; }
+  const TraceConfig_IncidentReportConfig& incident_report_config() const { return *incident_report_config_; }
+  TraceConfig_IncidentReportConfig* mutable_incident_report_config() { _has_field_.set(25); return incident_report_config_.get(); }
+
+  bool has_statsd_logging() const { return _has_field_[31]; }
+  TraceConfig_StatsdLogging statsd_logging() const { return statsd_logging_; }
+  void set_statsd_logging(TraceConfig_StatsdLogging value) { statsd_logging_ = value; _has_field_.set(31); }
+
+  bool has_trace_uuid_msb() const { return _has_field_[27]; }
+  int64_t trace_uuid_msb() const { return trace_uuid_msb_; }
+  void set_trace_uuid_msb(int64_t value) { trace_uuid_msb_ = value; _has_field_.set(27); }
+
+  bool has_trace_uuid_lsb() const { return _has_field_[28]; }
+  int64_t trace_uuid_lsb() const { return trace_uuid_lsb_; }
+  void set_trace_uuid_lsb(int64_t value) { trace_uuid_lsb_ = value; _has_field_.set(28); }
+
+  bool has_trace_filter() const { return _has_field_[32]; }
+  const TraceConfig_TraceFilter& trace_filter() const { return *trace_filter_; }
+  TraceConfig_TraceFilter* mutable_trace_filter() { _has_field_.set(32); return trace_filter_.get(); }
+
+ private:
+  std::vector<TraceConfig_BufferConfig> buffers_;
+  std::vector<TraceConfig_DataSource> data_sources_;
+  ::protozero::CopyablePtr<TraceConfig_BuiltinDataSource> builtin_data_sources_;
+  uint32_t duration_ms_{};
+  bool enable_extra_guardrails_{};
+  TraceConfig_LockdownModeOperation lockdown_mode_{};
+  std::vector<TraceConfig_ProducerConfig> producers_;
+  ::protozero::CopyablePtr<TraceConfig_StatsdMetadata> statsd_metadata_;
+  bool write_into_file_{};
+  std::string output_path_{};
+  uint32_t file_write_period_ms_{};
+  uint64_t max_file_size_bytes_{};
+  ::protozero::CopyablePtr<TraceConfig_GuardrailOverrides> guardrail_overrides_;
+  bool deferred_start_{};
+  uint32_t flush_period_ms_{};
+  uint32_t flush_timeout_ms_{};
+  uint32_t data_source_stop_timeout_ms_{};
+  bool notify_traceur_{};
+  int32_t bugreport_score_{};
+  ::protozero::CopyablePtr<TraceConfig_TriggerConfig> trigger_config_;
+  std::vector<std::string> activate_triggers_;
+  ::protozero::CopyablePtr<TraceConfig_IncrementalStateConfig> incremental_state_config_;
+  bool allow_user_build_tracing_{};
+  std::string unique_session_name_{};
+  TraceConfig_CompressionType compression_type_{};
+  ::protozero::CopyablePtr<TraceConfig_IncidentReportConfig> incident_report_config_;
+  TraceConfig_StatsdLogging statsd_logging_{};
+  int64_t trace_uuid_msb_{};
+  int64_t trace_uuid_lsb_{};
+  ::protozero::CopyablePtr<TraceConfig_TraceFilter> trace_filter_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<33> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_TraceFilter : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kBytecodeFieldNumber = 1,
+  };
+
+  TraceConfig_TraceFilter();
+  ~TraceConfig_TraceFilter() override;
+  TraceConfig_TraceFilter(TraceConfig_TraceFilter&&) noexcept;
+  TraceConfig_TraceFilter& operator=(TraceConfig_TraceFilter&&);
+  TraceConfig_TraceFilter(const TraceConfig_TraceFilter&);
+  TraceConfig_TraceFilter& operator=(const TraceConfig_TraceFilter&);
+  bool operator==(const TraceConfig_TraceFilter&) const;
+  bool operator!=(const TraceConfig_TraceFilter& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_bytecode() const { return _has_field_[1]; }
+  const std::string& bytecode() const { return bytecode_; }
+  void set_bytecode(const std::string& value) { bytecode_ = value; _has_field_.set(1); }
+  void set_bytecode(const void* p, size_t s) { bytecode_.assign(reinterpret_cast<const char*>(p), s); _has_field_.set(1); }
+
+ private:
+  std::string bytecode_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_IncidentReportConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDestinationPackageFieldNumber = 1,
+    kDestinationClassFieldNumber = 2,
+    kPrivacyLevelFieldNumber = 3,
+    kSkipIncidentdFieldNumber = 5,
+    kSkipDropboxFieldNumber = 4,
+  };
+
+  TraceConfig_IncidentReportConfig();
+  ~TraceConfig_IncidentReportConfig() override;
+  TraceConfig_IncidentReportConfig(TraceConfig_IncidentReportConfig&&) noexcept;
+  TraceConfig_IncidentReportConfig& operator=(TraceConfig_IncidentReportConfig&&);
+  TraceConfig_IncidentReportConfig(const TraceConfig_IncidentReportConfig&);
+  TraceConfig_IncidentReportConfig& operator=(const TraceConfig_IncidentReportConfig&);
+  bool operator==(const TraceConfig_IncidentReportConfig&) const;
+  bool operator!=(const TraceConfig_IncidentReportConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_destination_package() const { return _has_field_[1]; }
+  const std::string& destination_package() const { return destination_package_; }
+  void set_destination_package(const std::string& value) { destination_package_ = value; _has_field_.set(1); }
+
+  bool has_destination_class() const { return _has_field_[2]; }
+  const std::string& destination_class() const { return destination_class_; }
+  void set_destination_class(const std::string& value) { destination_class_ = value; _has_field_.set(2); }
+
+  bool has_privacy_level() const { return _has_field_[3]; }
+  int32_t privacy_level() const { return privacy_level_; }
+  void set_privacy_level(int32_t value) { privacy_level_ = value; _has_field_.set(3); }
+
+  bool has_skip_incidentd() const { return _has_field_[5]; }
+  bool skip_incidentd() const { return skip_incidentd_; }
+  void set_skip_incidentd(bool value) { skip_incidentd_ = value; _has_field_.set(5); }
+
+  bool has_skip_dropbox() const { return _has_field_[4]; }
+  bool skip_dropbox() const { return skip_dropbox_; }
+  void set_skip_dropbox(bool value) { skip_dropbox_ = value; _has_field_.set(4); }
+
+ private:
+  std::string destination_package_{};
+  std::string destination_class_{};
+  int32_t privacy_level_{};
+  bool skip_incidentd_{};
+  bool skip_dropbox_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_IncrementalStateConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kClearPeriodMsFieldNumber = 1,
+  };
+
+  TraceConfig_IncrementalStateConfig();
+  ~TraceConfig_IncrementalStateConfig() override;
+  TraceConfig_IncrementalStateConfig(TraceConfig_IncrementalStateConfig&&) noexcept;
+  TraceConfig_IncrementalStateConfig& operator=(TraceConfig_IncrementalStateConfig&&);
+  TraceConfig_IncrementalStateConfig(const TraceConfig_IncrementalStateConfig&);
+  TraceConfig_IncrementalStateConfig& operator=(const TraceConfig_IncrementalStateConfig&);
+  bool operator==(const TraceConfig_IncrementalStateConfig&) const;
+  bool operator!=(const TraceConfig_IncrementalStateConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_clear_period_ms() const { return _has_field_[1]; }
+  uint32_t clear_period_ms() const { return clear_period_ms_; }
+  void set_clear_period_ms(uint32_t value) { clear_period_ms_ = value; _has_field_.set(1); }
+
+ private:
+  uint32_t clear_period_ms_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_TriggerConfig : public ::protozero::CppMessageObj {
+ public:
+  using Trigger = TraceConfig_TriggerConfig_Trigger;
+  using TriggerMode = TraceConfig_TriggerConfig_TriggerMode;
+  static constexpr auto UNSPECIFIED = TraceConfig_TriggerConfig_TriggerMode_UNSPECIFIED;
+  static constexpr auto START_TRACING = TraceConfig_TriggerConfig_TriggerMode_START_TRACING;
+  static constexpr auto STOP_TRACING = TraceConfig_TriggerConfig_TriggerMode_STOP_TRACING;
+  static constexpr auto TriggerMode_MIN = TraceConfig_TriggerConfig_TriggerMode_UNSPECIFIED;
+  static constexpr auto TriggerMode_MAX = TraceConfig_TriggerConfig_TriggerMode_STOP_TRACING;
+  enum FieldNumbers {
+    kTriggerModeFieldNumber = 1,
+    kTriggersFieldNumber = 2,
+    kTriggerTimeoutMsFieldNumber = 3,
+  };
+
+  TraceConfig_TriggerConfig();
+  ~TraceConfig_TriggerConfig() override;
+  TraceConfig_TriggerConfig(TraceConfig_TriggerConfig&&) noexcept;
+  TraceConfig_TriggerConfig& operator=(TraceConfig_TriggerConfig&&);
+  TraceConfig_TriggerConfig(const TraceConfig_TriggerConfig&);
+  TraceConfig_TriggerConfig& operator=(const TraceConfig_TriggerConfig&);
+  bool operator==(const TraceConfig_TriggerConfig&) const;
+  bool operator!=(const TraceConfig_TriggerConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trigger_mode() const { return _has_field_[1]; }
+  TraceConfig_TriggerConfig_TriggerMode trigger_mode() const { return trigger_mode_; }
+  void set_trigger_mode(TraceConfig_TriggerConfig_TriggerMode value) { trigger_mode_ = value; _has_field_.set(1); }
+
+  const std::vector<TraceConfig_TriggerConfig_Trigger>& triggers() const { return triggers_; }
+  std::vector<TraceConfig_TriggerConfig_Trigger>* mutable_triggers() { return &triggers_; }
+  int triggers_size() const;
+  void clear_triggers();
+  TraceConfig_TriggerConfig_Trigger* add_triggers();
+
+  bool has_trigger_timeout_ms() const { return _has_field_[3]; }
+  uint32_t trigger_timeout_ms() const { return trigger_timeout_ms_; }
+  void set_trigger_timeout_ms(uint32_t value) { trigger_timeout_ms_ = value; _has_field_.set(3); }
+
+ private:
+  TraceConfig_TriggerConfig_TriggerMode trigger_mode_{};
+  std::vector<TraceConfig_TriggerConfig_Trigger> triggers_;
+  uint32_t trigger_timeout_ms_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_TriggerConfig_Trigger : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNameFieldNumber = 1,
+    kProducerNameRegexFieldNumber = 2,
+    kStopDelayMsFieldNumber = 3,
+    kMaxPer24HFieldNumber = 4,
+    kSkipProbabilityFieldNumber = 5,
+  };
+
+  TraceConfig_TriggerConfig_Trigger();
+  ~TraceConfig_TriggerConfig_Trigger() override;
+  TraceConfig_TriggerConfig_Trigger(TraceConfig_TriggerConfig_Trigger&&) noexcept;
+  TraceConfig_TriggerConfig_Trigger& operator=(TraceConfig_TriggerConfig_Trigger&&);
+  TraceConfig_TriggerConfig_Trigger(const TraceConfig_TriggerConfig_Trigger&);
+  TraceConfig_TriggerConfig_Trigger& operator=(const TraceConfig_TriggerConfig_Trigger&);
+  bool operator==(const TraceConfig_TriggerConfig_Trigger&) const;
+  bool operator!=(const TraceConfig_TriggerConfig_Trigger& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_name() const { return _has_field_[1]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(1); }
+
+  bool has_producer_name_regex() const { return _has_field_[2]; }
+  const std::string& producer_name_regex() const { return producer_name_regex_; }
+  void set_producer_name_regex(const std::string& value) { producer_name_regex_ = value; _has_field_.set(2); }
+
+  bool has_stop_delay_ms() const { return _has_field_[3]; }
+  uint32_t stop_delay_ms() const { return stop_delay_ms_; }
+  void set_stop_delay_ms(uint32_t value) { stop_delay_ms_ = value; _has_field_.set(3); }
+
+  bool has_max_per_24_h() const { return _has_field_[4]; }
+  uint32_t max_per_24_h() const { return max_per_24_h_; }
+  void set_max_per_24_h(uint32_t value) { max_per_24_h_ = value; _has_field_.set(4); }
+
+  bool has_skip_probability() const { return _has_field_[5]; }
+  double skip_probability() const { return skip_probability_; }
+  void set_skip_probability(double value) { skip_probability_ = value; _has_field_.set(5); }
+
+ private:
+  std::string name_{};
+  std::string producer_name_regex_{};
+  uint32_t stop_delay_ms_{};
+  uint32_t max_per_24_h_{};
+  double skip_probability_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<6> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_GuardrailOverrides : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kMaxUploadPerDayBytesFieldNumber = 1,
+  };
+
+  TraceConfig_GuardrailOverrides();
+  ~TraceConfig_GuardrailOverrides() override;
+  TraceConfig_GuardrailOverrides(TraceConfig_GuardrailOverrides&&) noexcept;
+  TraceConfig_GuardrailOverrides& operator=(TraceConfig_GuardrailOverrides&&);
+  TraceConfig_GuardrailOverrides(const TraceConfig_GuardrailOverrides&);
+  TraceConfig_GuardrailOverrides& operator=(const TraceConfig_GuardrailOverrides&);
+  bool operator==(const TraceConfig_GuardrailOverrides&) const;
+  bool operator!=(const TraceConfig_GuardrailOverrides& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_max_upload_per_day_bytes() const { return _has_field_[1]; }
+  uint64_t max_upload_per_day_bytes() const { return max_upload_per_day_bytes_; }
+  void set_max_upload_per_day_bytes(uint64_t value) { max_upload_per_day_bytes_ = value; _has_field_.set(1); }
+
+ private:
+  uint64_t max_upload_per_day_bytes_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_StatsdMetadata : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTriggeringAlertIdFieldNumber = 1,
+    kTriggeringConfigUidFieldNumber = 2,
+    kTriggeringConfigIdFieldNumber = 3,
+    kTriggeringSubscriptionIdFieldNumber = 4,
+  };
+
+  TraceConfig_StatsdMetadata();
+  ~TraceConfig_StatsdMetadata() override;
+  TraceConfig_StatsdMetadata(TraceConfig_StatsdMetadata&&) noexcept;
+  TraceConfig_StatsdMetadata& operator=(TraceConfig_StatsdMetadata&&);
+  TraceConfig_StatsdMetadata(const TraceConfig_StatsdMetadata&);
+  TraceConfig_StatsdMetadata& operator=(const TraceConfig_StatsdMetadata&);
+  bool operator==(const TraceConfig_StatsdMetadata&) const;
+  bool operator!=(const TraceConfig_StatsdMetadata& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_triggering_alert_id() const { return _has_field_[1]; }
+  int64_t triggering_alert_id() const { return triggering_alert_id_; }
+  void set_triggering_alert_id(int64_t value) { triggering_alert_id_ = value; _has_field_.set(1); }
+
+  bool has_triggering_config_uid() const { return _has_field_[2]; }
+  int32_t triggering_config_uid() const { return triggering_config_uid_; }
+  void set_triggering_config_uid(int32_t value) { triggering_config_uid_ = value; _has_field_.set(2); }
+
+  bool has_triggering_config_id() const { return _has_field_[3]; }
+  int64_t triggering_config_id() const { return triggering_config_id_; }
+  void set_triggering_config_id(int64_t value) { triggering_config_id_ = value; _has_field_.set(3); }
+
+  bool has_triggering_subscription_id() const { return _has_field_[4]; }
+  int64_t triggering_subscription_id() const { return triggering_subscription_id_; }
+  void set_triggering_subscription_id(int64_t value) { triggering_subscription_id_ = value; _has_field_.set(4); }
+
+ private:
+  int64_t triggering_alert_id_{};
+  int32_t triggering_config_uid_{};
+  int64_t triggering_config_id_{};
+  int64_t triggering_subscription_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_ProducerConfig : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kProducerNameFieldNumber = 1,
+    kShmSizeKbFieldNumber = 2,
+    kPageSizeKbFieldNumber = 3,
+  };
+
+  TraceConfig_ProducerConfig();
+  ~TraceConfig_ProducerConfig() override;
+  TraceConfig_ProducerConfig(TraceConfig_ProducerConfig&&) noexcept;
+  TraceConfig_ProducerConfig& operator=(TraceConfig_ProducerConfig&&);
+  TraceConfig_ProducerConfig(const TraceConfig_ProducerConfig&);
+  TraceConfig_ProducerConfig& operator=(const TraceConfig_ProducerConfig&);
+  bool operator==(const TraceConfig_ProducerConfig&) const;
+  bool operator!=(const TraceConfig_ProducerConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_producer_name() const { return _has_field_[1]; }
+  const std::string& producer_name() const { return producer_name_; }
+  void set_producer_name(const std::string& value) { producer_name_ = value; _has_field_.set(1); }
+
+  bool has_shm_size_kb() const { return _has_field_[2]; }
+  uint32_t shm_size_kb() const { return shm_size_kb_; }
+  void set_shm_size_kb(uint32_t value) { shm_size_kb_ = value; _has_field_.set(2); }
+
+  bool has_page_size_kb() const { return _has_field_[3]; }
+  uint32_t page_size_kb() const { return page_size_kb_; }
+  void set_page_size_kb(uint32_t value) { page_size_kb_ = value; _has_field_.set(3); }
+
+ private:
+  std::string producer_name_{};
+  uint32_t shm_size_kb_{};
+  uint32_t page_size_kb_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_BuiltinDataSource : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDisableClockSnapshottingFieldNumber = 1,
+    kDisableTraceConfigFieldNumber = 2,
+    kDisableSystemInfoFieldNumber = 3,
+    kDisableServiceEventsFieldNumber = 4,
+    kPrimaryTraceClockFieldNumber = 5,
+    kSnapshotIntervalMsFieldNumber = 6,
+    kPreferSuspendClockForSnapshotFieldNumber = 7,
+  };
+
+  TraceConfig_BuiltinDataSource();
+  ~TraceConfig_BuiltinDataSource() override;
+  TraceConfig_BuiltinDataSource(TraceConfig_BuiltinDataSource&&) noexcept;
+  TraceConfig_BuiltinDataSource& operator=(TraceConfig_BuiltinDataSource&&);
+  TraceConfig_BuiltinDataSource(const TraceConfig_BuiltinDataSource&);
+  TraceConfig_BuiltinDataSource& operator=(const TraceConfig_BuiltinDataSource&);
+  bool operator==(const TraceConfig_BuiltinDataSource&) const;
+  bool operator!=(const TraceConfig_BuiltinDataSource& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_disable_clock_snapshotting() const { return _has_field_[1]; }
+  bool disable_clock_snapshotting() const { return disable_clock_snapshotting_; }
+  void set_disable_clock_snapshotting(bool value) { disable_clock_snapshotting_ = value; _has_field_.set(1); }
+
+  bool has_disable_trace_config() const { return _has_field_[2]; }
+  bool disable_trace_config() const { return disable_trace_config_; }
+  void set_disable_trace_config(bool value) { disable_trace_config_ = value; _has_field_.set(2); }
+
+  bool has_disable_system_info() const { return _has_field_[3]; }
+  bool disable_system_info() const { return disable_system_info_; }
+  void set_disable_system_info(bool value) { disable_system_info_ = value; _has_field_.set(3); }
+
+  bool has_disable_service_events() const { return _has_field_[4]; }
+  bool disable_service_events() const { return disable_service_events_; }
+  void set_disable_service_events(bool value) { disable_service_events_ = value; _has_field_.set(4); }
+
+  bool has_primary_trace_clock() const { return _has_field_[5]; }
+  BuiltinClock primary_trace_clock() const { return primary_trace_clock_; }
+  void set_primary_trace_clock(BuiltinClock value) { primary_trace_clock_ = value; _has_field_.set(5); }
+
+  bool has_snapshot_interval_ms() const { return _has_field_[6]; }
+  uint32_t snapshot_interval_ms() const { return snapshot_interval_ms_; }
+  void set_snapshot_interval_ms(uint32_t value) { snapshot_interval_ms_ = value; _has_field_.set(6); }
+
+  bool has_prefer_suspend_clock_for_snapshot() const { return _has_field_[7]; }
+  bool prefer_suspend_clock_for_snapshot() const { return prefer_suspend_clock_for_snapshot_; }
+  void set_prefer_suspend_clock_for_snapshot(bool value) { prefer_suspend_clock_for_snapshot_ = value; _has_field_.set(7); }
+
+ private:
+  bool disable_clock_snapshotting_{};
+  bool disable_trace_config_{};
+  bool disable_system_info_{};
+  bool disable_service_events_{};
+  BuiltinClock primary_trace_clock_{};
+  uint32_t snapshot_interval_ms_{};
+  bool prefer_suspend_clock_for_snapshot_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<8> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_DataSource : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kConfigFieldNumber = 1,
+    kProducerNameFilterFieldNumber = 2,
+    kProducerNameRegexFilterFieldNumber = 3,
+  };
+
+  TraceConfig_DataSource();
+  ~TraceConfig_DataSource() override;
+  TraceConfig_DataSource(TraceConfig_DataSource&&) noexcept;
+  TraceConfig_DataSource& operator=(TraceConfig_DataSource&&);
+  TraceConfig_DataSource(const TraceConfig_DataSource&);
+  TraceConfig_DataSource& operator=(const TraceConfig_DataSource&);
+  bool operator==(const TraceConfig_DataSource&) const;
+  bool operator!=(const TraceConfig_DataSource& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_config() const { return _has_field_[1]; }
+  const DataSourceConfig& config() const { return *config_; }
+  DataSourceConfig* mutable_config() { _has_field_.set(1); return config_.get(); }
+
+  const std::vector<std::string>& producer_name_filter() const { return producer_name_filter_; }
+  std::vector<std::string>* mutable_producer_name_filter() { return &producer_name_filter_; }
+  int producer_name_filter_size() const { return static_cast<int>(producer_name_filter_.size()); }
+  void clear_producer_name_filter() { producer_name_filter_.clear(); }
+  void add_producer_name_filter(std::string value) { producer_name_filter_.emplace_back(value); }
+  std::string* add_producer_name_filter() { producer_name_filter_.emplace_back(); return &producer_name_filter_.back(); }
+
+  const std::vector<std::string>& producer_name_regex_filter() const { return producer_name_regex_filter_; }
+  std::vector<std::string>* mutable_producer_name_regex_filter() { return &producer_name_regex_filter_; }
+  int producer_name_regex_filter_size() const { return static_cast<int>(producer_name_regex_filter_.size()); }
+  void clear_producer_name_regex_filter() { producer_name_regex_filter_.clear(); }
+  void add_producer_name_regex_filter(std::string value) { producer_name_regex_filter_.emplace_back(value); }
+  std::string* add_producer_name_regex_filter() { producer_name_regex_filter_.emplace_back(); return &producer_name_regex_filter_.back(); }
+
+ private:
+  ::protozero::CopyablePtr<DataSourceConfig> config_;
+  std::vector<std::string> producer_name_filter_;
+  std::vector<std::string> producer_name_regex_filter_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT TraceConfig_BufferConfig : public ::protozero::CppMessageObj {
+ public:
+  using FillPolicy = TraceConfig_BufferConfig_FillPolicy;
+  static constexpr auto UNSPECIFIED = TraceConfig_BufferConfig_FillPolicy_UNSPECIFIED;
+  static constexpr auto RING_BUFFER = TraceConfig_BufferConfig_FillPolicy_RING_BUFFER;
+  static constexpr auto DISCARD = TraceConfig_BufferConfig_FillPolicy_DISCARD;
+  static constexpr auto FillPolicy_MIN = TraceConfig_BufferConfig_FillPolicy_UNSPECIFIED;
+  static constexpr auto FillPolicy_MAX = TraceConfig_BufferConfig_FillPolicy_DISCARD;
+  enum FieldNumbers {
+    kSizeKbFieldNumber = 1,
+    kFillPolicyFieldNumber = 4,
+  };
+
+  TraceConfig_BufferConfig();
+  ~TraceConfig_BufferConfig() override;
+  TraceConfig_BufferConfig(TraceConfig_BufferConfig&&) noexcept;
+  TraceConfig_BufferConfig& operator=(TraceConfig_BufferConfig&&);
+  TraceConfig_BufferConfig(const TraceConfig_BufferConfig&);
+  TraceConfig_BufferConfig& operator=(const TraceConfig_BufferConfig&);
+  bool operator==(const TraceConfig_BufferConfig&) const;
+  bool operator!=(const TraceConfig_BufferConfig& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_size_kb() const { return _has_field_[1]; }
+  uint32_t size_kb() const { return size_kb_; }
+  void set_size_kb(uint32_t value) { size_kb_ = value; _has_field_.set(1); }
+
+  bool has_fill_policy() const { return _has_field_[4]; }
+  TraceConfig_BufferConfig_FillPolicy fill_policy() const { return fill_policy_; }
+  void set_fill_policy(TraceConfig_BufferConfig_FillPolicy value) { fill_policy_ = value; _has_field_.set(4); }
+
+ private:
+  uint32_t size_kb_{};
+  TraceConfig_BufferConfig_FillPolicy fill_policy_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_CONFIG_TRACE_CONFIG_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/ipc/consumer_port.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class SaveTraceForBugreportResponse;
+class SaveTraceForBugreportRequest;
+class QueryCapabilitiesResponse;
+class TracingServiceCapabilities;
+class QueryCapabilitiesRequest;
+class QueryServiceStateResponse;
+class TracingServiceState;
+class TracingServiceState_DataSource;
+class DataSourceDescriptor;
+class TracingServiceState_Producer;
+class QueryServiceStateRequest;
+class ObserveEventsResponse;
+class ObservableEvents;
+class ObservableEvents_DataSourceInstanceStateChange;
+class ObserveEventsRequest;
+class GetTraceStatsResponse;
+class TraceStats;
+class TraceStats_FilterStats;
+class TraceStats_BufferStats;
+class GetTraceStatsRequest;
+class AttachResponse;
+class TraceConfig;
+class TraceConfig_TraceFilter;
+class TraceConfig_IncidentReportConfig;
+class TraceConfig_IncrementalStateConfig;
+class TraceConfig_TriggerConfig;
+class TraceConfig_TriggerConfig_Trigger;
+class TraceConfig_GuardrailOverrides;
+class TraceConfig_StatsdMetadata;
+class TraceConfig_ProducerConfig;
+class TraceConfig_BuiltinDataSource;
+class TraceConfig_DataSource;
+class DataSourceConfig;
+class TestConfig;
+class TestConfig_DummyFields;
+class InterceptorConfig;
+class ChromeConfig;
+class TraceConfig_BufferConfig;
+class AttachRequest;
+class DetachResponse;
+class DetachRequest;
+class FlushResponse;
+class FlushRequest;
+class FreeBuffersResponse;
+class FreeBuffersRequest;
+class ReadBuffersResponse;
+class ReadBuffersResponse_Slice;
+class ReadBuffersRequest;
+class DisableTracingResponse;
+class DisableTracingRequest;
+class ChangeTraceConfigResponse;
+class ChangeTraceConfigRequest;
+class StartTracingResponse;
+class StartTracingRequest;
+class EnableTracingResponse;
+class EnableTracingRequest;
+enum ObservableEvents_Type : int;
+enum ObservableEvents_DataSourceInstanceState : int;
+enum TraceConfig_LockdownModeOperation : int;
+enum TraceConfig_CompressionType : int;
+enum TraceConfig_StatsdLogging : int;
+enum TraceConfig_TriggerConfig_TriggerMode : int;
+enum BuiltinClock : int;
+enum DataSourceConfig_SessionInitiator : int;
+enum ChromeConfig_ClientPriority : int;
+enum TraceConfig_BufferConfig_FillPolicy : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT SaveTraceForBugreportResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kSuccessFieldNumber = 1,
+    kMsgFieldNumber = 2,
+  };
+
+  SaveTraceForBugreportResponse();
+  ~SaveTraceForBugreportResponse() override;
+  SaveTraceForBugreportResponse(SaveTraceForBugreportResponse&&) noexcept;
+  SaveTraceForBugreportResponse& operator=(SaveTraceForBugreportResponse&&);
+  SaveTraceForBugreportResponse(const SaveTraceForBugreportResponse&);
+  SaveTraceForBugreportResponse& operator=(const SaveTraceForBugreportResponse&);
+  bool operator==(const SaveTraceForBugreportResponse&) const;
+  bool operator!=(const SaveTraceForBugreportResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_success() const { return _has_field_[1]; }
+  bool success() const { return success_; }
+  void set_success(bool value) { success_ = value; _has_field_.set(1); }
+
+  bool has_msg() const { return _has_field_[2]; }
+  const std::string& msg() const { return msg_; }
+  void set_msg(const std::string& value) { msg_ = value; _has_field_.set(2); }
+
+ private:
+  bool success_{};
+  std::string msg_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT SaveTraceForBugreportRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  SaveTraceForBugreportRequest();
+  ~SaveTraceForBugreportRequest() override;
+  SaveTraceForBugreportRequest(SaveTraceForBugreportRequest&&) noexcept;
+  SaveTraceForBugreportRequest& operator=(SaveTraceForBugreportRequest&&);
+  SaveTraceForBugreportRequest(const SaveTraceForBugreportRequest&);
+  SaveTraceForBugreportRequest& operator=(const SaveTraceForBugreportRequest&);
+  bool operator==(const SaveTraceForBugreportRequest&) const;
+  bool operator!=(const SaveTraceForBugreportRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT QueryCapabilitiesResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kCapabilitiesFieldNumber = 1,
+  };
+
+  QueryCapabilitiesResponse();
+  ~QueryCapabilitiesResponse() override;
+  QueryCapabilitiesResponse(QueryCapabilitiesResponse&&) noexcept;
+  QueryCapabilitiesResponse& operator=(QueryCapabilitiesResponse&&);
+  QueryCapabilitiesResponse(const QueryCapabilitiesResponse&);
+  QueryCapabilitiesResponse& operator=(const QueryCapabilitiesResponse&);
+  bool operator==(const QueryCapabilitiesResponse&) const;
+  bool operator!=(const QueryCapabilitiesResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_capabilities() const { return _has_field_[1]; }
+  const TracingServiceCapabilities& capabilities() const { return *capabilities_; }
+  TracingServiceCapabilities* mutable_capabilities() { _has_field_.set(1); return capabilities_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<TracingServiceCapabilities> capabilities_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT QueryCapabilitiesRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  QueryCapabilitiesRequest();
+  ~QueryCapabilitiesRequest() override;
+  QueryCapabilitiesRequest(QueryCapabilitiesRequest&&) noexcept;
+  QueryCapabilitiesRequest& operator=(QueryCapabilitiesRequest&&);
+  QueryCapabilitiesRequest(const QueryCapabilitiesRequest&);
+  QueryCapabilitiesRequest& operator=(const QueryCapabilitiesRequest&);
+  bool operator==(const QueryCapabilitiesRequest&) const;
+  bool operator!=(const QueryCapabilitiesRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT QueryServiceStateResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kServiceStateFieldNumber = 1,
+  };
+
+  QueryServiceStateResponse();
+  ~QueryServiceStateResponse() override;
+  QueryServiceStateResponse(QueryServiceStateResponse&&) noexcept;
+  QueryServiceStateResponse& operator=(QueryServiceStateResponse&&);
+  QueryServiceStateResponse(const QueryServiceStateResponse&);
+  QueryServiceStateResponse& operator=(const QueryServiceStateResponse&);
+  bool operator==(const QueryServiceStateResponse&) const;
+  bool operator!=(const QueryServiceStateResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_service_state() const { return _has_field_[1]; }
+  const TracingServiceState& service_state() const { return *service_state_; }
+  TracingServiceState* mutable_service_state() { _has_field_.set(1); return service_state_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<TracingServiceState> service_state_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT QueryServiceStateRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  QueryServiceStateRequest();
+  ~QueryServiceStateRequest() override;
+  QueryServiceStateRequest(QueryServiceStateRequest&&) noexcept;
+  QueryServiceStateRequest& operator=(QueryServiceStateRequest&&);
+  QueryServiceStateRequest(const QueryServiceStateRequest&);
+  QueryServiceStateRequest& operator=(const QueryServiceStateRequest&);
+  bool operator==(const QueryServiceStateRequest&) const;
+  bool operator!=(const QueryServiceStateRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ObserveEventsResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kEventsFieldNumber = 1,
+  };
+
+  ObserveEventsResponse();
+  ~ObserveEventsResponse() override;
+  ObserveEventsResponse(ObserveEventsResponse&&) noexcept;
+  ObserveEventsResponse& operator=(ObserveEventsResponse&&);
+  ObserveEventsResponse(const ObserveEventsResponse&);
+  ObserveEventsResponse& operator=(const ObserveEventsResponse&);
+  bool operator==(const ObserveEventsResponse&) const;
+  bool operator!=(const ObserveEventsResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_events() const { return _has_field_[1]; }
+  const ObservableEvents& events() const { return *events_; }
+  ObservableEvents* mutable_events() { _has_field_.set(1); return events_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<ObservableEvents> events_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ObserveEventsRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kEventsToObserveFieldNumber = 1,
+  };
+
+  ObserveEventsRequest();
+  ~ObserveEventsRequest() override;
+  ObserveEventsRequest(ObserveEventsRequest&&) noexcept;
+  ObserveEventsRequest& operator=(ObserveEventsRequest&&);
+  ObserveEventsRequest(const ObserveEventsRequest&);
+  ObserveEventsRequest& operator=(const ObserveEventsRequest&);
+  bool operator==(const ObserveEventsRequest&) const;
+  bool operator!=(const ObserveEventsRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<ObservableEvents_Type>& events_to_observe() const { return events_to_observe_; }
+  std::vector<ObservableEvents_Type>* mutable_events_to_observe() { return &events_to_observe_; }
+  int events_to_observe_size() const { return static_cast<int>(events_to_observe_.size()); }
+  void clear_events_to_observe() { events_to_observe_.clear(); }
+  void add_events_to_observe(ObservableEvents_Type value) { events_to_observe_.emplace_back(value); }
+  ObservableEvents_Type* add_events_to_observe() { events_to_observe_.emplace_back(); return &events_to_observe_.back(); }
+
+ private:
+  std::vector<ObservableEvents_Type> events_to_observe_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetTraceStatsResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTraceStatsFieldNumber = 1,
+  };
+
+  GetTraceStatsResponse();
+  ~GetTraceStatsResponse() override;
+  GetTraceStatsResponse(GetTraceStatsResponse&&) noexcept;
+  GetTraceStatsResponse& operator=(GetTraceStatsResponse&&);
+  GetTraceStatsResponse(const GetTraceStatsResponse&);
+  GetTraceStatsResponse& operator=(const GetTraceStatsResponse&);
+  bool operator==(const GetTraceStatsResponse&) const;
+  bool operator!=(const GetTraceStatsResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_stats() const { return _has_field_[1]; }
+  const TraceStats& trace_stats() const { return *trace_stats_; }
+  TraceStats* mutable_trace_stats() { _has_field_.set(1); return trace_stats_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<TraceStats> trace_stats_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetTraceStatsRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  GetTraceStatsRequest();
+  ~GetTraceStatsRequest() override;
+  GetTraceStatsRequest(GetTraceStatsRequest&&) noexcept;
+  GetTraceStatsRequest& operator=(GetTraceStatsRequest&&);
+  GetTraceStatsRequest(const GetTraceStatsRequest&);
+  GetTraceStatsRequest& operator=(const GetTraceStatsRequest&);
+  bool operator==(const GetTraceStatsRequest&) const;
+  bool operator!=(const GetTraceStatsRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT AttachResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTraceConfigFieldNumber = 1,
+  };
+
+  AttachResponse();
+  ~AttachResponse() override;
+  AttachResponse(AttachResponse&&) noexcept;
+  AttachResponse& operator=(AttachResponse&&);
+  AttachResponse(const AttachResponse&);
+  AttachResponse& operator=(const AttachResponse&);
+  bool operator==(const AttachResponse&) const;
+  bool operator!=(const AttachResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_config() const { return _has_field_[1]; }
+  const TraceConfig& trace_config() const { return *trace_config_; }
+  TraceConfig* mutable_trace_config() { _has_field_.set(1); return trace_config_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<TraceConfig> trace_config_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT AttachRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kKeyFieldNumber = 1,
+  };
+
+  AttachRequest();
+  ~AttachRequest() override;
+  AttachRequest(AttachRequest&&) noexcept;
+  AttachRequest& operator=(AttachRequest&&);
+  AttachRequest(const AttachRequest&);
+  AttachRequest& operator=(const AttachRequest&);
+  bool operator==(const AttachRequest&) const;
+  bool operator!=(const AttachRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_key() const { return _has_field_[1]; }
+  const std::string& key() const { return key_; }
+  void set_key(const std::string& value) { key_ = value; _has_field_.set(1); }
+
+ private:
+  std::string key_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT DetachResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  DetachResponse();
+  ~DetachResponse() override;
+  DetachResponse(DetachResponse&&) noexcept;
+  DetachResponse& operator=(DetachResponse&&);
+  DetachResponse(const DetachResponse&);
+  DetachResponse& operator=(const DetachResponse&);
+  bool operator==(const DetachResponse&) const;
+  bool operator!=(const DetachResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT DetachRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kKeyFieldNumber = 1,
+  };
+
+  DetachRequest();
+  ~DetachRequest() override;
+  DetachRequest(DetachRequest&&) noexcept;
+  DetachRequest& operator=(DetachRequest&&);
+  DetachRequest(const DetachRequest&);
+  DetachRequest& operator=(const DetachRequest&);
+  bool operator==(const DetachRequest&) const;
+  bool operator!=(const DetachRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_key() const { return _has_field_[1]; }
+  const std::string& key() const { return key_; }
+  void set_key(const std::string& value) { key_ = value; _has_field_.set(1); }
+
+ private:
+  std::string key_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT FlushResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  FlushResponse();
+  ~FlushResponse() override;
+  FlushResponse(FlushResponse&&) noexcept;
+  FlushResponse& operator=(FlushResponse&&);
+  FlushResponse(const FlushResponse&);
+  FlushResponse& operator=(const FlushResponse&);
+  bool operator==(const FlushResponse&) const;
+  bool operator!=(const FlushResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT FlushRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTimeoutMsFieldNumber = 1,
+  };
+
+  FlushRequest();
+  ~FlushRequest() override;
+  FlushRequest(FlushRequest&&) noexcept;
+  FlushRequest& operator=(FlushRequest&&);
+  FlushRequest(const FlushRequest&);
+  FlushRequest& operator=(const FlushRequest&);
+  bool operator==(const FlushRequest&) const;
+  bool operator!=(const FlushRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_timeout_ms() const { return _has_field_[1]; }
+  uint32_t timeout_ms() const { return timeout_ms_; }
+  void set_timeout_ms(uint32_t value) { timeout_ms_ = value; _has_field_.set(1); }
+
+ private:
+  uint32_t timeout_ms_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT FreeBuffersResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  FreeBuffersResponse();
+  ~FreeBuffersResponse() override;
+  FreeBuffersResponse(FreeBuffersResponse&&) noexcept;
+  FreeBuffersResponse& operator=(FreeBuffersResponse&&);
+  FreeBuffersResponse(const FreeBuffersResponse&);
+  FreeBuffersResponse& operator=(const FreeBuffersResponse&);
+  bool operator==(const FreeBuffersResponse&) const;
+  bool operator!=(const FreeBuffersResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT FreeBuffersRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kBufferIdsFieldNumber = 1,
+  };
+
+  FreeBuffersRequest();
+  ~FreeBuffersRequest() override;
+  FreeBuffersRequest(FreeBuffersRequest&&) noexcept;
+  FreeBuffersRequest& operator=(FreeBuffersRequest&&);
+  FreeBuffersRequest(const FreeBuffersRequest&);
+  FreeBuffersRequest& operator=(const FreeBuffersRequest&);
+  bool operator==(const FreeBuffersRequest&) const;
+  bool operator!=(const FreeBuffersRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<uint32_t>& buffer_ids() const { return buffer_ids_; }
+  std::vector<uint32_t>* mutable_buffer_ids() { return &buffer_ids_; }
+  int buffer_ids_size() const { return static_cast<int>(buffer_ids_.size()); }
+  void clear_buffer_ids() { buffer_ids_.clear(); }
+  void add_buffer_ids(uint32_t value) { buffer_ids_.emplace_back(value); }
+  uint32_t* add_buffer_ids() { buffer_ids_.emplace_back(); return &buffer_ids_.back(); }
+
+ private:
+  std::vector<uint32_t> buffer_ids_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ReadBuffersResponse : public ::protozero::CppMessageObj {
+ public:
+  using Slice = ReadBuffersResponse_Slice;
+  enum FieldNumbers {
+    kSlicesFieldNumber = 2,
+  };
+
+  ReadBuffersResponse();
+  ~ReadBuffersResponse() override;
+  ReadBuffersResponse(ReadBuffersResponse&&) noexcept;
+  ReadBuffersResponse& operator=(ReadBuffersResponse&&);
+  ReadBuffersResponse(const ReadBuffersResponse&);
+  ReadBuffersResponse& operator=(const ReadBuffersResponse&);
+  bool operator==(const ReadBuffersResponse&) const;
+  bool operator!=(const ReadBuffersResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<ReadBuffersResponse_Slice>& slices() const { return slices_; }
+  std::vector<ReadBuffersResponse_Slice>* mutable_slices() { return &slices_; }
+  int slices_size() const;
+  void clear_slices();
+  ReadBuffersResponse_Slice* add_slices();
+
+ private:
+  std::vector<ReadBuffersResponse_Slice> slices_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ReadBuffersResponse_Slice : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDataFieldNumber = 1,
+    kLastSliceForPacketFieldNumber = 2,
+  };
+
+  ReadBuffersResponse_Slice();
+  ~ReadBuffersResponse_Slice() override;
+  ReadBuffersResponse_Slice(ReadBuffersResponse_Slice&&) noexcept;
+  ReadBuffersResponse_Slice& operator=(ReadBuffersResponse_Slice&&);
+  ReadBuffersResponse_Slice(const ReadBuffersResponse_Slice&);
+  ReadBuffersResponse_Slice& operator=(const ReadBuffersResponse_Slice&);
+  bool operator==(const ReadBuffersResponse_Slice&) const;
+  bool operator!=(const ReadBuffersResponse_Slice& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_data() const { return _has_field_[1]; }
+  const std::string& data() const { return data_; }
+  void set_data(const std::string& value) { data_ = value; _has_field_.set(1); }
+  void set_data(const void* p, size_t s) { data_.assign(reinterpret_cast<const char*>(p), s); _has_field_.set(1); }
+
+  bool has_last_slice_for_packet() const { return _has_field_[2]; }
+  bool last_slice_for_packet() const { return last_slice_for_packet_; }
+  void set_last_slice_for_packet(bool value) { last_slice_for_packet_ = value; _has_field_.set(2); }
+
+ private:
+  std::string data_{};
+  bool last_slice_for_packet_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ReadBuffersRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  ReadBuffersRequest();
+  ~ReadBuffersRequest() override;
+  ReadBuffersRequest(ReadBuffersRequest&&) noexcept;
+  ReadBuffersRequest& operator=(ReadBuffersRequest&&);
+  ReadBuffersRequest(const ReadBuffersRequest&);
+  ReadBuffersRequest& operator=(const ReadBuffersRequest&);
+  bool operator==(const ReadBuffersRequest&) const;
+  bool operator!=(const ReadBuffersRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT DisableTracingResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  DisableTracingResponse();
+  ~DisableTracingResponse() override;
+  DisableTracingResponse(DisableTracingResponse&&) noexcept;
+  DisableTracingResponse& operator=(DisableTracingResponse&&);
+  DisableTracingResponse(const DisableTracingResponse&);
+  DisableTracingResponse& operator=(const DisableTracingResponse&);
+  bool operator==(const DisableTracingResponse&) const;
+  bool operator!=(const DisableTracingResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT DisableTracingRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  DisableTracingRequest();
+  ~DisableTracingRequest() override;
+  DisableTracingRequest(DisableTracingRequest&&) noexcept;
+  DisableTracingRequest& operator=(DisableTracingRequest&&);
+  DisableTracingRequest(const DisableTracingRequest&);
+  DisableTracingRequest& operator=(const DisableTracingRequest&);
+  bool operator==(const DisableTracingRequest&) const;
+  bool operator!=(const DisableTracingRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ChangeTraceConfigResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  ChangeTraceConfigResponse();
+  ~ChangeTraceConfigResponse() override;
+  ChangeTraceConfigResponse(ChangeTraceConfigResponse&&) noexcept;
+  ChangeTraceConfigResponse& operator=(ChangeTraceConfigResponse&&);
+  ChangeTraceConfigResponse(const ChangeTraceConfigResponse&);
+  ChangeTraceConfigResponse& operator=(const ChangeTraceConfigResponse&);
+  bool operator==(const ChangeTraceConfigResponse&) const;
+  bool operator!=(const ChangeTraceConfigResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ChangeTraceConfigRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTraceConfigFieldNumber = 1,
+  };
+
+  ChangeTraceConfigRequest();
+  ~ChangeTraceConfigRequest() override;
+  ChangeTraceConfigRequest(ChangeTraceConfigRequest&&) noexcept;
+  ChangeTraceConfigRequest& operator=(ChangeTraceConfigRequest&&);
+  ChangeTraceConfigRequest(const ChangeTraceConfigRequest&);
+  ChangeTraceConfigRequest& operator=(const ChangeTraceConfigRequest&);
+  bool operator==(const ChangeTraceConfigRequest&) const;
+  bool operator!=(const ChangeTraceConfigRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_config() const { return _has_field_[1]; }
+  const TraceConfig& trace_config() const { return *trace_config_; }
+  TraceConfig* mutable_trace_config() { _has_field_.set(1); return trace_config_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<TraceConfig> trace_config_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT StartTracingResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  StartTracingResponse();
+  ~StartTracingResponse() override;
+  StartTracingResponse(StartTracingResponse&&) noexcept;
+  StartTracingResponse& operator=(StartTracingResponse&&);
+  StartTracingResponse(const StartTracingResponse&);
+  StartTracingResponse& operator=(const StartTracingResponse&);
+  bool operator==(const StartTracingResponse&) const;
+  bool operator!=(const StartTracingResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT StartTracingRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  StartTracingRequest();
+  ~StartTracingRequest() override;
+  StartTracingRequest(StartTracingRequest&&) noexcept;
+  StartTracingRequest& operator=(StartTracingRequest&&);
+  StartTracingRequest(const StartTracingRequest&);
+  StartTracingRequest& operator=(const StartTracingRequest&);
+  bool operator==(const StartTracingRequest&) const;
+  bool operator!=(const StartTracingRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT EnableTracingResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDisabledFieldNumber = 1,
+    kErrorFieldNumber = 3,
+  };
+
+  EnableTracingResponse();
+  ~EnableTracingResponse() override;
+  EnableTracingResponse(EnableTracingResponse&&) noexcept;
+  EnableTracingResponse& operator=(EnableTracingResponse&&);
+  EnableTracingResponse(const EnableTracingResponse&);
+  EnableTracingResponse& operator=(const EnableTracingResponse&);
+  bool operator==(const EnableTracingResponse&) const;
+  bool operator!=(const EnableTracingResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_disabled() const { return _has_field_[1]; }
+  bool disabled() const { return disabled_; }
+  void set_disabled(bool value) { disabled_ = value; _has_field_.set(1); }
+
+  bool has_error() const { return _has_field_[3]; }
+  const std::string& error() const { return error_; }
+  void set_error(const std::string& value) { error_ = value; _has_field_.set(3); }
+
+ private:
+  bool disabled_{};
+  std::string error_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT EnableTracingRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTraceConfigFieldNumber = 1,
+    kAttachNotificationOnlyFieldNumber = 2,
+  };
+
+  EnableTracingRequest();
+  ~EnableTracingRequest() override;
+  EnableTracingRequest(EnableTracingRequest&&) noexcept;
+  EnableTracingRequest& operator=(EnableTracingRequest&&);
+  EnableTracingRequest(const EnableTracingRequest&);
+  EnableTracingRequest& operator=(const EnableTracingRequest&);
+  bool operator==(const EnableTracingRequest&) const;
+  bool operator!=(const EnableTracingRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_config() const { return _has_field_[1]; }
+  const TraceConfig& trace_config() const { return *trace_config_; }
+  TraceConfig* mutable_trace_config() { _has_field_.set(1); return trace_config_.get(); }
+
+  bool has_attach_notification_only() const { return _has_field_[2]; }
+  bool attach_notification_only() const { return attach_notification_only_; }
+  void set_attach_notification_only(bool value) { attach_notification_only_ = value; _has_field_.set(2); }
+
+ private:
+  ::protozero::CopyablePtr<TraceConfig> trace_config_;
+  bool attach_notification_only_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_CONSUMER_PORT_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/ipc/producer_port.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class SyncResponse;
+class SyncRequest;
+class GetAsyncCommandResponse;
+class GetAsyncCommandResponse_ClearIncrementalState;
+class GetAsyncCommandResponse_Flush;
+class GetAsyncCommandResponse_StopDataSource;
+class GetAsyncCommandResponse_StartDataSource;
+class DataSourceConfig;
+class TestConfig;
+class TestConfig_DummyFields;
+class InterceptorConfig;
+class ChromeConfig;
+class GetAsyncCommandResponse_SetupDataSource;
+class GetAsyncCommandResponse_SetupTracing;
+class GetAsyncCommandRequest;
+class ActivateTriggersResponse;
+class ActivateTriggersRequest;
+class NotifyDataSourceStoppedResponse;
+class NotifyDataSourceStoppedRequest;
+class NotifyDataSourceStartedResponse;
+class NotifyDataSourceStartedRequest;
+class CommitDataResponse;
+class UnregisterTraceWriterResponse;
+class UnregisterTraceWriterRequest;
+class RegisterTraceWriterResponse;
+class RegisterTraceWriterRequest;
+class UnregisterDataSourceResponse;
+class UnregisterDataSourceRequest;
+class RegisterDataSourceResponse;
+class RegisterDataSourceRequest;
+class DataSourceDescriptor;
+class InitializeConnectionResponse;
+class InitializeConnectionRequest;
+enum DataSourceConfig_SessionInitiator : int;
+enum ChromeConfig_ClientPriority : int;
+enum InitializeConnectionRequest_ProducerSMBScrapingMode : int;
+enum InitializeConnectionRequest_ProducerBuildFlags : int;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+enum InitializeConnectionRequest_ProducerSMBScrapingMode : int {
+  InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_UNSPECIFIED = 0,
+  InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_ENABLED = 1,
+  InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_DISABLED = 2,
+};
+enum InitializeConnectionRequest_ProducerBuildFlags : int {
+  InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_UNSPECIFIED = 0,
+  InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_DCHECKS_ON = 1,
+  InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_DCHECKS_OFF = 2,
+};
+
+class PERFETTO_EXPORT SyncResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  SyncResponse();
+  ~SyncResponse() override;
+  SyncResponse(SyncResponse&&) noexcept;
+  SyncResponse& operator=(SyncResponse&&);
+  SyncResponse(const SyncResponse&);
+  SyncResponse& operator=(const SyncResponse&);
+  bool operator==(const SyncResponse&) const;
+  bool operator!=(const SyncResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT SyncRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  SyncRequest();
+  ~SyncRequest() override;
+  SyncRequest(SyncRequest&&) noexcept;
+  SyncRequest& operator=(SyncRequest&&);
+  SyncRequest(const SyncRequest&);
+  SyncRequest& operator=(const SyncRequest&);
+  bool operator==(const SyncRequest&) const;
+  bool operator!=(const SyncRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetAsyncCommandResponse : public ::protozero::CppMessageObj {
+ public:
+  using SetupDataSource = GetAsyncCommandResponse_SetupDataSource;
+  using StartDataSource = GetAsyncCommandResponse_StartDataSource;
+  using StopDataSource = GetAsyncCommandResponse_StopDataSource;
+  using SetupTracing = GetAsyncCommandResponse_SetupTracing;
+  using Flush = GetAsyncCommandResponse_Flush;
+  using ClearIncrementalState = GetAsyncCommandResponse_ClearIncrementalState;
+  enum FieldNumbers {
+    kSetupTracingFieldNumber = 3,
+    kSetupDataSourceFieldNumber = 6,
+    kStartDataSourceFieldNumber = 1,
+    kStopDataSourceFieldNumber = 2,
+    kFlushFieldNumber = 5,
+    kClearIncrementalStateFieldNumber = 7,
+  };
+
+  GetAsyncCommandResponse();
+  ~GetAsyncCommandResponse() override;
+  GetAsyncCommandResponse(GetAsyncCommandResponse&&) noexcept;
+  GetAsyncCommandResponse& operator=(GetAsyncCommandResponse&&);
+  GetAsyncCommandResponse(const GetAsyncCommandResponse&);
+  GetAsyncCommandResponse& operator=(const GetAsyncCommandResponse&);
+  bool operator==(const GetAsyncCommandResponse&) const;
+  bool operator!=(const GetAsyncCommandResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_setup_tracing() const { return _has_field_[3]; }
+  const GetAsyncCommandResponse_SetupTracing& setup_tracing() const { return *setup_tracing_; }
+  GetAsyncCommandResponse_SetupTracing* mutable_setup_tracing() { _has_field_.set(3); return setup_tracing_.get(); }
+
+  bool has_setup_data_source() const { return _has_field_[6]; }
+  const GetAsyncCommandResponse_SetupDataSource& setup_data_source() const { return *setup_data_source_; }
+  GetAsyncCommandResponse_SetupDataSource* mutable_setup_data_source() { _has_field_.set(6); return setup_data_source_.get(); }
+
+  bool has_start_data_source() const { return _has_field_[1]; }
+  const GetAsyncCommandResponse_StartDataSource& start_data_source() const { return *start_data_source_; }
+  GetAsyncCommandResponse_StartDataSource* mutable_start_data_source() { _has_field_.set(1); return start_data_source_.get(); }
+
+  bool has_stop_data_source() const { return _has_field_[2]; }
+  const GetAsyncCommandResponse_StopDataSource& stop_data_source() const { return *stop_data_source_; }
+  GetAsyncCommandResponse_StopDataSource* mutable_stop_data_source() { _has_field_.set(2); return stop_data_source_.get(); }
+
+  bool has_flush() const { return _has_field_[5]; }
+  const GetAsyncCommandResponse_Flush& flush() const { return *flush_; }
+  GetAsyncCommandResponse_Flush* mutable_flush() { _has_field_.set(5); return flush_.get(); }
+
+  bool has_clear_incremental_state() const { return _has_field_[7]; }
+  const GetAsyncCommandResponse_ClearIncrementalState& clear_incremental_state() const { return *clear_incremental_state_; }
+  GetAsyncCommandResponse_ClearIncrementalState* mutable_clear_incremental_state() { _has_field_.set(7); return clear_incremental_state_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<GetAsyncCommandResponse_SetupTracing> setup_tracing_;
+  ::protozero::CopyablePtr<GetAsyncCommandResponse_SetupDataSource> setup_data_source_;
+  ::protozero::CopyablePtr<GetAsyncCommandResponse_StartDataSource> start_data_source_;
+  ::protozero::CopyablePtr<GetAsyncCommandResponse_StopDataSource> stop_data_source_;
+  ::protozero::CopyablePtr<GetAsyncCommandResponse_Flush> flush_;
+  ::protozero::CopyablePtr<GetAsyncCommandResponse_ClearIncrementalState> clear_incremental_state_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<8> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetAsyncCommandResponse_ClearIncrementalState : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDataSourceIdsFieldNumber = 1,
+  };
+
+  GetAsyncCommandResponse_ClearIncrementalState();
+  ~GetAsyncCommandResponse_ClearIncrementalState() override;
+  GetAsyncCommandResponse_ClearIncrementalState(GetAsyncCommandResponse_ClearIncrementalState&&) noexcept;
+  GetAsyncCommandResponse_ClearIncrementalState& operator=(GetAsyncCommandResponse_ClearIncrementalState&&);
+  GetAsyncCommandResponse_ClearIncrementalState(const GetAsyncCommandResponse_ClearIncrementalState&);
+  GetAsyncCommandResponse_ClearIncrementalState& operator=(const GetAsyncCommandResponse_ClearIncrementalState&);
+  bool operator==(const GetAsyncCommandResponse_ClearIncrementalState&) const;
+  bool operator!=(const GetAsyncCommandResponse_ClearIncrementalState& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<uint64_t>& data_source_ids() const { return data_source_ids_; }
+  std::vector<uint64_t>* mutable_data_source_ids() { return &data_source_ids_; }
+  int data_source_ids_size() const { return static_cast<int>(data_source_ids_.size()); }
+  void clear_data_source_ids() { data_source_ids_.clear(); }
+  void add_data_source_ids(uint64_t value) { data_source_ids_.emplace_back(value); }
+  uint64_t* add_data_source_ids() { data_source_ids_.emplace_back(); return &data_source_ids_.back(); }
+
+ private:
+  std::vector<uint64_t> data_source_ids_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetAsyncCommandResponse_Flush : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDataSourceIdsFieldNumber = 1,
+    kRequestIdFieldNumber = 2,
+  };
+
+  GetAsyncCommandResponse_Flush();
+  ~GetAsyncCommandResponse_Flush() override;
+  GetAsyncCommandResponse_Flush(GetAsyncCommandResponse_Flush&&) noexcept;
+  GetAsyncCommandResponse_Flush& operator=(GetAsyncCommandResponse_Flush&&);
+  GetAsyncCommandResponse_Flush(const GetAsyncCommandResponse_Flush&);
+  GetAsyncCommandResponse_Flush& operator=(const GetAsyncCommandResponse_Flush&);
+  bool operator==(const GetAsyncCommandResponse_Flush&) const;
+  bool operator!=(const GetAsyncCommandResponse_Flush& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<uint64_t>& data_source_ids() const { return data_source_ids_; }
+  std::vector<uint64_t>* mutable_data_source_ids() { return &data_source_ids_; }
+  int data_source_ids_size() const { return static_cast<int>(data_source_ids_.size()); }
+  void clear_data_source_ids() { data_source_ids_.clear(); }
+  void add_data_source_ids(uint64_t value) { data_source_ids_.emplace_back(value); }
+  uint64_t* add_data_source_ids() { data_source_ids_.emplace_back(); return &data_source_ids_.back(); }
+
+  bool has_request_id() const { return _has_field_[2]; }
+  uint64_t request_id() const { return request_id_; }
+  void set_request_id(uint64_t value) { request_id_ = value; _has_field_.set(2); }
+
+ private:
+  std::vector<uint64_t> data_source_ids_;
+  uint64_t request_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetAsyncCommandResponse_StopDataSource : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kInstanceIdFieldNumber = 1,
+  };
+
+  GetAsyncCommandResponse_StopDataSource();
+  ~GetAsyncCommandResponse_StopDataSource() override;
+  GetAsyncCommandResponse_StopDataSource(GetAsyncCommandResponse_StopDataSource&&) noexcept;
+  GetAsyncCommandResponse_StopDataSource& operator=(GetAsyncCommandResponse_StopDataSource&&);
+  GetAsyncCommandResponse_StopDataSource(const GetAsyncCommandResponse_StopDataSource&);
+  GetAsyncCommandResponse_StopDataSource& operator=(const GetAsyncCommandResponse_StopDataSource&);
+  bool operator==(const GetAsyncCommandResponse_StopDataSource&) const;
+  bool operator!=(const GetAsyncCommandResponse_StopDataSource& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_instance_id() const { return _has_field_[1]; }
+  uint64_t instance_id() const { return instance_id_; }
+  void set_instance_id(uint64_t value) { instance_id_ = value; _has_field_.set(1); }
+
+ private:
+  uint64_t instance_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetAsyncCommandResponse_StartDataSource : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNewInstanceIdFieldNumber = 1,
+    kConfigFieldNumber = 2,
+  };
+
+  GetAsyncCommandResponse_StartDataSource();
+  ~GetAsyncCommandResponse_StartDataSource() override;
+  GetAsyncCommandResponse_StartDataSource(GetAsyncCommandResponse_StartDataSource&&) noexcept;
+  GetAsyncCommandResponse_StartDataSource& operator=(GetAsyncCommandResponse_StartDataSource&&);
+  GetAsyncCommandResponse_StartDataSource(const GetAsyncCommandResponse_StartDataSource&);
+  GetAsyncCommandResponse_StartDataSource& operator=(const GetAsyncCommandResponse_StartDataSource&);
+  bool operator==(const GetAsyncCommandResponse_StartDataSource&) const;
+  bool operator!=(const GetAsyncCommandResponse_StartDataSource& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_new_instance_id() const { return _has_field_[1]; }
+  uint64_t new_instance_id() const { return new_instance_id_; }
+  void set_new_instance_id(uint64_t value) { new_instance_id_ = value; _has_field_.set(1); }
+
+  bool has_config() const { return _has_field_[2]; }
+  const DataSourceConfig& config() const { return *config_; }
+  DataSourceConfig* mutable_config() { _has_field_.set(2); return config_.get(); }
+
+ private:
+  uint64_t new_instance_id_{};
+  ::protozero::CopyablePtr<DataSourceConfig> config_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetAsyncCommandResponse_SetupDataSource : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kNewInstanceIdFieldNumber = 1,
+    kConfigFieldNumber = 2,
+  };
+
+  GetAsyncCommandResponse_SetupDataSource();
+  ~GetAsyncCommandResponse_SetupDataSource() override;
+  GetAsyncCommandResponse_SetupDataSource(GetAsyncCommandResponse_SetupDataSource&&) noexcept;
+  GetAsyncCommandResponse_SetupDataSource& operator=(GetAsyncCommandResponse_SetupDataSource&&);
+  GetAsyncCommandResponse_SetupDataSource(const GetAsyncCommandResponse_SetupDataSource&);
+  GetAsyncCommandResponse_SetupDataSource& operator=(const GetAsyncCommandResponse_SetupDataSource&);
+  bool operator==(const GetAsyncCommandResponse_SetupDataSource&) const;
+  bool operator!=(const GetAsyncCommandResponse_SetupDataSource& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_new_instance_id() const { return _has_field_[1]; }
+  uint64_t new_instance_id() const { return new_instance_id_; }
+  void set_new_instance_id(uint64_t value) { new_instance_id_ = value; _has_field_.set(1); }
+
+  bool has_config() const { return _has_field_[2]; }
+  const DataSourceConfig& config() const { return *config_; }
+  DataSourceConfig* mutable_config() { _has_field_.set(2); return config_.get(); }
+
+ private:
+  uint64_t new_instance_id_{};
+  ::protozero::CopyablePtr<DataSourceConfig> config_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetAsyncCommandResponse_SetupTracing : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kSharedBufferPageSizeKbFieldNumber = 1,
+  };
+
+  GetAsyncCommandResponse_SetupTracing();
+  ~GetAsyncCommandResponse_SetupTracing() override;
+  GetAsyncCommandResponse_SetupTracing(GetAsyncCommandResponse_SetupTracing&&) noexcept;
+  GetAsyncCommandResponse_SetupTracing& operator=(GetAsyncCommandResponse_SetupTracing&&);
+  GetAsyncCommandResponse_SetupTracing(const GetAsyncCommandResponse_SetupTracing&);
+  GetAsyncCommandResponse_SetupTracing& operator=(const GetAsyncCommandResponse_SetupTracing&);
+  bool operator==(const GetAsyncCommandResponse_SetupTracing&) const;
+  bool operator!=(const GetAsyncCommandResponse_SetupTracing& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_shared_buffer_page_size_kb() const { return _has_field_[1]; }
+  uint32_t shared_buffer_page_size_kb() const { return shared_buffer_page_size_kb_; }
+  void set_shared_buffer_page_size_kb(uint32_t value) { shared_buffer_page_size_kb_ = value; _has_field_.set(1); }
+
+ private:
+  uint32_t shared_buffer_page_size_kb_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT GetAsyncCommandRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  GetAsyncCommandRequest();
+  ~GetAsyncCommandRequest() override;
+  GetAsyncCommandRequest(GetAsyncCommandRequest&&) noexcept;
+  GetAsyncCommandRequest& operator=(GetAsyncCommandRequest&&);
+  GetAsyncCommandRequest(const GetAsyncCommandRequest&);
+  GetAsyncCommandRequest& operator=(const GetAsyncCommandRequest&);
+  bool operator==(const GetAsyncCommandRequest&) const;
+  bool operator!=(const GetAsyncCommandRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ActivateTriggersResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  ActivateTriggersResponse();
+  ~ActivateTriggersResponse() override;
+  ActivateTriggersResponse(ActivateTriggersResponse&&) noexcept;
+  ActivateTriggersResponse& operator=(ActivateTriggersResponse&&);
+  ActivateTriggersResponse(const ActivateTriggersResponse&);
+  ActivateTriggersResponse& operator=(const ActivateTriggersResponse&);
+  bool operator==(const ActivateTriggersResponse&) const;
+  bool operator!=(const ActivateTriggersResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT ActivateTriggersRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTriggerNamesFieldNumber = 1,
+  };
+
+  ActivateTriggersRequest();
+  ~ActivateTriggersRequest() override;
+  ActivateTriggersRequest(ActivateTriggersRequest&&) noexcept;
+  ActivateTriggersRequest& operator=(ActivateTriggersRequest&&);
+  ActivateTriggersRequest(const ActivateTriggersRequest&);
+  ActivateTriggersRequest& operator=(const ActivateTriggersRequest&);
+  bool operator==(const ActivateTriggersRequest&) const;
+  bool operator!=(const ActivateTriggersRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  const std::vector<std::string>& trigger_names() const { return trigger_names_; }
+  std::vector<std::string>* mutable_trigger_names() { return &trigger_names_; }
+  int trigger_names_size() const { return static_cast<int>(trigger_names_.size()); }
+  void clear_trigger_names() { trigger_names_.clear(); }
+  void add_trigger_names(std::string value) { trigger_names_.emplace_back(value); }
+  std::string* add_trigger_names() { trigger_names_.emplace_back(); return &trigger_names_.back(); }
+
+ private:
+  std::vector<std::string> trigger_names_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT NotifyDataSourceStoppedResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  NotifyDataSourceStoppedResponse();
+  ~NotifyDataSourceStoppedResponse() override;
+  NotifyDataSourceStoppedResponse(NotifyDataSourceStoppedResponse&&) noexcept;
+  NotifyDataSourceStoppedResponse& operator=(NotifyDataSourceStoppedResponse&&);
+  NotifyDataSourceStoppedResponse(const NotifyDataSourceStoppedResponse&);
+  NotifyDataSourceStoppedResponse& operator=(const NotifyDataSourceStoppedResponse&);
+  bool operator==(const NotifyDataSourceStoppedResponse&) const;
+  bool operator!=(const NotifyDataSourceStoppedResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT NotifyDataSourceStoppedRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDataSourceIdFieldNumber = 1,
+  };
+
+  NotifyDataSourceStoppedRequest();
+  ~NotifyDataSourceStoppedRequest() override;
+  NotifyDataSourceStoppedRequest(NotifyDataSourceStoppedRequest&&) noexcept;
+  NotifyDataSourceStoppedRequest& operator=(NotifyDataSourceStoppedRequest&&);
+  NotifyDataSourceStoppedRequest(const NotifyDataSourceStoppedRequest&);
+  NotifyDataSourceStoppedRequest& operator=(const NotifyDataSourceStoppedRequest&);
+  bool operator==(const NotifyDataSourceStoppedRequest&) const;
+  bool operator!=(const NotifyDataSourceStoppedRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_data_source_id() const { return _has_field_[1]; }
+  uint64_t data_source_id() const { return data_source_id_; }
+  void set_data_source_id(uint64_t value) { data_source_id_ = value; _has_field_.set(1); }
+
+ private:
+  uint64_t data_source_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT NotifyDataSourceStartedResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  NotifyDataSourceStartedResponse();
+  ~NotifyDataSourceStartedResponse() override;
+  NotifyDataSourceStartedResponse(NotifyDataSourceStartedResponse&&) noexcept;
+  NotifyDataSourceStartedResponse& operator=(NotifyDataSourceStartedResponse&&);
+  NotifyDataSourceStartedResponse(const NotifyDataSourceStartedResponse&);
+  NotifyDataSourceStartedResponse& operator=(const NotifyDataSourceStartedResponse&);
+  bool operator==(const NotifyDataSourceStartedResponse&) const;
+  bool operator!=(const NotifyDataSourceStartedResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT NotifyDataSourceStartedRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDataSourceIdFieldNumber = 1,
+  };
+
+  NotifyDataSourceStartedRequest();
+  ~NotifyDataSourceStartedRequest() override;
+  NotifyDataSourceStartedRequest(NotifyDataSourceStartedRequest&&) noexcept;
+  NotifyDataSourceStartedRequest& operator=(NotifyDataSourceStartedRequest&&);
+  NotifyDataSourceStartedRequest(const NotifyDataSourceStartedRequest&);
+  NotifyDataSourceStartedRequest& operator=(const NotifyDataSourceStartedRequest&);
+  bool operator==(const NotifyDataSourceStartedRequest&) const;
+  bool operator!=(const NotifyDataSourceStartedRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_data_source_id() const { return _has_field_[1]; }
+  uint64_t data_source_id() const { return data_source_id_; }
+  void set_data_source_id(uint64_t value) { data_source_id_ = value; _has_field_.set(1); }
+
+ private:
+  uint64_t data_source_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT CommitDataResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  CommitDataResponse();
+  ~CommitDataResponse() override;
+  CommitDataResponse(CommitDataResponse&&) noexcept;
+  CommitDataResponse& operator=(CommitDataResponse&&);
+  CommitDataResponse(const CommitDataResponse&);
+  CommitDataResponse& operator=(const CommitDataResponse&);
+  bool operator==(const CommitDataResponse&) const;
+  bool operator!=(const CommitDataResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT UnregisterTraceWriterResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  UnregisterTraceWriterResponse();
+  ~UnregisterTraceWriterResponse() override;
+  UnregisterTraceWriterResponse(UnregisterTraceWriterResponse&&) noexcept;
+  UnregisterTraceWriterResponse& operator=(UnregisterTraceWriterResponse&&);
+  UnregisterTraceWriterResponse(const UnregisterTraceWriterResponse&);
+  UnregisterTraceWriterResponse& operator=(const UnregisterTraceWriterResponse&);
+  bool operator==(const UnregisterTraceWriterResponse&) const;
+  bool operator!=(const UnregisterTraceWriterResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT UnregisterTraceWriterRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTraceWriterIdFieldNumber = 1,
+  };
+
+  UnregisterTraceWriterRequest();
+  ~UnregisterTraceWriterRequest() override;
+  UnregisterTraceWriterRequest(UnregisterTraceWriterRequest&&) noexcept;
+  UnregisterTraceWriterRequest& operator=(UnregisterTraceWriterRequest&&);
+  UnregisterTraceWriterRequest(const UnregisterTraceWriterRequest&);
+  UnregisterTraceWriterRequest& operator=(const UnregisterTraceWriterRequest&);
+  bool operator==(const UnregisterTraceWriterRequest&) const;
+  bool operator!=(const UnregisterTraceWriterRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_writer_id() const { return _has_field_[1]; }
+  uint32_t trace_writer_id() const { return trace_writer_id_; }
+  void set_trace_writer_id(uint32_t value) { trace_writer_id_ = value; _has_field_.set(1); }
+
+ private:
+  uint32_t trace_writer_id_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT RegisterTraceWriterResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  RegisterTraceWriterResponse();
+  ~RegisterTraceWriterResponse() override;
+  RegisterTraceWriterResponse(RegisterTraceWriterResponse&&) noexcept;
+  RegisterTraceWriterResponse& operator=(RegisterTraceWriterResponse&&);
+  RegisterTraceWriterResponse(const RegisterTraceWriterResponse&);
+  RegisterTraceWriterResponse& operator=(const RegisterTraceWriterResponse&);
+  bool operator==(const RegisterTraceWriterResponse&) const;
+  bool operator!=(const RegisterTraceWriterResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT RegisterTraceWriterRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kTraceWriterIdFieldNumber = 1,
+    kTargetBufferFieldNumber = 2,
+  };
+
+  RegisterTraceWriterRequest();
+  ~RegisterTraceWriterRequest() override;
+  RegisterTraceWriterRequest(RegisterTraceWriterRequest&&) noexcept;
+  RegisterTraceWriterRequest& operator=(RegisterTraceWriterRequest&&);
+  RegisterTraceWriterRequest(const RegisterTraceWriterRequest&);
+  RegisterTraceWriterRequest& operator=(const RegisterTraceWriterRequest&);
+  bool operator==(const RegisterTraceWriterRequest&) const;
+  bool operator!=(const RegisterTraceWriterRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_trace_writer_id() const { return _has_field_[1]; }
+  uint32_t trace_writer_id() const { return trace_writer_id_; }
+  void set_trace_writer_id(uint32_t value) { trace_writer_id_ = value; _has_field_.set(1); }
+
+  bool has_target_buffer() const { return _has_field_[2]; }
+  uint32_t target_buffer() const { return target_buffer_; }
+  void set_target_buffer(uint32_t value) { target_buffer_ = value; _has_field_.set(2); }
+
+ private:
+  uint32_t trace_writer_id_{};
+  uint32_t target_buffer_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT UnregisterDataSourceResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+  };
+
+  UnregisterDataSourceResponse();
+  ~UnregisterDataSourceResponse() override;
+  UnregisterDataSourceResponse(UnregisterDataSourceResponse&&) noexcept;
+  UnregisterDataSourceResponse& operator=(UnregisterDataSourceResponse&&);
+  UnregisterDataSourceResponse(const UnregisterDataSourceResponse&);
+  UnregisterDataSourceResponse& operator=(const UnregisterDataSourceResponse&);
+  bool operator==(const UnregisterDataSourceResponse&) const;
+  bool operator!=(const UnregisterDataSourceResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+ private:
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT UnregisterDataSourceRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDataSourceNameFieldNumber = 1,
+  };
+
+  UnregisterDataSourceRequest();
+  ~UnregisterDataSourceRequest() override;
+  UnregisterDataSourceRequest(UnregisterDataSourceRequest&&) noexcept;
+  UnregisterDataSourceRequest& operator=(UnregisterDataSourceRequest&&);
+  UnregisterDataSourceRequest(const UnregisterDataSourceRequest&);
+  UnregisterDataSourceRequest& operator=(const UnregisterDataSourceRequest&);
+  bool operator==(const UnregisterDataSourceRequest&) const;
+  bool operator!=(const UnregisterDataSourceRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_data_source_name() const { return _has_field_[1]; }
+  const std::string& data_source_name() const { return data_source_name_; }
+  void set_data_source_name(const std::string& value) { data_source_name_ = value; _has_field_.set(1); }
+
+ private:
+  std::string data_source_name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT RegisterDataSourceResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kErrorFieldNumber = 1,
+  };
+
+  RegisterDataSourceResponse();
+  ~RegisterDataSourceResponse() override;
+  RegisterDataSourceResponse(RegisterDataSourceResponse&&) noexcept;
+  RegisterDataSourceResponse& operator=(RegisterDataSourceResponse&&);
+  RegisterDataSourceResponse(const RegisterDataSourceResponse&);
+  RegisterDataSourceResponse& operator=(const RegisterDataSourceResponse&);
+  bool operator==(const RegisterDataSourceResponse&) const;
+  bool operator!=(const RegisterDataSourceResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_error() const { return _has_field_[1]; }
+  const std::string& error() const { return error_; }
+  void set_error(const std::string& value) { error_ = value; _has_field_.set(1); }
+
+ private:
+  std::string error_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT RegisterDataSourceRequest : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kDataSourceDescriptorFieldNumber = 1,
+  };
+
+  RegisterDataSourceRequest();
+  ~RegisterDataSourceRequest() override;
+  RegisterDataSourceRequest(RegisterDataSourceRequest&&) noexcept;
+  RegisterDataSourceRequest& operator=(RegisterDataSourceRequest&&);
+  RegisterDataSourceRequest(const RegisterDataSourceRequest&);
+  RegisterDataSourceRequest& operator=(const RegisterDataSourceRequest&);
+  bool operator==(const RegisterDataSourceRequest&) const;
+  bool operator!=(const RegisterDataSourceRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_data_source_descriptor() const { return _has_field_[1]; }
+  const DataSourceDescriptor& data_source_descriptor() const { return *data_source_descriptor_; }
+  DataSourceDescriptor* mutable_data_source_descriptor() { _has_field_.set(1); return data_source_descriptor_.get(); }
+
+ private:
+  ::protozero::CopyablePtr<DataSourceDescriptor> data_source_descriptor_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT InitializeConnectionResponse : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kUsingShmemProvidedByProducerFieldNumber = 1,
+    kDirectSmbPatchingSupportedFieldNumber = 2,
+  };
+
+  InitializeConnectionResponse();
+  ~InitializeConnectionResponse() override;
+  InitializeConnectionResponse(InitializeConnectionResponse&&) noexcept;
+  InitializeConnectionResponse& operator=(InitializeConnectionResponse&&);
+  InitializeConnectionResponse(const InitializeConnectionResponse&);
+  InitializeConnectionResponse& operator=(const InitializeConnectionResponse&);
+  bool operator==(const InitializeConnectionResponse&) const;
+  bool operator!=(const InitializeConnectionResponse& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_using_shmem_provided_by_producer() const { return _has_field_[1]; }
+  bool using_shmem_provided_by_producer() const { return using_shmem_provided_by_producer_; }
+  void set_using_shmem_provided_by_producer(bool value) { using_shmem_provided_by_producer_ = value; _has_field_.set(1); }
+
+  bool has_direct_smb_patching_supported() const { return _has_field_[2]; }
+  bool direct_smb_patching_supported() const { return direct_smb_patching_supported_; }
+  void set_direct_smb_patching_supported(bool value) { direct_smb_patching_supported_ = value; _has_field_.set(2); }
+
+ private:
+  bool using_shmem_provided_by_producer_{};
+  bool direct_smb_patching_supported_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT InitializeConnectionRequest : public ::protozero::CppMessageObj {
+ public:
+  using ProducerSMBScrapingMode = InitializeConnectionRequest_ProducerSMBScrapingMode;
+  static constexpr auto SMB_SCRAPING_UNSPECIFIED = InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_UNSPECIFIED;
+  static constexpr auto SMB_SCRAPING_ENABLED = InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_ENABLED;
+  static constexpr auto SMB_SCRAPING_DISABLED = InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_DISABLED;
+  static constexpr auto ProducerSMBScrapingMode_MIN = InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_UNSPECIFIED;
+  static constexpr auto ProducerSMBScrapingMode_MAX = InitializeConnectionRequest_ProducerSMBScrapingMode_SMB_SCRAPING_DISABLED;
+  using ProducerBuildFlags = InitializeConnectionRequest_ProducerBuildFlags;
+  static constexpr auto BUILD_FLAGS_UNSPECIFIED = InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_UNSPECIFIED;
+  static constexpr auto BUILD_FLAGS_DCHECKS_ON = InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_DCHECKS_ON;
+  static constexpr auto BUILD_FLAGS_DCHECKS_OFF = InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_DCHECKS_OFF;
+  static constexpr auto ProducerBuildFlags_MIN = InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_UNSPECIFIED;
+  static constexpr auto ProducerBuildFlags_MAX = InitializeConnectionRequest_ProducerBuildFlags_BUILD_FLAGS_DCHECKS_OFF;
+  enum FieldNumbers {
+    kSharedMemoryPageSizeHintBytesFieldNumber = 1,
+    kSharedMemorySizeHintBytesFieldNumber = 2,
+    kProducerNameFieldNumber = 3,
+    kSmbScrapingModeFieldNumber = 4,
+    kBuildFlagsFieldNumber = 5,
+    kProducerProvidedShmemFieldNumber = 6,
+    kSdkVersionFieldNumber = 8,
+  };
+
+  InitializeConnectionRequest();
+  ~InitializeConnectionRequest() override;
+  InitializeConnectionRequest(InitializeConnectionRequest&&) noexcept;
+  InitializeConnectionRequest& operator=(InitializeConnectionRequest&&);
+  InitializeConnectionRequest(const InitializeConnectionRequest&);
+  InitializeConnectionRequest& operator=(const InitializeConnectionRequest&);
+  bool operator==(const InitializeConnectionRequest&) const;
+  bool operator!=(const InitializeConnectionRequest& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_shared_memory_page_size_hint_bytes() const { return _has_field_[1]; }
+  uint32_t shared_memory_page_size_hint_bytes() const { return shared_memory_page_size_hint_bytes_; }
+  void set_shared_memory_page_size_hint_bytes(uint32_t value) { shared_memory_page_size_hint_bytes_ = value; _has_field_.set(1); }
+
+  bool has_shared_memory_size_hint_bytes() const { return _has_field_[2]; }
+  uint32_t shared_memory_size_hint_bytes() const { return shared_memory_size_hint_bytes_; }
+  void set_shared_memory_size_hint_bytes(uint32_t value) { shared_memory_size_hint_bytes_ = value; _has_field_.set(2); }
+
+  bool has_producer_name() const { return _has_field_[3]; }
+  const std::string& producer_name() const { return producer_name_; }
+  void set_producer_name(const std::string& value) { producer_name_ = value; _has_field_.set(3); }
+
+  bool has_smb_scraping_mode() const { return _has_field_[4]; }
+  InitializeConnectionRequest_ProducerSMBScrapingMode smb_scraping_mode() const { return smb_scraping_mode_; }
+  void set_smb_scraping_mode(InitializeConnectionRequest_ProducerSMBScrapingMode value) { smb_scraping_mode_ = value; _has_field_.set(4); }
+
+  bool has_build_flags() const { return _has_field_[5]; }
+  InitializeConnectionRequest_ProducerBuildFlags build_flags() const { return build_flags_; }
+  void set_build_flags(InitializeConnectionRequest_ProducerBuildFlags value) { build_flags_ = value; _has_field_.set(5); }
+
+  bool has_producer_provided_shmem() const { return _has_field_[6]; }
+  bool producer_provided_shmem() const { return producer_provided_shmem_; }
+  void set_producer_provided_shmem(bool value) { producer_provided_shmem_ = value; _has_field_.set(6); }
+
+  bool has_sdk_version() const { return _has_field_[8]; }
+  const std::string& sdk_version() const { return sdk_version_; }
+  void set_sdk_version(const std::string& value) { sdk_version_ = value; _has_field_.set(8); }
+
+ private:
+  uint32_t shared_memory_page_size_hint_bytes_{};
+  uint32_t shared_memory_size_hint_bytes_{};
+  std::string producer_name_{};
+  InitializeConnectionRequest_ProducerSMBScrapingMode smb_scraping_mode_{};
+  InitializeConnectionRequest_ProducerBuildFlags build_flags_{};
+  bool producer_provided_shmem_{};
+  std::string sdk_version_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<9> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_PRODUCER_PORT_PROTO_CPP_H_
+// gen_amalgamated begin header: gen/protos/perfetto/ipc/wire_protocol.gen.h
+// DO NOT EDIT. Autogenerated by Perfetto cppgen_plugin
+#ifndef PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_WIRE_PROTOCOL_PROTO_CPP_H_
+#define PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_WIRE_PROTOCOL_PROTO_CPP_H_
+
+#include <stdint.h>
+#include <bitset>
+#include <vector>
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/protozero/cpp_message_obj.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/copyable_ptr.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+class IPCFrame;
+class IPCFrame_RequestError;
+class IPCFrame_InvokeMethodReply;
+class IPCFrame_InvokeMethod;
+class IPCFrame_BindServiceReply;
+class IPCFrame_BindServiceReply_MethodInfo;
+class IPCFrame_BindService;
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+namespace protozero {
+class Message;
+}  // namespace protozero
+
+namespace perfetto {
+namespace protos {
+namespace gen {
+
+class PERFETTO_EXPORT IPCFrame : public ::protozero::CppMessageObj {
+ public:
+  using BindService = IPCFrame_BindService;
+  using BindServiceReply = IPCFrame_BindServiceReply;
+  using InvokeMethod = IPCFrame_InvokeMethod;
+  using InvokeMethodReply = IPCFrame_InvokeMethodReply;
+  using RequestError = IPCFrame_RequestError;
+  enum FieldNumbers {
+    kRequestIdFieldNumber = 2,
+    kMsgBindServiceFieldNumber = 3,
+    kMsgBindServiceReplyFieldNumber = 4,
+    kMsgInvokeMethodFieldNumber = 5,
+    kMsgInvokeMethodReplyFieldNumber = 6,
+    kMsgRequestErrorFieldNumber = 7,
+    kDataForTestingFieldNumber = 1,
+  };
+
+  IPCFrame();
+  ~IPCFrame() override;
+  IPCFrame(IPCFrame&&) noexcept;
+  IPCFrame& operator=(IPCFrame&&);
+  IPCFrame(const IPCFrame&);
+  IPCFrame& operator=(const IPCFrame&);
+  bool operator==(const IPCFrame&) const;
+  bool operator!=(const IPCFrame& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_request_id() const { return _has_field_[2]; }
+  uint64_t request_id() const { return request_id_; }
+  void set_request_id(uint64_t value) { request_id_ = value; _has_field_.set(2); }
+
+  bool has_msg_bind_service() const { return _has_field_[3]; }
+  const IPCFrame_BindService& msg_bind_service() const { return *msg_bind_service_; }
+  IPCFrame_BindService* mutable_msg_bind_service() { _has_field_.set(3); return msg_bind_service_.get(); }
+
+  bool has_msg_bind_service_reply() const { return _has_field_[4]; }
+  const IPCFrame_BindServiceReply& msg_bind_service_reply() const { return *msg_bind_service_reply_; }
+  IPCFrame_BindServiceReply* mutable_msg_bind_service_reply() { _has_field_.set(4); return msg_bind_service_reply_.get(); }
+
+  bool has_msg_invoke_method() const { return _has_field_[5]; }
+  const IPCFrame_InvokeMethod& msg_invoke_method() const { return *msg_invoke_method_; }
+  IPCFrame_InvokeMethod* mutable_msg_invoke_method() { _has_field_.set(5); return msg_invoke_method_.get(); }
+
+  bool has_msg_invoke_method_reply() const { return _has_field_[6]; }
+  const IPCFrame_InvokeMethodReply& msg_invoke_method_reply() const { return *msg_invoke_method_reply_; }
+  IPCFrame_InvokeMethodReply* mutable_msg_invoke_method_reply() { _has_field_.set(6); return msg_invoke_method_reply_.get(); }
+
+  bool has_msg_request_error() const { return _has_field_[7]; }
+  const IPCFrame_RequestError& msg_request_error() const { return *msg_request_error_; }
+  IPCFrame_RequestError* mutable_msg_request_error() { _has_field_.set(7); return msg_request_error_.get(); }
+
+  const std::vector<std::string>& data_for_testing() const { return data_for_testing_; }
+  std::vector<std::string>* mutable_data_for_testing() { return &data_for_testing_; }
+  int data_for_testing_size() const { return static_cast<int>(data_for_testing_.size()); }
+  void clear_data_for_testing() { data_for_testing_.clear(); }
+  void add_data_for_testing(std::string value) { data_for_testing_.emplace_back(value); }
+  std::string* add_data_for_testing() { data_for_testing_.emplace_back(); return &data_for_testing_.back(); }
+
+ private:
+  uint64_t request_id_{};
+  ::protozero::CopyablePtr<IPCFrame_BindService> msg_bind_service_;
+  ::protozero::CopyablePtr<IPCFrame_BindServiceReply> msg_bind_service_reply_;
+  ::protozero::CopyablePtr<IPCFrame_InvokeMethod> msg_invoke_method_;
+  ::protozero::CopyablePtr<IPCFrame_InvokeMethodReply> msg_invoke_method_reply_;
+  ::protozero::CopyablePtr<IPCFrame_RequestError> msg_request_error_;
+  std::vector<std::string> data_for_testing_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<8> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT IPCFrame_RequestError : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kErrorFieldNumber = 1,
+  };
+
+  IPCFrame_RequestError();
+  ~IPCFrame_RequestError() override;
+  IPCFrame_RequestError(IPCFrame_RequestError&&) noexcept;
+  IPCFrame_RequestError& operator=(IPCFrame_RequestError&&);
+  IPCFrame_RequestError(const IPCFrame_RequestError&);
+  IPCFrame_RequestError& operator=(const IPCFrame_RequestError&);
+  bool operator==(const IPCFrame_RequestError&) const;
+  bool operator!=(const IPCFrame_RequestError& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_error() const { return _has_field_[1]; }
+  const std::string& error() const { return error_; }
+  void set_error(const std::string& value) { error_ = value; _has_field_.set(1); }
+
+ private:
+  std::string error_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT IPCFrame_InvokeMethodReply : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kSuccessFieldNumber = 1,
+    kHasMoreFieldNumber = 2,
+    kReplyProtoFieldNumber = 3,
+  };
+
+  IPCFrame_InvokeMethodReply();
+  ~IPCFrame_InvokeMethodReply() override;
+  IPCFrame_InvokeMethodReply(IPCFrame_InvokeMethodReply&&) noexcept;
+  IPCFrame_InvokeMethodReply& operator=(IPCFrame_InvokeMethodReply&&);
+  IPCFrame_InvokeMethodReply(const IPCFrame_InvokeMethodReply&);
+  IPCFrame_InvokeMethodReply& operator=(const IPCFrame_InvokeMethodReply&);
+  bool operator==(const IPCFrame_InvokeMethodReply&) const;
+  bool operator!=(const IPCFrame_InvokeMethodReply& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_success() const { return _has_field_[1]; }
+  bool success() const { return success_; }
+  void set_success(bool value) { success_ = value; _has_field_.set(1); }
+
+  bool has_has_more() const { return _has_field_[2]; }
+  bool has_more() const { return has_more_; }
+  void set_has_more(bool value) { has_more_ = value; _has_field_.set(2); }
+
+  bool has_reply_proto() const { return _has_field_[3]; }
+  const std::string& reply_proto() const { return reply_proto_; }
+  void set_reply_proto(const std::string& value) { reply_proto_ = value; _has_field_.set(3); }
+  void set_reply_proto(const void* p, size_t s) { reply_proto_.assign(reinterpret_cast<const char*>(p), s); _has_field_.set(3); }
+
+ private:
+  bool success_{};
+  bool has_more_{};
+  std::string reply_proto_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT IPCFrame_InvokeMethod : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kServiceIdFieldNumber = 1,
+    kMethodIdFieldNumber = 2,
+    kArgsProtoFieldNumber = 3,
+    kDropReplyFieldNumber = 4,
+  };
+
+  IPCFrame_InvokeMethod();
+  ~IPCFrame_InvokeMethod() override;
+  IPCFrame_InvokeMethod(IPCFrame_InvokeMethod&&) noexcept;
+  IPCFrame_InvokeMethod& operator=(IPCFrame_InvokeMethod&&);
+  IPCFrame_InvokeMethod(const IPCFrame_InvokeMethod&);
+  IPCFrame_InvokeMethod& operator=(const IPCFrame_InvokeMethod&);
+  bool operator==(const IPCFrame_InvokeMethod&) const;
+  bool operator!=(const IPCFrame_InvokeMethod& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_service_id() const { return _has_field_[1]; }
+  uint32_t service_id() const { return service_id_; }
+  void set_service_id(uint32_t value) { service_id_ = value; _has_field_.set(1); }
+
+  bool has_method_id() const { return _has_field_[2]; }
+  uint32_t method_id() const { return method_id_; }
+  void set_method_id(uint32_t value) { method_id_ = value; _has_field_.set(2); }
+
+  bool has_args_proto() const { return _has_field_[3]; }
+  const std::string& args_proto() const { return args_proto_; }
+  void set_args_proto(const std::string& value) { args_proto_ = value; _has_field_.set(3); }
+  void set_args_proto(const void* p, size_t s) { args_proto_.assign(reinterpret_cast<const char*>(p), s); _has_field_.set(3); }
+
+  bool has_drop_reply() const { return _has_field_[4]; }
+  bool drop_reply() const { return drop_reply_; }
+  void set_drop_reply(bool value) { drop_reply_ = value; _has_field_.set(4); }
+
+ private:
+  uint32_t service_id_{};
+  uint32_t method_id_{};
+  std::string args_proto_{};
+  bool drop_reply_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<5> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT IPCFrame_BindServiceReply : public ::protozero::CppMessageObj {
+ public:
+  using MethodInfo = IPCFrame_BindServiceReply_MethodInfo;
+  enum FieldNumbers {
+    kSuccessFieldNumber = 1,
+    kServiceIdFieldNumber = 2,
+    kMethodsFieldNumber = 3,
+  };
+
+  IPCFrame_BindServiceReply();
+  ~IPCFrame_BindServiceReply() override;
+  IPCFrame_BindServiceReply(IPCFrame_BindServiceReply&&) noexcept;
+  IPCFrame_BindServiceReply& operator=(IPCFrame_BindServiceReply&&);
+  IPCFrame_BindServiceReply(const IPCFrame_BindServiceReply&);
+  IPCFrame_BindServiceReply& operator=(const IPCFrame_BindServiceReply&);
+  bool operator==(const IPCFrame_BindServiceReply&) const;
+  bool operator!=(const IPCFrame_BindServiceReply& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_success() const { return _has_field_[1]; }
+  bool success() const { return success_; }
+  void set_success(bool value) { success_ = value; _has_field_.set(1); }
+
+  bool has_service_id() const { return _has_field_[2]; }
+  uint32_t service_id() const { return service_id_; }
+  void set_service_id(uint32_t value) { service_id_ = value; _has_field_.set(2); }
+
+  const std::vector<IPCFrame_BindServiceReply_MethodInfo>& methods() const { return methods_; }
+  std::vector<IPCFrame_BindServiceReply_MethodInfo>* mutable_methods() { return &methods_; }
+  int methods_size() const;
+  void clear_methods();
+  IPCFrame_BindServiceReply_MethodInfo* add_methods();
+
+ private:
+  bool success_{};
+  uint32_t service_id_{};
+  std::vector<IPCFrame_BindServiceReply_MethodInfo> methods_;
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<4> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT IPCFrame_BindServiceReply_MethodInfo : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kIdFieldNumber = 1,
+    kNameFieldNumber = 2,
+  };
+
+  IPCFrame_BindServiceReply_MethodInfo();
+  ~IPCFrame_BindServiceReply_MethodInfo() override;
+  IPCFrame_BindServiceReply_MethodInfo(IPCFrame_BindServiceReply_MethodInfo&&) noexcept;
+  IPCFrame_BindServiceReply_MethodInfo& operator=(IPCFrame_BindServiceReply_MethodInfo&&);
+  IPCFrame_BindServiceReply_MethodInfo(const IPCFrame_BindServiceReply_MethodInfo&);
+  IPCFrame_BindServiceReply_MethodInfo& operator=(const IPCFrame_BindServiceReply_MethodInfo&);
+  bool operator==(const IPCFrame_BindServiceReply_MethodInfo&) const;
+  bool operator!=(const IPCFrame_BindServiceReply_MethodInfo& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_id() const { return _has_field_[1]; }
+  uint32_t id() const { return id_; }
+  void set_id(uint32_t value) { id_ = value; _has_field_.set(1); }
+
+  bool has_name() const { return _has_field_[2]; }
+  const std::string& name() const { return name_; }
+  void set_name(const std::string& value) { name_ = value; _has_field_.set(2); }
+
+ private:
+  uint32_t id_{};
+  std::string name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<3> _has_field_{};
+};
+
+
+class PERFETTO_EXPORT IPCFrame_BindService : public ::protozero::CppMessageObj {
+ public:
+  enum FieldNumbers {
+    kServiceNameFieldNumber = 1,
+  };
+
+  IPCFrame_BindService();
+  ~IPCFrame_BindService() override;
+  IPCFrame_BindService(IPCFrame_BindService&&) noexcept;
+  IPCFrame_BindService& operator=(IPCFrame_BindService&&);
+  IPCFrame_BindService(const IPCFrame_BindService&);
+  IPCFrame_BindService& operator=(const IPCFrame_BindService&);
+  bool operator==(const IPCFrame_BindService&) const;
+  bool operator!=(const IPCFrame_BindService& other) const { return !(*this == other); }
+
+  bool ParseFromArray(const void*, size_t) override;
+  std::string SerializeAsString() const override;
+  std::vector<uint8_t> SerializeAsArray() const override;
+  void Serialize(::protozero::Message*) const;
+
+  bool has_service_name() const { return _has_field_[1]; }
+  const std::string& service_name() const { return service_name_; }
+  void set_service_name(const std::string& value) { service_name_ = value; _has_field_.set(1); }
+
+ private:
+  std::string service_name_{};
+
+  // Allows to preserve unknown protobuf fields for compatibility
+  // with future versions of .proto files.
+  std::string unknown_fields_;
+
+  std::bitset<2> _has_field_{};
+};
+
+}  // namespace perfetto
+}  // namespace protos
+}  // namespace gen
+
+#endif  // PERFETTO_PROTOS_PROTOS_PERFETTO_IPC_WIRE_PROTOCOL_PROTO_CPP_H_
+// gen_amalgamated begin header: include/perfetto/protozero/contiguous_memory_range.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_CONTIGUOUS_MEMORY_RANGE_H_
+#define INCLUDE_PERFETTO_PROTOZERO_CONTIGUOUS_MEMORY_RANGE_H_
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+
+namespace protozero {
+
+// Keep this struct trivially constructible (no ctors, no default initializers).
+struct ContiguousMemoryRange {
+  uint8_t* begin;
+  uint8_t* end;  // STL style: one byte past the end of the buffer.
+
+  inline bool is_valid() const { return begin != nullptr; }
+  inline void reset() { begin = nullptr; }
+  inline size_t size() const { return static_cast<size_t>(end - begin); }
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_CONTIGUOUS_MEMORY_RANGE_H_
+// gen_amalgamated begin header: include/perfetto/protozero/copyable_ptr.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_COPYABLE_PTR_H_
+#define INCLUDE_PERFETTO_PROTOZERO_COPYABLE_PTR_H_
+
+#include <memory>
+
+namespace protozero {
+
+// This class is essentially a std::vector<T> of fixed size = 1.
+// It's a pointer wrapper with deep copying and deep equality comparison.
+// At all effects this wrapper behaves like the underlying T, with the exception
+// of the heap indirection.
+// Conversely to a std::unique_ptr, the pointer will be always valid, never
+// null. The problem it solves is the following: when generating C++ classes
+// from proto files, we want to keep each header hermetic (i.e. not #include
+// headers of dependent types). As such we can't directly instantiate T
+// field members but we can instead rely on pointers, so only the .cc file needs
+// to see the actual definition of T. If the generated classes were move-only we
+// could just use a unique_ptr there. But they aren't, hence this wrapper.
+// Converesely to unique_ptr, this wrapper:
+// - Default constructs the T instance in its constructor.
+// - Implements deep comparison in operator== instead of pointer comparison.
+template <typename T>
+class CopyablePtr {
+ public:
+  CopyablePtr() : ptr_(new T()) {}
+  ~CopyablePtr() = default;
+
+  // Copy operators.
+  CopyablePtr(const CopyablePtr& other) : ptr_(new T(*other.ptr_)) {}
+  CopyablePtr& operator=(const CopyablePtr& other) {
+    *ptr_ = *other.ptr_;
+    return *this;
+  }
+
+  // Move operators.
+  CopyablePtr(CopyablePtr&& other) noexcept : ptr_(std::move(other.ptr_)) {
+    other.ptr_.reset(new T());
+  }
+
+  CopyablePtr& operator=(CopyablePtr&& other) {
+    ptr_ = std::move(other.ptr_);
+    other.ptr_.reset(new T());
+    return *this;
+  }
+
+  T* get() { return ptr_.get(); }
+  const T* get() const { return ptr_.get(); }
+
+  T* operator->() { return ptr_.get(); }
+  const T* operator->() const { return ptr_.get(); }
+
+  T& operator*() { return *ptr_; }
+  const T& operator*() const { return *ptr_; }
+
+  friend bool operator==(const CopyablePtr& lhs, const CopyablePtr& rhs) {
+    return *lhs == *rhs;
+  }
+
+  friend bool operator!=(const CopyablePtr& lhs, const CopyablePtr& rhs) {
+    // In theory the underlying type might have a special operator!=
+    // implementation which is not just !(x == y). Respect that.
+    return *lhs != *rhs;
+  }
+
+ private:
+  std::unique_ptr<T> ptr_;
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_COPYABLE_PTR_H_
+// gen_amalgamated begin header: include/perfetto/protozero/cpp_message_obj.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_CPP_MESSAGE_OBJ_H_
+#define INCLUDE_PERFETTO_PROTOZERO_CPP_MESSAGE_OBJ_H_
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+
+namespace protozero {
+
+// Base class for generated .gen.h classes, which are full C++ objects that
+// support both ser and deserialization (but are not zero-copy).
+// This is only used by the "cpp" targets not the "pbzero" ones.
+class PERFETTO_EXPORT CppMessageObj {
+ public:
+  virtual ~CppMessageObj();
+  virtual std::string SerializeAsString() const = 0;
+  virtual std::vector<uint8_t> SerializeAsArray() const = 0;
+  virtual bool ParseFromArray(const void*, size_t) = 0;
+
+  bool ParseFromString(const std::string& str) {
+    return ParseFromArray(str.data(), str.size());
+  }
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_CPP_MESSAGE_OBJ_H_
+// gen_amalgamated begin header: include/perfetto/protozero/field.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_FIELD_H_
+#define INCLUDE_PERFETTO_PROTOZERO_FIELD_H_
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/contiguous_memory_range.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace protozero {
+
+struct ConstBytes {
+  std::string ToStdString() const {
+    return std::string(reinterpret_cast<const char*>(data), size);
+  }
+
+  const uint8_t* data;
+  size_t size;
+};
+
+struct ConstChars {
+  // Allow implicit conversion to perfetto's base::StringView without depending
+  // on perfetto/base or viceversa.
+  static constexpr bool kConvertibleToStringView = true;
+  std::string ToStdString() const { return std::string(data, size); }
+
+  const char* data;
+  size_t size;
+};
+
+// A protobuf field decoded by the protozero proto decoders. It exposes
+// convenience accessors with minimal debug checks.
+// This class is used both by the iterator-based ProtoDecoder and by the
+// one-shot TypedProtoDecoder.
+// If the field is not valid the accessors consistently return zero-integers or
+// null strings.
+class Field {
+ public:
+  bool valid() const { return id_ != 0; }
+  uint16_t id() const { return id_; }
+  explicit operator bool() const { return valid(); }
+
+  proto_utils::ProtoWireType type() const {
+    auto res = static_cast<proto_utils::ProtoWireType>(type_);
+    PERFETTO_DCHECK(res == proto_utils::ProtoWireType::kVarInt ||
+                    res == proto_utils::ProtoWireType::kLengthDelimited ||
+                    res == proto_utils::ProtoWireType::kFixed32 ||
+                    res == proto_utils::ProtoWireType::kFixed64);
+    return res;
+  }
+
+  bool as_bool() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kVarInt);
+    return static_cast<bool>(int_value_);
+  }
+
+  uint32_t as_uint32() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kVarInt ||
+                    type() == proto_utils::ProtoWireType::kFixed32);
+    return static_cast<uint32_t>(int_value_);
+  }
+
+  int32_t as_int32() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kVarInt ||
+                    type() == proto_utils::ProtoWireType::kFixed32);
+    return static_cast<int32_t>(int_value_);
+  }
+
+  int32_t as_sint32() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kVarInt);
+    return proto_utils::ZigZagDecode(static_cast<uint32_t>(int_value_));
+  }
+
+  uint64_t as_uint64() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kVarInt ||
+                    type() == proto_utils::ProtoWireType::kFixed32 ||
+                    type() == proto_utils::ProtoWireType::kFixed64);
+    return int_value_;
+  }
+
+  int64_t as_int64() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kVarInt ||
+                    type() == proto_utils::ProtoWireType::kFixed32 ||
+                    type() == proto_utils::ProtoWireType::kFixed64);
+    return static_cast<int64_t>(int_value_);
+  }
+
+  int64_t as_sint64() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kVarInt);
+    return proto_utils::ZigZagDecode(static_cast<uint64_t>(int_value_));
+  }
+
+  float as_float() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kFixed32);
+    float res;
+    uint32_t value32 = static_cast<uint32_t>(int_value_);
+    memcpy(&res, &value32, sizeof(res));
+    return res;
+  }
+
+  double as_double() const {
+    PERFETTO_DCHECK(!valid() || type() == proto_utils::ProtoWireType::kFixed64);
+    double res;
+    memcpy(&res, &int_value_, sizeof(res));
+    return res;
+  }
+
+  ConstChars as_string() const {
+    PERFETTO_DCHECK(!valid() ||
+                    type() == proto_utils::ProtoWireType::kLengthDelimited);
+    return ConstChars{reinterpret_cast<const char*>(data()), size_};
+  }
+
+  std::string as_std_string() const { return as_string().ToStdString(); }
+
+  ConstBytes as_bytes() const {
+    PERFETTO_DCHECK(!valid() ||
+                    type() == proto_utils::ProtoWireType::kLengthDelimited);
+    return ConstBytes{data(), size_};
+  }
+
+  const uint8_t* data() const {
+    PERFETTO_DCHECK(!valid() ||
+                    type() == proto_utils::ProtoWireType::kLengthDelimited);
+    return reinterpret_cast<const uint8_t*>(int_value_);
+  }
+
+  size_t size() const {
+    PERFETTO_DCHECK(!valid() ||
+                    type() == proto_utils::ProtoWireType::kLengthDelimited);
+    return size_;
+  }
+
+  uint64_t raw_int_value() const { return int_value_; }
+
+  void initialize(uint16_t id,
+                  uint8_t type,
+                  uint64_t int_value,
+                  uint32_t size) {
+    id_ = id;
+    type_ = type;
+    int_value_ = int_value;
+    size_ = size;
+  }
+
+  // For use with templates. This is used by RepeatedFieldIterator::operator*().
+  void get(bool* val) const { *val = as_bool(); }
+  void get(uint32_t* val) const { *val = as_uint32(); }
+  void get(int32_t* val) const { *val = as_int32(); }
+  void get(uint64_t* val) const { *val = as_uint64(); }
+  void get(int64_t* val) const { *val = as_int64(); }
+  void get(float* val) const { *val = as_float(); }
+  void get(double* val) const { *val = as_double(); }
+  void get(std::string* val) const { *val = as_std_string(); }
+  void get(ConstChars* val) const { *val = as_string(); }
+  void get(ConstBytes* val) const { *val = as_bytes(); }
+  void get_signed(int32_t* val) const { *val = as_sint32(); }
+  void get_signed(int64_t* val) const { *val = as_sint64(); }
+
+  // For enum types.
+  template <typename T,
+            typename = typename std::enable_if<std::is_enum<T>::value, T>::type>
+  void get(T* val) const {
+    *val = static_cast<T>(as_int32());
+  }
+
+  // Serializes the field back into a proto-encoded byte stream and appends it
+  // to |dst|. |dst| is resized accordingly.
+  void SerializeAndAppendTo(std::string* dst) const;
+
+  // Serializes the field back into a proto-encoded byte stream and appends it
+  // to |dst|. |dst| is resized accordingly.
+  void SerializeAndAppendTo(std::vector<uint8_t>* dst) const;
+
+ private:
+  template <typename Container>
+  void SerializeAndAppendToInternal(Container* dst) const;
+
+  // Fields are deliberately not initialized to keep the class trivially
+  // constructible. It makes a large perf difference for ProtoDecoder.
+
+  uint64_t int_value_;  // In kLengthDelimited this contains the data() addr.
+  uint32_t size_;       // Only valid when when type == kLengthDelimited.
+  uint16_t id_;         // Proto field ordinal.
+  uint8_t type_;        // proto_utils::ProtoWireType.
+};
+
+// The Field struct is used in a lot of perf-sensitive contexts.
+static_assert(sizeof(Field) == 16, "Field struct too big");
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_FIELD_H_
+// gen_amalgamated begin header: include/perfetto/protozero/field_writer.h
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_FIELD_WRITER_H_
+#define INCLUDE_PERFETTO_PROTOZERO_FIELD_WRITER_H_
+
+namespace protozero {
+namespace internal {
+
+template <proto_utils::ProtoSchemaType proto_schema_type>
+struct FieldWriter {
+  static_assert(proto_schema_type != proto_utils::ProtoSchemaType::kMessage,
+                "FieldWriter can't be used with nested messages");
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kDouble> {
+  inline static void Append(Message& message, uint32_t field_id, double value) {
+    message.AppendFixed(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kFloat> {
+  inline static void Append(Message& message, uint32_t field_id, float value) {
+    message.AppendFixed(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kBool> {
+  inline static void Append(Message& message, uint32_t field_id, bool value) {
+    message.AppendTinyVarInt(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kInt32> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            int32_t value) {
+    message.AppendVarInt(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kInt64> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            int64_t value) {
+    message.AppendVarInt(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kUint32> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            uint32_t value) {
+    message.AppendVarInt(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kUint64> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            uint64_t value) {
+    message.AppendVarInt(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kSint32> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            int32_t value) {
+    message.AppendSignedVarInt(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kSint64> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            int64_t value) {
+    message.AppendSignedVarInt(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kFixed32> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            uint32_t value) {
+    message.AppendFixed(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kFixed64> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            uint64_t value) {
+    message.AppendFixed(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kSfixed32> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            int32_t value) {
+    message.AppendFixed(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kSfixed64> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            int64_t value) {
+    message.AppendFixed(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kEnum> {
+  template <typename EnumType>
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            EnumType value) {
+    message.AppendVarInt(field_id, value);
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kString> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            const char* data,
+                            size_t size) {
+    message.AppendBytes(field_id, data, size);
+  }
+
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            const std::string& value) {
+    message.AppendBytes(field_id, value.data(), value.size());
+  }
+};
+
+template <>
+struct FieldWriter<proto_utils::ProtoSchemaType::kBytes> {
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            const uint8_t* data,
+                            size_t size) {
+    message.AppendBytes(field_id, data, size);
+  }
+
+  inline static void Append(Message& message,
+                            uint32_t field_id,
+                            const std::string& value) {
+    message.AppendBytes(field_id, value.data(), value.size());
+  }
+};
+
+}  // namespace internal
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_FIELD_WRITER_H_
+// gen_amalgamated begin header: include/perfetto/protozero/message.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_MESSAGE_H_
+#define INCLUDE_PERFETTO_PROTOZERO_MESSAGE_H_
+
+#include <assert.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <string>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/contiguous_memory_range.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_writer.h"
+
+namespace perfetto {
+namespace shm_fuzz {
+class FakeProducer;
+}  // namespace shm_fuzz
+}  // namespace perfetto
+
+namespace protozero {
+
+class MessageArena;
+class MessageHandleBase;
+
+// Base class extended by the proto C++ stubs generated by the ProtoZero
+// compiler. This class provides the minimal runtime required to support
+// append-only operations and is designed for performance. None of the methods
+// require any dynamic memory allocation, unless more than 16 nested messages
+// are created via BeginNestedMessage() calls.
+class PERFETTO_EXPORT Message {
+ public:
+  friend class MessageHandleBase;
+
+  // The ctor is deliberately a no-op to avoid forwarding args from all
+  // subclasses. The real initialization is performed by Reset().
+  // Nested messages are allocated via placement new by MessageArena and
+  // implictly destroyed when the RootMessage's arena goes away. This is
+  // fine as long as all the fields are PODs, which is checked by the
+  // static_assert()s in the Reset() method.
+  Message() = default;
+
+  // Clears up the state, allowing the message to be reused as a fresh one.
+  void Reset(ScatteredStreamWriter*, MessageArena*);
+
+  // Commits all the changes to the buffer (backfills the size field of this and
+  // all nested messages) and seals the message. Returns the size of the message
+  // (and all nested sub-messages), without taking into account any chunking.
+  // Finalize is idempotent and can be called several times w/o side effects.
+  uint32_t Finalize();
+
+  // Optional. If is_valid() == true, the corresponding memory region (its
+  // length == proto_utils::kMessageLengthFieldSize) is backfilled with the size
+  // of this message (minus |size_already_written| below). This is the mechanism
+  // used by messages to backfill their corresponding size field in the parent
+  // message.
+  uint8_t* size_field() const { return size_field_; }
+  void set_size_field(uint8_t* size_field) { size_field_ = size_field; }
+
+  // This is to deal with case of backfilling the size of a root (non-nested)
+  // message which is split into multiple chunks. Upon finalization only the
+  // partial size that lies in the last chunk has to be backfilled.
+  void inc_size_already_written(uint32_t sz) { size_already_written_ += sz; }
+
+  Message* nested_message() { return nested_message_; }
+
+  bool is_finalized() const { return finalized_; }
+
+#if PERFETTO_DCHECK_IS_ON()
+  void set_handle(MessageHandleBase* handle) { handle_ = handle; }
+#endif
+
+  // Proto types: uint64, uint32, int64, int32, bool, enum.
+  template <typename T>
+  void AppendVarInt(uint32_t field_id, T value) {
+    if (nested_message_)
+      EndNestedMessage();
+
+    uint8_t buffer[proto_utils::kMaxSimpleFieldEncodedSize];
+    uint8_t* pos = buffer;
+
+    pos = proto_utils::WriteVarInt(proto_utils::MakeTagVarInt(field_id), pos);
+    // WriteVarInt encodes signed values in two's complement form.
+    pos = proto_utils::WriteVarInt(value, pos);
+    WriteToStream(buffer, pos);
+  }
+
+  // Proto types: sint64, sint32.
+  template <typename T>
+  void AppendSignedVarInt(uint32_t field_id, T value) {
+    AppendVarInt(field_id, proto_utils::ZigZagEncode(value));
+  }
+
+  // Proto types: bool, enum (small).
+  // Faster version of AppendVarInt for tiny numbers.
+  void AppendTinyVarInt(uint32_t field_id, int32_t value) {
+    PERFETTO_DCHECK(0 <= value && value < 0x80);
+    if (nested_message_)
+      EndNestedMessage();
+
+    uint8_t buffer[proto_utils::kMaxSimpleFieldEncodedSize];
+    uint8_t* pos = buffer;
+    // MakeTagVarInt gets super optimized here for constexpr.
+    pos = proto_utils::WriteVarInt(proto_utils::MakeTagVarInt(field_id), pos);
+    *pos++ = static_cast<uint8_t>(value);
+    WriteToStream(buffer, pos);
+  }
+
+  // Proto types: fixed64, sfixed64, fixed32, sfixed32, double, float.
+  template <typename T>
+  void AppendFixed(uint32_t field_id, T value) {
+    if (nested_message_)
+      EndNestedMessage();
+
+    uint8_t buffer[proto_utils::kMaxSimpleFieldEncodedSize];
+    uint8_t* pos = buffer;
+
+    pos = proto_utils::WriteVarInt(proto_utils::MakeTagFixed<T>(field_id), pos);
+    memcpy(pos, &value, sizeof(T));
+    pos += sizeof(T);
+    // TODO: Optimize memcpy performance, see http://crbug.com/624311 .
+    WriteToStream(buffer, pos);
+  }
+
+  void AppendString(uint32_t field_id, const char* str);
+
+  void AppendString(uint32_t field_id, const std::string& str) {
+    AppendBytes(field_id, str.data(), str.size());
+  }
+
+  void AppendBytes(uint32_t field_id, const void* value, size_t size);
+
+  // Append raw bytes for a field, using the supplied |ranges| to
+  // copy from |num_ranges| individual buffers.
+  size_t AppendScatteredBytes(uint32_t field_id,
+                              ContiguousMemoryRange* ranges,
+                              size_t num_ranges);
+
+  // Begins a nested message. The returned object is owned by the MessageArena
+  // of the root message. The nested message ends either when Finalize() is
+  // called or when any other Append* method is called in the parent class.
+  // The template argument T is supposed to be a stub class auto generated from
+  // a .proto, hence a subclass of Message.
+  template <class T>
+  T* BeginNestedMessage(uint32_t field_id) {
+    // This is to prevent subclasses (which should be autogenerated, though), to
+    // introduce extra state fields (which wouldn't be initialized by Reset()).
+    static_assert(std::is_base_of<Message, T>::value,
+                  "T must be a subclass of Message");
+    static_assert(sizeof(T) == sizeof(Message),
+                  "Message subclasses cannot introduce extra state.");
+    return static_cast<T*>(BeginNestedMessageInternal(field_id));
+  }
+
+  // Gives read-only access to the underlying stream_writer. This is used only
+  // by few internals to query the state of the underlying buffer. It is almost
+  // always a bad idea to poke at the stream_writer() internals.
+  const ScatteredStreamWriter* stream_writer() const { return stream_writer_; }
+
+  // Appends some raw bytes to the message. The use-case for this is preserving
+  // unknown fields in the decode -> re-encode path of xxx.gen.cc classes
+  // generated by the cppgen_plugin.cc.
+  // The caller needs to guarantee that the appended data is properly
+  // proto-encoded and each field has a proto preamble.
+  void AppendRawProtoBytes(const void* data, size_t size) {
+    const uint8_t* src = reinterpret_cast<const uint8_t*>(data);
+    WriteToStream(src, src + size);
+  }
+
+ private:
+  Message(const Message&) = delete;
+  Message& operator=(const Message&) = delete;
+
+  Message* BeginNestedMessageInternal(uint32_t field_id);
+
+  // Called by Finalize and Append* methods.
+  void EndNestedMessage();
+
+  void WriteToStream(const uint8_t* src_begin, const uint8_t* src_end) {
+    PERFETTO_DCHECK(!finalized_);
+    PERFETTO_DCHECK(src_begin <= src_end);
+    const uint32_t size = static_cast<uint32_t>(src_end - src_begin);
+    stream_writer_->WriteBytes(src_begin, size);
+    size_ += size;
+  }
+
+  // Only POD fields are allowed. This class's dtor is never called.
+  // See the comment on the static_assert in the corresponding .cc file.
+
+  // The stream writer interface used for the serialization.
+  ScatteredStreamWriter* stream_writer_;
+
+  // The storage used to allocate nested Message objects.
+  // This is owned by RootMessage<T>.
+  MessageArena* arena_;
+
+  // Pointer to the last child message created through BeginNestedMessage(), if
+  // any, nullptr otherwise. There is no need to keep track of more than one
+  // message per nesting level as the proto-zero API contract mandates that
+  // nested fields can be filled only in a stacked fashion. In other words,
+  // nested messages are finalized and sealed when any other field is set in the
+  // parent message (or the parent message itself is finalized) and cannot be
+  // accessed anymore afterwards.
+  Message* nested_message_;
+
+  // [optional] Pointer to a non-aligned pre-reserved var-int slot of
+  // kMessageLengthFieldSize bytes. When set, the Finalize() method will write
+  // the size of proto-encoded message in the pointed memory region.
+  uint8_t* size_field_;
+
+  // Keeps track of the size of the current message.
+  uint32_t size_;
+
+  // See comment for inc_size_already_written().
+  uint32_t size_already_written_;
+
+  // When true, no more changes to the message are allowed. This is to DCHECK
+  // attempts of writing to a message which has been Finalize()-d.
+  bool finalized_;
+
+#if PERFETTO_DCHECK_IS_ON()
+  // Current generation of message. Incremented on Reset.
+  // Used to detect stale handles.
+  uint32_t generation_;
+
+  MessageHandleBase* handle_;
+#endif
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_MESSAGE_H_
+// gen_amalgamated begin header: include/perfetto/protozero/message_arena.h
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_MESSAGE_ARENA_H_
+#define INCLUDE_PERFETTO_PROTOZERO_MESSAGE_ARENA_H_
+
+#include <stdint.h>
+
+#include <list>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+
+namespace protozero {
+
+class Message;
+
+// Object allocator for fixed-sized protozero::Message objects.
+// It's a simple bump-pointer allocator which leverages the stack-alike
+// usage pattern of protozero nested messages. It avoids hitting the system
+// allocator in most cases, by reusing the same block, and falls back on
+// allocating new blocks only when using deeply nested messages (which are
+// extremely rare).
+// This is used by RootMessage<T> to handle the storage for root-level messages.
+class PERFETTO_EXPORT MessageArena {
+ public:
+  MessageArena();
+  ~MessageArena();
+
+  // Strictly no copies or moves as this is used to hand out pointers.
+  MessageArena(const MessageArena&) = delete;
+  MessageArena& operator=(const MessageArena&) = delete;
+  MessageArena(MessageArena&&) = delete;
+  MessageArena& operator=(MessageArena&&) = delete;
+
+  // Allocates a new Message object.
+  Message* NewMessage();
+
+  // Deletes the last message allocated. The |msg| argument is used only for
+  // DCHECKs, it MUST be the pointer obtained by the last NewMessage() call.
+  void DeleteLastMessage(Message* msg) {
+    PERFETTO_DCHECK(!blocks_.empty() && blocks_.back().entries > 0);
+    PERFETTO_DCHECK(&blocks_.back().storage[blocks_.back().entries - 1] ==
+                    static_cast<void*>(msg));
+    DeleteLastMessageInternal();
+  }
+
+  // Resets the state of the arena, clearing up all but one block. This is used
+  // to avoid leaking outstanding unfinished sub-messages while recycling the
+  // RootMessage object (this is extremely rare due to the RAII scoped handles
+  // but could happen if some client does some overly clever std::move() trick).
+  void Reset() {
+    PERFETTO_DCHECK(!blocks_.empty());
+    blocks_.resize(1);
+    auto& block = blocks_.back();
+    block.entries = 0;
+    PERFETTO_ASAN_POISON(block.storage, sizeof(block.storage));
+  }
+
+ private:
+  void DeleteLastMessageInternal();
+
+  struct Block {
+    static constexpr size_t kCapacity = 16;
+
+    Block() { PERFETTO_ASAN_POISON(storage, sizeof(storage)); }
+
+    std::aligned_storage<sizeof(Message), alignof(Message)>::type
+        storage[kCapacity];
+    uint32_t entries = 0;  // # Message entries used (<= kCapacity).
+  };
+
+  // blocks are used to hand out pointers and must not be moved. Hence why
+  // std::list rather than std::vector.
+  std::list<Block> blocks_;
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_MESSAGE_ARENA_H_
+// gen_amalgamated begin header: include/perfetto/protozero/message_handle.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_MESSAGE_HANDLE_H_
+#define INCLUDE_PERFETTO_PROTOZERO_MESSAGE_HANDLE_H_
+
+#include <functional>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+
+namespace protozero {
+
+class Message;
+
+// MessageHandle allows to decouple the lifetime of a proto message
+// from the underlying storage. It gives the following guarantees:
+// - The underlying message is finalized (if still alive) if the handle goes
+//   out of scope.
+// - In Debug / DCHECK_ALWAYS_ON builds, the handle becomes null once the
+//   message is finalized. This is to enforce the append-only API. For instance
+//   when adding two repeated messages, the addition of the 2nd one forces
+//   the finalization of the first.
+// Think about this as a WeakPtr<Message> which calls
+// Message::Finalize() when going out of scope.
+
+class PERFETTO_EXPORT MessageHandleBase {
+ public:
+  ~MessageHandleBase();
+
+  // Move-only type.
+  MessageHandleBase(MessageHandleBase&&) noexcept;
+  MessageHandleBase& operator=(MessageHandleBase&&);
+  explicit operator bool() const {
+#if PERFETTO_DCHECK_IS_ON()
+    PERFETTO_DCHECK(!message_ || generation_ == message_->generation_);
+#endif
+    return !!message_;
+  }
+
+ protected:
+  explicit MessageHandleBase(Message* = nullptr);
+  Message* operator->() const {
+#if PERFETTO_DCHECK_IS_ON()
+    PERFETTO_DCHECK(!message_ || generation_ == message_->generation_);
+#endif
+    return message_;
+  }
+  Message& operator*() const { return *(operator->()); }
+
+ private:
+  friend class Message;
+  MessageHandleBase(const MessageHandleBase&) = delete;
+  MessageHandleBase& operator=(const MessageHandleBase&) = delete;
+
+  void reset_message() {
+    // This is called by Message::Finalize().
+    PERFETTO_DCHECK(message_->is_finalized());
+    message_ = nullptr;
+  }
+
+  void Move(MessageHandleBase&&);
+
+  void FinalizeMessage() { message_->Finalize(); }
+
+  Message* message_;
+#if PERFETTO_DCHECK_IS_ON()
+  uint32_t generation_;
+#endif
+};
+
+template <typename T>
+class MessageHandle : public MessageHandleBase {
+ public:
+  MessageHandle() : MessageHandle(nullptr) {}
+  explicit MessageHandle(T* message) : MessageHandleBase(message) {}
+
+  explicit operator bool() const { return MessageHandleBase::operator bool(); }
+
+  T& operator*() const {
+    return static_cast<T&>(MessageHandleBase::operator*());
+  }
+
+  T* operator->() const {
+    return static_cast<T*>(MessageHandleBase::operator->());
+  }
+
+  T* get() const { return static_cast<T*>(MessageHandleBase::operator->()); }
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_MESSAGE_HANDLE_H_
+// gen_amalgamated begin header: include/perfetto/protozero/packed_repeated_fields.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_PACKED_REPEATED_FIELDS_H_
+#define INCLUDE_PERFETTO_PROTOZERO_PACKED_REPEATED_FIELDS_H_
+
+#include <stdint.h>
+
+#include <array>
+#include <memory>
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace protozero {
+
+// This file contains classes used when encoding packed repeated fields.
+// To encode such a field, the caller is first expected to accumulate all of the
+// values in one of the following types (depending on the wire type of the
+// individual elements), defined below:
+// * protozero::PackedVarInt
+// * protozero::PackedFixedSizeInt</*element_type=*/ uint32_t>
+// Then that buffer is passed to the protozero-generated setters as an argument.
+// After calling the setter, the buffer can be destroyed.
+//
+// An example of encoding a packed field:
+//   protozero::HeapBuffered<protozero::Message> msg;
+//   protozero::PackedVarInt buf;
+//   buf.Append(42);
+//   buf.Append(-1);
+//   msg->set_fieldname(buf);
+//   msg.SerializeAsString();
+
+class PackedBufferBase {
+ public:
+  PackedBufferBase() { Reset(); }
+
+  // Copy or move is disabled due to pointers to stack addresses.
+  PackedBufferBase(const PackedBufferBase&) = delete;
+  PackedBufferBase(PackedBufferBase&&) = delete;
+  PackedBufferBase& operator=(const PackedBufferBase&) = delete;
+  PackedBufferBase& operator=(PackedBufferBase&&) = delete;
+
+  void Reset();
+
+  const uint8_t* data() const { return storage_begin_; }
+
+  size_t size() const {
+    return static_cast<size_t>(write_ptr_ - storage_begin_);
+  }
+
+ protected:
+  void GrowIfNeeded() {
+    PERFETTO_DCHECK(write_ptr_ >= storage_begin_ && write_ptr_ <= storage_end_);
+    if (PERFETTO_UNLIKELY(write_ptr_ + kMaxElementSize > storage_end_)) {
+      GrowSlowpath();
+    }
+  }
+
+  void GrowSlowpath();
+
+  // max(uint64_t varint encoding, biggest fixed type (uint64)).
+  static constexpr size_t kMaxElementSize = 10;
+
+  // So sizeof(this) == 8k.
+  static constexpr size_t kOnStackStorageSize = 8192 - 32;
+
+  uint8_t* storage_begin_;
+  uint8_t* storage_end_;
+  uint8_t* write_ptr_;
+  std::unique_ptr<uint8_t[]> heap_buf_;
+  alignas(uint64_t) uint8_t stack_buf_[kOnStackStorageSize];
+};
+
+class PackedVarInt : public PackedBufferBase {
+ public:
+  template <typename T>
+  void Append(T value) {
+    GrowIfNeeded();
+    write_ptr_ = proto_utils::WriteVarInt(value, write_ptr_);
+  }
+};
+
+template <typename T /* e.g. uint32_t for Fixed32 */>
+class PackedFixedSizeInt : public PackedBufferBase {
+ public:
+  void Append(T value) {
+    static_assert(sizeof(T) == 4 || sizeof(T) == 8,
+                  "PackedFixedSizeInt should be used only with 32/64-bit ints");
+    static_assert(sizeof(T) <= kMaxElementSize,
+                  "kMaxElementSize needs to be updated");
+    GrowIfNeeded();
+    PERFETTO_DCHECK(reinterpret_cast<size_t>(write_ptr_) % alignof(T) == 0);
+    memcpy(reinterpret_cast<T*>(write_ptr_), &value, sizeof(T));
+    write_ptr_ += sizeof(T);
+  }
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_PACKED_REPEATED_FIELDS_H_
+// gen_amalgamated begin header: include/perfetto/protozero/proto_decoder.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_PROTO_DECODER_H_
+#define INCLUDE_PERFETTO_PROTOZERO_PROTO_DECODER_H_
+
+#include <stdint.h>
+#include <array>
+#include <memory>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/field.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/proto_utils.h"
+
+namespace protozero {
+
+// A generic protobuf decoder. Doesn't require any knowledge about the proto
+// schema. It tokenizes fields, retrieves their ID and type and exposes
+// accessors to retrieve its values.
+// It does NOT recurse in nested submessages, instead it just computes their
+// boundaries, recursion is left to the caller.
+// This class is designed to be used in perf-sensitive contexts. It does not
+// allocate and does not perform any proto semantic checks (e.g. repeated /
+// required / optional). It's supposedly safe wrt out-of-bounds memory accesses
+// (see proto_decoder_fuzzer.cc).
+// This class serves also as a building block for TypedProtoDecoder, used when
+// the schema is known at compile time.
+class PERFETTO_EXPORT ProtoDecoder {
+ public:
+  // Creates a ProtoDecoder using the given |buffer| with size |length| bytes.
+  ProtoDecoder(const void* buffer, size_t length)
+      : begin_(reinterpret_cast<const uint8_t*>(buffer)),
+        end_(begin_ + length),
+        read_ptr_(begin_) {}
+  ProtoDecoder(const std::string& str) : ProtoDecoder(str.data(), str.size()) {}
+  ProtoDecoder(const ConstBytes& cb) : ProtoDecoder(cb.data, cb.size) {}
+
+  // Reads the next field from the buffer and advances the read cursor. If a
+  // full field cannot be read, the returned Field will be invalid (i.e.
+  // field.valid() == false).
+  Field ReadField();
+
+  // Finds the first field with the given id. Doesn't affect the read cursor.
+  Field FindField(uint32_t field_id);
+
+  // Resets the read cursor to the start of the buffer.
+  void Reset() { read_ptr_ = begin_; }
+
+  // Resets the read cursor to the given position (must be within the buffer).
+  void Reset(const uint8_t* pos) {
+    PERFETTO_DCHECK(pos >= begin_ && pos < end_);
+    read_ptr_ = pos;
+  }
+
+  // Returns the position of read cursor, relative to the start of the buffer.
+  size_t read_offset() const { return static_cast<size_t>(read_ptr_ - begin_); }
+
+  size_t bytes_left() const {
+    PERFETTO_DCHECK(read_ptr_ <= end_);
+    return static_cast<size_t>(end_ - read_ptr_);
+  }
+
+  const uint8_t* begin() const { return begin_; }
+  const uint8_t* end() const { return end_; }
+
+ protected:
+  const uint8_t* const begin_;
+  const uint8_t* const end_;
+  const uint8_t* read_ptr_ = nullptr;
+};
+
+// An iterator-like class used to iterate through repeated fields. Used by
+// TypedProtoDecoder. The iteration sequence is a bit counter-intuitive due to
+// the fact that fields_[field_id] holds the *last* value of the field, not the
+// first, but the remaining storage holds repeated fields in FIFO order.
+// Assume that we push the 10,11,12 into a repeated field with ID=1.
+//
+// Decoder memory layout:  [  fields storage  ] [ repeated fields storage ]
+// 1st iteration:           10
+// 2nd iteration:           11                   10
+// 3rd iteration:           12                   10 11
+//
+// We start the iteration @ fields_[num_fields], which is the start of the
+// repeated fields storage, proceed until the end and lastly jump @ fields_[id].
+template <typename T>
+class RepeatedFieldIterator {
+ public:
+  RepeatedFieldIterator(uint32_t field_id,
+                        const Field* begin,
+                        const Field* end,
+                        const Field* last)
+      : field_id_(field_id), iter_(begin), end_(end), last_(last) {
+    FindNextMatchingId();
+  }
+
+  // Constructs an invalid iterator.
+  RepeatedFieldIterator()
+      : field_id_(0u), iter_(nullptr), end_(nullptr), last_(nullptr) {}
+
+  explicit operator bool() const { return iter_ != end_; }
+  const Field& field() const { return *iter_; }
+
+  T operator*() const {
+    T val{};
+    iter_->get(&val);
+    return val;
+  }
+  const Field* operator->() const { return iter_; }
+
+  RepeatedFieldIterator& operator++() {
+    PERFETTO_DCHECK(iter_ != end_);
+    if (iter_ == last_) {
+      iter_ = end_;
+      return *this;
+    }
+    ++iter_;
+    FindNextMatchingId();
+    return *this;
+  }
+
+  RepeatedFieldIterator operator++(int) {
+    PERFETTO_DCHECK(iter_ != end_);
+    RepeatedFieldIterator it(*this);
+    ++(*this);
+    return it;
+  }
+
+ private:
+  void FindNextMatchingId() {
+    PERFETTO_DCHECK(iter_ != last_);
+    for (; iter_ != end_; ++iter_) {
+      if (iter_->id() == field_id_)
+        return;
+    }
+    iter_ = last_->valid() ? last_ : end_;
+  }
+
+  uint32_t field_id_;
+
+  // Initially points to the beginning of the repeated field storage, then is
+  // incremented as we call operator++().
+  const Field* iter_;
+
+  // Always points to fields_[size_], i.e. past the end of the storage.
+  const Field* end_;
+
+  // Always points to fields_[field_id].
+  const Field* last_;
+};
+
+// As RepeatedFieldIterator, but allows iterating over a packed repeated field
+// (which will be initially stored as a single length-delimited field).
+// See |GetPackedRepeatedField| for details.
+//
+// Assumes little endianness, and that the input buffers are well formed -
+// containing an exact multiple of encoded elements.
+template <proto_utils::ProtoWireType wire_type, typename CppType>
+class PackedRepeatedFieldIterator {
+ public:
+  PackedRepeatedFieldIterator(const uint8_t* data_begin,
+                              size_t size,
+                              bool* parse_error_ptr)
+      : data_end_(data_begin ? data_begin + size : nullptr),
+        read_ptr_(data_begin),
+        parse_error_(parse_error_ptr) {
+    using proto_utils::ProtoWireType;
+    static_assert(wire_type == ProtoWireType::kVarInt ||
+                      wire_type == ProtoWireType::kFixed32 ||
+                      wire_type == ProtoWireType::kFixed64,
+                  "invalid type");
+
+    PERFETTO_DCHECK(parse_error_ptr);
+
+    // Either the field is unset (and there are no data pointer), or the field
+    // is set with a zero length payload. Mark the iterator as invalid in both
+    // cases.
+    if (size == 0) {
+      curr_value_valid_ = false;
+      return;
+    }
+
+    if ((wire_type == ProtoWireType::kFixed32 && (size % 4) != 0) ||
+        (wire_type == ProtoWireType::kFixed64 && (size % 8) != 0)) {
+      *parse_error_ = true;
+      curr_value_valid_ = false;
+      return;
+    }
+
+    ++(*this);
+  }
+
+  const CppType operator*() const { return curr_value_; }
+  explicit operator bool() const { return curr_value_valid_; }
+
+  PackedRepeatedFieldIterator& operator++() {
+    using proto_utils::ProtoWireType;
+
+    if (PERFETTO_UNLIKELY(!curr_value_valid_))
+      return *this;
+
+    if (PERFETTO_UNLIKELY(read_ptr_ == data_end_)) {
+      curr_value_valid_ = false;
+      return *this;
+    }
+
+    if (wire_type == ProtoWireType::kVarInt) {
+      uint64_t new_value = 0;
+      const uint8_t* new_pos =
+          proto_utils::ParseVarInt(read_ptr_, data_end_, &new_value);
+
+      if (PERFETTO_UNLIKELY(new_pos == read_ptr_)) {
+        // Failed to decode the varint (probably incomplete buffer).
+        *parse_error_ = true;
+        curr_value_valid_ = false;
+      } else {
+        read_ptr_ = new_pos;
+        curr_value_ = static_cast<CppType>(new_value);
+      }
+    } else {  // kFixed32 or kFixed64
+      constexpr size_t kStep = wire_type == ProtoWireType::kFixed32 ? 4 : 8;
+
+      // NB: the raw buffer is not guaranteed to be aligned, so neither are
+      // these copies.
+      memcpy(&curr_value_, read_ptr_, sizeof(CppType));
+      read_ptr_ += kStep;
+    }
+
+    return *this;
+  }
+
+  PackedRepeatedFieldIterator operator++(int) {
+    PackedRepeatedFieldIterator it(*this);
+    ++(*this);
+    return it;
+  }
+
+ private:
+  // Might be null if the backing proto field isn't set.
+  const uint8_t* const data_end_;
+
+  // The iterator looks ahead by an element, so |curr_value| holds the value
+  // to be returned when the caller dereferences the iterator, and |read_ptr_|
+  // points at the start of the next element to be decoded.
+  // |read_ptr_| might be null if the backing proto field isn't set.
+  const uint8_t* read_ptr_;
+  CppType curr_value_ = 0;
+
+  // Set to false once we've exhausted the iterator, or encountered an error.
+  bool curr_value_valid_ = true;
+
+  // Where to set parsing errors, supplied by the caller.
+  bool* const parse_error_;
+};
+
+// This decoder loads all fields upfront, without recursing in nested messages.
+// It is used as a base class for typed decoders generated by the pbzero plugin.
+// The split between TypedProtoDecoderBase and TypedProtoDecoder<> is to have
+// unique definition of functions like ParseAllFields() and ExpandHeapStorage().
+// The storage (either on-stack or on-heap) for this class is organized as
+// follows:
+// |-------------------------- fields_ ----------------------|
+// [ field 0 (invalid) ] [ fields 1 .. N ] [ repeated fields ]
+//                                        ^                  ^
+//                                        num_fields_        size_
+class PERFETTO_EXPORT TypedProtoDecoderBase : public ProtoDecoder {
+ public:
+  // If the field |id| is known at compile time, prefer the templated
+  // specialization at<kFieldNumber>().
+  const Field& Get(uint32_t id) const {
+    return PERFETTO_LIKELY(id < num_fields_) ? fields_[id] : fields_[0];
+  }
+
+  // Returns an object that allows to iterate over all instances of a repeated
+  // field given its id. Example usage:
+  //   for (auto it = decoder.GetRepeated<int32_t>(N); it; ++it) { ... }
+  template <typename T>
+  RepeatedFieldIterator<T> GetRepeated(uint32_t field_id) const {
+    return RepeatedFieldIterator<T>(field_id, &fields_[num_fields_],
+                                    &fields_[size_], &fields_[field_id]);
+  }
+
+  // Returns an objects that allows to iterate over all entries of a packed
+  // repeated field given its id and type. The |wire_type| is necessary for
+  // decoding the packed field, the |cpp_type| is for convenience & stronger
+  // typing.
+  //
+  // The caller must also supply a pointer to a bool that is set to true if the
+  // packed buffer is found to be malformed while iterating (so you need to
+  // exhaust the iterator if you want to check the full extent of the buffer).
+  //
+  // Note that unlike standard protobuf parsers, protozero does not allow
+  // treating of packed repeated fields as non-packed and vice-versa (therefore
+  // not making the packed option forwards and backwards compatible). So
+  // the caller needs to use the right accessor for correct results.
+  template <proto_utils::ProtoWireType wire_type, typename cpp_type>
+  PackedRepeatedFieldIterator<wire_type, cpp_type> GetPackedRepeated(
+      uint32_t field_id,
+      bool* parse_error_location) const {
+    const Field& field = Get(field_id);
+    if (field.valid()) {
+      return PackedRepeatedFieldIterator<wire_type, cpp_type>(
+          field.data(), field.size(), parse_error_location);
+    } else {
+      return PackedRepeatedFieldIterator<wire_type, cpp_type>(
+          nullptr, 0, parse_error_location);
+    }
+  }
+
+ protected:
+  TypedProtoDecoderBase(Field* storage,
+                        uint32_t num_fields,
+                        uint32_t capacity,
+                        const uint8_t* buffer,
+                        size_t length)
+      : ProtoDecoder(buffer, length),
+        fields_(storage),
+        num_fields_(num_fields),
+        size_(num_fields),
+        capacity_(capacity) {
+    // The reason why Field needs to be trivially de/constructible is to avoid
+    // implicit initializers on all the ~1000 entries. We need it to initialize
+    // only on the first |max_field_id| fields, the remaining capacity doesn't
+    // require initialization.
+    static_assert(std::is_trivially_constructible<Field>::value &&
+                      std::is_trivially_destructible<Field>::value &&
+                      std::is_trivial<Field>::value,
+                  "Field must be a trivial aggregate type");
+    memset(fields_, 0, sizeof(Field) * num_fields_);
+  }
+
+  void ParseAllFields();
+
+  // Called when the default on-stack storage is exhausted and new repeated
+  // fields need to be pushed.
+  void ExpandHeapStorage();
+
+  // Used only in presence of a large number of repeated fields, when the
+  // default on-stack storage is exhausted.
+  std::unique_ptr<Field[]> heap_storage_;
+
+  // Points to the storage, either on-stack (default, provided by the template
+  // specialization) or |heap_storage_| after ExpandHeapStorage() is called, in
+  // case of a large number of repeated fields.
+  Field* fields_;
+
+  // Number of fields without accounting repeated storage. This is equal to
+  // MAX_FIELD_ID + 1 (to account for the invalid 0th field).
+  // This value is always <= size_ (and hence <= capacity);
+  uint32_t num_fields_;
+
+  // Number of active |fields_| entries. This is initially equal to the highest
+  // number of fields for the message (num_fields_ == MAX_FIELD_ID + 1) and can
+  // grow up to |capacity_| in the case of repeated fields.
+  uint32_t size_;
+
+  // Initially equal to kFieldsCapacity of the TypedProtoDecoder
+  // specialization. Can grow when falling back on heap-based storage, in which
+  // case it represents the size (#fields with each entry of a repeated field
+  // counted individually) of the |heap_storage_| array.
+  uint32_t capacity_;
+};
+
+// Template class instantiated by the auto-generated decoder classes declared in
+// xxx.pbzero.h files.
+template <int MAX_FIELD_ID, bool HAS_NONPACKED_REPEATED_FIELDS>
+class TypedProtoDecoder : public TypedProtoDecoderBase {
+ public:
+  TypedProtoDecoder(const uint8_t* buffer, size_t length)
+      : TypedProtoDecoderBase(on_stack_storage_,
+                              /*num_fields=*/MAX_FIELD_ID + 1,
+                              kCapacity,
+                              buffer,
+                              length) {
+    static_assert(MAX_FIELD_ID <= kMaxDecoderFieldId, "Field ordinal too high");
+    TypedProtoDecoderBase::ParseAllFields();
+  }
+
+  template <uint32_t FIELD_ID>
+  const Field& at() const {
+    static_assert(FIELD_ID <= MAX_FIELD_ID, "FIELD_ID > MAX_FIELD_ID");
+    return fields_[FIELD_ID];
+  }
+
+  TypedProtoDecoder(TypedProtoDecoder&& other) noexcept
+      : TypedProtoDecoderBase(std::move(other)) {
+    // If the moved-from decoder was using on-stack storage, we need to update
+    // our pointer to point to this decoder's on-stack storage.
+    if (fields_ == other.on_stack_storage_) {
+      fields_ = on_stack_storage_;
+      memcpy(on_stack_storage_, other.on_stack_storage_,
+             sizeof(on_stack_storage_));
+    }
+  }
+
+ private:
+  // In the case of non-repeated fields, this constant defines the highest field
+  // id we are able to decode. This is to limit the on-stack storage.
+  // In the case of repeated fields, this constant defines the max number of
+  // repeated fields that we'll be able to store before falling back on the
+  // heap. Keep this value in sync with the one in protozero_generator.cc.
+  static constexpr size_t kMaxDecoderFieldId = 999;
+
+  // If we the message has no repeated fields we need at most N Field entries
+  // in the on-stack storage, where N is the highest field id.
+  // Otherwise we need some room to store repeated fields.
+  static constexpr size_t kCapacity =
+      1 + (HAS_NONPACKED_REPEATED_FIELDS ? kMaxDecoderFieldId : MAX_FIELD_ID);
+
+  Field on_stack_storage_[kCapacity];
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_PROTO_DECODER_H_
+// gen_amalgamated begin header: include/perfetto/protozero/proto_utils.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_PROTO_UTILS_H_
+#define INCLUDE_PERFETTO_PROTOZERO_PROTO_UTILS_H_
+
+#include <inttypes.h>
+#include <stddef.h>
+
+#include <type_traits>
+
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+
+namespace protozero {
+namespace proto_utils {
+
+// See https://developers.google.com/protocol-buffers/docs/encoding wire types.
+// This is a type encoded into the proto that provides just enough info to
+// find the length of the following value.
+enum class ProtoWireType : uint32_t {
+  kVarInt = 0,
+  kFixed64 = 1,
+  kLengthDelimited = 2,
+  kFixed32 = 5,
+};
+
+// This is the type defined in the proto for each field. This information
+// is used to decide the translation strategy when writing the trace.
+enum class ProtoSchemaType {
+  kUnknown = 0,
+  kDouble,
+  kFloat,
+  kInt64,
+  kUint64,
+  kInt32,
+  kFixed64,
+  kFixed32,
+  kBool,
+  kString,
+  kGroup,  // Deprecated (proto2 only)
+  kMessage,
+  kBytes,
+  kUint32,
+  kEnum,
+  kSfixed32,
+  kSfixed64,
+  kSint32,
+  kSint64,
+};
+
+inline const char* ProtoSchemaToString(ProtoSchemaType v) {
+  switch (v) {
+    case ProtoSchemaType::kUnknown:
+      return "unknown";
+    case ProtoSchemaType::kDouble:
+      return "double";
+    case ProtoSchemaType::kFloat:
+      return "float";
+    case ProtoSchemaType::kInt64:
+      return "int64";
+    case ProtoSchemaType::kUint64:
+      return "uint64";
+    case ProtoSchemaType::kInt32:
+      return "int32";
+    case ProtoSchemaType::kFixed64:
+      return "fixed64";
+    case ProtoSchemaType::kFixed32:
+      return "fixed32";
+    case ProtoSchemaType::kBool:
+      return "bool";
+    case ProtoSchemaType::kString:
+      return "string";
+    case ProtoSchemaType::kGroup:
+      return "group";
+    case ProtoSchemaType::kMessage:
+      return "message";
+    case ProtoSchemaType::kBytes:
+      return "bytes";
+    case ProtoSchemaType::kUint32:
+      return "uint32";
+    case ProtoSchemaType::kEnum:
+      return "enum";
+    case ProtoSchemaType::kSfixed32:
+      return "sfixed32";
+    case ProtoSchemaType::kSfixed64:
+      return "sfixed64";
+    case ProtoSchemaType::kSint32:
+      return "sint32";
+    case ProtoSchemaType::kSint64:
+      return "sint64";
+  }
+  // For gcc:
+  PERFETTO_DCHECK(false);
+  return "";
+}
+
+// Maximum message size supported: 256 MiB (4 x 7-bit due to varint encoding).
+constexpr size_t kMessageLengthFieldSize = 4;
+constexpr size_t kMaxMessageLength = (1u << (kMessageLengthFieldSize * 7)) - 1;
+
+// Field tag is encoded as 32-bit varint (5 bytes at most).
+// Largest value of simple (not length-delimited) field is 64-bit varint
+// (10 bytes at most). 15 bytes buffer is enough to store a simple field.
+constexpr size_t kMaxTagEncodedSize = 5;
+constexpr size_t kMaxSimpleFieldEncodedSize = kMaxTagEncodedSize + 10;
+
+// Proto types: (int|uint|sint)(32|64), bool, enum.
+constexpr uint32_t MakeTagVarInt(uint32_t field_id) {
+  return (field_id << 3) | static_cast<uint32_t>(ProtoWireType::kVarInt);
+}
+
+// Proto types: fixed64, sfixed64, fixed32, sfixed32, double, float.
+template <typename T>
+constexpr uint32_t MakeTagFixed(uint32_t field_id) {
+  static_assert(sizeof(T) == 8 || sizeof(T) == 4, "Value must be 4 or 8 bytes");
+  return (field_id << 3) |
+         static_cast<uint32_t>((sizeof(T) == 8 ? ProtoWireType::kFixed64
+                                               : ProtoWireType::kFixed32));
+}
+
+// Proto types: string, bytes, embedded messages.
+constexpr uint32_t MakeTagLengthDelimited(uint32_t field_id) {
+  return (field_id << 3) |
+         static_cast<uint32_t>(ProtoWireType::kLengthDelimited);
+}
+
+// Proto types: sint64, sint32.
+template <typename T>
+inline typename std::make_unsigned<T>::type ZigZagEncode(T value) {
+  using UnsignedType = typename std::make_unsigned<T>::type;
+
+  // Right-shift of negative values is implementation specific.
+  // Assert the implementation does what we expect, which is that shifting any
+  // positive value by sizeof(T) * 8 - 1 gives an all 0 bitmap, and a negative
+  // value gives and all 1 bitmap.
+  constexpr uint64_t kUnsignedZero = 0u;
+  constexpr int64_t kNegativeOne = -1;
+  constexpr int64_t kPositiveOne = 1;
+  static_assert(static_cast<uint64_t>(kNegativeOne >> 63) == ~kUnsignedZero,
+                "implementation does not support assumed rightshift");
+  static_assert(static_cast<uint64_t>(kPositiveOne >> 63) == kUnsignedZero,
+                "implementation does not support assumed rightshift");
+
+  return (static_cast<UnsignedType>(value) << 1) ^
+         static_cast<UnsignedType>(value >> (sizeof(T) * 8 - 1));
+}
+
+// Proto types: sint64, sint32.
+template <typename T>
+inline typename std::make_signed<T>::type ZigZagDecode(T value) {
+  using UnsignedType = typename std::make_unsigned<T>::type;
+  using SignedType = typename std::make_signed<T>::type;
+  auto u_value = static_cast<UnsignedType>(value);
+  auto mask = static_cast<UnsignedType>(-static_cast<SignedType>(u_value & 1));
+  return static_cast<SignedType>((u_value >> 1) ^ mask);
+}
+
+template <typename T>
+inline uint8_t* WriteVarInt(T value, uint8_t* target) {
+  // If value is <= 0 we must first sign extend to int64_t (see [1]).
+  // Finally we always cast to an unsigned value to to avoid arithmetic
+  // (sign expanding) shifts in the while loop.
+  // [1]: "If you use int32 or int64 as the type for a negative number, the
+  // resulting varint is always ten bytes long".
+  // - developers.google.com/protocol-buffers/docs/encoding
+  // So for each input type we do the following casts:
+  // uintX_t -> uintX_t -> uintX_t
+  // int8_t  -> int64_t -> uint64_t
+  // int16_t -> int64_t -> uint64_t
+  // int32_t -> int64_t -> uint64_t
+  // int64_t -> int64_t -> uint64_t
+  using MaybeExtendedType =
+      typename std::conditional<std::is_unsigned<T>::value, T, int64_t>::type;
+  using UnsignedType = typename std::make_unsigned<MaybeExtendedType>::type;
+
+  MaybeExtendedType extended_value = static_cast<MaybeExtendedType>(value);
+  UnsignedType unsigned_value = static_cast<UnsignedType>(extended_value);
+
+  while (unsigned_value >= 0x80) {
+    *target++ = static_cast<uint8_t>(unsigned_value) | 0x80;
+    unsigned_value >>= 7;
+  }
+  *target = static_cast<uint8_t>(unsigned_value);
+  return target + 1;
+}
+
+// Writes a fixed-size redundant encoding of the given |value|. This is
+// used to backfill fixed-size reservations for the length field using a
+// non-canonical varint encoding (e.g. \x81\x80\x80\x00 instead of \x01).
+// See https://github.com/google/protobuf/issues/1530.
+// This is used mainly in two cases:
+// 1) At trace writing time, when starting a nested messages. The size of a
+//    nested message is not known until all its field have been written.
+//    |kMessageLengthFieldSize| bytes are reserved to encode the size field and
+//    backfilled at the end.
+// 2) When rewriting a message at trace filtering time, in protozero/filtering.
+//    At that point we know only the upper bound of the length (a filtered
+//    message is <= the original one) and we backfill after the message has been
+//    filtered.
+inline void WriteRedundantVarInt(uint32_t value,
+                                 uint8_t* buf,
+                                 size_t size = kMessageLengthFieldSize) {
+  for (size_t i = 0; i < size; ++i) {
+    const uint8_t msb = (i < size - 1) ? 0x80 : 0;
+    buf[i] = static_cast<uint8_t>(value) | msb;
+    value >>= 7;
+  }
+}
+
+template <uint32_t field_id>
+void StaticAssertSingleBytePreamble() {
+  static_assert(field_id < 16,
+                "Proto field id too big to fit in a single byte preamble");
+}
+
+// Parses a VarInt from the encoded buffer [start, end). |end| is STL-style and
+// points one byte past the end of buffer.
+// The parsed int value is stored in the output arg |value|. Returns a pointer
+// to the next unconsumed byte (so start < retval <= end) or |start| if the
+// VarInt could not be fully parsed because there was not enough space in the
+// buffer.
+inline const uint8_t* ParseVarInt(const uint8_t* start,
+                                  const uint8_t* end,
+                                  uint64_t* out_value) {
+  const uint8_t* pos = start;
+  uint64_t value = 0;
+  for (uint32_t shift = 0; pos < end && shift < 64u; shift += 7) {
+    // Cache *pos into |cur_byte| to prevent that the compiler dereferences the
+    // pointer twice (here and in the if() below) due to char* aliasing rules.
+    uint8_t cur_byte = *pos++;
+    value |= static_cast<uint64_t>(cur_byte & 0x7f) << shift;
+    if ((cur_byte & 0x80) == 0) {
+      // In valid cases we get here.
+      *out_value = value;
+      return pos;
+    }
+  }
+  *out_value = 0;
+  return start;
+}
+
+enum class RepetitionType {
+  kNotRepeated,
+  kRepeatedPacked,
+  kRepeatedNotPacked,
+};
+
+// Provide a common base struct for all templated FieldMetadata types to allow
+// simple checks if a given type is a FieldMetadata or not.
+struct FieldMetadataBase {
+  constexpr FieldMetadataBase() = default;
+};
+
+template <uint32_t field_id,
+          RepetitionType repetition_type,
+          ProtoSchemaType proto_schema_type,
+          typename CppFieldType,
+          typename MessageType>
+struct FieldMetadata : public FieldMetadataBase {
+  constexpr FieldMetadata() = default;
+
+  static constexpr int kFieldId = field_id;
+  // Whether this field is repeated, packed (repeated [packed-true]) or not
+  // (optional).
+  static constexpr RepetitionType kRepetitionType = repetition_type;
+  // Proto type of this field (e.g. int64, fixed32 or nested message).
+  static constexpr ProtoSchemaType kProtoFieldType = proto_schema_type;
+  // C++ type of this field (for nested messages - C++ protozero class).
+  using cpp_field_type = CppFieldType;
+  // Protozero message which this field belongs to.
+  using message_type = MessageType;
+};
+
+namespace internal {
+
+// Ideally we would create variables of FieldMetadata<...> type directly,
+// but before C++17's support for constexpr inline variables arrive, we have to
+// actually use pointers to inline functions instead to avoid having to define
+// symbols in *.pbzero.cc files.
+//
+// Note: protozero bindings will generate Message::kFieldName variable and which
+// can then be passed to TRACE_EVENT macro for inline writing of typed messages.
+// The fact that the former can be passed to the latter is a part of the stable
+// API, while the particular type is not and users should not rely on it.
+template <typename T>
+using FieldMetadataHelper = T (*)(void);
+
+}  // namespace internal
+}  // namespace proto_utils
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_PROTO_UTILS_H_
+// gen_amalgamated begin header: include/perfetto/protozero/root_message.h
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_ROOT_MESSAGE_H_
+#define INCLUDE_PERFETTO_PROTOZERO_ROOT_MESSAGE_H_
+
+// gen_amalgamated expanded: #include "perfetto/protozero/message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/message_arena.h"
+
+namespace protozero {
+
+// Helper class to hand out messages using the default MessageArena.
+// Usage:
+// RootMessage<perfetto::protos::zero::MyMessage> msg;
+// msg.Reset(stream_writer);
+// msg.set_foo(...);
+// auto* nested = msg.set_nested();
+template <typename T = Message>
+class RootMessage : public T {
+ public:
+  RootMessage() { T::Reset(nullptr, &root_arena_); }
+
+  // Disallow copy and move.
+  RootMessage(const RootMessage&) = delete;
+  RootMessage& operator=(const RootMessage&) = delete;
+  RootMessage(RootMessage&&) = delete;
+  RootMessage& operator=(RootMessage&&) = delete;
+
+  void Reset(ScatteredStreamWriter* writer) {
+    root_arena_.Reset();
+    Message::Reset(writer, &root_arena_);
+  }
+
+ private:
+  MessageArena root_arena_;
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_ROOT_MESSAGE_H_
+// gen_amalgamated begin header: include/perfetto/protozero/scattered_heap_buffer.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_SCATTERED_HEAP_BUFFER_H_
+#define INCLUDE_PERFETTO_PROTOZERO_SCATTERED_HEAP_BUFFER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/root_message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_writer.h"
+
+namespace protozero {
+
+class Message;
+
+class PERFETTO_EXPORT ScatteredHeapBuffer
+    : public protozero::ScatteredStreamWriter::Delegate {
+ public:
+  class PERFETTO_EXPORT Slice {
+   public:
+    Slice();
+    explicit Slice(size_t size);
+    Slice(Slice&& slice) noexcept;
+    ~Slice();
+    Slice& operator=(Slice&&);
+
+    inline protozero::ContiguousMemoryRange GetTotalRange() const {
+      return {buffer_.get(), buffer_.get() + size_};
+    }
+
+    inline protozero::ContiguousMemoryRange GetUsedRange() const {
+      return {buffer_.get(), buffer_.get() + size_ - unused_bytes_};
+    }
+
+    uint8_t* start() const { return buffer_.get(); }
+    size_t size() const { return size_; }
+    size_t unused_bytes() const { return unused_bytes_; }
+    void set_unused_bytes(size_t unused_bytes) {
+      PERFETTO_DCHECK(unused_bytes_ <= size_);
+      unused_bytes_ = unused_bytes;
+    }
+
+    void Clear();
+
+   private:
+    std::unique_ptr<uint8_t[]> buffer_;
+    size_t size_;
+    size_t unused_bytes_;
+  };
+
+  ScatteredHeapBuffer(size_t initial_slice_size_bytes = 128,
+                      size_t maximum_slice_size_bytes = 128 * 1024);
+  ~ScatteredHeapBuffer() override;
+
+  // protozero::ScatteredStreamWriter::Delegate implementation.
+  protozero::ContiguousMemoryRange GetNewBuffer() override;
+
+  // Return the slices backing this buffer, adjusted for the number of bytes the
+  // writer has written.
+  const std::vector<Slice>& GetSlices();
+
+  // Stitch all the slices into a single contiguous buffer.
+  std::vector<uint8_t> StitchSlices();
+
+  // Note that the returned ranges point back to this buffer and thus cannot
+  // outlive it.
+  std::vector<protozero::ContiguousMemoryRange> GetRanges();
+
+  // Note that size of the last slice isn't updated to reflect the number of
+  // bytes written by the trace writer.
+  const std::vector<Slice>& slices() const { return slices_; }
+
+  void set_writer(protozero::ScatteredStreamWriter* writer) {
+    writer_ = writer;
+  }
+
+  // Update unused_bytes() of the current |Slice| based on the writer's state.
+  void AdjustUsedSizeOfCurrentSlice();
+
+  // Returns the total size the slices occupy in heap memory (including unused).
+  size_t GetTotalSize();
+
+  // Reset the contents of this buffer but retain one slice allocation (if it
+  // exists) to be reused for future writes.
+  void Reset();
+
+ private:
+  size_t next_slice_size_;
+  const size_t maximum_slice_size_;
+  protozero::ScatteredStreamWriter* writer_ = nullptr;
+  std::vector<Slice> slices_;
+
+  // Used to keep an allocated slice around after this buffer is reset.
+  Slice cached_slice_;
+};
+
+// Helper function to create heap-based protozero messages in one line.
+// Useful when manually serializing a protozero message (primarily in
+// tests/utilities). So instead of the following:
+//   protozero::MyMessage msg;
+//   protozero::ScatteredHeapBuffer shb;
+//   protozero::ScatteredStreamWriter writer(&shb);
+//   shb.set_writer(&writer);
+//   msg.Reset(&writer);
+//   ...
+// You can write:
+//   protozero::HeapBuffered<protozero::MyMessage> msg;
+//   msg->set_stuff(...);
+//   msg.SerializeAsString();
+template <typename T = ::protozero::Message>
+class HeapBuffered {
+ public:
+  HeapBuffered() : HeapBuffered(4096, 4096) {}
+  HeapBuffered(size_t initial_slice_size_bytes, size_t maximum_slice_size_bytes)
+      : shb_(initial_slice_size_bytes, maximum_slice_size_bytes),
+        writer_(&shb_) {
+    shb_.set_writer(&writer_);
+    msg_.Reset(&writer_);
+  }
+
+  // This can't be neither copied nor moved because Message hands out pointers
+  // to itself when creating submessages.
+  HeapBuffered(const HeapBuffered&) = delete;
+  HeapBuffered& operator=(const HeapBuffered&) = delete;
+  HeapBuffered(HeapBuffered&&) = delete;
+  HeapBuffered& operator=(HeapBuffered&&) = delete;
+
+  T* get() { return &msg_; }
+  T* operator->() { return &msg_; }
+
+  bool empty() const { return shb_.slices().empty(); }
+
+  std::vector<uint8_t> SerializeAsArray() {
+    msg_.Finalize();
+    return shb_.StitchSlices();
+  }
+
+  std::string SerializeAsString() {
+    auto vec = SerializeAsArray();
+    return std::string(reinterpret_cast<const char*>(vec.data()), vec.size());
+  }
+
+  std::vector<protozero::ContiguousMemoryRange> GetRanges() {
+    msg_.Finalize();
+    return shb_.GetRanges();
+  }
+
+  const std::vector<ScatteredHeapBuffer::Slice>& GetSlices() {
+    msg_.Finalize();
+    return shb_.GetSlices();
+  }
+
+  void Reset() {
+    shb_.Reset();
+    writer_.Reset(protozero::ContiguousMemoryRange{});
+    msg_.Reset(&writer_);
+    PERFETTO_DCHECK(empty());
+  }
+
+ private:
+  ScatteredHeapBuffer shb_;
+  ScatteredStreamWriter writer_;
+  RootMessage<T> msg_;
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_SCATTERED_HEAP_BUFFER_H_
+// gen_amalgamated begin header: include/perfetto/protozero/scattered_stream_null_delegate.h
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_SCATTERED_STREAM_NULL_DELEGATE_H_
+#define INCLUDE_PERFETTO_PROTOZERO_SCATTERED_STREAM_NULL_DELEGATE_H_
+
+#include <memory>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/base/logging.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/contiguous_memory_range.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_writer.h"
+
+namespace protozero {
+
+class PERFETTO_EXPORT ScatteredStreamWriterNullDelegate
+    : public ScatteredStreamWriter::Delegate {
+ public:
+  explicit ScatteredStreamWriterNullDelegate(size_t chunk_size);
+  ~ScatteredStreamWriterNullDelegate() override;
+
+  // protozero::ScatteredStreamWriter::Delegate implementation.
+  ContiguousMemoryRange GetNewBuffer() override;
+
+ private:
+  const size_t chunk_size_;
+  std::unique_ptr<uint8_t[]> chunk_;
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_SCATTERED_STREAM_NULL_DELEGATE_H_
+// gen_amalgamated begin header: include/perfetto/protozero/scattered_stream_writer.h
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_SCATTERED_STREAM_WRITER_H_
+#define INCLUDE_PERFETTO_PROTOZERO_SCATTERED_STREAM_WRITER_H_
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+// gen_amalgamated expanded: #include "perfetto/base/compiler.h"
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/contiguous_memory_range.h"
+
+namespace protozero {
+
+// This class deals with the following problem: append-only proto messages want
+// to write a stream of bytes, without caring about the implementation of the
+// underlying buffer (which concretely will be either the trace ring buffer
+// or a heap-allocated buffer). The main deal is: proto messages don't know in
+// advance what their size will be.
+// Due to the tracing buffer being split into fixed-size chunks, on some
+// occasions, these writes need to be spread over two (or more) non-contiguous
+// chunks of memory. Similarly, when the buffer is backed by the heap, we want
+// to avoid realloc() calls, as they might cause a full copy of the contents
+// of the buffer.
+// The purpose of this class is to abstract away the non-contiguous write logic.
+// This class knows how to deal with writes as long as they fall in the same
+// ContiguousMemoryRange and defers the chunk-chaining logic to the Delegate.
+class PERFETTO_EXPORT ScatteredStreamWriter {
+ public:
+  class PERFETTO_EXPORT Delegate {
+   public:
+    virtual ~Delegate();
+    virtual ContiguousMemoryRange GetNewBuffer() = 0;
+  };
+
+  explicit ScatteredStreamWriter(Delegate* delegate);
+  ~ScatteredStreamWriter();
+
+  inline void WriteByte(uint8_t value) {
+    if (write_ptr_ >= cur_range_.end)
+      Extend();
+    *write_ptr_++ = value;
+  }
+
+  // Assumes that the caller checked that there is enough headroom.
+  // TODO(primiano): perf optimization, this is a tracing hot path. The
+  // compiler can make strong optimization on memcpy if the size arg is a
+  // constexpr. Make a templated variant of this for fixed-size writes.
+  // TODO(primiano): restrict / noalias might also help.
+  inline void WriteBytesUnsafe(const uint8_t* src, size_t size) {
+    uint8_t* const end = write_ptr_ + size;
+    assert(end <= cur_range_.end);
+    memcpy(write_ptr_, src, size);
+    write_ptr_ = end;
+  }
+
+  inline void WriteBytes(const uint8_t* src, size_t size) {
+    uint8_t* const end = write_ptr_ + size;
+    if (PERFETTO_LIKELY(end <= cur_range_.end))
+      return WriteBytesUnsafe(src, size);
+    WriteBytesSlowPath(src, size);
+  }
+
+  void WriteBytesSlowPath(const uint8_t* src, size_t size);
+
+  // Reserves a fixed amount of bytes to be backfilled later. The reserved range
+  // is guaranteed to be contiguous and not span across chunks. |size| has to be
+  // <= than the size of a new buffer returned by the Delegate::GetNewBuffer().
+  uint8_t* ReserveBytes(size_t size);
+
+  // Fast (but unsafe) version of the above. The caller must have previously
+  // checked that there are at least |size| contiguous bytes available.
+  // Returns only the start pointer of the reservation.
+  uint8_t* ReserveBytesUnsafe(size_t size) {
+    uint8_t* begin = write_ptr_;
+    write_ptr_ += size;
+    assert(write_ptr_ <= cur_range_.end);
+    return begin;
+  }
+
+  // Resets the buffer boundaries and the write pointer to the given |range|.
+  // Subsequent WriteByte(s) will write into |range|.
+  void Reset(ContiguousMemoryRange range);
+
+  // Number of contiguous free bytes in |cur_range_| that can be written without
+  // requesting a new buffer.
+  size_t bytes_available() const {
+    return static_cast<size_t>(cur_range_.end - write_ptr_);
+  }
+
+  uint8_t* write_ptr() const { return write_ptr_; }
+
+  uint64_t written() const {
+    return written_previously_ +
+           static_cast<uint64_t>(write_ptr_ - cur_range_.begin);
+  }
+
+ private:
+  ScatteredStreamWriter(const ScatteredStreamWriter&) = delete;
+  ScatteredStreamWriter& operator=(const ScatteredStreamWriter&) = delete;
+
+  void Extend();
+
+  Delegate* const delegate_;
+  ContiguousMemoryRange cur_range_;
+  uint8_t* write_ptr_;
+  uint64_t written_previously_ = 0;
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_SCATTERED_STREAM_WRITER_H_
+// gen_amalgamated begin header: include/perfetto/protozero/static_buffer.h
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INCLUDE_PERFETTO_PROTOZERO_STATIC_BUFFER_H_
+#define INCLUDE_PERFETTO_PROTOZERO_STATIC_BUFFER_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+// gen_amalgamated expanded: #include "perfetto/base/export.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/root_message.h"
+// gen_amalgamated expanded: #include "perfetto/protozero/scattered_stream_writer.h"
+
+namespace protozero {
+
+class Message;
+
+// A simple implementation of ScatteredStreamWriter::Delegate backed by a
+// fixed-size buffer. It doesn't support expansion. The caller needs to ensure
+// to never write more than the size of the buffer. Will CHECK() otherwise.
+class PERFETTO_EXPORT StaticBufferDelegate
+    : public ScatteredStreamWriter::Delegate {
+ public:
+  StaticBufferDelegate(uint8_t* buf, size_t len) : range_{buf, buf + len} {}
+  ~StaticBufferDelegate() override;
+
+  // ScatteredStreamWriter::Delegate implementation.
+  ContiguousMemoryRange GetNewBuffer() override;
+
+  ContiguousMemoryRange const range_;
+  bool get_new_buffer_called_once_ = false;
+};
+
+// Helper function to create protozero messages backed by a fixed-size buffer
+// in one line. You can write:
+//   protozero::Static<protozero::MyMessage> msg(buf.data(), buf.size());
+//   msg->set_stuff(...);
+//   size_t bytes_encoded = msg.Finalize();
+template <typename T /* protozero::Message */>
+class StaticBuffered {
+ public:
+  StaticBuffered(void* buf, size_t len)
+      : delegate_(reinterpret_cast<uint8_t*>(buf), len), writer_(&delegate_) {
+    msg_.Reset(&writer_);
+  }
+
+  // This can't be neither copied nor moved because Message hands out pointers
+  // to itself when creating submessages.
+  StaticBuffered(const StaticBuffered&) = delete;
+  StaticBuffered& operator=(const StaticBuffered&) = delete;
+  StaticBuffered(StaticBuffered&&) = delete;
+  StaticBuffered& operator=(StaticBuffered&&) = delete;
+
+  T* get() { return &msg_; }
+  T* operator->() { return &msg_; }
+
+  // The lack of a size() method is deliberate. It's to prevent that one
+  // accidentally calls size() before Finalize().
+
+  // Returns the number of encoded bytes (<= the size passed in the ctor).
+  size_t Finalize() {
+    msg_.Finalize();
+    return static_cast<size_t>(writer_.write_ptr() - delegate_.range_.begin);
+  }
+
+ private:
+  StaticBufferDelegate delegate_;
+  ScatteredStreamWriter writer_;
+  RootMessage<T> msg_;
+};
+
+// Helper function to create stack-based protozero messages in one line.
+// You can write:
+//   protozero::StackBuffered<protozero::MyMessage, 16> msg;
+//   msg->set_stuff(...);
+//   size_t bytes_encoded = msg.Finalize();
+template <typename T /* protozero::Message */, size_t N>
+class StackBuffered : public StaticBuffered<T> {
+ public:
+  StackBuffered() : StaticBuffered<T>(&buf_[0], N) {}
+
+ private:
+  uint8_t buf_[N];  // Deliberately not initialized.
+};
+
+}  // namespace protozero
+
+#endif  // INCLUDE_PERFETTO_PROTOZERO_STATIC_BUFFER_H_
+
diff --git a/system/profiler/profiler.cpp b/system/profiler/profiler.cpp
new file mode 100644
index 0000000..97e5fee
--- /dev/null
+++ b/system/profiler/profiler.cpp
@@ -0,0 +1,93 @@
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "profiler.h"
+
+#include <android-base/properties.h>
+#include <sys/prctl.h>
+
+#include "perfetto.h"
+
+namespace {
+
+class GpuCounterDataSource : public perfetto::DataSource<GpuCounterDataSource> {
+ public:
+  void OnSetup(const SetupArgs& args) override {
+    PERFETTO_ILOG("GpuCounterDataSource OnSetup, name: %s", args.config->name().c_str());
+    const std::string& config_raw = args.config->gpu_counter_config_raw();
+    perfetto::protos::pbzero::GpuCounterConfig::Decoder config(config_raw);
+    for(auto it = config.counter_ids(); it; ++it) {
+      counter_ids.push_back(it->as_uint32());
+    }
+    first = true;
+  }
+
+  void OnStart(const StartArgs&) override {
+    PERFETTO_ILOG("GpuCounterDataSource OnStart called");
+  }
+
+  void OnStop(const StopArgs&) override {
+    PERFETTO_ILOG("GpuCounterDataSource OnStop called");
+  }
+
+  bool first = true;
+  uint64_t count = 0;
+  std::vector<uint32_t> counter_ids;
+};
+
+class GpuRenderStageDataSource: public perfetto::DataSource<GpuRenderStageDataSource> {
+ public:
+  void OnSetup(const SetupArgs& args) override {
+    PERFETTO_ILOG("GpuRenderStageDataSource OnSetup called, name: %s",
+      args.config->name().c_str());
+    first = true;
+  }
+
+  void OnStart(const StartArgs&) override {
+    PERFETTO_ILOG("GpuRenderStageDataSource OnStart called");
+  }
+
+  void OnStop(const StopArgs&) override {
+    PERFETTO_ILOG("GpuRenderStageDataSource OnStop called");
+  }
+
+  bool first = true;
+  uint64_t count = 0;
+};
+
+}
+
+void try_register_goldfish_perfetto() {
+  std::string enableString = android::base::GetProperty("debug.graphics.gpu.profiler.perfetto", "");
+  if (enableString != "1" && enableString != "true") {
+    return;
+  }
+  if (!prctl(PR_GET_DUMPABLE, 0, 0, 0, 0)) {
+    return;
+  }
+  perfetto::TracingInitArgs args;
+  args.backends = perfetto::kSystemBackend;
+  perfetto::Tracing::Initialize(args);
+  {
+    perfetto::DataSourceDescriptor dsd;
+    dsd.set_name("gpu.counters");
+    GpuCounterDataSource::Register(dsd);
+  }
+
+  {
+    perfetto::DataSourceDescriptor dsd;
+    dsd.set_name("gpu.renderstages");
+    GpuRenderStageDataSource::Register(dsd);
+  }
+}
diff --git a/system/profiler/profiler.h b/system/profiler/profiler.h
new file mode 100644
index 0000000..3b4db47
--- /dev/null
+++ b/system/profiler/profiler.h
@@ -0,0 +1,20 @@
+// Copyright (C) 2021 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef __PROFILER_H__
+#define __PROFILER_H__
+
+extern void try_register_goldfish_perfetto();
+
+#endif //__PROFILER_H__
diff --git a/system/profiler/profiler_stub.cpp b/system/profiler/profiler_stub.cpp
new file mode 100644
index 0000000..52fd934
--- /dev/null
+++ b/system/profiler/profiler_stub.cpp
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "profiler.h"
+
+void try_register_goldfish_perfetto() { }
diff --git a/system/renderControl_enc/CMakeLists.txt b/system/renderControl_enc/CMakeLists.txt
index 8a42acb..a2b4221 100644
--- a/system/renderControl_enc/CMakeLists.txt
+++ b/system/renderControl_enc/CMakeLists.txt
@@ -4,7 +4,7 @@
 android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc/Android.mk" "780a007ac7a3d2255372ddf40e03aeb10e4c759343d2532f6ddf769f4df73810")
 set(_renderControl_enc_src renderControl_client_context.cpp renderControl_enc.cpp renderControl_entry.cpp)
 android_add_library(TARGET _renderControl_enc SHARED LICENSE Apache-2.0 SRC renderControl_client_context.cpp renderControl_enc.cpp renderControl_entry.cpp)
-target_include_directories(_renderControl_enc PRIVATE ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
-target_compile_definitions(_renderControl_enc PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGOLDFISH_VULKAN")
+target_include_directories(_renderControl_enc PRIVATE ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
+target_compile_definitions(_renderControl_enc PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM")
 target_compile_options(_renderControl_enc PRIVATE "-fvisibility=default" "-Wno-unused-parameter" "-Wno-unused-function")
-target_link_libraries(_renderControl_enc PRIVATE OpenglCodecCommon_host cutils utils log android-emu-shared PRIVATE qemupipe_host)
\ No newline at end of file
+target_link_libraries(_renderControl_enc PRIVATE OpenglCodecCommon_host cutils utils log androidemu android-emu-shared PRIVATE qemupipe_host)
\ No newline at end of file
diff --git a/system/renderControl_enc/renderControl_client_context.cpp b/system/renderControl_enc/renderControl_client_context.cpp
index 54c3f93..2b3f0ae 100644
--- a/system/renderControl_enc/renderControl_client_context.cpp
+++ b/system/renderControl_enc/renderControl_client_context.cpp
@@ -58,6 +58,22 @@
 	rcSetColorBufferVulkanMode = (rcSetColorBufferVulkanMode_client_proc_t) getProc("rcSetColorBufferVulkanMode", userData);
 	rcReadColorBufferYUV = (rcReadColorBufferYUV_client_proc_t) getProc("rcReadColorBufferYUV", userData);
 	rcIsSyncSignaled = (rcIsSyncSignaled_client_proc_t) getProc("rcIsSyncSignaled", userData);
+	rcCreateColorBufferWithHandle = (rcCreateColorBufferWithHandle_client_proc_t) getProc("rcCreateColorBufferWithHandle", userData);
+	rcCreateBuffer = (rcCreateBuffer_client_proc_t) getProc("rcCreateBuffer", userData);
+	rcCloseBuffer = (rcCloseBuffer_client_proc_t) getProc("rcCloseBuffer", userData);
+	rcSetColorBufferVulkanMode2 = (rcSetColorBufferVulkanMode2_client_proc_t) getProc("rcSetColorBufferVulkanMode2", userData);
+	rcMapGpaToBufferHandle = (rcMapGpaToBufferHandle_client_proc_t) getProc("rcMapGpaToBufferHandle", userData);
+	rcCreateBuffer2 = (rcCreateBuffer2_client_proc_t) getProc("rcCreateBuffer2", userData);
+	rcMapGpaToBufferHandle2 = (rcMapGpaToBufferHandle2_client_proc_t) getProc("rcMapGpaToBufferHandle2", userData);
+	rcFlushWindowColorBufferAsyncWithFrameNumber = (rcFlushWindowColorBufferAsyncWithFrameNumber_client_proc_t) getProc("rcFlushWindowColorBufferAsyncWithFrameNumber", userData);
+	rcSetTracingForPuid = (rcSetTracingForPuid_client_proc_t) getProc("rcSetTracingForPuid", userData);
+	rcMakeCurrentAsync = (rcMakeCurrentAsync_client_proc_t) getProc("rcMakeCurrentAsync", userData);
+	rcComposeAsync = (rcComposeAsync_client_proc_t) getProc("rcComposeAsync", userData);
+	rcDestroySyncKHRAsync = (rcDestroySyncKHRAsync_client_proc_t) getProc("rcDestroySyncKHRAsync", userData);
+	rcComposeWithoutPost = (rcComposeWithoutPost_client_proc_t) getProc("rcComposeWithoutPost", userData);
+	rcComposeAsyncWithoutPost = (rcComposeAsyncWithoutPost_client_proc_t) getProc("rcComposeAsyncWithoutPost", userData);
+	rcCreateDisplayById = (rcCreateDisplayById_client_proc_t) getProc("rcCreateDisplayById", userData);
+	rcSetDisplayPoseDpi = (rcSetDisplayPoseDpi_client_proc_t) getProc("rcSetDisplayPoseDpi", userData);
 	return 0;
 }
 
diff --git a/system/renderControl_enc/renderControl_client_context.h b/system/renderControl_enc/renderControl_client_context.h
index 12486c3..cf2f8f0 100644
--- a/system/renderControl_enc/renderControl_client_context.h
+++ b/system/renderControl_enc/renderControl_client_context.h
@@ -58,6 +58,22 @@
 	rcSetColorBufferVulkanMode_client_proc_t rcSetColorBufferVulkanMode;
 	rcReadColorBufferYUV_client_proc_t rcReadColorBufferYUV;
 	rcIsSyncSignaled_client_proc_t rcIsSyncSignaled;
+	rcCreateColorBufferWithHandle_client_proc_t rcCreateColorBufferWithHandle;
+	rcCreateBuffer_client_proc_t rcCreateBuffer;
+	rcCloseBuffer_client_proc_t rcCloseBuffer;
+	rcSetColorBufferVulkanMode2_client_proc_t rcSetColorBufferVulkanMode2;
+	rcMapGpaToBufferHandle_client_proc_t rcMapGpaToBufferHandle;
+	rcCreateBuffer2_client_proc_t rcCreateBuffer2;
+	rcMapGpaToBufferHandle2_client_proc_t rcMapGpaToBufferHandle2;
+	rcFlushWindowColorBufferAsyncWithFrameNumber_client_proc_t rcFlushWindowColorBufferAsyncWithFrameNumber;
+	rcSetTracingForPuid_client_proc_t rcSetTracingForPuid;
+	rcMakeCurrentAsync_client_proc_t rcMakeCurrentAsync;
+	rcComposeAsync_client_proc_t rcComposeAsync;
+	rcDestroySyncKHRAsync_client_proc_t rcDestroySyncKHRAsync;
+	rcComposeWithoutPost_client_proc_t rcComposeWithoutPost;
+	rcComposeAsyncWithoutPost_client_proc_t rcComposeAsyncWithoutPost;
+	rcCreateDisplayById_client_proc_t rcCreateDisplayById;
+	rcSetDisplayPoseDpi_client_proc_t rcSetDisplayPoseDpi;
 	virtual ~renderControl_client_context_t() {}
 
 	typedef renderControl_client_context_t *CONTEXT_ACCESSOR_TYPE(void);
diff --git a/system/renderControl_enc/renderControl_client_proc.h b/system/renderControl_enc/renderControl_client_proc.h
index d64cf5c..2bd5d1f 100644
--- a/system/renderControl_enc/renderControl_client_proc.h
+++ b/system/renderControl_enc/renderControl_client_proc.h
@@ -60,6 +60,22 @@
 typedef GLint (renderControl_APIENTRY *rcSetColorBufferVulkanMode_client_proc_t) (void * ctx, uint32_t, uint32_t);
 typedef void (renderControl_APIENTRY *rcReadColorBufferYUV_client_proc_t) (void * ctx, uint32_t, GLint, GLint, GLint, GLint, void*, uint32_t);
 typedef int (renderControl_APIENTRY *rcIsSyncSignaled_client_proc_t) (void * ctx, uint64_t);
+typedef void (renderControl_APIENTRY *rcCreateColorBufferWithHandle_client_proc_t) (void * ctx, uint32_t, uint32_t, GLenum, uint32_t);
+typedef uint32_t (renderControl_APIENTRY *rcCreateBuffer_client_proc_t) (void * ctx, uint32_t);
+typedef void (renderControl_APIENTRY *rcCloseBuffer_client_proc_t) (void * ctx, uint32_t);
+typedef GLint (renderControl_APIENTRY *rcSetColorBufferVulkanMode2_client_proc_t) (void * ctx, uint32_t, uint32_t, uint32_t);
+typedef int (renderControl_APIENTRY *rcMapGpaToBufferHandle_client_proc_t) (void * ctx, uint32_t, uint64_t);
+typedef uint32_t (renderControl_APIENTRY *rcCreateBuffer2_client_proc_t) (void * ctx, uint64_t, uint32_t);
+typedef int (renderControl_APIENTRY *rcMapGpaToBufferHandle2_client_proc_t) (void * ctx, uint32_t, uint64_t, uint64_t);
+typedef void (renderControl_APIENTRY *rcFlushWindowColorBufferAsyncWithFrameNumber_client_proc_t) (void * ctx, uint32_t, uint32_t);
+typedef void (renderControl_APIENTRY *rcSetTracingForPuid_client_proc_t) (void * ctx, uint64_t, uint32_t, uint64_t);
+typedef void (renderControl_APIENTRY *rcMakeCurrentAsync_client_proc_t) (void * ctx, uint32_t, uint32_t, uint32_t);
+typedef void (renderControl_APIENTRY *rcComposeAsync_client_proc_t) (void * ctx, uint32_t, void*);
+typedef void (renderControl_APIENTRY *rcDestroySyncKHRAsync_client_proc_t) (void * ctx, uint64_t);
+typedef GLint (renderControl_APIENTRY *rcComposeWithoutPost_client_proc_t) (void * ctx, uint32_t, void*);
+typedef void (renderControl_APIENTRY *rcComposeAsyncWithoutPost_client_proc_t) (void * ctx, uint32_t, void*);
+typedef int (renderControl_APIENTRY *rcCreateDisplayById_client_proc_t) (void * ctx, uint32_t);
+typedef int (renderControl_APIENTRY *rcSetDisplayPoseDpi_client_proc_t) (void * ctx, uint32_t, GLint, GLint, uint32_t, uint32_t, uint32_t);
 
 
 #endif
diff --git a/system/renderControl_enc/renderControl_enc.cpp b/system/renderControl_enc/renderControl_enc.cpp
index d0c3c75..1c544a8 100644
--- a/system/renderControl_enc/renderControl_enc.cpp
+++ b/system/renderControl_enc/renderControl_enc.cpp
@@ -12,6 +12,7 @@
 
 #include <stdio.h>
 
+#include "android/base/Tracing.h"
 namespace {
 
 void enc_unsupported()
@@ -21,6 +22,7 @@
 
 GLint rcGetRendererVersion_enc(void *self )
 {
+	AEMU_SCOPED_TRACE("rcGetRendererVersion encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -60,6 +62,7 @@
 
 EGLint rcGetEGLVersion_enc(void *self , EGLint* major, EGLint* minor)
 {
+	AEMU_SCOPED_TRACE("rcGetEGLVersion encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -78,8 +81,8 @@
 	int tmp = OP_rcGetEGLVersion;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_major; ptr += 4;
-	*(unsigned int *)(ptr) = __size_minor; ptr += 4;
+	memcpy(ptr, &__size_major, 4); ptr += 4;
+	memcpy(ptr, &__size_minor, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -107,6 +110,7 @@
 
 EGLint rcQueryEGLString_enc(void *self , EGLenum name, void* buffer, EGLint bufferSize)
 {
+	AEMU_SCOPED_TRACE("rcQueryEGLString encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -125,7 +129,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &name, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_buffer; ptr += 4;
+	memcpy(ptr, &__size_buffer, 4); ptr += 4;
 		memcpy(ptr, &bufferSize, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -152,6 +156,7 @@
 
 EGLint rcGetGLString_enc(void *self , EGLenum name, void* buffer, EGLint bufferSize)
 {
+	AEMU_SCOPED_TRACE("rcGetGLString encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -170,7 +175,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &name, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_buffer; ptr += 4;
+	memcpy(ptr, &__size_buffer, 4); ptr += 4;
 		memcpy(ptr, &bufferSize, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -197,6 +202,7 @@
 
 EGLint rcGetNumConfigs_enc(void *self , uint32_t* numAttribs)
 {
+	AEMU_SCOPED_TRACE("rcGetNumConfigs encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -214,7 +220,7 @@
 	int tmp = OP_rcGetNumConfigs;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_numAttribs; ptr += 4;
+	memcpy(ptr, &__size_numAttribs, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -240,6 +246,7 @@
 
 EGLint rcGetConfigs_enc(void *self , uint32_t bufSize, GLuint* buffer)
 {
+	AEMU_SCOPED_TRACE("rcGetConfigs encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -258,7 +265,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &bufSize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_buffer; ptr += 4;
+	memcpy(ptr, &__size_buffer, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -284,6 +291,7 @@
 
 EGLint rcChooseConfig_enc(void *self , EGLint* attribs, uint32_t attribs_size, uint32_t* configs, uint32_t configs_size)
 {
+	AEMU_SCOPED_TRACE("rcChooseConfig encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -302,10 +310,10 @@
 	int tmp = OP_rcChooseConfig;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_attribs; ptr += 4;
+	memcpy(ptr, &__size_attribs, 4); ptr += 4;
 	memcpy(ptr, attribs, __size_attribs);ptr += __size_attribs;
 		memcpy(ptr, &attribs_size, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_configs; ptr += 4;
+	memcpy(ptr, &__size_configs, 4); ptr += 4;
 		memcpy(ptr, &configs_size, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -334,6 +342,7 @@
 
 EGLint rcGetFBParam_enc(void *self , EGLint param)
 {
+	AEMU_SCOPED_TRACE("rcGetFBParam encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -374,6 +383,7 @@
 
 uint32_t rcCreateContext_enc(void *self , uint32_t config, uint32_t share, uint32_t glVersion)
 {
+	AEMU_SCOPED_TRACE("rcCreateContext encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -416,6 +426,7 @@
 
 void rcDestroyContext_enc(void *self , uint32_t context)
 {
+	AEMU_SCOPED_TRACE("rcDestroyContext encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -441,6 +452,7 @@
 
 uint32_t rcCreateWindowSurface_enc(void *self , uint32_t config, uint32_t width, uint32_t height)
 {
+	AEMU_SCOPED_TRACE("rcCreateWindowSurface encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -483,6 +495,7 @@
 
 void rcDestroyWindowSurface_enc(void *self , uint32_t windowSurface)
 {
+	AEMU_SCOPED_TRACE("rcDestroyWindowSurface encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -508,6 +521,7 @@
 
 uint32_t rcCreateColorBuffer_enc(void *self , uint32_t width, uint32_t height, GLenum internalFormat)
 {
+	AEMU_SCOPED_TRACE("rcCreateColorBuffer encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -550,6 +564,7 @@
 
 void rcOpenColorBuffer_enc(void *self , uint32_t colorbuffer)
 {
+	AEMU_SCOPED_TRACE("rcOpenColorBuffer encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -575,6 +590,7 @@
 
 void rcCloseColorBuffer_enc(void *self , uint32_t colorbuffer)
 {
+	AEMU_SCOPED_TRACE("rcCloseColorBuffer encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -601,6 +617,7 @@
 
 void rcSetWindowColorBuffer_enc(void *self , uint32_t windowSurface, uint32_t colorBuffer)
 {
+	AEMU_SCOPED_TRACE("rcSetWindowColorBuffer encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -627,6 +644,7 @@
 
 int rcFlushWindowColorBuffer_enc(void *self , uint32_t windowSurface)
 {
+	AEMU_SCOPED_TRACE("rcFlushWindowColorBuffer encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -667,6 +685,7 @@
 
 EGLint rcMakeCurrent_enc(void *self , uint32_t context, uint32_t drawSurf, uint32_t readSurf)
 {
+	AEMU_SCOPED_TRACE("rcMakeCurrent encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -709,6 +728,7 @@
 
 void rcFBPost_enc(void *self , uint32_t colorBuffer)
 {
+	AEMU_SCOPED_TRACE("rcFBPost encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -734,6 +754,7 @@
 
 void rcFBSetSwapInterval_enc(void *self , EGLint interval)
 {
+	AEMU_SCOPED_TRACE("rcFBSetSwapInterval encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -759,6 +780,7 @@
 
 void rcBindTexture_enc(void *self , uint32_t colorBuffer)
 {
+	AEMU_SCOPED_TRACE("rcBindTexture encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -784,6 +806,7 @@
 
 void rcBindRenderbuffer_enc(void *self , uint32_t colorBuffer)
 {
+	AEMU_SCOPED_TRACE("rcBindRenderbuffer encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -809,6 +832,7 @@
 
 EGLint rcColorBufferCacheFlush_enc(void *self , uint32_t colorbuffer, EGLint postCount, int forRead)
 {
+	AEMU_SCOPED_TRACE("rcColorBufferCacheFlush encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -851,6 +875,7 @@
 
 void rcReadColorBuffer_enc(void *self , uint32_t colorbuffer, GLint x, GLint y, GLint width, GLint height, GLenum format, GLenum type, void* pixels)
 {
+	AEMU_SCOPED_TRACE("rcReadColorBuffer encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -875,7 +900,7 @@
 		memcpy(ptr, &height, 4); ptr += 4;
 		memcpy(ptr, &format, 4); ptr += 4;
 		memcpy(ptr, &type, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_pixels; ptr += 4;
+	memcpy(ptr, &__size_pixels, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -896,6 +921,7 @@
 
 int rcUpdateColorBuffer_enc(void *self , uint32_t colorbuffer, GLint x, GLint y, GLint width, GLint height, GLenum format, GLenum type, void* pixels)
 {
+	AEMU_SCOPED_TRACE("rcUpdateColorBuffer encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -949,6 +975,7 @@
 
 int rcOpenColorBuffer2_enc(void *self , uint32_t colorbuffer)
 {
+	AEMU_SCOPED_TRACE("rcOpenColorBuffer2 encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -989,6 +1016,7 @@
 
 uint32_t rcCreateClientImage_enc(void *self , uint32_t context, EGLenum target, GLuint buffer)
 {
+	AEMU_SCOPED_TRACE("rcCreateClientImage encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1031,6 +1059,7 @@
 
 int rcDestroyClientImage_enc(void *self , uint32_t image)
 {
+	AEMU_SCOPED_TRACE("rcDestroyClientImage encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1071,6 +1100,7 @@
 
 void rcSelectChecksumHelper_enc(void *self , uint32_t newProtocol, uint32_t reserved)
 {
+	AEMU_SCOPED_TRACE("rcSelectChecksumHelper encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1097,6 +1127,7 @@
 
 void rcCreateSyncKHR_enc(void *self , EGLenum type, EGLint* attribs, uint32_t num_attribs, int destroy_when_signaled, uint64_t* glsync_out, uint64_t* syncthread_out)
 {
+	AEMU_SCOPED_TRACE("rcCreateSyncKHR encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1117,12 +1148,12 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &type, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_attribs; ptr += 4;
+	memcpy(ptr, &__size_attribs, 4); ptr += 4;
 	memcpy(ptr, attribs, __size_attribs);ptr += __size_attribs;
 		memcpy(ptr, &num_attribs, 4); ptr += 4;
 		memcpy(ptr, &destroy_when_signaled, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_glsync_out; ptr += 4;
-	*(unsigned int *)(ptr) = __size_syncthread_out; ptr += 4;
+	memcpy(ptr, &__size_glsync_out, 4); ptr += 4;
+	memcpy(ptr, &__size_syncthread_out, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -1145,6 +1176,7 @@
 
 EGLint rcClientWaitSyncKHR_enc(void *self , uint64_t sync, EGLint flags, uint64_t timeout)
 {
+	AEMU_SCOPED_TRACE("rcClientWaitSyncKHR encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1187,6 +1219,7 @@
 
 void rcFlushWindowColorBufferAsync_enc(void *self , uint32_t windowSurface)
 {
+	AEMU_SCOPED_TRACE("rcFlushWindowColorBufferAsync encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1212,6 +1245,7 @@
 
 int rcDestroySyncKHR_enc(void *self , uint64_t sync)
 {
+	AEMU_SCOPED_TRACE("rcDestroySyncKHR encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1252,6 +1286,7 @@
 
 void rcSetPuid_enc(void *self , uint64_t puid)
 {
+	AEMU_SCOPED_TRACE("rcSetPuid encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1277,6 +1312,7 @@
 
 int rcUpdateColorBufferDMA_enc(void *self , uint32_t colorbuffer, GLint x, GLint y, GLint width, GLint height, GLenum format, GLenum type, void* pixels, uint32_t pixels_size)
 {
+	AEMU_SCOPED_TRACE("rcUpdateColorBufferDMA encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1326,6 +1362,7 @@
 
 uint32_t rcCreateColorBufferDMA_enc(void *self , uint32_t width, uint32_t height, GLenum internalFormat, int frameworkFormat)
 {
+	AEMU_SCOPED_TRACE("rcCreateColorBufferDMA encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1369,6 +1406,7 @@
 
 void rcWaitSyncKHR_enc(void *self , uint64_t sync, EGLint flags)
 {
+	AEMU_SCOPED_TRACE("rcWaitSyncKHR encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1395,6 +1433,7 @@
 
 GLint rcCompose_enc(void *self , uint32_t bufferSize, void* buffer)
 {
+	AEMU_SCOPED_TRACE("rcCompose encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1413,7 +1452,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &bufferSize, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_buffer; ptr += 4;
+	memcpy(ptr, &__size_buffer, 4); ptr += 4;
 	memcpy(ptr, buffer, __size_buffer);ptr += __size_buffer;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -1438,6 +1477,7 @@
 
 int rcCreateDisplay_enc(void *self , uint32_t* displayId)
 {
+	AEMU_SCOPED_TRACE("rcCreateDisplay encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1455,7 +1495,7 @@
 	int tmp = OP_rcCreateDisplay;memcpy(ptr, &tmp, 4); ptr += 4;
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
-	*(unsigned int *)(ptr) = __size_displayId; ptr += 4;
+	memcpy(ptr, &__size_displayId, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -1481,6 +1521,7 @@
 
 int rcDestroyDisplay_enc(void *self , uint32_t displayId)
 {
+	AEMU_SCOPED_TRACE("rcDestroyDisplay encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1521,6 +1562,7 @@
 
 int rcSetDisplayColorBuffer_enc(void *self , uint32_t displayId, uint32_t colorBuffer)
 {
+	AEMU_SCOPED_TRACE("rcSetDisplayColorBuffer encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1562,6 +1604,7 @@
 
 int rcGetDisplayColorBuffer_enc(void *self , uint32_t displayId, uint32_t* colorBuffer)
 {
+	AEMU_SCOPED_TRACE("rcGetDisplayColorBuffer encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1580,7 +1623,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &displayId, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_colorBuffer; ptr += 4;
+	memcpy(ptr, &__size_colorBuffer, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -1606,6 +1649,7 @@
 
 int rcGetColorBufferDisplay_enc(void *self , uint32_t colorBuffer, uint32_t* displayId)
 {
+	AEMU_SCOPED_TRACE("rcGetColorBufferDisplay encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1624,7 +1668,7 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &colorBuffer, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_displayId; ptr += 4;
+	memcpy(ptr, &__size_displayId, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -1650,6 +1694,7 @@
 
 int rcGetDisplayPose_enc(void *self , uint32_t displayId, GLint* x, GLint* y, uint32_t* w, uint32_t* h)
 {
+	AEMU_SCOPED_TRACE("rcGetDisplayPose encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1671,10 +1716,10 @@
 	memcpy(ptr, &totalSize, 4);  ptr += 4;
 
 		memcpy(ptr, &displayId, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_x; ptr += 4;
-	*(unsigned int *)(ptr) = __size_y; ptr += 4;
-	*(unsigned int *)(ptr) = __size_w; ptr += 4;
-	*(unsigned int *)(ptr) = __size_h; ptr += 4;
+	memcpy(ptr, &__size_x, 4); ptr += 4;
+	memcpy(ptr, &__size_y, 4); ptr += 4;
+	memcpy(ptr, &__size_w, 4); ptr += 4;
+	memcpy(ptr, &__size_h, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
 	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
@@ -1706,6 +1751,7 @@
 
 int rcSetDisplayPose_enc(void *self , uint32_t displayId, GLint x, GLint y, uint32_t w, uint32_t h)
 {
+	AEMU_SCOPED_TRACE("rcSetDisplayPose encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1750,6 +1796,7 @@
 
 GLint rcSetColorBufferVulkanMode_enc(void *self , uint32_t colorBuffer, uint32_t mode)
 {
+	AEMU_SCOPED_TRACE("rcSetColorBufferVulkanMode encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1791,6 +1838,7 @@
 
 void rcReadColorBufferYUV_enc(void *self , uint32_t colorbuffer, GLint x, GLint y, GLint width, GLint height, void* pixels, uint32_t pixels_size)
 {
+	AEMU_SCOPED_TRACE("rcReadColorBufferYUV encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1813,7 +1861,7 @@
 		memcpy(ptr, &y, 4); ptr += 4;
 		memcpy(ptr, &width, 4); ptr += 4;
 		memcpy(ptr, &height, 4); ptr += 4;
-	*(unsigned int *)(ptr) = __size_pixels; ptr += 4;
+	memcpy(ptr, &__size_pixels, 4); ptr += 4;
 		memcpy(ptr, &pixels_size, 4); ptr += 4;
 
 	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
@@ -1835,6 +1883,7 @@
 
 int rcIsSyncSignaled_enc(void *self , uint64_t sync)
 {
+	AEMU_SCOPED_TRACE("rcIsSyncSignaled encode");
 
 	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
 	IOStream *stream = ctx->m_stream;
@@ -1873,6 +1922,574 @@
 	return retval;
 }
 
+void rcCreateColorBufferWithHandle_enc(void *self , uint32_t width, uint32_t height, GLenum internalFormat, uint32_t handle)
+{
+	AEMU_SCOPED_TRACE("rcCreateColorBufferWithHandle encode");
+
+	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
+	IOStream *stream = ctx->m_stream;
+	ChecksumCalculator *checksumCalculator = ctx->m_checksumCalculator;
+	bool useChecksum = checksumCalculator->getVersion() > 0;
+
+	 unsigned char *ptr;
+	 unsigned char *buf;
+	 const size_t sizeWithoutChecksum = 8 + 4 + 4 + 4 + 4;
+	 const size_t checksumSize = checksumCalculator->checksumByteSize();
+	 const size_t totalSize = sizeWithoutChecksum + checksumSize;
+	buf = stream->alloc(totalSize);
+	ptr = buf;
+	int tmp = OP_rcCreateColorBufferWithHandle;memcpy(ptr, &tmp, 4); ptr += 4;
+	memcpy(ptr, &totalSize, 4);  ptr += 4;
+
+		memcpy(ptr, &width, 4); ptr += 4;
+		memcpy(ptr, &height, 4); ptr += 4;
+		memcpy(ptr, &internalFormat, 4); ptr += 4;
+		memcpy(ptr, &handle, 4); ptr += 4;
+
+	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
+	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
+
+}
+
+uint32_t rcCreateBuffer_enc(void *self , uint32_t size)
+{
+	AEMU_SCOPED_TRACE("rcCreateBuffer encode");
+
+	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
+	IOStream *stream = ctx->m_stream;
+	ChecksumCalculator *checksumCalculator = ctx->m_checksumCalculator;
+	bool useChecksum = checksumCalculator->getVersion() > 0;
+
+	 unsigned char *ptr;
+	 unsigned char *buf;
+	 const size_t sizeWithoutChecksum = 8 + 4;
+	 const size_t checksumSize = checksumCalculator->checksumByteSize();
+	 const size_t totalSize = sizeWithoutChecksum + checksumSize;
+	buf = stream->alloc(totalSize);
+	ptr = buf;
+	int tmp = OP_rcCreateBuffer;memcpy(ptr, &tmp, 4); ptr += 4;
+	memcpy(ptr, &totalSize, 4);  ptr += 4;
+
+		memcpy(ptr, &size, 4); ptr += 4;
+
+	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
+	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
+
+
+	uint32_t retval;
+	stream->readback(&retval, 4);
+	if (useChecksum) checksumCalculator->addBuffer(&retval, 4);
+	if (useChecksum) {
+		unsigned char *checksumBufPtr = NULL;
+		unsigned char checksumBuf[ChecksumCalculator::kMaxChecksumSize];
+		if (checksumSize > 0) checksumBufPtr = &checksumBuf[0];
+		stream->readback(checksumBufPtr, checksumSize);
+		if (!checksumCalculator->validate(checksumBufPtr, checksumSize)) {
+			ALOGE("rcCreateBuffer: GL communication error, please report this issue to b.android.com.\n");
+			abort();
+		}
+	}
+	return retval;
+}
+
+void rcCloseBuffer_enc(void *self , uint32_t buffer)
+{
+	AEMU_SCOPED_TRACE("rcCloseBuffer encode");
+
+	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
+	IOStream *stream = ctx->m_stream;
+	ChecksumCalculator *checksumCalculator = ctx->m_checksumCalculator;
+	bool useChecksum = checksumCalculator->getVersion() > 0;
+
+	 unsigned char *ptr;
+	 unsigned char *buf;
+	 const size_t sizeWithoutChecksum = 8 + 4;
+	 const size_t checksumSize = checksumCalculator->checksumByteSize();
+	 const size_t totalSize = sizeWithoutChecksum + checksumSize;
+	buf = stream->alloc(totalSize);
+	ptr = buf;
+	int tmp = OP_rcCloseBuffer;memcpy(ptr, &tmp, 4); ptr += 4;
+	memcpy(ptr, &totalSize, 4);  ptr += 4;
+
+		memcpy(ptr, &buffer, 4); ptr += 4;
+
+	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
+	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
+
+}
+
+GLint rcSetColorBufferVulkanMode2_enc(void *self , uint32_t colorBuffer, uint32_t mode, uint32_t memoryProperty)
+{
+	AEMU_SCOPED_TRACE("rcSetColorBufferVulkanMode2 encode");
+
+	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
+	IOStream *stream = ctx->m_stream;
+	ChecksumCalculator *checksumCalculator = ctx->m_checksumCalculator;
+	bool useChecksum = checksumCalculator->getVersion() > 0;
+
+	 unsigned char *ptr;
+	 unsigned char *buf;
+	 const size_t sizeWithoutChecksum = 8 + 4 + 4 + 4;
+	 const size_t checksumSize = checksumCalculator->checksumByteSize();
+	 const size_t totalSize = sizeWithoutChecksum + checksumSize;
+	buf = stream->alloc(totalSize);
+	ptr = buf;
+	int tmp = OP_rcSetColorBufferVulkanMode2;memcpy(ptr, &tmp, 4); ptr += 4;
+	memcpy(ptr, &totalSize, 4);  ptr += 4;
+
+		memcpy(ptr, &colorBuffer, 4); ptr += 4;
+		memcpy(ptr, &mode, 4); ptr += 4;
+		memcpy(ptr, &memoryProperty, 4); ptr += 4;
+
+	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
+	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
+
+
+	GLint retval;
+	stream->readback(&retval, 4);
+	if (useChecksum) checksumCalculator->addBuffer(&retval, 4);
+	if (useChecksum) {
+		unsigned char *checksumBufPtr = NULL;
+		unsigned char checksumBuf[ChecksumCalculator::kMaxChecksumSize];
+		if (checksumSize > 0) checksumBufPtr = &checksumBuf[0];
+		stream->readback(checksumBufPtr, checksumSize);
+		if (!checksumCalculator->validate(checksumBufPtr, checksumSize)) {
+			ALOGE("rcSetColorBufferVulkanMode2: GL communication error, please report this issue to b.android.com.\n");
+			abort();
+		}
+	}
+	return retval;
+}
+
+int rcMapGpaToBufferHandle_enc(void *self , uint32_t bufferHandle, uint64_t gpa)
+{
+	AEMU_SCOPED_TRACE("rcMapGpaToBufferHandle encode");
+
+	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
+	IOStream *stream = ctx->m_stream;
+	ChecksumCalculator *checksumCalculator = ctx->m_checksumCalculator;
+	bool useChecksum = checksumCalculator->getVersion() > 0;
+
+	 unsigned char *ptr;
+	 unsigned char *buf;
+	 const size_t sizeWithoutChecksum = 8 + 4 + 8;
+	 const size_t checksumSize = checksumCalculator->checksumByteSize();
+	 const size_t totalSize = sizeWithoutChecksum + checksumSize;
+	buf = stream->alloc(totalSize);
+	ptr = buf;
+	int tmp = OP_rcMapGpaToBufferHandle;memcpy(ptr, &tmp, 4); ptr += 4;
+	memcpy(ptr, &totalSize, 4);  ptr += 4;
+
+		memcpy(ptr, &bufferHandle, 4); ptr += 4;
+		memcpy(ptr, &gpa, 8); ptr += 8;
+
+	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
+	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
+
+
+	int retval;
+	stream->readback(&retval, 4);
+	if (useChecksum) checksumCalculator->addBuffer(&retval, 4);
+	if (useChecksum) {
+		unsigned char *checksumBufPtr = NULL;
+		unsigned char checksumBuf[ChecksumCalculator::kMaxChecksumSize];
+		if (checksumSize > 0) checksumBufPtr = &checksumBuf[0];
+		stream->readback(checksumBufPtr, checksumSize);
+		if (!checksumCalculator->validate(checksumBufPtr, checksumSize)) {
+			ALOGE("rcMapGpaToBufferHandle: GL communication error, please report this issue to b.android.com.\n");
+			abort();
+		}
+	}
+	return retval;
+}
+
+uint32_t rcCreateBuffer2_enc(void *self , uint64_t size, uint32_t memoryProperty)
+{
+	AEMU_SCOPED_TRACE("rcCreateBuffer2 encode");
+
+	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
+	IOStream *stream = ctx->m_stream;
+	ChecksumCalculator *checksumCalculator = ctx->m_checksumCalculator;
+	bool useChecksum = checksumCalculator->getVersion() > 0;
+
+	 unsigned char *ptr;
+	 unsigned char *buf;
+	 const size_t sizeWithoutChecksum = 8 + 8 + 4;
+	 const size_t checksumSize = checksumCalculator->checksumByteSize();
+	 const size_t totalSize = sizeWithoutChecksum + checksumSize;
+	buf = stream->alloc(totalSize);
+	ptr = buf;
+	int tmp = OP_rcCreateBuffer2;memcpy(ptr, &tmp, 4); ptr += 4;
+	memcpy(ptr, &totalSize, 4);  ptr += 4;
+
+		memcpy(ptr, &size, 8); ptr += 8;
+		memcpy(ptr, &memoryProperty, 4); ptr += 4;
+
+	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
+	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
+
+
+	uint32_t retval;
+	stream->readback(&retval, 4);
+	if (useChecksum) checksumCalculator->addBuffer(&retval, 4);
+	if (useChecksum) {
+		unsigned char *checksumBufPtr = NULL;
+		unsigned char checksumBuf[ChecksumCalculator::kMaxChecksumSize];
+		if (checksumSize > 0) checksumBufPtr = &checksumBuf[0];
+		stream->readback(checksumBufPtr, checksumSize);
+		if (!checksumCalculator->validate(checksumBufPtr, checksumSize)) {
+			ALOGE("rcCreateBuffer2: GL communication error, please report this issue to b.android.com.\n");
+			abort();
+		}
+	}
+	return retval;
+}
+
+int rcMapGpaToBufferHandle2_enc(void *self , uint32_t bufferHandle, uint64_t gpa, uint64_t size)
+{
+	AEMU_SCOPED_TRACE("rcMapGpaToBufferHandle2 encode");
+
+	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
+	IOStream *stream = ctx->m_stream;
+	ChecksumCalculator *checksumCalculator = ctx->m_checksumCalculator;
+	bool useChecksum = checksumCalculator->getVersion() > 0;
+
+	 unsigned char *ptr;
+	 unsigned char *buf;
+	 const size_t sizeWithoutChecksum = 8 + 4 + 8 + 8;
+	 const size_t checksumSize = checksumCalculator->checksumByteSize();
+	 const size_t totalSize = sizeWithoutChecksum + checksumSize;
+	buf = stream->alloc(totalSize);
+	ptr = buf;
+	int tmp = OP_rcMapGpaToBufferHandle2;memcpy(ptr, &tmp, 4); ptr += 4;
+	memcpy(ptr, &totalSize, 4);  ptr += 4;
+
+		memcpy(ptr, &bufferHandle, 4); ptr += 4;
+		memcpy(ptr, &gpa, 8); ptr += 8;
+		memcpy(ptr, &size, 8); ptr += 8;
+
+	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
+	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
+
+
+	int retval;
+	stream->readback(&retval, 4);
+	if (useChecksum) checksumCalculator->addBuffer(&retval, 4);
+	if (useChecksum) {
+		unsigned char *checksumBufPtr = NULL;
+		unsigned char checksumBuf[ChecksumCalculator::kMaxChecksumSize];
+		if (checksumSize > 0) checksumBufPtr = &checksumBuf[0];
+		stream->readback(checksumBufPtr, checksumSize);
+		if (!checksumCalculator->validate(checksumBufPtr, checksumSize)) {
+			ALOGE("rcMapGpaToBufferHandle2: GL communication error, please report this issue to b.android.com.\n");
+			abort();
+		}
+	}
+	return retval;
+}
+
+void rcFlushWindowColorBufferAsyncWithFrameNumber_enc(void *self , uint32_t windowSurface, uint32_t frameNumber)
+{
+	AEMU_SCOPED_TRACE("rcFlushWindowColorBufferAsyncWithFrameNumber encode");
+
+	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
+	IOStream *stream = ctx->m_stream;
+	ChecksumCalculator *checksumCalculator = ctx->m_checksumCalculator;
+	bool useChecksum = checksumCalculator->getVersion() > 0;
+
+	 unsigned char *ptr;
+	 unsigned char *buf;
+	 const size_t sizeWithoutChecksum = 8 + 4 + 4;
+	 const size_t checksumSize = checksumCalculator->checksumByteSize();
+	 const size_t totalSize = sizeWithoutChecksum + checksumSize;
+	buf = stream->alloc(totalSize);
+	ptr = buf;
+	int tmp = OP_rcFlushWindowColorBufferAsyncWithFrameNumber;memcpy(ptr, &tmp, 4); ptr += 4;
+	memcpy(ptr, &totalSize, 4);  ptr += 4;
+
+		memcpy(ptr, &windowSurface, 4); ptr += 4;
+		memcpy(ptr, &frameNumber, 4); ptr += 4;
+
+	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
+	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
+
+}
+
+void rcSetTracingForPuid_enc(void *self , uint64_t puid, uint32_t enable, uint64_t guestTime)
+{
+	AEMU_SCOPED_TRACE("rcSetTracingForPuid encode");
+
+	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
+	IOStream *stream = ctx->m_stream;
+	ChecksumCalculator *checksumCalculator = ctx->m_checksumCalculator;
+	bool useChecksum = checksumCalculator->getVersion() > 0;
+
+	 unsigned char *ptr;
+	 unsigned char *buf;
+	 const size_t sizeWithoutChecksum = 8 + 8 + 4 + 8;
+	 const size_t checksumSize = checksumCalculator->checksumByteSize();
+	 const size_t totalSize = sizeWithoutChecksum + checksumSize;
+	buf = stream->alloc(totalSize);
+	ptr = buf;
+	int tmp = OP_rcSetTracingForPuid;memcpy(ptr, &tmp, 4); ptr += 4;
+	memcpy(ptr, &totalSize, 4);  ptr += 4;
+
+		memcpy(ptr, &puid, 8); ptr += 8;
+		memcpy(ptr, &enable, 4); ptr += 4;
+		memcpy(ptr, &guestTime, 8); ptr += 8;
+
+	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
+	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
+
+}
+
+void rcMakeCurrentAsync_enc(void *self , uint32_t context, uint32_t drawSurf, uint32_t readSurf)
+{
+	AEMU_SCOPED_TRACE("rcMakeCurrentAsync encode");
+
+	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
+	IOStream *stream = ctx->m_stream;
+	ChecksumCalculator *checksumCalculator = ctx->m_checksumCalculator;
+	bool useChecksum = checksumCalculator->getVersion() > 0;
+
+	 unsigned char *ptr;
+	 unsigned char *buf;
+	 const size_t sizeWithoutChecksum = 8 + 4 + 4 + 4;
+	 const size_t checksumSize = checksumCalculator->checksumByteSize();
+	 const size_t totalSize = sizeWithoutChecksum + checksumSize;
+	buf = stream->alloc(totalSize);
+	ptr = buf;
+	int tmp = OP_rcMakeCurrentAsync;memcpy(ptr, &tmp, 4); ptr += 4;
+	memcpy(ptr, &totalSize, 4);  ptr += 4;
+
+		memcpy(ptr, &context, 4); ptr += 4;
+		memcpy(ptr, &drawSurf, 4); ptr += 4;
+		memcpy(ptr, &readSurf, 4); ptr += 4;
+
+	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
+	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
+
+	stream->flush();
+}
+
+void rcComposeAsync_enc(void *self , uint32_t bufferSize, void* buffer)
+{
+	AEMU_SCOPED_TRACE("rcComposeAsync encode");
+
+	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
+	IOStream *stream = ctx->m_stream;
+	ChecksumCalculator *checksumCalculator = ctx->m_checksumCalculator;
+	bool useChecksum = checksumCalculator->getVersion() > 0;
+
+	const unsigned int __size_buffer =  bufferSize;
+	 unsigned char *ptr;
+	 unsigned char *buf;
+	 const size_t sizeWithoutChecksum = 8 + 4 + __size_buffer + 1*4;
+	 const size_t checksumSize = checksumCalculator->checksumByteSize();
+	 const size_t totalSize = sizeWithoutChecksum + checksumSize;
+	buf = stream->alloc(totalSize);
+	ptr = buf;
+	int tmp = OP_rcComposeAsync;memcpy(ptr, &tmp, 4); ptr += 4;
+	memcpy(ptr, &totalSize, 4);  ptr += 4;
+
+		memcpy(ptr, &bufferSize, 4); ptr += 4;
+	memcpy(ptr, &__size_buffer, 4); ptr += 4;
+	memcpy(ptr, buffer, __size_buffer);ptr += __size_buffer;
+
+	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
+	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
+
+	stream->flush();
+}
+
+void rcDestroySyncKHRAsync_enc(void *self , uint64_t sync)
+{
+	AEMU_SCOPED_TRACE("rcDestroySyncKHRAsync encode");
+
+	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
+	IOStream *stream = ctx->m_stream;
+	ChecksumCalculator *checksumCalculator = ctx->m_checksumCalculator;
+	bool useChecksum = checksumCalculator->getVersion() > 0;
+
+	 unsigned char *ptr;
+	 unsigned char *buf;
+	 const size_t sizeWithoutChecksum = 8 + 8;
+	 const size_t checksumSize = checksumCalculator->checksumByteSize();
+	 const size_t totalSize = sizeWithoutChecksum + checksumSize;
+	buf = stream->alloc(totalSize);
+	ptr = buf;
+	int tmp = OP_rcDestroySyncKHRAsync;memcpy(ptr, &tmp, 4); ptr += 4;
+	memcpy(ptr, &totalSize, 4);  ptr += 4;
+
+		memcpy(ptr, &sync, 8); ptr += 8;
+
+	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
+	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
+
+	stream->flush();
+}
+
+GLint rcComposeWithoutPost_enc(void *self , uint32_t bufferSize, void* buffer)
+{
+	AEMU_SCOPED_TRACE("rcComposeWithoutPost encode");
+
+	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
+	IOStream *stream = ctx->m_stream;
+	ChecksumCalculator *checksumCalculator = ctx->m_checksumCalculator;
+	bool useChecksum = checksumCalculator->getVersion() > 0;
+
+	const unsigned int __size_buffer =  bufferSize;
+	 unsigned char *ptr;
+	 unsigned char *buf;
+	 const size_t sizeWithoutChecksum = 8 + 4 + __size_buffer + 1*4;
+	 const size_t checksumSize = checksumCalculator->checksumByteSize();
+	 const size_t totalSize = sizeWithoutChecksum + checksumSize;
+	buf = stream->alloc(totalSize);
+	ptr = buf;
+	int tmp = OP_rcComposeWithoutPost;memcpy(ptr, &tmp, 4); ptr += 4;
+	memcpy(ptr, &totalSize, 4);  ptr += 4;
+
+		memcpy(ptr, &bufferSize, 4); ptr += 4;
+	memcpy(ptr, &__size_buffer, 4); ptr += 4;
+	memcpy(ptr, buffer, __size_buffer);ptr += __size_buffer;
+
+	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
+	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
+
+
+	GLint retval;
+	stream->readback(&retval, 4);
+	if (useChecksum) checksumCalculator->addBuffer(&retval, 4);
+	if (useChecksum) {
+		unsigned char *checksumBufPtr = NULL;
+		unsigned char checksumBuf[ChecksumCalculator::kMaxChecksumSize];
+		if (checksumSize > 0) checksumBufPtr = &checksumBuf[0];
+		stream->readback(checksumBufPtr, checksumSize);
+		if (!checksumCalculator->validate(checksumBufPtr, checksumSize)) {
+			ALOGE("rcComposeWithoutPost: GL communication error, please report this issue to b.android.com.\n");
+			abort();
+		}
+	}
+	return retval;
+}
+
+void rcComposeAsyncWithoutPost_enc(void *self , uint32_t bufferSize, void* buffer)
+{
+	AEMU_SCOPED_TRACE("rcComposeAsyncWithoutPost encode");
+
+	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
+	IOStream *stream = ctx->m_stream;
+	ChecksumCalculator *checksumCalculator = ctx->m_checksumCalculator;
+	bool useChecksum = checksumCalculator->getVersion() > 0;
+
+	const unsigned int __size_buffer =  bufferSize;
+	 unsigned char *ptr;
+	 unsigned char *buf;
+	 const size_t sizeWithoutChecksum = 8 + 4 + __size_buffer + 1*4;
+	 const size_t checksumSize = checksumCalculator->checksumByteSize();
+	 const size_t totalSize = sizeWithoutChecksum + checksumSize;
+	buf = stream->alloc(totalSize);
+	ptr = buf;
+	int tmp = OP_rcComposeAsyncWithoutPost;memcpy(ptr, &tmp, 4); ptr += 4;
+	memcpy(ptr, &totalSize, 4);  ptr += 4;
+
+		memcpy(ptr, &bufferSize, 4); ptr += 4;
+	memcpy(ptr, &__size_buffer, 4); ptr += 4;
+	memcpy(ptr, buffer, __size_buffer);ptr += __size_buffer;
+
+	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
+	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
+
+	stream->flush();
+}
+
+int rcCreateDisplayById_enc(void *self , uint32_t displayId)
+{
+	AEMU_SCOPED_TRACE("rcCreateDisplayById encode");
+
+	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
+	IOStream *stream = ctx->m_stream;
+	ChecksumCalculator *checksumCalculator = ctx->m_checksumCalculator;
+	bool useChecksum = checksumCalculator->getVersion() > 0;
+
+	 unsigned char *ptr;
+	 unsigned char *buf;
+	 const size_t sizeWithoutChecksum = 8 + 4;
+	 const size_t checksumSize = checksumCalculator->checksumByteSize();
+	 const size_t totalSize = sizeWithoutChecksum + checksumSize;
+	buf = stream->alloc(totalSize);
+	ptr = buf;
+	int tmp = OP_rcCreateDisplayById;memcpy(ptr, &tmp, 4); ptr += 4;
+	memcpy(ptr, &totalSize, 4);  ptr += 4;
+
+		memcpy(ptr, &displayId, 4); ptr += 4;
+
+	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
+	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
+
+
+	int retval;
+	stream->readback(&retval, 4);
+	if (useChecksum) checksumCalculator->addBuffer(&retval, 4);
+	if (useChecksum) {
+		unsigned char *checksumBufPtr = NULL;
+		unsigned char checksumBuf[ChecksumCalculator::kMaxChecksumSize];
+		if (checksumSize > 0) checksumBufPtr = &checksumBuf[0];
+		stream->readback(checksumBufPtr, checksumSize);
+		if (!checksumCalculator->validate(checksumBufPtr, checksumSize)) {
+			ALOGE("rcCreateDisplayById: GL communication error, please report this issue to b.android.com.\n");
+			abort();
+		}
+	}
+	return retval;
+}
+
+int rcSetDisplayPoseDpi_enc(void *self , uint32_t displayId, GLint x, GLint y, uint32_t w, uint32_t h, uint32_t dpi)
+{
+	AEMU_SCOPED_TRACE("rcSetDisplayPoseDpi encode");
+
+	renderControl_encoder_context_t *ctx = (renderControl_encoder_context_t *)self;
+	IOStream *stream = ctx->m_stream;
+	ChecksumCalculator *checksumCalculator = ctx->m_checksumCalculator;
+	bool useChecksum = checksumCalculator->getVersion() > 0;
+
+	 unsigned char *ptr;
+	 unsigned char *buf;
+	 const size_t sizeWithoutChecksum = 8 + 4 + 4 + 4 + 4 + 4 + 4;
+	 const size_t checksumSize = checksumCalculator->checksumByteSize();
+	 const size_t totalSize = sizeWithoutChecksum + checksumSize;
+	buf = stream->alloc(totalSize);
+	ptr = buf;
+	int tmp = OP_rcSetDisplayPoseDpi;memcpy(ptr, &tmp, 4); ptr += 4;
+	memcpy(ptr, &totalSize, 4);  ptr += 4;
+
+		memcpy(ptr, &displayId, 4); ptr += 4;
+		memcpy(ptr, &x, 4); ptr += 4;
+		memcpy(ptr, &y, 4); ptr += 4;
+		memcpy(ptr, &w, 4); ptr += 4;
+		memcpy(ptr, &h, 4); ptr += 4;
+		memcpy(ptr, &dpi, 4); ptr += 4;
+
+	if (useChecksum) checksumCalculator->addBuffer(buf, ptr-buf);
+	if (useChecksum) checksumCalculator->writeChecksum(ptr, checksumSize); ptr += checksumSize;
+
+
+	int retval;
+	stream->readback(&retval, 4);
+	if (useChecksum) checksumCalculator->addBuffer(&retval, 4);
+	if (useChecksum) {
+		unsigned char *checksumBufPtr = NULL;
+		unsigned char checksumBuf[ChecksumCalculator::kMaxChecksumSize];
+		if (checksumSize > 0) checksumBufPtr = &checksumBuf[0];
+		stream->readback(checksumBufPtr, checksumSize);
+		if (!checksumCalculator->validate(checksumBufPtr, checksumSize)) {
+			ALOGE("rcSetDisplayPoseDpi: GL communication error, please report this issue to b.android.com.\n");
+			abort();
+		}
+	}
+	return retval;
+}
+
 }  // namespace
 
 renderControl_encoder_context_t::renderControl_encoder_context_t(IOStream *stream, ChecksumCalculator *checksumCalculator)
@@ -1928,5 +2545,21 @@
 	this->rcSetColorBufferVulkanMode = &rcSetColorBufferVulkanMode_enc;
 	this->rcReadColorBufferYUV = &rcReadColorBufferYUV_enc;
 	this->rcIsSyncSignaled = &rcIsSyncSignaled_enc;
+	this->rcCreateColorBufferWithHandle = &rcCreateColorBufferWithHandle_enc;
+	this->rcCreateBuffer = &rcCreateBuffer_enc;
+	this->rcCloseBuffer = &rcCloseBuffer_enc;
+	this->rcSetColorBufferVulkanMode2 = &rcSetColorBufferVulkanMode2_enc;
+	this->rcMapGpaToBufferHandle = &rcMapGpaToBufferHandle_enc;
+	this->rcCreateBuffer2 = &rcCreateBuffer2_enc;
+	this->rcMapGpaToBufferHandle2 = &rcMapGpaToBufferHandle2_enc;
+	this->rcFlushWindowColorBufferAsyncWithFrameNumber = &rcFlushWindowColorBufferAsyncWithFrameNumber_enc;
+	this->rcSetTracingForPuid = &rcSetTracingForPuid_enc;
+	this->rcMakeCurrentAsync = &rcMakeCurrentAsync_enc;
+	this->rcComposeAsync = &rcComposeAsync_enc;
+	this->rcDestroySyncKHRAsync = &rcDestroySyncKHRAsync_enc;
+	this->rcComposeWithoutPost = &rcComposeWithoutPost_enc;
+	this->rcComposeAsyncWithoutPost = &rcComposeAsyncWithoutPost_enc;
+	this->rcCreateDisplayById = &rcCreateDisplayById_enc;
+	this->rcSetDisplayPoseDpi = &rcSetDisplayPoseDpi_enc;
 }
 
diff --git a/system/renderControl_enc/renderControl_entry.cpp b/system/renderControl_enc/renderControl_entry.cpp
index 0395d8b..c58ddf5 100644
--- a/system/renderControl_enc/renderControl_entry.cpp
+++ b/system/renderControl_enc/renderControl_entry.cpp
@@ -53,6 +53,22 @@
 	GLint rcSetColorBufferVulkanMode(uint32_t colorBuffer, uint32_t mode);
 	void rcReadColorBufferYUV(uint32_t colorbuffer, GLint x, GLint y, GLint width, GLint height, void* pixels, uint32_t pixels_size);
 	int rcIsSyncSignaled(uint64_t sync);
+	void rcCreateColorBufferWithHandle(uint32_t width, uint32_t height, GLenum internalFormat, uint32_t handle);
+	uint32_t rcCreateBuffer(uint32_t size);
+	void rcCloseBuffer(uint32_t buffer);
+	GLint rcSetColorBufferVulkanMode2(uint32_t colorBuffer, uint32_t mode, uint32_t memoryProperty);
+	int rcMapGpaToBufferHandle(uint32_t bufferHandle, uint64_t gpa);
+	uint32_t rcCreateBuffer2(uint64_t size, uint32_t memoryProperty);
+	int rcMapGpaToBufferHandle2(uint32_t bufferHandle, uint64_t gpa, uint64_t size);
+	void rcFlushWindowColorBufferAsyncWithFrameNumber(uint32_t windowSurface, uint32_t frameNumber);
+	void rcSetTracingForPuid(uint64_t puid, uint32_t enable, uint64_t guestTime);
+	void rcMakeCurrentAsync(uint32_t context, uint32_t drawSurf, uint32_t readSurf);
+	void rcComposeAsync(uint32_t bufferSize, void* buffer);
+	void rcDestroySyncKHRAsync(uint64_t sync);
+	GLint rcComposeWithoutPost(uint32_t bufferSize, void* buffer);
+	void rcComposeAsyncWithoutPost(uint32_t bufferSize, void* buffer);
+	int rcCreateDisplayById(uint32_t displayId);
+	int rcSetDisplayPoseDpi(uint32_t displayId, GLint x, GLint y, uint32_t w, uint32_t h, uint32_t dpi);
 };
 
 #ifndef GET_CONTEXT
@@ -349,3 +365,99 @@
 	return ctx->rcIsSyncSignaled(ctx, sync);
 }
 
+void rcCreateColorBufferWithHandle(uint32_t width, uint32_t height, GLenum internalFormat, uint32_t handle)
+{
+	GET_CONTEXT;
+	ctx->rcCreateColorBufferWithHandle(ctx, width, height, internalFormat, handle);
+}
+
+uint32_t rcCreateBuffer(uint32_t size)
+{
+	GET_CONTEXT;
+	return ctx->rcCreateBuffer(ctx, size);
+}
+
+void rcCloseBuffer(uint32_t buffer)
+{
+	GET_CONTEXT;
+	ctx->rcCloseBuffer(ctx, buffer);
+}
+
+GLint rcSetColorBufferVulkanMode2(uint32_t colorBuffer, uint32_t mode, uint32_t memoryProperty)
+{
+	GET_CONTEXT;
+	return ctx->rcSetColorBufferVulkanMode2(ctx, colorBuffer, mode, memoryProperty);
+}
+
+int rcMapGpaToBufferHandle(uint32_t bufferHandle, uint64_t gpa)
+{
+	GET_CONTEXT;
+	return ctx->rcMapGpaToBufferHandle(ctx, bufferHandle, gpa);
+}
+
+uint32_t rcCreateBuffer2(uint64_t size, uint32_t memoryProperty)
+{
+	GET_CONTEXT;
+	return ctx->rcCreateBuffer2(ctx, size, memoryProperty);
+}
+
+int rcMapGpaToBufferHandle2(uint32_t bufferHandle, uint64_t gpa, uint64_t size)
+{
+	GET_CONTEXT;
+	return ctx->rcMapGpaToBufferHandle2(ctx, bufferHandle, gpa, size);
+}
+
+void rcFlushWindowColorBufferAsyncWithFrameNumber(uint32_t windowSurface, uint32_t frameNumber)
+{
+	GET_CONTEXT;
+	ctx->rcFlushWindowColorBufferAsyncWithFrameNumber(ctx, windowSurface, frameNumber);
+}
+
+void rcSetTracingForPuid(uint64_t puid, uint32_t enable, uint64_t guestTime)
+{
+	GET_CONTEXT;
+	ctx->rcSetTracingForPuid(ctx, puid, enable, guestTime);
+}
+
+void rcMakeCurrentAsync(uint32_t context, uint32_t drawSurf, uint32_t readSurf)
+{
+	GET_CONTEXT;
+	ctx->rcMakeCurrentAsync(ctx, context, drawSurf, readSurf);
+}
+
+void rcComposeAsync(uint32_t bufferSize, void* buffer)
+{
+	GET_CONTEXT;
+	ctx->rcComposeAsync(ctx, bufferSize, buffer);
+}
+
+void rcDestroySyncKHRAsync(uint64_t sync)
+{
+	GET_CONTEXT;
+	ctx->rcDestroySyncKHRAsync(ctx, sync);
+}
+
+GLint rcComposeWithoutPost(uint32_t bufferSize, void* buffer)
+{
+	GET_CONTEXT;
+	return ctx->rcComposeWithoutPost(ctx, bufferSize, buffer);
+}
+
+void rcComposeAsyncWithoutPost(uint32_t bufferSize, void* buffer)
+{
+	GET_CONTEXT;
+	ctx->rcComposeAsyncWithoutPost(ctx, bufferSize, buffer);
+}
+
+int rcCreateDisplayById(uint32_t displayId)
+{
+	GET_CONTEXT;
+	return ctx->rcCreateDisplayById(ctx, displayId);
+}
+
+int rcSetDisplayPoseDpi(uint32_t displayId, GLint x, GLint y, uint32_t w, uint32_t h, uint32_t dpi)
+{
+	GET_CONTEXT;
+	return ctx->rcSetDisplayPoseDpi(ctx, displayId, x, y, w, h, dpi);
+}
+
diff --git a/system/renderControl_enc/renderControl_ftable.h b/system/renderControl_enc/renderControl_ftable.h
index 662ade8..2adf3bf 100644
--- a/system/renderControl_enc/renderControl_ftable.h
+++ b/system/renderControl_enc/renderControl_ftable.h
@@ -56,6 +56,22 @@
 	{"rcSetColorBufferVulkanMode", (void*)rcSetColorBufferVulkanMode},
 	{"rcReadColorBufferYUV", (void*)rcReadColorBufferYUV},
 	{"rcIsSyncSignaled", (void*)rcIsSyncSignaled},
+	{"rcCreateColorBufferWithHandle", (void*)rcCreateColorBufferWithHandle},
+	{"rcCreateBuffer", (void*)rcCreateBuffer},
+	{"rcCloseBuffer", (void*)rcCloseBuffer},
+	{"rcSetColorBufferVulkanMode2", (void*)rcSetColorBufferVulkanMode2},
+	{"rcMapGpaToBufferHandle", (void*)rcMapGpaToBufferHandle},
+	{"rcCreateBuffer2", (void*)rcCreateBuffer2},
+	{"rcMapGpaToBufferHandle2", (void*)rcMapGpaToBufferHandle2},
+	{"rcFlushWindowColorBufferAsyncWithFrameNumber", (void*)rcFlushWindowColorBufferAsyncWithFrameNumber},
+	{"rcSetTracingForPuid", (void*)rcSetTracingForPuid},
+	{"rcMakeCurrentAsync", (void*)rcMakeCurrentAsync},
+	{"rcComposeAsync", (void*)rcComposeAsync},
+	{"rcDestroySyncKHRAsync", (void*)rcDestroySyncKHRAsync},
+	{"rcComposeWithoutPost", (void*)rcComposeWithoutPost},
+	{"rcComposeAsyncWithoutPost", (void*)rcComposeAsyncWithoutPost},
+	{"rcCreateDisplayById", (void*)rcCreateDisplayById},
+	{"rcSetDisplayPoseDpi", (void*)rcSetDisplayPoseDpi},
 };
 static const int renderControl_num_funcs = sizeof(renderControl_funcs_by_name) / sizeof(struct _renderControl_funcs_by_name);
 
diff --git a/system/renderControl_enc/renderControl_opcodes.h b/system/renderControl_enc/renderControl_opcodes.h
index 338a38e..732d3e0 100644
--- a/system/renderControl_enc/renderControl_opcodes.h
+++ b/system/renderControl_enc/renderControl_opcodes.h
@@ -51,7 +51,23 @@
 #define OP_rcSetColorBufferVulkanMode 					10045
 #define OP_rcReadColorBufferYUV 					10046
 #define OP_rcIsSyncSignaled 					10047
-#define OP_last 					10048
+#define OP_rcCreateColorBufferWithHandle 					10048
+#define OP_rcCreateBuffer 					10049
+#define OP_rcCloseBuffer 					10050
+#define OP_rcSetColorBufferVulkanMode2 					10051
+#define OP_rcMapGpaToBufferHandle 					10052
+#define OP_rcCreateBuffer2 					10053
+#define OP_rcMapGpaToBufferHandle2 					10054
+#define OP_rcFlushWindowColorBufferAsyncWithFrameNumber 					10055
+#define OP_rcSetTracingForPuid 					10056
+#define OP_rcMakeCurrentAsync 					10057
+#define OP_rcComposeAsync 					10058
+#define OP_rcDestroySyncKHRAsync 					10059
+#define OP_rcComposeWithoutPost 					10060
+#define OP_rcComposeAsyncWithoutPost 					10061
+#define OP_rcCreateDisplayById 					10062
+#define OP_rcSetDisplayPoseDpi 					10063
+#define OP_last 					10064
 
 
 #endif
diff --git a/system/vulkan/Android.mk b/system/vulkan/Android.mk
index 2bec494..b323d35 100644
--- a/system/vulkan/Android.mk
+++ b/system/vulkan/Android.mk
@@ -36,7 +36,6 @@
     -Wno-unused-function
 
 LOCAL_SRC_FILES := \
-    func_table.cpp \
     goldfish_vulkan.cpp \
 
 $(call emugl-end-module)
diff --git a/system/vulkan/CMakeLists.txt b/system/vulkan/CMakeLists.txt
index 65bc00a..fa96e06 100644
--- a/system/vulkan/CMakeLists.txt
+++ b/system/vulkan/CMakeLists.txt
@@ -1,10 +1,10 @@
 # This is an autogenerated file! Do not edit!
 # instead run make from .../device/generic/goldfish-opengl
 # which will re-generate this file.
-android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/vulkan/Android.mk" "5cb873c72cc859fac3800961059c1b203ed1abb400ee643178c18e04961d49e8")
-set(vulkan.ranchu_src func_table.cpp goldfish_vulkan.cpp)
-android_add_library(TARGET vulkan.ranchu SHARED LICENSE Apache-2.0 SRC func_table.cpp goldfish_vulkan.cpp)
-target_include_directories(vulkan.ranchu PRIVATE ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon/bionic-include ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon ${GOLDFISH_DEVICE_ROOT}/bionic/libc/private ${GOLDFISH_DEVICE_ROOT}/bionic/libc/platform ${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/gralloc_cb/include ${GOLDFISH_DEVICE_ROOT}/shared/GoldfishAddressSpace/include ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv2_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv1_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/system/vulkan ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/host/include)
-target_compile_definitions(vulkan.ranchu PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGOLDFISH_VULKAN" "-DLOG_TAG=\"goldfish_vulkan\"" "-DVK_USE_PLATFORM_ANDROID_KHR" "-DVK_NO_PROTOTYPES")
+android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/vulkan/Android.mk" "26b21ee8d8cf1c8ba5b47850310695979178e1117283253befc931dfed43a727")
+set(vulkan.ranchu_src goldfish_vulkan.cpp)
+android_add_library(TARGET vulkan.ranchu SHARED LICENSE Apache-2.0 SRC goldfish_vulkan.cpp)
+target_include_directories(vulkan.ranchu PRIVATE ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon/bionic-include ${GOLDFISH_DEVICE_ROOT}/system/OpenglSystemCommon ${GOLDFISH_DEVICE_ROOT}/bionic/libc/private ${GOLDFISH_DEVICE_ROOT}/bionic/libc/platform ${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc ${GOLDFISH_DEVICE_ROOT}/shared/gralloc_cb/include ${GOLDFISH_DEVICE_ROOT}/shared/GoldfishAddressSpace/include ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv2_enc ${GOLDFISH_DEVICE_ROOT}/system/GLESv1_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/system/vulkan ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/host/include)
+target_compile_definitions(vulkan.ranchu PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM" "-DLOG_TAG=\"goldfish_vulkan\"" "-DVK_USE_PLATFORM_ANDROID_KHR" "-DVK_NO_PROTOTYPES")
 target_compile_options(vulkan.ranchu PRIVATE "-fvisibility=default" "-Wno-unused-parameter" "-Wno-missing-field-initializers" "-fvisibility=hidden" "-fstrict-aliasing" "-Wno-unused-function")
-target_link_libraries(vulkan.ranchu PRIVATE OpenglSystemCommon android-emu-shared vulkan_enc gui androidemu cutils utils log _renderControl_enc GLESv2_enc GLESv1_enc OpenglCodecCommon_host PRIVATE gralloc_cb_host GoldfishAddressSpace_host qemupipe_host)
\ No newline at end of file
+target_link_libraries(vulkan.ranchu PRIVATE OpenglSystemCommon android-emu-shared vulkan_enc gui log _renderControl_enc GLESv2_enc GLESv1_enc OpenglCodecCommon_host cutils utils androidemu GoldfishProfiler PRIVATE gralloc_cb_host GoldfishAddressSpace_host qemupipe_host)
\ No newline at end of file
diff --git a/system/vulkan/func_table.cpp b/system/vulkan/func_table.cpp
deleted file mode 100644
index 721e7c5..0000000
--- a/system/vulkan/func_table.cpp
+++ /dev/null
@@ -1,9927 +0,0 @@
-// Copyright (C) 2018 The Android Open Source Project
-// Copyright (C) 2018 Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Autogenerated module func_table
-// (impl) generated by android/android-emugl/host/libs/libOpenglRender/vulkan-registry/xml/genvk.py -registry android/android-emugl/host/libs/libOpenglRender/vulkan-registry/xml/vk.xml cereal -o android/android-emugl/host/libs/libOpenglRender/vulkan/cereal
-// Please do not modify directly;
-// re-run android/scripts/generate-vulkan-sources.sh,
-// or directly from Python by defining:
-// VULKAN_REGISTRY_XML_DIR : Directory containing genvk.py and vk.xml
-// CEREAL_OUTPUT_DIR: Where to put the generated sources.
-// python3 $VULKAN_REGISTRY_XML_DIR/genvk.py -registry $VULKAN_REGISTRY_XML_DIR/vk.xml cereal -o $CEREAL_OUTPUT_DIR
-
-#include "func_table.h"
-
-
-#include "VkEncoder.h"
-#include "HostConnection.h"
-#include "ResourceTracker.h"
-
-#include "goldfish_vk_private_defs.h"
-
-#include <log/log.h>
-
-// Stuff we are not going to use but if included,
-// will cause compile errors. These are Android Vulkan
-// required extensions, but the approach will be to
-// implement them completely on the guest side.
-#undef VK_KHR_android_surface
-
-
-namespace goldfish_vk {
-
-static void sOnInvalidDynamicallyCheckedCall(const char* apiname, const char* neededFeature)
-{
-    ALOGE("invalid call to %s: %s not supported", apiname, neededFeature);
-    abort();
-}
-#ifdef VK_VERSION_1_0
-static VkResult entry_vkCreateInstance(
-    const VkInstanceCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkInstance* pInstance)
-{
-    AEMU_SCOPED_TRACE("vkCreateInstance");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateInstance_VkResult_return = (VkResult)0;
-    vkCreateInstance_VkResult_return = vkEnc->vkCreateInstance(pCreateInfo, pAllocator, pInstance);
-    return vkCreateInstance_VkResult_return;
-}
-static void entry_vkDestroyInstance(
-    VkInstance instance,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyInstance");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyInstance(instance, pAllocator);
-}
-static VkResult entry_vkEnumeratePhysicalDevices(
-    VkInstance instance,
-    uint32_t* pPhysicalDeviceCount,
-    VkPhysicalDevice* pPhysicalDevices)
-{
-    AEMU_SCOPED_TRACE("vkEnumeratePhysicalDevices");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkEnumeratePhysicalDevices_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkEnumeratePhysicalDevices_VkResult_return = resources->on_vkEnumeratePhysicalDevices(vkEnc, VK_SUCCESS, instance, pPhysicalDeviceCount, pPhysicalDevices);
-    return vkEnumeratePhysicalDevices_VkResult_return;
-}
-static void entry_vkGetPhysicalDeviceFeatures(
-    VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceFeatures* pFeatures)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFeatures");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceFeatures(physicalDevice, pFeatures);
-}
-static void entry_vkGetPhysicalDeviceFormatProperties(
-    VkPhysicalDevice physicalDevice,
-    VkFormat format,
-    VkFormatProperties* pFormatProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFormatProperties");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceFormatProperties(physicalDevice, format, pFormatProperties);
-}
-static VkResult entry_vkGetPhysicalDeviceImageFormatProperties(
-    VkPhysicalDevice physicalDevice,
-    VkFormat format,
-    VkImageType type,
-    VkImageTiling tiling,
-    VkImageUsageFlags usage,
-    VkImageCreateFlags flags,
-    VkImageFormatProperties* pImageFormatProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceImageFormatProperties");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPhysicalDeviceImageFormatProperties_VkResult_return = (VkResult)0;
-    vkGetPhysicalDeviceImageFormatProperties_VkResult_return = vkEnc->vkGetPhysicalDeviceImageFormatProperties(physicalDevice, format, type, tiling, usage, flags, pImageFormatProperties);
-    return vkGetPhysicalDeviceImageFormatProperties_VkResult_return;
-}
-static void entry_vkGetPhysicalDeviceProperties(
-    VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceProperties* pProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceProperties");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceProperties(physicalDevice, pProperties);
-}
-static void entry_vkGetPhysicalDeviceQueueFamilyProperties(
-    VkPhysicalDevice physicalDevice,
-    uint32_t* pQueueFamilyPropertyCount,
-    VkQueueFamilyProperties* pQueueFamilyProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceQueueFamilyProperties");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);
-}
-static void entry_vkGetPhysicalDeviceMemoryProperties(
-    VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceMemoryProperties* pMemoryProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMemoryProperties");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);
-}
-static PFN_vkVoidFunction entry_vkGetInstanceProcAddr(
-    VkInstance instance,
-    const char* pName)
-{
-    AEMU_SCOPED_TRACE("vkGetInstanceProcAddr");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    PFN_vkVoidFunction vkGetInstanceProcAddr_PFN_vkVoidFunction_return = (PFN_vkVoidFunction)0;
-    vkGetInstanceProcAddr_PFN_vkVoidFunction_return = vkEnc->vkGetInstanceProcAddr(instance, pName);
-    return vkGetInstanceProcAddr_PFN_vkVoidFunction_return;
-}
-static PFN_vkVoidFunction entry_vkGetDeviceProcAddr(
-    VkDevice device,
-    const char* pName)
-{
-    AEMU_SCOPED_TRACE("vkGetDeviceProcAddr");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    PFN_vkVoidFunction vkGetDeviceProcAddr_PFN_vkVoidFunction_return = (PFN_vkVoidFunction)0;
-    vkGetDeviceProcAddr_PFN_vkVoidFunction_return = vkEnc->vkGetDeviceProcAddr(device, pName);
-    return vkGetDeviceProcAddr_PFN_vkVoidFunction_return;
-}
-static VkResult entry_vkCreateDevice(
-    VkPhysicalDevice physicalDevice,
-    const VkDeviceCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkDevice* pDevice)
-{
-    AEMU_SCOPED_TRACE("vkCreateDevice");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateDevice_VkResult_return = (VkResult)0;
-    vkCreateDevice_VkResult_return = vkEnc->vkCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);
-    return vkCreateDevice_VkResult_return;
-}
-static void entry_vkDestroyDevice(
-    VkDevice device,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyDevice");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyDevice(device, pAllocator);
-}
-static VkResult entry_vkEnumerateInstanceExtensionProperties(
-    const char* pLayerName,
-    uint32_t* pPropertyCount,
-    VkExtensionProperties* pProperties)
-{
-    AEMU_SCOPED_TRACE("vkEnumerateInstanceExtensionProperties");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkEnumerateInstanceExtensionProperties_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkEnumerateInstanceExtensionProperties_VkResult_return = resources->on_vkEnumerateInstanceExtensionProperties(vkEnc, VK_SUCCESS, pLayerName, pPropertyCount, pProperties);
-    return vkEnumerateInstanceExtensionProperties_VkResult_return;
-}
-static VkResult entry_vkEnumerateDeviceExtensionProperties(
-    VkPhysicalDevice physicalDevice,
-    const char* pLayerName,
-    uint32_t* pPropertyCount,
-    VkExtensionProperties* pProperties)
-{
-    AEMU_SCOPED_TRACE("vkEnumerateDeviceExtensionProperties");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkEnumerateDeviceExtensionProperties_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkEnumerateDeviceExtensionProperties_VkResult_return = resources->on_vkEnumerateDeviceExtensionProperties(vkEnc, VK_SUCCESS, physicalDevice, pLayerName, pPropertyCount, pProperties);
-    return vkEnumerateDeviceExtensionProperties_VkResult_return;
-}
-static VkResult entry_vkEnumerateInstanceLayerProperties(
-    uint32_t* pPropertyCount,
-    VkLayerProperties* pProperties)
-{
-    AEMU_SCOPED_TRACE("vkEnumerateInstanceLayerProperties");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkEnumerateInstanceLayerProperties_VkResult_return = (VkResult)0;
-    vkEnumerateInstanceLayerProperties_VkResult_return = vkEnc->vkEnumerateInstanceLayerProperties(pPropertyCount, pProperties);
-    return vkEnumerateInstanceLayerProperties_VkResult_return;
-}
-static VkResult entry_vkEnumerateDeviceLayerProperties(
-    VkPhysicalDevice physicalDevice,
-    uint32_t* pPropertyCount,
-    VkLayerProperties* pProperties)
-{
-    AEMU_SCOPED_TRACE("vkEnumerateDeviceLayerProperties");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkEnumerateDeviceLayerProperties_VkResult_return = (VkResult)0;
-    vkEnumerateDeviceLayerProperties_VkResult_return = vkEnc->vkEnumerateDeviceLayerProperties(physicalDevice, pPropertyCount, pProperties);
-    return vkEnumerateDeviceLayerProperties_VkResult_return;
-}
-static void entry_vkGetDeviceQueue(
-    VkDevice device,
-    uint32_t queueFamilyIndex,
-    uint32_t queueIndex,
-    VkQueue* pQueue)
-{
-    AEMU_SCOPED_TRACE("vkGetDeviceQueue");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
-}
-static VkResult entry_vkQueueSubmit(
-    VkQueue queue,
-    uint32_t submitCount,
-    const VkSubmitInfo* pSubmits,
-    VkFence fence)
-{
-    AEMU_SCOPED_TRACE("vkQueueSubmit");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkQueueSubmit_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkQueueSubmit_VkResult_return = resources->on_vkQueueSubmit(vkEnc, VK_SUCCESS, queue, submitCount, pSubmits, fence);
-    return vkQueueSubmit_VkResult_return;
-}
-static VkResult entry_vkQueueWaitIdle(
-    VkQueue queue)
-{
-    AEMU_SCOPED_TRACE("vkQueueWaitIdle");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkQueueWaitIdle_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkQueueWaitIdle_VkResult_return = resources->on_vkQueueWaitIdle(vkEnc, VK_SUCCESS, queue);
-    return vkQueueWaitIdle_VkResult_return;
-}
-static VkResult entry_vkDeviceWaitIdle(
-    VkDevice device)
-{
-    AEMU_SCOPED_TRACE("vkDeviceWaitIdle");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkDeviceWaitIdle_VkResult_return = (VkResult)0;
-    vkDeviceWaitIdle_VkResult_return = vkEnc->vkDeviceWaitIdle(device);
-    return vkDeviceWaitIdle_VkResult_return;
-}
-static VkResult entry_vkAllocateMemory(
-    VkDevice device,
-    const VkMemoryAllocateInfo* pAllocateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkDeviceMemory* pMemory)
-{
-    AEMU_SCOPED_TRACE("vkAllocateMemory");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkAllocateMemory_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkAllocateMemory_VkResult_return = resources->on_vkAllocateMemory(vkEnc, VK_SUCCESS, device, pAllocateInfo, pAllocator, pMemory);
-    return vkAllocateMemory_VkResult_return;
-}
-static void entry_vkFreeMemory(
-    VkDevice device,
-    VkDeviceMemory memory,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkFreeMemory");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    auto resources = ResourceTracker::get();
-    resources->on_vkFreeMemory(vkEnc, device, memory, pAllocator);
-}
-static VkResult entry_vkMapMemory(
-    VkDevice device,
-    VkDeviceMemory memory,
-    VkDeviceSize offset,
-    VkDeviceSize size,
-    VkMemoryMapFlags flags,
-    void** ppData)
-{
-    AEMU_SCOPED_TRACE("vkMapMemory");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkMapMemory_VkResult_return = (VkResult)0;
-    vkMapMemory_VkResult_return = vkEnc->vkMapMemory(device, memory, offset, size, flags, ppData);
-    return vkMapMemory_VkResult_return;
-}
-static void entry_vkUnmapMemory(
-    VkDevice device,
-    VkDeviceMemory memory)
-{
-    AEMU_SCOPED_TRACE("vkUnmapMemory");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkUnmapMemory(device, memory);
-}
-static VkResult entry_vkFlushMappedMemoryRanges(
-    VkDevice device,
-    uint32_t memoryRangeCount,
-    const VkMappedMemoryRange* pMemoryRanges)
-{
-    AEMU_SCOPED_TRACE("vkFlushMappedMemoryRanges");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkFlushMappedMemoryRanges_VkResult_return = (VkResult)0;
-    vkFlushMappedMemoryRanges_VkResult_return = vkEnc->vkFlushMappedMemoryRanges(device, memoryRangeCount, pMemoryRanges);
-    return vkFlushMappedMemoryRanges_VkResult_return;
-}
-static VkResult entry_vkInvalidateMappedMemoryRanges(
-    VkDevice device,
-    uint32_t memoryRangeCount,
-    const VkMappedMemoryRange* pMemoryRanges)
-{
-    AEMU_SCOPED_TRACE("vkInvalidateMappedMemoryRanges");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkInvalidateMappedMemoryRanges_VkResult_return = (VkResult)0;
-    vkInvalidateMappedMemoryRanges_VkResult_return = vkEnc->vkInvalidateMappedMemoryRanges(device, memoryRangeCount, pMemoryRanges);
-    return vkInvalidateMappedMemoryRanges_VkResult_return;
-}
-static void entry_vkGetDeviceMemoryCommitment(
-    VkDevice device,
-    VkDeviceMemory memory,
-    VkDeviceSize* pCommittedMemoryInBytes)
-{
-    AEMU_SCOPED_TRACE("vkGetDeviceMemoryCommitment");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetDeviceMemoryCommitment(device, memory, pCommittedMemoryInBytes);
-}
-static VkResult entry_vkBindBufferMemory(
-    VkDevice device,
-    VkBuffer buffer,
-    VkDeviceMemory memory,
-    VkDeviceSize memoryOffset)
-{
-    AEMU_SCOPED_TRACE("vkBindBufferMemory");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkBindBufferMemory_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkBindBufferMemory_VkResult_return = resources->on_vkBindBufferMemory(vkEnc, VK_SUCCESS, device, buffer, memory, memoryOffset);
-    return vkBindBufferMemory_VkResult_return;
-}
-static VkResult entry_vkBindImageMemory(
-    VkDevice device,
-    VkImage image,
-    VkDeviceMemory memory,
-    VkDeviceSize memoryOffset)
-{
-    AEMU_SCOPED_TRACE("vkBindImageMemory");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkBindImageMemory_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkBindImageMemory_VkResult_return = resources->on_vkBindImageMemory(vkEnc, VK_SUCCESS, device, image, memory, memoryOffset);
-    return vkBindImageMemory_VkResult_return;
-}
-static void entry_vkGetBufferMemoryRequirements(
-    VkDevice device,
-    VkBuffer buffer,
-    VkMemoryRequirements* pMemoryRequirements)
-{
-    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    auto resources = ResourceTracker::get();
-    resources->on_vkGetBufferMemoryRequirements(vkEnc, device, buffer, pMemoryRequirements);
-}
-static void entry_vkGetImageMemoryRequirements(
-    VkDevice device,
-    VkImage image,
-    VkMemoryRequirements* pMemoryRequirements)
-{
-    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    auto resources = ResourceTracker::get();
-    resources->on_vkGetImageMemoryRequirements(vkEnc, device, image, pMemoryRequirements);
-}
-static void entry_vkGetImageSparseMemoryRequirements(
-    VkDevice device,
-    VkImage image,
-    uint32_t* pSparseMemoryRequirementCount,
-    VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
-{
-    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetImageSparseMemoryRequirements(device, image, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
-}
-static void entry_vkGetPhysicalDeviceSparseImageFormatProperties(
-    VkPhysicalDevice physicalDevice,
-    VkFormat format,
-    VkImageType type,
-    VkSampleCountFlagBits samples,
-    VkImageUsageFlags usage,
-    VkImageTiling tiling,
-    uint32_t* pPropertyCount,
-    VkSparseImageFormatProperties* pProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSparseImageFormatProperties");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling, pPropertyCount, pProperties);
-}
-static VkResult entry_vkQueueBindSparse(
-    VkQueue queue,
-    uint32_t bindInfoCount,
-    const VkBindSparseInfo* pBindInfo,
-    VkFence fence)
-{
-    AEMU_SCOPED_TRACE("vkQueueBindSparse");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkQueueBindSparse_VkResult_return = (VkResult)0;
-    vkQueueBindSparse_VkResult_return = vkEnc->vkQueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
-    return vkQueueBindSparse_VkResult_return;
-}
-static VkResult entry_vkCreateFence(
-    VkDevice device,
-    const VkFenceCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkFence* pFence)
-{
-    AEMU_SCOPED_TRACE("vkCreateFence");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateFence_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkCreateFence_VkResult_return = resources->on_vkCreateFence(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pFence);
-    return vkCreateFence_VkResult_return;
-}
-static void entry_vkDestroyFence(
-    VkDevice device,
-    VkFence fence,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyFence");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyFence(device, fence, pAllocator);
-}
-static VkResult entry_vkResetFences(
-    VkDevice device,
-    uint32_t fenceCount,
-    const VkFence* pFences)
-{
-    AEMU_SCOPED_TRACE("vkResetFences");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkResetFences_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkResetFences_VkResult_return = resources->on_vkResetFences(vkEnc, VK_SUCCESS, device, fenceCount, pFences);
-    return vkResetFences_VkResult_return;
-}
-static VkResult entry_vkGetFenceStatus(
-    VkDevice device,
-    VkFence fence)
-{
-    AEMU_SCOPED_TRACE("vkGetFenceStatus");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetFenceStatus_VkResult_return = (VkResult)0;
-    vkGetFenceStatus_VkResult_return = vkEnc->vkGetFenceStatus(device, fence);
-    return vkGetFenceStatus_VkResult_return;
-}
-static VkResult entry_vkWaitForFences(
-    VkDevice device,
-    uint32_t fenceCount,
-    const VkFence* pFences,
-    VkBool32 waitAll,
-    uint64_t timeout)
-{
-    AEMU_SCOPED_TRACE("vkWaitForFences");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkWaitForFences_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkWaitForFences_VkResult_return = resources->on_vkWaitForFences(vkEnc, VK_SUCCESS, device, fenceCount, pFences, waitAll, timeout);
-    return vkWaitForFences_VkResult_return;
-}
-static VkResult entry_vkCreateSemaphore(
-    VkDevice device,
-    const VkSemaphoreCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSemaphore* pSemaphore)
-{
-    AEMU_SCOPED_TRACE("vkCreateSemaphore");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateSemaphore_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkCreateSemaphore_VkResult_return = resources->on_vkCreateSemaphore(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pSemaphore);
-    return vkCreateSemaphore_VkResult_return;
-}
-static void entry_vkDestroySemaphore(
-    VkDevice device,
-    VkSemaphore semaphore,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroySemaphore");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    auto resources = ResourceTracker::get();
-    resources->on_vkDestroySemaphore(vkEnc, device, semaphore, pAllocator);
-}
-static VkResult entry_vkCreateEvent(
-    VkDevice device,
-    const VkEventCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkEvent* pEvent)
-{
-    AEMU_SCOPED_TRACE("vkCreateEvent");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateEvent_VkResult_return = (VkResult)0;
-    vkCreateEvent_VkResult_return = vkEnc->vkCreateEvent(device, pCreateInfo, pAllocator, pEvent);
-    return vkCreateEvent_VkResult_return;
-}
-static void entry_vkDestroyEvent(
-    VkDevice device,
-    VkEvent event,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyEvent");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyEvent(device, event, pAllocator);
-}
-static VkResult entry_vkGetEventStatus(
-    VkDevice device,
-    VkEvent event)
-{
-    AEMU_SCOPED_TRACE("vkGetEventStatus");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetEventStatus_VkResult_return = (VkResult)0;
-    vkGetEventStatus_VkResult_return = vkEnc->vkGetEventStatus(device, event);
-    return vkGetEventStatus_VkResult_return;
-}
-static VkResult entry_vkSetEvent(
-    VkDevice device,
-    VkEvent event)
-{
-    AEMU_SCOPED_TRACE("vkSetEvent");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkSetEvent_VkResult_return = (VkResult)0;
-    vkSetEvent_VkResult_return = vkEnc->vkSetEvent(device, event);
-    return vkSetEvent_VkResult_return;
-}
-static VkResult entry_vkResetEvent(
-    VkDevice device,
-    VkEvent event)
-{
-    AEMU_SCOPED_TRACE("vkResetEvent");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkResetEvent_VkResult_return = (VkResult)0;
-    vkResetEvent_VkResult_return = vkEnc->vkResetEvent(device, event);
-    return vkResetEvent_VkResult_return;
-}
-static VkResult entry_vkCreateQueryPool(
-    VkDevice device,
-    const VkQueryPoolCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkQueryPool* pQueryPool)
-{
-    AEMU_SCOPED_TRACE("vkCreateQueryPool");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateQueryPool_VkResult_return = (VkResult)0;
-    vkCreateQueryPool_VkResult_return = vkEnc->vkCreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
-    return vkCreateQueryPool_VkResult_return;
-}
-static void entry_vkDestroyQueryPool(
-    VkDevice device,
-    VkQueryPool queryPool,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyQueryPool");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyQueryPool(device, queryPool, pAllocator);
-}
-static VkResult entry_vkGetQueryPoolResults(
-    VkDevice device,
-    VkQueryPool queryPool,
-    uint32_t firstQuery,
-    uint32_t queryCount,
-    size_t dataSize,
-    void* pData,
-    VkDeviceSize stride,
-    VkQueryResultFlags flags)
-{
-    AEMU_SCOPED_TRACE("vkGetQueryPoolResults");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetQueryPoolResults_VkResult_return = (VkResult)0;
-    vkGetQueryPoolResults_VkResult_return = vkEnc->vkGetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
-    return vkGetQueryPoolResults_VkResult_return;
-}
-static VkResult entry_vkCreateBuffer(
-    VkDevice device,
-    const VkBufferCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkBuffer* pBuffer)
-{
-    AEMU_SCOPED_TRACE("vkCreateBuffer");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateBuffer_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkCreateBuffer_VkResult_return = resources->on_vkCreateBuffer(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pBuffer);
-    return vkCreateBuffer_VkResult_return;
-}
-static void entry_vkDestroyBuffer(
-    VkDevice device,
-    VkBuffer buffer,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyBuffer");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    auto resources = ResourceTracker::get();
-    resources->on_vkDestroyBuffer(vkEnc, device, buffer, pAllocator);
-}
-static VkResult entry_vkCreateBufferView(
-    VkDevice device,
-    const VkBufferViewCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkBufferView* pView)
-{
-    AEMU_SCOPED_TRACE("vkCreateBufferView");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateBufferView_VkResult_return = (VkResult)0;
-    vkCreateBufferView_VkResult_return = vkEnc->vkCreateBufferView(device, pCreateInfo, pAllocator, pView);
-    return vkCreateBufferView_VkResult_return;
-}
-static void entry_vkDestroyBufferView(
-    VkDevice device,
-    VkBufferView bufferView,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyBufferView");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyBufferView(device, bufferView, pAllocator);
-}
-static VkResult entry_vkCreateImage(
-    VkDevice device,
-    const VkImageCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkImage* pImage)
-{
-    AEMU_SCOPED_TRACE("vkCreateImage");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateImage_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkCreateImage_VkResult_return = resources->on_vkCreateImage(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pImage);
-    return vkCreateImage_VkResult_return;
-}
-static void entry_vkDestroyImage(
-    VkDevice device,
-    VkImage image,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyImage");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    auto resources = ResourceTracker::get();
-    resources->on_vkDestroyImage(vkEnc, device, image, pAllocator);
-}
-static void entry_vkGetImageSubresourceLayout(
-    VkDevice device,
-    VkImage image,
-    const VkImageSubresource* pSubresource,
-    VkSubresourceLayout* pLayout)
-{
-    AEMU_SCOPED_TRACE("vkGetImageSubresourceLayout");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetImageSubresourceLayout(device, image, pSubresource, pLayout);
-}
-static VkResult entry_vkCreateImageView(
-    VkDevice device,
-    const VkImageViewCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkImageView* pView)
-{
-    AEMU_SCOPED_TRACE("vkCreateImageView");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateImageView_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkCreateImageView_VkResult_return = resources->on_vkCreateImageView(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pView);
-    return vkCreateImageView_VkResult_return;
-}
-static void entry_vkDestroyImageView(
-    VkDevice device,
-    VkImageView imageView,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyImageView");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyImageView(device, imageView, pAllocator);
-}
-static VkResult entry_vkCreateShaderModule(
-    VkDevice device,
-    const VkShaderModuleCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkShaderModule* pShaderModule)
-{
-    AEMU_SCOPED_TRACE("vkCreateShaderModule");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateShaderModule_VkResult_return = (VkResult)0;
-    vkCreateShaderModule_VkResult_return = vkEnc->vkCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
-    return vkCreateShaderModule_VkResult_return;
-}
-static void entry_vkDestroyShaderModule(
-    VkDevice device,
-    VkShaderModule shaderModule,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyShaderModule");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyShaderModule(device, shaderModule, pAllocator);
-}
-static VkResult entry_vkCreatePipelineCache(
-    VkDevice device,
-    const VkPipelineCacheCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkPipelineCache* pPipelineCache)
-{
-    AEMU_SCOPED_TRACE("vkCreatePipelineCache");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreatePipelineCache_VkResult_return = (VkResult)0;
-    vkCreatePipelineCache_VkResult_return = vkEnc->vkCreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
-    return vkCreatePipelineCache_VkResult_return;
-}
-static void entry_vkDestroyPipelineCache(
-    VkDevice device,
-    VkPipelineCache pipelineCache,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyPipelineCache");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyPipelineCache(device, pipelineCache, pAllocator);
-}
-static VkResult entry_vkGetPipelineCacheData(
-    VkDevice device,
-    VkPipelineCache pipelineCache,
-    size_t* pDataSize,
-    void* pData)
-{
-    AEMU_SCOPED_TRACE("vkGetPipelineCacheData");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPipelineCacheData_VkResult_return = (VkResult)0;
-    vkGetPipelineCacheData_VkResult_return = vkEnc->vkGetPipelineCacheData(device, pipelineCache, pDataSize, pData);
-    return vkGetPipelineCacheData_VkResult_return;
-}
-static VkResult entry_vkMergePipelineCaches(
-    VkDevice device,
-    VkPipelineCache dstCache,
-    uint32_t srcCacheCount,
-    const VkPipelineCache* pSrcCaches)
-{
-    AEMU_SCOPED_TRACE("vkMergePipelineCaches");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkMergePipelineCaches_VkResult_return = (VkResult)0;
-    vkMergePipelineCaches_VkResult_return = vkEnc->vkMergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
-    return vkMergePipelineCaches_VkResult_return;
-}
-static VkResult entry_vkCreateGraphicsPipelines(
-    VkDevice device,
-    VkPipelineCache pipelineCache,
-    uint32_t createInfoCount,
-    const VkGraphicsPipelineCreateInfo* pCreateInfos,
-    const VkAllocationCallbacks* pAllocator,
-    VkPipeline* pPipelines)
-{
-    AEMU_SCOPED_TRACE("vkCreateGraphicsPipelines");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateGraphicsPipelines_VkResult_return = (VkResult)0;
-    vkCreateGraphicsPipelines_VkResult_return = vkEnc->vkCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
-    return vkCreateGraphicsPipelines_VkResult_return;
-}
-static VkResult entry_vkCreateComputePipelines(
-    VkDevice device,
-    VkPipelineCache pipelineCache,
-    uint32_t createInfoCount,
-    const VkComputePipelineCreateInfo* pCreateInfos,
-    const VkAllocationCallbacks* pAllocator,
-    VkPipeline* pPipelines)
-{
-    AEMU_SCOPED_TRACE("vkCreateComputePipelines");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateComputePipelines_VkResult_return = (VkResult)0;
-    vkCreateComputePipelines_VkResult_return = vkEnc->vkCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
-    return vkCreateComputePipelines_VkResult_return;
-}
-static void entry_vkDestroyPipeline(
-    VkDevice device,
-    VkPipeline pipeline,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyPipeline");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyPipeline(device, pipeline, pAllocator);
-}
-static VkResult entry_vkCreatePipelineLayout(
-    VkDevice device,
-    const VkPipelineLayoutCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkPipelineLayout* pPipelineLayout)
-{
-    AEMU_SCOPED_TRACE("vkCreatePipelineLayout");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreatePipelineLayout_VkResult_return = (VkResult)0;
-    vkCreatePipelineLayout_VkResult_return = vkEnc->vkCreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
-    return vkCreatePipelineLayout_VkResult_return;
-}
-static void entry_vkDestroyPipelineLayout(
-    VkDevice device,
-    VkPipelineLayout pipelineLayout,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyPipelineLayout");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyPipelineLayout(device, pipelineLayout, pAllocator);
-}
-static VkResult entry_vkCreateSampler(
-    VkDevice device,
-    const VkSamplerCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSampler* pSampler)
-{
-    AEMU_SCOPED_TRACE("vkCreateSampler");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateSampler_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkCreateSampler_VkResult_return = resources->on_vkCreateSampler(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pSampler);
-    return vkCreateSampler_VkResult_return;
-}
-static void entry_vkDestroySampler(
-    VkDevice device,
-    VkSampler sampler,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroySampler");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroySampler(device, sampler, pAllocator);
-}
-static VkResult entry_vkCreateDescriptorSetLayout(
-    VkDevice device,
-    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkDescriptorSetLayout* pSetLayout)
-{
-    AEMU_SCOPED_TRACE("vkCreateDescriptorSetLayout");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateDescriptorSetLayout_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkCreateDescriptorSetLayout_VkResult_return = resources->on_vkCreateDescriptorSetLayout(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pSetLayout);
-    return vkCreateDescriptorSetLayout_VkResult_return;
-}
-static void entry_vkDestroyDescriptorSetLayout(
-    VkDevice device,
-    VkDescriptorSetLayout descriptorSetLayout,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorSetLayout");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
-}
-static VkResult entry_vkCreateDescriptorPool(
-    VkDevice device,
-    const VkDescriptorPoolCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkDescriptorPool* pDescriptorPool)
-{
-    AEMU_SCOPED_TRACE("vkCreateDescriptorPool");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateDescriptorPool_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkCreateDescriptorPool_VkResult_return = resources->on_vkCreateDescriptorPool(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pDescriptorPool);
-    return vkCreateDescriptorPool_VkResult_return;
-}
-static void entry_vkDestroyDescriptorPool(
-    VkDevice device,
-    VkDescriptorPool descriptorPool,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorPool");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    auto resources = ResourceTracker::get();
-    resources->on_vkDestroyDescriptorPool(vkEnc, device, descriptorPool, pAllocator);
-}
-static VkResult entry_vkResetDescriptorPool(
-    VkDevice device,
-    VkDescriptorPool descriptorPool,
-    VkDescriptorPoolResetFlags flags)
-{
-    AEMU_SCOPED_TRACE("vkResetDescriptorPool");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkResetDescriptorPool_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkResetDescriptorPool_VkResult_return = resources->on_vkResetDescriptorPool(vkEnc, VK_SUCCESS, device, descriptorPool, flags);
-    return vkResetDescriptorPool_VkResult_return;
-}
-static VkResult entry_vkAllocateDescriptorSets(
-    VkDevice device,
-    const VkDescriptorSetAllocateInfo* pAllocateInfo,
-    VkDescriptorSet* pDescriptorSets)
-{
-    AEMU_SCOPED_TRACE("vkAllocateDescriptorSets");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkAllocateDescriptorSets_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkAllocateDescriptorSets_VkResult_return = resources->on_vkAllocateDescriptorSets(vkEnc, VK_SUCCESS, device, pAllocateInfo, pDescriptorSets);
-    return vkAllocateDescriptorSets_VkResult_return;
-}
-static VkResult entry_vkFreeDescriptorSets(
-    VkDevice device,
-    VkDescriptorPool descriptorPool,
-    uint32_t descriptorSetCount,
-    const VkDescriptorSet* pDescriptorSets)
-{
-    AEMU_SCOPED_TRACE("vkFreeDescriptorSets");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkFreeDescriptorSets_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkFreeDescriptorSets_VkResult_return = resources->on_vkFreeDescriptorSets(vkEnc, VK_SUCCESS, device, descriptorPool, descriptorSetCount, pDescriptorSets);
-    return vkFreeDescriptorSets_VkResult_return;
-}
-static void entry_vkUpdateDescriptorSets(
-    VkDevice device,
-    uint32_t descriptorWriteCount,
-    const VkWriteDescriptorSet* pDescriptorWrites,
-    uint32_t descriptorCopyCount,
-    const VkCopyDescriptorSet* pDescriptorCopies)
-{
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSets");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    auto resources = ResourceTracker::get();
-    resources->on_vkUpdateDescriptorSets(vkEnc, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
-}
-static VkResult entry_vkCreateFramebuffer(
-    VkDevice device,
-    const VkFramebufferCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkFramebuffer* pFramebuffer)
-{
-    AEMU_SCOPED_TRACE("vkCreateFramebuffer");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateFramebuffer_VkResult_return = (VkResult)0;
-    vkCreateFramebuffer_VkResult_return = vkEnc->vkCreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
-    return vkCreateFramebuffer_VkResult_return;
-}
-static void entry_vkDestroyFramebuffer(
-    VkDevice device,
-    VkFramebuffer framebuffer,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyFramebuffer");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyFramebuffer(device, framebuffer, pAllocator);
-}
-static VkResult entry_vkCreateRenderPass(
-    VkDevice device,
-    const VkRenderPassCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkRenderPass* pRenderPass)
-{
-    AEMU_SCOPED_TRACE("vkCreateRenderPass");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateRenderPass_VkResult_return = (VkResult)0;
-    vkCreateRenderPass_VkResult_return = vkEnc->vkCreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
-    return vkCreateRenderPass_VkResult_return;
-}
-static void entry_vkDestroyRenderPass(
-    VkDevice device,
-    VkRenderPass renderPass,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyRenderPass");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyRenderPass(device, renderPass, pAllocator);
-}
-static void entry_vkGetRenderAreaGranularity(
-    VkDevice device,
-    VkRenderPass renderPass,
-    VkExtent2D* pGranularity)
-{
-    AEMU_SCOPED_TRACE("vkGetRenderAreaGranularity");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetRenderAreaGranularity(device, renderPass, pGranularity);
-}
-static VkResult entry_vkCreateCommandPool(
-    VkDevice device,
-    const VkCommandPoolCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkCommandPool* pCommandPool)
-{
-    AEMU_SCOPED_TRACE("vkCreateCommandPool");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateCommandPool_VkResult_return = (VkResult)0;
-    vkCreateCommandPool_VkResult_return = vkEnc->vkCreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
-    return vkCreateCommandPool_VkResult_return;
-}
-static void entry_vkDestroyCommandPool(
-    VkDevice device,
-    VkCommandPool commandPool,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyCommandPool");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyCommandPool(device, commandPool, pAllocator);
-}
-static VkResult entry_vkResetCommandPool(
-    VkDevice device,
-    VkCommandPool commandPool,
-    VkCommandPoolResetFlags flags)
-{
-    AEMU_SCOPED_TRACE("vkResetCommandPool");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkResetCommandPool_VkResult_return = (VkResult)0;
-    vkResetCommandPool_VkResult_return = vkEnc->vkResetCommandPool(device, commandPool, flags);
-    return vkResetCommandPool_VkResult_return;
-}
-static VkResult entry_vkAllocateCommandBuffers(
-    VkDevice device,
-    const VkCommandBufferAllocateInfo* pAllocateInfo,
-    VkCommandBuffer* pCommandBuffers)
-{
-    AEMU_SCOPED_TRACE("vkAllocateCommandBuffers");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkAllocateCommandBuffers_VkResult_return = (VkResult)0;
-    vkAllocateCommandBuffers_VkResult_return = vkEnc->vkAllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);
-    return vkAllocateCommandBuffers_VkResult_return;
-}
-static void entry_vkFreeCommandBuffers(
-    VkDevice device,
-    VkCommandPool commandPool,
-    uint32_t commandBufferCount,
-    const VkCommandBuffer* pCommandBuffers)
-{
-    AEMU_SCOPED_TRACE("vkFreeCommandBuffers");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkFreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
-}
-static VkResult entry_vkBeginCommandBuffer(
-    VkCommandBuffer commandBuffer,
-    const VkCommandBufferBeginInfo* pBeginInfo)
-{
-    AEMU_SCOPED_TRACE("vkBeginCommandBuffer");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    VkResult vkBeginCommandBuffer_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkBeginCommandBuffer_VkResult_return = resources->on_vkBeginCommandBuffer(vkEnc, VK_SUCCESS, commandBuffer, pBeginInfo);
-    return vkBeginCommandBuffer_VkResult_return;
-}
-static VkResult entry_vkEndCommandBuffer(
-    VkCommandBuffer commandBuffer)
-{
-    AEMU_SCOPED_TRACE("vkEndCommandBuffer");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    VkResult vkEndCommandBuffer_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkEndCommandBuffer_VkResult_return = resources->on_vkEndCommandBuffer(vkEnc, VK_SUCCESS, commandBuffer);
-    return vkEndCommandBuffer_VkResult_return;
-}
-static VkResult entry_vkResetCommandBuffer(
-    VkCommandBuffer commandBuffer,
-    VkCommandBufferResetFlags flags)
-{
-    AEMU_SCOPED_TRACE("vkResetCommandBuffer");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    VkResult vkResetCommandBuffer_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkResetCommandBuffer_VkResult_return = resources->on_vkResetCommandBuffer(vkEnc, VK_SUCCESS, commandBuffer, flags);
-    return vkResetCommandBuffer_VkResult_return;
-}
-static void entry_vkCmdBindPipeline(
-    VkCommandBuffer commandBuffer,
-    VkPipelineBindPoint pipelineBindPoint,
-    VkPipeline pipeline)
-{
-    AEMU_SCOPED_TRACE("vkCmdBindPipeline");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
-}
-static void entry_vkCmdSetViewport(
-    VkCommandBuffer commandBuffer,
-    uint32_t firstViewport,
-    uint32_t viewportCount,
-    const VkViewport* pViewports)
-{
-    AEMU_SCOPED_TRACE("vkCmdSetViewport");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
-}
-static void entry_vkCmdSetScissor(
-    VkCommandBuffer commandBuffer,
-    uint32_t firstScissor,
-    uint32_t scissorCount,
-    const VkRect2D* pScissors)
-{
-    AEMU_SCOPED_TRACE("vkCmdSetScissor");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
-}
-static void entry_vkCmdSetLineWidth(
-    VkCommandBuffer commandBuffer,
-    float lineWidth)
-{
-    AEMU_SCOPED_TRACE("vkCmdSetLineWidth");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdSetLineWidth(commandBuffer, lineWidth);
-}
-static void entry_vkCmdSetDepthBias(
-    VkCommandBuffer commandBuffer,
-    float depthBiasConstantFactor,
-    float depthBiasClamp,
-    float depthBiasSlopeFactor)
-{
-    AEMU_SCOPED_TRACE("vkCmdSetDepthBias");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
-}
-static void entry_vkCmdSetBlendConstants(
-    VkCommandBuffer commandBuffer,
-    const float blendConstants[4])
-{
-    AEMU_SCOPED_TRACE("vkCmdSetBlendConstants");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdSetBlendConstants(commandBuffer, blendConstants);
-}
-static void entry_vkCmdSetDepthBounds(
-    VkCommandBuffer commandBuffer,
-    float minDepthBounds,
-    float maxDepthBounds)
-{
-    AEMU_SCOPED_TRACE("vkCmdSetDepthBounds");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
-}
-static void entry_vkCmdSetStencilCompareMask(
-    VkCommandBuffer commandBuffer,
-    VkStencilFaceFlags faceMask,
-    uint32_t compareMask)
-{
-    AEMU_SCOPED_TRACE("vkCmdSetStencilCompareMask");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
-}
-static void entry_vkCmdSetStencilWriteMask(
-    VkCommandBuffer commandBuffer,
-    VkStencilFaceFlags faceMask,
-    uint32_t writeMask)
-{
-    AEMU_SCOPED_TRACE("vkCmdSetStencilWriteMask");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
-}
-static void entry_vkCmdSetStencilReference(
-    VkCommandBuffer commandBuffer,
-    VkStencilFaceFlags faceMask,
-    uint32_t reference)
-{
-    AEMU_SCOPED_TRACE("vkCmdSetStencilReference");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdSetStencilReference(commandBuffer, faceMask, reference);
-}
-static void entry_vkCmdBindDescriptorSets(
-    VkCommandBuffer commandBuffer,
-    VkPipelineBindPoint pipelineBindPoint,
-    VkPipelineLayout layout,
-    uint32_t firstSet,
-    uint32_t descriptorSetCount,
-    const VkDescriptorSet* pDescriptorSets,
-    uint32_t dynamicOffsetCount,
-    const uint32_t* pDynamicOffsets)
-{
-    AEMU_SCOPED_TRACE("vkCmdBindDescriptorSets");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, descriptorSetCount, pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
-}
-static void entry_vkCmdBindIndexBuffer(
-    VkCommandBuffer commandBuffer,
-    VkBuffer buffer,
-    VkDeviceSize offset,
-    VkIndexType indexType)
-{
-    AEMU_SCOPED_TRACE("vkCmdBindIndexBuffer");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
-}
-static void entry_vkCmdBindVertexBuffers(
-    VkCommandBuffer commandBuffer,
-    uint32_t firstBinding,
-    uint32_t bindingCount,
-    const VkBuffer* pBuffers,
-    const VkDeviceSize* pOffsets)
-{
-    AEMU_SCOPED_TRACE("vkCmdBindVertexBuffers");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
-}
-static void entry_vkCmdDraw(
-    VkCommandBuffer commandBuffer,
-    uint32_t vertexCount,
-    uint32_t instanceCount,
-    uint32_t firstVertex,
-    uint32_t firstInstance)
-{
-    AEMU_SCOPED_TRACE("vkCmdDraw");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
-}
-static void entry_vkCmdDrawIndexed(
-    VkCommandBuffer commandBuffer,
-    uint32_t indexCount,
-    uint32_t instanceCount,
-    uint32_t firstIndex,
-    int32_t vertexOffset,
-    uint32_t firstInstance)
-{
-    AEMU_SCOPED_TRACE("vkCmdDrawIndexed");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
-}
-static void entry_vkCmdDrawIndirect(
-    VkCommandBuffer commandBuffer,
-    VkBuffer buffer,
-    VkDeviceSize offset,
-    uint32_t drawCount,
-    uint32_t stride)
-{
-    AEMU_SCOPED_TRACE("vkCmdDrawIndirect");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride);
-}
-static void entry_vkCmdDrawIndexedIndirect(
-    VkCommandBuffer commandBuffer,
-    VkBuffer buffer,
-    VkDeviceSize offset,
-    uint32_t drawCount,
-    uint32_t stride)
-{
-    AEMU_SCOPED_TRACE("vkCmdDrawIndexedIndirect");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride);
-}
-static void entry_vkCmdDispatch(
-    VkCommandBuffer commandBuffer,
-    uint32_t groupCountX,
-    uint32_t groupCountY,
-    uint32_t groupCountZ)
-{
-    AEMU_SCOPED_TRACE("vkCmdDispatch");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdDispatch(commandBuffer, groupCountX, groupCountY, groupCountZ);
-}
-static void entry_vkCmdDispatchIndirect(
-    VkCommandBuffer commandBuffer,
-    VkBuffer buffer,
-    VkDeviceSize offset)
-{
-    AEMU_SCOPED_TRACE("vkCmdDispatchIndirect");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdDispatchIndirect(commandBuffer, buffer, offset);
-}
-static void entry_vkCmdCopyBuffer(
-    VkCommandBuffer commandBuffer,
-    VkBuffer srcBuffer,
-    VkBuffer dstBuffer,
-    uint32_t regionCount,
-    const VkBufferCopy* pRegions)
-{
-    AEMU_SCOPED_TRACE("vkCmdCopyBuffer");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
-}
-static void entry_vkCmdCopyImage(
-    VkCommandBuffer commandBuffer,
-    VkImage srcImage,
-    VkImageLayout srcImageLayout,
-    VkImage dstImage,
-    VkImageLayout dstImageLayout,
-    uint32_t regionCount,
-    const VkImageCopy* pRegions)
-{
-    AEMU_SCOPED_TRACE("vkCmdCopyImage");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
-}
-static void entry_vkCmdBlitImage(
-    VkCommandBuffer commandBuffer,
-    VkImage srcImage,
-    VkImageLayout srcImageLayout,
-    VkImage dstImage,
-    VkImageLayout dstImageLayout,
-    uint32_t regionCount,
-    const VkImageBlit* pRegions,
-    VkFilter filter)
-{
-    AEMU_SCOPED_TRACE("vkCmdBlitImage");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter);
-}
-static void entry_vkCmdCopyBufferToImage(
-    VkCommandBuffer commandBuffer,
-    VkBuffer srcBuffer,
-    VkImage dstImage,
-    VkImageLayout dstImageLayout,
-    uint32_t regionCount,
-    const VkBufferImageCopy* pRegions)
-{
-    AEMU_SCOPED_TRACE("vkCmdCopyBufferToImage");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
-}
-static void entry_vkCmdCopyImageToBuffer(
-    VkCommandBuffer commandBuffer,
-    VkImage srcImage,
-    VkImageLayout srcImageLayout,
-    VkBuffer dstBuffer,
-    uint32_t regionCount,
-    const VkBufferImageCopy* pRegions)
-{
-    AEMU_SCOPED_TRACE("vkCmdCopyImageToBuffer");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
-}
-static void entry_vkCmdUpdateBuffer(
-    VkCommandBuffer commandBuffer,
-    VkBuffer dstBuffer,
-    VkDeviceSize dstOffset,
-    VkDeviceSize dataSize,
-    const void* pData)
-{
-    AEMU_SCOPED_TRACE("vkCmdUpdateBuffer");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
-}
-static void entry_vkCmdFillBuffer(
-    VkCommandBuffer commandBuffer,
-    VkBuffer dstBuffer,
-    VkDeviceSize dstOffset,
-    VkDeviceSize size,
-    uint32_t data)
-{
-    AEMU_SCOPED_TRACE("vkCmdFillBuffer");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
-}
-static void entry_vkCmdClearColorImage(
-    VkCommandBuffer commandBuffer,
-    VkImage image,
-    VkImageLayout imageLayout,
-    const VkClearColorValue* pColor,
-    uint32_t rangeCount,
-    const VkImageSubresourceRange* pRanges)
-{
-    AEMU_SCOPED_TRACE("vkCmdClearColorImage");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
-}
-static void entry_vkCmdClearDepthStencilImage(
-    VkCommandBuffer commandBuffer,
-    VkImage image,
-    VkImageLayout imageLayout,
-    const VkClearDepthStencilValue* pDepthStencil,
-    uint32_t rangeCount,
-    const VkImageSubresourceRange* pRanges)
-{
-    AEMU_SCOPED_TRACE("vkCmdClearDepthStencilImage");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
-}
-static void entry_vkCmdClearAttachments(
-    VkCommandBuffer commandBuffer,
-    uint32_t attachmentCount,
-    const VkClearAttachment* pAttachments,
-    uint32_t rectCount,
-    const VkClearRect* pRects)
-{
-    AEMU_SCOPED_TRACE("vkCmdClearAttachments");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
-}
-static void entry_vkCmdResolveImage(
-    VkCommandBuffer commandBuffer,
-    VkImage srcImage,
-    VkImageLayout srcImageLayout,
-    VkImage dstImage,
-    VkImageLayout dstImageLayout,
-    uint32_t regionCount,
-    const VkImageResolve* pRegions)
-{
-    AEMU_SCOPED_TRACE("vkCmdResolveImage");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions);
-}
-static void entry_vkCmdSetEvent(
-    VkCommandBuffer commandBuffer,
-    VkEvent event,
-    VkPipelineStageFlags stageMask)
-{
-    AEMU_SCOPED_TRACE("vkCmdSetEvent");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdSetEvent(commandBuffer, event, stageMask);
-}
-static void entry_vkCmdResetEvent(
-    VkCommandBuffer commandBuffer,
-    VkEvent event,
-    VkPipelineStageFlags stageMask)
-{
-    AEMU_SCOPED_TRACE("vkCmdResetEvent");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdResetEvent(commandBuffer, event, stageMask);
-}
-static void entry_vkCmdWaitEvents(
-    VkCommandBuffer commandBuffer,
-    uint32_t eventCount,
-    const VkEvent* pEvents,
-    VkPipelineStageFlags srcStageMask,
-    VkPipelineStageFlags dstStageMask,
-    uint32_t memoryBarrierCount,
-    const VkMemoryBarrier* pMemoryBarriers,
-    uint32_t bufferMemoryBarrierCount,
-    const VkBufferMemoryBarrier* pBufferMemoryBarriers,
-    uint32_t imageMemoryBarrierCount,
-    const VkImageMemoryBarrier* pImageMemoryBarriers)
-{
-    AEMU_SCOPED_TRACE("vkCmdWaitEvents");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
-}
-static void entry_vkCmdPipelineBarrier(
-    VkCommandBuffer commandBuffer,
-    VkPipelineStageFlags srcStageMask,
-    VkPipelineStageFlags dstStageMask,
-    VkDependencyFlags dependencyFlags,
-    uint32_t memoryBarrierCount,
-    const VkMemoryBarrier* pMemoryBarriers,
-    uint32_t bufferMemoryBarrierCount,
-    const VkBufferMemoryBarrier* pBufferMemoryBarriers,
-    uint32_t imageMemoryBarrierCount,
-    const VkImageMemoryBarrier* pImageMemoryBarriers)
-{
-    AEMU_SCOPED_TRACE("vkCmdPipelineBarrier");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
-}
-static void entry_vkCmdBeginQuery(
-    VkCommandBuffer commandBuffer,
-    VkQueryPool queryPool,
-    uint32_t query,
-    VkQueryControlFlags flags)
-{
-    AEMU_SCOPED_TRACE("vkCmdBeginQuery");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdBeginQuery(commandBuffer, queryPool, query, flags);
-}
-static void entry_vkCmdEndQuery(
-    VkCommandBuffer commandBuffer,
-    VkQueryPool queryPool,
-    uint32_t query)
-{
-    AEMU_SCOPED_TRACE("vkCmdEndQuery");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdEndQuery(commandBuffer, queryPool, query);
-}
-static void entry_vkCmdResetQueryPool(
-    VkCommandBuffer commandBuffer,
-    VkQueryPool queryPool,
-    uint32_t firstQuery,
-    uint32_t queryCount)
-{
-    AEMU_SCOPED_TRACE("vkCmdResetQueryPool");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
-}
-static void entry_vkCmdWriteTimestamp(
-    VkCommandBuffer commandBuffer,
-    VkPipelineStageFlagBits pipelineStage,
-    VkQueryPool queryPool,
-    uint32_t query)
-{
-    AEMU_SCOPED_TRACE("vkCmdWriteTimestamp");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, query);
-}
-static void entry_vkCmdCopyQueryPoolResults(
-    VkCommandBuffer commandBuffer,
-    VkQueryPool queryPool,
-    uint32_t firstQuery,
-    uint32_t queryCount,
-    VkBuffer dstBuffer,
-    VkDeviceSize dstOffset,
-    VkDeviceSize stride,
-    VkQueryResultFlags flags)
-{
-    AEMU_SCOPED_TRACE("vkCmdCopyQueryPoolResults");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride, flags);
-}
-static void entry_vkCmdPushConstants(
-    VkCommandBuffer commandBuffer,
-    VkPipelineLayout layout,
-    VkShaderStageFlags stageFlags,
-    uint32_t offset,
-    uint32_t size,
-    const void* pValues)
-{
-    AEMU_SCOPED_TRACE("vkCmdPushConstants");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
-}
-static void entry_vkCmdBeginRenderPass(
-    VkCommandBuffer commandBuffer,
-    const VkRenderPassBeginInfo* pRenderPassBegin,
-    VkSubpassContents contents)
-{
-    AEMU_SCOPED_TRACE("vkCmdBeginRenderPass");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
-}
-static void entry_vkCmdNextSubpass(
-    VkCommandBuffer commandBuffer,
-    VkSubpassContents contents)
-{
-    AEMU_SCOPED_TRACE("vkCmdNextSubpass");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdNextSubpass(commandBuffer, contents);
-}
-static void entry_vkCmdEndRenderPass(
-    VkCommandBuffer commandBuffer)
-{
-    AEMU_SCOPED_TRACE("vkCmdEndRenderPass");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdEndRenderPass(commandBuffer);
-}
-static void entry_vkCmdExecuteCommands(
-    VkCommandBuffer commandBuffer,
-    uint32_t commandBufferCount,
-    const VkCommandBuffer* pCommandBuffers)
-{
-    AEMU_SCOPED_TRACE("vkCmdExecuteCommands");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers);
-}
-#endif
-#ifdef VK_VERSION_1_1
-static VkResult entry_vkEnumerateInstanceVersion(
-    uint32_t* pApiVersion)
-{
-    AEMU_SCOPED_TRACE("vkEnumerateInstanceVersion");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkEnumerateInstanceVersion_VkResult_return = (VkResult)0;
-    vkEnumerateInstanceVersion_VkResult_return = vkEnc->vkEnumerateInstanceVersion(pApiVersion);
-    return vkEnumerateInstanceVersion_VkResult_return;
-}
-static VkResult entry_vkBindBufferMemory2(
-    VkDevice device,
-    uint32_t bindInfoCount,
-    const VkBindBufferMemoryInfo* pBindInfos)
-{
-    AEMU_SCOPED_TRACE("vkBindBufferMemory2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkBindBufferMemory2_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkBindBufferMemory2_VkResult_return = resources->on_vkBindBufferMemory2(vkEnc, VK_SUCCESS, device, bindInfoCount, pBindInfos);
-    return vkBindBufferMemory2_VkResult_return;
-}
-static VkResult dynCheck_entry_vkBindBufferMemory2(
-    VkDevice device,
-    uint32_t bindInfoCount,
-    const VkBindBufferMemoryInfo* pBindInfos)
-{
-    auto resources = ResourceTracker::get();
-    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
-    {
-        sOnInvalidDynamicallyCheckedCall("vkBindBufferMemory2", "VK_VERSION_1_1");
-    }
-    AEMU_SCOPED_TRACE("vkBindBufferMemory2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkBindBufferMemory2_VkResult_return = (VkResult)0;
-    vkBindBufferMemory2_VkResult_return = resources->on_vkBindBufferMemory2(vkEnc, VK_SUCCESS, device, bindInfoCount, pBindInfos);
-    return vkBindBufferMemory2_VkResult_return;
-}
-static VkResult entry_vkBindImageMemory2(
-    VkDevice device,
-    uint32_t bindInfoCount,
-    const VkBindImageMemoryInfo* pBindInfos)
-{
-    AEMU_SCOPED_TRACE("vkBindImageMemory2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkBindImageMemory2_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkBindImageMemory2_VkResult_return = resources->on_vkBindImageMemory2(vkEnc, VK_SUCCESS, device, bindInfoCount, pBindInfos);
-    return vkBindImageMemory2_VkResult_return;
-}
-static VkResult dynCheck_entry_vkBindImageMemory2(
-    VkDevice device,
-    uint32_t bindInfoCount,
-    const VkBindImageMemoryInfo* pBindInfos)
-{
-    auto resources = ResourceTracker::get();
-    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
-    {
-        sOnInvalidDynamicallyCheckedCall("vkBindImageMemory2", "VK_VERSION_1_1");
-    }
-    AEMU_SCOPED_TRACE("vkBindImageMemory2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkBindImageMemory2_VkResult_return = (VkResult)0;
-    vkBindImageMemory2_VkResult_return = resources->on_vkBindImageMemory2(vkEnc, VK_SUCCESS, device, bindInfoCount, pBindInfos);
-    return vkBindImageMemory2_VkResult_return;
-}
-static void entry_vkGetDeviceGroupPeerMemoryFeatures(
-    VkDevice device,
-    uint32_t heapIndex,
-    uint32_t localDeviceIndex,
-    uint32_t remoteDeviceIndex,
-    VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
-{
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupPeerMemoryFeatures");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetDeviceGroupPeerMemoryFeatures(device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures);
-}
-static void dynCheck_entry_vkGetDeviceGroupPeerMemoryFeatures(
-    VkDevice device,
-    uint32_t heapIndex,
-    uint32_t localDeviceIndex,
-    uint32_t remoteDeviceIndex,
-    VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
-{
-    auto resources = ResourceTracker::get();
-    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetDeviceGroupPeerMemoryFeatures", "VK_VERSION_1_1");
-    }
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupPeerMemoryFeatures");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetDeviceGroupPeerMemoryFeatures(device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures);
-}
-static void entry_vkCmdSetDeviceMask(
-    VkCommandBuffer commandBuffer,
-    uint32_t deviceMask)
-{
-    AEMU_SCOPED_TRACE("vkCmdSetDeviceMask");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdSetDeviceMask(commandBuffer, deviceMask);
-}
-static void entry_vkCmdDispatchBase(
-    VkCommandBuffer commandBuffer,
-    uint32_t baseGroupX,
-    uint32_t baseGroupY,
-    uint32_t baseGroupZ,
-    uint32_t groupCountX,
-    uint32_t groupCountY,
-    uint32_t groupCountZ)
-{
-    AEMU_SCOPED_TRACE("vkCmdDispatchBase");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdDispatchBase(commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ);
-}
-static VkResult entry_vkEnumeratePhysicalDeviceGroups(
-    VkInstance instance,
-    uint32_t* pPhysicalDeviceGroupCount,
-    VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties)
-{
-    AEMU_SCOPED_TRACE("vkEnumeratePhysicalDeviceGroups");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkEnumeratePhysicalDeviceGroups_VkResult_return = (VkResult)0;
-    vkEnumeratePhysicalDeviceGroups_VkResult_return = vkEnc->vkEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
-    return vkEnumeratePhysicalDeviceGroups_VkResult_return;
-}
-static void entry_vkGetImageMemoryRequirements2(
-    VkDevice device,
-    const VkImageMemoryRequirementsInfo2* pInfo,
-    VkMemoryRequirements2* pMemoryRequirements)
-{
-    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    auto resources = ResourceTracker::get();
-    resources->on_vkGetImageMemoryRequirements2(vkEnc, device, pInfo, pMemoryRequirements);
-}
-static void dynCheck_entry_vkGetImageMemoryRequirements2(
-    VkDevice device,
-    const VkImageMemoryRequirementsInfo2* pInfo,
-    VkMemoryRequirements2* pMemoryRequirements)
-{
-    auto resources = ResourceTracker::get();
-    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetImageMemoryRequirements2", "VK_VERSION_1_1");
-    }
-    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    resources->on_vkGetImageMemoryRequirements2(vkEnc, device, pInfo, pMemoryRequirements);
-}
-static void entry_vkGetBufferMemoryRequirements2(
-    VkDevice device,
-    const VkBufferMemoryRequirementsInfo2* pInfo,
-    VkMemoryRequirements2* pMemoryRequirements)
-{
-    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    auto resources = ResourceTracker::get();
-    resources->on_vkGetBufferMemoryRequirements2(vkEnc, device, pInfo, pMemoryRequirements);
-}
-static void dynCheck_entry_vkGetBufferMemoryRequirements2(
-    VkDevice device,
-    const VkBufferMemoryRequirementsInfo2* pInfo,
-    VkMemoryRequirements2* pMemoryRequirements)
-{
-    auto resources = ResourceTracker::get();
-    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetBufferMemoryRequirements2", "VK_VERSION_1_1");
-    }
-    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    resources->on_vkGetBufferMemoryRequirements2(vkEnc, device, pInfo, pMemoryRequirements);
-}
-static void entry_vkGetImageSparseMemoryRequirements2(
-    VkDevice device,
-    const VkImageSparseMemoryRequirementsInfo2* pInfo,
-    uint32_t* pSparseMemoryRequirementCount,
-    VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
-{
-    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetImageSparseMemoryRequirements2(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
-}
-static void dynCheck_entry_vkGetImageSparseMemoryRequirements2(
-    VkDevice device,
-    const VkImageSparseMemoryRequirementsInfo2* pInfo,
-    uint32_t* pSparseMemoryRequirementCount,
-    VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
-{
-    auto resources = ResourceTracker::get();
-    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetImageSparseMemoryRequirements2", "VK_VERSION_1_1");
-    }
-    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetImageSparseMemoryRequirements2(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
-}
-static void entry_vkGetPhysicalDeviceFeatures2(
-    VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceFeatures2* pFeatures)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFeatures2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceFeatures2(physicalDevice, pFeatures);
-}
-static void entry_vkGetPhysicalDeviceProperties2(
-    VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceProperties2* pProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceProperties2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceProperties2(physicalDevice, pProperties);
-}
-static void entry_vkGetPhysicalDeviceFormatProperties2(
-    VkPhysicalDevice physicalDevice,
-    VkFormat format,
-    VkFormatProperties2* pFormatProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFormatProperties2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceFormatProperties2(physicalDevice, format, pFormatProperties);
-}
-static VkResult entry_vkGetPhysicalDeviceImageFormatProperties2(
-    VkPhysicalDevice physicalDevice,
-    const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
-    VkImageFormatProperties2* pImageFormatProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceImageFormatProperties2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPhysicalDeviceImageFormatProperties2_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkGetPhysicalDeviceImageFormatProperties2_VkResult_return = resources->on_vkGetPhysicalDeviceImageFormatProperties2(vkEnc, VK_SUCCESS, physicalDevice, pImageFormatInfo, pImageFormatProperties);
-    return vkGetPhysicalDeviceImageFormatProperties2_VkResult_return;
-}
-static void entry_vkGetPhysicalDeviceQueueFamilyProperties2(
-    VkPhysicalDevice physicalDevice,
-    uint32_t* pQueueFamilyPropertyCount,
-    VkQueueFamilyProperties2* pQueueFamilyProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceQueueFamilyProperties2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceQueueFamilyProperties2(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);
-}
-static void entry_vkGetPhysicalDeviceMemoryProperties2(
-    VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceMemoryProperties2* pMemoryProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMemoryProperties2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceMemoryProperties2(physicalDevice, pMemoryProperties);
-}
-static void entry_vkGetPhysicalDeviceSparseImageFormatProperties2(
-    VkPhysicalDevice physicalDevice,
-    const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo,
-    uint32_t* pPropertyCount,
-    VkSparseImageFormatProperties2* pProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSparseImageFormatProperties2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceSparseImageFormatProperties2(physicalDevice, pFormatInfo, pPropertyCount, pProperties);
-}
-static void entry_vkTrimCommandPool(
-    VkDevice device,
-    VkCommandPool commandPool,
-    VkCommandPoolTrimFlags flags)
-{
-    AEMU_SCOPED_TRACE("vkTrimCommandPool");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkTrimCommandPool(device, commandPool, flags);
-}
-static void dynCheck_entry_vkTrimCommandPool(
-    VkDevice device,
-    VkCommandPool commandPool,
-    VkCommandPoolTrimFlags flags)
-{
-    auto resources = ResourceTracker::get();
-    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
-    {
-        sOnInvalidDynamicallyCheckedCall("vkTrimCommandPool", "VK_VERSION_1_1");
-    }
-    AEMU_SCOPED_TRACE("vkTrimCommandPool");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkTrimCommandPool(device, commandPool, flags);
-}
-static void entry_vkGetDeviceQueue2(
-    VkDevice device,
-    const VkDeviceQueueInfo2* pQueueInfo,
-    VkQueue* pQueue)
-{
-    AEMU_SCOPED_TRACE("vkGetDeviceQueue2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetDeviceQueue2(device, pQueueInfo, pQueue);
-}
-static void dynCheck_entry_vkGetDeviceQueue2(
-    VkDevice device,
-    const VkDeviceQueueInfo2* pQueueInfo,
-    VkQueue* pQueue)
-{
-    auto resources = ResourceTracker::get();
-    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetDeviceQueue2", "VK_VERSION_1_1");
-    }
-    AEMU_SCOPED_TRACE("vkGetDeviceQueue2");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetDeviceQueue2(device, pQueueInfo, pQueue);
-}
-static VkResult entry_vkCreateSamplerYcbcrConversion(
-    VkDevice device,
-    const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSamplerYcbcrConversion* pYcbcrConversion)
-{
-    AEMU_SCOPED_TRACE("vkCreateSamplerYcbcrConversion");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateSamplerYcbcrConversion_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkCreateSamplerYcbcrConversion_VkResult_return = resources->on_vkCreateSamplerYcbcrConversion(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pYcbcrConversion);
-    return vkCreateSamplerYcbcrConversion_VkResult_return;
-}
-static VkResult dynCheck_entry_vkCreateSamplerYcbcrConversion(
-    VkDevice device,
-    const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSamplerYcbcrConversion* pYcbcrConversion)
-{
-    auto resources = ResourceTracker::get();
-    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
-    {
-        sOnInvalidDynamicallyCheckedCall("vkCreateSamplerYcbcrConversion", "VK_VERSION_1_1");
-    }
-    AEMU_SCOPED_TRACE("vkCreateSamplerYcbcrConversion");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateSamplerYcbcrConversion_VkResult_return = (VkResult)0;
-    vkCreateSamplerYcbcrConversion_VkResult_return = resources->on_vkCreateSamplerYcbcrConversion(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pYcbcrConversion);
-    return vkCreateSamplerYcbcrConversion_VkResult_return;
-}
-static void entry_vkDestroySamplerYcbcrConversion(
-    VkDevice device,
-    VkSamplerYcbcrConversion ycbcrConversion,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroySamplerYcbcrConversion");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    auto resources = ResourceTracker::get();
-    resources->on_vkDestroySamplerYcbcrConversion(vkEnc, device, ycbcrConversion, pAllocator);
-}
-static void dynCheck_entry_vkDestroySamplerYcbcrConversion(
-    VkDevice device,
-    VkSamplerYcbcrConversion ycbcrConversion,
-    const VkAllocationCallbacks* pAllocator)
-{
-    auto resources = ResourceTracker::get();
-    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
-    {
-        sOnInvalidDynamicallyCheckedCall("vkDestroySamplerYcbcrConversion", "VK_VERSION_1_1");
-    }
-    AEMU_SCOPED_TRACE("vkDestroySamplerYcbcrConversion");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    resources->on_vkDestroySamplerYcbcrConversion(vkEnc, device, ycbcrConversion, pAllocator);
-}
-static VkResult entry_vkCreateDescriptorUpdateTemplate(
-    VkDevice device,
-    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
-{
-    AEMU_SCOPED_TRACE("vkCreateDescriptorUpdateTemplate");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateDescriptorUpdateTemplate_VkResult_return = (VkResult)0;
-    vkCreateDescriptorUpdateTemplate_VkResult_return = vkEnc->vkCreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
-    return vkCreateDescriptorUpdateTemplate_VkResult_return;
-}
-static VkResult dynCheck_entry_vkCreateDescriptorUpdateTemplate(
-    VkDevice device,
-    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
-{
-    auto resources = ResourceTracker::get();
-    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
-    {
-        sOnInvalidDynamicallyCheckedCall("vkCreateDescriptorUpdateTemplate", "VK_VERSION_1_1");
-    }
-    AEMU_SCOPED_TRACE("vkCreateDescriptorUpdateTemplate");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateDescriptorUpdateTemplate_VkResult_return = (VkResult)0;
-    vkCreateDescriptorUpdateTemplate_VkResult_return = vkEnc->vkCreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
-    return vkCreateDescriptorUpdateTemplate_VkResult_return;
-}
-static void entry_vkDestroyDescriptorUpdateTemplate(
-    VkDevice device,
-    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorUpdateTemplate");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator);
-}
-static void dynCheck_entry_vkDestroyDescriptorUpdateTemplate(
-    VkDevice device,
-    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-    const VkAllocationCallbacks* pAllocator)
-{
-    auto resources = ResourceTracker::get();
-    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
-    {
-        sOnInvalidDynamicallyCheckedCall("vkDestroyDescriptorUpdateTemplate", "VK_VERSION_1_1");
-    }
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorUpdateTemplate");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator);
-}
-static void entry_vkUpdateDescriptorSetWithTemplate(
-    VkDevice device,
-    VkDescriptorSet descriptorSet,
-    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-    const void* pData)
-{
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplate");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    auto resources = ResourceTracker::get();
-    resources->on_vkUpdateDescriptorSetWithTemplate(vkEnc, device, descriptorSet, descriptorUpdateTemplate, pData);
-}
-static void dynCheck_entry_vkUpdateDescriptorSetWithTemplate(
-    VkDevice device,
-    VkDescriptorSet descriptorSet,
-    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-    const void* pData)
-{
-    auto resources = ResourceTracker::get();
-    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
-    {
-        sOnInvalidDynamicallyCheckedCall("vkUpdateDescriptorSetWithTemplate", "VK_VERSION_1_1");
-    }
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplate");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    resources->on_vkUpdateDescriptorSetWithTemplate(vkEnc, device, descriptorSet, descriptorUpdateTemplate, pData);
-}
-static void entry_vkGetPhysicalDeviceExternalBufferProperties(
-    VkPhysicalDevice physicalDevice,
-    const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
-    VkExternalBufferProperties* pExternalBufferProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalBufferProperties");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceExternalBufferProperties(physicalDevice, pExternalBufferInfo, pExternalBufferProperties);
-}
-static void entry_vkGetPhysicalDeviceExternalFenceProperties(
-    VkPhysicalDevice physicalDevice,
-    const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
-    VkExternalFenceProperties* pExternalFenceProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalFenceProperties");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    auto resources = ResourceTracker::get();
-    resources->on_vkGetPhysicalDeviceExternalFenceProperties(vkEnc, physicalDevice, pExternalFenceInfo, pExternalFenceProperties);
-}
-static void entry_vkGetPhysicalDeviceExternalSemaphoreProperties(
-    VkPhysicalDevice physicalDevice,
-    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
-    VkExternalSemaphoreProperties* pExternalSemaphoreProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalSemaphoreProperties");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties);
-}
-static void entry_vkGetDescriptorSetLayoutSupport(
-    VkDevice device,
-    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
-    VkDescriptorSetLayoutSupport* pSupport)
-{
-    AEMU_SCOPED_TRACE("vkGetDescriptorSetLayoutSupport");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetDescriptorSetLayoutSupport(device, pCreateInfo, pSupport);
-}
-static void dynCheck_entry_vkGetDescriptorSetLayoutSupport(
-    VkDevice device,
-    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
-    VkDescriptorSetLayoutSupport* pSupport)
-{
-    auto resources = ResourceTracker::get();
-    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetDescriptorSetLayoutSupport", "VK_VERSION_1_1");
-    }
-    AEMU_SCOPED_TRACE("vkGetDescriptorSetLayoutSupport");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetDescriptorSetLayoutSupport(device, pCreateInfo, pSupport);
-}
-#endif
-#ifdef VK_KHR_surface
-static void entry_vkDestroySurfaceKHR(
-    VkInstance instance,
-    VkSurfaceKHR surface,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroySurfaceKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroySurfaceKHR(instance, surface, pAllocator);
-}
-static VkResult entry_vkGetPhysicalDeviceSurfaceSupportKHR(
-    VkPhysicalDevice physicalDevice,
-    uint32_t queueFamilyIndex,
-    VkSurfaceKHR surface,
-    VkBool32* pSupported)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceSupportKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPhysicalDeviceSurfaceSupportKHR_VkResult_return = (VkResult)0;
-    vkGetPhysicalDeviceSurfaceSupportKHR_VkResult_return = vkEnc->vkGetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported);
-    return vkGetPhysicalDeviceSurfaceSupportKHR_VkResult_return;
-}
-static VkResult entry_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
-    VkPhysicalDevice physicalDevice,
-    VkSurfaceKHR surface,
-    VkSurfaceCapabilitiesKHR* pSurfaceCapabilities)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPhysicalDeviceSurfaceCapabilitiesKHR_VkResult_return = (VkResult)0;
-    vkGetPhysicalDeviceSurfaceCapabilitiesKHR_VkResult_return = vkEnc->vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities);
-    return vkGetPhysicalDeviceSurfaceCapabilitiesKHR_VkResult_return;
-}
-static VkResult entry_vkGetPhysicalDeviceSurfaceFormatsKHR(
-    VkPhysicalDevice physicalDevice,
-    VkSurfaceKHR surface,
-    uint32_t* pSurfaceFormatCount,
-    VkSurfaceFormatKHR* pSurfaceFormats)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceFormatsKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPhysicalDeviceSurfaceFormatsKHR_VkResult_return = (VkResult)0;
-    vkGetPhysicalDeviceSurfaceFormatsKHR_VkResult_return = vkEnc->vkGetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount, pSurfaceFormats);
-    return vkGetPhysicalDeviceSurfaceFormatsKHR_VkResult_return;
-}
-static VkResult entry_vkGetPhysicalDeviceSurfacePresentModesKHR(
-    VkPhysicalDevice physicalDevice,
-    VkSurfaceKHR surface,
-    uint32_t* pPresentModeCount,
-    VkPresentModeKHR* pPresentModes)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfacePresentModesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPhysicalDeviceSurfacePresentModesKHR_VkResult_return = (VkResult)0;
-    vkGetPhysicalDeviceSurfacePresentModesKHR_VkResult_return = vkEnc->vkGetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount, pPresentModes);
-    return vkGetPhysicalDeviceSurfacePresentModesKHR_VkResult_return;
-}
-#endif
-#ifdef VK_KHR_swapchain
-static VkResult entry_vkCreateSwapchainKHR(
-    VkDevice device,
-    const VkSwapchainCreateInfoKHR* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSwapchainKHR* pSwapchain)
-{
-    AEMU_SCOPED_TRACE("vkCreateSwapchainKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateSwapchainKHR_VkResult_return = (VkResult)0;
-    vkCreateSwapchainKHR_VkResult_return = vkEnc->vkCreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
-    return vkCreateSwapchainKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkCreateSwapchainKHR(
-    VkDevice device,
-    const VkSwapchainCreateInfoKHR* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSwapchainKHR* pSwapchain)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_swapchain"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkCreateSwapchainKHR", "VK_KHR_swapchain");
-    }
-    AEMU_SCOPED_TRACE("vkCreateSwapchainKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateSwapchainKHR_VkResult_return = (VkResult)0;
-    vkCreateSwapchainKHR_VkResult_return = vkEnc->vkCreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
-    return vkCreateSwapchainKHR_VkResult_return;
-}
-static void entry_vkDestroySwapchainKHR(
-    VkDevice device,
-    VkSwapchainKHR swapchain,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroySwapchainKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroySwapchainKHR(device, swapchain, pAllocator);
-}
-static void dynCheck_entry_vkDestroySwapchainKHR(
-    VkDevice device,
-    VkSwapchainKHR swapchain,
-    const VkAllocationCallbacks* pAllocator)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_swapchain"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkDestroySwapchainKHR", "VK_KHR_swapchain");
-    }
-    AEMU_SCOPED_TRACE("vkDestroySwapchainKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroySwapchainKHR(device, swapchain, pAllocator);
-}
-static VkResult entry_vkGetSwapchainImagesKHR(
-    VkDevice device,
-    VkSwapchainKHR swapchain,
-    uint32_t* pSwapchainImageCount,
-    VkImage* pSwapchainImages)
-{
-    AEMU_SCOPED_TRACE("vkGetSwapchainImagesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetSwapchainImagesKHR_VkResult_return = (VkResult)0;
-    vkGetSwapchainImagesKHR_VkResult_return = vkEnc->vkGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
-    return vkGetSwapchainImagesKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetSwapchainImagesKHR(
-    VkDevice device,
-    VkSwapchainKHR swapchain,
-    uint32_t* pSwapchainImageCount,
-    VkImage* pSwapchainImages)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_swapchain"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetSwapchainImagesKHR", "VK_KHR_swapchain");
-    }
-    AEMU_SCOPED_TRACE("vkGetSwapchainImagesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetSwapchainImagesKHR_VkResult_return = (VkResult)0;
-    vkGetSwapchainImagesKHR_VkResult_return = vkEnc->vkGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
-    return vkGetSwapchainImagesKHR_VkResult_return;
-}
-static VkResult entry_vkAcquireNextImageKHR(
-    VkDevice device,
-    VkSwapchainKHR swapchain,
-    uint64_t timeout,
-    VkSemaphore semaphore,
-    VkFence fence,
-    uint32_t* pImageIndex)
-{
-    AEMU_SCOPED_TRACE("vkAcquireNextImageKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkAcquireNextImageKHR_VkResult_return = (VkResult)0;
-    vkAcquireNextImageKHR_VkResult_return = vkEnc->vkAcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
-    return vkAcquireNextImageKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkAcquireNextImageKHR(
-    VkDevice device,
-    VkSwapchainKHR swapchain,
-    uint64_t timeout,
-    VkSemaphore semaphore,
-    VkFence fence,
-    uint32_t* pImageIndex)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_swapchain"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkAcquireNextImageKHR", "VK_KHR_swapchain");
-    }
-    AEMU_SCOPED_TRACE("vkAcquireNextImageKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkAcquireNextImageKHR_VkResult_return = (VkResult)0;
-    vkAcquireNextImageKHR_VkResult_return = vkEnc->vkAcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
-    return vkAcquireNextImageKHR_VkResult_return;
-}
-static VkResult entry_vkQueuePresentKHR(
-    VkQueue queue,
-    const VkPresentInfoKHR* pPresentInfo)
-{
-    AEMU_SCOPED_TRACE("vkQueuePresentKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkQueuePresentKHR_VkResult_return = (VkResult)0;
-    vkQueuePresentKHR_VkResult_return = vkEnc->vkQueuePresentKHR(queue, pPresentInfo);
-    return vkQueuePresentKHR_VkResult_return;
-}
-static VkResult entry_vkGetDeviceGroupPresentCapabilitiesKHR(
-    VkDevice device,
-    VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities)
-{
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupPresentCapabilitiesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetDeviceGroupPresentCapabilitiesKHR_VkResult_return = (VkResult)0;
-    vkGetDeviceGroupPresentCapabilitiesKHR_VkResult_return = vkEnc->vkGetDeviceGroupPresentCapabilitiesKHR(device, pDeviceGroupPresentCapabilities);
-    return vkGetDeviceGroupPresentCapabilitiesKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetDeviceGroupPresentCapabilitiesKHR(
-    VkDevice device,
-    VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_swapchain"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetDeviceGroupPresentCapabilitiesKHR", "VK_KHR_swapchain");
-    }
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupPresentCapabilitiesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetDeviceGroupPresentCapabilitiesKHR_VkResult_return = (VkResult)0;
-    vkGetDeviceGroupPresentCapabilitiesKHR_VkResult_return = vkEnc->vkGetDeviceGroupPresentCapabilitiesKHR(device, pDeviceGroupPresentCapabilities);
-    return vkGetDeviceGroupPresentCapabilitiesKHR_VkResult_return;
-}
-static VkResult entry_vkGetDeviceGroupSurfacePresentModesKHR(
-    VkDevice device,
-    VkSurfaceKHR surface,
-    VkDeviceGroupPresentModeFlagsKHR* pModes)
-{
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupSurfacePresentModesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetDeviceGroupSurfacePresentModesKHR_VkResult_return = (VkResult)0;
-    vkGetDeviceGroupSurfacePresentModesKHR_VkResult_return = vkEnc->vkGetDeviceGroupSurfacePresentModesKHR(device, surface, pModes);
-    return vkGetDeviceGroupSurfacePresentModesKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetDeviceGroupSurfacePresentModesKHR(
-    VkDevice device,
-    VkSurfaceKHR surface,
-    VkDeviceGroupPresentModeFlagsKHR* pModes)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_swapchain"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetDeviceGroupSurfacePresentModesKHR", "VK_KHR_swapchain");
-    }
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupSurfacePresentModesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetDeviceGroupSurfacePresentModesKHR_VkResult_return = (VkResult)0;
-    vkGetDeviceGroupSurfacePresentModesKHR_VkResult_return = vkEnc->vkGetDeviceGroupSurfacePresentModesKHR(device, surface, pModes);
-    return vkGetDeviceGroupSurfacePresentModesKHR_VkResult_return;
-}
-static VkResult entry_vkGetPhysicalDevicePresentRectanglesKHR(
-    VkPhysicalDevice physicalDevice,
-    VkSurfaceKHR surface,
-    uint32_t* pRectCount,
-    VkRect2D* pRects)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDevicePresentRectanglesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPhysicalDevicePresentRectanglesKHR_VkResult_return = (VkResult)0;
-    vkGetPhysicalDevicePresentRectanglesKHR_VkResult_return = vkEnc->vkGetPhysicalDevicePresentRectanglesKHR(physicalDevice, surface, pRectCount, pRects);
-    return vkGetPhysicalDevicePresentRectanglesKHR_VkResult_return;
-}
-static VkResult entry_vkAcquireNextImage2KHR(
-    VkDevice device,
-    const VkAcquireNextImageInfoKHR* pAcquireInfo,
-    uint32_t* pImageIndex)
-{
-    AEMU_SCOPED_TRACE("vkAcquireNextImage2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkAcquireNextImage2KHR_VkResult_return = (VkResult)0;
-    vkAcquireNextImage2KHR_VkResult_return = vkEnc->vkAcquireNextImage2KHR(device, pAcquireInfo, pImageIndex);
-    return vkAcquireNextImage2KHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkAcquireNextImage2KHR(
-    VkDevice device,
-    const VkAcquireNextImageInfoKHR* pAcquireInfo,
-    uint32_t* pImageIndex)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_swapchain"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkAcquireNextImage2KHR", "VK_KHR_swapchain");
-    }
-    AEMU_SCOPED_TRACE("vkAcquireNextImage2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkAcquireNextImage2KHR_VkResult_return = (VkResult)0;
-    vkAcquireNextImage2KHR_VkResult_return = vkEnc->vkAcquireNextImage2KHR(device, pAcquireInfo, pImageIndex);
-    return vkAcquireNextImage2KHR_VkResult_return;
-}
-#endif
-#ifdef VK_KHR_display
-static VkResult entry_vkGetPhysicalDeviceDisplayPropertiesKHR(
-    VkPhysicalDevice physicalDevice,
-    uint32_t* pPropertyCount,
-    VkDisplayPropertiesKHR* pProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayPropertiesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPhysicalDeviceDisplayPropertiesKHR_VkResult_return = (VkResult)0;
-    vkGetPhysicalDeviceDisplayPropertiesKHR_VkResult_return = vkEnc->vkGetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, pPropertyCount, pProperties);
-    return vkGetPhysicalDeviceDisplayPropertiesKHR_VkResult_return;
-}
-static VkResult entry_vkGetPhysicalDeviceDisplayPlanePropertiesKHR(
-    VkPhysicalDevice physicalDevice,
-    uint32_t* pPropertyCount,
-    VkDisplayPlanePropertiesKHR* pProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayPlanePropertiesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPhysicalDeviceDisplayPlanePropertiesKHR_VkResult_return = (VkResult)0;
-    vkGetPhysicalDeviceDisplayPlanePropertiesKHR_VkResult_return = vkEnc->vkGetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties);
-    return vkGetPhysicalDeviceDisplayPlanePropertiesKHR_VkResult_return;
-}
-static VkResult entry_vkGetDisplayPlaneSupportedDisplaysKHR(
-    VkPhysicalDevice physicalDevice,
-    uint32_t planeIndex,
-    uint32_t* pDisplayCount,
-    VkDisplayKHR* pDisplays)
-{
-    AEMU_SCOPED_TRACE("vkGetDisplayPlaneSupportedDisplaysKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetDisplayPlaneSupportedDisplaysKHR_VkResult_return = (VkResult)0;
-    vkGetDisplayPlaneSupportedDisplaysKHR_VkResult_return = vkEnc->vkGetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
-    return vkGetDisplayPlaneSupportedDisplaysKHR_VkResult_return;
-}
-static VkResult entry_vkGetDisplayModePropertiesKHR(
-    VkPhysicalDevice physicalDevice,
-    VkDisplayKHR display,
-    uint32_t* pPropertyCount,
-    VkDisplayModePropertiesKHR* pProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetDisplayModePropertiesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetDisplayModePropertiesKHR_VkResult_return = (VkResult)0;
-    vkGetDisplayModePropertiesKHR_VkResult_return = vkEnc->vkGetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount, pProperties);
-    return vkGetDisplayModePropertiesKHR_VkResult_return;
-}
-static VkResult entry_vkCreateDisplayModeKHR(
-    VkPhysicalDevice physicalDevice,
-    VkDisplayKHR display,
-    const VkDisplayModeCreateInfoKHR* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkDisplayModeKHR* pMode)
-{
-    AEMU_SCOPED_TRACE("vkCreateDisplayModeKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateDisplayModeKHR_VkResult_return = (VkResult)0;
-    vkCreateDisplayModeKHR_VkResult_return = vkEnc->vkCreateDisplayModeKHR(physicalDevice, display, pCreateInfo, pAllocator, pMode);
-    return vkCreateDisplayModeKHR_VkResult_return;
-}
-static VkResult entry_vkGetDisplayPlaneCapabilitiesKHR(
-    VkPhysicalDevice physicalDevice,
-    VkDisplayModeKHR mode,
-    uint32_t planeIndex,
-    VkDisplayPlaneCapabilitiesKHR* pCapabilities)
-{
-    AEMU_SCOPED_TRACE("vkGetDisplayPlaneCapabilitiesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetDisplayPlaneCapabilitiesKHR_VkResult_return = (VkResult)0;
-    vkGetDisplayPlaneCapabilitiesKHR_VkResult_return = vkEnc->vkGetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
-    return vkGetDisplayPlaneCapabilitiesKHR_VkResult_return;
-}
-static VkResult entry_vkCreateDisplayPlaneSurfaceKHR(
-    VkInstance instance,
-    const VkDisplaySurfaceCreateInfoKHR* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
-{
-    AEMU_SCOPED_TRACE("vkCreateDisplayPlaneSurfaceKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateDisplayPlaneSurfaceKHR_VkResult_return = (VkResult)0;
-    vkCreateDisplayPlaneSurfaceKHR_VkResult_return = vkEnc->vkCreateDisplayPlaneSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
-    return vkCreateDisplayPlaneSurfaceKHR_VkResult_return;
-}
-#endif
-#ifdef VK_KHR_display_swapchain
-static VkResult entry_vkCreateSharedSwapchainsKHR(
-    VkDevice device,
-    uint32_t swapchainCount,
-    const VkSwapchainCreateInfoKHR* pCreateInfos,
-    const VkAllocationCallbacks* pAllocator,
-    VkSwapchainKHR* pSwapchains)
-{
-    AEMU_SCOPED_TRACE("vkCreateSharedSwapchainsKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateSharedSwapchainsKHR_VkResult_return = (VkResult)0;
-    vkCreateSharedSwapchainsKHR_VkResult_return = vkEnc->vkCreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
-    return vkCreateSharedSwapchainsKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkCreateSharedSwapchainsKHR(
-    VkDevice device,
-    uint32_t swapchainCount,
-    const VkSwapchainCreateInfoKHR* pCreateInfos,
-    const VkAllocationCallbacks* pAllocator,
-    VkSwapchainKHR* pSwapchains)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_display_swapchain"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkCreateSharedSwapchainsKHR", "VK_KHR_display_swapchain");
-    }
-    AEMU_SCOPED_TRACE("vkCreateSharedSwapchainsKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateSharedSwapchainsKHR_VkResult_return = (VkResult)0;
-    vkCreateSharedSwapchainsKHR_VkResult_return = vkEnc->vkCreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
-    return vkCreateSharedSwapchainsKHR_VkResult_return;
-}
-#endif
-#ifdef VK_KHR_xlib_surface
-static VkResult entry_vkCreateXlibSurfaceKHR(
-    VkInstance instance,
-    const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
-{
-    AEMU_SCOPED_TRACE("vkCreateXlibSurfaceKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateXlibSurfaceKHR_VkResult_return = (VkResult)0;
-    vkCreateXlibSurfaceKHR_VkResult_return = vkEnc->vkCreateXlibSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
-    return vkCreateXlibSurfaceKHR_VkResult_return;
-}
-static VkBool32 entry_vkGetPhysicalDeviceXlibPresentationSupportKHR(
-    VkPhysicalDevice physicalDevice,
-    uint32_t queueFamilyIndex,
-    Display* dpy,
-    VisualID visualID)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceXlibPresentationSupportKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkBool32 vkGetPhysicalDeviceXlibPresentationSupportKHR_VkBool32_return = (VkBool32)0;
-    vkGetPhysicalDeviceXlibPresentationSupportKHR_VkBool32_return = vkEnc->vkGetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIndex, dpy, visualID);
-    return vkGetPhysicalDeviceXlibPresentationSupportKHR_VkBool32_return;
-}
-#endif
-#ifdef VK_KHR_xcb_surface
-static VkResult entry_vkCreateXcbSurfaceKHR(
-    VkInstance instance,
-    const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
-{
-    AEMU_SCOPED_TRACE("vkCreateXcbSurfaceKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateXcbSurfaceKHR_VkResult_return = (VkResult)0;
-    vkCreateXcbSurfaceKHR_VkResult_return = vkEnc->vkCreateXcbSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
-    return vkCreateXcbSurfaceKHR_VkResult_return;
-}
-static VkBool32 entry_vkGetPhysicalDeviceXcbPresentationSupportKHR(
-    VkPhysicalDevice physicalDevice,
-    uint32_t queueFamilyIndex,
-    xcb_connection_t* connection,
-    xcb_visualid_t visual_id)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceXcbPresentationSupportKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkBool32 vkGetPhysicalDeviceXcbPresentationSupportKHR_VkBool32_return = (VkBool32)0;
-    vkGetPhysicalDeviceXcbPresentationSupportKHR_VkBool32_return = vkEnc->vkGetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice, queueFamilyIndex, connection, visual_id);
-    return vkGetPhysicalDeviceXcbPresentationSupportKHR_VkBool32_return;
-}
-#endif
-#ifdef VK_KHR_wayland_surface
-static VkResult entry_vkCreateWaylandSurfaceKHR(
-    VkInstance instance,
-    const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
-{
-    AEMU_SCOPED_TRACE("vkCreateWaylandSurfaceKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateWaylandSurfaceKHR_VkResult_return = (VkResult)0;
-    vkCreateWaylandSurfaceKHR_VkResult_return = vkEnc->vkCreateWaylandSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
-    return vkCreateWaylandSurfaceKHR_VkResult_return;
-}
-static VkBool32 entry_vkGetPhysicalDeviceWaylandPresentationSupportKHR(
-    VkPhysicalDevice physicalDevice,
-    uint32_t queueFamilyIndex,
-    wl_display* display)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceWaylandPresentationSupportKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkBool32 vkGetPhysicalDeviceWaylandPresentationSupportKHR_VkBool32_return = (VkBool32)0;
-    vkGetPhysicalDeviceWaylandPresentationSupportKHR_VkBool32_return = vkEnc->vkGetPhysicalDeviceWaylandPresentationSupportKHR(physicalDevice, queueFamilyIndex, display);
-    return vkGetPhysicalDeviceWaylandPresentationSupportKHR_VkBool32_return;
-}
-#endif
-#ifdef VK_KHR_mir_surface
-static VkResult entry_vkCreateMirSurfaceKHR(
-    VkInstance instance,
-    const VkMirSurfaceCreateInfoKHR* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
-{
-    AEMU_SCOPED_TRACE("vkCreateMirSurfaceKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateMirSurfaceKHR_VkResult_return = (VkResult)0;
-    vkCreateMirSurfaceKHR_VkResult_return = vkEnc->vkCreateMirSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
-    return vkCreateMirSurfaceKHR_VkResult_return;
-}
-static VkBool32 entry_vkGetPhysicalDeviceMirPresentationSupportKHR(
-    VkPhysicalDevice physicalDevice,
-    uint32_t queueFamilyIndex,
-    MirConnection* connection)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMirPresentationSupportKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkBool32 vkGetPhysicalDeviceMirPresentationSupportKHR_VkBool32_return = (VkBool32)0;
-    vkGetPhysicalDeviceMirPresentationSupportKHR_VkBool32_return = vkEnc->vkGetPhysicalDeviceMirPresentationSupportKHR(physicalDevice, queueFamilyIndex, connection);
-    return vkGetPhysicalDeviceMirPresentationSupportKHR_VkBool32_return;
-}
-#endif
-#ifdef VK_KHR_android_surface
-static VkResult entry_vkCreateAndroidSurfaceKHR(
-    VkInstance instance,
-    const VkAndroidSurfaceCreateInfoKHR* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
-{
-    AEMU_SCOPED_TRACE("vkCreateAndroidSurfaceKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateAndroidSurfaceKHR_VkResult_return = (VkResult)0;
-    vkCreateAndroidSurfaceKHR_VkResult_return = vkEnc->vkCreateAndroidSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
-    return vkCreateAndroidSurfaceKHR_VkResult_return;
-}
-#endif
-#ifdef VK_KHR_win32_surface
-static VkResult entry_vkCreateWin32SurfaceKHR(
-    VkInstance instance,
-    const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
-{
-    AEMU_SCOPED_TRACE("vkCreateWin32SurfaceKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateWin32SurfaceKHR_VkResult_return = (VkResult)0;
-    vkCreateWin32SurfaceKHR_VkResult_return = vkEnc->vkCreateWin32SurfaceKHR(instance, pCreateInfo, pAllocator, pSurface);
-    return vkCreateWin32SurfaceKHR_VkResult_return;
-}
-static VkBool32 entry_vkGetPhysicalDeviceWin32PresentationSupportKHR(
-    VkPhysicalDevice physicalDevice,
-    uint32_t queueFamilyIndex)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceWin32PresentationSupportKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkBool32 vkGetPhysicalDeviceWin32PresentationSupportKHR_VkBool32_return = (VkBool32)0;
-    vkGetPhysicalDeviceWin32PresentationSupportKHR_VkBool32_return = vkEnc->vkGetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice, queueFamilyIndex);
-    return vkGetPhysicalDeviceWin32PresentationSupportKHR_VkBool32_return;
-}
-#endif
-#ifdef VK_KHR_sampler_mirror_clamp_to_edge
-#endif
-#ifdef VK_KHR_multiview
-#endif
-#ifdef VK_KHR_get_physical_device_properties2
-static void entry_vkGetPhysicalDeviceFeatures2KHR(
-    VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceFeatures2* pFeatures)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFeatures2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceFeatures2KHR(physicalDevice, pFeatures);
-}
-static void entry_vkGetPhysicalDeviceProperties2KHR(
-    VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceProperties2* pProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceProperties2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceProperties2KHR(physicalDevice, pProperties);
-}
-static void entry_vkGetPhysicalDeviceFormatProperties2KHR(
-    VkPhysicalDevice physicalDevice,
-    VkFormat format,
-    VkFormatProperties2* pFormatProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFormatProperties2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceFormatProperties2KHR(physicalDevice, format, pFormatProperties);
-}
-static VkResult entry_vkGetPhysicalDeviceImageFormatProperties2KHR(
-    VkPhysicalDevice physicalDevice,
-    const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
-    VkImageFormatProperties2* pImageFormatProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceImageFormatProperties2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPhysicalDeviceImageFormatProperties2KHR_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkGetPhysicalDeviceImageFormatProperties2KHR_VkResult_return = resources->on_vkGetPhysicalDeviceImageFormatProperties2KHR(vkEnc, VK_SUCCESS, physicalDevice, pImageFormatInfo, pImageFormatProperties);
-    return vkGetPhysicalDeviceImageFormatProperties2KHR_VkResult_return;
-}
-static void entry_vkGetPhysicalDeviceQueueFamilyProperties2KHR(
-    VkPhysicalDevice physicalDevice,
-    uint32_t* pQueueFamilyPropertyCount,
-    VkQueueFamilyProperties2* pQueueFamilyProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceQueueFamilyProperties2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);
-}
-static void entry_vkGetPhysicalDeviceMemoryProperties2KHR(
-    VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceMemoryProperties2* pMemoryProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMemoryProperties2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceMemoryProperties2KHR(physicalDevice, pMemoryProperties);
-}
-static void entry_vkGetPhysicalDeviceSparseImageFormatProperties2KHR(
-    VkPhysicalDevice physicalDevice,
-    const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo,
-    uint32_t* pPropertyCount,
-    VkSparseImageFormatProperties2* pProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSparseImageFormatProperties2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceSparseImageFormatProperties2KHR(physicalDevice, pFormatInfo, pPropertyCount, pProperties);
-}
-#endif
-#ifdef VK_KHR_device_group
-static void entry_vkGetDeviceGroupPeerMemoryFeaturesKHR(
-    VkDevice device,
-    uint32_t heapIndex,
-    uint32_t localDeviceIndex,
-    uint32_t remoteDeviceIndex,
-    VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
-{
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupPeerMemoryFeaturesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetDeviceGroupPeerMemoryFeaturesKHR(device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures);
-}
-static void dynCheck_entry_vkGetDeviceGroupPeerMemoryFeaturesKHR(
-    VkDevice device,
-    uint32_t heapIndex,
-    uint32_t localDeviceIndex,
-    uint32_t remoteDeviceIndex,
-    VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_device_group"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetDeviceGroupPeerMemoryFeaturesKHR", "VK_KHR_device_group");
-    }
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupPeerMemoryFeaturesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetDeviceGroupPeerMemoryFeaturesKHR(device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures);
-}
-static void entry_vkCmdSetDeviceMaskKHR(
-    VkCommandBuffer commandBuffer,
-    uint32_t deviceMask)
-{
-    AEMU_SCOPED_TRACE("vkCmdSetDeviceMaskKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdSetDeviceMaskKHR(commandBuffer, deviceMask);
-}
-static void entry_vkCmdDispatchBaseKHR(
-    VkCommandBuffer commandBuffer,
-    uint32_t baseGroupX,
-    uint32_t baseGroupY,
-    uint32_t baseGroupZ,
-    uint32_t groupCountX,
-    uint32_t groupCountY,
-    uint32_t groupCountZ)
-{
-    AEMU_SCOPED_TRACE("vkCmdDispatchBaseKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdDispatchBaseKHR(commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ);
-}
-#endif
-#ifdef VK_KHR_shader_draw_parameters
-#endif
-#ifdef VK_KHR_maintenance1
-static void entry_vkTrimCommandPoolKHR(
-    VkDevice device,
-    VkCommandPool commandPool,
-    VkCommandPoolTrimFlags flags)
-{
-    AEMU_SCOPED_TRACE("vkTrimCommandPoolKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkTrimCommandPoolKHR(device, commandPool, flags);
-}
-static void dynCheck_entry_vkTrimCommandPoolKHR(
-    VkDevice device,
-    VkCommandPool commandPool,
-    VkCommandPoolTrimFlags flags)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_maintenance1"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkTrimCommandPoolKHR", "VK_KHR_maintenance1");
-    }
-    AEMU_SCOPED_TRACE("vkTrimCommandPoolKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkTrimCommandPoolKHR(device, commandPool, flags);
-}
-#endif
-#ifdef VK_KHR_device_group_creation
-static VkResult entry_vkEnumeratePhysicalDeviceGroupsKHR(
-    VkInstance instance,
-    uint32_t* pPhysicalDeviceGroupCount,
-    VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties)
-{
-    AEMU_SCOPED_TRACE("vkEnumeratePhysicalDeviceGroupsKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkEnumeratePhysicalDeviceGroupsKHR_VkResult_return = (VkResult)0;
-    vkEnumeratePhysicalDeviceGroupsKHR_VkResult_return = vkEnc->vkEnumeratePhysicalDeviceGroupsKHR(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
-    return vkEnumeratePhysicalDeviceGroupsKHR_VkResult_return;
-}
-#endif
-#ifdef VK_KHR_external_memory_capabilities
-static void entry_vkGetPhysicalDeviceExternalBufferPropertiesKHR(
-    VkPhysicalDevice physicalDevice,
-    const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
-    VkExternalBufferProperties* pExternalBufferProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalBufferPropertiesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceExternalBufferPropertiesKHR(physicalDevice, pExternalBufferInfo, pExternalBufferProperties);
-}
-#endif
-#ifdef VK_KHR_external_memory
-#endif
-#ifdef VK_KHR_external_memory_win32
-static VkResult entry_vkGetMemoryWin32HandleKHR(
-    VkDevice device,
-    const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo,
-    HANDLE* pHandle)
-{
-    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandleKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetMemoryWin32HandleKHR_VkResult_return = (VkResult)0;
-    vkGetMemoryWin32HandleKHR_VkResult_return = vkEnc->vkGetMemoryWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
-    return vkGetMemoryWin32HandleKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetMemoryWin32HandleKHR(
-    VkDevice device,
-    const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo,
-    HANDLE* pHandle)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_external_memory_win32"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetMemoryWin32HandleKHR", "VK_KHR_external_memory_win32");
-    }
-    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandleKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetMemoryWin32HandleKHR_VkResult_return = (VkResult)0;
-    vkGetMemoryWin32HandleKHR_VkResult_return = vkEnc->vkGetMemoryWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
-    return vkGetMemoryWin32HandleKHR_VkResult_return;
-}
-static VkResult entry_vkGetMemoryWin32HandlePropertiesKHR(
-    VkDevice device,
-    VkExternalMemoryHandleTypeFlagBits handleType,
-    HANDLE handle,
-    VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandlePropertiesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetMemoryWin32HandlePropertiesKHR_VkResult_return = (VkResult)0;
-    vkGetMemoryWin32HandlePropertiesKHR_VkResult_return = vkEnc->vkGetMemoryWin32HandlePropertiesKHR(device, handleType, handle, pMemoryWin32HandleProperties);
-    return vkGetMemoryWin32HandlePropertiesKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetMemoryWin32HandlePropertiesKHR(
-    VkDevice device,
-    VkExternalMemoryHandleTypeFlagBits handleType,
-    HANDLE handle,
-    VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_external_memory_win32"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetMemoryWin32HandlePropertiesKHR", "VK_KHR_external_memory_win32");
-    }
-    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandlePropertiesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetMemoryWin32HandlePropertiesKHR_VkResult_return = (VkResult)0;
-    vkGetMemoryWin32HandlePropertiesKHR_VkResult_return = vkEnc->vkGetMemoryWin32HandlePropertiesKHR(device, handleType, handle, pMemoryWin32HandleProperties);
-    return vkGetMemoryWin32HandlePropertiesKHR_VkResult_return;
-}
-#endif
-#ifdef VK_KHR_external_memory_fd
-static VkResult entry_vkGetMemoryFdKHR(
-    VkDevice device,
-    const VkMemoryGetFdInfoKHR* pGetFdInfo,
-    int* pFd)
-{
-    AEMU_SCOPED_TRACE("vkGetMemoryFdKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetMemoryFdKHR_VkResult_return = (VkResult)0;
-    vkGetMemoryFdKHR_VkResult_return = vkEnc->vkGetMemoryFdKHR(device, pGetFdInfo, pFd);
-    return vkGetMemoryFdKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetMemoryFdKHR(
-    VkDevice device,
-    const VkMemoryGetFdInfoKHR* pGetFdInfo,
-    int* pFd)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_external_memory_fd"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetMemoryFdKHR", "VK_KHR_external_memory_fd");
-    }
-    AEMU_SCOPED_TRACE("vkGetMemoryFdKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetMemoryFdKHR_VkResult_return = (VkResult)0;
-    vkGetMemoryFdKHR_VkResult_return = vkEnc->vkGetMemoryFdKHR(device, pGetFdInfo, pFd);
-    return vkGetMemoryFdKHR_VkResult_return;
-}
-static VkResult entry_vkGetMemoryFdPropertiesKHR(
-    VkDevice device,
-    VkExternalMemoryHandleTypeFlagBits handleType,
-    int fd,
-    VkMemoryFdPropertiesKHR* pMemoryFdProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetMemoryFdPropertiesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetMemoryFdPropertiesKHR_VkResult_return = (VkResult)0;
-    vkGetMemoryFdPropertiesKHR_VkResult_return = vkEnc->vkGetMemoryFdPropertiesKHR(device, handleType, fd, pMemoryFdProperties);
-    return vkGetMemoryFdPropertiesKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetMemoryFdPropertiesKHR(
-    VkDevice device,
-    VkExternalMemoryHandleTypeFlagBits handleType,
-    int fd,
-    VkMemoryFdPropertiesKHR* pMemoryFdProperties)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_external_memory_fd"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetMemoryFdPropertiesKHR", "VK_KHR_external_memory_fd");
-    }
-    AEMU_SCOPED_TRACE("vkGetMemoryFdPropertiesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetMemoryFdPropertiesKHR_VkResult_return = (VkResult)0;
-    vkGetMemoryFdPropertiesKHR_VkResult_return = vkEnc->vkGetMemoryFdPropertiesKHR(device, handleType, fd, pMemoryFdProperties);
-    return vkGetMemoryFdPropertiesKHR_VkResult_return;
-}
-#endif
-#ifdef VK_KHR_win32_keyed_mutex
-#endif
-#ifdef VK_KHR_external_semaphore_capabilities
-static void entry_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(
-    VkPhysicalDevice physicalDevice,
-    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
-    VkExternalSemaphoreProperties* pExternalSemaphoreProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalSemaphorePropertiesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties);
-}
-#endif
-#ifdef VK_KHR_external_semaphore
-#endif
-#ifdef VK_KHR_external_semaphore_win32
-static VkResult entry_vkImportSemaphoreWin32HandleKHR(
-    VkDevice device,
-    const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo)
-{
-    AEMU_SCOPED_TRACE("vkImportSemaphoreWin32HandleKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkImportSemaphoreWin32HandleKHR_VkResult_return = (VkResult)0;
-    vkImportSemaphoreWin32HandleKHR_VkResult_return = vkEnc->vkImportSemaphoreWin32HandleKHR(device, pImportSemaphoreWin32HandleInfo);
-    return vkImportSemaphoreWin32HandleKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkImportSemaphoreWin32HandleKHR(
-    VkDevice device,
-    const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_win32"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkImportSemaphoreWin32HandleKHR", "VK_KHR_external_semaphore_win32");
-    }
-    AEMU_SCOPED_TRACE("vkImportSemaphoreWin32HandleKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkImportSemaphoreWin32HandleKHR_VkResult_return = (VkResult)0;
-    vkImportSemaphoreWin32HandleKHR_VkResult_return = vkEnc->vkImportSemaphoreWin32HandleKHR(device, pImportSemaphoreWin32HandleInfo);
-    return vkImportSemaphoreWin32HandleKHR_VkResult_return;
-}
-static VkResult entry_vkGetSemaphoreWin32HandleKHR(
-    VkDevice device,
-    const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo,
-    HANDLE* pHandle)
-{
-    AEMU_SCOPED_TRACE("vkGetSemaphoreWin32HandleKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetSemaphoreWin32HandleKHR_VkResult_return = (VkResult)0;
-    vkGetSemaphoreWin32HandleKHR_VkResult_return = vkEnc->vkGetSemaphoreWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
-    return vkGetSemaphoreWin32HandleKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetSemaphoreWin32HandleKHR(
-    VkDevice device,
-    const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo,
-    HANDLE* pHandle)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_win32"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetSemaphoreWin32HandleKHR", "VK_KHR_external_semaphore_win32");
-    }
-    AEMU_SCOPED_TRACE("vkGetSemaphoreWin32HandleKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetSemaphoreWin32HandleKHR_VkResult_return = (VkResult)0;
-    vkGetSemaphoreWin32HandleKHR_VkResult_return = vkEnc->vkGetSemaphoreWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
-    return vkGetSemaphoreWin32HandleKHR_VkResult_return;
-}
-#endif
-#ifdef VK_KHR_external_semaphore_fd
-static VkResult entry_vkImportSemaphoreFdKHR(
-    VkDevice device,
-    const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo)
-{
-    AEMU_SCOPED_TRACE("vkImportSemaphoreFdKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkImportSemaphoreFdKHR_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkImportSemaphoreFdKHR_VkResult_return = resources->on_vkImportSemaphoreFdKHR(vkEnc, VK_SUCCESS, device, pImportSemaphoreFdInfo);
-    return vkImportSemaphoreFdKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkImportSemaphoreFdKHR(
-    VkDevice device,
-    const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_fd"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkImportSemaphoreFdKHR", "VK_KHR_external_semaphore_fd");
-    }
-    AEMU_SCOPED_TRACE("vkImportSemaphoreFdKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkImportSemaphoreFdKHR_VkResult_return = (VkResult)0;
-    vkImportSemaphoreFdKHR_VkResult_return = resources->on_vkImportSemaphoreFdKHR(vkEnc, VK_SUCCESS, device, pImportSemaphoreFdInfo);
-    return vkImportSemaphoreFdKHR_VkResult_return;
-}
-static VkResult entry_vkGetSemaphoreFdKHR(
-    VkDevice device,
-    const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
-    int* pFd)
-{
-    AEMU_SCOPED_TRACE("vkGetSemaphoreFdKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetSemaphoreFdKHR_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkGetSemaphoreFdKHR_VkResult_return = resources->on_vkGetSemaphoreFdKHR(vkEnc, VK_SUCCESS, device, pGetFdInfo, pFd);
-    return vkGetSemaphoreFdKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetSemaphoreFdKHR(
-    VkDevice device,
-    const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
-    int* pFd)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_fd"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetSemaphoreFdKHR", "VK_KHR_external_semaphore_fd");
-    }
-    AEMU_SCOPED_TRACE("vkGetSemaphoreFdKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetSemaphoreFdKHR_VkResult_return = (VkResult)0;
-    vkGetSemaphoreFdKHR_VkResult_return = resources->on_vkGetSemaphoreFdKHR(vkEnc, VK_SUCCESS, device, pGetFdInfo, pFd);
-    return vkGetSemaphoreFdKHR_VkResult_return;
-}
-#endif
-#ifdef VK_KHR_push_descriptor
-static void entry_vkCmdPushDescriptorSetKHR(
-    VkCommandBuffer commandBuffer,
-    VkPipelineBindPoint pipelineBindPoint,
-    VkPipelineLayout layout,
-    uint32_t set,
-    uint32_t descriptorWriteCount,
-    const VkWriteDescriptorSet* pDescriptorWrites)
-{
-    AEMU_SCOPED_TRACE("vkCmdPushDescriptorSetKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdPushDescriptorSetKHR(commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount, pDescriptorWrites);
-}
-static void entry_vkCmdPushDescriptorSetWithTemplateKHR(
-    VkCommandBuffer commandBuffer,
-    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-    VkPipelineLayout layout,
-    uint32_t set,
-    const void* pData)
-{
-    AEMU_SCOPED_TRACE("vkCmdPushDescriptorSetWithTemplateKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData);
-}
-#endif
-#ifdef VK_KHR_16bit_storage
-#endif
-#ifdef VK_KHR_incremental_present
-#endif
-#ifdef VK_KHR_descriptor_update_template
-static VkResult entry_vkCreateDescriptorUpdateTemplateKHR(
-    VkDevice device,
-    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
-{
-    AEMU_SCOPED_TRACE("vkCreateDescriptorUpdateTemplateKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateDescriptorUpdateTemplateKHR_VkResult_return = (VkResult)0;
-    vkCreateDescriptorUpdateTemplateKHR_VkResult_return = vkEnc->vkCreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
-    return vkCreateDescriptorUpdateTemplateKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkCreateDescriptorUpdateTemplateKHR(
-    VkDevice device,
-    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_descriptor_update_template"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkCreateDescriptorUpdateTemplateKHR", "VK_KHR_descriptor_update_template");
-    }
-    AEMU_SCOPED_TRACE("vkCreateDescriptorUpdateTemplateKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateDescriptorUpdateTemplateKHR_VkResult_return = (VkResult)0;
-    vkCreateDescriptorUpdateTemplateKHR_VkResult_return = vkEnc->vkCreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
-    return vkCreateDescriptorUpdateTemplateKHR_VkResult_return;
-}
-static void entry_vkDestroyDescriptorUpdateTemplateKHR(
-    VkDevice device,
-    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorUpdateTemplateKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
-}
-static void dynCheck_entry_vkDestroyDescriptorUpdateTemplateKHR(
-    VkDevice device,
-    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-    const VkAllocationCallbacks* pAllocator)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_descriptor_update_template"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkDestroyDescriptorUpdateTemplateKHR", "VK_KHR_descriptor_update_template");
-    }
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorUpdateTemplateKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
-}
-static void entry_vkUpdateDescriptorSetWithTemplateKHR(
-    VkDevice device,
-    VkDescriptorSet descriptorSet,
-    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-    const void* pData)
-{
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplateKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkUpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData);
-}
-static void dynCheck_entry_vkUpdateDescriptorSetWithTemplateKHR(
-    VkDevice device,
-    VkDescriptorSet descriptorSet,
-    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-    const void* pData)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_descriptor_update_template"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkUpdateDescriptorSetWithTemplateKHR", "VK_KHR_descriptor_update_template");
-    }
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplateKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkUpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData);
-}
-#endif
-#ifdef VK_KHR_create_renderpass2
-static VkResult entry_vkCreateRenderPass2KHR(
-    VkDevice device,
-    const VkRenderPassCreateInfo2KHR* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkRenderPass* pRenderPass)
-{
-    AEMU_SCOPED_TRACE("vkCreateRenderPass2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateRenderPass2KHR_VkResult_return = (VkResult)0;
-    vkCreateRenderPass2KHR_VkResult_return = vkEnc->vkCreateRenderPass2KHR(device, pCreateInfo, pAllocator, pRenderPass);
-    return vkCreateRenderPass2KHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkCreateRenderPass2KHR(
-    VkDevice device,
-    const VkRenderPassCreateInfo2KHR* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkRenderPass* pRenderPass)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_create_renderpass2"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkCreateRenderPass2KHR", "VK_KHR_create_renderpass2");
-    }
-    AEMU_SCOPED_TRACE("vkCreateRenderPass2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateRenderPass2KHR_VkResult_return = (VkResult)0;
-    vkCreateRenderPass2KHR_VkResult_return = vkEnc->vkCreateRenderPass2KHR(device, pCreateInfo, pAllocator, pRenderPass);
-    return vkCreateRenderPass2KHR_VkResult_return;
-}
-static void entry_vkCmdBeginRenderPass2KHR(
-    VkCommandBuffer commandBuffer,
-    const VkRenderPassBeginInfo* pRenderPassBegin,
-    const VkSubpassBeginInfoKHR* pSubpassBeginInfo)
-{
-    AEMU_SCOPED_TRACE("vkCmdBeginRenderPass2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
-}
-static void entry_vkCmdNextSubpass2KHR(
-    VkCommandBuffer commandBuffer,
-    const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
-    const VkSubpassEndInfoKHR* pSubpassEndInfo)
-{
-    AEMU_SCOPED_TRACE("vkCmdNextSubpass2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo);
-}
-static void entry_vkCmdEndRenderPass2KHR(
-    VkCommandBuffer commandBuffer,
-    const VkSubpassEndInfoKHR* pSubpassEndInfo)
-{
-    AEMU_SCOPED_TRACE("vkCmdEndRenderPass2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo);
-}
-#endif
-#ifdef VK_KHR_shared_presentable_image
-static VkResult entry_vkGetSwapchainStatusKHR(
-    VkDevice device,
-    VkSwapchainKHR swapchain)
-{
-    AEMU_SCOPED_TRACE("vkGetSwapchainStatusKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetSwapchainStatusKHR_VkResult_return = (VkResult)0;
-    vkGetSwapchainStatusKHR_VkResult_return = vkEnc->vkGetSwapchainStatusKHR(device, swapchain);
-    return vkGetSwapchainStatusKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetSwapchainStatusKHR(
-    VkDevice device,
-    VkSwapchainKHR swapchain)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_shared_presentable_image"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetSwapchainStatusKHR", "VK_KHR_shared_presentable_image");
-    }
-    AEMU_SCOPED_TRACE("vkGetSwapchainStatusKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetSwapchainStatusKHR_VkResult_return = (VkResult)0;
-    vkGetSwapchainStatusKHR_VkResult_return = vkEnc->vkGetSwapchainStatusKHR(device, swapchain);
-    return vkGetSwapchainStatusKHR_VkResult_return;
-}
-#endif
-#ifdef VK_KHR_external_fence_capabilities
-static void entry_vkGetPhysicalDeviceExternalFencePropertiesKHR(
-    VkPhysicalDevice physicalDevice,
-    const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
-    VkExternalFenceProperties* pExternalFenceProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalFencePropertiesKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    auto resources = ResourceTracker::get();
-    resources->on_vkGetPhysicalDeviceExternalFencePropertiesKHR(vkEnc, physicalDevice, pExternalFenceInfo, pExternalFenceProperties);
-}
-#endif
-#ifdef VK_KHR_external_fence
-#endif
-#ifdef VK_KHR_external_fence_win32
-static VkResult entry_vkImportFenceWin32HandleKHR(
-    VkDevice device,
-    const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo)
-{
-    AEMU_SCOPED_TRACE("vkImportFenceWin32HandleKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkImportFenceWin32HandleKHR_VkResult_return = (VkResult)0;
-    vkImportFenceWin32HandleKHR_VkResult_return = vkEnc->vkImportFenceWin32HandleKHR(device, pImportFenceWin32HandleInfo);
-    return vkImportFenceWin32HandleKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkImportFenceWin32HandleKHR(
-    VkDevice device,
-    const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_external_fence_win32"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkImportFenceWin32HandleKHR", "VK_KHR_external_fence_win32");
-    }
-    AEMU_SCOPED_TRACE("vkImportFenceWin32HandleKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkImportFenceWin32HandleKHR_VkResult_return = (VkResult)0;
-    vkImportFenceWin32HandleKHR_VkResult_return = vkEnc->vkImportFenceWin32HandleKHR(device, pImportFenceWin32HandleInfo);
-    return vkImportFenceWin32HandleKHR_VkResult_return;
-}
-static VkResult entry_vkGetFenceWin32HandleKHR(
-    VkDevice device,
-    const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo,
-    HANDLE* pHandle)
-{
-    AEMU_SCOPED_TRACE("vkGetFenceWin32HandleKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetFenceWin32HandleKHR_VkResult_return = (VkResult)0;
-    vkGetFenceWin32HandleKHR_VkResult_return = vkEnc->vkGetFenceWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
-    return vkGetFenceWin32HandleKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetFenceWin32HandleKHR(
-    VkDevice device,
-    const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo,
-    HANDLE* pHandle)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_external_fence_win32"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetFenceWin32HandleKHR", "VK_KHR_external_fence_win32");
-    }
-    AEMU_SCOPED_TRACE("vkGetFenceWin32HandleKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetFenceWin32HandleKHR_VkResult_return = (VkResult)0;
-    vkGetFenceWin32HandleKHR_VkResult_return = vkEnc->vkGetFenceWin32HandleKHR(device, pGetWin32HandleInfo, pHandle);
-    return vkGetFenceWin32HandleKHR_VkResult_return;
-}
-#endif
-#ifdef VK_KHR_external_fence_fd
-static VkResult entry_vkImportFenceFdKHR(
-    VkDevice device,
-    const VkImportFenceFdInfoKHR* pImportFenceFdInfo)
-{
-    AEMU_SCOPED_TRACE("vkImportFenceFdKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkImportFenceFdKHR_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkImportFenceFdKHR_VkResult_return = resources->on_vkImportFenceFdKHR(vkEnc, VK_SUCCESS, device, pImportFenceFdInfo);
-    return vkImportFenceFdKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkImportFenceFdKHR(
-    VkDevice device,
-    const VkImportFenceFdInfoKHR* pImportFenceFdInfo)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_external_fence_fd"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkImportFenceFdKHR", "VK_KHR_external_fence_fd");
-    }
-    AEMU_SCOPED_TRACE("vkImportFenceFdKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkImportFenceFdKHR_VkResult_return = (VkResult)0;
-    vkImportFenceFdKHR_VkResult_return = resources->on_vkImportFenceFdKHR(vkEnc, VK_SUCCESS, device, pImportFenceFdInfo);
-    return vkImportFenceFdKHR_VkResult_return;
-}
-static VkResult entry_vkGetFenceFdKHR(
-    VkDevice device,
-    const VkFenceGetFdInfoKHR* pGetFdInfo,
-    int* pFd)
-{
-    AEMU_SCOPED_TRACE("vkGetFenceFdKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetFenceFdKHR_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkGetFenceFdKHR_VkResult_return = resources->on_vkGetFenceFdKHR(vkEnc, VK_SUCCESS, device, pGetFdInfo, pFd);
-    return vkGetFenceFdKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetFenceFdKHR(
-    VkDevice device,
-    const VkFenceGetFdInfoKHR* pGetFdInfo,
-    int* pFd)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_external_fence_fd"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetFenceFdKHR", "VK_KHR_external_fence_fd");
-    }
-    AEMU_SCOPED_TRACE("vkGetFenceFdKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetFenceFdKHR_VkResult_return = (VkResult)0;
-    vkGetFenceFdKHR_VkResult_return = resources->on_vkGetFenceFdKHR(vkEnc, VK_SUCCESS, device, pGetFdInfo, pFd);
-    return vkGetFenceFdKHR_VkResult_return;
-}
-#endif
-#ifdef VK_KHR_maintenance2
-#endif
-#ifdef VK_KHR_get_surface_capabilities2
-static VkResult entry_vkGetPhysicalDeviceSurfaceCapabilities2KHR(
-    VkPhysicalDevice physicalDevice,
-    const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
-    VkSurfaceCapabilities2KHR* pSurfaceCapabilities)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceCapabilities2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPhysicalDeviceSurfaceCapabilities2KHR_VkResult_return = (VkResult)0;
-    vkGetPhysicalDeviceSurfaceCapabilities2KHR_VkResult_return = vkEnc->vkGetPhysicalDeviceSurfaceCapabilities2KHR(physicalDevice, pSurfaceInfo, pSurfaceCapabilities);
-    return vkGetPhysicalDeviceSurfaceCapabilities2KHR_VkResult_return;
-}
-static VkResult entry_vkGetPhysicalDeviceSurfaceFormats2KHR(
-    VkPhysicalDevice physicalDevice,
-    const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
-    uint32_t* pSurfaceFormatCount,
-    VkSurfaceFormat2KHR* pSurfaceFormats)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceFormats2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPhysicalDeviceSurfaceFormats2KHR_VkResult_return = (VkResult)0;
-    vkGetPhysicalDeviceSurfaceFormats2KHR_VkResult_return = vkEnc->vkGetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, pSurfaceInfo, pSurfaceFormatCount, pSurfaceFormats);
-    return vkGetPhysicalDeviceSurfaceFormats2KHR_VkResult_return;
-}
-#endif
-#ifdef VK_KHR_variable_pointers
-#endif
-#ifdef VK_KHR_get_display_properties2
-static VkResult entry_vkGetPhysicalDeviceDisplayProperties2KHR(
-    VkPhysicalDevice physicalDevice,
-    uint32_t* pPropertyCount,
-    VkDisplayProperties2KHR* pProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayProperties2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPhysicalDeviceDisplayProperties2KHR_VkResult_return = (VkResult)0;
-    vkGetPhysicalDeviceDisplayProperties2KHR_VkResult_return = vkEnc->vkGetPhysicalDeviceDisplayProperties2KHR(physicalDevice, pPropertyCount, pProperties);
-    return vkGetPhysicalDeviceDisplayProperties2KHR_VkResult_return;
-}
-static VkResult entry_vkGetPhysicalDeviceDisplayPlaneProperties2KHR(
-    VkPhysicalDevice physicalDevice,
-    uint32_t* pPropertyCount,
-    VkDisplayPlaneProperties2KHR* pProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayPlaneProperties2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPhysicalDeviceDisplayPlaneProperties2KHR_VkResult_return = (VkResult)0;
-    vkGetPhysicalDeviceDisplayPlaneProperties2KHR_VkResult_return = vkEnc->vkGetPhysicalDeviceDisplayPlaneProperties2KHR(physicalDevice, pPropertyCount, pProperties);
-    return vkGetPhysicalDeviceDisplayPlaneProperties2KHR_VkResult_return;
-}
-static VkResult entry_vkGetDisplayModeProperties2KHR(
-    VkPhysicalDevice physicalDevice,
-    VkDisplayKHR display,
-    uint32_t* pPropertyCount,
-    VkDisplayModeProperties2KHR* pProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetDisplayModeProperties2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetDisplayModeProperties2KHR_VkResult_return = (VkResult)0;
-    vkGetDisplayModeProperties2KHR_VkResult_return = vkEnc->vkGetDisplayModeProperties2KHR(physicalDevice, display, pPropertyCount, pProperties);
-    return vkGetDisplayModeProperties2KHR_VkResult_return;
-}
-static VkResult entry_vkGetDisplayPlaneCapabilities2KHR(
-    VkPhysicalDevice physicalDevice,
-    const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo,
-    VkDisplayPlaneCapabilities2KHR* pCapabilities)
-{
-    AEMU_SCOPED_TRACE("vkGetDisplayPlaneCapabilities2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetDisplayPlaneCapabilities2KHR_VkResult_return = (VkResult)0;
-    vkGetDisplayPlaneCapabilities2KHR_VkResult_return = vkEnc->vkGetDisplayPlaneCapabilities2KHR(physicalDevice, pDisplayPlaneInfo, pCapabilities);
-    return vkGetDisplayPlaneCapabilities2KHR_VkResult_return;
-}
-#endif
-#ifdef VK_KHR_dedicated_allocation
-#endif
-#ifdef VK_KHR_storage_buffer_storage_class
-#endif
-#ifdef VK_KHR_relaxed_block_layout
-#endif
-#ifdef VK_KHR_get_memory_requirements2
-static void entry_vkGetImageMemoryRequirements2KHR(
-    VkDevice device,
-    const VkImageMemoryRequirementsInfo2* pInfo,
-    VkMemoryRequirements2* pMemoryRequirements)
-{
-    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    auto resources = ResourceTracker::get();
-    resources->on_vkGetImageMemoryRequirements2KHR(vkEnc, device, pInfo, pMemoryRequirements);
-}
-static void dynCheck_entry_vkGetImageMemoryRequirements2KHR(
-    VkDevice device,
-    const VkImageMemoryRequirementsInfo2* pInfo,
-    VkMemoryRequirements2* pMemoryRequirements)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_get_memory_requirements2"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetImageMemoryRequirements2KHR", "VK_KHR_get_memory_requirements2");
-    }
-    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    resources->on_vkGetImageMemoryRequirements2KHR(vkEnc, device, pInfo, pMemoryRequirements);
-}
-static void entry_vkGetBufferMemoryRequirements2KHR(
-    VkDevice device,
-    const VkBufferMemoryRequirementsInfo2* pInfo,
-    VkMemoryRequirements2* pMemoryRequirements)
-{
-    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    auto resources = ResourceTracker::get();
-    resources->on_vkGetBufferMemoryRequirements2KHR(vkEnc, device, pInfo, pMemoryRequirements);
-}
-static void dynCheck_entry_vkGetBufferMemoryRequirements2KHR(
-    VkDevice device,
-    const VkBufferMemoryRequirementsInfo2* pInfo,
-    VkMemoryRequirements2* pMemoryRequirements)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_get_memory_requirements2"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetBufferMemoryRequirements2KHR", "VK_KHR_get_memory_requirements2");
-    }
-    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    resources->on_vkGetBufferMemoryRequirements2KHR(vkEnc, device, pInfo, pMemoryRequirements);
-}
-static void entry_vkGetImageSparseMemoryRequirements2KHR(
-    VkDevice device,
-    const VkImageSparseMemoryRequirementsInfo2* pInfo,
-    uint32_t* pSparseMemoryRequirementCount,
-    VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
-{
-    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetImageSparseMemoryRequirements2KHR(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
-}
-static void dynCheck_entry_vkGetImageSparseMemoryRequirements2KHR(
-    VkDevice device,
-    const VkImageSparseMemoryRequirementsInfo2* pInfo,
-    uint32_t* pSparseMemoryRequirementCount,
-    VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_get_memory_requirements2"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetImageSparseMemoryRequirements2KHR", "VK_KHR_get_memory_requirements2");
-    }
-    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetImageSparseMemoryRequirements2KHR(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements);
-}
-#endif
-#ifdef VK_KHR_image_format_list
-#endif
-#ifdef VK_KHR_sampler_ycbcr_conversion
-static VkResult entry_vkCreateSamplerYcbcrConversionKHR(
-    VkDevice device,
-    const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSamplerYcbcrConversion* pYcbcrConversion)
-{
-    AEMU_SCOPED_TRACE("vkCreateSamplerYcbcrConversionKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateSamplerYcbcrConversionKHR_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkCreateSamplerYcbcrConversionKHR_VkResult_return = resources->on_vkCreateSamplerYcbcrConversionKHR(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pYcbcrConversion);
-    return vkCreateSamplerYcbcrConversionKHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkCreateSamplerYcbcrConversionKHR(
-    VkDevice device,
-    const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSamplerYcbcrConversion* pYcbcrConversion)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_sampler_ycbcr_conversion"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkCreateSamplerYcbcrConversionKHR", "VK_KHR_sampler_ycbcr_conversion");
-    }
-    AEMU_SCOPED_TRACE("vkCreateSamplerYcbcrConversionKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateSamplerYcbcrConversionKHR_VkResult_return = (VkResult)0;
-    vkCreateSamplerYcbcrConversionKHR_VkResult_return = resources->on_vkCreateSamplerYcbcrConversionKHR(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pYcbcrConversion);
-    return vkCreateSamplerYcbcrConversionKHR_VkResult_return;
-}
-static void entry_vkDestroySamplerYcbcrConversionKHR(
-    VkDevice device,
-    VkSamplerYcbcrConversion ycbcrConversion,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroySamplerYcbcrConversionKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    auto resources = ResourceTracker::get();
-    resources->on_vkDestroySamplerYcbcrConversionKHR(vkEnc, device, ycbcrConversion, pAllocator);
-}
-static void dynCheck_entry_vkDestroySamplerYcbcrConversionKHR(
-    VkDevice device,
-    VkSamplerYcbcrConversion ycbcrConversion,
-    const VkAllocationCallbacks* pAllocator)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_sampler_ycbcr_conversion"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkDestroySamplerYcbcrConversionKHR", "VK_KHR_sampler_ycbcr_conversion");
-    }
-    AEMU_SCOPED_TRACE("vkDestroySamplerYcbcrConversionKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    resources->on_vkDestroySamplerYcbcrConversionKHR(vkEnc, device, ycbcrConversion, pAllocator);
-}
-#endif
-#ifdef VK_KHR_bind_memory2
-static VkResult entry_vkBindBufferMemory2KHR(
-    VkDevice device,
-    uint32_t bindInfoCount,
-    const VkBindBufferMemoryInfo* pBindInfos)
-{
-    AEMU_SCOPED_TRACE("vkBindBufferMemory2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkBindBufferMemory2KHR_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkBindBufferMemory2KHR_VkResult_return = resources->on_vkBindBufferMemory2KHR(vkEnc, VK_SUCCESS, device, bindInfoCount, pBindInfos);
-    return vkBindBufferMemory2KHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkBindBufferMemory2KHR(
-    VkDevice device,
-    uint32_t bindInfoCount,
-    const VkBindBufferMemoryInfo* pBindInfos)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_bind_memory2"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkBindBufferMemory2KHR", "VK_KHR_bind_memory2");
-    }
-    AEMU_SCOPED_TRACE("vkBindBufferMemory2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkBindBufferMemory2KHR_VkResult_return = (VkResult)0;
-    vkBindBufferMemory2KHR_VkResult_return = resources->on_vkBindBufferMemory2KHR(vkEnc, VK_SUCCESS, device, bindInfoCount, pBindInfos);
-    return vkBindBufferMemory2KHR_VkResult_return;
-}
-static VkResult entry_vkBindImageMemory2KHR(
-    VkDevice device,
-    uint32_t bindInfoCount,
-    const VkBindImageMemoryInfo* pBindInfos)
-{
-    AEMU_SCOPED_TRACE("vkBindImageMemory2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkBindImageMemory2KHR_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkBindImageMemory2KHR_VkResult_return = resources->on_vkBindImageMemory2KHR(vkEnc, VK_SUCCESS, device, bindInfoCount, pBindInfos);
-    return vkBindImageMemory2KHR_VkResult_return;
-}
-static VkResult dynCheck_entry_vkBindImageMemory2KHR(
-    VkDevice device,
-    uint32_t bindInfoCount,
-    const VkBindImageMemoryInfo* pBindInfos)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_bind_memory2"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkBindImageMemory2KHR", "VK_KHR_bind_memory2");
-    }
-    AEMU_SCOPED_TRACE("vkBindImageMemory2KHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkBindImageMemory2KHR_VkResult_return = (VkResult)0;
-    vkBindImageMemory2KHR_VkResult_return = resources->on_vkBindImageMemory2KHR(vkEnc, VK_SUCCESS, device, bindInfoCount, pBindInfos);
-    return vkBindImageMemory2KHR_VkResult_return;
-}
-#endif
-#ifdef VK_KHR_maintenance3
-static void entry_vkGetDescriptorSetLayoutSupportKHR(
-    VkDevice device,
-    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
-    VkDescriptorSetLayoutSupport* pSupport)
-{
-    AEMU_SCOPED_TRACE("vkGetDescriptorSetLayoutSupportKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetDescriptorSetLayoutSupportKHR(device, pCreateInfo, pSupport);
-}
-static void dynCheck_entry_vkGetDescriptorSetLayoutSupportKHR(
-    VkDevice device,
-    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
-    VkDescriptorSetLayoutSupport* pSupport)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_KHR_maintenance3"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetDescriptorSetLayoutSupportKHR", "VK_KHR_maintenance3");
-    }
-    AEMU_SCOPED_TRACE("vkGetDescriptorSetLayoutSupportKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetDescriptorSetLayoutSupportKHR(device, pCreateInfo, pSupport);
-}
-#endif
-#ifdef VK_KHR_draw_indirect_count
-static void entry_vkCmdDrawIndirectCountKHR(
-    VkCommandBuffer commandBuffer,
-    VkBuffer buffer,
-    VkDeviceSize offset,
-    VkBuffer countBuffer,
-    VkDeviceSize countBufferOffset,
-    uint32_t maxDrawCount,
-    uint32_t stride)
-{
-    AEMU_SCOPED_TRACE("vkCmdDrawIndirectCountKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
-}
-static void entry_vkCmdDrawIndexedIndirectCountKHR(
-    VkCommandBuffer commandBuffer,
-    VkBuffer buffer,
-    VkDeviceSize offset,
-    VkBuffer countBuffer,
-    VkDeviceSize countBufferOffset,
-    uint32_t maxDrawCount,
-    uint32_t stride)
-{
-    AEMU_SCOPED_TRACE("vkCmdDrawIndexedIndirectCountKHR");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
-}
-#endif
-#ifdef VK_KHR_8bit_storage
-#endif
-#ifdef VK_ANDROID_native_buffer
-static VkResult entry_vkGetSwapchainGrallocUsageANDROID(
-    VkDevice device,
-    VkFormat format,
-    VkImageUsageFlags imageUsage,
-    int* grallocUsage)
-{
-    AEMU_SCOPED_TRACE("vkGetSwapchainGrallocUsageANDROID");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetSwapchainGrallocUsageANDROID_VkResult_return = (VkResult)0;
-    vkGetSwapchainGrallocUsageANDROID_VkResult_return = vkEnc->vkGetSwapchainGrallocUsageANDROID(device, format, imageUsage, grallocUsage);
-    return vkGetSwapchainGrallocUsageANDROID_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetSwapchainGrallocUsageANDROID(
-    VkDevice device,
-    VkFormat format,
-    VkImageUsageFlags imageUsage,
-    int* grallocUsage)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_ANDROID_native_buffer"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetSwapchainGrallocUsageANDROID", "VK_ANDROID_native_buffer");
-    }
-    AEMU_SCOPED_TRACE("vkGetSwapchainGrallocUsageANDROID");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetSwapchainGrallocUsageANDROID_VkResult_return = (VkResult)0;
-    vkGetSwapchainGrallocUsageANDROID_VkResult_return = vkEnc->vkGetSwapchainGrallocUsageANDROID(device, format, imageUsage, grallocUsage);
-    return vkGetSwapchainGrallocUsageANDROID_VkResult_return;
-}
-static VkResult entry_vkAcquireImageANDROID(
-    VkDevice device,
-    VkImage image,
-    int nativeFenceFd,
-    VkSemaphore semaphore,
-    VkFence fence)
-{
-    AEMU_SCOPED_TRACE("vkAcquireImageANDROID");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkAcquireImageANDROID_VkResult_return = (VkResult)0;
-    vkAcquireImageANDROID_VkResult_return = vkEnc->vkAcquireImageANDROID(device, image, nativeFenceFd, semaphore, fence);
-    return vkAcquireImageANDROID_VkResult_return;
-}
-static VkResult dynCheck_entry_vkAcquireImageANDROID(
-    VkDevice device,
-    VkImage image,
-    int nativeFenceFd,
-    VkSemaphore semaphore,
-    VkFence fence)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_ANDROID_native_buffer"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkAcquireImageANDROID", "VK_ANDROID_native_buffer");
-    }
-    AEMU_SCOPED_TRACE("vkAcquireImageANDROID");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkAcquireImageANDROID_VkResult_return = (VkResult)0;
-    vkAcquireImageANDROID_VkResult_return = vkEnc->vkAcquireImageANDROID(device, image, nativeFenceFd, semaphore, fence);
-    return vkAcquireImageANDROID_VkResult_return;
-}
-static VkResult entry_vkQueueSignalReleaseImageANDROID(
-    VkQueue queue,
-    uint32_t waitSemaphoreCount,
-    const VkSemaphore* pWaitSemaphores,
-    VkImage image,
-    int* pNativeFenceFd)
-{
-    AEMU_SCOPED_TRACE("vkQueueSignalReleaseImageANDROID");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkQueueSignalReleaseImageANDROID_VkResult_return = (VkResult)0;
-    vkQueueSignalReleaseImageANDROID_VkResult_return = vkEnc->vkQueueSignalReleaseImageANDROID(queue, waitSemaphoreCount, pWaitSemaphores, image, pNativeFenceFd);
-    return vkQueueSignalReleaseImageANDROID_VkResult_return;
-}
-#endif
-#ifdef VK_EXT_debug_report
-static VkResult entry_vkCreateDebugReportCallbackEXT(
-    VkInstance instance,
-    const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkDebugReportCallbackEXT* pCallback)
-{
-    AEMU_SCOPED_TRACE("vkCreateDebugReportCallbackEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateDebugReportCallbackEXT_VkResult_return = (VkResult)0;
-    vkCreateDebugReportCallbackEXT_VkResult_return = vkEnc->vkCreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pCallback);
-    return vkCreateDebugReportCallbackEXT_VkResult_return;
-}
-static void entry_vkDestroyDebugReportCallbackEXT(
-    VkInstance instance,
-    VkDebugReportCallbackEXT callback,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyDebugReportCallbackEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyDebugReportCallbackEXT(instance, callback, pAllocator);
-}
-static void entry_vkDebugReportMessageEXT(
-    VkInstance instance,
-    VkDebugReportFlagsEXT flags,
-    VkDebugReportObjectTypeEXT objectType,
-    uint64_t object,
-    size_t location,
-    int32_t messageCode,
-    const char* pLayerPrefix,
-    const char* pMessage)
-{
-    AEMU_SCOPED_TRACE("vkDebugReportMessageEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDebugReportMessageEXT(instance, flags, objectType, object, location, messageCode, pLayerPrefix, pMessage);
-}
-#endif
-#ifdef VK_NV_glsl_shader
-#endif
-#ifdef VK_EXT_depth_range_unrestricted
-#endif
-#ifdef VK_IMG_filter_cubic
-#endif
-#ifdef VK_AMD_rasterization_order
-#endif
-#ifdef VK_AMD_shader_trinary_minmax
-#endif
-#ifdef VK_AMD_shader_explicit_vertex_parameter
-#endif
-#ifdef VK_EXT_debug_marker
-static VkResult entry_vkDebugMarkerSetObjectTagEXT(
-    VkDevice device,
-    const VkDebugMarkerObjectTagInfoEXT* pTagInfo)
-{
-    AEMU_SCOPED_TRACE("vkDebugMarkerSetObjectTagEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkDebugMarkerSetObjectTagEXT_VkResult_return = (VkResult)0;
-    vkDebugMarkerSetObjectTagEXT_VkResult_return = vkEnc->vkDebugMarkerSetObjectTagEXT(device, pTagInfo);
-    return vkDebugMarkerSetObjectTagEXT_VkResult_return;
-}
-static VkResult dynCheck_entry_vkDebugMarkerSetObjectTagEXT(
-    VkDevice device,
-    const VkDebugMarkerObjectTagInfoEXT* pTagInfo)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_EXT_debug_marker"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkDebugMarkerSetObjectTagEXT", "VK_EXT_debug_marker");
-    }
-    AEMU_SCOPED_TRACE("vkDebugMarkerSetObjectTagEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkDebugMarkerSetObjectTagEXT_VkResult_return = (VkResult)0;
-    vkDebugMarkerSetObjectTagEXT_VkResult_return = vkEnc->vkDebugMarkerSetObjectTagEXT(device, pTagInfo);
-    return vkDebugMarkerSetObjectTagEXT_VkResult_return;
-}
-static VkResult entry_vkDebugMarkerSetObjectNameEXT(
-    VkDevice device,
-    const VkDebugMarkerObjectNameInfoEXT* pNameInfo)
-{
-    AEMU_SCOPED_TRACE("vkDebugMarkerSetObjectNameEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkDebugMarkerSetObjectNameEXT_VkResult_return = (VkResult)0;
-    vkDebugMarkerSetObjectNameEXT_VkResult_return = vkEnc->vkDebugMarkerSetObjectNameEXT(device, pNameInfo);
-    return vkDebugMarkerSetObjectNameEXT_VkResult_return;
-}
-static VkResult dynCheck_entry_vkDebugMarkerSetObjectNameEXT(
-    VkDevice device,
-    const VkDebugMarkerObjectNameInfoEXT* pNameInfo)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_EXT_debug_marker"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkDebugMarkerSetObjectNameEXT", "VK_EXT_debug_marker");
-    }
-    AEMU_SCOPED_TRACE("vkDebugMarkerSetObjectNameEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkDebugMarkerSetObjectNameEXT_VkResult_return = (VkResult)0;
-    vkDebugMarkerSetObjectNameEXT_VkResult_return = vkEnc->vkDebugMarkerSetObjectNameEXT(device, pNameInfo);
-    return vkDebugMarkerSetObjectNameEXT_VkResult_return;
-}
-static void entry_vkCmdDebugMarkerBeginEXT(
-    VkCommandBuffer commandBuffer,
-    const VkDebugMarkerMarkerInfoEXT* pMarkerInfo)
-{
-    AEMU_SCOPED_TRACE("vkCmdDebugMarkerBeginEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdDebugMarkerBeginEXT(commandBuffer, pMarkerInfo);
-}
-static void entry_vkCmdDebugMarkerEndEXT(
-    VkCommandBuffer commandBuffer)
-{
-    AEMU_SCOPED_TRACE("vkCmdDebugMarkerEndEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdDebugMarkerEndEXT(commandBuffer);
-}
-static void entry_vkCmdDebugMarkerInsertEXT(
-    VkCommandBuffer commandBuffer,
-    const VkDebugMarkerMarkerInfoEXT* pMarkerInfo)
-{
-    AEMU_SCOPED_TRACE("vkCmdDebugMarkerInsertEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdDebugMarkerInsertEXT(commandBuffer, pMarkerInfo);
-}
-#endif
-#ifdef VK_AMD_gcn_shader
-#endif
-#ifdef VK_NV_dedicated_allocation
-#endif
-#ifdef VK_AMD_draw_indirect_count
-static void entry_vkCmdDrawIndirectCountAMD(
-    VkCommandBuffer commandBuffer,
-    VkBuffer buffer,
-    VkDeviceSize offset,
-    VkBuffer countBuffer,
-    VkDeviceSize countBufferOffset,
-    uint32_t maxDrawCount,
-    uint32_t stride)
-{
-    AEMU_SCOPED_TRACE("vkCmdDrawIndirectCountAMD");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdDrawIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
-}
-static void entry_vkCmdDrawIndexedIndirectCountAMD(
-    VkCommandBuffer commandBuffer,
-    VkBuffer buffer,
-    VkDeviceSize offset,
-    VkBuffer countBuffer,
-    VkDeviceSize countBufferOffset,
-    uint32_t maxDrawCount,
-    uint32_t stride)
-{
-    AEMU_SCOPED_TRACE("vkCmdDrawIndexedIndirectCountAMD");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdDrawIndexedIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride);
-}
-#endif
-#ifdef VK_AMD_negative_viewport_height
-#endif
-#ifdef VK_AMD_gpu_shader_half_float
-#endif
-#ifdef VK_AMD_shader_ballot
-#endif
-#ifdef VK_AMD_texture_gather_bias_lod
-#endif
-#ifdef VK_AMD_shader_info
-static VkResult entry_vkGetShaderInfoAMD(
-    VkDevice device,
-    VkPipeline pipeline,
-    VkShaderStageFlagBits shaderStage,
-    VkShaderInfoTypeAMD infoType,
-    size_t* pInfoSize,
-    void* pInfo)
-{
-    AEMU_SCOPED_TRACE("vkGetShaderInfoAMD");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetShaderInfoAMD_VkResult_return = (VkResult)0;
-    vkGetShaderInfoAMD_VkResult_return = vkEnc->vkGetShaderInfoAMD(device, pipeline, shaderStage, infoType, pInfoSize, pInfo);
-    return vkGetShaderInfoAMD_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetShaderInfoAMD(
-    VkDevice device,
-    VkPipeline pipeline,
-    VkShaderStageFlagBits shaderStage,
-    VkShaderInfoTypeAMD infoType,
-    size_t* pInfoSize,
-    void* pInfo)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_AMD_shader_info"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetShaderInfoAMD", "VK_AMD_shader_info");
-    }
-    AEMU_SCOPED_TRACE("vkGetShaderInfoAMD");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetShaderInfoAMD_VkResult_return = (VkResult)0;
-    vkGetShaderInfoAMD_VkResult_return = vkEnc->vkGetShaderInfoAMD(device, pipeline, shaderStage, infoType, pInfoSize, pInfo);
-    return vkGetShaderInfoAMD_VkResult_return;
-}
-#endif
-#ifdef VK_AMD_shader_image_load_store_lod
-#endif
-#ifdef VK_IMG_format_pvrtc
-#endif
-#ifdef VK_NV_external_memory_capabilities
-static VkResult entry_vkGetPhysicalDeviceExternalImageFormatPropertiesNV(
-    VkPhysicalDevice physicalDevice,
-    VkFormat format,
-    VkImageType type,
-    VkImageTiling tiling,
-    VkImageUsageFlags usage,
-    VkImageCreateFlags flags,
-    VkExternalMemoryHandleTypeFlagsNV externalHandleType,
-    VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalImageFormatPropertiesNV");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPhysicalDeviceExternalImageFormatPropertiesNV_VkResult_return = (VkResult)0;
-    vkGetPhysicalDeviceExternalImageFormatPropertiesNV_VkResult_return = vkEnc->vkGetPhysicalDeviceExternalImageFormatPropertiesNV(physicalDevice, format, type, tiling, usage, flags, externalHandleType, pExternalImageFormatProperties);
-    return vkGetPhysicalDeviceExternalImageFormatPropertiesNV_VkResult_return;
-}
-#endif
-#ifdef VK_NV_external_memory
-#endif
-#ifdef VK_NV_external_memory_win32
-static VkResult entry_vkGetMemoryWin32HandleNV(
-    VkDevice device,
-    VkDeviceMemory memory,
-    VkExternalMemoryHandleTypeFlagsNV handleType,
-    HANDLE* pHandle)
-{
-    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandleNV");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetMemoryWin32HandleNV_VkResult_return = (VkResult)0;
-    vkGetMemoryWin32HandleNV_VkResult_return = vkEnc->vkGetMemoryWin32HandleNV(device, memory, handleType, pHandle);
-    return vkGetMemoryWin32HandleNV_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetMemoryWin32HandleNV(
-    VkDevice device,
-    VkDeviceMemory memory,
-    VkExternalMemoryHandleTypeFlagsNV handleType,
-    HANDLE* pHandle)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_NV_external_memory_win32"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetMemoryWin32HandleNV", "VK_NV_external_memory_win32");
-    }
-    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandleNV");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetMemoryWin32HandleNV_VkResult_return = (VkResult)0;
-    vkGetMemoryWin32HandleNV_VkResult_return = vkEnc->vkGetMemoryWin32HandleNV(device, memory, handleType, pHandle);
-    return vkGetMemoryWin32HandleNV_VkResult_return;
-}
-#endif
-#ifdef VK_NV_win32_keyed_mutex
-#endif
-#ifdef VK_EXT_validation_flags
-#endif
-#ifdef VK_NN_vi_surface
-static VkResult entry_vkCreateViSurfaceNN(
-    VkInstance instance,
-    const VkViSurfaceCreateInfoNN* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
-{
-    AEMU_SCOPED_TRACE("vkCreateViSurfaceNN");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateViSurfaceNN_VkResult_return = (VkResult)0;
-    vkCreateViSurfaceNN_VkResult_return = vkEnc->vkCreateViSurfaceNN(instance, pCreateInfo, pAllocator, pSurface);
-    return vkCreateViSurfaceNN_VkResult_return;
-}
-#endif
-#ifdef VK_EXT_shader_subgroup_ballot
-#endif
-#ifdef VK_EXT_shader_subgroup_vote
-#endif
-#ifdef VK_EXT_conditional_rendering
-static void entry_vkCmdBeginConditionalRenderingEXT(
-    VkCommandBuffer commandBuffer,
-    const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin)
-{
-    AEMU_SCOPED_TRACE("vkCmdBeginConditionalRenderingEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdBeginConditionalRenderingEXT(commandBuffer, pConditionalRenderingBegin);
-}
-static void entry_vkCmdEndConditionalRenderingEXT(
-    VkCommandBuffer commandBuffer)
-{
-    AEMU_SCOPED_TRACE("vkCmdEndConditionalRenderingEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdEndConditionalRenderingEXT(commandBuffer);
-}
-#endif
-#ifdef VK_NVX_device_generated_commands
-static void entry_vkCmdProcessCommandsNVX(
-    VkCommandBuffer commandBuffer,
-    const VkCmdProcessCommandsInfoNVX* pProcessCommandsInfo)
-{
-    AEMU_SCOPED_TRACE("vkCmdProcessCommandsNVX");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdProcessCommandsNVX(commandBuffer, pProcessCommandsInfo);
-}
-static void entry_vkCmdReserveSpaceForCommandsNVX(
-    VkCommandBuffer commandBuffer,
-    const VkCmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo)
-{
-    AEMU_SCOPED_TRACE("vkCmdReserveSpaceForCommandsNVX");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdReserveSpaceForCommandsNVX(commandBuffer, pReserveSpaceInfo);
-}
-static VkResult entry_vkCreateIndirectCommandsLayoutNVX(
-    VkDevice device,
-    const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout)
-{
-    AEMU_SCOPED_TRACE("vkCreateIndirectCommandsLayoutNVX");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateIndirectCommandsLayoutNVX_VkResult_return = (VkResult)0;
-    vkCreateIndirectCommandsLayoutNVX_VkResult_return = vkEnc->vkCreateIndirectCommandsLayoutNVX(device, pCreateInfo, pAllocator, pIndirectCommandsLayout);
-    return vkCreateIndirectCommandsLayoutNVX_VkResult_return;
-}
-static VkResult dynCheck_entry_vkCreateIndirectCommandsLayoutNVX(
-    VkDevice device,
-    const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_NVX_device_generated_commands"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkCreateIndirectCommandsLayoutNVX", "VK_NVX_device_generated_commands");
-    }
-    AEMU_SCOPED_TRACE("vkCreateIndirectCommandsLayoutNVX");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateIndirectCommandsLayoutNVX_VkResult_return = (VkResult)0;
-    vkCreateIndirectCommandsLayoutNVX_VkResult_return = vkEnc->vkCreateIndirectCommandsLayoutNVX(device, pCreateInfo, pAllocator, pIndirectCommandsLayout);
-    return vkCreateIndirectCommandsLayoutNVX_VkResult_return;
-}
-static void entry_vkDestroyIndirectCommandsLayoutNVX(
-    VkDevice device,
-    VkIndirectCommandsLayoutNVX indirectCommandsLayout,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyIndirectCommandsLayoutNVX");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyIndirectCommandsLayoutNVX(device, indirectCommandsLayout, pAllocator);
-}
-static void dynCheck_entry_vkDestroyIndirectCommandsLayoutNVX(
-    VkDevice device,
-    VkIndirectCommandsLayoutNVX indirectCommandsLayout,
-    const VkAllocationCallbacks* pAllocator)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_NVX_device_generated_commands"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkDestroyIndirectCommandsLayoutNVX", "VK_NVX_device_generated_commands");
-    }
-    AEMU_SCOPED_TRACE("vkDestroyIndirectCommandsLayoutNVX");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyIndirectCommandsLayoutNVX(device, indirectCommandsLayout, pAllocator);
-}
-static VkResult entry_vkCreateObjectTableNVX(
-    VkDevice device,
-    const VkObjectTableCreateInfoNVX* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkObjectTableNVX* pObjectTable)
-{
-    AEMU_SCOPED_TRACE("vkCreateObjectTableNVX");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateObjectTableNVX_VkResult_return = (VkResult)0;
-    vkCreateObjectTableNVX_VkResult_return = vkEnc->vkCreateObjectTableNVX(device, pCreateInfo, pAllocator, pObjectTable);
-    return vkCreateObjectTableNVX_VkResult_return;
-}
-static VkResult dynCheck_entry_vkCreateObjectTableNVX(
-    VkDevice device,
-    const VkObjectTableCreateInfoNVX* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkObjectTableNVX* pObjectTable)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_NVX_device_generated_commands"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkCreateObjectTableNVX", "VK_NVX_device_generated_commands");
-    }
-    AEMU_SCOPED_TRACE("vkCreateObjectTableNVX");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateObjectTableNVX_VkResult_return = (VkResult)0;
-    vkCreateObjectTableNVX_VkResult_return = vkEnc->vkCreateObjectTableNVX(device, pCreateInfo, pAllocator, pObjectTable);
-    return vkCreateObjectTableNVX_VkResult_return;
-}
-static void entry_vkDestroyObjectTableNVX(
-    VkDevice device,
-    VkObjectTableNVX objectTable,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyObjectTableNVX");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyObjectTableNVX(device, objectTable, pAllocator);
-}
-static void dynCheck_entry_vkDestroyObjectTableNVX(
-    VkDevice device,
-    VkObjectTableNVX objectTable,
-    const VkAllocationCallbacks* pAllocator)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_NVX_device_generated_commands"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkDestroyObjectTableNVX", "VK_NVX_device_generated_commands");
-    }
-    AEMU_SCOPED_TRACE("vkDestroyObjectTableNVX");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyObjectTableNVX(device, objectTable, pAllocator);
-}
-static VkResult entry_vkRegisterObjectsNVX(
-    VkDevice device,
-    VkObjectTableNVX objectTable,
-    uint32_t objectCount,
-    const VkObjectTableEntryNVX* const* ppObjectTableEntries,
-    const uint32_t* pObjectIndices)
-{
-    AEMU_SCOPED_TRACE("vkRegisterObjectsNVX");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkRegisterObjectsNVX_VkResult_return = (VkResult)0;
-    vkRegisterObjectsNVX_VkResult_return = vkEnc->vkRegisterObjectsNVX(device, objectTable, objectCount, ppObjectTableEntries, pObjectIndices);
-    return vkRegisterObjectsNVX_VkResult_return;
-}
-static VkResult dynCheck_entry_vkRegisterObjectsNVX(
-    VkDevice device,
-    VkObjectTableNVX objectTable,
-    uint32_t objectCount,
-    const VkObjectTableEntryNVX* const* ppObjectTableEntries,
-    const uint32_t* pObjectIndices)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_NVX_device_generated_commands"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkRegisterObjectsNVX", "VK_NVX_device_generated_commands");
-    }
-    AEMU_SCOPED_TRACE("vkRegisterObjectsNVX");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkRegisterObjectsNVX_VkResult_return = (VkResult)0;
-    vkRegisterObjectsNVX_VkResult_return = vkEnc->vkRegisterObjectsNVX(device, objectTable, objectCount, ppObjectTableEntries, pObjectIndices);
-    return vkRegisterObjectsNVX_VkResult_return;
-}
-static VkResult entry_vkUnregisterObjectsNVX(
-    VkDevice device,
-    VkObjectTableNVX objectTable,
-    uint32_t objectCount,
-    const VkObjectEntryTypeNVX* pObjectEntryTypes,
-    const uint32_t* pObjectIndices)
-{
-    AEMU_SCOPED_TRACE("vkUnregisterObjectsNVX");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkUnregisterObjectsNVX_VkResult_return = (VkResult)0;
-    vkUnregisterObjectsNVX_VkResult_return = vkEnc->vkUnregisterObjectsNVX(device, objectTable, objectCount, pObjectEntryTypes, pObjectIndices);
-    return vkUnregisterObjectsNVX_VkResult_return;
-}
-static VkResult dynCheck_entry_vkUnregisterObjectsNVX(
-    VkDevice device,
-    VkObjectTableNVX objectTable,
-    uint32_t objectCount,
-    const VkObjectEntryTypeNVX* pObjectEntryTypes,
-    const uint32_t* pObjectIndices)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_NVX_device_generated_commands"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkUnregisterObjectsNVX", "VK_NVX_device_generated_commands");
-    }
-    AEMU_SCOPED_TRACE("vkUnregisterObjectsNVX");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkUnregisterObjectsNVX_VkResult_return = (VkResult)0;
-    vkUnregisterObjectsNVX_VkResult_return = vkEnc->vkUnregisterObjectsNVX(device, objectTable, objectCount, pObjectEntryTypes, pObjectIndices);
-    return vkUnregisterObjectsNVX_VkResult_return;
-}
-static void entry_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX(
-    VkPhysicalDevice physicalDevice,
-    VkDeviceGeneratedCommandsFeaturesNVX* pFeatures,
-    VkDeviceGeneratedCommandsLimitsNVX* pLimits)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX(physicalDevice, pFeatures, pLimits);
-}
-#endif
-#ifdef VK_NV_clip_space_w_scaling
-static void entry_vkCmdSetViewportWScalingNV(
-    VkCommandBuffer commandBuffer,
-    uint32_t firstViewport,
-    uint32_t viewportCount,
-    const VkViewportWScalingNV* pViewportWScalings)
-{
-    AEMU_SCOPED_TRACE("vkCmdSetViewportWScalingNV");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdSetViewportWScalingNV(commandBuffer, firstViewport, viewportCount, pViewportWScalings);
-}
-#endif
-#ifdef VK_EXT_direct_mode_display
-static VkResult entry_vkReleaseDisplayEXT(
-    VkPhysicalDevice physicalDevice,
-    VkDisplayKHR display)
-{
-    AEMU_SCOPED_TRACE("vkReleaseDisplayEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkReleaseDisplayEXT_VkResult_return = (VkResult)0;
-    vkReleaseDisplayEXT_VkResult_return = vkEnc->vkReleaseDisplayEXT(physicalDevice, display);
-    return vkReleaseDisplayEXT_VkResult_return;
-}
-#endif
-#ifdef VK_EXT_acquire_xlib_display
-static VkResult entry_vkAcquireXlibDisplayEXT(
-    VkPhysicalDevice physicalDevice,
-    Display* dpy,
-    VkDisplayKHR display)
-{
-    AEMU_SCOPED_TRACE("vkAcquireXlibDisplayEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkAcquireXlibDisplayEXT_VkResult_return = (VkResult)0;
-    vkAcquireXlibDisplayEXT_VkResult_return = vkEnc->vkAcquireXlibDisplayEXT(physicalDevice, dpy, display);
-    return vkAcquireXlibDisplayEXT_VkResult_return;
-}
-static VkResult entry_vkGetRandROutputDisplayEXT(
-    VkPhysicalDevice physicalDevice,
-    Display* dpy,
-    RROutput rrOutput,
-    VkDisplayKHR* pDisplay)
-{
-    AEMU_SCOPED_TRACE("vkGetRandROutputDisplayEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetRandROutputDisplayEXT_VkResult_return = (VkResult)0;
-    vkGetRandROutputDisplayEXT_VkResult_return = vkEnc->vkGetRandROutputDisplayEXT(physicalDevice, dpy, rrOutput, pDisplay);
-    return vkGetRandROutputDisplayEXT_VkResult_return;
-}
-#endif
-#ifdef VK_EXT_display_surface_counter
-static VkResult entry_vkGetPhysicalDeviceSurfaceCapabilities2EXT(
-    VkPhysicalDevice physicalDevice,
-    VkSurfaceKHR surface,
-    VkSurfaceCapabilities2EXT* pSurfaceCapabilities)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceCapabilities2EXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPhysicalDeviceSurfaceCapabilities2EXT_VkResult_return = (VkResult)0;
-    vkGetPhysicalDeviceSurfaceCapabilities2EXT_VkResult_return = vkEnc->vkGetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities);
-    return vkGetPhysicalDeviceSurfaceCapabilities2EXT_VkResult_return;
-}
-#endif
-#ifdef VK_EXT_display_control
-static VkResult entry_vkDisplayPowerControlEXT(
-    VkDevice device,
-    VkDisplayKHR display,
-    const VkDisplayPowerInfoEXT* pDisplayPowerInfo)
-{
-    AEMU_SCOPED_TRACE("vkDisplayPowerControlEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkDisplayPowerControlEXT_VkResult_return = (VkResult)0;
-    vkDisplayPowerControlEXT_VkResult_return = vkEnc->vkDisplayPowerControlEXT(device, display, pDisplayPowerInfo);
-    return vkDisplayPowerControlEXT_VkResult_return;
-}
-static VkResult dynCheck_entry_vkDisplayPowerControlEXT(
-    VkDevice device,
-    VkDisplayKHR display,
-    const VkDisplayPowerInfoEXT* pDisplayPowerInfo)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_EXT_display_control"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkDisplayPowerControlEXT", "VK_EXT_display_control");
-    }
-    AEMU_SCOPED_TRACE("vkDisplayPowerControlEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkDisplayPowerControlEXT_VkResult_return = (VkResult)0;
-    vkDisplayPowerControlEXT_VkResult_return = vkEnc->vkDisplayPowerControlEXT(device, display, pDisplayPowerInfo);
-    return vkDisplayPowerControlEXT_VkResult_return;
-}
-static VkResult entry_vkRegisterDeviceEventEXT(
-    VkDevice device,
-    const VkDeviceEventInfoEXT* pDeviceEventInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkFence* pFence)
-{
-    AEMU_SCOPED_TRACE("vkRegisterDeviceEventEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkRegisterDeviceEventEXT_VkResult_return = (VkResult)0;
-    vkRegisterDeviceEventEXT_VkResult_return = vkEnc->vkRegisterDeviceEventEXT(device, pDeviceEventInfo, pAllocator, pFence);
-    return vkRegisterDeviceEventEXT_VkResult_return;
-}
-static VkResult dynCheck_entry_vkRegisterDeviceEventEXT(
-    VkDevice device,
-    const VkDeviceEventInfoEXT* pDeviceEventInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkFence* pFence)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_EXT_display_control"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkRegisterDeviceEventEXT", "VK_EXT_display_control");
-    }
-    AEMU_SCOPED_TRACE("vkRegisterDeviceEventEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkRegisterDeviceEventEXT_VkResult_return = (VkResult)0;
-    vkRegisterDeviceEventEXT_VkResult_return = vkEnc->vkRegisterDeviceEventEXT(device, pDeviceEventInfo, pAllocator, pFence);
-    return vkRegisterDeviceEventEXT_VkResult_return;
-}
-static VkResult entry_vkRegisterDisplayEventEXT(
-    VkDevice device,
-    VkDisplayKHR display,
-    const VkDisplayEventInfoEXT* pDisplayEventInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkFence* pFence)
-{
-    AEMU_SCOPED_TRACE("vkRegisterDisplayEventEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkRegisterDisplayEventEXT_VkResult_return = (VkResult)0;
-    vkRegisterDisplayEventEXT_VkResult_return = vkEnc->vkRegisterDisplayEventEXT(device, display, pDisplayEventInfo, pAllocator, pFence);
-    return vkRegisterDisplayEventEXT_VkResult_return;
-}
-static VkResult dynCheck_entry_vkRegisterDisplayEventEXT(
-    VkDevice device,
-    VkDisplayKHR display,
-    const VkDisplayEventInfoEXT* pDisplayEventInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkFence* pFence)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_EXT_display_control"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkRegisterDisplayEventEXT", "VK_EXT_display_control");
-    }
-    AEMU_SCOPED_TRACE("vkRegisterDisplayEventEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkRegisterDisplayEventEXT_VkResult_return = (VkResult)0;
-    vkRegisterDisplayEventEXT_VkResult_return = vkEnc->vkRegisterDisplayEventEXT(device, display, pDisplayEventInfo, pAllocator, pFence);
-    return vkRegisterDisplayEventEXT_VkResult_return;
-}
-static VkResult entry_vkGetSwapchainCounterEXT(
-    VkDevice device,
-    VkSwapchainKHR swapchain,
-    VkSurfaceCounterFlagBitsEXT counter,
-    uint64_t* pCounterValue)
-{
-    AEMU_SCOPED_TRACE("vkGetSwapchainCounterEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetSwapchainCounterEXT_VkResult_return = (VkResult)0;
-    vkGetSwapchainCounterEXT_VkResult_return = vkEnc->vkGetSwapchainCounterEXT(device, swapchain, counter, pCounterValue);
-    return vkGetSwapchainCounterEXT_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetSwapchainCounterEXT(
-    VkDevice device,
-    VkSwapchainKHR swapchain,
-    VkSurfaceCounterFlagBitsEXT counter,
-    uint64_t* pCounterValue)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_EXT_display_control"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetSwapchainCounterEXT", "VK_EXT_display_control");
-    }
-    AEMU_SCOPED_TRACE("vkGetSwapchainCounterEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetSwapchainCounterEXT_VkResult_return = (VkResult)0;
-    vkGetSwapchainCounterEXT_VkResult_return = vkEnc->vkGetSwapchainCounterEXT(device, swapchain, counter, pCounterValue);
-    return vkGetSwapchainCounterEXT_VkResult_return;
-}
-#endif
-#ifdef VK_GOOGLE_display_timing
-static VkResult entry_vkGetRefreshCycleDurationGOOGLE(
-    VkDevice device,
-    VkSwapchainKHR swapchain,
-    VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetRefreshCycleDurationGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetRefreshCycleDurationGOOGLE_VkResult_return = (VkResult)0;
-    vkGetRefreshCycleDurationGOOGLE_VkResult_return = vkEnc->vkGetRefreshCycleDurationGOOGLE(device, swapchain, pDisplayTimingProperties);
-    return vkGetRefreshCycleDurationGOOGLE_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetRefreshCycleDurationGOOGLE(
-    VkDevice device,
-    VkSwapchainKHR swapchain,
-    VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_display_timing"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetRefreshCycleDurationGOOGLE", "VK_GOOGLE_display_timing");
-    }
-    AEMU_SCOPED_TRACE("vkGetRefreshCycleDurationGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetRefreshCycleDurationGOOGLE_VkResult_return = (VkResult)0;
-    vkGetRefreshCycleDurationGOOGLE_VkResult_return = vkEnc->vkGetRefreshCycleDurationGOOGLE(device, swapchain, pDisplayTimingProperties);
-    return vkGetRefreshCycleDurationGOOGLE_VkResult_return;
-}
-static VkResult entry_vkGetPastPresentationTimingGOOGLE(
-    VkDevice device,
-    VkSwapchainKHR swapchain,
-    uint32_t* pPresentationTimingCount,
-    VkPastPresentationTimingGOOGLE* pPresentationTimings)
-{
-    AEMU_SCOPED_TRACE("vkGetPastPresentationTimingGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPastPresentationTimingGOOGLE_VkResult_return = (VkResult)0;
-    vkGetPastPresentationTimingGOOGLE_VkResult_return = vkEnc->vkGetPastPresentationTimingGOOGLE(device, swapchain, pPresentationTimingCount, pPresentationTimings);
-    return vkGetPastPresentationTimingGOOGLE_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetPastPresentationTimingGOOGLE(
-    VkDevice device,
-    VkSwapchainKHR swapchain,
-    uint32_t* pPresentationTimingCount,
-    VkPastPresentationTimingGOOGLE* pPresentationTimings)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_display_timing"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetPastPresentationTimingGOOGLE", "VK_GOOGLE_display_timing");
-    }
-    AEMU_SCOPED_TRACE("vkGetPastPresentationTimingGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetPastPresentationTimingGOOGLE_VkResult_return = (VkResult)0;
-    vkGetPastPresentationTimingGOOGLE_VkResult_return = vkEnc->vkGetPastPresentationTimingGOOGLE(device, swapchain, pPresentationTimingCount, pPresentationTimings);
-    return vkGetPastPresentationTimingGOOGLE_VkResult_return;
-}
-#endif
-#ifdef VK_NV_sample_mask_override_coverage
-#endif
-#ifdef VK_NV_geometry_shader_passthrough
-#endif
-#ifdef VK_NV_viewport_array2
-#endif
-#ifdef VK_NVX_multiview_per_view_attributes
-#endif
-#ifdef VK_NV_viewport_swizzle
-#endif
-#ifdef VK_EXT_discard_rectangles
-static void entry_vkCmdSetDiscardRectangleEXT(
-    VkCommandBuffer commandBuffer,
-    uint32_t firstDiscardRectangle,
-    uint32_t discardRectangleCount,
-    const VkRect2D* pDiscardRectangles)
-{
-    AEMU_SCOPED_TRACE("vkCmdSetDiscardRectangleEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdSetDiscardRectangleEXT(commandBuffer, firstDiscardRectangle, discardRectangleCount, pDiscardRectangles);
-}
-#endif
-#ifdef VK_EXT_conservative_rasterization
-#endif
-#ifdef VK_EXT_swapchain_colorspace
-#endif
-#ifdef VK_EXT_hdr_metadata
-static void entry_vkSetHdrMetadataEXT(
-    VkDevice device,
-    uint32_t swapchainCount,
-    const VkSwapchainKHR* pSwapchains,
-    const VkHdrMetadataEXT* pMetadata)
-{
-    AEMU_SCOPED_TRACE("vkSetHdrMetadataEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkSetHdrMetadataEXT(device, swapchainCount, pSwapchains, pMetadata);
-}
-static void dynCheck_entry_vkSetHdrMetadataEXT(
-    VkDevice device,
-    uint32_t swapchainCount,
-    const VkSwapchainKHR* pSwapchains,
-    const VkHdrMetadataEXT* pMetadata)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_EXT_hdr_metadata"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkSetHdrMetadataEXT", "VK_EXT_hdr_metadata");
-    }
-    AEMU_SCOPED_TRACE("vkSetHdrMetadataEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkSetHdrMetadataEXT(device, swapchainCount, pSwapchains, pMetadata);
-}
-#endif
-#ifdef VK_MVK_ios_surface
-static VkResult entry_vkCreateIOSSurfaceMVK(
-    VkInstance instance,
-    const VkIOSSurfaceCreateInfoMVK* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
-{
-    AEMU_SCOPED_TRACE("vkCreateIOSSurfaceMVK");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateIOSSurfaceMVK_VkResult_return = (VkResult)0;
-    vkCreateIOSSurfaceMVK_VkResult_return = vkEnc->vkCreateIOSSurfaceMVK(instance, pCreateInfo, pAllocator, pSurface);
-    return vkCreateIOSSurfaceMVK_VkResult_return;
-}
-#endif
-#ifdef VK_MVK_macos_surface
-static VkResult entry_vkCreateMacOSSurfaceMVK(
-    VkInstance instance,
-    const VkMacOSSurfaceCreateInfoMVK* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
-{
-    AEMU_SCOPED_TRACE("vkCreateMacOSSurfaceMVK");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateMacOSSurfaceMVK_VkResult_return = (VkResult)0;
-    vkCreateMacOSSurfaceMVK_VkResult_return = vkEnc->vkCreateMacOSSurfaceMVK(instance, pCreateInfo, pAllocator, pSurface);
-    return vkCreateMacOSSurfaceMVK_VkResult_return;
-}
-#endif
-#ifdef VK_EXT_external_memory_dma_buf
-#endif
-#ifdef VK_EXT_queue_family_foreign
-#endif
-#ifdef VK_EXT_debug_utils
-static VkResult entry_vkSetDebugUtilsObjectNameEXT(
-    VkDevice device,
-    const VkDebugUtilsObjectNameInfoEXT* pNameInfo)
-{
-    AEMU_SCOPED_TRACE("vkSetDebugUtilsObjectNameEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkSetDebugUtilsObjectNameEXT_VkResult_return = (VkResult)0;
-    vkSetDebugUtilsObjectNameEXT_VkResult_return = vkEnc->vkSetDebugUtilsObjectNameEXT(device, pNameInfo);
-    return vkSetDebugUtilsObjectNameEXT_VkResult_return;
-}
-static VkResult dynCheck_entry_vkSetDebugUtilsObjectNameEXT(
-    VkDevice device,
-    const VkDebugUtilsObjectNameInfoEXT* pNameInfo)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_EXT_debug_utils"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkSetDebugUtilsObjectNameEXT", "VK_EXT_debug_utils");
-    }
-    AEMU_SCOPED_TRACE("vkSetDebugUtilsObjectNameEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkSetDebugUtilsObjectNameEXT_VkResult_return = (VkResult)0;
-    vkSetDebugUtilsObjectNameEXT_VkResult_return = vkEnc->vkSetDebugUtilsObjectNameEXT(device, pNameInfo);
-    return vkSetDebugUtilsObjectNameEXT_VkResult_return;
-}
-static VkResult entry_vkSetDebugUtilsObjectTagEXT(
-    VkDevice device,
-    const VkDebugUtilsObjectTagInfoEXT* pTagInfo)
-{
-    AEMU_SCOPED_TRACE("vkSetDebugUtilsObjectTagEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkSetDebugUtilsObjectTagEXT_VkResult_return = (VkResult)0;
-    vkSetDebugUtilsObjectTagEXT_VkResult_return = vkEnc->vkSetDebugUtilsObjectTagEXT(device, pTagInfo);
-    return vkSetDebugUtilsObjectTagEXT_VkResult_return;
-}
-static VkResult dynCheck_entry_vkSetDebugUtilsObjectTagEXT(
-    VkDevice device,
-    const VkDebugUtilsObjectTagInfoEXT* pTagInfo)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_EXT_debug_utils"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkSetDebugUtilsObjectTagEXT", "VK_EXT_debug_utils");
-    }
-    AEMU_SCOPED_TRACE("vkSetDebugUtilsObjectTagEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkSetDebugUtilsObjectTagEXT_VkResult_return = (VkResult)0;
-    vkSetDebugUtilsObjectTagEXT_VkResult_return = vkEnc->vkSetDebugUtilsObjectTagEXT(device, pTagInfo);
-    return vkSetDebugUtilsObjectTagEXT_VkResult_return;
-}
-static void entry_vkQueueBeginDebugUtilsLabelEXT(
-    VkQueue queue,
-    const VkDebugUtilsLabelEXT* pLabelInfo)
-{
-    AEMU_SCOPED_TRACE("vkQueueBeginDebugUtilsLabelEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkQueueBeginDebugUtilsLabelEXT(queue, pLabelInfo);
-}
-static void entry_vkQueueEndDebugUtilsLabelEXT(
-    VkQueue queue)
-{
-    AEMU_SCOPED_TRACE("vkQueueEndDebugUtilsLabelEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkQueueEndDebugUtilsLabelEXT(queue);
-}
-static void entry_vkQueueInsertDebugUtilsLabelEXT(
-    VkQueue queue,
-    const VkDebugUtilsLabelEXT* pLabelInfo)
-{
-    AEMU_SCOPED_TRACE("vkQueueInsertDebugUtilsLabelEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkQueueInsertDebugUtilsLabelEXT(queue, pLabelInfo);
-}
-static void entry_vkCmdBeginDebugUtilsLabelEXT(
-    VkCommandBuffer commandBuffer,
-    const VkDebugUtilsLabelEXT* pLabelInfo)
-{
-    AEMU_SCOPED_TRACE("vkCmdBeginDebugUtilsLabelEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdBeginDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
-}
-static void entry_vkCmdEndDebugUtilsLabelEXT(
-    VkCommandBuffer commandBuffer)
-{
-    AEMU_SCOPED_TRACE("vkCmdEndDebugUtilsLabelEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdEndDebugUtilsLabelEXT(commandBuffer);
-}
-static void entry_vkCmdInsertDebugUtilsLabelEXT(
-    VkCommandBuffer commandBuffer,
-    const VkDebugUtilsLabelEXT* pLabelInfo)
-{
-    AEMU_SCOPED_TRACE("vkCmdInsertDebugUtilsLabelEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdInsertDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
-}
-static VkResult entry_vkCreateDebugUtilsMessengerEXT(
-    VkInstance instance,
-    const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkDebugUtilsMessengerEXT* pMessenger)
-{
-    AEMU_SCOPED_TRACE("vkCreateDebugUtilsMessengerEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateDebugUtilsMessengerEXT_VkResult_return = (VkResult)0;
-    vkCreateDebugUtilsMessengerEXT_VkResult_return = vkEnc->vkCreateDebugUtilsMessengerEXT(instance, pCreateInfo, pAllocator, pMessenger);
-    return vkCreateDebugUtilsMessengerEXT_VkResult_return;
-}
-static void entry_vkDestroyDebugUtilsMessengerEXT(
-    VkInstance instance,
-    VkDebugUtilsMessengerEXT messenger,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyDebugUtilsMessengerEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyDebugUtilsMessengerEXT(instance, messenger, pAllocator);
-}
-static void entry_vkSubmitDebugUtilsMessageEXT(
-    VkInstance instance,
-    VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
-    VkDebugUtilsMessageTypeFlagsEXT messageTypes,
-    const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData)
-{
-    AEMU_SCOPED_TRACE("vkSubmitDebugUtilsMessageEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkSubmitDebugUtilsMessageEXT(instance, messageSeverity, messageTypes, pCallbackData);
-}
-#endif
-#ifdef VK_ANDROID_external_memory_android_hardware_buffer
-static VkResult entry_vkGetAndroidHardwareBufferPropertiesANDROID(
-    VkDevice device,
-    const AHardwareBuffer* buffer,
-    VkAndroidHardwareBufferPropertiesANDROID* pProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetAndroidHardwareBufferPropertiesANDROID");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetAndroidHardwareBufferPropertiesANDROID_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkGetAndroidHardwareBufferPropertiesANDROID_VkResult_return = resources->on_vkGetAndroidHardwareBufferPropertiesANDROID(vkEnc, VK_SUCCESS, device, buffer, pProperties);
-    return vkGetAndroidHardwareBufferPropertiesANDROID_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetAndroidHardwareBufferPropertiesANDROID(
-    VkDevice device,
-    const AHardwareBuffer* buffer,
-    VkAndroidHardwareBufferPropertiesANDROID* pProperties)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_ANDROID_external_memory_android_hardware_buffer"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetAndroidHardwareBufferPropertiesANDROID", "VK_ANDROID_external_memory_android_hardware_buffer");
-    }
-    AEMU_SCOPED_TRACE("vkGetAndroidHardwareBufferPropertiesANDROID");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetAndroidHardwareBufferPropertiesANDROID_VkResult_return = (VkResult)0;
-    vkGetAndroidHardwareBufferPropertiesANDROID_VkResult_return = resources->on_vkGetAndroidHardwareBufferPropertiesANDROID(vkEnc, VK_SUCCESS, device, buffer, pProperties);
-    return vkGetAndroidHardwareBufferPropertiesANDROID_VkResult_return;
-}
-static VkResult entry_vkGetMemoryAndroidHardwareBufferANDROID(
-    VkDevice device,
-    const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
-    AHardwareBuffer** pBuffer)
-{
-    AEMU_SCOPED_TRACE("vkGetMemoryAndroidHardwareBufferANDROID");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetMemoryAndroidHardwareBufferANDROID_VkResult_return = (VkResult)0;
-    auto resources = ResourceTracker::get();
-    vkGetMemoryAndroidHardwareBufferANDROID_VkResult_return = resources->on_vkGetMemoryAndroidHardwareBufferANDROID(vkEnc, VK_SUCCESS, device, pInfo, pBuffer);
-    return vkGetMemoryAndroidHardwareBufferANDROID_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetMemoryAndroidHardwareBufferANDROID(
-    VkDevice device,
-    const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
-    AHardwareBuffer** pBuffer)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_ANDROID_external_memory_android_hardware_buffer"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetMemoryAndroidHardwareBufferANDROID", "VK_ANDROID_external_memory_android_hardware_buffer");
-    }
-    AEMU_SCOPED_TRACE("vkGetMemoryAndroidHardwareBufferANDROID");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetMemoryAndroidHardwareBufferANDROID_VkResult_return = (VkResult)0;
-    vkGetMemoryAndroidHardwareBufferANDROID_VkResult_return = resources->on_vkGetMemoryAndroidHardwareBufferANDROID(vkEnc, VK_SUCCESS, device, pInfo, pBuffer);
-    return vkGetMemoryAndroidHardwareBufferANDROID_VkResult_return;
-}
-#endif
-#ifdef VK_EXT_sampler_filter_minmax
-#endif
-#ifdef VK_AMD_gpu_shader_int16
-#endif
-#ifdef VK_AMD_mixed_attachment_samples
-#endif
-#ifdef VK_AMD_shader_fragment_mask
-#endif
-#ifdef VK_EXT_shader_stencil_export
-#endif
-#ifdef VK_EXT_sample_locations
-static void entry_vkCmdSetSampleLocationsEXT(
-    VkCommandBuffer commandBuffer,
-    const VkSampleLocationsInfoEXT* pSampleLocationsInfo)
-{
-    AEMU_SCOPED_TRACE("vkCmdSetSampleLocationsEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdSetSampleLocationsEXT(commandBuffer, pSampleLocationsInfo);
-}
-static void entry_vkGetPhysicalDeviceMultisamplePropertiesEXT(
-    VkPhysicalDevice physicalDevice,
-    VkSampleCountFlagBits samples,
-    VkMultisamplePropertiesEXT* pMultisampleProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMultisamplePropertiesEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetPhysicalDeviceMultisamplePropertiesEXT(physicalDevice, samples, pMultisampleProperties);
-}
-#endif
-#ifdef VK_EXT_blend_operation_advanced
-#endif
-#ifdef VK_NV_fragment_coverage_to_color
-#endif
-#ifdef VK_NV_framebuffer_mixed_samples
-#endif
-#ifdef VK_NV_fill_rectangle
-#endif
-#ifdef VK_EXT_post_depth_coverage
-#endif
-#ifdef VK_EXT_validation_cache
-static VkResult entry_vkCreateValidationCacheEXT(
-    VkDevice device,
-    const VkValidationCacheCreateInfoEXT* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkValidationCacheEXT* pValidationCache)
-{
-    AEMU_SCOPED_TRACE("vkCreateValidationCacheEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateValidationCacheEXT_VkResult_return = (VkResult)0;
-    vkCreateValidationCacheEXT_VkResult_return = vkEnc->vkCreateValidationCacheEXT(device, pCreateInfo, pAllocator, pValidationCache);
-    return vkCreateValidationCacheEXT_VkResult_return;
-}
-static VkResult dynCheck_entry_vkCreateValidationCacheEXT(
-    VkDevice device,
-    const VkValidationCacheCreateInfoEXT* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkValidationCacheEXT* pValidationCache)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_EXT_validation_cache"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkCreateValidationCacheEXT", "VK_EXT_validation_cache");
-    }
-    AEMU_SCOPED_TRACE("vkCreateValidationCacheEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateValidationCacheEXT_VkResult_return = (VkResult)0;
-    vkCreateValidationCacheEXT_VkResult_return = vkEnc->vkCreateValidationCacheEXT(device, pCreateInfo, pAllocator, pValidationCache);
-    return vkCreateValidationCacheEXT_VkResult_return;
-}
-static void entry_vkDestroyValidationCacheEXT(
-    VkDevice device,
-    VkValidationCacheEXT validationCache,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkDestroyValidationCacheEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyValidationCacheEXT(device, validationCache, pAllocator);
-}
-static void dynCheck_entry_vkDestroyValidationCacheEXT(
-    VkDevice device,
-    VkValidationCacheEXT validationCache,
-    const VkAllocationCallbacks* pAllocator)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_EXT_validation_cache"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkDestroyValidationCacheEXT", "VK_EXT_validation_cache");
-    }
-    AEMU_SCOPED_TRACE("vkDestroyValidationCacheEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkDestroyValidationCacheEXT(device, validationCache, pAllocator);
-}
-static VkResult entry_vkMergeValidationCachesEXT(
-    VkDevice device,
-    VkValidationCacheEXT dstCache,
-    uint32_t srcCacheCount,
-    const VkValidationCacheEXT* pSrcCaches)
-{
-    AEMU_SCOPED_TRACE("vkMergeValidationCachesEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkMergeValidationCachesEXT_VkResult_return = (VkResult)0;
-    vkMergeValidationCachesEXT_VkResult_return = vkEnc->vkMergeValidationCachesEXT(device, dstCache, srcCacheCount, pSrcCaches);
-    return vkMergeValidationCachesEXT_VkResult_return;
-}
-static VkResult dynCheck_entry_vkMergeValidationCachesEXT(
-    VkDevice device,
-    VkValidationCacheEXT dstCache,
-    uint32_t srcCacheCount,
-    const VkValidationCacheEXT* pSrcCaches)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_EXT_validation_cache"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkMergeValidationCachesEXT", "VK_EXT_validation_cache");
-    }
-    AEMU_SCOPED_TRACE("vkMergeValidationCachesEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkMergeValidationCachesEXT_VkResult_return = (VkResult)0;
-    vkMergeValidationCachesEXT_VkResult_return = vkEnc->vkMergeValidationCachesEXT(device, dstCache, srcCacheCount, pSrcCaches);
-    return vkMergeValidationCachesEXT_VkResult_return;
-}
-static VkResult entry_vkGetValidationCacheDataEXT(
-    VkDevice device,
-    VkValidationCacheEXT validationCache,
-    size_t* pDataSize,
-    void* pData)
-{
-    AEMU_SCOPED_TRACE("vkGetValidationCacheDataEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetValidationCacheDataEXT_VkResult_return = (VkResult)0;
-    vkGetValidationCacheDataEXT_VkResult_return = vkEnc->vkGetValidationCacheDataEXT(device, validationCache, pDataSize, pData);
-    return vkGetValidationCacheDataEXT_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetValidationCacheDataEXT(
-    VkDevice device,
-    VkValidationCacheEXT validationCache,
-    size_t* pDataSize,
-    void* pData)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_EXT_validation_cache"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetValidationCacheDataEXT", "VK_EXT_validation_cache");
-    }
-    AEMU_SCOPED_TRACE("vkGetValidationCacheDataEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetValidationCacheDataEXT_VkResult_return = (VkResult)0;
-    vkGetValidationCacheDataEXT_VkResult_return = vkEnc->vkGetValidationCacheDataEXT(device, validationCache, pDataSize, pData);
-    return vkGetValidationCacheDataEXT_VkResult_return;
-}
-#endif
-#ifdef VK_EXT_descriptor_indexing
-#endif
-#ifdef VK_EXT_shader_viewport_index_layer
-#endif
-#ifdef VK_EXT_global_priority
-#endif
-#ifdef VK_EXT_external_memory_host
-static VkResult entry_vkGetMemoryHostPointerPropertiesEXT(
-    VkDevice device,
-    VkExternalMemoryHandleTypeFlagBits handleType,
-    const void* pHostPointer,
-    VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties)
-{
-    AEMU_SCOPED_TRACE("vkGetMemoryHostPointerPropertiesEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetMemoryHostPointerPropertiesEXT_VkResult_return = (VkResult)0;
-    vkGetMemoryHostPointerPropertiesEXT_VkResult_return = vkEnc->vkGetMemoryHostPointerPropertiesEXT(device, handleType, pHostPointer, pMemoryHostPointerProperties);
-    return vkGetMemoryHostPointerPropertiesEXT_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetMemoryHostPointerPropertiesEXT(
-    VkDevice device,
-    VkExternalMemoryHandleTypeFlagBits handleType,
-    const void* pHostPointer,
-    VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_EXT_external_memory_host"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetMemoryHostPointerPropertiesEXT", "VK_EXT_external_memory_host");
-    }
-    AEMU_SCOPED_TRACE("vkGetMemoryHostPointerPropertiesEXT");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetMemoryHostPointerPropertiesEXT_VkResult_return = (VkResult)0;
-    vkGetMemoryHostPointerPropertiesEXT_VkResult_return = vkEnc->vkGetMemoryHostPointerPropertiesEXT(device, handleType, pHostPointer, pMemoryHostPointerProperties);
-    return vkGetMemoryHostPointerPropertiesEXT_VkResult_return;
-}
-#endif
-#ifdef VK_AMD_buffer_marker
-static void entry_vkCmdWriteBufferMarkerAMD(
-    VkCommandBuffer commandBuffer,
-    VkPipelineStageFlagBits pipelineStage,
-    VkBuffer dstBuffer,
-    VkDeviceSize dstOffset,
-    uint32_t marker)
-{
-    AEMU_SCOPED_TRACE("vkCmdWriteBufferMarkerAMD");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdWriteBufferMarkerAMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker);
-}
-#endif
-#ifdef VK_AMD_shader_core_properties
-#endif
-#ifdef VK_EXT_vertex_attribute_divisor
-#endif
-#ifdef VK_NV_shader_subgroup_partitioned
-#endif
-#ifdef VK_NV_device_diagnostic_checkpoints
-static void entry_vkCmdSetCheckpointNV(
-    VkCommandBuffer commandBuffer,
-    const void* pCheckpointMarker)
-{
-    AEMU_SCOPED_TRACE("vkCmdSetCheckpointNV");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCmdSetCheckpointNV(commandBuffer, pCheckpointMarker);
-}
-static void entry_vkGetQueueCheckpointDataNV(
-    VkQueue queue,
-    uint32_t* pCheckpointDataCount,
-    VkCheckpointDataNV* pCheckpointData)
-{
-    AEMU_SCOPED_TRACE("vkGetQueueCheckpointDataNV");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkGetQueueCheckpointDataNV(queue, pCheckpointDataCount, pCheckpointData);
-}
-#endif
-#ifdef VK_GOOGLE_address_space
-static VkResult entry_vkMapMemoryIntoAddressSpaceGOOGLE(
-    VkDevice device,
-    VkDeviceMemory memory,
-    uint64_t* pAddress)
-{
-    AEMU_SCOPED_TRACE("vkMapMemoryIntoAddressSpaceGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkMapMemoryIntoAddressSpaceGOOGLE_VkResult_return = (VkResult)0;
-    vkMapMemoryIntoAddressSpaceGOOGLE_VkResult_return = vkEnc->vkMapMemoryIntoAddressSpaceGOOGLE(device, memory, pAddress);
-    return vkMapMemoryIntoAddressSpaceGOOGLE_VkResult_return;
-}
-static VkResult dynCheck_entry_vkMapMemoryIntoAddressSpaceGOOGLE(
-    VkDevice device,
-    VkDeviceMemory memory,
-    uint64_t* pAddress)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_address_space"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkMapMemoryIntoAddressSpaceGOOGLE", "VK_GOOGLE_address_space");
-    }
-    AEMU_SCOPED_TRACE("vkMapMemoryIntoAddressSpaceGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkMapMemoryIntoAddressSpaceGOOGLE_VkResult_return = (VkResult)0;
-    vkMapMemoryIntoAddressSpaceGOOGLE_VkResult_return = vkEnc->vkMapMemoryIntoAddressSpaceGOOGLE(device, memory, pAddress);
-    return vkMapMemoryIntoAddressSpaceGOOGLE_VkResult_return;
-}
-#endif
-#ifdef VK_GOOGLE_color_buffer
-static VkResult entry_vkRegisterImageColorBufferGOOGLE(
-    VkDevice device,
-    VkImage image,
-    uint32_t colorBuffer)
-{
-    AEMU_SCOPED_TRACE("vkRegisterImageColorBufferGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkRegisterImageColorBufferGOOGLE_VkResult_return = (VkResult)0;
-    vkRegisterImageColorBufferGOOGLE_VkResult_return = vkEnc->vkRegisterImageColorBufferGOOGLE(device, image, colorBuffer);
-    return vkRegisterImageColorBufferGOOGLE_VkResult_return;
-}
-static VkResult dynCheck_entry_vkRegisterImageColorBufferGOOGLE(
-    VkDevice device,
-    VkImage image,
-    uint32_t colorBuffer)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_color_buffer"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkRegisterImageColorBufferGOOGLE", "VK_GOOGLE_color_buffer");
-    }
-    AEMU_SCOPED_TRACE("vkRegisterImageColorBufferGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkRegisterImageColorBufferGOOGLE_VkResult_return = (VkResult)0;
-    vkRegisterImageColorBufferGOOGLE_VkResult_return = vkEnc->vkRegisterImageColorBufferGOOGLE(device, image, colorBuffer);
-    return vkRegisterImageColorBufferGOOGLE_VkResult_return;
-}
-static VkResult entry_vkRegisterBufferColorBufferGOOGLE(
-    VkDevice device,
-    VkBuffer buffer,
-    uint32_t colorBuffer)
-{
-    AEMU_SCOPED_TRACE("vkRegisterBufferColorBufferGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkRegisterBufferColorBufferGOOGLE_VkResult_return = (VkResult)0;
-    vkRegisterBufferColorBufferGOOGLE_VkResult_return = vkEnc->vkRegisterBufferColorBufferGOOGLE(device, buffer, colorBuffer);
-    return vkRegisterBufferColorBufferGOOGLE_VkResult_return;
-}
-static VkResult dynCheck_entry_vkRegisterBufferColorBufferGOOGLE(
-    VkDevice device,
-    VkBuffer buffer,
-    uint32_t colorBuffer)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_color_buffer"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkRegisterBufferColorBufferGOOGLE", "VK_GOOGLE_color_buffer");
-    }
-    AEMU_SCOPED_TRACE("vkRegisterBufferColorBufferGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkRegisterBufferColorBufferGOOGLE_VkResult_return = (VkResult)0;
-    vkRegisterBufferColorBufferGOOGLE_VkResult_return = vkEnc->vkRegisterBufferColorBufferGOOGLE(device, buffer, colorBuffer);
-    return vkRegisterBufferColorBufferGOOGLE_VkResult_return;
-}
-#endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
-static void entry_vkUpdateDescriptorSetWithTemplateSizedGOOGLE(
-    VkDevice device,
-    VkDescriptorSet descriptorSet,
-    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-    uint32_t imageInfoCount,
-    uint32_t bufferInfoCount,
-    uint32_t bufferViewCount,
-    const uint32_t* pImageInfoEntryIndices,
-    const uint32_t* pBufferInfoEntryIndices,
-    const uint32_t* pBufferViewEntryIndices,
-    const VkDescriptorImageInfo* pImageInfos,
-    const VkDescriptorBufferInfo* pBufferInfos,
-    const VkBufferView* pBufferViews)
-{
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplateSizedGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkUpdateDescriptorSetWithTemplateSizedGOOGLE(device, descriptorSet, descriptorUpdateTemplate, imageInfoCount, bufferInfoCount, bufferViewCount, pImageInfoEntryIndices, pBufferInfoEntryIndices, pBufferViewEntryIndices, pImageInfos, pBufferInfos, pBufferViews);
-}
-static void dynCheck_entry_vkUpdateDescriptorSetWithTemplateSizedGOOGLE(
-    VkDevice device,
-    VkDescriptorSet descriptorSet,
-    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-    uint32_t imageInfoCount,
-    uint32_t bufferInfoCount,
-    uint32_t bufferViewCount,
-    const uint32_t* pImageInfoEntryIndices,
-    const uint32_t* pBufferInfoEntryIndices,
-    const uint32_t* pBufferViewEntryIndices,
-    const VkDescriptorImageInfo* pImageInfos,
-    const VkDescriptorBufferInfo* pBufferInfos,
-    const VkBufferView* pBufferViews)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_sized_descriptor_update_template"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkUpdateDescriptorSetWithTemplateSizedGOOGLE", "VK_GOOGLE_sized_descriptor_update_template");
-    }
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplateSizedGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    vkEnc->vkUpdateDescriptorSetWithTemplateSizedGOOGLE(device, descriptorSet, descriptorUpdateTemplate, imageInfoCount, bufferInfoCount, bufferViewCount, pImageInfoEntryIndices, pBufferInfoEntryIndices, pBufferViewEntryIndices, pImageInfos, pBufferInfos, pBufferViews);
-}
-#endif
-#ifdef VK_GOOGLE_async_command_buffers
-static void entry_vkBeginCommandBufferAsyncGOOGLE(
-    VkCommandBuffer commandBuffer,
-    const VkCommandBufferBeginInfo* pBeginInfo)
-{
-    AEMU_SCOPED_TRACE("vkBeginCommandBufferAsyncGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkBeginCommandBufferAsyncGOOGLE(commandBuffer, pBeginInfo);
-}
-static void entry_vkEndCommandBufferAsyncGOOGLE(
-    VkCommandBuffer commandBuffer)
-{
-    AEMU_SCOPED_TRACE("vkEndCommandBufferAsyncGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkEndCommandBufferAsyncGOOGLE(commandBuffer);
-}
-static void entry_vkResetCommandBufferAsyncGOOGLE(
-    VkCommandBuffer commandBuffer,
-    VkCommandBufferResetFlags flags)
-{
-    AEMU_SCOPED_TRACE("vkResetCommandBufferAsyncGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkResetCommandBufferAsyncGOOGLE(commandBuffer, flags);
-}
-static void entry_vkCommandBufferHostSyncGOOGLE(
-    VkCommandBuffer commandBuffer,
-    uint32_t needHostSync,
-    uint32_t sequenceNumber)
-{
-    AEMU_SCOPED_TRACE("vkCommandBufferHostSyncGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, vkEnc);
-    vkEnc->vkCommandBufferHostSyncGOOGLE(commandBuffer, needHostSync, sequenceNumber);
-}
-#endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
-static VkResult entry_vkCreateImageWithRequirementsGOOGLE(
-    VkDevice device,
-    const VkImageCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkImage* pImage,
-    VkMemoryRequirements* pMemoryRequirements)
-{
-    AEMU_SCOPED_TRACE("vkCreateImageWithRequirementsGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateImageWithRequirementsGOOGLE_VkResult_return = (VkResult)0;
-    vkCreateImageWithRequirementsGOOGLE_VkResult_return = vkEnc->vkCreateImageWithRequirementsGOOGLE(device, pCreateInfo, pAllocator, pImage, pMemoryRequirements);
-    return vkCreateImageWithRequirementsGOOGLE_VkResult_return;
-}
-static VkResult dynCheck_entry_vkCreateImageWithRequirementsGOOGLE(
-    VkDevice device,
-    const VkImageCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkImage* pImage,
-    VkMemoryRequirements* pMemoryRequirements)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_create_resources_with_requirements"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkCreateImageWithRequirementsGOOGLE", "VK_GOOGLE_create_resources_with_requirements");
-    }
-    AEMU_SCOPED_TRACE("vkCreateImageWithRequirementsGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateImageWithRequirementsGOOGLE_VkResult_return = (VkResult)0;
-    vkCreateImageWithRequirementsGOOGLE_VkResult_return = vkEnc->vkCreateImageWithRequirementsGOOGLE(device, pCreateInfo, pAllocator, pImage, pMemoryRequirements);
-    return vkCreateImageWithRequirementsGOOGLE_VkResult_return;
-}
-static VkResult entry_vkCreateBufferWithRequirementsGOOGLE(
-    VkDevice device,
-    const VkBufferCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkBuffer* pBuffer,
-    VkMemoryRequirements* pMemoryRequirements)
-{
-    AEMU_SCOPED_TRACE("vkCreateBufferWithRequirementsGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateBufferWithRequirementsGOOGLE_VkResult_return = (VkResult)0;
-    vkCreateBufferWithRequirementsGOOGLE_VkResult_return = vkEnc->vkCreateBufferWithRequirementsGOOGLE(device, pCreateInfo, pAllocator, pBuffer, pMemoryRequirements);
-    return vkCreateBufferWithRequirementsGOOGLE_VkResult_return;
-}
-static VkResult dynCheck_entry_vkCreateBufferWithRequirementsGOOGLE(
-    VkDevice device,
-    const VkBufferCreateInfo* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkBuffer* pBuffer,
-    VkMemoryRequirements* pMemoryRequirements)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_create_resources_with_requirements"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkCreateBufferWithRequirementsGOOGLE", "VK_GOOGLE_create_resources_with_requirements");
-    }
-    AEMU_SCOPED_TRACE("vkCreateBufferWithRequirementsGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkCreateBufferWithRequirementsGOOGLE_VkResult_return = (VkResult)0;
-    vkCreateBufferWithRequirementsGOOGLE_VkResult_return = vkEnc->vkCreateBufferWithRequirementsGOOGLE(device, pCreateInfo, pAllocator, pBuffer, pMemoryRequirements);
-    return vkCreateBufferWithRequirementsGOOGLE_VkResult_return;
-}
-#endif
-#ifdef VK_GOOGLE_address_space_info
-static VkResult entry_vkGetMemoryHostAddressInfoGOOGLE(
-    VkDevice device,
-    VkDeviceMemory memory,
-    uint64_t* pAddress,
-    uint64_t* pSize,
-    uint64_t* pHostmemId)
-{
-    AEMU_SCOPED_TRACE("vkGetMemoryHostAddressInfoGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetMemoryHostAddressInfoGOOGLE_VkResult_return = (VkResult)0;
-    vkGetMemoryHostAddressInfoGOOGLE_VkResult_return = vkEnc->vkGetMemoryHostAddressInfoGOOGLE(device, memory, pAddress, pSize, pHostmemId);
-    return vkGetMemoryHostAddressInfoGOOGLE_VkResult_return;
-}
-static VkResult dynCheck_entry_vkGetMemoryHostAddressInfoGOOGLE(
-    VkDevice device,
-    VkDeviceMemory memory,
-    uint64_t* pAddress,
-    uint64_t* pSize,
-    uint64_t* pHostmemId)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_address_space_info"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkGetMemoryHostAddressInfoGOOGLE", "VK_GOOGLE_address_space_info");
-    }
-    AEMU_SCOPED_TRACE("vkGetMemoryHostAddressInfoGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkGetMemoryHostAddressInfoGOOGLE_VkResult_return = (VkResult)0;
-    vkGetMemoryHostAddressInfoGOOGLE_VkResult_return = vkEnc->vkGetMemoryHostAddressInfoGOOGLE(device, memory, pAddress, pSize, pHostmemId);
-    return vkGetMemoryHostAddressInfoGOOGLE_VkResult_return;
-}
-#endif
-#ifdef VK_GOOGLE_free_memory_sync
-static VkResult entry_vkFreeMemorySyncGOOGLE(
-    VkDevice device,
-    VkDeviceMemory memory,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AEMU_SCOPED_TRACE("vkFreeMemorySyncGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkFreeMemorySyncGOOGLE_VkResult_return = (VkResult)0;
-    vkFreeMemorySyncGOOGLE_VkResult_return = vkEnc->vkFreeMemorySyncGOOGLE(device, memory, pAllocator);
-    return vkFreeMemorySyncGOOGLE_VkResult_return;
-}
-static VkResult dynCheck_entry_vkFreeMemorySyncGOOGLE(
-    VkDevice device,
-    VkDeviceMemory memory,
-    const VkAllocationCallbacks* pAllocator)
-{
-    auto resources = ResourceTracker::get();
-    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_free_memory_sync"))
-    {
-        sOnInvalidDynamicallyCheckedCall("vkFreeMemorySyncGOOGLE", "VK_GOOGLE_free_memory_sync");
-    }
-    AEMU_SCOPED_TRACE("vkFreeMemorySyncGOOGLE");
-    auto vkEnc = HostConnection::get()->vkEncoder();
-    VkResult vkFreeMemorySyncGOOGLE_VkResult_return = (VkResult)0;
-    vkFreeMemorySyncGOOGLE_VkResult_return = vkEnc->vkFreeMemorySyncGOOGLE(device, memory, pAllocator);
-    return vkFreeMemorySyncGOOGLE_VkResult_return;
-}
-#endif
-void* goldfish_vulkan_get_proc_address(const char* name){
-#ifdef VK_VERSION_1_0
-    if (!strcmp(name, "vkCreateInstance"))
-    {
-        return (void*)entry_vkCreateInstance;
-    }
-    if (!strcmp(name, "vkDestroyInstance"))
-    {
-        return (void*)entry_vkDestroyInstance;
-    }
-    if (!strcmp(name, "vkEnumeratePhysicalDevices"))
-    {
-        return (void*)entry_vkEnumeratePhysicalDevices;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceFeatures"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceFeatures;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceFormatProperties;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceImageFormatProperties;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceProperties;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceQueueFamilyProperties;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceMemoryProperties;
-    }
-    if (!strcmp(name, "vkGetInstanceProcAddr"))
-    {
-        return (void*)entry_vkGetInstanceProcAddr;
-    }
-    if (!strcmp(name, "vkGetDeviceProcAddr"))
-    {
-        return (void*)entry_vkGetDeviceProcAddr;
-    }
-    if (!strcmp(name, "vkCreateDevice"))
-    {
-        return (void*)entry_vkCreateDevice;
-    }
-    if (!strcmp(name, "vkDestroyDevice"))
-    {
-        return (void*)entry_vkDestroyDevice;
-    }
-    if (!strcmp(name, "vkEnumerateInstanceExtensionProperties"))
-    {
-        return (void*)entry_vkEnumerateInstanceExtensionProperties;
-    }
-    if (!strcmp(name, "vkEnumerateDeviceExtensionProperties"))
-    {
-        return (void*)entry_vkEnumerateDeviceExtensionProperties;
-    }
-    if (!strcmp(name, "vkEnumerateInstanceLayerProperties"))
-    {
-        return (void*)entry_vkEnumerateInstanceLayerProperties;
-    }
-    if (!strcmp(name, "vkEnumerateDeviceLayerProperties"))
-    {
-        return (void*)entry_vkEnumerateDeviceLayerProperties;
-    }
-    if (!strcmp(name, "vkGetDeviceQueue"))
-    {
-        return (void*)entry_vkGetDeviceQueue;
-    }
-    if (!strcmp(name, "vkQueueSubmit"))
-    {
-        return (void*)entry_vkQueueSubmit;
-    }
-    if (!strcmp(name, "vkQueueWaitIdle"))
-    {
-        return (void*)entry_vkQueueWaitIdle;
-    }
-    if (!strcmp(name, "vkDeviceWaitIdle"))
-    {
-        return (void*)entry_vkDeviceWaitIdle;
-    }
-    if (!strcmp(name, "vkAllocateMemory"))
-    {
-        return (void*)entry_vkAllocateMemory;
-    }
-    if (!strcmp(name, "vkFreeMemory"))
-    {
-        return (void*)entry_vkFreeMemory;
-    }
-    if (!strcmp(name, "vkMapMemory"))
-    {
-        return (void*)entry_vkMapMemory;
-    }
-    if (!strcmp(name, "vkUnmapMemory"))
-    {
-        return (void*)entry_vkUnmapMemory;
-    }
-    if (!strcmp(name, "vkFlushMappedMemoryRanges"))
-    {
-        return (void*)entry_vkFlushMappedMemoryRanges;
-    }
-    if (!strcmp(name, "vkInvalidateMappedMemoryRanges"))
-    {
-        return (void*)entry_vkInvalidateMappedMemoryRanges;
-    }
-    if (!strcmp(name, "vkGetDeviceMemoryCommitment"))
-    {
-        return (void*)entry_vkGetDeviceMemoryCommitment;
-    }
-    if (!strcmp(name, "vkBindBufferMemory"))
-    {
-        return (void*)entry_vkBindBufferMemory;
-    }
-    if (!strcmp(name, "vkBindImageMemory"))
-    {
-        return (void*)entry_vkBindImageMemory;
-    }
-    if (!strcmp(name, "vkGetBufferMemoryRequirements"))
-    {
-        return (void*)entry_vkGetBufferMemoryRequirements;
-    }
-    if (!strcmp(name, "vkGetImageMemoryRequirements"))
-    {
-        return (void*)entry_vkGetImageMemoryRequirements;
-    }
-    if (!strcmp(name, "vkGetImageSparseMemoryRequirements"))
-    {
-        return (void*)entry_vkGetImageSparseMemoryRequirements;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceSparseImageFormatProperties;
-    }
-    if (!strcmp(name, "vkQueueBindSparse"))
-    {
-        return (void*)entry_vkQueueBindSparse;
-    }
-    if (!strcmp(name, "vkCreateFence"))
-    {
-        return (void*)entry_vkCreateFence;
-    }
-    if (!strcmp(name, "vkDestroyFence"))
-    {
-        return (void*)entry_vkDestroyFence;
-    }
-    if (!strcmp(name, "vkResetFences"))
-    {
-        return (void*)entry_vkResetFences;
-    }
-    if (!strcmp(name, "vkGetFenceStatus"))
-    {
-        return (void*)entry_vkGetFenceStatus;
-    }
-    if (!strcmp(name, "vkWaitForFences"))
-    {
-        return (void*)entry_vkWaitForFences;
-    }
-    if (!strcmp(name, "vkCreateSemaphore"))
-    {
-        return (void*)entry_vkCreateSemaphore;
-    }
-    if (!strcmp(name, "vkDestroySemaphore"))
-    {
-        return (void*)entry_vkDestroySemaphore;
-    }
-    if (!strcmp(name, "vkCreateEvent"))
-    {
-        return (void*)entry_vkCreateEvent;
-    }
-    if (!strcmp(name, "vkDestroyEvent"))
-    {
-        return (void*)entry_vkDestroyEvent;
-    }
-    if (!strcmp(name, "vkGetEventStatus"))
-    {
-        return (void*)entry_vkGetEventStatus;
-    }
-    if (!strcmp(name, "vkSetEvent"))
-    {
-        return (void*)entry_vkSetEvent;
-    }
-    if (!strcmp(name, "vkResetEvent"))
-    {
-        return (void*)entry_vkResetEvent;
-    }
-    if (!strcmp(name, "vkCreateQueryPool"))
-    {
-        return (void*)entry_vkCreateQueryPool;
-    }
-    if (!strcmp(name, "vkDestroyQueryPool"))
-    {
-        return (void*)entry_vkDestroyQueryPool;
-    }
-    if (!strcmp(name, "vkGetQueryPoolResults"))
-    {
-        return (void*)entry_vkGetQueryPoolResults;
-    }
-    if (!strcmp(name, "vkCreateBuffer"))
-    {
-        return (void*)entry_vkCreateBuffer;
-    }
-    if (!strcmp(name, "vkDestroyBuffer"))
-    {
-        return (void*)entry_vkDestroyBuffer;
-    }
-    if (!strcmp(name, "vkCreateBufferView"))
-    {
-        return (void*)entry_vkCreateBufferView;
-    }
-    if (!strcmp(name, "vkDestroyBufferView"))
-    {
-        return (void*)entry_vkDestroyBufferView;
-    }
-    if (!strcmp(name, "vkCreateImage"))
-    {
-        return (void*)entry_vkCreateImage;
-    }
-    if (!strcmp(name, "vkDestroyImage"))
-    {
-        return (void*)entry_vkDestroyImage;
-    }
-    if (!strcmp(name, "vkGetImageSubresourceLayout"))
-    {
-        return (void*)entry_vkGetImageSubresourceLayout;
-    }
-    if (!strcmp(name, "vkCreateImageView"))
-    {
-        return (void*)entry_vkCreateImageView;
-    }
-    if (!strcmp(name, "vkDestroyImageView"))
-    {
-        return (void*)entry_vkDestroyImageView;
-    }
-    if (!strcmp(name, "vkCreateShaderModule"))
-    {
-        return (void*)entry_vkCreateShaderModule;
-    }
-    if (!strcmp(name, "vkDestroyShaderModule"))
-    {
-        return (void*)entry_vkDestroyShaderModule;
-    }
-    if (!strcmp(name, "vkCreatePipelineCache"))
-    {
-        return (void*)entry_vkCreatePipelineCache;
-    }
-    if (!strcmp(name, "vkDestroyPipelineCache"))
-    {
-        return (void*)entry_vkDestroyPipelineCache;
-    }
-    if (!strcmp(name, "vkGetPipelineCacheData"))
-    {
-        return (void*)entry_vkGetPipelineCacheData;
-    }
-    if (!strcmp(name, "vkMergePipelineCaches"))
-    {
-        return (void*)entry_vkMergePipelineCaches;
-    }
-    if (!strcmp(name, "vkCreateGraphicsPipelines"))
-    {
-        return (void*)entry_vkCreateGraphicsPipelines;
-    }
-    if (!strcmp(name, "vkCreateComputePipelines"))
-    {
-        return (void*)entry_vkCreateComputePipelines;
-    }
-    if (!strcmp(name, "vkDestroyPipeline"))
-    {
-        return (void*)entry_vkDestroyPipeline;
-    }
-    if (!strcmp(name, "vkCreatePipelineLayout"))
-    {
-        return (void*)entry_vkCreatePipelineLayout;
-    }
-    if (!strcmp(name, "vkDestroyPipelineLayout"))
-    {
-        return (void*)entry_vkDestroyPipelineLayout;
-    }
-    if (!strcmp(name, "vkCreateSampler"))
-    {
-        return (void*)entry_vkCreateSampler;
-    }
-    if (!strcmp(name, "vkDestroySampler"))
-    {
-        return (void*)entry_vkDestroySampler;
-    }
-    if (!strcmp(name, "vkCreateDescriptorSetLayout"))
-    {
-        return (void*)entry_vkCreateDescriptorSetLayout;
-    }
-    if (!strcmp(name, "vkDestroyDescriptorSetLayout"))
-    {
-        return (void*)entry_vkDestroyDescriptorSetLayout;
-    }
-    if (!strcmp(name, "vkCreateDescriptorPool"))
-    {
-        return (void*)entry_vkCreateDescriptorPool;
-    }
-    if (!strcmp(name, "vkDestroyDescriptorPool"))
-    {
-        return (void*)entry_vkDestroyDescriptorPool;
-    }
-    if (!strcmp(name, "vkResetDescriptorPool"))
-    {
-        return (void*)entry_vkResetDescriptorPool;
-    }
-    if (!strcmp(name, "vkAllocateDescriptorSets"))
-    {
-        return (void*)entry_vkAllocateDescriptorSets;
-    }
-    if (!strcmp(name, "vkFreeDescriptorSets"))
-    {
-        return (void*)entry_vkFreeDescriptorSets;
-    }
-    if (!strcmp(name, "vkUpdateDescriptorSets"))
-    {
-        return (void*)entry_vkUpdateDescriptorSets;
-    }
-    if (!strcmp(name, "vkCreateFramebuffer"))
-    {
-        return (void*)entry_vkCreateFramebuffer;
-    }
-    if (!strcmp(name, "vkDestroyFramebuffer"))
-    {
-        return (void*)entry_vkDestroyFramebuffer;
-    }
-    if (!strcmp(name, "vkCreateRenderPass"))
-    {
-        return (void*)entry_vkCreateRenderPass;
-    }
-    if (!strcmp(name, "vkDestroyRenderPass"))
-    {
-        return (void*)entry_vkDestroyRenderPass;
-    }
-    if (!strcmp(name, "vkGetRenderAreaGranularity"))
-    {
-        return (void*)entry_vkGetRenderAreaGranularity;
-    }
-    if (!strcmp(name, "vkCreateCommandPool"))
-    {
-        return (void*)entry_vkCreateCommandPool;
-    }
-    if (!strcmp(name, "vkDestroyCommandPool"))
-    {
-        return (void*)entry_vkDestroyCommandPool;
-    }
-    if (!strcmp(name, "vkResetCommandPool"))
-    {
-        return (void*)entry_vkResetCommandPool;
-    }
-    if (!strcmp(name, "vkAllocateCommandBuffers"))
-    {
-        return (void*)entry_vkAllocateCommandBuffers;
-    }
-    if (!strcmp(name, "vkFreeCommandBuffers"))
-    {
-        return (void*)entry_vkFreeCommandBuffers;
-    }
-    if (!strcmp(name, "vkBeginCommandBuffer"))
-    {
-        return (void*)entry_vkBeginCommandBuffer;
-    }
-    if (!strcmp(name, "vkEndCommandBuffer"))
-    {
-        return (void*)entry_vkEndCommandBuffer;
-    }
-    if (!strcmp(name, "vkResetCommandBuffer"))
-    {
-        return (void*)entry_vkResetCommandBuffer;
-    }
-    if (!strcmp(name, "vkCmdBindPipeline"))
-    {
-        return (void*)entry_vkCmdBindPipeline;
-    }
-    if (!strcmp(name, "vkCmdSetViewport"))
-    {
-        return (void*)entry_vkCmdSetViewport;
-    }
-    if (!strcmp(name, "vkCmdSetScissor"))
-    {
-        return (void*)entry_vkCmdSetScissor;
-    }
-    if (!strcmp(name, "vkCmdSetLineWidth"))
-    {
-        return (void*)entry_vkCmdSetLineWidth;
-    }
-    if (!strcmp(name, "vkCmdSetDepthBias"))
-    {
-        return (void*)entry_vkCmdSetDepthBias;
-    }
-    if (!strcmp(name, "vkCmdSetBlendConstants"))
-    {
-        return (void*)entry_vkCmdSetBlendConstants;
-    }
-    if (!strcmp(name, "vkCmdSetDepthBounds"))
-    {
-        return (void*)entry_vkCmdSetDepthBounds;
-    }
-    if (!strcmp(name, "vkCmdSetStencilCompareMask"))
-    {
-        return (void*)entry_vkCmdSetStencilCompareMask;
-    }
-    if (!strcmp(name, "vkCmdSetStencilWriteMask"))
-    {
-        return (void*)entry_vkCmdSetStencilWriteMask;
-    }
-    if (!strcmp(name, "vkCmdSetStencilReference"))
-    {
-        return (void*)entry_vkCmdSetStencilReference;
-    }
-    if (!strcmp(name, "vkCmdBindDescriptorSets"))
-    {
-        return (void*)entry_vkCmdBindDescriptorSets;
-    }
-    if (!strcmp(name, "vkCmdBindIndexBuffer"))
-    {
-        return (void*)entry_vkCmdBindIndexBuffer;
-    }
-    if (!strcmp(name, "vkCmdBindVertexBuffers"))
-    {
-        return (void*)entry_vkCmdBindVertexBuffers;
-    }
-    if (!strcmp(name, "vkCmdDraw"))
-    {
-        return (void*)entry_vkCmdDraw;
-    }
-    if (!strcmp(name, "vkCmdDrawIndexed"))
-    {
-        return (void*)entry_vkCmdDrawIndexed;
-    }
-    if (!strcmp(name, "vkCmdDrawIndirect"))
-    {
-        return (void*)entry_vkCmdDrawIndirect;
-    }
-    if (!strcmp(name, "vkCmdDrawIndexedIndirect"))
-    {
-        return (void*)entry_vkCmdDrawIndexedIndirect;
-    }
-    if (!strcmp(name, "vkCmdDispatch"))
-    {
-        return (void*)entry_vkCmdDispatch;
-    }
-    if (!strcmp(name, "vkCmdDispatchIndirect"))
-    {
-        return (void*)entry_vkCmdDispatchIndirect;
-    }
-    if (!strcmp(name, "vkCmdCopyBuffer"))
-    {
-        return (void*)entry_vkCmdCopyBuffer;
-    }
-    if (!strcmp(name, "vkCmdCopyImage"))
-    {
-        return (void*)entry_vkCmdCopyImage;
-    }
-    if (!strcmp(name, "vkCmdBlitImage"))
-    {
-        return (void*)entry_vkCmdBlitImage;
-    }
-    if (!strcmp(name, "vkCmdCopyBufferToImage"))
-    {
-        return (void*)entry_vkCmdCopyBufferToImage;
-    }
-    if (!strcmp(name, "vkCmdCopyImageToBuffer"))
-    {
-        return (void*)entry_vkCmdCopyImageToBuffer;
-    }
-    if (!strcmp(name, "vkCmdUpdateBuffer"))
-    {
-        return (void*)entry_vkCmdUpdateBuffer;
-    }
-    if (!strcmp(name, "vkCmdFillBuffer"))
-    {
-        return (void*)entry_vkCmdFillBuffer;
-    }
-    if (!strcmp(name, "vkCmdClearColorImage"))
-    {
-        return (void*)entry_vkCmdClearColorImage;
-    }
-    if (!strcmp(name, "vkCmdClearDepthStencilImage"))
-    {
-        return (void*)entry_vkCmdClearDepthStencilImage;
-    }
-    if (!strcmp(name, "vkCmdClearAttachments"))
-    {
-        return (void*)entry_vkCmdClearAttachments;
-    }
-    if (!strcmp(name, "vkCmdResolveImage"))
-    {
-        return (void*)entry_vkCmdResolveImage;
-    }
-    if (!strcmp(name, "vkCmdSetEvent"))
-    {
-        return (void*)entry_vkCmdSetEvent;
-    }
-    if (!strcmp(name, "vkCmdResetEvent"))
-    {
-        return (void*)entry_vkCmdResetEvent;
-    }
-    if (!strcmp(name, "vkCmdWaitEvents"))
-    {
-        return (void*)entry_vkCmdWaitEvents;
-    }
-    if (!strcmp(name, "vkCmdPipelineBarrier"))
-    {
-        return (void*)entry_vkCmdPipelineBarrier;
-    }
-    if (!strcmp(name, "vkCmdBeginQuery"))
-    {
-        return (void*)entry_vkCmdBeginQuery;
-    }
-    if (!strcmp(name, "vkCmdEndQuery"))
-    {
-        return (void*)entry_vkCmdEndQuery;
-    }
-    if (!strcmp(name, "vkCmdResetQueryPool"))
-    {
-        return (void*)entry_vkCmdResetQueryPool;
-    }
-    if (!strcmp(name, "vkCmdWriteTimestamp"))
-    {
-        return (void*)entry_vkCmdWriteTimestamp;
-    }
-    if (!strcmp(name, "vkCmdCopyQueryPoolResults"))
-    {
-        return (void*)entry_vkCmdCopyQueryPoolResults;
-    }
-    if (!strcmp(name, "vkCmdPushConstants"))
-    {
-        return (void*)entry_vkCmdPushConstants;
-    }
-    if (!strcmp(name, "vkCmdBeginRenderPass"))
-    {
-        return (void*)entry_vkCmdBeginRenderPass;
-    }
-    if (!strcmp(name, "vkCmdNextSubpass"))
-    {
-        return (void*)entry_vkCmdNextSubpass;
-    }
-    if (!strcmp(name, "vkCmdEndRenderPass"))
-    {
-        return (void*)entry_vkCmdEndRenderPass;
-    }
-    if (!strcmp(name, "vkCmdExecuteCommands"))
-    {
-        return (void*)entry_vkCmdExecuteCommands;
-    }
-#endif
-#ifdef VK_VERSION_1_1
-    if (!strcmp(name, "vkEnumerateInstanceVersion"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkBindBufferMemory2"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkBindImageMemory2"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetDeviceGroupPeerMemoryFeatures"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdSetDeviceMask"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdDispatchBase"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkEnumeratePhysicalDeviceGroups"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetImageMemoryRequirements2"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetBufferMemoryRequirements2"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetImageSparseMemoryRequirements2"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceFeatures2"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceProperties2"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties2"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties2"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties2"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties2"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties2"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkTrimCommandPool"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetDeviceQueue2"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCreateSamplerYcbcrConversion"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkDestroySamplerYcbcrConversion"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCreateDescriptorUpdateTemplate"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkDestroyDescriptorUpdateTemplate"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplate"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalBufferProperties"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalFenceProperties"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalSemaphoreProperties"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetDescriptorSetLayoutSupport"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_surface
-    if (!strcmp(name, "vkDestroySurfaceKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceSupportKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceFormatsKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfacePresentModesKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_swapchain
-    if (!strcmp(name, "vkCreateSwapchainKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkDestroySwapchainKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetSwapchainImagesKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkAcquireNextImageKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkQueuePresentKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetDeviceGroupPresentCapabilitiesKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetDeviceGroupSurfacePresentModesKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDevicePresentRectanglesKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkAcquireNextImage2KHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_display
-    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPropertiesKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetDisplayPlaneSupportedDisplaysKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetDisplayModePropertiesKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCreateDisplayModeKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetDisplayPlaneCapabilitiesKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCreateDisplayPlaneSurfaceKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_display_swapchain
-    if (!strcmp(name, "vkCreateSharedSwapchainsKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_xlib_surface
-    if (!strcmp(name, "vkCreateXlibSurfaceKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceXlibPresentationSupportKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_xcb_surface
-    if (!strcmp(name, "vkCreateXcbSurfaceKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceXcbPresentationSupportKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_wayland_surface
-    if (!strcmp(name, "vkCreateWaylandSurfaceKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceWaylandPresentationSupportKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_mir_surface
-    if (!strcmp(name, "vkCreateMirSurfaceKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceMirPresentationSupportKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_android_surface
-    if (!strcmp(name, "vkCreateAndroidSurfaceKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_win32_surface
-    if (!strcmp(name, "vkCreateWin32SurfaceKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceWin32PresentationSupportKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_get_physical_device_properties2
-    if (!strcmp(name, "vkGetPhysicalDeviceFeatures2KHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceProperties2KHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties2KHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties2KHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties2KHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties2KHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties2KHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_device_group
-    if (!strcmp(name, "vkGetDeviceGroupPeerMemoryFeaturesKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdSetDeviceMaskKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdDispatchBaseKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_maintenance1
-    if (!strcmp(name, "vkTrimCommandPoolKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_device_group_creation
-    if (!strcmp(name, "vkEnumeratePhysicalDeviceGroupsKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_memory_capabilities
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalBufferPropertiesKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_memory_win32
-    if (!strcmp(name, "vkGetMemoryWin32HandleKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetMemoryWin32HandlePropertiesKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_memory_fd
-    if (!strcmp(name, "vkGetMemoryFdKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetMemoryFdPropertiesKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_semaphore_capabilities
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_semaphore_win32
-    if (!strcmp(name, "vkImportSemaphoreWin32HandleKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetSemaphoreWin32HandleKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_semaphore_fd
-    if (!strcmp(name, "vkImportSemaphoreFdKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetSemaphoreFdKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_push_descriptor
-    if (!strcmp(name, "vkCmdPushDescriptorSetKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdPushDescriptorSetWithTemplateKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_descriptor_update_template
-    if (!strcmp(name, "vkCreateDescriptorUpdateTemplateKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkDestroyDescriptorUpdateTemplateKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplateKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_create_renderpass2
-    if (!strcmp(name, "vkCreateRenderPass2KHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdBeginRenderPass2KHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdNextSubpass2KHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdEndRenderPass2KHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_shared_presentable_image
-    if (!strcmp(name, "vkGetSwapchainStatusKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_fence_capabilities
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalFencePropertiesKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_fence_win32
-    if (!strcmp(name, "vkImportFenceWin32HandleKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetFenceWin32HandleKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_fence_fd
-    if (!strcmp(name, "vkImportFenceFdKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetFenceFdKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_get_surface_capabilities2
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilities2KHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceFormats2KHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_get_display_properties2
-    if (!strcmp(name, "vkGetPhysicalDeviceDisplayProperties2KHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPlaneProperties2KHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetDisplayModeProperties2KHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetDisplayPlaneCapabilities2KHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_get_memory_requirements2
-    if (!strcmp(name, "vkGetImageMemoryRequirements2KHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetBufferMemoryRequirements2KHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetImageSparseMemoryRequirements2KHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_sampler_ycbcr_conversion
-    if (!strcmp(name, "vkCreateSamplerYcbcrConversionKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkDestroySamplerYcbcrConversionKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_bind_memory2
-    if (!strcmp(name, "vkBindBufferMemory2KHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkBindImageMemory2KHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_maintenance3
-    if (!strcmp(name, "vkGetDescriptorSetLayoutSupportKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_KHR_draw_indirect_count
-    if (!strcmp(name, "vkCmdDrawIndirectCountKHR"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdDrawIndexedIndirectCountKHR"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_ANDROID_native_buffer
-    if (!strcmp(name, "vkGetSwapchainGrallocUsageANDROID"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkAcquireImageANDROID"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkQueueSignalReleaseImageANDROID"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_EXT_debug_report
-    if (!strcmp(name, "vkCreateDebugReportCallbackEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkDestroyDebugReportCallbackEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkDebugReportMessageEXT"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_EXT_debug_marker
-    if (!strcmp(name, "vkDebugMarkerSetObjectTagEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkDebugMarkerSetObjectNameEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdDebugMarkerBeginEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdDebugMarkerEndEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdDebugMarkerInsertEXT"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_AMD_draw_indirect_count
-    if (!strcmp(name, "vkCmdDrawIndirectCountAMD"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdDrawIndexedIndirectCountAMD"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_AMD_shader_info
-    if (!strcmp(name, "vkGetShaderInfoAMD"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_NV_external_memory_capabilities
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalImageFormatPropertiesNV"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_NV_external_memory_win32
-    if (!strcmp(name, "vkGetMemoryWin32HandleNV"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_NN_vi_surface
-    if (!strcmp(name, "vkCreateViSurfaceNN"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_EXT_conditional_rendering
-    if (!strcmp(name, "vkCmdBeginConditionalRenderingEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdEndConditionalRenderingEXT"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_NVX_device_generated_commands
-    if (!strcmp(name, "vkCmdProcessCommandsNVX"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdReserveSpaceForCommandsNVX"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCreateIndirectCommandsLayoutNVX"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkDestroyIndirectCommandsLayoutNVX"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCreateObjectTableNVX"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkDestroyObjectTableNVX"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkRegisterObjectsNVX"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkUnregisterObjectsNVX"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_NV_clip_space_w_scaling
-    if (!strcmp(name, "vkCmdSetViewportWScalingNV"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_EXT_direct_mode_display
-    if (!strcmp(name, "vkReleaseDisplayEXT"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_EXT_acquire_xlib_display
-    if (!strcmp(name, "vkAcquireXlibDisplayEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetRandROutputDisplayEXT"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_EXT_display_surface_counter
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilities2EXT"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_EXT_display_control
-    if (!strcmp(name, "vkDisplayPowerControlEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkRegisterDeviceEventEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkRegisterDisplayEventEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetSwapchainCounterEXT"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_display_timing
-    if (!strcmp(name, "vkGetRefreshCycleDurationGOOGLE"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPastPresentationTimingGOOGLE"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_EXT_discard_rectangles
-    if (!strcmp(name, "vkCmdSetDiscardRectangleEXT"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_EXT_hdr_metadata
-    if (!strcmp(name, "vkSetHdrMetadataEXT"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_MVK_ios_surface
-    if (!strcmp(name, "vkCreateIOSSurfaceMVK"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_MVK_macos_surface
-    if (!strcmp(name, "vkCreateMacOSSurfaceMVK"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_EXT_debug_utils
-    if (!strcmp(name, "vkSetDebugUtilsObjectNameEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkSetDebugUtilsObjectTagEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkQueueBeginDebugUtilsLabelEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkQueueEndDebugUtilsLabelEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkQueueInsertDebugUtilsLabelEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdBeginDebugUtilsLabelEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdEndDebugUtilsLabelEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCmdInsertDebugUtilsLabelEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCreateDebugUtilsMessengerEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkDestroyDebugUtilsMessengerEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkSubmitDebugUtilsMessageEXT"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_ANDROID_external_memory_android_hardware_buffer
-    if (!strcmp(name, "vkGetAndroidHardwareBufferPropertiesANDROID"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetMemoryAndroidHardwareBufferANDROID"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_EXT_sample_locations
-    if (!strcmp(name, "vkCmdSetSampleLocationsEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceMultisamplePropertiesEXT"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_EXT_validation_cache
-    if (!strcmp(name, "vkCreateValidationCacheEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkDestroyValidationCacheEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkMergeValidationCachesEXT"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetValidationCacheDataEXT"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_EXT_external_memory_host
-    if (!strcmp(name, "vkGetMemoryHostPointerPropertiesEXT"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_AMD_buffer_marker
-    if (!strcmp(name, "vkCmdWriteBufferMarkerAMD"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_NV_device_diagnostic_checkpoints
-    if (!strcmp(name, "vkCmdSetCheckpointNV"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetQueueCheckpointDataNV"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_address_space
-    if (!strcmp(name, "vkMapMemoryIntoAddressSpaceGOOGLE"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_color_buffer
-    if (!strcmp(name, "vkRegisterImageColorBufferGOOGLE"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkRegisterBufferColorBufferGOOGLE"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
-    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplateSizedGOOGLE"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_async_command_buffers
-    if (!strcmp(name, "vkBeginCommandBufferAsyncGOOGLE"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkEndCommandBufferAsyncGOOGLE"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkResetCommandBufferAsyncGOOGLE"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCommandBufferHostSyncGOOGLE"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
-    if (!strcmp(name, "vkCreateImageWithRequirementsGOOGLE"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkCreateBufferWithRequirementsGOOGLE"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_address_space_info
-    if (!strcmp(name, "vkGetMemoryHostAddressInfoGOOGLE"))
-    {
-        return nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_free_memory_sync
-    if (!strcmp(name, "vkFreeMemorySyncGOOGLE"))
-    {
-        return nullptr;
-    }
-#endif
-    return nullptr;
-}
-void* goldfish_vulkan_get_instance_proc_address(VkInstance instance, const char* name){
-    auto resources = ResourceTracker::get();
-    bool has1_1OrHigher = resources->getApiVersionFromInstance(instance) >= VK_API_VERSION_1_1;
-#ifdef VK_VERSION_1_0
-    if (!strcmp(name, "vkCreateInstance"))
-    {
-        return (void*)entry_vkCreateInstance;
-    }
-    if (!strcmp(name, "vkDestroyInstance"))
-    {
-        return (void*)entry_vkDestroyInstance;
-    }
-    if (!strcmp(name, "vkEnumeratePhysicalDevices"))
-    {
-        return (void*)entry_vkEnumeratePhysicalDevices;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceFeatures"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceFeatures;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceFormatProperties;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceImageFormatProperties;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceProperties;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceQueueFamilyProperties;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceMemoryProperties;
-    }
-    if (!strcmp(name, "vkGetInstanceProcAddr"))
-    {
-        return (void*)entry_vkGetInstanceProcAddr;
-    }
-    if (!strcmp(name, "vkGetDeviceProcAddr"))
-    {
-        return (void*)entry_vkGetDeviceProcAddr;
-    }
-    if (!strcmp(name, "vkCreateDevice"))
-    {
-        return (void*)entry_vkCreateDevice;
-    }
-    if (!strcmp(name, "vkDestroyDevice"))
-    {
-        return (void*)entry_vkDestroyDevice;
-    }
-    if (!strcmp(name, "vkEnumerateInstanceExtensionProperties"))
-    {
-        return (void*)entry_vkEnumerateInstanceExtensionProperties;
-    }
-    if (!strcmp(name, "vkEnumerateDeviceExtensionProperties"))
-    {
-        return (void*)entry_vkEnumerateDeviceExtensionProperties;
-    }
-    if (!strcmp(name, "vkEnumerateInstanceLayerProperties"))
-    {
-        return (void*)entry_vkEnumerateInstanceLayerProperties;
-    }
-    if (!strcmp(name, "vkEnumerateDeviceLayerProperties"))
-    {
-        return (void*)entry_vkEnumerateDeviceLayerProperties;
-    }
-    if (!strcmp(name, "vkGetDeviceQueue"))
-    {
-        return (void*)entry_vkGetDeviceQueue;
-    }
-    if (!strcmp(name, "vkQueueSubmit"))
-    {
-        return (void*)entry_vkQueueSubmit;
-    }
-    if (!strcmp(name, "vkQueueWaitIdle"))
-    {
-        return (void*)entry_vkQueueWaitIdle;
-    }
-    if (!strcmp(name, "vkDeviceWaitIdle"))
-    {
-        return (void*)entry_vkDeviceWaitIdle;
-    }
-    if (!strcmp(name, "vkAllocateMemory"))
-    {
-        return (void*)entry_vkAllocateMemory;
-    }
-    if (!strcmp(name, "vkFreeMemory"))
-    {
-        return (void*)entry_vkFreeMemory;
-    }
-    if (!strcmp(name, "vkMapMemory"))
-    {
-        return (void*)entry_vkMapMemory;
-    }
-    if (!strcmp(name, "vkUnmapMemory"))
-    {
-        return (void*)entry_vkUnmapMemory;
-    }
-    if (!strcmp(name, "vkFlushMappedMemoryRanges"))
-    {
-        return (void*)entry_vkFlushMappedMemoryRanges;
-    }
-    if (!strcmp(name, "vkInvalidateMappedMemoryRanges"))
-    {
-        return (void*)entry_vkInvalidateMappedMemoryRanges;
-    }
-    if (!strcmp(name, "vkGetDeviceMemoryCommitment"))
-    {
-        return (void*)entry_vkGetDeviceMemoryCommitment;
-    }
-    if (!strcmp(name, "vkBindBufferMemory"))
-    {
-        return (void*)entry_vkBindBufferMemory;
-    }
-    if (!strcmp(name, "vkBindImageMemory"))
-    {
-        return (void*)entry_vkBindImageMemory;
-    }
-    if (!strcmp(name, "vkGetBufferMemoryRequirements"))
-    {
-        return (void*)entry_vkGetBufferMemoryRequirements;
-    }
-    if (!strcmp(name, "vkGetImageMemoryRequirements"))
-    {
-        return (void*)entry_vkGetImageMemoryRequirements;
-    }
-    if (!strcmp(name, "vkGetImageSparseMemoryRequirements"))
-    {
-        return (void*)entry_vkGetImageSparseMemoryRequirements;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceSparseImageFormatProperties;
-    }
-    if (!strcmp(name, "vkQueueBindSparse"))
-    {
-        return (void*)entry_vkQueueBindSparse;
-    }
-    if (!strcmp(name, "vkCreateFence"))
-    {
-        return (void*)entry_vkCreateFence;
-    }
-    if (!strcmp(name, "vkDestroyFence"))
-    {
-        return (void*)entry_vkDestroyFence;
-    }
-    if (!strcmp(name, "vkResetFences"))
-    {
-        return (void*)entry_vkResetFences;
-    }
-    if (!strcmp(name, "vkGetFenceStatus"))
-    {
-        return (void*)entry_vkGetFenceStatus;
-    }
-    if (!strcmp(name, "vkWaitForFences"))
-    {
-        return (void*)entry_vkWaitForFences;
-    }
-    if (!strcmp(name, "vkCreateSemaphore"))
-    {
-        return (void*)entry_vkCreateSemaphore;
-    }
-    if (!strcmp(name, "vkDestroySemaphore"))
-    {
-        return (void*)entry_vkDestroySemaphore;
-    }
-    if (!strcmp(name, "vkCreateEvent"))
-    {
-        return (void*)entry_vkCreateEvent;
-    }
-    if (!strcmp(name, "vkDestroyEvent"))
-    {
-        return (void*)entry_vkDestroyEvent;
-    }
-    if (!strcmp(name, "vkGetEventStatus"))
-    {
-        return (void*)entry_vkGetEventStatus;
-    }
-    if (!strcmp(name, "vkSetEvent"))
-    {
-        return (void*)entry_vkSetEvent;
-    }
-    if (!strcmp(name, "vkResetEvent"))
-    {
-        return (void*)entry_vkResetEvent;
-    }
-    if (!strcmp(name, "vkCreateQueryPool"))
-    {
-        return (void*)entry_vkCreateQueryPool;
-    }
-    if (!strcmp(name, "vkDestroyQueryPool"))
-    {
-        return (void*)entry_vkDestroyQueryPool;
-    }
-    if (!strcmp(name, "vkGetQueryPoolResults"))
-    {
-        return (void*)entry_vkGetQueryPoolResults;
-    }
-    if (!strcmp(name, "vkCreateBuffer"))
-    {
-        return (void*)entry_vkCreateBuffer;
-    }
-    if (!strcmp(name, "vkDestroyBuffer"))
-    {
-        return (void*)entry_vkDestroyBuffer;
-    }
-    if (!strcmp(name, "vkCreateBufferView"))
-    {
-        return (void*)entry_vkCreateBufferView;
-    }
-    if (!strcmp(name, "vkDestroyBufferView"))
-    {
-        return (void*)entry_vkDestroyBufferView;
-    }
-    if (!strcmp(name, "vkCreateImage"))
-    {
-        return (void*)entry_vkCreateImage;
-    }
-    if (!strcmp(name, "vkDestroyImage"))
-    {
-        return (void*)entry_vkDestroyImage;
-    }
-    if (!strcmp(name, "vkGetImageSubresourceLayout"))
-    {
-        return (void*)entry_vkGetImageSubresourceLayout;
-    }
-    if (!strcmp(name, "vkCreateImageView"))
-    {
-        return (void*)entry_vkCreateImageView;
-    }
-    if (!strcmp(name, "vkDestroyImageView"))
-    {
-        return (void*)entry_vkDestroyImageView;
-    }
-    if (!strcmp(name, "vkCreateShaderModule"))
-    {
-        return (void*)entry_vkCreateShaderModule;
-    }
-    if (!strcmp(name, "vkDestroyShaderModule"))
-    {
-        return (void*)entry_vkDestroyShaderModule;
-    }
-    if (!strcmp(name, "vkCreatePipelineCache"))
-    {
-        return (void*)entry_vkCreatePipelineCache;
-    }
-    if (!strcmp(name, "vkDestroyPipelineCache"))
-    {
-        return (void*)entry_vkDestroyPipelineCache;
-    }
-    if (!strcmp(name, "vkGetPipelineCacheData"))
-    {
-        return (void*)entry_vkGetPipelineCacheData;
-    }
-    if (!strcmp(name, "vkMergePipelineCaches"))
-    {
-        return (void*)entry_vkMergePipelineCaches;
-    }
-    if (!strcmp(name, "vkCreateGraphicsPipelines"))
-    {
-        return (void*)entry_vkCreateGraphicsPipelines;
-    }
-    if (!strcmp(name, "vkCreateComputePipelines"))
-    {
-        return (void*)entry_vkCreateComputePipelines;
-    }
-    if (!strcmp(name, "vkDestroyPipeline"))
-    {
-        return (void*)entry_vkDestroyPipeline;
-    }
-    if (!strcmp(name, "vkCreatePipelineLayout"))
-    {
-        return (void*)entry_vkCreatePipelineLayout;
-    }
-    if (!strcmp(name, "vkDestroyPipelineLayout"))
-    {
-        return (void*)entry_vkDestroyPipelineLayout;
-    }
-    if (!strcmp(name, "vkCreateSampler"))
-    {
-        return (void*)entry_vkCreateSampler;
-    }
-    if (!strcmp(name, "vkDestroySampler"))
-    {
-        return (void*)entry_vkDestroySampler;
-    }
-    if (!strcmp(name, "vkCreateDescriptorSetLayout"))
-    {
-        return (void*)entry_vkCreateDescriptorSetLayout;
-    }
-    if (!strcmp(name, "vkDestroyDescriptorSetLayout"))
-    {
-        return (void*)entry_vkDestroyDescriptorSetLayout;
-    }
-    if (!strcmp(name, "vkCreateDescriptorPool"))
-    {
-        return (void*)entry_vkCreateDescriptorPool;
-    }
-    if (!strcmp(name, "vkDestroyDescriptorPool"))
-    {
-        return (void*)entry_vkDestroyDescriptorPool;
-    }
-    if (!strcmp(name, "vkResetDescriptorPool"))
-    {
-        return (void*)entry_vkResetDescriptorPool;
-    }
-    if (!strcmp(name, "vkAllocateDescriptorSets"))
-    {
-        return (void*)entry_vkAllocateDescriptorSets;
-    }
-    if (!strcmp(name, "vkFreeDescriptorSets"))
-    {
-        return (void*)entry_vkFreeDescriptorSets;
-    }
-    if (!strcmp(name, "vkUpdateDescriptorSets"))
-    {
-        return (void*)entry_vkUpdateDescriptorSets;
-    }
-    if (!strcmp(name, "vkCreateFramebuffer"))
-    {
-        return (void*)entry_vkCreateFramebuffer;
-    }
-    if (!strcmp(name, "vkDestroyFramebuffer"))
-    {
-        return (void*)entry_vkDestroyFramebuffer;
-    }
-    if (!strcmp(name, "vkCreateRenderPass"))
-    {
-        return (void*)entry_vkCreateRenderPass;
-    }
-    if (!strcmp(name, "vkDestroyRenderPass"))
-    {
-        return (void*)entry_vkDestroyRenderPass;
-    }
-    if (!strcmp(name, "vkGetRenderAreaGranularity"))
-    {
-        return (void*)entry_vkGetRenderAreaGranularity;
-    }
-    if (!strcmp(name, "vkCreateCommandPool"))
-    {
-        return (void*)entry_vkCreateCommandPool;
-    }
-    if (!strcmp(name, "vkDestroyCommandPool"))
-    {
-        return (void*)entry_vkDestroyCommandPool;
-    }
-    if (!strcmp(name, "vkResetCommandPool"))
-    {
-        return (void*)entry_vkResetCommandPool;
-    }
-    if (!strcmp(name, "vkAllocateCommandBuffers"))
-    {
-        return (void*)entry_vkAllocateCommandBuffers;
-    }
-    if (!strcmp(name, "vkFreeCommandBuffers"))
-    {
-        return (void*)entry_vkFreeCommandBuffers;
-    }
-    if (!strcmp(name, "vkBeginCommandBuffer"))
-    {
-        return (void*)entry_vkBeginCommandBuffer;
-    }
-    if (!strcmp(name, "vkEndCommandBuffer"))
-    {
-        return (void*)entry_vkEndCommandBuffer;
-    }
-    if (!strcmp(name, "vkResetCommandBuffer"))
-    {
-        return (void*)entry_vkResetCommandBuffer;
-    }
-    if (!strcmp(name, "vkCmdBindPipeline"))
-    {
-        return (void*)entry_vkCmdBindPipeline;
-    }
-    if (!strcmp(name, "vkCmdSetViewport"))
-    {
-        return (void*)entry_vkCmdSetViewport;
-    }
-    if (!strcmp(name, "vkCmdSetScissor"))
-    {
-        return (void*)entry_vkCmdSetScissor;
-    }
-    if (!strcmp(name, "vkCmdSetLineWidth"))
-    {
-        return (void*)entry_vkCmdSetLineWidth;
-    }
-    if (!strcmp(name, "vkCmdSetDepthBias"))
-    {
-        return (void*)entry_vkCmdSetDepthBias;
-    }
-    if (!strcmp(name, "vkCmdSetBlendConstants"))
-    {
-        return (void*)entry_vkCmdSetBlendConstants;
-    }
-    if (!strcmp(name, "vkCmdSetDepthBounds"))
-    {
-        return (void*)entry_vkCmdSetDepthBounds;
-    }
-    if (!strcmp(name, "vkCmdSetStencilCompareMask"))
-    {
-        return (void*)entry_vkCmdSetStencilCompareMask;
-    }
-    if (!strcmp(name, "vkCmdSetStencilWriteMask"))
-    {
-        return (void*)entry_vkCmdSetStencilWriteMask;
-    }
-    if (!strcmp(name, "vkCmdSetStencilReference"))
-    {
-        return (void*)entry_vkCmdSetStencilReference;
-    }
-    if (!strcmp(name, "vkCmdBindDescriptorSets"))
-    {
-        return (void*)entry_vkCmdBindDescriptorSets;
-    }
-    if (!strcmp(name, "vkCmdBindIndexBuffer"))
-    {
-        return (void*)entry_vkCmdBindIndexBuffer;
-    }
-    if (!strcmp(name, "vkCmdBindVertexBuffers"))
-    {
-        return (void*)entry_vkCmdBindVertexBuffers;
-    }
-    if (!strcmp(name, "vkCmdDraw"))
-    {
-        return (void*)entry_vkCmdDraw;
-    }
-    if (!strcmp(name, "vkCmdDrawIndexed"))
-    {
-        return (void*)entry_vkCmdDrawIndexed;
-    }
-    if (!strcmp(name, "vkCmdDrawIndirect"))
-    {
-        return (void*)entry_vkCmdDrawIndirect;
-    }
-    if (!strcmp(name, "vkCmdDrawIndexedIndirect"))
-    {
-        return (void*)entry_vkCmdDrawIndexedIndirect;
-    }
-    if (!strcmp(name, "vkCmdDispatch"))
-    {
-        return (void*)entry_vkCmdDispatch;
-    }
-    if (!strcmp(name, "vkCmdDispatchIndirect"))
-    {
-        return (void*)entry_vkCmdDispatchIndirect;
-    }
-    if (!strcmp(name, "vkCmdCopyBuffer"))
-    {
-        return (void*)entry_vkCmdCopyBuffer;
-    }
-    if (!strcmp(name, "vkCmdCopyImage"))
-    {
-        return (void*)entry_vkCmdCopyImage;
-    }
-    if (!strcmp(name, "vkCmdBlitImage"))
-    {
-        return (void*)entry_vkCmdBlitImage;
-    }
-    if (!strcmp(name, "vkCmdCopyBufferToImage"))
-    {
-        return (void*)entry_vkCmdCopyBufferToImage;
-    }
-    if (!strcmp(name, "vkCmdCopyImageToBuffer"))
-    {
-        return (void*)entry_vkCmdCopyImageToBuffer;
-    }
-    if (!strcmp(name, "vkCmdUpdateBuffer"))
-    {
-        return (void*)entry_vkCmdUpdateBuffer;
-    }
-    if (!strcmp(name, "vkCmdFillBuffer"))
-    {
-        return (void*)entry_vkCmdFillBuffer;
-    }
-    if (!strcmp(name, "vkCmdClearColorImage"))
-    {
-        return (void*)entry_vkCmdClearColorImage;
-    }
-    if (!strcmp(name, "vkCmdClearDepthStencilImage"))
-    {
-        return (void*)entry_vkCmdClearDepthStencilImage;
-    }
-    if (!strcmp(name, "vkCmdClearAttachments"))
-    {
-        return (void*)entry_vkCmdClearAttachments;
-    }
-    if (!strcmp(name, "vkCmdResolveImage"))
-    {
-        return (void*)entry_vkCmdResolveImage;
-    }
-    if (!strcmp(name, "vkCmdSetEvent"))
-    {
-        return (void*)entry_vkCmdSetEvent;
-    }
-    if (!strcmp(name, "vkCmdResetEvent"))
-    {
-        return (void*)entry_vkCmdResetEvent;
-    }
-    if (!strcmp(name, "vkCmdWaitEvents"))
-    {
-        return (void*)entry_vkCmdWaitEvents;
-    }
-    if (!strcmp(name, "vkCmdPipelineBarrier"))
-    {
-        return (void*)entry_vkCmdPipelineBarrier;
-    }
-    if (!strcmp(name, "vkCmdBeginQuery"))
-    {
-        return (void*)entry_vkCmdBeginQuery;
-    }
-    if (!strcmp(name, "vkCmdEndQuery"))
-    {
-        return (void*)entry_vkCmdEndQuery;
-    }
-    if (!strcmp(name, "vkCmdResetQueryPool"))
-    {
-        return (void*)entry_vkCmdResetQueryPool;
-    }
-    if (!strcmp(name, "vkCmdWriteTimestamp"))
-    {
-        return (void*)entry_vkCmdWriteTimestamp;
-    }
-    if (!strcmp(name, "vkCmdCopyQueryPoolResults"))
-    {
-        return (void*)entry_vkCmdCopyQueryPoolResults;
-    }
-    if (!strcmp(name, "vkCmdPushConstants"))
-    {
-        return (void*)entry_vkCmdPushConstants;
-    }
-    if (!strcmp(name, "vkCmdBeginRenderPass"))
-    {
-        return (void*)entry_vkCmdBeginRenderPass;
-    }
-    if (!strcmp(name, "vkCmdNextSubpass"))
-    {
-        return (void*)entry_vkCmdNextSubpass;
-    }
-    if (!strcmp(name, "vkCmdEndRenderPass"))
-    {
-        return (void*)entry_vkCmdEndRenderPass;
-    }
-    if (!strcmp(name, "vkCmdExecuteCommands"))
-    {
-        return (void*)entry_vkCmdExecuteCommands;
-    }
-#endif
-#ifdef VK_VERSION_1_1
-    if (!strcmp(name, "vkEnumerateInstanceVersion"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkEnumerateInstanceVersion : nullptr;
-    }
-    if (!strcmp(name, "vkBindBufferMemory2"))
-    {
-        return (void*)dynCheck_entry_vkBindBufferMemory2;
-    }
-    if (!strcmp(name, "vkBindImageMemory2"))
-    {
-        return (void*)dynCheck_entry_vkBindImageMemory2;
-    }
-    if (!strcmp(name, "vkGetDeviceGroupPeerMemoryFeatures"))
-    {
-        return (void*)dynCheck_entry_vkGetDeviceGroupPeerMemoryFeatures;
-    }
-    if (!strcmp(name, "vkCmdSetDeviceMask"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkCmdSetDeviceMask : nullptr;
-    }
-    if (!strcmp(name, "vkCmdDispatchBase"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkCmdDispatchBase : nullptr;
-    }
-    if (!strcmp(name, "vkEnumeratePhysicalDeviceGroups"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetImageMemoryRequirements2"))
-    {
-        return (void*)dynCheck_entry_vkGetImageMemoryRequirements2;
-    }
-    if (!strcmp(name, "vkGetBufferMemoryRequirements2"))
-    {
-        return (void*)dynCheck_entry_vkGetBufferMemoryRequirements2;
-    }
-    if (!strcmp(name, "vkGetImageSparseMemoryRequirements2"))
-    {
-        return (void*)dynCheck_entry_vkGetImageSparseMemoryRequirements2;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceFeatures2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceFeatures2 : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceProperties2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceProperties2 : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceFormatProperties2 : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceImageFormatProperties2 : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceQueueFamilyProperties2 : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceMemoryProperties2 : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceSparseImageFormatProperties2 : nullptr;
-    }
-    if (!strcmp(name, "vkTrimCommandPool"))
-    {
-        return (void*)dynCheck_entry_vkTrimCommandPool;
-    }
-    if (!strcmp(name, "vkGetDeviceQueue2"))
-    {
-        return (void*)dynCheck_entry_vkGetDeviceQueue2;
-    }
-    if (!strcmp(name, "vkCreateSamplerYcbcrConversion"))
-    {
-        return (void*)dynCheck_entry_vkCreateSamplerYcbcrConversion;
-    }
-    if (!strcmp(name, "vkDestroySamplerYcbcrConversion"))
-    {
-        return (void*)dynCheck_entry_vkDestroySamplerYcbcrConversion;
-    }
-    if (!strcmp(name, "vkCreateDescriptorUpdateTemplate"))
-    {
-        return (void*)dynCheck_entry_vkCreateDescriptorUpdateTemplate;
-    }
-    if (!strcmp(name, "vkDestroyDescriptorUpdateTemplate"))
-    {
-        return (void*)dynCheck_entry_vkDestroyDescriptorUpdateTemplate;
-    }
-    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplate"))
-    {
-        return (void*)dynCheck_entry_vkUpdateDescriptorSetWithTemplate;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalBufferProperties"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceExternalBufferProperties : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalFenceProperties"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceExternalFenceProperties : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalSemaphoreProperties"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceExternalSemaphoreProperties : nullptr;
-    }
-    if (!strcmp(name, "vkGetDescriptorSetLayoutSupport"))
-    {
-        return (void*)dynCheck_entry_vkGetDescriptorSetLayoutSupport;
-    }
-#endif
-#ifdef VK_KHR_surface
-    if (!strcmp(name, "vkDestroySurfaceKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_surface");
-        return hasExt ? (void*)entry_vkDestroySurfaceKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceSupportKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceSupportKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceCapabilitiesKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceFormatsKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceFormatsKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfacePresentModesKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfacePresentModesKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_swapchain
-    if (!strcmp(name, "vkCreateSwapchainKHR"))
-    {
-        return (void*)dynCheck_entry_vkCreateSwapchainKHR;
-    }
-    if (!strcmp(name, "vkDestroySwapchainKHR"))
-    {
-        return (void*)dynCheck_entry_vkDestroySwapchainKHR;
-    }
-    if (!strcmp(name, "vkGetSwapchainImagesKHR"))
-    {
-        return (void*)dynCheck_entry_vkGetSwapchainImagesKHR;
-    }
-    if (!strcmp(name, "vkAcquireNextImageKHR"))
-    {
-        return (void*)dynCheck_entry_vkAcquireNextImageKHR;
-    }
-    if (!strcmp(name, "vkQueuePresentKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_swapchain");
-        return hasExt ? (void*)entry_vkQueuePresentKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetDeviceGroupPresentCapabilitiesKHR"))
-    {
-        return (void*)dynCheck_entry_vkGetDeviceGroupPresentCapabilitiesKHR;
-    }
-    if (!strcmp(name, "vkGetDeviceGroupSurfacePresentModesKHR"))
-    {
-        return (void*)dynCheck_entry_vkGetDeviceGroupSurfacePresentModesKHR;
-    }
-    if (!strcmp(name, "vkGetPhysicalDevicePresentRectanglesKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_swapchain");
-        return hasExt ? (void*)entry_vkGetPhysicalDevicePresentRectanglesKHR : nullptr;
-    }
-    if (!strcmp(name, "vkAcquireNextImage2KHR"))
-    {
-        return (void*)dynCheck_entry_vkAcquireNextImage2KHR;
-    }
-#endif
-#ifdef VK_KHR_display
-    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPropertiesKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_display");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceDisplayPropertiesKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_display");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceDisplayPlanePropertiesKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetDisplayPlaneSupportedDisplaysKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_display");
-        return hasExt ? (void*)entry_vkGetDisplayPlaneSupportedDisplaysKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetDisplayModePropertiesKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_display");
-        return hasExt ? (void*)entry_vkGetDisplayModePropertiesKHR : nullptr;
-    }
-    if (!strcmp(name, "vkCreateDisplayModeKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_display");
-        return hasExt ? (void*)entry_vkCreateDisplayModeKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetDisplayPlaneCapabilitiesKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_display");
-        return hasExt ? (void*)entry_vkGetDisplayPlaneCapabilitiesKHR : nullptr;
-    }
-    if (!strcmp(name, "vkCreateDisplayPlaneSurfaceKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_display");
-        return hasExt ? (void*)entry_vkCreateDisplayPlaneSurfaceKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_display_swapchain
-    if (!strcmp(name, "vkCreateSharedSwapchainsKHR"))
-    {
-        return (void*)dynCheck_entry_vkCreateSharedSwapchainsKHR;
-    }
-#endif
-#ifdef VK_KHR_xlib_surface
-    if (!strcmp(name, "vkCreateXlibSurfaceKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_xlib_surface");
-        return hasExt ? (void*)entry_vkCreateXlibSurfaceKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceXlibPresentationSupportKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_xlib_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceXlibPresentationSupportKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_xcb_surface
-    if (!strcmp(name, "vkCreateXcbSurfaceKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_xcb_surface");
-        return hasExt ? (void*)entry_vkCreateXcbSurfaceKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceXcbPresentationSupportKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_xcb_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceXcbPresentationSupportKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_wayland_surface
-    if (!strcmp(name, "vkCreateWaylandSurfaceKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_wayland_surface");
-        return hasExt ? (void*)entry_vkCreateWaylandSurfaceKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceWaylandPresentationSupportKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_wayland_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceWaylandPresentationSupportKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_mir_surface
-    if (!strcmp(name, "vkCreateMirSurfaceKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_mir_surface");
-        return hasExt ? (void*)entry_vkCreateMirSurfaceKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceMirPresentationSupportKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_mir_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceMirPresentationSupportKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_android_surface
-    if (!strcmp(name, "vkCreateAndroidSurfaceKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_android_surface");
-        return hasExt ? (void*)entry_vkCreateAndroidSurfaceKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_win32_surface
-    if (!strcmp(name, "vkCreateWin32SurfaceKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_win32_surface");
-        return hasExt ? (void*)entry_vkCreateWin32SurfaceKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceWin32PresentationSupportKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_win32_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceWin32PresentationSupportKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_get_physical_device_properties2
-    if (!strcmp(name, "vkGetPhysicalDeviceFeatures2KHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_physical_device_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceFeatures2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceProperties2KHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_physical_device_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceProperties2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties2KHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_physical_device_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceFormatProperties2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties2KHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_physical_device_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceImageFormatProperties2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties2KHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_physical_device_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceQueueFamilyProperties2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties2KHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_physical_device_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceMemoryProperties2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties2KHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_physical_device_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceSparseImageFormatProperties2KHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_device_group
-    if (!strcmp(name, "vkGetDeviceGroupPeerMemoryFeaturesKHR"))
-    {
-        return (void*)dynCheck_entry_vkGetDeviceGroupPeerMemoryFeaturesKHR;
-    }
-    if (!strcmp(name, "vkCmdSetDeviceMaskKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_device_group");
-        return hasExt ? (void*)entry_vkCmdSetDeviceMaskKHR : nullptr;
-    }
-    if (!strcmp(name, "vkCmdDispatchBaseKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_device_group");
-        return hasExt ? (void*)entry_vkCmdDispatchBaseKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_maintenance1
-    if (!strcmp(name, "vkTrimCommandPoolKHR"))
-    {
-        return (void*)dynCheck_entry_vkTrimCommandPoolKHR;
-    }
-#endif
-#ifdef VK_KHR_device_group_creation
-    if (!strcmp(name, "vkEnumeratePhysicalDeviceGroupsKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_device_group_creation");
-        return hasExt ? (void*)entry_vkEnumeratePhysicalDeviceGroupsKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_memory_capabilities
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalBufferPropertiesKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_external_memory_capabilities");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceExternalBufferPropertiesKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_memory_win32
-    if (!strcmp(name, "vkGetMemoryWin32HandleKHR"))
-    {
-        return (void*)dynCheck_entry_vkGetMemoryWin32HandleKHR;
-    }
-    if (!strcmp(name, "vkGetMemoryWin32HandlePropertiesKHR"))
-    {
-        return (void*)dynCheck_entry_vkGetMemoryWin32HandlePropertiesKHR;
-    }
-#endif
-#ifdef VK_KHR_external_memory_fd
-    if (!strcmp(name, "vkGetMemoryFdKHR"))
-    {
-        return (void*)dynCheck_entry_vkGetMemoryFdKHR;
-    }
-    if (!strcmp(name, "vkGetMemoryFdPropertiesKHR"))
-    {
-        return (void*)dynCheck_entry_vkGetMemoryFdPropertiesKHR;
-    }
-#endif
-#ifdef VK_KHR_external_semaphore_capabilities
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_external_semaphore_capabilities");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_semaphore_win32
-    if (!strcmp(name, "vkImportSemaphoreWin32HandleKHR"))
-    {
-        return (void*)dynCheck_entry_vkImportSemaphoreWin32HandleKHR;
-    }
-    if (!strcmp(name, "vkGetSemaphoreWin32HandleKHR"))
-    {
-        return (void*)dynCheck_entry_vkGetSemaphoreWin32HandleKHR;
-    }
-#endif
-#ifdef VK_KHR_external_semaphore_fd
-    if (!strcmp(name, "vkImportSemaphoreFdKHR"))
-    {
-        return (void*)dynCheck_entry_vkImportSemaphoreFdKHR;
-    }
-    if (!strcmp(name, "vkGetSemaphoreFdKHR"))
-    {
-        return (void*)dynCheck_entry_vkGetSemaphoreFdKHR;
-    }
-#endif
-#ifdef VK_KHR_push_descriptor
-    if (!strcmp(name, "vkCmdPushDescriptorSetKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_push_descriptor");
-        return hasExt ? (void*)entry_vkCmdPushDescriptorSetKHR : nullptr;
-    }
-    if (!strcmp(name, "vkCmdPushDescriptorSetWithTemplateKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_push_descriptor");
-        return hasExt ? (void*)entry_vkCmdPushDescriptorSetWithTemplateKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_descriptor_update_template
-    if (!strcmp(name, "vkCreateDescriptorUpdateTemplateKHR"))
-    {
-        return (void*)dynCheck_entry_vkCreateDescriptorUpdateTemplateKHR;
-    }
-    if (!strcmp(name, "vkDestroyDescriptorUpdateTemplateKHR"))
-    {
-        return (void*)dynCheck_entry_vkDestroyDescriptorUpdateTemplateKHR;
-    }
-    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplateKHR"))
-    {
-        return (void*)dynCheck_entry_vkUpdateDescriptorSetWithTemplateKHR;
-    }
-#endif
-#ifdef VK_KHR_create_renderpass2
-    if (!strcmp(name, "vkCreateRenderPass2KHR"))
-    {
-        return (void*)dynCheck_entry_vkCreateRenderPass2KHR;
-    }
-    if (!strcmp(name, "vkCmdBeginRenderPass2KHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_create_renderpass2");
-        return hasExt ? (void*)entry_vkCmdBeginRenderPass2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkCmdNextSubpass2KHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_create_renderpass2");
-        return hasExt ? (void*)entry_vkCmdNextSubpass2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkCmdEndRenderPass2KHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_create_renderpass2");
-        return hasExt ? (void*)entry_vkCmdEndRenderPass2KHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_shared_presentable_image
-    if (!strcmp(name, "vkGetSwapchainStatusKHR"))
-    {
-        return (void*)dynCheck_entry_vkGetSwapchainStatusKHR;
-    }
-#endif
-#ifdef VK_KHR_external_fence_capabilities
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalFencePropertiesKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_external_fence_capabilities");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceExternalFencePropertiesKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_fence_win32
-    if (!strcmp(name, "vkImportFenceWin32HandleKHR"))
-    {
-        return (void*)dynCheck_entry_vkImportFenceWin32HandleKHR;
-    }
-    if (!strcmp(name, "vkGetFenceWin32HandleKHR"))
-    {
-        return (void*)dynCheck_entry_vkGetFenceWin32HandleKHR;
-    }
-#endif
-#ifdef VK_KHR_external_fence_fd
-    if (!strcmp(name, "vkImportFenceFdKHR"))
-    {
-        return (void*)dynCheck_entry_vkImportFenceFdKHR;
-    }
-    if (!strcmp(name, "vkGetFenceFdKHR"))
-    {
-        return (void*)dynCheck_entry_vkGetFenceFdKHR;
-    }
-#endif
-#ifdef VK_KHR_get_surface_capabilities2
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilities2KHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_surface_capabilities2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceCapabilities2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceFormats2KHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_surface_capabilities2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceFormats2KHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_get_display_properties2
-    if (!strcmp(name, "vkGetPhysicalDeviceDisplayProperties2KHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_display_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceDisplayProperties2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPlaneProperties2KHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_display_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceDisplayPlaneProperties2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetDisplayModeProperties2KHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_display_properties2");
-        return hasExt ? (void*)entry_vkGetDisplayModeProperties2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetDisplayPlaneCapabilities2KHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_display_properties2");
-        return hasExt ? (void*)entry_vkGetDisplayPlaneCapabilities2KHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_get_memory_requirements2
-    if (!strcmp(name, "vkGetImageMemoryRequirements2KHR"))
-    {
-        return (void*)dynCheck_entry_vkGetImageMemoryRequirements2KHR;
-    }
-    if (!strcmp(name, "vkGetBufferMemoryRequirements2KHR"))
-    {
-        return (void*)dynCheck_entry_vkGetBufferMemoryRequirements2KHR;
-    }
-    if (!strcmp(name, "vkGetImageSparseMemoryRequirements2KHR"))
-    {
-        return (void*)dynCheck_entry_vkGetImageSparseMemoryRequirements2KHR;
-    }
-#endif
-#ifdef VK_KHR_sampler_ycbcr_conversion
-    if (!strcmp(name, "vkCreateSamplerYcbcrConversionKHR"))
-    {
-        return (void*)dynCheck_entry_vkCreateSamplerYcbcrConversionKHR;
-    }
-    if (!strcmp(name, "vkDestroySamplerYcbcrConversionKHR"))
-    {
-        return (void*)dynCheck_entry_vkDestroySamplerYcbcrConversionKHR;
-    }
-#endif
-#ifdef VK_KHR_bind_memory2
-    if (!strcmp(name, "vkBindBufferMemory2KHR"))
-    {
-        return (void*)dynCheck_entry_vkBindBufferMemory2KHR;
-    }
-    if (!strcmp(name, "vkBindImageMemory2KHR"))
-    {
-        return (void*)dynCheck_entry_vkBindImageMemory2KHR;
-    }
-#endif
-#ifdef VK_KHR_maintenance3
-    if (!strcmp(name, "vkGetDescriptorSetLayoutSupportKHR"))
-    {
-        return (void*)dynCheck_entry_vkGetDescriptorSetLayoutSupportKHR;
-    }
-#endif
-#ifdef VK_KHR_draw_indirect_count
-    if (!strcmp(name, "vkCmdDrawIndirectCountKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_draw_indirect_count");
-        return hasExt ? (void*)entry_vkCmdDrawIndirectCountKHR : nullptr;
-    }
-    if (!strcmp(name, "vkCmdDrawIndexedIndirectCountKHR"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_draw_indirect_count");
-        return hasExt ? (void*)entry_vkCmdDrawIndexedIndirectCountKHR : nullptr;
-    }
-#endif
-#ifdef VK_ANDROID_native_buffer
-    if (!strcmp(name, "vkGetSwapchainGrallocUsageANDROID"))
-    {
-        return (void*)dynCheck_entry_vkGetSwapchainGrallocUsageANDROID;
-    }
-    if (!strcmp(name, "vkAcquireImageANDROID"))
-    {
-        return (void*)dynCheck_entry_vkAcquireImageANDROID;
-    }
-    if (!strcmp(name, "vkQueueSignalReleaseImageANDROID"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_ANDROID_native_buffer");
-        return hasExt ? (void*)entry_vkQueueSignalReleaseImageANDROID : nullptr;
-    }
-#endif
-#ifdef VK_EXT_debug_report
-    if (!strcmp(name, "vkCreateDebugReportCallbackEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_report");
-        return hasExt ? (void*)entry_vkCreateDebugReportCallbackEXT : nullptr;
-    }
-    if (!strcmp(name, "vkDestroyDebugReportCallbackEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_report");
-        return hasExt ? (void*)entry_vkDestroyDebugReportCallbackEXT : nullptr;
-    }
-    if (!strcmp(name, "vkDebugReportMessageEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_report");
-        return hasExt ? (void*)entry_vkDebugReportMessageEXT : nullptr;
-    }
-#endif
-#ifdef VK_EXT_debug_marker
-    if (!strcmp(name, "vkDebugMarkerSetObjectTagEXT"))
-    {
-        return (void*)dynCheck_entry_vkDebugMarkerSetObjectTagEXT;
-    }
-    if (!strcmp(name, "vkDebugMarkerSetObjectNameEXT"))
-    {
-        return (void*)dynCheck_entry_vkDebugMarkerSetObjectNameEXT;
-    }
-    if (!strcmp(name, "vkCmdDebugMarkerBeginEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_marker");
-        return hasExt ? (void*)entry_vkCmdDebugMarkerBeginEXT : nullptr;
-    }
-    if (!strcmp(name, "vkCmdDebugMarkerEndEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_marker");
-        return hasExt ? (void*)entry_vkCmdDebugMarkerEndEXT : nullptr;
-    }
-    if (!strcmp(name, "vkCmdDebugMarkerInsertEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_marker");
-        return hasExt ? (void*)entry_vkCmdDebugMarkerInsertEXT : nullptr;
-    }
-#endif
-#ifdef VK_AMD_draw_indirect_count
-    if (!strcmp(name, "vkCmdDrawIndirectCountAMD"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_AMD_draw_indirect_count");
-        return hasExt ? (void*)entry_vkCmdDrawIndirectCountAMD : nullptr;
-    }
-    if (!strcmp(name, "vkCmdDrawIndexedIndirectCountAMD"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_AMD_draw_indirect_count");
-        return hasExt ? (void*)entry_vkCmdDrawIndexedIndirectCountAMD : nullptr;
-    }
-#endif
-#ifdef VK_AMD_shader_info
-    if (!strcmp(name, "vkGetShaderInfoAMD"))
-    {
-        return (void*)dynCheck_entry_vkGetShaderInfoAMD;
-    }
-#endif
-#ifdef VK_NV_external_memory_capabilities
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalImageFormatPropertiesNV"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_external_memory_capabilities");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceExternalImageFormatPropertiesNV : nullptr;
-    }
-#endif
-#ifdef VK_NV_external_memory_win32
-    if (!strcmp(name, "vkGetMemoryWin32HandleNV"))
-    {
-        return (void*)dynCheck_entry_vkGetMemoryWin32HandleNV;
-    }
-#endif
-#ifdef VK_NN_vi_surface
-    if (!strcmp(name, "vkCreateViSurfaceNN"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_NN_vi_surface");
-        return hasExt ? (void*)entry_vkCreateViSurfaceNN : nullptr;
-    }
-#endif
-#ifdef VK_EXT_conditional_rendering
-    if (!strcmp(name, "vkCmdBeginConditionalRenderingEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_conditional_rendering");
-        return hasExt ? (void*)entry_vkCmdBeginConditionalRenderingEXT : nullptr;
-    }
-    if (!strcmp(name, "vkCmdEndConditionalRenderingEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_conditional_rendering");
-        return hasExt ? (void*)entry_vkCmdEndConditionalRenderingEXT : nullptr;
-    }
-#endif
-#ifdef VK_NVX_device_generated_commands
-    if (!strcmp(name, "vkCmdProcessCommandsNVX"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_NVX_device_generated_commands");
-        return hasExt ? (void*)entry_vkCmdProcessCommandsNVX : nullptr;
-    }
-    if (!strcmp(name, "vkCmdReserveSpaceForCommandsNVX"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_NVX_device_generated_commands");
-        return hasExt ? (void*)entry_vkCmdReserveSpaceForCommandsNVX : nullptr;
-    }
-    if (!strcmp(name, "vkCreateIndirectCommandsLayoutNVX"))
-    {
-        return (void*)dynCheck_entry_vkCreateIndirectCommandsLayoutNVX;
-    }
-    if (!strcmp(name, "vkDestroyIndirectCommandsLayoutNVX"))
-    {
-        return (void*)dynCheck_entry_vkDestroyIndirectCommandsLayoutNVX;
-    }
-    if (!strcmp(name, "vkCreateObjectTableNVX"))
-    {
-        return (void*)dynCheck_entry_vkCreateObjectTableNVX;
-    }
-    if (!strcmp(name, "vkDestroyObjectTableNVX"))
-    {
-        return (void*)dynCheck_entry_vkDestroyObjectTableNVX;
-    }
-    if (!strcmp(name, "vkRegisterObjectsNVX"))
-    {
-        return (void*)dynCheck_entry_vkRegisterObjectsNVX;
-    }
-    if (!strcmp(name, "vkUnregisterObjectsNVX"))
-    {
-        return (void*)dynCheck_entry_vkUnregisterObjectsNVX;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_NVX_device_generated_commands");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX : nullptr;
-    }
-#endif
-#ifdef VK_NV_clip_space_w_scaling
-    if (!strcmp(name, "vkCmdSetViewportWScalingNV"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_clip_space_w_scaling");
-        return hasExt ? (void*)entry_vkCmdSetViewportWScalingNV : nullptr;
-    }
-#endif
-#ifdef VK_EXT_direct_mode_display
-    if (!strcmp(name, "vkReleaseDisplayEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_direct_mode_display");
-        return hasExt ? (void*)entry_vkReleaseDisplayEXT : nullptr;
-    }
-#endif
-#ifdef VK_EXT_acquire_xlib_display
-    if (!strcmp(name, "vkAcquireXlibDisplayEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_acquire_xlib_display");
-        return hasExt ? (void*)entry_vkAcquireXlibDisplayEXT : nullptr;
-    }
-    if (!strcmp(name, "vkGetRandROutputDisplayEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_acquire_xlib_display");
-        return hasExt ? (void*)entry_vkGetRandROutputDisplayEXT : nullptr;
-    }
-#endif
-#ifdef VK_EXT_display_surface_counter
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilities2EXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_display_surface_counter");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceCapabilities2EXT : nullptr;
-    }
-#endif
-#ifdef VK_EXT_display_control
-    if (!strcmp(name, "vkDisplayPowerControlEXT"))
-    {
-        return (void*)dynCheck_entry_vkDisplayPowerControlEXT;
-    }
-    if (!strcmp(name, "vkRegisterDeviceEventEXT"))
-    {
-        return (void*)dynCheck_entry_vkRegisterDeviceEventEXT;
-    }
-    if (!strcmp(name, "vkRegisterDisplayEventEXT"))
-    {
-        return (void*)dynCheck_entry_vkRegisterDisplayEventEXT;
-    }
-    if (!strcmp(name, "vkGetSwapchainCounterEXT"))
-    {
-        return (void*)dynCheck_entry_vkGetSwapchainCounterEXT;
-    }
-#endif
-#ifdef VK_GOOGLE_display_timing
-    if (!strcmp(name, "vkGetRefreshCycleDurationGOOGLE"))
-    {
-        return (void*)dynCheck_entry_vkGetRefreshCycleDurationGOOGLE;
-    }
-    if (!strcmp(name, "vkGetPastPresentationTimingGOOGLE"))
-    {
-        return (void*)dynCheck_entry_vkGetPastPresentationTimingGOOGLE;
-    }
-#endif
-#ifdef VK_EXT_discard_rectangles
-    if (!strcmp(name, "vkCmdSetDiscardRectangleEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_discard_rectangles");
-        return hasExt ? (void*)entry_vkCmdSetDiscardRectangleEXT : nullptr;
-    }
-#endif
-#ifdef VK_EXT_hdr_metadata
-    if (!strcmp(name, "vkSetHdrMetadataEXT"))
-    {
-        return (void*)dynCheck_entry_vkSetHdrMetadataEXT;
-    }
-#endif
-#ifdef VK_MVK_ios_surface
-    if (!strcmp(name, "vkCreateIOSSurfaceMVK"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_MVK_ios_surface");
-        return hasExt ? (void*)entry_vkCreateIOSSurfaceMVK : nullptr;
-    }
-#endif
-#ifdef VK_MVK_macos_surface
-    if (!strcmp(name, "vkCreateMacOSSurfaceMVK"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_MVK_macos_surface");
-        return hasExt ? (void*)entry_vkCreateMacOSSurfaceMVK : nullptr;
-    }
-#endif
-#ifdef VK_EXT_debug_utils
-    if (!strcmp(name, "vkSetDebugUtilsObjectNameEXT"))
-    {
-        return (void*)dynCheck_entry_vkSetDebugUtilsObjectNameEXT;
-    }
-    if (!strcmp(name, "vkSetDebugUtilsObjectTagEXT"))
-    {
-        return (void*)dynCheck_entry_vkSetDebugUtilsObjectTagEXT;
-    }
-    if (!strcmp(name, "vkQueueBeginDebugUtilsLabelEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkQueueBeginDebugUtilsLabelEXT : nullptr;
-    }
-    if (!strcmp(name, "vkQueueEndDebugUtilsLabelEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkQueueEndDebugUtilsLabelEXT : nullptr;
-    }
-    if (!strcmp(name, "vkQueueInsertDebugUtilsLabelEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkQueueInsertDebugUtilsLabelEXT : nullptr;
-    }
-    if (!strcmp(name, "vkCmdBeginDebugUtilsLabelEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkCmdBeginDebugUtilsLabelEXT : nullptr;
-    }
-    if (!strcmp(name, "vkCmdEndDebugUtilsLabelEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkCmdEndDebugUtilsLabelEXT : nullptr;
-    }
-    if (!strcmp(name, "vkCmdInsertDebugUtilsLabelEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkCmdInsertDebugUtilsLabelEXT : nullptr;
-    }
-    if (!strcmp(name, "vkCreateDebugUtilsMessengerEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkCreateDebugUtilsMessengerEXT : nullptr;
-    }
-    if (!strcmp(name, "vkDestroyDebugUtilsMessengerEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkDestroyDebugUtilsMessengerEXT : nullptr;
-    }
-    if (!strcmp(name, "vkSubmitDebugUtilsMessageEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkSubmitDebugUtilsMessageEXT : nullptr;
-    }
-#endif
-#ifdef VK_ANDROID_external_memory_android_hardware_buffer
-    if (!strcmp(name, "vkGetAndroidHardwareBufferPropertiesANDROID"))
-    {
-        return (void*)dynCheck_entry_vkGetAndroidHardwareBufferPropertiesANDROID;
-    }
-    if (!strcmp(name, "vkGetMemoryAndroidHardwareBufferANDROID"))
-    {
-        return (void*)dynCheck_entry_vkGetMemoryAndroidHardwareBufferANDROID;
-    }
-#endif
-#ifdef VK_EXT_sample_locations
-    if (!strcmp(name, "vkCmdSetSampleLocationsEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_sample_locations");
-        return hasExt ? (void*)entry_vkCmdSetSampleLocationsEXT : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceMultisamplePropertiesEXT"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_sample_locations");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceMultisamplePropertiesEXT : nullptr;
-    }
-#endif
-#ifdef VK_EXT_validation_cache
-    if (!strcmp(name, "vkCreateValidationCacheEXT"))
-    {
-        return (void*)dynCheck_entry_vkCreateValidationCacheEXT;
-    }
-    if (!strcmp(name, "vkDestroyValidationCacheEXT"))
-    {
-        return (void*)dynCheck_entry_vkDestroyValidationCacheEXT;
-    }
-    if (!strcmp(name, "vkMergeValidationCachesEXT"))
-    {
-        return (void*)dynCheck_entry_vkMergeValidationCachesEXT;
-    }
-    if (!strcmp(name, "vkGetValidationCacheDataEXT"))
-    {
-        return (void*)dynCheck_entry_vkGetValidationCacheDataEXT;
-    }
-#endif
-#ifdef VK_EXT_external_memory_host
-    if (!strcmp(name, "vkGetMemoryHostPointerPropertiesEXT"))
-    {
-        return (void*)dynCheck_entry_vkGetMemoryHostPointerPropertiesEXT;
-    }
-#endif
-#ifdef VK_AMD_buffer_marker
-    if (!strcmp(name, "vkCmdWriteBufferMarkerAMD"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_AMD_buffer_marker");
-        return hasExt ? (void*)entry_vkCmdWriteBufferMarkerAMD : nullptr;
-    }
-#endif
-#ifdef VK_NV_device_diagnostic_checkpoints
-    if (!strcmp(name, "vkCmdSetCheckpointNV"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_device_diagnostic_checkpoints");
-        return hasExt ? (void*)entry_vkCmdSetCheckpointNV : nullptr;
-    }
-    if (!strcmp(name, "vkGetQueueCheckpointDataNV"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_device_diagnostic_checkpoints");
-        return hasExt ? (void*)entry_vkGetQueueCheckpointDataNV : nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_address_space
-    if (!strcmp(name, "vkMapMemoryIntoAddressSpaceGOOGLE"))
-    {
-        return (void*)dynCheck_entry_vkMapMemoryIntoAddressSpaceGOOGLE;
-    }
-#endif
-#ifdef VK_GOOGLE_color_buffer
-    if (!strcmp(name, "vkRegisterImageColorBufferGOOGLE"))
-    {
-        return (void*)dynCheck_entry_vkRegisterImageColorBufferGOOGLE;
-    }
-    if (!strcmp(name, "vkRegisterBufferColorBufferGOOGLE"))
-    {
-        return (void*)dynCheck_entry_vkRegisterBufferColorBufferGOOGLE;
-    }
-#endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
-    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplateSizedGOOGLE"))
-    {
-        return (void*)dynCheck_entry_vkUpdateDescriptorSetWithTemplateSizedGOOGLE;
-    }
-#endif
-#ifdef VK_GOOGLE_async_command_buffers
-    if (!strcmp(name, "vkBeginCommandBufferAsyncGOOGLE"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_GOOGLE_async_command_buffers");
-        return hasExt ? (void*)entry_vkBeginCommandBufferAsyncGOOGLE : nullptr;
-    }
-    if (!strcmp(name, "vkEndCommandBufferAsyncGOOGLE"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_GOOGLE_async_command_buffers");
-        return hasExt ? (void*)entry_vkEndCommandBufferAsyncGOOGLE : nullptr;
-    }
-    if (!strcmp(name, "vkResetCommandBufferAsyncGOOGLE"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_GOOGLE_async_command_buffers");
-        return hasExt ? (void*)entry_vkResetCommandBufferAsyncGOOGLE : nullptr;
-    }
-    if (!strcmp(name, "vkCommandBufferHostSyncGOOGLE"))
-    {
-        bool hasExt = resources->hasInstanceExtension(instance, "VK_GOOGLE_async_command_buffers");
-        return hasExt ? (void*)entry_vkCommandBufferHostSyncGOOGLE : nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
-    if (!strcmp(name, "vkCreateImageWithRequirementsGOOGLE"))
-    {
-        return (void*)dynCheck_entry_vkCreateImageWithRequirementsGOOGLE;
-    }
-    if (!strcmp(name, "vkCreateBufferWithRequirementsGOOGLE"))
-    {
-        return (void*)dynCheck_entry_vkCreateBufferWithRequirementsGOOGLE;
-    }
-#endif
-#ifdef VK_GOOGLE_address_space_info
-    if (!strcmp(name, "vkGetMemoryHostAddressInfoGOOGLE"))
-    {
-        return (void*)dynCheck_entry_vkGetMemoryHostAddressInfoGOOGLE;
-    }
-#endif
-#ifdef VK_GOOGLE_free_memory_sync
-    if (!strcmp(name, "vkFreeMemorySyncGOOGLE"))
-    {
-        return (void*)dynCheck_entry_vkFreeMemorySyncGOOGLE;
-    }
-#endif
-    return nullptr;
-}
-void* goldfish_vulkan_get_device_proc_address(VkDevice device, const char* name){
-    auto resources = ResourceTracker::get();
-    bool has1_1OrHigher = resources->getApiVersionFromDevice(device) >= VK_API_VERSION_1_1;
-#ifdef VK_VERSION_1_0
-    if (!strcmp(name, "vkCreateInstance"))
-    {
-        return (void*)entry_vkCreateInstance;
-    }
-    if (!strcmp(name, "vkDestroyInstance"))
-    {
-        return (void*)entry_vkDestroyInstance;
-    }
-    if (!strcmp(name, "vkEnumeratePhysicalDevices"))
-    {
-        return (void*)entry_vkEnumeratePhysicalDevices;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceFeatures"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceFeatures;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceFormatProperties;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceImageFormatProperties;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceProperties;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceQueueFamilyProperties;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceMemoryProperties;
-    }
-    if (!strcmp(name, "vkGetInstanceProcAddr"))
-    {
-        return (void*)entry_vkGetInstanceProcAddr;
-    }
-    if (!strcmp(name, "vkGetDeviceProcAddr"))
-    {
-        return (void*)entry_vkGetDeviceProcAddr;
-    }
-    if (!strcmp(name, "vkCreateDevice"))
-    {
-        return (void*)entry_vkCreateDevice;
-    }
-    if (!strcmp(name, "vkDestroyDevice"))
-    {
-        return (void*)entry_vkDestroyDevice;
-    }
-    if (!strcmp(name, "vkEnumerateInstanceExtensionProperties"))
-    {
-        return (void*)entry_vkEnumerateInstanceExtensionProperties;
-    }
-    if (!strcmp(name, "vkEnumerateDeviceExtensionProperties"))
-    {
-        return (void*)entry_vkEnumerateDeviceExtensionProperties;
-    }
-    if (!strcmp(name, "vkEnumerateInstanceLayerProperties"))
-    {
-        return (void*)entry_vkEnumerateInstanceLayerProperties;
-    }
-    if (!strcmp(name, "vkEnumerateDeviceLayerProperties"))
-    {
-        return (void*)entry_vkEnumerateDeviceLayerProperties;
-    }
-    if (!strcmp(name, "vkGetDeviceQueue"))
-    {
-        return (void*)entry_vkGetDeviceQueue;
-    }
-    if (!strcmp(name, "vkQueueSubmit"))
-    {
-        return (void*)entry_vkQueueSubmit;
-    }
-    if (!strcmp(name, "vkQueueWaitIdle"))
-    {
-        return (void*)entry_vkQueueWaitIdle;
-    }
-    if (!strcmp(name, "vkDeviceWaitIdle"))
-    {
-        return (void*)entry_vkDeviceWaitIdle;
-    }
-    if (!strcmp(name, "vkAllocateMemory"))
-    {
-        return (void*)entry_vkAllocateMemory;
-    }
-    if (!strcmp(name, "vkFreeMemory"))
-    {
-        return (void*)entry_vkFreeMemory;
-    }
-    if (!strcmp(name, "vkMapMemory"))
-    {
-        return (void*)entry_vkMapMemory;
-    }
-    if (!strcmp(name, "vkUnmapMemory"))
-    {
-        return (void*)entry_vkUnmapMemory;
-    }
-    if (!strcmp(name, "vkFlushMappedMemoryRanges"))
-    {
-        return (void*)entry_vkFlushMappedMemoryRanges;
-    }
-    if (!strcmp(name, "vkInvalidateMappedMemoryRanges"))
-    {
-        return (void*)entry_vkInvalidateMappedMemoryRanges;
-    }
-    if (!strcmp(name, "vkGetDeviceMemoryCommitment"))
-    {
-        return (void*)entry_vkGetDeviceMemoryCommitment;
-    }
-    if (!strcmp(name, "vkBindBufferMemory"))
-    {
-        return (void*)entry_vkBindBufferMemory;
-    }
-    if (!strcmp(name, "vkBindImageMemory"))
-    {
-        return (void*)entry_vkBindImageMemory;
-    }
-    if (!strcmp(name, "vkGetBufferMemoryRequirements"))
-    {
-        return (void*)entry_vkGetBufferMemoryRequirements;
-    }
-    if (!strcmp(name, "vkGetImageMemoryRequirements"))
-    {
-        return (void*)entry_vkGetImageMemoryRequirements;
-    }
-    if (!strcmp(name, "vkGetImageSparseMemoryRequirements"))
-    {
-        return (void*)entry_vkGetImageSparseMemoryRequirements;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties"))
-    {
-        return (void*)entry_vkGetPhysicalDeviceSparseImageFormatProperties;
-    }
-    if (!strcmp(name, "vkQueueBindSparse"))
-    {
-        return (void*)entry_vkQueueBindSparse;
-    }
-    if (!strcmp(name, "vkCreateFence"))
-    {
-        return (void*)entry_vkCreateFence;
-    }
-    if (!strcmp(name, "vkDestroyFence"))
-    {
-        return (void*)entry_vkDestroyFence;
-    }
-    if (!strcmp(name, "vkResetFences"))
-    {
-        return (void*)entry_vkResetFences;
-    }
-    if (!strcmp(name, "vkGetFenceStatus"))
-    {
-        return (void*)entry_vkGetFenceStatus;
-    }
-    if (!strcmp(name, "vkWaitForFences"))
-    {
-        return (void*)entry_vkWaitForFences;
-    }
-    if (!strcmp(name, "vkCreateSemaphore"))
-    {
-        return (void*)entry_vkCreateSemaphore;
-    }
-    if (!strcmp(name, "vkDestroySemaphore"))
-    {
-        return (void*)entry_vkDestroySemaphore;
-    }
-    if (!strcmp(name, "vkCreateEvent"))
-    {
-        return (void*)entry_vkCreateEvent;
-    }
-    if (!strcmp(name, "vkDestroyEvent"))
-    {
-        return (void*)entry_vkDestroyEvent;
-    }
-    if (!strcmp(name, "vkGetEventStatus"))
-    {
-        return (void*)entry_vkGetEventStatus;
-    }
-    if (!strcmp(name, "vkSetEvent"))
-    {
-        return (void*)entry_vkSetEvent;
-    }
-    if (!strcmp(name, "vkResetEvent"))
-    {
-        return (void*)entry_vkResetEvent;
-    }
-    if (!strcmp(name, "vkCreateQueryPool"))
-    {
-        return (void*)entry_vkCreateQueryPool;
-    }
-    if (!strcmp(name, "vkDestroyQueryPool"))
-    {
-        return (void*)entry_vkDestroyQueryPool;
-    }
-    if (!strcmp(name, "vkGetQueryPoolResults"))
-    {
-        return (void*)entry_vkGetQueryPoolResults;
-    }
-    if (!strcmp(name, "vkCreateBuffer"))
-    {
-        return (void*)entry_vkCreateBuffer;
-    }
-    if (!strcmp(name, "vkDestroyBuffer"))
-    {
-        return (void*)entry_vkDestroyBuffer;
-    }
-    if (!strcmp(name, "vkCreateBufferView"))
-    {
-        return (void*)entry_vkCreateBufferView;
-    }
-    if (!strcmp(name, "vkDestroyBufferView"))
-    {
-        return (void*)entry_vkDestroyBufferView;
-    }
-    if (!strcmp(name, "vkCreateImage"))
-    {
-        return (void*)entry_vkCreateImage;
-    }
-    if (!strcmp(name, "vkDestroyImage"))
-    {
-        return (void*)entry_vkDestroyImage;
-    }
-    if (!strcmp(name, "vkGetImageSubresourceLayout"))
-    {
-        return (void*)entry_vkGetImageSubresourceLayout;
-    }
-    if (!strcmp(name, "vkCreateImageView"))
-    {
-        return (void*)entry_vkCreateImageView;
-    }
-    if (!strcmp(name, "vkDestroyImageView"))
-    {
-        return (void*)entry_vkDestroyImageView;
-    }
-    if (!strcmp(name, "vkCreateShaderModule"))
-    {
-        return (void*)entry_vkCreateShaderModule;
-    }
-    if (!strcmp(name, "vkDestroyShaderModule"))
-    {
-        return (void*)entry_vkDestroyShaderModule;
-    }
-    if (!strcmp(name, "vkCreatePipelineCache"))
-    {
-        return (void*)entry_vkCreatePipelineCache;
-    }
-    if (!strcmp(name, "vkDestroyPipelineCache"))
-    {
-        return (void*)entry_vkDestroyPipelineCache;
-    }
-    if (!strcmp(name, "vkGetPipelineCacheData"))
-    {
-        return (void*)entry_vkGetPipelineCacheData;
-    }
-    if (!strcmp(name, "vkMergePipelineCaches"))
-    {
-        return (void*)entry_vkMergePipelineCaches;
-    }
-    if (!strcmp(name, "vkCreateGraphicsPipelines"))
-    {
-        return (void*)entry_vkCreateGraphicsPipelines;
-    }
-    if (!strcmp(name, "vkCreateComputePipelines"))
-    {
-        return (void*)entry_vkCreateComputePipelines;
-    }
-    if (!strcmp(name, "vkDestroyPipeline"))
-    {
-        return (void*)entry_vkDestroyPipeline;
-    }
-    if (!strcmp(name, "vkCreatePipelineLayout"))
-    {
-        return (void*)entry_vkCreatePipelineLayout;
-    }
-    if (!strcmp(name, "vkDestroyPipelineLayout"))
-    {
-        return (void*)entry_vkDestroyPipelineLayout;
-    }
-    if (!strcmp(name, "vkCreateSampler"))
-    {
-        return (void*)entry_vkCreateSampler;
-    }
-    if (!strcmp(name, "vkDestroySampler"))
-    {
-        return (void*)entry_vkDestroySampler;
-    }
-    if (!strcmp(name, "vkCreateDescriptorSetLayout"))
-    {
-        return (void*)entry_vkCreateDescriptorSetLayout;
-    }
-    if (!strcmp(name, "vkDestroyDescriptorSetLayout"))
-    {
-        return (void*)entry_vkDestroyDescriptorSetLayout;
-    }
-    if (!strcmp(name, "vkCreateDescriptorPool"))
-    {
-        return (void*)entry_vkCreateDescriptorPool;
-    }
-    if (!strcmp(name, "vkDestroyDescriptorPool"))
-    {
-        return (void*)entry_vkDestroyDescriptorPool;
-    }
-    if (!strcmp(name, "vkResetDescriptorPool"))
-    {
-        return (void*)entry_vkResetDescriptorPool;
-    }
-    if (!strcmp(name, "vkAllocateDescriptorSets"))
-    {
-        return (void*)entry_vkAllocateDescriptorSets;
-    }
-    if (!strcmp(name, "vkFreeDescriptorSets"))
-    {
-        return (void*)entry_vkFreeDescriptorSets;
-    }
-    if (!strcmp(name, "vkUpdateDescriptorSets"))
-    {
-        return (void*)entry_vkUpdateDescriptorSets;
-    }
-    if (!strcmp(name, "vkCreateFramebuffer"))
-    {
-        return (void*)entry_vkCreateFramebuffer;
-    }
-    if (!strcmp(name, "vkDestroyFramebuffer"))
-    {
-        return (void*)entry_vkDestroyFramebuffer;
-    }
-    if (!strcmp(name, "vkCreateRenderPass"))
-    {
-        return (void*)entry_vkCreateRenderPass;
-    }
-    if (!strcmp(name, "vkDestroyRenderPass"))
-    {
-        return (void*)entry_vkDestroyRenderPass;
-    }
-    if (!strcmp(name, "vkGetRenderAreaGranularity"))
-    {
-        return (void*)entry_vkGetRenderAreaGranularity;
-    }
-    if (!strcmp(name, "vkCreateCommandPool"))
-    {
-        return (void*)entry_vkCreateCommandPool;
-    }
-    if (!strcmp(name, "vkDestroyCommandPool"))
-    {
-        return (void*)entry_vkDestroyCommandPool;
-    }
-    if (!strcmp(name, "vkResetCommandPool"))
-    {
-        return (void*)entry_vkResetCommandPool;
-    }
-    if (!strcmp(name, "vkAllocateCommandBuffers"))
-    {
-        return (void*)entry_vkAllocateCommandBuffers;
-    }
-    if (!strcmp(name, "vkFreeCommandBuffers"))
-    {
-        return (void*)entry_vkFreeCommandBuffers;
-    }
-    if (!strcmp(name, "vkBeginCommandBuffer"))
-    {
-        return (void*)entry_vkBeginCommandBuffer;
-    }
-    if (!strcmp(name, "vkEndCommandBuffer"))
-    {
-        return (void*)entry_vkEndCommandBuffer;
-    }
-    if (!strcmp(name, "vkResetCommandBuffer"))
-    {
-        return (void*)entry_vkResetCommandBuffer;
-    }
-    if (!strcmp(name, "vkCmdBindPipeline"))
-    {
-        return (void*)entry_vkCmdBindPipeline;
-    }
-    if (!strcmp(name, "vkCmdSetViewport"))
-    {
-        return (void*)entry_vkCmdSetViewport;
-    }
-    if (!strcmp(name, "vkCmdSetScissor"))
-    {
-        return (void*)entry_vkCmdSetScissor;
-    }
-    if (!strcmp(name, "vkCmdSetLineWidth"))
-    {
-        return (void*)entry_vkCmdSetLineWidth;
-    }
-    if (!strcmp(name, "vkCmdSetDepthBias"))
-    {
-        return (void*)entry_vkCmdSetDepthBias;
-    }
-    if (!strcmp(name, "vkCmdSetBlendConstants"))
-    {
-        return (void*)entry_vkCmdSetBlendConstants;
-    }
-    if (!strcmp(name, "vkCmdSetDepthBounds"))
-    {
-        return (void*)entry_vkCmdSetDepthBounds;
-    }
-    if (!strcmp(name, "vkCmdSetStencilCompareMask"))
-    {
-        return (void*)entry_vkCmdSetStencilCompareMask;
-    }
-    if (!strcmp(name, "vkCmdSetStencilWriteMask"))
-    {
-        return (void*)entry_vkCmdSetStencilWriteMask;
-    }
-    if (!strcmp(name, "vkCmdSetStencilReference"))
-    {
-        return (void*)entry_vkCmdSetStencilReference;
-    }
-    if (!strcmp(name, "vkCmdBindDescriptorSets"))
-    {
-        return (void*)entry_vkCmdBindDescriptorSets;
-    }
-    if (!strcmp(name, "vkCmdBindIndexBuffer"))
-    {
-        return (void*)entry_vkCmdBindIndexBuffer;
-    }
-    if (!strcmp(name, "vkCmdBindVertexBuffers"))
-    {
-        return (void*)entry_vkCmdBindVertexBuffers;
-    }
-    if (!strcmp(name, "vkCmdDraw"))
-    {
-        return (void*)entry_vkCmdDraw;
-    }
-    if (!strcmp(name, "vkCmdDrawIndexed"))
-    {
-        return (void*)entry_vkCmdDrawIndexed;
-    }
-    if (!strcmp(name, "vkCmdDrawIndirect"))
-    {
-        return (void*)entry_vkCmdDrawIndirect;
-    }
-    if (!strcmp(name, "vkCmdDrawIndexedIndirect"))
-    {
-        return (void*)entry_vkCmdDrawIndexedIndirect;
-    }
-    if (!strcmp(name, "vkCmdDispatch"))
-    {
-        return (void*)entry_vkCmdDispatch;
-    }
-    if (!strcmp(name, "vkCmdDispatchIndirect"))
-    {
-        return (void*)entry_vkCmdDispatchIndirect;
-    }
-    if (!strcmp(name, "vkCmdCopyBuffer"))
-    {
-        return (void*)entry_vkCmdCopyBuffer;
-    }
-    if (!strcmp(name, "vkCmdCopyImage"))
-    {
-        return (void*)entry_vkCmdCopyImage;
-    }
-    if (!strcmp(name, "vkCmdBlitImage"))
-    {
-        return (void*)entry_vkCmdBlitImage;
-    }
-    if (!strcmp(name, "vkCmdCopyBufferToImage"))
-    {
-        return (void*)entry_vkCmdCopyBufferToImage;
-    }
-    if (!strcmp(name, "vkCmdCopyImageToBuffer"))
-    {
-        return (void*)entry_vkCmdCopyImageToBuffer;
-    }
-    if (!strcmp(name, "vkCmdUpdateBuffer"))
-    {
-        return (void*)entry_vkCmdUpdateBuffer;
-    }
-    if (!strcmp(name, "vkCmdFillBuffer"))
-    {
-        return (void*)entry_vkCmdFillBuffer;
-    }
-    if (!strcmp(name, "vkCmdClearColorImage"))
-    {
-        return (void*)entry_vkCmdClearColorImage;
-    }
-    if (!strcmp(name, "vkCmdClearDepthStencilImage"))
-    {
-        return (void*)entry_vkCmdClearDepthStencilImage;
-    }
-    if (!strcmp(name, "vkCmdClearAttachments"))
-    {
-        return (void*)entry_vkCmdClearAttachments;
-    }
-    if (!strcmp(name, "vkCmdResolveImage"))
-    {
-        return (void*)entry_vkCmdResolveImage;
-    }
-    if (!strcmp(name, "vkCmdSetEvent"))
-    {
-        return (void*)entry_vkCmdSetEvent;
-    }
-    if (!strcmp(name, "vkCmdResetEvent"))
-    {
-        return (void*)entry_vkCmdResetEvent;
-    }
-    if (!strcmp(name, "vkCmdWaitEvents"))
-    {
-        return (void*)entry_vkCmdWaitEvents;
-    }
-    if (!strcmp(name, "vkCmdPipelineBarrier"))
-    {
-        return (void*)entry_vkCmdPipelineBarrier;
-    }
-    if (!strcmp(name, "vkCmdBeginQuery"))
-    {
-        return (void*)entry_vkCmdBeginQuery;
-    }
-    if (!strcmp(name, "vkCmdEndQuery"))
-    {
-        return (void*)entry_vkCmdEndQuery;
-    }
-    if (!strcmp(name, "vkCmdResetQueryPool"))
-    {
-        return (void*)entry_vkCmdResetQueryPool;
-    }
-    if (!strcmp(name, "vkCmdWriteTimestamp"))
-    {
-        return (void*)entry_vkCmdWriteTimestamp;
-    }
-    if (!strcmp(name, "vkCmdCopyQueryPoolResults"))
-    {
-        return (void*)entry_vkCmdCopyQueryPoolResults;
-    }
-    if (!strcmp(name, "vkCmdPushConstants"))
-    {
-        return (void*)entry_vkCmdPushConstants;
-    }
-    if (!strcmp(name, "vkCmdBeginRenderPass"))
-    {
-        return (void*)entry_vkCmdBeginRenderPass;
-    }
-    if (!strcmp(name, "vkCmdNextSubpass"))
-    {
-        return (void*)entry_vkCmdNextSubpass;
-    }
-    if (!strcmp(name, "vkCmdEndRenderPass"))
-    {
-        return (void*)entry_vkCmdEndRenderPass;
-    }
-    if (!strcmp(name, "vkCmdExecuteCommands"))
-    {
-        return (void*)entry_vkCmdExecuteCommands;
-    }
-#endif
-#ifdef VK_VERSION_1_1
-    if (!strcmp(name, "vkEnumerateInstanceVersion"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkEnumerateInstanceVersion : nullptr;
-    }
-    if (!strcmp(name, "vkBindBufferMemory2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkBindBufferMemory2 : nullptr;
-    }
-    if (!strcmp(name, "vkBindImageMemory2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkBindImageMemory2 : nullptr;
-    }
-    if (!strcmp(name, "vkGetDeviceGroupPeerMemoryFeatures"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetDeviceGroupPeerMemoryFeatures : nullptr;
-    }
-    if (!strcmp(name, "vkCmdSetDeviceMask"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkCmdSetDeviceMask : nullptr;
-    }
-    if (!strcmp(name, "vkCmdDispatchBase"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkCmdDispatchBase : nullptr;
-    }
-    if (!strcmp(name, "vkEnumeratePhysicalDeviceGroups"))
-    {
-        return nullptr;
-    }
-    if (!strcmp(name, "vkGetImageMemoryRequirements2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetImageMemoryRequirements2 : nullptr;
-    }
-    if (!strcmp(name, "vkGetBufferMemoryRequirements2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetBufferMemoryRequirements2 : nullptr;
-    }
-    if (!strcmp(name, "vkGetImageSparseMemoryRequirements2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetImageSparseMemoryRequirements2 : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceFeatures2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceFeatures2 : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceProperties2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceProperties2 : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceFormatProperties2 : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceImageFormatProperties2 : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceQueueFamilyProperties2 : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceMemoryProperties2 : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceSparseImageFormatProperties2 : nullptr;
-    }
-    if (!strcmp(name, "vkTrimCommandPool"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkTrimCommandPool : nullptr;
-    }
-    if (!strcmp(name, "vkGetDeviceQueue2"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetDeviceQueue2 : nullptr;
-    }
-    if (!strcmp(name, "vkCreateSamplerYcbcrConversion"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkCreateSamplerYcbcrConversion : nullptr;
-    }
-    if (!strcmp(name, "vkDestroySamplerYcbcrConversion"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkDestroySamplerYcbcrConversion : nullptr;
-    }
-    if (!strcmp(name, "vkCreateDescriptorUpdateTemplate"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkCreateDescriptorUpdateTemplate : nullptr;
-    }
-    if (!strcmp(name, "vkDestroyDescriptorUpdateTemplate"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkDestroyDescriptorUpdateTemplate : nullptr;
-    }
-    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplate"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkUpdateDescriptorSetWithTemplate : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalBufferProperties"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceExternalBufferProperties : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalFenceProperties"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceExternalFenceProperties : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalSemaphoreProperties"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceExternalSemaphoreProperties : nullptr;
-    }
-    if (!strcmp(name, "vkGetDescriptorSetLayoutSupport"))
-    {
-        return has1_1OrHigher ? (void*)entry_vkGetDescriptorSetLayoutSupport : nullptr;
-    }
-#endif
-#ifdef VK_KHR_surface
-    if (!strcmp(name, "vkDestroySurfaceKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_surface");
-        return hasExt ? (void*)entry_vkDestroySurfaceKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceSupportKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceSupportKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceCapabilitiesKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceFormatsKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceFormatsKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfacePresentModesKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfacePresentModesKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_swapchain
-    if (!strcmp(name, "vkCreateSwapchainKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
-        return hasExt ? (void*)entry_vkCreateSwapchainKHR : nullptr;
-    }
-    if (!strcmp(name, "vkDestroySwapchainKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
-        return hasExt ? (void*)entry_vkDestroySwapchainKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetSwapchainImagesKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
-        return hasExt ? (void*)entry_vkGetSwapchainImagesKHR : nullptr;
-    }
-    if (!strcmp(name, "vkAcquireNextImageKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
-        return hasExt ? (void*)entry_vkAcquireNextImageKHR : nullptr;
-    }
-    if (!strcmp(name, "vkQueuePresentKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
-        return hasExt ? (void*)entry_vkQueuePresentKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetDeviceGroupPresentCapabilitiesKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
-        return hasExt ? (void*)entry_vkGetDeviceGroupPresentCapabilitiesKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetDeviceGroupSurfacePresentModesKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
-        return hasExt ? (void*)entry_vkGetDeviceGroupSurfacePresentModesKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDevicePresentRectanglesKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
-        return hasExt ? (void*)entry_vkGetPhysicalDevicePresentRectanglesKHR : nullptr;
-    }
-    if (!strcmp(name, "vkAcquireNextImage2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
-        return hasExt ? (void*)entry_vkAcquireNextImage2KHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_display
-    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPropertiesKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_display");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceDisplayPropertiesKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_display");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceDisplayPlanePropertiesKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetDisplayPlaneSupportedDisplaysKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_display");
-        return hasExt ? (void*)entry_vkGetDisplayPlaneSupportedDisplaysKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetDisplayModePropertiesKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_display");
-        return hasExt ? (void*)entry_vkGetDisplayModePropertiesKHR : nullptr;
-    }
-    if (!strcmp(name, "vkCreateDisplayModeKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_display");
-        return hasExt ? (void*)entry_vkCreateDisplayModeKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetDisplayPlaneCapabilitiesKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_display");
-        return hasExt ? (void*)entry_vkGetDisplayPlaneCapabilitiesKHR : nullptr;
-    }
-    if (!strcmp(name, "vkCreateDisplayPlaneSurfaceKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_display");
-        return hasExt ? (void*)entry_vkCreateDisplayPlaneSurfaceKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_display_swapchain
-    if (!strcmp(name, "vkCreateSharedSwapchainsKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_display_swapchain");
-        return hasExt ? (void*)entry_vkCreateSharedSwapchainsKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_xlib_surface
-    if (!strcmp(name, "vkCreateXlibSurfaceKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_xlib_surface");
-        return hasExt ? (void*)entry_vkCreateXlibSurfaceKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceXlibPresentationSupportKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_xlib_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceXlibPresentationSupportKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_xcb_surface
-    if (!strcmp(name, "vkCreateXcbSurfaceKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_xcb_surface");
-        return hasExt ? (void*)entry_vkCreateXcbSurfaceKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceXcbPresentationSupportKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_xcb_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceXcbPresentationSupportKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_wayland_surface
-    if (!strcmp(name, "vkCreateWaylandSurfaceKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_wayland_surface");
-        return hasExt ? (void*)entry_vkCreateWaylandSurfaceKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceWaylandPresentationSupportKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_wayland_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceWaylandPresentationSupportKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_mir_surface
-    if (!strcmp(name, "vkCreateMirSurfaceKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_mir_surface");
-        return hasExt ? (void*)entry_vkCreateMirSurfaceKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceMirPresentationSupportKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_mir_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceMirPresentationSupportKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_android_surface
-    if (!strcmp(name, "vkCreateAndroidSurfaceKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_android_surface");
-        return hasExt ? (void*)entry_vkCreateAndroidSurfaceKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_win32_surface
-    if (!strcmp(name, "vkCreateWin32SurfaceKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_win32_surface");
-        return hasExt ? (void*)entry_vkCreateWin32SurfaceKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceWin32PresentationSupportKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_win32_surface");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceWin32PresentationSupportKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_get_physical_device_properties2
-    if (!strcmp(name, "vkGetPhysicalDeviceFeatures2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_physical_device_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceFeatures2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceProperties2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_physical_device_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceProperties2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_physical_device_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceFormatProperties2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_physical_device_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceImageFormatProperties2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_physical_device_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceQueueFamilyProperties2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_physical_device_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceMemoryProperties2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_physical_device_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceSparseImageFormatProperties2KHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_device_group
-    if (!strcmp(name, "vkGetDeviceGroupPeerMemoryFeaturesKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_device_group");
-        return hasExt ? (void*)entry_vkGetDeviceGroupPeerMemoryFeaturesKHR : nullptr;
-    }
-    if (!strcmp(name, "vkCmdSetDeviceMaskKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_device_group");
-        return hasExt ? (void*)entry_vkCmdSetDeviceMaskKHR : nullptr;
-    }
-    if (!strcmp(name, "vkCmdDispatchBaseKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_device_group");
-        return hasExt ? (void*)entry_vkCmdDispatchBaseKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_maintenance1
-    if (!strcmp(name, "vkTrimCommandPoolKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_maintenance1");
-        return hasExt ? (void*)entry_vkTrimCommandPoolKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_device_group_creation
-    if (!strcmp(name, "vkEnumeratePhysicalDeviceGroupsKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_device_group_creation");
-        return hasExt ? (void*)entry_vkEnumeratePhysicalDeviceGroupsKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_memory_capabilities
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalBufferPropertiesKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_memory_capabilities");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceExternalBufferPropertiesKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_memory_win32
-    if (!strcmp(name, "vkGetMemoryWin32HandleKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_memory_win32");
-        return hasExt ? (void*)entry_vkGetMemoryWin32HandleKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetMemoryWin32HandlePropertiesKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_memory_win32");
-        return hasExt ? (void*)entry_vkGetMemoryWin32HandlePropertiesKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_memory_fd
-    if (!strcmp(name, "vkGetMemoryFdKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_memory_fd");
-        return hasExt ? (void*)entry_vkGetMemoryFdKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetMemoryFdPropertiesKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_memory_fd");
-        return hasExt ? (void*)entry_vkGetMemoryFdPropertiesKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_semaphore_capabilities
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_capabilities");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_semaphore_win32
-    if (!strcmp(name, "vkImportSemaphoreWin32HandleKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_win32");
-        return hasExt ? (void*)entry_vkImportSemaphoreWin32HandleKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetSemaphoreWin32HandleKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_win32");
-        return hasExt ? (void*)entry_vkGetSemaphoreWin32HandleKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_semaphore_fd
-    if (!strcmp(name, "vkImportSemaphoreFdKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_fd");
-        return hasExt ? (void*)entry_vkImportSemaphoreFdKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetSemaphoreFdKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_fd");
-        return hasExt ? (void*)entry_vkGetSemaphoreFdKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_push_descriptor
-    if (!strcmp(name, "vkCmdPushDescriptorSetKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_push_descriptor");
-        return hasExt ? (void*)entry_vkCmdPushDescriptorSetKHR : nullptr;
-    }
-    if (!strcmp(name, "vkCmdPushDescriptorSetWithTemplateKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_push_descriptor");
-        return hasExt ? (void*)entry_vkCmdPushDescriptorSetWithTemplateKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_descriptor_update_template
-    if (!strcmp(name, "vkCreateDescriptorUpdateTemplateKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_descriptor_update_template");
-        return hasExt ? (void*)entry_vkCreateDescriptorUpdateTemplateKHR : nullptr;
-    }
-    if (!strcmp(name, "vkDestroyDescriptorUpdateTemplateKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_descriptor_update_template");
-        return hasExt ? (void*)entry_vkDestroyDescriptorUpdateTemplateKHR : nullptr;
-    }
-    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplateKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_descriptor_update_template");
-        return hasExt ? (void*)entry_vkUpdateDescriptorSetWithTemplateKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_create_renderpass2
-    if (!strcmp(name, "vkCreateRenderPass2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_create_renderpass2");
-        return hasExt ? (void*)entry_vkCreateRenderPass2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkCmdBeginRenderPass2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_create_renderpass2");
-        return hasExt ? (void*)entry_vkCmdBeginRenderPass2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkCmdNextSubpass2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_create_renderpass2");
-        return hasExt ? (void*)entry_vkCmdNextSubpass2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkCmdEndRenderPass2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_create_renderpass2");
-        return hasExt ? (void*)entry_vkCmdEndRenderPass2KHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_shared_presentable_image
-    if (!strcmp(name, "vkGetSwapchainStatusKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_shared_presentable_image");
-        return hasExt ? (void*)entry_vkGetSwapchainStatusKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_fence_capabilities
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalFencePropertiesKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_fence_capabilities");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceExternalFencePropertiesKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_fence_win32
-    if (!strcmp(name, "vkImportFenceWin32HandleKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_fence_win32");
-        return hasExt ? (void*)entry_vkImportFenceWin32HandleKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetFenceWin32HandleKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_fence_win32");
-        return hasExt ? (void*)entry_vkGetFenceWin32HandleKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_external_fence_fd
-    if (!strcmp(name, "vkImportFenceFdKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_fence_fd");
-        return hasExt ? (void*)entry_vkImportFenceFdKHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetFenceFdKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_fence_fd");
-        return hasExt ? (void*)entry_vkGetFenceFdKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_get_surface_capabilities2
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilities2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_surface_capabilities2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceCapabilities2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceFormats2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_surface_capabilities2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceFormats2KHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_get_display_properties2
-    if (!strcmp(name, "vkGetPhysicalDeviceDisplayProperties2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_display_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceDisplayProperties2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPlaneProperties2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_display_properties2");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceDisplayPlaneProperties2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetDisplayModeProperties2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_display_properties2");
-        return hasExt ? (void*)entry_vkGetDisplayModeProperties2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetDisplayPlaneCapabilities2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_display_properties2");
-        return hasExt ? (void*)entry_vkGetDisplayPlaneCapabilities2KHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_get_memory_requirements2
-    if (!strcmp(name, "vkGetImageMemoryRequirements2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_memory_requirements2");
-        return hasExt ? (void*)entry_vkGetImageMemoryRequirements2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetBufferMemoryRequirements2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_memory_requirements2");
-        return hasExt ? (void*)entry_vkGetBufferMemoryRequirements2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkGetImageSparseMemoryRequirements2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_memory_requirements2");
-        return hasExt ? (void*)entry_vkGetImageSparseMemoryRequirements2KHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_sampler_ycbcr_conversion
-    if (!strcmp(name, "vkCreateSamplerYcbcrConversionKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_sampler_ycbcr_conversion");
-        return hasExt ? (void*)entry_vkCreateSamplerYcbcrConversionKHR : nullptr;
-    }
-    if (!strcmp(name, "vkDestroySamplerYcbcrConversionKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_sampler_ycbcr_conversion");
-        return hasExt ? (void*)entry_vkDestroySamplerYcbcrConversionKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_bind_memory2
-    if (!strcmp(name, "vkBindBufferMemory2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_bind_memory2");
-        return hasExt ? (void*)entry_vkBindBufferMemory2KHR : nullptr;
-    }
-    if (!strcmp(name, "vkBindImageMemory2KHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_bind_memory2");
-        return hasExt ? (void*)entry_vkBindImageMemory2KHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_maintenance3
-    if (!strcmp(name, "vkGetDescriptorSetLayoutSupportKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_maintenance3");
-        return hasExt ? (void*)entry_vkGetDescriptorSetLayoutSupportKHR : nullptr;
-    }
-#endif
-#ifdef VK_KHR_draw_indirect_count
-    if (!strcmp(name, "vkCmdDrawIndirectCountKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_draw_indirect_count");
-        return hasExt ? (void*)entry_vkCmdDrawIndirectCountKHR : nullptr;
-    }
-    if (!strcmp(name, "vkCmdDrawIndexedIndirectCountKHR"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_draw_indirect_count");
-        return hasExt ? (void*)entry_vkCmdDrawIndexedIndirectCountKHR : nullptr;
-    }
-#endif
-#ifdef VK_ANDROID_native_buffer
-    if (!strcmp(name, "vkGetSwapchainGrallocUsageANDROID"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_ANDROID_native_buffer");
-        return hasExt ? (void*)entry_vkGetSwapchainGrallocUsageANDROID : nullptr;
-    }
-    if (!strcmp(name, "vkAcquireImageANDROID"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_ANDROID_native_buffer");
-        return hasExt ? (void*)entry_vkAcquireImageANDROID : nullptr;
-    }
-    if (!strcmp(name, "vkQueueSignalReleaseImageANDROID"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_ANDROID_native_buffer");
-        return hasExt ? (void*)entry_vkQueueSignalReleaseImageANDROID : nullptr;
-    }
-#endif
-#ifdef VK_EXT_debug_report
-    if (!strcmp(name, "vkCreateDebugReportCallbackEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_report");
-        return hasExt ? (void*)entry_vkCreateDebugReportCallbackEXT : nullptr;
-    }
-    if (!strcmp(name, "vkDestroyDebugReportCallbackEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_report");
-        return hasExt ? (void*)entry_vkDestroyDebugReportCallbackEXT : nullptr;
-    }
-    if (!strcmp(name, "vkDebugReportMessageEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_report");
-        return hasExt ? (void*)entry_vkDebugReportMessageEXT : nullptr;
-    }
-#endif
-#ifdef VK_EXT_debug_marker
-    if (!strcmp(name, "vkDebugMarkerSetObjectTagEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_marker");
-        return hasExt ? (void*)entry_vkDebugMarkerSetObjectTagEXT : nullptr;
-    }
-    if (!strcmp(name, "vkDebugMarkerSetObjectNameEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_marker");
-        return hasExt ? (void*)entry_vkDebugMarkerSetObjectNameEXT : nullptr;
-    }
-    if (!strcmp(name, "vkCmdDebugMarkerBeginEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_marker");
-        return hasExt ? (void*)entry_vkCmdDebugMarkerBeginEXT : nullptr;
-    }
-    if (!strcmp(name, "vkCmdDebugMarkerEndEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_marker");
-        return hasExt ? (void*)entry_vkCmdDebugMarkerEndEXT : nullptr;
-    }
-    if (!strcmp(name, "vkCmdDebugMarkerInsertEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_marker");
-        return hasExt ? (void*)entry_vkCmdDebugMarkerInsertEXT : nullptr;
-    }
-#endif
-#ifdef VK_AMD_draw_indirect_count
-    if (!strcmp(name, "vkCmdDrawIndirectCountAMD"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_AMD_draw_indirect_count");
-        return hasExt ? (void*)entry_vkCmdDrawIndirectCountAMD : nullptr;
-    }
-    if (!strcmp(name, "vkCmdDrawIndexedIndirectCountAMD"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_AMD_draw_indirect_count");
-        return hasExt ? (void*)entry_vkCmdDrawIndexedIndirectCountAMD : nullptr;
-    }
-#endif
-#ifdef VK_AMD_shader_info
-    if (!strcmp(name, "vkGetShaderInfoAMD"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_AMD_shader_info");
-        return hasExt ? (void*)entry_vkGetShaderInfoAMD : nullptr;
-    }
-#endif
-#ifdef VK_NV_external_memory_capabilities
-    if (!strcmp(name, "vkGetPhysicalDeviceExternalImageFormatPropertiesNV"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_external_memory_capabilities");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceExternalImageFormatPropertiesNV : nullptr;
-    }
-#endif
-#ifdef VK_NV_external_memory_win32
-    if (!strcmp(name, "vkGetMemoryWin32HandleNV"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_external_memory_win32");
-        return hasExt ? (void*)entry_vkGetMemoryWin32HandleNV : nullptr;
-    }
-#endif
-#ifdef VK_NN_vi_surface
-    if (!strcmp(name, "vkCreateViSurfaceNN"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_NN_vi_surface");
-        return hasExt ? (void*)entry_vkCreateViSurfaceNN : nullptr;
-    }
-#endif
-#ifdef VK_EXT_conditional_rendering
-    if (!strcmp(name, "vkCmdBeginConditionalRenderingEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_conditional_rendering");
-        return hasExt ? (void*)entry_vkCmdBeginConditionalRenderingEXT : nullptr;
-    }
-    if (!strcmp(name, "vkCmdEndConditionalRenderingEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_conditional_rendering");
-        return hasExt ? (void*)entry_vkCmdEndConditionalRenderingEXT : nullptr;
-    }
-#endif
-#ifdef VK_NVX_device_generated_commands
-    if (!strcmp(name, "vkCmdProcessCommandsNVX"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_NVX_device_generated_commands");
-        return hasExt ? (void*)entry_vkCmdProcessCommandsNVX : nullptr;
-    }
-    if (!strcmp(name, "vkCmdReserveSpaceForCommandsNVX"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_NVX_device_generated_commands");
-        return hasExt ? (void*)entry_vkCmdReserveSpaceForCommandsNVX : nullptr;
-    }
-    if (!strcmp(name, "vkCreateIndirectCommandsLayoutNVX"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_NVX_device_generated_commands");
-        return hasExt ? (void*)entry_vkCreateIndirectCommandsLayoutNVX : nullptr;
-    }
-    if (!strcmp(name, "vkDestroyIndirectCommandsLayoutNVX"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_NVX_device_generated_commands");
-        return hasExt ? (void*)entry_vkDestroyIndirectCommandsLayoutNVX : nullptr;
-    }
-    if (!strcmp(name, "vkCreateObjectTableNVX"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_NVX_device_generated_commands");
-        return hasExt ? (void*)entry_vkCreateObjectTableNVX : nullptr;
-    }
-    if (!strcmp(name, "vkDestroyObjectTableNVX"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_NVX_device_generated_commands");
-        return hasExt ? (void*)entry_vkDestroyObjectTableNVX : nullptr;
-    }
-    if (!strcmp(name, "vkRegisterObjectsNVX"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_NVX_device_generated_commands");
-        return hasExt ? (void*)entry_vkRegisterObjectsNVX : nullptr;
-    }
-    if (!strcmp(name, "vkUnregisterObjectsNVX"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_NVX_device_generated_commands");
-        return hasExt ? (void*)entry_vkUnregisterObjectsNVX : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_NVX_device_generated_commands");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX : nullptr;
-    }
-#endif
-#ifdef VK_NV_clip_space_w_scaling
-    if (!strcmp(name, "vkCmdSetViewportWScalingNV"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_clip_space_w_scaling");
-        return hasExt ? (void*)entry_vkCmdSetViewportWScalingNV : nullptr;
-    }
-#endif
-#ifdef VK_EXT_direct_mode_display
-    if (!strcmp(name, "vkReleaseDisplayEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_direct_mode_display");
-        return hasExt ? (void*)entry_vkReleaseDisplayEXT : nullptr;
-    }
-#endif
-#ifdef VK_EXT_acquire_xlib_display
-    if (!strcmp(name, "vkAcquireXlibDisplayEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_acquire_xlib_display");
-        return hasExt ? (void*)entry_vkAcquireXlibDisplayEXT : nullptr;
-    }
-    if (!strcmp(name, "vkGetRandROutputDisplayEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_acquire_xlib_display");
-        return hasExt ? (void*)entry_vkGetRandROutputDisplayEXT : nullptr;
-    }
-#endif
-#ifdef VK_EXT_display_surface_counter
-    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilities2EXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_display_surface_counter");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceCapabilities2EXT : nullptr;
-    }
-#endif
-#ifdef VK_EXT_display_control
-    if (!strcmp(name, "vkDisplayPowerControlEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_display_control");
-        return hasExt ? (void*)entry_vkDisplayPowerControlEXT : nullptr;
-    }
-    if (!strcmp(name, "vkRegisterDeviceEventEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_display_control");
-        return hasExt ? (void*)entry_vkRegisterDeviceEventEXT : nullptr;
-    }
-    if (!strcmp(name, "vkRegisterDisplayEventEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_display_control");
-        return hasExt ? (void*)entry_vkRegisterDisplayEventEXT : nullptr;
-    }
-    if (!strcmp(name, "vkGetSwapchainCounterEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_display_control");
-        return hasExt ? (void*)entry_vkGetSwapchainCounterEXT : nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_display_timing
-    if (!strcmp(name, "vkGetRefreshCycleDurationGOOGLE"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_display_timing");
-        return hasExt ? (void*)entry_vkGetRefreshCycleDurationGOOGLE : nullptr;
-    }
-    if (!strcmp(name, "vkGetPastPresentationTimingGOOGLE"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_display_timing");
-        return hasExt ? (void*)entry_vkGetPastPresentationTimingGOOGLE : nullptr;
-    }
-#endif
-#ifdef VK_EXT_discard_rectangles
-    if (!strcmp(name, "vkCmdSetDiscardRectangleEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_discard_rectangles");
-        return hasExt ? (void*)entry_vkCmdSetDiscardRectangleEXT : nullptr;
-    }
-#endif
-#ifdef VK_EXT_hdr_metadata
-    if (!strcmp(name, "vkSetHdrMetadataEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_hdr_metadata");
-        return hasExt ? (void*)entry_vkSetHdrMetadataEXT : nullptr;
-    }
-#endif
-#ifdef VK_MVK_ios_surface
-    if (!strcmp(name, "vkCreateIOSSurfaceMVK"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_MVK_ios_surface");
-        return hasExt ? (void*)entry_vkCreateIOSSurfaceMVK : nullptr;
-    }
-#endif
-#ifdef VK_MVK_macos_surface
-    if (!strcmp(name, "vkCreateMacOSSurfaceMVK"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_MVK_macos_surface");
-        return hasExt ? (void*)entry_vkCreateMacOSSurfaceMVK : nullptr;
-    }
-#endif
-#ifdef VK_EXT_debug_utils
-    if (!strcmp(name, "vkSetDebugUtilsObjectNameEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkSetDebugUtilsObjectNameEXT : nullptr;
-    }
-    if (!strcmp(name, "vkSetDebugUtilsObjectTagEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkSetDebugUtilsObjectTagEXT : nullptr;
-    }
-    if (!strcmp(name, "vkQueueBeginDebugUtilsLabelEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkQueueBeginDebugUtilsLabelEXT : nullptr;
-    }
-    if (!strcmp(name, "vkQueueEndDebugUtilsLabelEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkQueueEndDebugUtilsLabelEXT : nullptr;
-    }
-    if (!strcmp(name, "vkQueueInsertDebugUtilsLabelEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkQueueInsertDebugUtilsLabelEXT : nullptr;
-    }
-    if (!strcmp(name, "vkCmdBeginDebugUtilsLabelEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkCmdBeginDebugUtilsLabelEXT : nullptr;
-    }
-    if (!strcmp(name, "vkCmdEndDebugUtilsLabelEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkCmdEndDebugUtilsLabelEXT : nullptr;
-    }
-    if (!strcmp(name, "vkCmdInsertDebugUtilsLabelEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkCmdInsertDebugUtilsLabelEXT : nullptr;
-    }
-    if (!strcmp(name, "vkCreateDebugUtilsMessengerEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkCreateDebugUtilsMessengerEXT : nullptr;
-    }
-    if (!strcmp(name, "vkDestroyDebugUtilsMessengerEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkDestroyDebugUtilsMessengerEXT : nullptr;
-    }
-    if (!strcmp(name, "vkSubmitDebugUtilsMessageEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
-        return hasExt ? (void*)entry_vkSubmitDebugUtilsMessageEXT : nullptr;
-    }
-#endif
-#ifdef VK_ANDROID_external_memory_android_hardware_buffer
-    if (!strcmp(name, "vkGetAndroidHardwareBufferPropertiesANDROID"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_ANDROID_external_memory_android_hardware_buffer");
-        return hasExt ? (void*)entry_vkGetAndroidHardwareBufferPropertiesANDROID : nullptr;
-    }
-    if (!strcmp(name, "vkGetMemoryAndroidHardwareBufferANDROID"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_ANDROID_external_memory_android_hardware_buffer");
-        return hasExt ? (void*)entry_vkGetMemoryAndroidHardwareBufferANDROID : nullptr;
-    }
-#endif
-#ifdef VK_EXT_sample_locations
-    if (!strcmp(name, "vkCmdSetSampleLocationsEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_sample_locations");
-        return hasExt ? (void*)entry_vkCmdSetSampleLocationsEXT : nullptr;
-    }
-    if (!strcmp(name, "vkGetPhysicalDeviceMultisamplePropertiesEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_sample_locations");
-        return hasExt ? (void*)entry_vkGetPhysicalDeviceMultisamplePropertiesEXT : nullptr;
-    }
-#endif
-#ifdef VK_EXT_validation_cache
-    if (!strcmp(name, "vkCreateValidationCacheEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_validation_cache");
-        return hasExt ? (void*)entry_vkCreateValidationCacheEXT : nullptr;
-    }
-    if (!strcmp(name, "vkDestroyValidationCacheEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_validation_cache");
-        return hasExt ? (void*)entry_vkDestroyValidationCacheEXT : nullptr;
-    }
-    if (!strcmp(name, "vkMergeValidationCachesEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_validation_cache");
-        return hasExt ? (void*)entry_vkMergeValidationCachesEXT : nullptr;
-    }
-    if (!strcmp(name, "vkGetValidationCacheDataEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_validation_cache");
-        return hasExt ? (void*)entry_vkGetValidationCacheDataEXT : nullptr;
-    }
-#endif
-#ifdef VK_EXT_external_memory_host
-    if (!strcmp(name, "vkGetMemoryHostPointerPropertiesEXT"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_external_memory_host");
-        return hasExt ? (void*)entry_vkGetMemoryHostPointerPropertiesEXT : nullptr;
-    }
-#endif
-#ifdef VK_AMD_buffer_marker
-    if (!strcmp(name, "vkCmdWriteBufferMarkerAMD"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_AMD_buffer_marker");
-        return hasExt ? (void*)entry_vkCmdWriteBufferMarkerAMD : nullptr;
-    }
-#endif
-#ifdef VK_NV_device_diagnostic_checkpoints
-    if (!strcmp(name, "vkCmdSetCheckpointNV"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_device_diagnostic_checkpoints");
-        return hasExt ? (void*)entry_vkCmdSetCheckpointNV : nullptr;
-    }
-    if (!strcmp(name, "vkGetQueueCheckpointDataNV"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_device_diagnostic_checkpoints");
-        return hasExt ? (void*)entry_vkGetQueueCheckpointDataNV : nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_address_space
-    if (!strcmp(name, "vkMapMemoryIntoAddressSpaceGOOGLE"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_address_space");
-        return hasExt ? (void*)entry_vkMapMemoryIntoAddressSpaceGOOGLE : nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_color_buffer
-    if (!strcmp(name, "vkRegisterImageColorBufferGOOGLE"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_color_buffer");
-        return hasExt ? (void*)entry_vkRegisterImageColorBufferGOOGLE : nullptr;
-    }
-    if (!strcmp(name, "vkRegisterBufferColorBufferGOOGLE"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_color_buffer");
-        return hasExt ? (void*)entry_vkRegisterBufferColorBufferGOOGLE : nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
-    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplateSizedGOOGLE"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_sized_descriptor_update_template");
-        return hasExt ? (void*)entry_vkUpdateDescriptorSetWithTemplateSizedGOOGLE : nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_async_command_buffers
-    if (!strcmp(name, "vkBeginCommandBufferAsyncGOOGLE"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_async_command_buffers");
-        return hasExt ? (void*)entry_vkBeginCommandBufferAsyncGOOGLE : nullptr;
-    }
-    if (!strcmp(name, "vkEndCommandBufferAsyncGOOGLE"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_async_command_buffers");
-        return hasExt ? (void*)entry_vkEndCommandBufferAsyncGOOGLE : nullptr;
-    }
-    if (!strcmp(name, "vkResetCommandBufferAsyncGOOGLE"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_async_command_buffers");
-        return hasExt ? (void*)entry_vkResetCommandBufferAsyncGOOGLE : nullptr;
-    }
-    if (!strcmp(name, "vkCommandBufferHostSyncGOOGLE"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_async_command_buffers");
-        return hasExt ? (void*)entry_vkCommandBufferHostSyncGOOGLE : nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
-    if (!strcmp(name, "vkCreateImageWithRequirementsGOOGLE"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_create_resources_with_requirements");
-        return hasExt ? (void*)entry_vkCreateImageWithRequirementsGOOGLE : nullptr;
-    }
-    if (!strcmp(name, "vkCreateBufferWithRequirementsGOOGLE"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_create_resources_with_requirements");
-        return hasExt ? (void*)entry_vkCreateBufferWithRequirementsGOOGLE : nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_address_space_info
-    if (!strcmp(name, "vkGetMemoryHostAddressInfoGOOGLE"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_address_space_info");
-        return hasExt ? (void*)entry_vkGetMemoryHostAddressInfoGOOGLE : nullptr;
-    }
-#endif
-#ifdef VK_GOOGLE_free_memory_sync
-    if (!strcmp(name, "vkFreeMemorySyncGOOGLE"))
-    {
-        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_free_memory_sync");
-        return hasExt ? (void*)entry_vkFreeMemorySyncGOOGLE : nullptr;
-    }
-#endif
-    return nullptr;
-}
-
-} // namespace goldfish_vk
diff --git a/system/vulkan/goldfish_vulkan.cpp b/system/vulkan/goldfish_vulkan.cpp
index 9884d90..c4b3320 100644
--- a/system/vulkan/goldfish_vulkan.cpp
+++ b/system/vulkan/goldfish_vulkan.cpp
@@ -26,13 +26,14 @@
 #include <lib/zxio/inception.h>
 #include <unistd.h>
 
+#include "TraceProviderFuchsia.h"
 #include "services/service_connector.h"
 #endif
 
 #include "HostConnection.h"
+#include "ProcessPipe.h"
 #include "ResourceTracker.h"
 #include "VkEncoder.h"
-
 #include "func_table.h"
 
 // Used when there is no Vulkan support on the host.
@@ -221,6 +222,22 @@
     return VK_SUCCESS;
 }
 
+VkResult SetBufferCollectionImageConstraintsFUCHSIA(
+    VkDevice /*device*/,
+    VkBufferCollectionFUCHSIA /*collection*/,
+    const VkImageConstraintsInfoFUCHSIA* /*pImageConstraintsInfo*/) {
+    AEMU_SCOPED_TRACE("vkstubhal::SetBufferCollectionImageConstraintsFUCHSIA");
+    return VK_SUCCESS;
+}
+
+VkResult SetBufferCollectionBufferConstraintsFUCHSIA(
+    VkDevice /*device*/,
+    VkBufferCollectionFUCHSIA /*collection*/,
+    const VkBufferConstraintsInfoFUCHSIA* /*pBufferConstraintsInfo*/) {
+    AEMU_SCOPED_TRACE("vkstubhal::SetBufferCollectionBufferConstraintsFUCHSIA");
+    return VK_SUCCESS;
+}
+
 VkResult
 GetBufferCollectionPropertiesFUCHSIA(VkDevice /*device*/,
                                      VkBufferCollectionFUCHSIA /*collection*/,
@@ -228,6 +245,14 @@
     AEMU_SCOPED_TRACE("vkstubhal::GetBufferCollectionPropertiesFUCHSIA");
     return VK_SUCCESS;
 }
+
+VkResult GetBufferCollectionProperties2FUCHSIA(
+    VkDevice /*device*/,
+    VkBufferCollectionFUCHSIA /*collection*/,
+    VkBufferCollectionProperties2FUCHSIA* /*pProperties*/) {
+    AEMU_SCOPED_TRACE("vkstubhal::GetBufferCollectionProperties2FUCHSIA");
+    return VK_SUCCESS;
+}
 #endif
 
 PFN_vkVoidFunction GetInstanceProcAddr(VkInstance instance,
@@ -279,8 +304,16 @@
         return reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferCollectionFUCHSIA);
     if (strcmp(name, "vkSetBufferCollectionConstraintsFUCHSIA") == 0)
         return reinterpret_cast<PFN_vkVoidFunction>(SetBufferCollectionConstraintsFUCHSIA);
+    if (strcmp(name, "vkSetBufferCollectionImageConstraintsFUCHSIA") == 0)
+        return reinterpret_cast<PFN_vkVoidFunction>(
+            SetBufferCollectionImageConstraintsFUCHSIA);
+    if (strcmp(name, "vkSetBufferCollectionBufferConstraintsFUCHSIA") == 0)
+        return reinterpret_cast<PFN_vkVoidFunction>(SetBufferCollectionBufferConstraintsFUCHSIA);
     if (strcmp(name, "vkGetBufferCollectionPropertiesFUCHSIA") == 0)
         return reinterpret_cast<PFN_vkVoidFunction>(GetBufferCollectionPropertiesFUCHSIA);
+    if (strcmp(name, "vkGetBufferCollectionProperties2FUCHSIA") == 0)
+        return reinterpret_cast<PFN_vkVoidFunction>(
+            GetBufferCollectionProperties2FUCHSIA);
 #endif
     // Return NoOp for entrypoints that should never be called.
     if (strcmp(name, "vkGetPhysicalDeviceFeatures") == 0 ||
@@ -353,10 +386,13 @@
         return ret; \
     } \
     goldfish_vk::ResourceTracker::get()->setupFeatures(rcEnc->featureInfo_const()); \
+    goldfish_vk::ResourceTracker::get()->setSeqnoPtr(getSeqnoPtrForProcess()); \
     goldfish_vk::ResourceTracker::ThreadingCallbacks threadingCallbacks = { \
-        [] { auto hostCon = HostConnection::get(); \
-            ExtendedRCEncoderContext *rcEnc = hostCon->rcEncoder(); \
-            return hostCon; }, \
+        [] { \
+          auto hostCon = HostConnection::get(); \
+          hostCon->rcEncoder(); \
+          return hostCon; \
+        }, \
         [](HostConnection* hostCon) { return hostCon->vkEncoder(); }, \
     }; \
     goldfish_vk::ResourceTracker::get()->setThreadingCallbacks(threadingCallbacks); \
@@ -405,7 +441,7 @@
         return vkstubhal::CreateInstance(create_info, allocator, out_instance);
     }
 
-    VkResult res = vkEnc->vkCreateInstance(create_info, nullptr, out_instance);
+    VkResult res = vkEnc->vkCreateInstance(create_info, nullptr, out_instance, true /* do lock */);
 
     return res;
 }
@@ -554,6 +590,50 @@
 }
 
 VKAPI_ATTR
+VkResult SetBufferCollectionBufferConstraintsFUCHSIA(
+    VkDevice device,
+    VkBufferCollectionFUCHSIA collection,
+    const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
+    AEMU_SCOPED_TRACE("goldfish_vulkan::SetBufferCollectionBufferConstraintsFUCHSIA");
+
+    VK_HOST_CONNECTION(VK_ERROR_DEVICE_LOST)
+
+    if (!hostSupportsVulkan) {
+        return vkstubhal::SetBufferCollectionBufferConstraintsFUCHSIA(device, collection,
+                                                                      pBufferConstraintsInfo);
+    }
+
+    VkResult res =
+        goldfish_vk::ResourceTracker::get()->on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
+            vkEnc, VK_SUCCESS, device, collection, pBufferConstraintsInfo);
+
+    return res;
+}
+
+VKAPI_ATTR
+VkResult SetBufferCollectionImageConstraintsFUCHSIA(
+    VkDevice device,
+    VkBufferCollectionFUCHSIA collection,
+    const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
+    AEMU_SCOPED_TRACE(
+        "goldfish_vulkan::SetBufferCollectionBufferConstraintsFUCHSIA");
+
+    VK_HOST_CONNECTION(VK_ERROR_DEVICE_LOST)
+
+    if (!hostSupportsVulkan) {
+        return vkstubhal::SetBufferCollectionImageConstraintsFUCHSIA(
+            device, collection, pImageConstraintsInfo);
+    }
+
+    VkResult res =
+        goldfish_vk::ResourceTracker::get()
+            ->on_vkSetBufferCollectionImageConstraintsFUCHSIA(
+                vkEnc, VK_SUCCESS, device, collection, pImageConstraintsInfo);
+
+    return res;
+}
+
+VKAPI_ATTR
 VkResult GetBufferCollectionPropertiesFUCHSIA(
     VkDevice device,
     VkBufferCollectionFUCHSIA collection,
@@ -572,8 +652,80 @@
 
     return res;
 }
+
+VKAPI_ATTR
+VkResult GetBufferCollectionProperties2FUCHSIA(
+    VkDevice device,
+    VkBufferCollectionFUCHSIA collection,
+    VkBufferCollectionProperties2FUCHSIA* pProperties) {
+    AEMU_SCOPED_TRACE("goldfish_vulkan::GetBufferCollectionProperties2FUCHSIA");
+
+    VK_HOST_CONNECTION(VK_ERROR_DEVICE_LOST)
+
+    if (!hostSupportsVulkan) {
+        return vkstubhal::GetBufferCollectionProperties2FUCHSIA(
+            device, collection, pProperties);
+    }
+
+    VkResult res = goldfish_vk::ResourceTracker::get()
+                       ->on_vkGetBufferCollectionProperties2FUCHSIA(
+                           vkEnc, VK_SUCCESS, device, collection, pProperties);
+
+    return res;
+}
 #endif
 
+uint64_t currGuestTimeNs() {
+    struct timespec ts;
+#ifdef __APPLE__
+    clock_gettime(CLOCK_REALTIME, &ts);
+#else
+    clock_gettime(CLOCK_BOOTTIME, &ts);
+#endif
+    uint64_t res = (uint64_t)(ts.tv_sec * 1000000000ULL + ts.tv_nsec);
+    return res;
+}
+
+struct FrameTracingState {
+    uint32_t frameNumber = 0;
+    bool tracingEnabled = false;
+    void onSwapBuffersSuccessful(ExtendedRCEncoderContext* rcEnc) {
+#ifdef GFXSTREAM
+        bool current = android::base::isTracingEnabled();
+        // edge trigger
+        if (current && !tracingEnabled) {
+            if (rcEnc->hasHostSideTracing()) {
+                rcEnc->rcSetTracingForPuid(rcEnc, getPuid(), 1, currGuestTimeNs());
+            }
+        }
+        if (!current && tracingEnabled) {
+            if (rcEnc->hasHostSideTracing()) {
+                rcEnc->rcSetTracingForPuid(rcEnc, getPuid(), 0, currGuestTimeNs());
+            }
+        }
+        tracingEnabled = current;
+#endif
+        ++frameNumber;
+    }
+};
+
+static FrameTracingState sFrameTracingState;
+
+static PFN_vkVoidFunction sQueueSignalReleaseImageAndroidImpl = 0;
+
+static VkResult
+QueueSignalReleaseImageANDROID(
+    VkQueue queue,
+    uint32_t waitSemaphoreCount,
+    const VkSemaphore* pWaitSemaphores,
+    VkImage image,
+    int* pNativeFenceFd)
+{
+    sFrameTracingState.onSwapBuffersSuccessful(HostConnection::get()->rcEncoder());
+    ((PFN_vkQueueSignalReleaseImageANDROID)sQueueSignalReleaseImageAndroidImpl)(queue, waitSemaphoreCount, pWaitSemaphores, image, pNativeFenceFd);
+    return VK_SUCCESS;
+}
+
 static PFN_vkVoidFunction GetDeviceProcAddr(VkDevice device, const char* name) {
     AEMU_SCOPED_TRACE("goldfish_vulkan::GetDeviceProcAddr");
 
@@ -605,10 +757,27 @@
     if (!strcmp(name, "vkSetBufferCollectionConstraintsFUCHSIA")) {
         return (PFN_vkVoidFunction)SetBufferCollectionConstraintsFUCHSIA;
     }
+    if (!strcmp(name, "vkSetBufferCollectionImageConstraintsFUCHSIA")) {
+        return (PFN_vkVoidFunction)SetBufferCollectionImageConstraintsFUCHSIA;
+    }
+    if (!strcmp(name, "vkSetBufferCollectionBufferConstraintsFUCHSIA")) {
+        return (PFN_vkVoidFunction)SetBufferCollectionBufferConstraintsFUCHSIA;
+    }
     if (!strcmp(name, "vkGetBufferCollectionPropertiesFUCHSIA")) {
         return (PFN_vkVoidFunction)GetBufferCollectionPropertiesFUCHSIA;
     }
+    if (!strcmp(name, "vkGetBufferCollectionProperties2FUCHSIA")) {
+        return (PFN_vkVoidFunction)GetBufferCollectionProperties2FUCHSIA;
+    }
 #endif
+    if (!strcmp(name, "vkQueueSignalReleaseImageANDROID")) {
+        if (!sQueueSignalReleaseImageAndroidImpl) {
+            sQueueSignalReleaseImageAndroidImpl =
+                (PFN_vkVoidFunction)(
+                    goldfish_vk::goldfish_vulkan_get_device_proc_address(device, "vkQueueSignalReleaseImageANDROID"));
+        }
+        return (PFN_vkVoidFunction)QueueSignalReleaseImageANDROID;
+    }
     if (!strcmp(name, "vkGetDeviceProcAddr")) {
         return (PFN_vkVoidFunction)(GetDeviceProcAddr);
     }
@@ -634,6 +803,14 @@
     if (!strcmp(name, "vkGetDeviceProcAddr")) {
         return (PFN_vkVoidFunction)(GetDeviceProcAddr);
     }
+    if (!strcmp(name, "vkQueueSignalReleaseImageANDROID")) {
+        if (!sQueueSignalReleaseImageAndroidImpl) {
+            sQueueSignalReleaseImageAndroidImpl =
+                (PFN_vkVoidFunction)(
+                    goldfish_vk::goldfish_vulkan_get_instance_proc_address(instance, "vkQueueSignalReleaseImageANDROID"));
+        }
+        return (PFN_vkVoidFunction)QueueSignalReleaseImageANDROID;
+    }
     return (PFN_vkVoidFunction)(goldfish_vk::goldfish_vulkan_get_instance_proc_address(instance, name));
 }
 
@@ -672,6 +849,7 @@
 public:
     VulkanDevice() : mHostSupportsGoldfish(IsAccessible(QEMU_PIPE_PATH)) {
         InitLogger();
+        InitTraceProvider();
         goldfish_vk::ResourceTracker::get();
     }
 
@@ -687,7 +865,7 @@
         if (status != ZX_OK)
             return false;
 
-        zxio_node_attr_t attr;
+        zxio_node_attributes_t attr;
         status = zxio_attr_get(&io_storage.io, &attr);
         zxio_close(&io_storage.io);
         if (status != ZX_OK)
@@ -709,33 +887,47 @@
     }
 
 private:
+    void InitTraceProvider();
+
+    TraceProviderFuchsia mTraceProvider;
     const bool mHostSupportsGoldfish;
 };
 
 void VulkanDevice::InitLogger() {
-   zx_handle_t channel = GetConnectToServiceFunction()("/svc/fuchsia.logger.LogSink");
-   if (channel == ZX_HANDLE_INVALID)
-      return;
+  auto log_service = ([] () -> std::optional<zx::socket> {
+    fidl::ClientEnd<fuchsia_logger::LogSink> channel{zx::channel{
+      GetConnectToServiceFunction()("/svc/fuchsia.logger.LogSink")}};
+    if (!channel.is_valid())
+      return std::nullopt;
 
-  zx::socket local_socket, remote_socket;
-  zx_status_t status = zx::socket::create(ZX_SOCKET_DATAGRAM, &local_socket, &remote_socket);
-  if (status != ZX_OK)
-    return;
+    zx::socket local_socket, remote_socket;
+    zx_status_t status = zx::socket::create(ZX_SOCKET_DATAGRAM, &local_socket, &remote_socket);
+    if (status != ZX_OK)
+      return std::nullopt;
 
-  auto result = llcpp::fuchsia::logger::LogSink::Call::Connect(
-      zx::unowned_channel(channel), std::move(remote_socket));
-  zx_handle_close(channel);
+    auto result = WireCall(channel).Connect(std::move(remote_socket));
 
-  if (result.status() != ZX_OK)
+    if (!result.ok())
+      return std::nullopt;
+
+    return local_socket;
+  })();
+  if (!log_service)
     return;
 
   fx_logger_config_t config = {.min_severity = FX_LOG_INFO,
                                .console_fd = -1,
-                               .log_service_channel = local_socket.release(),
+                               .log_service_channel = log_service->release(),
                                .tags = nullptr,
                                .num_tags = 0};
 
-  fx_log_init_with_config(&config);
+  fx_log_reconfigure(&config);
+}
+
+void VulkanDevice::InitTraceProvider() {
+    if (!mTraceProvider.Initialize()) {
+        ALOGE("Trace provider failed to initialize");
+    }
 }
 
 extern "C" __attribute__((visibility("default"))) PFN_vkVoidFunction
@@ -749,11 +941,11 @@
     return VK_SUCCESS;
 }
 
-typedef VkResult(VKAPI_PTR *PFN_vkConnectToServiceAddr)(const char *pName, uint32_t handle);
+typedef VkResult(VKAPI_PTR *PFN_vkOpenInNamespaceAddr)(const char *pName, uint32_t handle);
 
 namespace {
 
-PFN_vkConnectToServiceAddr g_vulkan_connector;
+PFN_vkOpenInNamespaceAddr g_vulkan_connector;
 
 zx_handle_t LocalConnectToServiceFunction(const char* pName) {
     zx::channel remote_endpoint, local_endpoint;
@@ -772,7 +964,7 @@
 }
 
 extern "C" __attribute__((visibility("default"))) void
-vk_icdInitializeConnectToServiceCallback(PFN_vkConnectToServiceAddr callback) {
+vk_icdInitializeOpenInNamespaceCallback(PFN_vkOpenInNamespaceAddr callback) {
     g_vulkan_connector = callback;
     SetConnectToServiceFunction(&LocalConnectToServiceFunction);
 }
diff --git a/system/vulkan_enc/Android.mk b/system/vulkan_enc/Android.mk
index e49aac4..3fa99df 100644
--- a/system/vulkan_enc/Android.mk
+++ b/system/vulkan_enc/Android.mk
@@ -37,7 +37,8 @@
 LOCAL_CFLAGS += \
     -DLOG_TAG=\"goldfish_vulkan\" \
     -DVK_ANDROID_native_buffer \
-    -DVK_GOOGLE_address_space \
+    -DVK_EXT_device_memory_report \
+    -DVK_GOOGLE_gfxstream \
     -Wno-missing-field-initializers \
     -Werror \
     -fstrict-aliasing \
@@ -45,6 +46,8 @@
     -DVK_NO_PROTOTYPES \
 
 LOCAL_SRC_FILES := AndroidHardwareBuffer.cpp \
+    CommandBufferStagingStream.cpp \
+    DescriptorSetVirtualization.cpp \
     HostVisibleMemoryVirtualization.cpp \
     Resources.cpp \
     Validation.cpp \
@@ -54,9 +57,12 @@
     VkEncoder.cpp \
     goldfish_vk_extension_structs_guest.cpp \
     goldfish_vk_marshaling_guest.cpp \
+    goldfish_vk_reserved_marshaling_guest.cpp \
     goldfish_vk_deepcopy_guest.cpp \
+    goldfish_vk_counting_guest.cpp \
     goldfish_vk_handlemap_guest.cpp \
     goldfish_vk_transform_guest.cpp \
+	func_table.cpp \
 
 ifeq (true,$(GOLDFISH_OPENGL_BUILD_FOR_HOST))
 LOCAL_CFLAGS += -D__ANDROID_API__=28
diff --git a/system/vulkan_enc/CMakeLists.txt b/system/vulkan_enc/CMakeLists.txt
index 5b97015..de6380e 100644
--- a/system/vulkan_enc/CMakeLists.txt
+++ b/system/vulkan_enc/CMakeLists.txt
@@ -1,10 +1,10 @@
 # This is an autogenerated file! Do not edit!
 # instead run make from .../device/generic/goldfish-opengl
 # which will re-generate this file.
-android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc/Android.mk" "103f3f10c8af73d3d5c4263c4faa5bb70ebdbfbc9ed6f068a338d57344e2aa45")
-set(vulkan_enc_src AndroidHardwareBuffer.cpp HostVisibleMemoryVirtualization.cpp Resources.cpp Validation.cpp VulkanStreamGuest.cpp VulkanHandleMapping.cpp ResourceTracker.cpp VkEncoder.cpp goldfish_vk_extension_structs_guest.cpp goldfish_vk_marshaling_guest.cpp goldfish_vk_deepcopy_guest.cpp goldfish_vk_handlemap_guest.cpp goldfish_vk_transform_guest.cpp)
-android_add_library(TARGET vulkan_enc SHARED LICENSE Apache-2.0 SRC AndroidHardwareBuffer.cpp HostVisibleMemoryVirtualization.cpp Resources.cpp Validation.cpp VulkanStreamGuest.cpp VulkanHandleMapping.cpp ResourceTracker.cpp VkEncoder.cpp goldfish_vk_extension_structs_guest.cpp goldfish_vk_marshaling_guest.cpp goldfish_vk_deepcopy_guest.cpp goldfish_vk_handlemap_guest.cpp goldfish_vk_transform_guest.cpp)
-target_include_directories(vulkan_enc PRIVATE ${GOLDFISH_DEVICE_ROOT}/shared/GoldfishAddressSpace/include ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/host/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/host/include/vulkan)
-target_compile_definitions(vulkan_enc PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGOLDFISH_VULKAN" "-DLOG_TAG=\"goldfish_vulkan\"" "-DVK_ANDROID_native_buffer" "-DVK_GOOGLE_address_space" "-DVK_USE_PLATFORM_ANDROID_KHR" "-DVK_NO_PROTOTYPES" "-D__ANDROID_API__=28")
+android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc/Android.mk" "b1d2aa0910307fc5084d14e06fee02a540235b844672aeefa772327651bff9b6")
+set(vulkan_enc_src AndroidHardwareBuffer.cpp CommandBufferStagingStream.cpp DescriptorSetVirtualization.cpp HostVisibleMemoryVirtualization.cpp Resources.cpp Validation.cpp VulkanStreamGuest.cpp VulkanHandleMapping.cpp ResourceTracker.cpp VkEncoder.cpp goldfish_vk_extension_structs_guest.cpp goldfish_vk_marshaling_guest.cpp goldfish_vk_reserved_marshaling_guest.cpp goldfish_vk_deepcopy_guest.cpp goldfish_vk_counting_guest.cpp goldfish_vk_handlemap_guest.cpp goldfish_vk_transform_guest.cpp func_table.cpp)
+android_add_library(TARGET vulkan_enc SHARED LICENSE Apache-2.0 SRC AndroidHardwareBuffer.cpp CommandBufferStagingStream.cpp DescriptorSetVirtualization.cpp HostVisibleMemoryVirtualization.cpp Resources.cpp Validation.cpp VulkanStreamGuest.cpp VulkanHandleMapping.cpp ResourceTracker.cpp VkEncoder.cpp goldfish_vk_extension_structs_guest.cpp goldfish_vk_marshaling_guest.cpp goldfish_vk_reserved_marshaling_guest.cpp goldfish_vk_deepcopy_guest.cpp goldfish_vk_counting_guest.cpp goldfish_vk_handlemap_guest.cpp goldfish_vk_transform_guest.cpp func_table.cpp)
+target_include_directories(vulkan_enc PRIVATE ${GOLDFISH_DEVICE_ROOT}/shared/GoldfishAddressSpace/include ${GOLDFISH_DEVICE_ROOT}/system/renderControl_enc ${GOLDFISH_DEVICE_ROOT}/shared/OpenglCodecCommon ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include-types ${GOLDFISH_DEVICE_ROOT}/shared/qemupipe/include ${GOLDFISH_DEVICE_ROOT}/system/vulkan_enc ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/host/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/host/include/vulkan)
+target_compile_definitions(vulkan_enc PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGFXSTREAM" "-DLOG_TAG=\"goldfish_vulkan\"" "-DVK_ANDROID_native_buffer" "-DVK_EXT_device_memory_report" "-DVK_GOOGLE_gfxstream" "-DVK_USE_PLATFORM_ANDROID_KHR" "-DVK_NO_PROTOTYPES" "-D__ANDROID_API__=28")
 target_compile_options(vulkan_enc PRIVATE "-fvisibility=default" "-Wno-unused-parameter" "-Wno-missing-field-initializers" "-Werror" "-fstrict-aliasing")
-target_link_libraries(vulkan_enc PRIVATE gui log android-emu-shared androidemu cutils utils _renderControl_enc OpenglCodecCommon_host PRIVATE GoldfishAddressSpace_host qemupipe_host)
\ No newline at end of file
+target_link_libraries(vulkan_enc PRIVATE gui log android-emu-shared _renderControl_enc OpenglCodecCommon_host cutils utils androidemu PRIVATE GoldfishAddressSpace_host qemupipe_host)
\ No newline at end of file
diff --git a/system/vulkan_enc/CommandBufferStagingStream.cpp b/system/vulkan_enc/CommandBufferStagingStream.cpp
new file mode 100644
index 0000000..75d8a39
--- /dev/null
+++ b/system/vulkan_enc/CommandBufferStagingStream.cpp
@@ -0,0 +1,114 @@
+/*
+* Copyright (C) 2021 The Android Open Source Project
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#include "CommandBufferStagingStream.h"
+
+#if PLATFORM_SDK_VERSION < 26
+#include <cutils/log.h>
+#else
+#include <log/log.h>
+#endif
+#include <cutils/properties.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+
+static const size_t kReadSize = 512 * 1024;
+static const size_t kWriteOffset = kReadSize;
+
+CommandBufferStagingStream::CommandBufferStagingStream() :
+    IOStream(1048576), m_buf(nullptr), m_size(1048576), m_writePos(0) { }
+
+CommandBufferStagingStream::~CommandBufferStagingStream() { flush(); if (m_buf) free(m_buf); }
+
+size_t CommandBufferStagingStream::idealAllocSize(size_t len) {
+    if (len > 1048576) return len;
+    return 1048576;
+}
+
+void *CommandBufferStagingStream::allocBuffer(size_t minSize) {
+    size_t allocSize =
+        (1048576 < minSize ? minSize : 1048576);
+
+    // Initial case: blank
+    if (!m_buf) {
+        m_buf = (unsigned char *)malloc(allocSize);
+        m_size = allocSize;
+        return (void*)m_buf;
+    }
+
+    // Calculate remaining
+    size_t remaining = m_size - m_writePos;
+
+    if (remaining < allocSize) {
+        size_t newAllocSize = m_size * 2 + allocSize;
+        unsigned char *p = (unsigned char *)realloc(m_buf, newAllocSize);
+        m_buf = p;
+        m_size = newAllocSize;
+        return (void*)(m_buf + m_writePos);
+
+    }
+
+    return (void*)(m_buf + m_writePos);
+};
+
+int CommandBufferStagingStream::commitBuffer(size_t size)
+{
+    m_writePos += size;
+    return 0;
+}
+
+const unsigned char *CommandBufferStagingStream::readFully(void*, size_t) {
+    // Not supported
+    ALOGE("CommandBufferStagingStream::%s: Fatal: not supported\n", __func__);
+    abort();
+    return nullptr;
+}
+
+const unsigned char *CommandBufferStagingStream::read(void*, size_t*) {
+    // Not supported
+    ALOGE("CommandBufferStagingStream::%s: Fatal: not supported\n", __func__);
+    abort();
+    return nullptr;
+}
+
+int CommandBufferStagingStream::writeFully(const void*, size_t)
+{
+    // Not supported
+    ALOGE("CommandBufferStagingStream::%s: Fatal: not supported\n", __func__);
+    abort();
+    return 0;
+}
+
+const unsigned char *CommandBufferStagingStream::commitBufferAndReadFully(
+    size_t, void *, size_t) {
+
+    // Not supported
+    ALOGE("CommandBufferStagingStream::%s: Fatal: not supported\n", __func__);
+    abort();
+    return nullptr;
+}
+
+void CommandBufferStagingStream::getWritten(unsigned char** bufOut, size_t* sizeOut) {
+    *bufOut = m_buf;
+    *sizeOut = m_writePos;
+}
+
+void CommandBufferStagingStream::reset() {
+    m_writePos = 0;
+    IOStream::rewind();
+}
diff --git a/system/vulkan_enc/CommandBufferStagingStream.h b/system/vulkan_enc/CommandBufferStagingStream.h
new file mode 100644
index 0000000..3856b4c
--- /dev/null
+++ b/system/vulkan_enc/CommandBufferStagingStream.h
@@ -0,0 +1,43 @@
+/*
+* Copyright (C) 2021 The Android Open Source Project
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#ifndef __COMMAND_BUFFER_STAGING_STREAM_H
+#define __COMMAND_BUFFER_STAGING_STREAM_H
+
+#include "IOStream.h"
+
+class CommandBufferStagingStream : public IOStream {
+public:
+    explicit CommandBufferStagingStream();
+    ~CommandBufferStagingStream();
+
+    virtual size_t idealAllocSize(size_t len);
+    virtual void *allocBuffer(size_t minSize);
+    virtual int commitBuffer(size_t size);
+    virtual const unsigned char *readFully( void *buf, size_t len);
+    virtual const unsigned char *read( void *buf, size_t *inout_len);
+    virtual int writeFully(const void *buf, size_t len);
+    virtual const unsigned char *commitBufferAndReadFully(size_t size, void *buf, size_t len);
+
+    void getWritten(unsigned char** bufOut, size_t* sizeOut);
+    void reset();
+
+private:
+    unsigned char* m_buf;
+    size_t m_size;
+    uint32_t m_writePos;
+};
+
+#endif
diff --git a/system/vulkan_enc/DescriptorSetVirtualization.cpp b/system/vulkan_enc/DescriptorSetVirtualization.cpp
new file mode 100644
index 0000000..d32f9e4
--- /dev/null
+++ b/system/vulkan_enc/DescriptorSetVirtualization.cpp
@@ -0,0 +1,501 @@
+// Copyright (C) 2021 The Android Open Source Project
+// Copyright (C) 2021 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "DescriptorSetVirtualization.h"
+#include "Resources.h"
+
+namespace goldfish_vk {
+
+void clearReifiedDescriptorSet(ReifiedDescriptorSet* set) {
+    set->pool = VK_NULL_HANDLE;
+    set->setLayout = VK_NULL_HANDLE;
+    set->poolId = -1;
+    set->allocationPending = false;
+    set->allWrites.clear();
+    set->pendingWriteArrayRanges.clear();
+}
+
+void initDescriptorWriteTable(const std::vector<VkDescriptorSetLayoutBinding>& layoutBindings, DescriptorWriteTable& table) {
+    uint32_t highestBindingNumber = 0;
+
+    for (uint32_t i = 0; i < layoutBindings.size(); ++i) {
+        if (layoutBindings[i].binding > highestBindingNumber) {
+            highestBindingNumber = layoutBindings[i].binding;
+        }
+    }
+
+    std::vector<uint32_t> countsEachBinding(highestBindingNumber + 1, 0);
+
+    for (uint32_t i = 0; i < layoutBindings.size(); ++i) {
+        countsEachBinding[layoutBindings[i].binding] =
+            layoutBindings[i].descriptorCount;
+    }
+
+    table.resize(countsEachBinding.size());
+
+    for (uint32_t i = 0; i < table.size(); ++i) {
+        table[i].resize(countsEachBinding[i]);
+
+        for (uint32_t j = 0; j < countsEachBinding[i]; ++j) {
+            table[i][j].type = DescriptorWriteType::Empty;
+            table[i][j].dstArrayElement = 0;
+        }
+    }
+}
+
+static void initializeReifiedDescriptorSet(VkDescriptorPool pool, VkDescriptorSetLayout setLayout, ReifiedDescriptorSet* set) {
+
+    set->pendingWriteArrayRanges.clear();
+
+    const auto& layoutInfo = *(as_goldfish_VkDescriptorSetLayout(setLayout)->layoutInfo);
+
+    initDescriptorWriteTable(layoutInfo.bindings, set->allWrites);
+
+    for (size_t i = 0; i < layoutInfo.bindings.size(); ++i) {
+        // Bindings can be sparsely defined
+        const auto& binding = layoutInfo.bindings[i];
+        uint32_t bindingIndex = binding.binding;
+        if (set->bindingIsImmutableSampler.size() <= bindingIndex) {
+            set->bindingIsImmutableSampler.resize(bindingIndex + 1, false);
+        }
+        set->bindingIsImmutableSampler[bindingIndex] =
+            binding.descriptorCount > 0 &&
+            (binding.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER ||
+             binding.descriptorType ==
+             VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) &&
+            binding.pImmutableSamplers;
+    }
+
+    set->pool = pool;
+    set->setLayout = setLayout;
+    set->allocationPending = true;
+    set->bindings = layoutInfo.bindings;
+}
+
+bool isDescriptorTypeImageInfo(VkDescriptorType descType) {
+    return (descType == VK_DESCRIPTOR_TYPE_SAMPLER) ||
+           (descType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) ||
+           (descType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) ||
+           (descType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) ||
+           (descType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT);
+}
+
+bool isDescriptorTypeBufferInfo(VkDescriptorType descType) {
+    return (descType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) ||
+           (descType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
+           (descType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) ||
+           (descType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC);
+}
+
+bool isDescriptorTypeBufferView(VkDescriptorType descType) {
+    return (descType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) ||
+           (descType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
+}
+
+bool isDescriptorTypeInlineUniformBlock(VkDescriptorType descType) {
+    return descType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT;
+}
+
+bool isDescriptorTypeAccelerationStructure(VkDescriptorType descType) {
+    return descType == VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR;
+}
+
+void doEmulatedDescriptorWrite(const VkWriteDescriptorSet* write, ReifiedDescriptorSet* toWrite) {
+    VkDescriptorType descType = write->descriptorType;
+    uint32_t dstBinding = write->dstBinding;
+    uint32_t dstArrayElement = write->dstArrayElement;
+    uint32_t descriptorCount = write->descriptorCount;
+
+    DescriptorWriteTable& table = toWrite->allWrites;
+
+    uint32_t arrOffset = dstArrayElement;
+
+    if (isDescriptorTypeImageInfo(descType)) {
+        for (uint32_t i = 0; i < descriptorCount; ++i, ++arrOffset) {
+            if (arrOffset >= table[dstBinding].size()) {
+                ++dstBinding;
+                arrOffset = 0;
+            }
+            auto& entry = table[dstBinding][arrOffset];
+            entry.imageInfo = write->pImageInfo[i];
+            entry.type = DescriptorWriteType::ImageInfo;
+            entry.descriptorType = descType;
+        }
+    } else if (isDescriptorTypeBufferInfo(descType)) {
+        for (uint32_t i = 0; i < descriptorCount; ++i, ++arrOffset) {
+            if (arrOffset >= table[dstBinding].size()) {
+                ++dstBinding;
+                arrOffset = 0;
+            }
+            auto& entry = table[dstBinding][arrOffset];
+            entry.bufferInfo = write->pBufferInfo[i];
+            entry.type = DescriptorWriteType::BufferInfo;
+            entry.descriptorType = descType;
+        }
+    } else if (isDescriptorTypeBufferView(descType)) {
+        for (uint32_t i = 0; i < descriptorCount; ++i, ++arrOffset) {
+            if (arrOffset >= table[dstBinding].size()) {
+                ++dstBinding;
+                arrOffset = 0;
+            }
+            auto& entry = table[dstBinding][arrOffset];
+            entry.bufferView = write->pTexelBufferView[i];
+            entry.type = DescriptorWriteType::BufferView;
+            entry.descriptorType = descType;
+        }
+    } else if (isDescriptorTypeInlineUniformBlock(descType)) {
+        // TODO
+        // Look for pNext inline uniform block
+        // Append new DescriptorWrite entry that holds the buffer
+    } else if (isDescriptorTypeAccelerationStructure(descType)) {
+        // TODO
+        // Look for pNext acceleration structure
+        // Append new DescriptorWrite entry that holds it
+    } else {
+        return;
+    }
+}
+
+void doEmulatedDescriptorCopy(const VkCopyDescriptorSet* copy, const ReifiedDescriptorSet* src, ReifiedDescriptorSet* dst) {
+    const DescriptorWriteTable& srcTable = src->allWrites;
+    DescriptorWriteTable& dstTable = dst->allWrites;
+
+    // src/dst may be the same descriptor set, so we need to create a temporary array for that case.
+    // (TODO: Maybe just notice the pointers are the same? can aliasing in any other way happen?)
+
+    std::vector<DescriptorWrite> toCopy;
+    uint32_t currBinding = copy->srcBinding;
+    uint32_t arrOffset = copy->srcArrayElement;
+    for (uint32_t i = 0; i < copy->descriptorCount; ++i, ++arrOffset) {
+        if (arrOffset >= srcTable[currBinding].size()) {
+            ++currBinding;
+            arrOffset = 0;
+        }
+        toCopy.push_back(srcTable[currBinding][arrOffset]);
+    }
+
+    currBinding = copy->dstBinding;
+    arrOffset = copy->dstArrayElement;
+    for (uint32_t i = 0; i < copy->descriptorCount; ++i, ++arrOffset) {
+        if (arrOffset >= dstTable[currBinding].size()) {
+            ++currBinding;
+            arrOffset = 0;
+        }
+        dstTable[currBinding][arrOffset] = toCopy[i];
+    }
+}
+
+void doEmulatedDescriptorImageInfoWriteFromTemplate(
+    VkDescriptorType descType,
+    uint32_t binding,
+    uint32_t dstArrayElement,
+    uint32_t count,
+    const VkDescriptorImageInfo* imageInfos,
+    ReifiedDescriptorSet* set) {
+
+    DescriptorWriteTable& table = set->allWrites;
+
+    uint32_t currBinding = binding;
+    uint32_t arrOffset = dstArrayElement;
+
+    for (uint32_t i = 0; i < count; ++i, ++arrOffset) {
+        if (arrOffset >= table[currBinding].size()) {
+            ++currBinding;
+            arrOffset = 0;
+        }
+        auto& entry = table[currBinding][arrOffset];
+        entry.imageInfo = imageInfos[i];
+        entry.type = DescriptorWriteType::ImageInfo;
+        entry.descriptorType = descType;
+    }
+}
+
+void doEmulatedDescriptorBufferInfoWriteFromTemplate(
+    VkDescriptorType descType,
+    uint32_t binding,
+    uint32_t dstArrayElement,
+    uint32_t count,
+    const VkDescriptorBufferInfo* bufferInfos,
+    ReifiedDescriptorSet* set) {
+
+    DescriptorWriteTable& table = set->allWrites;
+
+    uint32_t currBinding = binding;
+    uint32_t arrOffset = dstArrayElement;
+
+    for (uint32_t i = 0; i < count; ++i, ++arrOffset) {
+        if (arrOffset >= table[currBinding].size()) {
+            ++currBinding;
+            arrOffset = 0;
+        }
+        auto& entry = table[currBinding][dstArrayElement + i];
+        entry.bufferInfo = bufferInfos[i];
+        entry.type = DescriptorWriteType::BufferInfo;
+        entry.descriptorType = descType;
+    }
+}
+
+void doEmulatedDescriptorBufferViewWriteFromTemplate(
+    VkDescriptorType descType,
+    uint32_t binding,
+    uint32_t dstArrayElement,
+    uint32_t count,
+    const VkBufferView* bufferViews,
+    ReifiedDescriptorSet* set) {
+
+    DescriptorWriteTable& table = set->allWrites;
+
+    uint32_t currBinding = binding;
+    uint32_t arrOffset = dstArrayElement;
+
+    for (uint32_t i = 0; i < count; ++i, ++arrOffset) {
+        if (arrOffset >= table[currBinding].size()) {
+            ++currBinding;
+            arrOffset = 0;
+        }
+        auto& entry = table[currBinding][dstArrayElement + i];
+        entry.bufferView = bufferViews[i];
+        entry.type = DescriptorWriteType::BufferView;
+        entry.descriptorType = descType;
+    }
+}
+
+static bool isBindingFeasibleForAlloc(
+    const DescriptorPoolAllocationInfo::DescriptorCountInfo& countInfo,
+    const VkDescriptorSetLayoutBinding& binding) {
+
+    if (binding.descriptorCount && (countInfo.type != binding.descriptorType)) {
+        return false;
+    }
+
+    uint32_t availDescriptorCount =
+        countInfo.descriptorCount - countInfo.used;
+
+    if (availDescriptorCount < binding.descriptorCount) {
+        ALOGV("%s: Ran out of descriptors of type 0x%x. "
+              "Wanted %u from layout but "
+              "we only have %u free (total in pool: %u)\n", __func__,
+              binding.descriptorType,
+              binding.descriptorCount,
+              countInfo.descriptorCount - countInfo.used,
+              countInfo.descriptorCount);
+        return false;
+    }
+
+    return true;
+}
+
+static bool isBindingFeasibleForFree(
+    const DescriptorPoolAllocationInfo::DescriptorCountInfo& countInfo,
+    const VkDescriptorSetLayoutBinding& binding) {
+
+    if (countInfo.type != binding.descriptorType) return false;
+    if (countInfo.used < binding.descriptorCount) {
+        ALOGV("%s: Was a descriptor set double freed? "
+              "Ran out of descriptors of type 0x%x. "
+              "Wanted to free %u from layout but "
+              "we only have %u used (total in pool: %u)\n", __func__,
+              binding.descriptorType,
+              binding.descriptorCount,
+              countInfo.used,
+              countInfo.descriptorCount);
+        return false;
+    }
+    return true;
+}
+
+static void allocBindingFeasible(
+    const VkDescriptorSetLayoutBinding& binding,
+    DescriptorPoolAllocationInfo::DescriptorCountInfo& poolState) {
+    poolState.used += binding.descriptorCount;
+}
+
+static void freeBindingFeasible(
+    const VkDescriptorSetLayoutBinding& binding,
+    DescriptorPoolAllocationInfo::DescriptorCountInfo& poolState) {
+    poolState.used -= binding.descriptorCount;
+}
+
+static VkResult validateDescriptorSetAllocation(const VkDescriptorSetAllocateInfo* pAllocateInfo) {
+    VkDescriptorPool pool = pAllocateInfo->descriptorPool;
+    DescriptorPoolAllocationInfo* poolInfo = as_goldfish_VkDescriptorPool(pool)->allocInfo;
+
+    // Check the number of sets available.
+    auto setsAvailable = poolInfo->maxSets - poolInfo->usedSets;
+
+    if (setsAvailable < pAllocateInfo->descriptorSetCount) {
+        ALOGV("%s: Error: VkDescriptorSetAllocateInfo wants %u sets "
+              "but we only have %u available. "
+              "Bailing with VK_ERROR_OUT_OF_POOL_MEMORY.\n", __func__,
+              pAllocateInfo->descriptorSetCount,
+              setsAvailable);
+        return VK_ERROR_OUT_OF_POOL_MEMORY;
+    }
+
+    // Perform simulated allocation and error out with
+    // VK_ERROR_OUT_OF_POOL_MEMORY if it fails.
+    std::vector<DescriptorPoolAllocationInfo::DescriptorCountInfo> descriptorCountCopy =
+        poolInfo->descriptorCountInfo;
+
+    for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; ++i) {
+        if (!pAllocateInfo->pSetLayouts[i]) {
+            ALOGV("%s: Error: Tried to allocate a descriptor set with null set layout.\n", __func__);
+            return VK_ERROR_INITIALIZATION_FAILED;
+        }
+
+        auto setLayoutInfo = as_goldfish_VkDescriptorSetLayout(pAllocateInfo->pSetLayouts[i])->layoutInfo;
+        if (!setLayoutInfo) {
+            return VK_ERROR_INITIALIZATION_FAILED;
+        }
+
+        for (const auto& binding : setLayoutInfo->bindings) {
+            bool success = false;
+            for (auto& pool : descriptorCountCopy) {
+                if (!isBindingFeasibleForAlloc(pool, binding)) continue;
+
+                success = true;
+                allocBindingFeasible(binding, pool);
+                break;
+            }
+
+            if (!success) {
+                return VK_ERROR_OUT_OF_POOL_MEMORY;
+            }
+        }
+    }
+    return VK_SUCCESS;
+}
+
+void applyDescriptorSetAllocation(VkDescriptorPool pool, VkDescriptorSetLayout setLayout) {
+    auto allocInfo = as_goldfish_VkDescriptorPool(pool)->allocInfo;
+    auto setLayoutInfo = as_goldfish_VkDescriptorSetLayout(setLayout)->layoutInfo;
+
+    ++allocInfo->usedSets;
+
+    for (const auto& binding : setLayoutInfo->bindings) {
+        for (auto& countForPool : allocInfo->descriptorCountInfo) {
+            if (!isBindingFeasibleForAlloc(countForPool, binding)) continue;
+            allocBindingFeasible(binding, countForPool);
+            break;
+        }
+    }
+}
+
+void removeDescriptorSetAllocation(VkDescriptorPool pool, const std::vector<VkDescriptorSetLayoutBinding>& bindings) {
+    auto allocInfo = as_goldfish_VkDescriptorPool(pool)->allocInfo;
+
+    if (0 == allocInfo->usedSets) {
+        ALOGV("%s: Warning: a descriptor set was double freed.\n", __func__);
+        return;
+    }
+
+    --allocInfo->usedSets;
+
+    for (const auto& binding : bindings) {
+        for (auto& countForPool : allocInfo->descriptorCountInfo) {
+            if (!isBindingFeasibleForFree(countForPool, binding)) continue;
+            freeBindingFeasible(binding, countForPool);
+            break;
+        }
+    }
+}
+
+void fillDescriptorSetInfoForPool(VkDescriptorPool pool, VkDescriptorSetLayout setLayout, VkDescriptorSet set) {
+    DescriptorPoolAllocationInfo* allocInfo = as_goldfish_VkDescriptorPool(pool)->allocInfo;
+
+    ReifiedDescriptorSet* newReified = new ReifiedDescriptorSet;
+    newReified->poolId = as_goldfish_VkDescriptorSet(set)->underlying;
+    newReified->allocationPending = true;
+
+    as_goldfish_VkDescriptorSet(set)->reified = newReified;
+
+    allocInfo->allocedPoolIds.insert(newReified->poolId);
+    allocInfo->allocedSets.insert(set);
+
+    initializeReifiedDescriptorSet(pool, setLayout, newReified);
+}
+
+VkResult validateAndApplyVirtualDescriptorSetAllocation(const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pSets) {
+    VkResult validateRes = validateDescriptorSetAllocation(pAllocateInfo);
+
+    if (validateRes != VK_SUCCESS) return validateRes;
+
+    for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; ++i) {
+        applyDescriptorSetAllocation(pAllocateInfo->descriptorPool, pAllocateInfo->pSetLayouts[i]);
+    }
+
+    VkDescriptorPool pool = pAllocateInfo->descriptorPool;
+    DescriptorPoolAllocationInfo* allocInfo = as_goldfish_VkDescriptorPool(pool)->allocInfo;
+
+    if (allocInfo->freePoolIds.size() < pAllocateInfo->descriptorSetCount) {
+        ALOGE("%s: FATAL: Somehow out of descriptor pool IDs. Wanted %u IDs but only have %u free IDs remaining. The count for maxSets was %u and used was %u\n", __func__,
+                pAllocateInfo->descriptorSetCount,
+                (uint32_t)allocInfo->freePoolIds.size(),
+                allocInfo->maxSets,
+                allocInfo->usedSets);
+        abort();
+    }
+
+    for (uint32_t i = 0 ; i < pAllocateInfo->descriptorSetCount; ++i) {
+        uint64_t id = allocInfo->freePoolIds.back();
+        allocInfo->freePoolIds.pop_back();
+
+        VkDescriptorSet newSet = new_from_host_VkDescriptorSet((VkDescriptorSet)id);
+        pSets[i] = newSet;
+
+        fillDescriptorSetInfoForPool(pool, pAllocateInfo->pSetLayouts[i], newSet);
+    }
+
+    return VK_SUCCESS;
+}
+
+bool removeDescriptorSetFromPool(VkDescriptorSet set, bool usePoolIds) {
+    ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
+
+    VkDescriptorPool pool = reified->pool;
+    DescriptorPoolAllocationInfo* allocInfo = as_goldfish_VkDescriptorPool(pool)->allocInfo;
+
+    if (usePoolIds) {
+        // Look for the set's pool Id in the pool. If not found, then this wasn't really allocated, and bail.
+        if (allocInfo->allocedPoolIds.find(reified->poolId) == allocInfo->allocedPoolIds.end()) {
+            return false;
+        }
+    }
+
+    const std::vector<VkDescriptorSetLayoutBinding>& bindings = reified->bindings;
+    removeDescriptorSetAllocation(pool, bindings);
+
+    if (usePoolIds) {
+        allocInfo->freePoolIds.push_back(reified->poolId);
+        allocInfo->allocedPoolIds.erase(reified->poolId);
+    }
+    allocInfo->allocedSets.erase(set);
+
+    return true;
+}
+
+std::vector<VkDescriptorSet> clearDescriptorPool(VkDescriptorPool pool, bool usePoolIds) {
+    std::vector<VkDescriptorSet> toClear;
+    for (auto set : as_goldfish_VkDescriptorPool(pool)->allocInfo->allocedSets) {
+        toClear.push_back(set);
+    }
+
+    for (auto set: toClear) {
+        removeDescriptorSetFromPool(set, usePoolIds);
+    }
+
+    return toClear;
+}
+
+} // namespace goldfish_vk
diff --git a/system/vulkan_enc/DescriptorSetVirtualization.h b/system/vulkan_enc/DescriptorSetVirtualization.h
new file mode 100644
index 0000000..7c7b1f6
--- /dev/null
+++ b/system/vulkan_enc/DescriptorSetVirtualization.h
@@ -0,0 +1,151 @@
+// Copyright (C) 2021 The Android Open Source Project
+// Copyright (C) 2021 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include "android/base/containers/EntityManager.h"
+
+#include <vulkan/vulkan.h>
+
+#include <unordered_set>
+#include <vector>
+
+namespace goldfish_vk {
+
+enum DescriptorWriteType {
+    Empty = 0,
+    ImageInfo = 1,
+    BufferInfo = 2,
+    BufferView = 3,
+    InlineUniformBlock = 4,
+    AccelerationStructure = 5,
+};
+
+struct DescriptorWrite {
+    DescriptorWriteType type;
+    VkDescriptorType descriptorType;
+
+    uint32_t dstArrayElement; // Only used for inlineUniformBlock and accelerationStructure.
+
+    union {
+        VkDescriptorImageInfo imageInfo;
+        VkDescriptorBufferInfo bufferInfo;
+        VkBufferView bufferView;
+        VkWriteDescriptorSetInlineUniformBlockEXT inlineUniformBlock;
+        VkWriteDescriptorSetAccelerationStructureKHR accelerationStructure;
+    };
+
+    std::vector<uint8_t> inlineUniformBlockBuffer;
+};
+
+using DescriptorWriteTable = std::vector<std::vector<DescriptorWrite>>;
+
+struct DescriptorWriteArrayRange {
+    uint32_t begin;
+    uint32_t count;
+};
+
+using DescriptorWriteDstArrayRangeTable = std::vector<std::vector<DescriptorWriteArrayRange>>;
+
+struct ReifiedDescriptorSet {
+    VkDescriptorPool pool;
+    VkDescriptorSetLayout setLayout;
+    uint64_t poolId;
+    bool allocationPending;
+
+    // Indexed first by binding number
+    DescriptorWriteTable allWrites;
+
+    // Indexed first by binding number
+    DescriptorWriteDstArrayRangeTable pendingWriteArrayRanges;
+
+    // Indexed by binding number
+    std::vector<bool> bindingIsImmutableSampler;
+
+    // Copied from the descriptor set layout
+    std::vector<VkDescriptorSetLayoutBinding> bindings;
+};
+
+struct DescriptorPoolAllocationInfo {
+    VkDevice device;
+    VkDescriptorPoolCreateFlags createFlags;
+
+    // TODO: This should be in a single fancy data structure of some kind.
+    std::vector<uint64_t> freePoolIds;
+    std::unordered_set<uint32_t> allocedPoolIds;
+    std::unordered_set<VkDescriptorSet> allocedSets;
+    uint32_t maxSets;
+    uint32_t usedSets;
+
+    // Fine-grained tracking of descriptor counts in individual pools
+    struct DescriptorCountInfo {
+        VkDescriptorType type;
+        uint32_t descriptorCount;
+        uint32_t used;
+    };
+    std::vector<DescriptorCountInfo> descriptorCountInfo;
+};
+
+struct DescriptorSetLayoutInfo {
+    std::vector<VkDescriptorSetLayoutBinding> bindings;
+    uint32_t refcount;
+};
+
+void clearReifiedDescriptorSet(ReifiedDescriptorSet* set);
+
+void initDescriptorWriteTable(const std::vector<VkDescriptorSetLayoutBinding>& layoutBindings, DescriptorWriteTable& table);
+
+bool isDescriptorTypeImageInfo(VkDescriptorType descType);
+bool isDescriptorTypeBufferInfo(VkDescriptorType descType);
+bool isDescriptorTypeBufferView(VkDescriptorType descType);
+bool isDescriptorTypeInlineUniformBlock(VkDescriptorType descType);
+bool isDescriptorTypeAccelerationStructure(VkDescriptorType descType);
+
+void doEmulatedDescriptorWrite(const VkWriteDescriptorSet* write, ReifiedDescriptorSet* toWrite);
+void doEmulatedDescriptorCopy(const VkCopyDescriptorSet* copy, const ReifiedDescriptorSet* src, ReifiedDescriptorSet* dst);
+
+void doEmulatedDescriptorImageInfoWriteFromTemplate(
+    VkDescriptorType descType,
+    uint32_t binding,
+    uint32_t dstArrayElement,
+    uint32_t count,
+    const VkDescriptorImageInfo* imageInfos,
+    ReifiedDescriptorSet* set);
+
+void doEmulatedDescriptorBufferInfoWriteFromTemplate(
+    VkDescriptorType descType,
+    uint32_t binding,
+    uint32_t dstArrayElement,
+    uint32_t count,
+    const VkDescriptorBufferInfo* bufferInfos,
+    ReifiedDescriptorSet* set);
+
+void doEmulatedDescriptorBufferViewWriteFromTemplate(
+    VkDescriptorType descType,
+    uint32_t binding,
+    uint32_t dstArrayElement,
+    uint32_t count,
+    const VkBufferView* bufferViews,
+    ReifiedDescriptorSet* set);
+
+void applyDescriptorSetAllocation(VkDescriptorPool pool, VkDescriptorSetLayout setLayout);
+void fillDescriptorSetInfoForPool(VkDescriptorPool pool, VkDescriptorSetLayout setLayout, VkDescriptorSet set);
+VkResult validateAndApplyVirtualDescriptorSetAllocation(const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pSets);
+
+// Returns false if set wasn't found in its pool.
+bool removeDescriptorSetFromPool(VkDescriptorSet set, bool usePoolIds);
+
+std::vector<VkDescriptorSet> clearDescriptorPool(VkDescriptorPool pool, bool usePoolIds);
+
+} // namespace goldfish_vk
diff --git a/system/vulkan_enc/HostVisibleMemoryVirtualization.cpp b/system/vulkan_enc/HostVisibleMemoryVirtualization.cpp
index a5b02f8..e66c951 100644
--- a/system/vulkan_enc/HostVisibleMemoryVirtualization.cpp
+++ b/system/vulkan_enc/HostVisibleMemoryVirtualization.cpp
@@ -25,6 +25,12 @@
 
 #include <set>
 
+#ifdef ANDROID
+#include <unistd.h>
+#include <errno.h>
+#endif
+#include <sys/mman.h>
+
 using android::base::guest::SubAllocator;
 
 namespace goldfish_vk {
@@ -258,10 +264,28 @@
     if (toDestroy->initResult != VK_SUCCESS) return;
     if (!toDestroy->initialized) return;
 
+#ifdef ANDROID
+    if (toDestroy->fd > 0) {
+
+        if (toDestroy->memoryAddr) {
+            int ret = munmap((void*)toDestroy->memoryAddr, toDestroy->memorySize);
+            ALOGE("%s: trying to unmap addr = 0x%" PRIx64", size = %d, ret = %d, errno = %d\n", __func__, toDestroy->memoryAddr, (int32_t)toDestroy->memorySize, ret, errno);
+        }
+
+        ALOGE("%s: trying to close fd = %d\n", __func__, toDestroy->fd);
+        int ret = close(toDestroy->fd);
+        if (ret != 0) {
+            ALOGE("%s: fail to close fd = %d, ret = %d, errno = %d\n", __func__, toDestroy->fd, ret, errno);
+        } else {
+            ALOGE("%s: successfully close fd = %d, ret = %d\n", __func__, toDestroy->fd, ret);
+        }
+    }
+#endif
+
     if (freeMemorySyncSupported) {
-        enc->vkFreeMemorySyncGOOGLE(device, toDestroy->memory, nullptr);
+        enc->vkFreeMemorySyncGOOGLE(device, toDestroy->memory, nullptr, false /* no lock */);
     } else {
-        enc->vkFreeMemory(device, toDestroy->memory, nullptr);
+        enc->vkFreeMemory(device, toDestroy->memory, nullptr, false /* no lock */);
     }
 
     delete toDestroy->subAlloc;
diff --git a/system/vulkan_enc/HostVisibleMemoryVirtualization.h b/system/vulkan_enc/HostVisibleMemoryVirtualization.h
index e190a1e..8697206 100644
--- a/system/vulkan_enc/HostVisibleMemoryVirtualization.h
+++ b/system/vulkan_enc/HostVisibleMemoryVirtualization.h
@@ -83,6 +83,9 @@
     VkDeviceSize mappedSize = 0;
     uint8_t* mappedPtr = nullptr;
     android::base::guest::SubAllocator* subAlloc = nullptr;
+    int fd = -1;
+    uint64_t memoryAddr = 0;
+    size_t memorySize = 0;
 };
 
 VkResult finishHostMemAllocInit(
diff --git a/system/vulkan_enc/ResourceTracker.cpp b/system/vulkan_enc/ResourceTracker.cpp
index 8bd4b20..2e89e28 100644
--- a/system/vulkan_enc/ResourceTracker.cpp
+++ b/system/vulkan_enc/ResourceTracker.cpp
@@ -15,6 +15,11 @@
 
 #include "ResourceTracker.h"
 
+#include "Resources.h"
+#include "CommandBufferStagingStream.h"
+#include "DescriptorSetVirtualization.h"
+
+#include "android/base/Optional.h"
 #include "android/base/threads/AndroidWorkPool.h"
 
 #include "goldfish_vk_private_defs.h"
@@ -27,7 +32,9 @@
 #include "../egl/goldfish_sync.h"
 
 typedef uint32_t zx_handle_t;
+typedef uint64_t zx_koid_t;
 #define ZX_HANDLE_INVALID         ((zx_handle_t)0)
+#define ZX_KOID_INVALID ((zx_koid_t)0)
 void zx_handle_close(zx_handle_t) { }
 void zx_event_create(int, zx_handle_t*) { }
 
@@ -45,16 +52,25 @@
 #ifdef VK_USE_PLATFORM_FUCHSIA
 
 #include <cutils/native_handle.h>
-#include <fuchsia/hardware/goldfish/cpp/fidl.h>
-#include <fuchsia/sysmem/cpp/fidl.h>
+#include <fuchsia/hardware/goldfish/llcpp/fidl.h>
+#include <fuchsia/sysmem/llcpp/fidl.h>
 #include <lib/zx/channel.h>
 #include <lib/zx/vmo.h>
+#include <zircon/errors.h>
 #include <zircon/process.h>
+#include <zircon/rights.h>
 #include <zircon/syscalls.h>
 #include <zircon/syscalls/object.h>
 
 #include "services/service_connector.h"
 
+#ifndef FUCHSIA_NO_TRACE
+#include <lib/trace/event.h>
+#endif
+
+#define GET_STATUS_SAFE(result, member) \
+    ((result).ok() ? ((result).Unwrap()->member) : ZX_OK)
+
 struct AHardwareBuffer;
 
 void AHardwareBuffer_release(AHardwareBuffer*) { }
@@ -113,6 +129,7 @@
 #include "goldfish_address_space.h"
 #include "goldfish_vk_private_defs.h"
 #include "vk_format_info.h"
+#include "vk_struct_id.h"
 #include "vk_util.h"
 
 #include <set>
@@ -160,7 +177,9 @@
 
 using android::aligned_buf_alloc;
 using android::aligned_buf_free;
+using android::base::Optional;
 using android::base::guest::AutoLock;
+using android::base::guest::RecursiveLock;
 using android::base::guest::Lock;
 using android::base::guest::WorkPool;
 
@@ -212,6 +231,54 @@
 DEFINE_RESOURCE_TRACKING_CLASS(UnwrapMapping, UNWRAP_MAPPING_IMPL_FOR_TYPE)
 DEFINE_RESOURCE_TRACKING_CLASS(DestroyMapping, DESTROY_MAPPING_IMPL_FOR_TYPE)
 
+static uint32_t* sSeqnoPtr = nullptr;
+
+// static
+uint32_t ResourceTracker::streamFeatureBits = 0;
+ResourceTracker::ThreadingCallbacks ResourceTracker::threadingCallbacks;
+
+struct StagingInfo {
+    Lock mLock;
+    std::vector<CommandBufferStagingStream*> streams;
+    std::vector<VkEncoder*> encoders;
+
+    ~StagingInfo() {
+        for (auto stream : streams) {
+            delete stream;
+        }
+
+        for (auto encoder : encoders) {
+            delete encoder;
+        }
+    }
+
+    void pushStaging(CommandBufferStagingStream* stream, VkEncoder* encoder) {
+        AutoLock lock(mLock);
+        stream->reset();
+        streams.push_back(stream);
+        encoders.push_back(encoder);
+    }
+
+    void popStaging(CommandBufferStagingStream** streamOut, VkEncoder** encoderOut) {
+        AutoLock lock(mLock);
+        CommandBufferStagingStream* stream;
+        VkEncoder* encoder;
+        if (streams.empty()) {
+            stream = new CommandBufferStagingStream;
+            encoder = new VkEncoder(stream);
+        } else {
+            stream = streams.back();
+            encoder = encoders.back();
+            streams.pop_back();
+            encoders.pop_back();
+        }
+        *streamOut = stream;
+        *encoderOut = encoder;
+    }
+};
+
+static StagingInfo sStaging;
+
 class ResourceTracker::Impl {
 public:
     Impl() = default;
@@ -246,6 +313,7 @@
         std::vector<HostMemBlocks> hostMemBlocks { VK_MAX_MEMORY_TYPES };
         uint32_t apiVersion;
         std::set<std::string> enabledExtensions;
+        std::vector<std::pair<PFN_vkDeviceMemoryReportCallbackEXT, void *>> deviceMemoryReportCallbacks;
     };
 
     struct VirtioGpuHostmemResourceInfo {
@@ -265,12 +333,16 @@
         VirtioGpuHostmemResourceInfo resInfo;
         SubAlloc subAlloc;
         AHardwareBuffer* ahw = nullptr;
+        bool imported = false;
         zx_handle_t vmoHandle = ZX_HANDLE_INVALID;
     };
 
     struct VkCommandBuffer_Info {
-        VkEncoder** lastUsedEncoderPtr = nullptr;
-        uint32_t sequenceNumber = 0;
+        uint32_t placeholder;
+    };
+
+    struct VkQueue_Info {
+        VkDevice device;
     };
 
     // custom guest-side structs for images/buffers because of AHardwareBuffer :((
@@ -299,24 +371,31 @@
         VkDeviceSize currentBackingSize = 0;
         bool baseRequirementsKnown = false;
         VkMemoryRequirements baseRequirements;
+#ifdef VK_USE_PLATFORM_FUCHSIA
+        bool isSysmemBackedMemory = false;
+#endif
     };
 
     struct VkSemaphore_Info {
         VkDevice device;
         zx_handle_t eventHandle = ZX_HANDLE_INVALID;
+        zx_koid_t eventKoid = ZX_KOID_INVALID;
         int syncFd = -1;
     };
 
     struct VkDescriptorUpdateTemplate_Info {
-        std::vector<VkDescriptorUpdateTemplateEntry> templateEntries;
+        uint32_t templateEntryCount = 0;
+        VkDescriptorUpdateTemplateEntry* templateEntries;
 
-        // Flattened versions
-        std::vector<uint32_t> imageInfoEntryIndices;
-        std::vector<uint32_t> bufferInfoEntryIndices;
-        std::vector<uint32_t> bufferViewEntryIndices;
-        std::vector<VkDescriptorImageInfo> imageInfos;
-        std::vector<VkDescriptorBufferInfo> bufferInfos;
-        std::vector<VkBufferView> bufferViews;
+        uint32_t imageInfoCount = 0;
+        uint32_t bufferInfoCount = 0;
+        uint32_t bufferViewCount = 0;
+        uint32_t* imageInfoIndices;
+        uint32_t* bufferInfoIndices;
+        uint32_t* bufferViewIndices;
+        VkDescriptorImageInfo* imageInfos;
+        VkDescriptorBufferInfo* bufferInfos;
+        VkBufferView* bufferViews;
     };
 
     struct VkFence_Info {
@@ -329,17 +408,37 @@
     };
 
     struct VkDescriptorPool_Info {
-        std::unordered_set<VkDescriptorSet> allocedSets;
-        VkDescriptorPoolCreateFlags createFlags;
+        uint32_t unused;
     };
 
     struct VkDescriptorSet_Info {
-        VkDescriptorPool pool;
-        std::vector<bool> bindingIsImmutableSampler;
+        uint32_t unused;
     };
 
     struct VkDescriptorSetLayout_Info {
-        std::vector<VkDescriptorSetLayoutBinding> bindings;
+        uint32_t unused;
+    };
+
+    struct VkCommandPool_Info {
+        uint32_t unused;
+    };
+
+    struct VkSampler_Info {
+        uint32_t unused;
+    };
+
+    struct VkBufferCollectionFUCHSIA_Info {
+#ifdef VK_USE_PLATFORM_FUCHSIA
+        android::base::Optional<
+            fuchsia_sysmem::wire::BufferCollectionConstraints>
+            constraints;
+        android::base::Optional<VkBufferCollectionProperties2FUCHSIA>
+            properties;
+
+        // the index of corresponding createInfo for each image format
+        // constraints in |constraints|.
+        std::vector<uint32_t> createInfoIndex;
+#endif  // VK_USE_PLATFORM_FUCHSIA
     };
 
 #define HANDLE_REGISTER_IMPL_IMPL(type) \
@@ -378,24 +477,53 @@
         lock.unlock();
     }
 
-    void unregister_VkCommandBuffer(VkCommandBuffer commandBuffer) {
+    void unregister_VkCommandPool(VkCommandPool pool) {
+        if (!pool) return;
+
+        clearCommandPool(pool);
+
         AutoLock lock(mLock);
+        info_VkCommandPool.erase(pool);
+    }
 
-        auto it = info_VkCommandBuffer.find(commandBuffer);
-        if (it == info_VkCommandBuffer.end()) return;
-        auto& info = it->second;
-        auto lastUsedEncoder =
-            info.lastUsedEncoderPtr ?
-            *(info.lastUsedEncoderPtr) : nullptr;
+    void unregister_VkSampler(VkSampler sampler) {
+        if (!sampler) return;
 
-        if (lastUsedEncoder) {
-            lastUsedEncoder->unregisterCleanupCallback(commandBuffer);
-            delete info.lastUsedEncoderPtr;
+        AutoLock lock(mLock);
+        info_VkSampler.erase(sampler);
+    }
+
+    void unregister_VkCommandBuffer(VkCommandBuffer commandBuffer) {
+        resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */, true /* also clear pending descriptor sets */);
+
+        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
+        if (!cb) return;
+        if (cb->lastUsedEncoder) { cb->lastUsedEncoder->decRef(); }
+        eraseObjects(&cb->subObjects);
+        forAllObjects(cb->poolObjects, [cb](void* commandPool) {
+            struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool((VkCommandPool)commandPool);
+            eraseObject(&p->subObjects, (void*)cb);
+        });
+        eraseObjects(&cb->poolObjects);
+
+        if (cb->userPtr) {
+            CommandBufferPendingDescriptorSets* pendingSets = (CommandBufferPendingDescriptorSets*)cb->userPtr;
+            delete pendingSets;
         }
 
+        AutoLock lock(mLock);
         info_VkCommandBuffer.erase(commandBuffer);
     }
 
+    void unregister_VkQueue(VkQueue queue) {
+        struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
+        if (!q) return;
+        if (q->lastUsedEncoder) { q->lastUsedEncoder->decRef(); }
+
+        AutoLock lock(mLock);
+        info_VkQueue.erase(queue);
+    }
+
     void unregister_VkDeviceMemory(VkDeviceMemory mem) {
         AutoLock lock(mLock);
 
@@ -459,11 +587,37 @@
             zx_handle_close(semInfo.eventHandle);
         }
 
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+        if (semInfo.syncFd >= 0) {
+            close(semInfo.syncFd);
+        }
+#endif
+
         info_VkSemaphore.erase(sem);
     }
 
     void unregister_VkDescriptorUpdateTemplate(VkDescriptorUpdateTemplate templ) {
-        info_VkDescriptorUpdateTemplate.erase(templ);
+
+        AutoLock lock(mLock);
+        auto it = info_VkDescriptorUpdateTemplate.find(templ);
+        if (it == info_VkDescriptorUpdateTemplate.end())
+            return;
+
+        auto& info = it->second;
+        if (info.templateEntryCount) delete [] info.templateEntries;
+        if (info.imageInfoCount) {
+            delete [] info.imageInfoIndices;
+            delete [] info.imageInfos;
+        }
+        if (info.bufferInfoCount) {
+            delete [] info.bufferInfoIndices;
+            delete [] info.bufferInfos;
+        }
+        if (info.bufferViewCount) {
+            delete [] info.bufferViewIndices;
+            delete [] info.bufferViews;
+        }
+        info_VkDescriptorUpdateTemplate.erase(it);
     }
 
     void unregister_VkFence(VkFence fence) {
@@ -483,155 +637,160 @@
         info_VkFence.erase(fence);
     }
 
+#ifdef VK_USE_PLATFORM_FUCHSIA
+    void unregister_VkBufferCollectionFUCHSIA(
+        VkBufferCollectionFUCHSIA collection) {
+        AutoLock lock(mLock);
+        info_VkBufferCollectionFUCHSIA.erase(collection);
+    }
+#endif
+
     void unregister_VkDescriptorSet_locked(VkDescriptorSet set) {
-        auto it = info_VkDescriptorSet.find(set);
-        if (it == info_VkDescriptorSet.end()) return;
-
-        const auto& setInfo = it->second;
-
-        auto poolIt = info_VkDescriptorPool.find(setInfo.pool);
-
+        struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(set);
+        delete ds->reified;
         info_VkDescriptorSet.erase(set);
-
-        if (poolIt == info_VkDescriptorPool.end()) return;
-
-        auto& poolInfo = poolIt->second;
-        poolInfo.allocedSets.erase(set);
     }
 
     void unregister_VkDescriptorSet(VkDescriptorSet set) {
+        if (!set) return;
+
         AutoLock lock(mLock);
         unregister_VkDescriptorSet_locked(set);
     }
 
     void unregister_VkDescriptorSetLayout(VkDescriptorSetLayout setLayout) {
+        if (!setLayout) return;
+
         AutoLock lock(mLock);
+        delete as_goldfish_VkDescriptorSetLayout(setLayout)->layoutInfo;
         info_VkDescriptorSetLayout.erase(setLayout);
     }
 
-    void initDescriptorSetStateLocked(const VkDescriptorSetAllocateInfo* ci, const VkDescriptorSet* sets) {
-        auto it = info_VkDescriptorPool.find(ci->descriptorPool);
-        if (it == info_VkDescriptorPool.end()) return;
+    VkResult allocAndInitializeDescriptorSets(
+        void* context,
+        VkDevice device,
+        const VkDescriptorSetAllocateInfo* ci,
+        VkDescriptorSet* sets) {
 
-        auto& info = it->second;
-        for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
-            info.allocedSets.insert(sets[i]);
+        if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
+            // Using the pool ID's we collected earlier from the host
+            VkResult poolAllocResult = validateAndApplyVirtualDescriptorSetAllocation(ci, sets);
 
-            auto setIt = info_VkDescriptorSet.find(sets[i]);
-            if (setIt == info_VkDescriptorSet.end()) continue;
+            if (poolAllocResult != VK_SUCCESS) return poolAllocResult;
 
-            auto& setInfo = setIt->second;
-            setInfo.pool = ci->descriptorPool;
+            for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
+                register_VkDescriptorSet(sets[i]);
+                VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(sets[i])->reified->setLayout;
 
-            VkDescriptorSetLayout setLayout = ci->pSetLayouts[i];
-            auto layoutIt = info_VkDescriptorSetLayout.find(setLayout);
-            if (layoutIt == info_VkDescriptorSetLayout.end()) continue;
+                // Need to add ref to the set layout in the virtual case
+                // because the set itself might not be realized on host at the
+                // same time
+                struct goldfish_VkDescriptorSetLayout* dsl = as_goldfish_VkDescriptorSetLayout(setLayout);
+                ++dsl->layoutInfo->refcount;
+            }
+        } else {
+            // Pass through and use host allocation
+            VkEncoder* enc = (VkEncoder*)context;
+            VkResult allocRes = enc->vkAllocateDescriptorSets(device, ci, sets, true /* do lock */);
 
-            const auto& layoutInfo = layoutIt->second;
-            for (size_t i = 0; i < layoutInfo.bindings.size(); ++i) {
-                // Bindings can be sparsely defined
-                const auto& binding = layoutInfo.bindings[i];
-                uint32_t bindingIndex = binding.binding;
-                if (setInfo.bindingIsImmutableSampler.size() <= bindingIndex) {
-                    setInfo.bindingIsImmutableSampler.resize(bindingIndex + 1, false);
-                }
-                setInfo.bindingIsImmutableSampler[bindingIndex] =
-                    binding.descriptorCount > 0 &&
-                     (binding.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER ||
-                      binding.descriptorType ==
-                          VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) &&
-                    binding.pImmutableSamplers;
+            if (allocRes != VK_SUCCESS) return allocRes;
+
+            for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
+                applyDescriptorSetAllocation(ci->descriptorPool, ci->pSetLayouts[i]);
+                fillDescriptorSetInfoForPool(ci->descriptorPool, ci->pSetLayouts[i], sets[i]);
             }
         }
+
+        return VK_SUCCESS;
     }
 
-    VkWriteDescriptorSet
-    createImmutableSamplersFilteredWriteDescriptorSetLocked(
-        const VkWriteDescriptorSet* descriptorWrite,
-        std::vector<VkDescriptorImageInfo>* imageInfoArray) {
+    VkDescriptorImageInfo createImmutableSamplersFilteredImageInfo(
+        VkDescriptorType descType,
+        VkDescriptorSet descSet,
+        uint32_t binding,
+        const VkDescriptorImageInfo* pImageInfo) {
 
-        VkWriteDescriptorSet res = *descriptorWrite;
+        VkDescriptorImageInfo res = *pImageInfo;
 
-        if  (descriptorWrite->descriptorCount == 0) return res;
+        if (descType != VK_DESCRIPTOR_TYPE_SAMPLER &&
+            descType != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) return res;
 
-        if  (descriptorWrite->descriptorType != VK_DESCRIPTOR_TYPE_SAMPLER &&
-             descriptorWrite->descriptorType != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) return res;
-
-        VkDescriptorSet set = descriptorWrite->dstSet;
-        auto descSetIt = info_VkDescriptorSet.find(set);
-        if (descSetIt == info_VkDescriptorSet.end()) {
-            ALOGE("%s: error: descriptor set 0x%llx not found\n", __func__,
-                  (unsigned long long)set);
-            return res;
-        }
-
-        const auto& descInfo = descSetIt->second;
-        uint32_t binding = descriptorWrite->dstBinding;
-
-        bool immutableSampler = descInfo.bindingIsImmutableSampler[binding];
+        bool immutableSampler = as_goldfish_VkDescriptorSet(descSet)->reified->bindingIsImmutableSampler[binding];
 
         if (!immutableSampler) return res;
 
-        for (uint32_t i = 0; i < descriptorWrite->descriptorCount; ++i) {
-            VkDescriptorImageInfo imageInfo = descriptorWrite->pImageInfo[i];
-            imageInfo.sampler = 0;
-            imageInfoArray->push_back(imageInfo);
-        }
-
-        res.pImageInfo = imageInfoArray->data();
+        res.sampler = 0;
 
         return res;
     }
 
-    // Also unregisters underlying descriptor sets
-    // and deletes their guest-side wrapped handles.
-    void clearDescriptorPoolLocked(VkDescriptorPool pool) {
-        auto it = info_VkDescriptorPool.find(pool);
-        if (it == info_VkDescriptorPool.end()) return;
+    bool descriptorBindingIsImmutableSampler(
+        VkDescriptorSet dstSet,
+        uint32_t dstBinding) {
 
-        std::vector<VkDescriptorSet> toClear;
-        for (auto set : it->second.allocedSets) {
-            toClear.push_back(set);
+        return as_goldfish_VkDescriptorSet(dstSet)->reified->bindingIsImmutableSampler[dstBinding];
+    }
+
+    VkDescriptorImageInfo
+    filterNonexistentSampler(
+        const VkDescriptorImageInfo& inputInfo) {
+
+        VkSampler sampler =
+            inputInfo.sampler;
+
+        VkDescriptorImageInfo res = inputInfo;
+
+        if (sampler) {
+            auto it = info_VkSampler.find(sampler);
+            bool samplerExists = it != info_VkSampler.end();
+            if (!samplerExists) res.sampler = 0;
         }
 
+        return res;
+    }
+
+
+    void freeDescriptorSetsIfHostAllocated(VkEncoder* enc, VkDevice device, uint32_t descriptorSetCount, const VkDescriptorSet* sets) {
+        for (uint32_t i = 0; i < descriptorSetCount; ++i) {
+            struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(sets[i]);
+            if (ds->reified->allocationPending) {
+                unregister_VkDescriptorSet(sets[i]);
+                delete_goldfish_VkDescriptorSet(sets[i]);
+            } else {
+                enc->vkFreeDescriptorSets(device, ds->reified->pool, 1, &sets[i], false /* no lock */);
+            }
+        }
+    }
+
+    void clearDescriptorPoolAndUnregisterDescriptorSets(void* context, VkDevice device, VkDescriptorPool pool) {
+
+        std::vector<VkDescriptorSet> toClear =
+            clearDescriptorPool(pool, mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate);
+
         for (auto set : toClear) {
-            unregister_VkDescriptorSet_locked(set);
+            if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
+                VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(set)->reified->setLayout;
+                decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
+            }
+            unregister_VkDescriptorSet(set);
             delete_goldfish_VkDescriptorSet(set);
         }
     }
 
     void unregister_VkDescriptorPool(VkDescriptorPool pool) {
+        if (!pool) return;
+
         AutoLock lock(mLock);
-        clearDescriptorPoolLocked(pool);
+
+        struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
+        delete dp->allocInfo;
+
         info_VkDescriptorPool.erase(pool);
     }
 
     bool descriptorPoolSupportsIndividualFreeLocked(VkDescriptorPool pool) {
-        auto it = info_VkDescriptorPool.find(pool);
-        if (it == info_VkDescriptorPool.end()) return false;
-
-        const auto& info = it->second;
-
-        return VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT &
-            info.createFlags;
-    }
-
-    bool descriptorSetReallyAllocedFromPoolLocked(VkDescriptorSet set, VkDescriptorPool pool) {
-        auto it = info_VkDescriptorSet.find(set);
-        if (it == info_VkDescriptorSet.end()) return false;
-
-        const auto& info = it->second;
-
-        if (pool != info.pool) return false;
-
-        auto poolIt = info_VkDescriptorPool.find(info.pool);
-        if (poolIt == info_VkDescriptorPool.end()) return false;
-
-        const auto& poolInfo = poolIt->second;
-
-        if (poolInfo.allocedSets.find(set) == poolInfo.allocedSets.end()) return false;
-
-        return true;
+        return as_goldfish_VkDescriptorPool(pool)->allocInfo->createFlags &
+            VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
     }
 
     static constexpr uint32_t kDefaultApiVersion = VK_MAKE_VERSION(1, 1, 0);
@@ -656,7 +815,8 @@
                        VkPhysicalDeviceProperties props,
                        VkPhysicalDeviceMemoryProperties memProps,
                        uint32_t enabledExtensionCount,
-                       const char* const* ppEnabledExtensionNames) {
+                       const char* const* ppEnabledExtensionNames,
+                       const void* pNext) {
         AutoLock lock(mLock);
         auto& info = info_VkDevice[device];
         info.physdev = physdev;
@@ -668,6 +828,23 @@
             &mHostVisibleMemoryVirtInfo);
         info.apiVersion = props.apiVersion;
 
+        const VkBaseInStructure *extensionCreateInfo =
+            reinterpret_cast<const VkBaseInStructure *>(pNext);
+        while(extensionCreateInfo) {
+            if(extensionCreateInfo->sType
+                == VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT) {
+                auto deviceMemoryReportCreateInfo =
+                    reinterpret_cast<const VkDeviceDeviceMemoryReportCreateInfoEXT *>(
+                        extensionCreateInfo);
+                if(deviceMemoryReportCreateInfo->pfnUserCallback != nullptr) {
+                    info.deviceMemoryReportCallbacks.emplace_back(
+                        deviceMemoryReportCreateInfo->pfnUserCallback,
+                        deviceMemoryReportCreateInfo->pUserData);
+                }
+            }
+            extensionCreateInfo = extensionCreateInfo->pNext;
+        }
+
         if (!ppEnabledExtensionNames) return;
 
         for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
@@ -675,6 +852,31 @@
         }
     }
 
+    void emitDeviceMemoryReport(VkDevice_Info info,
+                                VkDeviceMemoryReportEventTypeEXT type,
+                                uint64_t memoryObjectId,
+                                VkDeviceSize size,
+                                VkObjectType objectType,
+                                uint64_t objectHandle,
+                                uint32_t heapIndex = 0) {
+        if(info.deviceMemoryReportCallbacks.empty()) return;
+
+        const VkDeviceMemoryReportCallbackDataEXT callbackData = {
+            VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT,  // sType
+            nullptr,                                                   // pNext
+            0,                                                         // flags
+            type,                                                      // type
+            memoryObjectId,                                            // memoryObjectId
+            size,                                                      // size
+            objectType,                                                // objectType
+            objectHandle,                                              // objectHandle
+            heapIndex,                                                 // heapIndex
+        };
+        for(const auto &callback : info.deviceMemoryReportCallbacks) {
+            callback.first(&callbackData, callback.second);
+        }
+    }
+
     void setDeviceMemoryInfo(VkDevice device,
                              VkDeviceMemory memory,
                              VkDeviceSize allocationSize,
@@ -682,6 +884,7 @@
                              uint8_t* ptr,
                              uint32_t memoryTypeIndex,
                              AHardwareBuffer* ahw = nullptr,
+                             bool imported = false,
                              zx_handle_t vmoHandle = ZX_HANDLE_INVALID) {
         AutoLock lock(mLock);
         auto& deviceInfo = info_VkDevice[device];
@@ -692,6 +895,7 @@
         info.mappedPtr = ptr;
         info.memoryTypeIndex = memoryTypeIndex;
         info.ahw = ahw;
+        info.imported = imported;
         info.vmoHandle = vmoHandle;
     }
 
@@ -778,38 +982,58 @@
 
 #ifdef VK_USE_PLATFORM_FUCHSIA
         if (mFeatureInfo->hasVulkan) {
-            zx::channel channel(GetConnectToServiceFunction()("/dev/class/goldfish-control/000"));
+            fidl::ClientEnd<fuchsia_hardware_goldfish::ControlDevice> channel{
+                zx::channel(GetConnectToServiceFunction()("/dev/class/goldfish-control/000"))};
             if (!channel) {
                 ALOGE("failed to open control device");
                 abort();
             }
-            mControlDevice.Bind(std::move(channel));
+            mControlDevice = std::make_unique<
+                fidl::WireSyncClient<fuchsia_hardware_goldfish::ControlDevice>>(
+                std::move(channel));
 
-            zx::channel sysmem_channel(GetConnectToServiceFunction()("/svc/fuchsia.sysmem.Allocator"));
+            fidl::ClientEnd<fuchsia_sysmem::Allocator> sysmem_channel{
+                zx::channel(GetConnectToServiceFunction()("/svc/fuchsia.sysmem.Allocator"))};
             if (!sysmem_channel) {
                 ALOGE("failed to open sysmem connection");
             }
-            mSysmemAllocator.Bind(std::move(sysmem_channel));
+            mSysmemAllocator =
+                std::make_unique<fidl::WireSyncClient<fuchsia_sysmem::Allocator>>(
+                    std::move(sysmem_channel));
+            char name[ZX_MAX_NAME_LEN] = {};
+            zx_object_get_property(zx_process_self(), ZX_PROP_NAME, name, sizeof(name));
+            std::string client_name(name);
+            client_name += "-goldfish";
+            zx_info_handle_basic_t info;
+            zx_object_get_info(zx_process_self(), ZX_INFO_HANDLE_BASIC, &info, sizeof(info),
+                               nullptr, nullptr);
+            mSysmemAllocator->SetDebugClientInfo(fidl::StringView::FromExternal(client_name),
+                                                 info.koid);
         }
 #endif
 
         if (mFeatureInfo->hasVulkanNullOptionalStrings) {
-            mStreamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
+            ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
         }
         if (mFeatureInfo->hasVulkanIgnoredHandles) {
-            mStreamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
+            ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
         }
-
+        if (mFeatureInfo->hasVulkanShaderFloat16Int8) {
+            ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
+        }
+        if (mFeatureInfo->hasVulkanQueueSubmitWithCommands) {
+            ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+        }
 #if !defined(HOST_BUILD) && defined(VK_USE_PLATFORM_ANDROID_KHR)
-        if (mFeatureInfo->hasVirtioGpuNext) {
-            ALOGD("%s: has virtio-gpu-next; create hostmem rendernode\n", __func__);
-            mRendernodeFd = drmOpenRender(128 /* RENDERNODE_MINOR */);
-        }
+       if (mFeatureInfo->hasVirtioGpuNext) {
+           ALOGD("%s: has virtio-gpu-next; create hostmem rendernode\n", __func__);
+           mRendernodeFd = drmOpenRender(128 /* RENDERNODE_MINOR */);
+       }
 #endif
     }
 
     void setThreadingCallbacks(const ResourceTracker::ThreadingCallbacks& callbacks) {
-        mThreadingCallbacks = callbacks;
+        ResourceTracker::threadingCallbacks = callbacks;
     }
 
     bool hostSupportsVulkan() const {
@@ -823,7 +1047,7 @@
     }
 
     uint32_t getStreamFeatures() const {
-        return mStreamFeatureBits;
+        return ResourceTracker::streamFeatureBits;
     }
 
     bool supportsDeferredCommands() const {
@@ -831,6 +1055,11 @@
         return mFeatureInfo->hasDeferredVulkanCommands;
     }
 
+    bool supportsAsyncQueueSubmit() const {
+        if (!mFeatureInfo) return false;
+        return mFeatureInfo->hasVulkanAsyncQueueSubmit;
+    }
+
     bool supportsCreateResourcesWithRequirements() const {
         if (!mFeatureInfo) return false;
         return mFeatureInfo->hasVulkanCreateResourcesWithRequirements;
@@ -969,6 +1198,26 @@
         }
     }
 
+    void transformImpl_VkExternalMemoryProperties_fromhost(
+        VkExternalMemoryProperties* pProperties,
+        uint32_t) {
+        VkExternalMemoryHandleTypeFlags supportedHandleType = 0u;
+#ifdef VK_USE_PLATFORM_FUCHSIA
+        supportedHandleType |=
+            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA |
+            VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA;
+#endif  // VK_USE_PLATFORM_FUCHSIA
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+        supportedHandleType |=
+            VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
+            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
+#endif  // VK_USE_PLATFORM_ANDROID_KHR
+        if (supportedHandleType) {
+            pProperties->compatibleHandleTypes &= supportedHandleType;
+            pProperties->exportFromImportedHandleTypes &= supportedHandleType;
+        }
+    }
+
     VkResult on_vkEnumerateInstanceExtensionProperties(
         void* context,
         VkResult,
@@ -983,8 +1232,6 @@
             "VK_KHR_external_memory_capabilities",
             "VK_KHR_external_fence_capabilities",
 #endif
-            // TODO:
-            // VK_KHR_external_memory_capabilities
         };
 
         VkEncoder* enc = (VkEncoder*)context;
@@ -992,12 +1239,12 @@
         // Only advertise a select set of extensions.
         if (mHostInstanceExtensions.empty()) {
             uint32_t hostPropCount = 0;
-            enc->vkEnumerateInstanceExtensionProperties(nullptr, &hostPropCount, nullptr);
+            enc->vkEnumerateInstanceExtensionProperties(nullptr, &hostPropCount, nullptr, true /* do lock */);
             mHostInstanceExtensions.resize(hostPropCount);
 
             VkResult hostRes =
                 enc->vkEnumerateInstanceExtensionProperties(
-                    nullptr, &hostPropCount, mHostInstanceExtensions.data());
+                    nullptr, &hostPropCount, mHostInstanceExtensions.data(), true /* do lock */);
 
             if (hostRes != VK_SUCCESS) {
                 return hostRes;
@@ -1072,16 +1319,23 @@
         VkExtensionProperties* pProperties) {
 
         std::vector<const char*> allowedExtensionNames = {
+            "VK_KHR_vulkan_memory_model",
+            "VK_KHR_buffer_device_address",
             "VK_KHR_maintenance1",
             "VK_KHR_maintenance2",
             "VK_KHR_maintenance3",
-            "VK_KHR_get_memory_requirements2",
-            "VK_KHR_dedicated_allocation",
             "VK_KHR_bind_memory2",
+            "VK_KHR_dedicated_allocation",
+            "VK_KHR_get_memory_requirements2",
+            "VK_KHR_image_format_list",
             "VK_KHR_sampler_ycbcr_conversion",
             "VK_KHR_shader_float16_int8",
+            "VK_KHR_timeline_semaphore",
             "VK_AMD_gpu_shader_half_float",
             "VK_NV_shader_subgroup_partitioned",
+            "VK_KHR_shader_subgroup_extended_types",
+            "VK_EXT_subgroup_size_control",
+            "VK_KHR_pipeline_executable_properties",
 #ifdef VK_USE_PLATFORM_ANDROID_KHR
             "VK_KHR_external_semaphore",
             "VK_KHR_external_semaphore_fd",
@@ -1089,21 +1343,20 @@
             "VK_KHR_external_memory",
             "VK_KHR_external_fence",
             "VK_KHR_external_fence_fd",
+            "VK_EXT_device_memory_report",
 #endif
-            // TODO:
-            // VK_KHR_external_memory_capabilities
         };
 
         VkEncoder* enc = (VkEncoder*)context;
 
         if (mHostDeviceExtensions.empty()) {
             uint32_t hostPropCount = 0;
-            enc->vkEnumerateDeviceExtensionProperties(physdev, nullptr, &hostPropCount, nullptr);
+            enc->vkEnumerateDeviceExtensionProperties(physdev, nullptr, &hostPropCount, nullptr, true /* do lock */);
             mHostDeviceExtensions.resize(hostPropCount);
 
             VkResult hostRes =
                 enc->vkEnumerateDeviceExtensionProperties(
-                    physdev, nullptr, &hostPropCount, mHostDeviceExtensions.data());
+                    physdev, nullptr, &hostPropCount, mHostDeviceExtensions.data(), true /* do lock */);
 
             if (hostRes != VK_SUCCESS) {
                 return hostRes;
@@ -1143,7 +1396,6 @@
             { "VK_KHR_external_memory", 1 },
             { "VK_KHR_external_semaphore", 1 },
             { "VK_FUCHSIA_external_semaphore", 1 },
-            { "VK_FUCHSIA_buffer_collection", 1 },
 #endif
         };
 
@@ -1151,11 +1403,22 @@
             filteredExts.push_back(anbExtProp);
         }
 
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+        bool hostSupportsExternalFenceFd =
+            getHostDeviceExtensionIndex(
+                "VK_KHR_external_fence_fd") != -1;
+        if (!hostSupportsExternalFenceFd) {
+            filteredExts.push_back({ "VK_KHR_external_fence_fd", 1});
+        }
+#endif
+
+#ifndef VK_USE_PLATFORM_FUCHSIA
         if (hostSupportsExternalSemaphore &&
             !hostHasPosixExternalSemaphore) {
             filteredExts.push_back(
                 { "VK_KHR_external_semaphore_fd", 1});
         }
+#endif
 
         bool win32ExtMemAvailable =
             getHostDeviceExtensionIndex(
@@ -1163,12 +1426,12 @@
         bool posixExtMemAvailable =
             getHostDeviceExtensionIndex(
                 "VK_KHR_external_memory_fd") != -1;
-        bool extMoltenVkAvailable =
+        bool moltenVkExtAvailable =
             getHostDeviceExtensionIndex(
                 "VK_MVK_moltenvk") != -1;
 
         bool hostHasExternalMemorySupport =
-            win32ExtMemAvailable || posixExtMemAvailable || extMoltenVkAvailable;
+            win32ExtMemAvailable || posixExtMemAvailable || moltenVkExtAvailable;
 
         if (hostHasExternalMemorySupport) {
 #ifdef VK_USE_PLATFORM_ANDROID_KHR
@@ -1183,6 +1446,9 @@
             filteredExts.push_back({
                 "VK_FUCHSIA_external_memory", 1
             });
+            filteredExts.push_back({
+                "VK_FUCHSIA_buffer_collection", 1
+            });
 #endif
         }
 
@@ -1265,7 +1531,7 @@
 
             lock.unlock();
             VkResult countRes = enc->vkEnumeratePhysicalDevices(
-                instance, &hostPhysicalDeviceCount, nullptr);
+                instance, &hostPhysicalDeviceCount, nullptr, false /* no lock */);
             lock.lock();
 
             if (countRes != VK_SUCCESS) {
@@ -1278,7 +1544,7 @@
 
             lock.unlock();
             VkResult enumRes = enc->vkEnumeratePhysicalDevices(
-                instance, &hostPhysicalDeviceCount, info.physicalDevices.data());
+                instance, &hostPhysicalDeviceCount, info.physicalDevices.data(), false /* no lock */);
             lock.lock();
 
             if (enumRes != VK_SUCCESS) {
@@ -1325,6 +1591,39 @@
         }
     }
 
+    void on_vkGetPhysicalDeviceProperties(
+        void*,
+        VkPhysicalDevice,
+        VkPhysicalDeviceProperties* pProperties) {
+        // We have host properties at this point
+        if (pProperties) {
+            // We need this to ignore some cts tests when using Swiftshader Vk
+            if (pProperties->deviceType != VK_PHYSICAL_DEVICE_TYPE_CPU) {
+                // Otherwise, if not CPU type, mark as virtual type
+                pProperties->deviceType = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
+            }
+        }
+    }
+
+    void on_vkGetPhysicalDeviceProperties2(
+        void*,
+        VkPhysicalDevice,
+        VkPhysicalDeviceProperties2* pProperties) {
+        if (pProperties) {
+            // We need this to ignore some cts tests when using Swiftshader Vk
+            if (pProperties->properties.deviceType != VK_PHYSICAL_DEVICE_TYPE_CPU) {
+                // Otherwise, if not CPU type, mark as virtual type
+                pProperties->properties.deviceType = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
+            }
+
+            VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT =
+                vk_find_struct<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT>(pProperties);
+            if (memoryReportFeaturesEXT) {
+                memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE;
+            }
+        }
+    }
+
     void on_vkGetPhysicalDeviceMemoryProperties(
         void*,
         VkPhysicalDevice physdev,
@@ -1357,6 +1656,23 @@
         }
     }
 
+    void on_vkGetDeviceQueue(void*,
+                             VkDevice device,
+                             uint32_t,
+                             uint32_t,
+                             VkQueue* pQueue) {
+        AutoLock lock(mLock);
+        info_VkQueue[*pQueue].device = device;
+    }
+
+    void on_vkGetDeviceQueue2(void*,
+                              VkDevice device,
+                              const VkDeviceQueueInfo2*,
+                              VkQueue* pQueue) {
+        AutoLock lock(mLock);
+        info_VkQueue[*pQueue].device = device;
+    }
+
     VkResult on_vkCreateInstance(
         void* context,
         VkResult input_result,
@@ -1370,7 +1686,7 @@
 
         uint32_t apiVersion;
         VkResult enumInstanceVersionRes =
-            enc->vkEnumerateInstanceVersion(&apiVersion);
+            enc->vkEnumerateInstanceVersion(&apiVersion, false /* no lock */);
 
         setInstanceInfo(
             *pInstance,
@@ -1395,12 +1711,13 @@
 
         VkPhysicalDeviceProperties props;
         VkPhysicalDeviceMemoryProperties memProps;
-        enc->vkGetPhysicalDeviceProperties(physicalDevice, &props);
-        enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps);
+        enc->vkGetPhysicalDeviceProperties(physicalDevice, &props, false /* no lock */);
+        enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps, false /* no lock */);
 
         setDeviceInfo(
             *pDevice, physicalDevice, props, memProps,
-            pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
+            pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames,
+            pCreateInfo->pNext);
 
         return input_result;
     }
@@ -1437,7 +1754,7 @@
         const AHardwareBuffer* buffer,
         VkAndroidHardwareBufferPropertiesANDROID* pProperties) {
         auto grallocHelper =
-            mThreadingCallbacks.hostConnectionGetFunc()->grallocHelper();
+            ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
         return getAndroidHardwareBufferPropertiesANDROID(
             grallocHelper,
             &mHostVisibleMemoryVirtInfo,
@@ -1521,10 +1838,22 @@
         VkExternalMemoryHandleTypeFlagBits handleType,
         uint32_t handle,
         VkMemoryZirconHandlePropertiesFUCHSIA* pProperties) {
-        if (handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA) {
+        using fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal;
+        using fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;
+
+        if (handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA &&
+            handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
             return VK_ERROR_INITIALIZATION_FAILED;
         }
 
+        zx_info_handle_basic_t handleInfo;
+        zx_status_t status = zx::unowned_vmo(handle)->get_info(
+            ZX_INFO_HANDLE_BASIC, &handleInfo, sizeof(handleInfo), nullptr,
+            nullptr);
+        if (status != ZX_OK || handleInfo.type != ZX_OBJ_TYPE_VMO) {
+            return VK_ERROR_INVALID_EXTERNAL_HANDLE;
+        }
+
         AutoLock lock(mLock);
 
         auto deviceIt = info_VkDevice.find(device);
@@ -1535,17 +1864,72 @@
 
         auto& info = deviceIt->second;
 
-        // Device local memory type supported.
+        zx::vmo vmo_dup;
+        status =
+            zx::unowned_vmo(handle)->duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
+        if (status != ZX_OK) {
+            ALOGE("zx_handle_duplicate() error: %d", status);
+            return VK_ERROR_INITIALIZATION_FAILED;
+        }
+
+        uint32_t memoryProperty = 0u;
+
+        auto result = mControlDevice->GetBufferHandleInfo(std::move(vmo_dup));
+        if (!result.ok()) {
+            ALOGE(
+                "mControlDevice->GetBufferHandleInfo fatal error: epitaph: %d",
+                result.status());
+            return VK_ERROR_INITIALIZATION_FAILED;
+        }
+        if (result->result.is_response()) {
+            memoryProperty = result->result.response().info.memory_property();
+        } else if (result->result.err() == ZX_ERR_NOT_FOUND) {
+            // If an VMO is allocated while ColorBuffer/Buffer is not created,
+            // it must be a device-local buffer, since for host-visible buffers,
+            // ColorBuffer/Buffer is created at sysmem allocation time.
+            memoryProperty = kMemoryPropertyDeviceLocal;
+        } else {
+            // Importing read-only host memory into the Vulkan driver should not
+            // work, but it is not an error to try to do so. Returning a
+            // VkMemoryZirconHandlePropertiesFUCHSIA with no available
+            // memoryType bits should be enough for clients. See fxbug.dev/24225
+            // for other issues this this flow.
+            ALOGW("GetBufferHandleInfo failed: %d", result->result.err());
+            pProperties->memoryTypeBits = 0;
+            return VK_SUCCESS;
+        }
+
         pProperties->memoryTypeBits = 0;
         for (uint32_t i = 0; i < info.memProps.memoryTypeCount; ++i) {
-            if (info.memProps.memoryTypes[i].propertyFlags &
-                VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
+            if (((memoryProperty & kMemoryPropertyDeviceLocal) &&
+                 (info.memProps.memoryTypes[i].propertyFlags &
+                  VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
+                ((memoryProperty & kMemoryPropertyHostVisible) &&
+                 (info.memProps.memoryTypes[i].propertyFlags &
+                  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
                 pProperties->memoryTypeBits |= 1ull << i;
             }
         }
         return VK_SUCCESS;
     }
 
+    zx_koid_t getEventKoid(zx_handle_t eventHandle) {
+        if (eventHandle == ZX_HANDLE_INVALID) {
+            return ZX_KOID_INVALID;
+        }
+
+        zx_info_handle_basic_t info;
+        zx_status_t status =
+            zx_object_get_info(eventHandle, ZX_INFO_HANDLE_BASIC, &info,
+                               sizeof(info), nullptr, nullptr);
+        if (status != ZX_OK) {
+            ALOGE("Cannot get object info of handle %u: %d", eventHandle,
+                  status);
+            return ZX_KOID_INVALID;
+        }
+        return info.koid;
+    }
+
     VkResult on_vkImportSemaphoreZirconHandleFUCHSIA(
         void*, VkResult,
         VkDevice device,
@@ -1573,7 +1957,14 @@
         if (info.eventHandle != ZX_HANDLE_INVALID) {
             zx_handle_close(info.eventHandle);
         }
+#if VK_HEADER_VERSION < 174
         info.eventHandle = pInfo->handle;
+#else // VK_HEADER_VERSION >= 174
+        info.eventHandle = pInfo->zirconHandle;
+#endif // VK_HEADER_VERSION < 174
+        if (info.eventHandle != ZX_HANDLE_INVALID) {
+            info.eventKoid = getEventKoid(info.eventHandle);
+        }
 
         return VK_SUCCESS;
     }
@@ -1617,24 +2008,48 @@
         const VkBufferCollectionCreateInfoFUCHSIA* pInfo,
         const VkAllocationCallbacks*,
         VkBufferCollectionFUCHSIA* pCollection) {
-        fuchsia::sysmem::BufferCollectionTokenSyncPtr token;
+        fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken> token_client;
+
         if (pInfo->collectionToken) {
-            token.Bind(zx::channel(pInfo->collectionToken));
+            token_client = fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken>(
+                zx::channel(pInfo->collectionToken));
         } else {
-            zx_status_t status = mSysmemAllocator->AllocateSharedCollection(token.NewRequest());
-            if (status != ZX_OK) {
-                ALOGE("AllocateSharedCollection failed: %d", status);
+            auto endpoints =
+                fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
+            if (!endpoints.is_ok()) {
+                ALOGE("zx_channel_create failed: %d", endpoints.status_value());
                 return VK_ERROR_INITIALIZATION_FAILED;
             }
+
+            auto result = mSysmemAllocator->AllocateSharedCollection(
+                std::move(endpoints->server));
+            if (!result.ok()) {
+                ALOGE("AllocateSharedCollection failed: %d", result.status());
+                return VK_ERROR_INITIALIZATION_FAILED;
+            }
+            token_client = std::move(endpoints->client);
         }
-        auto sysmem_collection = new fuchsia::sysmem::BufferCollectionSyncPtr;
-        zx_status_t status = mSysmemAllocator->BindSharedCollection(
-            std::move(token), sysmem_collection->NewRequest());
-        if (status != ZX_OK) {
-            ALOGE("BindSharedCollection failed: %d", status);
+
+        auto endpoints = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
+        if (!endpoints.is_ok()) {
+            ALOGE("zx_channel_create failed: %d", endpoints.status_value());
             return VK_ERROR_INITIALIZATION_FAILED;
         }
+        auto [collection_client, collection_server] = std::move(endpoints.value());
+
+        auto result = mSysmemAllocator->BindSharedCollection(
+            std::move(token_client), std::move(collection_server));
+        if (!result.ok()) {
+            ALOGE("BindSharedCollection failed: %d", result.status());
+            return VK_ERROR_INITIALIZATION_FAILED;
+        }
+
+        auto* sysmem_collection =
+            new fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>(
+                std::move(collection_client));
         *pCollection = reinterpret_cast<VkBufferCollectionFUCHSIA>(sysmem_collection);
+
+        register_VkBufferCollectionFUCHSIA(*pCollection);
         return VK_SUCCESS;
     }
 
@@ -1642,108 +2057,854 @@
         void*, VkResult, VkDevice,
         VkBufferCollectionFUCHSIA collection,
         const VkAllocationCallbacks*) {
-        auto sysmem_collection = reinterpret_cast<fuchsia::sysmem::BufferCollectionSyncPtr*>(collection);
-        if (sysmem_collection->is_bound()) {
-            (*sysmem_collection)->Close();
+        auto sysmem_collection = reinterpret_cast<
+            fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
+        if (sysmem_collection) {
+            sysmem_collection->Close();
         }
         delete sysmem_collection;
+
+        unregister_VkBufferCollectionFUCHSIA(collection);
     }
 
-    void setBufferCollectionConstraints(fuchsia::sysmem::BufferCollectionSyncPtr* collection,
-                                        const VkImageCreateInfo* pImageInfo,
-                                        size_t min_size_bytes) {
-        fuchsia::sysmem::BufferCollectionConstraints constraints = {};
-        constraints.usage.vulkan = fuchsia::sysmem::vulkanUsageColorAttachment |
-                                   fuchsia::sysmem::vulkanUsageTransferSrc |
-                                   fuchsia::sysmem::vulkanUsageTransferDst |
-                                   fuchsia::sysmem::vulkanUsageSampled;
-        constraints.min_buffer_count = 1;
+    inline fuchsia_sysmem::wire::BufferCollectionConstraints
+    defaultBufferCollectionConstraints(
+        size_t minSizeBytes,
+        size_t minBufferCount,
+        size_t maxBufferCount = 0u,
+        size_t minBufferCountForCamping = 0u,
+        size_t minBufferCountForDedicatedSlack = 0u,
+        size_t minBufferCountForSharedSlack = 0u) {
+        fuchsia_sysmem::wire::BufferCollectionConstraints constraints = {};
+        constraints.min_buffer_count = minBufferCount;
+        if (maxBufferCount > 0) {
+            constraints.max_buffer_count = maxBufferCount;
+        }
+        if (minBufferCountForCamping) {
+            constraints.min_buffer_count_for_camping = minBufferCountForCamping;
+        }
+        if (minBufferCountForSharedSlack) {
+            constraints.min_buffer_count_for_shared_slack =
+                minBufferCountForSharedSlack;
+        }
         constraints.has_buffer_memory_constraints = true;
-        fuchsia::sysmem::BufferMemoryConstraints& buffer_constraints =
+        fuchsia_sysmem::wire::BufferMemoryConstraints& buffer_constraints =
             constraints.buffer_memory_constraints;
-        buffer_constraints.min_size_bytes = min_size_bytes;
+
+        buffer_constraints.min_size_bytes = minSizeBytes;
         buffer_constraints.max_size_bytes = 0xffffffff;
         buffer_constraints.physically_contiguous_required = false;
         buffer_constraints.secure_required = false;
-        buffer_constraints.ram_domain_supported = false;
-        buffer_constraints.cpu_domain_supported = false;
-        buffer_constraints.inaccessible_domain_supported = true;
-        buffer_constraints.heap_permitted_count = 1;
-        buffer_constraints.heap_permitted[0] =
-            fuchsia::sysmem::HeapType::GOLDFISH_DEVICE_LOCAL;
-        constraints.image_format_constraints_count = 1;
-        fuchsia::sysmem::ImageFormatConstraints& image_constraints =
-            constraints.image_format_constraints[0];
-        image_constraints.pixel_format.type = fuchsia::sysmem::PixelFormatType::BGRA32;
-        image_constraints.color_spaces_count = 1;
-        image_constraints.color_space[0].type = fuchsia::sysmem::ColorSpaceType::SRGB;
-        image_constraints.min_coded_width = pImageInfo->extent.width;
-        image_constraints.max_coded_width = 0xfffffff;
-        image_constraints.min_coded_height = pImageInfo->extent.height;
-        image_constraints.max_coded_height = 0xffffffff;
-        image_constraints.min_bytes_per_row = pImageInfo->extent.width * 4;
-        image_constraints.max_bytes_per_row = 0xffffffff;
-        image_constraints.max_coded_width_times_coded_height = 0xffffffff;
-        image_constraints.layers = 1;
-        image_constraints.coded_width_divisor = 1;
-        image_constraints.coded_height_divisor = 1;
-        image_constraints.bytes_per_row_divisor = 1;
-        image_constraints.start_offset_divisor = 1;
-        image_constraints.display_width_divisor = 1;
-        image_constraints.display_height_divisor = 1;
 
-        (*collection)->SetConstraints(true, constraints);
+        // No restrictions on coherency domain or Heaps.
+        buffer_constraints.ram_domain_supported = true;
+        buffer_constraints.cpu_domain_supported = true;
+        buffer_constraints.inaccessible_domain_supported = true;
+        buffer_constraints.heap_permitted_count = 2;
+        buffer_constraints.heap_permitted[0] =
+            fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
+        buffer_constraints.heap_permitted[1] =
+            fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
+
+        return constraints;
+    }
+
+    uint32_t getBufferCollectionConstraintsVulkanImageUsage(
+        const VkImageCreateInfo* pImageInfo) {
+        uint32_t usage = 0u;
+        VkImageUsageFlags imageUsage = pImageInfo->usage;
+
+#define SetUsageBit(BIT, VALUE)                                           \
+    if (imageUsage & VK_IMAGE_USAGE_##BIT##_BIT) {                 \
+        usage |= fuchsia_sysmem::wire::kVulkanImageUsage##VALUE; \
+    }
+
+        SetUsageBit(COLOR_ATTACHMENT, ColorAttachment);
+        SetUsageBit(TRANSFER_SRC, TransferSrc);
+        SetUsageBit(TRANSFER_DST, TransferDst);
+        SetUsageBit(SAMPLED, Sampled);
+
+#undef SetUsageBit
+        return usage;
+    }
+
+    uint32_t getBufferCollectionConstraintsVulkanBufferUsage(
+        const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
+        uint32_t usage = 0u;
+        VkBufferUsageFlags bufferUsage =
+            pBufferConstraintsInfo->pBufferCreateInfo->usage;
+
+#define SetUsageBit(BIT, VALUE)                                            \
+    if (bufferUsage & VK_BUFFER_USAGE_##BIT##_BIT) {                \
+        usage |= fuchsia_sysmem::wire::kVulkanBufferUsage##VALUE; \
+    }
+
+        SetUsageBit(TRANSFER_SRC, TransferSrc);
+        SetUsageBit(TRANSFER_DST, TransferDst);
+        SetUsageBit(UNIFORM_TEXEL_BUFFER, UniformTexelBuffer);
+        SetUsageBit(STORAGE_TEXEL_BUFFER, StorageTexelBuffer);
+        SetUsageBit(UNIFORM_BUFFER, UniformBuffer);
+        SetUsageBit(STORAGE_BUFFER, StorageBuffer);
+        SetUsageBit(INDEX_BUFFER, IndexBuffer);
+        SetUsageBit(VERTEX_BUFFER, VertexBuffer);
+        SetUsageBit(INDIRECT_BUFFER, IndirectBuffer);
+
+#undef SetUsageBit
+        return usage;
+    }
+
+    static fuchsia_sysmem::wire::PixelFormatType vkFormatTypeToSysmem(
+        VkFormat format) {
+        switch (format) {
+            case VK_FORMAT_B8G8R8A8_SINT:
+            case VK_FORMAT_B8G8R8A8_UNORM:
+            case VK_FORMAT_B8G8R8A8_SRGB:
+            case VK_FORMAT_B8G8R8A8_SNORM:
+            case VK_FORMAT_B8G8R8A8_SSCALED:
+            case VK_FORMAT_B8G8R8A8_USCALED:
+                return fuchsia_sysmem::wire::PixelFormatType::kBgra32;
+            case VK_FORMAT_R8G8B8A8_SINT:
+            case VK_FORMAT_R8G8B8A8_UNORM:
+            case VK_FORMAT_R8G8B8A8_SRGB:
+            case VK_FORMAT_R8G8B8A8_SNORM:
+            case VK_FORMAT_R8G8B8A8_SSCALED:
+            case VK_FORMAT_R8G8B8A8_USCALED:
+                return fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
+            case VK_FORMAT_R8_UNORM:
+            case VK_FORMAT_R8_UINT:
+            case VK_FORMAT_R8_USCALED:
+            case VK_FORMAT_R8_SNORM:
+            case VK_FORMAT_R8_SINT:
+            case VK_FORMAT_R8_SSCALED:
+            case VK_FORMAT_R8_SRGB:
+                return fuchsia_sysmem::wire::PixelFormatType::kR8;
+            case VK_FORMAT_R8G8_UNORM:
+            case VK_FORMAT_R8G8_UINT:
+            case VK_FORMAT_R8G8_USCALED:
+            case VK_FORMAT_R8G8_SNORM:
+            case VK_FORMAT_R8G8_SINT:
+            case VK_FORMAT_R8G8_SSCALED:
+            case VK_FORMAT_R8G8_SRGB:
+                return fuchsia_sysmem::wire::PixelFormatType::kR8G8;
+            default:
+                return fuchsia_sysmem::wire::PixelFormatType::kInvalid;
+        }
+    }
+
+    static bool vkFormatMatchesSysmemFormat(
+        VkFormat vkFormat,
+        fuchsia_sysmem::wire::PixelFormatType sysmemFormat) {
+        switch (vkFormat) {
+            case VK_FORMAT_B8G8R8A8_SINT:
+            case VK_FORMAT_B8G8R8A8_UNORM:
+            case VK_FORMAT_B8G8R8A8_SRGB:
+            case VK_FORMAT_B8G8R8A8_SNORM:
+            case VK_FORMAT_B8G8R8A8_SSCALED:
+            case VK_FORMAT_B8G8R8A8_USCALED:
+                return sysmemFormat ==
+                       fuchsia_sysmem::wire::PixelFormatType::kBgra32;
+            case VK_FORMAT_R8G8B8A8_SINT:
+            case VK_FORMAT_R8G8B8A8_UNORM:
+            case VK_FORMAT_R8G8B8A8_SRGB:
+            case VK_FORMAT_R8G8B8A8_SNORM:
+            case VK_FORMAT_R8G8B8A8_SSCALED:
+            case VK_FORMAT_R8G8B8A8_USCALED:
+                return sysmemFormat ==
+                       fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
+            case VK_FORMAT_R8_UNORM:
+            case VK_FORMAT_R8_UINT:
+            case VK_FORMAT_R8_USCALED:
+            case VK_FORMAT_R8_SNORM:
+            case VK_FORMAT_R8_SINT:
+            case VK_FORMAT_R8_SSCALED:
+            case VK_FORMAT_R8_SRGB:
+                return sysmemFormat ==
+                           fuchsia_sysmem::wire::PixelFormatType::kR8 ||
+                       sysmemFormat ==
+                           fuchsia_sysmem::wire::PixelFormatType::kL8;
+            case VK_FORMAT_R8G8_UNORM:
+            case VK_FORMAT_R8G8_UINT:
+            case VK_FORMAT_R8G8_USCALED:
+            case VK_FORMAT_R8G8_SNORM:
+            case VK_FORMAT_R8G8_SINT:
+            case VK_FORMAT_R8G8_SSCALED:
+            case VK_FORMAT_R8G8_SRGB:
+                return sysmemFormat ==
+                       fuchsia_sysmem::wire::PixelFormatType::kR8G8;
+            default:
+                return false;
+        }
+    }
+
+    static VkFormat sysmemPixelFormatTypeToVk(
+        fuchsia_sysmem::wire::PixelFormatType format) {
+        switch (format) {
+            case fuchsia_sysmem::wire::PixelFormatType::kBgra32:
+                return VK_FORMAT_B8G8R8A8_SRGB;
+            case fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8:
+                return VK_FORMAT_R8G8B8A8_SRGB;
+            case fuchsia_sysmem::wire::PixelFormatType::kL8:
+            case fuchsia_sysmem::wire::PixelFormatType::kR8:
+                return VK_FORMAT_R8_UNORM;
+            case fuchsia_sysmem::wire::PixelFormatType::kR8G8:
+                return VK_FORMAT_R8G8_UNORM;
+            default:
+                return VK_FORMAT_UNDEFINED;
+        }
+    }
+
+    VkResult setBufferCollectionConstraints(
+        VkEncoder* enc, VkDevice device,
+        fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* collection,
+        const VkImageCreateInfo* pImageInfo) {
+        if (pImageInfo == nullptr) {
+            ALOGE("setBufferCollectionConstraints: pImageInfo cannot be null.");
+            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+        }
+
+        std::vector<VkImageCreateInfo> createInfos;
+        if (pImageInfo->format == VK_FORMAT_UNDEFINED) {
+            const auto kFormats = {
+                VK_FORMAT_B8G8R8A8_SRGB,
+                VK_FORMAT_R8G8B8A8_SRGB,
+            };
+            for (auto format : kFormats) {
+                // shallow copy, using pNext from pImageInfo directly.
+                auto createInfo = *pImageInfo;
+                createInfo.format = format;
+                createInfos.push_back(createInfo);
+            }
+        } else {
+            createInfos.push_back(*pImageInfo);
+        }
+
+        VkImageConstraintsInfoFUCHSIA imageConstraints;
+        imageConstraints.sType =
+            VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA;
+        imageConstraints.pNext = nullptr;
+        imageConstraints.createInfoCount = createInfos.size();
+        imageConstraints.pCreateInfos = createInfos.data();
+        imageConstraints.pFormatConstraints = nullptr;
+        imageConstraints.maxBufferCount = 0;
+        imageConstraints.minBufferCount = 1;
+        imageConstraints.minBufferCountForCamping = 0;
+        imageConstraints.minBufferCountForDedicatedSlack = 0;
+        imageConstraints.minBufferCountForSharedSlack = 0;
+        imageConstraints.flags = 0u;
+
+        return setBufferCollectionImageConstraints(enc, device, collection,
+                                                   &imageConstraints);
+    }
+
+    VkResult addImageBufferCollectionConstraints(
+        VkEncoder* enc,
+        VkDevice device,
+        VkPhysicalDevice physicalDevice,
+        const VkImageCreateInfo* createInfo,
+        const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints,
+        VkImageTiling tiling,
+        fuchsia_sysmem::wire::BufferCollectionConstraints* constraints) {
+        // First check if the format, tiling and usage is supported on host.
+        VkImageFormatProperties imageFormatProperties;
+        auto result = enc->vkGetPhysicalDeviceImageFormatProperties(
+            physicalDevice, createInfo->format, createInfo->imageType, tiling,
+            createInfo->usage, createInfo->flags, &imageFormatProperties,
+            true /* do lock */);
+        if (result != VK_SUCCESS) {
+            ALOGW(
+                "%s: Image format (%u) type (%u) tiling (%u) "
+                "usage (%u) flags (%u) not supported by physical "
+                "device",
+                __func__, static_cast<uint32_t>(createInfo->format),
+                static_cast<uint32_t>(createInfo->imageType),
+                static_cast<uint32_t>(tiling),
+                static_cast<uint32_t>(createInfo->usage),
+                static_cast<uint32_t>(createInfo->flags));
+            return VK_ERROR_FORMAT_NOT_SUPPORTED;
+        }
+
+        // Check if format constraints contains unsupported format features.
+        if (formatConstraints) {
+            VkFormatProperties formatProperties;
+            enc->vkGetPhysicalDeviceFormatProperties(
+                physicalDevice, createInfo->format, &formatProperties,
+                true /* do lock */);
+
+            auto supportedFeatures =
+                (tiling == VK_IMAGE_TILING_LINEAR)
+                    ? formatProperties.linearTilingFeatures
+                    : formatProperties.optimalTilingFeatures;
+            auto requiredFeatures = formatConstraints->requiredFormatFeatures;
+            if ((~supportedFeatures) & requiredFeatures) {
+                ALOGW(
+                    "%s: Host device support features for %s tiling: %08x, "
+                    "required features: %08x, feature bits %08x missing",
+                    __func__,
+                    tiling == VK_IMAGE_TILING_LINEAR ? "LINEAR" : "OPTIMAL",
+                    static_cast<uint32_t>(requiredFeatures),
+                    static_cast<uint32_t>(supportedFeatures),
+                    static_cast<uint32_t>((~supportedFeatures) &
+                                          requiredFeatures));
+                return VK_ERROR_FORMAT_NOT_SUPPORTED;
+            }
+        }
+
+        fuchsia_sysmem::wire::ImageFormatConstraints imageConstraints;
+        if (formatConstraints && formatConstraints->sysmemFormat != 0) {
+            auto pixelFormat =
+                static_cast<fuchsia_sysmem::wire::PixelFormatType>(
+                    formatConstraints->sysmemFormat);
+            if (createInfo->format != VK_FORMAT_UNDEFINED &&
+                !vkFormatMatchesSysmemFormat(createInfo->format, pixelFormat)) {
+                ALOGW("%s: VkFormat %u doesn't match sysmem pixelFormat %lu",
+                      __func__, static_cast<uint32_t>(createInfo->format),
+                      formatConstraints->sysmemFormat);
+                return VK_ERROR_FORMAT_NOT_SUPPORTED;
+            }
+            imageConstraints.pixel_format.type = pixelFormat;
+        } else {
+            auto pixel_format = vkFormatTypeToSysmem(createInfo->format);
+            if (pixel_format ==
+                fuchsia_sysmem::wire::PixelFormatType::kInvalid) {
+                ALOGW("%s: Unsupported VkFormat %u", __func__,
+                      static_cast<uint32_t>(createInfo->format));
+                return VK_ERROR_FORMAT_NOT_SUPPORTED;
+            }
+            imageConstraints.pixel_format.type = pixel_format;
+        }
+
+        if (!formatConstraints || formatConstraints->colorSpaceCount == 0u) {
+            imageConstraints.color_spaces_count = 1;
+            imageConstraints.color_space[0].type =
+                fuchsia_sysmem::wire::ColorSpaceType::kSrgb;
+        } else {
+            imageConstraints.color_spaces_count =
+                formatConstraints->colorSpaceCount;
+            for (size_t i = 0; i < formatConstraints->colorSpaceCount; i++) {
+                imageConstraints.color_space[0].type =
+                    static_cast<fuchsia_sysmem::wire::ColorSpaceType>(
+                        formatConstraints->pColorSpaces[i].colorSpace);
+            }
+        }
+
+        // Get row alignment from host GPU.
+        VkDeviceSize offset;
+        VkDeviceSize rowPitchAlignment;
+        enc->vkGetLinearImageLayoutGOOGLE(device, createInfo->format, &offset,
+                                          &rowPitchAlignment,
+                                          true /* do lock */);
+        ALOGD(
+            "vkGetLinearImageLayoutGOOGLE: format %d offset %lu "
+            "rowPitchAlignment = %lu",
+            (int)createInfo->format, offset, rowPitchAlignment);
+
+        imageConstraints.min_coded_width = createInfo->extent.width;
+        imageConstraints.max_coded_width = 0xfffffff;
+        imageConstraints.min_coded_height = createInfo->extent.height;
+        imageConstraints.max_coded_height = 0xffffffff;
+        // The min_bytes_per_row can be calculated by sysmem using
+        // |min_coded_width|, |bytes_per_row_divisor| and color format.
+        imageConstraints.min_bytes_per_row = 0;
+        imageConstraints.max_bytes_per_row = 0xffffffff;
+        imageConstraints.max_coded_width_times_coded_height = 0xffffffff;
+
+        imageConstraints.layers = 1;
+        imageConstraints.coded_width_divisor = 1;
+        imageConstraints.coded_height_divisor = 1;
+        imageConstraints.bytes_per_row_divisor = rowPitchAlignment;
+        imageConstraints.start_offset_divisor = 1;
+        imageConstraints.display_width_divisor = 1;
+        imageConstraints.display_height_divisor = 1;
+        imageConstraints.pixel_format.has_format_modifier = true;
+        imageConstraints.pixel_format.format_modifier.value =
+            (tiling == VK_IMAGE_TILING_LINEAR)
+                ? fuchsia_sysmem::wire::kFormatModifierLinear
+                : fuchsia_sysmem::wire::kFormatModifierGoogleGoldfishOptimal;
+
+        constraints->image_format_constraints
+            [constraints->image_format_constraints_count++] =
+            std::move(imageConstraints);
+        return VK_SUCCESS;
+    }
+
+    VkResult setBufferCollectionImageConstraints(
+        VkEncoder* enc,
+        VkDevice device,
+        fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* collection,
+        const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
+        if (!pImageConstraintsInfo ||
+            pImageConstraintsInfo->sType !=
+                VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA) {
+            ALOGE("%s: invalid pImageConstraintsInfo", __func__);
+            return VK_ERROR_INITIALIZATION_FAILED;
+        }
+
+        if (pImageConstraintsInfo->createInfoCount == 0) {
+            ALOGE("%s: createInfoCount must be greater than 0", __func__);
+            return VK_ERROR_INITIALIZATION_FAILED;
+        }
+
+        fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
+            defaultBufferCollectionConstraints(
+                /* min_size_bytes */ 0, pImageConstraintsInfo->minBufferCount,
+                pImageConstraintsInfo->maxBufferCount,
+                pImageConstraintsInfo->minBufferCountForCamping,
+                pImageConstraintsInfo->minBufferCountForDedicatedSlack,
+                pImageConstraintsInfo->minBufferCountForSharedSlack);
+
+        std::vector<fuchsia_sysmem::wire::ImageFormatConstraints>
+            format_constraints;
+
+        VkPhysicalDevice physicalDevice;
+        {
+            AutoLock lock(mLock);
+            auto deviceIt = info_VkDevice.find(device);
+            if (deviceIt == info_VkDevice.end()) {
+                return VK_ERROR_INITIALIZATION_FAILED;
+            }
+            physicalDevice = deviceIt->second.physdev;
+        }
+
+        std::vector<uint32_t> createInfoIndex;
+
+        bool hasOptimalTiling = false;
+        for (uint32_t i = 0; i < pImageConstraintsInfo->createInfoCount; i++) {
+            const VkImageCreateInfo* createInfo =
+                &pImageConstraintsInfo->pCreateInfos[i];
+            const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints =
+                pImageConstraintsInfo->pFormatConstraints
+                    ? &pImageConstraintsInfo->pFormatConstraints[i]
+                    : nullptr;
+
+            // add ImageFormatConstraints for *optimal* tiling
+            VkResult optimalResult = VK_ERROR_FORMAT_NOT_SUPPORTED;
+            if (createInfo->tiling == VK_IMAGE_TILING_OPTIMAL) {
+                optimalResult = addImageBufferCollectionConstraints(
+                    enc, device, physicalDevice, createInfo, formatConstraints,
+                    VK_IMAGE_TILING_OPTIMAL, &constraints);
+                if (optimalResult == VK_SUCCESS) {
+                    createInfoIndex.push_back(i);
+                    hasOptimalTiling = true;
+                }
+            }
+
+            // Add ImageFormatConstraints for *linear* tiling
+            VkResult linearResult = addImageBufferCollectionConstraints(
+                enc, device, physicalDevice, createInfo, formatConstraints,
+                VK_IMAGE_TILING_LINEAR, &constraints);
+            if (linearResult == VK_SUCCESS) {
+                createInfoIndex.push_back(i);
+            }
+
+            // Update usage and BufferMemoryConstraints
+            if (linearResult == VK_SUCCESS || optimalResult == VK_SUCCESS) {
+                constraints.usage.vulkan |=
+                    getBufferCollectionConstraintsVulkanImageUsage(createInfo);
+
+                if (formatConstraints && formatConstraints->flags) {
+                    ALOGW(
+                        "%s: Non-zero flags (%08x) in image format "
+                        "constraints; this is currently not supported, see "
+                        "fxbug.dev/68833.",
+                        __func__, formatConstraints->flags);
+                }
+            }
+        }
+
+        // Set buffer memory constraints based on optimal/linear tiling support
+        // and flags.
+        VkImageConstraintsInfoFlagsFUCHSIA flags = pImageConstraintsInfo->flags;
+        if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA)
+            constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageRead;
+        if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA)
+            constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageReadOften;
+        if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA)
+            constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWrite;
+        if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA)
+            constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWriteOften;
+
+        constraints.has_buffer_memory_constraints = true;
+        auto& memory_constraints = constraints.buffer_memory_constraints;
+        memory_constraints.cpu_domain_supported = true;
+        memory_constraints.ram_domain_supported = true;
+        memory_constraints.inaccessible_domain_supported =
+            hasOptimalTiling &&
+            !(flags & (VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA |
+                       VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA |
+                       VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA |
+                       VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA));
+
+        if (memory_constraints.inaccessible_domain_supported) {
+            memory_constraints.heap_permitted_count = 2;
+            memory_constraints.heap_permitted[0] =
+                fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
+            memory_constraints.heap_permitted[1] =
+                fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
+        } else {
+            memory_constraints.heap_permitted_count = 1;
+            memory_constraints.heap_permitted[0] =
+                fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
+        }
+
+        if (constraints.image_format_constraints_count == 0) {
+            ALOGE("%s: none of the specified formats is supported by device",
+                  __func__);
+            return VK_ERROR_FORMAT_NOT_SUPPORTED;
+        }
+
+        constexpr uint32_t kVulkanPriority = 5;
+        const char kName[] = "GoldfishSysmemShared";
+        collection->SetName(kVulkanPriority, fidl::StringView(kName));
+
+        auto result = collection->SetConstraints(true, constraints);
+        if (!result.ok()) {
+            ALOGE("setBufferCollectionConstraints: SetConstraints failed: %d",
+                  result.status());
+            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+        }
+
+        // copy constraints to info_VkBufferCollectionFUCHSIA if
+        // |collection| is a valid VkBufferCollectionFUCHSIA handle.
+        AutoLock lock(mLock);
+        VkBufferCollectionFUCHSIA buffer_collection =
+            reinterpret_cast<VkBufferCollectionFUCHSIA>(collection);
+        if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
+            info_VkBufferCollectionFUCHSIA.end()) {
+            info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
+                android::base::makeOptional(std::move(constraints));
+            info_VkBufferCollectionFUCHSIA[buffer_collection].createInfoIndex =
+                std::move(createInfoIndex);
+        }
+
+        return VK_SUCCESS;
+    }
+
+    VkResult setBufferCollectionBufferConstraints(
+        fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* collection,
+        const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
+        if (pBufferConstraintsInfo == nullptr) {
+            ALOGE(
+                "setBufferCollectionBufferConstraints: "
+                "pBufferConstraintsInfo cannot be null.");
+            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+        }
+
+        fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
+            defaultBufferCollectionConstraints(
+                /* min_size_bytes */ pBufferConstraintsInfo->pBufferCreateInfo->size,
+                /* buffer_count */ pBufferConstraintsInfo->minCount);
+        constraints.usage.vulkan =
+            getBufferCollectionConstraintsVulkanBufferUsage(
+                pBufferConstraintsInfo);
+
+        constexpr uint32_t kVulkanPriority = 5;
+        const char kName[] = "GoldfishBufferSysmemShared";
+        collection->SetName(kVulkanPriority, fidl::StringView(kName));
+
+        auto result = collection->SetConstraints(true, constraints);
+        if (!result.ok()) {
+            ALOGE("setBufferCollectionConstraints: SetConstraints failed: %d",
+                  result.status());
+            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+        }
+
+        // copy constraints to info_VkBufferCollectionFUCHSIA if
+        // |collection| is a valid VkBufferCollectionFUCHSIA handle.
+        AutoLock lock(mLock);
+        VkBufferCollectionFUCHSIA buffer_collection =
+            reinterpret_cast<VkBufferCollectionFUCHSIA>(collection);
+        if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
+            info_VkBufferCollectionFUCHSIA.end()) {
+            info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
+                android::base::makeOptional(std::move(constraints));
+        }
+
+        return VK_SUCCESS;
     }
 
     VkResult on_vkSetBufferCollectionConstraintsFUCHSIA(
-        void*, VkResult, VkDevice,
+        void* context, VkResult, VkDevice device,
         VkBufferCollectionFUCHSIA collection,
         const VkImageCreateInfo* pImageInfo) {
-        auto sysmem_collection =
-            reinterpret_cast<fuchsia::sysmem::BufferCollectionSyncPtr*>(collection);
-        setBufferCollectionConstraints(
-            sysmem_collection, pImageInfo,
-            pImageInfo->extent.width * pImageInfo->extent.height * 4);
-        return VK_SUCCESS;
+        VkEncoder* enc = (VkEncoder*)context;
+        auto sysmem_collection = reinterpret_cast<
+            fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
+        return setBufferCollectionConstraints(enc, device, sysmem_collection, pImageInfo);
+    }
+
+    VkResult on_vkSetBufferCollectionImageConstraintsFUCHSIA(
+        void* context,
+        VkResult,
+        VkDevice device,
+        VkBufferCollectionFUCHSIA collection,
+        const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
+        VkEncoder* enc = (VkEncoder*)context;
+        auto sysmem_collection = reinterpret_cast<
+            fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
+        return setBufferCollectionImageConstraints(
+            enc, device, sysmem_collection, pImageConstraintsInfo);
+    }
+
+    VkResult on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
+        void*,
+        VkResult,
+        VkDevice,
+        VkBufferCollectionFUCHSIA collection,
+        const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
+        auto sysmem_collection = reinterpret_cast<
+            fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
+        return setBufferCollectionBufferConstraints(sysmem_collection,
+                                                    pBufferConstraintsInfo);
     }
 
     VkResult on_vkGetBufferCollectionPropertiesFUCHSIA(
-        void*, VkResult,
+        void* context,
+        VkResult,
         VkDevice device,
         VkBufferCollectionFUCHSIA collection,
         VkBufferCollectionPropertiesFUCHSIA* pProperties) {
-        auto sysmem_collection = reinterpret_cast<fuchsia::sysmem::BufferCollectionSyncPtr*>(collection);
-        fuchsia::sysmem::BufferCollectionInfo_2 info;
-        zx_status_t status2;
-        zx_status_t status = (*sysmem_collection)->WaitForBuffersAllocated(&status2, &info);
-        if (status != ZX_OK || status2 != ZX_OK) {
-            ALOGE("Failed wait for allocation: %d %d", status, status2);
-            return VK_ERROR_INITIALIZATION_FAILED;
+        VkBufferCollectionProperties2FUCHSIA properties2 = {
+            .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_PROPERTIES2_FUCHSIA,
+            .pNext = nullptr};
+        auto result = on_vkGetBufferCollectionProperties2FUCHSIA(
+            context, VK_SUCCESS, device, collection, &properties2);
+        if (result != VK_SUCCESS) {
+            return result;
         }
+
+        pProperties->count = properties2.bufferCount;
+        pProperties->memoryTypeBits = properties2.memoryTypeBits;
+        return VK_SUCCESS;
+    }
+
+    VkResult getBufferCollectionImageCreateInfoIndexLocked(
+        VkBufferCollectionFUCHSIA collection,
+        fuchsia_sysmem::wire::BufferCollectionInfo2& info,
+        uint32_t* outCreateInfoIndex) {
+        if (!info_VkBufferCollectionFUCHSIA[collection]
+                 .constraints.hasValue()) {
+            ALOGE("%s: constraints not set", __func__);
+            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+        }
+
         if (!info.settings.has_image_format_constraints) {
+            // no image format constraints, skip getting createInfoIndex.
+            return VK_SUCCESS;
+        }
+
+        const auto& constraints =
+            *info_VkBufferCollectionFUCHSIA[collection].constraints;
+        const auto& createInfoIndices =
+            info_VkBufferCollectionFUCHSIA[collection].createInfoIndex;
+        const auto& out = info.settings.image_format_constraints;
+        bool foundCreateInfo = false;
+
+        for (size_t imageFormatIndex = 0;
+             imageFormatIndex < constraints.image_format_constraints_count;
+             imageFormatIndex++) {
+            const auto& in =
+                constraints.image_format_constraints[imageFormatIndex];
+            // These checks are sorted in order of how often they're expected to
+            // mismatch, from most likely to least likely. They aren't always
+            // equality comparisons, since sysmem may change some values in
+            // compatible ways on behalf of the other participants.
+            if ((out.pixel_format.type != in.pixel_format.type) ||
+                (out.pixel_format.has_format_modifier !=
+                 in.pixel_format.has_format_modifier) ||
+                (out.pixel_format.format_modifier.value !=
+                 in.pixel_format.format_modifier.value) ||
+                (out.min_bytes_per_row < in.min_bytes_per_row) ||
+                (out.required_max_coded_width < in.required_max_coded_width) ||
+                (out.required_max_coded_height <
+                 in.required_max_coded_height) ||
+                (out.bytes_per_row_divisor % in.bytes_per_row_divisor != 0)) {
+                continue;
+            }
+            // Check if the out colorspaces are a subset of the in color spaces.
+            bool all_color_spaces_found = true;
+            for (uint32_t j = 0; j < out.color_spaces_count; j++) {
+                bool found_matching_color_space = false;
+                for (uint32_t k = 0; k < in.color_spaces_count; k++) {
+                    if (out.color_space[j].type == in.color_space[k].type) {
+                        found_matching_color_space = true;
+                        break;
+                    }
+                }
+                if (!found_matching_color_space) {
+                    all_color_spaces_found = false;
+                    break;
+                }
+            }
+            if (!all_color_spaces_found) {
+                continue;
+            }
+
+            // Choose the first valid format for now.
+            *outCreateInfoIndex = createInfoIndices[imageFormatIndex];
+            return VK_SUCCESS;
+        }
+
+        ALOGE("%s: cannot find a valid image format in constraints", __func__);
+        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+    }
+
+    VkResult on_vkGetBufferCollectionProperties2FUCHSIA(
+        void* context,
+        VkResult,
+        VkDevice device,
+        VkBufferCollectionFUCHSIA collection,
+        VkBufferCollectionProperties2FUCHSIA* pProperties) {
+        VkEncoder* enc = (VkEncoder*)context;
+        auto sysmem_collection = reinterpret_cast<
+            fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
+
+        auto result = sysmem_collection->WaitForBuffersAllocated();
+        if (!result.ok() || result.Unwrap()->status != ZX_OK) {
+            ALOGE("Failed wait for allocation: %d %d", result.status(),
+                  GET_STATUS_SAFE(result, status));
             return VK_ERROR_INITIALIZATION_FAILED;
         }
-        pProperties->count = info.buffer_count;
+        fuchsia_sysmem::wire::BufferCollectionInfo2 info =
+            std::move(result.Unwrap()->buffer_collection_info);
 
-        AutoLock lock(mLock);
-
-        auto deviceIt = info_VkDevice.find(device);
-
-        if (deviceIt == info_VkDevice.end()) {
+        bool is_host_visible = info.settings.buffer_settings.heap ==
+                               fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
+        bool is_device_local = info.settings.buffer_settings.heap ==
+                               fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
+        if (!is_host_visible && !is_device_local) {
+            ALOGE("buffer collection uses a non-goldfish heap (type 0x%lu)",
+                static_cast<uint64_t>(info.settings.buffer_settings.heap));
             return VK_ERROR_INITIALIZATION_FAILED;
         }
 
-        auto& deviceInfo = deviceIt->second;
+        // memoryTypeBits
+        // ====================================================================
+        {
+            AutoLock lock(mLock);
+            auto deviceIt = info_VkDevice.find(device);
+            if (deviceIt == info_VkDevice.end()) {
+                return VK_ERROR_INITIALIZATION_FAILED;
+            }
+            auto& deviceInfo = deviceIt->second;
 
-        // Device local memory type supported.
-        pProperties->memoryTypeBits = 0;
-        for (uint32_t i = 0; i < deviceInfo.memProps.memoryTypeCount; ++i) {
-            if (deviceInfo.memProps.memoryTypes[i].propertyFlags &
-                VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
-                pProperties->memoryTypeBits |= 1ull << i;
+            // Device local memory type supported.
+            pProperties->memoryTypeBits = 0;
+            for (uint32_t i = 0; i < deviceInfo.memProps.memoryTypeCount; ++i) {
+                if ((is_device_local &&
+                     (deviceInfo.memProps.memoryTypes[i].propertyFlags &
+                      VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
+                    (is_host_visible &&
+                     (deviceInfo.memProps.memoryTypes[i].propertyFlags &
+                      VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
+                    pProperties->memoryTypeBits |= 1ull << i;
+                }
             }
         }
-        return VK_SUCCESS;
+
+        // bufferCount
+        // ====================================================================
+        pProperties->bufferCount = info.buffer_count;
+
+        auto storeProperties = [this, collection, pProperties]() -> VkResult {
+            // store properties to storage
+            AutoLock lock(mLock);
+            if (info_VkBufferCollectionFUCHSIA.find(collection) ==
+                info_VkBufferCollectionFUCHSIA.end()) {
+                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+            }
+
+            info_VkBufferCollectionFUCHSIA[collection].properties =
+                android::base::makeOptional(*pProperties);
+
+            // We only do a shallow copy so we should remove all pNext pointers.
+            info_VkBufferCollectionFUCHSIA[collection].properties->pNext =
+                nullptr;
+            info_VkBufferCollectionFUCHSIA[collection]
+                .properties->colorSpace.pNext = nullptr;
+            return VK_SUCCESS;
+        };
+
+        // The fields below only apply to buffer collections with image formats.
+        if (!info.settings.has_image_format_constraints) {
+            ALOGD("%s: buffer collection doesn't have image format constraints",
+                  __func__);
+            return storeProperties();
+        }
+
+        // sysmemFormat
+        // ====================================================================
+
+        pProperties->sysmemFormat = static_cast<uint64_t>(
+            info.settings.image_format_constraints.pixel_format.type);
+
+        // colorSpace
+        // ====================================================================
+        if (info.settings.image_format_constraints.color_spaces_count == 0) {
+            ALOGE(
+                "%s: color space missing from allocated buffer collection "
+                "constraints",
+                __func__);
+            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+        }
+        // Only report first colorspace for now.
+        pProperties->colorSpace.colorSpace = static_cast<uint32_t>(
+            info.settings.image_format_constraints.color_space[0].type);
+
+        // createInfoIndex
+        // ====================================================================
+        {
+            AutoLock lock(mLock);
+            auto getIndexResult = getBufferCollectionImageCreateInfoIndexLocked(
+                collection, info, &pProperties->createInfoIndex);
+            if (getIndexResult != VK_SUCCESS) {
+                return getIndexResult;
+            }
+        }
+
+        // formatFeatures
+        // ====================================================================
+        VkPhysicalDevice physicalDevice;
+        {
+            AutoLock lock(mLock);
+            auto deviceIt = info_VkDevice.find(device);
+            if (deviceIt == info_VkDevice.end()) {
+                return VK_ERROR_INITIALIZATION_FAILED;
+            }
+            physicalDevice = deviceIt->second.physdev;
+        }
+
+        VkFormat vkFormat = sysmemPixelFormatTypeToVk(
+            info.settings.image_format_constraints.pixel_format.type);
+        VkFormatProperties formatProperties;
+        enc->vkGetPhysicalDeviceFormatProperties(
+            physicalDevice, vkFormat, &formatProperties, true /* do lock */);
+        if (is_device_local) {
+            pProperties->formatFeatures =
+                formatProperties.optimalTilingFeatures;
+        }
+        if (is_host_visible) {
+            pProperties->formatFeatures = formatProperties.linearTilingFeatures;
+        }
+
+        // YCbCr properties
+        // ====================================================================
+        // TODO(59804): Implement this correctly when we support YUV pixel
+        // formats in goldfish ICD.
+        pProperties->samplerYcbcrConversionComponents.r =
+            VK_COMPONENT_SWIZZLE_IDENTITY;
+        pProperties->samplerYcbcrConversionComponents.g =
+            VK_COMPONENT_SWIZZLE_IDENTITY;
+        pProperties->samplerYcbcrConversionComponents.b =
+            VK_COMPONENT_SWIZZLE_IDENTITY;
+        pProperties->samplerYcbcrConversionComponents.a =
+            VK_COMPONENT_SWIZZLE_IDENTITY;
+        pProperties->suggestedYcbcrModel =
+            VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY;
+        pProperties->suggestedYcbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_FULL;
+        pProperties->suggestedXChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
+        pProperties->suggestedYChromaOffset = VK_CHROMA_LOCATION_MIDPOINT;
+
+        return storeProperties();
     }
 #endif
 
@@ -1801,7 +2962,7 @@
                     device,
                     &allocInfoForHost,
                     nullptr,
-                    &hostMemAlloc.memory);
+                    &hostMemAlloc.memory, true /* do lock */);
             mLock.lock();
 
             if (host_res != VK_SUCCESS) {
@@ -1829,16 +2990,19 @@
                 mLock.unlock();
                 directMapResult =
                     enc->vkMapMemoryIntoAddressSpaceGOOGLE(
-                            device, hostMemAlloc.memory, &directMappedAddr);
+                            device, hostMemAlloc.memory, &directMappedAddr, true /* do lock */);
                 mLock.lock();
             } else if (mFeatureInfo->hasVirtioGpuNext) {
 #if !defined(HOST_BUILD) && defined(VK_USE_PLATFORM_ANDROID_KHR)
                 uint64_t hvaSizeId[3];
 
+                int rendernodeFdForMem = drmOpenRender(128 /* RENDERNODE_MINOR */);
+                ALOGE("%s: render fd = %d\n", __func__, rendernodeFdForMem);
+
                 mLock.unlock();
                 enc->vkGetMemoryHostAddressInfoGOOGLE(
                         device, hostMemAlloc.memory,
-                        &hvaSizeId[0], &hvaSizeId[1], &hvaSizeId[2]);
+                        &hvaSizeId[0], &hvaSizeId[1], &hvaSizeId[2], true /* do lock */);
                 ALOGD("%s: hvaOff, size: 0x%llx 0x%llx id: 0x%llx\n", __func__,
                         (unsigned long long)hvaSizeId[0],
                         (unsigned long long)hvaSizeId[1],
@@ -1852,7 +3016,7 @@
                 drm_rc_blob.size = hvaSizeId[1];
 
                 int res = drmIoctl(
-                    mRendernodeFd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
+                    rendernodeFdForMem, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
 
                 if (res) {
                     ALOGE("%s: Failed to resource create v2: sterror: %s errno: %d\n", __func__,
@@ -1860,11 +3024,11 @@
                     abort();
                 }
 
-                struct drm_virtgpu_map map_info = {
-                    .handle = drm_rc_blob.bo_handle,
-                };
+                drm_virtgpu_map map_info;
+                memset(&map_info, 0, sizeof(map_info));
+                map_info.handle = drm_rc_blob.bo_handle;
 
-                res = drmIoctl(mRendernodeFd, DRM_IOCTL_VIRTGPU_MAP, &map_info);
+                res = drmIoctl(rendernodeFdForMem, DRM_IOCTL_VIRTGPU_MAP, &map_info);
                 if (res) {
                     ALOGE("%s: Failed to virtgpu map: sterror: %s errno: %d\n", __func__,
                             strerror(errno), errno);
@@ -1872,16 +3036,21 @@
                 }
 
                 directMappedAddr = (uint64_t)(uintptr_t)
-                    mmap64(0, hvaSizeId[1], PROT_WRITE, MAP_SHARED, mRendernodeFd, map_info.offset);
+                    mmap64(0, hvaSizeId[1], PROT_WRITE, MAP_SHARED, rendernodeFdForMem, map_info.offset);
 
                 if (!directMappedAddr) {
                     ALOGE("%s: mmap of virtio gpu resource failed\n", __func__);
                     abort();
                 }
 
+                hostMemAlloc.memoryAddr = directMappedAddr;
+                hostMemAlloc.memorySize = hvaSizeId[1];
+
                 // add the host's page offset
                 directMappedAddr += (uint64_t)(uintptr_t)(hvaSizeId[0]) & (PAGE_SIZE - 1);
 				directMapResult = VK_SUCCESS;
+
+                hostMemAlloc.fd = rendernodeFdForMem;
 #endif // VK_USE_PLATFORM_ANDROID_KHR
             }
 
@@ -1889,7 +3058,7 @@
                 hostMemAlloc.initialized = true;
                 hostMemAlloc.initResult = directMapResult;
                 mLock.unlock();
-                enc->vkFreeMemory(device, hostMemAlloc.memory, nullptr);
+                enc->vkFreeMemory(device, hostMemAlloc.memory, nullptr, true /* do lock */);
                 mLock.lock();
                 return INVALID_HOST_MEM_BLOCK;
             }
@@ -1918,6 +3087,16 @@
         return INVALID_HOST_MEM_BLOCK;
     }
 
+    uint64_t getAHardwareBufferId(AHardwareBuffer* ahw) {
+        uint64_t id = 0;
+#if defined(PLATFORM_SDK_VERSION) && PLATFORM_SDK_VERSION >= 31
+        AHardwareBuffer_getId(ahw, &id);
+#else
+        (void)ahw;
+#endif
+        return id;
+    }
+
     VkResult on_vkAllocateMemory(
         void* context,
         VkResult input_result,
@@ -1926,7 +3105,40 @@
         const VkAllocationCallbacks* pAllocator,
         VkDeviceMemory* pMemory) {
 
-        if (input_result != VK_SUCCESS) return input_result;
+#define _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(result) \
+        { \
+            auto it = info_VkDevice.find(device); \
+            if (it == info_VkDevice.end()) return result; \
+            emitDeviceMemoryReport( \
+                it->second, \
+                VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATION_FAILED_EXT, \
+                0, \
+                pAllocateInfo->allocationSize, \
+                VK_OBJECT_TYPE_DEVICE_MEMORY, \
+                0, \
+                pAllocateInfo->memoryTypeIndex); \
+            return result; \
+        }
+
+#define _RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT \
+        { \
+            uint64_t memoryObjectId = (uint64_t)(void*)*pMemory; \
+            if (ahw) { \
+                memoryObjectId = getAHardwareBufferId(ahw); \
+            } \
+            emitDeviceMemoryReport( \
+                info_VkDevice[device], \
+                isImport ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_IMPORT_EXT : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_ALLOCATE_EXT, \
+                memoryObjectId, \
+                pAllocateInfo->allocationSize, \
+                VK_OBJECT_TYPE_DEVICE_MEMORY, \
+                (uint64_t)(void*)*pMemory, \
+                pAllocateInfo->memoryTypeIndex); \
+            return VK_SUCCESS; \
+        }
+
+
+        if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
 
         VkEncoder* enc = (VkEncoder*)context;
 
@@ -1937,6 +3149,10 @@
         VkImportColorBufferGOOGLE importCbInfo = {
             VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE, 0,
         };
+        VkImportBufferGOOGLE importBufferInfo = {
+                VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE,
+                0,
+        };
         // VkImportPhysicalAddressGOOGLE importPhysAddrInfo = {
         //     VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE, 0,
         // };
@@ -1952,29 +3168,36 @@
 
         const VkImportMemoryZirconHandleInfoFUCHSIA* importVmoInfoPtr =
             vk_find_struct<VkImportMemoryZirconHandleInfoFUCHSIA>(pAllocateInfo);
+        if (!importVmoInfoPtr) {
+            importVmoInfoPtr = reinterpret_cast<const VkImportMemoryZirconHandleInfoFUCHSIA*>(
+                __vk_find_struct(const_cast<void*>(pAllocateInfo->pNext),
+                    VK_STRUCTURE_TYPE_TEMP_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA));
+        }
 
         const VkMemoryDedicatedAllocateInfo* dedicatedAllocInfoPtr =
             vk_find_struct<VkMemoryDedicatedAllocateInfo>(pAllocateInfo);
 
         bool shouldPassThroughDedicatedAllocInfo =
-            !exportAllocateInfoPtr &&
-            !importAhbInfoPtr &&
-            !importBufferCollectionInfoPtr &&
-            !importVmoInfoPtr &&
+            !exportAllocateInfoPtr && !importAhbInfoPtr &&
+            !importBufferCollectionInfoPtr && !importVmoInfoPtr;
+
+#ifndef VK_USE_PLATFORM_FUCHSIA
+        shouldPassThroughDedicatedAllocInfo &=
             !isHostVisibleMemoryTypeIndexForGuest(
-                &mHostVisibleMemoryVirtInfo,
-                pAllocateInfo->memoryTypeIndex);
+                &mHostVisibleMemoryVirtInfo, pAllocateInfo->memoryTypeIndex);
 
         if (!exportAllocateInfoPtr &&
-            (importAhbInfoPtr || importBufferCollectionInfoPtr || importVmoInfoPtr) &&
+            (importAhbInfoPtr || importBufferCollectionInfoPtr ||
+             importVmoInfoPtr) &&
             dedicatedAllocInfoPtr &&
             isHostVisibleMemoryTypeIndexForGuest(
-                &mHostVisibleMemoryVirtInfo,
-                pAllocateInfo->memoryTypeIndex)) {
-            ALOGE("FATAL: It is not yet supported to import-allocate "
-                  "external memory that is both host visible and dedicated.");
+                &mHostVisibleMemoryVirtInfo, pAllocateInfo->memoryTypeIndex)) {
+            ALOGE(
+                "FATAL: It is not yet supported to import-allocate "
+                "external memory that is both host visible and dedicated.");
             abort();
         }
+#endif  // VK_USE_PLATFORM_FUCHSIA
 
         if (shouldPassThroughDedicatedAllocInfo &&
             dedicatedAllocInfoPtr) {
@@ -2006,8 +3229,10 @@
                 exportAllocateInfoPtr->handleTypes &
                 VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
             exportVmo =
-                exportAllocateInfoPtr->handleTypes &
-                VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA;
+                (exportAllocateInfoPtr->handleTypes &
+                    VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA) ||
+                (exportAllocateInfoPtr->handleTypes &
+                    VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA);
         } else if (importAhbInfoPtr) {
             importAhb = true;
         } else if (importBufferCollectionInfoPtr) {
@@ -2015,6 +3240,7 @@
         } else if (importVmoInfoPtr) {
             importVmo = true;
         }
+        bool isImport = importAhb || importBufferCollection || importVmo;
 
         if (exportAhb) {
             bool hasDedicatedImage = dedicatedAllocInfoPtr &&
@@ -2035,7 +3261,7 @@
 
                 auto it = info_VkImage.find(
                     dedicatedAllocInfoPtr->image);
-                if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
+                if (it == info_VkImage.end()) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
                 const auto& info = it->second;
                 const auto& imgCi = info.createInfo;
 
@@ -2051,7 +3277,7 @@
 
                 auto it = info_VkBuffer.find(
                     dedicatedAllocInfoPtr->buffer);
-                if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;
+                if (it == info_VkBuffer.end()) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
                 const auto& info = it->second;
                 const auto& bufCi = info.createInfo;
 
@@ -2072,7 +3298,7 @@
                     &ahw);
 
             if (ahbCreateRes != VK_SUCCESS) {
-                return ahbCreateRes;
+                _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(ahbCreateRes);
             }
         }
 
@@ -2080,14 +3306,14 @@
             ahw = importAhbInfoPtr->buffer;
             // We still need to acquire the AHardwareBuffer.
             importAndroidHardwareBuffer(
-                mThreadingCallbacks.hostConnectionGetFunc()->grallocHelper(),
+                ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper(),
                 importAhbInfoPtr, nullptr);
         }
 
         if (ahw) {
             ALOGD("%s: Import AHardwareBuffer", __func__);
             importCbInfo.colorBuffer =
-                mThreadingCallbacks.hostConnectionGetFunc()->grallocHelper()->
+                ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper()->
                     getHostHandle(AHardwareBuffer_getNativeHandle(ahw));
             vk_append_struct(&structChainIter, &importCbInfo);
         }
@@ -2097,19 +3323,21 @@
         if (importBufferCollection) {
 
 #ifdef VK_USE_PLATFORM_FUCHSIA
-            auto collection = reinterpret_cast<fuchsia::sysmem::BufferCollectionSyncPtr*>(
+            auto collection = reinterpret_cast<
+                fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
                 importBufferCollectionInfoPtr->collection);
-            fuchsia::sysmem::BufferCollectionInfo_2 info;
-            zx_status_t status2;
-            zx_status_t status = (*collection)->WaitForBuffersAllocated(&status2, &info);
-            if (status != ZX_OK || status2 != ZX_OK) {
-                ALOGE("WaitForBuffersAllocated failed: %d %d", status);
-                return VK_ERROR_INITIALIZATION_FAILED;
+            auto result = collection->WaitForBuffersAllocated();
+            if (!result.ok() || result.Unwrap()->status != ZX_OK) {
+                ALOGE("WaitForBuffersAllocated failed: %d %d", result.status(),
+                      GET_STATUS_SAFE(result, status));
+                _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
             }
+            fuchsia_sysmem::wire::BufferCollectionInfo2& info =
+                result.Unwrap()->buffer_collection_info;
             uint32_t index = importBufferCollectionInfoPtr->index;
             if (info.buffer_count < index) {
                 ALOGE("Invalid buffer index: %d %d", index);
-                return VK_ERROR_INITIALIZATION_FAILED;
+                _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_INITIALIZATION_FAILED);
             }
             vmo_handle = info.buffers[index].vmo.release();
 #endif
@@ -2124,7 +3352,29 @@
         if (exportVmo) {
             bool hasDedicatedImage = dedicatedAllocInfoPtr &&
                 (dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
-            VkImageCreateInfo imageCreateInfo = {};
+            bool hasDedicatedBuffer =
+                dedicatedAllocInfoPtr &&
+                (dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
+
+            if (hasDedicatedImage && hasDedicatedBuffer) {
+                ALOGE(
+                    "Invalid VkMemoryDedicatedAllocationInfo: At least one "
+                    "of image and buffer must be VK_NULL_HANDLE.");
+                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+            }
+
+            const VkImageCreateInfo* pImageCreateInfo = nullptr;
+
+            VkBufferConstraintsInfoFUCHSIA bufferConstraintsInfo = {
+                .sType =
+                    VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CREATE_INFO_FUCHSIA,
+                .pNext = nullptr,
+                .pBufferCreateInfo = nullptr,
+                .requiredFormatFeatures = 0,
+                .minCount = 1,
+            };
+            const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo =
+                nullptr;
 
             if (hasDedicatedImage) {
                 AutoLock lock(mLock);
@@ -2133,67 +3383,219 @@
                 if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
                 const auto& imageInfo = it->second;
 
-                imageCreateInfo = imageInfo.createInfo;
+                pImageCreateInfo = &imageInfo.createInfo;
             }
 
-            if (imageCreateInfo.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
-                                         VK_IMAGE_USAGE_TRANSFER_DST_BIT |
-                                         VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
-                                         VK_IMAGE_USAGE_SAMPLED_BIT)) {
-                fuchsia::sysmem::BufferCollectionTokenSyncPtr token;
-                zx_status_t status = mSysmemAllocator->AllocateSharedCollection(
-                    token.NewRequest());
-                if (status != ZX_OK) {
-                    ALOGE("AllocateSharedCollection failed: %d", status);
+            if (hasDedicatedBuffer) {
+                AutoLock lock(mLock);
+
+                auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer);
+                if (it == info_VkBuffer.end())
+                    return VK_ERROR_INITIALIZATION_FAILED;
+                const auto& bufferInfo = it->second;
+
+                bufferConstraintsInfo.pBufferCreateInfo =
+                    &bufferInfo.createInfo;
+                pBufferConstraintsInfo = &bufferConstraintsInfo;
+            }
+
+            hasDedicatedImage = hasDedicatedImage &&
+                                getBufferCollectionConstraintsVulkanImageUsage(
+                                    pImageCreateInfo);
+            hasDedicatedBuffer =
+                hasDedicatedBuffer &&
+                getBufferCollectionConstraintsVulkanBufferUsage(
+                    pBufferConstraintsInfo);
+
+            if (hasDedicatedImage || hasDedicatedBuffer) {
+                auto token_ends =
+                    fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
+                if (!token_ends.is_ok()) {
+                    ALOGE("zx_channel_create failed: %d", token_ends.status_value());
                     abort();
                 }
 
-                fuchsia::sysmem::BufferCollectionSyncPtr collection;
-                status = mSysmemAllocator->BindSharedCollection(
-                    std::move(token), collection.NewRequest());
-                if (status != ZX_OK) {
-                    ALOGE("BindSharedCollection failed: %d", status);
-                    abort();
-                }
-                setBufferCollectionConstraints(&collection,
-                                               &imageCreateInfo,
-                                               finalAllocInfo.allocationSize);
-
-                fuchsia::sysmem::BufferCollectionInfo_2 info;
-                zx_status_t status2;
-                status = collection->WaitForBuffersAllocated(&status2, &info);
-                if (status == ZX_OK && status2 == ZX_OK) {
-                    if (!info.buffer_count) {
-                      ALOGE("WaitForBuffersAllocated returned invalid count: %d", status);
-                      abort();
+                {
+                    auto result = mSysmemAllocator->AllocateSharedCollection(
+                        std::move(token_ends->server));
+                    if (!result.ok()) {
+                        ALOGE("AllocateSharedCollection failed: %d",
+                              result.status());
+                        abort();
                     }
-                    vmo_handle = info.buffers[0].vmo.release();
-                } else {
-                    ALOGE("WaitForBuffersAllocated failed: %d %d", status, status2);
+                }
+
+                auto collection_ends =
+                    fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
+                if (!collection_ends.is_ok()) {
+                    ALOGE("zx_channel_create failed: %d", collection_ends.status_value());
                     abort();
                 }
 
-                collection->Close();
+                {
+                    auto result = mSysmemAllocator->BindSharedCollection(
+                        std::move(token_ends->client), std::move(collection_ends->server));
+                    if (!result.ok()) {
+                        ALOGE("BindSharedCollection failed: %d",
+                              result.status());
+                        abort();
+                    }
+                }
+
+                fidl::WireSyncClient<fuchsia_sysmem::BufferCollection> collection(
+                    std::move(collection_ends->client));
+                if (hasDedicatedImage) {
+                    VkResult res = setBufferCollectionConstraints(
+                        enc, device, &collection, pImageCreateInfo);
+                    if (res == VK_ERROR_FORMAT_NOT_SUPPORTED) {
+                      ALOGE("setBufferCollectionConstraints failed: format %u is not supported",
+                            pImageCreateInfo->format);
+                      return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+                    }
+                    if (res != VK_SUCCESS) {
+                        ALOGE("setBufferCollectionConstraints failed: %d", res);
+                        abort();
+                    }
+                }
+
+                if (hasDedicatedBuffer) {
+                    VkResult res = setBufferCollectionBufferConstraints(
+                        &collection, pBufferConstraintsInfo);
+                    if (res != VK_SUCCESS) {
+                        ALOGE("setBufferCollectionBufferConstraints failed: %d",
+                              res);
+                        abort();
+                    }
+                }
+
+                {
+                    auto result = collection.WaitForBuffersAllocated();
+                    if (result.ok() && result.Unwrap()->status == ZX_OK) {
+                        fuchsia_sysmem::wire::BufferCollectionInfo2& info =
+                            result.Unwrap()->buffer_collection_info;
+                        if (!info.buffer_count) {
+                            ALOGE(
+                                "WaitForBuffersAllocated returned "
+                                "invalid count: %d",
+                                info.buffer_count);
+                            abort();
+                        }
+                        vmo_handle = info.buffers[0].vmo.release();
+                    } else {
+                        ALOGE("WaitForBuffersAllocated failed: %d %d",
+                              result.status(), GET_STATUS_SAFE(result, status));
+                        abort();
+                    }
+                }
+
+                collection.Close();
 
                 zx::vmo vmo_copy;
-                status = zx_handle_duplicate(vmo_handle,
-                                             ZX_RIGHT_SAME_RIGHTS,
-                                             vmo_copy.reset_and_get_address());
+                zx_status_t status = zx_handle_duplicate(vmo_handle, ZX_RIGHT_SAME_RIGHTS,
+                                                         vmo_copy.reset_and_get_address());
                 if (status != ZX_OK) {
                     ALOGE("Failed to duplicate VMO: %d", status);
                     abort();
                 }
-                // TODO(reveman): Use imageCreateInfo.format to determine color
-                // buffer format.
-                status = mControlDevice->CreateColorBuffer(
-                    std::move(vmo_copy),
-                    imageCreateInfo.extent.width,
-                    imageCreateInfo.extent.height,
-                    fuchsia::hardware::goldfish::ColorBufferFormatType::BGRA,
-                    &status2);
-                if (status != ZX_OK || status2 != ZX_OK) {
-                    ALOGE("CreateColorBuffer failed: %d:%d", status, status2);
-                    abort();
+
+                bool isHostVisible = isHostVisibleMemoryTypeIndexForGuest(
+                    &mHostVisibleMemoryVirtInfo,
+                    pAllocateInfo->memoryTypeIndex);
+
+                // Only device-local images need to create color buffer; for
+                // host-visible images, the color buffer is already created when
+                // sysmem allocates memory.
+                if (!isHostVisible) {
+                    if (pImageCreateInfo) {
+                        fuchsia_hardware_goldfish::wire::
+                            ColorBufferFormatType format;
+                        switch (pImageCreateInfo->format) {
+                            case VK_FORMAT_B8G8R8A8_SINT:
+                            case VK_FORMAT_B8G8R8A8_UNORM:
+                            case VK_FORMAT_B8G8R8A8_SRGB:
+                            case VK_FORMAT_B8G8R8A8_SNORM:
+                            case VK_FORMAT_B8G8R8A8_SSCALED:
+                            case VK_FORMAT_B8G8R8A8_USCALED:
+                                format = fuchsia_hardware_goldfish::wire::
+                                    ColorBufferFormatType::kBgra;
+                                break;
+                            case VK_FORMAT_R8G8B8A8_SINT:
+                            case VK_FORMAT_R8G8B8A8_UNORM:
+                            case VK_FORMAT_R8G8B8A8_SRGB:
+                            case VK_FORMAT_R8G8B8A8_SNORM:
+                            case VK_FORMAT_R8G8B8A8_SSCALED:
+                            case VK_FORMAT_R8G8B8A8_USCALED:
+                                format = fuchsia_hardware_goldfish::wire::
+                                    ColorBufferFormatType::kRgba;
+                                break;
+                            case VK_FORMAT_R8_UNORM:
+                            case VK_FORMAT_R8_UINT:
+                            case VK_FORMAT_R8_USCALED:
+                            case VK_FORMAT_R8_SNORM:
+                            case VK_FORMAT_R8_SINT:
+                            case VK_FORMAT_R8_SSCALED:
+                            case VK_FORMAT_R8_SRGB:
+                                format = fuchsia_hardware_goldfish::wire::
+                                    ColorBufferFormatType::kLuminance;
+                                break;
+                            case VK_FORMAT_R8G8_UNORM:
+                            case VK_FORMAT_R8G8_UINT:
+                            case VK_FORMAT_R8G8_USCALED:
+                            case VK_FORMAT_R8G8_SNORM:
+                            case VK_FORMAT_R8G8_SINT:
+                            case VK_FORMAT_R8G8_SSCALED:
+                            case VK_FORMAT_R8G8_SRGB:
+                                format = fuchsia_hardware_goldfish::wire::
+                                    ColorBufferFormatType::kRg;
+                                break;
+                            default:
+                                ALOGE("Unsupported format: %d",
+                                      pImageCreateInfo->format);
+                                abort();
+                        }
+
+                        fidl::FidlAllocator allocator;
+                        fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(
+                            allocator);
+                        createParams.set_width(allocator, pImageCreateInfo->extent.width)
+                            .set_height(allocator, pImageCreateInfo->extent.height)
+                            .set_format(allocator, format)
+                            .set_memory_property(allocator,
+                                fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
+
+                        auto result = mControlDevice->CreateColorBuffer2(
+                            std::move(vmo_copy), std::move(createParams));
+                        if (!result.ok() || result.Unwrap()->res != ZX_OK) {
+                            if (result.ok() &&
+                                result.Unwrap()->res == ZX_ERR_ALREADY_EXISTS) {
+                                ALOGD(
+                                    "CreateColorBuffer: color buffer already "
+                                    "exists\n");
+                            } else {
+                                ALOGE("CreateColorBuffer failed: %d:%d",
+                                      result.status(),
+                                      GET_STATUS_SAFE(result, res));
+                                abort();
+                            }
+                        }
+                    }
+                }
+
+                if (pBufferConstraintsInfo) {
+                    fidl::FidlAllocator allocator;
+                    fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(allocator);
+                    createParams.set_size(allocator,
+                            pBufferConstraintsInfo->pBufferCreateInfo->size)
+                        .set_memory_property(allocator,
+                            fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
+
+                    auto result =
+                        mControlDevice->CreateBuffer2(std::move(vmo_copy), std::move(createParams));
+                    if (!result.ok() || result.Unwrap()->result.is_err()) {
+                        ALOGE("CreateBuffer2 failed: %d:%d", result.status(),
+                              GET_STATUS_SAFE(result, result.err()));
+                        abort();
+                    }
                 }
             }
         }
@@ -2208,12 +3610,25 @@
                 abort();
             }
             zx_status_t status2 = ZX_OK;
-            status = mControlDevice->GetColorBuffer(
-                std::move(vmo_copy), &status2, &importCbInfo.colorBuffer);
-            if (status != ZX_OK || status2 != ZX_OK) {
-                ALOGE("GetColorBuffer failed: %d:%d", status, status2);
+
+            auto result = mControlDevice->GetBufferHandle(std::move(vmo_copy));
+            if (!result.ok() || result.Unwrap()->res != ZX_OK) {
+                ALOGE("GetBufferHandle failed: %d:%d", result.status(),
+                      GET_STATUS_SAFE(result, res));
+            } else {
+                fuchsia_hardware_goldfish::wire::BufferHandleType
+                    handle_type = result.Unwrap()->type;
+                uint32_t buffer_handle = result.Unwrap()->id;
+
+                if (handle_type == fuchsia_hardware_goldfish::wire::
+                                       BufferHandleType::kBuffer) {
+                    importBufferInfo.buffer = buffer_handle;
+                    vk_append_struct(&structChainIter, &importBufferInfo);
+                } else {
+                    importCbInfo.colorBuffer = buffer_handle;
+                    vk_append_struct(&structChainIter, &importCbInfo);
+                }
             }
-            vk_append_struct(&structChainIter, &importCbInfo);
         }
 #endif
 
@@ -2222,9 +3637,9 @@
                 finalAllocInfo.memoryTypeIndex)) {
             input_result =
                 enc->vkAllocateMemory(
-                    device, &finalAllocInfo, pAllocator, pMemory);
+                    device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
 
-            if (input_result != VK_SUCCESS) return input_result;
+            if (input_result != VK_SUCCESS) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(input_result);
 
             VkDeviceSize allocationSize = finalAllocInfo.allocationSize;
             setDeviceMemoryInfo(
@@ -2233,9 +3648,10 @@
                 0, nullptr,
                 finalAllocInfo.memoryTypeIndex,
                 ahw,
+                isImport,
                 vmo_handle);
 
-            return VK_SUCCESS;
+            _RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT;
         }
 
         // Device-local memory dealing is over. What follows:
@@ -2248,19 +3664,51 @@
             abort();
         }
 
+#ifdef VK_USE_PLATFORM_FUCHSIA
         if (vmo_handle != ZX_HANDLE_INVALID) {
-            ALOGE("%s: Host visible export/import allocation "
-                  "of VMO is not supported yet.",
-                  __func__);
-            abort();
+            input_result = enc->vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
+
+            // Get VMO handle rights, and only use allowed rights to map the
+            // host memory.
+            zx_info_handle_basic handle_info;
+            zx_status_t status = zx_object_get_info(vmo_handle, ZX_INFO_HANDLE_BASIC, &handle_info,
+                                        sizeof(handle_info), nullptr, nullptr);
+            if (status != ZX_OK) {
+                ALOGE("%s: cannot get vmo object info: vmo = %u status: %d.", __func__, vmo_handle,
+                      status);
+                return VK_ERROR_OUT_OF_HOST_MEMORY;
+            }
+
+            zx_vm_option_t vm_permission = 0u;
+            vm_permission |= (handle_info.rights & ZX_RIGHT_READ) ? ZX_VM_PERM_READ : 0;
+            vm_permission |= (handle_info.rights & ZX_RIGHT_WRITE) ? ZX_VM_PERM_WRITE : 0;
+
+            zx_paddr_t addr;
+            status = zx_vmar_map(zx_vmar_root_self(), vm_permission, 0, vmo_handle, 0,
+                finalAllocInfo.allocationSize, &addr);
+            if (status != ZX_OK) {
+                ALOGE("%s: cannot map vmar: status %d.", __func__, status);
+                return VK_ERROR_OUT_OF_HOST_MEMORY;
+            }
+
+            D("host visible alloc (external): "
+              "size 0x%llx host ptr %p mapped size 0x%llx",
+              (unsigned long long)finalAllocInfo.allocationSize, mappedPtr,
+              (unsigned long long)mappedSize);
+            setDeviceMemoryInfo(device, *pMemory,
+                finalAllocInfo.allocationSize, finalAllocInfo.allocationSize,
+                reinterpret_cast<uint8_t*>(addr), finalAllocInfo.memoryTypeIndex,
+                /*ahw=*/nullptr, isImport, vmo_handle);
+            return VK_SUCCESS;
         }
+#endif
 
         // Host visible memory, non external
         bool directMappingSupported = usingDirectMapping();
         if (!directMappingSupported) {
             input_result =
                 enc->vkAllocateMemory(
-                    device, &finalAllocInfo, pAllocator, pMemory);
+                    device, &finalAllocInfo, pAllocator, pMemory, true /* do lock */);
 
             if (input_result != VK_SUCCESS) return input_result;
 
@@ -2277,7 +3725,7 @@
                 finalAllocInfo.allocationSize,
                 mappedSize, mappedPtr,
                 finalAllocInfo.memoryTypeIndex);
-            return VK_SUCCESS;
+            _RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT;
         }
 
         // Host visible memory with direct mapping via
@@ -2293,7 +3741,7 @@
         AutoLock lock(mLock);
 
         auto it = info_VkDevice.find(device);
-        if (it == info_VkDevice.end()) return VK_ERROR_DEVICE_LOST;
+        if (it == info_VkDevice.end()) _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_DEVICE_LOST);
         auto& deviceInfo = it->second;
 
         auto& hostMemBlocksForTypeIndex =
@@ -2308,7 +3756,7 @@
                 deviceInfo);
 
         if (blockIndex == (HostMemBlockIndex) INVALID_HOST_MEM_BLOCK) {
-            return VK_ERROR_OUT_OF_HOST_MEMORY;
+            _RETURN_FAILURE_WITH_DEVICE_MEMORY_REPORT(VK_ERROR_OUT_OF_HOST_MEMORY);
         }
 
         VkDeviceMemory_Info virtualMemInfo;
@@ -2334,7 +3782,7 @@
 
         *pMemory = virtualMemInfo.subAlloc.subMemory;
 
-        return VK_SUCCESS;
+        _RETURN_SCUCCESS_WITH_DEVICE_MEMORY_REPORT;
     }
 
     void on_vkFreeMemory(
@@ -2348,11 +3796,35 @@
         auto it = info_VkDeviceMemory.find(memory);
         if (it == info_VkDeviceMemory.end()) return;
         auto& info = it->second;
+        uint64_t memoryObjectId = (uint64_t)(void*)memory;
+        if (info.ahw) {
+            memoryObjectId = getAHardwareBufferId(info.ahw);
+        }
+        emitDeviceMemoryReport(
+            info_VkDevice[device],
+            info.imported ? VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_UNIMPORT_EXT
+                          : VK_DEVICE_MEMORY_REPORT_EVENT_TYPE_FREE_EXT,
+            memoryObjectId,
+            0 /* size */,
+            VK_OBJECT_TYPE_DEVICE_MEMORY,
+            (uint64_t)(void*)memory
+        );
+
+#ifdef VK_USE_PLATFORM_FUCHSIA
+        if (info.vmoHandle && info.mappedPtr) {
+            zx_status_t status = zx_vmar_unmap(
+                zx_vmar_root_self(), reinterpret_cast<zx_paddr_t>(info.mappedPtr), info.mappedSize);
+            if (status != ZX_OK) {
+                ALOGE("%s: Cannot unmap mappedPtr: status %d", status);
+            }
+            info.mappedPtr = nullptr;
+        }
+#endif
 
         if (!info.directMapped) {
             lock.unlock();
             VkEncoder* enc = (VkEncoder*)context;
-            enc->vkFreeMemory(device, memory, pAllocateInfo);
+            enc->vkFreeMemory(device, memory, pAllocateInfo, true /* do lock */);
             return;
         }
 
@@ -2412,9 +3884,13 @@
         uint32_t normalBits) {
         uint32_t res = 0;
         for (uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) {
-            if (normalBits & (1 << i) &&
-                !isHostVisibleMemoryTypeIndexForGuest(
-                    &mHostVisibleMemoryVirtInfo, i)) {
+            bool shouldAcceptMemoryIndex = normalBits & (1 << i);
+#ifndef VK_USE_PLATFORM_FUCHSIA
+            shouldAcceptMemoryIndex &= !isHostVisibleMemoryTypeIndexForGuest(
+                &mHostVisibleMemoryVirtInfo, i);
+#endif  // VK_USE_PLATFORM_FUCHSIA
+
+            if (shouldAcceptMemoryIndex) {
                 res |= (1 << i);
             }
         }
@@ -2453,11 +3929,9 @@
         if (!info.external ||
             !info.externalCreateInfo.handleTypes) {
             transformNonExternalResourceMemoryRequirementsForGuest(reqs);
-            return;
+        } else {
+            transformExternalResourceMemoryRequirementsForGuest(reqs);
         }
-
-        transformExternalResourceMemoryRequirementsForGuest(reqs);
-
         setMemoryRequirementsForSysmemBackedImage(image, reqs);
     }
 
@@ -2494,6 +3968,7 @@
             !info.externalCreateInfo.handleTypes) {
             transformNonExternalResourceMemoryRequirementsForGuest(
                 &reqs2->memoryRequirements);
+            setMemoryRequirementsForSysmemBackedImage(image, &reqs2->memoryRequirements);
             return;
         }
 
@@ -2588,46 +4063,114 @@
         const VkBufferCollectionImageCreateInfoFUCHSIA* extBufferCollectionPtr =
             vk_find_struct<VkBufferCollectionImageCreateInfoFUCHSIA>(pCreateInfo);
         bool isSysmemBackedMemory = false;
+
+        if (extImgCiPtr &&
+            ((extImgCiPtr->handleTypes &
+                VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA) ||
+            (extImgCiPtr->handleTypes &
+                VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA))) {
+            isSysmemBackedMemory = true;
+        }
+
         if (extBufferCollectionPtr) {
-            auto collection = reinterpret_cast<fuchsia::sysmem::BufferCollectionSyncPtr*>(
+            auto collection = reinterpret_cast<
+                fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
                 extBufferCollectionPtr->collection);
             uint32_t index = extBufferCollectionPtr->index;
             zx::vmo vmo;
 
-            fuchsia::sysmem::BufferCollectionInfo_2 info;
-            zx_status_t status2;
-            zx_status_t status = (*collection)->WaitForBuffersAllocated(&status2, &info);
-            if (status == ZX_OK && status2 == ZX_OK) {
-                if (index < info.buffer_count) {
+            fuchsia_sysmem::wire::BufferCollectionInfo2 info;
+
+            auto result = collection->WaitForBuffersAllocated();
+            if (result.ok() && result.Unwrap()->status == ZX_OK) {
+                info = std::move(result.Unwrap()->buffer_collection_info);
+                if (index < info.buffer_count && info.settings.has_image_format_constraints) {
                     vmo = std::move(info.buffers[index].vmo);
                 }
             } else {
-                ALOGE("WaitForBuffersAllocated failed: %d %d", status, status2);
+                ALOGE("WaitForBuffersAllocated failed: %d %d", result.status(),
+                      GET_STATUS_SAFE(result, status));
             }
 
             if (vmo.is_valid()) {
-                zx_status_t status2 = ZX_OK;
-                status = mControlDevice->CreateColorBuffer(
-                    std::move(vmo),
-                    localCreateInfo.extent.width,
-                    localCreateInfo.extent.height,
-                    fuchsia::hardware::goldfish::ColorBufferFormatType::BGRA,
-                    &status2);
-                if (status != ZX_OK || (status2 != ZX_OK && status2 != ZX_ERR_ALREADY_EXISTS)) {
-                    ALOGE("CreateColorBuffer failed: %d:%d", status, status2);
+                zx::vmo vmo_dup;
+                if (zx_status_t status = vmo.duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
+                    status != ZX_OK) {
+                    ALOGE("%s: zx_vmo_duplicate failed: %d", __func__, status);
+                    abort();
+                }
+
+                auto buffer_handle_result = mControlDevice->GetBufferHandle(std::move(vmo_dup));
+                if (!buffer_handle_result.ok()) {
+                    ALOGE("%s: GetBufferHandle FIDL error: %d", __func__,
+                          buffer_handle_result.status());
+                    abort();
+                }
+                if (buffer_handle_result.value().res == ZX_OK) {
+                    // Buffer handle already exists.
+                    // If it is a ColorBuffer, no-op; Otherwise return error.
+                    if (buffer_handle_result.value().type !=
+                        fuchsia_hardware_goldfish::wire::BufferHandleType::kColorBuffer) {
+                        ALOGE("%s: BufferHandle %u is not a ColorBuffer", __func__,
+                              buffer_handle_result.value().id);
+                        return VK_ERROR_OUT_OF_HOST_MEMORY;
+                    }
+                } else if (buffer_handle_result.value().res == ZX_ERR_NOT_FOUND) {
+                    // Buffer handle not found. Create ColorBuffer based on buffer settings.
+                    auto format =
+                        info.settings.image_format_constraints.pixel_format.type ==
+                                fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8
+                            ? fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kRgba
+                            : fuchsia_hardware_goldfish::wire::ColorBufferFormatType::kBgra;
+
+                    uint32_t memory_property =
+                        info.settings.buffer_settings.heap ==
+                                fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal
+                            ? fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal
+                            : fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;
+
+                    fidl::FidlAllocator allocator;
+                    fuchsia_hardware_goldfish::wire::CreateColorBuffer2Params createParams(
+                        allocator);
+                    createParams.set_width(allocator,
+                            info.settings.image_format_constraints.min_coded_width)
+                        .set_height(allocator,
+                            info.settings.image_format_constraints.min_coded_height)
+                        .set_format(allocator, format)
+                        .set_memory_property(allocator, memory_property);
+
+                    auto result =
+                        mControlDevice->CreateColorBuffer2(std::move(vmo), std::move(createParams));
+                    if (!result.ok() || result.Unwrap()->res != ZX_OK) {
+                        ALOGE("CreateColorBuffer failed: %d:%d", result.status(),
+                              GET_STATUS_SAFE(result, res));
+                    }
+                }
+
+                if (info.settings.buffer_settings.heap ==
+                    fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible) {
+                    ALOGD(
+                        "%s: Image uses host visible memory heap; set tiling "
+                        "to linear to match host ImageCreateInfo",
+                        __func__);
+                    localCreateInfo.tiling = VK_IMAGE_TILING_LINEAR;
                 }
             }
             isSysmemBackedMemory = true;
         }
+
+        if (isSysmemBackedMemory) {
+            localCreateInfo.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
+        }
 #endif
 
         VkResult res;
         VkMemoryRequirements memReqs;
 
         if (supportsCreateResourcesWithRequirements()) {
-            res = enc->vkCreateImageWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator, pImage, &memReqs);
+            res = enc->vkCreateImageWithRequirementsGOOGLE(device, &localCreateInfo, pAllocator, pImage, &memReqs, true /* do lock */);
         } else {
-            res = enc->vkCreateImage(device, &localCreateInfo, pAllocator, pImage);
+            res = enc->vkCreateImage(device, &localCreateInfo, pAllocator, pImage, true /* do lock */);
         }
 
         if (res != VK_SUCCESS) return res;
@@ -2695,7 +4238,7 @@
 
         VkEncoder* enc = (VkEncoder*)context;
         VkResult res = enc->vkCreateSamplerYcbcrConversion(
-            device, &localCreateInfo, pAllocator, pYcbcrConversion);
+            device, &localCreateInfo, pAllocator, pYcbcrConversion, true /* do lock */);
 
         if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
             ALOGE("FATAL: vkCreateSamplerYcbcrConversion returned a reserved value (VK_YCBCR_CONVERSION_DO_NOTHING)");
@@ -2711,7 +4254,7 @@
         const VkAllocationCallbacks* pAllocator) {
         VkEncoder* enc = (VkEncoder*)context;
         if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
-            enc->vkDestroySamplerYcbcrConversion(device, ycbcrConversion, pAllocator);
+            enc->vkDestroySamplerYcbcrConversion(device, ycbcrConversion, pAllocator, true /* do lock */);
         }
     }
 
@@ -2745,7 +4288,7 @@
 
         VkEncoder* enc = (VkEncoder*)context;
         VkResult res = enc->vkCreateSamplerYcbcrConversionKHR(
-            device, &localCreateInfo, pAllocator, pYcbcrConversion);
+            device, &localCreateInfo, pAllocator, pYcbcrConversion, true /* do lock */);
 
         if (*pYcbcrConversion == VK_YCBCR_CONVERSION_DO_NOTHING) {
             ALOGE("FATAL: vkCreateSamplerYcbcrConversionKHR returned a reserved value (VK_YCBCR_CONVERSION_DO_NOTHING)");
@@ -2761,7 +4304,7 @@
         const VkAllocationCallbacks* pAllocator) {
         VkEncoder* enc = (VkEncoder*)context;
         if (ycbcrConversion != VK_YCBCR_CONVERSION_DO_NOTHING) {
-            enc->vkDestroySamplerYcbcrConversionKHR(device, ycbcrConversion, pAllocator);
+            enc->vkDestroySamplerYcbcrConversionKHR(device, ycbcrConversion, pAllocator, true /* do lock */);
         }
     }
 
@@ -2775,7 +4318,7 @@
         VkSamplerCreateInfo localCreateInfo = vk_make_orphan_copy(*pCreateInfo);
         vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&localCreateInfo);
 
-#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(VK_USE_PLATFORM_FUCHSIA_KHR)
+#if defined(VK_USE_PLATFORM_ANDROID_KHR) || defined(VK_USE_PLATFORM_FUCHSIA)
         VkSamplerYcbcrConversionInfo localVkSamplerYcbcrConversionInfo;
         const VkSamplerYcbcrConversionInfo* samplerYcbcrConversionInfo =
             vk_find_struct<VkSamplerYcbcrConversionInfo>(pCreateInfo);
@@ -2788,7 +4331,7 @@
 #endif
 
         VkEncoder* enc = (VkEncoder*)context;
-        return enc->vkCreateSampler(device, &localCreateInfo, pAllocator, pSampler);
+        return enc->vkCreateSampler(device, &localCreateInfo, pAllocator, pSampler, true /* do lock */);
     }
 
     void on_vkGetPhysicalDeviceExternalFenceProperties(
@@ -2851,14 +4394,16 @@
 #endif
 
         input_result = enc->vkCreateFence(
-            device, &finalCreateInfo, pAllocator, pFence);
+            device, &finalCreateInfo, pAllocator, pFence, true /* do lock */);
 
         if (input_result != VK_SUCCESS) return input_result;
 
 #ifdef VK_USE_PLATFORM_ANDROID_KHR
         if (exportSyncFd) {
-            ALOGV("%s: ensure sync device\n", __func__);
-            ensureSyncDeviceFd();
+            if (!mFeatureInfo->hasVirtioGpuNativeSync) {
+                ALOGV("%s: ensure sync device\n", __func__);
+                ensureSyncDeviceFd();
+            }
 
             ALOGV("%s: getting fence info\n", __func__);
             AutoLock lock(mLock);
@@ -2886,7 +4431,7 @@
         VkFence fence,
         const VkAllocationCallbacks* pAllocator) {
         VkEncoder* enc = (VkEncoder*)context;
-        enc->vkDestroyFence(device, fence, pAllocator);
+        enc->vkDestroyFence(device, fence, pAllocator, true /* do lock */);
     }
 
     VkResult on_vkResetFences(
@@ -2897,7 +4442,7 @@
         const VkFence* pFences) {
 
         VkEncoder* enc = (VkEncoder*)context;
-        VkResult res = enc->vkResetFences(device, fenceCount, pFences);
+        VkResult res = enc->vkResetFences(device, fenceCount, pFences, true /* do lock */);
 
         if (res != VK_SUCCESS) return res;
 
@@ -3014,7 +4559,7 @@
             return VK_ERROR_OUT_OF_HOST_MEMORY;
         }
 
-        VkResult currentFenceStatus = enc->vkGetFenceStatus(device, pGetFdInfo->fence);
+        VkResult currentFenceStatus = enc->vkGetFenceStatus(device, pGetFdInfo->fence, true /* do lock */);
 
         if (VK_SUCCESS == currentFenceStatus) { // Fence already signaled
             ALOGV("%s: VK_SUCCESS: already signaled\n", __func__);
@@ -3049,11 +4594,52 @@
                 return VK_ERROR_OUT_OF_HOST_MEMORY;
             }
 
-            goldfish_sync_queue_work(
-                mSyncDeviceFd,
-                get_host_u64_VkFence(pGetFdInfo->fence) /* the handle */,
-                GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
-                pFd);
+            if (mFeatureInfo->hasVirtioGpuNativeSync) {
+#if !defined(HOST_BUILD) && defined(VK_USE_PLATFORM_ANDROID_KHR)
+                uint64_t hostFenceHandle = get_host_u64_VkFence(pGetFdInfo->fence);
+                uint32_t hostFenceHandleLo = (uint32_t)hostFenceHandle;
+                uint32_t hostFenceHandleHi = (uint32_t)(hostFenceHandle >> 32);
+
+                uint64_t hostDeviceHandle = get_host_u64_VkDevice(device);
+                uint32_t hostDeviceHandleLo = (uint32_t)hostDeviceHandle;
+                uint32_t hostDeviceHandleHi = (uint32_t)(hostFenceHandle >> 32);
+
+                #define VIRTIO_GPU_NATIVE_SYNC_VULKAN_CREATE_EXPORT_FD 0xa000
+
+                uint32_t cmdDwords[5] = {
+                    VIRTIO_GPU_NATIVE_SYNC_VULKAN_CREATE_EXPORT_FD,
+                    hostDeviceHandleLo,
+                    hostDeviceHandleHi,
+                    hostFenceHandleLo,
+                    hostFenceHandleHi,
+                };
+
+                drm_virtgpu_execbuffer execbuffer = {
+                    .flags = VIRTGPU_EXECBUF_FENCE_FD_OUT,
+                    .size = 5 * sizeof(uint32_t),
+                    .command = (uint64_t)(cmdDwords),
+                    .bo_handles = 0,
+                    .num_bo_handles = 0,
+                    .fence_fd = -1,
+                };
+
+                int res = drmIoctl(mRendernodeFd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &execbuffer);
+                if (res) {
+                    ALOGE("%s: Failed to virtgpu execbuffer: sterror: %s errno: %d\n", __func__,
+                            strerror(errno), errno);
+                    abort();
+                }
+
+                *pFd = execbuffer.fence_fd;
+#endif
+            } else {
+                goldfish_sync_queue_work(
+                    mSyncDeviceFd,
+                    get_host_u64_VkFence(pGetFdInfo->fence) /* the handle */,
+                    GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
+                    pFd);
+            }
+
             // relinquish ownership
             info.syncFd = -1;
             ALOGV("%s: got fd: %d\n", __func__, *pFd);
@@ -3095,10 +4681,12 @@
             }
         }
 
+        lock.unlock();
+
         if (fencesExternal.empty()) {
             // No need for work pool, just wait with host driver.
             return enc->vkWaitForFences(
-                device, fenceCount, pFences, waitAll, timeout);
+                device, fenceCount, pFences, waitAll, timeout, true /* do lock */);
         } else {
             // Depending on wait any or wait all,
             // schedule a wait group with waitAny/waitAll
@@ -3118,10 +4706,10 @@
                 tasks.push_back([this,
                                  fencesNonExternal /* copy of vector */,
                                  device, waitAll, timeout] {
-                    auto hostConn = mThreadingCallbacks.hostConnectionGetFunc();
-                    auto vkEncoder = mThreadingCallbacks.vkEncoderGetFunc(hostConn);
+                    auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
+                    auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
                     ALOGV("%s: vkWaitForFences to host\n", __func__);
-                    vkEncoder->vkWaitForFences(device, fencesNonExternal.size(), fencesNonExternal.data(), waitAll, timeout);
+                    vkEncoder->vkWaitForFences(device, fencesNonExternal.size(), fencesNonExternal.data(), waitAll, timeout, true /* do lock */);
                 });
             }
 
@@ -3145,7 +4733,7 @@
         }
 #else
         return enc->vkWaitForFences(
-            device, fenceCount, pFences, waitAll, timeout);
+            device, fenceCount, pFences, waitAll, timeout, true /* do lock */);
 #endif
     }
 
@@ -3160,16 +4748,36 @@
         VkEncoder* enc = (VkEncoder*)context;
 
         VkResult res = enc->vkCreateDescriptorPool(
-            device, pCreateInfo, pAllocator, pDescriptorPool);
+            device, pCreateInfo, pAllocator, pDescriptorPool, true /* do lock */);
 
         if (res != VK_SUCCESS) return res;
 
-        AutoLock lock(mLock);
-        auto it = info_VkDescriptorPool.find(*pDescriptorPool);
-        if (it == info_VkDescriptorPool.end()) return res;
+        VkDescriptorPool pool = *pDescriptorPool;
 
-        auto &info = it->second;
-        info.createFlags = pCreateInfo->flags;
+        struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
+        dp->allocInfo = new DescriptorPoolAllocationInfo;
+        dp->allocInfo->device = device;
+        dp->allocInfo->createFlags = pCreateInfo->flags;
+        dp->allocInfo->maxSets = pCreateInfo->maxSets;
+        dp->allocInfo->usedSets = 0;
+
+        for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; ++i) {
+            dp->allocInfo->descriptorCountInfo.push_back({
+                pCreateInfo->pPoolSizes[i].type,
+                pCreateInfo->pPoolSizes[i].descriptorCount,
+                0, /* used */
+            });
+        }
+
+        if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
+            std::vector<uint64_t> poolIds(pCreateInfo->maxSets);
+
+            uint32_t count = pCreateInfo->maxSets;
+            enc->vkCollectDescriptorPoolIdsGOOGLE(
+                device, pool, &count, poolIds.data(), true /* do lock */);
+
+            dp->allocInfo->freePoolIds = poolIds;
+        }
 
         return res;
     }
@@ -3180,9 +4788,13 @@
         VkDescriptorPool descriptorPool,
         const VkAllocationCallbacks* pAllocator) {
 
+        if (!descriptorPool) return;
+
         VkEncoder* enc = (VkEncoder*)context;
 
-        enc->vkDestroyDescriptorPool(device, descriptorPool, pAllocator);
+        clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
+
+        enc->vkDestroyDescriptorPool(device, descriptorPool, pAllocator, true /* do lock */);
     }
 
     VkResult on_vkResetDescriptorPool(
@@ -3192,33 +4804,28 @@
         VkDescriptorPool descriptorPool,
         VkDescriptorPoolResetFlags flags) {
 
+        if (!descriptorPool) return VK_ERROR_INITIALIZATION_FAILED;
+
         VkEncoder* enc = (VkEncoder*)context;
 
-        VkResult res = enc->vkResetDescriptorPool(device, descriptorPool, flags);
+        VkResult res = enc->vkResetDescriptorPool(device, descriptorPool, flags, true /* do lock */);
 
         if (res != VK_SUCCESS) return res;
 
-        AutoLock lock(mLock);
-        clearDescriptorPoolLocked(descriptorPool);
+        clearDescriptorPoolAndUnregisterDescriptorSets(context, device, descriptorPool);
         return res;
     }
 
     VkResult on_vkAllocateDescriptorSets(
         void* context,
         VkResult,
-        VkDevice                                    device,
+        VkDevice device,
         const VkDescriptorSetAllocateInfo*          pAllocateInfo,
         VkDescriptorSet*                            pDescriptorSets) {
 
         VkEncoder* enc = (VkEncoder*)context;
 
-        VkResult res = enc->vkAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
-
-        if (res != VK_SUCCESS) return res;
-
-        AutoLock lock(mLock);
-        initDescriptorSetStateLocked(pAllocateInfo, pDescriptorSets);
-        return res;
+        return allocAndInitializeDescriptorSets(context, device, pAllocateInfo, pDescriptorSets);
     }
 
     VkResult on_vkFreeDescriptorSets(
@@ -3239,23 +4846,61 @@
         {
             AutoLock lock(mLock);
 
+            // Pool was destroyed
+            if (info_VkDescriptorPool.find(descriptorPool) == info_VkDescriptorPool.end()) {
+                return VK_SUCCESS;
+            }
+
             if (!descriptorPoolSupportsIndividualFreeLocked(descriptorPool))
                 return VK_SUCCESS;
 
-            for (uint32_t i = 0; i < descriptorSetCount; ++i) {
-                if (descriptorSetReallyAllocedFromPoolLocked(
-                        pDescriptorSets[i], descriptorPool)) {
-                    toActuallyFree.push_back(pDescriptorSets[i]);
+            std::vector<VkDescriptorSet> existingDescriptorSets;;
+
+            // Check if this descriptor set was in the pool's set of allocated descriptor sets,
+            // to guard against double free (Double free is allowed by the client)
+            {
+                auto allocedSets = as_goldfish_VkDescriptorPool(descriptorPool)->allocInfo->allocedSets;
+
+                for (uint32_t i = 0; i < descriptorSetCount; ++i) {
+
+                    if (allocedSets.end() == allocedSets.find(pDescriptorSets[i])) {
+                        ALOGV("%s: Warning: descriptor set %p not found in pool. Was this double-freed?\n", __func__,
+                              (void*)pDescriptorSets[i]);
+                        continue;
+                    }
+
+                    auto it = info_VkDescriptorSet.find(pDescriptorSets[i]);
+                    if (it == info_VkDescriptorSet.end())
+                        continue;
+
+                    existingDescriptorSets.push_back(pDescriptorSets[i]);
+                }
+            }
+
+            for (auto set : existingDescriptorSets) {
+                if (removeDescriptorSetFromPool(set, mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate)) {
+                    toActuallyFree.push_back(set);
                 }
             }
 
             if (toActuallyFree.empty()) return VK_SUCCESS;
         }
 
-        return enc->vkFreeDescriptorSets(
-            device, descriptorPool,
-            (uint32_t)toActuallyFree.size(),
-            toActuallyFree.data());
+        if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
+            // In the batched set update case, decrement refcount on the set layout
+            // and only free on host if we satisfied a pending allocation on the
+            // host.
+            for (uint32_t i = 0; i < toActuallyFree.size(); ++i) {
+                VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(toActuallyFree[i])->reified->setLayout;
+                decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
+            }
+            freeDescriptorSetsIfHostAllocated(
+                enc, device, (uint32_t)toActuallyFree.size(), toActuallyFree.data());
+        } else {
+            // In the non-batched set update case, just free them directly.
+            enc->vkFreeDescriptorSets(device, descriptorPool, (uint32_t)toActuallyFree.size(), toActuallyFree.data(), true /* do lock */);
+        }
+        return VK_SUCCESS;
     }
 
     VkResult on_vkCreateDescriptorSetLayout(
@@ -3269,19 +4914,17 @@
         VkEncoder* enc = (VkEncoder*)context;
 
         VkResult res = enc->vkCreateDescriptorSetLayout(
-            device, pCreateInfo, pAllocator, pSetLayout);
+            device, pCreateInfo, pAllocator, pSetLayout, true /* do lock */);
 
         if (res != VK_SUCCESS) return res;
 
-        AutoLock lock(mLock);
-
-        auto it = info_VkDescriptorSetLayout.find(*pSetLayout);
-        if (it == info_VkDescriptorSetLayout.end()) return res;
-
-        auto& info = it->second;
+        struct goldfish_VkDescriptorSetLayout* dsl =
+            as_goldfish_VkDescriptorSetLayout(*pSetLayout);
+        dsl->layoutInfo = new DescriptorSetLayoutInfo;
         for (uint32_t i = 0; i < pCreateInfo->bindingCount; ++i) {
-            info.bindings.push_back(pCreateInfo->pBindings[i]);
+            dsl->layoutInfo->bindings.push_back(pCreateInfo->pBindings[i]);
         }
+        dsl->layoutInfo->refcount = 1;
 
         return res;
     }
@@ -3296,31 +4939,82 @@
 
         VkEncoder* enc = (VkEncoder*)context;
 
-        std::vector<std::vector<VkDescriptorImageInfo>> imageInfosPerWrite(
-            descriptorWriteCount);
+        std::vector<VkDescriptorImageInfo> transformedImageInfos;
+        std::vector<VkWriteDescriptorSet> transformedWrites(descriptorWriteCount);
 
-        std::vector<VkWriteDescriptorSet> writesWithSuppressedSamplers;
+        memcpy(transformedWrites.data(), pDescriptorWrites, sizeof(VkWriteDescriptorSet) * descriptorWriteCount);
+
+        size_t imageInfosNeeded = 0;
+        for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
+            if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
+            if (!transformedWrites[i].pImageInfo) continue;
+
+            imageInfosNeeded += transformedWrites[i].descriptorCount;
+        }
+
+        transformedImageInfos.resize(imageInfosNeeded);
+
+        size_t imageInfoIndex = 0;
+        for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
+            if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
+            if (!transformedWrites[i].pImageInfo) continue;
+
+            for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
+                transformedImageInfos[imageInfoIndex] = transformedWrites[i].pImageInfo[j];
+                ++imageInfoIndex;
+            }
+            transformedWrites[i].pImageInfo = &transformedImageInfos[imageInfoIndex - transformedWrites[i].descriptorCount];
+        }
 
         {
+            // Validate and filter samplers
             AutoLock lock(mLock);
+            size_t imageInfoIndex = 0;
             for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
-                writesWithSuppressedSamplers.push_back(
-                    createImmutableSamplersFilteredWriteDescriptorSetLocked(
-                        pDescriptorWrites + i,
-                        imageInfosPerWrite.data() + i));
+
+                if (!isDescriptorTypeImageInfo(transformedWrites[i].descriptorType)) continue;
+                if (!transformedWrites[i].pImageInfo) continue;
+
+                bool isImmutableSampler =
+                    descriptorBindingIsImmutableSampler(
+                        transformedWrites[i].dstSet,
+                        transformedWrites[i].dstBinding);
+
+                for (uint32_t j = 0; j < transformedWrites[i].descriptorCount; ++j) {
+                    if (isImmutableSampler) {
+                        transformedImageInfos[imageInfoIndex].sampler = 0;
+                    }
+                    transformedImageInfos[imageInfoIndex] =
+                        filterNonexistentSampler(transformedImageInfos[imageInfoIndex]);
+                    ++imageInfoIndex;
+                }
             }
         }
 
-        enc->vkUpdateDescriptorSets(
-            device, descriptorWriteCount, writesWithSuppressedSamplers.data(),
-            descriptorCopyCount, pDescriptorCopies);
+        if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
+            for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
+                VkDescriptorSet set = transformedWrites[i].dstSet;
+                doEmulatedDescriptorWrite(&transformedWrites[i],
+                        as_goldfish_VkDescriptorSet(set)->reified);
+            }
+
+            for (uint32_t i = 0; i < descriptorCopyCount; ++i) {
+                doEmulatedDescriptorCopy(&pDescriptorCopies[i],
+                        as_goldfish_VkDescriptorSet(pDescriptorCopies[i].srcSet)->reified,
+                        as_goldfish_VkDescriptorSet(pDescriptorCopies[i].dstSet)->reified);
+            }
+        } else {
+            enc->vkUpdateDescriptorSets(
+                    device, descriptorWriteCount, transformedWrites.data(),
+                    descriptorCopyCount, pDescriptorCopies, true /* do lock */);
+        }
     }
 
     void on_vkDestroyImage(
         void* context,
         VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
         VkEncoder* enc = (VkEncoder*)context;
-        enc->vkDestroyImage(device, image, pAllocator);
+        enc->vkDestroyImage(device, image, pAllocator, true /* do lock */);
     }
 
     void setMemoryRequirementsForSysmemBackedImage(
@@ -3362,7 +5056,7 @@
         VkEncoder* enc = (VkEncoder*)context;
 
         enc->vkGetImageMemoryRequirements(
-            device, image, pMemoryRequirements);
+            device, image, pMemoryRequirements, true /* do lock */);
 
         lock.lock();
 
@@ -3378,7 +5072,7 @@
         VkMemoryRequirements2 *pMemoryRequirements) {
         VkEncoder* enc = (VkEncoder*)context;
         enc->vkGetImageMemoryRequirements2(
-            device, pInfo, pMemoryRequirements);
+            device, pInfo, pMemoryRequirements, true /* do lock */);
         transformImageMemoryRequirements2ForGuest(
             pInfo->image, pMemoryRequirements);
     }
@@ -3388,7 +5082,7 @@
         VkMemoryRequirements2 *pMemoryRequirements) {
         VkEncoder* enc = (VkEncoder*)context;
         enc->vkGetImageMemoryRequirements2KHR(
-            device, pInfo, pMemoryRequirements);
+            device, pInfo, pMemoryRequirements, true /* do lock */);
         transformImageMemoryRequirements2ForGuest(
             pInfo->image, pMemoryRequirements);
     }
@@ -3398,21 +5092,21 @@
         VkDevice device, VkImage image, VkDeviceMemory memory,
         VkDeviceSize memoryOffset) {
         VkEncoder* enc = (VkEncoder*)context;
-        return enc->vkBindImageMemory(device, image, memory, memoryOffset);
+        return enc->vkBindImageMemory(device, image, memory, memoryOffset, true /* do lock */);
     }
 
     VkResult on_vkBindImageMemory2(
         void* context, VkResult,
         VkDevice device, uint32_t bindingCount, const VkBindImageMemoryInfo* pBindInfos) {
         VkEncoder* enc = (VkEncoder*)context;
-        return enc->vkBindImageMemory2(device, bindingCount, pBindInfos);
+        return enc->vkBindImageMemory2(device, bindingCount, pBindInfos, true /* do lock */);
     }
 
     VkResult on_vkBindImageMemory2KHR(
         void* context, VkResult,
         VkDevice device, uint32_t bindingCount, const VkBindImageMemoryInfo* pBindInfos) {
         VkEncoder* enc = (VkEncoder*)context;
-        return enc->vkBindImageMemory2KHR(device, bindingCount, pBindInfos);
+        return enc->vkBindImageMemory2KHR(device, bindingCount, pBindInfos, true /* do lock */);
     }
 
     VkResult on_vkCreateBuffer(
@@ -3422,13 +5116,69 @@
         VkBuffer *pBuffer) {
         VkEncoder* enc = (VkEncoder*)context;
 
+#ifdef VK_USE_PLATFORM_FUCHSIA
+        Optional<zx::vmo> vmo;
+        bool isSysmemBackedMemory = false;
+
+        const VkExternalMemoryBufferCreateInfo* extBufCiPtr =
+            vk_find_struct<VkExternalMemoryBufferCreateInfo>(pCreateInfo);
+        if (extBufCiPtr &&
+            ((extBufCiPtr->handleTypes &
+             VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA) ||
+            (extBufCiPtr->handleTypes &
+             VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA))) {
+            isSysmemBackedMemory = true;
+        }
+
+        const auto* extBufferCollectionPtr =
+                vk_find_struct<VkBufferCollectionBufferCreateInfoFUCHSIA>(
+                        pCreateInfo);
+
+        if (extBufferCollectionPtr) {
+            auto collection = reinterpret_cast<
+                fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
+                extBufferCollectionPtr->collection);
+            uint32_t index = extBufferCollectionPtr->index;
+
+            auto result = collection->WaitForBuffersAllocated();
+            if (result.ok() && result.Unwrap()->status == ZX_OK) {
+                auto& info = result.Unwrap()->buffer_collection_info;
+                if (index < info.buffer_count) {
+                    vmo = android::base::makeOptional(
+                            std::move(info.buffers[index].vmo));
+                }
+            } else {
+                ALOGE("WaitForBuffersAllocated failed: %d %d", result.status(),
+                      GET_STATUS_SAFE(result, status));
+            }
+
+            if (vmo && vmo->is_valid()) {
+                fidl::FidlAllocator allocator;
+                fuchsia_hardware_goldfish::wire::CreateBuffer2Params createParams(allocator);
+                createParams.set_size(allocator, pCreateInfo->size)
+                    .set_memory_property(allocator,
+                        fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal);
+
+                auto result =
+                    mControlDevice->CreateBuffer2(std::move(*vmo), std::move(createParams));
+                if (!result.ok() ||
+                    (result.Unwrap()->result.is_err() != ZX_OK &&
+                     result.Unwrap()->result.err() != ZX_ERR_ALREADY_EXISTS)) {
+                    ALOGE("CreateBuffer2 failed: %d:%d", result.status(),
+                          GET_STATUS_SAFE(result, result.err()));
+                }
+                isSysmemBackedMemory = true;
+            }
+        }
+#endif  // VK_USE_PLATFORM_FUCHSIA
+
         VkResult res;
         VkMemoryRequirements memReqs;
 
         if (supportsCreateResourcesWithRequirements()) {
-            res = enc->vkCreateBufferWithRequirementsGOOGLE(device, pCreateInfo, pAllocator, pBuffer, &memReqs);
+            res = enc->vkCreateBufferWithRequirementsGOOGLE(device, pCreateInfo, pAllocator, pBuffer, &memReqs, true /* do lock */);
         } else {
-            res = enc->vkCreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
+            res = enc->vkCreateBuffer(device, pCreateInfo, pAllocator, pBuffer, true /* do lock */);
         }
 
         if (res != VK_SUCCESS) return res;
@@ -3455,6 +5205,12 @@
             info.externalCreateInfo = *extBufCi;
         }
 
+#ifdef VK_USE_PLATFORM_FUCHSIA
+        if (isSysmemBackedMemory) {
+            info.isSysmemBackedMemory = true;
+        }
+#endif
+
         if (info.baseRequirementsKnown) {
             transformBufferMemoryRequirementsForGuestLocked(*pBuffer, &memReqs);
             info.baseRequirements = memReqs;
@@ -3467,7 +5223,7 @@
         void* context,
         VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
         VkEncoder* enc = (VkEncoder*)context;
-        enc->vkDestroyBuffer(device, buffer, pAllocator);
+        enc->vkDestroyBuffer(device, buffer, pAllocator, true /* do lock */);
     }
 
     void on_vkGetBufferMemoryRequirements(
@@ -3489,7 +5245,7 @@
 
         VkEncoder* enc = (VkEncoder*)context;
         enc->vkGetBufferMemoryRequirements(
-            device, buffer, pMemoryRequirements);
+            device, buffer, pMemoryRequirements, true /* do lock */);
 
         lock.lock();
 
@@ -3503,7 +5259,7 @@
         void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
         VkMemoryRequirements2* pMemoryRequirements) {
         VkEncoder* enc = (VkEncoder*)context;
-        enc->vkGetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements);
+        enc->vkGetBufferMemoryRequirements2(device, pInfo, pMemoryRequirements, true /* do lock */);
         transformBufferMemoryRequirements2ForGuest(
             pInfo->buffer, pMemoryRequirements);
     }
@@ -3512,7 +5268,7 @@
         void* context, VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
         VkMemoryRequirements2* pMemoryRequirements) {
         VkEncoder* enc = (VkEncoder*)context;
-        enc->vkGetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements);
+        enc->vkGetBufferMemoryRequirements2KHR(device, pInfo, pMemoryRequirements, true /* do lock */);
         transformBufferMemoryRequirements2ForGuest(
             pInfo->buffer, pMemoryRequirements);
     }
@@ -3522,7 +5278,7 @@
         VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset) {
         VkEncoder *enc = (VkEncoder *)context;
         return enc->vkBindBufferMemory(
-            device, buffer, memory, memoryOffset);
+            device, buffer, memory, memoryOffset, true /* do lock */);
     }
 
     VkResult on_vkBindBufferMemory2(
@@ -3530,7 +5286,7 @@
         VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos) {
         VkEncoder *enc = (VkEncoder *)context;
         return enc->vkBindBufferMemory2(
-            device, bindInfoCount, pBindInfos);
+            device, bindInfoCount, pBindInfos, true /* do lock */);
     }
 
     VkResult on_vkBindBufferMemory2KHR(
@@ -3538,7 +5294,7 @@
         VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos) {
         VkEncoder *enc = (VkEncoder *)context;
         return enc->vkBindBufferMemory2KHR(
-            device, bindInfoCount, pBindInfos);
+            device, bindInfoCount, pBindInfos, true /* do lock */);
     }
 
     void ensureSyncDeviceFd() {
@@ -3568,11 +5324,17 @@
 
 #ifdef VK_USE_PLATFORM_FUCHSIA
         bool exportEvent = exportSemaphoreInfoPtr &&
+            ((exportSemaphoreInfoPtr->handleTypes &
+             VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA) ||
             (exportSemaphoreInfoPtr->handleTypes &
-             VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA);
+             VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA));
 
         if (exportEvent) {
             finalCreateInfo.pNext = nullptr;
+            // If we have timeline semaphores externally, leave it there.
+            const VkSemaphoreTypeCreateInfo* typeCi =
+                vk_find_struct<VkSemaphoreTypeCreateInfo>(pCreateInfo);
+            if (typeCi) finalCreateInfo.pNext = typeCi;
         }
 #endif
 
@@ -3583,10 +5345,14 @@
 
         if (exportSyncFd) {
             finalCreateInfo.pNext = nullptr;
+            // If we have timeline semaphores externally, leave it there.
+            const VkSemaphoreTypeCreateInfo* typeCi =
+                vk_find_struct<VkSemaphoreTypeCreateInfo>(pCreateInfo);
+            if (typeCi) finalCreateInfo.pNext = typeCi;
         }
 #endif
         input_result = enc->vkCreateSemaphore(
-            device, &finalCreateInfo, pAllocator, pSemaphore);
+            device, &finalCreateInfo, pAllocator, pSemaphore, true /* do lock */);
 
         zx_handle_t event_handle = ZX_HANDLE_INVALID;
 
@@ -3605,20 +5371,62 @@
 
         info.device = device;
         info.eventHandle = event_handle;
+#ifdef VK_USE_PLATFORM_FUCHSIA
+        info.eventKoid = getEventKoid(info.eventHandle);
+#endif
 
 #ifdef VK_USE_PLATFORM_ANDROID_KHR
         if (exportSyncFd) {
+            if (mFeatureInfo->hasVirtioGpuNativeSync) {
+#if !defined(HOST_BUILD)
+                uint64_t hostFenceHandle = get_host_u64_VkSemaphore(*pSemaphore);
+                uint32_t hostFenceHandleLo = (uint32_t)hostFenceHandle;
+                uint32_t hostFenceHandleHi = (uint32_t)(hostFenceHandle >> 32);
 
-            ensureSyncDeviceFd();
+                uint64_t hostDeviceHandle = get_host_u64_VkDevice(device);
+                uint32_t hostDeviceHandleLo = (uint32_t)hostDeviceHandle;
+                uint32_t hostDeviceHandleHi = (uint32_t)(hostFenceHandle >> 32);
 
-            if (exportSyncFd) {
-                int syncFd = -1;
-                goldfish_sync_queue_work(
-                    mSyncDeviceFd,
-                    get_host_u64_VkSemaphore(*pSemaphore) /* the handle */,
-                    GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
-                    &syncFd);
-                info.syncFd = syncFd;
+                #define VIRTIO_GPU_NATIVE_SYNC_VULKAN_CREATE_EXPORT_FD 0xa000
+
+                uint32_t cmdDwords[5] = {
+                    VIRTIO_GPU_NATIVE_SYNC_VULKAN_CREATE_EXPORT_FD,
+                    hostDeviceHandleLo,
+                    hostDeviceHandleHi,
+                    hostFenceHandleLo,
+                    hostFenceHandleHi,
+                };
+
+                drm_virtgpu_execbuffer execbuffer = {
+                    .flags = VIRTGPU_EXECBUF_FENCE_FD_OUT,
+                    .size = 5 * sizeof(uint32_t),
+                    .command = (uint64_t)(cmdDwords),
+                    .bo_handles = 0,
+                    .num_bo_handles = 0,
+                    .fence_fd = -1,
+                };
+
+                int res = drmIoctl(mRendernodeFd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &execbuffer);
+                if (res) {
+                    ALOGE("%s: Failed to virtgpu execbuffer: sterror: %s errno: %d\n", __func__,
+                            strerror(errno), errno);
+                    abort();
+                }
+
+                info.syncFd = execbuffer.fence_fd;
+#endif
+            } else {
+                ensureSyncDeviceFd();
+
+                if (exportSyncFd) {
+                    int syncFd = -1;
+                    goldfish_sync_queue_work(
+                            mSyncDeviceFd,
+                            get_host_u64_VkSemaphore(*pSemaphore) /* the handle */,
+                            GOLDFISH_SYNC_VULKAN_SEMAPHORE_SYNC /* thread handle (doubling as type field) */,
+                            &syncFd);
+                    info.syncFd = syncFd;
+                }
             }
         }
 #endif
@@ -3630,7 +5438,7 @@
         void* context,
         VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
         VkEncoder* enc = (VkEncoder*)context;
-        enc->vkDestroySemaphore(device, semaphore, pAllocator);
+        enc->vkDestroySemaphore(device, semaphore, pAllocator, true /* do lock */);
     }
 
     // https://www.khronos.org/registry/vulkan/specs/1.0-extensions/html/vkspec.html#vkGetSemaphoreFdKHR
@@ -3656,7 +5464,7 @@
         } else {
             // opaque fd
             int hostFd = 0;
-            VkResult result = enc->vkGetSemaphoreFdKHR(device, pGetFdInfo, &hostFd);
+            VkResult result = enc->vkGetSemaphoreFdKHR(device, pGetFdInfo, &hostFd, true /* do lock */);
             if (result != VK_SUCCESS) {
                 return result;
             }
@@ -3709,7 +5517,7 @@
             read(fd, &hostFd, sizeof(hostFd));
             VkImportSemaphoreFdInfoKHR tmpInfo = *pImportSemaphoreFdInfo;
             tmpInfo.fd = hostFd;
-            VkResult result = enc->vkImportSemaphoreFdKHR(device, &tmpInfo);
+            VkResult result = enc->vkImportSemaphoreFdKHR(device, &tmpInfo, true /* do lock */);
             close(fd);
             return result;
         }
@@ -3722,14 +5530,252 @@
 #endif
     }
 
+    struct CommandBufferPendingDescriptorSets {
+        std::unordered_set<VkDescriptorSet> sets;
+    };
+
+    void collectAllPendingDescriptorSetsBottomUp(const std::vector<VkCommandBuffer>& workingSet, std::unordered_set<VkDescriptorSet>& allDs) {
+        if (workingSet.empty()) return;
+
+        std::vector<VkCommandBuffer> nextLevel;
+        for (auto commandBuffer : workingSet) {
+            struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
+            forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
+                    nextLevel.push_back((VkCommandBuffer)secondary);
+                    });
+        }
+
+        collectAllPendingDescriptorSetsBottomUp(nextLevel, allDs);
+
+        for (auto cmdbuf : workingSet) {
+            struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);
+
+            if (!cb->userPtr) {
+                continue; // No descriptors to update.
+            }
+
+            CommandBufferPendingDescriptorSets* pendingDescriptorSets =
+                (CommandBufferPendingDescriptorSets*)(cb->userPtr);
+
+            if (pendingDescriptorSets->sets.empty()) {
+                continue; // No descriptors to update.
+            }
+
+            allDs.insert(pendingDescriptorSets->sets.begin(), pendingDescriptorSets->sets.end());
+        }
+    }
+
+    void commitDescriptorSetUpdates(void* context, VkQueue queue, const std::unordered_set<VkDescriptorSet>& sets) {
+        VkEncoder* enc = (VkEncoder*)context;
+
+        std::unordered_map<VkDescriptorPool, uint32_t> poolSet;
+        std::vector<VkDescriptorPool> pools;
+        std::vector<VkDescriptorSetLayout> setLayouts;
+        std::vector<uint64_t> poolIds;
+        std::vector<uint32_t> descriptorSetWhichPool;
+        std::vector<uint32_t> pendingAllocations;
+        std::vector<uint32_t> writeStartingIndices;
+        std::vector<VkWriteDescriptorSet> writesForHost;
+
+        uint32_t poolIndex = 0;
+        uint32_t currentWriteIndex = 0;
+        for (auto set : sets) {
+            ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
+            VkDescriptorPool pool = reified->pool;
+            VkDescriptorSetLayout setLayout = reified->setLayout;
+
+            auto it = poolSet.find(pool);
+            if (it == poolSet.end()) {
+                poolSet[pool] = poolIndex;
+                descriptorSetWhichPool.push_back(poolIndex);
+                pools.push_back(pool);
+                ++poolIndex;
+            } else {
+                uint32_t savedPoolIndex = it->second;
+                descriptorSetWhichPool.push_back(savedPoolIndex);
+            }
+
+            poolIds.push_back(reified->poolId);
+            setLayouts.push_back(setLayout);
+            pendingAllocations.push_back(reified->allocationPending ? 1 : 0);
+            writeStartingIndices.push_back(currentWriteIndex);
+
+            auto& writes = reified->allWrites;
+
+            for (size_t i = 0; i < writes.size(); ++i) {
+                uint32_t binding = i;
+
+                for (size_t j = 0; j < writes[i].size(); ++j) {
+                    auto& write = writes[i][j];
+
+                    if (write.type == DescriptorWriteType::Empty) continue;
+
+                    uint32_t dstArrayElement = 0;
+
+                    VkDescriptorImageInfo* imageInfo = nullptr;
+                    VkDescriptorBufferInfo* bufferInfo = nullptr;
+                    VkBufferView* bufferView = nullptr;
+
+                    switch (write.type) {
+                        case DescriptorWriteType::Empty:
+                            break;
+                        case DescriptorWriteType::ImageInfo:
+                            dstArrayElement = j;
+                            imageInfo = &write.imageInfo;
+                            break;
+                        case DescriptorWriteType::BufferInfo:
+                            dstArrayElement = j;
+                            bufferInfo = &write.bufferInfo;
+                            break;
+                        case DescriptorWriteType::BufferView:
+                            dstArrayElement = j;
+                            bufferView = &write.bufferView;
+                            break;
+                        case DescriptorWriteType::InlineUniformBlock:
+                        case DescriptorWriteType::AccelerationStructure:
+                            // TODO
+                            ALOGE("Encountered pending inline uniform block or acceleration structure desc write, abort (NYI)\n");
+                            abort();
+                        default:
+                            break;
+
+                    }
+
+                    // TODO: Combine multiple writes into one VkWriteDescriptorSet.
+                    VkWriteDescriptorSet forHost = {
+                        VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, 0 /* TODO: inline uniform block */,
+                        set,
+                        binding,
+                        dstArrayElement,
+                        1,
+                        write.descriptorType,
+                        imageInfo,
+                        bufferInfo,
+                        bufferView,
+                    };
+
+                    writesForHost.push_back(forHost);
+                    ++currentWriteIndex;
+
+                    // Set it back to empty.
+                    write.type = DescriptorWriteType::Empty;
+                }
+            }
+        }
+
+        // Skip out if there's nothing to VkWriteDescriptorSet home about.
+        if (writesForHost.empty()) {
+            return;
+        }
+
+        enc->vkQueueCommitDescriptorSetUpdatesGOOGLE(
+            queue, 
+            (uint32_t)pools.size(), pools.data(),
+            (uint32_t)sets.size(),
+            setLayouts.data(),
+            poolIds.data(),
+            descriptorSetWhichPool.data(),
+            pendingAllocations.data(),
+            writeStartingIndices.data(),
+            (uint32_t)writesForHost.size(),
+            writesForHost.data(),
+            false /* no lock */);
+
+        // If we got here, then we definitely serviced the allocations.
+        for (auto set : sets) {
+            ReifiedDescriptorSet* reified = as_goldfish_VkDescriptorSet(set)->reified;
+            reified->allocationPending = false;
+        }
+    }
+
+    void flushCommandBufferPendingCommandsBottomUp(void* context, VkQueue queue, const std::vector<VkCommandBuffer>& workingSet) {
+        if (workingSet.empty()) return;
+
+        std::vector<VkCommandBuffer> nextLevel;
+        for (auto commandBuffer : workingSet) {
+            struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
+            forAllObjects(cb->subObjects, [&nextLevel](void* secondary) {
+                nextLevel.push_back((VkCommandBuffer)secondary);
+            });
+        }
+
+        flushCommandBufferPendingCommandsBottomUp(context, queue, nextLevel);
+
+        // After this point, everyone at the previous level has been flushed
+        for (auto cmdbuf : workingSet) {
+            struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(cmdbuf);
+
+            // There's no pending commands here, skip. (case 1)
+            if (!cb->privateStream) continue;
+
+            unsigned char* writtenPtr = 0;
+            size_t written = 0;
+            ((CommandBufferStagingStream*)cb->privateStream)->getWritten(&writtenPtr, &written);
+
+            // There's no pending commands here, skip. (case 2, stream created but no new recordings)
+            if (!written) continue;
+
+            // There are pending commands to flush.
+            VkEncoder* enc = (VkEncoder*)context;
+            enc->vkQueueFlushCommandsGOOGLE(queue, cmdbuf, written, (const void*)writtenPtr, true /* do lock */);
+
+            // Reset this stream.
+            ((CommandBufferStagingStream*)cb->privateStream)->reset();
+        }
+    }
+
+    // Unlike resetCommandBufferStagingInfo, this does not always erase its
+    // superObjects pointers because the command buffer has merely been
+    // submitted, not reset.  However, if the command buffer was recorded with
+    // ONE_TIME_SUBMIT_BIT, then it will also reset its primaries.
+    //
+    // Also, we save the set of descriptor sets referenced by this command
+    // buffer because we only submitted the command buffer and it's possible to
+    // update the descriptor set again and re-submit the same command without
+    // recording it (Update-after-bind descriptor sets)
+    void resetCommandBufferPendingTopology(VkCommandBuffer commandBuffer) {
+        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
+        if (cb->flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) {
+            resetCommandBufferStagingInfo(commandBuffer,
+                true /* reset primaries */,
+                true /* clear pending descriptor sets */);
+        } else {
+            resetCommandBufferStagingInfo(commandBuffer,
+                false /* Don't reset primaries */,
+                false /* Don't clear pending descriptor sets */);
+        }
+    }
+
+    void flushStagingStreams(void* context, VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits) {
+        std::vector<VkCommandBuffer> toFlush;
+        for (uint32_t i = 0; i < submitCount; ++i) {
+            for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; ++j) {
+                toFlush.push_back(pSubmits[i].pCommandBuffers[j]);
+            }
+        }
+
+        std::unordered_set<VkDescriptorSet> pendingSets;
+        collectAllPendingDescriptorSetsBottomUp(toFlush, pendingSets);
+        commitDescriptorSetUpdates(context, queue, pendingSets);
+
+        flushCommandBufferPendingCommandsBottomUp(context, queue, toFlush);
+
+        for (auto cb : toFlush) {
+            resetCommandBufferPendingTopology(cb);
+        }
+    }
+
     VkResult on_vkQueueSubmit(
         void* context, VkResult input_result,
         VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence) {
+        AEMU_SCOPED_TRACE("on_vkQueueSubmit");
+
+        flushStagingStreams(context, queue, submitCount, pSubmits);
 
         std::vector<VkSemaphore> pre_signal_semaphores;
         std::vector<zx_handle_t> pre_signal_events;
         std::vector<int> pre_signal_sync_fds;
-        std::vector<zx_handle_t> post_wait_events;
+        std::vector<std::pair<zx_handle_t, zx_koid_t>> post_wait_events;
         std::vector<int> post_wait_sync_fds;
 
         VkEncoder* enc = (VkEncoder*)context;
@@ -3761,7 +5807,19 @@
                     auto& semInfo = it->second;
 #ifdef VK_USE_PLATFORM_FUCHSIA
                     if (semInfo.eventHandle) {
-                        post_wait_events.push_back(semInfo.eventHandle);
+                        post_wait_events.push_back(
+                            {semInfo.eventHandle, semInfo.eventKoid});
+#ifndef FUCHSIA_NO_TRACE
+                        if (semInfo.eventKoid != ZX_KOID_INVALID) {
+                            // TODO(fxbug.dev/66098): Remove the "semaphore"
+                            // FLOW_END events once it is removed from clients
+                            // (for example, gfx Engine).
+                            TRACE_FLOW_END("gfx", "semaphore",
+                                           semInfo.eventKoid);
+                            TRACE_FLOW_BEGIN("gfx", "goldfish_post_wait_event",
+                                             semInfo.eventKoid);
+                        }
+#endif
                     }
 #endif
 #ifdef VK_USE_PLATFORM_ANDROID_KHR
@@ -3775,8 +5833,13 @@
         lock.unlock();
 
         if (pre_signal_semaphores.empty()) {
-            input_result = enc->vkQueueSubmit(queue, submitCount, pSubmits, fence);
-            if (input_result != VK_SUCCESS) return input_result;
+            if (supportsAsyncQueueSubmit()) {
+                enc->vkQueueSubmitAsyncGOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
+                input_result = VK_SUCCESS;
+            } else {
+                input_result = enc->vkQueueSubmit(queue, submitCount, pSubmits, fence, true /* do lock */);
+                if (input_result != VK_SUCCESS) return input_result;
+            }
         } else {
             // Schedule waits on the OS external objects and
             // signal the wait semaphores
@@ -3809,12 +5872,23 @@
                 .waitSemaphoreCount = 0,
                 .pWaitSemaphores = nullptr,
                 .pWaitDstStageMask = nullptr,
-                .signalSemaphoreCount = static_cast<uint32_t>(pre_signal_semaphores.size()),
+                .signalSemaphoreCount =
+                    static_cast<uint32_t>(pre_signal_semaphores.size()),
                 .pSignalSemaphores = pre_signal_semaphores.data()};
-            enc->vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
 
-            input_result = enc->vkQueueSubmit(queue, submitCount, pSubmits, fence);
-            if (input_result != VK_SUCCESS) return input_result;
+            if (supportsAsyncQueueSubmit()) {
+                enc->vkQueueSubmitAsyncGOOGLE(queue, 1, &submit_info, VK_NULL_HANDLE, true /* do lock */);
+            } else {
+                enc->vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE, true /* do lock */);
+            }
+
+            if (supportsAsyncQueueSubmit()) {
+                enc->vkQueueSubmitAsyncGOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
+                input_result = VK_SUCCESS;
+            } else {
+                input_result = enc->vkQueueSubmit(queue, submitCount, pSubmits, fence, true /* do lock */);
+                if (input_result != VK_SUCCESS) return input_result;
+            }
         }
 
         lock.lock();
@@ -3837,15 +5911,22 @@
 
             std::vector<WorkPool::Task> tasks;
 
-            tasks.push_back([this, queue, externalFenceFdToSignal,
+            tasks.push_back([queue, externalFenceFdToSignal,
                              post_wait_events /* copy of zx handles */,
                              post_wait_sync_fds /* copy of sync fds */] {
-                auto hostConn = mThreadingCallbacks.hostConnectionGetFunc();
-                auto vkEncoder = mThreadingCallbacks.vkEncoderGetFunc(hostConn);
-                auto waitIdleRes = vkEncoder->vkQueueWaitIdle(queue);
+                auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
+                auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
+                auto waitIdleRes = vkEncoder->vkQueueWaitIdle(queue, true /* do lock */);
 #ifdef VK_USE_PLATFORM_FUCHSIA
+                AEMU_SCOPED_TRACE("on_vkQueueSubmit::SignalSemaphores");
                 (void)externalFenceFdToSignal;
-                for (auto& event : post_wait_events) {
+                for (auto& [event, koid] : post_wait_events) {
+#ifndef FUCHSIA_NO_TRACE
+                    if (koid != ZX_KOID_INVALID) {
+                        TRACE_FLOW_END("gfx", "goldfish_post_wait_event", koid);
+                        TRACE_FLOW_BEGIN("gfx", "event_signal", koid);
+                    }
+#endif
                     zx_object_signal(event, 0, ZX_EVENT_SIGNALED);
                 }
 #endif
@@ -3882,17 +5963,17 @@
 
         if (toWait.empty()) {
             ALOGV("%s: No queue-specific work pool items\n", __func__);
-            return enc->vkQueueWaitIdle(queue);
+            return enc->vkQueueWaitIdle(queue, true /* do lock */);
         }
 
         for (auto handle : toWait) {
-            ALOGV("%s: waiting on work group item: %llu\n", __func__, 
+            ALOGV("%s: waiting on work group item: %llu\n", __func__,
                   (unsigned long long)handle);
             mWorkPool.waitAll(handle);
         }
 
         // now done waiting, get the host's opinion
-        return enc->vkQueueWaitIdle(queue);
+        return enc->vkQueueWaitIdle(queue, true /* do lock */);
     }
 
     void unwrap_VkNativeBufferANDROID(
@@ -3920,7 +6001,7 @@
         }
 
         *(uint32_t*)(nativeInfoOut->handle) =
-            mThreadingCallbacks.hostConnectionGetFunc()->
+            ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->
                 grallocHelper()->getHostHandle(
                     (const native_handle_t*)nativeInfo->handle);
     }
@@ -4025,26 +6106,6 @@
         return input_result;
     }
 
-    bool isDescriptorTypeImageInfo(VkDescriptorType descType) {
-        return (descType == VK_DESCRIPTOR_TYPE_SAMPLER) ||
-               (descType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) ||
-               (descType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) ||
-               (descType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) ||
-               (descType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT);
-    }
-
-    bool isDescriptorTypeBufferInfo(VkDescriptorType descType) {
-        return (descType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) ||
-               (descType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
-               (descType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) ||
-               (descType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC);
-    }
-
-    bool isDescriptorTypeBufferView(VkDescriptorType descType) {
-        return (descType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) ||
-               (descType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
-    }
-
     VkResult initDescriptorUpdateTemplateBuffers(
         const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
         VkDescriptorUpdateTemplate descriptorUpdateTemplate) {
@@ -4058,27 +6119,18 @@
 
         auto& info = it->second;
 
-        size_t imageInfosNeeded = 0;
-        size_t bufferInfosNeeded = 0;
-        size_t bufferViewsNeeded = 0;
-
         for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
             const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
             uint32_t descCount = entry.descriptorCount;
             VkDescriptorType descType = entry.descriptorType;
-
-            info.templateEntries.push_back(entry);
-
+            ++info.templateEntryCount;
             for (uint32_t j = 0; j < descCount; ++j) {
                 if (isDescriptorTypeImageInfo(descType)) {
-                    ++imageInfosNeeded;
-                    info.imageInfoEntryIndices.push_back(i);
+                    ++info.imageInfoCount;
                 } else if (isDescriptorTypeBufferInfo(descType)) {
-                    ++bufferInfosNeeded;
-                    info.bufferInfoEntryIndices.push_back(i);
+                    ++info.bufferInfoCount;
                 } else if (isDescriptorTypeBufferView(descType)) {
-                    ++bufferViewsNeeded;
-                    info.bufferViewEntryIndices.push_back(i);
+                    ++info.bufferViewCount;
                 } else {
                     ALOGE("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
                     abort();
@@ -4086,10 +6138,51 @@
             }
         }
 
-        // To be filled in later (our flat structure)
-        info.imageInfos.resize(imageInfosNeeded);
-        info.bufferInfos.resize(bufferInfosNeeded);
-        info.bufferViews.resize(bufferViewsNeeded);
+        if (info.templateEntryCount)
+            info.templateEntries = new VkDescriptorUpdateTemplateEntry[info.templateEntryCount];
+
+        if (info.imageInfoCount) {
+            info.imageInfoIndices = new uint32_t[info.imageInfoCount];
+            info.imageInfos = new VkDescriptorImageInfo[info.imageInfoCount];
+        }
+
+        if (info.bufferInfoCount) {
+            info.bufferInfoIndices = new uint32_t[info.bufferInfoCount];
+            info.bufferInfos = new VkDescriptorBufferInfo[info.bufferInfoCount];
+        }
+
+        if (info.bufferViewCount) {
+            info.bufferViewIndices = new uint32_t[info.bufferViewCount];
+            info.bufferViews = new VkBufferView[info.bufferViewCount];
+        }
+
+        uint32_t imageInfoIndex = 0;
+        uint32_t bufferInfoIndex = 0;
+        uint32_t bufferViewIndex = 0;
+
+        for (uint32_t i = 0; i < pCreateInfo->descriptorUpdateEntryCount; ++i) {
+            const auto& entry = pCreateInfo->pDescriptorUpdateEntries[i];
+            uint32_t descCount = entry.descriptorCount;
+            VkDescriptorType descType = entry.descriptorType;
+
+            info.templateEntries[i] = entry;
+
+            for (uint32_t j = 0; j < descCount; ++j) {
+                if (isDescriptorTypeImageInfo(descType)) {
+                    info.imageInfoIndices[imageInfoIndex] = i;
+                    ++imageInfoIndex;
+                } else if (isDescriptorTypeBufferInfo(descType)) {
+                    info.bufferInfoIndices[bufferInfoIndex] = i;
+                    ++bufferInfoIndex;
+                } else if (isDescriptorTypeBufferView(descType)) {
+                    info.bufferViewIndices[bufferViewIndex] = i;
+                    ++bufferViewIndex;
+                } else {
+                    ALOGE("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
+                    abort();
+                }
+            }
+        }
 
         return VK_SUCCESS;
     }
@@ -4138,6 +6231,7 @@
         uint8_t* userBuffer = (uint8_t*)pData;
         if (!userBuffer) return;
 
+        // TODO: Make this thread safe
         AutoLock lock(mLock);
 
         auto it = info_VkDescriptorUpdateTemplate.find(descriptorUpdateTemplate);
@@ -4147,61 +6241,134 @@
 
         auto& info = it->second;
 
+        uint32_t templateEntryCount = info.templateEntryCount;
+        VkDescriptorUpdateTemplateEntry* templateEntries = info.templateEntries;
+
+        uint32_t imageInfoCount = info.imageInfoCount;
+        uint32_t bufferInfoCount = info.bufferInfoCount;
+        uint32_t bufferViewCount = info.bufferViewCount;
+        uint32_t* imageInfoIndices = info.imageInfoIndices;
+        uint32_t* bufferInfoIndices = info.bufferInfoIndices;
+        uint32_t* bufferViewIndices = info.bufferViewIndices;
+        VkDescriptorImageInfo* imageInfos = info.imageInfos;
+        VkDescriptorBufferInfo* bufferInfos = info.bufferInfos;
+        VkBufferView* bufferViews = info.bufferViews;
+
+        lock.unlock();
+
         size_t currImageInfoOffset = 0;
         size_t currBufferInfoOffset = 0;
         size_t currBufferViewOffset = 0;
 
-        for (const auto& entry : info.templateEntries) {
+        struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(descriptorSet);
+        ReifiedDescriptorSet* reified = ds->reified;
+
+        bool batched = mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate;
+
+        for (uint32_t i = 0; i < templateEntryCount; ++i) {
+            const auto& entry = templateEntries[i];
             VkDescriptorType descType = entry.descriptorType;
+            uint32_t dstBinding = entry.dstBinding;
 
             auto offset = entry.offset;
             auto stride = entry.stride;
+            auto dstArrayElement = entry.dstArrayElement;
 
             uint32_t descCount = entry.descriptorCount;
 
             if (isDescriptorTypeImageInfo(descType)) {
+
                 if (!stride) stride = sizeof(VkDescriptorImageInfo);
+
+                const VkDescriptorImageInfo* currImageInfoBegin =
+                    (const VkDescriptorImageInfo*)((uint8_t*)imageInfos + currImageInfoOffset);
+
                 for (uint32_t j = 0; j < descCount; ++j) {
-                    memcpy(((uint8_t*)info.imageInfos.data()) + currImageInfoOffset,
+                    const VkDescriptorImageInfo* user =
+                        (const VkDescriptorImageInfo*)(userBuffer + offset + j * stride);
+
+                    memcpy(((uint8_t*)imageInfos) + currImageInfoOffset,
                            userBuffer + offset + j * stride,
                            sizeof(VkDescriptorImageInfo));
                     currImageInfoOffset += sizeof(VkDescriptorImageInfo);
                 }
+
+                if (batched) doEmulatedDescriptorImageInfoWriteFromTemplate(
+                        descType,
+                        dstBinding,
+                        dstArrayElement,
+                        descCount,
+                        currImageInfoBegin,
+                        reified);
+
             } else if (isDescriptorTypeBufferInfo(descType)) {
+
+
                 if (!stride) stride = sizeof(VkDescriptorBufferInfo);
+
+                const VkDescriptorBufferInfo* currBufferInfoBegin =
+                    (const VkDescriptorBufferInfo*)((uint8_t*)bufferInfos + currBufferInfoOffset);
+
                 for (uint32_t j = 0; j < descCount; ++j) {
-                    memcpy(((uint8_t*)info.bufferInfos.data()) + currBufferInfoOffset,
+                    const VkDescriptorBufferInfo* user =
+                        (const VkDescriptorBufferInfo*)(userBuffer + offset + j * stride);
+
+                    memcpy(((uint8_t*)bufferInfos) + currBufferInfoOffset,
                            userBuffer + offset + j * stride,
                            sizeof(VkDescriptorBufferInfo));
                     currBufferInfoOffset += sizeof(VkDescriptorBufferInfo);
                 }
+
+                if (batched) doEmulatedDescriptorBufferInfoWriteFromTemplate(
+                        descType,
+                        dstBinding,
+                        dstArrayElement,
+                        descCount,
+                        currBufferInfoBegin,
+                        reified);
+
             } else if (isDescriptorTypeBufferView(descType)) {
                 if (!stride) stride = sizeof(VkBufferView);
+
+                const VkBufferView* currBufferViewBegin =
+                    (const VkBufferView*)((uint8_t*)bufferViews + currBufferViewOffset);
+
                 for (uint32_t j = 0; j < descCount; ++j) {
-                    memcpy(((uint8_t*)info.bufferViews.data()) + currBufferViewOffset,
+                    memcpy(((uint8_t*)bufferViews) + currBufferViewOffset,
                            userBuffer + offset + j * stride,
                            sizeof(VkBufferView));
                     currBufferViewOffset += sizeof(VkBufferView);
                 }
+
+                if (batched) doEmulatedDescriptorBufferViewWriteFromTemplate(
+                        descType,
+                        dstBinding,
+                        dstArrayElement,
+                        descCount,
+                        currBufferViewBegin,
+                        reified);
             } else {
                 ALOGE("%s: FATAL: Unknown descriptor type %d\n", __func__, descType);
                 abort();
             }
         }
 
+        if (batched) return;
+
         enc->vkUpdateDescriptorSetWithTemplateSizedGOOGLE(
             device,
             descriptorSet,
             descriptorUpdateTemplate,
-            (uint32_t)info.imageInfos.size(),
-            (uint32_t)info.bufferInfos.size(),
-            (uint32_t)info.bufferViews.size(),
-            info.imageInfoEntryIndices.data(),
-            info.bufferInfoEntryIndices.data(),
-            info.bufferViewEntryIndices.data(),
-            info.imageInfos.data(),
-            info.bufferInfos.data(),
-            info.bufferViews.data());
+            imageInfoCount,
+            bufferInfoCount,
+            bufferViewCount,
+            imageInfoIndices,
+            bufferInfoIndices,
+            bufferViewIndices,
+            imageInfos,
+            bufferInfos,
+            bufferViews,
+            true /* do lock */);
     }
 
     VkResult on_vkGetPhysicalDeviceImageFormatProperties2_common(
@@ -4214,6 +6381,49 @@
         VkEncoder* enc = (VkEncoder*)context;
         (void)input_result;
 
+#ifdef VK_USE_PLATFORM_FUCHSIA
+
+        constexpr VkFormat kExternalImageSupportedFormats[] = {
+            VK_FORMAT_B8G8R8A8_SINT,
+            VK_FORMAT_B8G8R8A8_UNORM,
+            VK_FORMAT_B8G8R8A8_SRGB,
+            VK_FORMAT_B8G8R8A8_SNORM,
+            VK_FORMAT_B8G8R8A8_SSCALED,
+            VK_FORMAT_B8G8R8A8_USCALED,
+            VK_FORMAT_R8G8B8A8_SINT,
+            VK_FORMAT_R8G8B8A8_UNORM,
+            VK_FORMAT_R8G8B8A8_SRGB,
+            VK_FORMAT_R8G8B8A8_SNORM,
+            VK_FORMAT_R8G8B8A8_SSCALED,
+            VK_FORMAT_R8G8B8A8_USCALED,
+            VK_FORMAT_R8_UNORM,
+            VK_FORMAT_R8_UINT,
+            VK_FORMAT_R8_USCALED,
+            VK_FORMAT_R8_SNORM,
+            VK_FORMAT_R8_SINT,
+            VK_FORMAT_R8_SSCALED,
+            VK_FORMAT_R8_SRGB,
+            VK_FORMAT_R8G8_UNORM,
+            VK_FORMAT_R8G8_UINT,
+            VK_FORMAT_R8G8_USCALED,
+            VK_FORMAT_R8G8_SNORM,
+            VK_FORMAT_R8G8_SINT,
+            VK_FORMAT_R8G8_SSCALED,
+            VK_FORMAT_R8G8_SRGB,
+        };
+
+        VkExternalImageFormatProperties* ext_img_properties =
+            vk_find_struct<VkExternalImageFormatProperties>(pImageFormatProperties);
+
+        if (ext_img_properties) {
+          if (std::find(std::begin(kExternalImageSupportedFormats),
+                        std::end(kExternalImageSupportedFormats),
+                        pImageFormatInfo->format) == std::end(kExternalImageSupportedFormats)) {
+            return VK_ERROR_FORMAT_NOT_SUPPORTED;
+          }
+        }
+#endif
+
         VkAndroidHardwareBufferUsageANDROID* output_ahw_usage =
             vk_find_struct<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties);
 
@@ -4222,15 +6432,46 @@
         if (isKhr) {
             hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2KHR(
                 physicalDevice, pImageFormatInfo,
-                pImageFormatProperties);
+                pImageFormatProperties, true /* do lock */);
         } else {
             hostRes = enc->vkGetPhysicalDeviceImageFormatProperties2(
                 physicalDevice, pImageFormatInfo,
-                pImageFormatProperties);
+                pImageFormatProperties, true /* do lock */);
         }
 
         if (hostRes != VK_SUCCESS) return hostRes;
 
+#ifdef VK_USE_PLATFORM_FUCHSIA
+        if (ext_img_properties) {
+            const VkPhysicalDeviceExternalImageFormatInfo* ext_img_info =
+                vk_find_struct<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo);
+            if (ext_img_info) {
+                switch (static_cast<uint32_t>(ext_img_info->handleType)) {
+                case VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA:
+                    ext_img_properties->externalMemoryProperties = {
+                        .externalMemoryFeatures = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
+                                                  VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
+                        .exportFromImportedHandleTypes =
+                            VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA,
+                        .compatibleHandleTypes =
+                            VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA,
+                    };
+                    break;
+                case VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA:
+                    ext_img_properties->externalMemoryProperties = {
+                        .externalMemoryFeatures = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT |
+                                                  VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
+                        .exportFromImportedHandleTypes =
+                            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
+                        .compatibleHandleTypes =
+                            VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA,
+                    };
+                    break;
+                }
+            }
+        }
+#endif
+
         if (output_ahw_usage) {
             output_ahw_usage->androidHardwareBufferUsage =
                 getAndroidHardwareBufferUsageFromVkUsage(
@@ -4261,46 +6502,129 @@
             physicalDevice, pImageFormatInfo, pImageFormatProperties);
     }
 
-    uint32_t syncEncodersForCommandBuffer(VkCommandBuffer commandBuffer, VkEncoder* currentEncoder) {
-        AutoLock lock(mLock);
-
-        auto it = info_VkCommandBuffer.find(commandBuffer);
-        if (it == info_VkCommandBuffer.end()) return 0;
-
-        auto& info = it->second;
-
-        if (!info.lastUsedEncoderPtr) {
-            info.lastUsedEncoderPtr = new VkEncoder*;
-            *(info.lastUsedEncoderPtr) = currentEncoder;
+    void on_vkGetPhysicalDeviceExternalSemaphoreProperties(
+        void*,
+        VkPhysicalDevice,
+        const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
+        VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
+        (void)pExternalSemaphoreInfo;
+        (void)pExternalSemaphoreProperties;
+#ifdef VK_USE_PLATFORM_FUCHSIA
+        if (pExternalSemaphoreInfo->handleType ==
+            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA) {
+            pExternalSemaphoreProperties->compatibleHandleTypes |=
+                VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA;
+            pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
+                VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA;
+            pExternalSemaphoreProperties->externalSemaphoreFeatures |=
+                VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
+                VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
         }
+        if (pExternalSemaphoreInfo->handleType ==
+            static_cast<uint32_t>(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA)) {
+            pExternalSemaphoreProperties->compatibleHandleTypes |=
+                VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
+            pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
+                VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA;
+            pExternalSemaphoreProperties->externalSemaphoreFeatures |=
+                VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
+                VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
+        }
+#endif  // VK_USE_PLATFORM_FUCHSIA
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+        if (pExternalSemaphoreInfo->handleType ==
+            VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT) {
+            pExternalSemaphoreProperties->compatibleHandleTypes |=
+                VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
+            pExternalSemaphoreProperties->exportFromImportedHandleTypes |=
+                VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
+            pExternalSemaphoreProperties->externalSemaphoreFeatures |=
+                VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
+                VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
+        }
+#endif  // VK_USE_PLATFORM_ANDROID_KHR
+    }
 
-        auto lastUsedEncoderPtr = info.lastUsedEncoderPtr;
+    void registerEncoderCleanupCallback(const VkEncoder* encoder, void* object, CleanupCallback callback) {
+        AutoLock lock(mLock);
+        auto& callbacks = mEncoderCleanupCallbacks[encoder];
+        callbacks[object] = callback;
+    }
 
-        auto lastEncoder = *(lastUsedEncoderPtr);
+    void unregisterEncoderCleanupCallback(const VkEncoder* encoder, void* object) {
+        AutoLock lock(mLock);
+        mEncoderCleanupCallbacks[encoder].erase(object);
+    }
 
-        // We always make lastUsedEncoderPtr track
-        // the current encoder, even if the last encoder
-        // is null.
-        *(lastUsedEncoderPtr) = currentEncoder;
+    void onEncoderDeleted(const VkEncoder* encoder) {
+        AutoLock lock(mLock);
+        if (mEncoderCleanupCallbacks.find(encoder) == mEncoderCleanupCallbacks.end()) return;
 
-        if (!lastEncoder) return 0;
+        std::unordered_map<void*, CleanupCallback> callbackCopies = mEncoderCleanupCallbacks[encoder];
+
+        mEncoderCleanupCallbacks.erase(encoder);
+        lock.unlock();
+
+        for (auto it : callbackCopies) {
+            it.second();
+        }
+    }
+
+    uint32_t syncEncodersForCommandBuffer(VkCommandBuffer commandBuffer, VkEncoder* currentEncoder) {
+        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
+        if (!cb) return 0;
+
+        auto lastEncoder = cb->lastUsedEncoder;
+
         if (lastEncoder == currentEncoder) return 0;
 
-        info.sequenceNumber++;
-        lastEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, false, info.sequenceNumber);
+        currentEncoder->incRef();
+
+        cb->lastUsedEncoder = currentEncoder;
+
+        if (!lastEncoder) return 0;
+
+        auto oldSeq = cb->sequenceNumber;
+        cb->sequenceNumber += 2;
+        lastEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, false, oldSeq + 1, true /* do lock */);
         lastEncoder->flush();
-        info.sequenceNumber++;
-        currentEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, true, info.sequenceNumber);
+        currentEncoder->vkCommandBufferHostSyncGOOGLE(commandBuffer, true, oldSeq + 2, true /* do lock */);
 
-        lastEncoder->unregisterCleanupCallback(commandBuffer);
+        if (lastEncoder->decRef()) {
+            cb->lastUsedEncoder = nullptr;
+        }
+        return 0;
+    }
 
-        currentEncoder->registerCleanupCallback(commandBuffer, [currentEncoder, lastUsedEncoderPtr]() {
-            if (*(lastUsedEncoderPtr) == currentEncoder) {
-                *(lastUsedEncoderPtr) = nullptr;
-            }
-        });
+    uint32_t syncEncodersForQueue(VkQueue queue, VkEncoder* currentEncoder) {
+        if (!supportsAsyncQueueSubmit()) {
+            return 0;
+        }
 
-        return 1;
+        struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
+        if (!q) return 0;
+
+        auto lastEncoder = q->lastUsedEncoder;
+
+        if (lastEncoder == currentEncoder) return 0;
+
+        currentEncoder->incRef();
+
+        q->lastUsedEncoder = currentEncoder;
+
+        if (!lastEncoder) return 0;
+
+        auto oldSeq = q->sequenceNumber;
+        q->sequenceNumber += 2;
+        lastEncoder->vkQueueHostSyncGOOGLE(queue, false, oldSeq + 1, true /* do lock */);
+        lastEncoder->flush();
+        currentEncoder->vkQueueHostSyncGOOGLE(queue, true, oldSeq + 2, true /* do lock */);
+
+        if (lastEncoder->decRef()) {
+            q->lastUsedEncoder = nullptr;
+        }
+
+        return 0;
     }
 
     VkResult on_vkBeginCommandBuffer(
@@ -4308,14 +6632,29 @@
         VkCommandBuffer commandBuffer,
         const VkCommandBufferBeginInfo* pBeginInfo) {
 
-        VkEncoder* enc = (VkEncoder*)context;
+        (void)context;
+
+        resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */, true /* also clear pending descriptor sets */);
+
+        VkEncoder* enc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
         (void)input_result;
 
-        if (!supportsDeferredCommands()) {
-            return enc->vkBeginCommandBuffer(commandBuffer, pBeginInfo);
+        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
+        cb->flags = pBeginInfo->flags;
+
+        VkCommandBufferBeginInfo modifiedBeginInfo;
+
+        if (pBeginInfo->pInheritanceInfo && !cb->isSecondary) {
+            modifiedBeginInfo = *pBeginInfo;
+            modifiedBeginInfo.pInheritanceInfo = nullptr;
+            pBeginInfo = &modifiedBeginInfo;
         }
 
-        enc->vkBeginCommandBufferAsyncGOOGLE(commandBuffer, pBeginInfo);
+        if (!supportsDeferredCommands()) {
+            return enc->vkBeginCommandBuffer(commandBuffer, pBeginInfo, true /* do lock */);
+        }
+
+        enc->vkBeginCommandBufferAsyncGOOGLE(commandBuffer, pBeginInfo, true /* do lock */);
 
         return VK_SUCCESS;
     }
@@ -4328,10 +6667,10 @@
         (void)input_result;
 
         if (!supportsDeferredCommands()) {
-            return enc->vkEndCommandBuffer(commandBuffer);
+            return enc->vkEndCommandBuffer(commandBuffer, true /* do lock */);
         }
 
-        enc->vkEndCommandBufferAsyncGOOGLE(commandBuffer);
+        enc->vkEndCommandBufferAsyncGOOGLE(commandBuffer, true /* do lock */);
 
         return VK_SUCCESS;
     }
@@ -4341,14 +6680,16 @@
         VkCommandBuffer commandBuffer,
         VkCommandBufferResetFlags flags) {
 
+        resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */, true /* also clear pending descriptor sets */);
+
         VkEncoder* enc = (VkEncoder*)context;
         (void)input_result;
 
         if (!supportsDeferredCommands()) {
-            return enc->vkResetCommandBuffer(commandBuffer, flags);
+            return enc->vkResetCommandBuffer(commandBuffer, flags, true /* do lock */);
         }
 
-        enc->vkResetCommandBufferAsyncGOOGLE(commandBuffer, flags);
+        enc->vkResetCommandBufferAsyncGOOGLE(commandBuffer, flags, true /* do lock */);
         return VK_SUCCESS;
     }
 
@@ -4375,7 +6716,120 @@
         }
 #endif
 
-        return enc->vkCreateImageView(device, &localCreateInfo, pAllocator, pView);
+        return enc->vkCreateImageView(device, &localCreateInfo, pAllocator, pView, true /* do lock */);
+    }
+
+    void on_vkCmdExecuteCommands(
+        void* context,
+        VkCommandBuffer commandBuffer,
+        uint32_t commandBufferCount,
+        const VkCommandBuffer* pCommandBuffers) {
+
+        VkEncoder* enc = (VkEncoder*)context;
+
+        if (!mFeatureInfo->hasVulkanQueueSubmitWithCommands) {
+            enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers, true /* do lock */);
+            return;
+        }
+
+        struct goldfish_VkCommandBuffer* primary = as_goldfish_VkCommandBuffer(commandBuffer);
+        for (uint32_t i = 0; i < commandBufferCount; ++i) {
+            struct goldfish_VkCommandBuffer* secondary = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
+            appendObject(&secondary->superObjects, primary);
+            appendObject(&primary->subObjects, secondary);
+        }
+
+        enc->vkCmdExecuteCommands(commandBuffer, commandBufferCount, pCommandBuffers, true /* do lock */);
+    }
+
+    void addPendingDescriptorSets(VkCommandBuffer commandBuffer, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets) {
+        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
+
+        if (!cb->userPtr) {
+            CommandBufferPendingDescriptorSets* newPendingSets =
+                new CommandBufferPendingDescriptorSets;
+            cb->userPtr = newPendingSets;
+        }
+
+        CommandBufferPendingDescriptorSets* pendingSets =
+            (CommandBufferPendingDescriptorSets*)cb->userPtr;
+
+        for (uint32_t i = 0; i < descriptorSetCount; ++i) {
+            pendingSets->sets.insert(pDescriptorSets[i]);
+        }
+    }
+
+    void on_vkCmdBindDescriptorSets(
+        void* context,
+        VkCommandBuffer commandBuffer,
+        VkPipelineBindPoint pipelineBindPoint,
+        VkPipelineLayout layout,
+        uint32_t firstSet,
+        uint32_t descriptorSetCount,
+        const VkDescriptorSet* pDescriptorSets,
+        uint32_t dynamicOffsetCount,
+        const uint32_t* pDynamicOffsets) {
+
+        VkEncoder* enc = (VkEncoder*)context;
+
+        if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate)
+            addPendingDescriptorSets(commandBuffer, descriptorSetCount, pDescriptorSets);
+
+        enc->vkCmdBindDescriptorSets(
+            commandBuffer,
+            pipelineBindPoint,
+            layout,
+            firstSet,
+            descriptorSetCount,
+            pDescriptorSets,
+            dynamicOffsetCount,
+            pDynamicOffsets,
+            true /* do lock */);
+    }
+
+    void decDescriptorSetLayoutRef(
+        void* context,
+        VkDevice device,
+        VkDescriptorSetLayout descriptorSetLayout,
+        const VkAllocationCallbacks* pAllocator) {
+
+        if (!descriptorSetLayout) return;
+
+        struct goldfish_VkDescriptorSetLayout* setLayout = as_goldfish_VkDescriptorSetLayout(descriptorSetLayout);
+
+        if (0 == --setLayout->layoutInfo->refcount) {
+            VkEncoder* enc = (VkEncoder*)context;
+            enc->vkDestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator, true /* do lock */);
+        }
+    }
+
+    void on_vkDestroyDescriptorSetLayout(
+        void* context,
+        VkDevice device,
+        VkDescriptorSetLayout descriptorSetLayout,
+        const VkAllocationCallbacks* pAllocator) {
+        decDescriptorSetLayoutRef(context, device, descriptorSetLayout, pAllocator);
+    }
+
+    VkResult on_vkAllocateCommandBuffers(
+        void* context,
+        VkResult input_result,
+        VkDevice device,
+        const VkCommandBufferAllocateInfo* pAllocateInfo,
+        VkCommandBuffer* pCommandBuffers) {
+
+        (void)input_result;
+
+        VkEncoder* enc = (VkEncoder*)context;
+        VkResult res = enc->vkAllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers, true /* do lock */);
+        if (VK_SUCCESS != res) return res;
+
+        for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; ++i) {
+            struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
+            cb->isSecondary = pAllocateInfo->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY;
+        }
+
+        return res;
     }
 
     uint32_t getApiVersionFromInstance(VkInstance instance) const {
@@ -4423,12 +6877,78 @@
                it->second.enabledExtensions.end();
     }
 
+    // Resets staging stream for this command buffer and primary command buffers
+    // where this command buffer has been recorded.
+    void resetCommandBufferStagingInfo(VkCommandBuffer commandBuffer, bool alsoResetPrimaries, bool alsoClearPendingDescriptorSets) {
+        struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
+        if (!cb) {
+            return;
+        }
+        if (cb->privateEncoder) {
+            sStaging.pushStaging((CommandBufferStagingStream*)cb->privateStream, cb->privateEncoder);
+            cb->privateEncoder = nullptr;
+            cb->privateStream = nullptr;
+        }
+
+        if (alsoClearPendingDescriptorSets && cb->userPtr) {
+            CommandBufferPendingDescriptorSets* pendingSets = (CommandBufferPendingDescriptorSets*)cb->userPtr;
+            pendingSets->sets.clear();
+        }
+
+        if (alsoResetPrimaries) {
+            forAllObjects(cb->superObjects, [this, alsoResetPrimaries, alsoClearPendingDescriptorSets](void* obj) {
+                VkCommandBuffer superCommandBuffer = (VkCommandBuffer)obj;
+                struct goldfish_VkCommandBuffer* superCb = as_goldfish_VkCommandBuffer(superCommandBuffer);
+                this->resetCommandBufferStagingInfo(superCommandBuffer, alsoResetPrimaries, alsoClearPendingDescriptorSets);
+            });
+            eraseObjects(&cb->superObjects);
+        }
+
+        forAllObjects(cb->subObjects, [cb](void* obj) {
+            VkCommandBuffer subCommandBuffer = (VkCommandBuffer)obj;
+            struct goldfish_VkCommandBuffer* subCb = as_goldfish_VkCommandBuffer(subCommandBuffer);
+            // We don't do resetCommandBufferStagingInfo(subCommandBuffer)
+            // since the user still might have submittable stuff pending there.
+            eraseObject(&subCb->superObjects, (void*)cb);
+        });
+
+        eraseObjects(&cb->subObjects);
+    }
+
+    void resetCommandPoolStagingInfo(VkCommandPool commandPool) {
+        struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
+
+        if (!p) return;
+
+        forAllObjects(p->subObjects, [this](void* commandBuffer) {
+            this->resetCommandBufferStagingInfo((VkCommandBuffer)commandBuffer, true /* also reset primaries */, true /* also clear pending descriptor sets */);
+        });
+    }
+
+    void addToCommandPool(VkCommandPool commandPool,
+                          uint32_t commandBufferCount,
+                          VkCommandBuffer* pCommandBuffers) {
+        for (uint32_t i = 0; i < commandBufferCount; ++i) {
+            struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
+            struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(pCommandBuffers[i]);
+            appendObject(&p->subObjects, (void*)(pCommandBuffers[i]));
+            appendObject(&cb->poolObjects, (void*)commandPool);
+        }
+    }
+
+    void clearCommandPool(VkCommandPool commandPool) {
+        resetCommandPoolStagingInfo(commandPool);
+        struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool(commandPool);
+        forAllObjects(p->subObjects, [this](void* commandBuffer) {
+            this->unregister_VkCommandBuffer((VkCommandBuffer)commandBuffer);
+        });
+        eraseObjects(&p->subObjects);
+    }
+
 private:
-    mutable Lock mLock;
+    mutable RecursiveLock mLock;
     HostVisibleMemoryVirtualizationInfo mHostVisibleMemoryVirtInfo;
     std::unique_ptr<EmulatorFeatureInfo> mFeatureInfo;
-    ResourceTracker::ThreadingCallbacks mThreadingCallbacks;
-    uint32_t mStreamFeatureBits = 0;
     std::unique_ptr<GoldfishAddressSpaceBlockProvider> mGoldfishAddressSpaceBlockProvider;
 
     std::vector<VkExtensionProperties> mHostInstanceExtensions;
@@ -4440,13 +6960,19 @@
 #endif
 
 #ifdef VK_USE_PLATFORM_FUCHSIA
-    fuchsia::hardware::goldfish::ControlDeviceSyncPtr mControlDevice;
-    fuchsia::sysmem::AllocatorSyncPtr mSysmemAllocator;
+    std::unique_ptr<
+        fidl::WireSyncClient<fuchsia_hardware_goldfish::ControlDevice>>
+        mControlDevice;
+    std::unique_ptr<fidl::WireSyncClient<fuchsia_sysmem::Allocator>>
+        mSysmemAllocator;
 #endif
 
     WorkPool mWorkPool { 4 };
     std::unordered_map<VkQueue, std::vector<WorkPool::WaitGroupHandle>>
         mQueueSensitiveWorkPoolItems;
+
+    std::unordered_map<const VkEncoder*, std::unordered_map<void*, CleanupCallback>> mEncoderCleanupCallbacks;
+
 };
 
 ResourceTracker::ResourceTracker() : mImpl(new ResourceTracker::Impl()) { }
@@ -4537,6 +7063,65 @@
 bool ResourceTracker::hasDeviceExtension(VkDevice device, const std::string &name) const {
     return mImpl->hasDeviceExtension(device, name);
 }
+void ResourceTracker::addToCommandPool(VkCommandPool commandPool,
+                      uint32_t commandBufferCount,
+                      VkCommandBuffer* pCommandBuffers) {
+    mImpl->addToCommandPool(commandPool, commandBufferCount, pCommandBuffers);
+}
+void ResourceTracker::resetCommandPoolStagingInfo(VkCommandPool commandPool) {
+    mImpl->resetCommandPoolStagingInfo(commandPool);
+}
+
+
+// static
+__attribute__((always_inline)) VkEncoder* ResourceTracker::getCommandBufferEncoder(VkCommandBuffer commandBuffer) {
+    if (!(ResourceTracker::streamFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
+        auto enc = ResourceTracker::getThreadLocalEncoder();
+        ResourceTracker::get()->syncEncodersForCommandBuffer(commandBuffer, enc);
+        return enc;
+    }
+
+    struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
+    if (!cb->privateEncoder) {
+        sStaging.popStaging((CommandBufferStagingStream**)&cb->privateStream, &cb->privateEncoder);
+    }
+    uint8_t* writtenPtr; size_t written;
+    ((CommandBufferStagingStream*)cb->privateStream)->getWritten(&writtenPtr, &written);
+    return cb->privateEncoder;
+}
+
+// static
+__attribute__((always_inline)) VkEncoder* ResourceTracker::getQueueEncoder(VkQueue queue) {
+    auto enc = ResourceTracker::getThreadLocalEncoder();
+    if (!(ResourceTracker::streamFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT)) {
+        ResourceTracker::get()->syncEncodersForQueue(queue, enc);
+    }
+    return enc;
+}
+
+// static
+__attribute__((always_inline)) VkEncoder* ResourceTracker::getThreadLocalEncoder() {
+    auto hostConn = ResourceTracker::threadingCallbacks.hostConnectionGetFunc();
+    auto vkEncoder = ResourceTracker::threadingCallbacks.vkEncoderGetFunc(hostConn);
+    return vkEncoder;
+}
+
+// static
+void ResourceTracker::setSeqnoPtr(uint32_t* seqnoptr) {
+    sSeqnoPtr = seqnoptr;
+}
+
+// static
+__attribute__((always_inline)) uint32_t ResourceTracker::nextSeqno() {
+    uint32_t res = __atomic_add_fetch(sSeqnoPtr, 1, __ATOMIC_SEQ_CST);
+    return res;
+}
+
+// static
+__attribute__((always_inline)) uint32_t ResourceTracker::getSeqno() {
+    uint32_t res = __atomic_load_n(sSeqnoPtr, __ATOMIC_SEQ_CST);
+    return res;
+}
 
 VkResult ResourceTracker::on_vkEnumerateInstanceExtensionProperties(
     void* context,
@@ -4568,6 +7153,30 @@
         pPhysicalDevices);
 }
 
+void ResourceTracker::on_vkGetPhysicalDeviceProperties(
+    void* context,
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceProperties* pProperties) {
+    mImpl->on_vkGetPhysicalDeviceProperties(context, physicalDevice,
+        pProperties);
+}
+
+void ResourceTracker::on_vkGetPhysicalDeviceProperties2(
+    void* context,
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceProperties2* pProperties) {
+    mImpl->on_vkGetPhysicalDeviceProperties2(context, physicalDevice,
+        pProperties);
+}
+
+void ResourceTracker::on_vkGetPhysicalDeviceProperties2KHR(
+    void* context,
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceProperties2* pProperties) {
+    mImpl->on_vkGetPhysicalDeviceProperties2(context, physicalDevice,
+        pProperties);
+}
+
 void ResourceTracker::on_vkGetPhysicalDeviceMemoryProperties(
     void* context,
     VkPhysicalDevice physicalDevice,
@@ -4592,6 +7201,22 @@
         context, physicalDevice, pMemoryProperties);
 }
 
+void ResourceTracker::on_vkGetDeviceQueue(void* context,
+                                          VkDevice device,
+                                          uint32_t queueFamilyIndex,
+                                          uint32_t queueIndex,
+                                          VkQueue* pQueue) {
+    mImpl->on_vkGetDeviceQueue(context, device, queueFamilyIndex, queueIndex,
+                               pQueue);
+}
+
+void ResourceTracker::on_vkGetDeviceQueue2(void* context,
+                                           VkDevice device,
+                                           const VkDeviceQueueInfo2* pQueueInfo,
+                                           VkQueue* pQueue) {
+    mImpl->on_vkGetDeviceQueue2(context, device, pQueueInfo, pQueue);
+}
+
 VkResult ResourceTracker::on_vkCreateInstance(
     void* context,
     VkResult input_result,
@@ -4898,6 +7523,25 @@
         context, input_result, device, collection, pImageInfo);
 }
 
+VkResult ResourceTracker::on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
+        void* context, VkResult input_result,
+        VkDevice device,
+        VkBufferCollectionFUCHSIA collection,
+        const VkBufferConstraintsInfoFUCHSIA* pBufferDConstraintsInfo) {
+    return mImpl->on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
+        context, input_result, device, collection, pBufferDConstraintsInfo);
+}
+
+VkResult ResourceTracker::on_vkSetBufferCollectionImageConstraintsFUCHSIA(
+    void* context,
+    VkResult input_result,
+    VkDevice device,
+    VkBufferCollectionFUCHSIA collection,
+    const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
+    return mImpl->on_vkSetBufferCollectionImageConstraintsFUCHSIA(
+        context, input_result, device, collection, pImageConstraintsInfo);
+}
+
 VkResult ResourceTracker::on_vkGetBufferCollectionPropertiesFUCHSIA(
         void* context, VkResult input_result,
         VkDevice device,
@@ -4906,6 +7550,16 @@
     return mImpl->on_vkGetBufferCollectionPropertiesFUCHSIA(
         context, input_result, device, collection, pProperties);
 }
+
+VkResult ResourceTracker::on_vkGetBufferCollectionProperties2FUCHSIA(
+    void* context,
+    VkResult input_result,
+    VkDevice device,
+    VkBufferCollectionFUCHSIA collection,
+    VkBufferCollectionProperties2FUCHSIA* pProperties) {
+    return mImpl->on_vkGetBufferCollectionProperties2FUCHSIA(
+        context, input_result, device, collection, pProperties);
+}
 #endif
 
 VkResult ResourceTracker::on_vkGetAndroidHardwareBufferPropertiesANDROID(
@@ -5197,10 +7851,47 @@
         pImageFormatProperties);
 }
 
+void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphoreProperties(
+    void* context,
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
+    VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
+    mImpl->on_vkGetPhysicalDeviceExternalSemaphoreProperties(
+        context, physicalDevice, pExternalSemaphoreInfo,
+        pExternalSemaphoreProperties);
+}
+
+void ResourceTracker::on_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(
+    void* context,
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
+    VkExternalSemaphoreProperties* pExternalSemaphoreProperties) {
+    mImpl->on_vkGetPhysicalDeviceExternalSemaphoreProperties(
+        context, physicalDevice, pExternalSemaphoreInfo,
+        pExternalSemaphoreProperties);
+}
+
+void ResourceTracker::registerEncoderCleanupCallback(const VkEncoder* encoder, void* handle, ResourceTracker::CleanupCallback callback) {
+    mImpl->registerEncoderCleanupCallback(encoder, handle, callback);
+}
+
+void ResourceTracker::unregisterEncoderCleanupCallback(const VkEncoder* encoder, void* handle) {
+    mImpl->unregisterEncoderCleanupCallback(encoder, handle);
+}
+
+void ResourceTracker::onEncoderDeleted(const VkEncoder* encoder) {
+    mImpl->onEncoderDeleted(encoder);
+}
+
 uint32_t ResourceTracker::syncEncodersForCommandBuffer(VkCommandBuffer commandBuffer, VkEncoder* current) {
     return mImpl->syncEncodersForCommandBuffer(commandBuffer, current);
 }
 
+uint32_t ResourceTracker::syncEncodersForQueue(VkQueue queue, VkEncoder* current) {
+    return mImpl->syncEncodersForQueue(queue, current);
+}
+
+
 VkResult ResourceTracker::on_vkBeginCommandBuffer(
     void* context, VkResult input_result,
     VkCommandBuffer commandBuffer,
@@ -5234,6 +7925,54 @@
         context, input_result, device, pCreateInfo, pAllocator, pView);
 }
 
+void ResourceTracker::on_vkCmdExecuteCommands(
+    void* context,
+    VkCommandBuffer commandBuffer,
+    uint32_t commandBufferCount,
+    const VkCommandBuffer* pCommandBuffers) {
+    mImpl->on_vkCmdExecuteCommands(
+        context, commandBuffer, commandBufferCount, pCommandBuffers);
+}
+
+void ResourceTracker::on_vkCmdBindDescriptorSets(
+    void* context,
+    VkCommandBuffer commandBuffer,
+    VkPipelineBindPoint pipelineBindPoint,
+    VkPipelineLayout layout,
+    uint32_t firstSet,
+    uint32_t descriptorSetCount,
+    const VkDescriptorSet* pDescriptorSets,
+    uint32_t dynamicOffsetCount,
+    const uint32_t* pDynamicOffsets) {
+    mImpl->on_vkCmdBindDescriptorSets(
+        context,
+        commandBuffer,
+        pipelineBindPoint, 
+        layout,
+        firstSet,
+        descriptorSetCount,
+        pDescriptorSets,
+        dynamicOffsetCount,
+        pDynamicOffsets);
+}
+
+void ResourceTracker::on_vkDestroyDescriptorSetLayout(
+    void* context,
+    VkDevice device,
+    VkDescriptorSetLayout descriptorSetLayout,
+    const VkAllocationCallbacks* pAllocator) {
+    mImpl->on_vkDestroyDescriptorSetLayout(context, device, descriptorSetLayout, pAllocator);
+}
+
+VkResult ResourceTracker::on_vkAllocateCommandBuffers(
+    void* context,
+    VkResult input_result,
+    VkDevice device,
+    const VkCommandBufferAllocateInfo* pAllocateInfo,
+    VkCommandBuffer* pCommandBuffers) {
+    return mImpl->on_vkAllocateCommandBuffers(context, input_result, device, pAllocateInfo, pCommandBuffers);
+}
+
 void ResourceTracker::deviceMemoryTransform_tohost(
     VkDeviceMemory* memory, uint32_t memoryCount,
     VkDeviceSize* offset, uint32_t offsetCount,
@@ -5262,10 +8001,20 @@
         typeBits, typeBitsCount);
 }
 
-#define DEFINE_TRANSFORMED_TYPE_IMPL(type) \
-    void ResourceTracker::transformImpl_##type##_tohost(const type*, uint32_t) { } \
-    void ResourceTracker::transformImpl_##type##_fromhost(const type*, uint32_t) { } \
+void ResourceTracker::transformImpl_VkExternalMemoryProperties_fromhost(
+    VkExternalMemoryProperties* pProperties,
+    uint32_t lenAccess) {
+    mImpl->transformImpl_VkExternalMemoryProperties_fromhost(pProperties,
+                                                             lenAccess);
+}
 
-LIST_TRANSFORMED_TYPES(DEFINE_TRANSFORMED_TYPE_IMPL)
+void ResourceTracker::transformImpl_VkExternalMemoryProperties_tohost(
+    VkExternalMemoryProperties*, uint32_t) {}
+
+#define DEFINE_TRANSFORMED_TYPE_IMPL(type)                                  \
+    void ResourceTracker::transformImpl_##type##_tohost(type*, uint32_t) {} \
+    void ResourceTracker::transformImpl_##type##_fromhost(type*, uint32_t) {}
+
+LIST_TRIVIAL_TRANSFORMED_TYPES(DEFINE_TRANSFORMED_TYPE_IMPL)
 
 } // namespace goldfish_vk
diff --git a/system/vulkan_enc/ResourceTracker.h b/system/vulkan_enc/ResourceTracker.h
index 9a72bdf..86e4dde 100644
--- a/system/vulkan_enc/ResourceTracker.h
+++ b/system/vulkan_enc/ResourceTracker.h
@@ -20,6 +20,7 @@
 
 #include "VulkanHandleMapping.h"
 #include "VulkanHandles.h"
+#include <functional>
 #include <memory>
 
 #include "goldfish_vk_transform_guest.h"
@@ -37,6 +38,7 @@
     ResourceTracker();
     virtual ~ResourceTracker();
     static ResourceTracker* get();
+
     VulkanHandleMapping* createMapping();
     VulkanHandleMapping* unwrapMapping();
     VulkanHandleMapping* destroyMapping();
@@ -44,12 +46,16 @@
 
     using HostConnectionGetFunc = HostConnection* (*)();
     using VkEncoderGetFunc = VkEncoder* (*)(HostConnection*);
+    using CleanupCallback = std::function<void()>;
 
     struct ThreadingCallbacks {
         HostConnectionGetFunc hostConnectionGetFunc = 0;
         VkEncoderGetFunc vkEncoderGetFunc = 0;
     };
 
+    static uint32_t streamFeatureBits;
+    static ThreadingCallbacks threadingCallbacks;
+
 #define HANDLE_REGISTER_DECL(type) \
     void register_##type(type); \
     void unregister_##type(type); \
@@ -76,6 +82,19 @@
         VkInstance instance, uint32_t* pPhysicalDeviceCount,
         VkPhysicalDevice* pPhysicalDevices);
 
+    void on_vkGetPhysicalDeviceProperties(
+        void* context,
+        VkPhysicalDevice physicalDevice,
+        VkPhysicalDeviceProperties* pProperties);
+    void on_vkGetPhysicalDeviceProperties2(
+        void* context,
+        VkPhysicalDevice physicalDevice,
+        VkPhysicalDeviceProperties2* pProperties);
+    void on_vkGetPhysicalDeviceProperties2KHR(
+        void* context,
+        VkPhysicalDevice physicalDevice,
+        VkPhysicalDeviceProperties2* pProperties);
+
     void on_vkGetPhysicalDeviceMemoryProperties(
         void* context,
         VkPhysicalDevice physicalDevice,
@@ -88,6 +107,15 @@
         void* context,
         VkPhysicalDevice physicalDevice,
         VkPhysicalDeviceMemoryProperties2* pMemoryProperties);
+    void on_vkGetDeviceQueue(void* context,
+                             VkDevice device,
+                             uint32_t queueFamilyIndex,
+                             uint32_t queueIndex,
+                             VkQueue* pQueue);
+    void on_vkGetDeviceQueue2(void* context,
+                              VkDevice device,
+                              const VkDeviceQueueInfo2* pQueueInfo,
+                              VkQueue* pQueue);
 
     VkResult on_vkCreateInstance(
         void* context,
@@ -261,11 +289,29 @@
         VkDevice device,
         VkBufferCollectionFUCHSIA collection,
         const VkImageCreateInfo* pImageInfo);
+    VkResult on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
+        void* context,
+        VkResult input_result,
+        VkDevice device,
+        VkBufferCollectionFUCHSIA collection,
+        const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo);
+    VkResult on_vkSetBufferCollectionImageConstraintsFUCHSIA(
+        void* context,
+        VkResult input_result,
+        VkDevice device,
+        VkBufferCollectionFUCHSIA collection,
+        const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo);
     VkResult on_vkGetBufferCollectionPropertiesFUCHSIA(
         void* context, VkResult input_result,
         VkDevice device,
         VkBufferCollectionFUCHSIA collection,
         VkBufferCollectionPropertiesFUCHSIA* pProperties);
+    VkResult on_vkGetBufferCollectionProperties2FUCHSIA(
+        void* context,
+        VkResult input_result,
+        VkDevice device,
+        VkBufferCollectionFUCHSIA collection,
+        VkBufferCollectionProperties2FUCHSIA* pProperties);
 #endif
 
     VkResult on_vkGetAndroidHardwareBufferPropertiesANDROID(
@@ -461,7 +507,24 @@
         const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
         VkImageFormatProperties2* pImageFormatProperties);
 
+    void on_vkGetPhysicalDeviceExternalSemaphoreProperties(
+        void* context,
+        VkPhysicalDevice physicalDevice,
+        const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
+        VkExternalSemaphoreProperties* pExternalSemaphoreProperties);
+
+    void on_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(
+        void* context,
+        VkPhysicalDevice physicalDevice,
+        const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
+        VkExternalSemaphoreProperties* pExternalSemaphoreProperties);
+
+    void registerEncoderCleanupCallback(const VkEncoder* encoder, void* handle, CleanupCallback callback);
+    void unregisterEncoderCleanupCallback(const VkEncoder* encoder, void* handle);
+    void onEncoderDeleted(const VkEncoder* encoder);
+
     uint32_t syncEncodersForCommandBuffer(VkCommandBuffer commandBuffer, VkEncoder* current);
+    uint32_t syncEncodersForQueue(VkQueue queue, VkEncoder* current);
 
     VkResult on_vkBeginCommandBuffer(
         void* context, VkResult input_result,
@@ -482,6 +545,36 @@
         const VkAllocationCallbacks* pAllocator,
         VkImageView* pView);
 
+    void on_vkCmdExecuteCommands(
+        void* context,
+        VkCommandBuffer commandBuffer,
+        uint32_t commandBufferCount,
+        const VkCommandBuffer* pCommandBuffers);
+
+    void on_vkCmdBindDescriptorSets(
+        void* context,
+        VkCommandBuffer commandBuffer,
+        VkPipelineBindPoint pipelineBindPoint,
+        VkPipelineLayout layout,
+        uint32_t firstSet,
+        uint32_t descriptorSetCount,
+        const VkDescriptorSet* pDescriptorSets,
+        uint32_t dynamicOffsetCount,
+        const uint32_t* pDynamicOffsets);
+
+    void on_vkDestroyDescriptorSetLayout(
+        void* context,
+        VkDevice device,
+        VkDescriptorSetLayout descriptorSetLayout,
+        const VkAllocationCallbacks* pAllocator);
+
+    VkResult on_vkAllocateCommandBuffers(
+        void* context,
+        VkResult input_result,
+        VkDevice device,
+        const VkCommandBufferAllocateInfo* pAllocateInfo,
+        VkCommandBuffer* pCommandBuffers);
+
     bool isMemoryTypeHostVisible(VkDevice device, uint32_t typeIndex) const;
     uint8_t* getMappedPointer(VkDeviceMemory memory);
     VkDeviceSize getMappedSize(VkDeviceMemory memory);
@@ -496,6 +589,19 @@
     uint32_t getApiVersionFromDevice(VkDevice device) const;
     bool hasInstanceExtension(VkInstance instance, const std::string& name) const;
     bool hasDeviceExtension(VkDevice instance, const std::string& name) const;
+    void addToCommandPool(VkCommandPool commandPool,
+                          uint32_t commandBufferCount,
+                          VkCommandBuffer* pCommandBuffers);
+    void resetCommandPoolStagingInfo(VkCommandPool commandPool);
+
+
+    static VkEncoder* getCommandBufferEncoder(VkCommandBuffer commandBuffer);
+    static VkEncoder* getQueueEncoder(VkQueue queue);
+    static VkEncoder* getThreadLocalEncoder();
+
+    static void setSeqnoPtr(uint32_t* seqnoptr);
+    static __attribute__((always_inline)) uint32_t nextSeqno();
+    static __attribute__((always_inline)) uint32_t getSeqno();
 
     // Transforms
     void deviceMemoryTransform_tohost(
@@ -511,13 +617,20 @@
         uint32_t* typeIndex, uint32_t typeIndexCount,
         uint32_t* typeBits, uint32_t typeBitsCount);
 
-#define DEFINE_TRANSFORMED_TYPE_PROTOTYPE(type) \
-    void transformImpl_##type##_tohost(const type*, uint32_t); \
-    void transformImpl_##type##_fromhost(const type*, uint32_t); \
+    void transformImpl_VkExternalMemoryProperties_fromhost(
+        VkExternalMemoryProperties* pProperties,
+        uint32_t);
+    void transformImpl_VkExternalMemoryProperties_tohost(
+        VkExternalMemoryProperties* pProperties,
+        uint32_t);
 
-LIST_TRANSFORMED_TYPES(DEFINE_TRANSFORMED_TYPE_PROTOTYPE)
+#define DEFINE_TRANSFORMED_TYPE_PROTOTYPE(type)          \
+    void transformImpl_##type##_tohost(type*, uint32_t); \
+    void transformImpl_##type##_fromhost(type*, uint32_t);
 
-  private:
+    LIST_TRIVIAL_TRANSFORMED_TYPES(DEFINE_TRANSFORMED_TYPE_PROTOTYPE)
+
+private:
     class Impl;
     std::unique_ptr<Impl> mImpl;
 };
diff --git a/system/vulkan_enc/Resources.cpp b/system/vulkan_enc/Resources.cpp
index b70d222..041b00b 100644
--- a/system/vulkan_enc/Resources.cpp
+++ b/system/vulkan_enc/Resources.cpp
@@ -38,6 +38,15 @@
         } \
         res->dispatch.magic = HWVULKAN_DISPATCH_MAGIC; \
         res->underlying = (uint64_t)underlying; \
+        res->lastUsedEncoder = nullptr; \
+        res->sequenceNumber = 0; \
+        res->privateEncoder = 0; \
+        res->privateStream = 0; \
+        res->flags = 0; \
+        res->poolObjects = 0; \
+        res->subObjects = 0; \
+        res->superObjects = 0; \
+        res->userPtr = 0; \
         return reinterpret_cast<type>(res); \
     } \
 
@@ -46,6 +55,10 @@
         struct goldfish_##type* res = \
             static_cast<goldfish_##type*>(malloc(sizeof(goldfish_##type))); \
         res->underlying = (uint64_t)underlying; \
+        res->poolObjects = 0; \
+        res->subObjects = 0; \
+        res->superObjects = 0; \
+        res->userPtr = 0; \
         return reinterpret_cast<type>(res); \
     } \
 
@@ -82,6 +95,15 @@
         } \
         res->dispatch.magic = HWVULKAN_DISPATCH_MAGIC; \
         res->underlying = underlying; \
+        res->lastUsedEncoder = nullptr; \
+        res->sequenceNumber = 0; \
+        res->privateEncoder = 0; \
+        res->privateStream = 0; \
+        res->flags = 0; \
+        res->poolObjects = 0; \
+        res->subObjects = 0; \
+        res->superObjects = 0; \
+        res->userPtr = 0; \
         return reinterpret_cast<type>(res); \
     } \
 
@@ -91,6 +113,10 @@
             static_cast<goldfish_##type*>(malloc(sizeof(goldfish_##type))); \
         res->underlying = underlying; \
         D("guest %p: host u64: 0x%llx", res, (unsigned long long)res->underlying); \
+        res->poolObjects = 0; \
+        res->subObjects = 0; \
+        res->superObjects = 0; \
+        res->userPtr = 0; \
         return reinterpret_cast<type>(res); \
     } \
 
@@ -114,8 +140,125 @@
 GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_GET_HOST_IMPL)
 GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_IDENTITY_IMPL)
 GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_GET_HOST_U64_IMPL)
-GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_NEW_TRIVIAL_NON_DISPATCHABLE_FROM_HOST_IMPL)
-GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_NEW_TRIVIAL_NON_DISPATCHABLE_FROM_HOST_U64_IMPL)
+GOLDFISH_VK_LIST_AUTODEFINED_STRUCT_NON_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_NEW_TRIVIAL_NON_DISPATCHABLE_FROM_HOST_IMPL)
+GOLDFISH_VK_LIST_AUTODEFINED_STRUCT_NON_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_NEW_TRIVIAL_NON_DISPATCHABLE_FROM_HOST_U64_IMPL)
 GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_DELETE_GOLDFISH_IMPL)
 
+VkDescriptorPool new_from_host_VkDescriptorPool(VkDescriptorPool underlying) {
+    struct goldfish_VkDescriptorPool* res =
+        static_cast<goldfish_VkDescriptorPool*>(malloc(sizeof(goldfish_VkDescriptorPool)));
+    res->underlying = (uint64_t)underlying;
+    res->allocInfo = nullptr;
+    return reinterpret_cast<VkDescriptorPool>(res);
+}
+
+VkDescriptorPool new_from_host_u64_VkDescriptorPool(uint64_t underlying) {
+    return new_from_host_VkDescriptorPool((VkDescriptorPool)underlying);
+}
+
+VkDescriptorSet new_from_host_VkDescriptorSet(VkDescriptorSet underlying) {
+    struct goldfish_VkDescriptorSet* res =
+        static_cast<goldfish_VkDescriptorSet*>(malloc(sizeof(goldfish_VkDescriptorSet)));
+    res->underlying = (uint64_t)underlying;
+    res->reified = nullptr;
+    return reinterpret_cast<VkDescriptorSet>(res);
+}
+
+VkDescriptorSet new_from_host_u64_VkDescriptorSet(uint64_t underlying) {
+    return new_from_host_VkDescriptorSet((VkDescriptorSet)underlying);
+}
+
+VkDescriptorSetLayout new_from_host_VkDescriptorSetLayout(VkDescriptorSetLayout underlying) {
+    struct goldfish_VkDescriptorSetLayout* res =
+        static_cast<goldfish_VkDescriptorSetLayout*>(malloc(sizeof(goldfish_VkDescriptorSetLayout)));
+    res->underlying = (uint64_t)underlying;
+    res->layoutInfo = nullptr;
+    return reinterpret_cast<VkDescriptorSetLayout>(res);
+}
+
+VkDescriptorSetLayout new_from_host_u64_VkDescriptorSetLayout(uint64_t underlying) {
+    return new_from_host_VkDescriptorSetLayout((VkDescriptorSetLayout)underlying);
+}
+
 } // extern "C"
+
+namespace goldfish_vk {
+
+void appendObject(struct goldfish_vk_object_list** begin, void* val) {
+    D("for %p", val);
+    struct goldfish_vk_object_list* o = new goldfish_vk_object_list;
+    o->next = nullptr;
+    o->obj = val;
+    D("new ptr: %p", o);
+    if (!*begin) { D("first"); *begin = o; return; }
+
+    struct goldfish_vk_object_list* q = *begin;
+    struct goldfish_vk_object_list* p = q;
+
+    while (q) {
+        p = q;
+        q = q->next;
+    }
+
+    D("set next of %p to %p", p, o);
+    p->next = o;
+}
+
+void eraseObject(struct goldfish_vk_object_list** begin, void* val) {
+        D("for val %p", val);
+    if (!*begin) {
+        D("val %p notfound", val);
+        return;
+    }
+
+    struct goldfish_vk_object_list* q = *begin;
+    struct goldfish_vk_object_list* p = q;
+
+    while (q) {
+        struct goldfish_vk_object_list* n = q->next;
+        if (val == q->obj) {
+            D("val %p found, delete", val);
+            delete q;
+            if (*begin == q) {
+                D("val %p set begin to %p:", val, n);
+                *begin = n;
+            } else {
+                D("val %p set pnext to %p:", val, n);
+                p->next = n;
+            }
+            return;
+        }
+        p = q;
+        q = n;
+    }
+
+        D("val %p notfound after looping", val);
+}
+
+void eraseObjects(struct goldfish_vk_object_list** begin) {
+    struct goldfish_vk_object_list* q = *begin;
+    struct goldfish_vk_object_list* p = q;
+
+    while (q) {
+        p = q;
+        q = q->next;
+        delete p;
+    }
+
+    *begin = nullptr;
+}
+
+void forAllObjects(struct goldfish_vk_object_list* begin, std::function<void(void*)> func) {
+    struct goldfish_vk_object_list* q = begin;
+    struct goldfish_vk_object_list* p = q;
+
+    D("call");
+    while (q) {
+        D("iter");
+        p = q;
+        q = q->next;
+        func(p->obj);
+    }
+}
+
+} // namespace goldfish_vk
diff --git a/system/vulkan_enc/Resources.h b/system/vulkan_enc/Resources.h
index 0948df6..67c498e 100644
--- a/system/vulkan_enc/Resources.h
+++ b/system/vulkan_enc/Resources.h
@@ -20,17 +20,46 @@
 
 #include <inttypes.h>
 
+#include <functional>
+
+namespace goldfish_vk {
+class VkEncoder;
+struct DescriptorPoolAllocationInfo;
+struct ReifiedDescriptorSet;
+struct DescriptorSetLayoutInfo;
+} // namespace goldfish_vk
+
+class IOStream;
+
 extern "C" {
 
+struct goldfish_vk_object_list {
+    void* obj;
+    struct goldfish_vk_object_list* next;
+};
+
 #define GOLDFISH_VK_DEFINE_DISPATCHABLE_HANDLE_STRUCT(type) \
     struct goldfish_##type { \
         hwvulkan_dispatch_t dispatch; \
         uint64_t underlying; \
+        goldfish_vk::VkEncoder* lastUsedEncoder; \
+        uint32_t sequenceNumber; \
+        goldfish_vk::VkEncoder* privateEncoder; \
+        IOStream* privateStream; \
+        uint32_t flags; \
+        struct goldfish_vk_object_list* poolObjects; \
+        struct goldfish_vk_object_list* subObjects; \
+        struct goldfish_vk_object_list* superObjects; \
+        void* userPtr; \
     }; \
 
 #define GOLDFISH_VK_DEFINE_TRIVIAL_NON_DISPATCHABLE_HANDLE_STRUCT(type) \
     struct goldfish_##type { \
         uint64_t underlying; \
+        struct goldfish_vk_object_list* poolObjects; \
+        struct goldfish_vk_object_list* subObjects; \
+        struct goldfish_vk_object_list* superObjects; \
+        void* userPtr; \
     }; \
 
 #define GOLDFISH_VK_NEW_FROM_HOST_DECL(type) \
@@ -54,7 +83,7 @@
 #define GOLDFISH_VK_GET_HOST_U64_DECL(type) \
     uint64_t get_host_u64_##type(type);
 
-GOLDFISH_VK_LIST_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_DEFINE_DISPATCHABLE_HANDLE_STRUCT)
+GOLDFISH_VK_LIST_AUTODEFINED_STRUCT_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_DEFINE_DISPATCHABLE_HANDLE_STRUCT)
 GOLDFISH_VK_LIST_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_NEW_FROM_HOST_DECL)
 GOLDFISH_VK_LIST_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_AS_GOLDFISH_DECL)
 GOLDFISH_VK_LIST_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_GET_HOST_DECL)
@@ -70,6 +99,45 @@
 GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_IDENTITY_DECL)
 GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_NEW_FROM_HOST_U64_DECL)
 GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_GET_HOST_U64_DECL)
-GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_DEFINE_TRIVIAL_NON_DISPATCHABLE_HANDLE_STRUCT)
+GOLDFISH_VK_LIST_AUTODEFINED_STRUCT_NON_DISPATCHABLE_HANDLE_TYPES(GOLDFISH_VK_DEFINE_TRIVIAL_NON_DISPATCHABLE_HANDLE_STRUCT)
+
+struct goldfish_VkDescriptorPool {
+    uint64_t underlying;
+    goldfish_vk::DescriptorPoolAllocationInfo* allocInfo;
+};
+
+struct goldfish_VkDescriptorSet {
+    uint64_t underlying;
+    goldfish_vk::ReifiedDescriptorSet* reified;
+};
+
+struct goldfish_VkDescriptorSetLayout {
+    uint64_t underlying;
+    goldfish_vk::DescriptorSetLayoutInfo* layoutInfo;
+};
+
+struct goldfish_VkCommandBuffer {
+    hwvulkan_dispatch_t dispatch;
+    uint64_t underlying;
+    goldfish_vk::VkEncoder* lastUsedEncoder;
+    uint32_t sequenceNumber;
+    goldfish_vk::VkEncoder* privateEncoder;
+    IOStream* privateStream;
+    uint32_t flags;
+    struct goldfish_vk_object_list* poolObjects;
+    struct goldfish_vk_object_list* subObjects;
+    struct goldfish_vk_object_list* superObjects;
+    void* userPtr;
+    bool isSecondary;
+};
 
 } // extern "C"
+
+namespace goldfish_vk {
+
+void appendObject(struct goldfish_vk_object_list** begin, void* val);
+void eraseObject(struct goldfish_vk_object_list** begin, void* val);
+void eraseObjects(struct goldfish_vk_object_list** begin);
+void forAllObjects(struct goldfish_vk_object_list* begin, std::function<void(void*)> func);
+
+} // namespace goldfish_vk
diff --git a/system/vulkan_enc/VirtioGpuNext.h b/system/vulkan_enc/VirtioGpuNext.h
index 7da7da2..64342e5 100644
--- a/system/vulkan_enc/VirtioGpuNext.h
+++ b/system/vulkan_enc/VirtioGpuNext.h
@@ -23,6 +23,7 @@
 #define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
 #define VIRTGPU_PARAM_HOST_VISIBLE 4
 
+#ifndef VIRTGPU_BLOB_MEM_HOST3D
 struct drm_virtgpu_resource_create_blob {
 #define VIRTGPU_BLOB_MEM_GUEST              0x0001
 #define VIRTGPU_BLOB_MEM_HOST               0x0002
@@ -47,6 +48,14 @@
     uint64_t cmd;
     uint64_t blob_id;
 };
+#else
+#define VIRTGPU_BLOB_MEM_HOST               VIRTGPU_BLOB_MEM_HOST3D
+#define VIRTGPU_BLOB_MEM_HOST_GUEST         VIRTGPU_BLOB_MEM_HOST3D_GUEST
+
+#define VIRTGPU_BLOB_FLAG_MAPPABLE          VIRTGPU_BLOB_FLAG_USE_MAPPABLE
+#define VIRTGPU_BLOB_FLAG_SHAREABLE         VIRTGPU_BLOB_FLAG_USE_SHAREABLE
+#define VIRTGPU_BLOB_FLAG_CROSS_DEVICE      VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE
+#endif
 
 
 #define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB              \
diff --git a/system/vulkan_enc/VkEncoder.cpp b/system/vulkan_enc/VkEncoder.cpp
index 8c00e0c..5660460 100644
--- a/system/vulkan_enc/VkEncoder.cpp
+++ b/system/vulkan_enc/VkEncoder.cpp
@@ -32,13 +32,15 @@
 #include "VulkanStreamGuest.h"
 
 #include "android/base/AlignedBuf.h"
-#include "android/base/Pool.h"
+#include "android/base/BumpPool.h"
 #include "android/base/synchronization/AndroidLock.h"
 
 #include <cutils/properties.h>
 
 #include "goldfish_vk_marshaling_guest.h"
+#include "goldfish_vk_reserved_marshaling_guest.h"
 #include "goldfish_vk_deepcopy_guest.h"
+#include "goldfish_vk_counting_guest.h"
 #include "goldfish_vk_handlemap_guest.h"
 #include "goldfish_vk_private_defs.h"
 #include "goldfish_vk_transform_guest.h"
@@ -57,81 +59,9 @@
 using android::aligned_buf_free;
 using android::base::guest::AutoLock;
 using android::base::guest::Lock;
-using android::base::Pool;
+using android::base::BumpPool;
 
-class VkEncoder::Impl {
-public:
-    Impl(IOStream* stream) : m_stream(stream), m_logEncodes(false) {
-        const char* emuVkLogEncodesPropName = "qemu.vk.log";
-        char encodeProp[PROPERTY_VALUE_MAX];
-        if (property_get(emuVkLogEncodesPropName, encodeProp, nullptr) > 0) {
-            m_logEncodes = atoi(encodeProp) > 0;
-        }
-    }
-
-    ~Impl() {
-        for (auto it : mCleanupCallbacks) {
-            fprintf(stderr, "%s: run cleanup callback for %p\n", __func__, it.first);
-            it.second();
-        }
-    }
-
-    VulkanCountingStream* countingStream() { return &m_countingStream; }
-    VulkanStreamGuest* stream() { return &m_stream; }
-    Pool* pool() { return &m_pool; }
-    ResourceTracker* resources() { return ResourceTracker::get(); }
-    Validation* validation() { return &m_validation; }
-
-    void log(const char* text) {
-        if (!m_logEncodes) return;
-        ALOGD("encoder log: %s", text);
-    }
-
-    void flush() {
-        AutoLock encoderLock(lock);
-        m_stream.flush();
-    }
-
-    // Assume the lock for the current encoder is held.
-    void registerCleanupCallback(void* handle, VkEncoder::CleanupCallback cb) {
-        if (mCleanupCallbacks.end() == mCleanupCallbacks.find(handle)) {
-            mCleanupCallbacks[handle] = cb;
-        } else {
-            return;
-        }
-    }
-
-    void unregisterCleanupCallback(void* handle) {
-        mCleanupCallbacks.erase(handle);
-    }
-
-    Lock lock;
-
-private:
-    VulkanCountingStream m_countingStream;
-    VulkanStreamGuest m_stream;
-    Pool m_pool { 8, 4096, 64 };
-
-    Validation m_validation;
-    bool m_logEncodes;
-
-    std::unordered_map<void*, VkEncoder::CleanupCallback> mCleanupCallbacks;
-};
-
-VkEncoder::VkEncoder(IOStream *stream) :
-    mImpl(new VkEncoder::Impl(stream)) { }
-
-void VkEncoder::flush() {
-    mImpl->flush();
-}
-
-void VkEncoder::registerCleanupCallback(void* handle, VkEncoder::CleanupCallback cb) {
-    mImpl->registerCleanupCallback(handle, cb);
-}
-
-void VkEncoder::unregisterCleanupCallback(void* handle) {
-    mImpl->unregisterCleanupCallback(handle);
-}
+#include "VkEncoder.cpp.inl"
 
 #define VALIDATE_RET(retType, success, validate) \
     retType goldfish_vk_validateResult = validate; \
@@ -145,102 +75,102 @@
 VkResult VkEncoder::vkCreateInstance(
     const VkInstanceCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkInstance* pInstance)
+    VkInstance* pInstance,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateInstance encode");
-    mImpl->log("start vkCreateInstance");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstanceCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
     local_pCreateInfo = nullptr;
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkInstanceCreateInfo*)pool->alloc(sizeof(const VkInstanceCreateInfo));
-        deepcopy_VkInstanceCreateInfo(pool, pCreateInfo, (VkInstanceCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkInstanceCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkInstanceCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkInstanceCreateInfo(mImpl->resources(), (VkInstanceCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkInstanceCreateInfo(sResourceTracker, (VkInstanceCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        marshal_VkInstanceCreateInfo(countingStream, (VkInstanceCreateInfo*)(local_pCreateInfo));
+        count_VkInstanceCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkInstanceCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_0 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_0);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_1;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(pInstance, &cgen_var_1, 1);
-        countingStream->write((uint64_t*)&cgen_var_1, 8);
+        uint64_t cgen_var_0;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateInstance = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateInstance = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateInstance);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateInstance = OP_vkCreateInstance;
-    stream->write(&opcode_vkCreateInstance, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateInstance, sizeof(uint32_t));
-    marshal_VkInstanceCreateInfo(stream, (VkInstanceCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateInstance, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateInstance, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    reservedmarshal_VkInstanceCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkInstanceCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_2);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_3;
-    stream->handleMapping()->mapHandles_VkInstance_u64(pInstance, &cgen_var_3, 1);
-    stream->write((uint64_t*)&cgen_var_3, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateInstance readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_4;
-    stream->read((uint64_t*)&cgen_var_4, 8);
-    stream->handleMapping()->mapHandles_u64_VkInstance(&cgen_var_4, (VkInstance*)pInstance, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = (uint64_t)((*pInstance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_2;
+    stream->read((uint64_t*)&cgen_var_2, 8);
+    stream->handleMapping()->mapHandles_u64_VkInstance(&cgen_var_2, (VkInstance*)pInstance, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateInstance returnUnmarshal");
     VkResult vkCreateInstance_VkResult_return = (VkResult)0;
     stream->read(&vkCreateInstance_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    encoderLock.unlock();
-    mImpl->resources()->on_vkCreateInstance(this, vkCreateInstance_VkResult_return, pCreateInfo, pAllocator, pInstance);
-    encoderLock.lock();
-    mImpl->log("finish vkCreateInstance");;
+    sResourceTracker->on_vkCreateInstance(this, vkCreateInstance_VkResult_return, pCreateInfo, pAllocator, pInstance);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateInstance_VkResult_return;
 }
 
 void VkEncoder::vkDestroyInstance(
     VkInstance instance,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyInstance encode");
-    mImpl->log("start vkDestroyInstance");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     VkAllocationCallbacks* local_pAllocator;
     local_instance = instance;
@@ -248,119 +178,136 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_5;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_5, 1);
-        countingStream->write((uint64_t*)&cgen_var_5, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_6 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_6);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyInstance = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyInstance = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyInstance);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyInstance = OP_vkDestroyInstance;
-    stream->write(&opcode_vkDestroyInstance, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyInstance, sizeof(uint32_t));
-    uint64_t cgen_var_7;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_7, 1);
-    stream->write((uint64_t*)&cgen_var_7, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyInstance, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyInstance, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_8 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_8);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyInstance readParams");
-    AEMU_SCOPED_TRACE("vkDestroyInstance returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkInstance((VkInstance*)&instance);
-    mImpl->log("finish vkDestroyInstance");;
+    sResourceTracker->destroyMapping()->mapHandles_VkInstance((VkInstance*)&instance);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkEnumeratePhysicalDevices(
     VkInstance instance,
     uint32_t* pPhysicalDeviceCount,
-    VkPhysicalDevice* pPhysicalDevices)
+    VkPhysicalDevice* pPhysicalDevices,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkEnumeratePhysicalDevices encode");
-    mImpl->log("start vkEnumeratePhysicalDevices");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     local_instance = instance;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_9;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_9, 1);
-        countingStream->write((uint64_t*)&cgen_var_9, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_10 = (uint64_t)(uintptr_t)pPhysicalDeviceCount;
-        countingStream->putBe64(cgen_var_10);
+        *countPtr += 8;
         if (pPhysicalDeviceCount)
         {
-            countingStream->write((uint32_t*)pPhysicalDeviceCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_11 = (uint64_t)(uintptr_t)pPhysicalDevices;
-        countingStream->putBe64(cgen_var_11);
+        *countPtr += 8;
         if (pPhysicalDevices)
         {
             if ((*(pPhysicalDeviceCount)))
             {
-                uint64_t* cgen_var_12;
-                countingStream->alloc((void**)&cgen_var_12, (*(pPhysicalDeviceCount)) * 8);
-                countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(pPhysicalDevices, cgen_var_12, (*(pPhysicalDeviceCount)));
-                countingStream->write((uint64_t*)cgen_var_12, (*(pPhysicalDeviceCount)) * 8);
+                *countPtr += (*(pPhysicalDeviceCount)) * 8;
             }
         }
     }
-    uint32_t packetSize_vkEnumeratePhysicalDevices = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkEnumeratePhysicalDevices = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkEnumeratePhysicalDevices);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkEnumeratePhysicalDevices = OP_vkEnumeratePhysicalDevices;
-    stream->write(&opcode_vkEnumeratePhysicalDevices, sizeof(uint32_t));
-    stream->write(&packetSize_vkEnumeratePhysicalDevices, sizeof(uint32_t));
-    uint64_t cgen_var_13;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_13, 1);
-    stream->write((uint64_t*)&cgen_var_13, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkEnumeratePhysicalDevices, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkEnumeratePhysicalDevices, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_14 = (uint64_t)(uintptr_t)pPhysicalDeviceCount;
-    stream->putBe64(cgen_var_14);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pPhysicalDeviceCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPhysicalDeviceCount)
     {
-        stream->write((uint32_t*)pPhysicalDeviceCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPhysicalDeviceCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
+    /* is handle, possibly out */;
     // WARNING PTR CHECK
-    uint64_t cgen_var_15 = (uint64_t)(uintptr_t)pPhysicalDevices;
-    stream->putBe64(cgen_var_15);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pPhysicalDevices;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPhysicalDevices)
     {
         if ((*(pPhysicalDeviceCount)))
         {
-            uint64_t* cgen_var_16;
-            stream->alloc((void**)&cgen_var_16, (*(pPhysicalDeviceCount)) * 8);
-            stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(pPhysicalDevices, cgen_var_16, (*(pPhysicalDeviceCount)));
-            stream->write((uint64_t*)cgen_var_16, (*(pPhysicalDeviceCount)) * 8);
+            uint8_t* cgen_var_2_0_ptr = (uint8_t*)(*streamPtrPtr);
+            if (pPhysicalDeviceCount)
+            {
+                for (uint32_t k = 0; k < (*(pPhysicalDeviceCount)); ++k)
+                {
+                    uint64_t tmpval = (uint64_t)(pPhysicalDevices[k]);
+                    memcpy(cgen_var_2_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+                }
+            }
+            *streamPtrPtr += 8 * (*(pPhysicalDeviceCount));
         }
     }
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkEnumeratePhysicalDevices readParams");
+    /* is handle, possibly out */;
     // WARNING PTR CHECK
     uint32_t* check_pPhysicalDeviceCount;
     check_pPhysicalDeviceCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -372,7 +319,7 @@
         }
         stream->read((uint32_t*)pPhysicalDeviceCount, sizeof(uint32_t));
     }
-    stream->setHandleMapping(resources->createMapping());
+    stream->setHandleMapping(sResourceTracker->createMapping());
     // WARNING PTR CHECK
     VkPhysicalDevice* check_pPhysicalDevices;
     check_pPhysicalDevices = (VkPhysicalDevice*)(uintptr_t)stream->getBe64();
@@ -384,106 +331,121 @@
         }
         if ((*(pPhysicalDeviceCount)))
         {
-            uint64_t* cgen_var_19;
-            stream->alloc((void**)&cgen_var_19, (*(pPhysicalDeviceCount)) * 8);
-            stream->read((uint64_t*)cgen_var_19, (*(pPhysicalDeviceCount)) * 8);
-            stream->handleMapping()->mapHandles_u64_VkPhysicalDevice(cgen_var_19, (VkPhysicalDevice*)pPhysicalDevices, (*(pPhysicalDeviceCount)));
+            uint64_t* cgen_var_4_0;
+            stream->alloc((void**)&cgen_var_4_0, (*(pPhysicalDeviceCount)) * 8);
+            stream->read((uint64_t*)cgen_var_4_0, (*(pPhysicalDeviceCount)) * 8);
+            stream->handleMapping()->mapHandles_u64_VkPhysicalDevice(cgen_var_4_0, (VkPhysicalDevice*)pPhysicalDevices, (*(pPhysicalDeviceCount)));
         }
     }
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkEnumeratePhysicalDevices returnUnmarshal");
     VkResult vkEnumeratePhysicalDevices_VkResult_return = (VkResult)0;
     stream->read(&vkEnumeratePhysicalDevices_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkEnumeratePhysicalDevices");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkEnumeratePhysicalDevices_VkResult_return;
 }
 
 void VkEncoder::vkGetPhysicalDeviceFeatures(
     VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceFeatures* pFeatures)
+    VkPhysicalDeviceFeatures* pFeatures,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFeatures encode");
-    mImpl->log("start vkGetPhysicalDeviceFeatures");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     local_physicalDevice = physicalDevice;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_20;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_20, 1);
-        countingStream->write((uint64_t*)&cgen_var_20, 1 * 8);
-        marshal_VkPhysicalDeviceFeatures(countingStream, (VkPhysicalDeviceFeatures*)(pFeatures));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceFeatures(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceFeatures*)(pFeatures), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceFeatures = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceFeatures = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceFeatures);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceFeatures = OP_vkGetPhysicalDeviceFeatures;
-    stream->write(&opcode_vkGetPhysicalDeviceFeatures, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceFeatures, sizeof(uint32_t));
-    uint64_t cgen_var_21;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_21, 1);
-    stream->write((uint64_t*)&cgen_var_21, 1 * 8);
-    marshal_VkPhysicalDeviceFeatures(stream, (VkPhysicalDeviceFeatures*)(pFeatures));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFeatures readParams");
-    unmarshal_VkPhysicalDeviceFeatures(stream, (VkPhysicalDeviceFeatures*)(pFeatures));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceFeatures, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceFeatures, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceFeatures(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceFeatures*)(pFeatures), streamPtrPtr);
+    unmarshal_VkPhysicalDeviceFeatures(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceFeatures*)(pFeatures));
     if (pFeatures)
     {
-        transform_fromhost_VkPhysicalDeviceFeatures(mImpl->resources(), (VkPhysicalDeviceFeatures*)(pFeatures));
+        transform_fromhost_VkPhysicalDeviceFeatures(sResourceTracker, (VkPhysicalDeviceFeatures*)(pFeatures));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFeatures returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceFeatures");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetPhysicalDeviceFormatProperties(
     VkPhysicalDevice physicalDevice,
     VkFormat format,
-    VkFormatProperties* pFormatProperties)
+    VkFormatProperties* pFormatProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFormatProperties encode");
-    mImpl->log("start vkGetPhysicalDeviceFormatProperties");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkFormat local_format;
     local_physicalDevice = physicalDevice;
     local_format = format;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_22;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_22, 1);
-        countingStream->write((uint64_t*)&cgen_var_22, 1 * 8);
-        countingStream->write((VkFormat*)&local_format, sizeof(VkFormat));
-        marshal_VkFormatProperties(countingStream, (VkFormatProperties*)(pFormatProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkFormat);
+        count_VkFormatProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFormatProperties*)(pFormatProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceFormatProperties = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceFormatProperties = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceFormatProperties);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceFormatProperties = OP_vkGetPhysicalDeviceFormatProperties;
-    stream->write(&opcode_vkGetPhysicalDeviceFormatProperties, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceFormatProperties, sizeof(uint32_t));
-    uint64_t cgen_var_23;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_23, 1);
-    stream->write((uint64_t*)&cgen_var_23, 1 * 8);
-    stream->write((VkFormat*)&local_format, sizeof(VkFormat));
-    marshal_VkFormatProperties(stream, (VkFormatProperties*)(pFormatProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFormatProperties readParams");
-    unmarshal_VkFormatProperties(stream, (VkFormatProperties*)(pFormatProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceFormatProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceFormatProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkFormat*)&local_format, sizeof(VkFormat));
+    *streamPtrPtr += sizeof(VkFormat);
+    reservedmarshal_VkFormatProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFormatProperties*)(pFormatProperties), streamPtrPtr);
+    unmarshal_VkFormatProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFormatProperties*)(pFormatProperties));
     if (pFormatProperties)
     {
-        transform_fromhost_VkFormatProperties(mImpl->resources(), (VkFormatProperties*)(pFormatProperties));
+        transform_fromhost_VkFormatProperties(sResourceTracker, (VkFormatProperties*)(pFormatProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFormatProperties returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceFormatProperties");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkGetPhysicalDeviceImageFormatProperties(
@@ -493,16 +455,14 @@
     VkImageTiling tiling,
     VkImageUsageFlags usage,
     VkImageCreateFlags flags,
-    VkImageFormatProperties* pImageFormatProperties)
+    VkImageFormatProperties* pImageFormatProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceImageFormatProperties encode");
-    mImpl->log("start vkGetPhysicalDeviceImageFormatProperties");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkFormat local_format;
     VkImageType local_type;
@@ -515,152 +475,176 @@
     local_tiling = tiling;
     local_usage = usage;
     local_flags = flags;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_24;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_24, 1);
-        countingStream->write((uint64_t*)&cgen_var_24, 1 * 8);
-        countingStream->write((VkFormat*)&local_format, sizeof(VkFormat));
-        countingStream->write((VkImageType*)&local_type, sizeof(VkImageType));
-        countingStream->write((VkImageTiling*)&local_tiling, sizeof(VkImageTiling));
-        countingStream->write((VkImageUsageFlags*)&local_usage, sizeof(VkImageUsageFlags));
-        countingStream->write((VkImageCreateFlags*)&local_flags, sizeof(VkImageCreateFlags));
-        marshal_VkImageFormatProperties(countingStream, (VkImageFormatProperties*)(pImageFormatProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkFormat);
+        *countPtr += sizeof(VkImageType);
+        *countPtr += sizeof(VkImageTiling);
+        *countPtr += sizeof(VkImageUsageFlags);
+        *countPtr += sizeof(VkImageCreateFlags);
+        count_VkImageFormatProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageFormatProperties*)(pImageFormatProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceImageFormatProperties = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceImageFormatProperties = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceImageFormatProperties);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceImageFormatProperties = OP_vkGetPhysicalDeviceImageFormatProperties;
-    stream->write(&opcode_vkGetPhysicalDeviceImageFormatProperties, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceImageFormatProperties, sizeof(uint32_t));
-    uint64_t cgen_var_25;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_25, 1);
-    stream->write((uint64_t*)&cgen_var_25, 1 * 8);
-    stream->write((VkFormat*)&local_format, sizeof(VkFormat));
-    stream->write((VkImageType*)&local_type, sizeof(VkImageType));
-    stream->write((VkImageTiling*)&local_tiling, sizeof(VkImageTiling));
-    stream->write((VkImageUsageFlags*)&local_usage, sizeof(VkImageUsageFlags));
-    stream->write((VkImageCreateFlags*)&local_flags, sizeof(VkImageCreateFlags));
-    marshal_VkImageFormatProperties(stream, (VkImageFormatProperties*)(pImageFormatProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceImageFormatProperties readParams");
-    unmarshal_VkImageFormatProperties(stream, (VkImageFormatProperties*)(pImageFormatProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceImageFormatProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceImageFormatProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkFormat*)&local_format, sizeof(VkFormat));
+    *streamPtrPtr += sizeof(VkFormat);
+    memcpy(*streamPtrPtr, (VkImageType*)&local_type, sizeof(VkImageType));
+    *streamPtrPtr += sizeof(VkImageType);
+    memcpy(*streamPtrPtr, (VkImageTiling*)&local_tiling, sizeof(VkImageTiling));
+    *streamPtrPtr += sizeof(VkImageTiling);
+    memcpy(*streamPtrPtr, (VkImageUsageFlags*)&local_usage, sizeof(VkImageUsageFlags));
+    *streamPtrPtr += sizeof(VkImageUsageFlags);
+    memcpy(*streamPtrPtr, (VkImageCreateFlags*)&local_flags, sizeof(VkImageCreateFlags));
+    *streamPtrPtr += sizeof(VkImageCreateFlags);
+    reservedmarshal_VkImageFormatProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageFormatProperties*)(pImageFormatProperties), streamPtrPtr);
+    unmarshal_VkImageFormatProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageFormatProperties*)(pImageFormatProperties));
     if (pImageFormatProperties)
     {
-        transform_fromhost_VkImageFormatProperties(mImpl->resources(), (VkImageFormatProperties*)(pImageFormatProperties));
+        transform_fromhost_VkImageFormatProperties(sResourceTracker, (VkImageFormatProperties*)(pImageFormatProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceImageFormatProperties returnUnmarshal");
     VkResult vkGetPhysicalDeviceImageFormatProperties_VkResult_return = (VkResult)0;
     stream->read(&vkGetPhysicalDeviceImageFormatProperties_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceImageFormatProperties");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceImageFormatProperties_VkResult_return;
 }
 
 void VkEncoder::vkGetPhysicalDeviceProperties(
     VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceProperties* pProperties)
+    VkPhysicalDeviceProperties* pProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceProperties encode");
-    mImpl->log("start vkGetPhysicalDeviceProperties");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     local_physicalDevice = physicalDevice;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_26;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_26, 1);
-        countingStream->write((uint64_t*)&cgen_var_26, 1 * 8);
-        marshal_VkPhysicalDeviceProperties(countingStream, (VkPhysicalDeviceProperties*)(pProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceProperties*)(pProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceProperties = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceProperties = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceProperties);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceProperties = OP_vkGetPhysicalDeviceProperties;
-    stream->write(&opcode_vkGetPhysicalDeviceProperties, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceProperties, sizeof(uint32_t));
-    uint64_t cgen_var_27;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_27, 1);
-    stream->write((uint64_t*)&cgen_var_27, 1 * 8);
-    marshal_VkPhysicalDeviceProperties(stream, (VkPhysicalDeviceProperties*)(pProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceProperties readParams");
-    unmarshal_VkPhysicalDeviceProperties(stream, (VkPhysicalDeviceProperties*)(pProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceProperties*)(pProperties), streamPtrPtr);
+    unmarshal_VkPhysicalDeviceProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceProperties*)(pProperties));
     if (pProperties)
     {
-        transform_fromhost_VkPhysicalDeviceProperties(mImpl->resources(), (VkPhysicalDeviceProperties*)(pProperties));
+        transform_fromhost_VkPhysicalDeviceProperties(sResourceTracker, (VkPhysicalDeviceProperties*)(pProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceProperties returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceProperties");;
+    sResourceTracker->on_vkGetPhysicalDeviceProperties(this, physicalDevice, pProperties);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetPhysicalDeviceQueueFamilyProperties(
     VkPhysicalDevice physicalDevice,
     uint32_t* pQueueFamilyPropertyCount,
-    VkQueueFamilyProperties* pQueueFamilyProperties)
+    VkQueueFamilyProperties* pQueueFamilyProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceQueueFamilyProperties encode");
-    mImpl->log("start vkGetPhysicalDeviceQueueFamilyProperties");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     local_physicalDevice = physicalDevice;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_28;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_28, 1);
-        countingStream->write((uint64_t*)&cgen_var_28, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_29 = (uint64_t)(uintptr_t)pQueueFamilyPropertyCount;
-        countingStream->putBe64(cgen_var_29);
+        *countPtr += 8;
         if (pQueueFamilyPropertyCount)
         {
-            countingStream->write((uint32_t*)pQueueFamilyPropertyCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_30 = (uint64_t)(uintptr_t)pQueueFamilyProperties;
-        countingStream->putBe64(cgen_var_30);
+        *countPtr += 8;
         if (pQueueFamilyProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+            if (pQueueFamilyPropertyCount)
             {
-                marshal_VkQueueFamilyProperties(countingStream, (VkQueueFamilyProperties*)(pQueueFamilyProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+                {
+                    count_VkQueueFamilyProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkQueueFamilyProperties*)(pQueueFamilyProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetPhysicalDeviceQueueFamilyProperties = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceQueueFamilyProperties = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceQueueFamilyProperties);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceQueueFamilyProperties = OP_vkGetPhysicalDeviceQueueFamilyProperties;
-    stream->write(&opcode_vkGetPhysicalDeviceQueueFamilyProperties, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceQueueFamilyProperties, sizeof(uint32_t));
-    uint64_t cgen_var_31;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_31, 1);
-    stream->write((uint64_t*)&cgen_var_31, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceQueueFamilyProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceQueueFamilyProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_32 = (uint64_t)(uintptr_t)pQueueFamilyPropertyCount;
-    stream->putBe64(cgen_var_32);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pQueueFamilyPropertyCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pQueueFamilyPropertyCount)
     {
-        stream->write((uint32_t*)pQueueFamilyPropertyCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pQueueFamilyPropertyCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_33 = (uint64_t)(uintptr_t)pQueueFamilyProperties;
-    stream->putBe64(cgen_var_33);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pQueueFamilyProperties;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pQueueFamilyProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
         {
-            marshal_VkQueueFamilyProperties(stream, (VkQueueFamilyProperties*)(pQueueFamilyProperties + i));
+            reservedmarshal_VkQueueFamilyProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkQueueFamilyProperties*)(pQueueFamilyProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceQueueFamilyProperties readParams");
     // WARNING PTR CHECK
     uint32_t* check_pQueueFamilyPropertyCount;
     check_pQueueFamilyPropertyCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -681,156 +665,185 @@
         {
             fprintf(stderr, "fatal: pQueueFamilyProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+        if (pQueueFamilyPropertyCount)
         {
-            unmarshal_VkQueueFamilyProperties(stream, (VkQueueFamilyProperties*)(pQueueFamilyProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+            {
+                unmarshal_VkQueueFamilyProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkQueueFamilyProperties*)(pQueueFamilyProperties + i));
+            }
         }
     }
-    if (pQueueFamilyProperties)
+    if (pQueueFamilyPropertyCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+        if (pQueueFamilyProperties)
         {
-            transform_fromhost_VkQueueFamilyProperties(mImpl->resources(), (VkQueueFamilyProperties*)(pQueueFamilyProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+            {
+                transform_fromhost_VkQueueFamilyProperties(sResourceTracker, (VkQueueFamilyProperties*)(pQueueFamilyProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceQueueFamilyProperties returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceQueueFamilyProperties");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetPhysicalDeviceMemoryProperties(
     VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceMemoryProperties* pMemoryProperties)
+    VkPhysicalDeviceMemoryProperties* pMemoryProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMemoryProperties encode");
-    mImpl->log("start vkGetPhysicalDeviceMemoryProperties");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     local_physicalDevice = physicalDevice;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_36;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_36, 1);
-        countingStream->write((uint64_t*)&cgen_var_36, 1 * 8);
-        marshal_VkPhysicalDeviceMemoryProperties(countingStream, (VkPhysicalDeviceMemoryProperties*)(pMemoryProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceMemoryProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceMemoryProperties*)(pMemoryProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceMemoryProperties = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceMemoryProperties = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceMemoryProperties);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceMemoryProperties = OP_vkGetPhysicalDeviceMemoryProperties;
-    stream->write(&opcode_vkGetPhysicalDeviceMemoryProperties, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceMemoryProperties, sizeof(uint32_t));
-    uint64_t cgen_var_37;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_37, 1);
-    stream->write((uint64_t*)&cgen_var_37, 1 * 8);
-    marshal_VkPhysicalDeviceMemoryProperties(stream, (VkPhysicalDeviceMemoryProperties*)(pMemoryProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMemoryProperties readParams");
-    unmarshal_VkPhysicalDeviceMemoryProperties(stream, (VkPhysicalDeviceMemoryProperties*)(pMemoryProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceMemoryProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceMemoryProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceMemoryProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceMemoryProperties*)(pMemoryProperties), streamPtrPtr);
+    unmarshal_VkPhysicalDeviceMemoryProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceMemoryProperties*)(pMemoryProperties));
     if (pMemoryProperties)
     {
-        transform_fromhost_VkPhysicalDeviceMemoryProperties(mImpl->resources(), (VkPhysicalDeviceMemoryProperties*)(pMemoryProperties));
+        transform_fromhost_VkPhysicalDeviceMemoryProperties(sResourceTracker, (VkPhysicalDeviceMemoryProperties*)(pMemoryProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMemoryProperties returnUnmarshal");
-    encoderLock.unlock();
-    mImpl->resources()->on_vkGetPhysicalDeviceMemoryProperties(this, physicalDevice, pMemoryProperties);
-    encoderLock.lock();
-    mImpl->log("finish vkGetPhysicalDeviceMemoryProperties");;
+    sResourceTracker->on_vkGetPhysicalDeviceMemoryProperties(this, physicalDevice, pMemoryProperties);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 PFN_vkVoidFunction VkEncoder::vkGetInstanceProcAddr(
     VkInstance instance,
-    const char* pName)
+    const char* pName,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetInstanceProcAddr encode");
-    mImpl->log("start vkGetInstanceProcAddr");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     char* local_pName;
     local_instance = instance;
-    local_pName = nullptr;
-    if (pName)
+    // Avoiding deepcopy for pName
+    local_pName = (char*)pName;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pName = pool->strDup(pName);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t) + (local_pName ? strlen(local_pName) : 0);
     }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_38;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_38, 1);
-        countingStream->write((uint64_t*)&cgen_var_38, 1 * 8);
-        countingStream->putString(local_pName);
-    }
-    uint32_t packetSize_vkGetInstanceProcAddr = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetInstanceProcAddr = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetInstanceProcAddr);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetInstanceProcAddr = OP_vkGetInstanceProcAddr;
-    stream->write(&opcode_vkGetInstanceProcAddr, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetInstanceProcAddr, sizeof(uint32_t));
-    uint64_t cgen_var_39;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_39, 1);
-    stream->write((uint64_t*)&cgen_var_39, 1 * 8);
-    stream->putString(local_pName);
-    AEMU_SCOPED_TRACE("vkGetInstanceProcAddr readParams");
-    AEMU_SCOPED_TRACE("vkGetInstanceProcAddr returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetInstanceProcAddr, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetInstanceProcAddr, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    {
+        uint32_t l = local_pName ? strlen(local_pName): 0;
+        memcpy(*streamPtrPtr, (uint32_t*)&l, sizeof(uint32_t));
+        android::base::Stream::toBe32((uint8_t*)*streamPtrPtr);
+        *streamPtrPtr += sizeof(uint32_t);
+        memcpy(*streamPtrPtr, (char*)local_pName, l);
+        *streamPtrPtr += l;
+    }
     PFN_vkVoidFunction vkGetInstanceProcAddr_PFN_vkVoidFunction_return = (PFN_vkVoidFunction)0;
     stream->read(&vkGetInstanceProcAddr_PFN_vkVoidFunction_return, sizeof(PFN_vkVoidFunction));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetInstanceProcAddr");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetInstanceProcAddr_PFN_vkVoidFunction_return;
 }
 
 PFN_vkVoidFunction VkEncoder::vkGetDeviceProcAddr(
     VkDevice device,
-    const char* pName)
+    const char* pName,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetDeviceProcAddr encode");
-    mImpl->log("start vkGetDeviceProcAddr");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     char* local_pName;
     local_device = device;
-    local_pName = nullptr;
-    if (pName)
+    // Avoiding deepcopy for pName
+    local_pName = (char*)pName;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pName = pool->strDup(pName);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t) + (local_pName ? strlen(local_pName) : 0);
     }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_40;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_40, 1);
-        countingStream->write((uint64_t*)&cgen_var_40, 1 * 8);
-        countingStream->putString(local_pName);
-    }
-    uint32_t packetSize_vkGetDeviceProcAddr = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetDeviceProcAddr = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDeviceProcAddr);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetDeviceProcAddr = OP_vkGetDeviceProcAddr;
-    stream->write(&opcode_vkGetDeviceProcAddr, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetDeviceProcAddr, sizeof(uint32_t));
-    uint64_t cgen_var_41;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_41, 1);
-    stream->write((uint64_t*)&cgen_var_41, 1 * 8);
-    stream->putString(local_pName);
-    AEMU_SCOPED_TRACE("vkGetDeviceProcAddr readParams");
-    AEMU_SCOPED_TRACE("vkGetDeviceProcAddr returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDeviceProcAddr, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDeviceProcAddr, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    {
+        uint32_t l = local_pName ? strlen(local_pName): 0;
+        memcpy(*streamPtrPtr, (uint32_t*)&l, sizeof(uint32_t));
+        android::base::Stream::toBe32((uint8_t*)*streamPtrPtr);
+        *streamPtrPtr += sizeof(uint32_t);
+        memcpy(*streamPtrPtr, (char*)local_pName, l);
+        *streamPtrPtr += l;
+    }
     PFN_vkVoidFunction vkGetDeviceProcAddr_PFN_vkVoidFunction_return = (PFN_vkVoidFunction)0;
     stream->read(&vkGetDeviceProcAddr_PFN_vkVoidFunction_return, sizeof(PFN_vkVoidFunction));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetDeviceProcAddr");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetDeviceProcAddr_PFN_vkVoidFunction_return;
 }
 
@@ -838,16 +851,14 @@
     VkPhysicalDevice physicalDevice,
     const VkDeviceCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkDevice* pDevice)
+    VkDevice* pDevice,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateDevice encode");
-    mImpl->log("start vkCreateDevice");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkDeviceCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -856,95 +867,95 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkDeviceCreateInfo*)pool->alloc(sizeof(const VkDeviceCreateInfo));
-        deepcopy_VkDeviceCreateInfo(pool, pCreateInfo, (VkDeviceCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkDeviceCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkDeviceCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkDeviceCreateInfo(mImpl->resources(), (VkDeviceCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkDeviceCreateInfo(sResourceTracker, (VkDeviceCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_42;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_42, 1);
-        countingStream->write((uint64_t*)&cgen_var_42, 1 * 8);
-        marshal_VkDeviceCreateInfo(countingStream, (VkDeviceCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDeviceCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDeviceCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_43 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_43);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_44;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(pDevice, &cgen_var_44, 1);
-        countingStream->write((uint64_t*)&cgen_var_44, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateDevice = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateDevice = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateDevice);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateDevice = OP_vkCreateDevice;
-    stream->write(&opcode_vkCreateDevice, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateDevice, sizeof(uint32_t));
-    uint64_t cgen_var_45;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_45, 1);
-    stream->write((uint64_t*)&cgen_var_45, 1 * 8);
-    marshal_VkDeviceCreateInfo(stream, (VkDeviceCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateDevice, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateDevice, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDeviceCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDeviceCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_46 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_46);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_47;
-    stream->handleMapping()->mapHandles_VkDevice_u64(pDevice, &cgen_var_47, 1);
-    stream->write((uint64_t*)&cgen_var_47, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateDevice readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_48;
-    stream->read((uint64_t*)&cgen_var_48, 8);
-    stream->handleMapping()->mapHandles_u64_VkDevice(&cgen_var_48, (VkDevice*)pDevice, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkDevice(&cgen_var_3, (VkDevice*)pDevice, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateDevice returnUnmarshal");
     VkResult vkCreateDevice_VkResult_return = (VkResult)0;
     stream->read(&vkCreateDevice_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    encoderLock.unlock();
-    mImpl->resources()->on_vkCreateDevice(this, vkCreateDevice_VkResult_return, physicalDevice, pCreateInfo, pAllocator, pDevice);
-    encoderLock.lock();
-    mImpl->log("finish vkCreateDevice");;
+    sResourceTracker->on_vkCreateDevice(this, vkCreateDevice_VkResult_return, physicalDevice, pCreateInfo, pAllocator, pDevice);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateDevice_VkResult_return;
 }
 
 void VkEncoder::vkDestroyDevice(
     VkDevice device,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyDevice encode");
-    mImpl->log("start vkDestroyDevice");
-    encoderLock.unlock();
-    mImpl->resources()->on_vkDestroyDevice_pre(this, device, pAllocator);
-    encoderLock.lock();
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    sResourceTracker->on_vkDestroyDevice_pre(this, device, pAllocator);
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkAllocationCallbacks* local_pAllocator;
     local_device = device;
@@ -952,138 +963,166 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_49;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_49, 1);
-        countingStream->write((uint64_t*)&cgen_var_49, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_50 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_50);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyDevice = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyDevice = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyDevice);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyDevice = OP_vkDestroyDevice;
-    stream->write(&opcode_vkDestroyDevice, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyDevice, sizeof(uint32_t));
-    uint64_t cgen_var_51;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_51, 1);
-    stream->write((uint64_t*)&cgen_var_51, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyDevice, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyDevice, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_52 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_52);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyDevice readParams");
-    AEMU_SCOPED_TRACE("vkDestroyDevice returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkDevice((VkDevice*)&device);
+    sResourceTracker->destroyMapping()->mapHandles_VkDevice((VkDevice*)&device);
     stream->flush();
-    mImpl->log("finish vkDestroyDevice");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkEnumerateInstanceExtensionProperties(
     const char* pLayerName,
     uint32_t* pPropertyCount,
-    VkExtensionProperties* pProperties)
+    VkExtensionProperties* pProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkEnumerateInstanceExtensionProperties encode");
-    mImpl->log("start vkEnumerateInstanceExtensionProperties");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     char* local_pLayerName;
-    local_pLayerName = nullptr;
-    if (pLayerName)
+    // Avoiding deepcopy for pLayerName
+    local_pLayerName = (char*)pLayerName;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pLayerName = pool->strDup(pLayerName);
-    }
-    countingStream->rewind();
-    {
-        if (countingStream->getFeatureBits() & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
+        if (sFeatureBits & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
         {
             // WARNING PTR CHECK
-            uint64_t cgen_var_53 = (uint64_t)(uintptr_t)local_pLayerName;
-            countingStream->putBe64(cgen_var_53);
+            *countPtr += 8;
             if (local_pLayerName)
             {
-                countingStream->putString(local_pLayerName);
+                *countPtr += sizeof(uint32_t) + (local_pLayerName ? strlen(local_pLayerName) : 0);
             }
         }
         else
         {
-            countingStream->putString(local_pLayerName);
+            *countPtr += sizeof(uint32_t) + (local_pLayerName ? strlen(local_pLayerName) : 0);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_54 = (uint64_t)(uintptr_t)pPropertyCount;
-        countingStream->putBe64(cgen_var_54);
+        *countPtr += 8;
         if (pPropertyCount)
         {
-            countingStream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_55 = (uint64_t)(uintptr_t)pProperties;
-        countingStream->putBe64(cgen_var_55);
+        *countPtr += 8;
         if (pProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            if (pPropertyCount)
             {
-                marshal_VkExtensionProperties(countingStream, (VkExtensionProperties*)(pProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+                {
+                    count_VkExtensionProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExtensionProperties*)(pProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkEnumerateInstanceExtensionProperties = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkEnumerateInstanceExtensionProperties = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkEnumerateInstanceExtensionProperties);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkEnumerateInstanceExtensionProperties = OP_vkEnumerateInstanceExtensionProperties;
-    stream->write(&opcode_vkEnumerateInstanceExtensionProperties, sizeof(uint32_t));
-    stream->write(&packetSize_vkEnumerateInstanceExtensionProperties, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkEnumerateInstanceExtensionProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkEnumerateInstanceExtensionProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
     if (stream->getFeatureBits() & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
     {
         // WARNING PTR CHECK
-        uint64_t cgen_var_56 = (uint64_t)(uintptr_t)local_pLayerName;
-        stream->putBe64(cgen_var_56);
+        uint64_t cgen_var_0 = (uint64_t)(uintptr_t)local_pLayerName;
+        memcpy((*streamPtrPtr), &cgen_var_0, 8);
+        android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+        *streamPtrPtr += 8;
         if (local_pLayerName)
         {
-            stream->putString(local_pLayerName);
+            {
+                uint32_t l = local_pLayerName ? strlen(local_pLayerName): 0;
+                memcpy(*streamPtrPtr, (uint32_t*)&l, sizeof(uint32_t));
+                android::base::Stream::toBe32((uint8_t*)*streamPtrPtr);
+                *streamPtrPtr += sizeof(uint32_t);
+                memcpy(*streamPtrPtr, (char*)local_pLayerName, l);
+                *streamPtrPtr += l;
+            }
         }
     }
     else
     {
-        stream->putString(local_pLayerName);
+        {
+            uint32_t l = local_pLayerName ? strlen(local_pLayerName): 0;
+            memcpy(*streamPtrPtr, (uint32_t*)&l, sizeof(uint32_t));
+            android::base::Stream::toBe32((uint8_t*)*streamPtrPtr);
+            *streamPtrPtr += sizeof(uint32_t);
+            memcpy(*streamPtrPtr, (char*)local_pLayerName, l);
+            *streamPtrPtr += l;
+        }
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_57 = (uint64_t)(uintptr_t)pPropertyCount;
-    stream->putBe64(cgen_var_57);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)pPropertyCount;
+    memcpy((*streamPtrPtr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPropertyCount)
     {
-        stream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPropertyCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_58 = (uint64_t)(uintptr_t)pProperties;
-    stream->putBe64(cgen_var_58);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pProperties;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
         {
-            marshal_VkExtensionProperties(stream, (VkExtensionProperties*)(pProperties + i));
+            reservedmarshal_VkExtensionProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExtensionProperties*)(pProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkEnumerateInstanceExtensionProperties readParams");
     // WARNING PTR CHECK
     uint32_t* check_pPropertyCount;
     check_pPropertyCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -1104,25 +1143,33 @@
         {
             fprintf(stderr, "fatal: pProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pPropertyCount)
         {
-            unmarshal_VkExtensionProperties(stream, (VkExtensionProperties*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                unmarshal_VkExtensionProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExtensionProperties*)(pProperties + i));
+            }
         }
     }
-    if (pProperties)
+    if (pPropertyCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pProperties)
         {
-            transform_fromhost_VkExtensionProperties(mImpl->resources(), (VkExtensionProperties*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                transform_fromhost_VkExtensionProperties(sResourceTracker, (VkExtensionProperties*)(pProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkEnumerateInstanceExtensionProperties returnUnmarshal");
     VkResult vkEnumerateInstanceExtensionProperties_VkResult_return = (VkResult)0;
     stream->read(&vkEnumerateInstanceExtensionProperties_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkEnumerateInstanceExtensionProperties");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkEnumerateInstanceExtensionProperties_VkResult_return;
 }
 
@@ -1130,101 +1177,120 @@
     VkPhysicalDevice physicalDevice,
     const char* pLayerName,
     uint32_t* pPropertyCount,
-    VkExtensionProperties* pProperties)
+    VkExtensionProperties* pProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkEnumerateDeviceExtensionProperties encode");
-    mImpl->log("start vkEnumerateDeviceExtensionProperties");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     char* local_pLayerName;
     local_physicalDevice = physicalDevice;
-    local_pLayerName = nullptr;
-    if (pLayerName)
+    // Avoiding deepcopy for pLayerName
+    local_pLayerName = (char*)pLayerName;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pLayerName = pool->strDup(pLayerName);
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_61;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_61, 1);
-        countingStream->write((uint64_t*)&cgen_var_61, 1 * 8);
-        if (countingStream->getFeatureBits() & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        if (sFeatureBits & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
         {
             // WARNING PTR CHECK
-            uint64_t cgen_var_62 = (uint64_t)(uintptr_t)local_pLayerName;
-            countingStream->putBe64(cgen_var_62);
+            *countPtr += 8;
             if (local_pLayerName)
             {
-                countingStream->putString(local_pLayerName);
+                *countPtr += sizeof(uint32_t) + (local_pLayerName ? strlen(local_pLayerName) : 0);
             }
         }
         else
         {
-            countingStream->putString(local_pLayerName);
+            *countPtr += sizeof(uint32_t) + (local_pLayerName ? strlen(local_pLayerName) : 0);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_63 = (uint64_t)(uintptr_t)pPropertyCount;
-        countingStream->putBe64(cgen_var_63);
+        *countPtr += 8;
         if (pPropertyCount)
         {
-            countingStream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_64 = (uint64_t)(uintptr_t)pProperties;
-        countingStream->putBe64(cgen_var_64);
+        *countPtr += 8;
         if (pProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            if (pPropertyCount)
             {
-                marshal_VkExtensionProperties(countingStream, (VkExtensionProperties*)(pProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+                {
+                    count_VkExtensionProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExtensionProperties*)(pProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkEnumerateDeviceExtensionProperties = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkEnumerateDeviceExtensionProperties = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkEnumerateDeviceExtensionProperties);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkEnumerateDeviceExtensionProperties = OP_vkEnumerateDeviceExtensionProperties;
-    stream->write(&opcode_vkEnumerateDeviceExtensionProperties, sizeof(uint32_t));
-    stream->write(&packetSize_vkEnumerateDeviceExtensionProperties, sizeof(uint32_t));
-    uint64_t cgen_var_65;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_65, 1);
-    stream->write((uint64_t*)&cgen_var_65, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkEnumerateDeviceExtensionProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkEnumerateDeviceExtensionProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     if (stream->getFeatureBits() & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
     {
         // WARNING PTR CHECK
-        uint64_t cgen_var_66 = (uint64_t)(uintptr_t)local_pLayerName;
-        stream->putBe64(cgen_var_66);
+        uint64_t cgen_var_0_0 = (uint64_t)(uintptr_t)local_pLayerName;
+        memcpy((*streamPtrPtr), &cgen_var_0_0, 8);
+        android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+        *streamPtrPtr += 8;
         if (local_pLayerName)
         {
-            stream->putString(local_pLayerName);
+            {
+                uint32_t l = local_pLayerName ? strlen(local_pLayerName): 0;
+                memcpy(*streamPtrPtr, (uint32_t*)&l, sizeof(uint32_t));
+                android::base::Stream::toBe32((uint8_t*)*streamPtrPtr);
+                *streamPtrPtr += sizeof(uint32_t);
+                memcpy(*streamPtrPtr, (char*)local_pLayerName, l);
+                *streamPtrPtr += l;
+            }
         }
     }
     else
     {
-        stream->putString(local_pLayerName);
+        {
+            uint32_t l = local_pLayerName ? strlen(local_pLayerName): 0;
+            memcpy(*streamPtrPtr, (uint32_t*)&l, sizeof(uint32_t));
+            android::base::Stream::toBe32((uint8_t*)*streamPtrPtr);
+            *streamPtrPtr += sizeof(uint32_t);
+            memcpy(*streamPtrPtr, (char*)local_pLayerName, l);
+            *streamPtrPtr += l;
+        }
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_67 = (uint64_t)(uintptr_t)pPropertyCount;
-    stream->putBe64(cgen_var_67);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pPropertyCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPropertyCount)
     {
-        stream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPropertyCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_68 = (uint64_t)(uintptr_t)pProperties;
-    stream->putBe64(cgen_var_68);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pProperties;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
         {
-            marshal_VkExtensionProperties(stream, (VkExtensionProperties*)(pProperties + i));
+            reservedmarshal_VkExtensionProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExtensionProperties*)(pProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkEnumerateDeviceExtensionProperties readParams");
     // WARNING PTR CHECK
     uint32_t* check_pPropertyCount;
     check_pPropertyCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -1245,83 +1311,98 @@
         {
             fprintf(stderr, "fatal: pProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pPropertyCount)
         {
-            unmarshal_VkExtensionProperties(stream, (VkExtensionProperties*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                unmarshal_VkExtensionProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExtensionProperties*)(pProperties + i));
+            }
         }
     }
-    if (pProperties)
+    if (pPropertyCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pProperties)
         {
-            transform_fromhost_VkExtensionProperties(mImpl->resources(), (VkExtensionProperties*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                transform_fromhost_VkExtensionProperties(sResourceTracker, (VkExtensionProperties*)(pProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkEnumerateDeviceExtensionProperties returnUnmarshal");
     VkResult vkEnumerateDeviceExtensionProperties_VkResult_return = (VkResult)0;
     stream->read(&vkEnumerateDeviceExtensionProperties_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkEnumerateDeviceExtensionProperties");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkEnumerateDeviceExtensionProperties_VkResult_return;
 }
 
 VkResult VkEncoder::vkEnumerateInstanceLayerProperties(
     uint32_t* pPropertyCount,
-    VkLayerProperties* pProperties)
+    VkLayerProperties* pProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkEnumerateInstanceLayerProperties encode");
-    mImpl->log("start vkEnumerateInstanceLayerProperties");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
         // WARNING PTR CHECK
-        uint64_t cgen_var_71 = (uint64_t)(uintptr_t)pPropertyCount;
-        countingStream->putBe64(cgen_var_71);
+        *countPtr += 8;
         if (pPropertyCount)
         {
-            countingStream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_72 = (uint64_t)(uintptr_t)pProperties;
-        countingStream->putBe64(cgen_var_72);
+        *countPtr += 8;
         if (pProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            if (pPropertyCount)
             {
-                marshal_VkLayerProperties(countingStream, (VkLayerProperties*)(pProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+                {
+                    count_VkLayerProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkLayerProperties*)(pProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkEnumerateInstanceLayerProperties = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkEnumerateInstanceLayerProperties = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkEnumerateInstanceLayerProperties);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkEnumerateInstanceLayerProperties = OP_vkEnumerateInstanceLayerProperties;
-    stream->write(&opcode_vkEnumerateInstanceLayerProperties, sizeof(uint32_t));
-    stream->write(&packetSize_vkEnumerateInstanceLayerProperties, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkEnumerateInstanceLayerProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkEnumerateInstanceLayerProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
     // WARNING PTR CHECK
-    uint64_t cgen_var_73 = (uint64_t)(uintptr_t)pPropertyCount;
-    stream->putBe64(cgen_var_73);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)pPropertyCount;
+    memcpy((*streamPtrPtr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPropertyCount)
     {
-        stream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPropertyCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_74 = (uint64_t)(uintptr_t)pProperties;
-    stream->putBe64(cgen_var_74);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pProperties;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
         {
-            marshal_VkLayerProperties(stream, (VkLayerProperties*)(pProperties + i));
+            reservedmarshal_VkLayerProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkLayerProperties*)(pProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkEnumerateInstanceLayerProperties readParams");
     // WARNING PTR CHECK
     uint32_t* check_pPropertyCount;
     check_pPropertyCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -1342,92 +1423,107 @@
         {
             fprintf(stderr, "fatal: pProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pPropertyCount)
         {
-            unmarshal_VkLayerProperties(stream, (VkLayerProperties*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                unmarshal_VkLayerProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkLayerProperties*)(pProperties + i));
+            }
         }
     }
-    if (pProperties)
+    if (pPropertyCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pProperties)
         {
-            transform_fromhost_VkLayerProperties(mImpl->resources(), (VkLayerProperties*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                transform_fromhost_VkLayerProperties(sResourceTracker, (VkLayerProperties*)(pProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkEnumerateInstanceLayerProperties returnUnmarshal");
     VkResult vkEnumerateInstanceLayerProperties_VkResult_return = (VkResult)0;
     stream->read(&vkEnumerateInstanceLayerProperties_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkEnumerateInstanceLayerProperties");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkEnumerateInstanceLayerProperties_VkResult_return;
 }
 
 VkResult VkEncoder::vkEnumerateDeviceLayerProperties(
     VkPhysicalDevice physicalDevice,
     uint32_t* pPropertyCount,
-    VkLayerProperties* pProperties)
+    VkLayerProperties* pProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkEnumerateDeviceLayerProperties encode");
-    mImpl->log("start vkEnumerateDeviceLayerProperties");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     local_physicalDevice = physicalDevice;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_77;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_77, 1);
-        countingStream->write((uint64_t*)&cgen_var_77, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_78 = (uint64_t)(uintptr_t)pPropertyCount;
-        countingStream->putBe64(cgen_var_78);
+        *countPtr += 8;
         if (pPropertyCount)
         {
-            countingStream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_79 = (uint64_t)(uintptr_t)pProperties;
-        countingStream->putBe64(cgen_var_79);
+        *countPtr += 8;
         if (pProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            if (pPropertyCount)
             {
-                marshal_VkLayerProperties(countingStream, (VkLayerProperties*)(pProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+                {
+                    count_VkLayerProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkLayerProperties*)(pProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkEnumerateDeviceLayerProperties = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkEnumerateDeviceLayerProperties = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkEnumerateDeviceLayerProperties);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkEnumerateDeviceLayerProperties = OP_vkEnumerateDeviceLayerProperties;
-    stream->write(&opcode_vkEnumerateDeviceLayerProperties, sizeof(uint32_t));
-    stream->write(&packetSize_vkEnumerateDeviceLayerProperties, sizeof(uint32_t));
-    uint64_t cgen_var_80;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_80, 1);
-    stream->write((uint64_t*)&cgen_var_80, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkEnumerateDeviceLayerProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkEnumerateDeviceLayerProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_81 = (uint64_t)(uintptr_t)pPropertyCount;
-    stream->putBe64(cgen_var_81);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pPropertyCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPropertyCount)
     {
-        stream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPropertyCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_82 = (uint64_t)(uintptr_t)pProperties;
-    stream->putBe64(cgen_var_82);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pProperties;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
         {
-            marshal_VkLayerProperties(stream, (VkLayerProperties*)(pProperties + i));
+            reservedmarshal_VkLayerProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkLayerProperties*)(pProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkEnumerateDeviceLayerProperties readParams");
     // WARNING PTR CHECK
     uint32_t* check_pPropertyCount;
     check_pPropertyCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -1448,25 +1544,33 @@
         {
             fprintf(stderr, "fatal: pProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pPropertyCount)
         {
-            unmarshal_VkLayerProperties(stream, (VkLayerProperties*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                unmarshal_VkLayerProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkLayerProperties*)(pProperties + i));
+            }
         }
     }
-    if (pProperties)
+    if (pPropertyCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pProperties)
         {
-            transform_fromhost_VkLayerProperties(mImpl->resources(), (VkLayerProperties*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                transform_fromhost_VkLayerProperties(sResourceTracker, (VkLayerProperties*)(pProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkEnumerateDeviceLayerProperties returnUnmarshal");
     VkResult vkEnumerateDeviceLayerProperties_VkResult_return = (VkResult)0;
     stream->read(&vkEnumerateDeviceLayerProperties_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkEnumerateDeviceLayerProperties");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkEnumerateDeviceLayerProperties_VkResult_return;
 }
 
@@ -1474,72 +1578,80 @@
     VkDevice device,
     uint32_t queueFamilyIndex,
     uint32_t queueIndex,
-    VkQueue* pQueue)
+    VkQueue* pQueue,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetDeviceQueue encode");
-    mImpl->log("start vkGetDeviceQueue");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     uint32_t local_queueFamilyIndex;
     uint32_t local_queueIndex;
     local_device = device;
     local_queueFamilyIndex = queueFamilyIndex;
     local_queueIndex = queueIndex;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_85;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_85, 1);
-        countingStream->write((uint64_t*)&cgen_var_85, 1 * 8);
-        countingStream->write((uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_queueIndex, sizeof(uint32_t));
-        uint64_t cgen_var_86;
-        countingStream->handleMapping()->mapHandles_VkQueue_u64(pQueue, &cgen_var_86, 1);
-        countingStream->write((uint64_t*)&cgen_var_86, 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkGetDeviceQueue = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetDeviceQueue = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDeviceQueue);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetDeviceQueue = OP_vkGetDeviceQueue;
-    stream->write(&opcode_vkGetDeviceQueue, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetDeviceQueue, sizeof(uint32_t));
-    uint64_t cgen_var_87;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_87, 1);
-    stream->write((uint64_t*)&cgen_var_87, 1 * 8);
-    stream->write((uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_queueIndex, sizeof(uint32_t));
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_88;
-    stream->handleMapping()->mapHandles_VkQueue_u64(pQueue, &cgen_var_88, 1);
-    stream->write((uint64_t*)&cgen_var_88, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkGetDeviceQueue readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_89;
-    stream->read((uint64_t*)&cgen_var_89, 8);
-    stream->handleMapping()->mapHandles_u64_VkQueue(&cgen_var_89, (VkQueue*)pQueue, 1);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDeviceQueue, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDeviceQueue, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_queueIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = (uint64_t)((*pQueue));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_2;
+    stream->read((uint64_t*)&cgen_var_2, 8);
+    stream->handleMapping()->mapHandles_u64_VkQueue(&cgen_var_2, (VkQueue*)pQueue, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkGetDeviceQueue returnUnmarshal");
-    mImpl->log("finish vkGetDeviceQueue");;
+    sResourceTracker->on_vkGetDeviceQueue(this, device, queueFamilyIndex,
+                                          queueIndex, pQueue);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkQueueSubmit(
     VkQueue queue,
     uint32_t submitCount,
     const VkSubmitInfo* pSubmits,
-    VkFence fence)
+    VkFence fence,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkQueueSubmit encode");
-    mImpl->log("start vkQueueSubmit");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkQueue local_queue;
     uint32_t local_submitCount;
     VkSubmitInfo* local_pSubmits;
@@ -1552,7 +1664,7 @@
         local_pSubmits = (VkSubmitInfo*)pool->alloc(((submitCount)) * sizeof(const VkSubmitInfo));
         for (uint32_t i = 0; i < (uint32_t)((submitCount)); ++i)
         {
-            deepcopy_VkSubmitInfo(pool, pSubmits + i, (VkSubmitInfo*)(local_pSubmits + i));
+            deepcopy_VkSubmitInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSubmits + i, (VkSubmitInfo*)(local_pSubmits + i));
         }
     }
     local_fence = fence;
@@ -1560,123 +1672,135 @@
     {
         for (uint32_t i = 0; i < (uint32_t)((submitCount)); ++i)
         {
-            transform_tohost_VkSubmitInfo(mImpl->resources(), (VkSubmitInfo*)(local_pSubmits + i));
+            transform_tohost_VkSubmitInfo(sResourceTracker, (VkSubmitInfo*)(local_pSubmits + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_90;
-        countingStream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_90, 1);
-        countingStream->write((uint64_t*)&cgen_var_90, 1 * 8);
-        countingStream->write((uint32_t*)&local_submitCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((submitCount)); ++i)
         {
-            marshal_VkSubmitInfo(countingStream, (VkSubmitInfo*)(local_pSubmits + i));
+            count_VkSubmitInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubmitInfo*)(local_pSubmits + i), countPtr);
         }
-        uint64_t cgen_var_91;
-        countingStream->handleMapping()->mapHandles_VkFence_u64(&local_fence, &cgen_var_91, 1);
-        countingStream->write((uint64_t*)&cgen_var_91, 1 * 8);
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkQueueSubmit = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkQueueSubmit = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkQueueSubmit);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkQueueSubmit = OP_vkQueueSubmit;
-    stream->write(&opcode_vkQueueSubmit, sizeof(uint32_t));
-    stream->write(&packetSize_vkQueueSubmit, sizeof(uint32_t));
-    uint64_t cgen_var_92;
-    stream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_92, 1);
-    stream->write((uint64_t*)&cgen_var_92, 1 * 8);
-    stream->write((uint32_t*)&local_submitCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkQueueSubmit, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkQueueSubmit, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueue((*&local_queue));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_submitCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((submitCount)); ++i)
     {
-        marshal_VkSubmitInfo(stream, (VkSubmitInfo*)(local_pSubmits + i));
+        reservedmarshal_VkSubmitInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubmitInfo*)(local_pSubmits + i), streamPtrPtr);
     }
-    uint64_t cgen_var_93;
-    stream->handleMapping()->mapHandles_VkFence_u64(&local_fence, &cgen_var_93, 1);
-    stream->write((uint64_t*)&cgen_var_93, 1 * 8);
-    AEMU_SCOPED_TRACE("vkQueueSubmit readParams");
-    AEMU_SCOPED_TRACE("vkQueueSubmit returnUnmarshal");
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkFence((*&local_fence));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     VkResult vkQueueSubmit_VkResult_return = (VkResult)0;
     stream->read(&vkQueueSubmit_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkQueueSubmit");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkQueueSubmit_VkResult_return;
 }
 
 VkResult VkEncoder::vkQueueWaitIdle(
-    VkQueue queue)
+    VkQueue queue,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkQueueWaitIdle encode");
-    mImpl->log("start vkQueueWaitIdle");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkQueue local_queue;
     local_queue = queue;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_94;
-        countingStream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_94, 1);
-        countingStream->write((uint64_t*)&cgen_var_94, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkQueueWaitIdle = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkQueueWaitIdle = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkQueueWaitIdle);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkQueueWaitIdle = OP_vkQueueWaitIdle;
-    stream->write(&opcode_vkQueueWaitIdle, sizeof(uint32_t));
-    stream->write(&packetSize_vkQueueWaitIdle, sizeof(uint32_t));
-    uint64_t cgen_var_95;
-    stream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_95, 1);
-    stream->write((uint64_t*)&cgen_var_95, 1 * 8);
-    AEMU_SCOPED_TRACE("vkQueueWaitIdle readParams");
-    AEMU_SCOPED_TRACE("vkQueueWaitIdle returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkQueueWaitIdle, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkQueueWaitIdle, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueue((*&local_queue));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     VkResult vkQueueWaitIdle_VkResult_return = (VkResult)0;
     stream->read(&vkQueueWaitIdle_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkQueueWaitIdle");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkQueueWaitIdle_VkResult_return;
 }
 
 VkResult VkEncoder::vkDeviceWaitIdle(
-    VkDevice device)
+    VkDevice device,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDeviceWaitIdle encode");
-    mImpl->log("start vkDeviceWaitIdle");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     local_device = device;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_96;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_96, 1);
-        countingStream->write((uint64_t*)&cgen_var_96, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkDeviceWaitIdle = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDeviceWaitIdle = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDeviceWaitIdle);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDeviceWaitIdle = OP_vkDeviceWaitIdle;
-    stream->write(&opcode_vkDeviceWaitIdle, sizeof(uint32_t));
-    stream->write(&packetSize_vkDeviceWaitIdle, sizeof(uint32_t));
-    uint64_t cgen_var_97;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_97, 1);
-    stream->write((uint64_t*)&cgen_var_97, 1 * 8);
-    AEMU_SCOPED_TRACE("vkDeviceWaitIdle readParams");
-    AEMU_SCOPED_TRACE("vkDeviceWaitIdle returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDeviceWaitIdle, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDeviceWaitIdle, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     VkResult vkDeviceWaitIdle_VkResult_return = (VkResult)0;
     stream->read(&vkDeviceWaitIdle_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkDeviceWaitIdle");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkDeviceWaitIdle_VkResult_return;
 }
 
@@ -1684,16 +1808,14 @@
     VkDevice device,
     const VkMemoryAllocateInfo* pAllocateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkDeviceMemory* pMemory)
+    VkDeviceMemory* pMemory,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkAllocateMemory encode");
-    mImpl->log("start vkAllocateMemory");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkMemoryAllocateInfo* local_pAllocateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -1702,90 +1824,94 @@
     if (pAllocateInfo)
     {
         local_pAllocateInfo = (VkMemoryAllocateInfo*)pool->alloc(sizeof(const VkMemoryAllocateInfo));
-        deepcopy_VkMemoryAllocateInfo(pool, pAllocateInfo, (VkMemoryAllocateInfo*)(local_pAllocateInfo));
+        deepcopy_VkMemoryAllocateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocateInfo, (VkMemoryAllocateInfo*)(local_pAllocateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocateInfo)
     {
-        transform_tohost_VkMemoryAllocateInfo(mImpl->resources(), (VkMemoryAllocateInfo*)(local_pAllocateInfo));
+        transform_tohost_VkMemoryAllocateInfo(sResourceTracker, (VkMemoryAllocateInfo*)(local_pAllocateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_98;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_98, 1);
-        countingStream->write((uint64_t*)&cgen_var_98, 1 * 8);
-        marshal_VkMemoryAllocateInfo(countingStream, (VkMemoryAllocateInfo*)(local_pAllocateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkMemoryAllocateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryAllocateInfo*)(local_pAllocateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_99 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_99);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_100;
-        countingStream->handleMapping()->mapHandles_VkDeviceMemory_u64(pMemory, &cgen_var_100, 1);
-        countingStream->write((uint64_t*)&cgen_var_100, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkAllocateMemory = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkAllocateMemory = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkAllocateMemory);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkAllocateMemory = OP_vkAllocateMemory;
-    stream->write(&opcode_vkAllocateMemory, sizeof(uint32_t));
-    stream->write(&packetSize_vkAllocateMemory, sizeof(uint32_t));
-    uint64_t cgen_var_101;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_101, 1);
-    stream->write((uint64_t*)&cgen_var_101, 1 * 8);
-    marshal_VkMemoryAllocateInfo(stream, (VkMemoryAllocateInfo*)(local_pAllocateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkAllocateMemory, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkAllocateMemory, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkMemoryAllocateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryAllocateInfo*)(local_pAllocateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_102 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_102);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_103;
-    stream->handleMapping()->mapHandles_VkDeviceMemory_u64(pMemory, &cgen_var_103, 1);
-    stream->write((uint64_t*)&cgen_var_103, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkAllocateMemory readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_104;
-    stream->read((uint64_t*)&cgen_var_104, 8);
-    stream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_104, (VkDeviceMemory*)pMemory, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pMemory));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_3, (VkDeviceMemory*)pMemory, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkAllocateMemory returnUnmarshal");
     VkResult vkAllocateMemory_VkResult_return = (VkResult)0;
     stream->read(&vkAllocateMemory_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkAllocateMemory");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkAllocateMemory_VkResult_return;
 }
 
 void VkEncoder::vkFreeMemory(
     VkDevice device,
     VkDeviceMemory memory,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkFreeMemory encode");
-    mImpl->log("start vkFreeMemory");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDeviceMemory local_memory;
     VkAllocationCallbacks* local_pAllocator;
@@ -1795,48 +1921,58 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
-    mImpl->resources()->deviceMemoryTransform_tohost((VkDeviceMemory*)&local_memory, 1, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
-    countingStream->rewind();
+    sResourceTracker->deviceMemoryTransform_tohost((VkDeviceMemory*)&local_memory, 1, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_105;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_105, 1);
-        countingStream->write((uint64_t*)&cgen_var_105, 1 * 8);
-        uint64_t cgen_var_106;
-        countingStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&local_memory, &cgen_var_106, 1);
-        countingStream->write((uint64_t*)&cgen_var_106, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_107 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_107);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkFreeMemory = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkFreeMemory = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkFreeMemory);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkFreeMemory = OP_vkFreeMemory;
-    stream->write(&opcode_vkFreeMemory, sizeof(uint32_t));
-    stream->write(&packetSize_vkFreeMemory, sizeof(uint32_t));
-    uint64_t cgen_var_108;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_108, 1);
-    stream->write((uint64_t*)&cgen_var_108, 1 * 8);
-    uint64_t cgen_var_109;
-    stream->handleMapping()->mapHandles_VkDeviceMemory_u64(&local_memory, &cgen_var_109, 1);
-    stream->write((uint64_t*)&cgen_var_109, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkFreeMemory, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkFreeMemory, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDeviceMemory((*&local_memory));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_110 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_110);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkFreeMemory readParams");
-    AEMU_SCOPED_TRACE("vkFreeMemory returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkDeviceMemory((VkDeviceMemory*)&memory);
-    mImpl->log("finish vkFreeMemory");;
+    sResourceTracker->destroyMapping()->mapHandles_VkDeviceMemory((VkDeviceMemory*)&memory);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkMapMemory(
@@ -1845,37 +1981,36 @@
     VkDeviceSize offset,
     VkDeviceSize size,
     VkMemoryMapFlags flags,
-    void** ppData)
+    void** ppData,
+    uint32_t doLock)
 {
-    AEMU_SCOPED_TRACE("vkMapMemory resourceEvent");
+    (void)doLock;
     VkResult vkMapMemory_VkResult_return = (VkResult)0;
-    vkMapMemory_VkResult_return = mImpl->resources()->on_vkMapMemory(this, VK_SUCCESS, device, memory, offset, size, flags, ppData);
-    mImpl->log("finish vkMapMemory");;
+    vkMapMemory_VkResult_return = sResourceTracker->on_vkMapMemory(this, VK_SUCCESS, device, memory, offset, size, flags, ppData);
     return vkMapMemory_VkResult_return;
 }
 
 void VkEncoder::vkUnmapMemory(
     VkDevice device,
-    VkDeviceMemory memory)
+    VkDeviceMemory memory,
+    uint32_t doLock)
 {
-    AEMU_SCOPED_TRACE("vkUnmapMemory resourceEvent");
-    mImpl->resources()->on_vkUnmapMemory(this, device, memory);
+    (void)doLock;
+    sResourceTracker->on_vkUnmapMemory(this, device, memory);
 }
 
 VkResult VkEncoder::vkFlushMappedMemoryRanges(
     VkDevice device,
     uint32_t memoryRangeCount,
-    const VkMappedMemoryRange* pMemoryRanges)
+    const VkMappedMemoryRange* pMemoryRanges,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkFlushMappedMemoryRanges encode");
-    mImpl->log("start vkFlushMappedMemoryRanges");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     VALIDATE_RET(VkResult, VK_SUCCESS, mImpl->validation()->on_vkFlushMappedMemoryRanges(this, VK_SUCCESS, device, memoryRangeCount, pMemoryRanges));
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     uint32_t local_memoryRangeCount;
     VkMappedMemoryRange* local_pMemoryRanges;
@@ -1887,60 +2022,46 @@
         local_pMemoryRanges = (VkMappedMemoryRange*)pool->alloc(((memoryRangeCount)) * sizeof(const VkMappedMemoryRange));
         for (uint32_t i = 0; i < (uint32_t)((memoryRangeCount)); ++i)
         {
-            deepcopy_VkMappedMemoryRange(pool, pMemoryRanges + i, (VkMappedMemoryRange*)(local_pMemoryRanges + i));
+            deepcopy_VkMappedMemoryRange(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pMemoryRanges + i, (VkMappedMemoryRange*)(local_pMemoryRanges + i));
         }
     }
     if (local_pMemoryRanges)
     {
         for (uint32_t i = 0; i < (uint32_t)((memoryRangeCount)); ++i)
         {
-            transform_tohost_VkMappedMemoryRange(mImpl->resources(), (VkMappedMemoryRange*)(local_pMemoryRanges + i));
+            transform_tohost_VkMappedMemoryRange(sResourceTracker, (VkMappedMemoryRange*)(local_pMemoryRanges + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_111;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_111, 1);
-        countingStream->write((uint64_t*)&cgen_var_111, 1 * 8);
-        countingStream->write((uint32_t*)&local_memoryRangeCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((memoryRangeCount)); ++i)
         {
-            marshal_VkMappedMemoryRange(countingStream, (VkMappedMemoryRange*)(local_pMemoryRanges + i));
+            count_VkMappedMemoryRange(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMappedMemoryRange*)(local_pMemoryRanges + i), countPtr);
         }
     }
-    if (!resources->usingDirectMapping())
-    {
-        for (uint32_t i = 0; i < memoryRangeCount; ++i)
-        {
-            auto range = pMemoryRanges[i];
-            auto memory = pMemoryRanges[i].memory;
-            auto size = pMemoryRanges[i].size;
-            auto offset = pMemoryRanges[i].offset;
-            uint64_t streamSize = 0;
-            if (!memory) { countingStream->write(&streamSize, sizeof(uint64_t)); continue; };
-            auto hostPtr = resources->getMappedPointer(memory);
-            auto actualSize = size == VK_WHOLE_SIZE ? resources->getMappedSize(memory) : size;
-            if (!hostPtr) { countingStream->write(&streamSize, sizeof(uint64_t)); continue; };
-            streamSize = actualSize;
-            countingStream->write(&streamSize, sizeof(uint64_t));
-            uint8_t* targetRange = hostPtr + offset;
-            countingStream->write(targetRange, actualSize);
-        }
-    }
-    uint32_t packetSize_vkFlushMappedMemoryRanges = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkFlushMappedMemoryRanges = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkFlushMappedMemoryRanges);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkFlushMappedMemoryRanges = OP_vkFlushMappedMemoryRanges;
-    stream->write(&opcode_vkFlushMappedMemoryRanges, sizeof(uint32_t));
-    stream->write(&packetSize_vkFlushMappedMemoryRanges, sizeof(uint32_t));
-    uint64_t cgen_var_112;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_112, 1);
-    stream->write((uint64_t*)&cgen_var_112, 1 * 8);
-    stream->write((uint32_t*)&local_memoryRangeCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkFlushMappedMemoryRanges, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkFlushMappedMemoryRanges, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_memoryRangeCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((memoryRangeCount)); ++i)
     {
-        marshal_VkMappedMemoryRange(stream, (VkMappedMemoryRange*)(local_pMemoryRanges + i));
+        reservedmarshal_VkMappedMemoryRange(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMappedMemoryRange*)(local_pMemoryRanges + i), streamPtrPtr);
     }
-    if (!resources->usingDirectMapping())
+    if (!sResourceTracker->usingDirectMapping())
     {
         for (uint32_t i = 0; i < memoryRangeCount; ++i)
         {
@@ -1950,8 +2071,8 @@
             auto offset = pMemoryRanges[i].offset;
             uint64_t streamSize = 0;
             if (!memory) { stream->write(&streamSize, sizeof(uint64_t)); continue; };
-            auto hostPtr = resources->getMappedPointer(memory);
-            auto actualSize = size == VK_WHOLE_SIZE ? resources->getMappedSize(memory) : size;
+            auto hostPtr = sResourceTracker->getMappedPointer(memory);
+            auto actualSize = size == VK_WHOLE_SIZE ? sResourceTracker->getMappedSize(memory) : size;
             if (!hostPtr) { stream->write(&streamSize, sizeof(uint64_t)); continue; };
             streamSize = actualSize;
             stream->write(&streamSize, sizeof(uint64_t));
@@ -1959,31 +2080,30 @@
             stream->write(targetRange, actualSize);
         }
     }
-    AEMU_SCOPED_TRACE("vkFlushMappedMemoryRanges readParams");
-    AEMU_SCOPED_TRACE("vkFlushMappedMemoryRanges returnUnmarshal");
     VkResult vkFlushMappedMemoryRanges_VkResult_return = (VkResult)0;
     stream->read(&vkFlushMappedMemoryRanges_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkFlushMappedMemoryRanges");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkFlushMappedMemoryRanges_VkResult_return;
 }
 
 VkResult VkEncoder::vkInvalidateMappedMemoryRanges(
     VkDevice device,
     uint32_t memoryRangeCount,
-    const VkMappedMemoryRange* pMemoryRanges)
+    const VkMappedMemoryRange* pMemoryRanges,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkInvalidateMappedMemoryRanges encode");
-    mImpl->log("start vkInvalidateMappedMemoryRanges");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     VALIDATE_RET(VkResult, VK_SUCCESS, mImpl->validation()->on_vkInvalidateMappedMemoryRanges(this, VK_SUCCESS, device, memoryRangeCount, pMemoryRanges));
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     uint32_t local_memoryRangeCount;
     VkMappedMemoryRange* local_pMemoryRanges;
@@ -1995,48 +2115,48 @@
         local_pMemoryRanges = (VkMappedMemoryRange*)pool->alloc(((memoryRangeCount)) * sizeof(const VkMappedMemoryRange));
         for (uint32_t i = 0; i < (uint32_t)((memoryRangeCount)); ++i)
         {
-            deepcopy_VkMappedMemoryRange(pool, pMemoryRanges + i, (VkMappedMemoryRange*)(local_pMemoryRanges + i));
+            deepcopy_VkMappedMemoryRange(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pMemoryRanges + i, (VkMappedMemoryRange*)(local_pMemoryRanges + i));
         }
     }
     if (local_pMemoryRanges)
     {
         for (uint32_t i = 0; i < (uint32_t)((memoryRangeCount)); ++i)
         {
-            transform_tohost_VkMappedMemoryRange(mImpl->resources(), (VkMappedMemoryRange*)(local_pMemoryRanges + i));
+            transform_tohost_VkMappedMemoryRange(sResourceTracker, (VkMappedMemoryRange*)(local_pMemoryRanges + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_113;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_113, 1);
-        countingStream->write((uint64_t*)&cgen_var_113, 1 * 8);
-        countingStream->write((uint32_t*)&local_memoryRangeCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((memoryRangeCount)); ++i)
         {
-            marshal_VkMappedMemoryRange(countingStream, (VkMappedMemoryRange*)(local_pMemoryRanges + i));
+            count_VkMappedMemoryRange(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMappedMemoryRange*)(local_pMemoryRanges + i), countPtr);
         }
     }
-    uint32_t packetSize_vkInvalidateMappedMemoryRanges = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkInvalidateMappedMemoryRanges = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkInvalidateMappedMemoryRanges);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkInvalidateMappedMemoryRanges = OP_vkInvalidateMappedMemoryRanges;
-    stream->write(&opcode_vkInvalidateMappedMemoryRanges, sizeof(uint32_t));
-    stream->write(&packetSize_vkInvalidateMappedMemoryRanges, sizeof(uint32_t));
-    uint64_t cgen_var_114;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_114, 1);
-    stream->write((uint64_t*)&cgen_var_114, 1 * 8);
-    stream->write((uint32_t*)&local_memoryRangeCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkInvalidateMappedMemoryRanges, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkInvalidateMappedMemoryRanges, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_memoryRangeCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((memoryRangeCount)); ++i)
     {
-        marshal_VkMappedMemoryRange(stream, (VkMappedMemoryRange*)(local_pMemoryRanges + i));
+        reservedmarshal_VkMappedMemoryRange(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMappedMemoryRange*)(local_pMemoryRanges + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkInvalidateMappedMemoryRanges readParams");
-    AEMU_SCOPED_TRACE("vkInvalidateMappedMemoryRanges returnUnmarshal");
     VkResult vkInvalidateMappedMemoryRanges_VkResult_return = (VkResult)0;
     stream->read(&vkInvalidateMappedMemoryRanges_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    if (!resources->usingDirectMapping())
+    if (!sResourceTracker->usingDirectMapping())
     {
         for (uint32_t i = 0; i < memoryRangeCount; ++i)
         {
@@ -2046,8 +2166,8 @@
             auto offset = pMemoryRanges[i].offset;
             uint64_t streamSize = 0;
             if (!memory) { stream->read(&streamSize, sizeof(uint64_t)); continue; };
-            auto hostPtr = resources->getMappedPointer(memory);
-            auto actualSize = size == VK_WHOLE_SIZE ? resources->getMappedSize(memory) : size;
+            auto hostPtr = sResourceTracker->getMappedPointer(memory);
+            auto actualSize = size == VK_WHOLE_SIZE ? sResourceTracker->getMappedSize(memory) : size;
             if (!hostPtr) { stream->read(&streamSize, sizeof(uint64_t)); continue; };
             streamSize = actualSize;
             stream->read(&streamSize, sizeof(uint64_t));
@@ -2055,70 +2175,81 @@
             stream->read(targetRange, actualSize);
         }
     }
-    mImpl->log("finish vkInvalidateMappedMemoryRanges");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkInvalidateMappedMemoryRanges_VkResult_return;
 }
 
 void VkEncoder::vkGetDeviceMemoryCommitment(
     VkDevice device,
     VkDeviceMemory memory,
-    VkDeviceSize* pCommittedMemoryInBytes)
+    VkDeviceSize* pCommittedMemoryInBytes,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetDeviceMemoryCommitment encode");
-    mImpl->log("start vkGetDeviceMemoryCommitment");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDeviceMemory local_memory;
     local_device = device;
     local_memory = memory;
-    mImpl->resources()->deviceMemoryTransform_tohost((VkDeviceMemory*)&local_memory, 1, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
-    countingStream->rewind();
+    sResourceTracker->deviceMemoryTransform_tohost((VkDeviceMemory*)&local_memory, 1, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_115;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_115, 1);
-        countingStream->write((uint64_t*)&cgen_var_115, 1 * 8);
-        uint64_t cgen_var_116;
-        countingStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&local_memory, &cgen_var_116, 1);
-        countingStream->write((uint64_t*)&cgen_var_116, 1 * 8);
-        countingStream->write((VkDeviceSize*)pCommittedMemoryInBytes, sizeof(VkDeviceSize));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
     }
-    uint32_t packetSize_vkGetDeviceMemoryCommitment = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetDeviceMemoryCommitment = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDeviceMemoryCommitment);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetDeviceMemoryCommitment = OP_vkGetDeviceMemoryCommitment;
-    stream->write(&opcode_vkGetDeviceMemoryCommitment, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetDeviceMemoryCommitment, sizeof(uint32_t));
-    uint64_t cgen_var_117;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_117, 1);
-    stream->write((uint64_t*)&cgen_var_117, 1 * 8);
-    uint64_t cgen_var_118;
-    stream->handleMapping()->mapHandles_VkDeviceMemory_u64(&local_memory, &cgen_var_118, 1);
-    stream->write((uint64_t*)&cgen_var_118, 1 * 8);
-    stream->write((VkDeviceSize*)pCommittedMemoryInBytes, sizeof(VkDeviceSize));
-    AEMU_SCOPED_TRACE("vkGetDeviceMemoryCommitment readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDeviceMemoryCommitment, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDeviceMemoryCommitment, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDeviceMemory((*&local_memory));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)pCommittedMemoryInBytes, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
     stream->read((VkDeviceSize*)pCommittedMemoryInBytes, sizeof(VkDeviceSize));
-    AEMU_SCOPED_TRACE("vkGetDeviceMemoryCommitment returnUnmarshal");
-    mImpl->log("finish vkGetDeviceMemoryCommitment");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkBindBufferMemory(
     VkDevice device,
     VkBuffer buffer,
     VkDeviceMemory memory,
-    VkDeviceSize memoryOffset)
+    VkDeviceSize memoryOffset,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkBindBufferMemory encode");
-    mImpl->log("start vkBindBufferMemory");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkBuffer local_buffer;
     VkDeviceMemory local_memory;
@@ -2127,43 +2258,49 @@
     local_buffer = buffer;
     local_memory = memory;
     local_memoryOffset = memoryOffset;
-    mImpl->resources()->deviceMemoryTransform_tohost((VkDeviceMemory*)&local_memory, 1, (VkDeviceSize*)&local_memoryOffset, 1, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
-    countingStream->rewind();
+    sResourceTracker->deviceMemoryTransform_tohost((VkDeviceMemory*)&local_memory, 1, (VkDeviceSize*)&local_memoryOffset, 1, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_119;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_119, 1);
-        countingStream->write((uint64_t*)&cgen_var_119, 1 * 8);
-        uint64_t cgen_var_120;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_120, 1);
-        countingStream->write((uint64_t*)&cgen_var_120, 1 * 8);
-        uint64_t cgen_var_121;
-        countingStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&local_memory, &cgen_var_121, 1);
-        countingStream->write((uint64_t*)&cgen_var_121, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_memoryOffset, sizeof(VkDeviceSize));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
     }
-    uint32_t packetSize_vkBindBufferMemory = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkBindBufferMemory = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkBindBufferMemory);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkBindBufferMemory = OP_vkBindBufferMemory;
-    stream->write(&opcode_vkBindBufferMemory, sizeof(uint32_t));
-    stream->write(&packetSize_vkBindBufferMemory, sizeof(uint32_t));
-    uint64_t cgen_var_122;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_122, 1);
-    stream->write((uint64_t*)&cgen_var_122, 1 * 8);
-    uint64_t cgen_var_123;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_123, 1);
-    stream->write((uint64_t*)&cgen_var_123, 1 * 8);
-    uint64_t cgen_var_124;
-    stream->handleMapping()->mapHandles_VkDeviceMemory_u64(&local_memory, &cgen_var_124, 1);
-    stream->write((uint64_t*)&cgen_var_124, 1 * 8);
-    stream->write((VkDeviceSize*)&local_memoryOffset, sizeof(VkDeviceSize));
-    AEMU_SCOPED_TRACE("vkBindBufferMemory readParams");
-    AEMU_SCOPED_TRACE("vkBindBufferMemory returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkBindBufferMemory, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkBindBufferMemory, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&local_buffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = get_host_u64_VkDeviceMemory((*&local_memory));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_memoryOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
     VkResult vkBindBufferMemory_VkResult_return = (VkResult)0;
     stream->read(&vkBindBufferMemory_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkBindBufferMemory");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkBindBufferMemory_VkResult_return;
 }
 
@@ -2171,16 +2308,14 @@
     VkDevice device,
     VkImage image,
     VkDeviceMemory memory,
-    VkDeviceSize memoryOffset)
+    VkDeviceSize memoryOffset,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkBindImageMemory encode");
-    mImpl->log("start vkBindImageMemory");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImage local_image;
     VkDeviceMemory local_memory;
@@ -2189,217 +2324,242 @@
     local_image = image;
     local_memory = memory;
     local_memoryOffset = memoryOffset;
-    mImpl->resources()->deviceMemoryTransform_tohost((VkDeviceMemory*)&local_memory, 1, (VkDeviceSize*)&local_memoryOffset, 1, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
-    countingStream->rewind();
+    sResourceTracker->deviceMemoryTransform_tohost((VkDeviceMemory*)&local_memory, 1, (VkDeviceSize*)&local_memoryOffset, 1, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_125;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_125, 1);
-        countingStream->write((uint64_t*)&cgen_var_125, 1 * 8);
-        uint64_t cgen_var_126;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_126, 1);
-        countingStream->write((uint64_t*)&cgen_var_126, 1 * 8);
-        uint64_t cgen_var_127;
-        countingStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&local_memory, &cgen_var_127, 1);
-        countingStream->write((uint64_t*)&cgen_var_127, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_memoryOffset, sizeof(VkDeviceSize));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
     }
-    uint32_t packetSize_vkBindImageMemory = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkBindImageMemory = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkBindImageMemory);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkBindImageMemory = OP_vkBindImageMemory;
-    stream->write(&opcode_vkBindImageMemory, sizeof(uint32_t));
-    stream->write(&packetSize_vkBindImageMemory, sizeof(uint32_t));
-    uint64_t cgen_var_128;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_128, 1);
-    stream->write((uint64_t*)&cgen_var_128, 1 * 8);
-    uint64_t cgen_var_129;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_129, 1);
-    stream->write((uint64_t*)&cgen_var_129, 1 * 8);
-    uint64_t cgen_var_130;
-    stream->handleMapping()->mapHandles_VkDeviceMemory_u64(&local_memory, &cgen_var_130, 1);
-    stream->write((uint64_t*)&cgen_var_130, 1 * 8);
-    stream->write((VkDeviceSize*)&local_memoryOffset, sizeof(VkDeviceSize));
-    AEMU_SCOPED_TRACE("vkBindImageMemory readParams");
-    AEMU_SCOPED_TRACE("vkBindImageMemory returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkBindImageMemory, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkBindImageMemory, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImage((*&local_image));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = get_host_u64_VkDeviceMemory((*&local_memory));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_memoryOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
     VkResult vkBindImageMemory_VkResult_return = (VkResult)0;
     stream->read(&vkBindImageMemory_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkBindImageMemory");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkBindImageMemory_VkResult_return;
 }
 
 void VkEncoder::vkGetBufferMemoryRequirements(
     VkDevice device,
     VkBuffer buffer,
-    VkMemoryRequirements* pMemoryRequirements)
+    VkMemoryRequirements* pMemoryRequirements,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements encode");
-    mImpl->log("start vkGetBufferMemoryRequirements");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkBuffer local_buffer;
     local_device = device;
     local_buffer = buffer;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_131;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_131, 1);
-        countingStream->write((uint64_t*)&cgen_var_131, 1 * 8);
-        uint64_t cgen_var_132;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_132, 1);
-        countingStream->write((uint64_t*)&cgen_var_132, 1 * 8);
-        marshal_VkMemoryRequirements(countingStream, (VkMemoryRequirements*)(pMemoryRequirements));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        count_VkMemoryRequirements(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements*)(pMemoryRequirements), countPtr);
     }
-    uint32_t packetSize_vkGetBufferMemoryRequirements = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetBufferMemoryRequirements = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetBufferMemoryRequirements);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetBufferMemoryRequirements = OP_vkGetBufferMemoryRequirements;
-    stream->write(&opcode_vkGetBufferMemoryRequirements, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetBufferMemoryRequirements, sizeof(uint32_t));
-    uint64_t cgen_var_133;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_133, 1);
-    stream->write((uint64_t*)&cgen_var_133, 1 * 8);
-    uint64_t cgen_var_134;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_134, 1);
-    stream->write((uint64_t*)&cgen_var_134, 1 * 8);
-    marshal_VkMemoryRequirements(stream, (VkMemoryRequirements*)(pMemoryRequirements));
-    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements readParams");
-    unmarshal_VkMemoryRequirements(stream, (VkMemoryRequirements*)(pMemoryRequirements));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetBufferMemoryRequirements, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetBufferMemoryRequirements, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&local_buffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkMemoryRequirements(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements*)(pMemoryRequirements), streamPtrPtr);
+    unmarshal_VkMemoryRequirements(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements*)(pMemoryRequirements));
     if (pMemoryRequirements)
     {
-        transform_fromhost_VkMemoryRequirements(mImpl->resources(), (VkMemoryRequirements*)(pMemoryRequirements));
+        transform_fromhost_VkMemoryRequirements(sResourceTracker, (VkMemoryRequirements*)(pMemoryRequirements));
     }
-    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements returnUnmarshal");
-    mImpl->log("finish vkGetBufferMemoryRequirements");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetImageMemoryRequirements(
     VkDevice device,
     VkImage image,
-    VkMemoryRequirements* pMemoryRequirements)
+    VkMemoryRequirements* pMemoryRequirements,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements encode");
-    mImpl->log("start vkGetImageMemoryRequirements");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImage local_image;
     local_device = device;
     local_image = image;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_135;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_135, 1);
-        countingStream->write((uint64_t*)&cgen_var_135, 1 * 8);
-        uint64_t cgen_var_136;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_136, 1);
-        countingStream->write((uint64_t*)&cgen_var_136, 1 * 8);
-        marshal_VkMemoryRequirements(countingStream, (VkMemoryRequirements*)(pMemoryRequirements));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        count_VkMemoryRequirements(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements*)(pMemoryRequirements), countPtr);
     }
-    uint32_t packetSize_vkGetImageMemoryRequirements = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetImageMemoryRequirements = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetImageMemoryRequirements);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetImageMemoryRequirements = OP_vkGetImageMemoryRequirements;
-    stream->write(&opcode_vkGetImageMemoryRequirements, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetImageMemoryRequirements, sizeof(uint32_t));
-    uint64_t cgen_var_137;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_137, 1);
-    stream->write((uint64_t*)&cgen_var_137, 1 * 8);
-    uint64_t cgen_var_138;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_138, 1);
-    stream->write((uint64_t*)&cgen_var_138, 1 * 8);
-    marshal_VkMemoryRequirements(stream, (VkMemoryRequirements*)(pMemoryRequirements));
-    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements readParams");
-    unmarshal_VkMemoryRequirements(stream, (VkMemoryRequirements*)(pMemoryRequirements));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetImageMemoryRequirements, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetImageMemoryRequirements, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImage((*&local_image));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkMemoryRequirements(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements*)(pMemoryRequirements), streamPtrPtr);
+    unmarshal_VkMemoryRequirements(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements*)(pMemoryRequirements));
     if (pMemoryRequirements)
     {
-        transform_fromhost_VkMemoryRequirements(mImpl->resources(), (VkMemoryRequirements*)(pMemoryRequirements));
+        transform_fromhost_VkMemoryRequirements(sResourceTracker, (VkMemoryRequirements*)(pMemoryRequirements));
     }
-    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements returnUnmarshal");
-    mImpl->log("finish vkGetImageMemoryRequirements");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetImageSparseMemoryRequirements(
     VkDevice device,
     VkImage image,
     uint32_t* pSparseMemoryRequirementCount,
-    VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
+    VkSparseImageMemoryRequirements* pSparseMemoryRequirements,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements encode");
-    mImpl->log("start vkGetImageSparseMemoryRequirements");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImage local_image;
     local_device = device;
     local_image = image;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_139;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_139, 1);
-        countingStream->write((uint64_t*)&cgen_var_139, 1 * 8);
-        uint64_t cgen_var_140;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_140, 1);
-        countingStream->write((uint64_t*)&cgen_var_140, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_141 = (uint64_t)(uintptr_t)pSparseMemoryRequirementCount;
-        countingStream->putBe64(cgen_var_141);
+        *countPtr += 8;
         if (pSparseMemoryRequirementCount)
         {
-            countingStream->write((uint32_t*)pSparseMemoryRequirementCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_142 = (uint64_t)(uintptr_t)pSparseMemoryRequirements;
-        countingStream->putBe64(cgen_var_142);
+        *countPtr += 8;
         if (pSparseMemoryRequirements)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+            if (pSparseMemoryRequirementCount)
             {
-                marshal_VkSparseImageMemoryRequirements(countingStream, (VkSparseImageMemoryRequirements*)(pSparseMemoryRequirements + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+                {
+                    count_VkSparseImageMemoryRequirements(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageMemoryRequirements*)(pSparseMemoryRequirements + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetImageSparseMemoryRequirements = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetImageSparseMemoryRequirements = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetImageSparseMemoryRequirements);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetImageSparseMemoryRequirements = OP_vkGetImageSparseMemoryRequirements;
-    stream->write(&opcode_vkGetImageSparseMemoryRequirements, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetImageSparseMemoryRequirements, sizeof(uint32_t));
-    uint64_t cgen_var_143;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_143, 1);
-    stream->write((uint64_t*)&cgen_var_143, 1 * 8);
-    uint64_t cgen_var_144;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_144, 1);
-    stream->write((uint64_t*)&cgen_var_144, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetImageSparseMemoryRequirements, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetImageSparseMemoryRequirements, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImage((*&local_image));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_145 = (uint64_t)(uintptr_t)pSparseMemoryRequirementCount;
-    stream->putBe64(cgen_var_145);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pSparseMemoryRequirementCount;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pSparseMemoryRequirementCount)
     {
-        stream->write((uint32_t*)pSparseMemoryRequirementCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pSparseMemoryRequirementCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_146 = (uint64_t)(uintptr_t)pSparseMemoryRequirements;
-    stream->putBe64(cgen_var_146);
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)pSparseMemoryRequirements;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pSparseMemoryRequirements)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
         {
-            marshal_VkSparseImageMemoryRequirements(stream, (VkSparseImageMemoryRequirements*)(pSparseMemoryRequirements + i));
+            reservedmarshal_VkSparseImageMemoryRequirements(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageMemoryRequirements*)(pSparseMemoryRequirements + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements readParams");
     // WARNING PTR CHECK
     uint32_t* check_pSparseMemoryRequirementCount;
     check_pSparseMemoryRequirementCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -2420,20 +2580,31 @@
         {
             fprintf(stderr, "fatal: pSparseMemoryRequirements inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+        if (pSparseMemoryRequirementCount)
         {
-            unmarshal_VkSparseImageMemoryRequirements(stream, (VkSparseImageMemoryRequirements*)(pSparseMemoryRequirements + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+            {
+                unmarshal_VkSparseImageMemoryRequirements(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageMemoryRequirements*)(pSparseMemoryRequirements + i));
+            }
         }
     }
-    if (pSparseMemoryRequirements)
+    if (pSparseMemoryRequirementCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+        if (pSparseMemoryRequirements)
         {
-            transform_fromhost_VkSparseImageMemoryRequirements(mImpl->resources(), (VkSparseImageMemoryRequirements*)(pSparseMemoryRequirements + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+            {
+                transform_fromhost_VkSparseImageMemoryRequirements(sResourceTracker, (VkSparseImageMemoryRequirements*)(pSparseMemoryRequirements + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements returnUnmarshal");
-    mImpl->log("finish vkGetImageSparseMemoryRequirements");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetPhysicalDeviceSparseImageFormatProperties(
@@ -2444,16 +2615,14 @@
     VkImageUsageFlags usage,
     VkImageTiling tiling,
     uint32_t* pPropertyCount,
-    VkSparseImageFormatProperties* pProperties)
+    VkSparseImageFormatProperties* pProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSparseImageFormatProperties encode");
-    mImpl->log("start vkGetPhysicalDeviceSparseImageFormatProperties");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkFormat local_format;
     VkImageType local_type;
@@ -2466,65 +2635,79 @@
     local_samples = samples;
     local_usage = usage;
     local_tiling = tiling;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_149;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_149, 1);
-        countingStream->write((uint64_t*)&cgen_var_149, 1 * 8);
-        countingStream->write((VkFormat*)&local_format, sizeof(VkFormat));
-        countingStream->write((VkImageType*)&local_type, sizeof(VkImageType));
-        countingStream->write((VkSampleCountFlagBits*)&local_samples, sizeof(VkSampleCountFlagBits));
-        countingStream->write((VkImageUsageFlags*)&local_usage, sizeof(VkImageUsageFlags));
-        countingStream->write((VkImageTiling*)&local_tiling, sizeof(VkImageTiling));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkFormat);
+        *countPtr += sizeof(VkImageType);
+        *countPtr += sizeof(VkSampleCountFlagBits);
+        *countPtr += sizeof(VkImageUsageFlags);
+        *countPtr += sizeof(VkImageTiling);
         // WARNING PTR CHECK
-        uint64_t cgen_var_150 = (uint64_t)(uintptr_t)pPropertyCount;
-        countingStream->putBe64(cgen_var_150);
+        *countPtr += 8;
         if (pPropertyCount)
         {
-            countingStream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_151 = (uint64_t)(uintptr_t)pProperties;
-        countingStream->putBe64(cgen_var_151);
+        *countPtr += 8;
         if (pProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            if (pPropertyCount)
             {
-                marshal_VkSparseImageFormatProperties(countingStream, (VkSparseImageFormatProperties*)(pProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+                {
+                    count_VkSparseImageFormatProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageFormatProperties*)(pProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetPhysicalDeviceSparseImageFormatProperties = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceSparseImageFormatProperties = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceSparseImageFormatProperties);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceSparseImageFormatProperties = OP_vkGetPhysicalDeviceSparseImageFormatProperties;
-    stream->write(&opcode_vkGetPhysicalDeviceSparseImageFormatProperties, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceSparseImageFormatProperties, sizeof(uint32_t));
-    uint64_t cgen_var_152;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_152, 1);
-    stream->write((uint64_t*)&cgen_var_152, 1 * 8);
-    stream->write((VkFormat*)&local_format, sizeof(VkFormat));
-    stream->write((VkImageType*)&local_type, sizeof(VkImageType));
-    stream->write((VkSampleCountFlagBits*)&local_samples, sizeof(VkSampleCountFlagBits));
-    stream->write((VkImageUsageFlags*)&local_usage, sizeof(VkImageUsageFlags));
-    stream->write((VkImageTiling*)&local_tiling, sizeof(VkImageTiling));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceSparseImageFormatProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceSparseImageFormatProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkFormat*)&local_format, sizeof(VkFormat));
+    *streamPtrPtr += sizeof(VkFormat);
+    memcpy(*streamPtrPtr, (VkImageType*)&local_type, sizeof(VkImageType));
+    *streamPtrPtr += sizeof(VkImageType);
+    memcpy(*streamPtrPtr, (VkSampleCountFlagBits*)&local_samples, sizeof(VkSampleCountFlagBits));
+    *streamPtrPtr += sizeof(VkSampleCountFlagBits);
+    memcpy(*streamPtrPtr, (VkImageUsageFlags*)&local_usage, sizeof(VkImageUsageFlags));
+    *streamPtrPtr += sizeof(VkImageUsageFlags);
+    memcpy(*streamPtrPtr, (VkImageTiling*)&local_tiling, sizeof(VkImageTiling));
+    *streamPtrPtr += sizeof(VkImageTiling);
     // WARNING PTR CHECK
-    uint64_t cgen_var_153 = (uint64_t)(uintptr_t)pPropertyCount;
-    stream->putBe64(cgen_var_153);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pPropertyCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPropertyCount)
     {
-        stream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPropertyCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_154 = (uint64_t)(uintptr_t)pProperties;
-    stream->putBe64(cgen_var_154);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pProperties;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
         {
-            marshal_VkSparseImageFormatProperties(stream, (VkSparseImageFormatProperties*)(pProperties + i));
+            reservedmarshal_VkSparseImageFormatProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageFormatProperties*)(pProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSparseImageFormatProperties readParams");
     // WARNING PTR CHECK
     uint32_t* check_pPropertyCount;
     check_pPropertyCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -2545,36 +2728,45 @@
         {
             fprintf(stderr, "fatal: pProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pPropertyCount)
         {
-            unmarshal_VkSparseImageFormatProperties(stream, (VkSparseImageFormatProperties*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                unmarshal_VkSparseImageFormatProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageFormatProperties*)(pProperties + i));
+            }
         }
     }
-    if (pProperties)
+    if (pPropertyCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pProperties)
         {
-            transform_fromhost_VkSparseImageFormatProperties(mImpl->resources(), (VkSparseImageFormatProperties*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                transform_fromhost_VkSparseImageFormatProperties(sResourceTracker, (VkSparseImageFormatProperties*)(pProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSparseImageFormatProperties returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceSparseImageFormatProperties");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkQueueBindSparse(
     VkQueue queue,
     uint32_t bindInfoCount,
     const VkBindSparseInfo* pBindInfo,
-    VkFence fence)
+    VkFence fence,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkQueueBindSparse encode");
-    mImpl->log("start vkQueueBindSparse");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkQueue local_queue;
     uint32_t local_bindInfoCount;
     VkBindSparseInfo* local_pBindInfo;
@@ -2587,7 +2779,7 @@
         local_pBindInfo = (VkBindSparseInfo*)pool->alloc(((bindInfoCount)) * sizeof(const VkBindSparseInfo));
         for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
         {
-            deepcopy_VkBindSparseInfo(pool, pBindInfo + i, (VkBindSparseInfo*)(local_pBindInfo + i));
+            deepcopy_VkBindSparseInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pBindInfo + i, (VkBindSparseInfo*)(local_pBindInfo + i));
         }
     }
     local_fence = fence;
@@ -2595,47 +2787,53 @@
     {
         for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
         {
-            transform_tohost_VkBindSparseInfo(mImpl->resources(), (VkBindSparseInfo*)(local_pBindInfo + i));
+            transform_tohost_VkBindSparseInfo(sResourceTracker, (VkBindSparseInfo*)(local_pBindInfo + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_157;
-        countingStream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_157, 1);
-        countingStream->write((uint64_t*)&cgen_var_157, 1 * 8);
-        countingStream->write((uint32_t*)&local_bindInfoCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
         {
-            marshal_VkBindSparseInfo(countingStream, (VkBindSparseInfo*)(local_pBindInfo + i));
+            count_VkBindSparseInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBindSparseInfo*)(local_pBindInfo + i), countPtr);
         }
-        uint64_t cgen_var_158;
-        countingStream->handleMapping()->mapHandles_VkFence_u64(&local_fence, &cgen_var_158, 1);
-        countingStream->write((uint64_t*)&cgen_var_158, 1 * 8);
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkQueueBindSparse = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkQueueBindSparse = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkQueueBindSparse);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkQueueBindSparse = OP_vkQueueBindSparse;
-    stream->write(&opcode_vkQueueBindSparse, sizeof(uint32_t));
-    stream->write(&packetSize_vkQueueBindSparse, sizeof(uint32_t));
-    uint64_t cgen_var_159;
-    stream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_159, 1);
-    stream->write((uint64_t*)&cgen_var_159, 1 * 8);
-    stream->write((uint32_t*)&local_bindInfoCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkQueueBindSparse, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkQueueBindSparse, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueue((*&local_queue));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_bindInfoCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
     {
-        marshal_VkBindSparseInfo(stream, (VkBindSparseInfo*)(local_pBindInfo + i));
+        reservedmarshal_VkBindSparseInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBindSparseInfo*)(local_pBindInfo + i), streamPtrPtr);
     }
-    uint64_t cgen_var_160;
-    stream->handleMapping()->mapHandles_VkFence_u64(&local_fence, &cgen_var_160, 1);
-    stream->write((uint64_t*)&cgen_var_160, 1 * 8);
-    AEMU_SCOPED_TRACE("vkQueueBindSparse readParams");
-    AEMU_SCOPED_TRACE("vkQueueBindSparse returnUnmarshal");
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkFence((*&local_fence));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     VkResult vkQueueBindSparse_VkResult_return = (VkResult)0;
     stream->read(&vkQueueBindSparse_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkQueueBindSparse");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkQueueBindSparse_VkResult_return;
 }
 
@@ -2643,16 +2841,14 @@
     VkDevice device,
     const VkFenceCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkFence* pFence)
+    VkFence* pFence,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateFence encode");
-    mImpl->log("start vkCreateFence");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkFenceCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -2661,90 +2857,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkFenceCreateInfo*)pool->alloc(sizeof(const VkFenceCreateInfo));
-        deepcopy_VkFenceCreateInfo(pool, pCreateInfo, (VkFenceCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkFenceCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkFenceCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkFenceCreateInfo(mImpl->resources(), (VkFenceCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkFenceCreateInfo(sResourceTracker, (VkFenceCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_161;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_161, 1);
-        countingStream->write((uint64_t*)&cgen_var_161, 1 * 8);
-        marshal_VkFenceCreateInfo(countingStream, (VkFenceCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkFenceCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFenceCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_162 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_162);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_163;
-        countingStream->handleMapping()->mapHandles_VkFence_u64(pFence, &cgen_var_163, 1);
-        countingStream->write((uint64_t*)&cgen_var_163, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateFence = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateFence = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateFence);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateFence = OP_vkCreateFence;
-    stream->write(&opcode_vkCreateFence, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateFence, sizeof(uint32_t));
-    uint64_t cgen_var_164;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_164, 1);
-    stream->write((uint64_t*)&cgen_var_164, 1 * 8);
-    marshal_VkFenceCreateInfo(stream, (VkFenceCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateFence, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateFence, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkFenceCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFenceCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_165 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_165);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_166;
-    stream->handleMapping()->mapHandles_VkFence_u64(pFence, &cgen_var_166, 1);
-    stream->write((uint64_t*)&cgen_var_166, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateFence readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_167;
-    stream->read((uint64_t*)&cgen_var_167, 8);
-    stream->handleMapping()->mapHandles_u64_VkFence(&cgen_var_167, (VkFence*)pFence, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pFence));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkFence(&cgen_var_3, (VkFence*)pFence, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateFence returnUnmarshal");
     VkResult vkCreateFence_VkResult_return = (VkResult)0;
     stream->read(&vkCreateFence_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateFence");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateFence_VkResult_return;
 }
 
 void VkEncoder::vkDestroyFence(
     VkDevice device,
     VkFence fence,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyFence encode");
-    mImpl->log("start vkDestroyFence");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkFence local_fence;
     VkAllocationCallbacks* local_pAllocator;
@@ -2754,161 +2954,175 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_168;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_168, 1);
-        countingStream->write((uint64_t*)&cgen_var_168, 1 * 8);
-        uint64_t cgen_var_169;
-        countingStream->handleMapping()->mapHandles_VkFence_u64(&local_fence, &cgen_var_169, 1);
-        countingStream->write((uint64_t*)&cgen_var_169, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_170 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_170);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyFence = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyFence = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyFence);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyFence = OP_vkDestroyFence;
-    stream->write(&opcode_vkDestroyFence, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyFence, sizeof(uint32_t));
-    uint64_t cgen_var_171;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_171, 1);
-    stream->write((uint64_t*)&cgen_var_171, 1 * 8);
-    uint64_t cgen_var_172;
-    stream->handleMapping()->mapHandles_VkFence_u64(&local_fence, &cgen_var_172, 1);
-    stream->write((uint64_t*)&cgen_var_172, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyFence, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyFence, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkFence((*&local_fence));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_173 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_173);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyFence readParams");
-    AEMU_SCOPED_TRACE("vkDestroyFence returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkFence((VkFence*)&fence);
-    mImpl->log("finish vkDestroyFence");;
+    sResourceTracker->destroyMapping()->mapHandles_VkFence((VkFence*)&fence);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkResetFences(
     VkDevice device,
     uint32_t fenceCount,
-    const VkFence* pFences)
+    const VkFence* pFences,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkResetFences encode");
-    mImpl->log("start vkResetFences");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     uint32_t local_fenceCount;
     VkFence* local_pFences;
     local_device = device;
     local_fenceCount = fenceCount;
-    local_pFences = nullptr;
-    if (pFences)
+    // Avoiding deepcopy for pFences
+    local_pFences = (VkFence*)pFences;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pFences = (VkFence*)pool->dupArray(pFences, ((fenceCount)) * sizeof(const VkFence));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_174;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_174, 1);
-        countingStream->write((uint64_t*)&cgen_var_174, 1 * 8);
-        countingStream->write((uint32_t*)&local_fenceCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         if (((fenceCount)))
         {
-            uint64_t* cgen_var_175;
-            countingStream->alloc((void**)&cgen_var_175, ((fenceCount)) * 8);
-            countingStream->handleMapping()->mapHandles_VkFence_u64(local_pFences, cgen_var_175, ((fenceCount)));
-            countingStream->write((uint64_t*)cgen_var_175, ((fenceCount)) * 8);
+            *countPtr += ((fenceCount)) * 8;
         }
     }
-    uint32_t packetSize_vkResetFences = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkResetFences = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkResetFences);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkResetFences = OP_vkResetFences;
-    stream->write(&opcode_vkResetFences, sizeof(uint32_t));
-    stream->write(&packetSize_vkResetFences, sizeof(uint32_t));
-    uint64_t cgen_var_176;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_176, 1);
-    stream->write((uint64_t*)&cgen_var_176, 1 * 8);
-    stream->write((uint32_t*)&local_fenceCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkResetFences, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkResetFences, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_fenceCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     if (((fenceCount)))
     {
-        uint64_t* cgen_var_177;
-        stream->alloc((void**)&cgen_var_177, ((fenceCount)) * 8);
-        stream->handleMapping()->mapHandles_VkFence_u64(local_pFences, cgen_var_177, ((fenceCount)));
-        stream->write((uint64_t*)cgen_var_177, ((fenceCount)) * 8);
+        uint8_t* cgen_var_1_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((fenceCount)); ++k)
+        {
+            uint64_t tmpval = get_host_u64_VkFence(local_pFences[k]);
+            memcpy(cgen_var_1_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((fenceCount));
     }
-    AEMU_SCOPED_TRACE("vkResetFences readParams");
-    AEMU_SCOPED_TRACE("vkResetFences returnUnmarshal");
     VkResult vkResetFences_VkResult_return = (VkResult)0;
     stream->read(&vkResetFences_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkResetFences");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkResetFences_VkResult_return;
 }
 
 VkResult VkEncoder::vkGetFenceStatus(
     VkDevice device,
-    VkFence fence)
+    VkFence fence,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetFenceStatus encode");
-    mImpl->log("start vkGetFenceStatus");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkFence local_fence;
     local_device = device;
     local_fence = fence;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_178;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_178, 1);
-        countingStream->write((uint64_t*)&cgen_var_178, 1 * 8);
-        uint64_t cgen_var_179;
-        countingStream->handleMapping()->mapHandles_VkFence_u64(&local_fence, &cgen_var_179, 1);
-        countingStream->write((uint64_t*)&cgen_var_179, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkGetFenceStatus = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetFenceStatus = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetFenceStatus);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetFenceStatus = OP_vkGetFenceStatus;
-    stream->write(&opcode_vkGetFenceStatus, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetFenceStatus, sizeof(uint32_t));
-    uint64_t cgen_var_180;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_180, 1);
-    stream->write((uint64_t*)&cgen_var_180, 1 * 8);
-    uint64_t cgen_var_181;
-    stream->handleMapping()->mapHandles_VkFence_u64(&local_fence, &cgen_var_181, 1);
-    stream->write((uint64_t*)&cgen_var_181, 1 * 8);
-    AEMU_SCOPED_TRACE("vkGetFenceStatus readParams");
-    AEMU_SCOPED_TRACE("vkGetFenceStatus returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetFenceStatus, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetFenceStatus, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkFence((*&local_fence));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     VkResult vkGetFenceStatus_VkResult_return = (VkResult)0;
     stream->read(&vkGetFenceStatus_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetFenceStatus");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetFenceStatus_VkResult_return;
 }
 
@@ -2917,16 +3131,14 @@
     uint32_t fenceCount,
     const VkFence* pFences,
     VkBool32 waitAll,
-    uint64_t timeout)
+    uint64_t timeout,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkWaitForFences encode");
-    mImpl->log("start vkWaitForFences");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     uint32_t local_fenceCount;
     VkFence* local_pFences;
@@ -2934,55 +3146,60 @@
     uint64_t local_timeout;
     local_device = device;
     local_fenceCount = fenceCount;
-    local_pFences = nullptr;
-    if (pFences)
-    {
-        local_pFences = (VkFence*)pool->dupArray(pFences, ((fenceCount)) * sizeof(const VkFence));
-    }
+    // Avoiding deepcopy for pFences
+    local_pFences = (VkFence*)pFences;
     local_waitAll = waitAll;
     local_timeout = timeout;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_182;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_182, 1);
-        countingStream->write((uint64_t*)&cgen_var_182, 1 * 8);
-        countingStream->write((uint32_t*)&local_fenceCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         if (((fenceCount)))
         {
-            uint64_t* cgen_var_183;
-            countingStream->alloc((void**)&cgen_var_183, ((fenceCount)) * 8);
-            countingStream->handleMapping()->mapHandles_VkFence_u64(local_pFences, cgen_var_183, ((fenceCount)));
-            countingStream->write((uint64_t*)cgen_var_183, ((fenceCount)) * 8);
+            *countPtr += ((fenceCount)) * 8;
         }
-        countingStream->write((VkBool32*)&local_waitAll, sizeof(VkBool32));
-        countingStream->write((uint64_t*)&local_timeout, sizeof(uint64_t));
+        *countPtr += sizeof(VkBool32);
+        *countPtr += sizeof(uint64_t);
     }
-    uint32_t packetSize_vkWaitForFences = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkWaitForFences = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkWaitForFences);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkWaitForFences = OP_vkWaitForFences;
-    stream->write(&opcode_vkWaitForFences, sizeof(uint32_t));
-    stream->write(&packetSize_vkWaitForFences, sizeof(uint32_t));
-    uint64_t cgen_var_184;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_184, 1);
-    stream->write((uint64_t*)&cgen_var_184, 1 * 8);
-    stream->write((uint32_t*)&local_fenceCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkWaitForFences, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkWaitForFences, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_fenceCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     if (((fenceCount)))
     {
-        uint64_t* cgen_var_185;
-        stream->alloc((void**)&cgen_var_185, ((fenceCount)) * 8);
-        stream->handleMapping()->mapHandles_VkFence_u64(local_pFences, cgen_var_185, ((fenceCount)));
-        stream->write((uint64_t*)cgen_var_185, ((fenceCount)) * 8);
+        uint8_t* cgen_var_1_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((fenceCount)); ++k)
+        {
+            uint64_t tmpval = get_host_u64_VkFence(local_pFences[k]);
+            memcpy(cgen_var_1_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((fenceCount));
     }
-    stream->write((VkBool32*)&local_waitAll, sizeof(VkBool32));
-    stream->write((uint64_t*)&local_timeout, sizeof(uint64_t));
-    AEMU_SCOPED_TRACE("vkWaitForFences readParams");
-    AEMU_SCOPED_TRACE("vkWaitForFences returnUnmarshal");
+    memcpy(*streamPtrPtr, (VkBool32*)&local_waitAll, sizeof(VkBool32));
+    *streamPtrPtr += sizeof(VkBool32);
+    memcpy(*streamPtrPtr, (uint64_t*)&local_timeout, sizeof(uint64_t));
+    *streamPtrPtr += sizeof(uint64_t);
     VkResult vkWaitForFences_VkResult_return = (VkResult)0;
     stream->read(&vkWaitForFences_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkWaitForFences");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkWaitForFences_VkResult_return;
 }
 
@@ -2990,16 +3207,14 @@
     VkDevice device,
     const VkSemaphoreCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkSemaphore* pSemaphore)
+    VkSemaphore* pSemaphore,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateSemaphore encode");
-    mImpl->log("start vkCreateSemaphore");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSemaphoreCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -3008,90 +3223,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkSemaphoreCreateInfo*)pool->alloc(sizeof(const VkSemaphoreCreateInfo));
-        deepcopy_VkSemaphoreCreateInfo(pool, pCreateInfo, (VkSemaphoreCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkSemaphoreCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkSemaphoreCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkSemaphoreCreateInfo(mImpl->resources(), (VkSemaphoreCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkSemaphoreCreateInfo(sResourceTracker, (VkSemaphoreCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_186;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_186, 1);
-        countingStream->write((uint64_t*)&cgen_var_186, 1 * 8);
-        marshal_VkSemaphoreCreateInfo(countingStream, (VkSemaphoreCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkSemaphoreCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSemaphoreCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_187 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_187);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_188;
-        countingStream->handleMapping()->mapHandles_VkSemaphore_u64(pSemaphore, &cgen_var_188, 1);
-        countingStream->write((uint64_t*)&cgen_var_188, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateSemaphore = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateSemaphore = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateSemaphore);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateSemaphore = OP_vkCreateSemaphore;
-    stream->write(&opcode_vkCreateSemaphore, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateSemaphore, sizeof(uint32_t));
-    uint64_t cgen_var_189;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_189, 1);
-    stream->write((uint64_t*)&cgen_var_189, 1 * 8);
-    marshal_VkSemaphoreCreateInfo(stream, (VkSemaphoreCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateSemaphore, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateSemaphore, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkSemaphoreCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSemaphoreCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_190 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_190);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_191;
-    stream->handleMapping()->mapHandles_VkSemaphore_u64(pSemaphore, &cgen_var_191, 1);
-    stream->write((uint64_t*)&cgen_var_191, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateSemaphore readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_192;
-    stream->read((uint64_t*)&cgen_var_192, 8);
-    stream->handleMapping()->mapHandles_u64_VkSemaphore(&cgen_var_192, (VkSemaphore*)pSemaphore, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSemaphore));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSemaphore(&cgen_var_3, (VkSemaphore*)pSemaphore, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateSemaphore returnUnmarshal");
     VkResult vkCreateSemaphore_VkResult_return = (VkResult)0;
     stream->read(&vkCreateSemaphore_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateSemaphore");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateSemaphore_VkResult_return;
 }
 
 void VkEncoder::vkDestroySemaphore(
     VkDevice device,
     VkSemaphore semaphore,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroySemaphore encode");
-    mImpl->log("start vkDestroySemaphore");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSemaphore local_semaphore;
     VkAllocationCallbacks* local_pAllocator;
@@ -3101,67 +3320,75 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_193;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_193, 1);
-        countingStream->write((uint64_t*)&cgen_var_193, 1 * 8);
-        uint64_t cgen_var_194;
-        countingStream->handleMapping()->mapHandles_VkSemaphore_u64(&local_semaphore, &cgen_var_194, 1);
-        countingStream->write((uint64_t*)&cgen_var_194, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_195 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_195);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroySemaphore = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroySemaphore = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroySemaphore);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroySemaphore = OP_vkDestroySemaphore;
-    stream->write(&opcode_vkDestroySemaphore, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroySemaphore, sizeof(uint32_t));
-    uint64_t cgen_var_196;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_196, 1);
-    stream->write((uint64_t*)&cgen_var_196, 1 * 8);
-    uint64_t cgen_var_197;
-    stream->handleMapping()->mapHandles_VkSemaphore_u64(&local_semaphore, &cgen_var_197, 1);
-    stream->write((uint64_t*)&cgen_var_197, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroySemaphore, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroySemaphore, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSemaphore((*&local_semaphore));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_198 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_198);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroySemaphore readParams");
-    AEMU_SCOPED_TRACE("vkDestroySemaphore returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkSemaphore((VkSemaphore*)&semaphore);
-    mImpl->log("finish vkDestroySemaphore");;
+    sResourceTracker->destroyMapping()->mapHandles_VkSemaphore((VkSemaphore*)&semaphore);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkCreateEvent(
     VkDevice device,
     const VkEventCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkEvent* pEvent)
+    VkEvent* pEvent,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateEvent encode");
-    mImpl->log("start vkCreateEvent");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkEventCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -3170,90 +3397,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkEventCreateInfo*)pool->alloc(sizeof(const VkEventCreateInfo));
-        deepcopy_VkEventCreateInfo(pool, pCreateInfo, (VkEventCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkEventCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkEventCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkEventCreateInfo(mImpl->resources(), (VkEventCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkEventCreateInfo(sResourceTracker, (VkEventCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_199;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_199, 1);
-        countingStream->write((uint64_t*)&cgen_var_199, 1 * 8);
-        marshal_VkEventCreateInfo(countingStream, (VkEventCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkEventCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkEventCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_200 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_200);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_201;
-        countingStream->handleMapping()->mapHandles_VkEvent_u64(pEvent, &cgen_var_201, 1);
-        countingStream->write((uint64_t*)&cgen_var_201, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateEvent = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateEvent = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateEvent);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateEvent = OP_vkCreateEvent;
-    stream->write(&opcode_vkCreateEvent, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateEvent, sizeof(uint32_t));
-    uint64_t cgen_var_202;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_202, 1);
-    stream->write((uint64_t*)&cgen_var_202, 1 * 8);
-    marshal_VkEventCreateInfo(stream, (VkEventCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateEvent, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateEvent, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkEventCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkEventCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_203 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_203);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_204;
-    stream->handleMapping()->mapHandles_VkEvent_u64(pEvent, &cgen_var_204, 1);
-    stream->write((uint64_t*)&cgen_var_204, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateEvent readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_205;
-    stream->read((uint64_t*)&cgen_var_205, 8);
-    stream->handleMapping()->mapHandles_u64_VkEvent(&cgen_var_205, (VkEvent*)pEvent, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pEvent));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkEvent(&cgen_var_3, (VkEvent*)pEvent, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateEvent returnUnmarshal");
     VkResult vkCreateEvent_VkResult_return = (VkResult)0;
     stream->read(&vkCreateEvent_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateEvent");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateEvent_VkResult_return;
 }
 
 void VkEncoder::vkDestroyEvent(
     VkDevice device,
     VkEvent event,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyEvent encode");
-    mImpl->log("start vkDestroyEvent");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkEvent local_event;
     VkAllocationCallbacks* local_pAllocator;
@@ -3263,191 +3494,210 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_206;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_206, 1);
-        countingStream->write((uint64_t*)&cgen_var_206, 1 * 8);
-        uint64_t cgen_var_207;
-        countingStream->handleMapping()->mapHandles_VkEvent_u64(&local_event, &cgen_var_207, 1);
-        countingStream->write((uint64_t*)&cgen_var_207, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_208 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_208);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyEvent = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyEvent = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyEvent);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyEvent = OP_vkDestroyEvent;
-    stream->write(&opcode_vkDestroyEvent, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyEvent, sizeof(uint32_t));
-    uint64_t cgen_var_209;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_209, 1);
-    stream->write((uint64_t*)&cgen_var_209, 1 * 8);
-    uint64_t cgen_var_210;
-    stream->handleMapping()->mapHandles_VkEvent_u64(&local_event, &cgen_var_210, 1);
-    stream->write((uint64_t*)&cgen_var_210, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyEvent, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyEvent, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkEvent((*&local_event));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_211 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_211);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyEvent readParams");
-    AEMU_SCOPED_TRACE("vkDestroyEvent returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkEvent((VkEvent*)&event);
-    mImpl->log("finish vkDestroyEvent");;
+    sResourceTracker->destroyMapping()->mapHandles_VkEvent((VkEvent*)&event);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkGetEventStatus(
     VkDevice device,
-    VkEvent event)
+    VkEvent event,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetEventStatus encode");
-    mImpl->log("start vkGetEventStatus");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkEvent local_event;
     local_device = device;
     local_event = event;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_212;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_212, 1);
-        countingStream->write((uint64_t*)&cgen_var_212, 1 * 8);
-        uint64_t cgen_var_213;
-        countingStream->handleMapping()->mapHandles_VkEvent_u64(&local_event, &cgen_var_213, 1);
-        countingStream->write((uint64_t*)&cgen_var_213, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkGetEventStatus = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetEventStatus = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetEventStatus);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetEventStatus = OP_vkGetEventStatus;
-    stream->write(&opcode_vkGetEventStatus, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetEventStatus, sizeof(uint32_t));
-    uint64_t cgen_var_214;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_214, 1);
-    stream->write((uint64_t*)&cgen_var_214, 1 * 8);
-    uint64_t cgen_var_215;
-    stream->handleMapping()->mapHandles_VkEvent_u64(&local_event, &cgen_var_215, 1);
-    stream->write((uint64_t*)&cgen_var_215, 1 * 8);
-    AEMU_SCOPED_TRACE("vkGetEventStatus readParams");
-    AEMU_SCOPED_TRACE("vkGetEventStatus returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetEventStatus, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetEventStatus, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkEvent((*&local_event));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     VkResult vkGetEventStatus_VkResult_return = (VkResult)0;
     stream->read(&vkGetEventStatus_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetEventStatus");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetEventStatus_VkResult_return;
 }
 
 VkResult VkEncoder::vkSetEvent(
     VkDevice device,
-    VkEvent event)
+    VkEvent event,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkSetEvent encode");
-    mImpl->log("start vkSetEvent");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkEvent local_event;
     local_device = device;
     local_event = event;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_216;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_216, 1);
-        countingStream->write((uint64_t*)&cgen_var_216, 1 * 8);
-        uint64_t cgen_var_217;
-        countingStream->handleMapping()->mapHandles_VkEvent_u64(&local_event, &cgen_var_217, 1);
-        countingStream->write((uint64_t*)&cgen_var_217, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkSetEvent = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkSetEvent = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkSetEvent);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkSetEvent = OP_vkSetEvent;
-    stream->write(&opcode_vkSetEvent, sizeof(uint32_t));
-    stream->write(&packetSize_vkSetEvent, sizeof(uint32_t));
-    uint64_t cgen_var_218;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_218, 1);
-    stream->write((uint64_t*)&cgen_var_218, 1 * 8);
-    uint64_t cgen_var_219;
-    stream->handleMapping()->mapHandles_VkEvent_u64(&local_event, &cgen_var_219, 1);
-    stream->write((uint64_t*)&cgen_var_219, 1 * 8);
-    AEMU_SCOPED_TRACE("vkSetEvent readParams");
-    AEMU_SCOPED_TRACE("vkSetEvent returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkSetEvent, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkSetEvent, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkEvent((*&local_event));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     VkResult vkSetEvent_VkResult_return = (VkResult)0;
     stream->read(&vkSetEvent_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkSetEvent");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkSetEvent_VkResult_return;
 }
 
 VkResult VkEncoder::vkResetEvent(
     VkDevice device,
-    VkEvent event)
+    VkEvent event,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkResetEvent encode");
-    mImpl->log("start vkResetEvent");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkEvent local_event;
     local_device = device;
     local_event = event;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_220;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_220, 1);
-        countingStream->write((uint64_t*)&cgen_var_220, 1 * 8);
-        uint64_t cgen_var_221;
-        countingStream->handleMapping()->mapHandles_VkEvent_u64(&local_event, &cgen_var_221, 1);
-        countingStream->write((uint64_t*)&cgen_var_221, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkResetEvent = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkResetEvent = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkResetEvent);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkResetEvent = OP_vkResetEvent;
-    stream->write(&opcode_vkResetEvent, sizeof(uint32_t));
-    stream->write(&packetSize_vkResetEvent, sizeof(uint32_t));
-    uint64_t cgen_var_222;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_222, 1);
-    stream->write((uint64_t*)&cgen_var_222, 1 * 8);
-    uint64_t cgen_var_223;
-    stream->handleMapping()->mapHandles_VkEvent_u64(&local_event, &cgen_var_223, 1);
-    stream->write((uint64_t*)&cgen_var_223, 1 * 8);
-    AEMU_SCOPED_TRACE("vkResetEvent readParams");
-    AEMU_SCOPED_TRACE("vkResetEvent returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkResetEvent, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkResetEvent, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkEvent((*&local_event));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     VkResult vkResetEvent_VkResult_return = (VkResult)0;
     stream->read(&vkResetEvent_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkResetEvent");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkResetEvent_VkResult_return;
 }
 
@@ -3455,16 +3705,14 @@
     VkDevice device,
     const VkQueryPoolCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkQueryPool* pQueryPool)
+    VkQueryPool* pQueryPool,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateQueryPool encode");
-    mImpl->log("start vkCreateQueryPool");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkQueryPoolCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -3473,90 +3721,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkQueryPoolCreateInfo*)pool->alloc(sizeof(const VkQueryPoolCreateInfo));
-        deepcopy_VkQueryPoolCreateInfo(pool, pCreateInfo, (VkQueryPoolCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkQueryPoolCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkQueryPoolCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkQueryPoolCreateInfo(mImpl->resources(), (VkQueryPoolCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkQueryPoolCreateInfo(sResourceTracker, (VkQueryPoolCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_224;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_224, 1);
-        countingStream->write((uint64_t*)&cgen_var_224, 1 * 8);
-        marshal_VkQueryPoolCreateInfo(countingStream, (VkQueryPoolCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkQueryPoolCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkQueryPoolCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_225 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_225);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_226;
-        countingStream->handleMapping()->mapHandles_VkQueryPool_u64(pQueryPool, &cgen_var_226, 1);
-        countingStream->write((uint64_t*)&cgen_var_226, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateQueryPool = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateQueryPool = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateQueryPool);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateQueryPool = OP_vkCreateQueryPool;
-    stream->write(&opcode_vkCreateQueryPool, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateQueryPool, sizeof(uint32_t));
-    uint64_t cgen_var_227;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_227, 1);
-    stream->write((uint64_t*)&cgen_var_227, 1 * 8);
-    marshal_VkQueryPoolCreateInfo(stream, (VkQueryPoolCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateQueryPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateQueryPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkQueryPoolCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkQueryPoolCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_228 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_228);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_229;
-    stream->handleMapping()->mapHandles_VkQueryPool_u64(pQueryPool, &cgen_var_229, 1);
-    stream->write((uint64_t*)&cgen_var_229, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateQueryPool readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_230;
-    stream->read((uint64_t*)&cgen_var_230, 8);
-    stream->handleMapping()->mapHandles_u64_VkQueryPool(&cgen_var_230, (VkQueryPool*)pQueryPool, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pQueryPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkQueryPool(&cgen_var_3, (VkQueryPool*)pQueryPool, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateQueryPool returnUnmarshal");
     VkResult vkCreateQueryPool_VkResult_return = (VkResult)0;
     stream->read(&vkCreateQueryPool_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateQueryPool");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateQueryPool_VkResult_return;
 }
 
 void VkEncoder::vkDestroyQueryPool(
     VkDevice device,
     VkQueryPool queryPool,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyQueryPool encode");
-    mImpl->log("start vkDestroyQueryPool");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkQueryPool local_queryPool;
     VkAllocationCallbacks* local_pAllocator;
@@ -3566,51 +3818,61 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_231;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_231, 1);
-        countingStream->write((uint64_t*)&cgen_var_231, 1 * 8);
-        uint64_t cgen_var_232;
-        countingStream->handleMapping()->mapHandles_VkQueryPool_u64(&local_queryPool, &cgen_var_232, 1);
-        countingStream->write((uint64_t*)&cgen_var_232, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_233 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_233);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyQueryPool = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyQueryPool = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyQueryPool);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyQueryPool = OP_vkDestroyQueryPool;
-    stream->write(&opcode_vkDestroyQueryPool, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyQueryPool, sizeof(uint32_t));
-    uint64_t cgen_var_234;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_234, 1);
-    stream->write((uint64_t*)&cgen_var_234, 1 * 8);
-    uint64_t cgen_var_235;
-    stream->handleMapping()->mapHandles_VkQueryPool_u64(&local_queryPool, &cgen_var_235, 1);
-    stream->write((uint64_t*)&cgen_var_235, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyQueryPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyQueryPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkQueryPool((*&local_queryPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_236 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_236);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyQueryPool readParams");
-    AEMU_SCOPED_TRACE("vkDestroyQueryPool returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkQueryPool((VkQueryPool*)&queryPool);
-    mImpl->log("finish vkDestroyQueryPool");;
+    sResourceTracker->destroyMapping()->mapHandles_VkQueryPool((VkQueryPool*)&queryPool);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkGetQueryPoolResults(
@@ -3621,16 +3883,14 @@
     size_t dataSize,
     void* pData,
     VkDeviceSize stride,
-    VkQueryResultFlags flags)
+    VkQueryResultFlags flags,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetQueryPoolResults encode");
-    mImpl->log("start vkGetQueryPoolResults");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkQueryPool local_queryPool;
     uint32_t local_firstQuery;
@@ -3645,49 +3905,60 @@
     local_dataSize = dataSize;
     local_stride = stride;
     local_flags = flags;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_237;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_237, 1);
-        countingStream->write((uint64_t*)&cgen_var_237, 1 * 8);
-        uint64_t cgen_var_238;
-        countingStream->handleMapping()->mapHandles_VkQueryPool_u64(&local_queryPool, &cgen_var_238, 1);
-        countingStream->write((uint64_t*)&cgen_var_238, 1 * 8);
-        countingStream->write((uint32_t*)&local_firstQuery, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_queryCount, sizeof(uint32_t));
-        uint64_t cgen_var_239 = (uint64_t)local_dataSize;
-        countingStream->putBe64(cgen_var_239);
-        countingStream->write((void*)pData, ((dataSize)) * sizeof(uint8_t));
-        countingStream->write((VkDeviceSize*)&local_stride, sizeof(VkDeviceSize));
-        countingStream->write((VkQueryResultFlags*)&local_flags, sizeof(VkQueryResultFlags));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += 8;
+        *countPtr += ((dataSize)) * sizeof(uint8_t);
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(VkQueryResultFlags);
     }
-    uint32_t packetSize_vkGetQueryPoolResults = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetQueryPoolResults = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetQueryPoolResults);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetQueryPoolResults = OP_vkGetQueryPoolResults;
-    stream->write(&opcode_vkGetQueryPoolResults, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetQueryPoolResults, sizeof(uint32_t));
-    uint64_t cgen_var_240;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_240, 1);
-    stream->write((uint64_t*)&cgen_var_240, 1 * 8);
-    uint64_t cgen_var_241;
-    stream->handleMapping()->mapHandles_VkQueryPool_u64(&local_queryPool, &cgen_var_241, 1);
-    stream->write((uint64_t*)&cgen_var_241, 1 * 8);
-    stream->write((uint32_t*)&local_firstQuery, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_queryCount, sizeof(uint32_t));
-    uint64_t cgen_var_242 = (uint64_t)local_dataSize;
-    stream->putBe64(cgen_var_242);
-    stream->write((void*)pData, ((dataSize)) * sizeof(uint8_t));
-    stream->write((VkDeviceSize*)&local_stride, sizeof(VkDeviceSize));
-    stream->write((VkQueryResultFlags*)&local_flags, sizeof(VkQueryResultFlags));
-    AEMU_SCOPED_TRACE("vkGetQueryPoolResults readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetQueryPoolResults, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetQueryPoolResults, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkQueryPool((*&local_queryPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstQuery, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_queryCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    uint64_t cgen_var_2 = (uint64_t)local_dataSize;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    memcpy(*streamPtrPtr, (void*)pData, ((dataSize)) * sizeof(uint8_t));
+    *streamPtrPtr += ((dataSize)) * sizeof(uint8_t);
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_stride, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (VkQueryResultFlags*)&local_flags, sizeof(VkQueryResultFlags));
+    *streamPtrPtr += sizeof(VkQueryResultFlags);
     stream->read((void*)pData, ((dataSize)) * sizeof(uint8_t));
-    AEMU_SCOPED_TRACE("vkGetQueryPoolResults returnUnmarshal");
     VkResult vkGetQueryPoolResults_VkResult_return = (VkResult)0;
     stream->read(&vkGetQueryPoolResults_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetQueryPoolResults");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetQueryPoolResults_VkResult_return;
 }
 
@@ -3695,16 +3966,14 @@
     VkDevice device,
     const VkBufferCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkBuffer* pBuffer)
+    VkBuffer* pBuffer,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateBuffer encode");
-    mImpl->log("start vkCreateBuffer");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkBufferCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -3713,90 +3982,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkBufferCreateInfo*)pool->alloc(sizeof(const VkBufferCreateInfo));
-        deepcopy_VkBufferCreateInfo(pool, pCreateInfo, (VkBufferCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkBufferCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkBufferCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkBufferCreateInfo(mImpl->resources(), (VkBufferCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkBufferCreateInfo(sResourceTracker, (VkBufferCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_243;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_243, 1);
-        countingStream->write((uint64_t*)&cgen_var_243, 1 * 8);
-        marshal_VkBufferCreateInfo(countingStream, (VkBufferCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkBufferCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_244 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_244);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_245;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(pBuffer, &cgen_var_245, 1);
-        countingStream->write((uint64_t*)&cgen_var_245, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateBuffer = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateBuffer = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateBuffer);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateBuffer = OP_vkCreateBuffer;
-    stream->write(&opcode_vkCreateBuffer, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateBuffer, sizeof(uint32_t));
-    uint64_t cgen_var_246;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_246, 1);
-    stream->write((uint64_t*)&cgen_var_246, 1 * 8);
-    marshal_VkBufferCreateInfo(stream, (VkBufferCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkBufferCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_247 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_247);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_248;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(pBuffer, &cgen_var_248, 1);
-    stream->write((uint64_t*)&cgen_var_248, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateBuffer readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_249;
-    stream->read((uint64_t*)&cgen_var_249, 8);
-    stream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_249, (VkBuffer*)pBuffer, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_3, (VkBuffer*)pBuffer, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateBuffer returnUnmarshal");
     VkResult vkCreateBuffer_VkResult_return = (VkResult)0;
     stream->read(&vkCreateBuffer_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateBuffer");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateBuffer_VkResult_return;
 }
 
 void VkEncoder::vkDestroyBuffer(
     VkDevice device,
     VkBuffer buffer,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyBuffer encode");
-    mImpl->log("start vkDestroyBuffer");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkBuffer local_buffer;
     VkAllocationCallbacks* local_pAllocator;
@@ -3806,67 +4079,75 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_250;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_250, 1);
-        countingStream->write((uint64_t*)&cgen_var_250, 1 * 8);
-        uint64_t cgen_var_251;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_251, 1);
-        countingStream->write((uint64_t*)&cgen_var_251, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_252 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_252);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyBuffer = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyBuffer = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyBuffer);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyBuffer = OP_vkDestroyBuffer;
-    stream->write(&opcode_vkDestroyBuffer, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyBuffer, sizeof(uint32_t));
-    uint64_t cgen_var_253;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_253, 1);
-    stream->write((uint64_t*)&cgen_var_253, 1 * 8);
-    uint64_t cgen_var_254;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_254, 1);
-    stream->write((uint64_t*)&cgen_var_254, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&local_buffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_255 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_255);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyBuffer readParams");
-    AEMU_SCOPED_TRACE("vkDestroyBuffer returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkBuffer((VkBuffer*)&buffer);
-    mImpl->log("finish vkDestroyBuffer");;
+    sResourceTracker->destroyMapping()->mapHandles_VkBuffer((VkBuffer*)&buffer);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkCreateBufferView(
     VkDevice device,
     const VkBufferViewCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkBufferView* pView)
+    VkBufferView* pView,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateBufferView encode");
-    mImpl->log("start vkCreateBufferView");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkBufferViewCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -3875,90 +4156,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkBufferViewCreateInfo*)pool->alloc(sizeof(const VkBufferViewCreateInfo));
-        deepcopy_VkBufferViewCreateInfo(pool, pCreateInfo, (VkBufferViewCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkBufferViewCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkBufferViewCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkBufferViewCreateInfo(mImpl->resources(), (VkBufferViewCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkBufferViewCreateInfo(sResourceTracker, (VkBufferViewCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_256;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_256, 1);
-        countingStream->write((uint64_t*)&cgen_var_256, 1 * 8);
-        marshal_VkBufferViewCreateInfo(countingStream, (VkBufferViewCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkBufferViewCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferViewCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_257 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_257);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_258;
-        countingStream->handleMapping()->mapHandles_VkBufferView_u64(pView, &cgen_var_258, 1);
-        countingStream->write((uint64_t*)&cgen_var_258, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateBufferView = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateBufferView = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateBufferView);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateBufferView = OP_vkCreateBufferView;
-    stream->write(&opcode_vkCreateBufferView, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateBufferView, sizeof(uint32_t));
-    uint64_t cgen_var_259;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_259, 1);
-    stream->write((uint64_t*)&cgen_var_259, 1 * 8);
-    marshal_VkBufferViewCreateInfo(stream, (VkBufferViewCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateBufferView, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateBufferView, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkBufferViewCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferViewCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_260 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_260);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_261;
-    stream->handleMapping()->mapHandles_VkBufferView_u64(pView, &cgen_var_261, 1);
-    stream->write((uint64_t*)&cgen_var_261, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateBufferView readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_262;
-    stream->read((uint64_t*)&cgen_var_262, 8);
-    stream->handleMapping()->mapHandles_u64_VkBufferView(&cgen_var_262, (VkBufferView*)pView, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pView));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkBufferView(&cgen_var_3, (VkBufferView*)pView, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateBufferView returnUnmarshal");
     VkResult vkCreateBufferView_VkResult_return = (VkResult)0;
     stream->read(&vkCreateBufferView_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateBufferView");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateBufferView_VkResult_return;
 }
 
 void VkEncoder::vkDestroyBufferView(
     VkDevice device,
     VkBufferView bufferView,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyBufferView encode");
-    mImpl->log("start vkDestroyBufferView");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkBufferView local_bufferView;
     VkAllocationCallbacks* local_pAllocator;
@@ -3968,67 +4253,75 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_263;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_263, 1);
-        countingStream->write((uint64_t*)&cgen_var_263, 1 * 8);
-        uint64_t cgen_var_264;
-        countingStream->handleMapping()->mapHandles_VkBufferView_u64(&local_bufferView, &cgen_var_264, 1);
-        countingStream->write((uint64_t*)&cgen_var_264, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_265 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_265);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyBufferView = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyBufferView = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyBufferView);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyBufferView = OP_vkDestroyBufferView;
-    stream->write(&opcode_vkDestroyBufferView, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyBufferView, sizeof(uint32_t));
-    uint64_t cgen_var_266;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_266, 1);
-    stream->write((uint64_t*)&cgen_var_266, 1 * 8);
-    uint64_t cgen_var_267;
-    stream->handleMapping()->mapHandles_VkBufferView_u64(&local_bufferView, &cgen_var_267, 1);
-    stream->write((uint64_t*)&cgen_var_267, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyBufferView, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyBufferView, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBufferView((*&local_bufferView));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_268 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_268);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyBufferView readParams");
-    AEMU_SCOPED_TRACE("vkDestroyBufferView returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkBufferView((VkBufferView*)&bufferView);
-    mImpl->log("finish vkDestroyBufferView");;
+    sResourceTracker->destroyMapping()->mapHandles_VkBufferView((VkBufferView*)&bufferView);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkCreateImage(
     VkDevice device,
     const VkImageCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkImage* pImage)
+    VkImage* pImage,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateImage encode");
-    mImpl->log("start vkCreateImage");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImageCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -4037,91 +4330,95 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkImageCreateInfo*)pool->alloc(sizeof(const VkImageCreateInfo));
-        deepcopy_VkImageCreateInfo(pool, pCreateInfo, (VkImageCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkImageCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkImageCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    mImpl->resources()->unwrap_VkNativeBufferANDROID(pCreateInfo, local_pCreateInfo);
+    sResourceTracker->unwrap_VkNativeBufferANDROID(pCreateInfo, local_pCreateInfo);
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkImageCreateInfo(mImpl->resources(), (VkImageCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkImageCreateInfo(sResourceTracker, (VkImageCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_269;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_269, 1);
-        countingStream->write((uint64_t*)&cgen_var_269, 1 * 8);
-        marshal_VkImageCreateInfo(countingStream, (VkImageCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkImageCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_270 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_270);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_271;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(pImage, &cgen_var_271, 1);
-        countingStream->write((uint64_t*)&cgen_var_271, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateImage = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateImage = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateImage);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateImage = OP_vkCreateImage;
-    stream->write(&opcode_vkCreateImage, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateImage, sizeof(uint32_t));
-    uint64_t cgen_var_272;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_272, 1);
-    stream->write((uint64_t*)&cgen_var_272, 1 * 8);
-    marshal_VkImageCreateInfo(stream, (VkImageCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateImage, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateImage, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkImageCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_273 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_273);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_274;
-    stream->handleMapping()->mapHandles_VkImage_u64(pImage, &cgen_var_274, 1);
-    stream->write((uint64_t*)&cgen_var_274, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateImage readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_275;
-    stream->read((uint64_t*)&cgen_var_275, 8);
-    stream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_275, (VkImage*)pImage, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pImage));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_3, (VkImage*)pImage, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateImage returnUnmarshal");
     VkResult vkCreateImage_VkResult_return = (VkResult)0;
     stream->read(&vkCreateImage_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateImage");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateImage_VkResult_return;
 }
 
 void VkEncoder::vkDestroyImage(
     VkDevice device,
     VkImage image,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyImage encode");
-    mImpl->log("start vkDestroyImage");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImage local_image;
     VkAllocationCallbacks* local_pAllocator;
@@ -4131,67 +4428,75 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_276;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_276, 1);
-        countingStream->write((uint64_t*)&cgen_var_276, 1 * 8);
-        uint64_t cgen_var_277;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_277, 1);
-        countingStream->write((uint64_t*)&cgen_var_277, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_278 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_278);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyImage = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyImage = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyImage);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyImage = OP_vkDestroyImage;
-    stream->write(&opcode_vkDestroyImage, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyImage, sizeof(uint32_t));
-    uint64_t cgen_var_279;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_279, 1);
-    stream->write((uint64_t*)&cgen_var_279, 1 * 8);
-    uint64_t cgen_var_280;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_280, 1);
-    stream->write((uint64_t*)&cgen_var_280, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyImage, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyImage, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImage((*&local_image));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_281 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_281);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyImage readParams");
-    AEMU_SCOPED_TRACE("vkDestroyImage returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkImage((VkImage*)&image);
-    mImpl->log("finish vkDestroyImage");;
+    sResourceTracker->destroyMapping()->mapHandles_VkImage((VkImage*)&image);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetImageSubresourceLayout(
     VkDevice device,
     VkImage image,
     const VkImageSubresource* pSubresource,
-    VkSubresourceLayout* pLayout)
+    VkSubresourceLayout* pLayout,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetImageSubresourceLayout encode");
-    mImpl->log("start vkGetImageSubresourceLayout");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImage local_image;
     VkImageSubresource* local_pSubresource;
@@ -4201,60 +4506,66 @@
     if (pSubresource)
     {
         local_pSubresource = (VkImageSubresource*)pool->alloc(sizeof(const VkImageSubresource));
-        deepcopy_VkImageSubresource(pool, pSubresource, (VkImageSubresource*)(local_pSubresource));
+        deepcopy_VkImageSubresource(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSubresource, (VkImageSubresource*)(local_pSubresource));
     }
     if (local_pSubresource)
     {
-        transform_tohost_VkImageSubresource(mImpl->resources(), (VkImageSubresource*)(local_pSubresource));
+        transform_tohost_VkImageSubresource(sResourceTracker, (VkImageSubresource*)(local_pSubresource));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_282;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_282, 1);
-        countingStream->write((uint64_t*)&cgen_var_282, 1 * 8);
-        uint64_t cgen_var_283;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_283, 1);
-        countingStream->write((uint64_t*)&cgen_var_283, 1 * 8);
-        marshal_VkImageSubresource(countingStream, (VkImageSubresource*)(local_pSubresource));
-        marshal_VkSubresourceLayout(countingStream, (VkSubresourceLayout*)(pLayout));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        count_VkImageSubresource(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageSubresource*)(local_pSubresource), countPtr);
+        count_VkSubresourceLayout(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubresourceLayout*)(pLayout), countPtr);
     }
-    uint32_t packetSize_vkGetImageSubresourceLayout = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetImageSubresourceLayout = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetImageSubresourceLayout);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetImageSubresourceLayout = OP_vkGetImageSubresourceLayout;
-    stream->write(&opcode_vkGetImageSubresourceLayout, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetImageSubresourceLayout, sizeof(uint32_t));
-    uint64_t cgen_var_284;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_284, 1);
-    stream->write((uint64_t*)&cgen_var_284, 1 * 8);
-    uint64_t cgen_var_285;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_285, 1);
-    stream->write((uint64_t*)&cgen_var_285, 1 * 8);
-    marshal_VkImageSubresource(stream, (VkImageSubresource*)(local_pSubresource));
-    marshal_VkSubresourceLayout(stream, (VkSubresourceLayout*)(pLayout));
-    AEMU_SCOPED_TRACE("vkGetImageSubresourceLayout readParams");
-    unmarshal_VkSubresourceLayout(stream, (VkSubresourceLayout*)(pLayout));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetImageSubresourceLayout, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetImageSubresourceLayout, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImage((*&local_image));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkImageSubresource(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageSubresource*)(local_pSubresource), streamPtrPtr);
+    reservedmarshal_VkSubresourceLayout(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubresourceLayout*)(pLayout), streamPtrPtr);
+    unmarshal_VkSubresourceLayout(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubresourceLayout*)(pLayout));
     if (pLayout)
     {
-        transform_fromhost_VkSubresourceLayout(mImpl->resources(), (VkSubresourceLayout*)(pLayout));
+        transform_fromhost_VkSubresourceLayout(sResourceTracker, (VkSubresourceLayout*)(pLayout));
     }
-    AEMU_SCOPED_TRACE("vkGetImageSubresourceLayout returnUnmarshal");
-    mImpl->log("finish vkGetImageSubresourceLayout");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkCreateImageView(
     VkDevice device,
     const VkImageViewCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkImageView* pView)
+    VkImageView* pView,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateImageView encode");
-    mImpl->log("start vkCreateImageView");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImageViewCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -4263,90 +4574,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkImageViewCreateInfo*)pool->alloc(sizeof(const VkImageViewCreateInfo));
-        deepcopy_VkImageViewCreateInfo(pool, pCreateInfo, (VkImageViewCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkImageViewCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkImageViewCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkImageViewCreateInfo(mImpl->resources(), (VkImageViewCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkImageViewCreateInfo(sResourceTracker, (VkImageViewCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_286;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_286, 1);
-        countingStream->write((uint64_t*)&cgen_var_286, 1 * 8);
-        marshal_VkImageViewCreateInfo(countingStream, (VkImageViewCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkImageViewCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageViewCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_287 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_287);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_288;
-        countingStream->handleMapping()->mapHandles_VkImageView_u64(pView, &cgen_var_288, 1);
-        countingStream->write((uint64_t*)&cgen_var_288, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateImageView = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateImageView = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateImageView);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateImageView = OP_vkCreateImageView;
-    stream->write(&opcode_vkCreateImageView, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateImageView, sizeof(uint32_t));
-    uint64_t cgen_var_289;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_289, 1);
-    stream->write((uint64_t*)&cgen_var_289, 1 * 8);
-    marshal_VkImageViewCreateInfo(stream, (VkImageViewCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateImageView, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateImageView, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkImageViewCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageViewCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_290 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_290);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_291;
-    stream->handleMapping()->mapHandles_VkImageView_u64(pView, &cgen_var_291, 1);
-    stream->write((uint64_t*)&cgen_var_291, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateImageView readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_292;
-    stream->read((uint64_t*)&cgen_var_292, 8);
-    stream->handleMapping()->mapHandles_u64_VkImageView(&cgen_var_292, (VkImageView*)pView, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pView));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkImageView(&cgen_var_3, (VkImageView*)pView, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateImageView returnUnmarshal");
     VkResult vkCreateImageView_VkResult_return = (VkResult)0;
     stream->read(&vkCreateImageView_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateImageView");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateImageView_VkResult_return;
 }
 
 void VkEncoder::vkDestroyImageView(
     VkDevice device,
     VkImageView imageView,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyImageView encode");
-    mImpl->log("start vkDestroyImageView");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImageView local_imageView;
     VkAllocationCallbacks* local_pAllocator;
@@ -4356,67 +4671,75 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_293;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_293, 1);
-        countingStream->write((uint64_t*)&cgen_var_293, 1 * 8);
-        uint64_t cgen_var_294;
-        countingStream->handleMapping()->mapHandles_VkImageView_u64(&local_imageView, &cgen_var_294, 1);
-        countingStream->write((uint64_t*)&cgen_var_294, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_295 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_295);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyImageView = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyImageView = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyImageView);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyImageView = OP_vkDestroyImageView;
-    stream->write(&opcode_vkDestroyImageView, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyImageView, sizeof(uint32_t));
-    uint64_t cgen_var_296;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_296, 1);
-    stream->write((uint64_t*)&cgen_var_296, 1 * 8);
-    uint64_t cgen_var_297;
-    stream->handleMapping()->mapHandles_VkImageView_u64(&local_imageView, &cgen_var_297, 1);
-    stream->write((uint64_t*)&cgen_var_297, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyImageView, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyImageView, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImageView((*&local_imageView));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_298 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_298);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyImageView readParams");
-    AEMU_SCOPED_TRACE("vkDestroyImageView returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkImageView((VkImageView*)&imageView);
-    mImpl->log("finish vkDestroyImageView");;
+    sResourceTracker->destroyMapping()->mapHandles_VkImageView((VkImageView*)&imageView);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkCreateShaderModule(
     VkDevice device,
     const VkShaderModuleCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkShaderModule* pShaderModule)
+    VkShaderModule* pShaderModule,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateShaderModule encode");
-    mImpl->log("start vkCreateShaderModule");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkShaderModuleCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -4425,90 +4748,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkShaderModuleCreateInfo*)pool->alloc(sizeof(const VkShaderModuleCreateInfo));
-        deepcopy_VkShaderModuleCreateInfo(pool, pCreateInfo, (VkShaderModuleCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkShaderModuleCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkShaderModuleCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkShaderModuleCreateInfo(mImpl->resources(), (VkShaderModuleCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkShaderModuleCreateInfo(sResourceTracker, (VkShaderModuleCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_299;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_299, 1);
-        countingStream->write((uint64_t*)&cgen_var_299, 1 * 8);
-        marshal_VkShaderModuleCreateInfo(countingStream, (VkShaderModuleCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkShaderModuleCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkShaderModuleCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_300 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_300);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_301;
-        countingStream->handleMapping()->mapHandles_VkShaderModule_u64(pShaderModule, &cgen_var_301, 1);
-        countingStream->write((uint64_t*)&cgen_var_301, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateShaderModule = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateShaderModule = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateShaderModule);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateShaderModule = OP_vkCreateShaderModule;
-    stream->write(&opcode_vkCreateShaderModule, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateShaderModule, sizeof(uint32_t));
-    uint64_t cgen_var_302;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_302, 1);
-    stream->write((uint64_t*)&cgen_var_302, 1 * 8);
-    marshal_VkShaderModuleCreateInfo(stream, (VkShaderModuleCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateShaderModule, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateShaderModule, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkShaderModuleCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkShaderModuleCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_303 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_303);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_304;
-    stream->handleMapping()->mapHandles_VkShaderModule_u64(pShaderModule, &cgen_var_304, 1);
-    stream->write((uint64_t*)&cgen_var_304, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateShaderModule readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_305;
-    stream->read((uint64_t*)&cgen_var_305, 8);
-    stream->handleMapping()->mapHandles_u64_VkShaderModule(&cgen_var_305, (VkShaderModule*)pShaderModule, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pShaderModule));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkShaderModule(&cgen_var_3, (VkShaderModule*)pShaderModule, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateShaderModule returnUnmarshal");
     VkResult vkCreateShaderModule_VkResult_return = (VkResult)0;
     stream->read(&vkCreateShaderModule_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateShaderModule");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateShaderModule_VkResult_return;
 }
 
 void VkEncoder::vkDestroyShaderModule(
     VkDevice device,
     VkShaderModule shaderModule,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyShaderModule encode");
-    mImpl->log("start vkDestroyShaderModule");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkShaderModule local_shaderModule;
     VkAllocationCallbacks* local_pAllocator;
@@ -4518,67 +4845,75 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_306;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_306, 1);
-        countingStream->write((uint64_t*)&cgen_var_306, 1 * 8);
-        uint64_t cgen_var_307;
-        countingStream->handleMapping()->mapHandles_VkShaderModule_u64(&local_shaderModule, &cgen_var_307, 1);
-        countingStream->write((uint64_t*)&cgen_var_307, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_308 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_308);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyShaderModule = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyShaderModule = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyShaderModule);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyShaderModule = OP_vkDestroyShaderModule;
-    stream->write(&opcode_vkDestroyShaderModule, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyShaderModule, sizeof(uint32_t));
-    uint64_t cgen_var_309;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_309, 1);
-    stream->write((uint64_t*)&cgen_var_309, 1 * 8);
-    uint64_t cgen_var_310;
-    stream->handleMapping()->mapHandles_VkShaderModule_u64(&local_shaderModule, &cgen_var_310, 1);
-    stream->write((uint64_t*)&cgen_var_310, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyShaderModule, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyShaderModule, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkShaderModule((*&local_shaderModule));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_311 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_311);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyShaderModule readParams");
-    AEMU_SCOPED_TRACE("vkDestroyShaderModule returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkShaderModule((VkShaderModule*)&shaderModule);
-    mImpl->log("finish vkDestroyShaderModule");;
+    sResourceTracker->destroyMapping()->mapHandles_VkShaderModule((VkShaderModule*)&shaderModule);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkCreatePipelineCache(
     VkDevice device,
     const VkPipelineCacheCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkPipelineCache* pPipelineCache)
+    VkPipelineCache* pPipelineCache,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreatePipelineCache encode");
-    mImpl->log("start vkCreatePipelineCache");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkPipelineCacheCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -4587,90 +4922,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkPipelineCacheCreateInfo*)pool->alloc(sizeof(const VkPipelineCacheCreateInfo));
-        deepcopy_VkPipelineCacheCreateInfo(pool, pCreateInfo, (VkPipelineCacheCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkPipelineCacheCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkPipelineCacheCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkPipelineCacheCreateInfo(mImpl->resources(), (VkPipelineCacheCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkPipelineCacheCreateInfo(sResourceTracker, (VkPipelineCacheCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_312;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_312, 1);
-        countingStream->write((uint64_t*)&cgen_var_312, 1 * 8);
-        marshal_VkPipelineCacheCreateInfo(countingStream, (VkPipelineCacheCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPipelineCacheCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineCacheCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_313 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_313);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_314;
-        countingStream->handleMapping()->mapHandles_VkPipelineCache_u64(pPipelineCache, &cgen_var_314, 1);
-        countingStream->write((uint64_t*)&cgen_var_314, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreatePipelineCache = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreatePipelineCache = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreatePipelineCache);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreatePipelineCache = OP_vkCreatePipelineCache;
-    stream->write(&opcode_vkCreatePipelineCache, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreatePipelineCache, sizeof(uint32_t));
-    uint64_t cgen_var_315;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_315, 1);
-    stream->write((uint64_t*)&cgen_var_315, 1 * 8);
-    marshal_VkPipelineCacheCreateInfo(stream, (VkPipelineCacheCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreatePipelineCache, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreatePipelineCache, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPipelineCacheCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineCacheCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_316 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_316);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_317;
-    stream->handleMapping()->mapHandles_VkPipelineCache_u64(pPipelineCache, &cgen_var_317, 1);
-    stream->write((uint64_t*)&cgen_var_317, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreatePipelineCache readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_318;
-    stream->read((uint64_t*)&cgen_var_318, 8);
-    stream->handleMapping()->mapHandles_u64_VkPipelineCache(&cgen_var_318, (VkPipelineCache*)pPipelineCache, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pPipelineCache));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkPipelineCache(&cgen_var_3, (VkPipelineCache*)pPipelineCache, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreatePipelineCache returnUnmarshal");
     VkResult vkCreatePipelineCache_VkResult_return = (VkResult)0;
     stream->read(&vkCreatePipelineCache_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreatePipelineCache");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreatePipelineCache_VkResult_return;
 }
 
 void VkEncoder::vkDestroyPipelineCache(
     VkDevice device,
     VkPipelineCache pipelineCache,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyPipelineCache encode");
-    mImpl->log("start vkDestroyPipelineCache");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkPipelineCache local_pipelineCache;
     VkAllocationCallbacks* local_pAllocator;
@@ -4680,122 +5019,140 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_319;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_319, 1);
-        countingStream->write((uint64_t*)&cgen_var_319, 1 * 8);
-        uint64_t cgen_var_320;
-        countingStream->handleMapping()->mapHandles_VkPipelineCache_u64(&local_pipelineCache, &cgen_var_320, 1);
-        countingStream->write((uint64_t*)&cgen_var_320, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_321 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_321);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyPipelineCache = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyPipelineCache = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyPipelineCache);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyPipelineCache = OP_vkDestroyPipelineCache;
-    stream->write(&opcode_vkDestroyPipelineCache, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyPipelineCache, sizeof(uint32_t));
-    uint64_t cgen_var_322;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_322, 1);
-    stream->write((uint64_t*)&cgen_var_322, 1 * 8);
-    uint64_t cgen_var_323;
-    stream->handleMapping()->mapHandles_VkPipelineCache_u64(&local_pipelineCache, &cgen_var_323, 1);
-    stream->write((uint64_t*)&cgen_var_323, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyPipelineCache, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyPipelineCache, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipelineCache((*&local_pipelineCache));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_324 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_324);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyPipelineCache readParams");
-    AEMU_SCOPED_TRACE("vkDestroyPipelineCache returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkPipelineCache((VkPipelineCache*)&pipelineCache);
-    mImpl->log("finish vkDestroyPipelineCache");;
+    sResourceTracker->destroyMapping()->mapHandles_VkPipelineCache((VkPipelineCache*)&pipelineCache);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkGetPipelineCacheData(
     VkDevice device,
     VkPipelineCache pipelineCache,
     size_t* pDataSize,
-    void* pData)
+    void* pData,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPipelineCacheData encode");
-    mImpl->log("start vkGetPipelineCacheData");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkPipelineCache local_pipelineCache;
     local_device = device;
     local_pipelineCache = pipelineCache;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_325;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_325, 1);
-        countingStream->write((uint64_t*)&cgen_var_325, 1 * 8);
-        uint64_t cgen_var_326;
-        countingStream->handleMapping()->mapHandles_VkPipelineCache_u64(&local_pipelineCache, &cgen_var_326, 1);
-        countingStream->write((uint64_t*)&cgen_var_326, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_327 = (uint64_t)(uintptr_t)pDataSize;
-        countingStream->putBe64(cgen_var_327);
+        *countPtr += 8;
         if (pDataSize)
         {
-            uint64_t cgen_var_328 = (uint64_t)(*pDataSize);
-            countingStream->putBe64(cgen_var_328);
+            *countPtr += 8;
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_329 = (uint64_t)(uintptr_t)pData;
-        countingStream->putBe64(cgen_var_329);
+        *countPtr += 8;
         if (pData)
         {
-            countingStream->write((void*)pData, (*(pDataSize)) * sizeof(uint8_t));
+            if (pDataSize)
+            {
+                *countPtr += (*(pDataSize)) * sizeof(uint8_t);
+            }
         }
     }
-    uint32_t packetSize_vkGetPipelineCacheData = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPipelineCacheData = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPipelineCacheData);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPipelineCacheData = OP_vkGetPipelineCacheData;
-    stream->write(&opcode_vkGetPipelineCacheData, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPipelineCacheData, sizeof(uint32_t));
-    uint64_t cgen_var_330;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_330, 1);
-    stream->write((uint64_t*)&cgen_var_330, 1 * 8);
-    uint64_t cgen_var_331;
-    stream->handleMapping()->mapHandles_VkPipelineCache_u64(&local_pipelineCache, &cgen_var_331, 1);
-    stream->write((uint64_t*)&cgen_var_331, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPipelineCacheData, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPipelineCacheData, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipelineCache((*&local_pipelineCache));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_332 = (uint64_t)(uintptr_t)pDataSize;
-    stream->putBe64(cgen_var_332);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pDataSize;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pDataSize)
     {
-        uint64_t cgen_var_333 = (uint64_t)(*pDataSize);
-        stream->putBe64(cgen_var_333);
+        uint64_t cgen_var_2_0 = (uint64_t)(*pDataSize);
+        memcpy((*streamPtrPtr), &cgen_var_2_0, 8);
+        android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+        *streamPtrPtr += 8;
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_334 = (uint64_t)(uintptr_t)pData;
-    stream->putBe64(cgen_var_334);
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)pData;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pData)
     {
-        stream->write((void*)pData, (*(pDataSize)) * sizeof(uint8_t));
+        memcpy(*streamPtrPtr, (void*)pData, (*(pDataSize)) * sizeof(uint8_t));
+        *streamPtrPtr += (*(pDataSize)) * sizeof(uint8_t);
     }
-    AEMU_SCOPED_TRACE("vkGetPipelineCacheData readParams");
     // WARNING PTR CHECK
     size_t* check_pDataSize;
     check_pDataSize = (size_t*)(uintptr_t)stream->getBe64();
@@ -4818,13 +5175,15 @@
         }
         stream->read((void*)pData, (*(pDataSize)) * sizeof(uint8_t));
     }
-    AEMU_SCOPED_TRACE("vkGetPipelineCacheData returnUnmarshal");
     VkResult vkGetPipelineCacheData_VkResult_return = (VkResult)0;
     stream->read(&vkGetPipelineCacheData_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPipelineCacheData");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPipelineCacheData_VkResult_return;
 }
 
@@ -4832,16 +5191,14 @@
     VkDevice device,
     VkPipelineCache dstCache,
     uint32_t srcCacheCount,
-    const VkPipelineCache* pSrcCaches)
+    const VkPipelineCache* pSrcCaches,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkMergePipelineCaches encode");
-    mImpl->log("start vkMergePipelineCaches");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkPipelineCache local_dstCache;
     uint32_t local_srcCacheCount;
@@ -4849,55 +5206,58 @@
     local_device = device;
     local_dstCache = dstCache;
     local_srcCacheCount = srcCacheCount;
-    local_pSrcCaches = nullptr;
-    if (pSrcCaches)
+    // Avoiding deepcopy for pSrcCaches
+    local_pSrcCaches = (VkPipelineCache*)pSrcCaches;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pSrcCaches = (VkPipelineCache*)pool->dupArray(pSrcCaches, ((srcCacheCount)) * sizeof(const VkPipelineCache));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_338;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_338, 1);
-        countingStream->write((uint64_t*)&cgen_var_338, 1 * 8);
-        uint64_t cgen_var_339;
-        countingStream->handleMapping()->mapHandles_VkPipelineCache_u64(&local_dstCache, &cgen_var_339, 1);
-        countingStream->write((uint64_t*)&cgen_var_339, 1 * 8);
-        countingStream->write((uint32_t*)&local_srcCacheCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         if (((srcCacheCount)))
         {
-            uint64_t* cgen_var_340;
-            countingStream->alloc((void**)&cgen_var_340, ((srcCacheCount)) * 8);
-            countingStream->handleMapping()->mapHandles_VkPipelineCache_u64(local_pSrcCaches, cgen_var_340, ((srcCacheCount)));
-            countingStream->write((uint64_t*)cgen_var_340, ((srcCacheCount)) * 8);
+            *countPtr += ((srcCacheCount)) * 8;
         }
     }
-    uint32_t packetSize_vkMergePipelineCaches = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkMergePipelineCaches = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkMergePipelineCaches);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkMergePipelineCaches = OP_vkMergePipelineCaches;
-    stream->write(&opcode_vkMergePipelineCaches, sizeof(uint32_t));
-    stream->write(&packetSize_vkMergePipelineCaches, sizeof(uint32_t));
-    uint64_t cgen_var_341;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_341, 1);
-    stream->write((uint64_t*)&cgen_var_341, 1 * 8);
-    uint64_t cgen_var_342;
-    stream->handleMapping()->mapHandles_VkPipelineCache_u64(&local_dstCache, &cgen_var_342, 1);
-    stream->write((uint64_t*)&cgen_var_342, 1 * 8);
-    stream->write((uint32_t*)&local_srcCacheCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkMergePipelineCaches, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkMergePipelineCaches, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipelineCache((*&local_dstCache));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_srcCacheCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     if (((srcCacheCount)))
     {
-        uint64_t* cgen_var_343;
-        stream->alloc((void**)&cgen_var_343, ((srcCacheCount)) * 8);
-        stream->handleMapping()->mapHandles_VkPipelineCache_u64(local_pSrcCaches, cgen_var_343, ((srcCacheCount)));
-        stream->write((uint64_t*)cgen_var_343, ((srcCacheCount)) * 8);
+        uint8_t* cgen_var_2_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((srcCacheCount)); ++k)
+        {
+            uint64_t tmpval = get_host_u64_VkPipelineCache(local_pSrcCaches[k]);
+            memcpy(cgen_var_2_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((srcCacheCount));
     }
-    AEMU_SCOPED_TRACE("vkMergePipelineCaches readParams");
-    AEMU_SCOPED_TRACE("vkMergePipelineCaches returnUnmarshal");
     VkResult vkMergePipelineCaches_VkResult_return = (VkResult)0;
     stream->read(&vkMergePipelineCaches_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkMergePipelineCaches");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkMergePipelineCaches_VkResult_return;
 }
 
@@ -4907,16 +5267,14 @@
     uint32_t createInfoCount,
     const VkGraphicsPipelineCreateInfo* pCreateInfos,
     const VkAllocationCallbacks* pAllocator,
-    VkPipeline* pPipelines)
+    VkPipeline* pPipelines,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateGraphicsPipelines encode");
-    mImpl->log("start vkCreateGraphicsPipelines");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkPipelineCache local_pipelineCache;
     uint32_t local_createInfoCount;
@@ -4931,104 +5289,111 @@
         local_pCreateInfos = (VkGraphicsPipelineCreateInfo*)pool->alloc(((createInfoCount)) * sizeof(const VkGraphicsPipelineCreateInfo));
         for (uint32_t i = 0; i < (uint32_t)((createInfoCount)); ++i)
         {
-            deepcopy_VkGraphicsPipelineCreateInfo(pool, pCreateInfos + i, (VkGraphicsPipelineCreateInfo*)(local_pCreateInfos + i));
+            deepcopy_VkGraphicsPipelineCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfos + i, (VkGraphicsPipelineCreateInfo*)(local_pCreateInfos + i));
         }
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfos)
     {
         for (uint32_t i = 0; i < (uint32_t)((createInfoCount)); ++i)
         {
-            transform_tohost_VkGraphicsPipelineCreateInfo(mImpl->resources(), (VkGraphicsPipelineCreateInfo*)(local_pCreateInfos + i));
+            transform_tohost_VkGraphicsPipelineCreateInfo(sResourceTracker, (VkGraphicsPipelineCreateInfo*)(local_pCreateInfos + i));
         }
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_344;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_344, 1);
-        countingStream->write((uint64_t*)&cgen_var_344, 1 * 8);
-        uint64_t cgen_var_345;
-        countingStream->handleMapping()->mapHandles_VkPipelineCache_u64(&local_pipelineCache, &cgen_var_345, 1);
-        countingStream->write((uint64_t*)&cgen_var_345, 1 * 8);
-        countingStream->write((uint32_t*)&local_createInfoCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((createInfoCount)); ++i)
         {
-            marshal_VkGraphicsPipelineCreateInfo(countingStream, (VkGraphicsPipelineCreateInfo*)(local_pCreateInfos + i));
+            count_VkGraphicsPipelineCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkGraphicsPipelineCreateInfo*)(local_pCreateInfos + i), countPtr);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_346 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_346);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
         if (((createInfoCount)))
         {
-            uint64_t* cgen_var_347;
-            countingStream->alloc((void**)&cgen_var_347, ((createInfoCount)) * 8);
-            countingStream->handleMapping()->mapHandles_VkPipeline_u64(pPipelines, cgen_var_347, ((createInfoCount)));
-            countingStream->write((uint64_t*)cgen_var_347, ((createInfoCount)) * 8);
+            *countPtr += ((createInfoCount)) * 8;
         }
     }
-    uint32_t packetSize_vkCreateGraphicsPipelines = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateGraphicsPipelines = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateGraphicsPipelines);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateGraphicsPipelines = OP_vkCreateGraphicsPipelines;
-    stream->write(&opcode_vkCreateGraphicsPipelines, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateGraphicsPipelines, sizeof(uint32_t));
-    uint64_t cgen_var_348;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_348, 1);
-    stream->write((uint64_t*)&cgen_var_348, 1 * 8);
-    uint64_t cgen_var_349;
-    stream->handleMapping()->mapHandles_VkPipelineCache_u64(&local_pipelineCache, &cgen_var_349, 1);
-    stream->write((uint64_t*)&cgen_var_349, 1 * 8);
-    stream->write((uint32_t*)&local_createInfoCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateGraphicsPipelines, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateGraphicsPipelines, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipelineCache((*&local_pipelineCache));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_createInfoCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((createInfoCount)); ++i)
     {
-        marshal_VkGraphicsPipelineCreateInfo(stream, (VkGraphicsPipelineCreateInfo*)(local_pCreateInfos + i));
+        reservedmarshal_VkGraphicsPipelineCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkGraphicsPipelineCreateInfo*)(local_pCreateInfos + i), streamPtrPtr);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_350 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_350);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
+    /* is handle, possibly out */;
     if (((createInfoCount)))
     {
-        uint64_t* cgen_var_351;
-        stream->alloc((void**)&cgen_var_351, ((createInfoCount)) * 8);
-        stream->handleMapping()->mapHandles_VkPipeline_u64(pPipelines, cgen_var_351, ((createInfoCount)));
-        stream->write((uint64_t*)cgen_var_351, ((createInfoCount)) * 8);
+        uint8_t* cgen_var_3_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((createInfoCount)); ++k)
+        {
+            uint64_t tmpval = (uint64_t)(pPipelines[k]);
+            memcpy(cgen_var_3_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((createInfoCount));
     }
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateGraphicsPipelines readParams");
-    stream->setHandleMapping(resources->createMapping());
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
     if (((createInfoCount)))
     {
-        uint64_t* cgen_var_352;
-        stream->alloc((void**)&cgen_var_352, ((createInfoCount)) * 8);
-        stream->read((uint64_t*)cgen_var_352, ((createInfoCount)) * 8);
-        stream->handleMapping()->mapHandles_u64_VkPipeline(cgen_var_352, (VkPipeline*)pPipelines, ((createInfoCount)));
+        uint64_t* cgen_var_4;
+        stream->alloc((void**)&cgen_var_4, ((createInfoCount)) * 8);
+        stream->read((uint64_t*)cgen_var_4, ((createInfoCount)) * 8);
+        stream->handleMapping()->mapHandles_u64_VkPipeline(cgen_var_4, (VkPipeline*)pPipelines, ((createInfoCount)));
     }
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateGraphicsPipelines returnUnmarshal");
     VkResult vkCreateGraphicsPipelines_VkResult_return = (VkResult)0;
     stream->read(&vkCreateGraphicsPipelines_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateGraphicsPipelines");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateGraphicsPipelines_VkResult_return;
 }
 
@@ -5038,16 +5403,14 @@
     uint32_t createInfoCount,
     const VkComputePipelineCreateInfo* pCreateInfos,
     const VkAllocationCallbacks* pAllocator,
-    VkPipeline* pPipelines)
+    VkPipeline* pPipelines,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateComputePipelines encode");
-    mImpl->log("start vkCreateComputePipelines");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkPipelineCache local_pipelineCache;
     uint32_t local_createInfoCount;
@@ -5062,120 +5425,125 @@
         local_pCreateInfos = (VkComputePipelineCreateInfo*)pool->alloc(((createInfoCount)) * sizeof(const VkComputePipelineCreateInfo));
         for (uint32_t i = 0; i < (uint32_t)((createInfoCount)); ++i)
         {
-            deepcopy_VkComputePipelineCreateInfo(pool, pCreateInfos + i, (VkComputePipelineCreateInfo*)(local_pCreateInfos + i));
+            deepcopy_VkComputePipelineCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfos + i, (VkComputePipelineCreateInfo*)(local_pCreateInfos + i));
         }
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfos)
     {
         for (uint32_t i = 0; i < (uint32_t)((createInfoCount)); ++i)
         {
-            transform_tohost_VkComputePipelineCreateInfo(mImpl->resources(), (VkComputePipelineCreateInfo*)(local_pCreateInfos + i));
+            transform_tohost_VkComputePipelineCreateInfo(sResourceTracker, (VkComputePipelineCreateInfo*)(local_pCreateInfos + i));
         }
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_353;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_353, 1);
-        countingStream->write((uint64_t*)&cgen_var_353, 1 * 8);
-        uint64_t cgen_var_354;
-        countingStream->handleMapping()->mapHandles_VkPipelineCache_u64(&local_pipelineCache, &cgen_var_354, 1);
-        countingStream->write((uint64_t*)&cgen_var_354, 1 * 8);
-        countingStream->write((uint32_t*)&local_createInfoCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((createInfoCount)); ++i)
         {
-            marshal_VkComputePipelineCreateInfo(countingStream, (VkComputePipelineCreateInfo*)(local_pCreateInfos + i));
+            count_VkComputePipelineCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkComputePipelineCreateInfo*)(local_pCreateInfos + i), countPtr);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_355 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_355);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
         if (((createInfoCount)))
         {
-            uint64_t* cgen_var_356;
-            countingStream->alloc((void**)&cgen_var_356, ((createInfoCount)) * 8);
-            countingStream->handleMapping()->mapHandles_VkPipeline_u64(pPipelines, cgen_var_356, ((createInfoCount)));
-            countingStream->write((uint64_t*)cgen_var_356, ((createInfoCount)) * 8);
+            *countPtr += ((createInfoCount)) * 8;
         }
     }
-    uint32_t packetSize_vkCreateComputePipelines = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateComputePipelines = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateComputePipelines);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateComputePipelines = OP_vkCreateComputePipelines;
-    stream->write(&opcode_vkCreateComputePipelines, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateComputePipelines, sizeof(uint32_t));
-    uint64_t cgen_var_357;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_357, 1);
-    stream->write((uint64_t*)&cgen_var_357, 1 * 8);
-    uint64_t cgen_var_358;
-    stream->handleMapping()->mapHandles_VkPipelineCache_u64(&local_pipelineCache, &cgen_var_358, 1);
-    stream->write((uint64_t*)&cgen_var_358, 1 * 8);
-    stream->write((uint32_t*)&local_createInfoCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateComputePipelines, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateComputePipelines, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipelineCache((*&local_pipelineCache));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_createInfoCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((createInfoCount)); ++i)
     {
-        marshal_VkComputePipelineCreateInfo(stream, (VkComputePipelineCreateInfo*)(local_pCreateInfos + i));
+        reservedmarshal_VkComputePipelineCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkComputePipelineCreateInfo*)(local_pCreateInfos + i), streamPtrPtr);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_359 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_359);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
+    /* is handle, possibly out */;
     if (((createInfoCount)))
     {
-        uint64_t* cgen_var_360;
-        stream->alloc((void**)&cgen_var_360, ((createInfoCount)) * 8);
-        stream->handleMapping()->mapHandles_VkPipeline_u64(pPipelines, cgen_var_360, ((createInfoCount)));
-        stream->write((uint64_t*)cgen_var_360, ((createInfoCount)) * 8);
+        uint8_t* cgen_var_3_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((createInfoCount)); ++k)
+        {
+            uint64_t tmpval = (uint64_t)(pPipelines[k]);
+            memcpy(cgen_var_3_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((createInfoCount));
     }
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateComputePipelines readParams");
-    stream->setHandleMapping(resources->createMapping());
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
     if (((createInfoCount)))
     {
-        uint64_t* cgen_var_361;
-        stream->alloc((void**)&cgen_var_361, ((createInfoCount)) * 8);
-        stream->read((uint64_t*)cgen_var_361, ((createInfoCount)) * 8);
-        stream->handleMapping()->mapHandles_u64_VkPipeline(cgen_var_361, (VkPipeline*)pPipelines, ((createInfoCount)));
+        uint64_t* cgen_var_4;
+        stream->alloc((void**)&cgen_var_4, ((createInfoCount)) * 8);
+        stream->read((uint64_t*)cgen_var_4, ((createInfoCount)) * 8);
+        stream->handleMapping()->mapHandles_u64_VkPipeline(cgen_var_4, (VkPipeline*)pPipelines, ((createInfoCount)));
     }
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateComputePipelines returnUnmarshal");
     VkResult vkCreateComputePipelines_VkResult_return = (VkResult)0;
     stream->read(&vkCreateComputePipelines_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateComputePipelines");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateComputePipelines_VkResult_return;
 }
 
 void VkEncoder::vkDestroyPipeline(
     VkDevice device,
     VkPipeline pipeline,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyPipeline encode");
-    mImpl->log("start vkDestroyPipeline");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkPipeline local_pipeline;
     VkAllocationCallbacks* local_pAllocator;
@@ -5185,67 +5553,75 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_362;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_362, 1);
-        countingStream->write((uint64_t*)&cgen_var_362, 1 * 8);
-        uint64_t cgen_var_363;
-        countingStream->handleMapping()->mapHandles_VkPipeline_u64(&local_pipeline, &cgen_var_363, 1);
-        countingStream->write((uint64_t*)&cgen_var_363, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_364 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_364);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyPipeline = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyPipeline = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyPipeline);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyPipeline = OP_vkDestroyPipeline;
-    stream->write(&opcode_vkDestroyPipeline, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyPipeline, sizeof(uint32_t));
-    uint64_t cgen_var_365;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_365, 1);
-    stream->write((uint64_t*)&cgen_var_365, 1 * 8);
-    uint64_t cgen_var_366;
-    stream->handleMapping()->mapHandles_VkPipeline_u64(&local_pipeline, &cgen_var_366, 1);
-    stream->write((uint64_t*)&cgen_var_366, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyPipeline, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyPipeline, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipeline((*&local_pipeline));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_367 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_367);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyPipeline readParams");
-    AEMU_SCOPED_TRACE("vkDestroyPipeline returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkPipeline((VkPipeline*)&pipeline);
-    mImpl->log("finish vkDestroyPipeline");;
+    sResourceTracker->destroyMapping()->mapHandles_VkPipeline((VkPipeline*)&pipeline);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkCreatePipelineLayout(
     VkDevice device,
     const VkPipelineLayoutCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkPipelineLayout* pPipelineLayout)
+    VkPipelineLayout* pPipelineLayout,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreatePipelineLayout encode");
-    mImpl->log("start vkCreatePipelineLayout");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkPipelineLayoutCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -5254,90 +5630,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkPipelineLayoutCreateInfo*)pool->alloc(sizeof(const VkPipelineLayoutCreateInfo));
-        deepcopy_VkPipelineLayoutCreateInfo(pool, pCreateInfo, (VkPipelineLayoutCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkPipelineLayoutCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkPipelineLayoutCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkPipelineLayoutCreateInfo(mImpl->resources(), (VkPipelineLayoutCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkPipelineLayoutCreateInfo(sResourceTracker, (VkPipelineLayoutCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_368;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_368, 1);
-        countingStream->write((uint64_t*)&cgen_var_368, 1 * 8);
-        marshal_VkPipelineLayoutCreateInfo(countingStream, (VkPipelineLayoutCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPipelineLayoutCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineLayoutCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_369 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_369);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_370;
-        countingStream->handleMapping()->mapHandles_VkPipelineLayout_u64(pPipelineLayout, &cgen_var_370, 1);
-        countingStream->write((uint64_t*)&cgen_var_370, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreatePipelineLayout = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreatePipelineLayout = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreatePipelineLayout);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreatePipelineLayout = OP_vkCreatePipelineLayout;
-    stream->write(&opcode_vkCreatePipelineLayout, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreatePipelineLayout, sizeof(uint32_t));
-    uint64_t cgen_var_371;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_371, 1);
-    stream->write((uint64_t*)&cgen_var_371, 1 * 8);
-    marshal_VkPipelineLayoutCreateInfo(stream, (VkPipelineLayoutCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreatePipelineLayout, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreatePipelineLayout, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPipelineLayoutCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineLayoutCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_372 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_372);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_373;
-    stream->handleMapping()->mapHandles_VkPipelineLayout_u64(pPipelineLayout, &cgen_var_373, 1);
-    stream->write((uint64_t*)&cgen_var_373, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreatePipelineLayout readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_374;
-    stream->read((uint64_t*)&cgen_var_374, 8);
-    stream->handleMapping()->mapHandles_u64_VkPipelineLayout(&cgen_var_374, (VkPipelineLayout*)pPipelineLayout, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pPipelineLayout));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkPipelineLayout(&cgen_var_3, (VkPipelineLayout*)pPipelineLayout, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreatePipelineLayout returnUnmarshal");
     VkResult vkCreatePipelineLayout_VkResult_return = (VkResult)0;
     stream->read(&vkCreatePipelineLayout_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreatePipelineLayout");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreatePipelineLayout_VkResult_return;
 }
 
 void VkEncoder::vkDestroyPipelineLayout(
     VkDevice device,
     VkPipelineLayout pipelineLayout,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyPipelineLayout encode");
-    mImpl->log("start vkDestroyPipelineLayout");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkPipelineLayout local_pipelineLayout;
     VkAllocationCallbacks* local_pAllocator;
@@ -5347,67 +5727,75 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_375;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_375, 1);
-        countingStream->write((uint64_t*)&cgen_var_375, 1 * 8);
-        uint64_t cgen_var_376;
-        countingStream->handleMapping()->mapHandles_VkPipelineLayout_u64(&local_pipelineLayout, &cgen_var_376, 1);
-        countingStream->write((uint64_t*)&cgen_var_376, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_377 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_377);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyPipelineLayout = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyPipelineLayout = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyPipelineLayout);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyPipelineLayout = OP_vkDestroyPipelineLayout;
-    stream->write(&opcode_vkDestroyPipelineLayout, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyPipelineLayout, sizeof(uint32_t));
-    uint64_t cgen_var_378;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_378, 1);
-    stream->write((uint64_t*)&cgen_var_378, 1 * 8);
-    uint64_t cgen_var_379;
-    stream->handleMapping()->mapHandles_VkPipelineLayout_u64(&local_pipelineLayout, &cgen_var_379, 1);
-    stream->write((uint64_t*)&cgen_var_379, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyPipelineLayout, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyPipelineLayout, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipelineLayout((*&local_pipelineLayout));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_380 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_380);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyPipelineLayout readParams");
-    AEMU_SCOPED_TRACE("vkDestroyPipelineLayout returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkPipelineLayout((VkPipelineLayout*)&pipelineLayout);
-    mImpl->log("finish vkDestroyPipelineLayout");;
+    sResourceTracker->destroyMapping()->mapHandles_VkPipelineLayout((VkPipelineLayout*)&pipelineLayout);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkCreateSampler(
     VkDevice device,
     const VkSamplerCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkSampler* pSampler)
+    VkSampler* pSampler,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateSampler encode");
-    mImpl->log("start vkCreateSampler");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSamplerCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -5416,90 +5804,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkSamplerCreateInfo*)pool->alloc(sizeof(const VkSamplerCreateInfo));
-        deepcopy_VkSamplerCreateInfo(pool, pCreateInfo, (VkSamplerCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkSamplerCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkSamplerCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkSamplerCreateInfo(mImpl->resources(), (VkSamplerCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkSamplerCreateInfo(sResourceTracker, (VkSamplerCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_381;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_381, 1);
-        countingStream->write((uint64_t*)&cgen_var_381, 1 * 8);
-        marshal_VkSamplerCreateInfo(countingStream, (VkSamplerCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkSamplerCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSamplerCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_382 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_382);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_383;
-        countingStream->handleMapping()->mapHandles_VkSampler_u64(pSampler, &cgen_var_383, 1);
-        countingStream->write((uint64_t*)&cgen_var_383, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateSampler = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateSampler = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateSampler);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateSampler = OP_vkCreateSampler;
-    stream->write(&opcode_vkCreateSampler, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateSampler, sizeof(uint32_t));
-    uint64_t cgen_var_384;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_384, 1);
-    stream->write((uint64_t*)&cgen_var_384, 1 * 8);
-    marshal_VkSamplerCreateInfo(stream, (VkSamplerCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateSampler, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateSampler, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkSamplerCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSamplerCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_385 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_385);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_386;
-    stream->handleMapping()->mapHandles_VkSampler_u64(pSampler, &cgen_var_386, 1);
-    stream->write((uint64_t*)&cgen_var_386, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateSampler readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_387;
-    stream->read((uint64_t*)&cgen_var_387, 8);
-    stream->handleMapping()->mapHandles_u64_VkSampler(&cgen_var_387, (VkSampler*)pSampler, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSampler));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSampler(&cgen_var_3, (VkSampler*)pSampler, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateSampler returnUnmarshal");
     VkResult vkCreateSampler_VkResult_return = (VkResult)0;
     stream->read(&vkCreateSampler_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateSampler");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateSampler_VkResult_return;
 }
 
 void VkEncoder::vkDestroySampler(
     VkDevice device,
     VkSampler sampler,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroySampler encode");
-    mImpl->log("start vkDestroySampler");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSampler local_sampler;
     VkAllocationCallbacks* local_pAllocator;
@@ -5509,67 +5901,75 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_388;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_388, 1);
-        countingStream->write((uint64_t*)&cgen_var_388, 1 * 8);
-        uint64_t cgen_var_389;
-        countingStream->handleMapping()->mapHandles_VkSampler_u64(&local_sampler, &cgen_var_389, 1);
-        countingStream->write((uint64_t*)&cgen_var_389, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_390 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_390);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroySampler = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroySampler = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroySampler);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroySampler = OP_vkDestroySampler;
-    stream->write(&opcode_vkDestroySampler, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroySampler, sizeof(uint32_t));
-    uint64_t cgen_var_391;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_391, 1);
-    stream->write((uint64_t*)&cgen_var_391, 1 * 8);
-    uint64_t cgen_var_392;
-    stream->handleMapping()->mapHandles_VkSampler_u64(&local_sampler, &cgen_var_392, 1);
-    stream->write((uint64_t*)&cgen_var_392, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroySampler, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroySampler, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSampler((*&local_sampler));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_393 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_393);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroySampler readParams");
-    AEMU_SCOPED_TRACE("vkDestroySampler returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkSampler((VkSampler*)&sampler);
-    mImpl->log("finish vkDestroySampler");;
+    sResourceTracker->destroyMapping()->mapHandles_VkSampler((VkSampler*)&sampler);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkCreateDescriptorSetLayout(
     VkDevice device,
     const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkDescriptorSetLayout* pSetLayout)
+    VkDescriptorSetLayout* pSetLayout,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateDescriptorSetLayout encode");
-    mImpl->log("start vkCreateDescriptorSetLayout");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDescriptorSetLayoutCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -5578,90 +5978,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkDescriptorSetLayoutCreateInfo*)pool->alloc(sizeof(const VkDescriptorSetLayoutCreateInfo));
-        deepcopy_VkDescriptorSetLayoutCreateInfo(pool, pCreateInfo, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkDescriptorSetLayoutCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkDescriptorSetLayoutCreateInfo(mImpl->resources(), (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkDescriptorSetLayoutCreateInfo(sResourceTracker, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_394;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_394, 1);
-        countingStream->write((uint64_t*)&cgen_var_394, 1 * 8);
-        marshal_VkDescriptorSetLayoutCreateInfo(countingStream, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDescriptorSetLayoutCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_395 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_395);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_396;
-        countingStream->handleMapping()->mapHandles_VkDescriptorSetLayout_u64(pSetLayout, &cgen_var_396, 1);
-        countingStream->write((uint64_t*)&cgen_var_396, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateDescriptorSetLayout = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateDescriptorSetLayout = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateDescriptorSetLayout);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateDescriptorSetLayout = OP_vkCreateDescriptorSetLayout;
-    stream->write(&opcode_vkCreateDescriptorSetLayout, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateDescriptorSetLayout, sizeof(uint32_t));
-    uint64_t cgen_var_397;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_397, 1);
-    stream->write((uint64_t*)&cgen_var_397, 1 * 8);
-    marshal_VkDescriptorSetLayoutCreateInfo(stream, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateDescriptorSetLayout, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateDescriptorSetLayout, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDescriptorSetLayoutCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_398 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_398);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_399;
-    stream->handleMapping()->mapHandles_VkDescriptorSetLayout_u64(pSetLayout, &cgen_var_399, 1);
-    stream->write((uint64_t*)&cgen_var_399, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateDescriptorSetLayout readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_400;
-    stream->read((uint64_t*)&cgen_var_400, 8);
-    stream->handleMapping()->mapHandles_u64_VkDescriptorSetLayout(&cgen_var_400, (VkDescriptorSetLayout*)pSetLayout, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSetLayout));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkDescriptorSetLayout(&cgen_var_3, (VkDescriptorSetLayout*)pSetLayout, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateDescriptorSetLayout returnUnmarshal");
     VkResult vkCreateDescriptorSetLayout_VkResult_return = (VkResult)0;
     stream->read(&vkCreateDescriptorSetLayout_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateDescriptorSetLayout");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateDescriptorSetLayout_VkResult_return;
 }
 
 void VkEncoder::vkDestroyDescriptorSetLayout(
     VkDevice device,
     VkDescriptorSetLayout descriptorSetLayout,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorSetLayout encode");
-    mImpl->log("start vkDestroyDescriptorSetLayout");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDescriptorSetLayout local_descriptorSetLayout;
     VkAllocationCallbacks* local_pAllocator;
@@ -5671,67 +6075,75 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_401;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_401, 1);
-        countingStream->write((uint64_t*)&cgen_var_401, 1 * 8);
-        uint64_t cgen_var_402;
-        countingStream->handleMapping()->mapHandles_VkDescriptorSetLayout_u64(&local_descriptorSetLayout, &cgen_var_402, 1);
-        countingStream->write((uint64_t*)&cgen_var_402, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_403 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_403);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyDescriptorSetLayout = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyDescriptorSetLayout = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyDescriptorSetLayout);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyDescriptorSetLayout = OP_vkDestroyDescriptorSetLayout;
-    stream->write(&opcode_vkDestroyDescriptorSetLayout, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyDescriptorSetLayout, sizeof(uint32_t));
-    uint64_t cgen_var_404;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_404, 1);
-    stream->write((uint64_t*)&cgen_var_404, 1 * 8);
-    uint64_t cgen_var_405;
-    stream->handleMapping()->mapHandles_VkDescriptorSetLayout_u64(&local_descriptorSetLayout, &cgen_var_405, 1);
-    stream->write((uint64_t*)&cgen_var_405, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyDescriptorSetLayout, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyDescriptorSetLayout, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDescriptorSetLayout((*&local_descriptorSetLayout));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_406 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_406);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorSetLayout readParams");
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorSetLayout returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkDescriptorSetLayout((VkDescriptorSetLayout*)&descriptorSetLayout);
-    mImpl->log("finish vkDestroyDescriptorSetLayout");;
+    sResourceTracker->destroyMapping()->mapHandles_VkDescriptorSetLayout((VkDescriptorSetLayout*)&descriptorSetLayout);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkCreateDescriptorPool(
     VkDevice device,
     const VkDescriptorPoolCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkDescriptorPool* pDescriptorPool)
+    VkDescriptorPool* pDescriptorPool,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateDescriptorPool encode");
-    mImpl->log("start vkCreateDescriptorPool");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDescriptorPoolCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -5740,90 +6152,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkDescriptorPoolCreateInfo*)pool->alloc(sizeof(const VkDescriptorPoolCreateInfo));
-        deepcopy_VkDescriptorPoolCreateInfo(pool, pCreateInfo, (VkDescriptorPoolCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkDescriptorPoolCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkDescriptorPoolCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkDescriptorPoolCreateInfo(mImpl->resources(), (VkDescriptorPoolCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkDescriptorPoolCreateInfo(sResourceTracker, (VkDescriptorPoolCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_407;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_407, 1);
-        countingStream->write((uint64_t*)&cgen_var_407, 1 * 8);
-        marshal_VkDescriptorPoolCreateInfo(countingStream, (VkDescriptorPoolCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDescriptorPoolCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorPoolCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_408 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_408);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_409;
-        countingStream->handleMapping()->mapHandles_VkDescriptorPool_u64(pDescriptorPool, &cgen_var_409, 1);
-        countingStream->write((uint64_t*)&cgen_var_409, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateDescriptorPool = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateDescriptorPool = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateDescriptorPool);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateDescriptorPool = OP_vkCreateDescriptorPool;
-    stream->write(&opcode_vkCreateDescriptorPool, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateDescriptorPool, sizeof(uint32_t));
-    uint64_t cgen_var_410;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_410, 1);
-    stream->write((uint64_t*)&cgen_var_410, 1 * 8);
-    marshal_VkDescriptorPoolCreateInfo(stream, (VkDescriptorPoolCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateDescriptorPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateDescriptorPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDescriptorPoolCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorPoolCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_411 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_411);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_412;
-    stream->handleMapping()->mapHandles_VkDescriptorPool_u64(pDescriptorPool, &cgen_var_412, 1);
-    stream->write((uint64_t*)&cgen_var_412, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateDescriptorPool readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_413;
-    stream->read((uint64_t*)&cgen_var_413, 8);
-    stream->handleMapping()->mapHandles_u64_VkDescriptorPool(&cgen_var_413, (VkDescriptorPool*)pDescriptorPool, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pDescriptorPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkDescriptorPool(&cgen_var_3, (VkDescriptorPool*)pDescriptorPool, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateDescriptorPool returnUnmarshal");
     VkResult vkCreateDescriptorPool_VkResult_return = (VkResult)0;
     stream->read(&vkCreateDescriptorPool_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateDescriptorPool");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateDescriptorPool_VkResult_return;
 }
 
 void VkEncoder::vkDestroyDescriptorPool(
     VkDevice device,
     VkDescriptorPool descriptorPool,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorPool encode");
-    mImpl->log("start vkDestroyDescriptorPool");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDescriptorPool local_descriptorPool;
     VkAllocationCallbacks* local_pAllocator;
@@ -5833,118 +6249,130 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_414;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_414, 1);
-        countingStream->write((uint64_t*)&cgen_var_414, 1 * 8);
-        uint64_t cgen_var_415;
-        countingStream->handleMapping()->mapHandles_VkDescriptorPool_u64(&local_descriptorPool, &cgen_var_415, 1);
-        countingStream->write((uint64_t*)&cgen_var_415, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_416 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_416);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyDescriptorPool = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyDescriptorPool = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyDescriptorPool);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyDescriptorPool = OP_vkDestroyDescriptorPool;
-    stream->write(&opcode_vkDestroyDescriptorPool, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyDescriptorPool, sizeof(uint32_t));
-    uint64_t cgen_var_417;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_417, 1);
-    stream->write((uint64_t*)&cgen_var_417, 1 * 8);
-    uint64_t cgen_var_418;
-    stream->handleMapping()->mapHandles_VkDescriptorPool_u64(&local_descriptorPool, &cgen_var_418, 1);
-    stream->write((uint64_t*)&cgen_var_418, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyDescriptorPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyDescriptorPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDescriptorPool((*&local_descriptorPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_419 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_419);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorPool readParams");
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorPool returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkDescriptorPool((VkDescriptorPool*)&descriptorPool);
-    mImpl->log("finish vkDestroyDescriptorPool");;
+    sResourceTracker->destroyMapping()->mapHandles_VkDescriptorPool((VkDescriptorPool*)&descriptorPool);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkResetDescriptorPool(
     VkDevice device,
     VkDescriptorPool descriptorPool,
-    VkDescriptorPoolResetFlags flags)
+    VkDescriptorPoolResetFlags flags,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkResetDescriptorPool encode");
-    mImpl->log("start vkResetDescriptorPool");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDescriptorPool local_descriptorPool;
     VkDescriptorPoolResetFlags local_flags;
     local_device = device;
     local_descriptorPool = descriptorPool;
     local_flags = flags;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_420;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_420, 1);
-        countingStream->write((uint64_t*)&cgen_var_420, 1 * 8);
-        uint64_t cgen_var_421;
-        countingStream->handleMapping()->mapHandles_VkDescriptorPool_u64(&local_descriptorPool, &cgen_var_421, 1);
-        countingStream->write((uint64_t*)&cgen_var_421, 1 * 8);
-        countingStream->write((VkDescriptorPoolResetFlags*)&local_flags, sizeof(VkDescriptorPoolResetFlags));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDescriptorPoolResetFlags);
     }
-    uint32_t packetSize_vkResetDescriptorPool = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkResetDescriptorPool = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkResetDescriptorPool);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkResetDescriptorPool = OP_vkResetDescriptorPool;
-    stream->write(&opcode_vkResetDescriptorPool, sizeof(uint32_t));
-    stream->write(&packetSize_vkResetDescriptorPool, sizeof(uint32_t));
-    uint64_t cgen_var_422;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_422, 1);
-    stream->write((uint64_t*)&cgen_var_422, 1 * 8);
-    uint64_t cgen_var_423;
-    stream->handleMapping()->mapHandles_VkDescriptorPool_u64(&local_descriptorPool, &cgen_var_423, 1);
-    stream->write((uint64_t*)&cgen_var_423, 1 * 8);
-    stream->write((VkDescriptorPoolResetFlags*)&local_flags, sizeof(VkDescriptorPoolResetFlags));
-    AEMU_SCOPED_TRACE("vkResetDescriptorPool readParams");
-    AEMU_SCOPED_TRACE("vkResetDescriptorPool returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkResetDescriptorPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkResetDescriptorPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDescriptorPool((*&local_descriptorPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDescriptorPoolResetFlags*)&local_flags, sizeof(VkDescriptorPoolResetFlags));
+    *streamPtrPtr += sizeof(VkDescriptorPoolResetFlags);
     VkResult vkResetDescriptorPool_VkResult_return = (VkResult)0;
     stream->read(&vkResetDescriptorPool_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkResetDescriptorPool");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkResetDescriptorPool_VkResult_return;
 }
 
 VkResult VkEncoder::vkAllocateDescriptorSets(
     VkDevice device,
     const VkDescriptorSetAllocateInfo* pAllocateInfo,
-    VkDescriptorSet* pDescriptorSets)
+    VkDescriptorSet* pDescriptorSets,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkAllocateDescriptorSets encode");
-    mImpl->log("start vkAllocateDescriptorSets");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDescriptorSetAllocateInfo* local_pAllocateInfo;
     local_device = device;
@@ -5952,61 +6380,66 @@
     if (pAllocateInfo)
     {
         local_pAllocateInfo = (VkDescriptorSetAllocateInfo*)pool->alloc(sizeof(const VkDescriptorSetAllocateInfo));
-        deepcopy_VkDescriptorSetAllocateInfo(pool, pAllocateInfo, (VkDescriptorSetAllocateInfo*)(local_pAllocateInfo));
+        deepcopy_VkDescriptorSetAllocateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocateInfo, (VkDescriptorSetAllocateInfo*)(local_pAllocateInfo));
     }
     if (local_pAllocateInfo)
     {
-        transform_tohost_VkDescriptorSetAllocateInfo(mImpl->resources(), (VkDescriptorSetAllocateInfo*)(local_pAllocateInfo));
+        transform_tohost_VkDescriptorSetAllocateInfo(sResourceTracker, (VkDescriptorSetAllocateInfo*)(local_pAllocateInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_424;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_424, 1);
-        countingStream->write((uint64_t*)&cgen_var_424, 1 * 8);
-        marshal_VkDescriptorSetAllocateInfo(countingStream, (VkDescriptorSetAllocateInfo*)(local_pAllocateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDescriptorSetAllocateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorSetAllocateInfo*)(local_pAllocateInfo), countPtr);
         if (pAllocateInfo->descriptorSetCount)
         {
-            uint64_t* cgen_var_425;
-            countingStream->alloc((void**)&cgen_var_425, pAllocateInfo->descriptorSetCount * 8);
-            countingStream->handleMapping()->mapHandles_VkDescriptorSet_u64(pDescriptorSets, cgen_var_425, pAllocateInfo->descriptorSetCount);
-            countingStream->write((uint64_t*)cgen_var_425, pAllocateInfo->descriptorSetCount * 8);
+            *countPtr += pAllocateInfo->descriptorSetCount * 8;
         }
     }
-    uint32_t packetSize_vkAllocateDescriptorSets = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkAllocateDescriptorSets = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkAllocateDescriptorSets);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkAllocateDescriptorSets = OP_vkAllocateDescriptorSets;
-    stream->write(&opcode_vkAllocateDescriptorSets, sizeof(uint32_t));
-    stream->write(&packetSize_vkAllocateDescriptorSets, sizeof(uint32_t));
-    uint64_t cgen_var_426;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_426, 1);
-    stream->write((uint64_t*)&cgen_var_426, 1 * 8);
-    marshal_VkDescriptorSetAllocateInfo(stream, (VkDescriptorSetAllocateInfo*)(local_pAllocateInfo));
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkAllocateDescriptorSets, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkAllocateDescriptorSets, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDescriptorSetAllocateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorSetAllocateInfo*)(local_pAllocateInfo), streamPtrPtr);
+    /* is handle, possibly out */;
     if (pAllocateInfo->descriptorSetCount)
     {
-        uint64_t* cgen_var_427;
-        stream->alloc((void**)&cgen_var_427, pAllocateInfo->descriptorSetCount * 8);
-        stream->handleMapping()->mapHandles_VkDescriptorSet_u64(pDescriptorSets, cgen_var_427, pAllocateInfo->descriptorSetCount);
-        stream->write((uint64_t*)cgen_var_427, pAllocateInfo->descriptorSetCount * 8);
+        uint8_t* cgen_var_1_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < pAllocateInfo->descriptorSetCount; ++k)
+        {
+            uint64_t tmpval = (uint64_t)(pDescriptorSets[k]);
+            memcpy(cgen_var_1_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * pAllocateInfo->descriptorSetCount;
     }
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkAllocateDescriptorSets readParams");
-    stream->setHandleMapping(resources->createMapping());
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
     if (pAllocateInfo->descriptorSetCount)
     {
-        uint64_t* cgen_var_428;
-        stream->alloc((void**)&cgen_var_428, pAllocateInfo->descriptorSetCount * 8);
-        stream->read((uint64_t*)cgen_var_428, pAllocateInfo->descriptorSetCount * 8);
-        stream->handleMapping()->mapHandles_u64_VkDescriptorSet(cgen_var_428, (VkDescriptorSet*)pDescriptorSets, pAllocateInfo->descriptorSetCount);
+        uint64_t* cgen_var_2;
+        stream->alloc((void**)&cgen_var_2, pAllocateInfo->descriptorSetCount * 8);
+        stream->read((uint64_t*)cgen_var_2, pAllocateInfo->descriptorSetCount * 8);
+        stream->handleMapping()->mapHandles_u64_VkDescriptorSet(cgen_var_2, (VkDescriptorSet*)pDescriptorSets, pAllocateInfo->descriptorSetCount);
     }
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkAllocateDescriptorSets returnUnmarshal");
     VkResult vkAllocateDescriptorSets_VkResult_return = (VkResult)0;
     stream->read(&vkAllocateDescriptorSets_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkAllocateDescriptorSets");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkAllocateDescriptorSets_VkResult_return;
 }
 
@@ -6014,16 +6447,14 @@
     VkDevice device,
     VkDescriptorPool descriptorPool,
     uint32_t descriptorSetCount,
-    const VkDescriptorSet* pDescriptorSets)
+    const VkDescriptorSet* pDescriptorSets,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkFreeDescriptorSets encode");
-    mImpl->log("start vkFreeDescriptorSets");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDescriptorPool local_descriptorPool;
     uint32_t local_descriptorSetCount;
@@ -6031,71 +6462,75 @@
     local_device = device;
     local_descriptorPool = descriptorPool;
     local_descriptorSetCount = descriptorSetCount;
-    local_pDescriptorSets = nullptr;
-    if (pDescriptorSets)
+    // Avoiding deepcopy for pDescriptorSets
+    local_pDescriptorSets = (VkDescriptorSet*)pDescriptorSets;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pDescriptorSets = (VkDescriptorSet*)pool->dupArray(pDescriptorSets, ((descriptorSetCount)) * sizeof(const VkDescriptorSet));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_429;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_429, 1);
-        countingStream->write((uint64_t*)&cgen_var_429, 1 * 8);
-        uint64_t cgen_var_430;
-        countingStream->handleMapping()->mapHandles_VkDescriptorPool_u64(&local_descriptorPool, &cgen_var_430, 1);
-        countingStream->write((uint64_t*)&cgen_var_430, 1 * 8);
-        countingStream->write((uint32_t*)&local_descriptorSetCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         // WARNING PTR CHECK
-        uint64_t cgen_var_431 = (uint64_t)(uintptr_t)local_pDescriptorSets;
-        countingStream->putBe64(cgen_var_431);
+        *countPtr += 8;
         if (local_pDescriptorSets)
         {
             if (((descriptorSetCount)))
             {
-                uint64_t* cgen_var_432;
-                countingStream->alloc((void**)&cgen_var_432, ((descriptorSetCount)) * 8);
-                countingStream->handleMapping()->mapHandles_VkDescriptorSet_u64(local_pDescriptorSets, cgen_var_432, ((descriptorSetCount)));
-                countingStream->write((uint64_t*)cgen_var_432, ((descriptorSetCount)) * 8);
+                *countPtr += ((descriptorSetCount)) * 8;
             }
         }
     }
-    uint32_t packetSize_vkFreeDescriptorSets = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkFreeDescriptorSets = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkFreeDescriptorSets);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkFreeDescriptorSets = OP_vkFreeDescriptorSets;
-    stream->write(&opcode_vkFreeDescriptorSets, sizeof(uint32_t));
-    stream->write(&packetSize_vkFreeDescriptorSets, sizeof(uint32_t));
-    uint64_t cgen_var_433;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_433, 1);
-    stream->write((uint64_t*)&cgen_var_433, 1 * 8);
-    uint64_t cgen_var_434;
-    stream->handleMapping()->mapHandles_VkDescriptorPool_u64(&local_descriptorPool, &cgen_var_434, 1);
-    stream->write((uint64_t*)&cgen_var_434, 1 * 8);
-    stream->write((uint32_t*)&local_descriptorSetCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkFreeDescriptorSets, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkFreeDescriptorSets, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDescriptorPool((*&local_descriptorPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_descriptorSetCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     // WARNING PTR CHECK
-    uint64_t cgen_var_435 = (uint64_t)(uintptr_t)local_pDescriptorSets;
-    stream->putBe64(cgen_var_435);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pDescriptorSets;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pDescriptorSets)
     {
         if (((descriptorSetCount)))
         {
-            uint64_t* cgen_var_436;
-            stream->alloc((void**)&cgen_var_436, ((descriptorSetCount)) * 8);
-            stream->handleMapping()->mapHandles_VkDescriptorSet_u64(local_pDescriptorSets, cgen_var_436, ((descriptorSetCount)));
-            stream->write((uint64_t*)cgen_var_436, ((descriptorSetCount)) * 8);
+            uint8_t* cgen_var_2_0_ptr = (uint8_t*)(*streamPtrPtr);
+            for (uint32_t k = 0; k < ((descriptorSetCount)); ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkDescriptorSet(local_pDescriptorSets[k]);
+                memcpy(cgen_var_2_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+            *streamPtrPtr += 8 * ((descriptorSetCount));
         }
     }
-    AEMU_SCOPED_TRACE("vkFreeDescriptorSets readParams");
-    AEMU_SCOPED_TRACE("vkFreeDescriptorSets returnUnmarshal");
     VkResult vkFreeDescriptorSets_VkResult_return = (VkResult)0;
     stream->read(&vkFreeDescriptorSets_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
     if (pDescriptorSets)
     {
-        resources->destroyMapping()->mapHandles_VkDescriptorSet((VkDescriptorSet*)pDescriptorSets, ((descriptorSetCount)));
+        sResourceTracker->destroyMapping()->mapHandles_VkDescriptorSet((VkDescriptorSet*)pDescriptorSets, ((descriptorSetCount)));
     }
-    mImpl->log("finish vkFreeDescriptorSets");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkFreeDescriptorSets_VkResult_return;
 }
 
@@ -6104,16 +6539,14 @@
     uint32_t descriptorWriteCount,
     const VkWriteDescriptorSet* pDescriptorWrites,
     uint32_t descriptorCopyCount,
-    const VkCopyDescriptorSet* pDescriptorCopies)
+    const VkCopyDescriptorSet* pDescriptorCopies,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSets encode");
-    mImpl->log("start vkUpdateDescriptorSets");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     uint32_t local_descriptorWriteCount;
     VkWriteDescriptorSet* local_pDescriptorWrites;
@@ -6127,7 +6560,7 @@
         local_pDescriptorWrites = (VkWriteDescriptorSet*)pool->alloc(((descriptorWriteCount)) * sizeof(const VkWriteDescriptorSet));
         for (uint32_t i = 0; i < (uint32_t)((descriptorWriteCount)); ++i)
         {
-            deepcopy_VkWriteDescriptorSet(pool, pDescriptorWrites + i, (VkWriteDescriptorSet*)(local_pDescriptorWrites + i));
+            deepcopy_VkWriteDescriptorSet(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pDescriptorWrites + i, (VkWriteDescriptorSet*)(local_pDescriptorWrites + i));
         }
     }
     local_descriptorCopyCount = descriptorCopyCount;
@@ -6137,76 +6570,85 @@
         local_pDescriptorCopies = (VkCopyDescriptorSet*)pool->alloc(((descriptorCopyCount)) * sizeof(const VkCopyDescriptorSet));
         for (uint32_t i = 0; i < (uint32_t)((descriptorCopyCount)); ++i)
         {
-            deepcopy_VkCopyDescriptorSet(pool, pDescriptorCopies + i, (VkCopyDescriptorSet*)(local_pDescriptorCopies + i));
+            deepcopy_VkCopyDescriptorSet(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pDescriptorCopies + i, (VkCopyDescriptorSet*)(local_pDescriptorCopies + i));
         }
     }
     if (local_pDescriptorWrites)
     {
         for (uint32_t i = 0; i < (uint32_t)((descriptorWriteCount)); ++i)
         {
-            transform_tohost_VkWriteDescriptorSet(mImpl->resources(), (VkWriteDescriptorSet*)(local_pDescriptorWrites + i));
+            transform_tohost_VkWriteDescriptorSet(sResourceTracker, (VkWriteDescriptorSet*)(local_pDescriptorWrites + i));
         }
     }
     if (local_pDescriptorCopies)
     {
         for (uint32_t i = 0; i < (uint32_t)((descriptorCopyCount)); ++i)
         {
-            transform_tohost_VkCopyDescriptorSet(mImpl->resources(), (VkCopyDescriptorSet*)(local_pDescriptorCopies + i));
+            transform_tohost_VkCopyDescriptorSet(sResourceTracker, (VkCopyDescriptorSet*)(local_pDescriptorCopies + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_437;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_437, 1);
-        countingStream->write((uint64_t*)&cgen_var_437, 1 * 8);
-        countingStream->write((uint32_t*)&local_descriptorWriteCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((descriptorWriteCount)); ++i)
         {
-            marshal_VkWriteDescriptorSet(countingStream, (VkWriteDescriptorSet*)(local_pDescriptorWrites + i));
+            count_VkWriteDescriptorSet(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkWriteDescriptorSet*)(local_pDescriptorWrites + i), countPtr);
         }
-        countingStream->write((uint32_t*)&local_descriptorCopyCount, sizeof(uint32_t));
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((descriptorCopyCount)); ++i)
         {
-            marshal_VkCopyDescriptorSet(countingStream, (VkCopyDescriptorSet*)(local_pDescriptorCopies + i));
+            count_VkCopyDescriptorSet(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyDescriptorSet*)(local_pDescriptorCopies + i), countPtr);
         }
     }
-    uint32_t packetSize_vkUpdateDescriptorSets = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkUpdateDescriptorSets = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkUpdateDescriptorSets);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkUpdateDescriptorSets = OP_vkUpdateDescriptorSets;
-    stream->write(&opcode_vkUpdateDescriptorSets, sizeof(uint32_t));
-    stream->write(&packetSize_vkUpdateDescriptorSets, sizeof(uint32_t));
-    uint64_t cgen_var_438;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_438, 1);
-    stream->write((uint64_t*)&cgen_var_438, 1 * 8);
-    stream->write((uint32_t*)&local_descriptorWriteCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkUpdateDescriptorSets, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkUpdateDescriptorSets, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_descriptorWriteCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((descriptorWriteCount)); ++i)
     {
-        marshal_VkWriteDescriptorSet(stream, (VkWriteDescriptorSet*)(local_pDescriptorWrites + i));
+        reservedmarshal_VkWriteDescriptorSet(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkWriteDescriptorSet*)(local_pDescriptorWrites + i), streamPtrPtr);
     }
-    stream->write((uint32_t*)&local_descriptorCopyCount, sizeof(uint32_t));
+    memcpy(*streamPtrPtr, (uint32_t*)&local_descriptorCopyCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((descriptorCopyCount)); ++i)
     {
-        marshal_VkCopyDescriptorSet(stream, (VkCopyDescriptorSet*)(local_pDescriptorCopies + i));
+        reservedmarshal_VkCopyDescriptorSet(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyDescriptorSet*)(local_pDescriptorCopies + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSets readParams");
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSets returnUnmarshal");
-    mImpl->log("finish vkUpdateDescriptorSets");;
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkCreateFramebuffer(
     VkDevice device,
     const VkFramebufferCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkFramebuffer* pFramebuffer)
+    VkFramebuffer* pFramebuffer,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateFramebuffer encode");
-    mImpl->log("start vkCreateFramebuffer");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkFramebufferCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -6215,90 +6657,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkFramebufferCreateInfo*)pool->alloc(sizeof(const VkFramebufferCreateInfo));
-        deepcopy_VkFramebufferCreateInfo(pool, pCreateInfo, (VkFramebufferCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkFramebufferCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkFramebufferCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkFramebufferCreateInfo(mImpl->resources(), (VkFramebufferCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkFramebufferCreateInfo(sResourceTracker, (VkFramebufferCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_439;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_439, 1);
-        countingStream->write((uint64_t*)&cgen_var_439, 1 * 8);
-        marshal_VkFramebufferCreateInfo(countingStream, (VkFramebufferCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkFramebufferCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFramebufferCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_440 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_440);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_441;
-        countingStream->handleMapping()->mapHandles_VkFramebuffer_u64(pFramebuffer, &cgen_var_441, 1);
-        countingStream->write((uint64_t*)&cgen_var_441, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateFramebuffer = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateFramebuffer = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateFramebuffer);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateFramebuffer = OP_vkCreateFramebuffer;
-    stream->write(&opcode_vkCreateFramebuffer, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateFramebuffer, sizeof(uint32_t));
-    uint64_t cgen_var_442;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_442, 1);
-    stream->write((uint64_t*)&cgen_var_442, 1 * 8);
-    marshal_VkFramebufferCreateInfo(stream, (VkFramebufferCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateFramebuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateFramebuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkFramebufferCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFramebufferCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_443 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_443);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_444;
-    stream->handleMapping()->mapHandles_VkFramebuffer_u64(pFramebuffer, &cgen_var_444, 1);
-    stream->write((uint64_t*)&cgen_var_444, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateFramebuffer readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_445;
-    stream->read((uint64_t*)&cgen_var_445, 8);
-    stream->handleMapping()->mapHandles_u64_VkFramebuffer(&cgen_var_445, (VkFramebuffer*)pFramebuffer, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pFramebuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkFramebuffer(&cgen_var_3, (VkFramebuffer*)pFramebuffer, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateFramebuffer returnUnmarshal");
     VkResult vkCreateFramebuffer_VkResult_return = (VkResult)0;
     stream->read(&vkCreateFramebuffer_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateFramebuffer");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateFramebuffer_VkResult_return;
 }
 
 void VkEncoder::vkDestroyFramebuffer(
     VkDevice device,
     VkFramebuffer framebuffer,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyFramebuffer encode");
-    mImpl->log("start vkDestroyFramebuffer");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkFramebuffer local_framebuffer;
     VkAllocationCallbacks* local_pAllocator;
@@ -6308,67 +6754,75 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_446;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_446, 1);
-        countingStream->write((uint64_t*)&cgen_var_446, 1 * 8);
-        uint64_t cgen_var_447;
-        countingStream->handleMapping()->mapHandles_VkFramebuffer_u64(&local_framebuffer, &cgen_var_447, 1);
-        countingStream->write((uint64_t*)&cgen_var_447, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_448 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_448);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyFramebuffer = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyFramebuffer = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyFramebuffer);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyFramebuffer = OP_vkDestroyFramebuffer;
-    stream->write(&opcode_vkDestroyFramebuffer, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyFramebuffer, sizeof(uint32_t));
-    uint64_t cgen_var_449;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_449, 1);
-    stream->write((uint64_t*)&cgen_var_449, 1 * 8);
-    uint64_t cgen_var_450;
-    stream->handleMapping()->mapHandles_VkFramebuffer_u64(&local_framebuffer, &cgen_var_450, 1);
-    stream->write((uint64_t*)&cgen_var_450, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyFramebuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyFramebuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkFramebuffer((*&local_framebuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_451 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_451);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyFramebuffer readParams");
-    AEMU_SCOPED_TRACE("vkDestroyFramebuffer returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkFramebuffer((VkFramebuffer*)&framebuffer);
-    mImpl->log("finish vkDestroyFramebuffer");;
+    sResourceTracker->destroyMapping()->mapHandles_VkFramebuffer((VkFramebuffer*)&framebuffer);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkCreateRenderPass(
     VkDevice device,
     const VkRenderPassCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkRenderPass* pRenderPass)
+    VkRenderPass* pRenderPass,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateRenderPass encode");
-    mImpl->log("start vkCreateRenderPass");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkRenderPassCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -6377,90 +6831,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkRenderPassCreateInfo*)pool->alloc(sizeof(const VkRenderPassCreateInfo));
-        deepcopy_VkRenderPassCreateInfo(pool, pCreateInfo, (VkRenderPassCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkRenderPassCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkRenderPassCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkRenderPassCreateInfo(mImpl->resources(), (VkRenderPassCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkRenderPassCreateInfo(sResourceTracker, (VkRenderPassCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_452;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_452, 1);
-        countingStream->write((uint64_t*)&cgen_var_452, 1 * 8);
-        marshal_VkRenderPassCreateInfo(countingStream, (VkRenderPassCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkRenderPassCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRenderPassCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_453 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_453);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_454;
-        countingStream->handleMapping()->mapHandles_VkRenderPass_u64(pRenderPass, &cgen_var_454, 1);
-        countingStream->write((uint64_t*)&cgen_var_454, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateRenderPass = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateRenderPass = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateRenderPass);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateRenderPass = OP_vkCreateRenderPass;
-    stream->write(&opcode_vkCreateRenderPass, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateRenderPass, sizeof(uint32_t));
-    uint64_t cgen_var_455;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_455, 1);
-    stream->write((uint64_t*)&cgen_var_455, 1 * 8);
-    marshal_VkRenderPassCreateInfo(stream, (VkRenderPassCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateRenderPass, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateRenderPass, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkRenderPassCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRenderPassCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_456 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_456);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_457;
-    stream->handleMapping()->mapHandles_VkRenderPass_u64(pRenderPass, &cgen_var_457, 1);
-    stream->write((uint64_t*)&cgen_var_457, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateRenderPass readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_458;
-    stream->read((uint64_t*)&cgen_var_458, 8);
-    stream->handleMapping()->mapHandles_u64_VkRenderPass(&cgen_var_458, (VkRenderPass*)pRenderPass, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pRenderPass));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkRenderPass(&cgen_var_3, (VkRenderPass*)pRenderPass, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateRenderPass returnUnmarshal");
     VkResult vkCreateRenderPass_VkResult_return = (VkResult)0;
     stream->read(&vkCreateRenderPass_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateRenderPass");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateRenderPass_VkResult_return;
 }
 
 void VkEncoder::vkDestroyRenderPass(
     VkDevice device,
     VkRenderPass renderPass,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyRenderPass encode");
-    mImpl->log("start vkDestroyRenderPass");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkRenderPass local_renderPass;
     VkAllocationCallbacks* local_pAllocator;
@@ -6470,116 +6928,130 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_459;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_459, 1);
-        countingStream->write((uint64_t*)&cgen_var_459, 1 * 8);
-        uint64_t cgen_var_460;
-        countingStream->handleMapping()->mapHandles_VkRenderPass_u64(&local_renderPass, &cgen_var_460, 1);
-        countingStream->write((uint64_t*)&cgen_var_460, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_461 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_461);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyRenderPass = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyRenderPass = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyRenderPass);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyRenderPass = OP_vkDestroyRenderPass;
-    stream->write(&opcode_vkDestroyRenderPass, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyRenderPass, sizeof(uint32_t));
-    uint64_t cgen_var_462;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_462, 1);
-    stream->write((uint64_t*)&cgen_var_462, 1 * 8);
-    uint64_t cgen_var_463;
-    stream->handleMapping()->mapHandles_VkRenderPass_u64(&local_renderPass, &cgen_var_463, 1);
-    stream->write((uint64_t*)&cgen_var_463, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyRenderPass, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyRenderPass, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkRenderPass((*&local_renderPass));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_464 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_464);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyRenderPass readParams");
-    AEMU_SCOPED_TRACE("vkDestroyRenderPass returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkRenderPass((VkRenderPass*)&renderPass);
-    mImpl->log("finish vkDestroyRenderPass");;
+    sResourceTracker->destroyMapping()->mapHandles_VkRenderPass((VkRenderPass*)&renderPass);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetRenderAreaGranularity(
     VkDevice device,
     VkRenderPass renderPass,
-    VkExtent2D* pGranularity)
+    VkExtent2D* pGranularity,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetRenderAreaGranularity encode");
-    mImpl->log("start vkGetRenderAreaGranularity");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkRenderPass local_renderPass;
     local_device = device;
     local_renderPass = renderPass;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_465;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_465, 1);
-        countingStream->write((uint64_t*)&cgen_var_465, 1 * 8);
-        uint64_t cgen_var_466;
-        countingStream->handleMapping()->mapHandles_VkRenderPass_u64(&local_renderPass, &cgen_var_466, 1);
-        countingStream->write((uint64_t*)&cgen_var_466, 1 * 8);
-        marshal_VkExtent2D(countingStream, (VkExtent2D*)(pGranularity));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        count_VkExtent2D(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExtent2D*)(pGranularity), countPtr);
     }
-    uint32_t packetSize_vkGetRenderAreaGranularity = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetRenderAreaGranularity = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetRenderAreaGranularity);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetRenderAreaGranularity = OP_vkGetRenderAreaGranularity;
-    stream->write(&opcode_vkGetRenderAreaGranularity, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetRenderAreaGranularity, sizeof(uint32_t));
-    uint64_t cgen_var_467;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_467, 1);
-    stream->write((uint64_t*)&cgen_var_467, 1 * 8);
-    uint64_t cgen_var_468;
-    stream->handleMapping()->mapHandles_VkRenderPass_u64(&local_renderPass, &cgen_var_468, 1);
-    stream->write((uint64_t*)&cgen_var_468, 1 * 8);
-    marshal_VkExtent2D(stream, (VkExtent2D*)(pGranularity));
-    AEMU_SCOPED_TRACE("vkGetRenderAreaGranularity readParams");
-    unmarshal_VkExtent2D(stream, (VkExtent2D*)(pGranularity));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetRenderAreaGranularity, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetRenderAreaGranularity, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkRenderPass((*&local_renderPass));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkExtent2D(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExtent2D*)(pGranularity), streamPtrPtr);
+    unmarshal_VkExtent2D(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExtent2D*)(pGranularity));
     if (pGranularity)
     {
-        transform_fromhost_VkExtent2D(mImpl->resources(), (VkExtent2D*)(pGranularity));
+        transform_fromhost_VkExtent2D(sResourceTracker, (VkExtent2D*)(pGranularity));
     }
-    AEMU_SCOPED_TRACE("vkGetRenderAreaGranularity returnUnmarshal");
-    mImpl->log("finish vkGetRenderAreaGranularity");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkCreateCommandPool(
     VkDevice device,
     const VkCommandPoolCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkCommandPool* pCommandPool)
+    VkCommandPool* pCommandPool,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateCommandPool encode");
-    mImpl->log("start vkCreateCommandPool");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkCommandPoolCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -6588,90 +7060,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkCommandPoolCreateInfo*)pool->alloc(sizeof(const VkCommandPoolCreateInfo));
-        deepcopy_VkCommandPoolCreateInfo(pool, pCreateInfo, (VkCommandPoolCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkCommandPoolCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkCommandPoolCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkCommandPoolCreateInfo(mImpl->resources(), (VkCommandPoolCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkCommandPoolCreateInfo(sResourceTracker, (VkCommandPoolCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_469;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_469, 1);
-        countingStream->write((uint64_t*)&cgen_var_469, 1 * 8);
-        marshal_VkCommandPoolCreateInfo(countingStream, (VkCommandPoolCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkCommandPoolCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCommandPoolCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_470 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_470);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_471;
-        countingStream->handleMapping()->mapHandles_VkCommandPool_u64(pCommandPool, &cgen_var_471, 1);
-        countingStream->write((uint64_t*)&cgen_var_471, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateCommandPool = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateCommandPool = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateCommandPool);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateCommandPool = OP_vkCreateCommandPool;
-    stream->write(&opcode_vkCreateCommandPool, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateCommandPool, sizeof(uint32_t));
-    uint64_t cgen_var_472;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_472, 1);
-    stream->write((uint64_t*)&cgen_var_472, 1 * 8);
-    marshal_VkCommandPoolCreateInfo(stream, (VkCommandPoolCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateCommandPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateCommandPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkCommandPoolCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCommandPoolCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_473 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_473);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_474;
-    stream->handleMapping()->mapHandles_VkCommandPool_u64(pCommandPool, &cgen_var_474, 1);
-    stream->write((uint64_t*)&cgen_var_474, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateCommandPool readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_475;
-    stream->read((uint64_t*)&cgen_var_475, 8);
-    stream->handleMapping()->mapHandles_u64_VkCommandPool(&cgen_var_475, (VkCommandPool*)pCommandPool, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pCommandPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkCommandPool(&cgen_var_3, (VkCommandPool*)pCommandPool, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateCommandPool returnUnmarshal");
     VkResult vkCreateCommandPool_VkResult_return = (VkResult)0;
     stream->read(&vkCreateCommandPool_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateCommandPool");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateCommandPool_VkResult_return;
 }
 
 void VkEncoder::vkDestroyCommandPool(
     VkDevice device,
     VkCommandPool commandPool,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyCommandPool encode");
-    mImpl->log("start vkDestroyCommandPool");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkCommandPool local_commandPool;
     VkAllocationCallbacks* local_pAllocator;
@@ -6681,118 +7157,130 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_476;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_476, 1);
-        countingStream->write((uint64_t*)&cgen_var_476, 1 * 8);
-        uint64_t cgen_var_477;
-        countingStream->handleMapping()->mapHandles_VkCommandPool_u64(&local_commandPool, &cgen_var_477, 1);
-        countingStream->write((uint64_t*)&cgen_var_477, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_478 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_478);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyCommandPool = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyCommandPool = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyCommandPool);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyCommandPool = OP_vkDestroyCommandPool;
-    stream->write(&opcode_vkDestroyCommandPool, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyCommandPool, sizeof(uint32_t));
-    uint64_t cgen_var_479;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_479, 1);
-    stream->write((uint64_t*)&cgen_var_479, 1 * 8);
-    uint64_t cgen_var_480;
-    stream->handleMapping()->mapHandles_VkCommandPool_u64(&local_commandPool, &cgen_var_480, 1);
-    stream->write((uint64_t*)&cgen_var_480, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyCommandPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyCommandPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkCommandPool((*&local_commandPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_481 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_481);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyCommandPool readParams");
-    AEMU_SCOPED_TRACE("vkDestroyCommandPool returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkCommandPool((VkCommandPool*)&commandPool);
-    mImpl->log("finish vkDestroyCommandPool");;
+    sResourceTracker->destroyMapping()->mapHandles_VkCommandPool((VkCommandPool*)&commandPool);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkResetCommandPool(
     VkDevice device,
     VkCommandPool commandPool,
-    VkCommandPoolResetFlags flags)
+    VkCommandPoolResetFlags flags,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkResetCommandPool encode");
-    mImpl->log("start vkResetCommandPool");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkCommandPool local_commandPool;
     VkCommandPoolResetFlags local_flags;
     local_device = device;
     local_commandPool = commandPool;
     local_flags = flags;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_482;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_482, 1);
-        countingStream->write((uint64_t*)&cgen_var_482, 1 * 8);
-        uint64_t cgen_var_483;
-        countingStream->handleMapping()->mapHandles_VkCommandPool_u64(&local_commandPool, &cgen_var_483, 1);
-        countingStream->write((uint64_t*)&cgen_var_483, 1 * 8);
-        countingStream->write((VkCommandPoolResetFlags*)&local_flags, sizeof(VkCommandPoolResetFlags));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkCommandPoolResetFlags);
     }
-    uint32_t packetSize_vkResetCommandPool = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkResetCommandPool = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkResetCommandPool);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkResetCommandPool = OP_vkResetCommandPool;
-    stream->write(&opcode_vkResetCommandPool, sizeof(uint32_t));
-    stream->write(&packetSize_vkResetCommandPool, sizeof(uint32_t));
-    uint64_t cgen_var_484;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_484, 1);
-    stream->write((uint64_t*)&cgen_var_484, 1 * 8);
-    uint64_t cgen_var_485;
-    stream->handleMapping()->mapHandles_VkCommandPool_u64(&local_commandPool, &cgen_var_485, 1);
-    stream->write((uint64_t*)&cgen_var_485, 1 * 8);
-    stream->write((VkCommandPoolResetFlags*)&local_flags, sizeof(VkCommandPoolResetFlags));
-    AEMU_SCOPED_TRACE("vkResetCommandPool readParams");
-    AEMU_SCOPED_TRACE("vkResetCommandPool returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkResetCommandPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkResetCommandPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkCommandPool((*&local_commandPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkCommandPoolResetFlags*)&local_flags, sizeof(VkCommandPoolResetFlags));
+    *streamPtrPtr += sizeof(VkCommandPoolResetFlags);
     VkResult vkResetCommandPool_VkResult_return = (VkResult)0;
     stream->read(&vkResetCommandPool_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkResetCommandPool");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkResetCommandPool_VkResult_return;
 }
 
 VkResult VkEncoder::vkAllocateCommandBuffers(
     VkDevice device,
     const VkCommandBufferAllocateInfo* pAllocateInfo,
-    VkCommandBuffer* pCommandBuffers)
+    VkCommandBuffer* pCommandBuffers,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkAllocateCommandBuffers encode");
-    mImpl->log("start vkAllocateCommandBuffers");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkCommandBufferAllocateInfo* local_pAllocateInfo;
     local_device = device;
@@ -6800,61 +7288,66 @@
     if (pAllocateInfo)
     {
         local_pAllocateInfo = (VkCommandBufferAllocateInfo*)pool->alloc(sizeof(const VkCommandBufferAllocateInfo));
-        deepcopy_VkCommandBufferAllocateInfo(pool, pAllocateInfo, (VkCommandBufferAllocateInfo*)(local_pAllocateInfo));
+        deepcopy_VkCommandBufferAllocateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocateInfo, (VkCommandBufferAllocateInfo*)(local_pAllocateInfo));
     }
     if (local_pAllocateInfo)
     {
-        transform_tohost_VkCommandBufferAllocateInfo(mImpl->resources(), (VkCommandBufferAllocateInfo*)(local_pAllocateInfo));
+        transform_tohost_VkCommandBufferAllocateInfo(sResourceTracker, (VkCommandBufferAllocateInfo*)(local_pAllocateInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_486;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_486, 1);
-        countingStream->write((uint64_t*)&cgen_var_486, 1 * 8);
-        marshal_VkCommandBufferAllocateInfo(countingStream, (VkCommandBufferAllocateInfo*)(local_pAllocateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkCommandBufferAllocateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCommandBufferAllocateInfo*)(local_pAllocateInfo), countPtr);
         if (pAllocateInfo->commandBufferCount)
         {
-            uint64_t* cgen_var_487;
-            countingStream->alloc((void**)&cgen_var_487, pAllocateInfo->commandBufferCount * 8);
-            countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(pCommandBuffers, cgen_var_487, pAllocateInfo->commandBufferCount);
-            countingStream->write((uint64_t*)cgen_var_487, pAllocateInfo->commandBufferCount * 8);
+            *countPtr += pAllocateInfo->commandBufferCount * 8;
         }
     }
-    uint32_t packetSize_vkAllocateCommandBuffers = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkAllocateCommandBuffers = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkAllocateCommandBuffers);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkAllocateCommandBuffers = OP_vkAllocateCommandBuffers;
-    stream->write(&opcode_vkAllocateCommandBuffers, sizeof(uint32_t));
-    stream->write(&packetSize_vkAllocateCommandBuffers, sizeof(uint32_t));
-    uint64_t cgen_var_488;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_488, 1);
-    stream->write((uint64_t*)&cgen_var_488, 1 * 8);
-    marshal_VkCommandBufferAllocateInfo(stream, (VkCommandBufferAllocateInfo*)(local_pAllocateInfo));
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkAllocateCommandBuffers, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkAllocateCommandBuffers, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkCommandBufferAllocateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCommandBufferAllocateInfo*)(local_pAllocateInfo), streamPtrPtr);
+    /* is handle, possibly out */;
     if (pAllocateInfo->commandBufferCount)
     {
-        uint64_t* cgen_var_489;
-        stream->alloc((void**)&cgen_var_489, pAllocateInfo->commandBufferCount * 8);
-        stream->handleMapping()->mapHandles_VkCommandBuffer_u64(pCommandBuffers, cgen_var_489, pAllocateInfo->commandBufferCount);
-        stream->write((uint64_t*)cgen_var_489, pAllocateInfo->commandBufferCount * 8);
+        uint8_t* cgen_var_1_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < pAllocateInfo->commandBufferCount; ++k)
+        {
+            uint64_t tmpval = (uint64_t)(pCommandBuffers[k]);
+            memcpy(cgen_var_1_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * pAllocateInfo->commandBufferCount;
     }
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkAllocateCommandBuffers readParams");
-    stream->setHandleMapping(resources->createMapping());
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
     if (pAllocateInfo->commandBufferCount)
     {
-        uint64_t* cgen_var_490;
-        stream->alloc((void**)&cgen_var_490, pAllocateInfo->commandBufferCount * 8);
-        stream->read((uint64_t*)cgen_var_490, pAllocateInfo->commandBufferCount * 8);
-        stream->handleMapping()->mapHandles_u64_VkCommandBuffer(cgen_var_490, (VkCommandBuffer*)pCommandBuffers, pAllocateInfo->commandBufferCount);
+        uint64_t* cgen_var_2;
+        stream->alloc((void**)&cgen_var_2, pAllocateInfo->commandBufferCount * 8);
+        stream->read((uint64_t*)cgen_var_2, pAllocateInfo->commandBufferCount * 8);
+        stream->handleMapping()->mapHandles_u64_VkCommandBuffer(cgen_var_2, (VkCommandBuffer*)pCommandBuffers, pAllocateInfo->commandBufferCount);
     }
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkAllocateCommandBuffers returnUnmarshal");
     VkResult vkAllocateCommandBuffers_VkResult_return = (VkResult)0;
     stream->read(&vkAllocateCommandBuffers_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkAllocateCommandBuffers");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkAllocateCommandBuffers_VkResult_return;
 }
 
@@ -6862,16 +7355,14 @@
     VkDevice device,
     VkCommandPool commandPool,
     uint32_t commandBufferCount,
-    const VkCommandBuffer* pCommandBuffers)
+    const VkCommandBuffer* pCommandBuffers,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkFreeCommandBuffers encode");
-    mImpl->log("start vkFreeCommandBuffers");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkCommandPool local_commandPool;
     uint32_t local_commandBufferCount;
@@ -6879,80 +7370,86 @@
     local_device = device;
     local_commandPool = commandPool;
     local_commandBufferCount = commandBufferCount;
-    local_pCommandBuffers = nullptr;
-    if (pCommandBuffers)
+    // Avoiding deepcopy for pCommandBuffers
+    local_pCommandBuffers = (VkCommandBuffer*)pCommandBuffers;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pCommandBuffers = (VkCommandBuffer*)pool->dupArray(pCommandBuffers, ((commandBufferCount)) * sizeof(const VkCommandBuffer));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_491;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_491, 1);
-        countingStream->write((uint64_t*)&cgen_var_491, 1 * 8);
-        uint64_t cgen_var_492;
-        countingStream->handleMapping()->mapHandles_VkCommandPool_u64(&local_commandPool, &cgen_var_492, 1);
-        countingStream->write((uint64_t*)&cgen_var_492, 1 * 8);
-        countingStream->write((uint32_t*)&local_commandBufferCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         // WARNING PTR CHECK
-        uint64_t cgen_var_493 = (uint64_t)(uintptr_t)local_pCommandBuffers;
-        countingStream->putBe64(cgen_var_493);
+        *countPtr += 8;
         if (local_pCommandBuffers)
         {
             if (((commandBufferCount)))
             {
-                uint64_t* cgen_var_494;
-                countingStream->alloc((void**)&cgen_var_494, ((commandBufferCount)) * 8);
-                countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(local_pCommandBuffers, cgen_var_494, ((commandBufferCount)));
-                countingStream->write((uint64_t*)cgen_var_494, ((commandBufferCount)) * 8);
+                *countPtr += ((commandBufferCount)) * 8;
             }
         }
     }
-    uint32_t packetSize_vkFreeCommandBuffers = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkFreeCommandBuffers = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkFreeCommandBuffers);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkFreeCommandBuffers = OP_vkFreeCommandBuffers;
-    stream->write(&opcode_vkFreeCommandBuffers, sizeof(uint32_t));
-    stream->write(&packetSize_vkFreeCommandBuffers, sizeof(uint32_t));
-    uint64_t cgen_var_495;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_495, 1);
-    stream->write((uint64_t*)&cgen_var_495, 1 * 8);
-    uint64_t cgen_var_496;
-    stream->handleMapping()->mapHandles_VkCommandPool_u64(&local_commandPool, &cgen_var_496, 1);
-    stream->write((uint64_t*)&cgen_var_496, 1 * 8);
-    stream->write((uint32_t*)&local_commandBufferCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkFreeCommandBuffers, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkFreeCommandBuffers, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkCommandPool((*&local_commandPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_commandBufferCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     // WARNING PTR CHECK
-    uint64_t cgen_var_497 = (uint64_t)(uintptr_t)local_pCommandBuffers;
-    stream->putBe64(cgen_var_497);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pCommandBuffers;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pCommandBuffers)
     {
         if (((commandBufferCount)))
         {
-            uint64_t* cgen_var_498;
-            stream->alloc((void**)&cgen_var_498, ((commandBufferCount)) * 8);
-            stream->handleMapping()->mapHandles_VkCommandBuffer_u64(local_pCommandBuffers, cgen_var_498, ((commandBufferCount)));
-            stream->write((uint64_t*)cgen_var_498, ((commandBufferCount)) * 8);
+            uint8_t* cgen_var_2_0_ptr = (uint8_t*)(*streamPtrPtr);
+            for (uint32_t k = 0; k < ((commandBufferCount)); ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkCommandBuffer(local_pCommandBuffers[k]);
+                memcpy(cgen_var_2_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+            *streamPtrPtr += 8 * ((commandBufferCount));
         }
     }
-    AEMU_SCOPED_TRACE("vkFreeCommandBuffers readParams");
-    AEMU_SCOPED_TRACE("vkFreeCommandBuffers returnUnmarshal");
     if (pCommandBuffers)
     {
-        resources->destroyMapping()->mapHandles_VkCommandBuffer((VkCommandBuffer*)pCommandBuffers, ((commandBufferCount)));
+        sResourceTracker->destroyMapping()->mapHandles_VkCommandBuffer((VkCommandBuffer*)pCommandBuffers, ((commandBufferCount)));
     }
-    mImpl->log("finish vkFreeCommandBuffers");;
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkBeginCommandBuffer(
     VkCommandBuffer commandBuffer,
-    const VkCommandBufferBeginInfo* pBeginInfo)
+    const VkCommandBufferBeginInfo* pBeginInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkBeginCommandBuffer encode");
-    mImpl->log("start vkBeginCommandBuffer");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkCommandBufferBeginInfo* local_pBeginInfo;
     local_commandBuffer = commandBuffer;
@@ -6960,180 +7457,205 @@
     if (pBeginInfo)
     {
         local_pBeginInfo = (VkCommandBufferBeginInfo*)pool->alloc(sizeof(const VkCommandBufferBeginInfo));
-        deepcopy_VkCommandBufferBeginInfo(pool, pBeginInfo, (VkCommandBufferBeginInfo*)(local_pBeginInfo));
+        deepcopy_VkCommandBufferBeginInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pBeginInfo, (VkCommandBufferBeginInfo*)(local_pBeginInfo));
     }
     if (local_pBeginInfo)
     {
-        transform_tohost_VkCommandBufferBeginInfo(mImpl->resources(), (VkCommandBufferBeginInfo*)(local_pBeginInfo));
+        transform_tohost_VkCommandBufferBeginInfo(sResourceTracker, (VkCommandBufferBeginInfo*)(local_pBeginInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_499;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_499, 1);
-        countingStream->write((uint64_t*)&cgen_var_499, 1 * 8);
-        marshal_VkCommandBufferBeginInfo(countingStream, (VkCommandBufferBeginInfo*)(local_pBeginInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkCommandBufferBeginInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCommandBufferBeginInfo*)(local_pBeginInfo), countPtr);
     }
-    uint32_t packetSize_vkBeginCommandBuffer = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkBeginCommandBuffer = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkBeginCommandBuffer -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkBeginCommandBuffer);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkBeginCommandBuffer = OP_vkBeginCommandBuffer;
-    stream->write(&opcode_vkBeginCommandBuffer, sizeof(uint32_t));
-    stream->write(&packetSize_vkBeginCommandBuffer, sizeof(uint32_t));
-    uint64_t cgen_var_500;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_500, 1);
-    stream->write((uint64_t*)&cgen_var_500, 1 * 8);
-    marshal_VkCommandBufferBeginInfo(stream, (VkCommandBufferBeginInfo*)(local_pBeginInfo));
-    AEMU_SCOPED_TRACE("vkBeginCommandBuffer readParams");
-    AEMU_SCOPED_TRACE("vkBeginCommandBuffer returnUnmarshal");
+    memcpy(streamPtr, &opcode_vkBeginCommandBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkBeginCommandBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkCommandBufferBeginInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCommandBufferBeginInfo*)(local_pBeginInfo), streamPtrPtr);
     VkResult vkBeginCommandBuffer_VkResult_return = (VkResult)0;
     stream->read(&vkBeginCommandBuffer_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkBeginCommandBuffer");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkBeginCommandBuffer_VkResult_return;
 }
 
 VkResult VkEncoder::vkEndCommandBuffer(
-    VkCommandBuffer commandBuffer)
+    VkCommandBuffer commandBuffer,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkEndCommandBuffer encode");
-    mImpl->log("start vkEndCommandBuffer");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     local_commandBuffer = commandBuffer;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_501;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_501, 1);
-        countingStream->write((uint64_t*)&cgen_var_501, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkEndCommandBuffer = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkEndCommandBuffer = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkEndCommandBuffer -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkEndCommandBuffer);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkEndCommandBuffer = OP_vkEndCommandBuffer;
-    stream->write(&opcode_vkEndCommandBuffer, sizeof(uint32_t));
-    stream->write(&packetSize_vkEndCommandBuffer, sizeof(uint32_t));
-    uint64_t cgen_var_502;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_502, 1);
-    stream->write((uint64_t*)&cgen_var_502, 1 * 8);
-    AEMU_SCOPED_TRACE("vkEndCommandBuffer readParams");
-    AEMU_SCOPED_TRACE("vkEndCommandBuffer returnUnmarshal");
+    memcpy(streamPtr, &opcode_vkEndCommandBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkEndCommandBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
     VkResult vkEndCommandBuffer_VkResult_return = (VkResult)0;
     stream->read(&vkEndCommandBuffer_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkEndCommandBuffer");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkEndCommandBuffer_VkResult_return;
 }
 
 VkResult VkEncoder::vkResetCommandBuffer(
     VkCommandBuffer commandBuffer,
-    VkCommandBufferResetFlags flags)
+    VkCommandBufferResetFlags flags,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkResetCommandBuffer encode");
-    mImpl->log("start vkResetCommandBuffer");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkCommandBufferResetFlags local_flags;
     local_commandBuffer = commandBuffer;
     local_flags = flags;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_503;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_503, 1);
-        countingStream->write((uint64_t*)&cgen_var_503, 1 * 8);
-        countingStream->write((VkCommandBufferResetFlags*)&local_flags, sizeof(VkCommandBufferResetFlags));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkCommandBufferResetFlags);
     }
-    uint32_t packetSize_vkResetCommandBuffer = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkResetCommandBuffer = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkResetCommandBuffer -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkResetCommandBuffer);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkResetCommandBuffer = OP_vkResetCommandBuffer;
-    stream->write(&opcode_vkResetCommandBuffer, sizeof(uint32_t));
-    stream->write(&packetSize_vkResetCommandBuffer, sizeof(uint32_t));
-    uint64_t cgen_var_504;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_504, 1);
-    stream->write((uint64_t*)&cgen_var_504, 1 * 8);
-    stream->write((VkCommandBufferResetFlags*)&local_flags, sizeof(VkCommandBufferResetFlags));
-    AEMU_SCOPED_TRACE("vkResetCommandBuffer readParams");
-    AEMU_SCOPED_TRACE("vkResetCommandBuffer returnUnmarshal");
+    memcpy(streamPtr, &opcode_vkResetCommandBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkResetCommandBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkCommandBufferResetFlags*)&local_flags, sizeof(VkCommandBufferResetFlags));
+    *streamPtrPtr += sizeof(VkCommandBufferResetFlags);
     VkResult vkResetCommandBuffer_VkResult_return = (VkResult)0;
     stream->read(&vkResetCommandBuffer_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkResetCommandBuffer");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkResetCommandBuffer_VkResult_return;
 }
 
 void VkEncoder::vkCmdBindPipeline(
     VkCommandBuffer commandBuffer,
     VkPipelineBindPoint pipelineBindPoint,
-    VkPipeline pipeline)
+    VkPipeline pipeline,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdBindPipeline encode");
-    mImpl->log("start vkCmdBindPipeline");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkPipelineBindPoint local_pipelineBindPoint;
     VkPipeline local_pipeline;
     local_commandBuffer = commandBuffer;
     local_pipelineBindPoint = pipelineBindPoint;
     local_pipeline = pipeline;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_505;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_505, 1);
-        countingStream->write((uint64_t*)&cgen_var_505, 1 * 8);
-        countingStream->write((VkPipelineBindPoint*)&local_pipelineBindPoint, sizeof(VkPipelineBindPoint));
-        uint64_t cgen_var_506;
-        countingStream->handleMapping()->mapHandles_VkPipeline_u64(&local_pipeline, &cgen_var_506, 1);
-        countingStream->write((uint64_t*)&cgen_var_506, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkPipelineBindPoint);
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkCmdBindPipeline = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdBindPipeline = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBindPipeline -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBindPipeline);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdBindPipeline = OP_vkCmdBindPipeline;
-    stream->write(&opcode_vkCmdBindPipeline, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdBindPipeline, sizeof(uint32_t));
-    uint64_t cgen_var_507;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_507, 1);
-    stream->write((uint64_t*)&cgen_var_507, 1 * 8);
-    stream->write((VkPipelineBindPoint*)&local_pipelineBindPoint, sizeof(VkPipelineBindPoint));
-    uint64_t cgen_var_508;
-    stream->handleMapping()->mapHandles_VkPipeline_u64(&local_pipeline, &cgen_var_508, 1);
-    stream->write((uint64_t*)&cgen_var_508, 1 * 8);
-    AEMU_SCOPED_TRACE("vkCmdBindPipeline readParams");
-    AEMU_SCOPED_TRACE("vkCmdBindPipeline returnUnmarshal");
-    mImpl->log("finish vkCmdBindPipeline");;
+    memcpy(streamPtr, &opcode_vkCmdBindPipeline, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBindPipeline, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkPipelineBindPoint*)&local_pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    *streamPtrPtr += sizeof(VkPipelineBindPoint);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPipeline((*&local_pipeline));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdSetViewport(
     VkCommandBuffer commandBuffer,
     uint32_t firstViewport,
     uint32_t viewportCount,
-    const VkViewport* pViewports)
+    const VkViewport* pViewports,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdSetViewport encode");
-    mImpl->log("start vkCmdSetViewport");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     uint32_t local_firstViewport;
     uint32_t local_viewportCount;
@@ -7147,61 +7669,71 @@
         local_pViewports = (VkViewport*)pool->alloc(((viewportCount)) * sizeof(const VkViewport));
         for (uint32_t i = 0; i < (uint32_t)((viewportCount)); ++i)
         {
-            deepcopy_VkViewport(pool, pViewports + i, (VkViewport*)(local_pViewports + i));
+            deepcopy_VkViewport(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pViewports + i, (VkViewport*)(local_pViewports + i));
         }
     }
     if (local_pViewports)
     {
         for (uint32_t i = 0; i < (uint32_t)((viewportCount)); ++i)
         {
-            transform_tohost_VkViewport(mImpl->resources(), (VkViewport*)(local_pViewports + i));
+            transform_tohost_VkViewport(sResourceTracker, (VkViewport*)(local_pViewports + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_509;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_509, 1);
-        countingStream->write((uint64_t*)&cgen_var_509, 1 * 8);
-        countingStream->write((uint32_t*)&local_firstViewport, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_viewportCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((viewportCount)); ++i)
         {
-            marshal_VkViewport(countingStream, (VkViewport*)(local_pViewports + i));
+            count_VkViewport(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkViewport*)(local_pViewports + i), countPtr);
         }
     }
-    uint32_t packetSize_vkCmdSetViewport = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdSetViewport = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetViewport -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetViewport);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdSetViewport = OP_vkCmdSetViewport;
-    stream->write(&opcode_vkCmdSetViewport, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdSetViewport, sizeof(uint32_t));
-    uint64_t cgen_var_510;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_510, 1);
-    stream->write((uint64_t*)&cgen_var_510, 1 * 8);
-    stream->write((uint32_t*)&local_firstViewport, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_viewportCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdSetViewport, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetViewport, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstViewport, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_viewportCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((viewportCount)); ++i)
     {
-        marshal_VkViewport(stream, (VkViewport*)(local_pViewports + i));
+        reservedmarshal_VkViewport(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkViewport*)(local_pViewports + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkCmdSetViewport readParams");
-    AEMU_SCOPED_TRACE("vkCmdSetViewport returnUnmarshal");
-    mImpl->log("finish vkCmdSetViewport");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdSetScissor(
     VkCommandBuffer commandBuffer,
     uint32_t firstScissor,
     uint32_t scissorCount,
-    const VkRect2D* pScissors)
+    const VkRect2D* pScissors,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdSetScissor encode");
-    mImpl->log("start vkCmdSetScissor");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     uint32_t local_firstScissor;
     uint32_t local_scissorCount;
@@ -7215,98 +7747,117 @@
         local_pScissors = (VkRect2D*)pool->alloc(((scissorCount)) * sizeof(const VkRect2D));
         for (uint32_t i = 0; i < (uint32_t)((scissorCount)); ++i)
         {
-            deepcopy_VkRect2D(pool, pScissors + i, (VkRect2D*)(local_pScissors + i));
+            deepcopy_VkRect2D(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pScissors + i, (VkRect2D*)(local_pScissors + i));
         }
     }
     if (local_pScissors)
     {
         for (uint32_t i = 0; i < (uint32_t)((scissorCount)); ++i)
         {
-            transform_tohost_VkRect2D(mImpl->resources(), (VkRect2D*)(local_pScissors + i));
+            transform_tohost_VkRect2D(sResourceTracker, (VkRect2D*)(local_pScissors + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_511;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_511, 1);
-        countingStream->write((uint64_t*)&cgen_var_511, 1 * 8);
-        countingStream->write((uint32_t*)&local_firstScissor, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_scissorCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((scissorCount)); ++i)
         {
-            marshal_VkRect2D(countingStream, (VkRect2D*)(local_pScissors + i));
+            count_VkRect2D(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRect2D*)(local_pScissors + i), countPtr);
         }
     }
-    uint32_t packetSize_vkCmdSetScissor = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdSetScissor = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetScissor -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetScissor);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdSetScissor = OP_vkCmdSetScissor;
-    stream->write(&opcode_vkCmdSetScissor, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdSetScissor, sizeof(uint32_t));
-    uint64_t cgen_var_512;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_512, 1);
-    stream->write((uint64_t*)&cgen_var_512, 1 * 8);
-    stream->write((uint32_t*)&local_firstScissor, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_scissorCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdSetScissor, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetScissor, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstScissor, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_scissorCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((scissorCount)); ++i)
     {
-        marshal_VkRect2D(stream, (VkRect2D*)(local_pScissors + i));
+        reservedmarshal_VkRect2D(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRect2D*)(local_pScissors + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkCmdSetScissor readParams");
-    AEMU_SCOPED_TRACE("vkCmdSetScissor returnUnmarshal");
-    mImpl->log("finish vkCmdSetScissor");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdSetLineWidth(
     VkCommandBuffer commandBuffer,
-    float lineWidth)
+    float lineWidth,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdSetLineWidth encode");
-    mImpl->log("start vkCmdSetLineWidth");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     float local_lineWidth;
     local_commandBuffer = commandBuffer;
     local_lineWidth = lineWidth;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_513;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_513, 1);
-        countingStream->write((uint64_t*)&cgen_var_513, 1 * 8);
-        countingStream->write((float*)&local_lineWidth, sizeof(float));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(float);
     }
-    uint32_t packetSize_vkCmdSetLineWidth = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdSetLineWidth = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetLineWidth -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetLineWidth);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdSetLineWidth = OP_vkCmdSetLineWidth;
-    stream->write(&opcode_vkCmdSetLineWidth, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdSetLineWidth, sizeof(uint32_t));
-    uint64_t cgen_var_514;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_514, 1);
-    stream->write((uint64_t*)&cgen_var_514, 1 * 8);
-    stream->write((float*)&local_lineWidth, sizeof(float));
-    AEMU_SCOPED_TRACE("vkCmdSetLineWidth readParams");
-    AEMU_SCOPED_TRACE("vkCmdSetLineWidth returnUnmarshal");
-    mImpl->log("finish vkCmdSetLineWidth");;
+    memcpy(streamPtr, &opcode_vkCmdSetLineWidth, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetLineWidth, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (float*)&local_lineWidth, sizeof(float));
+    *streamPtrPtr += sizeof(float);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdSetDepthBias(
     VkCommandBuffer commandBuffer,
     float depthBiasConstantFactor,
     float depthBiasClamp,
-    float depthBiasSlopeFactor)
+    float depthBiasSlopeFactor,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdSetDepthBias encode");
-    mImpl->log("start vkCmdSetDepthBias");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     float local_depthBiasConstantFactor;
     float local_depthBiasClamp;
@@ -7315,234 +7866,296 @@
     local_depthBiasConstantFactor = depthBiasConstantFactor;
     local_depthBiasClamp = depthBiasClamp;
     local_depthBiasSlopeFactor = depthBiasSlopeFactor;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_515;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_515, 1);
-        countingStream->write((uint64_t*)&cgen_var_515, 1 * 8);
-        countingStream->write((float*)&local_depthBiasConstantFactor, sizeof(float));
-        countingStream->write((float*)&local_depthBiasClamp, sizeof(float));
-        countingStream->write((float*)&local_depthBiasSlopeFactor, sizeof(float));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(float);
+        *countPtr += sizeof(float);
+        *countPtr += sizeof(float);
     }
-    uint32_t packetSize_vkCmdSetDepthBias = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdSetDepthBias = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetDepthBias -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetDepthBias);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdSetDepthBias = OP_vkCmdSetDepthBias;
-    stream->write(&opcode_vkCmdSetDepthBias, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdSetDepthBias, sizeof(uint32_t));
-    uint64_t cgen_var_516;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_516, 1);
-    stream->write((uint64_t*)&cgen_var_516, 1 * 8);
-    stream->write((float*)&local_depthBiasConstantFactor, sizeof(float));
-    stream->write((float*)&local_depthBiasClamp, sizeof(float));
-    stream->write((float*)&local_depthBiasSlopeFactor, sizeof(float));
-    AEMU_SCOPED_TRACE("vkCmdSetDepthBias readParams");
-    AEMU_SCOPED_TRACE("vkCmdSetDepthBias returnUnmarshal");
-    mImpl->log("finish vkCmdSetDepthBias");;
+    memcpy(streamPtr, &opcode_vkCmdSetDepthBias, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetDepthBias, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (float*)&local_depthBiasConstantFactor, sizeof(float));
+    *streamPtrPtr += sizeof(float);
+    memcpy(*streamPtrPtr, (float*)&local_depthBiasClamp, sizeof(float));
+    *streamPtrPtr += sizeof(float);
+    memcpy(*streamPtrPtr, (float*)&local_depthBiasSlopeFactor, sizeof(float));
+    *streamPtrPtr += sizeof(float);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdSetBlendConstants(
     VkCommandBuffer commandBuffer,
-    const float blendConstants[4])
+    const float blendConstants[4],
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdSetBlendConstants encode");
-    mImpl->log("start vkCmdSetBlendConstants");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     float local_blendConstants[4];
     local_commandBuffer = commandBuffer;
     memcpy(local_blendConstants, blendConstants, 4 * sizeof(const float));
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_517;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_517, 1);
-        countingStream->write((uint64_t*)&cgen_var_517, 1 * 8);
-        countingStream->write((float*)local_blendConstants, 4 * sizeof(float));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += 4 * sizeof(float);
     }
-    uint32_t packetSize_vkCmdSetBlendConstants = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdSetBlendConstants = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetBlendConstants -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetBlendConstants);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdSetBlendConstants = OP_vkCmdSetBlendConstants;
-    stream->write(&opcode_vkCmdSetBlendConstants, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdSetBlendConstants, sizeof(uint32_t));
-    uint64_t cgen_var_518;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_518, 1);
-    stream->write((uint64_t*)&cgen_var_518, 1 * 8);
-    stream->write((float*)local_blendConstants, 4 * sizeof(float));
-    AEMU_SCOPED_TRACE("vkCmdSetBlendConstants readParams");
-    AEMU_SCOPED_TRACE("vkCmdSetBlendConstants returnUnmarshal");
-    mImpl->log("finish vkCmdSetBlendConstants");;
+    memcpy(streamPtr, &opcode_vkCmdSetBlendConstants, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetBlendConstants, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (float*)local_blendConstants, 4 * sizeof(float));
+    *streamPtrPtr += 4 * sizeof(float);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdSetDepthBounds(
     VkCommandBuffer commandBuffer,
     float minDepthBounds,
-    float maxDepthBounds)
+    float maxDepthBounds,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdSetDepthBounds encode");
-    mImpl->log("start vkCmdSetDepthBounds");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     float local_minDepthBounds;
     float local_maxDepthBounds;
     local_commandBuffer = commandBuffer;
     local_minDepthBounds = minDepthBounds;
     local_maxDepthBounds = maxDepthBounds;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_519;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_519, 1);
-        countingStream->write((uint64_t*)&cgen_var_519, 1 * 8);
-        countingStream->write((float*)&local_minDepthBounds, sizeof(float));
-        countingStream->write((float*)&local_maxDepthBounds, sizeof(float));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(float);
+        *countPtr += sizeof(float);
     }
-    uint32_t packetSize_vkCmdSetDepthBounds = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdSetDepthBounds = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetDepthBounds -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetDepthBounds);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdSetDepthBounds = OP_vkCmdSetDepthBounds;
-    stream->write(&opcode_vkCmdSetDepthBounds, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdSetDepthBounds, sizeof(uint32_t));
-    uint64_t cgen_var_520;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_520, 1);
-    stream->write((uint64_t*)&cgen_var_520, 1 * 8);
-    stream->write((float*)&local_minDepthBounds, sizeof(float));
-    stream->write((float*)&local_maxDepthBounds, sizeof(float));
-    AEMU_SCOPED_TRACE("vkCmdSetDepthBounds readParams");
-    AEMU_SCOPED_TRACE("vkCmdSetDepthBounds returnUnmarshal");
-    mImpl->log("finish vkCmdSetDepthBounds");;
+    memcpy(streamPtr, &opcode_vkCmdSetDepthBounds, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetDepthBounds, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (float*)&local_minDepthBounds, sizeof(float));
+    *streamPtrPtr += sizeof(float);
+    memcpy(*streamPtrPtr, (float*)&local_maxDepthBounds, sizeof(float));
+    *streamPtrPtr += sizeof(float);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdSetStencilCompareMask(
     VkCommandBuffer commandBuffer,
     VkStencilFaceFlags faceMask,
-    uint32_t compareMask)
+    uint32_t compareMask,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdSetStencilCompareMask encode");
-    mImpl->log("start vkCmdSetStencilCompareMask");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkStencilFaceFlags local_faceMask;
     uint32_t local_compareMask;
     local_commandBuffer = commandBuffer;
     local_faceMask = faceMask;
     local_compareMask = compareMask;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_521;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_521, 1);
-        countingStream->write((uint64_t*)&cgen_var_521, 1 * 8);
-        countingStream->write((VkStencilFaceFlags*)&local_faceMask, sizeof(VkStencilFaceFlags));
-        countingStream->write((uint32_t*)&local_compareMask, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkStencilFaceFlags);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdSetStencilCompareMask = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdSetStencilCompareMask = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetStencilCompareMask -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetStencilCompareMask);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdSetStencilCompareMask = OP_vkCmdSetStencilCompareMask;
-    stream->write(&opcode_vkCmdSetStencilCompareMask, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdSetStencilCompareMask, sizeof(uint32_t));
-    uint64_t cgen_var_522;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_522, 1);
-    stream->write((uint64_t*)&cgen_var_522, 1 * 8);
-    stream->write((VkStencilFaceFlags*)&local_faceMask, sizeof(VkStencilFaceFlags));
-    stream->write((uint32_t*)&local_compareMask, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdSetStencilCompareMask readParams");
-    AEMU_SCOPED_TRACE("vkCmdSetStencilCompareMask returnUnmarshal");
-    mImpl->log("finish vkCmdSetStencilCompareMask");;
+    memcpy(streamPtr, &opcode_vkCmdSetStencilCompareMask, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetStencilCompareMask, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkStencilFaceFlags*)&local_faceMask, sizeof(VkStencilFaceFlags));
+    *streamPtrPtr += sizeof(VkStencilFaceFlags);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_compareMask, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdSetStencilWriteMask(
     VkCommandBuffer commandBuffer,
     VkStencilFaceFlags faceMask,
-    uint32_t writeMask)
+    uint32_t writeMask,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdSetStencilWriteMask encode");
-    mImpl->log("start vkCmdSetStencilWriteMask");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkStencilFaceFlags local_faceMask;
     uint32_t local_writeMask;
     local_commandBuffer = commandBuffer;
     local_faceMask = faceMask;
     local_writeMask = writeMask;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_523;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_523, 1);
-        countingStream->write((uint64_t*)&cgen_var_523, 1 * 8);
-        countingStream->write((VkStencilFaceFlags*)&local_faceMask, sizeof(VkStencilFaceFlags));
-        countingStream->write((uint32_t*)&local_writeMask, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkStencilFaceFlags);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdSetStencilWriteMask = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdSetStencilWriteMask = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetStencilWriteMask -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetStencilWriteMask);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdSetStencilWriteMask = OP_vkCmdSetStencilWriteMask;
-    stream->write(&opcode_vkCmdSetStencilWriteMask, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdSetStencilWriteMask, sizeof(uint32_t));
-    uint64_t cgen_var_524;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_524, 1);
-    stream->write((uint64_t*)&cgen_var_524, 1 * 8);
-    stream->write((VkStencilFaceFlags*)&local_faceMask, sizeof(VkStencilFaceFlags));
-    stream->write((uint32_t*)&local_writeMask, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdSetStencilWriteMask readParams");
-    AEMU_SCOPED_TRACE("vkCmdSetStencilWriteMask returnUnmarshal");
-    mImpl->log("finish vkCmdSetStencilWriteMask");;
+    memcpy(streamPtr, &opcode_vkCmdSetStencilWriteMask, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetStencilWriteMask, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkStencilFaceFlags*)&local_faceMask, sizeof(VkStencilFaceFlags));
+    *streamPtrPtr += sizeof(VkStencilFaceFlags);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_writeMask, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdSetStencilReference(
     VkCommandBuffer commandBuffer,
     VkStencilFaceFlags faceMask,
-    uint32_t reference)
+    uint32_t reference,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdSetStencilReference encode");
-    mImpl->log("start vkCmdSetStencilReference");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkStencilFaceFlags local_faceMask;
     uint32_t local_reference;
     local_commandBuffer = commandBuffer;
     local_faceMask = faceMask;
     local_reference = reference;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_525;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_525, 1);
-        countingStream->write((uint64_t*)&cgen_var_525, 1 * 8);
-        countingStream->write((VkStencilFaceFlags*)&local_faceMask, sizeof(VkStencilFaceFlags));
-        countingStream->write((uint32_t*)&local_reference, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkStencilFaceFlags);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdSetStencilReference = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdSetStencilReference = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetStencilReference -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetStencilReference);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdSetStencilReference = OP_vkCmdSetStencilReference;
-    stream->write(&opcode_vkCmdSetStencilReference, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdSetStencilReference, sizeof(uint32_t));
-    uint64_t cgen_var_526;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_526, 1);
-    stream->write((uint64_t*)&cgen_var_526, 1 * 8);
-    stream->write((VkStencilFaceFlags*)&local_faceMask, sizeof(VkStencilFaceFlags));
-    stream->write((uint32_t*)&local_reference, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdSetStencilReference readParams");
-    AEMU_SCOPED_TRACE("vkCmdSetStencilReference returnUnmarshal");
-    mImpl->log("finish vkCmdSetStencilReference");;
+    memcpy(streamPtr, &opcode_vkCmdSetStencilReference, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetStencilReference, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkStencilFaceFlags*)&local_faceMask, sizeof(VkStencilFaceFlags));
+    *streamPtrPtr += sizeof(VkStencilFaceFlags);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_reference, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdBindDescriptorSets(
@@ -7553,16 +8166,14 @@
     uint32_t descriptorSetCount,
     const VkDescriptorSet* pDescriptorSets,
     uint32_t dynamicOffsetCount,
-    const uint32_t* pDynamicOffsets)
+    const uint32_t* pDynamicOffsets,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdBindDescriptorSets encode");
-    mImpl->log("start vkCmdBindDescriptorSets");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkPipelineBindPoint local_pipelineBindPoint;
     VkPipelineLayout local_layout;
@@ -7576,80 +8187,87 @@
     local_layout = layout;
     local_firstSet = firstSet;
     local_descriptorSetCount = descriptorSetCount;
-    local_pDescriptorSets = nullptr;
-    if (pDescriptorSets)
-    {
-        local_pDescriptorSets = (VkDescriptorSet*)pool->dupArray(pDescriptorSets, ((descriptorSetCount)) * sizeof(const VkDescriptorSet));
-    }
+    // Avoiding deepcopy for pDescriptorSets
+    local_pDescriptorSets = (VkDescriptorSet*)pDescriptorSets;
     local_dynamicOffsetCount = dynamicOffsetCount;
-    local_pDynamicOffsets = nullptr;
-    if (pDynamicOffsets)
+    // Avoiding deepcopy for pDynamicOffsets
+    local_pDynamicOffsets = (uint32_t*)pDynamicOffsets;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pDynamicOffsets = (uint32_t*)pool->dupArray(pDynamicOffsets, ((dynamicOffsetCount)) * sizeof(const uint32_t));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_527;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_527, 1);
-        countingStream->write((uint64_t*)&cgen_var_527, 1 * 8);
-        countingStream->write((VkPipelineBindPoint*)&local_pipelineBindPoint, sizeof(VkPipelineBindPoint));
-        uint64_t cgen_var_528;
-        countingStream->handleMapping()->mapHandles_VkPipelineLayout_u64(&local_layout, &cgen_var_528, 1);
-        countingStream->write((uint64_t*)&cgen_var_528, 1 * 8);
-        countingStream->write((uint32_t*)&local_firstSet, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_descriptorSetCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkPipelineBindPoint);
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
         if (((descriptorSetCount)))
         {
-            uint64_t* cgen_var_529;
-            countingStream->alloc((void**)&cgen_var_529, ((descriptorSetCount)) * 8);
-            countingStream->handleMapping()->mapHandles_VkDescriptorSet_u64(local_pDescriptorSets, cgen_var_529, ((descriptorSetCount)));
-            countingStream->write((uint64_t*)cgen_var_529, ((descriptorSetCount)) * 8);
+            *countPtr += ((descriptorSetCount)) * 8;
         }
-        countingStream->write((uint32_t*)&local_dynamicOffsetCount, sizeof(uint32_t));
-        countingStream->write((uint32_t*)local_pDynamicOffsets, ((dynamicOffsetCount)) * sizeof(uint32_t));
+        *countPtr += sizeof(uint32_t);
+        *countPtr += ((dynamicOffsetCount)) * sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdBindDescriptorSets = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdBindDescriptorSets = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBindDescriptorSets -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBindDescriptorSets);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdBindDescriptorSets = OP_vkCmdBindDescriptorSets;
-    stream->write(&opcode_vkCmdBindDescriptorSets, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdBindDescriptorSets, sizeof(uint32_t));
-    uint64_t cgen_var_530;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_530, 1);
-    stream->write((uint64_t*)&cgen_var_530, 1 * 8);
-    stream->write((VkPipelineBindPoint*)&local_pipelineBindPoint, sizeof(VkPipelineBindPoint));
-    uint64_t cgen_var_531;
-    stream->handleMapping()->mapHandles_VkPipelineLayout_u64(&local_layout, &cgen_var_531, 1);
-    stream->write((uint64_t*)&cgen_var_531, 1 * 8);
-    stream->write((uint32_t*)&local_firstSet, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_descriptorSetCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdBindDescriptorSets, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBindDescriptorSets, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkPipelineBindPoint*)&local_pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    *streamPtrPtr += sizeof(VkPipelineBindPoint);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPipelineLayout((*&local_layout));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstSet, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_descriptorSetCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     if (((descriptorSetCount)))
     {
-        uint64_t* cgen_var_532;
-        stream->alloc((void**)&cgen_var_532, ((descriptorSetCount)) * 8);
-        stream->handleMapping()->mapHandles_VkDescriptorSet_u64(local_pDescriptorSets, cgen_var_532, ((descriptorSetCount)));
-        stream->write((uint64_t*)cgen_var_532, ((descriptorSetCount)) * 8);
+        uint8_t* cgen_var_1_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((descriptorSetCount)); ++k)
+        {
+            uint64_t tmpval = get_host_u64_VkDescriptorSet(local_pDescriptorSets[k]);
+            memcpy(cgen_var_1_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((descriptorSetCount));
     }
-    stream->write((uint32_t*)&local_dynamicOffsetCount, sizeof(uint32_t));
-    stream->write((uint32_t*)local_pDynamicOffsets, ((dynamicOffsetCount)) * sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdBindDescriptorSets readParams");
-    AEMU_SCOPED_TRACE("vkCmdBindDescriptorSets returnUnmarshal");
-    mImpl->log("finish vkCmdBindDescriptorSets");;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_dynamicOffsetCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)local_pDynamicOffsets, ((dynamicOffsetCount)) * sizeof(uint32_t));
+    *streamPtrPtr += ((dynamicOffsetCount)) * sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdBindIndexBuffer(
     VkCommandBuffer commandBuffer,
     VkBuffer buffer,
     VkDeviceSize offset,
-    VkIndexType indexType)
+    VkIndexType indexType,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdBindIndexBuffer encode");
-    mImpl->log("start vkCmdBindIndexBuffer");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkBuffer local_buffer;
     VkDeviceSize local_offset;
@@ -7658,33 +8276,45 @@
     local_buffer = buffer;
     local_offset = offset;
     local_indexType = indexType;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_533;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_533, 1);
-        countingStream->write((uint64_t*)&cgen_var_533, 1 * 8);
-        uint64_t cgen_var_534;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_534, 1);
-        countingStream->write((uint64_t*)&cgen_var_534, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
-        countingStream->write((VkIndexType*)&local_indexType, sizeof(VkIndexType));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(VkIndexType);
     }
-    uint32_t packetSize_vkCmdBindIndexBuffer = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdBindIndexBuffer = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBindIndexBuffer -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBindIndexBuffer);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdBindIndexBuffer = OP_vkCmdBindIndexBuffer;
-    stream->write(&opcode_vkCmdBindIndexBuffer, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdBindIndexBuffer, sizeof(uint32_t));
-    uint64_t cgen_var_535;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_535, 1);
-    stream->write((uint64_t*)&cgen_var_535, 1 * 8);
-    uint64_t cgen_var_536;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_536, 1);
-    stream->write((uint64_t*)&cgen_var_536, 1 * 8);
-    stream->write((VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
-    stream->write((VkIndexType*)&local_indexType, sizeof(VkIndexType));
-    AEMU_SCOPED_TRACE("vkCmdBindIndexBuffer readParams");
-    AEMU_SCOPED_TRACE("vkCmdBindIndexBuffer returnUnmarshal");
-    mImpl->log("finish vkCmdBindIndexBuffer");;
+    memcpy(streamPtr, &opcode_vkCmdBindIndexBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBindIndexBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_buffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (VkIndexType*)&local_indexType, sizeof(VkIndexType));
+    *streamPtrPtr += sizeof(VkIndexType);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdBindVertexBuffers(
@@ -7692,16 +8322,14 @@
     uint32_t firstBinding,
     uint32_t bindingCount,
     const VkBuffer* pBuffers,
-    const VkDeviceSize* pOffsets)
+    const VkDeviceSize* pOffsets,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdBindVertexBuffers encode");
-    mImpl->log("start vkCmdBindVertexBuffers");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     uint32_t local_firstBinding;
     uint32_t local_bindingCount;
@@ -7710,53 +8338,60 @@
     local_commandBuffer = commandBuffer;
     local_firstBinding = firstBinding;
     local_bindingCount = bindingCount;
-    local_pBuffers = nullptr;
-    if (pBuffers)
+    // Avoiding deepcopy for pBuffers
+    local_pBuffers = (VkBuffer*)pBuffers;
+    // Avoiding deepcopy for pOffsets
+    local_pOffsets = (VkDeviceSize*)pOffsets;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pBuffers = (VkBuffer*)pool->dupArray(pBuffers, ((bindingCount)) * sizeof(const VkBuffer));
-    }
-    local_pOffsets = nullptr;
-    if (pOffsets)
-    {
-        local_pOffsets = (VkDeviceSize*)pool->dupArray(pOffsets, ((bindingCount)) * sizeof(const VkDeviceSize));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_537;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_537, 1);
-        countingStream->write((uint64_t*)&cgen_var_537, 1 * 8);
-        countingStream->write((uint32_t*)&local_firstBinding, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_bindingCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
         if (((bindingCount)))
         {
-            uint64_t* cgen_var_538;
-            countingStream->alloc((void**)&cgen_var_538, ((bindingCount)) * 8);
-            countingStream->handleMapping()->mapHandles_VkBuffer_u64(local_pBuffers, cgen_var_538, ((bindingCount)));
-            countingStream->write((uint64_t*)cgen_var_538, ((bindingCount)) * 8);
+            *countPtr += ((bindingCount)) * 8;
         }
-        countingStream->write((VkDeviceSize*)local_pOffsets, ((bindingCount)) * sizeof(VkDeviceSize));
+        *countPtr += ((bindingCount)) * sizeof(VkDeviceSize);
     }
-    uint32_t packetSize_vkCmdBindVertexBuffers = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdBindVertexBuffers = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBindVertexBuffers -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBindVertexBuffers);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdBindVertexBuffers = OP_vkCmdBindVertexBuffers;
-    stream->write(&opcode_vkCmdBindVertexBuffers, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdBindVertexBuffers, sizeof(uint32_t));
-    uint64_t cgen_var_539;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_539, 1);
-    stream->write((uint64_t*)&cgen_var_539, 1 * 8);
-    stream->write((uint32_t*)&local_firstBinding, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_bindingCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdBindVertexBuffers, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBindVertexBuffers, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstBinding, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_bindingCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     if (((bindingCount)))
     {
-        uint64_t* cgen_var_540;
-        stream->alloc((void**)&cgen_var_540, ((bindingCount)) * 8);
-        stream->handleMapping()->mapHandles_VkBuffer_u64(local_pBuffers, cgen_var_540, ((bindingCount)));
-        stream->write((uint64_t*)cgen_var_540, ((bindingCount)) * 8);
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((bindingCount)); ++k)
+        {
+            uint64_t tmpval = get_host_u64_VkBuffer(local_pBuffers[k]);
+            memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((bindingCount));
     }
-    stream->write((VkDeviceSize*)local_pOffsets, ((bindingCount)) * sizeof(VkDeviceSize));
-    AEMU_SCOPED_TRACE("vkCmdBindVertexBuffers readParams");
-    AEMU_SCOPED_TRACE("vkCmdBindVertexBuffers returnUnmarshal");
-    mImpl->log("finish vkCmdBindVertexBuffers");;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)local_pOffsets, ((bindingCount)) * sizeof(VkDeviceSize));
+    *streamPtrPtr += ((bindingCount)) * sizeof(VkDeviceSize);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdDraw(
@@ -7764,16 +8399,14 @@
     uint32_t vertexCount,
     uint32_t instanceCount,
     uint32_t firstVertex,
-    uint32_t firstInstance)
+    uint32_t firstInstance,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdDraw encode");
-    mImpl->log("start vkCmdDraw");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     uint32_t local_vertexCount;
     uint32_t local_instanceCount;
@@ -7784,31 +8417,45 @@
     local_instanceCount = instanceCount;
     local_firstVertex = firstVertex;
     local_firstInstance = firstInstance;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_541;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_541, 1);
-        countingStream->write((uint64_t*)&cgen_var_541, 1 * 8);
-        countingStream->write((uint32_t*)&local_vertexCount, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_instanceCount, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_firstVertex, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_firstInstance, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdDraw = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdDraw = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDraw -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDraw);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdDraw = OP_vkCmdDraw;
-    stream->write(&opcode_vkCmdDraw, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdDraw, sizeof(uint32_t));
-    uint64_t cgen_var_542;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_542, 1);
-    stream->write((uint64_t*)&cgen_var_542, 1 * 8);
-    stream->write((uint32_t*)&local_vertexCount, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_instanceCount, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_firstVertex, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_firstInstance, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdDraw readParams");
-    AEMU_SCOPED_TRACE("vkCmdDraw returnUnmarshal");
-    mImpl->log("finish vkCmdDraw");;
+    memcpy(streamPtr, &opcode_vkCmdDraw, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDraw, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_vertexCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_instanceCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstVertex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstInstance, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdDrawIndexed(
@@ -7817,16 +8464,14 @@
     uint32_t instanceCount,
     uint32_t firstIndex,
     int32_t vertexOffset,
-    uint32_t firstInstance)
+    uint32_t firstInstance,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdDrawIndexed encode");
-    mImpl->log("start vkCmdDrawIndexed");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     uint32_t local_indexCount;
     uint32_t local_instanceCount;
@@ -7839,33 +8484,48 @@
     local_firstIndex = firstIndex;
     local_vertexOffset = vertexOffset;
     local_firstInstance = firstInstance;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_543;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_543, 1);
-        countingStream->write((uint64_t*)&cgen_var_543, 1 * 8);
-        countingStream->write((uint32_t*)&local_indexCount, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_instanceCount, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_firstIndex, sizeof(uint32_t));
-        countingStream->write((int32_t*)&local_vertexOffset, sizeof(int32_t));
-        countingStream->write((uint32_t*)&local_firstInstance, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(int32_t);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdDrawIndexed = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdDrawIndexed = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDrawIndexed -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDrawIndexed);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdDrawIndexed = OP_vkCmdDrawIndexed;
-    stream->write(&opcode_vkCmdDrawIndexed, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdDrawIndexed, sizeof(uint32_t));
-    uint64_t cgen_var_544;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_544, 1);
-    stream->write((uint64_t*)&cgen_var_544, 1 * 8);
-    stream->write((uint32_t*)&local_indexCount, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_instanceCount, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_firstIndex, sizeof(uint32_t));
-    stream->write((int32_t*)&local_vertexOffset, sizeof(int32_t));
-    stream->write((uint32_t*)&local_firstInstance, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdDrawIndexed readParams");
-    AEMU_SCOPED_TRACE("vkCmdDrawIndexed returnUnmarshal");
-    mImpl->log("finish vkCmdDrawIndexed");;
+    memcpy(streamPtr, &opcode_vkCmdDrawIndexed, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDrawIndexed, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_indexCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_instanceCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (int32_t*)&local_vertexOffset, sizeof(int32_t));
+    *streamPtrPtr += sizeof(int32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstInstance, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdDrawIndirect(
@@ -7873,16 +8533,14 @@
     VkBuffer buffer,
     VkDeviceSize offset,
     uint32_t drawCount,
-    uint32_t stride)
+    uint32_t stride,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdDrawIndirect encode");
-    mImpl->log("start vkCmdDrawIndirect");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkBuffer local_buffer;
     VkDeviceSize local_offset;
@@ -7893,35 +8551,48 @@
     local_offset = offset;
     local_drawCount = drawCount;
     local_stride = stride;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_545;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_545, 1);
-        countingStream->write((uint64_t*)&cgen_var_545, 1 * 8);
-        uint64_t cgen_var_546;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_546, 1);
-        countingStream->write((uint64_t*)&cgen_var_546, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
-        countingStream->write((uint32_t*)&local_drawCount, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_stride, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdDrawIndirect = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdDrawIndirect = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDrawIndirect -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDrawIndirect);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdDrawIndirect = OP_vkCmdDrawIndirect;
-    stream->write(&opcode_vkCmdDrawIndirect, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdDrawIndirect, sizeof(uint32_t));
-    uint64_t cgen_var_547;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_547, 1);
-    stream->write((uint64_t*)&cgen_var_547, 1 * 8);
-    uint64_t cgen_var_548;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_548, 1);
-    stream->write((uint64_t*)&cgen_var_548, 1 * 8);
-    stream->write((VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
-    stream->write((uint32_t*)&local_drawCount, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_stride, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdDrawIndirect readParams");
-    AEMU_SCOPED_TRACE("vkCmdDrawIndirect returnUnmarshal");
-    mImpl->log("finish vkCmdDrawIndirect");;
+    memcpy(streamPtr, &opcode_vkCmdDrawIndirect, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDrawIndirect, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_buffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_drawCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_stride, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdDrawIndexedIndirect(
@@ -7929,16 +8600,14 @@
     VkBuffer buffer,
     VkDeviceSize offset,
     uint32_t drawCount,
-    uint32_t stride)
+    uint32_t stride,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdDrawIndexedIndirect encode");
-    mImpl->log("start vkCmdDrawIndexedIndirect");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkBuffer local_buffer;
     VkDeviceSize local_offset;
@@ -7949,51 +8618,62 @@
     local_offset = offset;
     local_drawCount = drawCount;
     local_stride = stride;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_549;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_549, 1);
-        countingStream->write((uint64_t*)&cgen_var_549, 1 * 8);
-        uint64_t cgen_var_550;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_550, 1);
-        countingStream->write((uint64_t*)&cgen_var_550, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
-        countingStream->write((uint32_t*)&local_drawCount, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_stride, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdDrawIndexedIndirect = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdDrawIndexedIndirect = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDrawIndexedIndirect -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDrawIndexedIndirect);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdDrawIndexedIndirect = OP_vkCmdDrawIndexedIndirect;
-    stream->write(&opcode_vkCmdDrawIndexedIndirect, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdDrawIndexedIndirect, sizeof(uint32_t));
-    uint64_t cgen_var_551;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_551, 1);
-    stream->write((uint64_t*)&cgen_var_551, 1 * 8);
-    uint64_t cgen_var_552;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_552, 1);
-    stream->write((uint64_t*)&cgen_var_552, 1 * 8);
-    stream->write((VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
-    stream->write((uint32_t*)&local_drawCount, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_stride, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdDrawIndexedIndirect readParams");
-    AEMU_SCOPED_TRACE("vkCmdDrawIndexedIndirect returnUnmarshal");
-    mImpl->log("finish vkCmdDrawIndexedIndirect");;
+    memcpy(streamPtr, &opcode_vkCmdDrawIndexedIndirect, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDrawIndexedIndirect, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_buffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_drawCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_stride, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdDispatch(
     VkCommandBuffer commandBuffer,
     uint32_t groupCountX,
     uint32_t groupCountY,
-    uint32_t groupCountZ)
+    uint32_t groupCountZ,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdDispatch encode");
-    mImpl->log("start vkCmdDispatch");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     uint32_t local_groupCountX;
     uint32_t local_groupCountY;
@@ -8002,75 +8682,97 @@
     local_groupCountX = groupCountX;
     local_groupCountY = groupCountY;
     local_groupCountZ = groupCountZ;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_553;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_553, 1);
-        countingStream->write((uint64_t*)&cgen_var_553, 1 * 8);
-        countingStream->write((uint32_t*)&local_groupCountX, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_groupCountY, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_groupCountZ, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdDispatch = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdDispatch = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDispatch -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDispatch);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdDispatch = OP_vkCmdDispatch;
-    stream->write(&opcode_vkCmdDispatch, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdDispatch, sizeof(uint32_t));
-    uint64_t cgen_var_554;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_554, 1);
-    stream->write((uint64_t*)&cgen_var_554, 1 * 8);
-    stream->write((uint32_t*)&local_groupCountX, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_groupCountY, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_groupCountZ, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdDispatch readParams");
-    AEMU_SCOPED_TRACE("vkCmdDispatch returnUnmarshal");
-    mImpl->log("finish vkCmdDispatch");;
+    memcpy(streamPtr, &opcode_vkCmdDispatch, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDispatch, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_groupCountX, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_groupCountY, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_groupCountZ, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdDispatchIndirect(
     VkCommandBuffer commandBuffer,
     VkBuffer buffer,
-    VkDeviceSize offset)
+    VkDeviceSize offset,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdDispatchIndirect encode");
-    mImpl->log("start vkCmdDispatchIndirect");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkBuffer local_buffer;
     VkDeviceSize local_offset;
     local_commandBuffer = commandBuffer;
     local_buffer = buffer;
     local_offset = offset;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_555;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_555, 1);
-        countingStream->write((uint64_t*)&cgen_var_555, 1 * 8);
-        uint64_t cgen_var_556;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_556, 1);
-        countingStream->write((uint64_t*)&cgen_var_556, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
     }
-    uint32_t packetSize_vkCmdDispatchIndirect = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdDispatchIndirect = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDispatchIndirect -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDispatchIndirect);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdDispatchIndirect = OP_vkCmdDispatchIndirect;
-    stream->write(&opcode_vkCmdDispatchIndirect, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdDispatchIndirect, sizeof(uint32_t));
-    uint64_t cgen_var_557;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_557, 1);
-    stream->write((uint64_t*)&cgen_var_557, 1 * 8);
-    uint64_t cgen_var_558;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_558, 1);
-    stream->write((uint64_t*)&cgen_var_558, 1 * 8);
-    stream->write((VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
-    AEMU_SCOPED_TRACE("vkCmdDispatchIndirect readParams");
-    AEMU_SCOPED_TRACE("vkCmdDispatchIndirect returnUnmarshal");
-    mImpl->log("finish vkCmdDispatchIndirect");;
+    memcpy(streamPtr, &opcode_vkCmdDispatchIndirect, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDispatchIndirect, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_buffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdCopyBuffer(
@@ -8078,16 +8780,14 @@
     VkBuffer srcBuffer,
     VkBuffer dstBuffer,
     uint32_t regionCount,
-    const VkBufferCopy* pRegions)
+    const VkBufferCopy* pRegions,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdCopyBuffer encode");
-    mImpl->log("start vkCmdCopyBuffer");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkBuffer local_srcBuffer;
     VkBuffer local_dstBuffer;
@@ -8103,55 +8803,66 @@
         local_pRegions = (VkBufferCopy*)pool->alloc(((regionCount)) * sizeof(const VkBufferCopy));
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            deepcopy_VkBufferCopy(pool, pRegions + i, (VkBufferCopy*)(local_pRegions + i));
+            deepcopy_VkBufferCopy(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pRegions + i, (VkBufferCopy*)(local_pRegions + i));
         }
     }
     if (local_pRegions)
     {
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            transform_tohost_VkBufferCopy(mImpl->resources(), (VkBufferCopy*)(local_pRegions + i));
+            transform_tohost_VkBufferCopy(sResourceTracker, (VkBufferCopy*)(local_pRegions + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_559;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_559, 1);
-        countingStream->write((uint64_t*)&cgen_var_559, 1 * 8);
-        uint64_t cgen_var_560;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_srcBuffer, &cgen_var_560, 1);
-        countingStream->write((uint64_t*)&cgen_var_560, 1 * 8);
-        uint64_t cgen_var_561;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_dstBuffer, &cgen_var_561, 1);
-        countingStream->write((uint64_t*)&cgen_var_561, 1 * 8);
-        countingStream->write((uint32_t*)&local_regionCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            marshal_VkBufferCopy(countingStream, (VkBufferCopy*)(local_pRegions + i));
+            count_VkBufferCopy(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferCopy*)(local_pRegions + i), countPtr);
         }
     }
-    uint32_t packetSize_vkCmdCopyBuffer = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdCopyBuffer = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdCopyBuffer -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdCopyBuffer);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdCopyBuffer = OP_vkCmdCopyBuffer;
-    stream->write(&opcode_vkCmdCopyBuffer, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdCopyBuffer, sizeof(uint32_t));
-    uint64_t cgen_var_562;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_562, 1);
-    stream->write((uint64_t*)&cgen_var_562, 1 * 8);
-    uint64_t cgen_var_563;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_srcBuffer, &cgen_var_563, 1);
-    stream->write((uint64_t*)&cgen_var_563, 1 * 8);
-    uint64_t cgen_var_564;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_dstBuffer, &cgen_var_564, 1);
-    stream->write((uint64_t*)&cgen_var_564, 1 * 8);
-    stream->write((uint32_t*)&local_regionCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdCopyBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdCopyBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_srcBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&local_dstBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_regionCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
     {
-        marshal_VkBufferCopy(stream, (VkBufferCopy*)(local_pRegions + i));
+        reservedmarshal_VkBufferCopy(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferCopy*)(local_pRegions + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkCmdCopyBuffer readParams");
-    AEMU_SCOPED_TRACE("vkCmdCopyBuffer returnUnmarshal");
-    mImpl->log("finish vkCmdCopyBuffer");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdCopyImage(
@@ -8161,16 +8872,14 @@
     VkImage dstImage,
     VkImageLayout dstImageLayout,
     uint32_t regionCount,
-    const VkImageCopy* pRegions)
+    const VkImageCopy* pRegions,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdCopyImage encode");
-    mImpl->log("start vkCmdCopyImage");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkImage local_srcImage;
     VkImageLayout local_srcImageLayout;
@@ -8190,59 +8899,72 @@
         local_pRegions = (VkImageCopy*)pool->alloc(((regionCount)) * sizeof(const VkImageCopy));
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            deepcopy_VkImageCopy(pool, pRegions + i, (VkImageCopy*)(local_pRegions + i));
+            deepcopy_VkImageCopy(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pRegions + i, (VkImageCopy*)(local_pRegions + i));
         }
     }
     if (local_pRegions)
     {
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            transform_tohost_VkImageCopy(mImpl->resources(), (VkImageCopy*)(local_pRegions + i));
+            transform_tohost_VkImageCopy(sResourceTracker, (VkImageCopy*)(local_pRegions + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_565;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_565, 1);
-        countingStream->write((uint64_t*)&cgen_var_565, 1 * 8);
-        uint64_t cgen_var_566;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_srcImage, &cgen_var_566, 1);
-        countingStream->write((uint64_t*)&cgen_var_566, 1 * 8);
-        countingStream->write((VkImageLayout*)&local_srcImageLayout, sizeof(VkImageLayout));
-        uint64_t cgen_var_567;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_dstImage, &cgen_var_567, 1);
-        countingStream->write((uint64_t*)&cgen_var_567, 1 * 8);
-        countingStream->write((VkImageLayout*)&local_dstImageLayout, sizeof(VkImageLayout));
-        countingStream->write((uint32_t*)&local_regionCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkImageLayout);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkImageLayout);
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            marshal_VkImageCopy(countingStream, (VkImageCopy*)(local_pRegions + i));
+            count_VkImageCopy(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageCopy*)(local_pRegions + i), countPtr);
         }
     }
-    uint32_t packetSize_vkCmdCopyImage = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdCopyImage = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdCopyImage -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdCopyImage);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdCopyImage = OP_vkCmdCopyImage;
-    stream->write(&opcode_vkCmdCopyImage, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdCopyImage, sizeof(uint32_t));
-    uint64_t cgen_var_568;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_568, 1);
-    stream->write((uint64_t*)&cgen_var_568, 1 * 8);
-    uint64_t cgen_var_569;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_srcImage, &cgen_var_569, 1);
-    stream->write((uint64_t*)&cgen_var_569, 1 * 8);
-    stream->write((VkImageLayout*)&local_srcImageLayout, sizeof(VkImageLayout));
-    uint64_t cgen_var_570;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_dstImage, &cgen_var_570, 1);
-    stream->write((uint64_t*)&cgen_var_570, 1 * 8);
-    stream->write((VkImageLayout*)&local_dstImageLayout, sizeof(VkImageLayout));
-    stream->write((uint32_t*)&local_regionCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdCopyImage, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdCopyImage, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&local_srcImage));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkImageLayout*)&local_srcImageLayout, sizeof(VkImageLayout));
+    *streamPtrPtr += sizeof(VkImageLayout);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImage((*&local_dstImage));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkImageLayout*)&local_dstImageLayout, sizeof(VkImageLayout));
+    *streamPtrPtr += sizeof(VkImageLayout);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_regionCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
     {
-        marshal_VkImageCopy(stream, (VkImageCopy*)(local_pRegions + i));
+        reservedmarshal_VkImageCopy(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageCopy*)(local_pRegions + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkCmdCopyImage readParams");
-    AEMU_SCOPED_TRACE("vkCmdCopyImage returnUnmarshal");
-    mImpl->log("finish vkCmdCopyImage");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdBlitImage(
@@ -8253,16 +8975,14 @@
     VkImageLayout dstImageLayout,
     uint32_t regionCount,
     const VkImageBlit* pRegions,
-    VkFilter filter)
+    VkFilter filter,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdBlitImage encode");
-    mImpl->log("start vkCmdBlitImage");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkImage local_srcImage;
     VkImageLayout local_srcImageLayout;
@@ -8283,7 +9003,7 @@
         local_pRegions = (VkImageBlit*)pool->alloc(((regionCount)) * sizeof(const VkImageBlit));
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            deepcopy_VkImageBlit(pool, pRegions + i, (VkImageBlit*)(local_pRegions + i));
+            deepcopy_VkImageBlit(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pRegions + i, (VkImageBlit*)(local_pRegions + i));
         }
     }
     local_filter = filter;
@@ -8291,54 +9011,68 @@
     {
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            transform_tohost_VkImageBlit(mImpl->resources(), (VkImageBlit*)(local_pRegions + i));
+            transform_tohost_VkImageBlit(sResourceTracker, (VkImageBlit*)(local_pRegions + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_571;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_571, 1);
-        countingStream->write((uint64_t*)&cgen_var_571, 1 * 8);
-        uint64_t cgen_var_572;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_srcImage, &cgen_var_572, 1);
-        countingStream->write((uint64_t*)&cgen_var_572, 1 * 8);
-        countingStream->write((VkImageLayout*)&local_srcImageLayout, sizeof(VkImageLayout));
-        uint64_t cgen_var_573;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_dstImage, &cgen_var_573, 1);
-        countingStream->write((uint64_t*)&cgen_var_573, 1 * 8);
-        countingStream->write((VkImageLayout*)&local_dstImageLayout, sizeof(VkImageLayout));
-        countingStream->write((uint32_t*)&local_regionCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkImageLayout);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkImageLayout);
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            marshal_VkImageBlit(countingStream, (VkImageBlit*)(local_pRegions + i));
+            count_VkImageBlit(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageBlit*)(local_pRegions + i), countPtr);
         }
-        countingStream->write((VkFilter*)&local_filter, sizeof(VkFilter));
+        *countPtr += sizeof(VkFilter);
     }
-    uint32_t packetSize_vkCmdBlitImage = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdBlitImage = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBlitImage -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBlitImage);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdBlitImage = OP_vkCmdBlitImage;
-    stream->write(&opcode_vkCmdBlitImage, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdBlitImage, sizeof(uint32_t));
-    uint64_t cgen_var_574;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_574, 1);
-    stream->write((uint64_t*)&cgen_var_574, 1 * 8);
-    uint64_t cgen_var_575;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_srcImage, &cgen_var_575, 1);
-    stream->write((uint64_t*)&cgen_var_575, 1 * 8);
-    stream->write((VkImageLayout*)&local_srcImageLayout, sizeof(VkImageLayout));
-    uint64_t cgen_var_576;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_dstImage, &cgen_var_576, 1);
-    stream->write((uint64_t*)&cgen_var_576, 1 * 8);
-    stream->write((VkImageLayout*)&local_dstImageLayout, sizeof(VkImageLayout));
-    stream->write((uint32_t*)&local_regionCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdBlitImage, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBlitImage, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&local_srcImage));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkImageLayout*)&local_srcImageLayout, sizeof(VkImageLayout));
+    *streamPtrPtr += sizeof(VkImageLayout);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImage((*&local_dstImage));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkImageLayout*)&local_dstImageLayout, sizeof(VkImageLayout));
+    *streamPtrPtr += sizeof(VkImageLayout);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_regionCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
     {
-        marshal_VkImageBlit(stream, (VkImageBlit*)(local_pRegions + i));
+        reservedmarshal_VkImageBlit(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageBlit*)(local_pRegions + i), streamPtrPtr);
     }
-    stream->write((VkFilter*)&local_filter, sizeof(VkFilter));
-    AEMU_SCOPED_TRACE("vkCmdBlitImage readParams");
-    AEMU_SCOPED_TRACE("vkCmdBlitImage returnUnmarshal");
-    mImpl->log("finish vkCmdBlitImage");;
+    memcpy(*streamPtrPtr, (VkFilter*)&local_filter, sizeof(VkFilter));
+    *streamPtrPtr += sizeof(VkFilter);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdCopyBufferToImage(
@@ -8347,16 +9081,14 @@
     VkImage dstImage,
     VkImageLayout dstImageLayout,
     uint32_t regionCount,
-    const VkBufferImageCopy* pRegions)
+    const VkBufferImageCopy* pRegions,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdCopyBufferToImage encode");
-    mImpl->log("start vkCmdCopyBufferToImage");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkBuffer local_srcBuffer;
     VkImage local_dstImage;
@@ -8374,57 +9106,69 @@
         local_pRegions = (VkBufferImageCopy*)pool->alloc(((regionCount)) * sizeof(const VkBufferImageCopy));
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            deepcopy_VkBufferImageCopy(pool, pRegions + i, (VkBufferImageCopy*)(local_pRegions + i));
+            deepcopy_VkBufferImageCopy(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pRegions + i, (VkBufferImageCopy*)(local_pRegions + i));
         }
     }
     if (local_pRegions)
     {
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            transform_tohost_VkBufferImageCopy(mImpl->resources(), (VkBufferImageCopy*)(local_pRegions + i));
+            transform_tohost_VkBufferImageCopy(sResourceTracker, (VkBufferImageCopy*)(local_pRegions + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_577;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_577, 1);
-        countingStream->write((uint64_t*)&cgen_var_577, 1 * 8);
-        uint64_t cgen_var_578;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_srcBuffer, &cgen_var_578, 1);
-        countingStream->write((uint64_t*)&cgen_var_578, 1 * 8);
-        uint64_t cgen_var_579;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_dstImage, &cgen_var_579, 1);
-        countingStream->write((uint64_t*)&cgen_var_579, 1 * 8);
-        countingStream->write((VkImageLayout*)&local_dstImageLayout, sizeof(VkImageLayout));
-        countingStream->write((uint32_t*)&local_regionCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkImageLayout);
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            marshal_VkBufferImageCopy(countingStream, (VkBufferImageCopy*)(local_pRegions + i));
+            count_VkBufferImageCopy(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferImageCopy*)(local_pRegions + i), countPtr);
         }
     }
-    uint32_t packetSize_vkCmdCopyBufferToImage = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdCopyBufferToImage = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdCopyBufferToImage -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdCopyBufferToImage);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdCopyBufferToImage = OP_vkCmdCopyBufferToImage;
-    stream->write(&opcode_vkCmdCopyBufferToImage, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdCopyBufferToImage, sizeof(uint32_t));
-    uint64_t cgen_var_580;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_580, 1);
-    stream->write((uint64_t*)&cgen_var_580, 1 * 8);
-    uint64_t cgen_var_581;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_srcBuffer, &cgen_var_581, 1);
-    stream->write((uint64_t*)&cgen_var_581, 1 * 8);
-    uint64_t cgen_var_582;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_dstImage, &cgen_var_582, 1);
-    stream->write((uint64_t*)&cgen_var_582, 1 * 8);
-    stream->write((VkImageLayout*)&local_dstImageLayout, sizeof(VkImageLayout));
-    stream->write((uint32_t*)&local_regionCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdCopyBufferToImage, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdCopyBufferToImage, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_srcBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImage((*&local_dstImage));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkImageLayout*)&local_dstImageLayout, sizeof(VkImageLayout));
+    *streamPtrPtr += sizeof(VkImageLayout);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_regionCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
     {
-        marshal_VkBufferImageCopy(stream, (VkBufferImageCopy*)(local_pRegions + i));
+        reservedmarshal_VkBufferImageCopy(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferImageCopy*)(local_pRegions + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkCmdCopyBufferToImage readParams");
-    AEMU_SCOPED_TRACE("vkCmdCopyBufferToImage returnUnmarshal");
-    mImpl->log("finish vkCmdCopyBufferToImage");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdCopyImageToBuffer(
@@ -8433,16 +9177,14 @@
     VkImageLayout srcImageLayout,
     VkBuffer dstBuffer,
     uint32_t regionCount,
-    const VkBufferImageCopy* pRegions)
+    const VkBufferImageCopy* pRegions,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdCopyImageToBuffer encode");
-    mImpl->log("start vkCmdCopyImageToBuffer");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkImage local_srcImage;
     VkImageLayout local_srcImageLayout;
@@ -8460,57 +9202,69 @@
         local_pRegions = (VkBufferImageCopy*)pool->alloc(((regionCount)) * sizeof(const VkBufferImageCopy));
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            deepcopy_VkBufferImageCopy(pool, pRegions + i, (VkBufferImageCopy*)(local_pRegions + i));
+            deepcopy_VkBufferImageCopy(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pRegions + i, (VkBufferImageCopy*)(local_pRegions + i));
         }
     }
     if (local_pRegions)
     {
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            transform_tohost_VkBufferImageCopy(mImpl->resources(), (VkBufferImageCopy*)(local_pRegions + i));
+            transform_tohost_VkBufferImageCopy(sResourceTracker, (VkBufferImageCopy*)(local_pRegions + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_583;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_583, 1);
-        countingStream->write((uint64_t*)&cgen_var_583, 1 * 8);
-        uint64_t cgen_var_584;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_srcImage, &cgen_var_584, 1);
-        countingStream->write((uint64_t*)&cgen_var_584, 1 * 8);
-        countingStream->write((VkImageLayout*)&local_srcImageLayout, sizeof(VkImageLayout));
-        uint64_t cgen_var_585;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_dstBuffer, &cgen_var_585, 1);
-        countingStream->write((uint64_t*)&cgen_var_585, 1 * 8);
-        countingStream->write((uint32_t*)&local_regionCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkImageLayout);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            marshal_VkBufferImageCopy(countingStream, (VkBufferImageCopy*)(local_pRegions + i));
+            count_VkBufferImageCopy(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferImageCopy*)(local_pRegions + i), countPtr);
         }
     }
-    uint32_t packetSize_vkCmdCopyImageToBuffer = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdCopyImageToBuffer = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdCopyImageToBuffer -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdCopyImageToBuffer);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdCopyImageToBuffer = OP_vkCmdCopyImageToBuffer;
-    stream->write(&opcode_vkCmdCopyImageToBuffer, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdCopyImageToBuffer, sizeof(uint32_t));
-    uint64_t cgen_var_586;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_586, 1);
-    stream->write((uint64_t*)&cgen_var_586, 1 * 8);
-    uint64_t cgen_var_587;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_srcImage, &cgen_var_587, 1);
-    stream->write((uint64_t*)&cgen_var_587, 1 * 8);
-    stream->write((VkImageLayout*)&local_srcImageLayout, sizeof(VkImageLayout));
-    uint64_t cgen_var_588;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_dstBuffer, &cgen_var_588, 1);
-    stream->write((uint64_t*)&cgen_var_588, 1 * 8);
-    stream->write((uint32_t*)&local_regionCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdCopyImageToBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdCopyImageToBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&local_srcImage));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkImageLayout*)&local_srcImageLayout, sizeof(VkImageLayout));
+    *streamPtrPtr += sizeof(VkImageLayout);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&local_dstBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_regionCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
     {
-        marshal_VkBufferImageCopy(stream, (VkBufferImageCopy*)(local_pRegions + i));
+        reservedmarshal_VkBufferImageCopy(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferImageCopy*)(local_pRegions + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkCmdCopyImageToBuffer readParams");
-    AEMU_SCOPED_TRACE("vkCmdCopyImageToBuffer returnUnmarshal");
-    mImpl->log("finish vkCmdCopyImageToBuffer");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdUpdateBuffer(
@@ -8518,16 +9272,14 @@
     VkBuffer dstBuffer,
     VkDeviceSize dstOffset,
     VkDeviceSize dataSize,
-    const void* pData)
+    const void* pData,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdUpdateBuffer encode");
-    mImpl->log("start vkCmdUpdateBuffer");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkBuffer local_dstBuffer;
     VkDeviceSize local_dstOffset;
@@ -8537,40 +9289,50 @@
     local_dstBuffer = dstBuffer;
     local_dstOffset = dstOffset;
     local_dataSize = dataSize;
-    local_pData = nullptr;
-    if (pData)
+    // Avoiding deepcopy for pData
+    local_pData = (void*)pData;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pData = (void*)pool->dupArray(pData, ((dataSize)) * sizeof(const uint8_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += ((dataSize)) * sizeof(uint8_t);
     }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_589;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_589, 1);
-        countingStream->write((uint64_t*)&cgen_var_589, 1 * 8);
-        uint64_t cgen_var_590;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_dstBuffer, &cgen_var_590, 1);
-        countingStream->write((uint64_t*)&cgen_var_590, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_dstOffset, sizeof(VkDeviceSize));
-        countingStream->write((VkDeviceSize*)&local_dataSize, sizeof(VkDeviceSize));
-        countingStream->write((void*)local_pData, ((dataSize)) * sizeof(uint8_t));
-    }
-    uint32_t packetSize_vkCmdUpdateBuffer = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdUpdateBuffer = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdUpdateBuffer -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdUpdateBuffer);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdUpdateBuffer = OP_vkCmdUpdateBuffer;
-    stream->write(&opcode_vkCmdUpdateBuffer, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdUpdateBuffer, sizeof(uint32_t));
-    uint64_t cgen_var_591;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_591, 1);
-    stream->write((uint64_t*)&cgen_var_591, 1 * 8);
-    uint64_t cgen_var_592;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_dstBuffer, &cgen_var_592, 1);
-    stream->write((uint64_t*)&cgen_var_592, 1 * 8);
-    stream->write((VkDeviceSize*)&local_dstOffset, sizeof(VkDeviceSize));
-    stream->write((VkDeviceSize*)&local_dataSize, sizeof(VkDeviceSize));
-    stream->write((void*)local_pData, ((dataSize)) * sizeof(uint8_t));
-    AEMU_SCOPED_TRACE("vkCmdUpdateBuffer readParams");
-    AEMU_SCOPED_TRACE("vkCmdUpdateBuffer returnUnmarshal");
-    mImpl->log("finish vkCmdUpdateBuffer");;
+    memcpy(streamPtr, &opcode_vkCmdUpdateBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdUpdateBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_dstBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_dstOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_dataSize, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (void*)local_pData, ((dataSize)) * sizeof(uint8_t));
+    *streamPtrPtr += ((dataSize)) * sizeof(uint8_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdFillBuffer(
@@ -8578,16 +9340,14 @@
     VkBuffer dstBuffer,
     VkDeviceSize dstOffset,
     VkDeviceSize size,
-    uint32_t data)
+    uint32_t data,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdFillBuffer encode");
-    mImpl->log("start vkCmdFillBuffer");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkBuffer local_dstBuffer;
     VkDeviceSize local_dstOffset;
@@ -8598,35 +9358,48 @@
     local_dstOffset = dstOffset;
     local_size = size;
     local_data = data;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_593;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_593, 1);
-        countingStream->write((uint64_t*)&cgen_var_593, 1 * 8);
-        uint64_t cgen_var_594;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_dstBuffer, &cgen_var_594, 1);
-        countingStream->write((uint64_t*)&cgen_var_594, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_dstOffset, sizeof(VkDeviceSize));
-        countingStream->write((VkDeviceSize*)&local_size, sizeof(VkDeviceSize));
-        countingStream->write((uint32_t*)&local_data, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdFillBuffer = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdFillBuffer = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdFillBuffer -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdFillBuffer);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdFillBuffer = OP_vkCmdFillBuffer;
-    stream->write(&opcode_vkCmdFillBuffer, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdFillBuffer, sizeof(uint32_t));
-    uint64_t cgen_var_595;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_595, 1);
-    stream->write((uint64_t*)&cgen_var_595, 1 * 8);
-    uint64_t cgen_var_596;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_dstBuffer, &cgen_var_596, 1);
-    stream->write((uint64_t*)&cgen_var_596, 1 * 8);
-    stream->write((VkDeviceSize*)&local_dstOffset, sizeof(VkDeviceSize));
-    stream->write((VkDeviceSize*)&local_size, sizeof(VkDeviceSize));
-    stream->write((uint32_t*)&local_data, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdFillBuffer readParams");
-    AEMU_SCOPED_TRACE("vkCmdFillBuffer returnUnmarshal");
-    mImpl->log("finish vkCmdFillBuffer");;
+    memcpy(streamPtr, &opcode_vkCmdFillBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdFillBuffer, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_dstBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_dstOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_size, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_data, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdClearColorImage(
@@ -8635,16 +9408,14 @@
     VkImageLayout imageLayout,
     const VkClearColorValue* pColor,
     uint32_t rangeCount,
-    const VkImageSubresourceRange* pRanges)
+    const VkImageSubresourceRange* pRanges,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdClearColorImage encode");
-    mImpl->log("start vkCmdClearColorImage");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkImage local_image;
     VkImageLayout local_imageLayout;
@@ -8658,7 +9429,7 @@
     if (pColor)
     {
         local_pColor = (VkClearColorValue*)pool->alloc(sizeof(const VkClearColorValue));
-        deepcopy_VkClearColorValue(pool, pColor, (VkClearColorValue*)(local_pColor));
+        deepcopy_VkClearColorValue(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pColor, (VkClearColorValue*)(local_pColor));
     }
     local_rangeCount = rangeCount;
     local_pRanges = nullptr;
@@ -8667,57 +9438,69 @@
         local_pRanges = (VkImageSubresourceRange*)pool->alloc(((rangeCount)) * sizeof(const VkImageSubresourceRange));
         for (uint32_t i = 0; i < (uint32_t)((rangeCount)); ++i)
         {
-            deepcopy_VkImageSubresourceRange(pool, pRanges + i, (VkImageSubresourceRange*)(local_pRanges + i));
+            deepcopy_VkImageSubresourceRange(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pRanges + i, (VkImageSubresourceRange*)(local_pRanges + i));
         }
     }
     if (local_pColor)
     {
-        transform_tohost_VkClearColorValue(mImpl->resources(), (VkClearColorValue*)(local_pColor));
+        transform_tohost_VkClearColorValue(sResourceTracker, (VkClearColorValue*)(local_pColor));
     }
     if (local_pRanges)
     {
         for (uint32_t i = 0; i < (uint32_t)((rangeCount)); ++i)
         {
-            transform_tohost_VkImageSubresourceRange(mImpl->resources(), (VkImageSubresourceRange*)(local_pRanges + i));
+            transform_tohost_VkImageSubresourceRange(sResourceTracker, (VkImageSubresourceRange*)(local_pRanges + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_597;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_597, 1);
-        countingStream->write((uint64_t*)&cgen_var_597, 1 * 8);
-        uint64_t cgen_var_598;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_598, 1);
-        countingStream->write((uint64_t*)&cgen_var_598, 1 * 8);
-        countingStream->write((VkImageLayout*)&local_imageLayout, sizeof(VkImageLayout));
-        marshal_VkClearColorValue(countingStream, (VkClearColorValue*)(local_pColor));
-        countingStream->write((uint32_t*)&local_rangeCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkImageLayout);
+        count_VkClearColorValue(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkClearColorValue*)(local_pColor), countPtr);
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((rangeCount)); ++i)
         {
-            marshal_VkImageSubresourceRange(countingStream, (VkImageSubresourceRange*)(local_pRanges + i));
+            count_VkImageSubresourceRange(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageSubresourceRange*)(local_pRanges + i), countPtr);
         }
     }
-    uint32_t packetSize_vkCmdClearColorImage = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdClearColorImage = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdClearColorImage -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdClearColorImage);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdClearColorImage = OP_vkCmdClearColorImage;
-    stream->write(&opcode_vkCmdClearColorImage, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdClearColorImage, sizeof(uint32_t));
-    uint64_t cgen_var_599;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_599, 1);
-    stream->write((uint64_t*)&cgen_var_599, 1 * 8);
-    uint64_t cgen_var_600;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_600, 1);
-    stream->write((uint64_t*)&cgen_var_600, 1 * 8);
-    stream->write((VkImageLayout*)&local_imageLayout, sizeof(VkImageLayout));
-    marshal_VkClearColorValue(stream, (VkClearColorValue*)(local_pColor));
-    stream->write((uint32_t*)&local_rangeCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdClearColorImage, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdClearColorImage, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&local_image));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkImageLayout*)&local_imageLayout, sizeof(VkImageLayout));
+    *streamPtrPtr += sizeof(VkImageLayout);
+    reservedmarshal_VkClearColorValue(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkClearColorValue*)(local_pColor), streamPtrPtr);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_rangeCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((rangeCount)); ++i)
     {
-        marshal_VkImageSubresourceRange(stream, (VkImageSubresourceRange*)(local_pRanges + i));
+        reservedmarshal_VkImageSubresourceRange(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageSubresourceRange*)(local_pRanges + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkCmdClearColorImage readParams");
-    AEMU_SCOPED_TRACE("vkCmdClearColorImage returnUnmarshal");
-    mImpl->log("finish vkCmdClearColorImage");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdClearDepthStencilImage(
@@ -8726,16 +9509,14 @@
     VkImageLayout imageLayout,
     const VkClearDepthStencilValue* pDepthStencil,
     uint32_t rangeCount,
-    const VkImageSubresourceRange* pRanges)
+    const VkImageSubresourceRange* pRanges,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdClearDepthStencilImage encode");
-    mImpl->log("start vkCmdClearDepthStencilImage");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkImage local_image;
     VkImageLayout local_imageLayout;
@@ -8749,7 +9530,7 @@
     if (pDepthStencil)
     {
         local_pDepthStencil = (VkClearDepthStencilValue*)pool->alloc(sizeof(const VkClearDepthStencilValue));
-        deepcopy_VkClearDepthStencilValue(pool, pDepthStencil, (VkClearDepthStencilValue*)(local_pDepthStencil));
+        deepcopy_VkClearDepthStencilValue(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pDepthStencil, (VkClearDepthStencilValue*)(local_pDepthStencil));
     }
     local_rangeCount = rangeCount;
     local_pRanges = nullptr;
@@ -8758,57 +9539,69 @@
         local_pRanges = (VkImageSubresourceRange*)pool->alloc(((rangeCount)) * sizeof(const VkImageSubresourceRange));
         for (uint32_t i = 0; i < (uint32_t)((rangeCount)); ++i)
         {
-            deepcopy_VkImageSubresourceRange(pool, pRanges + i, (VkImageSubresourceRange*)(local_pRanges + i));
+            deepcopy_VkImageSubresourceRange(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pRanges + i, (VkImageSubresourceRange*)(local_pRanges + i));
         }
     }
     if (local_pDepthStencil)
     {
-        transform_tohost_VkClearDepthStencilValue(mImpl->resources(), (VkClearDepthStencilValue*)(local_pDepthStencil));
+        transform_tohost_VkClearDepthStencilValue(sResourceTracker, (VkClearDepthStencilValue*)(local_pDepthStencil));
     }
     if (local_pRanges)
     {
         for (uint32_t i = 0; i < (uint32_t)((rangeCount)); ++i)
         {
-            transform_tohost_VkImageSubresourceRange(mImpl->resources(), (VkImageSubresourceRange*)(local_pRanges + i));
+            transform_tohost_VkImageSubresourceRange(sResourceTracker, (VkImageSubresourceRange*)(local_pRanges + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_601;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_601, 1);
-        countingStream->write((uint64_t*)&cgen_var_601, 1 * 8);
-        uint64_t cgen_var_602;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_602, 1);
-        countingStream->write((uint64_t*)&cgen_var_602, 1 * 8);
-        countingStream->write((VkImageLayout*)&local_imageLayout, sizeof(VkImageLayout));
-        marshal_VkClearDepthStencilValue(countingStream, (VkClearDepthStencilValue*)(local_pDepthStencil));
-        countingStream->write((uint32_t*)&local_rangeCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkImageLayout);
+        count_VkClearDepthStencilValue(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkClearDepthStencilValue*)(local_pDepthStencil), countPtr);
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((rangeCount)); ++i)
         {
-            marshal_VkImageSubresourceRange(countingStream, (VkImageSubresourceRange*)(local_pRanges + i));
+            count_VkImageSubresourceRange(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageSubresourceRange*)(local_pRanges + i), countPtr);
         }
     }
-    uint32_t packetSize_vkCmdClearDepthStencilImage = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdClearDepthStencilImage = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdClearDepthStencilImage -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdClearDepthStencilImage);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdClearDepthStencilImage = OP_vkCmdClearDepthStencilImage;
-    stream->write(&opcode_vkCmdClearDepthStencilImage, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdClearDepthStencilImage, sizeof(uint32_t));
-    uint64_t cgen_var_603;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_603, 1);
-    stream->write((uint64_t*)&cgen_var_603, 1 * 8);
-    uint64_t cgen_var_604;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_604, 1);
-    stream->write((uint64_t*)&cgen_var_604, 1 * 8);
-    stream->write((VkImageLayout*)&local_imageLayout, sizeof(VkImageLayout));
-    marshal_VkClearDepthStencilValue(stream, (VkClearDepthStencilValue*)(local_pDepthStencil));
-    stream->write((uint32_t*)&local_rangeCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdClearDepthStencilImage, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdClearDepthStencilImage, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&local_image));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkImageLayout*)&local_imageLayout, sizeof(VkImageLayout));
+    *streamPtrPtr += sizeof(VkImageLayout);
+    reservedmarshal_VkClearDepthStencilValue(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkClearDepthStencilValue*)(local_pDepthStencil), streamPtrPtr);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_rangeCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((rangeCount)); ++i)
     {
-        marshal_VkImageSubresourceRange(stream, (VkImageSubresourceRange*)(local_pRanges + i));
+        reservedmarshal_VkImageSubresourceRange(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageSubresourceRange*)(local_pRanges + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkCmdClearDepthStencilImage readParams");
-    AEMU_SCOPED_TRACE("vkCmdClearDepthStencilImage returnUnmarshal");
-    mImpl->log("finish vkCmdClearDepthStencilImage");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdClearAttachments(
@@ -8816,16 +9609,14 @@
     uint32_t attachmentCount,
     const VkClearAttachment* pAttachments,
     uint32_t rectCount,
-    const VkClearRect* pRects)
+    const VkClearRect* pRects,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdClearAttachments encode");
-    mImpl->log("start vkCmdClearAttachments");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     uint32_t local_attachmentCount;
     VkClearAttachment* local_pAttachments;
@@ -8839,7 +9630,7 @@
         local_pAttachments = (VkClearAttachment*)pool->alloc(((attachmentCount)) * sizeof(const VkClearAttachment));
         for (uint32_t i = 0; i < (uint32_t)((attachmentCount)); ++i)
         {
-            deepcopy_VkClearAttachment(pool, pAttachments + i, (VkClearAttachment*)(local_pAttachments + i));
+            deepcopy_VkClearAttachment(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAttachments + i, (VkClearAttachment*)(local_pAttachments + i));
         }
     }
     local_rectCount = rectCount;
@@ -8849,60 +9640,72 @@
         local_pRects = (VkClearRect*)pool->alloc(((rectCount)) * sizeof(const VkClearRect));
         for (uint32_t i = 0; i < (uint32_t)((rectCount)); ++i)
         {
-            deepcopy_VkClearRect(pool, pRects + i, (VkClearRect*)(local_pRects + i));
+            deepcopy_VkClearRect(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pRects + i, (VkClearRect*)(local_pRects + i));
         }
     }
     if (local_pAttachments)
     {
         for (uint32_t i = 0; i < (uint32_t)((attachmentCount)); ++i)
         {
-            transform_tohost_VkClearAttachment(mImpl->resources(), (VkClearAttachment*)(local_pAttachments + i));
+            transform_tohost_VkClearAttachment(sResourceTracker, (VkClearAttachment*)(local_pAttachments + i));
         }
     }
     if (local_pRects)
     {
         for (uint32_t i = 0; i < (uint32_t)((rectCount)); ++i)
         {
-            transform_tohost_VkClearRect(mImpl->resources(), (VkClearRect*)(local_pRects + i));
+            transform_tohost_VkClearRect(sResourceTracker, (VkClearRect*)(local_pRects + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_605;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_605, 1);
-        countingStream->write((uint64_t*)&cgen_var_605, 1 * 8);
-        countingStream->write((uint32_t*)&local_attachmentCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((attachmentCount)); ++i)
         {
-            marshal_VkClearAttachment(countingStream, (VkClearAttachment*)(local_pAttachments + i));
+            count_VkClearAttachment(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkClearAttachment*)(local_pAttachments + i), countPtr);
         }
-        countingStream->write((uint32_t*)&local_rectCount, sizeof(uint32_t));
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((rectCount)); ++i)
         {
-            marshal_VkClearRect(countingStream, (VkClearRect*)(local_pRects + i));
+            count_VkClearRect(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkClearRect*)(local_pRects + i), countPtr);
         }
     }
-    uint32_t packetSize_vkCmdClearAttachments = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdClearAttachments = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdClearAttachments -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdClearAttachments);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdClearAttachments = OP_vkCmdClearAttachments;
-    stream->write(&opcode_vkCmdClearAttachments, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdClearAttachments, sizeof(uint32_t));
-    uint64_t cgen_var_606;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_606, 1);
-    stream->write((uint64_t*)&cgen_var_606, 1 * 8);
-    stream->write((uint32_t*)&local_attachmentCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdClearAttachments, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdClearAttachments, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_attachmentCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((attachmentCount)); ++i)
     {
-        marshal_VkClearAttachment(stream, (VkClearAttachment*)(local_pAttachments + i));
+        reservedmarshal_VkClearAttachment(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkClearAttachment*)(local_pAttachments + i), streamPtrPtr);
     }
-    stream->write((uint32_t*)&local_rectCount, sizeof(uint32_t));
+    memcpy(*streamPtrPtr, (uint32_t*)&local_rectCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((rectCount)); ++i)
     {
-        marshal_VkClearRect(stream, (VkClearRect*)(local_pRects + i));
+        reservedmarshal_VkClearRect(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkClearRect*)(local_pRects + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkCmdClearAttachments readParams");
-    AEMU_SCOPED_TRACE("vkCmdClearAttachments returnUnmarshal");
-    mImpl->log("finish vkCmdClearAttachments");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdResolveImage(
@@ -8912,16 +9715,14 @@
     VkImage dstImage,
     VkImageLayout dstImageLayout,
     uint32_t regionCount,
-    const VkImageResolve* pRegions)
+    const VkImageResolve* pRegions,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdResolveImage encode");
-    mImpl->log("start vkCmdResolveImage");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkImage local_srcImage;
     VkImageLayout local_srcImageLayout;
@@ -8941,151 +9742,182 @@
         local_pRegions = (VkImageResolve*)pool->alloc(((regionCount)) * sizeof(const VkImageResolve));
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            deepcopy_VkImageResolve(pool, pRegions + i, (VkImageResolve*)(local_pRegions + i));
+            deepcopy_VkImageResolve(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pRegions + i, (VkImageResolve*)(local_pRegions + i));
         }
     }
     if (local_pRegions)
     {
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            transform_tohost_VkImageResolve(mImpl->resources(), (VkImageResolve*)(local_pRegions + i));
+            transform_tohost_VkImageResolve(sResourceTracker, (VkImageResolve*)(local_pRegions + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_607;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_607, 1);
-        countingStream->write((uint64_t*)&cgen_var_607, 1 * 8);
-        uint64_t cgen_var_608;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_srcImage, &cgen_var_608, 1);
-        countingStream->write((uint64_t*)&cgen_var_608, 1 * 8);
-        countingStream->write((VkImageLayout*)&local_srcImageLayout, sizeof(VkImageLayout));
-        uint64_t cgen_var_609;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_dstImage, &cgen_var_609, 1);
-        countingStream->write((uint64_t*)&cgen_var_609, 1 * 8);
-        countingStream->write((VkImageLayout*)&local_dstImageLayout, sizeof(VkImageLayout));
-        countingStream->write((uint32_t*)&local_regionCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkImageLayout);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkImageLayout);
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
         {
-            marshal_VkImageResolve(countingStream, (VkImageResolve*)(local_pRegions + i));
+            count_VkImageResolve(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageResolve*)(local_pRegions + i), countPtr);
         }
     }
-    uint32_t packetSize_vkCmdResolveImage = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdResolveImage = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdResolveImage -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdResolveImage);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdResolveImage = OP_vkCmdResolveImage;
-    stream->write(&opcode_vkCmdResolveImage, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdResolveImage, sizeof(uint32_t));
-    uint64_t cgen_var_610;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_610, 1);
-    stream->write((uint64_t*)&cgen_var_610, 1 * 8);
-    uint64_t cgen_var_611;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_srcImage, &cgen_var_611, 1);
-    stream->write((uint64_t*)&cgen_var_611, 1 * 8);
-    stream->write((VkImageLayout*)&local_srcImageLayout, sizeof(VkImageLayout));
-    uint64_t cgen_var_612;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_dstImage, &cgen_var_612, 1);
-    stream->write((uint64_t*)&cgen_var_612, 1 * 8);
-    stream->write((VkImageLayout*)&local_dstImageLayout, sizeof(VkImageLayout));
-    stream->write((uint32_t*)&local_regionCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdResolveImage, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdResolveImage, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&local_srcImage));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkImageLayout*)&local_srcImageLayout, sizeof(VkImageLayout));
+    *streamPtrPtr += sizeof(VkImageLayout);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImage((*&local_dstImage));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkImageLayout*)&local_dstImageLayout, sizeof(VkImageLayout));
+    *streamPtrPtr += sizeof(VkImageLayout);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_regionCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((regionCount)); ++i)
     {
-        marshal_VkImageResolve(stream, (VkImageResolve*)(local_pRegions + i));
+        reservedmarshal_VkImageResolve(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageResolve*)(local_pRegions + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkCmdResolveImage readParams");
-    AEMU_SCOPED_TRACE("vkCmdResolveImage returnUnmarshal");
-    mImpl->log("finish vkCmdResolveImage");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdSetEvent(
     VkCommandBuffer commandBuffer,
     VkEvent event,
-    VkPipelineStageFlags stageMask)
+    VkPipelineStageFlags stageMask,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdSetEvent encode");
-    mImpl->log("start vkCmdSetEvent");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkEvent local_event;
     VkPipelineStageFlags local_stageMask;
     local_commandBuffer = commandBuffer;
     local_event = event;
     local_stageMask = stageMask;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_613;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_613, 1);
-        countingStream->write((uint64_t*)&cgen_var_613, 1 * 8);
-        uint64_t cgen_var_614;
-        countingStream->handleMapping()->mapHandles_VkEvent_u64(&local_event, &cgen_var_614, 1);
-        countingStream->write((uint64_t*)&cgen_var_614, 1 * 8);
-        countingStream->write((VkPipelineStageFlags*)&local_stageMask, sizeof(VkPipelineStageFlags));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkPipelineStageFlags);
     }
-    uint32_t packetSize_vkCmdSetEvent = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdSetEvent = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetEvent -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetEvent);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdSetEvent = OP_vkCmdSetEvent;
-    stream->write(&opcode_vkCmdSetEvent, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdSetEvent, sizeof(uint32_t));
-    uint64_t cgen_var_615;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_615, 1);
-    stream->write((uint64_t*)&cgen_var_615, 1 * 8);
-    uint64_t cgen_var_616;
-    stream->handleMapping()->mapHandles_VkEvent_u64(&local_event, &cgen_var_616, 1);
-    stream->write((uint64_t*)&cgen_var_616, 1 * 8);
-    stream->write((VkPipelineStageFlags*)&local_stageMask, sizeof(VkPipelineStageFlags));
-    AEMU_SCOPED_TRACE("vkCmdSetEvent readParams");
-    AEMU_SCOPED_TRACE("vkCmdSetEvent returnUnmarshal");
-    mImpl->log("finish vkCmdSetEvent");;
+    memcpy(streamPtr, &opcode_vkCmdSetEvent, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetEvent, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkEvent((*&local_event));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkPipelineStageFlags*)&local_stageMask, sizeof(VkPipelineStageFlags));
+    *streamPtrPtr += sizeof(VkPipelineStageFlags);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdResetEvent(
     VkCommandBuffer commandBuffer,
     VkEvent event,
-    VkPipelineStageFlags stageMask)
+    VkPipelineStageFlags stageMask,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdResetEvent encode");
-    mImpl->log("start vkCmdResetEvent");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkEvent local_event;
     VkPipelineStageFlags local_stageMask;
     local_commandBuffer = commandBuffer;
     local_event = event;
     local_stageMask = stageMask;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_617;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_617, 1);
-        countingStream->write((uint64_t*)&cgen_var_617, 1 * 8);
-        uint64_t cgen_var_618;
-        countingStream->handleMapping()->mapHandles_VkEvent_u64(&local_event, &cgen_var_618, 1);
-        countingStream->write((uint64_t*)&cgen_var_618, 1 * 8);
-        countingStream->write((VkPipelineStageFlags*)&local_stageMask, sizeof(VkPipelineStageFlags));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkPipelineStageFlags);
     }
-    uint32_t packetSize_vkCmdResetEvent = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdResetEvent = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdResetEvent -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdResetEvent);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdResetEvent = OP_vkCmdResetEvent;
-    stream->write(&opcode_vkCmdResetEvent, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdResetEvent, sizeof(uint32_t));
-    uint64_t cgen_var_619;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_619, 1);
-    stream->write((uint64_t*)&cgen_var_619, 1 * 8);
-    uint64_t cgen_var_620;
-    stream->handleMapping()->mapHandles_VkEvent_u64(&local_event, &cgen_var_620, 1);
-    stream->write((uint64_t*)&cgen_var_620, 1 * 8);
-    stream->write((VkPipelineStageFlags*)&local_stageMask, sizeof(VkPipelineStageFlags));
-    AEMU_SCOPED_TRACE("vkCmdResetEvent readParams");
-    AEMU_SCOPED_TRACE("vkCmdResetEvent returnUnmarshal");
-    mImpl->log("finish vkCmdResetEvent");;
+    memcpy(streamPtr, &opcode_vkCmdResetEvent, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdResetEvent, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkEvent((*&local_event));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkPipelineStageFlags*)&local_stageMask, sizeof(VkPipelineStageFlags));
+    *streamPtrPtr += sizeof(VkPipelineStageFlags);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdWaitEvents(
@@ -9099,16 +9931,14 @@
     uint32_t bufferMemoryBarrierCount,
     const VkBufferMemoryBarrier* pBufferMemoryBarriers,
     uint32_t imageMemoryBarrierCount,
-    const VkImageMemoryBarrier* pImageMemoryBarriers)
+    const VkImageMemoryBarrier* pImageMemoryBarriers,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdWaitEvents encode");
-    mImpl->log("start vkCmdWaitEvents");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     uint32_t local_eventCount;
     VkEvent* local_pEvents;
@@ -9122,11 +9952,8 @@
     VkImageMemoryBarrier* local_pImageMemoryBarriers;
     local_commandBuffer = commandBuffer;
     local_eventCount = eventCount;
-    local_pEvents = nullptr;
-    if (pEvents)
-    {
-        local_pEvents = (VkEvent*)pool->dupArray(pEvents, ((eventCount)) * sizeof(const VkEvent));
-    }
+    // Avoiding deepcopy for pEvents
+    local_pEvents = (VkEvent*)pEvents;
     local_srcStageMask = srcStageMask;
     local_dstStageMask = dstStageMask;
     local_memoryBarrierCount = memoryBarrierCount;
@@ -9136,7 +9963,7 @@
         local_pMemoryBarriers = (VkMemoryBarrier*)pool->alloc(((memoryBarrierCount)) * sizeof(const VkMemoryBarrier));
         for (uint32_t i = 0; i < (uint32_t)((memoryBarrierCount)); ++i)
         {
-            deepcopy_VkMemoryBarrier(pool, pMemoryBarriers + i, (VkMemoryBarrier*)(local_pMemoryBarriers + i));
+            deepcopy_VkMemoryBarrier(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pMemoryBarriers + i, (VkMemoryBarrier*)(local_pMemoryBarriers + i));
         }
     }
     local_bufferMemoryBarrierCount = bufferMemoryBarrierCount;
@@ -9146,7 +9973,7 @@
         local_pBufferMemoryBarriers = (VkBufferMemoryBarrier*)pool->alloc(((bufferMemoryBarrierCount)) * sizeof(const VkBufferMemoryBarrier));
         for (uint32_t i = 0; i < (uint32_t)((bufferMemoryBarrierCount)); ++i)
         {
-            deepcopy_VkBufferMemoryBarrier(pool, pBufferMemoryBarriers + i, (VkBufferMemoryBarrier*)(local_pBufferMemoryBarriers + i));
+            deepcopy_VkBufferMemoryBarrier(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pBufferMemoryBarriers + i, (VkBufferMemoryBarrier*)(local_pBufferMemoryBarriers + i));
         }
     }
     local_imageMemoryBarrierCount = imageMemoryBarrierCount;
@@ -9156,97 +9983,113 @@
         local_pImageMemoryBarriers = (VkImageMemoryBarrier*)pool->alloc(((imageMemoryBarrierCount)) * sizeof(const VkImageMemoryBarrier));
         for (uint32_t i = 0; i < (uint32_t)((imageMemoryBarrierCount)); ++i)
         {
-            deepcopy_VkImageMemoryBarrier(pool, pImageMemoryBarriers + i, (VkImageMemoryBarrier*)(local_pImageMemoryBarriers + i));
+            deepcopy_VkImageMemoryBarrier(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pImageMemoryBarriers + i, (VkImageMemoryBarrier*)(local_pImageMemoryBarriers + i));
         }
     }
     if (local_pMemoryBarriers)
     {
         for (uint32_t i = 0; i < (uint32_t)((memoryBarrierCount)); ++i)
         {
-            transform_tohost_VkMemoryBarrier(mImpl->resources(), (VkMemoryBarrier*)(local_pMemoryBarriers + i));
+            transform_tohost_VkMemoryBarrier(sResourceTracker, (VkMemoryBarrier*)(local_pMemoryBarriers + i));
         }
     }
     if (local_pBufferMemoryBarriers)
     {
         for (uint32_t i = 0; i < (uint32_t)((bufferMemoryBarrierCount)); ++i)
         {
-            transform_tohost_VkBufferMemoryBarrier(mImpl->resources(), (VkBufferMemoryBarrier*)(local_pBufferMemoryBarriers + i));
+            transform_tohost_VkBufferMemoryBarrier(sResourceTracker, (VkBufferMemoryBarrier*)(local_pBufferMemoryBarriers + i));
         }
     }
     if (local_pImageMemoryBarriers)
     {
         for (uint32_t i = 0; i < (uint32_t)((imageMemoryBarrierCount)); ++i)
         {
-            transform_tohost_VkImageMemoryBarrier(mImpl->resources(), (VkImageMemoryBarrier*)(local_pImageMemoryBarriers + i));
+            transform_tohost_VkImageMemoryBarrier(sResourceTracker, (VkImageMemoryBarrier*)(local_pImageMemoryBarriers + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_621;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_621, 1);
-        countingStream->write((uint64_t*)&cgen_var_621, 1 * 8);
-        countingStream->write((uint32_t*)&local_eventCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         if (((eventCount)))
         {
-            uint64_t* cgen_var_622;
-            countingStream->alloc((void**)&cgen_var_622, ((eventCount)) * 8);
-            countingStream->handleMapping()->mapHandles_VkEvent_u64(local_pEvents, cgen_var_622, ((eventCount)));
-            countingStream->write((uint64_t*)cgen_var_622, ((eventCount)) * 8);
+            *countPtr += ((eventCount)) * 8;
         }
-        countingStream->write((VkPipelineStageFlags*)&local_srcStageMask, sizeof(VkPipelineStageFlags));
-        countingStream->write((VkPipelineStageFlags*)&local_dstStageMask, sizeof(VkPipelineStageFlags));
-        countingStream->write((uint32_t*)&local_memoryBarrierCount, sizeof(uint32_t));
+        *countPtr += sizeof(VkPipelineStageFlags);
+        *countPtr += sizeof(VkPipelineStageFlags);
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((memoryBarrierCount)); ++i)
         {
-            marshal_VkMemoryBarrier(countingStream, (VkMemoryBarrier*)(local_pMemoryBarriers + i));
+            count_VkMemoryBarrier(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryBarrier*)(local_pMemoryBarriers + i), countPtr);
         }
-        countingStream->write((uint32_t*)&local_bufferMemoryBarrierCount, sizeof(uint32_t));
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((bufferMemoryBarrierCount)); ++i)
         {
-            marshal_VkBufferMemoryBarrier(countingStream, (VkBufferMemoryBarrier*)(local_pBufferMemoryBarriers + i));
+            count_VkBufferMemoryBarrier(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferMemoryBarrier*)(local_pBufferMemoryBarriers + i), countPtr);
         }
-        countingStream->write((uint32_t*)&local_imageMemoryBarrierCount, sizeof(uint32_t));
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((imageMemoryBarrierCount)); ++i)
         {
-            marshal_VkImageMemoryBarrier(countingStream, (VkImageMemoryBarrier*)(local_pImageMemoryBarriers + i));
+            count_VkImageMemoryBarrier(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageMemoryBarrier*)(local_pImageMemoryBarriers + i), countPtr);
         }
     }
-    uint32_t packetSize_vkCmdWaitEvents = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdWaitEvents = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdWaitEvents -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdWaitEvents);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdWaitEvents = OP_vkCmdWaitEvents;
-    stream->write(&opcode_vkCmdWaitEvents, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdWaitEvents, sizeof(uint32_t));
-    uint64_t cgen_var_623;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_623, 1);
-    stream->write((uint64_t*)&cgen_var_623, 1 * 8);
-    stream->write((uint32_t*)&local_eventCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdWaitEvents, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdWaitEvents, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_eventCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     if (((eventCount)))
     {
-        uint64_t* cgen_var_624;
-        stream->alloc((void**)&cgen_var_624, ((eventCount)) * 8);
-        stream->handleMapping()->mapHandles_VkEvent_u64(local_pEvents, cgen_var_624, ((eventCount)));
-        stream->write((uint64_t*)cgen_var_624, ((eventCount)) * 8);
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((eventCount)); ++k)
+        {
+            uint64_t tmpval = get_host_u64_VkEvent(local_pEvents[k]);
+            memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((eventCount));
     }
-    stream->write((VkPipelineStageFlags*)&local_srcStageMask, sizeof(VkPipelineStageFlags));
-    stream->write((VkPipelineStageFlags*)&local_dstStageMask, sizeof(VkPipelineStageFlags));
-    stream->write((uint32_t*)&local_memoryBarrierCount, sizeof(uint32_t));
+    memcpy(*streamPtrPtr, (VkPipelineStageFlags*)&local_srcStageMask, sizeof(VkPipelineStageFlags));
+    *streamPtrPtr += sizeof(VkPipelineStageFlags);
+    memcpy(*streamPtrPtr, (VkPipelineStageFlags*)&local_dstStageMask, sizeof(VkPipelineStageFlags));
+    *streamPtrPtr += sizeof(VkPipelineStageFlags);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_memoryBarrierCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((memoryBarrierCount)); ++i)
     {
-        marshal_VkMemoryBarrier(stream, (VkMemoryBarrier*)(local_pMemoryBarriers + i));
+        reservedmarshal_VkMemoryBarrier(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryBarrier*)(local_pMemoryBarriers + i), streamPtrPtr);
     }
-    stream->write((uint32_t*)&local_bufferMemoryBarrierCount, sizeof(uint32_t));
+    memcpy(*streamPtrPtr, (uint32_t*)&local_bufferMemoryBarrierCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((bufferMemoryBarrierCount)); ++i)
     {
-        marshal_VkBufferMemoryBarrier(stream, (VkBufferMemoryBarrier*)(local_pBufferMemoryBarriers + i));
+        reservedmarshal_VkBufferMemoryBarrier(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferMemoryBarrier*)(local_pBufferMemoryBarriers + i), streamPtrPtr);
     }
-    stream->write((uint32_t*)&local_imageMemoryBarrierCount, sizeof(uint32_t));
+    memcpy(*streamPtrPtr, (uint32_t*)&local_imageMemoryBarrierCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((imageMemoryBarrierCount)); ++i)
     {
-        marshal_VkImageMemoryBarrier(stream, (VkImageMemoryBarrier*)(local_pImageMemoryBarriers + i));
+        reservedmarshal_VkImageMemoryBarrier(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageMemoryBarrier*)(local_pImageMemoryBarriers + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkCmdWaitEvents readParams");
-    AEMU_SCOPED_TRACE("vkCmdWaitEvents returnUnmarshal");
-    mImpl->log("finish vkCmdWaitEvents");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdPipelineBarrier(
@@ -9259,16 +10102,14 @@
     uint32_t bufferMemoryBarrierCount,
     const VkBufferMemoryBarrier* pBufferMemoryBarriers,
     uint32_t imageMemoryBarrierCount,
-    const VkImageMemoryBarrier* pImageMemoryBarriers)
+    const VkImageMemoryBarrier* pImageMemoryBarriers,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdPipelineBarrier encode");
-    mImpl->log("start vkCmdPipelineBarrier");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkPipelineStageFlags local_srcStageMask;
     VkPipelineStageFlags local_dstStageMask;
@@ -9290,7 +10131,7 @@
         local_pMemoryBarriers = (VkMemoryBarrier*)pool->alloc(((memoryBarrierCount)) * sizeof(const VkMemoryBarrier));
         for (uint32_t i = 0; i < (uint32_t)((memoryBarrierCount)); ++i)
         {
-            deepcopy_VkMemoryBarrier(pool, pMemoryBarriers + i, (VkMemoryBarrier*)(local_pMemoryBarriers + i));
+            deepcopy_VkMemoryBarrier(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pMemoryBarriers + i, (VkMemoryBarrier*)(local_pMemoryBarriers + i));
         }
     }
     local_bufferMemoryBarrierCount = bufferMemoryBarrierCount;
@@ -9300,7 +10141,7 @@
         local_pBufferMemoryBarriers = (VkBufferMemoryBarrier*)pool->alloc(((bufferMemoryBarrierCount)) * sizeof(const VkBufferMemoryBarrier));
         for (uint32_t i = 0; i < (uint32_t)((bufferMemoryBarrierCount)); ++i)
         {
-            deepcopy_VkBufferMemoryBarrier(pool, pBufferMemoryBarriers + i, (VkBufferMemoryBarrier*)(local_pBufferMemoryBarriers + i));
+            deepcopy_VkBufferMemoryBarrier(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pBufferMemoryBarriers + i, (VkBufferMemoryBarrier*)(local_pBufferMemoryBarriers + i));
         }
     }
     local_imageMemoryBarrierCount = imageMemoryBarrierCount;
@@ -9310,99 +10151,113 @@
         local_pImageMemoryBarriers = (VkImageMemoryBarrier*)pool->alloc(((imageMemoryBarrierCount)) * sizeof(const VkImageMemoryBarrier));
         for (uint32_t i = 0; i < (uint32_t)((imageMemoryBarrierCount)); ++i)
         {
-            deepcopy_VkImageMemoryBarrier(pool, pImageMemoryBarriers + i, (VkImageMemoryBarrier*)(local_pImageMemoryBarriers + i));
+            deepcopy_VkImageMemoryBarrier(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pImageMemoryBarriers + i, (VkImageMemoryBarrier*)(local_pImageMemoryBarriers + i));
         }
     }
     if (local_pMemoryBarriers)
     {
         for (uint32_t i = 0; i < (uint32_t)((memoryBarrierCount)); ++i)
         {
-            transform_tohost_VkMemoryBarrier(mImpl->resources(), (VkMemoryBarrier*)(local_pMemoryBarriers + i));
+            transform_tohost_VkMemoryBarrier(sResourceTracker, (VkMemoryBarrier*)(local_pMemoryBarriers + i));
         }
     }
     if (local_pBufferMemoryBarriers)
     {
         for (uint32_t i = 0; i < (uint32_t)((bufferMemoryBarrierCount)); ++i)
         {
-            transform_tohost_VkBufferMemoryBarrier(mImpl->resources(), (VkBufferMemoryBarrier*)(local_pBufferMemoryBarriers + i));
+            transform_tohost_VkBufferMemoryBarrier(sResourceTracker, (VkBufferMemoryBarrier*)(local_pBufferMemoryBarriers + i));
         }
     }
     if (local_pImageMemoryBarriers)
     {
         for (uint32_t i = 0; i < (uint32_t)((imageMemoryBarrierCount)); ++i)
         {
-            transform_tohost_VkImageMemoryBarrier(mImpl->resources(), (VkImageMemoryBarrier*)(local_pImageMemoryBarriers + i));
+            transform_tohost_VkImageMemoryBarrier(sResourceTracker, (VkImageMemoryBarrier*)(local_pImageMemoryBarriers + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_625;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_625, 1);
-        countingStream->write((uint64_t*)&cgen_var_625, 1 * 8);
-        countingStream->write((VkPipelineStageFlags*)&local_srcStageMask, sizeof(VkPipelineStageFlags));
-        countingStream->write((VkPipelineStageFlags*)&local_dstStageMask, sizeof(VkPipelineStageFlags));
-        countingStream->write((VkDependencyFlags*)&local_dependencyFlags, sizeof(VkDependencyFlags));
-        countingStream->write((uint32_t*)&local_memoryBarrierCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkPipelineStageFlags);
+        *countPtr += sizeof(VkPipelineStageFlags);
+        *countPtr += sizeof(VkDependencyFlags);
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((memoryBarrierCount)); ++i)
         {
-            marshal_VkMemoryBarrier(countingStream, (VkMemoryBarrier*)(local_pMemoryBarriers + i));
+            count_VkMemoryBarrier(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryBarrier*)(local_pMemoryBarriers + i), countPtr);
         }
-        countingStream->write((uint32_t*)&local_bufferMemoryBarrierCount, sizeof(uint32_t));
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((bufferMemoryBarrierCount)); ++i)
         {
-            marshal_VkBufferMemoryBarrier(countingStream, (VkBufferMemoryBarrier*)(local_pBufferMemoryBarriers + i));
+            count_VkBufferMemoryBarrier(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferMemoryBarrier*)(local_pBufferMemoryBarriers + i), countPtr);
         }
-        countingStream->write((uint32_t*)&local_imageMemoryBarrierCount, sizeof(uint32_t));
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((imageMemoryBarrierCount)); ++i)
         {
-            marshal_VkImageMemoryBarrier(countingStream, (VkImageMemoryBarrier*)(local_pImageMemoryBarriers + i));
+            count_VkImageMemoryBarrier(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageMemoryBarrier*)(local_pImageMemoryBarriers + i), countPtr);
         }
     }
-    uint32_t packetSize_vkCmdPipelineBarrier = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdPipelineBarrier = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdPipelineBarrier -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdPipelineBarrier);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdPipelineBarrier = OP_vkCmdPipelineBarrier;
-    stream->write(&opcode_vkCmdPipelineBarrier, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdPipelineBarrier, sizeof(uint32_t));
-    uint64_t cgen_var_626;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_626, 1);
-    stream->write((uint64_t*)&cgen_var_626, 1 * 8);
-    stream->write((VkPipelineStageFlags*)&local_srcStageMask, sizeof(VkPipelineStageFlags));
-    stream->write((VkPipelineStageFlags*)&local_dstStageMask, sizeof(VkPipelineStageFlags));
-    stream->write((VkDependencyFlags*)&local_dependencyFlags, sizeof(VkDependencyFlags));
-    stream->write((uint32_t*)&local_memoryBarrierCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdPipelineBarrier, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdPipelineBarrier, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkPipelineStageFlags*)&local_srcStageMask, sizeof(VkPipelineStageFlags));
+    *streamPtrPtr += sizeof(VkPipelineStageFlags);
+    memcpy(*streamPtrPtr, (VkPipelineStageFlags*)&local_dstStageMask, sizeof(VkPipelineStageFlags));
+    *streamPtrPtr += sizeof(VkPipelineStageFlags);
+    memcpy(*streamPtrPtr, (VkDependencyFlags*)&local_dependencyFlags, sizeof(VkDependencyFlags));
+    *streamPtrPtr += sizeof(VkDependencyFlags);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_memoryBarrierCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((memoryBarrierCount)); ++i)
     {
-        marshal_VkMemoryBarrier(stream, (VkMemoryBarrier*)(local_pMemoryBarriers + i));
+        reservedmarshal_VkMemoryBarrier(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryBarrier*)(local_pMemoryBarriers + i), streamPtrPtr);
     }
-    stream->write((uint32_t*)&local_bufferMemoryBarrierCount, sizeof(uint32_t));
+    memcpy(*streamPtrPtr, (uint32_t*)&local_bufferMemoryBarrierCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((bufferMemoryBarrierCount)); ++i)
     {
-        marshal_VkBufferMemoryBarrier(stream, (VkBufferMemoryBarrier*)(local_pBufferMemoryBarriers + i));
+        reservedmarshal_VkBufferMemoryBarrier(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferMemoryBarrier*)(local_pBufferMemoryBarriers + i), streamPtrPtr);
     }
-    stream->write((uint32_t*)&local_imageMemoryBarrierCount, sizeof(uint32_t));
+    memcpy(*streamPtrPtr, (uint32_t*)&local_imageMemoryBarrierCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((imageMemoryBarrierCount)); ++i)
     {
-        marshal_VkImageMemoryBarrier(stream, (VkImageMemoryBarrier*)(local_pImageMemoryBarriers + i));
+        reservedmarshal_VkImageMemoryBarrier(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageMemoryBarrier*)(local_pImageMemoryBarriers + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkCmdPipelineBarrier readParams");
-    AEMU_SCOPED_TRACE("vkCmdPipelineBarrier returnUnmarshal");
-    mImpl->log("finish vkCmdPipelineBarrier");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdBeginQuery(
     VkCommandBuffer commandBuffer,
     VkQueryPool queryPool,
     uint32_t query,
-    VkQueryControlFlags flags)
+    VkQueryControlFlags flags,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdBeginQuery encode");
-    mImpl->log("start vkCmdBeginQuery");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkQueryPool local_queryPool;
     uint32_t local_query;
@@ -9411,95 +10266,114 @@
     local_queryPool = queryPool;
     local_query = query;
     local_flags = flags;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_627;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_627, 1);
-        countingStream->write((uint64_t*)&cgen_var_627, 1 * 8);
-        uint64_t cgen_var_628;
-        countingStream->handleMapping()->mapHandles_VkQueryPool_u64(&local_queryPool, &cgen_var_628, 1);
-        countingStream->write((uint64_t*)&cgen_var_628, 1 * 8);
-        countingStream->write((uint32_t*)&local_query, sizeof(uint32_t));
-        countingStream->write((VkQueryControlFlags*)&local_flags, sizeof(VkQueryControlFlags));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(VkQueryControlFlags);
     }
-    uint32_t packetSize_vkCmdBeginQuery = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdBeginQuery = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBeginQuery -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBeginQuery);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdBeginQuery = OP_vkCmdBeginQuery;
-    stream->write(&opcode_vkCmdBeginQuery, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdBeginQuery, sizeof(uint32_t));
-    uint64_t cgen_var_629;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_629, 1);
-    stream->write((uint64_t*)&cgen_var_629, 1 * 8);
-    uint64_t cgen_var_630;
-    stream->handleMapping()->mapHandles_VkQueryPool_u64(&local_queryPool, &cgen_var_630, 1);
-    stream->write((uint64_t*)&cgen_var_630, 1 * 8);
-    stream->write((uint32_t*)&local_query, sizeof(uint32_t));
-    stream->write((VkQueryControlFlags*)&local_flags, sizeof(VkQueryControlFlags));
-    AEMU_SCOPED_TRACE("vkCmdBeginQuery readParams");
-    AEMU_SCOPED_TRACE("vkCmdBeginQuery returnUnmarshal");
-    mImpl->log("finish vkCmdBeginQuery");;
+    memcpy(streamPtr, &opcode_vkCmdBeginQuery, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBeginQuery, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueryPool((*&local_queryPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_query, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (VkQueryControlFlags*)&local_flags, sizeof(VkQueryControlFlags));
+    *streamPtrPtr += sizeof(VkQueryControlFlags);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdEndQuery(
     VkCommandBuffer commandBuffer,
     VkQueryPool queryPool,
-    uint32_t query)
+    uint32_t query,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdEndQuery encode");
-    mImpl->log("start vkCmdEndQuery");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkQueryPool local_queryPool;
     uint32_t local_query;
     local_commandBuffer = commandBuffer;
     local_queryPool = queryPool;
     local_query = query;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_631;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_631, 1);
-        countingStream->write((uint64_t*)&cgen_var_631, 1 * 8);
-        uint64_t cgen_var_632;
-        countingStream->handleMapping()->mapHandles_VkQueryPool_u64(&local_queryPool, &cgen_var_632, 1);
-        countingStream->write((uint64_t*)&cgen_var_632, 1 * 8);
-        countingStream->write((uint32_t*)&local_query, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdEndQuery = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdEndQuery = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdEndQuery -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdEndQuery);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdEndQuery = OP_vkCmdEndQuery;
-    stream->write(&opcode_vkCmdEndQuery, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdEndQuery, sizeof(uint32_t));
-    uint64_t cgen_var_633;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_633, 1);
-    stream->write((uint64_t*)&cgen_var_633, 1 * 8);
-    uint64_t cgen_var_634;
-    stream->handleMapping()->mapHandles_VkQueryPool_u64(&local_queryPool, &cgen_var_634, 1);
-    stream->write((uint64_t*)&cgen_var_634, 1 * 8);
-    stream->write((uint32_t*)&local_query, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdEndQuery readParams");
-    AEMU_SCOPED_TRACE("vkCmdEndQuery returnUnmarshal");
-    mImpl->log("finish vkCmdEndQuery");;
+    memcpy(streamPtr, &opcode_vkCmdEndQuery, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdEndQuery, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueryPool((*&local_queryPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_query, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdResetQueryPool(
     VkCommandBuffer commandBuffer,
     VkQueryPool queryPool,
     uint32_t firstQuery,
-    uint32_t queryCount)
+    uint32_t queryCount,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdResetQueryPool encode");
-    mImpl->log("start vkCmdResetQueryPool");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkQueryPool local_queryPool;
     uint32_t local_firstQuery;
@@ -9508,49 +10382,59 @@
     local_queryPool = queryPool;
     local_firstQuery = firstQuery;
     local_queryCount = queryCount;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_635;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_635, 1);
-        countingStream->write((uint64_t*)&cgen_var_635, 1 * 8);
-        uint64_t cgen_var_636;
-        countingStream->handleMapping()->mapHandles_VkQueryPool_u64(&local_queryPool, &cgen_var_636, 1);
-        countingStream->write((uint64_t*)&cgen_var_636, 1 * 8);
-        countingStream->write((uint32_t*)&local_firstQuery, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_queryCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdResetQueryPool = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdResetQueryPool = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdResetQueryPool -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdResetQueryPool);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdResetQueryPool = OP_vkCmdResetQueryPool;
-    stream->write(&opcode_vkCmdResetQueryPool, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdResetQueryPool, sizeof(uint32_t));
-    uint64_t cgen_var_637;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_637, 1);
-    stream->write((uint64_t*)&cgen_var_637, 1 * 8);
-    uint64_t cgen_var_638;
-    stream->handleMapping()->mapHandles_VkQueryPool_u64(&local_queryPool, &cgen_var_638, 1);
-    stream->write((uint64_t*)&cgen_var_638, 1 * 8);
-    stream->write((uint32_t*)&local_firstQuery, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_queryCount, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdResetQueryPool readParams");
-    AEMU_SCOPED_TRACE("vkCmdResetQueryPool returnUnmarshal");
-    mImpl->log("finish vkCmdResetQueryPool");;
+    memcpy(streamPtr, &opcode_vkCmdResetQueryPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdResetQueryPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueryPool((*&local_queryPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstQuery, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_queryCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdWriteTimestamp(
     VkCommandBuffer commandBuffer,
     VkPipelineStageFlagBits pipelineStage,
     VkQueryPool queryPool,
-    uint32_t query)
+    uint32_t query,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdWriteTimestamp encode");
-    mImpl->log("start vkCmdWriteTimestamp");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkPipelineStageFlagBits local_pipelineStage;
     VkQueryPool local_queryPool;
@@ -9559,33 +10443,45 @@
     local_pipelineStage = pipelineStage;
     local_queryPool = queryPool;
     local_query = query;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_639;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_639, 1);
-        countingStream->write((uint64_t*)&cgen_var_639, 1 * 8);
-        countingStream->write((VkPipelineStageFlagBits*)&local_pipelineStage, sizeof(VkPipelineStageFlagBits));
-        uint64_t cgen_var_640;
-        countingStream->handleMapping()->mapHandles_VkQueryPool_u64(&local_queryPool, &cgen_var_640, 1);
-        countingStream->write((uint64_t*)&cgen_var_640, 1 * 8);
-        countingStream->write((uint32_t*)&local_query, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkPipelineStageFlagBits);
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdWriteTimestamp = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdWriteTimestamp = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdWriteTimestamp -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdWriteTimestamp);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdWriteTimestamp = OP_vkCmdWriteTimestamp;
-    stream->write(&opcode_vkCmdWriteTimestamp, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdWriteTimestamp, sizeof(uint32_t));
-    uint64_t cgen_var_641;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_641, 1);
-    stream->write((uint64_t*)&cgen_var_641, 1 * 8);
-    stream->write((VkPipelineStageFlagBits*)&local_pipelineStage, sizeof(VkPipelineStageFlagBits));
-    uint64_t cgen_var_642;
-    stream->handleMapping()->mapHandles_VkQueryPool_u64(&local_queryPool, &cgen_var_642, 1);
-    stream->write((uint64_t*)&cgen_var_642, 1 * 8);
-    stream->write((uint32_t*)&local_query, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdWriteTimestamp readParams");
-    AEMU_SCOPED_TRACE("vkCmdWriteTimestamp returnUnmarshal");
-    mImpl->log("finish vkCmdWriteTimestamp");;
+    memcpy(streamPtr, &opcode_vkCmdWriteTimestamp, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdWriteTimestamp, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkPipelineStageFlagBits*)&local_pipelineStage, sizeof(VkPipelineStageFlagBits));
+    *streamPtrPtr += sizeof(VkPipelineStageFlagBits);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueryPool((*&local_queryPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_query, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdCopyQueryPoolResults(
@@ -9596,16 +10492,14 @@
     VkBuffer dstBuffer,
     VkDeviceSize dstOffset,
     VkDeviceSize stride,
-    VkQueryResultFlags flags)
+    VkQueryResultFlags flags,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdCopyQueryPoolResults encode");
-    mImpl->log("start vkCmdCopyQueryPoolResults");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkQueryPool local_queryPool;
     uint32_t local_firstQuery;
@@ -9622,45 +10516,60 @@
     local_dstOffset = dstOffset;
     local_stride = stride;
     local_flags = flags;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_643;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_643, 1);
-        countingStream->write((uint64_t*)&cgen_var_643, 1 * 8);
-        uint64_t cgen_var_644;
-        countingStream->handleMapping()->mapHandles_VkQueryPool_u64(&local_queryPool, &cgen_var_644, 1);
-        countingStream->write((uint64_t*)&cgen_var_644, 1 * 8);
-        countingStream->write((uint32_t*)&local_firstQuery, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_queryCount, sizeof(uint32_t));
-        uint64_t cgen_var_645;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_dstBuffer, &cgen_var_645, 1);
-        countingStream->write((uint64_t*)&cgen_var_645, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_dstOffset, sizeof(VkDeviceSize));
-        countingStream->write((VkDeviceSize*)&local_stride, sizeof(VkDeviceSize));
-        countingStream->write((VkQueryResultFlags*)&local_flags, sizeof(VkQueryResultFlags));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(VkQueryResultFlags);
     }
-    uint32_t packetSize_vkCmdCopyQueryPoolResults = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdCopyQueryPoolResults = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdCopyQueryPoolResults -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdCopyQueryPoolResults);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdCopyQueryPoolResults = OP_vkCmdCopyQueryPoolResults;
-    stream->write(&opcode_vkCmdCopyQueryPoolResults, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdCopyQueryPoolResults, sizeof(uint32_t));
-    uint64_t cgen_var_646;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_646, 1);
-    stream->write((uint64_t*)&cgen_var_646, 1 * 8);
-    uint64_t cgen_var_647;
-    stream->handleMapping()->mapHandles_VkQueryPool_u64(&local_queryPool, &cgen_var_647, 1);
-    stream->write((uint64_t*)&cgen_var_647, 1 * 8);
-    stream->write((uint32_t*)&local_firstQuery, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_queryCount, sizeof(uint32_t));
-    uint64_t cgen_var_648;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_dstBuffer, &cgen_var_648, 1);
-    stream->write((uint64_t*)&cgen_var_648, 1 * 8);
-    stream->write((VkDeviceSize*)&local_dstOffset, sizeof(VkDeviceSize));
-    stream->write((VkDeviceSize*)&local_stride, sizeof(VkDeviceSize));
-    stream->write((VkQueryResultFlags*)&local_flags, sizeof(VkQueryResultFlags));
-    AEMU_SCOPED_TRACE("vkCmdCopyQueryPoolResults readParams");
-    AEMU_SCOPED_TRACE("vkCmdCopyQueryPoolResults returnUnmarshal");
-    mImpl->log("finish vkCmdCopyQueryPoolResults");;
+    memcpy(streamPtr, &opcode_vkCmdCopyQueryPoolResults, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdCopyQueryPoolResults, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueryPool((*&local_queryPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstQuery, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_queryCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&local_dstBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_dstOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_stride, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (VkQueryResultFlags*)&local_flags, sizeof(VkQueryResultFlags));
+    *streamPtrPtr += sizeof(VkQueryResultFlags);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdPushConstants(
@@ -9669,16 +10578,14 @@
     VkShaderStageFlags stageFlags,
     uint32_t offset,
     uint32_t size,
-    const void* pValues)
+    const void* pValues,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdPushConstants encode");
-    mImpl->log("start vkCmdPushConstants");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkPipelineLayout local_layout;
     VkShaderStageFlags local_stageFlags;
@@ -9690,57 +10597,66 @@
     local_stageFlags = stageFlags;
     local_offset = offset;
     local_size = size;
-    local_pValues = nullptr;
-    if (pValues)
+    // Avoiding deepcopy for pValues
+    local_pValues = (void*)pValues;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pValues = (void*)pool->dupArray(pValues, ((size)) * sizeof(const uint8_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkShaderStageFlags);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += ((size)) * sizeof(uint8_t);
     }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_649;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_649, 1);
-        countingStream->write((uint64_t*)&cgen_var_649, 1 * 8);
-        uint64_t cgen_var_650;
-        countingStream->handleMapping()->mapHandles_VkPipelineLayout_u64(&local_layout, &cgen_var_650, 1);
-        countingStream->write((uint64_t*)&cgen_var_650, 1 * 8);
-        countingStream->write((VkShaderStageFlags*)&local_stageFlags, sizeof(VkShaderStageFlags));
-        countingStream->write((uint32_t*)&local_offset, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_size, sizeof(uint32_t));
-        countingStream->write((void*)local_pValues, ((size)) * sizeof(uint8_t));
-    }
-    uint32_t packetSize_vkCmdPushConstants = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdPushConstants = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdPushConstants -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdPushConstants);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdPushConstants = OP_vkCmdPushConstants;
-    stream->write(&opcode_vkCmdPushConstants, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdPushConstants, sizeof(uint32_t));
-    uint64_t cgen_var_651;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_651, 1);
-    stream->write((uint64_t*)&cgen_var_651, 1 * 8);
-    uint64_t cgen_var_652;
-    stream->handleMapping()->mapHandles_VkPipelineLayout_u64(&local_layout, &cgen_var_652, 1);
-    stream->write((uint64_t*)&cgen_var_652, 1 * 8);
-    stream->write((VkShaderStageFlags*)&local_stageFlags, sizeof(VkShaderStageFlags));
-    stream->write((uint32_t*)&local_offset, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_size, sizeof(uint32_t));
-    stream->write((void*)local_pValues, ((size)) * sizeof(uint8_t));
-    AEMU_SCOPED_TRACE("vkCmdPushConstants readParams");
-    AEMU_SCOPED_TRACE("vkCmdPushConstants returnUnmarshal");
-    mImpl->log("finish vkCmdPushConstants");;
+    memcpy(streamPtr, &opcode_vkCmdPushConstants, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdPushConstants, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPipelineLayout((*&local_layout));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkShaderStageFlags*)&local_stageFlags, sizeof(VkShaderStageFlags));
+    *streamPtrPtr += sizeof(VkShaderStageFlags);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_offset, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_size, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (void*)local_pValues, ((size)) * sizeof(uint8_t));
+    *streamPtrPtr += ((size)) * sizeof(uint8_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdBeginRenderPass(
     VkCommandBuffer commandBuffer,
     const VkRenderPassBeginInfo* pRenderPassBegin,
-    VkSubpassContents contents)
+    VkSubpassContents contents,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdBeginRenderPass encode");
-    mImpl->log("start vkCmdBeginRenderPass");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkRenderPassBeginInfo* local_pRenderPassBegin;
     VkSubpassContents local_contents;
@@ -9749,211 +10665,247 @@
     if (pRenderPassBegin)
     {
         local_pRenderPassBegin = (VkRenderPassBeginInfo*)pool->alloc(sizeof(const VkRenderPassBeginInfo));
-        deepcopy_VkRenderPassBeginInfo(pool, pRenderPassBegin, (VkRenderPassBeginInfo*)(local_pRenderPassBegin));
+        deepcopy_VkRenderPassBeginInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pRenderPassBegin, (VkRenderPassBeginInfo*)(local_pRenderPassBegin));
     }
     local_contents = contents;
     if (local_pRenderPassBegin)
     {
-        transform_tohost_VkRenderPassBeginInfo(mImpl->resources(), (VkRenderPassBeginInfo*)(local_pRenderPassBegin));
+        transform_tohost_VkRenderPassBeginInfo(sResourceTracker, (VkRenderPassBeginInfo*)(local_pRenderPassBegin));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_653;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_653, 1);
-        countingStream->write((uint64_t*)&cgen_var_653, 1 * 8);
-        marshal_VkRenderPassBeginInfo(countingStream, (VkRenderPassBeginInfo*)(local_pRenderPassBegin));
-        countingStream->write((VkSubpassContents*)&local_contents, sizeof(VkSubpassContents));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkRenderPassBeginInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRenderPassBeginInfo*)(local_pRenderPassBegin), countPtr);
+        *countPtr += sizeof(VkSubpassContents);
     }
-    uint32_t packetSize_vkCmdBeginRenderPass = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdBeginRenderPass = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBeginRenderPass -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBeginRenderPass);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdBeginRenderPass = OP_vkCmdBeginRenderPass;
-    stream->write(&opcode_vkCmdBeginRenderPass, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdBeginRenderPass, sizeof(uint32_t));
-    uint64_t cgen_var_654;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_654, 1);
-    stream->write((uint64_t*)&cgen_var_654, 1 * 8);
-    marshal_VkRenderPassBeginInfo(stream, (VkRenderPassBeginInfo*)(local_pRenderPassBegin));
-    stream->write((VkSubpassContents*)&local_contents, sizeof(VkSubpassContents));
-    AEMU_SCOPED_TRACE("vkCmdBeginRenderPass readParams");
-    AEMU_SCOPED_TRACE("vkCmdBeginRenderPass returnUnmarshal");
-    mImpl->log("finish vkCmdBeginRenderPass");;
+    memcpy(streamPtr, &opcode_vkCmdBeginRenderPass, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBeginRenderPass, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkRenderPassBeginInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRenderPassBeginInfo*)(local_pRenderPassBegin), streamPtrPtr);
+    memcpy(*streamPtrPtr, (VkSubpassContents*)&local_contents, sizeof(VkSubpassContents));
+    *streamPtrPtr += sizeof(VkSubpassContents);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdNextSubpass(
     VkCommandBuffer commandBuffer,
-    VkSubpassContents contents)
+    VkSubpassContents contents,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdNextSubpass encode");
-    mImpl->log("start vkCmdNextSubpass");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkSubpassContents local_contents;
     local_commandBuffer = commandBuffer;
     local_contents = contents;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_655;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_655, 1);
-        countingStream->write((uint64_t*)&cgen_var_655, 1 * 8);
-        countingStream->write((VkSubpassContents*)&local_contents, sizeof(VkSubpassContents));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkSubpassContents);
     }
-    uint32_t packetSize_vkCmdNextSubpass = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdNextSubpass = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdNextSubpass -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdNextSubpass);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdNextSubpass = OP_vkCmdNextSubpass;
-    stream->write(&opcode_vkCmdNextSubpass, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdNextSubpass, sizeof(uint32_t));
-    uint64_t cgen_var_656;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_656, 1);
-    stream->write((uint64_t*)&cgen_var_656, 1 * 8);
-    stream->write((VkSubpassContents*)&local_contents, sizeof(VkSubpassContents));
-    AEMU_SCOPED_TRACE("vkCmdNextSubpass readParams");
-    AEMU_SCOPED_TRACE("vkCmdNextSubpass returnUnmarshal");
-    mImpl->log("finish vkCmdNextSubpass");;
+    memcpy(streamPtr, &opcode_vkCmdNextSubpass, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdNextSubpass, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkSubpassContents*)&local_contents, sizeof(VkSubpassContents));
+    *streamPtrPtr += sizeof(VkSubpassContents);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdEndRenderPass(
-    VkCommandBuffer commandBuffer)
+    VkCommandBuffer commandBuffer,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdEndRenderPass encode");
-    mImpl->log("start vkCmdEndRenderPass");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     local_commandBuffer = commandBuffer;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_657;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_657, 1);
-        countingStream->write((uint64_t*)&cgen_var_657, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkCmdEndRenderPass = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdEndRenderPass = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdEndRenderPass -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdEndRenderPass);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdEndRenderPass = OP_vkCmdEndRenderPass;
-    stream->write(&opcode_vkCmdEndRenderPass, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdEndRenderPass, sizeof(uint32_t));
-    uint64_t cgen_var_658;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_658, 1);
-    stream->write((uint64_t*)&cgen_var_658, 1 * 8);
-    AEMU_SCOPED_TRACE("vkCmdEndRenderPass readParams");
-    AEMU_SCOPED_TRACE("vkCmdEndRenderPass returnUnmarshal");
-    mImpl->log("finish vkCmdEndRenderPass");;
+    memcpy(streamPtr, &opcode_vkCmdEndRenderPass, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdEndRenderPass, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdExecuteCommands(
     VkCommandBuffer commandBuffer,
     uint32_t commandBufferCount,
-    const VkCommandBuffer* pCommandBuffers)
+    const VkCommandBuffer* pCommandBuffers,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdExecuteCommands encode");
-    mImpl->log("start vkCmdExecuteCommands");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     uint32_t local_commandBufferCount;
     VkCommandBuffer* local_pCommandBuffers;
     local_commandBuffer = commandBuffer;
     local_commandBufferCount = commandBufferCount;
-    local_pCommandBuffers = nullptr;
-    if (pCommandBuffers)
+    // Avoiding deepcopy for pCommandBuffers
+    local_pCommandBuffers = (VkCommandBuffer*)pCommandBuffers;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pCommandBuffers = (VkCommandBuffer*)pool->dupArray(pCommandBuffers, ((commandBufferCount)) * sizeof(const VkCommandBuffer));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_659;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_659, 1);
-        countingStream->write((uint64_t*)&cgen_var_659, 1 * 8);
-        countingStream->write((uint32_t*)&local_commandBufferCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         if (((commandBufferCount)))
         {
-            uint64_t* cgen_var_660;
-            countingStream->alloc((void**)&cgen_var_660, ((commandBufferCount)) * 8);
-            countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(local_pCommandBuffers, cgen_var_660, ((commandBufferCount)));
-            countingStream->write((uint64_t*)cgen_var_660, ((commandBufferCount)) * 8);
+            *countPtr += ((commandBufferCount)) * 8;
         }
     }
-    uint32_t packetSize_vkCmdExecuteCommands = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdExecuteCommands = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdExecuteCommands -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdExecuteCommands);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdExecuteCommands = OP_vkCmdExecuteCommands;
-    stream->write(&opcode_vkCmdExecuteCommands, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdExecuteCommands, sizeof(uint32_t));
-    uint64_t cgen_var_661;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_661, 1);
-    stream->write((uint64_t*)&cgen_var_661, 1 * 8);
-    stream->write((uint32_t*)&local_commandBufferCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdExecuteCommands, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdExecuteCommands, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_commandBufferCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     if (((commandBufferCount)))
     {
-        uint64_t* cgen_var_662;
-        stream->alloc((void**)&cgen_var_662, ((commandBufferCount)) * 8);
-        stream->handleMapping()->mapHandles_VkCommandBuffer_u64(local_pCommandBuffers, cgen_var_662, ((commandBufferCount)));
-        stream->write((uint64_t*)cgen_var_662, ((commandBufferCount)) * 8);
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((commandBufferCount)); ++k)
+        {
+            uint64_t tmpval = get_host_u64_VkCommandBuffer(local_pCommandBuffers[k]);
+            memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((commandBufferCount));
     }
-    AEMU_SCOPED_TRACE("vkCmdExecuteCommands readParams");
-    AEMU_SCOPED_TRACE("vkCmdExecuteCommands returnUnmarshal");
-    mImpl->log("finish vkCmdExecuteCommands");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
 #ifdef VK_VERSION_1_1
 VkResult VkEncoder::vkEnumerateInstanceVersion(
-    uint32_t* pApiVersion)
+    uint32_t* pApiVersion,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkEnumerateInstanceVersion encode");
-    mImpl->log("start vkEnumerateInstanceVersion");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        countingStream->write((uint32_t*)pApiVersion, sizeof(uint32_t));
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkEnumerateInstanceVersion = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkEnumerateInstanceVersion = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkEnumerateInstanceVersion);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkEnumerateInstanceVersion = OP_vkEnumerateInstanceVersion;
-    stream->write(&opcode_vkEnumerateInstanceVersion, sizeof(uint32_t));
-    stream->write(&packetSize_vkEnumerateInstanceVersion, sizeof(uint32_t));
-    stream->write((uint32_t*)pApiVersion, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkEnumerateInstanceVersion readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkEnumerateInstanceVersion, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkEnumerateInstanceVersion, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    memcpy(*streamPtrPtr, (uint32_t*)pApiVersion, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     stream->read((uint32_t*)pApiVersion, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkEnumerateInstanceVersion returnUnmarshal");
     VkResult vkEnumerateInstanceVersion_VkResult_return = (VkResult)0;
     stream->read(&vkEnumerateInstanceVersion_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkEnumerateInstanceVersion");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkEnumerateInstanceVersion_VkResult_return;
 }
 
 VkResult VkEncoder::vkBindBufferMemory2(
     VkDevice device,
     uint32_t bindInfoCount,
-    const VkBindBufferMemoryInfo* pBindInfos)
+    const VkBindBufferMemoryInfo* pBindInfos,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkBindBufferMemory2 encode");
-    mImpl->log("start vkBindBufferMemory2");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     uint32_t local_bindInfoCount;
     VkBindBufferMemoryInfo* local_pBindInfos;
@@ -9965,64 +10917,68 @@
         local_pBindInfos = (VkBindBufferMemoryInfo*)pool->alloc(((bindInfoCount)) * sizeof(const VkBindBufferMemoryInfo));
         for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
         {
-            deepcopy_VkBindBufferMemoryInfo(pool, pBindInfos + i, (VkBindBufferMemoryInfo*)(local_pBindInfos + i));
+            deepcopy_VkBindBufferMemoryInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pBindInfos + i, (VkBindBufferMemoryInfo*)(local_pBindInfos + i));
         }
     }
     if (local_pBindInfos)
     {
         for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
         {
-            transform_tohost_VkBindBufferMemoryInfo(mImpl->resources(), (VkBindBufferMemoryInfo*)(local_pBindInfos + i));
+            transform_tohost_VkBindBufferMemoryInfo(sResourceTracker, (VkBindBufferMemoryInfo*)(local_pBindInfos + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_663;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_663, 1);
-        countingStream->write((uint64_t*)&cgen_var_663, 1 * 8);
-        countingStream->write((uint32_t*)&local_bindInfoCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
         {
-            marshal_VkBindBufferMemoryInfo(countingStream, (VkBindBufferMemoryInfo*)(local_pBindInfos + i));
+            count_VkBindBufferMemoryInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBindBufferMemoryInfo*)(local_pBindInfos + i), countPtr);
         }
     }
-    uint32_t packetSize_vkBindBufferMemory2 = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkBindBufferMemory2 = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkBindBufferMemory2);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkBindBufferMemory2 = OP_vkBindBufferMemory2;
-    stream->write(&opcode_vkBindBufferMemory2, sizeof(uint32_t));
-    stream->write(&packetSize_vkBindBufferMemory2, sizeof(uint32_t));
-    uint64_t cgen_var_664;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_664, 1);
-    stream->write((uint64_t*)&cgen_var_664, 1 * 8);
-    stream->write((uint32_t*)&local_bindInfoCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkBindBufferMemory2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkBindBufferMemory2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_bindInfoCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
     {
-        marshal_VkBindBufferMemoryInfo(stream, (VkBindBufferMemoryInfo*)(local_pBindInfos + i));
+        reservedmarshal_VkBindBufferMemoryInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBindBufferMemoryInfo*)(local_pBindInfos + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkBindBufferMemory2 readParams");
-    AEMU_SCOPED_TRACE("vkBindBufferMemory2 returnUnmarshal");
     VkResult vkBindBufferMemory2_VkResult_return = (VkResult)0;
     stream->read(&vkBindBufferMemory2_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkBindBufferMemory2");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkBindBufferMemory2_VkResult_return;
 }
 
 VkResult VkEncoder::vkBindImageMemory2(
     VkDevice device,
     uint32_t bindInfoCount,
-    const VkBindImageMemoryInfo* pBindInfos)
+    const VkBindImageMemoryInfo* pBindInfos,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkBindImageMemory2 encode");
-    mImpl->log("start vkBindImageMemory2");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     uint32_t local_bindInfoCount;
     VkBindImageMemoryInfo* local_pBindInfos;
@@ -10034,48 +10990,54 @@
         local_pBindInfos = (VkBindImageMemoryInfo*)pool->alloc(((bindInfoCount)) * sizeof(const VkBindImageMemoryInfo));
         for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
         {
-            deepcopy_VkBindImageMemoryInfo(pool, pBindInfos + i, (VkBindImageMemoryInfo*)(local_pBindInfos + i));
+            deepcopy_VkBindImageMemoryInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pBindInfos + i, (VkBindImageMemoryInfo*)(local_pBindInfos + i));
         }
     }
     if (local_pBindInfos)
     {
         for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
         {
-            transform_tohost_VkBindImageMemoryInfo(mImpl->resources(), (VkBindImageMemoryInfo*)(local_pBindInfos + i));
+            transform_tohost_VkBindImageMemoryInfo(sResourceTracker, (VkBindImageMemoryInfo*)(local_pBindInfos + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_665;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_665, 1);
-        countingStream->write((uint64_t*)&cgen_var_665, 1 * 8);
-        countingStream->write((uint32_t*)&local_bindInfoCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
         {
-            marshal_VkBindImageMemoryInfo(countingStream, (VkBindImageMemoryInfo*)(local_pBindInfos + i));
+            count_VkBindImageMemoryInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBindImageMemoryInfo*)(local_pBindInfos + i), countPtr);
         }
     }
-    uint32_t packetSize_vkBindImageMemory2 = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkBindImageMemory2 = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkBindImageMemory2);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkBindImageMemory2 = OP_vkBindImageMemory2;
-    stream->write(&opcode_vkBindImageMemory2, sizeof(uint32_t));
-    stream->write(&packetSize_vkBindImageMemory2, sizeof(uint32_t));
-    uint64_t cgen_var_666;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_666, 1);
-    stream->write((uint64_t*)&cgen_var_666, 1 * 8);
-    stream->write((uint32_t*)&local_bindInfoCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkBindImageMemory2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkBindImageMemory2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_bindInfoCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
     {
-        marshal_VkBindImageMemoryInfo(stream, (VkBindImageMemoryInfo*)(local_pBindInfos + i));
+        reservedmarshal_VkBindImageMemoryInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBindImageMemoryInfo*)(local_pBindInfos + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkBindImageMemory2 readParams");
-    AEMU_SCOPED_TRACE("vkBindImageMemory2 returnUnmarshal");
     VkResult vkBindImageMemory2_VkResult_return = (VkResult)0;
     stream->read(&vkBindImageMemory2_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkBindImageMemory2");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkBindImageMemory2_VkResult_return;
 }
 
@@ -10084,16 +11046,14 @@
     uint32_t heapIndex,
     uint32_t localDeviceIndex,
     uint32_t remoteDeviceIndex,
-    VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
+    VkPeerMemoryFeatureFlags* pPeerMemoryFeatures,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupPeerMemoryFeatures encode");
-    mImpl->log("start vkGetDeviceGroupPeerMemoryFeatures");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     uint32_t local_heapIndex;
     uint32_t local_localDeviceIndex;
@@ -10102,69 +11062,90 @@
     local_heapIndex = heapIndex;
     local_localDeviceIndex = localDeviceIndex;
     local_remoteDeviceIndex = remoteDeviceIndex;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_667;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_667, 1);
-        countingStream->write((uint64_t*)&cgen_var_667, 1 * 8);
-        countingStream->write((uint32_t*)&local_heapIndex, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_localDeviceIndex, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_remoteDeviceIndex, sizeof(uint32_t));
-        countingStream->write((VkPeerMemoryFeatureFlags*)pPeerMemoryFeatures, sizeof(VkPeerMemoryFeatureFlags));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(VkPeerMemoryFeatureFlags);
     }
-    uint32_t packetSize_vkGetDeviceGroupPeerMemoryFeatures = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetDeviceGroupPeerMemoryFeatures = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDeviceGroupPeerMemoryFeatures);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetDeviceGroupPeerMemoryFeatures = OP_vkGetDeviceGroupPeerMemoryFeatures;
-    stream->write(&opcode_vkGetDeviceGroupPeerMemoryFeatures, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetDeviceGroupPeerMemoryFeatures, sizeof(uint32_t));
-    uint64_t cgen_var_668;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_668, 1);
-    stream->write((uint64_t*)&cgen_var_668, 1 * 8);
-    stream->write((uint32_t*)&local_heapIndex, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_localDeviceIndex, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_remoteDeviceIndex, sizeof(uint32_t));
-    stream->write((VkPeerMemoryFeatureFlags*)pPeerMemoryFeatures, sizeof(VkPeerMemoryFeatureFlags));
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupPeerMemoryFeatures readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDeviceGroupPeerMemoryFeatures, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDeviceGroupPeerMemoryFeatures, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_heapIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_localDeviceIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_remoteDeviceIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (VkPeerMemoryFeatureFlags*)pPeerMemoryFeatures, sizeof(VkPeerMemoryFeatureFlags));
+    *streamPtrPtr += sizeof(VkPeerMemoryFeatureFlags);
     stream->read((VkPeerMemoryFeatureFlags*)pPeerMemoryFeatures, sizeof(VkPeerMemoryFeatureFlags));
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupPeerMemoryFeatures returnUnmarshal");
-    mImpl->log("finish vkGetDeviceGroupPeerMemoryFeatures");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdSetDeviceMask(
     VkCommandBuffer commandBuffer,
-    uint32_t deviceMask)
+    uint32_t deviceMask,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdSetDeviceMask encode");
-    mImpl->log("start vkCmdSetDeviceMask");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     uint32_t local_deviceMask;
     local_commandBuffer = commandBuffer;
     local_deviceMask = deviceMask;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_669;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_669, 1);
-        countingStream->write((uint64_t*)&cgen_var_669, 1 * 8);
-        countingStream->write((uint32_t*)&local_deviceMask, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdSetDeviceMask = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdSetDeviceMask = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetDeviceMask -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetDeviceMask);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdSetDeviceMask = OP_vkCmdSetDeviceMask;
-    stream->write(&opcode_vkCmdSetDeviceMask, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdSetDeviceMask, sizeof(uint32_t));
-    uint64_t cgen_var_670;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_670, 1);
-    stream->write((uint64_t*)&cgen_var_670, 1 * 8);
-    stream->write((uint32_t*)&local_deviceMask, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdSetDeviceMask readParams");
-    AEMU_SCOPED_TRACE("vkCmdSetDeviceMask returnUnmarshal");
-    mImpl->log("finish vkCmdSetDeviceMask");;
+    memcpy(streamPtr, &opcode_vkCmdSetDeviceMask, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetDeviceMask, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_deviceMask, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdDispatchBase(
@@ -10174,16 +11155,14 @@
     uint32_t baseGroupZ,
     uint32_t groupCountX,
     uint32_t groupCountY,
-    uint32_t groupCountZ)
+    uint32_t groupCountZ,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdDispatchBase encode");
-    mImpl->log("start vkCmdDispatchBase");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     uint32_t local_baseGroupX;
     uint32_t local_baseGroupY;
@@ -10198,101 +11177,124 @@
     local_groupCountX = groupCountX;
     local_groupCountY = groupCountY;
     local_groupCountZ = groupCountZ;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_671;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_671, 1);
-        countingStream->write((uint64_t*)&cgen_var_671, 1 * 8);
-        countingStream->write((uint32_t*)&local_baseGroupX, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_baseGroupY, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_baseGroupZ, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_groupCountX, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_groupCountY, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_groupCountZ, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdDispatchBase = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdDispatchBase = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDispatchBase -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDispatchBase);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdDispatchBase = OP_vkCmdDispatchBase;
-    stream->write(&opcode_vkCmdDispatchBase, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdDispatchBase, sizeof(uint32_t));
-    uint64_t cgen_var_672;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_672, 1);
-    stream->write((uint64_t*)&cgen_var_672, 1 * 8);
-    stream->write((uint32_t*)&local_baseGroupX, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_baseGroupY, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_baseGroupZ, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_groupCountX, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_groupCountY, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_groupCountZ, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdDispatchBase readParams");
-    AEMU_SCOPED_TRACE("vkCmdDispatchBase returnUnmarshal");
-    mImpl->log("finish vkCmdDispatchBase");;
+    memcpy(streamPtr, &opcode_vkCmdDispatchBase, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDispatchBase, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_baseGroupX, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_baseGroupY, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_baseGroupZ, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_groupCountX, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_groupCountY, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_groupCountZ, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkEnumeratePhysicalDeviceGroups(
     VkInstance instance,
     uint32_t* pPhysicalDeviceGroupCount,
-    VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties)
+    VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkEnumeratePhysicalDeviceGroups encode");
-    mImpl->log("start vkEnumeratePhysicalDeviceGroups");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     local_instance = instance;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_673;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_673, 1);
-        countingStream->write((uint64_t*)&cgen_var_673, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_674 = (uint64_t)(uintptr_t)pPhysicalDeviceGroupCount;
-        countingStream->putBe64(cgen_var_674);
+        *countPtr += 8;
         if (pPhysicalDeviceGroupCount)
         {
-            countingStream->write((uint32_t*)pPhysicalDeviceGroupCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_675 = (uint64_t)(uintptr_t)pPhysicalDeviceGroupProperties;
-        countingStream->putBe64(cgen_var_675);
+        *countPtr += 8;
         if (pPhysicalDeviceGroupProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pPhysicalDeviceGroupCount)); ++i)
+            if (pPhysicalDeviceGroupCount)
             {
-                marshal_VkPhysicalDeviceGroupProperties(countingStream, (VkPhysicalDeviceGroupProperties*)(pPhysicalDeviceGroupProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pPhysicalDeviceGroupCount)); ++i)
+                {
+                    count_VkPhysicalDeviceGroupProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceGroupProperties*)(pPhysicalDeviceGroupProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkEnumeratePhysicalDeviceGroups = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkEnumeratePhysicalDeviceGroups = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkEnumeratePhysicalDeviceGroups);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkEnumeratePhysicalDeviceGroups = OP_vkEnumeratePhysicalDeviceGroups;
-    stream->write(&opcode_vkEnumeratePhysicalDeviceGroups, sizeof(uint32_t));
-    stream->write(&packetSize_vkEnumeratePhysicalDeviceGroups, sizeof(uint32_t));
-    uint64_t cgen_var_676;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_676, 1);
-    stream->write((uint64_t*)&cgen_var_676, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkEnumeratePhysicalDeviceGroups, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkEnumeratePhysicalDeviceGroups, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_677 = (uint64_t)(uintptr_t)pPhysicalDeviceGroupCount;
-    stream->putBe64(cgen_var_677);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pPhysicalDeviceGroupCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPhysicalDeviceGroupCount)
     {
-        stream->write((uint32_t*)pPhysicalDeviceGroupCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPhysicalDeviceGroupCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_678 = (uint64_t)(uintptr_t)pPhysicalDeviceGroupProperties;
-    stream->putBe64(cgen_var_678);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pPhysicalDeviceGroupProperties;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPhysicalDeviceGroupProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pPhysicalDeviceGroupCount)); ++i)
         {
-            marshal_VkPhysicalDeviceGroupProperties(stream, (VkPhysicalDeviceGroupProperties*)(pPhysicalDeviceGroupProperties + i));
+            reservedmarshal_VkPhysicalDeviceGroupProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceGroupProperties*)(pPhysicalDeviceGroupProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkEnumeratePhysicalDeviceGroups readParams");
     // WARNING PTR CHECK
     uint32_t* check_pPhysicalDeviceGroupCount;
     check_pPhysicalDeviceGroupCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -10313,41 +11315,47 @@
         {
             fprintf(stderr, "fatal: pPhysicalDeviceGroupProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pPhysicalDeviceGroupCount)); ++i)
+        if (pPhysicalDeviceGroupCount)
         {
-            unmarshal_VkPhysicalDeviceGroupProperties(stream, (VkPhysicalDeviceGroupProperties*)(pPhysicalDeviceGroupProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPhysicalDeviceGroupCount)); ++i)
+            {
+                unmarshal_VkPhysicalDeviceGroupProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceGroupProperties*)(pPhysicalDeviceGroupProperties + i));
+            }
         }
     }
-    if (pPhysicalDeviceGroupProperties)
+    if (pPhysicalDeviceGroupCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pPhysicalDeviceGroupCount)); ++i)
+        if (pPhysicalDeviceGroupProperties)
         {
-            transform_fromhost_VkPhysicalDeviceGroupProperties(mImpl->resources(), (VkPhysicalDeviceGroupProperties*)(pPhysicalDeviceGroupProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPhysicalDeviceGroupCount)); ++i)
+            {
+                transform_fromhost_VkPhysicalDeviceGroupProperties(sResourceTracker, (VkPhysicalDeviceGroupProperties*)(pPhysicalDeviceGroupProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkEnumeratePhysicalDeviceGroups returnUnmarshal");
     VkResult vkEnumeratePhysicalDeviceGroups_VkResult_return = (VkResult)0;
     stream->read(&vkEnumeratePhysicalDeviceGroups_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkEnumeratePhysicalDeviceGroups");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkEnumeratePhysicalDeviceGroups_VkResult_return;
 }
 
 void VkEncoder::vkGetImageMemoryRequirements2(
     VkDevice device,
     const VkImageMemoryRequirementsInfo2* pInfo,
-    VkMemoryRequirements2* pMemoryRequirements)
+    VkMemoryRequirements2* pMemoryRequirements,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements2 encode");
-    mImpl->log("start vkGetImageMemoryRequirements2");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImageMemoryRequirementsInfo2* local_pInfo;
     local_device = device;
@@ -10355,53 +11363,59 @@
     if (pInfo)
     {
         local_pInfo = (VkImageMemoryRequirementsInfo2*)pool->alloc(sizeof(const VkImageMemoryRequirementsInfo2));
-        deepcopy_VkImageMemoryRequirementsInfo2(pool, pInfo, (VkImageMemoryRequirementsInfo2*)(local_pInfo));
+        deepcopy_VkImageMemoryRequirementsInfo2(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkImageMemoryRequirementsInfo2*)(local_pInfo));
     }
     if (local_pInfo)
     {
-        transform_tohost_VkImageMemoryRequirementsInfo2(mImpl->resources(), (VkImageMemoryRequirementsInfo2*)(local_pInfo));
+        transform_tohost_VkImageMemoryRequirementsInfo2(sResourceTracker, (VkImageMemoryRequirementsInfo2*)(local_pInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_681;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_681, 1);
-        countingStream->write((uint64_t*)&cgen_var_681, 1 * 8);
-        marshal_VkImageMemoryRequirementsInfo2(countingStream, (VkImageMemoryRequirementsInfo2*)(local_pInfo));
-        marshal_VkMemoryRequirements2(countingStream, (VkMemoryRequirements2*)(pMemoryRequirements));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkImageMemoryRequirementsInfo2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageMemoryRequirementsInfo2*)(local_pInfo), countPtr);
+        count_VkMemoryRequirements2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2*)(pMemoryRequirements), countPtr);
     }
-    uint32_t packetSize_vkGetImageMemoryRequirements2 = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetImageMemoryRequirements2 = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetImageMemoryRequirements2);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetImageMemoryRequirements2 = OP_vkGetImageMemoryRequirements2;
-    stream->write(&opcode_vkGetImageMemoryRequirements2, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetImageMemoryRequirements2, sizeof(uint32_t));
-    uint64_t cgen_var_682;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_682, 1);
-    stream->write((uint64_t*)&cgen_var_682, 1 * 8);
-    marshal_VkImageMemoryRequirementsInfo2(stream, (VkImageMemoryRequirementsInfo2*)(local_pInfo));
-    marshal_VkMemoryRequirements2(stream, (VkMemoryRequirements2*)(pMemoryRequirements));
-    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements2 readParams");
-    unmarshal_VkMemoryRequirements2(stream, (VkMemoryRequirements2*)(pMemoryRequirements));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetImageMemoryRequirements2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetImageMemoryRequirements2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkImageMemoryRequirementsInfo2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageMemoryRequirementsInfo2*)(local_pInfo), streamPtrPtr);
+    reservedmarshal_VkMemoryRequirements2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2*)(pMemoryRequirements), streamPtrPtr);
+    unmarshal_VkMemoryRequirements2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2*)(pMemoryRequirements));
     if (pMemoryRequirements)
     {
-        transform_fromhost_VkMemoryRequirements2(mImpl->resources(), (VkMemoryRequirements2*)(pMemoryRequirements));
+        transform_fromhost_VkMemoryRequirements2(sResourceTracker, (VkMemoryRequirements2*)(pMemoryRequirements));
     }
-    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements2 returnUnmarshal");
-    mImpl->log("finish vkGetImageMemoryRequirements2");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetBufferMemoryRequirements2(
     VkDevice device,
     const VkBufferMemoryRequirementsInfo2* pInfo,
-    VkMemoryRequirements2* pMemoryRequirements)
+    VkMemoryRequirements2* pMemoryRequirements,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements2 encode");
-    mImpl->log("start vkGetBufferMemoryRequirements2");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkBufferMemoryRequirementsInfo2* local_pInfo;
     local_device = device;
@@ -10409,54 +11423,60 @@
     if (pInfo)
     {
         local_pInfo = (VkBufferMemoryRequirementsInfo2*)pool->alloc(sizeof(const VkBufferMemoryRequirementsInfo2));
-        deepcopy_VkBufferMemoryRequirementsInfo2(pool, pInfo, (VkBufferMemoryRequirementsInfo2*)(local_pInfo));
+        deepcopy_VkBufferMemoryRequirementsInfo2(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkBufferMemoryRequirementsInfo2*)(local_pInfo));
     }
     if (local_pInfo)
     {
-        transform_tohost_VkBufferMemoryRequirementsInfo2(mImpl->resources(), (VkBufferMemoryRequirementsInfo2*)(local_pInfo));
+        transform_tohost_VkBufferMemoryRequirementsInfo2(sResourceTracker, (VkBufferMemoryRequirementsInfo2*)(local_pInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_683;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_683, 1);
-        countingStream->write((uint64_t*)&cgen_var_683, 1 * 8);
-        marshal_VkBufferMemoryRequirementsInfo2(countingStream, (VkBufferMemoryRequirementsInfo2*)(local_pInfo));
-        marshal_VkMemoryRequirements2(countingStream, (VkMemoryRequirements2*)(pMemoryRequirements));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkBufferMemoryRequirementsInfo2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferMemoryRequirementsInfo2*)(local_pInfo), countPtr);
+        count_VkMemoryRequirements2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2*)(pMemoryRequirements), countPtr);
     }
-    uint32_t packetSize_vkGetBufferMemoryRequirements2 = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetBufferMemoryRequirements2 = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetBufferMemoryRequirements2);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetBufferMemoryRequirements2 = OP_vkGetBufferMemoryRequirements2;
-    stream->write(&opcode_vkGetBufferMemoryRequirements2, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetBufferMemoryRequirements2, sizeof(uint32_t));
-    uint64_t cgen_var_684;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_684, 1);
-    stream->write((uint64_t*)&cgen_var_684, 1 * 8);
-    marshal_VkBufferMemoryRequirementsInfo2(stream, (VkBufferMemoryRequirementsInfo2*)(local_pInfo));
-    marshal_VkMemoryRequirements2(stream, (VkMemoryRequirements2*)(pMemoryRequirements));
-    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements2 readParams");
-    unmarshal_VkMemoryRequirements2(stream, (VkMemoryRequirements2*)(pMemoryRequirements));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetBufferMemoryRequirements2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetBufferMemoryRequirements2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkBufferMemoryRequirementsInfo2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferMemoryRequirementsInfo2*)(local_pInfo), streamPtrPtr);
+    reservedmarshal_VkMemoryRequirements2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2*)(pMemoryRequirements), streamPtrPtr);
+    unmarshal_VkMemoryRequirements2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2*)(pMemoryRequirements));
     if (pMemoryRequirements)
     {
-        transform_fromhost_VkMemoryRequirements2(mImpl->resources(), (VkMemoryRequirements2*)(pMemoryRequirements));
+        transform_fromhost_VkMemoryRequirements2(sResourceTracker, (VkMemoryRequirements2*)(pMemoryRequirements));
     }
-    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements2 returnUnmarshal");
-    mImpl->log("finish vkGetBufferMemoryRequirements2");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetImageSparseMemoryRequirements2(
     VkDevice device,
     const VkImageSparseMemoryRequirementsInfo2* pInfo,
     uint32_t* pSparseMemoryRequirementCount,
-    VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
+    VkSparseImageMemoryRequirements2* pSparseMemoryRequirements,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements2 encode");
-    mImpl->log("start vkGetImageSparseMemoryRequirements2");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImageSparseMemoryRequirementsInfo2* local_pInfo;
     local_device = device;
@@ -10464,63 +11484,72 @@
     if (pInfo)
     {
         local_pInfo = (VkImageSparseMemoryRequirementsInfo2*)pool->alloc(sizeof(const VkImageSparseMemoryRequirementsInfo2));
-        deepcopy_VkImageSparseMemoryRequirementsInfo2(pool, pInfo, (VkImageSparseMemoryRequirementsInfo2*)(local_pInfo));
+        deepcopy_VkImageSparseMemoryRequirementsInfo2(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkImageSparseMemoryRequirementsInfo2*)(local_pInfo));
     }
     if (local_pInfo)
     {
-        transform_tohost_VkImageSparseMemoryRequirementsInfo2(mImpl->resources(), (VkImageSparseMemoryRequirementsInfo2*)(local_pInfo));
+        transform_tohost_VkImageSparseMemoryRequirementsInfo2(sResourceTracker, (VkImageSparseMemoryRequirementsInfo2*)(local_pInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_685;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_685, 1);
-        countingStream->write((uint64_t*)&cgen_var_685, 1 * 8);
-        marshal_VkImageSparseMemoryRequirementsInfo2(countingStream, (VkImageSparseMemoryRequirementsInfo2*)(local_pInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkImageSparseMemoryRequirementsInfo2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageSparseMemoryRequirementsInfo2*)(local_pInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_686 = (uint64_t)(uintptr_t)pSparseMemoryRequirementCount;
-        countingStream->putBe64(cgen_var_686);
+        *countPtr += 8;
         if (pSparseMemoryRequirementCount)
         {
-            countingStream->write((uint32_t*)pSparseMemoryRequirementCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_687 = (uint64_t)(uintptr_t)pSparseMemoryRequirements;
-        countingStream->putBe64(cgen_var_687);
+        *countPtr += 8;
         if (pSparseMemoryRequirements)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+            if (pSparseMemoryRequirementCount)
             {
-                marshal_VkSparseImageMemoryRequirements2(countingStream, (VkSparseImageMemoryRequirements2*)(pSparseMemoryRequirements + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+                {
+                    count_VkSparseImageMemoryRequirements2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageMemoryRequirements2*)(pSparseMemoryRequirements + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetImageSparseMemoryRequirements2 = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetImageSparseMemoryRequirements2 = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetImageSparseMemoryRequirements2);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetImageSparseMemoryRequirements2 = OP_vkGetImageSparseMemoryRequirements2;
-    stream->write(&opcode_vkGetImageSparseMemoryRequirements2, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetImageSparseMemoryRequirements2, sizeof(uint32_t));
-    uint64_t cgen_var_688;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_688, 1);
-    stream->write((uint64_t*)&cgen_var_688, 1 * 8);
-    marshal_VkImageSparseMemoryRequirementsInfo2(stream, (VkImageSparseMemoryRequirementsInfo2*)(local_pInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetImageSparseMemoryRequirements2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetImageSparseMemoryRequirements2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkImageSparseMemoryRequirementsInfo2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageSparseMemoryRequirementsInfo2*)(local_pInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_689 = (uint64_t)(uintptr_t)pSparseMemoryRequirementCount;
-    stream->putBe64(cgen_var_689);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pSparseMemoryRequirementCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pSparseMemoryRequirementCount)
     {
-        stream->write((uint32_t*)pSparseMemoryRequirementCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pSparseMemoryRequirementCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_690 = (uint64_t)(uintptr_t)pSparseMemoryRequirements;
-    stream->putBe64(cgen_var_690);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pSparseMemoryRequirements;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pSparseMemoryRequirements)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
         {
-            marshal_VkSparseImageMemoryRequirements2(stream, (VkSparseImageMemoryRequirements2*)(pSparseMemoryRequirements + i));
+            reservedmarshal_VkSparseImageMemoryRequirements2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageMemoryRequirements2*)(pSparseMemoryRequirements + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements2 readParams");
     // WARNING PTR CHECK
     uint32_t* check_pSparseMemoryRequirementCount;
     check_pSparseMemoryRequirementCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -10541,160 +11570,189 @@
         {
             fprintf(stderr, "fatal: pSparseMemoryRequirements inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+        if (pSparseMemoryRequirementCount)
         {
-            unmarshal_VkSparseImageMemoryRequirements2(stream, (VkSparseImageMemoryRequirements2*)(pSparseMemoryRequirements + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+            {
+                unmarshal_VkSparseImageMemoryRequirements2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageMemoryRequirements2*)(pSparseMemoryRequirements + i));
+            }
         }
     }
-    if (pSparseMemoryRequirements)
+    if (pSparseMemoryRequirementCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+        if (pSparseMemoryRequirements)
         {
-            transform_fromhost_VkSparseImageMemoryRequirements2(mImpl->resources(), (VkSparseImageMemoryRequirements2*)(pSparseMemoryRequirements + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+            {
+                transform_fromhost_VkSparseImageMemoryRequirements2(sResourceTracker, (VkSparseImageMemoryRequirements2*)(pSparseMemoryRequirements + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements2 returnUnmarshal");
-    mImpl->log("finish vkGetImageSparseMemoryRequirements2");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetPhysicalDeviceFeatures2(
     VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceFeatures2* pFeatures)
+    VkPhysicalDeviceFeatures2* pFeatures,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFeatures2 encode");
-    mImpl->log("start vkGetPhysicalDeviceFeatures2");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     local_physicalDevice = physicalDevice;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_693;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_693, 1);
-        countingStream->write((uint64_t*)&cgen_var_693, 1 * 8);
-        marshal_VkPhysicalDeviceFeatures2(countingStream, (VkPhysicalDeviceFeatures2*)(pFeatures));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceFeatures2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceFeatures2*)(pFeatures), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceFeatures2 = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceFeatures2 = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceFeatures2);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceFeatures2 = OP_vkGetPhysicalDeviceFeatures2;
-    stream->write(&opcode_vkGetPhysicalDeviceFeatures2, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceFeatures2, sizeof(uint32_t));
-    uint64_t cgen_var_694;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_694, 1);
-    stream->write((uint64_t*)&cgen_var_694, 1 * 8);
-    marshal_VkPhysicalDeviceFeatures2(stream, (VkPhysicalDeviceFeatures2*)(pFeatures));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFeatures2 readParams");
-    unmarshal_VkPhysicalDeviceFeatures2(stream, (VkPhysicalDeviceFeatures2*)(pFeatures));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceFeatures2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceFeatures2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceFeatures2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceFeatures2*)(pFeatures), streamPtrPtr);
+    unmarshal_VkPhysicalDeviceFeatures2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceFeatures2*)(pFeatures));
     if (pFeatures)
     {
-        transform_fromhost_VkPhysicalDeviceFeatures2(mImpl->resources(), (VkPhysicalDeviceFeatures2*)(pFeatures));
+        transform_fromhost_VkPhysicalDeviceFeatures2(sResourceTracker, (VkPhysicalDeviceFeatures2*)(pFeatures));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFeatures2 returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceFeatures2");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetPhysicalDeviceProperties2(
     VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceProperties2* pProperties)
+    VkPhysicalDeviceProperties2* pProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceProperties2 encode");
-    mImpl->log("start vkGetPhysicalDeviceProperties2");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     local_physicalDevice = physicalDevice;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_695;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_695, 1);
-        countingStream->write((uint64_t*)&cgen_var_695, 1 * 8);
-        marshal_VkPhysicalDeviceProperties2(countingStream, (VkPhysicalDeviceProperties2*)(pProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceProperties2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceProperties2*)(pProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceProperties2 = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceProperties2 = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceProperties2);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceProperties2 = OP_vkGetPhysicalDeviceProperties2;
-    stream->write(&opcode_vkGetPhysicalDeviceProperties2, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceProperties2, sizeof(uint32_t));
-    uint64_t cgen_var_696;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_696, 1);
-    stream->write((uint64_t*)&cgen_var_696, 1 * 8);
-    marshal_VkPhysicalDeviceProperties2(stream, (VkPhysicalDeviceProperties2*)(pProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceProperties2 readParams");
-    unmarshal_VkPhysicalDeviceProperties2(stream, (VkPhysicalDeviceProperties2*)(pProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceProperties2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceProperties2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceProperties2*)(pProperties), streamPtrPtr);
+    unmarshal_VkPhysicalDeviceProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceProperties2*)(pProperties));
     if (pProperties)
     {
-        transform_fromhost_VkPhysicalDeviceProperties2(mImpl->resources(), (VkPhysicalDeviceProperties2*)(pProperties));
+        transform_fromhost_VkPhysicalDeviceProperties2(sResourceTracker, (VkPhysicalDeviceProperties2*)(pProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceProperties2 returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceProperties2");;
+    sResourceTracker->on_vkGetPhysicalDeviceProperties2(this, physicalDevice, pProperties);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetPhysicalDeviceFormatProperties2(
     VkPhysicalDevice physicalDevice,
     VkFormat format,
-    VkFormatProperties2* pFormatProperties)
+    VkFormatProperties2* pFormatProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFormatProperties2 encode");
-    mImpl->log("start vkGetPhysicalDeviceFormatProperties2");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkFormat local_format;
     local_physicalDevice = physicalDevice;
     local_format = format;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_697;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_697, 1);
-        countingStream->write((uint64_t*)&cgen_var_697, 1 * 8);
-        countingStream->write((VkFormat*)&local_format, sizeof(VkFormat));
-        marshal_VkFormatProperties2(countingStream, (VkFormatProperties2*)(pFormatProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkFormat);
+        count_VkFormatProperties2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFormatProperties2*)(pFormatProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceFormatProperties2 = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceFormatProperties2 = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceFormatProperties2);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceFormatProperties2 = OP_vkGetPhysicalDeviceFormatProperties2;
-    stream->write(&opcode_vkGetPhysicalDeviceFormatProperties2, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceFormatProperties2, sizeof(uint32_t));
-    uint64_t cgen_var_698;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_698, 1);
-    stream->write((uint64_t*)&cgen_var_698, 1 * 8);
-    stream->write((VkFormat*)&local_format, sizeof(VkFormat));
-    marshal_VkFormatProperties2(stream, (VkFormatProperties2*)(pFormatProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFormatProperties2 readParams");
-    unmarshal_VkFormatProperties2(stream, (VkFormatProperties2*)(pFormatProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceFormatProperties2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceFormatProperties2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkFormat*)&local_format, sizeof(VkFormat));
+    *streamPtrPtr += sizeof(VkFormat);
+    reservedmarshal_VkFormatProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFormatProperties2*)(pFormatProperties), streamPtrPtr);
+    unmarshal_VkFormatProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFormatProperties2*)(pFormatProperties));
     if (pFormatProperties)
     {
-        transform_fromhost_VkFormatProperties2(mImpl->resources(), (VkFormatProperties2*)(pFormatProperties));
+        transform_fromhost_VkFormatProperties2(sResourceTracker, (VkFormatProperties2*)(pFormatProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFormatProperties2 returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceFormatProperties2");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkGetPhysicalDeviceImageFormatProperties2(
     VkPhysicalDevice physicalDevice,
     const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
-    VkImageFormatProperties2* pImageFormatProperties)
+    VkImageFormatProperties2* pImageFormatProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceImageFormatProperties2 encode");
-    mImpl->log("start vkGetPhysicalDeviceImageFormatProperties2");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkPhysicalDeviceImageFormatInfo2* local_pImageFormatInfo;
     local_physicalDevice = physicalDevice;
@@ -10702,110 +11760,122 @@
     if (pImageFormatInfo)
     {
         local_pImageFormatInfo = (VkPhysicalDeviceImageFormatInfo2*)pool->alloc(sizeof(const VkPhysicalDeviceImageFormatInfo2));
-        deepcopy_VkPhysicalDeviceImageFormatInfo2(pool, pImageFormatInfo, (VkPhysicalDeviceImageFormatInfo2*)(local_pImageFormatInfo));
+        deepcopy_VkPhysicalDeviceImageFormatInfo2(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pImageFormatInfo, (VkPhysicalDeviceImageFormatInfo2*)(local_pImageFormatInfo));
     }
     if (local_pImageFormatInfo)
     {
-        transform_tohost_VkPhysicalDeviceImageFormatInfo2(mImpl->resources(), (VkPhysicalDeviceImageFormatInfo2*)(local_pImageFormatInfo));
+        transform_tohost_VkPhysicalDeviceImageFormatInfo2(sResourceTracker, (VkPhysicalDeviceImageFormatInfo2*)(local_pImageFormatInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_699;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_699, 1);
-        countingStream->write((uint64_t*)&cgen_var_699, 1 * 8);
-        marshal_VkPhysicalDeviceImageFormatInfo2(countingStream, (VkPhysicalDeviceImageFormatInfo2*)(local_pImageFormatInfo));
-        marshal_VkImageFormatProperties2(countingStream, (VkImageFormatProperties2*)(pImageFormatProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceImageFormatInfo2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceImageFormatInfo2*)(local_pImageFormatInfo), countPtr);
+        count_VkImageFormatProperties2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageFormatProperties2*)(pImageFormatProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceImageFormatProperties2 = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceImageFormatProperties2 = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceImageFormatProperties2);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceImageFormatProperties2 = OP_vkGetPhysicalDeviceImageFormatProperties2;
-    stream->write(&opcode_vkGetPhysicalDeviceImageFormatProperties2, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceImageFormatProperties2, sizeof(uint32_t));
-    uint64_t cgen_var_700;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_700, 1);
-    stream->write((uint64_t*)&cgen_var_700, 1 * 8);
-    marshal_VkPhysicalDeviceImageFormatInfo2(stream, (VkPhysicalDeviceImageFormatInfo2*)(local_pImageFormatInfo));
-    marshal_VkImageFormatProperties2(stream, (VkImageFormatProperties2*)(pImageFormatProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceImageFormatProperties2 readParams");
-    unmarshal_VkImageFormatProperties2(stream, (VkImageFormatProperties2*)(pImageFormatProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceImageFormatProperties2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceImageFormatProperties2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceImageFormatInfo2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceImageFormatInfo2*)(local_pImageFormatInfo), streamPtrPtr);
+    reservedmarshal_VkImageFormatProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageFormatProperties2*)(pImageFormatProperties), streamPtrPtr);
+    unmarshal_VkImageFormatProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageFormatProperties2*)(pImageFormatProperties));
     if (pImageFormatProperties)
     {
-        transform_fromhost_VkImageFormatProperties2(mImpl->resources(), (VkImageFormatProperties2*)(pImageFormatProperties));
+        transform_fromhost_VkImageFormatProperties2(sResourceTracker, (VkImageFormatProperties2*)(pImageFormatProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceImageFormatProperties2 returnUnmarshal");
     VkResult vkGetPhysicalDeviceImageFormatProperties2_VkResult_return = (VkResult)0;
     stream->read(&vkGetPhysicalDeviceImageFormatProperties2_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceImageFormatProperties2");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceImageFormatProperties2_VkResult_return;
 }
 
 void VkEncoder::vkGetPhysicalDeviceQueueFamilyProperties2(
     VkPhysicalDevice physicalDevice,
     uint32_t* pQueueFamilyPropertyCount,
-    VkQueueFamilyProperties2* pQueueFamilyProperties)
+    VkQueueFamilyProperties2* pQueueFamilyProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceQueueFamilyProperties2 encode");
-    mImpl->log("start vkGetPhysicalDeviceQueueFamilyProperties2");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     local_physicalDevice = physicalDevice;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_701;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_701, 1);
-        countingStream->write((uint64_t*)&cgen_var_701, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_702 = (uint64_t)(uintptr_t)pQueueFamilyPropertyCount;
-        countingStream->putBe64(cgen_var_702);
+        *countPtr += 8;
         if (pQueueFamilyPropertyCount)
         {
-            countingStream->write((uint32_t*)pQueueFamilyPropertyCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_703 = (uint64_t)(uintptr_t)pQueueFamilyProperties;
-        countingStream->putBe64(cgen_var_703);
+        *countPtr += 8;
         if (pQueueFamilyProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+            if (pQueueFamilyPropertyCount)
             {
-                marshal_VkQueueFamilyProperties2(countingStream, (VkQueueFamilyProperties2*)(pQueueFamilyProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+                {
+                    count_VkQueueFamilyProperties2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkQueueFamilyProperties2*)(pQueueFamilyProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetPhysicalDeviceQueueFamilyProperties2 = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceQueueFamilyProperties2 = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceQueueFamilyProperties2);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceQueueFamilyProperties2 = OP_vkGetPhysicalDeviceQueueFamilyProperties2;
-    stream->write(&opcode_vkGetPhysicalDeviceQueueFamilyProperties2, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceQueueFamilyProperties2, sizeof(uint32_t));
-    uint64_t cgen_var_704;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_704, 1);
-    stream->write((uint64_t*)&cgen_var_704, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceQueueFamilyProperties2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceQueueFamilyProperties2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_705 = (uint64_t)(uintptr_t)pQueueFamilyPropertyCount;
-    stream->putBe64(cgen_var_705);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pQueueFamilyPropertyCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pQueueFamilyPropertyCount)
     {
-        stream->write((uint32_t*)pQueueFamilyPropertyCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pQueueFamilyPropertyCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_706 = (uint64_t)(uintptr_t)pQueueFamilyProperties;
-    stream->putBe64(cgen_var_706);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pQueueFamilyProperties;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pQueueFamilyProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
         {
-            marshal_VkQueueFamilyProperties2(stream, (VkQueueFamilyProperties2*)(pQueueFamilyProperties + i));
+            reservedmarshal_VkQueueFamilyProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkQueueFamilyProperties2*)(pQueueFamilyProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceQueueFamilyProperties2 readParams");
     // WARNING PTR CHECK
     uint32_t* check_pQueueFamilyPropertyCount;
     check_pQueueFamilyPropertyCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -10826,79 +11896,92 @@
         {
             fprintf(stderr, "fatal: pQueueFamilyProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+        if (pQueueFamilyPropertyCount)
         {
-            unmarshal_VkQueueFamilyProperties2(stream, (VkQueueFamilyProperties2*)(pQueueFamilyProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+            {
+                unmarshal_VkQueueFamilyProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkQueueFamilyProperties2*)(pQueueFamilyProperties + i));
+            }
         }
     }
-    if (pQueueFamilyProperties)
+    if (pQueueFamilyPropertyCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+        if (pQueueFamilyProperties)
         {
-            transform_fromhost_VkQueueFamilyProperties2(mImpl->resources(), (VkQueueFamilyProperties2*)(pQueueFamilyProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+            {
+                transform_fromhost_VkQueueFamilyProperties2(sResourceTracker, (VkQueueFamilyProperties2*)(pQueueFamilyProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceQueueFamilyProperties2 returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceQueueFamilyProperties2");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetPhysicalDeviceMemoryProperties2(
     VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceMemoryProperties2* pMemoryProperties)
+    VkPhysicalDeviceMemoryProperties2* pMemoryProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMemoryProperties2 encode");
-    mImpl->log("start vkGetPhysicalDeviceMemoryProperties2");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     local_physicalDevice = physicalDevice;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_709;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_709, 1);
-        countingStream->write((uint64_t*)&cgen_var_709, 1 * 8);
-        marshal_VkPhysicalDeviceMemoryProperties2(countingStream, (VkPhysicalDeviceMemoryProperties2*)(pMemoryProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceMemoryProperties2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceMemoryProperties2*)(pMemoryProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceMemoryProperties2 = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceMemoryProperties2 = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceMemoryProperties2);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceMemoryProperties2 = OP_vkGetPhysicalDeviceMemoryProperties2;
-    stream->write(&opcode_vkGetPhysicalDeviceMemoryProperties2, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceMemoryProperties2, sizeof(uint32_t));
-    uint64_t cgen_var_710;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_710, 1);
-    stream->write((uint64_t*)&cgen_var_710, 1 * 8);
-    marshal_VkPhysicalDeviceMemoryProperties2(stream, (VkPhysicalDeviceMemoryProperties2*)(pMemoryProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMemoryProperties2 readParams");
-    unmarshal_VkPhysicalDeviceMemoryProperties2(stream, (VkPhysicalDeviceMemoryProperties2*)(pMemoryProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceMemoryProperties2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceMemoryProperties2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceMemoryProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceMemoryProperties2*)(pMemoryProperties), streamPtrPtr);
+    unmarshal_VkPhysicalDeviceMemoryProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceMemoryProperties2*)(pMemoryProperties));
     if (pMemoryProperties)
     {
-        transform_fromhost_VkPhysicalDeviceMemoryProperties2(mImpl->resources(), (VkPhysicalDeviceMemoryProperties2*)(pMemoryProperties));
+        transform_fromhost_VkPhysicalDeviceMemoryProperties2(sResourceTracker, (VkPhysicalDeviceMemoryProperties2*)(pMemoryProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMemoryProperties2 returnUnmarshal");
-    encoderLock.unlock();
-    mImpl->resources()->on_vkGetPhysicalDeviceMemoryProperties2(this, physicalDevice, pMemoryProperties);
-    encoderLock.lock();
-    mImpl->log("finish vkGetPhysicalDeviceMemoryProperties2");;
+    sResourceTracker->on_vkGetPhysicalDeviceMemoryProperties2(this, physicalDevice, pMemoryProperties);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetPhysicalDeviceSparseImageFormatProperties2(
     VkPhysicalDevice physicalDevice,
     const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo,
     uint32_t* pPropertyCount,
-    VkSparseImageFormatProperties2* pProperties)
+    VkSparseImageFormatProperties2* pProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSparseImageFormatProperties2 encode");
-    mImpl->log("start vkGetPhysicalDeviceSparseImageFormatProperties2");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkPhysicalDeviceSparseImageFormatInfo2* local_pFormatInfo;
     local_physicalDevice = physicalDevice;
@@ -10906,63 +11989,72 @@
     if (pFormatInfo)
     {
         local_pFormatInfo = (VkPhysicalDeviceSparseImageFormatInfo2*)pool->alloc(sizeof(const VkPhysicalDeviceSparseImageFormatInfo2));
-        deepcopy_VkPhysicalDeviceSparseImageFormatInfo2(pool, pFormatInfo, (VkPhysicalDeviceSparseImageFormatInfo2*)(local_pFormatInfo));
+        deepcopy_VkPhysicalDeviceSparseImageFormatInfo2(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pFormatInfo, (VkPhysicalDeviceSparseImageFormatInfo2*)(local_pFormatInfo));
     }
     if (local_pFormatInfo)
     {
-        transform_tohost_VkPhysicalDeviceSparseImageFormatInfo2(mImpl->resources(), (VkPhysicalDeviceSparseImageFormatInfo2*)(local_pFormatInfo));
+        transform_tohost_VkPhysicalDeviceSparseImageFormatInfo2(sResourceTracker, (VkPhysicalDeviceSparseImageFormatInfo2*)(local_pFormatInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_711;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_711, 1);
-        countingStream->write((uint64_t*)&cgen_var_711, 1 * 8);
-        marshal_VkPhysicalDeviceSparseImageFormatInfo2(countingStream, (VkPhysicalDeviceSparseImageFormatInfo2*)(local_pFormatInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceSparseImageFormatInfo2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceSparseImageFormatInfo2*)(local_pFormatInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_712 = (uint64_t)(uintptr_t)pPropertyCount;
-        countingStream->putBe64(cgen_var_712);
+        *countPtr += 8;
         if (pPropertyCount)
         {
-            countingStream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_713 = (uint64_t)(uintptr_t)pProperties;
-        countingStream->putBe64(cgen_var_713);
+        *countPtr += 8;
         if (pProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            if (pPropertyCount)
             {
-                marshal_VkSparseImageFormatProperties2(countingStream, (VkSparseImageFormatProperties2*)(pProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+                {
+                    count_VkSparseImageFormatProperties2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageFormatProperties2*)(pProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetPhysicalDeviceSparseImageFormatProperties2 = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceSparseImageFormatProperties2 = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceSparseImageFormatProperties2);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceSparseImageFormatProperties2 = OP_vkGetPhysicalDeviceSparseImageFormatProperties2;
-    stream->write(&opcode_vkGetPhysicalDeviceSparseImageFormatProperties2, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceSparseImageFormatProperties2, sizeof(uint32_t));
-    uint64_t cgen_var_714;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_714, 1);
-    stream->write((uint64_t*)&cgen_var_714, 1 * 8);
-    marshal_VkPhysicalDeviceSparseImageFormatInfo2(stream, (VkPhysicalDeviceSparseImageFormatInfo2*)(local_pFormatInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceSparseImageFormatProperties2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceSparseImageFormatProperties2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceSparseImageFormatInfo2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceSparseImageFormatInfo2*)(local_pFormatInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_715 = (uint64_t)(uintptr_t)pPropertyCount;
-    stream->putBe64(cgen_var_715);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pPropertyCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPropertyCount)
     {
-        stream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPropertyCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_716 = (uint64_t)(uintptr_t)pProperties;
-    stream->putBe64(cgen_var_716);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pProperties;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
         {
-            marshal_VkSparseImageFormatProperties2(stream, (VkSparseImageFormatProperties2*)(pProperties + i));
+            reservedmarshal_VkSparseImageFormatProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageFormatProperties2*)(pProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSparseImageFormatProperties2 readParams");
     // WARNING PTR CHECK
     uint32_t* check_pPropertyCount;
     check_pPropertyCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -10983,81 +12075,98 @@
         {
             fprintf(stderr, "fatal: pProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pPropertyCount)
         {
-            unmarshal_VkSparseImageFormatProperties2(stream, (VkSparseImageFormatProperties2*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                unmarshal_VkSparseImageFormatProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageFormatProperties2*)(pProperties + i));
+            }
         }
     }
-    if (pProperties)
+    if (pPropertyCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pProperties)
         {
-            transform_fromhost_VkSparseImageFormatProperties2(mImpl->resources(), (VkSparseImageFormatProperties2*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                transform_fromhost_VkSparseImageFormatProperties2(sResourceTracker, (VkSparseImageFormatProperties2*)(pProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSparseImageFormatProperties2 returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceSparseImageFormatProperties2");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkTrimCommandPool(
     VkDevice device,
     VkCommandPool commandPool,
-    VkCommandPoolTrimFlags flags)
+    VkCommandPoolTrimFlags flags,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkTrimCommandPool encode");
-    mImpl->log("start vkTrimCommandPool");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkCommandPool local_commandPool;
     VkCommandPoolTrimFlags local_flags;
     local_device = device;
     local_commandPool = commandPool;
     local_flags = flags;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_719;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_719, 1);
-        countingStream->write((uint64_t*)&cgen_var_719, 1 * 8);
-        uint64_t cgen_var_720;
-        countingStream->handleMapping()->mapHandles_VkCommandPool_u64(&local_commandPool, &cgen_var_720, 1);
-        countingStream->write((uint64_t*)&cgen_var_720, 1 * 8);
-        countingStream->write((VkCommandPoolTrimFlags*)&local_flags, sizeof(VkCommandPoolTrimFlags));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkCommandPoolTrimFlags);
     }
-    uint32_t packetSize_vkTrimCommandPool = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkTrimCommandPool = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkTrimCommandPool);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkTrimCommandPool = OP_vkTrimCommandPool;
-    stream->write(&opcode_vkTrimCommandPool, sizeof(uint32_t));
-    stream->write(&packetSize_vkTrimCommandPool, sizeof(uint32_t));
-    uint64_t cgen_var_721;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_721, 1);
-    stream->write((uint64_t*)&cgen_var_721, 1 * 8);
-    uint64_t cgen_var_722;
-    stream->handleMapping()->mapHandles_VkCommandPool_u64(&local_commandPool, &cgen_var_722, 1);
-    stream->write((uint64_t*)&cgen_var_722, 1 * 8);
-    stream->write((VkCommandPoolTrimFlags*)&local_flags, sizeof(VkCommandPoolTrimFlags));
-    AEMU_SCOPED_TRACE("vkTrimCommandPool readParams");
-    AEMU_SCOPED_TRACE("vkTrimCommandPool returnUnmarshal");
-    mImpl->log("finish vkTrimCommandPool");;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkTrimCommandPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkTrimCommandPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkCommandPool((*&local_commandPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkCommandPoolTrimFlags*)&local_flags, sizeof(VkCommandPoolTrimFlags));
+    *streamPtrPtr += sizeof(VkCommandPoolTrimFlags);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetDeviceQueue2(
     VkDevice device,
     const VkDeviceQueueInfo2* pQueueInfo,
-    VkQueue* pQueue)
+    VkQueue* pQueue,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetDeviceQueue2 encode");
-    mImpl->log("start vkGetDeviceQueue2");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDeviceQueueInfo2* local_pQueueInfo;
     local_device = device;
@@ -11065,58 +12174,65 @@
     if (pQueueInfo)
     {
         local_pQueueInfo = (VkDeviceQueueInfo2*)pool->alloc(sizeof(const VkDeviceQueueInfo2));
-        deepcopy_VkDeviceQueueInfo2(pool, pQueueInfo, (VkDeviceQueueInfo2*)(local_pQueueInfo));
+        deepcopy_VkDeviceQueueInfo2(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pQueueInfo, (VkDeviceQueueInfo2*)(local_pQueueInfo));
     }
     if (local_pQueueInfo)
     {
-        transform_tohost_VkDeviceQueueInfo2(mImpl->resources(), (VkDeviceQueueInfo2*)(local_pQueueInfo));
+        transform_tohost_VkDeviceQueueInfo2(sResourceTracker, (VkDeviceQueueInfo2*)(local_pQueueInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_723;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_723, 1);
-        countingStream->write((uint64_t*)&cgen_var_723, 1 * 8);
-        marshal_VkDeviceQueueInfo2(countingStream, (VkDeviceQueueInfo2*)(local_pQueueInfo));
-        uint64_t cgen_var_724;
-        countingStream->handleMapping()->mapHandles_VkQueue_u64(pQueue, &cgen_var_724, 1);
-        countingStream->write((uint64_t*)&cgen_var_724, 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDeviceQueueInfo2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDeviceQueueInfo2*)(local_pQueueInfo), countPtr);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkGetDeviceQueue2 = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetDeviceQueue2 = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDeviceQueue2);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetDeviceQueue2 = OP_vkGetDeviceQueue2;
-    stream->write(&opcode_vkGetDeviceQueue2, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetDeviceQueue2, sizeof(uint32_t));
-    uint64_t cgen_var_725;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_725, 1);
-    stream->write((uint64_t*)&cgen_var_725, 1 * 8);
-    marshal_VkDeviceQueueInfo2(stream, (VkDeviceQueueInfo2*)(local_pQueueInfo));
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_726;
-    stream->handleMapping()->mapHandles_VkQueue_u64(pQueue, &cgen_var_726, 1);
-    stream->write((uint64_t*)&cgen_var_726, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkGetDeviceQueue2 readParams");
-    uint64_t cgen_var_727;
-    stream->read((uint64_t*)&cgen_var_727, 8);
-    stream->handleMapping()->mapHandles_u64_VkQueue(&cgen_var_727, (VkQueue*)pQueue, 1);
-    AEMU_SCOPED_TRACE("vkGetDeviceQueue2 returnUnmarshal");
-    mImpl->log("finish vkGetDeviceQueue2");;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDeviceQueue2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDeviceQueue2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDeviceQueueInfo2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDeviceQueueInfo2*)(local_pQueueInfo), streamPtrPtr);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = (uint64_t)((*pQueue));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    stream->read((uint64_t*)&cgen_var_2, 8);
+    stream->handleMapping()->mapHandles_u64_VkQueue(&cgen_var_2, (VkQueue*)pQueue, 1);
+    sResourceTracker->on_vkGetDeviceQueue2(this, device, pQueueInfo, pQueue);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkCreateSamplerYcbcrConversion(
     VkDevice device,
     const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkSamplerYcbcrConversion* pYcbcrConversion)
+    VkSamplerYcbcrConversion* pYcbcrConversion,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateSamplerYcbcrConversion encode");
-    mImpl->log("start vkCreateSamplerYcbcrConversion");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSamplerYcbcrConversionCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -11125,90 +12241,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkSamplerYcbcrConversionCreateInfo*)pool->alloc(sizeof(const VkSamplerYcbcrConversionCreateInfo));
-        deepcopy_VkSamplerYcbcrConversionCreateInfo(pool, pCreateInfo, (VkSamplerYcbcrConversionCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkSamplerYcbcrConversionCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkSamplerYcbcrConversionCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkSamplerYcbcrConversionCreateInfo(mImpl->resources(), (VkSamplerYcbcrConversionCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkSamplerYcbcrConversionCreateInfo(sResourceTracker, (VkSamplerYcbcrConversionCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_728;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_728, 1);
-        countingStream->write((uint64_t*)&cgen_var_728, 1 * 8);
-        marshal_VkSamplerYcbcrConversionCreateInfo(countingStream, (VkSamplerYcbcrConversionCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkSamplerYcbcrConversionCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSamplerYcbcrConversionCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_729 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_729);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_730;
-        countingStream->handleMapping()->mapHandles_VkSamplerYcbcrConversion_u64(pYcbcrConversion, &cgen_var_730, 1);
-        countingStream->write((uint64_t*)&cgen_var_730, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateSamplerYcbcrConversion = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateSamplerYcbcrConversion = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateSamplerYcbcrConversion);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateSamplerYcbcrConversion = OP_vkCreateSamplerYcbcrConversion;
-    stream->write(&opcode_vkCreateSamplerYcbcrConversion, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateSamplerYcbcrConversion, sizeof(uint32_t));
-    uint64_t cgen_var_731;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_731, 1);
-    stream->write((uint64_t*)&cgen_var_731, 1 * 8);
-    marshal_VkSamplerYcbcrConversionCreateInfo(stream, (VkSamplerYcbcrConversionCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateSamplerYcbcrConversion, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateSamplerYcbcrConversion, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkSamplerYcbcrConversionCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSamplerYcbcrConversionCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_732 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_732);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_733;
-    stream->handleMapping()->mapHandles_VkSamplerYcbcrConversion_u64(pYcbcrConversion, &cgen_var_733, 1);
-    stream->write((uint64_t*)&cgen_var_733, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateSamplerYcbcrConversion readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_734;
-    stream->read((uint64_t*)&cgen_var_734, 8);
-    stream->handleMapping()->mapHandles_u64_VkSamplerYcbcrConversion(&cgen_var_734, (VkSamplerYcbcrConversion*)pYcbcrConversion, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pYcbcrConversion));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSamplerYcbcrConversion(&cgen_var_3, (VkSamplerYcbcrConversion*)pYcbcrConversion, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateSamplerYcbcrConversion returnUnmarshal");
     VkResult vkCreateSamplerYcbcrConversion_VkResult_return = (VkResult)0;
     stream->read(&vkCreateSamplerYcbcrConversion_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateSamplerYcbcrConversion");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateSamplerYcbcrConversion_VkResult_return;
 }
 
 void VkEncoder::vkDestroySamplerYcbcrConversion(
     VkDevice device,
     VkSamplerYcbcrConversion ycbcrConversion,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroySamplerYcbcrConversion encode");
-    mImpl->log("start vkDestroySamplerYcbcrConversion");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSamplerYcbcrConversion local_ycbcrConversion;
     VkAllocationCallbacks* local_pAllocator;
@@ -11218,67 +12338,75 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_735;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_735, 1);
-        countingStream->write((uint64_t*)&cgen_var_735, 1 * 8);
-        uint64_t cgen_var_736;
-        countingStream->handleMapping()->mapHandles_VkSamplerYcbcrConversion_u64(&local_ycbcrConversion, &cgen_var_736, 1);
-        countingStream->write((uint64_t*)&cgen_var_736, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_737 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_737);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroySamplerYcbcrConversion = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroySamplerYcbcrConversion = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroySamplerYcbcrConversion);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroySamplerYcbcrConversion = OP_vkDestroySamplerYcbcrConversion;
-    stream->write(&opcode_vkDestroySamplerYcbcrConversion, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroySamplerYcbcrConversion, sizeof(uint32_t));
-    uint64_t cgen_var_738;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_738, 1);
-    stream->write((uint64_t*)&cgen_var_738, 1 * 8);
-    uint64_t cgen_var_739;
-    stream->handleMapping()->mapHandles_VkSamplerYcbcrConversion_u64(&local_ycbcrConversion, &cgen_var_739, 1);
-    stream->write((uint64_t*)&cgen_var_739, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroySamplerYcbcrConversion, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroySamplerYcbcrConversion, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSamplerYcbcrConversion((*&local_ycbcrConversion));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_740 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_740);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroySamplerYcbcrConversion readParams");
-    AEMU_SCOPED_TRACE("vkDestroySamplerYcbcrConversion returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkSamplerYcbcrConversion((VkSamplerYcbcrConversion*)&ycbcrConversion);
-    mImpl->log("finish vkDestroySamplerYcbcrConversion");;
+    sResourceTracker->destroyMapping()->mapHandles_VkSamplerYcbcrConversion((VkSamplerYcbcrConversion*)&ycbcrConversion);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkCreateDescriptorUpdateTemplate(
     VkDevice device,
     const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
+    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateDescriptorUpdateTemplate encode");
-    mImpl->log("start vkCreateDescriptorUpdateTemplate");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDescriptorUpdateTemplateCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -11287,93 +12415,95 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkDescriptorUpdateTemplateCreateInfo*)pool->alloc(sizeof(const VkDescriptorUpdateTemplateCreateInfo));
-        deepcopy_VkDescriptorUpdateTemplateCreateInfo(pool, pCreateInfo, (VkDescriptorUpdateTemplateCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkDescriptorUpdateTemplateCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkDescriptorUpdateTemplateCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkDescriptorUpdateTemplateCreateInfo(mImpl->resources(), (VkDescriptorUpdateTemplateCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkDescriptorUpdateTemplateCreateInfo(sResourceTracker, (VkDescriptorUpdateTemplateCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_741;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_741, 1);
-        countingStream->write((uint64_t*)&cgen_var_741, 1 * 8);
-        marshal_VkDescriptorUpdateTemplateCreateInfo(countingStream, (VkDescriptorUpdateTemplateCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDescriptorUpdateTemplateCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorUpdateTemplateCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_742 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_742);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_743;
-        countingStream->handleMapping()->mapHandles_VkDescriptorUpdateTemplate_u64(pDescriptorUpdateTemplate, &cgen_var_743, 1);
-        countingStream->write((uint64_t*)&cgen_var_743, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateDescriptorUpdateTemplate = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateDescriptorUpdateTemplate = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateDescriptorUpdateTemplate);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateDescriptorUpdateTemplate = OP_vkCreateDescriptorUpdateTemplate;
-    stream->write(&opcode_vkCreateDescriptorUpdateTemplate, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateDescriptorUpdateTemplate, sizeof(uint32_t));
-    uint64_t cgen_var_744;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_744, 1);
-    stream->write((uint64_t*)&cgen_var_744, 1 * 8);
-    marshal_VkDescriptorUpdateTemplateCreateInfo(stream, (VkDescriptorUpdateTemplateCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateDescriptorUpdateTemplate, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateDescriptorUpdateTemplate, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDescriptorUpdateTemplateCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorUpdateTemplateCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_745 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_745);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_746;
-    stream->handleMapping()->mapHandles_VkDescriptorUpdateTemplate_u64(pDescriptorUpdateTemplate, &cgen_var_746, 1);
-    stream->write((uint64_t*)&cgen_var_746, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateDescriptorUpdateTemplate readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_747;
-    stream->read((uint64_t*)&cgen_var_747, 8);
-    stream->handleMapping()->mapHandles_u64_VkDescriptorUpdateTemplate(&cgen_var_747, (VkDescriptorUpdateTemplate*)pDescriptorUpdateTemplate, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pDescriptorUpdateTemplate));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkDescriptorUpdateTemplate(&cgen_var_3, (VkDescriptorUpdateTemplate*)pDescriptorUpdateTemplate, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateDescriptorUpdateTemplate returnUnmarshal");
     VkResult vkCreateDescriptorUpdateTemplate_VkResult_return = (VkResult)0;
     stream->read(&vkCreateDescriptorUpdateTemplate_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    encoderLock.unlock();
-    mImpl->resources()->on_vkCreateDescriptorUpdateTemplate(this, vkCreateDescriptorUpdateTemplate_VkResult_return, device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
-    encoderLock.lock();
-    mImpl->log("finish vkCreateDescriptorUpdateTemplate");;
+    sResourceTracker->on_vkCreateDescriptorUpdateTemplate(this, vkCreateDescriptorUpdateTemplate_VkResult_return, device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateDescriptorUpdateTemplate_VkResult_return;
 }
 
 void VkEncoder::vkDestroyDescriptorUpdateTemplate(
     VkDevice device,
     VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorUpdateTemplate encode");
-    mImpl->log("start vkDestroyDescriptorUpdateTemplate");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDescriptorUpdateTemplate local_descriptorUpdateTemplate;
     VkAllocationCallbacks* local_pAllocator;
@@ -11383,67 +12513,75 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_748;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_748, 1);
-        countingStream->write((uint64_t*)&cgen_var_748, 1 * 8);
-        uint64_t cgen_var_749;
-        countingStream->handleMapping()->mapHandles_VkDescriptorUpdateTemplate_u64(&local_descriptorUpdateTemplate, &cgen_var_749, 1);
-        countingStream->write((uint64_t*)&cgen_var_749, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_750 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_750);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyDescriptorUpdateTemplate = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyDescriptorUpdateTemplate = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyDescriptorUpdateTemplate);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyDescriptorUpdateTemplate = OP_vkDestroyDescriptorUpdateTemplate;
-    stream->write(&opcode_vkDestroyDescriptorUpdateTemplate, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyDescriptorUpdateTemplate, sizeof(uint32_t));
-    uint64_t cgen_var_751;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_751, 1);
-    stream->write((uint64_t*)&cgen_var_751, 1 * 8);
-    uint64_t cgen_var_752;
-    stream->handleMapping()->mapHandles_VkDescriptorUpdateTemplate_u64(&local_descriptorUpdateTemplate, &cgen_var_752, 1);
-    stream->write((uint64_t*)&cgen_var_752, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyDescriptorUpdateTemplate, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyDescriptorUpdateTemplate, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDescriptorUpdateTemplate((*&local_descriptorUpdateTemplate));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_753 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_753);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorUpdateTemplate readParams");
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorUpdateTemplate returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkDescriptorUpdateTemplate((VkDescriptorUpdateTemplate*)&descriptorUpdateTemplate);
-    mImpl->log("finish vkDestroyDescriptorUpdateTemplate");;
+    sResourceTracker->destroyMapping()->mapHandles_VkDescriptorUpdateTemplate((VkDescriptorUpdateTemplate*)&descriptorUpdateTemplate);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkUpdateDescriptorSetWithTemplate(
     VkDevice device,
     VkDescriptorSet descriptorSet,
     VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-    const void* pData)
+    const void* pData,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplate encode");
-    mImpl->log("start vkUpdateDescriptorSetWithTemplate");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDescriptorSet local_descriptorSet;
     VkDescriptorUpdateTemplate local_descriptorUpdateTemplate;
@@ -11451,69 +12589,75 @@
     local_device = device;
     local_descriptorSet = descriptorSet;
     local_descriptorUpdateTemplate = descriptorUpdateTemplate;
-    local_pData = nullptr;
-    if (pData)
+    // Avoiding deepcopy for pData
+    local_pData = (void*)pData;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pData = (void*)pool->dupArray(pData, sizeof(const uint8_t));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_754;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_754, 1);
-        countingStream->write((uint64_t*)&cgen_var_754, 1 * 8);
-        uint64_t cgen_var_755;
-        countingStream->handleMapping()->mapHandles_VkDescriptorSet_u64(&local_descriptorSet, &cgen_var_755, 1);
-        countingStream->write((uint64_t*)&cgen_var_755, 1 * 8);
-        uint64_t cgen_var_756;
-        countingStream->handleMapping()->mapHandles_VkDescriptorUpdateTemplate_u64(&local_descriptorUpdateTemplate, &cgen_var_756, 1);
-        countingStream->write((uint64_t*)&cgen_var_756, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_757 = (uint64_t)(uintptr_t)local_pData;
-        countingStream->putBe64(cgen_var_757);
+        *countPtr += 8;
         if (local_pData)
         {
-            countingStream->write((void*)local_pData, sizeof(uint8_t));
+            *countPtr += sizeof(uint8_t);
         }
     }
-    uint32_t packetSize_vkUpdateDescriptorSetWithTemplate = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkUpdateDescriptorSetWithTemplate = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkUpdateDescriptorSetWithTemplate);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkUpdateDescriptorSetWithTemplate = OP_vkUpdateDescriptorSetWithTemplate;
-    stream->write(&opcode_vkUpdateDescriptorSetWithTemplate, sizeof(uint32_t));
-    stream->write(&packetSize_vkUpdateDescriptorSetWithTemplate, sizeof(uint32_t));
-    uint64_t cgen_var_758;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_758, 1);
-    stream->write((uint64_t*)&cgen_var_758, 1 * 8);
-    uint64_t cgen_var_759;
-    stream->handleMapping()->mapHandles_VkDescriptorSet_u64(&local_descriptorSet, &cgen_var_759, 1);
-    stream->write((uint64_t*)&cgen_var_759, 1 * 8);
-    uint64_t cgen_var_760;
-    stream->handleMapping()->mapHandles_VkDescriptorUpdateTemplate_u64(&local_descriptorUpdateTemplate, &cgen_var_760, 1);
-    stream->write((uint64_t*)&cgen_var_760, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkUpdateDescriptorSetWithTemplate, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkUpdateDescriptorSetWithTemplate, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDescriptorSet((*&local_descriptorSet));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = get_host_u64_VkDescriptorUpdateTemplate((*&local_descriptorUpdateTemplate));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_761 = (uint64_t)(uintptr_t)local_pData;
-    stream->putBe64(cgen_var_761);
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)local_pData;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pData)
     {
-        stream->write((void*)local_pData, sizeof(uint8_t));
+        memcpy(*streamPtrPtr, (void*)local_pData, sizeof(uint8_t));
+        *streamPtrPtr += sizeof(uint8_t);
     }
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplate readParams");
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplate returnUnmarshal");
-    mImpl->log("finish vkUpdateDescriptorSetWithTemplate");;
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetPhysicalDeviceExternalBufferProperties(
     VkPhysicalDevice physicalDevice,
     const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
-    VkExternalBufferProperties* pExternalBufferProperties)
+    VkExternalBufferProperties* pExternalBufferProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalBufferProperties encode");
-    mImpl->log("start vkGetPhysicalDeviceExternalBufferProperties");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkPhysicalDeviceExternalBufferInfo* local_pExternalBufferInfo;
     local_physicalDevice = physicalDevice;
@@ -11521,55 +12665,61 @@
     if (pExternalBufferInfo)
     {
         local_pExternalBufferInfo = (VkPhysicalDeviceExternalBufferInfo*)pool->alloc(sizeof(const VkPhysicalDeviceExternalBufferInfo));
-        deepcopy_VkPhysicalDeviceExternalBufferInfo(pool, pExternalBufferInfo, (VkPhysicalDeviceExternalBufferInfo*)(local_pExternalBufferInfo));
+        deepcopy_VkPhysicalDeviceExternalBufferInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pExternalBufferInfo, (VkPhysicalDeviceExternalBufferInfo*)(local_pExternalBufferInfo));
     }
     if (local_pExternalBufferInfo)
     {
-        mImpl->resources()->transformImpl_VkPhysicalDeviceExternalBufferInfo_tohost(local_pExternalBufferInfo, 1);
-        transform_tohost_VkPhysicalDeviceExternalBufferInfo(mImpl->resources(), (VkPhysicalDeviceExternalBufferInfo*)(local_pExternalBufferInfo));
+        sResourceTracker->transformImpl_VkPhysicalDeviceExternalBufferInfo_tohost(local_pExternalBufferInfo, 1);
+        transform_tohost_VkPhysicalDeviceExternalBufferInfo(sResourceTracker, (VkPhysicalDeviceExternalBufferInfo*)(local_pExternalBufferInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_762;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_762, 1);
-        countingStream->write((uint64_t*)&cgen_var_762, 1 * 8);
-        marshal_VkPhysicalDeviceExternalBufferInfo(countingStream, (VkPhysicalDeviceExternalBufferInfo*)(local_pExternalBufferInfo));
-        marshal_VkExternalBufferProperties(countingStream, (VkExternalBufferProperties*)(pExternalBufferProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceExternalBufferInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceExternalBufferInfo*)(local_pExternalBufferInfo), countPtr);
+        count_VkExternalBufferProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalBufferProperties*)(pExternalBufferProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceExternalBufferProperties = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceExternalBufferProperties = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceExternalBufferProperties);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceExternalBufferProperties = OP_vkGetPhysicalDeviceExternalBufferProperties;
-    stream->write(&opcode_vkGetPhysicalDeviceExternalBufferProperties, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceExternalBufferProperties, sizeof(uint32_t));
-    uint64_t cgen_var_763;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_763, 1);
-    stream->write((uint64_t*)&cgen_var_763, 1 * 8);
-    marshal_VkPhysicalDeviceExternalBufferInfo(stream, (VkPhysicalDeviceExternalBufferInfo*)(local_pExternalBufferInfo));
-    marshal_VkExternalBufferProperties(stream, (VkExternalBufferProperties*)(pExternalBufferProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalBufferProperties readParams");
-    unmarshal_VkExternalBufferProperties(stream, (VkExternalBufferProperties*)(pExternalBufferProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceExternalBufferProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceExternalBufferProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceExternalBufferInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceExternalBufferInfo*)(local_pExternalBufferInfo), streamPtrPtr);
+    reservedmarshal_VkExternalBufferProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalBufferProperties*)(pExternalBufferProperties), streamPtrPtr);
+    unmarshal_VkExternalBufferProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalBufferProperties*)(pExternalBufferProperties));
     if (pExternalBufferProperties)
     {
-        mImpl->resources()->transformImpl_VkExternalBufferProperties_fromhost(pExternalBufferProperties, 1);
-        transform_fromhost_VkExternalBufferProperties(mImpl->resources(), (VkExternalBufferProperties*)(pExternalBufferProperties));
+        sResourceTracker->transformImpl_VkExternalBufferProperties_fromhost(pExternalBufferProperties, 1);
+        transform_fromhost_VkExternalBufferProperties(sResourceTracker, (VkExternalBufferProperties*)(pExternalBufferProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalBufferProperties returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceExternalBufferProperties");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetPhysicalDeviceExternalFenceProperties(
     VkPhysicalDevice physicalDevice,
     const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
-    VkExternalFenceProperties* pExternalFenceProperties)
+    VkExternalFenceProperties* pExternalFenceProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalFenceProperties encode");
-    mImpl->log("start vkGetPhysicalDeviceExternalFenceProperties");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkPhysicalDeviceExternalFenceInfo* local_pExternalFenceInfo;
     local_physicalDevice = physicalDevice;
@@ -11577,53 +12727,59 @@
     if (pExternalFenceInfo)
     {
         local_pExternalFenceInfo = (VkPhysicalDeviceExternalFenceInfo*)pool->alloc(sizeof(const VkPhysicalDeviceExternalFenceInfo));
-        deepcopy_VkPhysicalDeviceExternalFenceInfo(pool, pExternalFenceInfo, (VkPhysicalDeviceExternalFenceInfo*)(local_pExternalFenceInfo));
+        deepcopy_VkPhysicalDeviceExternalFenceInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pExternalFenceInfo, (VkPhysicalDeviceExternalFenceInfo*)(local_pExternalFenceInfo));
     }
     if (local_pExternalFenceInfo)
     {
-        transform_tohost_VkPhysicalDeviceExternalFenceInfo(mImpl->resources(), (VkPhysicalDeviceExternalFenceInfo*)(local_pExternalFenceInfo));
+        transform_tohost_VkPhysicalDeviceExternalFenceInfo(sResourceTracker, (VkPhysicalDeviceExternalFenceInfo*)(local_pExternalFenceInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_764;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_764, 1);
-        countingStream->write((uint64_t*)&cgen_var_764, 1 * 8);
-        marshal_VkPhysicalDeviceExternalFenceInfo(countingStream, (VkPhysicalDeviceExternalFenceInfo*)(local_pExternalFenceInfo));
-        marshal_VkExternalFenceProperties(countingStream, (VkExternalFenceProperties*)(pExternalFenceProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceExternalFenceInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceExternalFenceInfo*)(local_pExternalFenceInfo), countPtr);
+        count_VkExternalFenceProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalFenceProperties*)(pExternalFenceProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceExternalFenceProperties = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceExternalFenceProperties = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceExternalFenceProperties);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceExternalFenceProperties = OP_vkGetPhysicalDeviceExternalFenceProperties;
-    stream->write(&opcode_vkGetPhysicalDeviceExternalFenceProperties, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceExternalFenceProperties, sizeof(uint32_t));
-    uint64_t cgen_var_765;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_765, 1);
-    stream->write((uint64_t*)&cgen_var_765, 1 * 8);
-    marshal_VkPhysicalDeviceExternalFenceInfo(stream, (VkPhysicalDeviceExternalFenceInfo*)(local_pExternalFenceInfo));
-    marshal_VkExternalFenceProperties(stream, (VkExternalFenceProperties*)(pExternalFenceProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalFenceProperties readParams");
-    unmarshal_VkExternalFenceProperties(stream, (VkExternalFenceProperties*)(pExternalFenceProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceExternalFenceProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceExternalFenceProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceExternalFenceInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceExternalFenceInfo*)(local_pExternalFenceInfo), streamPtrPtr);
+    reservedmarshal_VkExternalFenceProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalFenceProperties*)(pExternalFenceProperties), streamPtrPtr);
+    unmarshal_VkExternalFenceProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalFenceProperties*)(pExternalFenceProperties));
     if (pExternalFenceProperties)
     {
-        transform_fromhost_VkExternalFenceProperties(mImpl->resources(), (VkExternalFenceProperties*)(pExternalFenceProperties));
+        transform_fromhost_VkExternalFenceProperties(sResourceTracker, (VkExternalFenceProperties*)(pExternalFenceProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalFenceProperties returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceExternalFenceProperties");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetPhysicalDeviceExternalSemaphoreProperties(
     VkPhysicalDevice physicalDevice,
     const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
-    VkExternalSemaphoreProperties* pExternalSemaphoreProperties)
+    VkExternalSemaphoreProperties* pExternalSemaphoreProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalSemaphoreProperties encode");
-    mImpl->log("start vkGetPhysicalDeviceExternalSemaphoreProperties");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkPhysicalDeviceExternalSemaphoreInfo* local_pExternalSemaphoreInfo;
     local_physicalDevice = physicalDevice;
@@ -11631,53 +12787,60 @@
     if (pExternalSemaphoreInfo)
     {
         local_pExternalSemaphoreInfo = (VkPhysicalDeviceExternalSemaphoreInfo*)pool->alloc(sizeof(const VkPhysicalDeviceExternalSemaphoreInfo));
-        deepcopy_VkPhysicalDeviceExternalSemaphoreInfo(pool, pExternalSemaphoreInfo, (VkPhysicalDeviceExternalSemaphoreInfo*)(local_pExternalSemaphoreInfo));
+        deepcopy_VkPhysicalDeviceExternalSemaphoreInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pExternalSemaphoreInfo, (VkPhysicalDeviceExternalSemaphoreInfo*)(local_pExternalSemaphoreInfo));
     }
     if (local_pExternalSemaphoreInfo)
     {
-        transform_tohost_VkPhysicalDeviceExternalSemaphoreInfo(mImpl->resources(), (VkPhysicalDeviceExternalSemaphoreInfo*)(local_pExternalSemaphoreInfo));
+        transform_tohost_VkPhysicalDeviceExternalSemaphoreInfo(sResourceTracker, (VkPhysicalDeviceExternalSemaphoreInfo*)(local_pExternalSemaphoreInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_766;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_766, 1);
-        countingStream->write((uint64_t*)&cgen_var_766, 1 * 8);
-        marshal_VkPhysicalDeviceExternalSemaphoreInfo(countingStream, (VkPhysicalDeviceExternalSemaphoreInfo*)(local_pExternalSemaphoreInfo));
-        marshal_VkExternalSemaphoreProperties(countingStream, (VkExternalSemaphoreProperties*)(pExternalSemaphoreProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceExternalSemaphoreInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceExternalSemaphoreInfo*)(local_pExternalSemaphoreInfo), countPtr);
+        count_VkExternalSemaphoreProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalSemaphoreProperties*)(pExternalSemaphoreProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceExternalSemaphoreProperties = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceExternalSemaphoreProperties = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceExternalSemaphoreProperties);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceExternalSemaphoreProperties = OP_vkGetPhysicalDeviceExternalSemaphoreProperties;
-    stream->write(&opcode_vkGetPhysicalDeviceExternalSemaphoreProperties, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceExternalSemaphoreProperties, sizeof(uint32_t));
-    uint64_t cgen_var_767;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_767, 1);
-    stream->write((uint64_t*)&cgen_var_767, 1 * 8);
-    marshal_VkPhysicalDeviceExternalSemaphoreInfo(stream, (VkPhysicalDeviceExternalSemaphoreInfo*)(local_pExternalSemaphoreInfo));
-    marshal_VkExternalSemaphoreProperties(stream, (VkExternalSemaphoreProperties*)(pExternalSemaphoreProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalSemaphoreProperties readParams");
-    unmarshal_VkExternalSemaphoreProperties(stream, (VkExternalSemaphoreProperties*)(pExternalSemaphoreProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceExternalSemaphoreProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceExternalSemaphoreProperties, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceExternalSemaphoreInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceExternalSemaphoreInfo*)(local_pExternalSemaphoreInfo), streamPtrPtr);
+    reservedmarshal_VkExternalSemaphoreProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalSemaphoreProperties*)(pExternalSemaphoreProperties), streamPtrPtr);
+    unmarshal_VkExternalSemaphoreProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalSemaphoreProperties*)(pExternalSemaphoreProperties));
     if (pExternalSemaphoreProperties)
     {
-        transform_fromhost_VkExternalSemaphoreProperties(mImpl->resources(), (VkExternalSemaphoreProperties*)(pExternalSemaphoreProperties));
+        transform_fromhost_VkExternalSemaphoreProperties(sResourceTracker, (VkExternalSemaphoreProperties*)(pExternalSemaphoreProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalSemaphoreProperties returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceExternalSemaphoreProperties");;
+    sResourceTracker->on_vkGetPhysicalDeviceExternalSemaphoreProperties(this, physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetDescriptorSetLayoutSupport(
     VkDevice device,
     const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
-    VkDescriptorSetLayoutSupport* pSupport)
+    VkDescriptorSetLayoutSupport* pSupport,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetDescriptorSetLayoutSupport encode");
-    mImpl->log("start vkGetDescriptorSetLayoutSupport");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDescriptorSetLayoutCreateInfo* local_pCreateInfo;
     local_device = device;
@@ -11685,38 +12848,893 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkDescriptorSetLayoutCreateInfo*)pool->alloc(sizeof(const VkDescriptorSetLayoutCreateInfo));
-        deepcopy_VkDescriptorSetLayoutCreateInfo(pool, pCreateInfo, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkDescriptorSetLayoutCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
     }
     if (local_pCreateInfo)
     {
-        transform_tohost_VkDescriptorSetLayoutCreateInfo(mImpl->resources(), (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkDescriptorSetLayoutCreateInfo(sResourceTracker, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_768;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_768, 1);
-        countingStream->write((uint64_t*)&cgen_var_768, 1 * 8);
-        marshal_VkDescriptorSetLayoutCreateInfo(countingStream, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
-        marshal_VkDescriptorSetLayoutSupport(countingStream, (VkDescriptorSetLayoutSupport*)(pSupport));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDescriptorSetLayoutCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo), countPtr);
+        count_VkDescriptorSetLayoutSupport(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorSetLayoutSupport*)(pSupport), countPtr);
     }
-    uint32_t packetSize_vkGetDescriptorSetLayoutSupport = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetDescriptorSetLayoutSupport = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDescriptorSetLayoutSupport);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetDescriptorSetLayoutSupport = OP_vkGetDescriptorSetLayoutSupport;
-    stream->write(&opcode_vkGetDescriptorSetLayoutSupport, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetDescriptorSetLayoutSupport, sizeof(uint32_t));
-    uint64_t cgen_var_769;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_769, 1);
-    stream->write((uint64_t*)&cgen_var_769, 1 * 8);
-    marshal_VkDescriptorSetLayoutCreateInfo(stream, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
-    marshal_VkDescriptorSetLayoutSupport(stream, (VkDescriptorSetLayoutSupport*)(pSupport));
-    AEMU_SCOPED_TRACE("vkGetDescriptorSetLayoutSupport readParams");
-    unmarshal_VkDescriptorSetLayoutSupport(stream, (VkDescriptorSetLayoutSupport*)(pSupport));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDescriptorSetLayoutSupport, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDescriptorSetLayoutSupport, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDescriptorSetLayoutCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo), streamPtrPtr);
+    reservedmarshal_VkDescriptorSetLayoutSupport(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorSetLayoutSupport*)(pSupport), streamPtrPtr);
+    unmarshal_VkDescriptorSetLayoutSupport(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorSetLayoutSupport*)(pSupport));
     if (pSupport)
     {
-        transform_fromhost_VkDescriptorSetLayoutSupport(mImpl->resources(), (VkDescriptorSetLayoutSupport*)(pSupport));
+        transform_fromhost_VkDescriptorSetLayoutSupport(sResourceTracker, (VkDescriptorSetLayoutSupport*)(pSupport));
     }
-    AEMU_SCOPED_TRACE("vkGetDescriptorSetLayoutSupport returnUnmarshal");
-    mImpl->log("finish vkGetDescriptorSetLayoutSupport");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
+#ifdef VK_VERSION_1_2
+void VkEncoder::vkCmdDrawIndirectCount(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkBuffer countBuffer,
+    VkDeviceSize countBufferOffset,
+    uint32_t maxDrawCount,
+    uint32_t stride,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkBuffer local_buffer;
+    VkDeviceSize local_offset;
+    VkBuffer local_countBuffer;
+    VkDeviceSize local_countBufferOffset;
+    uint32_t local_maxDrawCount;
+    uint32_t local_stride;
+    local_commandBuffer = commandBuffer;
+    local_buffer = buffer;
+    local_offset = offset;
+    local_countBuffer = countBuffer;
+    local_countBufferOffset = countBufferOffset;
+    local_maxDrawCount = maxDrawCount;
+    local_stride = stride;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkCmdDrawIndirectCount = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDrawIndirectCount -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDrawIndirectCount);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdDrawIndirectCount = OP_vkCmdDrawIndirectCount;
+    memcpy(streamPtr, &opcode_vkCmdDrawIndirectCount, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDrawIndirectCount, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_buffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&local_countBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_countBufferOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_maxDrawCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_stride, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdDrawIndexedIndirectCount(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkBuffer countBuffer,
+    VkDeviceSize countBufferOffset,
+    uint32_t maxDrawCount,
+    uint32_t stride,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkBuffer local_buffer;
+    VkDeviceSize local_offset;
+    VkBuffer local_countBuffer;
+    VkDeviceSize local_countBufferOffset;
+    uint32_t local_maxDrawCount;
+    uint32_t local_stride;
+    local_commandBuffer = commandBuffer;
+    local_buffer = buffer;
+    local_offset = offset;
+    local_countBuffer = countBuffer;
+    local_countBufferOffset = countBufferOffset;
+    local_maxDrawCount = maxDrawCount;
+    local_stride = stride;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkCmdDrawIndexedIndirectCount = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDrawIndexedIndirectCount -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDrawIndexedIndirectCount);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdDrawIndexedIndirectCount = OP_vkCmdDrawIndexedIndirectCount;
+    memcpy(streamPtr, &opcode_vkCmdDrawIndexedIndirectCount, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDrawIndexedIndirectCount, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_buffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&local_countBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_countBufferOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_maxDrawCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_stride, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+VkResult VkEncoder::vkCreateRenderPass2(
+    VkDevice device,
+    const VkRenderPassCreateInfo2* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkRenderPass* pRenderPass,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkRenderPassCreateInfo2* local_pCreateInfo;
+    VkAllocationCallbacks* local_pAllocator;
+    local_device = device;
+    local_pCreateInfo = nullptr;
+    if (pCreateInfo)
+    {
+        local_pCreateInfo = (VkRenderPassCreateInfo2*)pool->alloc(sizeof(const VkRenderPassCreateInfo2));
+        deepcopy_VkRenderPassCreateInfo2(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkRenderPassCreateInfo2*)(local_pCreateInfo));
+    }
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pCreateInfo)
+    {
+        transform_tohost_VkRenderPassCreateInfo2(sResourceTracker, (VkRenderPassCreateInfo2*)(local_pCreateInfo));
+    }
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkRenderPassCreateInfo2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRenderPassCreateInfo2*)(local_pCreateInfo), countPtr);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+        uint64_t cgen_var_1;
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkCreateRenderPass2 = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateRenderPass2);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCreateRenderPass2 = OP_vkCreateRenderPass2;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateRenderPass2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateRenderPass2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkRenderPassCreateInfo2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRenderPassCreateInfo2*)(local_pCreateInfo), streamPtrPtr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pRenderPass));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkRenderPass(&cgen_var_3, (VkRenderPass*)pRenderPass, 1);
+    VkResult vkCreateRenderPass2_VkResult_return = (VkResult)0;
+    stream->read(&vkCreateRenderPass2_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCreateRenderPass2_VkResult_return;
+}
+
+void VkEncoder::vkCmdBeginRenderPass2(
+    VkCommandBuffer commandBuffer,
+    const VkRenderPassBeginInfo* pRenderPassBegin,
+    const VkSubpassBeginInfo* pSubpassBeginInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkRenderPassBeginInfo* local_pRenderPassBegin;
+    VkSubpassBeginInfo* local_pSubpassBeginInfo;
+    local_commandBuffer = commandBuffer;
+    local_pRenderPassBegin = nullptr;
+    if (pRenderPassBegin)
+    {
+        local_pRenderPassBegin = (VkRenderPassBeginInfo*)pool->alloc(sizeof(const VkRenderPassBeginInfo));
+        deepcopy_VkRenderPassBeginInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pRenderPassBegin, (VkRenderPassBeginInfo*)(local_pRenderPassBegin));
+    }
+    local_pSubpassBeginInfo = nullptr;
+    if (pSubpassBeginInfo)
+    {
+        local_pSubpassBeginInfo = (VkSubpassBeginInfo*)pool->alloc(sizeof(const VkSubpassBeginInfo));
+        deepcopy_VkSubpassBeginInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSubpassBeginInfo, (VkSubpassBeginInfo*)(local_pSubpassBeginInfo));
+    }
+    if (local_pRenderPassBegin)
+    {
+        transform_tohost_VkRenderPassBeginInfo(sResourceTracker, (VkRenderPassBeginInfo*)(local_pRenderPassBegin));
+    }
+    if (local_pSubpassBeginInfo)
+    {
+        transform_tohost_VkSubpassBeginInfo(sResourceTracker, (VkSubpassBeginInfo*)(local_pSubpassBeginInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkRenderPassBeginInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRenderPassBeginInfo*)(local_pRenderPassBegin), countPtr);
+        count_VkSubpassBeginInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubpassBeginInfo*)(local_pSubpassBeginInfo), countPtr);
+    }
+    uint32_t packetSize_vkCmdBeginRenderPass2 = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBeginRenderPass2 -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBeginRenderPass2);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdBeginRenderPass2 = OP_vkCmdBeginRenderPass2;
+    memcpy(streamPtr, &opcode_vkCmdBeginRenderPass2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBeginRenderPass2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkRenderPassBeginInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRenderPassBeginInfo*)(local_pRenderPassBegin), streamPtrPtr);
+    reservedmarshal_VkSubpassBeginInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubpassBeginInfo*)(local_pSubpassBeginInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdNextSubpass2(
+    VkCommandBuffer commandBuffer,
+    const VkSubpassBeginInfo* pSubpassBeginInfo,
+    const VkSubpassEndInfo* pSubpassEndInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkSubpassBeginInfo* local_pSubpassBeginInfo;
+    VkSubpassEndInfo* local_pSubpassEndInfo;
+    local_commandBuffer = commandBuffer;
+    local_pSubpassBeginInfo = nullptr;
+    if (pSubpassBeginInfo)
+    {
+        local_pSubpassBeginInfo = (VkSubpassBeginInfo*)pool->alloc(sizeof(const VkSubpassBeginInfo));
+        deepcopy_VkSubpassBeginInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSubpassBeginInfo, (VkSubpassBeginInfo*)(local_pSubpassBeginInfo));
+    }
+    local_pSubpassEndInfo = nullptr;
+    if (pSubpassEndInfo)
+    {
+        local_pSubpassEndInfo = (VkSubpassEndInfo*)pool->alloc(sizeof(const VkSubpassEndInfo));
+        deepcopy_VkSubpassEndInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSubpassEndInfo, (VkSubpassEndInfo*)(local_pSubpassEndInfo));
+    }
+    if (local_pSubpassBeginInfo)
+    {
+        transform_tohost_VkSubpassBeginInfo(sResourceTracker, (VkSubpassBeginInfo*)(local_pSubpassBeginInfo));
+    }
+    if (local_pSubpassEndInfo)
+    {
+        transform_tohost_VkSubpassEndInfo(sResourceTracker, (VkSubpassEndInfo*)(local_pSubpassEndInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkSubpassBeginInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubpassBeginInfo*)(local_pSubpassBeginInfo), countPtr);
+        count_VkSubpassEndInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubpassEndInfo*)(local_pSubpassEndInfo), countPtr);
+    }
+    uint32_t packetSize_vkCmdNextSubpass2 = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdNextSubpass2 -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdNextSubpass2);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdNextSubpass2 = OP_vkCmdNextSubpass2;
+    memcpy(streamPtr, &opcode_vkCmdNextSubpass2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdNextSubpass2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkSubpassBeginInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubpassBeginInfo*)(local_pSubpassBeginInfo), streamPtrPtr);
+    reservedmarshal_VkSubpassEndInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubpassEndInfo*)(local_pSubpassEndInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdEndRenderPass2(
+    VkCommandBuffer commandBuffer,
+    const VkSubpassEndInfo* pSubpassEndInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkSubpassEndInfo* local_pSubpassEndInfo;
+    local_commandBuffer = commandBuffer;
+    local_pSubpassEndInfo = nullptr;
+    if (pSubpassEndInfo)
+    {
+        local_pSubpassEndInfo = (VkSubpassEndInfo*)pool->alloc(sizeof(const VkSubpassEndInfo));
+        deepcopy_VkSubpassEndInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSubpassEndInfo, (VkSubpassEndInfo*)(local_pSubpassEndInfo));
+    }
+    if (local_pSubpassEndInfo)
+    {
+        transform_tohost_VkSubpassEndInfo(sResourceTracker, (VkSubpassEndInfo*)(local_pSubpassEndInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkSubpassEndInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubpassEndInfo*)(local_pSubpassEndInfo), countPtr);
+    }
+    uint32_t packetSize_vkCmdEndRenderPass2 = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdEndRenderPass2 -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdEndRenderPass2);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdEndRenderPass2 = OP_vkCmdEndRenderPass2;
+    memcpy(streamPtr, &opcode_vkCmdEndRenderPass2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdEndRenderPass2, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkSubpassEndInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubpassEndInfo*)(local_pSubpassEndInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkResetQueryPool(
+    VkDevice device,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t queryCount,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkQueryPool local_queryPool;
+    uint32_t local_firstQuery;
+    uint32_t local_queryCount;
+    local_device = device;
+    local_queryPool = queryPool;
+    local_firstQuery = firstQuery;
+    local_queryCount = queryCount;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkResetQueryPool = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkResetQueryPool);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkResetQueryPool = OP_vkResetQueryPool;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkResetQueryPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkResetQueryPool, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkQueryPool((*&local_queryPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstQuery, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_queryCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+VkResult VkEncoder::vkGetSemaphoreCounterValue(
+    VkDevice device,
+    VkSemaphore semaphore,
+    uint64_t* pValue,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkSemaphore local_semaphore;
+    local_device = device;
+    local_semaphore = semaphore;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint64_t);
+    }
+    uint32_t packetSize_vkGetSemaphoreCounterValue = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetSemaphoreCounterValue);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetSemaphoreCounterValue = OP_vkGetSemaphoreCounterValue;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetSemaphoreCounterValue, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetSemaphoreCounterValue, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSemaphore((*&local_semaphore));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint64_t*)pValue, sizeof(uint64_t));
+    *streamPtrPtr += sizeof(uint64_t);
+    stream->read((uint64_t*)pValue, sizeof(uint64_t));
+    VkResult vkGetSemaphoreCounterValue_VkResult_return = (VkResult)0;
+    stream->read(&vkGetSemaphoreCounterValue_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetSemaphoreCounterValue_VkResult_return;
+}
+
+VkResult VkEncoder::vkWaitSemaphores(
+    VkDevice device,
+    const VkSemaphoreWaitInfo* pWaitInfo,
+    uint64_t timeout,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkSemaphoreWaitInfo* local_pWaitInfo;
+    uint64_t local_timeout;
+    local_device = device;
+    local_pWaitInfo = nullptr;
+    if (pWaitInfo)
+    {
+        local_pWaitInfo = (VkSemaphoreWaitInfo*)pool->alloc(sizeof(const VkSemaphoreWaitInfo));
+        deepcopy_VkSemaphoreWaitInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pWaitInfo, (VkSemaphoreWaitInfo*)(local_pWaitInfo));
+    }
+    local_timeout = timeout;
+    if (local_pWaitInfo)
+    {
+        transform_tohost_VkSemaphoreWaitInfo(sResourceTracker, (VkSemaphoreWaitInfo*)(local_pWaitInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkSemaphoreWaitInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSemaphoreWaitInfo*)(local_pWaitInfo), countPtr);
+        *countPtr += sizeof(uint64_t);
+    }
+    uint32_t packetSize_vkWaitSemaphores = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkWaitSemaphores);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkWaitSemaphores = OP_vkWaitSemaphores;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkWaitSemaphores, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkWaitSemaphores, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkSemaphoreWaitInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSemaphoreWaitInfo*)(local_pWaitInfo), streamPtrPtr);
+    memcpy(*streamPtrPtr, (uint64_t*)&local_timeout, sizeof(uint64_t));
+    *streamPtrPtr += sizeof(uint64_t);
+    VkResult vkWaitSemaphores_VkResult_return = (VkResult)0;
+    stream->read(&vkWaitSemaphores_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkWaitSemaphores_VkResult_return;
+}
+
+VkResult VkEncoder::vkSignalSemaphore(
+    VkDevice device,
+    const VkSemaphoreSignalInfo* pSignalInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkSemaphoreSignalInfo* local_pSignalInfo;
+    local_device = device;
+    local_pSignalInfo = nullptr;
+    if (pSignalInfo)
+    {
+        local_pSignalInfo = (VkSemaphoreSignalInfo*)pool->alloc(sizeof(const VkSemaphoreSignalInfo));
+        deepcopy_VkSemaphoreSignalInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSignalInfo, (VkSemaphoreSignalInfo*)(local_pSignalInfo));
+    }
+    if (local_pSignalInfo)
+    {
+        transform_tohost_VkSemaphoreSignalInfo(sResourceTracker, (VkSemaphoreSignalInfo*)(local_pSignalInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkSemaphoreSignalInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSemaphoreSignalInfo*)(local_pSignalInfo), countPtr);
+    }
+    uint32_t packetSize_vkSignalSemaphore = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkSignalSemaphore);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkSignalSemaphore = OP_vkSignalSemaphore;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkSignalSemaphore, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkSignalSemaphore, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkSemaphoreSignalInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSemaphoreSignalInfo*)(local_pSignalInfo), streamPtrPtr);
+    VkResult vkSignalSemaphore_VkResult_return = (VkResult)0;
+    stream->read(&vkSignalSemaphore_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkSignalSemaphore_VkResult_return;
+}
+
+VkDeviceAddress VkEncoder::vkGetBufferDeviceAddress(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkBufferDeviceAddressInfo* local_pInfo;
+    local_device = device;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkBufferDeviceAddressInfo*)pool->alloc(sizeof(const VkBufferDeviceAddressInfo));
+        deepcopy_VkBufferDeviceAddressInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkBufferDeviceAddressInfo*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkBufferDeviceAddressInfo(sResourceTracker, (VkBufferDeviceAddressInfo*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkBufferDeviceAddressInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferDeviceAddressInfo*)(local_pInfo), countPtr);
+    }
+    uint32_t packetSize_vkGetBufferDeviceAddress = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetBufferDeviceAddress);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetBufferDeviceAddress = OP_vkGetBufferDeviceAddress;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetBufferDeviceAddress, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetBufferDeviceAddress, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkBufferDeviceAddressInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferDeviceAddressInfo*)(local_pInfo), streamPtrPtr);
+    VkDeviceAddress vkGetBufferDeviceAddress_VkDeviceAddress_return = (VkDeviceAddress)0;
+    stream->read(&vkGetBufferDeviceAddress_VkDeviceAddress_return, sizeof(VkDeviceAddress));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetBufferDeviceAddress_VkDeviceAddress_return;
+}
+
+uint64_t VkEncoder::vkGetBufferOpaqueCaptureAddress(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkBufferDeviceAddressInfo* local_pInfo;
+    local_device = device;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkBufferDeviceAddressInfo*)pool->alloc(sizeof(const VkBufferDeviceAddressInfo));
+        deepcopy_VkBufferDeviceAddressInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkBufferDeviceAddressInfo*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkBufferDeviceAddressInfo(sResourceTracker, (VkBufferDeviceAddressInfo*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkBufferDeviceAddressInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferDeviceAddressInfo*)(local_pInfo), countPtr);
+    }
+    uint32_t packetSize_vkGetBufferOpaqueCaptureAddress = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetBufferOpaqueCaptureAddress);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetBufferOpaqueCaptureAddress = OP_vkGetBufferOpaqueCaptureAddress;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetBufferOpaqueCaptureAddress, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetBufferOpaqueCaptureAddress, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkBufferDeviceAddressInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferDeviceAddressInfo*)(local_pInfo), streamPtrPtr);
+    uint64_t vkGetBufferOpaqueCaptureAddress_uint64_t_return = (uint64_t)0;
+    stream->read(&vkGetBufferOpaqueCaptureAddress_uint64_t_return, sizeof(uint64_t));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetBufferOpaqueCaptureAddress_uint64_t_return;
+}
+
+uint64_t VkEncoder::vkGetDeviceMemoryOpaqueCaptureAddress(
+    VkDevice device,
+    const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkDeviceMemoryOpaqueCaptureAddressInfo* local_pInfo;
+    local_device = device;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkDeviceMemoryOpaqueCaptureAddressInfo*)pool->alloc(sizeof(const VkDeviceMemoryOpaqueCaptureAddressInfo));
+        deepcopy_VkDeviceMemoryOpaqueCaptureAddressInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkDeviceMemoryOpaqueCaptureAddressInfo*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkDeviceMemoryOpaqueCaptureAddressInfo(sResourceTracker, (VkDeviceMemoryOpaqueCaptureAddressInfo*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDeviceMemoryOpaqueCaptureAddressInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDeviceMemoryOpaqueCaptureAddressInfo*)(local_pInfo), countPtr);
+    }
+    uint32_t packetSize_vkGetDeviceMemoryOpaqueCaptureAddress = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDeviceMemoryOpaqueCaptureAddress);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetDeviceMemoryOpaqueCaptureAddress = OP_vkGetDeviceMemoryOpaqueCaptureAddress;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDeviceMemoryOpaqueCaptureAddress, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDeviceMemoryOpaqueCaptureAddress, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDeviceMemoryOpaqueCaptureAddressInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDeviceMemoryOpaqueCaptureAddressInfo*)(local_pInfo), streamPtrPtr);
+    uint64_t vkGetDeviceMemoryOpaqueCaptureAddress_uint64_t_return = (uint64_t)0;
+    stream->read(&vkGetDeviceMemoryOpaqueCaptureAddress_uint64_t_return, sizeof(uint64_t));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetDeviceMemoryOpaqueCaptureAddress_uint64_t_return;
 }
 
 #endif
@@ -11724,16 +13742,14 @@
 void VkEncoder::vkDestroySurfaceKHR(
     VkInstance instance,
     VkSurfaceKHR surface,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroySurfaceKHR encode");
-    mImpl->log("start vkDestroySurfaceKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     VkSurfaceKHR local_surface;
     VkAllocationCallbacks* local_pAllocator;
@@ -11743,161 +13759,179 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_770;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_770, 1);
-        countingStream->write((uint64_t*)&cgen_var_770, 1 * 8);
-        uint64_t cgen_var_771;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&local_surface, &cgen_var_771, 1);
-        countingStream->write((uint64_t*)&cgen_var_771, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_772 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_772);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroySurfaceKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroySurfaceKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroySurfaceKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroySurfaceKHR = OP_vkDestroySurfaceKHR;
-    stream->write(&opcode_vkDestroySurfaceKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroySurfaceKHR, sizeof(uint32_t));
-    uint64_t cgen_var_773;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_773, 1);
-    stream->write((uint64_t*)&cgen_var_773, 1 * 8);
-    uint64_t cgen_var_774;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&local_surface, &cgen_var_774, 1);
-    stream->write((uint64_t*)&cgen_var_774, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroySurfaceKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroySurfaceKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSurfaceKHR((*&local_surface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_775 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_775);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroySurfaceKHR readParams");
-    AEMU_SCOPED_TRACE("vkDestroySurfaceKHR returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkSurfaceKHR((VkSurfaceKHR*)&surface);
-    mImpl->log("finish vkDestroySurfaceKHR");;
+    sResourceTracker->destroyMapping()->mapHandles_VkSurfaceKHR((VkSurfaceKHR*)&surface);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkGetPhysicalDeviceSurfaceSupportKHR(
     VkPhysicalDevice physicalDevice,
     uint32_t queueFamilyIndex,
     VkSurfaceKHR surface,
-    VkBool32* pSupported)
+    VkBool32* pSupported,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceSupportKHR encode");
-    mImpl->log("start vkGetPhysicalDeviceSurfaceSupportKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     uint32_t local_queueFamilyIndex;
     VkSurfaceKHR local_surface;
     local_physicalDevice = physicalDevice;
     local_queueFamilyIndex = queueFamilyIndex;
     local_surface = surface;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_776;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_776, 1);
-        countingStream->write((uint64_t*)&cgen_var_776, 1 * 8);
-        countingStream->write((uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
-        uint64_t cgen_var_777;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&local_surface, &cgen_var_777, 1);
-        countingStream->write((uint64_t*)&cgen_var_777, 1 * 8);
-        countingStream->write((VkBool32*)pSupported, sizeof(VkBool32));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkBool32);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceSurfaceSupportKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceSurfaceSupportKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceSurfaceSupportKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceSurfaceSupportKHR = OP_vkGetPhysicalDeviceSurfaceSupportKHR;
-    stream->write(&opcode_vkGetPhysicalDeviceSurfaceSupportKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceSurfaceSupportKHR, sizeof(uint32_t));
-    uint64_t cgen_var_778;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_778, 1);
-    stream->write((uint64_t*)&cgen_var_778, 1 * 8);
-    stream->write((uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
-    uint64_t cgen_var_779;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&local_surface, &cgen_var_779, 1);
-    stream->write((uint64_t*)&cgen_var_779, 1 * 8);
-    stream->write((VkBool32*)pSupported, sizeof(VkBool32));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceSupportKHR readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceSurfaceSupportKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceSurfaceSupportKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSurfaceKHR((*&local_surface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkBool32*)pSupported, sizeof(VkBool32));
+    *streamPtrPtr += sizeof(VkBool32);
     stream->read((VkBool32*)pSupported, sizeof(VkBool32));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceSupportKHR returnUnmarshal");
     VkResult vkGetPhysicalDeviceSurfaceSupportKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetPhysicalDeviceSurfaceSupportKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceSurfaceSupportKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceSurfaceSupportKHR_VkResult_return;
 }
 
 VkResult VkEncoder::vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
     VkPhysicalDevice physicalDevice,
     VkSurfaceKHR surface,
-    VkSurfaceCapabilitiesKHR* pSurfaceCapabilities)
+    VkSurfaceCapabilitiesKHR* pSurfaceCapabilities,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceCapabilitiesKHR encode");
-    mImpl->log("start vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkSurfaceKHR local_surface;
     local_physicalDevice = physicalDevice;
     local_surface = surface;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_780;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_780, 1);
-        countingStream->write((uint64_t*)&cgen_var_780, 1 * 8);
-        uint64_t cgen_var_781;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&local_surface, &cgen_var_781, 1);
-        countingStream->write((uint64_t*)&cgen_var_781, 1 * 8);
-        marshal_VkSurfaceCapabilitiesKHR(countingStream, (VkSurfaceCapabilitiesKHR*)(pSurfaceCapabilities));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        count_VkSurfaceCapabilitiesKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSurfaceCapabilitiesKHR*)(pSurfaceCapabilities), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceSurfaceCapabilitiesKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceSurfaceCapabilitiesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceSurfaceCapabilitiesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceSurfaceCapabilitiesKHR = OP_vkGetPhysicalDeviceSurfaceCapabilitiesKHR;
-    stream->write(&opcode_vkGetPhysicalDeviceSurfaceCapabilitiesKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceSurfaceCapabilitiesKHR, sizeof(uint32_t));
-    uint64_t cgen_var_782;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_782, 1);
-    stream->write((uint64_t*)&cgen_var_782, 1 * 8);
-    uint64_t cgen_var_783;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&local_surface, &cgen_var_783, 1);
-    stream->write((uint64_t*)&cgen_var_783, 1 * 8);
-    marshal_VkSurfaceCapabilitiesKHR(stream, (VkSurfaceCapabilitiesKHR*)(pSurfaceCapabilities));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceCapabilitiesKHR readParams");
-    unmarshal_VkSurfaceCapabilitiesKHR(stream, (VkSurfaceCapabilitiesKHR*)(pSurfaceCapabilities));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceSurfaceCapabilitiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceSurfaceCapabilitiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSurfaceKHR((*&local_surface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkSurfaceCapabilitiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSurfaceCapabilitiesKHR*)(pSurfaceCapabilities), streamPtrPtr);
+    unmarshal_VkSurfaceCapabilitiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSurfaceCapabilitiesKHR*)(pSurfaceCapabilities));
     if (pSurfaceCapabilities)
     {
-        transform_fromhost_VkSurfaceCapabilitiesKHR(mImpl->resources(), (VkSurfaceCapabilitiesKHR*)(pSurfaceCapabilities));
+        transform_fromhost_VkSurfaceCapabilitiesKHR(sResourceTracker, (VkSurfaceCapabilitiesKHR*)(pSurfaceCapabilities));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceCapabilitiesKHR returnUnmarshal");
     VkResult vkGetPhysicalDeviceSurfaceCapabilitiesKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetPhysicalDeviceSurfaceCapabilitiesKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceSurfaceCapabilitiesKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceSurfaceCapabilitiesKHR_VkResult_return;
 }
 
@@ -11905,75 +13939,82 @@
     VkPhysicalDevice physicalDevice,
     VkSurfaceKHR surface,
     uint32_t* pSurfaceFormatCount,
-    VkSurfaceFormatKHR* pSurfaceFormats)
+    VkSurfaceFormatKHR* pSurfaceFormats,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceFormatsKHR encode");
-    mImpl->log("start vkGetPhysicalDeviceSurfaceFormatsKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkSurfaceKHR local_surface;
     local_physicalDevice = physicalDevice;
     local_surface = surface;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_784;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_784, 1);
-        countingStream->write((uint64_t*)&cgen_var_784, 1 * 8);
-        uint64_t cgen_var_785;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&local_surface, &cgen_var_785, 1);
-        countingStream->write((uint64_t*)&cgen_var_785, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_786 = (uint64_t)(uintptr_t)pSurfaceFormatCount;
-        countingStream->putBe64(cgen_var_786);
+        *countPtr += 8;
         if (pSurfaceFormatCount)
         {
-            countingStream->write((uint32_t*)pSurfaceFormatCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_787 = (uint64_t)(uintptr_t)pSurfaceFormats;
-        countingStream->putBe64(cgen_var_787);
+        *countPtr += 8;
         if (pSurfaceFormats)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pSurfaceFormatCount)); ++i)
+            if (pSurfaceFormatCount)
             {
-                marshal_VkSurfaceFormatKHR(countingStream, (VkSurfaceFormatKHR*)(pSurfaceFormats + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pSurfaceFormatCount)); ++i)
+                {
+                    count_VkSurfaceFormatKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSurfaceFormatKHR*)(pSurfaceFormats + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetPhysicalDeviceSurfaceFormatsKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceSurfaceFormatsKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceSurfaceFormatsKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceSurfaceFormatsKHR = OP_vkGetPhysicalDeviceSurfaceFormatsKHR;
-    stream->write(&opcode_vkGetPhysicalDeviceSurfaceFormatsKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceSurfaceFormatsKHR, sizeof(uint32_t));
-    uint64_t cgen_var_788;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_788, 1);
-    stream->write((uint64_t*)&cgen_var_788, 1 * 8);
-    uint64_t cgen_var_789;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&local_surface, &cgen_var_789, 1);
-    stream->write((uint64_t*)&cgen_var_789, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceSurfaceFormatsKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceSurfaceFormatsKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSurfaceKHR((*&local_surface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_790 = (uint64_t)(uintptr_t)pSurfaceFormatCount;
-    stream->putBe64(cgen_var_790);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pSurfaceFormatCount;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pSurfaceFormatCount)
     {
-        stream->write((uint32_t*)pSurfaceFormatCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pSurfaceFormatCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_791 = (uint64_t)(uintptr_t)pSurfaceFormats;
-    stream->putBe64(cgen_var_791);
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)pSurfaceFormats;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pSurfaceFormats)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pSurfaceFormatCount)); ++i)
         {
-            marshal_VkSurfaceFormatKHR(stream, (VkSurfaceFormatKHR*)(pSurfaceFormats + i));
+            reservedmarshal_VkSurfaceFormatKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSurfaceFormatKHR*)(pSurfaceFormats + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceFormatsKHR readParams");
     // WARNING PTR CHECK
     uint32_t* check_pSurfaceFormatCount;
     check_pSurfaceFormatCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -11994,25 +14035,33 @@
         {
             fprintf(stderr, "fatal: pSurfaceFormats inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pSurfaceFormatCount)); ++i)
+        if (pSurfaceFormatCount)
         {
-            unmarshal_VkSurfaceFormatKHR(stream, (VkSurfaceFormatKHR*)(pSurfaceFormats + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pSurfaceFormatCount)); ++i)
+            {
+                unmarshal_VkSurfaceFormatKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSurfaceFormatKHR*)(pSurfaceFormats + i));
+            }
         }
     }
-    if (pSurfaceFormats)
+    if (pSurfaceFormatCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pSurfaceFormatCount)); ++i)
+        if (pSurfaceFormats)
         {
-            transform_fromhost_VkSurfaceFormatKHR(mImpl->resources(), (VkSurfaceFormatKHR*)(pSurfaceFormats + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pSurfaceFormatCount)); ++i)
+            {
+                transform_fromhost_VkSurfaceFormatKHR(sResourceTracker, (VkSurfaceFormatKHR*)(pSurfaceFormats + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceFormatsKHR returnUnmarshal");
     VkResult vkGetPhysicalDeviceSurfaceFormatsKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetPhysicalDeviceSurfaceFormatsKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceSurfaceFormatsKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceSurfaceFormatsKHR_VkResult_return;
 }
 
@@ -12020,69 +14069,77 @@
     VkPhysicalDevice physicalDevice,
     VkSurfaceKHR surface,
     uint32_t* pPresentModeCount,
-    VkPresentModeKHR* pPresentModes)
+    VkPresentModeKHR* pPresentModes,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfacePresentModesKHR encode");
-    mImpl->log("start vkGetPhysicalDeviceSurfacePresentModesKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkSurfaceKHR local_surface;
     local_physicalDevice = physicalDevice;
     local_surface = surface;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_794;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_794, 1);
-        countingStream->write((uint64_t*)&cgen_var_794, 1 * 8);
-        uint64_t cgen_var_795;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&local_surface, &cgen_var_795, 1);
-        countingStream->write((uint64_t*)&cgen_var_795, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_796 = (uint64_t)(uintptr_t)pPresentModeCount;
-        countingStream->putBe64(cgen_var_796);
+        *countPtr += 8;
         if (pPresentModeCount)
         {
-            countingStream->write((uint32_t*)pPresentModeCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_797 = (uint64_t)(uintptr_t)pPresentModes;
-        countingStream->putBe64(cgen_var_797);
+        *countPtr += 8;
         if (pPresentModes)
         {
-            countingStream->write((VkPresentModeKHR*)pPresentModes, (*(pPresentModeCount)) * sizeof(VkPresentModeKHR));
+            if (pPresentModeCount)
+            {
+                *countPtr += (*(pPresentModeCount)) * sizeof(VkPresentModeKHR);
+            }
         }
     }
-    uint32_t packetSize_vkGetPhysicalDeviceSurfacePresentModesKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceSurfacePresentModesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceSurfacePresentModesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceSurfacePresentModesKHR = OP_vkGetPhysicalDeviceSurfacePresentModesKHR;
-    stream->write(&opcode_vkGetPhysicalDeviceSurfacePresentModesKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceSurfacePresentModesKHR, sizeof(uint32_t));
-    uint64_t cgen_var_798;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_798, 1);
-    stream->write((uint64_t*)&cgen_var_798, 1 * 8);
-    uint64_t cgen_var_799;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&local_surface, &cgen_var_799, 1);
-    stream->write((uint64_t*)&cgen_var_799, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceSurfacePresentModesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceSurfacePresentModesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSurfaceKHR((*&local_surface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_800 = (uint64_t)(uintptr_t)pPresentModeCount;
-    stream->putBe64(cgen_var_800);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pPresentModeCount;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPresentModeCount)
     {
-        stream->write((uint32_t*)pPresentModeCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPresentModeCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_801 = (uint64_t)(uintptr_t)pPresentModes;
-    stream->putBe64(cgen_var_801);
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)pPresentModes;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPresentModes)
     {
-        stream->write((VkPresentModeKHR*)pPresentModes, (*(pPresentModeCount)) * sizeof(VkPresentModeKHR));
+        memcpy(*streamPtrPtr, (VkPresentModeKHR*)pPresentModes, (*(pPresentModeCount)) * sizeof(VkPresentModeKHR));
+        *streamPtrPtr += (*(pPresentModeCount)) * sizeof(VkPresentModeKHR);
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfacePresentModesKHR readParams");
     // WARNING PTR CHECK
     uint32_t* check_pPresentModeCount;
     check_pPresentModeCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -12105,13 +14162,15 @@
         }
         stream->read((VkPresentModeKHR*)pPresentModes, (*(pPresentModeCount)) * sizeof(VkPresentModeKHR));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfacePresentModesKHR returnUnmarshal");
     VkResult vkGetPhysicalDeviceSurfacePresentModesKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetPhysicalDeviceSurfacePresentModesKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceSurfacePresentModesKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceSurfacePresentModesKHR_VkResult_return;
 }
 
@@ -12121,16 +14180,14 @@
     VkDevice device,
     const VkSwapchainCreateInfoKHR* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkSwapchainKHR* pSwapchain)
+    VkSwapchainKHR* pSwapchain,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateSwapchainKHR encode");
-    mImpl->log("start vkCreateSwapchainKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSwapchainCreateInfoKHR* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -12139,90 +14196,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkSwapchainCreateInfoKHR*)pool->alloc(sizeof(const VkSwapchainCreateInfoKHR));
-        deepcopy_VkSwapchainCreateInfoKHR(pool, pCreateInfo, (VkSwapchainCreateInfoKHR*)(local_pCreateInfo));
+        deepcopy_VkSwapchainCreateInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkSwapchainCreateInfoKHR*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkSwapchainCreateInfoKHR(mImpl->resources(), (VkSwapchainCreateInfoKHR*)(local_pCreateInfo));
+        transform_tohost_VkSwapchainCreateInfoKHR(sResourceTracker, (VkSwapchainCreateInfoKHR*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_804;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_804, 1);
-        countingStream->write((uint64_t*)&cgen_var_804, 1 * 8);
-        marshal_VkSwapchainCreateInfoKHR(countingStream, (VkSwapchainCreateInfoKHR*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkSwapchainCreateInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSwapchainCreateInfoKHR*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_805 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_805);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_806;
-        countingStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(pSwapchain, &cgen_var_806, 1);
-        countingStream->write((uint64_t*)&cgen_var_806, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateSwapchainKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateSwapchainKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateSwapchainKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateSwapchainKHR = OP_vkCreateSwapchainKHR;
-    stream->write(&opcode_vkCreateSwapchainKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateSwapchainKHR, sizeof(uint32_t));
-    uint64_t cgen_var_807;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_807, 1);
-    stream->write((uint64_t*)&cgen_var_807, 1 * 8);
-    marshal_VkSwapchainCreateInfoKHR(stream, (VkSwapchainCreateInfoKHR*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateSwapchainKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateSwapchainKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkSwapchainCreateInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSwapchainCreateInfoKHR*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_808 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_808);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_809;
-    stream->handleMapping()->mapHandles_VkSwapchainKHR_u64(pSwapchain, &cgen_var_809, 1);
-    stream->write((uint64_t*)&cgen_var_809, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateSwapchainKHR readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_810;
-    stream->read((uint64_t*)&cgen_var_810, 8);
-    stream->handleMapping()->mapHandles_u64_VkSwapchainKHR(&cgen_var_810, (VkSwapchainKHR*)pSwapchain, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSwapchain));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSwapchainKHR(&cgen_var_3, (VkSwapchainKHR*)pSwapchain, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateSwapchainKHR returnUnmarshal");
     VkResult vkCreateSwapchainKHR_VkResult_return = (VkResult)0;
     stream->read(&vkCreateSwapchainKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateSwapchainKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateSwapchainKHR_VkResult_return;
 }
 
 void VkEncoder::vkDestroySwapchainKHR(
     VkDevice device,
     VkSwapchainKHR swapchain,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroySwapchainKHR encode");
-    mImpl->log("start vkDestroySwapchainKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSwapchainKHR local_swapchain;
     VkAllocationCallbacks* local_pAllocator;
@@ -12232,134 +14293,151 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_811;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_811, 1);
-        countingStream->write((uint64_t*)&cgen_var_811, 1 * 8);
-        uint64_t cgen_var_812;
-        countingStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&local_swapchain, &cgen_var_812, 1);
-        countingStream->write((uint64_t*)&cgen_var_812, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_813 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_813);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroySwapchainKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroySwapchainKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroySwapchainKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroySwapchainKHR = OP_vkDestroySwapchainKHR;
-    stream->write(&opcode_vkDestroySwapchainKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroySwapchainKHR, sizeof(uint32_t));
-    uint64_t cgen_var_814;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_814, 1);
-    stream->write((uint64_t*)&cgen_var_814, 1 * 8);
-    uint64_t cgen_var_815;
-    stream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&local_swapchain, &cgen_var_815, 1);
-    stream->write((uint64_t*)&cgen_var_815, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroySwapchainKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroySwapchainKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSwapchainKHR((*&local_swapchain));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_816 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_816);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroySwapchainKHR readParams");
-    AEMU_SCOPED_TRACE("vkDestroySwapchainKHR returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkSwapchainKHR((VkSwapchainKHR*)&swapchain);
-    mImpl->log("finish vkDestroySwapchainKHR");;
+    sResourceTracker->destroyMapping()->mapHandles_VkSwapchainKHR((VkSwapchainKHR*)&swapchain);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkGetSwapchainImagesKHR(
     VkDevice device,
     VkSwapchainKHR swapchain,
     uint32_t* pSwapchainImageCount,
-    VkImage* pSwapchainImages)
+    VkImage* pSwapchainImages,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetSwapchainImagesKHR encode");
-    mImpl->log("start vkGetSwapchainImagesKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSwapchainKHR local_swapchain;
     local_device = device;
     local_swapchain = swapchain;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_817;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_817, 1);
-        countingStream->write((uint64_t*)&cgen_var_817, 1 * 8);
-        uint64_t cgen_var_818;
-        countingStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&local_swapchain, &cgen_var_818, 1);
-        countingStream->write((uint64_t*)&cgen_var_818, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_819 = (uint64_t)(uintptr_t)pSwapchainImageCount;
-        countingStream->putBe64(cgen_var_819);
+        *countPtr += 8;
         if (pSwapchainImageCount)
         {
-            countingStream->write((uint32_t*)pSwapchainImageCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_820 = (uint64_t)(uintptr_t)pSwapchainImages;
-        countingStream->putBe64(cgen_var_820);
+        *countPtr += 8;
         if (pSwapchainImages)
         {
             if ((*(pSwapchainImageCount)))
             {
-                uint64_t* cgen_var_821;
-                countingStream->alloc((void**)&cgen_var_821, (*(pSwapchainImageCount)) * 8);
-                countingStream->handleMapping()->mapHandles_VkImage_u64(pSwapchainImages, cgen_var_821, (*(pSwapchainImageCount)));
-                countingStream->write((uint64_t*)cgen_var_821, (*(pSwapchainImageCount)) * 8);
+                *countPtr += (*(pSwapchainImageCount)) * 8;
             }
         }
     }
-    uint32_t packetSize_vkGetSwapchainImagesKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetSwapchainImagesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetSwapchainImagesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetSwapchainImagesKHR = OP_vkGetSwapchainImagesKHR;
-    stream->write(&opcode_vkGetSwapchainImagesKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetSwapchainImagesKHR, sizeof(uint32_t));
-    uint64_t cgen_var_822;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_822, 1);
-    stream->write((uint64_t*)&cgen_var_822, 1 * 8);
-    uint64_t cgen_var_823;
-    stream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&local_swapchain, &cgen_var_823, 1);
-    stream->write((uint64_t*)&cgen_var_823, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetSwapchainImagesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetSwapchainImagesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSwapchainKHR((*&local_swapchain));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_824 = (uint64_t)(uintptr_t)pSwapchainImageCount;
-    stream->putBe64(cgen_var_824);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pSwapchainImageCount;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pSwapchainImageCount)
     {
-        stream->write((uint32_t*)pSwapchainImageCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pSwapchainImageCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
+    /* is handle, possibly out */;
     // WARNING PTR CHECK
-    uint64_t cgen_var_825 = (uint64_t)(uintptr_t)pSwapchainImages;
-    stream->putBe64(cgen_var_825);
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)pSwapchainImages;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pSwapchainImages)
     {
         if ((*(pSwapchainImageCount)))
         {
-            uint64_t* cgen_var_826;
-            stream->alloc((void**)&cgen_var_826, (*(pSwapchainImageCount)) * 8);
-            stream->handleMapping()->mapHandles_VkImage_u64(pSwapchainImages, cgen_var_826, (*(pSwapchainImageCount)));
-            stream->write((uint64_t*)cgen_var_826, (*(pSwapchainImageCount)) * 8);
+            uint8_t* cgen_var_3_0_ptr = (uint8_t*)(*streamPtrPtr);
+            if (pSwapchainImageCount)
+            {
+                for (uint32_t k = 0; k < (*(pSwapchainImageCount)); ++k)
+                {
+                    uint64_t tmpval = (uint64_t)(pSwapchainImages[k]);
+                    memcpy(cgen_var_3_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+                }
+            }
+            *streamPtrPtr += 8 * (*(pSwapchainImageCount));
         }
     }
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkGetSwapchainImagesKHR readParams");
+    /* is handle, possibly out */;
     // WARNING PTR CHECK
     uint32_t* check_pSwapchainImageCount;
     check_pSwapchainImageCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -12382,19 +14460,21 @@
         }
         if ((*(pSwapchainImageCount)))
         {
-            uint64_t* cgen_var_829;
-            stream->alloc((void**)&cgen_var_829, (*(pSwapchainImageCount)) * 8);
-            stream->read((uint64_t*)cgen_var_829, (*(pSwapchainImageCount)) * 8);
-            stream->handleMapping()->mapHandles_u64_VkImage(cgen_var_829, (VkImage*)pSwapchainImages, (*(pSwapchainImageCount)));
+            uint64_t* cgen_var_5_0;
+            stream->alloc((void**)&cgen_var_5_0, (*(pSwapchainImageCount)) * 8);
+            stream->read((uint64_t*)cgen_var_5_0, (*(pSwapchainImageCount)) * 8);
+            stream->handleMapping()->mapHandles_u64_VkImage(cgen_var_5_0, (VkImage*)pSwapchainImages, (*(pSwapchainImageCount)));
         }
     }
-    AEMU_SCOPED_TRACE("vkGetSwapchainImagesKHR returnUnmarshal");
     VkResult vkGetSwapchainImagesKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetSwapchainImagesKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetSwapchainImagesKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetSwapchainImagesKHR_VkResult_return;
 }
 
@@ -12404,16 +14484,14 @@
     uint64_t timeout,
     VkSemaphore semaphore,
     VkFence fence,
-    uint32_t* pImageIndex)
+    uint32_t* pImageIndex,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkAcquireNextImageKHR encode");
-    mImpl->log("start vkAcquireNextImageKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSwapchainKHR local_swapchain;
     uint64_t local_timeout;
@@ -12424,66 +14502,71 @@
     local_timeout = timeout;
     local_semaphore = semaphore;
     local_fence = fence;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_830;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_830, 1);
-        countingStream->write((uint64_t*)&cgen_var_830, 1 * 8);
-        uint64_t cgen_var_831;
-        countingStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&local_swapchain, &cgen_var_831, 1);
-        countingStream->write((uint64_t*)&cgen_var_831, 1 * 8);
-        countingStream->write((uint64_t*)&local_timeout, sizeof(uint64_t));
-        uint64_t cgen_var_832;
-        countingStream->handleMapping()->mapHandles_VkSemaphore_u64(&local_semaphore, &cgen_var_832, 1);
-        countingStream->write((uint64_t*)&cgen_var_832, 1 * 8);
-        uint64_t cgen_var_833;
-        countingStream->handleMapping()->mapHandles_VkFence_u64(&local_fence, &cgen_var_833, 1);
-        countingStream->write((uint64_t*)&cgen_var_833, 1 * 8);
-        countingStream->write((uint32_t*)pImageIndex, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint64_t);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_3;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkAcquireNextImageKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkAcquireNextImageKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkAcquireNextImageKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkAcquireNextImageKHR = OP_vkAcquireNextImageKHR;
-    stream->write(&opcode_vkAcquireNextImageKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkAcquireNextImageKHR, sizeof(uint32_t));
-    uint64_t cgen_var_834;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_834, 1);
-    stream->write((uint64_t*)&cgen_var_834, 1 * 8);
-    uint64_t cgen_var_835;
-    stream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&local_swapchain, &cgen_var_835, 1);
-    stream->write((uint64_t*)&cgen_var_835, 1 * 8);
-    stream->write((uint64_t*)&local_timeout, sizeof(uint64_t));
-    uint64_t cgen_var_836;
-    stream->handleMapping()->mapHandles_VkSemaphore_u64(&local_semaphore, &cgen_var_836, 1);
-    stream->write((uint64_t*)&cgen_var_836, 1 * 8);
-    uint64_t cgen_var_837;
-    stream->handleMapping()->mapHandles_VkFence_u64(&local_fence, &cgen_var_837, 1);
-    stream->write((uint64_t*)&cgen_var_837, 1 * 8);
-    stream->write((uint32_t*)pImageIndex, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkAcquireNextImageKHR readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkAcquireNextImageKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkAcquireNextImageKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSwapchainKHR((*&local_swapchain));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint64_t*)&local_timeout, sizeof(uint64_t));
+    *streamPtrPtr += sizeof(uint64_t);
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = get_host_u64_VkSemaphore((*&local_semaphore));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_3;
+    *&cgen_var_3 = get_host_u64_VkFence((*&local_fence));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_3, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)pImageIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     stream->read((uint32_t*)pImageIndex, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkAcquireNextImageKHR returnUnmarshal");
     VkResult vkAcquireNextImageKHR_VkResult_return = (VkResult)0;
     stream->read(&vkAcquireNextImageKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkAcquireNextImageKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkAcquireNextImageKHR_VkResult_return;
 }
 
 VkResult VkEncoder::vkQueuePresentKHR(
     VkQueue queue,
-    const VkPresentInfoKHR* pPresentInfo)
+    const VkPresentInfoKHR* pPresentInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkQueuePresentKHR encode");
-    mImpl->log("start vkQueuePresentKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkQueue local_queue;
     VkPresentInfoKHR* local_pPresentInfo;
     local_queue = queue;
@@ -12491,137 +14574,148 @@
     if (pPresentInfo)
     {
         local_pPresentInfo = (VkPresentInfoKHR*)pool->alloc(sizeof(const VkPresentInfoKHR));
-        deepcopy_VkPresentInfoKHR(pool, pPresentInfo, (VkPresentInfoKHR*)(local_pPresentInfo));
+        deepcopy_VkPresentInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pPresentInfo, (VkPresentInfoKHR*)(local_pPresentInfo));
     }
     if (local_pPresentInfo)
     {
-        transform_tohost_VkPresentInfoKHR(mImpl->resources(), (VkPresentInfoKHR*)(local_pPresentInfo));
+        transform_tohost_VkPresentInfoKHR(sResourceTracker, (VkPresentInfoKHR*)(local_pPresentInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_838;
-        countingStream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_838, 1);
-        countingStream->write((uint64_t*)&cgen_var_838, 1 * 8);
-        marshal_VkPresentInfoKHR(countingStream, (VkPresentInfoKHR*)(local_pPresentInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPresentInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPresentInfoKHR*)(local_pPresentInfo), countPtr);
     }
-    uint32_t packetSize_vkQueuePresentKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkQueuePresentKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkQueuePresentKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkQueuePresentKHR = OP_vkQueuePresentKHR;
-    stream->write(&opcode_vkQueuePresentKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkQueuePresentKHR, sizeof(uint32_t));
-    uint64_t cgen_var_839;
-    stream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_839, 1);
-    stream->write((uint64_t*)&cgen_var_839, 1 * 8);
-    marshal_VkPresentInfoKHR(stream, (VkPresentInfoKHR*)(local_pPresentInfo));
-    AEMU_SCOPED_TRACE("vkQueuePresentKHR readParams");
-    AEMU_SCOPED_TRACE("vkQueuePresentKHR returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkQueuePresentKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkQueuePresentKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueue((*&local_queue));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPresentInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPresentInfoKHR*)(local_pPresentInfo), streamPtrPtr);
     VkResult vkQueuePresentKHR_VkResult_return = (VkResult)0;
     stream->read(&vkQueuePresentKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkQueuePresentKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkQueuePresentKHR_VkResult_return;
 }
 
 VkResult VkEncoder::vkGetDeviceGroupPresentCapabilitiesKHR(
     VkDevice device,
-    VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities)
+    VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupPresentCapabilitiesKHR encode");
-    mImpl->log("start vkGetDeviceGroupPresentCapabilitiesKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     local_device = device;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_840;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_840, 1);
-        countingStream->write((uint64_t*)&cgen_var_840, 1 * 8);
-        marshal_VkDeviceGroupPresentCapabilitiesKHR(countingStream, (VkDeviceGroupPresentCapabilitiesKHR*)(pDeviceGroupPresentCapabilities));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDeviceGroupPresentCapabilitiesKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDeviceGroupPresentCapabilitiesKHR*)(pDeviceGroupPresentCapabilities), countPtr);
     }
-    uint32_t packetSize_vkGetDeviceGroupPresentCapabilitiesKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetDeviceGroupPresentCapabilitiesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDeviceGroupPresentCapabilitiesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetDeviceGroupPresentCapabilitiesKHR = OP_vkGetDeviceGroupPresentCapabilitiesKHR;
-    stream->write(&opcode_vkGetDeviceGroupPresentCapabilitiesKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetDeviceGroupPresentCapabilitiesKHR, sizeof(uint32_t));
-    uint64_t cgen_var_841;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_841, 1);
-    stream->write((uint64_t*)&cgen_var_841, 1 * 8);
-    marshal_VkDeviceGroupPresentCapabilitiesKHR(stream, (VkDeviceGroupPresentCapabilitiesKHR*)(pDeviceGroupPresentCapabilities));
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupPresentCapabilitiesKHR readParams");
-    unmarshal_VkDeviceGroupPresentCapabilitiesKHR(stream, (VkDeviceGroupPresentCapabilitiesKHR*)(pDeviceGroupPresentCapabilities));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDeviceGroupPresentCapabilitiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDeviceGroupPresentCapabilitiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDeviceGroupPresentCapabilitiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDeviceGroupPresentCapabilitiesKHR*)(pDeviceGroupPresentCapabilities), streamPtrPtr);
+    unmarshal_VkDeviceGroupPresentCapabilitiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDeviceGroupPresentCapabilitiesKHR*)(pDeviceGroupPresentCapabilities));
     if (pDeviceGroupPresentCapabilities)
     {
-        transform_fromhost_VkDeviceGroupPresentCapabilitiesKHR(mImpl->resources(), (VkDeviceGroupPresentCapabilitiesKHR*)(pDeviceGroupPresentCapabilities));
+        transform_fromhost_VkDeviceGroupPresentCapabilitiesKHR(sResourceTracker, (VkDeviceGroupPresentCapabilitiesKHR*)(pDeviceGroupPresentCapabilities));
     }
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupPresentCapabilitiesKHR returnUnmarshal");
     VkResult vkGetDeviceGroupPresentCapabilitiesKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetDeviceGroupPresentCapabilitiesKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetDeviceGroupPresentCapabilitiesKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetDeviceGroupPresentCapabilitiesKHR_VkResult_return;
 }
 
 VkResult VkEncoder::vkGetDeviceGroupSurfacePresentModesKHR(
     VkDevice device,
     VkSurfaceKHR surface,
-    VkDeviceGroupPresentModeFlagsKHR* pModes)
+    VkDeviceGroupPresentModeFlagsKHR* pModes,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupSurfacePresentModesKHR encode");
-    mImpl->log("start vkGetDeviceGroupSurfacePresentModesKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSurfaceKHR local_surface;
     local_device = device;
     local_surface = surface;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_842;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_842, 1);
-        countingStream->write((uint64_t*)&cgen_var_842, 1 * 8);
-        uint64_t cgen_var_843;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&local_surface, &cgen_var_843, 1);
-        countingStream->write((uint64_t*)&cgen_var_843, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_844 = (uint64_t)(uintptr_t)pModes;
-        countingStream->putBe64(cgen_var_844);
+        *countPtr += 8;
         if (pModes)
         {
-            countingStream->write((VkDeviceGroupPresentModeFlagsKHR*)pModes, sizeof(VkDeviceGroupPresentModeFlagsKHR));
+            *countPtr += sizeof(VkDeviceGroupPresentModeFlagsKHR);
         }
     }
-    uint32_t packetSize_vkGetDeviceGroupSurfacePresentModesKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetDeviceGroupSurfacePresentModesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDeviceGroupSurfacePresentModesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetDeviceGroupSurfacePresentModesKHR = OP_vkGetDeviceGroupSurfacePresentModesKHR;
-    stream->write(&opcode_vkGetDeviceGroupSurfacePresentModesKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetDeviceGroupSurfacePresentModesKHR, sizeof(uint32_t));
-    uint64_t cgen_var_845;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_845, 1);
-    stream->write((uint64_t*)&cgen_var_845, 1 * 8);
-    uint64_t cgen_var_846;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&local_surface, &cgen_var_846, 1);
-    stream->write((uint64_t*)&cgen_var_846, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDeviceGroupSurfacePresentModesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDeviceGroupSurfacePresentModesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSurfaceKHR((*&local_surface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_847 = (uint64_t)(uintptr_t)pModes;
-    stream->putBe64(cgen_var_847);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pModes;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pModes)
     {
-        stream->write((VkDeviceGroupPresentModeFlagsKHR*)pModes, sizeof(VkDeviceGroupPresentModeFlagsKHR));
+        memcpy(*streamPtrPtr, (VkDeviceGroupPresentModeFlagsKHR*)pModes, sizeof(VkDeviceGroupPresentModeFlagsKHR));
+        *streamPtrPtr += sizeof(VkDeviceGroupPresentModeFlagsKHR);
     }
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupSurfacePresentModesKHR readParams");
     // WARNING PTR CHECK
     VkDeviceGroupPresentModeFlagsKHR* check_pModes;
     check_pModes = (VkDeviceGroupPresentModeFlagsKHR*)(uintptr_t)stream->getBe64();
@@ -12633,13 +14727,15 @@
         }
         stream->read((VkDeviceGroupPresentModeFlagsKHR*)pModes, sizeof(VkDeviceGroupPresentModeFlagsKHR));
     }
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupSurfacePresentModesKHR returnUnmarshal");
     VkResult vkGetDeviceGroupSurfacePresentModesKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetDeviceGroupSurfacePresentModesKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetDeviceGroupSurfacePresentModesKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetDeviceGroupSurfacePresentModesKHR_VkResult_return;
 }
 
@@ -12647,75 +14743,82 @@
     VkPhysicalDevice physicalDevice,
     VkSurfaceKHR surface,
     uint32_t* pRectCount,
-    VkRect2D* pRects)
+    VkRect2D* pRects,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDevicePresentRectanglesKHR encode");
-    mImpl->log("start vkGetPhysicalDevicePresentRectanglesKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkSurfaceKHR local_surface;
     local_physicalDevice = physicalDevice;
     local_surface = surface;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_849;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_849, 1);
-        countingStream->write((uint64_t*)&cgen_var_849, 1 * 8);
-        uint64_t cgen_var_850;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&local_surface, &cgen_var_850, 1);
-        countingStream->write((uint64_t*)&cgen_var_850, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_851 = (uint64_t)(uintptr_t)pRectCount;
-        countingStream->putBe64(cgen_var_851);
+        *countPtr += 8;
         if (pRectCount)
         {
-            countingStream->write((uint32_t*)pRectCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_852 = (uint64_t)(uintptr_t)pRects;
-        countingStream->putBe64(cgen_var_852);
+        *countPtr += 8;
         if (pRects)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pRectCount)); ++i)
+            if (pRectCount)
             {
-                marshal_VkRect2D(countingStream, (VkRect2D*)(pRects + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pRectCount)); ++i)
+                {
+                    count_VkRect2D(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRect2D*)(pRects + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetPhysicalDevicePresentRectanglesKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDevicePresentRectanglesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDevicePresentRectanglesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDevicePresentRectanglesKHR = OP_vkGetPhysicalDevicePresentRectanglesKHR;
-    stream->write(&opcode_vkGetPhysicalDevicePresentRectanglesKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDevicePresentRectanglesKHR, sizeof(uint32_t));
-    uint64_t cgen_var_853;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_853, 1);
-    stream->write((uint64_t*)&cgen_var_853, 1 * 8);
-    uint64_t cgen_var_854;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&local_surface, &cgen_var_854, 1);
-    stream->write((uint64_t*)&cgen_var_854, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDevicePresentRectanglesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDevicePresentRectanglesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSurfaceKHR((*&local_surface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_855 = (uint64_t)(uintptr_t)pRectCount;
-    stream->putBe64(cgen_var_855);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pRectCount;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pRectCount)
     {
-        stream->write((uint32_t*)pRectCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pRectCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_856 = (uint64_t)(uintptr_t)pRects;
-    stream->putBe64(cgen_var_856);
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)pRects;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pRects)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pRectCount)); ++i)
         {
-            marshal_VkRect2D(stream, (VkRect2D*)(pRects + i));
+            reservedmarshal_VkRect2D(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRect2D*)(pRects + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDevicePresentRectanglesKHR readParams");
     // WARNING PTR CHECK
     uint32_t* check_pRectCount;
     check_pRectCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -12736,41 +14839,47 @@
         {
             fprintf(stderr, "fatal: pRects inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pRectCount)); ++i)
+        if (pRectCount)
         {
-            unmarshal_VkRect2D(stream, (VkRect2D*)(pRects + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pRectCount)); ++i)
+            {
+                unmarshal_VkRect2D(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRect2D*)(pRects + i));
+            }
         }
     }
-    if (pRects)
+    if (pRectCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pRectCount)); ++i)
+        if (pRects)
         {
-            transform_fromhost_VkRect2D(mImpl->resources(), (VkRect2D*)(pRects + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pRectCount)); ++i)
+            {
+                transform_fromhost_VkRect2D(sResourceTracker, (VkRect2D*)(pRects + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDevicePresentRectanglesKHR returnUnmarshal");
     VkResult vkGetPhysicalDevicePresentRectanglesKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetPhysicalDevicePresentRectanglesKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDevicePresentRectanglesKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDevicePresentRectanglesKHR_VkResult_return;
 }
 
 VkResult VkEncoder::vkAcquireNextImage2KHR(
     VkDevice device,
     const VkAcquireNextImageInfoKHR* pAcquireInfo,
-    uint32_t* pImageIndex)
+    uint32_t* pImageIndex,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkAcquireNextImage2KHR encode");
-    mImpl->log("start vkAcquireNextImage2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkAcquireNextImageInfoKHR* local_pAcquireInfo;
     local_device = device;
@@ -12778,39 +14887,45 @@
     if (pAcquireInfo)
     {
         local_pAcquireInfo = (VkAcquireNextImageInfoKHR*)pool->alloc(sizeof(const VkAcquireNextImageInfoKHR));
-        deepcopy_VkAcquireNextImageInfoKHR(pool, pAcquireInfo, (VkAcquireNextImageInfoKHR*)(local_pAcquireInfo));
+        deepcopy_VkAcquireNextImageInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAcquireInfo, (VkAcquireNextImageInfoKHR*)(local_pAcquireInfo));
     }
     if (local_pAcquireInfo)
     {
-        transform_tohost_VkAcquireNextImageInfoKHR(mImpl->resources(), (VkAcquireNextImageInfoKHR*)(local_pAcquireInfo));
+        transform_tohost_VkAcquireNextImageInfoKHR(sResourceTracker, (VkAcquireNextImageInfoKHR*)(local_pAcquireInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_859;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_859, 1);
-        countingStream->write((uint64_t*)&cgen_var_859, 1 * 8);
-        marshal_VkAcquireNextImageInfoKHR(countingStream, (VkAcquireNextImageInfoKHR*)(local_pAcquireInfo));
-        countingStream->write((uint32_t*)pImageIndex, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkAcquireNextImageInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAcquireNextImageInfoKHR*)(local_pAcquireInfo), countPtr);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkAcquireNextImage2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkAcquireNextImage2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkAcquireNextImage2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkAcquireNextImage2KHR = OP_vkAcquireNextImage2KHR;
-    stream->write(&opcode_vkAcquireNextImage2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkAcquireNextImage2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_860;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_860, 1);
-    stream->write((uint64_t*)&cgen_var_860, 1 * 8);
-    marshal_VkAcquireNextImageInfoKHR(stream, (VkAcquireNextImageInfoKHR*)(local_pAcquireInfo));
-    stream->write((uint32_t*)pImageIndex, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkAcquireNextImage2KHR readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkAcquireNextImage2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkAcquireNextImage2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkAcquireNextImageInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAcquireNextImageInfoKHR*)(local_pAcquireInfo), streamPtrPtr);
+    memcpy(*streamPtrPtr, (uint32_t*)pImageIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     stream->read((uint32_t*)pImageIndex, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkAcquireNextImage2KHR returnUnmarshal");
     VkResult vkAcquireNextImage2KHR_VkResult_return = (VkResult)0;
     stream->read(&vkAcquireNextImage2KHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkAcquireNextImage2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkAcquireNextImage2KHR_VkResult_return;
 }
 
@@ -12819,67 +14934,74 @@
 VkResult VkEncoder::vkGetPhysicalDeviceDisplayPropertiesKHR(
     VkPhysicalDevice physicalDevice,
     uint32_t* pPropertyCount,
-    VkDisplayPropertiesKHR* pProperties)
+    VkDisplayPropertiesKHR* pProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayPropertiesKHR encode");
-    mImpl->log("start vkGetPhysicalDeviceDisplayPropertiesKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     local_physicalDevice = physicalDevice;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_861;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_861, 1);
-        countingStream->write((uint64_t*)&cgen_var_861, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_862 = (uint64_t)(uintptr_t)pPropertyCount;
-        countingStream->putBe64(cgen_var_862);
+        *countPtr += 8;
         if (pPropertyCount)
         {
-            countingStream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_863 = (uint64_t)(uintptr_t)pProperties;
-        countingStream->putBe64(cgen_var_863);
+        *countPtr += 8;
         if (pProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            if (pPropertyCount)
             {
-                marshal_VkDisplayPropertiesKHR(countingStream, (VkDisplayPropertiesKHR*)(pProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+                {
+                    count_VkDisplayPropertiesKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPropertiesKHR*)(pProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetPhysicalDeviceDisplayPropertiesKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceDisplayPropertiesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceDisplayPropertiesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceDisplayPropertiesKHR = OP_vkGetPhysicalDeviceDisplayPropertiesKHR;
-    stream->write(&opcode_vkGetPhysicalDeviceDisplayPropertiesKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceDisplayPropertiesKHR, sizeof(uint32_t));
-    uint64_t cgen_var_864;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_864, 1);
-    stream->write((uint64_t*)&cgen_var_864, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceDisplayPropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceDisplayPropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_865 = (uint64_t)(uintptr_t)pPropertyCount;
-    stream->putBe64(cgen_var_865);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pPropertyCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPropertyCount)
     {
-        stream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPropertyCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_866 = (uint64_t)(uintptr_t)pProperties;
-    stream->putBe64(cgen_var_866);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pProperties;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
         {
-            marshal_VkDisplayPropertiesKHR(stream, (VkDisplayPropertiesKHR*)(pProperties + i));
+            reservedmarshal_VkDisplayPropertiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPropertiesKHR*)(pProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayPropertiesKHR readParams");
     // WARNING PTR CHECK
     uint32_t* check_pPropertyCount;
     check_pPropertyCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -12900,92 +15022,107 @@
         {
             fprintf(stderr, "fatal: pProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pPropertyCount)
         {
-            unmarshal_VkDisplayPropertiesKHR(stream, (VkDisplayPropertiesKHR*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                unmarshal_VkDisplayPropertiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPropertiesKHR*)(pProperties + i));
+            }
         }
     }
-    if (pProperties)
+    if (pPropertyCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pProperties)
         {
-            transform_fromhost_VkDisplayPropertiesKHR(mImpl->resources(), (VkDisplayPropertiesKHR*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                transform_fromhost_VkDisplayPropertiesKHR(sResourceTracker, (VkDisplayPropertiesKHR*)(pProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayPropertiesKHR returnUnmarshal");
     VkResult vkGetPhysicalDeviceDisplayPropertiesKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetPhysicalDeviceDisplayPropertiesKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceDisplayPropertiesKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceDisplayPropertiesKHR_VkResult_return;
 }
 
 VkResult VkEncoder::vkGetPhysicalDeviceDisplayPlanePropertiesKHR(
     VkPhysicalDevice physicalDevice,
     uint32_t* pPropertyCount,
-    VkDisplayPlanePropertiesKHR* pProperties)
+    VkDisplayPlanePropertiesKHR* pProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayPlanePropertiesKHR encode");
-    mImpl->log("start vkGetPhysicalDeviceDisplayPlanePropertiesKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     local_physicalDevice = physicalDevice;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_869;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_869, 1);
-        countingStream->write((uint64_t*)&cgen_var_869, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_870 = (uint64_t)(uintptr_t)pPropertyCount;
-        countingStream->putBe64(cgen_var_870);
+        *countPtr += 8;
         if (pPropertyCount)
         {
-            countingStream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_871 = (uint64_t)(uintptr_t)pProperties;
-        countingStream->putBe64(cgen_var_871);
+        *countPtr += 8;
         if (pProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            if (pPropertyCount)
             {
-                marshal_VkDisplayPlanePropertiesKHR(countingStream, (VkDisplayPlanePropertiesKHR*)(pProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+                {
+                    count_VkDisplayPlanePropertiesKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPlanePropertiesKHR*)(pProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetPhysicalDeviceDisplayPlanePropertiesKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceDisplayPlanePropertiesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceDisplayPlanePropertiesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceDisplayPlanePropertiesKHR = OP_vkGetPhysicalDeviceDisplayPlanePropertiesKHR;
-    stream->write(&opcode_vkGetPhysicalDeviceDisplayPlanePropertiesKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceDisplayPlanePropertiesKHR, sizeof(uint32_t));
-    uint64_t cgen_var_872;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_872, 1);
-    stream->write((uint64_t*)&cgen_var_872, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceDisplayPlanePropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceDisplayPlanePropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_873 = (uint64_t)(uintptr_t)pPropertyCount;
-    stream->putBe64(cgen_var_873);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pPropertyCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPropertyCount)
     {
-        stream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPropertyCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_874 = (uint64_t)(uintptr_t)pProperties;
-    stream->putBe64(cgen_var_874);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pProperties;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
         {
-            marshal_VkDisplayPlanePropertiesKHR(stream, (VkDisplayPlanePropertiesKHR*)(pProperties + i));
+            reservedmarshal_VkDisplayPlanePropertiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPlanePropertiesKHR*)(pProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayPlanePropertiesKHR readParams");
     // WARNING PTR CHECK
     uint32_t* check_pPropertyCount;
     check_pPropertyCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -13006,25 +15143,33 @@
         {
             fprintf(stderr, "fatal: pProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pPropertyCount)
         {
-            unmarshal_VkDisplayPlanePropertiesKHR(stream, (VkDisplayPlanePropertiesKHR*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                unmarshal_VkDisplayPlanePropertiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPlanePropertiesKHR*)(pProperties + i));
+            }
         }
     }
-    if (pProperties)
+    if (pPropertyCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pProperties)
         {
-            transform_fromhost_VkDisplayPlanePropertiesKHR(mImpl->resources(), (VkDisplayPlanePropertiesKHR*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                transform_fromhost_VkDisplayPlanePropertiesKHR(sResourceTracker, (VkDisplayPlanePropertiesKHR*)(pProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayPlanePropertiesKHR returnUnmarshal");
     VkResult vkGetPhysicalDeviceDisplayPlanePropertiesKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetPhysicalDeviceDisplayPlanePropertiesKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceDisplayPlanePropertiesKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceDisplayPlanePropertiesKHR_VkResult_return;
 }
 
@@ -13032,79 +15177,87 @@
     VkPhysicalDevice physicalDevice,
     uint32_t planeIndex,
     uint32_t* pDisplayCount,
-    VkDisplayKHR* pDisplays)
+    VkDisplayKHR* pDisplays,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetDisplayPlaneSupportedDisplaysKHR encode");
-    mImpl->log("start vkGetDisplayPlaneSupportedDisplaysKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     uint32_t local_planeIndex;
     local_physicalDevice = physicalDevice;
     local_planeIndex = planeIndex;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_877;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_877, 1);
-        countingStream->write((uint64_t*)&cgen_var_877, 1 * 8);
-        countingStream->write((uint32_t*)&local_planeIndex, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         // WARNING PTR CHECK
-        uint64_t cgen_var_878 = (uint64_t)(uintptr_t)pDisplayCount;
-        countingStream->putBe64(cgen_var_878);
+        *countPtr += 8;
         if (pDisplayCount)
         {
-            countingStream->write((uint32_t*)pDisplayCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_879 = (uint64_t)(uintptr_t)pDisplays;
-        countingStream->putBe64(cgen_var_879);
+        *countPtr += 8;
         if (pDisplays)
         {
             if ((*(pDisplayCount)))
             {
-                uint64_t* cgen_var_880;
-                countingStream->alloc((void**)&cgen_var_880, (*(pDisplayCount)) * 8);
-                countingStream->handleMapping()->mapHandles_VkDisplayKHR_u64(pDisplays, cgen_var_880, (*(pDisplayCount)));
-                countingStream->write((uint64_t*)cgen_var_880, (*(pDisplayCount)) * 8);
+                *countPtr += (*(pDisplayCount)) * 8;
             }
         }
     }
-    uint32_t packetSize_vkGetDisplayPlaneSupportedDisplaysKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetDisplayPlaneSupportedDisplaysKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDisplayPlaneSupportedDisplaysKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetDisplayPlaneSupportedDisplaysKHR = OP_vkGetDisplayPlaneSupportedDisplaysKHR;
-    stream->write(&opcode_vkGetDisplayPlaneSupportedDisplaysKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetDisplayPlaneSupportedDisplaysKHR, sizeof(uint32_t));
-    uint64_t cgen_var_881;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_881, 1);
-    stream->write((uint64_t*)&cgen_var_881, 1 * 8);
-    stream->write((uint32_t*)&local_planeIndex, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDisplayPlaneSupportedDisplaysKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDisplayPlaneSupportedDisplaysKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_planeIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     // WARNING PTR CHECK
-    uint64_t cgen_var_882 = (uint64_t)(uintptr_t)pDisplayCount;
-    stream->putBe64(cgen_var_882);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pDisplayCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pDisplayCount)
     {
-        stream->write((uint32_t*)pDisplayCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pDisplayCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
+    /* is handle, possibly out */;
     // WARNING PTR CHECK
-    uint64_t cgen_var_883 = (uint64_t)(uintptr_t)pDisplays;
-    stream->putBe64(cgen_var_883);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pDisplays;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pDisplays)
     {
         if ((*(pDisplayCount)))
         {
-            uint64_t* cgen_var_884;
-            stream->alloc((void**)&cgen_var_884, (*(pDisplayCount)) * 8);
-            stream->handleMapping()->mapHandles_VkDisplayKHR_u64(pDisplays, cgen_var_884, (*(pDisplayCount)));
-            stream->write((uint64_t*)cgen_var_884, (*(pDisplayCount)) * 8);
+            uint8_t* cgen_var_2_0_ptr = (uint8_t*)(*streamPtrPtr);
+            if (pDisplayCount)
+            {
+                for (uint32_t k = 0; k < (*(pDisplayCount)); ++k)
+                {
+                    uint64_t tmpval = (uint64_t)(pDisplays[k]);
+                    memcpy(cgen_var_2_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+                }
+            }
+            *streamPtrPtr += 8 * (*(pDisplayCount));
         }
     }
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkGetDisplayPlaneSupportedDisplaysKHR readParams");
+    /* is handle, possibly out */;
     // WARNING PTR CHECK
     uint32_t* check_pDisplayCount;
     check_pDisplayCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -13127,19 +15280,21 @@
         }
         if ((*(pDisplayCount)))
         {
-            uint64_t* cgen_var_887;
-            stream->alloc((void**)&cgen_var_887, (*(pDisplayCount)) * 8);
-            stream->read((uint64_t*)cgen_var_887, (*(pDisplayCount)) * 8);
-            stream->handleMapping()->mapHandles_u64_VkDisplayKHR(cgen_var_887, (VkDisplayKHR*)pDisplays, (*(pDisplayCount)));
+            uint64_t* cgen_var_4_0;
+            stream->alloc((void**)&cgen_var_4_0, (*(pDisplayCount)) * 8);
+            stream->read((uint64_t*)cgen_var_4_0, (*(pDisplayCount)) * 8);
+            stream->handleMapping()->mapHandles_u64_VkDisplayKHR(cgen_var_4_0, (VkDisplayKHR*)pDisplays, (*(pDisplayCount)));
         }
     }
-    AEMU_SCOPED_TRACE("vkGetDisplayPlaneSupportedDisplaysKHR returnUnmarshal");
     VkResult vkGetDisplayPlaneSupportedDisplaysKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetDisplayPlaneSupportedDisplaysKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetDisplayPlaneSupportedDisplaysKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetDisplayPlaneSupportedDisplaysKHR_VkResult_return;
 }
 
@@ -13147,75 +15302,82 @@
     VkPhysicalDevice physicalDevice,
     VkDisplayKHR display,
     uint32_t* pPropertyCount,
-    VkDisplayModePropertiesKHR* pProperties)
+    VkDisplayModePropertiesKHR* pProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetDisplayModePropertiesKHR encode");
-    mImpl->log("start vkGetDisplayModePropertiesKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkDisplayKHR local_display;
     local_physicalDevice = physicalDevice;
     local_display = display;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_888;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_888, 1);
-        countingStream->write((uint64_t*)&cgen_var_888, 1 * 8);
-        uint64_t cgen_var_889;
-        countingStream->handleMapping()->mapHandles_VkDisplayKHR_u64(&local_display, &cgen_var_889, 1);
-        countingStream->write((uint64_t*)&cgen_var_889, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_890 = (uint64_t)(uintptr_t)pPropertyCount;
-        countingStream->putBe64(cgen_var_890);
+        *countPtr += 8;
         if (pPropertyCount)
         {
-            countingStream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_891 = (uint64_t)(uintptr_t)pProperties;
-        countingStream->putBe64(cgen_var_891);
+        *countPtr += 8;
         if (pProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            if (pPropertyCount)
             {
-                marshal_VkDisplayModePropertiesKHR(countingStream, (VkDisplayModePropertiesKHR*)(pProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+                {
+                    count_VkDisplayModePropertiesKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayModePropertiesKHR*)(pProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetDisplayModePropertiesKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetDisplayModePropertiesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDisplayModePropertiesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetDisplayModePropertiesKHR = OP_vkGetDisplayModePropertiesKHR;
-    stream->write(&opcode_vkGetDisplayModePropertiesKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetDisplayModePropertiesKHR, sizeof(uint32_t));
-    uint64_t cgen_var_892;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_892, 1);
-    stream->write((uint64_t*)&cgen_var_892, 1 * 8);
-    uint64_t cgen_var_893;
-    stream->handleMapping()->mapHandles_VkDisplayKHR_u64(&local_display, &cgen_var_893, 1);
-    stream->write((uint64_t*)&cgen_var_893, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDisplayModePropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDisplayModePropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDisplayKHR((*&local_display));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_894 = (uint64_t)(uintptr_t)pPropertyCount;
-    stream->putBe64(cgen_var_894);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pPropertyCount;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPropertyCount)
     {
-        stream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPropertyCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_895 = (uint64_t)(uintptr_t)pProperties;
-    stream->putBe64(cgen_var_895);
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)pProperties;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
         {
-            marshal_VkDisplayModePropertiesKHR(stream, (VkDisplayModePropertiesKHR*)(pProperties + i));
+            reservedmarshal_VkDisplayModePropertiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayModePropertiesKHR*)(pProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetDisplayModePropertiesKHR readParams");
     // WARNING PTR CHECK
     uint32_t* check_pPropertyCount;
     check_pPropertyCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -13236,25 +15398,33 @@
         {
             fprintf(stderr, "fatal: pProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pPropertyCount)
         {
-            unmarshal_VkDisplayModePropertiesKHR(stream, (VkDisplayModePropertiesKHR*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                unmarshal_VkDisplayModePropertiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayModePropertiesKHR*)(pProperties + i));
+            }
         }
     }
-    if (pProperties)
+    if (pPropertyCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pProperties)
         {
-            transform_fromhost_VkDisplayModePropertiesKHR(mImpl->resources(), (VkDisplayModePropertiesKHR*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                transform_fromhost_VkDisplayModePropertiesKHR(sResourceTracker, (VkDisplayModePropertiesKHR*)(pProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetDisplayModePropertiesKHR returnUnmarshal");
     VkResult vkGetDisplayModePropertiesKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetDisplayModePropertiesKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetDisplayModePropertiesKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetDisplayModePropertiesKHR_VkResult_return;
 }
 
@@ -13263,16 +15433,14 @@
     VkDisplayKHR display,
     const VkDisplayModeCreateInfoKHR* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkDisplayModeKHR* pMode)
+    VkDisplayModeKHR* pMode,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateDisplayModeKHR encode");
-    mImpl->log("start vkCreateDisplayModeKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkDisplayKHR local_display;
     VkDisplayModeCreateInfoKHR* local_pCreateInfo;
@@ -13283,80 +15451,86 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkDisplayModeCreateInfoKHR*)pool->alloc(sizeof(const VkDisplayModeCreateInfoKHR));
-        deepcopy_VkDisplayModeCreateInfoKHR(pool, pCreateInfo, (VkDisplayModeCreateInfoKHR*)(local_pCreateInfo));
+        deepcopy_VkDisplayModeCreateInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkDisplayModeCreateInfoKHR*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkDisplayModeCreateInfoKHR(mImpl->resources(), (VkDisplayModeCreateInfoKHR*)(local_pCreateInfo));
+        transform_tohost_VkDisplayModeCreateInfoKHR(sResourceTracker, (VkDisplayModeCreateInfoKHR*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_898;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_898, 1);
-        countingStream->write((uint64_t*)&cgen_var_898, 1 * 8);
-        uint64_t cgen_var_899;
-        countingStream->handleMapping()->mapHandles_VkDisplayKHR_u64(&local_display, &cgen_var_899, 1);
-        countingStream->write((uint64_t*)&cgen_var_899, 1 * 8);
-        marshal_VkDisplayModeCreateInfoKHR(countingStream, (VkDisplayModeCreateInfoKHR*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        count_VkDisplayModeCreateInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayModeCreateInfoKHR*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_900 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_900);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_901;
-        countingStream->handleMapping()->mapHandles_VkDisplayModeKHR_u64(pMode, &cgen_var_901, 1);
-        countingStream->write((uint64_t*)&cgen_var_901, 8);
+        uint64_t cgen_var_2;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateDisplayModeKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateDisplayModeKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateDisplayModeKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateDisplayModeKHR = OP_vkCreateDisplayModeKHR;
-    stream->write(&opcode_vkCreateDisplayModeKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateDisplayModeKHR, sizeof(uint32_t));
-    uint64_t cgen_var_902;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_902, 1);
-    stream->write((uint64_t*)&cgen_var_902, 1 * 8);
-    uint64_t cgen_var_903;
-    stream->handleMapping()->mapHandles_VkDisplayKHR_u64(&local_display, &cgen_var_903, 1);
-    stream->write((uint64_t*)&cgen_var_903, 1 * 8);
-    marshal_VkDisplayModeCreateInfoKHR(stream, (VkDisplayModeCreateInfoKHR*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateDisplayModeKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateDisplayModeKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDisplayKHR((*&local_display));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDisplayModeCreateInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayModeCreateInfoKHR*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_904 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_904);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_905;
-    stream->handleMapping()->mapHandles_VkDisplayModeKHR_u64(pMode, &cgen_var_905, 1);
-    stream->write((uint64_t*)&cgen_var_905, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateDisplayModeKHR readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_906;
-    stream->read((uint64_t*)&cgen_var_906, 8);
-    stream->handleMapping()->mapHandles_u64_VkDisplayModeKHR(&cgen_var_906, (VkDisplayModeKHR*)pMode, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    *&cgen_var_3 = (uint64_t)((*pMode));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_3, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_4;
+    stream->read((uint64_t*)&cgen_var_4, 8);
+    stream->handleMapping()->mapHandles_u64_VkDisplayModeKHR(&cgen_var_4, (VkDisplayModeKHR*)pMode, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateDisplayModeKHR returnUnmarshal");
     VkResult vkCreateDisplayModeKHR_VkResult_return = (VkResult)0;
     stream->read(&vkCreateDisplayModeKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateDisplayModeKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateDisplayModeKHR_VkResult_return;
 }
 
@@ -13364,59 +15538,63 @@
     VkPhysicalDevice physicalDevice,
     VkDisplayModeKHR mode,
     uint32_t planeIndex,
-    VkDisplayPlaneCapabilitiesKHR* pCapabilities)
+    VkDisplayPlaneCapabilitiesKHR* pCapabilities,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetDisplayPlaneCapabilitiesKHR encode");
-    mImpl->log("start vkGetDisplayPlaneCapabilitiesKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkDisplayModeKHR local_mode;
     uint32_t local_planeIndex;
     local_physicalDevice = physicalDevice;
     local_mode = mode;
     local_planeIndex = planeIndex;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_907;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_907, 1);
-        countingStream->write((uint64_t*)&cgen_var_907, 1 * 8);
-        uint64_t cgen_var_908;
-        countingStream->handleMapping()->mapHandles_VkDisplayModeKHR_u64(&local_mode, &cgen_var_908, 1);
-        countingStream->write((uint64_t*)&cgen_var_908, 1 * 8);
-        countingStream->write((uint32_t*)&local_planeIndex, sizeof(uint32_t));
-        marshal_VkDisplayPlaneCapabilitiesKHR(countingStream, (VkDisplayPlaneCapabilitiesKHR*)(pCapabilities));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        count_VkDisplayPlaneCapabilitiesKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPlaneCapabilitiesKHR*)(pCapabilities), countPtr);
     }
-    uint32_t packetSize_vkGetDisplayPlaneCapabilitiesKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetDisplayPlaneCapabilitiesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDisplayPlaneCapabilitiesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetDisplayPlaneCapabilitiesKHR = OP_vkGetDisplayPlaneCapabilitiesKHR;
-    stream->write(&opcode_vkGetDisplayPlaneCapabilitiesKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetDisplayPlaneCapabilitiesKHR, sizeof(uint32_t));
-    uint64_t cgen_var_909;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_909, 1);
-    stream->write((uint64_t*)&cgen_var_909, 1 * 8);
-    uint64_t cgen_var_910;
-    stream->handleMapping()->mapHandles_VkDisplayModeKHR_u64(&local_mode, &cgen_var_910, 1);
-    stream->write((uint64_t*)&cgen_var_910, 1 * 8);
-    stream->write((uint32_t*)&local_planeIndex, sizeof(uint32_t));
-    marshal_VkDisplayPlaneCapabilitiesKHR(stream, (VkDisplayPlaneCapabilitiesKHR*)(pCapabilities));
-    AEMU_SCOPED_TRACE("vkGetDisplayPlaneCapabilitiesKHR readParams");
-    unmarshal_VkDisplayPlaneCapabilitiesKHR(stream, (VkDisplayPlaneCapabilitiesKHR*)(pCapabilities));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDisplayPlaneCapabilitiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDisplayPlaneCapabilitiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDisplayModeKHR((*&local_mode));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_planeIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    reservedmarshal_VkDisplayPlaneCapabilitiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPlaneCapabilitiesKHR*)(pCapabilities), streamPtrPtr);
+    unmarshal_VkDisplayPlaneCapabilitiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPlaneCapabilitiesKHR*)(pCapabilities));
     if (pCapabilities)
     {
-        transform_fromhost_VkDisplayPlaneCapabilitiesKHR(mImpl->resources(), (VkDisplayPlaneCapabilitiesKHR*)(pCapabilities));
+        transform_fromhost_VkDisplayPlaneCapabilitiesKHR(sResourceTracker, (VkDisplayPlaneCapabilitiesKHR*)(pCapabilities));
     }
-    AEMU_SCOPED_TRACE("vkGetDisplayPlaneCapabilitiesKHR returnUnmarshal");
     VkResult vkGetDisplayPlaneCapabilitiesKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetDisplayPlaneCapabilitiesKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetDisplayPlaneCapabilitiesKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetDisplayPlaneCapabilitiesKHR_VkResult_return;
 }
 
@@ -13424,16 +15602,14 @@
     VkInstance instance,
     const VkDisplaySurfaceCreateInfoKHR* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
+    VkSurfaceKHR* pSurface,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateDisplayPlaneSurfaceKHR encode");
-    mImpl->log("start vkCreateDisplayPlaneSurfaceKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     VkDisplaySurfaceCreateInfoKHR* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -13442,72 +15618,78 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkDisplaySurfaceCreateInfoKHR*)pool->alloc(sizeof(const VkDisplaySurfaceCreateInfoKHR));
-        deepcopy_VkDisplaySurfaceCreateInfoKHR(pool, pCreateInfo, (VkDisplaySurfaceCreateInfoKHR*)(local_pCreateInfo));
+        deepcopy_VkDisplaySurfaceCreateInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkDisplaySurfaceCreateInfoKHR*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkDisplaySurfaceCreateInfoKHR(mImpl->resources(), (VkDisplaySurfaceCreateInfoKHR*)(local_pCreateInfo));
+        transform_tohost_VkDisplaySurfaceCreateInfoKHR(sResourceTracker, (VkDisplaySurfaceCreateInfoKHR*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_911;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_911, 1);
-        countingStream->write((uint64_t*)&cgen_var_911, 1 * 8);
-        marshal_VkDisplaySurfaceCreateInfoKHR(countingStream, (VkDisplaySurfaceCreateInfoKHR*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDisplaySurfaceCreateInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplaySurfaceCreateInfoKHR*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_912 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_912);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_913;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_913, 1);
-        countingStream->write((uint64_t*)&cgen_var_913, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateDisplayPlaneSurfaceKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateDisplayPlaneSurfaceKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateDisplayPlaneSurfaceKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateDisplayPlaneSurfaceKHR = OP_vkCreateDisplayPlaneSurfaceKHR;
-    stream->write(&opcode_vkCreateDisplayPlaneSurfaceKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateDisplayPlaneSurfaceKHR, sizeof(uint32_t));
-    uint64_t cgen_var_914;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_914, 1);
-    stream->write((uint64_t*)&cgen_var_914, 1 * 8);
-    marshal_VkDisplaySurfaceCreateInfoKHR(stream, (VkDisplaySurfaceCreateInfoKHR*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateDisplayPlaneSurfaceKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateDisplayPlaneSurfaceKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDisplaySurfaceCreateInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplaySurfaceCreateInfoKHR*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_915 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_915);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_916;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_916, 1);
-    stream->write((uint64_t*)&cgen_var_916, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateDisplayPlaneSurfaceKHR readParams");
-    uint64_t cgen_var_917;
-    stream->read((uint64_t*)&cgen_var_917, 8);
-    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_917, (VkSurfaceKHR*)pSurface, 1);
-    AEMU_SCOPED_TRACE("vkCreateDisplayPlaneSurfaceKHR returnUnmarshal");
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSurface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_3, (VkSurfaceKHR*)pSurface, 1);
     VkResult vkCreateDisplayPlaneSurfaceKHR_VkResult_return = (VkResult)0;
     stream->read(&vkCreateDisplayPlaneSurfaceKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateDisplayPlaneSurfaceKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateDisplayPlaneSurfaceKHR_VkResult_return;
 }
 
@@ -13518,16 +15700,14 @@
     uint32_t swapchainCount,
     const VkSwapchainCreateInfoKHR* pCreateInfos,
     const VkAllocationCallbacks* pAllocator,
-    VkSwapchainKHR* pSwapchains)
+    VkSwapchainKHR* pSwapchains,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateSharedSwapchainsKHR encode");
-    mImpl->log("start vkCreateSharedSwapchainsKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     uint32_t local_swapchainCount;
     VkSwapchainCreateInfoKHR* local_pCreateInfos;
@@ -13540,96 +15720,103 @@
         local_pCreateInfos = (VkSwapchainCreateInfoKHR*)pool->alloc(((swapchainCount)) * sizeof(const VkSwapchainCreateInfoKHR));
         for (uint32_t i = 0; i < (uint32_t)((swapchainCount)); ++i)
         {
-            deepcopy_VkSwapchainCreateInfoKHR(pool, pCreateInfos + i, (VkSwapchainCreateInfoKHR*)(local_pCreateInfos + i));
+            deepcopy_VkSwapchainCreateInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfos + i, (VkSwapchainCreateInfoKHR*)(local_pCreateInfos + i));
         }
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfos)
     {
         for (uint32_t i = 0; i < (uint32_t)((swapchainCount)); ++i)
         {
-            transform_tohost_VkSwapchainCreateInfoKHR(mImpl->resources(), (VkSwapchainCreateInfoKHR*)(local_pCreateInfos + i));
+            transform_tohost_VkSwapchainCreateInfoKHR(sResourceTracker, (VkSwapchainCreateInfoKHR*)(local_pCreateInfos + i));
         }
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_918;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_918, 1);
-        countingStream->write((uint64_t*)&cgen_var_918, 1 * 8);
-        countingStream->write((uint32_t*)&local_swapchainCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((swapchainCount)); ++i)
         {
-            marshal_VkSwapchainCreateInfoKHR(countingStream, (VkSwapchainCreateInfoKHR*)(local_pCreateInfos + i));
+            count_VkSwapchainCreateInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSwapchainCreateInfoKHR*)(local_pCreateInfos + i), countPtr);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_919 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_919);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
         if (((swapchainCount)))
         {
-            uint64_t* cgen_var_920;
-            countingStream->alloc((void**)&cgen_var_920, ((swapchainCount)) * 8);
-            countingStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(pSwapchains, cgen_var_920, ((swapchainCount)));
-            countingStream->write((uint64_t*)cgen_var_920, ((swapchainCount)) * 8);
+            *countPtr += ((swapchainCount)) * 8;
         }
     }
-    uint32_t packetSize_vkCreateSharedSwapchainsKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateSharedSwapchainsKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateSharedSwapchainsKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateSharedSwapchainsKHR = OP_vkCreateSharedSwapchainsKHR;
-    stream->write(&opcode_vkCreateSharedSwapchainsKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateSharedSwapchainsKHR, sizeof(uint32_t));
-    uint64_t cgen_var_921;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_921, 1);
-    stream->write((uint64_t*)&cgen_var_921, 1 * 8);
-    stream->write((uint32_t*)&local_swapchainCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateSharedSwapchainsKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateSharedSwapchainsKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_swapchainCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((swapchainCount)); ++i)
     {
-        marshal_VkSwapchainCreateInfoKHR(stream, (VkSwapchainCreateInfoKHR*)(local_pCreateInfos + i));
+        reservedmarshal_VkSwapchainCreateInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSwapchainCreateInfoKHR*)(local_pCreateInfos + i), streamPtrPtr);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_922 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_922);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
+    /* is handle, possibly out */;
     if (((swapchainCount)))
     {
-        uint64_t* cgen_var_923;
-        stream->alloc((void**)&cgen_var_923, ((swapchainCount)) * 8);
-        stream->handleMapping()->mapHandles_VkSwapchainKHR_u64(pSwapchains, cgen_var_923, ((swapchainCount)));
-        stream->write((uint64_t*)cgen_var_923, ((swapchainCount)) * 8);
+        uint8_t* cgen_var_2_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((swapchainCount)); ++k)
+        {
+            uint64_t tmpval = (uint64_t)(pSwapchains[k]);
+            memcpy(cgen_var_2_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((swapchainCount));
     }
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateSharedSwapchainsKHR readParams");
+    /* is handle, possibly out */;
     if (((swapchainCount)))
     {
-        uint64_t* cgen_var_924;
-        stream->alloc((void**)&cgen_var_924, ((swapchainCount)) * 8);
-        stream->read((uint64_t*)cgen_var_924, ((swapchainCount)) * 8);
-        stream->handleMapping()->mapHandles_u64_VkSwapchainKHR(cgen_var_924, (VkSwapchainKHR*)pSwapchains, ((swapchainCount)));
+        uint64_t* cgen_var_3;
+        stream->alloc((void**)&cgen_var_3, ((swapchainCount)) * 8);
+        stream->read((uint64_t*)cgen_var_3, ((swapchainCount)) * 8);
+        stream->handleMapping()->mapHandles_u64_VkSwapchainKHR(cgen_var_3, (VkSwapchainKHR*)pSwapchains, ((swapchainCount)));
     }
-    AEMU_SCOPED_TRACE("vkCreateSharedSwapchainsKHR returnUnmarshal");
     VkResult vkCreateSharedSwapchainsKHR_VkResult_return = (VkResult)0;
     stream->read(&vkCreateSharedSwapchainsKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateSharedSwapchainsKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateSharedSwapchainsKHR_VkResult_return;
 }
 
@@ -13639,16 +15826,14 @@
     VkInstance instance,
     const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
+    VkSurfaceKHR* pSurface,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateXlibSurfaceKHR encode");
-    mImpl->log("start vkCreateXlibSurfaceKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     VkXlibSurfaceCreateInfoKHR* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -13657,72 +15842,78 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkXlibSurfaceCreateInfoKHR*)pool->alloc(sizeof(const VkXlibSurfaceCreateInfoKHR));
-        deepcopy_VkXlibSurfaceCreateInfoKHR(pool, pCreateInfo, (VkXlibSurfaceCreateInfoKHR*)(local_pCreateInfo));
+        deepcopy_VkXlibSurfaceCreateInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkXlibSurfaceCreateInfoKHR*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkXlibSurfaceCreateInfoKHR(mImpl->resources(), (VkXlibSurfaceCreateInfoKHR*)(local_pCreateInfo));
+        transform_tohost_VkXlibSurfaceCreateInfoKHR(sResourceTracker, (VkXlibSurfaceCreateInfoKHR*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_925;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_925, 1);
-        countingStream->write((uint64_t*)&cgen_var_925, 1 * 8);
-        marshal_VkXlibSurfaceCreateInfoKHR(countingStream, (VkXlibSurfaceCreateInfoKHR*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkXlibSurfaceCreateInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkXlibSurfaceCreateInfoKHR*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_926 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_926);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_927;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_927, 1);
-        countingStream->write((uint64_t*)&cgen_var_927, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateXlibSurfaceKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateXlibSurfaceKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateXlibSurfaceKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateXlibSurfaceKHR = OP_vkCreateXlibSurfaceKHR;
-    stream->write(&opcode_vkCreateXlibSurfaceKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateXlibSurfaceKHR, sizeof(uint32_t));
-    uint64_t cgen_var_928;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_928, 1);
-    stream->write((uint64_t*)&cgen_var_928, 1 * 8);
-    marshal_VkXlibSurfaceCreateInfoKHR(stream, (VkXlibSurfaceCreateInfoKHR*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateXlibSurfaceKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateXlibSurfaceKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkXlibSurfaceCreateInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkXlibSurfaceCreateInfoKHR*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_929 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_929);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_930;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_930, 1);
-    stream->write((uint64_t*)&cgen_var_930, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateXlibSurfaceKHR readParams");
-    uint64_t cgen_var_931;
-    stream->read((uint64_t*)&cgen_var_931, 8);
-    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_931, (VkSurfaceKHR*)pSurface, 1);
-    AEMU_SCOPED_TRACE("vkCreateXlibSurfaceKHR returnUnmarshal");
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSurface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_3, (VkSurfaceKHR*)pSurface, 1);
     VkResult vkCreateXlibSurfaceKHR_VkResult_return = (VkResult)0;
     stream->read(&vkCreateXlibSurfaceKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateXlibSurfaceKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateXlibSurfaceKHR_VkResult_return;
 }
 
@@ -13730,51 +15921,57 @@
     VkPhysicalDevice physicalDevice,
     uint32_t queueFamilyIndex,
     Display* dpy,
-    VisualID visualID)
+    VisualID visualID,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceXlibPresentationSupportKHR encode");
-    mImpl->log("start vkGetPhysicalDeviceXlibPresentationSupportKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     uint32_t local_queueFamilyIndex;
     VisualID local_visualID;
     local_physicalDevice = physicalDevice;
     local_queueFamilyIndex = queueFamilyIndex;
     local_visualID = visualID;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_932;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_932, 1);
-        countingStream->write((uint64_t*)&cgen_var_932, 1 * 8);
-        countingStream->write((uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
-        countingStream->write((Display*)dpy, sizeof(Display));
-        countingStream->write((VisualID*)&local_visualID, sizeof(VisualID));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(Display);
+        *countPtr += sizeof(VisualID);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceXlibPresentationSupportKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceXlibPresentationSupportKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceXlibPresentationSupportKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceXlibPresentationSupportKHR = OP_vkGetPhysicalDeviceXlibPresentationSupportKHR;
-    stream->write(&opcode_vkGetPhysicalDeviceXlibPresentationSupportKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceXlibPresentationSupportKHR, sizeof(uint32_t));
-    uint64_t cgen_var_933;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_933, 1);
-    stream->write((uint64_t*)&cgen_var_933, 1 * 8);
-    stream->write((uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
-    stream->write((Display*)dpy, sizeof(Display));
-    stream->write((VisualID*)&local_visualID, sizeof(VisualID));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceXlibPresentationSupportKHR readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceXlibPresentationSupportKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceXlibPresentationSupportKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (Display*)dpy, sizeof(Display));
+    *streamPtrPtr += sizeof(Display);
+    memcpy(*streamPtrPtr, (VisualID*)&local_visualID, sizeof(VisualID));
+    *streamPtrPtr += sizeof(VisualID);
     stream->read((Display*)dpy, sizeof(Display));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceXlibPresentationSupportKHR returnUnmarshal");
     VkBool32 vkGetPhysicalDeviceXlibPresentationSupportKHR_VkBool32_return = (VkBool32)0;
     stream->read(&vkGetPhysicalDeviceXlibPresentationSupportKHR_VkBool32_return, sizeof(VkBool32));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceXlibPresentationSupportKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceXlibPresentationSupportKHR_VkBool32_return;
 }
 
@@ -13784,16 +15981,14 @@
     VkInstance instance,
     const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
+    VkSurfaceKHR* pSurface,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateXcbSurfaceKHR encode");
-    mImpl->log("start vkCreateXcbSurfaceKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     VkXcbSurfaceCreateInfoKHR* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -13802,72 +15997,78 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkXcbSurfaceCreateInfoKHR*)pool->alloc(sizeof(const VkXcbSurfaceCreateInfoKHR));
-        deepcopy_VkXcbSurfaceCreateInfoKHR(pool, pCreateInfo, (VkXcbSurfaceCreateInfoKHR*)(local_pCreateInfo));
+        deepcopy_VkXcbSurfaceCreateInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkXcbSurfaceCreateInfoKHR*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkXcbSurfaceCreateInfoKHR(mImpl->resources(), (VkXcbSurfaceCreateInfoKHR*)(local_pCreateInfo));
+        transform_tohost_VkXcbSurfaceCreateInfoKHR(sResourceTracker, (VkXcbSurfaceCreateInfoKHR*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_934;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_934, 1);
-        countingStream->write((uint64_t*)&cgen_var_934, 1 * 8);
-        marshal_VkXcbSurfaceCreateInfoKHR(countingStream, (VkXcbSurfaceCreateInfoKHR*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkXcbSurfaceCreateInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkXcbSurfaceCreateInfoKHR*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_935 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_935);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_936;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_936, 1);
-        countingStream->write((uint64_t*)&cgen_var_936, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateXcbSurfaceKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateXcbSurfaceKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateXcbSurfaceKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateXcbSurfaceKHR = OP_vkCreateXcbSurfaceKHR;
-    stream->write(&opcode_vkCreateXcbSurfaceKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateXcbSurfaceKHR, sizeof(uint32_t));
-    uint64_t cgen_var_937;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_937, 1);
-    stream->write((uint64_t*)&cgen_var_937, 1 * 8);
-    marshal_VkXcbSurfaceCreateInfoKHR(stream, (VkXcbSurfaceCreateInfoKHR*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateXcbSurfaceKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateXcbSurfaceKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkXcbSurfaceCreateInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkXcbSurfaceCreateInfoKHR*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_938 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_938);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_939;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_939, 1);
-    stream->write((uint64_t*)&cgen_var_939, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateXcbSurfaceKHR readParams");
-    uint64_t cgen_var_940;
-    stream->read((uint64_t*)&cgen_var_940, 8);
-    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_940, (VkSurfaceKHR*)pSurface, 1);
-    AEMU_SCOPED_TRACE("vkCreateXcbSurfaceKHR returnUnmarshal");
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSurface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_3, (VkSurfaceKHR*)pSurface, 1);
     VkResult vkCreateXcbSurfaceKHR_VkResult_return = (VkResult)0;
     stream->read(&vkCreateXcbSurfaceKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateXcbSurfaceKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateXcbSurfaceKHR_VkResult_return;
 }
 
@@ -13875,51 +16076,57 @@
     VkPhysicalDevice physicalDevice,
     uint32_t queueFamilyIndex,
     xcb_connection_t* connection,
-    xcb_visualid_t visual_id)
+    xcb_visualid_t visual_id,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceXcbPresentationSupportKHR encode");
-    mImpl->log("start vkGetPhysicalDeviceXcbPresentationSupportKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     uint32_t local_queueFamilyIndex;
     xcb_visualid_t local_visual_id;
     local_physicalDevice = physicalDevice;
     local_queueFamilyIndex = queueFamilyIndex;
     local_visual_id = visual_id;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_941;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_941, 1);
-        countingStream->write((uint64_t*)&cgen_var_941, 1 * 8);
-        countingStream->write((uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
-        countingStream->write((xcb_connection_t*)connection, sizeof(xcb_connection_t));
-        countingStream->write((xcb_visualid_t*)&local_visual_id, sizeof(xcb_visualid_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(xcb_connection_t);
+        *countPtr += sizeof(xcb_visualid_t);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceXcbPresentationSupportKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceXcbPresentationSupportKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceXcbPresentationSupportKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceXcbPresentationSupportKHR = OP_vkGetPhysicalDeviceXcbPresentationSupportKHR;
-    stream->write(&opcode_vkGetPhysicalDeviceXcbPresentationSupportKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceXcbPresentationSupportKHR, sizeof(uint32_t));
-    uint64_t cgen_var_942;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_942, 1);
-    stream->write((uint64_t*)&cgen_var_942, 1 * 8);
-    stream->write((uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
-    stream->write((xcb_connection_t*)connection, sizeof(xcb_connection_t));
-    stream->write((xcb_visualid_t*)&local_visual_id, sizeof(xcb_visualid_t));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceXcbPresentationSupportKHR readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceXcbPresentationSupportKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceXcbPresentationSupportKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (xcb_connection_t*)connection, sizeof(xcb_connection_t));
+    *streamPtrPtr += sizeof(xcb_connection_t);
+    memcpy(*streamPtrPtr, (xcb_visualid_t*)&local_visual_id, sizeof(xcb_visualid_t));
+    *streamPtrPtr += sizeof(xcb_visualid_t);
     stream->read((xcb_connection_t*)connection, sizeof(xcb_connection_t));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceXcbPresentationSupportKHR returnUnmarshal");
     VkBool32 vkGetPhysicalDeviceXcbPresentationSupportKHR_VkBool32_return = (VkBool32)0;
     stream->read(&vkGetPhysicalDeviceXcbPresentationSupportKHR_VkBool32_return, sizeof(VkBool32));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceXcbPresentationSupportKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceXcbPresentationSupportKHR_VkBool32_return;
 }
 
@@ -13929,16 +16136,14 @@
     VkInstance instance,
     const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
+    VkSurfaceKHR* pSurface,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateWaylandSurfaceKHR encode");
-    mImpl->log("start vkCreateWaylandSurfaceKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     VkWaylandSurfaceCreateInfoKHR* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -13947,278 +16152,147 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkWaylandSurfaceCreateInfoKHR*)pool->alloc(sizeof(const VkWaylandSurfaceCreateInfoKHR));
-        deepcopy_VkWaylandSurfaceCreateInfoKHR(pool, pCreateInfo, (VkWaylandSurfaceCreateInfoKHR*)(local_pCreateInfo));
+        deepcopy_VkWaylandSurfaceCreateInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkWaylandSurfaceCreateInfoKHR*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkWaylandSurfaceCreateInfoKHR(mImpl->resources(), (VkWaylandSurfaceCreateInfoKHR*)(local_pCreateInfo));
+        transform_tohost_VkWaylandSurfaceCreateInfoKHR(sResourceTracker, (VkWaylandSurfaceCreateInfoKHR*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_943;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_943, 1);
-        countingStream->write((uint64_t*)&cgen_var_943, 1 * 8);
-        marshal_VkWaylandSurfaceCreateInfoKHR(countingStream, (VkWaylandSurfaceCreateInfoKHR*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkWaylandSurfaceCreateInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkWaylandSurfaceCreateInfoKHR*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_944 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_944);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_945;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_945, 1);
-        countingStream->write((uint64_t*)&cgen_var_945, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateWaylandSurfaceKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateWaylandSurfaceKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateWaylandSurfaceKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateWaylandSurfaceKHR = OP_vkCreateWaylandSurfaceKHR;
-    stream->write(&opcode_vkCreateWaylandSurfaceKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateWaylandSurfaceKHR, sizeof(uint32_t));
-    uint64_t cgen_var_946;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_946, 1);
-    stream->write((uint64_t*)&cgen_var_946, 1 * 8);
-    marshal_VkWaylandSurfaceCreateInfoKHR(stream, (VkWaylandSurfaceCreateInfoKHR*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateWaylandSurfaceKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateWaylandSurfaceKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkWaylandSurfaceCreateInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkWaylandSurfaceCreateInfoKHR*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_947 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_947);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_948;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_948, 1);
-    stream->write((uint64_t*)&cgen_var_948, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateWaylandSurfaceKHR readParams");
-    uint64_t cgen_var_949;
-    stream->read((uint64_t*)&cgen_var_949, 8);
-    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_949, (VkSurfaceKHR*)pSurface, 1);
-    AEMU_SCOPED_TRACE("vkCreateWaylandSurfaceKHR returnUnmarshal");
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSurface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_3, (VkSurfaceKHR*)pSurface, 1);
     VkResult vkCreateWaylandSurfaceKHR_VkResult_return = (VkResult)0;
     stream->read(&vkCreateWaylandSurfaceKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateWaylandSurfaceKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateWaylandSurfaceKHR_VkResult_return;
 }
 
 VkBool32 VkEncoder::vkGetPhysicalDeviceWaylandPresentationSupportKHR(
     VkPhysicalDevice physicalDevice,
     uint32_t queueFamilyIndex,
-    wl_display* display)
+    wl_display* display,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceWaylandPresentationSupportKHR encode");
-    mImpl->log("start vkGetPhysicalDeviceWaylandPresentationSupportKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     uint32_t local_queueFamilyIndex;
     local_physicalDevice = physicalDevice;
     local_queueFamilyIndex = queueFamilyIndex;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_950;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_950, 1);
-        countingStream->write((uint64_t*)&cgen_var_950, 1 * 8);
-        countingStream->write((uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
-        countingStream->write((wl_display*)display, sizeof(wl_display));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(wl_display);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceWaylandPresentationSupportKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceWaylandPresentationSupportKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceWaylandPresentationSupportKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceWaylandPresentationSupportKHR = OP_vkGetPhysicalDeviceWaylandPresentationSupportKHR;
-    stream->write(&opcode_vkGetPhysicalDeviceWaylandPresentationSupportKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceWaylandPresentationSupportKHR, sizeof(uint32_t));
-    uint64_t cgen_var_951;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_951, 1);
-    stream->write((uint64_t*)&cgen_var_951, 1 * 8);
-    stream->write((uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
-    stream->write((wl_display*)display, sizeof(wl_display));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceWaylandPresentationSupportKHR readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceWaylandPresentationSupportKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceWaylandPresentationSupportKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (wl_display*)display, sizeof(wl_display));
+    *streamPtrPtr += sizeof(wl_display);
     stream->read((wl_display*)display, sizeof(wl_display));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceWaylandPresentationSupportKHR returnUnmarshal");
     VkBool32 vkGetPhysicalDeviceWaylandPresentationSupportKHR_VkBool32_return = (VkBool32)0;
     stream->read(&vkGetPhysicalDeviceWaylandPresentationSupportKHR_VkBool32_return, sizeof(VkBool32));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceWaylandPresentationSupportKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceWaylandPresentationSupportKHR_VkBool32_return;
 }
 
 #endif
-#ifdef VK_KHR_mir_surface
-VkResult VkEncoder::vkCreateMirSurfaceKHR(
-    VkInstance instance,
-    const VkMirSurfaceCreateInfoKHR* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
-{
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateMirSurfaceKHR encode");
-    mImpl->log("start vkCreateMirSurfaceKHR");
-    auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
-    auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
-    VkInstance local_instance;
-    VkMirSurfaceCreateInfoKHR* local_pCreateInfo;
-    VkAllocationCallbacks* local_pAllocator;
-    local_instance = instance;
-    local_pCreateInfo = nullptr;
-    if (pCreateInfo)
-    {
-        local_pCreateInfo = (VkMirSurfaceCreateInfoKHR*)pool->alloc(sizeof(const VkMirSurfaceCreateInfoKHR));
-        deepcopy_VkMirSurfaceCreateInfoKHR(pool, pCreateInfo, (VkMirSurfaceCreateInfoKHR*)(local_pCreateInfo));
-    }
-    local_pAllocator = nullptr;
-    if (pAllocator)
-    {
-        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
-    }
-    local_pAllocator = nullptr;
-    if (local_pCreateInfo)
-    {
-        transform_tohost_VkMirSurfaceCreateInfoKHR(mImpl->resources(), (VkMirSurfaceCreateInfoKHR*)(local_pCreateInfo));
-    }
-    if (local_pAllocator)
-    {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_952;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_952, 1);
-        countingStream->write((uint64_t*)&cgen_var_952, 1 * 8);
-        marshal_VkMirSurfaceCreateInfoKHR(countingStream, (VkMirSurfaceCreateInfoKHR*)(local_pCreateInfo));
-        // WARNING PTR CHECK
-        uint64_t cgen_var_953 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_953);
-        if (local_pAllocator)
-        {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
-        }
-        uint64_t cgen_var_954;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_954, 1);
-        countingStream->write((uint64_t*)&cgen_var_954, 8);
-    }
-    uint32_t packetSize_vkCreateMirSurfaceKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
-    uint32_t opcode_vkCreateMirSurfaceKHR = OP_vkCreateMirSurfaceKHR;
-    stream->write(&opcode_vkCreateMirSurfaceKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateMirSurfaceKHR, sizeof(uint32_t));
-    uint64_t cgen_var_955;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_955, 1);
-    stream->write((uint64_t*)&cgen_var_955, 1 * 8);
-    marshal_VkMirSurfaceCreateInfoKHR(stream, (VkMirSurfaceCreateInfoKHR*)(local_pCreateInfo));
-    // WARNING PTR CHECK
-    uint64_t cgen_var_956 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_956);
-    if (local_pAllocator)
-    {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
-    }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_957;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_957, 1);
-    stream->write((uint64_t*)&cgen_var_957, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateMirSurfaceKHR readParams");
-    uint64_t cgen_var_958;
-    stream->read((uint64_t*)&cgen_var_958, 8);
-    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_958, (VkSurfaceKHR*)pSurface, 1);
-    AEMU_SCOPED_TRACE("vkCreateMirSurfaceKHR returnUnmarshal");
-    VkResult vkCreateMirSurfaceKHR_VkResult_return = (VkResult)0;
-    stream->read(&vkCreateMirSurfaceKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateMirSurfaceKHR");;
-    return vkCreateMirSurfaceKHR_VkResult_return;
-}
-
-VkBool32 VkEncoder::vkGetPhysicalDeviceMirPresentationSupportKHR(
-    VkPhysicalDevice physicalDevice,
-    uint32_t queueFamilyIndex,
-    MirConnection* connection)
-{
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMirPresentationSupportKHR encode");
-    mImpl->log("start vkGetPhysicalDeviceMirPresentationSupportKHR");
-    auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
-    auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
-    VkPhysicalDevice local_physicalDevice;
-    uint32_t local_queueFamilyIndex;
-    local_physicalDevice = physicalDevice;
-    local_queueFamilyIndex = queueFamilyIndex;
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_959;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_959, 1);
-        countingStream->write((uint64_t*)&cgen_var_959, 1 * 8);
-        countingStream->write((uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
-        countingStream->write((MirConnection*)connection, sizeof(MirConnection));
-    }
-    uint32_t packetSize_vkGetPhysicalDeviceMirPresentationSupportKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
-    uint32_t opcode_vkGetPhysicalDeviceMirPresentationSupportKHR = OP_vkGetPhysicalDeviceMirPresentationSupportKHR;
-    stream->write(&opcode_vkGetPhysicalDeviceMirPresentationSupportKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceMirPresentationSupportKHR, sizeof(uint32_t));
-    uint64_t cgen_var_960;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_960, 1);
-    stream->write((uint64_t*)&cgen_var_960, 1 * 8);
-    stream->write((uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
-    stream->write((MirConnection*)connection, sizeof(MirConnection));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMirPresentationSupportKHR readParams");
-    stream->read((MirConnection*)connection, sizeof(MirConnection));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMirPresentationSupportKHR returnUnmarshal");
-    VkBool32 vkGetPhysicalDeviceMirPresentationSupportKHR_VkBool32_return = (VkBool32)0;
-    stream->read(&vkGetPhysicalDeviceMirPresentationSupportKHR_VkBool32_return, sizeof(VkBool32));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceMirPresentationSupportKHR");;
-    return vkGetPhysicalDeviceMirPresentationSupportKHR_VkBool32_return;
-}
-
-#endif
 #ifdef VK_KHR_android_surface
 VkResult VkEncoder::vkCreateAndroidSurfaceKHR(
     VkInstance instance,
     const VkAndroidSurfaceCreateInfoKHR* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
+    VkSurfaceKHR* pSurface,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateAndroidSurfaceKHR encode");
-    mImpl->log("start vkCreateAndroidSurfaceKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     VkAndroidSurfaceCreateInfoKHR* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -14227,72 +16301,78 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkAndroidSurfaceCreateInfoKHR*)pool->alloc(sizeof(const VkAndroidSurfaceCreateInfoKHR));
-        deepcopy_VkAndroidSurfaceCreateInfoKHR(pool, pCreateInfo, (VkAndroidSurfaceCreateInfoKHR*)(local_pCreateInfo));
+        deepcopy_VkAndroidSurfaceCreateInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkAndroidSurfaceCreateInfoKHR*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkAndroidSurfaceCreateInfoKHR(mImpl->resources(), (VkAndroidSurfaceCreateInfoKHR*)(local_pCreateInfo));
+        transform_tohost_VkAndroidSurfaceCreateInfoKHR(sResourceTracker, (VkAndroidSurfaceCreateInfoKHR*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_961;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_961, 1);
-        countingStream->write((uint64_t*)&cgen_var_961, 1 * 8);
-        marshal_VkAndroidSurfaceCreateInfoKHR(countingStream, (VkAndroidSurfaceCreateInfoKHR*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkAndroidSurfaceCreateInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAndroidSurfaceCreateInfoKHR*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_962 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_962);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_963;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_963, 1);
-        countingStream->write((uint64_t*)&cgen_var_963, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateAndroidSurfaceKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateAndroidSurfaceKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateAndroidSurfaceKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateAndroidSurfaceKHR = OP_vkCreateAndroidSurfaceKHR;
-    stream->write(&opcode_vkCreateAndroidSurfaceKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateAndroidSurfaceKHR, sizeof(uint32_t));
-    uint64_t cgen_var_964;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_964, 1);
-    stream->write((uint64_t*)&cgen_var_964, 1 * 8);
-    marshal_VkAndroidSurfaceCreateInfoKHR(stream, (VkAndroidSurfaceCreateInfoKHR*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateAndroidSurfaceKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateAndroidSurfaceKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkAndroidSurfaceCreateInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAndroidSurfaceCreateInfoKHR*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_965 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_965);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_966;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_966, 1);
-    stream->write((uint64_t*)&cgen_var_966, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateAndroidSurfaceKHR readParams");
-    uint64_t cgen_var_967;
-    stream->read((uint64_t*)&cgen_var_967, 8);
-    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_967, (VkSurfaceKHR*)pSurface, 1);
-    AEMU_SCOPED_TRACE("vkCreateAndroidSurfaceKHR returnUnmarshal");
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSurface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_3, (VkSurfaceKHR*)pSurface, 1);
     VkResult vkCreateAndroidSurfaceKHR_VkResult_return = (VkResult)0;
     stream->read(&vkCreateAndroidSurfaceKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateAndroidSurfaceKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateAndroidSurfaceKHR_VkResult_return;
 }
 
@@ -14302,16 +16382,14 @@
     VkInstance instance,
     const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
+    VkSurfaceKHR* pSurface,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateWin32SurfaceKHR encode");
-    mImpl->log("start vkCreateWin32SurfaceKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     VkWin32SurfaceCreateInfoKHR* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -14320,115 +16398,125 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkWin32SurfaceCreateInfoKHR*)pool->alloc(sizeof(const VkWin32SurfaceCreateInfoKHR));
-        deepcopy_VkWin32SurfaceCreateInfoKHR(pool, pCreateInfo, (VkWin32SurfaceCreateInfoKHR*)(local_pCreateInfo));
+        deepcopy_VkWin32SurfaceCreateInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkWin32SurfaceCreateInfoKHR*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkWin32SurfaceCreateInfoKHR(mImpl->resources(), (VkWin32SurfaceCreateInfoKHR*)(local_pCreateInfo));
+        transform_tohost_VkWin32SurfaceCreateInfoKHR(sResourceTracker, (VkWin32SurfaceCreateInfoKHR*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_968;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_968, 1);
-        countingStream->write((uint64_t*)&cgen_var_968, 1 * 8);
-        marshal_VkWin32SurfaceCreateInfoKHR(countingStream, (VkWin32SurfaceCreateInfoKHR*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkWin32SurfaceCreateInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkWin32SurfaceCreateInfoKHR*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_969 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_969);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_970;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_970, 1);
-        countingStream->write((uint64_t*)&cgen_var_970, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateWin32SurfaceKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateWin32SurfaceKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateWin32SurfaceKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateWin32SurfaceKHR = OP_vkCreateWin32SurfaceKHR;
-    stream->write(&opcode_vkCreateWin32SurfaceKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateWin32SurfaceKHR, sizeof(uint32_t));
-    uint64_t cgen_var_971;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_971, 1);
-    stream->write((uint64_t*)&cgen_var_971, 1 * 8);
-    marshal_VkWin32SurfaceCreateInfoKHR(stream, (VkWin32SurfaceCreateInfoKHR*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateWin32SurfaceKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateWin32SurfaceKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkWin32SurfaceCreateInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkWin32SurfaceCreateInfoKHR*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_972 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_972);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_973;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_973, 1);
-    stream->write((uint64_t*)&cgen_var_973, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateWin32SurfaceKHR readParams");
-    uint64_t cgen_var_974;
-    stream->read((uint64_t*)&cgen_var_974, 8);
-    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_974, (VkSurfaceKHR*)pSurface, 1);
-    AEMU_SCOPED_TRACE("vkCreateWin32SurfaceKHR returnUnmarshal");
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSurface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_3, (VkSurfaceKHR*)pSurface, 1);
     VkResult vkCreateWin32SurfaceKHR_VkResult_return = (VkResult)0;
     stream->read(&vkCreateWin32SurfaceKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateWin32SurfaceKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateWin32SurfaceKHR_VkResult_return;
 }
 
 VkBool32 VkEncoder::vkGetPhysicalDeviceWin32PresentationSupportKHR(
     VkPhysicalDevice physicalDevice,
-    uint32_t queueFamilyIndex)
+    uint32_t queueFamilyIndex,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceWin32PresentationSupportKHR encode");
-    mImpl->log("start vkGetPhysicalDeviceWin32PresentationSupportKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     uint32_t local_queueFamilyIndex;
     local_physicalDevice = physicalDevice;
     local_queueFamilyIndex = queueFamilyIndex;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_975;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_975, 1);
-        countingStream->write((uint64_t*)&cgen_var_975, 1 * 8);
-        countingStream->write((uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceWin32PresentationSupportKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceWin32PresentationSupportKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceWin32PresentationSupportKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceWin32PresentationSupportKHR = OP_vkGetPhysicalDeviceWin32PresentationSupportKHR;
-    stream->write(&opcode_vkGetPhysicalDeviceWin32PresentationSupportKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceWin32PresentationSupportKHR, sizeof(uint32_t));
-    uint64_t cgen_var_976;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_976, 1);
-    stream->write((uint64_t*)&cgen_var_976, 1 * 8);
-    stream->write((uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceWin32PresentationSupportKHR readParams");
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceWin32PresentationSupportKHR returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceWin32PresentationSupportKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceWin32PresentationSupportKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     VkBool32 vkGetPhysicalDeviceWin32PresentationSupportKHR_VkBool32_return = (VkBool32)0;
     stream->read(&vkGetPhysicalDeviceWin32PresentationSupportKHR_VkBool32_return, sizeof(VkBool32));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceWin32PresentationSupportKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceWin32PresentationSupportKHR_VkBool32_return;
 }
 
@@ -14440,142 +16528,160 @@
 #ifdef VK_KHR_get_physical_device_properties2
 void VkEncoder::vkGetPhysicalDeviceFeatures2KHR(
     VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceFeatures2* pFeatures)
+    VkPhysicalDeviceFeatures2* pFeatures,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFeatures2KHR encode");
-    mImpl->log("start vkGetPhysicalDeviceFeatures2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     local_physicalDevice = physicalDevice;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_977;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_977, 1);
-        countingStream->write((uint64_t*)&cgen_var_977, 1 * 8);
-        marshal_VkPhysicalDeviceFeatures2(countingStream, (VkPhysicalDeviceFeatures2*)(pFeatures));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceFeatures2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceFeatures2*)(pFeatures), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceFeatures2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceFeatures2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceFeatures2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceFeatures2KHR = OP_vkGetPhysicalDeviceFeatures2KHR;
-    stream->write(&opcode_vkGetPhysicalDeviceFeatures2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceFeatures2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_978;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_978, 1);
-    stream->write((uint64_t*)&cgen_var_978, 1 * 8);
-    marshal_VkPhysicalDeviceFeatures2(stream, (VkPhysicalDeviceFeatures2*)(pFeatures));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFeatures2KHR readParams");
-    unmarshal_VkPhysicalDeviceFeatures2(stream, (VkPhysicalDeviceFeatures2*)(pFeatures));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceFeatures2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceFeatures2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceFeatures2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceFeatures2*)(pFeatures), streamPtrPtr);
+    unmarshal_VkPhysicalDeviceFeatures2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceFeatures2*)(pFeatures));
     if (pFeatures)
     {
-        transform_fromhost_VkPhysicalDeviceFeatures2(mImpl->resources(), (VkPhysicalDeviceFeatures2*)(pFeatures));
+        transform_fromhost_VkPhysicalDeviceFeatures2(sResourceTracker, (VkPhysicalDeviceFeatures2*)(pFeatures));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFeatures2KHR returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceFeatures2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetPhysicalDeviceProperties2KHR(
     VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceProperties2* pProperties)
+    VkPhysicalDeviceProperties2* pProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceProperties2KHR encode");
-    mImpl->log("start vkGetPhysicalDeviceProperties2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     local_physicalDevice = physicalDevice;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_979;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_979, 1);
-        countingStream->write((uint64_t*)&cgen_var_979, 1 * 8);
-        marshal_VkPhysicalDeviceProperties2(countingStream, (VkPhysicalDeviceProperties2*)(pProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceProperties2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceProperties2*)(pProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceProperties2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceProperties2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceProperties2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceProperties2KHR = OP_vkGetPhysicalDeviceProperties2KHR;
-    stream->write(&opcode_vkGetPhysicalDeviceProperties2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceProperties2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_980;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_980, 1);
-    stream->write((uint64_t*)&cgen_var_980, 1 * 8);
-    marshal_VkPhysicalDeviceProperties2(stream, (VkPhysicalDeviceProperties2*)(pProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceProperties2KHR readParams");
-    unmarshal_VkPhysicalDeviceProperties2(stream, (VkPhysicalDeviceProperties2*)(pProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceProperties2*)(pProperties), streamPtrPtr);
+    unmarshal_VkPhysicalDeviceProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceProperties2*)(pProperties));
     if (pProperties)
     {
-        transform_fromhost_VkPhysicalDeviceProperties2(mImpl->resources(), (VkPhysicalDeviceProperties2*)(pProperties));
+        transform_fromhost_VkPhysicalDeviceProperties2(sResourceTracker, (VkPhysicalDeviceProperties2*)(pProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceProperties2KHR returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceProperties2KHR");;
+    sResourceTracker->on_vkGetPhysicalDeviceProperties2KHR(this, physicalDevice, pProperties);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetPhysicalDeviceFormatProperties2KHR(
     VkPhysicalDevice physicalDevice,
     VkFormat format,
-    VkFormatProperties2* pFormatProperties)
+    VkFormatProperties2* pFormatProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFormatProperties2KHR encode");
-    mImpl->log("start vkGetPhysicalDeviceFormatProperties2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkFormat local_format;
     local_physicalDevice = physicalDevice;
     local_format = format;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_981;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_981, 1);
-        countingStream->write((uint64_t*)&cgen_var_981, 1 * 8);
-        countingStream->write((VkFormat*)&local_format, sizeof(VkFormat));
-        marshal_VkFormatProperties2(countingStream, (VkFormatProperties2*)(pFormatProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkFormat);
+        count_VkFormatProperties2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFormatProperties2*)(pFormatProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceFormatProperties2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceFormatProperties2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceFormatProperties2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceFormatProperties2KHR = OP_vkGetPhysicalDeviceFormatProperties2KHR;
-    stream->write(&opcode_vkGetPhysicalDeviceFormatProperties2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceFormatProperties2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_982;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_982, 1);
-    stream->write((uint64_t*)&cgen_var_982, 1 * 8);
-    stream->write((VkFormat*)&local_format, sizeof(VkFormat));
-    marshal_VkFormatProperties2(stream, (VkFormatProperties2*)(pFormatProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFormatProperties2KHR readParams");
-    unmarshal_VkFormatProperties2(stream, (VkFormatProperties2*)(pFormatProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceFormatProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceFormatProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkFormat*)&local_format, sizeof(VkFormat));
+    *streamPtrPtr += sizeof(VkFormat);
+    reservedmarshal_VkFormatProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFormatProperties2*)(pFormatProperties), streamPtrPtr);
+    unmarshal_VkFormatProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFormatProperties2*)(pFormatProperties));
     if (pFormatProperties)
     {
-        transform_fromhost_VkFormatProperties2(mImpl->resources(), (VkFormatProperties2*)(pFormatProperties));
+        transform_fromhost_VkFormatProperties2(sResourceTracker, (VkFormatProperties2*)(pFormatProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFormatProperties2KHR returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceFormatProperties2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkGetPhysicalDeviceImageFormatProperties2KHR(
     VkPhysicalDevice physicalDevice,
     const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
-    VkImageFormatProperties2* pImageFormatProperties)
+    VkImageFormatProperties2* pImageFormatProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceImageFormatProperties2KHR encode");
-    mImpl->log("start vkGetPhysicalDeviceImageFormatProperties2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkPhysicalDeviceImageFormatInfo2* local_pImageFormatInfo;
     local_physicalDevice = physicalDevice;
@@ -14583,110 +16689,122 @@
     if (pImageFormatInfo)
     {
         local_pImageFormatInfo = (VkPhysicalDeviceImageFormatInfo2*)pool->alloc(sizeof(const VkPhysicalDeviceImageFormatInfo2));
-        deepcopy_VkPhysicalDeviceImageFormatInfo2(pool, pImageFormatInfo, (VkPhysicalDeviceImageFormatInfo2*)(local_pImageFormatInfo));
+        deepcopy_VkPhysicalDeviceImageFormatInfo2(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pImageFormatInfo, (VkPhysicalDeviceImageFormatInfo2*)(local_pImageFormatInfo));
     }
     if (local_pImageFormatInfo)
     {
-        transform_tohost_VkPhysicalDeviceImageFormatInfo2(mImpl->resources(), (VkPhysicalDeviceImageFormatInfo2*)(local_pImageFormatInfo));
+        transform_tohost_VkPhysicalDeviceImageFormatInfo2(sResourceTracker, (VkPhysicalDeviceImageFormatInfo2*)(local_pImageFormatInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_983;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_983, 1);
-        countingStream->write((uint64_t*)&cgen_var_983, 1 * 8);
-        marshal_VkPhysicalDeviceImageFormatInfo2(countingStream, (VkPhysicalDeviceImageFormatInfo2*)(local_pImageFormatInfo));
-        marshal_VkImageFormatProperties2(countingStream, (VkImageFormatProperties2*)(pImageFormatProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceImageFormatInfo2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceImageFormatInfo2*)(local_pImageFormatInfo), countPtr);
+        count_VkImageFormatProperties2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageFormatProperties2*)(pImageFormatProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceImageFormatProperties2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceImageFormatProperties2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceImageFormatProperties2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceImageFormatProperties2KHR = OP_vkGetPhysicalDeviceImageFormatProperties2KHR;
-    stream->write(&opcode_vkGetPhysicalDeviceImageFormatProperties2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceImageFormatProperties2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_984;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_984, 1);
-    stream->write((uint64_t*)&cgen_var_984, 1 * 8);
-    marshal_VkPhysicalDeviceImageFormatInfo2(stream, (VkPhysicalDeviceImageFormatInfo2*)(local_pImageFormatInfo));
-    marshal_VkImageFormatProperties2(stream, (VkImageFormatProperties2*)(pImageFormatProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceImageFormatProperties2KHR readParams");
-    unmarshal_VkImageFormatProperties2(stream, (VkImageFormatProperties2*)(pImageFormatProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceImageFormatProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceImageFormatProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceImageFormatInfo2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceImageFormatInfo2*)(local_pImageFormatInfo), streamPtrPtr);
+    reservedmarshal_VkImageFormatProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageFormatProperties2*)(pImageFormatProperties), streamPtrPtr);
+    unmarshal_VkImageFormatProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageFormatProperties2*)(pImageFormatProperties));
     if (pImageFormatProperties)
     {
-        transform_fromhost_VkImageFormatProperties2(mImpl->resources(), (VkImageFormatProperties2*)(pImageFormatProperties));
+        transform_fromhost_VkImageFormatProperties2(sResourceTracker, (VkImageFormatProperties2*)(pImageFormatProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceImageFormatProperties2KHR returnUnmarshal");
     VkResult vkGetPhysicalDeviceImageFormatProperties2KHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetPhysicalDeviceImageFormatProperties2KHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceImageFormatProperties2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceImageFormatProperties2KHR_VkResult_return;
 }
 
 void VkEncoder::vkGetPhysicalDeviceQueueFamilyProperties2KHR(
     VkPhysicalDevice physicalDevice,
     uint32_t* pQueueFamilyPropertyCount,
-    VkQueueFamilyProperties2* pQueueFamilyProperties)
+    VkQueueFamilyProperties2* pQueueFamilyProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceQueueFamilyProperties2KHR encode");
-    mImpl->log("start vkGetPhysicalDeviceQueueFamilyProperties2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     local_physicalDevice = physicalDevice;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_985;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_985, 1);
-        countingStream->write((uint64_t*)&cgen_var_985, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_986 = (uint64_t)(uintptr_t)pQueueFamilyPropertyCount;
-        countingStream->putBe64(cgen_var_986);
+        *countPtr += 8;
         if (pQueueFamilyPropertyCount)
         {
-            countingStream->write((uint32_t*)pQueueFamilyPropertyCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_987 = (uint64_t)(uintptr_t)pQueueFamilyProperties;
-        countingStream->putBe64(cgen_var_987);
+        *countPtr += 8;
         if (pQueueFamilyProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+            if (pQueueFamilyPropertyCount)
             {
-                marshal_VkQueueFamilyProperties2(countingStream, (VkQueueFamilyProperties2*)(pQueueFamilyProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+                {
+                    count_VkQueueFamilyProperties2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkQueueFamilyProperties2*)(pQueueFamilyProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetPhysicalDeviceQueueFamilyProperties2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceQueueFamilyProperties2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceQueueFamilyProperties2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceQueueFamilyProperties2KHR = OP_vkGetPhysicalDeviceQueueFamilyProperties2KHR;
-    stream->write(&opcode_vkGetPhysicalDeviceQueueFamilyProperties2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceQueueFamilyProperties2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_988;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_988, 1);
-    stream->write((uint64_t*)&cgen_var_988, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceQueueFamilyProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceQueueFamilyProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_989 = (uint64_t)(uintptr_t)pQueueFamilyPropertyCount;
-    stream->putBe64(cgen_var_989);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pQueueFamilyPropertyCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pQueueFamilyPropertyCount)
     {
-        stream->write((uint32_t*)pQueueFamilyPropertyCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pQueueFamilyPropertyCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_990 = (uint64_t)(uintptr_t)pQueueFamilyProperties;
-    stream->putBe64(cgen_var_990);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pQueueFamilyProperties;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pQueueFamilyProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
         {
-            marshal_VkQueueFamilyProperties2(stream, (VkQueueFamilyProperties2*)(pQueueFamilyProperties + i));
+            reservedmarshal_VkQueueFamilyProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkQueueFamilyProperties2*)(pQueueFamilyProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceQueueFamilyProperties2KHR readParams");
     // WARNING PTR CHECK
     uint32_t* check_pQueueFamilyPropertyCount;
     check_pQueueFamilyPropertyCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -14707,79 +16825,92 @@
         {
             fprintf(stderr, "fatal: pQueueFamilyProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+        if (pQueueFamilyPropertyCount)
         {
-            unmarshal_VkQueueFamilyProperties2(stream, (VkQueueFamilyProperties2*)(pQueueFamilyProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+            {
+                unmarshal_VkQueueFamilyProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkQueueFamilyProperties2*)(pQueueFamilyProperties + i));
+            }
         }
     }
-    if (pQueueFamilyProperties)
+    if (pQueueFamilyPropertyCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+        if (pQueueFamilyProperties)
         {
-            transform_fromhost_VkQueueFamilyProperties2(mImpl->resources(), (VkQueueFamilyProperties2*)(pQueueFamilyProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pQueueFamilyPropertyCount)); ++i)
+            {
+                transform_fromhost_VkQueueFamilyProperties2(sResourceTracker, (VkQueueFamilyProperties2*)(pQueueFamilyProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceQueueFamilyProperties2KHR returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceQueueFamilyProperties2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetPhysicalDeviceMemoryProperties2KHR(
     VkPhysicalDevice physicalDevice,
-    VkPhysicalDeviceMemoryProperties2* pMemoryProperties)
+    VkPhysicalDeviceMemoryProperties2* pMemoryProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMemoryProperties2KHR encode");
-    mImpl->log("start vkGetPhysicalDeviceMemoryProperties2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     local_physicalDevice = physicalDevice;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_993;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_993, 1);
-        countingStream->write((uint64_t*)&cgen_var_993, 1 * 8);
-        marshal_VkPhysicalDeviceMemoryProperties2(countingStream, (VkPhysicalDeviceMemoryProperties2*)(pMemoryProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceMemoryProperties2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceMemoryProperties2*)(pMemoryProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceMemoryProperties2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceMemoryProperties2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceMemoryProperties2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceMemoryProperties2KHR = OP_vkGetPhysicalDeviceMemoryProperties2KHR;
-    stream->write(&opcode_vkGetPhysicalDeviceMemoryProperties2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceMemoryProperties2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_994;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_994, 1);
-    stream->write((uint64_t*)&cgen_var_994, 1 * 8);
-    marshal_VkPhysicalDeviceMemoryProperties2(stream, (VkPhysicalDeviceMemoryProperties2*)(pMemoryProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMemoryProperties2KHR readParams");
-    unmarshal_VkPhysicalDeviceMemoryProperties2(stream, (VkPhysicalDeviceMemoryProperties2*)(pMemoryProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceMemoryProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceMemoryProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceMemoryProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceMemoryProperties2*)(pMemoryProperties), streamPtrPtr);
+    unmarshal_VkPhysicalDeviceMemoryProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceMemoryProperties2*)(pMemoryProperties));
     if (pMemoryProperties)
     {
-        transform_fromhost_VkPhysicalDeviceMemoryProperties2(mImpl->resources(), (VkPhysicalDeviceMemoryProperties2*)(pMemoryProperties));
+        transform_fromhost_VkPhysicalDeviceMemoryProperties2(sResourceTracker, (VkPhysicalDeviceMemoryProperties2*)(pMemoryProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMemoryProperties2KHR returnUnmarshal");
-    encoderLock.unlock();
-    mImpl->resources()->on_vkGetPhysicalDeviceMemoryProperties2KHR(this, physicalDevice, pMemoryProperties);
-    encoderLock.lock();
-    mImpl->log("finish vkGetPhysicalDeviceMemoryProperties2KHR");;
+    sResourceTracker->on_vkGetPhysicalDeviceMemoryProperties2KHR(this, physicalDevice, pMemoryProperties);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetPhysicalDeviceSparseImageFormatProperties2KHR(
     VkPhysicalDevice physicalDevice,
     const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo,
     uint32_t* pPropertyCount,
-    VkSparseImageFormatProperties2* pProperties)
+    VkSparseImageFormatProperties2* pProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSparseImageFormatProperties2KHR encode");
-    mImpl->log("start vkGetPhysicalDeviceSparseImageFormatProperties2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkPhysicalDeviceSparseImageFormatInfo2* local_pFormatInfo;
     local_physicalDevice = physicalDevice;
@@ -14787,63 +16918,72 @@
     if (pFormatInfo)
     {
         local_pFormatInfo = (VkPhysicalDeviceSparseImageFormatInfo2*)pool->alloc(sizeof(const VkPhysicalDeviceSparseImageFormatInfo2));
-        deepcopy_VkPhysicalDeviceSparseImageFormatInfo2(pool, pFormatInfo, (VkPhysicalDeviceSparseImageFormatInfo2*)(local_pFormatInfo));
+        deepcopy_VkPhysicalDeviceSparseImageFormatInfo2(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pFormatInfo, (VkPhysicalDeviceSparseImageFormatInfo2*)(local_pFormatInfo));
     }
     if (local_pFormatInfo)
     {
-        transform_tohost_VkPhysicalDeviceSparseImageFormatInfo2(mImpl->resources(), (VkPhysicalDeviceSparseImageFormatInfo2*)(local_pFormatInfo));
+        transform_tohost_VkPhysicalDeviceSparseImageFormatInfo2(sResourceTracker, (VkPhysicalDeviceSparseImageFormatInfo2*)(local_pFormatInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_995;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_995, 1);
-        countingStream->write((uint64_t*)&cgen_var_995, 1 * 8);
-        marshal_VkPhysicalDeviceSparseImageFormatInfo2(countingStream, (VkPhysicalDeviceSparseImageFormatInfo2*)(local_pFormatInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceSparseImageFormatInfo2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceSparseImageFormatInfo2*)(local_pFormatInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_996 = (uint64_t)(uintptr_t)pPropertyCount;
-        countingStream->putBe64(cgen_var_996);
+        *countPtr += 8;
         if (pPropertyCount)
         {
-            countingStream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_997 = (uint64_t)(uintptr_t)pProperties;
-        countingStream->putBe64(cgen_var_997);
+        *countPtr += 8;
         if (pProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            if (pPropertyCount)
             {
-                marshal_VkSparseImageFormatProperties2(countingStream, (VkSparseImageFormatProperties2*)(pProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+                {
+                    count_VkSparseImageFormatProperties2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageFormatProperties2*)(pProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetPhysicalDeviceSparseImageFormatProperties2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceSparseImageFormatProperties2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceSparseImageFormatProperties2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceSparseImageFormatProperties2KHR = OP_vkGetPhysicalDeviceSparseImageFormatProperties2KHR;
-    stream->write(&opcode_vkGetPhysicalDeviceSparseImageFormatProperties2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceSparseImageFormatProperties2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_998;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_998, 1);
-    stream->write((uint64_t*)&cgen_var_998, 1 * 8);
-    marshal_VkPhysicalDeviceSparseImageFormatInfo2(stream, (VkPhysicalDeviceSparseImageFormatInfo2*)(local_pFormatInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceSparseImageFormatProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceSparseImageFormatProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceSparseImageFormatInfo2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceSparseImageFormatInfo2*)(local_pFormatInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_999 = (uint64_t)(uintptr_t)pPropertyCount;
-    stream->putBe64(cgen_var_999);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pPropertyCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPropertyCount)
     {
-        stream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPropertyCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1000 = (uint64_t)(uintptr_t)pProperties;
-    stream->putBe64(cgen_var_1000);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pProperties;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
         {
-            marshal_VkSparseImageFormatProperties2(stream, (VkSparseImageFormatProperties2*)(pProperties + i));
+            reservedmarshal_VkSparseImageFormatProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageFormatProperties2*)(pProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSparseImageFormatProperties2KHR readParams");
     // WARNING PTR CHECK
     uint32_t* check_pPropertyCount;
     check_pPropertyCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -14864,20 +17004,31 @@
         {
             fprintf(stderr, "fatal: pProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pPropertyCount)
         {
-            unmarshal_VkSparseImageFormatProperties2(stream, (VkSparseImageFormatProperties2*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                unmarshal_VkSparseImageFormatProperties2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageFormatProperties2*)(pProperties + i));
+            }
         }
     }
-    if (pProperties)
+    if (pPropertyCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pProperties)
         {
-            transform_fromhost_VkSparseImageFormatProperties2(mImpl->resources(), (VkSparseImageFormatProperties2*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                transform_fromhost_VkSparseImageFormatProperties2(sResourceTracker, (VkSparseImageFormatProperties2*)(pProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSparseImageFormatProperties2KHR returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceSparseImageFormatProperties2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
@@ -14887,16 +17038,14 @@
     uint32_t heapIndex,
     uint32_t localDeviceIndex,
     uint32_t remoteDeviceIndex,
-    VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
+    VkPeerMemoryFeatureFlags* pPeerMemoryFeatures,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupPeerMemoryFeaturesKHR encode");
-    mImpl->log("start vkGetDeviceGroupPeerMemoryFeaturesKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     uint32_t local_heapIndex;
     uint32_t local_localDeviceIndex;
@@ -14905,69 +17054,90 @@
     local_heapIndex = heapIndex;
     local_localDeviceIndex = localDeviceIndex;
     local_remoteDeviceIndex = remoteDeviceIndex;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1003;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1003, 1);
-        countingStream->write((uint64_t*)&cgen_var_1003, 1 * 8);
-        countingStream->write((uint32_t*)&local_heapIndex, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_localDeviceIndex, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_remoteDeviceIndex, sizeof(uint32_t));
-        countingStream->write((VkPeerMemoryFeatureFlags*)pPeerMemoryFeatures, sizeof(VkPeerMemoryFeatureFlags));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(VkPeerMemoryFeatureFlags);
     }
-    uint32_t packetSize_vkGetDeviceGroupPeerMemoryFeaturesKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetDeviceGroupPeerMemoryFeaturesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDeviceGroupPeerMemoryFeaturesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetDeviceGroupPeerMemoryFeaturesKHR = OP_vkGetDeviceGroupPeerMemoryFeaturesKHR;
-    stream->write(&opcode_vkGetDeviceGroupPeerMemoryFeaturesKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetDeviceGroupPeerMemoryFeaturesKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1004;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1004, 1);
-    stream->write((uint64_t*)&cgen_var_1004, 1 * 8);
-    stream->write((uint32_t*)&local_heapIndex, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_localDeviceIndex, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_remoteDeviceIndex, sizeof(uint32_t));
-    stream->write((VkPeerMemoryFeatureFlags*)pPeerMemoryFeatures, sizeof(VkPeerMemoryFeatureFlags));
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupPeerMemoryFeaturesKHR readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDeviceGroupPeerMemoryFeaturesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDeviceGroupPeerMemoryFeaturesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_heapIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_localDeviceIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_remoteDeviceIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (VkPeerMemoryFeatureFlags*)pPeerMemoryFeatures, sizeof(VkPeerMemoryFeatureFlags));
+    *streamPtrPtr += sizeof(VkPeerMemoryFeatureFlags);
     stream->read((VkPeerMemoryFeatureFlags*)pPeerMemoryFeatures, sizeof(VkPeerMemoryFeatureFlags));
-    AEMU_SCOPED_TRACE("vkGetDeviceGroupPeerMemoryFeaturesKHR returnUnmarshal");
-    mImpl->log("finish vkGetDeviceGroupPeerMemoryFeaturesKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdSetDeviceMaskKHR(
     VkCommandBuffer commandBuffer,
-    uint32_t deviceMask)
+    uint32_t deviceMask,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdSetDeviceMaskKHR encode");
-    mImpl->log("start vkCmdSetDeviceMaskKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     uint32_t local_deviceMask;
     local_commandBuffer = commandBuffer;
     local_deviceMask = deviceMask;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1005;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1005, 1);
-        countingStream->write((uint64_t*)&cgen_var_1005, 1 * 8);
-        countingStream->write((uint32_t*)&local_deviceMask, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdSetDeviceMaskKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdSetDeviceMaskKHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetDeviceMaskKHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetDeviceMaskKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdSetDeviceMaskKHR = OP_vkCmdSetDeviceMaskKHR;
-    stream->write(&opcode_vkCmdSetDeviceMaskKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdSetDeviceMaskKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1006;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1006, 1);
-    stream->write((uint64_t*)&cgen_var_1006, 1 * 8);
-    stream->write((uint32_t*)&local_deviceMask, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdSetDeviceMaskKHR readParams");
-    AEMU_SCOPED_TRACE("vkCmdSetDeviceMaskKHR returnUnmarshal");
-    mImpl->log("finish vkCmdSetDeviceMaskKHR");;
+    memcpy(streamPtr, &opcode_vkCmdSetDeviceMaskKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetDeviceMaskKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_deviceMask, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdDispatchBaseKHR(
@@ -14977,16 +17147,14 @@
     uint32_t baseGroupZ,
     uint32_t groupCountX,
     uint32_t groupCountY,
-    uint32_t groupCountZ)
+    uint32_t groupCountZ,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdDispatchBaseKHR encode");
-    mImpl->log("start vkCmdDispatchBaseKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     uint32_t local_baseGroupX;
     uint32_t local_baseGroupY;
@@ -15001,35 +17169,51 @@
     local_groupCountX = groupCountX;
     local_groupCountY = groupCountY;
     local_groupCountZ = groupCountZ;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1007;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1007, 1);
-        countingStream->write((uint64_t*)&cgen_var_1007, 1 * 8);
-        countingStream->write((uint32_t*)&local_baseGroupX, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_baseGroupY, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_baseGroupZ, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_groupCountX, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_groupCountY, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_groupCountZ, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdDispatchBaseKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdDispatchBaseKHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDispatchBaseKHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDispatchBaseKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdDispatchBaseKHR = OP_vkCmdDispatchBaseKHR;
-    stream->write(&opcode_vkCmdDispatchBaseKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdDispatchBaseKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1008;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1008, 1);
-    stream->write((uint64_t*)&cgen_var_1008, 1 * 8);
-    stream->write((uint32_t*)&local_baseGroupX, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_baseGroupY, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_baseGroupZ, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_groupCountX, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_groupCountY, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_groupCountZ, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdDispatchBaseKHR readParams");
-    AEMU_SCOPED_TRACE("vkCmdDispatchBaseKHR returnUnmarshal");
-    mImpl->log("finish vkCmdDispatchBaseKHR");;
+    memcpy(streamPtr, &opcode_vkCmdDispatchBaseKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDispatchBaseKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_baseGroupX, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_baseGroupY, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_baseGroupZ, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_groupCountX, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_groupCountY, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_groupCountZ, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
@@ -15039,47 +17223,55 @@
 void VkEncoder::vkTrimCommandPoolKHR(
     VkDevice device,
     VkCommandPool commandPool,
-    VkCommandPoolTrimFlags flags)
+    VkCommandPoolTrimFlags flags,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkTrimCommandPoolKHR encode");
-    mImpl->log("start vkTrimCommandPoolKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkCommandPool local_commandPool;
     VkCommandPoolTrimFlags local_flags;
     local_device = device;
     local_commandPool = commandPool;
     local_flags = flags;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1009;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1009, 1);
-        countingStream->write((uint64_t*)&cgen_var_1009, 1 * 8);
-        uint64_t cgen_var_1010;
-        countingStream->handleMapping()->mapHandles_VkCommandPool_u64(&local_commandPool, &cgen_var_1010, 1);
-        countingStream->write((uint64_t*)&cgen_var_1010, 1 * 8);
-        countingStream->write((VkCommandPoolTrimFlags*)&local_flags, sizeof(VkCommandPoolTrimFlags));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkCommandPoolTrimFlags);
     }
-    uint32_t packetSize_vkTrimCommandPoolKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkTrimCommandPoolKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkTrimCommandPoolKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkTrimCommandPoolKHR = OP_vkTrimCommandPoolKHR;
-    stream->write(&opcode_vkTrimCommandPoolKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkTrimCommandPoolKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1011;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1011, 1);
-    stream->write((uint64_t*)&cgen_var_1011, 1 * 8);
-    uint64_t cgen_var_1012;
-    stream->handleMapping()->mapHandles_VkCommandPool_u64(&local_commandPool, &cgen_var_1012, 1);
-    stream->write((uint64_t*)&cgen_var_1012, 1 * 8);
-    stream->write((VkCommandPoolTrimFlags*)&local_flags, sizeof(VkCommandPoolTrimFlags));
-    AEMU_SCOPED_TRACE("vkTrimCommandPoolKHR readParams");
-    AEMU_SCOPED_TRACE("vkTrimCommandPoolKHR returnUnmarshal");
-    mImpl->log("finish vkTrimCommandPoolKHR");;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkTrimCommandPoolKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkTrimCommandPoolKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkCommandPool((*&local_commandPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkCommandPoolTrimFlags*)&local_flags, sizeof(VkCommandPoolTrimFlags));
+    *streamPtrPtr += sizeof(VkCommandPoolTrimFlags);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
@@ -15087,67 +17279,74 @@
 VkResult VkEncoder::vkEnumeratePhysicalDeviceGroupsKHR(
     VkInstance instance,
     uint32_t* pPhysicalDeviceGroupCount,
-    VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties)
+    VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkEnumeratePhysicalDeviceGroupsKHR encode");
-    mImpl->log("start vkEnumeratePhysicalDeviceGroupsKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     local_instance = instance;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1013;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1013, 1);
-        countingStream->write((uint64_t*)&cgen_var_1013, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_1014 = (uint64_t)(uintptr_t)pPhysicalDeviceGroupCount;
-        countingStream->putBe64(cgen_var_1014);
+        *countPtr += 8;
         if (pPhysicalDeviceGroupCount)
         {
-            countingStream->write((uint32_t*)pPhysicalDeviceGroupCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_1015 = (uint64_t)(uintptr_t)pPhysicalDeviceGroupProperties;
-        countingStream->putBe64(cgen_var_1015);
+        *countPtr += 8;
         if (pPhysicalDeviceGroupProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pPhysicalDeviceGroupCount)); ++i)
+            if (pPhysicalDeviceGroupCount)
             {
-                marshal_VkPhysicalDeviceGroupProperties(countingStream, (VkPhysicalDeviceGroupProperties*)(pPhysicalDeviceGroupProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pPhysicalDeviceGroupCount)); ++i)
+                {
+                    count_VkPhysicalDeviceGroupProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceGroupProperties*)(pPhysicalDeviceGroupProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkEnumeratePhysicalDeviceGroupsKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkEnumeratePhysicalDeviceGroupsKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkEnumeratePhysicalDeviceGroupsKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkEnumeratePhysicalDeviceGroupsKHR = OP_vkEnumeratePhysicalDeviceGroupsKHR;
-    stream->write(&opcode_vkEnumeratePhysicalDeviceGroupsKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkEnumeratePhysicalDeviceGroupsKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1016;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1016, 1);
-    stream->write((uint64_t*)&cgen_var_1016, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkEnumeratePhysicalDeviceGroupsKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkEnumeratePhysicalDeviceGroupsKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_1017 = (uint64_t)(uintptr_t)pPhysicalDeviceGroupCount;
-    stream->putBe64(cgen_var_1017);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pPhysicalDeviceGroupCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPhysicalDeviceGroupCount)
     {
-        stream->write((uint32_t*)pPhysicalDeviceGroupCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPhysicalDeviceGroupCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1018 = (uint64_t)(uintptr_t)pPhysicalDeviceGroupProperties;
-    stream->putBe64(cgen_var_1018);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pPhysicalDeviceGroupProperties;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPhysicalDeviceGroupProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pPhysicalDeviceGroupCount)); ++i)
         {
-            marshal_VkPhysicalDeviceGroupProperties(stream, (VkPhysicalDeviceGroupProperties*)(pPhysicalDeviceGroupProperties + i));
+            reservedmarshal_VkPhysicalDeviceGroupProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceGroupProperties*)(pPhysicalDeviceGroupProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkEnumeratePhysicalDeviceGroupsKHR readParams");
     // WARNING PTR CHECK
     uint32_t* check_pPhysicalDeviceGroupCount;
     check_pPhysicalDeviceGroupCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -15168,25 +17367,33 @@
         {
             fprintf(stderr, "fatal: pPhysicalDeviceGroupProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pPhysicalDeviceGroupCount)); ++i)
+        if (pPhysicalDeviceGroupCount)
         {
-            unmarshal_VkPhysicalDeviceGroupProperties(stream, (VkPhysicalDeviceGroupProperties*)(pPhysicalDeviceGroupProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPhysicalDeviceGroupCount)); ++i)
+            {
+                unmarshal_VkPhysicalDeviceGroupProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceGroupProperties*)(pPhysicalDeviceGroupProperties + i));
+            }
         }
     }
-    if (pPhysicalDeviceGroupProperties)
+    if (pPhysicalDeviceGroupCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pPhysicalDeviceGroupCount)); ++i)
+        if (pPhysicalDeviceGroupProperties)
         {
-            transform_fromhost_VkPhysicalDeviceGroupProperties(mImpl->resources(), (VkPhysicalDeviceGroupProperties*)(pPhysicalDeviceGroupProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPhysicalDeviceGroupCount)); ++i)
+            {
+                transform_fromhost_VkPhysicalDeviceGroupProperties(sResourceTracker, (VkPhysicalDeviceGroupProperties*)(pPhysicalDeviceGroupProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkEnumeratePhysicalDeviceGroupsKHR returnUnmarshal");
     VkResult vkEnumeratePhysicalDeviceGroupsKHR_VkResult_return = (VkResult)0;
     stream->read(&vkEnumeratePhysicalDeviceGroupsKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkEnumeratePhysicalDeviceGroupsKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkEnumeratePhysicalDeviceGroupsKHR_VkResult_return;
 }
 
@@ -15195,16 +17402,14 @@
 void VkEncoder::vkGetPhysicalDeviceExternalBufferPropertiesKHR(
     VkPhysicalDevice physicalDevice,
     const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
-    VkExternalBufferProperties* pExternalBufferProperties)
+    VkExternalBufferProperties* pExternalBufferProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalBufferPropertiesKHR encode");
-    mImpl->log("start vkGetPhysicalDeviceExternalBufferPropertiesKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkPhysicalDeviceExternalBufferInfo* local_pExternalBufferInfo;
     local_physicalDevice = physicalDevice;
@@ -15212,40 +17417,48 @@
     if (pExternalBufferInfo)
     {
         local_pExternalBufferInfo = (VkPhysicalDeviceExternalBufferInfo*)pool->alloc(sizeof(const VkPhysicalDeviceExternalBufferInfo));
-        deepcopy_VkPhysicalDeviceExternalBufferInfo(pool, pExternalBufferInfo, (VkPhysicalDeviceExternalBufferInfo*)(local_pExternalBufferInfo));
+        deepcopy_VkPhysicalDeviceExternalBufferInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pExternalBufferInfo, (VkPhysicalDeviceExternalBufferInfo*)(local_pExternalBufferInfo));
     }
     if (local_pExternalBufferInfo)
     {
-        mImpl->resources()->transformImpl_VkPhysicalDeviceExternalBufferInfo_tohost(local_pExternalBufferInfo, 1);
-        transform_tohost_VkPhysicalDeviceExternalBufferInfo(mImpl->resources(), (VkPhysicalDeviceExternalBufferInfo*)(local_pExternalBufferInfo));
+        sResourceTracker->transformImpl_VkPhysicalDeviceExternalBufferInfo_tohost(local_pExternalBufferInfo, 1);
+        transform_tohost_VkPhysicalDeviceExternalBufferInfo(sResourceTracker, (VkPhysicalDeviceExternalBufferInfo*)(local_pExternalBufferInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1021;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1021, 1);
-        countingStream->write((uint64_t*)&cgen_var_1021, 1 * 8);
-        marshal_VkPhysicalDeviceExternalBufferInfo(countingStream, (VkPhysicalDeviceExternalBufferInfo*)(local_pExternalBufferInfo));
-        marshal_VkExternalBufferProperties(countingStream, (VkExternalBufferProperties*)(pExternalBufferProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceExternalBufferInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceExternalBufferInfo*)(local_pExternalBufferInfo), countPtr);
+        count_VkExternalBufferProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalBufferProperties*)(pExternalBufferProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceExternalBufferPropertiesKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceExternalBufferPropertiesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceExternalBufferPropertiesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceExternalBufferPropertiesKHR = OP_vkGetPhysicalDeviceExternalBufferPropertiesKHR;
-    stream->write(&opcode_vkGetPhysicalDeviceExternalBufferPropertiesKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceExternalBufferPropertiesKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1022;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1022, 1);
-    stream->write((uint64_t*)&cgen_var_1022, 1 * 8);
-    marshal_VkPhysicalDeviceExternalBufferInfo(stream, (VkPhysicalDeviceExternalBufferInfo*)(local_pExternalBufferInfo));
-    marshal_VkExternalBufferProperties(stream, (VkExternalBufferProperties*)(pExternalBufferProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalBufferPropertiesKHR readParams");
-    unmarshal_VkExternalBufferProperties(stream, (VkExternalBufferProperties*)(pExternalBufferProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceExternalBufferPropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceExternalBufferPropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceExternalBufferInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceExternalBufferInfo*)(local_pExternalBufferInfo), streamPtrPtr);
+    reservedmarshal_VkExternalBufferProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalBufferProperties*)(pExternalBufferProperties), streamPtrPtr);
+    unmarshal_VkExternalBufferProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalBufferProperties*)(pExternalBufferProperties));
     if (pExternalBufferProperties)
     {
-        mImpl->resources()->transformImpl_VkExternalBufferProperties_fromhost(pExternalBufferProperties, 1);
-        transform_fromhost_VkExternalBufferProperties(mImpl->resources(), (VkExternalBufferProperties*)(pExternalBufferProperties));
+        sResourceTracker->transformImpl_VkExternalBufferProperties_fromhost(pExternalBufferProperties, 1);
+        transform_fromhost_VkExternalBufferProperties(sResourceTracker, (VkExternalBufferProperties*)(pExternalBufferProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalBufferPropertiesKHR returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceExternalBufferPropertiesKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
@@ -15255,16 +17468,14 @@
 VkResult VkEncoder::vkGetMemoryWin32HandleKHR(
     VkDevice device,
     const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo,
-    HANDLE* pHandle)
+    HANDLE* pHandle,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandleKHR encode");
-    mImpl->log("start vkGetMemoryWin32HandleKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkMemoryGetWin32HandleInfoKHR* local_pGetWin32HandleInfo;
     local_device = device;
@@ -15272,39 +17483,45 @@
     if (pGetWin32HandleInfo)
     {
         local_pGetWin32HandleInfo = (VkMemoryGetWin32HandleInfoKHR*)pool->alloc(sizeof(const VkMemoryGetWin32HandleInfoKHR));
-        deepcopy_VkMemoryGetWin32HandleInfoKHR(pool, pGetWin32HandleInfo, (VkMemoryGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
+        deepcopy_VkMemoryGetWin32HandleInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pGetWin32HandleInfo, (VkMemoryGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
     }
     if (local_pGetWin32HandleInfo)
     {
-        transform_tohost_VkMemoryGetWin32HandleInfoKHR(mImpl->resources(), (VkMemoryGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
+        transform_tohost_VkMemoryGetWin32HandleInfoKHR(sResourceTracker, (VkMemoryGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1023;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1023, 1);
-        countingStream->write((uint64_t*)&cgen_var_1023, 1 * 8);
-        marshal_VkMemoryGetWin32HandleInfoKHR(countingStream, (VkMemoryGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
-        countingStream->write((HANDLE*)pHandle, sizeof(HANDLE));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkMemoryGetWin32HandleInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo), countPtr);
+        *countPtr += sizeof(HANDLE);
     }
-    uint32_t packetSize_vkGetMemoryWin32HandleKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetMemoryWin32HandleKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetMemoryWin32HandleKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetMemoryWin32HandleKHR = OP_vkGetMemoryWin32HandleKHR;
-    stream->write(&opcode_vkGetMemoryWin32HandleKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetMemoryWin32HandleKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1024;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1024, 1);
-    stream->write((uint64_t*)&cgen_var_1024, 1 * 8);
-    marshal_VkMemoryGetWin32HandleInfoKHR(stream, (VkMemoryGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
-    stream->write((HANDLE*)pHandle, sizeof(HANDLE));
-    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandleKHR readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetMemoryWin32HandleKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetMemoryWin32HandleKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkMemoryGetWin32HandleInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo), streamPtrPtr);
+    memcpy(*streamPtrPtr, (HANDLE*)pHandle, sizeof(HANDLE));
+    *streamPtrPtr += sizeof(HANDLE);
     stream->read((HANDLE*)pHandle, sizeof(HANDLE));
-    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandleKHR returnUnmarshal");
     VkResult vkGetMemoryWin32HandleKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetMemoryWin32HandleKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetMemoryWin32HandleKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetMemoryWin32HandleKHR_VkResult_return;
 }
 
@@ -15312,55 +17529,60 @@
     VkDevice device,
     VkExternalMemoryHandleTypeFlagBits handleType,
     HANDLE handle,
-    VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties)
+    VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandlePropertiesKHR encode");
-    mImpl->log("start vkGetMemoryWin32HandlePropertiesKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkExternalMemoryHandleTypeFlagBits local_handleType;
     HANDLE local_handle;
     local_device = device;
     local_handleType = handleType;
     local_handle = handle;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1025;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1025, 1);
-        countingStream->write((uint64_t*)&cgen_var_1025, 1 * 8);
-        countingStream->write((VkExternalMemoryHandleTypeFlagBits*)&local_handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
-        countingStream->write((HANDLE*)&local_handle, sizeof(HANDLE));
-        marshal_VkMemoryWin32HandlePropertiesKHR(countingStream, (VkMemoryWin32HandlePropertiesKHR*)(pMemoryWin32HandleProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkExternalMemoryHandleTypeFlagBits);
+        *countPtr += sizeof(HANDLE);
+        count_VkMemoryWin32HandlePropertiesKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryWin32HandlePropertiesKHR*)(pMemoryWin32HandleProperties), countPtr);
     }
-    uint32_t packetSize_vkGetMemoryWin32HandlePropertiesKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetMemoryWin32HandlePropertiesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetMemoryWin32HandlePropertiesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetMemoryWin32HandlePropertiesKHR = OP_vkGetMemoryWin32HandlePropertiesKHR;
-    stream->write(&opcode_vkGetMemoryWin32HandlePropertiesKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetMemoryWin32HandlePropertiesKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1026;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1026, 1);
-    stream->write((uint64_t*)&cgen_var_1026, 1 * 8);
-    stream->write((VkExternalMemoryHandleTypeFlagBits*)&local_handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
-    stream->write((HANDLE*)&local_handle, sizeof(HANDLE));
-    marshal_VkMemoryWin32HandlePropertiesKHR(stream, (VkMemoryWin32HandlePropertiesKHR*)(pMemoryWin32HandleProperties));
-    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandlePropertiesKHR readParams");
-    unmarshal_VkMemoryWin32HandlePropertiesKHR(stream, (VkMemoryWin32HandlePropertiesKHR*)(pMemoryWin32HandleProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetMemoryWin32HandlePropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetMemoryWin32HandlePropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkExternalMemoryHandleTypeFlagBits*)&local_handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
+    *streamPtrPtr += sizeof(VkExternalMemoryHandleTypeFlagBits);
+    memcpy(*streamPtrPtr, (HANDLE*)&local_handle, sizeof(HANDLE));
+    *streamPtrPtr += sizeof(HANDLE);
+    reservedmarshal_VkMemoryWin32HandlePropertiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryWin32HandlePropertiesKHR*)(pMemoryWin32HandleProperties), streamPtrPtr);
+    unmarshal_VkMemoryWin32HandlePropertiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryWin32HandlePropertiesKHR*)(pMemoryWin32HandleProperties));
     if (pMemoryWin32HandleProperties)
     {
-        transform_fromhost_VkMemoryWin32HandlePropertiesKHR(mImpl->resources(), (VkMemoryWin32HandlePropertiesKHR*)(pMemoryWin32HandleProperties));
+        transform_fromhost_VkMemoryWin32HandlePropertiesKHR(sResourceTracker, (VkMemoryWin32HandlePropertiesKHR*)(pMemoryWin32HandleProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandlePropertiesKHR returnUnmarshal");
     VkResult vkGetMemoryWin32HandlePropertiesKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetMemoryWin32HandlePropertiesKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetMemoryWin32HandlePropertiesKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetMemoryWin32HandlePropertiesKHR_VkResult_return;
 }
 
@@ -15369,16 +17591,14 @@
 VkResult VkEncoder::vkGetMemoryFdKHR(
     VkDevice device,
     const VkMemoryGetFdInfoKHR* pGetFdInfo,
-    int* pFd)
+    int* pFd,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetMemoryFdKHR encode");
-    mImpl->log("start vkGetMemoryFdKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkMemoryGetFdInfoKHR* local_pGetFdInfo;
     local_device = device;
@@ -15386,39 +17606,45 @@
     if (pGetFdInfo)
     {
         local_pGetFdInfo = (VkMemoryGetFdInfoKHR*)pool->alloc(sizeof(const VkMemoryGetFdInfoKHR));
-        deepcopy_VkMemoryGetFdInfoKHR(pool, pGetFdInfo, (VkMemoryGetFdInfoKHR*)(local_pGetFdInfo));
+        deepcopy_VkMemoryGetFdInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pGetFdInfo, (VkMemoryGetFdInfoKHR*)(local_pGetFdInfo));
     }
     if (local_pGetFdInfo)
     {
-        transform_tohost_VkMemoryGetFdInfoKHR(mImpl->resources(), (VkMemoryGetFdInfoKHR*)(local_pGetFdInfo));
+        transform_tohost_VkMemoryGetFdInfoKHR(sResourceTracker, (VkMemoryGetFdInfoKHR*)(local_pGetFdInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1027;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1027, 1);
-        countingStream->write((uint64_t*)&cgen_var_1027, 1 * 8);
-        marshal_VkMemoryGetFdInfoKHR(countingStream, (VkMemoryGetFdInfoKHR*)(local_pGetFdInfo));
-        countingStream->write((int*)pFd, sizeof(int));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkMemoryGetFdInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryGetFdInfoKHR*)(local_pGetFdInfo), countPtr);
+        *countPtr += sizeof(int);
     }
-    uint32_t packetSize_vkGetMemoryFdKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetMemoryFdKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetMemoryFdKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetMemoryFdKHR = OP_vkGetMemoryFdKHR;
-    stream->write(&opcode_vkGetMemoryFdKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetMemoryFdKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1028;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1028, 1);
-    stream->write((uint64_t*)&cgen_var_1028, 1 * 8);
-    marshal_VkMemoryGetFdInfoKHR(stream, (VkMemoryGetFdInfoKHR*)(local_pGetFdInfo));
-    stream->write((int*)pFd, sizeof(int));
-    AEMU_SCOPED_TRACE("vkGetMemoryFdKHR readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetMemoryFdKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetMemoryFdKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkMemoryGetFdInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryGetFdInfoKHR*)(local_pGetFdInfo), streamPtrPtr);
+    memcpy(*streamPtrPtr, (int*)pFd, sizeof(int));
+    *streamPtrPtr += sizeof(int);
     stream->read((int*)pFd, sizeof(int));
-    AEMU_SCOPED_TRACE("vkGetMemoryFdKHR returnUnmarshal");
     VkResult vkGetMemoryFdKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetMemoryFdKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetMemoryFdKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetMemoryFdKHR_VkResult_return;
 }
 
@@ -15426,55 +17652,60 @@
     VkDevice device,
     VkExternalMemoryHandleTypeFlagBits handleType,
     int fd,
-    VkMemoryFdPropertiesKHR* pMemoryFdProperties)
+    VkMemoryFdPropertiesKHR* pMemoryFdProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetMemoryFdPropertiesKHR encode");
-    mImpl->log("start vkGetMemoryFdPropertiesKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkExternalMemoryHandleTypeFlagBits local_handleType;
     int local_fd;
     local_device = device;
     local_handleType = handleType;
     local_fd = fd;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1029;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1029, 1);
-        countingStream->write((uint64_t*)&cgen_var_1029, 1 * 8);
-        countingStream->write((VkExternalMemoryHandleTypeFlagBits*)&local_handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
-        countingStream->write((int*)&local_fd, sizeof(int));
-        marshal_VkMemoryFdPropertiesKHR(countingStream, (VkMemoryFdPropertiesKHR*)(pMemoryFdProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkExternalMemoryHandleTypeFlagBits);
+        *countPtr += sizeof(int);
+        count_VkMemoryFdPropertiesKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryFdPropertiesKHR*)(pMemoryFdProperties), countPtr);
     }
-    uint32_t packetSize_vkGetMemoryFdPropertiesKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetMemoryFdPropertiesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetMemoryFdPropertiesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetMemoryFdPropertiesKHR = OP_vkGetMemoryFdPropertiesKHR;
-    stream->write(&opcode_vkGetMemoryFdPropertiesKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetMemoryFdPropertiesKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1030;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1030, 1);
-    stream->write((uint64_t*)&cgen_var_1030, 1 * 8);
-    stream->write((VkExternalMemoryHandleTypeFlagBits*)&local_handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
-    stream->write((int*)&local_fd, sizeof(int));
-    marshal_VkMemoryFdPropertiesKHR(stream, (VkMemoryFdPropertiesKHR*)(pMemoryFdProperties));
-    AEMU_SCOPED_TRACE("vkGetMemoryFdPropertiesKHR readParams");
-    unmarshal_VkMemoryFdPropertiesKHR(stream, (VkMemoryFdPropertiesKHR*)(pMemoryFdProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetMemoryFdPropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetMemoryFdPropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkExternalMemoryHandleTypeFlagBits*)&local_handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
+    *streamPtrPtr += sizeof(VkExternalMemoryHandleTypeFlagBits);
+    memcpy(*streamPtrPtr, (int*)&local_fd, sizeof(int));
+    *streamPtrPtr += sizeof(int);
+    reservedmarshal_VkMemoryFdPropertiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryFdPropertiesKHR*)(pMemoryFdProperties), streamPtrPtr);
+    unmarshal_VkMemoryFdPropertiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryFdPropertiesKHR*)(pMemoryFdProperties));
     if (pMemoryFdProperties)
     {
-        transform_fromhost_VkMemoryFdPropertiesKHR(mImpl->resources(), (VkMemoryFdPropertiesKHR*)(pMemoryFdProperties));
+        transform_fromhost_VkMemoryFdPropertiesKHR(sResourceTracker, (VkMemoryFdPropertiesKHR*)(pMemoryFdProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetMemoryFdPropertiesKHR returnUnmarshal");
     VkResult vkGetMemoryFdPropertiesKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetMemoryFdPropertiesKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetMemoryFdPropertiesKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetMemoryFdPropertiesKHR_VkResult_return;
 }
 
@@ -15485,16 +17716,14 @@
 void VkEncoder::vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(
     VkPhysicalDevice physicalDevice,
     const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
-    VkExternalSemaphoreProperties* pExternalSemaphoreProperties)
+    VkExternalSemaphoreProperties* pExternalSemaphoreProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalSemaphorePropertiesKHR encode");
-    mImpl->log("start vkGetPhysicalDeviceExternalSemaphorePropertiesKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkPhysicalDeviceExternalSemaphoreInfo* local_pExternalSemaphoreInfo;
     local_physicalDevice = physicalDevice;
@@ -15502,38 +17731,47 @@
     if (pExternalSemaphoreInfo)
     {
         local_pExternalSemaphoreInfo = (VkPhysicalDeviceExternalSemaphoreInfo*)pool->alloc(sizeof(const VkPhysicalDeviceExternalSemaphoreInfo));
-        deepcopy_VkPhysicalDeviceExternalSemaphoreInfo(pool, pExternalSemaphoreInfo, (VkPhysicalDeviceExternalSemaphoreInfo*)(local_pExternalSemaphoreInfo));
+        deepcopy_VkPhysicalDeviceExternalSemaphoreInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pExternalSemaphoreInfo, (VkPhysicalDeviceExternalSemaphoreInfo*)(local_pExternalSemaphoreInfo));
     }
     if (local_pExternalSemaphoreInfo)
     {
-        transform_tohost_VkPhysicalDeviceExternalSemaphoreInfo(mImpl->resources(), (VkPhysicalDeviceExternalSemaphoreInfo*)(local_pExternalSemaphoreInfo));
+        transform_tohost_VkPhysicalDeviceExternalSemaphoreInfo(sResourceTracker, (VkPhysicalDeviceExternalSemaphoreInfo*)(local_pExternalSemaphoreInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1031;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1031, 1);
-        countingStream->write((uint64_t*)&cgen_var_1031, 1 * 8);
-        marshal_VkPhysicalDeviceExternalSemaphoreInfo(countingStream, (VkPhysicalDeviceExternalSemaphoreInfo*)(local_pExternalSemaphoreInfo));
-        marshal_VkExternalSemaphoreProperties(countingStream, (VkExternalSemaphoreProperties*)(pExternalSemaphoreProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceExternalSemaphoreInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceExternalSemaphoreInfo*)(local_pExternalSemaphoreInfo), countPtr);
+        count_VkExternalSemaphoreProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalSemaphoreProperties*)(pExternalSemaphoreProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = OP_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR;
-    stream->write(&opcode_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1032;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1032, 1);
-    stream->write((uint64_t*)&cgen_var_1032, 1 * 8);
-    marshal_VkPhysicalDeviceExternalSemaphoreInfo(stream, (VkPhysicalDeviceExternalSemaphoreInfo*)(local_pExternalSemaphoreInfo));
-    marshal_VkExternalSemaphoreProperties(stream, (VkExternalSemaphoreProperties*)(pExternalSemaphoreProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalSemaphorePropertiesKHR readParams");
-    unmarshal_VkExternalSemaphoreProperties(stream, (VkExternalSemaphoreProperties*)(pExternalSemaphoreProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceExternalSemaphoreInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceExternalSemaphoreInfo*)(local_pExternalSemaphoreInfo), streamPtrPtr);
+    reservedmarshal_VkExternalSemaphoreProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalSemaphoreProperties*)(pExternalSemaphoreProperties), streamPtrPtr);
+    unmarshal_VkExternalSemaphoreProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalSemaphoreProperties*)(pExternalSemaphoreProperties));
     if (pExternalSemaphoreProperties)
     {
-        transform_fromhost_VkExternalSemaphoreProperties(mImpl->resources(), (VkExternalSemaphoreProperties*)(pExternalSemaphoreProperties));
+        transform_fromhost_VkExternalSemaphoreProperties(sResourceTracker, (VkExternalSemaphoreProperties*)(pExternalSemaphoreProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalSemaphorePropertiesKHR returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceExternalSemaphorePropertiesKHR");;
+    sResourceTracker->on_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(this, physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
@@ -15542,16 +17780,14 @@
 #ifdef VK_KHR_external_semaphore_win32
 VkResult VkEncoder::vkImportSemaphoreWin32HandleKHR(
     VkDevice device,
-    const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo)
+    const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkImportSemaphoreWin32HandleKHR encode");
-    mImpl->log("start vkImportSemaphoreWin32HandleKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImportSemaphoreWin32HandleInfoKHR* local_pImportSemaphoreWin32HandleInfo;
     local_device = device;
@@ -15559,52 +17795,55 @@
     if (pImportSemaphoreWin32HandleInfo)
     {
         local_pImportSemaphoreWin32HandleInfo = (VkImportSemaphoreWin32HandleInfoKHR*)pool->alloc(sizeof(const VkImportSemaphoreWin32HandleInfoKHR));
-        deepcopy_VkImportSemaphoreWin32HandleInfoKHR(pool, pImportSemaphoreWin32HandleInfo, (VkImportSemaphoreWin32HandleInfoKHR*)(local_pImportSemaphoreWin32HandleInfo));
+        deepcopy_VkImportSemaphoreWin32HandleInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pImportSemaphoreWin32HandleInfo, (VkImportSemaphoreWin32HandleInfoKHR*)(local_pImportSemaphoreWin32HandleInfo));
     }
     if (local_pImportSemaphoreWin32HandleInfo)
     {
-        transform_tohost_VkImportSemaphoreWin32HandleInfoKHR(mImpl->resources(), (VkImportSemaphoreWin32HandleInfoKHR*)(local_pImportSemaphoreWin32HandleInfo));
+        transform_tohost_VkImportSemaphoreWin32HandleInfoKHR(sResourceTracker, (VkImportSemaphoreWin32HandleInfoKHR*)(local_pImportSemaphoreWin32HandleInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1033;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1033, 1);
-        countingStream->write((uint64_t*)&cgen_var_1033, 1 * 8);
-        marshal_VkImportSemaphoreWin32HandleInfoKHR(countingStream, (VkImportSemaphoreWin32HandleInfoKHR*)(local_pImportSemaphoreWin32HandleInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkImportSemaphoreWin32HandleInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImportSemaphoreWin32HandleInfoKHR*)(local_pImportSemaphoreWin32HandleInfo), countPtr);
     }
-    uint32_t packetSize_vkImportSemaphoreWin32HandleKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkImportSemaphoreWin32HandleKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkImportSemaphoreWin32HandleKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkImportSemaphoreWin32HandleKHR = OP_vkImportSemaphoreWin32HandleKHR;
-    stream->write(&opcode_vkImportSemaphoreWin32HandleKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkImportSemaphoreWin32HandleKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1034;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1034, 1);
-    stream->write((uint64_t*)&cgen_var_1034, 1 * 8);
-    marshal_VkImportSemaphoreWin32HandleInfoKHR(stream, (VkImportSemaphoreWin32HandleInfoKHR*)(local_pImportSemaphoreWin32HandleInfo));
-    AEMU_SCOPED_TRACE("vkImportSemaphoreWin32HandleKHR readParams");
-    AEMU_SCOPED_TRACE("vkImportSemaphoreWin32HandleKHR returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkImportSemaphoreWin32HandleKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkImportSemaphoreWin32HandleKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkImportSemaphoreWin32HandleInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImportSemaphoreWin32HandleInfoKHR*)(local_pImportSemaphoreWin32HandleInfo), streamPtrPtr);
     VkResult vkImportSemaphoreWin32HandleKHR_VkResult_return = (VkResult)0;
     stream->read(&vkImportSemaphoreWin32HandleKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkImportSemaphoreWin32HandleKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkImportSemaphoreWin32HandleKHR_VkResult_return;
 }
 
 VkResult VkEncoder::vkGetSemaphoreWin32HandleKHR(
     VkDevice device,
     const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo,
-    HANDLE* pHandle)
+    HANDLE* pHandle,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetSemaphoreWin32HandleKHR encode");
-    mImpl->log("start vkGetSemaphoreWin32HandleKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSemaphoreGetWin32HandleInfoKHR* local_pGetWin32HandleInfo;
     local_device = device;
@@ -15612,39 +17851,45 @@
     if (pGetWin32HandleInfo)
     {
         local_pGetWin32HandleInfo = (VkSemaphoreGetWin32HandleInfoKHR*)pool->alloc(sizeof(const VkSemaphoreGetWin32HandleInfoKHR));
-        deepcopy_VkSemaphoreGetWin32HandleInfoKHR(pool, pGetWin32HandleInfo, (VkSemaphoreGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
+        deepcopy_VkSemaphoreGetWin32HandleInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pGetWin32HandleInfo, (VkSemaphoreGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
     }
     if (local_pGetWin32HandleInfo)
     {
-        transform_tohost_VkSemaphoreGetWin32HandleInfoKHR(mImpl->resources(), (VkSemaphoreGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
+        transform_tohost_VkSemaphoreGetWin32HandleInfoKHR(sResourceTracker, (VkSemaphoreGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1035;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1035, 1);
-        countingStream->write((uint64_t*)&cgen_var_1035, 1 * 8);
-        marshal_VkSemaphoreGetWin32HandleInfoKHR(countingStream, (VkSemaphoreGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
-        countingStream->write((HANDLE*)pHandle, sizeof(HANDLE));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkSemaphoreGetWin32HandleInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSemaphoreGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo), countPtr);
+        *countPtr += sizeof(HANDLE);
     }
-    uint32_t packetSize_vkGetSemaphoreWin32HandleKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetSemaphoreWin32HandleKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetSemaphoreWin32HandleKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetSemaphoreWin32HandleKHR = OP_vkGetSemaphoreWin32HandleKHR;
-    stream->write(&opcode_vkGetSemaphoreWin32HandleKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetSemaphoreWin32HandleKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1036;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1036, 1);
-    stream->write((uint64_t*)&cgen_var_1036, 1 * 8);
-    marshal_VkSemaphoreGetWin32HandleInfoKHR(stream, (VkSemaphoreGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
-    stream->write((HANDLE*)pHandle, sizeof(HANDLE));
-    AEMU_SCOPED_TRACE("vkGetSemaphoreWin32HandleKHR readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetSemaphoreWin32HandleKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetSemaphoreWin32HandleKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkSemaphoreGetWin32HandleInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSemaphoreGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo), streamPtrPtr);
+    memcpy(*streamPtrPtr, (HANDLE*)pHandle, sizeof(HANDLE));
+    *streamPtrPtr += sizeof(HANDLE);
     stream->read((HANDLE*)pHandle, sizeof(HANDLE));
-    AEMU_SCOPED_TRACE("vkGetSemaphoreWin32HandleKHR returnUnmarshal");
     VkResult vkGetSemaphoreWin32HandleKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetSemaphoreWin32HandleKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetSemaphoreWin32HandleKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetSemaphoreWin32HandleKHR_VkResult_return;
 }
 
@@ -15652,16 +17897,14 @@
 #ifdef VK_KHR_external_semaphore_fd
 VkResult VkEncoder::vkImportSemaphoreFdKHR(
     VkDevice device,
-    const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo)
+    const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkImportSemaphoreFdKHR encode");
-    mImpl->log("start vkImportSemaphoreFdKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImportSemaphoreFdInfoKHR* local_pImportSemaphoreFdInfo;
     local_device = device;
@@ -15669,52 +17912,55 @@
     if (pImportSemaphoreFdInfo)
     {
         local_pImportSemaphoreFdInfo = (VkImportSemaphoreFdInfoKHR*)pool->alloc(sizeof(const VkImportSemaphoreFdInfoKHR));
-        deepcopy_VkImportSemaphoreFdInfoKHR(pool, pImportSemaphoreFdInfo, (VkImportSemaphoreFdInfoKHR*)(local_pImportSemaphoreFdInfo));
+        deepcopy_VkImportSemaphoreFdInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pImportSemaphoreFdInfo, (VkImportSemaphoreFdInfoKHR*)(local_pImportSemaphoreFdInfo));
     }
     if (local_pImportSemaphoreFdInfo)
     {
-        transform_tohost_VkImportSemaphoreFdInfoKHR(mImpl->resources(), (VkImportSemaphoreFdInfoKHR*)(local_pImportSemaphoreFdInfo));
+        transform_tohost_VkImportSemaphoreFdInfoKHR(sResourceTracker, (VkImportSemaphoreFdInfoKHR*)(local_pImportSemaphoreFdInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1037;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1037, 1);
-        countingStream->write((uint64_t*)&cgen_var_1037, 1 * 8);
-        marshal_VkImportSemaphoreFdInfoKHR(countingStream, (VkImportSemaphoreFdInfoKHR*)(local_pImportSemaphoreFdInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkImportSemaphoreFdInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImportSemaphoreFdInfoKHR*)(local_pImportSemaphoreFdInfo), countPtr);
     }
-    uint32_t packetSize_vkImportSemaphoreFdKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkImportSemaphoreFdKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkImportSemaphoreFdKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkImportSemaphoreFdKHR = OP_vkImportSemaphoreFdKHR;
-    stream->write(&opcode_vkImportSemaphoreFdKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkImportSemaphoreFdKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1038;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1038, 1);
-    stream->write((uint64_t*)&cgen_var_1038, 1 * 8);
-    marshal_VkImportSemaphoreFdInfoKHR(stream, (VkImportSemaphoreFdInfoKHR*)(local_pImportSemaphoreFdInfo));
-    AEMU_SCOPED_TRACE("vkImportSemaphoreFdKHR readParams");
-    AEMU_SCOPED_TRACE("vkImportSemaphoreFdKHR returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkImportSemaphoreFdKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkImportSemaphoreFdKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkImportSemaphoreFdInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImportSemaphoreFdInfoKHR*)(local_pImportSemaphoreFdInfo), streamPtrPtr);
     VkResult vkImportSemaphoreFdKHR_VkResult_return = (VkResult)0;
     stream->read(&vkImportSemaphoreFdKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkImportSemaphoreFdKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkImportSemaphoreFdKHR_VkResult_return;
 }
 
 VkResult VkEncoder::vkGetSemaphoreFdKHR(
     VkDevice device,
     const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
-    int* pFd)
+    int* pFd,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetSemaphoreFdKHR encode");
-    mImpl->log("start vkGetSemaphoreFdKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSemaphoreGetFdInfoKHR* local_pGetFdInfo;
     local_device = device;
@@ -15722,39 +17968,45 @@
     if (pGetFdInfo)
     {
         local_pGetFdInfo = (VkSemaphoreGetFdInfoKHR*)pool->alloc(sizeof(const VkSemaphoreGetFdInfoKHR));
-        deepcopy_VkSemaphoreGetFdInfoKHR(pool, pGetFdInfo, (VkSemaphoreGetFdInfoKHR*)(local_pGetFdInfo));
+        deepcopy_VkSemaphoreGetFdInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pGetFdInfo, (VkSemaphoreGetFdInfoKHR*)(local_pGetFdInfo));
     }
     if (local_pGetFdInfo)
     {
-        transform_tohost_VkSemaphoreGetFdInfoKHR(mImpl->resources(), (VkSemaphoreGetFdInfoKHR*)(local_pGetFdInfo));
+        transform_tohost_VkSemaphoreGetFdInfoKHR(sResourceTracker, (VkSemaphoreGetFdInfoKHR*)(local_pGetFdInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1039;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1039, 1);
-        countingStream->write((uint64_t*)&cgen_var_1039, 1 * 8);
-        marshal_VkSemaphoreGetFdInfoKHR(countingStream, (VkSemaphoreGetFdInfoKHR*)(local_pGetFdInfo));
-        countingStream->write((int*)pFd, sizeof(int));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkSemaphoreGetFdInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSemaphoreGetFdInfoKHR*)(local_pGetFdInfo), countPtr);
+        *countPtr += sizeof(int);
     }
-    uint32_t packetSize_vkGetSemaphoreFdKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetSemaphoreFdKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetSemaphoreFdKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetSemaphoreFdKHR = OP_vkGetSemaphoreFdKHR;
-    stream->write(&opcode_vkGetSemaphoreFdKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetSemaphoreFdKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1040;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1040, 1);
-    stream->write((uint64_t*)&cgen_var_1040, 1 * 8);
-    marshal_VkSemaphoreGetFdInfoKHR(stream, (VkSemaphoreGetFdInfoKHR*)(local_pGetFdInfo));
-    stream->write((int*)pFd, sizeof(int));
-    AEMU_SCOPED_TRACE("vkGetSemaphoreFdKHR readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetSemaphoreFdKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetSemaphoreFdKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkSemaphoreGetFdInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSemaphoreGetFdInfoKHR*)(local_pGetFdInfo), streamPtrPtr);
+    memcpy(*streamPtrPtr, (int*)pFd, sizeof(int));
+    *streamPtrPtr += sizeof(int);
     stream->read((int*)pFd, sizeof(int));
-    AEMU_SCOPED_TRACE("vkGetSemaphoreFdKHR returnUnmarshal");
     VkResult vkGetSemaphoreFdKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetSemaphoreFdKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetSemaphoreFdKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetSemaphoreFdKHR_VkResult_return;
 }
 
@@ -15766,16 +18018,14 @@
     VkPipelineLayout layout,
     uint32_t set,
     uint32_t descriptorWriteCount,
-    const VkWriteDescriptorSet* pDescriptorWrites)
+    const VkWriteDescriptorSet* pDescriptorWrites,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdPushDescriptorSetKHR encode");
-    mImpl->log("start vkCmdPushDescriptorSetKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkPipelineBindPoint local_pipelineBindPoint;
     VkPipelineLayout local_layout;
@@ -15793,53 +18043,66 @@
         local_pDescriptorWrites = (VkWriteDescriptorSet*)pool->alloc(((descriptorWriteCount)) * sizeof(const VkWriteDescriptorSet));
         for (uint32_t i = 0; i < (uint32_t)((descriptorWriteCount)); ++i)
         {
-            deepcopy_VkWriteDescriptorSet(pool, pDescriptorWrites + i, (VkWriteDescriptorSet*)(local_pDescriptorWrites + i));
+            deepcopy_VkWriteDescriptorSet(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pDescriptorWrites + i, (VkWriteDescriptorSet*)(local_pDescriptorWrites + i));
         }
     }
     if (local_pDescriptorWrites)
     {
         for (uint32_t i = 0; i < (uint32_t)((descriptorWriteCount)); ++i)
         {
-            transform_tohost_VkWriteDescriptorSet(mImpl->resources(), (VkWriteDescriptorSet*)(local_pDescriptorWrites + i));
+            transform_tohost_VkWriteDescriptorSet(sResourceTracker, (VkWriteDescriptorSet*)(local_pDescriptorWrites + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1041;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1041, 1);
-        countingStream->write((uint64_t*)&cgen_var_1041, 1 * 8);
-        countingStream->write((VkPipelineBindPoint*)&local_pipelineBindPoint, sizeof(VkPipelineBindPoint));
-        uint64_t cgen_var_1042;
-        countingStream->handleMapping()->mapHandles_VkPipelineLayout_u64(&local_layout, &cgen_var_1042, 1);
-        countingStream->write((uint64_t*)&cgen_var_1042, 1 * 8);
-        countingStream->write((uint32_t*)&local_set, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_descriptorWriteCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkPipelineBindPoint);
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((descriptorWriteCount)); ++i)
         {
-            marshal_VkWriteDescriptorSet(countingStream, (VkWriteDescriptorSet*)(local_pDescriptorWrites + i));
+            count_VkWriteDescriptorSet(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkWriteDescriptorSet*)(local_pDescriptorWrites + i), countPtr);
         }
     }
-    uint32_t packetSize_vkCmdPushDescriptorSetKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdPushDescriptorSetKHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdPushDescriptorSetKHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdPushDescriptorSetKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdPushDescriptorSetKHR = OP_vkCmdPushDescriptorSetKHR;
-    stream->write(&opcode_vkCmdPushDescriptorSetKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdPushDescriptorSetKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1043;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1043, 1);
-    stream->write((uint64_t*)&cgen_var_1043, 1 * 8);
-    stream->write((VkPipelineBindPoint*)&local_pipelineBindPoint, sizeof(VkPipelineBindPoint));
-    uint64_t cgen_var_1044;
-    stream->handleMapping()->mapHandles_VkPipelineLayout_u64(&local_layout, &cgen_var_1044, 1);
-    stream->write((uint64_t*)&cgen_var_1044, 1 * 8);
-    stream->write((uint32_t*)&local_set, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_descriptorWriteCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdPushDescriptorSetKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdPushDescriptorSetKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkPipelineBindPoint*)&local_pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    *streamPtrPtr += sizeof(VkPipelineBindPoint);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPipelineLayout((*&local_layout));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_set, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_descriptorWriteCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((descriptorWriteCount)); ++i)
     {
-        marshal_VkWriteDescriptorSet(stream, (VkWriteDescriptorSet*)(local_pDescriptorWrites + i));
+        reservedmarshal_VkWriteDescriptorSet(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkWriteDescriptorSet*)(local_pDescriptorWrites + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkCmdPushDescriptorSetKHR readParams");
-    AEMU_SCOPED_TRACE("vkCmdPushDescriptorSetKHR returnUnmarshal");
-    mImpl->log("finish vkCmdPushDescriptorSetKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdPushDescriptorSetWithTemplateKHR(
@@ -15847,16 +18110,14 @@
     VkDescriptorUpdateTemplate descriptorUpdateTemplate,
     VkPipelineLayout layout,
     uint32_t set,
-    const void* pData)
+    const void* pData,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdPushDescriptorSetWithTemplateKHR encode");
-    mImpl->log("start vkCmdPushDescriptorSetWithTemplateKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkDescriptorUpdateTemplate local_descriptorUpdateTemplate;
     VkPipelineLayout local_layout;
@@ -15866,59 +18127,71 @@
     local_descriptorUpdateTemplate = descriptorUpdateTemplate;
     local_layout = layout;
     local_set = set;
-    local_pData = nullptr;
-    if (pData)
+    // Avoiding deepcopy for pData
+    local_pData = (void*)pData;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pData = (void*)pool->dupArray(pData, sizeof(const uint8_t));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1045;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1045, 1);
-        countingStream->write((uint64_t*)&cgen_var_1045, 1 * 8);
-        uint64_t cgen_var_1046;
-        countingStream->handleMapping()->mapHandles_VkDescriptorUpdateTemplate_u64(&local_descriptorUpdateTemplate, &cgen_var_1046, 1);
-        countingStream->write((uint64_t*)&cgen_var_1046, 1 * 8);
-        uint64_t cgen_var_1047;
-        countingStream->handleMapping()->mapHandles_VkPipelineLayout_u64(&local_layout, &cgen_var_1047, 1);
-        countingStream->write((uint64_t*)&cgen_var_1047, 1 * 8);
-        countingStream->write((uint32_t*)&local_set, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1048 = (uint64_t)(uintptr_t)local_pData;
-        countingStream->putBe64(cgen_var_1048);
+        *countPtr += 8;
         if (local_pData)
         {
-            countingStream->write((void*)local_pData, sizeof(uint8_t));
+            *countPtr += sizeof(uint8_t);
         }
     }
-    uint32_t packetSize_vkCmdPushDescriptorSetWithTemplateKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdPushDescriptorSetWithTemplateKHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdPushDescriptorSetWithTemplateKHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdPushDescriptorSetWithTemplateKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdPushDescriptorSetWithTemplateKHR = OP_vkCmdPushDescriptorSetWithTemplateKHR;
-    stream->write(&opcode_vkCmdPushDescriptorSetWithTemplateKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdPushDescriptorSetWithTemplateKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1049;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1049, 1);
-    stream->write((uint64_t*)&cgen_var_1049, 1 * 8);
-    uint64_t cgen_var_1050;
-    stream->handleMapping()->mapHandles_VkDescriptorUpdateTemplate_u64(&local_descriptorUpdateTemplate, &cgen_var_1050, 1);
-    stream->write((uint64_t*)&cgen_var_1050, 1 * 8);
-    uint64_t cgen_var_1051;
-    stream->handleMapping()->mapHandles_VkPipelineLayout_u64(&local_layout, &cgen_var_1051, 1);
-    stream->write((uint64_t*)&cgen_var_1051, 1 * 8);
-    stream->write((uint32_t*)&local_set, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdPushDescriptorSetWithTemplateKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdPushDescriptorSetWithTemplateKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDescriptorUpdateTemplate((*&local_descriptorUpdateTemplate));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipelineLayout((*&local_layout));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_set, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1052 = (uint64_t)(uintptr_t)local_pData;
-    stream->putBe64(cgen_var_1052);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pData;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pData)
     {
-        stream->write((void*)local_pData, sizeof(uint8_t));
+        memcpy(*streamPtrPtr, (void*)local_pData, sizeof(uint8_t));
+        *streamPtrPtr += sizeof(uint8_t);
     }
-    AEMU_SCOPED_TRACE("vkCmdPushDescriptorSetWithTemplateKHR readParams");
-    AEMU_SCOPED_TRACE("vkCmdPushDescriptorSetWithTemplateKHR returnUnmarshal");
-    mImpl->log("finish vkCmdPushDescriptorSetWithTemplateKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
+#ifdef VK_KHR_shader_float16_int8
+#endif
 #ifdef VK_KHR_16bit_storage
 #endif
 #ifdef VK_KHR_incremental_present
@@ -15928,16 +18201,14 @@
     VkDevice device,
     const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
+    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateDescriptorUpdateTemplateKHR encode");
-    mImpl->log("start vkCreateDescriptorUpdateTemplateKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDescriptorUpdateTemplateCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -15946,93 +18217,95 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkDescriptorUpdateTemplateCreateInfo*)pool->alloc(sizeof(const VkDescriptorUpdateTemplateCreateInfo));
-        deepcopy_VkDescriptorUpdateTemplateCreateInfo(pool, pCreateInfo, (VkDescriptorUpdateTemplateCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkDescriptorUpdateTemplateCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkDescriptorUpdateTemplateCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkDescriptorUpdateTemplateCreateInfo(mImpl->resources(), (VkDescriptorUpdateTemplateCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkDescriptorUpdateTemplateCreateInfo(sResourceTracker, (VkDescriptorUpdateTemplateCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1053;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1053, 1);
-        countingStream->write((uint64_t*)&cgen_var_1053, 1 * 8);
-        marshal_VkDescriptorUpdateTemplateCreateInfo(countingStream, (VkDescriptorUpdateTemplateCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDescriptorUpdateTemplateCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorUpdateTemplateCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1054 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1054);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_1055;
-        countingStream->handleMapping()->mapHandles_VkDescriptorUpdateTemplate_u64(pDescriptorUpdateTemplate, &cgen_var_1055, 1);
-        countingStream->write((uint64_t*)&cgen_var_1055, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateDescriptorUpdateTemplateKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateDescriptorUpdateTemplateKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateDescriptorUpdateTemplateKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateDescriptorUpdateTemplateKHR = OP_vkCreateDescriptorUpdateTemplateKHR;
-    stream->write(&opcode_vkCreateDescriptorUpdateTemplateKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateDescriptorUpdateTemplateKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1056;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1056, 1);
-    stream->write((uint64_t*)&cgen_var_1056, 1 * 8);
-    marshal_VkDescriptorUpdateTemplateCreateInfo(stream, (VkDescriptorUpdateTemplateCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateDescriptorUpdateTemplateKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateDescriptorUpdateTemplateKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDescriptorUpdateTemplateCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorUpdateTemplateCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1057 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1057);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_1058;
-    stream->handleMapping()->mapHandles_VkDescriptorUpdateTemplate_u64(pDescriptorUpdateTemplate, &cgen_var_1058, 1);
-    stream->write((uint64_t*)&cgen_var_1058, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateDescriptorUpdateTemplateKHR readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_1059;
-    stream->read((uint64_t*)&cgen_var_1059, 8);
-    stream->handleMapping()->mapHandles_u64_VkDescriptorUpdateTemplate(&cgen_var_1059, (VkDescriptorUpdateTemplate*)pDescriptorUpdateTemplate, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pDescriptorUpdateTemplate));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkDescriptorUpdateTemplate(&cgen_var_3, (VkDescriptorUpdateTemplate*)pDescriptorUpdateTemplate, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateDescriptorUpdateTemplateKHR returnUnmarshal");
     VkResult vkCreateDescriptorUpdateTemplateKHR_VkResult_return = (VkResult)0;
     stream->read(&vkCreateDescriptorUpdateTemplateKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    encoderLock.unlock();
-    mImpl->resources()->on_vkCreateDescriptorUpdateTemplateKHR(this, vkCreateDescriptorUpdateTemplateKHR_VkResult_return, device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
-    encoderLock.lock();
-    mImpl->log("finish vkCreateDescriptorUpdateTemplateKHR");;
+    sResourceTracker->on_vkCreateDescriptorUpdateTemplateKHR(this, vkCreateDescriptorUpdateTemplateKHR_VkResult_return, device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateDescriptorUpdateTemplateKHR_VkResult_return;
 }
 
 void VkEncoder::vkDestroyDescriptorUpdateTemplateKHR(
     VkDevice device,
     VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorUpdateTemplateKHR encode");
-    mImpl->log("start vkDestroyDescriptorUpdateTemplateKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDescriptorUpdateTemplate local_descriptorUpdateTemplate;
     VkAllocationCallbacks* local_pAllocator;
@@ -16042,67 +18315,75 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1060;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1060, 1);
-        countingStream->write((uint64_t*)&cgen_var_1060, 1 * 8);
-        uint64_t cgen_var_1061;
-        countingStream->handleMapping()->mapHandles_VkDescriptorUpdateTemplate_u64(&local_descriptorUpdateTemplate, &cgen_var_1061, 1);
-        countingStream->write((uint64_t*)&cgen_var_1061, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_1062 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1062);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyDescriptorUpdateTemplateKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyDescriptorUpdateTemplateKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyDescriptorUpdateTemplateKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyDescriptorUpdateTemplateKHR = OP_vkDestroyDescriptorUpdateTemplateKHR;
-    stream->write(&opcode_vkDestroyDescriptorUpdateTemplateKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyDescriptorUpdateTemplateKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1063;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1063, 1);
-    stream->write((uint64_t*)&cgen_var_1063, 1 * 8);
-    uint64_t cgen_var_1064;
-    stream->handleMapping()->mapHandles_VkDescriptorUpdateTemplate_u64(&local_descriptorUpdateTemplate, &cgen_var_1064, 1);
-    stream->write((uint64_t*)&cgen_var_1064, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyDescriptorUpdateTemplateKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyDescriptorUpdateTemplateKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDescriptorUpdateTemplate((*&local_descriptorUpdateTemplate));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_1065 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1065);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorUpdateTemplateKHR readParams");
-    AEMU_SCOPED_TRACE("vkDestroyDescriptorUpdateTemplateKHR returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkDescriptorUpdateTemplate((VkDescriptorUpdateTemplate*)&descriptorUpdateTemplate);
-    mImpl->log("finish vkDestroyDescriptorUpdateTemplateKHR");;
+    sResourceTracker->destroyMapping()->mapHandles_VkDescriptorUpdateTemplate((VkDescriptorUpdateTemplate*)&descriptorUpdateTemplate);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkUpdateDescriptorSetWithTemplateKHR(
     VkDevice device,
     VkDescriptorSet descriptorSet,
     VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-    const void* pData)
+    const void* pData,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplateKHR encode");
-    mImpl->log("start vkUpdateDescriptorSetWithTemplateKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDescriptorSet local_descriptorSet;
     VkDescriptorUpdateTemplate local_descriptorUpdateTemplate;
@@ -16110,361 +18391,402 @@
     local_device = device;
     local_descriptorSet = descriptorSet;
     local_descriptorUpdateTemplate = descriptorUpdateTemplate;
-    local_pData = nullptr;
-    if (pData)
+    // Avoiding deepcopy for pData
+    local_pData = (void*)pData;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pData = (void*)pool->dupArray(pData, sizeof(const uint8_t));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1066;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1066, 1);
-        countingStream->write((uint64_t*)&cgen_var_1066, 1 * 8);
-        uint64_t cgen_var_1067;
-        countingStream->handleMapping()->mapHandles_VkDescriptorSet_u64(&local_descriptorSet, &cgen_var_1067, 1);
-        countingStream->write((uint64_t*)&cgen_var_1067, 1 * 8);
-        uint64_t cgen_var_1068;
-        countingStream->handleMapping()->mapHandles_VkDescriptorUpdateTemplate_u64(&local_descriptorUpdateTemplate, &cgen_var_1068, 1);
-        countingStream->write((uint64_t*)&cgen_var_1068, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_1069 = (uint64_t)(uintptr_t)local_pData;
-        countingStream->putBe64(cgen_var_1069);
+        *countPtr += 8;
         if (local_pData)
         {
-            countingStream->write((void*)local_pData, sizeof(uint8_t));
+            *countPtr += sizeof(uint8_t);
         }
     }
-    uint32_t packetSize_vkUpdateDescriptorSetWithTemplateKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkUpdateDescriptorSetWithTemplateKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkUpdateDescriptorSetWithTemplateKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkUpdateDescriptorSetWithTemplateKHR = OP_vkUpdateDescriptorSetWithTemplateKHR;
-    stream->write(&opcode_vkUpdateDescriptorSetWithTemplateKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkUpdateDescriptorSetWithTemplateKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1070;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1070, 1);
-    stream->write((uint64_t*)&cgen_var_1070, 1 * 8);
-    uint64_t cgen_var_1071;
-    stream->handleMapping()->mapHandles_VkDescriptorSet_u64(&local_descriptorSet, &cgen_var_1071, 1);
-    stream->write((uint64_t*)&cgen_var_1071, 1 * 8);
-    uint64_t cgen_var_1072;
-    stream->handleMapping()->mapHandles_VkDescriptorUpdateTemplate_u64(&local_descriptorUpdateTemplate, &cgen_var_1072, 1);
-    stream->write((uint64_t*)&cgen_var_1072, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkUpdateDescriptorSetWithTemplateKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkUpdateDescriptorSetWithTemplateKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDescriptorSet((*&local_descriptorSet));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = get_host_u64_VkDescriptorUpdateTemplate((*&local_descriptorUpdateTemplate));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_1073 = (uint64_t)(uintptr_t)local_pData;
-    stream->putBe64(cgen_var_1073);
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)local_pData;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pData)
     {
-        stream->write((void*)local_pData, sizeof(uint8_t));
+        memcpy(*streamPtrPtr, (void*)local_pData, sizeof(uint8_t));
+        *streamPtrPtr += sizeof(uint8_t);
     }
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplateKHR readParams");
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplateKHR returnUnmarshal");
-    mImpl->log("finish vkUpdateDescriptorSetWithTemplateKHR");;
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
+#ifdef VK_KHR_imageless_framebuffer
+#endif
 #ifdef VK_KHR_create_renderpass2
 VkResult VkEncoder::vkCreateRenderPass2KHR(
     VkDevice device,
-    const VkRenderPassCreateInfo2KHR* pCreateInfo,
+    const VkRenderPassCreateInfo2* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkRenderPass* pRenderPass)
+    VkRenderPass* pRenderPass,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateRenderPass2KHR encode");
-    mImpl->log("start vkCreateRenderPass2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
-    VkRenderPassCreateInfo2KHR* local_pCreateInfo;
+    VkRenderPassCreateInfo2* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
     local_device = device;
     local_pCreateInfo = nullptr;
     if (pCreateInfo)
     {
-        local_pCreateInfo = (VkRenderPassCreateInfo2KHR*)pool->alloc(sizeof(const VkRenderPassCreateInfo2KHR));
-        deepcopy_VkRenderPassCreateInfo2KHR(pool, pCreateInfo, (VkRenderPassCreateInfo2KHR*)(local_pCreateInfo));
+        local_pCreateInfo = (VkRenderPassCreateInfo2*)pool->alloc(sizeof(const VkRenderPassCreateInfo2));
+        deepcopy_VkRenderPassCreateInfo2(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkRenderPassCreateInfo2*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkRenderPassCreateInfo2KHR(mImpl->resources(), (VkRenderPassCreateInfo2KHR*)(local_pCreateInfo));
+        transform_tohost_VkRenderPassCreateInfo2(sResourceTracker, (VkRenderPassCreateInfo2*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1074;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1074, 1);
-        countingStream->write((uint64_t*)&cgen_var_1074, 1 * 8);
-        marshal_VkRenderPassCreateInfo2KHR(countingStream, (VkRenderPassCreateInfo2KHR*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkRenderPassCreateInfo2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRenderPassCreateInfo2*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1075 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1075);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_1076;
-        countingStream->handleMapping()->mapHandles_VkRenderPass_u64(pRenderPass, &cgen_var_1076, 1);
-        countingStream->write((uint64_t*)&cgen_var_1076, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateRenderPass2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateRenderPass2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateRenderPass2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateRenderPass2KHR = OP_vkCreateRenderPass2KHR;
-    stream->write(&opcode_vkCreateRenderPass2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateRenderPass2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_1077;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1077, 1);
-    stream->write((uint64_t*)&cgen_var_1077, 1 * 8);
-    marshal_VkRenderPassCreateInfo2KHR(stream, (VkRenderPassCreateInfo2KHR*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateRenderPass2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateRenderPass2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkRenderPassCreateInfo2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRenderPassCreateInfo2*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1078 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1078);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_1079;
-    stream->handleMapping()->mapHandles_VkRenderPass_u64(pRenderPass, &cgen_var_1079, 1);
-    stream->write((uint64_t*)&cgen_var_1079, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateRenderPass2KHR readParams");
-    uint64_t cgen_var_1080;
-    stream->read((uint64_t*)&cgen_var_1080, 8);
-    stream->handleMapping()->mapHandles_u64_VkRenderPass(&cgen_var_1080, (VkRenderPass*)pRenderPass, 1);
-    AEMU_SCOPED_TRACE("vkCreateRenderPass2KHR returnUnmarshal");
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pRenderPass));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkRenderPass(&cgen_var_3, (VkRenderPass*)pRenderPass, 1);
     VkResult vkCreateRenderPass2KHR_VkResult_return = (VkResult)0;
     stream->read(&vkCreateRenderPass2KHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateRenderPass2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateRenderPass2KHR_VkResult_return;
 }
 
 void VkEncoder::vkCmdBeginRenderPass2KHR(
     VkCommandBuffer commandBuffer,
     const VkRenderPassBeginInfo* pRenderPassBegin,
-    const VkSubpassBeginInfoKHR* pSubpassBeginInfo)
+    const VkSubpassBeginInfo* pSubpassBeginInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdBeginRenderPass2KHR encode");
-    mImpl->log("start vkCmdBeginRenderPass2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkRenderPassBeginInfo* local_pRenderPassBegin;
-    VkSubpassBeginInfoKHR* local_pSubpassBeginInfo;
+    VkSubpassBeginInfo* local_pSubpassBeginInfo;
     local_commandBuffer = commandBuffer;
     local_pRenderPassBegin = nullptr;
     if (pRenderPassBegin)
     {
         local_pRenderPassBegin = (VkRenderPassBeginInfo*)pool->alloc(sizeof(const VkRenderPassBeginInfo));
-        deepcopy_VkRenderPassBeginInfo(pool, pRenderPassBegin, (VkRenderPassBeginInfo*)(local_pRenderPassBegin));
+        deepcopy_VkRenderPassBeginInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pRenderPassBegin, (VkRenderPassBeginInfo*)(local_pRenderPassBegin));
     }
     local_pSubpassBeginInfo = nullptr;
     if (pSubpassBeginInfo)
     {
-        local_pSubpassBeginInfo = (VkSubpassBeginInfoKHR*)pool->alloc(sizeof(const VkSubpassBeginInfoKHR));
-        deepcopy_VkSubpassBeginInfoKHR(pool, pSubpassBeginInfo, (VkSubpassBeginInfoKHR*)(local_pSubpassBeginInfo));
+        local_pSubpassBeginInfo = (VkSubpassBeginInfo*)pool->alloc(sizeof(const VkSubpassBeginInfo));
+        deepcopy_VkSubpassBeginInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSubpassBeginInfo, (VkSubpassBeginInfo*)(local_pSubpassBeginInfo));
     }
     if (local_pRenderPassBegin)
     {
-        transform_tohost_VkRenderPassBeginInfo(mImpl->resources(), (VkRenderPassBeginInfo*)(local_pRenderPassBegin));
+        transform_tohost_VkRenderPassBeginInfo(sResourceTracker, (VkRenderPassBeginInfo*)(local_pRenderPassBegin));
     }
     if (local_pSubpassBeginInfo)
     {
-        transform_tohost_VkSubpassBeginInfoKHR(mImpl->resources(), (VkSubpassBeginInfoKHR*)(local_pSubpassBeginInfo));
+        transform_tohost_VkSubpassBeginInfo(sResourceTracker, (VkSubpassBeginInfo*)(local_pSubpassBeginInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1081;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1081, 1);
-        countingStream->write((uint64_t*)&cgen_var_1081, 1 * 8);
-        marshal_VkRenderPassBeginInfo(countingStream, (VkRenderPassBeginInfo*)(local_pRenderPassBegin));
-        marshal_VkSubpassBeginInfoKHR(countingStream, (VkSubpassBeginInfoKHR*)(local_pSubpassBeginInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkRenderPassBeginInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRenderPassBeginInfo*)(local_pRenderPassBegin), countPtr);
+        count_VkSubpassBeginInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubpassBeginInfo*)(local_pSubpassBeginInfo), countPtr);
     }
-    uint32_t packetSize_vkCmdBeginRenderPass2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdBeginRenderPass2KHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBeginRenderPass2KHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBeginRenderPass2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdBeginRenderPass2KHR = OP_vkCmdBeginRenderPass2KHR;
-    stream->write(&opcode_vkCmdBeginRenderPass2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdBeginRenderPass2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_1082;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1082, 1);
-    stream->write((uint64_t*)&cgen_var_1082, 1 * 8);
-    marshal_VkRenderPassBeginInfo(stream, (VkRenderPassBeginInfo*)(local_pRenderPassBegin));
-    marshal_VkSubpassBeginInfoKHR(stream, (VkSubpassBeginInfoKHR*)(local_pSubpassBeginInfo));
-    AEMU_SCOPED_TRACE("vkCmdBeginRenderPass2KHR readParams");
-    AEMU_SCOPED_TRACE("vkCmdBeginRenderPass2KHR returnUnmarshal");
-    mImpl->log("finish vkCmdBeginRenderPass2KHR");;
+    memcpy(streamPtr, &opcode_vkCmdBeginRenderPass2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBeginRenderPass2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkRenderPassBeginInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRenderPassBeginInfo*)(local_pRenderPassBegin), streamPtrPtr);
+    reservedmarshal_VkSubpassBeginInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubpassBeginInfo*)(local_pSubpassBeginInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdNextSubpass2KHR(
     VkCommandBuffer commandBuffer,
-    const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
-    const VkSubpassEndInfoKHR* pSubpassEndInfo)
+    const VkSubpassBeginInfo* pSubpassBeginInfo,
+    const VkSubpassEndInfo* pSubpassEndInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdNextSubpass2KHR encode");
-    mImpl->log("start vkCmdNextSubpass2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
-    VkSubpassBeginInfoKHR* local_pSubpassBeginInfo;
-    VkSubpassEndInfoKHR* local_pSubpassEndInfo;
+    VkSubpassBeginInfo* local_pSubpassBeginInfo;
+    VkSubpassEndInfo* local_pSubpassEndInfo;
     local_commandBuffer = commandBuffer;
     local_pSubpassBeginInfo = nullptr;
     if (pSubpassBeginInfo)
     {
-        local_pSubpassBeginInfo = (VkSubpassBeginInfoKHR*)pool->alloc(sizeof(const VkSubpassBeginInfoKHR));
-        deepcopy_VkSubpassBeginInfoKHR(pool, pSubpassBeginInfo, (VkSubpassBeginInfoKHR*)(local_pSubpassBeginInfo));
+        local_pSubpassBeginInfo = (VkSubpassBeginInfo*)pool->alloc(sizeof(const VkSubpassBeginInfo));
+        deepcopy_VkSubpassBeginInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSubpassBeginInfo, (VkSubpassBeginInfo*)(local_pSubpassBeginInfo));
     }
     local_pSubpassEndInfo = nullptr;
     if (pSubpassEndInfo)
     {
-        local_pSubpassEndInfo = (VkSubpassEndInfoKHR*)pool->alloc(sizeof(const VkSubpassEndInfoKHR));
-        deepcopy_VkSubpassEndInfoKHR(pool, pSubpassEndInfo, (VkSubpassEndInfoKHR*)(local_pSubpassEndInfo));
+        local_pSubpassEndInfo = (VkSubpassEndInfo*)pool->alloc(sizeof(const VkSubpassEndInfo));
+        deepcopy_VkSubpassEndInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSubpassEndInfo, (VkSubpassEndInfo*)(local_pSubpassEndInfo));
     }
     if (local_pSubpassBeginInfo)
     {
-        transform_tohost_VkSubpassBeginInfoKHR(mImpl->resources(), (VkSubpassBeginInfoKHR*)(local_pSubpassBeginInfo));
+        transform_tohost_VkSubpassBeginInfo(sResourceTracker, (VkSubpassBeginInfo*)(local_pSubpassBeginInfo));
     }
     if (local_pSubpassEndInfo)
     {
-        transform_tohost_VkSubpassEndInfoKHR(mImpl->resources(), (VkSubpassEndInfoKHR*)(local_pSubpassEndInfo));
+        transform_tohost_VkSubpassEndInfo(sResourceTracker, (VkSubpassEndInfo*)(local_pSubpassEndInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1083;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1083, 1);
-        countingStream->write((uint64_t*)&cgen_var_1083, 1 * 8);
-        marshal_VkSubpassBeginInfoKHR(countingStream, (VkSubpassBeginInfoKHR*)(local_pSubpassBeginInfo));
-        marshal_VkSubpassEndInfoKHR(countingStream, (VkSubpassEndInfoKHR*)(local_pSubpassEndInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkSubpassBeginInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubpassBeginInfo*)(local_pSubpassBeginInfo), countPtr);
+        count_VkSubpassEndInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubpassEndInfo*)(local_pSubpassEndInfo), countPtr);
     }
-    uint32_t packetSize_vkCmdNextSubpass2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdNextSubpass2KHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdNextSubpass2KHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdNextSubpass2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdNextSubpass2KHR = OP_vkCmdNextSubpass2KHR;
-    stream->write(&opcode_vkCmdNextSubpass2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdNextSubpass2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_1084;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1084, 1);
-    stream->write((uint64_t*)&cgen_var_1084, 1 * 8);
-    marshal_VkSubpassBeginInfoKHR(stream, (VkSubpassBeginInfoKHR*)(local_pSubpassBeginInfo));
-    marshal_VkSubpassEndInfoKHR(stream, (VkSubpassEndInfoKHR*)(local_pSubpassEndInfo));
-    AEMU_SCOPED_TRACE("vkCmdNextSubpass2KHR readParams");
-    AEMU_SCOPED_TRACE("vkCmdNextSubpass2KHR returnUnmarshal");
-    mImpl->log("finish vkCmdNextSubpass2KHR");;
+    memcpy(streamPtr, &opcode_vkCmdNextSubpass2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdNextSubpass2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkSubpassBeginInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubpassBeginInfo*)(local_pSubpassBeginInfo), streamPtrPtr);
+    reservedmarshal_VkSubpassEndInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubpassEndInfo*)(local_pSubpassEndInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdEndRenderPass2KHR(
     VkCommandBuffer commandBuffer,
-    const VkSubpassEndInfoKHR* pSubpassEndInfo)
+    const VkSubpassEndInfo* pSubpassEndInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdEndRenderPass2KHR encode");
-    mImpl->log("start vkCmdEndRenderPass2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
-    VkSubpassEndInfoKHR* local_pSubpassEndInfo;
+    VkSubpassEndInfo* local_pSubpassEndInfo;
     local_commandBuffer = commandBuffer;
     local_pSubpassEndInfo = nullptr;
     if (pSubpassEndInfo)
     {
-        local_pSubpassEndInfo = (VkSubpassEndInfoKHR*)pool->alloc(sizeof(const VkSubpassEndInfoKHR));
-        deepcopy_VkSubpassEndInfoKHR(pool, pSubpassEndInfo, (VkSubpassEndInfoKHR*)(local_pSubpassEndInfo));
+        local_pSubpassEndInfo = (VkSubpassEndInfo*)pool->alloc(sizeof(const VkSubpassEndInfo));
+        deepcopy_VkSubpassEndInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSubpassEndInfo, (VkSubpassEndInfo*)(local_pSubpassEndInfo));
     }
     if (local_pSubpassEndInfo)
     {
-        transform_tohost_VkSubpassEndInfoKHR(mImpl->resources(), (VkSubpassEndInfoKHR*)(local_pSubpassEndInfo));
+        transform_tohost_VkSubpassEndInfo(sResourceTracker, (VkSubpassEndInfo*)(local_pSubpassEndInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1085;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1085, 1);
-        countingStream->write((uint64_t*)&cgen_var_1085, 1 * 8);
-        marshal_VkSubpassEndInfoKHR(countingStream, (VkSubpassEndInfoKHR*)(local_pSubpassEndInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkSubpassEndInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubpassEndInfo*)(local_pSubpassEndInfo), countPtr);
     }
-    uint32_t packetSize_vkCmdEndRenderPass2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdEndRenderPass2KHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdEndRenderPass2KHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdEndRenderPass2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdEndRenderPass2KHR = OP_vkCmdEndRenderPass2KHR;
-    stream->write(&opcode_vkCmdEndRenderPass2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdEndRenderPass2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_1086;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1086, 1);
-    stream->write((uint64_t*)&cgen_var_1086, 1 * 8);
-    marshal_VkSubpassEndInfoKHR(stream, (VkSubpassEndInfoKHR*)(local_pSubpassEndInfo));
-    AEMU_SCOPED_TRACE("vkCmdEndRenderPass2KHR readParams");
-    AEMU_SCOPED_TRACE("vkCmdEndRenderPass2KHR returnUnmarshal");
-    mImpl->log("finish vkCmdEndRenderPass2KHR");;
+    memcpy(streamPtr, &opcode_vkCmdEndRenderPass2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdEndRenderPass2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkSubpassEndInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubpassEndInfo*)(local_pSubpassEndInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
 #ifdef VK_KHR_shared_presentable_image
 VkResult VkEncoder::vkGetSwapchainStatusKHR(
     VkDevice device,
-    VkSwapchainKHR swapchain)
+    VkSwapchainKHR swapchain,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetSwapchainStatusKHR encode");
-    mImpl->log("start vkGetSwapchainStatusKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSwapchainKHR local_swapchain;
     local_device = device;
     local_swapchain = swapchain;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1087;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1087, 1);
-        countingStream->write((uint64_t*)&cgen_var_1087, 1 * 8);
-        uint64_t cgen_var_1088;
-        countingStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&local_swapchain, &cgen_var_1088, 1);
-        countingStream->write((uint64_t*)&cgen_var_1088, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkGetSwapchainStatusKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetSwapchainStatusKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetSwapchainStatusKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetSwapchainStatusKHR = OP_vkGetSwapchainStatusKHR;
-    stream->write(&opcode_vkGetSwapchainStatusKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetSwapchainStatusKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1089;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1089, 1);
-    stream->write((uint64_t*)&cgen_var_1089, 1 * 8);
-    uint64_t cgen_var_1090;
-    stream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&local_swapchain, &cgen_var_1090, 1);
-    stream->write((uint64_t*)&cgen_var_1090, 1 * 8);
-    AEMU_SCOPED_TRACE("vkGetSwapchainStatusKHR readParams");
-    AEMU_SCOPED_TRACE("vkGetSwapchainStatusKHR returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetSwapchainStatusKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetSwapchainStatusKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSwapchainKHR((*&local_swapchain));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     VkResult vkGetSwapchainStatusKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetSwapchainStatusKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetSwapchainStatusKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetSwapchainStatusKHR_VkResult_return;
 }
 
@@ -16473,16 +18795,14 @@
 void VkEncoder::vkGetPhysicalDeviceExternalFencePropertiesKHR(
     VkPhysicalDevice physicalDevice,
     const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
-    VkExternalFenceProperties* pExternalFenceProperties)
+    VkExternalFenceProperties* pExternalFenceProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalFencePropertiesKHR encode");
-    mImpl->log("start vkGetPhysicalDeviceExternalFencePropertiesKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkPhysicalDeviceExternalFenceInfo* local_pExternalFenceInfo;
     local_physicalDevice = physicalDevice;
@@ -16490,38 +18810,46 @@
     if (pExternalFenceInfo)
     {
         local_pExternalFenceInfo = (VkPhysicalDeviceExternalFenceInfo*)pool->alloc(sizeof(const VkPhysicalDeviceExternalFenceInfo));
-        deepcopy_VkPhysicalDeviceExternalFenceInfo(pool, pExternalFenceInfo, (VkPhysicalDeviceExternalFenceInfo*)(local_pExternalFenceInfo));
+        deepcopy_VkPhysicalDeviceExternalFenceInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pExternalFenceInfo, (VkPhysicalDeviceExternalFenceInfo*)(local_pExternalFenceInfo));
     }
     if (local_pExternalFenceInfo)
     {
-        transform_tohost_VkPhysicalDeviceExternalFenceInfo(mImpl->resources(), (VkPhysicalDeviceExternalFenceInfo*)(local_pExternalFenceInfo));
+        transform_tohost_VkPhysicalDeviceExternalFenceInfo(sResourceTracker, (VkPhysicalDeviceExternalFenceInfo*)(local_pExternalFenceInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1091;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1091, 1);
-        countingStream->write((uint64_t*)&cgen_var_1091, 1 * 8);
-        marshal_VkPhysicalDeviceExternalFenceInfo(countingStream, (VkPhysicalDeviceExternalFenceInfo*)(local_pExternalFenceInfo));
-        marshal_VkExternalFenceProperties(countingStream, (VkExternalFenceProperties*)(pExternalFenceProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceExternalFenceInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceExternalFenceInfo*)(local_pExternalFenceInfo), countPtr);
+        count_VkExternalFenceProperties(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalFenceProperties*)(pExternalFenceProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceExternalFencePropertiesKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceExternalFencePropertiesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceExternalFencePropertiesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceExternalFencePropertiesKHR = OP_vkGetPhysicalDeviceExternalFencePropertiesKHR;
-    stream->write(&opcode_vkGetPhysicalDeviceExternalFencePropertiesKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceExternalFencePropertiesKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1092;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1092, 1);
-    stream->write((uint64_t*)&cgen_var_1092, 1 * 8);
-    marshal_VkPhysicalDeviceExternalFenceInfo(stream, (VkPhysicalDeviceExternalFenceInfo*)(local_pExternalFenceInfo));
-    marshal_VkExternalFenceProperties(stream, (VkExternalFenceProperties*)(pExternalFenceProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalFencePropertiesKHR readParams");
-    unmarshal_VkExternalFenceProperties(stream, (VkExternalFenceProperties*)(pExternalFenceProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceExternalFencePropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceExternalFencePropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceExternalFenceInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceExternalFenceInfo*)(local_pExternalFenceInfo), streamPtrPtr);
+    reservedmarshal_VkExternalFenceProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalFenceProperties*)(pExternalFenceProperties), streamPtrPtr);
+    unmarshal_VkExternalFenceProperties(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalFenceProperties*)(pExternalFenceProperties));
     if (pExternalFenceProperties)
     {
-        transform_fromhost_VkExternalFenceProperties(mImpl->resources(), (VkExternalFenceProperties*)(pExternalFenceProperties));
+        transform_fromhost_VkExternalFenceProperties(sResourceTracker, (VkExternalFenceProperties*)(pExternalFenceProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalFencePropertiesKHR returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceExternalFencePropertiesKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
@@ -16530,16 +18858,14 @@
 #ifdef VK_KHR_external_fence_win32
 VkResult VkEncoder::vkImportFenceWin32HandleKHR(
     VkDevice device,
-    const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo)
+    const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkImportFenceWin32HandleKHR encode");
-    mImpl->log("start vkImportFenceWin32HandleKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImportFenceWin32HandleInfoKHR* local_pImportFenceWin32HandleInfo;
     local_device = device;
@@ -16547,52 +18873,55 @@
     if (pImportFenceWin32HandleInfo)
     {
         local_pImportFenceWin32HandleInfo = (VkImportFenceWin32HandleInfoKHR*)pool->alloc(sizeof(const VkImportFenceWin32HandleInfoKHR));
-        deepcopy_VkImportFenceWin32HandleInfoKHR(pool, pImportFenceWin32HandleInfo, (VkImportFenceWin32HandleInfoKHR*)(local_pImportFenceWin32HandleInfo));
+        deepcopy_VkImportFenceWin32HandleInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pImportFenceWin32HandleInfo, (VkImportFenceWin32HandleInfoKHR*)(local_pImportFenceWin32HandleInfo));
     }
     if (local_pImportFenceWin32HandleInfo)
     {
-        transform_tohost_VkImportFenceWin32HandleInfoKHR(mImpl->resources(), (VkImportFenceWin32HandleInfoKHR*)(local_pImportFenceWin32HandleInfo));
+        transform_tohost_VkImportFenceWin32HandleInfoKHR(sResourceTracker, (VkImportFenceWin32HandleInfoKHR*)(local_pImportFenceWin32HandleInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1093;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1093, 1);
-        countingStream->write((uint64_t*)&cgen_var_1093, 1 * 8);
-        marshal_VkImportFenceWin32HandleInfoKHR(countingStream, (VkImportFenceWin32HandleInfoKHR*)(local_pImportFenceWin32HandleInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkImportFenceWin32HandleInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImportFenceWin32HandleInfoKHR*)(local_pImportFenceWin32HandleInfo), countPtr);
     }
-    uint32_t packetSize_vkImportFenceWin32HandleKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkImportFenceWin32HandleKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkImportFenceWin32HandleKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkImportFenceWin32HandleKHR = OP_vkImportFenceWin32HandleKHR;
-    stream->write(&opcode_vkImportFenceWin32HandleKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkImportFenceWin32HandleKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1094;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1094, 1);
-    stream->write((uint64_t*)&cgen_var_1094, 1 * 8);
-    marshal_VkImportFenceWin32HandleInfoKHR(stream, (VkImportFenceWin32HandleInfoKHR*)(local_pImportFenceWin32HandleInfo));
-    AEMU_SCOPED_TRACE("vkImportFenceWin32HandleKHR readParams");
-    AEMU_SCOPED_TRACE("vkImportFenceWin32HandleKHR returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkImportFenceWin32HandleKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkImportFenceWin32HandleKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkImportFenceWin32HandleInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImportFenceWin32HandleInfoKHR*)(local_pImportFenceWin32HandleInfo), streamPtrPtr);
     VkResult vkImportFenceWin32HandleKHR_VkResult_return = (VkResult)0;
     stream->read(&vkImportFenceWin32HandleKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkImportFenceWin32HandleKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkImportFenceWin32HandleKHR_VkResult_return;
 }
 
 VkResult VkEncoder::vkGetFenceWin32HandleKHR(
     VkDevice device,
     const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo,
-    HANDLE* pHandle)
+    HANDLE* pHandle,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetFenceWin32HandleKHR encode");
-    mImpl->log("start vkGetFenceWin32HandleKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkFenceGetWin32HandleInfoKHR* local_pGetWin32HandleInfo;
     local_device = device;
@@ -16600,39 +18929,45 @@
     if (pGetWin32HandleInfo)
     {
         local_pGetWin32HandleInfo = (VkFenceGetWin32HandleInfoKHR*)pool->alloc(sizeof(const VkFenceGetWin32HandleInfoKHR));
-        deepcopy_VkFenceGetWin32HandleInfoKHR(pool, pGetWin32HandleInfo, (VkFenceGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
+        deepcopy_VkFenceGetWin32HandleInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pGetWin32HandleInfo, (VkFenceGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
     }
     if (local_pGetWin32HandleInfo)
     {
-        transform_tohost_VkFenceGetWin32HandleInfoKHR(mImpl->resources(), (VkFenceGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
+        transform_tohost_VkFenceGetWin32HandleInfoKHR(sResourceTracker, (VkFenceGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1095;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1095, 1);
-        countingStream->write((uint64_t*)&cgen_var_1095, 1 * 8);
-        marshal_VkFenceGetWin32HandleInfoKHR(countingStream, (VkFenceGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
-        countingStream->write((HANDLE*)pHandle, sizeof(HANDLE));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkFenceGetWin32HandleInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFenceGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo), countPtr);
+        *countPtr += sizeof(HANDLE);
     }
-    uint32_t packetSize_vkGetFenceWin32HandleKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetFenceWin32HandleKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetFenceWin32HandleKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetFenceWin32HandleKHR = OP_vkGetFenceWin32HandleKHR;
-    stream->write(&opcode_vkGetFenceWin32HandleKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetFenceWin32HandleKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1096;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1096, 1);
-    stream->write((uint64_t*)&cgen_var_1096, 1 * 8);
-    marshal_VkFenceGetWin32HandleInfoKHR(stream, (VkFenceGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo));
-    stream->write((HANDLE*)pHandle, sizeof(HANDLE));
-    AEMU_SCOPED_TRACE("vkGetFenceWin32HandleKHR readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetFenceWin32HandleKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetFenceWin32HandleKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkFenceGetWin32HandleInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFenceGetWin32HandleInfoKHR*)(local_pGetWin32HandleInfo), streamPtrPtr);
+    memcpy(*streamPtrPtr, (HANDLE*)pHandle, sizeof(HANDLE));
+    *streamPtrPtr += sizeof(HANDLE);
     stream->read((HANDLE*)pHandle, sizeof(HANDLE));
-    AEMU_SCOPED_TRACE("vkGetFenceWin32HandleKHR returnUnmarshal");
     VkResult vkGetFenceWin32HandleKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetFenceWin32HandleKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetFenceWin32HandleKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetFenceWin32HandleKHR_VkResult_return;
 }
 
@@ -16640,16 +18975,14 @@
 #ifdef VK_KHR_external_fence_fd
 VkResult VkEncoder::vkImportFenceFdKHR(
     VkDevice device,
-    const VkImportFenceFdInfoKHR* pImportFenceFdInfo)
+    const VkImportFenceFdInfoKHR* pImportFenceFdInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkImportFenceFdKHR encode");
-    mImpl->log("start vkImportFenceFdKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImportFenceFdInfoKHR* local_pImportFenceFdInfo;
     local_device = device;
@@ -16657,52 +18990,55 @@
     if (pImportFenceFdInfo)
     {
         local_pImportFenceFdInfo = (VkImportFenceFdInfoKHR*)pool->alloc(sizeof(const VkImportFenceFdInfoKHR));
-        deepcopy_VkImportFenceFdInfoKHR(pool, pImportFenceFdInfo, (VkImportFenceFdInfoKHR*)(local_pImportFenceFdInfo));
+        deepcopy_VkImportFenceFdInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pImportFenceFdInfo, (VkImportFenceFdInfoKHR*)(local_pImportFenceFdInfo));
     }
     if (local_pImportFenceFdInfo)
     {
-        transform_tohost_VkImportFenceFdInfoKHR(mImpl->resources(), (VkImportFenceFdInfoKHR*)(local_pImportFenceFdInfo));
+        transform_tohost_VkImportFenceFdInfoKHR(sResourceTracker, (VkImportFenceFdInfoKHR*)(local_pImportFenceFdInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1097;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1097, 1);
-        countingStream->write((uint64_t*)&cgen_var_1097, 1 * 8);
-        marshal_VkImportFenceFdInfoKHR(countingStream, (VkImportFenceFdInfoKHR*)(local_pImportFenceFdInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkImportFenceFdInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImportFenceFdInfoKHR*)(local_pImportFenceFdInfo), countPtr);
     }
-    uint32_t packetSize_vkImportFenceFdKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkImportFenceFdKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkImportFenceFdKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkImportFenceFdKHR = OP_vkImportFenceFdKHR;
-    stream->write(&opcode_vkImportFenceFdKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkImportFenceFdKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1098;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1098, 1);
-    stream->write((uint64_t*)&cgen_var_1098, 1 * 8);
-    marshal_VkImportFenceFdInfoKHR(stream, (VkImportFenceFdInfoKHR*)(local_pImportFenceFdInfo));
-    AEMU_SCOPED_TRACE("vkImportFenceFdKHR readParams");
-    AEMU_SCOPED_TRACE("vkImportFenceFdKHR returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkImportFenceFdKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkImportFenceFdKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkImportFenceFdInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImportFenceFdInfoKHR*)(local_pImportFenceFdInfo), streamPtrPtr);
     VkResult vkImportFenceFdKHR_VkResult_return = (VkResult)0;
     stream->read(&vkImportFenceFdKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkImportFenceFdKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkImportFenceFdKHR_VkResult_return;
 }
 
 VkResult VkEncoder::vkGetFenceFdKHR(
     VkDevice device,
     const VkFenceGetFdInfoKHR* pGetFdInfo,
-    int* pFd)
+    int* pFd,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetFenceFdKHR encode");
-    mImpl->log("start vkGetFenceFdKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkFenceGetFdInfoKHR* local_pGetFdInfo;
     local_device = device;
@@ -16710,59 +19046,395 @@
     if (pGetFdInfo)
     {
         local_pGetFdInfo = (VkFenceGetFdInfoKHR*)pool->alloc(sizeof(const VkFenceGetFdInfoKHR));
-        deepcopy_VkFenceGetFdInfoKHR(pool, pGetFdInfo, (VkFenceGetFdInfoKHR*)(local_pGetFdInfo));
+        deepcopy_VkFenceGetFdInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pGetFdInfo, (VkFenceGetFdInfoKHR*)(local_pGetFdInfo));
     }
     if (local_pGetFdInfo)
     {
-        transform_tohost_VkFenceGetFdInfoKHR(mImpl->resources(), (VkFenceGetFdInfoKHR*)(local_pGetFdInfo));
+        transform_tohost_VkFenceGetFdInfoKHR(sResourceTracker, (VkFenceGetFdInfoKHR*)(local_pGetFdInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1099;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1099, 1);
-        countingStream->write((uint64_t*)&cgen_var_1099, 1 * 8);
-        marshal_VkFenceGetFdInfoKHR(countingStream, (VkFenceGetFdInfoKHR*)(local_pGetFdInfo));
-        countingStream->write((int*)pFd, sizeof(int));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkFenceGetFdInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFenceGetFdInfoKHR*)(local_pGetFdInfo), countPtr);
+        *countPtr += sizeof(int);
     }
-    uint32_t packetSize_vkGetFenceFdKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetFenceFdKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetFenceFdKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetFenceFdKHR = OP_vkGetFenceFdKHR;
-    stream->write(&opcode_vkGetFenceFdKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetFenceFdKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1100;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1100, 1);
-    stream->write((uint64_t*)&cgen_var_1100, 1 * 8);
-    marshal_VkFenceGetFdInfoKHR(stream, (VkFenceGetFdInfoKHR*)(local_pGetFdInfo));
-    stream->write((int*)pFd, sizeof(int));
-    AEMU_SCOPED_TRACE("vkGetFenceFdKHR readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetFenceFdKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetFenceFdKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkFenceGetFdInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFenceGetFdInfoKHR*)(local_pGetFdInfo), streamPtrPtr);
+    memcpy(*streamPtrPtr, (int*)pFd, sizeof(int));
+    *streamPtrPtr += sizeof(int);
     stream->read((int*)pFd, sizeof(int));
-    AEMU_SCOPED_TRACE("vkGetFenceFdKHR returnUnmarshal");
     VkResult vkGetFenceFdKHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetFenceFdKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetFenceFdKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetFenceFdKHR_VkResult_return;
 }
 
 #endif
+#ifdef VK_KHR_performance_query
+VkResult VkEncoder::vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t queueFamilyIndex,
+    uint32_t* pCounterCount,
+    VkPerformanceCounterKHR* pCounters,
+    VkPerformanceCounterDescriptionKHR* pCounterDescriptions,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkPhysicalDevice local_physicalDevice;
+    uint32_t local_queueFamilyIndex;
+    local_physicalDevice = physicalDevice;
+    local_queueFamilyIndex = queueFamilyIndex;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pCounterCount)
+        {
+            *countPtr += sizeof(uint32_t);
+        }
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pCounters)
+        {
+            if (pCounterCount)
+            {
+                for (uint32_t i = 0; i < (uint32_t)(*(pCounterCount)); ++i)
+                {
+                    count_VkPerformanceCounterKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPerformanceCounterKHR*)(pCounters + i), countPtr);
+                }
+            }
+        }
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pCounterDescriptions)
+        {
+            if (pCounterCount)
+            {
+                for (uint32_t i = 0; i < (uint32_t)(*(pCounterCount)); ++i)
+                {
+                    count_VkPerformanceCounterDescriptionKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPerformanceCounterDescriptionKHR*)(pCounterDescriptions + i), countPtr);
+                }
+            }
+        }
+    }
+    uint32_t packetSize_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR = OP_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pCounterCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pCounterCount)
+    {
+        memcpy(*streamPtrPtr, (uint32_t*)pCounterCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pCounters;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pCounters)
+    {
+        for (uint32_t i = 0; i < (uint32_t)(*(pCounterCount)); ++i)
+        {
+            reservedmarshal_VkPerformanceCounterKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPerformanceCounterKHR*)(pCounters + i), streamPtrPtr);
+        }
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)pCounterDescriptions;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pCounterDescriptions)
+    {
+        for (uint32_t i = 0; i < (uint32_t)(*(pCounterCount)); ++i)
+        {
+            reservedmarshal_VkPerformanceCounterDescriptionKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPerformanceCounterDescriptionKHR*)(pCounterDescriptions + i), streamPtrPtr);
+        }
+    }
+    // WARNING PTR CHECK
+    uint32_t* check_pCounterCount;
+    check_pCounterCount = (uint32_t*)(uintptr_t)stream->getBe64();
+    if (pCounterCount)
+    {
+        if (!(check_pCounterCount))
+        {
+            fprintf(stderr, "fatal: pCounterCount inconsistent between guest and host\n");
+        }
+        stream->read((uint32_t*)pCounterCount, sizeof(uint32_t));
+    }
+    // WARNING PTR CHECK
+    VkPerformanceCounterKHR* check_pCounters;
+    check_pCounters = (VkPerformanceCounterKHR*)(uintptr_t)stream->getBe64();
+    if (pCounters)
+    {
+        if (!(check_pCounters))
+        {
+            fprintf(stderr, "fatal: pCounters inconsistent between guest and host\n");
+        }
+        if (pCounterCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pCounterCount)); ++i)
+            {
+                unmarshal_VkPerformanceCounterKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPerformanceCounterKHR*)(pCounters + i));
+            }
+        }
+    }
+    if (pCounterCount)
+    {
+        if (pCounters)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pCounterCount)); ++i)
+            {
+                transform_fromhost_VkPerformanceCounterKHR(sResourceTracker, (VkPerformanceCounterKHR*)(pCounters + i));
+            }
+        }
+    }
+    // WARNING PTR CHECK
+    VkPerformanceCounterDescriptionKHR* check_pCounterDescriptions;
+    check_pCounterDescriptions = (VkPerformanceCounterDescriptionKHR*)(uintptr_t)stream->getBe64();
+    if (pCounterDescriptions)
+    {
+        if (!(check_pCounterDescriptions))
+        {
+            fprintf(stderr, "fatal: pCounterDescriptions inconsistent between guest and host\n");
+        }
+        if (pCounterCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pCounterCount)); ++i)
+            {
+                unmarshal_VkPerformanceCounterDescriptionKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPerformanceCounterDescriptionKHR*)(pCounterDescriptions + i));
+            }
+        }
+    }
+    if (pCounterCount)
+    {
+        if (pCounterDescriptions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pCounterCount)); ++i)
+            {
+                transform_fromhost_VkPerformanceCounterDescriptionKHR(sResourceTracker, (VkPerformanceCounterDescriptionKHR*)(pCounterDescriptions + i));
+            }
+        }
+    }
+    VkResult vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR_VkResult_return;
+}
+
+void VkEncoder::vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(
+    VkPhysicalDevice physicalDevice,
+    const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo,
+    uint32_t* pNumPasses,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkPhysicalDevice local_physicalDevice;
+    VkQueryPoolPerformanceCreateInfoKHR* local_pPerformanceQueryCreateInfo;
+    local_physicalDevice = physicalDevice;
+    local_pPerformanceQueryCreateInfo = nullptr;
+    if (pPerformanceQueryCreateInfo)
+    {
+        local_pPerformanceQueryCreateInfo = (VkQueryPoolPerformanceCreateInfoKHR*)pool->alloc(sizeof(const VkQueryPoolPerformanceCreateInfoKHR));
+        deepcopy_VkQueryPoolPerformanceCreateInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pPerformanceQueryCreateInfo, (VkQueryPoolPerformanceCreateInfoKHR*)(local_pPerformanceQueryCreateInfo));
+    }
+    if (local_pPerformanceQueryCreateInfo)
+    {
+        transform_tohost_VkQueryPoolPerformanceCreateInfoKHR(sResourceTracker, (VkQueryPoolPerformanceCreateInfoKHR*)(local_pPerformanceQueryCreateInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkQueryPoolPerformanceCreateInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkQueryPoolPerformanceCreateInfoKHR*)(local_pPerformanceQueryCreateInfo), countPtr);
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR = OP_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkQueryPoolPerformanceCreateInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkQueryPoolPerformanceCreateInfoKHR*)(local_pPerformanceQueryCreateInfo), streamPtrPtr);
+    memcpy(*streamPtrPtr, (uint32_t*)pNumPasses, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    stream->read((uint32_t*)pNumPasses, sizeof(uint32_t));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+VkResult VkEncoder::vkAcquireProfilingLockKHR(
+    VkDevice device,
+    const VkAcquireProfilingLockInfoKHR* pInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkAcquireProfilingLockInfoKHR* local_pInfo;
+    local_device = device;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkAcquireProfilingLockInfoKHR*)pool->alloc(sizeof(const VkAcquireProfilingLockInfoKHR));
+        deepcopy_VkAcquireProfilingLockInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkAcquireProfilingLockInfoKHR*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkAcquireProfilingLockInfoKHR(sResourceTracker, (VkAcquireProfilingLockInfoKHR*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkAcquireProfilingLockInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAcquireProfilingLockInfoKHR*)(local_pInfo), countPtr);
+    }
+    uint32_t packetSize_vkAcquireProfilingLockKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkAcquireProfilingLockKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkAcquireProfilingLockKHR = OP_vkAcquireProfilingLockKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkAcquireProfilingLockKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkAcquireProfilingLockKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkAcquireProfilingLockInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAcquireProfilingLockInfoKHR*)(local_pInfo), streamPtrPtr);
+    VkResult vkAcquireProfilingLockKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkAcquireProfilingLockKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkAcquireProfilingLockKHR_VkResult_return;
+}
+
+void VkEncoder::vkReleaseProfilingLockKHR(
+    VkDevice device,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    local_device = device;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+    }
+    uint32_t packetSize_vkReleaseProfilingLockKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkReleaseProfilingLockKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkReleaseProfilingLockKHR = OP_vkReleaseProfilingLockKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkReleaseProfilingLockKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkReleaseProfilingLockKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
 #ifdef VK_KHR_maintenance2
 #endif
 #ifdef VK_KHR_get_surface_capabilities2
 VkResult VkEncoder::vkGetPhysicalDeviceSurfaceCapabilities2KHR(
     VkPhysicalDevice physicalDevice,
     const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
-    VkSurfaceCapabilities2KHR* pSurfaceCapabilities)
+    VkSurfaceCapabilities2KHR* pSurfaceCapabilities,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceCapabilities2KHR encode");
-    mImpl->log("start vkGetPhysicalDeviceSurfaceCapabilities2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkPhysicalDeviceSurfaceInfo2KHR* local_pSurfaceInfo;
     local_physicalDevice = physicalDevice;
@@ -16770,43 +19442,48 @@
     if (pSurfaceInfo)
     {
         local_pSurfaceInfo = (VkPhysicalDeviceSurfaceInfo2KHR*)pool->alloc(sizeof(const VkPhysicalDeviceSurfaceInfo2KHR));
-        deepcopy_VkPhysicalDeviceSurfaceInfo2KHR(pool, pSurfaceInfo, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo));
+        deepcopy_VkPhysicalDeviceSurfaceInfo2KHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSurfaceInfo, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo));
     }
     if (local_pSurfaceInfo)
     {
-        transform_tohost_VkPhysicalDeviceSurfaceInfo2KHR(mImpl->resources(), (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo));
+        transform_tohost_VkPhysicalDeviceSurfaceInfo2KHR(sResourceTracker, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1101;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1101, 1);
-        countingStream->write((uint64_t*)&cgen_var_1101, 1 * 8);
-        marshal_VkPhysicalDeviceSurfaceInfo2KHR(countingStream, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo));
-        marshal_VkSurfaceCapabilities2KHR(countingStream, (VkSurfaceCapabilities2KHR*)(pSurfaceCapabilities));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceSurfaceInfo2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo), countPtr);
+        count_VkSurfaceCapabilities2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSurfaceCapabilities2KHR*)(pSurfaceCapabilities), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceSurfaceCapabilities2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceSurfaceCapabilities2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceSurfaceCapabilities2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceSurfaceCapabilities2KHR = OP_vkGetPhysicalDeviceSurfaceCapabilities2KHR;
-    stream->write(&opcode_vkGetPhysicalDeviceSurfaceCapabilities2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceSurfaceCapabilities2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_1102;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1102, 1);
-    stream->write((uint64_t*)&cgen_var_1102, 1 * 8);
-    marshal_VkPhysicalDeviceSurfaceInfo2KHR(stream, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo));
-    marshal_VkSurfaceCapabilities2KHR(stream, (VkSurfaceCapabilities2KHR*)(pSurfaceCapabilities));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceCapabilities2KHR readParams");
-    unmarshal_VkSurfaceCapabilities2KHR(stream, (VkSurfaceCapabilities2KHR*)(pSurfaceCapabilities));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceSurfaceCapabilities2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceSurfaceCapabilities2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceSurfaceInfo2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo), streamPtrPtr);
+    reservedmarshal_VkSurfaceCapabilities2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSurfaceCapabilities2KHR*)(pSurfaceCapabilities), streamPtrPtr);
+    unmarshal_VkSurfaceCapabilities2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSurfaceCapabilities2KHR*)(pSurfaceCapabilities));
     if (pSurfaceCapabilities)
     {
-        transform_fromhost_VkSurfaceCapabilities2KHR(mImpl->resources(), (VkSurfaceCapabilities2KHR*)(pSurfaceCapabilities));
+        transform_fromhost_VkSurfaceCapabilities2KHR(sResourceTracker, (VkSurfaceCapabilities2KHR*)(pSurfaceCapabilities));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceCapabilities2KHR returnUnmarshal");
     VkResult vkGetPhysicalDeviceSurfaceCapabilities2KHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetPhysicalDeviceSurfaceCapabilities2KHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceSurfaceCapabilities2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceSurfaceCapabilities2KHR_VkResult_return;
 }
 
@@ -16814,16 +19491,14 @@
     VkPhysicalDevice physicalDevice,
     const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
     uint32_t* pSurfaceFormatCount,
-    VkSurfaceFormat2KHR* pSurfaceFormats)
+    VkSurfaceFormat2KHR* pSurfaceFormats,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceFormats2KHR encode");
-    mImpl->log("start vkGetPhysicalDeviceSurfaceFormats2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkPhysicalDeviceSurfaceInfo2KHR* local_pSurfaceInfo;
     local_physicalDevice = physicalDevice;
@@ -16831,63 +19506,72 @@
     if (pSurfaceInfo)
     {
         local_pSurfaceInfo = (VkPhysicalDeviceSurfaceInfo2KHR*)pool->alloc(sizeof(const VkPhysicalDeviceSurfaceInfo2KHR));
-        deepcopy_VkPhysicalDeviceSurfaceInfo2KHR(pool, pSurfaceInfo, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo));
+        deepcopy_VkPhysicalDeviceSurfaceInfo2KHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSurfaceInfo, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo));
     }
     if (local_pSurfaceInfo)
     {
-        transform_tohost_VkPhysicalDeviceSurfaceInfo2KHR(mImpl->resources(), (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo));
+        transform_tohost_VkPhysicalDeviceSurfaceInfo2KHR(sResourceTracker, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1103;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1103, 1);
-        countingStream->write((uint64_t*)&cgen_var_1103, 1 * 8);
-        marshal_VkPhysicalDeviceSurfaceInfo2KHR(countingStream, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceSurfaceInfo2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1104 = (uint64_t)(uintptr_t)pSurfaceFormatCount;
-        countingStream->putBe64(cgen_var_1104);
+        *countPtr += 8;
         if (pSurfaceFormatCount)
         {
-            countingStream->write((uint32_t*)pSurfaceFormatCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_1105 = (uint64_t)(uintptr_t)pSurfaceFormats;
-        countingStream->putBe64(cgen_var_1105);
+        *countPtr += 8;
         if (pSurfaceFormats)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pSurfaceFormatCount)); ++i)
+            if (pSurfaceFormatCount)
             {
-                marshal_VkSurfaceFormat2KHR(countingStream, (VkSurfaceFormat2KHR*)(pSurfaceFormats + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pSurfaceFormatCount)); ++i)
+                {
+                    count_VkSurfaceFormat2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSurfaceFormat2KHR*)(pSurfaceFormats + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetPhysicalDeviceSurfaceFormats2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceSurfaceFormats2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceSurfaceFormats2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceSurfaceFormats2KHR = OP_vkGetPhysicalDeviceSurfaceFormats2KHR;
-    stream->write(&opcode_vkGetPhysicalDeviceSurfaceFormats2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceSurfaceFormats2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_1106;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1106, 1);
-    stream->write((uint64_t*)&cgen_var_1106, 1 * 8);
-    marshal_VkPhysicalDeviceSurfaceInfo2KHR(stream, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceSurfaceFormats2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceSurfaceFormats2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceSurfaceInfo2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1107 = (uint64_t)(uintptr_t)pSurfaceFormatCount;
-    stream->putBe64(cgen_var_1107);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pSurfaceFormatCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pSurfaceFormatCount)
     {
-        stream->write((uint32_t*)pSurfaceFormatCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pSurfaceFormatCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1108 = (uint64_t)(uintptr_t)pSurfaceFormats;
-    stream->putBe64(cgen_var_1108);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pSurfaceFormats;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pSurfaceFormats)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pSurfaceFormatCount)); ++i)
         {
-            marshal_VkSurfaceFormat2KHR(stream, (VkSurfaceFormat2KHR*)(pSurfaceFormats + i));
+            reservedmarshal_VkSurfaceFormat2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSurfaceFormat2KHR*)(pSurfaceFormats + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceFormats2KHR readParams");
     // WARNING PTR CHECK
     uint32_t* check_pSurfaceFormatCount;
     check_pSurfaceFormatCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -16908,25 +19592,33 @@
         {
             fprintf(stderr, "fatal: pSurfaceFormats inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pSurfaceFormatCount)); ++i)
+        if (pSurfaceFormatCount)
         {
-            unmarshal_VkSurfaceFormat2KHR(stream, (VkSurfaceFormat2KHR*)(pSurfaceFormats + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pSurfaceFormatCount)); ++i)
+            {
+                unmarshal_VkSurfaceFormat2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSurfaceFormat2KHR*)(pSurfaceFormats + i));
+            }
         }
     }
-    if (pSurfaceFormats)
+    if (pSurfaceFormatCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pSurfaceFormatCount)); ++i)
+        if (pSurfaceFormats)
         {
-            transform_fromhost_VkSurfaceFormat2KHR(mImpl->resources(), (VkSurfaceFormat2KHR*)(pSurfaceFormats + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pSurfaceFormatCount)); ++i)
+            {
+                transform_fromhost_VkSurfaceFormat2KHR(sResourceTracker, (VkSurfaceFormat2KHR*)(pSurfaceFormats + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceFormats2KHR returnUnmarshal");
     VkResult vkGetPhysicalDeviceSurfaceFormats2KHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetPhysicalDeviceSurfaceFormats2KHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceSurfaceFormats2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceSurfaceFormats2KHR_VkResult_return;
 }
 
@@ -16937,67 +19629,74 @@
 VkResult VkEncoder::vkGetPhysicalDeviceDisplayProperties2KHR(
     VkPhysicalDevice physicalDevice,
     uint32_t* pPropertyCount,
-    VkDisplayProperties2KHR* pProperties)
+    VkDisplayProperties2KHR* pProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayProperties2KHR encode");
-    mImpl->log("start vkGetPhysicalDeviceDisplayProperties2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     local_physicalDevice = physicalDevice;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1111;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1111, 1);
-        countingStream->write((uint64_t*)&cgen_var_1111, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_1112 = (uint64_t)(uintptr_t)pPropertyCount;
-        countingStream->putBe64(cgen_var_1112);
+        *countPtr += 8;
         if (pPropertyCount)
         {
-            countingStream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_1113 = (uint64_t)(uintptr_t)pProperties;
-        countingStream->putBe64(cgen_var_1113);
+        *countPtr += 8;
         if (pProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            if (pPropertyCount)
             {
-                marshal_VkDisplayProperties2KHR(countingStream, (VkDisplayProperties2KHR*)(pProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+                {
+                    count_VkDisplayProperties2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayProperties2KHR*)(pProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetPhysicalDeviceDisplayProperties2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceDisplayProperties2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceDisplayProperties2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceDisplayProperties2KHR = OP_vkGetPhysicalDeviceDisplayProperties2KHR;
-    stream->write(&opcode_vkGetPhysicalDeviceDisplayProperties2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceDisplayProperties2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_1114;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1114, 1);
-    stream->write((uint64_t*)&cgen_var_1114, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceDisplayProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceDisplayProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_1115 = (uint64_t)(uintptr_t)pPropertyCount;
-    stream->putBe64(cgen_var_1115);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pPropertyCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPropertyCount)
     {
-        stream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPropertyCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1116 = (uint64_t)(uintptr_t)pProperties;
-    stream->putBe64(cgen_var_1116);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pProperties;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
         {
-            marshal_VkDisplayProperties2KHR(stream, (VkDisplayProperties2KHR*)(pProperties + i));
+            reservedmarshal_VkDisplayProperties2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayProperties2KHR*)(pProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayProperties2KHR readParams");
     // WARNING PTR CHECK
     uint32_t* check_pPropertyCount;
     check_pPropertyCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -17018,92 +19717,107 @@
         {
             fprintf(stderr, "fatal: pProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pPropertyCount)
         {
-            unmarshal_VkDisplayProperties2KHR(stream, (VkDisplayProperties2KHR*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                unmarshal_VkDisplayProperties2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayProperties2KHR*)(pProperties + i));
+            }
         }
     }
-    if (pProperties)
+    if (pPropertyCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pProperties)
         {
-            transform_fromhost_VkDisplayProperties2KHR(mImpl->resources(), (VkDisplayProperties2KHR*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                transform_fromhost_VkDisplayProperties2KHR(sResourceTracker, (VkDisplayProperties2KHR*)(pProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayProperties2KHR returnUnmarshal");
     VkResult vkGetPhysicalDeviceDisplayProperties2KHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetPhysicalDeviceDisplayProperties2KHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceDisplayProperties2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceDisplayProperties2KHR_VkResult_return;
 }
 
 VkResult VkEncoder::vkGetPhysicalDeviceDisplayPlaneProperties2KHR(
     VkPhysicalDevice physicalDevice,
     uint32_t* pPropertyCount,
-    VkDisplayPlaneProperties2KHR* pProperties)
+    VkDisplayPlaneProperties2KHR* pProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayPlaneProperties2KHR encode");
-    mImpl->log("start vkGetPhysicalDeviceDisplayPlaneProperties2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     local_physicalDevice = physicalDevice;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1119;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1119, 1);
-        countingStream->write((uint64_t*)&cgen_var_1119, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_1120 = (uint64_t)(uintptr_t)pPropertyCount;
-        countingStream->putBe64(cgen_var_1120);
+        *countPtr += 8;
         if (pPropertyCount)
         {
-            countingStream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_1121 = (uint64_t)(uintptr_t)pProperties;
-        countingStream->putBe64(cgen_var_1121);
+        *countPtr += 8;
         if (pProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            if (pPropertyCount)
             {
-                marshal_VkDisplayPlaneProperties2KHR(countingStream, (VkDisplayPlaneProperties2KHR*)(pProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+                {
+                    count_VkDisplayPlaneProperties2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPlaneProperties2KHR*)(pProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetPhysicalDeviceDisplayPlaneProperties2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceDisplayPlaneProperties2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceDisplayPlaneProperties2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceDisplayPlaneProperties2KHR = OP_vkGetPhysicalDeviceDisplayPlaneProperties2KHR;
-    stream->write(&opcode_vkGetPhysicalDeviceDisplayPlaneProperties2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceDisplayPlaneProperties2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_1122;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1122, 1);
-    stream->write((uint64_t*)&cgen_var_1122, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceDisplayPlaneProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceDisplayPlaneProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_1123 = (uint64_t)(uintptr_t)pPropertyCount;
-    stream->putBe64(cgen_var_1123);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pPropertyCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPropertyCount)
     {
-        stream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPropertyCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1124 = (uint64_t)(uintptr_t)pProperties;
-    stream->putBe64(cgen_var_1124);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pProperties;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
         {
-            marshal_VkDisplayPlaneProperties2KHR(stream, (VkDisplayPlaneProperties2KHR*)(pProperties + i));
+            reservedmarshal_VkDisplayPlaneProperties2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPlaneProperties2KHR*)(pProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayPlaneProperties2KHR readParams");
     // WARNING PTR CHECK
     uint32_t* check_pPropertyCount;
     check_pPropertyCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -17124,25 +19838,33 @@
         {
             fprintf(stderr, "fatal: pProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pPropertyCount)
         {
-            unmarshal_VkDisplayPlaneProperties2KHR(stream, (VkDisplayPlaneProperties2KHR*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                unmarshal_VkDisplayPlaneProperties2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPlaneProperties2KHR*)(pProperties + i));
+            }
         }
     }
-    if (pProperties)
+    if (pPropertyCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pProperties)
         {
-            transform_fromhost_VkDisplayPlaneProperties2KHR(mImpl->resources(), (VkDisplayPlaneProperties2KHR*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                transform_fromhost_VkDisplayPlaneProperties2KHR(sResourceTracker, (VkDisplayPlaneProperties2KHR*)(pProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayPlaneProperties2KHR returnUnmarshal");
     VkResult vkGetPhysicalDeviceDisplayPlaneProperties2KHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetPhysicalDeviceDisplayPlaneProperties2KHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceDisplayPlaneProperties2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceDisplayPlaneProperties2KHR_VkResult_return;
 }
 
@@ -17150,75 +19872,82 @@
     VkPhysicalDevice physicalDevice,
     VkDisplayKHR display,
     uint32_t* pPropertyCount,
-    VkDisplayModeProperties2KHR* pProperties)
+    VkDisplayModeProperties2KHR* pProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetDisplayModeProperties2KHR encode");
-    mImpl->log("start vkGetDisplayModeProperties2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkDisplayKHR local_display;
     local_physicalDevice = physicalDevice;
     local_display = display;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1127;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1127, 1);
-        countingStream->write((uint64_t*)&cgen_var_1127, 1 * 8);
-        uint64_t cgen_var_1128;
-        countingStream->handleMapping()->mapHandles_VkDisplayKHR_u64(&local_display, &cgen_var_1128, 1);
-        countingStream->write((uint64_t*)&cgen_var_1128, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_1129 = (uint64_t)(uintptr_t)pPropertyCount;
-        countingStream->putBe64(cgen_var_1129);
+        *countPtr += 8;
         if (pPropertyCount)
         {
-            countingStream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_1130 = (uint64_t)(uintptr_t)pProperties;
-        countingStream->putBe64(cgen_var_1130);
+        *countPtr += 8;
         if (pProperties)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            if (pPropertyCount)
             {
-                marshal_VkDisplayModeProperties2KHR(countingStream, (VkDisplayModeProperties2KHR*)(pProperties + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+                {
+                    count_VkDisplayModeProperties2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayModeProperties2KHR*)(pProperties + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetDisplayModeProperties2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetDisplayModeProperties2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDisplayModeProperties2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetDisplayModeProperties2KHR = OP_vkGetDisplayModeProperties2KHR;
-    stream->write(&opcode_vkGetDisplayModeProperties2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetDisplayModeProperties2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_1131;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1131, 1);
-    stream->write((uint64_t*)&cgen_var_1131, 1 * 8);
-    uint64_t cgen_var_1132;
-    stream->handleMapping()->mapHandles_VkDisplayKHR_u64(&local_display, &cgen_var_1132, 1);
-    stream->write((uint64_t*)&cgen_var_1132, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDisplayModeProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDisplayModeProperties2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDisplayKHR((*&local_display));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_1133 = (uint64_t)(uintptr_t)pPropertyCount;
-    stream->putBe64(cgen_var_1133);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pPropertyCount;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPropertyCount)
     {
-        stream->write((uint32_t*)pPropertyCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPropertyCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1134 = (uint64_t)(uintptr_t)pProperties;
-    stream->putBe64(cgen_var_1134);
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)pProperties;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pProperties)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
         {
-            marshal_VkDisplayModeProperties2KHR(stream, (VkDisplayModeProperties2KHR*)(pProperties + i));
+            reservedmarshal_VkDisplayModeProperties2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayModeProperties2KHR*)(pProperties + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetDisplayModeProperties2KHR readParams");
     // WARNING PTR CHECK
     uint32_t* check_pPropertyCount;
     check_pPropertyCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -17239,41 +19968,47 @@
         {
             fprintf(stderr, "fatal: pProperties inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pPropertyCount)
         {
-            unmarshal_VkDisplayModeProperties2KHR(stream, (VkDisplayModeProperties2KHR*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                unmarshal_VkDisplayModeProperties2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayModeProperties2KHR*)(pProperties + i));
+            }
         }
     }
-    if (pProperties)
+    if (pPropertyCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        if (pProperties)
         {
-            transform_fromhost_VkDisplayModeProperties2KHR(mImpl->resources(), (VkDisplayModeProperties2KHR*)(pProperties + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                transform_fromhost_VkDisplayModeProperties2KHR(sResourceTracker, (VkDisplayModeProperties2KHR*)(pProperties + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetDisplayModeProperties2KHR returnUnmarshal");
     VkResult vkGetDisplayModeProperties2KHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetDisplayModeProperties2KHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetDisplayModeProperties2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetDisplayModeProperties2KHR_VkResult_return;
 }
 
 VkResult VkEncoder::vkGetDisplayPlaneCapabilities2KHR(
     VkPhysicalDevice physicalDevice,
     const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo,
-    VkDisplayPlaneCapabilities2KHR* pCapabilities)
+    VkDisplayPlaneCapabilities2KHR* pCapabilities,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetDisplayPlaneCapabilities2KHR encode");
-    mImpl->log("start vkGetDisplayPlaneCapabilities2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkDisplayPlaneInfo2KHR* local_pDisplayPlaneInfo;
     local_physicalDevice = physicalDevice;
@@ -17281,43 +20016,48 @@
     if (pDisplayPlaneInfo)
     {
         local_pDisplayPlaneInfo = (VkDisplayPlaneInfo2KHR*)pool->alloc(sizeof(const VkDisplayPlaneInfo2KHR));
-        deepcopy_VkDisplayPlaneInfo2KHR(pool, pDisplayPlaneInfo, (VkDisplayPlaneInfo2KHR*)(local_pDisplayPlaneInfo));
+        deepcopy_VkDisplayPlaneInfo2KHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pDisplayPlaneInfo, (VkDisplayPlaneInfo2KHR*)(local_pDisplayPlaneInfo));
     }
     if (local_pDisplayPlaneInfo)
     {
-        transform_tohost_VkDisplayPlaneInfo2KHR(mImpl->resources(), (VkDisplayPlaneInfo2KHR*)(local_pDisplayPlaneInfo));
+        transform_tohost_VkDisplayPlaneInfo2KHR(sResourceTracker, (VkDisplayPlaneInfo2KHR*)(local_pDisplayPlaneInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1137;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1137, 1);
-        countingStream->write((uint64_t*)&cgen_var_1137, 1 * 8);
-        marshal_VkDisplayPlaneInfo2KHR(countingStream, (VkDisplayPlaneInfo2KHR*)(local_pDisplayPlaneInfo));
-        marshal_VkDisplayPlaneCapabilities2KHR(countingStream, (VkDisplayPlaneCapabilities2KHR*)(pCapabilities));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDisplayPlaneInfo2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPlaneInfo2KHR*)(local_pDisplayPlaneInfo), countPtr);
+        count_VkDisplayPlaneCapabilities2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPlaneCapabilities2KHR*)(pCapabilities), countPtr);
     }
-    uint32_t packetSize_vkGetDisplayPlaneCapabilities2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetDisplayPlaneCapabilities2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDisplayPlaneCapabilities2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetDisplayPlaneCapabilities2KHR = OP_vkGetDisplayPlaneCapabilities2KHR;
-    stream->write(&opcode_vkGetDisplayPlaneCapabilities2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetDisplayPlaneCapabilities2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_1138;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1138, 1);
-    stream->write((uint64_t*)&cgen_var_1138, 1 * 8);
-    marshal_VkDisplayPlaneInfo2KHR(stream, (VkDisplayPlaneInfo2KHR*)(local_pDisplayPlaneInfo));
-    marshal_VkDisplayPlaneCapabilities2KHR(stream, (VkDisplayPlaneCapabilities2KHR*)(pCapabilities));
-    AEMU_SCOPED_TRACE("vkGetDisplayPlaneCapabilities2KHR readParams");
-    unmarshal_VkDisplayPlaneCapabilities2KHR(stream, (VkDisplayPlaneCapabilities2KHR*)(pCapabilities));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDisplayPlaneCapabilities2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDisplayPlaneCapabilities2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDisplayPlaneInfo2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPlaneInfo2KHR*)(local_pDisplayPlaneInfo), streamPtrPtr);
+    reservedmarshal_VkDisplayPlaneCapabilities2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPlaneCapabilities2KHR*)(pCapabilities), streamPtrPtr);
+    unmarshal_VkDisplayPlaneCapabilities2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPlaneCapabilities2KHR*)(pCapabilities));
     if (pCapabilities)
     {
-        transform_fromhost_VkDisplayPlaneCapabilities2KHR(mImpl->resources(), (VkDisplayPlaneCapabilities2KHR*)(pCapabilities));
+        transform_fromhost_VkDisplayPlaneCapabilities2KHR(sResourceTracker, (VkDisplayPlaneCapabilities2KHR*)(pCapabilities));
     }
-    AEMU_SCOPED_TRACE("vkGetDisplayPlaneCapabilities2KHR returnUnmarshal");
     VkResult vkGetDisplayPlaneCapabilities2KHR_VkResult_return = (VkResult)0;
     stream->read(&vkGetDisplayPlaneCapabilities2KHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetDisplayPlaneCapabilities2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetDisplayPlaneCapabilities2KHR_VkResult_return;
 }
 
@@ -17332,16 +20072,14 @@
 void VkEncoder::vkGetImageMemoryRequirements2KHR(
     VkDevice device,
     const VkImageMemoryRequirementsInfo2* pInfo,
-    VkMemoryRequirements2* pMemoryRequirements)
+    VkMemoryRequirements2* pMemoryRequirements,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements2KHR encode");
-    mImpl->log("start vkGetImageMemoryRequirements2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImageMemoryRequirementsInfo2* local_pInfo;
     local_device = device;
@@ -17349,53 +20087,59 @@
     if (pInfo)
     {
         local_pInfo = (VkImageMemoryRequirementsInfo2*)pool->alloc(sizeof(const VkImageMemoryRequirementsInfo2));
-        deepcopy_VkImageMemoryRequirementsInfo2(pool, pInfo, (VkImageMemoryRequirementsInfo2*)(local_pInfo));
+        deepcopy_VkImageMemoryRequirementsInfo2(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkImageMemoryRequirementsInfo2*)(local_pInfo));
     }
     if (local_pInfo)
     {
-        transform_tohost_VkImageMemoryRequirementsInfo2(mImpl->resources(), (VkImageMemoryRequirementsInfo2*)(local_pInfo));
+        transform_tohost_VkImageMemoryRequirementsInfo2(sResourceTracker, (VkImageMemoryRequirementsInfo2*)(local_pInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1139;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1139, 1);
-        countingStream->write((uint64_t*)&cgen_var_1139, 1 * 8);
-        marshal_VkImageMemoryRequirementsInfo2(countingStream, (VkImageMemoryRequirementsInfo2*)(local_pInfo));
-        marshal_VkMemoryRequirements2(countingStream, (VkMemoryRequirements2*)(pMemoryRequirements));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkImageMemoryRequirementsInfo2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageMemoryRequirementsInfo2*)(local_pInfo), countPtr);
+        count_VkMemoryRequirements2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2*)(pMemoryRequirements), countPtr);
     }
-    uint32_t packetSize_vkGetImageMemoryRequirements2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetImageMemoryRequirements2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetImageMemoryRequirements2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetImageMemoryRequirements2KHR = OP_vkGetImageMemoryRequirements2KHR;
-    stream->write(&opcode_vkGetImageMemoryRequirements2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetImageMemoryRequirements2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_1140;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1140, 1);
-    stream->write((uint64_t*)&cgen_var_1140, 1 * 8);
-    marshal_VkImageMemoryRequirementsInfo2(stream, (VkImageMemoryRequirementsInfo2*)(local_pInfo));
-    marshal_VkMemoryRequirements2(stream, (VkMemoryRequirements2*)(pMemoryRequirements));
-    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements2KHR readParams");
-    unmarshal_VkMemoryRequirements2(stream, (VkMemoryRequirements2*)(pMemoryRequirements));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetImageMemoryRequirements2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetImageMemoryRequirements2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkImageMemoryRequirementsInfo2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageMemoryRequirementsInfo2*)(local_pInfo), streamPtrPtr);
+    reservedmarshal_VkMemoryRequirements2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2*)(pMemoryRequirements), streamPtrPtr);
+    unmarshal_VkMemoryRequirements2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2*)(pMemoryRequirements));
     if (pMemoryRequirements)
     {
-        transform_fromhost_VkMemoryRequirements2(mImpl->resources(), (VkMemoryRequirements2*)(pMemoryRequirements));
+        transform_fromhost_VkMemoryRequirements2(sResourceTracker, (VkMemoryRequirements2*)(pMemoryRequirements));
     }
-    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements2KHR returnUnmarshal");
-    mImpl->log("finish vkGetImageMemoryRequirements2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetBufferMemoryRequirements2KHR(
     VkDevice device,
     const VkBufferMemoryRequirementsInfo2* pInfo,
-    VkMemoryRequirements2* pMemoryRequirements)
+    VkMemoryRequirements2* pMemoryRequirements,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements2KHR encode");
-    mImpl->log("start vkGetBufferMemoryRequirements2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkBufferMemoryRequirementsInfo2* local_pInfo;
     local_device = device;
@@ -17403,54 +20147,60 @@
     if (pInfo)
     {
         local_pInfo = (VkBufferMemoryRequirementsInfo2*)pool->alloc(sizeof(const VkBufferMemoryRequirementsInfo2));
-        deepcopy_VkBufferMemoryRequirementsInfo2(pool, pInfo, (VkBufferMemoryRequirementsInfo2*)(local_pInfo));
+        deepcopy_VkBufferMemoryRequirementsInfo2(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkBufferMemoryRequirementsInfo2*)(local_pInfo));
     }
     if (local_pInfo)
     {
-        transform_tohost_VkBufferMemoryRequirementsInfo2(mImpl->resources(), (VkBufferMemoryRequirementsInfo2*)(local_pInfo));
+        transform_tohost_VkBufferMemoryRequirementsInfo2(sResourceTracker, (VkBufferMemoryRequirementsInfo2*)(local_pInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1141;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1141, 1);
-        countingStream->write((uint64_t*)&cgen_var_1141, 1 * 8);
-        marshal_VkBufferMemoryRequirementsInfo2(countingStream, (VkBufferMemoryRequirementsInfo2*)(local_pInfo));
-        marshal_VkMemoryRequirements2(countingStream, (VkMemoryRequirements2*)(pMemoryRequirements));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkBufferMemoryRequirementsInfo2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferMemoryRequirementsInfo2*)(local_pInfo), countPtr);
+        count_VkMemoryRequirements2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2*)(pMemoryRequirements), countPtr);
     }
-    uint32_t packetSize_vkGetBufferMemoryRequirements2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetBufferMemoryRequirements2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetBufferMemoryRequirements2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetBufferMemoryRequirements2KHR = OP_vkGetBufferMemoryRequirements2KHR;
-    stream->write(&opcode_vkGetBufferMemoryRequirements2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetBufferMemoryRequirements2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_1142;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1142, 1);
-    stream->write((uint64_t*)&cgen_var_1142, 1 * 8);
-    marshal_VkBufferMemoryRequirementsInfo2(stream, (VkBufferMemoryRequirementsInfo2*)(local_pInfo));
-    marshal_VkMemoryRequirements2(stream, (VkMemoryRequirements2*)(pMemoryRequirements));
-    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements2KHR readParams");
-    unmarshal_VkMemoryRequirements2(stream, (VkMemoryRequirements2*)(pMemoryRequirements));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetBufferMemoryRequirements2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetBufferMemoryRequirements2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkBufferMemoryRequirementsInfo2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferMemoryRequirementsInfo2*)(local_pInfo), streamPtrPtr);
+    reservedmarshal_VkMemoryRequirements2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2*)(pMemoryRequirements), streamPtrPtr);
+    unmarshal_VkMemoryRequirements2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2*)(pMemoryRequirements));
     if (pMemoryRequirements)
     {
-        transform_fromhost_VkMemoryRequirements2(mImpl->resources(), (VkMemoryRequirements2*)(pMemoryRequirements));
+        transform_fromhost_VkMemoryRequirements2(sResourceTracker, (VkMemoryRequirements2*)(pMemoryRequirements));
     }
-    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements2KHR returnUnmarshal");
-    mImpl->log("finish vkGetBufferMemoryRequirements2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetImageSparseMemoryRequirements2KHR(
     VkDevice device,
     const VkImageSparseMemoryRequirementsInfo2* pInfo,
     uint32_t* pSparseMemoryRequirementCount,
-    VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
+    VkSparseImageMemoryRequirements2* pSparseMemoryRequirements,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements2KHR encode");
-    mImpl->log("start vkGetImageSparseMemoryRequirements2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImageSparseMemoryRequirementsInfo2* local_pInfo;
     local_device = device;
@@ -17458,63 +20208,72 @@
     if (pInfo)
     {
         local_pInfo = (VkImageSparseMemoryRequirementsInfo2*)pool->alloc(sizeof(const VkImageSparseMemoryRequirementsInfo2));
-        deepcopy_VkImageSparseMemoryRequirementsInfo2(pool, pInfo, (VkImageSparseMemoryRequirementsInfo2*)(local_pInfo));
+        deepcopy_VkImageSparseMemoryRequirementsInfo2(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkImageSparseMemoryRequirementsInfo2*)(local_pInfo));
     }
     if (local_pInfo)
     {
-        transform_tohost_VkImageSparseMemoryRequirementsInfo2(mImpl->resources(), (VkImageSparseMemoryRequirementsInfo2*)(local_pInfo));
+        transform_tohost_VkImageSparseMemoryRequirementsInfo2(sResourceTracker, (VkImageSparseMemoryRequirementsInfo2*)(local_pInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1143;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1143, 1);
-        countingStream->write((uint64_t*)&cgen_var_1143, 1 * 8);
-        marshal_VkImageSparseMemoryRequirementsInfo2(countingStream, (VkImageSparseMemoryRequirementsInfo2*)(local_pInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkImageSparseMemoryRequirementsInfo2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageSparseMemoryRequirementsInfo2*)(local_pInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1144 = (uint64_t)(uintptr_t)pSparseMemoryRequirementCount;
-        countingStream->putBe64(cgen_var_1144);
+        *countPtr += 8;
         if (pSparseMemoryRequirementCount)
         {
-            countingStream->write((uint32_t*)pSparseMemoryRequirementCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_1145 = (uint64_t)(uintptr_t)pSparseMemoryRequirements;
-        countingStream->putBe64(cgen_var_1145);
+        *countPtr += 8;
         if (pSparseMemoryRequirements)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+            if (pSparseMemoryRequirementCount)
             {
-                marshal_VkSparseImageMemoryRequirements2(countingStream, (VkSparseImageMemoryRequirements2*)(pSparseMemoryRequirements + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+                {
+                    count_VkSparseImageMemoryRequirements2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageMemoryRequirements2*)(pSparseMemoryRequirements + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetImageSparseMemoryRequirements2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetImageSparseMemoryRequirements2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetImageSparseMemoryRequirements2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetImageSparseMemoryRequirements2KHR = OP_vkGetImageSparseMemoryRequirements2KHR;
-    stream->write(&opcode_vkGetImageSparseMemoryRequirements2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetImageSparseMemoryRequirements2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_1146;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1146, 1);
-    stream->write((uint64_t*)&cgen_var_1146, 1 * 8);
-    marshal_VkImageSparseMemoryRequirementsInfo2(stream, (VkImageSparseMemoryRequirementsInfo2*)(local_pInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetImageSparseMemoryRequirements2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetImageSparseMemoryRequirements2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkImageSparseMemoryRequirementsInfo2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageSparseMemoryRequirementsInfo2*)(local_pInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1147 = (uint64_t)(uintptr_t)pSparseMemoryRequirementCount;
-    stream->putBe64(cgen_var_1147);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pSparseMemoryRequirementCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pSparseMemoryRequirementCount)
     {
-        stream->write((uint32_t*)pSparseMemoryRequirementCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pSparseMemoryRequirementCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1148 = (uint64_t)(uintptr_t)pSparseMemoryRequirements;
-    stream->putBe64(cgen_var_1148);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pSparseMemoryRequirements;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pSparseMemoryRequirements)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
         {
-            marshal_VkSparseImageMemoryRequirements2(stream, (VkSparseImageMemoryRequirements2*)(pSparseMemoryRequirements + i));
+            reservedmarshal_VkSparseImageMemoryRequirements2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageMemoryRequirements2*)(pSparseMemoryRequirements + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements2KHR readParams");
     // WARNING PTR CHECK
     uint32_t* check_pSparseMemoryRequirementCount;
     check_pSparseMemoryRequirementCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -17535,20 +20294,31 @@
         {
             fprintf(stderr, "fatal: pSparseMemoryRequirements inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+        if (pSparseMemoryRequirementCount)
         {
-            unmarshal_VkSparseImageMemoryRequirements2(stream, (VkSparseImageMemoryRequirements2*)(pSparseMemoryRequirements + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+            {
+                unmarshal_VkSparseImageMemoryRequirements2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSparseImageMemoryRequirements2*)(pSparseMemoryRequirements + i));
+            }
         }
     }
-    if (pSparseMemoryRequirements)
+    if (pSparseMemoryRequirementCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+        if (pSparseMemoryRequirements)
         {
-            transform_fromhost_VkSparseImageMemoryRequirements2(mImpl->resources(), (VkSparseImageMemoryRequirements2*)(pSparseMemoryRequirements + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pSparseMemoryRequirementCount)); ++i)
+            {
+                transform_fromhost_VkSparseImageMemoryRequirements2(sResourceTracker, (VkSparseImageMemoryRequirements2*)(pSparseMemoryRequirements + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements2KHR returnUnmarshal");
-    mImpl->log("finish vkGetImageSparseMemoryRequirements2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
@@ -17559,16 +20329,14 @@
     VkDevice device,
     const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkSamplerYcbcrConversion* pYcbcrConversion)
+    VkSamplerYcbcrConversion* pYcbcrConversion,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateSamplerYcbcrConversionKHR encode");
-    mImpl->log("start vkCreateSamplerYcbcrConversionKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSamplerYcbcrConversionCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -17577,90 +20345,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkSamplerYcbcrConversionCreateInfo*)pool->alloc(sizeof(const VkSamplerYcbcrConversionCreateInfo));
-        deepcopy_VkSamplerYcbcrConversionCreateInfo(pool, pCreateInfo, (VkSamplerYcbcrConversionCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkSamplerYcbcrConversionCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkSamplerYcbcrConversionCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkSamplerYcbcrConversionCreateInfo(mImpl->resources(), (VkSamplerYcbcrConversionCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkSamplerYcbcrConversionCreateInfo(sResourceTracker, (VkSamplerYcbcrConversionCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1151;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1151, 1);
-        countingStream->write((uint64_t*)&cgen_var_1151, 1 * 8);
-        marshal_VkSamplerYcbcrConversionCreateInfo(countingStream, (VkSamplerYcbcrConversionCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkSamplerYcbcrConversionCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSamplerYcbcrConversionCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1152 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1152);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_1153;
-        countingStream->handleMapping()->mapHandles_VkSamplerYcbcrConversion_u64(pYcbcrConversion, &cgen_var_1153, 1);
-        countingStream->write((uint64_t*)&cgen_var_1153, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateSamplerYcbcrConversionKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateSamplerYcbcrConversionKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateSamplerYcbcrConversionKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateSamplerYcbcrConversionKHR = OP_vkCreateSamplerYcbcrConversionKHR;
-    stream->write(&opcode_vkCreateSamplerYcbcrConversionKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateSamplerYcbcrConversionKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1154;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1154, 1);
-    stream->write((uint64_t*)&cgen_var_1154, 1 * 8);
-    marshal_VkSamplerYcbcrConversionCreateInfo(stream, (VkSamplerYcbcrConversionCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateSamplerYcbcrConversionKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateSamplerYcbcrConversionKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkSamplerYcbcrConversionCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSamplerYcbcrConversionCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1155 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1155);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_1156;
-    stream->handleMapping()->mapHandles_VkSamplerYcbcrConversion_u64(pYcbcrConversion, &cgen_var_1156, 1);
-    stream->write((uint64_t*)&cgen_var_1156, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateSamplerYcbcrConversionKHR readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_1157;
-    stream->read((uint64_t*)&cgen_var_1157, 8);
-    stream->handleMapping()->mapHandles_u64_VkSamplerYcbcrConversion(&cgen_var_1157, (VkSamplerYcbcrConversion*)pYcbcrConversion, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pYcbcrConversion));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSamplerYcbcrConversion(&cgen_var_3, (VkSamplerYcbcrConversion*)pYcbcrConversion, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateSamplerYcbcrConversionKHR returnUnmarshal");
     VkResult vkCreateSamplerYcbcrConversionKHR_VkResult_return = (VkResult)0;
     stream->read(&vkCreateSamplerYcbcrConversionKHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateSamplerYcbcrConversionKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateSamplerYcbcrConversionKHR_VkResult_return;
 }
 
 void VkEncoder::vkDestroySamplerYcbcrConversionKHR(
     VkDevice device,
     VkSamplerYcbcrConversion ycbcrConversion,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroySamplerYcbcrConversionKHR encode");
-    mImpl->log("start vkDestroySamplerYcbcrConversionKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSamplerYcbcrConversion local_ycbcrConversion;
     VkAllocationCallbacks* local_pAllocator;
@@ -17670,51 +20442,61 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1158;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1158, 1);
-        countingStream->write((uint64_t*)&cgen_var_1158, 1 * 8);
-        uint64_t cgen_var_1159;
-        countingStream->handleMapping()->mapHandles_VkSamplerYcbcrConversion_u64(&local_ycbcrConversion, &cgen_var_1159, 1);
-        countingStream->write((uint64_t*)&cgen_var_1159, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_1160 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1160);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroySamplerYcbcrConversionKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroySamplerYcbcrConversionKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroySamplerYcbcrConversionKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroySamplerYcbcrConversionKHR = OP_vkDestroySamplerYcbcrConversionKHR;
-    stream->write(&opcode_vkDestroySamplerYcbcrConversionKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroySamplerYcbcrConversionKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1161;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1161, 1);
-    stream->write((uint64_t*)&cgen_var_1161, 1 * 8);
-    uint64_t cgen_var_1162;
-    stream->handleMapping()->mapHandles_VkSamplerYcbcrConversion_u64(&local_ycbcrConversion, &cgen_var_1162, 1);
-    stream->write((uint64_t*)&cgen_var_1162, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroySamplerYcbcrConversionKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroySamplerYcbcrConversionKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSamplerYcbcrConversion((*&local_ycbcrConversion));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_1163 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1163);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroySamplerYcbcrConversionKHR readParams");
-    AEMU_SCOPED_TRACE("vkDestroySamplerYcbcrConversionKHR returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkSamplerYcbcrConversion((VkSamplerYcbcrConversion*)&ycbcrConversion);
-    mImpl->log("finish vkDestroySamplerYcbcrConversionKHR");;
+    sResourceTracker->destroyMapping()->mapHandles_VkSamplerYcbcrConversion((VkSamplerYcbcrConversion*)&ycbcrConversion);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
@@ -17722,16 +20504,14 @@
 VkResult VkEncoder::vkBindBufferMemory2KHR(
     VkDevice device,
     uint32_t bindInfoCount,
-    const VkBindBufferMemoryInfo* pBindInfos)
+    const VkBindBufferMemoryInfo* pBindInfos,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkBindBufferMemory2KHR encode");
-    mImpl->log("start vkBindBufferMemory2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     uint32_t local_bindInfoCount;
     VkBindBufferMemoryInfo* local_pBindInfos;
@@ -17743,64 +20523,68 @@
         local_pBindInfos = (VkBindBufferMemoryInfo*)pool->alloc(((bindInfoCount)) * sizeof(const VkBindBufferMemoryInfo));
         for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
         {
-            deepcopy_VkBindBufferMemoryInfo(pool, pBindInfos + i, (VkBindBufferMemoryInfo*)(local_pBindInfos + i));
+            deepcopy_VkBindBufferMemoryInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pBindInfos + i, (VkBindBufferMemoryInfo*)(local_pBindInfos + i));
         }
     }
     if (local_pBindInfos)
     {
         for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
         {
-            transform_tohost_VkBindBufferMemoryInfo(mImpl->resources(), (VkBindBufferMemoryInfo*)(local_pBindInfos + i));
+            transform_tohost_VkBindBufferMemoryInfo(sResourceTracker, (VkBindBufferMemoryInfo*)(local_pBindInfos + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1164;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1164, 1);
-        countingStream->write((uint64_t*)&cgen_var_1164, 1 * 8);
-        countingStream->write((uint32_t*)&local_bindInfoCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
         {
-            marshal_VkBindBufferMemoryInfo(countingStream, (VkBindBufferMemoryInfo*)(local_pBindInfos + i));
+            count_VkBindBufferMemoryInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBindBufferMemoryInfo*)(local_pBindInfos + i), countPtr);
         }
     }
-    uint32_t packetSize_vkBindBufferMemory2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkBindBufferMemory2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkBindBufferMemory2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkBindBufferMemory2KHR = OP_vkBindBufferMemory2KHR;
-    stream->write(&opcode_vkBindBufferMemory2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkBindBufferMemory2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_1165;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1165, 1);
-    stream->write((uint64_t*)&cgen_var_1165, 1 * 8);
-    stream->write((uint32_t*)&local_bindInfoCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkBindBufferMemory2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkBindBufferMemory2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_bindInfoCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
     {
-        marshal_VkBindBufferMemoryInfo(stream, (VkBindBufferMemoryInfo*)(local_pBindInfos + i));
+        reservedmarshal_VkBindBufferMemoryInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBindBufferMemoryInfo*)(local_pBindInfos + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkBindBufferMemory2KHR readParams");
-    AEMU_SCOPED_TRACE("vkBindBufferMemory2KHR returnUnmarshal");
     VkResult vkBindBufferMemory2KHR_VkResult_return = (VkResult)0;
     stream->read(&vkBindBufferMemory2KHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkBindBufferMemory2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkBindBufferMemory2KHR_VkResult_return;
 }
 
 VkResult VkEncoder::vkBindImageMemory2KHR(
     VkDevice device,
     uint32_t bindInfoCount,
-    const VkBindImageMemoryInfo* pBindInfos)
+    const VkBindImageMemoryInfo* pBindInfos,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkBindImageMemory2KHR encode");
-    mImpl->log("start vkBindImageMemory2KHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     uint32_t local_bindInfoCount;
     VkBindImageMemoryInfo* local_pBindInfos;
@@ -17812,66 +20596,72 @@
         local_pBindInfos = (VkBindImageMemoryInfo*)pool->alloc(((bindInfoCount)) * sizeof(const VkBindImageMemoryInfo));
         for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
         {
-            deepcopy_VkBindImageMemoryInfo(pool, pBindInfos + i, (VkBindImageMemoryInfo*)(local_pBindInfos + i));
+            deepcopy_VkBindImageMemoryInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pBindInfos + i, (VkBindImageMemoryInfo*)(local_pBindInfos + i));
         }
     }
     if (local_pBindInfos)
     {
         for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
         {
-            transform_tohost_VkBindImageMemoryInfo(mImpl->resources(), (VkBindImageMemoryInfo*)(local_pBindInfos + i));
+            transform_tohost_VkBindImageMemoryInfo(sResourceTracker, (VkBindImageMemoryInfo*)(local_pBindInfos + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1166;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1166, 1);
-        countingStream->write((uint64_t*)&cgen_var_1166, 1 * 8);
-        countingStream->write((uint32_t*)&local_bindInfoCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
         {
-            marshal_VkBindImageMemoryInfo(countingStream, (VkBindImageMemoryInfo*)(local_pBindInfos + i));
+            count_VkBindImageMemoryInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBindImageMemoryInfo*)(local_pBindInfos + i), countPtr);
         }
     }
-    uint32_t packetSize_vkBindImageMemory2KHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkBindImageMemory2KHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkBindImageMemory2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkBindImageMemory2KHR = OP_vkBindImageMemory2KHR;
-    stream->write(&opcode_vkBindImageMemory2KHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkBindImageMemory2KHR, sizeof(uint32_t));
-    uint64_t cgen_var_1167;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1167, 1);
-    stream->write((uint64_t*)&cgen_var_1167, 1 * 8);
-    stream->write((uint32_t*)&local_bindInfoCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkBindImageMemory2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkBindImageMemory2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_bindInfoCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
     {
-        marshal_VkBindImageMemoryInfo(stream, (VkBindImageMemoryInfo*)(local_pBindInfos + i));
+        reservedmarshal_VkBindImageMemoryInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBindImageMemoryInfo*)(local_pBindInfos + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkBindImageMemory2KHR readParams");
-    AEMU_SCOPED_TRACE("vkBindImageMemory2KHR returnUnmarshal");
     VkResult vkBindImageMemory2KHR_VkResult_return = (VkResult)0;
     stream->read(&vkBindImageMemory2KHR_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkBindImageMemory2KHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkBindImageMemory2KHR_VkResult_return;
 }
 
 #endif
+#ifdef VK_KHR_portability_subset
+#endif
 #ifdef VK_KHR_maintenance3
 void VkEncoder::vkGetDescriptorSetLayoutSupportKHR(
     VkDevice device,
     const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
-    VkDescriptorSetLayoutSupport* pSupport)
+    VkDescriptorSetLayoutSupport* pSupport,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetDescriptorSetLayoutSupportKHR encode");
-    mImpl->log("start vkGetDescriptorSetLayoutSupportKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDescriptorSetLayoutCreateInfo* local_pCreateInfo;
     local_device = device;
@@ -17879,38 +20669,46 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkDescriptorSetLayoutCreateInfo*)pool->alloc(sizeof(const VkDescriptorSetLayoutCreateInfo));
-        deepcopy_VkDescriptorSetLayoutCreateInfo(pool, pCreateInfo, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkDescriptorSetLayoutCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
     }
     if (local_pCreateInfo)
     {
-        transform_tohost_VkDescriptorSetLayoutCreateInfo(mImpl->resources(), (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkDescriptorSetLayoutCreateInfo(sResourceTracker, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1168;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1168, 1);
-        countingStream->write((uint64_t*)&cgen_var_1168, 1 * 8);
-        marshal_VkDescriptorSetLayoutCreateInfo(countingStream, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
-        marshal_VkDescriptorSetLayoutSupport(countingStream, (VkDescriptorSetLayoutSupport*)(pSupport));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDescriptorSetLayoutCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo), countPtr);
+        count_VkDescriptorSetLayoutSupport(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorSetLayoutSupport*)(pSupport), countPtr);
     }
-    uint32_t packetSize_vkGetDescriptorSetLayoutSupportKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetDescriptorSetLayoutSupportKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDescriptorSetLayoutSupportKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetDescriptorSetLayoutSupportKHR = OP_vkGetDescriptorSetLayoutSupportKHR;
-    stream->write(&opcode_vkGetDescriptorSetLayoutSupportKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetDescriptorSetLayoutSupportKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1169;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1169, 1);
-    stream->write((uint64_t*)&cgen_var_1169, 1 * 8);
-    marshal_VkDescriptorSetLayoutCreateInfo(stream, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo));
-    marshal_VkDescriptorSetLayoutSupport(stream, (VkDescriptorSetLayoutSupport*)(pSupport));
-    AEMU_SCOPED_TRACE("vkGetDescriptorSetLayoutSupportKHR readParams");
-    unmarshal_VkDescriptorSetLayoutSupport(stream, (VkDescriptorSetLayoutSupport*)(pSupport));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDescriptorSetLayoutSupportKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDescriptorSetLayoutSupportKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDescriptorSetLayoutCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorSetLayoutCreateInfo*)(local_pCreateInfo), streamPtrPtr);
+    reservedmarshal_VkDescriptorSetLayoutSupport(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorSetLayoutSupport*)(pSupport), streamPtrPtr);
+    unmarshal_VkDescriptorSetLayoutSupport(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorSetLayoutSupport*)(pSupport));
     if (pSupport)
     {
-        transform_fromhost_VkDescriptorSetLayoutSupport(mImpl->resources(), (VkDescriptorSetLayoutSupport*)(pSupport));
+        transform_fromhost_VkDescriptorSetLayoutSupport(sResourceTracker, (VkDescriptorSetLayoutSupport*)(pSupport));
     }
-    AEMU_SCOPED_TRACE("vkGetDescriptorSetLayoutSupportKHR returnUnmarshal");
-    mImpl->log("finish vkGetDescriptorSetLayoutSupportKHR");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
@@ -17922,16 +20720,14 @@
     VkBuffer countBuffer,
     VkDeviceSize countBufferOffset,
     uint32_t maxDrawCount,
-    uint32_t stride)
+    uint32_t stride,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdDrawIndirectCountKHR encode");
-    mImpl->log("start vkCmdDrawIndirectCountKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkBuffer local_buffer;
     VkDeviceSize local_offset;
@@ -17946,43 +20742,57 @@
     local_countBufferOffset = countBufferOffset;
     local_maxDrawCount = maxDrawCount;
     local_stride = stride;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1170;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1170, 1);
-        countingStream->write((uint64_t*)&cgen_var_1170, 1 * 8);
-        uint64_t cgen_var_1171;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_1171, 1);
-        countingStream->write((uint64_t*)&cgen_var_1171, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
-        uint64_t cgen_var_1172;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_countBuffer, &cgen_var_1172, 1);
-        countingStream->write((uint64_t*)&cgen_var_1172, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_countBufferOffset, sizeof(VkDeviceSize));
-        countingStream->write((uint32_t*)&local_maxDrawCount, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_stride, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdDrawIndirectCountKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdDrawIndirectCountKHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDrawIndirectCountKHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDrawIndirectCountKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdDrawIndirectCountKHR = OP_vkCmdDrawIndirectCountKHR;
-    stream->write(&opcode_vkCmdDrawIndirectCountKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdDrawIndirectCountKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1173;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1173, 1);
-    stream->write((uint64_t*)&cgen_var_1173, 1 * 8);
-    uint64_t cgen_var_1174;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_1174, 1);
-    stream->write((uint64_t*)&cgen_var_1174, 1 * 8);
-    stream->write((VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
-    uint64_t cgen_var_1175;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_countBuffer, &cgen_var_1175, 1);
-    stream->write((uint64_t*)&cgen_var_1175, 1 * 8);
-    stream->write((VkDeviceSize*)&local_countBufferOffset, sizeof(VkDeviceSize));
-    stream->write((uint32_t*)&local_maxDrawCount, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_stride, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdDrawIndirectCountKHR readParams");
-    AEMU_SCOPED_TRACE("vkCmdDrawIndirectCountKHR returnUnmarshal");
-    mImpl->log("finish vkCmdDrawIndirectCountKHR");;
+    memcpy(streamPtr, &opcode_vkCmdDrawIndirectCountKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDrawIndirectCountKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_buffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&local_countBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_countBufferOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_maxDrawCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_stride, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdDrawIndexedIndirectCountKHR(
@@ -17992,16 +20802,14 @@
     VkBuffer countBuffer,
     VkDeviceSize countBufferOffset,
     uint32_t maxDrawCount,
-    uint32_t stride)
+    uint32_t stride,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdDrawIndexedIndirectCountKHR encode");
-    mImpl->log("start vkCmdDrawIndexedIndirectCountKHR");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkBuffer local_buffer;
     VkDeviceSize local_offset;
@@ -18016,98 +20824,1704 @@
     local_countBufferOffset = countBufferOffset;
     local_maxDrawCount = maxDrawCount;
     local_stride = stride;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1176;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1176, 1);
-        countingStream->write((uint64_t*)&cgen_var_1176, 1 * 8);
-        uint64_t cgen_var_1177;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_1177, 1);
-        countingStream->write((uint64_t*)&cgen_var_1177, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
-        uint64_t cgen_var_1178;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_countBuffer, &cgen_var_1178, 1);
-        countingStream->write((uint64_t*)&cgen_var_1178, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_countBufferOffset, sizeof(VkDeviceSize));
-        countingStream->write((uint32_t*)&local_maxDrawCount, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_stride, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdDrawIndexedIndirectCountKHR = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdDrawIndexedIndirectCountKHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDrawIndexedIndirectCountKHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDrawIndexedIndirectCountKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdDrawIndexedIndirectCountKHR = OP_vkCmdDrawIndexedIndirectCountKHR;
-    stream->write(&opcode_vkCmdDrawIndexedIndirectCountKHR, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdDrawIndexedIndirectCountKHR, sizeof(uint32_t));
-    uint64_t cgen_var_1179;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1179, 1);
-    stream->write((uint64_t*)&cgen_var_1179, 1 * 8);
-    uint64_t cgen_var_1180;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_1180, 1);
-    stream->write((uint64_t*)&cgen_var_1180, 1 * 8);
-    stream->write((VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
-    uint64_t cgen_var_1181;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_countBuffer, &cgen_var_1181, 1);
-    stream->write((uint64_t*)&cgen_var_1181, 1 * 8);
-    stream->write((VkDeviceSize*)&local_countBufferOffset, sizeof(VkDeviceSize));
-    stream->write((uint32_t*)&local_maxDrawCount, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_stride, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdDrawIndexedIndirectCountKHR readParams");
-    AEMU_SCOPED_TRACE("vkCmdDrawIndexedIndirectCountKHR returnUnmarshal");
-    mImpl->log("finish vkCmdDrawIndexedIndirectCountKHR");;
+    memcpy(streamPtr, &opcode_vkCmdDrawIndexedIndirectCountKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDrawIndexedIndirectCountKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_buffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&local_countBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_countBufferOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_maxDrawCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_stride, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
+#ifdef VK_KHR_shader_subgroup_extended_types
+#endif
 #ifdef VK_KHR_8bit_storage
 #endif
+#ifdef VK_KHR_shader_atomic_int64
+#endif
+#ifdef VK_KHR_shader_clock
+#endif
+#ifdef VK_KHR_driver_properties
+#endif
+#ifdef VK_KHR_shader_float_controls
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+VkResult VkEncoder::vkGetSemaphoreCounterValueKHR(
+    VkDevice device,
+    VkSemaphore semaphore,
+    uint64_t* pValue,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkSemaphore local_semaphore;
+    local_device = device;
+    local_semaphore = semaphore;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint64_t);
+    }
+    uint32_t packetSize_vkGetSemaphoreCounterValueKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetSemaphoreCounterValueKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetSemaphoreCounterValueKHR = OP_vkGetSemaphoreCounterValueKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetSemaphoreCounterValueKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetSemaphoreCounterValueKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSemaphore((*&local_semaphore));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint64_t*)pValue, sizeof(uint64_t));
+    *streamPtrPtr += sizeof(uint64_t);
+    stream->read((uint64_t*)pValue, sizeof(uint64_t));
+    VkResult vkGetSemaphoreCounterValueKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkGetSemaphoreCounterValueKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetSemaphoreCounterValueKHR_VkResult_return;
+}
+
+VkResult VkEncoder::vkWaitSemaphoresKHR(
+    VkDevice device,
+    const VkSemaphoreWaitInfo* pWaitInfo,
+    uint64_t timeout,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkSemaphoreWaitInfo* local_pWaitInfo;
+    uint64_t local_timeout;
+    local_device = device;
+    local_pWaitInfo = nullptr;
+    if (pWaitInfo)
+    {
+        local_pWaitInfo = (VkSemaphoreWaitInfo*)pool->alloc(sizeof(const VkSemaphoreWaitInfo));
+        deepcopy_VkSemaphoreWaitInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pWaitInfo, (VkSemaphoreWaitInfo*)(local_pWaitInfo));
+    }
+    local_timeout = timeout;
+    if (local_pWaitInfo)
+    {
+        transform_tohost_VkSemaphoreWaitInfo(sResourceTracker, (VkSemaphoreWaitInfo*)(local_pWaitInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkSemaphoreWaitInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSemaphoreWaitInfo*)(local_pWaitInfo), countPtr);
+        *countPtr += sizeof(uint64_t);
+    }
+    uint32_t packetSize_vkWaitSemaphoresKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkWaitSemaphoresKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkWaitSemaphoresKHR = OP_vkWaitSemaphoresKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkWaitSemaphoresKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkWaitSemaphoresKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkSemaphoreWaitInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSemaphoreWaitInfo*)(local_pWaitInfo), streamPtrPtr);
+    memcpy(*streamPtrPtr, (uint64_t*)&local_timeout, sizeof(uint64_t));
+    *streamPtrPtr += sizeof(uint64_t);
+    VkResult vkWaitSemaphoresKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkWaitSemaphoresKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkWaitSemaphoresKHR_VkResult_return;
+}
+
+VkResult VkEncoder::vkSignalSemaphoreKHR(
+    VkDevice device,
+    const VkSemaphoreSignalInfo* pSignalInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkSemaphoreSignalInfo* local_pSignalInfo;
+    local_device = device;
+    local_pSignalInfo = nullptr;
+    if (pSignalInfo)
+    {
+        local_pSignalInfo = (VkSemaphoreSignalInfo*)pool->alloc(sizeof(const VkSemaphoreSignalInfo));
+        deepcopy_VkSemaphoreSignalInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSignalInfo, (VkSemaphoreSignalInfo*)(local_pSignalInfo));
+    }
+    if (local_pSignalInfo)
+    {
+        transform_tohost_VkSemaphoreSignalInfo(sResourceTracker, (VkSemaphoreSignalInfo*)(local_pSignalInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkSemaphoreSignalInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSemaphoreSignalInfo*)(local_pSignalInfo), countPtr);
+    }
+    uint32_t packetSize_vkSignalSemaphoreKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkSignalSemaphoreKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkSignalSemaphoreKHR = OP_vkSignalSemaphoreKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkSignalSemaphoreKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkSignalSemaphoreKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkSemaphoreSignalInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSemaphoreSignalInfo*)(local_pSignalInfo), streamPtrPtr);
+    VkResult vkSignalSemaphoreKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkSignalSemaphoreKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkSignalSemaphoreKHR_VkResult_return;
+}
+
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+VkResult VkEncoder::vkGetPhysicalDeviceFragmentShadingRatesKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pFragmentShadingRateCount,
+    VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkPhysicalDevice local_physicalDevice;
+    local_physicalDevice = physicalDevice;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pFragmentShadingRateCount)
+        {
+            *countPtr += sizeof(uint32_t);
+        }
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pFragmentShadingRates)
+        {
+            if (pFragmentShadingRateCount)
+            {
+                for (uint32_t i = 0; i < (uint32_t)(*(pFragmentShadingRateCount)); ++i)
+                {
+                    count_VkPhysicalDeviceFragmentShadingRateKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceFragmentShadingRateKHR*)(pFragmentShadingRates + i), countPtr);
+                }
+            }
+        }
+    }
+    uint32_t packetSize_vkGetPhysicalDeviceFragmentShadingRatesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceFragmentShadingRatesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetPhysicalDeviceFragmentShadingRatesKHR = OP_vkGetPhysicalDeviceFragmentShadingRatesKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceFragmentShadingRatesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceFragmentShadingRatesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pFragmentShadingRateCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pFragmentShadingRateCount)
+    {
+        memcpy(*streamPtrPtr, (uint32_t*)pFragmentShadingRateCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pFragmentShadingRates;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pFragmentShadingRates)
+    {
+        for (uint32_t i = 0; i < (uint32_t)(*(pFragmentShadingRateCount)); ++i)
+        {
+            reservedmarshal_VkPhysicalDeviceFragmentShadingRateKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceFragmentShadingRateKHR*)(pFragmentShadingRates + i), streamPtrPtr);
+        }
+    }
+    // WARNING PTR CHECK
+    uint32_t* check_pFragmentShadingRateCount;
+    check_pFragmentShadingRateCount = (uint32_t*)(uintptr_t)stream->getBe64();
+    if (pFragmentShadingRateCount)
+    {
+        if (!(check_pFragmentShadingRateCount))
+        {
+            fprintf(stderr, "fatal: pFragmentShadingRateCount inconsistent between guest and host\n");
+        }
+        stream->read((uint32_t*)pFragmentShadingRateCount, sizeof(uint32_t));
+    }
+    // WARNING PTR CHECK
+    VkPhysicalDeviceFragmentShadingRateKHR* check_pFragmentShadingRates;
+    check_pFragmentShadingRates = (VkPhysicalDeviceFragmentShadingRateKHR*)(uintptr_t)stream->getBe64();
+    if (pFragmentShadingRates)
+    {
+        if (!(check_pFragmentShadingRates))
+        {
+            fprintf(stderr, "fatal: pFragmentShadingRates inconsistent between guest and host\n");
+        }
+        if (pFragmentShadingRateCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pFragmentShadingRateCount)); ++i)
+            {
+                unmarshal_VkPhysicalDeviceFragmentShadingRateKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceFragmentShadingRateKHR*)(pFragmentShadingRates + i));
+            }
+        }
+    }
+    if (pFragmentShadingRateCount)
+    {
+        if (pFragmentShadingRates)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pFragmentShadingRateCount)); ++i)
+            {
+                transform_fromhost_VkPhysicalDeviceFragmentShadingRateKHR(sResourceTracker, (VkPhysicalDeviceFragmentShadingRateKHR*)(pFragmentShadingRates + i));
+            }
+        }
+    }
+    VkResult vkGetPhysicalDeviceFragmentShadingRatesKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkGetPhysicalDeviceFragmentShadingRatesKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetPhysicalDeviceFragmentShadingRatesKHR_VkResult_return;
+}
+
+void VkEncoder::vkCmdSetFragmentShadingRateKHR(
+    VkCommandBuffer commandBuffer,
+    const VkExtent2D* pFragmentSize,
+    const VkFragmentShadingRateCombinerOpKHR combinerOps[2],
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkExtent2D* local_pFragmentSize;
+    VkFragmentShadingRateCombinerOpKHR local_combinerOps[2];
+    local_commandBuffer = commandBuffer;
+    local_pFragmentSize = nullptr;
+    if (pFragmentSize)
+    {
+        local_pFragmentSize = (VkExtent2D*)pool->alloc(sizeof(const VkExtent2D));
+        deepcopy_VkExtent2D(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pFragmentSize, (VkExtent2D*)(local_pFragmentSize));
+    }
+    memcpy(local_combinerOps, combinerOps, 2 * sizeof(const VkFragmentShadingRateCombinerOpKHR));
+    if (local_pFragmentSize)
+    {
+        transform_tohost_VkExtent2D(sResourceTracker, (VkExtent2D*)(local_pFragmentSize));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkExtent2D(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExtent2D*)(local_pFragmentSize), countPtr);
+        *countPtr += 2 * sizeof(VkFragmentShadingRateCombinerOpKHR);
+    }
+    uint32_t packetSize_vkCmdSetFragmentShadingRateKHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetFragmentShadingRateKHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetFragmentShadingRateKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetFragmentShadingRateKHR = OP_vkCmdSetFragmentShadingRateKHR;
+    memcpy(streamPtr, &opcode_vkCmdSetFragmentShadingRateKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetFragmentShadingRateKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkExtent2D(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExtent2D*)(local_pFragmentSize), streamPtrPtr);
+    memcpy(*streamPtrPtr, (VkFragmentShadingRateCombinerOpKHR*)local_combinerOps, 2 * sizeof(VkFragmentShadingRateCombinerOpKHR));
+    *streamPtrPtr += 2 * sizeof(VkFragmentShadingRateCombinerOpKHR);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+#endif
+#ifdef VK_KHR_buffer_device_address
+VkDeviceAddress VkEncoder::vkGetBufferDeviceAddressKHR(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkBufferDeviceAddressInfo* local_pInfo;
+    local_device = device;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkBufferDeviceAddressInfo*)pool->alloc(sizeof(const VkBufferDeviceAddressInfo));
+        deepcopy_VkBufferDeviceAddressInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkBufferDeviceAddressInfo*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkBufferDeviceAddressInfo(sResourceTracker, (VkBufferDeviceAddressInfo*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkBufferDeviceAddressInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferDeviceAddressInfo*)(local_pInfo), countPtr);
+    }
+    uint32_t packetSize_vkGetBufferDeviceAddressKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetBufferDeviceAddressKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetBufferDeviceAddressKHR = OP_vkGetBufferDeviceAddressKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetBufferDeviceAddressKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetBufferDeviceAddressKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkBufferDeviceAddressInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferDeviceAddressInfo*)(local_pInfo), streamPtrPtr);
+    VkDeviceAddress vkGetBufferDeviceAddressKHR_VkDeviceAddress_return = (VkDeviceAddress)0;
+    stream->read(&vkGetBufferDeviceAddressKHR_VkDeviceAddress_return, sizeof(VkDeviceAddress));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetBufferDeviceAddressKHR_VkDeviceAddress_return;
+}
+
+uint64_t VkEncoder::vkGetBufferOpaqueCaptureAddressKHR(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkBufferDeviceAddressInfo* local_pInfo;
+    local_device = device;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkBufferDeviceAddressInfo*)pool->alloc(sizeof(const VkBufferDeviceAddressInfo));
+        deepcopy_VkBufferDeviceAddressInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkBufferDeviceAddressInfo*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkBufferDeviceAddressInfo(sResourceTracker, (VkBufferDeviceAddressInfo*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkBufferDeviceAddressInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferDeviceAddressInfo*)(local_pInfo), countPtr);
+    }
+    uint32_t packetSize_vkGetBufferOpaqueCaptureAddressKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetBufferOpaqueCaptureAddressKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetBufferOpaqueCaptureAddressKHR = OP_vkGetBufferOpaqueCaptureAddressKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetBufferOpaqueCaptureAddressKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetBufferOpaqueCaptureAddressKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkBufferDeviceAddressInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferDeviceAddressInfo*)(local_pInfo), streamPtrPtr);
+    uint64_t vkGetBufferOpaqueCaptureAddressKHR_uint64_t_return = (uint64_t)0;
+    stream->read(&vkGetBufferOpaqueCaptureAddressKHR_uint64_t_return, sizeof(uint64_t));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetBufferOpaqueCaptureAddressKHR_uint64_t_return;
+}
+
+uint64_t VkEncoder::vkGetDeviceMemoryOpaqueCaptureAddressKHR(
+    VkDevice device,
+    const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkDeviceMemoryOpaqueCaptureAddressInfo* local_pInfo;
+    local_device = device;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkDeviceMemoryOpaqueCaptureAddressInfo*)pool->alloc(sizeof(const VkDeviceMemoryOpaqueCaptureAddressInfo));
+        deepcopy_VkDeviceMemoryOpaqueCaptureAddressInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkDeviceMemoryOpaqueCaptureAddressInfo*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkDeviceMemoryOpaqueCaptureAddressInfo(sResourceTracker, (VkDeviceMemoryOpaqueCaptureAddressInfo*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDeviceMemoryOpaqueCaptureAddressInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDeviceMemoryOpaqueCaptureAddressInfo*)(local_pInfo), countPtr);
+    }
+    uint32_t packetSize_vkGetDeviceMemoryOpaqueCaptureAddressKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDeviceMemoryOpaqueCaptureAddressKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetDeviceMemoryOpaqueCaptureAddressKHR = OP_vkGetDeviceMemoryOpaqueCaptureAddressKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDeviceMemoryOpaqueCaptureAddressKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDeviceMemoryOpaqueCaptureAddressKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDeviceMemoryOpaqueCaptureAddressInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDeviceMemoryOpaqueCaptureAddressInfo*)(local_pInfo), streamPtrPtr);
+    uint64_t vkGetDeviceMemoryOpaqueCaptureAddressKHR_uint64_t_return = (uint64_t)0;
+    stream->read(&vkGetDeviceMemoryOpaqueCaptureAddressKHR_uint64_t_return, sizeof(uint64_t));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetDeviceMemoryOpaqueCaptureAddressKHR_uint64_t_return;
+}
+
+#endif
+#ifdef VK_KHR_deferred_host_operations
+VkResult VkEncoder::vkCreateDeferredOperationKHR(
+    VkDevice device,
+    const VkAllocationCallbacks* pAllocator,
+    VkDeferredOperationKHR* pDeferredOperation,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkAllocationCallbacks* local_pAllocator;
+    local_device = device;
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkCreateDeferredOperationKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateDeferredOperationKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCreateDeferredOperationKHR = OP_vkCreateDeferredOperationKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateDeferredOperationKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateDeferredOperationKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    uint64_t cgen_var_2 = (uint64_t)(*pDeferredOperation);
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    (*pDeferredOperation) = (VkDeferredOperationKHR)stream->getBe64();
+    VkResult vkCreateDeferredOperationKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkCreateDeferredOperationKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCreateDeferredOperationKHR_VkResult_return;
+}
+
+void VkEncoder::vkDestroyDeferredOperationKHR(
+    VkDevice device,
+    VkDeferredOperationKHR operation,
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkDeferredOperationKHR local_operation;
+    VkAllocationCallbacks* local_pAllocator;
+    local_device = device;
+    local_operation = operation;
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += 8;
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+    }
+    uint32_t packetSize_vkDestroyDeferredOperationKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyDeferredOperationKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkDestroyDeferredOperationKHR = OP_vkDestroyDeferredOperationKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyDeferredOperationKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyDeferredOperationKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1 = (uint64_t)local_operation;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+uint32_t VkEncoder::vkGetDeferredOperationMaxConcurrencyKHR(
+    VkDevice device,
+    VkDeferredOperationKHR operation,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkDeferredOperationKHR local_operation;
+    local_device = device;
+    local_operation = operation;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkGetDeferredOperationMaxConcurrencyKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDeferredOperationMaxConcurrencyKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetDeferredOperationMaxConcurrencyKHR = OP_vkGetDeferredOperationMaxConcurrencyKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDeferredOperationMaxConcurrencyKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDeferredOperationMaxConcurrencyKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1 = (uint64_t)local_operation;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    uint32_t vkGetDeferredOperationMaxConcurrencyKHR_uint32_t_return = (uint32_t)0;
+    stream->read(&vkGetDeferredOperationMaxConcurrencyKHR_uint32_t_return, sizeof(uint32_t));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetDeferredOperationMaxConcurrencyKHR_uint32_t_return;
+}
+
+VkResult VkEncoder::vkGetDeferredOperationResultKHR(
+    VkDevice device,
+    VkDeferredOperationKHR operation,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkDeferredOperationKHR local_operation;
+    local_device = device;
+    local_operation = operation;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkGetDeferredOperationResultKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDeferredOperationResultKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetDeferredOperationResultKHR = OP_vkGetDeferredOperationResultKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDeferredOperationResultKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDeferredOperationResultKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1 = (uint64_t)local_operation;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    VkResult vkGetDeferredOperationResultKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkGetDeferredOperationResultKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetDeferredOperationResultKHR_VkResult_return;
+}
+
+VkResult VkEncoder::vkDeferredOperationJoinKHR(
+    VkDevice device,
+    VkDeferredOperationKHR operation,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkDeferredOperationKHR local_operation;
+    local_device = device;
+    local_operation = operation;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkDeferredOperationJoinKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDeferredOperationJoinKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkDeferredOperationJoinKHR = OP_vkDeferredOperationJoinKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDeferredOperationJoinKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDeferredOperationJoinKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1 = (uint64_t)local_operation;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    VkResult vkDeferredOperationJoinKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkDeferredOperationJoinKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkDeferredOperationJoinKHR_VkResult_return;
+}
+
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+VkResult VkEncoder::vkGetPipelineExecutablePropertiesKHR(
+    VkDevice device,
+    const VkPipelineInfoKHR* pPipelineInfo,
+    uint32_t* pExecutableCount,
+    VkPipelineExecutablePropertiesKHR* pProperties,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkPipelineInfoKHR* local_pPipelineInfo;
+    local_device = device;
+    local_pPipelineInfo = nullptr;
+    if (pPipelineInfo)
+    {
+        local_pPipelineInfo = (VkPipelineInfoKHR*)pool->alloc(sizeof(const VkPipelineInfoKHR));
+        deepcopy_VkPipelineInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pPipelineInfo, (VkPipelineInfoKHR*)(local_pPipelineInfo));
+    }
+    if (local_pPipelineInfo)
+    {
+        transform_tohost_VkPipelineInfoKHR(sResourceTracker, (VkPipelineInfoKHR*)(local_pPipelineInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPipelineInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineInfoKHR*)(local_pPipelineInfo), countPtr);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pExecutableCount)
+        {
+            *countPtr += sizeof(uint32_t);
+        }
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pProperties)
+        {
+            if (pExecutableCount)
+            {
+                for (uint32_t i = 0; i < (uint32_t)(*(pExecutableCount)); ++i)
+                {
+                    count_VkPipelineExecutablePropertiesKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineExecutablePropertiesKHR*)(pProperties + i), countPtr);
+                }
+            }
+        }
+    }
+    uint32_t packetSize_vkGetPipelineExecutablePropertiesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPipelineExecutablePropertiesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetPipelineExecutablePropertiesKHR = OP_vkGetPipelineExecutablePropertiesKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPipelineExecutablePropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPipelineExecutablePropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPipelineInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineInfoKHR*)(local_pPipelineInfo), streamPtrPtr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pExecutableCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pExecutableCount)
+    {
+        memcpy(*streamPtrPtr, (uint32_t*)pExecutableCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pProperties;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pProperties)
+    {
+        for (uint32_t i = 0; i < (uint32_t)(*(pExecutableCount)); ++i)
+        {
+            reservedmarshal_VkPipelineExecutablePropertiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineExecutablePropertiesKHR*)(pProperties + i), streamPtrPtr);
+        }
+    }
+    // WARNING PTR CHECK
+    uint32_t* check_pExecutableCount;
+    check_pExecutableCount = (uint32_t*)(uintptr_t)stream->getBe64();
+    if (pExecutableCount)
+    {
+        if (!(check_pExecutableCount))
+        {
+            fprintf(stderr, "fatal: pExecutableCount inconsistent between guest and host\n");
+        }
+        stream->read((uint32_t*)pExecutableCount, sizeof(uint32_t));
+    }
+    // WARNING PTR CHECK
+    VkPipelineExecutablePropertiesKHR* check_pProperties;
+    check_pProperties = (VkPipelineExecutablePropertiesKHR*)(uintptr_t)stream->getBe64();
+    if (pProperties)
+    {
+        if (!(check_pProperties))
+        {
+            fprintf(stderr, "fatal: pProperties inconsistent between guest and host\n");
+        }
+        if (pExecutableCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pExecutableCount)); ++i)
+            {
+                unmarshal_VkPipelineExecutablePropertiesKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineExecutablePropertiesKHR*)(pProperties + i));
+            }
+        }
+    }
+    if (pExecutableCount)
+    {
+        if (pProperties)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pExecutableCount)); ++i)
+            {
+                transform_fromhost_VkPipelineExecutablePropertiesKHR(sResourceTracker, (VkPipelineExecutablePropertiesKHR*)(pProperties + i));
+            }
+        }
+    }
+    VkResult vkGetPipelineExecutablePropertiesKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkGetPipelineExecutablePropertiesKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetPipelineExecutablePropertiesKHR_VkResult_return;
+}
+
+VkResult VkEncoder::vkGetPipelineExecutableStatisticsKHR(
+    VkDevice device,
+    const VkPipelineExecutableInfoKHR* pExecutableInfo,
+    uint32_t* pStatisticCount,
+    VkPipelineExecutableStatisticKHR* pStatistics,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkPipelineExecutableInfoKHR* local_pExecutableInfo;
+    local_device = device;
+    local_pExecutableInfo = nullptr;
+    if (pExecutableInfo)
+    {
+        local_pExecutableInfo = (VkPipelineExecutableInfoKHR*)pool->alloc(sizeof(const VkPipelineExecutableInfoKHR));
+        deepcopy_VkPipelineExecutableInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pExecutableInfo, (VkPipelineExecutableInfoKHR*)(local_pExecutableInfo));
+    }
+    if (local_pExecutableInfo)
+    {
+        transform_tohost_VkPipelineExecutableInfoKHR(sResourceTracker, (VkPipelineExecutableInfoKHR*)(local_pExecutableInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPipelineExecutableInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineExecutableInfoKHR*)(local_pExecutableInfo), countPtr);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pStatisticCount)
+        {
+            *countPtr += sizeof(uint32_t);
+        }
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pStatistics)
+        {
+            if (pStatisticCount)
+            {
+                for (uint32_t i = 0; i < (uint32_t)(*(pStatisticCount)); ++i)
+                {
+                    count_VkPipelineExecutableStatisticKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineExecutableStatisticKHR*)(pStatistics + i), countPtr);
+                }
+            }
+        }
+    }
+    uint32_t packetSize_vkGetPipelineExecutableStatisticsKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPipelineExecutableStatisticsKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetPipelineExecutableStatisticsKHR = OP_vkGetPipelineExecutableStatisticsKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPipelineExecutableStatisticsKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPipelineExecutableStatisticsKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPipelineExecutableInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineExecutableInfoKHR*)(local_pExecutableInfo), streamPtrPtr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pStatisticCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pStatisticCount)
+    {
+        memcpy(*streamPtrPtr, (uint32_t*)pStatisticCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pStatistics;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pStatistics)
+    {
+        for (uint32_t i = 0; i < (uint32_t)(*(pStatisticCount)); ++i)
+        {
+            reservedmarshal_VkPipelineExecutableStatisticKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineExecutableStatisticKHR*)(pStatistics + i), streamPtrPtr);
+        }
+    }
+    // WARNING PTR CHECK
+    uint32_t* check_pStatisticCount;
+    check_pStatisticCount = (uint32_t*)(uintptr_t)stream->getBe64();
+    if (pStatisticCount)
+    {
+        if (!(check_pStatisticCount))
+        {
+            fprintf(stderr, "fatal: pStatisticCount inconsistent between guest and host\n");
+        }
+        stream->read((uint32_t*)pStatisticCount, sizeof(uint32_t));
+    }
+    // WARNING PTR CHECK
+    VkPipelineExecutableStatisticKHR* check_pStatistics;
+    check_pStatistics = (VkPipelineExecutableStatisticKHR*)(uintptr_t)stream->getBe64();
+    if (pStatistics)
+    {
+        if (!(check_pStatistics))
+        {
+            fprintf(stderr, "fatal: pStatistics inconsistent between guest and host\n");
+        }
+        if (pStatisticCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pStatisticCount)); ++i)
+            {
+                unmarshal_VkPipelineExecutableStatisticKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineExecutableStatisticKHR*)(pStatistics + i));
+            }
+        }
+    }
+    if (pStatisticCount)
+    {
+        if (pStatistics)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pStatisticCount)); ++i)
+            {
+                transform_fromhost_VkPipelineExecutableStatisticKHR(sResourceTracker, (VkPipelineExecutableStatisticKHR*)(pStatistics + i));
+            }
+        }
+    }
+    VkResult vkGetPipelineExecutableStatisticsKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkGetPipelineExecutableStatisticsKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetPipelineExecutableStatisticsKHR_VkResult_return;
+}
+
+VkResult VkEncoder::vkGetPipelineExecutableInternalRepresentationsKHR(
+    VkDevice device,
+    const VkPipelineExecutableInfoKHR* pExecutableInfo,
+    uint32_t* pInternalRepresentationCount,
+    VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkPipelineExecutableInfoKHR* local_pExecutableInfo;
+    local_device = device;
+    local_pExecutableInfo = nullptr;
+    if (pExecutableInfo)
+    {
+        local_pExecutableInfo = (VkPipelineExecutableInfoKHR*)pool->alloc(sizeof(const VkPipelineExecutableInfoKHR));
+        deepcopy_VkPipelineExecutableInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pExecutableInfo, (VkPipelineExecutableInfoKHR*)(local_pExecutableInfo));
+    }
+    if (local_pExecutableInfo)
+    {
+        transform_tohost_VkPipelineExecutableInfoKHR(sResourceTracker, (VkPipelineExecutableInfoKHR*)(local_pExecutableInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPipelineExecutableInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineExecutableInfoKHR*)(local_pExecutableInfo), countPtr);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pInternalRepresentationCount)
+        {
+            *countPtr += sizeof(uint32_t);
+        }
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pInternalRepresentations)
+        {
+            if (pInternalRepresentationCount)
+            {
+                for (uint32_t i = 0; i < (uint32_t)(*(pInternalRepresentationCount)); ++i)
+                {
+                    count_VkPipelineExecutableInternalRepresentationKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineExecutableInternalRepresentationKHR*)(pInternalRepresentations + i), countPtr);
+                }
+            }
+        }
+    }
+    uint32_t packetSize_vkGetPipelineExecutableInternalRepresentationsKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPipelineExecutableInternalRepresentationsKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetPipelineExecutableInternalRepresentationsKHR = OP_vkGetPipelineExecutableInternalRepresentationsKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPipelineExecutableInternalRepresentationsKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPipelineExecutableInternalRepresentationsKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPipelineExecutableInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineExecutableInfoKHR*)(local_pExecutableInfo), streamPtrPtr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pInternalRepresentationCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pInternalRepresentationCount)
+    {
+        memcpy(*streamPtrPtr, (uint32_t*)pInternalRepresentationCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pInternalRepresentations;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pInternalRepresentations)
+    {
+        for (uint32_t i = 0; i < (uint32_t)(*(pInternalRepresentationCount)); ++i)
+        {
+            reservedmarshal_VkPipelineExecutableInternalRepresentationKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineExecutableInternalRepresentationKHR*)(pInternalRepresentations + i), streamPtrPtr);
+        }
+    }
+    // WARNING PTR CHECK
+    uint32_t* check_pInternalRepresentationCount;
+    check_pInternalRepresentationCount = (uint32_t*)(uintptr_t)stream->getBe64();
+    if (pInternalRepresentationCount)
+    {
+        if (!(check_pInternalRepresentationCount))
+        {
+            fprintf(stderr, "fatal: pInternalRepresentationCount inconsistent between guest and host\n");
+        }
+        stream->read((uint32_t*)pInternalRepresentationCount, sizeof(uint32_t));
+    }
+    // WARNING PTR CHECK
+    VkPipelineExecutableInternalRepresentationKHR* check_pInternalRepresentations;
+    check_pInternalRepresentations = (VkPipelineExecutableInternalRepresentationKHR*)(uintptr_t)stream->getBe64();
+    if (pInternalRepresentations)
+    {
+        if (!(check_pInternalRepresentations))
+        {
+            fprintf(stderr, "fatal: pInternalRepresentations inconsistent between guest and host\n");
+        }
+        if (pInternalRepresentationCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pInternalRepresentationCount)); ++i)
+            {
+                unmarshal_VkPipelineExecutableInternalRepresentationKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPipelineExecutableInternalRepresentationKHR*)(pInternalRepresentations + i));
+            }
+        }
+    }
+    if (pInternalRepresentationCount)
+    {
+        if (pInternalRepresentations)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pInternalRepresentationCount)); ++i)
+            {
+                transform_fromhost_VkPipelineExecutableInternalRepresentationKHR(sResourceTracker, (VkPipelineExecutableInternalRepresentationKHR*)(pInternalRepresentations + i));
+            }
+        }
+    }
+    VkResult vkGetPipelineExecutableInternalRepresentationsKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkGetPipelineExecutableInternalRepresentationsKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetPipelineExecutableInternalRepresentationsKHR_VkResult_return;
+}
+
+#endif
+#ifdef VK_KHR_pipeline_library
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+void VkEncoder::vkCmdCopyBuffer2KHR(
+    VkCommandBuffer commandBuffer,
+    const VkCopyBufferInfo2KHR* pCopyBufferInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkCopyBufferInfo2KHR* local_pCopyBufferInfo;
+    local_commandBuffer = commandBuffer;
+    local_pCopyBufferInfo = nullptr;
+    if (pCopyBufferInfo)
+    {
+        local_pCopyBufferInfo = (VkCopyBufferInfo2KHR*)pool->alloc(sizeof(const VkCopyBufferInfo2KHR));
+        deepcopy_VkCopyBufferInfo2KHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCopyBufferInfo, (VkCopyBufferInfo2KHR*)(local_pCopyBufferInfo));
+    }
+    if (local_pCopyBufferInfo)
+    {
+        transform_tohost_VkCopyBufferInfo2KHR(sResourceTracker, (VkCopyBufferInfo2KHR*)(local_pCopyBufferInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkCopyBufferInfo2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyBufferInfo2KHR*)(local_pCopyBufferInfo), countPtr);
+    }
+    uint32_t packetSize_vkCmdCopyBuffer2KHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdCopyBuffer2KHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdCopyBuffer2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdCopyBuffer2KHR = OP_vkCmdCopyBuffer2KHR;
+    memcpy(streamPtr, &opcode_vkCmdCopyBuffer2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdCopyBuffer2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkCopyBufferInfo2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyBufferInfo2KHR*)(local_pCopyBufferInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdCopyImage2KHR(
+    VkCommandBuffer commandBuffer,
+    const VkCopyImageInfo2KHR* pCopyImageInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkCopyImageInfo2KHR* local_pCopyImageInfo;
+    local_commandBuffer = commandBuffer;
+    local_pCopyImageInfo = nullptr;
+    if (pCopyImageInfo)
+    {
+        local_pCopyImageInfo = (VkCopyImageInfo2KHR*)pool->alloc(sizeof(const VkCopyImageInfo2KHR));
+        deepcopy_VkCopyImageInfo2KHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCopyImageInfo, (VkCopyImageInfo2KHR*)(local_pCopyImageInfo));
+    }
+    if (local_pCopyImageInfo)
+    {
+        transform_tohost_VkCopyImageInfo2KHR(sResourceTracker, (VkCopyImageInfo2KHR*)(local_pCopyImageInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkCopyImageInfo2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyImageInfo2KHR*)(local_pCopyImageInfo), countPtr);
+    }
+    uint32_t packetSize_vkCmdCopyImage2KHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdCopyImage2KHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdCopyImage2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdCopyImage2KHR = OP_vkCmdCopyImage2KHR;
+    memcpy(streamPtr, &opcode_vkCmdCopyImage2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdCopyImage2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkCopyImageInfo2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyImageInfo2KHR*)(local_pCopyImageInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdCopyBufferToImage2KHR(
+    VkCommandBuffer commandBuffer,
+    const VkCopyBufferToImageInfo2KHR* pCopyBufferToImageInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkCopyBufferToImageInfo2KHR* local_pCopyBufferToImageInfo;
+    local_commandBuffer = commandBuffer;
+    local_pCopyBufferToImageInfo = nullptr;
+    if (pCopyBufferToImageInfo)
+    {
+        local_pCopyBufferToImageInfo = (VkCopyBufferToImageInfo2KHR*)pool->alloc(sizeof(const VkCopyBufferToImageInfo2KHR));
+        deepcopy_VkCopyBufferToImageInfo2KHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCopyBufferToImageInfo, (VkCopyBufferToImageInfo2KHR*)(local_pCopyBufferToImageInfo));
+    }
+    if (local_pCopyBufferToImageInfo)
+    {
+        transform_tohost_VkCopyBufferToImageInfo2KHR(sResourceTracker, (VkCopyBufferToImageInfo2KHR*)(local_pCopyBufferToImageInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkCopyBufferToImageInfo2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyBufferToImageInfo2KHR*)(local_pCopyBufferToImageInfo), countPtr);
+    }
+    uint32_t packetSize_vkCmdCopyBufferToImage2KHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdCopyBufferToImage2KHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdCopyBufferToImage2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdCopyBufferToImage2KHR = OP_vkCmdCopyBufferToImage2KHR;
+    memcpy(streamPtr, &opcode_vkCmdCopyBufferToImage2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdCopyBufferToImage2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkCopyBufferToImageInfo2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyBufferToImageInfo2KHR*)(local_pCopyBufferToImageInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdCopyImageToBuffer2KHR(
+    VkCommandBuffer commandBuffer,
+    const VkCopyImageToBufferInfo2KHR* pCopyImageToBufferInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkCopyImageToBufferInfo2KHR* local_pCopyImageToBufferInfo;
+    local_commandBuffer = commandBuffer;
+    local_pCopyImageToBufferInfo = nullptr;
+    if (pCopyImageToBufferInfo)
+    {
+        local_pCopyImageToBufferInfo = (VkCopyImageToBufferInfo2KHR*)pool->alloc(sizeof(const VkCopyImageToBufferInfo2KHR));
+        deepcopy_VkCopyImageToBufferInfo2KHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCopyImageToBufferInfo, (VkCopyImageToBufferInfo2KHR*)(local_pCopyImageToBufferInfo));
+    }
+    if (local_pCopyImageToBufferInfo)
+    {
+        transform_tohost_VkCopyImageToBufferInfo2KHR(sResourceTracker, (VkCopyImageToBufferInfo2KHR*)(local_pCopyImageToBufferInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkCopyImageToBufferInfo2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyImageToBufferInfo2KHR*)(local_pCopyImageToBufferInfo), countPtr);
+    }
+    uint32_t packetSize_vkCmdCopyImageToBuffer2KHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdCopyImageToBuffer2KHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdCopyImageToBuffer2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdCopyImageToBuffer2KHR = OP_vkCmdCopyImageToBuffer2KHR;
+    memcpy(streamPtr, &opcode_vkCmdCopyImageToBuffer2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdCopyImageToBuffer2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkCopyImageToBufferInfo2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyImageToBufferInfo2KHR*)(local_pCopyImageToBufferInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdBlitImage2KHR(
+    VkCommandBuffer commandBuffer,
+    const VkBlitImageInfo2KHR* pBlitImageInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkBlitImageInfo2KHR* local_pBlitImageInfo;
+    local_commandBuffer = commandBuffer;
+    local_pBlitImageInfo = nullptr;
+    if (pBlitImageInfo)
+    {
+        local_pBlitImageInfo = (VkBlitImageInfo2KHR*)pool->alloc(sizeof(const VkBlitImageInfo2KHR));
+        deepcopy_VkBlitImageInfo2KHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pBlitImageInfo, (VkBlitImageInfo2KHR*)(local_pBlitImageInfo));
+    }
+    if (local_pBlitImageInfo)
+    {
+        transform_tohost_VkBlitImageInfo2KHR(sResourceTracker, (VkBlitImageInfo2KHR*)(local_pBlitImageInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkBlitImageInfo2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBlitImageInfo2KHR*)(local_pBlitImageInfo), countPtr);
+    }
+    uint32_t packetSize_vkCmdBlitImage2KHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBlitImage2KHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBlitImage2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdBlitImage2KHR = OP_vkCmdBlitImage2KHR;
+    memcpy(streamPtr, &opcode_vkCmdBlitImage2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBlitImage2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkBlitImageInfo2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBlitImageInfo2KHR*)(local_pBlitImageInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdResolveImage2KHR(
+    VkCommandBuffer commandBuffer,
+    const VkResolveImageInfo2KHR* pResolveImageInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkResolveImageInfo2KHR* local_pResolveImageInfo;
+    local_commandBuffer = commandBuffer;
+    local_pResolveImageInfo = nullptr;
+    if (pResolveImageInfo)
+    {
+        local_pResolveImageInfo = (VkResolveImageInfo2KHR*)pool->alloc(sizeof(const VkResolveImageInfo2KHR));
+        deepcopy_VkResolveImageInfo2KHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pResolveImageInfo, (VkResolveImageInfo2KHR*)(local_pResolveImageInfo));
+    }
+    if (local_pResolveImageInfo)
+    {
+        transform_tohost_VkResolveImageInfo2KHR(sResourceTracker, (VkResolveImageInfo2KHR*)(local_pResolveImageInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkResolveImageInfo2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkResolveImageInfo2KHR*)(local_pResolveImageInfo), countPtr);
+    }
+    uint32_t packetSize_vkCmdResolveImage2KHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdResolveImage2KHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdResolveImage2KHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdResolveImage2KHR = OP_vkCmdResolveImage2KHR;
+    memcpy(streamPtr, &opcode_vkCmdResolveImage2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdResolveImage2KHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkResolveImageInfo2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkResolveImageInfo2KHR*)(local_pResolveImageInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
 #ifdef VK_ANDROID_native_buffer
 VkResult VkEncoder::vkGetSwapchainGrallocUsageANDROID(
     VkDevice device,
     VkFormat format,
     VkImageUsageFlags imageUsage,
-    int* grallocUsage)
+    int* grallocUsage,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetSwapchainGrallocUsageANDROID encode");
-    mImpl->log("start vkGetSwapchainGrallocUsageANDROID");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkFormat local_format;
     VkImageUsageFlags local_imageUsage;
     local_device = device;
     local_format = format;
     local_imageUsage = imageUsage;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1182;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1182, 1);
-        countingStream->write((uint64_t*)&cgen_var_1182, 1 * 8);
-        countingStream->write((VkFormat*)&local_format, sizeof(VkFormat));
-        countingStream->write((VkImageUsageFlags*)&local_imageUsage, sizeof(VkImageUsageFlags));
-        countingStream->write((int*)grallocUsage, sizeof(int));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkFormat);
+        *countPtr += sizeof(VkImageUsageFlags);
+        *countPtr += sizeof(int);
     }
-    uint32_t packetSize_vkGetSwapchainGrallocUsageANDROID = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetSwapchainGrallocUsageANDROID = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetSwapchainGrallocUsageANDROID);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetSwapchainGrallocUsageANDROID = OP_vkGetSwapchainGrallocUsageANDROID;
-    stream->write(&opcode_vkGetSwapchainGrallocUsageANDROID, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetSwapchainGrallocUsageANDROID, sizeof(uint32_t));
-    uint64_t cgen_var_1183;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1183, 1);
-    stream->write((uint64_t*)&cgen_var_1183, 1 * 8);
-    stream->write((VkFormat*)&local_format, sizeof(VkFormat));
-    stream->write((VkImageUsageFlags*)&local_imageUsage, sizeof(VkImageUsageFlags));
-    stream->write((int*)grallocUsage, sizeof(int));
-    AEMU_SCOPED_TRACE("vkGetSwapchainGrallocUsageANDROID readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetSwapchainGrallocUsageANDROID, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetSwapchainGrallocUsageANDROID, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkFormat*)&local_format, sizeof(VkFormat));
+    *streamPtrPtr += sizeof(VkFormat);
+    memcpy(*streamPtrPtr, (VkImageUsageFlags*)&local_imageUsage, sizeof(VkImageUsageFlags));
+    *streamPtrPtr += sizeof(VkImageUsageFlags);
+    memcpy(*streamPtrPtr, (int*)grallocUsage, sizeof(int));
+    *streamPtrPtr += sizeof(int);
     stream->read((int*)grallocUsage, sizeof(int));
-    AEMU_SCOPED_TRACE("vkGetSwapchainGrallocUsageANDROID returnUnmarshal");
     VkResult vkGetSwapchainGrallocUsageANDROID_VkResult_return = (VkResult)0;
     stream->read(&vkGetSwapchainGrallocUsageANDROID_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetSwapchainGrallocUsageANDROID");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetSwapchainGrallocUsageANDROID_VkResult_return;
 }
 
@@ -18116,16 +22530,14 @@
     VkImage image,
     int nativeFenceFd,
     VkSemaphore semaphore,
-    VkFence fence)
+    VkFence fence,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkAcquireImageANDROID encode");
-    mImpl->log("start vkAcquireImageANDROID");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImage local_image;
     int local_nativeFenceFd;
@@ -18136,49 +22548,55 @@
     local_nativeFenceFd = nativeFenceFd;
     local_semaphore = semaphore;
     local_fence = fence;
-    mImpl->resources()->unwrap_vkAcquireImageANDROID_nativeFenceFd(nativeFenceFd, &local_nativeFenceFd);
-    countingStream->rewind();
+    sResourceTracker->unwrap_vkAcquireImageANDROID_nativeFenceFd(nativeFenceFd, &local_nativeFenceFd);
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1184;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1184, 1);
-        countingStream->write((uint64_t*)&cgen_var_1184, 1 * 8);
-        uint64_t cgen_var_1185;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_1185, 1);
-        countingStream->write((uint64_t*)&cgen_var_1185, 1 * 8);
-        countingStream->write((int*)&local_nativeFenceFd, sizeof(int));
-        uint64_t cgen_var_1186;
-        countingStream->handleMapping()->mapHandles_VkSemaphore_u64(&local_semaphore, &cgen_var_1186, 1);
-        countingStream->write((uint64_t*)&cgen_var_1186, 1 * 8);
-        uint64_t cgen_var_1187;
-        countingStream->handleMapping()->mapHandles_VkFence_u64(&local_fence, &cgen_var_1187, 1);
-        countingStream->write((uint64_t*)&cgen_var_1187, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(int);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_3;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkAcquireImageANDROID = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkAcquireImageANDROID = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkAcquireImageANDROID);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkAcquireImageANDROID = OP_vkAcquireImageANDROID;
-    stream->write(&opcode_vkAcquireImageANDROID, sizeof(uint32_t));
-    stream->write(&packetSize_vkAcquireImageANDROID, sizeof(uint32_t));
-    uint64_t cgen_var_1188;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1188, 1);
-    stream->write((uint64_t*)&cgen_var_1188, 1 * 8);
-    uint64_t cgen_var_1189;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_1189, 1);
-    stream->write((uint64_t*)&cgen_var_1189, 1 * 8);
-    stream->write((int*)&local_nativeFenceFd, sizeof(int));
-    uint64_t cgen_var_1190;
-    stream->handleMapping()->mapHandles_VkSemaphore_u64(&local_semaphore, &cgen_var_1190, 1);
-    stream->write((uint64_t*)&cgen_var_1190, 1 * 8);
-    uint64_t cgen_var_1191;
-    stream->handleMapping()->mapHandles_VkFence_u64(&local_fence, &cgen_var_1191, 1);
-    stream->write((uint64_t*)&cgen_var_1191, 1 * 8);
-    AEMU_SCOPED_TRACE("vkAcquireImageANDROID readParams");
-    AEMU_SCOPED_TRACE("vkAcquireImageANDROID returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkAcquireImageANDROID, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkAcquireImageANDROID, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImage((*&local_image));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (int*)&local_nativeFenceFd, sizeof(int));
+    *streamPtrPtr += sizeof(int);
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = get_host_u64_VkSemaphore((*&local_semaphore));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_3;
+    *&cgen_var_3 = get_host_u64_VkFence((*&local_fence));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_3, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     VkResult vkAcquireImageANDROID_VkResult_return = (VkResult)0;
     stream->read(&vkAcquireImageANDROID_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkAcquireImageANDROID");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkAcquireImageANDROID_VkResult_return;
 }
 
@@ -18187,87 +22605,91 @@
     uint32_t waitSemaphoreCount,
     const VkSemaphore* pWaitSemaphores,
     VkImage image,
-    int* pNativeFenceFd)
+    int* pNativeFenceFd,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkQueueSignalReleaseImageANDROID encode");
-    mImpl->log("start vkQueueSignalReleaseImageANDROID");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkQueue local_queue;
     uint32_t local_waitSemaphoreCount;
     VkSemaphore* local_pWaitSemaphores;
     VkImage local_image;
     local_queue = queue;
     local_waitSemaphoreCount = waitSemaphoreCount;
-    local_pWaitSemaphores = nullptr;
-    if (pWaitSemaphores)
-    {
-        local_pWaitSemaphores = (VkSemaphore*)pool->dupArray(pWaitSemaphores, ((waitSemaphoreCount)) * sizeof(const VkSemaphore));
-    }
+    // Avoiding deepcopy for pWaitSemaphores
+    local_pWaitSemaphores = (VkSemaphore*)pWaitSemaphores;
     local_image = image;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1192;
-        countingStream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_1192, 1);
-        countingStream->write((uint64_t*)&cgen_var_1192, 1 * 8);
-        countingStream->write((uint32_t*)&local_waitSemaphoreCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1193 = (uint64_t)(uintptr_t)local_pWaitSemaphores;
-        countingStream->putBe64(cgen_var_1193);
+        *countPtr += 8;
         if (local_pWaitSemaphores)
         {
             if (((waitSemaphoreCount)))
             {
-                uint64_t* cgen_var_1194;
-                countingStream->alloc((void**)&cgen_var_1194, ((waitSemaphoreCount)) * 8);
-                countingStream->handleMapping()->mapHandles_VkSemaphore_u64(local_pWaitSemaphores, cgen_var_1194, ((waitSemaphoreCount)));
-                countingStream->write((uint64_t*)cgen_var_1194, ((waitSemaphoreCount)) * 8);
+                *countPtr += ((waitSemaphoreCount)) * 8;
             }
         }
-        uint64_t cgen_var_1195;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_1195, 1);
-        countingStream->write((uint64_t*)&cgen_var_1195, 1 * 8);
-        countingStream->write((int*)pNativeFenceFd, sizeof(int));
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(int);
     }
-    uint32_t packetSize_vkQueueSignalReleaseImageANDROID = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkQueueSignalReleaseImageANDROID = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkQueueSignalReleaseImageANDROID);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkQueueSignalReleaseImageANDROID = OP_vkQueueSignalReleaseImageANDROID;
-    stream->write(&opcode_vkQueueSignalReleaseImageANDROID, sizeof(uint32_t));
-    stream->write(&packetSize_vkQueueSignalReleaseImageANDROID, sizeof(uint32_t));
-    uint64_t cgen_var_1196;
-    stream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_1196, 1);
-    stream->write((uint64_t*)&cgen_var_1196, 1 * 8);
-    stream->write((uint32_t*)&local_waitSemaphoreCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkQueueSignalReleaseImageANDROID, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkQueueSignalReleaseImageANDROID, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueue((*&local_queue));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_waitSemaphoreCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1197 = (uint64_t)(uintptr_t)local_pWaitSemaphores;
-    stream->putBe64(cgen_var_1197);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pWaitSemaphores;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pWaitSemaphores)
     {
         if (((waitSemaphoreCount)))
         {
-            uint64_t* cgen_var_1198;
-            stream->alloc((void**)&cgen_var_1198, ((waitSemaphoreCount)) * 8);
-            stream->handleMapping()->mapHandles_VkSemaphore_u64(local_pWaitSemaphores, cgen_var_1198, ((waitSemaphoreCount)));
-            stream->write((uint64_t*)cgen_var_1198, ((waitSemaphoreCount)) * 8);
+            uint8_t* cgen_var_1_0_ptr = (uint8_t*)(*streamPtrPtr);
+            for (uint32_t k = 0; k < ((waitSemaphoreCount)); ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkSemaphore(local_pWaitSemaphores[k]);
+                memcpy(cgen_var_1_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+            *streamPtrPtr += 8 * ((waitSemaphoreCount));
         }
     }
-    uint64_t cgen_var_1199;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_1199, 1);
-    stream->write((uint64_t*)&cgen_var_1199, 1 * 8);
-    stream->write((int*)pNativeFenceFd, sizeof(int));
-    AEMU_SCOPED_TRACE("vkQueueSignalReleaseImageANDROID readParams");
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = get_host_u64_VkImage((*&local_image));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (int*)pNativeFenceFd, sizeof(int));
+    *streamPtrPtr += sizeof(int);
     stream->read((int*)pNativeFenceFd, sizeof(int));
-    AEMU_SCOPED_TRACE("vkQueueSignalReleaseImageANDROID returnUnmarshal");
     VkResult vkQueueSignalReleaseImageANDROID_VkResult_return = (VkResult)0;
     stream->read(&vkQueueSignalReleaseImageANDROID_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkQueueSignalReleaseImageANDROID");;
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkQueueSignalReleaseImageANDROID_VkResult_return;
 }
 
@@ -18277,16 +22699,14 @@
     VkInstance instance,
     const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkDebugReportCallbackEXT* pCallback)
+    VkDebugReportCallbackEXT* pCallback,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateDebugReportCallbackEXT encode");
-    mImpl->log("start vkCreateDebugReportCallbackEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     VkDebugReportCallbackCreateInfoEXT* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -18295,90 +22715,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkDebugReportCallbackCreateInfoEXT*)pool->alloc(sizeof(const VkDebugReportCallbackCreateInfoEXT));
-        deepcopy_VkDebugReportCallbackCreateInfoEXT(pool, pCreateInfo, (VkDebugReportCallbackCreateInfoEXT*)(local_pCreateInfo));
+        deepcopy_VkDebugReportCallbackCreateInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkDebugReportCallbackCreateInfoEXT*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkDebugReportCallbackCreateInfoEXT(mImpl->resources(), (VkDebugReportCallbackCreateInfoEXT*)(local_pCreateInfo));
+        transform_tohost_VkDebugReportCallbackCreateInfoEXT(sResourceTracker, (VkDebugReportCallbackCreateInfoEXT*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1200;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1200, 1);
-        countingStream->write((uint64_t*)&cgen_var_1200, 1 * 8);
-        marshal_VkDebugReportCallbackCreateInfoEXT(countingStream, (VkDebugReportCallbackCreateInfoEXT*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDebugReportCallbackCreateInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugReportCallbackCreateInfoEXT*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1201 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1201);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_1202;
-        countingStream->handleMapping()->mapHandles_VkDebugReportCallbackEXT_u64(pCallback, &cgen_var_1202, 1);
-        countingStream->write((uint64_t*)&cgen_var_1202, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateDebugReportCallbackEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateDebugReportCallbackEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateDebugReportCallbackEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateDebugReportCallbackEXT = OP_vkCreateDebugReportCallbackEXT;
-    stream->write(&opcode_vkCreateDebugReportCallbackEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateDebugReportCallbackEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1203;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1203, 1);
-    stream->write((uint64_t*)&cgen_var_1203, 1 * 8);
-    marshal_VkDebugReportCallbackCreateInfoEXT(stream, (VkDebugReportCallbackCreateInfoEXT*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateDebugReportCallbackEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateDebugReportCallbackEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDebugReportCallbackCreateInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugReportCallbackCreateInfoEXT*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1204 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1204);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_1205;
-    stream->handleMapping()->mapHandles_VkDebugReportCallbackEXT_u64(pCallback, &cgen_var_1205, 1);
-    stream->write((uint64_t*)&cgen_var_1205, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateDebugReportCallbackEXT readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_1206;
-    stream->read((uint64_t*)&cgen_var_1206, 8);
-    stream->handleMapping()->mapHandles_u64_VkDebugReportCallbackEXT(&cgen_var_1206, (VkDebugReportCallbackEXT*)pCallback, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pCallback));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkDebugReportCallbackEXT(&cgen_var_3, (VkDebugReportCallbackEXT*)pCallback, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateDebugReportCallbackEXT returnUnmarshal");
     VkResult vkCreateDebugReportCallbackEXT_VkResult_return = (VkResult)0;
     stream->read(&vkCreateDebugReportCallbackEXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateDebugReportCallbackEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateDebugReportCallbackEXT_VkResult_return;
 }
 
 void VkEncoder::vkDestroyDebugReportCallbackEXT(
     VkInstance instance,
     VkDebugReportCallbackEXT callback,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyDebugReportCallbackEXT encode");
-    mImpl->log("start vkDestroyDebugReportCallbackEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     VkDebugReportCallbackEXT local_callback;
     VkAllocationCallbacks* local_pAllocator;
@@ -18388,51 +22812,61 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1207;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1207, 1);
-        countingStream->write((uint64_t*)&cgen_var_1207, 1 * 8);
-        uint64_t cgen_var_1208;
-        countingStream->handleMapping()->mapHandles_VkDebugReportCallbackEXT_u64(&local_callback, &cgen_var_1208, 1);
-        countingStream->write((uint64_t*)&cgen_var_1208, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_1209 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1209);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyDebugReportCallbackEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyDebugReportCallbackEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyDebugReportCallbackEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyDebugReportCallbackEXT = OP_vkDestroyDebugReportCallbackEXT;
-    stream->write(&opcode_vkDestroyDebugReportCallbackEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyDebugReportCallbackEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1210;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1210, 1);
-    stream->write((uint64_t*)&cgen_var_1210, 1 * 8);
-    uint64_t cgen_var_1211;
-    stream->handleMapping()->mapHandles_VkDebugReportCallbackEXT_u64(&local_callback, &cgen_var_1211, 1);
-    stream->write((uint64_t*)&cgen_var_1211, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyDebugReportCallbackEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyDebugReportCallbackEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDebugReportCallbackEXT((*&local_callback));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_1212 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1212);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyDebugReportCallbackEXT readParams");
-    AEMU_SCOPED_TRACE("vkDestroyDebugReportCallbackEXT returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkDebugReportCallbackEXT((VkDebugReportCallbackEXT*)&callback);
-    mImpl->log("finish vkDestroyDebugReportCallbackEXT");;
+    sResourceTracker->destroyMapping()->mapHandles_VkDebugReportCallbackEXT((VkDebugReportCallbackEXT*)&callback);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkDebugReportMessageEXT(
@@ -18443,16 +22877,14 @@
     size_t location,
     int32_t messageCode,
     const char* pLayerPrefix,
-    const char* pMessage)
+    const char* pMessage,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDebugReportMessageEXT encode");
-    mImpl->log("start vkDebugReportMessageEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     VkDebugReportFlagsEXT local_flags;
     VkDebugReportObjectTypeEXT local_objectType;
@@ -18467,49 +22899,71 @@
     local_object = object;
     local_location = location;
     local_messageCode = messageCode;
-    local_pLayerPrefix = nullptr;
-    if (pLayerPrefix)
+    // Avoiding deepcopy for pLayerPrefix
+    local_pLayerPrefix = (char*)pLayerPrefix;
+    // Avoiding deepcopy for pMessage
+    local_pMessage = (char*)pMessage;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pLayerPrefix = pool->strDup(pLayerPrefix);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDebugReportFlagsEXT);
+        *countPtr += sizeof(VkDebugReportObjectTypeEXT);
+        *countPtr += sizeof(uint64_t);
+        *countPtr += 8;
+        *countPtr += sizeof(int32_t);
+        *countPtr += sizeof(uint32_t) + (local_pLayerPrefix ? strlen(local_pLayerPrefix) : 0);
+        *countPtr += sizeof(uint32_t) + (local_pMessage ? strlen(local_pMessage) : 0);
     }
-    local_pMessage = nullptr;
-    if (pMessage)
-    {
-        local_pMessage = pool->strDup(pMessage);
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1213;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1213, 1);
-        countingStream->write((uint64_t*)&cgen_var_1213, 1 * 8);
-        countingStream->write((VkDebugReportFlagsEXT*)&local_flags, sizeof(VkDebugReportFlagsEXT));
-        countingStream->write((VkDebugReportObjectTypeEXT*)&local_objectType, sizeof(VkDebugReportObjectTypeEXT));
-        countingStream->write((uint64_t*)&local_object, sizeof(uint64_t));
-        uint64_t cgen_var_1214 = (uint64_t)local_location;
-        countingStream->putBe64(cgen_var_1214);
-        countingStream->write((int32_t*)&local_messageCode, sizeof(int32_t));
-        countingStream->putString(local_pLayerPrefix);
-        countingStream->putString(local_pMessage);
-    }
-    uint32_t packetSize_vkDebugReportMessageEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDebugReportMessageEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDebugReportMessageEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDebugReportMessageEXT = OP_vkDebugReportMessageEXT;
-    stream->write(&opcode_vkDebugReportMessageEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkDebugReportMessageEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1215;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1215, 1);
-    stream->write((uint64_t*)&cgen_var_1215, 1 * 8);
-    stream->write((VkDebugReportFlagsEXT*)&local_flags, sizeof(VkDebugReportFlagsEXT));
-    stream->write((VkDebugReportObjectTypeEXT*)&local_objectType, sizeof(VkDebugReportObjectTypeEXT));
-    stream->write((uint64_t*)&local_object, sizeof(uint64_t));
-    uint64_t cgen_var_1216 = (uint64_t)local_location;
-    stream->putBe64(cgen_var_1216);
-    stream->write((int32_t*)&local_messageCode, sizeof(int32_t));
-    stream->putString(local_pLayerPrefix);
-    stream->putString(local_pMessage);
-    AEMU_SCOPED_TRACE("vkDebugReportMessageEXT readParams");
-    AEMU_SCOPED_TRACE("vkDebugReportMessageEXT returnUnmarshal");
-    mImpl->log("finish vkDebugReportMessageEXT");;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDebugReportMessageEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDebugReportMessageEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDebugReportFlagsEXT*)&local_flags, sizeof(VkDebugReportFlagsEXT));
+    *streamPtrPtr += sizeof(VkDebugReportFlagsEXT);
+    memcpy(*streamPtrPtr, (VkDebugReportObjectTypeEXT*)&local_objectType, sizeof(VkDebugReportObjectTypeEXT));
+    *streamPtrPtr += sizeof(VkDebugReportObjectTypeEXT);
+    memcpy(*streamPtrPtr, (uint64_t*)&local_object, sizeof(uint64_t));
+    *streamPtrPtr += sizeof(uint64_t);
+    uint64_t cgen_var_1 = (uint64_t)local_location;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    memcpy(*streamPtrPtr, (int32_t*)&local_messageCode, sizeof(int32_t));
+    *streamPtrPtr += sizeof(int32_t);
+    {
+        uint32_t l = local_pLayerPrefix ? strlen(local_pLayerPrefix): 0;
+        memcpy(*streamPtrPtr, (uint32_t*)&l, sizeof(uint32_t));
+        android::base::Stream::toBe32((uint8_t*)*streamPtrPtr);
+        *streamPtrPtr += sizeof(uint32_t);
+        memcpy(*streamPtrPtr, (char*)local_pLayerPrefix, l);
+        *streamPtrPtr += l;
+    }
+    {
+        uint32_t l = local_pMessage ? strlen(local_pMessage): 0;
+        memcpy(*streamPtrPtr, (uint32_t*)&l, sizeof(uint32_t));
+        android::base::Stream::toBe32((uint8_t*)*streamPtrPtr);
+        *streamPtrPtr += sizeof(uint32_t);
+        memcpy(*streamPtrPtr, (char*)local_pMessage, l);
+        *streamPtrPtr += l;
+    }
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
@@ -18528,16 +22982,14 @@
 #ifdef VK_EXT_debug_marker
 VkResult VkEncoder::vkDebugMarkerSetObjectTagEXT(
     VkDevice device,
-    const VkDebugMarkerObjectTagInfoEXT* pTagInfo)
+    const VkDebugMarkerObjectTagInfoEXT* pTagInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDebugMarkerSetObjectTagEXT encode");
-    mImpl->log("start vkDebugMarkerSetObjectTagEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDebugMarkerObjectTagInfoEXT* local_pTagInfo;
     local_device = device;
@@ -18545,51 +22997,54 @@
     if (pTagInfo)
     {
         local_pTagInfo = (VkDebugMarkerObjectTagInfoEXT*)pool->alloc(sizeof(const VkDebugMarkerObjectTagInfoEXT));
-        deepcopy_VkDebugMarkerObjectTagInfoEXT(pool, pTagInfo, (VkDebugMarkerObjectTagInfoEXT*)(local_pTagInfo));
+        deepcopy_VkDebugMarkerObjectTagInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pTagInfo, (VkDebugMarkerObjectTagInfoEXT*)(local_pTagInfo));
     }
     if (local_pTagInfo)
     {
-        transform_tohost_VkDebugMarkerObjectTagInfoEXT(mImpl->resources(), (VkDebugMarkerObjectTagInfoEXT*)(local_pTagInfo));
+        transform_tohost_VkDebugMarkerObjectTagInfoEXT(sResourceTracker, (VkDebugMarkerObjectTagInfoEXT*)(local_pTagInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1217;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1217, 1);
-        countingStream->write((uint64_t*)&cgen_var_1217, 1 * 8);
-        marshal_VkDebugMarkerObjectTagInfoEXT(countingStream, (VkDebugMarkerObjectTagInfoEXT*)(local_pTagInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDebugMarkerObjectTagInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugMarkerObjectTagInfoEXT*)(local_pTagInfo), countPtr);
     }
-    uint32_t packetSize_vkDebugMarkerSetObjectTagEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDebugMarkerSetObjectTagEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDebugMarkerSetObjectTagEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDebugMarkerSetObjectTagEXT = OP_vkDebugMarkerSetObjectTagEXT;
-    stream->write(&opcode_vkDebugMarkerSetObjectTagEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkDebugMarkerSetObjectTagEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1218;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1218, 1);
-    stream->write((uint64_t*)&cgen_var_1218, 1 * 8);
-    marshal_VkDebugMarkerObjectTagInfoEXT(stream, (VkDebugMarkerObjectTagInfoEXT*)(local_pTagInfo));
-    AEMU_SCOPED_TRACE("vkDebugMarkerSetObjectTagEXT readParams");
-    AEMU_SCOPED_TRACE("vkDebugMarkerSetObjectTagEXT returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDebugMarkerSetObjectTagEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDebugMarkerSetObjectTagEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDebugMarkerObjectTagInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugMarkerObjectTagInfoEXT*)(local_pTagInfo), streamPtrPtr);
     VkResult vkDebugMarkerSetObjectTagEXT_VkResult_return = (VkResult)0;
     stream->read(&vkDebugMarkerSetObjectTagEXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkDebugMarkerSetObjectTagEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkDebugMarkerSetObjectTagEXT_VkResult_return;
 }
 
 VkResult VkEncoder::vkDebugMarkerSetObjectNameEXT(
     VkDevice device,
-    const VkDebugMarkerObjectNameInfoEXT* pNameInfo)
+    const VkDebugMarkerObjectNameInfoEXT* pNameInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDebugMarkerSetObjectNameEXT encode");
-    mImpl->log("start vkDebugMarkerSetObjectNameEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDebugMarkerObjectNameInfoEXT* local_pNameInfo;
     local_device = device;
@@ -18597,51 +23052,54 @@
     if (pNameInfo)
     {
         local_pNameInfo = (VkDebugMarkerObjectNameInfoEXT*)pool->alloc(sizeof(const VkDebugMarkerObjectNameInfoEXT));
-        deepcopy_VkDebugMarkerObjectNameInfoEXT(pool, pNameInfo, (VkDebugMarkerObjectNameInfoEXT*)(local_pNameInfo));
+        deepcopy_VkDebugMarkerObjectNameInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pNameInfo, (VkDebugMarkerObjectNameInfoEXT*)(local_pNameInfo));
     }
     if (local_pNameInfo)
     {
-        transform_tohost_VkDebugMarkerObjectNameInfoEXT(mImpl->resources(), (VkDebugMarkerObjectNameInfoEXT*)(local_pNameInfo));
+        transform_tohost_VkDebugMarkerObjectNameInfoEXT(sResourceTracker, (VkDebugMarkerObjectNameInfoEXT*)(local_pNameInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1219;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1219, 1);
-        countingStream->write((uint64_t*)&cgen_var_1219, 1 * 8);
-        marshal_VkDebugMarkerObjectNameInfoEXT(countingStream, (VkDebugMarkerObjectNameInfoEXT*)(local_pNameInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDebugMarkerObjectNameInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugMarkerObjectNameInfoEXT*)(local_pNameInfo), countPtr);
     }
-    uint32_t packetSize_vkDebugMarkerSetObjectNameEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDebugMarkerSetObjectNameEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDebugMarkerSetObjectNameEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDebugMarkerSetObjectNameEXT = OP_vkDebugMarkerSetObjectNameEXT;
-    stream->write(&opcode_vkDebugMarkerSetObjectNameEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkDebugMarkerSetObjectNameEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1220;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1220, 1);
-    stream->write((uint64_t*)&cgen_var_1220, 1 * 8);
-    marshal_VkDebugMarkerObjectNameInfoEXT(stream, (VkDebugMarkerObjectNameInfoEXT*)(local_pNameInfo));
-    AEMU_SCOPED_TRACE("vkDebugMarkerSetObjectNameEXT readParams");
-    AEMU_SCOPED_TRACE("vkDebugMarkerSetObjectNameEXT returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDebugMarkerSetObjectNameEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDebugMarkerSetObjectNameEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDebugMarkerObjectNameInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugMarkerObjectNameInfoEXT*)(local_pNameInfo), streamPtrPtr);
     VkResult vkDebugMarkerSetObjectNameEXT_VkResult_return = (VkResult)0;
     stream->read(&vkDebugMarkerSetObjectNameEXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkDebugMarkerSetObjectNameEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkDebugMarkerSetObjectNameEXT_VkResult_return;
 }
 
 void VkEncoder::vkCmdDebugMarkerBeginEXT(
     VkCommandBuffer commandBuffer,
-    const VkDebugMarkerMarkerInfoEXT* pMarkerInfo)
+    const VkDebugMarkerMarkerInfoEXT* pMarkerInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdDebugMarkerBeginEXT encode");
-    mImpl->log("start vkCmdDebugMarkerBeginEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkDebugMarkerMarkerInfoEXT* local_pMarkerInfo;
     local_commandBuffer = commandBuffer;
@@ -18649,77 +23107,93 @@
     if (pMarkerInfo)
     {
         local_pMarkerInfo = (VkDebugMarkerMarkerInfoEXT*)pool->alloc(sizeof(const VkDebugMarkerMarkerInfoEXT));
-        deepcopy_VkDebugMarkerMarkerInfoEXT(pool, pMarkerInfo, (VkDebugMarkerMarkerInfoEXT*)(local_pMarkerInfo));
+        deepcopy_VkDebugMarkerMarkerInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pMarkerInfo, (VkDebugMarkerMarkerInfoEXT*)(local_pMarkerInfo));
     }
     if (local_pMarkerInfo)
     {
-        transform_tohost_VkDebugMarkerMarkerInfoEXT(mImpl->resources(), (VkDebugMarkerMarkerInfoEXT*)(local_pMarkerInfo));
+        transform_tohost_VkDebugMarkerMarkerInfoEXT(sResourceTracker, (VkDebugMarkerMarkerInfoEXT*)(local_pMarkerInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1221;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1221, 1);
-        countingStream->write((uint64_t*)&cgen_var_1221, 1 * 8);
-        marshal_VkDebugMarkerMarkerInfoEXT(countingStream, (VkDebugMarkerMarkerInfoEXT*)(local_pMarkerInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDebugMarkerMarkerInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugMarkerMarkerInfoEXT*)(local_pMarkerInfo), countPtr);
     }
-    uint32_t packetSize_vkCmdDebugMarkerBeginEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdDebugMarkerBeginEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDebugMarkerBeginEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDebugMarkerBeginEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdDebugMarkerBeginEXT = OP_vkCmdDebugMarkerBeginEXT;
-    stream->write(&opcode_vkCmdDebugMarkerBeginEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdDebugMarkerBeginEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1222;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1222, 1);
-    stream->write((uint64_t*)&cgen_var_1222, 1 * 8);
-    marshal_VkDebugMarkerMarkerInfoEXT(stream, (VkDebugMarkerMarkerInfoEXT*)(local_pMarkerInfo));
-    AEMU_SCOPED_TRACE("vkCmdDebugMarkerBeginEXT readParams");
-    AEMU_SCOPED_TRACE("vkCmdDebugMarkerBeginEXT returnUnmarshal");
-    mImpl->log("finish vkCmdDebugMarkerBeginEXT");;
+    memcpy(streamPtr, &opcode_vkCmdDebugMarkerBeginEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDebugMarkerBeginEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkDebugMarkerMarkerInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugMarkerMarkerInfoEXT*)(local_pMarkerInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdDebugMarkerEndEXT(
-    VkCommandBuffer commandBuffer)
+    VkCommandBuffer commandBuffer,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdDebugMarkerEndEXT encode");
-    mImpl->log("start vkCmdDebugMarkerEndEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     local_commandBuffer = commandBuffer;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1223;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1223, 1);
-        countingStream->write((uint64_t*)&cgen_var_1223, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkCmdDebugMarkerEndEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdDebugMarkerEndEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDebugMarkerEndEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDebugMarkerEndEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdDebugMarkerEndEXT = OP_vkCmdDebugMarkerEndEXT;
-    stream->write(&opcode_vkCmdDebugMarkerEndEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdDebugMarkerEndEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1224;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1224, 1);
-    stream->write((uint64_t*)&cgen_var_1224, 1 * 8);
-    AEMU_SCOPED_TRACE("vkCmdDebugMarkerEndEXT readParams");
-    AEMU_SCOPED_TRACE("vkCmdDebugMarkerEndEXT returnUnmarshal");
-    mImpl->log("finish vkCmdDebugMarkerEndEXT");;
+    memcpy(streamPtr, &opcode_vkCmdDebugMarkerEndEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDebugMarkerEndEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdDebugMarkerInsertEXT(
     VkCommandBuffer commandBuffer,
-    const VkDebugMarkerMarkerInfoEXT* pMarkerInfo)
+    const VkDebugMarkerMarkerInfoEXT* pMarkerInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdDebugMarkerInsertEXT encode");
-    mImpl->log("start vkCmdDebugMarkerInsertEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkDebugMarkerMarkerInfoEXT* local_pMarkerInfo;
     local_commandBuffer = commandBuffer;
@@ -18727,31 +23201,41 @@
     if (pMarkerInfo)
     {
         local_pMarkerInfo = (VkDebugMarkerMarkerInfoEXT*)pool->alloc(sizeof(const VkDebugMarkerMarkerInfoEXT));
-        deepcopy_VkDebugMarkerMarkerInfoEXT(pool, pMarkerInfo, (VkDebugMarkerMarkerInfoEXT*)(local_pMarkerInfo));
+        deepcopy_VkDebugMarkerMarkerInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pMarkerInfo, (VkDebugMarkerMarkerInfoEXT*)(local_pMarkerInfo));
     }
     if (local_pMarkerInfo)
     {
-        transform_tohost_VkDebugMarkerMarkerInfoEXT(mImpl->resources(), (VkDebugMarkerMarkerInfoEXT*)(local_pMarkerInfo));
+        transform_tohost_VkDebugMarkerMarkerInfoEXT(sResourceTracker, (VkDebugMarkerMarkerInfoEXT*)(local_pMarkerInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1225;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1225, 1);
-        countingStream->write((uint64_t*)&cgen_var_1225, 1 * 8);
-        marshal_VkDebugMarkerMarkerInfoEXT(countingStream, (VkDebugMarkerMarkerInfoEXT*)(local_pMarkerInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDebugMarkerMarkerInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugMarkerMarkerInfoEXT*)(local_pMarkerInfo), countPtr);
     }
-    uint32_t packetSize_vkCmdDebugMarkerInsertEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdDebugMarkerInsertEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDebugMarkerInsertEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDebugMarkerInsertEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdDebugMarkerInsertEXT = OP_vkCmdDebugMarkerInsertEXT;
-    stream->write(&opcode_vkCmdDebugMarkerInsertEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdDebugMarkerInsertEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1226;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1226, 1);
-    stream->write((uint64_t*)&cgen_var_1226, 1 * 8);
-    marshal_VkDebugMarkerMarkerInfoEXT(stream, (VkDebugMarkerMarkerInfoEXT*)(local_pMarkerInfo));
-    AEMU_SCOPED_TRACE("vkCmdDebugMarkerInsertEXT readParams");
-    AEMU_SCOPED_TRACE("vkCmdDebugMarkerInsertEXT returnUnmarshal");
-    mImpl->log("finish vkCmdDebugMarkerInsertEXT");;
+    memcpy(streamPtr, &opcode_vkCmdDebugMarkerInsertEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDebugMarkerInsertEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkDebugMarkerMarkerInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugMarkerMarkerInfoEXT*)(local_pMarkerInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
@@ -18759,6 +23243,633 @@
 #endif
 #ifdef VK_NV_dedicated_allocation
 #endif
+#ifdef VK_EXT_transform_feedback
+void VkEncoder::vkCmdBindTransformFeedbackBuffersEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstBinding,
+    uint32_t bindingCount,
+    const VkBuffer* pBuffers,
+    const VkDeviceSize* pOffsets,
+    const VkDeviceSize* pSizes,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    uint32_t local_firstBinding;
+    uint32_t local_bindingCount;
+    VkBuffer* local_pBuffers;
+    VkDeviceSize* local_pOffsets;
+    VkDeviceSize* local_pSizes;
+    local_commandBuffer = commandBuffer;
+    local_firstBinding = firstBinding;
+    local_bindingCount = bindingCount;
+    // Avoiding deepcopy for pBuffers
+    local_pBuffers = (VkBuffer*)pBuffers;
+    // Avoiding deepcopy for pOffsets
+    local_pOffsets = (VkDeviceSize*)pOffsets;
+    // Avoiding deepcopy for pSizes
+    local_pSizes = (VkDeviceSize*)pSizes;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        if (((bindingCount)))
+        {
+            *countPtr += ((bindingCount)) * 8;
+        }
+        *countPtr += ((bindingCount)) * sizeof(VkDeviceSize);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pSizes)
+        {
+            *countPtr += ((bindingCount)) * sizeof(VkDeviceSize);
+        }
+    }
+    uint32_t packetSize_vkCmdBindTransformFeedbackBuffersEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBindTransformFeedbackBuffersEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBindTransformFeedbackBuffersEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdBindTransformFeedbackBuffersEXT = OP_vkCmdBindTransformFeedbackBuffersEXT;
+    memcpy(streamPtr, &opcode_vkCmdBindTransformFeedbackBuffersEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBindTransformFeedbackBuffersEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstBinding, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_bindingCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    if (((bindingCount)))
+    {
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((bindingCount)); ++k)
+        {
+            uint64_t tmpval = get_host_u64_VkBuffer(local_pBuffers[k]);
+            memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((bindingCount));
+    }
+    memcpy(*streamPtrPtr, (VkDeviceSize*)local_pOffsets, ((bindingCount)) * sizeof(VkDeviceSize));
+    *streamPtrPtr += ((bindingCount)) * sizeof(VkDeviceSize);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pSizes;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pSizes)
+    {
+        memcpy(*streamPtrPtr, (VkDeviceSize*)local_pSizes, ((bindingCount)) * sizeof(VkDeviceSize));
+        *streamPtrPtr += ((bindingCount)) * sizeof(VkDeviceSize);
+    }
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdBeginTransformFeedbackEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstCounterBuffer,
+    uint32_t counterBufferCount,
+    const VkBuffer* pCounterBuffers,
+    const VkDeviceSize* pCounterBufferOffsets,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    uint32_t local_firstCounterBuffer;
+    uint32_t local_counterBufferCount;
+    VkBuffer* local_pCounterBuffers;
+    VkDeviceSize* local_pCounterBufferOffsets;
+    local_commandBuffer = commandBuffer;
+    local_firstCounterBuffer = firstCounterBuffer;
+    local_counterBufferCount = counterBufferCount;
+    // Avoiding deepcopy for pCounterBuffers
+    local_pCounterBuffers = (VkBuffer*)pCounterBuffers;
+    // Avoiding deepcopy for pCounterBufferOffsets
+    local_pCounterBufferOffsets = (VkDeviceSize*)pCounterBufferOffsets;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pCounterBuffers)
+        {
+            if (((counterBufferCount)))
+            {
+                *countPtr += ((counterBufferCount)) * 8;
+            }
+        }
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pCounterBufferOffsets)
+        {
+            *countPtr += ((counterBufferCount)) * sizeof(VkDeviceSize);
+        }
+    }
+    uint32_t packetSize_vkCmdBeginTransformFeedbackEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBeginTransformFeedbackEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBeginTransformFeedbackEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdBeginTransformFeedbackEXT = OP_vkCmdBeginTransformFeedbackEXT;
+    memcpy(streamPtr, &opcode_vkCmdBeginTransformFeedbackEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBeginTransformFeedbackEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstCounterBuffer, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_counterBufferCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)local_pCounterBuffers;
+    memcpy((*streamPtrPtr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pCounterBuffers)
+    {
+        if (((counterBufferCount)))
+        {
+            uint8_t* cgen_var_0_0_ptr = (uint8_t*)(*streamPtrPtr);
+            for (uint32_t k = 0; k < ((counterBufferCount)); ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkBuffer(local_pCounterBuffers[k]);
+                memcpy(cgen_var_0_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+            *streamPtrPtr += 8 * ((counterBufferCount));
+        }
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pCounterBufferOffsets;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pCounterBufferOffsets)
+    {
+        memcpy(*streamPtrPtr, (VkDeviceSize*)local_pCounterBufferOffsets, ((counterBufferCount)) * sizeof(VkDeviceSize));
+        *streamPtrPtr += ((counterBufferCount)) * sizeof(VkDeviceSize);
+    }
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdEndTransformFeedbackEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstCounterBuffer,
+    uint32_t counterBufferCount,
+    const VkBuffer* pCounterBuffers,
+    const VkDeviceSize* pCounterBufferOffsets,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    uint32_t local_firstCounterBuffer;
+    uint32_t local_counterBufferCount;
+    VkBuffer* local_pCounterBuffers;
+    VkDeviceSize* local_pCounterBufferOffsets;
+    local_commandBuffer = commandBuffer;
+    local_firstCounterBuffer = firstCounterBuffer;
+    local_counterBufferCount = counterBufferCount;
+    // Avoiding deepcopy for pCounterBuffers
+    local_pCounterBuffers = (VkBuffer*)pCounterBuffers;
+    // Avoiding deepcopy for pCounterBufferOffsets
+    local_pCounterBufferOffsets = (VkDeviceSize*)pCounterBufferOffsets;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pCounterBuffers)
+        {
+            if (((counterBufferCount)))
+            {
+                *countPtr += ((counterBufferCount)) * 8;
+            }
+        }
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pCounterBufferOffsets)
+        {
+            *countPtr += ((counterBufferCount)) * sizeof(VkDeviceSize);
+        }
+    }
+    uint32_t packetSize_vkCmdEndTransformFeedbackEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdEndTransformFeedbackEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdEndTransformFeedbackEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdEndTransformFeedbackEXT = OP_vkCmdEndTransformFeedbackEXT;
+    memcpy(streamPtr, &opcode_vkCmdEndTransformFeedbackEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdEndTransformFeedbackEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstCounterBuffer, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_counterBufferCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)local_pCounterBuffers;
+    memcpy((*streamPtrPtr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pCounterBuffers)
+    {
+        if (((counterBufferCount)))
+        {
+            uint8_t* cgen_var_0_0_ptr = (uint8_t*)(*streamPtrPtr);
+            for (uint32_t k = 0; k < ((counterBufferCount)); ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkBuffer(local_pCounterBuffers[k]);
+                memcpy(cgen_var_0_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+            *streamPtrPtr += 8 * ((counterBufferCount));
+        }
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pCounterBufferOffsets;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pCounterBufferOffsets)
+    {
+        memcpy(*streamPtrPtr, (VkDeviceSize*)local_pCounterBufferOffsets, ((counterBufferCount)) * sizeof(VkDeviceSize));
+        *streamPtrPtr += ((counterBufferCount)) * sizeof(VkDeviceSize);
+    }
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdBeginQueryIndexedEXT(
+    VkCommandBuffer commandBuffer,
+    VkQueryPool queryPool,
+    uint32_t query,
+    VkQueryControlFlags flags,
+    uint32_t index,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkQueryPool local_queryPool;
+    uint32_t local_query;
+    VkQueryControlFlags local_flags;
+    uint32_t local_index;
+    local_commandBuffer = commandBuffer;
+    local_queryPool = queryPool;
+    local_query = query;
+    local_flags = flags;
+    local_index = index;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(VkQueryControlFlags);
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkCmdBeginQueryIndexedEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBeginQueryIndexedEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBeginQueryIndexedEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdBeginQueryIndexedEXT = OP_vkCmdBeginQueryIndexedEXT;
+    memcpy(streamPtr, &opcode_vkCmdBeginQueryIndexedEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBeginQueryIndexedEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueryPool((*&local_queryPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_query, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (VkQueryControlFlags*)&local_flags, sizeof(VkQueryControlFlags));
+    *streamPtrPtr += sizeof(VkQueryControlFlags);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_index, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdEndQueryIndexedEXT(
+    VkCommandBuffer commandBuffer,
+    VkQueryPool queryPool,
+    uint32_t query,
+    uint32_t index,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkQueryPool local_queryPool;
+    uint32_t local_query;
+    uint32_t local_index;
+    local_commandBuffer = commandBuffer;
+    local_queryPool = queryPool;
+    local_query = query;
+    local_index = index;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkCmdEndQueryIndexedEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdEndQueryIndexedEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdEndQueryIndexedEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdEndQueryIndexedEXT = OP_vkCmdEndQueryIndexedEXT;
+    memcpy(streamPtr, &opcode_vkCmdEndQueryIndexedEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdEndQueryIndexedEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueryPool((*&local_queryPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_query, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_index, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdDrawIndirectByteCountEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t instanceCount,
+    uint32_t firstInstance,
+    VkBuffer counterBuffer,
+    VkDeviceSize counterBufferOffset,
+    uint32_t counterOffset,
+    uint32_t vertexStride,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    uint32_t local_instanceCount;
+    uint32_t local_firstInstance;
+    VkBuffer local_counterBuffer;
+    VkDeviceSize local_counterBufferOffset;
+    uint32_t local_counterOffset;
+    uint32_t local_vertexStride;
+    local_commandBuffer = commandBuffer;
+    local_instanceCount = instanceCount;
+    local_firstInstance = firstInstance;
+    local_counterBuffer = counterBuffer;
+    local_counterBufferOffset = counterBufferOffset;
+    local_counterOffset = counterOffset;
+    local_vertexStride = vertexStride;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkCmdDrawIndirectByteCountEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDrawIndirectByteCountEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDrawIndirectByteCountEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdDrawIndirectByteCountEXT = OP_vkCmdDrawIndirectByteCountEXT;
+    memcpy(streamPtr, &opcode_vkCmdDrawIndirectByteCountEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDrawIndirectByteCountEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_instanceCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstInstance, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_counterBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_counterBufferOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_counterOffset, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_vertexStride, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
+#ifdef VK_NVX_image_view_handle
+uint32_t VkEncoder::vkGetImageViewHandleNVX(
+    VkDevice device,
+    const VkImageViewHandleInfoNVX* pInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkImageViewHandleInfoNVX* local_pInfo;
+    local_device = device;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkImageViewHandleInfoNVX*)pool->alloc(sizeof(const VkImageViewHandleInfoNVX));
+        deepcopy_VkImageViewHandleInfoNVX(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkImageViewHandleInfoNVX*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkImageViewHandleInfoNVX(sResourceTracker, (VkImageViewHandleInfoNVX*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkImageViewHandleInfoNVX(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageViewHandleInfoNVX*)(local_pInfo), countPtr);
+    }
+    uint32_t packetSize_vkGetImageViewHandleNVX = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetImageViewHandleNVX);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetImageViewHandleNVX = OP_vkGetImageViewHandleNVX;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetImageViewHandleNVX, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetImageViewHandleNVX, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkImageViewHandleInfoNVX(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageViewHandleInfoNVX*)(local_pInfo), streamPtrPtr);
+    uint32_t vkGetImageViewHandleNVX_uint32_t_return = (uint32_t)0;
+    stream->read(&vkGetImageViewHandleNVX_uint32_t_return, sizeof(uint32_t));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetImageViewHandleNVX_uint32_t_return;
+}
+
+VkResult VkEncoder::vkGetImageViewAddressNVX(
+    VkDevice device,
+    VkImageView imageView,
+    VkImageViewAddressPropertiesNVX* pProperties,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkImageView local_imageView;
+    local_device = device;
+    local_imageView = imageView;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        count_VkImageViewAddressPropertiesNVX(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageViewAddressPropertiesNVX*)(pProperties), countPtr);
+    }
+    uint32_t packetSize_vkGetImageViewAddressNVX = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetImageViewAddressNVX);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetImageViewAddressNVX = OP_vkGetImageViewAddressNVX;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetImageViewAddressNVX, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetImageViewAddressNVX, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImageView((*&local_imageView));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkImageViewAddressPropertiesNVX(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageViewAddressPropertiesNVX*)(pProperties), streamPtrPtr);
+    unmarshal_VkImageViewAddressPropertiesNVX(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageViewAddressPropertiesNVX*)(pProperties));
+    if (pProperties)
+    {
+        transform_fromhost_VkImageViewAddressPropertiesNVX(sResourceTracker, (VkImageViewAddressPropertiesNVX*)(pProperties));
+    }
+    VkResult vkGetImageViewAddressNVX_VkResult_return = (VkResult)0;
+    stream->read(&vkGetImageViewAddressNVX_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetImageViewAddressNVX_VkResult_return;
+}
+
+#endif
 #ifdef VK_AMD_draw_indirect_count
 void VkEncoder::vkCmdDrawIndirectCountAMD(
     VkCommandBuffer commandBuffer,
@@ -18767,16 +23878,14 @@
     VkBuffer countBuffer,
     VkDeviceSize countBufferOffset,
     uint32_t maxDrawCount,
-    uint32_t stride)
+    uint32_t stride,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdDrawIndirectCountAMD encode");
-    mImpl->log("start vkCmdDrawIndirectCountAMD");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkBuffer local_buffer;
     VkDeviceSize local_offset;
@@ -18791,43 +23900,57 @@
     local_countBufferOffset = countBufferOffset;
     local_maxDrawCount = maxDrawCount;
     local_stride = stride;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1227;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1227, 1);
-        countingStream->write((uint64_t*)&cgen_var_1227, 1 * 8);
-        uint64_t cgen_var_1228;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_1228, 1);
-        countingStream->write((uint64_t*)&cgen_var_1228, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
-        uint64_t cgen_var_1229;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_countBuffer, &cgen_var_1229, 1);
-        countingStream->write((uint64_t*)&cgen_var_1229, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_countBufferOffset, sizeof(VkDeviceSize));
-        countingStream->write((uint32_t*)&local_maxDrawCount, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_stride, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdDrawIndirectCountAMD = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdDrawIndirectCountAMD = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDrawIndirectCountAMD -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDrawIndirectCountAMD);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdDrawIndirectCountAMD = OP_vkCmdDrawIndirectCountAMD;
-    stream->write(&opcode_vkCmdDrawIndirectCountAMD, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdDrawIndirectCountAMD, sizeof(uint32_t));
-    uint64_t cgen_var_1230;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1230, 1);
-    stream->write((uint64_t*)&cgen_var_1230, 1 * 8);
-    uint64_t cgen_var_1231;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_1231, 1);
-    stream->write((uint64_t*)&cgen_var_1231, 1 * 8);
-    stream->write((VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
-    uint64_t cgen_var_1232;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_countBuffer, &cgen_var_1232, 1);
-    stream->write((uint64_t*)&cgen_var_1232, 1 * 8);
-    stream->write((VkDeviceSize*)&local_countBufferOffset, sizeof(VkDeviceSize));
-    stream->write((uint32_t*)&local_maxDrawCount, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_stride, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdDrawIndirectCountAMD readParams");
-    AEMU_SCOPED_TRACE("vkCmdDrawIndirectCountAMD returnUnmarshal");
-    mImpl->log("finish vkCmdDrawIndirectCountAMD");;
+    memcpy(streamPtr, &opcode_vkCmdDrawIndirectCountAMD, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDrawIndirectCountAMD, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_buffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&local_countBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_countBufferOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_maxDrawCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_stride, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdDrawIndexedIndirectCountAMD(
@@ -18837,16 +23960,14 @@
     VkBuffer countBuffer,
     VkDeviceSize countBufferOffset,
     uint32_t maxDrawCount,
-    uint32_t stride)
+    uint32_t stride,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdDrawIndexedIndirectCountAMD encode");
-    mImpl->log("start vkCmdDrawIndexedIndirectCountAMD");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkBuffer local_buffer;
     VkDeviceSize local_offset;
@@ -18861,43 +23982,57 @@
     local_countBufferOffset = countBufferOffset;
     local_maxDrawCount = maxDrawCount;
     local_stride = stride;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1233;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1233, 1);
-        countingStream->write((uint64_t*)&cgen_var_1233, 1 * 8);
-        uint64_t cgen_var_1234;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_1234, 1);
-        countingStream->write((uint64_t*)&cgen_var_1234, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
-        uint64_t cgen_var_1235;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_countBuffer, &cgen_var_1235, 1);
-        countingStream->write((uint64_t*)&cgen_var_1235, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_countBufferOffset, sizeof(VkDeviceSize));
-        countingStream->write((uint32_t*)&local_maxDrawCount, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_stride, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdDrawIndexedIndirectCountAMD = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdDrawIndexedIndirectCountAMD = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDrawIndexedIndirectCountAMD -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDrawIndexedIndirectCountAMD);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdDrawIndexedIndirectCountAMD = OP_vkCmdDrawIndexedIndirectCountAMD;
-    stream->write(&opcode_vkCmdDrawIndexedIndirectCountAMD, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdDrawIndexedIndirectCountAMD, sizeof(uint32_t));
-    uint64_t cgen_var_1236;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1236, 1);
-    stream->write((uint64_t*)&cgen_var_1236, 1 * 8);
-    uint64_t cgen_var_1237;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_1237, 1);
-    stream->write((uint64_t*)&cgen_var_1237, 1 * 8);
-    stream->write((VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
-    uint64_t cgen_var_1238;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_countBuffer, &cgen_var_1238, 1);
-    stream->write((uint64_t*)&cgen_var_1238, 1 * 8);
-    stream->write((VkDeviceSize*)&local_countBufferOffset, sizeof(VkDeviceSize));
-    stream->write((uint32_t*)&local_maxDrawCount, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_stride, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdDrawIndexedIndirectCountAMD readParams");
-    AEMU_SCOPED_TRACE("vkCmdDrawIndexedIndirectCountAMD returnUnmarshal");
-    mImpl->log("finish vkCmdDrawIndexedIndirectCountAMD");;
+    memcpy(streamPtr, &opcode_vkCmdDrawIndexedIndirectCountAMD, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDrawIndexedIndirectCountAMD, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_buffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&local_countBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_countBufferOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_maxDrawCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_stride, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
@@ -18916,16 +24051,14 @@
     VkShaderStageFlagBits shaderStage,
     VkShaderInfoTypeAMD infoType,
     size_t* pInfoSize,
-    void* pInfo)
+    void* pInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetShaderInfoAMD encode");
-    mImpl->log("start vkGetShaderInfoAMD");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkPipeline local_pipeline;
     VkShaderStageFlagBits local_shaderStage;
@@ -18934,61 +24067,73 @@
     local_pipeline = pipeline;
     local_shaderStage = shaderStage;
     local_infoType = infoType;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1239;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1239, 1);
-        countingStream->write((uint64_t*)&cgen_var_1239, 1 * 8);
-        uint64_t cgen_var_1240;
-        countingStream->handleMapping()->mapHandles_VkPipeline_u64(&local_pipeline, &cgen_var_1240, 1);
-        countingStream->write((uint64_t*)&cgen_var_1240, 1 * 8);
-        countingStream->write((VkShaderStageFlagBits*)&local_shaderStage, sizeof(VkShaderStageFlagBits));
-        countingStream->write((VkShaderInfoTypeAMD*)&local_infoType, sizeof(VkShaderInfoTypeAMD));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkShaderStageFlagBits);
+        *countPtr += sizeof(VkShaderInfoTypeAMD);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1241 = (uint64_t)(uintptr_t)pInfoSize;
-        countingStream->putBe64(cgen_var_1241);
+        *countPtr += 8;
         if (pInfoSize)
         {
-            uint64_t cgen_var_1242 = (uint64_t)(*pInfoSize);
-            countingStream->putBe64(cgen_var_1242);
+            *countPtr += 8;
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_1243 = (uint64_t)(uintptr_t)pInfo;
-        countingStream->putBe64(cgen_var_1243);
+        *countPtr += 8;
         if (pInfo)
         {
-            countingStream->write((void*)pInfo, (*(pInfoSize)) * sizeof(uint8_t));
+            if (pInfoSize)
+            {
+                *countPtr += (*(pInfoSize)) * sizeof(uint8_t);
+            }
         }
     }
-    uint32_t packetSize_vkGetShaderInfoAMD = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetShaderInfoAMD = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetShaderInfoAMD);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetShaderInfoAMD = OP_vkGetShaderInfoAMD;
-    stream->write(&opcode_vkGetShaderInfoAMD, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetShaderInfoAMD, sizeof(uint32_t));
-    uint64_t cgen_var_1244;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1244, 1);
-    stream->write((uint64_t*)&cgen_var_1244, 1 * 8);
-    uint64_t cgen_var_1245;
-    stream->handleMapping()->mapHandles_VkPipeline_u64(&local_pipeline, &cgen_var_1245, 1);
-    stream->write((uint64_t*)&cgen_var_1245, 1 * 8);
-    stream->write((VkShaderStageFlagBits*)&local_shaderStage, sizeof(VkShaderStageFlagBits));
-    stream->write((VkShaderInfoTypeAMD*)&local_infoType, sizeof(VkShaderInfoTypeAMD));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetShaderInfoAMD, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetShaderInfoAMD, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipeline((*&local_pipeline));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkShaderStageFlagBits*)&local_shaderStage, sizeof(VkShaderStageFlagBits));
+    *streamPtrPtr += sizeof(VkShaderStageFlagBits);
+    memcpy(*streamPtrPtr, (VkShaderInfoTypeAMD*)&local_infoType, sizeof(VkShaderInfoTypeAMD));
+    *streamPtrPtr += sizeof(VkShaderInfoTypeAMD);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1246 = (uint64_t)(uintptr_t)pInfoSize;
-    stream->putBe64(cgen_var_1246);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pInfoSize;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pInfoSize)
     {
-        uint64_t cgen_var_1247 = (uint64_t)(*pInfoSize);
-        stream->putBe64(cgen_var_1247);
+        uint64_t cgen_var_2_0 = (uint64_t)(*pInfoSize);
+        memcpy((*streamPtrPtr), &cgen_var_2_0, 8);
+        android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+        *streamPtrPtr += 8;
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1248 = (uint64_t)(uintptr_t)pInfo;
-    stream->putBe64(cgen_var_1248);
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)pInfo;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pInfo)
     {
-        stream->write((void*)pInfo, (*(pInfoSize)) * sizeof(uint8_t));
+        memcpy(*streamPtrPtr, (void*)pInfo, (*(pInfoSize)) * sizeof(uint8_t));
+        *streamPtrPtr += (*(pInfoSize)) * sizeof(uint8_t);
     }
-    AEMU_SCOPED_TRACE("vkGetShaderInfoAMD readParams");
     // WARNING PTR CHECK
     size_t* check_pInfoSize;
     check_pInfoSize = (size_t*)(uintptr_t)stream->getBe64();
@@ -19011,19 +24156,120 @@
         }
         stream->read((void*)pInfo, (*(pInfoSize)) * sizeof(uint8_t));
     }
-    AEMU_SCOPED_TRACE("vkGetShaderInfoAMD returnUnmarshal");
     VkResult vkGetShaderInfoAMD_VkResult_return = (VkResult)0;
     stream->read(&vkGetShaderInfoAMD_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetShaderInfoAMD");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetShaderInfoAMD_VkResult_return;
 }
 
 #endif
 #ifdef VK_AMD_shader_image_load_store_lod
 #endif
+#ifdef VK_GGP_stream_descriptor_surface
+VkResult VkEncoder::vkCreateStreamDescriptorSurfaceGGP(
+    VkInstance instance,
+    const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkInstance local_instance;
+    VkStreamDescriptorSurfaceCreateInfoGGP* local_pCreateInfo;
+    VkAllocationCallbacks* local_pAllocator;
+    local_instance = instance;
+    local_pCreateInfo = nullptr;
+    if (pCreateInfo)
+    {
+        local_pCreateInfo = (VkStreamDescriptorSurfaceCreateInfoGGP*)pool->alloc(sizeof(const VkStreamDescriptorSurfaceCreateInfoGGP));
+        deepcopy_VkStreamDescriptorSurfaceCreateInfoGGP(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkStreamDescriptorSurfaceCreateInfoGGP*)(local_pCreateInfo));
+    }
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pCreateInfo)
+    {
+        transform_tohost_VkStreamDescriptorSurfaceCreateInfoGGP(sResourceTracker, (VkStreamDescriptorSurfaceCreateInfoGGP*)(local_pCreateInfo));
+    }
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkStreamDescriptorSurfaceCreateInfoGGP(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStreamDescriptorSurfaceCreateInfoGGP*)(local_pCreateInfo), countPtr);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+        uint64_t cgen_var_1;
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkCreateStreamDescriptorSurfaceGGP = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateStreamDescriptorSurfaceGGP);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCreateStreamDescriptorSurfaceGGP = OP_vkCreateStreamDescriptorSurfaceGGP;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateStreamDescriptorSurfaceGGP, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateStreamDescriptorSurfaceGGP, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkStreamDescriptorSurfaceCreateInfoGGP(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStreamDescriptorSurfaceCreateInfoGGP*)(local_pCreateInfo), streamPtrPtr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSurface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_3, (VkSurfaceKHR*)pSurface, 1);
+    VkResult vkCreateStreamDescriptorSurfaceGGP_VkResult_return = (VkResult)0;
+    stream->read(&vkCreateStreamDescriptorSurfaceGGP_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCreateStreamDescriptorSurfaceGGP_VkResult_return;
+}
+
+#endif
+#ifdef VK_NV_corner_sampled_image
+#endif
 #ifdef VK_IMG_format_pvrtc
 #endif
 #ifdef VK_NV_external_memory_capabilities
@@ -19035,16 +24281,14 @@
     VkImageUsageFlags usage,
     VkImageCreateFlags flags,
     VkExternalMemoryHandleTypeFlagsNV externalHandleType,
-    VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties)
+    VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalImageFormatPropertiesNV encode");
-    mImpl->log("start vkGetPhysicalDeviceExternalImageFormatPropertiesNV");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkFormat local_format;
     VkImageType local_type;
@@ -19059,47 +24303,58 @@
     local_usage = usage;
     local_flags = flags;
     local_externalHandleType = externalHandleType;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1252;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1252, 1);
-        countingStream->write((uint64_t*)&cgen_var_1252, 1 * 8);
-        countingStream->write((VkFormat*)&local_format, sizeof(VkFormat));
-        countingStream->write((VkImageType*)&local_type, sizeof(VkImageType));
-        countingStream->write((VkImageTiling*)&local_tiling, sizeof(VkImageTiling));
-        countingStream->write((VkImageUsageFlags*)&local_usage, sizeof(VkImageUsageFlags));
-        countingStream->write((VkImageCreateFlags*)&local_flags, sizeof(VkImageCreateFlags));
-        countingStream->write((VkExternalMemoryHandleTypeFlagsNV*)&local_externalHandleType, sizeof(VkExternalMemoryHandleTypeFlagsNV));
-        marshal_VkExternalImageFormatPropertiesNV(countingStream, (VkExternalImageFormatPropertiesNV*)(pExternalImageFormatProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkFormat);
+        *countPtr += sizeof(VkImageType);
+        *countPtr += sizeof(VkImageTiling);
+        *countPtr += sizeof(VkImageUsageFlags);
+        *countPtr += sizeof(VkImageCreateFlags);
+        *countPtr += sizeof(VkExternalMemoryHandleTypeFlagsNV);
+        count_VkExternalImageFormatPropertiesNV(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalImageFormatPropertiesNV*)(pExternalImageFormatProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceExternalImageFormatPropertiesNV = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceExternalImageFormatPropertiesNV = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceExternalImageFormatPropertiesNV);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceExternalImageFormatPropertiesNV = OP_vkGetPhysicalDeviceExternalImageFormatPropertiesNV;
-    stream->write(&opcode_vkGetPhysicalDeviceExternalImageFormatPropertiesNV, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceExternalImageFormatPropertiesNV, sizeof(uint32_t));
-    uint64_t cgen_var_1253;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1253, 1);
-    stream->write((uint64_t*)&cgen_var_1253, 1 * 8);
-    stream->write((VkFormat*)&local_format, sizeof(VkFormat));
-    stream->write((VkImageType*)&local_type, sizeof(VkImageType));
-    stream->write((VkImageTiling*)&local_tiling, sizeof(VkImageTiling));
-    stream->write((VkImageUsageFlags*)&local_usage, sizeof(VkImageUsageFlags));
-    stream->write((VkImageCreateFlags*)&local_flags, sizeof(VkImageCreateFlags));
-    stream->write((VkExternalMemoryHandleTypeFlagsNV*)&local_externalHandleType, sizeof(VkExternalMemoryHandleTypeFlagsNV));
-    marshal_VkExternalImageFormatPropertiesNV(stream, (VkExternalImageFormatPropertiesNV*)(pExternalImageFormatProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalImageFormatPropertiesNV readParams");
-    unmarshal_VkExternalImageFormatPropertiesNV(stream, (VkExternalImageFormatPropertiesNV*)(pExternalImageFormatProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceExternalImageFormatPropertiesNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceExternalImageFormatPropertiesNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkFormat*)&local_format, sizeof(VkFormat));
+    *streamPtrPtr += sizeof(VkFormat);
+    memcpy(*streamPtrPtr, (VkImageType*)&local_type, sizeof(VkImageType));
+    *streamPtrPtr += sizeof(VkImageType);
+    memcpy(*streamPtrPtr, (VkImageTiling*)&local_tiling, sizeof(VkImageTiling));
+    *streamPtrPtr += sizeof(VkImageTiling);
+    memcpy(*streamPtrPtr, (VkImageUsageFlags*)&local_usage, sizeof(VkImageUsageFlags));
+    *streamPtrPtr += sizeof(VkImageUsageFlags);
+    memcpy(*streamPtrPtr, (VkImageCreateFlags*)&local_flags, sizeof(VkImageCreateFlags));
+    *streamPtrPtr += sizeof(VkImageCreateFlags);
+    memcpy(*streamPtrPtr, (VkExternalMemoryHandleTypeFlagsNV*)&local_externalHandleType, sizeof(VkExternalMemoryHandleTypeFlagsNV));
+    *streamPtrPtr += sizeof(VkExternalMemoryHandleTypeFlagsNV);
+    reservedmarshal_VkExternalImageFormatPropertiesNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalImageFormatPropertiesNV*)(pExternalImageFormatProperties), streamPtrPtr);
+    unmarshal_VkExternalImageFormatPropertiesNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkExternalImageFormatPropertiesNV*)(pExternalImageFormatProperties));
     if (pExternalImageFormatProperties)
     {
-        transform_fromhost_VkExternalImageFormatPropertiesNV(mImpl->resources(), (VkExternalImageFormatPropertiesNV*)(pExternalImageFormatProperties));
+        transform_fromhost_VkExternalImageFormatPropertiesNV(sResourceTracker, (VkExternalImageFormatPropertiesNV*)(pExternalImageFormatProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalImageFormatPropertiesNV returnUnmarshal");
     VkResult vkGetPhysicalDeviceExternalImageFormatPropertiesNV_VkResult_return = (VkResult)0;
     stream->read(&vkGetPhysicalDeviceExternalImageFormatPropertiesNV_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceExternalImageFormatPropertiesNV");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceExternalImageFormatPropertiesNV_VkResult_return;
 }
 
@@ -19111,56 +24366,61 @@
     VkDevice device,
     VkDeviceMemory memory,
     VkExternalMemoryHandleTypeFlagsNV handleType,
-    HANDLE* pHandle)
+    HANDLE* pHandle,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandleNV encode");
-    mImpl->log("start vkGetMemoryWin32HandleNV");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDeviceMemory local_memory;
     VkExternalMemoryHandleTypeFlagsNV local_handleType;
     local_device = device;
     local_memory = memory;
     local_handleType = handleType;
-    mImpl->resources()->deviceMemoryTransform_tohost((VkDeviceMemory*)&local_memory, 1, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
-    countingStream->rewind();
+    sResourceTracker->deviceMemoryTransform_tohost((VkDeviceMemory*)&local_memory, 1, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1254;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1254, 1);
-        countingStream->write((uint64_t*)&cgen_var_1254, 1 * 8);
-        uint64_t cgen_var_1255;
-        countingStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&local_memory, &cgen_var_1255, 1);
-        countingStream->write((uint64_t*)&cgen_var_1255, 1 * 8);
-        countingStream->write((VkExternalMemoryHandleTypeFlagsNV*)&local_handleType, sizeof(VkExternalMemoryHandleTypeFlagsNV));
-        countingStream->write((HANDLE*)pHandle, sizeof(HANDLE));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkExternalMemoryHandleTypeFlagsNV);
+        *countPtr += sizeof(HANDLE);
     }
-    uint32_t packetSize_vkGetMemoryWin32HandleNV = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetMemoryWin32HandleNV = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetMemoryWin32HandleNV);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetMemoryWin32HandleNV = OP_vkGetMemoryWin32HandleNV;
-    stream->write(&opcode_vkGetMemoryWin32HandleNV, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetMemoryWin32HandleNV, sizeof(uint32_t));
-    uint64_t cgen_var_1256;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1256, 1);
-    stream->write((uint64_t*)&cgen_var_1256, 1 * 8);
-    uint64_t cgen_var_1257;
-    stream->handleMapping()->mapHandles_VkDeviceMemory_u64(&local_memory, &cgen_var_1257, 1);
-    stream->write((uint64_t*)&cgen_var_1257, 1 * 8);
-    stream->write((VkExternalMemoryHandleTypeFlagsNV*)&local_handleType, sizeof(VkExternalMemoryHandleTypeFlagsNV));
-    stream->write((HANDLE*)pHandle, sizeof(HANDLE));
-    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandleNV readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetMemoryWin32HandleNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetMemoryWin32HandleNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDeviceMemory((*&local_memory));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkExternalMemoryHandleTypeFlagsNV*)&local_handleType, sizeof(VkExternalMemoryHandleTypeFlagsNV));
+    *streamPtrPtr += sizeof(VkExternalMemoryHandleTypeFlagsNV);
+    memcpy(*streamPtrPtr, (HANDLE*)pHandle, sizeof(HANDLE));
+    *streamPtrPtr += sizeof(HANDLE);
     stream->read((HANDLE*)pHandle, sizeof(HANDLE));
-    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandleNV returnUnmarshal");
     VkResult vkGetMemoryWin32HandleNV_VkResult_return = (VkResult)0;
     stream->read(&vkGetMemoryWin32HandleNV_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetMemoryWin32HandleNV");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetMemoryWin32HandleNV_VkResult_return;
 }
 
@@ -19174,16 +24434,14 @@
     VkInstance instance,
     const VkViSurfaceCreateInfoNN* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
+    VkSurfaceKHR* pSurface,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateViSurfaceNN encode");
-    mImpl->log("start vkCreateViSurfaceNN");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     VkViSurfaceCreateInfoNN* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -19192,72 +24450,78 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkViSurfaceCreateInfoNN*)pool->alloc(sizeof(const VkViSurfaceCreateInfoNN));
-        deepcopy_VkViSurfaceCreateInfoNN(pool, pCreateInfo, (VkViSurfaceCreateInfoNN*)(local_pCreateInfo));
+        deepcopy_VkViSurfaceCreateInfoNN(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkViSurfaceCreateInfoNN*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkViSurfaceCreateInfoNN(mImpl->resources(), (VkViSurfaceCreateInfoNN*)(local_pCreateInfo));
+        transform_tohost_VkViSurfaceCreateInfoNN(sResourceTracker, (VkViSurfaceCreateInfoNN*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1258;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1258, 1);
-        countingStream->write((uint64_t*)&cgen_var_1258, 1 * 8);
-        marshal_VkViSurfaceCreateInfoNN(countingStream, (VkViSurfaceCreateInfoNN*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkViSurfaceCreateInfoNN(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkViSurfaceCreateInfoNN*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1259 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1259);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_1260;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_1260, 1);
-        countingStream->write((uint64_t*)&cgen_var_1260, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateViSurfaceNN = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateViSurfaceNN = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateViSurfaceNN);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateViSurfaceNN = OP_vkCreateViSurfaceNN;
-    stream->write(&opcode_vkCreateViSurfaceNN, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateViSurfaceNN, sizeof(uint32_t));
-    uint64_t cgen_var_1261;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1261, 1);
-    stream->write((uint64_t*)&cgen_var_1261, 1 * 8);
-    marshal_VkViSurfaceCreateInfoNN(stream, (VkViSurfaceCreateInfoNN*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateViSurfaceNN, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateViSurfaceNN, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkViSurfaceCreateInfoNN(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkViSurfaceCreateInfoNN*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1262 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1262);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_1263;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_1263, 1);
-    stream->write((uint64_t*)&cgen_var_1263, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateViSurfaceNN readParams");
-    uint64_t cgen_var_1264;
-    stream->read((uint64_t*)&cgen_var_1264, 8);
-    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_1264, (VkSurfaceKHR*)pSurface, 1);
-    AEMU_SCOPED_TRACE("vkCreateViSurfaceNN returnUnmarshal");
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSurface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_3, (VkSurfaceKHR*)pSurface, 1);
     VkResult vkCreateViSurfaceNN_VkResult_return = (VkResult)0;
     stream->read(&vkCreateViSurfaceNN_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateViSurfaceNN");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateViSurfaceNN_VkResult_return;
 }
 
@@ -19266,19 +24530,21 @@
 #endif
 #ifdef VK_EXT_shader_subgroup_vote
 #endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+#endif
+#ifdef VK_EXT_astc_decode_mode
+#endif
 #ifdef VK_EXT_conditional_rendering
 void VkEncoder::vkCmdBeginConditionalRenderingEXT(
     VkCommandBuffer commandBuffer,
-    const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin)
+    const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdBeginConditionalRenderingEXT encode");
-    mImpl->log("start vkCmdBeginConditionalRenderingEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkConditionalRenderingBeginInfoEXT* local_pConditionalRenderingBegin;
     local_commandBuffer = commandBuffer;
@@ -19286,666 +24552,81 @@
     if (pConditionalRenderingBegin)
     {
         local_pConditionalRenderingBegin = (VkConditionalRenderingBeginInfoEXT*)pool->alloc(sizeof(const VkConditionalRenderingBeginInfoEXT));
-        deepcopy_VkConditionalRenderingBeginInfoEXT(pool, pConditionalRenderingBegin, (VkConditionalRenderingBeginInfoEXT*)(local_pConditionalRenderingBegin));
+        deepcopy_VkConditionalRenderingBeginInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pConditionalRenderingBegin, (VkConditionalRenderingBeginInfoEXT*)(local_pConditionalRenderingBegin));
     }
     if (local_pConditionalRenderingBegin)
     {
-        transform_tohost_VkConditionalRenderingBeginInfoEXT(mImpl->resources(), (VkConditionalRenderingBeginInfoEXT*)(local_pConditionalRenderingBegin));
+        transform_tohost_VkConditionalRenderingBeginInfoEXT(sResourceTracker, (VkConditionalRenderingBeginInfoEXT*)(local_pConditionalRenderingBegin));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1265;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1265, 1);
-        countingStream->write((uint64_t*)&cgen_var_1265, 1 * 8);
-        marshal_VkConditionalRenderingBeginInfoEXT(countingStream, (VkConditionalRenderingBeginInfoEXT*)(local_pConditionalRenderingBegin));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkConditionalRenderingBeginInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkConditionalRenderingBeginInfoEXT*)(local_pConditionalRenderingBegin), countPtr);
     }
-    uint32_t packetSize_vkCmdBeginConditionalRenderingEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdBeginConditionalRenderingEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBeginConditionalRenderingEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBeginConditionalRenderingEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdBeginConditionalRenderingEXT = OP_vkCmdBeginConditionalRenderingEXT;
-    stream->write(&opcode_vkCmdBeginConditionalRenderingEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdBeginConditionalRenderingEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1266;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1266, 1);
-    stream->write((uint64_t*)&cgen_var_1266, 1 * 8);
-    marshal_VkConditionalRenderingBeginInfoEXT(stream, (VkConditionalRenderingBeginInfoEXT*)(local_pConditionalRenderingBegin));
-    AEMU_SCOPED_TRACE("vkCmdBeginConditionalRenderingEXT readParams");
-    AEMU_SCOPED_TRACE("vkCmdBeginConditionalRenderingEXT returnUnmarshal");
-    mImpl->log("finish vkCmdBeginConditionalRenderingEXT");;
+    memcpy(streamPtr, &opcode_vkCmdBeginConditionalRenderingEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBeginConditionalRenderingEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkConditionalRenderingBeginInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkConditionalRenderingBeginInfoEXT*)(local_pConditionalRenderingBegin), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdEndConditionalRenderingEXT(
-    VkCommandBuffer commandBuffer)
+    VkCommandBuffer commandBuffer,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdEndConditionalRenderingEXT encode");
-    mImpl->log("start vkCmdEndConditionalRenderingEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     local_commandBuffer = commandBuffer;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1267;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1267, 1);
-        countingStream->write((uint64_t*)&cgen_var_1267, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkCmdEndConditionalRenderingEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdEndConditionalRenderingEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdEndConditionalRenderingEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdEndConditionalRenderingEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdEndConditionalRenderingEXT = OP_vkCmdEndConditionalRenderingEXT;
-    stream->write(&opcode_vkCmdEndConditionalRenderingEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdEndConditionalRenderingEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1268;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1268, 1);
-    stream->write((uint64_t*)&cgen_var_1268, 1 * 8);
-    AEMU_SCOPED_TRACE("vkCmdEndConditionalRenderingEXT readParams");
-    AEMU_SCOPED_TRACE("vkCmdEndConditionalRenderingEXT returnUnmarshal");
-    mImpl->log("finish vkCmdEndConditionalRenderingEXT");;
-}
-
-#endif
-#ifdef VK_NVX_device_generated_commands
-void VkEncoder::vkCmdProcessCommandsNVX(
-    VkCommandBuffer commandBuffer,
-    const VkCmdProcessCommandsInfoNVX* pProcessCommandsInfo)
-{
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdProcessCommandsNVX encode");
-    mImpl->log("start vkCmdProcessCommandsNVX");
-    auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
-    auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
-    VkCommandBuffer local_commandBuffer;
-    VkCmdProcessCommandsInfoNVX* local_pProcessCommandsInfo;
-    local_commandBuffer = commandBuffer;
-    local_pProcessCommandsInfo = nullptr;
-    if (pProcessCommandsInfo)
+    memcpy(streamPtr, &opcode_vkCmdEndConditionalRenderingEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdEndConditionalRenderingEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
     {
-        local_pProcessCommandsInfo = (VkCmdProcessCommandsInfoNVX*)pool->alloc(sizeof(const VkCmdProcessCommandsInfoNVX));
-        deepcopy_VkCmdProcessCommandsInfoNVX(pool, pProcessCommandsInfo, (VkCmdProcessCommandsInfoNVX*)(local_pProcessCommandsInfo));
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
     }
-    if (local_pProcessCommandsInfo)
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
     {
-        transform_tohost_VkCmdProcessCommandsInfoNVX(mImpl->resources(), (VkCmdProcessCommandsInfoNVX*)(local_pProcessCommandsInfo));
+        pool->freeAll();
+        stream->clearPool();
     }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1269;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1269, 1);
-        countingStream->write((uint64_t*)&cgen_var_1269, 1 * 8);
-        marshal_VkCmdProcessCommandsInfoNVX(countingStream, (VkCmdProcessCommandsInfoNVX*)(local_pProcessCommandsInfo));
-    }
-    uint32_t packetSize_vkCmdProcessCommandsNVX = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
-    uint32_t opcode_vkCmdProcessCommandsNVX = OP_vkCmdProcessCommandsNVX;
-    stream->write(&opcode_vkCmdProcessCommandsNVX, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdProcessCommandsNVX, sizeof(uint32_t));
-    uint64_t cgen_var_1270;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1270, 1);
-    stream->write((uint64_t*)&cgen_var_1270, 1 * 8);
-    marshal_VkCmdProcessCommandsInfoNVX(stream, (VkCmdProcessCommandsInfoNVX*)(local_pProcessCommandsInfo));
-    AEMU_SCOPED_TRACE("vkCmdProcessCommandsNVX readParams");
-    AEMU_SCOPED_TRACE("vkCmdProcessCommandsNVX returnUnmarshal");
-    mImpl->log("finish vkCmdProcessCommandsNVX");;
-}
-
-void VkEncoder::vkCmdReserveSpaceForCommandsNVX(
-    VkCommandBuffer commandBuffer,
-    const VkCmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo)
-{
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdReserveSpaceForCommandsNVX encode");
-    mImpl->log("start vkCmdReserveSpaceForCommandsNVX");
-    auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
-    auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
-    VkCommandBuffer local_commandBuffer;
-    VkCmdReserveSpaceForCommandsInfoNVX* local_pReserveSpaceInfo;
-    local_commandBuffer = commandBuffer;
-    local_pReserveSpaceInfo = nullptr;
-    if (pReserveSpaceInfo)
-    {
-        local_pReserveSpaceInfo = (VkCmdReserveSpaceForCommandsInfoNVX*)pool->alloc(sizeof(const VkCmdReserveSpaceForCommandsInfoNVX));
-        deepcopy_VkCmdReserveSpaceForCommandsInfoNVX(pool, pReserveSpaceInfo, (VkCmdReserveSpaceForCommandsInfoNVX*)(local_pReserveSpaceInfo));
-    }
-    if (local_pReserveSpaceInfo)
-    {
-        transform_tohost_VkCmdReserveSpaceForCommandsInfoNVX(mImpl->resources(), (VkCmdReserveSpaceForCommandsInfoNVX*)(local_pReserveSpaceInfo));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1271;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1271, 1);
-        countingStream->write((uint64_t*)&cgen_var_1271, 1 * 8);
-        marshal_VkCmdReserveSpaceForCommandsInfoNVX(countingStream, (VkCmdReserveSpaceForCommandsInfoNVX*)(local_pReserveSpaceInfo));
-    }
-    uint32_t packetSize_vkCmdReserveSpaceForCommandsNVX = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
-    uint32_t opcode_vkCmdReserveSpaceForCommandsNVX = OP_vkCmdReserveSpaceForCommandsNVX;
-    stream->write(&opcode_vkCmdReserveSpaceForCommandsNVX, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdReserveSpaceForCommandsNVX, sizeof(uint32_t));
-    uint64_t cgen_var_1272;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1272, 1);
-    stream->write((uint64_t*)&cgen_var_1272, 1 * 8);
-    marshal_VkCmdReserveSpaceForCommandsInfoNVX(stream, (VkCmdReserveSpaceForCommandsInfoNVX*)(local_pReserveSpaceInfo));
-    AEMU_SCOPED_TRACE("vkCmdReserveSpaceForCommandsNVX readParams");
-    AEMU_SCOPED_TRACE("vkCmdReserveSpaceForCommandsNVX returnUnmarshal");
-    mImpl->log("finish vkCmdReserveSpaceForCommandsNVX");;
-}
-
-VkResult VkEncoder::vkCreateIndirectCommandsLayoutNVX(
-    VkDevice device,
-    const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout)
-{
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateIndirectCommandsLayoutNVX encode");
-    mImpl->log("start vkCreateIndirectCommandsLayoutNVX");
-    auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
-    auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
-    VkDevice local_device;
-    VkIndirectCommandsLayoutCreateInfoNVX* local_pCreateInfo;
-    VkAllocationCallbacks* local_pAllocator;
-    local_device = device;
-    local_pCreateInfo = nullptr;
-    if (pCreateInfo)
-    {
-        local_pCreateInfo = (VkIndirectCommandsLayoutCreateInfoNVX*)pool->alloc(sizeof(const VkIndirectCommandsLayoutCreateInfoNVX));
-        deepcopy_VkIndirectCommandsLayoutCreateInfoNVX(pool, pCreateInfo, (VkIndirectCommandsLayoutCreateInfoNVX*)(local_pCreateInfo));
-    }
-    local_pAllocator = nullptr;
-    if (pAllocator)
-    {
-        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
-    }
-    local_pAllocator = nullptr;
-    if (local_pCreateInfo)
-    {
-        transform_tohost_VkIndirectCommandsLayoutCreateInfoNVX(mImpl->resources(), (VkIndirectCommandsLayoutCreateInfoNVX*)(local_pCreateInfo));
-    }
-    if (local_pAllocator)
-    {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1273;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1273, 1);
-        countingStream->write((uint64_t*)&cgen_var_1273, 1 * 8);
-        marshal_VkIndirectCommandsLayoutCreateInfoNVX(countingStream, (VkIndirectCommandsLayoutCreateInfoNVX*)(local_pCreateInfo));
-        // WARNING PTR CHECK
-        uint64_t cgen_var_1274 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1274);
-        if (local_pAllocator)
-        {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
-        }
-        uint64_t cgen_var_1275;
-        countingStream->handleMapping()->mapHandles_VkIndirectCommandsLayoutNVX_u64(pIndirectCommandsLayout, &cgen_var_1275, 1);
-        countingStream->write((uint64_t*)&cgen_var_1275, 8);
-    }
-    uint32_t packetSize_vkCreateIndirectCommandsLayoutNVX = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
-    uint32_t opcode_vkCreateIndirectCommandsLayoutNVX = OP_vkCreateIndirectCommandsLayoutNVX;
-    stream->write(&opcode_vkCreateIndirectCommandsLayoutNVX, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateIndirectCommandsLayoutNVX, sizeof(uint32_t));
-    uint64_t cgen_var_1276;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1276, 1);
-    stream->write((uint64_t*)&cgen_var_1276, 1 * 8);
-    marshal_VkIndirectCommandsLayoutCreateInfoNVX(stream, (VkIndirectCommandsLayoutCreateInfoNVX*)(local_pCreateInfo));
-    // WARNING PTR CHECK
-    uint64_t cgen_var_1277 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1277);
-    if (local_pAllocator)
-    {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
-    }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_1278;
-    stream->handleMapping()->mapHandles_VkIndirectCommandsLayoutNVX_u64(pIndirectCommandsLayout, &cgen_var_1278, 1);
-    stream->write((uint64_t*)&cgen_var_1278, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateIndirectCommandsLayoutNVX readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_1279;
-    stream->read((uint64_t*)&cgen_var_1279, 8);
-    stream->handleMapping()->mapHandles_u64_VkIndirectCommandsLayoutNVX(&cgen_var_1279, (VkIndirectCommandsLayoutNVX*)pIndirectCommandsLayout, 1);
-    stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateIndirectCommandsLayoutNVX returnUnmarshal");
-    VkResult vkCreateIndirectCommandsLayoutNVX_VkResult_return = (VkResult)0;
-    stream->read(&vkCreateIndirectCommandsLayoutNVX_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateIndirectCommandsLayoutNVX");;
-    return vkCreateIndirectCommandsLayoutNVX_VkResult_return;
-}
-
-void VkEncoder::vkDestroyIndirectCommandsLayoutNVX(
-    VkDevice device,
-    VkIndirectCommandsLayoutNVX indirectCommandsLayout,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyIndirectCommandsLayoutNVX encode");
-    mImpl->log("start vkDestroyIndirectCommandsLayoutNVX");
-    auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
-    auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
-    VkDevice local_device;
-    VkIndirectCommandsLayoutNVX local_indirectCommandsLayout;
-    VkAllocationCallbacks* local_pAllocator;
-    local_device = device;
-    local_indirectCommandsLayout = indirectCommandsLayout;
-    local_pAllocator = nullptr;
-    if (pAllocator)
-    {
-        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
-    }
-    local_pAllocator = nullptr;
-    if (local_pAllocator)
-    {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1280;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1280, 1);
-        countingStream->write((uint64_t*)&cgen_var_1280, 1 * 8);
-        uint64_t cgen_var_1281;
-        countingStream->handleMapping()->mapHandles_VkIndirectCommandsLayoutNVX_u64(&local_indirectCommandsLayout, &cgen_var_1281, 1);
-        countingStream->write((uint64_t*)&cgen_var_1281, 1 * 8);
-        // WARNING PTR CHECK
-        uint64_t cgen_var_1282 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1282);
-        if (local_pAllocator)
-        {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
-        }
-    }
-    uint32_t packetSize_vkDestroyIndirectCommandsLayoutNVX = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
-    uint32_t opcode_vkDestroyIndirectCommandsLayoutNVX = OP_vkDestroyIndirectCommandsLayoutNVX;
-    stream->write(&opcode_vkDestroyIndirectCommandsLayoutNVX, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyIndirectCommandsLayoutNVX, sizeof(uint32_t));
-    uint64_t cgen_var_1283;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1283, 1);
-    stream->write((uint64_t*)&cgen_var_1283, 1 * 8);
-    uint64_t cgen_var_1284;
-    stream->handleMapping()->mapHandles_VkIndirectCommandsLayoutNVX_u64(&local_indirectCommandsLayout, &cgen_var_1284, 1);
-    stream->write((uint64_t*)&cgen_var_1284, 1 * 8);
-    // WARNING PTR CHECK
-    uint64_t cgen_var_1285 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1285);
-    if (local_pAllocator)
-    {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
-    }
-    AEMU_SCOPED_TRACE("vkDestroyIndirectCommandsLayoutNVX readParams");
-    AEMU_SCOPED_TRACE("vkDestroyIndirectCommandsLayoutNVX returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkIndirectCommandsLayoutNVX((VkIndirectCommandsLayoutNVX*)&indirectCommandsLayout);
-    mImpl->log("finish vkDestroyIndirectCommandsLayoutNVX");;
-}
-
-VkResult VkEncoder::vkCreateObjectTableNVX(
-    VkDevice device,
-    const VkObjectTableCreateInfoNVX* pCreateInfo,
-    const VkAllocationCallbacks* pAllocator,
-    VkObjectTableNVX* pObjectTable)
-{
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateObjectTableNVX encode");
-    mImpl->log("start vkCreateObjectTableNVX");
-    auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
-    auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
-    VkDevice local_device;
-    VkObjectTableCreateInfoNVX* local_pCreateInfo;
-    VkAllocationCallbacks* local_pAllocator;
-    local_device = device;
-    local_pCreateInfo = nullptr;
-    if (pCreateInfo)
-    {
-        local_pCreateInfo = (VkObjectTableCreateInfoNVX*)pool->alloc(sizeof(const VkObjectTableCreateInfoNVX));
-        deepcopy_VkObjectTableCreateInfoNVX(pool, pCreateInfo, (VkObjectTableCreateInfoNVX*)(local_pCreateInfo));
-    }
-    local_pAllocator = nullptr;
-    if (pAllocator)
-    {
-        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
-    }
-    local_pAllocator = nullptr;
-    if (local_pCreateInfo)
-    {
-        transform_tohost_VkObjectTableCreateInfoNVX(mImpl->resources(), (VkObjectTableCreateInfoNVX*)(local_pCreateInfo));
-    }
-    if (local_pAllocator)
-    {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1286;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1286, 1);
-        countingStream->write((uint64_t*)&cgen_var_1286, 1 * 8);
-        marshal_VkObjectTableCreateInfoNVX(countingStream, (VkObjectTableCreateInfoNVX*)(local_pCreateInfo));
-        // WARNING PTR CHECK
-        uint64_t cgen_var_1287 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1287);
-        if (local_pAllocator)
-        {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
-        }
-        uint64_t cgen_var_1288;
-        countingStream->handleMapping()->mapHandles_VkObjectTableNVX_u64(pObjectTable, &cgen_var_1288, 1);
-        countingStream->write((uint64_t*)&cgen_var_1288, 8);
-    }
-    uint32_t packetSize_vkCreateObjectTableNVX = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
-    uint32_t opcode_vkCreateObjectTableNVX = OP_vkCreateObjectTableNVX;
-    stream->write(&opcode_vkCreateObjectTableNVX, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateObjectTableNVX, sizeof(uint32_t));
-    uint64_t cgen_var_1289;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1289, 1);
-    stream->write((uint64_t*)&cgen_var_1289, 1 * 8);
-    marshal_VkObjectTableCreateInfoNVX(stream, (VkObjectTableCreateInfoNVX*)(local_pCreateInfo));
-    // WARNING PTR CHECK
-    uint64_t cgen_var_1290 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1290);
-    if (local_pAllocator)
-    {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
-    }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_1291;
-    stream->handleMapping()->mapHandles_VkObjectTableNVX_u64(pObjectTable, &cgen_var_1291, 1);
-    stream->write((uint64_t*)&cgen_var_1291, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateObjectTableNVX readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_1292;
-    stream->read((uint64_t*)&cgen_var_1292, 8);
-    stream->handleMapping()->mapHandles_u64_VkObjectTableNVX(&cgen_var_1292, (VkObjectTableNVX*)pObjectTable, 1);
-    stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateObjectTableNVX returnUnmarshal");
-    VkResult vkCreateObjectTableNVX_VkResult_return = (VkResult)0;
-    stream->read(&vkCreateObjectTableNVX_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateObjectTableNVX");;
-    return vkCreateObjectTableNVX_VkResult_return;
-}
-
-void VkEncoder::vkDestroyObjectTableNVX(
-    VkDevice device,
-    VkObjectTableNVX objectTable,
-    const VkAllocationCallbacks* pAllocator)
-{
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyObjectTableNVX encode");
-    mImpl->log("start vkDestroyObjectTableNVX");
-    auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
-    auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
-    VkDevice local_device;
-    VkObjectTableNVX local_objectTable;
-    VkAllocationCallbacks* local_pAllocator;
-    local_device = device;
-    local_objectTable = objectTable;
-    local_pAllocator = nullptr;
-    if (pAllocator)
-    {
-        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
-    }
-    local_pAllocator = nullptr;
-    if (local_pAllocator)
-    {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1293;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1293, 1);
-        countingStream->write((uint64_t*)&cgen_var_1293, 1 * 8);
-        uint64_t cgen_var_1294;
-        countingStream->handleMapping()->mapHandles_VkObjectTableNVX_u64(&local_objectTable, &cgen_var_1294, 1);
-        countingStream->write((uint64_t*)&cgen_var_1294, 1 * 8);
-        // WARNING PTR CHECK
-        uint64_t cgen_var_1295 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1295);
-        if (local_pAllocator)
-        {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
-        }
-    }
-    uint32_t packetSize_vkDestroyObjectTableNVX = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
-    uint32_t opcode_vkDestroyObjectTableNVX = OP_vkDestroyObjectTableNVX;
-    stream->write(&opcode_vkDestroyObjectTableNVX, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyObjectTableNVX, sizeof(uint32_t));
-    uint64_t cgen_var_1296;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1296, 1);
-    stream->write((uint64_t*)&cgen_var_1296, 1 * 8);
-    uint64_t cgen_var_1297;
-    stream->handleMapping()->mapHandles_VkObjectTableNVX_u64(&local_objectTable, &cgen_var_1297, 1);
-    stream->write((uint64_t*)&cgen_var_1297, 1 * 8);
-    // WARNING PTR CHECK
-    uint64_t cgen_var_1298 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1298);
-    if (local_pAllocator)
-    {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
-    }
-    AEMU_SCOPED_TRACE("vkDestroyObjectTableNVX readParams");
-    AEMU_SCOPED_TRACE("vkDestroyObjectTableNVX returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkObjectTableNVX((VkObjectTableNVX*)&objectTable);
-    mImpl->log("finish vkDestroyObjectTableNVX");;
-}
-
-VkResult VkEncoder::vkRegisterObjectsNVX(
-    VkDevice device,
-    VkObjectTableNVX objectTable,
-    uint32_t objectCount,
-    const VkObjectTableEntryNVX* const* ppObjectTableEntries,
-    const uint32_t* pObjectIndices)
-{
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkRegisterObjectsNVX encode");
-    mImpl->log("start vkRegisterObjectsNVX");
-    auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
-    auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
-    VkDevice local_device;
-    VkObjectTableNVX local_objectTable;
-    uint32_t local_objectCount;
-    VkObjectTableEntryNVX** local_ppObjectTableEntries;
-    uint32_t* local_pObjectIndices;
-    local_device = device;
-    local_objectTable = objectTable;
-    local_objectCount = objectCount;
-    (void)ppObjectTableEntries;
-    local_pObjectIndices = nullptr;
-    if (pObjectIndices)
-    {
-        local_pObjectIndices = (uint32_t*)pool->dupArray(pObjectIndices, ((objectCount)) * sizeof(const uint32_t));
-    }
-    (void)local_ppObjectTableEntries;
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1299;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1299, 1);
-        countingStream->write((uint64_t*)&cgen_var_1299, 1 * 8);
-        uint64_t cgen_var_1300;
-        countingStream->handleMapping()->mapHandles_VkObjectTableNVX_u64(&local_objectTable, &cgen_var_1300, 1);
-        countingStream->write((uint64_t*)&cgen_var_1300, 1 * 8);
-        countingStream->write((uint32_t*)&local_objectCount, sizeof(uint32_t));
-        (void)local_ppObjectTableEntries;
-        countingStream->write((uint32_t*)local_pObjectIndices, ((objectCount)) * sizeof(uint32_t));
-    }
-    uint32_t packetSize_vkRegisterObjectsNVX = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
-    uint32_t opcode_vkRegisterObjectsNVX = OP_vkRegisterObjectsNVX;
-    stream->write(&opcode_vkRegisterObjectsNVX, sizeof(uint32_t));
-    stream->write(&packetSize_vkRegisterObjectsNVX, sizeof(uint32_t));
-    uint64_t cgen_var_1301;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1301, 1);
-    stream->write((uint64_t*)&cgen_var_1301, 1 * 8);
-    uint64_t cgen_var_1302;
-    stream->handleMapping()->mapHandles_VkObjectTableNVX_u64(&local_objectTable, &cgen_var_1302, 1);
-    stream->write((uint64_t*)&cgen_var_1302, 1 * 8);
-    stream->write((uint32_t*)&local_objectCount, sizeof(uint32_t));
-    (void)local_ppObjectTableEntries;
-    stream->write((uint32_t*)local_pObjectIndices, ((objectCount)) * sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkRegisterObjectsNVX readParams");
-    AEMU_SCOPED_TRACE("vkRegisterObjectsNVX returnUnmarshal");
-    VkResult vkRegisterObjectsNVX_VkResult_return = (VkResult)0;
-    stream->read(&vkRegisterObjectsNVX_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkRegisterObjectsNVX");;
-    return vkRegisterObjectsNVX_VkResult_return;
-}
-
-VkResult VkEncoder::vkUnregisterObjectsNVX(
-    VkDevice device,
-    VkObjectTableNVX objectTable,
-    uint32_t objectCount,
-    const VkObjectEntryTypeNVX* pObjectEntryTypes,
-    const uint32_t* pObjectIndices)
-{
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkUnregisterObjectsNVX encode");
-    mImpl->log("start vkUnregisterObjectsNVX");
-    auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
-    auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
-    VkDevice local_device;
-    VkObjectTableNVX local_objectTable;
-    uint32_t local_objectCount;
-    VkObjectEntryTypeNVX* local_pObjectEntryTypes;
-    uint32_t* local_pObjectIndices;
-    local_device = device;
-    local_objectTable = objectTable;
-    local_objectCount = objectCount;
-    local_pObjectEntryTypes = nullptr;
-    if (pObjectEntryTypes)
-    {
-        local_pObjectEntryTypes = (VkObjectEntryTypeNVX*)pool->dupArray(pObjectEntryTypes, ((objectCount)) * sizeof(const VkObjectEntryTypeNVX));
-    }
-    local_pObjectIndices = nullptr;
-    if (pObjectIndices)
-    {
-        local_pObjectIndices = (uint32_t*)pool->dupArray(pObjectIndices, ((objectCount)) * sizeof(const uint32_t));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1303;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1303, 1);
-        countingStream->write((uint64_t*)&cgen_var_1303, 1 * 8);
-        uint64_t cgen_var_1304;
-        countingStream->handleMapping()->mapHandles_VkObjectTableNVX_u64(&local_objectTable, &cgen_var_1304, 1);
-        countingStream->write((uint64_t*)&cgen_var_1304, 1 * 8);
-        countingStream->write((uint32_t*)&local_objectCount, sizeof(uint32_t));
-        countingStream->write((VkObjectEntryTypeNVX*)local_pObjectEntryTypes, ((objectCount)) * sizeof(VkObjectEntryTypeNVX));
-        countingStream->write((uint32_t*)local_pObjectIndices, ((objectCount)) * sizeof(uint32_t));
-    }
-    uint32_t packetSize_vkUnregisterObjectsNVX = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
-    uint32_t opcode_vkUnregisterObjectsNVX = OP_vkUnregisterObjectsNVX;
-    stream->write(&opcode_vkUnregisterObjectsNVX, sizeof(uint32_t));
-    stream->write(&packetSize_vkUnregisterObjectsNVX, sizeof(uint32_t));
-    uint64_t cgen_var_1305;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1305, 1);
-    stream->write((uint64_t*)&cgen_var_1305, 1 * 8);
-    uint64_t cgen_var_1306;
-    stream->handleMapping()->mapHandles_VkObjectTableNVX_u64(&local_objectTable, &cgen_var_1306, 1);
-    stream->write((uint64_t*)&cgen_var_1306, 1 * 8);
-    stream->write((uint32_t*)&local_objectCount, sizeof(uint32_t));
-    stream->write((VkObjectEntryTypeNVX*)local_pObjectEntryTypes, ((objectCount)) * sizeof(VkObjectEntryTypeNVX));
-    stream->write((uint32_t*)local_pObjectIndices, ((objectCount)) * sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkUnregisterObjectsNVX readParams");
-    AEMU_SCOPED_TRACE("vkUnregisterObjectsNVX returnUnmarshal");
-    VkResult vkUnregisterObjectsNVX_VkResult_return = (VkResult)0;
-    stream->read(&vkUnregisterObjectsNVX_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkUnregisterObjectsNVX");;
-    return vkUnregisterObjectsNVX_VkResult_return;
-}
-
-void VkEncoder::vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX(
-    VkPhysicalDevice physicalDevice,
-    VkDeviceGeneratedCommandsFeaturesNVX* pFeatures,
-    VkDeviceGeneratedCommandsLimitsNVX* pLimits)
-{
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX encode");
-    mImpl->log("start vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX");
-    auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
-    auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
-    VkPhysicalDevice local_physicalDevice;
-    local_physicalDevice = physicalDevice;
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1307;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1307, 1);
-        countingStream->write((uint64_t*)&cgen_var_1307, 1 * 8);
-        marshal_VkDeviceGeneratedCommandsFeaturesNVX(countingStream, (VkDeviceGeneratedCommandsFeaturesNVX*)(pFeatures));
-        marshal_VkDeviceGeneratedCommandsLimitsNVX(countingStream, (VkDeviceGeneratedCommandsLimitsNVX*)(pLimits));
-    }
-    uint32_t packetSize_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
-    uint32_t opcode_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX = OP_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX;
-    stream->write(&opcode_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX, sizeof(uint32_t));
-    uint64_t cgen_var_1308;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1308, 1);
-    stream->write((uint64_t*)&cgen_var_1308, 1 * 8);
-    marshal_VkDeviceGeneratedCommandsFeaturesNVX(stream, (VkDeviceGeneratedCommandsFeaturesNVX*)(pFeatures));
-    marshal_VkDeviceGeneratedCommandsLimitsNVX(stream, (VkDeviceGeneratedCommandsLimitsNVX*)(pLimits));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX readParams");
-    unmarshal_VkDeviceGeneratedCommandsFeaturesNVX(stream, (VkDeviceGeneratedCommandsFeaturesNVX*)(pFeatures));
-    if (pFeatures)
-    {
-        transform_fromhost_VkDeviceGeneratedCommandsFeaturesNVX(mImpl->resources(), (VkDeviceGeneratedCommandsFeaturesNVX*)(pFeatures));
-    }
-    unmarshal_VkDeviceGeneratedCommandsLimitsNVX(stream, (VkDeviceGeneratedCommandsLimitsNVX*)(pLimits));
-    if (pLimits)
-    {
-        transform_fromhost_VkDeviceGeneratedCommandsLimitsNVX(mImpl->resources(), (VkDeviceGeneratedCommandsLimitsNVX*)(pLimits));
-    }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX");;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
@@ -19954,16 +24635,14 @@
     VkCommandBuffer commandBuffer,
     uint32_t firstViewport,
     uint32_t viewportCount,
-    const VkViewportWScalingNV* pViewportWScalings)
+    const VkViewportWScalingNV* pViewportWScalings,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdSetViewportWScalingNV encode");
-    mImpl->log("start vkCmdSetViewportWScalingNV");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     uint32_t local_firstViewport;
     uint32_t local_viewportCount;
@@ -19977,93 +24656,108 @@
         local_pViewportWScalings = (VkViewportWScalingNV*)pool->alloc(((viewportCount)) * sizeof(const VkViewportWScalingNV));
         for (uint32_t i = 0; i < (uint32_t)((viewportCount)); ++i)
         {
-            deepcopy_VkViewportWScalingNV(pool, pViewportWScalings + i, (VkViewportWScalingNV*)(local_pViewportWScalings + i));
+            deepcopy_VkViewportWScalingNV(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pViewportWScalings + i, (VkViewportWScalingNV*)(local_pViewportWScalings + i));
         }
     }
     if (local_pViewportWScalings)
     {
         for (uint32_t i = 0; i < (uint32_t)((viewportCount)); ++i)
         {
-            transform_tohost_VkViewportWScalingNV(mImpl->resources(), (VkViewportWScalingNV*)(local_pViewportWScalings + i));
+            transform_tohost_VkViewportWScalingNV(sResourceTracker, (VkViewportWScalingNV*)(local_pViewportWScalings + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1309;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1309, 1);
-        countingStream->write((uint64_t*)&cgen_var_1309, 1 * 8);
-        countingStream->write((uint32_t*)&local_firstViewport, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_viewportCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((viewportCount)); ++i)
         {
-            marshal_VkViewportWScalingNV(countingStream, (VkViewportWScalingNV*)(local_pViewportWScalings + i));
+            count_VkViewportWScalingNV(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkViewportWScalingNV*)(local_pViewportWScalings + i), countPtr);
         }
     }
-    uint32_t packetSize_vkCmdSetViewportWScalingNV = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdSetViewportWScalingNV = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetViewportWScalingNV -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetViewportWScalingNV);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdSetViewportWScalingNV = OP_vkCmdSetViewportWScalingNV;
-    stream->write(&opcode_vkCmdSetViewportWScalingNV, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdSetViewportWScalingNV, sizeof(uint32_t));
-    uint64_t cgen_var_1310;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1310, 1);
-    stream->write((uint64_t*)&cgen_var_1310, 1 * 8);
-    stream->write((uint32_t*)&local_firstViewport, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_viewportCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdSetViewportWScalingNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetViewportWScalingNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstViewport, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_viewportCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((viewportCount)); ++i)
     {
-        marshal_VkViewportWScalingNV(stream, (VkViewportWScalingNV*)(local_pViewportWScalings + i));
+        reservedmarshal_VkViewportWScalingNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkViewportWScalingNV*)(local_pViewportWScalings + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkCmdSetViewportWScalingNV readParams");
-    AEMU_SCOPED_TRACE("vkCmdSetViewportWScalingNV returnUnmarshal");
-    mImpl->log("finish vkCmdSetViewportWScalingNV");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
 #ifdef VK_EXT_direct_mode_display
 VkResult VkEncoder::vkReleaseDisplayEXT(
     VkPhysicalDevice physicalDevice,
-    VkDisplayKHR display)
+    VkDisplayKHR display,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkReleaseDisplayEXT encode");
-    mImpl->log("start vkReleaseDisplayEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkDisplayKHR local_display;
     local_physicalDevice = physicalDevice;
     local_display = display;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1311;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1311, 1);
-        countingStream->write((uint64_t*)&cgen_var_1311, 1 * 8);
-        uint64_t cgen_var_1312;
-        countingStream->handleMapping()->mapHandles_VkDisplayKHR_u64(&local_display, &cgen_var_1312, 1);
-        countingStream->write((uint64_t*)&cgen_var_1312, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkReleaseDisplayEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkReleaseDisplayEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkReleaseDisplayEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkReleaseDisplayEXT = OP_vkReleaseDisplayEXT;
-    stream->write(&opcode_vkReleaseDisplayEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkReleaseDisplayEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1313;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1313, 1);
-    stream->write((uint64_t*)&cgen_var_1313, 1 * 8);
-    uint64_t cgen_var_1314;
-    stream->handleMapping()->mapHandles_VkDisplayKHR_u64(&local_display, &cgen_var_1314, 1);
-    stream->write((uint64_t*)&cgen_var_1314, 1 * 8);
-    AEMU_SCOPED_TRACE("vkReleaseDisplayEXT readParams");
-    AEMU_SCOPED_TRACE("vkReleaseDisplayEXT returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkReleaseDisplayEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkReleaseDisplayEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDisplayKHR((*&local_display));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     VkResult vkReleaseDisplayEXT_VkResult_return = (VkResult)0;
     stream->read(&vkReleaseDisplayEXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkReleaseDisplayEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkReleaseDisplayEXT_VkResult_return;
 }
 
@@ -20072,51 +24766,55 @@
 VkResult VkEncoder::vkAcquireXlibDisplayEXT(
     VkPhysicalDevice physicalDevice,
     Display* dpy,
-    VkDisplayKHR display)
+    VkDisplayKHR display,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkAcquireXlibDisplayEXT encode");
-    mImpl->log("start vkAcquireXlibDisplayEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkDisplayKHR local_display;
     local_physicalDevice = physicalDevice;
     local_display = display;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1315;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1315, 1);
-        countingStream->write((uint64_t*)&cgen_var_1315, 1 * 8);
-        countingStream->write((Display*)dpy, sizeof(Display));
-        uint64_t cgen_var_1316;
-        countingStream->handleMapping()->mapHandles_VkDisplayKHR_u64(&local_display, &cgen_var_1316, 1);
-        countingStream->write((uint64_t*)&cgen_var_1316, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(Display);
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkAcquireXlibDisplayEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkAcquireXlibDisplayEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkAcquireXlibDisplayEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkAcquireXlibDisplayEXT = OP_vkAcquireXlibDisplayEXT;
-    stream->write(&opcode_vkAcquireXlibDisplayEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkAcquireXlibDisplayEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1317;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1317, 1);
-    stream->write((uint64_t*)&cgen_var_1317, 1 * 8);
-    stream->write((Display*)dpy, sizeof(Display));
-    uint64_t cgen_var_1318;
-    stream->handleMapping()->mapHandles_VkDisplayKHR_u64(&local_display, &cgen_var_1318, 1);
-    stream->write((uint64_t*)&cgen_var_1318, 1 * 8);
-    AEMU_SCOPED_TRACE("vkAcquireXlibDisplayEXT readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkAcquireXlibDisplayEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkAcquireXlibDisplayEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (Display*)dpy, sizeof(Display));
+    *streamPtrPtr += sizeof(Display);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDisplayKHR((*&local_display));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     stream->read((Display*)dpy, sizeof(Display));
-    AEMU_SCOPED_TRACE("vkAcquireXlibDisplayEXT returnUnmarshal");
     VkResult vkAcquireXlibDisplayEXT_VkResult_return = (VkResult)0;
     stream->read(&vkAcquireXlibDisplayEXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkAcquireXlibDisplayEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkAcquireXlibDisplayEXT_VkResult_return;
 }
 
@@ -20124,58 +24822,63 @@
     VkPhysicalDevice physicalDevice,
     Display* dpy,
     RROutput rrOutput,
-    VkDisplayKHR* pDisplay)
+    VkDisplayKHR* pDisplay,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetRandROutputDisplayEXT encode");
-    mImpl->log("start vkGetRandROutputDisplayEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     RROutput local_rrOutput;
     local_physicalDevice = physicalDevice;
     local_rrOutput = rrOutput;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1319;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1319, 1);
-        countingStream->write((uint64_t*)&cgen_var_1319, 1 * 8);
-        countingStream->write((Display*)dpy, sizeof(Display));
-        countingStream->write((RROutput*)&local_rrOutput, sizeof(RROutput));
-        uint64_t cgen_var_1320;
-        countingStream->handleMapping()->mapHandles_VkDisplayKHR_u64(pDisplay, &cgen_var_1320, 1);
-        countingStream->write((uint64_t*)&cgen_var_1320, 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(Display);
+        *countPtr += sizeof(RROutput);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkGetRandROutputDisplayEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetRandROutputDisplayEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetRandROutputDisplayEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetRandROutputDisplayEXT = OP_vkGetRandROutputDisplayEXT;
-    stream->write(&opcode_vkGetRandROutputDisplayEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetRandROutputDisplayEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1321;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1321, 1);
-    stream->write((uint64_t*)&cgen_var_1321, 1 * 8);
-    stream->write((Display*)dpy, sizeof(Display));
-    stream->write((RROutput*)&local_rrOutput, sizeof(RROutput));
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_1322;
-    stream->handleMapping()->mapHandles_VkDisplayKHR_u64(pDisplay, &cgen_var_1322, 1);
-    stream->write((uint64_t*)&cgen_var_1322, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkGetRandROutputDisplayEXT readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetRandROutputDisplayEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetRandROutputDisplayEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (Display*)dpy, sizeof(Display));
+    *streamPtrPtr += sizeof(Display);
+    memcpy(*streamPtrPtr, (RROutput*)&local_rrOutput, sizeof(RROutput));
+    *streamPtrPtr += sizeof(RROutput);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = (uint64_t)((*pDisplay));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
     stream->read((Display*)dpy, sizeof(Display));
-    uint64_t cgen_var_1323;
-    stream->read((uint64_t*)&cgen_var_1323, 8);
-    stream->handleMapping()->mapHandles_u64_VkDisplayKHR(&cgen_var_1323, (VkDisplayKHR*)pDisplay, 1);
-    AEMU_SCOPED_TRACE("vkGetRandROutputDisplayEXT returnUnmarshal");
+    uint64_t cgen_var_2;
+    stream->read((uint64_t*)&cgen_var_2, 8);
+    stream->handleMapping()->mapHandles_u64_VkDisplayKHR(&cgen_var_2, (VkDisplayKHR*)pDisplay, 1);
     VkResult vkGetRandROutputDisplayEXT_VkResult_return = (VkResult)0;
     stream->read(&vkGetRandROutputDisplayEXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetRandROutputDisplayEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetRandROutputDisplayEXT_VkResult_return;
 }
 
@@ -20184,55 +24887,58 @@
 VkResult VkEncoder::vkGetPhysicalDeviceSurfaceCapabilities2EXT(
     VkPhysicalDevice physicalDevice,
     VkSurfaceKHR surface,
-    VkSurfaceCapabilities2EXT* pSurfaceCapabilities)
+    VkSurfaceCapabilities2EXT* pSurfaceCapabilities,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceCapabilities2EXT encode");
-    mImpl->log("start vkGetPhysicalDeviceSurfaceCapabilities2EXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkSurfaceKHR local_surface;
     local_physicalDevice = physicalDevice;
     local_surface = surface;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1324;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1324, 1);
-        countingStream->write((uint64_t*)&cgen_var_1324, 1 * 8);
-        uint64_t cgen_var_1325;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&local_surface, &cgen_var_1325, 1);
-        countingStream->write((uint64_t*)&cgen_var_1325, 1 * 8);
-        marshal_VkSurfaceCapabilities2EXT(countingStream, (VkSurfaceCapabilities2EXT*)(pSurfaceCapabilities));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        count_VkSurfaceCapabilities2EXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSurfaceCapabilities2EXT*)(pSurfaceCapabilities), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceSurfaceCapabilities2EXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceSurfaceCapabilities2EXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceSurfaceCapabilities2EXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceSurfaceCapabilities2EXT = OP_vkGetPhysicalDeviceSurfaceCapabilities2EXT;
-    stream->write(&opcode_vkGetPhysicalDeviceSurfaceCapabilities2EXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceSurfaceCapabilities2EXT, sizeof(uint32_t));
-    uint64_t cgen_var_1326;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1326, 1);
-    stream->write((uint64_t*)&cgen_var_1326, 1 * 8);
-    uint64_t cgen_var_1327;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&local_surface, &cgen_var_1327, 1);
-    stream->write((uint64_t*)&cgen_var_1327, 1 * 8);
-    marshal_VkSurfaceCapabilities2EXT(stream, (VkSurfaceCapabilities2EXT*)(pSurfaceCapabilities));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceCapabilities2EXT readParams");
-    unmarshal_VkSurfaceCapabilities2EXT(stream, (VkSurfaceCapabilities2EXT*)(pSurfaceCapabilities));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceSurfaceCapabilities2EXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceSurfaceCapabilities2EXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSurfaceKHR((*&local_surface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkSurfaceCapabilities2EXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSurfaceCapabilities2EXT*)(pSurfaceCapabilities), streamPtrPtr);
+    unmarshal_VkSurfaceCapabilities2EXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSurfaceCapabilities2EXT*)(pSurfaceCapabilities));
     if (pSurfaceCapabilities)
     {
-        transform_fromhost_VkSurfaceCapabilities2EXT(mImpl->resources(), (VkSurfaceCapabilities2EXT*)(pSurfaceCapabilities));
+        transform_fromhost_VkSurfaceCapabilities2EXT(sResourceTracker, (VkSurfaceCapabilities2EXT*)(pSurfaceCapabilities));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceCapabilities2EXT returnUnmarshal");
     VkResult vkGetPhysicalDeviceSurfaceCapabilities2EXT_VkResult_return = (VkResult)0;
     stream->read(&vkGetPhysicalDeviceSurfaceCapabilities2EXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPhysicalDeviceSurfaceCapabilities2EXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPhysicalDeviceSurfaceCapabilities2EXT_VkResult_return;
 }
 
@@ -20241,16 +24947,14 @@
 VkResult VkEncoder::vkDisplayPowerControlEXT(
     VkDevice device,
     VkDisplayKHR display,
-    const VkDisplayPowerInfoEXT* pDisplayPowerInfo)
+    const VkDisplayPowerInfoEXT* pDisplayPowerInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDisplayPowerControlEXT encode");
-    mImpl->log("start vkDisplayPowerControlEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDisplayKHR local_display;
     VkDisplayPowerInfoEXT* local_pDisplayPowerInfo;
@@ -20260,42 +24964,47 @@
     if (pDisplayPowerInfo)
     {
         local_pDisplayPowerInfo = (VkDisplayPowerInfoEXT*)pool->alloc(sizeof(const VkDisplayPowerInfoEXT));
-        deepcopy_VkDisplayPowerInfoEXT(pool, pDisplayPowerInfo, (VkDisplayPowerInfoEXT*)(local_pDisplayPowerInfo));
+        deepcopy_VkDisplayPowerInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pDisplayPowerInfo, (VkDisplayPowerInfoEXT*)(local_pDisplayPowerInfo));
     }
     if (local_pDisplayPowerInfo)
     {
-        transform_tohost_VkDisplayPowerInfoEXT(mImpl->resources(), (VkDisplayPowerInfoEXT*)(local_pDisplayPowerInfo));
+        transform_tohost_VkDisplayPowerInfoEXT(sResourceTracker, (VkDisplayPowerInfoEXT*)(local_pDisplayPowerInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1328;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1328, 1);
-        countingStream->write((uint64_t*)&cgen_var_1328, 1 * 8);
-        uint64_t cgen_var_1329;
-        countingStream->handleMapping()->mapHandles_VkDisplayKHR_u64(&local_display, &cgen_var_1329, 1);
-        countingStream->write((uint64_t*)&cgen_var_1329, 1 * 8);
-        marshal_VkDisplayPowerInfoEXT(countingStream, (VkDisplayPowerInfoEXT*)(local_pDisplayPowerInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        count_VkDisplayPowerInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPowerInfoEXT*)(local_pDisplayPowerInfo), countPtr);
     }
-    uint32_t packetSize_vkDisplayPowerControlEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDisplayPowerControlEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDisplayPowerControlEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDisplayPowerControlEXT = OP_vkDisplayPowerControlEXT;
-    stream->write(&opcode_vkDisplayPowerControlEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkDisplayPowerControlEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1330;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1330, 1);
-    stream->write((uint64_t*)&cgen_var_1330, 1 * 8);
-    uint64_t cgen_var_1331;
-    stream->handleMapping()->mapHandles_VkDisplayKHR_u64(&local_display, &cgen_var_1331, 1);
-    stream->write((uint64_t*)&cgen_var_1331, 1 * 8);
-    marshal_VkDisplayPowerInfoEXT(stream, (VkDisplayPowerInfoEXT*)(local_pDisplayPowerInfo));
-    AEMU_SCOPED_TRACE("vkDisplayPowerControlEXT readParams");
-    AEMU_SCOPED_TRACE("vkDisplayPowerControlEXT returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDisplayPowerControlEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDisplayPowerControlEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDisplayKHR((*&local_display));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDisplayPowerInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayPowerInfoEXT*)(local_pDisplayPowerInfo), streamPtrPtr);
     VkResult vkDisplayPowerControlEXT_VkResult_return = (VkResult)0;
     stream->read(&vkDisplayPowerControlEXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkDisplayPowerControlEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkDisplayPowerControlEXT_VkResult_return;
 }
 
@@ -20303,16 +25012,14 @@
     VkDevice device,
     const VkDeviceEventInfoEXT* pDeviceEventInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkFence* pFence)
+    VkFence* pFence,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkRegisterDeviceEventEXT encode");
-    mImpl->log("start vkRegisterDeviceEventEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDeviceEventInfoEXT* local_pDeviceEventInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -20321,72 +25028,78 @@
     if (pDeviceEventInfo)
     {
         local_pDeviceEventInfo = (VkDeviceEventInfoEXT*)pool->alloc(sizeof(const VkDeviceEventInfoEXT));
-        deepcopy_VkDeviceEventInfoEXT(pool, pDeviceEventInfo, (VkDeviceEventInfoEXT*)(local_pDeviceEventInfo));
+        deepcopy_VkDeviceEventInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pDeviceEventInfo, (VkDeviceEventInfoEXT*)(local_pDeviceEventInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pDeviceEventInfo)
     {
-        transform_tohost_VkDeviceEventInfoEXT(mImpl->resources(), (VkDeviceEventInfoEXT*)(local_pDeviceEventInfo));
+        transform_tohost_VkDeviceEventInfoEXT(sResourceTracker, (VkDeviceEventInfoEXT*)(local_pDeviceEventInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1332;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1332, 1);
-        countingStream->write((uint64_t*)&cgen_var_1332, 1 * 8);
-        marshal_VkDeviceEventInfoEXT(countingStream, (VkDeviceEventInfoEXT*)(local_pDeviceEventInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDeviceEventInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDeviceEventInfoEXT*)(local_pDeviceEventInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1333 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1333);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_1334;
-        countingStream->handleMapping()->mapHandles_VkFence_u64(pFence, &cgen_var_1334, 1);
-        countingStream->write((uint64_t*)&cgen_var_1334, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkRegisterDeviceEventEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkRegisterDeviceEventEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkRegisterDeviceEventEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkRegisterDeviceEventEXT = OP_vkRegisterDeviceEventEXT;
-    stream->write(&opcode_vkRegisterDeviceEventEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkRegisterDeviceEventEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1335;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1335, 1);
-    stream->write((uint64_t*)&cgen_var_1335, 1 * 8);
-    marshal_VkDeviceEventInfoEXT(stream, (VkDeviceEventInfoEXT*)(local_pDeviceEventInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkRegisterDeviceEventEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkRegisterDeviceEventEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDeviceEventInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDeviceEventInfoEXT*)(local_pDeviceEventInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1336 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1336);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_1337;
-    stream->handleMapping()->mapHandles_VkFence_u64(pFence, &cgen_var_1337, 1);
-    stream->write((uint64_t*)&cgen_var_1337, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkRegisterDeviceEventEXT readParams");
-    uint64_t cgen_var_1338;
-    stream->read((uint64_t*)&cgen_var_1338, 8);
-    stream->handleMapping()->mapHandles_u64_VkFence(&cgen_var_1338, (VkFence*)pFence, 1);
-    AEMU_SCOPED_TRACE("vkRegisterDeviceEventEXT returnUnmarshal");
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pFence));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkFence(&cgen_var_3, (VkFence*)pFence, 1);
     VkResult vkRegisterDeviceEventEXT_VkResult_return = (VkResult)0;
     stream->read(&vkRegisterDeviceEventEXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkRegisterDeviceEventEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkRegisterDeviceEventEXT_VkResult_return;
 }
 
@@ -20395,16 +25108,14 @@
     VkDisplayKHR display,
     const VkDisplayEventInfoEXT* pDisplayEventInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkFence* pFence)
+    VkFence* pFence,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkRegisterDisplayEventEXT encode");
-    mImpl->log("start vkRegisterDisplayEventEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDisplayKHR local_display;
     VkDisplayEventInfoEXT* local_pDisplayEventInfo;
@@ -20415,78 +25126,84 @@
     if (pDisplayEventInfo)
     {
         local_pDisplayEventInfo = (VkDisplayEventInfoEXT*)pool->alloc(sizeof(const VkDisplayEventInfoEXT));
-        deepcopy_VkDisplayEventInfoEXT(pool, pDisplayEventInfo, (VkDisplayEventInfoEXT*)(local_pDisplayEventInfo));
+        deepcopy_VkDisplayEventInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pDisplayEventInfo, (VkDisplayEventInfoEXT*)(local_pDisplayEventInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pDisplayEventInfo)
     {
-        transform_tohost_VkDisplayEventInfoEXT(mImpl->resources(), (VkDisplayEventInfoEXT*)(local_pDisplayEventInfo));
+        transform_tohost_VkDisplayEventInfoEXT(sResourceTracker, (VkDisplayEventInfoEXT*)(local_pDisplayEventInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1339;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1339, 1);
-        countingStream->write((uint64_t*)&cgen_var_1339, 1 * 8);
-        uint64_t cgen_var_1340;
-        countingStream->handleMapping()->mapHandles_VkDisplayKHR_u64(&local_display, &cgen_var_1340, 1);
-        countingStream->write((uint64_t*)&cgen_var_1340, 1 * 8);
-        marshal_VkDisplayEventInfoEXT(countingStream, (VkDisplayEventInfoEXT*)(local_pDisplayEventInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        count_VkDisplayEventInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayEventInfoEXT*)(local_pDisplayEventInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1341 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1341);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_1342;
-        countingStream->handleMapping()->mapHandles_VkFence_u64(pFence, &cgen_var_1342, 1);
-        countingStream->write((uint64_t*)&cgen_var_1342, 8);
+        uint64_t cgen_var_2;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkRegisterDisplayEventEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkRegisterDisplayEventEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkRegisterDisplayEventEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkRegisterDisplayEventEXT = OP_vkRegisterDisplayEventEXT;
-    stream->write(&opcode_vkRegisterDisplayEventEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkRegisterDisplayEventEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1343;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1343, 1);
-    stream->write((uint64_t*)&cgen_var_1343, 1 * 8);
-    uint64_t cgen_var_1344;
-    stream->handleMapping()->mapHandles_VkDisplayKHR_u64(&local_display, &cgen_var_1344, 1);
-    stream->write((uint64_t*)&cgen_var_1344, 1 * 8);
-    marshal_VkDisplayEventInfoEXT(stream, (VkDisplayEventInfoEXT*)(local_pDisplayEventInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkRegisterDisplayEventEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkRegisterDisplayEventEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDisplayKHR((*&local_display));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDisplayEventInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDisplayEventInfoEXT*)(local_pDisplayEventInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1345 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1345);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_1346;
-    stream->handleMapping()->mapHandles_VkFence_u64(pFence, &cgen_var_1346, 1);
-    stream->write((uint64_t*)&cgen_var_1346, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkRegisterDisplayEventEXT readParams");
-    uint64_t cgen_var_1347;
-    stream->read((uint64_t*)&cgen_var_1347, 8);
-    stream->handleMapping()->mapHandles_u64_VkFence(&cgen_var_1347, (VkFence*)pFence, 1);
-    AEMU_SCOPED_TRACE("vkRegisterDisplayEventEXT returnUnmarshal");
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    *&cgen_var_3 = (uint64_t)((*pFence));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_3, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_4;
+    stream->read((uint64_t*)&cgen_var_4, 8);
+    stream->handleMapping()->mapHandles_u64_VkFence(&cgen_var_4, (VkFence*)pFence, 1);
     VkResult vkRegisterDisplayEventEXT_VkResult_return = (VkResult)0;
     stream->read(&vkRegisterDisplayEventEXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkRegisterDisplayEventEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkRegisterDisplayEventEXT_VkResult_return;
 }
 
@@ -20494,55 +25211,60 @@
     VkDevice device,
     VkSwapchainKHR swapchain,
     VkSurfaceCounterFlagBitsEXT counter,
-    uint64_t* pCounterValue)
+    uint64_t* pCounterValue,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetSwapchainCounterEXT encode");
-    mImpl->log("start vkGetSwapchainCounterEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSwapchainKHR local_swapchain;
     VkSurfaceCounterFlagBitsEXT local_counter;
     local_device = device;
     local_swapchain = swapchain;
     local_counter = counter;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1348;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1348, 1);
-        countingStream->write((uint64_t*)&cgen_var_1348, 1 * 8);
-        uint64_t cgen_var_1349;
-        countingStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&local_swapchain, &cgen_var_1349, 1);
-        countingStream->write((uint64_t*)&cgen_var_1349, 1 * 8);
-        countingStream->write((VkSurfaceCounterFlagBitsEXT*)&local_counter, sizeof(VkSurfaceCounterFlagBitsEXT));
-        countingStream->write((uint64_t*)pCounterValue, sizeof(uint64_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkSurfaceCounterFlagBitsEXT);
+        *countPtr += sizeof(uint64_t);
     }
-    uint32_t packetSize_vkGetSwapchainCounterEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetSwapchainCounterEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetSwapchainCounterEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetSwapchainCounterEXT = OP_vkGetSwapchainCounterEXT;
-    stream->write(&opcode_vkGetSwapchainCounterEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetSwapchainCounterEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1350;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1350, 1);
-    stream->write((uint64_t*)&cgen_var_1350, 1 * 8);
-    uint64_t cgen_var_1351;
-    stream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&local_swapchain, &cgen_var_1351, 1);
-    stream->write((uint64_t*)&cgen_var_1351, 1 * 8);
-    stream->write((VkSurfaceCounterFlagBitsEXT*)&local_counter, sizeof(VkSurfaceCounterFlagBitsEXT));
-    stream->write((uint64_t*)pCounterValue, sizeof(uint64_t));
-    AEMU_SCOPED_TRACE("vkGetSwapchainCounterEXT readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetSwapchainCounterEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetSwapchainCounterEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSwapchainKHR((*&local_swapchain));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkSurfaceCounterFlagBitsEXT*)&local_counter, sizeof(VkSurfaceCounterFlagBitsEXT));
+    *streamPtrPtr += sizeof(VkSurfaceCounterFlagBitsEXT);
+    memcpy(*streamPtrPtr, (uint64_t*)pCounterValue, sizeof(uint64_t));
+    *streamPtrPtr += sizeof(uint64_t);
     stream->read((uint64_t*)pCounterValue, sizeof(uint64_t));
-    AEMU_SCOPED_TRACE("vkGetSwapchainCounterEXT returnUnmarshal");
     VkResult vkGetSwapchainCounterEXT_VkResult_return = (VkResult)0;
     stream->read(&vkGetSwapchainCounterEXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetSwapchainCounterEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetSwapchainCounterEXT_VkResult_return;
 }
 
@@ -20551,55 +25273,58 @@
 VkResult VkEncoder::vkGetRefreshCycleDurationGOOGLE(
     VkDevice device,
     VkSwapchainKHR swapchain,
-    VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties)
+    VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetRefreshCycleDurationGOOGLE encode");
-    mImpl->log("start vkGetRefreshCycleDurationGOOGLE");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSwapchainKHR local_swapchain;
     local_device = device;
     local_swapchain = swapchain;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1352;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1352, 1);
-        countingStream->write((uint64_t*)&cgen_var_1352, 1 * 8);
-        uint64_t cgen_var_1353;
-        countingStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&local_swapchain, &cgen_var_1353, 1);
-        countingStream->write((uint64_t*)&cgen_var_1353, 1 * 8);
-        marshal_VkRefreshCycleDurationGOOGLE(countingStream, (VkRefreshCycleDurationGOOGLE*)(pDisplayTimingProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        count_VkRefreshCycleDurationGOOGLE(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRefreshCycleDurationGOOGLE*)(pDisplayTimingProperties), countPtr);
     }
-    uint32_t packetSize_vkGetRefreshCycleDurationGOOGLE = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetRefreshCycleDurationGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetRefreshCycleDurationGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetRefreshCycleDurationGOOGLE = OP_vkGetRefreshCycleDurationGOOGLE;
-    stream->write(&opcode_vkGetRefreshCycleDurationGOOGLE, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetRefreshCycleDurationGOOGLE, sizeof(uint32_t));
-    uint64_t cgen_var_1354;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1354, 1);
-    stream->write((uint64_t*)&cgen_var_1354, 1 * 8);
-    uint64_t cgen_var_1355;
-    stream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&local_swapchain, &cgen_var_1355, 1);
-    stream->write((uint64_t*)&cgen_var_1355, 1 * 8);
-    marshal_VkRefreshCycleDurationGOOGLE(stream, (VkRefreshCycleDurationGOOGLE*)(pDisplayTimingProperties));
-    AEMU_SCOPED_TRACE("vkGetRefreshCycleDurationGOOGLE readParams");
-    unmarshal_VkRefreshCycleDurationGOOGLE(stream, (VkRefreshCycleDurationGOOGLE*)(pDisplayTimingProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetRefreshCycleDurationGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetRefreshCycleDurationGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSwapchainKHR((*&local_swapchain));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkRefreshCycleDurationGOOGLE(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRefreshCycleDurationGOOGLE*)(pDisplayTimingProperties), streamPtrPtr);
+    unmarshal_VkRefreshCycleDurationGOOGLE(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRefreshCycleDurationGOOGLE*)(pDisplayTimingProperties));
     if (pDisplayTimingProperties)
     {
-        transform_fromhost_VkRefreshCycleDurationGOOGLE(mImpl->resources(), (VkRefreshCycleDurationGOOGLE*)(pDisplayTimingProperties));
+        transform_fromhost_VkRefreshCycleDurationGOOGLE(sResourceTracker, (VkRefreshCycleDurationGOOGLE*)(pDisplayTimingProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetRefreshCycleDurationGOOGLE returnUnmarshal");
     VkResult vkGetRefreshCycleDurationGOOGLE_VkResult_return = (VkResult)0;
     stream->read(&vkGetRefreshCycleDurationGOOGLE_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetRefreshCycleDurationGOOGLE");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetRefreshCycleDurationGOOGLE_VkResult_return;
 }
 
@@ -20607,75 +25332,82 @@
     VkDevice device,
     VkSwapchainKHR swapchain,
     uint32_t* pPresentationTimingCount,
-    VkPastPresentationTimingGOOGLE* pPresentationTimings)
+    VkPastPresentationTimingGOOGLE* pPresentationTimings,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPastPresentationTimingGOOGLE encode");
-    mImpl->log("start vkGetPastPresentationTimingGOOGLE");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkSwapchainKHR local_swapchain;
     local_device = device;
     local_swapchain = swapchain;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1356;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1356, 1);
-        countingStream->write((uint64_t*)&cgen_var_1356, 1 * 8);
-        uint64_t cgen_var_1357;
-        countingStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&local_swapchain, &cgen_var_1357, 1);
-        countingStream->write((uint64_t*)&cgen_var_1357, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_1358 = (uint64_t)(uintptr_t)pPresentationTimingCount;
-        countingStream->putBe64(cgen_var_1358);
+        *countPtr += 8;
         if (pPresentationTimingCount)
         {
-            countingStream->write((uint32_t*)pPresentationTimingCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_1359 = (uint64_t)(uintptr_t)pPresentationTimings;
-        countingStream->putBe64(cgen_var_1359);
+        *countPtr += 8;
         if (pPresentationTimings)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pPresentationTimingCount)); ++i)
+            if (pPresentationTimingCount)
             {
-                marshal_VkPastPresentationTimingGOOGLE(countingStream, (VkPastPresentationTimingGOOGLE*)(pPresentationTimings + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pPresentationTimingCount)); ++i)
+                {
+                    count_VkPastPresentationTimingGOOGLE(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPastPresentationTimingGOOGLE*)(pPresentationTimings + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetPastPresentationTimingGOOGLE = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPastPresentationTimingGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPastPresentationTimingGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPastPresentationTimingGOOGLE = OP_vkGetPastPresentationTimingGOOGLE;
-    stream->write(&opcode_vkGetPastPresentationTimingGOOGLE, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPastPresentationTimingGOOGLE, sizeof(uint32_t));
-    uint64_t cgen_var_1360;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1360, 1);
-    stream->write((uint64_t*)&cgen_var_1360, 1 * 8);
-    uint64_t cgen_var_1361;
-    stream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&local_swapchain, &cgen_var_1361, 1);
-    stream->write((uint64_t*)&cgen_var_1361, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPastPresentationTimingGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPastPresentationTimingGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSwapchainKHR((*&local_swapchain));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_1362 = (uint64_t)(uintptr_t)pPresentationTimingCount;
-    stream->putBe64(cgen_var_1362);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pPresentationTimingCount;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPresentationTimingCount)
     {
-        stream->write((uint32_t*)pPresentationTimingCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pPresentationTimingCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1363 = (uint64_t)(uintptr_t)pPresentationTimings;
-    stream->putBe64(cgen_var_1363);
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)pPresentationTimings;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pPresentationTimings)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pPresentationTimingCount)); ++i)
         {
-            marshal_VkPastPresentationTimingGOOGLE(stream, (VkPastPresentationTimingGOOGLE*)(pPresentationTimings + i));
+            reservedmarshal_VkPastPresentationTimingGOOGLE(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPastPresentationTimingGOOGLE*)(pPresentationTimings + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPastPresentationTimingGOOGLE readParams");
     // WARNING PTR CHECK
     uint32_t* check_pPresentationTimingCount;
     check_pPresentationTimingCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -20696,25 +25428,33 @@
         {
             fprintf(stderr, "fatal: pPresentationTimings inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pPresentationTimingCount)); ++i)
+        if (pPresentationTimingCount)
         {
-            unmarshal_VkPastPresentationTimingGOOGLE(stream, (VkPastPresentationTimingGOOGLE*)(pPresentationTimings + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPresentationTimingCount)); ++i)
+            {
+                unmarshal_VkPastPresentationTimingGOOGLE(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPastPresentationTimingGOOGLE*)(pPresentationTimings + i));
+            }
         }
     }
-    if (pPresentationTimings)
+    if (pPresentationTimingCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pPresentationTimingCount)); ++i)
+        if (pPresentationTimings)
         {
-            transform_fromhost_VkPastPresentationTimingGOOGLE(mImpl->resources(), (VkPastPresentationTimingGOOGLE*)(pPresentationTimings + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pPresentationTimingCount)); ++i)
+            {
+                transform_fromhost_VkPastPresentationTimingGOOGLE(sResourceTracker, (VkPastPresentationTimingGOOGLE*)(pPresentationTimings + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetPastPresentationTimingGOOGLE returnUnmarshal");
     VkResult vkGetPastPresentationTimingGOOGLE_VkResult_return = (VkResult)0;
     stream->read(&vkGetPastPresentationTimingGOOGLE_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetPastPresentationTimingGOOGLE");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetPastPresentationTimingGOOGLE_VkResult_return;
 }
 
@@ -20734,16 +25474,14 @@
     VkCommandBuffer commandBuffer,
     uint32_t firstDiscardRectangle,
     uint32_t discardRectangleCount,
-    const VkRect2D* pDiscardRectangles)
+    const VkRect2D* pDiscardRectangles,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdSetDiscardRectangleEXT encode");
-    mImpl->log("start vkCmdSetDiscardRectangleEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     uint32_t local_firstDiscardRectangle;
     uint32_t local_discardRectangleCount;
@@ -20757,50 +25495,64 @@
         local_pDiscardRectangles = (VkRect2D*)pool->alloc(((discardRectangleCount)) * sizeof(const VkRect2D));
         for (uint32_t i = 0; i < (uint32_t)((discardRectangleCount)); ++i)
         {
-            deepcopy_VkRect2D(pool, pDiscardRectangles + i, (VkRect2D*)(local_pDiscardRectangles + i));
+            deepcopy_VkRect2D(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pDiscardRectangles + i, (VkRect2D*)(local_pDiscardRectangles + i));
         }
     }
     if (local_pDiscardRectangles)
     {
         for (uint32_t i = 0; i < (uint32_t)((discardRectangleCount)); ++i)
         {
-            transform_tohost_VkRect2D(mImpl->resources(), (VkRect2D*)(local_pDiscardRectangles + i));
+            transform_tohost_VkRect2D(sResourceTracker, (VkRect2D*)(local_pDiscardRectangles + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1366;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1366, 1);
-        countingStream->write((uint64_t*)&cgen_var_1366, 1 * 8);
-        countingStream->write((uint32_t*)&local_firstDiscardRectangle, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_discardRectangleCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
         for (uint32_t i = 0; i < (uint32_t)((discardRectangleCount)); ++i)
         {
-            marshal_VkRect2D(countingStream, (VkRect2D*)(local_pDiscardRectangles + i));
+            count_VkRect2D(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRect2D*)(local_pDiscardRectangles + i), countPtr);
         }
     }
-    uint32_t packetSize_vkCmdSetDiscardRectangleEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdSetDiscardRectangleEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetDiscardRectangleEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetDiscardRectangleEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdSetDiscardRectangleEXT = OP_vkCmdSetDiscardRectangleEXT;
-    stream->write(&opcode_vkCmdSetDiscardRectangleEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdSetDiscardRectangleEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1367;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1367, 1);
-    stream->write((uint64_t*)&cgen_var_1367, 1 * 8);
-    stream->write((uint32_t*)&local_firstDiscardRectangle, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_discardRectangleCount, sizeof(uint32_t));
+    memcpy(streamPtr, &opcode_vkCmdSetDiscardRectangleEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetDiscardRectangleEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstDiscardRectangle, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_discardRectangleCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     for (uint32_t i = 0; i < (uint32_t)((discardRectangleCount)); ++i)
     {
-        marshal_VkRect2D(stream, (VkRect2D*)(local_pDiscardRectangles + i));
+        reservedmarshal_VkRect2D(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRect2D*)(local_pDiscardRectangles + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkCmdSetDiscardRectangleEXT readParams");
-    AEMU_SCOPED_TRACE("vkCmdSetDiscardRectangleEXT returnUnmarshal");
-    mImpl->log("finish vkCmdSetDiscardRectangleEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
 #ifdef VK_EXT_conservative_rasterization
 #endif
+#ifdef VK_EXT_depth_clip_enable
+#endif
 #ifdef VK_EXT_swapchain_colorspace
 #endif
 #ifdef VK_EXT_hdr_metadata
@@ -20808,84 +25560,89 @@
     VkDevice device,
     uint32_t swapchainCount,
     const VkSwapchainKHR* pSwapchains,
-    const VkHdrMetadataEXT* pMetadata)
+    const VkHdrMetadataEXT* pMetadata,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkSetHdrMetadataEXT encode");
-    mImpl->log("start vkSetHdrMetadataEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     uint32_t local_swapchainCount;
     VkSwapchainKHR* local_pSwapchains;
     VkHdrMetadataEXT* local_pMetadata;
     local_device = device;
     local_swapchainCount = swapchainCount;
-    local_pSwapchains = nullptr;
-    if (pSwapchains)
-    {
-        local_pSwapchains = (VkSwapchainKHR*)pool->dupArray(pSwapchains, ((swapchainCount)) * sizeof(const VkSwapchainKHR));
-    }
+    // Avoiding deepcopy for pSwapchains
+    local_pSwapchains = (VkSwapchainKHR*)pSwapchains;
     local_pMetadata = nullptr;
     if (pMetadata)
     {
         local_pMetadata = (VkHdrMetadataEXT*)pool->alloc(((swapchainCount)) * sizeof(const VkHdrMetadataEXT));
         for (uint32_t i = 0; i < (uint32_t)((swapchainCount)); ++i)
         {
-            deepcopy_VkHdrMetadataEXT(pool, pMetadata + i, (VkHdrMetadataEXT*)(local_pMetadata + i));
+            deepcopy_VkHdrMetadataEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pMetadata + i, (VkHdrMetadataEXT*)(local_pMetadata + i));
         }
     }
     if (local_pMetadata)
     {
         for (uint32_t i = 0; i < (uint32_t)((swapchainCount)); ++i)
         {
-            transform_tohost_VkHdrMetadataEXT(mImpl->resources(), (VkHdrMetadataEXT*)(local_pMetadata + i));
+            transform_tohost_VkHdrMetadataEXT(sResourceTracker, (VkHdrMetadataEXT*)(local_pMetadata + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1368;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1368, 1);
-        countingStream->write((uint64_t*)&cgen_var_1368, 1 * 8);
-        countingStream->write((uint32_t*)&local_swapchainCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         if (((swapchainCount)))
         {
-            uint64_t* cgen_var_1369;
-            countingStream->alloc((void**)&cgen_var_1369, ((swapchainCount)) * 8);
-            countingStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(local_pSwapchains, cgen_var_1369, ((swapchainCount)));
-            countingStream->write((uint64_t*)cgen_var_1369, ((swapchainCount)) * 8);
+            *countPtr += ((swapchainCount)) * 8;
         }
         for (uint32_t i = 0; i < (uint32_t)((swapchainCount)); ++i)
         {
-            marshal_VkHdrMetadataEXT(countingStream, (VkHdrMetadataEXT*)(local_pMetadata + i));
+            count_VkHdrMetadataEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkHdrMetadataEXT*)(local_pMetadata + i), countPtr);
         }
     }
-    uint32_t packetSize_vkSetHdrMetadataEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkSetHdrMetadataEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkSetHdrMetadataEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkSetHdrMetadataEXT = OP_vkSetHdrMetadataEXT;
-    stream->write(&opcode_vkSetHdrMetadataEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkSetHdrMetadataEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1370;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1370, 1);
-    stream->write((uint64_t*)&cgen_var_1370, 1 * 8);
-    stream->write((uint32_t*)&local_swapchainCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkSetHdrMetadataEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkSetHdrMetadataEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_swapchainCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     if (((swapchainCount)))
     {
-        uint64_t* cgen_var_1371;
-        stream->alloc((void**)&cgen_var_1371, ((swapchainCount)) * 8);
-        stream->handleMapping()->mapHandles_VkSwapchainKHR_u64(local_pSwapchains, cgen_var_1371, ((swapchainCount)));
-        stream->write((uint64_t*)cgen_var_1371, ((swapchainCount)) * 8);
+        uint8_t* cgen_var_1_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((swapchainCount)); ++k)
+        {
+            uint64_t tmpval = get_host_u64_VkSwapchainKHR(local_pSwapchains[k]);
+            memcpy(cgen_var_1_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((swapchainCount));
     }
     for (uint32_t i = 0; i < (uint32_t)((swapchainCount)); ++i)
     {
-        marshal_VkHdrMetadataEXT(stream, (VkHdrMetadataEXT*)(local_pMetadata + i));
+        reservedmarshal_VkHdrMetadataEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkHdrMetadataEXT*)(local_pMetadata + i), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkSetHdrMetadataEXT readParams");
-    AEMU_SCOPED_TRACE("vkSetHdrMetadataEXT returnUnmarshal");
-    mImpl->log("finish vkSetHdrMetadataEXT");;
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
@@ -20894,16 +25651,14 @@
     VkInstance instance,
     const VkIOSSurfaceCreateInfoMVK* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
+    VkSurfaceKHR* pSurface,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateIOSSurfaceMVK encode");
-    mImpl->log("start vkCreateIOSSurfaceMVK");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     VkIOSSurfaceCreateInfoMVK* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -20912,72 +25667,78 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkIOSSurfaceCreateInfoMVK*)pool->alloc(sizeof(const VkIOSSurfaceCreateInfoMVK));
-        deepcopy_VkIOSSurfaceCreateInfoMVK(pool, pCreateInfo, (VkIOSSurfaceCreateInfoMVK*)(local_pCreateInfo));
+        deepcopy_VkIOSSurfaceCreateInfoMVK(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkIOSSurfaceCreateInfoMVK*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkIOSSurfaceCreateInfoMVK(mImpl->resources(), (VkIOSSurfaceCreateInfoMVK*)(local_pCreateInfo));
+        transform_tohost_VkIOSSurfaceCreateInfoMVK(sResourceTracker, (VkIOSSurfaceCreateInfoMVK*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1372;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1372, 1);
-        countingStream->write((uint64_t*)&cgen_var_1372, 1 * 8);
-        marshal_VkIOSSurfaceCreateInfoMVK(countingStream, (VkIOSSurfaceCreateInfoMVK*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkIOSSurfaceCreateInfoMVK(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkIOSSurfaceCreateInfoMVK*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1373 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1373);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_1374;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_1374, 1);
-        countingStream->write((uint64_t*)&cgen_var_1374, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateIOSSurfaceMVK = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateIOSSurfaceMVK = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateIOSSurfaceMVK);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateIOSSurfaceMVK = OP_vkCreateIOSSurfaceMVK;
-    stream->write(&opcode_vkCreateIOSSurfaceMVK, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateIOSSurfaceMVK, sizeof(uint32_t));
-    uint64_t cgen_var_1375;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1375, 1);
-    stream->write((uint64_t*)&cgen_var_1375, 1 * 8);
-    marshal_VkIOSSurfaceCreateInfoMVK(stream, (VkIOSSurfaceCreateInfoMVK*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateIOSSurfaceMVK, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateIOSSurfaceMVK, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkIOSSurfaceCreateInfoMVK(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkIOSSurfaceCreateInfoMVK*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1376 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1376);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_1377;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_1377, 1);
-    stream->write((uint64_t*)&cgen_var_1377, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateIOSSurfaceMVK readParams");
-    uint64_t cgen_var_1378;
-    stream->read((uint64_t*)&cgen_var_1378, 8);
-    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_1378, (VkSurfaceKHR*)pSurface, 1);
-    AEMU_SCOPED_TRACE("vkCreateIOSSurfaceMVK returnUnmarshal");
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSurface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_3, (VkSurfaceKHR*)pSurface, 1);
     VkResult vkCreateIOSSurfaceMVK_VkResult_return = (VkResult)0;
     stream->read(&vkCreateIOSSurfaceMVK_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateIOSSurfaceMVK");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateIOSSurfaceMVK_VkResult_return;
 }
 
@@ -20987,16 +25748,14 @@
     VkInstance instance,
     const VkMacOSSurfaceCreateInfoMVK* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkSurfaceKHR* pSurface)
+    VkSurfaceKHR* pSurface,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateMacOSSurfaceMVK encode");
-    mImpl->log("start vkCreateMacOSSurfaceMVK");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     VkMacOSSurfaceCreateInfoMVK* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -21005,76 +25764,348 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkMacOSSurfaceCreateInfoMVK*)pool->alloc(sizeof(const VkMacOSSurfaceCreateInfoMVK));
-        deepcopy_VkMacOSSurfaceCreateInfoMVK(pool, pCreateInfo, (VkMacOSSurfaceCreateInfoMVK*)(local_pCreateInfo));
+        deepcopy_VkMacOSSurfaceCreateInfoMVK(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkMacOSSurfaceCreateInfoMVK*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkMacOSSurfaceCreateInfoMVK(mImpl->resources(), (VkMacOSSurfaceCreateInfoMVK*)(local_pCreateInfo));
+        transform_tohost_VkMacOSSurfaceCreateInfoMVK(sResourceTracker, (VkMacOSSurfaceCreateInfoMVK*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1379;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1379, 1);
-        countingStream->write((uint64_t*)&cgen_var_1379, 1 * 8);
-        marshal_VkMacOSSurfaceCreateInfoMVK(countingStream, (VkMacOSSurfaceCreateInfoMVK*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkMacOSSurfaceCreateInfoMVK(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMacOSSurfaceCreateInfoMVK*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1380 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1380);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_1381;
-        countingStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_1381, 1);
-        countingStream->write((uint64_t*)&cgen_var_1381, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateMacOSSurfaceMVK = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateMacOSSurfaceMVK = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateMacOSSurfaceMVK);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateMacOSSurfaceMVK = OP_vkCreateMacOSSurfaceMVK;
-    stream->write(&opcode_vkCreateMacOSSurfaceMVK, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateMacOSSurfaceMVK, sizeof(uint32_t));
-    uint64_t cgen_var_1382;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1382, 1);
-    stream->write((uint64_t*)&cgen_var_1382, 1 * 8);
-    marshal_VkMacOSSurfaceCreateInfoMVK(stream, (VkMacOSSurfaceCreateInfoMVK*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateMacOSSurfaceMVK, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateMacOSSurfaceMVK, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkMacOSSurfaceCreateInfoMVK(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMacOSSurfaceCreateInfoMVK*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1383 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1383);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_1384;
-    stream->handleMapping()->mapHandles_VkSurfaceKHR_u64(pSurface, &cgen_var_1384, 1);
-    stream->write((uint64_t*)&cgen_var_1384, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateMacOSSurfaceMVK readParams");
-    uint64_t cgen_var_1385;
-    stream->read((uint64_t*)&cgen_var_1385, 8);
-    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_1385, (VkSurfaceKHR*)pSurface, 1);
-    AEMU_SCOPED_TRACE("vkCreateMacOSSurfaceMVK returnUnmarshal");
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSurface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_3, (VkSurfaceKHR*)pSurface, 1);
     VkResult vkCreateMacOSSurfaceMVK_VkResult_return = (VkResult)0;
     stream->read(&vkCreateMacOSSurfaceMVK_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateMacOSSurfaceMVK");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateMacOSSurfaceMVK_VkResult_return;
 }
 
 #endif
+#ifdef VK_MVK_moltenvk
+void VkEncoder::vkGetMTLDeviceMVK(
+    VkPhysicalDevice physicalDevice,
+    void** pMTLDevice,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkPhysicalDevice local_physicalDevice;
+    local_physicalDevice = physicalDevice;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(void*);
+    }
+    uint32_t packetSize_vkGetMTLDeviceMVK = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetMTLDeviceMVK);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetMTLDeviceMVK = OP_vkGetMTLDeviceMVK;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetMTLDeviceMVK, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetMTLDeviceMVK, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (void**)pMTLDevice, sizeof(void*));
+    *streamPtrPtr += sizeof(void*);
+    stream->read((void**)pMTLDevice, sizeof(void*));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+VkResult VkEncoder::vkSetMTLTextureMVK(
+    VkImage image,
+    void* mtlTexture,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkImage local_image;
+    local_image = image;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint8_t);
+    }
+    uint32_t packetSize_vkSetMTLTextureMVK = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkSetMTLTextureMVK);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkSetMTLTextureMVK = OP_vkSetMTLTextureMVK;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkSetMTLTextureMVK, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkSetMTLTextureMVK, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&local_image));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (void*)mtlTexture, sizeof(uint8_t));
+    *streamPtrPtr += sizeof(uint8_t);
+    stream->read((void*)mtlTexture, sizeof(uint8_t));
+    VkResult vkSetMTLTextureMVK_VkResult_return = (VkResult)0;
+    stream->read(&vkSetMTLTextureMVK_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkSetMTLTextureMVK_VkResult_return;
+}
+
+void VkEncoder::vkGetMTLTextureMVK(
+    VkImage image,
+    void** pMTLTexture,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkImage local_image;
+    local_image = image;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(void*);
+    }
+    uint32_t packetSize_vkGetMTLTextureMVK = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetMTLTextureMVK);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetMTLTextureMVK = OP_vkGetMTLTextureMVK;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetMTLTextureMVK, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetMTLTextureMVK, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&local_image));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (void**)pMTLTexture, sizeof(void*));
+    *streamPtrPtr += sizeof(void*);
+    stream->read((void**)pMTLTexture, sizeof(void*));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkGetMTLBufferMVK(
+    VkBuffer buffer,
+    void** pMTLBuffer,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkBuffer local_buffer;
+    local_buffer = buffer;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(void*);
+    }
+    uint32_t packetSize_vkGetMTLBufferMVK = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetMTLBufferMVK);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetMTLBufferMVK = OP_vkGetMTLBufferMVK;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetMTLBufferMVK, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetMTLBufferMVK, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_buffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (void**)pMTLBuffer, sizeof(void*));
+    *streamPtrPtr += sizeof(void*);
+    stream->read((void**)pMTLBuffer, sizeof(void*));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+VkResult VkEncoder::vkUseIOSurfaceMVK(
+    VkImage image,
+    void* ioSurface,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkImage local_image;
+    local_image = image;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint8_t);
+    }
+    uint32_t packetSize_vkUseIOSurfaceMVK = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkUseIOSurfaceMVK);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkUseIOSurfaceMVK = OP_vkUseIOSurfaceMVK;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkUseIOSurfaceMVK, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkUseIOSurfaceMVK, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&local_image));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (void*)ioSurface, sizeof(uint8_t));
+    *streamPtrPtr += sizeof(uint8_t);
+    stream->read((void*)ioSurface, sizeof(uint8_t));
+    VkResult vkUseIOSurfaceMVK_VkResult_return = (VkResult)0;
+    stream->read(&vkUseIOSurfaceMVK_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkUseIOSurfaceMVK_VkResult_return;
+}
+
+void VkEncoder::vkGetIOSurfaceMVK(
+    VkImage image,
+    void** pIOSurface,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkImage local_image;
+    local_image = image;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(void*);
+    }
+    uint32_t packetSize_vkGetIOSurfaceMVK = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetIOSurfaceMVK);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetIOSurfaceMVK = OP_vkGetIOSurfaceMVK;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetIOSurfaceMVK, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetIOSurfaceMVK, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&local_image));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (void**)pIOSurface, sizeof(void*));
+    *streamPtrPtr += sizeof(void*);
+    stream->read((void**)pIOSurface, sizeof(void*));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
 #ifdef VK_EXT_external_memory_dma_buf
 #endif
 #ifdef VK_EXT_queue_family_foreign
@@ -21082,16 +26113,14 @@
 #ifdef VK_EXT_debug_utils
 VkResult VkEncoder::vkSetDebugUtilsObjectNameEXT(
     VkDevice device,
-    const VkDebugUtilsObjectNameInfoEXT* pNameInfo)
+    const VkDebugUtilsObjectNameInfoEXT* pNameInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkSetDebugUtilsObjectNameEXT encode");
-    mImpl->log("start vkSetDebugUtilsObjectNameEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDebugUtilsObjectNameInfoEXT* local_pNameInfo;
     local_device = device;
@@ -21099,51 +26128,54 @@
     if (pNameInfo)
     {
         local_pNameInfo = (VkDebugUtilsObjectNameInfoEXT*)pool->alloc(sizeof(const VkDebugUtilsObjectNameInfoEXT));
-        deepcopy_VkDebugUtilsObjectNameInfoEXT(pool, pNameInfo, (VkDebugUtilsObjectNameInfoEXT*)(local_pNameInfo));
+        deepcopy_VkDebugUtilsObjectNameInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pNameInfo, (VkDebugUtilsObjectNameInfoEXT*)(local_pNameInfo));
     }
     if (local_pNameInfo)
     {
-        transform_tohost_VkDebugUtilsObjectNameInfoEXT(mImpl->resources(), (VkDebugUtilsObjectNameInfoEXT*)(local_pNameInfo));
+        transform_tohost_VkDebugUtilsObjectNameInfoEXT(sResourceTracker, (VkDebugUtilsObjectNameInfoEXT*)(local_pNameInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1386;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1386, 1);
-        countingStream->write((uint64_t*)&cgen_var_1386, 1 * 8);
-        marshal_VkDebugUtilsObjectNameInfoEXT(countingStream, (VkDebugUtilsObjectNameInfoEXT*)(local_pNameInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDebugUtilsObjectNameInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugUtilsObjectNameInfoEXT*)(local_pNameInfo), countPtr);
     }
-    uint32_t packetSize_vkSetDebugUtilsObjectNameEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkSetDebugUtilsObjectNameEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkSetDebugUtilsObjectNameEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkSetDebugUtilsObjectNameEXT = OP_vkSetDebugUtilsObjectNameEXT;
-    stream->write(&opcode_vkSetDebugUtilsObjectNameEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkSetDebugUtilsObjectNameEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1387;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1387, 1);
-    stream->write((uint64_t*)&cgen_var_1387, 1 * 8);
-    marshal_VkDebugUtilsObjectNameInfoEXT(stream, (VkDebugUtilsObjectNameInfoEXT*)(local_pNameInfo));
-    AEMU_SCOPED_TRACE("vkSetDebugUtilsObjectNameEXT readParams");
-    AEMU_SCOPED_TRACE("vkSetDebugUtilsObjectNameEXT returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkSetDebugUtilsObjectNameEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkSetDebugUtilsObjectNameEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDebugUtilsObjectNameInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugUtilsObjectNameInfoEXT*)(local_pNameInfo), streamPtrPtr);
     VkResult vkSetDebugUtilsObjectNameEXT_VkResult_return = (VkResult)0;
     stream->read(&vkSetDebugUtilsObjectNameEXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkSetDebugUtilsObjectNameEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkSetDebugUtilsObjectNameEXT_VkResult_return;
 }
 
 VkResult VkEncoder::vkSetDebugUtilsObjectTagEXT(
     VkDevice device,
-    const VkDebugUtilsObjectTagInfoEXT* pTagInfo)
+    const VkDebugUtilsObjectTagInfoEXT* pTagInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkSetDebugUtilsObjectTagEXT encode");
-    mImpl->log("start vkSetDebugUtilsObjectTagEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDebugUtilsObjectTagInfoEXT* local_pTagInfo;
     local_device = device;
@@ -21151,51 +26183,54 @@
     if (pTagInfo)
     {
         local_pTagInfo = (VkDebugUtilsObjectTagInfoEXT*)pool->alloc(sizeof(const VkDebugUtilsObjectTagInfoEXT));
-        deepcopy_VkDebugUtilsObjectTagInfoEXT(pool, pTagInfo, (VkDebugUtilsObjectTagInfoEXT*)(local_pTagInfo));
+        deepcopy_VkDebugUtilsObjectTagInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pTagInfo, (VkDebugUtilsObjectTagInfoEXT*)(local_pTagInfo));
     }
     if (local_pTagInfo)
     {
-        transform_tohost_VkDebugUtilsObjectTagInfoEXT(mImpl->resources(), (VkDebugUtilsObjectTagInfoEXT*)(local_pTagInfo));
+        transform_tohost_VkDebugUtilsObjectTagInfoEXT(sResourceTracker, (VkDebugUtilsObjectTagInfoEXT*)(local_pTagInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1388;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1388, 1);
-        countingStream->write((uint64_t*)&cgen_var_1388, 1 * 8);
-        marshal_VkDebugUtilsObjectTagInfoEXT(countingStream, (VkDebugUtilsObjectTagInfoEXT*)(local_pTagInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDebugUtilsObjectTagInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugUtilsObjectTagInfoEXT*)(local_pTagInfo), countPtr);
     }
-    uint32_t packetSize_vkSetDebugUtilsObjectTagEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkSetDebugUtilsObjectTagEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkSetDebugUtilsObjectTagEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkSetDebugUtilsObjectTagEXT = OP_vkSetDebugUtilsObjectTagEXT;
-    stream->write(&opcode_vkSetDebugUtilsObjectTagEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkSetDebugUtilsObjectTagEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1389;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1389, 1);
-    stream->write((uint64_t*)&cgen_var_1389, 1 * 8);
-    marshal_VkDebugUtilsObjectTagInfoEXT(stream, (VkDebugUtilsObjectTagInfoEXT*)(local_pTagInfo));
-    AEMU_SCOPED_TRACE("vkSetDebugUtilsObjectTagEXT readParams");
-    AEMU_SCOPED_TRACE("vkSetDebugUtilsObjectTagEXT returnUnmarshal");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkSetDebugUtilsObjectTagEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkSetDebugUtilsObjectTagEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDebugUtilsObjectTagInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugUtilsObjectTagInfoEXT*)(local_pTagInfo), streamPtrPtr);
     VkResult vkSetDebugUtilsObjectTagEXT_VkResult_return = (VkResult)0;
     stream->read(&vkSetDebugUtilsObjectTagEXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkSetDebugUtilsObjectTagEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkSetDebugUtilsObjectTagEXT_VkResult_return;
 }
 
 void VkEncoder::vkQueueBeginDebugUtilsLabelEXT(
     VkQueue queue,
-    const VkDebugUtilsLabelEXT* pLabelInfo)
+    const VkDebugUtilsLabelEXT* pLabelInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkQueueBeginDebugUtilsLabelEXT encode");
-    mImpl->log("start vkQueueBeginDebugUtilsLabelEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkQueue local_queue;
     VkDebugUtilsLabelEXT* local_pLabelInfo;
     local_queue = queue;
@@ -21203,77 +26238,91 @@
     if (pLabelInfo)
     {
         local_pLabelInfo = (VkDebugUtilsLabelEXT*)pool->alloc(sizeof(const VkDebugUtilsLabelEXT));
-        deepcopy_VkDebugUtilsLabelEXT(pool, pLabelInfo, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
+        deepcopy_VkDebugUtilsLabelEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pLabelInfo, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
     }
     if (local_pLabelInfo)
     {
-        transform_tohost_VkDebugUtilsLabelEXT(mImpl->resources(), (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
+        transform_tohost_VkDebugUtilsLabelEXT(sResourceTracker, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1390;
-        countingStream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_1390, 1);
-        countingStream->write((uint64_t*)&cgen_var_1390, 1 * 8);
-        marshal_VkDebugUtilsLabelEXT(countingStream, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDebugUtilsLabelEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugUtilsLabelEXT*)(local_pLabelInfo), countPtr);
     }
-    uint32_t packetSize_vkQueueBeginDebugUtilsLabelEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkQueueBeginDebugUtilsLabelEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkQueueBeginDebugUtilsLabelEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkQueueBeginDebugUtilsLabelEXT = OP_vkQueueBeginDebugUtilsLabelEXT;
-    stream->write(&opcode_vkQueueBeginDebugUtilsLabelEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkQueueBeginDebugUtilsLabelEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1391;
-    stream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_1391, 1);
-    stream->write((uint64_t*)&cgen_var_1391, 1 * 8);
-    marshal_VkDebugUtilsLabelEXT(stream, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
-    AEMU_SCOPED_TRACE("vkQueueBeginDebugUtilsLabelEXT readParams");
-    AEMU_SCOPED_TRACE("vkQueueBeginDebugUtilsLabelEXT returnUnmarshal");
-    mImpl->log("finish vkQueueBeginDebugUtilsLabelEXT");;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkQueueBeginDebugUtilsLabelEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkQueueBeginDebugUtilsLabelEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueue((*&local_queue));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDebugUtilsLabelEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugUtilsLabelEXT*)(local_pLabelInfo), streamPtrPtr);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkQueueEndDebugUtilsLabelEXT(
-    VkQueue queue)
+    VkQueue queue,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkQueueEndDebugUtilsLabelEXT encode");
-    mImpl->log("start vkQueueEndDebugUtilsLabelEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkQueue local_queue;
     local_queue = queue;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1392;
-        countingStream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_1392, 1);
-        countingStream->write((uint64_t*)&cgen_var_1392, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkQueueEndDebugUtilsLabelEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkQueueEndDebugUtilsLabelEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkQueueEndDebugUtilsLabelEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkQueueEndDebugUtilsLabelEXT = OP_vkQueueEndDebugUtilsLabelEXT;
-    stream->write(&opcode_vkQueueEndDebugUtilsLabelEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkQueueEndDebugUtilsLabelEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1393;
-    stream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_1393, 1);
-    stream->write((uint64_t*)&cgen_var_1393, 1 * 8);
-    AEMU_SCOPED_TRACE("vkQueueEndDebugUtilsLabelEXT readParams");
-    AEMU_SCOPED_TRACE("vkQueueEndDebugUtilsLabelEXT returnUnmarshal");
-    mImpl->log("finish vkQueueEndDebugUtilsLabelEXT");;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkQueueEndDebugUtilsLabelEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkQueueEndDebugUtilsLabelEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueue((*&local_queue));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkQueueInsertDebugUtilsLabelEXT(
     VkQueue queue,
-    const VkDebugUtilsLabelEXT* pLabelInfo)
+    const VkDebugUtilsLabelEXT* pLabelInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkQueueInsertDebugUtilsLabelEXT encode");
-    mImpl->log("start vkQueueInsertDebugUtilsLabelEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkQueue local_queue;
     VkDebugUtilsLabelEXT* local_pLabelInfo;
     local_queue = queue;
@@ -21281,45 +26330,52 @@
     if (pLabelInfo)
     {
         local_pLabelInfo = (VkDebugUtilsLabelEXT*)pool->alloc(sizeof(const VkDebugUtilsLabelEXT));
-        deepcopy_VkDebugUtilsLabelEXT(pool, pLabelInfo, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
+        deepcopy_VkDebugUtilsLabelEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pLabelInfo, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
     }
     if (local_pLabelInfo)
     {
-        transform_tohost_VkDebugUtilsLabelEXT(mImpl->resources(), (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
+        transform_tohost_VkDebugUtilsLabelEXT(sResourceTracker, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1394;
-        countingStream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_1394, 1);
-        countingStream->write((uint64_t*)&cgen_var_1394, 1 * 8);
-        marshal_VkDebugUtilsLabelEXT(countingStream, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDebugUtilsLabelEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugUtilsLabelEXT*)(local_pLabelInfo), countPtr);
     }
-    uint32_t packetSize_vkQueueInsertDebugUtilsLabelEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkQueueInsertDebugUtilsLabelEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkQueueInsertDebugUtilsLabelEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkQueueInsertDebugUtilsLabelEXT = OP_vkQueueInsertDebugUtilsLabelEXT;
-    stream->write(&opcode_vkQueueInsertDebugUtilsLabelEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkQueueInsertDebugUtilsLabelEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1395;
-    stream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_1395, 1);
-    stream->write((uint64_t*)&cgen_var_1395, 1 * 8);
-    marshal_VkDebugUtilsLabelEXT(stream, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
-    AEMU_SCOPED_TRACE("vkQueueInsertDebugUtilsLabelEXT readParams");
-    AEMU_SCOPED_TRACE("vkQueueInsertDebugUtilsLabelEXT returnUnmarshal");
-    mImpl->log("finish vkQueueInsertDebugUtilsLabelEXT");;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkQueueInsertDebugUtilsLabelEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkQueueInsertDebugUtilsLabelEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueue((*&local_queue));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDebugUtilsLabelEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugUtilsLabelEXT*)(local_pLabelInfo), streamPtrPtr);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdBeginDebugUtilsLabelEXT(
     VkCommandBuffer commandBuffer,
-    const VkDebugUtilsLabelEXT* pLabelInfo)
+    const VkDebugUtilsLabelEXT* pLabelInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdBeginDebugUtilsLabelEXT encode");
-    mImpl->log("start vkCmdBeginDebugUtilsLabelEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkDebugUtilsLabelEXT* local_pLabelInfo;
     local_commandBuffer = commandBuffer;
@@ -21327,77 +26383,93 @@
     if (pLabelInfo)
     {
         local_pLabelInfo = (VkDebugUtilsLabelEXT*)pool->alloc(sizeof(const VkDebugUtilsLabelEXT));
-        deepcopy_VkDebugUtilsLabelEXT(pool, pLabelInfo, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
+        deepcopy_VkDebugUtilsLabelEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pLabelInfo, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
     }
     if (local_pLabelInfo)
     {
-        transform_tohost_VkDebugUtilsLabelEXT(mImpl->resources(), (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
+        transform_tohost_VkDebugUtilsLabelEXT(sResourceTracker, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1396;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1396, 1);
-        countingStream->write((uint64_t*)&cgen_var_1396, 1 * 8);
-        marshal_VkDebugUtilsLabelEXT(countingStream, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDebugUtilsLabelEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugUtilsLabelEXT*)(local_pLabelInfo), countPtr);
     }
-    uint32_t packetSize_vkCmdBeginDebugUtilsLabelEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdBeginDebugUtilsLabelEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBeginDebugUtilsLabelEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBeginDebugUtilsLabelEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdBeginDebugUtilsLabelEXT = OP_vkCmdBeginDebugUtilsLabelEXT;
-    stream->write(&opcode_vkCmdBeginDebugUtilsLabelEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdBeginDebugUtilsLabelEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1397;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1397, 1);
-    stream->write((uint64_t*)&cgen_var_1397, 1 * 8);
-    marshal_VkDebugUtilsLabelEXT(stream, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
-    AEMU_SCOPED_TRACE("vkCmdBeginDebugUtilsLabelEXT readParams");
-    AEMU_SCOPED_TRACE("vkCmdBeginDebugUtilsLabelEXT returnUnmarshal");
-    mImpl->log("finish vkCmdBeginDebugUtilsLabelEXT");;
+    memcpy(streamPtr, &opcode_vkCmdBeginDebugUtilsLabelEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBeginDebugUtilsLabelEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkDebugUtilsLabelEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugUtilsLabelEXT*)(local_pLabelInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdEndDebugUtilsLabelEXT(
-    VkCommandBuffer commandBuffer)
+    VkCommandBuffer commandBuffer,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdEndDebugUtilsLabelEXT encode");
-    mImpl->log("start vkCmdEndDebugUtilsLabelEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     local_commandBuffer = commandBuffer;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1398;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1398, 1);
-        countingStream->write((uint64_t*)&cgen_var_1398, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkCmdEndDebugUtilsLabelEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdEndDebugUtilsLabelEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdEndDebugUtilsLabelEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdEndDebugUtilsLabelEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdEndDebugUtilsLabelEXT = OP_vkCmdEndDebugUtilsLabelEXT;
-    stream->write(&opcode_vkCmdEndDebugUtilsLabelEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdEndDebugUtilsLabelEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1399;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1399, 1);
-    stream->write((uint64_t*)&cgen_var_1399, 1 * 8);
-    AEMU_SCOPED_TRACE("vkCmdEndDebugUtilsLabelEXT readParams");
-    AEMU_SCOPED_TRACE("vkCmdEndDebugUtilsLabelEXT returnUnmarshal");
-    mImpl->log("finish vkCmdEndDebugUtilsLabelEXT");;
+    memcpy(streamPtr, &opcode_vkCmdEndDebugUtilsLabelEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdEndDebugUtilsLabelEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCmdInsertDebugUtilsLabelEXT(
     VkCommandBuffer commandBuffer,
-    const VkDebugUtilsLabelEXT* pLabelInfo)
+    const VkDebugUtilsLabelEXT* pLabelInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdInsertDebugUtilsLabelEXT encode");
-    mImpl->log("start vkCmdInsertDebugUtilsLabelEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkDebugUtilsLabelEXT* local_pLabelInfo;
     local_commandBuffer = commandBuffer;
@@ -21405,47 +26477,55 @@
     if (pLabelInfo)
     {
         local_pLabelInfo = (VkDebugUtilsLabelEXT*)pool->alloc(sizeof(const VkDebugUtilsLabelEXT));
-        deepcopy_VkDebugUtilsLabelEXT(pool, pLabelInfo, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
+        deepcopy_VkDebugUtilsLabelEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pLabelInfo, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
     }
     if (local_pLabelInfo)
     {
-        transform_tohost_VkDebugUtilsLabelEXT(mImpl->resources(), (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
+        transform_tohost_VkDebugUtilsLabelEXT(sResourceTracker, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1400;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1400, 1);
-        countingStream->write((uint64_t*)&cgen_var_1400, 1 * 8);
-        marshal_VkDebugUtilsLabelEXT(countingStream, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDebugUtilsLabelEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugUtilsLabelEXT*)(local_pLabelInfo), countPtr);
     }
-    uint32_t packetSize_vkCmdInsertDebugUtilsLabelEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdInsertDebugUtilsLabelEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdInsertDebugUtilsLabelEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdInsertDebugUtilsLabelEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdInsertDebugUtilsLabelEXT = OP_vkCmdInsertDebugUtilsLabelEXT;
-    stream->write(&opcode_vkCmdInsertDebugUtilsLabelEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdInsertDebugUtilsLabelEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1401;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1401, 1);
-    stream->write((uint64_t*)&cgen_var_1401, 1 * 8);
-    marshal_VkDebugUtilsLabelEXT(stream, (VkDebugUtilsLabelEXT*)(local_pLabelInfo));
-    AEMU_SCOPED_TRACE("vkCmdInsertDebugUtilsLabelEXT readParams");
-    AEMU_SCOPED_TRACE("vkCmdInsertDebugUtilsLabelEXT returnUnmarshal");
-    mImpl->log("finish vkCmdInsertDebugUtilsLabelEXT");;
+    memcpy(streamPtr, &opcode_vkCmdInsertDebugUtilsLabelEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdInsertDebugUtilsLabelEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkDebugUtilsLabelEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugUtilsLabelEXT*)(local_pLabelInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkCreateDebugUtilsMessengerEXT(
     VkInstance instance,
     const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkDebugUtilsMessengerEXT* pMessenger)
+    VkDebugUtilsMessengerEXT* pMessenger,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateDebugUtilsMessengerEXT encode");
-    mImpl->log("start vkCreateDebugUtilsMessengerEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     VkDebugUtilsMessengerCreateInfoEXT* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -21454,90 +26534,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkDebugUtilsMessengerCreateInfoEXT*)pool->alloc(sizeof(const VkDebugUtilsMessengerCreateInfoEXT));
-        deepcopy_VkDebugUtilsMessengerCreateInfoEXT(pool, pCreateInfo, (VkDebugUtilsMessengerCreateInfoEXT*)(local_pCreateInfo));
+        deepcopy_VkDebugUtilsMessengerCreateInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkDebugUtilsMessengerCreateInfoEXT*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkDebugUtilsMessengerCreateInfoEXT(mImpl->resources(), (VkDebugUtilsMessengerCreateInfoEXT*)(local_pCreateInfo));
+        transform_tohost_VkDebugUtilsMessengerCreateInfoEXT(sResourceTracker, (VkDebugUtilsMessengerCreateInfoEXT*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1402;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1402, 1);
-        countingStream->write((uint64_t*)&cgen_var_1402, 1 * 8);
-        marshal_VkDebugUtilsMessengerCreateInfoEXT(countingStream, (VkDebugUtilsMessengerCreateInfoEXT*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDebugUtilsMessengerCreateInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugUtilsMessengerCreateInfoEXT*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1403 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1403);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_1404;
-        countingStream->handleMapping()->mapHandles_VkDebugUtilsMessengerEXT_u64(pMessenger, &cgen_var_1404, 1);
-        countingStream->write((uint64_t*)&cgen_var_1404, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateDebugUtilsMessengerEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateDebugUtilsMessengerEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateDebugUtilsMessengerEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateDebugUtilsMessengerEXT = OP_vkCreateDebugUtilsMessengerEXT;
-    stream->write(&opcode_vkCreateDebugUtilsMessengerEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateDebugUtilsMessengerEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1405;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1405, 1);
-    stream->write((uint64_t*)&cgen_var_1405, 1 * 8);
-    marshal_VkDebugUtilsMessengerCreateInfoEXT(stream, (VkDebugUtilsMessengerCreateInfoEXT*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateDebugUtilsMessengerEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateDebugUtilsMessengerEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDebugUtilsMessengerCreateInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugUtilsMessengerCreateInfoEXT*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1406 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1406);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_1407;
-    stream->handleMapping()->mapHandles_VkDebugUtilsMessengerEXT_u64(pMessenger, &cgen_var_1407, 1);
-    stream->write((uint64_t*)&cgen_var_1407, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateDebugUtilsMessengerEXT readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_1408;
-    stream->read((uint64_t*)&cgen_var_1408, 8);
-    stream->handleMapping()->mapHandles_u64_VkDebugUtilsMessengerEXT(&cgen_var_1408, (VkDebugUtilsMessengerEXT*)pMessenger, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pMessenger));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkDebugUtilsMessengerEXT(&cgen_var_3, (VkDebugUtilsMessengerEXT*)pMessenger, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateDebugUtilsMessengerEXT returnUnmarshal");
     VkResult vkCreateDebugUtilsMessengerEXT_VkResult_return = (VkResult)0;
     stream->read(&vkCreateDebugUtilsMessengerEXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateDebugUtilsMessengerEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateDebugUtilsMessengerEXT_VkResult_return;
 }
 
 void VkEncoder::vkDestroyDebugUtilsMessengerEXT(
     VkInstance instance,
     VkDebugUtilsMessengerEXT messenger,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyDebugUtilsMessengerEXT encode");
-    mImpl->log("start vkDestroyDebugUtilsMessengerEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     VkDebugUtilsMessengerEXT local_messenger;
     VkAllocationCallbacks* local_pAllocator;
@@ -21547,67 +26631,75 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1409;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1409, 1);
-        countingStream->write((uint64_t*)&cgen_var_1409, 1 * 8);
-        uint64_t cgen_var_1410;
-        countingStream->handleMapping()->mapHandles_VkDebugUtilsMessengerEXT_u64(&local_messenger, &cgen_var_1410, 1);
-        countingStream->write((uint64_t*)&cgen_var_1410, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_1411 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1411);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyDebugUtilsMessengerEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyDebugUtilsMessengerEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyDebugUtilsMessengerEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyDebugUtilsMessengerEXT = OP_vkDestroyDebugUtilsMessengerEXT;
-    stream->write(&opcode_vkDestroyDebugUtilsMessengerEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyDebugUtilsMessengerEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1412;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1412, 1);
-    stream->write((uint64_t*)&cgen_var_1412, 1 * 8);
-    uint64_t cgen_var_1413;
-    stream->handleMapping()->mapHandles_VkDebugUtilsMessengerEXT_u64(&local_messenger, &cgen_var_1413, 1);
-    stream->write((uint64_t*)&cgen_var_1413, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyDebugUtilsMessengerEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyDebugUtilsMessengerEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDebugUtilsMessengerEXT((*&local_messenger));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_1414 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1414);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyDebugUtilsMessengerEXT readParams");
-    AEMU_SCOPED_TRACE("vkDestroyDebugUtilsMessengerEXT returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkDebugUtilsMessengerEXT((VkDebugUtilsMessengerEXT*)&messenger);
-    mImpl->log("finish vkDestroyDebugUtilsMessengerEXT");;
+    sResourceTracker->destroyMapping()->mapHandles_VkDebugUtilsMessengerEXT((VkDebugUtilsMessengerEXT*)&messenger);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkSubmitDebugUtilsMessageEXT(
     VkInstance instance,
     VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
     VkDebugUtilsMessageTypeFlagsEXT messageTypes,
-    const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData)
+    const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkSubmitDebugUtilsMessageEXT encode");
-    mImpl->log("start vkSubmitDebugUtilsMessageEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkInstance local_instance;
     VkDebugUtilsMessageSeverityFlagBitsEXT local_messageSeverity;
     VkDebugUtilsMessageTypeFlagsEXT local_messageTypes;
@@ -21619,35 +26711,46 @@
     if (pCallbackData)
     {
         local_pCallbackData = (VkDebugUtilsMessengerCallbackDataEXT*)pool->alloc(sizeof(const VkDebugUtilsMessengerCallbackDataEXT));
-        deepcopy_VkDebugUtilsMessengerCallbackDataEXT(pool, pCallbackData, (VkDebugUtilsMessengerCallbackDataEXT*)(local_pCallbackData));
+        deepcopy_VkDebugUtilsMessengerCallbackDataEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCallbackData, (VkDebugUtilsMessengerCallbackDataEXT*)(local_pCallbackData));
     }
     if (local_pCallbackData)
     {
-        transform_tohost_VkDebugUtilsMessengerCallbackDataEXT(mImpl->resources(), (VkDebugUtilsMessengerCallbackDataEXT*)(local_pCallbackData));
+        transform_tohost_VkDebugUtilsMessengerCallbackDataEXT(sResourceTracker, (VkDebugUtilsMessengerCallbackDataEXT*)(local_pCallbackData));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1415;
-        countingStream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1415, 1);
-        countingStream->write((uint64_t*)&cgen_var_1415, 1 * 8);
-        countingStream->write((VkDebugUtilsMessageSeverityFlagBitsEXT*)&local_messageSeverity, sizeof(VkDebugUtilsMessageSeverityFlagBitsEXT));
-        countingStream->write((VkDebugUtilsMessageTypeFlagsEXT*)&local_messageTypes, sizeof(VkDebugUtilsMessageTypeFlagsEXT));
-        marshal_VkDebugUtilsMessengerCallbackDataEXT(countingStream, (VkDebugUtilsMessengerCallbackDataEXT*)(local_pCallbackData));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDebugUtilsMessageSeverityFlagBitsEXT);
+        *countPtr += sizeof(VkDebugUtilsMessageTypeFlagsEXT);
+        count_VkDebugUtilsMessengerCallbackDataEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugUtilsMessengerCallbackDataEXT*)(local_pCallbackData), countPtr);
     }
-    uint32_t packetSize_vkSubmitDebugUtilsMessageEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkSubmitDebugUtilsMessageEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkSubmitDebugUtilsMessageEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkSubmitDebugUtilsMessageEXT = OP_vkSubmitDebugUtilsMessageEXT;
-    stream->write(&opcode_vkSubmitDebugUtilsMessageEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkSubmitDebugUtilsMessageEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1416;
-    stream->handleMapping()->mapHandles_VkInstance_u64(&local_instance, &cgen_var_1416, 1);
-    stream->write((uint64_t*)&cgen_var_1416, 1 * 8);
-    stream->write((VkDebugUtilsMessageSeverityFlagBitsEXT*)&local_messageSeverity, sizeof(VkDebugUtilsMessageSeverityFlagBitsEXT));
-    stream->write((VkDebugUtilsMessageTypeFlagsEXT*)&local_messageTypes, sizeof(VkDebugUtilsMessageTypeFlagsEXT));
-    marshal_VkDebugUtilsMessengerCallbackDataEXT(stream, (VkDebugUtilsMessengerCallbackDataEXT*)(local_pCallbackData));
-    AEMU_SCOPED_TRACE("vkSubmitDebugUtilsMessageEXT readParams");
-    AEMU_SCOPED_TRACE("vkSubmitDebugUtilsMessageEXT returnUnmarshal");
-    mImpl->log("finish vkSubmitDebugUtilsMessageEXT");;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkSubmitDebugUtilsMessageEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkSubmitDebugUtilsMessageEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDebugUtilsMessageSeverityFlagBitsEXT*)&local_messageSeverity, sizeof(VkDebugUtilsMessageSeverityFlagBitsEXT));
+    *streamPtrPtr += sizeof(VkDebugUtilsMessageSeverityFlagBitsEXT);
+    memcpy(*streamPtrPtr, (VkDebugUtilsMessageTypeFlagsEXT*)&local_messageTypes, sizeof(VkDebugUtilsMessageTypeFlagsEXT));
+    *streamPtrPtr += sizeof(VkDebugUtilsMessageTypeFlagsEXT);
+    reservedmarshal_VkDebugUtilsMessengerCallbackDataEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDebugUtilsMessengerCallbackDataEXT*)(local_pCallbackData), streamPtrPtr);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
@@ -21655,71 +26758,70 @@
 VkResult VkEncoder::vkGetAndroidHardwareBufferPropertiesANDROID(
     VkDevice device,
     const AHardwareBuffer* buffer,
-    VkAndroidHardwareBufferPropertiesANDROID* pProperties)
+    VkAndroidHardwareBufferPropertiesANDROID* pProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetAndroidHardwareBufferPropertiesANDROID encode");
-    mImpl->log("start vkGetAndroidHardwareBufferPropertiesANDROID");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     AHardwareBuffer* local_buffer;
     local_device = device;
-    local_buffer = nullptr;
-    if (buffer)
+    // Avoiding deepcopy for buffer
+    local_buffer = (AHardwareBuffer*)buffer;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_buffer = (AHardwareBuffer*)pool->dupArray(buffer, sizeof(const AHardwareBuffer));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(AHardwareBuffer);
+        count_VkAndroidHardwareBufferPropertiesANDROID(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAndroidHardwareBufferPropertiesANDROID*)(pProperties), countPtr);
     }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1417;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1417, 1);
-        countingStream->write((uint64_t*)&cgen_var_1417, 1 * 8);
-        countingStream->write((AHardwareBuffer*)local_buffer, sizeof(AHardwareBuffer));
-        marshal_VkAndroidHardwareBufferPropertiesANDROID(countingStream, (VkAndroidHardwareBufferPropertiesANDROID*)(pProperties));
-    }
-    uint32_t packetSize_vkGetAndroidHardwareBufferPropertiesANDROID = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetAndroidHardwareBufferPropertiesANDROID = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetAndroidHardwareBufferPropertiesANDROID);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetAndroidHardwareBufferPropertiesANDROID = OP_vkGetAndroidHardwareBufferPropertiesANDROID;
-    stream->write(&opcode_vkGetAndroidHardwareBufferPropertiesANDROID, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetAndroidHardwareBufferPropertiesANDROID, sizeof(uint32_t));
-    uint64_t cgen_var_1418;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1418, 1);
-    stream->write((uint64_t*)&cgen_var_1418, 1 * 8);
-    stream->write((AHardwareBuffer*)local_buffer, sizeof(AHardwareBuffer));
-    marshal_VkAndroidHardwareBufferPropertiesANDROID(stream, (VkAndroidHardwareBufferPropertiesANDROID*)(pProperties));
-    AEMU_SCOPED_TRACE("vkGetAndroidHardwareBufferPropertiesANDROID readParams");
-    unmarshal_VkAndroidHardwareBufferPropertiesANDROID(stream, (VkAndroidHardwareBufferPropertiesANDROID*)(pProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetAndroidHardwareBufferPropertiesANDROID, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetAndroidHardwareBufferPropertiesANDROID, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (AHardwareBuffer*)local_buffer, sizeof(AHardwareBuffer));
+    *streamPtrPtr += sizeof(AHardwareBuffer);
+    reservedmarshal_VkAndroidHardwareBufferPropertiesANDROID(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAndroidHardwareBufferPropertiesANDROID*)(pProperties), streamPtrPtr);
+    unmarshal_VkAndroidHardwareBufferPropertiesANDROID(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAndroidHardwareBufferPropertiesANDROID*)(pProperties));
     if (pProperties)
     {
-        transform_fromhost_VkAndroidHardwareBufferPropertiesANDROID(mImpl->resources(), (VkAndroidHardwareBufferPropertiesANDROID*)(pProperties));
+        transform_fromhost_VkAndroidHardwareBufferPropertiesANDROID(sResourceTracker, (VkAndroidHardwareBufferPropertiesANDROID*)(pProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetAndroidHardwareBufferPropertiesANDROID returnUnmarshal");
     VkResult vkGetAndroidHardwareBufferPropertiesANDROID_VkResult_return = (VkResult)0;
     stream->read(&vkGetAndroidHardwareBufferPropertiesANDROID_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetAndroidHardwareBufferPropertiesANDROID");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetAndroidHardwareBufferPropertiesANDROID_VkResult_return;
 }
 
 VkResult VkEncoder::vkGetMemoryAndroidHardwareBufferANDROID(
     VkDevice device,
     const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
-    AHardwareBuffer** pBuffer)
+    AHardwareBuffer** pBuffer,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetMemoryAndroidHardwareBufferANDROID encode");
-    mImpl->log("start vkGetMemoryAndroidHardwareBufferANDROID");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkMemoryGetAndroidHardwareBufferInfoANDROID* local_pInfo;
     local_device = device;
@@ -21727,39 +26829,45 @@
     if (pInfo)
     {
         local_pInfo = (VkMemoryGetAndroidHardwareBufferInfoANDROID*)pool->alloc(sizeof(const VkMemoryGetAndroidHardwareBufferInfoANDROID));
-        deepcopy_VkMemoryGetAndroidHardwareBufferInfoANDROID(pool, pInfo, (VkMemoryGetAndroidHardwareBufferInfoANDROID*)(local_pInfo));
+        deepcopy_VkMemoryGetAndroidHardwareBufferInfoANDROID(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkMemoryGetAndroidHardwareBufferInfoANDROID*)(local_pInfo));
     }
     if (local_pInfo)
     {
-        transform_tohost_VkMemoryGetAndroidHardwareBufferInfoANDROID(mImpl->resources(), (VkMemoryGetAndroidHardwareBufferInfoANDROID*)(local_pInfo));
+        transform_tohost_VkMemoryGetAndroidHardwareBufferInfoANDROID(sResourceTracker, (VkMemoryGetAndroidHardwareBufferInfoANDROID*)(local_pInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1419;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1419, 1);
-        countingStream->write((uint64_t*)&cgen_var_1419, 1 * 8);
-        marshal_VkMemoryGetAndroidHardwareBufferInfoANDROID(countingStream, (VkMemoryGetAndroidHardwareBufferInfoANDROID*)(local_pInfo));
-        countingStream->write((AHardwareBuffer**)pBuffer, sizeof(AHardwareBuffer*));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkMemoryGetAndroidHardwareBufferInfoANDROID(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryGetAndroidHardwareBufferInfoANDROID*)(local_pInfo), countPtr);
+        *countPtr += sizeof(AHardwareBuffer*);
     }
-    uint32_t packetSize_vkGetMemoryAndroidHardwareBufferANDROID = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetMemoryAndroidHardwareBufferANDROID = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetMemoryAndroidHardwareBufferANDROID);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetMemoryAndroidHardwareBufferANDROID = OP_vkGetMemoryAndroidHardwareBufferANDROID;
-    stream->write(&opcode_vkGetMemoryAndroidHardwareBufferANDROID, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetMemoryAndroidHardwareBufferANDROID, sizeof(uint32_t));
-    uint64_t cgen_var_1420;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1420, 1);
-    stream->write((uint64_t*)&cgen_var_1420, 1 * 8);
-    marshal_VkMemoryGetAndroidHardwareBufferInfoANDROID(stream, (VkMemoryGetAndroidHardwareBufferInfoANDROID*)(local_pInfo));
-    stream->write((AHardwareBuffer**)pBuffer, sizeof(AHardwareBuffer*));
-    AEMU_SCOPED_TRACE("vkGetMemoryAndroidHardwareBufferANDROID readParams");
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetMemoryAndroidHardwareBufferANDROID, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetMemoryAndroidHardwareBufferANDROID, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkMemoryGetAndroidHardwareBufferInfoANDROID(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryGetAndroidHardwareBufferInfoANDROID*)(local_pInfo), streamPtrPtr);
+    memcpy(*streamPtrPtr, (AHardwareBuffer**)pBuffer, sizeof(AHardwareBuffer*));
+    *streamPtrPtr += sizeof(AHardwareBuffer*);
     stream->read((AHardwareBuffer**)pBuffer, sizeof(AHardwareBuffer*));
-    AEMU_SCOPED_TRACE("vkGetMemoryAndroidHardwareBufferANDROID returnUnmarshal");
     VkResult vkGetMemoryAndroidHardwareBufferANDROID_VkResult_return = (VkResult)0;
     stream->read(&vkGetMemoryAndroidHardwareBufferANDROID_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetMemoryAndroidHardwareBufferANDROID");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetMemoryAndroidHardwareBufferANDROID_VkResult_return;
 }
 
@@ -21772,21 +26880,21 @@
 #endif
 #ifdef VK_AMD_shader_fragment_mask
 #endif
+#ifdef VK_EXT_inline_uniform_block
+#endif
 #ifdef VK_EXT_shader_stencil_export
 #endif
 #ifdef VK_EXT_sample_locations
 void VkEncoder::vkCmdSetSampleLocationsEXT(
     VkCommandBuffer commandBuffer,
-    const VkSampleLocationsInfoEXT* pSampleLocationsInfo)
+    const VkSampleLocationsInfoEXT* pSampleLocationsInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdSetSampleLocationsEXT encode");
-    mImpl->log("start vkCmdSetSampleLocationsEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkSampleLocationsInfoEXT* local_pSampleLocationsInfo;
     local_commandBuffer = commandBuffer;
@@ -21794,76 +26902,93 @@
     if (pSampleLocationsInfo)
     {
         local_pSampleLocationsInfo = (VkSampleLocationsInfoEXT*)pool->alloc(sizeof(const VkSampleLocationsInfoEXT));
-        deepcopy_VkSampleLocationsInfoEXT(pool, pSampleLocationsInfo, (VkSampleLocationsInfoEXT*)(local_pSampleLocationsInfo));
+        deepcopy_VkSampleLocationsInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSampleLocationsInfo, (VkSampleLocationsInfoEXT*)(local_pSampleLocationsInfo));
     }
     if (local_pSampleLocationsInfo)
     {
-        transform_tohost_VkSampleLocationsInfoEXT(mImpl->resources(), (VkSampleLocationsInfoEXT*)(local_pSampleLocationsInfo));
+        transform_tohost_VkSampleLocationsInfoEXT(sResourceTracker, (VkSampleLocationsInfoEXT*)(local_pSampleLocationsInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1421;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1421, 1);
-        countingStream->write((uint64_t*)&cgen_var_1421, 1 * 8);
-        marshal_VkSampleLocationsInfoEXT(countingStream, (VkSampleLocationsInfoEXT*)(local_pSampleLocationsInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkSampleLocationsInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSampleLocationsInfoEXT*)(local_pSampleLocationsInfo), countPtr);
     }
-    uint32_t packetSize_vkCmdSetSampleLocationsEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdSetSampleLocationsEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetSampleLocationsEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetSampleLocationsEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdSetSampleLocationsEXT = OP_vkCmdSetSampleLocationsEXT;
-    stream->write(&opcode_vkCmdSetSampleLocationsEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdSetSampleLocationsEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1422;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1422, 1);
-    stream->write((uint64_t*)&cgen_var_1422, 1 * 8);
-    marshal_VkSampleLocationsInfoEXT(stream, (VkSampleLocationsInfoEXT*)(local_pSampleLocationsInfo));
-    AEMU_SCOPED_TRACE("vkCmdSetSampleLocationsEXT readParams");
-    AEMU_SCOPED_TRACE("vkCmdSetSampleLocationsEXT returnUnmarshal");
-    mImpl->log("finish vkCmdSetSampleLocationsEXT");;
+    memcpy(streamPtr, &opcode_vkCmdSetSampleLocationsEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetSampleLocationsEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkSampleLocationsInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSampleLocationsInfoEXT*)(local_pSampleLocationsInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetPhysicalDeviceMultisamplePropertiesEXT(
     VkPhysicalDevice physicalDevice,
     VkSampleCountFlagBits samples,
-    VkMultisamplePropertiesEXT* pMultisampleProperties)
+    VkMultisamplePropertiesEXT* pMultisampleProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMultisamplePropertiesEXT encode");
-    mImpl->log("start vkGetPhysicalDeviceMultisamplePropertiesEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkPhysicalDevice local_physicalDevice;
     VkSampleCountFlagBits local_samples;
     local_physicalDevice = physicalDevice;
     local_samples = samples;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1423;
-        countingStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1423, 1);
-        countingStream->write((uint64_t*)&cgen_var_1423, 1 * 8);
-        countingStream->write((VkSampleCountFlagBits*)&local_samples, sizeof(VkSampleCountFlagBits));
-        marshal_VkMultisamplePropertiesEXT(countingStream, (VkMultisamplePropertiesEXT*)(pMultisampleProperties));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkSampleCountFlagBits);
+        count_VkMultisamplePropertiesEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMultisamplePropertiesEXT*)(pMultisampleProperties), countPtr);
     }
-    uint32_t packetSize_vkGetPhysicalDeviceMultisamplePropertiesEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetPhysicalDeviceMultisamplePropertiesEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceMultisamplePropertiesEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetPhysicalDeviceMultisamplePropertiesEXT = OP_vkGetPhysicalDeviceMultisamplePropertiesEXT;
-    stream->write(&opcode_vkGetPhysicalDeviceMultisamplePropertiesEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetPhysicalDeviceMultisamplePropertiesEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1424;
-    stream->handleMapping()->mapHandles_VkPhysicalDevice_u64(&local_physicalDevice, &cgen_var_1424, 1);
-    stream->write((uint64_t*)&cgen_var_1424, 1 * 8);
-    stream->write((VkSampleCountFlagBits*)&local_samples, sizeof(VkSampleCountFlagBits));
-    marshal_VkMultisamplePropertiesEXT(stream, (VkMultisamplePropertiesEXT*)(pMultisampleProperties));
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMultisamplePropertiesEXT readParams");
-    unmarshal_VkMultisamplePropertiesEXT(stream, (VkMultisamplePropertiesEXT*)(pMultisampleProperties));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceMultisamplePropertiesEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceMultisamplePropertiesEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkSampleCountFlagBits*)&local_samples, sizeof(VkSampleCountFlagBits));
+    *streamPtrPtr += sizeof(VkSampleCountFlagBits);
+    reservedmarshal_VkMultisamplePropertiesEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMultisamplePropertiesEXT*)(pMultisampleProperties), streamPtrPtr);
+    unmarshal_VkMultisamplePropertiesEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMultisamplePropertiesEXT*)(pMultisampleProperties));
     if (pMultisampleProperties)
     {
-        transform_fromhost_VkMultisamplePropertiesEXT(mImpl->resources(), (VkMultisamplePropertiesEXT*)(pMultisampleProperties));
+        transform_fromhost_VkMultisamplePropertiesEXT(sResourceTracker, (VkMultisamplePropertiesEXT*)(pMultisampleProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMultisamplePropertiesEXT returnUnmarshal");
-    mImpl->log("finish vkGetPhysicalDeviceMultisamplePropertiesEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
@@ -21875,23 +27000,83 @@
 #endif
 #ifdef VK_NV_fill_rectangle
 #endif
+#ifdef VK_NV_shader_sm_builtins
+#endif
 #ifdef VK_EXT_post_depth_coverage
 #endif
+#ifdef VK_EXT_image_drm_format_modifier
+VkResult VkEncoder::vkGetImageDrmFormatModifierPropertiesEXT(
+    VkDevice device,
+    VkImage image,
+    VkImageDrmFormatModifierPropertiesEXT* pProperties,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkImage local_image;
+    local_device = device;
+    local_image = image;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        count_VkImageDrmFormatModifierPropertiesEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageDrmFormatModifierPropertiesEXT*)(pProperties), countPtr);
+    }
+    uint32_t packetSize_vkGetImageDrmFormatModifierPropertiesEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetImageDrmFormatModifierPropertiesEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetImageDrmFormatModifierPropertiesEXT = OP_vkGetImageDrmFormatModifierPropertiesEXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetImageDrmFormatModifierPropertiesEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetImageDrmFormatModifierPropertiesEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImage((*&local_image));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkImageDrmFormatModifierPropertiesEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageDrmFormatModifierPropertiesEXT*)(pProperties), streamPtrPtr);
+    unmarshal_VkImageDrmFormatModifierPropertiesEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageDrmFormatModifierPropertiesEXT*)(pProperties));
+    if (pProperties)
+    {
+        transform_fromhost_VkImageDrmFormatModifierPropertiesEXT(sResourceTracker, (VkImageDrmFormatModifierPropertiesEXT*)(pProperties));
+    }
+    VkResult vkGetImageDrmFormatModifierPropertiesEXT_VkResult_return = (VkResult)0;
+    stream->read(&vkGetImageDrmFormatModifierPropertiesEXT_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetImageDrmFormatModifierPropertiesEXT_VkResult_return;
+}
+
+#endif
 #ifdef VK_EXT_validation_cache
 VkResult VkEncoder::vkCreateValidationCacheEXT(
     VkDevice device,
     const VkValidationCacheCreateInfoEXT* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
-    VkValidationCacheEXT* pValidationCache)
+    VkValidationCacheEXT* pValidationCache,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateValidationCacheEXT encode");
-    mImpl->log("start vkCreateValidationCacheEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkValidationCacheCreateInfoEXT* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -21900,90 +27085,94 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkValidationCacheCreateInfoEXT*)pool->alloc(sizeof(const VkValidationCacheCreateInfoEXT));
-        deepcopy_VkValidationCacheCreateInfoEXT(pool, pCreateInfo, (VkValidationCacheCreateInfoEXT*)(local_pCreateInfo));
+        deepcopy_VkValidationCacheCreateInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkValidationCacheCreateInfoEXT*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkValidationCacheCreateInfoEXT(mImpl->resources(), (VkValidationCacheCreateInfoEXT*)(local_pCreateInfo));
+        transform_tohost_VkValidationCacheCreateInfoEXT(sResourceTracker, (VkValidationCacheCreateInfoEXT*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1425;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1425, 1);
-        countingStream->write((uint64_t*)&cgen_var_1425, 1 * 8);
-        marshal_VkValidationCacheCreateInfoEXT(countingStream, (VkValidationCacheCreateInfoEXT*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkValidationCacheCreateInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkValidationCacheCreateInfoEXT*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1426 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1426);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_1427;
-        countingStream->handleMapping()->mapHandles_VkValidationCacheEXT_u64(pValidationCache, &cgen_var_1427, 1);
-        countingStream->write((uint64_t*)&cgen_var_1427, 8);
+        uint64_t cgen_var_1;
+        *countPtr += 8;
     }
-    uint32_t packetSize_vkCreateValidationCacheEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateValidationCacheEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateValidationCacheEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateValidationCacheEXT = OP_vkCreateValidationCacheEXT;
-    stream->write(&opcode_vkCreateValidationCacheEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateValidationCacheEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1428;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1428, 1);
-    stream->write((uint64_t*)&cgen_var_1428, 1 * 8);
-    marshal_VkValidationCacheCreateInfoEXT(stream, (VkValidationCacheCreateInfoEXT*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateValidationCacheEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateValidationCacheEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkValidationCacheCreateInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkValidationCacheCreateInfoEXT*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1429 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1429);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_1430;
-    stream->handleMapping()->mapHandles_VkValidationCacheEXT_u64(pValidationCache, &cgen_var_1430, 1);
-    stream->write((uint64_t*)&cgen_var_1430, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    AEMU_SCOPED_TRACE("vkCreateValidationCacheEXT readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_1431;
-    stream->read((uint64_t*)&cgen_var_1431, 8);
-    stream->handleMapping()->mapHandles_u64_VkValidationCacheEXT(&cgen_var_1431, (VkValidationCacheEXT*)pValidationCache, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pValidationCache));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkValidationCacheEXT(&cgen_var_3, (VkValidationCacheEXT*)pValidationCache, 1);
     stream->unsetHandleMapping();
-    AEMU_SCOPED_TRACE("vkCreateValidationCacheEXT returnUnmarshal");
     VkResult vkCreateValidationCacheEXT_VkResult_return = (VkResult)0;
     stream->read(&vkCreateValidationCacheEXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateValidationCacheEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateValidationCacheEXT_VkResult_return;
 }
 
 void VkEncoder::vkDestroyValidationCacheEXT(
     VkDevice device,
     VkValidationCacheEXT validationCache,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkDestroyValidationCacheEXT encode");
-    mImpl->log("start vkDestroyValidationCacheEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkValidationCacheEXT local_validationCache;
     VkAllocationCallbacks* local_pAllocator;
@@ -21993,67 +27182,75 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1432;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1432, 1);
-        countingStream->write((uint64_t*)&cgen_var_1432, 1 * 8);
-        uint64_t cgen_var_1433;
-        countingStream->handleMapping()->mapHandles_VkValidationCacheEXT_u64(&local_validationCache, &cgen_var_1433, 1);
-        countingStream->write((uint64_t*)&cgen_var_1433, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_1434 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1434);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkDestroyValidationCacheEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkDestroyValidationCacheEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyValidationCacheEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkDestroyValidationCacheEXT = OP_vkDestroyValidationCacheEXT;
-    stream->write(&opcode_vkDestroyValidationCacheEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkDestroyValidationCacheEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1435;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1435, 1);
-    stream->write((uint64_t*)&cgen_var_1435, 1 * 8);
-    uint64_t cgen_var_1436;
-    stream->handleMapping()->mapHandles_VkValidationCacheEXT_u64(&local_validationCache, &cgen_var_1436, 1);
-    stream->write((uint64_t*)&cgen_var_1436, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyValidationCacheEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyValidationCacheEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkValidationCacheEXT((*&local_validationCache));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_1437 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1437);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkDestroyValidationCacheEXT readParams");
-    AEMU_SCOPED_TRACE("vkDestroyValidationCacheEXT returnUnmarshal");
-    resources->destroyMapping()->mapHandles_VkValidationCacheEXT((VkValidationCacheEXT*)&validationCache);
-    mImpl->log("finish vkDestroyValidationCacheEXT");;
+    sResourceTracker->destroyMapping()->mapHandles_VkValidationCacheEXT((VkValidationCacheEXT*)&validationCache);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 VkResult VkEncoder::vkMergeValidationCachesEXT(
     VkDevice device,
     VkValidationCacheEXT dstCache,
     uint32_t srcCacheCount,
-    const VkValidationCacheEXT* pSrcCaches)
+    const VkValidationCacheEXT* pSrcCaches,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkMergeValidationCachesEXT encode");
-    mImpl->log("start vkMergeValidationCachesEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkValidationCacheEXT local_dstCache;
     uint32_t local_srcCacheCount;
@@ -22061,55 +27258,58 @@
     local_device = device;
     local_dstCache = dstCache;
     local_srcCacheCount = srcCacheCount;
-    local_pSrcCaches = nullptr;
-    if (pSrcCaches)
+    // Avoiding deepcopy for pSrcCaches
+    local_pSrcCaches = (VkValidationCacheEXT*)pSrcCaches;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pSrcCaches = (VkValidationCacheEXT*)pool->dupArray(pSrcCaches, ((srcCacheCount)) * sizeof(const VkValidationCacheEXT));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1438;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1438, 1);
-        countingStream->write((uint64_t*)&cgen_var_1438, 1 * 8);
-        uint64_t cgen_var_1439;
-        countingStream->handleMapping()->mapHandles_VkValidationCacheEXT_u64(&local_dstCache, &cgen_var_1439, 1);
-        countingStream->write((uint64_t*)&cgen_var_1439, 1 * 8);
-        countingStream->write((uint32_t*)&local_srcCacheCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
         if (((srcCacheCount)))
         {
-            uint64_t* cgen_var_1440;
-            countingStream->alloc((void**)&cgen_var_1440, ((srcCacheCount)) * 8);
-            countingStream->handleMapping()->mapHandles_VkValidationCacheEXT_u64(local_pSrcCaches, cgen_var_1440, ((srcCacheCount)));
-            countingStream->write((uint64_t*)cgen_var_1440, ((srcCacheCount)) * 8);
+            *countPtr += ((srcCacheCount)) * 8;
         }
     }
-    uint32_t packetSize_vkMergeValidationCachesEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkMergeValidationCachesEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkMergeValidationCachesEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkMergeValidationCachesEXT = OP_vkMergeValidationCachesEXT;
-    stream->write(&opcode_vkMergeValidationCachesEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkMergeValidationCachesEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1441;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1441, 1);
-    stream->write((uint64_t*)&cgen_var_1441, 1 * 8);
-    uint64_t cgen_var_1442;
-    stream->handleMapping()->mapHandles_VkValidationCacheEXT_u64(&local_dstCache, &cgen_var_1442, 1);
-    stream->write((uint64_t*)&cgen_var_1442, 1 * 8);
-    stream->write((uint32_t*)&local_srcCacheCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkMergeValidationCachesEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkMergeValidationCachesEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkValidationCacheEXT((*&local_dstCache));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_srcCacheCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     if (((srcCacheCount)))
     {
-        uint64_t* cgen_var_1443;
-        stream->alloc((void**)&cgen_var_1443, ((srcCacheCount)) * 8);
-        stream->handleMapping()->mapHandles_VkValidationCacheEXT_u64(local_pSrcCaches, cgen_var_1443, ((srcCacheCount)));
-        stream->write((uint64_t*)cgen_var_1443, ((srcCacheCount)) * 8);
+        uint8_t* cgen_var_2_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((srcCacheCount)); ++k)
+        {
+            uint64_t tmpval = get_host_u64_VkValidationCacheEXT(local_pSrcCaches[k]);
+            memcpy(cgen_var_2_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((srcCacheCount));
     }
-    AEMU_SCOPED_TRACE("vkMergeValidationCachesEXT readParams");
-    AEMU_SCOPED_TRACE("vkMergeValidationCachesEXT returnUnmarshal");
     VkResult vkMergeValidationCachesEXT_VkResult_return = (VkResult)0;
     stream->read(&vkMergeValidationCachesEXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkMergeValidationCachesEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkMergeValidationCachesEXT_VkResult_return;
 }
 
@@ -22117,71 +27317,79 @@
     VkDevice device,
     VkValidationCacheEXT validationCache,
     size_t* pDataSize,
-    void* pData)
+    void* pData,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetValidationCacheDataEXT encode");
-    mImpl->log("start vkGetValidationCacheDataEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkValidationCacheEXT local_validationCache;
     local_device = device;
     local_validationCache = validationCache;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1444;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1444, 1);
-        countingStream->write((uint64_t*)&cgen_var_1444, 1 * 8);
-        uint64_t cgen_var_1445;
-        countingStream->handleMapping()->mapHandles_VkValidationCacheEXT_u64(&local_validationCache, &cgen_var_1445, 1);
-        countingStream->write((uint64_t*)&cgen_var_1445, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_1446 = (uint64_t)(uintptr_t)pDataSize;
-        countingStream->putBe64(cgen_var_1446);
+        *countPtr += 8;
         if (pDataSize)
         {
-            uint64_t cgen_var_1447 = (uint64_t)(*pDataSize);
-            countingStream->putBe64(cgen_var_1447);
+            *countPtr += 8;
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_1448 = (uint64_t)(uintptr_t)pData;
-        countingStream->putBe64(cgen_var_1448);
+        *countPtr += 8;
         if (pData)
         {
-            countingStream->write((void*)pData, (*(pDataSize)) * sizeof(uint8_t));
+            if (pDataSize)
+            {
+                *countPtr += (*(pDataSize)) * sizeof(uint8_t);
+            }
         }
     }
-    uint32_t packetSize_vkGetValidationCacheDataEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetValidationCacheDataEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetValidationCacheDataEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetValidationCacheDataEXT = OP_vkGetValidationCacheDataEXT;
-    stream->write(&opcode_vkGetValidationCacheDataEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetValidationCacheDataEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1449;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1449, 1);
-    stream->write((uint64_t*)&cgen_var_1449, 1 * 8);
-    uint64_t cgen_var_1450;
-    stream->handleMapping()->mapHandles_VkValidationCacheEXT_u64(&local_validationCache, &cgen_var_1450, 1);
-    stream->write((uint64_t*)&cgen_var_1450, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetValidationCacheDataEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetValidationCacheDataEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkValidationCacheEXT((*&local_validationCache));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_1451 = (uint64_t)(uintptr_t)pDataSize;
-    stream->putBe64(cgen_var_1451);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pDataSize;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pDataSize)
     {
-        uint64_t cgen_var_1452 = (uint64_t)(*pDataSize);
-        stream->putBe64(cgen_var_1452);
+        uint64_t cgen_var_2_0 = (uint64_t)(*pDataSize);
+        memcpy((*streamPtrPtr), &cgen_var_2_0, 8);
+        android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+        *streamPtrPtr += 8;
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1453 = (uint64_t)(uintptr_t)pData;
-    stream->putBe64(cgen_var_1453);
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)pData;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pData)
     {
-        stream->write((void*)pData, (*(pDataSize)) * sizeof(uint8_t));
+        memcpy(*streamPtrPtr, (void*)pData, (*(pDataSize)) * sizeof(uint8_t));
+        *streamPtrPtr += (*(pDataSize)) * sizeof(uint8_t);
     }
-    AEMU_SCOPED_TRACE("vkGetValidationCacheDataEXT readParams");
     // WARNING PTR CHECK
     size_t* check_pDataSize;
     check_pDataSize = (size_t*)(uintptr_t)stream->getBe64();
@@ -22204,13 +27412,15 @@
         }
         stream->read((void*)pData, (*(pDataSize)) * sizeof(uint8_t));
     }
-    AEMU_SCOPED_TRACE("vkGetValidationCacheDataEXT returnUnmarshal");
     VkResult vkGetValidationCacheDataEXT_VkResult_return = (VkResult)0;
     stream->read(&vkGetValidationCacheDataEXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetValidationCacheDataEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetValidationCacheDataEXT_VkResult_return;
 }
 
@@ -22219,6 +27429,1330 @@
 #endif
 #ifdef VK_EXT_shader_viewport_index_layer
 #endif
+#ifdef VK_NV_shading_rate_image
+void VkEncoder::vkCmdBindShadingRateImageNV(
+    VkCommandBuffer commandBuffer,
+    VkImageView imageView,
+    VkImageLayout imageLayout,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkImageView local_imageView;
+    VkImageLayout local_imageLayout;
+    local_commandBuffer = commandBuffer;
+    local_imageView = imageView;
+    local_imageLayout = imageLayout;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkImageLayout);
+    }
+    uint32_t packetSize_vkCmdBindShadingRateImageNV = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBindShadingRateImageNV -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBindShadingRateImageNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdBindShadingRateImageNV = OP_vkCmdBindShadingRateImageNV;
+    memcpy(streamPtr, &opcode_vkCmdBindShadingRateImageNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBindShadingRateImageNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImageView((*&local_imageView));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkImageLayout*)&local_imageLayout, sizeof(VkImageLayout));
+    *streamPtrPtr += sizeof(VkImageLayout);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdSetViewportShadingRatePaletteNV(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstViewport,
+    uint32_t viewportCount,
+    const VkShadingRatePaletteNV* pShadingRatePalettes,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    uint32_t local_firstViewport;
+    uint32_t local_viewportCount;
+    VkShadingRatePaletteNV* local_pShadingRatePalettes;
+    local_commandBuffer = commandBuffer;
+    local_firstViewport = firstViewport;
+    local_viewportCount = viewportCount;
+    local_pShadingRatePalettes = nullptr;
+    if (pShadingRatePalettes)
+    {
+        local_pShadingRatePalettes = (VkShadingRatePaletteNV*)pool->alloc(((viewportCount)) * sizeof(const VkShadingRatePaletteNV));
+        for (uint32_t i = 0; i < (uint32_t)((viewportCount)); ++i)
+        {
+            deepcopy_VkShadingRatePaletteNV(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pShadingRatePalettes + i, (VkShadingRatePaletteNV*)(local_pShadingRatePalettes + i));
+        }
+    }
+    if (local_pShadingRatePalettes)
+    {
+        for (uint32_t i = 0; i < (uint32_t)((viewportCount)); ++i)
+        {
+            transform_tohost_VkShadingRatePaletteNV(sResourceTracker, (VkShadingRatePaletteNV*)(local_pShadingRatePalettes + i));
+        }
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < (uint32_t)((viewportCount)); ++i)
+        {
+            count_VkShadingRatePaletteNV(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkShadingRatePaletteNV*)(local_pShadingRatePalettes + i), countPtr);
+        }
+    }
+    uint32_t packetSize_vkCmdSetViewportShadingRatePaletteNV = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetViewportShadingRatePaletteNV -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetViewportShadingRatePaletteNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetViewportShadingRatePaletteNV = OP_vkCmdSetViewportShadingRatePaletteNV;
+    memcpy(streamPtr, &opcode_vkCmdSetViewportShadingRatePaletteNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetViewportShadingRatePaletteNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstViewport, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_viewportCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)((viewportCount)); ++i)
+    {
+        reservedmarshal_VkShadingRatePaletteNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkShadingRatePaletteNV*)(local_pShadingRatePalettes + i), streamPtrPtr);
+    }
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdSetCoarseSampleOrderNV(
+    VkCommandBuffer commandBuffer,
+    VkCoarseSampleOrderTypeNV sampleOrderType,
+    uint32_t customSampleOrderCount,
+    const VkCoarseSampleOrderCustomNV* pCustomSampleOrders,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkCoarseSampleOrderTypeNV local_sampleOrderType;
+    uint32_t local_customSampleOrderCount;
+    VkCoarseSampleOrderCustomNV* local_pCustomSampleOrders;
+    local_commandBuffer = commandBuffer;
+    local_sampleOrderType = sampleOrderType;
+    local_customSampleOrderCount = customSampleOrderCount;
+    local_pCustomSampleOrders = nullptr;
+    if (pCustomSampleOrders)
+    {
+        local_pCustomSampleOrders = (VkCoarseSampleOrderCustomNV*)pool->alloc(((customSampleOrderCount)) * sizeof(const VkCoarseSampleOrderCustomNV));
+        for (uint32_t i = 0; i < (uint32_t)((customSampleOrderCount)); ++i)
+        {
+            deepcopy_VkCoarseSampleOrderCustomNV(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCustomSampleOrders + i, (VkCoarseSampleOrderCustomNV*)(local_pCustomSampleOrders + i));
+        }
+    }
+    if (local_pCustomSampleOrders)
+    {
+        for (uint32_t i = 0; i < (uint32_t)((customSampleOrderCount)); ++i)
+        {
+            transform_tohost_VkCoarseSampleOrderCustomNV(sResourceTracker, (VkCoarseSampleOrderCustomNV*)(local_pCustomSampleOrders + i));
+        }
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkCoarseSampleOrderTypeNV);
+        *countPtr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < (uint32_t)((customSampleOrderCount)); ++i)
+        {
+            count_VkCoarseSampleOrderCustomNV(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCoarseSampleOrderCustomNV*)(local_pCustomSampleOrders + i), countPtr);
+        }
+    }
+    uint32_t packetSize_vkCmdSetCoarseSampleOrderNV = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetCoarseSampleOrderNV -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetCoarseSampleOrderNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetCoarseSampleOrderNV = OP_vkCmdSetCoarseSampleOrderNV;
+    memcpy(streamPtr, &opcode_vkCmdSetCoarseSampleOrderNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetCoarseSampleOrderNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkCoarseSampleOrderTypeNV*)&local_sampleOrderType, sizeof(VkCoarseSampleOrderTypeNV));
+    *streamPtrPtr += sizeof(VkCoarseSampleOrderTypeNV);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_customSampleOrderCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)((customSampleOrderCount)); ++i)
+    {
+        reservedmarshal_VkCoarseSampleOrderCustomNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCoarseSampleOrderCustomNV*)(local_pCustomSampleOrders + i), streamPtrPtr);
+    }
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
+#ifdef VK_NV_ray_tracing
+VkResult VkEncoder::vkCreateAccelerationStructureNV(
+    VkDevice device,
+    const VkAccelerationStructureCreateInfoNV* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkAccelerationStructureNV* pAccelerationStructure,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkAccelerationStructureCreateInfoNV* local_pCreateInfo;
+    VkAllocationCallbacks* local_pAllocator;
+    local_device = device;
+    local_pCreateInfo = nullptr;
+    if (pCreateInfo)
+    {
+        local_pCreateInfo = (VkAccelerationStructureCreateInfoNV*)pool->alloc(sizeof(const VkAccelerationStructureCreateInfoNV));
+        deepcopy_VkAccelerationStructureCreateInfoNV(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkAccelerationStructureCreateInfoNV*)(local_pCreateInfo));
+    }
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pCreateInfo)
+    {
+        transform_tohost_VkAccelerationStructureCreateInfoNV(sResourceTracker, (VkAccelerationStructureCreateInfoNV*)(local_pCreateInfo));
+    }
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkAccelerationStructureCreateInfoNV(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureCreateInfoNV*)(local_pCreateInfo), countPtr);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+        uint64_t cgen_var_1;
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkCreateAccelerationStructureNV = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateAccelerationStructureNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCreateAccelerationStructureNV = OP_vkCreateAccelerationStructureNV;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateAccelerationStructureNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateAccelerationStructureNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkAccelerationStructureCreateInfoNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureCreateInfoNV*)(local_pCreateInfo), streamPtrPtr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pAccelerationStructure));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkAccelerationStructureNV(&cgen_var_3, (VkAccelerationStructureNV*)pAccelerationStructure, 1);
+    stream->unsetHandleMapping();
+    VkResult vkCreateAccelerationStructureNV_VkResult_return = (VkResult)0;
+    stream->read(&vkCreateAccelerationStructureNV_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCreateAccelerationStructureNV_VkResult_return;
+}
+
+void VkEncoder::vkDestroyAccelerationStructureNV(
+    VkDevice device,
+    VkAccelerationStructureNV accelerationStructure,
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkAccelerationStructureNV local_accelerationStructure;
+    VkAllocationCallbacks* local_pAllocator;
+    local_device = device;
+    local_accelerationStructure = accelerationStructure;
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+    }
+    uint32_t packetSize_vkDestroyAccelerationStructureNV = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyAccelerationStructureNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkDestroyAccelerationStructureNV = OP_vkDestroyAccelerationStructureNV;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyAccelerationStructureNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyAccelerationStructureNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkAccelerationStructureNV((*&local_accelerationStructure));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    sResourceTracker->destroyMapping()->mapHandles_VkAccelerationStructureNV((VkAccelerationStructureNV*)&accelerationStructure);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkGetAccelerationStructureMemoryRequirementsNV(
+    VkDevice device,
+    const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo,
+    VkMemoryRequirements2KHR* pMemoryRequirements,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkAccelerationStructureMemoryRequirementsInfoNV* local_pInfo;
+    local_device = device;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkAccelerationStructureMemoryRequirementsInfoNV*)pool->alloc(sizeof(const VkAccelerationStructureMemoryRequirementsInfoNV));
+        deepcopy_VkAccelerationStructureMemoryRequirementsInfoNV(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkAccelerationStructureMemoryRequirementsInfoNV*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkAccelerationStructureMemoryRequirementsInfoNV(sResourceTracker, (VkAccelerationStructureMemoryRequirementsInfoNV*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkAccelerationStructureMemoryRequirementsInfoNV(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureMemoryRequirementsInfoNV*)(local_pInfo), countPtr);
+        count_VkMemoryRequirements2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2KHR*)(pMemoryRequirements), countPtr);
+    }
+    uint32_t packetSize_vkGetAccelerationStructureMemoryRequirementsNV = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetAccelerationStructureMemoryRequirementsNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetAccelerationStructureMemoryRequirementsNV = OP_vkGetAccelerationStructureMemoryRequirementsNV;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetAccelerationStructureMemoryRequirementsNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetAccelerationStructureMemoryRequirementsNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkAccelerationStructureMemoryRequirementsInfoNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureMemoryRequirementsInfoNV*)(local_pInfo), streamPtrPtr);
+    reservedmarshal_VkMemoryRequirements2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2KHR*)(pMemoryRequirements), streamPtrPtr);
+    unmarshal_VkMemoryRequirements2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2KHR*)(pMemoryRequirements));
+    if (pMemoryRequirements)
+    {
+        transform_fromhost_VkMemoryRequirements2KHR(sResourceTracker, (VkMemoryRequirements2KHR*)(pMemoryRequirements));
+    }
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+VkResult VkEncoder::vkBindAccelerationStructureMemoryNV(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindAccelerationStructureMemoryInfoNV* pBindInfos,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    uint32_t local_bindInfoCount;
+    VkBindAccelerationStructureMemoryInfoNV* local_pBindInfos;
+    local_device = device;
+    local_bindInfoCount = bindInfoCount;
+    local_pBindInfos = nullptr;
+    if (pBindInfos)
+    {
+        local_pBindInfos = (VkBindAccelerationStructureMemoryInfoNV*)pool->alloc(((bindInfoCount)) * sizeof(const VkBindAccelerationStructureMemoryInfoNV));
+        for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
+        {
+            deepcopy_VkBindAccelerationStructureMemoryInfoNV(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pBindInfos + i, (VkBindAccelerationStructureMemoryInfoNV*)(local_pBindInfos + i));
+        }
+    }
+    if (local_pBindInfos)
+    {
+        for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
+        {
+            transform_tohost_VkBindAccelerationStructureMemoryInfoNV(sResourceTracker, (VkBindAccelerationStructureMemoryInfoNV*)(local_pBindInfos + i));
+        }
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
+        {
+            count_VkBindAccelerationStructureMemoryInfoNV(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBindAccelerationStructureMemoryInfoNV*)(local_pBindInfos + i), countPtr);
+        }
+    }
+    uint32_t packetSize_vkBindAccelerationStructureMemoryNV = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkBindAccelerationStructureMemoryNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkBindAccelerationStructureMemoryNV = OP_vkBindAccelerationStructureMemoryNV;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkBindAccelerationStructureMemoryNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkBindAccelerationStructureMemoryNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_bindInfoCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
+    {
+        reservedmarshal_VkBindAccelerationStructureMemoryInfoNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBindAccelerationStructureMemoryInfoNV*)(local_pBindInfos + i), streamPtrPtr);
+    }
+    VkResult vkBindAccelerationStructureMemoryNV_VkResult_return = (VkResult)0;
+    stream->read(&vkBindAccelerationStructureMemoryNV_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkBindAccelerationStructureMemoryNV_VkResult_return;
+}
+
+void VkEncoder::vkCmdBuildAccelerationStructureNV(
+    VkCommandBuffer commandBuffer,
+    const VkAccelerationStructureInfoNV* pInfo,
+    VkBuffer instanceData,
+    VkDeviceSize instanceOffset,
+    VkBool32 update,
+    VkAccelerationStructureNV dst,
+    VkAccelerationStructureNV src,
+    VkBuffer scratch,
+    VkDeviceSize scratchOffset,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkAccelerationStructureInfoNV* local_pInfo;
+    VkBuffer local_instanceData;
+    VkDeviceSize local_instanceOffset;
+    VkBool32 local_update;
+    VkAccelerationStructureNV local_dst;
+    VkAccelerationStructureNV local_src;
+    VkBuffer local_scratch;
+    VkDeviceSize local_scratchOffset;
+    local_commandBuffer = commandBuffer;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkAccelerationStructureInfoNV*)pool->alloc(sizeof(const VkAccelerationStructureInfoNV));
+        deepcopy_VkAccelerationStructureInfoNV(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkAccelerationStructureInfoNV*)(local_pInfo));
+    }
+    local_instanceData = instanceData;
+    local_instanceOffset = instanceOffset;
+    local_update = update;
+    local_dst = dst;
+    local_src = src;
+    local_scratch = scratch;
+    local_scratchOffset = scratchOffset;
+    if (local_pInfo)
+    {
+        transform_tohost_VkAccelerationStructureInfoNV(sResourceTracker, (VkAccelerationStructureInfoNV*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkAccelerationStructureInfoNV(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureInfoNV*)(local_pInfo), countPtr);
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(VkBool32);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_3;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_4;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+    }
+    uint32_t packetSize_vkCmdBuildAccelerationStructureNV = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBuildAccelerationStructureNV -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBuildAccelerationStructureNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdBuildAccelerationStructureNV = OP_vkCmdBuildAccelerationStructureNV;
+    memcpy(streamPtr, &opcode_vkCmdBuildAccelerationStructureNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBuildAccelerationStructureNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkAccelerationStructureInfoNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureInfoNV*)(local_pInfo), streamPtrPtr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_instanceData));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_instanceOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (VkBool32*)&local_update, sizeof(VkBool32));
+    *streamPtrPtr += sizeof(VkBool32);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkAccelerationStructureNV((*&local_dst));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = get_host_u64_VkAccelerationStructureNV((*&local_src));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_3;
+    *&cgen_var_3 = get_host_u64_VkBuffer((*&local_scratch));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_3, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_scratchOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdCopyAccelerationStructureNV(
+    VkCommandBuffer commandBuffer,
+    VkAccelerationStructureNV dst,
+    VkAccelerationStructureNV src,
+    VkCopyAccelerationStructureModeKHR mode,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkAccelerationStructureNV local_dst;
+    VkAccelerationStructureNV local_src;
+    VkCopyAccelerationStructureModeKHR local_mode;
+    local_commandBuffer = commandBuffer;
+    local_dst = dst;
+    local_src = src;
+    local_mode = mode;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkCopyAccelerationStructureModeKHR);
+    }
+    uint32_t packetSize_vkCmdCopyAccelerationStructureNV = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdCopyAccelerationStructureNV -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdCopyAccelerationStructureNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdCopyAccelerationStructureNV = OP_vkCmdCopyAccelerationStructureNV;
+    memcpy(streamPtr, &opcode_vkCmdCopyAccelerationStructureNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdCopyAccelerationStructureNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkAccelerationStructureNV((*&local_dst));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkAccelerationStructureNV((*&local_src));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkCopyAccelerationStructureModeKHR*)&local_mode, sizeof(VkCopyAccelerationStructureModeKHR));
+    *streamPtrPtr += sizeof(VkCopyAccelerationStructureModeKHR);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdTraceRaysNV(
+    VkCommandBuffer commandBuffer,
+    VkBuffer raygenShaderBindingTableBuffer,
+    VkDeviceSize raygenShaderBindingOffset,
+    VkBuffer missShaderBindingTableBuffer,
+    VkDeviceSize missShaderBindingOffset,
+    VkDeviceSize missShaderBindingStride,
+    VkBuffer hitShaderBindingTableBuffer,
+    VkDeviceSize hitShaderBindingOffset,
+    VkDeviceSize hitShaderBindingStride,
+    VkBuffer callableShaderBindingTableBuffer,
+    VkDeviceSize callableShaderBindingOffset,
+    VkDeviceSize callableShaderBindingStride,
+    uint32_t width,
+    uint32_t height,
+    uint32_t depth,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkBuffer local_raygenShaderBindingTableBuffer;
+    VkDeviceSize local_raygenShaderBindingOffset;
+    VkBuffer local_missShaderBindingTableBuffer;
+    VkDeviceSize local_missShaderBindingOffset;
+    VkDeviceSize local_missShaderBindingStride;
+    VkBuffer local_hitShaderBindingTableBuffer;
+    VkDeviceSize local_hitShaderBindingOffset;
+    VkDeviceSize local_hitShaderBindingStride;
+    VkBuffer local_callableShaderBindingTableBuffer;
+    VkDeviceSize local_callableShaderBindingOffset;
+    VkDeviceSize local_callableShaderBindingStride;
+    uint32_t local_width;
+    uint32_t local_height;
+    uint32_t local_depth;
+    local_commandBuffer = commandBuffer;
+    local_raygenShaderBindingTableBuffer = raygenShaderBindingTableBuffer;
+    local_raygenShaderBindingOffset = raygenShaderBindingOffset;
+    local_missShaderBindingTableBuffer = missShaderBindingTableBuffer;
+    local_missShaderBindingOffset = missShaderBindingOffset;
+    local_missShaderBindingStride = missShaderBindingStride;
+    local_hitShaderBindingTableBuffer = hitShaderBindingTableBuffer;
+    local_hitShaderBindingOffset = hitShaderBindingOffset;
+    local_hitShaderBindingStride = hitShaderBindingStride;
+    local_callableShaderBindingTableBuffer = callableShaderBindingTableBuffer;
+    local_callableShaderBindingOffset = callableShaderBindingOffset;
+    local_callableShaderBindingStride = callableShaderBindingStride;
+    local_width = width;
+    local_height = height;
+    local_depth = depth;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(VkDeviceSize);
+        uint64_t cgen_var_3;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(VkDeviceSize);
+        uint64_t cgen_var_4;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkCmdTraceRaysNV = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdTraceRaysNV -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdTraceRaysNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdTraceRaysNV = OP_vkCmdTraceRaysNV;
+    memcpy(streamPtr, &opcode_vkCmdTraceRaysNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdTraceRaysNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_raygenShaderBindingTableBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_raygenShaderBindingOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&local_missShaderBindingTableBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_missShaderBindingOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_missShaderBindingStride, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = get_host_u64_VkBuffer((*&local_hitShaderBindingTableBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_hitShaderBindingOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_hitShaderBindingStride, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    uint64_t cgen_var_3;
+    *&cgen_var_3 = get_host_u64_VkBuffer((*&local_callableShaderBindingTableBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_3, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_callableShaderBindingOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_callableShaderBindingStride, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_width, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_height, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_depth, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+VkResult VkEncoder::vkCreateRayTracingPipelinesNV(
+    VkDevice device,
+    VkPipelineCache pipelineCache,
+    uint32_t createInfoCount,
+    const VkRayTracingPipelineCreateInfoNV* pCreateInfos,
+    const VkAllocationCallbacks* pAllocator,
+    VkPipeline* pPipelines,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkPipelineCache local_pipelineCache;
+    uint32_t local_createInfoCount;
+    VkRayTracingPipelineCreateInfoNV* local_pCreateInfos;
+    VkAllocationCallbacks* local_pAllocator;
+    local_device = device;
+    local_pipelineCache = pipelineCache;
+    local_createInfoCount = createInfoCount;
+    local_pCreateInfos = nullptr;
+    if (pCreateInfos)
+    {
+        local_pCreateInfos = (VkRayTracingPipelineCreateInfoNV*)pool->alloc(((createInfoCount)) * sizeof(const VkRayTracingPipelineCreateInfoNV));
+        for (uint32_t i = 0; i < (uint32_t)((createInfoCount)); ++i)
+        {
+            deepcopy_VkRayTracingPipelineCreateInfoNV(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfos + i, (VkRayTracingPipelineCreateInfoNV*)(local_pCreateInfos + i));
+        }
+    }
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pCreateInfos)
+    {
+        for (uint32_t i = 0; i < (uint32_t)((createInfoCount)); ++i)
+        {
+            transform_tohost_VkRayTracingPipelineCreateInfoNV(sResourceTracker, (VkRayTracingPipelineCreateInfoNV*)(local_pCreateInfos + i));
+        }
+    }
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < (uint32_t)((createInfoCount)); ++i)
+        {
+            count_VkRayTracingPipelineCreateInfoNV(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRayTracingPipelineCreateInfoNV*)(local_pCreateInfos + i), countPtr);
+        }
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+        if (((createInfoCount)))
+        {
+            *countPtr += ((createInfoCount)) * 8;
+        }
+    }
+    uint32_t packetSize_vkCreateRayTracingPipelinesNV = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateRayTracingPipelinesNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCreateRayTracingPipelinesNV = OP_vkCreateRayTracingPipelinesNV;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateRayTracingPipelinesNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateRayTracingPipelinesNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipelineCache((*&local_pipelineCache));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_createInfoCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)((createInfoCount)); ++i)
+    {
+        reservedmarshal_VkRayTracingPipelineCreateInfoNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRayTracingPipelineCreateInfoNV*)(local_pCreateInfos + i), streamPtrPtr);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    /* is handle, possibly out */;
+    if (((createInfoCount)))
+    {
+        uint8_t* cgen_var_3_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((createInfoCount)); ++k)
+        {
+            uint64_t tmpval = (uint64_t)(pPipelines[k]);
+            memcpy(cgen_var_3_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((createInfoCount));
+    }
+    /* is handle, possibly out */;
+    if (((createInfoCount)))
+    {
+        uint64_t* cgen_var_4;
+        stream->alloc((void**)&cgen_var_4, ((createInfoCount)) * 8);
+        stream->read((uint64_t*)cgen_var_4, ((createInfoCount)) * 8);
+        stream->handleMapping()->mapHandles_u64_VkPipeline(cgen_var_4, (VkPipeline*)pPipelines, ((createInfoCount)));
+    }
+    VkResult vkCreateRayTracingPipelinesNV_VkResult_return = (VkResult)0;
+    stream->read(&vkCreateRayTracingPipelinesNV_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCreateRayTracingPipelinesNV_VkResult_return;
+}
+
+VkResult VkEncoder::vkGetRayTracingShaderGroupHandlesKHR(
+    VkDevice device,
+    VkPipeline pipeline,
+    uint32_t firstGroup,
+    uint32_t groupCount,
+    size_t dataSize,
+    void* pData,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkPipeline local_pipeline;
+    uint32_t local_firstGroup;
+    uint32_t local_groupCount;
+    size_t local_dataSize;
+    local_device = device;
+    local_pipeline = pipeline;
+    local_firstGroup = firstGroup;
+    local_groupCount = groupCount;
+    local_dataSize = dataSize;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += 8;
+        *countPtr += ((dataSize)) * sizeof(uint8_t);
+    }
+    uint32_t packetSize_vkGetRayTracingShaderGroupHandlesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetRayTracingShaderGroupHandlesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetRayTracingShaderGroupHandlesKHR = OP_vkGetRayTracingShaderGroupHandlesKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetRayTracingShaderGroupHandlesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetRayTracingShaderGroupHandlesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipeline((*&local_pipeline));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstGroup, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_groupCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    uint64_t cgen_var_2 = (uint64_t)local_dataSize;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    memcpy(*streamPtrPtr, (void*)pData, ((dataSize)) * sizeof(uint8_t));
+    *streamPtrPtr += ((dataSize)) * sizeof(uint8_t);
+    stream->read((void*)pData, ((dataSize)) * sizeof(uint8_t));
+    VkResult vkGetRayTracingShaderGroupHandlesKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkGetRayTracingShaderGroupHandlesKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetRayTracingShaderGroupHandlesKHR_VkResult_return;
+}
+
+VkResult VkEncoder::vkGetRayTracingShaderGroupHandlesNV(
+    VkDevice device,
+    VkPipeline pipeline,
+    uint32_t firstGroup,
+    uint32_t groupCount,
+    size_t dataSize,
+    void* pData,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkPipeline local_pipeline;
+    uint32_t local_firstGroup;
+    uint32_t local_groupCount;
+    size_t local_dataSize;
+    local_device = device;
+    local_pipeline = pipeline;
+    local_firstGroup = firstGroup;
+    local_groupCount = groupCount;
+    local_dataSize = dataSize;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += 8;
+        *countPtr += ((dataSize)) * sizeof(uint8_t);
+    }
+    uint32_t packetSize_vkGetRayTracingShaderGroupHandlesNV = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetRayTracingShaderGroupHandlesNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetRayTracingShaderGroupHandlesNV = OP_vkGetRayTracingShaderGroupHandlesNV;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetRayTracingShaderGroupHandlesNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetRayTracingShaderGroupHandlesNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipeline((*&local_pipeline));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstGroup, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_groupCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    uint64_t cgen_var_2 = (uint64_t)local_dataSize;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    memcpy(*streamPtrPtr, (void*)pData, ((dataSize)) * sizeof(uint8_t));
+    *streamPtrPtr += ((dataSize)) * sizeof(uint8_t);
+    stream->read((void*)pData, ((dataSize)) * sizeof(uint8_t));
+    VkResult vkGetRayTracingShaderGroupHandlesNV_VkResult_return = (VkResult)0;
+    stream->read(&vkGetRayTracingShaderGroupHandlesNV_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetRayTracingShaderGroupHandlesNV_VkResult_return;
+}
+
+VkResult VkEncoder::vkGetAccelerationStructureHandleNV(
+    VkDevice device,
+    VkAccelerationStructureNV accelerationStructure,
+    size_t dataSize,
+    void* pData,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkAccelerationStructureNV local_accelerationStructure;
+    size_t local_dataSize;
+    local_device = device;
+    local_accelerationStructure = accelerationStructure;
+    local_dataSize = dataSize;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += 8;
+        *countPtr += ((dataSize)) * sizeof(uint8_t);
+    }
+    uint32_t packetSize_vkGetAccelerationStructureHandleNV = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetAccelerationStructureHandleNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetAccelerationStructureHandleNV = OP_vkGetAccelerationStructureHandleNV;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetAccelerationStructureHandleNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetAccelerationStructureHandleNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkAccelerationStructureNV((*&local_accelerationStructure));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_2 = (uint64_t)local_dataSize;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    memcpy(*streamPtrPtr, (void*)pData, ((dataSize)) * sizeof(uint8_t));
+    *streamPtrPtr += ((dataSize)) * sizeof(uint8_t);
+    stream->read((void*)pData, ((dataSize)) * sizeof(uint8_t));
+    VkResult vkGetAccelerationStructureHandleNV_VkResult_return = (VkResult)0;
+    stream->read(&vkGetAccelerationStructureHandleNV_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetAccelerationStructureHandleNV_VkResult_return;
+}
+
+void VkEncoder::vkCmdWriteAccelerationStructuresPropertiesNV(
+    VkCommandBuffer commandBuffer,
+    uint32_t accelerationStructureCount,
+    const VkAccelerationStructureNV* pAccelerationStructures,
+    VkQueryType queryType,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    uint32_t local_accelerationStructureCount;
+    VkAccelerationStructureNV* local_pAccelerationStructures;
+    VkQueryType local_queryType;
+    VkQueryPool local_queryPool;
+    uint32_t local_firstQuery;
+    local_commandBuffer = commandBuffer;
+    local_accelerationStructureCount = accelerationStructureCount;
+    // Avoiding deepcopy for pAccelerationStructures
+    local_pAccelerationStructures = (VkAccelerationStructureNV*)pAccelerationStructures;
+    local_queryType = queryType;
+    local_queryPool = queryPool;
+    local_firstQuery = firstQuery;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        if (((accelerationStructureCount)))
+        {
+            *countPtr += ((accelerationStructureCount)) * 8;
+        }
+        *countPtr += sizeof(VkQueryType);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkCmdWriteAccelerationStructuresPropertiesNV = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdWriteAccelerationStructuresPropertiesNV -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdWriteAccelerationStructuresPropertiesNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdWriteAccelerationStructuresPropertiesNV = OP_vkCmdWriteAccelerationStructuresPropertiesNV;
+    memcpy(streamPtr, &opcode_vkCmdWriteAccelerationStructuresPropertiesNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdWriteAccelerationStructuresPropertiesNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_accelerationStructureCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    if (((accelerationStructureCount)))
+    {
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((accelerationStructureCount)); ++k)
+        {
+            uint64_t tmpval = get_host_u64_VkAccelerationStructureNV(local_pAccelerationStructures[k]);
+            memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((accelerationStructureCount));
+    }
+    memcpy(*streamPtrPtr, (VkQueryType*)&local_queryType, sizeof(VkQueryType));
+    *streamPtrPtr += sizeof(VkQueryType);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkQueryPool((*&local_queryPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstQuery, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+VkResult VkEncoder::vkCompileDeferredNV(
+    VkDevice device,
+    VkPipeline pipeline,
+    uint32_t shader,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkPipeline local_pipeline;
+    uint32_t local_shader;
+    local_device = device;
+    local_pipeline = pipeline;
+    local_shader = shader;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkCompileDeferredNV = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCompileDeferredNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCompileDeferredNV = OP_vkCompileDeferredNV;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCompileDeferredNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCompileDeferredNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipeline((*&local_pipeline));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_shader, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    VkResult vkCompileDeferredNV_VkResult_return = (VkResult)0;
+    stream->read(&vkCompileDeferredNV_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCompileDeferredNV_VkResult_return;
+}
+
+#endif
+#ifdef VK_NV_representative_fragment_test
+#endif
+#ifdef VK_EXT_filter_cubic
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
+#endif
 #ifdef VK_EXT_global_priority
 #endif
 #ifdef VK_EXT_external_memory_host
@@ -22226,71 +28760,74 @@
     VkDevice device,
     VkExternalMemoryHandleTypeFlagBits handleType,
     const void* pHostPointer,
-    VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties)
+    VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetMemoryHostPointerPropertiesEXT encode");
-    mImpl->log("start vkGetMemoryHostPointerPropertiesEXT");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkExternalMemoryHandleTypeFlagBits local_handleType;
     void* local_pHostPointer;
     local_device = device;
     local_handleType = handleType;
-    local_pHostPointer = nullptr;
-    if (pHostPointer)
+    // Avoiding deepcopy for pHostPointer
+    local_pHostPointer = (void*)pHostPointer;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pHostPointer = (void*)pool->dupArray(pHostPointer, sizeof(const uint8_t));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1457;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1457, 1);
-        countingStream->write((uint64_t*)&cgen_var_1457, 1 * 8);
-        countingStream->write((VkExternalMemoryHandleTypeFlagBits*)&local_handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkExternalMemoryHandleTypeFlagBits);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1458 = (uint64_t)(uintptr_t)local_pHostPointer;
-        countingStream->putBe64(cgen_var_1458);
+        *countPtr += 8;
         if (local_pHostPointer)
         {
-            countingStream->write((void*)local_pHostPointer, sizeof(uint8_t));
+            *countPtr += sizeof(uint8_t);
         }
-        marshal_VkMemoryHostPointerPropertiesEXT(countingStream, (VkMemoryHostPointerPropertiesEXT*)(pMemoryHostPointerProperties));
+        count_VkMemoryHostPointerPropertiesEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryHostPointerPropertiesEXT*)(pMemoryHostPointerProperties), countPtr);
     }
-    uint32_t packetSize_vkGetMemoryHostPointerPropertiesEXT = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetMemoryHostPointerPropertiesEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetMemoryHostPointerPropertiesEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetMemoryHostPointerPropertiesEXT = OP_vkGetMemoryHostPointerPropertiesEXT;
-    stream->write(&opcode_vkGetMemoryHostPointerPropertiesEXT, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetMemoryHostPointerPropertiesEXT, sizeof(uint32_t));
-    uint64_t cgen_var_1459;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1459, 1);
-    stream->write((uint64_t*)&cgen_var_1459, 1 * 8);
-    stream->write((VkExternalMemoryHandleTypeFlagBits*)&local_handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetMemoryHostPointerPropertiesEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetMemoryHostPointerPropertiesEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkExternalMemoryHandleTypeFlagBits*)&local_handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
+    *streamPtrPtr += sizeof(VkExternalMemoryHandleTypeFlagBits);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1460 = (uint64_t)(uintptr_t)local_pHostPointer;
-    stream->putBe64(cgen_var_1460);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pHostPointer;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pHostPointer)
     {
-        stream->write((void*)local_pHostPointer, sizeof(uint8_t));
+        memcpy(*streamPtrPtr, (void*)local_pHostPointer, sizeof(uint8_t));
+        *streamPtrPtr += sizeof(uint8_t);
     }
-    marshal_VkMemoryHostPointerPropertiesEXT(stream, (VkMemoryHostPointerPropertiesEXT*)(pMemoryHostPointerProperties));
-    AEMU_SCOPED_TRACE("vkGetMemoryHostPointerPropertiesEXT readParams");
-    unmarshal_VkMemoryHostPointerPropertiesEXT(stream, (VkMemoryHostPointerPropertiesEXT*)(pMemoryHostPointerProperties));
+    reservedmarshal_VkMemoryHostPointerPropertiesEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryHostPointerPropertiesEXT*)(pMemoryHostPointerProperties), streamPtrPtr);
+    unmarshal_VkMemoryHostPointerPropertiesEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryHostPointerPropertiesEXT*)(pMemoryHostPointerProperties));
     if (pMemoryHostPointerProperties)
     {
-        transform_fromhost_VkMemoryHostPointerPropertiesEXT(mImpl->resources(), (VkMemoryHostPointerPropertiesEXT*)(pMemoryHostPointerProperties));
+        transform_fromhost_VkMemoryHostPointerPropertiesEXT(sResourceTracker, (VkMemoryHostPointerPropertiesEXT*)(pMemoryHostPointerProperties));
     }
-    AEMU_SCOPED_TRACE("vkGetMemoryHostPointerPropertiesEXT returnUnmarshal");
     VkResult vkGetMemoryHostPointerPropertiesEXT_VkResult_return = (VkResult)0;
     stream->read(&vkGetMemoryHostPointerPropertiesEXT_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetMemoryHostPointerPropertiesEXT");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetMemoryHostPointerPropertiesEXT_VkResult_return;
 }
 
@@ -22301,16 +28838,14 @@
     VkPipelineStageFlagBits pipelineStage,
     VkBuffer dstBuffer,
     VkDeviceSize dstOffset,
-    uint32_t marker)
+    uint32_t marker,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdWriteBufferMarkerAMD encode");
-    mImpl->log("start vkCmdWriteBufferMarkerAMD");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkPipelineStageFlagBits local_pipelineStage;
     VkBuffer local_dstBuffer;
@@ -22321,162 +28856,671 @@
     local_dstBuffer = dstBuffer;
     local_dstOffset = dstOffset;
     local_marker = marker;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1461;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1461, 1);
-        countingStream->write((uint64_t*)&cgen_var_1461, 1 * 8);
-        countingStream->write((VkPipelineStageFlagBits*)&local_pipelineStage, sizeof(VkPipelineStageFlagBits));
-        uint64_t cgen_var_1462;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_dstBuffer, &cgen_var_1462, 1);
-        countingStream->write((uint64_t*)&cgen_var_1462, 1 * 8);
-        countingStream->write((VkDeviceSize*)&local_dstOffset, sizeof(VkDeviceSize));
-        countingStream->write((uint32_t*)&local_marker, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkPipelineStageFlagBits);
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCmdWriteBufferMarkerAMD = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdWriteBufferMarkerAMD = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdWriteBufferMarkerAMD -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdWriteBufferMarkerAMD);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdWriteBufferMarkerAMD = OP_vkCmdWriteBufferMarkerAMD;
-    stream->write(&opcode_vkCmdWriteBufferMarkerAMD, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdWriteBufferMarkerAMD, sizeof(uint32_t));
-    uint64_t cgen_var_1463;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1463, 1);
-    stream->write((uint64_t*)&cgen_var_1463, 1 * 8);
-    stream->write((VkPipelineStageFlagBits*)&local_pipelineStage, sizeof(VkPipelineStageFlagBits));
-    uint64_t cgen_var_1464;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_dstBuffer, &cgen_var_1464, 1);
-    stream->write((uint64_t*)&cgen_var_1464, 1 * 8);
-    stream->write((VkDeviceSize*)&local_dstOffset, sizeof(VkDeviceSize));
-    stream->write((uint32_t*)&local_marker, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCmdWriteBufferMarkerAMD readParams");
-    AEMU_SCOPED_TRACE("vkCmdWriteBufferMarkerAMD returnUnmarshal");
-    mImpl->log("finish vkCmdWriteBufferMarkerAMD");;
+    memcpy(streamPtr, &opcode_vkCmdWriteBufferMarkerAMD, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdWriteBufferMarkerAMD, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkPipelineStageFlagBits*)&local_pipelineStage, sizeof(VkPipelineStageFlagBits));
+    *streamPtrPtr += sizeof(VkPipelineStageFlagBits);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_dstBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_dstOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_marker, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
+#ifdef VK_AMD_pipeline_compiler_control
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+VkResult VkEncoder::vkGetPhysicalDeviceCalibrateableTimeDomainsEXT(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pTimeDomainCount,
+    VkTimeDomainEXT* pTimeDomains,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkPhysicalDevice local_physicalDevice;
+    local_physicalDevice = physicalDevice;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pTimeDomainCount)
+        {
+            *countPtr += sizeof(uint32_t);
+        }
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pTimeDomains)
+        {
+            if (pTimeDomainCount)
+            {
+                *countPtr += (*(pTimeDomainCount)) * sizeof(VkTimeDomainEXT);
+            }
+        }
+    }
+    uint32_t packetSize_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT = OP_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pTimeDomainCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pTimeDomainCount)
+    {
+        memcpy(*streamPtrPtr, (uint32_t*)pTimeDomainCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pTimeDomains;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pTimeDomains)
+    {
+        memcpy(*streamPtrPtr, (VkTimeDomainEXT*)pTimeDomains, (*(pTimeDomainCount)) * sizeof(VkTimeDomainEXT));
+        *streamPtrPtr += (*(pTimeDomainCount)) * sizeof(VkTimeDomainEXT);
+    }
+    // WARNING PTR CHECK
+    uint32_t* check_pTimeDomainCount;
+    check_pTimeDomainCount = (uint32_t*)(uintptr_t)stream->getBe64();
+    if (pTimeDomainCount)
+    {
+        if (!(check_pTimeDomainCount))
+        {
+            fprintf(stderr, "fatal: pTimeDomainCount inconsistent between guest and host\n");
+        }
+        stream->read((uint32_t*)pTimeDomainCount, sizeof(uint32_t));
+    }
+    // WARNING PTR CHECK
+    VkTimeDomainEXT* check_pTimeDomains;
+    check_pTimeDomains = (VkTimeDomainEXT*)(uintptr_t)stream->getBe64();
+    if (pTimeDomains)
+    {
+        if (!(check_pTimeDomains))
+        {
+            fprintf(stderr, "fatal: pTimeDomains inconsistent between guest and host\n");
+        }
+        stream->read((VkTimeDomainEXT*)pTimeDomains, (*(pTimeDomainCount)) * sizeof(VkTimeDomainEXT));
+    }
+    VkResult vkGetPhysicalDeviceCalibrateableTimeDomainsEXT_VkResult_return = (VkResult)0;
+    stream->read(&vkGetPhysicalDeviceCalibrateableTimeDomainsEXT_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetPhysicalDeviceCalibrateableTimeDomainsEXT_VkResult_return;
+}
+
+VkResult VkEncoder::vkGetCalibratedTimestampsEXT(
+    VkDevice device,
+    uint32_t timestampCount,
+    const VkCalibratedTimestampInfoEXT* pTimestampInfos,
+    uint64_t* pTimestamps,
+    uint64_t* pMaxDeviation,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    uint32_t local_timestampCount;
+    VkCalibratedTimestampInfoEXT* local_pTimestampInfos;
+    local_device = device;
+    local_timestampCount = timestampCount;
+    local_pTimestampInfos = nullptr;
+    if (pTimestampInfos)
+    {
+        local_pTimestampInfos = (VkCalibratedTimestampInfoEXT*)pool->alloc(((timestampCount)) * sizeof(const VkCalibratedTimestampInfoEXT));
+        for (uint32_t i = 0; i < (uint32_t)((timestampCount)); ++i)
+        {
+            deepcopy_VkCalibratedTimestampInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pTimestampInfos + i, (VkCalibratedTimestampInfoEXT*)(local_pTimestampInfos + i));
+        }
+    }
+    if (local_pTimestampInfos)
+    {
+        for (uint32_t i = 0; i < (uint32_t)((timestampCount)); ++i)
+        {
+            transform_tohost_VkCalibratedTimestampInfoEXT(sResourceTracker, (VkCalibratedTimestampInfoEXT*)(local_pTimestampInfos + i));
+        }
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < (uint32_t)((timestampCount)); ++i)
+        {
+            count_VkCalibratedTimestampInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCalibratedTimestampInfoEXT*)(local_pTimestampInfos + i), countPtr);
+        }
+        *countPtr += ((timestampCount)) * sizeof(uint64_t);
+        *countPtr += sizeof(uint64_t);
+    }
+    uint32_t packetSize_vkGetCalibratedTimestampsEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetCalibratedTimestampsEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetCalibratedTimestampsEXT = OP_vkGetCalibratedTimestampsEXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetCalibratedTimestampsEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetCalibratedTimestampsEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_timestampCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)((timestampCount)); ++i)
+    {
+        reservedmarshal_VkCalibratedTimestampInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCalibratedTimestampInfoEXT*)(local_pTimestampInfos + i), streamPtrPtr);
+    }
+    memcpy(*streamPtrPtr, (uint64_t*)pTimestamps, ((timestampCount)) * sizeof(uint64_t));
+    *streamPtrPtr += ((timestampCount)) * sizeof(uint64_t);
+    memcpy(*streamPtrPtr, (uint64_t*)pMaxDeviation, sizeof(uint64_t));
+    *streamPtrPtr += sizeof(uint64_t);
+    stream->read((uint64_t*)pTimestamps, ((timestampCount)) * sizeof(uint64_t));
+    stream->read((uint64_t*)pMaxDeviation, sizeof(uint64_t));
+    VkResult vkGetCalibratedTimestampsEXT_VkResult_return = (VkResult)0;
+    stream->read(&vkGetCalibratedTimestampsEXT_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetCalibratedTimestampsEXT_VkResult_return;
 }
 
 #endif
 #ifdef VK_AMD_shader_core_properties
 #endif
+#ifdef VK_AMD_memory_overallocation_behavior
+#endif
 #ifdef VK_EXT_vertex_attribute_divisor
 #endif
+#ifdef VK_GGP_frame_token
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+#endif
 #ifdef VK_NV_shader_subgroup_partitioned
 #endif
+#ifdef VK_NV_compute_shader_derivatives
+#endif
+#ifdef VK_NV_mesh_shader
+void VkEncoder::vkCmdDrawMeshTasksNV(
+    VkCommandBuffer commandBuffer,
+    uint32_t taskCount,
+    uint32_t firstTask,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    uint32_t local_taskCount;
+    uint32_t local_firstTask;
+    local_commandBuffer = commandBuffer;
+    local_taskCount = taskCount;
+    local_firstTask = firstTask;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkCmdDrawMeshTasksNV = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDrawMeshTasksNV -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDrawMeshTasksNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdDrawMeshTasksNV = OP_vkCmdDrawMeshTasksNV;
+    memcpy(streamPtr, &opcode_vkCmdDrawMeshTasksNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDrawMeshTasksNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_taskCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstTask, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdDrawMeshTasksIndirectNV(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    uint32_t drawCount,
+    uint32_t stride,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkBuffer local_buffer;
+    VkDeviceSize local_offset;
+    uint32_t local_drawCount;
+    uint32_t local_stride;
+    local_commandBuffer = commandBuffer;
+    local_buffer = buffer;
+    local_offset = offset;
+    local_drawCount = drawCount;
+    local_stride = stride;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkCmdDrawMeshTasksIndirectNV = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDrawMeshTasksIndirectNV -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDrawMeshTasksIndirectNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdDrawMeshTasksIndirectNV = OP_vkCmdDrawMeshTasksIndirectNV;
+    memcpy(streamPtr, &opcode_vkCmdDrawMeshTasksIndirectNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDrawMeshTasksIndirectNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_buffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_drawCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_stride, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdDrawMeshTasksIndirectCountNV(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkBuffer countBuffer,
+    VkDeviceSize countBufferOffset,
+    uint32_t maxDrawCount,
+    uint32_t stride,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkBuffer local_buffer;
+    VkDeviceSize local_offset;
+    VkBuffer local_countBuffer;
+    VkDeviceSize local_countBufferOffset;
+    uint32_t local_maxDrawCount;
+    uint32_t local_stride;
+    local_commandBuffer = commandBuffer;
+    local_buffer = buffer;
+    local_offset = offset;
+    local_countBuffer = countBuffer;
+    local_countBufferOffset = countBufferOffset;
+    local_maxDrawCount = maxDrawCount;
+    local_stride = stride;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkCmdDrawMeshTasksIndirectCountNV = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdDrawMeshTasksIndirectCountNV -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdDrawMeshTasksIndirectCountNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdDrawMeshTasksIndirectCountNV = OP_vkCmdDrawMeshTasksIndirectCountNV;
+    memcpy(streamPtr, &opcode_vkCmdDrawMeshTasksIndirectCountNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdDrawMeshTasksIndirectCountNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&local_buffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_offset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&local_countBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkDeviceSize*)&local_countBufferOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_maxDrawCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_stride, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+#endif
+#ifdef VK_NV_shader_image_footprint
+#endif
+#ifdef VK_NV_scissor_exclusive
+void VkEncoder::vkCmdSetExclusiveScissorNV(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstExclusiveScissor,
+    uint32_t exclusiveScissorCount,
+    const VkRect2D* pExclusiveScissors,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    uint32_t local_firstExclusiveScissor;
+    uint32_t local_exclusiveScissorCount;
+    VkRect2D* local_pExclusiveScissors;
+    local_commandBuffer = commandBuffer;
+    local_firstExclusiveScissor = firstExclusiveScissor;
+    local_exclusiveScissorCount = exclusiveScissorCount;
+    local_pExclusiveScissors = nullptr;
+    if (pExclusiveScissors)
+    {
+        local_pExclusiveScissors = (VkRect2D*)pool->alloc(((exclusiveScissorCount)) * sizeof(const VkRect2D));
+        for (uint32_t i = 0; i < (uint32_t)((exclusiveScissorCount)); ++i)
+        {
+            deepcopy_VkRect2D(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pExclusiveScissors + i, (VkRect2D*)(local_pExclusiveScissors + i));
+        }
+    }
+    if (local_pExclusiveScissors)
+    {
+        for (uint32_t i = 0; i < (uint32_t)((exclusiveScissorCount)); ++i)
+        {
+            transform_tohost_VkRect2D(sResourceTracker, (VkRect2D*)(local_pExclusiveScissors + i));
+        }
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < (uint32_t)((exclusiveScissorCount)); ++i)
+        {
+            count_VkRect2D(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRect2D*)(local_pExclusiveScissors + i), countPtr);
+        }
+    }
+    uint32_t packetSize_vkCmdSetExclusiveScissorNV = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetExclusiveScissorNV -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetExclusiveScissorNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetExclusiveScissorNV = OP_vkCmdSetExclusiveScissorNV;
+    memcpy(streamPtr, &opcode_vkCmdSetExclusiveScissorNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetExclusiveScissorNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstExclusiveScissor, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_exclusiveScissorCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)((exclusiveScissorCount)); ++i)
+    {
+        reservedmarshal_VkRect2D(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRect2D*)(local_pExclusiveScissors + i), streamPtrPtr);
+    }
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
 #ifdef VK_NV_device_diagnostic_checkpoints
 void VkEncoder::vkCmdSetCheckpointNV(
     VkCommandBuffer commandBuffer,
-    const void* pCheckpointMarker)
+    const void* pCheckpointMarker,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCmdSetCheckpointNV encode");
-    mImpl->log("start vkCmdSetCheckpointNV");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     void* local_pCheckpointMarker;
     local_commandBuffer = commandBuffer;
-    local_pCheckpointMarker = nullptr;
-    if (pCheckpointMarker)
+    // Avoiding deepcopy for pCheckpointMarker
+    local_pCheckpointMarker = (void*)pCheckpointMarker;
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        local_pCheckpointMarker = (void*)pool->dupArray(pCheckpointMarker, sizeof(const uint8_t));
-    }
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1465;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1465, 1);
-        countingStream->write((uint64_t*)&cgen_var_1465, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_1466 = (uint64_t)(uintptr_t)local_pCheckpointMarker;
-        countingStream->putBe64(cgen_var_1466);
+        *countPtr += 8;
         if (local_pCheckpointMarker)
         {
-            countingStream->write((void*)local_pCheckpointMarker, sizeof(uint8_t));
+            *countPtr += sizeof(uint8_t);
         }
     }
-    uint32_t packetSize_vkCmdSetCheckpointNV = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCmdSetCheckpointNV = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetCheckpointNV -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetCheckpointNV);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCmdSetCheckpointNV = OP_vkCmdSetCheckpointNV;
-    stream->write(&opcode_vkCmdSetCheckpointNV, sizeof(uint32_t));
-    stream->write(&packetSize_vkCmdSetCheckpointNV, sizeof(uint32_t));
-    uint64_t cgen_var_1467;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1467, 1);
-    stream->write((uint64_t*)&cgen_var_1467, 1 * 8);
+    memcpy(streamPtr, &opcode_vkCmdSetCheckpointNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetCheckpointNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1468 = (uint64_t)(uintptr_t)local_pCheckpointMarker;
-    stream->putBe64(cgen_var_1468);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)local_pCheckpointMarker;
+    memcpy((*streamPtrPtr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pCheckpointMarker)
     {
-        stream->write((void*)local_pCheckpointMarker, sizeof(uint8_t));
+        memcpy(*streamPtrPtr, (void*)local_pCheckpointMarker, sizeof(uint8_t));
+        *streamPtrPtr += sizeof(uint8_t);
     }
-    AEMU_SCOPED_TRACE("vkCmdSetCheckpointNV readParams");
-    AEMU_SCOPED_TRACE("vkCmdSetCheckpointNV returnUnmarshal");
-    mImpl->log("finish vkCmdSetCheckpointNV");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkGetQueueCheckpointDataNV(
     VkQueue queue,
     uint32_t* pCheckpointDataCount,
-    VkCheckpointDataNV* pCheckpointData)
+    VkCheckpointDataNV* pCheckpointData,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetQueueCheckpointDataNV encode");
-    mImpl->log("start vkGetQueueCheckpointDataNV");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkQueue local_queue;
     local_queue = queue;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1469;
-        countingStream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_1469, 1);
-        countingStream->write((uint64_t*)&cgen_var_1469, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_1470 = (uint64_t)(uintptr_t)pCheckpointDataCount;
-        countingStream->putBe64(cgen_var_1470);
+        *countPtr += 8;
         if (pCheckpointDataCount)
         {
-            countingStream->write((uint32_t*)pCheckpointDataCount, sizeof(uint32_t));
+            *countPtr += sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_1471 = (uint64_t)(uintptr_t)pCheckpointData;
-        countingStream->putBe64(cgen_var_1471);
+        *countPtr += 8;
         if (pCheckpointData)
         {
-            for (uint32_t i = 0; i < (uint32_t)(*(pCheckpointDataCount)); ++i)
+            if (pCheckpointDataCount)
             {
-                marshal_VkCheckpointDataNV(countingStream, (VkCheckpointDataNV*)(pCheckpointData + i));
+                for (uint32_t i = 0; i < (uint32_t)(*(pCheckpointDataCount)); ++i)
+                {
+                    count_VkCheckpointDataNV(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCheckpointDataNV*)(pCheckpointData + i), countPtr);
+                }
             }
         }
     }
-    uint32_t packetSize_vkGetQueueCheckpointDataNV = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetQueueCheckpointDataNV = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetQueueCheckpointDataNV);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetQueueCheckpointDataNV = OP_vkGetQueueCheckpointDataNV;
-    stream->write(&opcode_vkGetQueueCheckpointDataNV, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetQueueCheckpointDataNV, sizeof(uint32_t));
-    uint64_t cgen_var_1472;
-    stream->handleMapping()->mapHandles_VkQueue_u64(&local_queue, &cgen_var_1472, 1);
-    stream->write((uint64_t*)&cgen_var_1472, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetQueueCheckpointDataNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetQueueCheckpointDataNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueue((*&local_queue));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_1473 = (uint64_t)(uintptr_t)pCheckpointDataCount;
-    stream->putBe64(cgen_var_1473);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pCheckpointDataCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pCheckpointDataCount)
     {
-        stream->write((uint32_t*)pCheckpointDataCount, sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)pCheckpointDataCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1474 = (uint64_t)(uintptr_t)pCheckpointData;
-    stream->putBe64(cgen_var_1474);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pCheckpointData;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pCheckpointData)
     {
         for (uint32_t i = 0; i < (uint32_t)(*(pCheckpointDataCount)); ++i)
         {
-            marshal_VkCheckpointDataNV(stream, (VkCheckpointDataNV*)(pCheckpointData + i));
+            reservedmarshal_VkCheckpointDataNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCheckpointDataNV*)(pCheckpointData + i), streamPtrPtr);
         }
     }
-    AEMU_SCOPED_TRACE("vkGetQueueCheckpointDataNV readParams");
     // WARNING PTR CHECK
     uint32_t* check_pCheckpointDataCount;
     check_pCheckpointDataCount = (uint32_t*)(uintptr_t)stream->getBe64();
@@ -22497,80 +29541,3552 @@
         {
             fprintf(stderr, "fatal: pCheckpointData inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)(*(pCheckpointDataCount)); ++i)
+        if (pCheckpointDataCount)
         {
-            unmarshal_VkCheckpointDataNV(stream, (VkCheckpointDataNV*)(pCheckpointData + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pCheckpointDataCount)); ++i)
+            {
+                unmarshal_VkCheckpointDataNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCheckpointDataNV*)(pCheckpointData + i));
+            }
         }
     }
-    if (pCheckpointData)
+    if (pCheckpointDataCount)
     {
-        for (uint32_t i = 0; i < (uint32_t)(*(pCheckpointDataCount)); ++i)
+        if (pCheckpointData)
         {
-            transform_fromhost_VkCheckpointDataNV(mImpl->resources(), (VkCheckpointDataNV*)(pCheckpointData + i));
+            for (uint32_t i = 0; i < (uint32_t)(*(pCheckpointDataCount)); ++i)
+            {
+                transform_fromhost_VkCheckpointDataNV(sResourceTracker, (VkCheckpointDataNV*)(pCheckpointData + i));
+            }
         }
     }
-    AEMU_SCOPED_TRACE("vkGetQueueCheckpointDataNV returnUnmarshal");
-    mImpl->log("finish vkGetQueueCheckpointDataNV");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 #endif
-#ifdef VK_GOOGLE_address_space
+#ifdef VK_INTEL_shader_integer_functions2
+#endif
+#ifdef VK_INTEL_performance_query
+VkResult VkEncoder::vkInitializePerformanceApiINTEL(
+    VkDevice device,
+    const VkInitializePerformanceApiInfoINTEL* pInitializeInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkInitializePerformanceApiInfoINTEL* local_pInitializeInfo;
+    local_device = device;
+    local_pInitializeInfo = nullptr;
+    if (pInitializeInfo)
+    {
+        local_pInitializeInfo = (VkInitializePerformanceApiInfoINTEL*)pool->alloc(sizeof(const VkInitializePerformanceApiInfoINTEL));
+        deepcopy_VkInitializePerformanceApiInfoINTEL(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInitializeInfo, (VkInitializePerformanceApiInfoINTEL*)(local_pInitializeInfo));
+    }
+    if (local_pInitializeInfo)
+    {
+        transform_tohost_VkInitializePerformanceApiInfoINTEL(sResourceTracker, (VkInitializePerformanceApiInfoINTEL*)(local_pInitializeInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkInitializePerformanceApiInfoINTEL(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkInitializePerformanceApiInfoINTEL*)(local_pInitializeInfo), countPtr);
+    }
+    uint32_t packetSize_vkInitializePerformanceApiINTEL = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkInitializePerformanceApiINTEL);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkInitializePerformanceApiINTEL = OP_vkInitializePerformanceApiINTEL;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkInitializePerformanceApiINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkInitializePerformanceApiINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkInitializePerformanceApiInfoINTEL(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkInitializePerformanceApiInfoINTEL*)(local_pInitializeInfo), streamPtrPtr);
+    VkResult vkInitializePerformanceApiINTEL_VkResult_return = (VkResult)0;
+    stream->read(&vkInitializePerformanceApiINTEL_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkInitializePerformanceApiINTEL_VkResult_return;
+}
+
+void VkEncoder::vkUninitializePerformanceApiINTEL(
+    VkDevice device,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    local_device = device;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+    }
+    uint32_t packetSize_vkUninitializePerformanceApiINTEL = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkUninitializePerformanceApiINTEL);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkUninitializePerformanceApiINTEL = OP_vkUninitializePerformanceApiINTEL;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkUninitializePerformanceApiINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkUninitializePerformanceApiINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+VkResult VkEncoder::vkCmdSetPerformanceMarkerINTEL(
+    VkCommandBuffer commandBuffer,
+    const VkPerformanceMarkerInfoINTEL* pMarkerInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkPerformanceMarkerInfoINTEL* local_pMarkerInfo;
+    local_commandBuffer = commandBuffer;
+    local_pMarkerInfo = nullptr;
+    if (pMarkerInfo)
+    {
+        local_pMarkerInfo = (VkPerformanceMarkerInfoINTEL*)pool->alloc(sizeof(const VkPerformanceMarkerInfoINTEL));
+        deepcopy_VkPerformanceMarkerInfoINTEL(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pMarkerInfo, (VkPerformanceMarkerInfoINTEL*)(local_pMarkerInfo));
+    }
+    if (local_pMarkerInfo)
+    {
+        transform_tohost_VkPerformanceMarkerInfoINTEL(sResourceTracker, (VkPerformanceMarkerInfoINTEL*)(local_pMarkerInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPerformanceMarkerInfoINTEL(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPerformanceMarkerInfoINTEL*)(local_pMarkerInfo), countPtr);
+    }
+    uint32_t packetSize_vkCmdSetPerformanceMarkerINTEL = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetPerformanceMarkerINTEL -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetPerformanceMarkerINTEL);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetPerformanceMarkerINTEL = OP_vkCmdSetPerformanceMarkerINTEL;
+    memcpy(streamPtr, &opcode_vkCmdSetPerformanceMarkerINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetPerformanceMarkerINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkPerformanceMarkerInfoINTEL(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPerformanceMarkerInfoINTEL*)(local_pMarkerInfo), streamPtrPtr);
+    VkResult vkCmdSetPerformanceMarkerINTEL_VkResult_return = (VkResult)0;
+    stream->read(&vkCmdSetPerformanceMarkerINTEL_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCmdSetPerformanceMarkerINTEL_VkResult_return;
+}
+
+VkResult VkEncoder::vkCmdSetPerformanceStreamMarkerINTEL(
+    VkCommandBuffer commandBuffer,
+    const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkPerformanceStreamMarkerInfoINTEL* local_pMarkerInfo;
+    local_commandBuffer = commandBuffer;
+    local_pMarkerInfo = nullptr;
+    if (pMarkerInfo)
+    {
+        local_pMarkerInfo = (VkPerformanceStreamMarkerInfoINTEL*)pool->alloc(sizeof(const VkPerformanceStreamMarkerInfoINTEL));
+        deepcopy_VkPerformanceStreamMarkerInfoINTEL(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pMarkerInfo, (VkPerformanceStreamMarkerInfoINTEL*)(local_pMarkerInfo));
+    }
+    if (local_pMarkerInfo)
+    {
+        transform_tohost_VkPerformanceStreamMarkerInfoINTEL(sResourceTracker, (VkPerformanceStreamMarkerInfoINTEL*)(local_pMarkerInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPerformanceStreamMarkerInfoINTEL(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPerformanceStreamMarkerInfoINTEL*)(local_pMarkerInfo), countPtr);
+    }
+    uint32_t packetSize_vkCmdSetPerformanceStreamMarkerINTEL = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetPerformanceStreamMarkerINTEL -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetPerformanceStreamMarkerINTEL);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetPerformanceStreamMarkerINTEL = OP_vkCmdSetPerformanceStreamMarkerINTEL;
+    memcpy(streamPtr, &opcode_vkCmdSetPerformanceStreamMarkerINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetPerformanceStreamMarkerINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkPerformanceStreamMarkerInfoINTEL(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPerformanceStreamMarkerInfoINTEL*)(local_pMarkerInfo), streamPtrPtr);
+    VkResult vkCmdSetPerformanceStreamMarkerINTEL_VkResult_return = (VkResult)0;
+    stream->read(&vkCmdSetPerformanceStreamMarkerINTEL_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCmdSetPerformanceStreamMarkerINTEL_VkResult_return;
+}
+
+VkResult VkEncoder::vkCmdSetPerformanceOverrideINTEL(
+    VkCommandBuffer commandBuffer,
+    const VkPerformanceOverrideInfoINTEL* pOverrideInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkPerformanceOverrideInfoINTEL* local_pOverrideInfo;
+    local_commandBuffer = commandBuffer;
+    local_pOverrideInfo = nullptr;
+    if (pOverrideInfo)
+    {
+        local_pOverrideInfo = (VkPerformanceOverrideInfoINTEL*)pool->alloc(sizeof(const VkPerformanceOverrideInfoINTEL));
+        deepcopy_VkPerformanceOverrideInfoINTEL(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pOverrideInfo, (VkPerformanceOverrideInfoINTEL*)(local_pOverrideInfo));
+    }
+    if (local_pOverrideInfo)
+    {
+        transform_tohost_VkPerformanceOverrideInfoINTEL(sResourceTracker, (VkPerformanceOverrideInfoINTEL*)(local_pOverrideInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPerformanceOverrideInfoINTEL(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPerformanceOverrideInfoINTEL*)(local_pOverrideInfo), countPtr);
+    }
+    uint32_t packetSize_vkCmdSetPerformanceOverrideINTEL = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetPerformanceOverrideINTEL -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetPerformanceOverrideINTEL);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetPerformanceOverrideINTEL = OP_vkCmdSetPerformanceOverrideINTEL;
+    memcpy(streamPtr, &opcode_vkCmdSetPerformanceOverrideINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetPerformanceOverrideINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkPerformanceOverrideInfoINTEL(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPerformanceOverrideInfoINTEL*)(local_pOverrideInfo), streamPtrPtr);
+    VkResult vkCmdSetPerformanceOverrideINTEL_VkResult_return = (VkResult)0;
+    stream->read(&vkCmdSetPerformanceOverrideINTEL_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCmdSetPerformanceOverrideINTEL_VkResult_return;
+}
+
+VkResult VkEncoder::vkAcquirePerformanceConfigurationINTEL(
+    VkDevice device,
+    const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo,
+    VkPerformanceConfigurationINTEL* pConfiguration,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkPerformanceConfigurationAcquireInfoINTEL* local_pAcquireInfo;
+    local_device = device;
+    local_pAcquireInfo = nullptr;
+    if (pAcquireInfo)
+    {
+        local_pAcquireInfo = (VkPerformanceConfigurationAcquireInfoINTEL*)pool->alloc(sizeof(const VkPerformanceConfigurationAcquireInfoINTEL));
+        deepcopy_VkPerformanceConfigurationAcquireInfoINTEL(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAcquireInfo, (VkPerformanceConfigurationAcquireInfoINTEL*)(local_pAcquireInfo));
+    }
+    if (local_pAcquireInfo)
+    {
+        transform_tohost_VkPerformanceConfigurationAcquireInfoINTEL(sResourceTracker, (VkPerformanceConfigurationAcquireInfoINTEL*)(local_pAcquireInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPerformanceConfigurationAcquireInfoINTEL(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPerformanceConfigurationAcquireInfoINTEL*)(local_pAcquireInfo), countPtr);
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkAcquirePerformanceConfigurationINTEL = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkAcquirePerformanceConfigurationINTEL);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkAcquirePerformanceConfigurationINTEL = OP_vkAcquirePerformanceConfigurationINTEL;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkAcquirePerformanceConfigurationINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkAcquirePerformanceConfigurationINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPerformanceConfigurationAcquireInfoINTEL(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPerformanceConfigurationAcquireInfoINTEL*)(local_pAcquireInfo), streamPtrPtr);
+    uint64_t cgen_var_1 = (uint64_t)(*pConfiguration);
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    (*pConfiguration) = (VkPerformanceConfigurationINTEL)stream->getBe64();
+    VkResult vkAcquirePerformanceConfigurationINTEL_VkResult_return = (VkResult)0;
+    stream->read(&vkAcquirePerformanceConfigurationINTEL_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkAcquirePerformanceConfigurationINTEL_VkResult_return;
+}
+
+VkResult VkEncoder::vkReleasePerformanceConfigurationINTEL(
+    VkDevice device,
+    VkPerformanceConfigurationINTEL configuration,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkPerformanceConfigurationINTEL local_configuration;
+    local_device = device;
+    local_configuration = configuration;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkReleasePerformanceConfigurationINTEL = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkReleasePerformanceConfigurationINTEL);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkReleasePerformanceConfigurationINTEL = OP_vkReleasePerformanceConfigurationINTEL;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkReleasePerformanceConfigurationINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkReleasePerformanceConfigurationINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1 = (uint64_t)local_configuration;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    VkResult vkReleasePerformanceConfigurationINTEL_VkResult_return = (VkResult)0;
+    stream->read(&vkReleasePerformanceConfigurationINTEL_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkReleasePerformanceConfigurationINTEL_VkResult_return;
+}
+
+VkResult VkEncoder::vkQueueSetPerformanceConfigurationINTEL(
+    VkQueue queue,
+    VkPerformanceConfigurationINTEL configuration,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkQueue local_queue;
+    VkPerformanceConfigurationINTEL local_configuration;
+    local_queue = queue;
+    local_configuration = configuration;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkQueueSetPerformanceConfigurationINTEL = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkQueueSetPerformanceConfigurationINTEL);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkQueueSetPerformanceConfigurationINTEL = OP_vkQueueSetPerformanceConfigurationINTEL;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkQueueSetPerformanceConfigurationINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkQueueSetPerformanceConfigurationINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueue((*&local_queue));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1 = (uint64_t)local_configuration;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    VkResult vkQueueSetPerformanceConfigurationINTEL_VkResult_return = (VkResult)0;
+    stream->read(&vkQueueSetPerformanceConfigurationINTEL_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkQueueSetPerformanceConfigurationINTEL_VkResult_return;
+}
+
+VkResult VkEncoder::vkGetPerformanceParameterINTEL(
+    VkDevice device,
+    VkPerformanceParameterTypeINTEL parameter,
+    VkPerformanceValueINTEL* pValue,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkPerformanceParameterTypeINTEL local_parameter;
+    local_device = device;
+    local_parameter = parameter;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkPerformanceParameterTypeINTEL);
+        count_VkPerformanceValueINTEL(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPerformanceValueINTEL*)(pValue), countPtr);
+    }
+    uint32_t packetSize_vkGetPerformanceParameterINTEL = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPerformanceParameterINTEL);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetPerformanceParameterINTEL = OP_vkGetPerformanceParameterINTEL;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPerformanceParameterINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPerformanceParameterINTEL, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkPerformanceParameterTypeINTEL*)&local_parameter, sizeof(VkPerformanceParameterTypeINTEL));
+    *streamPtrPtr += sizeof(VkPerformanceParameterTypeINTEL);
+    reservedmarshal_VkPerformanceValueINTEL(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPerformanceValueINTEL*)(pValue), streamPtrPtr);
+    unmarshal_VkPerformanceValueINTEL(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPerformanceValueINTEL*)(pValue));
+    if (pValue)
+    {
+        transform_fromhost_VkPerformanceValueINTEL(sResourceTracker, (VkPerformanceValueINTEL*)(pValue));
+    }
+    VkResult vkGetPerformanceParameterINTEL_VkResult_return = (VkResult)0;
+    stream->read(&vkGetPerformanceParameterINTEL_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetPerformanceParameterINTEL_VkResult_return;
+}
+
+#endif
+#ifdef VK_EXT_pci_bus_info
+#endif
+#ifdef VK_AMD_display_native_hdr
+void VkEncoder::vkSetLocalDimmingAMD(
+    VkDevice device,
+    VkSwapchainKHR swapChain,
+    VkBool32 localDimmingEnable,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkSwapchainKHR local_swapChain;
+    VkBool32 local_localDimmingEnable;
+    local_device = device;
+    local_swapChain = swapChain;
+    local_localDimmingEnable = localDimmingEnable;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkBool32);
+    }
+    uint32_t packetSize_vkSetLocalDimmingAMD = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkSetLocalDimmingAMD);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkSetLocalDimmingAMD = OP_vkSetLocalDimmingAMD;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkSetLocalDimmingAMD, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkSetLocalDimmingAMD, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSwapchainKHR((*&local_swapChain));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkBool32*)&local_localDimmingEnable, sizeof(VkBool32));
+    *streamPtrPtr += sizeof(VkBool32);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+VkResult VkEncoder::vkCreateImagePipeSurfaceFUCHSIA(
+    VkInstance instance,
+    const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkInstance local_instance;
+    VkImagePipeSurfaceCreateInfoFUCHSIA* local_pCreateInfo;
+    VkAllocationCallbacks* local_pAllocator;
+    local_instance = instance;
+    local_pCreateInfo = nullptr;
+    if (pCreateInfo)
+    {
+        local_pCreateInfo = (VkImagePipeSurfaceCreateInfoFUCHSIA*)pool->alloc(sizeof(const VkImagePipeSurfaceCreateInfoFUCHSIA));
+        deepcopy_VkImagePipeSurfaceCreateInfoFUCHSIA(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkImagePipeSurfaceCreateInfoFUCHSIA*)(local_pCreateInfo));
+    }
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pCreateInfo)
+    {
+        transform_tohost_VkImagePipeSurfaceCreateInfoFUCHSIA(sResourceTracker, (VkImagePipeSurfaceCreateInfoFUCHSIA*)(local_pCreateInfo));
+    }
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkImagePipeSurfaceCreateInfoFUCHSIA(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImagePipeSurfaceCreateInfoFUCHSIA*)(local_pCreateInfo), countPtr);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+        uint64_t cgen_var_1;
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkCreateImagePipeSurfaceFUCHSIA = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateImagePipeSurfaceFUCHSIA);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCreateImagePipeSurfaceFUCHSIA = OP_vkCreateImagePipeSurfaceFUCHSIA;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateImagePipeSurfaceFUCHSIA, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateImagePipeSurfaceFUCHSIA, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkImagePipeSurfaceCreateInfoFUCHSIA(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImagePipeSurfaceCreateInfoFUCHSIA*)(local_pCreateInfo), streamPtrPtr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSurface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_3, (VkSurfaceKHR*)pSurface, 1);
+    VkResult vkCreateImagePipeSurfaceFUCHSIA_VkResult_return = (VkResult)0;
+    stream->read(&vkCreateImagePipeSurfaceFUCHSIA_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCreateImagePipeSurfaceFUCHSIA_VkResult_return;
+}
+
+#endif
+#ifdef VK_EXT_metal_surface
+VkResult VkEncoder::vkCreateMetalSurfaceEXT(
+    VkInstance instance,
+    const VkMetalSurfaceCreateInfoEXT* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkInstance local_instance;
+    VkMetalSurfaceCreateInfoEXT* local_pCreateInfo;
+    VkAllocationCallbacks* local_pAllocator;
+    local_instance = instance;
+    local_pCreateInfo = nullptr;
+    if (pCreateInfo)
+    {
+        local_pCreateInfo = (VkMetalSurfaceCreateInfoEXT*)pool->alloc(sizeof(const VkMetalSurfaceCreateInfoEXT));
+        deepcopy_VkMetalSurfaceCreateInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkMetalSurfaceCreateInfoEXT*)(local_pCreateInfo));
+    }
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pCreateInfo)
+    {
+        transform_tohost_VkMetalSurfaceCreateInfoEXT(sResourceTracker, (VkMetalSurfaceCreateInfoEXT*)(local_pCreateInfo));
+    }
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkMetalSurfaceCreateInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMetalSurfaceCreateInfoEXT*)(local_pCreateInfo), countPtr);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+        uint64_t cgen_var_1;
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkCreateMetalSurfaceEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateMetalSurfaceEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCreateMetalSurfaceEXT = OP_vkCreateMetalSurfaceEXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateMetalSurfaceEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateMetalSurfaceEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkMetalSurfaceCreateInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMetalSurfaceCreateInfoEXT*)(local_pCreateInfo), streamPtrPtr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSurface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_3, (VkSurfaceKHR*)pSurface, 1);
+    VkResult vkCreateMetalSurfaceEXT_VkResult_return = (VkResult)0;
+    stream->read(&vkCreateMetalSurfaceEXT_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCreateMetalSurfaceEXT_VkResult_return;
+}
+
+#endif
+#ifdef VK_EXT_fragment_density_map
+#endif
+#ifdef VK_EXT_scalar_block_layout
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+#endif
+#ifdef VK_AMD_shader_core_properties2
+#endif
+#ifdef VK_AMD_device_coherent_memory
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+#endif
+#ifdef VK_EXT_memory_budget
+#endif
+#ifdef VK_EXT_memory_priority
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+#endif
+#ifdef VK_EXT_buffer_device_address
+VkDeviceAddress VkEncoder::vkGetBufferDeviceAddressEXT(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkBufferDeviceAddressInfo* local_pInfo;
+    local_device = device;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkBufferDeviceAddressInfo*)pool->alloc(sizeof(const VkBufferDeviceAddressInfo));
+        deepcopy_VkBufferDeviceAddressInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkBufferDeviceAddressInfo*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkBufferDeviceAddressInfo(sResourceTracker, (VkBufferDeviceAddressInfo*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkBufferDeviceAddressInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferDeviceAddressInfo*)(local_pInfo), countPtr);
+    }
+    uint32_t packetSize_vkGetBufferDeviceAddressEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetBufferDeviceAddressEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetBufferDeviceAddressEXT = OP_vkGetBufferDeviceAddressEXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetBufferDeviceAddressEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetBufferDeviceAddressEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkBufferDeviceAddressInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferDeviceAddressInfo*)(local_pInfo), streamPtrPtr);
+    VkDeviceAddress vkGetBufferDeviceAddressEXT_VkDeviceAddress_return = (VkDeviceAddress)0;
+    stream->read(&vkGetBufferDeviceAddressEXT_VkDeviceAddress_return, sizeof(VkDeviceAddress));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetBufferDeviceAddressEXT_VkDeviceAddress_return;
+}
+
+#endif
+#ifdef VK_EXT_tooling_info
+VkResult VkEncoder::vkGetPhysicalDeviceToolPropertiesEXT(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pToolCount,
+    VkPhysicalDeviceToolPropertiesEXT* pToolProperties,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkPhysicalDevice local_physicalDevice;
+    local_physicalDevice = physicalDevice;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pToolCount)
+        {
+            *countPtr += sizeof(uint32_t);
+        }
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pToolProperties)
+        {
+            if (pToolCount)
+            {
+                for (uint32_t i = 0; i < (uint32_t)(*(pToolCount)); ++i)
+                {
+                    count_VkPhysicalDeviceToolPropertiesEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceToolPropertiesEXT*)(pToolProperties + i), countPtr);
+                }
+            }
+        }
+    }
+    uint32_t packetSize_vkGetPhysicalDeviceToolPropertiesEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceToolPropertiesEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetPhysicalDeviceToolPropertiesEXT = OP_vkGetPhysicalDeviceToolPropertiesEXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceToolPropertiesEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceToolPropertiesEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pToolCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pToolCount)
+    {
+        memcpy(*streamPtrPtr, (uint32_t*)pToolCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pToolProperties;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pToolProperties)
+    {
+        for (uint32_t i = 0; i < (uint32_t)(*(pToolCount)); ++i)
+        {
+            reservedmarshal_VkPhysicalDeviceToolPropertiesEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceToolPropertiesEXT*)(pToolProperties + i), streamPtrPtr);
+        }
+    }
+    // WARNING PTR CHECK
+    uint32_t* check_pToolCount;
+    check_pToolCount = (uint32_t*)(uintptr_t)stream->getBe64();
+    if (pToolCount)
+    {
+        if (!(check_pToolCount))
+        {
+            fprintf(stderr, "fatal: pToolCount inconsistent between guest and host\n");
+        }
+        stream->read((uint32_t*)pToolCount, sizeof(uint32_t));
+    }
+    // WARNING PTR CHECK
+    VkPhysicalDeviceToolPropertiesEXT* check_pToolProperties;
+    check_pToolProperties = (VkPhysicalDeviceToolPropertiesEXT*)(uintptr_t)stream->getBe64();
+    if (pToolProperties)
+    {
+        if (!(check_pToolProperties))
+        {
+            fprintf(stderr, "fatal: pToolProperties inconsistent between guest and host\n");
+        }
+        if (pToolCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pToolCount)); ++i)
+            {
+                unmarshal_VkPhysicalDeviceToolPropertiesEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceToolPropertiesEXT*)(pToolProperties + i));
+            }
+        }
+    }
+    if (pToolCount)
+    {
+        if (pToolProperties)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pToolCount)); ++i)
+            {
+                transform_fromhost_VkPhysicalDeviceToolPropertiesEXT(sResourceTracker, (VkPhysicalDeviceToolPropertiesEXT*)(pToolProperties + i));
+            }
+        }
+    }
+    VkResult vkGetPhysicalDeviceToolPropertiesEXT_VkResult_return = (VkResult)0;
+    stream->read(&vkGetPhysicalDeviceToolPropertiesEXT_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetPhysicalDeviceToolPropertiesEXT_VkResult_return;
+}
+
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+#endif
+#ifdef VK_EXT_validation_features
+#endif
+#ifdef VK_NV_cooperative_matrix
+VkResult VkEncoder::vkGetPhysicalDeviceCooperativeMatrixPropertiesNV(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pPropertyCount,
+    VkCooperativeMatrixPropertiesNV* pProperties,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkPhysicalDevice local_physicalDevice;
+    local_physicalDevice = physicalDevice;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pPropertyCount)
+        {
+            *countPtr += sizeof(uint32_t);
+        }
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pProperties)
+        {
+            if (pPropertyCount)
+            {
+                for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+                {
+                    count_VkCooperativeMatrixPropertiesNV(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCooperativeMatrixPropertiesNV*)(pProperties + i), countPtr);
+                }
+            }
+        }
+    }
+    uint32_t packetSize_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV = OP_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pPropertyCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pPropertyCount)
+    {
+        memcpy(*streamPtrPtr, (uint32_t*)pPropertyCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pProperties;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pProperties)
+    {
+        for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+        {
+            reservedmarshal_VkCooperativeMatrixPropertiesNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCooperativeMatrixPropertiesNV*)(pProperties + i), streamPtrPtr);
+        }
+    }
+    // WARNING PTR CHECK
+    uint32_t* check_pPropertyCount;
+    check_pPropertyCount = (uint32_t*)(uintptr_t)stream->getBe64();
+    if (pPropertyCount)
+    {
+        if (!(check_pPropertyCount))
+        {
+            fprintf(stderr, "fatal: pPropertyCount inconsistent between guest and host\n");
+        }
+        stream->read((uint32_t*)pPropertyCount, sizeof(uint32_t));
+    }
+    // WARNING PTR CHECK
+    VkCooperativeMatrixPropertiesNV* check_pProperties;
+    check_pProperties = (VkCooperativeMatrixPropertiesNV*)(uintptr_t)stream->getBe64();
+    if (pProperties)
+    {
+        if (!(check_pProperties))
+        {
+            fprintf(stderr, "fatal: pProperties inconsistent between guest and host\n");
+        }
+        if (pPropertyCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                unmarshal_VkCooperativeMatrixPropertiesNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCooperativeMatrixPropertiesNV*)(pProperties + i));
+            }
+        }
+    }
+    if (pPropertyCount)
+    {
+        if (pProperties)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pPropertyCount)); ++i)
+            {
+                transform_fromhost_VkCooperativeMatrixPropertiesNV(sResourceTracker, (VkCooperativeMatrixPropertiesNV*)(pProperties + i));
+            }
+        }
+    }
+    VkResult vkGetPhysicalDeviceCooperativeMatrixPropertiesNV_VkResult_return = (VkResult)0;
+    stream->read(&vkGetPhysicalDeviceCooperativeMatrixPropertiesNV_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetPhysicalDeviceCooperativeMatrixPropertiesNV_VkResult_return;
+}
+
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+VkResult VkEncoder::vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pCombinationCount,
+    VkFramebufferMixedSamplesCombinationNV* pCombinations,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkPhysicalDevice local_physicalDevice;
+    local_physicalDevice = physicalDevice;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pCombinationCount)
+        {
+            *countPtr += sizeof(uint32_t);
+        }
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pCombinations)
+        {
+            if (pCombinationCount)
+            {
+                for (uint32_t i = 0; i < (uint32_t)(*(pCombinationCount)); ++i)
+                {
+                    count_VkFramebufferMixedSamplesCombinationNV(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFramebufferMixedSamplesCombinationNV*)(pCombinations + i), countPtr);
+                }
+            }
+        }
+    }
+    uint32_t packetSize_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV = OP_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pCombinationCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pCombinationCount)
+    {
+        memcpy(*streamPtrPtr, (uint32_t*)pCombinationCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pCombinations;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pCombinations)
+    {
+        for (uint32_t i = 0; i < (uint32_t)(*(pCombinationCount)); ++i)
+        {
+            reservedmarshal_VkFramebufferMixedSamplesCombinationNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFramebufferMixedSamplesCombinationNV*)(pCombinations + i), streamPtrPtr);
+        }
+    }
+    // WARNING PTR CHECK
+    uint32_t* check_pCombinationCount;
+    check_pCombinationCount = (uint32_t*)(uintptr_t)stream->getBe64();
+    if (pCombinationCount)
+    {
+        if (!(check_pCombinationCount))
+        {
+            fprintf(stderr, "fatal: pCombinationCount inconsistent between guest and host\n");
+        }
+        stream->read((uint32_t*)pCombinationCount, sizeof(uint32_t));
+    }
+    // WARNING PTR CHECK
+    VkFramebufferMixedSamplesCombinationNV* check_pCombinations;
+    check_pCombinations = (VkFramebufferMixedSamplesCombinationNV*)(uintptr_t)stream->getBe64();
+    if (pCombinations)
+    {
+        if (!(check_pCombinations))
+        {
+            fprintf(stderr, "fatal: pCombinations inconsistent between guest and host\n");
+        }
+        if (pCombinationCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pCombinationCount)); ++i)
+            {
+                unmarshal_VkFramebufferMixedSamplesCombinationNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkFramebufferMixedSamplesCombinationNV*)(pCombinations + i));
+            }
+        }
+    }
+    if (pCombinationCount)
+    {
+        if (pCombinations)
+        {
+            for (uint32_t i = 0; i < (uint32_t)(*(pCombinationCount)); ++i)
+            {
+                transform_fromhost_VkFramebufferMixedSamplesCombinationNV(sResourceTracker, (VkFramebufferMixedSamplesCombinationNV*)(pCombinations + i));
+            }
+        }
+    }
+    VkResult vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV_VkResult_return = (VkResult)0;
+    stream->read(&vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV_VkResult_return;
+}
+
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+VkResult VkEncoder::vkGetPhysicalDeviceSurfacePresentModes2EXT(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+    uint32_t* pPresentModeCount,
+    VkPresentModeKHR* pPresentModes,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkPhysicalDevice local_physicalDevice;
+    VkPhysicalDeviceSurfaceInfo2KHR* local_pSurfaceInfo;
+    local_physicalDevice = physicalDevice;
+    local_pSurfaceInfo = nullptr;
+    if (pSurfaceInfo)
+    {
+        local_pSurfaceInfo = (VkPhysicalDeviceSurfaceInfo2KHR*)pool->alloc(sizeof(const VkPhysicalDeviceSurfaceInfo2KHR));
+        deepcopy_VkPhysicalDeviceSurfaceInfo2KHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSurfaceInfo, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo));
+    }
+    if (local_pSurfaceInfo)
+    {
+        transform_tohost_VkPhysicalDeviceSurfaceInfo2KHR(sResourceTracker, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceSurfaceInfo2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo), countPtr);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pPresentModeCount)
+        {
+            *countPtr += sizeof(uint32_t);
+        }
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pPresentModes)
+        {
+            if (pPresentModeCount)
+            {
+                *countPtr += (*(pPresentModeCount)) * sizeof(VkPresentModeKHR);
+            }
+        }
+    }
+    uint32_t packetSize_vkGetPhysicalDeviceSurfacePresentModes2EXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceSurfacePresentModes2EXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetPhysicalDeviceSurfacePresentModes2EXT = OP_vkGetPhysicalDeviceSurfacePresentModes2EXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceSurfacePresentModes2EXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceSurfacePresentModes2EXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceSurfaceInfo2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo), streamPtrPtr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pPresentModeCount;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pPresentModeCount)
+    {
+        memcpy(*streamPtrPtr, (uint32_t*)pPresentModeCount, sizeof(uint32_t));
+        *streamPtrPtr += sizeof(uint32_t);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pPresentModes;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pPresentModes)
+    {
+        memcpy(*streamPtrPtr, (VkPresentModeKHR*)pPresentModes, (*(pPresentModeCount)) * sizeof(VkPresentModeKHR));
+        *streamPtrPtr += (*(pPresentModeCount)) * sizeof(VkPresentModeKHR);
+    }
+    // WARNING PTR CHECK
+    uint32_t* check_pPresentModeCount;
+    check_pPresentModeCount = (uint32_t*)(uintptr_t)stream->getBe64();
+    if (pPresentModeCount)
+    {
+        if (!(check_pPresentModeCount))
+        {
+            fprintf(stderr, "fatal: pPresentModeCount inconsistent between guest and host\n");
+        }
+        stream->read((uint32_t*)pPresentModeCount, sizeof(uint32_t));
+    }
+    // WARNING PTR CHECK
+    VkPresentModeKHR* check_pPresentModes;
+    check_pPresentModes = (VkPresentModeKHR*)(uintptr_t)stream->getBe64();
+    if (pPresentModes)
+    {
+        if (!(check_pPresentModes))
+        {
+            fprintf(stderr, "fatal: pPresentModes inconsistent between guest and host\n");
+        }
+        stream->read((VkPresentModeKHR*)pPresentModes, (*(pPresentModeCount)) * sizeof(VkPresentModeKHR));
+    }
+    VkResult vkGetPhysicalDeviceSurfacePresentModes2EXT_VkResult_return = (VkResult)0;
+    stream->read(&vkGetPhysicalDeviceSurfacePresentModes2EXT_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetPhysicalDeviceSurfacePresentModes2EXT_VkResult_return;
+}
+
+VkResult VkEncoder::vkAcquireFullScreenExclusiveModeEXT(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkSwapchainKHR local_swapchain;
+    local_device = device;
+    local_swapchain = swapchain;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+    }
+    uint32_t packetSize_vkAcquireFullScreenExclusiveModeEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkAcquireFullScreenExclusiveModeEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkAcquireFullScreenExclusiveModeEXT = OP_vkAcquireFullScreenExclusiveModeEXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkAcquireFullScreenExclusiveModeEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkAcquireFullScreenExclusiveModeEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSwapchainKHR((*&local_swapchain));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    VkResult vkAcquireFullScreenExclusiveModeEXT_VkResult_return = (VkResult)0;
+    stream->read(&vkAcquireFullScreenExclusiveModeEXT_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkAcquireFullScreenExclusiveModeEXT_VkResult_return;
+}
+
+VkResult VkEncoder::vkReleaseFullScreenExclusiveModeEXT(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkSwapchainKHR local_swapchain;
+    local_device = device;
+    local_swapchain = swapchain;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+    }
+    uint32_t packetSize_vkReleaseFullScreenExclusiveModeEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkReleaseFullScreenExclusiveModeEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkReleaseFullScreenExclusiveModeEXT = OP_vkReleaseFullScreenExclusiveModeEXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkReleaseFullScreenExclusiveModeEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkReleaseFullScreenExclusiveModeEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSwapchainKHR((*&local_swapchain));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    VkResult vkReleaseFullScreenExclusiveModeEXT_VkResult_return = (VkResult)0;
+    stream->read(&vkReleaseFullScreenExclusiveModeEXT_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkReleaseFullScreenExclusiveModeEXT_VkResult_return;
+}
+
+VkResult VkEncoder::vkGetDeviceGroupSurfacePresentModes2EXT(
+    VkDevice device,
+    const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+    VkDeviceGroupPresentModeFlagsKHR* pModes,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkPhysicalDeviceSurfaceInfo2KHR* local_pSurfaceInfo;
+    local_device = device;
+    local_pSurfaceInfo = nullptr;
+    if (pSurfaceInfo)
+    {
+        local_pSurfaceInfo = (VkPhysicalDeviceSurfaceInfo2KHR*)pool->alloc(sizeof(const VkPhysicalDeviceSurfaceInfo2KHR));
+        deepcopy_VkPhysicalDeviceSurfaceInfo2KHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSurfaceInfo, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo));
+    }
+    if (local_pSurfaceInfo)
+    {
+        transform_tohost_VkPhysicalDeviceSurfaceInfo2KHR(sResourceTracker, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPhysicalDeviceSurfaceInfo2KHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo), countPtr);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pModes)
+        {
+            *countPtr += sizeof(VkDeviceGroupPresentModeFlagsKHR);
+        }
+    }
+    uint32_t packetSize_vkGetDeviceGroupSurfacePresentModes2EXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDeviceGroupSurfacePresentModes2EXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetDeviceGroupSurfacePresentModes2EXT = OP_vkGetDeviceGroupSurfacePresentModes2EXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDeviceGroupSurfacePresentModes2EXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDeviceGroupSurfacePresentModes2EXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPhysicalDeviceSurfaceInfo2KHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPhysicalDeviceSurfaceInfo2KHR*)(local_pSurfaceInfo), streamPtrPtr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)pModes;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pModes)
+    {
+        memcpy(*streamPtrPtr, (VkDeviceGroupPresentModeFlagsKHR*)pModes, sizeof(VkDeviceGroupPresentModeFlagsKHR));
+        *streamPtrPtr += sizeof(VkDeviceGroupPresentModeFlagsKHR);
+    }
+    // WARNING PTR CHECK
+    VkDeviceGroupPresentModeFlagsKHR* check_pModes;
+    check_pModes = (VkDeviceGroupPresentModeFlagsKHR*)(uintptr_t)stream->getBe64();
+    if (pModes)
+    {
+        if (!(check_pModes))
+        {
+            fprintf(stderr, "fatal: pModes inconsistent between guest and host\n");
+        }
+        stream->read((VkDeviceGroupPresentModeFlagsKHR*)pModes, sizeof(VkDeviceGroupPresentModeFlagsKHR));
+    }
+    VkResult vkGetDeviceGroupSurfacePresentModes2EXT_VkResult_return = (VkResult)0;
+    stream->read(&vkGetDeviceGroupSurfacePresentModes2EXT_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetDeviceGroupSurfacePresentModes2EXT_VkResult_return;
+}
+
+#endif
+#ifdef VK_EXT_headless_surface
+VkResult VkEncoder::vkCreateHeadlessSurfaceEXT(
+    VkInstance instance,
+    const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkInstance local_instance;
+    VkHeadlessSurfaceCreateInfoEXT* local_pCreateInfo;
+    VkAllocationCallbacks* local_pAllocator;
+    local_instance = instance;
+    local_pCreateInfo = nullptr;
+    if (pCreateInfo)
+    {
+        local_pCreateInfo = (VkHeadlessSurfaceCreateInfoEXT*)pool->alloc(sizeof(const VkHeadlessSurfaceCreateInfoEXT));
+        deepcopy_VkHeadlessSurfaceCreateInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkHeadlessSurfaceCreateInfoEXT*)(local_pCreateInfo));
+    }
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pCreateInfo)
+    {
+        transform_tohost_VkHeadlessSurfaceCreateInfoEXT(sResourceTracker, (VkHeadlessSurfaceCreateInfoEXT*)(local_pCreateInfo));
+    }
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkHeadlessSurfaceCreateInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkHeadlessSurfaceCreateInfoEXT*)(local_pCreateInfo), countPtr);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+        uint64_t cgen_var_1;
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkCreateHeadlessSurfaceEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateHeadlessSurfaceEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCreateHeadlessSurfaceEXT = OP_vkCreateHeadlessSurfaceEXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateHeadlessSurfaceEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateHeadlessSurfaceEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkHeadlessSurfaceCreateInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkHeadlessSurfaceCreateInfoEXT*)(local_pCreateInfo), streamPtrPtr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSurface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_3, (VkSurfaceKHR*)pSurface, 1);
+    VkResult vkCreateHeadlessSurfaceEXT_VkResult_return = (VkResult)0;
+    stream->read(&vkCreateHeadlessSurfaceEXT_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCreateHeadlessSurfaceEXT_VkResult_return;
+}
+
+#endif
+#ifdef VK_EXT_line_rasterization
+void VkEncoder::vkCmdSetLineStippleEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t lineStippleFactor,
+    uint16_t lineStipplePattern,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    uint32_t local_lineStippleFactor;
+    uint16_t local_lineStipplePattern;
+    local_commandBuffer = commandBuffer;
+    local_lineStippleFactor = lineStippleFactor;
+    local_lineStipplePattern = lineStipplePattern;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint16_t);
+    }
+    uint32_t packetSize_vkCmdSetLineStippleEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetLineStippleEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetLineStippleEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetLineStippleEXT = OP_vkCmdSetLineStippleEXT;
+    memcpy(streamPtr, &opcode_vkCmdSetLineStippleEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetLineStippleEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_lineStippleFactor, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint16_t*)&local_lineStipplePattern, sizeof(uint16_t));
+    *streamPtrPtr += sizeof(uint16_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
+#ifdef VK_EXT_shader_atomic_float
+#endif
+#ifdef VK_EXT_host_query_reset
+void VkEncoder::vkResetQueryPoolEXT(
+    VkDevice device,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t queryCount,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkQueryPool local_queryPool;
+    uint32_t local_firstQuery;
+    uint32_t local_queryCount;
+    local_device = device;
+    local_queryPool = queryPool;
+    local_firstQuery = firstQuery;
+    local_queryCount = queryCount;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkResetQueryPoolEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkResetQueryPoolEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkResetQueryPoolEXT = OP_vkResetQueryPoolEXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkResetQueryPoolEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkResetQueryPoolEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkQueryPool((*&local_queryPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstQuery, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_queryCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
+#ifdef VK_EXT_index_type_uint8
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+void VkEncoder::vkCmdSetCullModeEXT(
+    VkCommandBuffer commandBuffer,
+    VkCullModeFlags cullMode,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkCullModeFlags local_cullMode;
+    local_commandBuffer = commandBuffer;
+    local_cullMode = cullMode;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkCullModeFlags);
+    }
+    uint32_t packetSize_vkCmdSetCullModeEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetCullModeEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetCullModeEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetCullModeEXT = OP_vkCmdSetCullModeEXT;
+    memcpy(streamPtr, &opcode_vkCmdSetCullModeEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetCullModeEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkCullModeFlags*)&local_cullMode, sizeof(VkCullModeFlags));
+    *streamPtrPtr += sizeof(VkCullModeFlags);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdSetFrontFaceEXT(
+    VkCommandBuffer commandBuffer,
+    VkFrontFace frontFace,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkFrontFace local_frontFace;
+    local_commandBuffer = commandBuffer;
+    local_frontFace = frontFace;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkFrontFace);
+    }
+    uint32_t packetSize_vkCmdSetFrontFaceEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetFrontFaceEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetFrontFaceEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetFrontFaceEXT = OP_vkCmdSetFrontFaceEXT;
+    memcpy(streamPtr, &opcode_vkCmdSetFrontFaceEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetFrontFaceEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkFrontFace*)&local_frontFace, sizeof(VkFrontFace));
+    *streamPtrPtr += sizeof(VkFrontFace);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdSetPrimitiveTopologyEXT(
+    VkCommandBuffer commandBuffer,
+    VkPrimitiveTopology primitiveTopology,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkPrimitiveTopology local_primitiveTopology;
+    local_commandBuffer = commandBuffer;
+    local_primitiveTopology = primitiveTopology;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkPrimitiveTopology);
+    }
+    uint32_t packetSize_vkCmdSetPrimitiveTopologyEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetPrimitiveTopologyEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetPrimitiveTopologyEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetPrimitiveTopologyEXT = OP_vkCmdSetPrimitiveTopologyEXT;
+    memcpy(streamPtr, &opcode_vkCmdSetPrimitiveTopologyEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetPrimitiveTopologyEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkPrimitiveTopology*)&local_primitiveTopology, sizeof(VkPrimitiveTopology));
+    *streamPtrPtr += sizeof(VkPrimitiveTopology);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdSetViewportWithCountEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t viewportCount,
+    const VkViewport* pViewports,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    uint32_t local_viewportCount;
+    VkViewport* local_pViewports;
+    local_commandBuffer = commandBuffer;
+    local_viewportCount = viewportCount;
+    local_pViewports = nullptr;
+    if (pViewports)
+    {
+        local_pViewports = (VkViewport*)pool->alloc(((viewportCount)) * sizeof(const VkViewport));
+        for (uint32_t i = 0; i < (uint32_t)((viewportCount)); ++i)
+        {
+            deepcopy_VkViewport(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pViewports + i, (VkViewport*)(local_pViewports + i));
+        }
+    }
+    if (local_pViewports)
+    {
+        for (uint32_t i = 0; i < (uint32_t)((viewportCount)); ++i)
+        {
+            transform_tohost_VkViewport(sResourceTracker, (VkViewport*)(local_pViewports + i));
+        }
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < (uint32_t)((viewportCount)); ++i)
+        {
+            count_VkViewport(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkViewport*)(local_pViewports + i), countPtr);
+        }
+    }
+    uint32_t packetSize_vkCmdSetViewportWithCountEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetViewportWithCountEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetViewportWithCountEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetViewportWithCountEXT = OP_vkCmdSetViewportWithCountEXT;
+    memcpy(streamPtr, &opcode_vkCmdSetViewportWithCountEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetViewportWithCountEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_viewportCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)((viewportCount)); ++i)
+    {
+        reservedmarshal_VkViewport(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkViewport*)(local_pViewports + i), streamPtrPtr);
+    }
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdSetScissorWithCountEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t scissorCount,
+    const VkRect2D* pScissors,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    uint32_t local_scissorCount;
+    VkRect2D* local_pScissors;
+    local_commandBuffer = commandBuffer;
+    local_scissorCount = scissorCount;
+    local_pScissors = nullptr;
+    if (pScissors)
+    {
+        local_pScissors = (VkRect2D*)pool->alloc(((scissorCount)) * sizeof(const VkRect2D));
+        for (uint32_t i = 0; i < (uint32_t)((scissorCount)); ++i)
+        {
+            deepcopy_VkRect2D(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pScissors + i, (VkRect2D*)(local_pScissors + i));
+        }
+    }
+    if (local_pScissors)
+    {
+        for (uint32_t i = 0; i < (uint32_t)((scissorCount)); ++i)
+        {
+            transform_tohost_VkRect2D(sResourceTracker, (VkRect2D*)(local_pScissors + i));
+        }
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < (uint32_t)((scissorCount)); ++i)
+        {
+            count_VkRect2D(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRect2D*)(local_pScissors + i), countPtr);
+        }
+    }
+    uint32_t packetSize_vkCmdSetScissorWithCountEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetScissorWithCountEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetScissorWithCountEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetScissorWithCountEXT = OP_vkCmdSetScissorWithCountEXT;
+    memcpy(streamPtr, &opcode_vkCmdSetScissorWithCountEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetScissorWithCountEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_scissorCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)((scissorCount)); ++i)
+    {
+        reservedmarshal_VkRect2D(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRect2D*)(local_pScissors + i), streamPtrPtr);
+    }
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdBindVertexBuffers2EXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstBinding,
+    uint32_t bindingCount,
+    const VkBuffer* pBuffers,
+    const VkDeviceSize* pOffsets,
+    const VkDeviceSize* pSizes,
+    const VkDeviceSize* pStrides,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    uint32_t local_firstBinding;
+    uint32_t local_bindingCount;
+    VkBuffer* local_pBuffers;
+    VkDeviceSize* local_pOffsets;
+    VkDeviceSize* local_pSizes;
+    VkDeviceSize* local_pStrides;
+    local_commandBuffer = commandBuffer;
+    local_firstBinding = firstBinding;
+    local_bindingCount = bindingCount;
+    // Avoiding deepcopy for pBuffers
+    local_pBuffers = (VkBuffer*)pBuffers;
+    // Avoiding deepcopy for pOffsets
+    local_pOffsets = (VkDeviceSize*)pOffsets;
+    // Avoiding deepcopy for pSizes
+    local_pSizes = (VkDeviceSize*)pSizes;
+    // Avoiding deepcopy for pStrides
+    local_pStrides = (VkDeviceSize*)pStrides;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        if (((bindingCount)))
+        {
+            *countPtr += ((bindingCount)) * 8;
+        }
+        *countPtr += ((bindingCount)) * sizeof(VkDeviceSize);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pSizes)
+        {
+            *countPtr += ((bindingCount)) * sizeof(VkDeviceSize);
+        }
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pStrides)
+        {
+            *countPtr += ((bindingCount)) * sizeof(VkDeviceSize);
+        }
+    }
+    uint32_t packetSize_vkCmdBindVertexBuffers2EXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBindVertexBuffers2EXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBindVertexBuffers2EXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdBindVertexBuffers2EXT = OP_vkCmdBindVertexBuffers2EXT;
+    memcpy(streamPtr, &opcode_vkCmdBindVertexBuffers2EXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBindVertexBuffers2EXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstBinding, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_bindingCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    if (((bindingCount)))
+    {
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((bindingCount)); ++k)
+        {
+            uint64_t tmpval = get_host_u64_VkBuffer(local_pBuffers[k]);
+            memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((bindingCount));
+    }
+    memcpy(*streamPtrPtr, (VkDeviceSize*)local_pOffsets, ((bindingCount)) * sizeof(VkDeviceSize));
+    *streamPtrPtr += ((bindingCount)) * sizeof(VkDeviceSize);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pSizes;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pSizes)
+    {
+        memcpy(*streamPtrPtr, (VkDeviceSize*)local_pSizes, ((bindingCount)) * sizeof(VkDeviceSize));
+        *streamPtrPtr += ((bindingCount)) * sizeof(VkDeviceSize);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pStrides;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pStrides)
+    {
+        memcpy(*streamPtrPtr, (VkDeviceSize*)local_pStrides, ((bindingCount)) * sizeof(VkDeviceSize));
+        *streamPtrPtr += ((bindingCount)) * sizeof(VkDeviceSize);
+    }
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdSetDepthTestEnableEXT(
+    VkCommandBuffer commandBuffer,
+    VkBool32 depthTestEnable,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkBool32 local_depthTestEnable;
+    local_commandBuffer = commandBuffer;
+    local_depthTestEnable = depthTestEnable;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkBool32);
+    }
+    uint32_t packetSize_vkCmdSetDepthTestEnableEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetDepthTestEnableEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetDepthTestEnableEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetDepthTestEnableEXT = OP_vkCmdSetDepthTestEnableEXT;
+    memcpy(streamPtr, &opcode_vkCmdSetDepthTestEnableEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetDepthTestEnableEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkBool32*)&local_depthTestEnable, sizeof(VkBool32));
+    *streamPtrPtr += sizeof(VkBool32);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdSetDepthWriteEnableEXT(
+    VkCommandBuffer commandBuffer,
+    VkBool32 depthWriteEnable,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkBool32 local_depthWriteEnable;
+    local_commandBuffer = commandBuffer;
+    local_depthWriteEnable = depthWriteEnable;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkBool32);
+    }
+    uint32_t packetSize_vkCmdSetDepthWriteEnableEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetDepthWriteEnableEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetDepthWriteEnableEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetDepthWriteEnableEXT = OP_vkCmdSetDepthWriteEnableEXT;
+    memcpy(streamPtr, &opcode_vkCmdSetDepthWriteEnableEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetDepthWriteEnableEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkBool32*)&local_depthWriteEnable, sizeof(VkBool32));
+    *streamPtrPtr += sizeof(VkBool32);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdSetDepthCompareOpEXT(
+    VkCommandBuffer commandBuffer,
+    VkCompareOp depthCompareOp,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkCompareOp local_depthCompareOp;
+    local_commandBuffer = commandBuffer;
+    local_depthCompareOp = depthCompareOp;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkCompareOp);
+    }
+    uint32_t packetSize_vkCmdSetDepthCompareOpEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetDepthCompareOpEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetDepthCompareOpEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetDepthCompareOpEXT = OP_vkCmdSetDepthCompareOpEXT;
+    memcpy(streamPtr, &opcode_vkCmdSetDepthCompareOpEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetDepthCompareOpEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkCompareOp*)&local_depthCompareOp, sizeof(VkCompareOp));
+    *streamPtrPtr += sizeof(VkCompareOp);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdSetDepthBoundsTestEnableEXT(
+    VkCommandBuffer commandBuffer,
+    VkBool32 depthBoundsTestEnable,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkBool32 local_depthBoundsTestEnable;
+    local_commandBuffer = commandBuffer;
+    local_depthBoundsTestEnable = depthBoundsTestEnable;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkBool32);
+    }
+    uint32_t packetSize_vkCmdSetDepthBoundsTestEnableEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetDepthBoundsTestEnableEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetDepthBoundsTestEnableEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetDepthBoundsTestEnableEXT = OP_vkCmdSetDepthBoundsTestEnableEXT;
+    memcpy(streamPtr, &opcode_vkCmdSetDepthBoundsTestEnableEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetDepthBoundsTestEnableEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkBool32*)&local_depthBoundsTestEnable, sizeof(VkBool32));
+    *streamPtrPtr += sizeof(VkBool32);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdSetStencilTestEnableEXT(
+    VkCommandBuffer commandBuffer,
+    VkBool32 stencilTestEnable,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkBool32 local_stencilTestEnable;
+    local_commandBuffer = commandBuffer;
+    local_stencilTestEnable = stencilTestEnable;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkBool32);
+    }
+    uint32_t packetSize_vkCmdSetStencilTestEnableEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetStencilTestEnableEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetStencilTestEnableEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetStencilTestEnableEXT = OP_vkCmdSetStencilTestEnableEXT;
+    memcpy(streamPtr, &opcode_vkCmdSetStencilTestEnableEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetStencilTestEnableEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkBool32*)&local_stencilTestEnable, sizeof(VkBool32));
+    *streamPtrPtr += sizeof(VkBool32);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdSetStencilOpEXT(
+    VkCommandBuffer commandBuffer,
+    VkStencilFaceFlags faceMask,
+    VkStencilOp failOp,
+    VkStencilOp passOp,
+    VkStencilOp depthFailOp,
+    VkCompareOp compareOp,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkStencilFaceFlags local_faceMask;
+    VkStencilOp local_failOp;
+    VkStencilOp local_passOp;
+    VkStencilOp local_depthFailOp;
+    VkCompareOp local_compareOp;
+    local_commandBuffer = commandBuffer;
+    local_faceMask = faceMask;
+    local_failOp = failOp;
+    local_passOp = passOp;
+    local_depthFailOp = depthFailOp;
+    local_compareOp = compareOp;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkStencilFaceFlags);
+        *countPtr += sizeof(VkStencilOp);
+        *countPtr += sizeof(VkStencilOp);
+        *countPtr += sizeof(VkStencilOp);
+        *countPtr += sizeof(VkCompareOp);
+    }
+    uint32_t packetSize_vkCmdSetStencilOpEXT = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetStencilOpEXT -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetStencilOpEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetStencilOpEXT = OP_vkCmdSetStencilOpEXT;
+    memcpy(streamPtr, &opcode_vkCmdSetStencilOpEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetStencilOpEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkStencilFaceFlags*)&local_faceMask, sizeof(VkStencilFaceFlags));
+    *streamPtrPtr += sizeof(VkStencilFaceFlags);
+    memcpy(*streamPtrPtr, (VkStencilOp*)&local_failOp, sizeof(VkStencilOp));
+    *streamPtrPtr += sizeof(VkStencilOp);
+    memcpy(*streamPtrPtr, (VkStencilOp*)&local_passOp, sizeof(VkStencilOp));
+    *streamPtrPtr += sizeof(VkStencilOp);
+    memcpy(*streamPtrPtr, (VkStencilOp*)&local_depthFailOp, sizeof(VkStencilOp));
+    *streamPtrPtr += sizeof(VkStencilOp);
+    memcpy(*streamPtrPtr, (VkCompareOp*)&local_compareOp, sizeof(VkCompareOp));
+    *streamPtrPtr += sizeof(VkCompareOp);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+#endif
+#ifdef VK_NV_device_generated_commands
+void VkEncoder::vkGetGeneratedCommandsMemoryRequirementsNV(
+    VkDevice device,
+    const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkGeneratedCommandsMemoryRequirementsInfoNV* local_pInfo;
+    local_device = device;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkGeneratedCommandsMemoryRequirementsInfoNV*)pool->alloc(sizeof(const VkGeneratedCommandsMemoryRequirementsInfoNV));
+        deepcopy_VkGeneratedCommandsMemoryRequirementsInfoNV(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkGeneratedCommandsMemoryRequirementsInfoNV*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkGeneratedCommandsMemoryRequirementsInfoNV(sResourceTracker, (VkGeneratedCommandsMemoryRequirementsInfoNV*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkGeneratedCommandsMemoryRequirementsInfoNV(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkGeneratedCommandsMemoryRequirementsInfoNV*)(local_pInfo), countPtr);
+        count_VkMemoryRequirements2(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2*)(pMemoryRequirements), countPtr);
+    }
+    uint32_t packetSize_vkGetGeneratedCommandsMemoryRequirementsNV = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetGeneratedCommandsMemoryRequirementsNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetGeneratedCommandsMemoryRequirementsNV = OP_vkGetGeneratedCommandsMemoryRequirementsNV;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetGeneratedCommandsMemoryRequirementsNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetGeneratedCommandsMemoryRequirementsNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkGeneratedCommandsMemoryRequirementsInfoNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkGeneratedCommandsMemoryRequirementsInfoNV*)(local_pInfo), streamPtrPtr);
+    reservedmarshal_VkMemoryRequirements2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2*)(pMemoryRequirements), streamPtrPtr);
+    unmarshal_VkMemoryRequirements2(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements2*)(pMemoryRequirements));
+    if (pMemoryRequirements)
+    {
+        transform_fromhost_VkMemoryRequirements2(sResourceTracker, (VkMemoryRequirements2*)(pMemoryRequirements));
+    }
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdPreprocessGeneratedCommandsNV(
+    VkCommandBuffer commandBuffer,
+    const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkGeneratedCommandsInfoNV* local_pGeneratedCommandsInfo;
+    local_commandBuffer = commandBuffer;
+    local_pGeneratedCommandsInfo = nullptr;
+    if (pGeneratedCommandsInfo)
+    {
+        local_pGeneratedCommandsInfo = (VkGeneratedCommandsInfoNV*)pool->alloc(sizeof(const VkGeneratedCommandsInfoNV));
+        deepcopy_VkGeneratedCommandsInfoNV(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pGeneratedCommandsInfo, (VkGeneratedCommandsInfoNV*)(local_pGeneratedCommandsInfo));
+    }
+    if (local_pGeneratedCommandsInfo)
+    {
+        transform_tohost_VkGeneratedCommandsInfoNV(sResourceTracker, (VkGeneratedCommandsInfoNV*)(local_pGeneratedCommandsInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkGeneratedCommandsInfoNV(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkGeneratedCommandsInfoNV*)(local_pGeneratedCommandsInfo), countPtr);
+    }
+    uint32_t packetSize_vkCmdPreprocessGeneratedCommandsNV = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdPreprocessGeneratedCommandsNV -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdPreprocessGeneratedCommandsNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdPreprocessGeneratedCommandsNV = OP_vkCmdPreprocessGeneratedCommandsNV;
+    memcpy(streamPtr, &opcode_vkCmdPreprocessGeneratedCommandsNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdPreprocessGeneratedCommandsNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkGeneratedCommandsInfoNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkGeneratedCommandsInfoNV*)(local_pGeneratedCommandsInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdExecuteGeneratedCommandsNV(
+    VkCommandBuffer commandBuffer,
+    VkBool32 isPreprocessed,
+    const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkBool32 local_isPreprocessed;
+    VkGeneratedCommandsInfoNV* local_pGeneratedCommandsInfo;
+    local_commandBuffer = commandBuffer;
+    local_isPreprocessed = isPreprocessed;
+    local_pGeneratedCommandsInfo = nullptr;
+    if (pGeneratedCommandsInfo)
+    {
+        local_pGeneratedCommandsInfo = (VkGeneratedCommandsInfoNV*)pool->alloc(sizeof(const VkGeneratedCommandsInfoNV));
+        deepcopy_VkGeneratedCommandsInfoNV(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pGeneratedCommandsInfo, (VkGeneratedCommandsInfoNV*)(local_pGeneratedCommandsInfo));
+    }
+    if (local_pGeneratedCommandsInfo)
+    {
+        transform_tohost_VkGeneratedCommandsInfoNV(sResourceTracker, (VkGeneratedCommandsInfoNV*)(local_pGeneratedCommandsInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkBool32);
+        count_VkGeneratedCommandsInfoNV(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkGeneratedCommandsInfoNV*)(local_pGeneratedCommandsInfo), countPtr);
+    }
+    uint32_t packetSize_vkCmdExecuteGeneratedCommandsNV = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdExecuteGeneratedCommandsNV -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdExecuteGeneratedCommandsNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdExecuteGeneratedCommandsNV = OP_vkCmdExecuteGeneratedCommandsNV;
+    memcpy(streamPtr, &opcode_vkCmdExecuteGeneratedCommandsNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdExecuteGeneratedCommandsNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkBool32*)&local_isPreprocessed, sizeof(VkBool32));
+    *streamPtrPtr += sizeof(VkBool32);
+    reservedmarshal_VkGeneratedCommandsInfoNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkGeneratedCommandsInfoNV*)(local_pGeneratedCommandsInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdBindPipelineShaderGroupNV(
+    VkCommandBuffer commandBuffer,
+    VkPipelineBindPoint pipelineBindPoint,
+    VkPipeline pipeline,
+    uint32_t groupIndex,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkPipelineBindPoint local_pipelineBindPoint;
+    VkPipeline local_pipeline;
+    uint32_t local_groupIndex;
+    local_commandBuffer = commandBuffer;
+    local_pipelineBindPoint = pipelineBindPoint;
+    local_pipeline = pipeline;
+    local_groupIndex = groupIndex;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkPipelineBindPoint);
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkCmdBindPipelineShaderGroupNV = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBindPipelineShaderGroupNV -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBindPipelineShaderGroupNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdBindPipelineShaderGroupNV = OP_vkCmdBindPipelineShaderGroupNV;
+    memcpy(streamPtr, &opcode_vkCmdBindPipelineShaderGroupNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBindPipelineShaderGroupNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkPipelineBindPoint*)&local_pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    *streamPtrPtr += sizeof(VkPipelineBindPoint);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPipeline((*&local_pipeline));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_groupIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+VkResult VkEncoder::vkCreateIndirectCommandsLayoutNV(
+    VkDevice device,
+    const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkIndirectCommandsLayoutNV* pIndirectCommandsLayout,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkIndirectCommandsLayoutCreateInfoNV* local_pCreateInfo;
+    VkAllocationCallbacks* local_pAllocator;
+    local_device = device;
+    local_pCreateInfo = nullptr;
+    if (pCreateInfo)
+    {
+        local_pCreateInfo = (VkIndirectCommandsLayoutCreateInfoNV*)pool->alloc(sizeof(const VkIndirectCommandsLayoutCreateInfoNV));
+        deepcopy_VkIndirectCommandsLayoutCreateInfoNV(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkIndirectCommandsLayoutCreateInfoNV*)(local_pCreateInfo));
+    }
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pCreateInfo)
+    {
+        transform_tohost_VkIndirectCommandsLayoutCreateInfoNV(sResourceTracker, (VkIndirectCommandsLayoutCreateInfoNV*)(local_pCreateInfo));
+    }
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkIndirectCommandsLayoutCreateInfoNV(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkIndirectCommandsLayoutCreateInfoNV*)(local_pCreateInfo), countPtr);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+        uint64_t cgen_var_1;
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkCreateIndirectCommandsLayoutNV = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateIndirectCommandsLayoutNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCreateIndirectCommandsLayoutNV = OP_vkCreateIndirectCommandsLayoutNV;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateIndirectCommandsLayoutNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateIndirectCommandsLayoutNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkIndirectCommandsLayoutCreateInfoNV(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkIndirectCommandsLayoutCreateInfoNV*)(local_pCreateInfo), streamPtrPtr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pIndirectCommandsLayout));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkIndirectCommandsLayoutNV(&cgen_var_3, (VkIndirectCommandsLayoutNV*)pIndirectCommandsLayout, 1);
+    stream->unsetHandleMapping();
+    VkResult vkCreateIndirectCommandsLayoutNV_VkResult_return = (VkResult)0;
+    stream->read(&vkCreateIndirectCommandsLayoutNV_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCreateIndirectCommandsLayoutNV_VkResult_return;
+}
+
+void VkEncoder::vkDestroyIndirectCommandsLayoutNV(
+    VkDevice device,
+    VkIndirectCommandsLayoutNV indirectCommandsLayout,
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkIndirectCommandsLayoutNV local_indirectCommandsLayout;
+    VkAllocationCallbacks* local_pAllocator;
+    local_device = device;
+    local_indirectCommandsLayout = indirectCommandsLayout;
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+    }
+    uint32_t packetSize_vkDestroyIndirectCommandsLayoutNV = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyIndirectCommandsLayoutNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkDestroyIndirectCommandsLayoutNV = OP_vkDestroyIndirectCommandsLayoutNV;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyIndirectCommandsLayoutNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyIndirectCommandsLayoutNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkIndirectCommandsLayoutNV((*&local_indirectCommandsLayout));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    sResourceTracker->destroyMapping()->mapHandles_VkIndirectCommandsLayoutNV((VkIndirectCommandsLayoutNV*)&indirectCommandsLayout);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+#endif
+#ifdef VK_QCOM_render_pass_transform
+#endif
+#ifdef VK_EXT_device_memory_report
+#endif
+#ifdef VK_EXT_robustness2
+#endif
+#ifdef VK_EXT_custom_border_color
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+VkResult VkEncoder::vkCreatePrivateDataSlotEXT(
+    VkDevice device,
+    const VkPrivateDataSlotCreateInfoEXT* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkPrivateDataSlotEXT* pPrivateDataSlot,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkPrivateDataSlotCreateInfoEXT* local_pCreateInfo;
+    VkAllocationCallbacks* local_pAllocator;
+    local_device = device;
+    local_pCreateInfo = nullptr;
+    if (pCreateInfo)
+    {
+        local_pCreateInfo = (VkPrivateDataSlotCreateInfoEXT*)pool->alloc(sizeof(const VkPrivateDataSlotCreateInfoEXT));
+        deepcopy_VkPrivateDataSlotCreateInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkPrivateDataSlotCreateInfoEXT*)(local_pCreateInfo));
+    }
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pCreateInfo)
+    {
+        transform_tohost_VkPrivateDataSlotCreateInfoEXT(sResourceTracker, (VkPrivateDataSlotCreateInfoEXT*)(local_pCreateInfo));
+    }
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkPrivateDataSlotCreateInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPrivateDataSlotCreateInfoEXT*)(local_pCreateInfo), countPtr);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkCreatePrivateDataSlotEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreatePrivateDataSlotEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCreatePrivateDataSlotEXT = OP_vkCreatePrivateDataSlotEXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreatePrivateDataSlotEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreatePrivateDataSlotEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkPrivateDataSlotCreateInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkPrivateDataSlotCreateInfoEXT*)(local_pCreateInfo), streamPtrPtr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    uint64_t cgen_var_2 = (uint64_t)(*pPrivateDataSlot);
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    (*pPrivateDataSlot) = (VkPrivateDataSlotEXT)stream->getBe64();
+    VkResult vkCreatePrivateDataSlotEXT_VkResult_return = (VkResult)0;
+    stream->read(&vkCreatePrivateDataSlotEXT_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCreatePrivateDataSlotEXT_VkResult_return;
+}
+
+void VkEncoder::vkDestroyPrivateDataSlotEXT(
+    VkDevice device,
+    VkPrivateDataSlotEXT privateDataSlot,
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkPrivateDataSlotEXT local_privateDataSlot;
+    VkAllocationCallbacks* local_pAllocator;
+    local_device = device;
+    local_privateDataSlot = privateDataSlot;
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += 8;
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+    }
+    uint32_t packetSize_vkDestroyPrivateDataSlotEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyPrivateDataSlotEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkDestroyPrivateDataSlotEXT = OP_vkDestroyPrivateDataSlotEXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyPrivateDataSlotEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyPrivateDataSlotEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1 = (uint64_t)local_privateDataSlot;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+VkResult VkEncoder::vkSetPrivateDataEXT(
+    VkDevice device,
+    VkObjectType objectType,
+    uint64_t objectHandle,
+    VkPrivateDataSlotEXT privateDataSlot,
+    uint64_t data,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkObjectType local_objectType;
+    uint64_t local_objectHandle;
+    VkPrivateDataSlotEXT local_privateDataSlot;
+    uint64_t local_data;
+    local_device = device;
+    local_objectType = objectType;
+    local_objectHandle = objectHandle;
+    local_privateDataSlot = privateDataSlot;
+    local_data = data;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkObjectType);
+        *countPtr += sizeof(uint64_t);
+        *countPtr += 8;
+        *countPtr += sizeof(uint64_t);
+    }
+    uint32_t packetSize_vkSetPrivateDataEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkSetPrivateDataEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkSetPrivateDataEXT = OP_vkSetPrivateDataEXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkSetPrivateDataEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkSetPrivateDataEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkObjectType*)&local_objectType, sizeof(VkObjectType));
+    *streamPtrPtr += sizeof(VkObjectType);
+    memcpy(*streamPtrPtr, (uint64_t*)&local_objectHandle, sizeof(uint64_t));
+    *streamPtrPtr += sizeof(uint64_t);
+    uint64_t cgen_var_1 = (uint64_t)local_privateDataSlot;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    memcpy(*streamPtrPtr, (uint64_t*)&local_data, sizeof(uint64_t));
+    *streamPtrPtr += sizeof(uint64_t);
+    VkResult vkSetPrivateDataEXT_VkResult_return = (VkResult)0;
+    stream->read(&vkSetPrivateDataEXT_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkSetPrivateDataEXT_VkResult_return;
+}
+
+void VkEncoder::vkGetPrivateDataEXT(
+    VkDevice device,
+    VkObjectType objectType,
+    uint64_t objectHandle,
+    VkPrivateDataSlotEXT privateDataSlot,
+    uint64_t* pData,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkObjectType local_objectType;
+    uint64_t local_objectHandle;
+    VkPrivateDataSlotEXT local_privateDataSlot;
+    local_device = device;
+    local_objectType = objectType;
+    local_objectHandle = objectHandle;
+    local_privateDataSlot = privateDataSlot;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkObjectType);
+        *countPtr += sizeof(uint64_t);
+        *countPtr += 8;
+        *countPtr += sizeof(uint64_t);
+    }
+    uint32_t packetSize_vkGetPrivateDataEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPrivateDataEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetPrivateDataEXT = OP_vkGetPrivateDataEXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPrivateDataEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPrivateDataEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkObjectType*)&local_objectType, sizeof(VkObjectType));
+    *streamPtrPtr += sizeof(VkObjectType);
+    memcpy(*streamPtrPtr, (uint64_t*)&local_objectHandle, sizeof(uint64_t));
+    *streamPtrPtr += sizeof(uint64_t);
+    uint64_t cgen_var_1 = (uint64_t)local_privateDataSlot;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    memcpy(*streamPtrPtr, (uint64_t*)pData, sizeof(uint64_t));
+    *streamPtrPtr += sizeof(uint64_t);
+    stream->read((uint64_t*)pData, sizeof(uint64_t));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+#endif
+#ifdef VK_NV_device_diagnostics_config
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+void VkEncoder::vkCmdSetFragmentShadingRateEnumNV(
+    VkCommandBuffer commandBuffer,
+    VkFragmentShadingRateNV shadingRate,
+    const VkFragmentShadingRateCombinerOpKHR combinerOps[2],
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkFragmentShadingRateNV local_shadingRate;
+    VkFragmentShadingRateCombinerOpKHR local_combinerOps[2];
+    local_commandBuffer = commandBuffer;
+    local_shadingRate = shadingRate;
+    memcpy(local_combinerOps, combinerOps, 2 * sizeof(const VkFragmentShadingRateCombinerOpKHR));
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkFragmentShadingRateNV);
+        *countPtr += 2 * sizeof(VkFragmentShadingRateCombinerOpKHR);
+    }
+    uint32_t packetSize_vkCmdSetFragmentShadingRateEnumNV = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetFragmentShadingRateEnumNV -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetFragmentShadingRateEnumNV);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetFragmentShadingRateEnumNV = OP_vkCmdSetFragmentShadingRateEnumNV;
+    memcpy(streamPtr, &opcode_vkCmdSetFragmentShadingRateEnumNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetFragmentShadingRateEnumNV, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkFragmentShadingRateNV*)&local_shadingRate, sizeof(VkFragmentShadingRateNV));
+    *streamPtrPtr += sizeof(VkFragmentShadingRateNV);
+    memcpy(*streamPtrPtr, (VkFragmentShadingRateCombinerOpKHR*)local_combinerOps, 2 * sizeof(VkFragmentShadingRateCombinerOpKHR));
+    *streamPtrPtr += 2 * sizeof(VkFragmentShadingRateCombinerOpKHR);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
+#ifdef VK_EXT_fragment_density_map2
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+#endif
+#ifdef VK_EXT_image_robustness
+#endif
+#ifdef VK_EXT_4444_formats
+#endif
+#ifdef VK_EXT_directfb_surface
+VkResult VkEncoder::vkCreateDirectFBSurfaceEXT(
+    VkInstance instance,
+    const VkDirectFBSurfaceCreateInfoEXT* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkInstance local_instance;
+    VkDirectFBSurfaceCreateInfoEXT* local_pCreateInfo;
+    VkAllocationCallbacks* local_pAllocator;
+    local_instance = instance;
+    local_pCreateInfo = nullptr;
+    if (pCreateInfo)
+    {
+        local_pCreateInfo = (VkDirectFBSurfaceCreateInfoEXT*)pool->alloc(sizeof(const VkDirectFBSurfaceCreateInfoEXT));
+        deepcopy_VkDirectFBSurfaceCreateInfoEXT(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkDirectFBSurfaceCreateInfoEXT*)(local_pCreateInfo));
+    }
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pCreateInfo)
+    {
+        transform_tohost_VkDirectFBSurfaceCreateInfoEXT(sResourceTracker, (VkDirectFBSurfaceCreateInfoEXT*)(local_pCreateInfo));
+    }
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkDirectFBSurfaceCreateInfoEXT(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDirectFBSurfaceCreateInfoEXT*)(local_pCreateInfo), countPtr);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+        uint64_t cgen_var_1;
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkCreateDirectFBSurfaceEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateDirectFBSurfaceEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCreateDirectFBSurfaceEXT = OP_vkCreateDirectFBSurfaceEXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateDirectFBSurfaceEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateDirectFBSurfaceEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkInstance((*&local_instance));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkDirectFBSurfaceCreateInfoEXT(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDirectFBSurfaceCreateInfoEXT*)(local_pCreateInfo), streamPtrPtr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pSurface));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_3, (VkSurfaceKHR*)pSurface, 1);
+    VkResult vkCreateDirectFBSurfaceEXT_VkResult_return = (VkResult)0;
+    stream->read(&vkCreateDirectFBSurfaceEXT_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCreateDirectFBSurfaceEXT_VkResult_return;
+}
+
+VkBool32 VkEncoder::vkGetPhysicalDeviceDirectFBPresentationSupportEXT(
+    VkPhysicalDevice physicalDevice,
+    uint32_t queueFamilyIndex,
+    IDirectFB* dfb,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkPhysicalDevice local_physicalDevice;
+    uint32_t local_queueFamilyIndex;
+    local_physicalDevice = physicalDevice;
+    local_queueFamilyIndex = queueFamilyIndex;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(IDirectFB);
+    }
+    uint32_t packetSize_vkGetPhysicalDeviceDirectFBPresentationSupportEXT = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetPhysicalDeviceDirectFBPresentationSupportEXT);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetPhysicalDeviceDirectFBPresentationSupportEXT = OP_vkGetPhysicalDeviceDirectFBPresentationSupportEXT;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetPhysicalDeviceDirectFBPresentationSupportEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetPhysicalDeviceDirectFBPresentationSupportEXT, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPhysicalDevice((*&local_physicalDevice));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_queueFamilyIndex, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (IDirectFB*)dfb, sizeof(IDirectFB));
+    *streamPtrPtr += sizeof(IDirectFB);
+    stream->read((IDirectFB*)dfb, sizeof(IDirectFB));
+    VkBool32 vkGetPhysicalDeviceDirectFBPresentationSupportEXT_VkBool32_return = (VkBool32)0;
+    stream->read(&vkGetPhysicalDeviceDirectFBPresentationSupportEXT_VkBool32_return, sizeof(VkBool32));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetPhysicalDeviceDirectFBPresentationSupportEXT_VkBool32_return;
+}
+
+#endif
+#ifdef VK_GOOGLE_gfxstream
+VkResult VkEncoder::vkRegisterImageColorBufferGOOGLE(
+    VkDevice device,
+    VkImage image,
+    uint32_t colorBuffer,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkImage local_image;
+    uint32_t local_colorBuffer;
+    local_device = device;
+    local_image = image;
+    local_colorBuffer = colorBuffer;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkRegisterImageColorBufferGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkRegisterImageColorBufferGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkRegisterImageColorBufferGOOGLE = OP_vkRegisterImageColorBufferGOOGLE;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkRegisterImageColorBufferGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkRegisterImageColorBufferGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImage((*&local_image));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_colorBuffer, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    VkResult vkRegisterImageColorBufferGOOGLE_VkResult_return = (VkResult)0;
+    stream->read(&vkRegisterImageColorBufferGOOGLE_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkRegisterImageColorBufferGOOGLE_VkResult_return;
+}
+
+VkResult VkEncoder::vkRegisterBufferColorBufferGOOGLE(
+    VkDevice device,
+    VkBuffer buffer,
+    uint32_t colorBuffer,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkBuffer local_buffer;
+    uint32_t local_colorBuffer;
+    local_device = device;
+    local_buffer = buffer;
+    local_colorBuffer = colorBuffer;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkRegisterBufferColorBufferGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkRegisterBufferColorBufferGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkRegisterBufferColorBufferGOOGLE = OP_vkRegisterBufferColorBufferGOOGLE;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkRegisterBufferColorBufferGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkRegisterBufferColorBufferGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&local_buffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_colorBuffer, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    VkResult vkRegisterBufferColorBufferGOOGLE_VkResult_return = (VkResult)0;
+    stream->read(&vkRegisterBufferColorBufferGOOGLE_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkRegisterBufferColorBufferGOOGLE_VkResult_return;
+}
+
 VkResult VkEncoder::vkMapMemoryIntoAddressSpaceGOOGLE(
     VkDevice device,
     VkDeviceMemory memory,
-    uint64_t* pAddress)
+    uint64_t* pAddress,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkMapMemoryIntoAddressSpaceGOOGLE encode");
-    mImpl->log("start vkMapMemoryIntoAddressSpaceGOOGLE");
-    encoderLock.unlock();
-    mImpl->resources()->on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(this, VK_SUCCESS, device, memory, pAddress);
-    encoderLock.lock();
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    sResourceTracker->on_vkMapMemoryIntoAddressSpaceGOOGLE_pre(this, VK_SUCCESS, device, memory, pAddress);
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDeviceMemory local_memory;
     local_device = device;
     local_memory = memory;
-    mImpl->resources()->deviceMemoryTransform_tohost((VkDeviceMemory*)&local_memory, 1, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
-    countingStream->rewind();
+    sResourceTracker->deviceMemoryTransform_tohost((VkDeviceMemory*)&local_memory, 1, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1477;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1477, 1);
-        countingStream->write((uint64_t*)&cgen_var_1477, 1 * 8);
-        uint64_t cgen_var_1478;
-        countingStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&local_memory, &cgen_var_1478, 1);
-        countingStream->write((uint64_t*)&cgen_var_1478, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_1479 = (uint64_t)(uintptr_t)pAddress;
-        countingStream->putBe64(cgen_var_1479);
+        *countPtr += 8;
         if (pAddress)
         {
-            countingStream->write((uint64_t*)pAddress, sizeof(uint64_t));
+            *countPtr += sizeof(uint64_t);
         }
     }
-    uint32_t packetSize_vkMapMemoryIntoAddressSpaceGOOGLE = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkMapMemoryIntoAddressSpaceGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkMapMemoryIntoAddressSpaceGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkMapMemoryIntoAddressSpaceGOOGLE = OP_vkMapMemoryIntoAddressSpaceGOOGLE;
-    stream->write(&opcode_vkMapMemoryIntoAddressSpaceGOOGLE, sizeof(uint32_t));
-    stream->write(&packetSize_vkMapMemoryIntoAddressSpaceGOOGLE, sizeof(uint32_t));
-    uint64_t cgen_var_1480;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1480, 1);
-    stream->write((uint64_t*)&cgen_var_1480, 1 * 8);
-    uint64_t cgen_var_1481;
-    stream->handleMapping()->mapHandles_VkDeviceMemory_u64(&local_memory, &cgen_var_1481, 1);
-    stream->write((uint64_t*)&cgen_var_1481, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkMapMemoryIntoAddressSpaceGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkMapMemoryIntoAddressSpaceGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDeviceMemory((*&local_memory));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_1482 = (uint64_t)(uintptr_t)pAddress;
-    stream->putBe64(cgen_var_1482);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pAddress;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pAddress)
     {
-        stream->write((uint64_t*)pAddress, sizeof(uint64_t));
+        memcpy(*streamPtrPtr, (uint64_t*)pAddress, sizeof(uint64_t));
+        *streamPtrPtr += sizeof(uint64_t);
     }
-    AEMU_SCOPED_TRACE("vkMapMemoryIntoAddressSpaceGOOGLE readParams");
     // WARNING PTR CHECK
     uint64_t* check_pAddress;
     check_pAddress = (uint64_t*)(uintptr_t)stream->getBe64();
@@ -22582,127 +33098,19 @@
         }
         stream->read((uint64_t*)pAddress, sizeof(uint64_t));
     }
-    AEMU_SCOPED_TRACE("vkMapMemoryIntoAddressSpaceGOOGLE returnUnmarshal");
     VkResult vkMapMemoryIntoAddressSpaceGOOGLE_VkResult_return = (VkResult)0;
     stream->read(&vkMapMemoryIntoAddressSpaceGOOGLE_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    encoderLock.unlock();
-    mImpl->resources()->on_vkMapMemoryIntoAddressSpaceGOOGLE(this, vkMapMemoryIntoAddressSpaceGOOGLE_VkResult_return, device, memory, pAddress);
-    encoderLock.lock();
-    mImpl->log("finish vkMapMemoryIntoAddressSpaceGOOGLE");;
+    sResourceTracker->on_vkMapMemoryIntoAddressSpaceGOOGLE(this, vkMapMemoryIntoAddressSpaceGOOGLE_VkResult_return, device, memory, pAddress);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkMapMemoryIntoAddressSpaceGOOGLE_VkResult_return;
 }
 
-#endif
-#ifdef VK_GOOGLE_color_buffer
-VkResult VkEncoder::vkRegisterImageColorBufferGOOGLE(
-    VkDevice device,
-    VkImage image,
-    uint32_t colorBuffer)
-{
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkRegisterImageColorBufferGOOGLE encode");
-    mImpl->log("start vkRegisterImageColorBufferGOOGLE");
-    auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
-    auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
-    VkDevice local_device;
-    VkImage local_image;
-    uint32_t local_colorBuffer;
-    local_device = device;
-    local_image = image;
-    local_colorBuffer = colorBuffer;
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1484;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1484, 1);
-        countingStream->write((uint64_t*)&cgen_var_1484, 1 * 8);
-        uint64_t cgen_var_1485;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_1485, 1);
-        countingStream->write((uint64_t*)&cgen_var_1485, 1 * 8);
-        countingStream->write((uint32_t*)&local_colorBuffer, sizeof(uint32_t));
-    }
-    uint32_t packetSize_vkRegisterImageColorBufferGOOGLE = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
-    uint32_t opcode_vkRegisterImageColorBufferGOOGLE = OP_vkRegisterImageColorBufferGOOGLE;
-    stream->write(&opcode_vkRegisterImageColorBufferGOOGLE, sizeof(uint32_t));
-    stream->write(&packetSize_vkRegisterImageColorBufferGOOGLE, sizeof(uint32_t));
-    uint64_t cgen_var_1486;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1486, 1);
-    stream->write((uint64_t*)&cgen_var_1486, 1 * 8);
-    uint64_t cgen_var_1487;
-    stream->handleMapping()->mapHandles_VkImage_u64(&local_image, &cgen_var_1487, 1);
-    stream->write((uint64_t*)&cgen_var_1487, 1 * 8);
-    stream->write((uint32_t*)&local_colorBuffer, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkRegisterImageColorBufferGOOGLE readParams");
-    AEMU_SCOPED_TRACE("vkRegisterImageColorBufferGOOGLE returnUnmarshal");
-    VkResult vkRegisterImageColorBufferGOOGLE_VkResult_return = (VkResult)0;
-    stream->read(&vkRegisterImageColorBufferGOOGLE_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkRegisterImageColorBufferGOOGLE");;
-    return vkRegisterImageColorBufferGOOGLE_VkResult_return;
-}
-
-VkResult VkEncoder::vkRegisterBufferColorBufferGOOGLE(
-    VkDevice device,
-    VkBuffer buffer,
-    uint32_t colorBuffer)
-{
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkRegisterBufferColorBufferGOOGLE encode");
-    mImpl->log("start vkRegisterBufferColorBufferGOOGLE");
-    auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
-    auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
-    VkDevice local_device;
-    VkBuffer local_buffer;
-    uint32_t local_colorBuffer;
-    local_device = device;
-    local_buffer = buffer;
-    local_colorBuffer = colorBuffer;
-    countingStream->rewind();
-    {
-        uint64_t cgen_var_1488;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1488, 1);
-        countingStream->write((uint64_t*)&cgen_var_1488, 1 * 8);
-        uint64_t cgen_var_1489;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_1489, 1);
-        countingStream->write((uint64_t*)&cgen_var_1489, 1 * 8);
-        countingStream->write((uint32_t*)&local_colorBuffer, sizeof(uint32_t));
-    }
-    uint32_t packetSize_vkRegisterBufferColorBufferGOOGLE = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
-    uint32_t opcode_vkRegisterBufferColorBufferGOOGLE = OP_vkRegisterBufferColorBufferGOOGLE;
-    stream->write(&opcode_vkRegisterBufferColorBufferGOOGLE, sizeof(uint32_t));
-    stream->write(&packetSize_vkRegisterBufferColorBufferGOOGLE, sizeof(uint32_t));
-    uint64_t cgen_var_1490;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1490, 1);
-    stream->write((uint64_t*)&cgen_var_1490, 1 * 8);
-    uint64_t cgen_var_1491;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(&local_buffer, &cgen_var_1491, 1);
-    stream->write((uint64_t*)&cgen_var_1491, 1 * 8);
-    stream->write((uint32_t*)&local_colorBuffer, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkRegisterBufferColorBufferGOOGLE readParams");
-    AEMU_SCOPED_TRACE("vkRegisterBufferColorBufferGOOGLE returnUnmarshal");
-    VkResult vkRegisterBufferColorBufferGOOGLE_VkResult_return = (VkResult)0;
-    stream->read(&vkRegisterBufferColorBufferGOOGLE_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkRegisterBufferColorBufferGOOGLE");;
-    return vkRegisterBufferColorBufferGOOGLE_VkResult_return;
-}
-
-#endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
 void VkEncoder::vkUpdateDescriptorSetWithTemplateSizedGOOGLE(
     VkDevice device,
     VkDescriptorSet descriptorSet,
@@ -22715,16 +33123,14 @@
     const uint32_t* pBufferViewEntryIndices,
     const VkDescriptorImageInfo* pImageInfos,
     const VkDescriptorBufferInfo* pBufferInfos,
-    const VkBufferView* pBufferViews)
+    const VkBufferView* pBufferViews,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplateSizedGOOGLE encode");
-    mImpl->log("start vkUpdateDescriptorSetWithTemplateSizedGOOGLE");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDescriptorSet local_descriptorSet;
     VkDescriptorUpdateTemplate local_descriptorUpdateTemplate;
@@ -22743,28 +33149,19 @@
     local_imageInfoCount = imageInfoCount;
     local_bufferInfoCount = bufferInfoCount;
     local_bufferViewCount = bufferViewCount;
-    local_pImageInfoEntryIndices = nullptr;
-    if (pImageInfoEntryIndices)
-    {
-        local_pImageInfoEntryIndices = (uint32_t*)pool->dupArray(pImageInfoEntryIndices, ((imageInfoCount)) * sizeof(const uint32_t));
-    }
-    local_pBufferInfoEntryIndices = nullptr;
-    if (pBufferInfoEntryIndices)
-    {
-        local_pBufferInfoEntryIndices = (uint32_t*)pool->dupArray(pBufferInfoEntryIndices, ((bufferInfoCount)) * sizeof(const uint32_t));
-    }
-    local_pBufferViewEntryIndices = nullptr;
-    if (pBufferViewEntryIndices)
-    {
-        local_pBufferViewEntryIndices = (uint32_t*)pool->dupArray(pBufferViewEntryIndices, ((bufferViewCount)) * sizeof(const uint32_t));
-    }
+    // Avoiding deepcopy for pImageInfoEntryIndices
+    local_pImageInfoEntryIndices = (uint32_t*)pImageInfoEntryIndices;
+    // Avoiding deepcopy for pBufferInfoEntryIndices
+    local_pBufferInfoEntryIndices = (uint32_t*)pBufferInfoEntryIndices;
+    // Avoiding deepcopy for pBufferViewEntryIndices
+    local_pBufferViewEntryIndices = (uint32_t*)pBufferViewEntryIndices;
     local_pImageInfos = nullptr;
     if (pImageInfos)
     {
         local_pImageInfos = (VkDescriptorImageInfo*)pool->alloc(((imageInfoCount)) * sizeof(const VkDescriptorImageInfo));
         for (uint32_t i = 0; i < (uint32_t)((imageInfoCount)); ++i)
         {
-            deepcopy_VkDescriptorImageInfo(pool, pImageInfos + i, (VkDescriptorImageInfo*)(local_pImageInfos + i));
+            deepcopy_VkDescriptorImageInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pImageInfos + i, (VkDescriptorImageInfo*)(local_pImageInfos + i));
         }
     }
     local_pBufferInfos = nullptr;
@@ -22773,187 +33170,201 @@
         local_pBufferInfos = (VkDescriptorBufferInfo*)pool->alloc(((bufferInfoCount)) * sizeof(const VkDescriptorBufferInfo));
         for (uint32_t i = 0; i < (uint32_t)((bufferInfoCount)); ++i)
         {
-            deepcopy_VkDescriptorBufferInfo(pool, pBufferInfos + i, (VkDescriptorBufferInfo*)(local_pBufferInfos + i));
+            deepcopy_VkDescriptorBufferInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pBufferInfos + i, (VkDescriptorBufferInfo*)(local_pBufferInfos + i));
         }
     }
-    local_pBufferViews = nullptr;
-    if (pBufferViews)
-    {
-        local_pBufferViews = (VkBufferView*)pool->dupArray(pBufferViews, ((bufferViewCount)) * sizeof(const VkBufferView));
-    }
+    // Avoiding deepcopy for pBufferViews
+    local_pBufferViews = (VkBufferView*)pBufferViews;
     if (local_pImageInfos)
     {
         for (uint32_t i = 0; i < (uint32_t)((imageInfoCount)); ++i)
         {
-            transform_tohost_VkDescriptorImageInfo(mImpl->resources(), (VkDescriptorImageInfo*)(local_pImageInfos + i));
+            transform_tohost_VkDescriptorImageInfo(sResourceTracker, (VkDescriptorImageInfo*)(local_pImageInfos + i));
         }
     }
     if (local_pBufferInfos)
     {
         for (uint32_t i = 0; i < (uint32_t)((bufferInfoCount)); ++i)
         {
-            transform_tohost_VkDescriptorBufferInfo(mImpl->resources(), (VkDescriptorBufferInfo*)(local_pBufferInfos + i));
+            transform_tohost_VkDescriptorBufferInfo(sResourceTracker, (VkDescriptorBufferInfo*)(local_pBufferInfos + i));
         }
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1492;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1492, 1);
-        countingStream->write((uint64_t*)&cgen_var_1492, 1 * 8);
-        uint64_t cgen_var_1493;
-        countingStream->handleMapping()->mapHandles_VkDescriptorSet_u64(&local_descriptorSet, &cgen_var_1493, 1);
-        countingStream->write((uint64_t*)&cgen_var_1493, 1 * 8);
-        uint64_t cgen_var_1494;
-        countingStream->handleMapping()->mapHandles_VkDescriptorUpdateTemplate_u64(&local_descriptorUpdateTemplate, &cgen_var_1494, 1);
-        countingStream->write((uint64_t*)&cgen_var_1494, 1 * 8);
-        countingStream->write((uint32_t*)&local_imageInfoCount, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_bufferInfoCount, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_bufferViewCount, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1495 = (uint64_t)(uintptr_t)local_pImageInfoEntryIndices;
-        countingStream->putBe64(cgen_var_1495);
+        *countPtr += 8;
         if (local_pImageInfoEntryIndices)
         {
-            countingStream->write((uint32_t*)local_pImageInfoEntryIndices, ((imageInfoCount)) * sizeof(uint32_t));
+            *countPtr += ((imageInfoCount)) * sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_1496 = (uint64_t)(uintptr_t)local_pBufferInfoEntryIndices;
-        countingStream->putBe64(cgen_var_1496);
+        *countPtr += 8;
         if (local_pBufferInfoEntryIndices)
         {
-            countingStream->write((uint32_t*)local_pBufferInfoEntryIndices, ((bufferInfoCount)) * sizeof(uint32_t));
+            *countPtr += ((bufferInfoCount)) * sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_1497 = (uint64_t)(uintptr_t)local_pBufferViewEntryIndices;
-        countingStream->putBe64(cgen_var_1497);
+        *countPtr += 8;
         if (local_pBufferViewEntryIndices)
         {
-            countingStream->write((uint32_t*)local_pBufferViewEntryIndices, ((bufferViewCount)) * sizeof(uint32_t));
+            *countPtr += ((bufferViewCount)) * sizeof(uint32_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_1498 = (uint64_t)(uintptr_t)local_pImageInfos;
-        countingStream->putBe64(cgen_var_1498);
+        *countPtr += 8;
         if (local_pImageInfos)
         {
             for (uint32_t i = 0; i < (uint32_t)((imageInfoCount)); ++i)
             {
-                marshal_VkDescriptorImageInfo(countingStream, (VkDescriptorImageInfo*)(local_pImageInfos + i));
+                count_VkDescriptorImageInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorImageInfo*)(local_pImageInfos + i), countPtr);
             }
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_1499 = (uint64_t)(uintptr_t)local_pBufferInfos;
-        countingStream->putBe64(cgen_var_1499);
+        *countPtr += 8;
         if (local_pBufferInfos)
         {
             for (uint32_t i = 0; i < (uint32_t)((bufferInfoCount)); ++i)
             {
-                marshal_VkDescriptorBufferInfo(countingStream, (VkDescriptorBufferInfo*)(local_pBufferInfos + i));
+                count_VkDescriptorBufferInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorBufferInfo*)(local_pBufferInfos + i), countPtr);
             }
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_1500 = (uint64_t)(uintptr_t)local_pBufferViews;
-        countingStream->putBe64(cgen_var_1500);
+        *countPtr += 8;
         if (local_pBufferViews)
         {
             if (((bufferViewCount)))
             {
-                uint64_t* cgen_var_1501;
-                countingStream->alloc((void**)&cgen_var_1501, ((bufferViewCount)) * 8);
-                countingStream->handleMapping()->mapHandles_VkBufferView_u64(local_pBufferViews, cgen_var_1501, ((bufferViewCount)));
-                countingStream->write((uint64_t*)cgen_var_1501, ((bufferViewCount)) * 8);
+                *countPtr += ((bufferViewCount)) * 8;
             }
         }
     }
-    uint32_t packetSize_vkUpdateDescriptorSetWithTemplateSizedGOOGLE = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkUpdateDescriptorSetWithTemplateSizedGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkUpdateDescriptorSetWithTemplateSizedGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkUpdateDescriptorSetWithTemplateSizedGOOGLE = OP_vkUpdateDescriptorSetWithTemplateSizedGOOGLE;
-    stream->write(&opcode_vkUpdateDescriptorSetWithTemplateSizedGOOGLE, sizeof(uint32_t));
-    stream->write(&packetSize_vkUpdateDescriptorSetWithTemplateSizedGOOGLE, sizeof(uint32_t));
-    uint64_t cgen_var_1502;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1502, 1);
-    stream->write((uint64_t*)&cgen_var_1502, 1 * 8);
-    uint64_t cgen_var_1503;
-    stream->handleMapping()->mapHandles_VkDescriptorSet_u64(&local_descriptorSet, &cgen_var_1503, 1);
-    stream->write((uint64_t*)&cgen_var_1503, 1 * 8);
-    uint64_t cgen_var_1504;
-    stream->handleMapping()->mapHandles_VkDescriptorUpdateTemplate_u64(&local_descriptorUpdateTemplate, &cgen_var_1504, 1);
-    stream->write((uint64_t*)&cgen_var_1504, 1 * 8);
-    stream->write((uint32_t*)&local_imageInfoCount, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_bufferInfoCount, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_bufferViewCount, sizeof(uint32_t));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkUpdateDescriptorSetWithTemplateSizedGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkUpdateDescriptorSetWithTemplateSizedGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDescriptorSet((*&local_descriptorSet));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = get_host_u64_VkDescriptorUpdateTemplate((*&local_descriptorUpdateTemplate));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_imageInfoCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_bufferInfoCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_bufferViewCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1505 = (uint64_t)(uintptr_t)local_pImageInfoEntryIndices;
-    stream->putBe64(cgen_var_1505);
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)local_pImageInfoEntryIndices;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pImageInfoEntryIndices)
     {
-        stream->write((uint32_t*)local_pImageInfoEntryIndices, ((imageInfoCount)) * sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)local_pImageInfoEntryIndices, ((imageInfoCount)) * sizeof(uint32_t));
+        *streamPtrPtr += ((imageInfoCount)) * sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1506 = (uint64_t)(uintptr_t)local_pBufferInfoEntryIndices;
-    stream->putBe64(cgen_var_1506);
+    uint64_t cgen_var_4 = (uint64_t)(uintptr_t)local_pBufferInfoEntryIndices;
+    memcpy((*streamPtrPtr), &cgen_var_4, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pBufferInfoEntryIndices)
     {
-        stream->write((uint32_t*)local_pBufferInfoEntryIndices, ((bufferInfoCount)) * sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)local_pBufferInfoEntryIndices, ((bufferInfoCount)) * sizeof(uint32_t));
+        *streamPtrPtr += ((bufferInfoCount)) * sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1507 = (uint64_t)(uintptr_t)local_pBufferViewEntryIndices;
-    stream->putBe64(cgen_var_1507);
+    uint64_t cgen_var_5 = (uint64_t)(uintptr_t)local_pBufferViewEntryIndices;
+    memcpy((*streamPtrPtr), &cgen_var_5, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pBufferViewEntryIndices)
     {
-        stream->write((uint32_t*)local_pBufferViewEntryIndices, ((bufferViewCount)) * sizeof(uint32_t));
+        memcpy(*streamPtrPtr, (uint32_t*)local_pBufferViewEntryIndices, ((bufferViewCount)) * sizeof(uint32_t));
+        *streamPtrPtr += ((bufferViewCount)) * sizeof(uint32_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1508 = (uint64_t)(uintptr_t)local_pImageInfos;
-    stream->putBe64(cgen_var_1508);
+    uint64_t cgen_var_6 = (uint64_t)(uintptr_t)local_pImageInfos;
+    memcpy((*streamPtrPtr), &cgen_var_6, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pImageInfos)
     {
         for (uint32_t i = 0; i < (uint32_t)((imageInfoCount)); ++i)
         {
-            marshal_VkDescriptorImageInfo(stream, (VkDescriptorImageInfo*)(local_pImageInfos + i));
+            reservedmarshal_VkDescriptorImageInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorImageInfo*)(local_pImageInfos + i), streamPtrPtr);
         }
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1509 = (uint64_t)(uintptr_t)local_pBufferInfos;
-    stream->putBe64(cgen_var_1509);
+    uint64_t cgen_var_7 = (uint64_t)(uintptr_t)local_pBufferInfos;
+    memcpy((*streamPtrPtr), &cgen_var_7, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pBufferInfos)
     {
         for (uint32_t i = 0; i < (uint32_t)((bufferInfoCount)); ++i)
         {
-            marshal_VkDescriptorBufferInfo(stream, (VkDescriptorBufferInfo*)(local_pBufferInfos + i));
+            reservedmarshal_VkDescriptorBufferInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkDescriptorBufferInfo*)(local_pBufferInfos + i), streamPtrPtr);
         }
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1510 = (uint64_t)(uintptr_t)local_pBufferViews;
-    stream->putBe64(cgen_var_1510);
+    uint64_t cgen_var_8 = (uint64_t)(uintptr_t)local_pBufferViews;
+    memcpy((*streamPtrPtr), &cgen_var_8, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pBufferViews)
     {
         if (((bufferViewCount)))
         {
-            uint64_t* cgen_var_1511;
-            stream->alloc((void**)&cgen_var_1511, ((bufferViewCount)) * 8);
-            stream->handleMapping()->mapHandles_VkBufferView_u64(local_pBufferViews, cgen_var_1511, ((bufferViewCount)));
-            stream->write((uint64_t*)cgen_var_1511, ((bufferViewCount)) * 8);
+            uint8_t* cgen_var_8_0_ptr = (uint8_t*)(*streamPtrPtr);
+            for (uint32_t k = 0; k < ((bufferViewCount)); ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkBufferView(local_pBufferViews[k]);
+                memcpy(cgen_var_8_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+            *streamPtrPtr += 8 * ((bufferViewCount));
         }
     }
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplateSizedGOOGLE readParams");
-    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplateSizedGOOGLE returnUnmarshal");
-    mImpl->log("finish vkUpdateDescriptorSetWithTemplateSizedGOOGLE");;
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
-#endif
-#ifdef VK_GOOGLE_async_command_buffers
 void VkEncoder::vkBeginCommandBufferAsyncGOOGLE(
     VkCommandBuffer commandBuffer,
-    const VkCommandBufferBeginInfo* pBeginInfo)
+    const VkCommandBufferBeginInfo* pBeginInfo,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkBeginCommandBufferAsyncGOOGLE encode");
-    mImpl->log("start vkBeginCommandBufferAsyncGOOGLE");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkCommandBufferBeginInfo* local_pBeginInfo;
     local_commandBuffer = commandBuffer;
@@ -22961,161 +33372,195 @@
     if (pBeginInfo)
     {
         local_pBeginInfo = (VkCommandBufferBeginInfo*)pool->alloc(sizeof(const VkCommandBufferBeginInfo));
-        deepcopy_VkCommandBufferBeginInfo(pool, pBeginInfo, (VkCommandBufferBeginInfo*)(local_pBeginInfo));
+        deepcopy_VkCommandBufferBeginInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pBeginInfo, (VkCommandBufferBeginInfo*)(local_pBeginInfo));
     }
     if (local_pBeginInfo)
     {
-        transform_tohost_VkCommandBufferBeginInfo(mImpl->resources(), (VkCommandBufferBeginInfo*)(local_pBeginInfo));
+        transform_tohost_VkCommandBufferBeginInfo(sResourceTracker, (VkCommandBufferBeginInfo*)(local_pBeginInfo));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1512;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1512, 1);
-        countingStream->write((uint64_t*)&cgen_var_1512, 1 * 8);
-        marshal_VkCommandBufferBeginInfo(countingStream, (VkCommandBufferBeginInfo*)(local_pBeginInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkCommandBufferBeginInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCommandBufferBeginInfo*)(local_pBeginInfo), countPtr);
     }
-    uint32_t packetSize_vkBeginCommandBufferAsyncGOOGLE = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkBeginCommandBufferAsyncGOOGLE = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkBeginCommandBufferAsyncGOOGLE -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkBeginCommandBufferAsyncGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkBeginCommandBufferAsyncGOOGLE = OP_vkBeginCommandBufferAsyncGOOGLE;
-    stream->write(&opcode_vkBeginCommandBufferAsyncGOOGLE, sizeof(uint32_t));
-    stream->write(&packetSize_vkBeginCommandBufferAsyncGOOGLE, sizeof(uint32_t));
-    uint64_t cgen_var_1513;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1513, 1);
-    stream->write((uint64_t*)&cgen_var_1513, 1 * 8);
-    marshal_VkCommandBufferBeginInfo(stream, (VkCommandBufferBeginInfo*)(local_pBeginInfo));
-    AEMU_SCOPED_TRACE("vkBeginCommandBufferAsyncGOOGLE readParams");
-    AEMU_SCOPED_TRACE("vkBeginCommandBufferAsyncGOOGLE returnUnmarshal");
-    mImpl->log("finish vkBeginCommandBufferAsyncGOOGLE");;
+    memcpy(streamPtr, &opcode_vkBeginCommandBufferAsyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkBeginCommandBufferAsyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkCommandBufferBeginInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCommandBufferBeginInfo*)(local_pBeginInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkEndCommandBufferAsyncGOOGLE(
-    VkCommandBuffer commandBuffer)
+    VkCommandBuffer commandBuffer,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkEndCommandBufferAsyncGOOGLE encode");
-    mImpl->log("start vkEndCommandBufferAsyncGOOGLE");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     local_commandBuffer = commandBuffer;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1514;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1514, 1);
-        countingStream->write((uint64_t*)&cgen_var_1514, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
     }
-    uint32_t packetSize_vkEndCommandBufferAsyncGOOGLE = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkEndCommandBufferAsyncGOOGLE = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkEndCommandBufferAsyncGOOGLE -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkEndCommandBufferAsyncGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkEndCommandBufferAsyncGOOGLE = OP_vkEndCommandBufferAsyncGOOGLE;
-    stream->write(&opcode_vkEndCommandBufferAsyncGOOGLE, sizeof(uint32_t));
-    stream->write(&packetSize_vkEndCommandBufferAsyncGOOGLE, sizeof(uint32_t));
-    uint64_t cgen_var_1515;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1515, 1);
-    stream->write((uint64_t*)&cgen_var_1515, 1 * 8);
-    AEMU_SCOPED_TRACE("vkEndCommandBufferAsyncGOOGLE readParams");
-    AEMU_SCOPED_TRACE("vkEndCommandBufferAsyncGOOGLE returnUnmarshal");
-    mImpl->log("finish vkEndCommandBufferAsyncGOOGLE");;
+    memcpy(streamPtr, &opcode_vkEndCommandBufferAsyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkEndCommandBufferAsyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkResetCommandBufferAsyncGOOGLE(
     VkCommandBuffer commandBuffer,
-    VkCommandBufferResetFlags flags)
+    VkCommandBufferResetFlags flags,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkResetCommandBufferAsyncGOOGLE encode");
-    mImpl->log("start vkResetCommandBufferAsyncGOOGLE");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     VkCommandBufferResetFlags local_flags;
     local_commandBuffer = commandBuffer;
     local_flags = flags;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1516;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1516, 1);
-        countingStream->write((uint64_t*)&cgen_var_1516, 1 * 8);
-        countingStream->write((VkCommandBufferResetFlags*)&local_flags, sizeof(VkCommandBufferResetFlags));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkCommandBufferResetFlags);
     }
-    uint32_t packetSize_vkResetCommandBufferAsyncGOOGLE = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkResetCommandBufferAsyncGOOGLE = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkResetCommandBufferAsyncGOOGLE -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkResetCommandBufferAsyncGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkResetCommandBufferAsyncGOOGLE = OP_vkResetCommandBufferAsyncGOOGLE;
-    stream->write(&opcode_vkResetCommandBufferAsyncGOOGLE, sizeof(uint32_t));
-    stream->write(&packetSize_vkResetCommandBufferAsyncGOOGLE, sizeof(uint32_t));
-    uint64_t cgen_var_1517;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1517, 1);
-    stream->write((uint64_t*)&cgen_var_1517, 1 * 8);
-    stream->write((VkCommandBufferResetFlags*)&local_flags, sizeof(VkCommandBufferResetFlags));
-    AEMU_SCOPED_TRACE("vkResetCommandBufferAsyncGOOGLE readParams");
-    AEMU_SCOPED_TRACE("vkResetCommandBufferAsyncGOOGLE returnUnmarshal");
-    mImpl->log("finish vkResetCommandBufferAsyncGOOGLE");;
+    memcpy(streamPtr, &opcode_vkResetCommandBufferAsyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkResetCommandBufferAsyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (VkCommandBufferResetFlags*)&local_flags, sizeof(VkCommandBufferResetFlags));
+    *streamPtrPtr += sizeof(VkCommandBufferResetFlags);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
 void VkEncoder::vkCommandBufferHostSyncGOOGLE(
     VkCommandBuffer commandBuffer,
     uint32_t needHostSync,
-    uint32_t sequenceNumber)
+    uint32_t sequenceNumber,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCommandBufferHostSyncGOOGLE encode");
-    mImpl->log("start vkCommandBufferHostSyncGOOGLE");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkCommandBuffer local_commandBuffer;
     uint32_t local_needHostSync;
     uint32_t local_sequenceNumber;
     local_commandBuffer = commandBuffer;
     local_needHostSync = needHostSync;
     local_sequenceNumber = sequenceNumber;
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1518;
-        countingStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1518, 1);
-        countingStream->write((uint64_t*)&cgen_var_1518, 1 * 8);
-        countingStream->write((uint32_t*)&local_needHostSync, sizeof(uint32_t));
-        countingStream->write((uint32_t*)&local_sequenceNumber, sizeof(uint32_t));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
     }
-    uint32_t packetSize_vkCommandBufferHostSyncGOOGLE = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCommandBufferHostSyncGOOGLE = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCommandBufferHostSyncGOOGLE -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCommandBufferHostSyncGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCommandBufferHostSyncGOOGLE = OP_vkCommandBufferHostSyncGOOGLE;
-    stream->write(&opcode_vkCommandBufferHostSyncGOOGLE, sizeof(uint32_t));
-    stream->write(&packetSize_vkCommandBufferHostSyncGOOGLE, sizeof(uint32_t));
-    uint64_t cgen_var_1519;
-    stream->handleMapping()->mapHandles_VkCommandBuffer_u64(&local_commandBuffer, &cgen_var_1519, 1);
-    stream->write((uint64_t*)&cgen_var_1519, 1 * 8);
-    stream->write((uint32_t*)&local_needHostSync, sizeof(uint32_t));
-    stream->write((uint32_t*)&local_sequenceNumber, sizeof(uint32_t));
-    AEMU_SCOPED_TRACE("vkCommandBufferHostSyncGOOGLE readParams");
-    AEMU_SCOPED_TRACE("vkCommandBufferHostSyncGOOGLE returnUnmarshal");
-    mImpl->log("finish vkCommandBufferHostSyncGOOGLE");;
+    memcpy(streamPtr, &opcode_vkCommandBufferHostSyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCommandBufferHostSyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_needHostSync, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_sequenceNumber, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
 }
 
-#endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
 VkResult VkEncoder::vkCreateImageWithRequirementsGOOGLE(
     VkDevice device,
     const VkImageCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
     VkImage* pImage,
-    VkMemoryRequirements* pMemoryRequirements)
+    VkMemoryRequirements* pMemoryRequirements,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateImageWithRequirementsGOOGLE encode");
-    mImpl->log("start vkCreateImageWithRequirementsGOOGLE");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkImageCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -23124,82 +33569,88 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkImageCreateInfo*)pool->alloc(sizeof(const VkImageCreateInfo));
-        deepcopy_VkImageCreateInfo(pool, pCreateInfo, (VkImageCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkImageCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkImageCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    mImpl->resources()->unwrap_VkNativeBufferANDROID(pCreateInfo, local_pCreateInfo);
+    sResourceTracker->unwrap_VkNativeBufferANDROID(pCreateInfo, local_pCreateInfo);
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkImageCreateInfo(mImpl->resources(), (VkImageCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkImageCreateInfo(sResourceTracker, (VkImageCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1520;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1520, 1);
-        countingStream->write((uint64_t*)&cgen_var_1520, 1 * 8);
-        marshal_VkImageCreateInfo(countingStream, (VkImageCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkImageCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1521 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1521);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_1522;
-        countingStream->handleMapping()->mapHandles_VkImage_u64(pImage, &cgen_var_1522, 1);
-        countingStream->write((uint64_t*)&cgen_var_1522, 8);
-        marshal_VkMemoryRequirements(countingStream, (VkMemoryRequirements*)(pMemoryRequirements));
+        uint64_t cgen_var_1;
+        *countPtr += 8;
+        count_VkMemoryRequirements(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements*)(pMemoryRequirements), countPtr);
     }
-    uint32_t packetSize_vkCreateImageWithRequirementsGOOGLE = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateImageWithRequirementsGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateImageWithRequirementsGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateImageWithRequirementsGOOGLE = OP_vkCreateImageWithRequirementsGOOGLE;
-    stream->write(&opcode_vkCreateImageWithRequirementsGOOGLE, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateImageWithRequirementsGOOGLE, sizeof(uint32_t));
-    uint64_t cgen_var_1523;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1523, 1);
-    stream->write((uint64_t*)&cgen_var_1523, 1 * 8);
-    marshal_VkImageCreateInfo(stream, (VkImageCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateImageWithRequirementsGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateImageWithRequirementsGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkImageCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkImageCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1524 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1524);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_1525;
-    stream->handleMapping()->mapHandles_VkImage_u64(pImage, &cgen_var_1525, 1);
-    stream->write((uint64_t*)&cgen_var_1525, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    marshal_VkMemoryRequirements(stream, (VkMemoryRequirements*)(pMemoryRequirements));
-    AEMU_SCOPED_TRACE("vkCreateImageWithRequirementsGOOGLE readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_1526;
-    stream->read((uint64_t*)&cgen_var_1526, 8);
-    stream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_1526, (VkImage*)pImage, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pImage));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    reservedmarshal_VkMemoryRequirements(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements*)(pMemoryRequirements), streamPtrPtr);
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_3, (VkImage*)pImage, 1);
     stream->unsetHandleMapping();
-    unmarshal_VkMemoryRequirements(stream, (VkMemoryRequirements*)(pMemoryRequirements));
+    unmarshal_VkMemoryRequirements(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements*)(pMemoryRequirements));
     if (pMemoryRequirements)
     {
-        transform_fromhost_VkMemoryRequirements(mImpl->resources(), (VkMemoryRequirements*)(pMemoryRequirements));
+        transform_fromhost_VkMemoryRequirements(sResourceTracker, (VkMemoryRequirements*)(pMemoryRequirements));
     }
-    AEMU_SCOPED_TRACE("vkCreateImageWithRequirementsGOOGLE returnUnmarshal");
     VkResult vkCreateImageWithRequirementsGOOGLE_VkResult_return = (VkResult)0;
     stream->read(&vkCreateImageWithRequirementsGOOGLE_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateImageWithRequirementsGOOGLE");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateImageWithRequirementsGOOGLE_VkResult_return;
 }
 
@@ -23208,16 +33659,14 @@
     const VkBufferCreateInfo* pCreateInfo,
     const VkAllocationCallbacks* pAllocator,
     VkBuffer* pBuffer,
-    VkMemoryRequirements* pMemoryRequirements)
+    VkMemoryRequirements* pMemoryRequirements,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkCreateBufferWithRequirementsGOOGLE encode");
-    mImpl->log("start vkCreateBufferWithRequirementsGOOGLE");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkBufferCreateInfo* local_pCreateInfo;
     VkAllocationCallbacks* local_pAllocator;
@@ -23226,169 +33675,180 @@
     if (pCreateInfo)
     {
         local_pCreateInfo = (VkBufferCreateInfo*)pool->alloc(sizeof(const VkBufferCreateInfo));
-        deepcopy_VkBufferCreateInfo(pool, pCreateInfo, (VkBufferCreateInfo*)(local_pCreateInfo));
+        deepcopy_VkBufferCreateInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkBufferCreateInfo*)(local_pCreateInfo));
     }
     local_pAllocator = nullptr;
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
     if (local_pCreateInfo)
     {
-        transform_tohost_VkBufferCreateInfo(mImpl->resources(), (VkBufferCreateInfo*)(local_pCreateInfo));
+        transform_tohost_VkBufferCreateInfo(sResourceTracker, (VkBufferCreateInfo*)(local_pCreateInfo));
     }
     if (local_pAllocator)
     {
-        transform_tohost_VkAllocationCallbacks(mImpl->resources(), (VkAllocationCallbacks*)(local_pAllocator));
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
     }
-    countingStream->rewind();
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1527;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1527, 1);
-        countingStream->write((uint64_t*)&cgen_var_1527, 1 * 8);
-        marshal_VkBufferCreateInfo(countingStream, (VkBufferCreateInfo*)(local_pCreateInfo));
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkBufferCreateInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferCreateInfo*)(local_pCreateInfo), countPtr);
         // WARNING PTR CHECK
-        uint64_t cgen_var_1528 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1528);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
-        uint64_t cgen_var_1529;
-        countingStream->handleMapping()->mapHandles_VkBuffer_u64(pBuffer, &cgen_var_1529, 1);
-        countingStream->write((uint64_t*)&cgen_var_1529, 8);
-        marshal_VkMemoryRequirements(countingStream, (VkMemoryRequirements*)(pMemoryRequirements));
+        uint64_t cgen_var_1;
+        *countPtr += 8;
+        count_VkMemoryRequirements(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements*)(pMemoryRequirements), countPtr);
     }
-    uint32_t packetSize_vkCreateBufferWithRequirementsGOOGLE = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkCreateBufferWithRequirementsGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateBufferWithRequirementsGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkCreateBufferWithRequirementsGOOGLE = OP_vkCreateBufferWithRequirementsGOOGLE;
-    stream->write(&opcode_vkCreateBufferWithRequirementsGOOGLE, sizeof(uint32_t));
-    stream->write(&packetSize_vkCreateBufferWithRequirementsGOOGLE, sizeof(uint32_t));
-    uint64_t cgen_var_1530;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1530, 1);
-    stream->write((uint64_t*)&cgen_var_1530, 1 * 8);
-    marshal_VkBufferCreateInfo(stream, (VkBufferCreateInfo*)(local_pCreateInfo));
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateBufferWithRequirementsGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateBufferWithRequirementsGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkBufferCreateInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBufferCreateInfo*)(local_pCreateInfo), streamPtrPtr);
     // WARNING PTR CHECK
-    uint64_t cgen_var_1531 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1531);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    stream->unsetHandleMapping() /* emit_marshal, is handle, possibly out */;
-    uint64_t cgen_var_1532;
-    stream->handleMapping()->mapHandles_VkBuffer_u64(pBuffer, &cgen_var_1532, 1);
-    stream->write((uint64_t*)&cgen_var_1532, 8);
-    stream->setHandleMapping(resources->unwrapMapping());
-    marshal_VkMemoryRequirements(stream, (VkMemoryRequirements*)(pMemoryRequirements));
-    AEMU_SCOPED_TRACE("vkCreateBufferWithRequirementsGOOGLE readParams");
-    stream->setHandleMapping(resources->createMapping());
-    uint64_t cgen_var_1533;
-    stream->read((uint64_t*)&cgen_var_1533, 8);
-    stream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_1533, (VkBuffer*)pBuffer, 1);
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pBuffer));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    reservedmarshal_VkMemoryRequirements(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements*)(pMemoryRequirements), streamPtrPtr);
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_3, (VkBuffer*)pBuffer, 1);
     stream->unsetHandleMapping();
-    unmarshal_VkMemoryRequirements(stream, (VkMemoryRequirements*)(pMemoryRequirements));
+    unmarshal_VkMemoryRequirements(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkMemoryRequirements*)(pMemoryRequirements));
     if (pMemoryRequirements)
     {
-        transform_fromhost_VkMemoryRequirements(mImpl->resources(), (VkMemoryRequirements*)(pMemoryRequirements));
+        transform_fromhost_VkMemoryRequirements(sResourceTracker, (VkMemoryRequirements*)(pMemoryRequirements));
     }
-    AEMU_SCOPED_TRACE("vkCreateBufferWithRequirementsGOOGLE returnUnmarshal");
     VkResult vkCreateBufferWithRequirementsGOOGLE_VkResult_return = (VkResult)0;
     stream->read(&vkCreateBufferWithRequirementsGOOGLE_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkCreateBufferWithRequirementsGOOGLE");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkCreateBufferWithRequirementsGOOGLE_VkResult_return;
 }
 
-#endif
-#ifdef VK_GOOGLE_address_space_info
 VkResult VkEncoder::vkGetMemoryHostAddressInfoGOOGLE(
     VkDevice device,
     VkDeviceMemory memory,
     uint64_t* pAddress,
     uint64_t* pSize,
-    uint64_t* pHostmemId)
+    uint64_t* pHostmemId,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkGetMemoryHostAddressInfoGOOGLE encode");
-    mImpl->log("start vkGetMemoryHostAddressInfoGOOGLE");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDeviceMemory local_memory;
     local_device = device;
     local_memory = memory;
-    mImpl->resources()->deviceMemoryTransform_tohost((VkDeviceMemory*)&local_memory, 1, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
-    countingStream->rewind();
+    sResourceTracker->deviceMemoryTransform_tohost((VkDeviceMemory*)&local_memory, 1, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1534;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1534, 1);
-        countingStream->write((uint64_t*)&cgen_var_1534, 1 * 8);
-        uint64_t cgen_var_1535;
-        countingStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&local_memory, &cgen_var_1535, 1);
-        countingStream->write((uint64_t*)&cgen_var_1535, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_1536 = (uint64_t)(uintptr_t)pAddress;
-        countingStream->putBe64(cgen_var_1536);
+        *countPtr += 8;
         if (pAddress)
         {
-            countingStream->write((uint64_t*)pAddress, sizeof(uint64_t));
+            *countPtr += sizeof(uint64_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_1537 = (uint64_t)(uintptr_t)pSize;
-        countingStream->putBe64(cgen_var_1537);
+        *countPtr += 8;
         if (pSize)
         {
-            countingStream->write((uint64_t*)pSize, sizeof(uint64_t));
+            *countPtr += sizeof(uint64_t);
         }
         // WARNING PTR CHECK
-        uint64_t cgen_var_1538 = (uint64_t)(uintptr_t)pHostmemId;
-        countingStream->putBe64(cgen_var_1538);
+        *countPtr += 8;
         if (pHostmemId)
         {
-            countingStream->write((uint64_t*)pHostmemId, sizeof(uint64_t));
+            *countPtr += sizeof(uint64_t);
         }
     }
-    uint32_t packetSize_vkGetMemoryHostAddressInfoGOOGLE = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkGetMemoryHostAddressInfoGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetMemoryHostAddressInfoGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkGetMemoryHostAddressInfoGOOGLE = OP_vkGetMemoryHostAddressInfoGOOGLE;
-    stream->write(&opcode_vkGetMemoryHostAddressInfoGOOGLE, sizeof(uint32_t));
-    stream->write(&packetSize_vkGetMemoryHostAddressInfoGOOGLE, sizeof(uint32_t));
-    uint64_t cgen_var_1539;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1539, 1);
-    stream->write((uint64_t*)&cgen_var_1539, 1 * 8);
-    uint64_t cgen_var_1540;
-    stream->handleMapping()->mapHandles_VkDeviceMemory_u64(&local_memory, &cgen_var_1540, 1);
-    stream->write((uint64_t*)&cgen_var_1540, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetMemoryHostAddressInfoGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetMemoryHostAddressInfoGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDeviceMemory((*&local_memory));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_1541 = (uint64_t)(uintptr_t)pAddress;
-    stream->putBe64(cgen_var_1541);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pAddress;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pAddress)
     {
-        stream->write((uint64_t*)pAddress, sizeof(uint64_t));
+        memcpy(*streamPtrPtr, (uint64_t*)pAddress, sizeof(uint64_t));
+        *streamPtrPtr += sizeof(uint64_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1542 = (uint64_t)(uintptr_t)pSize;
-    stream->putBe64(cgen_var_1542);
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)pSize;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pSize)
     {
-        stream->write((uint64_t*)pSize, sizeof(uint64_t));
+        memcpy(*streamPtrPtr, (uint64_t*)pSize, sizeof(uint64_t));
+        *streamPtrPtr += sizeof(uint64_t);
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_1543 = (uint64_t)(uintptr_t)pHostmemId;
-    stream->putBe64(cgen_var_1543);
+    uint64_t cgen_var_4 = (uint64_t)(uintptr_t)pHostmemId;
+    memcpy((*streamPtrPtr), &cgen_var_4, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (pHostmemId)
     {
-        stream->write((uint64_t*)pHostmemId, sizeof(uint64_t));
+        memcpy(*streamPtrPtr, (uint64_t*)pHostmemId, sizeof(uint64_t));
+        *streamPtrPtr += sizeof(uint64_t);
     }
-    AEMU_SCOPED_TRACE("vkGetMemoryHostAddressInfoGOOGLE readParams");
     // WARNING PTR CHECK
     uint64_t* check_pAddress;
     check_pAddress = (uint64_t*)(uintptr_t)stream->getBe64();
@@ -23422,31 +33882,29 @@
         }
         stream->read((uint64_t*)pHostmemId, sizeof(uint64_t));
     }
-    AEMU_SCOPED_TRACE("vkGetMemoryHostAddressInfoGOOGLE returnUnmarshal");
     VkResult vkGetMemoryHostAddressInfoGOOGLE_VkResult_return = (VkResult)0;
     stream->read(&vkGetMemoryHostAddressInfoGOOGLE_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    mImpl->log("finish vkGetMemoryHostAddressInfoGOOGLE");;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkGetMemoryHostAddressInfoGOOGLE_VkResult_return;
 }
 
-#endif
-#ifdef VK_GOOGLE_free_memory_sync
 VkResult VkEncoder::vkFreeMemorySyncGOOGLE(
     VkDevice device,
     VkDeviceMemory memory,
-    const VkAllocationCallbacks* pAllocator)
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
 {
-    AutoLock encoderLock(mImpl->lock);
-    AEMU_SCOPED_TRACE("vkFreeMemorySyncGOOGLE encode");
-    mImpl->log("start vkFreeMemorySyncGOOGLE");
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
     auto stream = mImpl->stream();
-    auto countingStream = mImpl->countingStream();
-    auto resources = mImpl->resources();
     auto pool = mImpl->pool();
-    stream->setHandleMapping(resources->unwrapMapping());
     VkDevice local_device;
     VkDeviceMemory local_memory;
     VkAllocationCallbacks* local_pAllocator;
@@ -23456,56 +33914,2297 @@
     if (pAllocator)
     {
         local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
-        deepcopy_VkAllocationCallbacks(pool, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
     }
     local_pAllocator = nullptr;
-    mImpl->resources()->deviceMemoryTransform_tohost((VkDeviceMemory*)&local_memory, 1, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
-    countingStream->rewind();
+    sResourceTracker->deviceMemoryTransform_tohost((VkDeviceMemory*)&local_memory, 1, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
+    size_t count = 0;
+    size_t* countPtr = &count;
     {
-        uint64_t cgen_var_1547;
-        countingStream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1547, 1);
-        countingStream->write((uint64_t*)&cgen_var_1547, 1 * 8);
-        uint64_t cgen_var_1548;
-        countingStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&local_memory, &cgen_var_1548, 1);
-        countingStream->write((uint64_t*)&cgen_var_1548, 1 * 8);
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
         // WARNING PTR CHECK
-        uint64_t cgen_var_1549 = (uint64_t)(uintptr_t)local_pAllocator;
-        countingStream->putBe64(cgen_var_1549);
+        *countPtr += 8;
         if (local_pAllocator)
         {
-            marshal_VkAllocationCallbacks(countingStream, (VkAllocationCallbacks*)(local_pAllocator));
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
         }
     }
-    uint32_t packetSize_vkFreeMemorySyncGOOGLE = 4 + 4 + (uint32_t)countingStream->bytesWritten();
-    countingStream->rewind();
+    uint32_t packetSize_vkFreeMemorySyncGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkFreeMemorySyncGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
     uint32_t opcode_vkFreeMemorySyncGOOGLE = OP_vkFreeMemorySyncGOOGLE;
-    stream->write(&opcode_vkFreeMemorySyncGOOGLE, sizeof(uint32_t));
-    stream->write(&packetSize_vkFreeMemorySyncGOOGLE, sizeof(uint32_t));
-    uint64_t cgen_var_1550;
-    stream->handleMapping()->mapHandles_VkDevice_u64(&local_device, &cgen_var_1550, 1);
-    stream->write((uint64_t*)&cgen_var_1550, 1 * 8);
-    uint64_t cgen_var_1551;
-    stream->handleMapping()->mapHandles_VkDeviceMemory_u64(&local_memory, &cgen_var_1551, 1);
-    stream->write((uint64_t*)&cgen_var_1551, 1 * 8);
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkFreeMemorySyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkFreeMemorySyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDeviceMemory((*&local_memory));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
     // WARNING PTR CHECK
-    uint64_t cgen_var_1552 = (uint64_t)(uintptr_t)local_pAllocator;
-    stream->putBe64(cgen_var_1552);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
     if (local_pAllocator)
     {
-        marshal_VkAllocationCallbacks(stream, (VkAllocationCallbacks*)(local_pAllocator));
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
     }
-    AEMU_SCOPED_TRACE("vkFreeMemorySyncGOOGLE readParams");
-    AEMU_SCOPED_TRACE("vkFreeMemorySyncGOOGLE returnUnmarshal");
     VkResult vkFreeMemorySyncGOOGLE_VkResult_return = (VkResult)0;
     stream->read(&vkFreeMemorySyncGOOGLE_VkResult_return, sizeof(VkResult));
-    countingStream->clearPool();
-    stream->clearPool();
-    pool->freeAll();
-    resources->destroyMapping()->mapHandles_VkDeviceMemory((VkDeviceMemory*)&memory);
-    mImpl->log("finish vkFreeMemorySyncGOOGLE");;
+    sResourceTracker->destroyMapping()->mapHandles_VkDeviceMemory((VkDeviceMemory*)&memory);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
     return vkFreeMemorySyncGOOGLE_VkResult_return;
 }
 
+void VkEncoder::vkQueueHostSyncGOOGLE(
+    VkQueue queue,
+    uint32_t needHostSync,
+    uint32_t sequenceNumber,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkQueue local_queue;
+    uint32_t local_needHostSync;
+    uint32_t local_sequenceNumber;
+    local_queue = queue;
+    local_needHostSync = needHostSync;
+    local_sequenceNumber = sequenceNumber;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkQueueHostSyncGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkQueueHostSyncGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkQueueHostSyncGOOGLE = OP_vkQueueHostSyncGOOGLE;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkQueueHostSyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkQueueHostSyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueue((*&local_queue));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_needHostSync, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_sequenceNumber, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkQueueSubmitAsyncGOOGLE(
+    VkQueue queue,
+    uint32_t submitCount,
+    const VkSubmitInfo* pSubmits,
+    VkFence fence,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkQueue local_queue;
+    uint32_t local_submitCount;
+    VkSubmitInfo* local_pSubmits;
+    VkFence local_fence;
+    local_queue = queue;
+    local_submitCount = submitCount;
+    local_pSubmits = nullptr;
+    if (pSubmits)
+    {
+        local_pSubmits = (VkSubmitInfo*)pool->alloc(((submitCount)) * sizeof(const VkSubmitInfo));
+        for (uint32_t i = 0; i < (uint32_t)((submitCount)); ++i)
+        {
+            deepcopy_VkSubmitInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pSubmits + i, (VkSubmitInfo*)(local_pSubmits + i));
+        }
+    }
+    local_fence = fence;
+    if (local_pSubmits)
+    {
+        for (uint32_t i = 0; i < (uint32_t)((submitCount)); ++i)
+        {
+            transform_tohost_VkSubmitInfo(sResourceTracker, (VkSubmitInfo*)(local_pSubmits + i));
+        }
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < (uint32_t)((submitCount)); ++i)
+        {
+            count_VkSubmitInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubmitInfo*)(local_pSubmits + i), countPtr);
+        }
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+    }
+    uint32_t packetSize_vkQueueSubmitAsyncGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkQueueSubmitAsyncGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkQueueSubmitAsyncGOOGLE = OP_vkQueueSubmitAsyncGOOGLE;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkQueueSubmitAsyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkQueueSubmitAsyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueue((*&local_queue));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_submitCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)((submitCount)); ++i)
+    {
+        reservedmarshal_VkSubmitInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkSubmitInfo*)(local_pSubmits + i), streamPtrPtr);
+    }
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkFence((*&local_fence));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkQueueWaitIdleAsyncGOOGLE(
+    VkQueue queue,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkQueue local_queue;
+    local_queue = queue;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+    }
+    uint32_t packetSize_vkQueueWaitIdleAsyncGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkQueueWaitIdleAsyncGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkQueueWaitIdleAsyncGOOGLE = OP_vkQueueWaitIdleAsyncGOOGLE;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkQueueWaitIdleAsyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkQueueWaitIdleAsyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueue((*&local_queue));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkQueueBindSparseAsyncGOOGLE(
+    VkQueue queue,
+    uint32_t bindInfoCount,
+    const VkBindSparseInfo* pBindInfo,
+    VkFence fence,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkQueue local_queue;
+    uint32_t local_bindInfoCount;
+    VkBindSparseInfo* local_pBindInfo;
+    VkFence local_fence;
+    local_queue = queue;
+    local_bindInfoCount = bindInfoCount;
+    local_pBindInfo = nullptr;
+    if (pBindInfo)
+    {
+        local_pBindInfo = (VkBindSparseInfo*)pool->alloc(((bindInfoCount)) * sizeof(const VkBindSparseInfo));
+        for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
+        {
+            deepcopy_VkBindSparseInfo(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pBindInfo + i, (VkBindSparseInfo*)(local_pBindInfo + i));
+        }
+    }
+    local_fence = fence;
+    if (local_pBindInfo)
+    {
+        for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
+        {
+            transform_tohost_VkBindSparseInfo(sResourceTracker, (VkBindSparseInfo*)(local_pBindInfo + i));
+        }
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
+        {
+            count_VkBindSparseInfo(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBindSparseInfo*)(local_pBindInfo + i), countPtr);
+        }
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+    }
+    uint32_t packetSize_vkQueueBindSparseAsyncGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkQueueBindSparseAsyncGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkQueueBindSparseAsyncGOOGLE = OP_vkQueueBindSparseAsyncGOOGLE;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkQueueBindSparseAsyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkQueueBindSparseAsyncGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueue((*&local_queue));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_bindInfoCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)((bindInfoCount)); ++i)
+    {
+        reservedmarshal_VkBindSparseInfo(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkBindSparseInfo*)(local_pBindInfo + i), streamPtrPtr);
+    }
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkFence((*&local_fence));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkGetLinearImageLayoutGOOGLE(
+    VkDevice device,
+    VkFormat format,
+    VkDeviceSize* pOffset,
+    VkDeviceSize* pRowPitchAlignment,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkFormat local_format;
+    local_device = device;
+    local_format = format;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkFormat);
+        *countPtr += sizeof(VkDeviceSize);
+        *countPtr += sizeof(VkDeviceSize);
+    }
+    uint32_t packetSize_vkGetLinearImageLayoutGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetLinearImageLayoutGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetLinearImageLayoutGOOGLE = OP_vkGetLinearImageLayoutGOOGLE;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetLinearImageLayoutGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetLinearImageLayoutGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkFormat*)&local_format, sizeof(VkFormat));
+    *streamPtrPtr += sizeof(VkFormat);
+    memcpy(*streamPtrPtr, (VkDeviceSize*)pOffset, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    memcpy(*streamPtrPtr, (VkDeviceSize*)pRowPitchAlignment, sizeof(VkDeviceSize));
+    *streamPtrPtr += sizeof(VkDeviceSize);
+    stream->read((VkDeviceSize*)pOffset, sizeof(VkDeviceSize));
+    stream->read((VkDeviceSize*)pRowPitchAlignment, sizeof(VkDeviceSize));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkQueueFlushCommandsGOOGLE(
+    VkQueue queue,
+    VkCommandBuffer commandBuffer,
+    VkDeviceSize dataSize,
+    const void* pData,
+    uint32_t doLock)
+{
+    #include "vkQueueFlushCommandsGOOGLE_encode_impl.cpp.inl"
+}
+
+void VkEncoder::vkQueueCommitDescriptorSetUpdatesGOOGLE(
+    VkQueue queue,
+    uint32_t descriptorPoolCount,
+    const VkDescriptorPool* pDescriptorPools,
+    uint32_t descriptorSetCount,
+    const VkDescriptorSetLayout* pSetLayouts,
+    const uint64_t* pDescriptorSetPoolIds,
+    const uint32_t* pDescriptorSetWhichPool,
+    const uint32_t* pDescriptorSetPendingAllocation,
+    const uint32_t* pDescriptorWriteStartingIndices,
+    uint32_t pendingDescriptorWriteCount,
+    const VkWriteDescriptorSet* pPendingDescriptorWrites,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkQueue local_queue;
+    uint32_t local_descriptorPoolCount;
+    VkDescriptorPool* local_pDescriptorPools;
+    uint32_t local_descriptorSetCount;
+    VkDescriptorSetLayout* local_pSetLayouts;
+    uint64_t* local_pDescriptorSetPoolIds;
+    uint32_t* local_pDescriptorSetWhichPool;
+    uint32_t* local_pDescriptorSetPendingAllocation;
+    uint32_t* local_pDescriptorWriteStartingIndices;
+    uint32_t local_pendingDescriptorWriteCount;
+    VkWriteDescriptorSet* local_pPendingDescriptorWrites;
+    local_queue = queue;
+    local_descriptorPoolCount = descriptorPoolCount;
+    // Avoiding deepcopy for pDescriptorPools
+    local_pDescriptorPools = (VkDescriptorPool*)pDescriptorPools;
+    local_descriptorSetCount = descriptorSetCount;
+    // Avoiding deepcopy for pSetLayouts
+    local_pSetLayouts = (VkDescriptorSetLayout*)pSetLayouts;
+    // Avoiding deepcopy for pDescriptorSetPoolIds
+    local_pDescriptorSetPoolIds = (uint64_t*)pDescriptorSetPoolIds;
+    // Avoiding deepcopy for pDescriptorSetWhichPool
+    local_pDescriptorSetWhichPool = (uint32_t*)pDescriptorSetWhichPool;
+    // Avoiding deepcopy for pDescriptorSetPendingAllocation
+    local_pDescriptorSetPendingAllocation = (uint32_t*)pDescriptorSetPendingAllocation;
+    // Avoiding deepcopy for pDescriptorWriteStartingIndices
+    local_pDescriptorWriteStartingIndices = (uint32_t*)pDescriptorWriteStartingIndices;
+    local_pendingDescriptorWriteCount = pendingDescriptorWriteCount;
+    local_pPendingDescriptorWrites = nullptr;
+    if (pPendingDescriptorWrites)
+    {
+        local_pPendingDescriptorWrites = (VkWriteDescriptorSet*)pool->alloc(((pendingDescriptorWriteCount)) * sizeof(const VkWriteDescriptorSet));
+        for (uint32_t i = 0; i < (uint32_t)((pendingDescriptorWriteCount)); ++i)
+        {
+            deepcopy_VkWriteDescriptorSet(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pPendingDescriptorWrites + i, (VkWriteDescriptorSet*)(local_pPendingDescriptorWrites + i));
+        }
+    }
+    if (local_pPendingDescriptorWrites)
+    {
+        for (uint32_t i = 0; i < (uint32_t)((pendingDescriptorWriteCount)); ++i)
+        {
+            transform_tohost_VkWriteDescriptorSet(sResourceTracker, (VkWriteDescriptorSet*)(local_pPendingDescriptorWrites + i));
+        }
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        if (((descriptorPoolCount)))
+        {
+            *countPtr += ((descriptorPoolCount)) * 8;
+        }
+        *countPtr += sizeof(uint32_t);
+        if (((descriptorSetCount)))
+        {
+            *countPtr += ((descriptorSetCount)) * 8;
+        }
+        *countPtr += ((descriptorSetCount)) * sizeof(uint64_t);
+        *countPtr += ((descriptorSetCount)) * sizeof(uint32_t);
+        *countPtr += ((descriptorSetCount)) * sizeof(uint32_t);
+        *countPtr += ((descriptorSetCount)) * sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < (uint32_t)((pendingDescriptorWriteCount)); ++i)
+        {
+            count_VkWriteDescriptorSet(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkWriteDescriptorSet*)(local_pPendingDescriptorWrites + i), countPtr);
+        }
+    }
+    uint32_t packetSize_vkQueueCommitDescriptorSetUpdatesGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkQueueCommitDescriptorSetUpdatesGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkQueueCommitDescriptorSetUpdatesGOOGLE = OP_vkQueueCommitDescriptorSetUpdatesGOOGLE;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkQueueCommitDescriptorSetUpdatesGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkQueueCommitDescriptorSetUpdatesGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkQueue((*&local_queue));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_descriptorPoolCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    if (((descriptorPoolCount)))
+    {
+        uint8_t* cgen_var_1_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((descriptorPoolCount)); ++k)
+        {
+            uint64_t tmpval = get_host_u64_VkDescriptorPool(local_pDescriptorPools[k]);
+            memcpy(cgen_var_1_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((descriptorPoolCount));
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_descriptorSetCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    if (((descriptorSetCount)))
+    {
+        uint8_t* cgen_var_2_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((descriptorSetCount)); ++k)
+        {
+            uint64_t tmpval = get_host_u64_VkDescriptorSetLayout(local_pSetLayouts[k]);
+            memcpy(cgen_var_2_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((descriptorSetCount));
+    }
+    memcpy(*streamPtrPtr, (uint64_t*)local_pDescriptorSetPoolIds, ((descriptorSetCount)) * sizeof(uint64_t));
+    *streamPtrPtr += ((descriptorSetCount)) * sizeof(uint64_t);
+    memcpy(*streamPtrPtr, (uint32_t*)local_pDescriptorSetWhichPool, ((descriptorSetCount)) * sizeof(uint32_t));
+    *streamPtrPtr += ((descriptorSetCount)) * sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)local_pDescriptorSetPendingAllocation, ((descriptorSetCount)) * sizeof(uint32_t));
+    *streamPtrPtr += ((descriptorSetCount)) * sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)local_pDescriptorWriteStartingIndices, ((descriptorSetCount)) * sizeof(uint32_t));
+    *streamPtrPtr += ((descriptorSetCount)) * sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_pendingDescriptorWriteCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)((pendingDescriptorWriteCount)); ++i)
+    {
+        reservedmarshal_VkWriteDescriptorSet(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkWriteDescriptorSet*)(local_pPendingDescriptorWrites + i), streamPtrPtr);
+    }
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCollectDescriptorPoolIdsGOOGLE(
+    VkDevice device,
+    VkDescriptorPool descriptorPool,
+    uint32_t* pPoolIdCount,
+    uint64_t* pPoolIds,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkDescriptorPool local_descriptorPool;
+    local_device = device;
+    local_descriptorPool = descriptorPool;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (pPoolIds)
+        {
+            if (pPoolIdCount)
+            {
+                *countPtr += (*(pPoolIdCount)) * sizeof(uint64_t);
+            }
+        }
+    }
+    uint32_t packetSize_vkCollectDescriptorPoolIdsGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCollectDescriptorPoolIdsGOOGLE);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCollectDescriptorPoolIdsGOOGLE = OP_vkCollectDescriptorPoolIdsGOOGLE;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCollectDescriptorPoolIdsGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCollectDescriptorPoolIdsGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDescriptorPool((*&local_descriptorPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)pPoolIdCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)pPoolIds;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (pPoolIds)
+    {
+        memcpy(*streamPtrPtr, (uint64_t*)pPoolIds, (*(pPoolIdCount)) * sizeof(uint64_t));
+        *streamPtrPtr += (*(pPoolIdCount)) * sizeof(uint64_t);
+    }
+    stream->read((uint32_t*)pPoolIdCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    uint64_t* check_pPoolIds;
+    check_pPoolIds = (uint64_t*)(uintptr_t)stream->getBe64();
+    if (pPoolIds)
+    {
+        if (!(check_pPoolIds))
+        {
+            fprintf(stderr, "fatal: pPoolIds inconsistent between guest and host\n");
+        }
+        stream->read((uint64_t*)pPoolIds, (*(pPoolIdCount)) * sizeof(uint64_t));
+    }
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
+#ifdef VK_KHR_acceleration_structure
+VkResult VkEncoder::vkCreateAccelerationStructureKHR(
+    VkDevice device,
+    const VkAccelerationStructureCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkAccelerationStructureKHR* pAccelerationStructure,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkAccelerationStructureCreateInfoKHR* local_pCreateInfo;
+    VkAllocationCallbacks* local_pAllocator;
+    local_device = device;
+    local_pCreateInfo = nullptr;
+    if (pCreateInfo)
+    {
+        local_pCreateInfo = (VkAccelerationStructureCreateInfoKHR*)pool->alloc(sizeof(const VkAccelerationStructureCreateInfoKHR));
+        deepcopy_VkAccelerationStructureCreateInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfo, (VkAccelerationStructureCreateInfoKHR*)(local_pCreateInfo));
+    }
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pCreateInfo)
+    {
+        transform_tohost_VkAccelerationStructureCreateInfoKHR(sResourceTracker, (VkAccelerationStructureCreateInfoKHR*)(local_pCreateInfo));
+    }
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkAccelerationStructureCreateInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureCreateInfoKHR*)(local_pCreateInfo), countPtr);
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+        uint64_t cgen_var_1;
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkCreateAccelerationStructureKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateAccelerationStructureKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCreateAccelerationStructureKHR = OP_vkCreateAccelerationStructureKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateAccelerationStructureKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateAccelerationStructureKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkAccelerationStructureCreateInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureCreateInfoKHR*)(local_pCreateInfo), streamPtrPtr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    /* is handle, possibly out */;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = (uint64_t)((*pAccelerationStructure));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 8);
+    *streamPtrPtr += 8;
+    /* is handle, possibly out */;
+    stream->setHandleMapping(sResourceTracker->createMapping());
+    uint64_t cgen_var_3;
+    stream->read((uint64_t*)&cgen_var_3, 8);
+    stream->handleMapping()->mapHandles_u64_VkAccelerationStructureKHR(&cgen_var_3, (VkAccelerationStructureKHR*)pAccelerationStructure, 1);
+    stream->unsetHandleMapping();
+    VkResult vkCreateAccelerationStructureKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkCreateAccelerationStructureKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCreateAccelerationStructureKHR_VkResult_return;
+}
+
+void VkEncoder::vkDestroyAccelerationStructureKHR(
+    VkDevice device,
+    VkAccelerationStructureKHR accelerationStructure,
+    const VkAllocationCallbacks* pAllocator,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkAccelerationStructureKHR local_accelerationStructure;
+    VkAllocationCallbacks* local_pAllocator;
+    local_device = device;
+    local_accelerationStructure = accelerationStructure;
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+    }
+    uint32_t packetSize_vkDestroyAccelerationStructureKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkDestroyAccelerationStructureKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkDestroyAccelerationStructureKHR = OP_vkDestroyAccelerationStructureKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkDestroyAccelerationStructureKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkDestroyAccelerationStructureKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkAccelerationStructureKHR((*&local_accelerationStructure));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    sResourceTracker->destroyMapping()->mapHandles_VkAccelerationStructureKHR((VkAccelerationStructureKHR*)&accelerationStructure);
+    stream->flush();
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdBuildAccelerationStructuresKHR(
+    VkCommandBuffer commandBuffer,
+    uint32_t infoCount,
+    const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,
+    const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    uint32_t local_infoCount;
+    VkAccelerationStructureBuildGeometryInfoKHR* local_pInfos;
+    VkAccelerationStructureBuildRangeInfoKHR** local_ppBuildRangeInfos;
+    local_commandBuffer = commandBuffer;
+    local_infoCount = infoCount;
+    local_pInfos = nullptr;
+    if (pInfos)
+    {
+        local_pInfos = (VkAccelerationStructureBuildGeometryInfoKHR*)pool->alloc(((infoCount)) * sizeof(const VkAccelerationStructureBuildGeometryInfoKHR));
+        for (uint32_t i = 0; i < (uint32_t)((infoCount)); ++i)
+        {
+            deepcopy_VkAccelerationStructureBuildGeometryInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfos + i, (VkAccelerationStructureBuildGeometryInfoKHR*)(local_pInfos + i));
+        }
+    }
+    (void)ppBuildRangeInfos;
+    if (local_pInfos)
+    {
+        for (uint32_t i = 0; i < (uint32_t)((infoCount)); ++i)
+        {
+            transform_tohost_VkAccelerationStructureBuildGeometryInfoKHR(sResourceTracker, (VkAccelerationStructureBuildGeometryInfoKHR*)(local_pInfos + i));
+        }
+    }
+    (void)local_ppBuildRangeInfos;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < (uint32_t)((infoCount)); ++i)
+        {
+            count_VkAccelerationStructureBuildGeometryInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureBuildGeometryInfoKHR*)(local_pInfos + i), countPtr);
+        }
+        (void)local_ppBuildRangeInfos;
+    }
+    uint32_t packetSize_vkCmdBuildAccelerationStructuresKHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBuildAccelerationStructuresKHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBuildAccelerationStructuresKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdBuildAccelerationStructuresKHR = OP_vkCmdBuildAccelerationStructuresKHR;
+    memcpy(streamPtr, &opcode_vkCmdBuildAccelerationStructuresKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBuildAccelerationStructuresKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_infoCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)((infoCount)); ++i)
+    {
+        reservedmarshal_VkAccelerationStructureBuildGeometryInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureBuildGeometryInfoKHR*)(local_pInfos + i), streamPtrPtr);
+    }
+    (void)local_ppBuildRangeInfos;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdBuildAccelerationStructuresIndirectKHR(
+    VkCommandBuffer commandBuffer,
+    uint32_t infoCount,
+    const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,
+    const VkDeviceAddress* pIndirectDeviceAddresses,
+    const uint32_t* pIndirectStrides,
+    const uint32_t* const* ppMaxPrimitiveCounts,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    uint32_t local_infoCount;
+    VkAccelerationStructureBuildGeometryInfoKHR* local_pInfos;
+    VkDeviceAddress* local_pIndirectDeviceAddresses;
+    uint32_t* local_pIndirectStrides;
+    uint32_t** local_ppMaxPrimitiveCounts;
+    local_commandBuffer = commandBuffer;
+    local_infoCount = infoCount;
+    local_pInfos = nullptr;
+    if (pInfos)
+    {
+        local_pInfos = (VkAccelerationStructureBuildGeometryInfoKHR*)pool->alloc(((infoCount)) * sizeof(const VkAccelerationStructureBuildGeometryInfoKHR));
+        for (uint32_t i = 0; i < (uint32_t)((infoCount)); ++i)
+        {
+            deepcopy_VkAccelerationStructureBuildGeometryInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfos + i, (VkAccelerationStructureBuildGeometryInfoKHR*)(local_pInfos + i));
+        }
+    }
+    // Avoiding deepcopy for pIndirectDeviceAddresses
+    local_pIndirectDeviceAddresses = (VkDeviceAddress*)pIndirectDeviceAddresses;
+    // Avoiding deepcopy for pIndirectStrides
+    local_pIndirectStrides = (uint32_t*)pIndirectStrides;
+    (void)ppMaxPrimitiveCounts;
+    if (local_pInfos)
+    {
+        for (uint32_t i = 0; i < (uint32_t)((infoCount)); ++i)
+        {
+            transform_tohost_VkAccelerationStructureBuildGeometryInfoKHR(sResourceTracker, (VkAccelerationStructureBuildGeometryInfoKHR*)(local_pInfos + i));
+        }
+    }
+    (void)local_ppMaxPrimitiveCounts;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < (uint32_t)((infoCount)); ++i)
+        {
+            count_VkAccelerationStructureBuildGeometryInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureBuildGeometryInfoKHR*)(local_pInfos + i), countPtr);
+        }
+        *countPtr += ((infoCount)) * sizeof(VkDeviceAddress);
+        *countPtr += ((infoCount)) * sizeof(uint32_t);
+        (void)local_ppMaxPrimitiveCounts;
+    }
+    uint32_t packetSize_vkCmdBuildAccelerationStructuresIndirectKHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdBuildAccelerationStructuresIndirectKHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdBuildAccelerationStructuresIndirectKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdBuildAccelerationStructuresIndirectKHR = OP_vkCmdBuildAccelerationStructuresIndirectKHR;
+    memcpy(streamPtr, &opcode_vkCmdBuildAccelerationStructuresIndirectKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdBuildAccelerationStructuresIndirectKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_infoCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)((infoCount)); ++i)
+    {
+        reservedmarshal_VkAccelerationStructureBuildGeometryInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureBuildGeometryInfoKHR*)(local_pInfos + i), streamPtrPtr);
+    }
+    memcpy(*streamPtrPtr, (VkDeviceAddress*)local_pIndirectDeviceAddresses, ((infoCount)) * sizeof(VkDeviceAddress));
+    *streamPtrPtr += ((infoCount)) * sizeof(VkDeviceAddress);
+    memcpy(*streamPtrPtr, (uint32_t*)local_pIndirectStrides, ((infoCount)) * sizeof(uint32_t));
+    *streamPtrPtr += ((infoCount)) * sizeof(uint32_t);
+    (void)local_ppMaxPrimitiveCounts;
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+VkResult VkEncoder::vkBuildAccelerationStructuresKHR(
+    VkDevice device,
+    VkDeferredOperationKHR deferredOperation,
+    uint32_t infoCount,
+    const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,
+    const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkDeferredOperationKHR local_deferredOperation;
+    uint32_t local_infoCount;
+    VkAccelerationStructureBuildGeometryInfoKHR* local_pInfos;
+    VkAccelerationStructureBuildRangeInfoKHR** local_ppBuildRangeInfos;
+    local_device = device;
+    local_deferredOperation = deferredOperation;
+    local_infoCount = infoCount;
+    local_pInfos = nullptr;
+    if (pInfos)
+    {
+        local_pInfos = (VkAccelerationStructureBuildGeometryInfoKHR*)pool->alloc(((infoCount)) * sizeof(const VkAccelerationStructureBuildGeometryInfoKHR));
+        for (uint32_t i = 0; i < (uint32_t)((infoCount)); ++i)
+        {
+            deepcopy_VkAccelerationStructureBuildGeometryInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfos + i, (VkAccelerationStructureBuildGeometryInfoKHR*)(local_pInfos + i));
+        }
+    }
+    (void)ppBuildRangeInfos;
+    if (local_pInfos)
+    {
+        for (uint32_t i = 0; i < (uint32_t)((infoCount)); ++i)
+        {
+            transform_tohost_VkAccelerationStructureBuildGeometryInfoKHR(sResourceTracker, (VkAccelerationStructureBuildGeometryInfoKHR*)(local_pInfos + i));
+        }
+    }
+    (void)local_ppBuildRangeInfos;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += 8;
+        *countPtr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < (uint32_t)((infoCount)); ++i)
+        {
+            count_VkAccelerationStructureBuildGeometryInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureBuildGeometryInfoKHR*)(local_pInfos + i), countPtr);
+        }
+        (void)local_ppBuildRangeInfos;
+    }
+    uint32_t packetSize_vkBuildAccelerationStructuresKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkBuildAccelerationStructuresKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkBuildAccelerationStructuresKHR = OP_vkBuildAccelerationStructuresKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkBuildAccelerationStructuresKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkBuildAccelerationStructuresKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1 = (uint64_t)local_deferredOperation;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_infoCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)((infoCount)); ++i)
+    {
+        reservedmarshal_VkAccelerationStructureBuildGeometryInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureBuildGeometryInfoKHR*)(local_pInfos + i), streamPtrPtr);
+    }
+    (void)local_ppBuildRangeInfos;
+    VkResult vkBuildAccelerationStructuresKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkBuildAccelerationStructuresKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkBuildAccelerationStructuresKHR_VkResult_return;
+}
+
+VkResult VkEncoder::vkCopyAccelerationStructureKHR(
+    VkDevice device,
+    VkDeferredOperationKHR deferredOperation,
+    const VkCopyAccelerationStructureInfoKHR* pInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkDeferredOperationKHR local_deferredOperation;
+    VkCopyAccelerationStructureInfoKHR* local_pInfo;
+    local_device = device;
+    local_deferredOperation = deferredOperation;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkCopyAccelerationStructureInfoKHR*)pool->alloc(sizeof(const VkCopyAccelerationStructureInfoKHR));
+        deepcopy_VkCopyAccelerationStructureInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkCopyAccelerationStructureInfoKHR*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkCopyAccelerationStructureInfoKHR(sResourceTracker, (VkCopyAccelerationStructureInfoKHR*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += 8;
+        count_VkCopyAccelerationStructureInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyAccelerationStructureInfoKHR*)(local_pInfo), countPtr);
+    }
+    uint32_t packetSize_vkCopyAccelerationStructureKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCopyAccelerationStructureKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCopyAccelerationStructureKHR = OP_vkCopyAccelerationStructureKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCopyAccelerationStructureKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCopyAccelerationStructureKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1 = (uint64_t)local_deferredOperation;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    reservedmarshal_VkCopyAccelerationStructureInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyAccelerationStructureInfoKHR*)(local_pInfo), streamPtrPtr);
+    VkResult vkCopyAccelerationStructureKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkCopyAccelerationStructureKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCopyAccelerationStructureKHR_VkResult_return;
+}
+
+VkResult VkEncoder::vkCopyAccelerationStructureToMemoryKHR(
+    VkDevice device,
+    VkDeferredOperationKHR deferredOperation,
+    const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkDeferredOperationKHR local_deferredOperation;
+    VkCopyAccelerationStructureToMemoryInfoKHR* local_pInfo;
+    local_device = device;
+    local_deferredOperation = deferredOperation;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkCopyAccelerationStructureToMemoryInfoKHR*)pool->alloc(sizeof(const VkCopyAccelerationStructureToMemoryInfoKHR));
+        deepcopy_VkCopyAccelerationStructureToMemoryInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkCopyAccelerationStructureToMemoryInfoKHR*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkCopyAccelerationStructureToMemoryInfoKHR(sResourceTracker, (VkCopyAccelerationStructureToMemoryInfoKHR*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += 8;
+        count_VkCopyAccelerationStructureToMemoryInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyAccelerationStructureToMemoryInfoKHR*)(local_pInfo), countPtr);
+    }
+    uint32_t packetSize_vkCopyAccelerationStructureToMemoryKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCopyAccelerationStructureToMemoryKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCopyAccelerationStructureToMemoryKHR = OP_vkCopyAccelerationStructureToMemoryKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCopyAccelerationStructureToMemoryKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCopyAccelerationStructureToMemoryKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1 = (uint64_t)local_deferredOperation;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    reservedmarshal_VkCopyAccelerationStructureToMemoryInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyAccelerationStructureToMemoryInfoKHR*)(local_pInfo), streamPtrPtr);
+    VkResult vkCopyAccelerationStructureToMemoryKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkCopyAccelerationStructureToMemoryKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCopyAccelerationStructureToMemoryKHR_VkResult_return;
+}
+
+VkResult VkEncoder::vkCopyMemoryToAccelerationStructureKHR(
+    VkDevice device,
+    VkDeferredOperationKHR deferredOperation,
+    const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkDeferredOperationKHR local_deferredOperation;
+    VkCopyMemoryToAccelerationStructureInfoKHR* local_pInfo;
+    local_device = device;
+    local_deferredOperation = deferredOperation;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkCopyMemoryToAccelerationStructureInfoKHR*)pool->alloc(sizeof(const VkCopyMemoryToAccelerationStructureInfoKHR));
+        deepcopy_VkCopyMemoryToAccelerationStructureInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkCopyMemoryToAccelerationStructureInfoKHR*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkCopyMemoryToAccelerationStructureInfoKHR(sResourceTracker, (VkCopyMemoryToAccelerationStructureInfoKHR*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += 8;
+        count_VkCopyMemoryToAccelerationStructureInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyMemoryToAccelerationStructureInfoKHR*)(local_pInfo), countPtr);
+    }
+    uint32_t packetSize_vkCopyMemoryToAccelerationStructureKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCopyMemoryToAccelerationStructureKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCopyMemoryToAccelerationStructureKHR = OP_vkCopyMemoryToAccelerationStructureKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCopyMemoryToAccelerationStructureKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCopyMemoryToAccelerationStructureKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1 = (uint64_t)local_deferredOperation;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    reservedmarshal_VkCopyMemoryToAccelerationStructureInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyMemoryToAccelerationStructureInfoKHR*)(local_pInfo), streamPtrPtr);
+    VkResult vkCopyMemoryToAccelerationStructureKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkCopyMemoryToAccelerationStructureKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCopyMemoryToAccelerationStructureKHR_VkResult_return;
+}
+
+VkResult VkEncoder::vkWriteAccelerationStructuresPropertiesKHR(
+    VkDevice device,
+    uint32_t accelerationStructureCount,
+    const VkAccelerationStructureKHR* pAccelerationStructures,
+    VkQueryType queryType,
+    size_t dataSize,
+    void* pData,
+    size_t stride,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    uint32_t local_accelerationStructureCount;
+    VkAccelerationStructureKHR* local_pAccelerationStructures;
+    VkQueryType local_queryType;
+    size_t local_dataSize;
+    size_t local_stride;
+    local_device = device;
+    local_accelerationStructureCount = accelerationStructureCount;
+    // Avoiding deepcopy for pAccelerationStructures
+    local_pAccelerationStructures = (VkAccelerationStructureKHR*)pAccelerationStructures;
+    local_queryType = queryType;
+    local_dataSize = dataSize;
+    local_stride = stride;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        if (((accelerationStructureCount)))
+        {
+            *countPtr += ((accelerationStructureCount)) * 8;
+        }
+        *countPtr += sizeof(VkQueryType);
+        *countPtr += 8;
+        *countPtr += ((dataSize)) * sizeof(uint8_t);
+        *countPtr += 8;
+    }
+    uint32_t packetSize_vkWriteAccelerationStructuresPropertiesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkWriteAccelerationStructuresPropertiesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkWriteAccelerationStructuresPropertiesKHR = OP_vkWriteAccelerationStructuresPropertiesKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkWriteAccelerationStructuresPropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkWriteAccelerationStructuresPropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_accelerationStructureCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    if (((accelerationStructureCount)))
+    {
+        uint8_t* cgen_var_1_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((accelerationStructureCount)); ++k)
+        {
+            uint64_t tmpval = get_host_u64_VkAccelerationStructureKHR(local_pAccelerationStructures[k]);
+            memcpy(cgen_var_1_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((accelerationStructureCount));
+    }
+    memcpy(*streamPtrPtr, (VkQueryType*)&local_queryType, sizeof(VkQueryType));
+    *streamPtrPtr += sizeof(VkQueryType);
+    uint64_t cgen_var_2 = (uint64_t)local_dataSize;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    memcpy(*streamPtrPtr, (void*)pData, ((dataSize)) * sizeof(uint8_t));
+    *streamPtrPtr += ((dataSize)) * sizeof(uint8_t);
+    uint64_t cgen_var_3 = (uint64_t)local_stride;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    stream->read((void*)pData, ((dataSize)) * sizeof(uint8_t));
+    VkResult vkWriteAccelerationStructuresPropertiesKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkWriteAccelerationStructuresPropertiesKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkWriteAccelerationStructuresPropertiesKHR_VkResult_return;
+}
+
+void VkEncoder::vkCmdCopyAccelerationStructureKHR(
+    VkCommandBuffer commandBuffer,
+    const VkCopyAccelerationStructureInfoKHR* pInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkCopyAccelerationStructureInfoKHR* local_pInfo;
+    local_commandBuffer = commandBuffer;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkCopyAccelerationStructureInfoKHR*)pool->alloc(sizeof(const VkCopyAccelerationStructureInfoKHR));
+        deepcopy_VkCopyAccelerationStructureInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkCopyAccelerationStructureInfoKHR*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkCopyAccelerationStructureInfoKHR(sResourceTracker, (VkCopyAccelerationStructureInfoKHR*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkCopyAccelerationStructureInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyAccelerationStructureInfoKHR*)(local_pInfo), countPtr);
+    }
+    uint32_t packetSize_vkCmdCopyAccelerationStructureKHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdCopyAccelerationStructureKHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdCopyAccelerationStructureKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdCopyAccelerationStructureKHR = OP_vkCmdCopyAccelerationStructureKHR;
+    memcpy(streamPtr, &opcode_vkCmdCopyAccelerationStructureKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdCopyAccelerationStructureKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkCopyAccelerationStructureInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyAccelerationStructureInfoKHR*)(local_pInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdCopyAccelerationStructureToMemoryKHR(
+    VkCommandBuffer commandBuffer,
+    const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkCopyAccelerationStructureToMemoryInfoKHR* local_pInfo;
+    local_commandBuffer = commandBuffer;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkCopyAccelerationStructureToMemoryInfoKHR*)pool->alloc(sizeof(const VkCopyAccelerationStructureToMemoryInfoKHR));
+        deepcopy_VkCopyAccelerationStructureToMemoryInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkCopyAccelerationStructureToMemoryInfoKHR*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkCopyAccelerationStructureToMemoryInfoKHR(sResourceTracker, (VkCopyAccelerationStructureToMemoryInfoKHR*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkCopyAccelerationStructureToMemoryInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyAccelerationStructureToMemoryInfoKHR*)(local_pInfo), countPtr);
+    }
+    uint32_t packetSize_vkCmdCopyAccelerationStructureToMemoryKHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdCopyAccelerationStructureToMemoryKHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdCopyAccelerationStructureToMemoryKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdCopyAccelerationStructureToMemoryKHR = OP_vkCmdCopyAccelerationStructureToMemoryKHR;
+    memcpy(streamPtr, &opcode_vkCmdCopyAccelerationStructureToMemoryKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdCopyAccelerationStructureToMemoryKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkCopyAccelerationStructureToMemoryInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyAccelerationStructureToMemoryInfoKHR*)(local_pInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkCmdCopyMemoryToAccelerationStructureKHR(
+    VkCommandBuffer commandBuffer,
+    const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkCopyMemoryToAccelerationStructureInfoKHR* local_pInfo;
+    local_commandBuffer = commandBuffer;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkCopyMemoryToAccelerationStructureInfoKHR*)pool->alloc(sizeof(const VkCopyMemoryToAccelerationStructureInfoKHR));
+        deepcopy_VkCopyMemoryToAccelerationStructureInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkCopyMemoryToAccelerationStructureInfoKHR*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkCopyMemoryToAccelerationStructureInfoKHR(sResourceTracker, (VkCopyMemoryToAccelerationStructureInfoKHR*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkCopyMemoryToAccelerationStructureInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyMemoryToAccelerationStructureInfoKHR*)(local_pInfo), countPtr);
+    }
+    uint32_t packetSize_vkCmdCopyMemoryToAccelerationStructureKHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdCopyMemoryToAccelerationStructureKHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdCopyMemoryToAccelerationStructureKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdCopyMemoryToAccelerationStructureKHR = OP_vkCmdCopyMemoryToAccelerationStructureKHR;
+    memcpy(streamPtr, &opcode_vkCmdCopyMemoryToAccelerationStructureKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdCopyMemoryToAccelerationStructureKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkCopyMemoryToAccelerationStructureInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkCopyMemoryToAccelerationStructureInfoKHR*)(local_pInfo), streamPtrPtr);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+VkDeviceAddress VkEncoder::vkGetAccelerationStructureDeviceAddressKHR(
+    VkDevice device,
+    const VkAccelerationStructureDeviceAddressInfoKHR* pInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkAccelerationStructureDeviceAddressInfoKHR* local_pInfo;
+    local_device = device;
+    local_pInfo = nullptr;
+    if (pInfo)
+    {
+        local_pInfo = (VkAccelerationStructureDeviceAddressInfoKHR*)pool->alloc(sizeof(const VkAccelerationStructureDeviceAddressInfoKHR));
+        deepcopy_VkAccelerationStructureDeviceAddressInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pInfo, (VkAccelerationStructureDeviceAddressInfoKHR*)(local_pInfo));
+    }
+    if (local_pInfo)
+    {
+        transform_tohost_VkAccelerationStructureDeviceAddressInfoKHR(sResourceTracker, (VkAccelerationStructureDeviceAddressInfoKHR*)(local_pInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkAccelerationStructureDeviceAddressInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureDeviceAddressInfoKHR*)(local_pInfo), countPtr);
+    }
+    uint32_t packetSize_vkGetAccelerationStructureDeviceAddressKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetAccelerationStructureDeviceAddressKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetAccelerationStructureDeviceAddressKHR = OP_vkGetAccelerationStructureDeviceAddressKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetAccelerationStructureDeviceAddressKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetAccelerationStructureDeviceAddressKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkAccelerationStructureDeviceAddressInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureDeviceAddressInfoKHR*)(local_pInfo), streamPtrPtr);
+    VkDeviceAddress vkGetAccelerationStructureDeviceAddressKHR_VkDeviceAddress_return = (VkDeviceAddress)0;
+    stream->read(&vkGetAccelerationStructureDeviceAddressKHR_VkDeviceAddress_return, sizeof(VkDeviceAddress));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetAccelerationStructureDeviceAddressKHR_VkDeviceAddress_return;
+}
+
+void VkEncoder::vkCmdWriteAccelerationStructuresPropertiesKHR(
+    VkCommandBuffer commandBuffer,
+    uint32_t accelerationStructureCount,
+    const VkAccelerationStructureKHR* pAccelerationStructures,
+    VkQueryType queryType,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    uint32_t local_accelerationStructureCount;
+    VkAccelerationStructureKHR* local_pAccelerationStructures;
+    VkQueryType local_queryType;
+    VkQueryPool local_queryPool;
+    uint32_t local_firstQuery;
+    local_commandBuffer = commandBuffer;
+    local_accelerationStructureCount = accelerationStructureCount;
+    // Avoiding deepcopy for pAccelerationStructures
+    local_pAccelerationStructures = (VkAccelerationStructureKHR*)pAccelerationStructures;
+    local_queryType = queryType;
+    local_queryPool = queryPool;
+    local_firstQuery = firstQuery;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        if (((accelerationStructureCount)))
+        {
+            *countPtr += ((accelerationStructureCount)) * 8;
+        }
+        *countPtr += sizeof(VkQueryType);
+        uint64_t cgen_var_2;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkCmdWriteAccelerationStructuresPropertiesKHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdWriteAccelerationStructuresPropertiesKHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdWriteAccelerationStructuresPropertiesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdWriteAccelerationStructuresPropertiesKHR = OP_vkCmdWriteAccelerationStructuresPropertiesKHR;
+    memcpy(streamPtr, &opcode_vkCmdWriteAccelerationStructuresPropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdWriteAccelerationStructuresPropertiesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_accelerationStructureCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    if (((accelerationStructureCount)))
+    {
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((accelerationStructureCount)); ++k)
+        {
+            uint64_t tmpval = get_host_u64_VkAccelerationStructureKHR(local_pAccelerationStructures[k]);
+            memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((accelerationStructureCount));
+    }
+    memcpy(*streamPtrPtr, (VkQueryType*)&local_queryType, sizeof(VkQueryType));
+    *streamPtrPtr += sizeof(VkQueryType);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkQueryPool((*&local_queryPool));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstQuery, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkGetDeviceAccelerationStructureCompatibilityKHR(
+    VkDevice device,
+    const VkAccelerationStructureVersionInfoKHR* pVersionInfo,
+    VkAccelerationStructureCompatibilityKHR* pCompatibility,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkAccelerationStructureVersionInfoKHR* local_pVersionInfo;
+    local_device = device;
+    local_pVersionInfo = nullptr;
+    if (pVersionInfo)
+    {
+        local_pVersionInfo = (VkAccelerationStructureVersionInfoKHR*)pool->alloc(sizeof(const VkAccelerationStructureVersionInfoKHR));
+        deepcopy_VkAccelerationStructureVersionInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pVersionInfo, (VkAccelerationStructureVersionInfoKHR*)(local_pVersionInfo));
+    }
+    if (local_pVersionInfo)
+    {
+        transform_tohost_VkAccelerationStructureVersionInfoKHR(sResourceTracker, (VkAccelerationStructureVersionInfoKHR*)(local_pVersionInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkAccelerationStructureVersionInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureVersionInfoKHR*)(local_pVersionInfo), countPtr);
+        *countPtr += sizeof(VkAccelerationStructureCompatibilityKHR);
+    }
+    uint32_t packetSize_vkGetDeviceAccelerationStructureCompatibilityKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetDeviceAccelerationStructureCompatibilityKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetDeviceAccelerationStructureCompatibilityKHR = OP_vkGetDeviceAccelerationStructureCompatibilityKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetDeviceAccelerationStructureCompatibilityKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetDeviceAccelerationStructureCompatibilityKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    reservedmarshal_VkAccelerationStructureVersionInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureVersionInfoKHR*)(local_pVersionInfo), streamPtrPtr);
+    memcpy(*streamPtrPtr, (VkAccelerationStructureCompatibilityKHR*)pCompatibility, sizeof(VkAccelerationStructureCompatibilityKHR));
+    *streamPtrPtr += sizeof(VkAccelerationStructureCompatibilityKHR);
+    stream->read((VkAccelerationStructureCompatibilityKHR*)pCompatibility, sizeof(VkAccelerationStructureCompatibilityKHR));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+void VkEncoder::vkGetAccelerationStructureBuildSizesKHR(
+    VkDevice device,
+    VkAccelerationStructureBuildTypeKHR buildType,
+    const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo,
+    const uint32_t* pMaxPrimitiveCounts,
+    VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkAccelerationStructureBuildTypeKHR local_buildType;
+    VkAccelerationStructureBuildGeometryInfoKHR* local_pBuildInfo;
+    uint32_t* local_pMaxPrimitiveCounts;
+    local_device = device;
+    local_buildType = buildType;
+    local_pBuildInfo = nullptr;
+    if (pBuildInfo)
+    {
+        local_pBuildInfo = (VkAccelerationStructureBuildGeometryInfoKHR*)pool->alloc(sizeof(const VkAccelerationStructureBuildGeometryInfoKHR));
+        deepcopy_VkAccelerationStructureBuildGeometryInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pBuildInfo, (VkAccelerationStructureBuildGeometryInfoKHR*)(local_pBuildInfo));
+    }
+    // Avoiding deepcopy for pMaxPrimitiveCounts
+    local_pMaxPrimitiveCounts = (uint32_t*)pMaxPrimitiveCounts;
+    if (local_pBuildInfo)
+    {
+        transform_tohost_VkAccelerationStructureBuildGeometryInfoKHR(sResourceTracker, (VkAccelerationStructureBuildGeometryInfoKHR*)(local_pBuildInfo));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(VkAccelerationStructureBuildTypeKHR);
+        count_VkAccelerationStructureBuildGeometryInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureBuildGeometryInfoKHR*)(local_pBuildInfo), countPtr);
+        *countPtr += pBuildInfo->geometryCount * sizeof(uint32_t);
+        count_VkAccelerationStructureBuildSizesInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureBuildSizesInfoKHR*)(pSizeInfo), countPtr);
+    }
+    uint32_t packetSize_vkGetAccelerationStructureBuildSizesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetAccelerationStructureBuildSizesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetAccelerationStructureBuildSizesKHR = OP_vkGetAccelerationStructureBuildSizesKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetAccelerationStructureBuildSizesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetAccelerationStructureBuildSizesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (VkAccelerationStructureBuildTypeKHR*)&local_buildType, sizeof(VkAccelerationStructureBuildTypeKHR));
+    *streamPtrPtr += sizeof(VkAccelerationStructureBuildTypeKHR);
+    reservedmarshal_VkAccelerationStructureBuildGeometryInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureBuildGeometryInfoKHR*)(local_pBuildInfo), streamPtrPtr);
+    memcpy(*streamPtrPtr, (uint32_t*)local_pMaxPrimitiveCounts, pBuildInfo->geometryCount * sizeof(uint32_t));
+    *streamPtrPtr += pBuildInfo->geometryCount * sizeof(uint32_t);
+    reservedmarshal_VkAccelerationStructureBuildSizesInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureBuildSizesInfoKHR*)(pSizeInfo), streamPtrPtr);
+    unmarshal_VkAccelerationStructureBuildSizesInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAccelerationStructureBuildSizesInfoKHR*)(pSizeInfo));
+    if (pSizeInfo)
+    {
+        transform_fromhost_VkAccelerationStructureBuildSizesInfoKHR(sResourceTracker, (VkAccelerationStructureBuildSizesInfoKHR*)(pSizeInfo));
+    }
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+void VkEncoder::vkCmdTraceRaysKHR(
+    VkCommandBuffer commandBuffer,
+    const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable,
+    const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable,
+    const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable,
+    const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable,
+    uint32_t width,
+    uint32_t height,
+    uint32_t depth,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkStridedDeviceAddressRegionKHR* local_pRaygenShaderBindingTable;
+    VkStridedDeviceAddressRegionKHR* local_pMissShaderBindingTable;
+    VkStridedDeviceAddressRegionKHR* local_pHitShaderBindingTable;
+    VkStridedDeviceAddressRegionKHR* local_pCallableShaderBindingTable;
+    uint32_t local_width;
+    uint32_t local_height;
+    uint32_t local_depth;
+    local_commandBuffer = commandBuffer;
+    local_pRaygenShaderBindingTable = nullptr;
+    if (pRaygenShaderBindingTable)
+    {
+        local_pRaygenShaderBindingTable = (VkStridedDeviceAddressRegionKHR*)pool->alloc(sizeof(const VkStridedDeviceAddressRegionKHR));
+        deepcopy_VkStridedDeviceAddressRegionKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pRaygenShaderBindingTable, (VkStridedDeviceAddressRegionKHR*)(local_pRaygenShaderBindingTable));
+    }
+    local_pMissShaderBindingTable = nullptr;
+    if (pMissShaderBindingTable)
+    {
+        local_pMissShaderBindingTable = (VkStridedDeviceAddressRegionKHR*)pool->alloc(sizeof(const VkStridedDeviceAddressRegionKHR));
+        deepcopy_VkStridedDeviceAddressRegionKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pMissShaderBindingTable, (VkStridedDeviceAddressRegionKHR*)(local_pMissShaderBindingTable));
+    }
+    local_pHitShaderBindingTable = nullptr;
+    if (pHitShaderBindingTable)
+    {
+        local_pHitShaderBindingTable = (VkStridedDeviceAddressRegionKHR*)pool->alloc(sizeof(const VkStridedDeviceAddressRegionKHR));
+        deepcopy_VkStridedDeviceAddressRegionKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pHitShaderBindingTable, (VkStridedDeviceAddressRegionKHR*)(local_pHitShaderBindingTable));
+    }
+    local_pCallableShaderBindingTable = nullptr;
+    if (pCallableShaderBindingTable)
+    {
+        local_pCallableShaderBindingTable = (VkStridedDeviceAddressRegionKHR*)pool->alloc(sizeof(const VkStridedDeviceAddressRegionKHR));
+        deepcopy_VkStridedDeviceAddressRegionKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCallableShaderBindingTable, (VkStridedDeviceAddressRegionKHR*)(local_pCallableShaderBindingTable));
+    }
+    local_width = width;
+    local_height = height;
+    local_depth = depth;
+    if (local_pRaygenShaderBindingTable)
+    {
+        transform_tohost_VkStridedDeviceAddressRegionKHR(sResourceTracker, (VkStridedDeviceAddressRegionKHR*)(local_pRaygenShaderBindingTable));
+    }
+    if (local_pMissShaderBindingTable)
+    {
+        transform_tohost_VkStridedDeviceAddressRegionKHR(sResourceTracker, (VkStridedDeviceAddressRegionKHR*)(local_pMissShaderBindingTable));
+    }
+    if (local_pHitShaderBindingTable)
+    {
+        transform_tohost_VkStridedDeviceAddressRegionKHR(sResourceTracker, (VkStridedDeviceAddressRegionKHR*)(local_pHitShaderBindingTable));
+    }
+    if (local_pCallableShaderBindingTable)
+    {
+        transform_tohost_VkStridedDeviceAddressRegionKHR(sResourceTracker, (VkStridedDeviceAddressRegionKHR*)(local_pCallableShaderBindingTable));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkStridedDeviceAddressRegionKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStridedDeviceAddressRegionKHR*)(local_pRaygenShaderBindingTable), countPtr);
+        count_VkStridedDeviceAddressRegionKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStridedDeviceAddressRegionKHR*)(local_pMissShaderBindingTable), countPtr);
+        count_VkStridedDeviceAddressRegionKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStridedDeviceAddressRegionKHR*)(local_pHitShaderBindingTable), countPtr);
+        count_VkStridedDeviceAddressRegionKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStridedDeviceAddressRegionKHR*)(local_pCallableShaderBindingTable), countPtr);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkCmdTraceRaysKHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdTraceRaysKHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdTraceRaysKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdTraceRaysKHR = OP_vkCmdTraceRaysKHR;
+    memcpy(streamPtr, &opcode_vkCmdTraceRaysKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdTraceRaysKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkStridedDeviceAddressRegionKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStridedDeviceAddressRegionKHR*)(local_pRaygenShaderBindingTable), streamPtrPtr);
+    reservedmarshal_VkStridedDeviceAddressRegionKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStridedDeviceAddressRegionKHR*)(local_pMissShaderBindingTable), streamPtrPtr);
+    reservedmarshal_VkStridedDeviceAddressRegionKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStridedDeviceAddressRegionKHR*)(local_pHitShaderBindingTable), streamPtrPtr);
+    reservedmarshal_VkStridedDeviceAddressRegionKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStridedDeviceAddressRegionKHR*)(local_pCallableShaderBindingTable), streamPtrPtr);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_width, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_height, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_depth, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+VkResult VkEncoder::vkCreateRayTracingPipelinesKHR(
+    VkDevice device,
+    VkDeferredOperationKHR deferredOperation,
+    VkPipelineCache pipelineCache,
+    uint32_t createInfoCount,
+    const VkRayTracingPipelineCreateInfoKHR* pCreateInfos,
+    const VkAllocationCallbacks* pAllocator,
+    VkPipeline* pPipelines,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkDeferredOperationKHR local_deferredOperation;
+    VkPipelineCache local_pipelineCache;
+    uint32_t local_createInfoCount;
+    VkRayTracingPipelineCreateInfoKHR* local_pCreateInfos;
+    VkAllocationCallbacks* local_pAllocator;
+    local_device = device;
+    local_deferredOperation = deferredOperation;
+    local_pipelineCache = pipelineCache;
+    local_createInfoCount = createInfoCount;
+    local_pCreateInfos = nullptr;
+    if (pCreateInfos)
+    {
+        local_pCreateInfos = (VkRayTracingPipelineCreateInfoKHR*)pool->alloc(((createInfoCount)) * sizeof(const VkRayTracingPipelineCreateInfoKHR));
+        for (uint32_t i = 0; i < (uint32_t)((createInfoCount)); ++i)
+        {
+            deepcopy_VkRayTracingPipelineCreateInfoKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCreateInfos + i, (VkRayTracingPipelineCreateInfoKHR*)(local_pCreateInfos + i));
+        }
+    }
+    local_pAllocator = nullptr;
+    if (pAllocator)
+    {
+        local_pAllocator = (VkAllocationCallbacks*)pool->alloc(sizeof(const VkAllocationCallbacks));
+        deepcopy_VkAllocationCallbacks(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pAllocator, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    local_pAllocator = nullptr;
+    if (local_pCreateInfos)
+    {
+        for (uint32_t i = 0; i < (uint32_t)((createInfoCount)); ++i)
+        {
+            transform_tohost_VkRayTracingPipelineCreateInfoKHR(sResourceTracker, (VkRayTracingPipelineCreateInfoKHR*)(local_pCreateInfos + i));
+        }
+    }
+    if (local_pAllocator)
+    {
+        transform_tohost_VkAllocationCallbacks(sResourceTracker, (VkAllocationCallbacks*)(local_pAllocator));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < (uint32_t)((createInfoCount)); ++i)
+        {
+            count_VkRayTracingPipelineCreateInfoKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRayTracingPipelineCreateInfoKHR*)(local_pCreateInfos + i), countPtr);
+        }
+        // WARNING PTR CHECK
+        *countPtr += 8;
+        if (local_pAllocator)
+        {
+            count_VkAllocationCallbacks(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), countPtr);
+        }
+        if (((createInfoCount)))
+        {
+            *countPtr += ((createInfoCount)) * 8;
+        }
+    }
+    uint32_t packetSize_vkCreateRayTracingPipelinesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCreateRayTracingPipelinesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCreateRayTracingPipelinesKHR = OP_vkCreateRayTracingPipelinesKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkCreateRayTracingPipelinesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCreateRayTracingPipelinesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1 = (uint64_t)local_deferredOperation;
+    memcpy((*streamPtrPtr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = get_host_u64_VkPipelineCache((*&local_pipelineCache));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_2, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_createInfoCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)((createInfoCount)); ++i)
+    {
+        reservedmarshal_VkRayTracingPipelineCreateInfoKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkRayTracingPipelineCreateInfoKHR*)(local_pCreateInfos + i), streamPtrPtr);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)local_pAllocator;
+    memcpy((*streamPtrPtr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    if (local_pAllocator)
+    {
+        reservedmarshal_VkAllocationCallbacks(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkAllocationCallbacks*)(local_pAllocator), streamPtrPtr);
+    }
+    /* is handle, possibly out */;
+    if (((createInfoCount)))
+    {
+        uint8_t* cgen_var_4_ptr = (uint8_t*)(*streamPtrPtr);
+        for (uint32_t k = 0; k < ((createInfoCount)); ++k)
+        {
+            uint64_t tmpval = (uint64_t)(pPipelines[k]);
+            memcpy(cgen_var_4_ptr + k * 8, &tmpval, sizeof(uint64_t));
+        }
+        *streamPtrPtr += 8 * ((createInfoCount));
+    }
+    /* is handle, possibly out */;
+    if (((createInfoCount)))
+    {
+        uint64_t* cgen_var_5;
+        stream->alloc((void**)&cgen_var_5, ((createInfoCount)) * 8);
+        stream->read((uint64_t*)cgen_var_5, ((createInfoCount)) * 8);
+        stream->handleMapping()->mapHandles_u64_VkPipeline(cgen_var_5, (VkPipeline*)pPipelines, ((createInfoCount)));
+    }
+    VkResult vkCreateRayTracingPipelinesKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkCreateRayTracingPipelinesKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkCreateRayTracingPipelinesKHR_VkResult_return;
+}
+
+VkResult VkEncoder::vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(
+    VkDevice device,
+    VkPipeline pipeline,
+    uint32_t firstGroup,
+    uint32_t groupCount,
+    size_t dataSize,
+    void* pData,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkPipeline local_pipeline;
+    uint32_t local_firstGroup;
+    uint32_t local_groupCount;
+    size_t local_dataSize;
+    local_device = device;
+    local_pipeline = pipeline;
+    local_firstGroup = firstGroup;
+    local_groupCount = groupCount;
+    local_dataSize = dataSize;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(uint32_t);
+        *countPtr += 8;
+        *countPtr += ((dataSize)) * sizeof(uint8_t);
+    }
+    uint32_t packetSize_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR = OP_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipeline((*&local_pipeline));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_firstGroup, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (uint32_t*)&local_groupCount, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    uint64_t cgen_var_2 = (uint64_t)local_dataSize;
+    memcpy((*streamPtrPtr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*streamPtrPtr));
+    *streamPtrPtr += 8;
+    memcpy(*streamPtrPtr, (void*)pData, ((dataSize)) * sizeof(uint8_t));
+    *streamPtrPtr += ((dataSize)) * sizeof(uint8_t);
+    stream->read((void*)pData, ((dataSize)) * sizeof(uint8_t));
+    VkResult vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_VkResult_return = (VkResult)0;
+    stream->read(&vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_VkResult_return, sizeof(VkResult));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_VkResult_return;
+}
+
+void VkEncoder::vkCmdTraceRaysIndirectKHR(
+    VkCommandBuffer commandBuffer,
+    const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable,
+    const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable,
+    const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable,
+    const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable,
+    VkDeviceAddress indirectDeviceAddress,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    VkStridedDeviceAddressRegionKHR* local_pRaygenShaderBindingTable;
+    VkStridedDeviceAddressRegionKHR* local_pMissShaderBindingTable;
+    VkStridedDeviceAddressRegionKHR* local_pHitShaderBindingTable;
+    VkStridedDeviceAddressRegionKHR* local_pCallableShaderBindingTable;
+    VkDeviceAddress local_indirectDeviceAddress;
+    local_commandBuffer = commandBuffer;
+    local_pRaygenShaderBindingTable = nullptr;
+    if (pRaygenShaderBindingTable)
+    {
+        local_pRaygenShaderBindingTable = (VkStridedDeviceAddressRegionKHR*)pool->alloc(sizeof(const VkStridedDeviceAddressRegionKHR));
+        deepcopy_VkStridedDeviceAddressRegionKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pRaygenShaderBindingTable, (VkStridedDeviceAddressRegionKHR*)(local_pRaygenShaderBindingTable));
+    }
+    local_pMissShaderBindingTable = nullptr;
+    if (pMissShaderBindingTable)
+    {
+        local_pMissShaderBindingTable = (VkStridedDeviceAddressRegionKHR*)pool->alloc(sizeof(const VkStridedDeviceAddressRegionKHR));
+        deepcopy_VkStridedDeviceAddressRegionKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pMissShaderBindingTable, (VkStridedDeviceAddressRegionKHR*)(local_pMissShaderBindingTable));
+    }
+    local_pHitShaderBindingTable = nullptr;
+    if (pHitShaderBindingTable)
+    {
+        local_pHitShaderBindingTable = (VkStridedDeviceAddressRegionKHR*)pool->alloc(sizeof(const VkStridedDeviceAddressRegionKHR));
+        deepcopy_VkStridedDeviceAddressRegionKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pHitShaderBindingTable, (VkStridedDeviceAddressRegionKHR*)(local_pHitShaderBindingTable));
+    }
+    local_pCallableShaderBindingTable = nullptr;
+    if (pCallableShaderBindingTable)
+    {
+        local_pCallableShaderBindingTable = (VkStridedDeviceAddressRegionKHR*)pool->alloc(sizeof(const VkStridedDeviceAddressRegionKHR));
+        deepcopy_VkStridedDeviceAddressRegionKHR(pool, VK_STRUCTURE_TYPE_MAX_ENUM, pCallableShaderBindingTable, (VkStridedDeviceAddressRegionKHR*)(local_pCallableShaderBindingTable));
+    }
+    local_indirectDeviceAddress = indirectDeviceAddress;
+    if (local_pRaygenShaderBindingTable)
+    {
+        transform_tohost_VkStridedDeviceAddressRegionKHR(sResourceTracker, (VkStridedDeviceAddressRegionKHR*)(local_pRaygenShaderBindingTable));
+    }
+    if (local_pMissShaderBindingTable)
+    {
+        transform_tohost_VkStridedDeviceAddressRegionKHR(sResourceTracker, (VkStridedDeviceAddressRegionKHR*)(local_pMissShaderBindingTable));
+    }
+    if (local_pHitShaderBindingTable)
+    {
+        transform_tohost_VkStridedDeviceAddressRegionKHR(sResourceTracker, (VkStridedDeviceAddressRegionKHR*)(local_pHitShaderBindingTable));
+    }
+    if (local_pCallableShaderBindingTable)
+    {
+        transform_tohost_VkStridedDeviceAddressRegionKHR(sResourceTracker, (VkStridedDeviceAddressRegionKHR*)(local_pCallableShaderBindingTable));
+    }
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        count_VkStridedDeviceAddressRegionKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStridedDeviceAddressRegionKHR*)(local_pRaygenShaderBindingTable), countPtr);
+        count_VkStridedDeviceAddressRegionKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStridedDeviceAddressRegionKHR*)(local_pMissShaderBindingTable), countPtr);
+        count_VkStridedDeviceAddressRegionKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStridedDeviceAddressRegionKHR*)(local_pHitShaderBindingTable), countPtr);
+        count_VkStridedDeviceAddressRegionKHR(sFeatureBits, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStridedDeviceAddressRegionKHR*)(local_pCallableShaderBindingTable), countPtr);
+        *countPtr += sizeof(VkDeviceAddress);
+    }
+    uint32_t packetSize_vkCmdTraceRaysIndirectKHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdTraceRaysIndirectKHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdTraceRaysIndirectKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdTraceRaysIndirectKHR = OP_vkCmdTraceRaysIndirectKHR;
+    memcpy(streamPtr, &opcode_vkCmdTraceRaysIndirectKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdTraceRaysIndirectKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    reservedmarshal_VkStridedDeviceAddressRegionKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStridedDeviceAddressRegionKHR*)(local_pRaygenShaderBindingTable), streamPtrPtr);
+    reservedmarshal_VkStridedDeviceAddressRegionKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStridedDeviceAddressRegionKHR*)(local_pMissShaderBindingTable), streamPtrPtr);
+    reservedmarshal_VkStridedDeviceAddressRegionKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStridedDeviceAddressRegionKHR*)(local_pHitShaderBindingTable), streamPtrPtr);
+    reservedmarshal_VkStridedDeviceAddressRegionKHR(stream, VK_STRUCTURE_TYPE_MAX_ENUM, (VkStridedDeviceAddressRegionKHR*)(local_pCallableShaderBindingTable), streamPtrPtr);
+    memcpy(*streamPtrPtr, (VkDeviceAddress*)&local_indirectDeviceAddress, sizeof(VkDeviceAddress));
+    *streamPtrPtr += sizeof(VkDeviceAddress);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+VkDeviceSize VkEncoder::vkGetRayTracingShaderGroupStackSizeKHR(
+    VkDevice device,
+    VkPipeline pipeline,
+    uint32_t group,
+    VkShaderGroupShaderKHR groupShader,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkDevice local_device;
+    VkPipeline local_pipeline;
+    uint32_t local_group;
+    VkShaderGroupShaderKHR local_groupShader;
+    local_device = device;
+    local_pipeline = pipeline;
+    local_group = group;
+    local_groupShader = groupShader;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        uint64_t cgen_var_1;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+        *countPtr += sizeof(VkShaderGroupShaderKHR);
+    }
+    uint32_t packetSize_vkGetRayTracingShaderGroupStackSizeKHR = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkGetRayTracingShaderGroupStackSizeKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkGetRayTracingShaderGroupStackSizeKHR = OP_vkGetRayTracingShaderGroupStackSizeKHR;
+    uint32_t seqno; if (queueSubmitWithCommandsEnabled) seqno = ResourceTracker::nextSeqno();
+    memcpy(streamPtr, &opcode_vkGetRayTracingShaderGroupStackSizeKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkGetRayTracingShaderGroupStackSizeKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (queueSubmitWithCommandsEnabled) { memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t); }
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDevice((*&local_device));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipeline((*&local_pipeline));
+    memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *streamPtrPtr += 1 * 8;
+    memcpy(*streamPtrPtr, (uint32_t*)&local_group, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    memcpy(*streamPtrPtr, (VkShaderGroupShaderKHR*)&local_groupShader, sizeof(VkShaderGroupShaderKHR));
+    *streamPtrPtr += sizeof(VkShaderGroupShaderKHR);
+    VkDeviceSize vkGetRayTracingShaderGroupStackSizeKHR_VkDeviceSize_return = (VkDeviceSize)0;
+    stream->read(&vkGetRayTracingShaderGroupStackSizeKHR_VkDeviceSize_return, sizeof(VkDeviceSize));
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+    return vkGetRayTracingShaderGroupStackSizeKHR_VkDeviceSize_return;
+}
+
+void VkEncoder::vkCmdSetRayTracingPipelineStackSizeKHR(
+    VkCommandBuffer commandBuffer,
+    uint32_t pipelineStackSize,
+    uint32_t doLock)
+{
+    (void)doLock;
+    bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+    if (!queueSubmitWithCommandsEnabled && doLock) this->lock();
+    auto stream = mImpl->stream();
+    auto pool = mImpl->pool();
+    VkCommandBuffer local_commandBuffer;
+    uint32_t local_pipelineStackSize;
+    local_commandBuffer = commandBuffer;
+    local_pipelineStackSize = pipelineStackSize;
+    size_t count = 0;
+    size_t* countPtr = &count;
+    {
+        uint64_t cgen_var_0;
+        *countPtr += 1 * 8;
+        *countPtr += sizeof(uint32_t);
+    }
+    uint32_t packetSize_vkCmdSetRayTracingPipelineStackSizeKHR = 4 + 4 + count;
+    if (queueSubmitWithCommandsEnabled) packetSize_vkCmdSetRayTracingPipelineStackSizeKHR -= 8;
+    uint8_t* streamPtr = stream->reserve(packetSize_vkCmdSetRayTracingPipelineStackSizeKHR);
+    uint8_t** streamPtrPtr = &streamPtr;
+    uint32_t opcode_vkCmdSetRayTracingPipelineStackSizeKHR = OP_vkCmdSetRayTracingPipelineStackSizeKHR;
+    memcpy(streamPtr, &opcode_vkCmdSetRayTracingPipelineStackSizeKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    memcpy(streamPtr, &packetSize_vkCmdSetRayTracingPipelineStackSizeKHR, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+    if (!queueSubmitWithCommandsEnabled)
+    {
+        uint64_t cgen_var_0;
+        *&cgen_var_0 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+        memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_0, 1 * 8);
+        *streamPtrPtr += 1 * 8;
+    }
+    memcpy(*streamPtrPtr, (uint32_t*)&local_pipelineStackSize, sizeof(uint32_t));
+    *streamPtrPtr += sizeof(uint32_t);
+    ++encodeCount;;
+    if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+    {
+        pool->freeAll();
+        stream->clearPool();
+    }
+    if (!queueSubmitWithCommandsEnabled && doLock) this->unlock();
+}
+
+#endif
+#ifdef VK_KHR_ray_query
 #endif
 
 } // namespace goldfish_vk
diff --git a/system/vulkan_enc/VkEncoder.cpp.inl b/system/vulkan_enc/VkEncoder.cpp.inl
new file mode 100644
index 0000000..dae61b5
--- /dev/null
+++ b/system/vulkan_enc/VkEncoder.cpp.inl
@@ -0,0 +1,94 @@
+static ResourceTracker* sResourceTracker = nullptr;
+static uint32_t sFeatureBits = 0;
+
+class VkEncoder::Impl {
+public:
+    Impl(IOStream* stream) : m_stream(stream), m_logEncodes(false) {
+        if (!sResourceTracker) sResourceTracker = ResourceTracker::get();
+        m_stream.incStreamRef();
+        const char* emuVkLogEncodesPropName = "qemu.vk.log";
+        char encodeProp[PROPERTY_VALUE_MAX];
+        if (property_get(emuVkLogEncodesPropName, encodeProp, nullptr) > 0) {
+            m_logEncodes = atoi(encodeProp) > 0;
+        }
+        sFeatureBits = m_stream.getFeatureBits();
+    }
+
+    ~Impl() {
+        m_stream.decStreamRef();
+    }
+
+    VulkanCountingStream* countingStream() { return &m_countingStream; }
+    VulkanStreamGuest* stream() { return &m_stream; }
+    BumpPool* pool() { return &m_pool; }
+    ResourceTracker* resources() { return ResourceTracker::get(); }
+    Validation* validation() { return &m_validation; }
+
+    void log(const char* text) {
+        if (!m_logEncodes) return;
+        ALOGD("encoder log: %s", text);
+    }
+
+    void flush() {
+        lock();
+        m_stream.flush();
+        unlock();
+    }
+
+    // can be recursive
+    void lock() {
+        while (mLock.test_and_set(std::memory_order_acquire));
+    }
+
+    void unlock() {
+        mLock.clear(std::memory_order_release);
+    }
+
+private:
+    VulkanCountingStream m_countingStream;
+    VulkanStreamGuest m_stream;
+    BumpPool m_pool;
+
+    Validation m_validation;
+    bool m_logEncodes;
+    std::atomic_flag mLock = ATOMIC_FLAG_INIT;
+};
+
+VkEncoder::~VkEncoder() { }
+
+struct EncoderAutoLock {
+    EncoderAutoLock(VkEncoder* enc) : mEnc(enc) {
+        mEnc->lock();
+    }
+    ~EncoderAutoLock() {
+        mEnc->unlock();
+    }
+    VkEncoder* mEnc;
+};
+
+VkEncoder::VkEncoder(IOStream *stream) :
+    mImpl(new VkEncoder::Impl(stream)) { }
+
+void VkEncoder::flush() {
+    mImpl->flush();
+}
+
+void VkEncoder::lock() {
+    mImpl->lock();
+}
+
+void VkEncoder::unlock() {
+    mImpl->unlock();
+}
+
+void VkEncoder::incRef() {
+    __atomic_add_fetch(&refCount, 1, __ATOMIC_SEQ_CST);
+}
+
+bool VkEncoder::decRef() {
+    if (0 == __atomic_sub_fetch(&refCount, 1, __ATOMIC_SEQ_CST)) {
+        delete this;
+        return true;
+    }
+    return false;
+}
diff --git a/system/vulkan_enc/VkEncoder.h b/system/vulkan_enc/VkEncoder.h
index 8f49f7d..160632a 100644
--- a/system/vulkan_enc/VkEncoder.h
+++ b/system/vulkan_enc/VkEncoder.h
@@ -28,7 +28,6 @@
 
 
 #include "goldfish_vk_private_defs.h"
-#include <functional>
 #include <memory>
 class IOStream;
 
@@ -41,30 +40,31 @@
     VkEncoder(IOStream* stream);
     ~VkEncoder();
 
-    void flush();
-
-    using CleanupCallback = std::function<void()>;
-    void registerCleanupCallback(void* handle, CleanupCallback cb);
-    void unregisterCleanupCallback(void* handle);
+#include "VkEncoder.h.inl"
 #ifdef VK_VERSION_1_0
     VkResult vkCreateInstance(
     const VkInstanceCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkInstance* pInstance);
+        VkInstance* pInstance,
+        uint32_t doLock);
     void vkDestroyInstance(
     VkInstance instance,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkEnumeratePhysicalDevices(
     VkInstance instance,
         uint32_t* pPhysicalDeviceCount,
-        VkPhysicalDevice* pPhysicalDevices);
+        VkPhysicalDevice* pPhysicalDevices,
+        uint32_t doLock);
     void vkGetPhysicalDeviceFeatures(
     VkPhysicalDevice physicalDevice,
-        VkPhysicalDeviceFeatures* pFeatures);
+        VkPhysicalDeviceFeatures* pFeatures,
+        uint32_t doLock);
     void vkGetPhysicalDeviceFormatProperties(
     VkPhysicalDevice physicalDevice,
         VkFormat format,
-        VkFormatProperties* pFormatProperties);
+        VkFormatProperties* pFormatProperties,
+        uint32_t doLock);
     VkResult vkGetPhysicalDeviceImageFormatProperties(
     VkPhysicalDevice physicalDevice,
         VkFormat format,
@@ -72,115 +72,143 @@
         VkImageTiling tiling,
         VkImageUsageFlags usage,
         VkImageCreateFlags flags,
-        VkImageFormatProperties* pImageFormatProperties);
+        VkImageFormatProperties* pImageFormatProperties,
+        uint32_t doLock);
     void vkGetPhysicalDeviceProperties(
     VkPhysicalDevice physicalDevice,
-        VkPhysicalDeviceProperties* pProperties);
+        VkPhysicalDeviceProperties* pProperties,
+        uint32_t doLock);
     void vkGetPhysicalDeviceQueueFamilyProperties(
     VkPhysicalDevice physicalDevice,
         uint32_t* pQueueFamilyPropertyCount,
-        VkQueueFamilyProperties* pQueueFamilyProperties);
+        VkQueueFamilyProperties* pQueueFamilyProperties,
+        uint32_t doLock);
     void vkGetPhysicalDeviceMemoryProperties(
     VkPhysicalDevice physicalDevice,
-        VkPhysicalDeviceMemoryProperties* pMemoryProperties);
+        VkPhysicalDeviceMemoryProperties* pMemoryProperties,
+        uint32_t doLock);
     PFN_vkVoidFunction vkGetInstanceProcAddr(
     VkInstance instance,
-        const char* pName);
+        const char* pName,
+        uint32_t doLock);
     PFN_vkVoidFunction vkGetDeviceProcAddr(
     VkDevice device,
-        const char* pName);
+        const char* pName,
+        uint32_t doLock);
     VkResult vkCreateDevice(
     VkPhysicalDevice physicalDevice,
         const VkDeviceCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkDevice* pDevice);
+        VkDevice* pDevice,
+        uint32_t doLock);
     void vkDestroyDevice(
     VkDevice device,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkEnumerateInstanceExtensionProperties(
     const char* pLayerName,
         uint32_t* pPropertyCount,
-        VkExtensionProperties* pProperties);
+        VkExtensionProperties* pProperties,
+        uint32_t doLock);
     VkResult vkEnumerateDeviceExtensionProperties(
     VkPhysicalDevice physicalDevice,
         const char* pLayerName,
         uint32_t* pPropertyCount,
-        VkExtensionProperties* pProperties);
+        VkExtensionProperties* pProperties,
+        uint32_t doLock);
     VkResult vkEnumerateInstanceLayerProperties(
     uint32_t* pPropertyCount,
-        VkLayerProperties* pProperties);
+        VkLayerProperties* pProperties,
+        uint32_t doLock);
     VkResult vkEnumerateDeviceLayerProperties(
     VkPhysicalDevice physicalDevice,
         uint32_t* pPropertyCount,
-        VkLayerProperties* pProperties);
+        VkLayerProperties* pProperties,
+        uint32_t doLock);
     void vkGetDeviceQueue(
     VkDevice device,
         uint32_t queueFamilyIndex,
         uint32_t queueIndex,
-        VkQueue* pQueue);
+        VkQueue* pQueue,
+        uint32_t doLock);
     VkResult vkQueueSubmit(
     VkQueue queue,
         uint32_t submitCount,
         const VkSubmitInfo* pSubmits,
-        VkFence fence);
+        VkFence fence,
+        uint32_t doLock);
     VkResult vkQueueWaitIdle(
-    VkQueue queue);
+    VkQueue queue,
+        uint32_t doLock);
     VkResult vkDeviceWaitIdle(
-    VkDevice device);
+    VkDevice device,
+        uint32_t doLock);
     VkResult vkAllocateMemory(
     VkDevice device,
         const VkMemoryAllocateInfo* pAllocateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkDeviceMemory* pMemory);
+        VkDeviceMemory* pMemory,
+        uint32_t doLock);
     void vkFreeMemory(
     VkDevice device,
         VkDeviceMemory memory,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkMapMemory(
     VkDevice device,
         VkDeviceMemory memory,
         VkDeviceSize offset,
         VkDeviceSize size,
         VkMemoryMapFlags flags,
-        void** ppData);
+        void** ppData,
+        uint32_t doLock);
     void vkUnmapMemory(
     VkDevice device,
-        VkDeviceMemory memory);
+        VkDeviceMemory memory,
+        uint32_t doLock);
     VkResult vkFlushMappedMemoryRanges(
     VkDevice device,
         uint32_t memoryRangeCount,
-        const VkMappedMemoryRange* pMemoryRanges);
+        const VkMappedMemoryRange* pMemoryRanges,
+        uint32_t doLock);
     VkResult vkInvalidateMappedMemoryRanges(
     VkDevice device,
         uint32_t memoryRangeCount,
-        const VkMappedMemoryRange* pMemoryRanges);
+        const VkMappedMemoryRange* pMemoryRanges,
+        uint32_t doLock);
     void vkGetDeviceMemoryCommitment(
     VkDevice device,
         VkDeviceMemory memory,
-        VkDeviceSize* pCommittedMemoryInBytes);
+        VkDeviceSize* pCommittedMemoryInBytes,
+        uint32_t doLock);
     VkResult vkBindBufferMemory(
     VkDevice device,
         VkBuffer buffer,
         VkDeviceMemory memory,
-        VkDeviceSize memoryOffset);
+        VkDeviceSize memoryOffset,
+        uint32_t doLock);
     VkResult vkBindImageMemory(
     VkDevice device,
         VkImage image,
         VkDeviceMemory memory,
-        VkDeviceSize memoryOffset);
+        VkDeviceSize memoryOffset,
+        uint32_t doLock);
     void vkGetBufferMemoryRequirements(
     VkDevice device,
         VkBuffer buffer,
-        VkMemoryRequirements* pMemoryRequirements);
+        VkMemoryRequirements* pMemoryRequirements,
+        uint32_t doLock);
     void vkGetImageMemoryRequirements(
     VkDevice device,
         VkImage image,
-        VkMemoryRequirements* pMemoryRequirements);
+        VkMemoryRequirements* pMemoryRequirements,
+        uint32_t doLock);
     void vkGetImageSparseMemoryRequirements(
     VkDevice device,
         VkImage image,
         uint32_t* pSparseMemoryRequirementCount,
-        VkSparseImageMemoryRequirements* pSparseMemoryRequirements);
+        VkSparseImageMemoryRequirements* pSparseMemoryRequirements,
+        uint32_t doLock);
     void vkGetPhysicalDeviceSparseImageFormatProperties(
     VkPhysicalDevice physicalDevice,
         VkFormat format,
@@ -189,70 +217,86 @@
         VkImageUsageFlags usage,
         VkImageTiling tiling,
         uint32_t* pPropertyCount,
-        VkSparseImageFormatProperties* pProperties);
+        VkSparseImageFormatProperties* pProperties,
+        uint32_t doLock);
     VkResult vkQueueBindSparse(
     VkQueue queue,
         uint32_t bindInfoCount,
         const VkBindSparseInfo* pBindInfo,
-        VkFence fence);
+        VkFence fence,
+        uint32_t doLock);
     VkResult vkCreateFence(
     VkDevice device,
         const VkFenceCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkFence* pFence);
+        VkFence* pFence,
+        uint32_t doLock);
     void vkDestroyFence(
     VkDevice device,
         VkFence fence,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkResetFences(
     VkDevice device,
         uint32_t fenceCount,
-        const VkFence* pFences);
+        const VkFence* pFences,
+        uint32_t doLock);
     VkResult vkGetFenceStatus(
     VkDevice device,
-        VkFence fence);
+        VkFence fence,
+        uint32_t doLock);
     VkResult vkWaitForFences(
     VkDevice device,
         uint32_t fenceCount,
         const VkFence* pFences,
         VkBool32 waitAll,
-        uint64_t timeout);
+        uint64_t timeout,
+        uint32_t doLock);
     VkResult vkCreateSemaphore(
     VkDevice device,
         const VkSemaphoreCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkSemaphore* pSemaphore);
+        VkSemaphore* pSemaphore,
+        uint32_t doLock);
     void vkDestroySemaphore(
     VkDevice device,
         VkSemaphore semaphore,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkCreateEvent(
     VkDevice device,
         const VkEventCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkEvent* pEvent);
+        VkEvent* pEvent,
+        uint32_t doLock);
     void vkDestroyEvent(
     VkDevice device,
         VkEvent event,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkGetEventStatus(
     VkDevice device,
-        VkEvent event);
+        VkEvent event,
+        uint32_t doLock);
     VkResult vkSetEvent(
     VkDevice device,
-        VkEvent event);
+        VkEvent event,
+        uint32_t doLock);
     VkResult vkResetEvent(
     VkDevice device,
-        VkEvent event);
+        VkEvent event,
+        uint32_t doLock);
     VkResult vkCreateQueryPool(
     VkDevice device,
         const VkQueryPoolCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkQueryPool* pQueryPool);
+        VkQueryPool* pQueryPool,
+        uint32_t doLock);
     void vkDestroyQueryPool(
     VkDevice device,
         VkQueryPool queryPool,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkGetQueryPoolResults(
     VkDevice device,
         VkQueryPool queryPool,
@@ -261,242 +305,296 @@
         size_t dataSize,
         void* pData,
         VkDeviceSize stride,
-        VkQueryResultFlags flags);
+        VkQueryResultFlags flags,
+        uint32_t doLock);
     VkResult vkCreateBuffer(
     VkDevice device,
         const VkBufferCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkBuffer* pBuffer);
+        VkBuffer* pBuffer,
+        uint32_t doLock);
     void vkDestroyBuffer(
     VkDevice device,
         VkBuffer buffer,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkCreateBufferView(
     VkDevice device,
         const VkBufferViewCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkBufferView* pView);
+        VkBufferView* pView,
+        uint32_t doLock);
     void vkDestroyBufferView(
     VkDevice device,
         VkBufferView bufferView,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkCreateImage(
     VkDevice device,
         const VkImageCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkImage* pImage);
+        VkImage* pImage,
+        uint32_t doLock);
     void vkDestroyImage(
     VkDevice device,
         VkImage image,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     void vkGetImageSubresourceLayout(
     VkDevice device,
         VkImage image,
         const VkImageSubresource* pSubresource,
-        VkSubresourceLayout* pLayout);
+        VkSubresourceLayout* pLayout,
+        uint32_t doLock);
     VkResult vkCreateImageView(
     VkDevice device,
         const VkImageViewCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkImageView* pView);
+        VkImageView* pView,
+        uint32_t doLock);
     void vkDestroyImageView(
     VkDevice device,
         VkImageView imageView,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkCreateShaderModule(
     VkDevice device,
         const VkShaderModuleCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkShaderModule* pShaderModule);
+        VkShaderModule* pShaderModule,
+        uint32_t doLock);
     void vkDestroyShaderModule(
     VkDevice device,
         VkShaderModule shaderModule,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkCreatePipelineCache(
     VkDevice device,
         const VkPipelineCacheCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkPipelineCache* pPipelineCache);
+        VkPipelineCache* pPipelineCache,
+        uint32_t doLock);
     void vkDestroyPipelineCache(
     VkDevice device,
         VkPipelineCache pipelineCache,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkGetPipelineCacheData(
     VkDevice device,
         VkPipelineCache pipelineCache,
         size_t* pDataSize,
-        void* pData);
+        void* pData,
+        uint32_t doLock);
     VkResult vkMergePipelineCaches(
     VkDevice device,
         VkPipelineCache dstCache,
         uint32_t srcCacheCount,
-        const VkPipelineCache* pSrcCaches);
+        const VkPipelineCache* pSrcCaches,
+        uint32_t doLock);
     VkResult vkCreateGraphicsPipelines(
     VkDevice device,
         VkPipelineCache pipelineCache,
         uint32_t createInfoCount,
         const VkGraphicsPipelineCreateInfo* pCreateInfos,
         const VkAllocationCallbacks* pAllocator,
-        VkPipeline* pPipelines);
+        VkPipeline* pPipelines,
+        uint32_t doLock);
     VkResult vkCreateComputePipelines(
     VkDevice device,
         VkPipelineCache pipelineCache,
         uint32_t createInfoCount,
         const VkComputePipelineCreateInfo* pCreateInfos,
         const VkAllocationCallbacks* pAllocator,
-        VkPipeline* pPipelines);
+        VkPipeline* pPipelines,
+        uint32_t doLock);
     void vkDestroyPipeline(
     VkDevice device,
         VkPipeline pipeline,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkCreatePipelineLayout(
     VkDevice device,
         const VkPipelineLayoutCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkPipelineLayout* pPipelineLayout);
+        VkPipelineLayout* pPipelineLayout,
+        uint32_t doLock);
     void vkDestroyPipelineLayout(
     VkDevice device,
         VkPipelineLayout pipelineLayout,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkCreateSampler(
     VkDevice device,
         const VkSamplerCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkSampler* pSampler);
+        VkSampler* pSampler,
+        uint32_t doLock);
     void vkDestroySampler(
     VkDevice device,
         VkSampler sampler,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkCreateDescriptorSetLayout(
     VkDevice device,
         const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkDescriptorSetLayout* pSetLayout);
+        VkDescriptorSetLayout* pSetLayout,
+        uint32_t doLock);
     void vkDestroyDescriptorSetLayout(
     VkDevice device,
         VkDescriptorSetLayout descriptorSetLayout,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkCreateDescriptorPool(
     VkDevice device,
         const VkDescriptorPoolCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkDescriptorPool* pDescriptorPool);
+        VkDescriptorPool* pDescriptorPool,
+        uint32_t doLock);
     void vkDestroyDescriptorPool(
     VkDevice device,
         VkDescriptorPool descriptorPool,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkResetDescriptorPool(
     VkDevice device,
         VkDescriptorPool descriptorPool,
-        VkDescriptorPoolResetFlags flags);
+        VkDescriptorPoolResetFlags flags,
+        uint32_t doLock);
     VkResult vkAllocateDescriptorSets(
     VkDevice device,
         const VkDescriptorSetAllocateInfo* pAllocateInfo,
-        VkDescriptorSet* pDescriptorSets);
+        VkDescriptorSet* pDescriptorSets,
+        uint32_t doLock);
     VkResult vkFreeDescriptorSets(
     VkDevice device,
         VkDescriptorPool descriptorPool,
         uint32_t descriptorSetCount,
-        const VkDescriptorSet* pDescriptorSets);
+        const VkDescriptorSet* pDescriptorSets,
+        uint32_t doLock);
     void vkUpdateDescriptorSets(
     VkDevice device,
         uint32_t descriptorWriteCount,
         const VkWriteDescriptorSet* pDescriptorWrites,
         uint32_t descriptorCopyCount,
-        const VkCopyDescriptorSet* pDescriptorCopies);
+        const VkCopyDescriptorSet* pDescriptorCopies,
+        uint32_t doLock);
     VkResult vkCreateFramebuffer(
     VkDevice device,
         const VkFramebufferCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkFramebuffer* pFramebuffer);
+        VkFramebuffer* pFramebuffer,
+        uint32_t doLock);
     void vkDestroyFramebuffer(
     VkDevice device,
         VkFramebuffer framebuffer,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkCreateRenderPass(
     VkDevice device,
         const VkRenderPassCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkRenderPass* pRenderPass);
+        VkRenderPass* pRenderPass,
+        uint32_t doLock);
     void vkDestroyRenderPass(
     VkDevice device,
         VkRenderPass renderPass,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     void vkGetRenderAreaGranularity(
     VkDevice device,
         VkRenderPass renderPass,
-        VkExtent2D* pGranularity);
+        VkExtent2D* pGranularity,
+        uint32_t doLock);
     VkResult vkCreateCommandPool(
     VkDevice device,
         const VkCommandPoolCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkCommandPool* pCommandPool);
+        VkCommandPool* pCommandPool,
+        uint32_t doLock);
     void vkDestroyCommandPool(
     VkDevice device,
         VkCommandPool commandPool,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkResetCommandPool(
     VkDevice device,
         VkCommandPool commandPool,
-        VkCommandPoolResetFlags flags);
+        VkCommandPoolResetFlags flags,
+        uint32_t doLock);
     VkResult vkAllocateCommandBuffers(
     VkDevice device,
         const VkCommandBufferAllocateInfo* pAllocateInfo,
-        VkCommandBuffer* pCommandBuffers);
+        VkCommandBuffer* pCommandBuffers,
+        uint32_t doLock);
     void vkFreeCommandBuffers(
     VkDevice device,
         VkCommandPool commandPool,
         uint32_t commandBufferCount,
-        const VkCommandBuffer* pCommandBuffers);
+        const VkCommandBuffer* pCommandBuffers,
+        uint32_t doLock);
     VkResult vkBeginCommandBuffer(
     VkCommandBuffer commandBuffer,
-        const VkCommandBufferBeginInfo* pBeginInfo);
+        const VkCommandBufferBeginInfo* pBeginInfo,
+        uint32_t doLock);
     VkResult vkEndCommandBuffer(
-    VkCommandBuffer commandBuffer);
+    VkCommandBuffer commandBuffer,
+        uint32_t doLock);
     VkResult vkResetCommandBuffer(
     VkCommandBuffer commandBuffer,
-        VkCommandBufferResetFlags flags);
+        VkCommandBufferResetFlags flags,
+        uint32_t doLock);
     void vkCmdBindPipeline(
     VkCommandBuffer commandBuffer,
         VkPipelineBindPoint pipelineBindPoint,
-        VkPipeline pipeline);
+        VkPipeline pipeline,
+        uint32_t doLock);
     void vkCmdSetViewport(
     VkCommandBuffer commandBuffer,
         uint32_t firstViewport,
         uint32_t viewportCount,
-        const VkViewport* pViewports);
+        const VkViewport* pViewports,
+        uint32_t doLock);
     void vkCmdSetScissor(
     VkCommandBuffer commandBuffer,
         uint32_t firstScissor,
         uint32_t scissorCount,
-        const VkRect2D* pScissors);
+        const VkRect2D* pScissors,
+        uint32_t doLock);
     void vkCmdSetLineWidth(
     VkCommandBuffer commandBuffer,
-        float lineWidth);
+        float lineWidth,
+        uint32_t doLock);
     void vkCmdSetDepthBias(
     VkCommandBuffer commandBuffer,
         float depthBiasConstantFactor,
         float depthBiasClamp,
-        float depthBiasSlopeFactor);
+        float depthBiasSlopeFactor,
+        uint32_t doLock);
     void vkCmdSetBlendConstants(
     VkCommandBuffer commandBuffer,
-        const float blendConstants[4]);
+        const float blendConstants[4],
+        uint32_t doLock);
     void vkCmdSetDepthBounds(
     VkCommandBuffer commandBuffer,
         float minDepthBounds,
-        float maxDepthBounds);
+        float maxDepthBounds,
+        uint32_t doLock);
     void vkCmdSetStencilCompareMask(
     VkCommandBuffer commandBuffer,
         VkStencilFaceFlags faceMask,
-        uint32_t compareMask);
+        uint32_t compareMask,
+        uint32_t doLock);
     void vkCmdSetStencilWriteMask(
     VkCommandBuffer commandBuffer,
         VkStencilFaceFlags faceMask,
-        uint32_t writeMask);
+        uint32_t writeMask,
+        uint32_t doLock);
     void vkCmdSetStencilReference(
     VkCommandBuffer commandBuffer,
         VkStencilFaceFlags faceMask,
-        uint32_t reference);
+        uint32_t reference,
+        uint32_t doLock);
     void vkCmdBindDescriptorSets(
     VkCommandBuffer commandBuffer,
         VkPipelineBindPoint pipelineBindPoint,
@@ -505,58 +603,68 @@
         uint32_t descriptorSetCount,
         const VkDescriptorSet* pDescriptorSets,
         uint32_t dynamicOffsetCount,
-        const uint32_t* pDynamicOffsets);
+        const uint32_t* pDynamicOffsets,
+        uint32_t doLock);
     void vkCmdBindIndexBuffer(
     VkCommandBuffer commandBuffer,
         VkBuffer buffer,
         VkDeviceSize offset,
-        VkIndexType indexType);
+        VkIndexType indexType,
+        uint32_t doLock);
     void vkCmdBindVertexBuffers(
     VkCommandBuffer commandBuffer,
         uint32_t firstBinding,
         uint32_t bindingCount,
         const VkBuffer* pBuffers,
-        const VkDeviceSize* pOffsets);
+        const VkDeviceSize* pOffsets,
+        uint32_t doLock);
     void vkCmdDraw(
     VkCommandBuffer commandBuffer,
         uint32_t vertexCount,
         uint32_t instanceCount,
         uint32_t firstVertex,
-        uint32_t firstInstance);
+        uint32_t firstInstance,
+        uint32_t doLock);
     void vkCmdDrawIndexed(
     VkCommandBuffer commandBuffer,
         uint32_t indexCount,
         uint32_t instanceCount,
         uint32_t firstIndex,
         int32_t vertexOffset,
-        uint32_t firstInstance);
+        uint32_t firstInstance,
+        uint32_t doLock);
     void vkCmdDrawIndirect(
     VkCommandBuffer commandBuffer,
         VkBuffer buffer,
         VkDeviceSize offset,
         uint32_t drawCount,
-        uint32_t stride);
+        uint32_t stride,
+        uint32_t doLock);
     void vkCmdDrawIndexedIndirect(
     VkCommandBuffer commandBuffer,
         VkBuffer buffer,
         VkDeviceSize offset,
         uint32_t drawCount,
-        uint32_t stride);
+        uint32_t stride,
+        uint32_t doLock);
     void vkCmdDispatch(
     VkCommandBuffer commandBuffer,
         uint32_t groupCountX,
         uint32_t groupCountY,
-        uint32_t groupCountZ);
+        uint32_t groupCountZ,
+        uint32_t doLock);
     void vkCmdDispatchIndirect(
     VkCommandBuffer commandBuffer,
         VkBuffer buffer,
-        VkDeviceSize offset);
+        VkDeviceSize offset,
+        uint32_t doLock);
     void vkCmdCopyBuffer(
     VkCommandBuffer commandBuffer,
         VkBuffer srcBuffer,
         VkBuffer dstBuffer,
         uint32_t regionCount,
-        const VkBufferCopy* pRegions);
+        const VkBufferCopy* pRegions,
+        uint32_t doLock);
     void vkCmdCopyImage(
     VkCommandBuffer commandBuffer,
         VkImage srcImage,
@@ -564,7 +672,8 @@
         VkImage dstImage,
         VkImageLayout dstImageLayout,
         uint32_t regionCount,
-        const VkImageCopy* pRegions);
+        const VkImageCopy* pRegions,
+        uint32_t doLock);
     void vkCmdBlitImage(
     VkCommandBuffer commandBuffer,
         VkImage srcImage,
@@ -573,53 +682,61 @@
         VkImageLayout dstImageLayout,
         uint32_t regionCount,
         const VkImageBlit* pRegions,
-        VkFilter filter);
+        VkFilter filter,
+        uint32_t doLock);
     void vkCmdCopyBufferToImage(
     VkCommandBuffer commandBuffer,
         VkBuffer srcBuffer,
         VkImage dstImage,
         VkImageLayout dstImageLayout,
         uint32_t regionCount,
-        const VkBufferImageCopy* pRegions);
+        const VkBufferImageCopy* pRegions,
+        uint32_t doLock);
     void vkCmdCopyImageToBuffer(
     VkCommandBuffer commandBuffer,
         VkImage srcImage,
         VkImageLayout srcImageLayout,
         VkBuffer dstBuffer,
         uint32_t regionCount,
-        const VkBufferImageCopy* pRegions);
+        const VkBufferImageCopy* pRegions,
+        uint32_t doLock);
     void vkCmdUpdateBuffer(
     VkCommandBuffer commandBuffer,
         VkBuffer dstBuffer,
         VkDeviceSize dstOffset,
         VkDeviceSize dataSize,
-        const void* pData);
+        const void* pData,
+        uint32_t doLock);
     void vkCmdFillBuffer(
     VkCommandBuffer commandBuffer,
         VkBuffer dstBuffer,
         VkDeviceSize dstOffset,
         VkDeviceSize size,
-        uint32_t data);
+        uint32_t data,
+        uint32_t doLock);
     void vkCmdClearColorImage(
     VkCommandBuffer commandBuffer,
         VkImage image,
         VkImageLayout imageLayout,
         const VkClearColorValue* pColor,
         uint32_t rangeCount,
-        const VkImageSubresourceRange* pRanges);
+        const VkImageSubresourceRange* pRanges,
+        uint32_t doLock);
     void vkCmdClearDepthStencilImage(
     VkCommandBuffer commandBuffer,
         VkImage image,
         VkImageLayout imageLayout,
         const VkClearDepthStencilValue* pDepthStencil,
         uint32_t rangeCount,
-        const VkImageSubresourceRange* pRanges);
+        const VkImageSubresourceRange* pRanges,
+        uint32_t doLock);
     void vkCmdClearAttachments(
     VkCommandBuffer commandBuffer,
         uint32_t attachmentCount,
         const VkClearAttachment* pAttachments,
         uint32_t rectCount,
-        const VkClearRect* pRects);
+        const VkClearRect* pRects,
+        uint32_t doLock);
     void vkCmdResolveImage(
     VkCommandBuffer commandBuffer,
         VkImage srcImage,
@@ -627,15 +744,18 @@
         VkImage dstImage,
         VkImageLayout dstImageLayout,
         uint32_t regionCount,
-        const VkImageResolve* pRegions);
+        const VkImageResolve* pRegions,
+        uint32_t doLock);
     void vkCmdSetEvent(
     VkCommandBuffer commandBuffer,
         VkEvent event,
-        VkPipelineStageFlags stageMask);
+        VkPipelineStageFlags stageMask,
+        uint32_t doLock);
     void vkCmdResetEvent(
     VkCommandBuffer commandBuffer,
         VkEvent event,
-        VkPipelineStageFlags stageMask);
+        VkPipelineStageFlags stageMask,
+        uint32_t doLock);
     void vkCmdWaitEvents(
     VkCommandBuffer commandBuffer,
         uint32_t eventCount,
@@ -647,7 +767,8 @@
         uint32_t bufferMemoryBarrierCount,
         const VkBufferMemoryBarrier* pBufferMemoryBarriers,
         uint32_t imageMemoryBarrierCount,
-        const VkImageMemoryBarrier* pImageMemoryBarriers);
+        const VkImageMemoryBarrier* pImageMemoryBarriers,
+        uint32_t doLock);
     void vkCmdPipelineBarrier(
     VkCommandBuffer commandBuffer,
         VkPipelineStageFlags srcStageMask,
@@ -658,26 +779,31 @@
         uint32_t bufferMemoryBarrierCount,
         const VkBufferMemoryBarrier* pBufferMemoryBarriers,
         uint32_t imageMemoryBarrierCount,
-        const VkImageMemoryBarrier* pImageMemoryBarriers);
+        const VkImageMemoryBarrier* pImageMemoryBarriers,
+        uint32_t doLock);
     void vkCmdBeginQuery(
     VkCommandBuffer commandBuffer,
         VkQueryPool queryPool,
         uint32_t query,
-        VkQueryControlFlags flags);
+        VkQueryControlFlags flags,
+        uint32_t doLock);
     void vkCmdEndQuery(
     VkCommandBuffer commandBuffer,
         VkQueryPool queryPool,
-        uint32_t query);
+        uint32_t query,
+        uint32_t doLock);
     void vkCmdResetQueryPool(
     VkCommandBuffer commandBuffer,
         VkQueryPool queryPool,
         uint32_t firstQuery,
-        uint32_t queryCount);
+        uint32_t queryCount,
+        uint32_t doLock);
     void vkCmdWriteTimestamp(
     VkCommandBuffer commandBuffer,
         VkPipelineStageFlagBits pipelineStage,
         VkQueryPool queryPool,
-        uint32_t query);
+        uint32_t query,
+        uint32_t doLock);
     void vkCmdCopyQueryPoolResults(
     VkCommandBuffer commandBuffer,
         VkQueryPool queryPool,
@@ -686,48 +812,59 @@
         VkBuffer dstBuffer,
         VkDeviceSize dstOffset,
         VkDeviceSize stride,
-        VkQueryResultFlags flags);
+        VkQueryResultFlags flags,
+        uint32_t doLock);
     void vkCmdPushConstants(
     VkCommandBuffer commandBuffer,
         VkPipelineLayout layout,
         VkShaderStageFlags stageFlags,
         uint32_t offset,
         uint32_t size,
-        const void* pValues);
+        const void* pValues,
+        uint32_t doLock);
     void vkCmdBeginRenderPass(
     VkCommandBuffer commandBuffer,
         const VkRenderPassBeginInfo* pRenderPassBegin,
-        VkSubpassContents contents);
+        VkSubpassContents contents,
+        uint32_t doLock);
     void vkCmdNextSubpass(
     VkCommandBuffer commandBuffer,
-        VkSubpassContents contents);
+        VkSubpassContents contents,
+        uint32_t doLock);
     void vkCmdEndRenderPass(
-    VkCommandBuffer commandBuffer);
+    VkCommandBuffer commandBuffer,
+        uint32_t doLock);
     void vkCmdExecuteCommands(
     VkCommandBuffer commandBuffer,
         uint32_t commandBufferCount,
-        const VkCommandBuffer* pCommandBuffers);
+        const VkCommandBuffer* pCommandBuffers,
+        uint32_t doLock);
 #endif
 #ifdef VK_VERSION_1_1
     VkResult vkEnumerateInstanceVersion(
-    uint32_t* pApiVersion);
+    uint32_t* pApiVersion,
+        uint32_t doLock);
     VkResult vkBindBufferMemory2(
     VkDevice device,
         uint32_t bindInfoCount,
-        const VkBindBufferMemoryInfo* pBindInfos);
+        const VkBindBufferMemoryInfo* pBindInfos,
+        uint32_t doLock);
     VkResult vkBindImageMemory2(
     VkDevice device,
         uint32_t bindInfoCount,
-        const VkBindImageMemoryInfo* pBindInfos);
+        const VkBindImageMemoryInfo* pBindInfos,
+        uint32_t doLock);
     void vkGetDeviceGroupPeerMemoryFeatures(
     VkDevice device,
         uint32_t heapIndex,
         uint32_t localDeviceIndex,
         uint32_t remoteDeviceIndex,
-        VkPeerMemoryFeatureFlags* pPeerMemoryFeatures);
+        VkPeerMemoryFeatureFlags* pPeerMemoryFeatures,
+        uint32_t doLock);
     void vkCmdSetDeviceMask(
     VkCommandBuffer commandBuffer,
-        uint32_t deviceMask);
+        uint32_t deviceMask,
+        uint32_t doLock);
     void vkCmdDispatchBase(
     VkCommandBuffer commandBuffer,
         uint32_t baseGroupX,
@@ -735,200 +872,316 @@
         uint32_t baseGroupZ,
         uint32_t groupCountX,
         uint32_t groupCountY,
-        uint32_t groupCountZ);
+        uint32_t groupCountZ,
+        uint32_t doLock);
     VkResult vkEnumeratePhysicalDeviceGroups(
     VkInstance instance,
         uint32_t* pPhysicalDeviceGroupCount,
-        VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties);
+        VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties,
+        uint32_t doLock);
     void vkGetImageMemoryRequirements2(
     VkDevice device,
         const VkImageMemoryRequirementsInfo2* pInfo,
-        VkMemoryRequirements2* pMemoryRequirements);
+        VkMemoryRequirements2* pMemoryRequirements,
+        uint32_t doLock);
     void vkGetBufferMemoryRequirements2(
     VkDevice device,
         const VkBufferMemoryRequirementsInfo2* pInfo,
-        VkMemoryRequirements2* pMemoryRequirements);
+        VkMemoryRequirements2* pMemoryRequirements,
+        uint32_t doLock);
     void vkGetImageSparseMemoryRequirements2(
     VkDevice device,
         const VkImageSparseMemoryRequirementsInfo2* pInfo,
         uint32_t* pSparseMemoryRequirementCount,
-        VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
+        VkSparseImageMemoryRequirements2* pSparseMemoryRequirements,
+        uint32_t doLock);
     void vkGetPhysicalDeviceFeatures2(
     VkPhysicalDevice physicalDevice,
-        VkPhysicalDeviceFeatures2* pFeatures);
+        VkPhysicalDeviceFeatures2* pFeatures,
+        uint32_t doLock);
     void vkGetPhysicalDeviceProperties2(
     VkPhysicalDevice physicalDevice,
-        VkPhysicalDeviceProperties2* pProperties);
+        VkPhysicalDeviceProperties2* pProperties,
+        uint32_t doLock);
     void vkGetPhysicalDeviceFormatProperties2(
     VkPhysicalDevice physicalDevice,
         VkFormat format,
-        VkFormatProperties2* pFormatProperties);
+        VkFormatProperties2* pFormatProperties,
+        uint32_t doLock);
     VkResult vkGetPhysicalDeviceImageFormatProperties2(
     VkPhysicalDevice physicalDevice,
         const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
-        VkImageFormatProperties2* pImageFormatProperties);
+        VkImageFormatProperties2* pImageFormatProperties,
+        uint32_t doLock);
     void vkGetPhysicalDeviceQueueFamilyProperties2(
     VkPhysicalDevice physicalDevice,
         uint32_t* pQueueFamilyPropertyCount,
-        VkQueueFamilyProperties2* pQueueFamilyProperties);
+        VkQueueFamilyProperties2* pQueueFamilyProperties,
+        uint32_t doLock);
     void vkGetPhysicalDeviceMemoryProperties2(
     VkPhysicalDevice physicalDevice,
-        VkPhysicalDeviceMemoryProperties2* pMemoryProperties);
+        VkPhysicalDeviceMemoryProperties2* pMemoryProperties,
+        uint32_t doLock);
     void vkGetPhysicalDeviceSparseImageFormatProperties2(
     VkPhysicalDevice physicalDevice,
         const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo,
         uint32_t* pPropertyCount,
-        VkSparseImageFormatProperties2* pProperties);
+        VkSparseImageFormatProperties2* pProperties,
+        uint32_t doLock);
     void vkTrimCommandPool(
     VkDevice device,
         VkCommandPool commandPool,
-        VkCommandPoolTrimFlags flags);
+        VkCommandPoolTrimFlags flags,
+        uint32_t doLock);
     void vkGetDeviceQueue2(
     VkDevice device,
         const VkDeviceQueueInfo2* pQueueInfo,
-        VkQueue* pQueue);
+        VkQueue* pQueue,
+        uint32_t doLock);
     VkResult vkCreateSamplerYcbcrConversion(
     VkDevice device,
         const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkSamplerYcbcrConversion* pYcbcrConversion);
+        VkSamplerYcbcrConversion* pYcbcrConversion,
+        uint32_t doLock);
     void vkDestroySamplerYcbcrConversion(
     VkDevice device,
         VkSamplerYcbcrConversion ycbcrConversion,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkCreateDescriptorUpdateTemplate(
     VkDevice device,
         const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate);
+        VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate,
+        uint32_t doLock);
     void vkDestroyDescriptorUpdateTemplate(
     VkDevice device,
         VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     void vkUpdateDescriptorSetWithTemplate(
     VkDevice device,
         VkDescriptorSet descriptorSet,
         VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-        const void* pData);
+        const void* pData,
+        uint32_t doLock);
     void vkGetPhysicalDeviceExternalBufferProperties(
     VkPhysicalDevice physicalDevice,
         const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
-        VkExternalBufferProperties* pExternalBufferProperties);
+        VkExternalBufferProperties* pExternalBufferProperties,
+        uint32_t doLock);
     void vkGetPhysicalDeviceExternalFenceProperties(
     VkPhysicalDevice physicalDevice,
         const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
-        VkExternalFenceProperties* pExternalFenceProperties);
+        VkExternalFenceProperties* pExternalFenceProperties,
+        uint32_t doLock);
     void vkGetPhysicalDeviceExternalSemaphoreProperties(
     VkPhysicalDevice physicalDevice,
         const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
-        VkExternalSemaphoreProperties* pExternalSemaphoreProperties);
+        VkExternalSemaphoreProperties* pExternalSemaphoreProperties,
+        uint32_t doLock);
     void vkGetDescriptorSetLayoutSupport(
     VkDevice device,
         const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
-        VkDescriptorSetLayoutSupport* pSupport);
+        VkDescriptorSetLayoutSupport* pSupport,
+        uint32_t doLock);
+#endif
+#ifdef VK_VERSION_1_2
+    void vkCmdDrawIndirectCount(
+    VkCommandBuffer commandBuffer,
+        VkBuffer buffer,
+        VkDeviceSize offset,
+        VkBuffer countBuffer,
+        VkDeviceSize countBufferOffset,
+        uint32_t maxDrawCount,
+        uint32_t stride,
+        uint32_t doLock);
+    void vkCmdDrawIndexedIndirectCount(
+    VkCommandBuffer commandBuffer,
+        VkBuffer buffer,
+        VkDeviceSize offset,
+        VkBuffer countBuffer,
+        VkDeviceSize countBufferOffset,
+        uint32_t maxDrawCount,
+        uint32_t stride,
+        uint32_t doLock);
+    VkResult vkCreateRenderPass2(
+    VkDevice device,
+        const VkRenderPassCreateInfo2* pCreateInfo,
+        const VkAllocationCallbacks* pAllocator,
+        VkRenderPass* pRenderPass,
+        uint32_t doLock);
+    void vkCmdBeginRenderPass2(
+    VkCommandBuffer commandBuffer,
+        const VkRenderPassBeginInfo* pRenderPassBegin,
+        const VkSubpassBeginInfo* pSubpassBeginInfo,
+        uint32_t doLock);
+    void vkCmdNextSubpass2(
+    VkCommandBuffer commandBuffer,
+        const VkSubpassBeginInfo* pSubpassBeginInfo,
+        const VkSubpassEndInfo* pSubpassEndInfo,
+        uint32_t doLock);
+    void vkCmdEndRenderPass2(
+    VkCommandBuffer commandBuffer,
+        const VkSubpassEndInfo* pSubpassEndInfo,
+        uint32_t doLock);
+    void vkResetQueryPool(
+    VkDevice device,
+        VkQueryPool queryPool,
+        uint32_t firstQuery,
+        uint32_t queryCount,
+        uint32_t doLock);
+    VkResult vkGetSemaphoreCounterValue(
+    VkDevice device,
+        VkSemaphore semaphore,
+        uint64_t* pValue,
+        uint32_t doLock);
+    VkResult vkWaitSemaphores(
+    VkDevice device,
+        const VkSemaphoreWaitInfo* pWaitInfo,
+        uint64_t timeout,
+        uint32_t doLock);
+    VkResult vkSignalSemaphore(
+    VkDevice device,
+        const VkSemaphoreSignalInfo* pSignalInfo,
+        uint32_t doLock);
+    VkDeviceAddress vkGetBufferDeviceAddress(
+    VkDevice device,
+        const VkBufferDeviceAddressInfo* pInfo,
+        uint32_t doLock);
+    uint64_t vkGetBufferOpaqueCaptureAddress(
+    VkDevice device,
+        const VkBufferDeviceAddressInfo* pInfo,
+        uint32_t doLock);
+    uint64_t vkGetDeviceMemoryOpaqueCaptureAddress(
+    VkDevice device,
+        const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_surface
     void vkDestroySurfaceKHR(
     VkInstance instance,
         VkSurfaceKHR surface,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkGetPhysicalDeviceSurfaceSupportKHR(
     VkPhysicalDevice physicalDevice,
         uint32_t queueFamilyIndex,
         VkSurfaceKHR surface,
-        VkBool32* pSupported);
+        VkBool32* pSupported,
+        uint32_t doLock);
     VkResult vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
     VkPhysicalDevice physicalDevice,
         VkSurfaceKHR surface,
-        VkSurfaceCapabilitiesKHR* pSurfaceCapabilities);
+        VkSurfaceCapabilitiesKHR* pSurfaceCapabilities,
+        uint32_t doLock);
     VkResult vkGetPhysicalDeviceSurfaceFormatsKHR(
     VkPhysicalDevice physicalDevice,
         VkSurfaceKHR surface,
         uint32_t* pSurfaceFormatCount,
-        VkSurfaceFormatKHR* pSurfaceFormats);
+        VkSurfaceFormatKHR* pSurfaceFormats,
+        uint32_t doLock);
     VkResult vkGetPhysicalDeviceSurfacePresentModesKHR(
     VkPhysicalDevice physicalDevice,
         VkSurfaceKHR surface,
         uint32_t* pPresentModeCount,
-        VkPresentModeKHR* pPresentModes);
+        VkPresentModeKHR* pPresentModes,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_swapchain
     VkResult vkCreateSwapchainKHR(
     VkDevice device,
         const VkSwapchainCreateInfoKHR* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkSwapchainKHR* pSwapchain);
+        VkSwapchainKHR* pSwapchain,
+        uint32_t doLock);
     void vkDestroySwapchainKHR(
     VkDevice device,
         VkSwapchainKHR swapchain,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkGetSwapchainImagesKHR(
     VkDevice device,
         VkSwapchainKHR swapchain,
         uint32_t* pSwapchainImageCount,
-        VkImage* pSwapchainImages);
+        VkImage* pSwapchainImages,
+        uint32_t doLock);
     VkResult vkAcquireNextImageKHR(
     VkDevice device,
         VkSwapchainKHR swapchain,
         uint64_t timeout,
         VkSemaphore semaphore,
         VkFence fence,
-        uint32_t* pImageIndex);
+        uint32_t* pImageIndex,
+        uint32_t doLock);
     VkResult vkQueuePresentKHR(
     VkQueue queue,
-        const VkPresentInfoKHR* pPresentInfo);
+        const VkPresentInfoKHR* pPresentInfo,
+        uint32_t doLock);
     VkResult vkGetDeviceGroupPresentCapabilitiesKHR(
     VkDevice device,
-        VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities);
+        VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities,
+        uint32_t doLock);
     VkResult vkGetDeviceGroupSurfacePresentModesKHR(
     VkDevice device,
         VkSurfaceKHR surface,
-        VkDeviceGroupPresentModeFlagsKHR* pModes);
+        VkDeviceGroupPresentModeFlagsKHR* pModes,
+        uint32_t doLock);
     VkResult vkGetPhysicalDevicePresentRectanglesKHR(
     VkPhysicalDevice physicalDevice,
         VkSurfaceKHR surface,
         uint32_t* pRectCount,
-        VkRect2D* pRects);
+        VkRect2D* pRects,
+        uint32_t doLock);
     VkResult vkAcquireNextImage2KHR(
     VkDevice device,
         const VkAcquireNextImageInfoKHR* pAcquireInfo,
-        uint32_t* pImageIndex);
+        uint32_t* pImageIndex,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_display
     VkResult vkGetPhysicalDeviceDisplayPropertiesKHR(
     VkPhysicalDevice physicalDevice,
         uint32_t* pPropertyCount,
-        VkDisplayPropertiesKHR* pProperties);
+        VkDisplayPropertiesKHR* pProperties,
+        uint32_t doLock);
     VkResult vkGetPhysicalDeviceDisplayPlanePropertiesKHR(
     VkPhysicalDevice physicalDevice,
         uint32_t* pPropertyCount,
-        VkDisplayPlanePropertiesKHR* pProperties);
+        VkDisplayPlanePropertiesKHR* pProperties,
+        uint32_t doLock);
     VkResult vkGetDisplayPlaneSupportedDisplaysKHR(
     VkPhysicalDevice physicalDevice,
         uint32_t planeIndex,
         uint32_t* pDisplayCount,
-        VkDisplayKHR* pDisplays);
+        VkDisplayKHR* pDisplays,
+        uint32_t doLock);
     VkResult vkGetDisplayModePropertiesKHR(
     VkPhysicalDevice physicalDevice,
         VkDisplayKHR display,
         uint32_t* pPropertyCount,
-        VkDisplayModePropertiesKHR* pProperties);
+        VkDisplayModePropertiesKHR* pProperties,
+        uint32_t doLock);
     VkResult vkCreateDisplayModeKHR(
     VkPhysicalDevice physicalDevice,
         VkDisplayKHR display,
         const VkDisplayModeCreateInfoKHR* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkDisplayModeKHR* pMode);
+        VkDisplayModeKHR* pMode,
+        uint32_t doLock);
     VkResult vkGetDisplayPlaneCapabilitiesKHR(
     VkPhysicalDevice physicalDevice,
         VkDisplayModeKHR mode,
         uint32_t planeIndex,
-        VkDisplayPlaneCapabilitiesKHR* pCapabilities);
+        VkDisplayPlaneCapabilitiesKHR* pCapabilities,
+        uint32_t doLock);
     VkResult vkCreateDisplayPlaneSurfaceKHR(
     VkInstance instance,
         const VkDisplaySurfaceCreateInfoKHR* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkSurfaceKHR* pSurface);
+        VkSurfaceKHR* pSurface,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_display_swapchain
     VkResult vkCreateSharedSwapchainsKHR(
@@ -936,70 +1189,69 @@
         uint32_t swapchainCount,
         const VkSwapchainCreateInfoKHR* pCreateInfos,
         const VkAllocationCallbacks* pAllocator,
-        VkSwapchainKHR* pSwapchains);
+        VkSwapchainKHR* pSwapchains,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_xlib_surface
     VkResult vkCreateXlibSurfaceKHR(
     VkInstance instance,
         const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkSurfaceKHR* pSurface);
+        VkSurfaceKHR* pSurface,
+        uint32_t doLock);
     VkBool32 vkGetPhysicalDeviceXlibPresentationSupportKHR(
     VkPhysicalDevice physicalDevice,
         uint32_t queueFamilyIndex,
         Display* dpy,
-        VisualID visualID);
+        VisualID visualID,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_xcb_surface
     VkResult vkCreateXcbSurfaceKHR(
     VkInstance instance,
         const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkSurfaceKHR* pSurface);
+        VkSurfaceKHR* pSurface,
+        uint32_t doLock);
     VkBool32 vkGetPhysicalDeviceXcbPresentationSupportKHR(
     VkPhysicalDevice physicalDevice,
         uint32_t queueFamilyIndex,
         xcb_connection_t* connection,
-        xcb_visualid_t visual_id);
+        xcb_visualid_t visual_id,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_wayland_surface
     VkResult vkCreateWaylandSurfaceKHR(
     VkInstance instance,
         const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkSurfaceKHR* pSurface);
+        VkSurfaceKHR* pSurface,
+        uint32_t doLock);
     VkBool32 vkGetPhysicalDeviceWaylandPresentationSupportKHR(
     VkPhysicalDevice physicalDevice,
         uint32_t queueFamilyIndex,
-        wl_display* display);
-#endif
-#ifdef VK_KHR_mir_surface
-    VkResult vkCreateMirSurfaceKHR(
-    VkInstance instance,
-        const VkMirSurfaceCreateInfoKHR* pCreateInfo,
-        const VkAllocationCallbacks* pAllocator,
-        VkSurfaceKHR* pSurface);
-    VkBool32 vkGetPhysicalDeviceMirPresentationSupportKHR(
-    VkPhysicalDevice physicalDevice,
-        uint32_t queueFamilyIndex,
-        MirConnection* connection);
+        wl_display* display,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_android_surface
     VkResult vkCreateAndroidSurfaceKHR(
     VkInstance instance,
         const VkAndroidSurfaceCreateInfoKHR* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkSurfaceKHR* pSurface);
+        VkSurfaceKHR* pSurface,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_win32_surface
     VkResult vkCreateWin32SurfaceKHR(
     VkInstance instance,
         const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkSurfaceKHR* pSurface);
+        VkSurfaceKHR* pSurface,
+        uint32_t doLock);
     VkBool32 vkGetPhysicalDeviceWin32PresentationSupportKHR(
     VkPhysicalDevice physicalDevice,
-        uint32_t queueFamilyIndex);
+        uint32_t queueFamilyIndex,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_sampler_mirror_clamp_to_edge
 #endif
@@ -1008,30 +1260,37 @@
 #ifdef VK_KHR_get_physical_device_properties2
     void vkGetPhysicalDeviceFeatures2KHR(
     VkPhysicalDevice physicalDevice,
-        VkPhysicalDeviceFeatures2* pFeatures);
+        VkPhysicalDeviceFeatures2* pFeatures,
+        uint32_t doLock);
     void vkGetPhysicalDeviceProperties2KHR(
     VkPhysicalDevice physicalDevice,
-        VkPhysicalDeviceProperties2* pProperties);
+        VkPhysicalDeviceProperties2* pProperties,
+        uint32_t doLock);
     void vkGetPhysicalDeviceFormatProperties2KHR(
     VkPhysicalDevice physicalDevice,
         VkFormat format,
-        VkFormatProperties2* pFormatProperties);
+        VkFormatProperties2* pFormatProperties,
+        uint32_t doLock);
     VkResult vkGetPhysicalDeviceImageFormatProperties2KHR(
     VkPhysicalDevice physicalDevice,
         const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
-        VkImageFormatProperties2* pImageFormatProperties);
+        VkImageFormatProperties2* pImageFormatProperties,
+        uint32_t doLock);
     void vkGetPhysicalDeviceQueueFamilyProperties2KHR(
     VkPhysicalDevice physicalDevice,
         uint32_t* pQueueFamilyPropertyCount,
-        VkQueueFamilyProperties2* pQueueFamilyProperties);
+        VkQueueFamilyProperties2* pQueueFamilyProperties,
+        uint32_t doLock);
     void vkGetPhysicalDeviceMemoryProperties2KHR(
     VkPhysicalDevice physicalDevice,
-        VkPhysicalDeviceMemoryProperties2* pMemoryProperties);
+        VkPhysicalDeviceMemoryProperties2* pMemoryProperties,
+        uint32_t doLock);
     void vkGetPhysicalDeviceSparseImageFormatProperties2KHR(
     VkPhysicalDevice physicalDevice,
         const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo,
         uint32_t* pPropertyCount,
-        VkSparseImageFormatProperties2* pProperties);
+        VkSparseImageFormatProperties2* pProperties,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_device_group
     void vkGetDeviceGroupPeerMemoryFeaturesKHR(
@@ -1039,10 +1298,12 @@
         uint32_t heapIndex,
         uint32_t localDeviceIndex,
         uint32_t remoteDeviceIndex,
-        VkPeerMemoryFeatureFlags* pPeerMemoryFeatures);
+        VkPeerMemoryFeatureFlags* pPeerMemoryFeatures,
+        uint32_t doLock);
     void vkCmdSetDeviceMaskKHR(
     VkCommandBuffer commandBuffer,
-        uint32_t deviceMask);
+        uint32_t deviceMask,
+        uint32_t doLock);
     void vkCmdDispatchBaseKHR(
     VkCommandBuffer commandBuffer,
         uint32_t baseGroupX,
@@ -1050,7 +1311,8 @@
         uint32_t baseGroupZ,
         uint32_t groupCountX,
         uint32_t groupCountY,
-        uint32_t groupCountZ);
+        uint32_t groupCountZ,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_shader_draw_parameters
 #endif
@@ -1058,19 +1320,22 @@
     void vkTrimCommandPoolKHR(
     VkDevice device,
         VkCommandPool commandPool,
-        VkCommandPoolTrimFlags flags);
+        VkCommandPoolTrimFlags flags,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_device_group_creation
     VkResult vkEnumeratePhysicalDeviceGroupsKHR(
     VkInstance instance,
         uint32_t* pPhysicalDeviceGroupCount,
-        VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties);
+        VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_external_memory_capabilities
     void vkGetPhysicalDeviceExternalBufferPropertiesKHR(
     VkPhysicalDevice physicalDevice,
         const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
-        VkExternalBufferProperties* pExternalBufferProperties);
+        VkExternalBufferProperties* pExternalBufferProperties,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_external_memory
 #endif
@@ -1078,23 +1343,27 @@
     VkResult vkGetMemoryWin32HandleKHR(
     VkDevice device,
         const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo,
-        HANDLE* pHandle);
+        HANDLE* pHandle,
+        uint32_t doLock);
     VkResult vkGetMemoryWin32HandlePropertiesKHR(
     VkDevice device,
         VkExternalMemoryHandleTypeFlagBits handleType,
         HANDLE handle,
-        VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties);
+        VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_external_memory_fd
     VkResult vkGetMemoryFdKHR(
     VkDevice device,
         const VkMemoryGetFdInfoKHR* pGetFdInfo,
-        int* pFd);
+        int* pFd,
+        uint32_t doLock);
     VkResult vkGetMemoryFdPropertiesKHR(
     VkDevice device,
         VkExternalMemoryHandleTypeFlagBits handleType,
         int fd,
-        VkMemoryFdPropertiesKHR* pMemoryFdProperties);
+        VkMemoryFdPropertiesKHR* pMemoryFdProperties,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_win32_keyed_mutex
 #endif
@@ -1102,27 +1371,32 @@
     void vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(
     VkPhysicalDevice physicalDevice,
         const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
-        VkExternalSemaphoreProperties* pExternalSemaphoreProperties);
+        VkExternalSemaphoreProperties* pExternalSemaphoreProperties,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_external_semaphore
 #endif
 #ifdef VK_KHR_external_semaphore_win32
     VkResult vkImportSemaphoreWin32HandleKHR(
     VkDevice device,
-        const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo);
+        const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo,
+        uint32_t doLock);
     VkResult vkGetSemaphoreWin32HandleKHR(
     VkDevice device,
         const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo,
-        HANDLE* pHandle);
+        HANDLE* pHandle,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_external_semaphore_fd
     VkResult vkImportSemaphoreFdKHR(
     VkDevice device,
-        const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo);
+        const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo,
+        uint32_t doLock);
     VkResult vkGetSemaphoreFdKHR(
     VkDevice device,
         const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
-        int* pFd);
+        int* pFd,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_push_descriptor
     void vkCmdPushDescriptorSetKHR(
@@ -1131,13 +1405,17 @@
         VkPipelineLayout layout,
         uint32_t set,
         uint32_t descriptorWriteCount,
-        const VkWriteDescriptorSet* pDescriptorWrites);
+        const VkWriteDescriptorSet* pDescriptorWrites,
+        uint32_t doLock);
     void vkCmdPushDescriptorSetWithTemplateKHR(
     VkCommandBuffer commandBuffer,
         VkDescriptorUpdateTemplate descriptorUpdateTemplate,
         VkPipelineLayout layout,
         uint32_t set,
-        const void* pData);
+        const void* pData,
+        uint32_t doLock);
+#endif
+#ifdef VK_KHR_shader_float16_int8
 #endif
 #ifdef VK_KHR_16bit_storage
 #endif
@@ -1148,65 +1426,101 @@
     VkDevice device,
         const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate);
+        VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate,
+        uint32_t doLock);
     void vkDestroyDescriptorUpdateTemplateKHR(
     VkDevice device,
         VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     void vkUpdateDescriptorSetWithTemplateKHR(
     VkDevice device,
         VkDescriptorSet descriptorSet,
         VkDescriptorUpdateTemplate descriptorUpdateTemplate,
-        const void* pData);
+        const void* pData,
+        uint32_t doLock);
+#endif
+#ifdef VK_KHR_imageless_framebuffer
 #endif
 #ifdef VK_KHR_create_renderpass2
     VkResult vkCreateRenderPass2KHR(
     VkDevice device,
-        const VkRenderPassCreateInfo2KHR* pCreateInfo,
+        const VkRenderPassCreateInfo2* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkRenderPass* pRenderPass);
+        VkRenderPass* pRenderPass,
+        uint32_t doLock);
     void vkCmdBeginRenderPass2KHR(
     VkCommandBuffer commandBuffer,
         const VkRenderPassBeginInfo* pRenderPassBegin,
-        const VkSubpassBeginInfoKHR* pSubpassBeginInfo);
+        const VkSubpassBeginInfo* pSubpassBeginInfo,
+        uint32_t doLock);
     void vkCmdNextSubpass2KHR(
     VkCommandBuffer commandBuffer,
-        const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
-        const VkSubpassEndInfoKHR* pSubpassEndInfo);
+        const VkSubpassBeginInfo* pSubpassBeginInfo,
+        const VkSubpassEndInfo* pSubpassEndInfo,
+        uint32_t doLock);
     void vkCmdEndRenderPass2KHR(
     VkCommandBuffer commandBuffer,
-        const VkSubpassEndInfoKHR* pSubpassEndInfo);
+        const VkSubpassEndInfo* pSubpassEndInfo,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_shared_presentable_image
     VkResult vkGetSwapchainStatusKHR(
     VkDevice device,
-        VkSwapchainKHR swapchain);
+        VkSwapchainKHR swapchain,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_external_fence_capabilities
     void vkGetPhysicalDeviceExternalFencePropertiesKHR(
     VkPhysicalDevice physicalDevice,
         const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
-        VkExternalFenceProperties* pExternalFenceProperties);
+        VkExternalFenceProperties* pExternalFenceProperties,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_external_fence
 #endif
 #ifdef VK_KHR_external_fence_win32
     VkResult vkImportFenceWin32HandleKHR(
     VkDevice device,
-        const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo);
+        const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo,
+        uint32_t doLock);
     VkResult vkGetFenceWin32HandleKHR(
     VkDevice device,
         const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo,
-        HANDLE* pHandle);
+        HANDLE* pHandle,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_external_fence_fd
     VkResult vkImportFenceFdKHR(
     VkDevice device,
-        const VkImportFenceFdInfoKHR* pImportFenceFdInfo);
+        const VkImportFenceFdInfoKHR* pImportFenceFdInfo,
+        uint32_t doLock);
     VkResult vkGetFenceFdKHR(
     VkDevice device,
         const VkFenceGetFdInfoKHR* pGetFdInfo,
-        int* pFd);
+        int* pFd,
+        uint32_t doLock);
+#endif
+#ifdef VK_KHR_performance_query
+    VkResult vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(
+    VkPhysicalDevice physicalDevice,
+        uint32_t queueFamilyIndex,
+        uint32_t* pCounterCount,
+        VkPerformanceCounterKHR* pCounters,
+        VkPerformanceCounterDescriptionKHR* pCounterDescriptions,
+        uint32_t doLock);
+    void vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(
+    VkPhysicalDevice physicalDevice,
+        const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo,
+        uint32_t* pNumPasses,
+        uint32_t doLock);
+    VkResult vkAcquireProfilingLockKHR(
+    VkDevice device,
+        const VkAcquireProfilingLockInfoKHR* pInfo,
+        uint32_t doLock);
+    void vkReleaseProfilingLockKHR(
+    VkDevice device,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_maintenance2
 #endif
@@ -1214,12 +1528,14 @@
     VkResult vkGetPhysicalDeviceSurfaceCapabilities2KHR(
     VkPhysicalDevice physicalDevice,
         const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
-        VkSurfaceCapabilities2KHR* pSurfaceCapabilities);
+        VkSurfaceCapabilities2KHR* pSurfaceCapabilities,
+        uint32_t doLock);
     VkResult vkGetPhysicalDeviceSurfaceFormats2KHR(
     VkPhysicalDevice physicalDevice,
         const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
         uint32_t* pSurfaceFormatCount,
-        VkSurfaceFormat2KHR* pSurfaceFormats);
+        VkSurfaceFormat2KHR* pSurfaceFormats,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_variable_pointers
 #endif
@@ -1227,20 +1543,24 @@
     VkResult vkGetPhysicalDeviceDisplayProperties2KHR(
     VkPhysicalDevice physicalDevice,
         uint32_t* pPropertyCount,
-        VkDisplayProperties2KHR* pProperties);
+        VkDisplayProperties2KHR* pProperties,
+        uint32_t doLock);
     VkResult vkGetPhysicalDeviceDisplayPlaneProperties2KHR(
     VkPhysicalDevice physicalDevice,
         uint32_t* pPropertyCount,
-        VkDisplayPlaneProperties2KHR* pProperties);
+        VkDisplayPlaneProperties2KHR* pProperties,
+        uint32_t doLock);
     VkResult vkGetDisplayModeProperties2KHR(
     VkPhysicalDevice physicalDevice,
         VkDisplayKHR display,
         uint32_t* pPropertyCount,
-        VkDisplayModeProperties2KHR* pProperties);
+        VkDisplayModeProperties2KHR* pProperties,
+        uint32_t doLock);
     VkResult vkGetDisplayPlaneCapabilities2KHR(
     VkPhysicalDevice physicalDevice,
         const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo,
-        VkDisplayPlaneCapabilities2KHR* pCapabilities);
+        VkDisplayPlaneCapabilities2KHR* pCapabilities,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_dedicated_allocation
 #endif
@@ -1252,16 +1572,19 @@
     void vkGetImageMemoryRequirements2KHR(
     VkDevice device,
         const VkImageMemoryRequirementsInfo2* pInfo,
-        VkMemoryRequirements2* pMemoryRequirements);
+        VkMemoryRequirements2* pMemoryRequirements,
+        uint32_t doLock);
     void vkGetBufferMemoryRequirements2KHR(
     VkDevice device,
         const VkBufferMemoryRequirementsInfo2* pInfo,
-        VkMemoryRequirements2* pMemoryRequirements);
+        VkMemoryRequirements2* pMemoryRequirements,
+        uint32_t doLock);
     void vkGetImageSparseMemoryRequirements2KHR(
     VkDevice device,
         const VkImageSparseMemoryRequirementsInfo2* pInfo,
         uint32_t* pSparseMemoryRequirementCount,
-        VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
+        VkSparseImageMemoryRequirements2* pSparseMemoryRequirements,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_image_format_list
 #endif
@@ -1270,27 +1593,34 @@
     VkDevice device,
         const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkSamplerYcbcrConversion* pYcbcrConversion);
+        VkSamplerYcbcrConversion* pYcbcrConversion,
+        uint32_t doLock);
     void vkDestroySamplerYcbcrConversionKHR(
     VkDevice device,
         VkSamplerYcbcrConversion ycbcrConversion,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_bind_memory2
     VkResult vkBindBufferMemory2KHR(
     VkDevice device,
         uint32_t bindInfoCount,
-        const VkBindBufferMemoryInfo* pBindInfos);
+        const VkBindBufferMemoryInfo* pBindInfos,
+        uint32_t doLock);
     VkResult vkBindImageMemory2KHR(
     VkDevice device,
         uint32_t bindInfoCount,
-        const VkBindImageMemoryInfo* pBindInfos);
+        const VkBindImageMemoryInfo* pBindInfos,
+        uint32_t doLock);
+#endif
+#ifdef VK_KHR_portability_subset
 #endif
 #ifdef VK_KHR_maintenance3
     void vkGetDescriptorSetLayoutSupportKHR(
     VkDevice device,
         const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
-        VkDescriptorSetLayoutSupport* pSupport);
+        VkDescriptorSetLayoutSupport* pSupport,
+        uint32_t doLock);
 #endif
 #ifdef VK_KHR_draw_indirect_count
     void vkCmdDrawIndirectCountKHR(
@@ -1300,7 +1630,8 @@
         VkBuffer countBuffer,
         VkDeviceSize countBufferOffset,
         uint32_t maxDrawCount,
-        uint32_t stride);
+        uint32_t stride,
+        uint32_t doLock);
     void vkCmdDrawIndexedIndirectCountKHR(
     VkCommandBuffer commandBuffer,
         VkBuffer buffer,
@@ -1308,39 +1639,187 @@
         VkBuffer countBuffer,
         VkDeviceSize countBufferOffset,
         uint32_t maxDrawCount,
-        uint32_t stride);
+        uint32_t stride,
+        uint32_t doLock);
+#endif
+#ifdef VK_KHR_shader_subgroup_extended_types
 #endif
 #ifdef VK_KHR_8bit_storage
 #endif
+#ifdef VK_KHR_shader_atomic_int64
+#endif
+#ifdef VK_KHR_shader_clock
+#endif
+#ifdef VK_KHR_driver_properties
+#endif
+#ifdef VK_KHR_shader_float_controls
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+    VkResult vkGetSemaphoreCounterValueKHR(
+    VkDevice device,
+        VkSemaphore semaphore,
+        uint64_t* pValue,
+        uint32_t doLock);
+    VkResult vkWaitSemaphoresKHR(
+    VkDevice device,
+        const VkSemaphoreWaitInfo* pWaitInfo,
+        uint64_t timeout,
+        uint32_t doLock);
+    VkResult vkSignalSemaphoreKHR(
+    VkDevice device,
+        const VkSemaphoreSignalInfo* pSignalInfo,
+        uint32_t doLock);
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+    VkResult vkGetPhysicalDeviceFragmentShadingRatesKHR(
+    VkPhysicalDevice physicalDevice,
+        uint32_t* pFragmentShadingRateCount,
+        VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates,
+        uint32_t doLock);
+    void vkCmdSetFragmentShadingRateKHR(
+    VkCommandBuffer commandBuffer,
+        const VkExtent2D* pFragmentSize,
+        const VkFragmentShadingRateCombinerOpKHR combinerOps[2],
+        uint32_t doLock);
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+#endif
+#ifdef VK_KHR_buffer_device_address
+    VkDeviceAddress vkGetBufferDeviceAddressKHR(
+    VkDevice device,
+        const VkBufferDeviceAddressInfo* pInfo,
+        uint32_t doLock);
+    uint64_t vkGetBufferOpaqueCaptureAddressKHR(
+    VkDevice device,
+        const VkBufferDeviceAddressInfo* pInfo,
+        uint32_t doLock);
+    uint64_t vkGetDeviceMemoryOpaqueCaptureAddressKHR(
+    VkDevice device,
+        const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo,
+        uint32_t doLock);
+#endif
+#ifdef VK_KHR_deferred_host_operations
+    VkResult vkCreateDeferredOperationKHR(
+    VkDevice device,
+        const VkAllocationCallbacks* pAllocator,
+        VkDeferredOperationKHR* pDeferredOperation,
+        uint32_t doLock);
+    void vkDestroyDeferredOperationKHR(
+    VkDevice device,
+        VkDeferredOperationKHR operation,
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
+    uint32_t vkGetDeferredOperationMaxConcurrencyKHR(
+    VkDevice device,
+        VkDeferredOperationKHR operation,
+        uint32_t doLock);
+    VkResult vkGetDeferredOperationResultKHR(
+    VkDevice device,
+        VkDeferredOperationKHR operation,
+        uint32_t doLock);
+    VkResult vkDeferredOperationJoinKHR(
+    VkDevice device,
+        VkDeferredOperationKHR operation,
+        uint32_t doLock);
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+    VkResult vkGetPipelineExecutablePropertiesKHR(
+    VkDevice device,
+        const VkPipelineInfoKHR* pPipelineInfo,
+        uint32_t* pExecutableCount,
+        VkPipelineExecutablePropertiesKHR* pProperties,
+        uint32_t doLock);
+    VkResult vkGetPipelineExecutableStatisticsKHR(
+    VkDevice device,
+        const VkPipelineExecutableInfoKHR* pExecutableInfo,
+        uint32_t* pStatisticCount,
+        VkPipelineExecutableStatisticKHR* pStatistics,
+        uint32_t doLock);
+    VkResult vkGetPipelineExecutableInternalRepresentationsKHR(
+    VkDevice device,
+        const VkPipelineExecutableInfoKHR* pExecutableInfo,
+        uint32_t* pInternalRepresentationCount,
+        VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations,
+        uint32_t doLock);
+#endif
+#ifdef VK_KHR_pipeline_library
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+    void vkCmdCopyBuffer2KHR(
+    VkCommandBuffer commandBuffer,
+        const VkCopyBufferInfo2KHR* pCopyBufferInfo,
+        uint32_t doLock);
+    void vkCmdCopyImage2KHR(
+    VkCommandBuffer commandBuffer,
+        const VkCopyImageInfo2KHR* pCopyImageInfo,
+        uint32_t doLock);
+    void vkCmdCopyBufferToImage2KHR(
+    VkCommandBuffer commandBuffer,
+        const VkCopyBufferToImageInfo2KHR* pCopyBufferToImageInfo,
+        uint32_t doLock);
+    void vkCmdCopyImageToBuffer2KHR(
+    VkCommandBuffer commandBuffer,
+        const VkCopyImageToBufferInfo2KHR* pCopyImageToBufferInfo,
+        uint32_t doLock);
+    void vkCmdBlitImage2KHR(
+    VkCommandBuffer commandBuffer,
+        const VkBlitImageInfo2KHR* pBlitImageInfo,
+        uint32_t doLock);
+    void vkCmdResolveImage2KHR(
+    VkCommandBuffer commandBuffer,
+        const VkResolveImageInfo2KHR* pResolveImageInfo,
+        uint32_t doLock);
+#endif
 #ifdef VK_ANDROID_native_buffer
     VkResult vkGetSwapchainGrallocUsageANDROID(
     VkDevice device,
         VkFormat format,
         VkImageUsageFlags imageUsage,
-        int* grallocUsage);
+        int* grallocUsage,
+        uint32_t doLock);
     VkResult vkAcquireImageANDROID(
     VkDevice device,
         VkImage image,
         int nativeFenceFd,
         VkSemaphore semaphore,
-        VkFence fence);
+        VkFence fence,
+        uint32_t doLock);
     VkResult vkQueueSignalReleaseImageANDROID(
     VkQueue queue,
         uint32_t waitSemaphoreCount,
         const VkSemaphore* pWaitSemaphores,
         VkImage image,
-        int* pNativeFenceFd);
+        int* pNativeFenceFd,
+        uint32_t doLock);
 #endif
 #ifdef VK_EXT_debug_report
     VkResult vkCreateDebugReportCallbackEXT(
     VkInstance instance,
         const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkDebugReportCallbackEXT* pCallback);
+        VkDebugReportCallbackEXT* pCallback,
+        uint32_t doLock);
     void vkDestroyDebugReportCallbackEXT(
     VkInstance instance,
         VkDebugReportCallbackEXT callback,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     void vkDebugReportMessageEXT(
     VkInstance instance,
         VkDebugReportFlagsEXT flags,
@@ -1349,7 +1828,8 @@
         size_t location,
         int32_t messageCode,
         const char* pLayerPrefix,
-        const char* pMessage);
+        const char* pMessage,
+        uint32_t doLock);
 #endif
 #ifdef VK_NV_glsl_shader
 #endif
@@ -1366,23 +1846,85 @@
 #ifdef VK_EXT_debug_marker
     VkResult vkDebugMarkerSetObjectTagEXT(
     VkDevice device,
-        const VkDebugMarkerObjectTagInfoEXT* pTagInfo);
+        const VkDebugMarkerObjectTagInfoEXT* pTagInfo,
+        uint32_t doLock);
     VkResult vkDebugMarkerSetObjectNameEXT(
     VkDevice device,
-        const VkDebugMarkerObjectNameInfoEXT* pNameInfo);
+        const VkDebugMarkerObjectNameInfoEXT* pNameInfo,
+        uint32_t doLock);
     void vkCmdDebugMarkerBeginEXT(
     VkCommandBuffer commandBuffer,
-        const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
+        const VkDebugMarkerMarkerInfoEXT* pMarkerInfo,
+        uint32_t doLock);
     void vkCmdDebugMarkerEndEXT(
-    VkCommandBuffer commandBuffer);
+    VkCommandBuffer commandBuffer,
+        uint32_t doLock);
     void vkCmdDebugMarkerInsertEXT(
     VkCommandBuffer commandBuffer,
-        const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
+        const VkDebugMarkerMarkerInfoEXT* pMarkerInfo,
+        uint32_t doLock);
 #endif
 #ifdef VK_AMD_gcn_shader
 #endif
 #ifdef VK_NV_dedicated_allocation
 #endif
+#ifdef VK_EXT_transform_feedback
+    void vkCmdBindTransformFeedbackBuffersEXT(
+    VkCommandBuffer commandBuffer,
+        uint32_t firstBinding,
+        uint32_t bindingCount,
+        const VkBuffer* pBuffers,
+        const VkDeviceSize* pOffsets,
+        const VkDeviceSize* pSizes,
+        uint32_t doLock);
+    void vkCmdBeginTransformFeedbackEXT(
+    VkCommandBuffer commandBuffer,
+        uint32_t firstCounterBuffer,
+        uint32_t counterBufferCount,
+        const VkBuffer* pCounterBuffers,
+        const VkDeviceSize* pCounterBufferOffsets,
+        uint32_t doLock);
+    void vkCmdEndTransformFeedbackEXT(
+    VkCommandBuffer commandBuffer,
+        uint32_t firstCounterBuffer,
+        uint32_t counterBufferCount,
+        const VkBuffer* pCounterBuffers,
+        const VkDeviceSize* pCounterBufferOffsets,
+        uint32_t doLock);
+    void vkCmdBeginQueryIndexedEXT(
+    VkCommandBuffer commandBuffer,
+        VkQueryPool queryPool,
+        uint32_t query,
+        VkQueryControlFlags flags,
+        uint32_t index,
+        uint32_t doLock);
+    void vkCmdEndQueryIndexedEXT(
+    VkCommandBuffer commandBuffer,
+        VkQueryPool queryPool,
+        uint32_t query,
+        uint32_t index,
+        uint32_t doLock);
+    void vkCmdDrawIndirectByteCountEXT(
+    VkCommandBuffer commandBuffer,
+        uint32_t instanceCount,
+        uint32_t firstInstance,
+        VkBuffer counterBuffer,
+        VkDeviceSize counterBufferOffset,
+        uint32_t counterOffset,
+        uint32_t vertexStride,
+        uint32_t doLock);
+#endif
+#ifdef VK_NVX_image_view_handle
+    uint32_t vkGetImageViewHandleNVX(
+    VkDevice device,
+        const VkImageViewHandleInfoNVX* pInfo,
+        uint32_t doLock);
+    VkResult vkGetImageViewAddressNVX(
+    VkDevice device,
+        VkImageView imageView,
+        VkImageViewAddressPropertiesNVX* pProperties,
+        uint32_t doLock);
+#endif
 #ifdef VK_AMD_draw_indirect_count
     void vkCmdDrawIndirectCountAMD(
     VkCommandBuffer commandBuffer,
@@ -1391,7 +1933,8 @@
         VkBuffer countBuffer,
         VkDeviceSize countBufferOffset,
         uint32_t maxDrawCount,
-        uint32_t stride);
+        uint32_t stride,
+        uint32_t doLock);
     void vkCmdDrawIndexedIndirectCountAMD(
     VkCommandBuffer commandBuffer,
         VkBuffer buffer,
@@ -1399,7 +1942,8 @@
         VkBuffer countBuffer,
         VkDeviceSize countBufferOffset,
         uint32_t maxDrawCount,
-        uint32_t stride);
+        uint32_t stride,
+        uint32_t doLock);
 #endif
 #ifdef VK_AMD_negative_viewport_height
 #endif
@@ -1416,10 +1960,21 @@
         VkShaderStageFlagBits shaderStage,
         VkShaderInfoTypeAMD infoType,
         size_t* pInfoSize,
-        void* pInfo);
+        void* pInfo,
+        uint32_t doLock);
 #endif
 #ifdef VK_AMD_shader_image_load_store_lod
 #endif
+#ifdef VK_GGP_stream_descriptor_surface
+    VkResult vkCreateStreamDescriptorSurfaceGGP(
+    VkInstance instance,
+        const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo,
+        const VkAllocationCallbacks* pAllocator,
+        VkSurfaceKHR* pSurface,
+        uint32_t doLock);
+#endif
+#ifdef VK_NV_corner_sampled_image
+#endif
 #ifdef VK_IMG_format_pvrtc
 #endif
 #ifdef VK_NV_external_memory_capabilities
@@ -1431,7 +1986,8 @@
         VkImageUsageFlags usage,
         VkImageCreateFlags flags,
         VkExternalMemoryHandleTypeFlagsNV externalHandleType,
-        VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties);
+        VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties,
+        uint32_t doLock);
 #endif
 #ifdef VK_NV_external_memory
 #endif
@@ -1440,7 +1996,8 @@
     VkDevice device,
         VkDeviceMemory memory,
         VkExternalMemoryHandleTypeFlagsNV handleType,
-        HANDLE* pHandle);
+        HANDLE* pHandle,
+        uint32_t doLock);
 #endif
 #ifdef VK_NV_win32_keyed_mutex
 #endif
@@ -1451,122 +2008,98 @@
     VkInstance instance,
         const VkViSurfaceCreateInfoNN* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkSurfaceKHR* pSurface);
+        VkSurfaceKHR* pSurface,
+        uint32_t doLock);
 #endif
 #ifdef VK_EXT_shader_subgroup_ballot
 #endif
 #ifdef VK_EXT_shader_subgroup_vote
 #endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+#endif
+#ifdef VK_EXT_astc_decode_mode
+#endif
 #ifdef VK_EXT_conditional_rendering
     void vkCmdBeginConditionalRenderingEXT(
     VkCommandBuffer commandBuffer,
-        const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin);
+        const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin,
+        uint32_t doLock);
     void vkCmdEndConditionalRenderingEXT(
-    VkCommandBuffer commandBuffer);
-#endif
-#ifdef VK_NVX_device_generated_commands
-    void vkCmdProcessCommandsNVX(
     VkCommandBuffer commandBuffer,
-        const VkCmdProcessCommandsInfoNVX* pProcessCommandsInfo);
-    void vkCmdReserveSpaceForCommandsNVX(
-    VkCommandBuffer commandBuffer,
-        const VkCmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo);
-    VkResult vkCreateIndirectCommandsLayoutNVX(
-    VkDevice device,
-        const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo,
-        const VkAllocationCallbacks* pAllocator,
-        VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout);
-    void vkDestroyIndirectCommandsLayoutNVX(
-    VkDevice device,
-        VkIndirectCommandsLayoutNVX indirectCommandsLayout,
-        const VkAllocationCallbacks* pAllocator);
-    VkResult vkCreateObjectTableNVX(
-    VkDevice device,
-        const VkObjectTableCreateInfoNVX* pCreateInfo,
-        const VkAllocationCallbacks* pAllocator,
-        VkObjectTableNVX* pObjectTable);
-    void vkDestroyObjectTableNVX(
-    VkDevice device,
-        VkObjectTableNVX objectTable,
-        const VkAllocationCallbacks* pAllocator);
-    VkResult vkRegisterObjectsNVX(
-    VkDevice device,
-        VkObjectTableNVX objectTable,
-        uint32_t objectCount,
-        const VkObjectTableEntryNVX* const* ppObjectTableEntries,
-        const uint32_t* pObjectIndices);
-    VkResult vkUnregisterObjectsNVX(
-    VkDevice device,
-        VkObjectTableNVX objectTable,
-        uint32_t objectCount,
-        const VkObjectEntryTypeNVX* pObjectEntryTypes,
-        const uint32_t* pObjectIndices);
-    void vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX(
-    VkPhysicalDevice physicalDevice,
-        VkDeviceGeneratedCommandsFeaturesNVX* pFeatures,
-        VkDeviceGeneratedCommandsLimitsNVX* pLimits);
+        uint32_t doLock);
 #endif
 #ifdef VK_NV_clip_space_w_scaling
     void vkCmdSetViewportWScalingNV(
     VkCommandBuffer commandBuffer,
         uint32_t firstViewport,
         uint32_t viewportCount,
-        const VkViewportWScalingNV* pViewportWScalings);
+        const VkViewportWScalingNV* pViewportWScalings,
+        uint32_t doLock);
 #endif
 #ifdef VK_EXT_direct_mode_display
     VkResult vkReleaseDisplayEXT(
     VkPhysicalDevice physicalDevice,
-        VkDisplayKHR display);
+        VkDisplayKHR display,
+        uint32_t doLock);
 #endif
 #ifdef VK_EXT_acquire_xlib_display
     VkResult vkAcquireXlibDisplayEXT(
     VkPhysicalDevice physicalDevice,
         Display* dpy,
-        VkDisplayKHR display);
+        VkDisplayKHR display,
+        uint32_t doLock);
     VkResult vkGetRandROutputDisplayEXT(
     VkPhysicalDevice physicalDevice,
         Display* dpy,
         RROutput rrOutput,
-        VkDisplayKHR* pDisplay);
+        VkDisplayKHR* pDisplay,
+        uint32_t doLock);
 #endif
 #ifdef VK_EXT_display_surface_counter
     VkResult vkGetPhysicalDeviceSurfaceCapabilities2EXT(
     VkPhysicalDevice physicalDevice,
         VkSurfaceKHR surface,
-        VkSurfaceCapabilities2EXT* pSurfaceCapabilities);
+        VkSurfaceCapabilities2EXT* pSurfaceCapabilities,
+        uint32_t doLock);
 #endif
 #ifdef VK_EXT_display_control
     VkResult vkDisplayPowerControlEXT(
     VkDevice device,
         VkDisplayKHR display,
-        const VkDisplayPowerInfoEXT* pDisplayPowerInfo);
+        const VkDisplayPowerInfoEXT* pDisplayPowerInfo,
+        uint32_t doLock);
     VkResult vkRegisterDeviceEventEXT(
     VkDevice device,
         const VkDeviceEventInfoEXT* pDeviceEventInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkFence* pFence);
+        VkFence* pFence,
+        uint32_t doLock);
     VkResult vkRegisterDisplayEventEXT(
     VkDevice device,
         VkDisplayKHR display,
         const VkDisplayEventInfoEXT* pDisplayEventInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkFence* pFence);
+        VkFence* pFence,
+        uint32_t doLock);
     VkResult vkGetSwapchainCounterEXT(
     VkDevice device,
         VkSwapchainKHR swapchain,
         VkSurfaceCounterFlagBitsEXT counter,
-        uint64_t* pCounterValue);
+        uint64_t* pCounterValue,
+        uint32_t doLock);
 #endif
 #ifdef VK_GOOGLE_display_timing
     VkResult vkGetRefreshCycleDurationGOOGLE(
     VkDevice device,
         VkSwapchainKHR swapchain,
-        VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties);
+        VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties,
+        uint32_t doLock);
     VkResult vkGetPastPresentationTimingGOOGLE(
     VkDevice device,
         VkSwapchainKHR swapchain,
         uint32_t* pPresentationTimingCount,
-        VkPastPresentationTimingGOOGLE* pPresentationTimings);
+        VkPastPresentationTimingGOOGLE* pPresentationTimings,
+        uint32_t doLock);
 #endif
 #ifdef VK_NV_sample_mask_override_coverage
 #endif
@@ -1583,10 +2116,13 @@
     VkCommandBuffer commandBuffer,
         uint32_t firstDiscardRectangle,
         uint32_t discardRectangleCount,
-        const VkRect2D* pDiscardRectangles);
+        const VkRect2D* pDiscardRectangles,
+        uint32_t doLock);
 #endif
 #ifdef VK_EXT_conservative_rasterization
 #endif
+#ifdef VK_EXT_depth_clip_enable
+#endif
 #ifdef VK_EXT_swapchain_colorspace
 #endif
 #ifdef VK_EXT_hdr_metadata
@@ -1594,21 +2130,50 @@
     VkDevice device,
         uint32_t swapchainCount,
         const VkSwapchainKHR* pSwapchains,
-        const VkHdrMetadataEXT* pMetadata);
+        const VkHdrMetadataEXT* pMetadata,
+        uint32_t doLock);
 #endif
 #ifdef VK_MVK_ios_surface
     VkResult vkCreateIOSSurfaceMVK(
     VkInstance instance,
         const VkIOSSurfaceCreateInfoMVK* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkSurfaceKHR* pSurface);
+        VkSurfaceKHR* pSurface,
+        uint32_t doLock);
 #endif
 #ifdef VK_MVK_macos_surface
     VkResult vkCreateMacOSSurfaceMVK(
     VkInstance instance,
         const VkMacOSSurfaceCreateInfoMVK* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkSurfaceKHR* pSurface);
+        VkSurfaceKHR* pSurface,
+        uint32_t doLock);
+#endif
+#ifdef VK_MVK_moltenvk
+    void vkGetMTLDeviceMVK(
+    VkPhysicalDevice physicalDevice,
+        void** pMTLDevice,
+        uint32_t doLock);
+    VkResult vkSetMTLTextureMVK(
+    VkImage image,
+        void* mtlTexture,
+        uint32_t doLock);
+    void vkGetMTLTextureMVK(
+    VkImage image,
+        void** pMTLTexture,
+        uint32_t doLock);
+    void vkGetMTLBufferMVK(
+    VkBuffer buffer,
+        void** pMTLBuffer,
+        uint32_t doLock);
+    VkResult vkUseIOSurfaceMVK(
+    VkImage image,
+        void* ioSurface,
+        uint32_t doLock);
+    void vkGetIOSurfaceMVK(
+    VkImage image,
+        void** pIOSurface,
+        uint32_t doLock);
 #endif
 #ifdef VK_EXT_external_memory_dma_buf
 #endif
@@ -1617,50 +2182,63 @@
 #ifdef VK_EXT_debug_utils
     VkResult vkSetDebugUtilsObjectNameEXT(
     VkDevice device,
-        const VkDebugUtilsObjectNameInfoEXT* pNameInfo);
+        const VkDebugUtilsObjectNameInfoEXT* pNameInfo,
+        uint32_t doLock);
     VkResult vkSetDebugUtilsObjectTagEXT(
     VkDevice device,
-        const VkDebugUtilsObjectTagInfoEXT* pTagInfo);
+        const VkDebugUtilsObjectTagInfoEXT* pTagInfo,
+        uint32_t doLock);
     void vkQueueBeginDebugUtilsLabelEXT(
     VkQueue queue,
-        const VkDebugUtilsLabelEXT* pLabelInfo);
+        const VkDebugUtilsLabelEXT* pLabelInfo,
+        uint32_t doLock);
     void vkQueueEndDebugUtilsLabelEXT(
-    VkQueue queue);
+    VkQueue queue,
+        uint32_t doLock);
     void vkQueueInsertDebugUtilsLabelEXT(
     VkQueue queue,
-        const VkDebugUtilsLabelEXT* pLabelInfo);
+        const VkDebugUtilsLabelEXT* pLabelInfo,
+        uint32_t doLock);
     void vkCmdBeginDebugUtilsLabelEXT(
     VkCommandBuffer commandBuffer,
-        const VkDebugUtilsLabelEXT* pLabelInfo);
+        const VkDebugUtilsLabelEXT* pLabelInfo,
+        uint32_t doLock);
     void vkCmdEndDebugUtilsLabelEXT(
-    VkCommandBuffer commandBuffer);
+    VkCommandBuffer commandBuffer,
+        uint32_t doLock);
     void vkCmdInsertDebugUtilsLabelEXT(
     VkCommandBuffer commandBuffer,
-        const VkDebugUtilsLabelEXT* pLabelInfo);
+        const VkDebugUtilsLabelEXT* pLabelInfo,
+        uint32_t doLock);
     VkResult vkCreateDebugUtilsMessengerEXT(
     VkInstance instance,
         const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkDebugUtilsMessengerEXT* pMessenger);
+        VkDebugUtilsMessengerEXT* pMessenger,
+        uint32_t doLock);
     void vkDestroyDebugUtilsMessengerEXT(
     VkInstance instance,
         VkDebugUtilsMessengerEXT messenger,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     void vkSubmitDebugUtilsMessageEXT(
     VkInstance instance,
         VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
         VkDebugUtilsMessageTypeFlagsEXT messageTypes,
-        const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData);
+        const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData,
+        uint32_t doLock);
 #endif
 #ifdef VK_ANDROID_external_memory_android_hardware_buffer
     VkResult vkGetAndroidHardwareBufferPropertiesANDROID(
     VkDevice device,
         const AHardwareBuffer* buffer,
-        VkAndroidHardwareBufferPropertiesANDROID* pProperties);
+        VkAndroidHardwareBufferPropertiesANDROID* pProperties,
+        uint32_t doLock);
     VkResult vkGetMemoryAndroidHardwareBufferANDROID(
     VkDevice device,
         const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
-        AHardwareBuffer** pBuffer);
+        AHardwareBuffer** pBuffer,
+        uint32_t doLock);
 #endif
 #ifdef VK_EXT_sampler_filter_minmax
 #endif
@@ -1670,16 +2248,20 @@
 #endif
 #ifdef VK_AMD_shader_fragment_mask
 #endif
+#ifdef VK_EXT_inline_uniform_block
+#endif
 #ifdef VK_EXT_shader_stencil_export
 #endif
 #ifdef VK_EXT_sample_locations
     void vkCmdSetSampleLocationsEXT(
     VkCommandBuffer commandBuffer,
-        const VkSampleLocationsInfoEXT* pSampleLocationsInfo);
+        const VkSampleLocationsInfoEXT* pSampleLocationsInfo,
+        uint32_t doLock);
     void vkGetPhysicalDeviceMultisamplePropertiesEXT(
     VkPhysicalDevice physicalDevice,
         VkSampleCountFlagBits samples,
-        VkMultisamplePropertiesEXT* pMultisampleProperties);
+        VkMultisamplePropertiesEXT* pMultisampleProperties,
+        uint32_t doLock);
 #endif
 #ifdef VK_EXT_blend_operation_advanced
 #endif
@@ -1689,33 +2271,171 @@
 #endif
 #ifdef VK_NV_fill_rectangle
 #endif
+#ifdef VK_NV_shader_sm_builtins
+#endif
 #ifdef VK_EXT_post_depth_coverage
 #endif
+#ifdef VK_EXT_image_drm_format_modifier
+    VkResult vkGetImageDrmFormatModifierPropertiesEXT(
+    VkDevice device,
+        VkImage image,
+        VkImageDrmFormatModifierPropertiesEXT* pProperties,
+        uint32_t doLock);
+#endif
 #ifdef VK_EXT_validation_cache
     VkResult vkCreateValidationCacheEXT(
     VkDevice device,
         const VkValidationCacheCreateInfoEXT* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
-        VkValidationCacheEXT* pValidationCache);
+        VkValidationCacheEXT* pValidationCache,
+        uint32_t doLock);
     void vkDestroyValidationCacheEXT(
     VkDevice device,
         VkValidationCacheEXT validationCache,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
     VkResult vkMergeValidationCachesEXT(
     VkDevice device,
         VkValidationCacheEXT dstCache,
         uint32_t srcCacheCount,
-        const VkValidationCacheEXT* pSrcCaches);
+        const VkValidationCacheEXT* pSrcCaches,
+        uint32_t doLock);
     VkResult vkGetValidationCacheDataEXT(
     VkDevice device,
         VkValidationCacheEXT validationCache,
         size_t* pDataSize,
-        void* pData);
+        void* pData,
+        uint32_t doLock);
 #endif
 #ifdef VK_EXT_descriptor_indexing
 #endif
 #ifdef VK_EXT_shader_viewport_index_layer
 #endif
+#ifdef VK_NV_shading_rate_image
+    void vkCmdBindShadingRateImageNV(
+    VkCommandBuffer commandBuffer,
+        VkImageView imageView,
+        VkImageLayout imageLayout,
+        uint32_t doLock);
+    void vkCmdSetViewportShadingRatePaletteNV(
+    VkCommandBuffer commandBuffer,
+        uint32_t firstViewport,
+        uint32_t viewportCount,
+        const VkShadingRatePaletteNV* pShadingRatePalettes,
+        uint32_t doLock);
+    void vkCmdSetCoarseSampleOrderNV(
+    VkCommandBuffer commandBuffer,
+        VkCoarseSampleOrderTypeNV sampleOrderType,
+        uint32_t customSampleOrderCount,
+        const VkCoarseSampleOrderCustomNV* pCustomSampleOrders,
+        uint32_t doLock);
+#endif
+#ifdef VK_NV_ray_tracing
+    VkResult vkCreateAccelerationStructureNV(
+    VkDevice device,
+        const VkAccelerationStructureCreateInfoNV* pCreateInfo,
+        const VkAllocationCallbacks* pAllocator,
+        VkAccelerationStructureNV* pAccelerationStructure,
+        uint32_t doLock);
+    void vkDestroyAccelerationStructureNV(
+    VkDevice device,
+        VkAccelerationStructureNV accelerationStructure,
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
+    void vkGetAccelerationStructureMemoryRequirementsNV(
+    VkDevice device,
+        const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo,
+        VkMemoryRequirements2KHR* pMemoryRequirements,
+        uint32_t doLock);
+    VkResult vkBindAccelerationStructureMemoryNV(
+    VkDevice device,
+        uint32_t bindInfoCount,
+        const VkBindAccelerationStructureMemoryInfoNV* pBindInfos,
+        uint32_t doLock);
+    void vkCmdBuildAccelerationStructureNV(
+    VkCommandBuffer commandBuffer,
+        const VkAccelerationStructureInfoNV* pInfo,
+        VkBuffer instanceData,
+        VkDeviceSize instanceOffset,
+        VkBool32 update,
+        VkAccelerationStructureNV dst,
+        VkAccelerationStructureNV src,
+        VkBuffer scratch,
+        VkDeviceSize scratchOffset,
+        uint32_t doLock);
+    void vkCmdCopyAccelerationStructureNV(
+    VkCommandBuffer commandBuffer,
+        VkAccelerationStructureNV dst,
+        VkAccelerationStructureNV src,
+        VkCopyAccelerationStructureModeKHR mode,
+        uint32_t doLock);
+    void vkCmdTraceRaysNV(
+    VkCommandBuffer commandBuffer,
+        VkBuffer raygenShaderBindingTableBuffer,
+        VkDeviceSize raygenShaderBindingOffset,
+        VkBuffer missShaderBindingTableBuffer,
+        VkDeviceSize missShaderBindingOffset,
+        VkDeviceSize missShaderBindingStride,
+        VkBuffer hitShaderBindingTableBuffer,
+        VkDeviceSize hitShaderBindingOffset,
+        VkDeviceSize hitShaderBindingStride,
+        VkBuffer callableShaderBindingTableBuffer,
+        VkDeviceSize callableShaderBindingOffset,
+        VkDeviceSize callableShaderBindingStride,
+        uint32_t width,
+        uint32_t height,
+        uint32_t depth,
+        uint32_t doLock);
+    VkResult vkCreateRayTracingPipelinesNV(
+    VkDevice device,
+        VkPipelineCache pipelineCache,
+        uint32_t createInfoCount,
+        const VkRayTracingPipelineCreateInfoNV* pCreateInfos,
+        const VkAllocationCallbacks* pAllocator,
+        VkPipeline* pPipelines,
+        uint32_t doLock);
+    VkResult vkGetRayTracingShaderGroupHandlesKHR(
+    VkDevice device,
+        VkPipeline pipeline,
+        uint32_t firstGroup,
+        uint32_t groupCount,
+        size_t dataSize,
+        void* pData,
+        uint32_t doLock);
+    VkResult vkGetRayTracingShaderGroupHandlesNV(
+    VkDevice device,
+        VkPipeline pipeline,
+        uint32_t firstGroup,
+        uint32_t groupCount,
+        size_t dataSize,
+        void* pData,
+        uint32_t doLock);
+    VkResult vkGetAccelerationStructureHandleNV(
+    VkDevice device,
+        VkAccelerationStructureNV accelerationStructure,
+        size_t dataSize,
+        void* pData,
+        uint32_t doLock);
+    void vkCmdWriteAccelerationStructuresPropertiesNV(
+    VkCommandBuffer commandBuffer,
+        uint32_t accelerationStructureCount,
+        const VkAccelerationStructureNV* pAccelerationStructures,
+        VkQueryType queryType,
+        VkQueryPool queryPool,
+        uint32_t firstQuery,
+        uint32_t doLock);
+    VkResult vkCompileDeferredNV(
+    VkDevice device,
+        VkPipeline pipeline,
+        uint32_t shader,
+        uint32_t doLock);
+#endif
+#ifdef VK_NV_representative_fragment_test
+#endif
+#ifdef VK_EXT_filter_cubic
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
+#endif
 #ifdef VK_EXT_global_priority
 #endif
 #ifdef VK_EXT_external_memory_host
@@ -1723,7 +2443,8 @@
     VkDevice device,
         VkExternalMemoryHandleTypeFlagBits handleType,
         const void* pHostPointer,
-        VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties);
+        VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties,
+        uint32_t doLock);
 #endif
 #ifdef VK_AMD_buffer_marker
     void vkCmdWriteBufferMarkerAMD(
@@ -1731,40 +2452,441 @@
         VkPipelineStageFlagBits pipelineStage,
         VkBuffer dstBuffer,
         VkDeviceSize dstOffset,
-        uint32_t marker);
+        uint32_t marker,
+        uint32_t doLock);
+#endif
+#ifdef VK_AMD_pipeline_compiler_control
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+    VkResult vkGetPhysicalDeviceCalibrateableTimeDomainsEXT(
+    VkPhysicalDevice physicalDevice,
+        uint32_t* pTimeDomainCount,
+        VkTimeDomainEXT* pTimeDomains,
+        uint32_t doLock);
+    VkResult vkGetCalibratedTimestampsEXT(
+    VkDevice device,
+        uint32_t timestampCount,
+        const VkCalibratedTimestampInfoEXT* pTimestampInfos,
+        uint64_t* pTimestamps,
+        uint64_t* pMaxDeviation,
+        uint32_t doLock);
 #endif
 #ifdef VK_AMD_shader_core_properties
 #endif
+#ifdef VK_AMD_memory_overallocation_behavior
+#endif
 #ifdef VK_EXT_vertex_attribute_divisor
 #endif
+#ifdef VK_GGP_frame_token
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+#endif
 #ifdef VK_NV_shader_subgroup_partitioned
 #endif
+#ifdef VK_NV_compute_shader_derivatives
+#endif
+#ifdef VK_NV_mesh_shader
+    void vkCmdDrawMeshTasksNV(
+    VkCommandBuffer commandBuffer,
+        uint32_t taskCount,
+        uint32_t firstTask,
+        uint32_t doLock);
+    void vkCmdDrawMeshTasksIndirectNV(
+    VkCommandBuffer commandBuffer,
+        VkBuffer buffer,
+        VkDeviceSize offset,
+        uint32_t drawCount,
+        uint32_t stride,
+        uint32_t doLock);
+    void vkCmdDrawMeshTasksIndirectCountNV(
+    VkCommandBuffer commandBuffer,
+        VkBuffer buffer,
+        VkDeviceSize offset,
+        VkBuffer countBuffer,
+        VkDeviceSize countBufferOffset,
+        uint32_t maxDrawCount,
+        uint32_t stride,
+        uint32_t doLock);
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+#endif
+#ifdef VK_NV_shader_image_footprint
+#endif
+#ifdef VK_NV_scissor_exclusive
+    void vkCmdSetExclusiveScissorNV(
+    VkCommandBuffer commandBuffer,
+        uint32_t firstExclusiveScissor,
+        uint32_t exclusiveScissorCount,
+        const VkRect2D* pExclusiveScissors,
+        uint32_t doLock);
+#endif
 #ifdef VK_NV_device_diagnostic_checkpoints
     void vkCmdSetCheckpointNV(
     VkCommandBuffer commandBuffer,
-        const void* pCheckpointMarker);
+        const void* pCheckpointMarker,
+        uint32_t doLock);
     void vkGetQueueCheckpointDataNV(
     VkQueue queue,
         uint32_t* pCheckpointDataCount,
-        VkCheckpointDataNV* pCheckpointData);
+        VkCheckpointDataNV* pCheckpointData,
+        uint32_t doLock);
 #endif
-#ifdef VK_GOOGLE_address_space
-    VkResult vkMapMemoryIntoAddressSpaceGOOGLE(
+#ifdef VK_INTEL_shader_integer_functions2
+#endif
+#ifdef VK_INTEL_performance_query
+    VkResult vkInitializePerformanceApiINTEL(
     VkDevice device,
-        VkDeviceMemory memory,
-        uint64_t* pAddress);
+        const VkInitializePerformanceApiInfoINTEL* pInitializeInfo,
+        uint32_t doLock);
+    void vkUninitializePerformanceApiINTEL(
+    VkDevice device,
+        uint32_t doLock);
+    VkResult vkCmdSetPerformanceMarkerINTEL(
+    VkCommandBuffer commandBuffer,
+        const VkPerformanceMarkerInfoINTEL* pMarkerInfo,
+        uint32_t doLock);
+    VkResult vkCmdSetPerformanceStreamMarkerINTEL(
+    VkCommandBuffer commandBuffer,
+        const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo,
+        uint32_t doLock);
+    VkResult vkCmdSetPerformanceOverrideINTEL(
+    VkCommandBuffer commandBuffer,
+        const VkPerformanceOverrideInfoINTEL* pOverrideInfo,
+        uint32_t doLock);
+    VkResult vkAcquirePerformanceConfigurationINTEL(
+    VkDevice device,
+        const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo,
+        VkPerformanceConfigurationINTEL* pConfiguration,
+        uint32_t doLock);
+    VkResult vkReleasePerformanceConfigurationINTEL(
+    VkDevice device,
+        VkPerformanceConfigurationINTEL configuration,
+        uint32_t doLock);
+    VkResult vkQueueSetPerformanceConfigurationINTEL(
+    VkQueue queue,
+        VkPerformanceConfigurationINTEL configuration,
+        uint32_t doLock);
+    VkResult vkGetPerformanceParameterINTEL(
+    VkDevice device,
+        VkPerformanceParameterTypeINTEL parameter,
+        VkPerformanceValueINTEL* pValue,
+        uint32_t doLock);
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_EXT_pci_bus_info
+#endif
+#ifdef VK_AMD_display_native_hdr
+    void vkSetLocalDimmingAMD(
+    VkDevice device,
+        VkSwapchainKHR swapChain,
+        VkBool32 localDimmingEnable,
+        uint32_t doLock);
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+    VkResult vkCreateImagePipeSurfaceFUCHSIA(
+    VkInstance instance,
+        const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo,
+        const VkAllocationCallbacks* pAllocator,
+        VkSurfaceKHR* pSurface,
+        uint32_t doLock);
+#endif
+#ifdef VK_EXT_metal_surface
+    VkResult vkCreateMetalSurfaceEXT(
+    VkInstance instance,
+        const VkMetalSurfaceCreateInfoEXT* pCreateInfo,
+        const VkAllocationCallbacks* pAllocator,
+        VkSurfaceKHR* pSurface,
+        uint32_t doLock);
+#endif
+#ifdef VK_EXT_fragment_density_map
+#endif
+#ifdef VK_EXT_scalar_block_layout
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+#endif
+#ifdef VK_AMD_shader_core_properties2
+#endif
+#ifdef VK_AMD_device_coherent_memory
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+#endif
+#ifdef VK_EXT_memory_budget
+#endif
+#ifdef VK_EXT_memory_priority
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+#endif
+#ifdef VK_EXT_buffer_device_address
+    VkDeviceAddress vkGetBufferDeviceAddressEXT(
+    VkDevice device,
+        const VkBufferDeviceAddressInfo* pInfo,
+        uint32_t doLock);
+#endif
+#ifdef VK_EXT_tooling_info
+    VkResult vkGetPhysicalDeviceToolPropertiesEXT(
+    VkPhysicalDevice physicalDevice,
+        uint32_t* pToolCount,
+        VkPhysicalDeviceToolPropertiesEXT* pToolProperties,
+        uint32_t doLock);
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+#endif
+#ifdef VK_EXT_validation_features
+#endif
+#ifdef VK_NV_cooperative_matrix
+    VkResult vkGetPhysicalDeviceCooperativeMatrixPropertiesNV(
+    VkPhysicalDevice physicalDevice,
+        uint32_t* pPropertyCount,
+        VkCooperativeMatrixPropertiesNV* pProperties,
+        uint32_t doLock);
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+    VkResult vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(
+    VkPhysicalDevice physicalDevice,
+        uint32_t* pCombinationCount,
+        VkFramebufferMixedSamplesCombinationNV* pCombinations,
+        uint32_t doLock);
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+    VkResult vkGetPhysicalDeviceSurfacePresentModes2EXT(
+    VkPhysicalDevice physicalDevice,
+        const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+        uint32_t* pPresentModeCount,
+        VkPresentModeKHR* pPresentModes,
+        uint32_t doLock);
+    VkResult vkAcquireFullScreenExclusiveModeEXT(
+    VkDevice device,
+        VkSwapchainKHR swapchain,
+        uint32_t doLock);
+    VkResult vkReleaseFullScreenExclusiveModeEXT(
+    VkDevice device,
+        VkSwapchainKHR swapchain,
+        uint32_t doLock);
+    VkResult vkGetDeviceGroupSurfacePresentModes2EXT(
+    VkDevice device,
+        const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+        VkDeviceGroupPresentModeFlagsKHR* pModes,
+        uint32_t doLock);
+#endif
+#ifdef VK_EXT_headless_surface
+    VkResult vkCreateHeadlessSurfaceEXT(
+    VkInstance instance,
+        const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo,
+        const VkAllocationCallbacks* pAllocator,
+        VkSurfaceKHR* pSurface,
+        uint32_t doLock);
+#endif
+#ifdef VK_EXT_line_rasterization
+    void vkCmdSetLineStippleEXT(
+    VkCommandBuffer commandBuffer,
+        uint32_t lineStippleFactor,
+        uint16_t lineStipplePattern,
+        uint32_t doLock);
+#endif
+#ifdef VK_EXT_shader_atomic_float
+#endif
+#ifdef VK_EXT_host_query_reset
+    void vkResetQueryPoolEXT(
+    VkDevice device,
+        VkQueryPool queryPool,
+        uint32_t firstQuery,
+        uint32_t queryCount,
+        uint32_t doLock);
+#endif
+#ifdef VK_EXT_index_type_uint8
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+    void vkCmdSetCullModeEXT(
+    VkCommandBuffer commandBuffer,
+        VkCullModeFlags cullMode,
+        uint32_t doLock);
+    void vkCmdSetFrontFaceEXT(
+    VkCommandBuffer commandBuffer,
+        VkFrontFace frontFace,
+        uint32_t doLock);
+    void vkCmdSetPrimitiveTopologyEXT(
+    VkCommandBuffer commandBuffer,
+        VkPrimitiveTopology primitiveTopology,
+        uint32_t doLock);
+    void vkCmdSetViewportWithCountEXT(
+    VkCommandBuffer commandBuffer,
+        uint32_t viewportCount,
+        const VkViewport* pViewports,
+        uint32_t doLock);
+    void vkCmdSetScissorWithCountEXT(
+    VkCommandBuffer commandBuffer,
+        uint32_t scissorCount,
+        const VkRect2D* pScissors,
+        uint32_t doLock);
+    void vkCmdBindVertexBuffers2EXT(
+    VkCommandBuffer commandBuffer,
+        uint32_t firstBinding,
+        uint32_t bindingCount,
+        const VkBuffer* pBuffers,
+        const VkDeviceSize* pOffsets,
+        const VkDeviceSize* pSizes,
+        const VkDeviceSize* pStrides,
+        uint32_t doLock);
+    void vkCmdSetDepthTestEnableEXT(
+    VkCommandBuffer commandBuffer,
+        VkBool32 depthTestEnable,
+        uint32_t doLock);
+    void vkCmdSetDepthWriteEnableEXT(
+    VkCommandBuffer commandBuffer,
+        VkBool32 depthWriteEnable,
+        uint32_t doLock);
+    void vkCmdSetDepthCompareOpEXT(
+    VkCommandBuffer commandBuffer,
+        VkCompareOp depthCompareOp,
+        uint32_t doLock);
+    void vkCmdSetDepthBoundsTestEnableEXT(
+    VkCommandBuffer commandBuffer,
+        VkBool32 depthBoundsTestEnable,
+        uint32_t doLock);
+    void vkCmdSetStencilTestEnableEXT(
+    VkCommandBuffer commandBuffer,
+        VkBool32 stencilTestEnable,
+        uint32_t doLock);
+    void vkCmdSetStencilOpEXT(
+    VkCommandBuffer commandBuffer,
+        VkStencilFaceFlags faceMask,
+        VkStencilOp failOp,
+        VkStencilOp passOp,
+        VkStencilOp depthFailOp,
+        VkCompareOp compareOp,
+        uint32_t doLock);
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+#endif
+#ifdef VK_NV_device_generated_commands
+    void vkGetGeneratedCommandsMemoryRequirementsNV(
+    VkDevice device,
+        const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo,
+        VkMemoryRequirements2* pMemoryRequirements,
+        uint32_t doLock);
+    void vkCmdPreprocessGeneratedCommandsNV(
+    VkCommandBuffer commandBuffer,
+        const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo,
+        uint32_t doLock);
+    void vkCmdExecuteGeneratedCommandsNV(
+    VkCommandBuffer commandBuffer,
+        VkBool32 isPreprocessed,
+        const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo,
+        uint32_t doLock);
+    void vkCmdBindPipelineShaderGroupNV(
+    VkCommandBuffer commandBuffer,
+        VkPipelineBindPoint pipelineBindPoint,
+        VkPipeline pipeline,
+        uint32_t groupIndex,
+        uint32_t doLock);
+    VkResult vkCreateIndirectCommandsLayoutNV(
+    VkDevice device,
+        const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo,
+        const VkAllocationCallbacks* pAllocator,
+        VkIndirectCommandsLayoutNV* pIndirectCommandsLayout,
+        uint32_t doLock);
+    void vkDestroyIndirectCommandsLayoutNV(
+    VkDevice device,
+        VkIndirectCommandsLayoutNV indirectCommandsLayout,
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+#endif
+#ifdef VK_QCOM_render_pass_transform
+#endif
+#ifdef VK_EXT_device_memory_report
+#endif
+#ifdef VK_EXT_robustness2
+#endif
+#ifdef VK_EXT_custom_border_color
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+    VkResult vkCreatePrivateDataSlotEXT(
+    VkDevice device,
+        const VkPrivateDataSlotCreateInfoEXT* pCreateInfo,
+        const VkAllocationCallbacks* pAllocator,
+        VkPrivateDataSlotEXT* pPrivateDataSlot,
+        uint32_t doLock);
+    void vkDestroyPrivateDataSlotEXT(
+    VkDevice device,
+        VkPrivateDataSlotEXT privateDataSlot,
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
+    VkResult vkSetPrivateDataEXT(
+    VkDevice device,
+        VkObjectType objectType,
+        uint64_t objectHandle,
+        VkPrivateDataSlotEXT privateDataSlot,
+        uint64_t data,
+        uint32_t doLock);
+    void vkGetPrivateDataEXT(
+    VkDevice device,
+        VkObjectType objectType,
+        uint64_t objectHandle,
+        VkPrivateDataSlotEXT privateDataSlot,
+        uint64_t* pData,
+        uint32_t doLock);
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+#endif
+#ifdef VK_NV_device_diagnostics_config
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+    void vkCmdSetFragmentShadingRateEnumNV(
+    VkCommandBuffer commandBuffer,
+        VkFragmentShadingRateNV shadingRate,
+        const VkFragmentShadingRateCombinerOpKHR combinerOps[2],
+        uint32_t doLock);
+#endif
+#ifdef VK_EXT_fragment_density_map2
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+#endif
+#ifdef VK_EXT_image_robustness
+#endif
+#ifdef VK_EXT_4444_formats
+#endif
+#ifdef VK_EXT_directfb_surface
+    VkResult vkCreateDirectFBSurfaceEXT(
+    VkInstance instance,
+        const VkDirectFBSurfaceCreateInfoEXT* pCreateInfo,
+        const VkAllocationCallbacks* pAllocator,
+        VkSurfaceKHR* pSurface,
+        uint32_t doLock);
+    VkBool32 vkGetPhysicalDeviceDirectFBPresentationSupportEXT(
+    VkPhysicalDevice physicalDevice,
+        uint32_t queueFamilyIndex,
+        IDirectFB* dfb,
+        uint32_t doLock);
+#endif
+#ifdef VK_GOOGLE_gfxstream
     VkResult vkRegisterImageColorBufferGOOGLE(
     VkDevice device,
         VkImage image,
-        uint32_t colorBuffer);
+        uint32_t colorBuffer,
+        uint32_t doLock);
     VkResult vkRegisterBufferColorBufferGOOGLE(
     VkDevice device,
         VkBuffer buffer,
-        uint32_t colorBuffer);
-#endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
+        uint32_t colorBuffer,
+        uint32_t doLock);
+    VkResult vkMapMemoryIntoAddressSpaceGOOGLE(
+    VkDevice device,
+        VkDeviceMemory memory,
+        uint64_t* pAddress,
+        uint32_t doLock);
     void vkUpdateDescriptorSetWithTemplateSizedGOOGLE(
     VkDevice device,
         VkDescriptorSet descriptorSet,
@@ -1777,49 +2899,244 @@
         const uint32_t* pBufferViewEntryIndices,
         const VkDescriptorImageInfo* pImageInfos,
         const VkDescriptorBufferInfo* pBufferInfos,
-        const VkBufferView* pBufferViews);
-#endif
-#ifdef VK_GOOGLE_async_command_buffers
+        const VkBufferView* pBufferViews,
+        uint32_t doLock);
     void vkBeginCommandBufferAsyncGOOGLE(
     VkCommandBuffer commandBuffer,
-        const VkCommandBufferBeginInfo* pBeginInfo);
+        const VkCommandBufferBeginInfo* pBeginInfo,
+        uint32_t doLock);
     void vkEndCommandBufferAsyncGOOGLE(
-    VkCommandBuffer commandBuffer);
+    VkCommandBuffer commandBuffer,
+        uint32_t doLock);
     void vkResetCommandBufferAsyncGOOGLE(
     VkCommandBuffer commandBuffer,
-        VkCommandBufferResetFlags flags);
+        VkCommandBufferResetFlags flags,
+        uint32_t doLock);
     void vkCommandBufferHostSyncGOOGLE(
     VkCommandBuffer commandBuffer,
         uint32_t needHostSync,
-        uint32_t sequenceNumber);
-#endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
+        uint32_t sequenceNumber,
+        uint32_t doLock);
     VkResult vkCreateImageWithRequirementsGOOGLE(
     VkDevice device,
         const VkImageCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
         VkImage* pImage,
-        VkMemoryRequirements* pMemoryRequirements);
+        VkMemoryRequirements* pMemoryRequirements,
+        uint32_t doLock);
     VkResult vkCreateBufferWithRequirementsGOOGLE(
     VkDevice device,
         const VkBufferCreateInfo* pCreateInfo,
         const VkAllocationCallbacks* pAllocator,
         VkBuffer* pBuffer,
-        VkMemoryRequirements* pMemoryRequirements);
-#endif
-#ifdef VK_GOOGLE_address_space_info
+        VkMemoryRequirements* pMemoryRequirements,
+        uint32_t doLock);
     VkResult vkGetMemoryHostAddressInfoGOOGLE(
     VkDevice device,
         VkDeviceMemory memory,
         uint64_t* pAddress,
         uint64_t* pSize,
-        uint64_t* pHostmemId);
-#endif
-#ifdef VK_GOOGLE_free_memory_sync
+        uint64_t* pHostmemId,
+        uint32_t doLock);
     VkResult vkFreeMemorySyncGOOGLE(
     VkDevice device,
         VkDeviceMemory memory,
-        const VkAllocationCallbacks* pAllocator);
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
+    void vkQueueHostSyncGOOGLE(
+    VkQueue queue,
+        uint32_t needHostSync,
+        uint32_t sequenceNumber,
+        uint32_t doLock);
+    void vkQueueSubmitAsyncGOOGLE(
+    VkQueue queue,
+        uint32_t submitCount,
+        const VkSubmitInfo* pSubmits,
+        VkFence fence,
+        uint32_t doLock);
+    void vkQueueWaitIdleAsyncGOOGLE(
+    VkQueue queue,
+        uint32_t doLock);
+    void vkQueueBindSparseAsyncGOOGLE(
+    VkQueue queue,
+        uint32_t bindInfoCount,
+        const VkBindSparseInfo* pBindInfo,
+        VkFence fence,
+        uint32_t doLock);
+    void vkGetLinearImageLayoutGOOGLE(
+    VkDevice device,
+        VkFormat format,
+        VkDeviceSize* pOffset,
+        VkDeviceSize* pRowPitchAlignment,
+        uint32_t doLock);
+    void vkQueueFlushCommandsGOOGLE(
+    VkQueue queue,
+        VkCommandBuffer commandBuffer,
+        VkDeviceSize dataSize,
+        const void* pData,
+        uint32_t doLock);
+    void vkQueueCommitDescriptorSetUpdatesGOOGLE(
+    VkQueue queue,
+        uint32_t descriptorPoolCount,
+        const VkDescriptorPool* pDescriptorPools,
+        uint32_t descriptorSetCount,
+        const VkDescriptorSetLayout* pSetLayouts,
+        const uint64_t* pDescriptorSetPoolIds,
+        const uint32_t* pDescriptorSetWhichPool,
+        const uint32_t* pDescriptorSetPendingAllocation,
+        const uint32_t* pDescriptorWriteStartingIndices,
+        uint32_t pendingDescriptorWriteCount,
+        const VkWriteDescriptorSet* pPendingDescriptorWrites,
+        uint32_t doLock);
+    void vkCollectDescriptorPoolIdsGOOGLE(
+    VkDevice device,
+        VkDescriptorPool descriptorPool,
+        uint32_t* pPoolIdCount,
+        uint64_t* pPoolIds,
+        uint32_t doLock);
+#endif
+#ifdef VK_KHR_acceleration_structure
+    VkResult vkCreateAccelerationStructureKHR(
+    VkDevice device,
+        const VkAccelerationStructureCreateInfoKHR* pCreateInfo,
+        const VkAllocationCallbacks* pAllocator,
+        VkAccelerationStructureKHR* pAccelerationStructure,
+        uint32_t doLock);
+    void vkDestroyAccelerationStructureKHR(
+    VkDevice device,
+        VkAccelerationStructureKHR accelerationStructure,
+        const VkAllocationCallbacks* pAllocator,
+        uint32_t doLock);
+    void vkCmdBuildAccelerationStructuresKHR(
+    VkCommandBuffer commandBuffer,
+        uint32_t infoCount,
+        const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,
+        const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos,
+        uint32_t doLock);
+    void vkCmdBuildAccelerationStructuresIndirectKHR(
+    VkCommandBuffer commandBuffer,
+        uint32_t infoCount,
+        const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,
+        const VkDeviceAddress* pIndirectDeviceAddresses,
+        const uint32_t* pIndirectStrides,
+        const uint32_t* const* ppMaxPrimitiveCounts,
+        uint32_t doLock);
+    VkResult vkBuildAccelerationStructuresKHR(
+    VkDevice device,
+        VkDeferredOperationKHR deferredOperation,
+        uint32_t infoCount,
+        const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,
+        const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos,
+        uint32_t doLock);
+    VkResult vkCopyAccelerationStructureKHR(
+    VkDevice device,
+        VkDeferredOperationKHR deferredOperation,
+        const VkCopyAccelerationStructureInfoKHR* pInfo,
+        uint32_t doLock);
+    VkResult vkCopyAccelerationStructureToMemoryKHR(
+    VkDevice device,
+        VkDeferredOperationKHR deferredOperation,
+        const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo,
+        uint32_t doLock);
+    VkResult vkCopyMemoryToAccelerationStructureKHR(
+    VkDevice device,
+        VkDeferredOperationKHR deferredOperation,
+        const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo,
+        uint32_t doLock);
+    VkResult vkWriteAccelerationStructuresPropertiesKHR(
+    VkDevice device,
+        uint32_t accelerationStructureCount,
+        const VkAccelerationStructureKHR* pAccelerationStructures,
+        VkQueryType queryType,
+        size_t dataSize,
+        void* pData,
+        size_t stride,
+        uint32_t doLock);
+    void vkCmdCopyAccelerationStructureKHR(
+    VkCommandBuffer commandBuffer,
+        const VkCopyAccelerationStructureInfoKHR* pInfo,
+        uint32_t doLock);
+    void vkCmdCopyAccelerationStructureToMemoryKHR(
+    VkCommandBuffer commandBuffer,
+        const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo,
+        uint32_t doLock);
+    void vkCmdCopyMemoryToAccelerationStructureKHR(
+    VkCommandBuffer commandBuffer,
+        const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo,
+        uint32_t doLock);
+    VkDeviceAddress vkGetAccelerationStructureDeviceAddressKHR(
+    VkDevice device,
+        const VkAccelerationStructureDeviceAddressInfoKHR* pInfo,
+        uint32_t doLock);
+    void vkCmdWriteAccelerationStructuresPropertiesKHR(
+    VkCommandBuffer commandBuffer,
+        uint32_t accelerationStructureCount,
+        const VkAccelerationStructureKHR* pAccelerationStructures,
+        VkQueryType queryType,
+        VkQueryPool queryPool,
+        uint32_t firstQuery,
+        uint32_t doLock);
+    void vkGetDeviceAccelerationStructureCompatibilityKHR(
+    VkDevice device,
+        const VkAccelerationStructureVersionInfoKHR* pVersionInfo,
+        VkAccelerationStructureCompatibilityKHR* pCompatibility,
+        uint32_t doLock);
+    void vkGetAccelerationStructureBuildSizesKHR(
+    VkDevice device,
+        VkAccelerationStructureBuildTypeKHR buildType,
+        const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo,
+        const uint32_t* pMaxPrimitiveCounts,
+        VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo,
+        uint32_t doLock);
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+    void vkCmdTraceRaysKHR(
+    VkCommandBuffer commandBuffer,
+        const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable,
+        const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable,
+        const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable,
+        const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable,
+        uint32_t width,
+        uint32_t height,
+        uint32_t depth,
+        uint32_t doLock);
+    VkResult vkCreateRayTracingPipelinesKHR(
+    VkDevice device,
+        VkDeferredOperationKHR deferredOperation,
+        VkPipelineCache pipelineCache,
+        uint32_t createInfoCount,
+        const VkRayTracingPipelineCreateInfoKHR* pCreateInfos,
+        const VkAllocationCallbacks* pAllocator,
+        VkPipeline* pPipelines,
+        uint32_t doLock);
+    VkResult vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(
+    VkDevice device,
+        VkPipeline pipeline,
+        uint32_t firstGroup,
+        uint32_t groupCount,
+        size_t dataSize,
+        void* pData,
+        uint32_t doLock);
+    void vkCmdTraceRaysIndirectKHR(
+    VkCommandBuffer commandBuffer,
+        const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable,
+        const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable,
+        const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable,
+        const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable,
+        VkDeviceAddress indirectDeviceAddress,
+        uint32_t doLock);
+    VkDeviceSize vkGetRayTracingShaderGroupStackSizeKHR(
+    VkDevice device,
+        VkPipeline pipeline,
+        uint32_t group,
+        VkShaderGroupShaderKHR groupShader,
+        uint32_t doLock);
+    void vkCmdSetRayTracingPipelineStackSizeKHR(
+    VkCommandBuffer commandBuffer,
+        uint32_t pipelineStackSize,
+        uint32_t doLock);
+#endif
+#ifdef VK_KHR_ray_query
 #endif
 
 private:
diff --git a/system/vulkan_enc/VkEncoder.h.inl b/system/vulkan_enc/VkEncoder.h.inl
new file mode 100644
index 0000000..e064f0a
--- /dev/null
+++ b/system/vulkan_enc/VkEncoder.h.inl
@@ -0,0 +1,9 @@
+    void flush();
+    void lock();
+    void unlock();
+    void incRef();
+    bool decRef();
+    uint32_t refCount = 1;
+    #define POOL_CLEAR_INTERVAL 10
+    uint32_t encodeCount = 0;
+    uint32_t featureBits = 0;
diff --git a/system/vulkan_enc/VulkanHandles.h b/system/vulkan_enc/VulkanHandles.h
index 54699aa..b0f4f6c 100644
--- a/system/vulkan_enc/VulkanHandles.h
+++ b/system/vulkan_enc/VulkanHandles.h
@@ -18,12 +18,12 @@
 
 #define GOLDFISH_VK_LIST_TRIVIAL_DISPATCHABLE_HANDLE_TYPES(f) \
     f(VkPhysicalDevice) \
-    f(VkQueue) \
 
 #define GOLDFISH_VK_LIST_DISPATCHABLE_HANDLE_TYPES(f) \
     f(VkInstance) \
     f(VkDevice) \
     f(VkCommandBuffer) \
+    f(VkQueue) \
     GOLDFISH_VK_LIST_TRIVIAL_DISPATCHABLE_HANDLE_TYPES(f)
 
 #ifdef VK_NVX_device_generated_commands
@@ -38,17 +38,59 @@
 
 #endif // VK_NVX_device_generated_commands
 
+#ifdef VK_NV_device_generated_commands
+
+#define __GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES_NV_DEVICE_GENERATED_COMMANDS(f) \
+    f(VkIndirectCommandsLayoutNV) \
+
+#else
+
+#define __GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES_NV_DEVICE_GENERATED_COMMANDS(f)
+
+#endif // VK_NV_device_generated_commands
+
+#ifdef VK_NV_ray_tracing
+
+#define __GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES_NV_RAY_TRACING(f) \
+    f(VkAccelerationStructureNV) \
+
+#else
+
+#define __GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES_NV_RAY_TRACING(f)
+
+#endif // VK_NV_ray_tracing
+
+#ifdef VK_KHR_acceleration_structure
+
+#define __GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES_KHR_ACCELERATION_STRUCTURE(f) \
+    f(VkAccelerationStructureKHR) \
+
+#else
+
+#define __GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES_KHR_ACCELERATION_STRUCTURE(f)
+
+#endif // VK_KHR_acceleration_structure
+
+#ifdef VK_USE_PLATFORM_FUCHSIA
+
+#define __GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES_FUCHSIA(f) \
+    f(VkBufferCollectionFUCHSIA)
+
+#else
+
+#define __GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES_FUCHSIA(f)
+
+#endif  // VK_USE_PLATFORM_FUCHSIA
+
 #define GOLDFISH_VK_LIST_TRIVIAL_NON_DISPATCHABLE_HANDLE_TYPES(f) \
     f(VkBufferView) \
     f(VkImageView) \
     f(VkShaderModule) \
-    f(VkSampler) \
     f(VkPipeline) \
     f(VkPipelineCache) \
     f(VkPipelineLayout) \
     f(VkRenderPass) \
     f(VkFramebuffer) \
-    f(VkCommandPool) \
     f(VkEvent) \
     f(VkQueryPool) \
     f(VkSamplerYcbcrConversion) \
@@ -59,7 +101,10 @@
     f(VkValidationCacheEXT) \
     f(VkDebugReportCallbackEXT) \
     f(VkDebugUtilsMessengerEXT) \
-    __GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES_NVX_DEVICE_GENERATED_COMMANDS(f)
+    __GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES_NVX_DEVICE_GENERATED_COMMANDS(f) \
+    __GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES_NV_DEVICE_GENERATED_COMMANDS(f) \
+    __GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES_NV_RAY_TRACING(f) \
+    __GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES_KHR_ACCELERATION_STRUCTURE(f) \
 
 #define GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES(f) \
     f(VkDeviceMemory) \
@@ -71,6 +116,9 @@
     f(VkDescriptorPool) \
     f(VkDescriptorSet) \
     f(VkDescriptorSetLayout) \
+    f(VkCommandPool) \
+    f(VkSampler) \
+    __GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES_FUCHSIA(f) \
     GOLDFISH_VK_LIST_TRIVIAL_NON_DISPATCHABLE_HANDLE_TYPES(f) \
 
 #define GOLDFISH_VK_LIST_HANDLE_TYPES(f) \
@@ -80,3 +128,27 @@
 #define GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(f) \
     GOLDFISH_VK_LIST_TRIVIAL_DISPATCHABLE_HANDLE_TYPES(f) \
     GOLDFISH_VK_LIST_TRIVIAL_NON_DISPATCHABLE_HANDLE_TYPES(f)
+
+#define GOLDFISH_VK_LIST_AUTODEFINED_STRUCT_DISPATCHABLE_HANDLE_TYPES(f) \
+    f(VkInstance) \
+    f(VkDevice) \
+    f(VkQueue) \
+    GOLDFISH_VK_LIST_TRIVIAL_DISPATCHABLE_HANDLE_TYPES(f)
+
+#define GOLDFISH_VK_LIST_AUTODEFINED_STRUCT_NON_DISPATCHABLE_HANDLE_TYPES(f) \
+    f(VkDeviceMemory) \
+    f(VkBuffer) \
+    f(VkImage) \
+    f(VkSemaphore) \
+    f(VkFence) \
+    f(VkDescriptorUpdateTemplate) \
+    f(VkCommandPool) \
+    f(VkSampler) \
+    __GOLDFISH_VK_LIST_NON_DISPATCHABLE_HANDLE_TYPES_FUCHSIA(f) \
+    GOLDFISH_VK_LIST_TRIVIAL_NON_DISPATCHABLE_HANDLE_TYPES(f) \
+
+#define GOLDFISH_VK_LIST_MANUAL_STRUCT_NON_DISPATCHABLE_HANDLE_TYPES(f) \
+    f(VkDescriptorPool) \
+    f(VkDescriptorSetLayout) \
+    f(VkDescriptorSet) \
+
diff --git a/system/vulkan_enc/VulkanStreamGuest.cpp b/system/vulkan_enc/VulkanStreamGuest.cpp
index 43f3706..fedc826 100644
--- a/system/vulkan_enc/VulkanStreamGuest.cpp
+++ b/system/vulkan_enc/VulkanStreamGuest.cpp
@@ -13,121 +13,26 @@
 // limitations under the License.
 #include "VulkanStreamGuest.h"
 
-#include "IOStream.h"
-#include "ResourceTracker.h"
-
-#include "android/base/Pool.h"
-#include "android/base/Tracing.h"
-
-#include <vector>
-
-#include <log/log.h>
-#include <inttypes.h>
-
 namespace goldfish_vk {
 
-class VulkanStreamGuest::Impl : public android::base::Stream {
-public:
-    Impl(IOStream* stream) : mStream(stream) {
-        unsetHandleMapping();
-        mFeatureBits = ResourceTracker::get()->getStreamFeatures();
-    }
-
-    ~Impl() { }
-
-    bool valid() { return true; }
-
-    void alloc(void **ptrAddr, size_t bytes) {
-        if (!bytes) {
-            *ptrAddr = nullptr;
-            return;
-        }
-
-        *ptrAddr = mPool.alloc(bytes);
-    }
-
-    ssize_t write(const void *buffer, size_t size) override {
-        uint8_t* streamBuf = (uint8_t*)mStream->alloc(size);
-        memcpy(streamBuf, buffer, size);
-        return size;
-    }
-
-    ssize_t read(void *buffer, size_t size) override {
-        if (!mStream->readback(buffer, size)) {
-            ALOGE("FATAL: Could not read back %zu bytes", size);
-            abort();
-        }
-        return size;
-    }
-
-    void clearPool() {
-        mPool.freeAll();
-    }
-
-    void setHandleMapping(VulkanHandleMapping* mapping) {
-        mCurrentHandleMapping = mapping;
-    }
-
-    void unsetHandleMapping() {
-        mCurrentHandleMapping = &mDefaultHandleMapping;
-    }
-
-    VulkanHandleMapping* handleMapping() const {
-        return mCurrentHandleMapping;
-    }
-
-    void flush() {
-        commitWrite();
-    }
-
-    uint32_t getFeatureBits() const {
-        return mFeatureBits;
-    }
-
-private:
-    size_t oustandingWriteBuffer() const {
-        return mWritePos;
-    }
-
-    size_t remainingWriteBufferSize() const {
-        return mWriteBuffer.size() - mWritePos;
-    }
-
-    void commitWrite() {
-        AEMU_SCOPED_TRACE("VulkanStreamGuest device write");
-        mStream->flush();
-    }
-
-    ssize_t bufferedWrite(const void *buffer, size_t size) {
-        if (size > remainingWriteBufferSize()) {
-            mWriteBuffer.resize((mWritePos + size) << 1);
-        }
-        memcpy(mWriteBuffer.data() + mWritePos, buffer, size);
-        mWritePos += size;
-        return size;
-    }
-
-    android::base::Pool mPool { 8, 4096, 64 };
-
-    size_t mWritePos = 0;
-    std::vector<uint8_t> mWriteBuffer;
-    IOStream* mStream = nullptr;
-    DefaultHandleMapping mDefaultHandleMapping;
-    VulkanHandleMapping* mCurrentHandleMapping;
-    uint32_t mFeatureBits = 0;
-};
-
-VulkanStreamGuest::VulkanStreamGuest(IOStream *stream) :
-    mImpl(new VulkanStreamGuest::Impl(stream)) { }
+VulkanStreamGuest::VulkanStreamGuest(IOStream *stream): mStream(stream) {
+    unsetHandleMapping();
+    mFeatureBits = ResourceTracker::get()->getStreamFeatures();
+}
 
 VulkanStreamGuest::~VulkanStreamGuest() = default;
 
 bool VulkanStreamGuest::valid() {
-    return mImpl->valid();
+    return true;
 }
 
 void VulkanStreamGuest::alloc(void** ptrAddr, size_t bytes) {
-    mImpl->alloc(ptrAddr, bytes);
+    if (!bytes) {
+        *ptrAddr = nullptr;
+        return;
+    }
+    
+    *ptrAddr = mPool.alloc(bytes);
 }
 
 void VulkanStreamGuest::loadStringInPlace(char** forOutput) {
@@ -157,37 +62,95 @@
     }
 }
 
+void VulkanStreamGuest::loadStringInPlaceWithStreamPtr(char** forOutput, uint8_t** streamPtr) {
+    uint32_t len;
+    memcpy(&len, *streamPtr, sizeof(uint32_t));
+    *streamPtr += sizeof(uint32_t);
+    android::base::Stream::fromBe32((uint8_t*)&len);
+
+    alloc((void**)forOutput, len + 1);
+
+    memset(*forOutput, 0x0, len + 1);
+
+    if (len > 0) {
+        memcpy(*forOutput, *streamPtr, len);
+        *streamPtr += len;
+    }
+}
+
+void VulkanStreamGuest::loadStringArrayInPlaceWithStreamPtr(char*** forOutput, uint8_t** streamPtr) {
+ uint32_t count;
+    memcpy(&count, *streamPtr, sizeof(uint32_t));
+    *streamPtr += sizeof(uint32_t);
+    android::base::Stream::fromBe32((uint8_t*)&count);
+    if (!count) {
+        *forOutput = nullptr;
+        return;
+    }
+
+    alloc((void**)forOutput, count * sizeof(char*));
+
+    char **stringsForOutput = *forOutput;
+
+    for (size_t i = 0; i < count; i++) {
+        loadStringInPlaceWithStreamPtr(stringsForOutput + i, streamPtr);
+    }
+}
+
 
 ssize_t VulkanStreamGuest::read(void *buffer, size_t size) {
-    return mImpl->read(buffer, size);
+    if (!mStream->readback(buffer, size)) {
+        ALOGE("FATAL: Could not read back %zu bytes", size);
+        abort();
+    }
+    return size;
 }
 
 ssize_t VulkanStreamGuest::write(const void *buffer, size_t size) {
-    return mImpl->write(buffer, size);
+    uint8_t* streamBuf = (uint8_t*)mStream->alloc(size);
+    memcpy(streamBuf, buffer, size);
+    return size;
+}
+
+void VulkanStreamGuest::writeLarge(const void* buffer, size_t size) {
+    mStream->writeFullyAsync(buffer, size);
 }
 
 void VulkanStreamGuest::clearPool() {
-    mImpl->clearPool();
+    mPool.freeAll();
 }
 
 void VulkanStreamGuest::setHandleMapping(VulkanHandleMapping* mapping) {
-    mImpl->setHandleMapping(mapping);
+    mCurrentHandleMapping = mapping;
 }
 
 void VulkanStreamGuest::unsetHandleMapping() {
-    mImpl->unsetHandleMapping();
+    mCurrentHandleMapping = &mDefaultHandleMapping;
 }
 
 VulkanHandleMapping* VulkanStreamGuest::handleMapping() const {
-    return mImpl->handleMapping();
+    return mCurrentHandleMapping;
 }
 
 void VulkanStreamGuest::flush() {
-    mImpl->flush();
+    AEMU_SCOPED_TRACE("VulkanStreamGuest device write");
+    mStream->flush();
 }
 
 uint32_t VulkanStreamGuest::getFeatureBits() const {
-    return mImpl->getFeatureBits();
+    return mFeatureBits;
+}
+
+void VulkanStreamGuest::incStreamRef() {
+    mStream->incRef();
+}
+
+bool VulkanStreamGuest::decStreamRef() {
+    return mStream->decRef();
+}
+
+uint8_t* VulkanStreamGuest::reserve(size_t size) {
+    return (uint8_t*)mStream->alloc(size);
 }
 
 VulkanCountingStream::VulkanCountingStream() : VulkanStreamGuest(nullptr) { }
diff --git a/system/vulkan_enc/VulkanStreamGuest.h b/system/vulkan_enc/VulkanStreamGuest.h
index d70b9fb..6d2dac1 100644
--- a/system/vulkan_enc/VulkanStreamGuest.h
+++ b/system/vulkan_enc/VulkanStreamGuest.h
@@ -20,8 +20,18 @@
 
 #include "VulkanHandleMapping.h"
 
+#include "IOStream.h"
+#include "ResourceTracker.h"
+
+#include "android/base/BumpPool.h"
+#include "android/base/Tracing.h"
+
+#include <vector>
 #include <memory>
 
+#include <log/log.h>
+#include <inttypes.h>
+
 class IOStream;
 
 namespace goldfish_vk {
@@ -42,8 +52,14 @@
     void loadStringInPlace(char** forOutput);
     void loadStringArrayInPlace(char*** forOutput);
 
+    // When we load a string and are using a reserved pointer.
+    void loadStringInPlaceWithStreamPtr(char** forOutput, uint8_t** streamPtr);
+    void loadStringArrayInPlaceWithStreamPtr(char*** forOutput, uint8_t** streamPtr);
+
     ssize_t read(void *buffer, size_t size) override;
     ssize_t write(const void *buffer, size_t size) override;
+    
+    void writeLarge(const void* buffer, size_t size);
 
     // Frees everything that got alloc'ed.
     void clearPool();
@@ -56,9 +72,17 @@
 
     uint32_t getFeatureBits() const;
 
+    void incStreamRef();
+    bool decStreamRef();
+
+    uint8_t* reserve(size_t size);
 private:
-    class Impl;
-    std::unique_ptr<Impl> mImpl;
+    android::base::BumpPool mPool;
+    std::vector<uint8_t> mWriteBuffer;
+    IOStream* mStream = nullptr;
+    DefaultHandleMapping mDefaultHandleMapping;
+    VulkanHandleMapping* mCurrentHandleMapping;
+    uint32_t mFeatureBits = 0;
 };
 
 class VulkanCountingStream : public VulkanStreamGuest {
diff --git a/system/vulkan_enc/func_table.cpp b/system/vulkan_enc/func_table.cpp
new file mode 100644
index 0000000..dc3f66d
--- /dev/null
+++ b/system/vulkan_enc/func_table.cpp
@@ -0,0 +1,14694 @@
+// Copyright (C) 2018 The Android Open Source Project
+// Copyright (C) 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Autogenerated module func_table
+// (impl) generated by android/android-emugl/host/libs/libOpenglRender/vulkan-registry/xml/genvk.py -registry android/android-emugl/host/libs/libOpenglRender/vulkan-registry/xml/vk.xml cereal -o android/android-emugl/host/libs/libOpenglRender/vulkan/cereal
+// Please do not modify directly;
+// re-run android/scripts/generate-vulkan-sources.sh,
+// or directly from Python by defining:
+// VULKAN_REGISTRY_XML_DIR : Directory containing genvk.py and vk.xml
+// CEREAL_OUTPUT_DIR: Where to put the generated sources.
+// python3 $VULKAN_REGISTRY_XML_DIR/genvk.py -registry $VULKAN_REGISTRY_XML_DIR/vk.xml cereal -o $CEREAL_OUTPUT_DIR
+
+#include "func_table.h"
+
+
+#include "VkEncoder.h"
+#include "../OpenglSystemCommon/HostConnection.h"
+#include "ResourceTracker.h"
+
+#include "goldfish_vk_private_defs.h"
+
+#include <log/log.h>
+
+// Stuff we are not going to use but if included,
+// will cause compile errors. These are Android Vulkan
+// required extensions, but the approach will be to
+// implement them completely on the guest side.
+#undef VK_KHR_android_surface
+
+
+namespace goldfish_vk {
+
+static void sOnInvalidDynamicallyCheckedCall(const char* apiname, const char* neededFeature)
+{
+    ALOGE("invalid call to %s: %s not supported", apiname, neededFeature);
+    abort();
+}
+#ifdef VK_VERSION_1_0
+static VkResult entry_vkCreateInstance(
+    const VkInstanceCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkInstance* pInstance)
+{
+    AEMU_SCOPED_TRACE("vkCreateInstance");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateInstance_VkResult_return = (VkResult)0;
+    vkCreateInstance_VkResult_return = vkEnc->vkCreateInstance(pCreateInfo, pAllocator, pInstance, true /* do lock */);
+    return vkCreateInstance_VkResult_return;
+}
+static void entry_vkDestroyInstance(
+    VkInstance instance,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyInstance");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyInstance(instance, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkEnumeratePhysicalDevices(
+    VkInstance instance,
+    uint32_t* pPhysicalDeviceCount,
+    VkPhysicalDevice* pPhysicalDevices)
+{
+    AEMU_SCOPED_TRACE("vkEnumeratePhysicalDevices");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkEnumeratePhysicalDevices_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkEnumeratePhysicalDevices_VkResult_return = resources->on_vkEnumeratePhysicalDevices(vkEnc, VK_SUCCESS, instance, pPhysicalDeviceCount, pPhysicalDevices);
+    return vkEnumeratePhysicalDevices_VkResult_return;
+}
+static void entry_vkGetPhysicalDeviceFeatures(
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceFeatures* pFeatures)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFeatures");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceFeatures(physicalDevice, pFeatures, true /* do lock */);
+}
+static void entry_vkGetPhysicalDeviceFormatProperties(
+    VkPhysicalDevice physicalDevice,
+    VkFormat format,
+    VkFormatProperties* pFormatProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFormatProperties");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceFormatProperties(physicalDevice, format, pFormatProperties, true /* do lock */);
+}
+static VkResult entry_vkGetPhysicalDeviceImageFormatProperties(
+    VkPhysicalDevice physicalDevice,
+    VkFormat format,
+    VkImageType type,
+    VkImageTiling tiling,
+    VkImageUsageFlags usage,
+    VkImageCreateFlags flags,
+    VkImageFormatProperties* pImageFormatProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceImageFormatProperties");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceImageFormatProperties_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceImageFormatProperties_VkResult_return = vkEnc->vkGetPhysicalDeviceImageFormatProperties(physicalDevice, format, type, tiling, usage, flags, pImageFormatProperties, true /* do lock */);
+    return vkGetPhysicalDeviceImageFormatProperties_VkResult_return;
+}
+static void entry_vkGetPhysicalDeviceProperties(
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceProperties* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceProperties");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceProperties(physicalDevice, pProperties, true /* do lock */);
+}
+static void entry_vkGetPhysicalDeviceQueueFamilyProperties(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pQueueFamilyPropertyCount,
+    VkQueueFamilyProperties* pQueueFamilyProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceQueueFamilyProperties");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties, true /* do lock */);
+}
+static void entry_vkGetPhysicalDeviceMemoryProperties(
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceMemoryProperties* pMemoryProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMemoryProperties");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties, true /* do lock */);
+}
+static PFN_vkVoidFunction entry_vkGetInstanceProcAddr(
+    VkInstance instance,
+    const char* pName)
+{
+    AEMU_SCOPED_TRACE("vkGetInstanceProcAddr");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    PFN_vkVoidFunction vkGetInstanceProcAddr_PFN_vkVoidFunction_return = (PFN_vkVoidFunction)0;
+    vkGetInstanceProcAddr_PFN_vkVoidFunction_return = vkEnc->vkGetInstanceProcAddr(instance, pName, true /* do lock */);
+    return vkGetInstanceProcAddr_PFN_vkVoidFunction_return;
+}
+static PFN_vkVoidFunction entry_vkGetDeviceProcAddr(
+    VkDevice device,
+    const char* pName)
+{
+    AEMU_SCOPED_TRACE("vkGetDeviceProcAddr");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    PFN_vkVoidFunction vkGetDeviceProcAddr_PFN_vkVoidFunction_return = (PFN_vkVoidFunction)0;
+    vkGetDeviceProcAddr_PFN_vkVoidFunction_return = vkEnc->vkGetDeviceProcAddr(device, pName, true /* do lock */);
+    return vkGetDeviceProcAddr_PFN_vkVoidFunction_return;
+}
+static VkResult entry_vkCreateDevice(
+    VkPhysicalDevice physicalDevice,
+    const VkDeviceCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkDevice* pDevice)
+{
+    AEMU_SCOPED_TRACE("vkCreateDevice");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateDevice_VkResult_return = (VkResult)0;
+    vkCreateDevice_VkResult_return = vkEnc->vkCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice, true /* do lock */);
+    return vkCreateDevice_VkResult_return;
+}
+static void entry_vkDestroyDevice(
+    VkDevice device,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyDevice");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyDevice(device, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkEnumerateInstanceExtensionProperties(
+    const char* pLayerName,
+    uint32_t* pPropertyCount,
+    VkExtensionProperties* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkEnumerateInstanceExtensionProperties");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkEnumerateInstanceExtensionProperties_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkEnumerateInstanceExtensionProperties_VkResult_return = resources->on_vkEnumerateInstanceExtensionProperties(vkEnc, VK_SUCCESS, pLayerName, pPropertyCount, pProperties);
+    return vkEnumerateInstanceExtensionProperties_VkResult_return;
+}
+static VkResult entry_vkEnumerateDeviceExtensionProperties(
+    VkPhysicalDevice physicalDevice,
+    const char* pLayerName,
+    uint32_t* pPropertyCount,
+    VkExtensionProperties* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkEnumerateDeviceExtensionProperties");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkEnumerateDeviceExtensionProperties_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkEnumerateDeviceExtensionProperties_VkResult_return = resources->on_vkEnumerateDeviceExtensionProperties(vkEnc, VK_SUCCESS, physicalDevice, pLayerName, pPropertyCount, pProperties);
+    return vkEnumerateDeviceExtensionProperties_VkResult_return;
+}
+static VkResult entry_vkEnumerateInstanceLayerProperties(
+    uint32_t* pPropertyCount,
+    VkLayerProperties* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkEnumerateInstanceLayerProperties");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkEnumerateInstanceLayerProperties_VkResult_return = (VkResult)0;
+    vkEnumerateInstanceLayerProperties_VkResult_return = vkEnc->vkEnumerateInstanceLayerProperties(pPropertyCount, pProperties, true /* do lock */);
+    return vkEnumerateInstanceLayerProperties_VkResult_return;
+}
+static VkResult entry_vkEnumerateDeviceLayerProperties(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pPropertyCount,
+    VkLayerProperties* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkEnumerateDeviceLayerProperties");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkEnumerateDeviceLayerProperties_VkResult_return = (VkResult)0;
+    vkEnumerateDeviceLayerProperties_VkResult_return = vkEnc->vkEnumerateDeviceLayerProperties(physicalDevice, pPropertyCount, pProperties, true /* do lock */);
+    return vkEnumerateDeviceLayerProperties_VkResult_return;
+}
+static void entry_vkGetDeviceQueue(
+    VkDevice device,
+    uint32_t queueFamilyIndex,
+    uint32_t queueIndex,
+    VkQueue* pQueue)
+{
+    AEMU_SCOPED_TRACE("vkGetDeviceQueue");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue, true /* do lock */);
+}
+static VkResult entry_vkQueueSubmit(
+    VkQueue queue,
+    uint32_t submitCount,
+    const VkSubmitInfo* pSubmits,
+    VkFence fence)
+{
+    AEMU_SCOPED_TRACE("vkQueueSubmit");
+    auto vkEnc = ResourceTracker::getQueueEncoder(queue);
+    VkResult vkQueueSubmit_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkQueueSubmit_VkResult_return = resources->on_vkQueueSubmit(vkEnc, VK_SUCCESS, queue, submitCount, pSubmits, fence);
+    return vkQueueSubmit_VkResult_return;
+}
+static VkResult entry_vkQueueWaitIdle(
+    VkQueue queue)
+{
+    AEMU_SCOPED_TRACE("vkQueueWaitIdle");
+    auto vkEnc = ResourceTracker::getQueueEncoder(queue);
+    VkResult vkQueueWaitIdle_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkQueueWaitIdle_VkResult_return = resources->on_vkQueueWaitIdle(vkEnc, VK_SUCCESS, queue);
+    return vkQueueWaitIdle_VkResult_return;
+}
+static VkResult entry_vkDeviceWaitIdle(
+    VkDevice device)
+{
+    AEMU_SCOPED_TRACE("vkDeviceWaitIdle");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkDeviceWaitIdle_VkResult_return = (VkResult)0;
+    vkDeviceWaitIdle_VkResult_return = vkEnc->vkDeviceWaitIdle(device, true /* do lock */);
+    return vkDeviceWaitIdle_VkResult_return;
+}
+static VkResult entry_vkAllocateMemory(
+    VkDevice device,
+    const VkMemoryAllocateInfo* pAllocateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkDeviceMemory* pMemory)
+{
+    AEMU_SCOPED_TRACE("vkAllocateMemory");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkAllocateMemory_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkAllocateMemory_VkResult_return = resources->on_vkAllocateMemory(vkEnc, VK_SUCCESS, device, pAllocateInfo, pAllocator, pMemory);
+    return vkAllocateMemory_VkResult_return;
+}
+static void entry_vkFreeMemory(
+    VkDevice device,
+    VkDeviceMemory memory,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkFreeMemory");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkFreeMemory(vkEnc, device, memory, pAllocator);
+}
+static VkResult entry_vkMapMemory(
+    VkDevice device,
+    VkDeviceMemory memory,
+    VkDeviceSize offset,
+    VkDeviceSize size,
+    VkMemoryMapFlags flags,
+    void** ppData)
+{
+    AEMU_SCOPED_TRACE("vkMapMemory");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkMapMemory_VkResult_return = (VkResult)0;
+    vkMapMemory_VkResult_return = vkEnc->vkMapMemory(device, memory, offset, size, flags, ppData, true /* do lock */);
+    return vkMapMemory_VkResult_return;
+}
+static void entry_vkUnmapMemory(
+    VkDevice device,
+    VkDeviceMemory memory)
+{
+    AEMU_SCOPED_TRACE("vkUnmapMemory");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkUnmapMemory(device, memory, true /* do lock */);
+}
+static VkResult entry_vkFlushMappedMemoryRanges(
+    VkDevice device,
+    uint32_t memoryRangeCount,
+    const VkMappedMemoryRange* pMemoryRanges)
+{
+    AEMU_SCOPED_TRACE("vkFlushMappedMemoryRanges");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkFlushMappedMemoryRanges_VkResult_return = (VkResult)0;
+    vkFlushMappedMemoryRanges_VkResult_return = vkEnc->vkFlushMappedMemoryRanges(device, memoryRangeCount, pMemoryRanges, true /* do lock */);
+    return vkFlushMappedMemoryRanges_VkResult_return;
+}
+static VkResult entry_vkInvalidateMappedMemoryRanges(
+    VkDevice device,
+    uint32_t memoryRangeCount,
+    const VkMappedMemoryRange* pMemoryRanges)
+{
+    AEMU_SCOPED_TRACE("vkInvalidateMappedMemoryRanges");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkInvalidateMappedMemoryRanges_VkResult_return = (VkResult)0;
+    vkInvalidateMappedMemoryRanges_VkResult_return = vkEnc->vkInvalidateMappedMemoryRanges(device, memoryRangeCount, pMemoryRanges, true /* do lock */);
+    return vkInvalidateMappedMemoryRanges_VkResult_return;
+}
+static void entry_vkGetDeviceMemoryCommitment(
+    VkDevice device,
+    VkDeviceMemory memory,
+    VkDeviceSize* pCommittedMemoryInBytes)
+{
+    AEMU_SCOPED_TRACE("vkGetDeviceMemoryCommitment");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetDeviceMemoryCommitment(device, memory, pCommittedMemoryInBytes, true /* do lock */);
+}
+static VkResult entry_vkBindBufferMemory(
+    VkDevice device,
+    VkBuffer buffer,
+    VkDeviceMemory memory,
+    VkDeviceSize memoryOffset)
+{
+    AEMU_SCOPED_TRACE("vkBindBufferMemory");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkBindBufferMemory_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkBindBufferMemory_VkResult_return = resources->on_vkBindBufferMemory(vkEnc, VK_SUCCESS, device, buffer, memory, memoryOffset);
+    return vkBindBufferMemory_VkResult_return;
+}
+static VkResult entry_vkBindImageMemory(
+    VkDevice device,
+    VkImage image,
+    VkDeviceMemory memory,
+    VkDeviceSize memoryOffset)
+{
+    AEMU_SCOPED_TRACE("vkBindImageMemory");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkBindImageMemory_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkBindImageMemory_VkResult_return = resources->on_vkBindImageMemory(vkEnc, VK_SUCCESS, device, image, memory, memoryOffset);
+    return vkBindImageMemory_VkResult_return;
+}
+static void entry_vkGetBufferMemoryRequirements(
+    VkDevice device,
+    VkBuffer buffer,
+    VkMemoryRequirements* pMemoryRequirements)
+{
+    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkGetBufferMemoryRequirements(vkEnc, device, buffer, pMemoryRequirements);
+}
+static void entry_vkGetImageMemoryRequirements(
+    VkDevice device,
+    VkImage image,
+    VkMemoryRequirements* pMemoryRequirements)
+{
+    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkGetImageMemoryRequirements(vkEnc, device, image, pMemoryRequirements);
+}
+static void entry_vkGetImageSparseMemoryRequirements(
+    VkDevice device,
+    VkImage image,
+    uint32_t* pSparseMemoryRequirementCount,
+    VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
+{
+    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetImageSparseMemoryRequirements(device, image, pSparseMemoryRequirementCount, pSparseMemoryRequirements, true /* do lock */);
+}
+static void entry_vkGetPhysicalDeviceSparseImageFormatProperties(
+    VkPhysicalDevice physicalDevice,
+    VkFormat format,
+    VkImageType type,
+    VkSampleCountFlagBits samples,
+    VkImageUsageFlags usage,
+    VkImageTiling tiling,
+    uint32_t* pPropertyCount,
+    VkSparseImageFormatProperties* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSparseImageFormatProperties");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling, pPropertyCount, pProperties, true /* do lock */);
+}
+static VkResult entry_vkQueueBindSparse(
+    VkQueue queue,
+    uint32_t bindInfoCount,
+    const VkBindSparseInfo* pBindInfo,
+    VkFence fence)
+{
+    AEMU_SCOPED_TRACE("vkQueueBindSparse");
+    auto vkEnc = ResourceTracker::getQueueEncoder(queue);
+    VkResult vkQueueBindSparse_VkResult_return = (VkResult)0;
+    vkQueueBindSparse_VkResult_return = vkEnc->vkQueueBindSparse(queue, bindInfoCount, pBindInfo, fence, true /* do lock */);
+    return vkQueueBindSparse_VkResult_return;
+}
+static VkResult entry_vkCreateFence(
+    VkDevice device,
+    const VkFenceCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkFence* pFence)
+{
+    AEMU_SCOPED_TRACE("vkCreateFence");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateFence_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkCreateFence_VkResult_return = resources->on_vkCreateFence(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pFence);
+    return vkCreateFence_VkResult_return;
+}
+static void entry_vkDestroyFence(
+    VkDevice device,
+    VkFence fence,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyFence");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyFence(device, fence, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkResetFences(
+    VkDevice device,
+    uint32_t fenceCount,
+    const VkFence* pFences)
+{
+    AEMU_SCOPED_TRACE("vkResetFences");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkResetFences_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkResetFences_VkResult_return = resources->on_vkResetFences(vkEnc, VK_SUCCESS, device, fenceCount, pFences);
+    return vkResetFences_VkResult_return;
+}
+static VkResult entry_vkGetFenceStatus(
+    VkDevice device,
+    VkFence fence)
+{
+    AEMU_SCOPED_TRACE("vkGetFenceStatus");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetFenceStatus_VkResult_return = (VkResult)0;
+    vkGetFenceStatus_VkResult_return = vkEnc->vkGetFenceStatus(device, fence, true /* do lock */);
+    return vkGetFenceStatus_VkResult_return;
+}
+static VkResult entry_vkWaitForFences(
+    VkDevice device,
+    uint32_t fenceCount,
+    const VkFence* pFences,
+    VkBool32 waitAll,
+    uint64_t timeout)
+{
+    AEMU_SCOPED_TRACE("vkWaitForFences");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkWaitForFences_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkWaitForFences_VkResult_return = resources->on_vkWaitForFences(vkEnc, VK_SUCCESS, device, fenceCount, pFences, waitAll, timeout);
+    return vkWaitForFences_VkResult_return;
+}
+static VkResult entry_vkCreateSemaphore(
+    VkDevice device,
+    const VkSemaphoreCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSemaphore* pSemaphore)
+{
+    AEMU_SCOPED_TRACE("vkCreateSemaphore");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateSemaphore_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkCreateSemaphore_VkResult_return = resources->on_vkCreateSemaphore(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pSemaphore);
+    return vkCreateSemaphore_VkResult_return;
+}
+static void entry_vkDestroySemaphore(
+    VkDevice device,
+    VkSemaphore semaphore,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroySemaphore");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkDestroySemaphore(vkEnc, device, semaphore, pAllocator);
+}
+static VkResult entry_vkCreateEvent(
+    VkDevice device,
+    const VkEventCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkEvent* pEvent)
+{
+    AEMU_SCOPED_TRACE("vkCreateEvent");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateEvent_VkResult_return = (VkResult)0;
+    vkCreateEvent_VkResult_return = vkEnc->vkCreateEvent(device, pCreateInfo, pAllocator, pEvent, true /* do lock */);
+    return vkCreateEvent_VkResult_return;
+}
+static void entry_vkDestroyEvent(
+    VkDevice device,
+    VkEvent event,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyEvent");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyEvent(device, event, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkGetEventStatus(
+    VkDevice device,
+    VkEvent event)
+{
+    AEMU_SCOPED_TRACE("vkGetEventStatus");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetEventStatus_VkResult_return = (VkResult)0;
+    vkGetEventStatus_VkResult_return = vkEnc->vkGetEventStatus(device, event, true /* do lock */);
+    return vkGetEventStatus_VkResult_return;
+}
+static VkResult entry_vkSetEvent(
+    VkDevice device,
+    VkEvent event)
+{
+    AEMU_SCOPED_TRACE("vkSetEvent");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkSetEvent_VkResult_return = (VkResult)0;
+    vkSetEvent_VkResult_return = vkEnc->vkSetEvent(device, event, true /* do lock */);
+    return vkSetEvent_VkResult_return;
+}
+static VkResult entry_vkResetEvent(
+    VkDevice device,
+    VkEvent event)
+{
+    AEMU_SCOPED_TRACE("vkResetEvent");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkResetEvent_VkResult_return = (VkResult)0;
+    vkResetEvent_VkResult_return = vkEnc->vkResetEvent(device, event, true /* do lock */);
+    return vkResetEvent_VkResult_return;
+}
+static VkResult entry_vkCreateQueryPool(
+    VkDevice device,
+    const VkQueryPoolCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkQueryPool* pQueryPool)
+{
+    AEMU_SCOPED_TRACE("vkCreateQueryPool");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateQueryPool_VkResult_return = (VkResult)0;
+    vkCreateQueryPool_VkResult_return = vkEnc->vkCreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool, true /* do lock */);
+    return vkCreateQueryPool_VkResult_return;
+}
+static void entry_vkDestroyQueryPool(
+    VkDevice device,
+    VkQueryPool queryPool,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyQueryPool");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyQueryPool(device, queryPool, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkGetQueryPoolResults(
+    VkDevice device,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t queryCount,
+    size_t dataSize,
+    void* pData,
+    VkDeviceSize stride,
+    VkQueryResultFlags flags)
+{
+    AEMU_SCOPED_TRACE("vkGetQueryPoolResults");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetQueryPoolResults_VkResult_return = (VkResult)0;
+    vkGetQueryPoolResults_VkResult_return = vkEnc->vkGetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags, true /* do lock */);
+    return vkGetQueryPoolResults_VkResult_return;
+}
+static VkResult entry_vkCreateBuffer(
+    VkDevice device,
+    const VkBufferCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkBuffer* pBuffer)
+{
+    AEMU_SCOPED_TRACE("vkCreateBuffer");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateBuffer_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkCreateBuffer_VkResult_return = resources->on_vkCreateBuffer(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pBuffer);
+    return vkCreateBuffer_VkResult_return;
+}
+static void entry_vkDestroyBuffer(
+    VkDevice device,
+    VkBuffer buffer,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyBuffer");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkDestroyBuffer(vkEnc, device, buffer, pAllocator);
+}
+static VkResult entry_vkCreateBufferView(
+    VkDevice device,
+    const VkBufferViewCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkBufferView* pView)
+{
+    AEMU_SCOPED_TRACE("vkCreateBufferView");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateBufferView_VkResult_return = (VkResult)0;
+    vkCreateBufferView_VkResult_return = vkEnc->vkCreateBufferView(device, pCreateInfo, pAllocator, pView, true /* do lock */);
+    return vkCreateBufferView_VkResult_return;
+}
+static void entry_vkDestroyBufferView(
+    VkDevice device,
+    VkBufferView bufferView,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyBufferView");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyBufferView(device, bufferView, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkCreateImage(
+    VkDevice device,
+    const VkImageCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkImage* pImage)
+{
+    AEMU_SCOPED_TRACE("vkCreateImage");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateImage_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkCreateImage_VkResult_return = resources->on_vkCreateImage(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pImage);
+    return vkCreateImage_VkResult_return;
+}
+static void entry_vkDestroyImage(
+    VkDevice device,
+    VkImage image,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyImage");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkDestroyImage(vkEnc, device, image, pAllocator);
+}
+static void entry_vkGetImageSubresourceLayout(
+    VkDevice device,
+    VkImage image,
+    const VkImageSubresource* pSubresource,
+    VkSubresourceLayout* pLayout)
+{
+    AEMU_SCOPED_TRACE("vkGetImageSubresourceLayout");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetImageSubresourceLayout(device, image, pSubresource, pLayout, true /* do lock */);
+}
+static VkResult entry_vkCreateImageView(
+    VkDevice device,
+    const VkImageViewCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkImageView* pView)
+{
+    AEMU_SCOPED_TRACE("vkCreateImageView");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateImageView_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkCreateImageView_VkResult_return = resources->on_vkCreateImageView(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pView);
+    return vkCreateImageView_VkResult_return;
+}
+static void entry_vkDestroyImageView(
+    VkDevice device,
+    VkImageView imageView,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyImageView");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyImageView(device, imageView, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkCreateShaderModule(
+    VkDevice device,
+    const VkShaderModuleCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkShaderModule* pShaderModule)
+{
+    AEMU_SCOPED_TRACE("vkCreateShaderModule");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateShaderModule_VkResult_return = (VkResult)0;
+    vkCreateShaderModule_VkResult_return = vkEnc->vkCreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule, true /* do lock */);
+    return vkCreateShaderModule_VkResult_return;
+}
+static void entry_vkDestroyShaderModule(
+    VkDevice device,
+    VkShaderModule shaderModule,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyShaderModule");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyShaderModule(device, shaderModule, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkCreatePipelineCache(
+    VkDevice device,
+    const VkPipelineCacheCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkPipelineCache* pPipelineCache)
+{
+    AEMU_SCOPED_TRACE("vkCreatePipelineCache");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreatePipelineCache_VkResult_return = (VkResult)0;
+    vkCreatePipelineCache_VkResult_return = vkEnc->vkCreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache, true /* do lock */);
+    return vkCreatePipelineCache_VkResult_return;
+}
+static void entry_vkDestroyPipelineCache(
+    VkDevice device,
+    VkPipelineCache pipelineCache,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyPipelineCache");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyPipelineCache(device, pipelineCache, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkGetPipelineCacheData(
+    VkDevice device,
+    VkPipelineCache pipelineCache,
+    size_t* pDataSize,
+    void* pData)
+{
+    AEMU_SCOPED_TRACE("vkGetPipelineCacheData");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPipelineCacheData_VkResult_return = (VkResult)0;
+    vkGetPipelineCacheData_VkResult_return = vkEnc->vkGetPipelineCacheData(device, pipelineCache, pDataSize, pData, true /* do lock */);
+    return vkGetPipelineCacheData_VkResult_return;
+}
+static VkResult entry_vkMergePipelineCaches(
+    VkDevice device,
+    VkPipelineCache dstCache,
+    uint32_t srcCacheCount,
+    const VkPipelineCache* pSrcCaches)
+{
+    AEMU_SCOPED_TRACE("vkMergePipelineCaches");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkMergePipelineCaches_VkResult_return = (VkResult)0;
+    vkMergePipelineCaches_VkResult_return = vkEnc->vkMergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches, true /* do lock */);
+    return vkMergePipelineCaches_VkResult_return;
+}
+static VkResult entry_vkCreateGraphicsPipelines(
+    VkDevice device,
+    VkPipelineCache pipelineCache,
+    uint32_t createInfoCount,
+    const VkGraphicsPipelineCreateInfo* pCreateInfos,
+    const VkAllocationCallbacks* pAllocator,
+    VkPipeline* pPipelines)
+{
+    AEMU_SCOPED_TRACE("vkCreateGraphicsPipelines");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateGraphicsPipelines_VkResult_return = (VkResult)0;
+    vkCreateGraphicsPipelines_VkResult_return = vkEnc->vkCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, true /* do lock */);
+    return vkCreateGraphicsPipelines_VkResult_return;
+}
+static VkResult entry_vkCreateComputePipelines(
+    VkDevice device,
+    VkPipelineCache pipelineCache,
+    uint32_t createInfoCount,
+    const VkComputePipelineCreateInfo* pCreateInfos,
+    const VkAllocationCallbacks* pAllocator,
+    VkPipeline* pPipelines)
+{
+    AEMU_SCOPED_TRACE("vkCreateComputePipelines");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateComputePipelines_VkResult_return = (VkResult)0;
+    vkCreateComputePipelines_VkResult_return = vkEnc->vkCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, true /* do lock */);
+    return vkCreateComputePipelines_VkResult_return;
+}
+static void entry_vkDestroyPipeline(
+    VkDevice device,
+    VkPipeline pipeline,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyPipeline");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyPipeline(device, pipeline, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkCreatePipelineLayout(
+    VkDevice device,
+    const VkPipelineLayoutCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkPipelineLayout* pPipelineLayout)
+{
+    AEMU_SCOPED_TRACE("vkCreatePipelineLayout");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreatePipelineLayout_VkResult_return = (VkResult)0;
+    vkCreatePipelineLayout_VkResult_return = vkEnc->vkCreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout, true /* do lock */);
+    return vkCreatePipelineLayout_VkResult_return;
+}
+static void entry_vkDestroyPipelineLayout(
+    VkDevice device,
+    VkPipelineLayout pipelineLayout,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyPipelineLayout");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyPipelineLayout(device, pipelineLayout, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkCreateSampler(
+    VkDevice device,
+    const VkSamplerCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSampler* pSampler)
+{
+    AEMU_SCOPED_TRACE("vkCreateSampler");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateSampler_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkCreateSampler_VkResult_return = resources->on_vkCreateSampler(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pSampler);
+    return vkCreateSampler_VkResult_return;
+}
+static void entry_vkDestroySampler(
+    VkDevice device,
+    VkSampler sampler,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroySampler");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroySampler(device, sampler, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkCreateDescriptorSetLayout(
+    VkDevice device,
+    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkDescriptorSetLayout* pSetLayout)
+{
+    AEMU_SCOPED_TRACE("vkCreateDescriptorSetLayout");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateDescriptorSetLayout_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkCreateDescriptorSetLayout_VkResult_return = resources->on_vkCreateDescriptorSetLayout(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pSetLayout);
+    return vkCreateDescriptorSetLayout_VkResult_return;
+}
+static void entry_vkDestroyDescriptorSetLayout(
+    VkDevice device,
+    VkDescriptorSetLayout descriptorSetLayout,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyDescriptorSetLayout");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkDestroyDescriptorSetLayout(vkEnc, device, descriptorSetLayout, pAllocator);
+}
+static VkResult entry_vkCreateDescriptorPool(
+    VkDevice device,
+    const VkDescriptorPoolCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkDescriptorPool* pDescriptorPool)
+{
+    AEMU_SCOPED_TRACE("vkCreateDescriptorPool");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateDescriptorPool_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkCreateDescriptorPool_VkResult_return = resources->on_vkCreateDescriptorPool(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pDescriptorPool);
+    return vkCreateDescriptorPool_VkResult_return;
+}
+static void entry_vkDestroyDescriptorPool(
+    VkDevice device,
+    VkDescriptorPool descriptorPool,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyDescriptorPool");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkDestroyDescriptorPool(vkEnc, device, descriptorPool, pAllocator);
+}
+static VkResult entry_vkResetDescriptorPool(
+    VkDevice device,
+    VkDescriptorPool descriptorPool,
+    VkDescriptorPoolResetFlags flags)
+{
+    AEMU_SCOPED_TRACE("vkResetDescriptorPool");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkResetDescriptorPool_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkResetDescriptorPool_VkResult_return = resources->on_vkResetDescriptorPool(vkEnc, VK_SUCCESS, device, descriptorPool, flags);
+    return vkResetDescriptorPool_VkResult_return;
+}
+static VkResult entry_vkAllocateDescriptorSets(
+    VkDevice device,
+    const VkDescriptorSetAllocateInfo* pAllocateInfo,
+    VkDescriptorSet* pDescriptorSets)
+{
+    AEMU_SCOPED_TRACE("vkAllocateDescriptorSets");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkAllocateDescriptorSets_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkAllocateDescriptorSets_VkResult_return = resources->on_vkAllocateDescriptorSets(vkEnc, VK_SUCCESS, device, pAllocateInfo, pDescriptorSets);
+    return vkAllocateDescriptorSets_VkResult_return;
+}
+static VkResult entry_vkFreeDescriptorSets(
+    VkDevice device,
+    VkDescriptorPool descriptorPool,
+    uint32_t descriptorSetCount,
+    const VkDescriptorSet* pDescriptorSets)
+{
+    AEMU_SCOPED_TRACE("vkFreeDescriptorSets");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkFreeDescriptorSets_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkFreeDescriptorSets_VkResult_return = resources->on_vkFreeDescriptorSets(vkEnc, VK_SUCCESS, device, descriptorPool, descriptorSetCount, pDescriptorSets);
+    return vkFreeDescriptorSets_VkResult_return;
+}
+static void entry_vkUpdateDescriptorSets(
+    VkDevice device,
+    uint32_t descriptorWriteCount,
+    const VkWriteDescriptorSet* pDescriptorWrites,
+    uint32_t descriptorCopyCount,
+    const VkCopyDescriptorSet* pDescriptorCopies)
+{
+    AEMU_SCOPED_TRACE("vkUpdateDescriptorSets");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkUpdateDescriptorSets(vkEnc, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
+}
+static VkResult entry_vkCreateFramebuffer(
+    VkDevice device,
+    const VkFramebufferCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkFramebuffer* pFramebuffer)
+{
+    AEMU_SCOPED_TRACE("vkCreateFramebuffer");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateFramebuffer_VkResult_return = (VkResult)0;
+    vkCreateFramebuffer_VkResult_return = vkEnc->vkCreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer, true /* do lock */);
+    return vkCreateFramebuffer_VkResult_return;
+}
+static void entry_vkDestroyFramebuffer(
+    VkDevice device,
+    VkFramebuffer framebuffer,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyFramebuffer");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyFramebuffer(device, framebuffer, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkCreateRenderPass(
+    VkDevice device,
+    const VkRenderPassCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkRenderPass* pRenderPass)
+{
+    AEMU_SCOPED_TRACE("vkCreateRenderPass");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateRenderPass_VkResult_return = (VkResult)0;
+    vkCreateRenderPass_VkResult_return = vkEnc->vkCreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass, true /* do lock */);
+    return vkCreateRenderPass_VkResult_return;
+}
+static void entry_vkDestroyRenderPass(
+    VkDevice device,
+    VkRenderPass renderPass,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyRenderPass");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyRenderPass(device, renderPass, pAllocator, true /* do lock */);
+}
+static void entry_vkGetRenderAreaGranularity(
+    VkDevice device,
+    VkRenderPass renderPass,
+    VkExtent2D* pGranularity)
+{
+    AEMU_SCOPED_TRACE("vkGetRenderAreaGranularity");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetRenderAreaGranularity(device, renderPass, pGranularity, true /* do lock */);
+}
+static VkResult entry_vkCreateCommandPool(
+    VkDevice device,
+    const VkCommandPoolCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkCommandPool* pCommandPool)
+{
+    AEMU_SCOPED_TRACE("vkCreateCommandPool");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateCommandPool_VkResult_return = (VkResult)0;
+    vkCreateCommandPool_VkResult_return = vkEnc->vkCreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool, true /* do lock */);
+    return vkCreateCommandPool_VkResult_return;
+}
+static void entry_vkDestroyCommandPool(
+    VkDevice device,
+    VkCommandPool commandPool,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyCommandPool");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyCommandPool(device, commandPool, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkResetCommandPool(
+    VkDevice device,
+    VkCommandPool commandPool,
+    VkCommandPoolResetFlags flags)
+{
+    AEMU_SCOPED_TRACE("vkResetCommandPool");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkResetCommandPool_VkResult_return = (VkResult)0;
+    vkResetCommandPool_VkResult_return = vkEnc->vkResetCommandPool(device, commandPool, flags, true /* do lock */);
+    if (vkResetCommandPool_VkResult_return == VK_SUCCESS) {
+        ResourceTracker::get()->resetCommandPoolStagingInfo(commandPool);
+    }
+    return vkResetCommandPool_VkResult_return;
+}
+static VkResult entry_vkAllocateCommandBuffers(
+    VkDevice device,
+    const VkCommandBufferAllocateInfo* pAllocateInfo,
+    VkCommandBuffer* pCommandBuffers)
+{
+    AEMU_SCOPED_TRACE("vkAllocateCommandBuffers");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkAllocateCommandBuffers_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkAllocateCommandBuffers_VkResult_return = resources->on_vkAllocateCommandBuffers(vkEnc, VK_SUCCESS, device, pAllocateInfo, pCommandBuffers);
+    if (vkAllocateCommandBuffers_VkResult_return == VK_SUCCESS) {
+        ResourceTracker::get()->addToCommandPool(pAllocateInfo->commandPool, pAllocateInfo->commandBufferCount, pCommandBuffers);
+    }
+    return vkAllocateCommandBuffers_VkResult_return;
+}
+static void entry_vkFreeCommandBuffers(
+    VkDevice device,
+    VkCommandPool commandPool,
+    uint32_t commandBufferCount,
+    const VkCommandBuffer* pCommandBuffers)
+{
+    AEMU_SCOPED_TRACE("vkFreeCommandBuffers");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkFreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers, true /* do lock */);
+}
+static VkResult entry_vkBeginCommandBuffer(
+    VkCommandBuffer commandBuffer,
+    const VkCommandBufferBeginInfo* pBeginInfo)
+{
+    AEMU_SCOPED_TRACE("vkBeginCommandBuffer");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    VkResult vkBeginCommandBuffer_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkBeginCommandBuffer_VkResult_return = resources->on_vkBeginCommandBuffer(vkEnc, VK_SUCCESS, commandBuffer, pBeginInfo);
+    return vkBeginCommandBuffer_VkResult_return;
+}
+static VkResult entry_vkEndCommandBuffer(
+    VkCommandBuffer commandBuffer)
+{
+    AEMU_SCOPED_TRACE("vkEndCommandBuffer");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    VkResult vkEndCommandBuffer_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkEndCommandBuffer_VkResult_return = resources->on_vkEndCommandBuffer(vkEnc, VK_SUCCESS, commandBuffer);
+    return vkEndCommandBuffer_VkResult_return;
+}
+static VkResult entry_vkResetCommandBuffer(
+    VkCommandBuffer commandBuffer,
+    VkCommandBufferResetFlags flags)
+{
+    AEMU_SCOPED_TRACE("vkResetCommandBuffer");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    VkResult vkResetCommandBuffer_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkResetCommandBuffer_VkResult_return = resources->on_vkResetCommandBuffer(vkEnc, VK_SUCCESS, commandBuffer, flags);
+    return vkResetCommandBuffer_VkResult_return;
+}
+static void entry_vkCmdBindPipeline(
+    VkCommandBuffer commandBuffer,
+    VkPipelineBindPoint pipelineBindPoint,
+    VkPipeline pipeline)
+{
+    AEMU_SCOPED_TRACE("vkCmdBindPipeline");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline, true /* do lock */);
+}
+static void entry_vkCmdSetViewport(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstViewport,
+    uint32_t viewportCount,
+    const VkViewport* pViewports)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetViewport");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports, true /* do lock */);
+}
+static void entry_vkCmdSetScissor(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstScissor,
+    uint32_t scissorCount,
+    const VkRect2D* pScissors)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetScissor");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors, true /* do lock */);
+}
+static void entry_vkCmdSetLineWidth(
+    VkCommandBuffer commandBuffer,
+    float lineWidth)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetLineWidth");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetLineWidth(commandBuffer, lineWidth, true /* do lock */);
+}
+static void entry_vkCmdSetDepthBias(
+    VkCommandBuffer commandBuffer,
+    float depthBiasConstantFactor,
+    float depthBiasClamp,
+    float depthBiasSlopeFactor)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetDepthBias");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor, true /* do lock */);
+}
+static void entry_vkCmdSetBlendConstants(
+    VkCommandBuffer commandBuffer,
+    const float blendConstants[4])
+{
+    AEMU_SCOPED_TRACE("vkCmdSetBlendConstants");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetBlendConstants(commandBuffer, blendConstants, true /* do lock */);
+}
+static void entry_vkCmdSetDepthBounds(
+    VkCommandBuffer commandBuffer,
+    float minDepthBounds,
+    float maxDepthBounds)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetDepthBounds");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds, true /* do lock */);
+}
+static void entry_vkCmdSetStencilCompareMask(
+    VkCommandBuffer commandBuffer,
+    VkStencilFaceFlags faceMask,
+    uint32_t compareMask)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetStencilCompareMask");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetStencilCompareMask(commandBuffer, faceMask, compareMask, true /* do lock */);
+}
+static void entry_vkCmdSetStencilWriteMask(
+    VkCommandBuffer commandBuffer,
+    VkStencilFaceFlags faceMask,
+    uint32_t writeMask)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetStencilWriteMask");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetStencilWriteMask(commandBuffer, faceMask, writeMask, true /* do lock */);
+}
+static void entry_vkCmdSetStencilReference(
+    VkCommandBuffer commandBuffer,
+    VkStencilFaceFlags faceMask,
+    uint32_t reference)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetStencilReference");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetStencilReference(commandBuffer, faceMask, reference, true /* do lock */);
+}
+static void entry_vkCmdBindDescriptorSets(
+    VkCommandBuffer commandBuffer,
+    VkPipelineBindPoint pipelineBindPoint,
+    VkPipelineLayout layout,
+    uint32_t firstSet,
+    uint32_t descriptorSetCount,
+    const VkDescriptorSet* pDescriptorSets,
+    uint32_t dynamicOffsetCount,
+    const uint32_t* pDynamicOffsets)
+{
+    AEMU_SCOPED_TRACE("vkCmdBindDescriptorSets");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    auto resources = ResourceTracker::get();
+    resources->on_vkCmdBindDescriptorSets(vkEnc, commandBuffer, pipelineBindPoint, layout, firstSet, descriptorSetCount, pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
+}
+static void entry_vkCmdBindIndexBuffer(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkIndexType indexType)
+{
+    AEMU_SCOPED_TRACE("vkCmdBindIndexBuffer");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBindIndexBuffer(commandBuffer, buffer, offset, indexType, true /* do lock */);
+}
+static void entry_vkCmdBindVertexBuffers(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstBinding,
+    uint32_t bindingCount,
+    const VkBuffer* pBuffers,
+    const VkDeviceSize* pOffsets)
+{
+    AEMU_SCOPED_TRACE("vkCmdBindVertexBuffers");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets, true /* do lock */);
+}
+static void entry_vkCmdDraw(
+    VkCommandBuffer commandBuffer,
+    uint32_t vertexCount,
+    uint32_t instanceCount,
+    uint32_t firstVertex,
+    uint32_t firstInstance)
+{
+    AEMU_SCOPED_TRACE("vkCmdDraw");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance, true /* do lock */);
+}
+static void entry_vkCmdDrawIndexed(
+    VkCommandBuffer commandBuffer,
+    uint32_t indexCount,
+    uint32_t instanceCount,
+    uint32_t firstIndex,
+    int32_t vertexOffset,
+    uint32_t firstInstance)
+{
+    AEMU_SCOPED_TRACE("vkCmdDrawIndexed");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance, true /* do lock */);
+}
+static void entry_vkCmdDrawIndirect(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    uint32_t drawCount,
+    uint32_t stride)
+{
+    AEMU_SCOPED_TRACE("vkCmdDrawIndirect");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDrawIndirect(commandBuffer, buffer, offset, drawCount, stride, true /* do lock */);
+}
+static void entry_vkCmdDrawIndexedIndirect(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    uint32_t drawCount,
+    uint32_t stride)
+{
+    AEMU_SCOPED_TRACE("vkCmdDrawIndexedIndirect");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDrawIndexedIndirect(commandBuffer, buffer, offset, drawCount, stride, true /* do lock */);
+}
+static void entry_vkCmdDispatch(
+    VkCommandBuffer commandBuffer,
+    uint32_t groupCountX,
+    uint32_t groupCountY,
+    uint32_t groupCountZ)
+{
+    AEMU_SCOPED_TRACE("vkCmdDispatch");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDispatch(commandBuffer, groupCountX, groupCountY, groupCountZ, true /* do lock */);
+}
+static void entry_vkCmdDispatchIndirect(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset)
+{
+    AEMU_SCOPED_TRACE("vkCmdDispatchIndirect");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDispatchIndirect(commandBuffer, buffer, offset, true /* do lock */);
+}
+static void entry_vkCmdCopyBuffer(
+    VkCommandBuffer commandBuffer,
+    VkBuffer srcBuffer,
+    VkBuffer dstBuffer,
+    uint32_t regionCount,
+    const VkBufferCopy* pRegions)
+{
+    AEMU_SCOPED_TRACE("vkCmdCopyBuffer");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions, true /* do lock */);
+}
+static void entry_vkCmdCopyImage(
+    VkCommandBuffer commandBuffer,
+    VkImage srcImage,
+    VkImageLayout srcImageLayout,
+    VkImage dstImage,
+    VkImageLayout dstImageLayout,
+    uint32_t regionCount,
+    const VkImageCopy* pRegions)
+{
+    AEMU_SCOPED_TRACE("vkCmdCopyImage");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, true /* do lock */);
+}
+static void entry_vkCmdBlitImage(
+    VkCommandBuffer commandBuffer,
+    VkImage srcImage,
+    VkImageLayout srcImageLayout,
+    VkImage dstImage,
+    VkImageLayout dstImageLayout,
+    uint32_t regionCount,
+    const VkImageBlit* pRegions,
+    VkFilter filter)
+{
+    AEMU_SCOPED_TRACE("vkCmdBlitImage");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter, true /* do lock */);
+}
+static void entry_vkCmdCopyBufferToImage(
+    VkCommandBuffer commandBuffer,
+    VkBuffer srcBuffer,
+    VkImage dstImage,
+    VkImageLayout dstImageLayout,
+    uint32_t regionCount,
+    const VkBufferImageCopy* pRegions)
+{
+    AEMU_SCOPED_TRACE("vkCmdCopyBufferToImage");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions, true /* do lock */);
+}
+static void entry_vkCmdCopyImageToBuffer(
+    VkCommandBuffer commandBuffer,
+    VkImage srcImage,
+    VkImageLayout srcImageLayout,
+    VkBuffer dstBuffer,
+    uint32_t regionCount,
+    const VkBufferImageCopy* pRegions)
+{
+    AEMU_SCOPED_TRACE("vkCmdCopyImageToBuffer");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions, true /* do lock */);
+}
+static void entry_vkCmdUpdateBuffer(
+    VkCommandBuffer commandBuffer,
+    VkBuffer dstBuffer,
+    VkDeviceSize dstOffset,
+    VkDeviceSize dataSize,
+    const void* pData)
+{
+    AEMU_SCOPED_TRACE("vkCmdUpdateBuffer");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData, true /* do lock */);
+}
+static void entry_vkCmdFillBuffer(
+    VkCommandBuffer commandBuffer,
+    VkBuffer dstBuffer,
+    VkDeviceSize dstOffset,
+    VkDeviceSize size,
+    uint32_t data)
+{
+    AEMU_SCOPED_TRACE("vkCmdFillBuffer");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data, true /* do lock */);
+}
+static void entry_vkCmdClearColorImage(
+    VkCommandBuffer commandBuffer,
+    VkImage image,
+    VkImageLayout imageLayout,
+    const VkClearColorValue* pColor,
+    uint32_t rangeCount,
+    const VkImageSubresourceRange* pRanges)
+{
+    AEMU_SCOPED_TRACE("vkCmdClearColorImage");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges, true /* do lock */);
+}
+static void entry_vkCmdClearDepthStencilImage(
+    VkCommandBuffer commandBuffer,
+    VkImage image,
+    VkImageLayout imageLayout,
+    const VkClearDepthStencilValue* pDepthStencil,
+    uint32_t rangeCount,
+    const VkImageSubresourceRange* pRanges)
+{
+    AEMU_SCOPED_TRACE("vkCmdClearDepthStencilImage");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges, true /* do lock */);
+}
+static void entry_vkCmdClearAttachments(
+    VkCommandBuffer commandBuffer,
+    uint32_t attachmentCount,
+    const VkClearAttachment* pAttachments,
+    uint32_t rectCount,
+    const VkClearRect* pRects)
+{
+    AEMU_SCOPED_TRACE("vkCmdClearAttachments");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects, true /* do lock */);
+}
+static void entry_vkCmdResolveImage(
+    VkCommandBuffer commandBuffer,
+    VkImage srcImage,
+    VkImageLayout srcImageLayout,
+    VkImage dstImage,
+    VkImageLayout dstImageLayout,
+    uint32_t regionCount,
+    const VkImageResolve* pRegions)
+{
+    AEMU_SCOPED_TRACE("vkCmdResolveImage");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, true /* do lock */);
+}
+static void entry_vkCmdSetEvent(
+    VkCommandBuffer commandBuffer,
+    VkEvent event,
+    VkPipelineStageFlags stageMask)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetEvent");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetEvent(commandBuffer, event, stageMask, true /* do lock */);
+}
+static void entry_vkCmdResetEvent(
+    VkCommandBuffer commandBuffer,
+    VkEvent event,
+    VkPipelineStageFlags stageMask)
+{
+    AEMU_SCOPED_TRACE("vkCmdResetEvent");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdResetEvent(commandBuffer, event, stageMask, true /* do lock */);
+}
+static void entry_vkCmdWaitEvents(
+    VkCommandBuffer commandBuffer,
+    uint32_t eventCount,
+    const VkEvent* pEvents,
+    VkPipelineStageFlags srcStageMask,
+    VkPipelineStageFlags dstStageMask,
+    uint32_t memoryBarrierCount,
+    const VkMemoryBarrier* pMemoryBarriers,
+    uint32_t bufferMemoryBarrierCount,
+    const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+    uint32_t imageMemoryBarrierCount,
+    const VkImageMemoryBarrier* pImageMemoryBarriers)
+{
+    AEMU_SCOPED_TRACE("vkCmdWaitEvents");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdWaitEvents(commandBuffer, eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers, true /* do lock */);
+}
+static void entry_vkCmdPipelineBarrier(
+    VkCommandBuffer commandBuffer,
+    VkPipelineStageFlags srcStageMask,
+    VkPipelineStageFlags dstStageMask,
+    VkDependencyFlags dependencyFlags,
+    uint32_t memoryBarrierCount,
+    const VkMemoryBarrier* pMemoryBarriers,
+    uint32_t bufferMemoryBarrierCount,
+    const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+    uint32_t imageMemoryBarrierCount,
+    const VkImageMemoryBarrier* pImageMemoryBarriers)
+{
+    AEMU_SCOPED_TRACE("vkCmdPipelineBarrier");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers, true /* do lock */);
+}
+static void entry_vkCmdBeginQuery(
+    VkCommandBuffer commandBuffer,
+    VkQueryPool queryPool,
+    uint32_t query,
+    VkQueryControlFlags flags)
+{
+    AEMU_SCOPED_TRACE("vkCmdBeginQuery");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBeginQuery(commandBuffer, queryPool, query, flags, true /* do lock */);
+}
+static void entry_vkCmdEndQuery(
+    VkCommandBuffer commandBuffer,
+    VkQueryPool queryPool,
+    uint32_t query)
+{
+    AEMU_SCOPED_TRACE("vkCmdEndQuery");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdEndQuery(commandBuffer, queryPool, query, true /* do lock */);
+}
+static void entry_vkCmdResetQueryPool(
+    VkCommandBuffer commandBuffer,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t queryCount)
+{
+    AEMU_SCOPED_TRACE("vkCmdResetQueryPool");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount, true /* do lock */);
+}
+static void entry_vkCmdWriteTimestamp(
+    VkCommandBuffer commandBuffer,
+    VkPipelineStageFlagBits pipelineStage,
+    VkQueryPool queryPool,
+    uint32_t query)
+{
+    AEMU_SCOPED_TRACE("vkCmdWriteTimestamp");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, query, true /* do lock */);
+}
+static void entry_vkCmdCopyQueryPoolResults(
+    VkCommandBuffer commandBuffer,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t queryCount,
+    VkBuffer dstBuffer,
+    VkDeviceSize dstOffset,
+    VkDeviceSize stride,
+    VkQueryResultFlags flags)
+{
+    AEMU_SCOPED_TRACE("vkCmdCopyQueryPoolResults");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride, flags, true /* do lock */);
+}
+static void entry_vkCmdPushConstants(
+    VkCommandBuffer commandBuffer,
+    VkPipelineLayout layout,
+    VkShaderStageFlags stageFlags,
+    uint32_t offset,
+    uint32_t size,
+    const void* pValues)
+{
+    AEMU_SCOPED_TRACE("vkCmdPushConstants");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues, true /* do lock */);
+}
+static void entry_vkCmdBeginRenderPass(
+    VkCommandBuffer commandBuffer,
+    const VkRenderPassBeginInfo* pRenderPassBegin,
+    VkSubpassContents contents)
+{
+    AEMU_SCOPED_TRACE("vkCmdBeginRenderPass");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents, true /* do lock */);
+}
+static void entry_vkCmdNextSubpass(
+    VkCommandBuffer commandBuffer,
+    VkSubpassContents contents)
+{
+    AEMU_SCOPED_TRACE("vkCmdNextSubpass");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdNextSubpass(commandBuffer, contents, true /* do lock */);
+}
+static void entry_vkCmdEndRenderPass(
+    VkCommandBuffer commandBuffer)
+{
+    AEMU_SCOPED_TRACE("vkCmdEndRenderPass");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdEndRenderPass(commandBuffer, true /* do lock */);
+}
+static void entry_vkCmdExecuteCommands(
+    VkCommandBuffer commandBuffer,
+    uint32_t commandBufferCount,
+    const VkCommandBuffer* pCommandBuffers)
+{
+    AEMU_SCOPED_TRACE("vkCmdExecuteCommands");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    auto resources = ResourceTracker::get();
+    resources->on_vkCmdExecuteCommands(vkEnc, commandBuffer, commandBufferCount, pCommandBuffers);
+}
+#endif
+#ifdef VK_VERSION_1_1
+static VkResult entry_vkEnumerateInstanceVersion(
+    uint32_t* pApiVersion)
+{
+    AEMU_SCOPED_TRACE("vkEnumerateInstanceVersion");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkEnumerateInstanceVersion_VkResult_return = (VkResult)0;
+    vkEnumerateInstanceVersion_VkResult_return = vkEnc->vkEnumerateInstanceVersion(pApiVersion, true /* do lock */);
+    return vkEnumerateInstanceVersion_VkResult_return;
+}
+static VkResult entry_vkBindBufferMemory2(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindBufferMemoryInfo* pBindInfos)
+{
+    AEMU_SCOPED_TRACE("vkBindBufferMemory2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkBindBufferMemory2_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkBindBufferMemory2_VkResult_return = resources->on_vkBindBufferMemory2(vkEnc, VK_SUCCESS, device, bindInfoCount, pBindInfos);
+    return vkBindBufferMemory2_VkResult_return;
+}
+static VkResult dynCheck_entry_vkBindBufferMemory2(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindBufferMemoryInfo* pBindInfos)
+{
+    auto resources = ResourceTracker::get();
+    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
+    {
+        sOnInvalidDynamicallyCheckedCall("vkBindBufferMemory2", "VK_VERSION_1_1");
+    }
+    AEMU_SCOPED_TRACE("vkBindBufferMemory2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkBindBufferMemory2_VkResult_return = (VkResult)0;
+    vkBindBufferMemory2_VkResult_return = resources->on_vkBindBufferMemory2(vkEnc, VK_SUCCESS, device, bindInfoCount, pBindInfos);
+    return vkBindBufferMemory2_VkResult_return;
+}
+static VkResult entry_vkBindImageMemory2(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindImageMemoryInfo* pBindInfos)
+{
+    AEMU_SCOPED_TRACE("vkBindImageMemory2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkBindImageMemory2_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkBindImageMemory2_VkResult_return = resources->on_vkBindImageMemory2(vkEnc, VK_SUCCESS, device, bindInfoCount, pBindInfos);
+    return vkBindImageMemory2_VkResult_return;
+}
+static VkResult dynCheck_entry_vkBindImageMemory2(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindImageMemoryInfo* pBindInfos)
+{
+    auto resources = ResourceTracker::get();
+    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
+    {
+        sOnInvalidDynamicallyCheckedCall("vkBindImageMemory2", "VK_VERSION_1_1");
+    }
+    AEMU_SCOPED_TRACE("vkBindImageMemory2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkBindImageMemory2_VkResult_return = (VkResult)0;
+    vkBindImageMemory2_VkResult_return = resources->on_vkBindImageMemory2(vkEnc, VK_SUCCESS, device, bindInfoCount, pBindInfos);
+    return vkBindImageMemory2_VkResult_return;
+}
+static void entry_vkGetDeviceGroupPeerMemoryFeatures(
+    VkDevice device,
+    uint32_t heapIndex,
+    uint32_t localDeviceIndex,
+    uint32_t remoteDeviceIndex,
+    VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
+{
+    AEMU_SCOPED_TRACE("vkGetDeviceGroupPeerMemoryFeatures");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetDeviceGroupPeerMemoryFeatures(device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures, true /* do lock */);
+}
+static void dynCheck_entry_vkGetDeviceGroupPeerMemoryFeatures(
+    VkDevice device,
+    uint32_t heapIndex,
+    uint32_t localDeviceIndex,
+    uint32_t remoteDeviceIndex,
+    VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
+{
+    auto resources = ResourceTracker::get();
+    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetDeviceGroupPeerMemoryFeatures", "VK_VERSION_1_1");
+    }
+    AEMU_SCOPED_TRACE("vkGetDeviceGroupPeerMemoryFeatures");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetDeviceGroupPeerMemoryFeatures(device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures, true /* do lock */);
+}
+static void entry_vkCmdSetDeviceMask(
+    VkCommandBuffer commandBuffer,
+    uint32_t deviceMask)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetDeviceMask");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetDeviceMask(commandBuffer, deviceMask, true /* do lock */);
+}
+static void entry_vkCmdDispatchBase(
+    VkCommandBuffer commandBuffer,
+    uint32_t baseGroupX,
+    uint32_t baseGroupY,
+    uint32_t baseGroupZ,
+    uint32_t groupCountX,
+    uint32_t groupCountY,
+    uint32_t groupCountZ)
+{
+    AEMU_SCOPED_TRACE("vkCmdDispatchBase");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDispatchBase(commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ, true /* do lock */);
+}
+static VkResult entry_vkEnumeratePhysicalDeviceGroups(
+    VkInstance instance,
+    uint32_t* pPhysicalDeviceGroupCount,
+    VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties)
+{
+    AEMU_SCOPED_TRACE("vkEnumeratePhysicalDeviceGroups");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkEnumeratePhysicalDeviceGroups_VkResult_return = (VkResult)0;
+    vkEnumeratePhysicalDeviceGroups_VkResult_return = vkEnc->vkEnumeratePhysicalDeviceGroups(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties, true /* do lock */);
+    return vkEnumeratePhysicalDeviceGroups_VkResult_return;
+}
+static void entry_vkGetImageMemoryRequirements2(
+    VkDevice device,
+    const VkImageMemoryRequirementsInfo2* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements)
+{
+    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkGetImageMemoryRequirements2(vkEnc, device, pInfo, pMemoryRequirements);
+}
+static void dynCheck_entry_vkGetImageMemoryRequirements2(
+    VkDevice device,
+    const VkImageMemoryRequirementsInfo2* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements)
+{
+    auto resources = ResourceTracker::get();
+    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetImageMemoryRequirements2", "VK_VERSION_1_1");
+    }
+    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    resources->on_vkGetImageMemoryRequirements2(vkEnc, device, pInfo, pMemoryRequirements);
+}
+static void entry_vkGetBufferMemoryRequirements2(
+    VkDevice device,
+    const VkBufferMemoryRequirementsInfo2* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements)
+{
+    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkGetBufferMemoryRequirements2(vkEnc, device, pInfo, pMemoryRequirements);
+}
+static void dynCheck_entry_vkGetBufferMemoryRequirements2(
+    VkDevice device,
+    const VkBufferMemoryRequirementsInfo2* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements)
+{
+    auto resources = ResourceTracker::get();
+    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetBufferMemoryRequirements2", "VK_VERSION_1_1");
+    }
+    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    resources->on_vkGetBufferMemoryRequirements2(vkEnc, device, pInfo, pMemoryRequirements);
+}
+static void entry_vkGetImageSparseMemoryRequirements2(
+    VkDevice device,
+    const VkImageSparseMemoryRequirementsInfo2* pInfo,
+    uint32_t* pSparseMemoryRequirementCount,
+    VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
+{
+    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetImageSparseMemoryRequirements2(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements, true /* do lock */);
+}
+static void dynCheck_entry_vkGetImageSparseMemoryRequirements2(
+    VkDevice device,
+    const VkImageSparseMemoryRequirementsInfo2* pInfo,
+    uint32_t* pSparseMemoryRequirementCount,
+    VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
+{
+    auto resources = ResourceTracker::get();
+    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetImageSparseMemoryRequirements2", "VK_VERSION_1_1");
+    }
+    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetImageSparseMemoryRequirements2(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements, true /* do lock */);
+}
+static void entry_vkGetPhysicalDeviceFeatures2(
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceFeatures2* pFeatures)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFeatures2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceFeatures2(physicalDevice, pFeatures, true /* do lock */);
+}
+static void entry_vkGetPhysicalDeviceProperties2(
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceProperties2* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceProperties2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceProperties2(physicalDevice, pProperties, true /* do lock */);
+}
+static void entry_vkGetPhysicalDeviceFormatProperties2(
+    VkPhysicalDevice physicalDevice,
+    VkFormat format,
+    VkFormatProperties2* pFormatProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFormatProperties2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceFormatProperties2(physicalDevice, format, pFormatProperties, true /* do lock */);
+}
+static VkResult entry_vkGetPhysicalDeviceImageFormatProperties2(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
+    VkImageFormatProperties2* pImageFormatProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceImageFormatProperties2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceImageFormatProperties2_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkGetPhysicalDeviceImageFormatProperties2_VkResult_return = resources->on_vkGetPhysicalDeviceImageFormatProperties2(vkEnc, VK_SUCCESS, physicalDevice, pImageFormatInfo, pImageFormatProperties);
+    return vkGetPhysicalDeviceImageFormatProperties2_VkResult_return;
+}
+static void entry_vkGetPhysicalDeviceQueueFamilyProperties2(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pQueueFamilyPropertyCount,
+    VkQueueFamilyProperties2* pQueueFamilyProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceQueueFamilyProperties2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceQueueFamilyProperties2(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties, true /* do lock */);
+}
+static void entry_vkGetPhysicalDeviceMemoryProperties2(
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceMemoryProperties2* pMemoryProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMemoryProperties2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceMemoryProperties2(physicalDevice, pMemoryProperties, true /* do lock */);
+}
+static void entry_vkGetPhysicalDeviceSparseImageFormatProperties2(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo,
+    uint32_t* pPropertyCount,
+    VkSparseImageFormatProperties2* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSparseImageFormatProperties2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceSparseImageFormatProperties2(physicalDevice, pFormatInfo, pPropertyCount, pProperties, true /* do lock */);
+}
+static void entry_vkTrimCommandPool(
+    VkDevice device,
+    VkCommandPool commandPool,
+    VkCommandPoolTrimFlags flags)
+{
+    AEMU_SCOPED_TRACE("vkTrimCommandPool");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkTrimCommandPool(device, commandPool, flags, true /* do lock */);
+}
+static void dynCheck_entry_vkTrimCommandPool(
+    VkDevice device,
+    VkCommandPool commandPool,
+    VkCommandPoolTrimFlags flags)
+{
+    auto resources = ResourceTracker::get();
+    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
+    {
+        sOnInvalidDynamicallyCheckedCall("vkTrimCommandPool", "VK_VERSION_1_1");
+    }
+    AEMU_SCOPED_TRACE("vkTrimCommandPool");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkTrimCommandPool(device, commandPool, flags, true /* do lock */);
+}
+static void entry_vkGetDeviceQueue2(
+    VkDevice device,
+    const VkDeviceQueueInfo2* pQueueInfo,
+    VkQueue* pQueue)
+{
+    AEMU_SCOPED_TRACE("vkGetDeviceQueue2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetDeviceQueue2(device, pQueueInfo, pQueue, true /* do lock */);
+}
+static void dynCheck_entry_vkGetDeviceQueue2(
+    VkDevice device,
+    const VkDeviceQueueInfo2* pQueueInfo,
+    VkQueue* pQueue)
+{
+    auto resources = ResourceTracker::get();
+    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetDeviceQueue2", "VK_VERSION_1_1");
+    }
+    AEMU_SCOPED_TRACE("vkGetDeviceQueue2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetDeviceQueue2(device, pQueueInfo, pQueue, true /* do lock */);
+}
+static VkResult entry_vkCreateSamplerYcbcrConversion(
+    VkDevice device,
+    const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSamplerYcbcrConversion* pYcbcrConversion)
+{
+    AEMU_SCOPED_TRACE("vkCreateSamplerYcbcrConversion");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateSamplerYcbcrConversion_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkCreateSamplerYcbcrConversion_VkResult_return = resources->on_vkCreateSamplerYcbcrConversion(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pYcbcrConversion);
+    return vkCreateSamplerYcbcrConversion_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreateSamplerYcbcrConversion(
+    VkDevice device,
+    const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSamplerYcbcrConversion* pYcbcrConversion)
+{
+    auto resources = ResourceTracker::get();
+    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreateSamplerYcbcrConversion", "VK_VERSION_1_1");
+    }
+    AEMU_SCOPED_TRACE("vkCreateSamplerYcbcrConversion");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateSamplerYcbcrConversion_VkResult_return = (VkResult)0;
+    vkCreateSamplerYcbcrConversion_VkResult_return = resources->on_vkCreateSamplerYcbcrConversion(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pYcbcrConversion);
+    return vkCreateSamplerYcbcrConversion_VkResult_return;
+}
+static void entry_vkDestroySamplerYcbcrConversion(
+    VkDevice device,
+    VkSamplerYcbcrConversion ycbcrConversion,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroySamplerYcbcrConversion");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkDestroySamplerYcbcrConversion(vkEnc, device, ycbcrConversion, pAllocator);
+}
+static void dynCheck_entry_vkDestroySamplerYcbcrConversion(
+    VkDevice device,
+    VkSamplerYcbcrConversion ycbcrConversion,
+    const VkAllocationCallbacks* pAllocator)
+{
+    auto resources = ResourceTracker::get();
+    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
+    {
+        sOnInvalidDynamicallyCheckedCall("vkDestroySamplerYcbcrConversion", "VK_VERSION_1_1");
+    }
+    AEMU_SCOPED_TRACE("vkDestroySamplerYcbcrConversion");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    resources->on_vkDestroySamplerYcbcrConversion(vkEnc, device, ycbcrConversion, pAllocator);
+}
+static VkResult entry_vkCreateDescriptorUpdateTemplate(
+    VkDevice device,
+    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
+{
+    AEMU_SCOPED_TRACE("vkCreateDescriptorUpdateTemplate");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateDescriptorUpdateTemplate_VkResult_return = (VkResult)0;
+    vkCreateDescriptorUpdateTemplate_VkResult_return = vkEnc->vkCreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate, true /* do lock */);
+    return vkCreateDescriptorUpdateTemplate_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreateDescriptorUpdateTemplate(
+    VkDevice device,
+    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
+{
+    auto resources = ResourceTracker::get();
+    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreateDescriptorUpdateTemplate", "VK_VERSION_1_1");
+    }
+    AEMU_SCOPED_TRACE("vkCreateDescriptorUpdateTemplate");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateDescriptorUpdateTemplate_VkResult_return = (VkResult)0;
+    vkCreateDescriptorUpdateTemplate_VkResult_return = vkEnc->vkCreateDescriptorUpdateTemplate(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate, true /* do lock */);
+    return vkCreateDescriptorUpdateTemplate_VkResult_return;
+}
+static void entry_vkDestroyDescriptorUpdateTemplate(
+    VkDevice device,
+    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyDescriptorUpdateTemplate");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator, true /* do lock */);
+}
+static void dynCheck_entry_vkDestroyDescriptorUpdateTemplate(
+    VkDevice device,
+    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+    const VkAllocationCallbacks* pAllocator)
+{
+    auto resources = ResourceTracker::get();
+    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
+    {
+        sOnInvalidDynamicallyCheckedCall("vkDestroyDescriptorUpdateTemplate", "VK_VERSION_1_1");
+    }
+    AEMU_SCOPED_TRACE("vkDestroyDescriptorUpdateTemplate");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyDescriptorUpdateTemplate(device, descriptorUpdateTemplate, pAllocator, true /* do lock */);
+}
+static void entry_vkUpdateDescriptorSetWithTemplate(
+    VkDevice device,
+    VkDescriptorSet descriptorSet,
+    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+    const void* pData)
+{
+    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplate");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkUpdateDescriptorSetWithTemplate(vkEnc, device, descriptorSet, descriptorUpdateTemplate, pData);
+}
+static void dynCheck_entry_vkUpdateDescriptorSetWithTemplate(
+    VkDevice device,
+    VkDescriptorSet descriptorSet,
+    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+    const void* pData)
+{
+    auto resources = ResourceTracker::get();
+    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
+    {
+        sOnInvalidDynamicallyCheckedCall("vkUpdateDescriptorSetWithTemplate", "VK_VERSION_1_1");
+    }
+    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplate");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    resources->on_vkUpdateDescriptorSetWithTemplate(vkEnc, device, descriptorSet, descriptorUpdateTemplate, pData);
+}
+static void entry_vkGetPhysicalDeviceExternalBufferProperties(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
+    VkExternalBufferProperties* pExternalBufferProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalBufferProperties");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceExternalBufferProperties(physicalDevice, pExternalBufferInfo, pExternalBufferProperties, true /* do lock */);
+}
+static void entry_vkGetPhysicalDeviceExternalFenceProperties(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
+    VkExternalFenceProperties* pExternalFenceProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalFenceProperties");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkGetPhysicalDeviceExternalFenceProperties(vkEnc, physicalDevice, pExternalFenceInfo, pExternalFenceProperties);
+}
+static void entry_vkGetPhysicalDeviceExternalSemaphoreProperties(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
+    VkExternalSemaphoreProperties* pExternalSemaphoreProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalSemaphoreProperties");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceExternalSemaphoreProperties(physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties, true /* do lock */);
+}
+static void entry_vkGetDescriptorSetLayoutSupport(
+    VkDevice device,
+    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+    VkDescriptorSetLayoutSupport* pSupport)
+{
+    AEMU_SCOPED_TRACE("vkGetDescriptorSetLayoutSupport");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetDescriptorSetLayoutSupport(device, pCreateInfo, pSupport, true /* do lock */);
+}
+static void dynCheck_entry_vkGetDescriptorSetLayoutSupport(
+    VkDevice device,
+    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+    VkDescriptorSetLayoutSupport* pSupport)
+{
+    auto resources = ResourceTracker::get();
+    if (resources->getApiVersionFromDevice(device) < VK_API_VERSION_1_1)
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetDescriptorSetLayoutSupport", "VK_VERSION_1_1");
+    }
+    AEMU_SCOPED_TRACE("vkGetDescriptorSetLayoutSupport");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetDescriptorSetLayoutSupport(device, pCreateInfo, pSupport, true /* do lock */);
+}
+#endif
+#ifdef VK_VERSION_1_2
+static void entry_vkCmdDrawIndirectCount(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkBuffer countBuffer,
+    VkDeviceSize countBufferOffset,
+    uint32_t maxDrawCount,
+    uint32_t stride)
+{
+    AEMU_SCOPED_TRACE("vkCmdDrawIndirectCount");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDrawIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride, true /* do lock */);
+}
+static void entry_vkCmdDrawIndexedIndirectCount(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkBuffer countBuffer,
+    VkDeviceSize countBufferOffset,
+    uint32_t maxDrawCount,
+    uint32_t stride)
+{
+    AEMU_SCOPED_TRACE("vkCmdDrawIndexedIndirectCount");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDrawIndexedIndirectCount(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride, true /* do lock */);
+}
+static VkResult entry_vkCreateRenderPass2(
+    VkDevice device,
+    const VkRenderPassCreateInfo2* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkRenderPass* pRenderPass)
+{
+    AEMU_SCOPED_TRACE("vkCreateRenderPass2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateRenderPass2_VkResult_return = (VkResult)0;
+    vkCreateRenderPass2_VkResult_return = vkEnc->vkCreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass, true /* do lock */);
+    return vkCreateRenderPass2_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreateRenderPass2(
+    VkDevice device,
+    const VkRenderPassCreateInfo2* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkRenderPass* pRenderPass)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_VERSION_1_2"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreateRenderPass2", "VK_VERSION_1_2");
+    }
+    AEMU_SCOPED_TRACE("vkCreateRenderPass2");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateRenderPass2_VkResult_return = (VkResult)0;
+    vkCreateRenderPass2_VkResult_return = vkEnc->vkCreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass, true /* do lock */);
+    return vkCreateRenderPass2_VkResult_return;
+}
+static void entry_vkCmdBeginRenderPass2(
+    VkCommandBuffer commandBuffer,
+    const VkRenderPassBeginInfo* pRenderPassBegin,
+    const VkSubpassBeginInfo* pSubpassBeginInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdBeginRenderPass2");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, true /* do lock */);
+}
+static void entry_vkCmdNextSubpass2(
+    VkCommandBuffer commandBuffer,
+    const VkSubpassBeginInfo* pSubpassBeginInfo,
+    const VkSubpassEndInfo* pSubpassEndInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdNextSubpass2");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, true /* do lock */);
+}
+static void entry_vkCmdEndRenderPass2(
+    VkCommandBuffer commandBuffer,
+    const VkSubpassEndInfo* pSubpassEndInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdEndRenderPass2");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdEndRenderPass2(commandBuffer, pSubpassEndInfo, true /* do lock */);
+}
+static void entry_vkResetQueryPool(
+    VkDevice device,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t queryCount)
+{
+    AEMU_SCOPED_TRACE("vkResetQueryPool");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkResetQueryPool(device, queryPool, firstQuery, queryCount, true /* do lock */);
+}
+static void dynCheck_entry_vkResetQueryPool(
+    VkDevice device,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t queryCount)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_VERSION_1_2"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkResetQueryPool", "VK_VERSION_1_2");
+    }
+    AEMU_SCOPED_TRACE("vkResetQueryPool");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkResetQueryPool(device, queryPool, firstQuery, queryCount, true /* do lock */);
+}
+static VkResult entry_vkGetSemaphoreCounterValue(
+    VkDevice device,
+    VkSemaphore semaphore,
+    uint64_t* pValue)
+{
+    AEMU_SCOPED_TRACE("vkGetSemaphoreCounterValue");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetSemaphoreCounterValue_VkResult_return = (VkResult)0;
+    vkGetSemaphoreCounterValue_VkResult_return = vkEnc->vkGetSemaphoreCounterValue(device, semaphore, pValue, true /* do lock */);
+    return vkGetSemaphoreCounterValue_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetSemaphoreCounterValue(
+    VkDevice device,
+    VkSemaphore semaphore,
+    uint64_t* pValue)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_VERSION_1_2"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetSemaphoreCounterValue", "VK_VERSION_1_2");
+    }
+    AEMU_SCOPED_TRACE("vkGetSemaphoreCounterValue");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetSemaphoreCounterValue_VkResult_return = (VkResult)0;
+    vkGetSemaphoreCounterValue_VkResult_return = vkEnc->vkGetSemaphoreCounterValue(device, semaphore, pValue, true /* do lock */);
+    return vkGetSemaphoreCounterValue_VkResult_return;
+}
+static VkResult entry_vkWaitSemaphores(
+    VkDevice device,
+    const VkSemaphoreWaitInfo* pWaitInfo,
+    uint64_t timeout)
+{
+    AEMU_SCOPED_TRACE("vkWaitSemaphores");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkWaitSemaphores_VkResult_return = (VkResult)0;
+    vkWaitSemaphores_VkResult_return = vkEnc->vkWaitSemaphores(device, pWaitInfo, timeout, true /* do lock */);
+    return vkWaitSemaphores_VkResult_return;
+}
+static VkResult dynCheck_entry_vkWaitSemaphores(
+    VkDevice device,
+    const VkSemaphoreWaitInfo* pWaitInfo,
+    uint64_t timeout)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_VERSION_1_2"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkWaitSemaphores", "VK_VERSION_1_2");
+    }
+    AEMU_SCOPED_TRACE("vkWaitSemaphores");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkWaitSemaphores_VkResult_return = (VkResult)0;
+    vkWaitSemaphores_VkResult_return = vkEnc->vkWaitSemaphores(device, pWaitInfo, timeout, true /* do lock */);
+    return vkWaitSemaphores_VkResult_return;
+}
+static VkResult entry_vkSignalSemaphore(
+    VkDevice device,
+    const VkSemaphoreSignalInfo* pSignalInfo)
+{
+    AEMU_SCOPED_TRACE("vkSignalSemaphore");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkSignalSemaphore_VkResult_return = (VkResult)0;
+    vkSignalSemaphore_VkResult_return = vkEnc->vkSignalSemaphore(device, pSignalInfo, true /* do lock */);
+    return vkSignalSemaphore_VkResult_return;
+}
+static VkResult dynCheck_entry_vkSignalSemaphore(
+    VkDevice device,
+    const VkSemaphoreSignalInfo* pSignalInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_VERSION_1_2"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkSignalSemaphore", "VK_VERSION_1_2");
+    }
+    AEMU_SCOPED_TRACE("vkSignalSemaphore");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkSignalSemaphore_VkResult_return = (VkResult)0;
+    vkSignalSemaphore_VkResult_return = vkEnc->vkSignalSemaphore(device, pSignalInfo, true /* do lock */);
+    return vkSignalSemaphore_VkResult_return;
+}
+static VkDeviceAddress entry_vkGetBufferDeviceAddress(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo)
+{
+    AEMU_SCOPED_TRACE("vkGetBufferDeviceAddress");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkDeviceAddress vkGetBufferDeviceAddress_VkDeviceAddress_return = (VkDeviceAddress)0;
+    vkGetBufferDeviceAddress_VkDeviceAddress_return = vkEnc->vkGetBufferDeviceAddress(device, pInfo, true /* do lock */);
+    return vkGetBufferDeviceAddress_VkDeviceAddress_return;
+}
+static VkDeviceAddress dynCheck_entry_vkGetBufferDeviceAddress(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_VERSION_1_2"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetBufferDeviceAddress", "VK_VERSION_1_2");
+    }
+    AEMU_SCOPED_TRACE("vkGetBufferDeviceAddress");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkDeviceAddress vkGetBufferDeviceAddress_VkDeviceAddress_return = (VkDeviceAddress)0;
+    vkGetBufferDeviceAddress_VkDeviceAddress_return = vkEnc->vkGetBufferDeviceAddress(device, pInfo, true /* do lock */);
+    return vkGetBufferDeviceAddress_VkDeviceAddress_return;
+}
+static uint64_t entry_vkGetBufferOpaqueCaptureAddress(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo)
+{
+    AEMU_SCOPED_TRACE("vkGetBufferOpaqueCaptureAddress");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    uint64_t vkGetBufferOpaqueCaptureAddress_uint64_t_return = (uint64_t)0;
+    vkGetBufferOpaqueCaptureAddress_uint64_t_return = vkEnc->vkGetBufferOpaqueCaptureAddress(device, pInfo, true /* do lock */);
+    return vkGetBufferOpaqueCaptureAddress_uint64_t_return;
+}
+static uint64_t dynCheck_entry_vkGetBufferOpaqueCaptureAddress(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_VERSION_1_2"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetBufferOpaqueCaptureAddress", "VK_VERSION_1_2");
+    }
+    AEMU_SCOPED_TRACE("vkGetBufferOpaqueCaptureAddress");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    uint64_t vkGetBufferOpaqueCaptureAddress_uint64_t_return = (uint64_t)0;
+    vkGetBufferOpaqueCaptureAddress_uint64_t_return = vkEnc->vkGetBufferOpaqueCaptureAddress(device, pInfo, true /* do lock */);
+    return vkGetBufferOpaqueCaptureAddress_uint64_t_return;
+}
+static uint64_t entry_vkGetDeviceMemoryOpaqueCaptureAddress(
+    VkDevice device,
+    const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo)
+{
+    AEMU_SCOPED_TRACE("vkGetDeviceMemoryOpaqueCaptureAddress");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    uint64_t vkGetDeviceMemoryOpaqueCaptureAddress_uint64_t_return = (uint64_t)0;
+    vkGetDeviceMemoryOpaqueCaptureAddress_uint64_t_return = vkEnc->vkGetDeviceMemoryOpaqueCaptureAddress(device, pInfo, true /* do lock */);
+    return vkGetDeviceMemoryOpaqueCaptureAddress_uint64_t_return;
+}
+static uint64_t dynCheck_entry_vkGetDeviceMemoryOpaqueCaptureAddress(
+    VkDevice device,
+    const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_VERSION_1_2"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetDeviceMemoryOpaqueCaptureAddress", "VK_VERSION_1_2");
+    }
+    AEMU_SCOPED_TRACE("vkGetDeviceMemoryOpaqueCaptureAddress");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    uint64_t vkGetDeviceMemoryOpaqueCaptureAddress_uint64_t_return = (uint64_t)0;
+    vkGetDeviceMemoryOpaqueCaptureAddress_uint64_t_return = vkEnc->vkGetDeviceMemoryOpaqueCaptureAddress(device, pInfo, true /* do lock */);
+    return vkGetDeviceMemoryOpaqueCaptureAddress_uint64_t_return;
+}
+#endif
+#ifdef VK_KHR_surface
+static void entry_vkDestroySurfaceKHR(
+    VkInstance instance,
+    VkSurfaceKHR surface,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroySurfaceKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroySurfaceKHR(instance, surface, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkGetPhysicalDeviceSurfaceSupportKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t queueFamilyIndex,
+    VkSurfaceKHR surface,
+    VkBool32* pSupported)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceSupportKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceSurfaceSupportKHR_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceSurfaceSupportKHR_VkResult_return = vkEnc->vkGetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface, pSupported, true /* do lock */);
+    return vkGetPhysicalDeviceSurfaceSupportKHR_VkResult_return;
+}
+static VkResult entry_vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
+    VkPhysicalDevice physicalDevice,
+    VkSurfaceKHR surface,
+    VkSurfaceCapabilitiesKHR* pSurfaceCapabilities)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceSurfaceCapabilitiesKHR_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceSurfaceCapabilitiesKHR_VkResult_return = vkEnc->vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface, pSurfaceCapabilities, true /* do lock */);
+    return vkGetPhysicalDeviceSurfaceCapabilitiesKHR_VkResult_return;
+}
+static VkResult entry_vkGetPhysicalDeviceSurfaceFormatsKHR(
+    VkPhysicalDevice physicalDevice,
+    VkSurfaceKHR surface,
+    uint32_t* pSurfaceFormatCount,
+    VkSurfaceFormatKHR* pSurfaceFormats)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceFormatsKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceSurfaceFormatsKHR_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceSurfaceFormatsKHR_VkResult_return = vkEnc->vkGetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount, pSurfaceFormats, true /* do lock */);
+    return vkGetPhysicalDeviceSurfaceFormatsKHR_VkResult_return;
+}
+static VkResult entry_vkGetPhysicalDeviceSurfacePresentModesKHR(
+    VkPhysicalDevice physicalDevice,
+    VkSurfaceKHR surface,
+    uint32_t* pPresentModeCount,
+    VkPresentModeKHR* pPresentModes)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfacePresentModesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceSurfacePresentModesKHR_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceSurfacePresentModesKHR_VkResult_return = vkEnc->vkGetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface, pPresentModeCount, pPresentModes, true /* do lock */);
+    return vkGetPhysicalDeviceSurfacePresentModesKHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_swapchain
+static VkResult entry_vkCreateSwapchainKHR(
+    VkDevice device,
+    const VkSwapchainCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSwapchainKHR* pSwapchain)
+{
+    AEMU_SCOPED_TRACE("vkCreateSwapchainKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateSwapchainKHR_VkResult_return = (VkResult)0;
+    vkCreateSwapchainKHR_VkResult_return = vkEnc->vkCreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain, true /* do lock */);
+    return vkCreateSwapchainKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreateSwapchainKHR(
+    VkDevice device,
+    const VkSwapchainCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSwapchainKHR* pSwapchain)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_swapchain"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreateSwapchainKHR", "VK_KHR_swapchain");
+    }
+    AEMU_SCOPED_TRACE("vkCreateSwapchainKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateSwapchainKHR_VkResult_return = (VkResult)0;
+    vkCreateSwapchainKHR_VkResult_return = vkEnc->vkCreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain, true /* do lock */);
+    return vkCreateSwapchainKHR_VkResult_return;
+}
+static void entry_vkDestroySwapchainKHR(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroySwapchainKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroySwapchainKHR(device, swapchain, pAllocator, true /* do lock */);
+}
+static void dynCheck_entry_vkDestroySwapchainKHR(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    const VkAllocationCallbacks* pAllocator)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_swapchain"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkDestroySwapchainKHR", "VK_KHR_swapchain");
+    }
+    AEMU_SCOPED_TRACE("vkDestroySwapchainKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroySwapchainKHR(device, swapchain, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkGetSwapchainImagesKHR(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    uint32_t* pSwapchainImageCount,
+    VkImage* pSwapchainImages)
+{
+    AEMU_SCOPED_TRACE("vkGetSwapchainImagesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetSwapchainImagesKHR_VkResult_return = (VkResult)0;
+    vkGetSwapchainImagesKHR_VkResult_return = vkEnc->vkGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages, true /* do lock */);
+    return vkGetSwapchainImagesKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetSwapchainImagesKHR(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    uint32_t* pSwapchainImageCount,
+    VkImage* pSwapchainImages)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_swapchain"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetSwapchainImagesKHR", "VK_KHR_swapchain");
+    }
+    AEMU_SCOPED_TRACE("vkGetSwapchainImagesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetSwapchainImagesKHR_VkResult_return = (VkResult)0;
+    vkGetSwapchainImagesKHR_VkResult_return = vkEnc->vkGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages, true /* do lock */);
+    return vkGetSwapchainImagesKHR_VkResult_return;
+}
+static VkResult entry_vkAcquireNextImageKHR(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    uint64_t timeout,
+    VkSemaphore semaphore,
+    VkFence fence,
+    uint32_t* pImageIndex)
+{
+    AEMU_SCOPED_TRACE("vkAcquireNextImageKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkAcquireNextImageKHR_VkResult_return = (VkResult)0;
+    vkAcquireNextImageKHR_VkResult_return = vkEnc->vkAcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex, true /* do lock */);
+    return vkAcquireNextImageKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkAcquireNextImageKHR(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    uint64_t timeout,
+    VkSemaphore semaphore,
+    VkFence fence,
+    uint32_t* pImageIndex)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_swapchain"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkAcquireNextImageKHR", "VK_KHR_swapchain");
+    }
+    AEMU_SCOPED_TRACE("vkAcquireNextImageKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkAcquireNextImageKHR_VkResult_return = (VkResult)0;
+    vkAcquireNextImageKHR_VkResult_return = vkEnc->vkAcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex, true /* do lock */);
+    return vkAcquireNextImageKHR_VkResult_return;
+}
+static VkResult entry_vkQueuePresentKHR(
+    VkQueue queue,
+    const VkPresentInfoKHR* pPresentInfo)
+{
+    AEMU_SCOPED_TRACE("vkQueuePresentKHR");
+    auto vkEnc = ResourceTracker::getQueueEncoder(queue);
+    VkResult vkQueuePresentKHR_VkResult_return = (VkResult)0;
+    vkQueuePresentKHR_VkResult_return = vkEnc->vkQueuePresentKHR(queue, pPresentInfo, true /* do lock */);
+    return vkQueuePresentKHR_VkResult_return;
+}
+static VkResult entry_vkGetDeviceGroupPresentCapabilitiesKHR(
+    VkDevice device,
+    VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities)
+{
+    AEMU_SCOPED_TRACE("vkGetDeviceGroupPresentCapabilitiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetDeviceGroupPresentCapabilitiesKHR_VkResult_return = (VkResult)0;
+    vkGetDeviceGroupPresentCapabilitiesKHR_VkResult_return = vkEnc->vkGetDeviceGroupPresentCapabilitiesKHR(device, pDeviceGroupPresentCapabilities, true /* do lock */);
+    return vkGetDeviceGroupPresentCapabilitiesKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetDeviceGroupPresentCapabilitiesKHR(
+    VkDevice device,
+    VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_swapchain"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetDeviceGroupPresentCapabilitiesKHR", "VK_KHR_swapchain");
+    }
+    AEMU_SCOPED_TRACE("vkGetDeviceGroupPresentCapabilitiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetDeviceGroupPresentCapabilitiesKHR_VkResult_return = (VkResult)0;
+    vkGetDeviceGroupPresentCapabilitiesKHR_VkResult_return = vkEnc->vkGetDeviceGroupPresentCapabilitiesKHR(device, pDeviceGroupPresentCapabilities, true /* do lock */);
+    return vkGetDeviceGroupPresentCapabilitiesKHR_VkResult_return;
+}
+static VkResult entry_vkGetDeviceGroupSurfacePresentModesKHR(
+    VkDevice device,
+    VkSurfaceKHR surface,
+    VkDeviceGroupPresentModeFlagsKHR* pModes)
+{
+    AEMU_SCOPED_TRACE("vkGetDeviceGroupSurfacePresentModesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetDeviceGroupSurfacePresentModesKHR_VkResult_return = (VkResult)0;
+    vkGetDeviceGroupSurfacePresentModesKHR_VkResult_return = vkEnc->vkGetDeviceGroupSurfacePresentModesKHR(device, surface, pModes, true /* do lock */);
+    return vkGetDeviceGroupSurfacePresentModesKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetDeviceGroupSurfacePresentModesKHR(
+    VkDevice device,
+    VkSurfaceKHR surface,
+    VkDeviceGroupPresentModeFlagsKHR* pModes)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_swapchain"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetDeviceGroupSurfacePresentModesKHR", "VK_KHR_swapchain");
+    }
+    AEMU_SCOPED_TRACE("vkGetDeviceGroupSurfacePresentModesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetDeviceGroupSurfacePresentModesKHR_VkResult_return = (VkResult)0;
+    vkGetDeviceGroupSurfacePresentModesKHR_VkResult_return = vkEnc->vkGetDeviceGroupSurfacePresentModesKHR(device, surface, pModes, true /* do lock */);
+    return vkGetDeviceGroupSurfacePresentModesKHR_VkResult_return;
+}
+static VkResult entry_vkGetPhysicalDevicePresentRectanglesKHR(
+    VkPhysicalDevice physicalDevice,
+    VkSurfaceKHR surface,
+    uint32_t* pRectCount,
+    VkRect2D* pRects)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDevicePresentRectanglesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDevicePresentRectanglesKHR_VkResult_return = (VkResult)0;
+    vkGetPhysicalDevicePresentRectanglesKHR_VkResult_return = vkEnc->vkGetPhysicalDevicePresentRectanglesKHR(physicalDevice, surface, pRectCount, pRects, true /* do lock */);
+    return vkGetPhysicalDevicePresentRectanglesKHR_VkResult_return;
+}
+static VkResult entry_vkAcquireNextImage2KHR(
+    VkDevice device,
+    const VkAcquireNextImageInfoKHR* pAcquireInfo,
+    uint32_t* pImageIndex)
+{
+    AEMU_SCOPED_TRACE("vkAcquireNextImage2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkAcquireNextImage2KHR_VkResult_return = (VkResult)0;
+    vkAcquireNextImage2KHR_VkResult_return = vkEnc->vkAcquireNextImage2KHR(device, pAcquireInfo, pImageIndex, true /* do lock */);
+    return vkAcquireNextImage2KHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkAcquireNextImage2KHR(
+    VkDevice device,
+    const VkAcquireNextImageInfoKHR* pAcquireInfo,
+    uint32_t* pImageIndex)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_swapchain"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkAcquireNextImage2KHR", "VK_KHR_swapchain");
+    }
+    AEMU_SCOPED_TRACE("vkAcquireNextImage2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkAcquireNextImage2KHR_VkResult_return = (VkResult)0;
+    vkAcquireNextImage2KHR_VkResult_return = vkEnc->vkAcquireNextImage2KHR(device, pAcquireInfo, pImageIndex, true /* do lock */);
+    return vkAcquireNextImage2KHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_display
+static VkResult entry_vkGetPhysicalDeviceDisplayPropertiesKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pPropertyCount,
+    VkDisplayPropertiesKHR* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayPropertiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceDisplayPropertiesKHR_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceDisplayPropertiesKHR_VkResult_return = vkEnc->vkGetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, pPropertyCount, pProperties, true /* do lock */);
+    return vkGetPhysicalDeviceDisplayPropertiesKHR_VkResult_return;
+}
+static VkResult entry_vkGetPhysicalDeviceDisplayPlanePropertiesKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pPropertyCount,
+    VkDisplayPlanePropertiesKHR* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayPlanePropertiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceDisplayPlanePropertiesKHR_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceDisplayPlanePropertiesKHR_VkResult_return = vkEnc->vkGetPhysicalDeviceDisplayPlanePropertiesKHR(physicalDevice, pPropertyCount, pProperties, true /* do lock */);
+    return vkGetPhysicalDeviceDisplayPlanePropertiesKHR_VkResult_return;
+}
+static VkResult entry_vkGetDisplayPlaneSupportedDisplaysKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t planeIndex,
+    uint32_t* pDisplayCount,
+    VkDisplayKHR* pDisplays)
+{
+    AEMU_SCOPED_TRACE("vkGetDisplayPlaneSupportedDisplaysKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetDisplayPlaneSupportedDisplaysKHR_VkResult_return = (VkResult)0;
+    vkGetDisplayPlaneSupportedDisplaysKHR_VkResult_return = vkEnc->vkGetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays, true /* do lock */);
+    return vkGetDisplayPlaneSupportedDisplaysKHR_VkResult_return;
+}
+static VkResult entry_vkGetDisplayModePropertiesKHR(
+    VkPhysicalDevice physicalDevice,
+    VkDisplayKHR display,
+    uint32_t* pPropertyCount,
+    VkDisplayModePropertiesKHR* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetDisplayModePropertiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetDisplayModePropertiesKHR_VkResult_return = (VkResult)0;
+    vkGetDisplayModePropertiesKHR_VkResult_return = vkEnc->vkGetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount, pProperties, true /* do lock */);
+    return vkGetDisplayModePropertiesKHR_VkResult_return;
+}
+static VkResult entry_vkCreateDisplayModeKHR(
+    VkPhysicalDevice physicalDevice,
+    VkDisplayKHR display,
+    const VkDisplayModeCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkDisplayModeKHR* pMode)
+{
+    AEMU_SCOPED_TRACE("vkCreateDisplayModeKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateDisplayModeKHR_VkResult_return = (VkResult)0;
+    vkCreateDisplayModeKHR_VkResult_return = vkEnc->vkCreateDisplayModeKHR(physicalDevice, display, pCreateInfo, pAllocator, pMode, true /* do lock */);
+    return vkCreateDisplayModeKHR_VkResult_return;
+}
+static VkResult entry_vkGetDisplayPlaneCapabilitiesKHR(
+    VkPhysicalDevice physicalDevice,
+    VkDisplayModeKHR mode,
+    uint32_t planeIndex,
+    VkDisplayPlaneCapabilitiesKHR* pCapabilities)
+{
+    AEMU_SCOPED_TRACE("vkGetDisplayPlaneCapabilitiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetDisplayPlaneCapabilitiesKHR_VkResult_return = (VkResult)0;
+    vkGetDisplayPlaneCapabilitiesKHR_VkResult_return = vkEnc->vkGetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities, true /* do lock */);
+    return vkGetDisplayPlaneCapabilitiesKHR_VkResult_return;
+}
+static VkResult entry_vkCreateDisplayPlaneSurfaceKHR(
+    VkInstance instance,
+    const VkDisplaySurfaceCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface)
+{
+    AEMU_SCOPED_TRACE("vkCreateDisplayPlaneSurfaceKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateDisplayPlaneSurfaceKHR_VkResult_return = (VkResult)0;
+    vkCreateDisplayPlaneSurfaceKHR_VkResult_return = vkEnc->vkCreateDisplayPlaneSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface, true /* do lock */);
+    return vkCreateDisplayPlaneSurfaceKHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_display_swapchain
+static VkResult entry_vkCreateSharedSwapchainsKHR(
+    VkDevice device,
+    uint32_t swapchainCount,
+    const VkSwapchainCreateInfoKHR* pCreateInfos,
+    const VkAllocationCallbacks* pAllocator,
+    VkSwapchainKHR* pSwapchains)
+{
+    AEMU_SCOPED_TRACE("vkCreateSharedSwapchainsKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateSharedSwapchainsKHR_VkResult_return = (VkResult)0;
+    vkCreateSharedSwapchainsKHR_VkResult_return = vkEnc->vkCreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains, true /* do lock */);
+    return vkCreateSharedSwapchainsKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreateSharedSwapchainsKHR(
+    VkDevice device,
+    uint32_t swapchainCount,
+    const VkSwapchainCreateInfoKHR* pCreateInfos,
+    const VkAllocationCallbacks* pAllocator,
+    VkSwapchainKHR* pSwapchains)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_display_swapchain"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreateSharedSwapchainsKHR", "VK_KHR_display_swapchain");
+    }
+    AEMU_SCOPED_TRACE("vkCreateSharedSwapchainsKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateSharedSwapchainsKHR_VkResult_return = (VkResult)0;
+    vkCreateSharedSwapchainsKHR_VkResult_return = vkEnc->vkCreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains, true /* do lock */);
+    return vkCreateSharedSwapchainsKHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_xlib_surface
+static VkResult entry_vkCreateXlibSurfaceKHR(
+    VkInstance instance,
+    const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface)
+{
+    AEMU_SCOPED_TRACE("vkCreateXlibSurfaceKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateXlibSurfaceKHR_VkResult_return = (VkResult)0;
+    vkCreateXlibSurfaceKHR_VkResult_return = vkEnc->vkCreateXlibSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface, true /* do lock */);
+    return vkCreateXlibSurfaceKHR_VkResult_return;
+}
+static VkBool32 entry_vkGetPhysicalDeviceXlibPresentationSupportKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t queueFamilyIndex,
+    Display* dpy,
+    VisualID visualID)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceXlibPresentationSupportKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkBool32 vkGetPhysicalDeviceXlibPresentationSupportKHR_VkBool32_return = (VkBool32)0;
+    vkGetPhysicalDeviceXlibPresentationSupportKHR_VkBool32_return = vkEnc->vkGetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIndex, dpy, visualID, true /* do lock */);
+    return vkGetPhysicalDeviceXlibPresentationSupportKHR_VkBool32_return;
+}
+#endif
+#ifdef VK_KHR_xcb_surface
+static VkResult entry_vkCreateXcbSurfaceKHR(
+    VkInstance instance,
+    const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface)
+{
+    AEMU_SCOPED_TRACE("vkCreateXcbSurfaceKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateXcbSurfaceKHR_VkResult_return = (VkResult)0;
+    vkCreateXcbSurfaceKHR_VkResult_return = vkEnc->vkCreateXcbSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface, true /* do lock */);
+    return vkCreateXcbSurfaceKHR_VkResult_return;
+}
+static VkBool32 entry_vkGetPhysicalDeviceXcbPresentationSupportKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t queueFamilyIndex,
+    xcb_connection_t* connection,
+    xcb_visualid_t visual_id)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceXcbPresentationSupportKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkBool32 vkGetPhysicalDeviceXcbPresentationSupportKHR_VkBool32_return = (VkBool32)0;
+    vkGetPhysicalDeviceXcbPresentationSupportKHR_VkBool32_return = vkEnc->vkGetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice, queueFamilyIndex, connection, visual_id, true /* do lock */);
+    return vkGetPhysicalDeviceXcbPresentationSupportKHR_VkBool32_return;
+}
+#endif
+#ifdef VK_KHR_wayland_surface
+static VkResult entry_vkCreateWaylandSurfaceKHR(
+    VkInstance instance,
+    const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface)
+{
+    AEMU_SCOPED_TRACE("vkCreateWaylandSurfaceKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateWaylandSurfaceKHR_VkResult_return = (VkResult)0;
+    vkCreateWaylandSurfaceKHR_VkResult_return = vkEnc->vkCreateWaylandSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface, true /* do lock */);
+    return vkCreateWaylandSurfaceKHR_VkResult_return;
+}
+static VkBool32 entry_vkGetPhysicalDeviceWaylandPresentationSupportKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t queueFamilyIndex,
+    wl_display* display)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceWaylandPresentationSupportKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkBool32 vkGetPhysicalDeviceWaylandPresentationSupportKHR_VkBool32_return = (VkBool32)0;
+    vkGetPhysicalDeviceWaylandPresentationSupportKHR_VkBool32_return = vkEnc->vkGetPhysicalDeviceWaylandPresentationSupportKHR(physicalDevice, queueFamilyIndex, display, true /* do lock */);
+    return vkGetPhysicalDeviceWaylandPresentationSupportKHR_VkBool32_return;
+}
+#endif
+#ifdef VK_KHR_android_surface
+static VkResult entry_vkCreateAndroidSurfaceKHR(
+    VkInstance instance,
+    const VkAndroidSurfaceCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface)
+{
+    AEMU_SCOPED_TRACE("vkCreateAndroidSurfaceKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateAndroidSurfaceKHR_VkResult_return = (VkResult)0;
+    vkCreateAndroidSurfaceKHR_VkResult_return = vkEnc->vkCreateAndroidSurfaceKHR(instance, pCreateInfo, pAllocator, pSurface, true /* do lock */);
+    return vkCreateAndroidSurfaceKHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_win32_surface
+static VkResult entry_vkCreateWin32SurfaceKHR(
+    VkInstance instance,
+    const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface)
+{
+    AEMU_SCOPED_TRACE("vkCreateWin32SurfaceKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateWin32SurfaceKHR_VkResult_return = (VkResult)0;
+    vkCreateWin32SurfaceKHR_VkResult_return = vkEnc->vkCreateWin32SurfaceKHR(instance, pCreateInfo, pAllocator, pSurface, true /* do lock */);
+    return vkCreateWin32SurfaceKHR_VkResult_return;
+}
+static VkBool32 entry_vkGetPhysicalDeviceWin32PresentationSupportKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t queueFamilyIndex)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceWin32PresentationSupportKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkBool32 vkGetPhysicalDeviceWin32PresentationSupportKHR_VkBool32_return = (VkBool32)0;
+    vkGetPhysicalDeviceWin32PresentationSupportKHR_VkBool32_return = vkEnc->vkGetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice, queueFamilyIndex, true /* do lock */);
+    return vkGetPhysicalDeviceWin32PresentationSupportKHR_VkBool32_return;
+}
+#endif
+#ifdef VK_KHR_sampler_mirror_clamp_to_edge
+#endif
+#ifdef VK_KHR_multiview
+#endif
+#ifdef VK_KHR_get_physical_device_properties2
+static void entry_vkGetPhysicalDeviceFeatures2KHR(
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceFeatures2* pFeatures)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFeatures2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceFeatures2KHR(physicalDevice, pFeatures, true /* do lock */);
+}
+static void entry_vkGetPhysicalDeviceProperties2KHR(
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceProperties2* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceProperties2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceProperties2KHR(physicalDevice, pProperties, true /* do lock */);
+}
+static void entry_vkGetPhysicalDeviceFormatProperties2KHR(
+    VkPhysicalDevice physicalDevice,
+    VkFormat format,
+    VkFormatProperties2* pFormatProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFormatProperties2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceFormatProperties2KHR(physicalDevice, format, pFormatProperties, true /* do lock */);
+}
+static VkResult entry_vkGetPhysicalDeviceImageFormatProperties2KHR(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
+    VkImageFormatProperties2* pImageFormatProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceImageFormatProperties2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceImageFormatProperties2KHR_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkGetPhysicalDeviceImageFormatProperties2KHR_VkResult_return = resources->on_vkGetPhysicalDeviceImageFormatProperties2KHR(vkEnc, VK_SUCCESS, physicalDevice, pImageFormatInfo, pImageFormatProperties);
+    return vkGetPhysicalDeviceImageFormatProperties2KHR_VkResult_return;
+}
+static void entry_vkGetPhysicalDeviceQueueFamilyProperties2KHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pQueueFamilyPropertyCount,
+    VkQueueFamilyProperties2* pQueueFamilyProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceQueueFamilyProperties2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties, true /* do lock */);
+}
+static void entry_vkGetPhysicalDeviceMemoryProperties2KHR(
+    VkPhysicalDevice physicalDevice,
+    VkPhysicalDeviceMemoryProperties2* pMemoryProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMemoryProperties2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceMemoryProperties2KHR(physicalDevice, pMemoryProperties, true /* do lock */);
+}
+static void entry_vkGetPhysicalDeviceSparseImageFormatProperties2KHR(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo,
+    uint32_t* pPropertyCount,
+    VkSparseImageFormatProperties2* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSparseImageFormatProperties2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceSparseImageFormatProperties2KHR(physicalDevice, pFormatInfo, pPropertyCount, pProperties, true /* do lock */);
+}
+#endif
+#ifdef VK_KHR_device_group
+static void entry_vkGetDeviceGroupPeerMemoryFeaturesKHR(
+    VkDevice device,
+    uint32_t heapIndex,
+    uint32_t localDeviceIndex,
+    uint32_t remoteDeviceIndex,
+    VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
+{
+    AEMU_SCOPED_TRACE("vkGetDeviceGroupPeerMemoryFeaturesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetDeviceGroupPeerMemoryFeaturesKHR(device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures, true /* do lock */);
+}
+static void dynCheck_entry_vkGetDeviceGroupPeerMemoryFeaturesKHR(
+    VkDevice device,
+    uint32_t heapIndex,
+    uint32_t localDeviceIndex,
+    uint32_t remoteDeviceIndex,
+    VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_device_group"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetDeviceGroupPeerMemoryFeaturesKHR", "VK_KHR_device_group");
+    }
+    AEMU_SCOPED_TRACE("vkGetDeviceGroupPeerMemoryFeaturesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetDeviceGroupPeerMemoryFeaturesKHR(device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures, true /* do lock */);
+}
+static void entry_vkCmdSetDeviceMaskKHR(
+    VkCommandBuffer commandBuffer,
+    uint32_t deviceMask)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetDeviceMaskKHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetDeviceMaskKHR(commandBuffer, deviceMask, true /* do lock */);
+}
+static void entry_vkCmdDispatchBaseKHR(
+    VkCommandBuffer commandBuffer,
+    uint32_t baseGroupX,
+    uint32_t baseGroupY,
+    uint32_t baseGroupZ,
+    uint32_t groupCountX,
+    uint32_t groupCountY,
+    uint32_t groupCountZ)
+{
+    AEMU_SCOPED_TRACE("vkCmdDispatchBaseKHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDispatchBaseKHR(commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ, true /* do lock */);
+}
+#endif
+#ifdef VK_KHR_shader_draw_parameters
+#endif
+#ifdef VK_KHR_maintenance1
+static void entry_vkTrimCommandPoolKHR(
+    VkDevice device,
+    VkCommandPool commandPool,
+    VkCommandPoolTrimFlags flags)
+{
+    AEMU_SCOPED_TRACE("vkTrimCommandPoolKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkTrimCommandPoolKHR(device, commandPool, flags, true /* do lock */);
+}
+static void dynCheck_entry_vkTrimCommandPoolKHR(
+    VkDevice device,
+    VkCommandPool commandPool,
+    VkCommandPoolTrimFlags flags)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_maintenance1"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkTrimCommandPoolKHR", "VK_KHR_maintenance1");
+    }
+    AEMU_SCOPED_TRACE("vkTrimCommandPoolKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkTrimCommandPoolKHR(device, commandPool, flags, true /* do lock */);
+}
+#endif
+#ifdef VK_KHR_device_group_creation
+static VkResult entry_vkEnumeratePhysicalDeviceGroupsKHR(
+    VkInstance instance,
+    uint32_t* pPhysicalDeviceGroupCount,
+    VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties)
+{
+    AEMU_SCOPED_TRACE("vkEnumeratePhysicalDeviceGroupsKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkEnumeratePhysicalDeviceGroupsKHR_VkResult_return = (VkResult)0;
+    vkEnumeratePhysicalDeviceGroupsKHR_VkResult_return = vkEnc->vkEnumeratePhysicalDeviceGroupsKHR(instance, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties, true /* do lock */);
+    return vkEnumeratePhysicalDeviceGroupsKHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_external_memory_capabilities
+static void entry_vkGetPhysicalDeviceExternalBufferPropertiesKHR(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
+    VkExternalBufferProperties* pExternalBufferProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalBufferPropertiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceExternalBufferPropertiesKHR(physicalDevice, pExternalBufferInfo, pExternalBufferProperties, true /* do lock */);
+}
+#endif
+#ifdef VK_KHR_external_memory
+#endif
+#ifdef VK_KHR_external_memory_win32
+static VkResult entry_vkGetMemoryWin32HandleKHR(
+    VkDevice device,
+    const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+    HANDLE* pHandle)
+{
+    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandleKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetMemoryWin32HandleKHR_VkResult_return = (VkResult)0;
+    vkGetMemoryWin32HandleKHR_VkResult_return = vkEnc->vkGetMemoryWin32HandleKHR(device, pGetWin32HandleInfo, pHandle, true /* do lock */);
+    return vkGetMemoryWin32HandleKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetMemoryWin32HandleKHR(
+    VkDevice device,
+    const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+    HANDLE* pHandle)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_external_memory_win32"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetMemoryWin32HandleKHR", "VK_KHR_external_memory_win32");
+    }
+    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandleKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetMemoryWin32HandleKHR_VkResult_return = (VkResult)0;
+    vkGetMemoryWin32HandleKHR_VkResult_return = vkEnc->vkGetMemoryWin32HandleKHR(device, pGetWin32HandleInfo, pHandle, true /* do lock */);
+    return vkGetMemoryWin32HandleKHR_VkResult_return;
+}
+static VkResult entry_vkGetMemoryWin32HandlePropertiesKHR(
+    VkDevice device,
+    VkExternalMemoryHandleTypeFlagBits handleType,
+    HANDLE handle,
+    VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandlePropertiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetMemoryWin32HandlePropertiesKHR_VkResult_return = (VkResult)0;
+    vkGetMemoryWin32HandlePropertiesKHR_VkResult_return = vkEnc->vkGetMemoryWin32HandlePropertiesKHR(device, handleType, handle, pMemoryWin32HandleProperties, true /* do lock */);
+    return vkGetMemoryWin32HandlePropertiesKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetMemoryWin32HandlePropertiesKHR(
+    VkDevice device,
+    VkExternalMemoryHandleTypeFlagBits handleType,
+    HANDLE handle,
+    VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_external_memory_win32"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetMemoryWin32HandlePropertiesKHR", "VK_KHR_external_memory_win32");
+    }
+    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandlePropertiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetMemoryWin32HandlePropertiesKHR_VkResult_return = (VkResult)0;
+    vkGetMemoryWin32HandlePropertiesKHR_VkResult_return = vkEnc->vkGetMemoryWin32HandlePropertiesKHR(device, handleType, handle, pMemoryWin32HandleProperties, true /* do lock */);
+    return vkGetMemoryWin32HandlePropertiesKHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_external_memory_fd
+static VkResult entry_vkGetMemoryFdKHR(
+    VkDevice device,
+    const VkMemoryGetFdInfoKHR* pGetFdInfo,
+    int* pFd)
+{
+    AEMU_SCOPED_TRACE("vkGetMemoryFdKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetMemoryFdKHR_VkResult_return = (VkResult)0;
+    vkGetMemoryFdKHR_VkResult_return = vkEnc->vkGetMemoryFdKHR(device, pGetFdInfo, pFd, true /* do lock */);
+    return vkGetMemoryFdKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetMemoryFdKHR(
+    VkDevice device,
+    const VkMemoryGetFdInfoKHR* pGetFdInfo,
+    int* pFd)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_external_memory_fd"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetMemoryFdKHR", "VK_KHR_external_memory_fd");
+    }
+    AEMU_SCOPED_TRACE("vkGetMemoryFdKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetMemoryFdKHR_VkResult_return = (VkResult)0;
+    vkGetMemoryFdKHR_VkResult_return = vkEnc->vkGetMemoryFdKHR(device, pGetFdInfo, pFd, true /* do lock */);
+    return vkGetMemoryFdKHR_VkResult_return;
+}
+static VkResult entry_vkGetMemoryFdPropertiesKHR(
+    VkDevice device,
+    VkExternalMemoryHandleTypeFlagBits handleType,
+    int fd,
+    VkMemoryFdPropertiesKHR* pMemoryFdProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetMemoryFdPropertiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetMemoryFdPropertiesKHR_VkResult_return = (VkResult)0;
+    vkGetMemoryFdPropertiesKHR_VkResult_return = vkEnc->vkGetMemoryFdPropertiesKHR(device, handleType, fd, pMemoryFdProperties, true /* do lock */);
+    return vkGetMemoryFdPropertiesKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetMemoryFdPropertiesKHR(
+    VkDevice device,
+    VkExternalMemoryHandleTypeFlagBits handleType,
+    int fd,
+    VkMemoryFdPropertiesKHR* pMemoryFdProperties)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_external_memory_fd"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetMemoryFdPropertiesKHR", "VK_KHR_external_memory_fd");
+    }
+    AEMU_SCOPED_TRACE("vkGetMemoryFdPropertiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetMemoryFdPropertiesKHR_VkResult_return = (VkResult)0;
+    vkGetMemoryFdPropertiesKHR_VkResult_return = vkEnc->vkGetMemoryFdPropertiesKHR(device, handleType, fd, pMemoryFdProperties, true /* do lock */);
+    return vkGetMemoryFdPropertiesKHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_win32_keyed_mutex
+#endif
+#ifdef VK_KHR_external_semaphore_capabilities
+static void entry_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
+    VkExternalSemaphoreProperties* pExternalSemaphoreProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalSemaphorePropertiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(physicalDevice, pExternalSemaphoreInfo, pExternalSemaphoreProperties, true /* do lock */);
+}
+#endif
+#ifdef VK_KHR_external_semaphore
+#endif
+#ifdef VK_KHR_external_semaphore_win32
+static VkResult entry_vkImportSemaphoreWin32HandleKHR(
+    VkDevice device,
+    const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo)
+{
+    AEMU_SCOPED_TRACE("vkImportSemaphoreWin32HandleKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkImportSemaphoreWin32HandleKHR_VkResult_return = (VkResult)0;
+    vkImportSemaphoreWin32HandleKHR_VkResult_return = vkEnc->vkImportSemaphoreWin32HandleKHR(device, pImportSemaphoreWin32HandleInfo, true /* do lock */);
+    return vkImportSemaphoreWin32HandleKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkImportSemaphoreWin32HandleKHR(
+    VkDevice device,
+    const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_win32"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkImportSemaphoreWin32HandleKHR", "VK_KHR_external_semaphore_win32");
+    }
+    AEMU_SCOPED_TRACE("vkImportSemaphoreWin32HandleKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkImportSemaphoreWin32HandleKHR_VkResult_return = (VkResult)0;
+    vkImportSemaphoreWin32HandleKHR_VkResult_return = vkEnc->vkImportSemaphoreWin32HandleKHR(device, pImportSemaphoreWin32HandleInfo, true /* do lock */);
+    return vkImportSemaphoreWin32HandleKHR_VkResult_return;
+}
+static VkResult entry_vkGetSemaphoreWin32HandleKHR(
+    VkDevice device,
+    const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+    HANDLE* pHandle)
+{
+    AEMU_SCOPED_TRACE("vkGetSemaphoreWin32HandleKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetSemaphoreWin32HandleKHR_VkResult_return = (VkResult)0;
+    vkGetSemaphoreWin32HandleKHR_VkResult_return = vkEnc->vkGetSemaphoreWin32HandleKHR(device, pGetWin32HandleInfo, pHandle, true /* do lock */);
+    return vkGetSemaphoreWin32HandleKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetSemaphoreWin32HandleKHR(
+    VkDevice device,
+    const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+    HANDLE* pHandle)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_win32"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetSemaphoreWin32HandleKHR", "VK_KHR_external_semaphore_win32");
+    }
+    AEMU_SCOPED_TRACE("vkGetSemaphoreWin32HandleKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetSemaphoreWin32HandleKHR_VkResult_return = (VkResult)0;
+    vkGetSemaphoreWin32HandleKHR_VkResult_return = vkEnc->vkGetSemaphoreWin32HandleKHR(device, pGetWin32HandleInfo, pHandle, true /* do lock */);
+    return vkGetSemaphoreWin32HandleKHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_external_semaphore_fd
+static VkResult entry_vkImportSemaphoreFdKHR(
+    VkDevice device,
+    const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo)
+{
+    AEMU_SCOPED_TRACE("vkImportSemaphoreFdKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkImportSemaphoreFdKHR_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkImportSemaphoreFdKHR_VkResult_return = resources->on_vkImportSemaphoreFdKHR(vkEnc, VK_SUCCESS, device, pImportSemaphoreFdInfo);
+    return vkImportSemaphoreFdKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkImportSemaphoreFdKHR(
+    VkDevice device,
+    const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_fd"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkImportSemaphoreFdKHR", "VK_KHR_external_semaphore_fd");
+    }
+    AEMU_SCOPED_TRACE("vkImportSemaphoreFdKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkImportSemaphoreFdKHR_VkResult_return = (VkResult)0;
+    vkImportSemaphoreFdKHR_VkResult_return = resources->on_vkImportSemaphoreFdKHR(vkEnc, VK_SUCCESS, device, pImportSemaphoreFdInfo);
+    return vkImportSemaphoreFdKHR_VkResult_return;
+}
+static VkResult entry_vkGetSemaphoreFdKHR(
+    VkDevice device,
+    const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
+    int* pFd)
+{
+    AEMU_SCOPED_TRACE("vkGetSemaphoreFdKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetSemaphoreFdKHR_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkGetSemaphoreFdKHR_VkResult_return = resources->on_vkGetSemaphoreFdKHR(vkEnc, VK_SUCCESS, device, pGetFdInfo, pFd);
+    return vkGetSemaphoreFdKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetSemaphoreFdKHR(
+    VkDevice device,
+    const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
+    int* pFd)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_fd"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetSemaphoreFdKHR", "VK_KHR_external_semaphore_fd");
+    }
+    AEMU_SCOPED_TRACE("vkGetSemaphoreFdKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetSemaphoreFdKHR_VkResult_return = (VkResult)0;
+    vkGetSemaphoreFdKHR_VkResult_return = resources->on_vkGetSemaphoreFdKHR(vkEnc, VK_SUCCESS, device, pGetFdInfo, pFd);
+    return vkGetSemaphoreFdKHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_push_descriptor
+static void entry_vkCmdPushDescriptorSetKHR(
+    VkCommandBuffer commandBuffer,
+    VkPipelineBindPoint pipelineBindPoint,
+    VkPipelineLayout layout,
+    uint32_t set,
+    uint32_t descriptorWriteCount,
+    const VkWriteDescriptorSet* pDescriptorWrites)
+{
+    AEMU_SCOPED_TRACE("vkCmdPushDescriptorSetKHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdPushDescriptorSetKHR(commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount, pDescriptorWrites, true /* do lock */);
+}
+static void entry_vkCmdPushDescriptorSetWithTemplateKHR(
+    VkCommandBuffer commandBuffer,
+    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+    VkPipelineLayout layout,
+    uint32_t set,
+    const void* pData)
+{
+    AEMU_SCOPED_TRACE("vkCmdPushDescriptorSetWithTemplateKHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set, pData, true /* do lock */);
+}
+#endif
+#ifdef VK_KHR_shader_float16_int8
+#endif
+#ifdef VK_KHR_16bit_storage
+#endif
+#ifdef VK_KHR_incremental_present
+#endif
+#ifdef VK_KHR_descriptor_update_template
+static VkResult entry_vkCreateDescriptorUpdateTemplateKHR(
+    VkDevice device,
+    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
+{
+    AEMU_SCOPED_TRACE("vkCreateDescriptorUpdateTemplateKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateDescriptorUpdateTemplateKHR_VkResult_return = (VkResult)0;
+    vkCreateDescriptorUpdateTemplateKHR_VkResult_return = vkEnc->vkCreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate, true /* do lock */);
+    return vkCreateDescriptorUpdateTemplateKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreateDescriptorUpdateTemplateKHR(
+    VkDevice device,
+    const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_descriptor_update_template"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreateDescriptorUpdateTemplateKHR", "VK_KHR_descriptor_update_template");
+    }
+    AEMU_SCOPED_TRACE("vkCreateDescriptorUpdateTemplateKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateDescriptorUpdateTemplateKHR_VkResult_return = (VkResult)0;
+    vkCreateDescriptorUpdateTemplateKHR_VkResult_return = vkEnc->vkCreateDescriptorUpdateTemplateKHR(device, pCreateInfo, pAllocator, pDescriptorUpdateTemplate, true /* do lock */);
+    return vkCreateDescriptorUpdateTemplateKHR_VkResult_return;
+}
+static void entry_vkDestroyDescriptorUpdateTemplateKHR(
+    VkDevice device,
+    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyDescriptorUpdateTemplateKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator, true /* do lock */);
+}
+static void dynCheck_entry_vkDestroyDescriptorUpdateTemplateKHR(
+    VkDevice device,
+    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+    const VkAllocationCallbacks* pAllocator)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_descriptor_update_template"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkDestroyDescriptorUpdateTemplateKHR", "VK_KHR_descriptor_update_template");
+    }
+    AEMU_SCOPED_TRACE("vkDestroyDescriptorUpdateTemplateKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator, true /* do lock */);
+}
+static void entry_vkUpdateDescriptorSetWithTemplateKHR(
+    VkDevice device,
+    VkDescriptorSet descriptorSet,
+    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+    const void* pData)
+{
+    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplateKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkUpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData, true /* do lock */);
+}
+static void dynCheck_entry_vkUpdateDescriptorSetWithTemplateKHR(
+    VkDevice device,
+    VkDescriptorSet descriptorSet,
+    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+    const void* pData)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_descriptor_update_template"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkUpdateDescriptorSetWithTemplateKHR", "VK_KHR_descriptor_update_template");
+    }
+    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplateKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkUpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, pData, true /* do lock */);
+}
+#endif
+#ifdef VK_KHR_imageless_framebuffer
+#endif
+#ifdef VK_KHR_create_renderpass2
+static VkResult entry_vkCreateRenderPass2KHR(
+    VkDevice device,
+    const VkRenderPassCreateInfo2* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkRenderPass* pRenderPass)
+{
+    AEMU_SCOPED_TRACE("vkCreateRenderPass2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateRenderPass2KHR_VkResult_return = (VkResult)0;
+    vkCreateRenderPass2KHR_VkResult_return = vkEnc->vkCreateRenderPass2KHR(device, pCreateInfo, pAllocator, pRenderPass, true /* do lock */);
+    return vkCreateRenderPass2KHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreateRenderPass2KHR(
+    VkDevice device,
+    const VkRenderPassCreateInfo2* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkRenderPass* pRenderPass)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_create_renderpass2"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreateRenderPass2KHR", "VK_KHR_create_renderpass2");
+    }
+    AEMU_SCOPED_TRACE("vkCreateRenderPass2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateRenderPass2KHR_VkResult_return = (VkResult)0;
+    vkCreateRenderPass2KHR_VkResult_return = vkEnc->vkCreateRenderPass2KHR(device, pCreateInfo, pAllocator, pRenderPass, true /* do lock */);
+    return vkCreateRenderPass2KHR_VkResult_return;
+}
+static void entry_vkCmdBeginRenderPass2KHR(
+    VkCommandBuffer commandBuffer,
+    const VkRenderPassBeginInfo* pRenderPassBegin,
+    const VkSubpassBeginInfo* pSubpassBeginInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdBeginRenderPass2KHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo, true /* do lock */);
+}
+static void entry_vkCmdNextSubpass2KHR(
+    VkCommandBuffer commandBuffer,
+    const VkSubpassBeginInfo* pSubpassBeginInfo,
+    const VkSubpassEndInfo* pSubpassEndInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdNextSubpass2KHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo, true /* do lock */);
+}
+static void entry_vkCmdEndRenderPass2KHR(
+    VkCommandBuffer commandBuffer,
+    const VkSubpassEndInfo* pSubpassEndInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdEndRenderPass2KHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo, true /* do lock */);
+}
+#endif
+#ifdef VK_KHR_shared_presentable_image
+static VkResult entry_vkGetSwapchainStatusKHR(
+    VkDevice device,
+    VkSwapchainKHR swapchain)
+{
+    AEMU_SCOPED_TRACE("vkGetSwapchainStatusKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetSwapchainStatusKHR_VkResult_return = (VkResult)0;
+    vkGetSwapchainStatusKHR_VkResult_return = vkEnc->vkGetSwapchainStatusKHR(device, swapchain, true /* do lock */);
+    return vkGetSwapchainStatusKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetSwapchainStatusKHR(
+    VkDevice device,
+    VkSwapchainKHR swapchain)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_shared_presentable_image"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetSwapchainStatusKHR", "VK_KHR_shared_presentable_image");
+    }
+    AEMU_SCOPED_TRACE("vkGetSwapchainStatusKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetSwapchainStatusKHR_VkResult_return = (VkResult)0;
+    vkGetSwapchainStatusKHR_VkResult_return = vkEnc->vkGetSwapchainStatusKHR(device, swapchain, true /* do lock */);
+    return vkGetSwapchainStatusKHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_external_fence_capabilities
+static void entry_vkGetPhysicalDeviceExternalFencePropertiesKHR(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
+    VkExternalFenceProperties* pExternalFenceProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalFencePropertiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkGetPhysicalDeviceExternalFencePropertiesKHR(vkEnc, physicalDevice, pExternalFenceInfo, pExternalFenceProperties);
+}
+#endif
+#ifdef VK_KHR_external_fence
+#endif
+#ifdef VK_KHR_external_fence_win32
+static VkResult entry_vkImportFenceWin32HandleKHR(
+    VkDevice device,
+    const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo)
+{
+    AEMU_SCOPED_TRACE("vkImportFenceWin32HandleKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkImportFenceWin32HandleKHR_VkResult_return = (VkResult)0;
+    vkImportFenceWin32HandleKHR_VkResult_return = vkEnc->vkImportFenceWin32HandleKHR(device, pImportFenceWin32HandleInfo, true /* do lock */);
+    return vkImportFenceWin32HandleKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkImportFenceWin32HandleKHR(
+    VkDevice device,
+    const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_external_fence_win32"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkImportFenceWin32HandleKHR", "VK_KHR_external_fence_win32");
+    }
+    AEMU_SCOPED_TRACE("vkImportFenceWin32HandleKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkImportFenceWin32HandleKHR_VkResult_return = (VkResult)0;
+    vkImportFenceWin32HandleKHR_VkResult_return = vkEnc->vkImportFenceWin32HandleKHR(device, pImportFenceWin32HandleInfo, true /* do lock */);
+    return vkImportFenceWin32HandleKHR_VkResult_return;
+}
+static VkResult entry_vkGetFenceWin32HandleKHR(
+    VkDevice device,
+    const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+    HANDLE* pHandle)
+{
+    AEMU_SCOPED_TRACE("vkGetFenceWin32HandleKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetFenceWin32HandleKHR_VkResult_return = (VkResult)0;
+    vkGetFenceWin32HandleKHR_VkResult_return = vkEnc->vkGetFenceWin32HandleKHR(device, pGetWin32HandleInfo, pHandle, true /* do lock */);
+    return vkGetFenceWin32HandleKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetFenceWin32HandleKHR(
+    VkDevice device,
+    const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+    HANDLE* pHandle)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_external_fence_win32"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetFenceWin32HandleKHR", "VK_KHR_external_fence_win32");
+    }
+    AEMU_SCOPED_TRACE("vkGetFenceWin32HandleKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetFenceWin32HandleKHR_VkResult_return = (VkResult)0;
+    vkGetFenceWin32HandleKHR_VkResult_return = vkEnc->vkGetFenceWin32HandleKHR(device, pGetWin32HandleInfo, pHandle, true /* do lock */);
+    return vkGetFenceWin32HandleKHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_external_fence_fd
+static VkResult entry_vkImportFenceFdKHR(
+    VkDevice device,
+    const VkImportFenceFdInfoKHR* pImportFenceFdInfo)
+{
+    AEMU_SCOPED_TRACE("vkImportFenceFdKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkImportFenceFdKHR_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkImportFenceFdKHR_VkResult_return = resources->on_vkImportFenceFdKHR(vkEnc, VK_SUCCESS, device, pImportFenceFdInfo);
+    return vkImportFenceFdKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkImportFenceFdKHR(
+    VkDevice device,
+    const VkImportFenceFdInfoKHR* pImportFenceFdInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_external_fence_fd"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkImportFenceFdKHR", "VK_KHR_external_fence_fd");
+    }
+    AEMU_SCOPED_TRACE("vkImportFenceFdKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkImportFenceFdKHR_VkResult_return = (VkResult)0;
+    vkImportFenceFdKHR_VkResult_return = resources->on_vkImportFenceFdKHR(vkEnc, VK_SUCCESS, device, pImportFenceFdInfo);
+    return vkImportFenceFdKHR_VkResult_return;
+}
+static VkResult entry_vkGetFenceFdKHR(
+    VkDevice device,
+    const VkFenceGetFdInfoKHR* pGetFdInfo,
+    int* pFd)
+{
+    AEMU_SCOPED_TRACE("vkGetFenceFdKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetFenceFdKHR_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkGetFenceFdKHR_VkResult_return = resources->on_vkGetFenceFdKHR(vkEnc, VK_SUCCESS, device, pGetFdInfo, pFd);
+    return vkGetFenceFdKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetFenceFdKHR(
+    VkDevice device,
+    const VkFenceGetFdInfoKHR* pGetFdInfo,
+    int* pFd)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_external_fence_fd"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetFenceFdKHR", "VK_KHR_external_fence_fd");
+    }
+    AEMU_SCOPED_TRACE("vkGetFenceFdKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetFenceFdKHR_VkResult_return = (VkResult)0;
+    vkGetFenceFdKHR_VkResult_return = resources->on_vkGetFenceFdKHR(vkEnc, VK_SUCCESS, device, pGetFdInfo, pFd);
+    return vkGetFenceFdKHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_performance_query
+static VkResult entry_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t queueFamilyIndex,
+    uint32_t* pCounterCount,
+    VkPerformanceCounterKHR* pCounters,
+    VkPerformanceCounterDescriptionKHR* pCounterDescriptions)
+{
+    AEMU_SCOPED_TRACE("vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR_VkResult_return = (VkResult)0;
+    vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR_VkResult_return = vkEnc->vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR(physicalDevice, queueFamilyIndex, pCounterCount, pCounters, pCounterDescriptions, true /* do lock */);
+    return vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR_VkResult_return;
+}
+static void entry_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(
+    VkPhysicalDevice physicalDevice,
+    const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo,
+    uint32_t* pNumPasses)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR(physicalDevice, pPerformanceQueryCreateInfo, pNumPasses, true /* do lock */);
+}
+static VkResult entry_vkAcquireProfilingLockKHR(
+    VkDevice device,
+    const VkAcquireProfilingLockInfoKHR* pInfo)
+{
+    AEMU_SCOPED_TRACE("vkAcquireProfilingLockKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkAcquireProfilingLockKHR_VkResult_return = (VkResult)0;
+    vkAcquireProfilingLockKHR_VkResult_return = vkEnc->vkAcquireProfilingLockKHR(device, pInfo, true /* do lock */);
+    return vkAcquireProfilingLockKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkAcquireProfilingLockKHR(
+    VkDevice device,
+    const VkAcquireProfilingLockInfoKHR* pInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_performance_query"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkAcquireProfilingLockKHR", "VK_KHR_performance_query");
+    }
+    AEMU_SCOPED_TRACE("vkAcquireProfilingLockKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkAcquireProfilingLockKHR_VkResult_return = (VkResult)0;
+    vkAcquireProfilingLockKHR_VkResult_return = vkEnc->vkAcquireProfilingLockKHR(device, pInfo, true /* do lock */);
+    return vkAcquireProfilingLockKHR_VkResult_return;
+}
+static void entry_vkReleaseProfilingLockKHR(
+    VkDevice device)
+{
+    AEMU_SCOPED_TRACE("vkReleaseProfilingLockKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkReleaseProfilingLockKHR(device, true /* do lock */);
+}
+static void dynCheck_entry_vkReleaseProfilingLockKHR(
+    VkDevice device)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_performance_query"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkReleaseProfilingLockKHR", "VK_KHR_performance_query");
+    }
+    AEMU_SCOPED_TRACE("vkReleaseProfilingLockKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkReleaseProfilingLockKHR(device, true /* do lock */);
+}
+#endif
+#ifdef VK_KHR_maintenance2
+#endif
+#ifdef VK_KHR_get_surface_capabilities2
+static VkResult entry_vkGetPhysicalDeviceSurfaceCapabilities2KHR(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+    VkSurfaceCapabilities2KHR* pSurfaceCapabilities)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceCapabilities2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceSurfaceCapabilities2KHR_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceSurfaceCapabilities2KHR_VkResult_return = vkEnc->vkGetPhysicalDeviceSurfaceCapabilities2KHR(physicalDevice, pSurfaceInfo, pSurfaceCapabilities, true /* do lock */);
+    return vkGetPhysicalDeviceSurfaceCapabilities2KHR_VkResult_return;
+}
+static VkResult entry_vkGetPhysicalDeviceSurfaceFormats2KHR(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+    uint32_t* pSurfaceFormatCount,
+    VkSurfaceFormat2KHR* pSurfaceFormats)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceFormats2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceSurfaceFormats2KHR_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceSurfaceFormats2KHR_VkResult_return = vkEnc->vkGetPhysicalDeviceSurfaceFormats2KHR(physicalDevice, pSurfaceInfo, pSurfaceFormatCount, pSurfaceFormats, true /* do lock */);
+    return vkGetPhysicalDeviceSurfaceFormats2KHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_variable_pointers
+#endif
+#ifdef VK_KHR_get_display_properties2
+static VkResult entry_vkGetPhysicalDeviceDisplayProperties2KHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pPropertyCount,
+    VkDisplayProperties2KHR* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayProperties2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceDisplayProperties2KHR_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceDisplayProperties2KHR_VkResult_return = vkEnc->vkGetPhysicalDeviceDisplayProperties2KHR(physicalDevice, pPropertyCount, pProperties, true /* do lock */);
+    return vkGetPhysicalDeviceDisplayProperties2KHR_VkResult_return;
+}
+static VkResult entry_vkGetPhysicalDeviceDisplayPlaneProperties2KHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pPropertyCount,
+    VkDisplayPlaneProperties2KHR* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDisplayPlaneProperties2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceDisplayPlaneProperties2KHR_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceDisplayPlaneProperties2KHR_VkResult_return = vkEnc->vkGetPhysicalDeviceDisplayPlaneProperties2KHR(physicalDevice, pPropertyCount, pProperties, true /* do lock */);
+    return vkGetPhysicalDeviceDisplayPlaneProperties2KHR_VkResult_return;
+}
+static VkResult entry_vkGetDisplayModeProperties2KHR(
+    VkPhysicalDevice physicalDevice,
+    VkDisplayKHR display,
+    uint32_t* pPropertyCount,
+    VkDisplayModeProperties2KHR* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetDisplayModeProperties2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetDisplayModeProperties2KHR_VkResult_return = (VkResult)0;
+    vkGetDisplayModeProperties2KHR_VkResult_return = vkEnc->vkGetDisplayModeProperties2KHR(physicalDevice, display, pPropertyCount, pProperties, true /* do lock */);
+    return vkGetDisplayModeProperties2KHR_VkResult_return;
+}
+static VkResult entry_vkGetDisplayPlaneCapabilities2KHR(
+    VkPhysicalDevice physicalDevice,
+    const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo,
+    VkDisplayPlaneCapabilities2KHR* pCapabilities)
+{
+    AEMU_SCOPED_TRACE("vkGetDisplayPlaneCapabilities2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetDisplayPlaneCapabilities2KHR_VkResult_return = (VkResult)0;
+    vkGetDisplayPlaneCapabilities2KHR_VkResult_return = vkEnc->vkGetDisplayPlaneCapabilities2KHR(physicalDevice, pDisplayPlaneInfo, pCapabilities, true /* do lock */);
+    return vkGetDisplayPlaneCapabilities2KHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_dedicated_allocation
+#endif
+#ifdef VK_KHR_storage_buffer_storage_class
+#endif
+#ifdef VK_KHR_relaxed_block_layout
+#endif
+#ifdef VK_KHR_get_memory_requirements2
+static void entry_vkGetImageMemoryRequirements2KHR(
+    VkDevice device,
+    const VkImageMemoryRequirementsInfo2* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements)
+{
+    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkGetImageMemoryRequirements2KHR(vkEnc, device, pInfo, pMemoryRequirements);
+}
+static void dynCheck_entry_vkGetImageMemoryRequirements2KHR(
+    VkDevice device,
+    const VkImageMemoryRequirementsInfo2* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_get_memory_requirements2"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetImageMemoryRequirements2KHR", "VK_KHR_get_memory_requirements2");
+    }
+    AEMU_SCOPED_TRACE("vkGetImageMemoryRequirements2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    resources->on_vkGetImageMemoryRequirements2KHR(vkEnc, device, pInfo, pMemoryRequirements);
+}
+static void entry_vkGetBufferMemoryRequirements2KHR(
+    VkDevice device,
+    const VkBufferMemoryRequirementsInfo2* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements)
+{
+    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkGetBufferMemoryRequirements2KHR(vkEnc, device, pInfo, pMemoryRequirements);
+}
+static void dynCheck_entry_vkGetBufferMemoryRequirements2KHR(
+    VkDevice device,
+    const VkBufferMemoryRequirementsInfo2* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_get_memory_requirements2"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetBufferMemoryRequirements2KHR", "VK_KHR_get_memory_requirements2");
+    }
+    AEMU_SCOPED_TRACE("vkGetBufferMemoryRequirements2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    resources->on_vkGetBufferMemoryRequirements2KHR(vkEnc, device, pInfo, pMemoryRequirements);
+}
+static void entry_vkGetImageSparseMemoryRequirements2KHR(
+    VkDevice device,
+    const VkImageSparseMemoryRequirementsInfo2* pInfo,
+    uint32_t* pSparseMemoryRequirementCount,
+    VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
+{
+    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetImageSparseMemoryRequirements2KHR(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements, true /* do lock */);
+}
+static void dynCheck_entry_vkGetImageSparseMemoryRequirements2KHR(
+    VkDevice device,
+    const VkImageSparseMemoryRequirementsInfo2* pInfo,
+    uint32_t* pSparseMemoryRequirementCount,
+    VkSparseImageMemoryRequirements2* pSparseMemoryRequirements)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_get_memory_requirements2"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetImageSparseMemoryRequirements2KHR", "VK_KHR_get_memory_requirements2");
+    }
+    AEMU_SCOPED_TRACE("vkGetImageSparseMemoryRequirements2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetImageSparseMemoryRequirements2KHR(device, pInfo, pSparseMemoryRequirementCount, pSparseMemoryRequirements, true /* do lock */);
+}
+#endif
+#ifdef VK_KHR_image_format_list
+#endif
+#ifdef VK_KHR_sampler_ycbcr_conversion
+static VkResult entry_vkCreateSamplerYcbcrConversionKHR(
+    VkDevice device,
+    const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSamplerYcbcrConversion* pYcbcrConversion)
+{
+    AEMU_SCOPED_TRACE("vkCreateSamplerYcbcrConversionKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateSamplerYcbcrConversionKHR_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkCreateSamplerYcbcrConversionKHR_VkResult_return = resources->on_vkCreateSamplerYcbcrConversionKHR(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pYcbcrConversion);
+    return vkCreateSamplerYcbcrConversionKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreateSamplerYcbcrConversionKHR(
+    VkDevice device,
+    const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSamplerYcbcrConversion* pYcbcrConversion)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_sampler_ycbcr_conversion"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreateSamplerYcbcrConversionKHR", "VK_KHR_sampler_ycbcr_conversion");
+    }
+    AEMU_SCOPED_TRACE("vkCreateSamplerYcbcrConversionKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateSamplerYcbcrConversionKHR_VkResult_return = (VkResult)0;
+    vkCreateSamplerYcbcrConversionKHR_VkResult_return = resources->on_vkCreateSamplerYcbcrConversionKHR(vkEnc, VK_SUCCESS, device, pCreateInfo, pAllocator, pYcbcrConversion);
+    return vkCreateSamplerYcbcrConversionKHR_VkResult_return;
+}
+static void entry_vkDestroySamplerYcbcrConversionKHR(
+    VkDevice device,
+    VkSamplerYcbcrConversion ycbcrConversion,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroySamplerYcbcrConversionKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    auto resources = ResourceTracker::get();
+    resources->on_vkDestroySamplerYcbcrConversionKHR(vkEnc, device, ycbcrConversion, pAllocator);
+}
+static void dynCheck_entry_vkDestroySamplerYcbcrConversionKHR(
+    VkDevice device,
+    VkSamplerYcbcrConversion ycbcrConversion,
+    const VkAllocationCallbacks* pAllocator)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_sampler_ycbcr_conversion"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkDestroySamplerYcbcrConversionKHR", "VK_KHR_sampler_ycbcr_conversion");
+    }
+    AEMU_SCOPED_TRACE("vkDestroySamplerYcbcrConversionKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    resources->on_vkDestroySamplerYcbcrConversionKHR(vkEnc, device, ycbcrConversion, pAllocator);
+}
+#endif
+#ifdef VK_KHR_bind_memory2
+static VkResult entry_vkBindBufferMemory2KHR(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindBufferMemoryInfo* pBindInfos)
+{
+    AEMU_SCOPED_TRACE("vkBindBufferMemory2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkBindBufferMemory2KHR_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkBindBufferMemory2KHR_VkResult_return = resources->on_vkBindBufferMemory2KHR(vkEnc, VK_SUCCESS, device, bindInfoCount, pBindInfos);
+    return vkBindBufferMemory2KHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkBindBufferMemory2KHR(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindBufferMemoryInfo* pBindInfos)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_bind_memory2"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkBindBufferMemory2KHR", "VK_KHR_bind_memory2");
+    }
+    AEMU_SCOPED_TRACE("vkBindBufferMemory2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkBindBufferMemory2KHR_VkResult_return = (VkResult)0;
+    vkBindBufferMemory2KHR_VkResult_return = resources->on_vkBindBufferMemory2KHR(vkEnc, VK_SUCCESS, device, bindInfoCount, pBindInfos);
+    return vkBindBufferMemory2KHR_VkResult_return;
+}
+static VkResult entry_vkBindImageMemory2KHR(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindImageMemoryInfo* pBindInfos)
+{
+    AEMU_SCOPED_TRACE("vkBindImageMemory2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkBindImageMemory2KHR_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkBindImageMemory2KHR_VkResult_return = resources->on_vkBindImageMemory2KHR(vkEnc, VK_SUCCESS, device, bindInfoCount, pBindInfos);
+    return vkBindImageMemory2KHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkBindImageMemory2KHR(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindImageMemoryInfo* pBindInfos)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_bind_memory2"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkBindImageMemory2KHR", "VK_KHR_bind_memory2");
+    }
+    AEMU_SCOPED_TRACE("vkBindImageMemory2KHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkBindImageMemory2KHR_VkResult_return = (VkResult)0;
+    vkBindImageMemory2KHR_VkResult_return = resources->on_vkBindImageMemory2KHR(vkEnc, VK_SUCCESS, device, bindInfoCount, pBindInfos);
+    return vkBindImageMemory2KHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_portability_subset
+#endif
+#ifdef VK_KHR_maintenance3
+static void entry_vkGetDescriptorSetLayoutSupportKHR(
+    VkDevice device,
+    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+    VkDescriptorSetLayoutSupport* pSupport)
+{
+    AEMU_SCOPED_TRACE("vkGetDescriptorSetLayoutSupportKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetDescriptorSetLayoutSupportKHR(device, pCreateInfo, pSupport, true /* do lock */);
+}
+static void dynCheck_entry_vkGetDescriptorSetLayoutSupportKHR(
+    VkDevice device,
+    const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+    VkDescriptorSetLayoutSupport* pSupport)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_maintenance3"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetDescriptorSetLayoutSupportKHR", "VK_KHR_maintenance3");
+    }
+    AEMU_SCOPED_TRACE("vkGetDescriptorSetLayoutSupportKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetDescriptorSetLayoutSupportKHR(device, pCreateInfo, pSupport, true /* do lock */);
+}
+#endif
+#ifdef VK_KHR_draw_indirect_count
+static void entry_vkCmdDrawIndirectCountKHR(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkBuffer countBuffer,
+    VkDeviceSize countBufferOffset,
+    uint32_t maxDrawCount,
+    uint32_t stride)
+{
+    AEMU_SCOPED_TRACE("vkCmdDrawIndirectCountKHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDrawIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride, true /* do lock */);
+}
+static void entry_vkCmdDrawIndexedIndirectCountKHR(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkBuffer countBuffer,
+    VkDeviceSize countBufferOffset,
+    uint32_t maxDrawCount,
+    uint32_t stride)
+{
+    AEMU_SCOPED_TRACE("vkCmdDrawIndexedIndirectCountKHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDrawIndexedIndirectCountKHR(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride, true /* do lock */);
+}
+#endif
+#ifdef VK_KHR_shader_subgroup_extended_types
+#endif
+#ifdef VK_KHR_8bit_storage
+#endif
+#ifdef VK_KHR_shader_atomic_int64
+#endif
+#ifdef VK_KHR_shader_clock
+#endif
+#ifdef VK_KHR_driver_properties
+#endif
+#ifdef VK_KHR_shader_float_controls
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+static VkResult entry_vkGetSemaphoreCounterValueKHR(
+    VkDevice device,
+    VkSemaphore semaphore,
+    uint64_t* pValue)
+{
+    AEMU_SCOPED_TRACE("vkGetSemaphoreCounterValueKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetSemaphoreCounterValueKHR_VkResult_return = (VkResult)0;
+    vkGetSemaphoreCounterValueKHR_VkResult_return = vkEnc->vkGetSemaphoreCounterValueKHR(device, semaphore, pValue, true /* do lock */);
+    return vkGetSemaphoreCounterValueKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetSemaphoreCounterValueKHR(
+    VkDevice device,
+    VkSemaphore semaphore,
+    uint64_t* pValue)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_timeline_semaphore"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetSemaphoreCounterValueKHR", "VK_KHR_timeline_semaphore");
+    }
+    AEMU_SCOPED_TRACE("vkGetSemaphoreCounterValueKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetSemaphoreCounterValueKHR_VkResult_return = (VkResult)0;
+    vkGetSemaphoreCounterValueKHR_VkResult_return = vkEnc->vkGetSemaphoreCounterValueKHR(device, semaphore, pValue, true /* do lock */);
+    return vkGetSemaphoreCounterValueKHR_VkResult_return;
+}
+static VkResult entry_vkWaitSemaphoresKHR(
+    VkDevice device,
+    const VkSemaphoreWaitInfo* pWaitInfo,
+    uint64_t timeout)
+{
+    AEMU_SCOPED_TRACE("vkWaitSemaphoresKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkWaitSemaphoresKHR_VkResult_return = (VkResult)0;
+    vkWaitSemaphoresKHR_VkResult_return = vkEnc->vkWaitSemaphoresKHR(device, pWaitInfo, timeout, true /* do lock */);
+    return vkWaitSemaphoresKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkWaitSemaphoresKHR(
+    VkDevice device,
+    const VkSemaphoreWaitInfo* pWaitInfo,
+    uint64_t timeout)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_timeline_semaphore"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkWaitSemaphoresKHR", "VK_KHR_timeline_semaphore");
+    }
+    AEMU_SCOPED_TRACE("vkWaitSemaphoresKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkWaitSemaphoresKHR_VkResult_return = (VkResult)0;
+    vkWaitSemaphoresKHR_VkResult_return = vkEnc->vkWaitSemaphoresKHR(device, pWaitInfo, timeout, true /* do lock */);
+    return vkWaitSemaphoresKHR_VkResult_return;
+}
+static VkResult entry_vkSignalSemaphoreKHR(
+    VkDevice device,
+    const VkSemaphoreSignalInfo* pSignalInfo)
+{
+    AEMU_SCOPED_TRACE("vkSignalSemaphoreKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkSignalSemaphoreKHR_VkResult_return = (VkResult)0;
+    vkSignalSemaphoreKHR_VkResult_return = vkEnc->vkSignalSemaphoreKHR(device, pSignalInfo, true /* do lock */);
+    return vkSignalSemaphoreKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkSignalSemaphoreKHR(
+    VkDevice device,
+    const VkSemaphoreSignalInfo* pSignalInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_timeline_semaphore"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkSignalSemaphoreKHR", "VK_KHR_timeline_semaphore");
+    }
+    AEMU_SCOPED_TRACE("vkSignalSemaphoreKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkSignalSemaphoreKHR_VkResult_return = (VkResult)0;
+    vkSignalSemaphoreKHR_VkResult_return = vkEnc->vkSignalSemaphoreKHR(device, pSignalInfo, true /* do lock */);
+    return vkSignalSemaphoreKHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+static VkResult entry_vkGetPhysicalDeviceFragmentShadingRatesKHR(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pFragmentShadingRateCount,
+    VkPhysicalDeviceFragmentShadingRateKHR* pFragmentShadingRates)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceFragmentShadingRatesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceFragmentShadingRatesKHR_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceFragmentShadingRatesKHR_VkResult_return = vkEnc->vkGetPhysicalDeviceFragmentShadingRatesKHR(physicalDevice, pFragmentShadingRateCount, pFragmentShadingRates, true /* do lock */);
+    return vkGetPhysicalDeviceFragmentShadingRatesKHR_VkResult_return;
+}
+static void entry_vkCmdSetFragmentShadingRateKHR(
+    VkCommandBuffer commandBuffer,
+    const VkExtent2D* pFragmentSize,
+    const VkFragmentShadingRateCombinerOpKHR combinerOps[2])
+{
+    AEMU_SCOPED_TRACE("vkCmdSetFragmentShadingRateKHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetFragmentShadingRateKHR(commandBuffer, pFragmentSize, combinerOps, true /* do lock */);
+}
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+#endif
+#ifdef VK_KHR_buffer_device_address
+static VkDeviceAddress entry_vkGetBufferDeviceAddressKHR(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo)
+{
+    AEMU_SCOPED_TRACE("vkGetBufferDeviceAddressKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkDeviceAddress vkGetBufferDeviceAddressKHR_VkDeviceAddress_return = (VkDeviceAddress)0;
+    vkGetBufferDeviceAddressKHR_VkDeviceAddress_return = vkEnc->vkGetBufferDeviceAddressKHR(device, pInfo, true /* do lock */);
+    return vkGetBufferDeviceAddressKHR_VkDeviceAddress_return;
+}
+static VkDeviceAddress dynCheck_entry_vkGetBufferDeviceAddressKHR(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_buffer_device_address"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetBufferDeviceAddressKHR", "VK_KHR_buffer_device_address");
+    }
+    AEMU_SCOPED_TRACE("vkGetBufferDeviceAddressKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkDeviceAddress vkGetBufferDeviceAddressKHR_VkDeviceAddress_return = (VkDeviceAddress)0;
+    vkGetBufferDeviceAddressKHR_VkDeviceAddress_return = vkEnc->vkGetBufferDeviceAddressKHR(device, pInfo, true /* do lock */);
+    return vkGetBufferDeviceAddressKHR_VkDeviceAddress_return;
+}
+static uint64_t entry_vkGetBufferOpaqueCaptureAddressKHR(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo)
+{
+    AEMU_SCOPED_TRACE("vkGetBufferOpaqueCaptureAddressKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    uint64_t vkGetBufferOpaqueCaptureAddressKHR_uint64_t_return = (uint64_t)0;
+    vkGetBufferOpaqueCaptureAddressKHR_uint64_t_return = vkEnc->vkGetBufferOpaqueCaptureAddressKHR(device, pInfo, true /* do lock */);
+    return vkGetBufferOpaqueCaptureAddressKHR_uint64_t_return;
+}
+static uint64_t dynCheck_entry_vkGetBufferOpaqueCaptureAddressKHR(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_buffer_device_address"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetBufferOpaqueCaptureAddressKHR", "VK_KHR_buffer_device_address");
+    }
+    AEMU_SCOPED_TRACE("vkGetBufferOpaqueCaptureAddressKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    uint64_t vkGetBufferOpaqueCaptureAddressKHR_uint64_t_return = (uint64_t)0;
+    vkGetBufferOpaqueCaptureAddressKHR_uint64_t_return = vkEnc->vkGetBufferOpaqueCaptureAddressKHR(device, pInfo, true /* do lock */);
+    return vkGetBufferOpaqueCaptureAddressKHR_uint64_t_return;
+}
+static uint64_t entry_vkGetDeviceMemoryOpaqueCaptureAddressKHR(
+    VkDevice device,
+    const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo)
+{
+    AEMU_SCOPED_TRACE("vkGetDeviceMemoryOpaqueCaptureAddressKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    uint64_t vkGetDeviceMemoryOpaqueCaptureAddressKHR_uint64_t_return = (uint64_t)0;
+    vkGetDeviceMemoryOpaqueCaptureAddressKHR_uint64_t_return = vkEnc->vkGetDeviceMemoryOpaqueCaptureAddressKHR(device, pInfo, true /* do lock */);
+    return vkGetDeviceMemoryOpaqueCaptureAddressKHR_uint64_t_return;
+}
+static uint64_t dynCheck_entry_vkGetDeviceMemoryOpaqueCaptureAddressKHR(
+    VkDevice device,
+    const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_buffer_device_address"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetDeviceMemoryOpaqueCaptureAddressKHR", "VK_KHR_buffer_device_address");
+    }
+    AEMU_SCOPED_TRACE("vkGetDeviceMemoryOpaqueCaptureAddressKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    uint64_t vkGetDeviceMemoryOpaqueCaptureAddressKHR_uint64_t_return = (uint64_t)0;
+    vkGetDeviceMemoryOpaqueCaptureAddressKHR_uint64_t_return = vkEnc->vkGetDeviceMemoryOpaqueCaptureAddressKHR(device, pInfo, true /* do lock */);
+    return vkGetDeviceMemoryOpaqueCaptureAddressKHR_uint64_t_return;
+}
+#endif
+#ifdef VK_KHR_deferred_host_operations
+static VkResult entry_vkCreateDeferredOperationKHR(
+    VkDevice device,
+    const VkAllocationCallbacks* pAllocator,
+    VkDeferredOperationKHR* pDeferredOperation)
+{
+    AEMU_SCOPED_TRACE("vkCreateDeferredOperationKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateDeferredOperationKHR_VkResult_return = (VkResult)0;
+    vkCreateDeferredOperationKHR_VkResult_return = vkEnc->vkCreateDeferredOperationKHR(device, pAllocator, pDeferredOperation, true /* do lock */);
+    return vkCreateDeferredOperationKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreateDeferredOperationKHR(
+    VkDevice device,
+    const VkAllocationCallbacks* pAllocator,
+    VkDeferredOperationKHR* pDeferredOperation)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_deferred_host_operations"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreateDeferredOperationKHR", "VK_KHR_deferred_host_operations");
+    }
+    AEMU_SCOPED_TRACE("vkCreateDeferredOperationKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateDeferredOperationKHR_VkResult_return = (VkResult)0;
+    vkCreateDeferredOperationKHR_VkResult_return = vkEnc->vkCreateDeferredOperationKHR(device, pAllocator, pDeferredOperation, true /* do lock */);
+    return vkCreateDeferredOperationKHR_VkResult_return;
+}
+static void entry_vkDestroyDeferredOperationKHR(
+    VkDevice device,
+    VkDeferredOperationKHR operation,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyDeferredOperationKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyDeferredOperationKHR(device, operation, pAllocator, true /* do lock */);
+}
+static void dynCheck_entry_vkDestroyDeferredOperationKHR(
+    VkDevice device,
+    VkDeferredOperationKHR operation,
+    const VkAllocationCallbacks* pAllocator)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_deferred_host_operations"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkDestroyDeferredOperationKHR", "VK_KHR_deferred_host_operations");
+    }
+    AEMU_SCOPED_TRACE("vkDestroyDeferredOperationKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyDeferredOperationKHR(device, operation, pAllocator, true /* do lock */);
+}
+static uint32_t entry_vkGetDeferredOperationMaxConcurrencyKHR(
+    VkDevice device,
+    VkDeferredOperationKHR operation)
+{
+    AEMU_SCOPED_TRACE("vkGetDeferredOperationMaxConcurrencyKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    uint32_t vkGetDeferredOperationMaxConcurrencyKHR_uint32_t_return = (uint32_t)0;
+    vkGetDeferredOperationMaxConcurrencyKHR_uint32_t_return = vkEnc->vkGetDeferredOperationMaxConcurrencyKHR(device, operation, true /* do lock */);
+    return vkGetDeferredOperationMaxConcurrencyKHR_uint32_t_return;
+}
+static uint32_t dynCheck_entry_vkGetDeferredOperationMaxConcurrencyKHR(
+    VkDevice device,
+    VkDeferredOperationKHR operation)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_deferred_host_operations"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetDeferredOperationMaxConcurrencyKHR", "VK_KHR_deferred_host_operations");
+    }
+    AEMU_SCOPED_TRACE("vkGetDeferredOperationMaxConcurrencyKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    uint32_t vkGetDeferredOperationMaxConcurrencyKHR_uint32_t_return = (uint32_t)0;
+    vkGetDeferredOperationMaxConcurrencyKHR_uint32_t_return = vkEnc->vkGetDeferredOperationMaxConcurrencyKHR(device, operation, true /* do lock */);
+    return vkGetDeferredOperationMaxConcurrencyKHR_uint32_t_return;
+}
+static VkResult entry_vkGetDeferredOperationResultKHR(
+    VkDevice device,
+    VkDeferredOperationKHR operation)
+{
+    AEMU_SCOPED_TRACE("vkGetDeferredOperationResultKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetDeferredOperationResultKHR_VkResult_return = (VkResult)0;
+    vkGetDeferredOperationResultKHR_VkResult_return = vkEnc->vkGetDeferredOperationResultKHR(device, operation, true /* do lock */);
+    return vkGetDeferredOperationResultKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetDeferredOperationResultKHR(
+    VkDevice device,
+    VkDeferredOperationKHR operation)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_deferred_host_operations"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetDeferredOperationResultKHR", "VK_KHR_deferred_host_operations");
+    }
+    AEMU_SCOPED_TRACE("vkGetDeferredOperationResultKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetDeferredOperationResultKHR_VkResult_return = (VkResult)0;
+    vkGetDeferredOperationResultKHR_VkResult_return = vkEnc->vkGetDeferredOperationResultKHR(device, operation, true /* do lock */);
+    return vkGetDeferredOperationResultKHR_VkResult_return;
+}
+static VkResult entry_vkDeferredOperationJoinKHR(
+    VkDevice device,
+    VkDeferredOperationKHR operation)
+{
+    AEMU_SCOPED_TRACE("vkDeferredOperationJoinKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkDeferredOperationJoinKHR_VkResult_return = (VkResult)0;
+    vkDeferredOperationJoinKHR_VkResult_return = vkEnc->vkDeferredOperationJoinKHR(device, operation, true /* do lock */);
+    return vkDeferredOperationJoinKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkDeferredOperationJoinKHR(
+    VkDevice device,
+    VkDeferredOperationKHR operation)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_deferred_host_operations"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkDeferredOperationJoinKHR", "VK_KHR_deferred_host_operations");
+    }
+    AEMU_SCOPED_TRACE("vkDeferredOperationJoinKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkDeferredOperationJoinKHR_VkResult_return = (VkResult)0;
+    vkDeferredOperationJoinKHR_VkResult_return = vkEnc->vkDeferredOperationJoinKHR(device, operation, true /* do lock */);
+    return vkDeferredOperationJoinKHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+static VkResult entry_vkGetPipelineExecutablePropertiesKHR(
+    VkDevice device,
+    const VkPipelineInfoKHR* pPipelineInfo,
+    uint32_t* pExecutableCount,
+    VkPipelineExecutablePropertiesKHR* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPipelineExecutablePropertiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPipelineExecutablePropertiesKHR_VkResult_return = (VkResult)0;
+    vkGetPipelineExecutablePropertiesKHR_VkResult_return = vkEnc->vkGetPipelineExecutablePropertiesKHR(device, pPipelineInfo, pExecutableCount, pProperties, true /* do lock */);
+    return vkGetPipelineExecutablePropertiesKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetPipelineExecutablePropertiesKHR(
+    VkDevice device,
+    const VkPipelineInfoKHR* pPipelineInfo,
+    uint32_t* pExecutableCount,
+    VkPipelineExecutablePropertiesKHR* pProperties)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_pipeline_executable_properties"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetPipelineExecutablePropertiesKHR", "VK_KHR_pipeline_executable_properties");
+    }
+    AEMU_SCOPED_TRACE("vkGetPipelineExecutablePropertiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPipelineExecutablePropertiesKHR_VkResult_return = (VkResult)0;
+    vkGetPipelineExecutablePropertiesKHR_VkResult_return = vkEnc->vkGetPipelineExecutablePropertiesKHR(device, pPipelineInfo, pExecutableCount, pProperties, true /* do lock */);
+    return vkGetPipelineExecutablePropertiesKHR_VkResult_return;
+}
+static VkResult entry_vkGetPipelineExecutableStatisticsKHR(
+    VkDevice device,
+    const VkPipelineExecutableInfoKHR* pExecutableInfo,
+    uint32_t* pStatisticCount,
+    VkPipelineExecutableStatisticKHR* pStatistics)
+{
+    AEMU_SCOPED_TRACE("vkGetPipelineExecutableStatisticsKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPipelineExecutableStatisticsKHR_VkResult_return = (VkResult)0;
+    vkGetPipelineExecutableStatisticsKHR_VkResult_return = vkEnc->vkGetPipelineExecutableStatisticsKHR(device, pExecutableInfo, pStatisticCount, pStatistics, true /* do lock */);
+    return vkGetPipelineExecutableStatisticsKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetPipelineExecutableStatisticsKHR(
+    VkDevice device,
+    const VkPipelineExecutableInfoKHR* pExecutableInfo,
+    uint32_t* pStatisticCount,
+    VkPipelineExecutableStatisticKHR* pStatistics)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_pipeline_executable_properties"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetPipelineExecutableStatisticsKHR", "VK_KHR_pipeline_executable_properties");
+    }
+    AEMU_SCOPED_TRACE("vkGetPipelineExecutableStatisticsKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPipelineExecutableStatisticsKHR_VkResult_return = (VkResult)0;
+    vkGetPipelineExecutableStatisticsKHR_VkResult_return = vkEnc->vkGetPipelineExecutableStatisticsKHR(device, pExecutableInfo, pStatisticCount, pStatistics, true /* do lock */);
+    return vkGetPipelineExecutableStatisticsKHR_VkResult_return;
+}
+static VkResult entry_vkGetPipelineExecutableInternalRepresentationsKHR(
+    VkDevice device,
+    const VkPipelineExecutableInfoKHR* pExecutableInfo,
+    uint32_t* pInternalRepresentationCount,
+    VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
+{
+    AEMU_SCOPED_TRACE("vkGetPipelineExecutableInternalRepresentationsKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPipelineExecutableInternalRepresentationsKHR_VkResult_return = (VkResult)0;
+    vkGetPipelineExecutableInternalRepresentationsKHR_VkResult_return = vkEnc->vkGetPipelineExecutableInternalRepresentationsKHR(device, pExecutableInfo, pInternalRepresentationCount, pInternalRepresentations, true /* do lock */);
+    return vkGetPipelineExecutableInternalRepresentationsKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetPipelineExecutableInternalRepresentationsKHR(
+    VkDevice device,
+    const VkPipelineExecutableInfoKHR* pExecutableInfo,
+    uint32_t* pInternalRepresentationCount,
+    VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_pipeline_executable_properties"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetPipelineExecutableInternalRepresentationsKHR", "VK_KHR_pipeline_executable_properties");
+    }
+    AEMU_SCOPED_TRACE("vkGetPipelineExecutableInternalRepresentationsKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPipelineExecutableInternalRepresentationsKHR_VkResult_return = (VkResult)0;
+    vkGetPipelineExecutableInternalRepresentationsKHR_VkResult_return = vkEnc->vkGetPipelineExecutableInternalRepresentationsKHR(device, pExecutableInfo, pInternalRepresentationCount, pInternalRepresentations, true /* do lock */);
+    return vkGetPipelineExecutableInternalRepresentationsKHR_VkResult_return;
+}
+#endif
+#ifdef VK_KHR_pipeline_library
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+static void entry_vkCmdCopyBuffer2KHR(
+    VkCommandBuffer commandBuffer,
+    const VkCopyBufferInfo2KHR* pCopyBufferInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdCopyBuffer2KHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdCopyBuffer2KHR(commandBuffer, pCopyBufferInfo, true /* do lock */);
+}
+static void entry_vkCmdCopyImage2KHR(
+    VkCommandBuffer commandBuffer,
+    const VkCopyImageInfo2KHR* pCopyImageInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdCopyImage2KHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdCopyImage2KHR(commandBuffer, pCopyImageInfo, true /* do lock */);
+}
+static void entry_vkCmdCopyBufferToImage2KHR(
+    VkCommandBuffer commandBuffer,
+    const VkCopyBufferToImageInfo2KHR* pCopyBufferToImageInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdCopyBufferToImage2KHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdCopyBufferToImage2KHR(commandBuffer, pCopyBufferToImageInfo, true /* do lock */);
+}
+static void entry_vkCmdCopyImageToBuffer2KHR(
+    VkCommandBuffer commandBuffer,
+    const VkCopyImageToBufferInfo2KHR* pCopyImageToBufferInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdCopyImageToBuffer2KHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdCopyImageToBuffer2KHR(commandBuffer, pCopyImageToBufferInfo, true /* do lock */);
+}
+static void entry_vkCmdBlitImage2KHR(
+    VkCommandBuffer commandBuffer,
+    const VkBlitImageInfo2KHR* pBlitImageInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdBlitImage2KHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBlitImage2KHR(commandBuffer, pBlitImageInfo, true /* do lock */);
+}
+static void entry_vkCmdResolveImage2KHR(
+    VkCommandBuffer commandBuffer,
+    const VkResolveImageInfo2KHR* pResolveImageInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdResolveImage2KHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdResolveImage2KHR(commandBuffer, pResolveImageInfo, true /* do lock */);
+}
+#endif
+#ifdef VK_ANDROID_native_buffer
+static VkResult entry_vkGetSwapchainGrallocUsageANDROID(
+    VkDevice device,
+    VkFormat format,
+    VkImageUsageFlags imageUsage,
+    int* grallocUsage)
+{
+    AEMU_SCOPED_TRACE("vkGetSwapchainGrallocUsageANDROID");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetSwapchainGrallocUsageANDROID_VkResult_return = (VkResult)0;
+    vkGetSwapchainGrallocUsageANDROID_VkResult_return = vkEnc->vkGetSwapchainGrallocUsageANDROID(device, format, imageUsage, grallocUsage, true /* do lock */);
+    return vkGetSwapchainGrallocUsageANDROID_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetSwapchainGrallocUsageANDROID(
+    VkDevice device,
+    VkFormat format,
+    VkImageUsageFlags imageUsage,
+    int* grallocUsage)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_ANDROID_native_buffer"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetSwapchainGrallocUsageANDROID", "VK_ANDROID_native_buffer");
+    }
+    AEMU_SCOPED_TRACE("vkGetSwapchainGrallocUsageANDROID");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetSwapchainGrallocUsageANDROID_VkResult_return = (VkResult)0;
+    vkGetSwapchainGrallocUsageANDROID_VkResult_return = vkEnc->vkGetSwapchainGrallocUsageANDROID(device, format, imageUsage, grallocUsage, true /* do lock */);
+    return vkGetSwapchainGrallocUsageANDROID_VkResult_return;
+}
+static VkResult entry_vkAcquireImageANDROID(
+    VkDevice device,
+    VkImage image,
+    int nativeFenceFd,
+    VkSemaphore semaphore,
+    VkFence fence)
+{
+    AEMU_SCOPED_TRACE("vkAcquireImageANDROID");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkAcquireImageANDROID_VkResult_return = (VkResult)0;
+    vkAcquireImageANDROID_VkResult_return = vkEnc->vkAcquireImageANDROID(device, image, nativeFenceFd, semaphore, fence, true /* do lock */);
+    return vkAcquireImageANDROID_VkResult_return;
+}
+static VkResult dynCheck_entry_vkAcquireImageANDROID(
+    VkDevice device,
+    VkImage image,
+    int nativeFenceFd,
+    VkSemaphore semaphore,
+    VkFence fence)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_ANDROID_native_buffer"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkAcquireImageANDROID", "VK_ANDROID_native_buffer");
+    }
+    AEMU_SCOPED_TRACE("vkAcquireImageANDROID");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkAcquireImageANDROID_VkResult_return = (VkResult)0;
+    vkAcquireImageANDROID_VkResult_return = vkEnc->vkAcquireImageANDROID(device, image, nativeFenceFd, semaphore, fence, true /* do lock */);
+    return vkAcquireImageANDROID_VkResult_return;
+}
+static VkResult entry_vkQueueSignalReleaseImageANDROID(
+    VkQueue queue,
+    uint32_t waitSemaphoreCount,
+    const VkSemaphore* pWaitSemaphores,
+    VkImage image,
+    int* pNativeFenceFd)
+{
+    AEMU_SCOPED_TRACE("vkQueueSignalReleaseImageANDROID");
+    auto vkEnc = ResourceTracker::getQueueEncoder(queue);
+    VkResult vkQueueSignalReleaseImageANDROID_VkResult_return = (VkResult)0;
+    vkQueueSignalReleaseImageANDROID_VkResult_return = vkEnc->vkQueueSignalReleaseImageANDROID(queue, waitSemaphoreCount, pWaitSemaphores, image, pNativeFenceFd, true /* do lock */);
+    return vkQueueSignalReleaseImageANDROID_VkResult_return;
+}
+#endif
+#ifdef VK_EXT_debug_report
+static VkResult entry_vkCreateDebugReportCallbackEXT(
+    VkInstance instance,
+    const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkDebugReportCallbackEXT* pCallback)
+{
+    AEMU_SCOPED_TRACE("vkCreateDebugReportCallbackEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateDebugReportCallbackEXT_VkResult_return = (VkResult)0;
+    vkCreateDebugReportCallbackEXT_VkResult_return = vkEnc->vkCreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pCallback, true /* do lock */);
+    return vkCreateDebugReportCallbackEXT_VkResult_return;
+}
+static void entry_vkDestroyDebugReportCallbackEXT(
+    VkInstance instance,
+    VkDebugReportCallbackEXT callback,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyDebugReportCallbackEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyDebugReportCallbackEXT(instance, callback, pAllocator, true /* do lock */);
+}
+static void entry_vkDebugReportMessageEXT(
+    VkInstance instance,
+    VkDebugReportFlagsEXT flags,
+    VkDebugReportObjectTypeEXT objectType,
+    uint64_t object,
+    size_t location,
+    int32_t messageCode,
+    const char* pLayerPrefix,
+    const char* pMessage)
+{
+    AEMU_SCOPED_TRACE("vkDebugReportMessageEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDebugReportMessageEXT(instance, flags, objectType, object, location, messageCode, pLayerPrefix, pMessage, true /* do lock */);
+}
+#endif
+#ifdef VK_NV_glsl_shader
+#endif
+#ifdef VK_EXT_depth_range_unrestricted
+#endif
+#ifdef VK_IMG_filter_cubic
+#endif
+#ifdef VK_AMD_rasterization_order
+#endif
+#ifdef VK_AMD_shader_trinary_minmax
+#endif
+#ifdef VK_AMD_shader_explicit_vertex_parameter
+#endif
+#ifdef VK_EXT_debug_marker
+static VkResult entry_vkDebugMarkerSetObjectTagEXT(
+    VkDevice device,
+    const VkDebugMarkerObjectTagInfoEXT* pTagInfo)
+{
+    AEMU_SCOPED_TRACE("vkDebugMarkerSetObjectTagEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkDebugMarkerSetObjectTagEXT_VkResult_return = (VkResult)0;
+    vkDebugMarkerSetObjectTagEXT_VkResult_return = vkEnc->vkDebugMarkerSetObjectTagEXT(device, pTagInfo, true /* do lock */);
+    return vkDebugMarkerSetObjectTagEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkDebugMarkerSetObjectTagEXT(
+    VkDevice device,
+    const VkDebugMarkerObjectTagInfoEXT* pTagInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_debug_marker"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkDebugMarkerSetObjectTagEXT", "VK_EXT_debug_marker");
+    }
+    AEMU_SCOPED_TRACE("vkDebugMarkerSetObjectTagEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkDebugMarkerSetObjectTagEXT_VkResult_return = (VkResult)0;
+    vkDebugMarkerSetObjectTagEXT_VkResult_return = vkEnc->vkDebugMarkerSetObjectTagEXT(device, pTagInfo, true /* do lock */);
+    return vkDebugMarkerSetObjectTagEXT_VkResult_return;
+}
+static VkResult entry_vkDebugMarkerSetObjectNameEXT(
+    VkDevice device,
+    const VkDebugMarkerObjectNameInfoEXT* pNameInfo)
+{
+    AEMU_SCOPED_TRACE("vkDebugMarkerSetObjectNameEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkDebugMarkerSetObjectNameEXT_VkResult_return = (VkResult)0;
+    vkDebugMarkerSetObjectNameEXT_VkResult_return = vkEnc->vkDebugMarkerSetObjectNameEXT(device, pNameInfo, true /* do lock */);
+    return vkDebugMarkerSetObjectNameEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkDebugMarkerSetObjectNameEXT(
+    VkDevice device,
+    const VkDebugMarkerObjectNameInfoEXT* pNameInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_debug_marker"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkDebugMarkerSetObjectNameEXT", "VK_EXT_debug_marker");
+    }
+    AEMU_SCOPED_TRACE("vkDebugMarkerSetObjectNameEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkDebugMarkerSetObjectNameEXT_VkResult_return = (VkResult)0;
+    vkDebugMarkerSetObjectNameEXT_VkResult_return = vkEnc->vkDebugMarkerSetObjectNameEXT(device, pNameInfo, true /* do lock */);
+    return vkDebugMarkerSetObjectNameEXT_VkResult_return;
+}
+static void entry_vkCmdDebugMarkerBeginEXT(
+    VkCommandBuffer commandBuffer,
+    const VkDebugMarkerMarkerInfoEXT* pMarkerInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdDebugMarkerBeginEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDebugMarkerBeginEXT(commandBuffer, pMarkerInfo, true /* do lock */);
+}
+static void entry_vkCmdDebugMarkerEndEXT(
+    VkCommandBuffer commandBuffer)
+{
+    AEMU_SCOPED_TRACE("vkCmdDebugMarkerEndEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDebugMarkerEndEXT(commandBuffer, true /* do lock */);
+}
+static void entry_vkCmdDebugMarkerInsertEXT(
+    VkCommandBuffer commandBuffer,
+    const VkDebugMarkerMarkerInfoEXT* pMarkerInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdDebugMarkerInsertEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDebugMarkerInsertEXT(commandBuffer, pMarkerInfo, true /* do lock */);
+}
+#endif
+#ifdef VK_AMD_gcn_shader
+#endif
+#ifdef VK_NV_dedicated_allocation
+#endif
+#ifdef VK_EXT_transform_feedback
+static void entry_vkCmdBindTransformFeedbackBuffersEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstBinding,
+    uint32_t bindingCount,
+    const VkBuffer* pBuffers,
+    const VkDeviceSize* pOffsets,
+    const VkDeviceSize* pSizes)
+{
+    AEMU_SCOPED_TRACE("vkCmdBindTransformFeedbackBuffersEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBindTransformFeedbackBuffersEXT(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets, pSizes, true /* do lock */);
+}
+static void entry_vkCmdBeginTransformFeedbackEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstCounterBuffer,
+    uint32_t counterBufferCount,
+    const VkBuffer* pCounterBuffers,
+    const VkDeviceSize* pCounterBufferOffsets)
+{
+    AEMU_SCOPED_TRACE("vkCmdBeginTransformFeedbackEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBeginTransformFeedbackEXT(commandBuffer, firstCounterBuffer, counterBufferCount, pCounterBuffers, pCounterBufferOffsets, true /* do lock */);
+}
+static void entry_vkCmdEndTransformFeedbackEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstCounterBuffer,
+    uint32_t counterBufferCount,
+    const VkBuffer* pCounterBuffers,
+    const VkDeviceSize* pCounterBufferOffsets)
+{
+    AEMU_SCOPED_TRACE("vkCmdEndTransformFeedbackEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdEndTransformFeedbackEXT(commandBuffer, firstCounterBuffer, counterBufferCount, pCounterBuffers, pCounterBufferOffsets, true /* do lock */);
+}
+static void entry_vkCmdBeginQueryIndexedEXT(
+    VkCommandBuffer commandBuffer,
+    VkQueryPool queryPool,
+    uint32_t query,
+    VkQueryControlFlags flags,
+    uint32_t index)
+{
+    AEMU_SCOPED_TRACE("vkCmdBeginQueryIndexedEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBeginQueryIndexedEXT(commandBuffer, queryPool, query, flags, index, true /* do lock */);
+}
+static void entry_vkCmdEndQueryIndexedEXT(
+    VkCommandBuffer commandBuffer,
+    VkQueryPool queryPool,
+    uint32_t query,
+    uint32_t index)
+{
+    AEMU_SCOPED_TRACE("vkCmdEndQueryIndexedEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdEndQueryIndexedEXT(commandBuffer, queryPool, query, index, true /* do lock */);
+}
+static void entry_vkCmdDrawIndirectByteCountEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t instanceCount,
+    uint32_t firstInstance,
+    VkBuffer counterBuffer,
+    VkDeviceSize counterBufferOffset,
+    uint32_t counterOffset,
+    uint32_t vertexStride)
+{
+    AEMU_SCOPED_TRACE("vkCmdDrawIndirectByteCountEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDrawIndirectByteCountEXT(commandBuffer, instanceCount, firstInstance, counterBuffer, counterBufferOffset, counterOffset, vertexStride, true /* do lock */);
+}
+#endif
+#ifdef VK_NVX_image_view_handle
+static uint32_t entry_vkGetImageViewHandleNVX(
+    VkDevice device,
+    const VkImageViewHandleInfoNVX* pInfo)
+{
+    AEMU_SCOPED_TRACE("vkGetImageViewHandleNVX");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    uint32_t vkGetImageViewHandleNVX_uint32_t_return = (uint32_t)0;
+    vkGetImageViewHandleNVX_uint32_t_return = vkEnc->vkGetImageViewHandleNVX(device, pInfo, true /* do lock */);
+    return vkGetImageViewHandleNVX_uint32_t_return;
+}
+static uint32_t dynCheck_entry_vkGetImageViewHandleNVX(
+    VkDevice device,
+    const VkImageViewHandleInfoNVX* pInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_NVX_image_view_handle"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetImageViewHandleNVX", "VK_NVX_image_view_handle");
+    }
+    AEMU_SCOPED_TRACE("vkGetImageViewHandleNVX");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    uint32_t vkGetImageViewHandleNVX_uint32_t_return = (uint32_t)0;
+    vkGetImageViewHandleNVX_uint32_t_return = vkEnc->vkGetImageViewHandleNVX(device, pInfo, true /* do lock */);
+    return vkGetImageViewHandleNVX_uint32_t_return;
+}
+static VkResult entry_vkGetImageViewAddressNVX(
+    VkDevice device,
+    VkImageView imageView,
+    VkImageViewAddressPropertiesNVX* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetImageViewAddressNVX");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetImageViewAddressNVX_VkResult_return = (VkResult)0;
+    vkGetImageViewAddressNVX_VkResult_return = vkEnc->vkGetImageViewAddressNVX(device, imageView, pProperties, true /* do lock */);
+    return vkGetImageViewAddressNVX_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetImageViewAddressNVX(
+    VkDevice device,
+    VkImageView imageView,
+    VkImageViewAddressPropertiesNVX* pProperties)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_NVX_image_view_handle"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetImageViewAddressNVX", "VK_NVX_image_view_handle");
+    }
+    AEMU_SCOPED_TRACE("vkGetImageViewAddressNVX");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetImageViewAddressNVX_VkResult_return = (VkResult)0;
+    vkGetImageViewAddressNVX_VkResult_return = vkEnc->vkGetImageViewAddressNVX(device, imageView, pProperties, true /* do lock */);
+    return vkGetImageViewAddressNVX_VkResult_return;
+}
+#endif
+#ifdef VK_AMD_draw_indirect_count
+static void entry_vkCmdDrawIndirectCountAMD(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkBuffer countBuffer,
+    VkDeviceSize countBufferOffset,
+    uint32_t maxDrawCount,
+    uint32_t stride)
+{
+    AEMU_SCOPED_TRACE("vkCmdDrawIndirectCountAMD");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDrawIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride, true /* do lock */);
+}
+static void entry_vkCmdDrawIndexedIndirectCountAMD(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkBuffer countBuffer,
+    VkDeviceSize countBufferOffset,
+    uint32_t maxDrawCount,
+    uint32_t stride)
+{
+    AEMU_SCOPED_TRACE("vkCmdDrawIndexedIndirectCountAMD");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDrawIndexedIndirectCountAMD(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride, true /* do lock */);
+}
+#endif
+#ifdef VK_AMD_negative_viewport_height
+#endif
+#ifdef VK_AMD_gpu_shader_half_float
+#endif
+#ifdef VK_AMD_shader_ballot
+#endif
+#ifdef VK_AMD_texture_gather_bias_lod
+#endif
+#ifdef VK_AMD_shader_info
+static VkResult entry_vkGetShaderInfoAMD(
+    VkDevice device,
+    VkPipeline pipeline,
+    VkShaderStageFlagBits shaderStage,
+    VkShaderInfoTypeAMD infoType,
+    size_t* pInfoSize,
+    void* pInfo)
+{
+    AEMU_SCOPED_TRACE("vkGetShaderInfoAMD");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetShaderInfoAMD_VkResult_return = (VkResult)0;
+    vkGetShaderInfoAMD_VkResult_return = vkEnc->vkGetShaderInfoAMD(device, pipeline, shaderStage, infoType, pInfoSize, pInfo, true /* do lock */);
+    return vkGetShaderInfoAMD_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetShaderInfoAMD(
+    VkDevice device,
+    VkPipeline pipeline,
+    VkShaderStageFlagBits shaderStage,
+    VkShaderInfoTypeAMD infoType,
+    size_t* pInfoSize,
+    void* pInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_AMD_shader_info"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetShaderInfoAMD", "VK_AMD_shader_info");
+    }
+    AEMU_SCOPED_TRACE("vkGetShaderInfoAMD");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetShaderInfoAMD_VkResult_return = (VkResult)0;
+    vkGetShaderInfoAMD_VkResult_return = vkEnc->vkGetShaderInfoAMD(device, pipeline, shaderStage, infoType, pInfoSize, pInfo, true /* do lock */);
+    return vkGetShaderInfoAMD_VkResult_return;
+}
+#endif
+#ifdef VK_AMD_shader_image_load_store_lod
+#endif
+#ifdef VK_GGP_stream_descriptor_surface
+static VkResult entry_vkCreateStreamDescriptorSurfaceGGP(
+    VkInstance instance,
+    const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface)
+{
+    AEMU_SCOPED_TRACE("vkCreateStreamDescriptorSurfaceGGP");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateStreamDescriptorSurfaceGGP_VkResult_return = (VkResult)0;
+    vkCreateStreamDescriptorSurfaceGGP_VkResult_return = vkEnc->vkCreateStreamDescriptorSurfaceGGP(instance, pCreateInfo, pAllocator, pSurface, true /* do lock */);
+    return vkCreateStreamDescriptorSurfaceGGP_VkResult_return;
+}
+#endif
+#ifdef VK_NV_corner_sampled_image
+#endif
+#ifdef VK_IMG_format_pvrtc
+#endif
+#ifdef VK_NV_external_memory_capabilities
+static VkResult entry_vkGetPhysicalDeviceExternalImageFormatPropertiesNV(
+    VkPhysicalDevice physicalDevice,
+    VkFormat format,
+    VkImageType type,
+    VkImageTiling tiling,
+    VkImageUsageFlags usage,
+    VkImageCreateFlags flags,
+    VkExternalMemoryHandleTypeFlagsNV externalHandleType,
+    VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceExternalImageFormatPropertiesNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceExternalImageFormatPropertiesNV_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceExternalImageFormatPropertiesNV_VkResult_return = vkEnc->vkGetPhysicalDeviceExternalImageFormatPropertiesNV(physicalDevice, format, type, tiling, usage, flags, externalHandleType, pExternalImageFormatProperties, true /* do lock */);
+    return vkGetPhysicalDeviceExternalImageFormatPropertiesNV_VkResult_return;
+}
+#endif
+#ifdef VK_NV_external_memory
+#endif
+#ifdef VK_NV_external_memory_win32
+static VkResult entry_vkGetMemoryWin32HandleNV(
+    VkDevice device,
+    VkDeviceMemory memory,
+    VkExternalMemoryHandleTypeFlagsNV handleType,
+    HANDLE* pHandle)
+{
+    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandleNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetMemoryWin32HandleNV_VkResult_return = (VkResult)0;
+    vkGetMemoryWin32HandleNV_VkResult_return = vkEnc->vkGetMemoryWin32HandleNV(device, memory, handleType, pHandle, true /* do lock */);
+    return vkGetMemoryWin32HandleNV_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetMemoryWin32HandleNV(
+    VkDevice device,
+    VkDeviceMemory memory,
+    VkExternalMemoryHandleTypeFlagsNV handleType,
+    HANDLE* pHandle)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_NV_external_memory_win32"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetMemoryWin32HandleNV", "VK_NV_external_memory_win32");
+    }
+    AEMU_SCOPED_TRACE("vkGetMemoryWin32HandleNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetMemoryWin32HandleNV_VkResult_return = (VkResult)0;
+    vkGetMemoryWin32HandleNV_VkResult_return = vkEnc->vkGetMemoryWin32HandleNV(device, memory, handleType, pHandle, true /* do lock */);
+    return vkGetMemoryWin32HandleNV_VkResult_return;
+}
+#endif
+#ifdef VK_NV_win32_keyed_mutex
+#endif
+#ifdef VK_EXT_validation_flags
+#endif
+#ifdef VK_NN_vi_surface
+static VkResult entry_vkCreateViSurfaceNN(
+    VkInstance instance,
+    const VkViSurfaceCreateInfoNN* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface)
+{
+    AEMU_SCOPED_TRACE("vkCreateViSurfaceNN");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateViSurfaceNN_VkResult_return = (VkResult)0;
+    vkCreateViSurfaceNN_VkResult_return = vkEnc->vkCreateViSurfaceNN(instance, pCreateInfo, pAllocator, pSurface, true /* do lock */);
+    return vkCreateViSurfaceNN_VkResult_return;
+}
+#endif
+#ifdef VK_EXT_shader_subgroup_ballot
+#endif
+#ifdef VK_EXT_shader_subgroup_vote
+#endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+#endif
+#ifdef VK_EXT_astc_decode_mode
+#endif
+#ifdef VK_EXT_conditional_rendering
+static void entry_vkCmdBeginConditionalRenderingEXT(
+    VkCommandBuffer commandBuffer,
+    const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin)
+{
+    AEMU_SCOPED_TRACE("vkCmdBeginConditionalRenderingEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBeginConditionalRenderingEXT(commandBuffer, pConditionalRenderingBegin, true /* do lock */);
+}
+static void entry_vkCmdEndConditionalRenderingEXT(
+    VkCommandBuffer commandBuffer)
+{
+    AEMU_SCOPED_TRACE("vkCmdEndConditionalRenderingEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdEndConditionalRenderingEXT(commandBuffer, true /* do lock */);
+}
+#endif
+#ifdef VK_NV_clip_space_w_scaling
+static void entry_vkCmdSetViewportWScalingNV(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstViewport,
+    uint32_t viewportCount,
+    const VkViewportWScalingNV* pViewportWScalings)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetViewportWScalingNV");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetViewportWScalingNV(commandBuffer, firstViewport, viewportCount, pViewportWScalings, true /* do lock */);
+}
+#endif
+#ifdef VK_EXT_direct_mode_display
+static VkResult entry_vkReleaseDisplayEXT(
+    VkPhysicalDevice physicalDevice,
+    VkDisplayKHR display)
+{
+    AEMU_SCOPED_TRACE("vkReleaseDisplayEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkReleaseDisplayEXT_VkResult_return = (VkResult)0;
+    vkReleaseDisplayEXT_VkResult_return = vkEnc->vkReleaseDisplayEXT(physicalDevice, display, true /* do lock */);
+    return vkReleaseDisplayEXT_VkResult_return;
+}
+#endif
+#ifdef VK_EXT_acquire_xlib_display
+static VkResult entry_vkAcquireXlibDisplayEXT(
+    VkPhysicalDevice physicalDevice,
+    Display* dpy,
+    VkDisplayKHR display)
+{
+    AEMU_SCOPED_TRACE("vkAcquireXlibDisplayEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkAcquireXlibDisplayEXT_VkResult_return = (VkResult)0;
+    vkAcquireXlibDisplayEXT_VkResult_return = vkEnc->vkAcquireXlibDisplayEXT(physicalDevice, dpy, display, true /* do lock */);
+    return vkAcquireXlibDisplayEXT_VkResult_return;
+}
+static VkResult entry_vkGetRandROutputDisplayEXT(
+    VkPhysicalDevice physicalDevice,
+    Display* dpy,
+    RROutput rrOutput,
+    VkDisplayKHR* pDisplay)
+{
+    AEMU_SCOPED_TRACE("vkGetRandROutputDisplayEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetRandROutputDisplayEXT_VkResult_return = (VkResult)0;
+    vkGetRandROutputDisplayEXT_VkResult_return = vkEnc->vkGetRandROutputDisplayEXT(physicalDevice, dpy, rrOutput, pDisplay, true /* do lock */);
+    return vkGetRandROutputDisplayEXT_VkResult_return;
+}
+#endif
+#ifdef VK_EXT_display_surface_counter
+static VkResult entry_vkGetPhysicalDeviceSurfaceCapabilities2EXT(
+    VkPhysicalDevice physicalDevice,
+    VkSurfaceKHR surface,
+    VkSurfaceCapabilities2EXT* pSurfaceCapabilities)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfaceCapabilities2EXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceSurfaceCapabilities2EXT_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceSurfaceCapabilities2EXT_VkResult_return = vkEnc->vkGetPhysicalDeviceSurfaceCapabilities2EXT(physicalDevice, surface, pSurfaceCapabilities, true /* do lock */);
+    return vkGetPhysicalDeviceSurfaceCapabilities2EXT_VkResult_return;
+}
+#endif
+#ifdef VK_EXT_display_control
+static VkResult entry_vkDisplayPowerControlEXT(
+    VkDevice device,
+    VkDisplayKHR display,
+    const VkDisplayPowerInfoEXT* pDisplayPowerInfo)
+{
+    AEMU_SCOPED_TRACE("vkDisplayPowerControlEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkDisplayPowerControlEXT_VkResult_return = (VkResult)0;
+    vkDisplayPowerControlEXT_VkResult_return = vkEnc->vkDisplayPowerControlEXT(device, display, pDisplayPowerInfo, true /* do lock */);
+    return vkDisplayPowerControlEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkDisplayPowerControlEXT(
+    VkDevice device,
+    VkDisplayKHR display,
+    const VkDisplayPowerInfoEXT* pDisplayPowerInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_display_control"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkDisplayPowerControlEXT", "VK_EXT_display_control");
+    }
+    AEMU_SCOPED_TRACE("vkDisplayPowerControlEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkDisplayPowerControlEXT_VkResult_return = (VkResult)0;
+    vkDisplayPowerControlEXT_VkResult_return = vkEnc->vkDisplayPowerControlEXT(device, display, pDisplayPowerInfo, true /* do lock */);
+    return vkDisplayPowerControlEXT_VkResult_return;
+}
+static VkResult entry_vkRegisterDeviceEventEXT(
+    VkDevice device,
+    const VkDeviceEventInfoEXT* pDeviceEventInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkFence* pFence)
+{
+    AEMU_SCOPED_TRACE("vkRegisterDeviceEventEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkRegisterDeviceEventEXT_VkResult_return = (VkResult)0;
+    vkRegisterDeviceEventEXT_VkResult_return = vkEnc->vkRegisterDeviceEventEXT(device, pDeviceEventInfo, pAllocator, pFence, true /* do lock */);
+    return vkRegisterDeviceEventEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkRegisterDeviceEventEXT(
+    VkDevice device,
+    const VkDeviceEventInfoEXT* pDeviceEventInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkFence* pFence)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_display_control"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkRegisterDeviceEventEXT", "VK_EXT_display_control");
+    }
+    AEMU_SCOPED_TRACE("vkRegisterDeviceEventEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkRegisterDeviceEventEXT_VkResult_return = (VkResult)0;
+    vkRegisterDeviceEventEXT_VkResult_return = vkEnc->vkRegisterDeviceEventEXT(device, pDeviceEventInfo, pAllocator, pFence, true /* do lock */);
+    return vkRegisterDeviceEventEXT_VkResult_return;
+}
+static VkResult entry_vkRegisterDisplayEventEXT(
+    VkDevice device,
+    VkDisplayKHR display,
+    const VkDisplayEventInfoEXT* pDisplayEventInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkFence* pFence)
+{
+    AEMU_SCOPED_TRACE("vkRegisterDisplayEventEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkRegisterDisplayEventEXT_VkResult_return = (VkResult)0;
+    vkRegisterDisplayEventEXT_VkResult_return = vkEnc->vkRegisterDisplayEventEXT(device, display, pDisplayEventInfo, pAllocator, pFence, true /* do lock */);
+    return vkRegisterDisplayEventEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkRegisterDisplayEventEXT(
+    VkDevice device,
+    VkDisplayKHR display,
+    const VkDisplayEventInfoEXT* pDisplayEventInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkFence* pFence)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_display_control"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkRegisterDisplayEventEXT", "VK_EXT_display_control");
+    }
+    AEMU_SCOPED_TRACE("vkRegisterDisplayEventEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkRegisterDisplayEventEXT_VkResult_return = (VkResult)0;
+    vkRegisterDisplayEventEXT_VkResult_return = vkEnc->vkRegisterDisplayEventEXT(device, display, pDisplayEventInfo, pAllocator, pFence, true /* do lock */);
+    return vkRegisterDisplayEventEXT_VkResult_return;
+}
+static VkResult entry_vkGetSwapchainCounterEXT(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    VkSurfaceCounterFlagBitsEXT counter,
+    uint64_t* pCounterValue)
+{
+    AEMU_SCOPED_TRACE("vkGetSwapchainCounterEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetSwapchainCounterEXT_VkResult_return = (VkResult)0;
+    vkGetSwapchainCounterEXT_VkResult_return = vkEnc->vkGetSwapchainCounterEXT(device, swapchain, counter, pCounterValue, true /* do lock */);
+    return vkGetSwapchainCounterEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetSwapchainCounterEXT(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    VkSurfaceCounterFlagBitsEXT counter,
+    uint64_t* pCounterValue)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_display_control"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetSwapchainCounterEXT", "VK_EXT_display_control");
+    }
+    AEMU_SCOPED_TRACE("vkGetSwapchainCounterEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetSwapchainCounterEXT_VkResult_return = (VkResult)0;
+    vkGetSwapchainCounterEXT_VkResult_return = vkEnc->vkGetSwapchainCounterEXT(device, swapchain, counter, pCounterValue, true /* do lock */);
+    return vkGetSwapchainCounterEXT_VkResult_return;
+}
+#endif
+#ifdef VK_GOOGLE_display_timing
+static VkResult entry_vkGetRefreshCycleDurationGOOGLE(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetRefreshCycleDurationGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetRefreshCycleDurationGOOGLE_VkResult_return = (VkResult)0;
+    vkGetRefreshCycleDurationGOOGLE_VkResult_return = vkEnc->vkGetRefreshCycleDurationGOOGLE(device, swapchain, pDisplayTimingProperties, true /* do lock */);
+    return vkGetRefreshCycleDurationGOOGLE_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetRefreshCycleDurationGOOGLE(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_display_timing"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetRefreshCycleDurationGOOGLE", "VK_GOOGLE_display_timing");
+    }
+    AEMU_SCOPED_TRACE("vkGetRefreshCycleDurationGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetRefreshCycleDurationGOOGLE_VkResult_return = (VkResult)0;
+    vkGetRefreshCycleDurationGOOGLE_VkResult_return = vkEnc->vkGetRefreshCycleDurationGOOGLE(device, swapchain, pDisplayTimingProperties, true /* do lock */);
+    return vkGetRefreshCycleDurationGOOGLE_VkResult_return;
+}
+static VkResult entry_vkGetPastPresentationTimingGOOGLE(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    uint32_t* pPresentationTimingCount,
+    VkPastPresentationTimingGOOGLE* pPresentationTimings)
+{
+    AEMU_SCOPED_TRACE("vkGetPastPresentationTimingGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPastPresentationTimingGOOGLE_VkResult_return = (VkResult)0;
+    vkGetPastPresentationTimingGOOGLE_VkResult_return = vkEnc->vkGetPastPresentationTimingGOOGLE(device, swapchain, pPresentationTimingCount, pPresentationTimings, true /* do lock */);
+    return vkGetPastPresentationTimingGOOGLE_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetPastPresentationTimingGOOGLE(
+    VkDevice device,
+    VkSwapchainKHR swapchain,
+    uint32_t* pPresentationTimingCount,
+    VkPastPresentationTimingGOOGLE* pPresentationTimings)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_display_timing"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetPastPresentationTimingGOOGLE", "VK_GOOGLE_display_timing");
+    }
+    AEMU_SCOPED_TRACE("vkGetPastPresentationTimingGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPastPresentationTimingGOOGLE_VkResult_return = (VkResult)0;
+    vkGetPastPresentationTimingGOOGLE_VkResult_return = vkEnc->vkGetPastPresentationTimingGOOGLE(device, swapchain, pPresentationTimingCount, pPresentationTimings, true /* do lock */);
+    return vkGetPastPresentationTimingGOOGLE_VkResult_return;
+}
+#endif
+#ifdef VK_NV_sample_mask_override_coverage
+#endif
+#ifdef VK_NV_geometry_shader_passthrough
+#endif
+#ifdef VK_NV_viewport_array2
+#endif
+#ifdef VK_NVX_multiview_per_view_attributes
+#endif
+#ifdef VK_NV_viewport_swizzle
+#endif
+#ifdef VK_EXT_discard_rectangles
+static void entry_vkCmdSetDiscardRectangleEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstDiscardRectangle,
+    uint32_t discardRectangleCount,
+    const VkRect2D* pDiscardRectangles)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetDiscardRectangleEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetDiscardRectangleEXT(commandBuffer, firstDiscardRectangle, discardRectangleCount, pDiscardRectangles, true /* do lock */);
+}
+#endif
+#ifdef VK_EXT_conservative_rasterization
+#endif
+#ifdef VK_EXT_depth_clip_enable
+#endif
+#ifdef VK_EXT_swapchain_colorspace
+#endif
+#ifdef VK_EXT_hdr_metadata
+static void entry_vkSetHdrMetadataEXT(
+    VkDevice device,
+    uint32_t swapchainCount,
+    const VkSwapchainKHR* pSwapchains,
+    const VkHdrMetadataEXT* pMetadata)
+{
+    AEMU_SCOPED_TRACE("vkSetHdrMetadataEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkSetHdrMetadataEXT(device, swapchainCount, pSwapchains, pMetadata, true /* do lock */);
+}
+static void dynCheck_entry_vkSetHdrMetadataEXT(
+    VkDevice device,
+    uint32_t swapchainCount,
+    const VkSwapchainKHR* pSwapchains,
+    const VkHdrMetadataEXT* pMetadata)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_hdr_metadata"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkSetHdrMetadataEXT", "VK_EXT_hdr_metadata");
+    }
+    AEMU_SCOPED_TRACE("vkSetHdrMetadataEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkSetHdrMetadataEXT(device, swapchainCount, pSwapchains, pMetadata, true /* do lock */);
+}
+#endif
+#ifdef VK_MVK_ios_surface
+static VkResult entry_vkCreateIOSSurfaceMVK(
+    VkInstance instance,
+    const VkIOSSurfaceCreateInfoMVK* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface)
+{
+    AEMU_SCOPED_TRACE("vkCreateIOSSurfaceMVK");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateIOSSurfaceMVK_VkResult_return = (VkResult)0;
+    vkCreateIOSSurfaceMVK_VkResult_return = vkEnc->vkCreateIOSSurfaceMVK(instance, pCreateInfo, pAllocator, pSurface, true /* do lock */);
+    return vkCreateIOSSurfaceMVK_VkResult_return;
+}
+#endif
+#ifdef VK_MVK_macos_surface
+static VkResult entry_vkCreateMacOSSurfaceMVK(
+    VkInstance instance,
+    const VkMacOSSurfaceCreateInfoMVK* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface)
+{
+    AEMU_SCOPED_TRACE("vkCreateMacOSSurfaceMVK");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateMacOSSurfaceMVK_VkResult_return = (VkResult)0;
+    vkCreateMacOSSurfaceMVK_VkResult_return = vkEnc->vkCreateMacOSSurfaceMVK(instance, pCreateInfo, pAllocator, pSurface, true /* do lock */);
+    return vkCreateMacOSSurfaceMVK_VkResult_return;
+}
+#endif
+#ifdef VK_MVK_moltenvk
+static void entry_vkGetMTLDeviceMVK(
+    VkPhysicalDevice physicalDevice,
+    void** pMTLDevice)
+{
+    AEMU_SCOPED_TRACE("vkGetMTLDeviceMVK");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetMTLDeviceMVK(physicalDevice, pMTLDevice, true /* do lock */);
+}
+static VkResult entry_vkSetMTLTextureMVK(
+    VkImage image,
+    void* mtlTexture)
+{
+    AEMU_SCOPED_TRACE("vkSetMTLTextureMVK");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkSetMTLTextureMVK_VkResult_return = (VkResult)0;
+    vkSetMTLTextureMVK_VkResult_return = vkEnc->vkSetMTLTextureMVK(image, mtlTexture, true /* do lock */);
+    return vkSetMTLTextureMVK_VkResult_return;
+}
+static void entry_vkGetMTLTextureMVK(
+    VkImage image,
+    void** pMTLTexture)
+{
+    AEMU_SCOPED_TRACE("vkGetMTLTextureMVK");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetMTLTextureMVK(image, pMTLTexture, true /* do lock */);
+}
+static void entry_vkGetMTLBufferMVK(
+    VkBuffer buffer,
+    void** pMTLBuffer)
+{
+    AEMU_SCOPED_TRACE("vkGetMTLBufferMVK");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetMTLBufferMVK(buffer, pMTLBuffer, true /* do lock */);
+}
+static VkResult entry_vkUseIOSurfaceMVK(
+    VkImage image,
+    void* ioSurface)
+{
+    AEMU_SCOPED_TRACE("vkUseIOSurfaceMVK");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkUseIOSurfaceMVK_VkResult_return = (VkResult)0;
+    vkUseIOSurfaceMVK_VkResult_return = vkEnc->vkUseIOSurfaceMVK(image, ioSurface, true /* do lock */);
+    return vkUseIOSurfaceMVK_VkResult_return;
+}
+static void entry_vkGetIOSurfaceMVK(
+    VkImage image,
+    void** pIOSurface)
+{
+    AEMU_SCOPED_TRACE("vkGetIOSurfaceMVK");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetIOSurfaceMVK(image, pIOSurface, true /* do lock */);
+}
+#endif
+#ifdef VK_EXT_external_memory_dma_buf
+#endif
+#ifdef VK_EXT_queue_family_foreign
+#endif
+#ifdef VK_EXT_debug_utils
+static VkResult entry_vkSetDebugUtilsObjectNameEXT(
+    VkDevice device,
+    const VkDebugUtilsObjectNameInfoEXT* pNameInfo)
+{
+    AEMU_SCOPED_TRACE("vkSetDebugUtilsObjectNameEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkSetDebugUtilsObjectNameEXT_VkResult_return = (VkResult)0;
+    vkSetDebugUtilsObjectNameEXT_VkResult_return = vkEnc->vkSetDebugUtilsObjectNameEXT(device, pNameInfo, true /* do lock */);
+    return vkSetDebugUtilsObjectNameEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkSetDebugUtilsObjectNameEXT(
+    VkDevice device,
+    const VkDebugUtilsObjectNameInfoEXT* pNameInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_debug_utils"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkSetDebugUtilsObjectNameEXT", "VK_EXT_debug_utils");
+    }
+    AEMU_SCOPED_TRACE("vkSetDebugUtilsObjectNameEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkSetDebugUtilsObjectNameEXT_VkResult_return = (VkResult)0;
+    vkSetDebugUtilsObjectNameEXT_VkResult_return = vkEnc->vkSetDebugUtilsObjectNameEXT(device, pNameInfo, true /* do lock */);
+    return vkSetDebugUtilsObjectNameEXT_VkResult_return;
+}
+static VkResult entry_vkSetDebugUtilsObjectTagEXT(
+    VkDevice device,
+    const VkDebugUtilsObjectTagInfoEXT* pTagInfo)
+{
+    AEMU_SCOPED_TRACE("vkSetDebugUtilsObjectTagEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkSetDebugUtilsObjectTagEXT_VkResult_return = (VkResult)0;
+    vkSetDebugUtilsObjectTagEXT_VkResult_return = vkEnc->vkSetDebugUtilsObjectTagEXT(device, pTagInfo, true /* do lock */);
+    return vkSetDebugUtilsObjectTagEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkSetDebugUtilsObjectTagEXT(
+    VkDevice device,
+    const VkDebugUtilsObjectTagInfoEXT* pTagInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_debug_utils"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkSetDebugUtilsObjectTagEXT", "VK_EXT_debug_utils");
+    }
+    AEMU_SCOPED_TRACE("vkSetDebugUtilsObjectTagEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkSetDebugUtilsObjectTagEXT_VkResult_return = (VkResult)0;
+    vkSetDebugUtilsObjectTagEXT_VkResult_return = vkEnc->vkSetDebugUtilsObjectTagEXT(device, pTagInfo, true /* do lock */);
+    return vkSetDebugUtilsObjectTagEXT_VkResult_return;
+}
+static void entry_vkQueueBeginDebugUtilsLabelEXT(
+    VkQueue queue,
+    const VkDebugUtilsLabelEXT* pLabelInfo)
+{
+    AEMU_SCOPED_TRACE("vkQueueBeginDebugUtilsLabelEXT");
+    auto vkEnc = ResourceTracker::getQueueEncoder(queue);
+    vkEnc->vkQueueBeginDebugUtilsLabelEXT(queue, pLabelInfo, true /* do lock */);
+}
+static void entry_vkQueueEndDebugUtilsLabelEXT(
+    VkQueue queue)
+{
+    AEMU_SCOPED_TRACE("vkQueueEndDebugUtilsLabelEXT");
+    auto vkEnc = ResourceTracker::getQueueEncoder(queue);
+    vkEnc->vkQueueEndDebugUtilsLabelEXT(queue, true /* do lock */);
+}
+static void entry_vkQueueInsertDebugUtilsLabelEXT(
+    VkQueue queue,
+    const VkDebugUtilsLabelEXT* pLabelInfo)
+{
+    AEMU_SCOPED_TRACE("vkQueueInsertDebugUtilsLabelEXT");
+    auto vkEnc = ResourceTracker::getQueueEncoder(queue);
+    vkEnc->vkQueueInsertDebugUtilsLabelEXT(queue, pLabelInfo, true /* do lock */);
+}
+static void entry_vkCmdBeginDebugUtilsLabelEXT(
+    VkCommandBuffer commandBuffer,
+    const VkDebugUtilsLabelEXT* pLabelInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdBeginDebugUtilsLabelEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBeginDebugUtilsLabelEXT(commandBuffer, pLabelInfo, true /* do lock */);
+}
+static void entry_vkCmdEndDebugUtilsLabelEXT(
+    VkCommandBuffer commandBuffer)
+{
+    AEMU_SCOPED_TRACE("vkCmdEndDebugUtilsLabelEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdEndDebugUtilsLabelEXT(commandBuffer, true /* do lock */);
+}
+static void entry_vkCmdInsertDebugUtilsLabelEXT(
+    VkCommandBuffer commandBuffer,
+    const VkDebugUtilsLabelEXT* pLabelInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdInsertDebugUtilsLabelEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdInsertDebugUtilsLabelEXT(commandBuffer, pLabelInfo, true /* do lock */);
+}
+static VkResult entry_vkCreateDebugUtilsMessengerEXT(
+    VkInstance instance,
+    const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkDebugUtilsMessengerEXT* pMessenger)
+{
+    AEMU_SCOPED_TRACE("vkCreateDebugUtilsMessengerEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateDebugUtilsMessengerEXT_VkResult_return = (VkResult)0;
+    vkCreateDebugUtilsMessengerEXT_VkResult_return = vkEnc->vkCreateDebugUtilsMessengerEXT(instance, pCreateInfo, pAllocator, pMessenger, true /* do lock */);
+    return vkCreateDebugUtilsMessengerEXT_VkResult_return;
+}
+static void entry_vkDestroyDebugUtilsMessengerEXT(
+    VkInstance instance,
+    VkDebugUtilsMessengerEXT messenger,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyDebugUtilsMessengerEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyDebugUtilsMessengerEXT(instance, messenger, pAllocator, true /* do lock */);
+}
+static void entry_vkSubmitDebugUtilsMessageEXT(
+    VkInstance instance,
+    VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
+    VkDebugUtilsMessageTypeFlagsEXT messageTypes,
+    const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData)
+{
+    AEMU_SCOPED_TRACE("vkSubmitDebugUtilsMessageEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkSubmitDebugUtilsMessageEXT(instance, messageSeverity, messageTypes, pCallbackData, true /* do lock */);
+}
+#endif
+#ifdef VK_ANDROID_external_memory_android_hardware_buffer
+static VkResult entry_vkGetAndroidHardwareBufferPropertiesANDROID(
+    VkDevice device,
+    const AHardwareBuffer* buffer,
+    VkAndroidHardwareBufferPropertiesANDROID* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetAndroidHardwareBufferPropertiesANDROID");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetAndroidHardwareBufferPropertiesANDROID_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkGetAndroidHardwareBufferPropertiesANDROID_VkResult_return = resources->on_vkGetAndroidHardwareBufferPropertiesANDROID(vkEnc, VK_SUCCESS, device, buffer, pProperties);
+    return vkGetAndroidHardwareBufferPropertiesANDROID_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetAndroidHardwareBufferPropertiesANDROID(
+    VkDevice device,
+    const AHardwareBuffer* buffer,
+    VkAndroidHardwareBufferPropertiesANDROID* pProperties)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_ANDROID_external_memory_android_hardware_buffer"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetAndroidHardwareBufferPropertiesANDROID", "VK_ANDROID_external_memory_android_hardware_buffer");
+    }
+    AEMU_SCOPED_TRACE("vkGetAndroidHardwareBufferPropertiesANDROID");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetAndroidHardwareBufferPropertiesANDROID_VkResult_return = (VkResult)0;
+    vkGetAndroidHardwareBufferPropertiesANDROID_VkResult_return = resources->on_vkGetAndroidHardwareBufferPropertiesANDROID(vkEnc, VK_SUCCESS, device, buffer, pProperties);
+    return vkGetAndroidHardwareBufferPropertiesANDROID_VkResult_return;
+}
+static VkResult entry_vkGetMemoryAndroidHardwareBufferANDROID(
+    VkDevice device,
+    const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
+    AHardwareBuffer** pBuffer)
+{
+    AEMU_SCOPED_TRACE("vkGetMemoryAndroidHardwareBufferANDROID");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetMemoryAndroidHardwareBufferANDROID_VkResult_return = (VkResult)0;
+    auto resources = ResourceTracker::get();
+    vkGetMemoryAndroidHardwareBufferANDROID_VkResult_return = resources->on_vkGetMemoryAndroidHardwareBufferANDROID(vkEnc, VK_SUCCESS, device, pInfo, pBuffer);
+    return vkGetMemoryAndroidHardwareBufferANDROID_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetMemoryAndroidHardwareBufferANDROID(
+    VkDevice device,
+    const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
+    AHardwareBuffer** pBuffer)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_ANDROID_external_memory_android_hardware_buffer"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetMemoryAndroidHardwareBufferANDROID", "VK_ANDROID_external_memory_android_hardware_buffer");
+    }
+    AEMU_SCOPED_TRACE("vkGetMemoryAndroidHardwareBufferANDROID");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetMemoryAndroidHardwareBufferANDROID_VkResult_return = (VkResult)0;
+    vkGetMemoryAndroidHardwareBufferANDROID_VkResult_return = resources->on_vkGetMemoryAndroidHardwareBufferANDROID(vkEnc, VK_SUCCESS, device, pInfo, pBuffer);
+    return vkGetMemoryAndroidHardwareBufferANDROID_VkResult_return;
+}
+#endif
+#ifdef VK_EXT_sampler_filter_minmax
+#endif
+#ifdef VK_AMD_gpu_shader_int16
+#endif
+#ifdef VK_AMD_mixed_attachment_samples
+#endif
+#ifdef VK_AMD_shader_fragment_mask
+#endif
+#ifdef VK_EXT_inline_uniform_block
+#endif
+#ifdef VK_EXT_shader_stencil_export
+#endif
+#ifdef VK_EXT_sample_locations
+static void entry_vkCmdSetSampleLocationsEXT(
+    VkCommandBuffer commandBuffer,
+    const VkSampleLocationsInfoEXT* pSampleLocationsInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetSampleLocationsEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetSampleLocationsEXT(commandBuffer, pSampleLocationsInfo, true /* do lock */);
+}
+static void entry_vkGetPhysicalDeviceMultisamplePropertiesEXT(
+    VkPhysicalDevice physicalDevice,
+    VkSampleCountFlagBits samples,
+    VkMultisamplePropertiesEXT* pMultisampleProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceMultisamplePropertiesEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPhysicalDeviceMultisamplePropertiesEXT(physicalDevice, samples, pMultisampleProperties, true /* do lock */);
+}
+#endif
+#ifdef VK_EXT_blend_operation_advanced
+#endif
+#ifdef VK_NV_fragment_coverage_to_color
+#endif
+#ifdef VK_NV_framebuffer_mixed_samples
+#endif
+#ifdef VK_NV_fill_rectangle
+#endif
+#ifdef VK_NV_shader_sm_builtins
+#endif
+#ifdef VK_EXT_post_depth_coverage
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+static VkResult entry_vkGetImageDrmFormatModifierPropertiesEXT(
+    VkDevice device,
+    VkImage image,
+    VkImageDrmFormatModifierPropertiesEXT* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetImageDrmFormatModifierPropertiesEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetImageDrmFormatModifierPropertiesEXT_VkResult_return = (VkResult)0;
+    vkGetImageDrmFormatModifierPropertiesEXT_VkResult_return = vkEnc->vkGetImageDrmFormatModifierPropertiesEXT(device, image, pProperties, true /* do lock */);
+    return vkGetImageDrmFormatModifierPropertiesEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetImageDrmFormatModifierPropertiesEXT(
+    VkDevice device,
+    VkImage image,
+    VkImageDrmFormatModifierPropertiesEXT* pProperties)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_image_drm_format_modifier"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetImageDrmFormatModifierPropertiesEXT", "VK_EXT_image_drm_format_modifier");
+    }
+    AEMU_SCOPED_TRACE("vkGetImageDrmFormatModifierPropertiesEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetImageDrmFormatModifierPropertiesEXT_VkResult_return = (VkResult)0;
+    vkGetImageDrmFormatModifierPropertiesEXT_VkResult_return = vkEnc->vkGetImageDrmFormatModifierPropertiesEXT(device, image, pProperties, true /* do lock */);
+    return vkGetImageDrmFormatModifierPropertiesEXT_VkResult_return;
+}
+#endif
+#ifdef VK_EXT_validation_cache
+static VkResult entry_vkCreateValidationCacheEXT(
+    VkDevice device,
+    const VkValidationCacheCreateInfoEXT* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkValidationCacheEXT* pValidationCache)
+{
+    AEMU_SCOPED_TRACE("vkCreateValidationCacheEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateValidationCacheEXT_VkResult_return = (VkResult)0;
+    vkCreateValidationCacheEXT_VkResult_return = vkEnc->vkCreateValidationCacheEXT(device, pCreateInfo, pAllocator, pValidationCache, true /* do lock */);
+    return vkCreateValidationCacheEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreateValidationCacheEXT(
+    VkDevice device,
+    const VkValidationCacheCreateInfoEXT* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkValidationCacheEXT* pValidationCache)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_validation_cache"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreateValidationCacheEXT", "VK_EXT_validation_cache");
+    }
+    AEMU_SCOPED_TRACE("vkCreateValidationCacheEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateValidationCacheEXT_VkResult_return = (VkResult)0;
+    vkCreateValidationCacheEXT_VkResult_return = vkEnc->vkCreateValidationCacheEXT(device, pCreateInfo, pAllocator, pValidationCache, true /* do lock */);
+    return vkCreateValidationCacheEXT_VkResult_return;
+}
+static void entry_vkDestroyValidationCacheEXT(
+    VkDevice device,
+    VkValidationCacheEXT validationCache,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyValidationCacheEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyValidationCacheEXT(device, validationCache, pAllocator, true /* do lock */);
+}
+static void dynCheck_entry_vkDestroyValidationCacheEXT(
+    VkDevice device,
+    VkValidationCacheEXT validationCache,
+    const VkAllocationCallbacks* pAllocator)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_validation_cache"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkDestroyValidationCacheEXT", "VK_EXT_validation_cache");
+    }
+    AEMU_SCOPED_TRACE("vkDestroyValidationCacheEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyValidationCacheEXT(device, validationCache, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkMergeValidationCachesEXT(
+    VkDevice device,
+    VkValidationCacheEXT dstCache,
+    uint32_t srcCacheCount,
+    const VkValidationCacheEXT* pSrcCaches)
+{
+    AEMU_SCOPED_TRACE("vkMergeValidationCachesEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkMergeValidationCachesEXT_VkResult_return = (VkResult)0;
+    vkMergeValidationCachesEXT_VkResult_return = vkEnc->vkMergeValidationCachesEXT(device, dstCache, srcCacheCount, pSrcCaches, true /* do lock */);
+    return vkMergeValidationCachesEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkMergeValidationCachesEXT(
+    VkDevice device,
+    VkValidationCacheEXT dstCache,
+    uint32_t srcCacheCount,
+    const VkValidationCacheEXT* pSrcCaches)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_validation_cache"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkMergeValidationCachesEXT", "VK_EXT_validation_cache");
+    }
+    AEMU_SCOPED_TRACE("vkMergeValidationCachesEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkMergeValidationCachesEXT_VkResult_return = (VkResult)0;
+    vkMergeValidationCachesEXT_VkResult_return = vkEnc->vkMergeValidationCachesEXT(device, dstCache, srcCacheCount, pSrcCaches, true /* do lock */);
+    return vkMergeValidationCachesEXT_VkResult_return;
+}
+static VkResult entry_vkGetValidationCacheDataEXT(
+    VkDevice device,
+    VkValidationCacheEXT validationCache,
+    size_t* pDataSize,
+    void* pData)
+{
+    AEMU_SCOPED_TRACE("vkGetValidationCacheDataEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetValidationCacheDataEXT_VkResult_return = (VkResult)0;
+    vkGetValidationCacheDataEXT_VkResult_return = vkEnc->vkGetValidationCacheDataEXT(device, validationCache, pDataSize, pData, true /* do lock */);
+    return vkGetValidationCacheDataEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetValidationCacheDataEXT(
+    VkDevice device,
+    VkValidationCacheEXT validationCache,
+    size_t* pDataSize,
+    void* pData)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_validation_cache"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetValidationCacheDataEXT", "VK_EXT_validation_cache");
+    }
+    AEMU_SCOPED_TRACE("vkGetValidationCacheDataEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetValidationCacheDataEXT_VkResult_return = (VkResult)0;
+    vkGetValidationCacheDataEXT_VkResult_return = vkEnc->vkGetValidationCacheDataEXT(device, validationCache, pDataSize, pData, true /* do lock */);
+    return vkGetValidationCacheDataEXT_VkResult_return;
+}
+#endif
+#ifdef VK_EXT_descriptor_indexing
+#endif
+#ifdef VK_EXT_shader_viewport_index_layer
+#endif
+#ifdef VK_NV_shading_rate_image
+static void entry_vkCmdBindShadingRateImageNV(
+    VkCommandBuffer commandBuffer,
+    VkImageView imageView,
+    VkImageLayout imageLayout)
+{
+    AEMU_SCOPED_TRACE("vkCmdBindShadingRateImageNV");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBindShadingRateImageNV(commandBuffer, imageView, imageLayout, true /* do lock */);
+}
+static void entry_vkCmdSetViewportShadingRatePaletteNV(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstViewport,
+    uint32_t viewportCount,
+    const VkShadingRatePaletteNV* pShadingRatePalettes)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetViewportShadingRatePaletteNV");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetViewportShadingRatePaletteNV(commandBuffer, firstViewport, viewportCount, pShadingRatePalettes, true /* do lock */);
+}
+static void entry_vkCmdSetCoarseSampleOrderNV(
+    VkCommandBuffer commandBuffer,
+    VkCoarseSampleOrderTypeNV sampleOrderType,
+    uint32_t customSampleOrderCount,
+    const VkCoarseSampleOrderCustomNV* pCustomSampleOrders)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetCoarseSampleOrderNV");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetCoarseSampleOrderNV(commandBuffer, sampleOrderType, customSampleOrderCount, pCustomSampleOrders, true /* do lock */);
+}
+#endif
+#ifdef VK_NV_ray_tracing
+static VkResult entry_vkCreateAccelerationStructureNV(
+    VkDevice device,
+    const VkAccelerationStructureCreateInfoNV* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkAccelerationStructureNV* pAccelerationStructure)
+{
+    AEMU_SCOPED_TRACE("vkCreateAccelerationStructureNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateAccelerationStructureNV_VkResult_return = (VkResult)0;
+    vkCreateAccelerationStructureNV_VkResult_return = vkEnc->vkCreateAccelerationStructureNV(device, pCreateInfo, pAllocator, pAccelerationStructure, true /* do lock */);
+    return vkCreateAccelerationStructureNV_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreateAccelerationStructureNV(
+    VkDevice device,
+    const VkAccelerationStructureCreateInfoNV* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkAccelerationStructureNV* pAccelerationStructure)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_NV_ray_tracing"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreateAccelerationStructureNV", "VK_NV_ray_tracing");
+    }
+    AEMU_SCOPED_TRACE("vkCreateAccelerationStructureNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateAccelerationStructureNV_VkResult_return = (VkResult)0;
+    vkCreateAccelerationStructureNV_VkResult_return = vkEnc->vkCreateAccelerationStructureNV(device, pCreateInfo, pAllocator, pAccelerationStructure, true /* do lock */);
+    return vkCreateAccelerationStructureNV_VkResult_return;
+}
+static void entry_vkDestroyAccelerationStructureNV(
+    VkDevice device,
+    VkAccelerationStructureNV accelerationStructure,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyAccelerationStructureNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyAccelerationStructureNV(device, accelerationStructure, pAllocator, true /* do lock */);
+}
+static void dynCheck_entry_vkDestroyAccelerationStructureNV(
+    VkDevice device,
+    VkAccelerationStructureNV accelerationStructure,
+    const VkAllocationCallbacks* pAllocator)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_NV_ray_tracing"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkDestroyAccelerationStructureNV", "VK_NV_ray_tracing");
+    }
+    AEMU_SCOPED_TRACE("vkDestroyAccelerationStructureNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyAccelerationStructureNV(device, accelerationStructure, pAllocator, true /* do lock */);
+}
+static void entry_vkGetAccelerationStructureMemoryRequirementsNV(
+    VkDevice device,
+    const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo,
+    VkMemoryRequirements2KHR* pMemoryRequirements)
+{
+    AEMU_SCOPED_TRACE("vkGetAccelerationStructureMemoryRequirementsNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetAccelerationStructureMemoryRequirementsNV(device, pInfo, pMemoryRequirements, true /* do lock */);
+}
+static void dynCheck_entry_vkGetAccelerationStructureMemoryRequirementsNV(
+    VkDevice device,
+    const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo,
+    VkMemoryRequirements2KHR* pMemoryRequirements)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_NV_ray_tracing"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetAccelerationStructureMemoryRequirementsNV", "VK_NV_ray_tracing");
+    }
+    AEMU_SCOPED_TRACE("vkGetAccelerationStructureMemoryRequirementsNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetAccelerationStructureMemoryRequirementsNV(device, pInfo, pMemoryRequirements, true /* do lock */);
+}
+static VkResult entry_vkBindAccelerationStructureMemoryNV(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindAccelerationStructureMemoryInfoNV* pBindInfos)
+{
+    AEMU_SCOPED_TRACE("vkBindAccelerationStructureMemoryNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkBindAccelerationStructureMemoryNV_VkResult_return = (VkResult)0;
+    vkBindAccelerationStructureMemoryNV_VkResult_return = vkEnc->vkBindAccelerationStructureMemoryNV(device, bindInfoCount, pBindInfos, true /* do lock */);
+    return vkBindAccelerationStructureMemoryNV_VkResult_return;
+}
+static VkResult dynCheck_entry_vkBindAccelerationStructureMemoryNV(
+    VkDevice device,
+    uint32_t bindInfoCount,
+    const VkBindAccelerationStructureMemoryInfoNV* pBindInfos)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_NV_ray_tracing"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkBindAccelerationStructureMemoryNV", "VK_NV_ray_tracing");
+    }
+    AEMU_SCOPED_TRACE("vkBindAccelerationStructureMemoryNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkBindAccelerationStructureMemoryNV_VkResult_return = (VkResult)0;
+    vkBindAccelerationStructureMemoryNV_VkResult_return = vkEnc->vkBindAccelerationStructureMemoryNV(device, bindInfoCount, pBindInfos, true /* do lock */);
+    return vkBindAccelerationStructureMemoryNV_VkResult_return;
+}
+static void entry_vkCmdBuildAccelerationStructureNV(
+    VkCommandBuffer commandBuffer,
+    const VkAccelerationStructureInfoNV* pInfo,
+    VkBuffer instanceData,
+    VkDeviceSize instanceOffset,
+    VkBool32 update,
+    VkAccelerationStructureNV dst,
+    VkAccelerationStructureNV src,
+    VkBuffer scratch,
+    VkDeviceSize scratchOffset)
+{
+    AEMU_SCOPED_TRACE("vkCmdBuildAccelerationStructureNV");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBuildAccelerationStructureNV(commandBuffer, pInfo, instanceData, instanceOffset, update, dst, src, scratch, scratchOffset, true /* do lock */);
+}
+static void entry_vkCmdCopyAccelerationStructureNV(
+    VkCommandBuffer commandBuffer,
+    VkAccelerationStructureNV dst,
+    VkAccelerationStructureNV src,
+    VkCopyAccelerationStructureModeKHR mode)
+{
+    AEMU_SCOPED_TRACE("vkCmdCopyAccelerationStructureNV");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdCopyAccelerationStructureNV(commandBuffer, dst, src, mode, true /* do lock */);
+}
+static void entry_vkCmdTraceRaysNV(
+    VkCommandBuffer commandBuffer,
+    VkBuffer raygenShaderBindingTableBuffer,
+    VkDeviceSize raygenShaderBindingOffset,
+    VkBuffer missShaderBindingTableBuffer,
+    VkDeviceSize missShaderBindingOffset,
+    VkDeviceSize missShaderBindingStride,
+    VkBuffer hitShaderBindingTableBuffer,
+    VkDeviceSize hitShaderBindingOffset,
+    VkDeviceSize hitShaderBindingStride,
+    VkBuffer callableShaderBindingTableBuffer,
+    VkDeviceSize callableShaderBindingOffset,
+    VkDeviceSize callableShaderBindingStride,
+    uint32_t width,
+    uint32_t height,
+    uint32_t depth)
+{
+    AEMU_SCOPED_TRACE("vkCmdTraceRaysNV");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdTraceRaysNV(commandBuffer, raygenShaderBindingTableBuffer, raygenShaderBindingOffset, missShaderBindingTableBuffer, missShaderBindingOffset, missShaderBindingStride, hitShaderBindingTableBuffer, hitShaderBindingOffset, hitShaderBindingStride, callableShaderBindingTableBuffer, callableShaderBindingOffset, callableShaderBindingStride, width, height, depth, true /* do lock */);
+}
+static VkResult entry_vkCreateRayTracingPipelinesNV(
+    VkDevice device,
+    VkPipelineCache pipelineCache,
+    uint32_t createInfoCount,
+    const VkRayTracingPipelineCreateInfoNV* pCreateInfos,
+    const VkAllocationCallbacks* pAllocator,
+    VkPipeline* pPipelines)
+{
+    AEMU_SCOPED_TRACE("vkCreateRayTracingPipelinesNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateRayTracingPipelinesNV_VkResult_return = (VkResult)0;
+    vkCreateRayTracingPipelinesNV_VkResult_return = vkEnc->vkCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, true /* do lock */);
+    return vkCreateRayTracingPipelinesNV_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreateRayTracingPipelinesNV(
+    VkDevice device,
+    VkPipelineCache pipelineCache,
+    uint32_t createInfoCount,
+    const VkRayTracingPipelineCreateInfoNV* pCreateInfos,
+    const VkAllocationCallbacks* pAllocator,
+    VkPipeline* pPipelines)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_NV_ray_tracing"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreateRayTracingPipelinesNV", "VK_NV_ray_tracing");
+    }
+    AEMU_SCOPED_TRACE("vkCreateRayTracingPipelinesNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateRayTracingPipelinesNV_VkResult_return = (VkResult)0;
+    vkCreateRayTracingPipelinesNV_VkResult_return = vkEnc->vkCreateRayTracingPipelinesNV(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, true /* do lock */);
+    return vkCreateRayTracingPipelinesNV_VkResult_return;
+}
+static VkResult entry_vkGetRayTracingShaderGroupHandlesKHR(
+    VkDevice device,
+    VkPipeline pipeline,
+    uint32_t firstGroup,
+    uint32_t groupCount,
+    size_t dataSize,
+    void* pData)
+{
+    AEMU_SCOPED_TRACE("vkGetRayTracingShaderGroupHandlesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetRayTracingShaderGroupHandlesKHR_VkResult_return = (VkResult)0;
+    vkGetRayTracingShaderGroupHandlesKHR_VkResult_return = vkEnc->vkGetRayTracingShaderGroupHandlesKHR(device, pipeline, firstGroup, groupCount, dataSize, pData, true /* do lock */);
+    return vkGetRayTracingShaderGroupHandlesKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetRayTracingShaderGroupHandlesKHR(
+    VkDevice device,
+    VkPipeline pipeline,
+    uint32_t firstGroup,
+    uint32_t groupCount,
+    size_t dataSize,
+    void* pData)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_NV_ray_tracing"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetRayTracingShaderGroupHandlesKHR", "VK_NV_ray_tracing");
+    }
+    AEMU_SCOPED_TRACE("vkGetRayTracingShaderGroupHandlesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetRayTracingShaderGroupHandlesKHR_VkResult_return = (VkResult)0;
+    vkGetRayTracingShaderGroupHandlesKHR_VkResult_return = vkEnc->vkGetRayTracingShaderGroupHandlesKHR(device, pipeline, firstGroup, groupCount, dataSize, pData, true /* do lock */);
+    return vkGetRayTracingShaderGroupHandlesKHR_VkResult_return;
+}
+static VkResult entry_vkGetRayTracingShaderGroupHandlesNV(
+    VkDevice device,
+    VkPipeline pipeline,
+    uint32_t firstGroup,
+    uint32_t groupCount,
+    size_t dataSize,
+    void* pData)
+{
+    AEMU_SCOPED_TRACE("vkGetRayTracingShaderGroupHandlesNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetRayTracingShaderGroupHandlesNV_VkResult_return = (VkResult)0;
+    vkGetRayTracingShaderGroupHandlesNV_VkResult_return = vkEnc->vkGetRayTracingShaderGroupHandlesNV(device, pipeline, firstGroup, groupCount, dataSize, pData, true /* do lock */);
+    return vkGetRayTracingShaderGroupHandlesNV_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetRayTracingShaderGroupHandlesNV(
+    VkDevice device,
+    VkPipeline pipeline,
+    uint32_t firstGroup,
+    uint32_t groupCount,
+    size_t dataSize,
+    void* pData)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_NV_ray_tracing"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetRayTracingShaderGroupHandlesNV", "VK_NV_ray_tracing");
+    }
+    AEMU_SCOPED_TRACE("vkGetRayTracingShaderGroupHandlesNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetRayTracingShaderGroupHandlesNV_VkResult_return = (VkResult)0;
+    vkGetRayTracingShaderGroupHandlesNV_VkResult_return = vkEnc->vkGetRayTracingShaderGroupHandlesNV(device, pipeline, firstGroup, groupCount, dataSize, pData, true /* do lock */);
+    return vkGetRayTracingShaderGroupHandlesNV_VkResult_return;
+}
+static VkResult entry_vkGetAccelerationStructureHandleNV(
+    VkDevice device,
+    VkAccelerationStructureNV accelerationStructure,
+    size_t dataSize,
+    void* pData)
+{
+    AEMU_SCOPED_TRACE("vkGetAccelerationStructureHandleNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetAccelerationStructureHandleNV_VkResult_return = (VkResult)0;
+    vkGetAccelerationStructureHandleNV_VkResult_return = vkEnc->vkGetAccelerationStructureHandleNV(device, accelerationStructure, dataSize, pData, true /* do lock */);
+    return vkGetAccelerationStructureHandleNV_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetAccelerationStructureHandleNV(
+    VkDevice device,
+    VkAccelerationStructureNV accelerationStructure,
+    size_t dataSize,
+    void* pData)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_NV_ray_tracing"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetAccelerationStructureHandleNV", "VK_NV_ray_tracing");
+    }
+    AEMU_SCOPED_TRACE("vkGetAccelerationStructureHandleNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetAccelerationStructureHandleNV_VkResult_return = (VkResult)0;
+    vkGetAccelerationStructureHandleNV_VkResult_return = vkEnc->vkGetAccelerationStructureHandleNV(device, accelerationStructure, dataSize, pData, true /* do lock */);
+    return vkGetAccelerationStructureHandleNV_VkResult_return;
+}
+static void entry_vkCmdWriteAccelerationStructuresPropertiesNV(
+    VkCommandBuffer commandBuffer,
+    uint32_t accelerationStructureCount,
+    const VkAccelerationStructureNV* pAccelerationStructures,
+    VkQueryType queryType,
+    VkQueryPool queryPool,
+    uint32_t firstQuery)
+{
+    AEMU_SCOPED_TRACE("vkCmdWriteAccelerationStructuresPropertiesNV");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdWriteAccelerationStructuresPropertiesNV(commandBuffer, accelerationStructureCount, pAccelerationStructures, queryType, queryPool, firstQuery, true /* do lock */);
+}
+static VkResult entry_vkCompileDeferredNV(
+    VkDevice device,
+    VkPipeline pipeline,
+    uint32_t shader)
+{
+    AEMU_SCOPED_TRACE("vkCompileDeferredNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCompileDeferredNV_VkResult_return = (VkResult)0;
+    vkCompileDeferredNV_VkResult_return = vkEnc->vkCompileDeferredNV(device, pipeline, shader, true /* do lock */);
+    return vkCompileDeferredNV_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCompileDeferredNV(
+    VkDevice device,
+    VkPipeline pipeline,
+    uint32_t shader)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_NV_ray_tracing"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCompileDeferredNV", "VK_NV_ray_tracing");
+    }
+    AEMU_SCOPED_TRACE("vkCompileDeferredNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCompileDeferredNV_VkResult_return = (VkResult)0;
+    vkCompileDeferredNV_VkResult_return = vkEnc->vkCompileDeferredNV(device, pipeline, shader, true /* do lock */);
+    return vkCompileDeferredNV_VkResult_return;
+}
+#endif
+#ifdef VK_NV_representative_fragment_test
+#endif
+#ifdef VK_EXT_filter_cubic
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
+#endif
+#ifdef VK_EXT_global_priority
+#endif
+#ifdef VK_EXT_external_memory_host
+static VkResult entry_vkGetMemoryHostPointerPropertiesEXT(
+    VkDevice device,
+    VkExternalMemoryHandleTypeFlagBits handleType,
+    const void* pHostPointer,
+    VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetMemoryHostPointerPropertiesEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetMemoryHostPointerPropertiesEXT_VkResult_return = (VkResult)0;
+    vkGetMemoryHostPointerPropertiesEXT_VkResult_return = vkEnc->vkGetMemoryHostPointerPropertiesEXT(device, handleType, pHostPointer, pMemoryHostPointerProperties, true /* do lock */);
+    return vkGetMemoryHostPointerPropertiesEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetMemoryHostPointerPropertiesEXT(
+    VkDevice device,
+    VkExternalMemoryHandleTypeFlagBits handleType,
+    const void* pHostPointer,
+    VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_external_memory_host"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetMemoryHostPointerPropertiesEXT", "VK_EXT_external_memory_host");
+    }
+    AEMU_SCOPED_TRACE("vkGetMemoryHostPointerPropertiesEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetMemoryHostPointerPropertiesEXT_VkResult_return = (VkResult)0;
+    vkGetMemoryHostPointerPropertiesEXT_VkResult_return = vkEnc->vkGetMemoryHostPointerPropertiesEXT(device, handleType, pHostPointer, pMemoryHostPointerProperties, true /* do lock */);
+    return vkGetMemoryHostPointerPropertiesEXT_VkResult_return;
+}
+#endif
+#ifdef VK_AMD_buffer_marker
+static void entry_vkCmdWriteBufferMarkerAMD(
+    VkCommandBuffer commandBuffer,
+    VkPipelineStageFlagBits pipelineStage,
+    VkBuffer dstBuffer,
+    VkDeviceSize dstOffset,
+    uint32_t marker)
+{
+    AEMU_SCOPED_TRACE("vkCmdWriteBufferMarkerAMD");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdWriteBufferMarkerAMD(commandBuffer, pipelineStage, dstBuffer, dstOffset, marker, true /* do lock */);
+}
+#endif
+#ifdef VK_AMD_pipeline_compiler_control
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+static VkResult entry_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pTimeDomainCount,
+    VkTimeDomainEXT* pTimeDomains)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceCalibrateableTimeDomainsEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceCalibrateableTimeDomainsEXT_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceCalibrateableTimeDomainsEXT_VkResult_return = vkEnc->vkGetPhysicalDeviceCalibrateableTimeDomainsEXT(physicalDevice, pTimeDomainCount, pTimeDomains, true /* do lock */);
+    return vkGetPhysicalDeviceCalibrateableTimeDomainsEXT_VkResult_return;
+}
+static VkResult entry_vkGetCalibratedTimestampsEXT(
+    VkDevice device,
+    uint32_t timestampCount,
+    const VkCalibratedTimestampInfoEXT* pTimestampInfos,
+    uint64_t* pTimestamps,
+    uint64_t* pMaxDeviation)
+{
+    AEMU_SCOPED_TRACE("vkGetCalibratedTimestampsEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetCalibratedTimestampsEXT_VkResult_return = (VkResult)0;
+    vkGetCalibratedTimestampsEXT_VkResult_return = vkEnc->vkGetCalibratedTimestampsEXT(device, timestampCount, pTimestampInfos, pTimestamps, pMaxDeviation, true /* do lock */);
+    return vkGetCalibratedTimestampsEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetCalibratedTimestampsEXT(
+    VkDevice device,
+    uint32_t timestampCount,
+    const VkCalibratedTimestampInfoEXT* pTimestampInfos,
+    uint64_t* pTimestamps,
+    uint64_t* pMaxDeviation)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_calibrated_timestamps"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetCalibratedTimestampsEXT", "VK_EXT_calibrated_timestamps");
+    }
+    AEMU_SCOPED_TRACE("vkGetCalibratedTimestampsEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetCalibratedTimestampsEXT_VkResult_return = (VkResult)0;
+    vkGetCalibratedTimestampsEXT_VkResult_return = vkEnc->vkGetCalibratedTimestampsEXT(device, timestampCount, pTimestampInfos, pTimestamps, pMaxDeviation, true /* do lock */);
+    return vkGetCalibratedTimestampsEXT_VkResult_return;
+}
+#endif
+#ifdef VK_AMD_shader_core_properties
+#endif
+#ifdef VK_AMD_memory_overallocation_behavior
+#endif
+#ifdef VK_EXT_vertex_attribute_divisor
+#endif
+#ifdef VK_GGP_frame_token
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+#endif
+#ifdef VK_NV_shader_subgroup_partitioned
+#endif
+#ifdef VK_NV_compute_shader_derivatives
+#endif
+#ifdef VK_NV_mesh_shader
+static void entry_vkCmdDrawMeshTasksNV(
+    VkCommandBuffer commandBuffer,
+    uint32_t taskCount,
+    uint32_t firstTask)
+{
+    AEMU_SCOPED_TRACE("vkCmdDrawMeshTasksNV");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDrawMeshTasksNV(commandBuffer, taskCount, firstTask, true /* do lock */);
+}
+static void entry_vkCmdDrawMeshTasksIndirectNV(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    uint32_t drawCount,
+    uint32_t stride)
+{
+    AEMU_SCOPED_TRACE("vkCmdDrawMeshTasksIndirectNV");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDrawMeshTasksIndirectNV(commandBuffer, buffer, offset, drawCount, stride, true /* do lock */);
+}
+static void entry_vkCmdDrawMeshTasksIndirectCountNV(
+    VkCommandBuffer commandBuffer,
+    VkBuffer buffer,
+    VkDeviceSize offset,
+    VkBuffer countBuffer,
+    VkDeviceSize countBufferOffset,
+    uint32_t maxDrawCount,
+    uint32_t stride)
+{
+    AEMU_SCOPED_TRACE("vkCmdDrawMeshTasksIndirectCountNV");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdDrawMeshTasksIndirectCountNV(commandBuffer, buffer, offset, countBuffer, countBufferOffset, maxDrawCount, stride, true /* do lock */);
+}
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+#endif
+#ifdef VK_NV_shader_image_footprint
+#endif
+#ifdef VK_NV_scissor_exclusive
+static void entry_vkCmdSetExclusiveScissorNV(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstExclusiveScissor,
+    uint32_t exclusiveScissorCount,
+    const VkRect2D* pExclusiveScissors)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetExclusiveScissorNV");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetExclusiveScissorNV(commandBuffer, firstExclusiveScissor, exclusiveScissorCount, pExclusiveScissors, true /* do lock */);
+}
+#endif
+#ifdef VK_NV_device_diagnostic_checkpoints
+static void entry_vkCmdSetCheckpointNV(
+    VkCommandBuffer commandBuffer,
+    const void* pCheckpointMarker)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetCheckpointNV");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetCheckpointNV(commandBuffer, pCheckpointMarker, true /* do lock */);
+}
+static void entry_vkGetQueueCheckpointDataNV(
+    VkQueue queue,
+    uint32_t* pCheckpointDataCount,
+    VkCheckpointDataNV* pCheckpointData)
+{
+    AEMU_SCOPED_TRACE("vkGetQueueCheckpointDataNV");
+    auto vkEnc = ResourceTracker::getQueueEncoder(queue);
+    vkEnc->vkGetQueueCheckpointDataNV(queue, pCheckpointDataCount, pCheckpointData, true /* do lock */);
+}
+#endif
+#ifdef VK_INTEL_shader_integer_functions2
+#endif
+#ifdef VK_INTEL_performance_query
+static VkResult entry_vkInitializePerformanceApiINTEL(
+    VkDevice device,
+    const VkInitializePerformanceApiInfoINTEL* pInitializeInfo)
+{
+    AEMU_SCOPED_TRACE("vkInitializePerformanceApiINTEL");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkInitializePerformanceApiINTEL_VkResult_return = (VkResult)0;
+    vkInitializePerformanceApiINTEL_VkResult_return = vkEnc->vkInitializePerformanceApiINTEL(device, pInitializeInfo, true /* do lock */);
+    return vkInitializePerformanceApiINTEL_VkResult_return;
+}
+static VkResult dynCheck_entry_vkInitializePerformanceApiINTEL(
+    VkDevice device,
+    const VkInitializePerformanceApiInfoINTEL* pInitializeInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_INTEL_performance_query"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkInitializePerformanceApiINTEL", "VK_INTEL_performance_query");
+    }
+    AEMU_SCOPED_TRACE("vkInitializePerformanceApiINTEL");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkInitializePerformanceApiINTEL_VkResult_return = (VkResult)0;
+    vkInitializePerformanceApiINTEL_VkResult_return = vkEnc->vkInitializePerformanceApiINTEL(device, pInitializeInfo, true /* do lock */);
+    return vkInitializePerformanceApiINTEL_VkResult_return;
+}
+static void entry_vkUninitializePerformanceApiINTEL(
+    VkDevice device)
+{
+    AEMU_SCOPED_TRACE("vkUninitializePerformanceApiINTEL");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkUninitializePerformanceApiINTEL(device, true /* do lock */);
+}
+static void dynCheck_entry_vkUninitializePerformanceApiINTEL(
+    VkDevice device)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_INTEL_performance_query"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkUninitializePerformanceApiINTEL", "VK_INTEL_performance_query");
+    }
+    AEMU_SCOPED_TRACE("vkUninitializePerformanceApiINTEL");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkUninitializePerformanceApiINTEL(device, true /* do lock */);
+}
+static VkResult entry_vkCmdSetPerformanceMarkerINTEL(
+    VkCommandBuffer commandBuffer,
+    const VkPerformanceMarkerInfoINTEL* pMarkerInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetPerformanceMarkerINTEL");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    VkResult vkCmdSetPerformanceMarkerINTEL_VkResult_return = (VkResult)0;
+    vkCmdSetPerformanceMarkerINTEL_VkResult_return = vkEnc->vkCmdSetPerformanceMarkerINTEL(commandBuffer, pMarkerInfo, true /* do lock */);
+    return vkCmdSetPerformanceMarkerINTEL_VkResult_return;
+}
+static VkResult entry_vkCmdSetPerformanceStreamMarkerINTEL(
+    VkCommandBuffer commandBuffer,
+    const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetPerformanceStreamMarkerINTEL");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    VkResult vkCmdSetPerformanceStreamMarkerINTEL_VkResult_return = (VkResult)0;
+    vkCmdSetPerformanceStreamMarkerINTEL_VkResult_return = vkEnc->vkCmdSetPerformanceStreamMarkerINTEL(commandBuffer, pMarkerInfo, true /* do lock */);
+    return vkCmdSetPerformanceStreamMarkerINTEL_VkResult_return;
+}
+static VkResult entry_vkCmdSetPerformanceOverrideINTEL(
+    VkCommandBuffer commandBuffer,
+    const VkPerformanceOverrideInfoINTEL* pOverrideInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetPerformanceOverrideINTEL");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    VkResult vkCmdSetPerformanceOverrideINTEL_VkResult_return = (VkResult)0;
+    vkCmdSetPerformanceOverrideINTEL_VkResult_return = vkEnc->vkCmdSetPerformanceOverrideINTEL(commandBuffer, pOverrideInfo, true /* do lock */);
+    return vkCmdSetPerformanceOverrideINTEL_VkResult_return;
+}
+static VkResult entry_vkAcquirePerformanceConfigurationINTEL(
+    VkDevice device,
+    const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo,
+    VkPerformanceConfigurationINTEL* pConfiguration)
+{
+    AEMU_SCOPED_TRACE("vkAcquirePerformanceConfigurationINTEL");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkAcquirePerformanceConfigurationINTEL_VkResult_return = (VkResult)0;
+    vkAcquirePerformanceConfigurationINTEL_VkResult_return = vkEnc->vkAcquirePerformanceConfigurationINTEL(device, pAcquireInfo, pConfiguration, true /* do lock */);
+    return vkAcquirePerformanceConfigurationINTEL_VkResult_return;
+}
+static VkResult dynCheck_entry_vkAcquirePerformanceConfigurationINTEL(
+    VkDevice device,
+    const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo,
+    VkPerformanceConfigurationINTEL* pConfiguration)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_INTEL_performance_query"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkAcquirePerformanceConfigurationINTEL", "VK_INTEL_performance_query");
+    }
+    AEMU_SCOPED_TRACE("vkAcquirePerformanceConfigurationINTEL");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkAcquirePerformanceConfigurationINTEL_VkResult_return = (VkResult)0;
+    vkAcquirePerformanceConfigurationINTEL_VkResult_return = vkEnc->vkAcquirePerformanceConfigurationINTEL(device, pAcquireInfo, pConfiguration, true /* do lock */);
+    return vkAcquirePerformanceConfigurationINTEL_VkResult_return;
+}
+static VkResult entry_vkReleasePerformanceConfigurationINTEL(
+    VkDevice device,
+    VkPerformanceConfigurationINTEL configuration)
+{
+    AEMU_SCOPED_TRACE("vkReleasePerformanceConfigurationINTEL");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkReleasePerformanceConfigurationINTEL_VkResult_return = (VkResult)0;
+    vkReleasePerformanceConfigurationINTEL_VkResult_return = vkEnc->vkReleasePerformanceConfigurationINTEL(device, configuration, true /* do lock */);
+    return vkReleasePerformanceConfigurationINTEL_VkResult_return;
+}
+static VkResult dynCheck_entry_vkReleasePerformanceConfigurationINTEL(
+    VkDevice device,
+    VkPerformanceConfigurationINTEL configuration)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_INTEL_performance_query"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkReleasePerformanceConfigurationINTEL", "VK_INTEL_performance_query");
+    }
+    AEMU_SCOPED_TRACE("vkReleasePerformanceConfigurationINTEL");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkReleasePerformanceConfigurationINTEL_VkResult_return = (VkResult)0;
+    vkReleasePerformanceConfigurationINTEL_VkResult_return = vkEnc->vkReleasePerformanceConfigurationINTEL(device, configuration, true /* do lock */);
+    return vkReleasePerformanceConfigurationINTEL_VkResult_return;
+}
+static VkResult entry_vkQueueSetPerformanceConfigurationINTEL(
+    VkQueue queue,
+    VkPerformanceConfigurationINTEL configuration)
+{
+    AEMU_SCOPED_TRACE("vkQueueSetPerformanceConfigurationINTEL");
+    auto vkEnc = ResourceTracker::getQueueEncoder(queue);
+    VkResult vkQueueSetPerformanceConfigurationINTEL_VkResult_return = (VkResult)0;
+    vkQueueSetPerformanceConfigurationINTEL_VkResult_return = vkEnc->vkQueueSetPerformanceConfigurationINTEL(queue, configuration, true /* do lock */);
+    return vkQueueSetPerformanceConfigurationINTEL_VkResult_return;
+}
+static VkResult entry_vkGetPerformanceParameterINTEL(
+    VkDevice device,
+    VkPerformanceParameterTypeINTEL parameter,
+    VkPerformanceValueINTEL* pValue)
+{
+    AEMU_SCOPED_TRACE("vkGetPerformanceParameterINTEL");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPerformanceParameterINTEL_VkResult_return = (VkResult)0;
+    vkGetPerformanceParameterINTEL_VkResult_return = vkEnc->vkGetPerformanceParameterINTEL(device, parameter, pValue, true /* do lock */);
+    return vkGetPerformanceParameterINTEL_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetPerformanceParameterINTEL(
+    VkDevice device,
+    VkPerformanceParameterTypeINTEL parameter,
+    VkPerformanceValueINTEL* pValue)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_INTEL_performance_query"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetPerformanceParameterINTEL", "VK_INTEL_performance_query");
+    }
+    AEMU_SCOPED_TRACE("vkGetPerformanceParameterINTEL");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPerformanceParameterINTEL_VkResult_return = (VkResult)0;
+    vkGetPerformanceParameterINTEL_VkResult_return = vkEnc->vkGetPerformanceParameterINTEL(device, parameter, pValue, true /* do lock */);
+    return vkGetPerformanceParameterINTEL_VkResult_return;
+}
+#endif
+#ifdef VK_EXT_pci_bus_info
+#endif
+#ifdef VK_AMD_display_native_hdr
+static void entry_vkSetLocalDimmingAMD(
+    VkDevice device,
+    VkSwapchainKHR swapChain,
+    VkBool32 localDimmingEnable)
+{
+    AEMU_SCOPED_TRACE("vkSetLocalDimmingAMD");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkSetLocalDimmingAMD(device, swapChain, localDimmingEnable, true /* do lock */);
+}
+static void dynCheck_entry_vkSetLocalDimmingAMD(
+    VkDevice device,
+    VkSwapchainKHR swapChain,
+    VkBool32 localDimmingEnable)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_AMD_display_native_hdr"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkSetLocalDimmingAMD", "VK_AMD_display_native_hdr");
+    }
+    AEMU_SCOPED_TRACE("vkSetLocalDimmingAMD");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkSetLocalDimmingAMD(device, swapChain, localDimmingEnable, true /* do lock */);
+}
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+static VkResult entry_vkCreateImagePipeSurfaceFUCHSIA(
+    VkInstance instance,
+    const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface)
+{
+    AEMU_SCOPED_TRACE("vkCreateImagePipeSurfaceFUCHSIA");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateImagePipeSurfaceFUCHSIA_VkResult_return = (VkResult)0;
+    vkCreateImagePipeSurfaceFUCHSIA_VkResult_return = vkEnc->vkCreateImagePipeSurfaceFUCHSIA(instance, pCreateInfo, pAllocator, pSurface, true /* do lock */);
+    return vkCreateImagePipeSurfaceFUCHSIA_VkResult_return;
+}
+#endif
+#ifdef VK_EXT_metal_surface
+static VkResult entry_vkCreateMetalSurfaceEXT(
+    VkInstance instance,
+    const VkMetalSurfaceCreateInfoEXT* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface)
+{
+    AEMU_SCOPED_TRACE("vkCreateMetalSurfaceEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateMetalSurfaceEXT_VkResult_return = (VkResult)0;
+    vkCreateMetalSurfaceEXT_VkResult_return = vkEnc->vkCreateMetalSurfaceEXT(instance, pCreateInfo, pAllocator, pSurface, true /* do lock */);
+    return vkCreateMetalSurfaceEXT_VkResult_return;
+}
+#endif
+#ifdef VK_EXT_fragment_density_map
+#endif
+#ifdef VK_EXT_scalar_block_layout
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+#endif
+#ifdef VK_AMD_shader_core_properties2
+#endif
+#ifdef VK_AMD_device_coherent_memory
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+#endif
+#ifdef VK_EXT_memory_budget
+#endif
+#ifdef VK_EXT_memory_priority
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+#endif
+#ifdef VK_EXT_buffer_device_address
+static VkDeviceAddress entry_vkGetBufferDeviceAddressEXT(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo)
+{
+    AEMU_SCOPED_TRACE("vkGetBufferDeviceAddressEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkDeviceAddress vkGetBufferDeviceAddressEXT_VkDeviceAddress_return = (VkDeviceAddress)0;
+    vkGetBufferDeviceAddressEXT_VkDeviceAddress_return = vkEnc->vkGetBufferDeviceAddressEXT(device, pInfo, true /* do lock */);
+    return vkGetBufferDeviceAddressEXT_VkDeviceAddress_return;
+}
+static VkDeviceAddress dynCheck_entry_vkGetBufferDeviceAddressEXT(
+    VkDevice device,
+    const VkBufferDeviceAddressInfo* pInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_buffer_device_address"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetBufferDeviceAddressEXT", "VK_EXT_buffer_device_address");
+    }
+    AEMU_SCOPED_TRACE("vkGetBufferDeviceAddressEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkDeviceAddress vkGetBufferDeviceAddressEXT_VkDeviceAddress_return = (VkDeviceAddress)0;
+    vkGetBufferDeviceAddressEXT_VkDeviceAddress_return = vkEnc->vkGetBufferDeviceAddressEXT(device, pInfo, true /* do lock */);
+    return vkGetBufferDeviceAddressEXT_VkDeviceAddress_return;
+}
+#endif
+#ifdef VK_EXT_tooling_info
+static VkResult entry_vkGetPhysicalDeviceToolPropertiesEXT(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pToolCount,
+    VkPhysicalDeviceToolPropertiesEXT* pToolProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceToolPropertiesEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceToolPropertiesEXT_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceToolPropertiesEXT_VkResult_return = vkEnc->vkGetPhysicalDeviceToolPropertiesEXT(physicalDevice, pToolCount, pToolProperties, true /* do lock */);
+    return vkGetPhysicalDeviceToolPropertiesEXT_VkResult_return;
+}
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+#endif
+#ifdef VK_EXT_validation_features
+#endif
+#ifdef VK_NV_cooperative_matrix
+static VkResult entry_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pPropertyCount,
+    VkCooperativeMatrixPropertiesNV* pProperties)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceCooperativeMatrixPropertiesNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceCooperativeMatrixPropertiesNV_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceCooperativeMatrixPropertiesNV_VkResult_return = vkEnc->vkGetPhysicalDeviceCooperativeMatrixPropertiesNV(physicalDevice, pPropertyCount, pProperties, true /* do lock */);
+    return vkGetPhysicalDeviceCooperativeMatrixPropertiesNV_VkResult_return;
+}
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+static VkResult entry_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(
+    VkPhysicalDevice physicalDevice,
+    uint32_t* pCombinationCount,
+    VkFramebufferMixedSamplesCombinationNV* pCombinations)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV_VkResult_return = vkEnc->vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(physicalDevice, pCombinationCount, pCombinations, true /* do lock */);
+    return vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV_VkResult_return;
+}
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+static VkResult entry_vkGetPhysicalDeviceSurfacePresentModes2EXT(
+    VkPhysicalDevice physicalDevice,
+    const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+    uint32_t* pPresentModeCount,
+    VkPresentModeKHR* pPresentModes)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceSurfacePresentModes2EXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetPhysicalDeviceSurfacePresentModes2EXT_VkResult_return = (VkResult)0;
+    vkGetPhysicalDeviceSurfacePresentModes2EXT_VkResult_return = vkEnc->vkGetPhysicalDeviceSurfacePresentModes2EXT(physicalDevice, pSurfaceInfo, pPresentModeCount, pPresentModes, true /* do lock */);
+    return vkGetPhysicalDeviceSurfacePresentModes2EXT_VkResult_return;
+}
+static VkResult entry_vkAcquireFullScreenExclusiveModeEXT(
+    VkDevice device,
+    VkSwapchainKHR swapchain)
+{
+    AEMU_SCOPED_TRACE("vkAcquireFullScreenExclusiveModeEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkAcquireFullScreenExclusiveModeEXT_VkResult_return = (VkResult)0;
+    vkAcquireFullScreenExclusiveModeEXT_VkResult_return = vkEnc->vkAcquireFullScreenExclusiveModeEXT(device, swapchain, true /* do lock */);
+    return vkAcquireFullScreenExclusiveModeEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkAcquireFullScreenExclusiveModeEXT(
+    VkDevice device,
+    VkSwapchainKHR swapchain)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_full_screen_exclusive"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkAcquireFullScreenExclusiveModeEXT", "VK_EXT_full_screen_exclusive");
+    }
+    AEMU_SCOPED_TRACE("vkAcquireFullScreenExclusiveModeEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkAcquireFullScreenExclusiveModeEXT_VkResult_return = (VkResult)0;
+    vkAcquireFullScreenExclusiveModeEXT_VkResult_return = vkEnc->vkAcquireFullScreenExclusiveModeEXT(device, swapchain, true /* do lock */);
+    return vkAcquireFullScreenExclusiveModeEXT_VkResult_return;
+}
+static VkResult entry_vkReleaseFullScreenExclusiveModeEXT(
+    VkDevice device,
+    VkSwapchainKHR swapchain)
+{
+    AEMU_SCOPED_TRACE("vkReleaseFullScreenExclusiveModeEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkReleaseFullScreenExclusiveModeEXT_VkResult_return = (VkResult)0;
+    vkReleaseFullScreenExclusiveModeEXT_VkResult_return = vkEnc->vkReleaseFullScreenExclusiveModeEXT(device, swapchain, true /* do lock */);
+    return vkReleaseFullScreenExclusiveModeEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkReleaseFullScreenExclusiveModeEXT(
+    VkDevice device,
+    VkSwapchainKHR swapchain)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_full_screen_exclusive"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkReleaseFullScreenExclusiveModeEXT", "VK_EXT_full_screen_exclusive");
+    }
+    AEMU_SCOPED_TRACE("vkReleaseFullScreenExclusiveModeEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkReleaseFullScreenExclusiveModeEXT_VkResult_return = (VkResult)0;
+    vkReleaseFullScreenExclusiveModeEXT_VkResult_return = vkEnc->vkReleaseFullScreenExclusiveModeEXT(device, swapchain, true /* do lock */);
+    return vkReleaseFullScreenExclusiveModeEXT_VkResult_return;
+}
+static VkResult entry_vkGetDeviceGroupSurfacePresentModes2EXT(
+    VkDevice device,
+    const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+    VkDeviceGroupPresentModeFlagsKHR* pModes)
+{
+    AEMU_SCOPED_TRACE("vkGetDeviceGroupSurfacePresentModes2EXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetDeviceGroupSurfacePresentModes2EXT_VkResult_return = (VkResult)0;
+    vkGetDeviceGroupSurfacePresentModes2EXT_VkResult_return = vkEnc->vkGetDeviceGroupSurfacePresentModes2EXT(device, pSurfaceInfo, pModes, true /* do lock */);
+    return vkGetDeviceGroupSurfacePresentModes2EXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetDeviceGroupSurfacePresentModes2EXT(
+    VkDevice device,
+    const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+    VkDeviceGroupPresentModeFlagsKHR* pModes)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_full_screen_exclusive"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetDeviceGroupSurfacePresentModes2EXT", "VK_EXT_full_screen_exclusive");
+    }
+    AEMU_SCOPED_TRACE("vkGetDeviceGroupSurfacePresentModes2EXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetDeviceGroupSurfacePresentModes2EXT_VkResult_return = (VkResult)0;
+    vkGetDeviceGroupSurfacePresentModes2EXT_VkResult_return = vkEnc->vkGetDeviceGroupSurfacePresentModes2EXT(device, pSurfaceInfo, pModes, true /* do lock */);
+    return vkGetDeviceGroupSurfacePresentModes2EXT_VkResult_return;
+}
+#endif
+#ifdef VK_EXT_headless_surface
+static VkResult entry_vkCreateHeadlessSurfaceEXT(
+    VkInstance instance,
+    const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface)
+{
+    AEMU_SCOPED_TRACE("vkCreateHeadlessSurfaceEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateHeadlessSurfaceEXT_VkResult_return = (VkResult)0;
+    vkCreateHeadlessSurfaceEXT_VkResult_return = vkEnc->vkCreateHeadlessSurfaceEXT(instance, pCreateInfo, pAllocator, pSurface, true /* do lock */);
+    return vkCreateHeadlessSurfaceEXT_VkResult_return;
+}
+#endif
+#ifdef VK_EXT_line_rasterization
+static void entry_vkCmdSetLineStippleEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t lineStippleFactor,
+    uint16_t lineStipplePattern)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetLineStippleEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetLineStippleEXT(commandBuffer, lineStippleFactor, lineStipplePattern, true /* do lock */);
+}
+#endif
+#ifdef VK_EXT_shader_atomic_float
+#endif
+#ifdef VK_EXT_host_query_reset
+static void entry_vkResetQueryPoolEXT(
+    VkDevice device,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t queryCount)
+{
+    AEMU_SCOPED_TRACE("vkResetQueryPoolEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkResetQueryPoolEXT(device, queryPool, firstQuery, queryCount, true /* do lock */);
+}
+static void dynCheck_entry_vkResetQueryPoolEXT(
+    VkDevice device,
+    VkQueryPool queryPool,
+    uint32_t firstQuery,
+    uint32_t queryCount)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_host_query_reset"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkResetQueryPoolEXT", "VK_EXT_host_query_reset");
+    }
+    AEMU_SCOPED_TRACE("vkResetQueryPoolEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkResetQueryPoolEXT(device, queryPool, firstQuery, queryCount, true /* do lock */);
+}
+#endif
+#ifdef VK_EXT_index_type_uint8
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+static void entry_vkCmdSetCullModeEXT(
+    VkCommandBuffer commandBuffer,
+    VkCullModeFlags cullMode)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetCullModeEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetCullModeEXT(commandBuffer, cullMode, true /* do lock */);
+}
+static void entry_vkCmdSetFrontFaceEXT(
+    VkCommandBuffer commandBuffer,
+    VkFrontFace frontFace)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetFrontFaceEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetFrontFaceEXT(commandBuffer, frontFace, true /* do lock */);
+}
+static void entry_vkCmdSetPrimitiveTopologyEXT(
+    VkCommandBuffer commandBuffer,
+    VkPrimitiveTopology primitiveTopology)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetPrimitiveTopologyEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetPrimitiveTopologyEXT(commandBuffer, primitiveTopology, true /* do lock */);
+}
+static void entry_vkCmdSetViewportWithCountEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t viewportCount,
+    const VkViewport* pViewports)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetViewportWithCountEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetViewportWithCountEXT(commandBuffer, viewportCount, pViewports, true /* do lock */);
+}
+static void entry_vkCmdSetScissorWithCountEXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t scissorCount,
+    const VkRect2D* pScissors)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetScissorWithCountEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetScissorWithCountEXT(commandBuffer, scissorCount, pScissors, true /* do lock */);
+}
+static void entry_vkCmdBindVertexBuffers2EXT(
+    VkCommandBuffer commandBuffer,
+    uint32_t firstBinding,
+    uint32_t bindingCount,
+    const VkBuffer* pBuffers,
+    const VkDeviceSize* pOffsets,
+    const VkDeviceSize* pSizes,
+    const VkDeviceSize* pStrides)
+{
+    AEMU_SCOPED_TRACE("vkCmdBindVertexBuffers2EXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBindVertexBuffers2EXT(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets, pSizes, pStrides, true /* do lock */);
+}
+static void entry_vkCmdSetDepthTestEnableEXT(
+    VkCommandBuffer commandBuffer,
+    VkBool32 depthTestEnable)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetDepthTestEnableEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetDepthTestEnableEXT(commandBuffer, depthTestEnable, true /* do lock */);
+}
+static void entry_vkCmdSetDepthWriteEnableEXT(
+    VkCommandBuffer commandBuffer,
+    VkBool32 depthWriteEnable)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetDepthWriteEnableEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetDepthWriteEnableEXT(commandBuffer, depthWriteEnable, true /* do lock */);
+}
+static void entry_vkCmdSetDepthCompareOpEXT(
+    VkCommandBuffer commandBuffer,
+    VkCompareOp depthCompareOp)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetDepthCompareOpEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetDepthCompareOpEXT(commandBuffer, depthCompareOp, true /* do lock */);
+}
+static void entry_vkCmdSetDepthBoundsTestEnableEXT(
+    VkCommandBuffer commandBuffer,
+    VkBool32 depthBoundsTestEnable)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetDepthBoundsTestEnableEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetDepthBoundsTestEnableEXT(commandBuffer, depthBoundsTestEnable, true /* do lock */);
+}
+static void entry_vkCmdSetStencilTestEnableEXT(
+    VkCommandBuffer commandBuffer,
+    VkBool32 stencilTestEnable)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetStencilTestEnableEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetStencilTestEnableEXT(commandBuffer, stencilTestEnable, true /* do lock */);
+}
+static void entry_vkCmdSetStencilOpEXT(
+    VkCommandBuffer commandBuffer,
+    VkStencilFaceFlags faceMask,
+    VkStencilOp failOp,
+    VkStencilOp passOp,
+    VkStencilOp depthFailOp,
+    VkCompareOp compareOp)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetStencilOpEXT");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetStencilOpEXT(commandBuffer, faceMask, failOp, passOp, depthFailOp, compareOp, true /* do lock */);
+}
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+#endif
+#ifdef VK_NV_device_generated_commands
+static void entry_vkGetGeneratedCommandsMemoryRequirementsNV(
+    VkDevice device,
+    const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements)
+{
+    AEMU_SCOPED_TRACE("vkGetGeneratedCommandsMemoryRequirementsNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetGeneratedCommandsMemoryRequirementsNV(device, pInfo, pMemoryRequirements, true /* do lock */);
+}
+static void dynCheck_entry_vkGetGeneratedCommandsMemoryRequirementsNV(
+    VkDevice device,
+    const VkGeneratedCommandsMemoryRequirementsInfoNV* pInfo,
+    VkMemoryRequirements2* pMemoryRequirements)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_NV_device_generated_commands"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetGeneratedCommandsMemoryRequirementsNV", "VK_NV_device_generated_commands");
+    }
+    AEMU_SCOPED_TRACE("vkGetGeneratedCommandsMemoryRequirementsNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetGeneratedCommandsMemoryRequirementsNV(device, pInfo, pMemoryRequirements, true /* do lock */);
+}
+static void entry_vkCmdPreprocessGeneratedCommandsNV(
+    VkCommandBuffer commandBuffer,
+    const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdPreprocessGeneratedCommandsNV");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdPreprocessGeneratedCommandsNV(commandBuffer, pGeneratedCommandsInfo, true /* do lock */);
+}
+static void entry_vkCmdExecuteGeneratedCommandsNV(
+    VkCommandBuffer commandBuffer,
+    VkBool32 isPreprocessed,
+    const VkGeneratedCommandsInfoNV* pGeneratedCommandsInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdExecuteGeneratedCommandsNV");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdExecuteGeneratedCommandsNV(commandBuffer, isPreprocessed, pGeneratedCommandsInfo, true /* do lock */);
+}
+static void entry_vkCmdBindPipelineShaderGroupNV(
+    VkCommandBuffer commandBuffer,
+    VkPipelineBindPoint pipelineBindPoint,
+    VkPipeline pipeline,
+    uint32_t groupIndex)
+{
+    AEMU_SCOPED_TRACE("vkCmdBindPipelineShaderGroupNV");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBindPipelineShaderGroupNV(commandBuffer, pipelineBindPoint, pipeline, groupIndex, true /* do lock */);
+}
+static VkResult entry_vkCreateIndirectCommandsLayoutNV(
+    VkDevice device,
+    const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkIndirectCommandsLayoutNV* pIndirectCommandsLayout)
+{
+    AEMU_SCOPED_TRACE("vkCreateIndirectCommandsLayoutNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateIndirectCommandsLayoutNV_VkResult_return = (VkResult)0;
+    vkCreateIndirectCommandsLayoutNV_VkResult_return = vkEnc->vkCreateIndirectCommandsLayoutNV(device, pCreateInfo, pAllocator, pIndirectCommandsLayout, true /* do lock */);
+    return vkCreateIndirectCommandsLayoutNV_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreateIndirectCommandsLayoutNV(
+    VkDevice device,
+    const VkIndirectCommandsLayoutCreateInfoNV* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkIndirectCommandsLayoutNV* pIndirectCommandsLayout)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_NV_device_generated_commands"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreateIndirectCommandsLayoutNV", "VK_NV_device_generated_commands");
+    }
+    AEMU_SCOPED_TRACE("vkCreateIndirectCommandsLayoutNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateIndirectCommandsLayoutNV_VkResult_return = (VkResult)0;
+    vkCreateIndirectCommandsLayoutNV_VkResult_return = vkEnc->vkCreateIndirectCommandsLayoutNV(device, pCreateInfo, pAllocator, pIndirectCommandsLayout, true /* do lock */);
+    return vkCreateIndirectCommandsLayoutNV_VkResult_return;
+}
+static void entry_vkDestroyIndirectCommandsLayoutNV(
+    VkDevice device,
+    VkIndirectCommandsLayoutNV indirectCommandsLayout,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyIndirectCommandsLayoutNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyIndirectCommandsLayoutNV(device, indirectCommandsLayout, pAllocator, true /* do lock */);
+}
+static void dynCheck_entry_vkDestroyIndirectCommandsLayoutNV(
+    VkDevice device,
+    VkIndirectCommandsLayoutNV indirectCommandsLayout,
+    const VkAllocationCallbacks* pAllocator)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_NV_device_generated_commands"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkDestroyIndirectCommandsLayoutNV", "VK_NV_device_generated_commands");
+    }
+    AEMU_SCOPED_TRACE("vkDestroyIndirectCommandsLayoutNV");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyIndirectCommandsLayoutNV(device, indirectCommandsLayout, pAllocator, true /* do lock */);
+}
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+#endif
+#ifdef VK_QCOM_render_pass_transform
+#endif
+#ifdef VK_EXT_device_memory_report
+#endif
+#ifdef VK_EXT_robustness2
+#endif
+#ifdef VK_EXT_custom_border_color
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+static VkResult entry_vkCreatePrivateDataSlotEXT(
+    VkDevice device,
+    const VkPrivateDataSlotCreateInfoEXT* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkPrivateDataSlotEXT* pPrivateDataSlot)
+{
+    AEMU_SCOPED_TRACE("vkCreatePrivateDataSlotEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreatePrivateDataSlotEXT_VkResult_return = (VkResult)0;
+    vkCreatePrivateDataSlotEXT_VkResult_return = vkEnc->vkCreatePrivateDataSlotEXT(device, pCreateInfo, pAllocator, pPrivateDataSlot, true /* do lock */);
+    return vkCreatePrivateDataSlotEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreatePrivateDataSlotEXT(
+    VkDevice device,
+    const VkPrivateDataSlotCreateInfoEXT* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkPrivateDataSlotEXT* pPrivateDataSlot)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_private_data"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreatePrivateDataSlotEXT", "VK_EXT_private_data");
+    }
+    AEMU_SCOPED_TRACE("vkCreatePrivateDataSlotEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreatePrivateDataSlotEXT_VkResult_return = (VkResult)0;
+    vkCreatePrivateDataSlotEXT_VkResult_return = vkEnc->vkCreatePrivateDataSlotEXT(device, pCreateInfo, pAllocator, pPrivateDataSlot, true /* do lock */);
+    return vkCreatePrivateDataSlotEXT_VkResult_return;
+}
+static void entry_vkDestroyPrivateDataSlotEXT(
+    VkDevice device,
+    VkPrivateDataSlotEXT privateDataSlot,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyPrivateDataSlotEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyPrivateDataSlotEXT(device, privateDataSlot, pAllocator, true /* do lock */);
+}
+static void dynCheck_entry_vkDestroyPrivateDataSlotEXT(
+    VkDevice device,
+    VkPrivateDataSlotEXT privateDataSlot,
+    const VkAllocationCallbacks* pAllocator)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_private_data"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkDestroyPrivateDataSlotEXT", "VK_EXT_private_data");
+    }
+    AEMU_SCOPED_TRACE("vkDestroyPrivateDataSlotEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyPrivateDataSlotEXT(device, privateDataSlot, pAllocator, true /* do lock */);
+}
+static VkResult entry_vkSetPrivateDataEXT(
+    VkDevice device,
+    VkObjectType objectType,
+    uint64_t objectHandle,
+    VkPrivateDataSlotEXT privateDataSlot,
+    uint64_t data)
+{
+    AEMU_SCOPED_TRACE("vkSetPrivateDataEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkSetPrivateDataEXT_VkResult_return = (VkResult)0;
+    vkSetPrivateDataEXT_VkResult_return = vkEnc->vkSetPrivateDataEXT(device, objectType, objectHandle, privateDataSlot, data, true /* do lock */);
+    return vkSetPrivateDataEXT_VkResult_return;
+}
+static VkResult dynCheck_entry_vkSetPrivateDataEXT(
+    VkDevice device,
+    VkObjectType objectType,
+    uint64_t objectHandle,
+    VkPrivateDataSlotEXT privateDataSlot,
+    uint64_t data)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_private_data"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkSetPrivateDataEXT", "VK_EXT_private_data");
+    }
+    AEMU_SCOPED_TRACE("vkSetPrivateDataEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkSetPrivateDataEXT_VkResult_return = (VkResult)0;
+    vkSetPrivateDataEXT_VkResult_return = vkEnc->vkSetPrivateDataEXT(device, objectType, objectHandle, privateDataSlot, data, true /* do lock */);
+    return vkSetPrivateDataEXT_VkResult_return;
+}
+static void entry_vkGetPrivateDataEXT(
+    VkDevice device,
+    VkObjectType objectType,
+    uint64_t objectHandle,
+    VkPrivateDataSlotEXT privateDataSlot,
+    uint64_t* pData)
+{
+    AEMU_SCOPED_TRACE("vkGetPrivateDataEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPrivateDataEXT(device, objectType, objectHandle, privateDataSlot, pData, true /* do lock */);
+}
+static void dynCheck_entry_vkGetPrivateDataEXT(
+    VkDevice device,
+    VkObjectType objectType,
+    uint64_t objectHandle,
+    VkPrivateDataSlotEXT privateDataSlot,
+    uint64_t* pData)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_EXT_private_data"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetPrivateDataEXT", "VK_EXT_private_data");
+    }
+    AEMU_SCOPED_TRACE("vkGetPrivateDataEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetPrivateDataEXT(device, objectType, objectHandle, privateDataSlot, pData, true /* do lock */);
+}
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+#endif
+#ifdef VK_NV_device_diagnostics_config
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+static void entry_vkCmdSetFragmentShadingRateEnumNV(
+    VkCommandBuffer commandBuffer,
+    VkFragmentShadingRateNV shadingRate,
+    const VkFragmentShadingRateCombinerOpKHR combinerOps[2])
+{
+    AEMU_SCOPED_TRACE("vkCmdSetFragmentShadingRateEnumNV");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetFragmentShadingRateEnumNV(commandBuffer, shadingRate, combinerOps, true /* do lock */);
+}
+#endif
+#ifdef VK_EXT_fragment_density_map2
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+#endif
+#ifdef VK_EXT_image_robustness
+#endif
+#ifdef VK_EXT_4444_formats
+#endif
+#ifdef VK_EXT_directfb_surface
+static VkResult entry_vkCreateDirectFBSurfaceEXT(
+    VkInstance instance,
+    const VkDirectFBSurfaceCreateInfoEXT* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkSurfaceKHR* pSurface)
+{
+    AEMU_SCOPED_TRACE("vkCreateDirectFBSurfaceEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateDirectFBSurfaceEXT_VkResult_return = (VkResult)0;
+    vkCreateDirectFBSurfaceEXT_VkResult_return = vkEnc->vkCreateDirectFBSurfaceEXT(instance, pCreateInfo, pAllocator, pSurface, true /* do lock */);
+    return vkCreateDirectFBSurfaceEXT_VkResult_return;
+}
+static VkBool32 entry_vkGetPhysicalDeviceDirectFBPresentationSupportEXT(
+    VkPhysicalDevice physicalDevice,
+    uint32_t queueFamilyIndex,
+    IDirectFB* dfb)
+{
+    AEMU_SCOPED_TRACE("vkGetPhysicalDeviceDirectFBPresentationSupportEXT");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkBool32 vkGetPhysicalDeviceDirectFBPresentationSupportEXT_VkBool32_return = (VkBool32)0;
+    vkGetPhysicalDeviceDirectFBPresentationSupportEXT_VkBool32_return = vkEnc->vkGetPhysicalDeviceDirectFBPresentationSupportEXT(physicalDevice, queueFamilyIndex, dfb, true /* do lock */);
+    return vkGetPhysicalDeviceDirectFBPresentationSupportEXT_VkBool32_return;
+}
+#endif
+#ifdef VK_GOOGLE_gfxstream
+static VkResult entry_vkRegisterImageColorBufferGOOGLE(
+    VkDevice device,
+    VkImage image,
+    uint32_t colorBuffer)
+{
+    AEMU_SCOPED_TRACE("vkRegisterImageColorBufferGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkRegisterImageColorBufferGOOGLE_VkResult_return = (VkResult)0;
+    vkRegisterImageColorBufferGOOGLE_VkResult_return = vkEnc->vkRegisterImageColorBufferGOOGLE(device, image, colorBuffer, true /* do lock */);
+    return vkRegisterImageColorBufferGOOGLE_VkResult_return;
+}
+static VkResult dynCheck_entry_vkRegisterImageColorBufferGOOGLE(
+    VkDevice device,
+    VkImage image,
+    uint32_t colorBuffer)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkRegisterImageColorBufferGOOGLE", "VK_GOOGLE_gfxstream");
+    }
+    AEMU_SCOPED_TRACE("vkRegisterImageColorBufferGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkRegisterImageColorBufferGOOGLE_VkResult_return = (VkResult)0;
+    vkRegisterImageColorBufferGOOGLE_VkResult_return = vkEnc->vkRegisterImageColorBufferGOOGLE(device, image, colorBuffer, true /* do lock */);
+    return vkRegisterImageColorBufferGOOGLE_VkResult_return;
+}
+static VkResult entry_vkRegisterBufferColorBufferGOOGLE(
+    VkDevice device,
+    VkBuffer buffer,
+    uint32_t colorBuffer)
+{
+    AEMU_SCOPED_TRACE("vkRegisterBufferColorBufferGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkRegisterBufferColorBufferGOOGLE_VkResult_return = (VkResult)0;
+    vkRegisterBufferColorBufferGOOGLE_VkResult_return = vkEnc->vkRegisterBufferColorBufferGOOGLE(device, buffer, colorBuffer, true /* do lock */);
+    return vkRegisterBufferColorBufferGOOGLE_VkResult_return;
+}
+static VkResult dynCheck_entry_vkRegisterBufferColorBufferGOOGLE(
+    VkDevice device,
+    VkBuffer buffer,
+    uint32_t colorBuffer)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkRegisterBufferColorBufferGOOGLE", "VK_GOOGLE_gfxstream");
+    }
+    AEMU_SCOPED_TRACE("vkRegisterBufferColorBufferGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkRegisterBufferColorBufferGOOGLE_VkResult_return = (VkResult)0;
+    vkRegisterBufferColorBufferGOOGLE_VkResult_return = vkEnc->vkRegisterBufferColorBufferGOOGLE(device, buffer, colorBuffer, true /* do lock */);
+    return vkRegisterBufferColorBufferGOOGLE_VkResult_return;
+}
+static VkResult entry_vkMapMemoryIntoAddressSpaceGOOGLE(
+    VkDevice device,
+    VkDeviceMemory memory,
+    uint64_t* pAddress)
+{
+    AEMU_SCOPED_TRACE("vkMapMemoryIntoAddressSpaceGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkMapMemoryIntoAddressSpaceGOOGLE_VkResult_return = (VkResult)0;
+    vkMapMemoryIntoAddressSpaceGOOGLE_VkResult_return = vkEnc->vkMapMemoryIntoAddressSpaceGOOGLE(device, memory, pAddress, true /* do lock */);
+    return vkMapMemoryIntoAddressSpaceGOOGLE_VkResult_return;
+}
+static VkResult dynCheck_entry_vkMapMemoryIntoAddressSpaceGOOGLE(
+    VkDevice device,
+    VkDeviceMemory memory,
+    uint64_t* pAddress)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkMapMemoryIntoAddressSpaceGOOGLE", "VK_GOOGLE_gfxstream");
+    }
+    AEMU_SCOPED_TRACE("vkMapMemoryIntoAddressSpaceGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkMapMemoryIntoAddressSpaceGOOGLE_VkResult_return = (VkResult)0;
+    vkMapMemoryIntoAddressSpaceGOOGLE_VkResult_return = vkEnc->vkMapMemoryIntoAddressSpaceGOOGLE(device, memory, pAddress, true /* do lock */);
+    return vkMapMemoryIntoAddressSpaceGOOGLE_VkResult_return;
+}
+static void entry_vkUpdateDescriptorSetWithTemplateSizedGOOGLE(
+    VkDevice device,
+    VkDescriptorSet descriptorSet,
+    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+    uint32_t imageInfoCount,
+    uint32_t bufferInfoCount,
+    uint32_t bufferViewCount,
+    const uint32_t* pImageInfoEntryIndices,
+    const uint32_t* pBufferInfoEntryIndices,
+    const uint32_t* pBufferViewEntryIndices,
+    const VkDescriptorImageInfo* pImageInfos,
+    const VkDescriptorBufferInfo* pBufferInfos,
+    const VkBufferView* pBufferViews)
+{
+    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplateSizedGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkUpdateDescriptorSetWithTemplateSizedGOOGLE(device, descriptorSet, descriptorUpdateTemplate, imageInfoCount, bufferInfoCount, bufferViewCount, pImageInfoEntryIndices, pBufferInfoEntryIndices, pBufferViewEntryIndices, pImageInfos, pBufferInfos, pBufferViews, true /* do lock */);
+}
+static void dynCheck_entry_vkUpdateDescriptorSetWithTemplateSizedGOOGLE(
+    VkDevice device,
+    VkDescriptorSet descriptorSet,
+    VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+    uint32_t imageInfoCount,
+    uint32_t bufferInfoCount,
+    uint32_t bufferViewCount,
+    const uint32_t* pImageInfoEntryIndices,
+    const uint32_t* pBufferInfoEntryIndices,
+    const uint32_t* pBufferViewEntryIndices,
+    const VkDescriptorImageInfo* pImageInfos,
+    const VkDescriptorBufferInfo* pBufferInfos,
+    const VkBufferView* pBufferViews)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkUpdateDescriptorSetWithTemplateSizedGOOGLE", "VK_GOOGLE_gfxstream");
+    }
+    AEMU_SCOPED_TRACE("vkUpdateDescriptorSetWithTemplateSizedGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkUpdateDescriptorSetWithTemplateSizedGOOGLE(device, descriptorSet, descriptorUpdateTemplate, imageInfoCount, bufferInfoCount, bufferViewCount, pImageInfoEntryIndices, pBufferInfoEntryIndices, pBufferViewEntryIndices, pImageInfos, pBufferInfos, pBufferViews, true /* do lock */);
+}
+static void entry_vkBeginCommandBufferAsyncGOOGLE(
+    VkCommandBuffer commandBuffer,
+    const VkCommandBufferBeginInfo* pBeginInfo)
+{
+    AEMU_SCOPED_TRACE("vkBeginCommandBufferAsyncGOOGLE");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkBeginCommandBufferAsyncGOOGLE(commandBuffer, pBeginInfo, true /* do lock */);
+}
+static void entry_vkEndCommandBufferAsyncGOOGLE(
+    VkCommandBuffer commandBuffer)
+{
+    AEMU_SCOPED_TRACE("vkEndCommandBufferAsyncGOOGLE");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkEndCommandBufferAsyncGOOGLE(commandBuffer, true /* do lock */);
+}
+static void entry_vkResetCommandBufferAsyncGOOGLE(
+    VkCommandBuffer commandBuffer,
+    VkCommandBufferResetFlags flags)
+{
+    AEMU_SCOPED_TRACE("vkResetCommandBufferAsyncGOOGLE");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkResetCommandBufferAsyncGOOGLE(commandBuffer, flags, true /* do lock */);
+}
+static void entry_vkCommandBufferHostSyncGOOGLE(
+    VkCommandBuffer commandBuffer,
+    uint32_t needHostSync,
+    uint32_t sequenceNumber)
+{
+    AEMU_SCOPED_TRACE("vkCommandBufferHostSyncGOOGLE");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCommandBufferHostSyncGOOGLE(commandBuffer, needHostSync, sequenceNumber, true /* do lock */);
+}
+static VkResult entry_vkCreateImageWithRequirementsGOOGLE(
+    VkDevice device,
+    const VkImageCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkImage* pImage,
+    VkMemoryRequirements* pMemoryRequirements)
+{
+    AEMU_SCOPED_TRACE("vkCreateImageWithRequirementsGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateImageWithRequirementsGOOGLE_VkResult_return = (VkResult)0;
+    vkCreateImageWithRequirementsGOOGLE_VkResult_return = vkEnc->vkCreateImageWithRequirementsGOOGLE(device, pCreateInfo, pAllocator, pImage, pMemoryRequirements, true /* do lock */);
+    return vkCreateImageWithRequirementsGOOGLE_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreateImageWithRequirementsGOOGLE(
+    VkDevice device,
+    const VkImageCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkImage* pImage,
+    VkMemoryRequirements* pMemoryRequirements)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreateImageWithRequirementsGOOGLE", "VK_GOOGLE_gfxstream");
+    }
+    AEMU_SCOPED_TRACE("vkCreateImageWithRequirementsGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateImageWithRequirementsGOOGLE_VkResult_return = (VkResult)0;
+    vkCreateImageWithRequirementsGOOGLE_VkResult_return = vkEnc->vkCreateImageWithRequirementsGOOGLE(device, pCreateInfo, pAllocator, pImage, pMemoryRequirements, true /* do lock */);
+    return vkCreateImageWithRequirementsGOOGLE_VkResult_return;
+}
+static VkResult entry_vkCreateBufferWithRequirementsGOOGLE(
+    VkDevice device,
+    const VkBufferCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkBuffer* pBuffer,
+    VkMemoryRequirements* pMemoryRequirements)
+{
+    AEMU_SCOPED_TRACE("vkCreateBufferWithRequirementsGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateBufferWithRequirementsGOOGLE_VkResult_return = (VkResult)0;
+    vkCreateBufferWithRequirementsGOOGLE_VkResult_return = vkEnc->vkCreateBufferWithRequirementsGOOGLE(device, pCreateInfo, pAllocator, pBuffer, pMemoryRequirements, true /* do lock */);
+    return vkCreateBufferWithRequirementsGOOGLE_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreateBufferWithRequirementsGOOGLE(
+    VkDevice device,
+    const VkBufferCreateInfo* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkBuffer* pBuffer,
+    VkMemoryRequirements* pMemoryRequirements)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreateBufferWithRequirementsGOOGLE", "VK_GOOGLE_gfxstream");
+    }
+    AEMU_SCOPED_TRACE("vkCreateBufferWithRequirementsGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateBufferWithRequirementsGOOGLE_VkResult_return = (VkResult)0;
+    vkCreateBufferWithRequirementsGOOGLE_VkResult_return = vkEnc->vkCreateBufferWithRequirementsGOOGLE(device, pCreateInfo, pAllocator, pBuffer, pMemoryRequirements, true /* do lock */);
+    return vkCreateBufferWithRequirementsGOOGLE_VkResult_return;
+}
+static VkResult entry_vkGetMemoryHostAddressInfoGOOGLE(
+    VkDevice device,
+    VkDeviceMemory memory,
+    uint64_t* pAddress,
+    uint64_t* pSize,
+    uint64_t* pHostmemId)
+{
+    AEMU_SCOPED_TRACE("vkGetMemoryHostAddressInfoGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetMemoryHostAddressInfoGOOGLE_VkResult_return = (VkResult)0;
+    vkGetMemoryHostAddressInfoGOOGLE_VkResult_return = vkEnc->vkGetMemoryHostAddressInfoGOOGLE(device, memory, pAddress, pSize, pHostmemId, true /* do lock */);
+    return vkGetMemoryHostAddressInfoGOOGLE_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetMemoryHostAddressInfoGOOGLE(
+    VkDevice device,
+    VkDeviceMemory memory,
+    uint64_t* pAddress,
+    uint64_t* pSize,
+    uint64_t* pHostmemId)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetMemoryHostAddressInfoGOOGLE", "VK_GOOGLE_gfxstream");
+    }
+    AEMU_SCOPED_TRACE("vkGetMemoryHostAddressInfoGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetMemoryHostAddressInfoGOOGLE_VkResult_return = (VkResult)0;
+    vkGetMemoryHostAddressInfoGOOGLE_VkResult_return = vkEnc->vkGetMemoryHostAddressInfoGOOGLE(device, memory, pAddress, pSize, pHostmemId, true /* do lock */);
+    return vkGetMemoryHostAddressInfoGOOGLE_VkResult_return;
+}
+static VkResult entry_vkFreeMemorySyncGOOGLE(
+    VkDevice device,
+    VkDeviceMemory memory,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkFreeMemorySyncGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkFreeMemorySyncGOOGLE_VkResult_return = (VkResult)0;
+    vkFreeMemorySyncGOOGLE_VkResult_return = vkEnc->vkFreeMemorySyncGOOGLE(device, memory, pAllocator, true /* do lock */);
+    return vkFreeMemorySyncGOOGLE_VkResult_return;
+}
+static VkResult dynCheck_entry_vkFreeMemorySyncGOOGLE(
+    VkDevice device,
+    VkDeviceMemory memory,
+    const VkAllocationCallbacks* pAllocator)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkFreeMemorySyncGOOGLE", "VK_GOOGLE_gfxstream");
+    }
+    AEMU_SCOPED_TRACE("vkFreeMemorySyncGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkFreeMemorySyncGOOGLE_VkResult_return = (VkResult)0;
+    vkFreeMemorySyncGOOGLE_VkResult_return = vkEnc->vkFreeMemorySyncGOOGLE(device, memory, pAllocator, true /* do lock */);
+    return vkFreeMemorySyncGOOGLE_VkResult_return;
+}
+static void entry_vkQueueHostSyncGOOGLE(
+    VkQueue queue,
+    uint32_t needHostSync,
+    uint32_t sequenceNumber)
+{
+    AEMU_SCOPED_TRACE("vkQueueHostSyncGOOGLE");
+    auto vkEnc = ResourceTracker::getQueueEncoder(queue);
+    vkEnc->vkQueueHostSyncGOOGLE(queue, needHostSync, sequenceNumber, true /* do lock */);
+}
+static void entry_vkQueueSubmitAsyncGOOGLE(
+    VkQueue queue,
+    uint32_t submitCount,
+    const VkSubmitInfo* pSubmits,
+    VkFence fence)
+{
+    AEMU_SCOPED_TRACE("vkQueueSubmitAsyncGOOGLE");
+    auto vkEnc = ResourceTracker::getQueueEncoder(queue);
+    vkEnc->vkQueueSubmitAsyncGOOGLE(queue, submitCount, pSubmits, fence, true /* do lock */);
+}
+static void entry_vkQueueWaitIdleAsyncGOOGLE(
+    VkQueue queue)
+{
+    AEMU_SCOPED_TRACE("vkQueueWaitIdleAsyncGOOGLE");
+    auto vkEnc = ResourceTracker::getQueueEncoder(queue);
+    vkEnc->vkQueueWaitIdleAsyncGOOGLE(queue, true /* do lock */);
+}
+static void entry_vkQueueBindSparseAsyncGOOGLE(
+    VkQueue queue,
+    uint32_t bindInfoCount,
+    const VkBindSparseInfo* pBindInfo,
+    VkFence fence)
+{
+    AEMU_SCOPED_TRACE("vkQueueBindSparseAsyncGOOGLE");
+    auto vkEnc = ResourceTracker::getQueueEncoder(queue);
+    vkEnc->vkQueueBindSparseAsyncGOOGLE(queue, bindInfoCount, pBindInfo, fence, true /* do lock */);
+}
+static void entry_vkGetLinearImageLayoutGOOGLE(
+    VkDevice device,
+    VkFormat format,
+    VkDeviceSize* pOffset,
+    VkDeviceSize* pRowPitchAlignment)
+{
+    AEMU_SCOPED_TRACE("vkGetLinearImageLayoutGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetLinearImageLayoutGOOGLE(device, format, pOffset, pRowPitchAlignment, true /* do lock */);
+}
+static void dynCheck_entry_vkGetLinearImageLayoutGOOGLE(
+    VkDevice device,
+    VkFormat format,
+    VkDeviceSize* pOffset,
+    VkDeviceSize* pRowPitchAlignment)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetLinearImageLayoutGOOGLE", "VK_GOOGLE_gfxstream");
+    }
+    AEMU_SCOPED_TRACE("vkGetLinearImageLayoutGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetLinearImageLayoutGOOGLE(device, format, pOffset, pRowPitchAlignment, true /* do lock */);
+}
+static void entry_vkQueueFlushCommandsGOOGLE(
+    VkQueue queue,
+    VkCommandBuffer commandBuffer,
+    VkDeviceSize dataSize,
+    const void* pData)
+{
+    AEMU_SCOPED_TRACE("vkQueueFlushCommandsGOOGLE");
+    auto vkEnc = ResourceTracker::getQueueEncoder(queue);
+    vkEnc->vkQueueFlushCommandsGOOGLE(queue, commandBuffer, dataSize, pData, true /* do lock */);
+}
+static void entry_vkQueueCommitDescriptorSetUpdatesGOOGLE(
+    VkQueue queue,
+    uint32_t descriptorPoolCount,
+    const VkDescriptorPool* pDescriptorPools,
+    uint32_t descriptorSetCount,
+    const VkDescriptorSetLayout* pSetLayouts,
+    const uint64_t* pDescriptorSetPoolIds,
+    const uint32_t* pDescriptorSetWhichPool,
+    const uint32_t* pDescriptorSetPendingAllocation,
+    const uint32_t* pDescriptorWriteStartingIndices,
+    uint32_t pendingDescriptorWriteCount,
+    const VkWriteDescriptorSet* pPendingDescriptorWrites)
+{
+    AEMU_SCOPED_TRACE("vkQueueCommitDescriptorSetUpdatesGOOGLE");
+    auto vkEnc = ResourceTracker::getQueueEncoder(queue);
+    vkEnc->vkQueueCommitDescriptorSetUpdatesGOOGLE(queue, descriptorPoolCount, pDescriptorPools, descriptorSetCount, pSetLayouts, pDescriptorSetPoolIds, pDescriptorSetWhichPool, pDescriptorSetPendingAllocation, pDescriptorWriteStartingIndices, pendingDescriptorWriteCount, pPendingDescriptorWrites, true /* do lock */);
+}
+static void entry_vkCollectDescriptorPoolIdsGOOGLE(
+    VkDevice device,
+    VkDescriptorPool descriptorPool,
+    uint32_t* pPoolIdCount,
+    uint64_t* pPoolIds)
+{
+    AEMU_SCOPED_TRACE("vkCollectDescriptorPoolIdsGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkCollectDescriptorPoolIdsGOOGLE(device, descriptorPool, pPoolIdCount, pPoolIds, true /* do lock */);
+}
+static void dynCheck_entry_vkCollectDescriptorPoolIdsGOOGLE(
+    VkDevice device,
+    VkDescriptorPool descriptorPool,
+    uint32_t* pPoolIdCount,
+    uint64_t* pPoolIds)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCollectDescriptorPoolIdsGOOGLE", "VK_GOOGLE_gfxstream");
+    }
+    AEMU_SCOPED_TRACE("vkCollectDescriptorPoolIdsGOOGLE");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkCollectDescriptorPoolIdsGOOGLE(device, descriptorPool, pPoolIdCount, pPoolIds, true /* do lock */);
+}
+#endif
+#ifdef VK_KHR_acceleration_structure
+static VkResult entry_vkCreateAccelerationStructureKHR(
+    VkDevice device,
+    const VkAccelerationStructureCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkAccelerationStructureKHR* pAccelerationStructure)
+{
+    AEMU_SCOPED_TRACE("vkCreateAccelerationStructureKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateAccelerationStructureKHR_VkResult_return = (VkResult)0;
+    vkCreateAccelerationStructureKHR_VkResult_return = vkEnc->vkCreateAccelerationStructureKHR(device, pCreateInfo, pAllocator, pAccelerationStructure, true /* do lock */);
+    return vkCreateAccelerationStructureKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreateAccelerationStructureKHR(
+    VkDevice device,
+    const VkAccelerationStructureCreateInfoKHR* pCreateInfo,
+    const VkAllocationCallbacks* pAllocator,
+    VkAccelerationStructureKHR* pAccelerationStructure)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreateAccelerationStructureKHR", "VK_KHR_acceleration_structure");
+    }
+    AEMU_SCOPED_TRACE("vkCreateAccelerationStructureKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateAccelerationStructureKHR_VkResult_return = (VkResult)0;
+    vkCreateAccelerationStructureKHR_VkResult_return = vkEnc->vkCreateAccelerationStructureKHR(device, pCreateInfo, pAllocator, pAccelerationStructure, true /* do lock */);
+    return vkCreateAccelerationStructureKHR_VkResult_return;
+}
+static void entry_vkDestroyAccelerationStructureKHR(
+    VkDevice device,
+    VkAccelerationStructureKHR accelerationStructure,
+    const VkAllocationCallbacks* pAllocator)
+{
+    AEMU_SCOPED_TRACE("vkDestroyAccelerationStructureKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyAccelerationStructureKHR(device, accelerationStructure, pAllocator, true /* do lock */);
+}
+static void dynCheck_entry_vkDestroyAccelerationStructureKHR(
+    VkDevice device,
+    VkAccelerationStructureKHR accelerationStructure,
+    const VkAllocationCallbacks* pAllocator)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkDestroyAccelerationStructureKHR", "VK_KHR_acceleration_structure");
+    }
+    AEMU_SCOPED_TRACE("vkDestroyAccelerationStructureKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkDestroyAccelerationStructureKHR(device, accelerationStructure, pAllocator, true /* do lock */);
+}
+static void entry_vkCmdBuildAccelerationStructuresKHR(
+    VkCommandBuffer commandBuffer,
+    uint32_t infoCount,
+    const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,
+    const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos)
+{
+    AEMU_SCOPED_TRACE("vkCmdBuildAccelerationStructuresKHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBuildAccelerationStructuresKHR(commandBuffer, infoCount, pInfos, ppBuildRangeInfos, true /* do lock */);
+}
+static void entry_vkCmdBuildAccelerationStructuresIndirectKHR(
+    VkCommandBuffer commandBuffer,
+    uint32_t infoCount,
+    const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,
+    const VkDeviceAddress* pIndirectDeviceAddresses,
+    const uint32_t* pIndirectStrides,
+    const uint32_t* const* ppMaxPrimitiveCounts)
+{
+    AEMU_SCOPED_TRACE("vkCmdBuildAccelerationStructuresIndirectKHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdBuildAccelerationStructuresIndirectKHR(commandBuffer, infoCount, pInfos, pIndirectDeviceAddresses, pIndirectStrides, ppMaxPrimitiveCounts, true /* do lock */);
+}
+static VkResult entry_vkBuildAccelerationStructuresKHR(
+    VkDevice device,
+    VkDeferredOperationKHR deferredOperation,
+    uint32_t infoCount,
+    const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,
+    const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos)
+{
+    AEMU_SCOPED_TRACE("vkBuildAccelerationStructuresKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkBuildAccelerationStructuresKHR_VkResult_return = (VkResult)0;
+    vkBuildAccelerationStructuresKHR_VkResult_return = vkEnc->vkBuildAccelerationStructuresKHR(device, deferredOperation, infoCount, pInfos, ppBuildRangeInfos, true /* do lock */);
+    return vkBuildAccelerationStructuresKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkBuildAccelerationStructuresKHR(
+    VkDevice device,
+    VkDeferredOperationKHR deferredOperation,
+    uint32_t infoCount,
+    const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,
+    const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkBuildAccelerationStructuresKHR", "VK_KHR_acceleration_structure");
+    }
+    AEMU_SCOPED_TRACE("vkBuildAccelerationStructuresKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkBuildAccelerationStructuresKHR_VkResult_return = (VkResult)0;
+    vkBuildAccelerationStructuresKHR_VkResult_return = vkEnc->vkBuildAccelerationStructuresKHR(device, deferredOperation, infoCount, pInfos, ppBuildRangeInfos, true /* do lock */);
+    return vkBuildAccelerationStructuresKHR_VkResult_return;
+}
+static VkResult entry_vkCopyAccelerationStructureKHR(
+    VkDevice device,
+    VkDeferredOperationKHR deferredOperation,
+    const VkCopyAccelerationStructureInfoKHR* pInfo)
+{
+    AEMU_SCOPED_TRACE("vkCopyAccelerationStructureKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCopyAccelerationStructureKHR_VkResult_return = (VkResult)0;
+    vkCopyAccelerationStructureKHR_VkResult_return = vkEnc->vkCopyAccelerationStructureKHR(device, deferredOperation, pInfo, true /* do lock */);
+    return vkCopyAccelerationStructureKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCopyAccelerationStructureKHR(
+    VkDevice device,
+    VkDeferredOperationKHR deferredOperation,
+    const VkCopyAccelerationStructureInfoKHR* pInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCopyAccelerationStructureKHR", "VK_KHR_acceleration_structure");
+    }
+    AEMU_SCOPED_TRACE("vkCopyAccelerationStructureKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCopyAccelerationStructureKHR_VkResult_return = (VkResult)0;
+    vkCopyAccelerationStructureKHR_VkResult_return = vkEnc->vkCopyAccelerationStructureKHR(device, deferredOperation, pInfo, true /* do lock */);
+    return vkCopyAccelerationStructureKHR_VkResult_return;
+}
+static VkResult entry_vkCopyAccelerationStructureToMemoryKHR(
+    VkDevice device,
+    VkDeferredOperationKHR deferredOperation,
+    const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo)
+{
+    AEMU_SCOPED_TRACE("vkCopyAccelerationStructureToMemoryKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCopyAccelerationStructureToMemoryKHR_VkResult_return = (VkResult)0;
+    vkCopyAccelerationStructureToMemoryKHR_VkResult_return = vkEnc->vkCopyAccelerationStructureToMemoryKHR(device, deferredOperation, pInfo, true /* do lock */);
+    return vkCopyAccelerationStructureToMemoryKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCopyAccelerationStructureToMemoryKHR(
+    VkDevice device,
+    VkDeferredOperationKHR deferredOperation,
+    const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCopyAccelerationStructureToMemoryKHR", "VK_KHR_acceleration_structure");
+    }
+    AEMU_SCOPED_TRACE("vkCopyAccelerationStructureToMemoryKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCopyAccelerationStructureToMemoryKHR_VkResult_return = (VkResult)0;
+    vkCopyAccelerationStructureToMemoryKHR_VkResult_return = vkEnc->vkCopyAccelerationStructureToMemoryKHR(device, deferredOperation, pInfo, true /* do lock */);
+    return vkCopyAccelerationStructureToMemoryKHR_VkResult_return;
+}
+static VkResult entry_vkCopyMemoryToAccelerationStructureKHR(
+    VkDevice device,
+    VkDeferredOperationKHR deferredOperation,
+    const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo)
+{
+    AEMU_SCOPED_TRACE("vkCopyMemoryToAccelerationStructureKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCopyMemoryToAccelerationStructureKHR_VkResult_return = (VkResult)0;
+    vkCopyMemoryToAccelerationStructureKHR_VkResult_return = vkEnc->vkCopyMemoryToAccelerationStructureKHR(device, deferredOperation, pInfo, true /* do lock */);
+    return vkCopyMemoryToAccelerationStructureKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCopyMemoryToAccelerationStructureKHR(
+    VkDevice device,
+    VkDeferredOperationKHR deferredOperation,
+    const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCopyMemoryToAccelerationStructureKHR", "VK_KHR_acceleration_structure");
+    }
+    AEMU_SCOPED_TRACE("vkCopyMemoryToAccelerationStructureKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCopyMemoryToAccelerationStructureKHR_VkResult_return = (VkResult)0;
+    vkCopyMemoryToAccelerationStructureKHR_VkResult_return = vkEnc->vkCopyMemoryToAccelerationStructureKHR(device, deferredOperation, pInfo, true /* do lock */);
+    return vkCopyMemoryToAccelerationStructureKHR_VkResult_return;
+}
+static VkResult entry_vkWriteAccelerationStructuresPropertiesKHR(
+    VkDevice device,
+    uint32_t accelerationStructureCount,
+    const VkAccelerationStructureKHR* pAccelerationStructures,
+    VkQueryType queryType,
+    size_t dataSize,
+    void* pData,
+    size_t stride)
+{
+    AEMU_SCOPED_TRACE("vkWriteAccelerationStructuresPropertiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkWriteAccelerationStructuresPropertiesKHR_VkResult_return = (VkResult)0;
+    vkWriteAccelerationStructuresPropertiesKHR_VkResult_return = vkEnc->vkWriteAccelerationStructuresPropertiesKHR(device, accelerationStructureCount, pAccelerationStructures, queryType, dataSize, pData, stride, true /* do lock */);
+    return vkWriteAccelerationStructuresPropertiesKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkWriteAccelerationStructuresPropertiesKHR(
+    VkDevice device,
+    uint32_t accelerationStructureCount,
+    const VkAccelerationStructureKHR* pAccelerationStructures,
+    VkQueryType queryType,
+    size_t dataSize,
+    void* pData,
+    size_t stride)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkWriteAccelerationStructuresPropertiesKHR", "VK_KHR_acceleration_structure");
+    }
+    AEMU_SCOPED_TRACE("vkWriteAccelerationStructuresPropertiesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkWriteAccelerationStructuresPropertiesKHR_VkResult_return = (VkResult)0;
+    vkWriteAccelerationStructuresPropertiesKHR_VkResult_return = vkEnc->vkWriteAccelerationStructuresPropertiesKHR(device, accelerationStructureCount, pAccelerationStructures, queryType, dataSize, pData, stride, true /* do lock */);
+    return vkWriteAccelerationStructuresPropertiesKHR_VkResult_return;
+}
+static void entry_vkCmdCopyAccelerationStructureKHR(
+    VkCommandBuffer commandBuffer,
+    const VkCopyAccelerationStructureInfoKHR* pInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdCopyAccelerationStructureKHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdCopyAccelerationStructureKHR(commandBuffer, pInfo, true /* do lock */);
+}
+static void entry_vkCmdCopyAccelerationStructureToMemoryKHR(
+    VkCommandBuffer commandBuffer,
+    const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdCopyAccelerationStructureToMemoryKHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdCopyAccelerationStructureToMemoryKHR(commandBuffer, pInfo, true /* do lock */);
+}
+static void entry_vkCmdCopyMemoryToAccelerationStructureKHR(
+    VkCommandBuffer commandBuffer,
+    const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo)
+{
+    AEMU_SCOPED_TRACE("vkCmdCopyMemoryToAccelerationStructureKHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdCopyMemoryToAccelerationStructureKHR(commandBuffer, pInfo, true /* do lock */);
+}
+static VkDeviceAddress entry_vkGetAccelerationStructureDeviceAddressKHR(
+    VkDevice device,
+    const VkAccelerationStructureDeviceAddressInfoKHR* pInfo)
+{
+    AEMU_SCOPED_TRACE("vkGetAccelerationStructureDeviceAddressKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkDeviceAddress vkGetAccelerationStructureDeviceAddressKHR_VkDeviceAddress_return = (VkDeviceAddress)0;
+    vkGetAccelerationStructureDeviceAddressKHR_VkDeviceAddress_return = vkEnc->vkGetAccelerationStructureDeviceAddressKHR(device, pInfo, true /* do lock */);
+    return vkGetAccelerationStructureDeviceAddressKHR_VkDeviceAddress_return;
+}
+static VkDeviceAddress dynCheck_entry_vkGetAccelerationStructureDeviceAddressKHR(
+    VkDevice device,
+    const VkAccelerationStructureDeviceAddressInfoKHR* pInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetAccelerationStructureDeviceAddressKHR", "VK_KHR_acceleration_structure");
+    }
+    AEMU_SCOPED_TRACE("vkGetAccelerationStructureDeviceAddressKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkDeviceAddress vkGetAccelerationStructureDeviceAddressKHR_VkDeviceAddress_return = (VkDeviceAddress)0;
+    vkGetAccelerationStructureDeviceAddressKHR_VkDeviceAddress_return = vkEnc->vkGetAccelerationStructureDeviceAddressKHR(device, pInfo, true /* do lock */);
+    return vkGetAccelerationStructureDeviceAddressKHR_VkDeviceAddress_return;
+}
+static void entry_vkCmdWriteAccelerationStructuresPropertiesKHR(
+    VkCommandBuffer commandBuffer,
+    uint32_t accelerationStructureCount,
+    const VkAccelerationStructureKHR* pAccelerationStructures,
+    VkQueryType queryType,
+    VkQueryPool queryPool,
+    uint32_t firstQuery)
+{
+    AEMU_SCOPED_TRACE("vkCmdWriteAccelerationStructuresPropertiesKHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdWriteAccelerationStructuresPropertiesKHR(commandBuffer, accelerationStructureCount, pAccelerationStructures, queryType, queryPool, firstQuery, true /* do lock */);
+}
+static void entry_vkGetDeviceAccelerationStructureCompatibilityKHR(
+    VkDevice device,
+    const VkAccelerationStructureVersionInfoKHR* pVersionInfo,
+    VkAccelerationStructureCompatibilityKHR* pCompatibility)
+{
+    AEMU_SCOPED_TRACE("vkGetDeviceAccelerationStructureCompatibilityKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetDeviceAccelerationStructureCompatibilityKHR(device, pVersionInfo, pCompatibility, true /* do lock */);
+}
+static void dynCheck_entry_vkGetDeviceAccelerationStructureCompatibilityKHR(
+    VkDevice device,
+    const VkAccelerationStructureVersionInfoKHR* pVersionInfo,
+    VkAccelerationStructureCompatibilityKHR* pCompatibility)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetDeviceAccelerationStructureCompatibilityKHR", "VK_KHR_acceleration_structure");
+    }
+    AEMU_SCOPED_TRACE("vkGetDeviceAccelerationStructureCompatibilityKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetDeviceAccelerationStructureCompatibilityKHR(device, pVersionInfo, pCompatibility, true /* do lock */);
+}
+static void entry_vkGetAccelerationStructureBuildSizesKHR(
+    VkDevice device,
+    VkAccelerationStructureBuildTypeKHR buildType,
+    const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo,
+    const uint32_t* pMaxPrimitiveCounts,
+    VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo)
+{
+    AEMU_SCOPED_TRACE("vkGetAccelerationStructureBuildSizesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetAccelerationStructureBuildSizesKHR(device, buildType, pBuildInfo, pMaxPrimitiveCounts, pSizeInfo, true /* do lock */);
+}
+static void dynCheck_entry_vkGetAccelerationStructureBuildSizesKHR(
+    VkDevice device,
+    VkAccelerationStructureBuildTypeKHR buildType,
+    const VkAccelerationStructureBuildGeometryInfoKHR* pBuildInfo,
+    const uint32_t* pMaxPrimitiveCounts,
+    VkAccelerationStructureBuildSizesInfoKHR* pSizeInfo)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetAccelerationStructureBuildSizesKHR", "VK_KHR_acceleration_structure");
+    }
+    AEMU_SCOPED_TRACE("vkGetAccelerationStructureBuildSizesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    vkEnc->vkGetAccelerationStructureBuildSizesKHR(device, buildType, pBuildInfo, pMaxPrimitiveCounts, pSizeInfo, true /* do lock */);
+}
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+static void entry_vkCmdTraceRaysKHR(
+    VkCommandBuffer commandBuffer,
+    const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable,
+    const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable,
+    const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable,
+    const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable,
+    uint32_t width,
+    uint32_t height,
+    uint32_t depth)
+{
+    AEMU_SCOPED_TRACE("vkCmdTraceRaysKHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdTraceRaysKHR(commandBuffer, pRaygenShaderBindingTable, pMissShaderBindingTable, pHitShaderBindingTable, pCallableShaderBindingTable, width, height, depth, true /* do lock */);
+}
+static VkResult entry_vkCreateRayTracingPipelinesKHR(
+    VkDevice device,
+    VkDeferredOperationKHR deferredOperation,
+    VkPipelineCache pipelineCache,
+    uint32_t createInfoCount,
+    const VkRayTracingPipelineCreateInfoKHR* pCreateInfos,
+    const VkAllocationCallbacks* pAllocator,
+    VkPipeline* pPipelines)
+{
+    AEMU_SCOPED_TRACE("vkCreateRayTracingPipelinesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateRayTracingPipelinesKHR_VkResult_return = (VkResult)0;
+    vkCreateRayTracingPipelinesKHR_VkResult_return = vkEnc->vkCreateRayTracingPipelinesKHR(device, deferredOperation, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, true /* do lock */);
+    return vkCreateRayTracingPipelinesKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkCreateRayTracingPipelinesKHR(
+    VkDevice device,
+    VkDeferredOperationKHR deferredOperation,
+    VkPipelineCache pipelineCache,
+    uint32_t createInfoCount,
+    const VkRayTracingPipelineCreateInfoKHR* pCreateInfos,
+    const VkAllocationCallbacks* pAllocator,
+    VkPipeline* pPipelines)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_ray_tracing_pipeline"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkCreateRayTracingPipelinesKHR", "VK_KHR_ray_tracing_pipeline");
+    }
+    AEMU_SCOPED_TRACE("vkCreateRayTracingPipelinesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkCreateRayTracingPipelinesKHR_VkResult_return = (VkResult)0;
+    vkCreateRayTracingPipelinesKHR_VkResult_return = vkEnc->vkCreateRayTracingPipelinesKHR(device, deferredOperation, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, true /* do lock */);
+    return vkCreateRayTracingPipelinesKHR_VkResult_return;
+}
+static VkResult entry_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(
+    VkDevice device,
+    VkPipeline pipeline,
+    uint32_t firstGroup,
+    uint32_t groupCount,
+    size_t dataSize,
+    void* pData)
+{
+    AEMU_SCOPED_TRACE("vkGetRayTracingCaptureReplayShaderGroupHandlesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_VkResult_return = (VkResult)0;
+    vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_VkResult_return = vkEnc->vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(device, pipeline, firstGroup, groupCount, dataSize, pData, true /* do lock */);
+    return vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_VkResult_return;
+}
+static VkResult dynCheck_entry_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(
+    VkDevice device,
+    VkPipeline pipeline,
+    uint32_t firstGroup,
+    uint32_t groupCount,
+    size_t dataSize,
+    void* pData)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_ray_tracing_pipeline"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetRayTracingCaptureReplayShaderGroupHandlesKHR", "VK_KHR_ray_tracing_pipeline");
+    }
+    AEMU_SCOPED_TRACE("vkGetRayTracingCaptureReplayShaderGroupHandlesKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkResult vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_VkResult_return = (VkResult)0;
+    vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_VkResult_return = vkEnc->vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(device, pipeline, firstGroup, groupCount, dataSize, pData, true /* do lock */);
+    return vkGetRayTracingCaptureReplayShaderGroupHandlesKHR_VkResult_return;
+}
+static void entry_vkCmdTraceRaysIndirectKHR(
+    VkCommandBuffer commandBuffer,
+    const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable,
+    const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable,
+    const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable,
+    const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable,
+    VkDeviceAddress indirectDeviceAddress)
+{
+    AEMU_SCOPED_TRACE("vkCmdTraceRaysIndirectKHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdTraceRaysIndirectKHR(commandBuffer, pRaygenShaderBindingTable, pMissShaderBindingTable, pHitShaderBindingTable, pCallableShaderBindingTable, indirectDeviceAddress, true /* do lock */);
+}
+static VkDeviceSize entry_vkGetRayTracingShaderGroupStackSizeKHR(
+    VkDevice device,
+    VkPipeline pipeline,
+    uint32_t group,
+    VkShaderGroupShaderKHR groupShader)
+{
+    AEMU_SCOPED_TRACE("vkGetRayTracingShaderGroupStackSizeKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkDeviceSize vkGetRayTracingShaderGroupStackSizeKHR_VkDeviceSize_return = (VkDeviceSize)0;
+    vkGetRayTracingShaderGroupStackSizeKHR_VkDeviceSize_return = vkEnc->vkGetRayTracingShaderGroupStackSizeKHR(device, pipeline, group, groupShader, true /* do lock */);
+    return vkGetRayTracingShaderGroupStackSizeKHR_VkDeviceSize_return;
+}
+static VkDeviceSize dynCheck_entry_vkGetRayTracingShaderGroupStackSizeKHR(
+    VkDevice device,
+    VkPipeline pipeline,
+    uint32_t group,
+    VkShaderGroupShaderKHR groupShader)
+{
+    auto resources = ResourceTracker::get();
+    if (!resources->hasDeviceExtension(device, "VK_KHR_ray_tracing_pipeline"))
+    {
+        sOnInvalidDynamicallyCheckedCall("vkGetRayTracingShaderGroupStackSizeKHR", "VK_KHR_ray_tracing_pipeline");
+    }
+    AEMU_SCOPED_TRACE("vkGetRayTracingShaderGroupStackSizeKHR");
+    auto vkEnc = ResourceTracker::getThreadLocalEncoder();
+    VkDeviceSize vkGetRayTracingShaderGroupStackSizeKHR_VkDeviceSize_return = (VkDeviceSize)0;
+    vkGetRayTracingShaderGroupStackSizeKHR_VkDeviceSize_return = vkEnc->vkGetRayTracingShaderGroupStackSizeKHR(device, pipeline, group, groupShader, true /* do lock */);
+    return vkGetRayTracingShaderGroupStackSizeKHR_VkDeviceSize_return;
+}
+static void entry_vkCmdSetRayTracingPipelineStackSizeKHR(
+    VkCommandBuffer commandBuffer,
+    uint32_t pipelineStackSize)
+{
+    AEMU_SCOPED_TRACE("vkCmdSetRayTracingPipelineStackSizeKHR");
+    auto vkEnc = ResourceTracker::getCommandBufferEncoder(commandBuffer);
+    vkEnc->vkCmdSetRayTracingPipelineStackSizeKHR(commandBuffer, pipelineStackSize, true /* do lock */);
+}
+#endif
+#ifdef VK_KHR_ray_query
+#endif
+void* goldfish_vulkan_get_proc_address(const char* name){
+#ifdef VK_VERSION_1_0
+    if (!strcmp(name, "vkCreateInstance"))
+    {
+        return (void*)entry_vkCreateInstance;
+    }
+    if (!strcmp(name, "vkDestroyInstance"))
+    {
+        return (void*)entry_vkDestroyInstance;
+    }
+    if (!strcmp(name, "vkEnumeratePhysicalDevices"))
+    {
+        return (void*)entry_vkEnumeratePhysicalDevices;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceFeatures"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceFeatures;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceFormatProperties;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceImageFormatProperties;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceProperties;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceQueueFamilyProperties;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceMemoryProperties;
+    }
+    if (!strcmp(name, "vkGetInstanceProcAddr"))
+    {
+        return (void*)entry_vkGetInstanceProcAddr;
+    }
+    if (!strcmp(name, "vkGetDeviceProcAddr"))
+    {
+        return (void*)entry_vkGetDeviceProcAddr;
+    }
+    if (!strcmp(name, "vkCreateDevice"))
+    {
+        return (void*)entry_vkCreateDevice;
+    }
+    if (!strcmp(name, "vkDestroyDevice"))
+    {
+        return (void*)entry_vkDestroyDevice;
+    }
+    if (!strcmp(name, "vkEnumerateInstanceExtensionProperties"))
+    {
+        return (void*)entry_vkEnumerateInstanceExtensionProperties;
+    }
+    if (!strcmp(name, "vkEnumerateDeviceExtensionProperties"))
+    {
+        return (void*)entry_vkEnumerateDeviceExtensionProperties;
+    }
+    if (!strcmp(name, "vkEnumerateInstanceLayerProperties"))
+    {
+        return (void*)entry_vkEnumerateInstanceLayerProperties;
+    }
+    if (!strcmp(name, "vkEnumerateDeviceLayerProperties"))
+    {
+        return (void*)entry_vkEnumerateDeviceLayerProperties;
+    }
+    if (!strcmp(name, "vkGetDeviceQueue"))
+    {
+        return (void*)entry_vkGetDeviceQueue;
+    }
+    if (!strcmp(name, "vkQueueSubmit"))
+    {
+        return (void*)entry_vkQueueSubmit;
+    }
+    if (!strcmp(name, "vkQueueWaitIdle"))
+    {
+        return (void*)entry_vkQueueWaitIdle;
+    }
+    if (!strcmp(name, "vkDeviceWaitIdle"))
+    {
+        return (void*)entry_vkDeviceWaitIdle;
+    }
+    if (!strcmp(name, "vkAllocateMemory"))
+    {
+        return (void*)entry_vkAllocateMemory;
+    }
+    if (!strcmp(name, "vkFreeMemory"))
+    {
+        return (void*)entry_vkFreeMemory;
+    }
+    if (!strcmp(name, "vkMapMemory"))
+    {
+        return (void*)entry_vkMapMemory;
+    }
+    if (!strcmp(name, "vkUnmapMemory"))
+    {
+        return (void*)entry_vkUnmapMemory;
+    }
+    if (!strcmp(name, "vkFlushMappedMemoryRanges"))
+    {
+        return (void*)entry_vkFlushMappedMemoryRanges;
+    }
+    if (!strcmp(name, "vkInvalidateMappedMemoryRanges"))
+    {
+        return (void*)entry_vkInvalidateMappedMemoryRanges;
+    }
+    if (!strcmp(name, "vkGetDeviceMemoryCommitment"))
+    {
+        return (void*)entry_vkGetDeviceMemoryCommitment;
+    }
+    if (!strcmp(name, "vkBindBufferMemory"))
+    {
+        return (void*)entry_vkBindBufferMemory;
+    }
+    if (!strcmp(name, "vkBindImageMemory"))
+    {
+        return (void*)entry_vkBindImageMemory;
+    }
+    if (!strcmp(name, "vkGetBufferMemoryRequirements"))
+    {
+        return (void*)entry_vkGetBufferMemoryRequirements;
+    }
+    if (!strcmp(name, "vkGetImageMemoryRequirements"))
+    {
+        return (void*)entry_vkGetImageMemoryRequirements;
+    }
+    if (!strcmp(name, "vkGetImageSparseMemoryRequirements"))
+    {
+        return (void*)entry_vkGetImageSparseMemoryRequirements;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceSparseImageFormatProperties;
+    }
+    if (!strcmp(name, "vkQueueBindSparse"))
+    {
+        return (void*)entry_vkQueueBindSparse;
+    }
+    if (!strcmp(name, "vkCreateFence"))
+    {
+        return (void*)entry_vkCreateFence;
+    }
+    if (!strcmp(name, "vkDestroyFence"))
+    {
+        return (void*)entry_vkDestroyFence;
+    }
+    if (!strcmp(name, "vkResetFences"))
+    {
+        return (void*)entry_vkResetFences;
+    }
+    if (!strcmp(name, "vkGetFenceStatus"))
+    {
+        return (void*)entry_vkGetFenceStatus;
+    }
+    if (!strcmp(name, "vkWaitForFences"))
+    {
+        return (void*)entry_vkWaitForFences;
+    }
+    if (!strcmp(name, "vkCreateSemaphore"))
+    {
+        return (void*)entry_vkCreateSemaphore;
+    }
+    if (!strcmp(name, "vkDestroySemaphore"))
+    {
+        return (void*)entry_vkDestroySemaphore;
+    }
+    if (!strcmp(name, "vkCreateEvent"))
+    {
+        return (void*)entry_vkCreateEvent;
+    }
+    if (!strcmp(name, "vkDestroyEvent"))
+    {
+        return (void*)entry_vkDestroyEvent;
+    }
+    if (!strcmp(name, "vkGetEventStatus"))
+    {
+        return (void*)entry_vkGetEventStatus;
+    }
+    if (!strcmp(name, "vkSetEvent"))
+    {
+        return (void*)entry_vkSetEvent;
+    }
+    if (!strcmp(name, "vkResetEvent"))
+    {
+        return (void*)entry_vkResetEvent;
+    }
+    if (!strcmp(name, "vkCreateQueryPool"))
+    {
+        return (void*)entry_vkCreateQueryPool;
+    }
+    if (!strcmp(name, "vkDestroyQueryPool"))
+    {
+        return (void*)entry_vkDestroyQueryPool;
+    }
+    if (!strcmp(name, "vkGetQueryPoolResults"))
+    {
+        return (void*)entry_vkGetQueryPoolResults;
+    }
+    if (!strcmp(name, "vkCreateBuffer"))
+    {
+        return (void*)entry_vkCreateBuffer;
+    }
+    if (!strcmp(name, "vkDestroyBuffer"))
+    {
+        return (void*)entry_vkDestroyBuffer;
+    }
+    if (!strcmp(name, "vkCreateBufferView"))
+    {
+        return (void*)entry_vkCreateBufferView;
+    }
+    if (!strcmp(name, "vkDestroyBufferView"))
+    {
+        return (void*)entry_vkDestroyBufferView;
+    }
+    if (!strcmp(name, "vkCreateImage"))
+    {
+        return (void*)entry_vkCreateImage;
+    }
+    if (!strcmp(name, "vkDestroyImage"))
+    {
+        return (void*)entry_vkDestroyImage;
+    }
+    if (!strcmp(name, "vkGetImageSubresourceLayout"))
+    {
+        return (void*)entry_vkGetImageSubresourceLayout;
+    }
+    if (!strcmp(name, "vkCreateImageView"))
+    {
+        return (void*)entry_vkCreateImageView;
+    }
+    if (!strcmp(name, "vkDestroyImageView"))
+    {
+        return (void*)entry_vkDestroyImageView;
+    }
+    if (!strcmp(name, "vkCreateShaderModule"))
+    {
+        return (void*)entry_vkCreateShaderModule;
+    }
+    if (!strcmp(name, "vkDestroyShaderModule"))
+    {
+        return (void*)entry_vkDestroyShaderModule;
+    }
+    if (!strcmp(name, "vkCreatePipelineCache"))
+    {
+        return (void*)entry_vkCreatePipelineCache;
+    }
+    if (!strcmp(name, "vkDestroyPipelineCache"))
+    {
+        return (void*)entry_vkDestroyPipelineCache;
+    }
+    if (!strcmp(name, "vkGetPipelineCacheData"))
+    {
+        return (void*)entry_vkGetPipelineCacheData;
+    }
+    if (!strcmp(name, "vkMergePipelineCaches"))
+    {
+        return (void*)entry_vkMergePipelineCaches;
+    }
+    if (!strcmp(name, "vkCreateGraphicsPipelines"))
+    {
+        return (void*)entry_vkCreateGraphicsPipelines;
+    }
+    if (!strcmp(name, "vkCreateComputePipelines"))
+    {
+        return (void*)entry_vkCreateComputePipelines;
+    }
+    if (!strcmp(name, "vkDestroyPipeline"))
+    {
+        return (void*)entry_vkDestroyPipeline;
+    }
+    if (!strcmp(name, "vkCreatePipelineLayout"))
+    {
+        return (void*)entry_vkCreatePipelineLayout;
+    }
+    if (!strcmp(name, "vkDestroyPipelineLayout"))
+    {
+        return (void*)entry_vkDestroyPipelineLayout;
+    }
+    if (!strcmp(name, "vkCreateSampler"))
+    {
+        return (void*)entry_vkCreateSampler;
+    }
+    if (!strcmp(name, "vkDestroySampler"))
+    {
+        return (void*)entry_vkDestroySampler;
+    }
+    if (!strcmp(name, "vkCreateDescriptorSetLayout"))
+    {
+        return (void*)entry_vkCreateDescriptorSetLayout;
+    }
+    if (!strcmp(name, "vkDestroyDescriptorSetLayout"))
+    {
+        return (void*)entry_vkDestroyDescriptorSetLayout;
+    }
+    if (!strcmp(name, "vkCreateDescriptorPool"))
+    {
+        return (void*)entry_vkCreateDescriptorPool;
+    }
+    if (!strcmp(name, "vkDestroyDescriptorPool"))
+    {
+        return (void*)entry_vkDestroyDescriptorPool;
+    }
+    if (!strcmp(name, "vkResetDescriptorPool"))
+    {
+        return (void*)entry_vkResetDescriptorPool;
+    }
+    if (!strcmp(name, "vkAllocateDescriptorSets"))
+    {
+        return (void*)entry_vkAllocateDescriptorSets;
+    }
+    if (!strcmp(name, "vkFreeDescriptorSets"))
+    {
+        return (void*)entry_vkFreeDescriptorSets;
+    }
+    if (!strcmp(name, "vkUpdateDescriptorSets"))
+    {
+        return (void*)entry_vkUpdateDescriptorSets;
+    }
+    if (!strcmp(name, "vkCreateFramebuffer"))
+    {
+        return (void*)entry_vkCreateFramebuffer;
+    }
+    if (!strcmp(name, "vkDestroyFramebuffer"))
+    {
+        return (void*)entry_vkDestroyFramebuffer;
+    }
+    if (!strcmp(name, "vkCreateRenderPass"))
+    {
+        return (void*)entry_vkCreateRenderPass;
+    }
+    if (!strcmp(name, "vkDestroyRenderPass"))
+    {
+        return (void*)entry_vkDestroyRenderPass;
+    }
+    if (!strcmp(name, "vkGetRenderAreaGranularity"))
+    {
+        return (void*)entry_vkGetRenderAreaGranularity;
+    }
+    if (!strcmp(name, "vkCreateCommandPool"))
+    {
+        return (void*)entry_vkCreateCommandPool;
+    }
+    if (!strcmp(name, "vkDestroyCommandPool"))
+    {
+        return (void*)entry_vkDestroyCommandPool;
+    }
+    if (!strcmp(name, "vkResetCommandPool"))
+    {
+        return (void*)entry_vkResetCommandPool;
+    }
+    if (!strcmp(name, "vkAllocateCommandBuffers"))
+    {
+        return (void*)entry_vkAllocateCommandBuffers;
+    }
+    if (!strcmp(name, "vkFreeCommandBuffers"))
+    {
+        return (void*)entry_vkFreeCommandBuffers;
+    }
+    if (!strcmp(name, "vkBeginCommandBuffer"))
+    {
+        return (void*)entry_vkBeginCommandBuffer;
+    }
+    if (!strcmp(name, "vkEndCommandBuffer"))
+    {
+        return (void*)entry_vkEndCommandBuffer;
+    }
+    if (!strcmp(name, "vkResetCommandBuffer"))
+    {
+        return (void*)entry_vkResetCommandBuffer;
+    }
+    if (!strcmp(name, "vkCmdBindPipeline"))
+    {
+        return (void*)entry_vkCmdBindPipeline;
+    }
+    if (!strcmp(name, "vkCmdSetViewport"))
+    {
+        return (void*)entry_vkCmdSetViewport;
+    }
+    if (!strcmp(name, "vkCmdSetScissor"))
+    {
+        return (void*)entry_vkCmdSetScissor;
+    }
+    if (!strcmp(name, "vkCmdSetLineWidth"))
+    {
+        return (void*)entry_vkCmdSetLineWidth;
+    }
+    if (!strcmp(name, "vkCmdSetDepthBias"))
+    {
+        return (void*)entry_vkCmdSetDepthBias;
+    }
+    if (!strcmp(name, "vkCmdSetBlendConstants"))
+    {
+        return (void*)entry_vkCmdSetBlendConstants;
+    }
+    if (!strcmp(name, "vkCmdSetDepthBounds"))
+    {
+        return (void*)entry_vkCmdSetDepthBounds;
+    }
+    if (!strcmp(name, "vkCmdSetStencilCompareMask"))
+    {
+        return (void*)entry_vkCmdSetStencilCompareMask;
+    }
+    if (!strcmp(name, "vkCmdSetStencilWriteMask"))
+    {
+        return (void*)entry_vkCmdSetStencilWriteMask;
+    }
+    if (!strcmp(name, "vkCmdSetStencilReference"))
+    {
+        return (void*)entry_vkCmdSetStencilReference;
+    }
+    if (!strcmp(name, "vkCmdBindDescriptorSets"))
+    {
+        return (void*)entry_vkCmdBindDescriptorSets;
+    }
+    if (!strcmp(name, "vkCmdBindIndexBuffer"))
+    {
+        return (void*)entry_vkCmdBindIndexBuffer;
+    }
+    if (!strcmp(name, "vkCmdBindVertexBuffers"))
+    {
+        return (void*)entry_vkCmdBindVertexBuffers;
+    }
+    if (!strcmp(name, "vkCmdDraw"))
+    {
+        return (void*)entry_vkCmdDraw;
+    }
+    if (!strcmp(name, "vkCmdDrawIndexed"))
+    {
+        return (void*)entry_vkCmdDrawIndexed;
+    }
+    if (!strcmp(name, "vkCmdDrawIndirect"))
+    {
+        return (void*)entry_vkCmdDrawIndirect;
+    }
+    if (!strcmp(name, "vkCmdDrawIndexedIndirect"))
+    {
+        return (void*)entry_vkCmdDrawIndexedIndirect;
+    }
+    if (!strcmp(name, "vkCmdDispatch"))
+    {
+        return (void*)entry_vkCmdDispatch;
+    }
+    if (!strcmp(name, "vkCmdDispatchIndirect"))
+    {
+        return (void*)entry_vkCmdDispatchIndirect;
+    }
+    if (!strcmp(name, "vkCmdCopyBuffer"))
+    {
+        return (void*)entry_vkCmdCopyBuffer;
+    }
+    if (!strcmp(name, "vkCmdCopyImage"))
+    {
+        return (void*)entry_vkCmdCopyImage;
+    }
+    if (!strcmp(name, "vkCmdBlitImage"))
+    {
+        return (void*)entry_vkCmdBlitImage;
+    }
+    if (!strcmp(name, "vkCmdCopyBufferToImage"))
+    {
+        return (void*)entry_vkCmdCopyBufferToImage;
+    }
+    if (!strcmp(name, "vkCmdCopyImageToBuffer"))
+    {
+        return (void*)entry_vkCmdCopyImageToBuffer;
+    }
+    if (!strcmp(name, "vkCmdUpdateBuffer"))
+    {
+        return (void*)entry_vkCmdUpdateBuffer;
+    }
+    if (!strcmp(name, "vkCmdFillBuffer"))
+    {
+        return (void*)entry_vkCmdFillBuffer;
+    }
+    if (!strcmp(name, "vkCmdClearColorImage"))
+    {
+        return (void*)entry_vkCmdClearColorImage;
+    }
+    if (!strcmp(name, "vkCmdClearDepthStencilImage"))
+    {
+        return (void*)entry_vkCmdClearDepthStencilImage;
+    }
+    if (!strcmp(name, "vkCmdClearAttachments"))
+    {
+        return (void*)entry_vkCmdClearAttachments;
+    }
+    if (!strcmp(name, "vkCmdResolveImage"))
+    {
+        return (void*)entry_vkCmdResolveImage;
+    }
+    if (!strcmp(name, "vkCmdSetEvent"))
+    {
+        return (void*)entry_vkCmdSetEvent;
+    }
+    if (!strcmp(name, "vkCmdResetEvent"))
+    {
+        return (void*)entry_vkCmdResetEvent;
+    }
+    if (!strcmp(name, "vkCmdWaitEvents"))
+    {
+        return (void*)entry_vkCmdWaitEvents;
+    }
+    if (!strcmp(name, "vkCmdPipelineBarrier"))
+    {
+        return (void*)entry_vkCmdPipelineBarrier;
+    }
+    if (!strcmp(name, "vkCmdBeginQuery"))
+    {
+        return (void*)entry_vkCmdBeginQuery;
+    }
+    if (!strcmp(name, "vkCmdEndQuery"))
+    {
+        return (void*)entry_vkCmdEndQuery;
+    }
+    if (!strcmp(name, "vkCmdResetQueryPool"))
+    {
+        return (void*)entry_vkCmdResetQueryPool;
+    }
+    if (!strcmp(name, "vkCmdWriteTimestamp"))
+    {
+        return (void*)entry_vkCmdWriteTimestamp;
+    }
+    if (!strcmp(name, "vkCmdCopyQueryPoolResults"))
+    {
+        return (void*)entry_vkCmdCopyQueryPoolResults;
+    }
+    if (!strcmp(name, "vkCmdPushConstants"))
+    {
+        return (void*)entry_vkCmdPushConstants;
+    }
+    if (!strcmp(name, "vkCmdBeginRenderPass"))
+    {
+        return (void*)entry_vkCmdBeginRenderPass;
+    }
+    if (!strcmp(name, "vkCmdNextSubpass"))
+    {
+        return (void*)entry_vkCmdNextSubpass;
+    }
+    if (!strcmp(name, "vkCmdEndRenderPass"))
+    {
+        return (void*)entry_vkCmdEndRenderPass;
+    }
+    if (!strcmp(name, "vkCmdExecuteCommands"))
+    {
+        return (void*)entry_vkCmdExecuteCommands;
+    }
+#endif
+#ifdef VK_VERSION_1_1
+    if (!strcmp(name, "vkEnumerateInstanceVersion"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkBindBufferMemory2"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkBindImageMemory2"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceGroupPeerMemoryFeatures"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetDeviceMask"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdDispatchBase"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkEnumeratePhysicalDeviceGroups"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetImageMemoryRequirements2"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetBufferMemoryRequirements2"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetImageSparseMemoryRequirements2"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceFeatures2"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceProperties2"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties2"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties2"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties2"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties2"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties2"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkTrimCommandPool"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceQueue2"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCreateSamplerYcbcrConversion"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkDestroySamplerYcbcrConversion"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCreateDescriptorUpdateTemplate"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkDestroyDescriptorUpdateTemplate"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplate"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalBufferProperties"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalFenceProperties"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalSemaphoreProperties"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetDescriptorSetLayoutSupport"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_VERSION_1_2
+    if (!strcmp(name, "vkCmdDrawIndirectCount"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawIndexedIndirectCount"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCreateRenderPass2"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdBeginRenderPass2"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdNextSubpass2"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndRenderPass2"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkResetQueryPool"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetSemaphoreCounterValue"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkWaitSemaphores"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkSignalSemaphore"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetBufferDeviceAddress"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetBufferOpaqueCaptureAddress"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceMemoryOpaqueCaptureAddress"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_surface
+    if (!strcmp(name, "vkDestroySurfaceKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceSupportKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceFormatsKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfacePresentModesKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_swapchain
+    if (!strcmp(name, "vkCreateSwapchainKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkDestroySwapchainKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetSwapchainImagesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkAcquireNextImageKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkQueuePresentKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceGroupPresentCapabilitiesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceGroupSurfacePresentModesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDevicePresentRectanglesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkAcquireNextImage2KHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_display
+    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPropertiesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetDisplayPlaneSupportedDisplaysKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetDisplayModePropertiesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCreateDisplayModeKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetDisplayPlaneCapabilitiesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCreateDisplayPlaneSurfaceKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_display_swapchain
+    if (!strcmp(name, "vkCreateSharedSwapchainsKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_xlib_surface
+    if (!strcmp(name, "vkCreateXlibSurfaceKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceXlibPresentationSupportKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_xcb_surface
+    if (!strcmp(name, "vkCreateXcbSurfaceKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceXcbPresentationSupportKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_wayland_surface
+    if (!strcmp(name, "vkCreateWaylandSurfaceKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceWaylandPresentationSupportKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_android_surface
+    if (!strcmp(name, "vkCreateAndroidSurfaceKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_win32_surface
+    if (!strcmp(name, "vkCreateWin32SurfaceKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceWin32PresentationSupportKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_get_physical_device_properties2
+    if (!strcmp(name, "vkGetPhysicalDeviceFeatures2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceProperties2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties2KHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_device_group
+    if (!strcmp(name, "vkGetDeviceGroupPeerMemoryFeaturesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetDeviceMaskKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdDispatchBaseKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_maintenance1
+    if (!strcmp(name, "vkTrimCommandPoolKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_device_group_creation
+    if (!strcmp(name, "vkEnumeratePhysicalDeviceGroupsKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_memory_capabilities
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalBufferPropertiesKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_memory_win32
+    if (!strcmp(name, "vkGetMemoryWin32HandleKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetMemoryWin32HandlePropertiesKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_memory_fd
+    if (!strcmp(name, "vkGetMemoryFdKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetMemoryFdPropertiesKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_semaphore_capabilities
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_semaphore_win32
+    if (!strcmp(name, "vkImportSemaphoreWin32HandleKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetSemaphoreWin32HandleKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_semaphore_fd
+    if (!strcmp(name, "vkImportSemaphoreFdKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetSemaphoreFdKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_push_descriptor
+    if (!strcmp(name, "vkCmdPushDescriptorSetKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdPushDescriptorSetWithTemplateKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_descriptor_update_template
+    if (!strcmp(name, "vkCreateDescriptorUpdateTemplateKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkDestroyDescriptorUpdateTemplateKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplateKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_create_renderpass2
+    if (!strcmp(name, "vkCreateRenderPass2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdBeginRenderPass2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdNextSubpass2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndRenderPass2KHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_shared_presentable_image
+    if (!strcmp(name, "vkGetSwapchainStatusKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_fence_capabilities
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalFencePropertiesKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_fence_win32
+    if (!strcmp(name, "vkImportFenceWin32HandleKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetFenceWin32HandleKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_fence_fd
+    if (!strcmp(name, "vkImportFenceFdKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetFenceFdKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_performance_query
+    if (!strcmp(name, "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkAcquireProfilingLockKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkReleaseProfilingLockKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_get_surface_capabilities2
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilities2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceFormats2KHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_get_display_properties2
+    if (!strcmp(name, "vkGetPhysicalDeviceDisplayProperties2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPlaneProperties2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetDisplayModeProperties2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetDisplayPlaneCapabilities2KHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_get_memory_requirements2
+    if (!strcmp(name, "vkGetImageMemoryRequirements2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetBufferMemoryRequirements2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetImageSparseMemoryRequirements2KHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_sampler_ycbcr_conversion
+    if (!strcmp(name, "vkCreateSamplerYcbcrConversionKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkDestroySamplerYcbcrConversionKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_bind_memory2
+    if (!strcmp(name, "vkBindBufferMemory2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkBindImageMemory2KHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_maintenance3
+    if (!strcmp(name, "vkGetDescriptorSetLayoutSupportKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_draw_indirect_count
+    if (!strcmp(name, "vkCmdDrawIndirectCountKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawIndexedIndirectCountKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_timeline_semaphore
+    if (!strcmp(name, "vkGetSemaphoreCounterValueKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkWaitSemaphoresKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkSignalSemaphoreKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+    if (!strcmp(name, "vkGetPhysicalDeviceFragmentShadingRatesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetFragmentShadingRateKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_buffer_device_address
+    if (!strcmp(name, "vkGetBufferDeviceAddressKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetBufferOpaqueCaptureAddressKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceMemoryOpaqueCaptureAddressKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_deferred_host_operations
+    if (!strcmp(name, "vkCreateDeferredOperationKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkDestroyDeferredOperationKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetDeferredOperationMaxConcurrencyKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetDeferredOperationResultKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkDeferredOperationJoinKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+    if (!strcmp(name, "vkGetPipelineExecutablePropertiesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPipelineExecutableStatisticsKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPipelineExecutableInternalRepresentationsKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_copy_commands2
+    if (!strcmp(name, "vkCmdCopyBuffer2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyImage2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyBufferToImage2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyImageToBuffer2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdBlitImage2KHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdResolveImage2KHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_ANDROID_native_buffer
+    if (!strcmp(name, "vkGetSwapchainGrallocUsageANDROID"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkAcquireImageANDROID"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkQueueSignalReleaseImageANDROID"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_debug_report
+    if (!strcmp(name, "vkCreateDebugReportCallbackEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkDestroyDebugReportCallbackEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkDebugReportMessageEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_debug_marker
+    if (!strcmp(name, "vkDebugMarkerSetObjectTagEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkDebugMarkerSetObjectNameEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdDebugMarkerBeginEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdDebugMarkerEndEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdDebugMarkerInsertEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_transform_feedback
+    if (!strcmp(name, "vkCmdBindTransformFeedbackBuffersEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdBeginTransformFeedbackEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndTransformFeedbackEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdBeginQueryIndexedEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndQueryIndexedEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawIndirectByteCountEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_NVX_image_view_handle
+    if (!strcmp(name, "vkGetImageViewHandleNVX"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetImageViewAddressNVX"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_AMD_draw_indirect_count
+    if (!strcmp(name, "vkCmdDrawIndirectCountAMD"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawIndexedIndirectCountAMD"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_AMD_shader_info
+    if (!strcmp(name, "vkGetShaderInfoAMD"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_GGP_stream_descriptor_surface
+    if (!strcmp(name, "vkCreateStreamDescriptorSurfaceGGP"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_NV_external_memory_capabilities
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalImageFormatPropertiesNV"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_NV_external_memory_win32
+    if (!strcmp(name, "vkGetMemoryWin32HandleNV"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_NN_vi_surface
+    if (!strcmp(name, "vkCreateViSurfaceNN"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_conditional_rendering
+    if (!strcmp(name, "vkCmdBeginConditionalRenderingEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndConditionalRenderingEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_NV_clip_space_w_scaling
+    if (!strcmp(name, "vkCmdSetViewportWScalingNV"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_direct_mode_display
+    if (!strcmp(name, "vkReleaseDisplayEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_acquire_xlib_display
+    if (!strcmp(name, "vkAcquireXlibDisplayEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetRandROutputDisplayEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_display_surface_counter
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilities2EXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_display_control
+    if (!strcmp(name, "vkDisplayPowerControlEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkRegisterDeviceEventEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkRegisterDisplayEventEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetSwapchainCounterEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_GOOGLE_display_timing
+    if (!strcmp(name, "vkGetRefreshCycleDurationGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPastPresentationTimingGOOGLE"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_discard_rectangles
+    if (!strcmp(name, "vkCmdSetDiscardRectangleEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_hdr_metadata
+    if (!strcmp(name, "vkSetHdrMetadataEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_MVK_ios_surface
+    if (!strcmp(name, "vkCreateIOSSurfaceMVK"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_MVK_macos_surface
+    if (!strcmp(name, "vkCreateMacOSSurfaceMVK"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_MVK_moltenvk
+    if (!strcmp(name, "vkGetMTLDeviceMVK"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkSetMTLTextureMVK"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetMTLTextureMVK"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetMTLBufferMVK"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkUseIOSurfaceMVK"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetIOSurfaceMVK"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_debug_utils
+    if (!strcmp(name, "vkSetDebugUtilsObjectNameEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkSetDebugUtilsObjectTagEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkQueueBeginDebugUtilsLabelEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkQueueEndDebugUtilsLabelEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkQueueInsertDebugUtilsLabelEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdBeginDebugUtilsLabelEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndDebugUtilsLabelEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdInsertDebugUtilsLabelEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCreateDebugUtilsMessengerEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkDestroyDebugUtilsMessengerEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkSubmitDebugUtilsMessageEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_ANDROID_external_memory_android_hardware_buffer
+    if (!strcmp(name, "vkGetAndroidHardwareBufferPropertiesANDROID"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetMemoryAndroidHardwareBufferANDROID"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_sample_locations
+    if (!strcmp(name, "vkCmdSetSampleLocationsEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceMultisamplePropertiesEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+    if (!strcmp(name, "vkGetImageDrmFormatModifierPropertiesEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_validation_cache
+    if (!strcmp(name, "vkCreateValidationCacheEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkDestroyValidationCacheEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkMergeValidationCachesEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetValidationCacheDataEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_NV_shading_rate_image
+    if (!strcmp(name, "vkCmdBindShadingRateImageNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetViewportShadingRatePaletteNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetCoarseSampleOrderNV"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_NV_ray_tracing
+    if (!strcmp(name, "vkCreateAccelerationStructureNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkDestroyAccelerationStructureNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetAccelerationStructureMemoryRequirementsNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkBindAccelerationStructureMemoryNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdBuildAccelerationStructureNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyAccelerationStructureNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdTraceRaysNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCreateRayTracingPipelinesNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetRayTracingShaderGroupHandlesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetRayTracingShaderGroupHandlesNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetAccelerationStructureHandleNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdWriteAccelerationStructuresPropertiesNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCompileDeferredNV"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_external_memory_host
+    if (!strcmp(name, "vkGetMemoryHostPointerPropertiesEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_AMD_buffer_marker
+    if (!strcmp(name, "vkCmdWriteBufferMarkerAMD"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+    if (!strcmp(name, "vkGetPhysicalDeviceCalibrateableTimeDomainsEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetCalibratedTimestampsEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_NV_mesh_shader
+    if (!strcmp(name, "vkCmdDrawMeshTasksNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawMeshTasksIndirectNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawMeshTasksIndirectCountNV"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_NV_scissor_exclusive
+    if (!strcmp(name, "vkCmdSetExclusiveScissorNV"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_NV_device_diagnostic_checkpoints
+    if (!strcmp(name, "vkCmdSetCheckpointNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetQueueCheckpointDataNV"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_INTEL_performance_query
+    if (!strcmp(name, "vkInitializePerformanceApiINTEL"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkUninitializePerformanceApiINTEL"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetPerformanceMarkerINTEL"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetPerformanceStreamMarkerINTEL"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetPerformanceOverrideINTEL"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkAcquirePerformanceConfigurationINTEL"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkReleasePerformanceConfigurationINTEL"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkQueueSetPerformanceConfigurationINTEL"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPerformanceParameterINTEL"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_AMD_display_native_hdr
+    if (!strcmp(name, "vkSetLocalDimmingAMD"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+    if (!strcmp(name, "vkCreateImagePipeSurfaceFUCHSIA"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_metal_surface
+    if (!strcmp(name, "vkCreateMetalSurfaceEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_buffer_device_address
+    if (!strcmp(name, "vkGetBufferDeviceAddressEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_tooling_info
+    if (!strcmp(name, "vkGetPhysicalDeviceToolPropertiesEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_NV_cooperative_matrix
+    if (!strcmp(name, "vkGetPhysicalDeviceCooperativeMatrixPropertiesNV"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+    if (!strcmp(name, "vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfacePresentModes2EXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkAcquireFullScreenExclusiveModeEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkReleaseFullScreenExclusiveModeEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceGroupSurfacePresentModes2EXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_headless_surface
+    if (!strcmp(name, "vkCreateHeadlessSurfaceEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_line_rasterization
+    if (!strcmp(name, "vkCmdSetLineStippleEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_host_query_reset
+    if (!strcmp(name, "vkResetQueryPoolEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+    if (!strcmp(name, "vkCmdSetCullModeEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetFrontFaceEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetPrimitiveTopologyEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetViewportWithCountEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetScissorWithCountEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdBindVertexBuffers2EXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetDepthTestEnableEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetDepthWriteEnableEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetDepthCompareOpEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetDepthBoundsTestEnableEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetStencilTestEnableEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetStencilOpEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_NV_device_generated_commands
+    if (!strcmp(name, "vkGetGeneratedCommandsMemoryRequirementsNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdPreprocessGeneratedCommandsNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdExecuteGeneratedCommandsNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdBindPipelineShaderGroupNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCreateIndirectCommandsLayoutNV"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkDestroyIndirectCommandsLayoutNV"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_private_data
+    if (!strcmp(name, "vkCreatePrivateDataSlotEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkDestroyPrivateDataSlotEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkSetPrivateDataEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPrivateDataEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+    if (!strcmp(name, "vkCmdSetFragmentShadingRateEnumNV"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_EXT_directfb_surface
+    if (!strcmp(name, "vkCreateDirectFBSurfaceEXT"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceDirectFBPresentationSupportEXT"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_GOOGLE_gfxstream
+    if (!strcmp(name, "vkRegisterImageColorBufferGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkRegisterBufferColorBufferGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkMapMemoryIntoAddressSpaceGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplateSizedGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkBeginCommandBufferAsyncGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkEndCommandBufferAsyncGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkResetCommandBufferAsyncGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCommandBufferHostSyncGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCreateImageWithRequirementsGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCreateBufferWithRequirementsGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetMemoryHostAddressInfoGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkFreeMemorySyncGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkQueueHostSyncGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkQueueSubmitAsyncGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkQueueWaitIdleAsyncGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkQueueBindSparseAsyncGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetLinearImageLayoutGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkQueueFlushCommandsGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkQueueCommitDescriptorSetUpdatesGOOGLE"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCollectDescriptorPoolIdsGOOGLE"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_acceleration_structure
+    if (!strcmp(name, "vkCreateAccelerationStructureKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkDestroyAccelerationStructureKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdBuildAccelerationStructuresKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdBuildAccelerationStructuresIndirectKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkBuildAccelerationStructuresKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCopyAccelerationStructureKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCopyAccelerationStructureToMemoryKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCopyMemoryToAccelerationStructureKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkWriteAccelerationStructuresPropertiesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyAccelerationStructureKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyAccelerationStructureToMemoryKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyMemoryToAccelerationStructureKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetAccelerationStructureDeviceAddressKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdWriteAccelerationStructuresPropertiesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceAccelerationStructureCompatibilityKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetAccelerationStructureBuildSizesKHR"))
+    {
+        return nullptr;
+    }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+    if (!strcmp(name, "vkCmdTraceRaysKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCreateRayTracingPipelinesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdTraceRaysIndirectKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetRayTracingShaderGroupStackSizeKHR"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetRayTracingPipelineStackSizeKHR"))
+    {
+        return nullptr;
+    }
+#endif
+    return nullptr;
+}
+void* goldfish_vulkan_get_instance_proc_address(VkInstance instance, const char* name){
+    auto resources = ResourceTracker::get();
+    bool has1_1OrHigher = resources->getApiVersionFromInstance(instance) >= VK_API_VERSION_1_1;
+#ifdef VK_VERSION_1_0
+    if (!strcmp(name, "vkCreateInstance"))
+    {
+        return (void*)entry_vkCreateInstance;
+    }
+    if (!strcmp(name, "vkDestroyInstance"))
+    {
+        return (void*)entry_vkDestroyInstance;
+    }
+    if (!strcmp(name, "vkEnumeratePhysicalDevices"))
+    {
+        return (void*)entry_vkEnumeratePhysicalDevices;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceFeatures"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceFeatures;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceFormatProperties;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceImageFormatProperties;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceProperties;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceQueueFamilyProperties;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceMemoryProperties;
+    }
+    if (!strcmp(name, "vkGetInstanceProcAddr"))
+    {
+        return (void*)entry_vkGetInstanceProcAddr;
+    }
+    if (!strcmp(name, "vkGetDeviceProcAddr"))
+    {
+        return (void*)entry_vkGetDeviceProcAddr;
+    }
+    if (!strcmp(name, "vkCreateDevice"))
+    {
+        return (void*)entry_vkCreateDevice;
+    }
+    if (!strcmp(name, "vkDestroyDevice"))
+    {
+        return (void*)entry_vkDestroyDevice;
+    }
+    if (!strcmp(name, "vkEnumerateInstanceExtensionProperties"))
+    {
+        return (void*)entry_vkEnumerateInstanceExtensionProperties;
+    }
+    if (!strcmp(name, "vkEnumerateDeviceExtensionProperties"))
+    {
+        return (void*)entry_vkEnumerateDeviceExtensionProperties;
+    }
+    if (!strcmp(name, "vkEnumerateInstanceLayerProperties"))
+    {
+        return (void*)entry_vkEnumerateInstanceLayerProperties;
+    }
+    if (!strcmp(name, "vkEnumerateDeviceLayerProperties"))
+    {
+        return (void*)entry_vkEnumerateDeviceLayerProperties;
+    }
+    if (!strcmp(name, "vkGetDeviceQueue"))
+    {
+        return (void*)entry_vkGetDeviceQueue;
+    }
+    if (!strcmp(name, "vkQueueSubmit"))
+    {
+        return (void*)entry_vkQueueSubmit;
+    }
+    if (!strcmp(name, "vkQueueWaitIdle"))
+    {
+        return (void*)entry_vkQueueWaitIdle;
+    }
+    if (!strcmp(name, "vkDeviceWaitIdle"))
+    {
+        return (void*)entry_vkDeviceWaitIdle;
+    }
+    if (!strcmp(name, "vkAllocateMemory"))
+    {
+        return (void*)entry_vkAllocateMemory;
+    }
+    if (!strcmp(name, "vkFreeMemory"))
+    {
+        return (void*)entry_vkFreeMemory;
+    }
+    if (!strcmp(name, "vkMapMemory"))
+    {
+        return (void*)entry_vkMapMemory;
+    }
+    if (!strcmp(name, "vkUnmapMemory"))
+    {
+        return (void*)entry_vkUnmapMemory;
+    }
+    if (!strcmp(name, "vkFlushMappedMemoryRanges"))
+    {
+        return (void*)entry_vkFlushMappedMemoryRanges;
+    }
+    if (!strcmp(name, "vkInvalidateMappedMemoryRanges"))
+    {
+        return (void*)entry_vkInvalidateMappedMemoryRanges;
+    }
+    if (!strcmp(name, "vkGetDeviceMemoryCommitment"))
+    {
+        return (void*)entry_vkGetDeviceMemoryCommitment;
+    }
+    if (!strcmp(name, "vkBindBufferMemory"))
+    {
+        return (void*)entry_vkBindBufferMemory;
+    }
+    if (!strcmp(name, "vkBindImageMemory"))
+    {
+        return (void*)entry_vkBindImageMemory;
+    }
+    if (!strcmp(name, "vkGetBufferMemoryRequirements"))
+    {
+        return (void*)entry_vkGetBufferMemoryRequirements;
+    }
+    if (!strcmp(name, "vkGetImageMemoryRequirements"))
+    {
+        return (void*)entry_vkGetImageMemoryRequirements;
+    }
+    if (!strcmp(name, "vkGetImageSparseMemoryRequirements"))
+    {
+        return (void*)entry_vkGetImageSparseMemoryRequirements;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceSparseImageFormatProperties;
+    }
+    if (!strcmp(name, "vkQueueBindSparse"))
+    {
+        return (void*)entry_vkQueueBindSparse;
+    }
+    if (!strcmp(name, "vkCreateFence"))
+    {
+        return (void*)entry_vkCreateFence;
+    }
+    if (!strcmp(name, "vkDestroyFence"))
+    {
+        return (void*)entry_vkDestroyFence;
+    }
+    if (!strcmp(name, "vkResetFences"))
+    {
+        return (void*)entry_vkResetFences;
+    }
+    if (!strcmp(name, "vkGetFenceStatus"))
+    {
+        return (void*)entry_vkGetFenceStatus;
+    }
+    if (!strcmp(name, "vkWaitForFences"))
+    {
+        return (void*)entry_vkWaitForFences;
+    }
+    if (!strcmp(name, "vkCreateSemaphore"))
+    {
+        return (void*)entry_vkCreateSemaphore;
+    }
+    if (!strcmp(name, "vkDestroySemaphore"))
+    {
+        return (void*)entry_vkDestroySemaphore;
+    }
+    if (!strcmp(name, "vkCreateEvent"))
+    {
+        return (void*)entry_vkCreateEvent;
+    }
+    if (!strcmp(name, "vkDestroyEvent"))
+    {
+        return (void*)entry_vkDestroyEvent;
+    }
+    if (!strcmp(name, "vkGetEventStatus"))
+    {
+        return (void*)entry_vkGetEventStatus;
+    }
+    if (!strcmp(name, "vkSetEvent"))
+    {
+        return (void*)entry_vkSetEvent;
+    }
+    if (!strcmp(name, "vkResetEvent"))
+    {
+        return (void*)entry_vkResetEvent;
+    }
+    if (!strcmp(name, "vkCreateQueryPool"))
+    {
+        return (void*)entry_vkCreateQueryPool;
+    }
+    if (!strcmp(name, "vkDestroyQueryPool"))
+    {
+        return (void*)entry_vkDestroyQueryPool;
+    }
+    if (!strcmp(name, "vkGetQueryPoolResults"))
+    {
+        return (void*)entry_vkGetQueryPoolResults;
+    }
+    if (!strcmp(name, "vkCreateBuffer"))
+    {
+        return (void*)entry_vkCreateBuffer;
+    }
+    if (!strcmp(name, "vkDestroyBuffer"))
+    {
+        return (void*)entry_vkDestroyBuffer;
+    }
+    if (!strcmp(name, "vkCreateBufferView"))
+    {
+        return (void*)entry_vkCreateBufferView;
+    }
+    if (!strcmp(name, "vkDestroyBufferView"))
+    {
+        return (void*)entry_vkDestroyBufferView;
+    }
+    if (!strcmp(name, "vkCreateImage"))
+    {
+        return (void*)entry_vkCreateImage;
+    }
+    if (!strcmp(name, "vkDestroyImage"))
+    {
+        return (void*)entry_vkDestroyImage;
+    }
+    if (!strcmp(name, "vkGetImageSubresourceLayout"))
+    {
+        return (void*)entry_vkGetImageSubresourceLayout;
+    }
+    if (!strcmp(name, "vkCreateImageView"))
+    {
+        return (void*)entry_vkCreateImageView;
+    }
+    if (!strcmp(name, "vkDestroyImageView"))
+    {
+        return (void*)entry_vkDestroyImageView;
+    }
+    if (!strcmp(name, "vkCreateShaderModule"))
+    {
+        return (void*)entry_vkCreateShaderModule;
+    }
+    if (!strcmp(name, "vkDestroyShaderModule"))
+    {
+        return (void*)entry_vkDestroyShaderModule;
+    }
+    if (!strcmp(name, "vkCreatePipelineCache"))
+    {
+        return (void*)entry_vkCreatePipelineCache;
+    }
+    if (!strcmp(name, "vkDestroyPipelineCache"))
+    {
+        return (void*)entry_vkDestroyPipelineCache;
+    }
+    if (!strcmp(name, "vkGetPipelineCacheData"))
+    {
+        return (void*)entry_vkGetPipelineCacheData;
+    }
+    if (!strcmp(name, "vkMergePipelineCaches"))
+    {
+        return (void*)entry_vkMergePipelineCaches;
+    }
+    if (!strcmp(name, "vkCreateGraphicsPipelines"))
+    {
+        return (void*)entry_vkCreateGraphicsPipelines;
+    }
+    if (!strcmp(name, "vkCreateComputePipelines"))
+    {
+        return (void*)entry_vkCreateComputePipelines;
+    }
+    if (!strcmp(name, "vkDestroyPipeline"))
+    {
+        return (void*)entry_vkDestroyPipeline;
+    }
+    if (!strcmp(name, "vkCreatePipelineLayout"))
+    {
+        return (void*)entry_vkCreatePipelineLayout;
+    }
+    if (!strcmp(name, "vkDestroyPipelineLayout"))
+    {
+        return (void*)entry_vkDestroyPipelineLayout;
+    }
+    if (!strcmp(name, "vkCreateSampler"))
+    {
+        return (void*)entry_vkCreateSampler;
+    }
+    if (!strcmp(name, "vkDestroySampler"))
+    {
+        return (void*)entry_vkDestroySampler;
+    }
+    if (!strcmp(name, "vkCreateDescriptorSetLayout"))
+    {
+        return (void*)entry_vkCreateDescriptorSetLayout;
+    }
+    if (!strcmp(name, "vkDestroyDescriptorSetLayout"))
+    {
+        return (void*)entry_vkDestroyDescriptorSetLayout;
+    }
+    if (!strcmp(name, "vkCreateDescriptorPool"))
+    {
+        return (void*)entry_vkCreateDescriptorPool;
+    }
+    if (!strcmp(name, "vkDestroyDescriptorPool"))
+    {
+        return (void*)entry_vkDestroyDescriptorPool;
+    }
+    if (!strcmp(name, "vkResetDescriptorPool"))
+    {
+        return (void*)entry_vkResetDescriptorPool;
+    }
+    if (!strcmp(name, "vkAllocateDescriptorSets"))
+    {
+        return (void*)entry_vkAllocateDescriptorSets;
+    }
+    if (!strcmp(name, "vkFreeDescriptorSets"))
+    {
+        return (void*)entry_vkFreeDescriptorSets;
+    }
+    if (!strcmp(name, "vkUpdateDescriptorSets"))
+    {
+        return (void*)entry_vkUpdateDescriptorSets;
+    }
+    if (!strcmp(name, "vkCreateFramebuffer"))
+    {
+        return (void*)entry_vkCreateFramebuffer;
+    }
+    if (!strcmp(name, "vkDestroyFramebuffer"))
+    {
+        return (void*)entry_vkDestroyFramebuffer;
+    }
+    if (!strcmp(name, "vkCreateRenderPass"))
+    {
+        return (void*)entry_vkCreateRenderPass;
+    }
+    if (!strcmp(name, "vkDestroyRenderPass"))
+    {
+        return (void*)entry_vkDestroyRenderPass;
+    }
+    if (!strcmp(name, "vkGetRenderAreaGranularity"))
+    {
+        return (void*)entry_vkGetRenderAreaGranularity;
+    }
+    if (!strcmp(name, "vkCreateCommandPool"))
+    {
+        return (void*)entry_vkCreateCommandPool;
+    }
+    if (!strcmp(name, "vkDestroyCommandPool"))
+    {
+        return (void*)entry_vkDestroyCommandPool;
+    }
+    if (!strcmp(name, "vkResetCommandPool"))
+    {
+        return (void*)entry_vkResetCommandPool;
+    }
+    if (!strcmp(name, "vkAllocateCommandBuffers"))
+    {
+        return (void*)entry_vkAllocateCommandBuffers;
+    }
+    if (!strcmp(name, "vkFreeCommandBuffers"))
+    {
+        return (void*)entry_vkFreeCommandBuffers;
+    }
+    if (!strcmp(name, "vkBeginCommandBuffer"))
+    {
+        return (void*)entry_vkBeginCommandBuffer;
+    }
+    if (!strcmp(name, "vkEndCommandBuffer"))
+    {
+        return (void*)entry_vkEndCommandBuffer;
+    }
+    if (!strcmp(name, "vkResetCommandBuffer"))
+    {
+        return (void*)entry_vkResetCommandBuffer;
+    }
+    if (!strcmp(name, "vkCmdBindPipeline"))
+    {
+        return (void*)entry_vkCmdBindPipeline;
+    }
+    if (!strcmp(name, "vkCmdSetViewport"))
+    {
+        return (void*)entry_vkCmdSetViewport;
+    }
+    if (!strcmp(name, "vkCmdSetScissor"))
+    {
+        return (void*)entry_vkCmdSetScissor;
+    }
+    if (!strcmp(name, "vkCmdSetLineWidth"))
+    {
+        return (void*)entry_vkCmdSetLineWidth;
+    }
+    if (!strcmp(name, "vkCmdSetDepthBias"))
+    {
+        return (void*)entry_vkCmdSetDepthBias;
+    }
+    if (!strcmp(name, "vkCmdSetBlendConstants"))
+    {
+        return (void*)entry_vkCmdSetBlendConstants;
+    }
+    if (!strcmp(name, "vkCmdSetDepthBounds"))
+    {
+        return (void*)entry_vkCmdSetDepthBounds;
+    }
+    if (!strcmp(name, "vkCmdSetStencilCompareMask"))
+    {
+        return (void*)entry_vkCmdSetStencilCompareMask;
+    }
+    if (!strcmp(name, "vkCmdSetStencilWriteMask"))
+    {
+        return (void*)entry_vkCmdSetStencilWriteMask;
+    }
+    if (!strcmp(name, "vkCmdSetStencilReference"))
+    {
+        return (void*)entry_vkCmdSetStencilReference;
+    }
+    if (!strcmp(name, "vkCmdBindDescriptorSets"))
+    {
+        return (void*)entry_vkCmdBindDescriptorSets;
+    }
+    if (!strcmp(name, "vkCmdBindIndexBuffer"))
+    {
+        return (void*)entry_vkCmdBindIndexBuffer;
+    }
+    if (!strcmp(name, "vkCmdBindVertexBuffers"))
+    {
+        return (void*)entry_vkCmdBindVertexBuffers;
+    }
+    if (!strcmp(name, "vkCmdDraw"))
+    {
+        return (void*)entry_vkCmdDraw;
+    }
+    if (!strcmp(name, "vkCmdDrawIndexed"))
+    {
+        return (void*)entry_vkCmdDrawIndexed;
+    }
+    if (!strcmp(name, "vkCmdDrawIndirect"))
+    {
+        return (void*)entry_vkCmdDrawIndirect;
+    }
+    if (!strcmp(name, "vkCmdDrawIndexedIndirect"))
+    {
+        return (void*)entry_vkCmdDrawIndexedIndirect;
+    }
+    if (!strcmp(name, "vkCmdDispatch"))
+    {
+        return (void*)entry_vkCmdDispatch;
+    }
+    if (!strcmp(name, "vkCmdDispatchIndirect"))
+    {
+        return (void*)entry_vkCmdDispatchIndirect;
+    }
+    if (!strcmp(name, "vkCmdCopyBuffer"))
+    {
+        return (void*)entry_vkCmdCopyBuffer;
+    }
+    if (!strcmp(name, "vkCmdCopyImage"))
+    {
+        return (void*)entry_vkCmdCopyImage;
+    }
+    if (!strcmp(name, "vkCmdBlitImage"))
+    {
+        return (void*)entry_vkCmdBlitImage;
+    }
+    if (!strcmp(name, "vkCmdCopyBufferToImage"))
+    {
+        return (void*)entry_vkCmdCopyBufferToImage;
+    }
+    if (!strcmp(name, "vkCmdCopyImageToBuffer"))
+    {
+        return (void*)entry_vkCmdCopyImageToBuffer;
+    }
+    if (!strcmp(name, "vkCmdUpdateBuffer"))
+    {
+        return (void*)entry_vkCmdUpdateBuffer;
+    }
+    if (!strcmp(name, "vkCmdFillBuffer"))
+    {
+        return (void*)entry_vkCmdFillBuffer;
+    }
+    if (!strcmp(name, "vkCmdClearColorImage"))
+    {
+        return (void*)entry_vkCmdClearColorImage;
+    }
+    if (!strcmp(name, "vkCmdClearDepthStencilImage"))
+    {
+        return (void*)entry_vkCmdClearDepthStencilImage;
+    }
+    if (!strcmp(name, "vkCmdClearAttachments"))
+    {
+        return (void*)entry_vkCmdClearAttachments;
+    }
+    if (!strcmp(name, "vkCmdResolveImage"))
+    {
+        return (void*)entry_vkCmdResolveImage;
+    }
+    if (!strcmp(name, "vkCmdSetEvent"))
+    {
+        return (void*)entry_vkCmdSetEvent;
+    }
+    if (!strcmp(name, "vkCmdResetEvent"))
+    {
+        return (void*)entry_vkCmdResetEvent;
+    }
+    if (!strcmp(name, "vkCmdWaitEvents"))
+    {
+        return (void*)entry_vkCmdWaitEvents;
+    }
+    if (!strcmp(name, "vkCmdPipelineBarrier"))
+    {
+        return (void*)entry_vkCmdPipelineBarrier;
+    }
+    if (!strcmp(name, "vkCmdBeginQuery"))
+    {
+        return (void*)entry_vkCmdBeginQuery;
+    }
+    if (!strcmp(name, "vkCmdEndQuery"))
+    {
+        return (void*)entry_vkCmdEndQuery;
+    }
+    if (!strcmp(name, "vkCmdResetQueryPool"))
+    {
+        return (void*)entry_vkCmdResetQueryPool;
+    }
+    if (!strcmp(name, "vkCmdWriteTimestamp"))
+    {
+        return (void*)entry_vkCmdWriteTimestamp;
+    }
+    if (!strcmp(name, "vkCmdCopyQueryPoolResults"))
+    {
+        return (void*)entry_vkCmdCopyQueryPoolResults;
+    }
+    if (!strcmp(name, "vkCmdPushConstants"))
+    {
+        return (void*)entry_vkCmdPushConstants;
+    }
+    if (!strcmp(name, "vkCmdBeginRenderPass"))
+    {
+        return (void*)entry_vkCmdBeginRenderPass;
+    }
+    if (!strcmp(name, "vkCmdNextSubpass"))
+    {
+        return (void*)entry_vkCmdNextSubpass;
+    }
+    if (!strcmp(name, "vkCmdEndRenderPass"))
+    {
+        return (void*)entry_vkCmdEndRenderPass;
+    }
+    if (!strcmp(name, "vkCmdExecuteCommands"))
+    {
+        return (void*)entry_vkCmdExecuteCommands;
+    }
+#endif
+#ifdef VK_VERSION_1_1
+    if (!strcmp(name, "vkEnumerateInstanceVersion"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkEnumerateInstanceVersion : nullptr;
+    }
+    if (!strcmp(name, "vkBindBufferMemory2"))
+    {
+        return (void*)dynCheck_entry_vkBindBufferMemory2;
+    }
+    if (!strcmp(name, "vkBindImageMemory2"))
+    {
+        return (void*)dynCheck_entry_vkBindImageMemory2;
+    }
+    if (!strcmp(name, "vkGetDeviceGroupPeerMemoryFeatures"))
+    {
+        return (void*)dynCheck_entry_vkGetDeviceGroupPeerMemoryFeatures;
+    }
+    if (!strcmp(name, "vkCmdSetDeviceMask"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkCmdSetDeviceMask : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDispatchBase"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkCmdDispatchBase : nullptr;
+    }
+    if (!strcmp(name, "vkEnumeratePhysicalDeviceGroups"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetImageMemoryRequirements2"))
+    {
+        return (void*)dynCheck_entry_vkGetImageMemoryRequirements2;
+    }
+    if (!strcmp(name, "vkGetBufferMemoryRequirements2"))
+    {
+        return (void*)dynCheck_entry_vkGetBufferMemoryRequirements2;
+    }
+    if (!strcmp(name, "vkGetImageSparseMemoryRequirements2"))
+    {
+        return (void*)dynCheck_entry_vkGetImageSparseMemoryRequirements2;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceFeatures2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceFeatures2 : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceProperties2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceProperties2 : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceFormatProperties2 : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceImageFormatProperties2 : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceQueueFamilyProperties2 : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceMemoryProperties2 : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceSparseImageFormatProperties2 : nullptr;
+    }
+    if (!strcmp(name, "vkTrimCommandPool"))
+    {
+        return (void*)dynCheck_entry_vkTrimCommandPool;
+    }
+    if (!strcmp(name, "vkGetDeviceQueue2"))
+    {
+        return (void*)dynCheck_entry_vkGetDeviceQueue2;
+    }
+    if (!strcmp(name, "vkCreateSamplerYcbcrConversion"))
+    {
+        return (void*)dynCheck_entry_vkCreateSamplerYcbcrConversion;
+    }
+    if (!strcmp(name, "vkDestroySamplerYcbcrConversion"))
+    {
+        return (void*)dynCheck_entry_vkDestroySamplerYcbcrConversion;
+    }
+    if (!strcmp(name, "vkCreateDescriptorUpdateTemplate"))
+    {
+        return (void*)dynCheck_entry_vkCreateDescriptorUpdateTemplate;
+    }
+    if (!strcmp(name, "vkDestroyDescriptorUpdateTemplate"))
+    {
+        return (void*)dynCheck_entry_vkDestroyDescriptorUpdateTemplate;
+    }
+    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplate"))
+    {
+        return (void*)dynCheck_entry_vkUpdateDescriptorSetWithTemplate;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalBufferProperties"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceExternalBufferProperties : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalFenceProperties"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceExternalFenceProperties : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalSemaphoreProperties"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceExternalSemaphoreProperties : nullptr;
+    }
+    if (!strcmp(name, "vkGetDescriptorSetLayoutSupport"))
+    {
+        return (void*)dynCheck_entry_vkGetDescriptorSetLayoutSupport;
+    }
+#endif
+#ifdef VK_VERSION_1_2
+    if (!strcmp(name, "vkCmdDrawIndirectCount"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkCmdDrawIndirectCount : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawIndexedIndirectCount"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkCmdDrawIndexedIndirectCount : nullptr;
+    }
+    if (!strcmp(name, "vkCreateRenderPass2"))
+    {
+        return (void*)dynCheck_entry_vkCreateRenderPass2;
+    }
+    if (!strcmp(name, "vkCmdBeginRenderPass2"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkCmdBeginRenderPass2 : nullptr;
+    }
+    if (!strcmp(name, "vkCmdNextSubpass2"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkCmdNextSubpass2 : nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndRenderPass2"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkCmdEndRenderPass2 : nullptr;
+    }
+    if (!strcmp(name, "vkResetQueryPool"))
+    {
+        return (void*)dynCheck_entry_vkResetQueryPool;
+    }
+    if (!strcmp(name, "vkGetSemaphoreCounterValue"))
+    {
+        return (void*)dynCheck_entry_vkGetSemaphoreCounterValue;
+    }
+    if (!strcmp(name, "vkWaitSemaphores"))
+    {
+        return (void*)dynCheck_entry_vkWaitSemaphores;
+    }
+    if (!strcmp(name, "vkSignalSemaphore"))
+    {
+        return (void*)dynCheck_entry_vkSignalSemaphore;
+    }
+    if (!strcmp(name, "vkGetBufferDeviceAddress"))
+    {
+        return (void*)dynCheck_entry_vkGetBufferDeviceAddress;
+    }
+    if (!strcmp(name, "vkGetBufferOpaqueCaptureAddress"))
+    {
+        return (void*)dynCheck_entry_vkGetBufferOpaqueCaptureAddress;
+    }
+    if (!strcmp(name, "vkGetDeviceMemoryOpaqueCaptureAddress"))
+    {
+        return (void*)dynCheck_entry_vkGetDeviceMemoryOpaqueCaptureAddress;
+    }
+#endif
+#ifdef VK_KHR_surface
+    if (!strcmp(name, "vkDestroySurfaceKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_surface");
+        return hasExt ? (void*)entry_vkDestroySurfaceKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceSupportKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceSupportKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceCapabilitiesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceFormatsKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceFormatsKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfacePresentModesKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfacePresentModesKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_swapchain
+    if (!strcmp(name, "vkCreateSwapchainKHR"))
+    {
+        return (void*)dynCheck_entry_vkCreateSwapchainKHR;
+    }
+    if (!strcmp(name, "vkDestroySwapchainKHR"))
+    {
+        return (void*)dynCheck_entry_vkDestroySwapchainKHR;
+    }
+    if (!strcmp(name, "vkGetSwapchainImagesKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetSwapchainImagesKHR;
+    }
+    if (!strcmp(name, "vkAcquireNextImageKHR"))
+    {
+        return (void*)dynCheck_entry_vkAcquireNextImageKHR;
+    }
+    if (!strcmp(name, "vkQueuePresentKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_swapchain");
+        return hasExt ? (void*)entry_vkQueuePresentKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceGroupPresentCapabilitiesKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetDeviceGroupPresentCapabilitiesKHR;
+    }
+    if (!strcmp(name, "vkGetDeviceGroupSurfacePresentModesKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetDeviceGroupSurfacePresentModesKHR;
+    }
+    if (!strcmp(name, "vkGetPhysicalDevicePresentRectanglesKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_swapchain");
+        return hasExt ? (void*)entry_vkGetPhysicalDevicePresentRectanglesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkAcquireNextImage2KHR"))
+    {
+        return (void*)dynCheck_entry_vkAcquireNextImage2KHR;
+    }
+#endif
+#ifdef VK_KHR_display
+    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPropertiesKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_display");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceDisplayPropertiesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_display");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceDisplayPlanePropertiesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDisplayPlaneSupportedDisplaysKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_display");
+        return hasExt ? (void*)entry_vkGetDisplayPlaneSupportedDisplaysKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDisplayModePropertiesKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_display");
+        return hasExt ? (void*)entry_vkGetDisplayModePropertiesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCreateDisplayModeKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_display");
+        return hasExt ? (void*)entry_vkCreateDisplayModeKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDisplayPlaneCapabilitiesKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_display");
+        return hasExt ? (void*)entry_vkGetDisplayPlaneCapabilitiesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCreateDisplayPlaneSurfaceKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_display");
+        return hasExt ? (void*)entry_vkCreateDisplayPlaneSurfaceKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_display_swapchain
+    if (!strcmp(name, "vkCreateSharedSwapchainsKHR"))
+    {
+        return (void*)dynCheck_entry_vkCreateSharedSwapchainsKHR;
+    }
+#endif
+#ifdef VK_KHR_xlib_surface
+    if (!strcmp(name, "vkCreateXlibSurfaceKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_xlib_surface");
+        return hasExt ? (void*)entry_vkCreateXlibSurfaceKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceXlibPresentationSupportKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_xlib_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceXlibPresentationSupportKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_xcb_surface
+    if (!strcmp(name, "vkCreateXcbSurfaceKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_xcb_surface");
+        return hasExt ? (void*)entry_vkCreateXcbSurfaceKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceXcbPresentationSupportKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_xcb_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceXcbPresentationSupportKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_wayland_surface
+    if (!strcmp(name, "vkCreateWaylandSurfaceKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_wayland_surface");
+        return hasExt ? (void*)entry_vkCreateWaylandSurfaceKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceWaylandPresentationSupportKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_wayland_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceWaylandPresentationSupportKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_android_surface
+    if (!strcmp(name, "vkCreateAndroidSurfaceKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_android_surface");
+        return hasExt ? (void*)entry_vkCreateAndroidSurfaceKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_win32_surface
+    if (!strcmp(name, "vkCreateWin32SurfaceKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_win32_surface");
+        return hasExt ? (void*)entry_vkCreateWin32SurfaceKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceWin32PresentationSupportKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_win32_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceWin32PresentationSupportKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_get_physical_device_properties2
+    if (!strcmp(name, "vkGetPhysicalDeviceFeatures2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_physical_device_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceFeatures2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceProperties2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_physical_device_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceProperties2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_physical_device_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceFormatProperties2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_physical_device_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceImageFormatProperties2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_physical_device_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceQueueFamilyProperties2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_physical_device_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceMemoryProperties2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_physical_device_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSparseImageFormatProperties2KHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_device_group
+    if (!strcmp(name, "vkGetDeviceGroupPeerMemoryFeaturesKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetDeviceGroupPeerMemoryFeaturesKHR;
+    }
+    if (!strcmp(name, "vkCmdSetDeviceMaskKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_device_group");
+        return hasExt ? (void*)entry_vkCmdSetDeviceMaskKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDispatchBaseKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_device_group");
+        return hasExt ? (void*)entry_vkCmdDispatchBaseKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_maintenance1
+    if (!strcmp(name, "vkTrimCommandPoolKHR"))
+    {
+        return (void*)dynCheck_entry_vkTrimCommandPoolKHR;
+    }
+#endif
+#ifdef VK_KHR_device_group_creation
+    if (!strcmp(name, "vkEnumeratePhysicalDeviceGroupsKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_device_group_creation");
+        return hasExt ? (void*)entry_vkEnumeratePhysicalDeviceGroupsKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_memory_capabilities
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalBufferPropertiesKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_external_memory_capabilities");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceExternalBufferPropertiesKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_memory_win32
+    if (!strcmp(name, "vkGetMemoryWin32HandleKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetMemoryWin32HandleKHR;
+    }
+    if (!strcmp(name, "vkGetMemoryWin32HandlePropertiesKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetMemoryWin32HandlePropertiesKHR;
+    }
+#endif
+#ifdef VK_KHR_external_memory_fd
+    if (!strcmp(name, "vkGetMemoryFdKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetMemoryFdKHR;
+    }
+    if (!strcmp(name, "vkGetMemoryFdPropertiesKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetMemoryFdPropertiesKHR;
+    }
+#endif
+#ifdef VK_KHR_external_semaphore_capabilities
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_external_semaphore_capabilities");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_semaphore_win32
+    if (!strcmp(name, "vkImportSemaphoreWin32HandleKHR"))
+    {
+        return (void*)dynCheck_entry_vkImportSemaphoreWin32HandleKHR;
+    }
+    if (!strcmp(name, "vkGetSemaphoreWin32HandleKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetSemaphoreWin32HandleKHR;
+    }
+#endif
+#ifdef VK_KHR_external_semaphore_fd
+    if (!strcmp(name, "vkImportSemaphoreFdKHR"))
+    {
+        return (void*)dynCheck_entry_vkImportSemaphoreFdKHR;
+    }
+    if (!strcmp(name, "vkGetSemaphoreFdKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetSemaphoreFdKHR;
+    }
+#endif
+#ifdef VK_KHR_push_descriptor
+    if (!strcmp(name, "vkCmdPushDescriptorSetKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_push_descriptor");
+        return hasExt ? (void*)entry_vkCmdPushDescriptorSetKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdPushDescriptorSetWithTemplateKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_push_descriptor");
+        return hasExt ? (void*)entry_vkCmdPushDescriptorSetWithTemplateKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_descriptor_update_template
+    if (!strcmp(name, "vkCreateDescriptorUpdateTemplateKHR"))
+    {
+        return (void*)dynCheck_entry_vkCreateDescriptorUpdateTemplateKHR;
+    }
+    if (!strcmp(name, "vkDestroyDescriptorUpdateTemplateKHR"))
+    {
+        return (void*)dynCheck_entry_vkDestroyDescriptorUpdateTemplateKHR;
+    }
+    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplateKHR"))
+    {
+        return (void*)dynCheck_entry_vkUpdateDescriptorSetWithTemplateKHR;
+    }
+#endif
+#ifdef VK_KHR_create_renderpass2
+    if (!strcmp(name, "vkCreateRenderPass2KHR"))
+    {
+        return (void*)dynCheck_entry_vkCreateRenderPass2KHR;
+    }
+    if (!strcmp(name, "vkCmdBeginRenderPass2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_create_renderpass2");
+        return hasExt ? (void*)entry_vkCmdBeginRenderPass2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdNextSubpass2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_create_renderpass2");
+        return hasExt ? (void*)entry_vkCmdNextSubpass2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndRenderPass2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_create_renderpass2");
+        return hasExt ? (void*)entry_vkCmdEndRenderPass2KHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_shared_presentable_image
+    if (!strcmp(name, "vkGetSwapchainStatusKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetSwapchainStatusKHR;
+    }
+#endif
+#ifdef VK_KHR_external_fence_capabilities
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalFencePropertiesKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_external_fence_capabilities");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceExternalFencePropertiesKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_fence_win32
+    if (!strcmp(name, "vkImportFenceWin32HandleKHR"))
+    {
+        return (void*)dynCheck_entry_vkImportFenceWin32HandleKHR;
+    }
+    if (!strcmp(name, "vkGetFenceWin32HandleKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetFenceWin32HandleKHR;
+    }
+#endif
+#ifdef VK_KHR_external_fence_fd
+    if (!strcmp(name, "vkImportFenceFdKHR"))
+    {
+        return (void*)dynCheck_entry_vkImportFenceFdKHR;
+    }
+    if (!strcmp(name, "vkGetFenceFdKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetFenceFdKHR;
+    }
+#endif
+#ifdef VK_KHR_performance_query
+    if (!strcmp(name, "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_performance_query");
+        return hasExt ? (void*)entry_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_performance_query");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkAcquireProfilingLockKHR"))
+    {
+        return (void*)dynCheck_entry_vkAcquireProfilingLockKHR;
+    }
+    if (!strcmp(name, "vkReleaseProfilingLockKHR"))
+    {
+        return (void*)dynCheck_entry_vkReleaseProfilingLockKHR;
+    }
+#endif
+#ifdef VK_KHR_get_surface_capabilities2
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilities2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_surface_capabilities2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceCapabilities2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceFormats2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_surface_capabilities2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceFormats2KHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_get_display_properties2
+    if (!strcmp(name, "vkGetPhysicalDeviceDisplayProperties2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_display_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceDisplayProperties2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPlaneProperties2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_display_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceDisplayPlaneProperties2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDisplayModeProperties2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_display_properties2");
+        return hasExt ? (void*)entry_vkGetDisplayModeProperties2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDisplayPlaneCapabilities2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_get_display_properties2");
+        return hasExt ? (void*)entry_vkGetDisplayPlaneCapabilities2KHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_get_memory_requirements2
+    if (!strcmp(name, "vkGetImageMemoryRequirements2KHR"))
+    {
+        return (void*)dynCheck_entry_vkGetImageMemoryRequirements2KHR;
+    }
+    if (!strcmp(name, "vkGetBufferMemoryRequirements2KHR"))
+    {
+        return (void*)dynCheck_entry_vkGetBufferMemoryRequirements2KHR;
+    }
+    if (!strcmp(name, "vkGetImageSparseMemoryRequirements2KHR"))
+    {
+        return (void*)dynCheck_entry_vkGetImageSparseMemoryRequirements2KHR;
+    }
+#endif
+#ifdef VK_KHR_sampler_ycbcr_conversion
+    if (!strcmp(name, "vkCreateSamplerYcbcrConversionKHR"))
+    {
+        return (void*)dynCheck_entry_vkCreateSamplerYcbcrConversionKHR;
+    }
+    if (!strcmp(name, "vkDestroySamplerYcbcrConversionKHR"))
+    {
+        return (void*)dynCheck_entry_vkDestroySamplerYcbcrConversionKHR;
+    }
+#endif
+#ifdef VK_KHR_bind_memory2
+    if (!strcmp(name, "vkBindBufferMemory2KHR"))
+    {
+        return (void*)dynCheck_entry_vkBindBufferMemory2KHR;
+    }
+    if (!strcmp(name, "vkBindImageMemory2KHR"))
+    {
+        return (void*)dynCheck_entry_vkBindImageMemory2KHR;
+    }
+#endif
+#ifdef VK_KHR_maintenance3
+    if (!strcmp(name, "vkGetDescriptorSetLayoutSupportKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetDescriptorSetLayoutSupportKHR;
+    }
+#endif
+#ifdef VK_KHR_draw_indirect_count
+    if (!strcmp(name, "vkCmdDrawIndirectCountKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_draw_indirect_count");
+        return hasExt ? (void*)entry_vkCmdDrawIndirectCountKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawIndexedIndirectCountKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_draw_indirect_count");
+        return hasExt ? (void*)entry_vkCmdDrawIndexedIndirectCountKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_timeline_semaphore
+    if (!strcmp(name, "vkGetSemaphoreCounterValueKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetSemaphoreCounterValueKHR;
+    }
+    if (!strcmp(name, "vkWaitSemaphoresKHR"))
+    {
+        return (void*)dynCheck_entry_vkWaitSemaphoresKHR;
+    }
+    if (!strcmp(name, "vkSignalSemaphoreKHR"))
+    {
+        return (void*)dynCheck_entry_vkSignalSemaphoreKHR;
+    }
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+    if (!strcmp(name, "vkGetPhysicalDeviceFragmentShadingRatesKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_fragment_shading_rate");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceFragmentShadingRatesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetFragmentShadingRateKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_fragment_shading_rate");
+        return hasExt ? (void*)entry_vkCmdSetFragmentShadingRateKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_buffer_device_address
+    if (!strcmp(name, "vkGetBufferDeviceAddressKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetBufferDeviceAddressKHR;
+    }
+    if (!strcmp(name, "vkGetBufferOpaqueCaptureAddressKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetBufferOpaqueCaptureAddressKHR;
+    }
+    if (!strcmp(name, "vkGetDeviceMemoryOpaqueCaptureAddressKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetDeviceMemoryOpaqueCaptureAddressKHR;
+    }
+#endif
+#ifdef VK_KHR_deferred_host_operations
+    if (!strcmp(name, "vkCreateDeferredOperationKHR"))
+    {
+        return (void*)dynCheck_entry_vkCreateDeferredOperationKHR;
+    }
+    if (!strcmp(name, "vkDestroyDeferredOperationKHR"))
+    {
+        return (void*)dynCheck_entry_vkDestroyDeferredOperationKHR;
+    }
+    if (!strcmp(name, "vkGetDeferredOperationMaxConcurrencyKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetDeferredOperationMaxConcurrencyKHR;
+    }
+    if (!strcmp(name, "vkGetDeferredOperationResultKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetDeferredOperationResultKHR;
+    }
+    if (!strcmp(name, "vkDeferredOperationJoinKHR"))
+    {
+        return (void*)dynCheck_entry_vkDeferredOperationJoinKHR;
+    }
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+    if (!strcmp(name, "vkGetPipelineExecutablePropertiesKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetPipelineExecutablePropertiesKHR;
+    }
+    if (!strcmp(name, "vkGetPipelineExecutableStatisticsKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetPipelineExecutableStatisticsKHR;
+    }
+    if (!strcmp(name, "vkGetPipelineExecutableInternalRepresentationsKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetPipelineExecutableInternalRepresentationsKHR;
+    }
+#endif
+#ifdef VK_KHR_copy_commands2
+    if (!strcmp(name, "vkCmdCopyBuffer2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_copy_commands2");
+        return hasExt ? (void*)entry_vkCmdCopyBuffer2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyImage2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_copy_commands2");
+        return hasExt ? (void*)entry_vkCmdCopyImage2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyBufferToImage2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_copy_commands2");
+        return hasExt ? (void*)entry_vkCmdCopyBufferToImage2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyImageToBuffer2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_copy_commands2");
+        return hasExt ? (void*)entry_vkCmdCopyImageToBuffer2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBlitImage2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_copy_commands2");
+        return hasExt ? (void*)entry_vkCmdBlitImage2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdResolveImage2KHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_copy_commands2");
+        return hasExt ? (void*)entry_vkCmdResolveImage2KHR : nullptr;
+    }
+#endif
+#ifdef VK_ANDROID_native_buffer
+    if (!strcmp(name, "vkGetSwapchainGrallocUsageANDROID"))
+    {
+        return (void*)dynCheck_entry_vkGetSwapchainGrallocUsageANDROID;
+    }
+    if (!strcmp(name, "vkAcquireImageANDROID"))
+    {
+        return (void*)dynCheck_entry_vkAcquireImageANDROID;
+    }
+    if (!strcmp(name, "vkQueueSignalReleaseImageANDROID"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_ANDROID_native_buffer");
+        return hasExt ? (void*)entry_vkQueueSignalReleaseImageANDROID : nullptr;
+    }
+#endif
+#ifdef VK_EXT_debug_report
+    if (!strcmp(name, "vkCreateDebugReportCallbackEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_report");
+        return hasExt ? (void*)entry_vkCreateDebugReportCallbackEXT : nullptr;
+    }
+    if (!strcmp(name, "vkDestroyDebugReportCallbackEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_report");
+        return hasExt ? (void*)entry_vkDestroyDebugReportCallbackEXT : nullptr;
+    }
+    if (!strcmp(name, "vkDebugReportMessageEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_report");
+        return hasExt ? (void*)entry_vkDebugReportMessageEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_debug_marker
+    if (!strcmp(name, "vkDebugMarkerSetObjectTagEXT"))
+    {
+        return (void*)dynCheck_entry_vkDebugMarkerSetObjectTagEXT;
+    }
+    if (!strcmp(name, "vkDebugMarkerSetObjectNameEXT"))
+    {
+        return (void*)dynCheck_entry_vkDebugMarkerSetObjectNameEXT;
+    }
+    if (!strcmp(name, "vkCmdDebugMarkerBeginEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_marker");
+        return hasExt ? (void*)entry_vkCmdDebugMarkerBeginEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDebugMarkerEndEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_marker");
+        return hasExt ? (void*)entry_vkCmdDebugMarkerEndEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDebugMarkerInsertEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_marker");
+        return hasExt ? (void*)entry_vkCmdDebugMarkerInsertEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_transform_feedback
+    if (!strcmp(name, "vkCmdBindTransformFeedbackBuffersEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_transform_feedback");
+        return hasExt ? (void*)entry_vkCmdBindTransformFeedbackBuffersEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBeginTransformFeedbackEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_transform_feedback");
+        return hasExt ? (void*)entry_vkCmdBeginTransformFeedbackEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndTransformFeedbackEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_transform_feedback");
+        return hasExt ? (void*)entry_vkCmdEndTransformFeedbackEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBeginQueryIndexedEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_transform_feedback");
+        return hasExt ? (void*)entry_vkCmdBeginQueryIndexedEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndQueryIndexedEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_transform_feedback");
+        return hasExt ? (void*)entry_vkCmdEndQueryIndexedEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawIndirectByteCountEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_transform_feedback");
+        return hasExt ? (void*)entry_vkCmdDrawIndirectByteCountEXT : nullptr;
+    }
+#endif
+#ifdef VK_NVX_image_view_handle
+    if (!strcmp(name, "vkGetImageViewHandleNVX"))
+    {
+        return (void*)dynCheck_entry_vkGetImageViewHandleNVX;
+    }
+    if (!strcmp(name, "vkGetImageViewAddressNVX"))
+    {
+        return (void*)dynCheck_entry_vkGetImageViewAddressNVX;
+    }
+#endif
+#ifdef VK_AMD_draw_indirect_count
+    if (!strcmp(name, "vkCmdDrawIndirectCountAMD"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_AMD_draw_indirect_count");
+        return hasExt ? (void*)entry_vkCmdDrawIndirectCountAMD : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawIndexedIndirectCountAMD"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_AMD_draw_indirect_count");
+        return hasExt ? (void*)entry_vkCmdDrawIndexedIndirectCountAMD : nullptr;
+    }
+#endif
+#ifdef VK_AMD_shader_info
+    if (!strcmp(name, "vkGetShaderInfoAMD"))
+    {
+        return (void*)dynCheck_entry_vkGetShaderInfoAMD;
+    }
+#endif
+#ifdef VK_GGP_stream_descriptor_surface
+    if (!strcmp(name, "vkCreateStreamDescriptorSurfaceGGP"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_GGP_stream_descriptor_surface");
+        return hasExt ? (void*)entry_vkCreateStreamDescriptorSurfaceGGP : nullptr;
+    }
+#endif
+#ifdef VK_NV_external_memory_capabilities
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalImageFormatPropertiesNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_external_memory_capabilities");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceExternalImageFormatPropertiesNV : nullptr;
+    }
+#endif
+#ifdef VK_NV_external_memory_win32
+    if (!strcmp(name, "vkGetMemoryWin32HandleNV"))
+    {
+        return (void*)dynCheck_entry_vkGetMemoryWin32HandleNV;
+    }
+#endif
+#ifdef VK_NN_vi_surface
+    if (!strcmp(name, "vkCreateViSurfaceNN"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NN_vi_surface");
+        return hasExt ? (void*)entry_vkCreateViSurfaceNN : nullptr;
+    }
+#endif
+#ifdef VK_EXT_conditional_rendering
+    if (!strcmp(name, "vkCmdBeginConditionalRenderingEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_conditional_rendering");
+        return hasExt ? (void*)entry_vkCmdBeginConditionalRenderingEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndConditionalRenderingEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_conditional_rendering");
+        return hasExt ? (void*)entry_vkCmdEndConditionalRenderingEXT : nullptr;
+    }
+#endif
+#ifdef VK_NV_clip_space_w_scaling
+    if (!strcmp(name, "vkCmdSetViewportWScalingNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_clip_space_w_scaling");
+        return hasExt ? (void*)entry_vkCmdSetViewportWScalingNV : nullptr;
+    }
+#endif
+#ifdef VK_EXT_direct_mode_display
+    if (!strcmp(name, "vkReleaseDisplayEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_direct_mode_display");
+        return hasExt ? (void*)entry_vkReleaseDisplayEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_acquire_xlib_display
+    if (!strcmp(name, "vkAcquireXlibDisplayEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_acquire_xlib_display");
+        return hasExt ? (void*)entry_vkAcquireXlibDisplayEXT : nullptr;
+    }
+    if (!strcmp(name, "vkGetRandROutputDisplayEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_acquire_xlib_display");
+        return hasExt ? (void*)entry_vkGetRandROutputDisplayEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_display_surface_counter
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilities2EXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_display_surface_counter");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceCapabilities2EXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_display_control
+    if (!strcmp(name, "vkDisplayPowerControlEXT"))
+    {
+        return (void*)dynCheck_entry_vkDisplayPowerControlEXT;
+    }
+    if (!strcmp(name, "vkRegisterDeviceEventEXT"))
+    {
+        return (void*)dynCheck_entry_vkRegisterDeviceEventEXT;
+    }
+    if (!strcmp(name, "vkRegisterDisplayEventEXT"))
+    {
+        return (void*)dynCheck_entry_vkRegisterDisplayEventEXT;
+    }
+    if (!strcmp(name, "vkGetSwapchainCounterEXT"))
+    {
+        return (void*)dynCheck_entry_vkGetSwapchainCounterEXT;
+    }
+#endif
+#ifdef VK_GOOGLE_display_timing
+    if (!strcmp(name, "vkGetRefreshCycleDurationGOOGLE"))
+    {
+        return (void*)dynCheck_entry_vkGetRefreshCycleDurationGOOGLE;
+    }
+    if (!strcmp(name, "vkGetPastPresentationTimingGOOGLE"))
+    {
+        return (void*)dynCheck_entry_vkGetPastPresentationTimingGOOGLE;
+    }
+#endif
+#ifdef VK_EXT_discard_rectangles
+    if (!strcmp(name, "vkCmdSetDiscardRectangleEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_discard_rectangles");
+        return hasExt ? (void*)entry_vkCmdSetDiscardRectangleEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_hdr_metadata
+    if (!strcmp(name, "vkSetHdrMetadataEXT"))
+    {
+        return (void*)dynCheck_entry_vkSetHdrMetadataEXT;
+    }
+#endif
+#ifdef VK_MVK_ios_surface
+    if (!strcmp(name, "vkCreateIOSSurfaceMVK"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_MVK_ios_surface");
+        return hasExt ? (void*)entry_vkCreateIOSSurfaceMVK : nullptr;
+    }
+#endif
+#ifdef VK_MVK_macos_surface
+    if (!strcmp(name, "vkCreateMacOSSurfaceMVK"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_MVK_macos_surface");
+        return hasExt ? (void*)entry_vkCreateMacOSSurfaceMVK : nullptr;
+    }
+#endif
+#ifdef VK_MVK_moltenvk
+    if (!strcmp(name, "vkGetMTLDeviceMVK"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_MVK_moltenvk");
+        return hasExt ? (void*)entry_vkGetMTLDeviceMVK : nullptr;
+    }
+    if (!strcmp(name, "vkSetMTLTextureMVK"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_MVK_moltenvk");
+        return hasExt ? (void*)entry_vkSetMTLTextureMVK : nullptr;
+    }
+    if (!strcmp(name, "vkGetMTLTextureMVK"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_MVK_moltenvk");
+        return hasExt ? (void*)entry_vkGetMTLTextureMVK : nullptr;
+    }
+    if (!strcmp(name, "vkGetMTLBufferMVK"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_MVK_moltenvk");
+        return hasExt ? (void*)entry_vkGetMTLBufferMVK : nullptr;
+    }
+    if (!strcmp(name, "vkUseIOSurfaceMVK"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_MVK_moltenvk");
+        return hasExt ? (void*)entry_vkUseIOSurfaceMVK : nullptr;
+    }
+    if (!strcmp(name, "vkGetIOSurfaceMVK"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_MVK_moltenvk");
+        return hasExt ? (void*)entry_vkGetIOSurfaceMVK : nullptr;
+    }
+#endif
+#ifdef VK_EXT_debug_utils
+    if (!strcmp(name, "vkSetDebugUtilsObjectNameEXT"))
+    {
+        return (void*)dynCheck_entry_vkSetDebugUtilsObjectNameEXT;
+    }
+    if (!strcmp(name, "vkSetDebugUtilsObjectTagEXT"))
+    {
+        return (void*)dynCheck_entry_vkSetDebugUtilsObjectTagEXT;
+    }
+    if (!strcmp(name, "vkQueueBeginDebugUtilsLabelEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkQueueBeginDebugUtilsLabelEXT : nullptr;
+    }
+    if (!strcmp(name, "vkQueueEndDebugUtilsLabelEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkQueueEndDebugUtilsLabelEXT : nullptr;
+    }
+    if (!strcmp(name, "vkQueueInsertDebugUtilsLabelEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkQueueInsertDebugUtilsLabelEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBeginDebugUtilsLabelEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkCmdBeginDebugUtilsLabelEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndDebugUtilsLabelEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkCmdEndDebugUtilsLabelEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdInsertDebugUtilsLabelEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkCmdInsertDebugUtilsLabelEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCreateDebugUtilsMessengerEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkCreateDebugUtilsMessengerEXT : nullptr;
+    }
+    if (!strcmp(name, "vkDestroyDebugUtilsMessengerEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkDestroyDebugUtilsMessengerEXT : nullptr;
+    }
+    if (!strcmp(name, "vkSubmitDebugUtilsMessageEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkSubmitDebugUtilsMessageEXT : nullptr;
+    }
+#endif
+#ifdef VK_ANDROID_external_memory_android_hardware_buffer
+    if (!strcmp(name, "vkGetAndroidHardwareBufferPropertiesANDROID"))
+    {
+        return (void*)dynCheck_entry_vkGetAndroidHardwareBufferPropertiesANDROID;
+    }
+    if (!strcmp(name, "vkGetMemoryAndroidHardwareBufferANDROID"))
+    {
+        return (void*)dynCheck_entry_vkGetMemoryAndroidHardwareBufferANDROID;
+    }
+#endif
+#ifdef VK_EXT_sample_locations
+    if (!strcmp(name, "vkCmdSetSampleLocationsEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_sample_locations");
+        return hasExt ? (void*)entry_vkCmdSetSampleLocationsEXT : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceMultisamplePropertiesEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_sample_locations");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceMultisamplePropertiesEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+    if (!strcmp(name, "vkGetImageDrmFormatModifierPropertiesEXT"))
+    {
+        return (void*)dynCheck_entry_vkGetImageDrmFormatModifierPropertiesEXT;
+    }
+#endif
+#ifdef VK_EXT_validation_cache
+    if (!strcmp(name, "vkCreateValidationCacheEXT"))
+    {
+        return (void*)dynCheck_entry_vkCreateValidationCacheEXT;
+    }
+    if (!strcmp(name, "vkDestroyValidationCacheEXT"))
+    {
+        return (void*)dynCheck_entry_vkDestroyValidationCacheEXT;
+    }
+    if (!strcmp(name, "vkMergeValidationCachesEXT"))
+    {
+        return (void*)dynCheck_entry_vkMergeValidationCachesEXT;
+    }
+    if (!strcmp(name, "vkGetValidationCacheDataEXT"))
+    {
+        return (void*)dynCheck_entry_vkGetValidationCacheDataEXT;
+    }
+#endif
+#ifdef VK_NV_shading_rate_image
+    if (!strcmp(name, "vkCmdBindShadingRateImageNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_shading_rate_image");
+        return hasExt ? (void*)entry_vkCmdBindShadingRateImageNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetViewportShadingRatePaletteNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_shading_rate_image");
+        return hasExt ? (void*)entry_vkCmdSetViewportShadingRatePaletteNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetCoarseSampleOrderNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_shading_rate_image");
+        return hasExt ? (void*)entry_vkCmdSetCoarseSampleOrderNV : nullptr;
+    }
+#endif
+#ifdef VK_NV_ray_tracing
+    if (!strcmp(name, "vkCreateAccelerationStructureNV"))
+    {
+        return (void*)dynCheck_entry_vkCreateAccelerationStructureNV;
+    }
+    if (!strcmp(name, "vkDestroyAccelerationStructureNV"))
+    {
+        return (void*)dynCheck_entry_vkDestroyAccelerationStructureNV;
+    }
+    if (!strcmp(name, "vkGetAccelerationStructureMemoryRequirementsNV"))
+    {
+        return (void*)dynCheck_entry_vkGetAccelerationStructureMemoryRequirementsNV;
+    }
+    if (!strcmp(name, "vkBindAccelerationStructureMemoryNV"))
+    {
+        return (void*)dynCheck_entry_vkBindAccelerationStructureMemoryNV;
+    }
+    if (!strcmp(name, "vkCmdBuildAccelerationStructureNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_ray_tracing");
+        return hasExt ? (void*)entry_vkCmdBuildAccelerationStructureNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyAccelerationStructureNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_ray_tracing");
+        return hasExt ? (void*)entry_vkCmdCopyAccelerationStructureNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdTraceRaysNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_ray_tracing");
+        return hasExt ? (void*)entry_vkCmdTraceRaysNV : nullptr;
+    }
+    if (!strcmp(name, "vkCreateRayTracingPipelinesNV"))
+    {
+        return (void*)dynCheck_entry_vkCreateRayTracingPipelinesNV;
+    }
+    if (!strcmp(name, "vkGetRayTracingShaderGroupHandlesKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetRayTracingShaderGroupHandlesKHR;
+    }
+    if (!strcmp(name, "vkGetRayTracingShaderGroupHandlesNV"))
+    {
+        return (void*)dynCheck_entry_vkGetRayTracingShaderGroupHandlesNV;
+    }
+    if (!strcmp(name, "vkGetAccelerationStructureHandleNV"))
+    {
+        return (void*)dynCheck_entry_vkGetAccelerationStructureHandleNV;
+    }
+    if (!strcmp(name, "vkCmdWriteAccelerationStructuresPropertiesNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_ray_tracing");
+        return hasExt ? (void*)entry_vkCmdWriteAccelerationStructuresPropertiesNV : nullptr;
+    }
+    if (!strcmp(name, "vkCompileDeferredNV"))
+    {
+        return (void*)dynCheck_entry_vkCompileDeferredNV;
+    }
+#endif
+#ifdef VK_EXT_external_memory_host
+    if (!strcmp(name, "vkGetMemoryHostPointerPropertiesEXT"))
+    {
+        return (void*)dynCheck_entry_vkGetMemoryHostPointerPropertiesEXT;
+    }
+#endif
+#ifdef VK_AMD_buffer_marker
+    if (!strcmp(name, "vkCmdWriteBufferMarkerAMD"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_AMD_buffer_marker");
+        return hasExt ? (void*)entry_vkCmdWriteBufferMarkerAMD : nullptr;
+    }
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+    if (!strcmp(name, "vkGetPhysicalDeviceCalibrateableTimeDomainsEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_calibrated_timestamps");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT : nullptr;
+    }
+    if (!strcmp(name, "vkGetCalibratedTimestampsEXT"))
+    {
+        return (void*)dynCheck_entry_vkGetCalibratedTimestampsEXT;
+    }
+#endif
+#ifdef VK_NV_mesh_shader
+    if (!strcmp(name, "vkCmdDrawMeshTasksNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_mesh_shader");
+        return hasExt ? (void*)entry_vkCmdDrawMeshTasksNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawMeshTasksIndirectNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_mesh_shader");
+        return hasExt ? (void*)entry_vkCmdDrawMeshTasksIndirectNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawMeshTasksIndirectCountNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_mesh_shader");
+        return hasExt ? (void*)entry_vkCmdDrawMeshTasksIndirectCountNV : nullptr;
+    }
+#endif
+#ifdef VK_NV_scissor_exclusive
+    if (!strcmp(name, "vkCmdSetExclusiveScissorNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_scissor_exclusive");
+        return hasExt ? (void*)entry_vkCmdSetExclusiveScissorNV : nullptr;
+    }
+#endif
+#ifdef VK_NV_device_diagnostic_checkpoints
+    if (!strcmp(name, "vkCmdSetCheckpointNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_device_diagnostic_checkpoints");
+        return hasExt ? (void*)entry_vkCmdSetCheckpointNV : nullptr;
+    }
+    if (!strcmp(name, "vkGetQueueCheckpointDataNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_device_diagnostic_checkpoints");
+        return hasExt ? (void*)entry_vkGetQueueCheckpointDataNV : nullptr;
+    }
+#endif
+#ifdef VK_INTEL_performance_query
+    if (!strcmp(name, "vkInitializePerformanceApiINTEL"))
+    {
+        return (void*)dynCheck_entry_vkInitializePerformanceApiINTEL;
+    }
+    if (!strcmp(name, "vkUninitializePerformanceApiINTEL"))
+    {
+        return (void*)dynCheck_entry_vkUninitializePerformanceApiINTEL;
+    }
+    if (!strcmp(name, "vkCmdSetPerformanceMarkerINTEL"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_INTEL_performance_query");
+        return hasExt ? (void*)entry_vkCmdSetPerformanceMarkerINTEL : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetPerformanceStreamMarkerINTEL"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_INTEL_performance_query");
+        return hasExt ? (void*)entry_vkCmdSetPerformanceStreamMarkerINTEL : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetPerformanceOverrideINTEL"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_INTEL_performance_query");
+        return hasExt ? (void*)entry_vkCmdSetPerformanceOverrideINTEL : nullptr;
+    }
+    if (!strcmp(name, "vkAcquirePerformanceConfigurationINTEL"))
+    {
+        return (void*)dynCheck_entry_vkAcquirePerformanceConfigurationINTEL;
+    }
+    if (!strcmp(name, "vkReleasePerformanceConfigurationINTEL"))
+    {
+        return (void*)dynCheck_entry_vkReleasePerformanceConfigurationINTEL;
+    }
+    if (!strcmp(name, "vkQueueSetPerformanceConfigurationINTEL"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_INTEL_performance_query");
+        return hasExt ? (void*)entry_vkQueueSetPerformanceConfigurationINTEL : nullptr;
+    }
+    if (!strcmp(name, "vkGetPerformanceParameterINTEL"))
+    {
+        return (void*)dynCheck_entry_vkGetPerformanceParameterINTEL;
+    }
+#endif
+#ifdef VK_AMD_display_native_hdr
+    if (!strcmp(name, "vkSetLocalDimmingAMD"))
+    {
+        return (void*)dynCheck_entry_vkSetLocalDimmingAMD;
+    }
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+    if (!strcmp(name, "vkCreateImagePipeSurfaceFUCHSIA"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_FUCHSIA_imagepipe_surface");
+        return hasExt ? (void*)entry_vkCreateImagePipeSurfaceFUCHSIA : nullptr;
+    }
+#endif
+#ifdef VK_EXT_metal_surface
+    if (!strcmp(name, "vkCreateMetalSurfaceEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_metal_surface");
+        return hasExt ? (void*)entry_vkCreateMetalSurfaceEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_buffer_device_address
+    if (!strcmp(name, "vkGetBufferDeviceAddressEXT"))
+    {
+        return (void*)dynCheck_entry_vkGetBufferDeviceAddressEXT;
+    }
+#endif
+#ifdef VK_EXT_tooling_info
+    if (!strcmp(name, "vkGetPhysicalDeviceToolPropertiesEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_tooling_info");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceToolPropertiesEXT : nullptr;
+    }
+#endif
+#ifdef VK_NV_cooperative_matrix
+    if (!strcmp(name, "vkGetPhysicalDeviceCooperativeMatrixPropertiesNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_cooperative_matrix");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV : nullptr;
+    }
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+    if (!strcmp(name, "vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_coverage_reduction_mode");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV : nullptr;
+    }
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfacePresentModes2EXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_full_screen_exclusive");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfacePresentModes2EXT : nullptr;
+    }
+    if (!strcmp(name, "vkAcquireFullScreenExclusiveModeEXT"))
+    {
+        return (void*)dynCheck_entry_vkAcquireFullScreenExclusiveModeEXT;
+    }
+    if (!strcmp(name, "vkReleaseFullScreenExclusiveModeEXT"))
+    {
+        return (void*)dynCheck_entry_vkReleaseFullScreenExclusiveModeEXT;
+    }
+    if (!strcmp(name, "vkGetDeviceGroupSurfacePresentModes2EXT"))
+    {
+        return (void*)dynCheck_entry_vkGetDeviceGroupSurfacePresentModes2EXT;
+    }
+#endif
+#ifdef VK_EXT_headless_surface
+    if (!strcmp(name, "vkCreateHeadlessSurfaceEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_headless_surface");
+        return hasExt ? (void*)entry_vkCreateHeadlessSurfaceEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_line_rasterization
+    if (!strcmp(name, "vkCmdSetLineStippleEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_line_rasterization");
+        return hasExt ? (void*)entry_vkCmdSetLineStippleEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_host_query_reset
+    if (!strcmp(name, "vkResetQueryPoolEXT"))
+    {
+        return (void*)dynCheck_entry_vkResetQueryPoolEXT;
+    }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+    if (!strcmp(name, "vkCmdSetCullModeEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetCullModeEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetFrontFaceEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetFrontFaceEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetPrimitiveTopologyEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetPrimitiveTopologyEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetViewportWithCountEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetViewportWithCountEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetScissorWithCountEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetScissorWithCountEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBindVertexBuffers2EXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdBindVertexBuffers2EXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetDepthTestEnableEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetDepthTestEnableEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetDepthWriteEnableEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetDepthWriteEnableEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetDepthCompareOpEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetDepthCompareOpEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetDepthBoundsTestEnableEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetDepthBoundsTestEnableEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetStencilTestEnableEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetStencilTestEnableEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetStencilOpEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetStencilOpEXT : nullptr;
+    }
+#endif
+#ifdef VK_NV_device_generated_commands
+    if (!strcmp(name, "vkGetGeneratedCommandsMemoryRequirementsNV"))
+    {
+        return (void*)dynCheck_entry_vkGetGeneratedCommandsMemoryRequirementsNV;
+    }
+    if (!strcmp(name, "vkCmdPreprocessGeneratedCommandsNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_device_generated_commands");
+        return hasExt ? (void*)entry_vkCmdPreprocessGeneratedCommandsNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdExecuteGeneratedCommandsNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_device_generated_commands");
+        return hasExt ? (void*)entry_vkCmdExecuteGeneratedCommandsNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBindPipelineShaderGroupNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_device_generated_commands");
+        return hasExt ? (void*)entry_vkCmdBindPipelineShaderGroupNV : nullptr;
+    }
+    if (!strcmp(name, "vkCreateIndirectCommandsLayoutNV"))
+    {
+        return (void*)dynCheck_entry_vkCreateIndirectCommandsLayoutNV;
+    }
+    if (!strcmp(name, "vkDestroyIndirectCommandsLayoutNV"))
+    {
+        return (void*)dynCheck_entry_vkDestroyIndirectCommandsLayoutNV;
+    }
+#endif
+#ifdef VK_EXT_private_data
+    if (!strcmp(name, "vkCreatePrivateDataSlotEXT"))
+    {
+        return (void*)dynCheck_entry_vkCreatePrivateDataSlotEXT;
+    }
+    if (!strcmp(name, "vkDestroyPrivateDataSlotEXT"))
+    {
+        return (void*)dynCheck_entry_vkDestroyPrivateDataSlotEXT;
+    }
+    if (!strcmp(name, "vkSetPrivateDataEXT"))
+    {
+        return (void*)dynCheck_entry_vkSetPrivateDataEXT;
+    }
+    if (!strcmp(name, "vkGetPrivateDataEXT"))
+    {
+        return (void*)dynCheck_entry_vkGetPrivateDataEXT;
+    }
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+    if (!strcmp(name, "vkCmdSetFragmentShadingRateEnumNV"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_NV_fragment_shading_rate_enums");
+        return hasExt ? (void*)entry_vkCmdSetFragmentShadingRateEnumNV : nullptr;
+    }
+#endif
+#ifdef VK_EXT_directfb_surface
+    if (!strcmp(name, "vkCreateDirectFBSurfaceEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_directfb_surface");
+        return hasExt ? (void*)entry_vkCreateDirectFBSurfaceEXT : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceDirectFBPresentationSupportEXT"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_EXT_directfb_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceDirectFBPresentationSupportEXT : nullptr;
+    }
+#endif
+#ifdef VK_GOOGLE_gfxstream
+    if (!strcmp(name, "vkRegisterImageColorBufferGOOGLE"))
+    {
+        return (void*)dynCheck_entry_vkRegisterImageColorBufferGOOGLE;
+    }
+    if (!strcmp(name, "vkRegisterBufferColorBufferGOOGLE"))
+    {
+        return (void*)dynCheck_entry_vkRegisterBufferColorBufferGOOGLE;
+    }
+    if (!strcmp(name, "vkMapMemoryIntoAddressSpaceGOOGLE"))
+    {
+        return (void*)dynCheck_entry_vkMapMemoryIntoAddressSpaceGOOGLE;
+    }
+    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplateSizedGOOGLE"))
+    {
+        return (void*)dynCheck_entry_vkUpdateDescriptorSetWithTemplateSizedGOOGLE;
+    }
+    if (!strcmp(name, "vkBeginCommandBufferAsyncGOOGLE"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkBeginCommandBufferAsyncGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkEndCommandBufferAsyncGOOGLE"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkEndCommandBufferAsyncGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkResetCommandBufferAsyncGOOGLE"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkResetCommandBufferAsyncGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkCommandBufferHostSyncGOOGLE"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkCommandBufferHostSyncGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkCreateImageWithRequirementsGOOGLE"))
+    {
+        return (void*)dynCheck_entry_vkCreateImageWithRequirementsGOOGLE;
+    }
+    if (!strcmp(name, "vkCreateBufferWithRequirementsGOOGLE"))
+    {
+        return (void*)dynCheck_entry_vkCreateBufferWithRequirementsGOOGLE;
+    }
+    if (!strcmp(name, "vkGetMemoryHostAddressInfoGOOGLE"))
+    {
+        return (void*)dynCheck_entry_vkGetMemoryHostAddressInfoGOOGLE;
+    }
+    if (!strcmp(name, "vkFreeMemorySyncGOOGLE"))
+    {
+        return (void*)dynCheck_entry_vkFreeMemorySyncGOOGLE;
+    }
+    if (!strcmp(name, "vkQueueHostSyncGOOGLE"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkQueueHostSyncGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkQueueSubmitAsyncGOOGLE"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkQueueSubmitAsyncGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkQueueWaitIdleAsyncGOOGLE"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkQueueWaitIdleAsyncGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkQueueBindSparseAsyncGOOGLE"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkQueueBindSparseAsyncGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkGetLinearImageLayoutGOOGLE"))
+    {
+        return (void*)dynCheck_entry_vkGetLinearImageLayoutGOOGLE;
+    }
+    if (!strcmp(name, "vkQueueFlushCommandsGOOGLE"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkQueueFlushCommandsGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkQueueCommitDescriptorSetUpdatesGOOGLE"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkQueueCommitDescriptorSetUpdatesGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkCollectDescriptorPoolIdsGOOGLE"))
+    {
+        return (void*)dynCheck_entry_vkCollectDescriptorPoolIdsGOOGLE;
+    }
+#endif
+#ifdef VK_KHR_acceleration_structure
+    if (!strcmp(name, "vkCreateAccelerationStructureKHR"))
+    {
+        return (void*)dynCheck_entry_vkCreateAccelerationStructureKHR;
+    }
+    if (!strcmp(name, "vkDestroyAccelerationStructureKHR"))
+    {
+        return (void*)dynCheck_entry_vkDestroyAccelerationStructureKHR;
+    }
+    if (!strcmp(name, "vkCmdBuildAccelerationStructuresKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkCmdBuildAccelerationStructuresKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBuildAccelerationStructuresIndirectKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkCmdBuildAccelerationStructuresIndirectKHR : nullptr;
+    }
+    if (!strcmp(name, "vkBuildAccelerationStructuresKHR"))
+    {
+        return (void*)dynCheck_entry_vkBuildAccelerationStructuresKHR;
+    }
+    if (!strcmp(name, "vkCopyAccelerationStructureKHR"))
+    {
+        return (void*)dynCheck_entry_vkCopyAccelerationStructureKHR;
+    }
+    if (!strcmp(name, "vkCopyAccelerationStructureToMemoryKHR"))
+    {
+        return (void*)dynCheck_entry_vkCopyAccelerationStructureToMemoryKHR;
+    }
+    if (!strcmp(name, "vkCopyMemoryToAccelerationStructureKHR"))
+    {
+        return (void*)dynCheck_entry_vkCopyMemoryToAccelerationStructureKHR;
+    }
+    if (!strcmp(name, "vkWriteAccelerationStructuresPropertiesKHR"))
+    {
+        return (void*)dynCheck_entry_vkWriteAccelerationStructuresPropertiesKHR;
+    }
+    if (!strcmp(name, "vkCmdCopyAccelerationStructureKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkCmdCopyAccelerationStructureKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyAccelerationStructureToMemoryKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkCmdCopyAccelerationStructureToMemoryKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyMemoryToAccelerationStructureKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkCmdCopyMemoryToAccelerationStructureKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetAccelerationStructureDeviceAddressKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetAccelerationStructureDeviceAddressKHR;
+    }
+    if (!strcmp(name, "vkCmdWriteAccelerationStructuresPropertiesKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkCmdWriteAccelerationStructuresPropertiesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceAccelerationStructureCompatibilityKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetDeviceAccelerationStructureCompatibilityKHR;
+    }
+    if (!strcmp(name, "vkGetAccelerationStructureBuildSizesKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetAccelerationStructureBuildSizesKHR;
+    }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+    if (!strcmp(name, "vkCmdTraceRaysKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_ray_tracing_pipeline");
+        return hasExt ? (void*)entry_vkCmdTraceRaysKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCreateRayTracingPipelinesKHR"))
+    {
+        return (void*)dynCheck_entry_vkCreateRayTracingPipelinesKHR;
+    }
+    if (!strcmp(name, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR;
+    }
+    if (!strcmp(name, "vkCmdTraceRaysIndirectKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_ray_tracing_pipeline");
+        return hasExt ? (void*)entry_vkCmdTraceRaysIndirectKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetRayTracingShaderGroupStackSizeKHR"))
+    {
+        return (void*)dynCheck_entry_vkGetRayTracingShaderGroupStackSizeKHR;
+    }
+    if (!strcmp(name, "vkCmdSetRayTracingPipelineStackSizeKHR"))
+    {
+        bool hasExt = resources->hasInstanceExtension(instance, "VK_KHR_ray_tracing_pipeline");
+        return hasExt ? (void*)entry_vkCmdSetRayTracingPipelineStackSizeKHR : nullptr;
+    }
+#endif
+    return nullptr;
+}
+void* goldfish_vulkan_get_device_proc_address(VkDevice device, const char* name){
+    auto resources = ResourceTracker::get();
+    bool has1_1OrHigher = resources->getApiVersionFromDevice(device) >= VK_API_VERSION_1_1;
+#ifdef VK_VERSION_1_0
+    if (!strcmp(name, "vkCreateInstance"))
+    {
+        return (void*)entry_vkCreateInstance;
+    }
+    if (!strcmp(name, "vkDestroyInstance"))
+    {
+        return (void*)entry_vkDestroyInstance;
+    }
+    if (!strcmp(name, "vkEnumeratePhysicalDevices"))
+    {
+        return (void*)entry_vkEnumeratePhysicalDevices;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceFeatures"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceFeatures;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceFormatProperties;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceImageFormatProperties;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceProperties;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceQueueFamilyProperties;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceMemoryProperties;
+    }
+    if (!strcmp(name, "vkGetInstanceProcAddr"))
+    {
+        return (void*)entry_vkGetInstanceProcAddr;
+    }
+    if (!strcmp(name, "vkGetDeviceProcAddr"))
+    {
+        return (void*)entry_vkGetDeviceProcAddr;
+    }
+    if (!strcmp(name, "vkCreateDevice"))
+    {
+        return (void*)entry_vkCreateDevice;
+    }
+    if (!strcmp(name, "vkDestroyDevice"))
+    {
+        return (void*)entry_vkDestroyDevice;
+    }
+    if (!strcmp(name, "vkEnumerateInstanceExtensionProperties"))
+    {
+        return (void*)entry_vkEnumerateInstanceExtensionProperties;
+    }
+    if (!strcmp(name, "vkEnumerateDeviceExtensionProperties"))
+    {
+        return (void*)entry_vkEnumerateDeviceExtensionProperties;
+    }
+    if (!strcmp(name, "vkEnumerateInstanceLayerProperties"))
+    {
+        return (void*)entry_vkEnumerateInstanceLayerProperties;
+    }
+    if (!strcmp(name, "vkEnumerateDeviceLayerProperties"))
+    {
+        return (void*)entry_vkEnumerateDeviceLayerProperties;
+    }
+    if (!strcmp(name, "vkGetDeviceQueue"))
+    {
+        return (void*)entry_vkGetDeviceQueue;
+    }
+    if (!strcmp(name, "vkQueueSubmit"))
+    {
+        return (void*)entry_vkQueueSubmit;
+    }
+    if (!strcmp(name, "vkQueueWaitIdle"))
+    {
+        return (void*)entry_vkQueueWaitIdle;
+    }
+    if (!strcmp(name, "vkDeviceWaitIdle"))
+    {
+        return (void*)entry_vkDeviceWaitIdle;
+    }
+    if (!strcmp(name, "vkAllocateMemory"))
+    {
+        return (void*)entry_vkAllocateMemory;
+    }
+    if (!strcmp(name, "vkFreeMemory"))
+    {
+        return (void*)entry_vkFreeMemory;
+    }
+    if (!strcmp(name, "vkMapMemory"))
+    {
+        return (void*)entry_vkMapMemory;
+    }
+    if (!strcmp(name, "vkUnmapMemory"))
+    {
+        return (void*)entry_vkUnmapMemory;
+    }
+    if (!strcmp(name, "vkFlushMappedMemoryRanges"))
+    {
+        return (void*)entry_vkFlushMappedMemoryRanges;
+    }
+    if (!strcmp(name, "vkInvalidateMappedMemoryRanges"))
+    {
+        return (void*)entry_vkInvalidateMappedMemoryRanges;
+    }
+    if (!strcmp(name, "vkGetDeviceMemoryCommitment"))
+    {
+        return (void*)entry_vkGetDeviceMemoryCommitment;
+    }
+    if (!strcmp(name, "vkBindBufferMemory"))
+    {
+        return (void*)entry_vkBindBufferMemory;
+    }
+    if (!strcmp(name, "vkBindImageMemory"))
+    {
+        return (void*)entry_vkBindImageMemory;
+    }
+    if (!strcmp(name, "vkGetBufferMemoryRequirements"))
+    {
+        return (void*)entry_vkGetBufferMemoryRequirements;
+    }
+    if (!strcmp(name, "vkGetImageMemoryRequirements"))
+    {
+        return (void*)entry_vkGetImageMemoryRequirements;
+    }
+    if (!strcmp(name, "vkGetImageSparseMemoryRequirements"))
+    {
+        return (void*)entry_vkGetImageSparseMemoryRequirements;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties"))
+    {
+        return (void*)entry_vkGetPhysicalDeviceSparseImageFormatProperties;
+    }
+    if (!strcmp(name, "vkQueueBindSparse"))
+    {
+        return (void*)entry_vkQueueBindSparse;
+    }
+    if (!strcmp(name, "vkCreateFence"))
+    {
+        return (void*)entry_vkCreateFence;
+    }
+    if (!strcmp(name, "vkDestroyFence"))
+    {
+        return (void*)entry_vkDestroyFence;
+    }
+    if (!strcmp(name, "vkResetFences"))
+    {
+        return (void*)entry_vkResetFences;
+    }
+    if (!strcmp(name, "vkGetFenceStatus"))
+    {
+        return (void*)entry_vkGetFenceStatus;
+    }
+    if (!strcmp(name, "vkWaitForFences"))
+    {
+        return (void*)entry_vkWaitForFences;
+    }
+    if (!strcmp(name, "vkCreateSemaphore"))
+    {
+        return (void*)entry_vkCreateSemaphore;
+    }
+    if (!strcmp(name, "vkDestroySemaphore"))
+    {
+        return (void*)entry_vkDestroySemaphore;
+    }
+    if (!strcmp(name, "vkCreateEvent"))
+    {
+        return (void*)entry_vkCreateEvent;
+    }
+    if (!strcmp(name, "vkDestroyEvent"))
+    {
+        return (void*)entry_vkDestroyEvent;
+    }
+    if (!strcmp(name, "vkGetEventStatus"))
+    {
+        return (void*)entry_vkGetEventStatus;
+    }
+    if (!strcmp(name, "vkSetEvent"))
+    {
+        return (void*)entry_vkSetEvent;
+    }
+    if (!strcmp(name, "vkResetEvent"))
+    {
+        return (void*)entry_vkResetEvent;
+    }
+    if (!strcmp(name, "vkCreateQueryPool"))
+    {
+        return (void*)entry_vkCreateQueryPool;
+    }
+    if (!strcmp(name, "vkDestroyQueryPool"))
+    {
+        return (void*)entry_vkDestroyQueryPool;
+    }
+    if (!strcmp(name, "vkGetQueryPoolResults"))
+    {
+        return (void*)entry_vkGetQueryPoolResults;
+    }
+    if (!strcmp(name, "vkCreateBuffer"))
+    {
+        return (void*)entry_vkCreateBuffer;
+    }
+    if (!strcmp(name, "vkDestroyBuffer"))
+    {
+        return (void*)entry_vkDestroyBuffer;
+    }
+    if (!strcmp(name, "vkCreateBufferView"))
+    {
+        return (void*)entry_vkCreateBufferView;
+    }
+    if (!strcmp(name, "vkDestroyBufferView"))
+    {
+        return (void*)entry_vkDestroyBufferView;
+    }
+    if (!strcmp(name, "vkCreateImage"))
+    {
+        return (void*)entry_vkCreateImage;
+    }
+    if (!strcmp(name, "vkDestroyImage"))
+    {
+        return (void*)entry_vkDestroyImage;
+    }
+    if (!strcmp(name, "vkGetImageSubresourceLayout"))
+    {
+        return (void*)entry_vkGetImageSubresourceLayout;
+    }
+    if (!strcmp(name, "vkCreateImageView"))
+    {
+        return (void*)entry_vkCreateImageView;
+    }
+    if (!strcmp(name, "vkDestroyImageView"))
+    {
+        return (void*)entry_vkDestroyImageView;
+    }
+    if (!strcmp(name, "vkCreateShaderModule"))
+    {
+        return (void*)entry_vkCreateShaderModule;
+    }
+    if (!strcmp(name, "vkDestroyShaderModule"))
+    {
+        return (void*)entry_vkDestroyShaderModule;
+    }
+    if (!strcmp(name, "vkCreatePipelineCache"))
+    {
+        return (void*)entry_vkCreatePipelineCache;
+    }
+    if (!strcmp(name, "vkDestroyPipelineCache"))
+    {
+        return (void*)entry_vkDestroyPipelineCache;
+    }
+    if (!strcmp(name, "vkGetPipelineCacheData"))
+    {
+        return (void*)entry_vkGetPipelineCacheData;
+    }
+    if (!strcmp(name, "vkMergePipelineCaches"))
+    {
+        return (void*)entry_vkMergePipelineCaches;
+    }
+    if (!strcmp(name, "vkCreateGraphicsPipelines"))
+    {
+        return (void*)entry_vkCreateGraphicsPipelines;
+    }
+    if (!strcmp(name, "vkCreateComputePipelines"))
+    {
+        return (void*)entry_vkCreateComputePipelines;
+    }
+    if (!strcmp(name, "vkDestroyPipeline"))
+    {
+        return (void*)entry_vkDestroyPipeline;
+    }
+    if (!strcmp(name, "vkCreatePipelineLayout"))
+    {
+        return (void*)entry_vkCreatePipelineLayout;
+    }
+    if (!strcmp(name, "vkDestroyPipelineLayout"))
+    {
+        return (void*)entry_vkDestroyPipelineLayout;
+    }
+    if (!strcmp(name, "vkCreateSampler"))
+    {
+        return (void*)entry_vkCreateSampler;
+    }
+    if (!strcmp(name, "vkDestroySampler"))
+    {
+        return (void*)entry_vkDestroySampler;
+    }
+    if (!strcmp(name, "vkCreateDescriptorSetLayout"))
+    {
+        return (void*)entry_vkCreateDescriptorSetLayout;
+    }
+    if (!strcmp(name, "vkDestroyDescriptorSetLayout"))
+    {
+        return (void*)entry_vkDestroyDescriptorSetLayout;
+    }
+    if (!strcmp(name, "vkCreateDescriptorPool"))
+    {
+        return (void*)entry_vkCreateDescriptorPool;
+    }
+    if (!strcmp(name, "vkDestroyDescriptorPool"))
+    {
+        return (void*)entry_vkDestroyDescriptorPool;
+    }
+    if (!strcmp(name, "vkResetDescriptorPool"))
+    {
+        return (void*)entry_vkResetDescriptorPool;
+    }
+    if (!strcmp(name, "vkAllocateDescriptorSets"))
+    {
+        return (void*)entry_vkAllocateDescriptorSets;
+    }
+    if (!strcmp(name, "vkFreeDescriptorSets"))
+    {
+        return (void*)entry_vkFreeDescriptorSets;
+    }
+    if (!strcmp(name, "vkUpdateDescriptorSets"))
+    {
+        return (void*)entry_vkUpdateDescriptorSets;
+    }
+    if (!strcmp(name, "vkCreateFramebuffer"))
+    {
+        return (void*)entry_vkCreateFramebuffer;
+    }
+    if (!strcmp(name, "vkDestroyFramebuffer"))
+    {
+        return (void*)entry_vkDestroyFramebuffer;
+    }
+    if (!strcmp(name, "vkCreateRenderPass"))
+    {
+        return (void*)entry_vkCreateRenderPass;
+    }
+    if (!strcmp(name, "vkDestroyRenderPass"))
+    {
+        return (void*)entry_vkDestroyRenderPass;
+    }
+    if (!strcmp(name, "vkGetRenderAreaGranularity"))
+    {
+        return (void*)entry_vkGetRenderAreaGranularity;
+    }
+    if (!strcmp(name, "vkCreateCommandPool"))
+    {
+        return (void*)entry_vkCreateCommandPool;
+    }
+    if (!strcmp(name, "vkDestroyCommandPool"))
+    {
+        return (void*)entry_vkDestroyCommandPool;
+    }
+    if (!strcmp(name, "vkResetCommandPool"))
+    {
+        return (void*)entry_vkResetCommandPool;
+    }
+    if (!strcmp(name, "vkAllocateCommandBuffers"))
+    {
+        return (void*)entry_vkAllocateCommandBuffers;
+    }
+    if (!strcmp(name, "vkFreeCommandBuffers"))
+    {
+        return (void*)entry_vkFreeCommandBuffers;
+    }
+    if (!strcmp(name, "vkBeginCommandBuffer"))
+    {
+        return (void*)entry_vkBeginCommandBuffer;
+    }
+    if (!strcmp(name, "vkEndCommandBuffer"))
+    {
+        return (void*)entry_vkEndCommandBuffer;
+    }
+    if (!strcmp(name, "vkResetCommandBuffer"))
+    {
+        return (void*)entry_vkResetCommandBuffer;
+    }
+    if (!strcmp(name, "vkCmdBindPipeline"))
+    {
+        return (void*)entry_vkCmdBindPipeline;
+    }
+    if (!strcmp(name, "vkCmdSetViewport"))
+    {
+        return (void*)entry_vkCmdSetViewport;
+    }
+    if (!strcmp(name, "vkCmdSetScissor"))
+    {
+        return (void*)entry_vkCmdSetScissor;
+    }
+    if (!strcmp(name, "vkCmdSetLineWidth"))
+    {
+        return (void*)entry_vkCmdSetLineWidth;
+    }
+    if (!strcmp(name, "vkCmdSetDepthBias"))
+    {
+        return (void*)entry_vkCmdSetDepthBias;
+    }
+    if (!strcmp(name, "vkCmdSetBlendConstants"))
+    {
+        return (void*)entry_vkCmdSetBlendConstants;
+    }
+    if (!strcmp(name, "vkCmdSetDepthBounds"))
+    {
+        return (void*)entry_vkCmdSetDepthBounds;
+    }
+    if (!strcmp(name, "vkCmdSetStencilCompareMask"))
+    {
+        return (void*)entry_vkCmdSetStencilCompareMask;
+    }
+    if (!strcmp(name, "vkCmdSetStencilWriteMask"))
+    {
+        return (void*)entry_vkCmdSetStencilWriteMask;
+    }
+    if (!strcmp(name, "vkCmdSetStencilReference"))
+    {
+        return (void*)entry_vkCmdSetStencilReference;
+    }
+    if (!strcmp(name, "vkCmdBindDescriptorSets"))
+    {
+        return (void*)entry_vkCmdBindDescriptorSets;
+    }
+    if (!strcmp(name, "vkCmdBindIndexBuffer"))
+    {
+        return (void*)entry_vkCmdBindIndexBuffer;
+    }
+    if (!strcmp(name, "vkCmdBindVertexBuffers"))
+    {
+        return (void*)entry_vkCmdBindVertexBuffers;
+    }
+    if (!strcmp(name, "vkCmdDraw"))
+    {
+        return (void*)entry_vkCmdDraw;
+    }
+    if (!strcmp(name, "vkCmdDrawIndexed"))
+    {
+        return (void*)entry_vkCmdDrawIndexed;
+    }
+    if (!strcmp(name, "vkCmdDrawIndirect"))
+    {
+        return (void*)entry_vkCmdDrawIndirect;
+    }
+    if (!strcmp(name, "vkCmdDrawIndexedIndirect"))
+    {
+        return (void*)entry_vkCmdDrawIndexedIndirect;
+    }
+    if (!strcmp(name, "vkCmdDispatch"))
+    {
+        return (void*)entry_vkCmdDispatch;
+    }
+    if (!strcmp(name, "vkCmdDispatchIndirect"))
+    {
+        return (void*)entry_vkCmdDispatchIndirect;
+    }
+    if (!strcmp(name, "vkCmdCopyBuffer"))
+    {
+        return (void*)entry_vkCmdCopyBuffer;
+    }
+    if (!strcmp(name, "vkCmdCopyImage"))
+    {
+        return (void*)entry_vkCmdCopyImage;
+    }
+    if (!strcmp(name, "vkCmdBlitImage"))
+    {
+        return (void*)entry_vkCmdBlitImage;
+    }
+    if (!strcmp(name, "vkCmdCopyBufferToImage"))
+    {
+        return (void*)entry_vkCmdCopyBufferToImage;
+    }
+    if (!strcmp(name, "vkCmdCopyImageToBuffer"))
+    {
+        return (void*)entry_vkCmdCopyImageToBuffer;
+    }
+    if (!strcmp(name, "vkCmdUpdateBuffer"))
+    {
+        return (void*)entry_vkCmdUpdateBuffer;
+    }
+    if (!strcmp(name, "vkCmdFillBuffer"))
+    {
+        return (void*)entry_vkCmdFillBuffer;
+    }
+    if (!strcmp(name, "vkCmdClearColorImage"))
+    {
+        return (void*)entry_vkCmdClearColorImage;
+    }
+    if (!strcmp(name, "vkCmdClearDepthStencilImage"))
+    {
+        return (void*)entry_vkCmdClearDepthStencilImage;
+    }
+    if (!strcmp(name, "vkCmdClearAttachments"))
+    {
+        return (void*)entry_vkCmdClearAttachments;
+    }
+    if (!strcmp(name, "vkCmdResolveImage"))
+    {
+        return (void*)entry_vkCmdResolveImage;
+    }
+    if (!strcmp(name, "vkCmdSetEvent"))
+    {
+        return (void*)entry_vkCmdSetEvent;
+    }
+    if (!strcmp(name, "vkCmdResetEvent"))
+    {
+        return (void*)entry_vkCmdResetEvent;
+    }
+    if (!strcmp(name, "vkCmdWaitEvents"))
+    {
+        return (void*)entry_vkCmdWaitEvents;
+    }
+    if (!strcmp(name, "vkCmdPipelineBarrier"))
+    {
+        return (void*)entry_vkCmdPipelineBarrier;
+    }
+    if (!strcmp(name, "vkCmdBeginQuery"))
+    {
+        return (void*)entry_vkCmdBeginQuery;
+    }
+    if (!strcmp(name, "vkCmdEndQuery"))
+    {
+        return (void*)entry_vkCmdEndQuery;
+    }
+    if (!strcmp(name, "vkCmdResetQueryPool"))
+    {
+        return (void*)entry_vkCmdResetQueryPool;
+    }
+    if (!strcmp(name, "vkCmdWriteTimestamp"))
+    {
+        return (void*)entry_vkCmdWriteTimestamp;
+    }
+    if (!strcmp(name, "vkCmdCopyQueryPoolResults"))
+    {
+        return (void*)entry_vkCmdCopyQueryPoolResults;
+    }
+    if (!strcmp(name, "vkCmdPushConstants"))
+    {
+        return (void*)entry_vkCmdPushConstants;
+    }
+    if (!strcmp(name, "vkCmdBeginRenderPass"))
+    {
+        return (void*)entry_vkCmdBeginRenderPass;
+    }
+    if (!strcmp(name, "vkCmdNextSubpass"))
+    {
+        return (void*)entry_vkCmdNextSubpass;
+    }
+    if (!strcmp(name, "vkCmdEndRenderPass"))
+    {
+        return (void*)entry_vkCmdEndRenderPass;
+    }
+    if (!strcmp(name, "vkCmdExecuteCommands"))
+    {
+        return (void*)entry_vkCmdExecuteCommands;
+    }
+#endif
+#ifdef VK_VERSION_1_1
+    if (!strcmp(name, "vkEnumerateInstanceVersion"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkEnumerateInstanceVersion : nullptr;
+    }
+    if (!strcmp(name, "vkBindBufferMemory2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkBindBufferMemory2 : nullptr;
+    }
+    if (!strcmp(name, "vkBindImageMemory2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkBindImageMemory2 : nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceGroupPeerMemoryFeatures"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetDeviceGroupPeerMemoryFeatures : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetDeviceMask"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkCmdSetDeviceMask : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDispatchBase"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkCmdDispatchBase : nullptr;
+    }
+    if (!strcmp(name, "vkEnumeratePhysicalDeviceGroups"))
+    {
+        return nullptr;
+    }
+    if (!strcmp(name, "vkGetImageMemoryRequirements2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetImageMemoryRequirements2 : nullptr;
+    }
+    if (!strcmp(name, "vkGetBufferMemoryRequirements2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetBufferMemoryRequirements2 : nullptr;
+    }
+    if (!strcmp(name, "vkGetImageSparseMemoryRequirements2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetImageSparseMemoryRequirements2 : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceFeatures2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceFeatures2 : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceProperties2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceProperties2 : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceFormatProperties2 : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceImageFormatProperties2 : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceQueueFamilyProperties2 : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceMemoryProperties2 : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceSparseImageFormatProperties2 : nullptr;
+    }
+    if (!strcmp(name, "vkTrimCommandPool"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkTrimCommandPool : nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceQueue2"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetDeviceQueue2 : nullptr;
+    }
+    if (!strcmp(name, "vkCreateSamplerYcbcrConversion"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkCreateSamplerYcbcrConversion : nullptr;
+    }
+    if (!strcmp(name, "vkDestroySamplerYcbcrConversion"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkDestroySamplerYcbcrConversion : nullptr;
+    }
+    if (!strcmp(name, "vkCreateDescriptorUpdateTemplate"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkCreateDescriptorUpdateTemplate : nullptr;
+    }
+    if (!strcmp(name, "vkDestroyDescriptorUpdateTemplate"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkDestroyDescriptorUpdateTemplate : nullptr;
+    }
+    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplate"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkUpdateDescriptorSetWithTemplate : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalBufferProperties"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceExternalBufferProperties : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalFenceProperties"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceExternalFenceProperties : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalSemaphoreProperties"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetPhysicalDeviceExternalSemaphoreProperties : nullptr;
+    }
+    if (!strcmp(name, "vkGetDescriptorSetLayoutSupport"))
+    {
+        return has1_1OrHigher ? (void*)entry_vkGetDescriptorSetLayoutSupport : nullptr;
+    }
+#endif
+#ifdef VK_VERSION_1_2
+    if (!strcmp(name, "vkCmdDrawIndirectCount"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkCmdDrawIndirectCount : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawIndexedIndirectCount"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkCmdDrawIndexedIndirectCount : nullptr;
+    }
+    if (!strcmp(name, "vkCreateRenderPass2"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkCreateRenderPass2 : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBeginRenderPass2"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkCmdBeginRenderPass2 : nullptr;
+    }
+    if (!strcmp(name, "vkCmdNextSubpass2"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkCmdNextSubpass2 : nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndRenderPass2"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkCmdEndRenderPass2 : nullptr;
+    }
+    if (!strcmp(name, "vkResetQueryPool"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkResetQueryPool : nullptr;
+    }
+    if (!strcmp(name, "vkGetSemaphoreCounterValue"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkGetSemaphoreCounterValue : nullptr;
+    }
+    if (!strcmp(name, "vkWaitSemaphores"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkWaitSemaphores : nullptr;
+    }
+    if (!strcmp(name, "vkSignalSemaphore"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkSignalSemaphore : nullptr;
+    }
+    if (!strcmp(name, "vkGetBufferDeviceAddress"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkGetBufferDeviceAddress : nullptr;
+    }
+    if (!strcmp(name, "vkGetBufferOpaqueCaptureAddress"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkGetBufferOpaqueCaptureAddress : nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceMemoryOpaqueCaptureAddress"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_VERSION_1_2");
+        return hasExt ? (void*)entry_vkGetDeviceMemoryOpaqueCaptureAddress : nullptr;
+    }
+#endif
+#ifdef VK_KHR_surface
+    if (!strcmp(name, "vkDestroySurfaceKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_surface");
+        return hasExt ? (void*)entry_vkDestroySurfaceKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceSupportKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceSupportKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceCapabilitiesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceFormatsKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceFormatsKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfacePresentModesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfacePresentModesKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_swapchain
+    if (!strcmp(name, "vkCreateSwapchainKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
+        return hasExt ? (void*)entry_vkCreateSwapchainKHR : nullptr;
+    }
+    if (!strcmp(name, "vkDestroySwapchainKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
+        return hasExt ? (void*)entry_vkDestroySwapchainKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetSwapchainImagesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
+        return hasExt ? (void*)entry_vkGetSwapchainImagesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkAcquireNextImageKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
+        return hasExt ? (void*)entry_vkAcquireNextImageKHR : nullptr;
+    }
+    if (!strcmp(name, "vkQueuePresentKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
+        return hasExt ? (void*)entry_vkQueuePresentKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceGroupPresentCapabilitiesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
+        return hasExt ? (void*)entry_vkGetDeviceGroupPresentCapabilitiesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceGroupSurfacePresentModesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
+        return hasExt ? (void*)entry_vkGetDeviceGroupSurfacePresentModesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDevicePresentRectanglesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
+        return hasExt ? (void*)entry_vkGetPhysicalDevicePresentRectanglesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkAcquireNextImage2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_swapchain");
+        return hasExt ? (void*)entry_vkAcquireNextImage2KHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_display
+    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPropertiesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_display");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceDisplayPropertiesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_display");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceDisplayPlanePropertiesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDisplayPlaneSupportedDisplaysKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_display");
+        return hasExt ? (void*)entry_vkGetDisplayPlaneSupportedDisplaysKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDisplayModePropertiesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_display");
+        return hasExt ? (void*)entry_vkGetDisplayModePropertiesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCreateDisplayModeKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_display");
+        return hasExt ? (void*)entry_vkCreateDisplayModeKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDisplayPlaneCapabilitiesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_display");
+        return hasExt ? (void*)entry_vkGetDisplayPlaneCapabilitiesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCreateDisplayPlaneSurfaceKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_display");
+        return hasExt ? (void*)entry_vkCreateDisplayPlaneSurfaceKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_display_swapchain
+    if (!strcmp(name, "vkCreateSharedSwapchainsKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_display_swapchain");
+        return hasExt ? (void*)entry_vkCreateSharedSwapchainsKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_xlib_surface
+    if (!strcmp(name, "vkCreateXlibSurfaceKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_xlib_surface");
+        return hasExt ? (void*)entry_vkCreateXlibSurfaceKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceXlibPresentationSupportKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_xlib_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceXlibPresentationSupportKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_xcb_surface
+    if (!strcmp(name, "vkCreateXcbSurfaceKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_xcb_surface");
+        return hasExt ? (void*)entry_vkCreateXcbSurfaceKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceXcbPresentationSupportKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_xcb_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceXcbPresentationSupportKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_wayland_surface
+    if (!strcmp(name, "vkCreateWaylandSurfaceKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_wayland_surface");
+        return hasExt ? (void*)entry_vkCreateWaylandSurfaceKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceWaylandPresentationSupportKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_wayland_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceWaylandPresentationSupportKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_android_surface
+    if (!strcmp(name, "vkCreateAndroidSurfaceKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_android_surface");
+        return hasExt ? (void*)entry_vkCreateAndroidSurfaceKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_win32_surface
+    if (!strcmp(name, "vkCreateWin32SurfaceKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_win32_surface");
+        return hasExt ? (void*)entry_vkCreateWin32SurfaceKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceWin32PresentationSupportKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_win32_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceWin32PresentationSupportKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_get_physical_device_properties2
+    if (!strcmp(name, "vkGetPhysicalDeviceFeatures2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_physical_device_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceFeatures2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceProperties2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_physical_device_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceProperties2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceFormatProperties2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_physical_device_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceFormatProperties2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceImageFormatProperties2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_physical_device_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceImageFormatProperties2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyProperties2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_physical_device_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceQueueFamilyProperties2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceMemoryProperties2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_physical_device_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceMemoryProperties2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSparseImageFormatProperties2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_physical_device_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSparseImageFormatProperties2KHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_device_group
+    if (!strcmp(name, "vkGetDeviceGroupPeerMemoryFeaturesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_device_group");
+        return hasExt ? (void*)entry_vkGetDeviceGroupPeerMemoryFeaturesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetDeviceMaskKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_device_group");
+        return hasExt ? (void*)entry_vkCmdSetDeviceMaskKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDispatchBaseKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_device_group");
+        return hasExt ? (void*)entry_vkCmdDispatchBaseKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_maintenance1
+    if (!strcmp(name, "vkTrimCommandPoolKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_maintenance1");
+        return hasExt ? (void*)entry_vkTrimCommandPoolKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_device_group_creation
+    if (!strcmp(name, "vkEnumeratePhysicalDeviceGroupsKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_device_group_creation");
+        return hasExt ? (void*)entry_vkEnumeratePhysicalDeviceGroupsKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_memory_capabilities
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalBufferPropertiesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_memory_capabilities");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceExternalBufferPropertiesKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_memory_win32
+    if (!strcmp(name, "vkGetMemoryWin32HandleKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_memory_win32");
+        return hasExt ? (void*)entry_vkGetMemoryWin32HandleKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetMemoryWin32HandlePropertiesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_memory_win32");
+        return hasExt ? (void*)entry_vkGetMemoryWin32HandlePropertiesKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_memory_fd
+    if (!strcmp(name, "vkGetMemoryFdKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_memory_fd");
+        return hasExt ? (void*)entry_vkGetMemoryFdKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetMemoryFdPropertiesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_memory_fd");
+        return hasExt ? (void*)entry_vkGetMemoryFdPropertiesKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_semaphore_capabilities
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_capabilities");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_semaphore_win32
+    if (!strcmp(name, "vkImportSemaphoreWin32HandleKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_win32");
+        return hasExt ? (void*)entry_vkImportSemaphoreWin32HandleKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetSemaphoreWin32HandleKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_win32");
+        return hasExt ? (void*)entry_vkGetSemaphoreWin32HandleKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_semaphore_fd
+    if (!strcmp(name, "vkImportSemaphoreFdKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_fd");
+        return hasExt ? (void*)entry_vkImportSemaphoreFdKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetSemaphoreFdKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_semaphore_fd");
+        return hasExt ? (void*)entry_vkGetSemaphoreFdKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_push_descriptor
+    if (!strcmp(name, "vkCmdPushDescriptorSetKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_push_descriptor");
+        return hasExt ? (void*)entry_vkCmdPushDescriptorSetKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdPushDescriptorSetWithTemplateKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_push_descriptor");
+        return hasExt ? (void*)entry_vkCmdPushDescriptorSetWithTemplateKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_descriptor_update_template
+    if (!strcmp(name, "vkCreateDescriptorUpdateTemplateKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_descriptor_update_template");
+        return hasExt ? (void*)entry_vkCreateDescriptorUpdateTemplateKHR : nullptr;
+    }
+    if (!strcmp(name, "vkDestroyDescriptorUpdateTemplateKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_descriptor_update_template");
+        return hasExt ? (void*)entry_vkDestroyDescriptorUpdateTemplateKHR : nullptr;
+    }
+    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplateKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_descriptor_update_template");
+        return hasExt ? (void*)entry_vkUpdateDescriptorSetWithTemplateKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_create_renderpass2
+    if (!strcmp(name, "vkCreateRenderPass2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_create_renderpass2");
+        return hasExt ? (void*)entry_vkCreateRenderPass2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBeginRenderPass2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_create_renderpass2");
+        return hasExt ? (void*)entry_vkCmdBeginRenderPass2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdNextSubpass2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_create_renderpass2");
+        return hasExt ? (void*)entry_vkCmdNextSubpass2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndRenderPass2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_create_renderpass2");
+        return hasExt ? (void*)entry_vkCmdEndRenderPass2KHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_shared_presentable_image
+    if (!strcmp(name, "vkGetSwapchainStatusKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_shared_presentable_image");
+        return hasExt ? (void*)entry_vkGetSwapchainStatusKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_fence_capabilities
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalFencePropertiesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_fence_capabilities");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceExternalFencePropertiesKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_fence_win32
+    if (!strcmp(name, "vkImportFenceWin32HandleKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_fence_win32");
+        return hasExt ? (void*)entry_vkImportFenceWin32HandleKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetFenceWin32HandleKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_fence_win32");
+        return hasExt ? (void*)entry_vkGetFenceWin32HandleKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_external_fence_fd
+    if (!strcmp(name, "vkImportFenceFdKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_fence_fd");
+        return hasExt ? (void*)entry_vkImportFenceFdKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetFenceFdKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_external_fence_fd");
+        return hasExt ? (void*)entry_vkGetFenceFdKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_performance_query
+    if (!strcmp(name, "vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_performance_query");
+        return hasExt ? (void*)entry_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_performance_query");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkAcquireProfilingLockKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_performance_query");
+        return hasExt ? (void*)entry_vkAcquireProfilingLockKHR : nullptr;
+    }
+    if (!strcmp(name, "vkReleaseProfilingLockKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_performance_query");
+        return hasExt ? (void*)entry_vkReleaseProfilingLockKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_get_surface_capabilities2
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilities2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_surface_capabilities2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceCapabilities2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceFormats2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_surface_capabilities2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceFormats2KHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_get_display_properties2
+    if (!strcmp(name, "vkGetPhysicalDeviceDisplayProperties2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_display_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceDisplayProperties2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceDisplayPlaneProperties2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_display_properties2");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceDisplayPlaneProperties2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDisplayModeProperties2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_display_properties2");
+        return hasExt ? (void*)entry_vkGetDisplayModeProperties2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDisplayPlaneCapabilities2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_display_properties2");
+        return hasExt ? (void*)entry_vkGetDisplayPlaneCapabilities2KHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_get_memory_requirements2
+    if (!strcmp(name, "vkGetImageMemoryRequirements2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_memory_requirements2");
+        return hasExt ? (void*)entry_vkGetImageMemoryRequirements2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetBufferMemoryRequirements2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_memory_requirements2");
+        return hasExt ? (void*)entry_vkGetBufferMemoryRequirements2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetImageSparseMemoryRequirements2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_get_memory_requirements2");
+        return hasExt ? (void*)entry_vkGetImageSparseMemoryRequirements2KHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_sampler_ycbcr_conversion
+    if (!strcmp(name, "vkCreateSamplerYcbcrConversionKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_sampler_ycbcr_conversion");
+        return hasExt ? (void*)entry_vkCreateSamplerYcbcrConversionKHR : nullptr;
+    }
+    if (!strcmp(name, "vkDestroySamplerYcbcrConversionKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_sampler_ycbcr_conversion");
+        return hasExt ? (void*)entry_vkDestroySamplerYcbcrConversionKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_bind_memory2
+    if (!strcmp(name, "vkBindBufferMemory2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_bind_memory2");
+        return hasExt ? (void*)entry_vkBindBufferMemory2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkBindImageMemory2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_bind_memory2");
+        return hasExt ? (void*)entry_vkBindImageMemory2KHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_maintenance3
+    if (!strcmp(name, "vkGetDescriptorSetLayoutSupportKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_maintenance3");
+        return hasExt ? (void*)entry_vkGetDescriptorSetLayoutSupportKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_draw_indirect_count
+    if (!strcmp(name, "vkCmdDrawIndirectCountKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_draw_indirect_count");
+        return hasExt ? (void*)entry_vkCmdDrawIndirectCountKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawIndexedIndirectCountKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_draw_indirect_count");
+        return hasExt ? (void*)entry_vkCmdDrawIndexedIndirectCountKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_timeline_semaphore
+    if (!strcmp(name, "vkGetSemaphoreCounterValueKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_timeline_semaphore");
+        return hasExt ? (void*)entry_vkGetSemaphoreCounterValueKHR : nullptr;
+    }
+    if (!strcmp(name, "vkWaitSemaphoresKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_timeline_semaphore");
+        return hasExt ? (void*)entry_vkWaitSemaphoresKHR : nullptr;
+    }
+    if (!strcmp(name, "vkSignalSemaphoreKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_timeline_semaphore");
+        return hasExt ? (void*)entry_vkSignalSemaphoreKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+    if (!strcmp(name, "vkGetPhysicalDeviceFragmentShadingRatesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_fragment_shading_rate");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceFragmentShadingRatesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetFragmentShadingRateKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_fragment_shading_rate");
+        return hasExt ? (void*)entry_vkCmdSetFragmentShadingRateKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_buffer_device_address
+    if (!strcmp(name, "vkGetBufferDeviceAddressKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_buffer_device_address");
+        return hasExt ? (void*)entry_vkGetBufferDeviceAddressKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetBufferOpaqueCaptureAddressKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_buffer_device_address");
+        return hasExt ? (void*)entry_vkGetBufferOpaqueCaptureAddressKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceMemoryOpaqueCaptureAddressKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_buffer_device_address");
+        return hasExt ? (void*)entry_vkGetDeviceMemoryOpaqueCaptureAddressKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_deferred_host_operations
+    if (!strcmp(name, "vkCreateDeferredOperationKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_deferred_host_operations");
+        return hasExt ? (void*)entry_vkCreateDeferredOperationKHR : nullptr;
+    }
+    if (!strcmp(name, "vkDestroyDeferredOperationKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_deferred_host_operations");
+        return hasExt ? (void*)entry_vkDestroyDeferredOperationKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDeferredOperationMaxConcurrencyKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_deferred_host_operations");
+        return hasExt ? (void*)entry_vkGetDeferredOperationMaxConcurrencyKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDeferredOperationResultKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_deferred_host_operations");
+        return hasExt ? (void*)entry_vkGetDeferredOperationResultKHR : nullptr;
+    }
+    if (!strcmp(name, "vkDeferredOperationJoinKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_deferred_host_operations");
+        return hasExt ? (void*)entry_vkDeferredOperationJoinKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+    if (!strcmp(name, "vkGetPipelineExecutablePropertiesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_pipeline_executable_properties");
+        return hasExt ? (void*)entry_vkGetPipelineExecutablePropertiesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPipelineExecutableStatisticsKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_pipeline_executable_properties");
+        return hasExt ? (void*)entry_vkGetPipelineExecutableStatisticsKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetPipelineExecutableInternalRepresentationsKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_pipeline_executable_properties");
+        return hasExt ? (void*)entry_vkGetPipelineExecutableInternalRepresentationsKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_copy_commands2
+    if (!strcmp(name, "vkCmdCopyBuffer2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_copy_commands2");
+        return hasExt ? (void*)entry_vkCmdCopyBuffer2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyImage2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_copy_commands2");
+        return hasExt ? (void*)entry_vkCmdCopyImage2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyBufferToImage2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_copy_commands2");
+        return hasExt ? (void*)entry_vkCmdCopyBufferToImage2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyImageToBuffer2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_copy_commands2");
+        return hasExt ? (void*)entry_vkCmdCopyImageToBuffer2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBlitImage2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_copy_commands2");
+        return hasExt ? (void*)entry_vkCmdBlitImage2KHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdResolveImage2KHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_copy_commands2");
+        return hasExt ? (void*)entry_vkCmdResolveImage2KHR : nullptr;
+    }
+#endif
+#ifdef VK_ANDROID_native_buffer
+    if (!strcmp(name, "vkGetSwapchainGrallocUsageANDROID"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_ANDROID_native_buffer");
+        return hasExt ? (void*)entry_vkGetSwapchainGrallocUsageANDROID : nullptr;
+    }
+    if (!strcmp(name, "vkAcquireImageANDROID"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_ANDROID_native_buffer");
+        return hasExt ? (void*)entry_vkAcquireImageANDROID : nullptr;
+    }
+    if (!strcmp(name, "vkQueueSignalReleaseImageANDROID"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_ANDROID_native_buffer");
+        return hasExt ? (void*)entry_vkQueueSignalReleaseImageANDROID : nullptr;
+    }
+#endif
+#ifdef VK_EXT_debug_report
+    if (!strcmp(name, "vkCreateDebugReportCallbackEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_report");
+        return hasExt ? (void*)entry_vkCreateDebugReportCallbackEXT : nullptr;
+    }
+    if (!strcmp(name, "vkDestroyDebugReportCallbackEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_report");
+        return hasExt ? (void*)entry_vkDestroyDebugReportCallbackEXT : nullptr;
+    }
+    if (!strcmp(name, "vkDebugReportMessageEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_report");
+        return hasExt ? (void*)entry_vkDebugReportMessageEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_debug_marker
+    if (!strcmp(name, "vkDebugMarkerSetObjectTagEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_marker");
+        return hasExt ? (void*)entry_vkDebugMarkerSetObjectTagEXT : nullptr;
+    }
+    if (!strcmp(name, "vkDebugMarkerSetObjectNameEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_marker");
+        return hasExt ? (void*)entry_vkDebugMarkerSetObjectNameEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDebugMarkerBeginEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_marker");
+        return hasExt ? (void*)entry_vkCmdDebugMarkerBeginEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDebugMarkerEndEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_marker");
+        return hasExt ? (void*)entry_vkCmdDebugMarkerEndEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDebugMarkerInsertEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_marker");
+        return hasExt ? (void*)entry_vkCmdDebugMarkerInsertEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_transform_feedback
+    if (!strcmp(name, "vkCmdBindTransformFeedbackBuffersEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_transform_feedback");
+        return hasExt ? (void*)entry_vkCmdBindTransformFeedbackBuffersEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBeginTransformFeedbackEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_transform_feedback");
+        return hasExt ? (void*)entry_vkCmdBeginTransformFeedbackEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndTransformFeedbackEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_transform_feedback");
+        return hasExt ? (void*)entry_vkCmdEndTransformFeedbackEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBeginQueryIndexedEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_transform_feedback");
+        return hasExt ? (void*)entry_vkCmdBeginQueryIndexedEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndQueryIndexedEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_transform_feedback");
+        return hasExt ? (void*)entry_vkCmdEndQueryIndexedEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawIndirectByteCountEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_transform_feedback");
+        return hasExt ? (void*)entry_vkCmdDrawIndirectByteCountEXT : nullptr;
+    }
+#endif
+#ifdef VK_NVX_image_view_handle
+    if (!strcmp(name, "vkGetImageViewHandleNVX"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NVX_image_view_handle");
+        return hasExt ? (void*)entry_vkGetImageViewHandleNVX : nullptr;
+    }
+    if (!strcmp(name, "vkGetImageViewAddressNVX"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NVX_image_view_handle");
+        return hasExt ? (void*)entry_vkGetImageViewAddressNVX : nullptr;
+    }
+#endif
+#ifdef VK_AMD_draw_indirect_count
+    if (!strcmp(name, "vkCmdDrawIndirectCountAMD"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_AMD_draw_indirect_count");
+        return hasExt ? (void*)entry_vkCmdDrawIndirectCountAMD : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawIndexedIndirectCountAMD"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_AMD_draw_indirect_count");
+        return hasExt ? (void*)entry_vkCmdDrawIndexedIndirectCountAMD : nullptr;
+    }
+#endif
+#ifdef VK_AMD_shader_info
+    if (!strcmp(name, "vkGetShaderInfoAMD"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_AMD_shader_info");
+        return hasExt ? (void*)entry_vkGetShaderInfoAMD : nullptr;
+    }
+#endif
+#ifdef VK_GGP_stream_descriptor_surface
+    if (!strcmp(name, "vkCreateStreamDescriptorSurfaceGGP"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GGP_stream_descriptor_surface");
+        return hasExt ? (void*)entry_vkCreateStreamDescriptorSurfaceGGP : nullptr;
+    }
+#endif
+#ifdef VK_NV_external_memory_capabilities
+    if (!strcmp(name, "vkGetPhysicalDeviceExternalImageFormatPropertiesNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_external_memory_capabilities");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceExternalImageFormatPropertiesNV : nullptr;
+    }
+#endif
+#ifdef VK_NV_external_memory_win32
+    if (!strcmp(name, "vkGetMemoryWin32HandleNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_external_memory_win32");
+        return hasExt ? (void*)entry_vkGetMemoryWin32HandleNV : nullptr;
+    }
+#endif
+#ifdef VK_NN_vi_surface
+    if (!strcmp(name, "vkCreateViSurfaceNN"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NN_vi_surface");
+        return hasExt ? (void*)entry_vkCreateViSurfaceNN : nullptr;
+    }
+#endif
+#ifdef VK_EXT_conditional_rendering
+    if (!strcmp(name, "vkCmdBeginConditionalRenderingEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_conditional_rendering");
+        return hasExt ? (void*)entry_vkCmdBeginConditionalRenderingEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndConditionalRenderingEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_conditional_rendering");
+        return hasExt ? (void*)entry_vkCmdEndConditionalRenderingEXT : nullptr;
+    }
+#endif
+#ifdef VK_NV_clip_space_w_scaling
+    if (!strcmp(name, "vkCmdSetViewportWScalingNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_clip_space_w_scaling");
+        return hasExt ? (void*)entry_vkCmdSetViewportWScalingNV : nullptr;
+    }
+#endif
+#ifdef VK_EXT_direct_mode_display
+    if (!strcmp(name, "vkReleaseDisplayEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_direct_mode_display");
+        return hasExt ? (void*)entry_vkReleaseDisplayEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_acquire_xlib_display
+    if (!strcmp(name, "vkAcquireXlibDisplayEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_acquire_xlib_display");
+        return hasExt ? (void*)entry_vkAcquireXlibDisplayEXT : nullptr;
+    }
+    if (!strcmp(name, "vkGetRandROutputDisplayEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_acquire_xlib_display");
+        return hasExt ? (void*)entry_vkGetRandROutputDisplayEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_display_surface_counter
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfaceCapabilities2EXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_display_surface_counter");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfaceCapabilities2EXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_display_control
+    if (!strcmp(name, "vkDisplayPowerControlEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_display_control");
+        return hasExt ? (void*)entry_vkDisplayPowerControlEXT : nullptr;
+    }
+    if (!strcmp(name, "vkRegisterDeviceEventEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_display_control");
+        return hasExt ? (void*)entry_vkRegisterDeviceEventEXT : nullptr;
+    }
+    if (!strcmp(name, "vkRegisterDisplayEventEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_display_control");
+        return hasExt ? (void*)entry_vkRegisterDisplayEventEXT : nullptr;
+    }
+    if (!strcmp(name, "vkGetSwapchainCounterEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_display_control");
+        return hasExt ? (void*)entry_vkGetSwapchainCounterEXT : nullptr;
+    }
+#endif
+#ifdef VK_GOOGLE_display_timing
+    if (!strcmp(name, "vkGetRefreshCycleDurationGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_display_timing");
+        return hasExt ? (void*)entry_vkGetRefreshCycleDurationGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkGetPastPresentationTimingGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_display_timing");
+        return hasExt ? (void*)entry_vkGetPastPresentationTimingGOOGLE : nullptr;
+    }
+#endif
+#ifdef VK_EXT_discard_rectangles
+    if (!strcmp(name, "vkCmdSetDiscardRectangleEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_discard_rectangles");
+        return hasExt ? (void*)entry_vkCmdSetDiscardRectangleEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_hdr_metadata
+    if (!strcmp(name, "vkSetHdrMetadataEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_hdr_metadata");
+        return hasExt ? (void*)entry_vkSetHdrMetadataEXT : nullptr;
+    }
+#endif
+#ifdef VK_MVK_ios_surface
+    if (!strcmp(name, "vkCreateIOSSurfaceMVK"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_MVK_ios_surface");
+        return hasExt ? (void*)entry_vkCreateIOSSurfaceMVK : nullptr;
+    }
+#endif
+#ifdef VK_MVK_macos_surface
+    if (!strcmp(name, "vkCreateMacOSSurfaceMVK"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_MVK_macos_surface");
+        return hasExt ? (void*)entry_vkCreateMacOSSurfaceMVK : nullptr;
+    }
+#endif
+#ifdef VK_MVK_moltenvk
+    if (!strcmp(name, "vkGetMTLDeviceMVK"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_MVK_moltenvk");
+        return hasExt ? (void*)entry_vkGetMTLDeviceMVK : nullptr;
+    }
+    if (!strcmp(name, "vkSetMTLTextureMVK"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_MVK_moltenvk");
+        return hasExt ? (void*)entry_vkSetMTLTextureMVK : nullptr;
+    }
+    if (!strcmp(name, "vkGetMTLTextureMVK"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_MVK_moltenvk");
+        return hasExt ? (void*)entry_vkGetMTLTextureMVK : nullptr;
+    }
+    if (!strcmp(name, "vkGetMTLBufferMVK"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_MVK_moltenvk");
+        return hasExt ? (void*)entry_vkGetMTLBufferMVK : nullptr;
+    }
+    if (!strcmp(name, "vkUseIOSurfaceMVK"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_MVK_moltenvk");
+        return hasExt ? (void*)entry_vkUseIOSurfaceMVK : nullptr;
+    }
+    if (!strcmp(name, "vkGetIOSurfaceMVK"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_MVK_moltenvk");
+        return hasExt ? (void*)entry_vkGetIOSurfaceMVK : nullptr;
+    }
+#endif
+#ifdef VK_EXT_debug_utils
+    if (!strcmp(name, "vkSetDebugUtilsObjectNameEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkSetDebugUtilsObjectNameEXT : nullptr;
+    }
+    if (!strcmp(name, "vkSetDebugUtilsObjectTagEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkSetDebugUtilsObjectTagEXT : nullptr;
+    }
+    if (!strcmp(name, "vkQueueBeginDebugUtilsLabelEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkQueueBeginDebugUtilsLabelEXT : nullptr;
+    }
+    if (!strcmp(name, "vkQueueEndDebugUtilsLabelEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkQueueEndDebugUtilsLabelEXT : nullptr;
+    }
+    if (!strcmp(name, "vkQueueInsertDebugUtilsLabelEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkQueueInsertDebugUtilsLabelEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBeginDebugUtilsLabelEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkCmdBeginDebugUtilsLabelEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdEndDebugUtilsLabelEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkCmdEndDebugUtilsLabelEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdInsertDebugUtilsLabelEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkCmdInsertDebugUtilsLabelEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCreateDebugUtilsMessengerEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkCreateDebugUtilsMessengerEXT : nullptr;
+    }
+    if (!strcmp(name, "vkDestroyDebugUtilsMessengerEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkDestroyDebugUtilsMessengerEXT : nullptr;
+    }
+    if (!strcmp(name, "vkSubmitDebugUtilsMessageEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_debug_utils");
+        return hasExt ? (void*)entry_vkSubmitDebugUtilsMessageEXT : nullptr;
+    }
+#endif
+#ifdef VK_ANDROID_external_memory_android_hardware_buffer
+    if (!strcmp(name, "vkGetAndroidHardwareBufferPropertiesANDROID"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_ANDROID_external_memory_android_hardware_buffer");
+        return hasExt ? (void*)entry_vkGetAndroidHardwareBufferPropertiesANDROID : nullptr;
+    }
+    if (!strcmp(name, "vkGetMemoryAndroidHardwareBufferANDROID"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_ANDROID_external_memory_android_hardware_buffer");
+        return hasExt ? (void*)entry_vkGetMemoryAndroidHardwareBufferANDROID : nullptr;
+    }
+#endif
+#ifdef VK_EXT_sample_locations
+    if (!strcmp(name, "vkCmdSetSampleLocationsEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_sample_locations");
+        return hasExt ? (void*)entry_vkCmdSetSampleLocationsEXT : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceMultisamplePropertiesEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_sample_locations");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceMultisamplePropertiesEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+    if (!strcmp(name, "vkGetImageDrmFormatModifierPropertiesEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_image_drm_format_modifier");
+        return hasExt ? (void*)entry_vkGetImageDrmFormatModifierPropertiesEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_validation_cache
+    if (!strcmp(name, "vkCreateValidationCacheEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_validation_cache");
+        return hasExt ? (void*)entry_vkCreateValidationCacheEXT : nullptr;
+    }
+    if (!strcmp(name, "vkDestroyValidationCacheEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_validation_cache");
+        return hasExt ? (void*)entry_vkDestroyValidationCacheEXT : nullptr;
+    }
+    if (!strcmp(name, "vkMergeValidationCachesEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_validation_cache");
+        return hasExt ? (void*)entry_vkMergeValidationCachesEXT : nullptr;
+    }
+    if (!strcmp(name, "vkGetValidationCacheDataEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_validation_cache");
+        return hasExt ? (void*)entry_vkGetValidationCacheDataEXT : nullptr;
+    }
+#endif
+#ifdef VK_NV_shading_rate_image
+    if (!strcmp(name, "vkCmdBindShadingRateImageNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_shading_rate_image");
+        return hasExt ? (void*)entry_vkCmdBindShadingRateImageNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetViewportShadingRatePaletteNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_shading_rate_image");
+        return hasExt ? (void*)entry_vkCmdSetViewportShadingRatePaletteNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetCoarseSampleOrderNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_shading_rate_image");
+        return hasExt ? (void*)entry_vkCmdSetCoarseSampleOrderNV : nullptr;
+    }
+#endif
+#ifdef VK_NV_ray_tracing
+    if (!strcmp(name, "vkCreateAccelerationStructureNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_ray_tracing");
+        return hasExt ? (void*)entry_vkCreateAccelerationStructureNV : nullptr;
+    }
+    if (!strcmp(name, "vkDestroyAccelerationStructureNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_ray_tracing");
+        return hasExt ? (void*)entry_vkDestroyAccelerationStructureNV : nullptr;
+    }
+    if (!strcmp(name, "vkGetAccelerationStructureMemoryRequirementsNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_ray_tracing");
+        return hasExt ? (void*)entry_vkGetAccelerationStructureMemoryRequirementsNV : nullptr;
+    }
+    if (!strcmp(name, "vkBindAccelerationStructureMemoryNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_ray_tracing");
+        return hasExt ? (void*)entry_vkBindAccelerationStructureMemoryNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBuildAccelerationStructureNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_ray_tracing");
+        return hasExt ? (void*)entry_vkCmdBuildAccelerationStructureNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyAccelerationStructureNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_ray_tracing");
+        return hasExt ? (void*)entry_vkCmdCopyAccelerationStructureNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdTraceRaysNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_ray_tracing");
+        return hasExt ? (void*)entry_vkCmdTraceRaysNV : nullptr;
+    }
+    if (!strcmp(name, "vkCreateRayTracingPipelinesNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_ray_tracing");
+        return hasExt ? (void*)entry_vkCreateRayTracingPipelinesNV : nullptr;
+    }
+    if (!strcmp(name, "vkGetRayTracingShaderGroupHandlesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_ray_tracing");
+        return hasExt ? (void*)entry_vkGetRayTracingShaderGroupHandlesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetRayTracingShaderGroupHandlesNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_ray_tracing");
+        return hasExt ? (void*)entry_vkGetRayTracingShaderGroupHandlesNV : nullptr;
+    }
+    if (!strcmp(name, "vkGetAccelerationStructureHandleNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_ray_tracing");
+        return hasExt ? (void*)entry_vkGetAccelerationStructureHandleNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdWriteAccelerationStructuresPropertiesNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_ray_tracing");
+        return hasExt ? (void*)entry_vkCmdWriteAccelerationStructuresPropertiesNV : nullptr;
+    }
+    if (!strcmp(name, "vkCompileDeferredNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_ray_tracing");
+        return hasExt ? (void*)entry_vkCompileDeferredNV : nullptr;
+    }
+#endif
+#ifdef VK_EXT_external_memory_host
+    if (!strcmp(name, "vkGetMemoryHostPointerPropertiesEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_external_memory_host");
+        return hasExt ? (void*)entry_vkGetMemoryHostPointerPropertiesEXT : nullptr;
+    }
+#endif
+#ifdef VK_AMD_buffer_marker
+    if (!strcmp(name, "vkCmdWriteBufferMarkerAMD"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_AMD_buffer_marker");
+        return hasExt ? (void*)entry_vkCmdWriteBufferMarkerAMD : nullptr;
+    }
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+    if (!strcmp(name, "vkGetPhysicalDeviceCalibrateableTimeDomainsEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_calibrated_timestamps");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT : nullptr;
+    }
+    if (!strcmp(name, "vkGetCalibratedTimestampsEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_calibrated_timestamps");
+        return hasExt ? (void*)entry_vkGetCalibratedTimestampsEXT : nullptr;
+    }
+#endif
+#ifdef VK_NV_mesh_shader
+    if (!strcmp(name, "vkCmdDrawMeshTasksNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_mesh_shader");
+        return hasExt ? (void*)entry_vkCmdDrawMeshTasksNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawMeshTasksIndirectNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_mesh_shader");
+        return hasExt ? (void*)entry_vkCmdDrawMeshTasksIndirectNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdDrawMeshTasksIndirectCountNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_mesh_shader");
+        return hasExt ? (void*)entry_vkCmdDrawMeshTasksIndirectCountNV : nullptr;
+    }
+#endif
+#ifdef VK_NV_scissor_exclusive
+    if (!strcmp(name, "vkCmdSetExclusiveScissorNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_scissor_exclusive");
+        return hasExt ? (void*)entry_vkCmdSetExclusiveScissorNV : nullptr;
+    }
+#endif
+#ifdef VK_NV_device_diagnostic_checkpoints
+    if (!strcmp(name, "vkCmdSetCheckpointNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_device_diagnostic_checkpoints");
+        return hasExt ? (void*)entry_vkCmdSetCheckpointNV : nullptr;
+    }
+    if (!strcmp(name, "vkGetQueueCheckpointDataNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_device_diagnostic_checkpoints");
+        return hasExt ? (void*)entry_vkGetQueueCheckpointDataNV : nullptr;
+    }
+#endif
+#ifdef VK_INTEL_performance_query
+    if (!strcmp(name, "vkInitializePerformanceApiINTEL"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_INTEL_performance_query");
+        return hasExt ? (void*)entry_vkInitializePerformanceApiINTEL : nullptr;
+    }
+    if (!strcmp(name, "vkUninitializePerformanceApiINTEL"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_INTEL_performance_query");
+        return hasExt ? (void*)entry_vkUninitializePerformanceApiINTEL : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetPerformanceMarkerINTEL"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_INTEL_performance_query");
+        return hasExt ? (void*)entry_vkCmdSetPerformanceMarkerINTEL : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetPerformanceStreamMarkerINTEL"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_INTEL_performance_query");
+        return hasExt ? (void*)entry_vkCmdSetPerformanceStreamMarkerINTEL : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetPerformanceOverrideINTEL"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_INTEL_performance_query");
+        return hasExt ? (void*)entry_vkCmdSetPerformanceOverrideINTEL : nullptr;
+    }
+    if (!strcmp(name, "vkAcquirePerformanceConfigurationINTEL"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_INTEL_performance_query");
+        return hasExt ? (void*)entry_vkAcquirePerformanceConfigurationINTEL : nullptr;
+    }
+    if (!strcmp(name, "vkReleasePerformanceConfigurationINTEL"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_INTEL_performance_query");
+        return hasExt ? (void*)entry_vkReleasePerformanceConfigurationINTEL : nullptr;
+    }
+    if (!strcmp(name, "vkQueueSetPerformanceConfigurationINTEL"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_INTEL_performance_query");
+        return hasExt ? (void*)entry_vkQueueSetPerformanceConfigurationINTEL : nullptr;
+    }
+    if (!strcmp(name, "vkGetPerformanceParameterINTEL"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_INTEL_performance_query");
+        return hasExt ? (void*)entry_vkGetPerformanceParameterINTEL : nullptr;
+    }
+#endif
+#ifdef VK_AMD_display_native_hdr
+    if (!strcmp(name, "vkSetLocalDimmingAMD"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_AMD_display_native_hdr");
+        return hasExt ? (void*)entry_vkSetLocalDimmingAMD : nullptr;
+    }
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+    if (!strcmp(name, "vkCreateImagePipeSurfaceFUCHSIA"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_FUCHSIA_imagepipe_surface");
+        return hasExt ? (void*)entry_vkCreateImagePipeSurfaceFUCHSIA : nullptr;
+    }
+#endif
+#ifdef VK_EXT_metal_surface
+    if (!strcmp(name, "vkCreateMetalSurfaceEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_metal_surface");
+        return hasExt ? (void*)entry_vkCreateMetalSurfaceEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_buffer_device_address
+    if (!strcmp(name, "vkGetBufferDeviceAddressEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_buffer_device_address");
+        return hasExt ? (void*)entry_vkGetBufferDeviceAddressEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_tooling_info
+    if (!strcmp(name, "vkGetPhysicalDeviceToolPropertiesEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_tooling_info");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceToolPropertiesEXT : nullptr;
+    }
+#endif
+#ifdef VK_NV_cooperative_matrix
+    if (!strcmp(name, "vkGetPhysicalDeviceCooperativeMatrixPropertiesNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_cooperative_matrix");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV : nullptr;
+    }
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+    if (!strcmp(name, "vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_coverage_reduction_mode");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV : nullptr;
+    }
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+    if (!strcmp(name, "vkGetPhysicalDeviceSurfacePresentModes2EXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_full_screen_exclusive");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceSurfacePresentModes2EXT : nullptr;
+    }
+    if (!strcmp(name, "vkAcquireFullScreenExclusiveModeEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_full_screen_exclusive");
+        return hasExt ? (void*)entry_vkAcquireFullScreenExclusiveModeEXT : nullptr;
+    }
+    if (!strcmp(name, "vkReleaseFullScreenExclusiveModeEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_full_screen_exclusive");
+        return hasExt ? (void*)entry_vkReleaseFullScreenExclusiveModeEXT : nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceGroupSurfacePresentModes2EXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_full_screen_exclusive");
+        return hasExt ? (void*)entry_vkGetDeviceGroupSurfacePresentModes2EXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_headless_surface
+    if (!strcmp(name, "vkCreateHeadlessSurfaceEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_headless_surface");
+        return hasExt ? (void*)entry_vkCreateHeadlessSurfaceEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_line_rasterization
+    if (!strcmp(name, "vkCmdSetLineStippleEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_line_rasterization");
+        return hasExt ? (void*)entry_vkCmdSetLineStippleEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_host_query_reset
+    if (!strcmp(name, "vkResetQueryPoolEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_host_query_reset");
+        return hasExt ? (void*)entry_vkResetQueryPoolEXT : nullptr;
+    }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+    if (!strcmp(name, "vkCmdSetCullModeEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetCullModeEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetFrontFaceEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetFrontFaceEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetPrimitiveTopologyEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetPrimitiveTopologyEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetViewportWithCountEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetViewportWithCountEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetScissorWithCountEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetScissorWithCountEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBindVertexBuffers2EXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdBindVertexBuffers2EXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetDepthTestEnableEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetDepthTestEnableEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetDepthWriteEnableEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetDepthWriteEnableEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetDepthCompareOpEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetDepthCompareOpEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetDepthBoundsTestEnableEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetDepthBoundsTestEnableEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetStencilTestEnableEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetStencilTestEnableEXT : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetStencilOpEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_extended_dynamic_state");
+        return hasExt ? (void*)entry_vkCmdSetStencilOpEXT : nullptr;
+    }
+#endif
+#ifdef VK_NV_device_generated_commands
+    if (!strcmp(name, "vkGetGeneratedCommandsMemoryRequirementsNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_device_generated_commands");
+        return hasExt ? (void*)entry_vkGetGeneratedCommandsMemoryRequirementsNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdPreprocessGeneratedCommandsNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_device_generated_commands");
+        return hasExt ? (void*)entry_vkCmdPreprocessGeneratedCommandsNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdExecuteGeneratedCommandsNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_device_generated_commands");
+        return hasExt ? (void*)entry_vkCmdExecuteGeneratedCommandsNV : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBindPipelineShaderGroupNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_device_generated_commands");
+        return hasExt ? (void*)entry_vkCmdBindPipelineShaderGroupNV : nullptr;
+    }
+    if (!strcmp(name, "vkCreateIndirectCommandsLayoutNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_device_generated_commands");
+        return hasExt ? (void*)entry_vkCreateIndirectCommandsLayoutNV : nullptr;
+    }
+    if (!strcmp(name, "vkDestroyIndirectCommandsLayoutNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_device_generated_commands");
+        return hasExt ? (void*)entry_vkDestroyIndirectCommandsLayoutNV : nullptr;
+    }
+#endif
+#ifdef VK_EXT_private_data
+    if (!strcmp(name, "vkCreatePrivateDataSlotEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_private_data");
+        return hasExt ? (void*)entry_vkCreatePrivateDataSlotEXT : nullptr;
+    }
+    if (!strcmp(name, "vkDestroyPrivateDataSlotEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_private_data");
+        return hasExt ? (void*)entry_vkDestroyPrivateDataSlotEXT : nullptr;
+    }
+    if (!strcmp(name, "vkSetPrivateDataEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_private_data");
+        return hasExt ? (void*)entry_vkSetPrivateDataEXT : nullptr;
+    }
+    if (!strcmp(name, "vkGetPrivateDataEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_private_data");
+        return hasExt ? (void*)entry_vkGetPrivateDataEXT : nullptr;
+    }
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+    if (!strcmp(name, "vkCmdSetFragmentShadingRateEnumNV"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_NV_fragment_shading_rate_enums");
+        return hasExt ? (void*)entry_vkCmdSetFragmentShadingRateEnumNV : nullptr;
+    }
+#endif
+#ifdef VK_EXT_directfb_surface
+    if (!strcmp(name, "vkCreateDirectFBSurfaceEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_directfb_surface");
+        return hasExt ? (void*)entry_vkCreateDirectFBSurfaceEXT : nullptr;
+    }
+    if (!strcmp(name, "vkGetPhysicalDeviceDirectFBPresentationSupportEXT"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_EXT_directfb_surface");
+        return hasExt ? (void*)entry_vkGetPhysicalDeviceDirectFBPresentationSupportEXT : nullptr;
+    }
+#endif
+#ifdef VK_GOOGLE_gfxstream
+    if (!strcmp(name, "vkRegisterImageColorBufferGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkRegisterImageColorBufferGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkRegisterBufferColorBufferGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkRegisterBufferColorBufferGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkMapMemoryIntoAddressSpaceGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkMapMemoryIntoAddressSpaceGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkUpdateDescriptorSetWithTemplateSizedGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkUpdateDescriptorSetWithTemplateSizedGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkBeginCommandBufferAsyncGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkBeginCommandBufferAsyncGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkEndCommandBufferAsyncGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkEndCommandBufferAsyncGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkResetCommandBufferAsyncGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkResetCommandBufferAsyncGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkCommandBufferHostSyncGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkCommandBufferHostSyncGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkCreateImageWithRequirementsGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkCreateImageWithRequirementsGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkCreateBufferWithRequirementsGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkCreateBufferWithRequirementsGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkGetMemoryHostAddressInfoGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkGetMemoryHostAddressInfoGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkFreeMemorySyncGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkFreeMemorySyncGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkQueueHostSyncGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkQueueHostSyncGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkQueueSubmitAsyncGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkQueueSubmitAsyncGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkQueueWaitIdleAsyncGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkQueueWaitIdleAsyncGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkQueueBindSparseAsyncGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkQueueBindSparseAsyncGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkGetLinearImageLayoutGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkGetLinearImageLayoutGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkQueueFlushCommandsGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkQueueFlushCommandsGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkQueueCommitDescriptorSetUpdatesGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkQueueCommitDescriptorSetUpdatesGOOGLE : nullptr;
+    }
+    if (!strcmp(name, "vkCollectDescriptorPoolIdsGOOGLE"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_GOOGLE_gfxstream");
+        return hasExt ? (void*)entry_vkCollectDescriptorPoolIdsGOOGLE : nullptr;
+    }
+#endif
+#ifdef VK_KHR_acceleration_structure
+    if (!strcmp(name, "vkCreateAccelerationStructureKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkCreateAccelerationStructureKHR : nullptr;
+    }
+    if (!strcmp(name, "vkDestroyAccelerationStructureKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkDestroyAccelerationStructureKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBuildAccelerationStructuresKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkCmdBuildAccelerationStructuresKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdBuildAccelerationStructuresIndirectKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkCmdBuildAccelerationStructuresIndirectKHR : nullptr;
+    }
+    if (!strcmp(name, "vkBuildAccelerationStructuresKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkBuildAccelerationStructuresKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCopyAccelerationStructureKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkCopyAccelerationStructureKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCopyAccelerationStructureToMemoryKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkCopyAccelerationStructureToMemoryKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCopyMemoryToAccelerationStructureKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkCopyMemoryToAccelerationStructureKHR : nullptr;
+    }
+    if (!strcmp(name, "vkWriteAccelerationStructuresPropertiesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkWriteAccelerationStructuresPropertiesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyAccelerationStructureKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkCmdCopyAccelerationStructureKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyAccelerationStructureToMemoryKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkCmdCopyAccelerationStructureToMemoryKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdCopyMemoryToAccelerationStructureKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkCmdCopyMemoryToAccelerationStructureKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetAccelerationStructureDeviceAddressKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkGetAccelerationStructureDeviceAddressKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdWriteAccelerationStructuresPropertiesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkCmdWriteAccelerationStructuresPropertiesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetDeviceAccelerationStructureCompatibilityKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkGetDeviceAccelerationStructureCompatibilityKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetAccelerationStructureBuildSizesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_acceleration_structure");
+        return hasExt ? (void*)entry_vkGetAccelerationStructureBuildSizesKHR : nullptr;
+    }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+    if (!strcmp(name, "vkCmdTraceRaysKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_ray_tracing_pipeline");
+        return hasExt ? (void*)entry_vkCmdTraceRaysKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCreateRayTracingPipelinesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_ray_tracing_pipeline");
+        return hasExt ? (void*)entry_vkCreateRayTracingPipelinesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_ray_tracing_pipeline");
+        return hasExt ? (void*)entry_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdTraceRaysIndirectKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_ray_tracing_pipeline");
+        return hasExt ? (void*)entry_vkCmdTraceRaysIndirectKHR : nullptr;
+    }
+    if (!strcmp(name, "vkGetRayTracingShaderGroupStackSizeKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_ray_tracing_pipeline");
+        return hasExt ? (void*)entry_vkGetRayTracingShaderGroupStackSizeKHR : nullptr;
+    }
+    if (!strcmp(name, "vkCmdSetRayTracingPipelineStackSizeKHR"))
+    {
+        bool hasExt = resources->hasDeviceExtension(device, "VK_KHR_ray_tracing_pipeline");
+        return hasExt ? (void*)entry_vkCmdSetRayTracingPipelineStackSizeKHR : nullptr;
+    }
+#endif
+    return nullptr;
+}
+
+} // namespace goldfish_vk
diff --git a/system/vulkan/func_table.h b/system/vulkan_enc/func_table.h
similarity index 60%
rename from system/vulkan/func_table.h
rename to system/vulkan_enc/func_table.h
index c0d4224..3c12410 100644
--- a/system/vulkan/func_table.h
+++ b/system/vulkan_enc/func_table.h
@@ -34,6 +34,8 @@
 #endif
 #ifdef VK_VERSION_1_1
 #endif
+#ifdef VK_VERSION_1_2
+#endif
 #ifdef VK_KHR_surface
 #endif
 #ifdef VK_KHR_swapchain
@@ -48,8 +50,6 @@
 #endif
 #ifdef VK_KHR_wayland_surface
 #endif
-#ifdef VK_KHR_mir_surface
-#endif
 #ifdef VK_KHR_android_surface
 #endif
 #ifdef VK_KHR_win32_surface
@@ -88,12 +88,16 @@
 #endif
 #ifdef VK_KHR_push_descriptor
 #endif
+#ifdef VK_KHR_shader_float16_int8
+#endif
 #ifdef VK_KHR_16bit_storage
 #endif
 #ifdef VK_KHR_incremental_present
 #endif
 #ifdef VK_KHR_descriptor_update_template
 #endif
+#ifdef VK_KHR_imageless_framebuffer
+#endif
 #ifdef VK_KHR_create_renderpass2
 #endif
 #ifdef VK_KHR_shared_presentable_image
@@ -106,6 +110,8 @@
 #endif
 #ifdef VK_KHR_external_fence_fd
 #endif
+#ifdef VK_KHR_performance_query
+#endif
 #ifdef VK_KHR_maintenance2
 #endif
 #ifdef VK_KHR_get_surface_capabilities2
@@ -128,12 +134,56 @@
 #endif
 #ifdef VK_KHR_bind_memory2
 #endif
+#ifdef VK_KHR_portability_subset
+#endif
 #ifdef VK_KHR_maintenance3
 #endif
 #ifdef VK_KHR_draw_indirect_count
 #endif
+#ifdef VK_KHR_shader_subgroup_extended_types
+#endif
 #ifdef VK_KHR_8bit_storage
 #endif
+#ifdef VK_KHR_shader_atomic_int64
+#endif
+#ifdef VK_KHR_shader_clock
+#endif
+#ifdef VK_KHR_driver_properties
+#endif
+#ifdef VK_KHR_shader_float_controls
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+#endif
+#ifdef VK_KHR_buffer_device_address
+#endif
+#ifdef VK_KHR_deferred_host_operations
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+#endif
+#ifdef VK_KHR_pipeline_library
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+#endif
 #ifdef VK_ANDROID_native_buffer
 #endif
 #ifdef VK_EXT_debug_report
@@ -156,6 +206,10 @@
 #endif
 #ifdef VK_NV_dedicated_allocation
 #endif
+#ifdef VK_EXT_transform_feedback
+#endif
+#ifdef VK_NVX_image_view_handle
+#endif
 #ifdef VK_AMD_draw_indirect_count
 #endif
 #ifdef VK_AMD_negative_viewport_height
@@ -170,6 +224,10 @@
 #endif
 #ifdef VK_AMD_shader_image_load_store_lod
 #endif
+#ifdef VK_GGP_stream_descriptor_surface
+#endif
+#ifdef VK_NV_corner_sampled_image
+#endif
 #ifdef VK_IMG_format_pvrtc
 #endif
 #ifdef VK_NV_external_memory_capabilities
@@ -188,9 +246,11 @@
 #endif
 #ifdef VK_EXT_shader_subgroup_vote
 #endif
-#ifdef VK_EXT_conditional_rendering
+#ifdef VK_EXT_texture_compression_astc_hdr
 #endif
-#ifdef VK_NVX_device_generated_commands
+#ifdef VK_EXT_astc_decode_mode
+#endif
+#ifdef VK_EXT_conditional_rendering
 #endif
 #ifdef VK_NV_clip_space_w_scaling
 #endif
@@ -218,6 +278,8 @@
 #endif
 #ifdef VK_EXT_conservative_rasterization
 #endif
+#ifdef VK_EXT_depth_clip_enable
+#endif
 #ifdef VK_EXT_swapchain_colorspace
 #endif
 #ifdef VK_EXT_hdr_metadata
@@ -226,6 +288,8 @@
 #endif
 #ifdef VK_MVK_macos_surface
 #endif
+#ifdef VK_MVK_moltenvk
+#endif
 #ifdef VK_EXT_external_memory_dma_buf
 #endif
 #ifdef VK_EXT_queue_family_foreign
@@ -242,6 +306,8 @@
 #endif
 #ifdef VK_AMD_shader_fragment_mask
 #endif
+#ifdef VK_EXT_inline_uniform_block
+#endif
 #ifdef VK_EXT_shader_stencil_export
 #endif
 #ifdef VK_EXT_sample_locations
@@ -254,41 +320,169 @@
 #endif
 #ifdef VK_NV_fill_rectangle
 #endif
+#ifdef VK_NV_shader_sm_builtins
+#endif
 #ifdef VK_EXT_post_depth_coverage
 #endif
+#ifdef VK_EXT_image_drm_format_modifier
+#endif
 #ifdef VK_EXT_validation_cache
 #endif
 #ifdef VK_EXT_descriptor_indexing
 #endif
 #ifdef VK_EXT_shader_viewport_index_layer
 #endif
+#ifdef VK_NV_shading_rate_image
+#endif
+#ifdef VK_NV_ray_tracing
+#endif
+#ifdef VK_NV_representative_fragment_test
+#endif
+#ifdef VK_EXT_filter_cubic
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
+#endif
 #ifdef VK_EXT_global_priority
 #endif
 #ifdef VK_EXT_external_memory_host
 #endif
 #ifdef VK_AMD_buffer_marker
 #endif
+#ifdef VK_AMD_pipeline_compiler_control
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+#endif
 #ifdef VK_AMD_shader_core_properties
 #endif
+#ifdef VK_AMD_memory_overallocation_behavior
+#endif
 #ifdef VK_EXT_vertex_attribute_divisor
 #endif
+#ifdef VK_GGP_frame_token
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+#endif
 #ifdef VK_NV_shader_subgroup_partitioned
 #endif
+#ifdef VK_NV_compute_shader_derivatives
+#endif
+#ifdef VK_NV_mesh_shader
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+#endif
+#ifdef VK_NV_shader_image_footprint
+#endif
+#ifdef VK_NV_scissor_exclusive
+#endif
 #ifdef VK_NV_device_diagnostic_checkpoints
 #endif
-#ifdef VK_GOOGLE_address_space
+#ifdef VK_INTEL_shader_integer_functions2
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_INTEL_performance_query
 #endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
+#ifdef VK_EXT_pci_bus_info
 #endif
-#ifdef VK_GOOGLE_async_command_buffers
+#ifdef VK_AMD_display_native_hdr
 #endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
+#ifdef VK_FUCHSIA_imagepipe_surface
 #endif
-#ifdef VK_GOOGLE_address_space_info
+#ifdef VK_EXT_metal_surface
 #endif
-#ifdef VK_GOOGLE_free_memory_sync
+#ifdef VK_EXT_fragment_density_map
+#endif
+#ifdef VK_EXT_scalar_block_layout
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+#endif
+#ifdef VK_AMD_shader_core_properties2
+#endif
+#ifdef VK_AMD_device_coherent_memory
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+#endif
+#ifdef VK_EXT_memory_budget
+#endif
+#ifdef VK_EXT_memory_priority
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+#endif
+#ifdef VK_EXT_buffer_device_address
+#endif
+#ifdef VK_EXT_tooling_info
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+#endif
+#ifdef VK_EXT_validation_features
+#endif
+#ifdef VK_NV_cooperative_matrix
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+#endif
+#ifdef VK_EXT_headless_surface
+#endif
+#ifdef VK_EXT_line_rasterization
+#endif
+#ifdef VK_EXT_shader_atomic_float
+#endif
+#ifdef VK_EXT_host_query_reset
+#endif
+#ifdef VK_EXT_index_type_uint8
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+#endif
+#ifdef VK_NV_device_generated_commands
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+#endif
+#ifdef VK_QCOM_render_pass_transform
+#endif
+#ifdef VK_EXT_device_memory_report
+#endif
+#ifdef VK_EXT_robustness2
+#endif
+#ifdef VK_EXT_custom_border_color
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+#endif
+#ifdef VK_NV_device_diagnostics_config
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+#endif
+#ifdef VK_EXT_fragment_density_map2
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+#endif
+#ifdef VK_EXT_image_robustness
+#endif
+#ifdef VK_EXT_4444_formats
+#endif
+#ifdef VK_EXT_directfb_surface
+#endif
+#ifdef VK_GOOGLE_gfxstream
+#endif
+#ifdef VK_KHR_acceleration_structure
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+#endif
+#ifdef VK_KHR_ray_query
 #endif
 void* goldfish_vulkan_get_proc_address(const char* name);
 void* goldfish_vulkan_get_instance_proc_address(VkInstance instance, const char* name);
diff --git a/system/vulkan_enc/goldfish_vk_counting_guest.cpp b/system/vulkan_enc/goldfish_vk_counting_guest.cpp
new file mode 100644
index 0000000..4e42832
--- /dev/null
+++ b/system/vulkan_enc/goldfish_vk_counting_guest.cpp
@@ -0,0 +1,15565 @@
+// Copyright (C) 2018 The Android Open Source Project
+// Copyright (C) 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Autogenerated module goldfish_vk_counting_guest
+// (impl) generated by android/android-emugl/host/libs/libOpenglRender/vulkan-registry/xml/genvk.py -registry android/android-emugl/host/libs/libOpenglRender/vulkan-registry/xml/vk.xml cereal -o android/android-emugl/host/libs/libOpenglRender/vulkan/cereal
+// Please do not modify directly;
+// re-run android/scripts/generate-vulkan-sources.sh,
+// or directly from Python by defining:
+// VULKAN_REGISTRY_XML_DIR : Directory containing genvk.py and vk.xml
+// CEREAL_OUTPUT_DIR: Where to put the generated sources.
+// python3 $VULKAN_REGISTRY_XML_DIR/genvk.py -registry $VULKAN_REGISTRY_XML_DIR/vk.xml cereal -o $CEREAL_OUTPUT_DIR
+
+#include "goldfish_vk_counting_guest.h"
+
+
+#include "goldfish_vk_extension_structs_guest.h"
+#include "goldfish_vk_private_defs.h"
+
+
+namespace goldfish_vk {
+
+void count_extension_struct(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const void* structExtension,
+    size_t* count);
+
+#ifdef VK_VERSION_1_0
+void count_VkExtent2D(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExtent2D* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkExtent3D(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExtent3D* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkOffset2D(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkOffset2D* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(int32_t);
+    *count += sizeof(int32_t);
+}
+
+void count_VkOffset3D(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkOffset3D* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(int32_t);
+    *count += sizeof(int32_t);
+    *count += sizeof(int32_t);
+}
+
+void count_VkRect2D(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRect2D* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    count_VkOffset2D(featureBits, rootType, (VkOffset2D*)(&toCount->offset), count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->extent), count);
+}
+
+void count_VkBaseInStructure(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBaseInStructure* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+}
+
+void count_VkBaseOutStructure(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBaseOutStructure* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+}
+
+void count_VkBufferMemoryBarrier(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferMemoryBarrier* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkAccessFlags);
+    *count += sizeof(VkAccessFlags);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkDispatchIndirectCommand(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDispatchIndirectCommand* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkDrawIndexedIndirectCommand(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDrawIndexedIndirectCommand* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(int32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkDrawIndirectCommand(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDrawIndirectCommand* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkImageSubresourceRange(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageSubresourceRange* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkImageAspectFlags);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkImageMemoryBarrier(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageMemoryBarrier* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkAccessFlags);
+    *count += sizeof(VkAccessFlags);
+    *count += sizeof(VkImageLayout);
+    *count += sizeof(VkImageLayout);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    count_VkImageSubresourceRange(featureBits, rootType, (VkImageSubresourceRange*)(&toCount->subresourceRange), count);
+}
+
+void count_VkMemoryBarrier(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryBarrier* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkAccessFlags);
+    *count += sizeof(VkAccessFlags);
+}
+
+void count_VkAllocationCallbacks(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAllocationCallbacks* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pUserData)
+    {
+        *count += sizeof(uint8_t);
+    }
+    *count += 8;
+    *count += 8;
+    *count += 8;
+    *count += 8;
+    *count += 8;
+}
+
+void count_VkApplicationInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkApplicationInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    if (featureBits & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
+    {
+        // WARNING PTR CHECK
+        *count += 8;
+        if (toCount->pApplicationName)
+        {
+            *count += sizeof(uint32_t) + (toCount->pApplicationName ? strlen(toCount->pApplicationName) : 0);
+        }
+    }
+    else
+    {
+        *count += sizeof(uint32_t) + (toCount->pApplicationName ? strlen(toCount->pApplicationName) : 0);
+    }
+    *count += sizeof(uint32_t);
+    if (featureBits & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
+    {
+        // WARNING PTR CHECK
+        *count += 8;
+        if (toCount->pEngineName)
+        {
+            *count += sizeof(uint32_t) + (toCount->pEngineName ? strlen(toCount->pEngineName) : 0);
+        }
+    }
+    else
+    {
+        *count += sizeof(uint32_t) + (toCount->pEngineName ? strlen(toCount->pEngineName) : 0);
+    }
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkFormatProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFormatProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkFormatFeatureFlags);
+    *count += sizeof(VkFormatFeatureFlags);
+    *count += sizeof(VkFormatFeatureFlags);
+}
+
+void count_VkImageFormatProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageFormatProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    count_VkExtent3D(featureBits, rootType, (VkExtent3D*)(&toCount->maxExtent), count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkSampleCountFlags);
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkInstanceCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkInstanceCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkInstanceCreateFlags);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pApplicationInfo)
+    {
+        count_VkApplicationInfo(featureBits, rootType, (const VkApplicationInfo*)(toCount->pApplicationInfo), count);
+    }
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < toCount->enabledLayerCount; ++i)
+        {
+            size_t l = toCount->ppEnabledLayerNames[i] ? strlen(toCount->ppEnabledLayerNames[i]) : 0;
+            *count += sizeof(uint32_t) + (toCount->ppEnabledLayerNames[i] ? strlen(toCount->ppEnabledLayerNames[i]) : 0);
+        }
+    }
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < toCount->enabledExtensionCount; ++i)
+        {
+            size_t l = toCount->ppEnabledExtensionNames[i] ? strlen(toCount->ppEnabledExtensionNames[i]) : 0;
+            *count += sizeof(uint32_t) + (toCount->ppEnabledExtensionNames[i] ? strlen(toCount->ppEnabledExtensionNames[i]) : 0);
+        }
+    }
+}
+
+void count_VkMemoryHeap(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryHeap* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkMemoryHeapFlags);
+}
+
+void count_VkMemoryType(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryType* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkMemoryPropertyFlags);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPhysicalDeviceFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceLimits(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLimits* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += 3 * sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += 3 * sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(uint32_t);
+    *count += 2 * sizeof(uint32_t);
+    *count += 2 * sizeof(float);
+    *count += sizeof(uint32_t);
+    *count += 8;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(int32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(int32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkSampleCountFlags);
+    *count += sizeof(VkSampleCountFlags);
+    *count += sizeof(VkSampleCountFlags);
+    *count += sizeof(VkSampleCountFlags);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkSampleCountFlags);
+    *count += sizeof(VkSampleCountFlags);
+    *count += sizeof(VkSampleCountFlags);
+    *count += sizeof(VkSampleCountFlags);
+    *count += sizeof(VkSampleCountFlags);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkBool32);
+    *count += sizeof(float);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += 2 * sizeof(float);
+    *count += 2 * sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkPhysicalDeviceMemoryProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_TYPES; ++i)
+    {
+        count_VkMemoryType(featureBits, rootType, (VkMemoryType*)(toCount->memoryTypes + i), count);
+    }
+    *count += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_HEAPS; ++i)
+    {
+        count_VkMemoryHeap(featureBits, rootType, (VkMemoryHeap*)(toCount->memoryHeaps + i), count);
+    }
+}
+
+void count_VkPhysicalDeviceSparseProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSparseProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkPhysicalDeviceType);
+    *count += VK_MAX_PHYSICAL_DEVICE_NAME_SIZE * sizeof(char);
+    *count += VK_UUID_SIZE * sizeof(uint8_t);
+    count_VkPhysicalDeviceLimits(featureBits, rootType, (VkPhysicalDeviceLimits*)(&toCount->limits), count);
+    count_VkPhysicalDeviceSparseProperties(featureBits, rootType, (VkPhysicalDeviceSparseProperties*)(&toCount->sparseProperties), count);
+}
+
+void count_VkQueueFamilyProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkQueueFamilyProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkQueueFlags);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    count_VkExtent3D(featureBits, rootType, (VkExtent3D*)(&toCount->minImageTransferGranularity), count);
+}
+
+void count_VkDeviceQueueCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceQueueCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceQueueCreateFlags);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->queueCount * sizeof(const float);
+    }
+}
+
+void count_VkDeviceCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceCreateFlags);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->queueCreateInfoCount; ++i)
+        {
+            count_VkDeviceQueueCreateInfo(featureBits, rootType, (const VkDeviceQueueCreateInfo*)(toCount->pQueueCreateInfos + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < toCount->enabledLayerCount; ++i)
+        {
+            size_t l = toCount->ppEnabledLayerNames[i] ? strlen(toCount->ppEnabledLayerNames[i]) : 0;
+            *count += sizeof(uint32_t) + (toCount->ppEnabledLayerNames[i] ? strlen(toCount->ppEnabledLayerNames[i]) : 0);
+        }
+    }
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < toCount->enabledExtensionCount; ++i)
+        {
+            size_t l = toCount->ppEnabledExtensionNames[i] ? strlen(toCount->ppEnabledExtensionNames[i]) : 0;
+            *count += sizeof(uint32_t) + (toCount->ppEnabledExtensionNames[i] ? strlen(toCount->ppEnabledExtensionNames[i]) : 0);
+        }
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pEnabledFeatures)
+    {
+        count_VkPhysicalDeviceFeatures(featureBits, rootType, (const VkPhysicalDeviceFeatures*)(toCount->pEnabledFeatures), count);
+    }
+}
+
+void count_VkExtensionProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExtensionProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += VK_MAX_EXTENSION_NAME_SIZE * sizeof(char);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkLayerProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkLayerProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += VK_MAX_EXTENSION_NAME_SIZE * sizeof(char);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+}
+
+void count_VkSubmitInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubmitInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount->waitSemaphoreCount)
+    {
+        *count += toCount->waitSemaphoreCount * 8;
+    }
+    if (toCount)
+    {
+        *count += toCount->waitSemaphoreCount * sizeof(const VkPipelineStageFlags);
+    }
+    *count += sizeof(uint32_t);
+    if (toCount->commandBufferCount)
+    {
+        *count += toCount->commandBufferCount * 8;
+    }
+    *count += sizeof(uint32_t);
+    if (toCount->signalSemaphoreCount)
+    {
+        *count += toCount->signalSemaphoreCount * 8;
+    }
+}
+
+void count_VkMappedMemoryRange(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMappedMemoryRange* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkMemoryAllocateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryAllocateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkMemoryRequirements(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryRequirements* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkSparseMemoryBind(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseMemoryBind* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkSparseMemoryBindFlags);
+}
+
+void count_VkSparseBufferMemoryBindInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseBufferMemoryBindInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->bindCount; ++i)
+        {
+            count_VkSparseMemoryBind(featureBits, rootType, (const VkSparseMemoryBind*)(toCount->pBinds + i), count);
+        }
+    }
+}
+
+void count_VkSparseImageOpaqueMemoryBindInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseImageOpaqueMemoryBindInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->bindCount; ++i)
+        {
+            count_VkSparseMemoryBind(featureBits, rootType, (const VkSparseMemoryBind*)(toCount->pBinds + i), count);
+        }
+    }
+}
+
+void count_VkImageSubresource(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageSubresource* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkImageAspectFlags);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkSparseImageMemoryBind(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseImageMemoryBind* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    count_VkImageSubresource(featureBits, rootType, (VkImageSubresource*)(&toCount->subresource), count);
+    count_VkOffset3D(featureBits, rootType, (VkOffset3D*)(&toCount->offset), count);
+    count_VkExtent3D(featureBits, rootType, (VkExtent3D*)(&toCount->extent), count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkSparseMemoryBindFlags);
+}
+
+void count_VkSparseImageMemoryBindInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseImageMemoryBindInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->bindCount; ++i)
+        {
+            count_VkSparseImageMemoryBind(featureBits, rootType, (const VkSparseImageMemoryBind*)(toCount->pBinds + i), count);
+        }
+    }
+}
+
+void count_VkBindSparseInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindSparseInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount->waitSemaphoreCount)
+    {
+        *count += toCount->waitSemaphoreCount * 8;
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->bufferBindCount; ++i)
+        {
+            count_VkSparseBufferMemoryBindInfo(featureBits, rootType, (const VkSparseBufferMemoryBindInfo*)(toCount->pBufferBinds + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->imageOpaqueBindCount; ++i)
+        {
+            count_VkSparseImageOpaqueMemoryBindInfo(featureBits, rootType, (const VkSparseImageOpaqueMemoryBindInfo*)(toCount->pImageOpaqueBinds + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->imageBindCount; ++i)
+        {
+            count_VkSparseImageMemoryBindInfo(featureBits, rootType, (const VkSparseImageMemoryBindInfo*)(toCount->pImageBinds + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    if (toCount->signalSemaphoreCount)
+    {
+        *count += toCount->signalSemaphoreCount * 8;
+    }
+}
+
+void count_VkSparseImageFormatProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseImageFormatProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkImageAspectFlags);
+    count_VkExtent3D(featureBits, rootType, (VkExtent3D*)(&toCount->imageGranularity), count);
+    *count += sizeof(VkSparseImageFormatFlags);
+}
+
+void count_VkSparseImageMemoryRequirements(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseImageMemoryRequirements* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    count_VkSparseImageFormatProperties(featureBits, rootType, (VkSparseImageFormatProperties*)(&toCount->formatProperties), count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkFenceCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFenceCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkFenceCreateFlags);
+}
+
+void count_VkSemaphoreCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSemaphoreCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkSemaphoreCreateFlags);
+}
+
+void count_VkEventCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkEventCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkEventCreateFlags);
+}
+
+void count_VkQueryPoolCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkQueryPoolCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkQueryPoolCreateFlags);
+    *count += sizeof(VkQueryType);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkQueryPipelineStatisticFlags);
+}
+
+void count_VkBufferCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBufferCreateFlags);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkBufferUsageFlags);
+    *count += sizeof(VkSharingMode);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pQueueFamilyIndices)
+    {
+        if (toCount)
+        {
+            *count += toCount->queueFamilyIndexCount * sizeof(const uint32_t);
+        }
+    }
+}
+
+void count_VkBufferViewCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferViewCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBufferViewCreateFlags);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkFormat);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkImageCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkImageCreateFlags);
+    *count += sizeof(VkImageType);
+    *count += sizeof(VkFormat);
+    count_VkExtent3D(featureBits, rootType, (VkExtent3D*)(&toCount->extent), count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkSampleCountFlagBits);
+    *count += sizeof(VkImageTiling);
+    *count += sizeof(VkImageUsageFlags);
+    *count += sizeof(VkSharingMode);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pQueueFamilyIndices)
+    {
+        if (toCount)
+        {
+            *count += toCount->queueFamilyIndexCount * sizeof(const uint32_t);
+        }
+    }
+    *count += sizeof(VkImageLayout);
+}
+
+void count_VkSubresourceLayout(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubresourceLayout* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkComponentMapping(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkComponentMapping* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkComponentSwizzle);
+    *count += sizeof(VkComponentSwizzle);
+    *count += sizeof(VkComponentSwizzle);
+    *count += sizeof(VkComponentSwizzle);
+}
+
+void count_VkImageViewCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageViewCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkImageViewCreateFlags);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkImageViewType);
+    *count += sizeof(VkFormat);
+    count_VkComponentMapping(featureBits, rootType, (VkComponentMapping*)(&toCount->components), count);
+    count_VkImageSubresourceRange(featureBits, rootType, (VkImageSubresourceRange*)(&toCount->subresourceRange), count);
+}
+
+void count_VkShaderModuleCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkShaderModuleCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkShaderModuleCreateFlags);
+    *count += 8;
+    if (toCount)
+    {
+        *count += (toCount->codeSize / 4) * sizeof(const uint32_t);
+    }
+}
+
+void count_VkPipelineCacheCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineCacheCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineCacheCreateFlags);
+    *count += 8;
+    if (toCount)
+    {
+        *count += toCount->initialDataSize * sizeof(const uint8_t);
+    }
+}
+
+void count_VkSpecializationMapEntry(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSpecializationMapEntry* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += 8;
+}
+
+void count_VkSpecializationInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSpecializationInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->mapEntryCount; ++i)
+        {
+            count_VkSpecializationMapEntry(featureBits, rootType, (const VkSpecializationMapEntry*)(toCount->pMapEntries + i), count);
+        }
+    }
+    *count += 8;
+    if (toCount)
+    {
+        *count += toCount->dataSize * sizeof(const uint8_t);
+    }
+}
+
+void count_VkPipelineShaderStageCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineShaderStageCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineShaderStageCreateFlags);
+    *count += sizeof(VkShaderStageFlagBits);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t) + (toCount->pName ? strlen(toCount->pName) : 0);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pSpecializationInfo)
+    {
+        count_VkSpecializationInfo(featureBits, rootType, (const VkSpecializationInfo*)(toCount->pSpecializationInfo), count);
+    }
+}
+
+void count_VkComputePipelineCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkComputePipelineCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineCreateFlags);
+    count_VkPipelineShaderStageCreateInfo(featureBits, rootType, (VkPipelineShaderStageCreateInfo*)(&toCount->stage), count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(int32_t);
+}
+
+void count_VkVertexInputBindingDescription(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkVertexInputBindingDescription* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkVertexInputRate);
+}
+
+void count_VkVertexInputAttributeDescription(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkVertexInputAttributeDescription* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkFormat);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPipelineVertexInputStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineVertexInputStateCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineVertexInputStateCreateFlags);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->vertexBindingDescriptionCount; ++i)
+        {
+            count_VkVertexInputBindingDescription(featureBits, rootType, (const VkVertexInputBindingDescription*)(toCount->pVertexBindingDescriptions + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->vertexAttributeDescriptionCount; ++i)
+        {
+            count_VkVertexInputAttributeDescription(featureBits, rootType, (const VkVertexInputAttributeDescription*)(toCount->pVertexAttributeDescriptions + i), count);
+        }
+    }
+}
+
+void count_VkPipelineInputAssemblyStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineInputAssemblyStateCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineInputAssemblyStateCreateFlags);
+    *count += sizeof(VkPrimitiveTopology);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPipelineTessellationStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineTessellationStateCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineTessellationStateCreateFlags);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkViewport(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkViewport* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(float);
+}
+
+void count_VkPipelineViewportStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineViewportStateCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineViewportStateCreateFlags);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pViewports)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->viewportCount; ++i)
+            {
+                count_VkViewport(featureBits, rootType, (const VkViewport*)(toCount->pViewports + i), count);
+            }
+        }
+    }
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pScissors)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->scissorCount; ++i)
+            {
+                count_VkRect2D(featureBits, rootType, (const VkRect2D*)(toCount->pScissors + i), count);
+            }
+        }
+    }
+}
+
+void count_VkPipelineRasterizationStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineRasterizationStateCreateFlags);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkPolygonMode);
+    *count += sizeof(VkCullModeFlags);
+    *count += sizeof(VkFrontFace);
+    *count += sizeof(VkBool32);
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(float);
+}
+
+void count_VkPipelineMultisampleStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineMultisampleStateCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineMultisampleStateCreateFlags);
+    *count += sizeof(VkSampleCountFlagBits);
+    *count += sizeof(VkBool32);
+    *count += sizeof(float);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pSampleMask)
+    {
+        if (toCount)
+        {
+            *count += (((toCount->rasterizationSamples) + 31) / 32) * sizeof(const VkSampleMask);
+        }
+    }
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkStencilOpState(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkStencilOpState* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStencilOp);
+    *count += sizeof(VkStencilOp);
+    *count += sizeof(VkStencilOp);
+    *count += sizeof(VkCompareOp);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPipelineDepthStencilStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineDepthStencilStateCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineDepthStencilStateCreateFlags);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkCompareOp);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    count_VkStencilOpState(featureBits, rootType, (VkStencilOpState*)(&toCount->front), count);
+    count_VkStencilOpState(featureBits, rootType, (VkStencilOpState*)(&toCount->back), count);
+    *count += sizeof(float);
+    *count += sizeof(float);
+}
+
+void count_VkPipelineColorBlendAttachmentState(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineColorBlendAttachmentState* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBlendFactor);
+    *count += sizeof(VkBlendFactor);
+    *count += sizeof(VkBlendOp);
+    *count += sizeof(VkBlendFactor);
+    *count += sizeof(VkBlendFactor);
+    *count += sizeof(VkBlendOp);
+    *count += sizeof(VkColorComponentFlags);
+}
+
+void count_VkPipelineColorBlendStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineColorBlendStateCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineColorBlendStateCreateFlags);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkLogicOp);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->attachmentCount; ++i)
+        {
+            count_VkPipelineColorBlendAttachmentState(featureBits, rootType, (const VkPipelineColorBlendAttachmentState*)(toCount->pAttachments + i), count);
+        }
+    }
+    *count += 4 * sizeof(float);
+}
+
+void count_VkPipelineDynamicStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineDynamicStateCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineDynamicStateCreateFlags);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->dynamicStateCount * sizeof(const VkDynamicState);
+    }
+}
+
+void count_VkGraphicsPipelineCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGraphicsPipelineCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    uint32_t hasRasterization = 1;
+    if (featureBits & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT)
+    {
+        hasRasterization = (((0 == toCount->pRasterizationState)) ? (0) : (!((*(toCount->pRasterizationState)).rasterizerDiscardEnable)));
+        *count += 4;
+    }
+    uint32_t hasTessellation = 1;
+    if (featureBits & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT)
+    {
+        hasTessellation = arrayany(toCount->pStages, 0, toCount->stageCount, [](VkPipelineShaderStageCreateInfo s) { return ((s.stage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) || (s.stage == VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)); });
+        *count += 4;
+    }
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineCreateFlags);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->stageCount; ++i)
+        {
+            count_VkPipelineShaderStageCreateInfo(featureBits, rootType, (const VkPipelineShaderStageCreateInfo*)(toCount->pStages + i), count);
+        }
+    }
+    // WARNING PTR CHECK
+    if (featureBits & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT)
+    {
+        *count += 8;
+    }
+    if ((!(featureBits & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || toCount->pVertexInputState))
+    {
+        count_VkPipelineVertexInputStateCreateInfo(featureBits, rootType, (const VkPipelineVertexInputStateCreateInfo*)(toCount->pVertexInputState), count);
+    }
+    // WARNING PTR CHECK
+    if (featureBits & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT)
+    {
+        *count += 8;
+    }
+    if ((!(featureBits & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || toCount->pInputAssemblyState))
+    {
+        count_VkPipelineInputAssemblyStateCreateInfo(featureBits, rootType, (const VkPipelineInputAssemblyStateCreateInfo*)(toCount->pInputAssemblyState), count);
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pTessellationState)
+    {
+        if (hasTessellation)
+        {
+            count_VkPipelineTessellationStateCreateInfo(featureBits, rootType, (const VkPipelineTessellationStateCreateInfo*)(toCount->pTessellationState), count);
+        }
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pViewportState)
+    {
+        if (hasRasterization)
+        {
+            count_VkPipelineViewportStateCreateInfo(featureBits, rootType, (const VkPipelineViewportStateCreateInfo*)(toCount->pViewportState), count);
+        }
+    }
+    // WARNING PTR CHECK
+    if (featureBits & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT)
+    {
+        *count += 8;
+    }
+    if ((!(featureBits & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || toCount->pRasterizationState))
+    {
+        count_VkPipelineRasterizationStateCreateInfo(featureBits, rootType, (const VkPipelineRasterizationStateCreateInfo*)(toCount->pRasterizationState), count);
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pMultisampleState)
+    {
+        if (hasRasterization)
+        {
+            count_VkPipelineMultisampleStateCreateInfo(featureBits, rootType, (const VkPipelineMultisampleStateCreateInfo*)(toCount->pMultisampleState), count);
+        }
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pDepthStencilState)
+    {
+        if (hasRasterization)
+        {
+            count_VkPipelineDepthStencilStateCreateInfo(featureBits, rootType, (const VkPipelineDepthStencilStateCreateInfo*)(toCount->pDepthStencilState), count);
+        }
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pColorBlendState)
+    {
+        if (hasRasterization)
+        {
+            count_VkPipelineColorBlendStateCreateInfo(featureBits, rootType, (const VkPipelineColorBlendStateCreateInfo*)(toCount->pColorBlendState), count);
+        }
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pDynamicState)
+    {
+        count_VkPipelineDynamicStateCreateInfo(featureBits, rootType, (const VkPipelineDynamicStateCreateInfo*)(toCount->pDynamicState), count);
+    }
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+    uint64_t cgen_var_2;
+    *count += 1 * 8;
+    *count += sizeof(int32_t);
+}
+
+void count_VkPushConstantRange(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPushConstantRange* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkShaderStageFlags);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPipelineLayoutCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineLayoutCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineLayoutCreateFlags);
+    *count += sizeof(uint32_t);
+    if (toCount->setLayoutCount)
+    {
+        *count += toCount->setLayoutCount * 8;
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->pushConstantRangeCount; ++i)
+        {
+            count_VkPushConstantRange(featureBits, rootType, (const VkPushConstantRange*)(toCount->pPushConstantRanges + i), count);
+        }
+    }
+}
+
+void count_VkSamplerCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSamplerCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkSamplerCreateFlags);
+    *count += sizeof(VkFilter);
+    *count += sizeof(VkFilter);
+    *count += sizeof(VkSamplerMipmapMode);
+    *count += sizeof(VkSamplerAddressMode);
+    *count += sizeof(VkSamplerAddressMode);
+    *count += sizeof(VkSamplerAddressMode);
+    *count += sizeof(float);
+    *count += sizeof(VkBool32);
+    *count += sizeof(float);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkCompareOp);
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(VkBorderColor);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkCopyDescriptorSet(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyDescriptorSet* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkDescriptorBufferInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorBufferInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkDescriptorImageInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorImageInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(VkImageLayout);
+}
+
+void count_VkDescriptorPoolSize(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorPoolSize* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkDescriptorType);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkDescriptorPoolCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorPoolCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDescriptorPoolCreateFlags);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->poolSizeCount; ++i)
+        {
+            count_VkDescriptorPoolSize(featureBits, rootType, (const VkDescriptorPoolSize*)(toCount->pPoolSizes + i), count);
+        }
+    }
+}
+
+void count_VkDescriptorSetAllocateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorSetAllocateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+    if (toCount->descriptorSetCount)
+    {
+        *count += toCount->descriptorSetCount * 8;
+    }
+}
+
+void count_VkDescriptorSetLayoutBinding(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutBinding* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkDescriptorType);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkShaderStageFlags);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pImmutableSamplers)
+    {
+        if (toCount->descriptorCount)
+        {
+            *count += toCount->descriptorCount * 8;
+        }
+    }
+}
+
+void count_VkDescriptorSetLayoutCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDescriptorSetLayoutCreateFlags);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->bindingCount; ++i)
+        {
+            count_VkDescriptorSetLayoutBinding(featureBits, rootType, (const VkDescriptorSetLayoutBinding*)(toCount->pBindings + i), count);
+        }
+    }
+}
+
+void count_VkWriteDescriptorSet(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkWriteDescriptorSet* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkDescriptorType);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pImageInfo)
+    {
+        if ((!(featureBits & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || ((VK_DESCRIPTOR_TYPE_SAMPLER == toCount->descriptorType) || (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == toCount->descriptorType) || (VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE == toCount->descriptorType) || (VK_DESCRIPTOR_TYPE_STORAGE_IMAGE == toCount->descriptorType) || (VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT == toCount->descriptorType))))
+        {
+            if (toCount)
+            {
+                for (uint32_t i = 0; i < (uint32_t)toCount->descriptorCount; ++i)
+                {
+                    count_VkDescriptorImageInfo(featureBits, rootType, (const VkDescriptorImageInfo*)(toCount->pImageInfo + i), count);
+                }
+            }
+        }
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pBufferInfo)
+    {
+        if ((!(featureBits & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || ((VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == toCount->descriptorType) || (VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC == toCount->descriptorType) || (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER == toCount->descriptorType) || (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC == toCount->descriptorType))))
+        {
+            if (toCount)
+            {
+                for (uint32_t i = 0; i < (uint32_t)toCount->descriptorCount; ++i)
+                {
+                    count_VkDescriptorBufferInfo(featureBits, rootType, (const VkDescriptorBufferInfo*)(toCount->pBufferInfo + i), count);
+                }
+            }
+        }
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pTexelBufferView)
+    {
+        if ((!(featureBits & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || ((VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == toCount->descriptorType) || (VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER == toCount->descriptorType))))
+        {
+            if (toCount->descriptorCount)
+            {
+                *count += toCount->descriptorCount * 8;
+            }
+        }
+    }
+}
+
+void count_VkAttachmentDescription(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAttachmentDescription* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkAttachmentDescriptionFlags);
+    *count += sizeof(VkFormat);
+    *count += sizeof(VkSampleCountFlagBits);
+    *count += sizeof(VkAttachmentLoadOp);
+    *count += sizeof(VkAttachmentStoreOp);
+    *count += sizeof(VkAttachmentLoadOp);
+    *count += sizeof(VkAttachmentStoreOp);
+    *count += sizeof(VkImageLayout);
+    *count += sizeof(VkImageLayout);
+}
+
+void count_VkAttachmentReference(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAttachmentReference* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkImageLayout);
+}
+
+void count_VkFramebufferCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFramebufferCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkFramebufferCreateFlags);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+    if (toCount->attachmentCount)
+    {
+        *count += toCount->attachmentCount * 8;
+    }
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkSubpassDescription(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubpassDescription* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkSubpassDescriptionFlags);
+    *count += sizeof(VkPipelineBindPoint);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->inputAttachmentCount; ++i)
+        {
+            count_VkAttachmentReference(featureBits, rootType, (const VkAttachmentReference*)(toCount->pInputAttachments + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->colorAttachmentCount; ++i)
+        {
+            count_VkAttachmentReference(featureBits, rootType, (const VkAttachmentReference*)(toCount->pColorAttachments + i), count);
+        }
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pResolveAttachments)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->colorAttachmentCount; ++i)
+            {
+                count_VkAttachmentReference(featureBits, rootType, (const VkAttachmentReference*)(toCount->pResolveAttachments + i), count);
+            }
+        }
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pDepthStencilAttachment)
+    {
+        count_VkAttachmentReference(featureBits, rootType, (const VkAttachmentReference*)(toCount->pDepthStencilAttachment), count);
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->preserveAttachmentCount * sizeof(const uint32_t);
+    }
+}
+
+void count_VkSubpassDependency(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubpassDependency* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkPipelineStageFlags);
+    *count += sizeof(VkPipelineStageFlags);
+    *count += sizeof(VkAccessFlags);
+    *count += sizeof(VkAccessFlags);
+    *count += sizeof(VkDependencyFlags);
+}
+
+void count_VkRenderPassCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkRenderPassCreateFlags);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->attachmentCount; ++i)
+        {
+            count_VkAttachmentDescription(featureBits, rootType, (const VkAttachmentDescription*)(toCount->pAttachments + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->subpassCount; ++i)
+        {
+            count_VkSubpassDescription(featureBits, rootType, (const VkSubpassDescription*)(toCount->pSubpasses + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->dependencyCount; ++i)
+        {
+            count_VkSubpassDependency(featureBits, rootType, (const VkSubpassDependency*)(toCount->pDependencies + i), count);
+        }
+    }
+}
+
+void count_VkCommandPoolCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCommandPoolCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkCommandPoolCreateFlags);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkCommandBufferAllocateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCommandBufferAllocateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkCommandBufferLevel);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkCommandBufferInheritanceInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkQueryControlFlags);
+    *count += sizeof(VkQueryPipelineStatisticFlags);
+}
+
+void count_VkCommandBufferBeginInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCommandBufferBeginInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkCommandBufferUsageFlags);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pInheritanceInfo)
+    {
+        count_VkCommandBufferInheritanceInfo(featureBits, rootType, (const VkCommandBufferInheritanceInfo*)(toCount->pInheritanceInfo), count);
+    }
+}
+
+void count_VkBufferCopy(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferCopy* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkImageSubresourceLayers(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageSubresourceLayers* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkImageAspectFlags);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkBufferImageCopy(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferImageCopy* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    count_VkImageSubresourceLayers(featureBits, rootType, (VkImageSubresourceLayers*)(&toCount->imageSubresource), count);
+    count_VkOffset3D(featureBits, rootType, (VkOffset3D*)(&toCount->imageOffset), count);
+    count_VkExtent3D(featureBits, rootType, (VkExtent3D*)(&toCount->imageExtent), count);
+}
+
+void count_VkClearColorValue(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkClearColorValue* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += 4 * sizeof(float);
+}
+
+void count_VkClearDepthStencilValue(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkClearDepthStencilValue* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(float);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkClearValue(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkClearValue* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    count_VkClearColorValue(featureBits, rootType, (VkClearColorValue*)(&toCount->color), count);
+}
+
+void count_VkClearAttachment(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkClearAttachment* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkImageAspectFlags);
+    *count += sizeof(uint32_t);
+    count_VkClearValue(featureBits, rootType, (VkClearValue*)(&toCount->clearValue), count);
+}
+
+void count_VkClearRect(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkClearRect* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    count_VkRect2D(featureBits, rootType, (VkRect2D*)(&toCount->rect), count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkImageBlit(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageBlit* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    count_VkImageSubresourceLayers(featureBits, rootType, (VkImageSubresourceLayers*)(&toCount->srcSubresource), count);
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        count_VkOffset3D(featureBits, rootType, (VkOffset3D*)(toCount->srcOffsets + i), count);
+    }
+    count_VkImageSubresourceLayers(featureBits, rootType, (VkImageSubresourceLayers*)(&toCount->dstSubresource), count);
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        count_VkOffset3D(featureBits, rootType, (VkOffset3D*)(toCount->dstOffsets + i), count);
+    }
+}
+
+void count_VkImageCopy(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageCopy* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    count_VkImageSubresourceLayers(featureBits, rootType, (VkImageSubresourceLayers*)(&toCount->srcSubresource), count);
+    count_VkOffset3D(featureBits, rootType, (VkOffset3D*)(&toCount->srcOffset), count);
+    count_VkImageSubresourceLayers(featureBits, rootType, (VkImageSubresourceLayers*)(&toCount->dstSubresource), count);
+    count_VkOffset3D(featureBits, rootType, (VkOffset3D*)(&toCount->dstOffset), count);
+    count_VkExtent3D(featureBits, rootType, (VkExtent3D*)(&toCount->extent), count);
+}
+
+void count_VkImageResolve(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageResolve* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    count_VkImageSubresourceLayers(featureBits, rootType, (VkImageSubresourceLayers*)(&toCount->srcSubresource), count);
+    count_VkOffset3D(featureBits, rootType, (VkOffset3D*)(&toCount->srcOffset), count);
+    count_VkImageSubresourceLayers(featureBits, rootType, (VkImageSubresourceLayers*)(&toCount->dstSubresource), count);
+    count_VkOffset3D(featureBits, rootType, (VkOffset3D*)(&toCount->dstOffset), count);
+    count_VkExtent3D(featureBits, rootType, (VkExtent3D*)(&toCount->extent), count);
+}
+
+void count_VkRenderPassBeginInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassBeginInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    count_VkRect2D(featureBits, rootType, (VkRect2D*)(&toCount->renderArea), count);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pClearValues)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->clearValueCount; ++i)
+            {
+                count_VkClearValue(featureBits, rootType, (const VkClearValue*)(toCount->pClearValues + i), count);
+            }
+        }
+    }
+}
+
+#endif
+#ifdef VK_VERSION_1_1
+void count_VkPhysicalDeviceSubgroupProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkShaderStageFlags);
+    *count += sizeof(VkSubgroupFeatureFlags);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkBindBufferMemoryInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindBufferMemoryInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkBindImageMemoryInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindImageMemoryInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkPhysicalDevice16BitStorageFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevice16BitStorageFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkMemoryDedicatedRequirements(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryDedicatedRequirements* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkMemoryDedicatedAllocateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryDedicatedAllocateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+}
+
+void count_VkMemoryAllocateFlagsInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryAllocateFlagsInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkMemoryAllocateFlags);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkDeviceGroupRenderPassBeginInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceGroupRenderPassBeginInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->deviceRenderAreaCount; ++i)
+        {
+            count_VkRect2D(featureBits, rootType, (const VkRect2D*)(toCount->pDeviceRenderAreas + i), count);
+        }
+    }
+}
+
+void count_VkDeviceGroupCommandBufferBeginInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceGroupCommandBufferBeginInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkDeviceGroupSubmitInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceGroupSubmitInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->waitSemaphoreCount * sizeof(const uint32_t);
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->commandBufferCount * sizeof(const uint32_t);
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->signalSemaphoreCount * sizeof(const uint32_t);
+    }
+}
+
+void count_VkDeviceGroupBindSparseInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceGroupBindSparseInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkBindBufferMemoryDeviceGroupInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindBufferMemoryDeviceGroupInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->deviceIndexCount * sizeof(const uint32_t);
+    }
+}
+
+void count_VkBindImageMemoryDeviceGroupInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindImageMemoryDeviceGroupInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->deviceIndexCount * sizeof(const uint32_t);
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->splitInstanceBindRegionCount; ++i)
+        {
+            count_VkRect2D(featureBits, rootType, (const VkRect2D*)(toCount->pSplitInstanceBindRegions + i), count);
+        }
+    }
+}
+
+void count_VkPhysicalDeviceGroupProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceGroupProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += VK_MAX_DEVICE_GROUP_SIZE * sizeof(VkPhysicalDevice);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkDeviceGroupDeviceCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceGroupDeviceCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount->physicalDeviceCount)
+    {
+        *count += toCount->physicalDeviceCount * 8;
+    }
+}
+
+void count_VkBufferMemoryRequirementsInfo2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferMemoryRequirementsInfo2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+}
+
+void count_VkImageMemoryRequirementsInfo2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageMemoryRequirementsInfo2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+}
+
+void count_VkImageSparseMemoryRequirementsInfo2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageSparseMemoryRequirementsInfo2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+}
+
+void count_VkMemoryRequirements2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryRequirements2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkMemoryRequirements(featureBits, rootType, (VkMemoryRequirements*)(&toCount->memoryRequirements), count);
+}
+
+void count_VkSparseImageMemoryRequirements2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseImageMemoryRequirements2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkSparseImageMemoryRequirements(featureBits, rootType, (VkSparseImageMemoryRequirements*)(&toCount->memoryRequirements), count);
+}
+
+void count_VkPhysicalDeviceFeatures2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFeatures2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkPhysicalDeviceFeatures(featureBits, rootType, (VkPhysicalDeviceFeatures*)(&toCount->features), count);
+}
+
+void count_VkPhysicalDeviceProperties2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProperties2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkPhysicalDeviceProperties(featureBits, rootType, (VkPhysicalDeviceProperties*)(&toCount->properties), count);
+}
+
+void count_VkFormatProperties2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFormatProperties2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkFormatProperties(featureBits, rootType, (VkFormatProperties*)(&toCount->formatProperties), count);
+}
+
+void count_VkImageFormatProperties2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageFormatProperties2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkImageFormatProperties(featureBits, rootType, (VkImageFormatProperties*)(&toCount->imageFormatProperties), count);
+}
+
+void count_VkPhysicalDeviceImageFormatInfo2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageFormatInfo2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkFormat);
+    *count += sizeof(VkImageType);
+    *count += sizeof(VkImageTiling);
+    *count += sizeof(VkImageUsageFlags);
+    *count += sizeof(VkImageCreateFlags);
+}
+
+void count_VkQueueFamilyProperties2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkQueueFamilyProperties2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkQueueFamilyProperties(featureBits, rootType, (VkQueueFamilyProperties*)(&toCount->queueFamilyProperties), count);
+}
+
+void count_VkPhysicalDeviceMemoryProperties2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryProperties2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkPhysicalDeviceMemoryProperties(featureBits, rootType, (VkPhysicalDeviceMemoryProperties*)(&toCount->memoryProperties), count);
+}
+
+void count_VkSparseImageFormatProperties2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseImageFormatProperties2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkSparseImageFormatProperties(featureBits, rootType, (VkSparseImageFormatProperties*)(&toCount->properties), count);
+}
+
+void count_VkPhysicalDeviceSparseImageFormatInfo2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSparseImageFormatInfo2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkFormat);
+    *count += sizeof(VkImageType);
+    *count += sizeof(VkSampleCountFlagBits);
+    *count += sizeof(VkImageUsageFlags);
+    *count += sizeof(VkImageTiling);
+}
+
+void count_VkPhysicalDevicePointClippingProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePointClippingProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPointClippingBehavior);
+}
+
+void count_VkInputAttachmentAspectReference(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkInputAttachmentAspectReference* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkImageAspectFlags);
+}
+
+void count_VkRenderPassInputAttachmentAspectCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassInputAttachmentAspectCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->aspectReferenceCount; ++i)
+        {
+            count_VkInputAttachmentAspectReference(featureBits, rootType, (const VkInputAttachmentAspectReference*)(toCount->pAspectReferences + i), count);
+        }
+    }
+}
+
+void count_VkImageViewUsageCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageViewUsageCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkImageUsageFlags);
+}
+
+void count_VkPipelineTessellationDomainOriginStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineTessellationDomainOriginStateCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkTessellationDomainOrigin);
+}
+
+void count_VkRenderPassMultiviewCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassMultiviewCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->subpassCount * sizeof(const uint32_t);
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->dependencyCount * sizeof(const int32_t);
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->correlationMaskCount * sizeof(const uint32_t);
+    }
+}
+
+void count_VkPhysicalDeviceMultiviewFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMultiviewFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceMultiviewProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMultiviewProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPhysicalDeviceVariablePointersFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVariablePointersFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceProtectedMemoryFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProtectedMemoryFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceProtectedMemoryProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProtectedMemoryProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkDeviceQueueInfo2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceQueueInfo2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceQueueCreateFlags);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkProtectedSubmitInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkProtectedSubmitInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkSamplerYcbcrConversionCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSamplerYcbcrConversionCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkFormat);
+    *count += sizeof(VkSamplerYcbcrModelConversion);
+    *count += sizeof(VkSamplerYcbcrRange);
+    count_VkComponentMapping(featureBits, rootType, (VkComponentMapping*)(&toCount->components), count);
+    *count += sizeof(VkChromaLocation);
+    *count += sizeof(VkChromaLocation);
+    *count += sizeof(VkFilter);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkSamplerYcbcrConversionInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSamplerYcbcrConversionInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+}
+
+void count_VkBindImagePlaneMemoryInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindImagePlaneMemoryInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkImageAspectFlagBits);
+}
+
+void count_VkImagePlaneMemoryRequirementsInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImagePlaneMemoryRequirementsInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkImageAspectFlagBits);
+}
+
+void count_VkPhysicalDeviceSamplerYcbcrConversionFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSamplerYcbcrConversionFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkSamplerYcbcrConversionImageFormatProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSamplerYcbcrConversionImageFormatProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkDescriptorUpdateTemplateEntry(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorUpdateTemplateEntry* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkDescriptorType);
+    *count += 8;
+    *count += 8;
+}
+
+void count_VkDescriptorUpdateTemplateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorUpdateTemplateCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDescriptorUpdateTemplateCreateFlags);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->descriptorUpdateEntryCount; ++i)
+        {
+            count_VkDescriptorUpdateTemplateEntry(featureBits, rootType, (const VkDescriptorUpdateTemplateEntry*)(toCount->pDescriptorUpdateEntries + i), count);
+        }
+    }
+    *count += sizeof(VkDescriptorUpdateTemplateType);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkPipelineBindPoint);
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+}
+
+void count_VkExternalMemoryProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalMemoryProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkExternalMemoryFeatureFlags);
+    *count += sizeof(VkExternalMemoryHandleTypeFlags);
+    *count += sizeof(VkExternalMemoryHandleTypeFlags);
+}
+
+void count_VkPhysicalDeviceExternalImageFormatInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalImageFormatInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkExternalMemoryHandleTypeFlagBits);
+}
+
+void count_VkExternalImageFormatProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalImageFormatProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkExternalMemoryProperties(featureBits, rootType, (VkExternalMemoryProperties*)(&toCount->externalMemoryProperties), count);
+}
+
+void count_VkPhysicalDeviceExternalBufferInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalBufferInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBufferCreateFlags);
+    *count += sizeof(VkBufferUsageFlags);
+    *count += sizeof(VkExternalMemoryHandleTypeFlagBits);
+}
+
+void count_VkExternalBufferProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalBufferProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkExternalMemoryProperties(featureBits, rootType, (VkExternalMemoryProperties*)(&toCount->externalMemoryProperties), count);
+}
+
+void count_VkPhysicalDeviceIDProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceIDProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += VK_UUID_SIZE * sizeof(uint8_t);
+    *count += VK_UUID_SIZE * sizeof(uint8_t);
+    *count += VK_LUID_SIZE * sizeof(uint8_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkExternalMemoryImageCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalMemoryImageCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkExternalMemoryHandleTypeFlags);
+}
+
+void count_VkExternalMemoryBufferCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalMemoryBufferCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkExternalMemoryHandleTypeFlags);
+}
+
+void count_VkExportMemoryAllocateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExportMemoryAllocateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkExternalMemoryHandleTypeFlags);
+}
+
+void count_VkPhysicalDeviceExternalFenceInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalFenceInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkExternalFenceHandleTypeFlagBits);
+}
+
+void count_VkExternalFenceProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalFenceProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkExternalFenceHandleTypeFlags);
+    *count += sizeof(VkExternalFenceHandleTypeFlags);
+    *count += sizeof(VkExternalFenceFeatureFlags);
+}
+
+void count_VkExportFenceCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExportFenceCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkExternalFenceHandleTypeFlags);
+}
+
+void count_VkExportSemaphoreCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExportSemaphoreCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkExternalSemaphoreHandleTypeFlags);
+}
+
+void count_VkPhysicalDeviceExternalSemaphoreInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalSemaphoreInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkExternalSemaphoreHandleTypeFlagBits);
+}
+
+void count_VkExternalSemaphoreProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalSemaphoreProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkExternalSemaphoreHandleTypeFlags);
+    *count += sizeof(VkExternalSemaphoreHandleTypeFlags);
+    *count += sizeof(VkExternalSemaphoreFeatureFlags);
+}
+
+void count_VkPhysicalDeviceMaintenance3Properties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMaintenance3Properties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkDescriptorSetLayoutSupport(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutSupport* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceShaderDrawParametersFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderDrawParametersFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_VERSION_1_2
+void count_VkPhysicalDeviceVulkan11Features(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan11Features* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceVulkan11Properties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan11Properties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += VK_UUID_SIZE * sizeof(uint8_t);
+    *count += VK_UUID_SIZE * sizeof(uint8_t);
+    *count += VK_LUID_SIZE * sizeof(uint8_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkBool32);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkShaderStageFlags);
+    *count += sizeof(VkSubgroupFeatureFlags);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkPointClippingBehavior);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkBool32);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkPhysicalDeviceVulkan12Features(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan12Features* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkConformanceVersion(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkConformanceVersion* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint8_t);
+    *count += sizeof(uint8_t);
+    *count += sizeof(uint8_t);
+    *count += sizeof(uint8_t);
+}
+
+void count_VkPhysicalDeviceVulkan12Properties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan12Properties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDriverId);
+    *count += VK_MAX_DRIVER_NAME_SIZE * sizeof(char);
+    *count += VK_MAX_DRIVER_INFO_SIZE * sizeof(char);
+    count_VkConformanceVersion(featureBits, rootType, (VkConformanceVersion*)(&toCount->conformanceVersion), count);
+    *count += sizeof(VkShaderFloatControlsIndependence);
+    *count += sizeof(VkShaderFloatControlsIndependence);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkResolveModeFlags);
+    *count += sizeof(VkResolveModeFlags);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(uint64_t);
+    *count += sizeof(VkSampleCountFlags);
+}
+
+void count_VkImageFormatListCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageFormatListCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->viewFormatCount * sizeof(const VkFormat);
+    }
+}
+
+void count_VkAttachmentDescription2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAttachmentDescription2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkAttachmentDescriptionFlags);
+    *count += sizeof(VkFormat);
+    *count += sizeof(VkSampleCountFlagBits);
+    *count += sizeof(VkAttachmentLoadOp);
+    *count += sizeof(VkAttachmentStoreOp);
+    *count += sizeof(VkAttachmentLoadOp);
+    *count += sizeof(VkAttachmentStoreOp);
+    *count += sizeof(VkImageLayout);
+    *count += sizeof(VkImageLayout);
+}
+
+void count_VkAttachmentReference2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAttachmentReference2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkImageLayout);
+    *count += sizeof(VkImageAspectFlags);
+}
+
+void count_VkSubpassDescription2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubpassDescription2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkSubpassDescriptionFlags);
+    *count += sizeof(VkPipelineBindPoint);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->inputAttachmentCount; ++i)
+        {
+            count_VkAttachmentReference2(featureBits, rootType, (const VkAttachmentReference2*)(toCount->pInputAttachments + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->colorAttachmentCount; ++i)
+        {
+            count_VkAttachmentReference2(featureBits, rootType, (const VkAttachmentReference2*)(toCount->pColorAttachments + i), count);
+        }
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pResolveAttachments)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->colorAttachmentCount; ++i)
+            {
+                count_VkAttachmentReference2(featureBits, rootType, (const VkAttachmentReference2*)(toCount->pResolveAttachments + i), count);
+            }
+        }
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pDepthStencilAttachment)
+    {
+        count_VkAttachmentReference2(featureBits, rootType, (const VkAttachmentReference2*)(toCount->pDepthStencilAttachment), count);
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->preserveAttachmentCount * sizeof(const uint32_t);
+    }
+}
+
+void count_VkSubpassDependency2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubpassDependency2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkPipelineStageFlags);
+    *count += sizeof(VkPipelineStageFlags);
+    *count += sizeof(VkAccessFlags);
+    *count += sizeof(VkAccessFlags);
+    *count += sizeof(VkDependencyFlags);
+    *count += sizeof(int32_t);
+}
+
+void count_VkRenderPassCreateInfo2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassCreateInfo2* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkRenderPassCreateFlags);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->attachmentCount; ++i)
+        {
+            count_VkAttachmentDescription2(featureBits, rootType, (const VkAttachmentDescription2*)(toCount->pAttachments + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->subpassCount; ++i)
+        {
+            count_VkSubpassDescription2(featureBits, rootType, (const VkSubpassDescription2*)(toCount->pSubpasses + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->dependencyCount; ++i)
+        {
+            count_VkSubpassDependency2(featureBits, rootType, (const VkSubpassDependency2*)(toCount->pDependencies + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->correlatedViewMaskCount * sizeof(const uint32_t);
+    }
+}
+
+void count_VkSubpassBeginInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubpassBeginInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkSubpassContents);
+}
+
+void count_VkSubpassEndInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubpassEndInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+}
+
+void count_VkPhysicalDevice8BitStorageFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevice8BitStorageFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceDriverProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDriverProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDriverId);
+    *count += VK_MAX_DRIVER_NAME_SIZE * sizeof(char);
+    *count += VK_MAX_DRIVER_INFO_SIZE * sizeof(char);
+    count_VkConformanceVersion(featureBits, rootType, (VkConformanceVersion*)(&toCount->conformanceVersion), count);
+}
+
+void count_VkPhysicalDeviceShaderAtomicInt64Features(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderAtomicInt64Features* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceShaderFloat16Int8Features(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderFloat16Int8Features* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceFloatControlsProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFloatControlsProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkShaderFloatControlsIndependence);
+    *count += sizeof(VkShaderFloatControlsIndependence);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkDescriptorSetLayoutBindingFlagsCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutBindingFlagsCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pBindingFlags)
+    {
+        if (toCount)
+        {
+            *count += toCount->bindingCount * sizeof(const VkDescriptorBindingFlags);
+        }
+    }
+}
+
+void count_VkPhysicalDeviceDescriptorIndexingFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDescriptorIndexingFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceDescriptorIndexingProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDescriptorIndexingProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkDescriptorSetVariableDescriptorCountAllocateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorSetVariableDescriptorCountAllocateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->descriptorSetCount * sizeof(const uint32_t);
+    }
+}
+
+void count_VkDescriptorSetVariableDescriptorCountLayoutSupport(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorSetVariableDescriptorCountLayoutSupport* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkSubpassDescriptionDepthStencilResolve(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubpassDescriptionDepthStencilResolve* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkResolveModeFlagBits);
+    *count += sizeof(VkResolveModeFlagBits);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pDepthStencilResolveAttachment)
+    {
+        count_VkAttachmentReference2(featureBits, rootType, (const VkAttachmentReference2*)(toCount->pDepthStencilResolveAttachment), count);
+    }
+}
+
+void count_VkPhysicalDeviceDepthStencilResolveProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDepthStencilResolveProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkResolveModeFlags);
+    *count += sizeof(VkResolveModeFlags);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceScalarBlockLayoutFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceScalarBlockLayoutFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkImageStencilUsageCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageStencilUsageCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkImageUsageFlags);
+}
+
+void count_VkSamplerReductionModeCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSamplerReductionModeCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkSamplerReductionMode);
+}
+
+void count_VkPhysicalDeviceSamplerFilterMinmaxProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSamplerFilterMinmaxProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceVulkanMemoryModelFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkanMemoryModelFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceImagelessFramebufferFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImagelessFramebufferFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkFramebufferAttachmentImageInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFramebufferAttachmentImageInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkImageCreateFlags);
+    *count += sizeof(VkImageUsageFlags);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->viewFormatCount * sizeof(const VkFormat);
+    }
+}
+
+void count_VkFramebufferAttachmentsCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFramebufferAttachmentsCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->attachmentImageInfoCount; ++i)
+        {
+            count_VkFramebufferAttachmentImageInfo(featureBits, rootType, (const VkFramebufferAttachmentImageInfo*)(toCount->pAttachmentImageInfos + i), count);
+        }
+    }
+}
+
+void count_VkRenderPassAttachmentBeginInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassAttachmentBeginInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount->attachmentCount)
+    {
+        *count += toCount->attachmentCount * 8;
+    }
+}
+
+void count_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceUniformBufferStandardLayoutFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkAttachmentReferenceStencilLayout(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAttachmentReferenceStencilLayout* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkImageLayout);
+}
+
+void count_VkAttachmentDescriptionStencilLayout(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAttachmentDescriptionStencilLayout* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkImageLayout);
+    *count += sizeof(VkImageLayout);
+}
+
+void count_VkPhysicalDeviceHostQueryResetFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceHostQueryResetFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceTimelineSemaphoreFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTimelineSemaphoreFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceTimelineSemaphoreProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTimelineSemaphoreProperties* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint64_t);
+}
+
+void count_VkSemaphoreTypeCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSemaphoreTypeCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkSemaphoreType);
+    *count += sizeof(uint64_t);
+}
+
+void count_VkTimelineSemaphoreSubmitInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkTimelineSemaphoreSubmitInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pWaitSemaphoreValues)
+    {
+        if (toCount)
+        {
+            *count += toCount->waitSemaphoreValueCount * sizeof(const uint64_t);
+        }
+    }
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pSignalSemaphoreValues)
+    {
+        if (toCount)
+        {
+            *count += toCount->signalSemaphoreValueCount * sizeof(const uint64_t);
+        }
+    }
+}
+
+void count_VkSemaphoreWaitInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSemaphoreWaitInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkSemaphoreWaitFlags);
+    *count += sizeof(uint32_t);
+    if (toCount->semaphoreCount)
+    {
+        *count += toCount->semaphoreCount * 8;
+    }
+    if (toCount)
+    {
+        *count += toCount->semaphoreCount * sizeof(const uint64_t);
+    }
+}
+
+void count_VkSemaphoreSignalInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSemaphoreSignalInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint64_t);
+}
+
+void count_VkPhysicalDeviceBufferDeviceAddressFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBufferDeviceAddressFeatures* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkBufferDeviceAddressInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferDeviceAddressInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+}
+
+void count_VkBufferOpaqueCaptureAddressCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferOpaqueCaptureAddressCreateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint64_t);
+}
+
+void count_VkMemoryOpaqueCaptureAddressAllocateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryOpaqueCaptureAddressAllocateInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint64_t);
+}
+
+void count_VkDeviceMemoryOpaqueCaptureAddressInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceMemoryOpaqueCaptureAddressInfo* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+}
+
+#endif
+#ifdef VK_KHR_surface
+void count_VkSurfaceCapabilitiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceCapabilitiesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->currentExtent), count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->minImageExtent), count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->maxImageExtent), count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkSurfaceTransformFlagsKHR);
+    *count += sizeof(VkSurfaceTransformFlagBitsKHR);
+    *count += sizeof(VkCompositeAlphaFlagsKHR);
+    *count += sizeof(VkImageUsageFlags);
+}
+
+void count_VkSurfaceFormatKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceFormatKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkFormat);
+    *count += sizeof(VkColorSpaceKHR);
+}
+
+#endif
+#ifdef VK_KHR_swapchain
+void count_VkSwapchainCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSwapchainCreateInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkSwapchainCreateFlagsKHR);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkFormat);
+    *count += sizeof(VkColorSpaceKHR);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->imageExtent), count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkImageUsageFlags);
+    *count += sizeof(VkSharingMode);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pQueueFamilyIndices)
+    {
+        if (toCount)
+        {
+            *count += toCount->queueFamilyIndexCount * sizeof(const uint32_t);
+        }
+    }
+    *count += sizeof(VkSurfaceTransformFlagBitsKHR);
+    *count += sizeof(VkCompositeAlphaFlagBitsKHR);
+    *count += sizeof(VkPresentModeKHR);
+    *count += sizeof(VkBool32);
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+}
+
+void count_VkPresentInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPresentInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount->waitSemaphoreCount)
+    {
+        *count += toCount->waitSemaphoreCount * 8;
+    }
+    *count += sizeof(uint32_t);
+    if (toCount->swapchainCount)
+    {
+        *count += toCount->swapchainCount * 8;
+    }
+    if (toCount)
+    {
+        *count += toCount->swapchainCount * sizeof(const uint32_t);
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pResults)
+    {
+        if (toCount)
+        {
+            *count += toCount->swapchainCount * sizeof(VkResult);
+        }
+    }
+}
+
+void count_VkImageSwapchainCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageSwapchainCreateInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+}
+
+void count_VkBindImageMemorySwapchainInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindImageMemorySwapchainInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+}
+
+void count_VkAcquireNextImageInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAcquireNextImageInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint64_t);
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    uint64_t cgen_var_2;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+}
+
+void count_VkDeviceGroupPresentCapabilitiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceGroupPresentCapabilitiesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += VK_MAX_DEVICE_GROUP_SIZE * sizeof(uint32_t);
+    *count += sizeof(VkDeviceGroupPresentModeFlagsKHR);
+}
+
+void count_VkDeviceGroupPresentInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceGroupPresentInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->swapchainCount * sizeof(const uint32_t);
+    }
+    *count += sizeof(VkDeviceGroupPresentModeFlagBitsKHR);
+}
+
+void count_VkDeviceGroupSwapchainCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceGroupSwapchainCreateInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceGroupPresentModeFlagsKHR);
+}
+
+#endif
+#ifdef VK_KHR_display
+void count_VkDisplayModeParametersKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayModeParametersKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->visibleRegion), count);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkDisplayModeCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayModeCreateInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDisplayModeCreateFlagsKHR);
+    count_VkDisplayModeParametersKHR(featureBits, rootType, (VkDisplayModeParametersKHR*)(&toCount->parameters), count);
+}
+
+void count_VkDisplayModePropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayModePropertiesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    count_VkDisplayModeParametersKHR(featureBits, rootType, (VkDisplayModeParametersKHR*)(&toCount->parameters), count);
+}
+
+void count_VkDisplayPlaneCapabilitiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayPlaneCapabilitiesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkDisplayPlaneAlphaFlagsKHR);
+    count_VkOffset2D(featureBits, rootType, (VkOffset2D*)(&toCount->minSrcPosition), count);
+    count_VkOffset2D(featureBits, rootType, (VkOffset2D*)(&toCount->maxSrcPosition), count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->minSrcExtent), count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->maxSrcExtent), count);
+    count_VkOffset2D(featureBits, rootType, (VkOffset2D*)(&toCount->minDstPosition), count);
+    count_VkOffset2D(featureBits, rootType, (VkOffset2D*)(&toCount->maxDstPosition), count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->minDstExtent), count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->maxDstExtent), count);
+}
+
+void count_VkDisplayPlanePropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayPlanePropertiesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+}
+
+void count_VkDisplayPropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayPropertiesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t) + (toCount->displayName ? strlen(toCount->displayName) : 0);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->physicalDimensions), count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->physicalResolution), count);
+    *count += sizeof(VkSurfaceTransformFlagsKHR);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkDisplaySurfaceCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplaySurfaceCreateInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDisplaySurfaceCreateFlagsKHR);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkSurfaceTransformFlagBitsKHR);
+    *count += sizeof(float);
+    *count += sizeof(VkDisplayPlaneAlphaFlagBitsKHR);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->imageExtent), count);
+}
+
+#endif
+#ifdef VK_KHR_display_swapchain
+void count_VkDisplayPresentInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayPresentInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkRect2D(featureBits, rootType, (VkRect2D*)(&toCount->srcRect), count);
+    count_VkRect2D(featureBits, rootType, (VkRect2D*)(&toCount->dstRect), count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_KHR_xlib_surface
+void count_VkXlibSurfaceCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkXlibSurfaceCreateInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkXlibSurfaceCreateFlagsKHR);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->dpy)
+    {
+        *count += sizeof(Display);
+    }
+    *count += sizeof(Window);
+}
+
+#endif
+#ifdef VK_KHR_xcb_surface
+void count_VkXcbSurfaceCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkXcbSurfaceCreateInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkXcbSurfaceCreateFlagsKHR);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->connection)
+    {
+        *count += sizeof(xcb_connection_t);
+    }
+    *count += sizeof(xcb_window_t);
+}
+
+#endif
+#ifdef VK_KHR_wayland_surface
+void count_VkWaylandSurfaceCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkWaylandSurfaceCreateInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkWaylandSurfaceCreateFlagsKHR);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->display)
+    {
+        *count += sizeof(wl_display);
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->surface)
+    {
+        *count += sizeof(wl_surface);
+    }
+}
+
+#endif
+#ifdef VK_KHR_android_surface
+void count_VkAndroidSurfaceCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAndroidSurfaceCreateInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkAndroidSurfaceCreateFlagsKHR);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->window)
+    {
+        *count += sizeof(ANativeWindow);
+    }
+}
+
+#endif
+#ifdef VK_KHR_win32_surface
+void count_VkWin32SurfaceCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkWin32SurfaceCreateInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkWin32SurfaceCreateFlagsKHR);
+    *count += sizeof(HINSTANCE);
+    *count += sizeof(HWND);
+}
+
+#endif
+#ifdef VK_KHR_sampler_mirror_clamp_to_edge
+#endif
+#ifdef VK_KHR_multiview
+#endif
+#ifdef VK_KHR_get_physical_device_properties2
+#endif
+#ifdef VK_KHR_device_group
+#endif
+#ifdef VK_KHR_shader_draw_parameters
+#endif
+#ifdef VK_KHR_maintenance1
+#endif
+#ifdef VK_KHR_device_group_creation
+#endif
+#ifdef VK_KHR_external_memory_capabilities
+#endif
+#ifdef VK_KHR_external_memory
+#endif
+#ifdef VK_KHR_external_memory_win32
+void count_VkImportMemoryWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportMemoryWin32HandleInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkExternalMemoryHandleTypeFlagBits);
+    *count += sizeof(HANDLE);
+    *count += sizeof(LPCWSTR);
+}
+
+void count_VkExportMemoryWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExportMemoryWin32HandleInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pAttributes)
+    {
+        *count += sizeof(const SECURITY_ATTRIBUTES);
+    }
+    *count += sizeof(DWORD);
+    *count += sizeof(LPCWSTR);
+}
+
+void count_VkMemoryWin32HandlePropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryWin32HandlePropertiesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkMemoryGetWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryGetWin32HandleInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkExternalMemoryHandleTypeFlagBits);
+}
+
+#endif
+#ifdef VK_KHR_external_memory_fd
+void count_VkImportMemoryFdInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportMemoryFdInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkExternalMemoryHandleTypeFlagBits);
+    *count += sizeof(int);
+}
+
+void count_VkMemoryFdPropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryFdPropertiesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkMemoryGetFdInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryGetFdInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkExternalMemoryHandleTypeFlagBits);
+}
+
+#endif
+#ifdef VK_KHR_win32_keyed_mutex
+void count_VkWin32KeyedMutexAcquireReleaseInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkWin32KeyedMutexAcquireReleaseInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount->acquireCount)
+    {
+        *count += toCount->acquireCount * 8;
+    }
+    if (toCount)
+    {
+        *count += toCount->acquireCount * sizeof(const uint64_t);
+    }
+    if (toCount)
+    {
+        *count += toCount->acquireCount * sizeof(const uint32_t);
+    }
+    *count += sizeof(uint32_t);
+    if (toCount->releaseCount)
+    {
+        *count += toCount->releaseCount * 8;
+    }
+    if (toCount)
+    {
+        *count += toCount->releaseCount * sizeof(const uint64_t);
+    }
+}
+
+#endif
+#ifdef VK_KHR_external_semaphore_capabilities
+#endif
+#ifdef VK_KHR_external_semaphore
+#endif
+#ifdef VK_KHR_external_semaphore_win32
+void count_VkImportSemaphoreWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportSemaphoreWin32HandleInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkSemaphoreImportFlags);
+    *count += sizeof(VkExternalSemaphoreHandleTypeFlagBits);
+    *count += sizeof(HANDLE);
+    *count += sizeof(LPCWSTR);
+}
+
+void count_VkExportSemaphoreWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExportSemaphoreWin32HandleInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pAttributes)
+    {
+        *count += sizeof(const SECURITY_ATTRIBUTES);
+    }
+    *count += sizeof(DWORD);
+    *count += sizeof(LPCWSTR);
+}
+
+void count_VkD3D12FenceSubmitInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkD3D12FenceSubmitInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pWaitSemaphoreValues)
+    {
+        if (toCount)
+        {
+            *count += toCount->waitSemaphoreValuesCount * sizeof(const uint64_t);
+        }
+    }
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pSignalSemaphoreValues)
+    {
+        if (toCount)
+        {
+            *count += toCount->signalSemaphoreValuesCount * sizeof(const uint64_t);
+        }
+    }
+}
+
+void count_VkSemaphoreGetWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSemaphoreGetWin32HandleInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkExternalSemaphoreHandleTypeFlagBits);
+}
+
+#endif
+#ifdef VK_KHR_external_semaphore_fd
+void count_VkImportSemaphoreFdInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportSemaphoreFdInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkSemaphoreImportFlags);
+    *count += sizeof(VkExternalSemaphoreHandleTypeFlagBits);
+    *count += sizeof(int);
+}
+
+void count_VkSemaphoreGetFdInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSemaphoreGetFdInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkExternalSemaphoreHandleTypeFlagBits);
+}
+
+#endif
+#ifdef VK_KHR_push_descriptor
+void count_VkPhysicalDevicePushDescriptorPropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePushDescriptorPropertiesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_KHR_shader_float16_int8
+#endif
+#ifdef VK_KHR_16bit_storage
+#endif
+#ifdef VK_KHR_incremental_present
+void count_VkRectLayerKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRectLayerKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    count_VkOffset2D(featureBits, rootType, (VkOffset2D*)(&toCount->offset), count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->extent), count);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPresentRegionKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPresentRegionKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pRectangles)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->rectangleCount; ++i)
+            {
+                count_VkRectLayerKHR(featureBits, rootType, (const VkRectLayerKHR*)(toCount->pRectangles + i), count);
+            }
+        }
+    }
+}
+
+void count_VkPresentRegionsKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPresentRegionsKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pRegions)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->swapchainCount; ++i)
+            {
+                count_VkPresentRegionKHR(featureBits, rootType, (const VkPresentRegionKHR*)(toCount->pRegions + i), count);
+            }
+        }
+    }
+}
+
+#endif
+#ifdef VK_KHR_descriptor_update_template
+#endif
+#ifdef VK_KHR_imageless_framebuffer
+#endif
+#ifdef VK_KHR_create_renderpass2
+#endif
+#ifdef VK_KHR_shared_presentable_image
+void count_VkSharedPresentSurfaceCapabilitiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSharedPresentSurfaceCapabilitiesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkImageUsageFlags);
+}
+
+#endif
+#ifdef VK_KHR_external_fence_capabilities
+#endif
+#ifdef VK_KHR_external_fence
+#endif
+#ifdef VK_KHR_external_fence_win32
+void count_VkImportFenceWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportFenceWin32HandleInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkFenceImportFlags);
+    *count += sizeof(VkExternalFenceHandleTypeFlagBits);
+    *count += sizeof(HANDLE);
+    *count += sizeof(LPCWSTR);
+}
+
+void count_VkExportFenceWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExportFenceWin32HandleInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pAttributes)
+    {
+        *count += sizeof(const SECURITY_ATTRIBUTES);
+    }
+    *count += sizeof(DWORD);
+    *count += sizeof(LPCWSTR);
+}
+
+void count_VkFenceGetWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFenceGetWin32HandleInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkExternalFenceHandleTypeFlagBits);
+}
+
+#endif
+#ifdef VK_KHR_external_fence_fd
+void count_VkImportFenceFdInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportFenceFdInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkFenceImportFlags);
+    *count += sizeof(VkExternalFenceHandleTypeFlagBits);
+    *count += sizeof(int);
+}
+
+void count_VkFenceGetFdInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFenceGetFdInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkExternalFenceHandleTypeFlagBits);
+}
+
+#endif
+#ifdef VK_KHR_performance_query
+void count_VkPhysicalDevicePerformanceQueryFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePerformanceQueryFeaturesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDevicePerformanceQueryPropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePerformanceQueryPropertiesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPerformanceCounterKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceCounterKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPerformanceCounterUnitKHR);
+    *count += sizeof(VkPerformanceCounterScopeKHR);
+    *count += sizeof(VkPerformanceCounterStorageKHR);
+    *count += VK_UUID_SIZE * sizeof(uint8_t);
+}
+
+void count_VkPerformanceCounterDescriptionKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceCounterDescriptionKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPerformanceCounterDescriptionFlagsKHR);
+    *count += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    *count += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    *count += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+}
+
+void count_VkQueryPoolPerformanceCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkQueryPoolPerformanceCreateInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->counterIndexCount * sizeof(const uint32_t);
+    }
+}
+
+void count_VkPerformanceCounterResultKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceCounterResultKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(int32_t);
+}
+
+void count_VkAcquireProfilingLockInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAcquireProfilingLockInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkAcquireProfilingLockFlagsKHR);
+    *count += sizeof(uint64_t);
+}
+
+void count_VkPerformanceQuerySubmitInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceQuerySubmitInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_KHR_maintenance2
+#endif
+#ifdef VK_KHR_get_surface_capabilities2
+void count_VkPhysicalDeviceSurfaceInfo2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSurfaceInfo2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+}
+
+void count_VkSurfaceCapabilities2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceCapabilities2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkSurfaceCapabilitiesKHR(featureBits, rootType, (VkSurfaceCapabilitiesKHR*)(&toCount->surfaceCapabilities), count);
+}
+
+void count_VkSurfaceFormat2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceFormat2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkSurfaceFormatKHR(featureBits, rootType, (VkSurfaceFormatKHR*)(&toCount->surfaceFormat), count);
+}
+
+#endif
+#ifdef VK_KHR_variable_pointers
+#endif
+#ifdef VK_KHR_get_display_properties2
+void count_VkDisplayProperties2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayProperties2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkDisplayPropertiesKHR(featureBits, rootType, (VkDisplayPropertiesKHR*)(&toCount->displayProperties), count);
+}
+
+void count_VkDisplayPlaneProperties2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayPlaneProperties2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkDisplayPlanePropertiesKHR(featureBits, rootType, (VkDisplayPlanePropertiesKHR*)(&toCount->displayPlaneProperties), count);
+}
+
+void count_VkDisplayModeProperties2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayModeProperties2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkDisplayModePropertiesKHR(featureBits, rootType, (VkDisplayModePropertiesKHR*)(&toCount->displayModeProperties), count);
+}
+
+void count_VkDisplayPlaneInfo2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayPlaneInfo2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+}
+
+void count_VkDisplayPlaneCapabilities2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayPlaneCapabilities2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkDisplayPlaneCapabilitiesKHR(featureBits, rootType, (VkDisplayPlaneCapabilitiesKHR*)(&toCount->capabilities), count);
+}
+
+#endif
+#ifdef VK_KHR_dedicated_allocation
+#endif
+#ifdef VK_KHR_storage_buffer_storage_class
+#endif
+#ifdef VK_KHR_relaxed_block_layout
+#endif
+#ifdef VK_KHR_get_memory_requirements2
+#endif
+#ifdef VK_KHR_image_format_list
+#endif
+#ifdef VK_KHR_sampler_ycbcr_conversion
+#endif
+#ifdef VK_KHR_bind_memory2
+#endif
+#ifdef VK_KHR_portability_subset
+void count_VkPhysicalDevicePortabilitySubsetFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePortabilitySubsetFeaturesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDevicePortabilitySubsetPropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePortabilitySubsetPropertiesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_KHR_maintenance3
+#endif
+#ifdef VK_KHR_draw_indirect_count
+#endif
+#ifdef VK_KHR_shader_subgroup_extended_types
+#endif
+#ifdef VK_KHR_8bit_storage
+#endif
+#ifdef VK_KHR_shader_atomic_int64
+#endif
+#ifdef VK_KHR_shader_clock
+void count_VkPhysicalDeviceShaderClockFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderClockFeaturesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_KHR_driver_properties
+#endif
+#ifdef VK_KHR_shader_float_controls
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+void count_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+void count_VkFragmentShadingRateAttachmentInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFragmentShadingRateAttachmentInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkAttachmentReference2(featureBits, rootType, (const VkAttachmentReference2*)(toCount->pFragmentShadingRateAttachment), count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->shadingRateAttachmentTexelSize), count);
+}
+
+void count_VkPipelineFragmentShadingRateStateCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineFragmentShadingRateStateCreateInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->fragmentSize), count);
+    *count += 2 * sizeof(VkFragmentShadingRateCombinerOpKHR);
+}
+
+void count_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateFeaturesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRatePropertiesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->minFragmentShadingRateAttachmentTexelSize), count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->maxFragmentShadingRateAttachmentTexelSize), count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->maxFragmentSize), count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkSampleCountFlagBits);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceFragmentShadingRateKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkSampleCountFlags);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->fragmentSize), count);
+}
+
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+void count_VkSurfaceProtectedCapabilitiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceProtectedCapabilitiesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+#endif
+#ifdef VK_KHR_buffer_device_address
+#endif
+#ifdef VK_KHR_deferred_host_operations
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+void count_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPipelineInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+}
+
+void count_VkPipelineExecutablePropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineExecutablePropertiesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkShaderStageFlags);
+    *count += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    *count += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPipelineExecutableInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineExecutableInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPipelineExecutableStatisticValueKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineExecutableStatisticValueKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPipelineExecutableStatisticKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineExecutableStatisticKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    *count += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    *count += sizeof(VkPipelineExecutableStatisticFormatKHR);
+    count_VkPipelineExecutableStatisticValueKHR(featureBits, rootType, (VkPipelineExecutableStatisticValueKHR*)(&toCount->value), count);
+}
+
+void count_VkPipelineExecutableInternalRepresentationKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineExecutableInternalRepresentationKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    *count += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    *count += sizeof(VkBool32);
+    *count += 8;
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pData)
+    {
+        if (toCount)
+        {
+            *count += toCount->dataSize * sizeof(uint8_t);
+        }
+    }
+}
+
+#endif
+#ifdef VK_KHR_pipeline_library
+void count_VkPipelineLibraryCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineLibraryCreateInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount->libraryCount)
+    {
+        *count += toCount->libraryCount * 8;
+    }
+}
+
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+void count_VkBufferCopy2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferCopy2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkCopyBufferInfo2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyBufferInfo2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->regionCount; ++i)
+        {
+            count_VkBufferCopy2KHR(featureBits, rootType, (const VkBufferCopy2KHR*)(toCount->pRegions + i), count);
+        }
+    }
+}
+
+void count_VkImageCopy2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageCopy2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkImageSubresourceLayers(featureBits, rootType, (VkImageSubresourceLayers*)(&toCount->srcSubresource), count);
+    count_VkOffset3D(featureBits, rootType, (VkOffset3D*)(&toCount->srcOffset), count);
+    count_VkImageSubresourceLayers(featureBits, rootType, (VkImageSubresourceLayers*)(&toCount->dstSubresource), count);
+    count_VkOffset3D(featureBits, rootType, (VkOffset3D*)(&toCount->dstOffset), count);
+    count_VkExtent3D(featureBits, rootType, (VkExtent3D*)(&toCount->extent), count);
+}
+
+void count_VkCopyImageInfo2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyImageInfo2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkImageLayout);
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(VkImageLayout);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->regionCount; ++i)
+        {
+            count_VkImageCopy2KHR(featureBits, rootType, (const VkImageCopy2KHR*)(toCount->pRegions + i), count);
+        }
+    }
+}
+
+void count_VkBufferImageCopy2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferImageCopy2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    count_VkImageSubresourceLayers(featureBits, rootType, (VkImageSubresourceLayers*)(&toCount->imageSubresource), count);
+    count_VkOffset3D(featureBits, rootType, (VkOffset3D*)(&toCount->imageOffset), count);
+    count_VkExtent3D(featureBits, rootType, (VkExtent3D*)(&toCount->imageExtent), count);
+}
+
+void count_VkCopyBufferToImageInfo2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyBufferToImageInfo2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(VkImageLayout);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->regionCount; ++i)
+        {
+            count_VkBufferImageCopy2KHR(featureBits, rootType, (const VkBufferImageCopy2KHR*)(toCount->pRegions + i), count);
+        }
+    }
+}
+
+void count_VkCopyImageToBufferInfo2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyImageToBufferInfo2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkImageLayout);
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->regionCount; ++i)
+        {
+            count_VkBufferImageCopy2KHR(featureBits, rootType, (const VkBufferImageCopy2KHR*)(toCount->pRegions + i), count);
+        }
+    }
+}
+
+void count_VkImageBlit2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageBlit2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkImageSubresourceLayers(featureBits, rootType, (VkImageSubresourceLayers*)(&toCount->srcSubresource), count);
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        count_VkOffset3D(featureBits, rootType, (VkOffset3D*)(toCount->srcOffsets + i), count);
+    }
+    count_VkImageSubresourceLayers(featureBits, rootType, (VkImageSubresourceLayers*)(&toCount->dstSubresource), count);
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        count_VkOffset3D(featureBits, rootType, (VkOffset3D*)(toCount->dstOffsets + i), count);
+    }
+}
+
+void count_VkBlitImageInfo2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBlitImageInfo2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkImageLayout);
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(VkImageLayout);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->regionCount; ++i)
+        {
+            count_VkImageBlit2KHR(featureBits, rootType, (const VkImageBlit2KHR*)(toCount->pRegions + i), count);
+        }
+    }
+    *count += sizeof(VkFilter);
+}
+
+void count_VkImageResolve2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageResolve2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkImageSubresourceLayers(featureBits, rootType, (VkImageSubresourceLayers*)(&toCount->srcSubresource), count);
+    count_VkOffset3D(featureBits, rootType, (VkOffset3D*)(&toCount->srcOffset), count);
+    count_VkImageSubresourceLayers(featureBits, rootType, (VkImageSubresourceLayers*)(&toCount->dstSubresource), count);
+    count_VkOffset3D(featureBits, rootType, (VkOffset3D*)(&toCount->dstOffset), count);
+    count_VkExtent3D(featureBits, rootType, (VkExtent3D*)(&toCount->extent), count);
+}
+
+void count_VkResolveImageInfo2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkResolveImageInfo2KHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkImageLayout);
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(VkImageLayout);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->regionCount; ++i)
+        {
+            count_VkImageResolve2KHR(featureBits, rootType, (const VkImageResolve2KHR*)(toCount->pRegions + i), count);
+        }
+    }
+}
+
+#endif
+#ifdef VK_ANDROID_native_buffer
+void count_VkNativeBufferANDROID(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkNativeBufferANDROID* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->handle)
+    {
+        *count += sizeof(const uint32_t);
+    }
+    *count += sizeof(int);
+    *count += sizeof(int);
+    *count += sizeof(int);
+    *count += sizeof(uint64_t);
+    *count += sizeof(uint64_t);
+}
+
+#endif
+#ifdef VK_EXT_debug_report
+void count_VkDebugReportCallbackCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugReportCallbackCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDebugReportFlagsEXT);
+    *count += 8;
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pUserData)
+    {
+        *count += sizeof(uint8_t);
+    }
+}
+
+#endif
+#ifdef VK_NV_glsl_shader
+#endif
+#ifdef VK_EXT_depth_range_unrestricted
+#endif
+#ifdef VK_IMG_filter_cubic
+#endif
+#ifdef VK_AMD_rasterization_order
+void count_VkPipelineRasterizationStateRasterizationOrderAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateRasterizationOrderAMD* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkRasterizationOrderAMD);
+}
+
+#endif
+#ifdef VK_AMD_shader_trinary_minmax
+#endif
+#ifdef VK_AMD_shader_explicit_vertex_parameter
+#endif
+#ifdef VK_EXT_debug_marker
+void count_VkDebugMarkerObjectNameInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugMarkerObjectNameInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDebugReportObjectTypeEXT);
+    *count += sizeof(uint64_t);
+    *count += sizeof(uint32_t) + (toCount->pObjectName ? strlen(toCount->pObjectName) : 0);
+}
+
+void count_VkDebugMarkerObjectTagInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugMarkerObjectTagInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDebugReportObjectTypeEXT);
+    *count += sizeof(uint64_t);
+    *count += sizeof(uint64_t);
+    *count += 8;
+    if (toCount)
+    {
+        *count += toCount->tagSize * sizeof(const uint8_t);
+    }
+}
+
+void count_VkDebugMarkerMarkerInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugMarkerMarkerInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t) + (toCount->pMarkerName ? strlen(toCount->pMarkerName) : 0);
+    *count += 4 * sizeof(float);
+}
+
+#endif
+#ifdef VK_AMD_gcn_shader
+#endif
+#ifdef VK_NV_dedicated_allocation
+void count_VkDedicatedAllocationImageCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDedicatedAllocationImageCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkDedicatedAllocationBufferCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDedicatedAllocationBufferCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkDedicatedAllocationMemoryAllocateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDedicatedAllocationMemoryAllocateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+}
+
+#endif
+#ifdef VK_EXT_transform_feedback
+void count_VkPhysicalDeviceTransformFeedbackFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTransformFeedbackFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceTransformFeedbackPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTransformFeedbackPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPipelineRasterizationStateStreamCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateStreamCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineRasterizationStateStreamCreateFlagsEXT);
+    *count += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_NVX_image_view_handle
+void count_VkImageViewHandleInfoNVX(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageViewHandleInfoNVX* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkDescriptorType);
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+}
+
+void count_VkImageViewAddressPropertiesNVX(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageViewAddressPropertiesNVX* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceAddress);
+    *count += sizeof(VkDeviceSize);
+}
+
+#endif
+#ifdef VK_AMD_draw_indirect_count
+#endif
+#ifdef VK_AMD_negative_viewport_height
+#endif
+#ifdef VK_AMD_gpu_shader_half_float
+#endif
+#ifdef VK_AMD_shader_ballot
+#endif
+#ifdef VK_AMD_texture_gather_bias_lod
+void count_VkTextureLODGatherFormatPropertiesAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkTextureLODGatherFormatPropertiesAMD* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_AMD_shader_info
+void count_VkShaderResourceUsageAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkShaderResourceUsageAMD* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += 8;
+    *count += 8;
+}
+
+void count_VkShaderStatisticsInfoAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkShaderStatisticsInfoAMD* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkShaderStageFlags);
+    count_VkShaderResourceUsageAMD(featureBits, rootType, (VkShaderResourceUsageAMD*)(&toCount->resourceUsage), count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += 3 * sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_AMD_shader_image_load_store_lod
+#endif
+#ifdef VK_GGP_stream_descriptor_surface
+void count_VkStreamDescriptorSurfaceCreateInfoGGP(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkStreamDescriptorSurfaceCreateInfoGGP* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkStreamDescriptorSurfaceCreateFlagsGGP);
+    *count += sizeof(GgpStreamDescriptor);
+}
+
+#endif
+#ifdef VK_NV_corner_sampled_image
+void count_VkPhysicalDeviceCornerSampledImageFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCornerSampledImageFeaturesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_IMG_format_pvrtc
+#endif
+#ifdef VK_NV_external_memory_capabilities
+void count_VkExternalImageFormatPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalImageFormatPropertiesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    count_VkImageFormatProperties(featureBits, rootType, (VkImageFormatProperties*)(&toCount->imageFormatProperties), count);
+    *count += sizeof(VkExternalMemoryFeatureFlagsNV);
+    *count += sizeof(VkExternalMemoryHandleTypeFlagsNV);
+    *count += sizeof(VkExternalMemoryHandleTypeFlagsNV);
+}
+
+#endif
+#ifdef VK_NV_external_memory
+void count_VkExternalMemoryImageCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalMemoryImageCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkExternalMemoryHandleTypeFlagsNV);
+}
+
+void count_VkExportMemoryAllocateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExportMemoryAllocateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkExternalMemoryHandleTypeFlagsNV);
+}
+
+#endif
+#ifdef VK_NV_external_memory_win32
+void count_VkImportMemoryWin32HandleInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportMemoryWin32HandleInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkExternalMemoryHandleTypeFlagsNV);
+    *count += sizeof(HANDLE);
+}
+
+void count_VkExportMemoryWin32HandleInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExportMemoryWin32HandleInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pAttributes)
+    {
+        *count += sizeof(const SECURITY_ATTRIBUTES);
+    }
+    *count += sizeof(DWORD);
+}
+
+#endif
+#ifdef VK_NV_win32_keyed_mutex
+void count_VkWin32KeyedMutexAcquireReleaseInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkWin32KeyedMutexAcquireReleaseInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount->acquireCount)
+    {
+        *count += toCount->acquireCount * 8;
+    }
+    if (toCount)
+    {
+        *count += toCount->acquireCount * sizeof(const uint64_t);
+    }
+    if (toCount)
+    {
+        *count += toCount->acquireCount * sizeof(const uint32_t);
+    }
+    *count += sizeof(uint32_t);
+    if (toCount->releaseCount)
+    {
+        *count += toCount->releaseCount * 8;
+    }
+    if (toCount)
+    {
+        *count += toCount->releaseCount * sizeof(const uint64_t);
+    }
+}
+
+#endif
+#ifdef VK_EXT_validation_flags
+void count_VkValidationFlagsEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkValidationFlagsEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->disabledValidationCheckCount * sizeof(const VkValidationCheckEXT);
+    }
+}
+
+#endif
+#ifdef VK_NN_vi_surface
+void count_VkViSurfaceCreateInfoNN(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkViSurfaceCreateInfoNN* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkViSurfaceCreateFlagsNN);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->window)
+    {
+        *count += sizeof(uint8_t);
+    }
+}
+
+#endif
+#ifdef VK_EXT_shader_subgroup_ballot
+#endif
+#ifdef VK_EXT_shader_subgroup_vote
+#endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+void count_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_astc_decode_mode
+void count_VkImageViewASTCDecodeModeEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageViewASTCDecodeModeEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkFormat);
+}
+
+void count_VkPhysicalDeviceASTCDecodeFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceASTCDecodeFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_conditional_rendering
+void count_VkConditionalRenderingBeginInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkConditionalRenderingBeginInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkConditionalRenderingFlagsEXT);
+}
+
+void count_VkPhysicalDeviceConditionalRenderingFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceConditionalRenderingFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkCommandBufferInheritanceConditionalRenderingInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceConditionalRenderingInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_NV_clip_space_w_scaling
+void count_VkViewportWScalingNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkViewportWScalingNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(float);
+    *count += sizeof(float);
+}
+
+void count_VkPipelineViewportWScalingStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineViewportWScalingStateCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pViewportWScalings)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->viewportCount; ++i)
+            {
+                count_VkViewportWScalingNV(featureBits, rootType, (const VkViewportWScalingNV*)(toCount->pViewportWScalings + i), count);
+            }
+        }
+    }
+}
+
+#endif
+#ifdef VK_EXT_direct_mode_display
+#endif
+#ifdef VK_EXT_acquire_xlib_display
+#endif
+#ifdef VK_EXT_display_surface_counter
+void count_VkSurfaceCapabilities2EXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceCapabilities2EXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->currentExtent), count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->minImageExtent), count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->maxImageExtent), count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkSurfaceTransformFlagsKHR);
+    *count += sizeof(VkSurfaceTransformFlagBitsKHR);
+    *count += sizeof(VkCompositeAlphaFlagsKHR);
+    *count += sizeof(VkImageUsageFlags);
+    *count += sizeof(VkSurfaceCounterFlagsEXT);
+}
+
+#endif
+#ifdef VK_EXT_display_control
+void count_VkDisplayPowerInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayPowerInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDisplayPowerStateEXT);
+}
+
+void count_VkDeviceEventInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceEventInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceEventTypeEXT);
+}
+
+void count_VkDisplayEventInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayEventInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDisplayEventTypeEXT);
+}
+
+void count_VkSwapchainCounterCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSwapchainCounterCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkSurfaceCounterFlagsEXT);
+}
+
+#endif
+#ifdef VK_GOOGLE_display_timing
+void count_VkRefreshCycleDurationGOOGLE(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRefreshCycleDurationGOOGLE* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint64_t);
+}
+
+void count_VkPastPresentationTimingGOOGLE(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPastPresentationTimingGOOGLE* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint64_t);
+    *count += sizeof(uint64_t);
+    *count += sizeof(uint64_t);
+    *count += sizeof(uint64_t);
+}
+
+void count_VkPresentTimeGOOGLE(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPresentTimeGOOGLE* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint64_t);
+}
+
+void count_VkPresentTimesInfoGOOGLE(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPresentTimesInfoGOOGLE* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pTimes)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->swapchainCount; ++i)
+            {
+                count_VkPresentTimeGOOGLE(featureBits, rootType, (const VkPresentTimeGOOGLE*)(toCount->pTimes + i), count);
+            }
+        }
+    }
+}
+
+#endif
+#ifdef VK_NV_sample_mask_override_coverage
+#endif
+#ifdef VK_NV_geometry_shader_passthrough
+#endif
+#ifdef VK_NV_viewport_array2
+#endif
+#ifdef VK_NVX_multiview_per_view_attributes
+void count_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_NV_viewport_swizzle
+void count_VkViewportSwizzleNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkViewportSwizzleNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkViewportCoordinateSwizzleNV);
+    *count += sizeof(VkViewportCoordinateSwizzleNV);
+    *count += sizeof(VkViewportCoordinateSwizzleNV);
+    *count += sizeof(VkViewportCoordinateSwizzleNV);
+}
+
+void count_VkPipelineViewportSwizzleStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineViewportSwizzleStateCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineViewportSwizzleStateCreateFlagsNV);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pViewportSwizzles)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->viewportCount; ++i)
+            {
+                count_VkViewportSwizzleNV(featureBits, rootType, (const VkViewportSwizzleNV*)(toCount->pViewportSwizzles + i), count);
+            }
+        }
+    }
+}
+
+#endif
+#ifdef VK_EXT_discard_rectangles
+void count_VkPhysicalDeviceDiscardRectanglePropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDiscardRectanglePropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPipelineDiscardRectangleStateCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineDiscardRectangleStateCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineDiscardRectangleStateCreateFlagsEXT);
+    *count += sizeof(VkDiscardRectangleModeEXT);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pDiscardRectangles)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->discardRectangleCount; ++i)
+            {
+                count_VkRect2D(featureBits, rootType, (const VkRect2D*)(toCount->pDiscardRectangles + i), count);
+            }
+        }
+    }
+}
+
+#endif
+#ifdef VK_EXT_conservative_rasterization
+void count_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceConservativeRasterizationPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPipelineRasterizationConservativeStateCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineRasterizationConservativeStateCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineRasterizationConservativeStateCreateFlagsEXT);
+    *count += sizeof(VkConservativeRasterizationModeEXT);
+    *count += sizeof(float);
+}
+
+#endif
+#ifdef VK_EXT_depth_clip_enable
+void count_VkPhysicalDeviceDepthClipEnableFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDepthClipEnableFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPipelineRasterizationDepthClipStateCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineRasterizationDepthClipStateCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineRasterizationDepthClipStateCreateFlagsEXT);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_swapchain_colorspace
+#endif
+#ifdef VK_EXT_hdr_metadata
+void count_VkXYColorEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkXYColorEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(float);
+    *count += sizeof(float);
+}
+
+void count_VkHdrMetadataEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkHdrMetadataEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkXYColorEXT(featureBits, rootType, (VkXYColorEXT*)(&toCount->displayPrimaryRed), count);
+    count_VkXYColorEXT(featureBits, rootType, (VkXYColorEXT*)(&toCount->displayPrimaryGreen), count);
+    count_VkXYColorEXT(featureBits, rootType, (VkXYColorEXT*)(&toCount->displayPrimaryBlue), count);
+    count_VkXYColorEXT(featureBits, rootType, (VkXYColorEXT*)(&toCount->whitePoint), count);
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(float);
+}
+
+#endif
+#ifdef VK_MVK_ios_surface
+void count_VkIOSSurfaceCreateInfoMVK(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkIOSSurfaceCreateInfoMVK* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkIOSSurfaceCreateFlagsMVK);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pView)
+    {
+        *count += sizeof(const uint8_t);
+    }
+}
+
+#endif
+#ifdef VK_MVK_macos_surface
+void count_VkMacOSSurfaceCreateInfoMVK(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMacOSSurfaceCreateInfoMVK* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkMacOSSurfaceCreateFlagsMVK);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pView)
+    {
+        *count += sizeof(const uint8_t);
+    }
+}
+
+#endif
+#ifdef VK_MVK_moltenvk
+#endif
+#ifdef VK_EXT_external_memory_dma_buf
+#endif
+#ifdef VK_EXT_queue_family_foreign
+#endif
+#ifdef VK_EXT_debug_utils
+void count_VkDebugUtilsLabelEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugUtilsLabelEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t) + (toCount->pLabelName ? strlen(toCount->pLabelName) : 0);
+    *count += 4 * sizeof(float);
+}
+
+void count_VkDebugUtilsObjectNameInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugUtilsObjectNameInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkObjectType);
+    *count += sizeof(uint64_t);
+    if (featureBits & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
+    {
+        // WARNING PTR CHECK
+        *count += 8;
+        if (toCount->pObjectName)
+        {
+            *count += sizeof(uint32_t) + (toCount->pObjectName ? strlen(toCount->pObjectName) : 0);
+        }
+    }
+    else
+    {
+        *count += sizeof(uint32_t) + (toCount->pObjectName ? strlen(toCount->pObjectName) : 0);
+    }
+}
+
+void count_VkDebugUtilsMessengerCallbackDataEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugUtilsMessengerCallbackDataEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDebugUtilsMessengerCallbackDataFlagsEXT);
+    if (featureBits & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
+    {
+        // WARNING PTR CHECK
+        *count += 8;
+        if (toCount->pMessageIdName)
+        {
+            *count += sizeof(uint32_t) + (toCount->pMessageIdName ? strlen(toCount->pMessageIdName) : 0);
+        }
+    }
+    else
+    {
+        *count += sizeof(uint32_t) + (toCount->pMessageIdName ? strlen(toCount->pMessageIdName) : 0);
+    }
+    *count += sizeof(int32_t);
+    *count += sizeof(uint32_t) + (toCount->pMessage ? strlen(toCount->pMessage) : 0);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pQueueLabels)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->queueLabelCount; ++i)
+            {
+                count_VkDebugUtilsLabelEXT(featureBits, rootType, (VkDebugUtilsLabelEXT*)(toCount->pQueueLabels + i), count);
+            }
+        }
+    }
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pCmdBufLabels)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->cmdBufLabelCount; ++i)
+            {
+                count_VkDebugUtilsLabelEXT(featureBits, rootType, (VkDebugUtilsLabelEXT*)(toCount->pCmdBufLabels + i), count);
+            }
+        }
+    }
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pObjects)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->objectCount; ++i)
+            {
+                count_VkDebugUtilsObjectNameInfoEXT(featureBits, rootType, (VkDebugUtilsObjectNameInfoEXT*)(toCount->pObjects + i), count);
+            }
+        }
+    }
+}
+
+void count_VkDebugUtilsMessengerCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugUtilsMessengerCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDebugUtilsMessengerCreateFlagsEXT);
+    *count += sizeof(VkDebugUtilsMessageSeverityFlagsEXT);
+    *count += sizeof(VkDebugUtilsMessageTypeFlagsEXT);
+    *count += 8;
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pUserData)
+    {
+        *count += sizeof(uint8_t);
+    }
+}
+
+void count_VkDebugUtilsObjectTagInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugUtilsObjectTagInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkObjectType);
+    *count += sizeof(uint64_t);
+    *count += sizeof(uint64_t);
+    *count += 8;
+    if (toCount)
+    {
+        *count += toCount->tagSize * sizeof(const uint8_t);
+    }
+}
+
+#endif
+#ifdef VK_ANDROID_external_memory_android_hardware_buffer
+void count_VkAndroidHardwareBufferUsageANDROID(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAndroidHardwareBufferUsageANDROID* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint64_t);
+}
+
+void count_VkAndroidHardwareBufferPropertiesANDROID(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAndroidHardwareBufferPropertiesANDROID* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkAndroidHardwareBufferFormatPropertiesANDROID(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAndroidHardwareBufferFormatPropertiesANDROID* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkFormat);
+    *count += sizeof(uint64_t);
+    *count += sizeof(VkFormatFeatureFlags);
+    count_VkComponentMapping(featureBits, rootType, (VkComponentMapping*)(&toCount->samplerYcbcrConversionComponents), count);
+    *count += sizeof(VkSamplerYcbcrModelConversion);
+    *count += sizeof(VkSamplerYcbcrRange);
+    *count += sizeof(VkChromaLocation);
+    *count += sizeof(VkChromaLocation);
+}
+
+void count_VkImportAndroidHardwareBufferInfoANDROID(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportAndroidHardwareBufferInfoANDROID* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(AHardwareBuffer);
+}
+
+void count_VkMemoryGetAndroidHardwareBufferInfoANDROID(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryGetAndroidHardwareBufferInfoANDROID* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+}
+
+void count_VkExternalFormatANDROID(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalFormatANDROID* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint64_t);
+}
+
+#endif
+#ifdef VK_EXT_sampler_filter_minmax
+#endif
+#ifdef VK_AMD_gpu_shader_int16
+#endif
+#ifdef VK_AMD_mixed_attachment_samples
+#endif
+#ifdef VK_AMD_shader_fragment_mask
+#endif
+#ifdef VK_EXT_inline_uniform_block
+void count_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceInlineUniformBlockFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceInlineUniformBlockPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkWriteDescriptorSetInlineUniformBlockEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetInlineUniformBlockEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->dataSize * sizeof(const uint8_t);
+    }
+}
+
+void count_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorPoolInlineUniformBlockCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_EXT_shader_stencil_export
+#endif
+#ifdef VK_EXT_sample_locations
+void count_VkSampleLocationEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSampleLocationEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(float);
+    *count += sizeof(float);
+}
+
+void count_VkSampleLocationsInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSampleLocationsInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkSampleCountFlagBits);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->sampleLocationGridSize), count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->sampleLocationsCount; ++i)
+        {
+            count_VkSampleLocationEXT(featureBits, rootType, (const VkSampleLocationEXT*)(toCount->pSampleLocations + i), count);
+        }
+    }
+}
+
+void count_VkAttachmentSampleLocationsEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAttachmentSampleLocationsEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    count_VkSampleLocationsInfoEXT(featureBits, rootType, (VkSampleLocationsInfoEXT*)(&toCount->sampleLocationsInfo), count);
+}
+
+void count_VkSubpassSampleLocationsEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubpassSampleLocationsEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    count_VkSampleLocationsInfoEXT(featureBits, rootType, (VkSampleLocationsInfoEXT*)(&toCount->sampleLocationsInfo), count);
+}
+
+void count_VkRenderPassSampleLocationsBeginInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassSampleLocationsBeginInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->attachmentInitialSampleLocationsCount; ++i)
+        {
+            count_VkAttachmentSampleLocationsEXT(featureBits, rootType, (const VkAttachmentSampleLocationsEXT*)(toCount->pAttachmentInitialSampleLocations + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->postSubpassSampleLocationsCount; ++i)
+        {
+            count_VkSubpassSampleLocationsEXT(featureBits, rootType, (const VkSubpassSampleLocationsEXT*)(toCount->pPostSubpassSampleLocations + i), count);
+        }
+    }
+}
+
+void count_VkPipelineSampleLocationsStateCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineSampleLocationsStateCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    count_VkSampleLocationsInfoEXT(featureBits, rootType, (VkSampleLocationsInfoEXT*)(&toCount->sampleLocationsInfo), count);
+}
+
+void count_VkPhysicalDeviceSampleLocationsPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSampleLocationsPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkSampleCountFlags);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->maxSampleLocationGridSize), count);
+    *count += 2 * sizeof(float);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkMultisamplePropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMultisamplePropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->maxSampleLocationGridSize), count);
+}
+
+#endif
+#ifdef VK_EXT_blend_operation_advanced
+void count_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPipelineColorBlendAdvancedStateCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineColorBlendAdvancedStateCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBlendOverlapEXT);
+}
+
+#endif
+#ifdef VK_NV_fragment_coverage_to_color
+void count_VkPipelineCoverageToColorStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineCoverageToColorStateCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineCoverageToColorStateCreateFlagsNV);
+    *count += sizeof(VkBool32);
+    *count += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_NV_framebuffer_mixed_samples
+void count_VkPipelineCoverageModulationStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineCoverageModulationStateCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineCoverageModulationStateCreateFlagsNV);
+    *count += sizeof(VkCoverageModulationModeNV);
+    *count += sizeof(VkBool32);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pCoverageModulationTable)
+    {
+        if (toCount)
+        {
+            *count += toCount->coverageModulationTableCount * sizeof(const float);
+        }
+    }
+}
+
+#endif
+#ifdef VK_NV_fill_rectangle
+#endif
+#ifdef VK_NV_shader_sm_builtins
+void count_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_post_depth_coverage
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+void count_VkDrmFormatModifierPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDrmFormatModifierPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint64_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkFormatFeatureFlags);
+}
+
+void count_VkDrmFormatModifierPropertiesListEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDrmFormatModifierPropertiesListEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pDrmFormatModifierProperties)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->drmFormatModifierCount; ++i)
+            {
+                count_VkDrmFormatModifierPropertiesEXT(featureBits, rootType, (VkDrmFormatModifierPropertiesEXT*)(toCount->pDrmFormatModifierProperties + i), count);
+            }
+        }
+    }
+}
+
+void count_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageDrmFormatModifierInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint64_t);
+    *count += sizeof(VkSharingMode);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pQueueFamilyIndices)
+    {
+        if (toCount)
+        {
+            *count += toCount->queueFamilyIndexCount * sizeof(const uint32_t);
+        }
+    }
+}
+
+void count_VkImageDrmFormatModifierListCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierListCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->drmFormatModifierCount * sizeof(const uint64_t);
+    }
+}
+
+void count_VkImageDrmFormatModifierExplicitCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierExplicitCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint64_t);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->drmFormatModifierPlaneCount; ++i)
+        {
+            count_VkSubresourceLayout(featureBits, rootType, (const VkSubresourceLayout*)(toCount->pPlaneLayouts + i), count);
+        }
+    }
+}
+
+void count_VkImageDrmFormatModifierPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint64_t);
+}
+
+#endif
+#ifdef VK_EXT_validation_cache
+void count_VkValidationCacheCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkValidationCacheCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkValidationCacheCreateFlagsEXT);
+    *count += 8;
+    if (toCount)
+    {
+        *count += toCount->initialDataSize * sizeof(const uint8_t);
+    }
+}
+
+void count_VkShaderModuleValidationCacheCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkShaderModuleValidationCacheCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+}
+
+#endif
+#ifdef VK_EXT_descriptor_indexing
+#endif
+#ifdef VK_EXT_shader_viewport_index_layer
+#endif
+#ifdef VK_NV_shading_rate_image
+void count_VkShadingRatePaletteNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkShadingRatePaletteNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->shadingRatePaletteEntryCount * sizeof(const VkShadingRatePaletteEntryNV);
+    }
+}
+
+void count_VkPipelineViewportShadingRateImageStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineViewportShadingRateImageStateCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pShadingRatePalettes)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->viewportCount; ++i)
+            {
+                count_VkShadingRatePaletteNV(featureBits, rootType, (const VkShadingRatePaletteNV*)(toCount->pShadingRatePalettes + i), count);
+            }
+        }
+    }
+}
+
+void count_VkPhysicalDeviceShadingRateImageFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShadingRateImageFeaturesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceShadingRateImagePropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShadingRateImagePropertiesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->shadingRateTexelSize), count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkCoarseSampleLocationNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCoarseSampleLocationNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkCoarseSampleOrderCustomNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCoarseSampleOrderCustomNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkShadingRatePaletteEntryNV);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->sampleLocationCount; ++i)
+        {
+            count_VkCoarseSampleLocationNV(featureBits, rootType, (const VkCoarseSampleLocationNV*)(toCount->pSampleLocations + i), count);
+        }
+    }
+}
+
+void count_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkCoarseSampleOrderTypeNV);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->customSampleOrderCount; ++i)
+        {
+            count_VkCoarseSampleOrderCustomNV(featureBits, rootType, (const VkCoarseSampleOrderCustomNV*)(toCount->pCustomSampleOrders + i), count);
+        }
+    }
+}
+
+#endif
+#ifdef VK_NV_ray_tracing
+void count_VkRayTracingShaderGroupCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRayTracingShaderGroupCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkRayTracingShaderGroupTypeKHR);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkRayTracingPipelineCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRayTracingPipelineCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineCreateFlags);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->stageCount; ++i)
+        {
+            count_VkPipelineShaderStageCreateInfo(featureBits, rootType, (const VkPipelineShaderStageCreateInfo*)(toCount->pStages + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->groupCount; ++i)
+        {
+            count_VkRayTracingShaderGroupCreateInfoNV(featureBits, rootType, (const VkRayTracingShaderGroupCreateInfoNV*)(toCount->pGroups + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(int32_t);
+}
+
+void count_VkGeometryTrianglesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGeometryTrianglesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkFormat);
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkIndexType);
+    uint64_t cgen_var_2;
+    *count += 1 * 8;
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkGeometryAABBNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGeometryAABBNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkGeometryDataNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGeometryDataNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    count_VkGeometryTrianglesNV(featureBits, rootType, (VkGeometryTrianglesNV*)(&toCount->triangles), count);
+    count_VkGeometryAABBNV(featureBits, rootType, (VkGeometryAABBNV*)(&toCount->aabbs), count);
+}
+
+void count_VkGeometryNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGeometryNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkGeometryTypeKHR);
+    count_VkGeometryDataNV(featureBits, rootType, (VkGeometryDataNV*)(&toCount->geometry), count);
+    *count += sizeof(VkGeometryFlagsKHR);
+}
+
+void count_VkAccelerationStructureInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkAccelerationStructureTypeNV);
+    *count += sizeof(VkBuildAccelerationStructureFlagsNV);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->geometryCount; ++i)
+        {
+            count_VkGeometryNV(featureBits, rootType, (const VkGeometryNV*)(toCount->pGeometries + i), count);
+        }
+    }
+}
+
+void count_VkAccelerationStructureCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceSize);
+    count_VkAccelerationStructureInfoNV(featureBits, rootType, (VkAccelerationStructureInfoNV*)(&toCount->info), count);
+}
+
+void count_VkBindAccelerationStructureMemoryInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindAccelerationStructureMemoryInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->deviceIndexCount * sizeof(const uint32_t);
+    }
+}
+
+void count_VkWriteDescriptorSetAccelerationStructureNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetAccelerationStructureNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pAccelerationStructures)
+    {
+        if (toCount->accelerationStructureCount)
+        {
+            *count += toCount->accelerationStructureCount * 8;
+        }
+    }
+}
+
+void count_VkAccelerationStructureMemoryRequirementsInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureMemoryRequirementsInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkAccelerationStructureMemoryRequirementsTypeNV);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+}
+
+void count_VkPhysicalDeviceRayTracingPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPropertiesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint64_t);
+    *count += sizeof(uint64_t);
+    *count += sizeof(uint64_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkTransformMatrixKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkTransformMatrixKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += ((3)*(4)) * sizeof(float);
+}
+
+void count_VkAabbPositionsKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAabbPositionsKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(float);
+    *count += sizeof(float);
+}
+
+void count_VkAccelerationStructureInstanceKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureInstanceKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    count_VkTransformMatrixKHR(featureBits, rootType, (VkTransformMatrixKHR*)(&toCount->transform), count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkGeometryInstanceFlagsKHR);
+    *count += sizeof(uint64_t);
+}
+
+#endif
+#ifdef VK_NV_representative_fragment_test
+void count_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineRepresentativeFragmentTestStateCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_filter_cubic
+void count_VkPhysicalDeviceImageViewImageFormatInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageViewImageFormatInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkImageViewType);
+}
+
+void count_VkFilterCubicImageViewImageFormatPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFilterCubicImageViewImageFormatPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
+#endif
+#ifdef VK_EXT_global_priority
+void count_VkDeviceQueueGlobalPriorityCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceQueueGlobalPriorityCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkQueueGlobalPriorityEXT);
+}
+
+#endif
+#ifdef VK_EXT_external_memory_host
+void count_VkImportMemoryHostPointerInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportMemoryHostPointerInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkExternalMemoryHandleTypeFlagBits);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pHostPointer)
+    {
+        *count += sizeof(uint8_t);
+    }
+}
+
+void count_VkMemoryHostPointerPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryHostPointerPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalMemoryHostPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceSize);
+}
+
+#endif
+#ifdef VK_AMD_buffer_marker
+#endif
+#ifdef VK_AMD_pipeline_compiler_control
+void count_VkPipelineCompilerControlCreateInfoAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineCompilerControlCreateInfoAMD* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineCompilerControlFlagsAMD);
+}
+
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+void count_VkCalibratedTimestampInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCalibratedTimestampInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkTimeDomainEXT);
+}
+
+#endif
+#ifdef VK_AMD_shader_core_properties
+void count_VkPhysicalDeviceShaderCorePropertiesAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderCorePropertiesAMD* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_AMD_memory_overallocation_behavior
+void count_VkDeviceMemoryOverallocationCreateInfoAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceMemoryOverallocationCreateInfoAMD* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkMemoryOverallocationBehaviorAMD);
+}
+
+#endif
+#ifdef VK_EXT_vertex_attribute_divisor
+void count_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkVertexInputBindingDivisorDescriptionEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkVertexInputBindingDivisorDescriptionEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPipelineVertexInputDivisorStateCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineVertexInputDivisorStateCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->vertexBindingDivisorCount; ++i)
+        {
+            count_VkVertexInputBindingDivisorDescriptionEXT(featureBits, rootType, (const VkVertexInputBindingDivisorDescriptionEXT*)(toCount->pVertexBindingDivisors + i), count);
+        }
+    }
+}
+
+void count_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_GGP_frame_token
+void count_VkPresentFrameTokenGGP(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPresentFrameTokenGGP* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(GgpFrameToken);
+}
+
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+void count_VkPipelineCreationFeedbackEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineCreationFeedbackEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkPipelineCreationFeedbackFlagsEXT);
+    *count += sizeof(uint64_t);
+}
+
+void count_VkPipelineCreationFeedbackCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineCreationFeedbackCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkPipelineCreationFeedbackEXT(featureBits, rootType, (VkPipelineCreationFeedbackEXT*)(toCount->pPipelineCreationFeedback), count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->pipelineStageCreationFeedbackCount; ++i)
+        {
+            count_VkPipelineCreationFeedbackEXT(featureBits, rootType, (VkPipelineCreationFeedbackEXT*)(toCount->pPipelineStageCreationFeedbacks + i), count);
+        }
+    }
+}
+
+#endif
+#ifdef VK_NV_shader_subgroup_partitioned
+#endif
+#ifdef VK_NV_compute_shader_derivatives
+void count_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_NV_mesh_shader
+void count_VkPhysicalDeviceMeshShaderFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMeshShaderFeaturesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceMeshShaderPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMeshShaderPropertiesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += 3 * sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += 3 * sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkDrawMeshTasksIndirectCommandNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDrawMeshTasksIndirectCommandNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+void count_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_NV_shader_image_footprint
+void count_VkPhysicalDeviceShaderImageFootprintFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderImageFootprintFeaturesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_NV_scissor_exclusive
+void count_VkPipelineViewportExclusiveScissorStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineViewportExclusiveScissorStateCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pExclusiveScissors)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->exclusiveScissorCount; ++i)
+            {
+                count_VkRect2D(featureBits, rootType, (const VkRect2D*)(toCount->pExclusiveScissors + i), count);
+            }
+        }
+    }
+}
+
+void count_VkPhysicalDeviceExclusiveScissorFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExclusiveScissorFeaturesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_NV_device_diagnostic_checkpoints
+void count_VkQueueFamilyCheckpointPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkQueueFamilyCheckpointPropertiesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineStageFlags);
+}
+
+void count_VkCheckpointDataNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCheckpointDataNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineStageFlagBits);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pCheckpointMarker)
+    {
+        *count += sizeof(uint8_t);
+    }
+}
+
+#endif
+#ifdef VK_INTEL_shader_integer_functions2
+void count_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_INTEL_performance_query
+void count_VkPerformanceValueDataINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceValueDataINTEL* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPerformanceValueINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceValueINTEL* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkPerformanceValueTypeINTEL);
+    count_VkPerformanceValueDataINTEL(featureBits, rootType, (VkPerformanceValueDataINTEL*)(&toCount->data), count);
+}
+
+void count_VkInitializePerformanceApiInfoINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkInitializePerformanceApiInfoINTEL* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pUserData)
+    {
+        *count += sizeof(uint8_t);
+    }
+}
+
+void count_VkQueryPoolPerformanceQueryCreateInfoINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkQueryPoolPerformanceQueryCreateInfoINTEL* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkQueryPoolSamplingModeINTEL);
+}
+
+void count_VkPerformanceMarkerInfoINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceMarkerInfoINTEL* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint64_t);
+}
+
+void count_VkPerformanceStreamMarkerInfoINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceStreamMarkerInfoINTEL* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPerformanceOverrideInfoINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceOverrideInfoINTEL* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPerformanceOverrideTypeINTEL);
+    *count += sizeof(VkBool32);
+    *count += sizeof(uint64_t);
+}
+
+void count_VkPerformanceConfigurationAcquireInfoINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceConfigurationAcquireInfoINTEL* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPerformanceConfigurationTypeINTEL);
+}
+
+#endif
+#ifdef VK_EXT_pci_bus_info
+void count_VkPhysicalDevicePCIBusInfoPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePCIBusInfoPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_AMD_display_native_hdr
+void count_VkDisplayNativeHdrSurfaceCapabilitiesAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayNativeHdrSurfaceCapabilitiesAMD* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkSwapchainDisplayNativeHdrCreateInfoAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSwapchainDisplayNativeHdrCreateInfoAMD* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+void count_VkImagePipeSurfaceCreateInfoFUCHSIA(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImagePipeSurfaceCreateInfoFUCHSIA* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkImagePipeSurfaceCreateFlagsFUCHSIA);
+    *count += sizeof(zx_handle_t);
+}
+
+#endif
+#ifdef VK_EXT_metal_surface
+void count_VkMetalSurfaceCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMetalSurfaceCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkMetalSurfaceCreateFlagsEXT);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pLayer)
+    {
+        *count += sizeof(const CAMetalLayer);
+    }
+}
+
+#endif
+#ifdef VK_EXT_fragment_density_map
+void count_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMapFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMapPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->minFragmentDensityTexelSize), count);
+    count_VkExtent2D(featureBits, rootType, (VkExtent2D*)(&toCount->maxFragmentDensityTexelSize), count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkRenderPassFragmentDensityMapCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassFragmentDensityMapCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkAttachmentReference(featureBits, rootType, (VkAttachmentReference*)(&toCount->fragmentDensityMapAttachment), count);
+}
+
+#endif
+#ifdef VK_EXT_scalar_block_layout
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+void count_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkShaderStageFlags);
+}
+
+void count_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_AMD_shader_core_properties2
+void count_VkPhysicalDeviceShaderCoreProperties2AMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderCoreProperties2AMD* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkShaderCorePropertiesFlagsAMD);
+    *count += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_AMD_device_coherent_memory
+void count_VkPhysicalDeviceCoherentMemoryFeaturesAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCoherentMemoryFeaturesAMD* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+void count_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_memory_budget
+void count_VkPhysicalDeviceMemoryBudgetPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryBudgetPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += VK_MAX_MEMORY_HEAPS * sizeof(VkDeviceSize);
+    *count += VK_MAX_MEMORY_HEAPS * sizeof(VkDeviceSize);
+}
+
+#endif
+#ifdef VK_EXT_memory_priority
+void count_VkPhysicalDeviceMemoryPriorityFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryPriorityFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkMemoryPriorityAllocateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryPriorityAllocateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(float);
+}
+
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+void count_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_buffer_device_address
+void count_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkBufferDeviceAddressCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferDeviceAddressCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceAddress);
+}
+
+#endif
+#ifdef VK_EXT_tooling_info
+void count_VkPhysicalDeviceToolPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceToolPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += VK_MAX_EXTENSION_NAME_SIZE * sizeof(char);
+    *count += VK_MAX_EXTENSION_NAME_SIZE * sizeof(char);
+    *count += sizeof(VkToolPurposeFlagsEXT);
+    *count += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    *count += VK_MAX_EXTENSION_NAME_SIZE * sizeof(char);
+}
+
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+#endif
+#ifdef VK_EXT_validation_features
+void count_VkValidationFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkValidationFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->enabledValidationFeatureCount * sizeof(const VkValidationFeatureEnableEXT);
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->disabledValidationFeatureCount * sizeof(const VkValidationFeatureDisableEXT);
+    }
+}
+
+#endif
+#ifdef VK_NV_cooperative_matrix
+void count_VkCooperativeMatrixPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCooperativeMatrixPropertiesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkComponentTypeNV);
+    *count += sizeof(VkComponentTypeNV);
+    *count += sizeof(VkComponentTypeNV);
+    *count += sizeof(VkComponentTypeNV);
+    *count += sizeof(VkScopeNV);
+}
+
+void count_VkPhysicalDeviceCooperativeMatrixFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCooperativeMatrixFeaturesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceCooperativeMatrixPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCooperativeMatrixPropertiesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkShaderStageFlags);
+}
+
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+void count_VkPhysicalDeviceCoverageReductionModeFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCoverageReductionModeFeaturesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPipelineCoverageReductionStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineCoverageReductionStateCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineCoverageReductionStateCreateFlagsNV);
+    *count += sizeof(VkCoverageReductionModeNV);
+}
+
+void count_VkFramebufferMixedSamplesCombinationNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFramebufferMixedSamplesCombinationNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkCoverageReductionModeNV);
+    *count += sizeof(VkSampleCountFlagBits);
+    *count += sizeof(VkSampleCountFlags);
+    *count += sizeof(VkSampleCountFlags);
+}
+
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+void count_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+void count_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+void count_VkSurfaceFullScreenExclusiveInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceFullScreenExclusiveInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkFullScreenExclusiveEXT);
+}
+
+void count_VkSurfaceCapabilitiesFullScreenExclusiveEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceCapabilitiesFullScreenExclusiveEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkSurfaceFullScreenExclusiveWin32InfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceFullScreenExclusiveWin32InfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(HMONITOR);
+}
+
+#endif
+#ifdef VK_EXT_headless_surface
+void count_VkHeadlessSurfaceCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkHeadlessSurfaceCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkHeadlessSurfaceCreateFlagsEXT);
+}
+
+#endif
+#ifdef VK_EXT_line_rasterization
+void count_VkPhysicalDeviceLineRasterizationFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLineRasterizationFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceLineRasterizationPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLineRasterizationPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPipelineRasterizationLineStateCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineRasterizationLineStateCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkLineRasterizationModeEXT);
+    *count += sizeof(VkBool32);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint16_t);
+}
+
+#endif
+#ifdef VK_EXT_shader_atomic_float
+void count_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_host_query_reset
+#endif
+#ifdef VK_EXT_index_type_uint8
+void count_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceIndexTypeUint8FeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+void count_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+void count_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_NV_device_generated_commands
+void count_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkGraphicsShaderGroupCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGraphicsShaderGroupCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->stageCount; ++i)
+        {
+            count_VkPipelineShaderStageCreateInfo(featureBits, rootType, (const VkPipelineShaderStageCreateInfo*)(toCount->pStages + i), count);
+        }
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pVertexInputState)
+    {
+        count_VkPipelineVertexInputStateCreateInfo(featureBits, rootType, (const VkPipelineVertexInputStateCreateInfo*)(toCount->pVertexInputState), count);
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pTessellationState)
+    {
+        count_VkPipelineTessellationStateCreateInfo(featureBits, rootType, (const VkPipelineTessellationStateCreateInfo*)(toCount->pTessellationState), count);
+    }
+}
+
+void count_VkGraphicsPipelineShaderGroupsCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGraphicsPipelineShaderGroupsCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->groupCount; ++i)
+        {
+            count_VkGraphicsShaderGroupCreateInfoNV(featureBits, rootType, (const VkGraphicsShaderGroupCreateInfoNV*)(toCount->pGroups + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    if (toCount->pipelineCount)
+    {
+        *count += toCount->pipelineCount * 8;
+    }
+}
+
+void count_VkBindShaderGroupIndirectCommandNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindShaderGroupIndirectCommandNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+}
+
+void count_VkBindIndexBufferIndirectCommandNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindIndexBufferIndirectCommandNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkDeviceAddress);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkIndexType);
+}
+
+void count_VkBindVertexBufferIndirectCommandNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindVertexBufferIndirectCommandNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkDeviceAddress);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkSetStateFlagsIndirectCommandNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSetStateFlagsIndirectCommandNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+}
+
+void count_VkIndirectCommandsStreamNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkIndirectCommandsStreamNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkIndirectCommandsLayoutTokenNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkIndirectCommandsLayoutTokenNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkIndirectCommandsTokenTypeNV);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkBool32);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkShaderStageFlags);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkIndirectStateFlagsNV);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->indexTypeCount * sizeof(const VkIndexType);
+    }
+    if (toCount)
+    {
+        *count += toCount->indexTypeCount * sizeof(const uint32_t);
+    }
+}
+
+void count_VkIndirectCommandsLayoutCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkIndirectCommandsLayoutCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkIndirectCommandsLayoutUsageFlagsNV);
+    *count += sizeof(VkPipelineBindPoint);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->tokenCount; ++i)
+        {
+            count_VkIndirectCommandsLayoutTokenNV(featureBits, rootType, (const VkIndirectCommandsLayoutTokenNV*)(toCount->pTokens + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        *count += toCount->streamCount * sizeof(const uint32_t);
+    }
+}
+
+void count_VkGeneratedCommandsInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGeneratedCommandsInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineBindPoint);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->streamCount; ++i)
+        {
+            count_VkIndirectCommandsStreamNV(featureBits, rootType, (const VkIndirectCommandsStreamNV*)(toCount->pStreams + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    uint64_t cgen_var_2;
+    *count += 1 * 8;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+    uint64_t cgen_var_3;
+    *count += 1 * 8;
+    *count += sizeof(VkDeviceSize);
+    uint64_t cgen_var_4;
+    *count += 1 * 8;
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkGeneratedCommandsMemoryRequirementsInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGeneratedCommandsMemoryRequirementsInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineBindPoint);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+void count_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_transform
+void count_VkRenderPassTransformBeginInfoQCOM(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassTransformBeginInfoQCOM* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkSurfaceTransformFlagBitsKHR);
+}
+
+void count_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceRenderPassTransformInfoQCOM* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkSurfaceTransformFlagBitsKHR);
+    count_VkRect2D(featureBits, rootType, (VkRect2D*)(&toCount->renderArea), count);
+}
+
+#endif
+#ifdef VK_EXT_device_memory_report
+void count_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkDeviceMemoryReportCallbackDataEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceMemoryReportCallbackDataEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceMemoryReportFlagsEXT);
+    *count += sizeof(VkDeviceMemoryReportEventTypeEXT);
+    *count += sizeof(uint64_t);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkObjectType);
+    *count += sizeof(uint64_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkDeviceDeviceMemoryReportCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceDeviceMemoryReportCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceMemoryReportFlagsEXT);
+    *count += 8;
+    *count += sizeof(uint8_t);
+}
+
+#endif
+#ifdef VK_EXT_robustness2
+void count_VkPhysicalDeviceRobustness2FeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRobustness2FeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceRobustness2PropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRobustness2PropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+}
+
+#endif
+#ifdef VK_EXT_custom_border_color
+void count_VkSamplerCustomBorderColorCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSamplerCustomBorderColorCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkClearColorValue(featureBits, rootType, (VkClearColorValue*)(&toCount->customBorderColor), count);
+    *count += sizeof(VkFormat);
+}
+
+void count_VkPhysicalDeviceCustomBorderColorPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCustomBorderColorPropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPhysicalDeviceCustomBorderColorFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCustomBorderColorFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+void count_VkPhysicalDevicePrivateDataFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePrivateDataFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkDevicePrivateDataCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDevicePrivateDataCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkPrivateDataSlotCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPrivateDataSlotCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPrivateDataSlotCreateFlagsEXT);
+}
+
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+void count_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_NV_device_diagnostics_config
+void count_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDiagnosticsConfigFeaturesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkDeviceDiagnosticsConfigCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceDiagnosticsConfigCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceDiagnosticsConfigFlagsNV);
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+void count_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkSampleCountFlagBits);
+}
+
+void count_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineFragmentShadingRateEnumStateCreateInfoNV* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkFragmentShadingRateTypeNV);
+    *count += sizeof(VkFragmentShadingRateNV);
+    *count += 2 * sizeof(VkFragmentShadingRateCombinerOpKHR);
+}
+
+#endif
+#ifdef VK_EXT_fragment_density_map2
+void count_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+void count_VkCopyCommandTransformInfoQCOM(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyCommandTransformInfoQCOM* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkSurfaceTransformFlagBitsKHR);
+}
+
+#endif
+#ifdef VK_EXT_image_robustness
+void count_VkPhysicalDeviceImageRobustnessFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageRobustnessFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_4444_formats
+void count_VkPhysicalDevice4444FormatsFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevice4444FormatsFeaturesEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_directfb_surface
+void count_VkDirectFBSurfaceCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDirectFBSurfaceCreateInfoEXT* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDirectFBSurfaceCreateFlagsEXT);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->dfb)
+    {
+        *count += sizeof(IDirectFB);
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->surface)
+    {
+        *count += sizeof(IDirectFBSurface);
+    }
+}
+
+#endif
+#ifdef VK_GOOGLE_gfxstream
+void count_VkImportColorBufferGOOGLE(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportColorBufferGOOGLE* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkImportBufferGOOGLE(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportBufferGOOGLE* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkImportPhysicalAddressGOOGLE(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportPhysicalAddressGOOGLE* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint64_t);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkFormat);
+    *count += sizeof(VkImageTiling);
+    *count += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_KHR_acceleration_structure
+void count_VkDeviceOrHostAddressKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceOrHostAddressKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkDeviceAddress);
+}
+
+void count_VkDeviceOrHostAddressConstKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceOrHostAddressConstKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkDeviceAddress);
+}
+
+void count_VkAccelerationStructureBuildRangeInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildRangeInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkAccelerationStructureGeometryTrianglesDataKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryTrianglesDataKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkFormat);
+    count_VkDeviceOrHostAddressConstKHR(featureBits, rootType, (VkDeviceOrHostAddressConstKHR*)(&toCount->vertexData), count);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(uint32_t);
+    *count += sizeof(VkIndexType);
+    count_VkDeviceOrHostAddressConstKHR(featureBits, rootType, (VkDeviceOrHostAddressConstKHR*)(&toCount->indexData), count);
+    count_VkDeviceOrHostAddressConstKHR(featureBits, rootType, (VkDeviceOrHostAddressConstKHR*)(&toCount->transformData), count);
+}
+
+void count_VkAccelerationStructureGeometryAabbsDataKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryAabbsDataKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkDeviceOrHostAddressConstKHR(featureBits, rootType, (VkDeviceOrHostAddressConstKHR*)(&toCount->data), count);
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkAccelerationStructureGeometryInstancesDataKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryInstancesDataKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    count_VkDeviceOrHostAddressConstKHR(featureBits, rootType, (VkDeviceOrHostAddressConstKHR*)(&toCount->data), count);
+}
+
+void count_VkAccelerationStructureGeometryDataKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryDataKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    count_VkAccelerationStructureGeometryTrianglesDataKHR(featureBits, rootType, (VkAccelerationStructureGeometryTrianglesDataKHR*)(&toCount->triangles), count);
+}
+
+void count_VkAccelerationStructureGeometryKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkGeometryTypeKHR);
+    count_VkAccelerationStructureGeometryDataKHR(featureBits, rootType, (VkAccelerationStructureGeometryDataKHR*)(&toCount->geometry), count);
+    *count += sizeof(VkGeometryFlagsKHR);
+}
+
+void count_VkAccelerationStructureBuildGeometryInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildGeometryInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkAccelerationStructureTypeKHR);
+    *count += sizeof(VkBuildAccelerationStructureFlagsKHR);
+    *count += sizeof(VkBuildAccelerationStructureModeKHR);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pGeometries)
+    {
+        if (toCount)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toCount->geometryCount; ++i)
+            {
+                count_VkAccelerationStructureGeometryKHR(featureBits, rootType, (const VkAccelerationStructureGeometryKHR*)(toCount->pGeometries + i), count);
+            }
+        }
+    }
+    count_VkDeviceOrHostAddressKHR(featureBits, rootType, (VkDeviceOrHostAddressKHR*)(&toCount->scratchData), count);
+}
+
+void count_VkAccelerationStructureCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureCreateInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkAccelerationStructureCreateFlagsKHR);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkAccelerationStructureTypeKHR);
+    *count += sizeof(VkDeviceAddress);
+}
+
+void count_VkWriteDescriptorSetAccelerationStructureKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetAccelerationStructureKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pAccelerationStructures)
+    {
+        if (toCount->accelerationStructureCount)
+        {
+            *count += toCount->accelerationStructureCount * 8;
+        }
+    }
+}
+
+void count_VkPhysicalDeviceAccelerationStructureFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceAccelerationStructureFeaturesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceAccelerationStructurePropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceAccelerationStructurePropertiesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint64_t);
+    *count += sizeof(uint64_t);
+    *count += sizeof(uint64_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkAccelerationStructureDeviceAddressInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureDeviceAddressInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+}
+
+void count_VkAccelerationStructureVersionInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureVersionInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    if (toCount)
+    {
+        *count += 2*VK_UUID_SIZE * sizeof(const uint8_t);
+    }
+}
+
+void count_VkCopyAccelerationStructureToMemoryInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyAccelerationStructureToMemoryInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    count_VkDeviceOrHostAddressKHR(featureBits, rootType, (VkDeviceOrHostAddressKHR*)(&toCount->dst), count);
+    *count += sizeof(VkCopyAccelerationStructureModeKHR);
+}
+
+void count_VkCopyMemoryToAccelerationStructureInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyMemoryToAccelerationStructureInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    count_VkDeviceOrHostAddressConstKHR(featureBits, rootType, (VkDeviceOrHostAddressConstKHR*)(&toCount->src), count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    *count += sizeof(VkCopyAccelerationStructureModeKHR);
+}
+
+void count_VkCopyAccelerationStructureInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyAccelerationStructureInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(VkCopyAccelerationStructureModeKHR);
+}
+
+void count_VkAccelerationStructureBuildSizesInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildSizesInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+}
+
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+void count_VkRayTracingShaderGroupCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRayTracingShaderGroupCreateInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkRayTracingShaderGroupTypeKHR);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pShaderGroupCaptureReplayHandle)
+    {
+        *count += sizeof(const uint8_t);
+    }
+}
+
+void count_VkRayTracingPipelineInterfaceCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRayTracingPipelineInterfaceCreateInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkRayTracingPipelineCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRayTracingPipelineCreateInfoKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkPipelineCreateFlags);
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->stageCount; ++i)
+        {
+            count_VkPipelineShaderStageCreateInfo(featureBits, rootType, (const VkPipelineShaderStageCreateInfo*)(toCount->pStages + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    if (toCount)
+    {
+        for (uint32_t i = 0; i < (uint32_t)toCount->groupCount; ++i)
+        {
+            count_VkRayTracingShaderGroupCreateInfoKHR(featureBits, rootType, (const VkRayTracingShaderGroupCreateInfoKHR*)(toCount->pGroups + i), count);
+        }
+    }
+    *count += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pLibraryInfo)
+    {
+        count_VkPipelineLibraryCreateInfoKHR(featureBits, rootType, (const VkPipelineLibraryCreateInfoKHR*)(toCount->pLibraryInfo), count);
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pLibraryInterface)
+    {
+        count_VkRayTracingPipelineInterfaceCreateInfoKHR(featureBits, rootType, (const VkRayTracingPipelineInterfaceCreateInfoKHR*)(toCount->pLibraryInterface), count);
+    }
+    // WARNING PTR CHECK
+    *count += 8;
+    if (toCount->pDynamicState)
+    {
+        count_VkPipelineDynamicStateCreateInfo(featureBits, rootType, (const VkPipelineDynamicStateCreateInfo*)(toCount->pDynamicState), count);
+    }
+    uint64_t cgen_var_0;
+    *count += 1 * 8;
+    uint64_t cgen_var_1;
+    *count += 1 * 8;
+    *count += sizeof(int32_t);
+}
+
+void count_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPipelineFeaturesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+    *count += sizeof(VkBool32);
+}
+
+void count_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPipelinePropertiesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+void count_VkStridedDeviceAddressRegionKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkStridedDeviceAddressRegionKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkDeviceAddress);
+    *count += sizeof(VkDeviceSize);
+    *count += sizeof(VkDeviceSize);
+}
+
+void count_VkTraceRaysIndirectCommandKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkTraceRaysIndirectCommandKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+    *count += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_KHR_ray_query
+void count_VkPhysicalDeviceRayQueryFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayQueryFeaturesKHR* toCount,
+    size_t* count)
+{
+    (void)featureBits;
+    (void)rootType;
+    (void)toCount;
+    (void)count;
+    *count += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = toCount->sType;
+    }
+    count_extension_struct(featureBits, rootType, toCount->pNext, count);
+    *count += sizeof(VkBool32);
+}
+
+#endif
+void count_extension_struct(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const void* structExtension,
+    size_t* count)
+{
+    VkInstanceCreateInfo* structAccess = (VkInstanceCreateInfo*)(structExtension);
+    size_t currExtSize = goldfish_vk_extension_struct_size_with_stream_features(featureBits, rootType, structExtension);
+    if (!currExtSize && structExtension)
+    {
+        // unknown struct extension; skip and call on its pNext field
+        count_extension_struct(featureBits, rootType, (void*)structAccess->pNext, count);
+        return;
+    }
+    else
+    {
+        // known or null extension struct
+        *count += sizeof(uint32_t);
+        if (!currExtSize)
+        {
+            // exit if this was a null extension struct (size == 0 in this branch)
+            return;
+        }
+    }
+    *count += sizeof(VkStructureType);
+    if (!structExtension)
+    {
+        return;
+    }
+    uint32_t structType = (uint32_t)goldfish_vk_struct_type(structExtension);
+    switch(structType)
+    {
+#ifdef VK_VERSION_1_1
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES:
+        {
+            count_VkPhysicalDeviceSubgroupProperties(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceSubgroupProperties*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES:
+        {
+            count_VkPhysicalDevice16BitStorageFeatures(featureBits, rootType, reinterpret_cast<const VkPhysicalDevice16BitStorageFeatures*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
+        {
+            count_VkMemoryDedicatedRequirements(featureBits, rootType, reinterpret_cast<const VkMemoryDedicatedRequirements*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
+        {
+            count_VkMemoryDedicatedAllocateInfo(featureBits, rootType, reinterpret_cast<const VkMemoryDedicatedAllocateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO:
+        {
+            count_VkMemoryAllocateFlagsInfo(featureBits, rootType, reinterpret_cast<const VkMemoryAllocateFlagsInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO:
+        {
+            count_VkDeviceGroupRenderPassBeginInfo(featureBits, rootType, reinterpret_cast<const VkDeviceGroupRenderPassBeginInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO:
+        {
+            count_VkDeviceGroupCommandBufferBeginInfo(featureBits, rootType, reinterpret_cast<const VkDeviceGroupCommandBufferBeginInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO:
+        {
+            count_VkDeviceGroupSubmitInfo(featureBits, rootType, reinterpret_cast<const VkDeviceGroupSubmitInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO:
+        {
+            count_VkDeviceGroupBindSparseInfo(featureBits, rootType, reinterpret_cast<const VkDeviceGroupBindSparseInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO:
+        {
+            count_VkBindBufferMemoryDeviceGroupInfo(featureBits, rootType, reinterpret_cast<const VkBindBufferMemoryDeviceGroupInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO:
+        {
+            count_VkBindImageMemoryDeviceGroupInfo(featureBits, rootType, reinterpret_cast<const VkBindImageMemoryDeviceGroupInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO:
+        {
+            count_VkDeviceGroupDeviceCreateInfo(featureBits, rootType, reinterpret_cast<const VkDeviceGroupDeviceCreateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
+        {
+            count_VkPhysicalDeviceFeatures2(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceFeatures2*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES:
+        {
+            count_VkPhysicalDevicePointClippingProperties(featureBits, rootType, reinterpret_cast<const VkPhysicalDevicePointClippingProperties*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO:
+        {
+            count_VkRenderPassInputAttachmentAspectCreateInfo(featureBits, rootType, reinterpret_cast<const VkRenderPassInputAttachmentAspectCreateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO:
+        {
+            count_VkImageViewUsageCreateInfo(featureBits, rootType, reinterpret_cast<const VkImageViewUsageCreateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO:
+        {
+            count_VkPipelineTessellationDomainOriginStateCreateInfo(featureBits, rootType, reinterpret_cast<const VkPipelineTessellationDomainOriginStateCreateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
+        {
+            count_VkRenderPassMultiviewCreateInfo(featureBits, rootType, reinterpret_cast<const VkRenderPassMultiviewCreateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES:
+        {
+            count_VkPhysicalDeviceMultiviewFeatures(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceMultiviewFeatures*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES:
+        {
+            count_VkPhysicalDeviceMultiviewProperties(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceMultiviewProperties*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES:
+        {
+            count_VkPhysicalDeviceVariablePointersFeatures(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceVariablePointersFeatures*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES:
+        {
+            count_VkPhysicalDeviceProtectedMemoryFeatures(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceProtectedMemoryFeatures*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES:
+        {
+            count_VkPhysicalDeviceProtectedMemoryProperties(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceProtectedMemoryProperties*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO:
+        {
+            count_VkProtectedSubmitInfo(featureBits, rootType, reinterpret_cast<const VkProtectedSubmitInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO:
+        {
+            count_VkSamplerYcbcrConversionInfo(featureBits, rootType, reinterpret_cast<const VkSamplerYcbcrConversionInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO:
+        {
+            count_VkBindImagePlaneMemoryInfo(featureBits, rootType, reinterpret_cast<const VkBindImagePlaneMemoryInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO:
+        {
+            count_VkImagePlaneMemoryRequirementsInfo(featureBits, rootType, reinterpret_cast<const VkImagePlaneMemoryRequirementsInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES:
+        {
+            count_VkPhysicalDeviceSamplerYcbcrConversionFeatures(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceSamplerYcbcrConversionFeatures*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES:
+        {
+            count_VkSamplerYcbcrConversionImageFormatProperties(featureBits, rootType, reinterpret_cast<const VkSamplerYcbcrConversionImageFormatProperties*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO:
+        {
+            count_VkPhysicalDeviceExternalImageFormatInfo(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceExternalImageFormatInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES:
+        {
+            count_VkExternalImageFormatProperties(featureBits, rootType, reinterpret_cast<const VkExternalImageFormatProperties*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES:
+        {
+            count_VkPhysicalDeviceIDProperties(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceIDProperties*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO:
+        {
+            count_VkExternalMemoryImageCreateInfo(featureBits, rootType, reinterpret_cast<const VkExternalMemoryImageCreateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO:
+        {
+            count_VkExternalMemoryBufferCreateInfo(featureBits, rootType, reinterpret_cast<const VkExternalMemoryBufferCreateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
+        {
+            count_VkExportMemoryAllocateInfo(featureBits, rootType, reinterpret_cast<const VkExportMemoryAllocateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO:
+        {
+            count_VkExportFenceCreateInfo(featureBits, rootType, reinterpret_cast<const VkExportFenceCreateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO:
+        {
+            count_VkExportSemaphoreCreateInfo(featureBits, rootType, reinterpret_cast<const VkExportSemaphoreCreateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES:
+        {
+            count_VkPhysicalDeviceMaintenance3Properties(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceMaintenance3Properties*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
+        {
+            count_VkPhysicalDeviceShaderDrawParametersFeatures(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceShaderDrawParametersFeatures*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
+        {
+            count_VkPhysicalDeviceVulkan11Features(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceVulkan11Features*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
+        {
+            count_VkPhysicalDeviceVulkan11Properties(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceVulkan11Properties*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
+        {
+            count_VkPhysicalDeviceVulkan12Features(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceVulkan12Features*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
+        {
+            count_VkPhysicalDeviceVulkan12Properties(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceVulkan12Properties*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO:
+        {
+            count_VkImageFormatListCreateInfo(featureBits, rootType, reinterpret_cast<const VkImageFormatListCreateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES:
+        {
+            count_VkPhysicalDevice8BitStorageFeatures(featureBits, rootType, reinterpret_cast<const VkPhysicalDevice8BitStorageFeatures*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES:
+        {
+            count_VkPhysicalDeviceDriverProperties(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceDriverProperties*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES:
+        {
+            count_VkPhysicalDeviceShaderAtomicInt64Features(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceShaderAtomicInt64Features*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES:
+        {
+            count_VkPhysicalDeviceShaderFloat16Int8Features(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceShaderFloat16Int8Features*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES:
+        {
+            count_VkPhysicalDeviceFloatControlsProperties(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceFloatControlsProperties*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO:
+        {
+            count_VkDescriptorSetLayoutBindingFlagsCreateInfo(featureBits, rootType, reinterpret_cast<const VkDescriptorSetLayoutBindingFlagsCreateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES:
+        {
+            count_VkPhysicalDeviceDescriptorIndexingFeatures(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceDescriptorIndexingFeatures*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES:
+        {
+            count_VkPhysicalDeviceDescriptorIndexingProperties(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceDescriptorIndexingProperties*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO:
+        {
+            count_VkDescriptorSetVariableDescriptorCountAllocateInfo(featureBits, rootType, reinterpret_cast<const VkDescriptorSetVariableDescriptorCountAllocateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT:
+        {
+            count_VkDescriptorSetVariableDescriptorCountLayoutSupport(featureBits, rootType, reinterpret_cast<const VkDescriptorSetVariableDescriptorCountLayoutSupport*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE:
+        {
+            count_VkSubpassDescriptionDepthStencilResolve(featureBits, rootType, reinterpret_cast<const VkSubpassDescriptionDepthStencilResolve*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES:
+        {
+            count_VkPhysicalDeviceDepthStencilResolveProperties(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceDepthStencilResolveProperties*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES:
+        {
+            count_VkPhysicalDeviceScalarBlockLayoutFeatures(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceScalarBlockLayoutFeatures*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO:
+        {
+            count_VkImageStencilUsageCreateInfo(featureBits, rootType, reinterpret_cast<const VkImageStencilUsageCreateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO:
+        {
+            count_VkSamplerReductionModeCreateInfo(featureBits, rootType, reinterpret_cast<const VkSamplerReductionModeCreateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES:
+        {
+            count_VkPhysicalDeviceSamplerFilterMinmaxProperties(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceSamplerFilterMinmaxProperties*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
+        {
+            count_VkPhysicalDeviceVulkanMemoryModelFeatures(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceVulkanMemoryModelFeatures*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES:
+        {
+            count_VkPhysicalDeviceImagelessFramebufferFeatures(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceImagelessFramebufferFeatures*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO:
+        {
+            count_VkFramebufferAttachmentsCreateInfo(featureBits, rootType, reinterpret_cast<const VkFramebufferAttachmentsCreateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO:
+        {
+            count_VkRenderPassAttachmentBeginInfo(featureBits, rootType, reinterpret_cast<const VkRenderPassAttachmentBeginInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES:
+        {
+            count_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceUniformBufferStandardLayoutFeatures*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES:
+        {
+            count_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
+        {
+            count_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT:
+        {
+            count_VkAttachmentReferenceStencilLayout(featureBits, rootType, reinterpret_cast<const VkAttachmentReferenceStencilLayout*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT:
+        {
+            count_VkAttachmentDescriptionStencilLayout(featureBits, rootType, reinterpret_cast<const VkAttachmentDescriptionStencilLayout*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES:
+        {
+            count_VkPhysicalDeviceHostQueryResetFeatures(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceHostQueryResetFeatures*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES:
+        {
+            count_VkPhysicalDeviceTimelineSemaphoreFeatures(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceTimelineSemaphoreFeatures*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES:
+        {
+            count_VkPhysicalDeviceTimelineSemaphoreProperties(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceTimelineSemaphoreProperties*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO:
+        {
+            count_VkSemaphoreTypeCreateInfo(featureBits, rootType, reinterpret_cast<const VkSemaphoreTypeCreateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
+        {
+            count_VkTimelineSemaphoreSubmitInfo(featureBits, rootType, reinterpret_cast<const VkTimelineSemaphoreSubmitInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
+        {
+            count_VkPhysicalDeviceBufferDeviceAddressFeatures(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceBufferDeviceAddressFeatures*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO:
+        {
+            count_VkBufferOpaqueCaptureAddressCreateInfo(featureBits, rootType, reinterpret_cast<const VkBufferOpaqueCaptureAddressCreateInfo*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO:
+        {
+            count_VkMemoryOpaqueCaptureAddressAllocateInfo(featureBits, rootType, reinterpret_cast<const VkMemoryOpaqueCaptureAddressAllocateInfo*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_swapchain
+        case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
+        {
+            count_VkImageSwapchainCreateInfoKHR(featureBits, rootType, reinterpret_cast<const VkImageSwapchainCreateInfoKHR*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR:
+        {
+            count_VkBindImageMemorySwapchainInfoKHR(featureBits, rootType, reinterpret_cast<const VkBindImageMemorySwapchainInfoKHR*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR:
+        {
+            count_VkDeviceGroupPresentInfoKHR(featureBits, rootType, reinterpret_cast<const VkDeviceGroupPresentInfoKHR*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR:
+        {
+            count_VkDeviceGroupSwapchainCreateInfoKHR(featureBits, rootType, reinterpret_cast<const VkDeviceGroupSwapchainCreateInfoKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_display_swapchain
+        case VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR:
+        {
+            count_VkDisplayPresentInfoKHR(featureBits, rootType, reinterpret_cast<const VkDisplayPresentInfoKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_external_memory_win32
+        case VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR:
+        {
+            count_VkImportMemoryWin32HandleInfoKHR(featureBits, rootType, reinterpret_cast<const VkImportMemoryWin32HandleInfoKHR*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR:
+        {
+            count_VkExportMemoryWin32HandleInfoKHR(featureBits, rootType, reinterpret_cast<const VkExportMemoryWin32HandleInfoKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_external_memory_fd
+        case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
+        {
+            count_VkImportMemoryFdInfoKHR(featureBits, rootType, reinterpret_cast<const VkImportMemoryFdInfoKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_win32_keyed_mutex
+        case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR:
+        {
+            count_VkWin32KeyedMutexAcquireReleaseInfoKHR(featureBits, rootType, reinterpret_cast<const VkWin32KeyedMutexAcquireReleaseInfoKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_external_semaphore_win32
+        case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR:
+        {
+            count_VkExportSemaphoreWin32HandleInfoKHR(featureBits, rootType, reinterpret_cast<const VkExportSemaphoreWin32HandleInfoKHR*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR:
+        {
+            count_VkD3D12FenceSubmitInfoKHR(featureBits, rootType, reinterpret_cast<const VkD3D12FenceSubmitInfoKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_push_descriptor
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR:
+        {
+            count_VkPhysicalDevicePushDescriptorPropertiesKHR(featureBits, rootType, reinterpret_cast<const VkPhysicalDevicePushDescriptorPropertiesKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_incremental_present
+        case VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR:
+        {
+            count_VkPresentRegionsKHR(featureBits, rootType, reinterpret_cast<const VkPresentRegionsKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_shared_presentable_image
+        case VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR:
+        {
+            count_VkSharedPresentSurfaceCapabilitiesKHR(featureBits, rootType, reinterpret_cast<const VkSharedPresentSurfaceCapabilitiesKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_external_fence_win32
+        case VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR:
+        {
+            count_VkExportFenceWin32HandleInfoKHR(featureBits, rootType, reinterpret_cast<const VkExportFenceWin32HandleInfoKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_performance_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR:
+        {
+            count_VkPhysicalDevicePerformanceQueryFeaturesKHR(featureBits, rootType, reinterpret_cast<const VkPhysicalDevicePerformanceQueryFeaturesKHR*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR:
+        {
+            count_VkPhysicalDevicePerformanceQueryPropertiesKHR(featureBits, rootType, reinterpret_cast<const VkPhysicalDevicePerformanceQueryPropertiesKHR*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR:
+        {
+            count_VkQueryPoolPerformanceCreateInfoKHR(featureBits, rootType, reinterpret_cast<const VkQueryPoolPerformanceCreateInfoKHR*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR:
+        {
+            count_VkPerformanceQuerySubmitInfoKHR(featureBits, rootType, reinterpret_cast<const VkPerformanceQuerySubmitInfoKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_portability_subset
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR:
+        {
+            count_VkPhysicalDevicePortabilitySubsetFeaturesKHR(featureBits, rootType, reinterpret_cast<const VkPhysicalDevicePortabilitySubsetFeaturesKHR*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR:
+        {
+            count_VkPhysicalDevicePortabilitySubsetPropertiesKHR(featureBits, rootType, reinterpret_cast<const VkPhysicalDevicePortabilitySubsetPropertiesKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_shader_clock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR:
+        {
+            count_VkPhysicalDeviceShaderClockFeaturesKHR(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceShaderClockFeaturesKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR:
+        {
+            count_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+        case VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR:
+        {
+            count_VkFragmentShadingRateAttachmentInfoKHR(featureBits, rootType, reinterpret_cast<const VkFragmentShadingRateAttachmentInfoKHR*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR:
+        {
+            count_VkPipelineFragmentShadingRateStateCreateInfoKHR(featureBits, rootType, reinterpret_cast<const VkPipelineFragmentShadingRateStateCreateInfoKHR*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR:
+        {
+            count_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShadingRateFeaturesKHR*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR:
+        {
+            count_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShadingRatePropertiesKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+        case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR:
+        {
+            count_VkSurfaceProtectedCapabilitiesKHR(featureBits, rootType, reinterpret_cast<const VkSurfaceProtectedCapabilitiesKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR:
+        {
+            count_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(featureBits, rootType, reinterpret_cast<const VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_ANDROID_native_buffer
+        case VK_STRUCTURE_TYPE_NATIVE_BUFFER_ANDROID:
+        {
+            count_VkNativeBufferANDROID(featureBits, rootType, reinterpret_cast<const VkNativeBufferANDROID*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_debug_report
+        case VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT:
+        {
+            count_VkDebugReportCallbackCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkDebugReportCallbackCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_AMD_rasterization_order
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD:
+        {
+            count_VkPipelineRasterizationStateRasterizationOrderAMD(featureBits, rootType, reinterpret_cast<const VkPipelineRasterizationStateRasterizationOrderAMD*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_dedicated_allocation
+        case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV:
+        {
+            count_VkDedicatedAllocationImageCreateInfoNV(featureBits, rootType, reinterpret_cast<const VkDedicatedAllocationImageCreateInfoNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV:
+        {
+            count_VkDedicatedAllocationBufferCreateInfoNV(featureBits, rootType, reinterpret_cast<const VkDedicatedAllocationBufferCreateInfoNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV:
+        {
+            count_VkDedicatedAllocationMemoryAllocateInfoNV(featureBits, rootType, reinterpret_cast<const VkDedicatedAllocationMemoryAllocateInfoNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_transform_feedback
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceTransformFeedbackFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceTransformFeedbackFeaturesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT:
+        {
+            count_VkPhysicalDeviceTransformFeedbackPropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceTransformFeedbackPropertiesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT:
+        {
+            count_VkPipelineRasterizationStateStreamCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkPipelineRasterizationStateStreamCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_AMD_texture_gather_bias_lod
+        case VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD:
+        {
+            count_VkTextureLODGatherFormatPropertiesAMD(featureBits, rootType, reinterpret_cast<const VkTextureLODGatherFormatPropertiesAMD*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_corner_sampled_image
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV:
+        {
+            count_VkPhysicalDeviceCornerSampledImageFeaturesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceCornerSampledImageFeaturesNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_external_memory
+        case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV:
+        {
+            count_VkExternalMemoryImageCreateInfoNV(featureBits, rootType, reinterpret_cast<const VkExternalMemoryImageCreateInfoNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV:
+        {
+            count_VkExportMemoryAllocateInfoNV(featureBits, rootType, reinterpret_cast<const VkExportMemoryAllocateInfoNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_external_memory_win32
+        case VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV:
+        {
+            count_VkImportMemoryWin32HandleInfoNV(featureBits, rootType, reinterpret_cast<const VkImportMemoryWin32HandleInfoNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV:
+        {
+            count_VkExportMemoryWin32HandleInfoNV(featureBits, rootType, reinterpret_cast<const VkExportMemoryWin32HandleInfoNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_win32_keyed_mutex
+        case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV:
+        {
+            count_VkWin32KeyedMutexAcquireReleaseInfoNV(featureBits, rootType, reinterpret_cast<const VkWin32KeyedMutexAcquireReleaseInfoNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_validation_flags
+        case VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT:
+        {
+            count_VkValidationFlagsEXT(featureBits, rootType, reinterpret_cast<const VkValidationFlagsEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_astc_decode_mode
+        case VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT:
+        {
+            count_VkImageViewASTCDecodeModeEXT(featureBits, rootType, reinterpret_cast<const VkImageViewASTCDecodeModeEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceASTCDecodeFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceASTCDecodeFeaturesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_conditional_rendering
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceConditionalRenderingFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceConditionalRenderingFeaturesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT:
+        {
+            count_VkCommandBufferInheritanceConditionalRenderingInfoEXT(featureBits, rootType, reinterpret_cast<const VkCommandBufferInheritanceConditionalRenderingInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_clip_space_w_scaling
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV:
+        {
+            count_VkPipelineViewportWScalingStateCreateInfoNV(featureBits, rootType, reinterpret_cast<const VkPipelineViewportWScalingStateCreateInfoNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_display_control
+        case VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT:
+        {
+            count_VkSwapchainCounterCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkSwapchainCounterCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_GOOGLE_display_timing
+        case VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE:
+        {
+            count_VkPresentTimesInfoGOOGLE(featureBits, rootType, reinterpret_cast<const VkPresentTimesInfoGOOGLE*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NVX_multiview_per_view_attributes
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX:
+        {
+            count_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_viewport_swizzle
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV:
+        {
+            count_VkPipelineViewportSwizzleStateCreateInfoNV(featureBits, rootType, reinterpret_cast<const VkPipelineViewportSwizzleStateCreateInfoNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_discard_rectangles
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT:
+        {
+            count_VkPhysicalDeviceDiscardRectanglePropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceDiscardRectanglePropertiesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT:
+        {
+            count_VkPipelineDiscardRectangleStateCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkPipelineDiscardRectangleStateCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_conservative_rasterization
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT:
+        {
+            count_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceConservativeRasterizationPropertiesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT:
+        {
+            count_VkPipelineRasterizationConservativeStateCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkPipelineRasterizationConservativeStateCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_depth_clip_enable
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceDepthClipEnableFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceDepthClipEnableFeaturesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT:
+        {
+            count_VkPipelineRasterizationDepthClipStateCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkPipelineRasterizationDepthClipStateCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_debug_utils
+        case VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT:
+        {
+            count_VkDebugUtilsMessengerCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkDebugUtilsMessengerCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_ANDROID_external_memory_android_hardware_buffer
+        case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID:
+        {
+            count_VkAndroidHardwareBufferUsageANDROID(featureBits, rootType, reinterpret_cast<const VkAndroidHardwareBufferUsageANDROID*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID:
+        {
+            count_VkAndroidHardwareBufferFormatPropertiesANDROID(featureBits, rootType, reinterpret_cast<const VkAndroidHardwareBufferFormatPropertiesANDROID*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
+        {
+            count_VkImportAndroidHardwareBufferInfoANDROID(featureBits, rootType, reinterpret_cast<const VkImportAndroidHardwareBufferInfoANDROID*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID:
+        {
+            count_VkExternalFormatANDROID(featureBits, rootType, reinterpret_cast<const VkExternalFormatANDROID*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_inline_uniform_block
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceInlineUniformBlockFeaturesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT:
+        {
+            count_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceInlineUniformBlockPropertiesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT:
+        {
+            count_VkWriteDescriptorSetInlineUniformBlockEXT(featureBits, rootType, reinterpret_cast<const VkWriteDescriptorSetInlineUniformBlockEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT:
+        {
+            count_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkDescriptorPoolInlineUniformBlockCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_sample_locations
+        case VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT:
+        {
+            count_VkSampleLocationsInfoEXT(featureBits, rootType, reinterpret_cast<const VkSampleLocationsInfoEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT:
+        {
+            count_VkRenderPassSampleLocationsBeginInfoEXT(featureBits, rootType, reinterpret_cast<const VkRenderPassSampleLocationsBeginInfoEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT:
+        {
+            count_VkPipelineSampleLocationsStateCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkPipelineSampleLocationsStateCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT:
+        {
+            count_VkPhysicalDeviceSampleLocationsPropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceSampleLocationsPropertiesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_blend_operation_advanced
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT:
+        {
+            count_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT:
+        {
+            count_VkPipelineColorBlendAdvancedStateCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkPipelineColorBlendAdvancedStateCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_coverage_to_color
+        case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV:
+        {
+            count_VkPipelineCoverageToColorStateCreateInfoNV(featureBits, rootType, reinterpret_cast<const VkPipelineCoverageToColorStateCreateInfoNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_framebuffer_mixed_samples
+        case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV:
+        {
+            count_VkPipelineCoverageModulationStateCreateInfoNV(featureBits, rootType, reinterpret_cast<const VkPipelineCoverageModulationStateCreateInfoNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_shader_sm_builtins
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV:
+        {
+            count_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceShaderSMBuiltinsPropertiesNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV:
+        {
+            count_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceShaderSMBuiltinsFeaturesNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+        case VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT:
+        {
+            count_VkDrmFormatModifierPropertiesListEXT(featureBits, rootType, reinterpret_cast<const VkDrmFormatModifierPropertiesListEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT:
+        {
+            count_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceImageDrmFormatModifierInfoEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT:
+        {
+            count_VkImageDrmFormatModifierListCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkImageDrmFormatModifierListCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT:
+        {
+            count_VkImageDrmFormatModifierExplicitCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkImageDrmFormatModifierExplicitCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_validation_cache
+        case VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT:
+        {
+            count_VkShaderModuleValidationCacheCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkShaderModuleValidationCacheCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_shading_rate_image
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV:
+        {
+            count_VkPipelineViewportShadingRateImageStateCreateInfoNV(featureBits, rootType, reinterpret_cast<const VkPipelineViewportShadingRateImageStateCreateInfoNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV:
+        {
+            count_VkPhysicalDeviceShadingRateImageFeaturesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceShadingRateImageFeaturesNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV:
+        {
+            count_VkPhysicalDeviceShadingRateImagePropertiesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceShadingRateImagePropertiesNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV:
+        {
+            count_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(featureBits, rootType, reinterpret_cast<const VkPipelineViewportCoarseSampleOrderStateCreateInfoNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_ray_tracing
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV:
+        {
+            count_VkWriteDescriptorSetAccelerationStructureNV(featureBits, rootType, reinterpret_cast<const VkWriteDescriptorSetAccelerationStructureNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV:
+        {
+            count_VkPhysicalDeviceRayTracingPropertiesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceRayTracingPropertiesNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_representative_fragment_test
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV:
+        {
+            count_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV:
+        {
+            count_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(featureBits, rootType, reinterpret_cast<const VkPipelineRepresentativeFragmentTestStateCreateInfoNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_filter_cubic
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT:
+        {
+            count_VkPhysicalDeviceImageViewImageFormatInfoEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceImageViewImageFormatInfoEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT:
+        {
+            count_VkFilterCubicImageViewImageFormatPropertiesEXT(featureBits, rootType, reinterpret_cast<const VkFilterCubicImageViewImageFormatPropertiesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_global_priority
+        case VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT:
+        {
+            count_VkDeviceQueueGlobalPriorityCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkDeviceQueueGlobalPriorityCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_external_memory_host
+        case VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT:
+        {
+            count_VkImportMemoryHostPointerInfoEXT(featureBits, rootType, reinterpret_cast<const VkImportMemoryHostPointerInfoEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT:
+        {
+            count_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceExternalMemoryHostPropertiesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_AMD_pipeline_compiler_control
+        case VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD:
+        {
+            count_VkPipelineCompilerControlCreateInfoAMD(featureBits, rootType, reinterpret_cast<const VkPipelineCompilerControlCreateInfoAMD*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_AMD_shader_core_properties
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD:
+        {
+            count_VkPhysicalDeviceShaderCorePropertiesAMD(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceShaderCorePropertiesAMD*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_AMD_memory_overallocation_behavior
+        case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD:
+        {
+            count_VkDeviceMemoryOverallocationCreateInfoAMD(featureBits, rootType, reinterpret_cast<const VkDeviceMemoryOverallocationCreateInfoAMD*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_vertex_attribute_divisor
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT:
+        {
+            count_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT:
+        {
+            count_VkPipelineVertexInputDivisorStateCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkPipelineVertexInputDivisorStateCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_GGP_frame_token
+        case VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP:
+        {
+            count_VkPresentFrameTokenGGP(featureBits, rootType, reinterpret_cast<const VkPresentFrameTokenGGP*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+        case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT:
+        {
+            count_VkPipelineCreationFeedbackCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkPipelineCreationFeedbackCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_compute_shader_derivatives
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV:
+        {
+            count_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceComputeShaderDerivativesFeaturesNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_mesh_shader
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV:
+        {
+            count_VkPhysicalDeviceMeshShaderFeaturesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceMeshShaderFeaturesNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV:
+        {
+            count_VkPhysicalDeviceMeshShaderPropertiesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceMeshShaderPropertiesNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV:
+        {
+            count_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_shader_image_footprint
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV:
+        {
+            count_VkPhysicalDeviceShaderImageFootprintFeaturesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceShaderImageFootprintFeaturesNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_scissor_exclusive
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV:
+        {
+            count_VkPipelineViewportExclusiveScissorStateCreateInfoNV(featureBits, rootType, reinterpret_cast<const VkPipelineViewportExclusiveScissorStateCreateInfoNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV:
+        {
+            count_VkPhysicalDeviceExclusiveScissorFeaturesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceExclusiveScissorFeaturesNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_device_diagnostic_checkpoints
+        case VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV:
+        {
+            count_VkQueueFamilyCheckpointPropertiesNV(featureBits, rootType, reinterpret_cast<const VkQueueFamilyCheckpointPropertiesNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_INTEL_shader_integer_functions2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL:
+        {
+            count_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL:
+        {
+            count_VkQueryPoolPerformanceQueryCreateInfoINTEL(featureBits, rootType, reinterpret_cast<const VkQueryPoolPerformanceQueryCreateInfoINTEL*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_pci_bus_info
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT:
+        {
+            count_VkPhysicalDevicePCIBusInfoPropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDevicePCIBusInfoPropertiesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_AMD_display_native_hdr
+        case VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD:
+        {
+            count_VkDisplayNativeHdrSurfaceCapabilitiesAMD(featureBits, rootType, reinterpret_cast<const VkDisplayNativeHdrSurfaceCapabilitiesAMD*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD:
+        {
+            count_VkSwapchainDisplayNativeHdrCreateInfoAMD(featureBits, rootType, reinterpret_cast<const VkSwapchainDisplayNativeHdrCreateInfoAMD*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
+                {
+                    count_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension), count);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO:
+                {
+                    count_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension), count);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    count_VkImportColorBufferGOOGLE(featureBits, rootType, reinterpret_cast<const VkImportColorBufferGOOGLE*>(structExtension), count);
+                    break;
+                }
+                default:
+                {
+                    count_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension), count);
+                    break;
+                }
+            }
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2:
+                {
+                    count_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapPropertiesEXT*>(structExtension), count);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    count_VkImportPhysicalAddressGOOGLE(featureBits, rootType, reinterpret_cast<const VkImportPhysicalAddressGOOGLE*>(structExtension), count);
+                    break;
+                }
+                default:
+                {
+                    count_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapPropertiesEXT*>(structExtension), count);
+                    break;
+                }
+            }
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO:
+                {
+                    count_VkRenderPassFragmentDensityMapCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension), count);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2:
+                {
+                    count_VkRenderPassFragmentDensityMapCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension), count);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    count_VkImportBufferGOOGLE(featureBits, rootType, reinterpret_cast<const VkImportBufferGOOGLE*>(structExtension), count);
+                    break;
+                }
+                default:
+                {
+                    count_VkRenderPassFragmentDensityMapCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension), count);
+                    break;
+                }
+            }
+            break;
+        }
+#endif
+#ifdef VK_EXT_subgroup_size_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT:
+        {
+            count_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT:
+        {
+            count_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_AMD_shader_core_properties2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD:
+        {
+            count_VkPhysicalDeviceShaderCoreProperties2AMD(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceShaderCoreProperties2AMD*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_AMD_device_coherent_memory
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD:
+        {
+            count_VkPhysicalDeviceCoherentMemoryFeaturesAMD(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceCoherentMemoryFeaturesAMD*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_memory_budget
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT:
+        {
+            count_VkPhysicalDeviceMemoryBudgetPropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceMemoryBudgetPropertiesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_memory_priority
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceMemoryPriorityFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceMemoryPriorityFeaturesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT:
+        {
+            count_VkMemoryPriorityAllocateInfoEXT(featureBits, rootType, reinterpret_cast<const VkMemoryPriorityAllocateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV:
+        {
+            count_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_buffer_device_address
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceBufferDeviceAddressFeaturesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT:
+        {
+            count_VkBufferDeviceAddressCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkBufferDeviceAddressCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_validation_features
+        case VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT:
+        {
+            count_VkValidationFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkValidationFeaturesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_cooperative_matrix
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV:
+        {
+            count_VkPhysicalDeviceCooperativeMatrixFeaturesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceCooperativeMatrixFeaturesNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV:
+        {
+            count_VkPhysicalDeviceCooperativeMatrixPropertiesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceCooperativeMatrixPropertiesNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV:
+        {
+            count_VkPhysicalDeviceCoverageReductionModeFeaturesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceCoverageReductionModeFeaturesNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV:
+        {
+            count_VkPipelineCoverageReductionStateCreateInfoNV(featureBits, rootType, reinterpret_cast<const VkPipelineCoverageReductionStateCreateInfoNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceYcbcrImageArraysFeaturesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT:
+        {
+            count_VkSurfaceFullScreenExclusiveInfoEXT(featureBits, rootType, reinterpret_cast<const VkSurfaceFullScreenExclusiveInfoEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT:
+        {
+            count_VkSurfaceCapabilitiesFullScreenExclusiveEXT(featureBits, rootType, reinterpret_cast<const VkSurfaceCapabilitiesFullScreenExclusiveEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT:
+        {
+            count_VkSurfaceFullScreenExclusiveWin32InfoEXT(featureBits, rootType, reinterpret_cast<const VkSurfaceFullScreenExclusiveWin32InfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_line_rasterization
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceLineRasterizationFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceLineRasterizationFeaturesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT:
+        {
+            count_VkPhysicalDeviceLineRasterizationPropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceLineRasterizationPropertiesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
+        {
+            count_VkPipelineRasterizationLineStateCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkPipelineRasterizationLineStateCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_atomic_float
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceShaderAtomicFloatFeaturesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_index_type_uint8
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceIndexTypeUint8FeaturesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceExtendedDynamicStateFeaturesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_device_generated_commands
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV:
+        {
+            count_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV:
+        {
+            count_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV:
+        {
+            count_VkGraphicsPipelineShaderGroupsCreateInfoNV(featureBits, rootType, reinterpret_cast<const VkGraphicsPipelineShaderGroupsCreateInfoNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT:
+        {
+            count_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_QCOM_render_pass_transform
+        case VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM:
+        {
+            count_VkRenderPassTransformBeginInfoQCOM(featureBits, rootType, reinterpret_cast<const VkRenderPassTransformBeginInfoQCOM*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM:
+        {
+            count_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(featureBits, rootType, reinterpret_cast<const VkCommandBufferInheritanceRenderPassTransformInfoQCOM*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_device_memory_report
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceDeviceMemoryReportFeaturesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT:
+        {
+            count_VkDeviceDeviceMemoryReportCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkDeviceDeviceMemoryReportCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_robustness2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceRobustness2FeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceRobustness2FeaturesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT:
+        {
+            count_VkPhysicalDeviceRobustness2PropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceRobustness2PropertiesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_custom_border_color
+        case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT:
+        {
+            count_VkSamplerCustomBorderColorCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkSamplerCustomBorderColorCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT:
+        {
+            count_VkPhysicalDeviceCustomBorderColorPropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceCustomBorderColorPropertiesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceCustomBorderColorFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceCustomBorderColorFeaturesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_private_data
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT:
+        {
+            count_VkPhysicalDevicePrivateDataFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDevicePrivateDataFeaturesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT:
+        {
+            count_VkDevicePrivateDataCreateInfoEXT(featureBits, rootType, reinterpret_cast<const VkDevicePrivateDataCreateInfoEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT:
+        {
+            count_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_device_diagnostics_config
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV:
+        {
+            count_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceDiagnosticsConfigFeaturesNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV:
+        {
+            count_VkDeviceDiagnosticsConfigCreateInfoNV(featureBits, rootType, reinterpret_cast<const VkDeviceDiagnosticsConfigCreateInfoNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV:
+        {
+            count_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV:
+        {
+            count_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV:
+        {
+            count_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(featureBits, rootType, reinterpret_cast<const VkPipelineFragmentShadingRateEnumStateCreateInfoNV*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMap2FeaturesEXT*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT:
+        {
+            count_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMap2PropertiesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+        case VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM:
+        {
+            count_VkCopyCommandTransformInfoQCOM(featureBits, rootType, reinterpret_cast<const VkCopyCommandTransformInfoQCOM*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_image_robustness
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT:
+        {
+            count_VkPhysicalDeviceImageRobustnessFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceImageRobustnessFeaturesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_EXT_4444_formats
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+        {
+            count_VkPhysicalDevice4444FormatsFeaturesEXT(featureBits, rootType, reinterpret_cast<const VkPhysicalDevice4444FormatsFeaturesEXT*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_GOOGLE_gfxstream
+        case VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE:
+        {
+            count_VkImportColorBufferGOOGLE(featureBits, rootType, reinterpret_cast<const VkImportColorBufferGOOGLE*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE:
+        {
+            count_VkImportBufferGOOGLE(featureBits, rootType, reinterpret_cast<const VkImportBufferGOOGLE*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE:
+        {
+            count_VkImportPhysicalAddressGOOGLE(featureBits, rootType, reinterpret_cast<const VkImportPhysicalAddressGOOGLE*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR:
+        {
+            count_VkWriteDescriptorSetAccelerationStructureKHR(featureBits, rootType, reinterpret_cast<const VkWriteDescriptorSetAccelerationStructureKHR*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR:
+        {
+            count_VkPhysicalDeviceAccelerationStructureFeaturesKHR(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceAccelerationStructureFeaturesKHR*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR:
+        {
+            count_VkPhysicalDeviceAccelerationStructurePropertiesKHR(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceAccelerationStructurePropertiesKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR:
+        {
+            count_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceRayTracingPipelineFeaturesKHR*>(structExtension), count);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR:
+        {
+            count_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceRayTracingPipelinePropertiesKHR*>(structExtension), count);
+            break;
+        }
+#endif
+#ifdef VK_KHR_ray_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR:
+        {
+            count_VkPhysicalDeviceRayQueryFeaturesKHR(featureBits, rootType, reinterpret_cast<const VkPhysicalDeviceRayQueryFeaturesKHR*>(structExtension), count);
+            break;
+        }
+#endif
+        default:
+        {
+            // fatal; the switch is only taken if the extension struct is known
+            abort();
+        }
+    }
+}
+
+
+} // namespace goldfish_vk
diff --git a/system/vulkan_enc/goldfish_vk_counting_guest.h b/system/vulkan_enc/goldfish_vk_counting_guest.h
new file mode 100644
index 0000000..6f4d3a7
--- /dev/null
+++ b/system/vulkan_enc/goldfish_vk_counting_guest.h
@@ -0,0 +1,4245 @@
+// Copyright (C) 2018 The Android Open Source Project
+// Copyright (C) 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Autogenerated module goldfish_vk_counting_guest
+// (header) generated by android/android-emugl/host/libs/libOpenglRender/vulkan-registry/xml/genvk.py -registry android/android-emugl/host/libs/libOpenglRender/vulkan-registry/xml/vk.xml cereal -o android/android-emugl/host/libs/libOpenglRender/vulkan/cereal
+// Please do not modify directly;
+// re-run android/scripts/generate-vulkan-sources.sh,
+// or directly from Python by defining:
+// VULKAN_REGISTRY_XML_DIR : Directory containing genvk.py and vk.xml
+// CEREAL_OUTPUT_DIR: Where to put the generated sources.
+// python3 $VULKAN_REGISTRY_XML_DIR/genvk.py -registry $VULKAN_REGISTRY_XML_DIR/vk.xml cereal -o $CEREAL_OUTPUT_DIR
+
+#pragma once
+
+#include <vulkan/vulkan.h>
+
+
+#include "vk_platform_compat.h"
+#include "goldfish_vk_private_defs.h"
+
+
+namespace goldfish_vk {
+
+#ifdef VK_VERSION_1_0
+void count_VkExtent2D(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExtent2D* toCount,
+    size_t* count);
+
+void count_VkExtent3D(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExtent3D* toCount,
+    size_t* count);
+
+void count_VkOffset2D(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkOffset2D* toCount,
+    size_t* count);
+
+void count_VkOffset3D(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkOffset3D* toCount,
+    size_t* count);
+
+void count_VkRect2D(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRect2D* toCount,
+    size_t* count);
+
+void count_VkBaseInStructure(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBaseInStructure* toCount,
+    size_t* count);
+
+void count_VkBaseOutStructure(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBaseOutStructure* toCount,
+    size_t* count);
+
+void count_VkBufferMemoryBarrier(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferMemoryBarrier* toCount,
+    size_t* count);
+
+void count_VkDispatchIndirectCommand(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDispatchIndirectCommand* toCount,
+    size_t* count);
+
+void count_VkDrawIndexedIndirectCommand(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDrawIndexedIndirectCommand* toCount,
+    size_t* count);
+
+void count_VkDrawIndirectCommand(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDrawIndirectCommand* toCount,
+    size_t* count);
+
+void count_VkImageSubresourceRange(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageSubresourceRange* toCount,
+    size_t* count);
+
+void count_VkImageMemoryBarrier(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageMemoryBarrier* toCount,
+    size_t* count);
+
+void count_VkMemoryBarrier(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryBarrier* toCount,
+    size_t* count);
+
+void count_VkAllocationCallbacks(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAllocationCallbacks* toCount,
+    size_t* count);
+
+void count_VkApplicationInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkApplicationInfo* toCount,
+    size_t* count);
+
+void count_VkFormatProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFormatProperties* toCount,
+    size_t* count);
+
+void count_VkImageFormatProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageFormatProperties* toCount,
+    size_t* count);
+
+void count_VkInstanceCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkInstanceCreateInfo* toCount,
+    size_t* count);
+
+void count_VkMemoryHeap(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryHeap* toCount,
+    size_t* count);
+
+void count_VkMemoryType(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryType* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFeatures* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceLimits(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLimits* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceMemoryProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryProperties* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceSparseProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSparseProperties* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProperties* toCount,
+    size_t* count);
+
+void count_VkQueueFamilyProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkQueueFamilyProperties* toCount,
+    size_t* count);
+
+void count_VkDeviceQueueCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceQueueCreateInfo* toCount,
+    size_t* count);
+
+void count_VkDeviceCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceCreateInfo* toCount,
+    size_t* count);
+
+void count_VkExtensionProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExtensionProperties* toCount,
+    size_t* count);
+
+void count_VkLayerProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkLayerProperties* toCount,
+    size_t* count);
+
+void count_VkSubmitInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubmitInfo* toCount,
+    size_t* count);
+
+void count_VkMappedMemoryRange(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMappedMemoryRange* toCount,
+    size_t* count);
+
+void count_VkMemoryAllocateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryAllocateInfo* toCount,
+    size_t* count);
+
+void count_VkMemoryRequirements(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryRequirements* toCount,
+    size_t* count);
+
+void count_VkSparseMemoryBind(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseMemoryBind* toCount,
+    size_t* count);
+
+void count_VkSparseBufferMemoryBindInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseBufferMemoryBindInfo* toCount,
+    size_t* count);
+
+void count_VkSparseImageOpaqueMemoryBindInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseImageOpaqueMemoryBindInfo* toCount,
+    size_t* count);
+
+void count_VkImageSubresource(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageSubresource* toCount,
+    size_t* count);
+
+void count_VkSparseImageMemoryBind(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseImageMemoryBind* toCount,
+    size_t* count);
+
+void count_VkSparseImageMemoryBindInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseImageMemoryBindInfo* toCount,
+    size_t* count);
+
+void count_VkBindSparseInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindSparseInfo* toCount,
+    size_t* count);
+
+void count_VkSparseImageFormatProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseImageFormatProperties* toCount,
+    size_t* count);
+
+void count_VkSparseImageMemoryRequirements(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseImageMemoryRequirements* toCount,
+    size_t* count);
+
+void count_VkFenceCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFenceCreateInfo* toCount,
+    size_t* count);
+
+void count_VkSemaphoreCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSemaphoreCreateInfo* toCount,
+    size_t* count);
+
+void count_VkEventCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkEventCreateInfo* toCount,
+    size_t* count);
+
+void count_VkQueryPoolCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkQueryPoolCreateInfo* toCount,
+    size_t* count);
+
+void count_VkBufferCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferCreateInfo* toCount,
+    size_t* count);
+
+void count_VkBufferViewCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferViewCreateInfo* toCount,
+    size_t* count);
+
+void count_VkImageCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageCreateInfo* toCount,
+    size_t* count);
+
+void count_VkSubresourceLayout(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubresourceLayout* toCount,
+    size_t* count);
+
+void count_VkComponentMapping(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkComponentMapping* toCount,
+    size_t* count);
+
+void count_VkImageViewCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageViewCreateInfo* toCount,
+    size_t* count);
+
+void count_VkShaderModuleCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkShaderModuleCreateInfo* toCount,
+    size_t* count);
+
+void count_VkPipelineCacheCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineCacheCreateInfo* toCount,
+    size_t* count);
+
+void count_VkSpecializationMapEntry(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSpecializationMapEntry* toCount,
+    size_t* count);
+
+void count_VkSpecializationInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSpecializationInfo* toCount,
+    size_t* count);
+
+void count_VkPipelineShaderStageCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineShaderStageCreateInfo* toCount,
+    size_t* count);
+
+void count_VkComputePipelineCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkComputePipelineCreateInfo* toCount,
+    size_t* count);
+
+void count_VkVertexInputBindingDescription(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkVertexInputBindingDescription* toCount,
+    size_t* count);
+
+void count_VkVertexInputAttributeDescription(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkVertexInputAttributeDescription* toCount,
+    size_t* count);
+
+void count_VkPipelineVertexInputStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineVertexInputStateCreateInfo* toCount,
+    size_t* count);
+
+void count_VkPipelineInputAssemblyStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineInputAssemblyStateCreateInfo* toCount,
+    size_t* count);
+
+void count_VkPipelineTessellationStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineTessellationStateCreateInfo* toCount,
+    size_t* count);
+
+void count_VkViewport(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkViewport* toCount,
+    size_t* count);
+
+void count_VkPipelineViewportStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineViewportStateCreateInfo* toCount,
+    size_t* count);
+
+void count_VkPipelineRasterizationStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateCreateInfo* toCount,
+    size_t* count);
+
+void count_VkPipelineMultisampleStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineMultisampleStateCreateInfo* toCount,
+    size_t* count);
+
+void count_VkStencilOpState(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkStencilOpState* toCount,
+    size_t* count);
+
+void count_VkPipelineDepthStencilStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineDepthStencilStateCreateInfo* toCount,
+    size_t* count);
+
+void count_VkPipelineColorBlendAttachmentState(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineColorBlendAttachmentState* toCount,
+    size_t* count);
+
+void count_VkPipelineColorBlendStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineColorBlendStateCreateInfo* toCount,
+    size_t* count);
+
+void count_VkPipelineDynamicStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineDynamicStateCreateInfo* toCount,
+    size_t* count);
+
+void count_VkGraphicsPipelineCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGraphicsPipelineCreateInfo* toCount,
+    size_t* count);
+
+void count_VkPushConstantRange(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPushConstantRange* toCount,
+    size_t* count);
+
+void count_VkPipelineLayoutCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineLayoutCreateInfo* toCount,
+    size_t* count);
+
+void count_VkSamplerCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSamplerCreateInfo* toCount,
+    size_t* count);
+
+void count_VkCopyDescriptorSet(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyDescriptorSet* toCount,
+    size_t* count);
+
+void count_VkDescriptorBufferInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorBufferInfo* toCount,
+    size_t* count);
+
+void count_VkDescriptorImageInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorImageInfo* toCount,
+    size_t* count);
+
+void count_VkDescriptorPoolSize(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorPoolSize* toCount,
+    size_t* count);
+
+void count_VkDescriptorPoolCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorPoolCreateInfo* toCount,
+    size_t* count);
+
+void count_VkDescriptorSetAllocateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorSetAllocateInfo* toCount,
+    size_t* count);
+
+void count_VkDescriptorSetLayoutBinding(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutBinding* toCount,
+    size_t* count);
+
+void count_VkDescriptorSetLayoutCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutCreateInfo* toCount,
+    size_t* count);
+
+void count_VkWriteDescriptorSet(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkWriteDescriptorSet* toCount,
+    size_t* count);
+
+void count_VkAttachmentDescription(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAttachmentDescription* toCount,
+    size_t* count);
+
+void count_VkAttachmentReference(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAttachmentReference* toCount,
+    size_t* count);
+
+void count_VkFramebufferCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFramebufferCreateInfo* toCount,
+    size_t* count);
+
+void count_VkSubpassDescription(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubpassDescription* toCount,
+    size_t* count);
+
+void count_VkSubpassDependency(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubpassDependency* toCount,
+    size_t* count);
+
+void count_VkRenderPassCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassCreateInfo* toCount,
+    size_t* count);
+
+void count_VkCommandPoolCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCommandPoolCreateInfo* toCount,
+    size_t* count);
+
+void count_VkCommandBufferAllocateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCommandBufferAllocateInfo* toCount,
+    size_t* count);
+
+void count_VkCommandBufferInheritanceInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceInfo* toCount,
+    size_t* count);
+
+void count_VkCommandBufferBeginInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCommandBufferBeginInfo* toCount,
+    size_t* count);
+
+void count_VkBufferCopy(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferCopy* toCount,
+    size_t* count);
+
+void count_VkImageSubresourceLayers(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageSubresourceLayers* toCount,
+    size_t* count);
+
+void count_VkBufferImageCopy(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferImageCopy* toCount,
+    size_t* count);
+
+void count_VkClearColorValue(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkClearColorValue* toCount,
+    size_t* count);
+
+void count_VkClearDepthStencilValue(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkClearDepthStencilValue* toCount,
+    size_t* count);
+
+void count_VkClearValue(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkClearValue* toCount,
+    size_t* count);
+
+void count_VkClearAttachment(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkClearAttachment* toCount,
+    size_t* count);
+
+void count_VkClearRect(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkClearRect* toCount,
+    size_t* count);
+
+void count_VkImageBlit(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageBlit* toCount,
+    size_t* count);
+
+void count_VkImageCopy(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageCopy* toCount,
+    size_t* count);
+
+void count_VkImageResolve(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageResolve* toCount,
+    size_t* count);
+
+void count_VkRenderPassBeginInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassBeginInfo* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_VERSION_1_1
+void count_VkPhysicalDeviceSubgroupProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupProperties* toCount,
+    size_t* count);
+
+void count_VkBindBufferMemoryInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindBufferMemoryInfo* toCount,
+    size_t* count);
+
+void count_VkBindImageMemoryInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindImageMemoryInfo* toCount,
+    size_t* count);
+
+void count_VkPhysicalDevice16BitStorageFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevice16BitStorageFeatures* toCount,
+    size_t* count);
+
+void count_VkMemoryDedicatedRequirements(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryDedicatedRequirements* toCount,
+    size_t* count);
+
+void count_VkMemoryDedicatedAllocateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryDedicatedAllocateInfo* toCount,
+    size_t* count);
+
+void count_VkMemoryAllocateFlagsInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryAllocateFlagsInfo* toCount,
+    size_t* count);
+
+void count_VkDeviceGroupRenderPassBeginInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceGroupRenderPassBeginInfo* toCount,
+    size_t* count);
+
+void count_VkDeviceGroupCommandBufferBeginInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceGroupCommandBufferBeginInfo* toCount,
+    size_t* count);
+
+void count_VkDeviceGroupSubmitInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceGroupSubmitInfo* toCount,
+    size_t* count);
+
+void count_VkDeviceGroupBindSparseInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceGroupBindSparseInfo* toCount,
+    size_t* count);
+
+void count_VkBindBufferMemoryDeviceGroupInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindBufferMemoryDeviceGroupInfo* toCount,
+    size_t* count);
+
+void count_VkBindImageMemoryDeviceGroupInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindImageMemoryDeviceGroupInfo* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceGroupProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceGroupProperties* toCount,
+    size_t* count);
+
+void count_VkDeviceGroupDeviceCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceGroupDeviceCreateInfo* toCount,
+    size_t* count);
+
+void count_VkBufferMemoryRequirementsInfo2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferMemoryRequirementsInfo2* toCount,
+    size_t* count);
+
+void count_VkImageMemoryRequirementsInfo2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageMemoryRequirementsInfo2* toCount,
+    size_t* count);
+
+void count_VkImageSparseMemoryRequirementsInfo2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageSparseMemoryRequirementsInfo2* toCount,
+    size_t* count);
+
+void count_VkMemoryRequirements2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryRequirements2* toCount,
+    size_t* count);
+
+void count_VkSparseImageMemoryRequirements2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseImageMemoryRequirements2* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceFeatures2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFeatures2* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceProperties2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProperties2* toCount,
+    size_t* count);
+
+void count_VkFormatProperties2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFormatProperties2* toCount,
+    size_t* count);
+
+void count_VkImageFormatProperties2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageFormatProperties2* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceImageFormatInfo2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageFormatInfo2* toCount,
+    size_t* count);
+
+void count_VkQueueFamilyProperties2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkQueueFamilyProperties2* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceMemoryProperties2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryProperties2* toCount,
+    size_t* count);
+
+void count_VkSparseImageFormatProperties2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSparseImageFormatProperties2* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceSparseImageFormatInfo2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSparseImageFormatInfo2* toCount,
+    size_t* count);
+
+void count_VkPhysicalDevicePointClippingProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePointClippingProperties* toCount,
+    size_t* count);
+
+void count_VkInputAttachmentAspectReference(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkInputAttachmentAspectReference* toCount,
+    size_t* count);
+
+void count_VkRenderPassInputAttachmentAspectCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassInputAttachmentAspectCreateInfo* toCount,
+    size_t* count);
+
+void count_VkImageViewUsageCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageViewUsageCreateInfo* toCount,
+    size_t* count);
+
+void count_VkPipelineTessellationDomainOriginStateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineTessellationDomainOriginStateCreateInfo* toCount,
+    size_t* count);
+
+void count_VkRenderPassMultiviewCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassMultiviewCreateInfo* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceMultiviewFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMultiviewFeatures* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceMultiviewProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMultiviewProperties* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceVariablePointersFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVariablePointersFeatures* toCount,
+    size_t* count);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceVariablePointersFeatures, count_VkPhysicalDeviceVariablePointerFeatures);
+
+void count_VkPhysicalDeviceProtectedMemoryFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProtectedMemoryFeatures* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceProtectedMemoryProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProtectedMemoryProperties* toCount,
+    size_t* count);
+
+void count_VkDeviceQueueInfo2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceQueueInfo2* toCount,
+    size_t* count);
+
+void count_VkProtectedSubmitInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkProtectedSubmitInfo* toCount,
+    size_t* count);
+
+void count_VkSamplerYcbcrConversionCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSamplerYcbcrConversionCreateInfo* toCount,
+    size_t* count);
+
+void count_VkSamplerYcbcrConversionInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSamplerYcbcrConversionInfo* toCount,
+    size_t* count);
+
+void count_VkBindImagePlaneMemoryInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindImagePlaneMemoryInfo* toCount,
+    size_t* count);
+
+void count_VkImagePlaneMemoryRequirementsInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImagePlaneMemoryRequirementsInfo* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceSamplerYcbcrConversionFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSamplerYcbcrConversionFeatures* toCount,
+    size_t* count);
+
+void count_VkSamplerYcbcrConversionImageFormatProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSamplerYcbcrConversionImageFormatProperties* toCount,
+    size_t* count);
+
+void count_VkDescriptorUpdateTemplateEntry(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorUpdateTemplateEntry* toCount,
+    size_t* count);
+
+void count_VkDescriptorUpdateTemplateCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorUpdateTemplateCreateInfo* toCount,
+    size_t* count);
+
+void count_VkExternalMemoryProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalMemoryProperties* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceExternalImageFormatInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalImageFormatInfo* toCount,
+    size_t* count);
+
+void count_VkExternalImageFormatProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalImageFormatProperties* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceExternalBufferInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalBufferInfo* toCount,
+    size_t* count);
+
+void count_VkExternalBufferProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalBufferProperties* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceIDProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceIDProperties* toCount,
+    size_t* count);
+
+void count_VkExternalMemoryImageCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalMemoryImageCreateInfo* toCount,
+    size_t* count);
+
+void count_VkExternalMemoryBufferCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalMemoryBufferCreateInfo* toCount,
+    size_t* count);
+
+void count_VkExportMemoryAllocateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExportMemoryAllocateInfo* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceExternalFenceInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalFenceInfo* toCount,
+    size_t* count);
+
+void count_VkExternalFenceProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalFenceProperties* toCount,
+    size_t* count);
+
+void count_VkExportFenceCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExportFenceCreateInfo* toCount,
+    size_t* count);
+
+void count_VkExportSemaphoreCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExportSemaphoreCreateInfo* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceExternalSemaphoreInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalSemaphoreInfo* toCount,
+    size_t* count);
+
+void count_VkExternalSemaphoreProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalSemaphoreProperties* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceMaintenance3Properties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMaintenance3Properties* toCount,
+    size_t* count);
+
+void count_VkDescriptorSetLayoutSupport(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutSupport* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceShaderDrawParametersFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderDrawParametersFeatures* toCount,
+    size_t* count);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceShaderDrawParametersFeatures, count_VkPhysicalDeviceShaderDrawParameterFeatures);
+
+#endif
+#ifdef VK_VERSION_1_2
+void count_VkPhysicalDeviceVulkan11Features(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan11Features* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceVulkan11Properties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan11Properties* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceVulkan12Features(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan12Features* toCount,
+    size_t* count);
+
+void count_VkConformanceVersion(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkConformanceVersion* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceVulkan12Properties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan12Properties* toCount,
+    size_t* count);
+
+void count_VkImageFormatListCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageFormatListCreateInfo* toCount,
+    size_t* count);
+
+void count_VkAttachmentDescription2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAttachmentDescription2* toCount,
+    size_t* count);
+
+void count_VkAttachmentReference2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAttachmentReference2* toCount,
+    size_t* count);
+
+void count_VkSubpassDescription2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubpassDescription2* toCount,
+    size_t* count);
+
+void count_VkSubpassDependency2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubpassDependency2* toCount,
+    size_t* count);
+
+void count_VkRenderPassCreateInfo2(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassCreateInfo2* toCount,
+    size_t* count);
+
+void count_VkSubpassBeginInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubpassBeginInfo* toCount,
+    size_t* count);
+
+void count_VkSubpassEndInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubpassEndInfo* toCount,
+    size_t* count);
+
+void count_VkPhysicalDevice8BitStorageFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevice8BitStorageFeatures* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceDriverProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDriverProperties* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceShaderAtomicInt64Features(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderAtomicInt64Features* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceShaderFloat16Int8Features(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderFloat16Int8Features* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceFloatControlsProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFloatControlsProperties* toCount,
+    size_t* count);
+
+void count_VkDescriptorSetLayoutBindingFlagsCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutBindingFlagsCreateInfo* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceDescriptorIndexingFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDescriptorIndexingFeatures* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceDescriptorIndexingProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDescriptorIndexingProperties* toCount,
+    size_t* count);
+
+void count_VkDescriptorSetVariableDescriptorCountAllocateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorSetVariableDescriptorCountAllocateInfo* toCount,
+    size_t* count);
+
+void count_VkDescriptorSetVariableDescriptorCountLayoutSupport(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorSetVariableDescriptorCountLayoutSupport* toCount,
+    size_t* count);
+
+void count_VkSubpassDescriptionDepthStencilResolve(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubpassDescriptionDepthStencilResolve* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceDepthStencilResolveProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDepthStencilResolveProperties* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceScalarBlockLayoutFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceScalarBlockLayoutFeatures* toCount,
+    size_t* count);
+
+void count_VkImageStencilUsageCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageStencilUsageCreateInfo* toCount,
+    size_t* count);
+
+void count_VkSamplerReductionModeCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSamplerReductionModeCreateInfo* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceSamplerFilterMinmaxProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSamplerFilterMinmaxProperties* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceVulkanMemoryModelFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkanMemoryModelFeatures* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceImagelessFramebufferFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImagelessFramebufferFeatures* toCount,
+    size_t* count);
+
+void count_VkFramebufferAttachmentImageInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFramebufferAttachmentImageInfo* toCount,
+    size_t* count);
+
+void count_VkFramebufferAttachmentsCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFramebufferAttachmentsCreateInfo* toCount,
+    size_t* count);
+
+void count_VkRenderPassAttachmentBeginInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassAttachmentBeginInfo* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceUniformBufferStandardLayoutFeatures* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* toCount,
+    size_t* count);
+
+void count_VkAttachmentReferenceStencilLayout(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAttachmentReferenceStencilLayout* toCount,
+    size_t* count);
+
+void count_VkAttachmentDescriptionStencilLayout(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAttachmentDescriptionStencilLayout* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceHostQueryResetFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceHostQueryResetFeatures* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceTimelineSemaphoreFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTimelineSemaphoreFeatures* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceTimelineSemaphoreProperties(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTimelineSemaphoreProperties* toCount,
+    size_t* count);
+
+void count_VkSemaphoreTypeCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSemaphoreTypeCreateInfo* toCount,
+    size_t* count);
+
+void count_VkTimelineSemaphoreSubmitInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkTimelineSemaphoreSubmitInfo* toCount,
+    size_t* count);
+
+void count_VkSemaphoreWaitInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSemaphoreWaitInfo* toCount,
+    size_t* count);
+
+void count_VkSemaphoreSignalInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSemaphoreSignalInfo* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceBufferDeviceAddressFeatures(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBufferDeviceAddressFeatures* toCount,
+    size_t* count);
+
+void count_VkBufferDeviceAddressInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferDeviceAddressInfo* toCount,
+    size_t* count);
+
+void count_VkBufferOpaqueCaptureAddressCreateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferOpaqueCaptureAddressCreateInfo* toCount,
+    size_t* count);
+
+void count_VkMemoryOpaqueCaptureAddressAllocateInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryOpaqueCaptureAddressAllocateInfo* toCount,
+    size_t* count);
+
+void count_VkDeviceMemoryOpaqueCaptureAddressInfo(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceMemoryOpaqueCaptureAddressInfo* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_surface
+void count_VkSurfaceCapabilitiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceCapabilitiesKHR* toCount,
+    size_t* count);
+
+void count_VkSurfaceFormatKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceFormatKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_swapchain
+void count_VkSwapchainCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSwapchainCreateInfoKHR* toCount,
+    size_t* count);
+
+void count_VkPresentInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPresentInfoKHR* toCount,
+    size_t* count);
+
+void count_VkImageSwapchainCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageSwapchainCreateInfoKHR* toCount,
+    size_t* count);
+
+void count_VkBindImageMemorySwapchainInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindImageMemorySwapchainInfoKHR* toCount,
+    size_t* count);
+
+void count_VkAcquireNextImageInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAcquireNextImageInfoKHR* toCount,
+    size_t* count);
+
+void count_VkDeviceGroupPresentCapabilitiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceGroupPresentCapabilitiesKHR* toCount,
+    size_t* count);
+
+void count_VkDeviceGroupPresentInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceGroupPresentInfoKHR* toCount,
+    size_t* count);
+
+void count_VkDeviceGroupSwapchainCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceGroupSwapchainCreateInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_display
+void count_VkDisplayModeParametersKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayModeParametersKHR* toCount,
+    size_t* count);
+
+void count_VkDisplayModeCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayModeCreateInfoKHR* toCount,
+    size_t* count);
+
+void count_VkDisplayModePropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayModePropertiesKHR* toCount,
+    size_t* count);
+
+void count_VkDisplayPlaneCapabilitiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayPlaneCapabilitiesKHR* toCount,
+    size_t* count);
+
+void count_VkDisplayPlanePropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayPlanePropertiesKHR* toCount,
+    size_t* count);
+
+void count_VkDisplayPropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayPropertiesKHR* toCount,
+    size_t* count);
+
+void count_VkDisplaySurfaceCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplaySurfaceCreateInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_display_swapchain
+void count_VkDisplayPresentInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayPresentInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_xlib_surface
+void count_VkXlibSurfaceCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkXlibSurfaceCreateInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_xcb_surface
+void count_VkXcbSurfaceCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkXcbSurfaceCreateInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_wayland_surface
+void count_VkWaylandSurfaceCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkWaylandSurfaceCreateInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_android_surface
+void count_VkAndroidSurfaceCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAndroidSurfaceCreateInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_win32_surface
+void count_VkWin32SurfaceCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkWin32SurfaceCreateInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_sampler_mirror_clamp_to_edge
+#endif
+#ifdef VK_KHR_multiview
+DEFINE_ALIAS_FUNCTION(count_VkRenderPassMultiviewCreateInfo, count_VkRenderPassMultiviewCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceMultiviewFeatures, count_VkPhysicalDeviceMultiviewFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceMultiviewProperties, count_VkPhysicalDeviceMultiviewPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_get_physical_device_properties2
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceFeatures2, count_VkPhysicalDeviceFeatures2KHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceProperties2, count_VkPhysicalDeviceProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkFormatProperties2, count_VkFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkImageFormatProperties2, count_VkImageFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceImageFormatInfo2, count_VkPhysicalDeviceImageFormatInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkQueueFamilyProperties2, count_VkQueueFamilyProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceMemoryProperties2, count_VkPhysicalDeviceMemoryProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkSparseImageFormatProperties2, count_VkSparseImageFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceSparseImageFormatInfo2, count_VkPhysicalDeviceSparseImageFormatInfo2KHR);
+
+#endif
+#ifdef VK_KHR_device_group
+DEFINE_ALIAS_FUNCTION(count_VkMemoryAllocateFlagsInfo, count_VkMemoryAllocateFlagsInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkDeviceGroupRenderPassBeginInfo, count_VkDeviceGroupRenderPassBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkDeviceGroupCommandBufferBeginInfo, count_VkDeviceGroupCommandBufferBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkDeviceGroupSubmitInfo, count_VkDeviceGroupSubmitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkDeviceGroupBindSparseInfo, count_VkDeviceGroupBindSparseInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkBindBufferMemoryDeviceGroupInfo, count_VkBindBufferMemoryDeviceGroupInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkBindImageMemoryDeviceGroupInfo, count_VkBindImageMemoryDeviceGroupInfoKHR);
+
+#endif
+#ifdef VK_KHR_shader_draw_parameters
+#endif
+#ifdef VK_KHR_maintenance1
+#endif
+#ifdef VK_KHR_device_group_creation
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceGroupProperties, count_VkPhysicalDeviceGroupPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkDeviceGroupDeviceCreateInfo, count_VkDeviceGroupDeviceCreateInfoKHR);
+
+#endif
+#ifdef VK_KHR_external_memory_capabilities
+DEFINE_ALIAS_FUNCTION(count_VkExternalMemoryProperties, count_VkExternalMemoryPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceExternalImageFormatInfo, count_VkPhysicalDeviceExternalImageFormatInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkExternalImageFormatProperties, count_VkExternalImageFormatPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceExternalBufferInfo, count_VkPhysicalDeviceExternalBufferInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkExternalBufferProperties, count_VkExternalBufferPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceIDProperties, count_VkPhysicalDeviceIDPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_external_memory
+DEFINE_ALIAS_FUNCTION(count_VkExternalMemoryImageCreateInfo, count_VkExternalMemoryImageCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkExternalMemoryBufferCreateInfo, count_VkExternalMemoryBufferCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkExportMemoryAllocateInfo, count_VkExportMemoryAllocateInfoKHR);
+
+#endif
+#ifdef VK_KHR_external_memory_win32
+void count_VkImportMemoryWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportMemoryWin32HandleInfoKHR* toCount,
+    size_t* count);
+
+void count_VkExportMemoryWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExportMemoryWin32HandleInfoKHR* toCount,
+    size_t* count);
+
+void count_VkMemoryWin32HandlePropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryWin32HandlePropertiesKHR* toCount,
+    size_t* count);
+
+void count_VkMemoryGetWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryGetWin32HandleInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_external_memory_fd
+void count_VkImportMemoryFdInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportMemoryFdInfoKHR* toCount,
+    size_t* count);
+
+void count_VkMemoryFdPropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryFdPropertiesKHR* toCount,
+    size_t* count);
+
+void count_VkMemoryGetFdInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryGetFdInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_win32_keyed_mutex
+void count_VkWin32KeyedMutexAcquireReleaseInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkWin32KeyedMutexAcquireReleaseInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_external_semaphore_capabilities
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceExternalSemaphoreInfo, count_VkPhysicalDeviceExternalSemaphoreInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkExternalSemaphoreProperties, count_VkExternalSemaphorePropertiesKHR);
+
+#endif
+#ifdef VK_KHR_external_semaphore
+DEFINE_ALIAS_FUNCTION(count_VkExportSemaphoreCreateInfo, count_VkExportSemaphoreCreateInfoKHR);
+
+#endif
+#ifdef VK_KHR_external_semaphore_win32
+void count_VkImportSemaphoreWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportSemaphoreWin32HandleInfoKHR* toCount,
+    size_t* count);
+
+void count_VkExportSemaphoreWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExportSemaphoreWin32HandleInfoKHR* toCount,
+    size_t* count);
+
+void count_VkD3D12FenceSubmitInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkD3D12FenceSubmitInfoKHR* toCount,
+    size_t* count);
+
+void count_VkSemaphoreGetWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSemaphoreGetWin32HandleInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_external_semaphore_fd
+void count_VkImportSemaphoreFdInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportSemaphoreFdInfoKHR* toCount,
+    size_t* count);
+
+void count_VkSemaphoreGetFdInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSemaphoreGetFdInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_push_descriptor
+void count_VkPhysicalDevicePushDescriptorPropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePushDescriptorPropertiesKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_shader_float16_int8
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceShaderFloat16Int8Features, count_VkPhysicalDeviceShaderFloat16Int8FeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceShaderFloat16Int8Features, count_VkPhysicalDeviceFloat16Int8FeaturesKHR);
+
+#endif
+#ifdef VK_KHR_16bit_storage
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDevice16BitStorageFeatures, count_VkPhysicalDevice16BitStorageFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_incremental_present
+void count_VkRectLayerKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRectLayerKHR* toCount,
+    size_t* count);
+
+void count_VkPresentRegionKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPresentRegionKHR* toCount,
+    size_t* count);
+
+void count_VkPresentRegionsKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPresentRegionsKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_descriptor_update_template
+DEFINE_ALIAS_FUNCTION(count_VkDescriptorUpdateTemplateEntry, count_VkDescriptorUpdateTemplateEntryKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkDescriptorUpdateTemplateCreateInfo, count_VkDescriptorUpdateTemplateCreateInfoKHR);
+
+#endif
+#ifdef VK_KHR_imageless_framebuffer
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceImagelessFramebufferFeatures, count_VkPhysicalDeviceImagelessFramebufferFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkFramebufferAttachmentsCreateInfo, count_VkFramebufferAttachmentsCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkFramebufferAttachmentImageInfo, count_VkFramebufferAttachmentImageInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkRenderPassAttachmentBeginInfo, count_VkRenderPassAttachmentBeginInfoKHR);
+
+#endif
+#ifdef VK_KHR_create_renderpass2
+DEFINE_ALIAS_FUNCTION(count_VkRenderPassCreateInfo2, count_VkRenderPassCreateInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkAttachmentDescription2, count_VkAttachmentDescription2KHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkAttachmentReference2, count_VkAttachmentReference2KHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkSubpassDescription2, count_VkSubpassDescription2KHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkSubpassDependency2, count_VkSubpassDependency2KHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkSubpassBeginInfo, count_VkSubpassBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkSubpassEndInfo, count_VkSubpassEndInfoKHR);
+
+#endif
+#ifdef VK_KHR_shared_presentable_image
+void count_VkSharedPresentSurfaceCapabilitiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSharedPresentSurfaceCapabilitiesKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_external_fence_capabilities
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceExternalFenceInfo, count_VkPhysicalDeviceExternalFenceInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkExternalFenceProperties, count_VkExternalFencePropertiesKHR);
+
+#endif
+#ifdef VK_KHR_external_fence
+DEFINE_ALIAS_FUNCTION(count_VkExportFenceCreateInfo, count_VkExportFenceCreateInfoKHR);
+
+#endif
+#ifdef VK_KHR_external_fence_win32
+void count_VkImportFenceWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportFenceWin32HandleInfoKHR* toCount,
+    size_t* count);
+
+void count_VkExportFenceWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExportFenceWin32HandleInfoKHR* toCount,
+    size_t* count);
+
+void count_VkFenceGetWin32HandleInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFenceGetWin32HandleInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_external_fence_fd
+void count_VkImportFenceFdInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportFenceFdInfoKHR* toCount,
+    size_t* count);
+
+void count_VkFenceGetFdInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFenceGetFdInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_performance_query
+void count_VkPhysicalDevicePerformanceQueryFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePerformanceQueryFeaturesKHR* toCount,
+    size_t* count);
+
+void count_VkPhysicalDevicePerformanceQueryPropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePerformanceQueryPropertiesKHR* toCount,
+    size_t* count);
+
+void count_VkPerformanceCounterKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceCounterKHR* toCount,
+    size_t* count);
+
+void count_VkPerformanceCounterDescriptionKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceCounterDescriptionKHR* toCount,
+    size_t* count);
+
+void count_VkQueryPoolPerformanceCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkQueryPoolPerformanceCreateInfoKHR* toCount,
+    size_t* count);
+
+void count_VkPerformanceCounterResultKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceCounterResultKHR* toCount,
+    size_t* count);
+
+void count_VkAcquireProfilingLockInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAcquireProfilingLockInfoKHR* toCount,
+    size_t* count);
+
+void count_VkPerformanceQuerySubmitInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceQuerySubmitInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_maintenance2
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDevicePointClippingProperties, count_VkPhysicalDevicePointClippingPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkRenderPassInputAttachmentAspectCreateInfo, count_VkRenderPassInputAttachmentAspectCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkInputAttachmentAspectReference, count_VkInputAttachmentAspectReferenceKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkImageViewUsageCreateInfo, count_VkImageViewUsageCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkPipelineTessellationDomainOriginStateCreateInfo, count_VkPipelineTessellationDomainOriginStateCreateInfoKHR);
+
+#endif
+#ifdef VK_KHR_get_surface_capabilities2
+void count_VkPhysicalDeviceSurfaceInfo2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSurfaceInfo2KHR* toCount,
+    size_t* count);
+
+void count_VkSurfaceCapabilities2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceCapabilities2KHR* toCount,
+    size_t* count);
+
+void count_VkSurfaceFormat2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceFormat2KHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_variable_pointers
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceVariablePointersFeatures, count_VkPhysicalDeviceVariablePointerFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceVariablePointersFeatures, count_VkPhysicalDeviceVariablePointersFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_get_display_properties2
+void count_VkDisplayProperties2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayProperties2KHR* toCount,
+    size_t* count);
+
+void count_VkDisplayPlaneProperties2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayPlaneProperties2KHR* toCount,
+    size_t* count);
+
+void count_VkDisplayModeProperties2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayModeProperties2KHR* toCount,
+    size_t* count);
+
+void count_VkDisplayPlaneInfo2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayPlaneInfo2KHR* toCount,
+    size_t* count);
+
+void count_VkDisplayPlaneCapabilities2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayPlaneCapabilities2KHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_dedicated_allocation
+DEFINE_ALIAS_FUNCTION(count_VkMemoryDedicatedRequirements, count_VkMemoryDedicatedRequirementsKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkMemoryDedicatedAllocateInfo, count_VkMemoryDedicatedAllocateInfoKHR);
+
+#endif
+#ifdef VK_KHR_storage_buffer_storage_class
+#endif
+#ifdef VK_KHR_relaxed_block_layout
+#endif
+#ifdef VK_KHR_get_memory_requirements2
+DEFINE_ALIAS_FUNCTION(count_VkBufferMemoryRequirementsInfo2, count_VkBufferMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkImageMemoryRequirementsInfo2, count_VkImageMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkImageSparseMemoryRequirementsInfo2, count_VkImageSparseMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkMemoryRequirements2, count_VkMemoryRequirements2KHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkSparseImageMemoryRequirements2, count_VkSparseImageMemoryRequirements2KHR);
+
+#endif
+#ifdef VK_KHR_image_format_list
+DEFINE_ALIAS_FUNCTION(count_VkImageFormatListCreateInfo, count_VkImageFormatListCreateInfoKHR);
+
+#endif
+#ifdef VK_KHR_sampler_ycbcr_conversion
+DEFINE_ALIAS_FUNCTION(count_VkSamplerYcbcrConversionCreateInfo, count_VkSamplerYcbcrConversionCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkSamplerYcbcrConversionInfo, count_VkSamplerYcbcrConversionInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkBindImagePlaneMemoryInfo, count_VkBindImagePlaneMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkImagePlaneMemoryRequirementsInfo, count_VkImagePlaneMemoryRequirementsInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceSamplerYcbcrConversionFeatures, count_VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkSamplerYcbcrConversionImageFormatProperties, count_VkSamplerYcbcrConversionImageFormatPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_bind_memory2
+DEFINE_ALIAS_FUNCTION(count_VkBindBufferMemoryInfo, count_VkBindBufferMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkBindImageMemoryInfo, count_VkBindImageMemoryInfoKHR);
+
+#endif
+#ifdef VK_KHR_portability_subset
+void count_VkPhysicalDevicePortabilitySubsetFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePortabilitySubsetFeaturesKHR* toCount,
+    size_t* count);
+
+void count_VkPhysicalDevicePortabilitySubsetPropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePortabilitySubsetPropertiesKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_maintenance3
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceMaintenance3Properties, count_VkPhysicalDeviceMaintenance3PropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkDescriptorSetLayoutSupport, count_VkDescriptorSetLayoutSupportKHR);
+
+#endif
+#ifdef VK_KHR_draw_indirect_count
+#endif
+#ifdef VK_KHR_shader_subgroup_extended_types
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures, count_VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_8bit_storage
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDevice8BitStorageFeatures, count_VkPhysicalDevice8BitStorageFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_shader_atomic_int64
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceShaderAtomicInt64Features, count_VkPhysicalDeviceShaderAtomicInt64FeaturesKHR);
+
+#endif
+#ifdef VK_KHR_shader_clock
+void count_VkPhysicalDeviceShaderClockFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderClockFeaturesKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_driver_properties
+DEFINE_ALIAS_FUNCTION(count_VkConformanceVersion, count_VkConformanceVersionKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceDriverProperties, count_VkPhysicalDeviceDriverPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_shader_float_controls
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceFloatControlsProperties, count_VkPhysicalDeviceFloatControlsPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+DEFINE_ALIAS_FUNCTION(count_VkSubpassDescriptionDepthStencilResolve, count_VkSubpassDescriptionDepthStencilResolveKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceDepthStencilResolveProperties, count_VkPhysicalDeviceDepthStencilResolvePropertiesKHR);
+
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceTimelineSemaphoreFeatures, count_VkPhysicalDeviceTimelineSemaphoreFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceTimelineSemaphoreProperties, count_VkPhysicalDeviceTimelineSemaphorePropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkSemaphoreTypeCreateInfo, count_VkSemaphoreTypeCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkTimelineSemaphoreSubmitInfo, count_VkTimelineSemaphoreSubmitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkSemaphoreWaitInfo, count_VkSemaphoreWaitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkSemaphoreSignalInfo, count_VkSemaphoreSignalInfoKHR);
+
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceVulkanMemoryModelFeatures, count_VkPhysicalDeviceVulkanMemoryModelFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+void count_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+void count_VkFragmentShadingRateAttachmentInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFragmentShadingRateAttachmentInfoKHR* toCount,
+    size_t* count);
+
+void count_VkPipelineFragmentShadingRateStateCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineFragmentShadingRateStateCreateInfoKHR* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateFeaturesKHR* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRatePropertiesKHR* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceFragmentShadingRateKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+void count_VkSurfaceProtectedCapabilitiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceProtectedCapabilitiesKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures, count_VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkAttachmentReferenceStencilLayout, count_VkAttachmentReferenceStencilLayoutKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkAttachmentDescriptionStencilLayout, count_VkAttachmentDescriptionStencilLayoutKHR);
+
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceUniformBufferStandardLayoutFeatures, count_VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_buffer_device_address
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceBufferDeviceAddressFeatures, count_VkPhysicalDeviceBufferDeviceAddressFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkBufferDeviceAddressInfo, count_VkBufferDeviceAddressInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkBufferOpaqueCaptureAddressCreateInfo, count_VkBufferOpaqueCaptureAddressCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkMemoryOpaqueCaptureAddressAllocateInfo, count_VkMemoryOpaqueCaptureAddressAllocateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(count_VkDeviceMemoryOpaqueCaptureAddressInfo, count_VkDeviceMemoryOpaqueCaptureAddressInfoKHR);
+
+#endif
+#ifdef VK_KHR_deferred_host_operations
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+void count_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* toCount,
+    size_t* count);
+
+void count_VkPipelineInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineInfoKHR* toCount,
+    size_t* count);
+
+void count_VkPipelineExecutablePropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineExecutablePropertiesKHR* toCount,
+    size_t* count);
+
+void count_VkPipelineExecutableInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineExecutableInfoKHR* toCount,
+    size_t* count);
+
+void count_VkPipelineExecutableStatisticValueKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineExecutableStatisticValueKHR* toCount,
+    size_t* count);
+
+void count_VkPipelineExecutableStatisticKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineExecutableStatisticKHR* toCount,
+    size_t* count);
+
+void count_VkPipelineExecutableInternalRepresentationKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineExecutableInternalRepresentationKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_pipeline_library
+void count_VkPipelineLibraryCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineLibraryCreateInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+void count_VkBufferCopy2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferCopy2KHR* toCount,
+    size_t* count);
+
+void count_VkCopyBufferInfo2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyBufferInfo2KHR* toCount,
+    size_t* count);
+
+void count_VkImageCopy2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageCopy2KHR* toCount,
+    size_t* count);
+
+void count_VkCopyImageInfo2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyImageInfo2KHR* toCount,
+    size_t* count);
+
+void count_VkBufferImageCopy2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferImageCopy2KHR* toCount,
+    size_t* count);
+
+void count_VkCopyBufferToImageInfo2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyBufferToImageInfo2KHR* toCount,
+    size_t* count);
+
+void count_VkCopyImageToBufferInfo2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyImageToBufferInfo2KHR* toCount,
+    size_t* count);
+
+void count_VkImageBlit2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageBlit2KHR* toCount,
+    size_t* count);
+
+void count_VkBlitImageInfo2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBlitImageInfo2KHR* toCount,
+    size_t* count);
+
+void count_VkImageResolve2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageResolve2KHR* toCount,
+    size_t* count);
+
+void count_VkResolveImageInfo2KHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkResolveImageInfo2KHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_ANDROID_native_buffer
+void count_VkNativeBufferANDROID(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkNativeBufferANDROID* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_debug_report
+void count_VkDebugReportCallbackCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugReportCallbackCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_glsl_shader
+#endif
+#ifdef VK_EXT_depth_range_unrestricted
+#endif
+#ifdef VK_IMG_filter_cubic
+#endif
+#ifdef VK_AMD_rasterization_order
+void count_VkPipelineRasterizationStateRasterizationOrderAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateRasterizationOrderAMD* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_AMD_shader_trinary_minmax
+#endif
+#ifdef VK_AMD_shader_explicit_vertex_parameter
+#endif
+#ifdef VK_EXT_debug_marker
+void count_VkDebugMarkerObjectNameInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugMarkerObjectNameInfoEXT* toCount,
+    size_t* count);
+
+void count_VkDebugMarkerObjectTagInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugMarkerObjectTagInfoEXT* toCount,
+    size_t* count);
+
+void count_VkDebugMarkerMarkerInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugMarkerMarkerInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_AMD_gcn_shader
+#endif
+#ifdef VK_NV_dedicated_allocation
+void count_VkDedicatedAllocationImageCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDedicatedAllocationImageCreateInfoNV* toCount,
+    size_t* count);
+
+void count_VkDedicatedAllocationBufferCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDedicatedAllocationBufferCreateInfoNV* toCount,
+    size_t* count);
+
+void count_VkDedicatedAllocationMemoryAllocateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDedicatedAllocationMemoryAllocateInfoNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_transform_feedback
+void count_VkPhysicalDeviceTransformFeedbackFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTransformFeedbackFeaturesEXT* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceTransformFeedbackPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTransformFeedbackPropertiesEXT* toCount,
+    size_t* count);
+
+void count_VkPipelineRasterizationStateStreamCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateStreamCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NVX_image_view_handle
+void count_VkImageViewHandleInfoNVX(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageViewHandleInfoNVX* toCount,
+    size_t* count);
+
+void count_VkImageViewAddressPropertiesNVX(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageViewAddressPropertiesNVX* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_AMD_draw_indirect_count
+#endif
+#ifdef VK_AMD_negative_viewport_height
+#endif
+#ifdef VK_AMD_gpu_shader_half_float
+#endif
+#ifdef VK_AMD_shader_ballot
+#endif
+#ifdef VK_AMD_texture_gather_bias_lod
+void count_VkTextureLODGatherFormatPropertiesAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkTextureLODGatherFormatPropertiesAMD* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_AMD_shader_info
+void count_VkShaderResourceUsageAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkShaderResourceUsageAMD* toCount,
+    size_t* count);
+
+void count_VkShaderStatisticsInfoAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkShaderStatisticsInfoAMD* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_AMD_shader_image_load_store_lod
+#endif
+#ifdef VK_GGP_stream_descriptor_surface
+void count_VkStreamDescriptorSurfaceCreateInfoGGP(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkStreamDescriptorSurfaceCreateInfoGGP* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_corner_sampled_image
+void count_VkPhysicalDeviceCornerSampledImageFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCornerSampledImageFeaturesNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_IMG_format_pvrtc
+#endif
+#ifdef VK_NV_external_memory_capabilities
+void count_VkExternalImageFormatPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalImageFormatPropertiesNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_external_memory
+void count_VkExternalMemoryImageCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalMemoryImageCreateInfoNV* toCount,
+    size_t* count);
+
+void count_VkExportMemoryAllocateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExportMemoryAllocateInfoNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_external_memory_win32
+void count_VkImportMemoryWin32HandleInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportMemoryWin32HandleInfoNV* toCount,
+    size_t* count);
+
+void count_VkExportMemoryWin32HandleInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExportMemoryWin32HandleInfoNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_win32_keyed_mutex
+void count_VkWin32KeyedMutexAcquireReleaseInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkWin32KeyedMutexAcquireReleaseInfoNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_validation_flags
+void count_VkValidationFlagsEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkValidationFlagsEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NN_vi_surface
+void count_VkViSurfaceCreateInfoNN(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkViSurfaceCreateInfoNN* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_shader_subgroup_ballot
+#endif
+#ifdef VK_EXT_shader_subgroup_vote
+#endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+void count_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_astc_decode_mode
+void count_VkImageViewASTCDecodeModeEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageViewASTCDecodeModeEXT* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceASTCDecodeFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceASTCDecodeFeaturesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_conditional_rendering
+void count_VkConditionalRenderingBeginInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkConditionalRenderingBeginInfoEXT* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceConditionalRenderingFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceConditionalRenderingFeaturesEXT* toCount,
+    size_t* count);
+
+void count_VkCommandBufferInheritanceConditionalRenderingInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceConditionalRenderingInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_clip_space_w_scaling
+void count_VkViewportWScalingNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkViewportWScalingNV* toCount,
+    size_t* count);
+
+void count_VkPipelineViewportWScalingStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineViewportWScalingStateCreateInfoNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_direct_mode_display
+#endif
+#ifdef VK_EXT_acquire_xlib_display
+#endif
+#ifdef VK_EXT_display_surface_counter
+void count_VkSurfaceCapabilities2EXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceCapabilities2EXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_display_control
+void count_VkDisplayPowerInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayPowerInfoEXT* toCount,
+    size_t* count);
+
+void count_VkDeviceEventInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceEventInfoEXT* toCount,
+    size_t* count);
+
+void count_VkDisplayEventInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayEventInfoEXT* toCount,
+    size_t* count);
+
+void count_VkSwapchainCounterCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSwapchainCounterCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_GOOGLE_display_timing
+void count_VkRefreshCycleDurationGOOGLE(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRefreshCycleDurationGOOGLE* toCount,
+    size_t* count);
+
+void count_VkPastPresentationTimingGOOGLE(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPastPresentationTimingGOOGLE* toCount,
+    size_t* count);
+
+void count_VkPresentTimeGOOGLE(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPresentTimeGOOGLE* toCount,
+    size_t* count);
+
+void count_VkPresentTimesInfoGOOGLE(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPresentTimesInfoGOOGLE* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_sample_mask_override_coverage
+#endif
+#ifdef VK_NV_geometry_shader_passthrough
+#endif
+#ifdef VK_NV_viewport_array2
+#endif
+#ifdef VK_NVX_multiview_per_view_attributes
+void count_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_viewport_swizzle
+void count_VkViewportSwizzleNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkViewportSwizzleNV* toCount,
+    size_t* count);
+
+void count_VkPipelineViewportSwizzleStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineViewportSwizzleStateCreateInfoNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_discard_rectangles
+void count_VkPhysicalDeviceDiscardRectanglePropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDiscardRectanglePropertiesEXT* toCount,
+    size_t* count);
+
+void count_VkPipelineDiscardRectangleStateCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineDiscardRectangleStateCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_conservative_rasterization
+void count_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceConservativeRasterizationPropertiesEXT* toCount,
+    size_t* count);
+
+void count_VkPipelineRasterizationConservativeStateCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineRasterizationConservativeStateCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_depth_clip_enable
+void count_VkPhysicalDeviceDepthClipEnableFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDepthClipEnableFeaturesEXT* toCount,
+    size_t* count);
+
+void count_VkPipelineRasterizationDepthClipStateCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineRasterizationDepthClipStateCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_swapchain_colorspace
+#endif
+#ifdef VK_EXT_hdr_metadata
+void count_VkXYColorEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkXYColorEXT* toCount,
+    size_t* count);
+
+void count_VkHdrMetadataEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkHdrMetadataEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_MVK_ios_surface
+void count_VkIOSSurfaceCreateInfoMVK(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkIOSSurfaceCreateInfoMVK* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_MVK_macos_surface
+void count_VkMacOSSurfaceCreateInfoMVK(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMacOSSurfaceCreateInfoMVK* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_MVK_moltenvk
+#endif
+#ifdef VK_EXT_external_memory_dma_buf
+#endif
+#ifdef VK_EXT_queue_family_foreign
+#endif
+#ifdef VK_EXT_debug_utils
+void count_VkDebugUtilsLabelEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugUtilsLabelEXT* toCount,
+    size_t* count);
+
+void count_VkDebugUtilsObjectNameInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugUtilsObjectNameInfoEXT* toCount,
+    size_t* count);
+
+void count_VkDebugUtilsMessengerCallbackDataEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugUtilsMessengerCallbackDataEXT* toCount,
+    size_t* count);
+
+void count_VkDebugUtilsMessengerCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugUtilsMessengerCreateInfoEXT* toCount,
+    size_t* count);
+
+void count_VkDebugUtilsObjectTagInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDebugUtilsObjectTagInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_ANDROID_external_memory_android_hardware_buffer
+void count_VkAndroidHardwareBufferUsageANDROID(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAndroidHardwareBufferUsageANDROID* toCount,
+    size_t* count);
+
+void count_VkAndroidHardwareBufferPropertiesANDROID(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAndroidHardwareBufferPropertiesANDROID* toCount,
+    size_t* count);
+
+void count_VkAndroidHardwareBufferFormatPropertiesANDROID(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAndroidHardwareBufferFormatPropertiesANDROID* toCount,
+    size_t* count);
+
+void count_VkImportAndroidHardwareBufferInfoANDROID(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportAndroidHardwareBufferInfoANDROID* toCount,
+    size_t* count);
+
+void count_VkMemoryGetAndroidHardwareBufferInfoANDROID(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryGetAndroidHardwareBufferInfoANDROID* toCount,
+    size_t* count);
+
+void count_VkExternalFormatANDROID(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkExternalFormatANDROID* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_sampler_filter_minmax
+DEFINE_ALIAS_FUNCTION(count_VkSamplerReductionModeCreateInfo, count_VkSamplerReductionModeCreateInfoEXT);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceSamplerFilterMinmaxProperties, count_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT);
+
+#endif
+#ifdef VK_AMD_gpu_shader_int16
+#endif
+#ifdef VK_AMD_mixed_attachment_samples
+#endif
+#ifdef VK_AMD_shader_fragment_mask
+#endif
+#ifdef VK_EXT_inline_uniform_block
+void count_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceInlineUniformBlockFeaturesEXT* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceInlineUniformBlockPropertiesEXT* toCount,
+    size_t* count);
+
+void count_VkWriteDescriptorSetInlineUniformBlockEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetInlineUniformBlockEXT* toCount,
+    size_t* count);
+
+void count_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDescriptorPoolInlineUniformBlockCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_shader_stencil_export
+#endif
+#ifdef VK_EXT_sample_locations
+void count_VkSampleLocationEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSampleLocationEXT* toCount,
+    size_t* count);
+
+void count_VkSampleLocationsInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSampleLocationsInfoEXT* toCount,
+    size_t* count);
+
+void count_VkAttachmentSampleLocationsEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAttachmentSampleLocationsEXT* toCount,
+    size_t* count);
+
+void count_VkSubpassSampleLocationsEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSubpassSampleLocationsEXT* toCount,
+    size_t* count);
+
+void count_VkRenderPassSampleLocationsBeginInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassSampleLocationsBeginInfoEXT* toCount,
+    size_t* count);
+
+void count_VkPipelineSampleLocationsStateCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineSampleLocationsStateCreateInfoEXT* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceSampleLocationsPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSampleLocationsPropertiesEXT* toCount,
+    size_t* count);
+
+void count_VkMultisamplePropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMultisamplePropertiesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_blend_operation_advanced
+void count_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT* toCount,
+    size_t* count);
+
+void count_VkPipelineColorBlendAdvancedStateCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineColorBlendAdvancedStateCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_fragment_coverage_to_color
+void count_VkPipelineCoverageToColorStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineCoverageToColorStateCreateInfoNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_framebuffer_mixed_samples
+void count_VkPipelineCoverageModulationStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineCoverageModulationStateCreateInfoNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_fill_rectangle
+#endif
+#ifdef VK_NV_shader_sm_builtins
+void count_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_post_depth_coverage
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+void count_VkDrmFormatModifierPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDrmFormatModifierPropertiesEXT* toCount,
+    size_t* count);
+
+void count_VkDrmFormatModifierPropertiesListEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDrmFormatModifierPropertiesListEXT* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageDrmFormatModifierInfoEXT* toCount,
+    size_t* count);
+
+void count_VkImageDrmFormatModifierListCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierListCreateInfoEXT* toCount,
+    size_t* count);
+
+void count_VkImageDrmFormatModifierExplicitCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierExplicitCreateInfoEXT* toCount,
+    size_t* count);
+
+void count_VkImageDrmFormatModifierPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierPropertiesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_validation_cache
+void count_VkValidationCacheCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkValidationCacheCreateInfoEXT* toCount,
+    size_t* count);
+
+void count_VkShaderModuleValidationCacheCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkShaderModuleValidationCacheCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_descriptor_indexing
+DEFINE_ALIAS_FUNCTION(count_VkDescriptorSetLayoutBindingFlagsCreateInfo, count_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceDescriptorIndexingFeatures, count_VkPhysicalDeviceDescriptorIndexingFeaturesEXT);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceDescriptorIndexingProperties, count_VkPhysicalDeviceDescriptorIndexingPropertiesEXT);
+
+DEFINE_ALIAS_FUNCTION(count_VkDescriptorSetVariableDescriptorCountAllocateInfo, count_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT);
+
+DEFINE_ALIAS_FUNCTION(count_VkDescriptorSetVariableDescriptorCountLayoutSupport, count_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT);
+
+#endif
+#ifdef VK_EXT_shader_viewport_index_layer
+#endif
+#ifdef VK_NV_shading_rate_image
+void count_VkShadingRatePaletteNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkShadingRatePaletteNV* toCount,
+    size_t* count);
+
+void count_VkPipelineViewportShadingRateImageStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineViewportShadingRateImageStateCreateInfoNV* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceShadingRateImageFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShadingRateImageFeaturesNV* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceShadingRateImagePropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShadingRateImagePropertiesNV* toCount,
+    size_t* count);
+
+void count_VkCoarseSampleLocationNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCoarseSampleLocationNV* toCount,
+    size_t* count);
+
+void count_VkCoarseSampleOrderCustomNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCoarseSampleOrderCustomNV* toCount,
+    size_t* count);
+
+void count_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_ray_tracing
+void count_VkRayTracingShaderGroupCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRayTracingShaderGroupCreateInfoNV* toCount,
+    size_t* count);
+
+void count_VkRayTracingPipelineCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRayTracingPipelineCreateInfoNV* toCount,
+    size_t* count);
+
+void count_VkGeometryTrianglesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGeometryTrianglesNV* toCount,
+    size_t* count);
+
+void count_VkGeometryAABBNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGeometryAABBNV* toCount,
+    size_t* count);
+
+void count_VkGeometryDataNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGeometryDataNV* toCount,
+    size_t* count);
+
+void count_VkGeometryNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGeometryNV* toCount,
+    size_t* count);
+
+void count_VkAccelerationStructureInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureInfoNV* toCount,
+    size_t* count);
+
+void count_VkAccelerationStructureCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureCreateInfoNV* toCount,
+    size_t* count);
+
+void count_VkBindAccelerationStructureMemoryInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindAccelerationStructureMemoryInfoNV* toCount,
+    size_t* count);
+
+void count_VkWriteDescriptorSetAccelerationStructureNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetAccelerationStructureNV* toCount,
+    size_t* count);
+
+void count_VkAccelerationStructureMemoryRequirementsInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureMemoryRequirementsInfoNV* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceRayTracingPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPropertiesNV* toCount,
+    size_t* count);
+
+void count_VkTransformMatrixKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkTransformMatrixKHR* toCount,
+    size_t* count);
+
+DEFINE_ALIAS_FUNCTION(count_VkTransformMatrixKHR, count_VkTransformMatrixNV);
+
+void count_VkAabbPositionsKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAabbPositionsKHR* toCount,
+    size_t* count);
+
+DEFINE_ALIAS_FUNCTION(count_VkAabbPositionsKHR, count_VkAabbPositionsNV);
+
+void count_VkAccelerationStructureInstanceKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureInstanceKHR* toCount,
+    size_t* count);
+
+DEFINE_ALIAS_FUNCTION(count_VkAccelerationStructureInstanceKHR, count_VkAccelerationStructureInstanceNV);
+
+#endif
+#ifdef VK_NV_representative_fragment_test
+void count_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* toCount,
+    size_t* count);
+
+void count_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineRepresentativeFragmentTestStateCreateInfoNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_filter_cubic
+void count_VkPhysicalDeviceImageViewImageFormatInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageViewImageFormatInfoEXT* toCount,
+    size_t* count);
+
+void count_VkFilterCubicImageViewImageFormatPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFilterCubicImageViewImageFormatPropertiesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
+#endif
+#ifdef VK_EXT_global_priority
+void count_VkDeviceQueueGlobalPriorityCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceQueueGlobalPriorityCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_external_memory_host
+void count_VkImportMemoryHostPointerInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportMemoryHostPointerInfoEXT* toCount,
+    size_t* count);
+
+void count_VkMemoryHostPointerPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryHostPointerPropertiesEXT* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalMemoryHostPropertiesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_AMD_buffer_marker
+#endif
+#ifdef VK_AMD_pipeline_compiler_control
+void count_VkPipelineCompilerControlCreateInfoAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineCompilerControlCreateInfoAMD* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+void count_VkCalibratedTimestampInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCalibratedTimestampInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_AMD_shader_core_properties
+void count_VkPhysicalDeviceShaderCorePropertiesAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderCorePropertiesAMD* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_AMD_memory_overallocation_behavior
+void count_VkDeviceMemoryOverallocationCreateInfoAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceMemoryOverallocationCreateInfoAMD* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_vertex_attribute_divisor
+void count_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT* toCount,
+    size_t* count);
+
+void count_VkVertexInputBindingDivisorDescriptionEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkVertexInputBindingDivisorDescriptionEXT* toCount,
+    size_t* count);
+
+void count_VkPipelineVertexInputDivisorStateCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineVertexInputDivisorStateCreateInfoEXT* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_GGP_frame_token
+void count_VkPresentFrameTokenGGP(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPresentFrameTokenGGP* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+void count_VkPipelineCreationFeedbackEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineCreationFeedbackEXT* toCount,
+    size_t* count);
+
+void count_VkPipelineCreationFeedbackCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineCreationFeedbackCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_shader_subgroup_partitioned
+#endif
+#ifdef VK_NV_compute_shader_derivatives
+void count_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_mesh_shader
+void count_VkPhysicalDeviceMeshShaderFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMeshShaderFeaturesNV* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceMeshShaderPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMeshShaderPropertiesNV* toCount,
+    size_t* count);
+
+void count_VkDrawMeshTasksIndirectCommandNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDrawMeshTasksIndirectCommandNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+void count_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_shader_image_footprint
+void count_VkPhysicalDeviceShaderImageFootprintFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderImageFootprintFeaturesNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_scissor_exclusive
+void count_VkPipelineViewportExclusiveScissorStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineViewportExclusiveScissorStateCreateInfoNV* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceExclusiveScissorFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExclusiveScissorFeaturesNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_device_diagnostic_checkpoints
+void count_VkQueueFamilyCheckpointPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkQueueFamilyCheckpointPropertiesNV* toCount,
+    size_t* count);
+
+void count_VkCheckpointDataNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCheckpointDataNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_INTEL_shader_integer_functions2
+void count_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_INTEL_performance_query
+void count_VkPerformanceValueDataINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceValueDataINTEL* toCount,
+    size_t* count);
+
+void count_VkPerformanceValueINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceValueINTEL* toCount,
+    size_t* count);
+
+void count_VkInitializePerformanceApiInfoINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkInitializePerformanceApiInfoINTEL* toCount,
+    size_t* count);
+
+void count_VkQueryPoolPerformanceQueryCreateInfoINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkQueryPoolPerformanceQueryCreateInfoINTEL* toCount,
+    size_t* count);
+
+DEFINE_ALIAS_FUNCTION(count_VkQueryPoolPerformanceQueryCreateInfoINTEL, count_VkQueryPoolCreateInfoINTEL);
+
+void count_VkPerformanceMarkerInfoINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceMarkerInfoINTEL* toCount,
+    size_t* count);
+
+void count_VkPerformanceStreamMarkerInfoINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceStreamMarkerInfoINTEL* toCount,
+    size_t* count);
+
+void count_VkPerformanceOverrideInfoINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceOverrideInfoINTEL* toCount,
+    size_t* count);
+
+void count_VkPerformanceConfigurationAcquireInfoINTEL(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPerformanceConfigurationAcquireInfoINTEL* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_pci_bus_info
+void count_VkPhysicalDevicePCIBusInfoPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePCIBusInfoPropertiesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_AMD_display_native_hdr
+void count_VkDisplayNativeHdrSurfaceCapabilitiesAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDisplayNativeHdrSurfaceCapabilitiesAMD* toCount,
+    size_t* count);
+
+void count_VkSwapchainDisplayNativeHdrCreateInfoAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSwapchainDisplayNativeHdrCreateInfoAMD* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+void count_VkImagePipeSurfaceCreateInfoFUCHSIA(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImagePipeSurfaceCreateInfoFUCHSIA* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_metal_surface
+void count_VkMetalSurfaceCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMetalSurfaceCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_fragment_density_map
+void count_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMapFeaturesEXT* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMapPropertiesEXT* toCount,
+    size_t* count);
+
+void count_VkRenderPassFragmentDensityMapCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassFragmentDensityMapCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_scalar_block_layout
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceScalarBlockLayoutFeatures, count_VkPhysicalDeviceScalarBlockLayoutFeaturesEXT);
+
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+void count_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* toCount,
+    size_t* count);
+
+void count_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_AMD_shader_core_properties2
+void count_VkPhysicalDeviceShaderCoreProperties2AMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderCoreProperties2AMD* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_AMD_device_coherent_memory
+void count_VkPhysicalDeviceCoherentMemoryFeaturesAMD(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCoherentMemoryFeaturesAMD* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+void count_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_memory_budget
+void count_VkPhysicalDeviceMemoryBudgetPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryBudgetPropertiesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_memory_priority
+void count_VkPhysicalDeviceMemoryPriorityFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryPriorityFeaturesEXT* toCount,
+    size_t* count);
+
+void count_VkMemoryPriorityAllocateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkMemoryPriorityAllocateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+void count_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_buffer_device_address
+void count_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* toCount,
+    size_t* count);
+
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT, count_VkPhysicalDeviceBufferAddressFeaturesEXT);
+
+DEFINE_ALIAS_FUNCTION(count_VkBufferDeviceAddressInfo, count_VkBufferDeviceAddressInfoEXT);
+
+void count_VkBufferDeviceAddressCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBufferDeviceAddressCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_tooling_info
+void count_VkPhysicalDeviceToolPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceToolPropertiesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+DEFINE_ALIAS_FUNCTION(count_VkImageStencilUsageCreateInfo, count_VkImageStencilUsageCreateInfoEXT);
+
+#endif
+#ifdef VK_EXT_validation_features
+void count_VkValidationFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkValidationFeaturesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_cooperative_matrix
+void count_VkCooperativeMatrixPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCooperativeMatrixPropertiesNV* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceCooperativeMatrixFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCooperativeMatrixFeaturesNV* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceCooperativeMatrixPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCooperativeMatrixPropertiesNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+void count_VkPhysicalDeviceCoverageReductionModeFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCoverageReductionModeFeaturesNV* toCount,
+    size_t* count);
+
+void count_VkPipelineCoverageReductionStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineCoverageReductionStateCreateInfoNV* toCount,
+    size_t* count);
+
+void count_VkFramebufferMixedSamplesCombinationNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkFramebufferMixedSamplesCombinationNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+void count_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+void count_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+void count_VkSurfaceFullScreenExclusiveInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceFullScreenExclusiveInfoEXT* toCount,
+    size_t* count);
+
+void count_VkSurfaceCapabilitiesFullScreenExclusiveEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceCapabilitiesFullScreenExclusiveEXT* toCount,
+    size_t* count);
+
+void count_VkSurfaceFullScreenExclusiveWin32InfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSurfaceFullScreenExclusiveWin32InfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_headless_surface
+void count_VkHeadlessSurfaceCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkHeadlessSurfaceCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_line_rasterization
+void count_VkPhysicalDeviceLineRasterizationFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLineRasterizationFeaturesEXT* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceLineRasterizationPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLineRasterizationPropertiesEXT* toCount,
+    size_t* count);
+
+void count_VkPipelineRasterizationLineStateCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineRasterizationLineStateCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_shader_atomic_float
+void count_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_host_query_reset
+DEFINE_ALIAS_FUNCTION(count_VkPhysicalDeviceHostQueryResetFeatures, count_VkPhysicalDeviceHostQueryResetFeaturesEXT);
+
+#endif
+#ifdef VK_EXT_index_type_uint8
+void count_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceIndexTypeUint8FeaturesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+void count_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+void count_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_device_generated_commands
+void count_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* toCount,
+    size_t* count);
+
+void count_VkGraphicsShaderGroupCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGraphicsShaderGroupCreateInfoNV* toCount,
+    size_t* count);
+
+void count_VkGraphicsPipelineShaderGroupsCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGraphicsPipelineShaderGroupsCreateInfoNV* toCount,
+    size_t* count);
+
+void count_VkBindShaderGroupIndirectCommandNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindShaderGroupIndirectCommandNV* toCount,
+    size_t* count);
+
+void count_VkBindIndexBufferIndirectCommandNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindIndexBufferIndirectCommandNV* toCount,
+    size_t* count);
+
+void count_VkBindVertexBufferIndirectCommandNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkBindVertexBufferIndirectCommandNV* toCount,
+    size_t* count);
+
+void count_VkSetStateFlagsIndirectCommandNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSetStateFlagsIndirectCommandNV* toCount,
+    size_t* count);
+
+void count_VkIndirectCommandsStreamNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkIndirectCommandsStreamNV* toCount,
+    size_t* count);
+
+void count_VkIndirectCommandsLayoutTokenNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkIndirectCommandsLayoutTokenNV* toCount,
+    size_t* count);
+
+void count_VkIndirectCommandsLayoutCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkIndirectCommandsLayoutCreateInfoNV* toCount,
+    size_t* count);
+
+void count_VkGeneratedCommandsInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGeneratedCommandsInfoNV* toCount,
+    size_t* count);
+
+void count_VkGeneratedCommandsMemoryRequirementsInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkGeneratedCommandsMemoryRequirementsInfoNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+void count_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_QCOM_render_pass_transform
+void count_VkRenderPassTransformBeginInfoQCOM(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRenderPassTransformBeginInfoQCOM* toCount,
+    size_t* count);
+
+void count_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceRenderPassTransformInfoQCOM* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_device_memory_report
+void count_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* toCount,
+    size_t* count);
+
+void count_VkDeviceMemoryReportCallbackDataEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceMemoryReportCallbackDataEXT* toCount,
+    size_t* count);
+
+void count_VkDeviceDeviceMemoryReportCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceDeviceMemoryReportCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_robustness2
+void count_VkPhysicalDeviceRobustness2FeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRobustness2FeaturesEXT* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceRobustness2PropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRobustness2PropertiesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_custom_border_color
+void count_VkSamplerCustomBorderColorCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkSamplerCustomBorderColorCreateInfoEXT* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceCustomBorderColorPropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCustomBorderColorPropertiesEXT* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceCustomBorderColorFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCustomBorderColorFeaturesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+void count_VkPhysicalDevicePrivateDataFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePrivateDataFeaturesEXT* toCount,
+    size_t* count);
+
+void count_VkDevicePrivateDataCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDevicePrivateDataCreateInfoEXT* toCount,
+    size_t* count);
+
+void count_VkPrivateDataSlotCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPrivateDataSlotCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+void count_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_NV_device_diagnostics_config
+void count_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDiagnosticsConfigFeaturesNV* toCount,
+    size_t* count);
+
+void count_VkDeviceDiagnosticsConfigCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceDiagnosticsConfigCreateInfoNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+void count_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* toCount,
+    size_t* count);
+
+void count_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPipelineFragmentShadingRateEnumStateCreateInfoNV* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_fragment_density_map2
+void count_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+void count_VkCopyCommandTransformInfoQCOM(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyCommandTransformInfoQCOM* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_image_robustness
+void count_VkPhysicalDeviceImageRobustnessFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageRobustnessFeaturesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_4444_formats
+void count_VkPhysicalDevice4444FormatsFeaturesEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDevice4444FormatsFeaturesEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_EXT_directfb_surface
+void count_VkDirectFBSurfaceCreateInfoEXT(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDirectFBSurfaceCreateInfoEXT* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_GOOGLE_gfxstream
+void count_VkImportColorBufferGOOGLE(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportColorBufferGOOGLE* toCount,
+    size_t* count);
+
+void count_VkImportBufferGOOGLE(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportBufferGOOGLE* toCount,
+    size_t* count);
+
+void count_VkImportPhysicalAddressGOOGLE(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkImportPhysicalAddressGOOGLE* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_acceleration_structure
+void count_VkDeviceOrHostAddressKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceOrHostAddressKHR* toCount,
+    size_t* count);
+
+void count_VkDeviceOrHostAddressConstKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkDeviceOrHostAddressConstKHR* toCount,
+    size_t* count);
+
+void count_VkAccelerationStructureBuildRangeInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildRangeInfoKHR* toCount,
+    size_t* count);
+
+void count_VkAccelerationStructureGeometryTrianglesDataKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryTrianglesDataKHR* toCount,
+    size_t* count);
+
+void count_VkAccelerationStructureGeometryAabbsDataKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryAabbsDataKHR* toCount,
+    size_t* count);
+
+void count_VkAccelerationStructureGeometryInstancesDataKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryInstancesDataKHR* toCount,
+    size_t* count);
+
+void count_VkAccelerationStructureGeometryDataKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryDataKHR* toCount,
+    size_t* count);
+
+void count_VkAccelerationStructureGeometryKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryKHR* toCount,
+    size_t* count);
+
+void count_VkAccelerationStructureBuildGeometryInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildGeometryInfoKHR* toCount,
+    size_t* count);
+
+void count_VkAccelerationStructureCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureCreateInfoKHR* toCount,
+    size_t* count);
+
+void count_VkWriteDescriptorSetAccelerationStructureKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetAccelerationStructureKHR* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceAccelerationStructureFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceAccelerationStructureFeaturesKHR* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceAccelerationStructurePropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceAccelerationStructurePropertiesKHR* toCount,
+    size_t* count);
+
+void count_VkAccelerationStructureDeviceAddressInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureDeviceAddressInfoKHR* toCount,
+    size_t* count);
+
+void count_VkAccelerationStructureVersionInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureVersionInfoKHR* toCount,
+    size_t* count);
+
+void count_VkCopyAccelerationStructureToMemoryInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyAccelerationStructureToMemoryInfoKHR* toCount,
+    size_t* count);
+
+void count_VkCopyMemoryToAccelerationStructureInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyMemoryToAccelerationStructureInfoKHR* toCount,
+    size_t* count);
+
+void count_VkCopyAccelerationStructureInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkCopyAccelerationStructureInfoKHR* toCount,
+    size_t* count);
+
+void count_VkAccelerationStructureBuildSizesInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildSizesInfoKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+void count_VkRayTracingShaderGroupCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRayTracingShaderGroupCreateInfoKHR* toCount,
+    size_t* count);
+
+void count_VkRayTracingPipelineInterfaceCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRayTracingPipelineInterfaceCreateInfoKHR* toCount,
+    size_t* count);
+
+void count_VkRayTracingPipelineCreateInfoKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkRayTracingPipelineCreateInfoKHR* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPipelineFeaturesKHR* toCount,
+    size_t* count);
+
+void count_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPipelinePropertiesKHR* toCount,
+    size_t* count);
+
+void count_VkStridedDeviceAddressRegionKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkStridedDeviceAddressRegionKHR* toCount,
+    size_t* count);
+
+void count_VkTraceRaysIndirectCommandKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkTraceRaysIndirectCommandKHR* toCount,
+    size_t* count);
+
+#endif
+#ifdef VK_KHR_ray_query
+void count_VkPhysicalDeviceRayQueryFeaturesKHR(
+    uint32_t featureBits,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayQueryFeaturesKHR* toCount,
+    size_t* count);
+
+#endif
+
+} // namespace goldfish_vk
diff --git a/system/vulkan_enc/goldfish_vk_deepcopy_guest.cpp b/system/vulkan_enc/goldfish_vk_deepcopy_guest.cpp
index e2d3294..a1be4fb 100644
--- a/system/vulkan_enc/goldfish_vk_deepcopy_guest.cpp
+++ b/system/vulkan_enc/goldfish_vk_deepcopy_guest.cpp
@@ -28,130 +28,422 @@
 #include "goldfish_vk_extension_structs_guest.h"
 #include "goldfish_vk_private_defs.h"
 
+#include "vk_util.h"
+
 
 namespace goldfish_vk {
 
 void deepcopy_extension_struct(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const void* structExtension,
     void* structExtension_out);
 
 #ifdef VK_VERSION_1_0
-void deepcopy_VkApplicationInfo(
-    Pool* pool,
-    const VkApplicationInfo* from,
-    VkApplicationInfo* to)
+void deepcopy_VkExtent2D(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkExtent2D* from,
+    VkExtent2D* to)
 {
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-    to->pApplicationName = nullptr;
-    if (from->pApplicationName)
-    {
-        to->pApplicationName = pool->strDup(from->pApplicationName);
-    }
-    to->pEngineName = nullptr;
-    if (from->pEngineName)
-    {
-        to->pEngineName = pool->strDup(from->pEngineName);
-    }
-}
-
-void deepcopy_VkInstanceCreateInfo(
-    Pool* pool,
-    const VkInstanceCreateInfo* from,
-    VkInstanceCreateInfo* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-    to->pApplicationInfo = nullptr;
-    if (from->pApplicationInfo)
-    {
-        to->pApplicationInfo = (VkApplicationInfo*)pool->alloc(sizeof(const VkApplicationInfo));
-        deepcopy_VkApplicationInfo(pool, from->pApplicationInfo, (VkApplicationInfo*)(to->pApplicationInfo));
-    }
-    to->ppEnabledLayerNames = nullptr;
-    if (from->ppEnabledLayerNames && from->enabledLayerCount)
-    {
-        to->ppEnabledLayerNames = pool->strDupArray(from->ppEnabledLayerNames, from->enabledLayerCount);
-    }
-    to->ppEnabledExtensionNames = nullptr;
-    if (from->ppEnabledExtensionNames && from->enabledExtensionCount)
-    {
-        to->ppEnabledExtensionNames = pool->strDupArray(from->ppEnabledExtensionNames, from->enabledExtensionCount);
-    }
-}
-
-void deepcopy_VkAllocationCallbacks(
-    Pool* pool,
-    const VkAllocationCallbacks* from,
-    VkAllocationCallbacks* to)
-{
-    (void)pool;
-    *to = *from;
-    to->pUserData = nullptr;
-    if (from->pUserData)
-    {
-        to->pUserData = (void*)pool->dupArray(from->pUserData, sizeof(uint8_t));
-    }
-}
-
-void deepcopy_VkPhysicalDeviceFeatures(
-    Pool* pool,
-    const VkPhysicalDeviceFeatures* from,
-    VkPhysicalDeviceFeatures* to)
-{
-    (void)pool;
-    *to = *from;
-}
-
-void deepcopy_VkFormatProperties(
-    Pool* pool,
-    const VkFormatProperties* from,
-    VkFormatProperties* to)
-{
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkExtent3D(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExtent3D* from,
     VkExtent3D* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkOffset2D(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkOffset2D* from,
+    VkOffset2D* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkOffset3D(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkOffset3D* from,
+    VkOffset3D* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkRect2D(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRect2D* from,
+    VkRect2D* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    deepcopy_VkOffset2D(alloc, rootType, &from->offset, (VkOffset2D*)(&to->offset));
+    deepcopy_VkExtent2D(alloc, rootType, &from->extent, (VkExtent2D*)(&to->extent));
+}
+
+void deepcopy_VkBaseInStructure(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBaseInStructure* from,
+    VkBaseInStructure* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const VkBaseInStructure*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkBaseOutStructure(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBaseOutStructure* from,
+    VkBaseOutStructure* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (VkBaseOutStructure*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkBufferMemoryBarrier(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBufferMemoryBarrier* from,
+    VkBufferMemoryBarrier* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkDispatchIndirectCommand(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDispatchIndirectCommand* from,
+    VkDispatchIndirectCommand* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkDrawIndexedIndirectCommand(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDrawIndexedIndirectCommand* from,
+    VkDrawIndexedIndirectCommand* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkDrawIndirectCommand(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDrawIndirectCommand* from,
+    VkDrawIndirectCommand* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkImageSubresourceRange(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageSubresourceRange* from,
+    VkImageSubresourceRange* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkImageMemoryBarrier(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageMemoryBarrier* from,
+    VkImageMemoryBarrier* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkImageSubresourceRange(alloc, rootType, &from->subresourceRange, (VkImageSubresourceRange*)(&to->subresourceRange));
+}
+
+void deepcopy_VkMemoryBarrier(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkMemoryBarrier* from,
+    VkMemoryBarrier* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkAllocationCallbacks(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAllocationCallbacks* from,
+    VkAllocationCallbacks* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    to->pUserData = nullptr;
+    if (from->pUserData)
+    {
+        to->pUserData = (void*)alloc->dupArray(from->pUserData, sizeof(uint8_t));
+    }
+}
+
+void deepcopy_VkApplicationInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkApplicationInfo* from,
+    VkApplicationInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pApplicationName = nullptr;
+    if (from->pApplicationName)
+    {
+        to->pApplicationName = alloc->strDup(from->pApplicationName);
+    }
+    to->pEngineName = nullptr;
+    if (from->pEngineName)
+    {
+        to->pEngineName = alloc->strDup(from->pEngineName);
+    }
+}
+
+void deepcopy_VkFormatProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkFormatProperties* from,
+    VkFormatProperties* to)
+{
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkImageFormatProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImageFormatProperties* from,
     VkImageFormatProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    deepcopy_VkExtent3D(pool, &from->maxExtent, (VkExtent3D*)(&to->maxExtent));
+    deepcopy_VkExtent3D(alloc, rootType, &from->maxExtent, (VkExtent3D*)(&to->maxExtent));
+}
+
+void deepcopy_VkInstanceCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkInstanceCreateInfo* from,
+    VkInstanceCreateInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pApplicationInfo = nullptr;
+    if (from->pApplicationInfo)
+    {
+        to->pApplicationInfo = (VkApplicationInfo*)alloc->alloc(sizeof(const VkApplicationInfo));
+        deepcopy_VkApplicationInfo(alloc, rootType, from->pApplicationInfo, (VkApplicationInfo*)(to->pApplicationInfo));
+    }
+    to->ppEnabledLayerNames = nullptr;
+    if (from->ppEnabledLayerNames && from->enabledLayerCount)
+    {
+        to->ppEnabledLayerNames = alloc->strDupArray(from->ppEnabledLayerNames, from->enabledLayerCount);
+    }
+    to->ppEnabledExtensionNames = nullptr;
+    if (from->ppEnabledExtensionNames && from->enabledExtensionCount)
+    {
+        to->ppEnabledExtensionNames = alloc->strDupArray(from->ppEnabledExtensionNames, from->enabledExtensionCount);
+    }
+}
+
+void deepcopy_VkMemoryHeap(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkMemoryHeap* from,
+    VkMemoryHeap* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkMemoryType(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkMemoryType* from,
+    VkMemoryType* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkPhysicalDeviceFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFeatures* from,
+    VkPhysicalDeviceFeatures* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
 }
 
 void deepcopy_VkPhysicalDeviceLimits(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceLimits* from,
     VkPhysicalDeviceLimits* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
     memcpy(to->maxComputeWorkGroupCount, from->maxComputeWorkGroupCount, 3 * sizeof(uint32_t));
     memcpy(to->maxComputeWorkGroupSize, from->maxComputeWorkGroupSize, 3 * sizeof(uint32_t));
@@ -161,1564 +453,2107 @@
     memcpy(to->lineWidthRange, from->lineWidthRange, 2 * sizeof(float));
 }
 
+void deepcopy_VkPhysicalDeviceMemoryProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryProperties* from,
+    VkPhysicalDeviceMemoryProperties* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_TYPES; ++i)
+    {
+        deepcopy_VkMemoryType(alloc, rootType, from->memoryTypes + i, (VkMemoryType*)(to->memoryTypes + i));
+    }
+    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_HEAPS; ++i)
+    {
+        deepcopy_VkMemoryHeap(alloc, rootType, from->memoryHeaps + i, (VkMemoryHeap*)(to->memoryHeaps + i));
+    }
+}
+
 void deepcopy_VkPhysicalDeviceSparseProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceSparseProperties* from,
     VkPhysicalDeviceSparseProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkPhysicalDeviceProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceProperties* from,
     VkPhysicalDeviceProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
     memcpy(to->deviceName, from->deviceName, VK_MAX_PHYSICAL_DEVICE_NAME_SIZE * sizeof(char));
     memcpy(to->pipelineCacheUUID, from->pipelineCacheUUID, VK_UUID_SIZE * sizeof(uint8_t));
-    deepcopy_VkPhysicalDeviceLimits(pool, &from->limits, (VkPhysicalDeviceLimits*)(&to->limits));
-    deepcopy_VkPhysicalDeviceSparseProperties(pool, &from->sparseProperties, (VkPhysicalDeviceSparseProperties*)(&to->sparseProperties));
+    deepcopy_VkPhysicalDeviceLimits(alloc, rootType, &from->limits, (VkPhysicalDeviceLimits*)(&to->limits));
+    deepcopy_VkPhysicalDeviceSparseProperties(alloc, rootType, &from->sparseProperties, (VkPhysicalDeviceSparseProperties*)(&to->sparseProperties));
 }
 
 void deepcopy_VkQueueFamilyProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkQueueFamilyProperties* from,
     VkQueueFamilyProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    deepcopy_VkExtent3D(pool, &from->minImageTransferGranularity, (VkExtent3D*)(&to->minImageTransferGranularity));
-}
-
-void deepcopy_VkMemoryType(
-    Pool* pool,
-    const VkMemoryType* from,
-    VkMemoryType* to)
-{
-    (void)pool;
-    *to = *from;
-}
-
-void deepcopy_VkMemoryHeap(
-    Pool* pool,
-    const VkMemoryHeap* from,
-    VkMemoryHeap* to)
-{
-    (void)pool;
-    *to = *from;
-}
-
-void deepcopy_VkPhysicalDeviceMemoryProperties(
-    Pool* pool,
-    const VkPhysicalDeviceMemoryProperties* from,
-    VkPhysicalDeviceMemoryProperties* to)
-{
-    (void)pool;
-    *to = *from;
-    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_TYPES; ++i)
-    {
-        deepcopy_VkMemoryType(pool, from->memoryTypes + i, (VkMemoryType*)(to->memoryTypes + i));
-    }
-    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_HEAPS; ++i)
-    {
-        deepcopy_VkMemoryHeap(pool, from->memoryHeaps + i, (VkMemoryHeap*)(to->memoryHeaps + i));
-    }
+    deepcopy_VkExtent3D(alloc, rootType, &from->minImageTransferGranularity, (VkExtent3D*)(&to->minImageTransferGranularity));
 }
 
 void deepcopy_VkDeviceQueueCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceQueueCreateInfo* from,
     VkDeviceQueueCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pQueuePriorities = nullptr;
     if (from->pQueuePriorities)
     {
-        to->pQueuePriorities = (float*)pool->dupArray(from->pQueuePriorities, from->queueCount * sizeof(const float));
+        to->pQueuePriorities = (float*)alloc->dupArray(from->pQueuePriorities, from->queueCount * sizeof(const float));
     }
 }
 
 void deepcopy_VkDeviceCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceCreateInfo* from,
     VkDeviceCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pQueueCreateInfos = nullptr;
-    if (from->pQueueCreateInfos)
+    if (from)
     {
-        to->pQueueCreateInfos = (VkDeviceQueueCreateInfo*)pool->alloc(from->queueCreateInfoCount * sizeof(const VkDeviceQueueCreateInfo));
-        to->queueCreateInfoCount = from->queueCreateInfoCount;
-        for (uint32_t i = 0; i < (uint32_t)from->queueCreateInfoCount; ++i)
+        to->pQueueCreateInfos = nullptr;
+        if (from->pQueueCreateInfos)
         {
-            deepcopy_VkDeviceQueueCreateInfo(pool, from->pQueueCreateInfos + i, (VkDeviceQueueCreateInfo*)(to->pQueueCreateInfos + i));
+            to->pQueueCreateInfos = (VkDeviceQueueCreateInfo*)alloc->alloc(from->queueCreateInfoCount * sizeof(const VkDeviceQueueCreateInfo));
+            to->queueCreateInfoCount = from->queueCreateInfoCount;
+            for (uint32_t i = 0; i < (uint32_t)from->queueCreateInfoCount; ++i)
+            {
+                deepcopy_VkDeviceQueueCreateInfo(alloc, rootType, from->pQueueCreateInfos + i, (VkDeviceQueueCreateInfo*)(to->pQueueCreateInfos + i));
+            }
         }
     }
     to->ppEnabledLayerNames = nullptr;
     if (from->ppEnabledLayerNames && from->enabledLayerCount)
     {
-        to->ppEnabledLayerNames = pool->strDupArray(from->ppEnabledLayerNames, from->enabledLayerCount);
+        to->ppEnabledLayerNames = alloc->strDupArray(from->ppEnabledLayerNames, from->enabledLayerCount);
     }
     to->ppEnabledExtensionNames = nullptr;
     if (from->ppEnabledExtensionNames && from->enabledExtensionCount)
     {
-        to->ppEnabledExtensionNames = pool->strDupArray(from->ppEnabledExtensionNames, from->enabledExtensionCount);
+        to->ppEnabledExtensionNames = alloc->strDupArray(from->ppEnabledExtensionNames, from->enabledExtensionCount);
     }
     to->pEnabledFeatures = nullptr;
     if (from->pEnabledFeatures)
     {
-        to->pEnabledFeatures = (VkPhysicalDeviceFeatures*)pool->alloc(sizeof(const VkPhysicalDeviceFeatures));
-        deepcopy_VkPhysicalDeviceFeatures(pool, from->pEnabledFeatures, (VkPhysicalDeviceFeatures*)(to->pEnabledFeatures));
+        to->pEnabledFeatures = (VkPhysicalDeviceFeatures*)alloc->alloc(sizeof(const VkPhysicalDeviceFeatures));
+        deepcopy_VkPhysicalDeviceFeatures(alloc, rootType, from->pEnabledFeatures, (VkPhysicalDeviceFeatures*)(to->pEnabledFeatures));
     }
 }
 
 void deepcopy_VkExtensionProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExtensionProperties* from,
     VkExtensionProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
     memcpy(to->extensionName, from->extensionName, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
 }
 
 void deepcopy_VkLayerProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkLayerProperties* from,
     VkLayerProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
     memcpy(to->layerName, from->layerName, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
     memcpy(to->description, from->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
 }
 
 void deepcopy_VkSubmitInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSubmitInfo* from,
     VkSubmitInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pWaitSemaphores = nullptr;
     if (from->pWaitSemaphores)
     {
-        to->pWaitSemaphores = (VkSemaphore*)pool->dupArray(from->pWaitSemaphores, from->waitSemaphoreCount * sizeof(const VkSemaphore));
+        to->pWaitSemaphores = (VkSemaphore*)alloc->dupArray(from->pWaitSemaphores, from->waitSemaphoreCount * sizeof(const VkSemaphore));
     }
     to->pWaitDstStageMask = nullptr;
     if (from->pWaitDstStageMask)
     {
-        to->pWaitDstStageMask = (VkPipelineStageFlags*)pool->dupArray(from->pWaitDstStageMask, from->waitSemaphoreCount * sizeof(const VkPipelineStageFlags));
+        to->pWaitDstStageMask = (VkPipelineStageFlags*)alloc->dupArray(from->pWaitDstStageMask, from->waitSemaphoreCount * sizeof(const VkPipelineStageFlags));
     }
     to->pCommandBuffers = nullptr;
     if (from->pCommandBuffers)
     {
-        to->pCommandBuffers = (VkCommandBuffer*)pool->dupArray(from->pCommandBuffers, from->commandBufferCount * sizeof(const VkCommandBuffer));
+        to->pCommandBuffers = (VkCommandBuffer*)alloc->dupArray(from->pCommandBuffers, from->commandBufferCount * sizeof(const VkCommandBuffer));
     }
     to->pSignalSemaphores = nullptr;
     if (from->pSignalSemaphores)
     {
-        to->pSignalSemaphores = (VkSemaphore*)pool->dupArray(from->pSignalSemaphores, from->signalSemaphoreCount * sizeof(const VkSemaphore));
-    }
-}
-
-void deepcopy_VkMemoryAllocateInfo(
-    Pool* pool,
-    const VkMemoryAllocateInfo* from,
-    VkMemoryAllocateInfo* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pSignalSemaphores = (VkSemaphore*)alloc->dupArray(from->pSignalSemaphores, from->signalSemaphoreCount * sizeof(const VkSemaphore));
     }
 }
 
 void deepcopy_VkMappedMemoryRange(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMappedMemoryRange* from,
     VkMappedMemoryRange* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkMemoryAllocateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkMemoryAllocateInfo* from,
+    VkMemoryAllocateInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkMemoryRequirements(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryRequirements* from,
     VkMemoryRequirements* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
-void deepcopy_VkSparseImageFormatProperties(
-    Pool* pool,
-    const VkSparseImageFormatProperties* from,
-    VkSparseImageFormatProperties* to)
-{
-    (void)pool;
-    *to = *from;
-    deepcopy_VkExtent3D(pool, &from->imageGranularity, (VkExtent3D*)(&to->imageGranularity));
-}
-
-void deepcopy_VkSparseImageMemoryRequirements(
-    Pool* pool,
-    const VkSparseImageMemoryRequirements* from,
-    VkSparseImageMemoryRequirements* to)
-{
-    (void)pool;
-    *to = *from;
-    deepcopy_VkSparseImageFormatProperties(pool, &from->formatProperties, (VkSparseImageFormatProperties*)(&to->formatProperties));
-}
-
 void deepcopy_VkSparseMemoryBind(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSparseMemoryBind* from,
     VkSparseMemoryBind* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkSparseBufferMemoryBindInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSparseBufferMemoryBindInfo* from,
     VkSparseBufferMemoryBindInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    to->pBinds = nullptr;
-    if (from->pBinds)
+    if (from)
     {
-        to->pBinds = (VkSparseMemoryBind*)pool->alloc(from->bindCount * sizeof(const VkSparseMemoryBind));
-        to->bindCount = from->bindCount;
-        for (uint32_t i = 0; i < (uint32_t)from->bindCount; ++i)
+        to->pBinds = nullptr;
+        if (from->pBinds)
         {
-            deepcopy_VkSparseMemoryBind(pool, from->pBinds + i, (VkSparseMemoryBind*)(to->pBinds + i));
+            to->pBinds = (VkSparseMemoryBind*)alloc->alloc(from->bindCount * sizeof(const VkSparseMemoryBind));
+            to->bindCount = from->bindCount;
+            for (uint32_t i = 0; i < (uint32_t)from->bindCount; ++i)
+            {
+                deepcopy_VkSparseMemoryBind(alloc, rootType, from->pBinds + i, (VkSparseMemoryBind*)(to->pBinds + i));
+            }
         }
     }
 }
 
 void deepcopy_VkSparseImageOpaqueMemoryBindInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSparseImageOpaqueMemoryBindInfo* from,
     VkSparseImageOpaqueMemoryBindInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    to->pBinds = nullptr;
-    if (from->pBinds)
+    if (from)
     {
-        to->pBinds = (VkSparseMemoryBind*)pool->alloc(from->bindCount * sizeof(const VkSparseMemoryBind));
-        to->bindCount = from->bindCount;
-        for (uint32_t i = 0; i < (uint32_t)from->bindCount; ++i)
+        to->pBinds = nullptr;
+        if (from->pBinds)
         {
-            deepcopy_VkSparseMemoryBind(pool, from->pBinds + i, (VkSparseMemoryBind*)(to->pBinds + i));
+            to->pBinds = (VkSparseMemoryBind*)alloc->alloc(from->bindCount * sizeof(const VkSparseMemoryBind));
+            to->bindCount = from->bindCount;
+            for (uint32_t i = 0; i < (uint32_t)from->bindCount; ++i)
+            {
+                deepcopy_VkSparseMemoryBind(alloc, rootType, from->pBinds + i, (VkSparseMemoryBind*)(to->pBinds + i));
+            }
         }
     }
 }
 
 void deepcopy_VkImageSubresource(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImageSubresource* from,
     VkImageSubresource* to)
 {
-    (void)pool;
-    *to = *from;
-}
-
-void deepcopy_VkOffset3D(
-    Pool* pool,
-    const VkOffset3D* from,
-    VkOffset3D* to)
-{
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkSparseImageMemoryBind(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSparseImageMemoryBind* from,
     VkSparseImageMemoryBind* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    deepcopy_VkImageSubresource(pool, &from->subresource, (VkImageSubresource*)(&to->subresource));
-    deepcopy_VkOffset3D(pool, &from->offset, (VkOffset3D*)(&to->offset));
-    deepcopy_VkExtent3D(pool, &from->extent, (VkExtent3D*)(&to->extent));
+    deepcopy_VkImageSubresource(alloc, rootType, &from->subresource, (VkImageSubresource*)(&to->subresource));
+    deepcopy_VkOffset3D(alloc, rootType, &from->offset, (VkOffset3D*)(&to->offset));
+    deepcopy_VkExtent3D(alloc, rootType, &from->extent, (VkExtent3D*)(&to->extent));
 }
 
 void deepcopy_VkSparseImageMemoryBindInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSparseImageMemoryBindInfo* from,
     VkSparseImageMemoryBindInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    to->pBinds = nullptr;
-    if (from->pBinds)
+    if (from)
     {
-        to->pBinds = (VkSparseImageMemoryBind*)pool->alloc(from->bindCount * sizeof(const VkSparseImageMemoryBind));
-        to->bindCount = from->bindCount;
-        for (uint32_t i = 0; i < (uint32_t)from->bindCount; ++i)
+        to->pBinds = nullptr;
+        if (from->pBinds)
         {
-            deepcopy_VkSparseImageMemoryBind(pool, from->pBinds + i, (VkSparseImageMemoryBind*)(to->pBinds + i));
+            to->pBinds = (VkSparseImageMemoryBind*)alloc->alloc(from->bindCount * sizeof(const VkSparseImageMemoryBind));
+            to->bindCount = from->bindCount;
+            for (uint32_t i = 0; i < (uint32_t)from->bindCount; ++i)
+            {
+                deepcopy_VkSparseImageMemoryBind(alloc, rootType, from->pBinds + i, (VkSparseImageMemoryBind*)(to->pBinds + i));
+            }
         }
     }
 }
 
 void deepcopy_VkBindSparseInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBindSparseInfo* from,
     VkBindSparseInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pWaitSemaphores = nullptr;
     if (from->pWaitSemaphores)
     {
-        to->pWaitSemaphores = (VkSemaphore*)pool->dupArray(from->pWaitSemaphores, from->waitSemaphoreCount * sizeof(const VkSemaphore));
+        to->pWaitSemaphores = (VkSemaphore*)alloc->dupArray(from->pWaitSemaphores, from->waitSemaphoreCount * sizeof(const VkSemaphore));
     }
-    to->pBufferBinds = nullptr;
-    if (from->pBufferBinds)
+    if (from)
     {
-        to->pBufferBinds = (VkSparseBufferMemoryBindInfo*)pool->alloc(from->bufferBindCount * sizeof(const VkSparseBufferMemoryBindInfo));
-        to->bufferBindCount = from->bufferBindCount;
-        for (uint32_t i = 0; i < (uint32_t)from->bufferBindCount; ++i)
+        to->pBufferBinds = nullptr;
+        if (from->pBufferBinds)
         {
-            deepcopy_VkSparseBufferMemoryBindInfo(pool, from->pBufferBinds + i, (VkSparseBufferMemoryBindInfo*)(to->pBufferBinds + i));
+            to->pBufferBinds = (VkSparseBufferMemoryBindInfo*)alloc->alloc(from->bufferBindCount * sizeof(const VkSparseBufferMemoryBindInfo));
+            to->bufferBindCount = from->bufferBindCount;
+            for (uint32_t i = 0; i < (uint32_t)from->bufferBindCount; ++i)
+            {
+                deepcopy_VkSparseBufferMemoryBindInfo(alloc, rootType, from->pBufferBinds + i, (VkSparseBufferMemoryBindInfo*)(to->pBufferBinds + i));
+            }
         }
     }
-    to->pImageOpaqueBinds = nullptr;
-    if (from->pImageOpaqueBinds)
+    if (from)
     {
-        to->pImageOpaqueBinds = (VkSparseImageOpaqueMemoryBindInfo*)pool->alloc(from->imageOpaqueBindCount * sizeof(const VkSparseImageOpaqueMemoryBindInfo));
-        to->imageOpaqueBindCount = from->imageOpaqueBindCount;
-        for (uint32_t i = 0; i < (uint32_t)from->imageOpaqueBindCount; ++i)
+        to->pImageOpaqueBinds = nullptr;
+        if (from->pImageOpaqueBinds)
         {
-            deepcopy_VkSparseImageOpaqueMemoryBindInfo(pool, from->pImageOpaqueBinds + i, (VkSparseImageOpaqueMemoryBindInfo*)(to->pImageOpaqueBinds + i));
+            to->pImageOpaqueBinds = (VkSparseImageOpaqueMemoryBindInfo*)alloc->alloc(from->imageOpaqueBindCount * sizeof(const VkSparseImageOpaqueMemoryBindInfo));
+            to->imageOpaqueBindCount = from->imageOpaqueBindCount;
+            for (uint32_t i = 0; i < (uint32_t)from->imageOpaqueBindCount; ++i)
+            {
+                deepcopy_VkSparseImageOpaqueMemoryBindInfo(alloc, rootType, from->pImageOpaqueBinds + i, (VkSparseImageOpaqueMemoryBindInfo*)(to->pImageOpaqueBinds + i));
+            }
         }
     }
-    to->pImageBinds = nullptr;
-    if (from->pImageBinds)
+    if (from)
     {
-        to->pImageBinds = (VkSparseImageMemoryBindInfo*)pool->alloc(from->imageBindCount * sizeof(const VkSparseImageMemoryBindInfo));
-        to->imageBindCount = from->imageBindCount;
-        for (uint32_t i = 0; i < (uint32_t)from->imageBindCount; ++i)
+        to->pImageBinds = nullptr;
+        if (from->pImageBinds)
         {
-            deepcopy_VkSparseImageMemoryBindInfo(pool, from->pImageBinds + i, (VkSparseImageMemoryBindInfo*)(to->pImageBinds + i));
+            to->pImageBinds = (VkSparseImageMemoryBindInfo*)alloc->alloc(from->imageBindCount * sizeof(const VkSparseImageMemoryBindInfo));
+            to->imageBindCount = from->imageBindCount;
+            for (uint32_t i = 0; i < (uint32_t)from->imageBindCount; ++i)
+            {
+                deepcopy_VkSparseImageMemoryBindInfo(alloc, rootType, from->pImageBinds + i, (VkSparseImageMemoryBindInfo*)(to->pImageBinds + i));
+            }
         }
     }
     to->pSignalSemaphores = nullptr;
     if (from->pSignalSemaphores)
     {
-        to->pSignalSemaphores = (VkSemaphore*)pool->dupArray(from->pSignalSemaphores, from->signalSemaphoreCount * sizeof(const VkSemaphore));
+        to->pSignalSemaphores = (VkSemaphore*)alloc->dupArray(from->pSignalSemaphores, from->signalSemaphoreCount * sizeof(const VkSemaphore));
     }
 }
 
+void deepcopy_VkSparseImageFormatProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSparseImageFormatProperties* from,
+    VkSparseImageFormatProperties* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    deepcopy_VkExtent3D(alloc, rootType, &from->imageGranularity, (VkExtent3D*)(&to->imageGranularity));
+}
+
+void deepcopy_VkSparseImageMemoryRequirements(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSparseImageMemoryRequirements* from,
+    VkSparseImageMemoryRequirements* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    deepcopy_VkSparseImageFormatProperties(alloc, rootType, &from->formatProperties, (VkSparseImageFormatProperties*)(&to->formatProperties));
+}
+
 void deepcopy_VkFenceCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkFenceCreateInfo* from,
     VkFenceCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkSemaphoreCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSemaphoreCreateInfo* from,
     VkSemaphoreCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkEventCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkEventCreateInfo* from,
     VkEventCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkQueryPoolCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkQueryPoolCreateInfo* from,
     VkQueryPoolCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkBufferCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBufferCreateInfo* from,
     VkBufferCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pQueueFamilyIndices = nullptr;
     if (from->pQueueFamilyIndices)
     {
-        to->pQueueFamilyIndices = (uint32_t*)pool->dupArray(from->pQueueFamilyIndices, from->queueFamilyIndexCount * sizeof(const uint32_t));
+        to->pQueueFamilyIndices = (uint32_t*)alloc->dupArray(from->pQueueFamilyIndices, from->queueFamilyIndexCount * sizeof(const uint32_t));
     }
 }
 
 void deepcopy_VkBufferViewCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBufferViewCreateInfo* from,
     VkBufferViewCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkImageCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImageCreateInfo* from,
     VkImageCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkExtent3D(pool, &from->extent, (VkExtent3D*)(&to->extent));
+    deepcopy_VkExtent3D(alloc, rootType, &from->extent, (VkExtent3D*)(&to->extent));
     to->pQueueFamilyIndices = nullptr;
     if (from->pQueueFamilyIndices)
     {
-        to->pQueueFamilyIndices = (uint32_t*)pool->dupArray(from->pQueueFamilyIndices, from->queueFamilyIndexCount * sizeof(const uint32_t));
+        to->pQueueFamilyIndices = (uint32_t*)alloc->dupArray(from->pQueueFamilyIndices, from->queueFamilyIndexCount * sizeof(const uint32_t));
     }
 }
 
 void deepcopy_VkSubresourceLayout(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSubresourceLayout* from,
     VkSubresourceLayout* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkComponentMapping(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkComponentMapping* from,
     VkComponentMapping* to)
 {
-    (void)pool;
-    *to = *from;
-}
-
-void deepcopy_VkImageSubresourceRange(
-    Pool* pool,
-    const VkImageSubresourceRange* from,
-    VkImageSubresourceRange* to)
-{
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkImageViewCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImageViewCreateInfo* from,
     VkImageViewCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkComponentMapping(pool, &from->components, (VkComponentMapping*)(&to->components));
-    deepcopy_VkImageSubresourceRange(pool, &from->subresourceRange, (VkImageSubresourceRange*)(&to->subresourceRange));
+    deepcopy_VkComponentMapping(alloc, rootType, &from->components, (VkComponentMapping*)(&to->components));
+    deepcopy_VkImageSubresourceRange(alloc, rootType, &from->subresourceRange, (VkImageSubresourceRange*)(&to->subresourceRange));
 }
 
 void deepcopy_VkShaderModuleCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkShaderModuleCreateInfo* from,
     VkShaderModuleCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pCode = nullptr;
     if (from->pCode)
     {
-        to->pCode = (uint32_t*)pool->dupArray(from->pCode, (from->codeSize / 4) * sizeof(const uint32_t));
+        to->pCode = (uint32_t*)alloc->dupArray(from->pCode, (from->codeSize / 4) * sizeof(const uint32_t));
     }
 }
 
 void deepcopy_VkPipelineCacheCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineCacheCreateInfo* from,
     VkPipelineCacheCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pInitialData = nullptr;
     if (from->pInitialData)
     {
-        to->pInitialData = (void*)pool->dupArray(from->pInitialData, from->initialDataSize * sizeof(const uint8_t));
+        to->pInitialData = (void*)alloc->dupArray(from->pInitialData, from->initialDataSize * sizeof(const uint8_t));
     }
 }
 
 void deepcopy_VkSpecializationMapEntry(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSpecializationMapEntry* from,
     VkSpecializationMapEntry* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkSpecializationInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSpecializationInfo* from,
     VkSpecializationInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    to->pMapEntries = nullptr;
-    if (from->pMapEntries)
+    if (from)
     {
-        to->pMapEntries = (VkSpecializationMapEntry*)pool->alloc(from->mapEntryCount * sizeof(const VkSpecializationMapEntry));
-        to->mapEntryCount = from->mapEntryCount;
-        for (uint32_t i = 0; i < (uint32_t)from->mapEntryCount; ++i)
+        to->pMapEntries = nullptr;
+        if (from->pMapEntries)
         {
-            deepcopy_VkSpecializationMapEntry(pool, from->pMapEntries + i, (VkSpecializationMapEntry*)(to->pMapEntries + i));
+            to->pMapEntries = (VkSpecializationMapEntry*)alloc->alloc(from->mapEntryCount * sizeof(const VkSpecializationMapEntry));
+            to->mapEntryCount = from->mapEntryCount;
+            for (uint32_t i = 0; i < (uint32_t)from->mapEntryCount; ++i)
+            {
+                deepcopy_VkSpecializationMapEntry(alloc, rootType, from->pMapEntries + i, (VkSpecializationMapEntry*)(to->pMapEntries + i));
+            }
         }
     }
     to->pData = nullptr;
     if (from->pData)
     {
-        to->pData = (void*)pool->dupArray(from->pData, from->dataSize * sizeof(const uint8_t));
+        to->pData = (void*)alloc->dupArray(from->pData, from->dataSize * sizeof(const uint8_t));
     }
 }
 
 void deepcopy_VkPipelineShaderStageCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineShaderStageCreateInfo* from,
     VkPipelineShaderStageCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pName = nullptr;
     if (from->pName)
     {
-        to->pName = pool->strDup(from->pName);
+        to->pName = alloc->strDup(from->pName);
     }
     to->pSpecializationInfo = nullptr;
     if (from->pSpecializationInfo)
     {
-        to->pSpecializationInfo = (VkSpecializationInfo*)pool->alloc(sizeof(const VkSpecializationInfo));
-        deepcopy_VkSpecializationInfo(pool, from->pSpecializationInfo, (VkSpecializationInfo*)(to->pSpecializationInfo));
+        to->pSpecializationInfo = (VkSpecializationInfo*)alloc->alloc(sizeof(const VkSpecializationInfo));
+        deepcopy_VkSpecializationInfo(alloc, rootType, from->pSpecializationInfo, (VkSpecializationInfo*)(to->pSpecializationInfo));
     }
 }
 
+void deepcopy_VkComputePipelineCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkComputePipelineCreateInfo* from,
+    VkComputePipelineCreateInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkPipelineShaderStageCreateInfo(alloc, rootType, &from->stage, (VkPipelineShaderStageCreateInfo*)(&to->stage));
+}
+
 void deepcopy_VkVertexInputBindingDescription(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkVertexInputBindingDescription* from,
     VkVertexInputBindingDescription* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkVertexInputAttributeDescription(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkVertexInputAttributeDescription* from,
     VkVertexInputAttributeDescription* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkPipelineVertexInputStateCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineVertexInputStateCreateInfo* from,
     VkPipelineVertexInputStateCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pVertexBindingDescriptions = nullptr;
-    if (from->pVertexBindingDescriptions)
+    if (from)
     {
-        to->pVertexBindingDescriptions = (VkVertexInputBindingDescription*)pool->alloc(from->vertexBindingDescriptionCount * sizeof(const VkVertexInputBindingDescription));
-        to->vertexBindingDescriptionCount = from->vertexBindingDescriptionCount;
-        for (uint32_t i = 0; i < (uint32_t)from->vertexBindingDescriptionCount; ++i)
+        to->pVertexBindingDescriptions = nullptr;
+        if (from->pVertexBindingDescriptions)
         {
-            deepcopy_VkVertexInputBindingDescription(pool, from->pVertexBindingDescriptions + i, (VkVertexInputBindingDescription*)(to->pVertexBindingDescriptions + i));
+            to->pVertexBindingDescriptions = (VkVertexInputBindingDescription*)alloc->alloc(from->vertexBindingDescriptionCount * sizeof(const VkVertexInputBindingDescription));
+            to->vertexBindingDescriptionCount = from->vertexBindingDescriptionCount;
+            for (uint32_t i = 0; i < (uint32_t)from->vertexBindingDescriptionCount; ++i)
+            {
+                deepcopy_VkVertexInputBindingDescription(alloc, rootType, from->pVertexBindingDescriptions + i, (VkVertexInputBindingDescription*)(to->pVertexBindingDescriptions + i));
+            }
         }
     }
-    to->pVertexAttributeDescriptions = nullptr;
-    if (from->pVertexAttributeDescriptions)
+    if (from)
     {
-        to->pVertexAttributeDescriptions = (VkVertexInputAttributeDescription*)pool->alloc(from->vertexAttributeDescriptionCount * sizeof(const VkVertexInputAttributeDescription));
-        to->vertexAttributeDescriptionCount = from->vertexAttributeDescriptionCount;
-        for (uint32_t i = 0; i < (uint32_t)from->vertexAttributeDescriptionCount; ++i)
+        to->pVertexAttributeDescriptions = nullptr;
+        if (from->pVertexAttributeDescriptions)
         {
-            deepcopy_VkVertexInputAttributeDescription(pool, from->pVertexAttributeDescriptions + i, (VkVertexInputAttributeDescription*)(to->pVertexAttributeDescriptions + i));
+            to->pVertexAttributeDescriptions = (VkVertexInputAttributeDescription*)alloc->alloc(from->vertexAttributeDescriptionCount * sizeof(const VkVertexInputAttributeDescription));
+            to->vertexAttributeDescriptionCount = from->vertexAttributeDescriptionCount;
+            for (uint32_t i = 0; i < (uint32_t)from->vertexAttributeDescriptionCount; ++i)
+            {
+                deepcopy_VkVertexInputAttributeDescription(alloc, rootType, from->pVertexAttributeDescriptions + i, (VkVertexInputAttributeDescription*)(to->pVertexAttributeDescriptions + i));
+            }
         }
     }
 }
 
 void deepcopy_VkPipelineInputAssemblyStateCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineInputAssemblyStateCreateInfo* from,
     VkPipelineInputAssemblyStateCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkPipelineTessellationStateCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineTessellationStateCreateInfo* from,
     VkPipelineTessellationStateCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkViewport(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkViewport* from,
     VkViewport* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
-void deepcopy_VkOffset2D(
-    Pool* pool,
-    const VkOffset2D* from,
-    VkOffset2D* to)
-{
-    (void)pool;
-    *to = *from;
-}
-
-void deepcopy_VkExtent2D(
-    Pool* pool,
-    const VkExtent2D* from,
-    VkExtent2D* to)
-{
-    (void)pool;
-    *to = *from;
-}
-
-void deepcopy_VkRect2D(
-    Pool* pool,
-    const VkRect2D* from,
-    VkRect2D* to)
-{
-    (void)pool;
-    *to = *from;
-    deepcopy_VkOffset2D(pool, &from->offset, (VkOffset2D*)(&to->offset));
-    deepcopy_VkExtent2D(pool, &from->extent, (VkExtent2D*)(&to->extent));
-}
-
 void deepcopy_VkPipelineViewportStateCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineViewportStateCreateInfo* from,
     VkPipelineViewportStateCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pViewports = nullptr;
-    if (from->pViewports)
+    if (from)
     {
-        to->pViewports = (VkViewport*)pool->alloc(from->viewportCount * sizeof(const VkViewport));
-        to->viewportCount = from->viewportCount;
-        for (uint32_t i = 0; i < (uint32_t)from->viewportCount; ++i)
+        to->pViewports = nullptr;
+        if (from->pViewports)
         {
-            deepcopy_VkViewport(pool, from->pViewports + i, (VkViewport*)(to->pViewports + i));
+            to->pViewports = (VkViewport*)alloc->alloc(from->viewportCount * sizeof(const VkViewport));
+            to->viewportCount = from->viewportCount;
+            for (uint32_t i = 0; i < (uint32_t)from->viewportCount; ++i)
+            {
+                deepcopy_VkViewport(alloc, rootType, from->pViewports + i, (VkViewport*)(to->pViewports + i));
+            }
         }
     }
-    to->pScissors = nullptr;
-    if (from->pScissors)
+    if (from)
     {
-        to->pScissors = (VkRect2D*)pool->alloc(from->scissorCount * sizeof(const VkRect2D));
-        to->scissorCount = from->scissorCount;
-        for (uint32_t i = 0; i < (uint32_t)from->scissorCount; ++i)
+        to->pScissors = nullptr;
+        if (from->pScissors)
         {
-            deepcopy_VkRect2D(pool, from->pScissors + i, (VkRect2D*)(to->pScissors + i));
+            to->pScissors = (VkRect2D*)alloc->alloc(from->scissorCount * sizeof(const VkRect2D));
+            to->scissorCount = from->scissorCount;
+            for (uint32_t i = 0; i < (uint32_t)from->scissorCount; ++i)
+            {
+                deepcopy_VkRect2D(alloc, rootType, from->pScissors + i, (VkRect2D*)(to->pScissors + i));
+            }
         }
     }
 }
 
 void deepcopy_VkPipelineRasterizationStateCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineRasterizationStateCreateInfo* from,
     VkPipelineRasterizationStateCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkPipelineMultisampleStateCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineMultisampleStateCreateInfo* from,
     VkPipelineMultisampleStateCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pSampleMask = nullptr;
     if (from->pSampleMask)
     {
-        to->pSampleMask = (VkSampleMask*)pool->dupArray(from->pSampleMask, (((from->rasterizationSamples) + 31) / 32) * sizeof(const VkSampleMask));
+        to->pSampleMask = (VkSampleMask*)alloc->dupArray(from->pSampleMask, (((from->rasterizationSamples) + 31) / 32) * sizeof(const VkSampleMask));
     }
 }
 
 void deepcopy_VkStencilOpState(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkStencilOpState* from,
     VkStencilOpState* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkPipelineDepthStencilStateCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineDepthStencilStateCreateInfo* from,
     VkPipelineDepthStencilStateCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkStencilOpState(pool, &from->front, (VkStencilOpState*)(&to->front));
-    deepcopy_VkStencilOpState(pool, &from->back, (VkStencilOpState*)(&to->back));
+    deepcopy_VkStencilOpState(alloc, rootType, &from->front, (VkStencilOpState*)(&to->front));
+    deepcopy_VkStencilOpState(alloc, rootType, &from->back, (VkStencilOpState*)(&to->back));
 }
 
 void deepcopy_VkPipelineColorBlendAttachmentState(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineColorBlendAttachmentState* from,
     VkPipelineColorBlendAttachmentState* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkPipelineColorBlendStateCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineColorBlendStateCreateInfo* from,
     VkPipelineColorBlendStateCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pAttachments = nullptr;
-    if (from->pAttachments)
+    if (from)
     {
-        to->pAttachments = (VkPipelineColorBlendAttachmentState*)pool->alloc(from->attachmentCount * sizeof(const VkPipelineColorBlendAttachmentState));
-        to->attachmentCount = from->attachmentCount;
-        for (uint32_t i = 0; i < (uint32_t)from->attachmentCount; ++i)
+        to->pAttachments = nullptr;
+        if (from->pAttachments)
         {
-            deepcopy_VkPipelineColorBlendAttachmentState(pool, from->pAttachments + i, (VkPipelineColorBlendAttachmentState*)(to->pAttachments + i));
+            to->pAttachments = (VkPipelineColorBlendAttachmentState*)alloc->alloc(from->attachmentCount * sizeof(const VkPipelineColorBlendAttachmentState));
+            to->attachmentCount = from->attachmentCount;
+            for (uint32_t i = 0; i < (uint32_t)from->attachmentCount; ++i)
+            {
+                deepcopy_VkPipelineColorBlendAttachmentState(alloc, rootType, from->pAttachments + i, (VkPipelineColorBlendAttachmentState*)(to->pAttachments + i));
+            }
         }
     }
     memcpy(to->blendConstants, from->blendConstants, 4 * sizeof(float));
 }
 
 void deepcopy_VkPipelineDynamicStateCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineDynamicStateCreateInfo* from,
     VkPipelineDynamicStateCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pDynamicStates = nullptr;
     if (from->pDynamicStates)
     {
-        to->pDynamicStates = (VkDynamicState*)pool->dupArray(from->pDynamicStates, from->dynamicStateCount * sizeof(const VkDynamicState));
+        to->pDynamicStates = (VkDynamicState*)alloc->dupArray(from->pDynamicStates, from->dynamicStateCount * sizeof(const VkDynamicState));
     }
 }
 
 void deepcopy_VkGraphicsPipelineCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkGraphicsPipelineCreateInfo* from,
     VkGraphicsPipelineCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pStages = nullptr;
-    if (from->pStages)
+    if (from)
     {
-        to->pStages = (VkPipelineShaderStageCreateInfo*)pool->alloc(from->stageCount * sizeof(const VkPipelineShaderStageCreateInfo));
-        to->stageCount = from->stageCount;
-        for (uint32_t i = 0; i < (uint32_t)from->stageCount; ++i)
+        to->pStages = nullptr;
+        if (from->pStages)
         {
-            deepcopy_VkPipelineShaderStageCreateInfo(pool, from->pStages + i, (VkPipelineShaderStageCreateInfo*)(to->pStages + i));
+            to->pStages = (VkPipelineShaderStageCreateInfo*)alloc->alloc(from->stageCount * sizeof(const VkPipelineShaderStageCreateInfo));
+            to->stageCount = from->stageCount;
+            for (uint32_t i = 0; i < (uint32_t)from->stageCount; ++i)
+            {
+                deepcopy_VkPipelineShaderStageCreateInfo(alloc, rootType, from->pStages + i, (VkPipelineShaderStageCreateInfo*)(to->pStages + i));
+            }
         }
     }
     to->pVertexInputState = nullptr;
     if (from->pVertexInputState)
     {
-        to->pVertexInputState = (VkPipelineVertexInputStateCreateInfo*)pool->alloc(sizeof(const VkPipelineVertexInputStateCreateInfo));
-        deepcopy_VkPipelineVertexInputStateCreateInfo(pool, from->pVertexInputState, (VkPipelineVertexInputStateCreateInfo*)(to->pVertexInputState));
+        to->pVertexInputState = (VkPipelineVertexInputStateCreateInfo*)alloc->alloc(sizeof(const VkPipelineVertexInputStateCreateInfo));
+        deepcopy_VkPipelineVertexInputStateCreateInfo(alloc, rootType, from->pVertexInputState, (VkPipelineVertexInputStateCreateInfo*)(to->pVertexInputState));
     }
     to->pInputAssemblyState = nullptr;
     if (from->pInputAssemblyState)
     {
-        to->pInputAssemblyState = (VkPipelineInputAssemblyStateCreateInfo*)pool->alloc(sizeof(const VkPipelineInputAssemblyStateCreateInfo));
-        deepcopy_VkPipelineInputAssemblyStateCreateInfo(pool, from->pInputAssemblyState, (VkPipelineInputAssemblyStateCreateInfo*)(to->pInputAssemblyState));
+        to->pInputAssemblyState = (VkPipelineInputAssemblyStateCreateInfo*)alloc->alloc(sizeof(const VkPipelineInputAssemblyStateCreateInfo));
+        deepcopy_VkPipelineInputAssemblyStateCreateInfo(alloc, rootType, from->pInputAssemblyState, (VkPipelineInputAssemblyStateCreateInfo*)(to->pInputAssemblyState));
     }
     to->pTessellationState = nullptr;
     if (from->pTessellationState)
     {
-        to->pTessellationState = (VkPipelineTessellationStateCreateInfo*)pool->alloc(sizeof(const VkPipelineTessellationStateCreateInfo));
-        deepcopy_VkPipelineTessellationStateCreateInfo(pool, from->pTessellationState, (VkPipelineTessellationStateCreateInfo*)(to->pTessellationState));
+        to->pTessellationState = (VkPipelineTessellationStateCreateInfo*)alloc->alloc(sizeof(const VkPipelineTessellationStateCreateInfo));
+        deepcopy_VkPipelineTessellationStateCreateInfo(alloc, rootType, from->pTessellationState, (VkPipelineTessellationStateCreateInfo*)(to->pTessellationState));
     }
     to->pViewportState = nullptr;
     if (from->pViewportState)
     {
-        to->pViewportState = (VkPipelineViewportStateCreateInfo*)pool->alloc(sizeof(const VkPipelineViewportStateCreateInfo));
-        deepcopy_VkPipelineViewportStateCreateInfo(pool, from->pViewportState, (VkPipelineViewportStateCreateInfo*)(to->pViewportState));
+        to->pViewportState = (VkPipelineViewportStateCreateInfo*)alloc->alloc(sizeof(const VkPipelineViewportStateCreateInfo));
+        deepcopy_VkPipelineViewportStateCreateInfo(alloc, rootType, from->pViewportState, (VkPipelineViewportStateCreateInfo*)(to->pViewportState));
     }
     to->pRasterizationState = nullptr;
     if (from->pRasterizationState)
     {
-        to->pRasterizationState = (VkPipelineRasterizationStateCreateInfo*)pool->alloc(sizeof(const VkPipelineRasterizationStateCreateInfo));
-        deepcopy_VkPipelineRasterizationStateCreateInfo(pool, from->pRasterizationState, (VkPipelineRasterizationStateCreateInfo*)(to->pRasterizationState));
+        to->pRasterizationState = (VkPipelineRasterizationStateCreateInfo*)alloc->alloc(sizeof(const VkPipelineRasterizationStateCreateInfo));
+        deepcopy_VkPipelineRasterizationStateCreateInfo(alloc, rootType, from->pRasterizationState, (VkPipelineRasterizationStateCreateInfo*)(to->pRasterizationState));
     }
     to->pMultisampleState = nullptr;
     if (from->pMultisampleState)
     {
-        to->pMultisampleState = (VkPipelineMultisampleStateCreateInfo*)pool->alloc(sizeof(const VkPipelineMultisampleStateCreateInfo));
-        deepcopy_VkPipelineMultisampleStateCreateInfo(pool, from->pMultisampleState, (VkPipelineMultisampleStateCreateInfo*)(to->pMultisampleState));
+        to->pMultisampleState = (VkPipelineMultisampleStateCreateInfo*)alloc->alloc(sizeof(const VkPipelineMultisampleStateCreateInfo));
+        deepcopy_VkPipelineMultisampleStateCreateInfo(alloc, rootType, from->pMultisampleState, (VkPipelineMultisampleStateCreateInfo*)(to->pMultisampleState));
     }
     to->pDepthStencilState = nullptr;
     if (from->pDepthStencilState)
     {
-        to->pDepthStencilState = (VkPipelineDepthStencilStateCreateInfo*)pool->alloc(sizeof(const VkPipelineDepthStencilStateCreateInfo));
-        deepcopy_VkPipelineDepthStencilStateCreateInfo(pool, from->pDepthStencilState, (VkPipelineDepthStencilStateCreateInfo*)(to->pDepthStencilState));
+        to->pDepthStencilState = (VkPipelineDepthStencilStateCreateInfo*)alloc->alloc(sizeof(const VkPipelineDepthStencilStateCreateInfo));
+        deepcopy_VkPipelineDepthStencilStateCreateInfo(alloc, rootType, from->pDepthStencilState, (VkPipelineDepthStencilStateCreateInfo*)(to->pDepthStencilState));
     }
     to->pColorBlendState = nullptr;
     if (from->pColorBlendState)
     {
-        to->pColorBlendState = (VkPipelineColorBlendStateCreateInfo*)pool->alloc(sizeof(const VkPipelineColorBlendStateCreateInfo));
-        deepcopy_VkPipelineColorBlendStateCreateInfo(pool, from->pColorBlendState, (VkPipelineColorBlendStateCreateInfo*)(to->pColorBlendState));
+        to->pColorBlendState = (VkPipelineColorBlendStateCreateInfo*)alloc->alloc(sizeof(const VkPipelineColorBlendStateCreateInfo));
+        deepcopy_VkPipelineColorBlendStateCreateInfo(alloc, rootType, from->pColorBlendState, (VkPipelineColorBlendStateCreateInfo*)(to->pColorBlendState));
     }
     to->pDynamicState = nullptr;
     if (from->pDynamicState)
     {
-        to->pDynamicState = (VkPipelineDynamicStateCreateInfo*)pool->alloc(sizeof(const VkPipelineDynamicStateCreateInfo));
-        deepcopy_VkPipelineDynamicStateCreateInfo(pool, from->pDynamicState, (VkPipelineDynamicStateCreateInfo*)(to->pDynamicState));
+        to->pDynamicState = (VkPipelineDynamicStateCreateInfo*)alloc->alloc(sizeof(const VkPipelineDynamicStateCreateInfo));
+        deepcopy_VkPipelineDynamicStateCreateInfo(alloc, rootType, from->pDynamicState, (VkPipelineDynamicStateCreateInfo*)(to->pDynamicState));
     }
 }
 
-void deepcopy_VkComputePipelineCreateInfo(
-    Pool* pool,
-    const VkComputePipelineCreateInfo* from,
-    VkComputePipelineCreateInfo* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-    deepcopy_VkPipelineShaderStageCreateInfo(pool, &from->stage, (VkPipelineShaderStageCreateInfo*)(&to->stage));
-}
-
 void deepcopy_VkPushConstantRange(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPushConstantRange* from,
     VkPushConstantRange* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkPipelineLayoutCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineLayoutCreateInfo* from,
     VkPipelineLayoutCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pSetLayouts = nullptr;
     if (from->pSetLayouts)
     {
-        to->pSetLayouts = (VkDescriptorSetLayout*)pool->dupArray(from->pSetLayouts, from->setLayoutCount * sizeof(const VkDescriptorSetLayout));
+        to->pSetLayouts = (VkDescriptorSetLayout*)alloc->dupArray(from->pSetLayouts, from->setLayoutCount * sizeof(const VkDescriptorSetLayout));
     }
-    to->pPushConstantRanges = nullptr;
-    if (from->pPushConstantRanges)
+    if (from)
     {
-        to->pPushConstantRanges = (VkPushConstantRange*)pool->alloc(from->pushConstantRangeCount * sizeof(const VkPushConstantRange));
-        to->pushConstantRangeCount = from->pushConstantRangeCount;
-        for (uint32_t i = 0; i < (uint32_t)from->pushConstantRangeCount; ++i)
+        to->pPushConstantRanges = nullptr;
+        if (from->pPushConstantRanges)
         {
-            deepcopy_VkPushConstantRange(pool, from->pPushConstantRanges + i, (VkPushConstantRange*)(to->pPushConstantRanges + i));
+            to->pPushConstantRanges = (VkPushConstantRange*)alloc->alloc(from->pushConstantRangeCount * sizeof(const VkPushConstantRange));
+            to->pushConstantRangeCount = from->pushConstantRangeCount;
+            for (uint32_t i = 0; i < (uint32_t)from->pushConstantRangeCount; ++i)
+            {
+                deepcopy_VkPushConstantRange(alloc, rootType, from->pPushConstantRanges + i, (VkPushConstantRange*)(to->pPushConstantRanges + i));
+            }
         }
     }
 }
 
 void deepcopy_VkSamplerCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSamplerCreateInfo* from,
     VkSamplerCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
-void deepcopy_VkDescriptorSetLayoutBinding(
-    Pool* pool,
-    const VkDescriptorSetLayoutBinding* from,
-    VkDescriptorSetLayoutBinding* to)
+void deepcopy_VkCopyDescriptorSet(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyDescriptorSet* from,
+    VkCopyDescriptorSet* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    to->pImmutableSamplers = nullptr;
-    if (from->pImmutableSamplers)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        to->pImmutableSamplers = (VkSampler*)pool->dupArray(from->pImmutableSamplers, from->descriptorCount * sizeof(const VkSampler));
+        rootType = from->sType;
     }
-}
-
-void deepcopy_VkDescriptorSetLayoutCreateInfo(
-    Pool* pool,
-    const VkDescriptorSetLayoutCreateInfo* from,
-    VkDescriptorSetLayoutCreateInfo* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pBindings = nullptr;
-    if (from->pBindings)
-    {
-        to->pBindings = (VkDescriptorSetLayoutBinding*)pool->alloc(from->bindingCount * sizeof(const VkDescriptorSetLayoutBinding));
-        to->bindingCount = from->bindingCount;
-        for (uint32_t i = 0; i < (uint32_t)from->bindingCount; ++i)
-        {
-            deepcopy_VkDescriptorSetLayoutBinding(pool, from->pBindings + i, (VkDescriptorSetLayoutBinding*)(to->pBindings + i));
-        }
-    }
+}
+
+void deepcopy_VkDescriptorBufferInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorBufferInfo* from,
+    VkDescriptorBufferInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkDescriptorImageInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorImageInfo* from,
+    VkDescriptorImageInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
 }
 
 void deepcopy_VkDescriptorPoolSize(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDescriptorPoolSize* from,
     VkDescriptorPoolSize* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkDescriptorPoolCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDescriptorPoolCreateInfo* from,
     VkDescriptorPoolCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pPoolSizes = nullptr;
-    if (from->pPoolSizes)
+    if (from)
     {
-        to->pPoolSizes = (VkDescriptorPoolSize*)pool->alloc(from->poolSizeCount * sizeof(const VkDescriptorPoolSize));
-        to->poolSizeCount = from->poolSizeCount;
-        for (uint32_t i = 0; i < (uint32_t)from->poolSizeCount; ++i)
+        to->pPoolSizes = nullptr;
+        if (from->pPoolSizes)
         {
-            deepcopy_VkDescriptorPoolSize(pool, from->pPoolSizes + i, (VkDescriptorPoolSize*)(to->pPoolSizes + i));
+            to->pPoolSizes = (VkDescriptorPoolSize*)alloc->alloc(from->poolSizeCount * sizeof(const VkDescriptorPoolSize));
+            to->poolSizeCount = from->poolSizeCount;
+            for (uint32_t i = 0; i < (uint32_t)from->poolSizeCount; ++i)
+            {
+                deepcopy_VkDescriptorPoolSize(alloc, rootType, from->pPoolSizes + i, (VkDescriptorPoolSize*)(to->pPoolSizes + i));
+            }
         }
     }
 }
 
 void deepcopy_VkDescriptorSetAllocateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDescriptorSetAllocateInfo* from,
     VkDescriptorSetAllocateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pSetLayouts = nullptr;
     if (from->pSetLayouts)
     {
-        to->pSetLayouts = (VkDescriptorSetLayout*)pool->dupArray(from->pSetLayouts, from->descriptorSetCount * sizeof(const VkDescriptorSetLayout));
+        to->pSetLayouts = (VkDescriptorSetLayout*)alloc->dupArray(from->pSetLayouts, from->descriptorSetCount * sizeof(const VkDescriptorSetLayout));
     }
 }
 
-void deepcopy_VkDescriptorImageInfo(
-    Pool* pool,
-    const VkDescriptorImageInfo* from,
-    VkDescriptorImageInfo* to)
+void deepcopy_VkDescriptorSetLayoutBinding(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutBinding* from,
+    VkDescriptorSetLayoutBinding* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
+    to->pImmutableSamplers = nullptr;
+    if (from->pImmutableSamplers)
+    {
+        to->pImmutableSamplers = (VkSampler*)alloc->dupArray(from->pImmutableSamplers, from->descriptorCount * sizeof(const VkSampler));
+    }
 }
 
-void deepcopy_VkDescriptorBufferInfo(
-    Pool* pool,
-    const VkDescriptorBufferInfo* from,
-    VkDescriptorBufferInfo* to)
+void deepcopy_VkDescriptorSetLayoutCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutCreateInfo* from,
+    VkDescriptorSetLayoutCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-}
-
-void deepcopy_VkWriteDescriptorSet(
-    Pool* pool,
-    const VkWriteDescriptorSet* from,
-    VkWriteDescriptorSet* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pImageInfo = nullptr;
-    if (from->pImageInfo)
+    if (from)
     {
-        to->pImageInfo = (VkDescriptorImageInfo*)pool->alloc(from->descriptorCount * sizeof(const VkDescriptorImageInfo));
-        to->descriptorCount = from->descriptorCount;
-        for (uint32_t i = 0; i < (uint32_t)from->descriptorCount; ++i)
+        to->pBindings = nullptr;
+        if (from->pBindings)
         {
-            deepcopy_VkDescriptorImageInfo(pool, from->pImageInfo + i, (VkDescriptorImageInfo*)(to->pImageInfo + i));
+            to->pBindings = (VkDescriptorSetLayoutBinding*)alloc->alloc(from->bindingCount * sizeof(const VkDescriptorSetLayoutBinding));
+            to->bindingCount = from->bindingCount;
+            for (uint32_t i = 0; i < (uint32_t)from->bindingCount; ++i)
+            {
+                deepcopy_VkDescriptorSetLayoutBinding(alloc, rootType, from->pBindings + i, (VkDescriptorSetLayoutBinding*)(to->pBindings + i));
+            }
         }
     }
-    to->pBufferInfo = nullptr;
-    if (from->pBufferInfo)
+}
+
+void deepcopy_VkWriteDescriptorSet(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkWriteDescriptorSet* from,
+    VkWriteDescriptorSet* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        to->pBufferInfo = (VkDescriptorBufferInfo*)pool->alloc(from->descriptorCount * sizeof(const VkDescriptorBufferInfo));
-        to->descriptorCount = from->descriptorCount;
-        for (uint32_t i = 0; i < (uint32_t)from->descriptorCount; ++i)
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pImageInfo = nullptr;
+        if (from->pImageInfo)
         {
-            deepcopy_VkDescriptorBufferInfo(pool, from->pBufferInfo + i, (VkDescriptorBufferInfo*)(to->pBufferInfo + i));
+            to->pImageInfo = (VkDescriptorImageInfo*)alloc->alloc(from->descriptorCount * sizeof(const VkDescriptorImageInfo));
+            to->descriptorCount = from->descriptorCount;
+            for (uint32_t i = 0; i < (uint32_t)from->descriptorCount; ++i)
+            {
+                deepcopy_VkDescriptorImageInfo(alloc, rootType, from->pImageInfo + i, (VkDescriptorImageInfo*)(to->pImageInfo + i));
+            }
+        }
+    }
+    if (from)
+    {
+        to->pBufferInfo = nullptr;
+        if (from->pBufferInfo)
+        {
+            to->pBufferInfo = (VkDescriptorBufferInfo*)alloc->alloc(from->descriptorCount * sizeof(const VkDescriptorBufferInfo));
+            to->descriptorCount = from->descriptorCount;
+            for (uint32_t i = 0; i < (uint32_t)from->descriptorCount; ++i)
+            {
+                deepcopy_VkDescriptorBufferInfo(alloc, rootType, from->pBufferInfo + i, (VkDescriptorBufferInfo*)(to->pBufferInfo + i));
+            }
         }
     }
     to->pTexelBufferView = nullptr;
     if (from->pTexelBufferView)
     {
-        to->pTexelBufferView = (VkBufferView*)pool->dupArray(from->pTexelBufferView, from->descriptorCount * sizeof(const VkBufferView));
-    }
-}
-
-void deepcopy_VkCopyDescriptorSet(
-    Pool* pool,
-    const VkCopyDescriptorSet* from,
-    VkCopyDescriptorSet* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-}
-
-void deepcopy_VkFramebufferCreateInfo(
-    Pool* pool,
-    const VkFramebufferCreateInfo* from,
-    VkFramebufferCreateInfo* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-    to->pAttachments = nullptr;
-    if (from->pAttachments)
-    {
-        to->pAttachments = (VkImageView*)pool->dupArray(from->pAttachments, from->attachmentCount * sizeof(const VkImageView));
+        to->pTexelBufferView = (VkBufferView*)alloc->dupArray(from->pTexelBufferView, from->descriptorCount * sizeof(const VkBufferView));
     }
 }
 
 void deepcopy_VkAttachmentDescription(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkAttachmentDescription* from,
     VkAttachmentDescription* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkAttachmentReference(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkAttachmentReference* from,
     VkAttachmentReference* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
+void deepcopy_VkFramebufferCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkFramebufferCreateInfo* from,
+    VkFramebufferCreateInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pAttachments = nullptr;
+    if (from->pAttachments)
+    {
+        to->pAttachments = (VkImageView*)alloc->dupArray(from->pAttachments, from->attachmentCount * sizeof(const VkImageView));
+    }
+}
+
 void deepcopy_VkSubpassDescription(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSubpassDescription* from,
     VkSubpassDescription* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    to->pInputAttachments = nullptr;
-    if (from->pInputAttachments)
+    if (from)
     {
-        to->pInputAttachments = (VkAttachmentReference*)pool->alloc(from->inputAttachmentCount * sizeof(const VkAttachmentReference));
-        to->inputAttachmentCount = from->inputAttachmentCount;
-        for (uint32_t i = 0; i < (uint32_t)from->inputAttachmentCount; ++i)
+        to->pInputAttachments = nullptr;
+        if (from->pInputAttachments)
         {
-            deepcopy_VkAttachmentReference(pool, from->pInputAttachments + i, (VkAttachmentReference*)(to->pInputAttachments + i));
+            to->pInputAttachments = (VkAttachmentReference*)alloc->alloc(from->inputAttachmentCount * sizeof(const VkAttachmentReference));
+            to->inputAttachmentCount = from->inputAttachmentCount;
+            for (uint32_t i = 0; i < (uint32_t)from->inputAttachmentCount; ++i)
+            {
+                deepcopy_VkAttachmentReference(alloc, rootType, from->pInputAttachments + i, (VkAttachmentReference*)(to->pInputAttachments + i));
+            }
         }
     }
-    to->pColorAttachments = nullptr;
-    if (from->pColorAttachments)
+    if (from)
     {
-        to->pColorAttachments = (VkAttachmentReference*)pool->alloc(from->colorAttachmentCount * sizeof(const VkAttachmentReference));
-        to->colorAttachmentCount = from->colorAttachmentCount;
-        for (uint32_t i = 0; i < (uint32_t)from->colorAttachmentCount; ++i)
+        to->pColorAttachments = nullptr;
+        if (from->pColorAttachments)
         {
-            deepcopy_VkAttachmentReference(pool, from->pColorAttachments + i, (VkAttachmentReference*)(to->pColorAttachments + i));
+            to->pColorAttachments = (VkAttachmentReference*)alloc->alloc(from->colorAttachmentCount * sizeof(const VkAttachmentReference));
+            to->colorAttachmentCount = from->colorAttachmentCount;
+            for (uint32_t i = 0; i < (uint32_t)from->colorAttachmentCount; ++i)
+            {
+                deepcopy_VkAttachmentReference(alloc, rootType, from->pColorAttachments + i, (VkAttachmentReference*)(to->pColorAttachments + i));
+            }
         }
     }
-    to->pResolveAttachments = nullptr;
-    if (from->pResolveAttachments)
+    if (from)
     {
-        to->pResolveAttachments = (VkAttachmentReference*)pool->alloc(from->colorAttachmentCount * sizeof(const VkAttachmentReference));
-        to->colorAttachmentCount = from->colorAttachmentCount;
-        for (uint32_t i = 0; i < (uint32_t)from->colorAttachmentCount; ++i)
+        to->pResolveAttachments = nullptr;
+        if (from->pResolveAttachments)
         {
-            deepcopy_VkAttachmentReference(pool, from->pResolveAttachments + i, (VkAttachmentReference*)(to->pResolveAttachments + i));
+            to->pResolveAttachments = (VkAttachmentReference*)alloc->alloc(from->colorAttachmentCount * sizeof(const VkAttachmentReference));
+            to->colorAttachmentCount = from->colorAttachmentCount;
+            for (uint32_t i = 0; i < (uint32_t)from->colorAttachmentCount; ++i)
+            {
+                deepcopy_VkAttachmentReference(alloc, rootType, from->pResolveAttachments + i, (VkAttachmentReference*)(to->pResolveAttachments + i));
+            }
         }
     }
     to->pDepthStencilAttachment = nullptr;
     if (from->pDepthStencilAttachment)
     {
-        to->pDepthStencilAttachment = (VkAttachmentReference*)pool->alloc(sizeof(const VkAttachmentReference));
-        deepcopy_VkAttachmentReference(pool, from->pDepthStencilAttachment, (VkAttachmentReference*)(to->pDepthStencilAttachment));
+        to->pDepthStencilAttachment = (VkAttachmentReference*)alloc->alloc(sizeof(const VkAttachmentReference));
+        deepcopy_VkAttachmentReference(alloc, rootType, from->pDepthStencilAttachment, (VkAttachmentReference*)(to->pDepthStencilAttachment));
     }
     to->pPreserveAttachments = nullptr;
     if (from->pPreserveAttachments)
     {
-        to->pPreserveAttachments = (uint32_t*)pool->dupArray(from->pPreserveAttachments, from->preserveAttachmentCount * sizeof(const uint32_t));
+        to->pPreserveAttachments = (uint32_t*)alloc->dupArray(from->pPreserveAttachments, from->preserveAttachmentCount * sizeof(const uint32_t));
     }
 }
 
 void deepcopy_VkSubpassDependency(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSubpassDependency* from,
     VkSubpassDependency* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkRenderPassCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkRenderPassCreateInfo* from,
     VkRenderPassCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pAttachments = nullptr;
-    if (from->pAttachments)
+    if (from)
     {
-        to->pAttachments = (VkAttachmentDescription*)pool->alloc(from->attachmentCount * sizeof(const VkAttachmentDescription));
-        to->attachmentCount = from->attachmentCount;
-        for (uint32_t i = 0; i < (uint32_t)from->attachmentCount; ++i)
+        to->pAttachments = nullptr;
+        if (from->pAttachments)
         {
-            deepcopy_VkAttachmentDescription(pool, from->pAttachments + i, (VkAttachmentDescription*)(to->pAttachments + i));
+            to->pAttachments = (VkAttachmentDescription*)alloc->alloc(from->attachmentCount * sizeof(const VkAttachmentDescription));
+            to->attachmentCount = from->attachmentCount;
+            for (uint32_t i = 0; i < (uint32_t)from->attachmentCount; ++i)
+            {
+                deepcopy_VkAttachmentDescription(alloc, rootType, from->pAttachments + i, (VkAttachmentDescription*)(to->pAttachments + i));
+            }
         }
     }
-    to->pSubpasses = nullptr;
-    if (from->pSubpasses)
+    if (from)
     {
-        to->pSubpasses = (VkSubpassDescription*)pool->alloc(from->subpassCount * sizeof(const VkSubpassDescription));
-        to->subpassCount = from->subpassCount;
-        for (uint32_t i = 0; i < (uint32_t)from->subpassCount; ++i)
+        to->pSubpasses = nullptr;
+        if (from->pSubpasses)
         {
-            deepcopy_VkSubpassDescription(pool, from->pSubpasses + i, (VkSubpassDescription*)(to->pSubpasses + i));
+            to->pSubpasses = (VkSubpassDescription*)alloc->alloc(from->subpassCount * sizeof(const VkSubpassDescription));
+            to->subpassCount = from->subpassCount;
+            for (uint32_t i = 0; i < (uint32_t)from->subpassCount; ++i)
+            {
+                deepcopy_VkSubpassDescription(alloc, rootType, from->pSubpasses + i, (VkSubpassDescription*)(to->pSubpasses + i));
+            }
         }
     }
-    to->pDependencies = nullptr;
-    if (from->pDependencies)
+    if (from)
     {
-        to->pDependencies = (VkSubpassDependency*)pool->alloc(from->dependencyCount * sizeof(const VkSubpassDependency));
-        to->dependencyCount = from->dependencyCount;
-        for (uint32_t i = 0; i < (uint32_t)from->dependencyCount; ++i)
+        to->pDependencies = nullptr;
+        if (from->pDependencies)
         {
-            deepcopy_VkSubpassDependency(pool, from->pDependencies + i, (VkSubpassDependency*)(to->pDependencies + i));
+            to->pDependencies = (VkSubpassDependency*)alloc->alloc(from->dependencyCount * sizeof(const VkSubpassDependency));
+            to->dependencyCount = from->dependencyCount;
+            for (uint32_t i = 0; i < (uint32_t)from->dependencyCount; ++i)
+            {
+                deepcopy_VkSubpassDependency(alloc, rootType, from->pDependencies + i, (VkSubpassDependency*)(to->pDependencies + i));
+            }
         }
     }
 }
 
 void deepcopy_VkCommandPoolCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkCommandPoolCreateInfo* from,
     VkCommandPoolCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkCommandBufferAllocateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkCommandBufferAllocateInfo* from,
     VkCommandBufferAllocateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkCommandBufferInheritanceInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkCommandBufferInheritanceInfo* from,
     VkCommandBufferInheritanceInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkCommandBufferBeginInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkCommandBufferBeginInfo* from,
     VkCommandBufferBeginInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pInheritanceInfo = nullptr;
     if (from->pInheritanceInfo)
     {
-        to->pInheritanceInfo = (VkCommandBufferInheritanceInfo*)pool->alloc(sizeof(const VkCommandBufferInheritanceInfo));
-        deepcopy_VkCommandBufferInheritanceInfo(pool, from->pInheritanceInfo, (VkCommandBufferInheritanceInfo*)(to->pInheritanceInfo));
+        to->pInheritanceInfo = (VkCommandBufferInheritanceInfo*)alloc->alloc(sizeof(const VkCommandBufferInheritanceInfo));
+        deepcopy_VkCommandBufferInheritanceInfo(alloc, rootType, from->pInheritanceInfo, (VkCommandBufferInheritanceInfo*)(to->pInheritanceInfo));
     }
 }
 
 void deepcopy_VkBufferCopy(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBufferCopy* from,
     VkBufferCopy* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkImageSubresourceLayers(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImageSubresourceLayers* from,
     VkImageSubresourceLayers* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
-void deepcopy_VkImageCopy(
-    Pool* pool,
-    const VkImageCopy* from,
-    VkImageCopy* to)
-{
-    (void)pool;
-    *to = *from;
-    deepcopy_VkImageSubresourceLayers(pool, &from->srcSubresource, (VkImageSubresourceLayers*)(&to->srcSubresource));
-    deepcopy_VkOffset3D(pool, &from->srcOffset, (VkOffset3D*)(&to->srcOffset));
-    deepcopy_VkImageSubresourceLayers(pool, &from->dstSubresource, (VkImageSubresourceLayers*)(&to->dstSubresource));
-    deepcopy_VkOffset3D(pool, &from->dstOffset, (VkOffset3D*)(&to->dstOffset));
-    deepcopy_VkExtent3D(pool, &from->extent, (VkExtent3D*)(&to->extent));
-}
-
-void deepcopy_VkImageBlit(
-    Pool* pool,
-    const VkImageBlit* from,
-    VkImageBlit* to)
-{
-    (void)pool;
-    *to = *from;
-    deepcopy_VkImageSubresourceLayers(pool, &from->srcSubresource, (VkImageSubresourceLayers*)(&to->srcSubresource));
-    for (uint32_t i = 0; i < (uint32_t)2; ++i)
-    {
-        deepcopy_VkOffset3D(pool, from->srcOffsets + i, (VkOffset3D*)(to->srcOffsets + i));
-    }
-    deepcopy_VkImageSubresourceLayers(pool, &from->dstSubresource, (VkImageSubresourceLayers*)(&to->dstSubresource));
-    for (uint32_t i = 0; i < (uint32_t)2; ++i)
-    {
-        deepcopy_VkOffset3D(pool, from->dstOffsets + i, (VkOffset3D*)(to->dstOffsets + i));
-    }
-}
-
 void deepcopy_VkBufferImageCopy(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBufferImageCopy* from,
     VkBufferImageCopy* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    deepcopy_VkImageSubresourceLayers(pool, &from->imageSubresource, (VkImageSubresourceLayers*)(&to->imageSubresource));
-    deepcopy_VkOffset3D(pool, &from->imageOffset, (VkOffset3D*)(&to->imageOffset));
-    deepcopy_VkExtent3D(pool, &from->imageExtent, (VkExtent3D*)(&to->imageExtent));
+    deepcopy_VkImageSubresourceLayers(alloc, rootType, &from->imageSubresource, (VkImageSubresourceLayers*)(&to->imageSubresource));
+    deepcopy_VkOffset3D(alloc, rootType, &from->imageOffset, (VkOffset3D*)(&to->imageOffset));
+    deepcopy_VkExtent3D(alloc, rootType, &from->imageExtent, (VkExtent3D*)(&to->imageExtent));
 }
 
 void deepcopy_VkClearColorValue(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkClearColorValue* from,
     VkClearColorValue* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
     memcpy(to->float32, from->float32, 4 * sizeof(float));
     memcpy(to->int32, from->int32, 4 * sizeof(int32_t));
@@ -1726,1166 +2561,1774 @@
 }
 
 void deepcopy_VkClearDepthStencilValue(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkClearDepthStencilValue* from,
     VkClearDepthStencilValue* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkClearValue(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkClearValue* from,
     VkClearValue* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    deepcopy_VkClearColorValue(pool, &from->color, (VkClearColorValue*)(&to->color));
-    deepcopy_VkClearDepthStencilValue(pool, &from->depthStencil, (VkClearDepthStencilValue*)(&to->depthStencil));
+    deepcopy_VkClearColorValue(alloc, rootType, &from->color, (VkClearColorValue*)(&to->color));
+    deepcopy_VkClearDepthStencilValue(alloc, rootType, &from->depthStencil, (VkClearDepthStencilValue*)(&to->depthStencil));
 }
 
 void deepcopy_VkClearAttachment(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkClearAttachment* from,
     VkClearAttachment* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    deepcopy_VkClearValue(pool, &from->clearValue, (VkClearValue*)(&to->clearValue));
+    deepcopy_VkClearValue(alloc, rootType, &from->clearValue, (VkClearValue*)(&to->clearValue));
 }
 
 void deepcopy_VkClearRect(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkClearRect* from,
     VkClearRect* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    deepcopy_VkRect2D(pool, &from->rect, (VkRect2D*)(&to->rect));
+    deepcopy_VkRect2D(alloc, rootType, &from->rect, (VkRect2D*)(&to->rect));
+}
+
+void deepcopy_VkImageBlit(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageBlit* from,
+    VkImageBlit* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    deepcopy_VkImageSubresourceLayers(alloc, rootType, &from->srcSubresource, (VkImageSubresourceLayers*)(&to->srcSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        deepcopy_VkOffset3D(alloc, rootType, from->srcOffsets + i, (VkOffset3D*)(to->srcOffsets + i));
+    }
+    deepcopy_VkImageSubresourceLayers(alloc, rootType, &from->dstSubresource, (VkImageSubresourceLayers*)(&to->dstSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        deepcopy_VkOffset3D(alloc, rootType, from->dstOffsets + i, (VkOffset3D*)(to->dstOffsets + i));
+    }
+}
+
+void deepcopy_VkImageCopy(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageCopy* from,
+    VkImageCopy* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    deepcopy_VkImageSubresourceLayers(alloc, rootType, &from->srcSubresource, (VkImageSubresourceLayers*)(&to->srcSubresource));
+    deepcopy_VkOffset3D(alloc, rootType, &from->srcOffset, (VkOffset3D*)(&to->srcOffset));
+    deepcopy_VkImageSubresourceLayers(alloc, rootType, &from->dstSubresource, (VkImageSubresourceLayers*)(&to->dstSubresource));
+    deepcopy_VkOffset3D(alloc, rootType, &from->dstOffset, (VkOffset3D*)(&to->dstOffset));
+    deepcopy_VkExtent3D(alloc, rootType, &from->extent, (VkExtent3D*)(&to->extent));
 }
 
 void deepcopy_VkImageResolve(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImageResolve* from,
     VkImageResolve* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    deepcopy_VkImageSubresourceLayers(pool, &from->srcSubresource, (VkImageSubresourceLayers*)(&to->srcSubresource));
-    deepcopy_VkOffset3D(pool, &from->srcOffset, (VkOffset3D*)(&to->srcOffset));
-    deepcopy_VkImageSubresourceLayers(pool, &from->dstSubresource, (VkImageSubresourceLayers*)(&to->dstSubresource));
-    deepcopy_VkOffset3D(pool, &from->dstOffset, (VkOffset3D*)(&to->dstOffset));
-    deepcopy_VkExtent3D(pool, &from->extent, (VkExtent3D*)(&to->extent));
-}
-
-void deepcopy_VkMemoryBarrier(
-    Pool* pool,
-    const VkMemoryBarrier* from,
-    VkMemoryBarrier* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-}
-
-void deepcopy_VkBufferMemoryBarrier(
-    Pool* pool,
-    const VkBufferMemoryBarrier* from,
-    VkBufferMemoryBarrier* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-}
-
-void deepcopy_VkImageMemoryBarrier(
-    Pool* pool,
-    const VkImageMemoryBarrier* from,
-    VkImageMemoryBarrier* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-    deepcopy_VkImageSubresourceRange(pool, &from->subresourceRange, (VkImageSubresourceRange*)(&to->subresourceRange));
+    deepcopy_VkImageSubresourceLayers(alloc, rootType, &from->srcSubresource, (VkImageSubresourceLayers*)(&to->srcSubresource));
+    deepcopy_VkOffset3D(alloc, rootType, &from->srcOffset, (VkOffset3D*)(&to->srcOffset));
+    deepcopy_VkImageSubresourceLayers(alloc, rootType, &from->dstSubresource, (VkImageSubresourceLayers*)(&to->dstSubresource));
+    deepcopy_VkOffset3D(alloc, rootType, &from->dstOffset, (VkOffset3D*)(&to->dstOffset));
+    deepcopy_VkExtent3D(alloc, rootType, &from->extent, (VkExtent3D*)(&to->extent));
 }
 
 void deepcopy_VkRenderPassBeginInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkRenderPassBeginInfo* from,
     VkRenderPassBeginInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkRect2D(pool, &from->renderArea, (VkRect2D*)(&to->renderArea));
-    to->pClearValues = nullptr;
-    if (from->pClearValues)
+    deepcopy_VkRect2D(alloc, rootType, &from->renderArea, (VkRect2D*)(&to->renderArea));
+    if (from)
     {
-        to->pClearValues = (VkClearValue*)pool->alloc(from->clearValueCount * sizeof(const VkClearValue));
-        to->clearValueCount = from->clearValueCount;
-        for (uint32_t i = 0; i < (uint32_t)from->clearValueCount; ++i)
+        to->pClearValues = nullptr;
+        if (from->pClearValues)
         {
-            deepcopy_VkClearValue(pool, from->pClearValues + i, (VkClearValue*)(to->pClearValues + i));
+            to->pClearValues = (VkClearValue*)alloc->alloc(from->clearValueCount * sizeof(const VkClearValue));
+            to->clearValueCount = from->clearValueCount;
+            for (uint32_t i = 0; i < (uint32_t)from->clearValueCount; ++i)
+            {
+                deepcopy_VkClearValue(alloc, rootType, from->pClearValues + i, (VkClearValue*)(to->pClearValues + i));
+            }
         }
     }
 }
 
-void deepcopy_VkDispatchIndirectCommand(
-    Pool* pool,
-    const VkDispatchIndirectCommand* from,
-    VkDispatchIndirectCommand* to)
-{
-    (void)pool;
-    *to = *from;
-}
-
-void deepcopy_VkDrawIndexedIndirectCommand(
-    Pool* pool,
-    const VkDrawIndexedIndirectCommand* from,
-    VkDrawIndexedIndirectCommand* to)
-{
-    (void)pool;
-    *to = *from;
-}
-
-void deepcopy_VkDrawIndirectCommand(
-    Pool* pool,
-    const VkDrawIndirectCommand* from,
-    VkDrawIndirectCommand* to)
-{
-    (void)pool;
-    *to = *from;
-}
-
-void deepcopy_VkBaseOutStructure(
-    Pool* pool,
-    const VkBaseOutStructure* from,
-    VkBaseOutStructure* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (VkBaseOutStructure*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-}
-
-void deepcopy_VkBaseInStructure(
-    Pool* pool,
-    const VkBaseInStructure* from,
-    VkBaseInStructure* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const VkBaseInStructure*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-}
-
 #endif
 #ifdef VK_VERSION_1_1
 void deepcopy_VkPhysicalDeviceSubgroupProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceSubgroupProperties* from,
     VkPhysicalDeviceSubgroupProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkBindBufferMemoryInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBindBufferMemoryInfo* from,
     VkBindBufferMemoryInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkBindImageMemoryInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBindImageMemoryInfo* from,
     VkBindImageMemoryInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkPhysicalDevice16BitStorageFeatures(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDevice16BitStorageFeatures* from,
     VkPhysicalDevice16BitStorageFeatures* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkMemoryDedicatedRequirements(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryDedicatedRequirements* from,
     VkMemoryDedicatedRequirements* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkMemoryDedicatedAllocateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryDedicatedAllocateInfo* from,
     VkMemoryDedicatedAllocateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkMemoryAllocateFlagsInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryAllocateFlagsInfo* from,
     VkMemoryAllocateFlagsInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkDeviceGroupRenderPassBeginInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceGroupRenderPassBeginInfo* from,
     VkDeviceGroupRenderPassBeginInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pDeviceRenderAreas = nullptr;
-    if (from->pDeviceRenderAreas)
+    if (from)
     {
-        to->pDeviceRenderAreas = (VkRect2D*)pool->alloc(from->deviceRenderAreaCount * sizeof(const VkRect2D));
-        to->deviceRenderAreaCount = from->deviceRenderAreaCount;
-        for (uint32_t i = 0; i < (uint32_t)from->deviceRenderAreaCount; ++i)
+        to->pDeviceRenderAreas = nullptr;
+        if (from->pDeviceRenderAreas)
         {
-            deepcopy_VkRect2D(pool, from->pDeviceRenderAreas + i, (VkRect2D*)(to->pDeviceRenderAreas + i));
+            to->pDeviceRenderAreas = (VkRect2D*)alloc->alloc(from->deviceRenderAreaCount * sizeof(const VkRect2D));
+            to->deviceRenderAreaCount = from->deviceRenderAreaCount;
+            for (uint32_t i = 0; i < (uint32_t)from->deviceRenderAreaCount; ++i)
+            {
+                deepcopy_VkRect2D(alloc, rootType, from->pDeviceRenderAreas + i, (VkRect2D*)(to->pDeviceRenderAreas + i));
+            }
         }
     }
 }
 
 void deepcopy_VkDeviceGroupCommandBufferBeginInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceGroupCommandBufferBeginInfo* from,
     VkDeviceGroupCommandBufferBeginInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkDeviceGroupSubmitInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceGroupSubmitInfo* from,
     VkDeviceGroupSubmitInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pWaitSemaphoreDeviceIndices = nullptr;
     if (from->pWaitSemaphoreDeviceIndices)
     {
-        to->pWaitSemaphoreDeviceIndices = (uint32_t*)pool->dupArray(from->pWaitSemaphoreDeviceIndices, from->waitSemaphoreCount * sizeof(const uint32_t));
+        to->pWaitSemaphoreDeviceIndices = (uint32_t*)alloc->dupArray(from->pWaitSemaphoreDeviceIndices, from->waitSemaphoreCount * sizeof(const uint32_t));
     }
     to->pCommandBufferDeviceMasks = nullptr;
     if (from->pCommandBufferDeviceMasks)
     {
-        to->pCommandBufferDeviceMasks = (uint32_t*)pool->dupArray(from->pCommandBufferDeviceMasks, from->commandBufferCount * sizeof(const uint32_t));
+        to->pCommandBufferDeviceMasks = (uint32_t*)alloc->dupArray(from->pCommandBufferDeviceMasks, from->commandBufferCount * sizeof(const uint32_t));
     }
     to->pSignalSemaphoreDeviceIndices = nullptr;
     if (from->pSignalSemaphoreDeviceIndices)
     {
-        to->pSignalSemaphoreDeviceIndices = (uint32_t*)pool->dupArray(from->pSignalSemaphoreDeviceIndices, from->signalSemaphoreCount * sizeof(const uint32_t));
+        to->pSignalSemaphoreDeviceIndices = (uint32_t*)alloc->dupArray(from->pSignalSemaphoreDeviceIndices, from->signalSemaphoreCount * sizeof(const uint32_t));
     }
 }
 
 void deepcopy_VkDeviceGroupBindSparseInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceGroupBindSparseInfo* from,
     VkDeviceGroupBindSparseInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkBindBufferMemoryDeviceGroupInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBindBufferMemoryDeviceGroupInfo* from,
     VkBindBufferMemoryDeviceGroupInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pDeviceIndices = nullptr;
     if (from->pDeviceIndices)
     {
-        to->pDeviceIndices = (uint32_t*)pool->dupArray(from->pDeviceIndices, from->deviceIndexCount * sizeof(const uint32_t));
+        to->pDeviceIndices = (uint32_t*)alloc->dupArray(from->pDeviceIndices, from->deviceIndexCount * sizeof(const uint32_t));
     }
 }
 
 void deepcopy_VkBindImageMemoryDeviceGroupInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBindImageMemoryDeviceGroupInfo* from,
     VkBindImageMemoryDeviceGroupInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pDeviceIndices = nullptr;
     if (from->pDeviceIndices)
     {
-        to->pDeviceIndices = (uint32_t*)pool->dupArray(from->pDeviceIndices, from->deviceIndexCount * sizeof(const uint32_t));
+        to->pDeviceIndices = (uint32_t*)alloc->dupArray(from->pDeviceIndices, from->deviceIndexCount * sizeof(const uint32_t));
     }
-    to->pSplitInstanceBindRegions = nullptr;
-    if (from->pSplitInstanceBindRegions)
+    if (from)
     {
-        to->pSplitInstanceBindRegions = (VkRect2D*)pool->alloc(from->splitInstanceBindRegionCount * sizeof(const VkRect2D));
-        to->splitInstanceBindRegionCount = from->splitInstanceBindRegionCount;
-        for (uint32_t i = 0; i < (uint32_t)from->splitInstanceBindRegionCount; ++i)
+        to->pSplitInstanceBindRegions = nullptr;
+        if (from->pSplitInstanceBindRegions)
         {
-            deepcopy_VkRect2D(pool, from->pSplitInstanceBindRegions + i, (VkRect2D*)(to->pSplitInstanceBindRegions + i));
+            to->pSplitInstanceBindRegions = (VkRect2D*)alloc->alloc(from->splitInstanceBindRegionCount * sizeof(const VkRect2D));
+            to->splitInstanceBindRegionCount = from->splitInstanceBindRegionCount;
+            for (uint32_t i = 0; i < (uint32_t)from->splitInstanceBindRegionCount; ++i)
+            {
+                deepcopy_VkRect2D(alloc, rootType, from->pSplitInstanceBindRegions + i, (VkRect2D*)(to->pSplitInstanceBindRegions + i));
+            }
         }
     }
 }
 
 void deepcopy_VkPhysicalDeviceGroupProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceGroupProperties* from,
     VkPhysicalDeviceGroupProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     memcpy(to->physicalDevices, from->physicalDevices, VK_MAX_DEVICE_GROUP_SIZE * sizeof(VkPhysicalDevice));
 }
 
 void deepcopy_VkDeviceGroupDeviceCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceGroupDeviceCreateInfo* from,
     VkDeviceGroupDeviceCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pPhysicalDevices = nullptr;
     if (from->pPhysicalDevices)
     {
-        to->pPhysicalDevices = (VkPhysicalDevice*)pool->dupArray(from->pPhysicalDevices, from->physicalDeviceCount * sizeof(const VkPhysicalDevice));
+        to->pPhysicalDevices = (VkPhysicalDevice*)alloc->dupArray(from->pPhysicalDevices, from->physicalDeviceCount * sizeof(const VkPhysicalDevice));
     }
 }
 
 void deepcopy_VkBufferMemoryRequirementsInfo2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBufferMemoryRequirementsInfo2* from,
     VkBufferMemoryRequirementsInfo2* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkImageMemoryRequirementsInfo2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImageMemoryRequirementsInfo2* from,
     VkImageMemoryRequirementsInfo2* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkImageSparseMemoryRequirementsInfo2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImageSparseMemoryRequirementsInfo2* from,
     VkImageSparseMemoryRequirementsInfo2* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkMemoryRequirements2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryRequirements2* from,
     VkMemoryRequirements2* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkMemoryRequirements(pool, &from->memoryRequirements, (VkMemoryRequirements*)(&to->memoryRequirements));
+    deepcopy_VkMemoryRequirements(alloc, rootType, &from->memoryRequirements, (VkMemoryRequirements*)(&to->memoryRequirements));
 }
 
 void deepcopy_VkSparseImageMemoryRequirements2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSparseImageMemoryRequirements2* from,
     VkSparseImageMemoryRequirements2* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkSparseImageMemoryRequirements(pool, &from->memoryRequirements, (VkSparseImageMemoryRequirements*)(&to->memoryRequirements));
+    deepcopy_VkSparseImageMemoryRequirements(alloc, rootType, &from->memoryRequirements, (VkSparseImageMemoryRequirements*)(&to->memoryRequirements));
 }
 
 void deepcopy_VkPhysicalDeviceFeatures2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceFeatures2* from,
     VkPhysicalDeviceFeatures2* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkPhysicalDeviceFeatures(pool, &from->features, (VkPhysicalDeviceFeatures*)(&to->features));
+    deepcopy_VkPhysicalDeviceFeatures(alloc, rootType, &from->features, (VkPhysicalDeviceFeatures*)(&to->features));
 }
 
 void deepcopy_VkPhysicalDeviceProperties2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceProperties2* from,
     VkPhysicalDeviceProperties2* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkPhysicalDeviceProperties(pool, &from->properties, (VkPhysicalDeviceProperties*)(&to->properties));
+    deepcopy_VkPhysicalDeviceProperties(alloc, rootType, &from->properties, (VkPhysicalDeviceProperties*)(&to->properties));
 }
 
 void deepcopy_VkFormatProperties2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkFormatProperties2* from,
     VkFormatProperties2* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkFormatProperties(pool, &from->formatProperties, (VkFormatProperties*)(&to->formatProperties));
+    deepcopy_VkFormatProperties(alloc, rootType, &from->formatProperties, (VkFormatProperties*)(&to->formatProperties));
 }
 
 void deepcopy_VkImageFormatProperties2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImageFormatProperties2* from,
     VkImageFormatProperties2* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkImageFormatProperties(pool, &from->imageFormatProperties, (VkImageFormatProperties*)(&to->imageFormatProperties));
+    deepcopy_VkImageFormatProperties(alloc, rootType, &from->imageFormatProperties, (VkImageFormatProperties*)(&to->imageFormatProperties));
 }
 
 void deepcopy_VkPhysicalDeviceImageFormatInfo2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceImageFormatInfo2* from,
     VkPhysicalDeviceImageFormatInfo2* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkQueueFamilyProperties2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkQueueFamilyProperties2* from,
     VkQueueFamilyProperties2* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkQueueFamilyProperties(pool, &from->queueFamilyProperties, (VkQueueFamilyProperties*)(&to->queueFamilyProperties));
+    deepcopy_VkQueueFamilyProperties(alloc, rootType, &from->queueFamilyProperties, (VkQueueFamilyProperties*)(&to->queueFamilyProperties));
 }
 
 void deepcopy_VkPhysicalDeviceMemoryProperties2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceMemoryProperties2* from,
     VkPhysicalDeviceMemoryProperties2* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkPhysicalDeviceMemoryProperties(pool, &from->memoryProperties, (VkPhysicalDeviceMemoryProperties*)(&to->memoryProperties));
+    deepcopy_VkPhysicalDeviceMemoryProperties(alloc, rootType, &from->memoryProperties, (VkPhysicalDeviceMemoryProperties*)(&to->memoryProperties));
 }
 
 void deepcopy_VkSparseImageFormatProperties2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSparseImageFormatProperties2* from,
     VkSparseImageFormatProperties2* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkSparseImageFormatProperties(pool, &from->properties, (VkSparseImageFormatProperties*)(&to->properties));
+    deepcopy_VkSparseImageFormatProperties(alloc, rootType, &from->properties, (VkSparseImageFormatProperties*)(&to->properties));
 }
 
 void deepcopy_VkPhysicalDeviceSparseImageFormatInfo2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceSparseImageFormatInfo2* from,
     VkPhysicalDeviceSparseImageFormatInfo2* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkPhysicalDevicePointClippingProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDevicePointClippingProperties* from,
     VkPhysicalDevicePointClippingProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkInputAttachmentAspectReference(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkInputAttachmentAspectReference* from,
     VkInputAttachmentAspectReference* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkRenderPassInputAttachmentAspectCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkRenderPassInputAttachmentAspectCreateInfo* from,
     VkRenderPassInputAttachmentAspectCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pAspectReferences = nullptr;
-    if (from->pAspectReferences)
+    if (from)
     {
-        to->pAspectReferences = (VkInputAttachmentAspectReference*)pool->alloc(from->aspectReferenceCount * sizeof(const VkInputAttachmentAspectReference));
-        to->aspectReferenceCount = from->aspectReferenceCount;
-        for (uint32_t i = 0; i < (uint32_t)from->aspectReferenceCount; ++i)
+        to->pAspectReferences = nullptr;
+        if (from->pAspectReferences)
         {
-            deepcopy_VkInputAttachmentAspectReference(pool, from->pAspectReferences + i, (VkInputAttachmentAspectReference*)(to->pAspectReferences + i));
+            to->pAspectReferences = (VkInputAttachmentAspectReference*)alloc->alloc(from->aspectReferenceCount * sizeof(const VkInputAttachmentAspectReference));
+            to->aspectReferenceCount = from->aspectReferenceCount;
+            for (uint32_t i = 0; i < (uint32_t)from->aspectReferenceCount; ++i)
+            {
+                deepcopy_VkInputAttachmentAspectReference(alloc, rootType, from->pAspectReferences + i, (VkInputAttachmentAspectReference*)(to->pAspectReferences + i));
+            }
         }
     }
 }
 
 void deepcopy_VkImageViewUsageCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImageViewUsageCreateInfo* from,
     VkImageViewUsageCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkPipelineTessellationDomainOriginStateCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineTessellationDomainOriginStateCreateInfo* from,
     VkPipelineTessellationDomainOriginStateCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkRenderPassMultiviewCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkRenderPassMultiviewCreateInfo* from,
     VkRenderPassMultiviewCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pViewMasks = nullptr;
     if (from->pViewMasks)
     {
-        to->pViewMasks = (uint32_t*)pool->dupArray(from->pViewMasks, from->subpassCount * sizeof(const uint32_t));
+        to->pViewMasks = (uint32_t*)alloc->dupArray(from->pViewMasks, from->subpassCount * sizeof(const uint32_t));
     }
     to->pViewOffsets = nullptr;
     if (from->pViewOffsets)
     {
-        to->pViewOffsets = (int32_t*)pool->dupArray(from->pViewOffsets, from->dependencyCount * sizeof(const int32_t));
+        to->pViewOffsets = (int32_t*)alloc->dupArray(from->pViewOffsets, from->dependencyCount * sizeof(const int32_t));
     }
     to->pCorrelationMasks = nullptr;
     if (from->pCorrelationMasks)
     {
-        to->pCorrelationMasks = (uint32_t*)pool->dupArray(from->pCorrelationMasks, from->correlationMaskCount * sizeof(const uint32_t));
+        to->pCorrelationMasks = (uint32_t*)alloc->dupArray(from->pCorrelationMasks, from->correlationMaskCount * sizeof(const uint32_t));
     }
 }
 
 void deepcopy_VkPhysicalDeviceMultiviewFeatures(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceMultiviewFeatures* from,
     VkPhysicalDeviceMultiviewFeatures* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkPhysicalDeviceMultiviewProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceMultiviewProperties* from,
     VkPhysicalDeviceMultiviewProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
-void deepcopy_VkPhysicalDeviceVariablePointerFeatures(
-    Pool* pool,
-    const VkPhysicalDeviceVariablePointerFeatures* from,
-    VkPhysicalDeviceVariablePointerFeatures* to)
+void deepcopy_VkPhysicalDeviceVariablePointersFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVariablePointersFeatures* from,
+    VkPhysicalDeviceVariablePointersFeatures* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkPhysicalDeviceProtectedMemoryFeatures(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceProtectedMemoryFeatures* from,
     VkPhysicalDeviceProtectedMemoryFeatures* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkPhysicalDeviceProtectedMemoryProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceProtectedMemoryProperties* from,
     VkPhysicalDeviceProtectedMemoryProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkDeviceQueueInfo2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceQueueInfo2* from,
     VkDeviceQueueInfo2* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkProtectedSubmitInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkProtectedSubmitInfo* from,
     VkProtectedSubmitInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkSamplerYcbcrConversionCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSamplerYcbcrConversionCreateInfo* from,
     VkSamplerYcbcrConversionCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkComponentMapping(pool, &from->components, (VkComponentMapping*)(&to->components));
+    deepcopy_VkComponentMapping(alloc, rootType, &from->components, (VkComponentMapping*)(&to->components));
 }
 
 void deepcopy_VkSamplerYcbcrConversionInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSamplerYcbcrConversionInfo* from,
     VkSamplerYcbcrConversionInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkBindImagePlaneMemoryInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBindImagePlaneMemoryInfo* from,
     VkBindImagePlaneMemoryInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkImagePlaneMemoryRequirementsInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImagePlaneMemoryRequirementsInfo* from,
     VkImagePlaneMemoryRequirementsInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkPhysicalDeviceSamplerYcbcrConversionFeatures(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceSamplerYcbcrConversionFeatures* from,
     VkPhysicalDeviceSamplerYcbcrConversionFeatures* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkSamplerYcbcrConversionImageFormatProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSamplerYcbcrConversionImageFormatProperties* from,
     VkSamplerYcbcrConversionImageFormatProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkDescriptorUpdateTemplateEntry(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDescriptorUpdateTemplateEntry* from,
     VkDescriptorUpdateTemplateEntry* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkDescriptorUpdateTemplateCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDescriptorUpdateTemplateCreateInfo* from,
     VkDescriptorUpdateTemplateCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pDescriptorUpdateEntries = nullptr;
-    if (from->pDescriptorUpdateEntries)
+    if (from)
     {
-        to->pDescriptorUpdateEntries = (VkDescriptorUpdateTemplateEntry*)pool->alloc(from->descriptorUpdateEntryCount * sizeof(const VkDescriptorUpdateTemplateEntry));
-        to->descriptorUpdateEntryCount = from->descriptorUpdateEntryCount;
-        for (uint32_t i = 0; i < (uint32_t)from->descriptorUpdateEntryCount; ++i)
+        to->pDescriptorUpdateEntries = nullptr;
+        if (from->pDescriptorUpdateEntries)
         {
-            deepcopy_VkDescriptorUpdateTemplateEntry(pool, from->pDescriptorUpdateEntries + i, (VkDescriptorUpdateTemplateEntry*)(to->pDescriptorUpdateEntries + i));
+            to->pDescriptorUpdateEntries = (VkDescriptorUpdateTemplateEntry*)alloc->alloc(from->descriptorUpdateEntryCount * sizeof(const VkDescriptorUpdateTemplateEntry));
+            to->descriptorUpdateEntryCount = from->descriptorUpdateEntryCount;
+            for (uint32_t i = 0; i < (uint32_t)from->descriptorUpdateEntryCount; ++i)
+            {
+                deepcopy_VkDescriptorUpdateTemplateEntry(alloc, rootType, from->pDescriptorUpdateEntries + i, (VkDescriptorUpdateTemplateEntry*)(to->pDescriptorUpdateEntries + i));
+            }
         }
     }
 }
 
 void deepcopy_VkExternalMemoryProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalMemoryProperties* from,
     VkExternalMemoryProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkPhysicalDeviceExternalImageFormatInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalImageFormatInfo* from,
     VkPhysicalDeviceExternalImageFormatInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkExternalImageFormatProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalImageFormatProperties* from,
     VkExternalImageFormatProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkExternalMemoryProperties(pool, &from->externalMemoryProperties, (VkExternalMemoryProperties*)(&to->externalMemoryProperties));
+    deepcopy_VkExternalMemoryProperties(alloc, rootType, &from->externalMemoryProperties, (VkExternalMemoryProperties*)(&to->externalMemoryProperties));
 }
 
 void deepcopy_VkPhysicalDeviceExternalBufferInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalBufferInfo* from,
     VkPhysicalDeviceExternalBufferInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkExternalBufferProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalBufferProperties* from,
     VkExternalBufferProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkExternalMemoryProperties(pool, &from->externalMemoryProperties, (VkExternalMemoryProperties*)(&to->externalMemoryProperties));
+    deepcopy_VkExternalMemoryProperties(alloc, rootType, &from->externalMemoryProperties, (VkExternalMemoryProperties*)(&to->externalMemoryProperties));
 }
 
 void deepcopy_VkPhysicalDeviceIDProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceIDProperties* from,
     VkPhysicalDeviceIDProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     memcpy(to->deviceUUID, from->deviceUUID, VK_UUID_SIZE * sizeof(uint8_t));
     memcpy(to->driverUUID, from->driverUUID, VK_UUID_SIZE * sizeof(uint8_t));
@@ -2893,640 +4336,2542 @@
 }
 
 void deepcopy_VkExternalMemoryImageCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalMemoryImageCreateInfo* from,
     VkExternalMemoryImageCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkExternalMemoryBufferCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalMemoryBufferCreateInfo* from,
     VkExternalMemoryBufferCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkExportMemoryAllocateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExportMemoryAllocateInfo* from,
     VkExportMemoryAllocateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkPhysicalDeviceExternalFenceInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalFenceInfo* from,
     VkPhysicalDeviceExternalFenceInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkExternalFenceProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalFenceProperties* from,
     VkExternalFenceProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkExportFenceCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExportFenceCreateInfo* from,
     VkExportFenceCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkExportSemaphoreCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExportSemaphoreCreateInfo* from,
     VkExportSemaphoreCreateInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkPhysicalDeviceExternalSemaphoreInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalSemaphoreInfo* from,
     VkPhysicalDeviceExternalSemaphoreInfo* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkExternalSemaphoreProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalSemaphoreProperties* from,
     VkExternalSemaphoreProperties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkPhysicalDeviceMaintenance3Properties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceMaintenance3Properties* from,
     VkPhysicalDeviceMaintenance3Properties* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkDescriptorSetLayoutSupport(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDescriptorSetLayoutSupport* from,
     VkDescriptorSetLayoutSupport* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
-void deepcopy_VkPhysicalDeviceShaderDrawParameterFeatures(
-    Pool* pool,
-    const VkPhysicalDeviceShaderDrawParameterFeatures* from,
-    VkPhysicalDeviceShaderDrawParameterFeatures* to)
+void deepcopy_VkPhysicalDeviceShaderDrawParametersFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderDrawParametersFeatures* from,
+    VkPhysicalDeviceShaderDrawParametersFeatures* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_VERSION_1_2
+void deepcopy_VkPhysicalDeviceVulkan11Features(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan11Features* from,
+    VkPhysicalDeviceVulkan11Features* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceVulkan11Properties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan11Properties* from,
+    VkPhysicalDeviceVulkan11Properties* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    memcpy(to->deviceUUID, from->deviceUUID, VK_UUID_SIZE * sizeof(uint8_t));
+    memcpy(to->driverUUID, from->driverUUID, VK_UUID_SIZE * sizeof(uint8_t));
+    memcpy(to->deviceLUID, from->deviceLUID, VK_LUID_SIZE * sizeof(uint8_t));
+}
+
+void deepcopy_VkPhysicalDeviceVulkan12Features(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan12Features* from,
+    VkPhysicalDeviceVulkan12Features* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkConformanceVersion(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkConformanceVersion* from,
+    VkConformanceVersion* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkPhysicalDeviceVulkan12Properties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan12Properties* from,
+    VkPhysicalDeviceVulkan12Properties* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    memcpy(to->driverName, from->driverName, VK_MAX_DRIVER_NAME_SIZE * sizeof(char));
+    memcpy(to->driverInfo, from->driverInfo, VK_MAX_DRIVER_INFO_SIZE * sizeof(char));
+    deepcopy_VkConformanceVersion(alloc, rootType, &from->conformanceVersion, (VkConformanceVersion*)(&to->conformanceVersion));
+}
+
+void deepcopy_VkImageFormatListCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageFormatListCreateInfo* from,
+    VkImageFormatListCreateInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pViewFormats = nullptr;
+    if (from->pViewFormats)
+    {
+        to->pViewFormats = (VkFormat*)alloc->dupArray(from->pViewFormats, from->viewFormatCount * sizeof(const VkFormat));
+    }
+}
+
+void deepcopy_VkAttachmentDescription2(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAttachmentDescription2* from,
+    VkAttachmentDescription2* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkAttachmentReference2(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAttachmentReference2* from,
+    VkAttachmentReference2* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkSubpassDescription2(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSubpassDescription2* from,
+    VkSubpassDescription2* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pInputAttachments = nullptr;
+        if (from->pInputAttachments)
+        {
+            to->pInputAttachments = (VkAttachmentReference2*)alloc->alloc(from->inputAttachmentCount * sizeof(const VkAttachmentReference2));
+            to->inputAttachmentCount = from->inputAttachmentCount;
+            for (uint32_t i = 0; i < (uint32_t)from->inputAttachmentCount; ++i)
+            {
+                deepcopy_VkAttachmentReference2(alloc, rootType, from->pInputAttachments + i, (VkAttachmentReference2*)(to->pInputAttachments + i));
+            }
+        }
+    }
+    if (from)
+    {
+        to->pColorAttachments = nullptr;
+        if (from->pColorAttachments)
+        {
+            to->pColorAttachments = (VkAttachmentReference2*)alloc->alloc(from->colorAttachmentCount * sizeof(const VkAttachmentReference2));
+            to->colorAttachmentCount = from->colorAttachmentCount;
+            for (uint32_t i = 0; i < (uint32_t)from->colorAttachmentCount; ++i)
+            {
+                deepcopy_VkAttachmentReference2(alloc, rootType, from->pColorAttachments + i, (VkAttachmentReference2*)(to->pColorAttachments + i));
+            }
+        }
+    }
+    if (from)
+    {
+        to->pResolveAttachments = nullptr;
+        if (from->pResolveAttachments)
+        {
+            to->pResolveAttachments = (VkAttachmentReference2*)alloc->alloc(from->colorAttachmentCount * sizeof(const VkAttachmentReference2));
+            to->colorAttachmentCount = from->colorAttachmentCount;
+            for (uint32_t i = 0; i < (uint32_t)from->colorAttachmentCount; ++i)
+            {
+                deepcopy_VkAttachmentReference2(alloc, rootType, from->pResolveAttachments + i, (VkAttachmentReference2*)(to->pResolveAttachments + i));
+            }
+        }
+    }
+    to->pDepthStencilAttachment = nullptr;
+    if (from->pDepthStencilAttachment)
+    {
+        to->pDepthStencilAttachment = (VkAttachmentReference2*)alloc->alloc(sizeof(const VkAttachmentReference2));
+        deepcopy_VkAttachmentReference2(alloc, rootType, from->pDepthStencilAttachment, (VkAttachmentReference2*)(to->pDepthStencilAttachment));
+    }
+    to->pPreserveAttachments = nullptr;
+    if (from->pPreserveAttachments)
+    {
+        to->pPreserveAttachments = (uint32_t*)alloc->dupArray(from->pPreserveAttachments, from->preserveAttachmentCount * sizeof(const uint32_t));
+    }
+}
+
+void deepcopy_VkSubpassDependency2(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSubpassDependency2* from,
+    VkSubpassDependency2* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkRenderPassCreateInfo2(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRenderPassCreateInfo2* from,
+    VkRenderPassCreateInfo2* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pAttachments = nullptr;
+        if (from->pAttachments)
+        {
+            to->pAttachments = (VkAttachmentDescription2*)alloc->alloc(from->attachmentCount * sizeof(const VkAttachmentDescription2));
+            to->attachmentCount = from->attachmentCount;
+            for (uint32_t i = 0; i < (uint32_t)from->attachmentCount; ++i)
+            {
+                deepcopy_VkAttachmentDescription2(alloc, rootType, from->pAttachments + i, (VkAttachmentDescription2*)(to->pAttachments + i));
+            }
+        }
+    }
+    if (from)
+    {
+        to->pSubpasses = nullptr;
+        if (from->pSubpasses)
+        {
+            to->pSubpasses = (VkSubpassDescription2*)alloc->alloc(from->subpassCount * sizeof(const VkSubpassDescription2));
+            to->subpassCount = from->subpassCount;
+            for (uint32_t i = 0; i < (uint32_t)from->subpassCount; ++i)
+            {
+                deepcopy_VkSubpassDescription2(alloc, rootType, from->pSubpasses + i, (VkSubpassDescription2*)(to->pSubpasses + i));
+            }
+        }
+    }
+    if (from)
+    {
+        to->pDependencies = nullptr;
+        if (from->pDependencies)
+        {
+            to->pDependencies = (VkSubpassDependency2*)alloc->alloc(from->dependencyCount * sizeof(const VkSubpassDependency2));
+            to->dependencyCount = from->dependencyCount;
+            for (uint32_t i = 0; i < (uint32_t)from->dependencyCount; ++i)
+            {
+                deepcopy_VkSubpassDependency2(alloc, rootType, from->pDependencies + i, (VkSubpassDependency2*)(to->pDependencies + i));
+            }
+        }
+    }
+    to->pCorrelatedViewMasks = nullptr;
+    if (from->pCorrelatedViewMasks)
+    {
+        to->pCorrelatedViewMasks = (uint32_t*)alloc->dupArray(from->pCorrelatedViewMasks, from->correlatedViewMaskCount * sizeof(const uint32_t));
+    }
+}
+
+void deepcopy_VkSubpassBeginInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSubpassBeginInfo* from,
+    VkSubpassBeginInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkSubpassEndInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSubpassEndInfo* from,
+    VkSubpassEndInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDevice8BitStorageFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevice8BitStorageFeatures* from,
+    VkPhysicalDevice8BitStorageFeatures* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceDriverProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDriverProperties* from,
+    VkPhysicalDeviceDriverProperties* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    memcpy(to->driverName, from->driverName, VK_MAX_DRIVER_NAME_SIZE * sizeof(char));
+    memcpy(to->driverInfo, from->driverInfo, VK_MAX_DRIVER_INFO_SIZE * sizeof(char));
+    deepcopy_VkConformanceVersion(alloc, rootType, &from->conformanceVersion, (VkConformanceVersion*)(&to->conformanceVersion));
+}
+
+void deepcopy_VkPhysicalDeviceShaderAtomicInt64Features(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderAtomicInt64Features* from,
+    VkPhysicalDeviceShaderAtomicInt64Features* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceShaderFloat16Int8Features(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderFloat16Int8Features* from,
+    VkPhysicalDeviceShaderFloat16Int8Features* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceFloatControlsProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFloatControlsProperties* from,
+    VkPhysicalDeviceFloatControlsProperties* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkDescriptorSetLayoutBindingFlagsCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutBindingFlagsCreateInfo* from,
+    VkDescriptorSetLayoutBindingFlagsCreateInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pBindingFlags = nullptr;
+    if (from->pBindingFlags)
+    {
+        to->pBindingFlags = (VkDescriptorBindingFlags*)alloc->dupArray(from->pBindingFlags, from->bindingCount * sizeof(const VkDescriptorBindingFlags));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceDescriptorIndexingFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDescriptorIndexingFeatures* from,
+    VkPhysicalDeviceDescriptorIndexingFeatures* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceDescriptorIndexingProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDescriptorIndexingProperties* from,
+    VkPhysicalDeviceDescriptorIndexingProperties* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkDescriptorSetVariableDescriptorCountAllocateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorSetVariableDescriptorCountAllocateInfo* from,
+    VkDescriptorSetVariableDescriptorCountAllocateInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pDescriptorCounts = nullptr;
+    if (from->pDescriptorCounts)
+    {
+        to->pDescriptorCounts = (uint32_t*)alloc->dupArray(from->pDescriptorCounts, from->descriptorSetCount * sizeof(const uint32_t));
+    }
+}
+
+void deepcopy_VkDescriptorSetVariableDescriptorCountLayoutSupport(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorSetVariableDescriptorCountLayoutSupport* from,
+    VkDescriptorSetVariableDescriptorCountLayoutSupport* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkSubpassDescriptionDepthStencilResolve(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSubpassDescriptionDepthStencilResolve* from,
+    VkSubpassDescriptionDepthStencilResolve* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pDepthStencilResolveAttachment = nullptr;
+    if (from->pDepthStencilResolveAttachment)
+    {
+        to->pDepthStencilResolveAttachment = (VkAttachmentReference2*)alloc->alloc(sizeof(const VkAttachmentReference2));
+        deepcopy_VkAttachmentReference2(alloc, rootType, from->pDepthStencilResolveAttachment, (VkAttachmentReference2*)(to->pDepthStencilResolveAttachment));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceDepthStencilResolveProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDepthStencilResolveProperties* from,
+    VkPhysicalDeviceDepthStencilResolveProperties* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceScalarBlockLayoutFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceScalarBlockLayoutFeatures* from,
+    VkPhysicalDeviceScalarBlockLayoutFeatures* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkImageStencilUsageCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageStencilUsageCreateInfo* from,
+    VkImageStencilUsageCreateInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkSamplerReductionModeCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSamplerReductionModeCreateInfo* from,
+    VkSamplerReductionModeCreateInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceSamplerFilterMinmaxProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSamplerFilterMinmaxProperties* from,
+    VkPhysicalDeviceSamplerFilterMinmaxProperties* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceVulkanMemoryModelFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkanMemoryModelFeatures* from,
+    VkPhysicalDeviceVulkanMemoryModelFeatures* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceImagelessFramebufferFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImagelessFramebufferFeatures* from,
+    VkPhysicalDeviceImagelessFramebufferFeatures* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkFramebufferAttachmentImageInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkFramebufferAttachmentImageInfo* from,
+    VkFramebufferAttachmentImageInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pViewFormats = nullptr;
+    if (from->pViewFormats)
+    {
+        to->pViewFormats = (VkFormat*)alloc->dupArray(from->pViewFormats, from->viewFormatCount * sizeof(const VkFormat));
+    }
+}
+
+void deepcopy_VkFramebufferAttachmentsCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkFramebufferAttachmentsCreateInfo* from,
+    VkFramebufferAttachmentsCreateInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pAttachmentImageInfos = nullptr;
+        if (from->pAttachmentImageInfos)
+        {
+            to->pAttachmentImageInfos = (VkFramebufferAttachmentImageInfo*)alloc->alloc(from->attachmentImageInfoCount * sizeof(const VkFramebufferAttachmentImageInfo));
+            to->attachmentImageInfoCount = from->attachmentImageInfoCount;
+            for (uint32_t i = 0; i < (uint32_t)from->attachmentImageInfoCount; ++i)
+            {
+                deepcopy_VkFramebufferAttachmentImageInfo(alloc, rootType, from->pAttachmentImageInfos + i, (VkFramebufferAttachmentImageInfo*)(to->pAttachmentImageInfos + i));
+            }
+        }
+    }
+}
+
+void deepcopy_VkRenderPassAttachmentBeginInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRenderPassAttachmentBeginInfo* from,
+    VkRenderPassAttachmentBeginInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pAttachments = nullptr;
+    if (from->pAttachments)
+    {
+        to->pAttachments = (VkImageView*)alloc->dupArray(from->pAttachments, from->attachmentCount * sizeof(const VkImageView));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceUniformBufferStandardLayoutFeatures* from,
+    VkPhysicalDeviceUniformBufferStandardLayoutFeatures* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* from,
+    VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* from,
+    VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkAttachmentReferenceStencilLayout(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAttachmentReferenceStencilLayout* from,
+    VkAttachmentReferenceStencilLayout* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkAttachmentDescriptionStencilLayout(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAttachmentDescriptionStencilLayout* from,
+    VkAttachmentDescriptionStencilLayout* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceHostQueryResetFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceHostQueryResetFeatures* from,
+    VkPhysicalDeviceHostQueryResetFeatures* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceTimelineSemaphoreFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTimelineSemaphoreFeatures* from,
+    VkPhysicalDeviceTimelineSemaphoreFeatures* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceTimelineSemaphoreProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTimelineSemaphoreProperties* from,
+    VkPhysicalDeviceTimelineSemaphoreProperties* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkSemaphoreTypeCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSemaphoreTypeCreateInfo* from,
+    VkSemaphoreTypeCreateInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkTimelineSemaphoreSubmitInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkTimelineSemaphoreSubmitInfo* from,
+    VkTimelineSemaphoreSubmitInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pWaitSemaphoreValues = nullptr;
+    if (from->pWaitSemaphoreValues)
+    {
+        to->pWaitSemaphoreValues = (uint64_t*)alloc->dupArray(from->pWaitSemaphoreValues, from->waitSemaphoreValueCount * sizeof(const uint64_t));
+    }
+    to->pSignalSemaphoreValues = nullptr;
+    if (from->pSignalSemaphoreValues)
+    {
+        to->pSignalSemaphoreValues = (uint64_t*)alloc->dupArray(from->pSignalSemaphoreValues, from->signalSemaphoreValueCount * sizeof(const uint64_t));
+    }
+}
+
+void deepcopy_VkSemaphoreWaitInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSemaphoreWaitInfo* from,
+    VkSemaphoreWaitInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pSemaphores = nullptr;
+    if (from->pSemaphores)
+    {
+        to->pSemaphores = (VkSemaphore*)alloc->dupArray(from->pSemaphores, from->semaphoreCount * sizeof(const VkSemaphore));
+    }
+    to->pValues = nullptr;
+    if (from->pValues)
+    {
+        to->pValues = (uint64_t*)alloc->dupArray(from->pValues, from->semaphoreCount * sizeof(const uint64_t));
+    }
+}
+
+void deepcopy_VkSemaphoreSignalInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSemaphoreSignalInfo* from,
+    VkSemaphoreSignalInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceBufferDeviceAddressFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBufferDeviceAddressFeatures* from,
+    VkPhysicalDeviceBufferDeviceAddressFeatures* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkBufferDeviceAddressInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBufferDeviceAddressInfo* from,
+    VkBufferDeviceAddressInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkBufferOpaqueCaptureAddressCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBufferOpaqueCaptureAddressCreateInfo* from,
+    VkBufferOpaqueCaptureAddressCreateInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkMemoryOpaqueCaptureAddressAllocateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkMemoryOpaqueCaptureAddressAllocateInfo* from,
+    VkMemoryOpaqueCaptureAddressAllocateInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkDeviceMemoryOpaqueCaptureAddressInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDeviceMemoryOpaqueCaptureAddressInfo* from,
+    VkDeviceMemoryOpaqueCaptureAddressInfo* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_KHR_surface
 void deepcopy_VkSurfaceCapabilitiesKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSurfaceCapabilitiesKHR* from,
     VkSurfaceCapabilitiesKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    deepcopy_VkExtent2D(pool, &from->currentExtent, (VkExtent2D*)(&to->currentExtent));
-    deepcopy_VkExtent2D(pool, &from->minImageExtent, (VkExtent2D*)(&to->minImageExtent));
-    deepcopy_VkExtent2D(pool, &from->maxImageExtent, (VkExtent2D*)(&to->maxImageExtent));
+    deepcopy_VkExtent2D(alloc, rootType, &from->currentExtent, (VkExtent2D*)(&to->currentExtent));
+    deepcopy_VkExtent2D(alloc, rootType, &from->minImageExtent, (VkExtent2D*)(&to->minImageExtent));
+    deepcopy_VkExtent2D(alloc, rootType, &from->maxImageExtent, (VkExtent2D*)(&to->maxImageExtent));
 }
 
 void deepcopy_VkSurfaceFormatKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSurfaceFormatKHR* from,
     VkSurfaceFormatKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 #endif
 #ifdef VK_KHR_swapchain
 void deepcopy_VkSwapchainCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSwapchainCreateInfoKHR* from,
     VkSwapchainCreateInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkExtent2D(pool, &from->imageExtent, (VkExtent2D*)(&to->imageExtent));
+    deepcopy_VkExtent2D(alloc, rootType, &from->imageExtent, (VkExtent2D*)(&to->imageExtent));
     to->pQueueFamilyIndices = nullptr;
     if (from->pQueueFamilyIndices)
     {
-        to->pQueueFamilyIndices = (uint32_t*)pool->dupArray(from->pQueueFamilyIndices, from->queueFamilyIndexCount * sizeof(const uint32_t));
+        to->pQueueFamilyIndices = (uint32_t*)alloc->dupArray(from->pQueueFamilyIndices, from->queueFamilyIndexCount * sizeof(const uint32_t));
     }
 }
 
 void deepcopy_VkPresentInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPresentInfoKHR* from,
     VkPresentInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pWaitSemaphores = nullptr;
     if (from->pWaitSemaphores)
     {
-        to->pWaitSemaphores = (VkSemaphore*)pool->dupArray(from->pWaitSemaphores, from->waitSemaphoreCount * sizeof(const VkSemaphore));
+        to->pWaitSemaphores = (VkSemaphore*)alloc->dupArray(from->pWaitSemaphores, from->waitSemaphoreCount * sizeof(const VkSemaphore));
     }
     to->pSwapchains = nullptr;
     if (from->pSwapchains)
     {
-        to->pSwapchains = (VkSwapchainKHR*)pool->dupArray(from->pSwapchains, from->swapchainCount * sizeof(const VkSwapchainKHR));
+        to->pSwapchains = (VkSwapchainKHR*)alloc->dupArray(from->pSwapchains, from->swapchainCount * sizeof(const VkSwapchainKHR));
     }
     to->pImageIndices = nullptr;
     if (from->pImageIndices)
     {
-        to->pImageIndices = (uint32_t*)pool->dupArray(from->pImageIndices, from->swapchainCount * sizeof(const uint32_t));
+        to->pImageIndices = (uint32_t*)alloc->dupArray(from->pImageIndices, from->swapchainCount * sizeof(const uint32_t));
     }
     to->pResults = nullptr;
     if (from->pResults)
     {
-        to->pResults = (VkResult*)pool->dupArray(from->pResults, from->swapchainCount * sizeof(VkResult));
+        to->pResults = (VkResult*)alloc->dupArray(from->pResults, from->swapchainCount * sizeof(VkResult));
     }
 }
 
 void deepcopy_VkImageSwapchainCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImageSwapchainCreateInfoKHR* from,
     VkImageSwapchainCreateInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkBindImageMemorySwapchainInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBindImageMemorySwapchainInfoKHR* from,
     VkBindImageMemorySwapchainInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkAcquireNextImageInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkAcquireNextImageInfoKHR* from,
     VkAcquireNextImageInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkDeviceGroupPresentCapabilitiesKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceGroupPresentCapabilitiesKHR* from,
     VkDeviceGroupPresentCapabilitiesKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     memcpy(to->presentMask, from->presentMask, VK_MAX_DEVICE_GROUP_SIZE * sizeof(uint32_t));
 }
 
 void deepcopy_VkDeviceGroupPresentInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceGroupPresentInfoKHR* from,
     VkDeviceGroupPresentInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pDeviceMasks = nullptr;
     if (from->pDeviceMasks)
     {
-        to->pDeviceMasks = (uint32_t*)pool->dupArray(from->pDeviceMasks, from->swapchainCount * sizeof(const uint32_t));
+        to->pDeviceMasks = (uint32_t*)alloc->dupArray(from->pDeviceMasks, from->swapchainCount * sizeof(const uint32_t));
     }
 }
 
 void deepcopy_VkDeviceGroupSwapchainCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceGroupSwapchainCreateInfoKHR* from,
     VkDeviceGroupSwapchainCreateInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_KHR_display
+void deepcopy_VkDisplayModeParametersKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDisplayModeParametersKHR* from,
+    VkDisplayModeParametersKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    deepcopy_VkExtent2D(alloc, rootType, &from->visibleRegion, (VkExtent2D*)(&to->visibleRegion));
+}
+
+void deepcopy_VkDisplayModeCreateInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDisplayModeCreateInfoKHR* from,
+    VkDisplayModeCreateInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkDisplayModeParametersKHR(alloc, rootType, &from->parameters, (VkDisplayModeParametersKHR*)(&to->parameters));
+}
+
+void deepcopy_VkDisplayModePropertiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDisplayModePropertiesKHR* from,
+    VkDisplayModePropertiesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    deepcopy_VkDisplayModeParametersKHR(alloc, rootType, &from->parameters, (VkDisplayModeParametersKHR*)(&to->parameters));
+}
+
+void deepcopy_VkDisplayPlaneCapabilitiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDisplayPlaneCapabilitiesKHR* from,
+    VkDisplayPlaneCapabilitiesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    deepcopy_VkOffset2D(alloc, rootType, &from->minSrcPosition, (VkOffset2D*)(&to->minSrcPosition));
+    deepcopy_VkOffset2D(alloc, rootType, &from->maxSrcPosition, (VkOffset2D*)(&to->maxSrcPosition));
+    deepcopy_VkExtent2D(alloc, rootType, &from->minSrcExtent, (VkExtent2D*)(&to->minSrcExtent));
+    deepcopy_VkExtent2D(alloc, rootType, &from->maxSrcExtent, (VkExtent2D*)(&to->maxSrcExtent));
+    deepcopy_VkOffset2D(alloc, rootType, &from->minDstPosition, (VkOffset2D*)(&to->minDstPosition));
+    deepcopy_VkOffset2D(alloc, rootType, &from->maxDstPosition, (VkOffset2D*)(&to->maxDstPosition));
+    deepcopy_VkExtent2D(alloc, rootType, &from->minDstExtent, (VkExtent2D*)(&to->minDstExtent));
+    deepcopy_VkExtent2D(alloc, rootType, &from->maxDstExtent, (VkExtent2D*)(&to->maxDstExtent));
+}
+
+void deepcopy_VkDisplayPlanePropertiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDisplayPlanePropertiesKHR* from,
+    VkDisplayPlanePropertiesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
 void deepcopy_VkDisplayPropertiesKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayPropertiesKHR* from,
     VkDisplayPropertiesKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
     to->displayName = nullptr;
     if (from->displayName)
     {
-        to->displayName = pool->strDup(from->displayName);
+        to->displayName = alloc->strDup(from->displayName);
     }
-    deepcopy_VkExtent2D(pool, &from->physicalDimensions, (VkExtent2D*)(&to->physicalDimensions));
-    deepcopy_VkExtent2D(pool, &from->physicalResolution, (VkExtent2D*)(&to->physicalResolution));
-}
-
-void deepcopy_VkDisplayModeParametersKHR(
-    Pool* pool,
-    const VkDisplayModeParametersKHR* from,
-    VkDisplayModeParametersKHR* to)
-{
-    (void)pool;
-    *to = *from;
-    deepcopy_VkExtent2D(pool, &from->visibleRegion, (VkExtent2D*)(&to->visibleRegion));
-}
-
-void deepcopy_VkDisplayModePropertiesKHR(
-    Pool* pool,
-    const VkDisplayModePropertiesKHR* from,
-    VkDisplayModePropertiesKHR* to)
-{
-    (void)pool;
-    *to = *from;
-    deepcopy_VkDisplayModeParametersKHR(pool, &from->parameters, (VkDisplayModeParametersKHR*)(&to->parameters));
-}
-
-void deepcopy_VkDisplayModeCreateInfoKHR(
-    Pool* pool,
-    const VkDisplayModeCreateInfoKHR* from,
-    VkDisplayModeCreateInfoKHR* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-    deepcopy_VkDisplayModeParametersKHR(pool, &from->parameters, (VkDisplayModeParametersKHR*)(&to->parameters));
-}
-
-void deepcopy_VkDisplayPlaneCapabilitiesKHR(
-    Pool* pool,
-    const VkDisplayPlaneCapabilitiesKHR* from,
-    VkDisplayPlaneCapabilitiesKHR* to)
-{
-    (void)pool;
-    *to = *from;
-    deepcopy_VkOffset2D(pool, &from->minSrcPosition, (VkOffset2D*)(&to->minSrcPosition));
-    deepcopy_VkOffset2D(pool, &from->maxSrcPosition, (VkOffset2D*)(&to->maxSrcPosition));
-    deepcopy_VkExtent2D(pool, &from->minSrcExtent, (VkExtent2D*)(&to->minSrcExtent));
-    deepcopy_VkExtent2D(pool, &from->maxSrcExtent, (VkExtent2D*)(&to->maxSrcExtent));
-    deepcopy_VkOffset2D(pool, &from->minDstPosition, (VkOffset2D*)(&to->minDstPosition));
-    deepcopy_VkOffset2D(pool, &from->maxDstPosition, (VkOffset2D*)(&to->maxDstPosition));
-    deepcopy_VkExtent2D(pool, &from->minDstExtent, (VkExtent2D*)(&to->minDstExtent));
-    deepcopy_VkExtent2D(pool, &from->maxDstExtent, (VkExtent2D*)(&to->maxDstExtent));
-}
-
-void deepcopy_VkDisplayPlanePropertiesKHR(
-    Pool* pool,
-    const VkDisplayPlanePropertiesKHR* from,
-    VkDisplayPlanePropertiesKHR* to)
-{
-    (void)pool;
-    *to = *from;
+    deepcopy_VkExtent2D(alloc, rootType, &from->physicalDimensions, (VkExtent2D*)(&to->physicalDimensions));
+    deepcopy_VkExtent2D(alloc, rootType, &from->physicalResolution, (VkExtent2D*)(&to->physicalResolution));
 }
 
 void deepcopy_VkDisplaySurfaceCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplaySurfaceCreateInfoKHR* from,
     VkDisplaySurfaceCreateInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkExtent2D(pool, &from->imageExtent, (VkExtent2D*)(&to->imageExtent));
+    deepcopy_VkExtent2D(alloc, rootType, &from->imageExtent, (VkExtent2D*)(&to->imageExtent));
 }
 
 #endif
 #ifdef VK_KHR_display_swapchain
 void deepcopy_VkDisplayPresentInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayPresentInfoKHR* from,
     VkDisplayPresentInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkRect2D(pool, &from->srcRect, (VkRect2D*)(&to->srcRect));
-    deepcopy_VkRect2D(pool, &from->dstRect, (VkRect2D*)(&to->dstRect));
+    deepcopy_VkRect2D(alloc, rootType, &from->srcRect, (VkRect2D*)(&to->srcRect));
+    deepcopy_VkRect2D(alloc, rootType, &from->dstRect, (VkRect2D*)(&to->dstRect));
 }
 
 #endif
 #ifdef VK_KHR_xlib_surface
 void deepcopy_VkXlibSurfaceCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkXlibSurfaceCreateInfoKHR* from,
     VkXlibSurfaceCreateInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->dpy = nullptr;
     if (from->dpy)
     {
-        to->dpy = (Display*)pool->dupArray(from->dpy, sizeof(Display));
+        to->dpy = (Display*)alloc->dupArray(from->dpy, sizeof(Display));
     }
 }
 
 #endif
 #ifdef VK_KHR_xcb_surface
 void deepcopy_VkXcbSurfaceCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkXcbSurfaceCreateInfoKHR* from,
     VkXcbSurfaceCreateInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->connection = nullptr;
     if (from->connection)
     {
-        to->connection = (xcb_connection_t*)pool->dupArray(from->connection, sizeof(xcb_connection_t));
+        to->connection = (xcb_connection_t*)alloc->dupArray(from->connection, sizeof(xcb_connection_t));
     }
 }
 
 #endif
 #ifdef VK_KHR_wayland_surface
 void deepcopy_VkWaylandSurfaceCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkWaylandSurfaceCreateInfoKHR* from,
     VkWaylandSurfaceCreateInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->display = nullptr;
     if (from->display)
     {
-        to->display = (wl_display*)pool->dupArray(from->display, sizeof(wl_display));
+        to->display = (wl_display*)alloc->dupArray(from->display, sizeof(wl_display));
     }
     to->surface = nullptr;
     if (from->surface)
     {
-        to->surface = (wl_surface*)pool->dupArray(from->surface, sizeof(wl_surface));
-    }
-}
-
-#endif
-#ifdef VK_KHR_mir_surface
-void deepcopy_VkMirSurfaceCreateInfoKHR(
-    Pool* pool,
-    const VkMirSurfaceCreateInfoKHR* from,
-    VkMirSurfaceCreateInfoKHR* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-    to->connection = nullptr;
-    if (from->connection)
-    {
-        to->connection = (MirConnection*)pool->dupArray(from->connection, sizeof(MirConnection));
-    }
-    to->mirSurface = nullptr;
-    if (from->mirSurface)
-    {
-        to->mirSurface = (MirSurface*)pool->dupArray(from->mirSurface, sizeof(MirSurface));
+        to->surface = (wl_surface*)alloc->dupArray(from->surface, sizeof(wl_surface));
     }
 }
 
 #endif
 #ifdef VK_KHR_android_surface
 void deepcopy_VkAndroidSurfaceCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkAndroidSurfaceCreateInfoKHR* from,
     VkAndroidSurfaceCreateInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->window = nullptr;
     if (from->window)
     {
-        to->window = (ANativeWindow*)pool->dupArray(from->window, sizeof(ANativeWindow));
+        to->window = (ANativeWindow*)alloc->dupArray(from->window, sizeof(ANativeWindow));
     }
 }
 
 #endif
 #ifdef VK_KHR_win32_surface
 void deepcopy_VkWin32SurfaceCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkWin32SurfaceCreateInfoKHR* from,
     VkWin32SurfaceCreateInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
@@ -3551,164 +6896,260 @@
 #endif
 #ifdef VK_KHR_external_memory_win32
 void deepcopy_VkImportMemoryWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportMemoryWin32HandleInfoKHR* from,
     VkImportMemoryWin32HandleInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkExportMemoryWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExportMemoryWin32HandleInfoKHR* from,
     VkExportMemoryWin32HandleInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pAttributes = nullptr;
     if (from->pAttributes)
     {
-        to->pAttributes = (SECURITY_ATTRIBUTES*)pool->dupArray(from->pAttributes, sizeof(const SECURITY_ATTRIBUTES));
+        to->pAttributes = (SECURITY_ATTRIBUTES*)alloc->dupArray(from->pAttributes, sizeof(const SECURITY_ATTRIBUTES));
     }
 }
 
 void deepcopy_VkMemoryWin32HandlePropertiesKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryWin32HandlePropertiesKHR* from,
     VkMemoryWin32HandlePropertiesKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkMemoryGetWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryGetWin32HandleInfoKHR* from,
     VkMemoryGetWin32HandleInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_KHR_external_memory_fd
 void deepcopy_VkImportMemoryFdInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportMemoryFdInfoKHR* from,
     VkImportMemoryFdInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkMemoryFdPropertiesKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryFdPropertiesKHR* from,
     VkMemoryFdPropertiesKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkMemoryGetFdInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryGetFdInfoKHR* from,
     VkMemoryGetFdInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_KHR_win32_keyed_mutex
 void deepcopy_VkWin32KeyedMutexAcquireReleaseInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkWin32KeyedMutexAcquireReleaseInfoKHR* from,
     VkWin32KeyedMutexAcquireReleaseInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pAcquireSyncs = nullptr;
     if (from->pAcquireSyncs)
     {
-        to->pAcquireSyncs = (VkDeviceMemory*)pool->dupArray(from->pAcquireSyncs, from->acquireCount * sizeof(const VkDeviceMemory));
+        to->pAcquireSyncs = (VkDeviceMemory*)alloc->dupArray(from->pAcquireSyncs, from->acquireCount * sizeof(const VkDeviceMemory));
     }
     to->pAcquireKeys = nullptr;
     if (from->pAcquireKeys)
     {
-        to->pAcquireKeys = (uint64_t*)pool->dupArray(from->pAcquireKeys, from->acquireCount * sizeof(const uint64_t));
+        to->pAcquireKeys = (uint64_t*)alloc->dupArray(from->pAcquireKeys, from->acquireCount * sizeof(const uint64_t));
     }
     to->pAcquireTimeouts = nullptr;
     if (from->pAcquireTimeouts)
     {
-        to->pAcquireTimeouts = (uint32_t*)pool->dupArray(from->pAcquireTimeouts, from->acquireCount * sizeof(const uint32_t));
+        to->pAcquireTimeouts = (uint32_t*)alloc->dupArray(from->pAcquireTimeouts, from->acquireCount * sizeof(const uint32_t));
     }
     to->pReleaseSyncs = nullptr;
     if (from->pReleaseSyncs)
     {
-        to->pReleaseSyncs = (VkDeviceMemory*)pool->dupArray(from->pReleaseSyncs, from->releaseCount * sizeof(const VkDeviceMemory));
+        to->pReleaseSyncs = (VkDeviceMemory*)alloc->dupArray(from->pReleaseSyncs, from->releaseCount * sizeof(const VkDeviceMemory));
     }
     to->pReleaseKeys = nullptr;
     if (from->pReleaseKeys)
     {
-        to->pReleaseKeys = (uint64_t*)pool->dupArray(from->pReleaseKeys, from->releaseCount * sizeof(const uint64_t));
+        to->pReleaseKeys = (uint64_t*)alloc->dupArray(from->pReleaseKeys, from->releaseCount * sizeof(const uint64_t));
     }
 }
 
@@ -3719,192 +7160,300 @@
 #endif
 #ifdef VK_KHR_external_semaphore_win32
 void deepcopy_VkImportSemaphoreWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportSemaphoreWin32HandleInfoKHR* from,
     VkImportSemaphoreWin32HandleInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkExportSemaphoreWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExportSemaphoreWin32HandleInfoKHR* from,
     VkExportSemaphoreWin32HandleInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pAttributes = nullptr;
     if (from->pAttributes)
     {
-        to->pAttributes = (SECURITY_ATTRIBUTES*)pool->dupArray(from->pAttributes, sizeof(const SECURITY_ATTRIBUTES));
+        to->pAttributes = (SECURITY_ATTRIBUTES*)alloc->dupArray(from->pAttributes, sizeof(const SECURITY_ATTRIBUTES));
     }
 }
 
 void deepcopy_VkD3D12FenceSubmitInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkD3D12FenceSubmitInfoKHR* from,
     VkD3D12FenceSubmitInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pWaitSemaphoreValues = nullptr;
     if (from->pWaitSemaphoreValues)
     {
-        to->pWaitSemaphoreValues = (uint64_t*)pool->dupArray(from->pWaitSemaphoreValues, from->waitSemaphoreValuesCount * sizeof(const uint64_t));
+        to->pWaitSemaphoreValues = (uint64_t*)alloc->dupArray(from->pWaitSemaphoreValues, from->waitSemaphoreValuesCount * sizeof(const uint64_t));
     }
     to->pSignalSemaphoreValues = nullptr;
     if (from->pSignalSemaphoreValues)
     {
-        to->pSignalSemaphoreValues = (uint64_t*)pool->dupArray(from->pSignalSemaphoreValues, from->signalSemaphoreValuesCount * sizeof(const uint64_t));
+        to->pSignalSemaphoreValues = (uint64_t*)alloc->dupArray(from->pSignalSemaphoreValues, from->signalSemaphoreValuesCount * sizeof(const uint64_t));
     }
 }
 
 void deepcopy_VkSemaphoreGetWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSemaphoreGetWin32HandleInfoKHR* from,
     VkSemaphoreGetWin32HandleInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_KHR_external_semaphore_fd
 void deepcopy_VkImportSemaphoreFdInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportSemaphoreFdInfoKHR* from,
     VkImportSemaphoreFdInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkSemaphoreGetFdInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSemaphoreGetFdInfoKHR* from,
     VkSemaphoreGetFdInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_KHR_push_descriptor
 void deepcopy_VkPhysicalDevicePushDescriptorPropertiesKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDevicePushDescriptorPropertiesKHR* from,
     VkPhysicalDevicePushDescriptorPropertiesKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
+#ifdef VK_KHR_shader_float16_int8
+#endif
 #ifdef VK_KHR_16bit_storage
 #endif
 #ifdef VK_KHR_incremental_present
 void deepcopy_VkRectLayerKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkRectLayerKHR* from,
     VkRectLayerKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    deepcopy_VkOffset2D(pool, &from->offset, (VkOffset2D*)(&to->offset));
-    deepcopy_VkExtent2D(pool, &from->extent, (VkExtent2D*)(&to->extent));
+    deepcopy_VkOffset2D(alloc, rootType, &from->offset, (VkOffset2D*)(&to->offset));
+    deepcopy_VkExtent2D(alloc, rootType, &from->extent, (VkExtent2D*)(&to->extent));
 }
 
 void deepcopy_VkPresentRegionKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPresentRegionKHR* from,
     VkPresentRegionKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    to->pRectangles = nullptr;
-    if (from->pRectangles)
+    if (from)
     {
-        to->pRectangles = (VkRectLayerKHR*)pool->alloc(from->rectangleCount * sizeof(const VkRectLayerKHR));
-        to->rectangleCount = from->rectangleCount;
-        for (uint32_t i = 0; i < (uint32_t)from->rectangleCount; ++i)
+        to->pRectangles = nullptr;
+        if (from->pRectangles)
         {
-            deepcopy_VkRectLayerKHR(pool, from->pRectangles + i, (VkRectLayerKHR*)(to->pRectangles + i));
+            to->pRectangles = (VkRectLayerKHR*)alloc->alloc(from->rectangleCount * sizeof(const VkRectLayerKHR));
+            to->rectangleCount = from->rectangleCount;
+            for (uint32_t i = 0; i < (uint32_t)from->rectangleCount; ++i)
+            {
+                deepcopy_VkRectLayerKHR(alloc, rootType, from->pRectangles + i, (VkRectLayerKHR*)(to->pRectangles + i));
+            }
         }
     }
 }
 
 void deepcopy_VkPresentRegionsKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPresentRegionsKHR* from,
     VkPresentRegionsKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pRegions = nullptr;
-    if (from->pRegions)
+    if (from)
     {
-        to->pRegions = (VkPresentRegionKHR*)pool->alloc(from->swapchainCount * sizeof(const VkPresentRegionKHR));
-        to->swapchainCount = from->swapchainCount;
-        for (uint32_t i = 0; i < (uint32_t)from->swapchainCount; ++i)
+        to->pRegions = nullptr;
+        if (from->pRegions)
         {
-            deepcopy_VkPresentRegionKHR(pool, from->pRegions + i, (VkPresentRegionKHR*)(to->pRegions + i));
+            to->pRegions = (VkPresentRegionKHR*)alloc->alloc(from->swapchainCount * sizeof(const VkPresentRegionKHR));
+            to->swapchainCount = from->swapchainCount;
+            for (uint32_t i = 0; i < (uint32_t)from->swapchainCount; ++i)
+            {
+                deepcopy_VkPresentRegionKHR(alloc, rootType, from->pRegions + i, (VkPresentRegionKHR*)(to->pRegions + i));
+            }
         }
     }
 }
@@ -3912,210 +7461,36 @@
 #endif
 #ifdef VK_KHR_descriptor_update_template
 #endif
+#ifdef VK_KHR_imageless_framebuffer
+#endif
 #ifdef VK_KHR_create_renderpass2
-void deepcopy_VkAttachmentDescription2KHR(
-    Pool* pool,
-    const VkAttachmentDescription2KHR* from,
-    VkAttachmentDescription2KHR* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-}
-
-void deepcopy_VkAttachmentReference2KHR(
-    Pool* pool,
-    const VkAttachmentReference2KHR* from,
-    VkAttachmentReference2KHR* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-}
-
-void deepcopy_VkSubpassDescription2KHR(
-    Pool* pool,
-    const VkSubpassDescription2KHR* from,
-    VkSubpassDescription2KHR* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-    to->pInputAttachments = nullptr;
-    if (from->pInputAttachments)
-    {
-        to->pInputAttachments = (VkAttachmentReference2KHR*)pool->alloc(from->inputAttachmentCount * sizeof(const VkAttachmentReference2KHR));
-        to->inputAttachmentCount = from->inputAttachmentCount;
-        for (uint32_t i = 0; i < (uint32_t)from->inputAttachmentCount; ++i)
-        {
-            deepcopy_VkAttachmentReference2KHR(pool, from->pInputAttachments + i, (VkAttachmentReference2KHR*)(to->pInputAttachments + i));
-        }
-    }
-    to->pColorAttachments = nullptr;
-    if (from->pColorAttachments)
-    {
-        to->pColorAttachments = (VkAttachmentReference2KHR*)pool->alloc(from->colorAttachmentCount * sizeof(const VkAttachmentReference2KHR));
-        to->colorAttachmentCount = from->colorAttachmentCount;
-        for (uint32_t i = 0; i < (uint32_t)from->colorAttachmentCount; ++i)
-        {
-            deepcopy_VkAttachmentReference2KHR(pool, from->pColorAttachments + i, (VkAttachmentReference2KHR*)(to->pColorAttachments + i));
-        }
-    }
-    to->pResolveAttachments = nullptr;
-    if (from->pResolveAttachments)
-    {
-        to->pResolveAttachments = (VkAttachmentReference2KHR*)pool->alloc(from->colorAttachmentCount * sizeof(const VkAttachmentReference2KHR));
-        to->colorAttachmentCount = from->colorAttachmentCount;
-        for (uint32_t i = 0; i < (uint32_t)from->colorAttachmentCount; ++i)
-        {
-            deepcopy_VkAttachmentReference2KHR(pool, from->pResolveAttachments + i, (VkAttachmentReference2KHR*)(to->pResolveAttachments + i));
-        }
-    }
-    to->pDepthStencilAttachment = nullptr;
-    if (from->pDepthStencilAttachment)
-    {
-        to->pDepthStencilAttachment = (VkAttachmentReference2KHR*)pool->alloc(sizeof(const VkAttachmentReference2KHR));
-        deepcopy_VkAttachmentReference2KHR(pool, from->pDepthStencilAttachment, (VkAttachmentReference2KHR*)(to->pDepthStencilAttachment));
-    }
-    to->pPreserveAttachments = nullptr;
-    if (from->pPreserveAttachments)
-    {
-        to->pPreserveAttachments = (uint32_t*)pool->dupArray(from->pPreserveAttachments, from->preserveAttachmentCount * sizeof(const uint32_t));
-    }
-}
-
-void deepcopy_VkSubpassDependency2KHR(
-    Pool* pool,
-    const VkSubpassDependency2KHR* from,
-    VkSubpassDependency2KHR* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-}
-
-void deepcopy_VkRenderPassCreateInfo2KHR(
-    Pool* pool,
-    const VkRenderPassCreateInfo2KHR* from,
-    VkRenderPassCreateInfo2KHR* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-    to->pAttachments = nullptr;
-    if (from->pAttachments)
-    {
-        to->pAttachments = (VkAttachmentDescription2KHR*)pool->alloc(from->attachmentCount * sizeof(const VkAttachmentDescription2KHR));
-        to->attachmentCount = from->attachmentCount;
-        for (uint32_t i = 0; i < (uint32_t)from->attachmentCount; ++i)
-        {
-            deepcopy_VkAttachmentDescription2KHR(pool, from->pAttachments + i, (VkAttachmentDescription2KHR*)(to->pAttachments + i));
-        }
-    }
-    to->pSubpasses = nullptr;
-    if (from->pSubpasses)
-    {
-        to->pSubpasses = (VkSubpassDescription2KHR*)pool->alloc(from->subpassCount * sizeof(const VkSubpassDescription2KHR));
-        to->subpassCount = from->subpassCount;
-        for (uint32_t i = 0; i < (uint32_t)from->subpassCount; ++i)
-        {
-            deepcopy_VkSubpassDescription2KHR(pool, from->pSubpasses + i, (VkSubpassDescription2KHR*)(to->pSubpasses + i));
-        }
-    }
-    to->pDependencies = nullptr;
-    if (from->pDependencies)
-    {
-        to->pDependencies = (VkSubpassDependency2KHR*)pool->alloc(from->dependencyCount * sizeof(const VkSubpassDependency2KHR));
-        to->dependencyCount = from->dependencyCount;
-        for (uint32_t i = 0; i < (uint32_t)from->dependencyCount; ++i)
-        {
-            deepcopy_VkSubpassDependency2KHR(pool, from->pDependencies + i, (VkSubpassDependency2KHR*)(to->pDependencies + i));
-        }
-    }
-    to->pCorrelatedViewMasks = nullptr;
-    if (from->pCorrelatedViewMasks)
-    {
-        to->pCorrelatedViewMasks = (uint32_t*)pool->dupArray(from->pCorrelatedViewMasks, from->correlatedViewMaskCount * sizeof(const uint32_t));
-    }
-}
-
-void deepcopy_VkSubpassBeginInfoKHR(
-    Pool* pool,
-    const VkSubpassBeginInfoKHR* from,
-    VkSubpassBeginInfoKHR* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-}
-
-void deepcopy_VkSubpassEndInfoKHR(
-    Pool* pool,
-    const VkSubpassEndInfoKHR* from,
-    VkSubpassEndInfoKHR* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-}
-
 #endif
 #ifdef VK_KHR_shared_presentable_image
 void deepcopy_VkSharedPresentSurfaceCapabilitiesKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSharedPresentSurfaceCapabilitiesKHR* from,
     VkSharedPresentSurfaceCapabilitiesKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
@@ -4126,89 +7501,367 @@
 #endif
 #ifdef VK_KHR_external_fence_win32
 void deepcopy_VkImportFenceWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportFenceWin32HandleInfoKHR* from,
     VkImportFenceWin32HandleInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkExportFenceWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExportFenceWin32HandleInfoKHR* from,
     VkExportFenceWin32HandleInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pAttributes = nullptr;
     if (from->pAttributes)
     {
-        to->pAttributes = (SECURITY_ATTRIBUTES*)pool->dupArray(from->pAttributes, sizeof(const SECURITY_ATTRIBUTES));
+        to->pAttributes = (SECURITY_ATTRIBUTES*)alloc->dupArray(from->pAttributes, sizeof(const SECURITY_ATTRIBUTES));
     }
 }
 
 void deepcopy_VkFenceGetWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkFenceGetWin32HandleInfoKHR* from,
     VkFenceGetWin32HandleInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_KHR_external_fence_fd
 void deepcopy_VkImportFenceFdInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportFenceFdInfoKHR* from,
     VkImportFenceFdInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkFenceGetFdInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkFenceGetFdInfoKHR* from,
     VkFenceGetFdInfoKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_KHR_performance_query
+void deepcopy_VkPhysicalDevicePerformanceQueryFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevicePerformanceQueryFeaturesKHR* from,
+    VkPhysicalDevicePerformanceQueryFeaturesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDevicePerformanceQueryPropertiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevicePerformanceQueryPropertiesKHR* from,
+    VkPhysicalDevicePerformanceQueryPropertiesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPerformanceCounterKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceCounterKHR* from,
+    VkPerformanceCounterKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    memcpy(to->uuid, from->uuid, VK_UUID_SIZE * sizeof(uint8_t));
+}
+
+void deepcopy_VkPerformanceCounterDescriptionKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceCounterDescriptionKHR* from,
+    VkPerformanceCounterDescriptionKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    memcpy(to->name, from->name, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    memcpy(to->category, from->category, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    memcpy(to->description, from->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+}
+
+void deepcopy_VkQueryPoolPerformanceCreateInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkQueryPoolPerformanceCreateInfoKHR* from,
+    VkQueryPoolPerformanceCreateInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pCounterIndices = nullptr;
+    if (from->pCounterIndices)
+    {
+        to->pCounterIndices = (uint32_t*)alloc->dupArray(from->pCounterIndices, from->counterIndexCount * sizeof(const uint32_t));
+    }
+}
+
+void deepcopy_VkPerformanceCounterResultKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceCounterResultKHR* from,
+    VkPerformanceCounterResultKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkAcquireProfilingLockInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAcquireProfilingLockInfoKHR* from,
+    VkAcquireProfilingLockInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPerformanceQuerySubmitInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceQuerySubmitInfoKHR* from,
+    VkPerformanceQuerySubmitInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
@@ -4217,53 +7870,89 @@
 #endif
 #ifdef VK_KHR_get_surface_capabilities2
 void deepcopy_VkPhysicalDeviceSurfaceInfo2KHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceSurfaceInfo2KHR* from,
     VkPhysicalDeviceSurfaceInfo2KHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkSurfaceCapabilities2KHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSurfaceCapabilities2KHR* from,
     VkSurfaceCapabilities2KHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkSurfaceCapabilitiesKHR(pool, &from->surfaceCapabilities, (VkSurfaceCapabilitiesKHR*)(&to->surfaceCapabilities));
+    deepcopy_VkSurfaceCapabilitiesKHR(alloc, rootType, &from->surfaceCapabilities, (VkSurfaceCapabilitiesKHR*)(&to->surfaceCapabilities));
 }
 
 void deepcopy_VkSurfaceFormat2KHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSurfaceFormat2KHR* from,
     VkSurfaceFormat2KHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkSurfaceFormatKHR(pool, &from->surfaceFormat, (VkSurfaceFormatKHR*)(&to->surfaceFormat));
+    deepcopy_VkSurfaceFormatKHR(alloc, rootType, &from->surfaceFormat, (VkSurfaceFormatKHR*)(&to->surfaceFormat));
 }
 
 #endif
@@ -4271,87 +7960,147 @@
 #endif
 #ifdef VK_KHR_get_display_properties2
 void deepcopy_VkDisplayProperties2KHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayProperties2KHR* from,
     VkDisplayProperties2KHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkDisplayPropertiesKHR(pool, &from->displayProperties, (VkDisplayPropertiesKHR*)(&to->displayProperties));
+    deepcopy_VkDisplayPropertiesKHR(alloc, rootType, &from->displayProperties, (VkDisplayPropertiesKHR*)(&to->displayProperties));
 }
 
 void deepcopy_VkDisplayPlaneProperties2KHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayPlaneProperties2KHR* from,
     VkDisplayPlaneProperties2KHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkDisplayPlanePropertiesKHR(pool, &from->displayPlaneProperties, (VkDisplayPlanePropertiesKHR*)(&to->displayPlaneProperties));
+    deepcopy_VkDisplayPlanePropertiesKHR(alloc, rootType, &from->displayPlaneProperties, (VkDisplayPlanePropertiesKHR*)(&to->displayPlaneProperties));
 }
 
 void deepcopy_VkDisplayModeProperties2KHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayModeProperties2KHR* from,
     VkDisplayModeProperties2KHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkDisplayModePropertiesKHR(pool, &from->displayModeProperties, (VkDisplayModePropertiesKHR*)(&to->displayModeProperties));
+    deepcopy_VkDisplayModePropertiesKHR(alloc, rootType, &from->displayModeProperties, (VkDisplayModePropertiesKHR*)(&to->displayModeProperties));
 }
 
 void deepcopy_VkDisplayPlaneInfo2KHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayPlaneInfo2KHR* from,
     VkDisplayPlaneInfo2KHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkDisplayPlaneCapabilities2KHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayPlaneCapabilities2KHR* from,
     VkDisplayPlaneCapabilities2KHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkDisplayPlaneCapabilitiesKHR(pool, &from->capabilities, (VkDisplayPlaneCapabilitiesKHR*)(&to->capabilities));
+    deepcopy_VkDisplayPlaneCapabilitiesKHR(alloc, rootType, &from->capabilities, (VkDisplayPlaneCapabilitiesKHR*)(&to->capabilities));
 }
 
 #endif
@@ -4364,96 +8113,1053 @@
 #ifdef VK_KHR_get_memory_requirements2
 #endif
 #ifdef VK_KHR_image_format_list
-void deepcopy_VkImageFormatListCreateInfoKHR(
-    Pool* pool,
-    const VkImageFormatListCreateInfoKHR* from,
-    VkImageFormatListCreateInfoKHR* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-    to->pViewFormats = nullptr;
-    if (from->pViewFormats)
-    {
-        to->pViewFormats = (VkFormat*)pool->dupArray(from->pViewFormats, from->viewFormatCount * sizeof(const VkFormat));
-    }
-}
-
 #endif
 #ifdef VK_KHR_sampler_ycbcr_conversion
 #endif
 #ifdef VK_KHR_bind_memory2
 #endif
+#ifdef VK_KHR_portability_subset
+void deepcopy_VkPhysicalDevicePortabilitySubsetFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevicePortabilitySubsetFeaturesKHR* from,
+    VkPhysicalDevicePortabilitySubsetFeaturesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDevicePortabilitySubsetPropertiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevicePortabilitySubsetPropertiesKHR* from,
+    VkPhysicalDevicePortabilitySubsetPropertiesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
 #ifdef VK_KHR_maintenance3
 #endif
 #ifdef VK_KHR_draw_indirect_count
 #endif
+#ifdef VK_KHR_shader_subgroup_extended_types
+#endif
 #ifdef VK_KHR_8bit_storage
-void deepcopy_VkPhysicalDevice8BitStorageFeaturesKHR(
-    Pool* pool,
-    const VkPhysicalDevice8BitStorageFeaturesKHR* from,
-    VkPhysicalDevice8BitStorageFeaturesKHR* to)
+#endif
+#ifdef VK_KHR_shader_atomic_int64
+#endif
+#ifdef VK_KHR_shader_clock
+void deepcopy_VkPhysicalDeviceShaderClockFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderClockFeaturesKHR* from,
+    VkPhysicalDeviceShaderClockFeaturesKHR* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_KHR_driver_properties
+#endif
+#ifdef VK_KHR_shader_float_controls
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+void deepcopy_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* from,
+    VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+void deepcopy_VkFragmentShadingRateAttachmentInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkFragmentShadingRateAttachmentInfoKHR* from,
+    VkFragmentShadingRateAttachmentInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pFragmentShadingRateAttachment = nullptr;
+    if (from->pFragmentShadingRateAttachment)
+    {
+        to->pFragmentShadingRateAttachment = (VkAttachmentReference2*)alloc->alloc(sizeof(const VkAttachmentReference2));
+        deepcopy_VkAttachmentReference2(alloc, rootType, from->pFragmentShadingRateAttachment, (VkAttachmentReference2*)(to->pFragmentShadingRateAttachment));
+    }
+    deepcopy_VkExtent2D(alloc, rootType, &from->shadingRateAttachmentTexelSize, (VkExtent2D*)(&to->shadingRateAttachmentTexelSize));
+}
+
+void deepcopy_VkPipelineFragmentShadingRateStateCreateInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineFragmentShadingRateStateCreateInfoKHR* from,
+    VkPipelineFragmentShadingRateStateCreateInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkExtent2D(alloc, rootType, &from->fragmentSize, (VkExtent2D*)(&to->fragmentSize));
+    memcpy(to->combinerOps, from->combinerOps, 2 * sizeof(VkFragmentShadingRateCombinerOpKHR));
+}
+
+void deepcopy_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateFeaturesKHR* from,
+    VkPhysicalDeviceFragmentShadingRateFeaturesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRatePropertiesKHR* from,
+    VkPhysicalDeviceFragmentShadingRatePropertiesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkExtent2D(alloc, rootType, &from->minFragmentShadingRateAttachmentTexelSize, (VkExtent2D*)(&to->minFragmentShadingRateAttachmentTexelSize));
+    deepcopy_VkExtent2D(alloc, rootType, &from->maxFragmentShadingRateAttachmentTexelSize, (VkExtent2D*)(&to->maxFragmentShadingRateAttachmentTexelSize));
+    deepcopy_VkExtent2D(alloc, rootType, &from->maxFragmentSize, (VkExtent2D*)(&to->maxFragmentSize));
+}
+
+void deepcopy_VkPhysicalDeviceFragmentShadingRateKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateKHR* from,
+    VkPhysicalDeviceFragmentShadingRateKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkExtent2D(alloc, rootType, &from->fragmentSize, (VkExtent2D*)(&to->fragmentSize));
+}
+
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+void deepcopy_VkSurfaceProtectedCapabilitiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSurfaceProtectedCapabilitiesKHR* from,
+    VkSurfaceProtectedCapabilitiesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+#endif
+#ifdef VK_KHR_buffer_device_address
+#endif
+#ifdef VK_KHR_deferred_host_operations
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+void deepcopy_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* from,
+    VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPipelineInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineInfoKHR* from,
+    VkPipelineInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPipelineExecutablePropertiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineExecutablePropertiesKHR* from,
+    VkPipelineExecutablePropertiesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    memcpy(to->name, from->name, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    memcpy(to->description, from->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+}
+
+void deepcopy_VkPipelineExecutableInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineExecutableInfoKHR* from,
+    VkPipelineExecutableInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPipelineExecutableStatisticValueKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineExecutableStatisticValueKHR* from,
+    VkPipelineExecutableStatisticValueKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkPipelineExecutableStatisticKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineExecutableStatisticKHR* from,
+    VkPipelineExecutableStatisticKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    memcpy(to->name, from->name, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    memcpy(to->description, from->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    deepcopy_VkPipelineExecutableStatisticValueKHR(alloc, rootType, &from->value, (VkPipelineExecutableStatisticValueKHR*)(&to->value));
+}
+
+void deepcopy_VkPipelineExecutableInternalRepresentationKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineExecutableInternalRepresentationKHR* from,
+    VkPipelineExecutableInternalRepresentationKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    memcpy(to->name, from->name, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    memcpy(to->description, from->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    to->pData = nullptr;
+    if (from->pData)
+    {
+        to->pData = (void*)alloc->dupArray(from->pData, from->dataSize * sizeof(uint8_t));
+    }
+}
+
+#endif
+#ifdef VK_KHR_pipeline_library
+void deepcopy_VkPipelineLibraryCreateInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineLibraryCreateInfoKHR* from,
+    VkPipelineLibraryCreateInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pLibraries = nullptr;
+    if (from->pLibraries)
+    {
+        to->pLibraries = (VkPipeline*)alloc->dupArray(from->pLibraries, from->libraryCount * sizeof(const VkPipeline));
+    }
+}
+
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+void deepcopy_VkBufferCopy2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBufferCopy2KHR* from,
+    VkBufferCopy2KHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkCopyBufferInfo2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyBufferInfo2KHR* from,
+    VkCopyBufferInfo2KHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pRegions = nullptr;
+        if (from->pRegions)
+        {
+            to->pRegions = (VkBufferCopy2KHR*)alloc->alloc(from->regionCount * sizeof(const VkBufferCopy2KHR));
+            to->regionCount = from->regionCount;
+            for (uint32_t i = 0; i < (uint32_t)from->regionCount; ++i)
+            {
+                deepcopy_VkBufferCopy2KHR(alloc, rootType, from->pRegions + i, (VkBufferCopy2KHR*)(to->pRegions + i));
+            }
+        }
+    }
+}
+
+void deepcopy_VkImageCopy2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageCopy2KHR* from,
+    VkImageCopy2KHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkImageSubresourceLayers(alloc, rootType, &from->srcSubresource, (VkImageSubresourceLayers*)(&to->srcSubresource));
+    deepcopy_VkOffset3D(alloc, rootType, &from->srcOffset, (VkOffset3D*)(&to->srcOffset));
+    deepcopy_VkImageSubresourceLayers(alloc, rootType, &from->dstSubresource, (VkImageSubresourceLayers*)(&to->dstSubresource));
+    deepcopy_VkOffset3D(alloc, rootType, &from->dstOffset, (VkOffset3D*)(&to->dstOffset));
+    deepcopy_VkExtent3D(alloc, rootType, &from->extent, (VkExtent3D*)(&to->extent));
+}
+
+void deepcopy_VkCopyImageInfo2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyImageInfo2KHR* from,
+    VkCopyImageInfo2KHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pRegions = nullptr;
+        if (from->pRegions)
+        {
+            to->pRegions = (VkImageCopy2KHR*)alloc->alloc(from->regionCount * sizeof(const VkImageCopy2KHR));
+            to->regionCount = from->regionCount;
+            for (uint32_t i = 0; i < (uint32_t)from->regionCount; ++i)
+            {
+                deepcopy_VkImageCopy2KHR(alloc, rootType, from->pRegions + i, (VkImageCopy2KHR*)(to->pRegions + i));
+            }
+        }
+    }
+}
+
+void deepcopy_VkBufferImageCopy2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBufferImageCopy2KHR* from,
+    VkBufferImageCopy2KHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkImageSubresourceLayers(alloc, rootType, &from->imageSubresource, (VkImageSubresourceLayers*)(&to->imageSubresource));
+    deepcopy_VkOffset3D(alloc, rootType, &from->imageOffset, (VkOffset3D*)(&to->imageOffset));
+    deepcopy_VkExtent3D(alloc, rootType, &from->imageExtent, (VkExtent3D*)(&to->imageExtent));
+}
+
+void deepcopy_VkCopyBufferToImageInfo2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyBufferToImageInfo2KHR* from,
+    VkCopyBufferToImageInfo2KHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pRegions = nullptr;
+        if (from->pRegions)
+        {
+            to->pRegions = (VkBufferImageCopy2KHR*)alloc->alloc(from->regionCount * sizeof(const VkBufferImageCopy2KHR));
+            to->regionCount = from->regionCount;
+            for (uint32_t i = 0; i < (uint32_t)from->regionCount; ++i)
+            {
+                deepcopy_VkBufferImageCopy2KHR(alloc, rootType, from->pRegions + i, (VkBufferImageCopy2KHR*)(to->pRegions + i));
+            }
+        }
+    }
+}
+
+void deepcopy_VkCopyImageToBufferInfo2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyImageToBufferInfo2KHR* from,
+    VkCopyImageToBufferInfo2KHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pRegions = nullptr;
+        if (from->pRegions)
+        {
+            to->pRegions = (VkBufferImageCopy2KHR*)alloc->alloc(from->regionCount * sizeof(const VkBufferImageCopy2KHR));
+            to->regionCount = from->regionCount;
+            for (uint32_t i = 0; i < (uint32_t)from->regionCount; ++i)
+            {
+                deepcopy_VkBufferImageCopy2KHR(alloc, rootType, from->pRegions + i, (VkBufferImageCopy2KHR*)(to->pRegions + i));
+            }
+        }
+    }
+}
+
+void deepcopy_VkImageBlit2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageBlit2KHR* from,
+    VkImageBlit2KHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkImageSubresourceLayers(alloc, rootType, &from->srcSubresource, (VkImageSubresourceLayers*)(&to->srcSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        deepcopy_VkOffset3D(alloc, rootType, from->srcOffsets + i, (VkOffset3D*)(to->srcOffsets + i));
+    }
+    deepcopy_VkImageSubresourceLayers(alloc, rootType, &from->dstSubresource, (VkImageSubresourceLayers*)(&to->dstSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        deepcopy_VkOffset3D(alloc, rootType, from->dstOffsets + i, (VkOffset3D*)(to->dstOffsets + i));
+    }
+}
+
+void deepcopy_VkBlitImageInfo2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBlitImageInfo2KHR* from,
+    VkBlitImageInfo2KHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pRegions = nullptr;
+        if (from->pRegions)
+        {
+            to->pRegions = (VkImageBlit2KHR*)alloc->alloc(from->regionCount * sizeof(const VkImageBlit2KHR));
+            to->regionCount = from->regionCount;
+            for (uint32_t i = 0; i < (uint32_t)from->regionCount; ++i)
+            {
+                deepcopy_VkImageBlit2KHR(alloc, rootType, from->pRegions + i, (VkImageBlit2KHR*)(to->pRegions + i));
+            }
+        }
+    }
+}
+
+void deepcopy_VkImageResolve2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageResolve2KHR* from,
+    VkImageResolve2KHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkImageSubresourceLayers(alloc, rootType, &from->srcSubresource, (VkImageSubresourceLayers*)(&to->srcSubresource));
+    deepcopy_VkOffset3D(alloc, rootType, &from->srcOffset, (VkOffset3D*)(&to->srcOffset));
+    deepcopy_VkImageSubresourceLayers(alloc, rootType, &from->dstSubresource, (VkImageSubresourceLayers*)(&to->dstSubresource));
+    deepcopy_VkOffset3D(alloc, rootType, &from->dstOffset, (VkOffset3D*)(&to->dstOffset));
+    deepcopy_VkExtent3D(alloc, rootType, &from->extent, (VkExtent3D*)(&to->extent));
+}
+
+void deepcopy_VkResolveImageInfo2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkResolveImageInfo2KHR* from,
+    VkResolveImageInfo2KHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pRegions = nullptr;
+        if (from->pRegions)
+        {
+            to->pRegions = (VkImageResolve2KHR*)alloc->alloc(from->regionCount * sizeof(const VkImageResolve2KHR));
+            to->regionCount = from->regionCount;
+            for (uint32_t i = 0; i < (uint32_t)from->regionCount; ++i)
+            {
+                deepcopy_VkImageResolve2KHR(alloc, rootType, from->pRegions + i, (VkImageResolve2KHR*)(to->pRegions + i));
+            }
+        }
     }
 }
 
 #endif
 #ifdef VK_ANDROID_native_buffer
 void deepcopy_VkNativeBufferANDROID(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkNativeBufferANDROID* from,
     VkNativeBufferANDROID* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->handle = nullptr;
     if (from->handle)
     {
-        to->handle = (uint32_t*)pool->dupArray(from->handle, sizeof(const uint32_t));
+        to->handle = (uint32_t*)alloc->dupArray(from->handle, sizeof(const uint32_t));
     }
 }
 
 #endif
 #ifdef VK_EXT_debug_report
 void deepcopy_VkDebugReportCallbackCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDebugReportCallbackCreateInfoEXT* from,
     VkDebugReportCallbackCreateInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pUserData = nullptr;
     if (from->pUserData)
     {
-        to->pUserData = (void*)pool->dupArray(from->pUserData, sizeof(uint8_t));
+        to->pUserData = (void*)alloc->dupArray(from->pUserData, sizeof(uint8_t));
     }
 }
 
@@ -4466,18 +9172,30 @@
 #endif
 #ifdef VK_AMD_rasterization_order
 void deepcopy_VkPipelineRasterizationStateRasterizationOrderAMD(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineRasterizationStateRasterizationOrderAMD* from,
     VkPipelineRasterizationStateRasterizationOrderAMD* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
@@ -4488,65 +9206,101 @@
 #endif
 #ifdef VK_EXT_debug_marker
 void deepcopy_VkDebugMarkerObjectNameInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDebugMarkerObjectNameInfoEXT* from,
     VkDebugMarkerObjectNameInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pObjectName = nullptr;
     if (from->pObjectName)
     {
-        to->pObjectName = pool->strDup(from->pObjectName);
+        to->pObjectName = alloc->strDup(from->pObjectName);
     }
 }
 
 void deepcopy_VkDebugMarkerObjectTagInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDebugMarkerObjectTagInfoEXT* from,
     VkDebugMarkerObjectTagInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pTag = nullptr;
     if (from->pTag)
     {
-        to->pTag = (void*)pool->dupArray(from->pTag, from->tagSize * sizeof(const uint8_t));
+        to->pTag = (void*)alloc->dupArray(from->pTag, from->tagSize * sizeof(const uint8_t));
     }
 }
 
 void deepcopy_VkDebugMarkerMarkerInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDebugMarkerMarkerInfoEXT* from,
     VkDebugMarkerMarkerInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pMarkerName = nullptr;
     if (from->pMarkerName)
     {
-        to->pMarkerName = pool->strDup(from->pMarkerName);
+        to->pMarkerName = alloc->strDup(from->pMarkerName);
     }
     memcpy(to->color, from->color, 4 * sizeof(float));
 }
@@ -4556,50 +9310,230 @@
 #endif
 #ifdef VK_NV_dedicated_allocation
 void deepcopy_VkDedicatedAllocationImageCreateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDedicatedAllocationImageCreateInfoNV* from,
     VkDedicatedAllocationImageCreateInfoNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkDedicatedAllocationBufferCreateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDedicatedAllocationBufferCreateInfoNV* from,
     VkDedicatedAllocationBufferCreateInfoNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkDedicatedAllocationMemoryAllocateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDedicatedAllocationMemoryAllocateInfoNV* from,
     VkDedicatedAllocationMemoryAllocateInfoNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_transform_feedback
+void deepcopy_VkPhysicalDeviceTransformFeedbackFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTransformFeedbackFeaturesEXT* from,
+    VkPhysicalDeviceTransformFeedbackFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceTransformFeedbackPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTransformFeedbackPropertiesEXT* from,
+    VkPhysicalDeviceTransformFeedbackPropertiesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPipelineRasterizationStateStreamCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateStreamCreateInfoEXT* from,
+    VkPipelineRasterizationStateStreamCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NVX_image_view_handle
+void deepcopy_VkImageViewHandleInfoNVX(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageViewHandleInfoNVX* from,
+    VkImageViewHandleInfoNVX* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkImageViewAddressPropertiesNVX(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageViewAddressPropertiesNVX* from,
+    VkImageViewAddressPropertiesNVX* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
@@ -4614,218 +9548,380 @@
 #endif
 #ifdef VK_AMD_texture_gather_bias_lod
 void deepcopy_VkTextureLODGatherFormatPropertiesAMD(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkTextureLODGatherFormatPropertiesAMD* from,
     VkTextureLODGatherFormatPropertiesAMD* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_AMD_shader_info
 void deepcopy_VkShaderResourceUsageAMD(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkShaderResourceUsageAMD* from,
     VkShaderResourceUsageAMD* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkShaderStatisticsInfoAMD(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkShaderStatisticsInfoAMD* from,
     VkShaderStatisticsInfoAMD* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    deepcopy_VkShaderResourceUsageAMD(pool, &from->resourceUsage, (VkShaderResourceUsageAMD*)(&to->resourceUsage));
+    deepcopy_VkShaderResourceUsageAMD(alloc, rootType, &from->resourceUsage, (VkShaderResourceUsageAMD*)(&to->resourceUsage));
     memcpy(to->computeWorkGroupSize, from->computeWorkGroupSize, 3 * sizeof(uint32_t));
 }
 
 #endif
 #ifdef VK_AMD_shader_image_load_store_lod
 #endif
+#ifdef VK_GGP_stream_descriptor_surface
+void deepcopy_VkStreamDescriptorSurfaceCreateInfoGGP(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkStreamDescriptorSurfaceCreateInfoGGP* from,
+    VkStreamDescriptorSurfaceCreateInfoGGP* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_corner_sampled_image
+void deepcopy_VkPhysicalDeviceCornerSampledImageFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCornerSampledImageFeaturesNV* from,
+    VkPhysicalDeviceCornerSampledImageFeaturesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
 #ifdef VK_IMG_format_pvrtc
 #endif
 #ifdef VK_NV_external_memory_capabilities
 void deepcopy_VkExternalImageFormatPropertiesNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalImageFormatPropertiesNV* from,
     VkExternalImageFormatPropertiesNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    deepcopy_VkImageFormatProperties(pool, &from->imageFormatProperties, (VkImageFormatProperties*)(&to->imageFormatProperties));
+    deepcopy_VkImageFormatProperties(alloc, rootType, &from->imageFormatProperties, (VkImageFormatProperties*)(&to->imageFormatProperties));
 }
 
 #endif
 #ifdef VK_NV_external_memory
 void deepcopy_VkExternalMemoryImageCreateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalMemoryImageCreateInfoNV* from,
     VkExternalMemoryImageCreateInfoNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkExportMemoryAllocateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExportMemoryAllocateInfoNV* from,
     VkExportMemoryAllocateInfoNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_NV_external_memory_win32
 void deepcopy_VkImportMemoryWin32HandleInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportMemoryWin32HandleInfoNV* from,
     VkImportMemoryWin32HandleInfoNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkExportMemoryWin32HandleInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExportMemoryWin32HandleInfoNV* from,
     VkExportMemoryWin32HandleInfoNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pAttributes = nullptr;
     if (from->pAttributes)
     {
-        to->pAttributes = (SECURITY_ATTRIBUTES*)pool->dupArray(from->pAttributes, sizeof(const SECURITY_ATTRIBUTES));
+        to->pAttributes = (SECURITY_ATTRIBUTES*)alloc->dupArray(from->pAttributes, sizeof(const SECURITY_ATTRIBUTES));
     }
 }
 
 #endif
 #ifdef VK_NV_win32_keyed_mutex
 void deepcopy_VkWin32KeyedMutexAcquireReleaseInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkWin32KeyedMutexAcquireReleaseInfoNV* from,
     VkWin32KeyedMutexAcquireReleaseInfoNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pAcquireSyncs = nullptr;
     if (from->pAcquireSyncs)
     {
-        to->pAcquireSyncs = (VkDeviceMemory*)pool->dupArray(from->pAcquireSyncs, from->acquireCount * sizeof(const VkDeviceMemory));
+        to->pAcquireSyncs = (VkDeviceMemory*)alloc->dupArray(from->pAcquireSyncs, from->acquireCount * sizeof(const VkDeviceMemory));
     }
     to->pAcquireKeys = nullptr;
     if (from->pAcquireKeys)
     {
-        to->pAcquireKeys = (uint64_t*)pool->dupArray(from->pAcquireKeys, from->acquireCount * sizeof(const uint64_t));
+        to->pAcquireKeys = (uint64_t*)alloc->dupArray(from->pAcquireKeys, from->acquireCount * sizeof(const uint64_t));
     }
     to->pAcquireTimeoutMilliseconds = nullptr;
     if (from->pAcquireTimeoutMilliseconds)
     {
-        to->pAcquireTimeoutMilliseconds = (uint32_t*)pool->dupArray(from->pAcquireTimeoutMilliseconds, from->acquireCount * sizeof(const uint32_t));
+        to->pAcquireTimeoutMilliseconds = (uint32_t*)alloc->dupArray(from->pAcquireTimeoutMilliseconds, from->acquireCount * sizeof(const uint32_t));
     }
     to->pReleaseSyncs = nullptr;
     if (from->pReleaseSyncs)
     {
-        to->pReleaseSyncs = (VkDeviceMemory*)pool->dupArray(from->pReleaseSyncs, from->releaseCount * sizeof(const VkDeviceMemory));
+        to->pReleaseSyncs = (VkDeviceMemory*)alloc->dupArray(from->pReleaseSyncs, from->releaseCount * sizeof(const VkDeviceMemory));
     }
     to->pReleaseKeys = nullptr;
     if (from->pReleaseKeys)
     {
-        to->pReleaseKeys = (uint64_t*)pool->dupArray(from->pReleaseKeys, from->releaseCount * sizeof(const uint64_t));
+        to->pReleaseKeys = (uint64_t*)alloc->dupArray(from->pReleaseKeys, from->releaseCount * sizeof(const uint64_t));
     }
 }
 
 #endif
 #ifdef VK_EXT_validation_flags
 void deepcopy_VkValidationFlagsEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkValidationFlagsEXT* from,
     VkValidationFlagsEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pDisabledValidationChecks = nullptr;
     if (from->pDisabledValidationChecks)
     {
-        to->pDisabledValidationChecks = (VkValidationCheckEXT*)pool->dupArray(from->pDisabledValidationChecks, from->disabledValidationCheckCount * sizeof(const VkValidationCheckEXT));
+        to->pDisabledValidationChecks = (VkValidationCheckEXT*)alloc->dupArray(from->pDisabledValidationChecks, from->disabledValidationCheckCount * sizeof(const VkValidationCheckEXT));
     }
 }
 
 #endif
 #ifdef VK_NN_vi_surface
 void deepcopy_VkViSurfaceCreateInfoNN(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkViSurfaceCreateInfoNN* from,
     VkViSurfaceCreateInfoNN* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->window = nullptr;
     if (from->window)
     {
-        to->window = (void*)pool->dupArray(from->window, sizeof(uint8_t));
+        to->window = (void*)alloc->dupArray(from->window, sizeof(uint8_t));
     }
 }
 
@@ -4834,293 +9930,229 @@
 #endif
 #ifdef VK_EXT_shader_subgroup_vote
 #endif
-#ifdef VK_EXT_conditional_rendering
-void deepcopy_VkConditionalRenderingBeginInfoEXT(
-    Pool* pool,
-    const VkConditionalRenderingBeginInfoEXT* from,
-    VkConditionalRenderingBeginInfoEXT* to)
+#ifdef VK_EXT_texture_compression_astc_hdr
+void deepcopy_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* from,
+    VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        rootType = from->sType;
     }
-}
-
-void deepcopy_VkPhysicalDeviceConditionalRenderingFeaturesEXT(
-    Pool* pool,
-    const VkPhysicalDeviceConditionalRenderingFeaturesEXT* from,
-    VkPhysicalDeviceConditionalRenderingFeaturesEXT* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
     }
-}
-
-void deepcopy_VkCommandBufferInheritanceConditionalRenderingInfoEXT(
-    Pool* pool,
-    const VkCommandBufferInheritanceConditionalRenderingInfoEXT* from,
-    VkCommandBufferInheritanceConditionalRenderingInfoEXT* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
-#ifdef VK_NVX_device_generated_commands
-void deepcopy_VkDeviceGeneratedCommandsFeaturesNVX(
-    Pool* pool,
-    const VkDeviceGeneratedCommandsFeaturesNVX* from,
-    VkDeviceGeneratedCommandsFeaturesNVX* to)
+#ifdef VK_EXT_astc_decode_mode
+void deepcopy_VkImageViewASTCDecodeModeEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageViewASTCDecodeModeEXT* from,
+    VkImageViewASTCDecodeModeEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
-void deepcopy_VkDeviceGeneratedCommandsLimitsNVX(
-    Pool* pool,
-    const VkDeviceGeneratedCommandsLimitsNVX* from,
-    VkDeviceGeneratedCommandsLimitsNVX* to)
+void deepcopy_VkPhysicalDeviceASTCDecodeFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceASTCDecodeFeaturesEXT* from,
+    VkPhysicalDeviceASTCDecodeFeaturesEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
-void deepcopy_VkIndirectCommandsTokenNVX(
-    Pool* pool,
-    const VkIndirectCommandsTokenNVX* from,
-    VkIndirectCommandsTokenNVX* to)
+#endif
+#ifdef VK_EXT_conditional_rendering
+void deepcopy_VkConditionalRenderingBeginInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkConditionalRenderingBeginInfoEXT* from,
+    VkConditionalRenderingBeginInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-}
-
-void deepcopy_VkIndirectCommandsLayoutTokenNVX(
-    Pool* pool,
-    const VkIndirectCommandsLayoutTokenNVX* from,
-    VkIndirectCommandsLayoutTokenNVX* to)
-{
-    (void)pool;
-    *to = *from;
-}
-
-void deepcopy_VkIndirectCommandsLayoutCreateInfoNVX(
-    Pool* pool,
-    const VkIndirectCommandsLayoutCreateInfoNVX* from,
-    VkIndirectCommandsLayoutCreateInfoNVX* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-    to->pTokens = nullptr;
-    if (from->pTokens)
-    {
-        to->pTokens = (VkIndirectCommandsLayoutTokenNVX*)pool->alloc(from->tokenCount * sizeof(const VkIndirectCommandsLayoutTokenNVX));
-        to->tokenCount = from->tokenCount;
-        for (uint32_t i = 0; i < (uint32_t)from->tokenCount; ++i)
-        {
-            deepcopy_VkIndirectCommandsLayoutTokenNVX(pool, from->pTokens + i, (VkIndirectCommandsLayoutTokenNVX*)(to->pTokens + i));
-        }
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
-void deepcopy_VkCmdProcessCommandsInfoNVX(
-    Pool* pool,
-    const VkCmdProcessCommandsInfoNVX* from,
-    VkCmdProcessCommandsInfoNVX* to)
+void deepcopy_VkPhysicalDeviceConditionalRenderingFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceConditionalRenderingFeaturesEXT* from,
+    VkPhysicalDeviceConditionalRenderingFeaturesEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-    to->pIndirectCommandsTokens = nullptr;
-    if (from->pIndirectCommandsTokens)
-    {
-        to->pIndirectCommandsTokens = (VkIndirectCommandsTokenNVX*)pool->alloc(from->indirectCommandsTokenCount * sizeof(const VkIndirectCommandsTokenNVX));
-        to->indirectCommandsTokenCount = from->indirectCommandsTokenCount;
-        for (uint32_t i = 0; i < (uint32_t)from->indirectCommandsTokenCount; ++i)
-        {
-            deepcopy_VkIndirectCommandsTokenNVX(pool, from->pIndirectCommandsTokens + i, (VkIndirectCommandsTokenNVX*)(to->pIndirectCommandsTokens + i));
-        }
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
-void deepcopy_VkCmdReserveSpaceForCommandsInfoNVX(
-    Pool* pool,
-    const VkCmdReserveSpaceForCommandsInfoNVX* from,
-    VkCmdReserveSpaceForCommandsInfoNVX* to)
+void deepcopy_VkCommandBufferInheritanceConditionalRenderingInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceConditionalRenderingInfoEXT* from,
+    VkCommandBufferInheritanceConditionalRenderingInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
-void deepcopy_VkObjectTableCreateInfoNVX(
-    Pool* pool,
-    const VkObjectTableCreateInfoNVX* from,
-    VkObjectTableCreateInfoNVX* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-    to->pObjectEntryTypes = nullptr;
-    if (from->pObjectEntryTypes)
-    {
-        to->pObjectEntryTypes = (VkObjectEntryTypeNVX*)pool->dupArray(from->pObjectEntryTypes, from->objectCount * sizeof(const VkObjectEntryTypeNVX));
-    }
-    to->pObjectEntryCounts = nullptr;
-    if (from->pObjectEntryCounts)
-    {
-        to->pObjectEntryCounts = (uint32_t*)pool->dupArray(from->pObjectEntryCounts, from->objectCount * sizeof(const uint32_t));
-    }
-    to->pObjectEntryUsageFlags = nullptr;
-    if (from->pObjectEntryUsageFlags)
-    {
-        to->pObjectEntryUsageFlags = (VkObjectEntryUsageFlagsNVX*)pool->dupArray(from->pObjectEntryUsageFlags, from->objectCount * sizeof(const VkObjectEntryUsageFlagsNVX));
-    }
-}
-
-void deepcopy_VkObjectTableEntryNVX(
-    Pool* pool,
-    const VkObjectTableEntryNVX* from,
-    VkObjectTableEntryNVX* to)
-{
-    (void)pool;
-    *to = *from;
-}
-
-void deepcopy_VkObjectTablePipelineEntryNVX(
-    Pool* pool,
-    const VkObjectTablePipelineEntryNVX* from,
-    VkObjectTablePipelineEntryNVX* to)
-{
-    (void)pool;
-    *to = *from;
-}
-
-void deepcopy_VkObjectTableDescriptorSetEntryNVX(
-    Pool* pool,
-    const VkObjectTableDescriptorSetEntryNVX* from,
-    VkObjectTableDescriptorSetEntryNVX* to)
-{
-    (void)pool;
-    *to = *from;
-}
-
-void deepcopy_VkObjectTableVertexBufferEntryNVX(
-    Pool* pool,
-    const VkObjectTableVertexBufferEntryNVX* from,
-    VkObjectTableVertexBufferEntryNVX* to)
-{
-    (void)pool;
-    *to = *from;
-}
-
-void deepcopy_VkObjectTableIndexBufferEntryNVX(
-    Pool* pool,
-    const VkObjectTableIndexBufferEntryNVX* from,
-    VkObjectTableIndexBufferEntryNVX* to)
-{
-    (void)pool;
-    *to = *from;
-}
-
-void deepcopy_VkObjectTablePushConstantEntryNVX(
-    Pool* pool,
-    const VkObjectTablePushConstantEntryNVX* from,
-    VkObjectTablePushConstantEntryNVX* to)
-{
-    (void)pool;
-    *to = *from;
-}
-
 #endif
 #ifdef VK_NV_clip_space_w_scaling
 void deepcopy_VkViewportWScalingNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkViewportWScalingNV* from,
     VkViewportWScalingNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkPipelineViewportWScalingStateCreateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineViewportWScalingStateCreateInfoNV* from,
     VkPipelineViewportWScalingStateCreateInfoNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pViewportWScalings = nullptr;
-    if (from->pViewportWScalings)
+    if (from)
     {
-        to->pViewportWScalings = (VkViewportWScalingNV*)pool->alloc(from->viewportCount * sizeof(const VkViewportWScalingNV));
-        to->viewportCount = from->viewportCount;
-        for (uint32_t i = 0; i < (uint32_t)from->viewportCount; ++i)
+        to->pViewportWScalings = nullptr;
+        if (from->pViewportWScalings)
         {
-            deepcopy_VkViewportWScalingNV(pool, from->pViewportWScalings + i, (VkViewportWScalingNV*)(to->pViewportWScalings + i));
+            to->pViewportWScalings = (VkViewportWScalingNV*)alloc->alloc(from->viewportCount * sizeof(const VkViewportWScalingNV));
+            to->viewportCount = from->viewportCount;
+            for (uint32_t i = 0; i < (uint32_t)from->viewportCount; ++i)
+            {
+                deepcopy_VkViewportWScalingNV(alloc, rootType, from->pViewportWScalings + i, (VkViewportWScalingNV*)(to->pViewportWScalings + i));
+            }
         }
     }
 }
@@ -5132,141 +10164,222 @@
 #endif
 #ifdef VK_EXT_display_surface_counter
 void deepcopy_VkSurfaceCapabilities2EXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSurfaceCapabilities2EXT* from,
     VkSurfaceCapabilities2EXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkExtent2D(pool, &from->currentExtent, (VkExtent2D*)(&to->currentExtent));
-    deepcopy_VkExtent2D(pool, &from->minImageExtent, (VkExtent2D*)(&to->minImageExtent));
-    deepcopy_VkExtent2D(pool, &from->maxImageExtent, (VkExtent2D*)(&to->maxImageExtent));
+    deepcopy_VkExtent2D(alloc, rootType, &from->currentExtent, (VkExtent2D*)(&to->currentExtent));
+    deepcopy_VkExtent2D(alloc, rootType, &from->minImageExtent, (VkExtent2D*)(&to->minImageExtent));
+    deepcopy_VkExtent2D(alloc, rootType, &from->maxImageExtent, (VkExtent2D*)(&to->maxImageExtent));
 }
 
 #endif
 #ifdef VK_EXT_display_control
 void deepcopy_VkDisplayPowerInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayPowerInfoEXT* from,
     VkDisplayPowerInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkDeviceEventInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceEventInfoEXT* from,
     VkDeviceEventInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkDisplayEventInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayEventInfoEXT* from,
     VkDisplayEventInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkSwapchainCounterCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSwapchainCounterCreateInfoEXT* from,
     VkSwapchainCounterCreateInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_GOOGLE_display_timing
 void deepcopy_VkRefreshCycleDurationGOOGLE(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkRefreshCycleDurationGOOGLE* from,
     VkRefreshCycleDurationGOOGLE* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkPastPresentationTimingGOOGLE(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPastPresentationTimingGOOGLE* from,
     VkPastPresentationTimingGOOGLE* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkPresentTimeGOOGLE(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPresentTimeGOOGLE* from,
     VkPresentTimeGOOGLE* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkPresentTimesInfoGOOGLE(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPresentTimesInfoGOOGLE* from,
     VkPresentTimesInfoGOOGLE* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pTimes = nullptr;
-    if (from->pTimes)
+    if (from)
     {
-        to->pTimes = (VkPresentTimeGOOGLE*)pool->alloc(from->swapchainCount * sizeof(const VkPresentTimeGOOGLE));
-        to->swapchainCount = from->swapchainCount;
-        for (uint32_t i = 0; i < (uint32_t)from->swapchainCount; ++i)
+        to->pTimes = nullptr;
+        if (from->pTimes)
         {
-            deepcopy_VkPresentTimeGOOGLE(pool, from->pTimes + i, (VkPresentTimeGOOGLE*)(to->pTimes + i));
+            to->pTimes = (VkPresentTimeGOOGLE*)alloc->alloc(from->swapchainCount * sizeof(const VkPresentTimeGOOGLE));
+            to->swapchainCount = from->swapchainCount;
+            for (uint32_t i = 0; i < (uint32_t)from->swapchainCount; ++i)
+            {
+                deepcopy_VkPresentTimeGOOGLE(alloc, rootType, from->pTimes + i, (VkPresentTimeGOOGLE*)(to->pTimes + i));
+            }
         }
     }
 }
@@ -5280,54 +10393,83 @@
 #endif
 #ifdef VK_NVX_multiview_per_view_attributes
 void deepcopy_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX* from,
     VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_NV_viewport_swizzle
 void deepcopy_VkViewportSwizzleNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkViewportSwizzleNV* from,
     VkViewportSwizzleNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkPipelineViewportSwizzleStateCreateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineViewportSwizzleStateCreateInfoNV* from,
     VkPipelineViewportSwizzleStateCreateInfoNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pViewportSwizzles = nullptr;
-    if (from->pViewportSwizzles)
+    if (from)
     {
-        to->pViewportSwizzles = (VkViewportSwizzleNV*)pool->alloc(from->viewportCount * sizeof(const VkViewportSwizzleNV));
-        to->viewportCount = from->viewportCount;
-        for (uint32_t i = 0; i < (uint32_t)from->viewportCount; ++i)
+        to->pViewportSwizzles = nullptr;
+        if (from->pViewportSwizzles)
         {
-            deepcopy_VkViewportSwizzleNV(pool, from->pViewportSwizzles + i, (VkViewportSwizzleNV*)(to->pViewportSwizzles + i));
+            to->pViewportSwizzles = (VkViewportSwizzleNV*)alloc->alloc(from->viewportCount * sizeof(const VkViewportSwizzleNV));
+            to->viewportCount = from->viewportCount;
+            for (uint32_t i = 0; i < (uint32_t)from->viewportCount; ++i)
+            {
+                deepcopy_VkViewportSwizzleNV(alloc, rootType, from->pViewportSwizzles + i, (VkViewportSwizzleNV*)(to->pViewportSwizzles + i));
+            }
         }
     }
 }
@@ -5335,43 +10477,70 @@
 #endif
 #ifdef VK_EXT_discard_rectangles
 void deepcopy_VkPhysicalDeviceDiscardRectanglePropertiesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceDiscardRectanglePropertiesEXT* from,
     VkPhysicalDeviceDiscardRectanglePropertiesEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkPipelineDiscardRectangleStateCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineDiscardRectangleStateCreateInfoEXT* from,
     VkPipelineDiscardRectangleStateCreateInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pDiscardRectangles = nullptr;
-    if (from->pDiscardRectangles)
+    if (from)
     {
-        to->pDiscardRectangles = (VkRect2D*)pool->alloc(from->discardRectangleCount * sizeof(const VkRect2D));
-        to->discardRectangleCount = from->discardRectangleCount;
-        for (uint32_t i = 0; i < (uint32_t)from->discardRectangleCount; ++i)
+        to->pDiscardRectangles = nullptr;
+        if (from->pDiscardRectangles)
         {
-            deepcopy_VkRect2D(pool, from->pDiscardRectangles + i, (VkRect2D*)(to->pDiscardRectangles + i));
+            to->pDiscardRectangles = (VkRect2D*)alloc->alloc(from->discardRectangleCount * sizeof(const VkRect2D));
+            to->discardRectangleCount = from->discardRectangleCount;
+            for (uint32_t i = 0; i < (uint32_t)from->discardRectangleCount; ++i)
+            {
+                deepcopy_VkRect2D(alloc, rootType, from->pDiscardRectangles + i, (VkRect2D*)(to->pDiscardRectangles + i));
+            }
         }
     }
 }
@@ -5379,34 +10548,116 @@
 #endif
 #ifdef VK_EXT_conservative_rasterization
 void deepcopy_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceConservativeRasterizationPropertiesEXT* from,
     VkPhysicalDeviceConservativeRasterizationPropertiesEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkPipelineRasterizationConservativeStateCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineRasterizationConservativeStateCreateInfoEXT* from,
     VkPipelineRasterizationConservativeStateCreateInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_depth_clip_enable
+void deepcopy_VkPhysicalDeviceDepthClipEnableFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDepthClipEnableFeaturesEXT* from,
+    VkPhysicalDeviceDepthClipEnableFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPipelineRasterizationDepthClipStateCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineRasterizationDepthClipStateCreateInfoEXT* from,
+    VkPipelineRasterizationDepthClipStateCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
@@ -5415,365 +10666,514 @@
 #endif
 #ifdef VK_EXT_hdr_metadata
 void deepcopy_VkXYColorEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkXYColorEXT* from,
     VkXYColorEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkHdrMetadataEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkHdrMetadataEXT* from,
     VkHdrMetadataEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkXYColorEXT(pool, &from->displayPrimaryRed, (VkXYColorEXT*)(&to->displayPrimaryRed));
-    deepcopy_VkXYColorEXT(pool, &from->displayPrimaryGreen, (VkXYColorEXT*)(&to->displayPrimaryGreen));
-    deepcopy_VkXYColorEXT(pool, &from->displayPrimaryBlue, (VkXYColorEXT*)(&to->displayPrimaryBlue));
-    deepcopy_VkXYColorEXT(pool, &from->whitePoint, (VkXYColorEXT*)(&to->whitePoint));
+    deepcopy_VkXYColorEXT(alloc, rootType, &from->displayPrimaryRed, (VkXYColorEXT*)(&to->displayPrimaryRed));
+    deepcopy_VkXYColorEXT(alloc, rootType, &from->displayPrimaryGreen, (VkXYColorEXT*)(&to->displayPrimaryGreen));
+    deepcopy_VkXYColorEXT(alloc, rootType, &from->displayPrimaryBlue, (VkXYColorEXT*)(&to->displayPrimaryBlue));
+    deepcopy_VkXYColorEXT(alloc, rootType, &from->whitePoint, (VkXYColorEXT*)(&to->whitePoint));
 }
 
 #endif
 #ifdef VK_MVK_ios_surface
 void deepcopy_VkIOSSurfaceCreateInfoMVK(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkIOSSurfaceCreateInfoMVK* from,
     VkIOSSurfaceCreateInfoMVK* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pView = nullptr;
     if (from->pView)
     {
-        to->pView = (void*)pool->dupArray(from->pView, sizeof(const uint8_t));
+        to->pView = (void*)alloc->dupArray(from->pView, sizeof(const uint8_t));
     }
 }
 
 #endif
 #ifdef VK_MVK_macos_surface
 void deepcopy_VkMacOSSurfaceCreateInfoMVK(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMacOSSurfaceCreateInfoMVK* from,
     VkMacOSSurfaceCreateInfoMVK* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pView = nullptr;
     if (from->pView)
     {
-        to->pView = (void*)pool->dupArray(from->pView, sizeof(const uint8_t));
+        to->pView = (void*)alloc->dupArray(from->pView, sizeof(const uint8_t));
     }
 }
 
 #endif
+#ifdef VK_MVK_moltenvk
+#endif
 #ifdef VK_EXT_external_memory_dma_buf
 #endif
 #ifdef VK_EXT_queue_family_foreign
 #endif
 #ifdef VK_EXT_debug_utils
-void deepcopy_VkDebugUtilsObjectNameInfoEXT(
-    Pool* pool,
-    const VkDebugUtilsObjectNameInfoEXT* from,
-    VkDebugUtilsObjectNameInfoEXT* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-    to->pObjectName = nullptr;
-    if (from->pObjectName)
-    {
-        to->pObjectName = pool->strDup(from->pObjectName);
-    }
-}
-
-void deepcopy_VkDebugUtilsObjectTagInfoEXT(
-    Pool* pool,
-    const VkDebugUtilsObjectTagInfoEXT* from,
-    VkDebugUtilsObjectTagInfoEXT* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-    to->pTag = nullptr;
-    if (from->pTag)
-    {
-        to->pTag = (void*)pool->dupArray(from->pTag, from->tagSize * sizeof(const uint8_t));
-    }
-}
-
 void deepcopy_VkDebugUtilsLabelEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDebugUtilsLabelEXT* from,
     VkDebugUtilsLabelEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pLabelName = nullptr;
     if (from->pLabelName)
     {
-        to->pLabelName = pool->strDup(from->pLabelName);
+        to->pLabelName = alloc->strDup(from->pLabelName);
     }
     memcpy(to->color, from->color, 4 * sizeof(float));
 }
 
-void deepcopy_VkDebugUtilsMessengerCallbackDataEXT(
-    Pool* pool,
-    const VkDebugUtilsMessengerCallbackDataEXT* from,
-    VkDebugUtilsMessengerCallbackDataEXT* to)
+void deepcopy_VkDebugUtilsObjectNameInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDebugUtilsObjectNameInfoEXT* from,
+    VkDebugUtilsObjectNameInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pObjectName = nullptr;
+    if (from->pObjectName)
+    {
+        to->pObjectName = alloc->strDup(from->pObjectName);
+    }
+}
+
+void deepcopy_VkDebugUtilsMessengerCallbackDataEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDebugUtilsMessengerCallbackDataEXT* from,
+    VkDebugUtilsMessengerCallbackDataEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pMessageIdName = nullptr;
     if (from->pMessageIdName)
     {
-        to->pMessageIdName = pool->strDup(from->pMessageIdName);
+        to->pMessageIdName = alloc->strDup(from->pMessageIdName);
     }
     to->pMessage = nullptr;
     if (from->pMessage)
     {
-        to->pMessage = pool->strDup(from->pMessage);
+        to->pMessage = alloc->strDup(from->pMessage);
     }
-    to->pQueueLabels = nullptr;
-    if (from->pQueueLabels)
+    if (from)
     {
-        to->pQueueLabels = (VkDebugUtilsLabelEXT*)pool->alloc(from->queueLabelCount * sizeof(VkDebugUtilsLabelEXT));
-        to->queueLabelCount = from->queueLabelCount;
-        for (uint32_t i = 0; i < (uint32_t)from->queueLabelCount; ++i)
+        to->pQueueLabels = nullptr;
+        if (from->pQueueLabels)
         {
-            deepcopy_VkDebugUtilsLabelEXT(pool, from->pQueueLabels + i, (VkDebugUtilsLabelEXT*)(to->pQueueLabels + i));
+            to->pQueueLabels = (VkDebugUtilsLabelEXT*)alloc->alloc(from->queueLabelCount * sizeof(VkDebugUtilsLabelEXT));
+            to->queueLabelCount = from->queueLabelCount;
+            for (uint32_t i = 0; i < (uint32_t)from->queueLabelCount; ++i)
+            {
+                deepcopy_VkDebugUtilsLabelEXT(alloc, rootType, from->pQueueLabels + i, (VkDebugUtilsLabelEXT*)(to->pQueueLabels + i));
+            }
         }
     }
-    to->pCmdBufLabels = nullptr;
-    if (from->pCmdBufLabels)
+    if (from)
     {
-        to->pCmdBufLabels = (VkDebugUtilsLabelEXT*)pool->alloc(from->cmdBufLabelCount * sizeof(VkDebugUtilsLabelEXT));
-        to->cmdBufLabelCount = from->cmdBufLabelCount;
-        for (uint32_t i = 0; i < (uint32_t)from->cmdBufLabelCount; ++i)
+        to->pCmdBufLabels = nullptr;
+        if (from->pCmdBufLabels)
         {
-            deepcopy_VkDebugUtilsLabelEXT(pool, from->pCmdBufLabels + i, (VkDebugUtilsLabelEXT*)(to->pCmdBufLabels + i));
+            to->pCmdBufLabels = (VkDebugUtilsLabelEXT*)alloc->alloc(from->cmdBufLabelCount * sizeof(VkDebugUtilsLabelEXT));
+            to->cmdBufLabelCount = from->cmdBufLabelCount;
+            for (uint32_t i = 0; i < (uint32_t)from->cmdBufLabelCount; ++i)
+            {
+                deepcopy_VkDebugUtilsLabelEXT(alloc, rootType, from->pCmdBufLabels + i, (VkDebugUtilsLabelEXT*)(to->pCmdBufLabels + i));
+            }
         }
     }
-    to->pObjects = nullptr;
-    if (from->pObjects)
+    if (from)
     {
-        to->pObjects = (VkDebugUtilsObjectNameInfoEXT*)pool->alloc(from->objectCount * sizeof(VkDebugUtilsObjectNameInfoEXT));
-        to->objectCount = from->objectCount;
-        for (uint32_t i = 0; i < (uint32_t)from->objectCount; ++i)
+        to->pObjects = nullptr;
+        if (from->pObjects)
         {
-            deepcopy_VkDebugUtilsObjectNameInfoEXT(pool, from->pObjects + i, (VkDebugUtilsObjectNameInfoEXT*)(to->pObjects + i));
+            to->pObjects = (VkDebugUtilsObjectNameInfoEXT*)alloc->alloc(from->objectCount * sizeof(VkDebugUtilsObjectNameInfoEXT));
+            to->objectCount = from->objectCount;
+            for (uint32_t i = 0; i < (uint32_t)from->objectCount; ++i)
+            {
+                deepcopy_VkDebugUtilsObjectNameInfoEXT(alloc, rootType, from->pObjects + i, (VkDebugUtilsObjectNameInfoEXT*)(to->pObjects + i));
+            }
         }
     }
 }
 
 void deepcopy_VkDebugUtilsMessengerCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDebugUtilsMessengerCreateInfoEXT* from,
     VkDebugUtilsMessengerCreateInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pUserData = nullptr;
     if (from->pUserData)
     {
-        to->pUserData = (void*)pool->dupArray(from->pUserData, sizeof(uint8_t));
+        to->pUserData = (void*)alloc->dupArray(from->pUserData, sizeof(uint8_t));
+    }
+}
+
+void deepcopy_VkDebugUtilsObjectTagInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDebugUtilsObjectTagInfoEXT* from,
+    VkDebugUtilsObjectTagInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pTag = nullptr;
+    if (from->pTag)
+    {
+        to->pTag = (void*)alloc->dupArray(from->pTag, from->tagSize * sizeof(const uint8_t));
     }
 }
 
 #endif
 #ifdef VK_ANDROID_external_memory_android_hardware_buffer
 void deepcopy_VkAndroidHardwareBufferUsageANDROID(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkAndroidHardwareBufferUsageANDROID* from,
     VkAndroidHardwareBufferUsageANDROID* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkAndroidHardwareBufferPropertiesANDROID(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkAndroidHardwareBufferPropertiesANDROID* from,
     VkAndroidHardwareBufferPropertiesANDROID* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkAndroidHardwareBufferFormatPropertiesANDROID(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkAndroidHardwareBufferFormatPropertiesANDROID* from,
     VkAndroidHardwareBufferFormatPropertiesANDROID* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkComponentMapping(pool, &from->samplerYcbcrConversionComponents, (VkComponentMapping*)(&to->samplerYcbcrConversionComponents));
+    deepcopy_VkComponentMapping(alloc, rootType, &from->samplerYcbcrConversionComponents, (VkComponentMapping*)(&to->samplerYcbcrConversionComponents));
 }
 
 void deepcopy_VkImportAndroidHardwareBufferInfoANDROID(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportAndroidHardwareBufferInfoANDROID* from,
     VkImportAndroidHardwareBufferInfoANDROID* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->buffer = nullptr;
     if (from->buffer)
     {
-        to->buffer = (AHardwareBuffer*)pool->dupArray(from->buffer, sizeof(AHardwareBuffer));
+        to->buffer = (AHardwareBuffer*)alloc->dupArray(from->buffer, sizeof(AHardwareBuffer));
     }
 }
 
 void deepcopy_VkMemoryGetAndroidHardwareBufferInfoANDROID(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryGetAndroidHardwareBufferInfoANDROID* from,
     VkMemoryGetAndroidHardwareBufferInfoANDROID* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkExternalFormatANDROID(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalFormatANDROID* from,
     VkExternalFormatANDROID* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_EXT_sampler_filter_minmax
-void deepcopy_VkSamplerReductionModeCreateInfoEXT(
-    Pool* pool,
-    const VkSamplerReductionModeCreateInfoEXT* from,
-    VkSamplerReductionModeCreateInfoEXT* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-}
-
-void deepcopy_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(
-    Pool* pool,
-    const VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT* from,
-    VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-}
-
 #endif
 #ifdef VK_AMD_gpu_shader_int16
 #endif
@@ -5781,523 +11181,1974 @@
 #endif
 #ifdef VK_AMD_shader_fragment_mask
 #endif
+#ifdef VK_EXT_inline_uniform_block
+void deepcopy_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceInlineUniformBlockFeaturesEXT* from,
+    VkPhysicalDeviceInlineUniformBlockFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceInlineUniformBlockPropertiesEXT* from,
+    VkPhysicalDeviceInlineUniformBlockPropertiesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkWriteDescriptorSetInlineUniformBlockEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetInlineUniformBlockEXT* from,
+    VkWriteDescriptorSetInlineUniformBlockEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pData = nullptr;
+    if (from->pData)
+    {
+        to->pData = (void*)alloc->dupArray(from->pData, from->dataSize * sizeof(const uint8_t));
+    }
+}
+
+void deepcopy_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorPoolInlineUniformBlockCreateInfoEXT* from,
+    VkDescriptorPoolInlineUniformBlockCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
 #ifdef VK_EXT_shader_stencil_export
 #endif
 #ifdef VK_EXT_sample_locations
 void deepcopy_VkSampleLocationEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSampleLocationEXT* from,
     VkSampleLocationEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkSampleLocationsInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSampleLocationsInfoEXT* from,
     VkSampleLocationsInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkExtent2D(pool, &from->sampleLocationGridSize, (VkExtent2D*)(&to->sampleLocationGridSize));
-    to->pSampleLocations = nullptr;
-    if (from->pSampleLocations)
+    deepcopy_VkExtent2D(alloc, rootType, &from->sampleLocationGridSize, (VkExtent2D*)(&to->sampleLocationGridSize));
+    if (from)
     {
-        to->pSampleLocations = (VkSampleLocationEXT*)pool->alloc(from->sampleLocationsCount * sizeof(const VkSampleLocationEXT));
-        to->sampleLocationsCount = from->sampleLocationsCount;
-        for (uint32_t i = 0; i < (uint32_t)from->sampleLocationsCount; ++i)
+        to->pSampleLocations = nullptr;
+        if (from->pSampleLocations)
         {
-            deepcopy_VkSampleLocationEXT(pool, from->pSampleLocations + i, (VkSampleLocationEXT*)(to->pSampleLocations + i));
+            to->pSampleLocations = (VkSampleLocationEXT*)alloc->alloc(from->sampleLocationsCount * sizeof(const VkSampleLocationEXT));
+            to->sampleLocationsCount = from->sampleLocationsCount;
+            for (uint32_t i = 0; i < (uint32_t)from->sampleLocationsCount; ++i)
+            {
+                deepcopy_VkSampleLocationEXT(alloc, rootType, from->pSampleLocations + i, (VkSampleLocationEXT*)(to->pSampleLocations + i));
+            }
         }
     }
 }
 
 void deepcopy_VkAttachmentSampleLocationsEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkAttachmentSampleLocationsEXT* from,
     VkAttachmentSampleLocationsEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    deepcopy_VkSampleLocationsInfoEXT(pool, &from->sampleLocationsInfo, (VkSampleLocationsInfoEXT*)(&to->sampleLocationsInfo));
+    deepcopy_VkSampleLocationsInfoEXT(alloc, rootType, &from->sampleLocationsInfo, (VkSampleLocationsInfoEXT*)(&to->sampleLocationsInfo));
 }
 
 void deepcopy_VkSubpassSampleLocationsEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSubpassSampleLocationsEXT* from,
     VkSubpassSampleLocationsEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    deepcopy_VkSampleLocationsInfoEXT(pool, &from->sampleLocationsInfo, (VkSampleLocationsInfoEXT*)(&to->sampleLocationsInfo));
+    deepcopy_VkSampleLocationsInfoEXT(alloc, rootType, &from->sampleLocationsInfo, (VkSampleLocationsInfoEXT*)(&to->sampleLocationsInfo));
 }
 
 void deepcopy_VkRenderPassSampleLocationsBeginInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkRenderPassSampleLocationsBeginInfoEXT* from,
     VkRenderPassSampleLocationsBeginInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pAttachmentInitialSampleLocations = nullptr;
-    if (from->pAttachmentInitialSampleLocations)
+    if (from)
     {
-        to->pAttachmentInitialSampleLocations = (VkAttachmentSampleLocationsEXT*)pool->alloc(from->attachmentInitialSampleLocationsCount * sizeof(const VkAttachmentSampleLocationsEXT));
-        to->attachmentInitialSampleLocationsCount = from->attachmentInitialSampleLocationsCount;
-        for (uint32_t i = 0; i < (uint32_t)from->attachmentInitialSampleLocationsCount; ++i)
+        to->pAttachmentInitialSampleLocations = nullptr;
+        if (from->pAttachmentInitialSampleLocations)
         {
-            deepcopy_VkAttachmentSampleLocationsEXT(pool, from->pAttachmentInitialSampleLocations + i, (VkAttachmentSampleLocationsEXT*)(to->pAttachmentInitialSampleLocations + i));
+            to->pAttachmentInitialSampleLocations = (VkAttachmentSampleLocationsEXT*)alloc->alloc(from->attachmentInitialSampleLocationsCount * sizeof(const VkAttachmentSampleLocationsEXT));
+            to->attachmentInitialSampleLocationsCount = from->attachmentInitialSampleLocationsCount;
+            for (uint32_t i = 0; i < (uint32_t)from->attachmentInitialSampleLocationsCount; ++i)
+            {
+                deepcopy_VkAttachmentSampleLocationsEXT(alloc, rootType, from->pAttachmentInitialSampleLocations + i, (VkAttachmentSampleLocationsEXT*)(to->pAttachmentInitialSampleLocations + i));
+            }
         }
     }
-    to->pPostSubpassSampleLocations = nullptr;
-    if (from->pPostSubpassSampleLocations)
+    if (from)
     {
-        to->pPostSubpassSampleLocations = (VkSubpassSampleLocationsEXT*)pool->alloc(from->postSubpassSampleLocationsCount * sizeof(const VkSubpassSampleLocationsEXT));
-        to->postSubpassSampleLocationsCount = from->postSubpassSampleLocationsCount;
-        for (uint32_t i = 0; i < (uint32_t)from->postSubpassSampleLocationsCount; ++i)
+        to->pPostSubpassSampleLocations = nullptr;
+        if (from->pPostSubpassSampleLocations)
         {
-            deepcopy_VkSubpassSampleLocationsEXT(pool, from->pPostSubpassSampleLocations + i, (VkSubpassSampleLocationsEXT*)(to->pPostSubpassSampleLocations + i));
+            to->pPostSubpassSampleLocations = (VkSubpassSampleLocationsEXT*)alloc->alloc(from->postSubpassSampleLocationsCount * sizeof(const VkSubpassSampleLocationsEXT));
+            to->postSubpassSampleLocationsCount = from->postSubpassSampleLocationsCount;
+            for (uint32_t i = 0; i < (uint32_t)from->postSubpassSampleLocationsCount; ++i)
+            {
+                deepcopy_VkSubpassSampleLocationsEXT(alloc, rootType, from->pPostSubpassSampleLocations + i, (VkSubpassSampleLocationsEXT*)(to->pPostSubpassSampleLocations + i));
+            }
         }
     }
 }
 
 void deepcopy_VkPipelineSampleLocationsStateCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineSampleLocationsStateCreateInfoEXT* from,
     VkPipelineSampleLocationsStateCreateInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkSampleLocationsInfoEXT(pool, &from->sampleLocationsInfo, (VkSampleLocationsInfoEXT*)(&to->sampleLocationsInfo));
+    deepcopy_VkSampleLocationsInfoEXT(alloc, rootType, &from->sampleLocationsInfo, (VkSampleLocationsInfoEXT*)(&to->sampleLocationsInfo));
 }
 
 void deepcopy_VkPhysicalDeviceSampleLocationsPropertiesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceSampleLocationsPropertiesEXT* from,
     VkPhysicalDeviceSampleLocationsPropertiesEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkExtent2D(pool, &from->maxSampleLocationGridSize, (VkExtent2D*)(&to->maxSampleLocationGridSize));
+    deepcopy_VkExtent2D(alloc, rootType, &from->maxSampleLocationGridSize, (VkExtent2D*)(&to->maxSampleLocationGridSize));
     memcpy(to->sampleLocationCoordinateRange, from->sampleLocationCoordinateRange, 2 * sizeof(float));
 }
 
 void deepcopy_VkMultisamplePropertiesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMultisamplePropertiesEXT* from,
     VkMultisamplePropertiesEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    deepcopy_VkExtent2D(pool, &from->maxSampleLocationGridSize, (VkExtent2D*)(&to->maxSampleLocationGridSize));
+    deepcopy_VkExtent2D(alloc, rootType, &from->maxSampleLocationGridSize, (VkExtent2D*)(&to->maxSampleLocationGridSize));
 }
 
 #endif
 #ifdef VK_EXT_blend_operation_advanced
 void deepcopy_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* from,
     VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT* from,
     VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkPipelineColorBlendAdvancedStateCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineColorBlendAdvancedStateCreateInfoEXT* from,
     VkPipelineColorBlendAdvancedStateCreateInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_NV_fragment_coverage_to_color
 void deepcopy_VkPipelineCoverageToColorStateCreateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineCoverageToColorStateCreateInfoNV* from,
     VkPipelineCoverageToColorStateCreateInfoNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_NV_framebuffer_mixed_samples
 void deepcopy_VkPipelineCoverageModulationStateCreateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineCoverageModulationStateCreateInfoNV* from,
     VkPipelineCoverageModulationStateCreateInfoNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pCoverageModulationTable = nullptr;
     if (from->pCoverageModulationTable)
     {
-        to->pCoverageModulationTable = (float*)pool->dupArray(from->pCoverageModulationTable, from->coverageModulationTableCount * sizeof(const float));
+        to->pCoverageModulationTable = (float*)alloc->dupArray(from->pCoverageModulationTable, from->coverageModulationTableCount * sizeof(const float));
     }
 }
 
 #endif
 #ifdef VK_NV_fill_rectangle
 #endif
-#ifdef VK_EXT_post_depth_coverage
-#endif
-#ifdef VK_EXT_validation_cache
-void deepcopy_VkValidationCacheCreateInfoEXT(
-    Pool* pool,
-    const VkValidationCacheCreateInfoEXT* from,
-    VkValidationCacheCreateInfoEXT* to)
+#ifdef VK_NV_shader_sm_builtins
+void deepcopy_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* from,
+    VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* from,
+    VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_post_depth_coverage
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+void deepcopy_VkDrmFormatModifierPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDrmFormatModifierPropertiesEXT* from,
+    VkDrmFormatModifierPropertiesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkDrmFormatModifierPropertiesListEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDrmFormatModifierPropertiesListEXT* from,
+    VkDrmFormatModifierPropertiesListEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pDrmFormatModifierProperties = nullptr;
+        if (from->pDrmFormatModifierProperties)
+        {
+            to->pDrmFormatModifierProperties = (VkDrmFormatModifierPropertiesEXT*)alloc->alloc(from->drmFormatModifierCount * sizeof(VkDrmFormatModifierPropertiesEXT));
+            to->drmFormatModifierCount = from->drmFormatModifierCount;
+            for (uint32_t i = 0; i < (uint32_t)from->drmFormatModifierCount; ++i)
+            {
+                deepcopy_VkDrmFormatModifierPropertiesEXT(alloc, rootType, from->pDrmFormatModifierProperties + i, (VkDrmFormatModifierPropertiesEXT*)(to->pDrmFormatModifierProperties + i));
+            }
+        }
+    }
+}
+
+void deepcopy_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageDrmFormatModifierInfoEXT* from,
+    VkPhysicalDeviceImageDrmFormatModifierInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pQueueFamilyIndices = nullptr;
+    if (from->pQueueFamilyIndices)
+    {
+        to->pQueueFamilyIndices = (uint32_t*)alloc->dupArray(from->pQueueFamilyIndices, from->queueFamilyIndexCount * sizeof(const uint32_t));
+    }
+}
+
+void deepcopy_VkImageDrmFormatModifierListCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierListCreateInfoEXT* from,
+    VkImageDrmFormatModifierListCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pDrmFormatModifiers = nullptr;
+    if (from->pDrmFormatModifiers)
+    {
+        to->pDrmFormatModifiers = (uint64_t*)alloc->dupArray(from->pDrmFormatModifiers, from->drmFormatModifierCount * sizeof(const uint64_t));
+    }
+}
+
+void deepcopy_VkImageDrmFormatModifierExplicitCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierExplicitCreateInfoEXT* from,
+    VkImageDrmFormatModifierExplicitCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pPlaneLayouts = nullptr;
+        if (from->pPlaneLayouts)
+        {
+            to->pPlaneLayouts = (VkSubresourceLayout*)alloc->alloc(from->drmFormatModifierPlaneCount * sizeof(const VkSubresourceLayout));
+            to->drmFormatModifierPlaneCount = from->drmFormatModifierPlaneCount;
+            for (uint32_t i = 0; i < (uint32_t)from->drmFormatModifierPlaneCount; ++i)
+            {
+                deepcopy_VkSubresourceLayout(alloc, rootType, from->pPlaneLayouts + i, (VkSubresourceLayout*)(to->pPlaneLayouts + i));
+            }
+        }
+    }
+}
+
+void deepcopy_VkImageDrmFormatModifierPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierPropertiesEXT* from,
+    VkImageDrmFormatModifierPropertiesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_validation_cache
+void deepcopy_VkValidationCacheCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkValidationCacheCreateInfoEXT* from,
+    VkValidationCacheCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pInitialData = nullptr;
     if (from->pInitialData)
     {
-        to->pInitialData = (void*)pool->dupArray(from->pInitialData, from->initialDataSize * sizeof(const uint8_t));
+        to->pInitialData = (void*)alloc->dupArray(from->pInitialData, from->initialDataSize * sizeof(const uint8_t));
     }
 }
 
 void deepcopy_VkShaderModuleValidationCacheCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkShaderModuleValidationCacheCreateInfoEXT* from,
     VkShaderModuleValidationCacheCreateInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_EXT_descriptor_indexing
-void deepcopy_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(
-    Pool* pool,
-    const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT* from,
-    VkDescriptorSetLayoutBindingFlagsCreateInfoEXT* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-    to->pBindingFlags = nullptr;
-    if (from->pBindingFlags)
-    {
-        to->pBindingFlags = (VkDescriptorBindingFlagsEXT*)pool->dupArray(from->pBindingFlags, from->bindingCount * sizeof(const VkDescriptorBindingFlagsEXT));
-    }
-}
-
-void deepcopy_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(
-    Pool* pool,
-    const VkPhysicalDeviceDescriptorIndexingFeaturesEXT* from,
-    VkPhysicalDeviceDescriptorIndexingFeaturesEXT* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-}
-
-void deepcopy_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(
-    Pool* pool,
-    const VkPhysicalDeviceDescriptorIndexingPropertiesEXT* from,
-    VkPhysicalDeviceDescriptorIndexingPropertiesEXT* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-}
-
-void deepcopy_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(
-    Pool* pool,
-    const VkDescriptorSetVariableDescriptorCountAllocateInfoEXT* from,
-    VkDescriptorSetVariableDescriptorCountAllocateInfoEXT* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-    to->pDescriptorCounts = nullptr;
-    if (from->pDescriptorCounts)
-    {
-        to->pDescriptorCounts = (uint32_t*)pool->dupArray(from->pDescriptorCounts, from->descriptorSetCount * sizeof(const uint32_t));
-    }
-}
-
-void deepcopy_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(
-    Pool* pool,
-    const VkDescriptorSetVariableDescriptorCountLayoutSupportEXT* from,
-    VkDescriptorSetVariableDescriptorCountLayoutSupportEXT* to)
-{
-    (void)pool;
-    *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
-    to->pNext = nullptr;
-    if (pNext_size)
-    {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
-    }
-}
-
 #endif
 #ifdef VK_EXT_shader_viewport_index_layer
 #endif
-#ifdef VK_EXT_global_priority
-void deepcopy_VkDeviceQueueGlobalPriorityCreateInfoEXT(
-    Pool* pool,
-    const VkDeviceQueueGlobalPriorityCreateInfoEXT* from,
-    VkDeviceQueueGlobalPriorityCreateInfoEXT* to)
+#ifdef VK_NV_shading_rate_image
+void deepcopy_VkShadingRatePaletteNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkShadingRatePaletteNV* from,
+    VkShadingRatePaletteNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    to->pShadingRatePaletteEntries = nullptr;
+    if (from->pShadingRatePaletteEntries)
+    {
+        to->pShadingRatePaletteEntries = (VkShadingRatePaletteEntryNV*)alloc->dupArray(from->pShadingRatePaletteEntries, from->shadingRatePaletteEntryCount * sizeof(const VkShadingRatePaletteEntryNV));
+    }
+}
+
+void deepcopy_VkPipelineViewportShadingRateImageStateCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineViewportShadingRateImageStateCreateInfoNV* from,
+    VkPipelineViewportShadingRateImageStateCreateInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pShadingRatePalettes = nullptr;
+        if (from->pShadingRatePalettes)
+        {
+            to->pShadingRatePalettes = (VkShadingRatePaletteNV*)alloc->alloc(from->viewportCount * sizeof(const VkShadingRatePaletteNV));
+            to->viewportCount = from->viewportCount;
+            for (uint32_t i = 0; i < (uint32_t)from->viewportCount; ++i)
+            {
+                deepcopy_VkShadingRatePaletteNV(alloc, rootType, from->pShadingRatePalettes + i, (VkShadingRatePaletteNV*)(to->pShadingRatePalettes + i));
+            }
+        }
+    }
+}
+
+void deepcopy_VkPhysicalDeviceShadingRateImageFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShadingRateImageFeaturesNV* from,
+    VkPhysicalDeviceShadingRateImageFeaturesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceShadingRateImagePropertiesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShadingRateImagePropertiesNV* from,
+    VkPhysicalDeviceShadingRateImagePropertiesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkExtent2D(alloc, rootType, &from->shadingRateTexelSize, (VkExtent2D*)(&to->shadingRateTexelSize));
+}
+
+void deepcopy_VkCoarseSampleLocationNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCoarseSampleLocationNV* from,
+    VkCoarseSampleLocationNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkCoarseSampleOrderCustomNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCoarseSampleOrderCustomNV* from,
+    VkCoarseSampleOrderCustomNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (from)
+    {
+        to->pSampleLocations = nullptr;
+        if (from->pSampleLocations)
+        {
+            to->pSampleLocations = (VkCoarseSampleLocationNV*)alloc->alloc(from->sampleLocationCount * sizeof(const VkCoarseSampleLocationNV));
+            to->sampleLocationCount = from->sampleLocationCount;
+            for (uint32_t i = 0; i < (uint32_t)from->sampleLocationCount; ++i)
+            {
+                deepcopy_VkCoarseSampleLocationNV(alloc, rootType, from->pSampleLocations + i, (VkCoarseSampleLocationNV*)(to->pSampleLocations + i));
+            }
+        }
+    }
+}
+
+void deepcopy_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* from,
+    VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pCustomSampleOrders = nullptr;
+        if (from->pCustomSampleOrders)
+        {
+            to->pCustomSampleOrders = (VkCoarseSampleOrderCustomNV*)alloc->alloc(from->customSampleOrderCount * sizeof(const VkCoarseSampleOrderCustomNV));
+            to->customSampleOrderCount = from->customSampleOrderCount;
+            for (uint32_t i = 0; i < (uint32_t)from->customSampleOrderCount; ++i)
+            {
+                deepcopy_VkCoarseSampleOrderCustomNV(alloc, rootType, from->pCustomSampleOrders + i, (VkCoarseSampleOrderCustomNV*)(to->pCustomSampleOrders + i));
+            }
+        }
+    }
+}
+
+#endif
+#ifdef VK_NV_ray_tracing
+void deepcopy_VkRayTracingShaderGroupCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRayTracingShaderGroupCreateInfoNV* from,
+    VkRayTracingShaderGroupCreateInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkRayTracingPipelineCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRayTracingPipelineCreateInfoNV* from,
+    VkRayTracingPipelineCreateInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pStages = nullptr;
+        if (from->pStages)
+        {
+            to->pStages = (VkPipelineShaderStageCreateInfo*)alloc->alloc(from->stageCount * sizeof(const VkPipelineShaderStageCreateInfo));
+            to->stageCount = from->stageCount;
+            for (uint32_t i = 0; i < (uint32_t)from->stageCount; ++i)
+            {
+                deepcopy_VkPipelineShaderStageCreateInfo(alloc, rootType, from->pStages + i, (VkPipelineShaderStageCreateInfo*)(to->pStages + i));
+            }
+        }
+    }
+    if (from)
+    {
+        to->pGroups = nullptr;
+        if (from->pGroups)
+        {
+            to->pGroups = (VkRayTracingShaderGroupCreateInfoNV*)alloc->alloc(from->groupCount * sizeof(const VkRayTracingShaderGroupCreateInfoNV));
+            to->groupCount = from->groupCount;
+            for (uint32_t i = 0; i < (uint32_t)from->groupCount; ++i)
+            {
+                deepcopy_VkRayTracingShaderGroupCreateInfoNV(alloc, rootType, from->pGroups + i, (VkRayTracingShaderGroupCreateInfoNV*)(to->pGroups + i));
+            }
+        }
+    }
+}
+
+void deepcopy_VkGeometryTrianglesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkGeometryTrianglesNV* from,
+    VkGeometryTrianglesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkGeometryAABBNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkGeometryAABBNV* from,
+    VkGeometryAABBNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkGeometryDataNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkGeometryDataNV* from,
+    VkGeometryDataNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    deepcopy_VkGeometryTrianglesNV(alloc, rootType, &from->triangles, (VkGeometryTrianglesNV*)(&to->triangles));
+    deepcopy_VkGeometryAABBNV(alloc, rootType, &from->aabbs, (VkGeometryAABBNV*)(&to->aabbs));
+}
+
+void deepcopy_VkGeometryNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkGeometryNV* from,
+    VkGeometryNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkGeometryDataNV(alloc, rootType, &from->geometry, (VkGeometryDataNV*)(&to->geometry));
+}
+
+void deepcopy_VkAccelerationStructureInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureInfoNV* from,
+    VkAccelerationStructureInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pGeometries = nullptr;
+        if (from->pGeometries)
+        {
+            to->pGeometries = (VkGeometryNV*)alloc->alloc(from->geometryCount * sizeof(const VkGeometryNV));
+            to->geometryCount = from->geometryCount;
+            for (uint32_t i = 0; i < (uint32_t)from->geometryCount; ++i)
+            {
+                deepcopy_VkGeometryNV(alloc, rootType, from->pGeometries + i, (VkGeometryNV*)(to->pGeometries + i));
+            }
+        }
+    }
+}
+
+void deepcopy_VkAccelerationStructureCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureCreateInfoNV* from,
+    VkAccelerationStructureCreateInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkAccelerationStructureInfoNV(alloc, rootType, &from->info, (VkAccelerationStructureInfoNV*)(&to->info));
+}
+
+void deepcopy_VkBindAccelerationStructureMemoryInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBindAccelerationStructureMemoryInfoNV* from,
+    VkBindAccelerationStructureMemoryInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pDeviceIndices = nullptr;
+    if (from->pDeviceIndices)
+    {
+        to->pDeviceIndices = (uint32_t*)alloc->dupArray(from->pDeviceIndices, from->deviceIndexCount * sizeof(const uint32_t));
+    }
+}
+
+void deepcopy_VkWriteDescriptorSetAccelerationStructureNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetAccelerationStructureNV* from,
+    VkWriteDescriptorSetAccelerationStructureNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pAccelerationStructures = nullptr;
+    if (from->pAccelerationStructures)
+    {
+        to->pAccelerationStructures = (VkAccelerationStructureNV*)alloc->dupArray(from->pAccelerationStructures, from->accelerationStructureCount * sizeof(const VkAccelerationStructureNV));
+    }
+}
+
+void deepcopy_VkAccelerationStructureMemoryRequirementsInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureMemoryRequirementsInfoNV* from,
+    VkAccelerationStructureMemoryRequirementsInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceRayTracingPropertiesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPropertiesNV* from,
+    VkPhysicalDeviceRayTracingPropertiesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkTransformMatrixKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkTransformMatrixKHR* from,
+    VkTransformMatrixKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    memcpy(to->matrix, from->matrix, ((3)*(4)) * sizeof(float));
+}
+
+void deepcopy_VkAabbPositionsKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAabbPositionsKHR* from,
+    VkAabbPositionsKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkAccelerationStructureInstanceKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureInstanceKHR* from,
+    VkAccelerationStructureInstanceKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    deepcopy_VkTransformMatrixKHR(alloc, rootType, &from->transform, (VkTransformMatrixKHR*)(&to->transform));
+}
+
+#endif
+#ifdef VK_NV_representative_fragment_test
+void deepcopy_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* from,
+    VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineRepresentativeFragmentTestStateCreateInfoNV* from,
+    VkPipelineRepresentativeFragmentTestStateCreateInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_filter_cubic
+void deepcopy_VkPhysicalDeviceImageViewImageFormatInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageViewImageFormatInfoEXT* from,
+    VkPhysicalDeviceImageViewImageFormatInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkFilterCubicImageViewImageFormatPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkFilterCubicImageViewImageFormatPropertiesEXT* from,
+    VkFilterCubicImageViewImageFormatPropertiesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
+#endif
+#ifdef VK_EXT_global_priority
+void deepcopy_VkDeviceQueueGlobalPriorityCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDeviceQueueGlobalPriorityCreateInfoEXT* from,
+    VkDeviceQueueGlobalPriorityCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_EXT_external_memory_host
 void deepcopy_VkImportMemoryHostPointerInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportMemoryHostPointerInfoEXT* from,
     VkImportMemoryHostPointerInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pHostPointer = nullptr;
     if (from->pHostPointer)
     {
-        to->pHostPointer = (void*)pool->dupArray(from->pHostPointer, sizeof(uint8_t));
+        to->pHostPointer = (void*)alloc->dupArray(from->pHostPointer, sizeof(uint8_t));
     }
 }
 
 void deepcopy_VkMemoryHostPointerPropertiesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryHostPointerPropertiesEXT* from,
     VkMemoryHostPointerPropertiesEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalMemoryHostPropertiesEXT* from,
     VkPhysicalDeviceExternalMemoryHostPropertiesEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_AMD_buffer_marker
 #endif
-#ifdef VK_AMD_shader_core_properties
-void deepcopy_VkPhysicalDeviceShaderCorePropertiesAMD(
-    Pool* pool,
-    const VkPhysicalDeviceShaderCorePropertiesAMD* from,
-    VkPhysicalDeviceShaderCorePropertiesAMD* to)
+#ifdef VK_AMD_pipeline_compiler_control
+void deepcopy_VkPipelineCompilerControlCreateInfoAMD(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineCompilerControlCreateInfoAMD* from,
+    VkPipelineCompilerControlCreateInfoAMD* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+void deepcopy_VkCalibratedTimestampInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCalibratedTimestampInfoEXT* from,
+    VkCalibratedTimestampInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_AMD_shader_core_properties
+void deepcopy_VkPhysicalDeviceShaderCorePropertiesAMD(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderCorePropertiesAMD* from,
+    VkPhysicalDeviceShaderCorePropertiesAMD* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_AMD_memory_overallocation_behavior
+void deepcopy_VkDeviceMemoryOverallocationCreateInfoAMD(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDeviceMemoryOverallocationCreateInfoAMD* from,
+    VkDeviceMemoryOverallocationCreateInfoAMD* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
 #ifdef VK_EXT_vertex_attribute_divisor
 void deepcopy_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT* from,
     VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkVertexInputBindingDivisorDescriptionEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkVertexInputBindingDivisorDescriptionEXT* from,
     VkVertexInputBindingDivisorDescriptionEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
 }
 
 void deepcopy_VkPipelineVertexInputDivisorStateCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineVertexInputDivisorStateCreateInfoEXT* from,
     VkPipelineVertexInputDivisorStateCreateInfoEXT* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (const void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
-    to->pVertexBindingDivisors = nullptr;
-    if (from->pVertexBindingDivisors)
+    if (from)
     {
-        to->pVertexBindingDivisors = (VkVertexInputBindingDivisorDescriptionEXT*)pool->alloc(from->vertexBindingDivisorCount * sizeof(const VkVertexInputBindingDivisorDescriptionEXT));
-        to->vertexBindingDivisorCount = from->vertexBindingDivisorCount;
-        for (uint32_t i = 0; i < (uint32_t)from->vertexBindingDivisorCount; ++i)
+        to->pVertexBindingDivisors = nullptr;
+        if (from->pVertexBindingDivisors)
         {
-            deepcopy_VkVertexInputBindingDivisorDescriptionEXT(pool, from->pVertexBindingDivisors + i, (VkVertexInputBindingDivisorDescriptionEXT*)(to->pVertexBindingDivisors + i));
+            to->pVertexBindingDivisors = (VkVertexInputBindingDivisorDescriptionEXT*)alloc->alloc(from->vertexBindingDivisorCount * sizeof(const VkVertexInputBindingDivisorDescriptionEXT));
+            to->vertexBindingDivisorCount = from->vertexBindingDivisorCount;
+            for (uint32_t i = 0; i < (uint32_t)from->vertexBindingDivisorCount; ++i)
+            {
+                deepcopy_VkVertexInputBindingDivisorDescriptionEXT(alloc, rootType, from->pVertexBindingDivisors + i, (VkVertexInputBindingDivisorDescriptionEXT*)(to->pVertexBindingDivisors + i));
+            }
+        }
+    }
+}
+
+void deepcopy_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* from,
+    VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_GGP_frame_token
+void deepcopy_VkPresentFrameTokenGGP(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPresentFrameTokenGGP* from,
+    VkPresentFrameTokenGGP* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+void deepcopy_VkPipelineCreationFeedbackEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineCreationFeedbackEXT* from,
+    VkPipelineCreationFeedbackEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkPipelineCreationFeedbackCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineCreationFeedbackCreateInfoEXT* from,
+    VkPipelineCreationFeedbackCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pPipelineCreationFeedback = nullptr;
+    if (from->pPipelineCreationFeedback)
+    {
+        to->pPipelineCreationFeedback = (VkPipelineCreationFeedbackEXT*)alloc->alloc(sizeof(VkPipelineCreationFeedbackEXT));
+        deepcopy_VkPipelineCreationFeedbackEXT(alloc, rootType, from->pPipelineCreationFeedback, (VkPipelineCreationFeedbackEXT*)(to->pPipelineCreationFeedback));
+    }
+    if (from)
+    {
+        to->pPipelineStageCreationFeedbacks = nullptr;
+        if (from->pPipelineStageCreationFeedbacks)
+        {
+            to->pPipelineStageCreationFeedbacks = (VkPipelineCreationFeedbackEXT*)alloc->alloc(from->pipelineStageCreationFeedbackCount * sizeof(VkPipelineCreationFeedbackEXT));
+            to->pipelineStageCreationFeedbackCount = from->pipelineStageCreationFeedbackCount;
+            for (uint32_t i = 0; i < (uint32_t)from->pipelineStageCreationFeedbackCount; ++i)
+            {
+                deepcopy_VkPipelineCreationFeedbackEXT(alloc, rootType, from->pPipelineStageCreationFeedbacks + i, (VkPipelineCreationFeedbackEXT*)(to->pPipelineStageCreationFeedbacks + i));
+            }
         }
     }
 }
@@ -6305,93 +13156,3782 @@
 #endif
 #ifdef VK_NV_shader_subgroup_partitioned
 #endif
-#ifdef VK_NV_device_diagnostic_checkpoints
-void deepcopy_VkQueueFamilyCheckpointPropertiesNV(
-    Pool* pool,
-    const VkQueueFamilyCheckpointPropertiesNV* from,
-    VkQueueFamilyCheckpointPropertiesNV* to)
+#ifdef VK_NV_compute_shader_derivatives
+void deepcopy_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* from,
+    VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_mesh_shader
+void deepcopy_VkPhysicalDeviceMeshShaderFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMeshShaderFeaturesNV* from,
+    VkPhysicalDeviceMeshShaderFeaturesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceMeshShaderPropertiesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMeshShaderPropertiesNV* from,
+    VkPhysicalDeviceMeshShaderPropertiesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    memcpy(to->maxTaskWorkGroupSize, from->maxTaskWorkGroupSize, 3 * sizeof(uint32_t));
+    memcpy(to->maxMeshWorkGroupSize, from->maxMeshWorkGroupSize, 3 * sizeof(uint32_t));
+}
+
+void deepcopy_VkDrawMeshTasksIndirectCommandNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDrawMeshTasksIndirectCommandNV* from,
+    VkDrawMeshTasksIndirectCommandNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+void deepcopy_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* from,
+    VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_shader_image_footprint
+void deepcopy_VkPhysicalDeviceShaderImageFootprintFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderImageFootprintFeaturesNV* from,
+    VkPhysicalDeviceShaderImageFootprintFeaturesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_scissor_exclusive
+void deepcopy_VkPipelineViewportExclusiveScissorStateCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineViewportExclusiveScissorStateCreateInfoNV* from,
+    VkPipelineViewportExclusiveScissorStateCreateInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pExclusiveScissors = nullptr;
+        if (from->pExclusiveScissors)
+        {
+            to->pExclusiveScissors = (VkRect2D*)alloc->alloc(from->exclusiveScissorCount * sizeof(const VkRect2D));
+            to->exclusiveScissorCount = from->exclusiveScissorCount;
+            for (uint32_t i = 0; i < (uint32_t)from->exclusiveScissorCount; ++i)
+            {
+                deepcopy_VkRect2D(alloc, rootType, from->pExclusiveScissors + i, (VkRect2D*)(to->pExclusiveScissors + i));
+            }
+        }
+    }
+}
+
+void deepcopy_VkPhysicalDeviceExclusiveScissorFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExclusiveScissorFeaturesNV* from,
+    VkPhysicalDeviceExclusiveScissorFeaturesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_device_diagnostic_checkpoints
+void deepcopy_VkQueueFamilyCheckpointPropertiesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkQueueFamilyCheckpointPropertiesNV* from,
+    VkQueueFamilyCheckpointPropertiesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkCheckpointDataNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkCheckpointDataNV* from,
     VkCheckpointDataNV* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
     to->pCheckpointMarker = nullptr;
     if (from->pCheckpointMarker)
     {
-        to->pCheckpointMarker = (void*)pool->dupArray(from->pCheckpointMarker, sizeof(uint8_t));
+        to->pCheckpointMarker = (void*)alloc->dupArray(from->pCheckpointMarker, sizeof(uint8_t));
     }
 }
 
 #endif
-#ifdef VK_GOOGLE_address_space
-#endif
-#ifdef VK_GOOGLE_color_buffer
-void deepcopy_VkImportColorBufferGOOGLE(
-    Pool* pool,
-    const VkImportColorBufferGOOGLE* from,
-    VkImportColorBufferGOOGLE* to)
+#ifdef VK_INTEL_shader_integer_functions2
+void deepcopy_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* from,
+    VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_INTEL_performance_query
+void deepcopy_VkPerformanceValueDataINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceValueDataINTEL* from,
+    VkPerformanceValueDataINTEL* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    to->valueString = nullptr;
+    if (from->valueString)
+    {
+        to->valueString = alloc->strDup(from->valueString);
+    }
+}
+
+void deepcopy_VkPerformanceValueINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceValueINTEL* from,
+    VkPerformanceValueINTEL* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    deepcopy_VkPerformanceValueDataINTEL(alloc, rootType, &from->data, (VkPerformanceValueDataINTEL*)(&to->data));
+}
+
+void deepcopy_VkInitializePerformanceApiInfoINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkInitializePerformanceApiInfoINTEL* from,
+    VkInitializePerformanceApiInfoINTEL* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pUserData = nullptr;
+    if (from->pUserData)
+    {
+        to->pUserData = (void*)alloc->dupArray(from->pUserData, sizeof(uint8_t));
+    }
+}
+
+void deepcopy_VkQueryPoolPerformanceQueryCreateInfoINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkQueryPoolPerformanceQueryCreateInfoINTEL* from,
+    VkQueryPoolPerformanceQueryCreateInfoINTEL* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPerformanceMarkerInfoINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceMarkerInfoINTEL* from,
+    VkPerformanceMarkerInfoINTEL* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPerformanceStreamMarkerInfoINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceStreamMarkerInfoINTEL* from,
+    VkPerformanceStreamMarkerInfoINTEL* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPerformanceOverrideInfoINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceOverrideInfoINTEL* from,
+    VkPerformanceOverrideInfoINTEL* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPerformanceConfigurationAcquireInfoINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceConfigurationAcquireInfoINTEL* from,
+    VkPerformanceConfigurationAcquireInfoINTEL* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_pci_bus_info
+void deepcopy_VkPhysicalDevicePCIBusInfoPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevicePCIBusInfoPropertiesEXT* from,
+    VkPhysicalDevicePCIBusInfoPropertiesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_AMD_display_native_hdr
+void deepcopy_VkDisplayNativeHdrSurfaceCapabilitiesAMD(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDisplayNativeHdrSurfaceCapabilitiesAMD* from,
+    VkDisplayNativeHdrSurfaceCapabilitiesAMD* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkSwapchainDisplayNativeHdrCreateInfoAMD(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSwapchainDisplayNativeHdrCreateInfoAMD* from,
+    VkSwapchainDisplayNativeHdrCreateInfoAMD* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+void deepcopy_VkImagePipeSurfaceCreateInfoFUCHSIA(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImagePipeSurfaceCreateInfoFUCHSIA* from,
+    VkImagePipeSurfaceCreateInfoFUCHSIA* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_metal_surface
+void deepcopy_VkMetalSurfaceCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkMetalSurfaceCreateInfoEXT* from,
+    VkMetalSurfaceCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pLayer = nullptr;
+    if (from->pLayer)
+    {
+        to->pLayer = (CAMetalLayer*)alloc->dupArray(from->pLayer, sizeof(const CAMetalLayer));
+    }
+}
+
+#endif
+#ifdef VK_EXT_fragment_density_map
+void deepcopy_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMapFeaturesEXT* from,
+    VkPhysicalDeviceFragmentDensityMapFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMapPropertiesEXT* from,
+    VkPhysicalDeviceFragmentDensityMapPropertiesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkExtent2D(alloc, rootType, &from->minFragmentDensityTexelSize, (VkExtent2D*)(&to->minFragmentDensityTexelSize));
+    deepcopy_VkExtent2D(alloc, rootType, &from->maxFragmentDensityTexelSize, (VkExtent2D*)(&to->maxFragmentDensityTexelSize));
+}
+
+void deepcopy_VkRenderPassFragmentDensityMapCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRenderPassFragmentDensityMapCreateInfoEXT* from,
+    VkRenderPassFragmentDensityMapCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkAttachmentReference(alloc, rootType, &from->fragmentDensityMapAttachment, (VkAttachmentReference*)(&to->fragmentDensityMapAttachment));
+}
+
+#endif
+#ifdef VK_EXT_scalar_block_layout
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+void deepcopy_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* from,
+    VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* from,
+    VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* from,
+    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_AMD_shader_core_properties2
+void deepcopy_VkPhysicalDeviceShaderCoreProperties2AMD(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderCoreProperties2AMD* from,
+    VkPhysicalDeviceShaderCoreProperties2AMD* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_AMD_device_coherent_memory
+void deepcopy_VkPhysicalDeviceCoherentMemoryFeaturesAMD(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCoherentMemoryFeaturesAMD* from,
+    VkPhysicalDeviceCoherentMemoryFeaturesAMD* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+void deepcopy_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* from,
+    VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_memory_budget
+void deepcopy_VkPhysicalDeviceMemoryBudgetPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryBudgetPropertiesEXT* from,
+    VkPhysicalDeviceMemoryBudgetPropertiesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    memcpy(to->heapBudget, from->heapBudget, VK_MAX_MEMORY_HEAPS * sizeof(VkDeviceSize));
+    memcpy(to->heapUsage, from->heapUsage, VK_MAX_MEMORY_HEAPS * sizeof(VkDeviceSize));
+}
+
+#endif
+#ifdef VK_EXT_memory_priority
+void deepcopy_VkPhysicalDeviceMemoryPriorityFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryPriorityFeaturesEXT* from,
+    VkPhysicalDeviceMemoryPriorityFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkMemoryPriorityAllocateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkMemoryPriorityAllocateInfoEXT* from,
+    VkMemoryPriorityAllocateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+void deepcopy_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* from,
+    VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_buffer_device_address
+void deepcopy_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* from,
+    VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkBufferDeviceAddressCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBufferDeviceAddressCreateInfoEXT* from,
+    VkBufferDeviceAddressCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_tooling_info
+void deepcopy_VkPhysicalDeviceToolPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceToolPropertiesEXT* from,
+    VkPhysicalDeviceToolPropertiesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    memcpy(to->name, from->name, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
+    memcpy(to->version, from->version, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
+    memcpy(to->description, from->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    memcpy(to->layer, from->layer, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
+}
+
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+#endif
+#ifdef VK_EXT_validation_features
+void deepcopy_VkValidationFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkValidationFeaturesEXT* from,
+    VkValidationFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pEnabledValidationFeatures = nullptr;
+    if (from->pEnabledValidationFeatures)
+    {
+        to->pEnabledValidationFeatures = (VkValidationFeatureEnableEXT*)alloc->dupArray(from->pEnabledValidationFeatures, from->enabledValidationFeatureCount * sizeof(const VkValidationFeatureEnableEXT));
+    }
+    to->pDisabledValidationFeatures = nullptr;
+    if (from->pDisabledValidationFeatures)
+    {
+        to->pDisabledValidationFeatures = (VkValidationFeatureDisableEXT*)alloc->dupArray(from->pDisabledValidationFeatures, from->disabledValidationFeatureCount * sizeof(const VkValidationFeatureDisableEXT));
+    }
+}
+
+#endif
+#ifdef VK_NV_cooperative_matrix
+void deepcopy_VkCooperativeMatrixPropertiesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCooperativeMatrixPropertiesNV* from,
+    VkCooperativeMatrixPropertiesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceCooperativeMatrixFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCooperativeMatrixFeaturesNV* from,
+    VkPhysicalDeviceCooperativeMatrixFeaturesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceCooperativeMatrixPropertiesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCooperativeMatrixPropertiesNV* from,
+    VkPhysicalDeviceCooperativeMatrixPropertiesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+void deepcopy_VkPhysicalDeviceCoverageReductionModeFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCoverageReductionModeFeaturesNV* from,
+    VkPhysicalDeviceCoverageReductionModeFeaturesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPipelineCoverageReductionStateCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineCoverageReductionStateCreateInfoNV* from,
+    VkPipelineCoverageReductionStateCreateInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkFramebufferMixedSamplesCombinationNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkFramebufferMixedSamplesCombinationNV* from,
+    VkFramebufferMixedSamplesCombinationNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+void deepcopy_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* from,
+    VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+void deepcopy_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* from,
+    VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+void deepcopy_VkSurfaceFullScreenExclusiveInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSurfaceFullScreenExclusiveInfoEXT* from,
+    VkSurfaceFullScreenExclusiveInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkSurfaceCapabilitiesFullScreenExclusiveEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSurfaceCapabilitiesFullScreenExclusiveEXT* from,
+    VkSurfaceCapabilitiesFullScreenExclusiveEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkSurfaceFullScreenExclusiveWin32InfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSurfaceFullScreenExclusiveWin32InfoEXT* from,
+    VkSurfaceFullScreenExclusiveWin32InfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_headless_surface
+void deepcopy_VkHeadlessSurfaceCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkHeadlessSurfaceCreateInfoEXT* from,
+    VkHeadlessSurfaceCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_line_rasterization
+void deepcopy_VkPhysicalDeviceLineRasterizationFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLineRasterizationFeaturesEXT* from,
+    VkPhysicalDeviceLineRasterizationFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceLineRasterizationPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLineRasterizationPropertiesEXT* from,
+    VkPhysicalDeviceLineRasterizationPropertiesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPipelineRasterizationLineStateCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineRasterizationLineStateCreateInfoEXT* from,
+    VkPipelineRasterizationLineStateCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_shader_atomic_float
+void deepcopy_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* from,
+    VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_host_query_reset
+#endif
+#ifdef VK_EXT_index_type_uint8
+void deepcopy_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceIndexTypeUint8FeaturesEXT* from,
+    VkPhysicalDeviceIndexTypeUint8FeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+void deepcopy_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* from,
+    VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+void deepcopy_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* from,
+    VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_device_generated_commands
+void deepcopy_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* from,
+    VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* from,
+    VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkGraphicsShaderGroupCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkGraphicsShaderGroupCreateInfoNV* from,
+    VkGraphicsShaderGroupCreateInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pStages = nullptr;
+        if (from->pStages)
+        {
+            to->pStages = (VkPipelineShaderStageCreateInfo*)alloc->alloc(from->stageCount * sizeof(const VkPipelineShaderStageCreateInfo));
+            to->stageCount = from->stageCount;
+            for (uint32_t i = 0; i < (uint32_t)from->stageCount; ++i)
+            {
+                deepcopy_VkPipelineShaderStageCreateInfo(alloc, rootType, from->pStages + i, (VkPipelineShaderStageCreateInfo*)(to->pStages + i));
+            }
+        }
+    }
+    to->pVertexInputState = nullptr;
+    if (from->pVertexInputState)
+    {
+        to->pVertexInputState = (VkPipelineVertexInputStateCreateInfo*)alloc->alloc(sizeof(const VkPipelineVertexInputStateCreateInfo));
+        deepcopy_VkPipelineVertexInputStateCreateInfo(alloc, rootType, from->pVertexInputState, (VkPipelineVertexInputStateCreateInfo*)(to->pVertexInputState));
+    }
+    to->pTessellationState = nullptr;
+    if (from->pTessellationState)
+    {
+        to->pTessellationState = (VkPipelineTessellationStateCreateInfo*)alloc->alloc(sizeof(const VkPipelineTessellationStateCreateInfo));
+        deepcopy_VkPipelineTessellationStateCreateInfo(alloc, rootType, from->pTessellationState, (VkPipelineTessellationStateCreateInfo*)(to->pTessellationState));
+    }
+}
+
+void deepcopy_VkGraphicsPipelineShaderGroupsCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkGraphicsPipelineShaderGroupsCreateInfoNV* from,
+    VkGraphicsPipelineShaderGroupsCreateInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pGroups = nullptr;
+        if (from->pGroups)
+        {
+            to->pGroups = (VkGraphicsShaderGroupCreateInfoNV*)alloc->alloc(from->groupCount * sizeof(const VkGraphicsShaderGroupCreateInfoNV));
+            to->groupCount = from->groupCount;
+            for (uint32_t i = 0; i < (uint32_t)from->groupCount; ++i)
+            {
+                deepcopy_VkGraphicsShaderGroupCreateInfoNV(alloc, rootType, from->pGroups + i, (VkGraphicsShaderGroupCreateInfoNV*)(to->pGroups + i));
+            }
+        }
+    }
+    to->pPipelines = nullptr;
+    if (from->pPipelines)
+    {
+        to->pPipelines = (VkPipeline*)alloc->dupArray(from->pPipelines, from->pipelineCount * sizeof(const VkPipeline));
+    }
+}
+
+void deepcopy_VkBindShaderGroupIndirectCommandNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBindShaderGroupIndirectCommandNV* from,
+    VkBindShaderGroupIndirectCommandNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkBindIndexBufferIndirectCommandNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBindIndexBufferIndirectCommandNV* from,
+    VkBindIndexBufferIndirectCommandNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkBindVertexBufferIndirectCommandNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBindVertexBufferIndirectCommandNV* from,
+    VkBindVertexBufferIndirectCommandNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkSetStateFlagsIndirectCommandNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSetStateFlagsIndirectCommandNV* from,
+    VkSetStateFlagsIndirectCommandNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkIndirectCommandsStreamNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkIndirectCommandsStreamNV* from,
+    VkIndirectCommandsStreamNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkIndirectCommandsLayoutTokenNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkIndirectCommandsLayoutTokenNV* from,
+    VkIndirectCommandsLayoutTokenNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pIndexTypes = nullptr;
+    if (from->pIndexTypes)
+    {
+        to->pIndexTypes = (VkIndexType*)alloc->dupArray(from->pIndexTypes, from->indexTypeCount * sizeof(const VkIndexType));
+    }
+    to->pIndexTypeValues = nullptr;
+    if (from->pIndexTypeValues)
+    {
+        to->pIndexTypeValues = (uint32_t*)alloc->dupArray(from->pIndexTypeValues, from->indexTypeCount * sizeof(const uint32_t));
+    }
+}
+
+void deepcopy_VkIndirectCommandsLayoutCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkIndirectCommandsLayoutCreateInfoNV* from,
+    VkIndirectCommandsLayoutCreateInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pTokens = nullptr;
+        if (from->pTokens)
+        {
+            to->pTokens = (VkIndirectCommandsLayoutTokenNV*)alloc->alloc(from->tokenCount * sizeof(const VkIndirectCommandsLayoutTokenNV));
+            to->tokenCount = from->tokenCount;
+            for (uint32_t i = 0; i < (uint32_t)from->tokenCount; ++i)
+            {
+                deepcopy_VkIndirectCommandsLayoutTokenNV(alloc, rootType, from->pTokens + i, (VkIndirectCommandsLayoutTokenNV*)(to->pTokens + i));
+            }
+        }
+    }
+    to->pStreamStrides = nullptr;
+    if (from->pStreamStrides)
+    {
+        to->pStreamStrides = (uint32_t*)alloc->dupArray(from->pStreamStrides, from->streamCount * sizeof(const uint32_t));
+    }
+}
+
+void deepcopy_VkGeneratedCommandsInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkGeneratedCommandsInfoNV* from,
+    VkGeneratedCommandsInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pStreams = nullptr;
+        if (from->pStreams)
+        {
+            to->pStreams = (VkIndirectCommandsStreamNV*)alloc->alloc(from->streamCount * sizeof(const VkIndirectCommandsStreamNV));
+            to->streamCount = from->streamCount;
+            for (uint32_t i = 0; i < (uint32_t)from->streamCount; ++i)
+            {
+                deepcopy_VkIndirectCommandsStreamNV(alloc, rootType, from->pStreams + i, (VkIndirectCommandsStreamNV*)(to->pStreams + i));
+            }
+        }
+    }
+}
+
+void deepcopy_VkGeneratedCommandsMemoryRequirementsInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkGeneratedCommandsMemoryRequirementsInfoNV* from,
+    VkGeneratedCommandsMemoryRequirementsInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+void deepcopy_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* from,
+    VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* from,
+    VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_transform
+void deepcopy_VkRenderPassTransformBeginInfoQCOM(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRenderPassTransformBeginInfoQCOM* from,
+    VkRenderPassTransformBeginInfoQCOM* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceRenderPassTransformInfoQCOM* from,
+    VkCommandBufferInheritanceRenderPassTransformInfoQCOM* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkRect2D(alloc, rootType, &from->renderArea, (VkRect2D*)(&to->renderArea));
+}
+
+#endif
+#ifdef VK_EXT_device_memory_report
+void deepcopy_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* from,
+    VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkDeviceMemoryReportCallbackDataEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDeviceMemoryReportCallbackDataEXT* from,
+    VkDeviceMemoryReportCallbackDataEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkDeviceDeviceMemoryReportCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDeviceDeviceMemoryReportCreateInfoEXT* from,
+    VkDeviceDeviceMemoryReportCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pUserData = nullptr;
+    if (from->pUserData)
+    {
+        to->pUserData = (void*)alloc->dupArray(from->pUserData, sizeof(uint8_t));
+    }
+}
+
+#endif
+#ifdef VK_EXT_robustness2
+void deepcopy_VkPhysicalDeviceRobustness2FeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRobustness2FeaturesEXT* from,
+    VkPhysicalDeviceRobustness2FeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceRobustness2PropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRobustness2PropertiesEXT* from,
+    VkPhysicalDeviceRobustness2PropertiesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_custom_border_color
+void deepcopy_VkSamplerCustomBorderColorCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSamplerCustomBorderColorCreateInfoEXT* from,
+    VkSamplerCustomBorderColorCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkClearColorValue(alloc, rootType, &from->customBorderColor, (VkClearColorValue*)(&to->customBorderColor));
+}
+
+void deepcopy_VkPhysicalDeviceCustomBorderColorPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCustomBorderColorPropertiesEXT* from,
+    VkPhysicalDeviceCustomBorderColorPropertiesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceCustomBorderColorFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCustomBorderColorFeaturesEXT* from,
+    VkPhysicalDeviceCustomBorderColorFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+void deepcopy_VkPhysicalDevicePrivateDataFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevicePrivateDataFeaturesEXT* from,
+    VkPhysicalDevicePrivateDataFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkDevicePrivateDataCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDevicePrivateDataCreateInfoEXT* from,
+    VkDevicePrivateDataCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPrivateDataSlotCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPrivateDataSlotCreateInfoEXT* from,
+    VkPrivateDataSlotCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+void deepcopy_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* from,
+    VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_device_diagnostics_config
+void deepcopy_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDiagnosticsConfigFeaturesNV* from,
+    VkPhysicalDeviceDiagnosticsConfigFeaturesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkDeviceDiagnosticsConfigCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDeviceDiagnosticsConfigCreateInfoNV* from,
+    VkDeviceDiagnosticsConfigCreateInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+void deepcopy_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* from,
+    VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* from,
+    VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineFragmentShadingRateEnumStateCreateInfoNV* from,
+    VkPipelineFragmentShadingRateEnumStateCreateInfoNV* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    memcpy(to->combinerOps, from->combinerOps, 2 * sizeof(VkFragmentShadingRateCombinerOpKHR));
+}
+
+#endif
+#ifdef VK_EXT_fragment_density_map2
+void deepcopy_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* from,
+    VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* from,
+    VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+void deepcopy_VkCopyCommandTransformInfoQCOM(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyCommandTransformInfoQCOM* from,
+    VkCopyCommandTransformInfoQCOM* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_image_robustness
+void deepcopy_VkPhysicalDeviceImageRobustnessFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageRobustnessFeaturesEXT* from,
+    VkPhysicalDeviceImageRobustnessFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_4444_formats
+void deepcopy_VkPhysicalDevice4444FormatsFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevice4444FormatsFeaturesEXT* from,
+    VkPhysicalDevice4444FormatsFeaturesEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_directfb_surface
+void deepcopy_VkDirectFBSurfaceCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDirectFBSurfaceCreateInfoEXT* from,
+    VkDirectFBSurfaceCreateInfoEXT* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->dfb = nullptr;
+    if (from->dfb)
+    {
+        to->dfb = (IDirectFB*)alloc->dupArray(from->dfb, sizeof(IDirectFB));
+    }
+    to->surface = nullptr;
+    if (from->surface)
+    {
+        to->surface = (IDirectFBSurface*)alloc->dupArray(from->surface, sizeof(IDirectFBSurface));
+    }
+}
+
+#endif
+#ifdef VK_GOOGLE_gfxstream
+void deepcopy_VkImportColorBufferGOOGLE(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImportColorBufferGOOGLE* from,
+    VkImportColorBufferGOOGLE* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkImportBufferGOOGLE(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImportBufferGOOGLE* from,
+    VkImportBufferGOOGLE* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 void deepcopy_VkImportPhysicalAddressGOOGLE(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportPhysicalAddressGOOGLE* from,
     VkImportPhysicalAddressGOOGLE* to)
 {
-    (void)pool;
+    (void)alloc;
+    (void)rootType;
     *to = *from;
-    size_t pNext_size = goldfish_vk_extension_struct_size(from->pNext);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
     to->pNext = nullptr;
     if (pNext_size)
     {
-        to->pNext = (void*)pool->alloc(pNext_size);
-        deepcopy_extension_struct(pool, from->pNext, (void*)(to->pNext));
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
     }
 }
 
 #endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
+#ifdef VK_KHR_acceleration_structure
+void deepcopy_VkDeviceOrHostAddressKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDeviceOrHostAddressKHR* from,
+    VkDeviceOrHostAddressKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    to->hostAddress = nullptr;
+    if (from->hostAddress)
+    {
+        to->hostAddress = (void*)alloc->dupArray(from->hostAddress, sizeof(uint8_t));
+    }
+}
+
+void deepcopy_VkDeviceOrHostAddressConstKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDeviceOrHostAddressConstKHR* from,
+    VkDeviceOrHostAddressConstKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    to->hostAddress = nullptr;
+    if (from->hostAddress)
+    {
+        to->hostAddress = (void*)alloc->dupArray(from->hostAddress, sizeof(const uint8_t));
+    }
+}
+
+void deepcopy_VkAccelerationStructureBuildRangeInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildRangeInfoKHR* from,
+    VkAccelerationStructureBuildRangeInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkAccelerationStructureGeometryTrianglesDataKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryTrianglesDataKHR* from,
+    VkAccelerationStructureGeometryTrianglesDataKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkDeviceOrHostAddressConstKHR(alloc, rootType, &from->vertexData, (VkDeviceOrHostAddressConstKHR*)(&to->vertexData));
+    deepcopy_VkDeviceOrHostAddressConstKHR(alloc, rootType, &from->indexData, (VkDeviceOrHostAddressConstKHR*)(&to->indexData));
+    deepcopy_VkDeviceOrHostAddressConstKHR(alloc, rootType, &from->transformData, (VkDeviceOrHostAddressConstKHR*)(&to->transformData));
+}
+
+void deepcopy_VkAccelerationStructureGeometryAabbsDataKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryAabbsDataKHR* from,
+    VkAccelerationStructureGeometryAabbsDataKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkDeviceOrHostAddressConstKHR(alloc, rootType, &from->data, (VkDeviceOrHostAddressConstKHR*)(&to->data));
+}
+
+void deepcopy_VkAccelerationStructureGeometryInstancesDataKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryInstancesDataKHR* from,
+    VkAccelerationStructureGeometryInstancesDataKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkDeviceOrHostAddressConstKHR(alloc, rootType, &from->data, (VkDeviceOrHostAddressConstKHR*)(&to->data));
+}
+
+void deepcopy_VkAccelerationStructureGeometryDataKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryDataKHR* from,
+    VkAccelerationStructureGeometryDataKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    deepcopy_VkAccelerationStructureGeometryTrianglesDataKHR(alloc, rootType, &from->triangles, (VkAccelerationStructureGeometryTrianglesDataKHR*)(&to->triangles));
+    deepcopy_VkAccelerationStructureGeometryAabbsDataKHR(alloc, rootType, &from->aabbs, (VkAccelerationStructureGeometryAabbsDataKHR*)(&to->aabbs));
+    deepcopy_VkAccelerationStructureGeometryInstancesDataKHR(alloc, rootType, &from->instances, (VkAccelerationStructureGeometryInstancesDataKHR*)(&to->instances));
+}
+
+void deepcopy_VkAccelerationStructureGeometryKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryKHR* from,
+    VkAccelerationStructureGeometryKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkAccelerationStructureGeometryDataKHR(alloc, rootType, &from->geometry, (VkAccelerationStructureGeometryDataKHR*)(&to->geometry));
+}
+
+void deepcopy_VkAccelerationStructureBuildGeometryInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildGeometryInfoKHR* from,
+    VkAccelerationStructureBuildGeometryInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pGeometries = nullptr;
+        if (from->pGeometries)
+        {
+            to->pGeometries = (VkAccelerationStructureGeometryKHR*)alloc->alloc(from->geometryCount * sizeof(const VkAccelerationStructureGeometryKHR));
+            to->geometryCount = from->geometryCount;
+            for (uint32_t i = 0; i < (uint32_t)from->geometryCount; ++i)
+            {
+                deepcopy_VkAccelerationStructureGeometryKHR(alloc, rootType, from->pGeometries + i, (VkAccelerationStructureGeometryKHR*)(to->pGeometries + i));
+            }
+        }
+    }
+    deepcopy_VkDeviceOrHostAddressKHR(alloc, rootType, &from->scratchData, (VkDeviceOrHostAddressKHR*)(&to->scratchData));
+}
+
+void deepcopy_VkAccelerationStructureCreateInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureCreateInfoKHR* from,
+    VkAccelerationStructureCreateInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkWriteDescriptorSetAccelerationStructureKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetAccelerationStructureKHR* from,
+    VkWriteDescriptorSetAccelerationStructureKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pAccelerationStructures = nullptr;
+    if (from->pAccelerationStructures)
+    {
+        to->pAccelerationStructures = (VkAccelerationStructureKHR*)alloc->dupArray(from->pAccelerationStructures, from->accelerationStructureCount * sizeof(const VkAccelerationStructureKHR));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceAccelerationStructureFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceAccelerationStructureFeaturesKHR* from,
+    VkPhysicalDeviceAccelerationStructureFeaturesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceAccelerationStructurePropertiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceAccelerationStructurePropertiesKHR* from,
+    VkPhysicalDeviceAccelerationStructurePropertiesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkAccelerationStructureDeviceAddressInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureDeviceAddressInfoKHR* from,
+    VkAccelerationStructureDeviceAddressInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkAccelerationStructureVersionInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureVersionInfoKHR* from,
+    VkAccelerationStructureVersionInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pVersionData = nullptr;
+    if (from->pVersionData)
+    {
+        to->pVersionData = (uint8_t*)alloc->dupArray(from->pVersionData, 2*VK_UUID_SIZE * sizeof(const uint8_t));
+    }
+}
+
+void deepcopy_VkCopyAccelerationStructureToMemoryInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyAccelerationStructureToMemoryInfoKHR* from,
+    VkCopyAccelerationStructureToMemoryInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkDeviceOrHostAddressKHR(alloc, rootType, &from->dst, (VkDeviceOrHostAddressKHR*)(&to->dst));
+}
+
+void deepcopy_VkCopyMemoryToAccelerationStructureInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyMemoryToAccelerationStructureInfoKHR* from,
+    VkCopyMemoryToAccelerationStructureInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    deepcopy_VkDeviceOrHostAddressConstKHR(alloc, rootType, &from->src, (VkDeviceOrHostAddressConstKHR*)(&to->src));
+}
+
+void deepcopy_VkCopyAccelerationStructureInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyAccelerationStructureInfoKHR* from,
+    VkCopyAccelerationStructureInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkAccelerationStructureBuildSizesInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildSizesInfoKHR* from,
+    VkAccelerationStructureBuildSizesInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
 #endif
-#ifdef VK_GOOGLE_async_command_buffers
+#ifdef VK_KHR_ray_tracing_pipeline
+void deepcopy_VkRayTracingShaderGroupCreateInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRayTracingShaderGroupCreateInfoKHR* from,
+    VkRayTracingShaderGroupCreateInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    to->pShaderGroupCaptureReplayHandle = nullptr;
+    if (from->pShaderGroupCaptureReplayHandle)
+    {
+        to->pShaderGroupCaptureReplayHandle = (void*)alloc->dupArray(from->pShaderGroupCaptureReplayHandle, sizeof(const uint8_t));
+    }
+}
+
+void deepcopy_VkRayTracingPipelineInterfaceCreateInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRayTracingPipelineInterfaceCreateInfoKHR* from,
+    VkRayTracingPipelineInterfaceCreateInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkRayTracingPipelineCreateInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRayTracingPipelineCreateInfoKHR* from,
+    VkRayTracingPipelineCreateInfoKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (const void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+    if (from)
+    {
+        to->pStages = nullptr;
+        if (from->pStages)
+        {
+            to->pStages = (VkPipelineShaderStageCreateInfo*)alloc->alloc(from->stageCount * sizeof(const VkPipelineShaderStageCreateInfo));
+            to->stageCount = from->stageCount;
+            for (uint32_t i = 0; i < (uint32_t)from->stageCount; ++i)
+            {
+                deepcopy_VkPipelineShaderStageCreateInfo(alloc, rootType, from->pStages + i, (VkPipelineShaderStageCreateInfo*)(to->pStages + i));
+            }
+        }
+    }
+    if (from)
+    {
+        to->pGroups = nullptr;
+        if (from->pGroups)
+        {
+            to->pGroups = (VkRayTracingShaderGroupCreateInfoKHR*)alloc->alloc(from->groupCount * sizeof(const VkRayTracingShaderGroupCreateInfoKHR));
+            to->groupCount = from->groupCount;
+            for (uint32_t i = 0; i < (uint32_t)from->groupCount; ++i)
+            {
+                deepcopy_VkRayTracingShaderGroupCreateInfoKHR(alloc, rootType, from->pGroups + i, (VkRayTracingShaderGroupCreateInfoKHR*)(to->pGroups + i));
+            }
+        }
+    }
+    to->pLibraryInfo = nullptr;
+    if (from->pLibraryInfo)
+    {
+        to->pLibraryInfo = (VkPipelineLibraryCreateInfoKHR*)alloc->alloc(sizeof(const VkPipelineLibraryCreateInfoKHR));
+        deepcopy_VkPipelineLibraryCreateInfoKHR(alloc, rootType, from->pLibraryInfo, (VkPipelineLibraryCreateInfoKHR*)(to->pLibraryInfo));
+    }
+    to->pLibraryInterface = nullptr;
+    if (from->pLibraryInterface)
+    {
+        to->pLibraryInterface = (VkRayTracingPipelineInterfaceCreateInfoKHR*)alloc->alloc(sizeof(const VkRayTracingPipelineInterfaceCreateInfoKHR));
+        deepcopy_VkRayTracingPipelineInterfaceCreateInfoKHR(alloc, rootType, from->pLibraryInterface, (VkRayTracingPipelineInterfaceCreateInfoKHR*)(to->pLibraryInterface));
+    }
+    to->pDynamicState = nullptr;
+    if (from->pDynamicState)
+    {
+        to->pDynamicState = (VkPipelineDynamicStateCreateInfo*)alloc->alloc(sizeof(const VkPipelineDynamicStateCreateInfo));
+        deepcopy_VkPipelineDynamicStateCreateInfo(alloc, rootType, from->pDynamicState, (VkPipelineDynamicStateCreateInfo*)(to->pDynamicState));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPipelineFeaturesKHR* from,
+    VkPhysicalDeviceRayTracingPipelineFeaturesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPipelinePropertiesKHR* from,
+    VkPhysicalDeviceRayTracingPipelinePropertiesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
+void deepcopy_VkStridedDeviceAddressRegionKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkStridedDeviceAddressRegionKHR* from,
+    VkStridedDeviceAddressRegionKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
+void deepcopy_VkTraceRaysIndirectCommandKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkTraceRaysIndirectCommandKHR* from,
+    VkTraceRaysIndirectCommandKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+}
+
 #endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
-#endif
-#ifdef VK_GOOGLE_address_space_info
-#endif
-#ifdef VK_GOOGLE_free_memory_sync
+#ifdef VK_KHR_ray_query
+void deepcopy_VkPhysicalDeviceRayQueryFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayQueryFeaturesKHR* from,
+    VkPhysicalDeviceRayQueryFeaturesKHR* to)
+{
+    (void)alloc;
+    (void)rootType;
+    *to = *from;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = from->sType;
+    }
+    const void* from_pNext = from;
+    size_t pNext_size = 0u;
+    while (!pNext_size && from_pNext)
+    {
+        from_pNext = static_cast<const vk_struct_common*>(from_pNext)->pNext;
+        pNext_size = goldfish_vk_extension_struct_size(rootType, from_pNext);
+    }
+    to->pNext = nullptr;
+    if (pNext_size)
+    {
+        to->pNext = (void*)alloc->alloc(pNext_size);
+        deepcopy_extension_struct(alloc, rootType, from_pNext, (void*)(to->pNext));
+    }
+}
+
 #endif
 void deepcopy_extension_struct(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const void* structExtension,
     void* structExtension_out)
 {
@@ -6405,645 +16945,1613 @@
 #ifdef VK_VERSION_1_1
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES:
         {
-            deepcopy_VkPhysicalDeviceSubgroupProperties(pool, reinterpret_cast<const VkPhysicalDeviceSubgroupProperties*>(structExtension), reinterpret_cast<VkPhysicalDeviceSubgroupProperties*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceSubgroupProperties(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceSubgroupProperties*>(structExtension), reinterpret_cast<VkPhysicalDeviceSubgroupProperties*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES:
         {
-            deepcopy_VkPhysicalDevice16BitStorageFeatures(pool, reinterpret_cast<const VkPhysicalDevice16BitStorageFeatures*>(structExtension), reinterpret_cast<VkPhysicalDevice16BitStorageFeatures*>(structExtension_out));
+            deepcopy_VkPhysicalDevice16BitStorageFeatures(alloc, rootType, reinterpret_cast<const VkPhysicalDevice16BitStorageFeatures*>(structExtension), reinterpret_cast<VkPhysicalDevice16BitStorageFeatures*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
         {
-            deepcopy_VkMemoryDedicatedRequirements(pool, reinterpret_cast<const VkMemoryDedicatedRequirements*>(structExtension), reinterpret_cast<VkMemoryDedicatedRequirements*>(structExtension_out));
+            deepcopy_VkMemoryDedicatedRequirements(alloc, rootType, reinterpret_cast<const VkMemoryDedicatedRequirements*>(structExtension), reinterpret_cast<VkMemoryDedicatedRequirements*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
         {
-            deepcopy_VkMemoryDedicatedAllocateInfo(pool, reinterpret_cast<const VkMemoryDedicatedAllocateInfo*>(structExtension), reinterpret_cast<VkMemoryDedicatedAllocateInfo*>(structExtension_out));
+            deepcopy_VkMemoryDedicatedAllocateInfo(alloc, rootType, reinterpret_cast<const VkMemoryDedicatedAllocateInfo*>(structExtension), reinterpret_cast<VkMemoryDedicatedAllocateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO:
         {
-            deepcopy_VkMemoryAllocateFlagsInfo(pool, reinterpret_cast<const VkMemoryAllocateFlagsInfo*>(structExtension), reinterpret_cast<VkMemoryAllocateFlagsInfo*>(structExtension_out));
+            deepcopy_VkMemoryAllocateFlagsInfo(alloc, rootType, reinterpret_cast<const VkMemoryAllocateFlagsInfo*>(structExtension), reinterpret_cast<VkMemoryAllocateFlagsInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO:
         {
-            deepcopy_VkDeviceGroupRenderPassBeginInfo(pool, reinterpret_cast<const VkDeviceGroupRenderPassBeginInfo*>(structExtension), reinterpret_cast<VkDeviceGroupRenderPassBeginInfo*>(structExtension_out));
+            deepcopy_VkDeviceGroupRenderPassBeginInfo(alloc, rootType, reinterpret_cast<const VkDeviceGroupRenderPassBeginInfo*>(structExtension), reinterpret_cast<VkDeviceGroupRenderPassBeginInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO:
         {
-            deepcopy_VkDeviceGroupCommandBufferBeginInfo(pool, reinterpret_cast<const VkDeviceGroupCommandBufferBeginInfo*>(structExtension), reinterpret_cast<VkDeviceGroupCommandBufferBeginInfo*>(structExtension_out));
+            deepcopy_VkDeviceGroupCommandBufferBeginInfo(alloc, rootType, reinterpret_cast<const VkDeviceGroupCommandBufferBeginInfo*>(structExtension), reinterpret_cast<VkDeviceGroupCommandBufferBeginInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO:
         {
-            deepcopy_VkDeviceGroupSubmitInfo(pool, reinterpret_cast<const VkDeviceGroupSubmitInfo*>(structExtension), reinterpret_cast<VkDeviceGroupSubmitInfo*>(structExtension_out));
+            deepcopy_VkDeviceGroupSubmitInfo(alloc, rootType, reinterpret_cast<const VkDeviceGroupSubmitInfo*>(structExtension), reinterpret_cast<VkDeviceGroupSubmitInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO:
         {
-            deepcopy_VkDeviceGroupBindSparseInfo(pool, reinterpret_cast<const VkDeviceGroupBindSparseInfo*>(structExtension), reinterpret_cast<VkDeviceGroupBindSparseInfo*>(structExtension_out));
+            deepcopy_VkDeviceGroupBindSparseInfo(alloc, rootType, reinterpret_cast<const VkDeviceGroupBindSparseInfo*>(structExtension), reinterpret_cast<VkDeviceGroupBindSparseInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO:
         {
-            deepcopy_VkBindBufferMemoryDeviceGroupInfo(pool, reinterpret_cast<const VkBindBufferMemoryDeviceGroupInfo*>(structExtension), reinterpret_cast<VkBindBufferMemoryDeviceGroupInfo*>(structExtension_out));
+            deepcopy_VkBindBufferMemoryDeviceGroupInfo(alloc, rootType, reinterpret_cast<const VkBindBufferMemoryDeviceGroupInfo*>(structExtension), reinterpret_cast<VkBindBufferMemoryDeviceGroupInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO:
         {
-            deepcopy_VkBindImageMemoryDeviceGroupInfo(pool, reinterpret_cast<const VkBindImageMemoryDeviceGroupInfo*>(structExtension), reinterpret_cast<VkBindImageMemoryDeviceGroupInfo*>(structExtension_out));
+            deepcopy_VkBindImageMemoryDeviceGroupInfo(alloc, rootType, reinterpret_cast<const VkBindImageMemoryDeviceGroupInfo*>(structExtension), reinterpret_cast<VkBindImageMemoryDeviceGroupInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO:
         {
-            deepcopy_VkDeviceGroupDeviceCreateInfo(pool, reinterpret_cast<const VkDeviceGroupDeviceCreateInfo*>(structExtension), reinterpret_cast<VkDeviceGroupDeviceCreateInfo*>(structExtension_out));
+            deepcopy_VkDeviceGroupDeviceCreateInfo(alloc, rootType, reinterpret_cast<const VkDeviceGroupDeviceCreateInfo*>(structExtension), reinterpret_cast<VkDeviceGroupDeviceCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
         {
-            deepcopy_VkPhysicalDeviceFeatures2(pool, reinterpret_cast<const VkPhysicalDeviceFeatures2*>(structExtension), reinterpret_cast<VkPhysicalDeviceFeatures2*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceFeatures2(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceFeatures2*>(structExtension), reinterpret_cast<VkPhysicalDeviceFeatures2*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES:
         {
-            deepcopy_VkPhysicalDevicePointClippingProperties(pool, reinterpret_cast<const VkPhysicalDevicePointClippingProperties*>(structExtension), reinterpret_cast<VkPhysicalDevicePointClippingProperties*>(structExtension_out));
+            deepcopy_VkPhysicalDevicePointClippingProperties(alloc, rootType, reinterpret_cast<const VkPhysicalDevicePointClippingProperties*>(structExtension), reinterpret_cast<VkPhysicalDevicePointClippingProperties*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO:
         {
-            deepcopy_VkRenderPassInputAttachmentAspectCreateInfo(pool, reinterpret_cast<const VkRenderPassInputAttachmentAspectCreateInfo*>(structExtension), reinterpret_cast<VkRenderPassInputAttachmentAspectCreateInfo*>(structExtension_out));
+            deepcopy_VkRenderPassInputAttachmentAspectCreateInfo(alloc, rootType, reinterpret_cast<const VkRenderPassInputAttachmentAspectCreateInfo*>(structExtension), reinterpret_cast<VkRenderPassInputAttachmentAspectCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO:
         {
-            deepcopy_VkImageViewUsageCreateInfo(pool, reinterpret_cast<const VkImageViewUsageCreateInfo*>(structExtension), reinterpret_cast<VkImageViewUsageCreateInfo*>(structExtension_out));
+            deepcopy_VkImageViewUsageCreateInfo(alloc, rootType, reinterpret_cast<const VkImageViewUsageCreateInfo*>(structExtension), reinterpret_cast<VkImageViewUsageCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO:
         {
-            deepcopy_VkPipelineTessellationDomainOriginStateCreateInfo(pool, reinterpret_cast<const VkPipelineTessellationDomainOriginStateCreateInfo*>(structExtension), reinterpret_cast<VkPipelineTessellationDomainOriginStateCreateInfo*>(structExtension_out));
+            deepcopy_VkPipelineTessellationDomainOriginStateCreateInfo(alloc, rootType, reinterpret_cast<const VkPipelineTessellationDomainOriginStateCreateInfo*>(structExtension), reinterpret_cast<VkPipelineTessellationDomainOriginStateCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
         {
-            deepcopy_VkRenderPassMultiviewCreateInfo(pool, reinterpret_cast<const VkRenderPassMultiviewCreateInfo*>(structExtension), reinterpret_cast<VkRenderPassMultiviewCreateInfo*>(structExtension_out));
+            deepcopy_VkRenderPassMultiviewCreateInfo(alloc, rootType, reinterpret_cast<const VkRenderPassMultiviewCreateInfo*>(structExtension), reinterpret_cast<VkRenderPassMultiviewCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES:
         {
-            deepcopy_VkPhysicalDeviceMultiviewFeatures(pool, reinterpret_cast<const VkPhysicalDeviceMultiviewFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceMultiviewFeatures*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceMultiviewFeatures(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceMultiviewFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceMultiviewFeatures*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES:
         {
-            deepcopy_VkPhysicalDeviceMultiviewProperties(pool, reinterpret_cast<const VkPhysicalDeviceMultiviewProperties*>(structExtension), reinterpret_cast<VkPhysicalDeviceMultiviewProperties*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceMultiviewProperties(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceMultiviewProperties*>(structExtension), reinterpret_cast<VkPhysicalDeviceMultiviewProperties*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES:
         {
-            deepcopy_VkPhysicalDeviceVariablePointerFeatures(pool, reinterpret_cast<const VkPhysicalDeviceVariablePointerFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceVariablePointerFeatures*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceVariablePointersFeatures(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceVariablePointersFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceVariablePointersFeatures*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES:
         {
-            deepcopy_VkPhysicalDeviceProtectedMemoryFeatures(pool, reinterpret_cast<const VkPhysicalDeviceProtectedMemoryFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceProtectedMemoryFeatures*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceProtectedMemoryFeatures(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceProtectedMemoryFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceProtectedMemoryFeatures*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES:
         {
-            deepcopy_VkPhysicalDeviceProtectedMemoryProperties(pool, reinterpret_cast<const VkPhysicalDeviceProtectedMemoryProperties*>(structExtension), reinterpret_cast<VkPhysicalDeviceProtectedMemoryProperties*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceProtectedMemoryProperties(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceProtectedMemoryProperties*>(structExtension), reinterpret_cast<VkPhysicalDeviceProtectedMemoryProperties*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO:
         {
-            deepcopy_VkProtectedSubmitInfo(pool, reinterpret_cast<const VkProtectedSubmitInfo*>(structExtension), reinterpret_cast<VkProtectedSubmitInfo*>(structExtension_out));
+            deepcopy_VkProtectedSubmitInfo(alloc, rootType, reinterpret_cast<const VkProtectedSubmitInfo*>(structExtension), reinterpret_cast<VkProtectedSubmitInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO:
         {
-            deepcopy_VkSamplerYcbcrConversionInfo(pool, reinterpret_cast<const VkSamplerYcbcrConversionInfo*>(structExtension), reinterpret_cast<VkSamplerYcbcrConversionInfo*>(structExtension_out));
+            deepcopy_VkSamplerYcbcrConversionInfo(alloc, rootType, reinterpret_cast<const VkSamplerYcbcrConversionInfo*>(structExtension), reinterpret_cast<VkSamplerYcbcrConversionInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO:
         {
-            deepcopy_VkBindImagePlaneMemoryInfo(pool, reinterpret_cast<const VkBindImagePlaneMemoryInfo*>(structExtension), reinterpret_cast<VkBindImagePlaneMemoryInfo*>(structExtension_out));
+            deepcopy_VkBindImagePlaneMemoryInfo(alloc, rootType, reinterpret_cast<const VkBindImagePlaneMemoryInfo*>(structExtension), reinterpret_cast<VkBindImagePlaneMemoryInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO:
         {
-            deepcopy_VkImagePlaneMemoryRequirementsInfo(pool, reinterpret_cast<const VkImagePlaneMemoryRequirementsInfo*>(structExtension), reinterpret_cast<VkImagePlaneMemoryRequirementsInfo*>(structExtension_out));
+            deepcopy_VkImagePlaneMemoryRequirementsInfo(alloc, rootType, reinterpret_cast<const VkImagePlaneMemoryRequirementsInfo*>(structExtension), reinterpret_cast<VkImagePlaneMemoryRequirementsInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES:
         {
-            deepcopy_VkPhysicalDeviceSamplerYcbcrConversionFeatures(pool, reinterpret_cast<const VkPhysicalDeviceSamplerYcbcrConversionFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceSamplerYcbcrConversionFeatures*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceSamplerYcbcrConversionFeatures(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceSamplerYcbcrConversionFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceSamplerYcbcrConversionFeatures*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES:
         {
-            deepcopy_VkSamplerYcbcrConversionImageFormatProperties(pool, reinterpret_cast<const VkSamplerYcbcrConversionImageFormatProperties*>(structExtension), reinterpret_cast<VkSamplerYcbcrConversionImageFormatProperties*>(structExtension_out));
+            deepcopy_VkSamplerYcbcrConversionImageFormatProperties(alloc, rootType, reinterpret_cast<const VkSamplerYcbcrConversionImageFormatProperties*>(structExtension), reinterpret_cast<VkSamplerYcbcrConversionImageFormatProperties*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO:
         {
-            deepcopy_VkPhysicalDeviceExternalImageFormatInfo(pool, reinterpret_cast<const VkPhysicalDeviceExternalImageFormatInfo*>(structExtension), reinterpret_cast<VkPhysicalDeviceExternalImageFormatInfo*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceExternalImageFormatInfo(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceExternalImageFormatInfo*>(structExtension), reinterpret_cast<VkPhysicalDeviceExternalImageFormatInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES:
         {
-            deepcopy_VkExternalImageFormatProperties(pool, reinterpret_cast<const VkExternalImageFormatProperties*>(structExtension), reinterpret_cast<VkExternalImageFormatProperties*>(structExtension_out));
+            deepcopy_VkExternalImageFormatProperties(alloc, rootType, reinterpret_cast<const VkExternalImageFormatProperties*>(structExtension), reinterpret_cast<VkExternalImageFormatProperties*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES:
         {
-            deepcopy_VkPhysicalDeviceIDProperties(pool, reinterpret_cast<const VkPhysicalDeviceIDProperties*>(structExtension), reinterpret_cast<VkPhysicalDeviceIDProperties*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceIDProperties(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceIDProperties*>(structExtension), reinterpret_cast<VkPhysicalDeviceIDProperties*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO:
         {
-            deepcopy_VkExternalMemoryImageCreateInfo(pool, reinterpret_cast<const VkExternalMemoryImageCreateInfo*>(structExtension), reinterpret_cast<VkExternalMemoryImageCreateInfo*>(structExtension_out));
+            deepcopy_VkExternalMemoryImageCreateInfo(alloc, rootType, reinterpret_cast<const VkExternalMemoryImageCreateInfo*>(structExtension), reinterpret_cast<VkExternalMemoryImageCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO:
         {
-            deepcopy_VkExternalMemoryBufferCreateInfo(pool, reinterpret_cast<const VkExternalMemoryBufferCreateInfo*>(structExtension), reinterpret_cast<VkExternalMemoryBufferCreateInfo*>(structExtension_out));
+            deepcopy_VkExternalMemoryBufferCreateInfo(alloc, rootType, reinterpret_cast<const VkExternalMemoryBufferCreateInfo*>(structExtension), reinterpret_cast<VkExternalMemoryBufferCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
         {
-            deepcopy_VkExportMemoryAllocateInfo(pool, reinterpret_cast<const VkExportMemoryAllocateInfo*>(structExtension), reinterpret_cast<VkExportMemoryAllocateInfo*>(structExtension_out));
+            deepcopy_VkExportMemoryAllocateInfo(alloc, rootType, reinterpret_cast<const VkExportMemoryAllocateInfo*>(structExtension), reinterpret_cast<VkExportMemoryAllocateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO:
         {
-            deepcopy_VkExportFenceCreateInfo(pool, reinterpret_cast<const VkExportFenceCreateInfo*>(structExtension), reinterpret_cast<VkExportFenceCreateInfo*>(structExtension_out));
+            deepcopy_VkExportFenceCreateInfo(alloc, rootType, reinterpret_cast<const VkExportFenceCreateInfo*>(structExtension), reinterpret_cast<VkExportFenceCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO:
         {
-            deepcopy_VkExportSemaphoreCreateInfo(pool, reinterpret_cast<const VkExportSemaphoreCreateInfo*>(structExtension), reinterpret_cast<VkExportSemaphoreCreateInfo*>(structExtension_out));
+            deepcopy_VkExportSemaphoreCreateInfo(alloc, rootType, reinterpret_cast<const VkExportSemaphoreCreateInfo*>(structExtension), reinterpret_cast<VkExportSemaphoreCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES:
         {
-            deepcopy_VkPhysicalDeviceMaintenance3Properties(pool, reinterpret_cast<const VkPhysicalDeviceMaintenance3Properties*>(structExtension), reinterpret_cast<VkPhysicalDeviceMaintenance3Properties*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceMaintenance3Properties(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceMaintenance3Properties*>(structExtension), reinterpret_cast<VkPhysicalDeviceMaintenance3Properties*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
         {
-            deepcopy_VkPhysicalDeviceShaderDrawParameterFeatures(pool, reinterpret_cast<const VkPhysicalDeviceShaderDrawParameterFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceShaderDrawParameterFeatures*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceShaderDrawParametersFeatures(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceShaderDrawParametersFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceShaderDrawParametersFeatures*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
+        {
+            deepcopy_VkPhysicalDeviceVulkan11Features(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceVulkan11Features*>(structExtension), reinterpret_cast<VkPhysicalDeviceVulkan11Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
+        {
+            deepcopy_VkPhysicalDeviceVulkan11Properties(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceVulkan11Properties*>(structExtension), reinterpret_cast<VkPhysicalDeviceVulkan11Properties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
+        {
+            deepcopy_VkPhysicalDeviceVulkan12Features(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceVulkan12Features*>(structExtension), reinterpret_cast<VkPhysicalDeviceVulkan12Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
+        {
+            deepcopy_VkPhysicalDeviceVulkan12Properties(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceVulkan12Properties*>(structExtension), reinterpret_cast<VkPhysicalDeviceVulkan12Properties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO:
+        {
+            deepcopy_VkImageFormatListCreateInfo(alloc, rootType, reinterpret_cast<const VkImageFormatListCreateInfo*>(structExtension), reinterpret_cast<VkImageFormatListCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES:
+        {
+            deepcopy_VkPhysicalDevice8BitStorageFeatures(alloc, rootType, reinterpret_cast<const VkPhysicalDevice8BitStorageFeatures*>(structExtension), reinterpret_cast<VkPhysicalDevice8BitStorageFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES:
+        {
+            deepcopy_VkPhysicalDeviceDriverProperties(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceDriverProperties*>(structExtension), reinterpret_cast<VkPhysicalDeviceDriverProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES:
+        {
+            deepcopy_VkPhysicalDeviceShaderAtomicInt64Features(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceShaderAtomicInt64Features*>(structExtension), reinterpret_cast<VkPhysicalDeviceShaderAtomicInt64Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES:
+        {
+            deepcopy_VkPhysicalDeviceShaderFloat16Int8Features(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceShaderFloat16Int8Features*>(structExtension), reinterpret_cast<VkPhysicalDeviceShaderFloat16Int8Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES:
+        {
+            deepcopy_VkPhysicalDeviceFloatControlsProperties(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceFloatControlsProperties*>(structExtension), reinterpret_cast<VkPhysicalDeviceFloatControlsProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO:
+        {
+            deepcopy_VkDescriptorSetLayoutBindingFlagsCreateInfo(alloc, rootType, reinterpret_cast<const VkDescriptorSetLayoutBindingFlagsCreateInfo*>(structExtension), reinterpret_cast<VkDescriptorSetLayoutBindingFlagsCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES:
+        {
+            deepcopy_VkPhysicalDeviceDescriptorIndexingFeatures(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceDescriptorIndexingFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceDescriptorIndexingFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES:
+        {
+            deepcopy_VkPhysicalDeviceDescriptorIndexingProperties(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceDescriptorIndexingProperties*>(structExtension), reinterpret_cast<VkPhysicalDeviceDescriptorIndexingProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO:
+        {
+            deepcopy_VkDescriptorSetVariableDescriptorCountAllocateInfo(alloc, rootType, reinterpret_cast<const VkDescriptorSetVariableDescriptorCountAllocateInfo*>(structExtension), reinterpret_cast<VkDescriptorSetVariableDescriptorCountAllocateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT:
+        {
+            deepcopy_VkDescriptorSetVariableDescriptorCountLayoutSupport(alloc, rootType, reinterpret_cast<const VkDescriptorSetVariableDescriptorCountLayoutSupport*>(structExtension), reinterpret_cast<VkDescriptorSetVariableDescriptorCountLayoutSupport*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE:
+        {
+            deepcopy_VkSubpassDescriptionDepthStencilResolve(alloc, rootType, reinterpret_cast<const VkSubpassDescriptionDepthStencilResolve*>(structExtension), reinterpret_cast<VkSubpassDescriptionDepthStencilResolve*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES:
+        {
+            deepcopy_VkPhysicalDeviceDepthStencilResolveProperties(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceDepthStencilResolveProperties*>(structExtension), reinterpret_cast<VkPhysicalDeviceDepthStencilResolveProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES:
+        {
+            deepcopy_VkPhysicalDeviceScalarBlockLayoutFeatures(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceScalarBlockLayoutFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceScalarBlockLayoutFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO:
+        {
+            deepcopy_VkImageStencilUsageCreateInfo(alloc, rootType, reinterpret_cast<const VkImageStencilUsageCreateInfo*>(structExtension), reinterpret_cast<VkImageStencilUsageCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO:
+        {
+            deepcopy_VkSamplerReductionModeCreateInfo(alloc, rootType, reinterpret_cast<const VkSamplerReductionModeCreateInfo*>(structExtension), reinterpret_cast<VkSamplerReductionModeCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES:
+        {
+            deepcopy_VkPhysicalDeviceSamplerFilterMinmaxProperties(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceSamplerFilterMinmaxProperties*>(structExtension), reinterpret_cast<VkPhysicalDeviceSamplerFilterMinmaxProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
+        {
+            deepcopy_VkPhysicalDeviceVulkanMemoryModelFeatures(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceVulkanMemoryModelFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceVulkanMemoryModelFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES:
+        {
+            deepcopy_VkPhysicalDeviceImagelessFramebufferFeatures(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceImagelessFramebufferFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceImagelessFramebufferFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO:
+        {
+            deepcopy_VkFramebufferAttachmentsCreateInfo(alloc, rootType, reinterpret_cast<const VkFramebufferAttachmentsCreateInfo*>(structExtension), reinterpret_cast<VkFramebufferAttachmentsCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO:
+        {
+            deepcopy_VkRenderPassAttachmentBeginInfo(alloc, rootType, reinterpret_cast<const VkRenderPassAttachmentBeginInfo*>(structExtension), reinterpret_cast<VkRenderPassAttachmentBeginInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES:
+        {
+            deepcopy_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceUniformBufferStandardLayoutFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceUniformBufferStandardLayoutFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES:
+        {
+            deepcopy_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
+        {
+            deepcopy_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT:
+        {
+            deepcopy_VkAttachmentReferenceStencilLayout(alloc, rootType, reinterpret_cast<const VkAttachmentReferenceStencilLayout*>(structExtension), reinterpret_cast<VkAttachmentReferenceStencilLayout*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT:
+        {
+            deepcopy_VkAttachmentDescriptionStencilLayout(alloc, rootType, reinterpret_cast<const VkAttachmentDescriptionStencilLayout*>(structExtension), reinterpret_cast<VkAttachmentDescriptionStencilLayout*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES:
+        {
+            deepcopy_VkPhysicalDeviceHostQueryResetFeatures(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceHostQueryResetFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceHostQueryResetFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES:
+        {
+            deepcopy_VkPhysicalDeviceTimelineSemaphoreFeatures(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceTimelineSemaphoreFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceTimelineSemaphoreFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES:
+        {
+            deepcopy_VkPhysicalDeviceTimelineSemaphoreProperties(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceTimelineSemaphoreProperties*>(structExtension), reinterpret_cast<VkPhysicalDeviceTimelineSemaphoreProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO:
+        {
+            deepcopy_VkSemaphoreTypeCreateInfo(alloc, rootType, reinterpret_cast<const VkSemaphoreTypeCreateInfo*>(structExtension), reinterpret_cast<VkSemaphoreTypeCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
+        {
+            deepcopy_VkTimelineSemaphoreSubmitInfo(alloc, rootType, reinterpret_cast<const VkTimelineSemaphoreSubmitInfo*>(structExtension), reinterpret_cast<VkTimelineSemaphoreSubmitInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
+        {
+            deepcopy_VkPhysicalDeviceBufferDeviceAddressFeatures(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceBufferDeviceAddressFeatures*>(structExtension), reinterpret_cast<VkPhysicalDeviceBufferDeviceAddressFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO:
+        {
+            deepcopy_VkBufferOpaqueCaptureAddressCreateInfo(alloc, rootType, reinterpret_cast<const VkBufferOpaqueCaptureAddressCreateInfo*>(structExtension), reinterpret_cast<VkBufferOpaqueCaptureAddressCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO:
+        {
+            deepcopy_VkMemoryOpaqueCaptureAddressAllocateInfo(alloc, rootType, reinterpret_cast<const VkMemoryOpaqueCaptureAddressAllocateInfo*>(structExtension), reinterpret_cast<VkMemoryOpaqueCaptureAddressAllocateInfo*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_swapchain
         case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
         {
-            deepcopy_VkImageSwapchainCreateInfoKHR(pool, reinterpret_cast<const VkImageSwapchainCreateInfoKHR*>(structExtension), reinterpret_cast<VkImageSwapchainCreateInfoKHR*>(structExtension_out));
+            deepcopy_VkImageSwapchainCreateInfoKHR(alloc, rootType, reinterpret_cast<const VkImageSwapchainCreateInfoKHR*>(structExtension), reinterpret_cast<VkImageSwapchainCreateInfoKHR*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR:
         {
-            deepcopy_VkBindImageMemorySwapchainInfoKHR(pool, reinterpret_cast<const VkBindImageMemorySwapchainInfoKHR*>(structExtension), reinterpret_cast<VkBindImageMemorySwapchainInfoKHR*>(structExtension_out));
+            deepcopy_VkBindImageMemorySwapchainInfoKHR(alloc, rootType, reinterpret_cast<const VkBindImageMemorySwapchainInfoKHR*>(structExtension), reinterpret_cast<VkBindImageMemorySwapchainInfoKHR*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR:
         {
-            deepcopy_VkDeviceGroupPresentInfoKHR(pool, reinterpret_cast<const VkDeviceGroupPresentInfoKHR*>(structExtension), reinterpret_cast<VkDeviceGroupPresentInfoKHR*>(structExtension_out));
+            deepcopy_VkDeviceGroupPresentInfoKHR(alloc, rootType, reinterpret_cast<const VkDeviceGroupPresentInfoKHR*>(structExtension), reinterpret_cast<VkDeviceGroupPresentInfoKHR*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR:
         {
-            deepcopy_VkDeviceGroupSwapchainCreateInfoKHR(pool, reinterpret_cast<const VkDeviceGroupSwapchainCreateInfoKHR*>(structExtension), reinterpret_cast<VkDeviceGroupSwapchainCreateInfoKHR*>(structExtension_out));
+            deepcopy_VkDeviceGroupSwapchainCreateInfoKHR(alloc, rootType, reinterpret_cast<const VkDeviceGroupSwapchainCreateInfoKHR*>(structExtension), reinterpret_cast<VkDeviceGroupSwapchainCreateInfoKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_display_swapchain
         case VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR:
         {
-            deepcopy_VkDisplayPresentInfoKHR(pool, reinterpret_cast<const VkDisplayPresentInfoKHR*>(structExtension), reinterpret_cast<VkDisplayPresentInfoKHR*>(structExtension_out));
+            deepcopy_VkDisplayPresentInfoKHR(alloc, rootType, reinterpret_cast<const VkDisplayPresentInfoKHR*>(structExtension), reinterpret_cast<VkDisplayPresentInfoKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_external_memory_win32
         case VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR:
         {
-            deepcopy_VkImportMemoryWin32HandleInfoKHR(pool, reinterpret_cast<const VkImportMemoryWin32HandleInfoKHR*>(structExtension), reinterpret_cast<VkImportMemoryWin32HandleInfoKHR*>(structExtension_out));
+            deepcopy_VkImportMemoryWin32HandleInfoKHR(alloc, rootType, reinterpret_cast<const VkImportMemoryWin32HandleInfoKHR*>(structExtension), reinterpret_cast<VkImportMemoryWin32HandleInfoKHR*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR:
         {
-            deepcopy_VkExportMemoryWin32HandleInfoKHR(pool, reinterpret_cast<const VkExportMemoryWin32HandleInfoKHR*>(structExtension), reinterpret_cast<VkExportMemoryWin32HandleInfoKHR*>(structExtension_out));
+            deepcopy_VkExportMemoryWin32HandleInfoKHR(alloc, rootType, reinterpret_cast<const VkExportMemoryWin32HandleInfoKHR*>(structExtension), reinterpret_cast<VkExportMemoryWin32HandleInfoKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_external_memory_fd
         case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
         {
-            deepcopy_VkImportMemoryFdInfoKHR(pool, reinterpret_cast<const VkImportMemoryFdInfoKHR*>(structExtension), reinterpret_cast<VkImportMemoryFdInfoKHR*>(structExtension_out));
+            deepcopy_VkImportMemoryFdInfoKHR(alloc, rootType, reinterpret_cast<const VkImportMemoryFdInfoKHR*>(structExtension), reinterpret_cast<VkImportMemoryFdInfoKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_win32_keyed_mutex
         case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR:
         {
-            deepcopy_VkWin32KeyedMutexAcquireReleaseInfoKHR(pool, reinterpret_cast<const VkWin32KeyedMutexAcquireReleaseInfoKHR*>(structExtension), reinterpret_cast<VkWin32KeyedMutexAcquireReleaseInfoKHR*>(structExtension_out));
+            deepcopy_VkWin32KeyedMutexAcquireReleaseInfoKHR(alloc, rootType, reinterpret_cast<const VkWin32KeyedMutexAcquireReleaseInfoKHR*>(structExtension), reinterpret_cast<VkWin32KeyedMutexAcquireReleaseInfoKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_external_semaphore_win32
         case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR:
         {
-            deepcopy_VkExportSemaphoreWin32HandleInfoKHR(pool, reinterpret_cast<const VkExportSemaphoreWin32HandleInfoKHR*>(structExtension), reinterpret_cast<VkExportSemaphoreWin32HandleInfoKHR*>(structExtension_out));
+            deepcopy_VkExportSemaphoreWin32HandleInfoKHR(alloc, rootType, reinterpret_cast<const VkExportSemaphoreWin32HandleInfoKHR*>(structExtension), reinterpret_cast<VkExportSemaphoreWin32HandleInfoKHR*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR:
         {
-            deepcopy_VkD3D12FenceSubmitInfoKHR(pool, reinterpret_cast<const VkD3D12FenceSubmitInfoKHR*>(structExtension), reinterpret_cast<VkD3D12FenceSubmitInfoKHR*>(structExtension_out));
+            deepcopy_VkD3D12FenceSubmitInfoKHR(alloc, rootType, reinterpret_cast<const VkD3D12FenceSubmitInfoKHR*>(structExtension), reinterpret_cast<VkD3D12FenceSubmitInfoKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_push_descriptor
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR:
         {
-            deepcopy_VkPhysicalDevicePushDescriptorPropertiesKHR(pool, reinterpret_cast<const VkPhysicalDevicePushDescriptorPropertiesKHR*>(structExtension), reinterpret_cast<VkPhysicalDevicePushDescriptorPropertiesKHR*>(structExtension_out));
+            deepcopy_VkPhysicalDevicePushDescriptorPropertiesKHR(alloc, rootType, reinterpret_cast<const VkPhysicalDevicePushDescriptorPropertiesKHR*>(structExtension), reinterpret_cast<VkPhysicalDevicePushDescriptorPropertiesKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_incremental_present
         case VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR:
         {
-            deepcopy_VkPresentRegionsKHR(pool, reinterpret_cast<const VkPresentRegionsKHR*>(structExtension), reinterpret_cast<VkPresentRegionsKHR*>(structExtension_out));
+            deepcopy_VkPresentRegionsKHR(alloc, rootType, reinterpret_cast<const VkPresentRegionsKHR*>(structExtension), reinterpret_cast<VkPresentRegionsKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_shared_presentable_image
         case VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR:
         {
-            deepcopy_VkSharedPresentSurfaceCapabilitiesKHR(pool, reinterpret_cast<const VkSharedPresentSurfaceCapabilitiesKHR*>(structExtension), reinterpret_cast<VkSharedPresentSurfaceCapabilitiesKHR*>(structExtension_out));
+            deepcopy_VkSharedPresentSurfaceCapabilitiesKHR(alloc, rootType, reinterpret_cast<const VkSharedPresentSurfaceCapabilitiesKHR*>(structExtension), reinterpret_cast<VkSharedPresentSurfaceCapabilitiesKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_external_fence_win32
         case VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR:
         {
-            deepcopy_VkExportFenceWin32HandleInfoKHR(pool, reinterpret_cast<const VkExportFenceWin32HandleInfoKHR*>(structExtension), reinterpret_cast<VkExportFenceWin32HandleInfoKHR*>(structExtension_out));
+            deepcopy_VkExportFenceWin32HandleInfoKHR(alloc, rootType, reinterpret_cast<const VkExportFenceWin32HandleInfoKHR*>(structExtension), reinterpret_cast<VkExportFenceWin32HandleInfoKHR*>(structExtension_out));
             break;
         }
 #endif
-#ifdef VK_KHR_image_format_list
-        case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR:
+#ifdef VK_KHR_performance_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR:
         {
-            deepcopy_VkImageFormatListCreateInfoKHR(pool, reinterpret_cast<const VkImageFormatListCreateInfoKHR*>(structExtension), reinterpret_cast<VkImageFormatListCreateInfoKHR*>(structExtension_out));
+            deepcopy_VkPhysicalDevicePerformanceQueryFeaturesKHR(alloc, rootType, reinterpret_cast<const VkPhysicalDevicePerformanceQueryFeaturesKHR*>(structExtension), reinterpret_cast<VkPhysicalDevicePerformanceQueryFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR:
+        {
+            deepcopy_VkPhysicalDevicePerformanceQueryPropertiesKHR(alloc, rootType, reinterpret_cast<const VkPhysicalDevicePerformanceQueryPropertiesKHR*>(structExtension), reinterpret_cast<VkPhysicalDevicePerformanceQueryPropertiesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR:
+        {
+            deepcopy_VkQueryPoolPerformanceCreateInfoKHR(alloc, rootType, reinterpret_cast<const VkQueryPoolPerformanceCreateInfoKHR*>(structExtension), reinterpret_cast<VkQueryPoolPerformanceCreateInfoKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR:
+        {
+            deepcopy_VkPerformanceQuerySubmitInfoKHR(alloc, rootType, reinterpret_cast<const VkPerformanceQuerySubmitInfoKHR*>(structExtension), reinterpret_cast<VkPerformanceQuerySubmitInfoKHR*>(structExtension_out));
             break;
         }
 #endif
-#ifdef VK_KHR_8bit_storage
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR:
+#ifdef VK_KHR_portability_subset
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR:
         {
-            deepcopy_VkPhysicalDevice8BitStorageFeaturesKHR(pool, reinterpret_cast<const VkPhysicalDevice8BitStorageFeaturesKHR*>(structExtension), reinterpret_cast<VkPhysicalDevice8BitStorageFeaturesKHR*>(structExtension_out));
+            deepcopy_VkPhysicalDevicePortabilitySubsetFeaturesKHR(alloc, rootType, reinterpret_cast<const VkPhysicalDevicePortabilitySubsetFeaturesKHR*>(structExtension), reinterpret_cast<VkPhysicalDevicePortabilitySubsetFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR:
+        {
+            deepcopy_VkPhysicalDevicePortabilitySubsetPropertiesKHR(alloc, rootType, reinterpret_cast<const VkPhysicalDevicePortabilitySubsetPropertiesKHR*>(structExtension), reinterpret_cast<VkPhysicalDevicePortabilitySubsetPropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_shader_clock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR:
+        {
+            deepcopy_VkPhysicalDeviceShaderClockFeaturesKHR(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceShaderClockFeaturesKHR*>(structExtension), reinterpret_cast<VkPhysicalDeviceShaderClockFeaturesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR:
+        {
+            deepcopy_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR*>(structExtension), reinterpret_cast<VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+        case VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR:
+        {
+            deepcopy_VkFragmentShadingRateAttachmentInfoKHR(alloc, rootType, reinterpret_cast<const VkFragmentShadingRateAttachmentInfoKHR*>(structExtension), reinterpret_cast<VkFragmentShadingRateAttachmentInfoKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR:
+        {
+            deepcopy_VkPipelineFragmentShadingRateStateCreateInfoKHR(alloc, rootType, reinterpret_cast<const VkPipelineFragmentShadingRateStateCreateInfoKHR*>(structExtension), reinterpret_cast<VkPipelineFragmentShadingRateStateCreateInfoKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR:
+        {
+            deepcopy_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShadingRateFeaturesKHR*>(structExtension), reinterpret_cast<VkPhysicalDeviceFragmentShadingRateFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR:
+        {
+            deepcopy_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShadingRatePropertiesKHR*>(structExtension), reinterpret_cast<VkPhysicalDeviceFragmentShadingRatePropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+        case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR:
+        {
+            deepcopy_VkSurfaceProtectedCapabilitiesKHR(alloc, rootType, reinterpret_cast<const VkSurfaceProtectedCapabilitiesKHR*>(structExtension), reinterpret_cast<VkSurfaceProtectedCapabilitiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR:
+        {
+            deepcopy_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(alloc, rootType, reinterpret_cast<const VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR*>(structExtension), reinterpret_cast<VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_ANDROID_native_buffer
         case VK_STRUCTURE_TYPE_NATIVE_BUFFER_ANDROID:
         {
-            deepcopy_VkNativeBufferANDROID(pool, reinterpret_cast<const VkNativeBufferANDROID*>(structExtension), reinterpret_cast<VkNativeBufferANDROID*>(structExtension_out));
+            deepcopy_VkNativeBufferANDROID(alloc, rootType, reinterpret_cast<const VkNativeBufferANDROID*>(structExtension), reinterpret_cast<VkNativeBufferANDROID*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_debug_report
         case VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT:
         {
-            deepcopy_VkDebugReportCallbackCreateInfoEXT(pool, reinterpret_cast<const VkDebugReportCallbackCreateInfoEXT*>(structExtension), reinterpret_cast<VkDebugReportCallbackCreateInfoEXT*>(structExtension_out));
+            deepcopy_VkDebugReportCallbackCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkDebugReportCallbackCreateInfoEXT*>(structExtension), reinterpret_cast<VkDebugReportCallbackCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_AMD_rasterization_order
         case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD:
         {
-            deepcopy_VkPipelineRasterizationStateRasterizationOrderAMD(pool, reinterpret_cast<const VkPipelineRasterizationStateRasterizationOrderAMD*>(structExtension), reinterpret_cast<VkPipelineRasterizationStateRasterizationOrderAMD*>(structExtension_out));
+            deepcopy_VkPipelineRasterizationStateRasterizationOrderAMD(alloc, rootType, reinterpret_cast<const VkPipelineRasterizationStateRasterizationOrderAMD*>(structExtension), reinterpret_cast<VkPipelineRasterizationStateRasterizationOrderAMD*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_dedicated_allocation
         case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV:
         {
-            deepcopy_VkDedicatedAllocationImageCreateInfoNV(pool, reinterpret_cast<const VkDedicatedAllocationImageCreateInfoNV*>(structExtension), reinterpret_cast<VkDedicatedAllocationImageCreateInfoNV*>(structExtension_out));
+            deepcopy_VkDedicatedAllocationImageCreateInfoNV(alloc, rootType, reinterpret_cast<const VkDedicatedAllocationImageCreateInfoNV*>(structExtension), reinterpret_cast<VkDedicatedAllocationImageCreateInfoNV*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV:
         {
-            deepcopy_VkDedicatedAllocationBufferCreateInfoNV(pool, reinterpret_cast<const VkDedicatedAllocationBufferCreateInfoNV*>(structExtension), reinterpret_cast<VkDedicatedAllocationBufferCreateInfoNV*>(structExtension_out));
+            deepcopy_VkDedicatedAllocationBufferCreateInfoNV(alloc, rootType, reinterpret_cast<const VkDedicatedAllocationBufferCreateInfoNV*>(structExtension), reinterpret_cast<VkDedicatedAllocationBufferCreateInfoNV*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV:
         {
-            deepcopy_VkDedicatedAllocationMemoryAllocateInfoNV(pool, reinterpret_cast<const VkDedicatedAllocationMemoryAllocateInfoNV*>(structExtension), reinterpret_cast<VkDedicatedAllocationMemoryAllocateInfoNV*>(structExtension_out));
+            deepcopy_VkDedicatedAllocationMemoryAllocateInfoNV(alloc, rootType, reinterpret_cast<const VkDedicatedAllocationMemoryAllocateInfoNV*>(structExtension), reinterpret_cast<VkDedicatedAllocationMemoryAllocateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_transform_feedback
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceTransformFeedbackFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceTransformFeedbackFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceTransformFeedbackFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceTransformFeedbackPropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceTransformFeedbackPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceTransformFeedbackPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT:
+        {
+            deepcopy_VkPipelineRasterizationStateStreamCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkPipelineRasterizationStateStreamCreateInfoEXT*>(structExtension), reinterpret_cast<VkPipelineRasterizationStateStreamCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_AMD_texture_gather_bias_lod
         case VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD:
         {
-            deepcopy_VkTextureLODGatherFormatPropertiesAMD(pool, reinterpret_cast<const VkTextureLODGatherFormatPropertiesAMD*>(structExtension), reinterpret_cast<VkTextureLODGatherFormatPropertiesAMD*>(structExtension_out));
+            deepcopy_VkTextureLODGatherFormatPropertiesAMD(alloc, rootType, reinterpret_cast<const VkTextureLODGatherFormatPropertiesAMD*>(structExtension), reinterpret_cast<VkTextureLODGatherFormatPropertiesAMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_corner_sampled_image
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV:
+        {
+            deepcopy_VkPhysicalDeviceCornerSampledImageFeaturesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceCornerSampledImageFeaturesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceCornerSampledImageFeaturesNV*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_external_memory
         case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV:
         {
-            deepcopy_VkExternalMemoryImageCreateInfoNV(pool, reinterpret_cast<const VkExternalMemoryImageCreateInfoNV*>(structExtension), reinterpret_cast<VkExternalMemoryImageCreateInfoNV*>(structExtension_out));
+            deepcopy_VkExternalMemoryImageCreateInfoNV(alloc, rootType, reinterpret_cast<const VkExternalMemoryImageCreateInfoNV*>(structExtension), reinterpret_cast<VkExternalMemoryImageCreateInfoNV*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV:
         {
-            deepcopy_VkExportMemoryAllocateInfoNV(pool, reinterpret_cast<const VkExportMemoryAllocateInfoNV*>(structExtension), reinterpret_cast<VkExportMemoryAllocateInfoNV*>(structExtension_out));
+            deepcopy_VkExportMemoryAllocateInfoNV(alloc, rootType, reinterpret_cast<const VkExportMemoryAllocateInfoNV*>(structExtension), reinterpret_cast<VkExportMemoryAllocateInfoNV*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_external_memory_win32
         case VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV:
         {
-            deepcopy_VkImportMemoryWin32HandleInfoNV(pool, reinterpret_cast<const VkImportMemoryWin32HandleInfoNV*>(structExtension), reinterpret_cast<VkImportMemoryWin32HandleInfoNV*>(structExtension_out));
+            deepcopy_VkImportMemoryWin32HandleInfoNV(alloc, rootType, reinterpret_cast<const VkImportMemoryWin32HandleInfoNV*>(structExtension), reinterpret_cast<VkImportMemoryWin32HandleInfoNV*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV:
         {
-            deepcopy_VkExportMemoryWin32HandleInfoNV(pool, reinterpret_cast<const VkExportMemoryWin32HandleInfoNV*>(structExtension), reinterpret_cast<VkExportMemoryWin32HandleInfoNV*>(structExtension_out));
+            deepcopy_VkExportMemoryWin32HandleInfoNV(alloc, rootType, reinterpret_cast<const VkExportMemoryWin32HandleInfoNV*>(structExtension), reinterpret_cast<VkExportMemoryWin32HandleInfoNV*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_win32_keyed_mutex
         case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV:
         {
-            deepcopy_VkWin32KeyedMutexAcquireReleaseInfoNV(pool, reinterpret_cast<const VkWin32KeyedMutexAcquireReleaseInfoNV*>(structExtension), reinterpret_cast<VkWin32KeyedMutexAcquireReleaseInfoNV*>(structExtension_out));
+            deepcopy_VkWin32KeyedMutexAcquireReleaseInfoNV(alloc, rootType, reinterpret_cast<const VkWin32KeyedMutexAcquireReleaseInfoNV*>(structExtension), reinterpret_cast<VkWin32KeyedMutexAcquireReleaseInfoNV*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_validation_flags
         case VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT:
         {
-            deepcopy_VkValidationFlagsEXT(pool, reinterpret_cast<const VkValidationFlagsEXT*>(structExtension), reinterpret_cast<VkValidationFlagsEXT*>(structExtension_out));
+            deepcopy_VkValidationFlagsEXT(alloc, rootType, reinterpret_cast<const VkValidationFlagsEXT*>(structExtension), reinterpret_cast<VkValidationFlagsEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_astc_decode_mode
+        case VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT:
+        {
+            deepcopy_VkImageViewASTCDecodeModeEXT(alloc, rootType, reinterpret_cast<const VkImageViewASTCDecodeModeEXT*>(structExtension), reinterpret_cast<VkImageViewASTCDecodeModeEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceASTCDecodeFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceASTCDecodeFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceASTCDecodeFeaturesEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_conditional_rendering
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
         {
-            deepcopy_VkPhysicalDeviceConditionalRenderingFeaturesEXT(pool, reinterpret_cast<const VkPhysicalDeviceConditionalRenderingFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceConditionalRenderingFeaturesEXT*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceConditionalRenderingFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceConditionalRenderingFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceConditionalRenderingFeaturesEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT:
         {
-            deepcopy_VkCommandBufferInheritanceConditionalRenderingInfoEXT(pool, reinterpret_cast<const VkCommandBufferInheritanceConditionalRenderingInfoEXT*>(structExtension), reinterpret_cast<VkCommandBufferInheritanceConditionalRenderingInfoEXT*>(structExtension_out));
+            deepcopy_VkCommandBufferInheritanceConditionalRenderingInfoEXT(alloc, rootType, reinterpret_cast<const VkCommandBufferInheritanceConditionalRenderingInfoEXT*>(structExtension), reinterpret_cast<VkCommandBufferInheritanceConditionalRenderingInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_clip_space_w_scaling
         case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV:
         {
-            deepcopy_VkPipelineViewportWScalingStateCreateInfoNV(pool, reinterpret_cast<const VkPipelineViewportWScalingStateCreateInfoNV*>(structExtension), reinterpret_cast<VkPipelineViewportWScalingStateCreateInfoNV*>(structExtension_out));
+            deepcopy_VkPipelineViewportWScalingStateCreateInfoNV(alloc, rootType, reinterpret_cast<const VkPipelineViewportWScalingStateCreateInfoNV*>(structExtension), reinterpret_cast<VkPipelineViewportWScalingStateCreateInfoNV*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_display_control
         case VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT:
         {
-            deepcopy_VkSwapchainCounterCreateInfoEXT(pool, reinterpret_cast<const VkSwapchainCounterCreateInfoEXT*>(structExtension), reinterpret_cast<VkSwapchainCounterCreateInfoEXT*>(structExtension_out));
+            deepcopy_VkSwapchainCounterCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkSwapchainCounterCreateInfoEXT*>(structExtension), reinterpret_cast<VkSwapchainCounterCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_GOOGLE_display_timing
         case VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE:
         {
-            deepcopy_VkPresentTimesInfoGOOGLE(pool, reinterpret_cast<const VkPresentTimesInfoGOOGLE*>(structExtension), reinterpret_cast<VkPresentTimesInfoGOOGLE*>(structExtension_out));
+            deepcopy_VkPresentTimesInfoGOOGLE(alloc, rootType, reinterpret_cast<const VkPresentTimesInfoGOOGLE*>(structExtension), reinterpret_cast<VkPresentTimesInfoGOOGLE*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NVX_multiview_per_view_attributes
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX:
         {
-            deepcopy_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(pool, reinterpret_cast<const VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX*>(structExtension), reinterpret_cast<VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX*>(structExtension), reinterpret_cast<VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_viewport_swizzle
         case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV:
         {
-            deepcopy_VkPipelineViewportSwizzleStateCreateInfoNV(pool, reinterpret_cast<const VkPipelineViewportSwizzleStateCreateInfoNV*>(structExtension), reinterpret_cast<VkPipelineViewportSwizzleStateCreateInfoNV*>(structExtension_out));
+            deepcopy_VkPipelineViewportSwizzleStateCreateInfoNV(alloc, rootType, reinterpret_cast<const VkPipelineViewportSwizzleStateCreateInfoNV*>(structExtension), reinterpret_cast<VkPipelineViewportSwizzleStateCreateInfoNV*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_discard_rectangles
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT:
         {
-            deepcopy_VkPhysicalDeviceDiscardRectanglePropertiesEXT(pool, reinterpret_cast<const VkPhysicalDeviceDiscardRectanglePropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceDiscardRectanglePropertiesEXT*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceDiscardRectanglePropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceDiscardRectanglePropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceDiscardRectanglePropertiesEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT:
         {
-            deepcopy_VkPipelineDiscardRectangleStateCreateInfoEXT(pool, reinterpret_cast<const VkPipelineDiscardRectangleStateCreateInfoEXT*>(structExtension), reinterpret_cast<VkPipelineDiscardRectangleStateCreateInfoEXT*>(structExtension_out));
+            deepcopy_VkPipelineDiscardRectangleStateCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkPipelineDiscardRectangleStateCreateInfoEXT*>(structExtension), reinterpret_cast<VkPipelineDiscardRectangleStateCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_conservative_rasterization
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT:
         {
-            deepcopy_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(pool, reinterpret_cast<const VkPhysicalDeviceConservativeRasterizationPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceConservativeRasterizationPropertiesEXT*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceConservativeRasterizationPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceConservativeRasterizationPropertiesEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT:
         {
-            deepcopy_VkPipelineRasterizationConservativeStateCreateInfoEXT(pool, reinterpret_cast<const VkPipelineRasterizationConservativeStateCreateInfoEXT*>(structExtension), reinterpret_cast<VkPipelineRasterizationConservativeStateCreateInfoEXT*>(structExtension_out));
+            deepcopy_VkPipelineRasterizationConservativeStateCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkPipelineRasterizationConservativeStateCreateInfoEXT*>(structExtension), reinterpret_cast<VkPipelineRasterizationConservativeStateCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_depth_clip_enable
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceDepthClipEnableFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceDepthClipEnableFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceDepthClipEnableFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT:
+        {
+            deepcopy_VkPipelineRasterizationDepthClipStateCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkPipelineRasterizationDepthClipStateCreateInfoEXT*>(structExtension), reinterpret_cast<VkPipelineRasterizationDepthClipStateCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_debug_utils
         case VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT:
         {
-            deepcopy_VkDebugUtilsMessengerCreateInfoEXT(pool, reinterpret_cast<const VkDebugUtilsMessengerCreateInfoEXT*>(structExtension), reinterpret_cast<VkDebugUtilsMessengerCreateInfoEXT*>(structExtension_out));
+            deepcopy_VkDebugUtilsMessengerCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkDebugUtilsMessengerCreateInfoEXT*>(structExtension), reinterpret_cast<VkDebugUtilsMessengerCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_ANDROID_external_memory_android_hardware_buffer
         case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID:
         {
-            deepcopy_VkAndroidHardwareBufferUsageANDROID(pool, reinterpret_cast<const VkAndroidHardwareBufferUsageANDROID*>(structExtension), reinterpret_cast<VkAndroidHardwareBufferUsageANDROID*>(structExtension_out));
+            deepcopy_VkAndroidHardwareBufferUsageANDROID(alloc, rootType, reinterpret_cast<const VkAndroidHardwareBufferUsageANDROID*>(structExtension), reinterpret_cast<VkAndroidHardwareBufferUsageANDROID*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID:
         {
-            deepcopy_VkAndroidHardwareBufferFormatPropertiesANDROID(pool, reinterpret_cast<const VkAndroidHardwareBufferFormatPropertiesANDROID*>(structExtension), reinterpret_cast<VkAndroidHardwareBufferFormatPropertiesANDROID*>(structExtension_out));
+            deepcopy_VkAndroidHardwareBufferFormatPropertiesANDROID(alloc, rootType, reinterpret_cast<const VkAndroidHardwareBufferFormatPropertiesANDROID*>(structExtension), reinterpret_cast<VkAndroidHardwareBufferFormatPropertiesANDROID*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
         {
-            deepcopy_VkImportAndroidHardwareBufferInfoANDROID(pool, reinterpret_cast<const VkImportAndroidHardwareBufferInfoANDROID*>(structExtension), reinterpret_cast<VkImportAndroidHardwareBufferInfoANDROID*>(structExtension_out));
+            deepcopy_VkImportAndroidHardwareBufferInfoANDROID(alloc, rootType, reinterpret_cast<const VkImportAndroidHardwareBufferInfoANDROID*>(structExtension), reinterpret_cast<VkImportAndroidHardwareBufferInfoANDROID*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID:
         {
-            deepcopy_VkExternalFormatANDROID(pool, reinterpret_cast<const VkExternalFormatANDROID*>(structExtension), reinterpret_cast<VkExternalFormatANDROID*>(structExtension_out));
+            deepcopy_VkExternalFormatANDROID(alloc, rootType, reinterpret_cast<const VkExternalFormatANDROID*>(structExtension), reinterpret_cast<VkExternalFormatANDROID*>(structExtension_out));
             break;
         }
 #endif
-#ifdef VK_EXT_sampler_filter_minmax
-        case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT:
+#ifdef VK_EXT_inline_uniform_block
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT:
         {
-            deepcopy_VkSamplerReductionModeCreateInfoEXT(pool, reinterpret_cast<const VkSamplerReductionModeCreateInfoEXT*>(structExtension), reinterpret_cast<VkSamplerReductionModeCreateInfoEXT*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceInlineUniformBlockFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceInlineUniformBlockFeaturesEXT*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT:
         {
-            deepcopy_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(pool, reinterpret_cast<const VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceInlineUniformBlockPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceInlineUniformBlockPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT:
+        {
+            deepcopy_VkWriteDescriptorSetInlineUniformBlockEXT(alloc, rootType, reinterpret_cast<const VkWriteDescriptorSetInlineUniformBlockEXT*>(structExtension), reinterpret_cast<VkWriteDescriptorSetInlineUniformBlockEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT:
+        {
+            deepcopy_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkDescriptorPoolInlineUniformBlockCreateInfoEXT*>(structExtension), reinterpret_cast<VkDescriptorPoolInlineUniformBlockCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_sample_locations
         case VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT:
         {
-            deepcopy_VkSampleLocationsInfoEXT(pool, reinterpret_cast<const VkSampleLocationsInfoEXT*>(structExtension), reinterpret_cast<VkSampleLocationsInfoEXT*>(structExtension_out));
+            deepcopy_VkSampleLocationsInfoEXT(alloc, rootType, reinterpret_cast<const VkSampleLocationsInfoEXT*>(structExtension), reinterpret_cast<VkSampleLocationsInfoEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT:
         {
-            deepcopy_VkRenderPassSampleLocationsBeginInfoEXT(pool, reinterpret_cast<const VkRenderPassSampleLocationsBeginInfoEXT*>(structExtension), reinterpret_cast<VkRenderPassSampleLocationsBeginInfoEXT*>(structExtension_out));
+            deepcopy_VkRenderPassSampleLocationsBeginInfoEXT(alloc, rootType, reinterpret_cast<const VkRenderPassSampleLocationsBeginInfoEXT*>(structExtension), reinterpret_cast<VkRenderPassSampleLocationsBeginInfoEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT:
         {
-            deepcopy_VkPipelineSampleLocationsStateCreateInfoEXT(pool, reinterpret_cast<const VkPipelineSampleLocationsStateCreateInfoEXT*>(structExtension), reinterpret_cast<VkPipelineSampleLocationsStateCreateInfoEXT*>(structExtension_out));
+            deepcopy_VkPipelineSampleLocationsStateCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkPipelineSampleLocationsStateCreateInfoEXT*>(structExtension), reinterpret_cast<VkPipelineSampleLocationsStateCreateInfoEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT:
         {
-            deepcopy_VkPhysicalDeviceSampleLocationsPropertiesEXT(pool, reinterpret_cast<const VkPhysicalDeviceSampleLocationsPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceSampleLocationsPropertiesEXT*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceSampleLocationsPropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceSampleLocationsPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceSampleLocationsPropertiesEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_blend_operation_advanced
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT:
         {
-            deepcopy_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(pool, reinterpret_cast<const VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT:
         {
-            deepcopy_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(pool, reinterpret_cast<const VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT:
         {
-            deepcopy_VkPipelineColorBlendAdvancedStateCreateInfoEXT(pool, reinterpret_cast<const VkPipelineColorBlendAdvancedStateCreateInfoEXT*>(structExtension), reinterpret_cast<VkPipelineColorBlendAdvancedStateCreateInfoEXT*>(structExtension_out));
+            deepcopy_VkPipelineColorBlendAdvancedStateCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkPipelineColorBlendAdvancedStateCreateInfoEXT*>(structExtension), reinterpret_cast<VkPipelineColorBlendAdvancedStateCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_fragment_coverage_to_color
         case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV:
         {
-            deepcopy_VkPipelineCoverageToColorStateCreateInfoNV(pool, reinterpret_cast<const VkPipelineCoverageToColorStateCreateInfoNV*>(structExtension), reinterpret_cast<VkPipelineCoverageToColorStateCreateInfoNV*>(structExtension_out));
+            deepcopy_VkPipelineCoverageToColorStateCreateInfoNV(alloc, rootType, reinterpret_cast<const VkPipelineCoverageToColorStateCreateInfoNV*>(structExtension), reinterpret_cast<VkPipelineCoverageToColorStateCreateInfoNV*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_framebuffer_mixed_samples
         case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV:
         {
-            deepcopy_VkPipelineCoverageModulationStateCreateInfoNV(pool, reinterpret_cast<const VkPipelineCoverageModulationStateCreateInfoNV*>(structExtension), reinterpret_cast<VkPipelineCoverageModulationStateCreateInfoNV*>(structExtension_out));
+            deepcopy_VkPipelineCoverageModulationStateCreateInfoNV(alloc, rootType, reinterpret_cast<const VkPipelineCoverageModulationStateCreateInfoNV*>(structExtension), reinterpret_cast<VkPipelineCoverageModulationStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_shader_sm_builtins
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV:
+        {
+            deepcopy_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceShaderSMBuiltinsPropertiesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceShaderSMBuiltinsPropertiesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV:
+        {
+            deepcopy_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceShaderSMBuiltinsFeaturesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceShaderSMBuiltinsFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+        case VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT:
+        {
+            deepcopy_VkDrmFormatModifierPropertiesListEXT(alloc, rootType, reinterpret_cast<const VkDrmFormatModifierPropertiesListEXT*>(structExtension), reinterpret_cast<VkDrmFormatModifierPropertiesListEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT:
+        {
+            deepcopy_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceImageDrmFormatModifierInfoEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceImageDrmFormatModifierInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT:
+        {
+            deepcopy_VkImageDrmFormatModifierListCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkImageDrmFormatModifierListCreateInfoEXT*>(structExtension), reinterpret_cast<VkImageDrmFormatModifierListCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT:
+        {
+            deepcopy_VkImageDrmFormatModifierExplicitCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkImageDrmFormatModifierExplicitCreateInfoEXT*>(structExtension), reinterpret_cast<VkImageDrmFormatModifierExplicitCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_validation_cache
         case VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT:
         {
-            deepcopy_VkShaderModuleValidationCacheCreateInfoEXT(pool, reinterpret_cast<const VkShaderModuleValidationCacheCreateInfoEXT*>(structExtension), reinterpret_cast<VkShaderModuleValidationCacheCreateInfoEXT*>(structExtension_out));
+            deepcopy_VkShaderModuleValidationCacheCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkShaderModuleValidationCacheCreateInfoEXT*>(structExtension), reinterpret_cast<VkShaderModuleValidationCacheCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
-#ifdef VK_EXT_descriptor_indexing
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT:
+#ifdef VK_NV_shading_rate_image
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV:
         {
-            deepcopy_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(pool, reinterpret_cast<const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT*>(structExtension), reinterpret_cast<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT*>(structExtension_out));
+            deepcopy_VkPipelineViewportShadingRateImageStateCreateInfoNV(alloc, rootType, reinterpret_cast<const VkPipelineViewportShadingRateImageStateCreateInfoNV*>(structExtension), reinterpret_cast<VkPipelineViewportShadingRateImageStateCreateInfoNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV:
         {
-            deepcopy_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(pool, reinterpret_cast<const VkPhysicalDeviceDescriptorIndexingFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceDescriptorIndexingFeaturesEXT*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceShadingRateImageFeaturesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceShadingRateImageFeaturesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceShadingRateImageFeaturesNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV:
         {
-            deepcopy_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(pool, reinterpret_cast<const VkPhysicalDeviceDescriptorIndexingPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceDescriptorIndexingPropertiesEXT*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceShadingRateImagePropertiesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceShadingRateImagePropertiesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceShadingRateImagePropertiesNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT:
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV:
         {
-            deepcopy_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(pool, reinterpret_cast<const VkDescriptorSetVariableDescriptorCountAllocateInfoEXT*>(structExtension), reinterpret_cast<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT*>(structExtension_out));
+            deepcopy_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(alloc, rootType, reinterpret_cast<const VkPipelineViewportCoarseSampleOrderStateCreateInfoNV*>(structExtension), reinterpret_cast<VkPipelineViewportCoarseSampleOrderStateCreateInfoNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT:
+#endif
+#ifdef VK_NV_ray_tracing
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV:
         {
-            deepcopy_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(pool, reinterpret_cast<const VkDescriptorSetVariableDescriptorCountLayoutSupportEXT*>(structExtension), reinterpret_cast<VkDescriptorSetVariableDescriptorCountLayoutSupportEXT*>(structExtension_out));
+            deepcopy_VkWriteDescriptorSetAccelerationStructureNV(alloc, rootType, reinterpret_cast<const VkWriteDescriptorSetAccelerationStructureNV*>(structExtension), reinterpret_cast<VkWriteDescriptorSetAccelerationStructureNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV:
+        {
+            deepcopy_VkPhysicalDeviceRayTracingPropertiesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceRayTracingPropertiesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceRayTracingPropertiesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_representative_fragment_test
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV:
+        {
+            deepcopy_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV:
+        {
+            deepcopy_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(alloc, rootType, reinterpret_cast<const VkPipelineRepresentativeFragmentTestStateCreateInfoNV*>(structExtension), reinterpret_cast<VkPipelineRepresentativeFragmentTestStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_filter_cubic
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT:
+        {
+            deepcopy_VkPhysicalDeviceImageViewImageFormatInfoEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceImageViewImageFormatInfoEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceImageViewImageFormatInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT:
+        {
+            deepcopy_VkFilterCubicImageViewImageFormatPropertiesEXT(alloc, rootType, reinterpret_cast<const VkFilterCubicImageViewImageFormatPropertiesEXT*>(structExtension), reinterpret_cast<VkFilterCubicImageViewImageFormatPropertiesEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_global_priority
         case VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT:
         {
-            deepcopy_VkDeviceQueueGlobalPriorityCreateInfoEXT(pool, reinterpret_cast<const VkDeviceQueueGlobalPriorityCreateInfoEXT*>(structExtension), reinterpret_cast<VkDeviceQueueGlobalPriorityCreateInfoEXT*>(structExtension_out));
+            deepcopy_VkDeviceQueueGlobalPriorityCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkDeviceQueueGlobalPriorityCreateInfoEXT*>(structExtension), reinterpret_cast<VkDeviceQueueGlobalPriorityCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_external_memory_host
         case VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT:
         {
-            deepcopy_VkImportMemoryHostPointerInfoEXT(pool, reinterpret_cast<const VkImportMemoryHostPointerInfoEXT*>(structExtension), reinterpret_cast<VkImportMemoryHostPointerInfoEXT*>(structExtension_out));
+            deepcopy_VkImportMemoryHostPointerInfoEXT(alloc, rootType, reinterpret_cast<const VkImportMemoryHostPointerInfoEXT*>(structExtension), reinterpret_cast<VkImportMemoryHostPointerInfoEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT:
         {
-            deepcopy_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(pool, reinterpret_cast<const VkPhysicalDeviceExternalMemoryHostPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceExternalMemoryHostPropertiesEXT*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceExternalMemoryHostPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceExternalMemoryHostPropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_pipeline_compiler_control
+        case VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD:
+        {
+            deepcopy_VkPipelineCompilerControlCreateInfoAMD(alloc, rootType, reinterpret_cast<const VkPipelineCompilerControlCreateInfoAMD*>(structExtension), reinterpret_cast<VkPipelineCompilerControlCreateInfoAMD*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_AMD_shader_core_properties
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD:
         {
-            deepcopy_VkPhysicalDeviceShaderCorePropertiesAMD(pool, reinterpret_cast<const VkPhysicalDeviceShaderCorePropertiesAMD*>(structExtension), reinterpret_cast<VkPhysicalDeviceShaderCorePropertiesAMD*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceShaderCorePropertiesAMD(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceShaderCorePropertiesAMD*>(structExtension), reinterpret_cast<VkPhysicalDeviceShaderCorePropertiesAMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_memory_overallocation_behavior
+        case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD:
+        {
+            deepcopy_VkDeviceMemoryOverallocationCreateInfoAMD(alloc, rootType, reinterpret_cast<const VkDeviceMemoryOverallocationCreateInfoAMD*>(structExtension), reinterpret_cast<VkDeviceMemoryOverallocationCreateInfoAMD*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_vertex_attribute_divisor
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT:
         {
-            deepcopy_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(pool, reinterpret_cast<const VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT*>(structExtension_out));
+            deepcopy_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT:
         {
-            deepcopy_VkPipelineVertexInputDivisorStateCreateInfoEXT(pool, reinterpret_cast<const VkPipelineVertexInputDivisorStateCreateInfoEXT*>(structExtension), reinterpret_cast<VkPipelineVertexInputDivisorStateCreateInfoEXT*>(structExtension_out));
+            deepcopy_VkPipelineVertexInputDivisorStateCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkPipelineVertexInputDivisorStateCreateInfoEXT*>(structExtension), reinterpret_cast<VkPipelineVertexInputDivisorStateCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_GGP_frame_token
+        case VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP:
+        {
+            deepcopy_VkPresentFrameTokenGGP(alloc, rootType, reinterpret_cast<const VkPresentFrameTokenGGP*>(structExtension), reinterpret_cast<VkPresentFrameTokenGGP*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+        case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT:
+        {
+            deepcopy_VkPipelineCreationFeedbackCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkPipelineCreationFeedbackCreateInfoEXT*>(structExtension), reinterpret_cast<VkPipelineCreationFeedbackCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_compute_shader_derivatives
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV:
+        {
+            deepcopy_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceComputeShaderDerivativesFeaturesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceComputeShaderDerivativesFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_mesh_shader
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV:
+        {
+            deepcopy_VkPhysicalDeviceMeshShaderFeaturesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceMeshShaderFeaturesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceMeshShaderFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV:
+        {
+            deepcopy_VkPhysicalDeviceMeshShaderPropertiesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceMeshShaderPropertiesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceMeshShaderPropertiesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV:
+        {
+            deepcopy_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_shader_image_footprint
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV:
+        {
+            deepcopy_VkPhysicalDeviceShaderImageFootprintFeaturesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceShaderImageFootprintFeaturesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceShaderImageFootprintFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_scissor_exclusive
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV:
+        {
+            deepcopy_VkPipelineViewportExclusiveScissorStateCreateInfoNV(alloc, rootType, reinterpret_cast<const VkPipelineViewportExclusiveScissorStateCreateInfoNV*>(structExtension), reinterpret_cast<VkPipelineViewportExclusiveScissorStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV:
+        {
+            deepcopy_VkPhysicalDeviceExclusiveScissorFeaturesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceExclusiveScissorFeaturesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceExclusiveScissorFeaturesNV*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_device_diagnostic_checkpoints
         case VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV:
         {
-            deepcopy_VkQueueFamilyCheckpointPropertiesNV(pool, reinterpret_cast<const VkQueueFamilyCheckpointPropertiesNV*>(structExtension), reinterpret_cast<VkQueueFamilyCheckpointPropertiesNV*>(structExtension_out));
+            deepcopy_VkQueueFamilyCheckpointPropertiesNV(alloc, rootType, reinterpret_cast<const VkQueueFamilyCheckpointPropertiesNV*>(structExtension), reinterpret_cast<VkQueueFamilyCheckpointPropertiesNV*>(structExtension_out));
             break;
         }
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_INTEL_shader_integer_functions2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL:
+        {
+            deepcopy_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL*>(structExtension), reinterpret_cast<VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL:
+        {
+            deepcopy_VkQueryPoolPerformanceQueryCreateInfoINTEL(alloc, rootType, reinterpret_cast<const VkQueryPoolPerformanceQueryCreateInfoINTEL*>(structExtension), reinterpret_cast<VkQueryPoolPerformanceQueryCreateInfoINTEL*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pci_bus_info
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT:
+        {
+            deepcopy_VkPhysicalDevicePCIBusInfoPropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDevicePCIBusInfoPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDevicePCIBusInfoPropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_display_native_hdr
+        case VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD:
+        {
+            deepcopy_VkDisplayNativeHdrSurfaceCapabilitiesAMD(alloc, rootType, reinterpret_cast<const VkDisplayNativeHdrSurfaceCapabilitiesAMD*>(structExtension), reinterpret_cast<VkDisplayNativeHdrSurfaceCapabilitiesAMD*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD:
+        {
+            deepcopy_VkSwapchainDisplayNativeHdrCreateInfoAMD(alloc, rootType, reinterpret_cast<const VkSwapchainDisplayNativeHdrCreateInfoAMD*>(structExtension), reinterpret_cast<VkSwapchainDisplayNativeHdrCreateInfoAMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
+                {
+                    deepcopy_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension_out));
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO:
+                {
+                    deepcopy_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension_out));
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    deepcopy_VkImportColorBufferGOOGLE(alloc, rootType, reinterpret_cast<const VkImportColorBufferGOOGLE*>(structExtension), reinterpret_cast<VkImportColorBufferGOOGLE*>(structExtension_out));
+                    break;
+                }
+                default:
+                {
+                    deepcopy_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension_out));
+                    break;
+                }
+            }
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2:
+                {
+                    deepcopy_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceFragmentDensityMapPropertiesEXT*>(structExtension_out));
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    deepcopy_VkImportPhysicalAddressGOOGLE(alloc, rootType, reinterpret_cast<const VkImportPhysicalAddressGOOGLE*>(structExtension), reinterpret_cast<VkImportPhysicalAddressGOOGLE*>(structExtension_out));
+                    break;
+                }
+                default:
+                {
+                    deepcopy_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceFragmentDensityMapPropertiesEXT*>(structExtension_out));
+                    break;
+                }
+            }
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO:
+                {
+                    deepcopy_VkRenderPassFragmentDensityMapCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension), reinterpret_cast<VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension_out));
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2:
+                {
+                    deepcopy_VkRenderPassFragmentDensityMapCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension), reinterpret_cast<VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension_out));
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    deepcopy_VkImportBufferGOOGLE(alloc, rootType, reinterpret_cast<const VkImportBufferGOOGLE*>(structExtension), reinterpret_cast<VkImportBufferGOOGLE*>(structExtension_out));
+                    break;
+                }
+                default:
+                {
+                    deepcopy_VkRenderPassFragmentDensityMapCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension), reinterpret_cast<VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension_out));
+                    break;
+                }
+            }
+            break;
+        }
+#endif
+#ifdef VK_EXT_subgroup_size_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceSubgroupSizeControlFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceSubgroupSizeControlPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT:
+        {
+            deepcopy_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT*>(structExtension), reinterpret_cast<VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_shader_core_properties2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD:
+        {
+            deepcopy_VkPhysicalDeviceShaderCoreProperties2AMD(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceShaderCoreProperties2AMD*>(structExtension), reinterpret_cast<VkPhysicalDeviceShaderCoreProperties2AMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_device_coherent_memory
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD:
+        {
+            deepcopy_VkPhysicalDeviceCoherentMemoryFeaturesAMD(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceCoherentMemoryFeaturesAMD*>(structExtension), reinterpret_cast<VkPhysicalDeviceCoherentMemoryFeaturesAMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_memory_budget
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceMemoryBudgetPropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceMemoryBudgetPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceMemoryBudgetPropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_memory_priority
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceMemoryPriorityFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceMemoryPriorityFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceMemoryPriorityFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT:
+        {
+            deepcopy_VkMemoryPriorityAllocateInfoEXT(alloc, rootType, reinterpret_cast<const VkMemoryPriorityAllocateInfoEXT*>(structExtension), reinterpret_cast<VkMemoryPriorityAllocateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV:
+        {
+            deepcopy_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_buffer_device_address
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceBufferDeviceAddressFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceBufferDeviceAddressFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT:
+        {
+            deepcopy_VkBufferDeviceAddressCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkBufferDeviceAddressCreateInfoEXT*>(structExtension), reinterpret_cast<VkBufferDeviceAddressCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_validation_features
+        case VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT:
+        {
+            deepcopy_VkValidationFeaturesEXT(alloc, rootType, reinterpret_cast<const VkValidationFeaturesEXT*>(structExtension), reinterpret_cast<VkValidationFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_cooperative_matrix
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV:
+        {
+            deepcopy_VkPhysicalDeviceCooperativeMatrixFeaturesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceCooperativeMatrixFeaturesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceCooperativeMatrixFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV:
+        {
+            deepcopy_VkPhysicalDeviceCooperativeMatrixPropertiesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceCooperativeMatrixPropertiesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceCooperativeMatrixPropertiesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV:
+        {
+            deepcopy_VkPhysicalDeviceCoverageReductionModeFeaturesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceCoverageReductionModeFeaturesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceCoverageReductionModeFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV:
+        {
+            deepcopy_VkPipelineCoverageReductionStateCreateInfoNV(alloc, rootType, reinterpret_cast<const VkPipelineCoverageReductionStateCreateInfoNV*>(structExtension), reinterpret_cast<VkPipelineCoverageReductionStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceYcbcrImageArraysFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceYcbcrImageArraysFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT:
+        {
+            deepcopy_VkSurfaceFullScreenExclusiveInfoEXT(alloc, rootType, reinterpret_cast<const VkSurfaceFullScreenExclusiveInfoEXT*>(structExtension), reinterpret_cast<VkSurfaceFullScreenExclusiveInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT:
+        {
+            deepcopy_VkSurfaceCapabilitiesFullScreenExclusiveEXT(alloc, rootType, reinterpret_cast<const VkSurfaceCapabilitiesFullScreenExclusiveEXT*>(structExtension), reinterpret_cast<VkSurfaceCapabilitiesFullScreenExclusiveEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT:
+        {
+            deepcopy_VkSurfaceFullScreenExclusiveWin32InfoEXT(alloc, rootType, reinterpret_cast<const VkSurfaceFullScreenExclusiveWin32InfoEXT*>(structExtension), reinterpret_cast<VkSurfaceFullScreenExclusiveWin32InfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_line_rasterization
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceLineRasterizationFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceLineRasterizationFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceLineRasterizationFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceLineRasterizationPropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceLineRasterizationPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceLineRasterizationPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
+        {
+            deepcopy_VkPipelineRasterizationLineStateCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkPipelineRasterizationLineStateCreateInfoEXT*>(structExtension), reinterpret_cast<VkPipelineRasterizationLineStateCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_atomic_float
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceShaderAtomicFloatFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceShaderAtomicFloatFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_index_type_uint8
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceIndexTypeUint8FeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceIndexTypeUint8FeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceExtendedDynamicStateFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceExtendedDynamicStateFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_device_generated_commands
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV:
+        {
+            deepcopy_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV:
+        {
+            deepcopy_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV:
+        {
+            deepcopy_VkGraphicsPipelineShaderGroupsCreateInfoNV(alloc, rootType, reinterpret_cast<const VkGraphicsPipelineShaderGroupsCreateInfoNV*>(structExtension), reinterpret_cast<VkGraphicsPipelineShaderGroupsCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_QCOM_render_pass_transform
+        case VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM:
+        {
+            deepcopy_VkRenderPassTransformBeginInfoQCOM(alloc, rootType, reinterpret_cast<const VkRenderPassTransformBeginInfoQCOM*>(structExtension), reinterpret_cast<VkRenderPassTransformBeginInfoQCOM*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM:
+        {
+            deepcopy_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(alloc, rootType, reinterpret_cast<const VkCommandBufferInheritanceRenderPassTransformInfoQCOM*>(structExtension), reinterpret_cast<VkCommandBufferInheritanceRenderPassTransformInfoQCOM*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_device_memory_report
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceDeviceMemoryReportFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT:
+        {
+            deepcopy_VkDeviceDeviceMemoryReportCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkDeviceDeviceMemoryReportCreateInfoEXT*>(structExtension), reinterpret_cast<VkDeviceDeviceMemoryReportCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_robustness2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceRobustness2FeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceRobustness2FeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceRobustness2FeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceRobustness2PropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceRobustness2PropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceRobustness2PropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_custom_border_color
+        case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT:
+        {
+            deepcopy_VkSamplerCustomBorderColorCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkSamplerCustomBorderColorCreateInfoEXT*>(structExtension), reinterpret_cast<VkSamplerCustomBorderColorCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceCustomBorderColorPropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceCustomBorderColorPropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceCustomBorderColorPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceCustomBorderColorFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceCustomBorderColorFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceCustomBorderColorFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_private_data
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDevicePrivateDataFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDevicePrivateDataFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDevicePrivateDataFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT:
+        {
+            deepcopy_VkDevicePrivateDataCreateInfoEXT(alloc, rootType, reinterpret_cast<const VkDevicePrivateDataCreateInfoEXT*>(structExtension), reinterpret_cast<VkDevicePrivateDataCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_device_diagnostics_config
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV:
+        {
+            deepcopy_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceDiagnosticsConfigFeaturesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceDiagnosticsConfigFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV:
+        {
+            deepcopy_VkDeviceDiagnosticsConfigCreateInfoNV(alloc, rootType, reinterpret_cast<const VkDeviceDiagnosticsConfigCreateInfoNV*>(structExtension), reinterpret_cast<VkDeviceDiagnosticsConfigCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV:
+        {
+            deepcopy_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV:
+        {
+            deepcopy_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV*>(structExtension), reinterpret_cast<VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV:
+        {
+            deepcopy_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(alloc, rootType, reinterpret_cast<const VkPipelineFragmentShadingRateEnumStateCreateInfoNV*>(structExtension), reinterpret_cast<VkPipelineFragmentShadingRateEnumStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMap2FeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceFragmentDensityMap2FeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMap2PropertiesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceFragmentDensityMap2PropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+        case VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM:
+        {
+            deepcopy_VkCopyCommandTransformInfoQCOM(alloc, rootType, reinterpret_cast<const VkCopyCommandTransformInfoQCOM*>(structExtension), reinterpret_cast<VkCopyCommandTransformInfoQCOM*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_image_robustness
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDeviceImageRobustnessFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceImageRobustnessFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDeviceImageRobustnessFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_4444_formats
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+        {
+            deepcopy_VkPhysicalDevice4444FormatsFeaturesEXT(alloc, rootType, reinterpret_cast<const VkPhysicalDevice4444FormatsFeaturesEXT*>(structExtension), reinterpret_cast<VkPhysicalDevice4444FormatsFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_GOOGLE_gfxstream
         case VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE:
         {
-            deepcopy_VkImportColorBufferGOOGLE(pool, reinterpret_cast<const VkImportColorBufferGOOGLE*>(structExtension), reinterpret_cast<VkImportColorBufferGOOGLE*>(structExtension_out));
+            deepcopy_VkImportColorBufferGOOGLE(alloc, rootType, reinterpret_cast<const VkImportColorBufferGOOGLE*>(structExtension), reinterpret_cast<VkImportColorBufferGOOGLE*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE:
+        {
+            deepcopy_VkImportBufferGOOGLE(alloc, rootType, reinterpret_cast<const VkImportBufferGOOGLE*>(structExtension), reinterpret_cast<VkImportBufferGOOGLE*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE:
         {
-            deepcopy_VkImportPhysicalAddressGOOGLE(pool, reinterpret_cast<const VkImportPhysicalAddressGOOGLE*>(structExtension), reinterpret_cast<VkImportPhysicalAddressGOOGLE*>(structExtension_out));
+            deepcopy_VkImportPhysicalAddressGOOGLE(alloc, rootType, reinterpret_cast<const VkImportPhysicalAddressGOOGLE*>(structExtension), reinterpret_cast<VkImportPhysicalAddressGOOGLE*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR:
+        {
+            deepcopy_VkWriteDescriptorSetAccelerationStructureKHR(alloc, rootType, reinterpret_cast<const VkWriteDescriptorSetAccelerationStructureKHR*>(structExtension), reinterpret_cast<VkWriteDescriptorSetAccelerationStructureKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR:
+        {
+            deepcopy_VkPhysicalDeviceAccelerationStructureFeaturesKHR(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceAccelerationStructureFeaturesKHR*>(structExtension), reinterpret_cast<VkPhysicalDeviceAccelerationStructureFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR:
+        {
+            deepcopy_VkPhysicalDeviceAccelerationStructurePropertiesKHR(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceAccelerationStructurePropertiesKHR*>(structExtension), reinterpret_cast<VkPhysicalDeviceAccelerationStructurePropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR:
+        {
+            deepcopy_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceRayTracingPipelineFeaturesKHR*>(structExtension), reinterpret_cast<VkPhysicalDeviceRayTracingPipelineFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR:
+        {
+            deepcopy_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceRayTracingPipelinePropertiesKHR*>(structExtension), reinterpret_cast<VkPhysicalDeviceRayTracingPipelinePropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_ray_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR:
+        {
+            deepcopy_VkPhysicalDeviceRayQueryFeaturesKHR(alloc, rootType, reinterpret_cast<const VkPhysicalDeviceRayQueryFeaturesKHR*>(structExtension), reinterpret_cast<VkPhysicalDeviceRayQueryFeaturesKHR*>(structExtension_out));
             break;
         }
 #endif
diff --git a/system/vulkan_enc/goldfish_vk_deepcopy_guest.h b/system/vulkan_enc/goldfish_vk_deepcopy_guest.h
index 1164e1c..d1e8c10 100644
--- a/system/vulkan_enc/goldfish_vk_deepcopy_guest.h
+++ b/system/vulkan_enc/goldfish_vk_deepcopy_guest.h
@@ -30,8 +30,9 @@
 #include "vk_platform_compat.h"
 
 #include "goldfish_vk_private_defs.h"
-#include "android/base/Pool.h"
-using android::base::Pool;
+#include "android/base/BumpPool.h"
+using android::base::Allocator;
+using android::base::BumpPool;
 // Stuff we are not going to use but if included,
 // will cause compile errors. These are Android Vulkan
 // required extensions, but the approach will be to
@@ -43,1030 +44,1535 @@
 namespace goldfish_vk {
 
 #ifdef VK_VERSION_1_0
-void deepcopy_VkApplicationInfo(
-    Pool* pool,
-    const VkApplicationInfo* from,
-    VkApplicationInfo* to);
-
-void deepcopy_VkInstanceCreateInfo(
-    Pool* pool,
-    const VkInstanceCreateInfo* from,
-    VkInstanceCreateInfo* to);
-
-void deepcopy_VkAllocationCallbacks(
-    Pool* pool,
-    const VkAllocationCallbacks* from,
-    VkAllocationCallbacks* to);
-
-void deepcopy_VkPhysicalDeviceFeatures(
-    Pool* pool,
-    const VkPhysicalDeviceFeatures* from,
-    VkPhysicalDeviceFeatures* to);
-
-void deepcopy_VkFormatProperties(
-    Pool* pool,
-    const VkFormatProperties* from,
-    VkFormatProperties* to);
-
-void deepcopy_VkExtent3D(
-    Pool* pool,
-    const VkExtent3D* from,
-    VkExtent3D* to);
-
-void deepcopy_VkImageFormatProperties(
-    Pool* pool,
-    const VkImageFormatProperties* from,
-    VkImageFormatProperties* to);
-
-void deepcopy_VkPhysicalDeviceLimits(
-    Pool* pool,
-    const VkPhysicalDeviceLimits* from,
-    VkPhysicalDeviceLimits* to);
-
-void deepcopy_VkPhysicalDeviceSparseProperties(
-    Pool* pool,
-    const VkPhysicalDeviceSparseProperties* from,
-    VkPhysicalDeviceSparseProperties* to);
-
-void deepcopy_VkPhysicalDeviceProperties(
-    Pool* pool,
-    const VkPhysicalDeviceProperties* from,
-    VkPhysicalDeviceProperties* to);
-
-void deepcopy_VkQueueFamilyProperties(
-    Pool* pool,
-    const VkQueueFamilyProperties* from,
-    VkQueueFamilyProperties* to);
-
-void deepcopy_VkMemoryType(
-    Pool* pool,
-    const VkMemoryType* from,
-    VkMemoryType* to);
-
-void deepcopy_VkMemoryHeap(
-    Pool* pool,
-    const VkMemoryHeap* from,
-    VkMemoryHeap* to);
-
-void deepcopy_VkPhysicalDeviceMemoryProperties(
-    Pool* pool,
-    const VkPhysicalDeviceMemoryProperties* from,
-    VkPhysicalDeviceMemoryProperties* to);
-
-void deepcopy_VkDeviceQueueCreateInfo(
-    Pool* pool,
-    const VkDeviceQueueCreateInfo* from,
-    VkDeviceQueueCreateInfo* to);
-
-void deepcopy_VkDeviceCreateInfo(
-    Pool* pool,
-    const VkDeviceCreateInfo* from,
-    VkDeviceCreateInfo* to);
-
-void deepcopy_VkExtensionProperties(
-    Pool* pool,
-    const VkExtensionProperties* from,
-    VkExtensionProperties* to);
-
-void deepcopy_VkLayerProperties(
-    Pool* pool,
-    const VkLayerProperties* from,
-    VkLayerProperties* to);
-
-void deepcopy_VkSubmitInfo(
-    Pool* pool,
-    const VkSubmitInfo* from,
-    VkSubmitInfo* to);
-
-void deepcopy_VkMemoryAllocateInfo(
-    Pool* pool,
-    const VkMemoryAllocateInfo* from,
-    VkMemoryAllocateInfo* to);
-
-void deepcopy_VkMappedMemoryRange(
-    Pool* pool,
-    const VkMappedMemoryRange* from,
-    VkMappedMemoryRange* to);
-
-void deepcopy_VkMemoryRequirements(
-    Pool* pool,
-    const VkMemoryRequirements* from,
-    VkMemoryRequirements* to);
-
-void deepcopy_VkSparseImageFormatProperties(
-    Pool* pool,
-    const VkSparseImageFormatProperties* from,
-    VkSparseImageFormatProperties* to);
-
-void deepcopy_VkSparseImageMemoryRequirements(
-    Pool* pool,
-    const VkSparseImageMemoryRequirements* from,
-    VkSparseImageMemoryRequirements* to);
-
-void deepcopy_VkSparseMemoryBind(
-    Pool* pool,
-    const VkSparseMemoryBind* from,
-    VkSparseMemoryBind* to);
-
-void deepcopy_VkSparseBufferMemoryBindInfo(
-    Pool* pool,
-    const VkSparseBufferMemoryBindInfo* from,
-    VkSparseBufferMemoryBindInfo* to);
-
-void deepcopy_VkSparseImageOpaqueMemoryBindInfo(
-    Pool* pool,
-    const VkSparseImageOpaqueMemoryBindInfo* from,
-    VkSparseImageOpaqueMemoryBindInfo* to);
-
-void deepcopy_VkImageSubresource(
-    Pool* pool,
-    const VkImageSubresource* from,
-    VkImageSubresource* to);
-
-void deepcopy_VkOffset3D(
-    Pool* pool,
-    const VkOffset3D* from,
-    VkOffset3D* to);
-
-void deepcopy_VkSparseImageMemoryBind(
-    Pool* pool,
-    const VkSparseImageMemoryBind* from,
-    VkSparseImageMemoryBind* to);
-
-void deepcopy_VkSparseImageMemoryBindInfo(
-    Pool* pool,
-    const VkSparseImageMemoryBindInfo* from,
-    VkSparseImageMemoryBindInfo* to);
-
-void deepcopy_VkBindSparseInfo(
-    Pool* pool,
-    const VkBindSparseInfo* from,
-    VkBindSparseInfo* to);
-
-void deepcopy_VkFenceCreateInfo(
-    Pool* pool,
-    const VkFenceCreateInfo* from,
-    VkFenceCreateInfo* to);
-
-void deepcopy_VkSemaphoreCreateInfo(
-    Pool* pool,
-    const VkSemaphoreCreateInfo* from,
-    VkSemaphoreCreateInfo* to);
-
-void deepcopy_VkEventCreateInfo(
-    Pool* pool,
-    const VkEventCreateInfo* from,
-    VkEventCreateInfo* to);
-
-void deepcopy_VkQueryPoolCreateInfo(
-    Pool* pool,
-    const VkQueryPoolCreateInfo* from,
-    VkQueryPoolCreateInfo* to);
-
-void deepcopy_VkBufferCreateInfo(
-    Pool* pool,
-    const VkBufferCreateInfo* from,
-    VkBufferCreateInfo* to);
-
-void deepcopy_VkBufferViewCreateInfo(
-    Pool* pool,
-    const VkBufferViewCreateInfo* from,
-    VkBufferViewCreateInfo* to);
-
-void deepcopy_VkImageCreateInfo(
-    Pool* pool,
-    const VkImageCreateInfo* from,
-    VkImageCreateInfo* to);
-
-void deepcopy_VkSubresourceLayout(
-    Pool* pool,
-    const VkSubresourceLayout* from,
-    VkSubresourceLayout* to);
-
-void deepcopy_VkComponentMapping(
-    Pool* pool,
-    const VkComponentMapping* from,
-    VkComponentMapping* to);
-
-void deepcopy_VkImageSubresourceRange(
-    Pool* pool,
-    const VkImageSubresourceRange* from,
-    VkImageSubresourceRange* to);
-
-void deepcopy_VkImageViewCreateInfo(
-    Pool* pool,
-    const VkImageViewCreateInfo* from,
-    VkImageViewCreateInfo* to);
-
-void deepcopy_VkShaderModuleCreateInfo(
-    Pool* pool,
-    const VkShaderModuleCreateInfo* from,
-    VkShaderModuleCreateInfo* to);
-
-void deepcopy_VkPipelineCacheCreateInfo(
-    Pool* pool,
-    const VkPipelineCacheCreateInfo* from,
-    VkPipelineCacheCreateInfo* to);
-
-void deepcopy_VkSpecializationMapEntry(
-    Pool* pool,
-    const VkSpecializationMapEntry* from,
-    VkSpecializationMapEntry* to);
-
-void deepcopy_VkSpecializationInfo(
-    Pool* pool,
-    const VkSpecializationInfo* from,
-    VkSpecializationInfo* to);
-
-void deepcopy_VkPipelineShaderStageCreateInfo(
-    Pool* pool,
-    const VkPipelineShaderStageCreateInfo* from,
-    VkPipelineShaderStageCreateInfo* to);
-
-void deepcopy_VkVertexInputBindingDescription(
-    Pool* pool,
-    const VkVertexInputBindingDescription* from,
-    VkVertexInputBindingDescription* to);
-
-void deepcopy_VkVertexInputAttributeDescription(
-    Pool* pool,
-    const VkVertexInputAttributeDescription* from,
-    VkVertexInputAttributeDescription* to);
-
-void deepcopy_VkPipelineVertexInputStateCreateInfo(
-    Pool* pool,
-    const VkPipelineVertexInputStateCreateInfo* from,
-    VkPipelineVertexInputStateCreateInfo* to);
-
-void deepcopy_VkPipelineInputAssemblyStateCreateInfo(
-    Pool* pool,
-    const VkPipelineInputAssemblyStateCreateInfo* from,
-    VkPipelineInputAssemblyStateCreateInfo* to);
-
-void deepcopy_VkPipelineTessellationStateCreateInfo(
-    Pool* pool,
-    const VkPipelineTessellationStateCreateInfo* from,
-    VkPipelineTessellationStateCreateInfo* to);
-
-void deepcopy_VkViewport(
-    Pool* pool,
-    const VkViewport* from,
-    VkViewport* to);
-
-void deepcopy_VkOffset2D(
-    Pool* pool,
-    const VkOffset2D* from,
-    VkOffset2D* to);
-
 void deepcopy_VkExtent2D(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExtent2D* from,
     VkExtent2D* to);
 
+void deepcopy_VkExtent3D(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkExtent3D* from,
+    VkExtent3D* to);
+
+void deepcopy_VkOffset2D(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkOffset2D* from,
+    VkOffset2D* to);
+
+void deepcopy_VkOffset3D(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkOffset3D* from,
+    VkOffset3D* to);
+
 void deepcopy_VkRect2D(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkRect2D* from,
     VkRect2D* to);
 
-void deepcopy_VkPipelineViewportStateCreateInfo(
-    Pool* pool,
-    const VkPipelineViewportStateCreateInfo* from,
-    VkPipelineViewportStateCreateInfo* to);
+void deepcopy_VkBaseInStructure(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBaseInStructure* from,
+    VkBaseInStructure* to);
 
-void deepcopy_VkPipelineRasterizationStateCreateInfo(
-    Pool* pool,
-    const VkPipelineRasterizationStateCreateInfo* from,
-    VkPipelineRasterizationStateCreateInfo* to);
-
-void deepcopy_VkPipelineMultisampleStateCreateInfo(
-    Pool* pool,
-    const VkPipelineMultisampleStateCreateInfo* from,
-    VkPipelineMultisampleStateCreateInfo* to);
-
-void deepcopy_VkStencilOpState(
-    Pool* pool,
-    const VkStencilOpState* from,
-    VkStencilOpState* to);
-
-void deepcopy_VkPipelineDepthStencilStateCreateInfo(
-    Pool* pool,
-    const VkPipelineDepthStencilStateCreateInfo* from,
-    VkPipelineDepthStencilStateCreateInfo* to);
-
-void deepcopy_VkPipelineColorBlendAttachmentState(
-    Pool* pool,
-    const VkPipelineColorBlendAttachmentState* from,
-    VkPipelineColorBlendAttachmentState* to);
-
-void deepcopy_VkPipelineColorBlendStateCreateInfo(
-    Pool* pool,
-    const VkPipelineColorBlendStateCreateInfo* from,
-    VkPipelineColorBlendStateCreateInfo* to);
-
-void deepcopy_VkPipelineDynamicStateCreateInfo(
-    Pool* pool,
-    const VkPipelineDynamicStateCreateInfo* from,
-    VkPipelineDynamicStateCreateInfo* to);
-
-void deepcopy_VkGraphicsPipelineCreateInfo(
-    Pool* pool,
-    const VkGraphicsPipelineCreateInfo* from,
-    VkGraphicsPipelineCreateInfo* to);
-
-void deepcopy_VkComputePipelineCreateInfo(
-    Pool* pool,
-    const VkComputePipelineCreateInfo* from,
-    VkComputePipelineCreateInfo* to);
-
-void deepcopy_VkPushConstantRange(
-    Pool* pool,
-    const VkPushConstantRange* from,
-    VkPushConstantRange* to);
-
-void deepcopy_VkPipelineLayoutCreateInfo(
-    Pool* pool,
-    const VkPipelineLayoutCreateInfo* from,
-    VkPipelineLayoutCreateInfo* to);
-
-void deepcopy_VkSamplerCreateInfo(
-    Pool* pool,
-    const VkSamplerCreateInfo* from,
-    VkSamplerCreateInfo* to);
-
-void deepcopy_VkDescriptorSetLayoutBinding(
-    Pool* pool,
-    const VkDescriptorSetLayoutBinding* from,
-    VkDescriptorSetLayoutBinding* to);
-
-void deepcopy_VkDescriptorSetLayoutCreateInfo(
-    Pool* pool,
-    const VkDescriptorSetLayoutCreateInfo* from,
-    VkDescriptorSetLayoutCreateInfo* to);
-
-void deepcopy_VkDescriptorPoolSize(
-    Pool* pool,
-    const VkDescriptorPoolSize* from,
-    VkDescriptorPoolSize* to);
-
-void deepcopy_VkDescriptorPoolCreateInfo(
-    Pool* pool,
-    const VkDescriptorPoolCreateInfo* from,
-    VkDescriptorPoolCreateInfo* to);
-
-void deepcopy_VkDescriptorSetAllocateInfo(
-    Pool* pool,
-    const VkDescriptorSetAllocateInfo* from,
-    VkDescriptorSetAllocateInfo* to);
-
-void deepcopy_VkDescriptorImageInfo(
-    Pool* pool,
-    const VkDescriptorImageInfo* from,
-    VkDescriptorImageInfo* to);
-
-void deepcopy_VkDescriptorBufferInfo(
-    Pool* pool,
-    const VkDescriptorBufferInfo* from,
-    VkDescriptorBufferInfo* to);
-
-void deepcopy_VkWriteDescriptorSet(
-    Pool* pool,
-    const VkWriteDescriptorSet* from,
-    VkWriteDescriptorSet* to);
-
-void deepcopy_VkCopyDescriptorSet(
-    Pool* pool,
-    const VkCopyDescriptorSet* from,
-    VkCopyDescriptorSet* to);
-
-void deepcopy_VkFramebufferCreateInfo(
-    Pool* pool,
-    const VkFramebufferCreateInfo* from,
-    VkFramebufferCreateInfo* to);
-
-void deepcopy_VkAttachmentDescription(
-    Pool* pool,
-    const VkAttachmentDescription* from,
-    VkAttachmentDescription* to);
-
-void deepcopy_VkAttachmentReference(
-    Pool* pool,
-    const VkAttachmentReference* from,
-    VkAttachmentReference* to);
-
-void deepcopy_VkSubpassDescription(
-    Pool* pool,
-    const VkSubpassDescription* from,
-    VkSubpassDescription* to);
-
-void deepcopy_VkSubpassDependency(
-    Pool* pool,
-    const VkSubpassDependency* from,
-    VkSubpassDependency* to);
-
-void deepcopy_VkRenderPassCreateInfo(
-    Pool* pool,
-    const VkRenderPassCreateInfo* from,
-    VkRenderPassCreateInfo* to);
-
-void deepcopy_VkCommandPoolCreateInfo(
-    Pool* pool,
-    const VkCommandPoolCreateInfo* from,
-    VkCommandPoolCreateInfo* to);
-
-void deepcopy_VkCommandBufferAllocateInfo(
-    Pool* pool,
-    const VkCommandBufferAllocateInfo* from,
-    VkCommandBufferAllocateInfo* to);
-
-void deepcopy_VkCommandBufferInheritanceInfo(
-    Pool* pool,
-    const VkCommandBufferInheritanceInfo* from,
-    VkCommandBufferInheritanceInfo* to);
-
-void deepcopy_VkCommandBufferBeginInfo(
-    Pool* pool,
-    const VkCommandBufferBeginInfo* from,
-    VkCommandBufferBeginInfo* to);
-
-void deepcopy_VkBufferCopy(
-    Pool* pool,
-    const VkBufferCopy* from,
-    VkBufferCopy* to);
-
-void deepcopy_VkImageSubresourceLayers(
-    Pool* pool,
-    const VkImageSubresourceLayers* from,
-    VkImageSubresourceLayers* to);
-
-void deepcopy_VkImageCopy(
-    Pool* pool,
-    const VkImageCopy* from,
-    VkImageCopy* to);
-
-void deepcopy_VkImageBlit(
-    Pool* pool,
-    const VkImageBlit* from,
-    VkImageBlit* to);
-
-void deepcopy_VkBufferImageCopy(
-    Pool* pool,
-    const VkBufferImageCopy* from,
-    VkBufferImageCopy* to);
-
-void deepcopy_VkClearColorValue(
-    Pool* pool,
-    const VkClearColorValue* from,
-    VkClearColorValue* to);
-
-void deepcopy_VkClearDepthStencilValue(
-    Pool* pool,
-    const VkClearDepthStencilValue* from,
-    VkClearDepthStencilValue* to);
-
-void deepcopy_VkClearValue(
-    Pool* pool,
-    const VkClearValue* from,
-    VkClearValue* to);
-
-void deepcopy_VkClearAttachment(
-    Pool* pool,
-    const VkClearAttachment* from,
-    VkClearAttachment* to);
-
-void deepcopy_VkClearRect(
-    Pool* pool,
-    const VkClearRect* from,
-    VkClearRect* to);
-
-void deepcopy_VkImageResolve(
-    Pool* pool,
-    const VkImageResolve* from,
-    VkImageResolve* to);
-
-void deepcopy_VkMemoryBarrier(
-    Pool* pool,
-    const VkMemoryBarrier* from,
-    VkMemoryBarrier* to);
+void deepcopy_VkBaseOutStructure(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBaseOutStructure* from,
+    VkBaseOutStructure* to);
 
 void deepcopy_VkBufferMemoryBarrier(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBufferMemoryBarrier* from,
     VkBufferMemoryBarrier* to);
 
-void deepcopy_VkImageMemoryBarrier(
-    Pool* pool,
-    const VkImageMemoryBarrier* from,
-    VkImageMemoryBarrier* to);
-
-void deepcopy_VkRenderPassBeginInfo(
-    Pool* pool,
-    const VkRenderPassBeginInfo* from,
-    VkRenderPassBeginInfo* to);
-
 void deepcopy_VkDispatchIndirectCommand(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDispatchIndirectCommand* from,
     VkDispatchIndirectCommand* to);
 
 void deepcopy_VkDrawIndexedIndirectCommand(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDrawIndexedIndirectCommand* from,
     VkDrawIndexedIndirectCommand* to);
 
 void deepcopy_VkDrawIndirectCommand(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDrawIndirectCommand* from,
     VkDrawIndirectCommand* to);
 
-void deepcopy_VkBaseOutStructure(
-    Pool* pool,
-    const VkBaseOutStructure* from,
-    VkBaseOutStructure* to);
+void deepcopy_VkImageSubresourceRange(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageSubresourceRange* from,
+    VkImageSubresourceRange* to);
 
-void deepcopy_VkBaseInStructure(
-    Pool* pool,
-    const VkBaseInStructure* from,
-    VkBaseInStructure* to);
+void deepcopy_VkImageMemoryBarrier(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageMemoryBarrier* from,
+    VkImageMemoryBarrier* to);
+
+void deepcopy_VkMemoryBarrier(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkMemoryBarrier* from,
+    VkMemoryBarrier* to);
+
+void deepcopy_VkAllocationCallbacks(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAllocationCallbacks* from,
+    VkAllocationCallbacks* to);
+
+void deepcopy_VkApplicationInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkApplicationInfo* from,
+    VkApplicationInfo* to);
+
+void deepcopy_VkFormatProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkFormatProperties* from,
+    VkFormatProperties* to);
+
+void deepcopy_VkImageFormatProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageFormatProperties* from,
+    VkImageFormatProperties* to);
+
+void deepcopy_VkInstanceCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkInstanceCreateInfo* from,
+    VkInstanceCreateInfo* to);
+
+void deepcopy_VkMemoryHeap(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkMemoryHeap* from,
+    VkMemoryHeap* to);
+
+void deepcopy_VkMemoryType(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkMemoryType* from,
+    VkMemoryType* to);
+
+void deepcopy_VkPhysicalDeviceFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFeatures* from,
+    VkPhysicalDeviceFeatures* to);
+
+void deepcopy_VkPhysicalDeviceLimits(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLimits* from,
+    VkPhysicalDeviceLimits* to);
+
+void deepcopy_VkPhysicalDeviceMemoryProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryProperties* from,
+    VkPhysicalDeviceMemoryProperties* to);
+
+void deepcopy_VkPhysicalDeviceSparseProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSparseProperties* from,
+    VkPhysicalDeviceSparseProperties* to);
+
+void deepcopy_VkPhysicalDeviceProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProperties* from,
+    VkPhysicalDeviceProperties* to);
+
+void deepcopy_VkQueueFamilyProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkQueueFamilyProperties* from,
+    VkQueueFamilyProperties* to);
+
+void deepcopy_VkDeviceQueueCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDeviceQueueCreateInfo* from,
+    VkDeviceQueueCreateInfo* to);
+
+void deepcopy_VkDeviceCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDeviceCreateInfo* from,
+    VkDeviceCreateInfo* to);
+
+void deepcopy_VkExtensionProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkExtensionProperties* from,
+    VkExtensionProperties* to);
+
+void deepcopy_VkLayerProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkLayerProperties* from,
+    VkLayerProperties* to);
+
+void deepcopy_VkSubmitInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSubmitInfo* from,
+    VkSubmitInfo* to);
+
+void deepcopy_VkMappedMemoryRange(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkMappedMemoryRange* from,
+    VkMappedMemoryRange* to);
+
+void deepcopy_VkMemoryAllocateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkMemoryAllocateInfo* from,
+    VkMemoryAllocateInfo* to);
+
+void deepcopy_VkMemoryRequirements(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkMemoryRequirements* from,
+    VkMemoryRequirements* to);
+
+void deepcopy_VkSparseMemoryBind(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSparseMemoryBind* from,
+    VkSparseMemoryBind* to);
+
+void deepcopy_VkSparseBufferMemoryBindInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSparseBufferMemoryBindInfo* from,
+    VkSparseBufferMemoryBindInfo* to);
+
+void deepcopy_VkSparseImageOpaqueMemoryBindInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSparseImageOpaqueMemoryBindInfo* from,
+    VkSparseImageOpaqueMemoryBindInfo* to);
+
+void deepcopy_VkImageSubresource(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageSubresource* from,
+    VkImageSubresource* to);
+
+void deepcopy_VkSparseImageMemoryBind(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSparseImageMemoryBind* from,
+    VkSparseImageMemoryBind* to);
+
+void deepcopy_VkSparseImageMemoryBindInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSparseImageMemoryBindInfo* from,
+    VkSparseImageMemoryBindInfo* to);
+
+void deepcopy_VkBindSparseInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBindSparseInfo* from,
+    VkBindSparseInfo* to);
+
+void deepcopy_VkSparseImageFormatProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSparseImageFormatProperties* from,
+    VkSparseImageFormatProperties* to);
+
+void deepcopy_VkSparseImageMemoryRequirements(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSparseImageMemoryRequirements* from,
+    VkSparseImageMemoryRequirements* to);
+
+void deepcopy_VkFenceCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkFenceCreateInfo* from,
+    VkFenceCreateInfo* to);
+
+void deepcopy_VkSemaphoreCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSemaphoreCreateInfo* from,
+    VkSemaphoreCreateInfo* to);
+
+void deepcopy_VkEventCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkEventCreateInfo* from,
+    VkEventCreateInfo* to);
+
+void deepcopy_VkQueryPoolCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkQueryPoolCreateInfo* from,
+    VkQueryPoolCreateInfo* to);
+
+void deepcopy_VkBufferCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBufferCreateInfo* from,
+    VkBufferCreateInfo* to);
+
+void deepcopy_VkBufferViewCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBufferViewCreateInfo* from,
+    VkBufferViewCreateInfo* to);
+
+void deepcopy_VkImageCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageCreateInfo* from,
+    VkImageCreateInfo* to);
+
+void deepcopy_VkSubresourceLayout(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSubresourceLayout* from,
+    VkSubresourceLayout* to);
+
+void deepcopy_VkComponentMapping(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkComponentMapping* from,
+    VkComponentMapping* to);
+
+void deepcopy_VkImageViewCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageViewCreateInfo* from,
+    VkImageViewCreateInfo* to);
+
+void deepcopy_VkShaderModuleCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkShaderModuleCreateInfo* from,
+    VkShaderModuleCreateInfo* to);
+
+void deepcopy_VkPipelineCacheCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineCacheCreateInfo* from,
+    VkPipelineCacheCreateInfo* to);
+
+void deepcopy_VkSpecializationMapEntry(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSpecializationMapEntry* from,
+    VkSpecializationMapEntry* to);
+
+void deepcopy_VkSpecializationInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSpecializationInfo* from,
+    VkSpecializationInfo* to);
+
+void deepcopy_VkPipelineShaderStageCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineShaderStageCreateInfo* from,
+    VkPipelineShaderStageCreateInfo* to);
+
+void deepcopy_VkComputePipelineCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkComputePipelineCreateInfo* from,
+    VkComputePipelineCreateInfo* to);
+
+void deepcopy_VkVertexInputBindingDescription(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkVertexInputBindingDescription* from,
+    VkVertexInputBindingDescription* to);
+
+void deepcopy_VkVertexInputAttributeDescription(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkVertexInputAttributeDescription* from,
+    VkVertexInputAttributeDescription* to);
+
+void deepcopy_VkPipelineVertexInputStateCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineVertexInputStateCreateInfo* from,
+    VkPipelineVertexInputStateCreateInfo* to);
+
+void deepcopy_VkPipelineInputAssemblyStateCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineInputAssemblyStateCreateInfo* from,
+    VkPipelineInputAssemblyStateCreateInfo* to);
+
+void deepcopy_VkPipelineTessellationStateCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineTessellationStateCreateInfo* from,
+    VkPipelineTessellationStateCreateInfo* to);
+
+void deepcopy_VkViewport(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkViewport* from,
+    VkViewport* to);
+
+void deepcopy_VkPipelineViewportStateCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineViewportStateCreateInfo* from,
+    VkPipelineViewportStateCreateInfo* to);
+
+void deepcopy_VkPipelineRasterizationStateCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateCreateInfo* from,
+    VkPipelineRasterizationStateCreateInfo* to);
+
+void deepcopy_VkPipelineMultisampleStateCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineMultisampleStateCreateInfo* from,
+    VkPipelineMultisampleStateCreateInfo* to);
+
+void deepcopy_VkStencilOpState(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkStencilOpState* from,
+    VkStencilOpState* to);
+
+void deepcopy_VkPipelineDepthStencilStateCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineDepthStencilStateCreateInfo* from,
+    VkPipelineDepthStencilStateCreateInfo* to);
+
+void deepcopy_VkPipelineColorBlendAttachmentState(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineColorBlendAttachmentState* from,
+    VkPipelineColorBlendAttachmentState* to);
+
+void deepcopy_VkPipelineColorBlendStateCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineColorBlendStateCreateInfo* from,
+    VkPipelineColorBlendStateCreateInfo* to);
+
+void deepcopy_VkPipelineDynamicStateCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineDynamicStateCreateInfo* from,
+    VkPipelineDynamicStateCreateInfo* to);
+
+void deepcopy_VkGraphicsPipelineCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkGraphicsPipelineCreateInfo* from,
+    VkGraphicsPipelineCreateInfo* to);
+
+void deepcopy_VkPushConstantRange(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPushConstantRange* from,
+    VkPushConstantRange* to);
+
+void deepcopy_VkPipelineLayoutCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineLayoutCreateInfo* from,
+    VkPipelineLayoutCreateInfo* to);
+
+void deepcopy_VkSamplerCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSamplerCreateInfo* from,
+    VkSamplerCreateInfo* to);
+
+void deepcopy_VkCopyDescriptorSet(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyDescriptorSet* from,
+    VkCopyDescriptorSet* to);
+
+void deepcopy_VkDescriptorBufferInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorBufferInfo* from,
+    VkDescriptorBufferInfo* to);
+
+void deepcopy_VkDescriptorImageInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorImageInfo* from,
+    VkDescriptorImageInfo* to);
+
+void deepcopy_VkDescriptorPoolSize(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorPoolSize* from,
+    VkDescriptorPoolSize* to);
+
+void deepcopy_VkDescriptorPoolCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorPoolCreateInfo* from,
+    VkDescriptorPoolCreateInfo* to);
+
+void deepcopy_VkDescriptorSetAllocateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorSetAllocateInfo* from,
+    VkDescriptorSetAllocateInfo* to);
+
+void deepcopy_VkDescriptorSetLayoutBinding(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutBinding* from,
+    VkDescriptorSetLayoutBinding* to);
+
+void deepcopy_VkDescriptorSetLayoutCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutCreateInfo* from,
+    VkDescriptorSetLayoutCreateInfo* to);
+
+void deepcopy_VkWriteDescriptorSet(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkWriteDescriptorSet* from,
+    VkWriteDescriptorSet* to);
+
+void deepcopy_VkAttachmentDescription(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAttachmentDescription* from,
+    VkAttachmentDescription* to);
+
+void deepcopy_VkAttachmentReference(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAttachmentReference* from,
+    VkAttachmentReference* to);
+
+void deepcopy_VkFramebufferCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkFramebufferCreateInfo* from,
+    VkFramebufferCreateInfo* to);
+
+void deepcopy_VkSubpassDescription(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSubpassDescription* from,
+    VkSubpassDescription* to);
+
+void deepcopy_VkSubpassDependency(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSubpassDependency* from,
+    VkSubpassDependency* to);
+
+void deepcopy_VkRenderPassCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRenderPassCreateInfo* from,
+    VkRenderPassCreateInfo* to);
+
+void deepcopy_VkCommandPoolCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCommandPoolCreateInfo* from,
+    VkCommandPoolCreateInfo* to);
+
+void deepcopy_VkCommandBufferAllocateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCommandBufferAllocateInfo* from,
+    VkCommandBufferAllocateInfo* to);
+
+void deepcopy_VkCommandBufferInheritanceInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceInfo* from,
+    VkCommandBufferInheritanceInfo* to);
+
+void deepcopy_VkCommandBufferBeginInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCommandBufferBeginInfo* from,
+    VkCommandBufferBeginInfo* to);
+
+void deepcopy_VkBufferCopy(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBufferCopy* from,
+    VkBufferCopy* to);
+
+void deepcopy_VkImageSubresourceLayers(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageSubresourceLayers* from,
+    VkImageSubresourceLayers* to);
+
+void deepcopy_VkBufferImageCopy(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBufferImageCopy* from,
+    VkBufferImageCopy* to);
+
+void deepcopy_VkClearColorValue(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkClearColorValue* from,
+    VkClearColorValue* to);
+
+void deepcopy_VkClearDepthStencilValue(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkClearDepthStencilValue* from,
+    VkClearDepthStencilValue* to);
+
+void deepcopy_VkClearValue(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkClearValue* from,
+    VkClearValue* to);
+
+void deepcopy_VkClearAttachment(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkClearAttachment* from,
+    VkClearAttachment* to);
+
+void deepcopy_VkClearRect(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkClearRect* from,
+    VkClearRect* to);
+
+void deepcopy_VkImageBlit(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageBlit* from,
+    VkImageBlit* to);
+
+void deepcopy_VkImageCopy(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageCopy* from,
+    VkImageCopy* to);
+
+void deepcopy_VkImageResolve(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageResolve* from,
+    VkImageResolve* to);
+
+void deepcopy_VkRenderPassBeginInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRenderPassBeginInfo* from,
+    VkRenderPassBeginInfo* to);
 
 #endif
 #ifdef VK_VERSION_1_1
 void deepcopy_VkPhysicalDeviceSubgroupProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceSubgroupProperties* from,
     VkPhysicalDeviceSubgroupProperties* to);
 
 void deepcopy_VkBindBufferMemoryInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBindBufferMemoryInfo* from,
     VkBindBufferMemoryInfo* to);
 
 void deepcopy_VkBindImageMemoryInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBindImageMemoryInfo* from,
     VkBindImageMemoryInfo* to);
 
 void deepcopy_VkPhysicalDevice16BitStorageFeatures(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDevice16BitStorageFeatures* from,
     VkPhysicalDevice16BitStorageFeatures* to);
 
 void deepcopy_VkMemoryDedicatedRequirements(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryDedicatedRequirements* from,
     VkMemoryDedicatedRequirements* to);
 
 void deepcopy_VkMemoryDedicatedAllocateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryDedicatedAllocateInfo* from,
     VkMemoryDedicatedAllocateInfo* to);
 
 void deepcopy_VkMemoryAllocateFlagsInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryAllocateFlagsInfo* from,
     VkMemoryAllocateFlagsInfo* to);
 
 void deepcopy_VkDeviceGroupRenderPassBeginInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceGroupRenderPassBeginInfo* from,
     VkDeviceGroupRenderPassBeginInfo* to);
 
 void deepcopy_VkDeviceGroupCommandBufferBeginInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceGroupCommandBufferBeginInfo* from,
     VkDeviceGroupCommandBufferBeginInfo* to);
 
 void deepcopy_VkDeviceGroupSubmitInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceGroupSubmitInfo* from,
     VkDeviceGroupSubmitInfo* to);
 
 void deepcopy_VkDeviceGroupBindSparseInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceGroupBindSparseInfo* from,
     VkDeviceGroupBindSparseInfo* to);
 
 void deepcopy_VkBindBufferMemoryDeviceGroupInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBindBufferMemoryDeviceGroupInfo* from,
     VkBindBufferMemoryDeviceGroupInfo* to);
 
 void deepcopy_VkBindImageMemoryDeviceGroupInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBindImageMemoryDeviceGroupInfo* from,
     VkBindImageMemoryDeviceGroupInfo* to);
 
 void deepcopy_VkPhysicalDeviceGroupProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceGroupProperties* from,
     VkPhysicalDeviceGroupProperties* to);
 
 void deepcopy_VkDeviceGroupDeviceCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceGroupDeviceCreateInfo* from,
     VkDeviceGroupDeviceCreateInfo* to);
 
 void deepcopy_VkBufferMemoryRequirementsInfo2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBufferMemoryRequirementsInfo2* from,
     VkBufferMemoryRequirementsInfo2* to);
 
 void deepcopy_VkImageMemoryRequirementsInfo2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImageMemoryRequirementsInfo2* from,
     VkImageMemoryRequirementsInfo2* to);
 
 void deepcopy_VkImageSparseMemoryRequirementsInfo2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImageSparseMemoryRequirementsInfo2* from,
     VkImageSparseMemoryRequirementsInfo2* to);
 
 void deepcopy_VkMemoryRequirements2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryRequirements2* from,
     VkMemoryRequirements2* to);
 
 void deepcopy_VkSparseImageMemoryRequirements2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSparseImageMemoryRequirements2* from,
     VkSparseImageMemoryRequirements2* to);
 
 void deepcopy_VkPhysicalDeviceFeatures2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceFeatures2* from,
     VkPhysicalDeviceFeatures2* to);
 
 void deepcopy_VkPhysicalDeviceProperties2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceProperties2* from,
     VkPhysicalDeviceProperties2* to);
 
 void deepcopy_VkFormatProperties2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkFormatProperties2* from,
     VkFormatProperties2* to);
 
 void deepcopy_VkImageFormatProperties2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImageFormatProperties2* from,
     VkImageFormatProperties2* to);
 
 void deepcopy_VkPhysicalDeviceImageFormatInfo2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceImageFormatInfo2* from,
     VkPhysicalDeviceImageFormatInfo2* to);
 
 void deepcopy_VkQueueFamilyProperties2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkQueueFamilyProperties2* from,
     VkQueueFamilyProperties2* to);
 
 void deepcopy_VkPhysicalDeviceMemoryProperties2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceMemoryProperties2* from,
     VkPhysicalDeviceMemoryProperties2* to);
 
 void deepcopy_VkSparseImageFormatProperties2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSparseImageFormatProperties2* from,
     VkSparseImageFormatProperties2* to);
 
 void deepcopy_VkPhysicalDeviceSparseImageFormatInfo2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceSparseImageFormatInfo2* from,
     VkPhysicalDeviceSparseImageFormatInfo2* to);
 
 void deepcopy_VkPhysicalDevicePointClippingProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDevicePointClippingProperties* from,
     VkPhysicalDevicePointClippingProperties* to);
 
 void deepcopy_VkInputAttachmentAspectReference(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkInputAttachmentAspectReference* from,
     VkInputAttachmentAspectReference* to);
 
 void deepcopy_VkRenderPassInputAttachmentAspectCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkRenderPassInputAttachmentAspectCreateInfo* from,
     VkRenderPassInputAttachmentAspectCreateInfo* to);
 
 void deepcopy_VkImageViewUsageCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImageViewUsageCreateInfo* from,
     VkImageViewUsageCreateInfo* to);
 
 void deepcopy_VkPipelineTessellationDomainOriginStateCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineTessellationDomainOriginStateCreateInfo* from,
     VkPipelineTessellationDomainOriginStateCreateInfo* to);
 
 void deepcopy_VkRenderPassMultiviewCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkRenderPassMultiviewCreateInfo* from,
     VkRenderPassMultiviewCreateInfo* to);
 
 void deepcopy_VkPhysicalDeviceMultiviewFeatures(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceMultiviewFeatures* from,
     VkPhysicalDeviceMultiviewFeatures* to);
 
 void deepcopy_VkPhysicalDeviceMultiviewProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceMultiviewProperties* from,
     VkPhysicalDeviceMultiviewProperties* to);
 
-void deepcopy_VkPhysicalDeviceVariablePointerFeatures(
-    Pool* pool,
-    const VkPhysicalDeviceVariablePointerFeatures* from,
-    VkPhysicalDeviceVariablePointerFeatures* to);
+void deepcopy_VkPhysicalDeviceVariablePointersFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVariablePointersFeatures* from,
+    VkPhysicalDeviceVariablePointersFeatures* to);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceVariablePointersFeatures, deepcopy_VkPhysicalDeviceVariablePointerFeatures);
 
 void deepcopy_VkPhysicalDeviceProtectedMemoryFeatures(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceProtectedMemoryFeatures* from,
     VkPhysicalDeviceProtectedMemoryFeatures* to);
 
 void deepcopy_VkPhysicalDeviceProtectedMemoryProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceProtectedMemoryProperties* from,
     VkPhysicalDeviceProtectedMemoryProperties* to);
 
 void deepcopy_VkDeviceQueueInfo2(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceQueueInfo2* from,
     VkDeviceQueueInfo2* to);
 
 void deepcopy_VkProtectedSubmitInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkProtectedSubmitInfo* from,
     VkProtectedSubmitInfo* to);
 
 void deepcopy_VkSamplerYcbcrConversionCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSamplerYcbcrConversionCreateInfo* from,
     VkSamplerYcbcrConversionCreateInfo* to);
 
 void deepcopy_VkSamplerYcbcrConversionInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSamplerYcbcrConversionInfo* from,
     VkSamplerYcbcrConversionInfo* to);
 
 void deepcopy_VkBindImagePlaneMemoryInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBindImagePlaneMemoryInfo* from,
     VkBindImagePlaneMemoryInfo* to);
 
 void deepcopy_VkImagePlaneMemoryRequirementsInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImagePlaneMemoryRequirementsInfo* from,
     VkImagePlaneMemoryRequirementsInfo* to);
 
 void deepcopy_VkPhysicalDeviceSamplerYcbcrConversionFeatures(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceSamplerYcbcrConversionFeatures* from,
     VkPhysicalDeviceSamplerYcbcrConversionFeatures* to);
 
 void deepcopy_VkSamplerYcbcrConversionImageFormatProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSamplerYcbcrConversionImageFormatProperties* from,
     VkSamplerYcbcrConversionImageFormatProperties* to);
 
 void deepcopy_VkDescriptorUpdateTemplateEntry(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDescriptorUpdateTemplateEntry* from,
     VkDescriptorUpdateTemplateEntry* to);
 
 void deepcopy_VkDescriptorUpdateTemplateCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDescriptorUpdateTemplateCreateInfo* from,
     VkDescriptorUpdateTemplateCreateInfo* to);
 
 void deepcopy_VkExternalMemoryProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalMemoryProperties* from,
     VkExternalMemoryProperties* to);
 
 void deepcopy_VkPhysicalDeviceExternalImageFormatInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalImageFormatInfo* from,
     VkPhysicalDeviceExternalImageFormatInfo* to);
 
 void deepcopy_VkExternalImageFormatProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalImageFormatProperties* from,
     VkExternalImageFormatProperties* to);
 
 void deepcopy_VkPhysicalDeviceExternalBufferInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalBufferInfo* from,
     VkPhysicalDeviceExternalBufferInfo* to);
 
 void deepcopy_VkExternalBufferProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalBufferProperties* from,
     VkExternalBufferProperties* to);
 
 void deepcopy_VkPhysicalDeviceIDProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceIDProperties* from,
     VkPhysicalDeviceIDProperties* to);
 
 void deepcopy_VkExternalMemoryImageCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalMemoryImageCreateInfo* from,
     VkExternalMemoryImageCreateInfo* to);
 
 void deepcopy_VkExternalMemoryBufferCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalMemoryBufferCreateInfo* from,
     VkExternalMemoryBufferCreateInfo* to);
 
 void deepcopy_VkExportMemoryAllocateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExportMemoryAllocateInfo* from,
     VkExportMemoryAllocateInfo* to);
 
 void deepcopy_VkPhysicalDeviceExternalFenceInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalFenceInfo* from,
     VkPhysicalDeviceExternalFenceInfo* to);
 
 void deepcopy_VkExternalFenceProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalFenceProperties* from,
     VkExternalFenceProperties* to);
 
 void deepcopy_VkExportFenceCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExportFenceCreateInfo* from,
     VkExportFenceCreateInfo* to);
 
 void deepcopy_VkExportSemaphoreCreateInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExportSemaphoreCreateInfo* from,
     VkExportSemaphoreCreateInfo* to);
 
 void deepcopy_VkPhysicalDeviceExternalSemaphoreInfo(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalSemaphoreInfo* from,
     VkPhysicalDeviceExternalSemaphoreInfo* to);
 
 void deepcopy_VkExternalSemaphoreProperties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalSemaphoreProperties* from,
     VkExternalSemaphoreProperties* to);
 
 void deepcopy_VkPhysicalDeviceMaintenance3Properties(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceMaintenance3Properties* from,
     VkPhysicalDeviceMaintenance3Properties* to);
 
 void deepcopy_VkDescriptorSetLayoutSupport(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDescriptorSetLayoutSupport* from,
     VkDescriptorSetLayoutSupport* to);
 
-void deepcopy_VkPhysicalDeviceShaderDrawParameterFeatures(
-    Pool* pool,
-    const VkPhysicalDeviceShaderDrawParameterFeatures* from,
-    VkPhysicalDeviceShaderDrawParameterFeatures* to);
+void deepcopy_VkPhysicalDeviceShaderDrawParametersFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderDrawParametersFeatures* from,
+    VkPhysicalDeviceShaderDrawParametersFeatures* to);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceShaderDrawParametersFeatures, deepcopy_VkPhysicalDeviceShaderDrawParameterFeatures);
+
+#endif
+#ifdef VK_VERSION_1_2
+void deepcopy_VkPhysicalDeviceVulkan11Features(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan11Features* from,
+    VkPhysicalDeviceVulkan11Features* to);
+
+void deepcopy_VkPhysicalDeviceVulkan11Properties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan11Properties* from,
+    VkPhysicalDeviceVulkan11Properties* to);
+
+void deepcopy_VkPhysicalDeviceVulkan12Features(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan12Features* from,
+    VkPhysicalDeviceVulkan12Features* to);
+
+void deepcopy_VkConformanceVersion(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkConformanceVersion* from,
+    VkConformanceVersion* to);
+
+void deepcopy_VkPhysicalDeviceVulkan12Properties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan12Properties* from,
+    VkPhysicalDeviceVulkan12Properties* to);
+
+void deepcopy_VkImageFormatListCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageFormatListCreateInfo* from,
+    VkImageFormatListCreateInfo* to);
+
+void deepcopy_VkAttachmentDescription2(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAttachmentDescription2* from,
+    VkAttachmentDescription2* to);
+
+void deepcopy_VkAttachmentReference2(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAttachmentReference2* from,
+    VkAttachmentReference2* to);
+
+void deepcopy_VkSubpassDescription2(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSubpassDescription2* from,
+    VkSubpassDescription2* to);
+
+void deepcopy_VkSubpassDependency2(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSubpassDependency2* from,
+    VkSubpassDependency2* to);
+
+void deepcopy_VkRenderPassCreateInfo2(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRenderPassCreateInfo2* from,
+    VkRenderPassCreateInfo2* to);
+
+void deepcopy_VkSubpassBeginInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSubpassBeginInfo* from,
+    VkSubpassBeginInfo* to);
+
+void deepcopy_VkSubpassEndInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSubpassEndInfo* from,
+    VkSubpassEndInfo* to);
+
+void deepcopy_VkPhysicalDevice8BitStorageFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevice8BitStorageFeatures* from,
+    VkPhysicalDevice8BitStorageFeatures* to);
+
+void deepcopy_VkPhysicalDeviceDriverProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDriverProperties* from,
+    VkPhysicalDeviceDriverProperties* to);
+
+void deepcopy_VkPhysicalDeviceShaderAtomicInt64Features(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderAtomicInt64Features* from,
+    VkPhysicalDeviceShaderAtomicInt64Features* to);
+
+void deepcopy_VkPhysicalDeviceShaderFloat16Int8Features(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderFloat16Int8Features* from,
+    VkPhysicalDeviceShaderFloat16Int8Features* to);
+
+void deepcopy_VkPhysicalDeviceFloatControlsProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFloatControlsProperties* from,
+    VkPhysicalDeviceFloatControlsProperties* to);
+
+void deepcopy_VkDescriptorSetLayoutBindingFlagsCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutBindingFlagsCreateInfo* from,
+    VkDescriptorSetLayoutBindingFlagsCreateInfo* to);
+
+void deepcopy_VkPhysicalDeviceDescriptorIndexingFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDescriptorIndexingFeatures* from,
+    VkPhysicalDeviceDescriptorIndexingFeatures* to);
+
+void deepcopy_VkPhysicalDeviceDescriptorIndexingProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDescriptorIndexingProperties* from,
+    VkPhysicalDeviceDescriptorIndexingProperties* to);
+
+void deepcopy_VkDescriptorSetVariableDescriptorCountAllocateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorSetVariableDescriptorCountAllocateInfo* from,
+    VkDescriptorSetVariableDescriptorCountAllocateInfo* to);
+
+void deepcopy_VkDescriptorSetVariableDescriptorCountLayoutSupport(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorSetVariableDescriptorCountLayoutSupport* from,
+    VkDescriptorSetVariableDescriptorCountLayoutSupport* to);
+
+void deepcopy_VkSubpassDescriptionDepthStencilResolve(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSubpassDescriptionDepthStencilResolve* from,
+    VkSubpassDescriptionDepthStencilResolve* to);
+
+void deepcopy_VkPhysicalDeviceDepthStencilResolveProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDepthStencilResolveProperties* from,
+    VkPhysicalDeviceDepthStencilResolveProperties* to);
+
+void deepcopy_VkPhysicalDeviceScalarBlockLayoutFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceScalarBlockLayoutFeatures* from,
+    VkPhysicalDeviceScalarBlockLayoutFeatures* to);
+
+void deepcopy_VkImageStencilUsageCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageStencilUsageCreateInfo* from,
+    VkImageStencilUsageCreateInfo* to);
+
+void deepcopy_VkSamplerReductionModeCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSamplerReductionModeCreateInfo* from,
+    VkSamplerReductionModeCreateInfo* to);
+
+void deepcopy_VkPhysicalDeviceSamplerFilterMinmaxProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSamplerFilterMinmaxProperties* from,
+    VkPhysicalDeviceSamplerFilterMinmaxProperties* to);
+
+void deepcopy_VkPhysicalDeviceVulkanMemoryModelFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkanMemoryModelFeatures* from,
+    VkPhysicalDeviceVulkanMemoryModelFeatures* to);
+
+void deepcopy_VkPhysicalDeviceImagelessFramebufferFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImagelessFramebufferFeatures* from,
+    VkPhysicalDeviceImagelessFramebufferFeatures* to);
+
+void deepcopy_VkFramebufferAttachmentImageInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkFramebufferAttachmentImageInfo* from,
+    VkFramebufferAttachmentImageInfo* to);
+
+void deepcopy_VkFramebufferAttachmentsCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkFramebufferAttachmentsCreateInfo* from,
+    VkFramebufferAttachmentsCreateInfo* to);
+
+void deepcopy_VkRenderPassAttachmentBeginInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRenderPassAttachmentBeginInfo* from,
+    VkRenderPassAttachmentBeginInfo* to);
+
+void deepcopy_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceUniformBufferStandardLayoutFeatures* from,
+    VkPhysicalDeviceUniformBufferStandardLayoutFeatures* to);
+
+void deepcopy_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* from,
+    VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* to);
+
+void deepcopy_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* from,
+    VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* to);
+
+void deepcopy_VkAttachmentReferenceStencilLayout(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAttachmentReferenceStencilLayout* from,
+    VkAttachmentReferenceStencilLayout* to);
+
+void deepcopy_VkAttachmentDescriptionStencilLayout(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAttachmentDescriptionStencilLayout* from,
+    VkAttachmentDescriptionStencilLayout* to);
+
+void deepcopy_VkPhysicalDeviceHostQueryResetFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceHostQueryResetFeatures* from,
+    VkPhysicalDeviceHostQueryResetFeatures* to);
+
+void deepcopy_VkPhysicalDeviceTimelineSemaphoreFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTimelineSemaphoreFeatures* from,
+    VkPhysicalDeviceTimelineSemaphoreFeatures* to);
+
+void deepcopy_VkPhysicalDeviceTimelineSemaphoreProperties(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTimelineSemaphoreProperties* from,
+    VkPhysicalDeviceTimelineSemaphoreProperties* to);
+
+void deepcopy_VkSemaphoreTypeCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSemaphoreTypeCreateInfo* from,
+    VkSemaphoreTypeCreateInfo* to);
+
+void deepcopy_VkTimelineSemaphoreSubmitInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkTimelineSemaphoreSubmitInfo* from,
+    VkTimelineSemaphoreSubmitInfo* to);
+
+void deepcopy_VkSemaphoreWaitInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSemaphoreWaitInfo* from,
+    VkSemaphoreWaitInfo* to);
+
+void deepcopy_VkSemaphoreSignalInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSemaphoreSignalInfo* from,
+    VkSemaphoreSignalInfo* to);
+
+void deepcopy_VkPhysicalDeviceBufferDeviceAddressFeatures(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBufferDeviceAddressFeatures* from,
+    VkPhysicalDeviceBufferDeviceAddressFeatures* to);
+
+void deepcopy_VkBufferDeviceAddressInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBufferDeviceAddressInfo* from,
+    VkBufferDeviceAddressInfo* to);
+
+void deepcopy_VkBufferOpaqueCaptureAddressCreateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBufferOpaqueCaptureAddressCreateInfo* from,
+    VkBufferOpaqueCaptureAddressCreateInfo* to);
+
+void deepcopy_VkMemoryOpaqueCaptureAddressAllocateInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkMemoryOpaqueCaptureAddressAllocateInfo* from,
+    VkMemoryOpaqueCaptureAddressAllocateInfo* to);
+
+void deepcopy_VkDeviceMemoryOpaqueCaptureAddressInfo(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDeviceMemoryOpaqueCaptureAddressInfo* from,
+    VkDeviceMemoryOpaqueCaptureAddressInfo* to);
 
 #endif
 #ifdef VK_KHR_surface
 void deepcopy_VkSurfaceCapabilitiesKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSurfaceCapabilitiesKHR* from,
     VkSurfaceCapabilitiesKHR* to);
 
 void deepcopy_VkSurfaceFormatKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSurfaceFormatKHR* from,
     VkSurfaceFormatKHR* to);
 
 #endif
 #ifdef VK_KHR_swapchain
 void deepcopy_VkSwapchainCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSwapchainCreateInfoKHR* from,
     VkSwapchainCreateInfoKHR* to);
 
 void deepcopy_VkPresentInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPresentInfoKHR* from,
     VkPresentInfoKHR* to);
 
 void deepcopy_VkImageSwapchainCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImageSwapchainCreateInfoKHR* from,
     VkImageSwapchainCreateInfoKHR* to);
 
 void deepcopy_VkBindImageMemorySwapchainInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkBindImageMemorySwapchainInfoKHR* from,
     VkBindImageMemorySwapchainInfoKHR* to);
 
 void deepcopy_VkAcquireNextImageInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkAcquireNextImageInfoKHR* from,
     VkAcquireNextImageInfoKHR* to);
 
 void deepcopy_VkDeviceGroupPresentCapabilitiesKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceGroupPresentCapabilitiesKHR* from,
     VkDeviceGroupPresentCapabilitiesKHR* to);
 
 void deepcopy_VkDeviceGroupPresentInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceGroupPresentInfoKHR* from,
     VkDeviceGroupPresentInfoKHR* to);
 
 void deepcopy_VkDeviceGroupSwapchainCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceGroupSwapchainCreateInfoKHR* from,
     VkDeviceGroupSwapchainCreateInfoKHR* to);
 
 #endif
 #ifdef VK_KHR_display
-void deepcopy_VkDisplayPropertiesKHR(
-    Pool* pool,
-    const VkDisplayPropertiesKHR* from,
-    VkDisplayPropertiesKHR* to);
-
 void deepcopy_VkDisplayModeParametersKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayModeParametersKHR* from,
     VkDisplayModeParametersKHR* to);
 
-void deepcopy_VkDisplayModePropertiesKHR(
-    Pool* pool,
-    const VkDisplayModePropertiesKHR* from,
-    VkDisplayModePropertiesKHR* to);
-
 void deepcopy_VkDisplayModeCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayModeCreateInfoKHR* from,
     VkDisplayModeCreateInfoKHR* to);
 
+void deepcopy_VkDisplayModePropertiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDisplayModePropertiesKHR* from,
+    VkDisplayModePropertiesKHR* to);
+
 void deepcopy_VkDisplayPlaneCapabilitiesKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayPlaneCapabilitiesKHR* from,
     VkDisplayPlaneCapabilitiesKHR* to);
 
 void deepcopy_VkDisplayPlanePropertiesKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayPlanePropertiesKHR* from,
     VkDisplayPlanePropertiesKHR* to);
 
+void deepcopy_VkDisplayPropertiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDisplayPropertiesKHR* from,
+    VkDisplayPropertiesKHR* to);
+
 void deepcopy_VkDisplaySurfaceCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplaySurfaceCreateInfoKHR* from,
     VkDisplaySurfaceCreateInfoKHR* to);
 
 #endif
 #ifdef VK_KHR_display_swapchain
 void deepcopy_VkDisplayPresentInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayPresentInfoKHR* from,
     VkDisplayPresentInfoKHR* to);
 
 #endif
 #ifdef VK_KHR_xlib_surface
 void deepcopy_VkXlibSurfaceCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkXlibSurfaceCreateInfoKHR* from,
     VkXlibSurfaceCreateInfoKHR* to);
 
 #endif
 #ifdef VK_KHR_xcb_surface
 void deepcopy_VkXcbSurfaceCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkXcbSurfaceCreateInfoKHR* from,
     VkXcbSurfaceCreateInfoKHR* to);
 
 #endif
 #ifdef VK_KHR_wayland_surface
 void deepcopy_VkWaylandSurfaceCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkWaylandSurfaceCreateInfoKHR* from,
     VkWaylandSurfaceCreateInfoKHR* to);
 
 #endif
-#ifdef VK_KHR_mir_surface
-void deepcopy_VkMirSurfaceCreateInfoKHR(
-    Pool* pool,
-    const VkMirSurfaceCreateInfoKHR* from,
-    VkMirSurfaceCreateInfoKHR* to);
-
-#endif
 #ifdef VK_KHR_android_surface
 void deepcopy_VkAndroidSurfaceCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkAndroidSurfaceCreateInfoKHR* from,
     VkAndroidSurfaceCreateInfoKHR* to);
 
 #endif
 #ifdef VK_KHR_win32_surface
 void deepcopy_VkWin32SurfaceCreateInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkWin32SurfaceCreateInfoKHR* from,
     VkWin32SurfaceCreateInfoKHR* to);
 
@@ -1074,298 +1580,761 @@
 #ifdef VK_KHR_sampler_mirror_clamp_to_edge
 #endif
 #ifdef VK_KHR_multiview
+DEFINE_ALIAS_FUNCTION(deepcopy_VkRenderPassMultiviewCreateInfo, deepcopy_VkRenderPassMultiviewCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceMultiviewFeatures, deepcopy_VkPhysicalDeviceMultiviewFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceMultiviewProperties, deepcopy_VkPhysicalDeviceMultiviewPropertiesKHR);
+
 #endif
 #ifdef VK_KHR_get_physical_device_properties2
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceFeatures2, deepcopy_VkPhysicalDeviceFeatures2KHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceProperties2, deepcopy_VkPhysicalDeviceProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkFormatProperties2, deepcopy_VkFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkImageFormatProperties2, deepcopy_VkImageFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceImageFormatInfo2, deepcopy_VkPhysicalDeviceImageFormatInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkQueueFamilyProperties2, deepcopy_VkQueueFamilyProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceMemoryProperties2, deepcopy_VkPhysicalDeviceMemoryProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkSparseImageFormatProperties2, deepcopy_VkSparseImageFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceSparseImageFormatInfo2, deepcopy_VkPhysicalDeviceSparseImageFormatInfo2KHR);
+
 #endif
 #ifdef VK_KHR_device_group
+DEFINE_ALIAS_FUNCTION(deepcopy_VkMemoryAllocateFlagsInfo, deepcopy_VkMemoryAllocateFlagsInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkDeviceGroupRenderPassBeginInfo, deepcopy_VkDeviceGroupRenderPassBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkDeviceGroupCommandBufferBeginInfo, deepcopy_VkDeviceGroupCommandBufferBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkDeviceGroupSubmitInfo, deepcopy_VkDeviceGroupSubmitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkDeviceGroupBindSparseInfo, deepcopy_VkDeviceGroupBindSparseInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkBindBufferMemoryDeviceGroupInfo, deepcopy_VkBindBufferMemoryDeviceGroupInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkBindImageMemoryDeviceGroupInfo, deepcopy_VkBindImageMemoryDeviceGroupInfoKHR);
+
 #endif
 #ifdef VK_KHR_shader_draw_parameters
 #endif
 #ifdef VK_KHR_maintenance1
 #endif
 #ifdef VK_KHR_device_group_creation
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceGroupProperties, deepcopy_VkPhysicalDeviceGroupPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkDeviceGroupDeviceCreateInfo, deepcopy_VkDeviceGroupDeviceCreateInfoKHR);
+
 #endif
 #ifdef VK_KHR_external_memory_capabilities
+DEFINE_ALIAS_FUNCTION(deepcopy_VkExternalMemoryProperties, deepcopy_VkExternalMemoryPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceExternalImageFormatInfo, deepcopy_VkPhysicalDeviceExternalImageFormatInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkExternalImageFormatProperties, deepcopy_VkExternalImageFormatPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceExternalBufferInfo, deepcopy_VkPhysicalDeviceExternalBufferInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkExternalBufferProperties, deepcopy_VkExternalBufferPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceIDProperties, deepcopy_VkPhysicalDeviceIDPropertiesKHR);
+
 #endif
 #ifdef VK_KHR_external_memory
+DEFINE_ALIAS_FUNCTION(deepcopy_VkExternalMemoryImageCreateInfo, deepcopy_VkExternalMemoryImageCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkExternalMemoryBufferCreateInfo, deepcopy_VkExternalMemoryBufferCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkExportMemoryAllocateInfo, deepcopy_VkExportMemoryAllocateInfoKHR);
+
 #endif
 #ifdef VK_KHR_external_memory_win32
 void deepcopy_VkImportMemoryWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportMemoryWin32HandleInfoKHR* from,
     VkImportMemoryWin32HandleInfoKHR* to);
 
 void deepcopy_VkExportMemoryWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExportMemoryWin32HandleInfoKHR* from,
     VkExportMemoryWin32HandleInfoKHR* to);
 
 void deepcopy_VkMemoryWin32HandlePropertiesKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryWin32HandlePropertiesKHR* from,
     VkMemoryWin32HandlePropertiesKHR* to);
 
 void deepcopy_VkMemoryGetWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryGetWin32HandleInfoKHR* from,
     VkMemoryGetWin32HandleInfoKHR* to);
 
 #endif
 #ifdef VK_KHR_external_memory_fd
 void deepcopy_VkImportMemoryFdInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportMemoryFdInfoKHR* from,
     VkImportMemoryFdInfoKHR* to);
 
 void deepcopy_VkMemoryFdPropertiesKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryFdPropertiesKHR* from,
     VkMemoryFdPropertiesKHR* to);
 
 void deepcopy_VkMemoryGetFdInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryGetFdInfoKHR* from,
     VkMemoryGetFdInfoKHR* to);
 
 #endif
 #ifdef VK_KHR_win32_keyed_mutex
 void deepcopy_VkWin32KeyedMutexAcquireReleaseInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkWin32KeyedMutexAcquireReleaseInfoKHR* from,
     VkWin32KeyedMutexAcquireReleaseInfoKHR* to);
 
 #endif
 #ifdef VK_KHR_external_semaphore_capabilities
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceExternalSemaphoreInfo, deepcopy_VkPhysicalDeviceExternalSemaphoreInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkExternalSemaphoreProperties, deepcopy_VkExternalSemaphorePropertiesKHR);
+
 #endif
 #ifdef VK_KHR_external_semaphore
+DEFINE_ALIAS_FUNCTION(deepcopy_VkExportSemaphoreCreateInfo, deepcopy_VkExportSemaphoreCreateInfoKHR);
+
 #endif
 #ifdef VK_KHR_external_semaphore_win32
 void deepcopy_VkImportSemaphoreWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportSemaphoreWin32HandleInfoKHR* from,
     VkImportSemaphoreWin32HandleInfoKHR* to);
 
 void deepcopy_VkExportSemaphoreWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExportSemaphoreWin32HandleInfoKHR* from,
     VkExportSemaphoreWin32HandleInfoKHR* to);
 
 void deepcopy_VkD3D12FenceSubmitInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkD3D12FenceSubmitInfoKHR* from,
     VkD3D12FenceSubmitInfoKHR* to);
 
 void deepcopy_VkSemaphoreGetWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSemaphoreGetWin32HandleInfoKHR* from,
     VkSemaphoreGetWin32HandleInfoKHR* to);
 
 #endif
 #ifdef VK_KHR_external_semaphore_fd
 void deepcopy_VkImportSemaphoreFdInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportSemaphoreFdInfoKHR* from,
     VkImportSemaphoreFdInfoKHR* to);
 
 void deepcopy_VkSemaphoreGetFdInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSemaphoreGetFdInfoKHR* from,
     VkSemaphoreGetFdInfoKHR* to);
 
 #endif
 #ifdef VK_KHR_push_descriptor
 void deepcopy_VkPhysicalDevicePushDescriptorPropertiesKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDevicePushDescriptorPropertiesKHR* from,
     VkPhysicalDevicePushDescriptorPropertiesKHR* to);
 
 #endif
+#ifdef VK_KHR_shader_float16_int8
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceShaderFloat16Int8Features, deepcopy_VkPhysicalDeviceShaderFloat16Int8FeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceShaderFloat16Int8Features, deepcopy_VkPhysicalDeviceFloat16Int8FeaturesKHR);
+
+#endif
 #ifdef VK_KHR_16bit_storage
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDevice16BitStorageFeatures, deepcopy_VkPhysicalDevice16BitStorageFeaturesKHR);
+
 #endif
 #ifdef VK_KHR_incremental_present
 void deepcopy_VkRectLayerKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkRectLayerKHR* from,
     VkRectLayerKHR* to);
 
 void deepcopy_VkPresentRegionKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPresentRegionKHR* from,
     VkPresentRegionKHR* to);
 
 void deepcopy_VkPresentRegionsKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPresentRegionsKHR* from,
     VkPresentRegionsKHR* to);
 
 #endif
 #ifdef VK_KHR_descriptor_update_template
+DEFINE_ALIAS_FUNCTION(deepcopy_VkDescriptorUpdateTemplateEntry, deepcopy_VkDescriptorUpdateTemplateEntryKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkDescriptorUpdateTemplateCreateInfo, deepcopy_VkDescriptorUpdateTemplateCreateInfoKHR);
+
+#endif
+#ifdef VK_KHR_imageless_framebuffer
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceImagelessFramebufferFeatures, deepcopy_VkPhysicalDeviceImagelessFramebufferFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkFramebufferAttachmentsCreateInfo, deepcopy_VkFramebufferAttachmentsCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkFramebufferAttachmentImageInfo, deepcopy_VkFramebufferAttachmentImageInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkRenderPassAttachmentBeginInfo, deepcopy_VkRenderPassAttachmentBeginInfoKHR);
+
 #endif
 #ifdef VK_KHR_create_renderpass2
-void deepcopy_VkAttachmentDescription2KHR(
-    Pool* pool,
-    const VkAttachmentDescription2KHR* from,
-    VkAttachmentDescription2KHR* to);
+DEFINE_ALIAS_FUNCTION(deepcopy_VkRenderPassCreateInfo2, deepcopy_VkRenderPassCreateInfo2KHR);
 
-void deepcopy_VkAttachmentReference2KHR(
-    Pool* pool,
-    const VkAttachmentReference2KHR* from,
-    VkAttachmentReference2KHR* to);
+DEFINE_ALIAS_FUNCTION(deepcopy_VkAttachmentDescription2, deepcopy_VkAttachmentDescription2KHR);
 
-void deepcopy_VkSubpassDescription2KHR(
-    Pool* pool,
-    const VkSubpassDescription2KHR* from,
-    VkSubpassDescription2KHR* to);
+DEFINE_ALIAS_FUNCTION(deepcopy_VkAttachmentReference2, deepcopy_VkAttachmentReference2KHR);
 
-void deepcopy_VkSubpassDependency2KHR(
-    Pool* pool,
-    const VkSubpassDependency2KHR* from,
-    VkSubpassDependency2KHR* to);
+DEFINE_ALIAS_FUNCTION(deepcopy_VkSubpassDescription2, deepcopy_VkSubpassDescription2KHR);
 
-void deepcopy_VkRenderPassCreateInfo2KHR(
-    Pool* pool,
-    const VkRenderPassCreateInfo2KHR* from,
-    VkRenderPassCreateInfo2KHR* to);
+DEFINE_ALIAS_FUNCTION(deepcopy_VkSubpassDependency2, deepcopy_VkSubpassDependency2KHR);
 
-void deepcopy_VkSubpassBeginInfoKHR(
-    Pool* pool,
-    const VkSubpassBeginInfoKHR* from,
-    VkSubpassBeginInfoKHR* to);
+DEFINE_ALIAS_FUNCTION(deepcopy_VkSubpassBeginInfo, deepcopy_VkSubpassBeginInfoKHR);
 
-void deepcopy_VkSubpassEndInfoKHR(
-    Pool* pool,
-    const VkSubpassEndInfoKHR* from,
-    VkSubpassEndInfoKHR* to);
+DEFINE_ALIAS_FUNCTION(deepcopy_VkSubpassEndInfo, deepcopy_VkSubpassEndInfoKHR);
 
 #endif
 #ifdef VK_KHR_shared_presentable_image
 void deepcopy_VkSharedPresentSurfaceCapabilitiesKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSharedPresentSurfaceCapabilitiesKHR* from,
     VkSharedPresentSurfaceCapabilitiesKHR* to);
 
 #endif
 #ifdef VK_KHR_external_fence_capabilities
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceExternalFenceInfo, deepcopy_VkPhysicalDeviceExternalFenceInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkExternalFenceProperties, deepcopy_VkExternalFencePropertiesKHR);
+
 #endif
 #ifdef VK_KHR_external_fence
+DEFINE_ALIAS_FUNCTION(deepcopy_VkExportFenceCreateInfo, deepcopy_VkExportFenceCreateInfoKHR);
+
 #endif
 #ifdef VK_KHR_external_fence_win32
 void deepcopy_VkImportFenceWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportFenceWin32HandleInfoKHR* from,
     VkImportFenceWin32HandleInfoKHR* to);
 
 void deepcopy_VkExportFenceWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExportFenceWin32HandleInfoKHR* from,
     VkExportFenceWin32HandleInfoKHR* to);
 
 void deepcopy_VkFenceGetWin32HandleInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkFenceGetWin32HandleInfoKHR* from,
     VkFenceGetWin32HandleInfoKHR* to);
 
 #endif
 #ifdef VK_KHR_external_fence_fd
 void deepcopy_VkImportFenceFdInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportFenceFdInfoKHR* from,
     VkImportFenceFdInfoKHR* to);
 
 void deepcopy_VkFenceGetFdInfoKHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkFenceGetFdInfoKHR* from,
     VkFenceGetFdInfoKHR* to);
 
 #endif
+#ifdef VK_KHR_performance_query
+void deepcopy_VkPhysicalDevicePerformanceQueryFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevicePerformanceQueryFeaturesKHR* from,
+    VkPhysicalDevicePerformanceQueryFeaturesKHR* to);
+
+void deepcopy_VkPhysicalDevicePerformanceQueryPropertiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevicePerformanceQueryPropertiesKHR* from,
+    VkPhysicalDevicePerformanceQueryPropertiesKHR* to);
+
+void deepcopy_VkPerformanceCounterKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceCounterKHR* from,
+    VkPerformanceCounterKHR* to);
+
+void deepcopy_VkPerformanceCounterDescriptionKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceCounterDescriptionKHR* from,
+    VkPerformanceCounterDescriptionKHR* to);
+
+void deepcopy_VkQueryPoolPerformanceCreateInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkQueryPoolPerformanceCreateInfoKHR* from,
+    VkQueryPoolPerformanceCreateInfoKHR* to);
+
+void deepcopy_VkPerformanceCounterResultKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceCounterResultKHR* from,
+    VkPerformanceCounterResultKHR* to);
+
+void deepcopy_VkAcquireProfilingLockInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAcquireProfilingLockInfoKHR* from,
+    VkAcquireProfilingLockInfoKHR* to);
+
+void deepcopy_VkPerformanceQuerySubmitInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceQuerySubmitInfoKHR* from,
+    VkPerformanceQuerySubmitInfoKHR* to);
+
+#endif
 #ifdef VK_KHR_maintenance2
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDevicePointClippingProperties, deepcopy_VkPhysicalDevicePointClippingPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkRenderPassInputAttachmentAspectCreateInfo, deepcopy_VkRenderPassInputAttachmentAspectCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkInputAttachmentAspectReference, deepcopy_VkInputAttachmentAspectReferenceKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkImageViewUsageCreateInfo, deepcopy_VkImageViewUsageCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPipelineTessellationDomainOriginStateCreateInfo, deepcopy_VkPipelineTessellationDomainOriginStateCreateInfoKHR);
+
 #endif
 #ifdef VK_KHR_get_surface_capabilities2
 void deepcopy_VkPhysicalDeviceSurfaceInfo2KHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceSurfaceInfo2KHR* from,
     VkPhysicalDeviceSurfaceInfo2KHR* to);
 
 void deepcopy_VkSurfaceCapabilities2KHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSurfaceCapabilities2KHR* from,
     VkSurfaceCapabilities2KHR* to);
 
 void deepcopy_VkSurfaceFormat2KHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSurfaceFormat2KHR* from,
     VkSurfaceFormat2KHR* to);
 
 #endif
 #ifdef VK_KHR_variable_pointers
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceVariablePointersFeatures, deepcopy_VkPhysicalDeviceVariablePointerFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceVariablePointersFeatures, deepcopy_VkPhysicalDeviceVariablePointersFeaturesKHR);
+
 #endif
 #ifdef VK_KHR_get_display_properties2
 void deepcopy_VkDisplayProperties2KHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayProperties2KHR* from,
     VkDisplayProperties2KHR* to);
 
 void deepcopy_VkDisplayPlaneProperties2KHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayPlaneProperties2KHR* from,
     VkDisplayPlaneProperties2KHR* to);
 
 void deepcopy_VkDisplayModeProperties2KHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayModeProperties2KHR* from,
     VkDisplayModeProperties2KHR* to);
 
 void deepcopy_VkDisplayPlaneInfo2KHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayPlaneInfo2KHR* from,
     VkDisplayPlaneInfo2KHR* to);
 
 void deepcopy_VkDisplayPlaneCapabilities2KHR(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayPlaneCapabilities2KHR* from,
     VkDisplayPlaneCapabilities2KHR* to);
 
 #endif
 #ifdef VK_KHR_dedicated_allocation
+DEFINE_ALIAS_FUNCTION(deepcopy_VkMemoryDedicatedRequirements, deepcopy_VkMemoryDedicatedRequirementsKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkMemoryDedicatedAllocateInfo, deepcopy_VkMemoryDedicatedAllocateInfoKHR);
+
 #endif
 #ifdef VK_KHR_storage_buffer_storage_class
 #endif
 #ifdef VK_KHR_relaxed_block_layout
 #endif
 #ifdef VK_KHR_get_memory_requirements2
+DEFINE_ALIAS_FUNCTION(deepcopy_VkBufferMemoryRequirementsInfo2, deepcopy_VkBufferMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkImageMemoryRequirementsInfo2, deepcopy_VkImageMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkImageSparseMemoryRequirementsInfo2, deepcopy_VkImageSparseMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkMemoryRequirements2, deepcopy_VkMemoryRequirements2KHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkSparseImageMemoryRequirements2, deepcopy_VkSparseImageMemoryRequirements2KHR);
+
 #endif
 #ifdef VK_KHR_image_format_list
-void deepcopy_VkImageFormatListCreateInfoKHR(
-    Pool* pool,
-    const VkImageFormatListCreateInfoKHR* from,
-    VkImageFormatListCreateInfoKHR* to);
+DEFINE_ALIAS_FUNCTION(deepcopy_VkImageFormatListCreateInfo, deepcopy_VkImageFormatListCreateInfoKHR);
 
 #endif
 #ifdef VK_KHR_sampler_ycbcr_conversion
+DEFINE_ALIAS_FUNCTION(deepcopy_VkSamplerYcbcrConversionCreateInfo, deepcopy_VkSamplerYcbcrConversionCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkSamplerYcbcrConversionInfo, deepcopy_VkSamplerYcbcrConversionInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkBindImagePlaneMemoryInfo, deepcopy_VkBindImagePlaneMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkImagePlaneMemoryRequirementsInfo, deepcopy_VkImagePlaneMemoryRequirementsInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceSamplerYcbcrConversionFeatures, deepcopy_VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkSamplerYcbcrConversionImageFormatProperties, deepcopy_VkSamplerYcbcrConversionImageFormatPropertiesKHR);
+
 #endif
 #ifdef VK_KHR_bind_memory2
+DEFINE_ALIAS_FUNCTION(deepcopy_VkBindBufferMemoryInfo, deepcopy_VkBindBufferMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkBindImageMemoryInfo, deepcopy_VkBindImageMemoryInfoKHR);
+
+#endif
+#ifdef VK_KHR_portability_subset
+void deepcopy_VkPhysicalDevicePortabilitySubsetFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevicePortabilitySubsetFeaturesKHR* from,
+    VkPhysicalDevicePortabilitySubsetFeaturesKHR* to);
+
+void deepcopy_VkPhysicalDevicePortabilitySubsetPropertiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevicePortabilitySubsetPropertiesKHR* from,
+    VkPhysicalDevicePortabilitySubsetPropertiesKHR* to);
+
 #endif
 #ifdef VK_KHR_maintenance3
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceMaintenance3Properties, deepcopy_VkPhysicalDeviceMaintenance3PropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkDescriptorSetLayoutSupport, deepcopy_VkDescriptorSetLayoutSupportKHR);
+
 #endif
 #ifdef VK_KHR_draw_indirect_count
 #endif
+#ifdef VK_KHR_shader_subgroup_extended_types
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures, deepcopy_VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR);
+
+#endif
 #ifdef VK_KHR_8bit_storage
-void deepcopy_VkPhysicalDevice8BitStorageFeaturesKHR(
-    Pool* pool,
-    const VkPhysicalDevice8BitStorageFeaturesKHR* from,
-    VkPhysicalDevice8BitStorageFeaturesKHR* to);
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDevice8BitStorageFeatures, deepcopy_VkPhysicalDevice8BitStorageFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_shader_atomic_int64
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceShaderAtomicInt64Features, deepcopy_VkPhysicalDeviceShaderAtomicInt64FeaturesKHR);
+
+#endif
+#ifdef VK_KHR_shader_clock
+void deepcopy_VkPhysicalDeviceShaderClockFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderClockFeaturesKHR* from,
+    VkPhysicalDeviceShaderClockFeaturesKHR* to);
+
+#endif
+#ifdef VK_KHR_driver_properties
+DEFINE_ALIAS_FUNCTION(deepcopy_VkConformanceVersion, deepcopy_VkConformanceVersionKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceDriverProperties, deepcopy_VkPhysicalDeviceDriverPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_shader_float_controls
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceFloatControlsProperties, deepcopy_VkPhysicalDeviceFloatControlsPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+DEFINE_ALIAS_FUNCTION(deepcopy_VkSubpassDescriptionDepthStencilResolve, deepcopy_VkSubpassDescriptionDepthStencilResolveKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceDepthStencilResolveProperties, deepcopy_VkPhysicalDeviceDepthStencilResolvePropertiesKHR);
+
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceTimelineSemaphoreFeatures, deepcopy_VkPhysicalDeviceTimelineSemaphoreFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceTimelineSemaphoreProperties, deepcopy_VkPhysicalDeviceTimelineSemaphorePropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkSemaphoreTypeCreateInfo, deepcopy_VkSemaphoreTypeCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkTimelineSemaphoreSubmitInfo, deepcopy_VkTimelineSemaphoreSubmitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkSemaphoreWaitInfo, deepcopy_VkSemaphoreWaitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkSemaphoreSignalInfo, deepcopy_VkSemaphoreSignalInfoKHR);
+
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceVulkanMemoryModelFeatures, deepcopy_VkPhysicalDeviceVulkanMemoryModelFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+void deepcopy_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* from,
+    VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* to);
+
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+void deepcopy_VkFragmentShadingRateAttachmentInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkFragmentShadingRateAttachmentInfoKHR* from,
+    VkFragmentShadingRateAttachmentInfoKHR* to);
+
+void deepcopy_VkPipelineFragmentShadingRateStateCreateInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineFragmentShadingRateStateCreateInfoKHR* from,
+    VkPipelineFragmentShadingRateStateCreateInfoKHR* to);
+
+void deepcopy_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateFeaturesKHR* from,
+    VkPhysicalDeviceFragmentShadingRateFeaturesKHR* to);
+
+void deepcopy_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRatePropertiesKHR* from,
+    VkPhysicalDeviceFragmentShadingRatePropertiesKHR* to);
+
+void deepcopy_VkPhysicalDeviceFragmentShadingRateKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateKHR* from,
+    VkPhysicalDeviceFragmentShadingRateKHR* to);
+
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+void deepcopy_VkSurfaceProtectedCapabilitiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSurfaceProtectedCapabilitiesKHR* from,
+    VkSurfaceProtectedCapabilitiesKHR* to);
+
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures, deepcopy_VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkAttachmentReferenceStencilLayout, deepcopy_VkAttachmentReferenceStencilLayoutKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkAttachmentDescriptionStencilLayout, deepcopy_VkAttachmentDescriptionStencilLayoutKHR);
+
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceUniformBufferStandardLayoutFeatures, deepcopy_VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_buffer_device_address
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceBufferDeviceAddressFeatures, deepcopy_VkPhysicalDeviceBufferDeviceAddressFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkBufferDeviceAddressInfo, deepcopy_VkBufferDeviceAddressInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkBufferOpaqueCaptureAddressCreateInfo, deepcopy_VkBufferOpaqueCaptureAddressCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkMemoryOpaqueCaptureAddressAllocateInfo, deepcopy_VkMemoryOpaqueCaptureAddressAllocateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkDeviceMemoryOpaqueCaptureAddressInfo, deepcopy_VkDeviceMemoryOpaqueCaptureAddressInfoKHR);
+
+#endif
+#ifdef VK_KHR_deferred_host_operations
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+void deepcopy_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* from,
+    VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* to);
+
+void deepcopy_VkPipelineInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineInfoKHR* from,
+    VkPipelineInfoKHR* to);
+
+void deepcopy_VkPipelineExecutablePropertiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineExecutablePropertiesKHR* from,
+    VkPipelineExecutablePropertiesKHR* to);
+
+void deepcopy_VkPipelineExecutableInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineExecutableInfoKHR* from,
+    VkPipelineExecutableInfoKHR* to);
+
+void deepcopy_VkPipelineExecutableStatisticValueKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineExecutableStatisticValueKHR* from,
+    VkPipelineExecutableStatisticValueKHR* to);
+
+void deepcopy_VkPipelineExecutableStatisticKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineExecutableStatisticKHR* from,
+    VkPipelineExecutableStatisticKHR* to);
+
+void deepcopy_VkPipelineExecutableInternalRepresentationKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineExecutableInternalRepresentationKHR* from,
+    VkPipelineExecutableInternalRepresentationKHR* to);
+
+#endif
+#ifdef VK_KHR_pipeline_library
+void deepcopy_VkPipelineLibraryCreateInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineLibraryCreateInfoKHR* from,
+    VkPipelineLibraryCreateInfoKHR* to);
+
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+void deepcopy_VkBufferCopy2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBufferCopy2KHR* from,
+    VkBufferCopy2KHR* to);
+
+void deepcopy_VkCopyBufferInfo2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyBufferInfo2KHR* from,
+    VkCopyBufferInfo2KHR* to);
+
+void deepcopy_VkImageCopy2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageCopy2KHR* from,
+    VkImageCopy2KHR* to);
+
+void deepcopy_VkCopyImageInfo2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyImageInfo2KHR* from,
+    VkCopyImageInfo2KHR* to);
+
+void deepcopy_VkBufferImageCopy2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBufferImageCopy2KHR* from,
+    VkBufferImageCopy2KHR* to);
+
+void deepcopy_VkCopyBufferToImageInfo2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyBufferToImageInfo2KHR* from,
+    VkCopyBufferToImageInfo2KHR* to);
+
+void deepcopy_VkCopyImageToBufferInfo2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyImageToBufferInfo2KHR* from,
+    VkCopyImageToBufferInfo2KHR* to);
+
+void deepcopy_VkImageBlit2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageBlit2KHR* from,
+    VkImageBlit2KHR* to);
+
+void deepcopy_VkBlitImageInfo2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBlitImageInfo2KHR* from,
+    VkBlitImageInfo2KHR* to);
+
+void deepcopy_VkImageResolve2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageResolve2KHR* from,
+    VkImageResolve2KHR* to);
+
+void deepcopy_VkResolveImageInfo2KHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkResolveImageInfo2KHR* from,
+    VkResolveImageInfo2KHR* to);
 
 #endif
 #ifdef VK_ANDROID_native_buffer
 void deepcopy_VkNativeBufferANDROID(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkNativeBufferANDROID* from,
     VkNativeBufferANDROID* to);
 
 #endif
 #ifdef VK_EXT_debug_report
 void deepcopy_VkDebugReportCallbackCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDebugReportCallbackCreateInfoEXT* from,
     VkDebugReportCallbackCreateInfoEXT* to);
 
@@ -1378,7 +2347,8 @@
 #endif
 #ifdef VK_AMD_rasterization_order
 void deepcopy_VkPipelineRasterizationStateRasterizationOrderAMD(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineRasterizationStateRasterizationOrderAMD* from,
     VkPipelineRasterizationStateRasterizationOrderAMD* to);
 
@@ -1389,17 +2359,20 @@
 #endif
 #ifdef VK_EXT_debug_marker
 void deepcopy_VkDebugMarkerObjectNameInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDebugMarkerObjectNameInfoEXT* from,
     VkDebugMarkerObjectNameInfoEXT* to);
 
 void deepcopy_VkDebugMarkerObjectTagInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDebugMarkerObjectTagInfoEXT* from,
     VkDebugMarkerObjectTagInfoEXT* to);
 
 void deepcopy_VkDebugMarkerMarkerInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDebugMarkerMarkerInfoEXT* from,
     VkDebugMarkerMarkerInfoEXT* to);
 
@@ -1408,21 +2381,58 @@
 #endif
 #ifdef VK_NV_dedicated_allocation
 void deepcopy_VkDedicatedAllocationImageCreateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDedicatedAllocationImageCreateInfoNV* from,
     VkDedicatedAllocationImageCreateInfoNV* to);
 
 void deepcopy_VkDedicatedAllocationBufferCreateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDedicatedAllocationBufferCreateInfoNV* from,
     VkDedicatedAllocationBufferCreateInfoNV* to);
 
 void deepcopy_VkDedicatedAllocationMemoryAllocateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDedicatedAllocationMemoryAllocateInfoNV* from,
     VkDedicatedAllocationMemoryAllocateInfoNV* to);
 
 #endif
+#ifdef VK_EXT_transform_feedback
+void deepcopy_VkPhysicalDeviceTransformFeedbackFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTransformFeedbackFeaturesEXT* from,
+    VkPhysicalDeviceTransformFeedbackFeaturesEXT* to);
+
+void deepcopy_VkPhysicalDeviceTransformFeedbackPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTransformFeedbackPropertiesEXT* from,
+    VkPhysicalDeviceTransformFeedbackPropertiesEXT* to);
+
+void deepcopy_VkPipelineRasterizationStateStreamCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateStreamCreateInfoEXT* from,
+    VkPipelineRasterizationStateStreamCreateInfoEXT* to);
+
+#endif
+#ifdef VK_NVX_image_view_handle
+void deepcopy_VkImageViewHandleInfoNVX(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageViewHandleInfoNVX* from,
+    VkImageViewHandleInfoNVX* to);
+
+void deepcopy_VkImageViewAddressPropertiesNVX(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageViewAddressPropertiesNVX* from,
+    VkImageViewAddressPropertiesNVX* to);
+
+#endif
 #ifdef VK_AMD_draw_indirect_count
 #endif
 #ifdef VK_AMD_negative_viewport_height
@@ -1433,75 +2443,102 @@
 #endif
 #ifdef VK_AMD_texture_gather_bias_lod
 void deepcopy_VkTextureLODGatherFormatPropertiesAMD(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkTextureLODGatherFormatPropertiesAMD* from,
     VkTextureLODGatherFormatPropertiesAMD* to);
 
 #endif
 #ifdef VK_AMD_shader_info
 void deepcopy_VkShaderResourceUsageAMD(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkShaderResourceUsageAMD* from,
     VkShaderResourceUsageAMD* to);
 
 void deepcopy_VkShaderStatisticsInfoAMD(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkShaderStatisticsInfoAMD* from,
     VkShaderStatisticsInfoAMD* to);
 
 #endif
 #ifdef VK_AMD_shader_image_load_store_lod
 #endif
+#ifdef VK_GGP_stream_descriptor_surface
+void deepcopy_VkStreamDescriptorSurfaceCreateInfoGGP(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkStreamDescriptorSurfaceCreateInfoGGP* from,
+    VkStreamDescriptorSurfaceCreateInfoGGP* to);
+
+#endif
+#ifdef VK_NV_corner_sampled_image
+void deepcopy_VkPhysicalDeviceCornerSampledImageFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCornerSampledImageFeaturesNV* from,
+    VkPhysicalDeviceCornerSampledImageFeaturesNV* to);
+
+#endif
 #ifdef VK_IMG_format_pvrtc
 #endif
 #ifdef VK_NV_external_memory_capabilities
 void deepcopy_VkExternalImageFormatPropertiesNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalImageFormatPropertiesNV* from,
     VkExternalImageFormatPropertiesNV* to);
 
 #endif
 #ifdef VK_NV_external_memory
 void deepcopy_VkExternalMemoryImageCreateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalMemoryImageCreateInfoNV* from,
     VkExternalMemoryImageCreateInfoNV* to);
 
 void deepcopy_VkExportMemoryAllocateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExportMemoryAllocateInfoNV* from,
     VkExportMemoryAllocateInfoNV* to);
 
 #endif
 #ifdef VK_NV_external_memory_win32
 void deepcopy_VkImportMemoryWin32HandleInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportMemoryWin32HandleInfoNV* from,
     VkImportMemoryWin32HandleInfoNV* to);
 
 void deepcopy_VkExportMemoryWin32HandleInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExportMemoryWin32HandleInfoNV* from,
     VkExportMemoryWin32HandleInfoNV* to);
 
 #endif
 #ifdef VK_NV_win32_keyed_mutex
 void deepcopy_VkWin32KeyedMutexAcquireReleaseInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkWin32KeyedMutexAcquireReleaseInfoNV* from,
     VkWin32KeyedMutexAcquireReleaseInfoNV* to);
 
 #endif
 #ifdef VK_EXT_validation_flags
 void deepcopy_VkValidationFlagsEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkValidationFlagsEXT* from,
     VkValidationFlagsEXT* to);
 
 #endif
 #ifdef VK_NN_vi_surface
 void deepcopy_VkViSurfaceCreateInfoNN(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkViSurfaceCreateInfoNN* from,
     VkViSurfaceCreateInfoNN* to);
 
@@ -1510,103 +2547,58 @@
 #endif
 #ifdef VK_EXT_shader_subgroup_vote
 #endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+void deepcopy_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* from,
+    VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* to);
+
+#endif
+#ifdef VK_EXT_astc_decode_mode
+void deepcopy_VkImageViewASTCDecodeModeEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageViewASTCDecodeModeEXT* from,
+    VkImageViewASTCDecodeModeEXT* to);
+
+void deepcopy_VkPhysicalDeviceASTCDecodeFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceASTCDecodeFeaturesEXT* from,
+    VkPhysicalDeviceASTCDecodeFeaturesEXT* to);
+
+#endif
 #ifdef VK_EXT_conditional_rendering
 void deepcopy_VkConditionalRenderingBeginInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkConditionalRenderingBeginInfoEXT* from,
     VkConditionalRenderingBeginInfoEXT* to);
 
 void deepcopy_VkPhysicalDeviceConditionalRenderingFeaturesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceConditionalRenderingFeaturesEXT* from,
     VkPhysicalDeviceConditionalRenderingFeaturesEXT* to);
 
 void deepcopy_VkCommandBufferInheritanceConditionalRenderingInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkCommandBufferInheritanceConditionalRenderingInfoEXT* from,
     VkCommandBufferInheritanceConditionalRenderingInfoEXT* to);
 
 #endif
-#ifdef VK_NVX_device_generated_commands
-void deepcopy_VkDeviceGeneratedCommandsFeaturesNVX(
-    Pool* pool,
-    const VkDeviceGeneratedCommandsFeaturesNVX* from,
-    VkDeviceGeneratedCommandsFeaturesNVX* to);
-
-void deepcopy_VkDeviceGeneratedCommandsLimitsNVX(
-    Pool* pool,
-    const VkDeviceGeneratedCommandsLimitsNVX* from,
-    VkDeviceGeneratedCommandsLimitsNVX* to);
-
-void deepcopy_VkIndirectCommandsTokenNVX(
-    Pool* pool,
-    const VkIndirectCommandsTokenNVX* from,
-    VkIndirectCommandsTokenNVX* to);
-
-void deepcopy_VkIndirectCommandsLayoutTokenNVX(
-    Pool* pool,
-    const VkIndirectCommandsLayoutTokenNVX* from,
-    VkIndirectCommandsLayoutTokenNVX* to);
-
-void deepcopy_VkIndirectCommandsLayoutCreateInfoNVX(
-    Pool* pool,
-    const VkIndirectCommandsLayoutCreateInfoNVX* from,
-    VkIndirectCommandsLayoutCreateInfoNVX* to);
-
-void deepcopy_VkCmdProcessCommandsInfoNVX(
-    Pool* pool,
-    const VkCmdProcessCommandsInfoNVX* from,
-    VkCmdProcessCommandsInfoNVX* to);
-
-void deepcopy_VkCmdReserveSpaceForCommandsInfoNVX(
-    Pool* pool,
-    const VkCmdReserveSpaceForCommandsInfoNVX* from,
-    VkCmdReserveSpaceForCommandsInfoNVX* to);
-
-void deepcopy_VkObjectTableCreateInfoNVX(
-    Pool* pool,
-    const VkObjectTableCreateInfoNVX* from,
-    VkObjectTableCreateInfoNVX* to);
-
-void deepcopy_VkObjectTableEntryNVX(
-    Pool* pool,
-    const VkObjectTableEntryNVX* from,
-    VkObjectTableEntryNVX* to);
-
-void deepcopy_VkObjectTablePipelineEntryNVX(
-    Pool* pool,
-    const VkObjectTablePipelineEntryNVX* from,
-    VkObjectTablePipelineEntryNVX* to);
-
-void deepcopy_VkObjectTableDescriptorSetEntryNVX(
-    Pool* pool,
-    const VkObjectTableDescriptorSetEntryNVX* from,
-    VkObjectTableDescriptorSetEntryNVX* to);
-
-void deepcopy_VkObjectTableVertexBufferEntryNVX(
-    Pool* pool,
-    const VkObjectTableVertexBufferEntryNVX* from,
-    VkObjectTableVertexBufferEntryNVX* to);
-
-void deepcopy_VkObjectTableIndexBufferEntryNVX(
-    Pool* pool,
-    const VkObjectTableIndexBufferEntryNVX* from,
-    VkObjectTableIndexBufferEntryNVX* to);
-
-void deepcopy_VkObjectTablePushConstantEntryNVX(
-    Pool* pool,
-    const VkObjectTablePushConstantEntryNVX* from,
-    VkObjectTablePushConstantEntryNVX* to);
-
-#endif
 #ifdef VK_NV_clip_space_w_scaling
 void deepcopy_VkViewportWScalingNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkViewportWScalingNV* from,
     VkViewportWScalingNV* to);
 
 void deepcopy_VkPipelineViewportWScalingStateCreateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineViewportWScalingStateCreateInfoNV* from,
     VkPipelineViewportWScalingStateCreateInfoNV* to);
 
@@ -1617,51 +2609,60 @@
 #endif
 #ifdef VK_EXT_display_surface_counter
 void deepcopy_VkSurfaceCapabilities2EXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSurfaceCapabilities2EXT* from,
     VkSurfaceCapabilities2EXT* to);
 
 #endif
 #ifdef VK_EXT_display_control
 void deepcopy_VkDisplayPowerInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayPowerInfoEXT* from,
     VkDisplayPowerInfoEXT* to);
 
 void deepcopy_VkDeviceEventInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceEventInfoEXT* from,
     VkDeviceEventInfoEXT* to);
 
 void deepcopy_VkDisplayEventInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDisplayEventInfoEXT* from,
     VkDisplayEventInfoEXT* to);
 
 void deepcopy_VkSwapchainCounterCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSwapchainCounterCreateInfoEXT* from,
     VkSwapchainCounterCreateInfoEXT* to);
 
 #endif
 #ifdef VK_GOOGLE_display_timing
 void deepcopy_VkRefreshCycleDurationGOOGLE(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkRefreshCycleDurationGOOGLE* from,
     VkRefreshCycleDurationGOOGLE* to);
 
 void deepcopy_VkPastPresentationTimingGOOGLE(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPastPresentationTimingGOOGLE* from,
     VkPastPresentationTimingGOOGLE* to);
 
 void deepcopy_VkPresentTimeGOOGLE(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPresentTimeGOOGLE* from,
     VkPresentTimeGOOGLE* to);
 
 void deepcopy_VkPresentTimesInfoGOOGLE(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPresentTimesInfoGOOGLE* from,
     VkPresentTimesInfoGOOGLE* to);
 
@@ -1674,148 +2675,180 @@
 #endif
 #ifdef VK_NVX_multiview_per_view_attributes
 void deepcopy_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX* from,
     VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX* to);
 
 #endif
 #ifdef VK_NV_viewport_swizzle
 void deepcopy_VkViewportSwizzleNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkViewportSwizzleNV* from,
     VkViewportSwizzleNV* to);
 
 void deepcopy_VkPipelineViewportSwizzleStateCreateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineViewportSwizzleStateCreateInfoNV* from,
     VkPipelineViewportSwizzleStateCreateInfoNV* to);
 
 #endif
 #ifdef VK_EXT_discard_rectangles
 void deepcopy_VkPhysicalDeviceDiscardRectanglePropertiesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceDiscardRectanglePropertiesEXT* from,
     VkPhysicalDeviceDiscardRectanglePropertiesEXT* to);
 
 void deepcopy_VkPipelineDiscardRectangleStateCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineDiscardRectangleStateCreateInfoEXT* from,
     VkPipelineDiscardRectangleStateCreateInfoEXT* to);
 
 #endif
 #ifdef VK_EXT_conservative_rasterization
 void deepcopy_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceConservativeRasterizationPropertiesEXT* from,
     VkPhysicalDeviceConservativeRasterizationPropertiesEXT* to);
 
 void deepcopy_VkPipelineRasterizationConservativeStateCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineRasterizationConservativeStateCreateInfoEXT* from,
     VkPipelineRasterizationConservativeStateCreateInfoEXT* to);
 
 #endif
+#ifdef VK_EXT_depth_clip_enable
+void deepcopy_VkPhysicalDeviceDepthClipEnableFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDepthClipEnableFeaturesEXT* from,
+    VkPhysicalDeviceDepthClipEnableFeaturesEXT* to);
+
+void deepcopy_VkPipelineRasterizationDepthClipStateCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineRasterizationDepthClipStateCreateInfoEXT* from,
+    VkPipelineRasterizationDepthClipStateCreateInfoEXT* to);
+
+#endif
 #ifdef VK_EXT_swapchain_colorspace
 #endif
 #ifdef VK_EXT_hdr_metadata
 void deepcopy_VkXYColorEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkXYColorEXT* from,
     VkXYColorEXT* to);
 
 void deepcopy_VkHdrMetadataEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkHdrMetadataEXT* from,
     VkHdrMetadataEXT* to);
 
 #endif
 #ifdef VK_MVK_ios_surface
 void deepcopy_VkIOSSurfaceCreateInfoMVK(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkIOSSurfaceCreateInfoMVK* from,
     VkIOSSurfaceCreateInfoMVK* to);
 
 #endif
 #ifdef VK_MVK_macos_surface
 void deepcopy_VkMacOSSurfaceCreateInfoMVK(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMacOSSurfaceCreateInfoMVK* from,
     VkMacOSSurfaceCreateInfoMVK* to);
 
 #endif
+#ifdef VK_MVK_moltenvk
+#endif
 #ifdef VK_EXT_external_memory_dma_buf
 #endif
 #ifdef VK_EXT_queue_family_foreign
 #endif
 #ifdef VK_EXT_debug_utils
-void deepcopy_VkDebugUtilsObjectNameInfoEXT(
-    Pool* pool,
-    const VkDebugUtilsObjectNameInfoEXT* from,
-    VkDebugUtilsObjectNameInfoEXT* to);
-
-void deepcopy_VkDebugUtilsObjectTagInfoEXT(
-    Pool* pool,
-    const VkDebugUtilsObjectTagInfoEXT* from,
-    VkDebugUtilsObjectTagInfoEXT* to);
-
 void deepcopy_VkDebugUtilsLabelEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDebugUtilsLabelEXT* from,
     VkDebugUtilsLabelEXT* to);
 
+void deepcopy_VkDebugUtilsObjectNameInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDebugUtilsObjectNameInfoEXT* from,
+    VkDebugUtilsObjectNameInfoEXT* to);
+
 void deepcopy_VkDebugUtilsMessengerCallbackDataEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDebugUtilsMessengerCallbackDataEXT* from,
     VkDebugUtilsMessengerCallbackDataEXT* to);
 
 void deepcopy_VkDebugUtilsMessengerCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDebugUtilsMessengerCreateInfoEXT* from,
     VkDebugUtilsMessengerCreateInfoEXT* to);
 
+void deepcopy_VkDebugUtilsObjectTagInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDebugUtilsObjectTagInfoEXT* from,
+    VkDebugUtilsObjectTagInfoEXT* to);
+
 #endif
 #ifdef VK_ANDROID_external_memory_android_hardware_buffer
 void deepcopy_VkAndroidHardwareBufferUsageANDROID(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkAndroidHardwareBufferUsageANDROID* from,
     VkAndroidHardwareBufferUsageANDROID* to);
 
 void deepcopy_VkAndroidHardwareBufferPropertiesANDROID(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkAndroidHardwareBufferPropertiesANDROID* from,
     VkAndroidHardwareBufferPropertiesANDROID* to);
 
 void deepcopy_VkAndroidHardwareBufferFormatPropertiesANDROID(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkAndroidHardwareBufferFormatPropertiesANDROID* from,
     VkAndroidHardwareBufferFormatPropertiesANDROID* to);
 
 void deepcopy_VkImportAndroidHardwareBufferInfoANDROID(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportAndroidHardwareBufferInfoANDROID* from,
     VkImportAndroidHardwareBufferInfoANDROID* to);
 
 void deepcopy_VkMemoryGetAndroidHardwareBufferInfoANDROID(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryGetAndroidHardwareBufferInfoANDROID* from,
     VkMemoryGetAndroidHardwareBufferInfoANDROID* to);
 
 void deepcopy_VkExternalFormatANDROID(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkExternalFormatANDROID* from,
     VkExternalFormatANDROID* to);
 
 #endif
 #ifdef VK_EXT_sampler_filter_minmax
-void deepcopy_VkSamplerReductionModeCreateInfoEXT(
-    Pool* pool,
-    const VkSamplerReductionModeCreateInfoEXT* from,
-    VkSamplerReductionModeCreateInfoEXT* to);
+DEFINE_ALIAS_FUNCTION(deepcopy_VkSamplerReductionModeCreateInfo, deepcopy_VkSamplerReductionModeCreateInfoEXT);
 
-void deepcopy_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(
-    Pool* pool,
-    const VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT* from,
-    VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT* to);
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceSamplerFilterMinmaxProperties, deepcopy_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT);
 
 #endif
 #ifdef VK_AMD_gpu_shader_int16
@@ -1824,213 +2857,1399 @@
 #endif
 #ifdef VK_AMD_shader_fragment_mask
 #endif
+#ifdef VK_EXT_inline_uniform_block
+void deepcopy_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceInlineUniformBlockFeaturesEXT* from,
+    VkPhysicalDeviceInlineUniformBlockFeaturesEXT* to);
+
+void deepcopy_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceInlineUniformBlockPropertiesEXT* from,
+    VkPhysicalDeviceInlineUniformBlockPropertiesEXT* to);
+
+void deepcopy_VkWriteDescriptorSetInlineUniformBlockEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetInlineUniformBlockEXT* from,
+    VkWriteDescriptorSetInlineUniformBlockEXT* to);
+
+void deepcopy_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDescriptorPoolInlineUniformBlockCreateInfoEXT* from,
+    VkDescriptorPoolInlineUniformBlockCreateInfoEXT* to);
+
+#endif
 #ifdef VK_EXT_shader_stencil_export
 #endif
 #ifdef VK_EXT_sample_locations
 void deepcopy_VkSampleLocationEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSampleLocationEXT* from,
     VkSampleLocationEXT* to);
 
 void deepcopy_VkSampleLocationsInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSampleLocationsInfoEXT* from,
     VkSampleLocationsInfoEXT* to);
 
 void deepcopy_VkAttachmentSampleLocationsEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkAttachmentSampleLocationsEXT* from,
     VkAttachmentSampleLocationsEXT* to);
 
 void deepcopy_VkSubpassSampleLocationsEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkSubpassSampleLocationsEXT* from,
     VkSubpassSampleLocationsEXT* to);
 
 void deepcopy_VkRenderPassSampleLocationsBeginInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkRenderPassSampleLocationsBeginInfoEXT* from,
     VkRenderPassSampleLocationsBeginInfoEXT* to);
 
 void deepcopy_VkPipelineSampleLocationsStateCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineSampleLocationsStateCreateInfoEXT* from,
     VkPipelineSampleLocationsStateCreateInfoEXT* to);
 
 void deepcopy_VkPhysicalDeviceSampleLocationsPropertiesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceSampleLocationsPropertiesEXT* from,
     VkPhysicalDeviceSampleLocationsPropertiesEXT* to);
 
 void deepcopy_VkMultisamplePropertiesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMultisamplePropertiesEXT* from,
     VkMultisamplePropertiesEXT* to);
 
 #endif
 #ifdef VK_EXT_blend_operation_advanced
 void deepcopy_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* from,
     VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* to);
 
 void deepcopy_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT* from,
     VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT* to);
 
 void deepcopy_VkPipelineColorBlendAdvancedStateCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineColorBlendAdvancedStateCreateInfoEXT* from,
     VkPipelineColorBlendAdvancedStateCreateInfoEXT* to);
 
 #endif
 #ifdef VK_NV_fragment_coverage_to_color
 void deepcopy_VkPipelineCoverageToColorStateCreateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineCoverageToColorStateCreateInfoNV* from,
     VkPipelineCoverageToColorStateCreateInfoNV* to);
 
 #endif
 #ifdef VK_NV_framebuffer_mixed_samples
 void deepcopy_VkPipelineCoverageModulationStateCreateInfoNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineCoverageModulationStateCreateInfoNV* from,
     VkPipelineCoverageModulationStateCreateInfoNV* to);
 
 #endif
 #ifdef VK_NV_fill_rectangle
 #endif
+#ifdef VK_NV_shader_sm_builtins
+void deepcopy_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* from,
+    VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* to);
+
+void deepcopy_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* from,
+    VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* to);
+
+#endif
 #ifdef VK_EXT_post_depth_coverage
 #endif
+#ifdef VK_EXT_image_drm_format_modifier
+void deepcopy_VkDrmFormatModifierPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDrmFormatModifierPropertiesEXT* from,
+    VkDrmFormatModifierPropertiesEXT* to);
+
+void deepcopy_VkDrmFormatModifierPropertiesListEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDrmFormatModifierPropertiesListEXT* from,
+    VkDrmFormatModifierPropertiesListEXT* to);
+
+void deepcopy_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageDrmFormatModifierInfoEXT* from,
+    VkPhysicalDeviceImageDrmFormatModifierInfoEXT* to);
+
+void deepcopy_VkImageDrmFormatModifierListCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierListCreateInfoEXT* from,
+    VkImageDrmFormatModifierListCreateInfoEXT* to);
+
+void deepcopy_VkImageDrmFormatModifierExplicitCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierExplicitCreateInfoEXT* from,
+    VkImageDrmFormatModifierExplicitCreateInfoEXT* to);
+
+void deepcopy_VkImageDrmFormatModifierPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierPropertiesEXT* from,
+    VkImageDrmFormatModifierPropertiesEXT* to);
+
+#endif
 #ifdef VK_EXT_validation_cache
 void deepcopy_VkValidationCacheCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkValidationCacheCreateInfoEXT* from,
     VkValidationCacheCreateInfoEXT* to);
 
 void deepcopy_VkShaderModuleValidationCacheCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkShaderModuleValidationCacheCreateInfoEXT* from,
     VkShaderModuleValidationCacheCreateInfoEXT* to);
 
 #endif
 #ifdef VK_EXT_descriptor_indexing
-void deepcopy_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(
-    Pool* pool,
-    const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT* from,
-    VkDescriptorSetLayoutBindingFlagsCreateInfoEXT* to);
+DEFINE_ALIAS_FUNCTION(deepcopy_VkDescriptorSetLayoutBindingFlagsCreateInfo, deepcopy_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT);
 
-void deepcopy_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(
-    Pool* pool,
-    const VkPhysicalDeviceDescriptorIndexingFeaturesEXT* from,
-    VkPhysicalDeviceDescriptorIndexingFeaturesEXT* to);
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceDescriptorIndexingFeatures, deepcopy_VkPhysicalDeviceDescriptorIndexingFeaturesEXT);
 
-void deepcopy_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(
-    Pool* pool,
-    const VkPhysicalDeviceDescriptorIndexingPropertiesEXT* from,
-    VkPhysicalDeviceDescriptorIndexingPropertiesEXT* to);
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceDescriptorIndexingProperties, deepcopy_VkPhysicalDeviceDescriptorIndexingPropertiesEXT);
 
-void deepcopy_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(
-    Pool* pool,
-    const VkDescriptorSetVariableDescriptorCountAllocateInfoEXT* from,
-    VkDescriptorSetVariableDescriptorCountAllocateInfoEXT* to);
+DEFINE_ALIAS_FUNCTION(deepcopy_VkDescriptorSetVariableDescriptorCountAllocateInfo, deepcopy_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT);
 
-void deepcopy_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(
-    Pool* pool,
-    const VkDescriptorSetVariableDescriptorCountLayoutSupportEXT* from,
-    VkDescriptorSetVariableDescriptorCountLayoutSupportEXT* to);
+DEFINE_ALIAS_FUNCTION(deepcopy_VkDescriptorSetVariableDescriptorCountLayoutSupport, deepcopy_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT);
 
 #endif
 #ifdef VK_EXT_shader_viewport_index_layer
 #endif
+#ifdef VK_NV_shading_rate_image
+void deepcopy_VkShadingRatePaletteNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkShadingRatePaletteNV* from,
+    VkShadingRatePaletteNV* to);
+
+void deepcopy_VkPipelineViewportShadingRateImageStateCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineViewportShadingRateImageStateCreateInfoNV* from,
+    VkPipelineViewportShadingRateImageStateCreateInfoNV* to);
+
+void deepcopy_VkPhysicalDeviceShadingRateImageFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShadingRateImageFeaturesNV* from,
+    VkPhysicalDeviceShadingRateImageFeaturesNV* to);
+
+void deepcopy_VkPhysicalDeviceShadingRateImagePropertiesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShadingRateImagePropertiesNV* from,
+    VkPhysicalDeviceShadingRateImagePropertiesNV* to);
+
+void deepcopy_VkCoarseSampleLocationNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCoarseSampleLocationNV* from,
+    VkCoarseSampleLocationNV* to);
+
+void deepcopy_VkCoarseSampleOrderCustomNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCoarseSampleOrderCustomNV* from,
+    VkCoarseSampleOrderCustomNV* to);
+
+void deepcopy_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* from,
+    VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* to);
+
+#endif
+#ifdef VK_NV_ray_tracing
+void deepcopy_VkRayTracingShaderGroupCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRayTracingShaderGroupCreateInfoNV* from,
+    VkRayTracingShaderGroupCreateInfoNV* to);
+
+void deepcopy_VkRayTracingPipelineCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRayTracingPipelineCreateInfoNV* from,
+    VkRayTracingPipelineCreateInfoNV* to);
+
+void deepcopy_VkGeometryTrianglesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkGeometryTrianglesNV* from,
+    VkGeometryTrianglesNV* to);
+
+void deepcopy_VkGeometryAABBNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkGeometryAABBNV* from,
+    VkGeometryAABBNV* to);
+
+void deepcopy_VkGeometryDataNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkGeometryDataNV* from,
+    VkGeometryDataNV* to);
+
+void deepcopy_VkGeometryNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkGeometryNV* from,
+    VkGeometryNV* to);
+
+void deepcopy_VkAccelerationStructureInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureInfoNV* from,
+    VkAccelerationStructureInfoNV* to);
+
+void deepcopy_VkAccelerationStructureCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureCreateInfoNV* from,
+    VkAccelerationStructureCreateInfoNV* to);
+
+void deepcopy_VkBindAccelerationStructureMemoryInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBindAccelerationStructureMemoryInfoNV* from,
+    VkBindAccelerationStructureMemoryInfoNV* to);
+
+void deepcopy_VkWriteDescriptorSetAccelerationStructureNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetAccelerationStructureNV* from,
+    VkWriteDescriptorSetAccelerationStructureNV* to);
+
+void deepcopy_VkAccelerationStructureMemoryRequirementsInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureMemoryRequirementsInfoNV* from,
+    VkAccelerationStructureMemoryRequirementsInfoNV* to);
+
+void deepcopy_VkPhysicalDeviceRayTracingPropertiesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPropertiesNV* from,
+    VkPhysicalDeviceRayTracingPropertiesNV* to);
+
+void deepcopy_VkTransformMatrixKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkTransformMatrixKHR* from,
+    VkTransformMatrixKHR* to);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkTransformMatrixKHR, deepcopy_VkTransformMatrixNV);
+
+void deepcopy_VkAabbPositionsKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAabbPositionsKHR* from,
+    VkAabbPositionsKHR* to);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkAabbPositionsKHR, deepcopy_VkAabbPositionsNV);
+
+void deepcopy_VkAccelerationStructureInstanceKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureInstanceKHR* from,
+    VkAccelerationStructureInstanceKHR* to);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkAccelerationStructureInstanceKHR, deepcopy_VkAccelerationStructureInstanceNV);
+
+#endif
+#ifdef VK_NV_representative_fragment_test
+void deepcopy_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* from,
+    VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* to);
+
+void deepcopy_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineRepresentativeFragmentTestStateCreateInfoNV* from,
+    VkPipelineRepresentativeFragmentTestStateCreateInfoNV* to);
+
+#endif
+#ifdef VK_EXT_filter_cubic
+void deepcopy_VkPhysicalDeviceImageViewImageFormatInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageViewImageFormatInfoEXT* from,
+    VkPhysicalDeviceImageViewImageFormatInfoEXT* to);
+
+void deepcopy_VkFilterCubicImageViewImageFormatPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkFilterCubicImageViewImageFormatPropertiesEXT* from,
+    VkFilterCubicImageViewImageFormatPropertiesEXT* to);
+
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
+#endif
 #ifdef VK_EXT_global_priority
 void deepcopy_VkDeviceQueueGlobalPriorityCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkDeviceQueueGlobalPriorityCreateInfoEXT* from,
     VkDeviceQueueGlobalPriorityCreateInfoEXT* to);
 
 #endif
 #ifdef VK_EXT_external_memory_host
 void deepcopy_VkImportMemoryHostPointerInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportMemoryHostPointerInfoEXT* from,
     VkImportMemoryHostPointerInfoEXT* to);
 
 void deepcopy_VkMemoryHostPointerPropertiesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkMemoryHostPointerPropertiesEXT* from,
     VkMemoryHostPointerPropertiesEXT* to);
 
 void deepcopy_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalMemoryHostPropertiesEXT* from,
     VkPhysicalDeviceExternalMemoryHostPropertiesEXT* to);
 
 #endif
 #ifdef VK_AMD_buffer_marker
 #endif
+#ifdef VK_AMD_pipeline_compiler_control
+void deepcopy_VkPipelineCompilerControlCreateInfoAMD(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineCompilerControlCreateInfoAMD* from,
+    VkPipelineCompilerControlCreateInfoAMD* to);
+
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+void deepcopy_VkCalibratedTimestampInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCalibratedTimestampInfoEXT* from,
+    VkCalibratedTimestampInfoEXT* to);
+
+#endif
 #ifdef VK_AMD_shader_core_properties
 void deepcopy_VkPhysicalDeviceShaderCorePropertiesAMD(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceShaderCorePropertiesAMD* from,
     VkPhysicalDeviceShaderCorePropertiesAMD* to);
 
 #endif
+#ifdef VK_AMD_memory_overallocation_behavior
+void deepcopy_VkDeviceMemoryOverallocationCreateInfoAMD(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDeviceMemoryOverallocationCreateInfoAMD* from,
+    VkDeviceMemoryOverallocationCreateInfoAMD* to);
+
+#endif
 #ifdef VK_EXT_vertex_attribute_divisor
 void deepcopy_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT* from,
     VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT* to);
 
 void deepcopy_VkVertexInputBindingDivisorDescriptionEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkVertexInputBindingDivisorDescriptionEXT* from,
     VkVertexInputBindingDivisorDescriptionEXT* to);
 
 void deepcopy_VkPipelineVertexInputDivisorStateCreateInfoEXT(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkPipelineVertexInputDivisorStateCreateInfoEXT* from,
     VkPipelineVertexInputDivisorStateCreateInfoEXT* to);
 
+void deepcopy_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* from,
+    VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* to);
+
+#endif
+#ifdef VK_GGP_frame_token
+void deepcopy_VkPresentFrameTokenGGP(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPresentFrameTokenGGP* from,
+    VkPresentFrameTokenGGP* to);
+
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+void deepcopy_VkPipelineCreationFeedbackEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineCreationFeedbackEXT* from,
+    VkPipelineCreationFeedbackEXT* to);
+
+void deepcopy_VkPipelineCreationFeedbackCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineCreationFeedbackCreateInfoEXT* from,
+    VkPipelineCreationFeedbackCreateInfoEXT* to);
+
 #endif
 #ifdef VK_NV_shader_subgroup_partitioned
 #endif
+#ifdef VK_NV_compute_shader_derivatives
+void deepcopy_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* from,
+    VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* to);
+
+#endif
+#ifdef VK_NV_mesh_shader
+void deepcopy_VkPhysicalDeviceMeshShaderFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMeshShaderFeaturesNV* from,
+    VkPhysicalDeviceMeshShaderFeaturesNV* to);
+
+void deepcopy_VkPhysicalDeviceMeshShaderPropertiesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMeshShaderPropertiesNV* from,
+    VkPhysicalDeviceMeshShaderPropertiesNV* to);
+
+void deepcopy_VkDrawMeshTasksIndirectCommandNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDrawMeshTasksIndirectCommandNV* from,
+    VkDrawMeshTasksIndirectCommandNV* to);
+
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+void deepcopy_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* from,
+    VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* to);
+
+#endif
+#ifdef VK_NV_shader_image_footprint
+void deepcopy_VkPhysicalDeviceShaderImageFootprintFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderImageFootprintFeaturesNV* from,
+    VkPhysicalDeviceShaderImageFootprintFeaturesNV* to);
+
+#endif
+#ifdef VK_NV_scissor_exclusive
+void deepcopy_VkPipelineViewportExclusiveScissorStateCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineViewportExclusiveScissorStateCreateInfoNV* from,
+    VkPipelineViewportExclusiveScissorStateCreateInfoNV* to);
+
+void deepcopy_VkPhysicalDeviceExclusiveScissorFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExclusiveScissorFeaturesNV* from,
+    VkPhysicalDeviceExclusiveScissorFeaturesNV* to);
+
+#endif
 #ifdef VK_NV_device_diagnostic_checkpoints
 void deepcopy_VkQueueFamilyCheckpointPropertiesNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkQueueFamilyCheckpointPropertiesNV* from,
     VkQueueFamilyCheckpointPropertiesNV* to);
 
 void deepcopy_VkCheckpointDataNV(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkCheckpointDataNV* from,
     VkCheckpointDataNV* to);
 
 #endif
-#ifdef VK_GOOGLE_address_space
+#ifdef VK_INTEL_shader_integer_functions2
+void deepcopy_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* from,
+    VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* to);
+
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_INTEL_performance_query
+void deepcopy_VkPerformanceValueDataINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceValueDataINTEL* from,
+    VkPerformanceValueDataINTEL* to);
+
+void deepcopy_VkPerformanceValueINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceValueINTEL* from,
+    VkPerformanceValueINTEL* to);
+
+void deepcopy_VkInitializePerformanceApiInfoINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkInitializePerformanceApiInfoINTEL* from,
+    VkInitializePerformanceApiInfoINTEL* to);
+
+void deepcopy_VkQueryPoolPerformanceQueryCreateInfoINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkQueryPoolPerformanceQueryCreateInfoINTEL* from,
+    VkQueryPoolPerformanceQueryCreateInfoINTEL* to);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkQueryPoolPerformanceQueryCreateInfoINTEL, deepcopy_VkQueryPoolCreateInfoINTEL);
+
+void deepcopy_VkPerformanceMarkerInfoINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceMarkerInfoINTEL* from,
+    VkPerformanceMarkerInfoINTEL* to);
+
+void deepcopy_VkPerformanceStreamMarkerInfoINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceStreamMarkerInfoINTEL* from,
+    VkPerformanceStreamMarkerInfoINTEL* to);
+
+void deepcopy_VkPerformanceOverrideInfoINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceOverrideInfoINTEL* from,
+    VkPerformanceOverrideInfoINTEL* to);
+
+void deepcopy_VkPerformanceConfigurationAcquireInfoINTEL(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPerformanceConfigurationAcquireInfoINTEL* from,
+    VkPerformanceConfigurationAcquireInfoINTEL* to);
+
+#endif
+#ifdef VK_EXT_pci_bus_info
+void deepcopy_VkPhysicalDevicePCIBusInfoPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevicePCIBusInfoPropertiesEXT* from,
+    VkPhysicalDevicePCIBusInfoPropertiesEXT* to);
+
+#endif
+#ifdef VK_AMD_display_native_hdr
+void deepcopy_VkDisplayNativeHdrSurfaceCapabilitiesAMD(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDisplayNativeHdrSurfaceCapabilitiesAMD* from,
+    VkDisplayNativeHdrSurfaceCapabilitiesAMD* to);
+
+void deepcopy_VkSwapchainDisplayNativeHdrCreateInfoAMD(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSwapchainDisplayNativeHdrCreateInfoAMD* from,
+    VkSwapchainDisplayNativeHdrCreateInfoAMD* to);
+
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+void deepcopy_VkImagePipeSurfaceCreateInfoFUCHSIA(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImagePipeSurfaceCreateInfoFUCHSIA* from,
+    VkImagePipeSurfaceCreateInfoFUCHSIA* to);
+
+#endif
+#ifdef VK_EXT_metal_surface
+void deepcopy_VkMetalSurfaceCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkMetalSurfaceCreateInfoEXT* from,
+    VkMetalSurfaceCreateInfoEXT* to);
+
+#endif
+#ifdef VK_EXT_fragment_density_map
+void deepcopy_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMapFeaturesEXT* from,
+    VkPhysicalDeviceFragmentDensityMapFeaturesEXT* to);
+
+void deepcopy_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMapPropertiesEXT* from,
+    VkPhysicalDeviceFragmentDensityMapPropertiesEXT* to);
+
+void deepcopy_VkRenderPassFragmentDensityMapCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRenderPassFragmentDensityMapCreateInfoEXT* from,
+    VkRenderPassFragmentDensityMapCreateInfoEXT* to);
+
+#endif
+#ifdef VK_EXT_scalar_block_layout
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceScalarBlockLayoutFeatures, deepcopy_VkPhysicalDeviceScalarBlockLayoutFeaturesEXT);
+
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+void deepcopy_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* from,
+    VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* to);
+
+void deepcopy_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* from,
+    VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* to);
+
+void deepcopy_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* from,
+    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* to);
+
+#endif
+#ifdef VK_AMD_shader_core_properties2
+void deepcopy_VkPhysicalDeviceShaderCoreProperties2AMD(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderCoreProperties2AMD* from,
+    VkPhysicalDeviceShaderCoreProperties2AMD* to);
+
+#endif
+#ifdef VK_AMD_device_coherent_memory
+void deepcopy_VkPhysicalDeviceCoherentMemoryFeaturesAMD(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCoherentMemoryFeaturesAMD* from,
+    VkPhysicalDeviceCoherentMemoryFeaturesAMD* to);
+
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+void deepcopy_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* from,
+    VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* to);
+
+#endif
+#ifdef VK_EXT_memory_budget
+void deepcopy_VkPhysicalDeviceMemoryBudgetPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryBudgetPropertiesEXT* from,
+    VkPhysicalDeviceMemoryBudgetPropertiesEXT* to);
+
+#endif
+#ifdef VK_EXT_memory_priority
+void deepcopy_VkPhysicalDeviceMemoryPriorityFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryPriorityFeaturesEXT* from,
+    VkPhysicalDeviceMemoryPriorityFeaturesEXT* to);
+
+void deepcopy_VkMemoryPriorityAllocateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkMemoryPriorityAllocateInfoEXT* from,
+    VkMemoryPriorityAllocateInfoEXT* to);
+
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+void deepcopy_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* from,
+    VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* to);
+
+#endif
+#ifdef VK_EXT_buffer_device_address
+void deepcopy_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* from,
+    VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* to);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT, deepcopy_VkPhysicalDeviceBufferAddressFeaturesEXT);
+
+DEFINE_ALIAS_FUNCTION(deepcopy_VkBufferDeviceAddressInfo, deepcopy_VkBufferDeviceAddressInfoEXT);
+
+void deepcopy_VkBufferDeviceAddressCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBufferDeviceAddressCreateInfoEXT* from,
+    VkBufferDeviceAddressCreateInfoEXT* to);
+
+#endif
+#ifdef VK_EXT_tooling_info
+void deepcopy_VkPhysicalDeviceToolPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceToolPropertiesEXT* from,
+    VkPhysicalDeviceToolPropertiesEXT* to);
+
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+DEFINE_ALIAS_FUNCTION(deepcopy_VkImageStencilUsageCreateInfo, deepcopy_VkImageStencilUsageCreateInfoEXT);
+
+#endif
+#ifdef VK_EXT_validation_features
+void deepcopy_VkValidationFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkValidationFeaturesEXT* from,
+    VkValidationFeaturesEXT* to);
+
+#endif
+#ifdef VK_NV_cooperative_matrix
+void deepcopy_VkCooperativeMatrixPropertiesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCooperativeMatrixPropertiesNV* from,
+    VkCooperativeMatrixPropertiesNV* to);
+
+void deepcopy_VkPhysicalDeviceCooperativeMatrixFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCooperativeMatrixFeaturesNV* from,
+    VkPhysicalDeviceCooperativeMatrixFeaturesNV* to);
+
+void deepcopy_VkPhysicalDeviceCooperativeMatrixPropertiesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCooperativeMatrixPropertiesNV* from,
+    VkPhysicalDeviceCooperativeMatrixPropertiesNV* to);
+
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+void deepcopy_VkPhysicalDeviceCoverageReductionModeFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCoverageReductionModeFeaturesNV* from,
+    VkPhysicalDeviceCoverageReductionModeFeaturesNV* to);
+
+void deepcopy_VkPipelineCoverageReductionStateCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineCoverageReductionStateCreateInfoNV* from,
+    VkPipelineCoverageReductionStateCreateInfoNV* to);
+
+void deepcopy_VkFramebufferMixedSamplesCombinationNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkFramebufferMixedSamplesCombinationNV* from,
+    VkFramebufferMixedSamplesCombinationNV* to);
+
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+void deepcopy_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* from,
+    VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* to);
+
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+void deepcopy_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* from,
+    VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* to);
+
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+void deepcopy_VkSurfaceFullScreenExclusiveInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSurfaceFullScreenExclusiveInfoEXT* from,
+    VkSurfaceFullScreenExclusiveInfoEXT* to);
+
+void deepcopy_VkSurfaceCapabilitiesFullScreenExclusiveEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSurfaceCapabilitiesFullScreenExclusiveEXT* from,
+    VkSurfaceCapabilitiesFullScreenExclusiveEXT* to);
+
+void deepcopy_VkSurfaceFullScreenExclusiveWin32InfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSurfaceFullScreenExclusiveWin32InfoEXT* from,
+    VkSurfaceFullScreenExclusiveWin32InfoEXT* to);
+
+#endif
+#ifdef VK_EXT_headless_surface
+void deepcopy_VkHeadlessSurfaceCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkHeadlessSurfaceCreateInfoEXT* from,
+    VkHeadlessSurfaceCreateInfoEXT* to);
+
+#endif
+#ifdef VK_EXT_line_rasterization
+void deepcopy_VkPhysicalDeviceLineRasterizationFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLineRasterizationFeaturesEXT* from,
+    VkPhysicalDeviceLineRasterizationFeaturesEXT* to);
+
+void deepcopy_VkPhysicalDeviceLineRasterizationPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLineRasterizationPropertiesEXT* from,
+    VkPhysicalDeviceLineRasterizationPropertiesEXT* to);
+
+void deepcopy_VkPipelineRasterizationLineStateCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineRasterizationLineStateCreateInfoEXT* from,
+    VkPipelineRasterizationLineStateCreateInfoEXT* to);
+
+#endif
+#ifdef VK_EXT_shader_atomic_float
+void deepcopy_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* from,
+    VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* to);
+
+#endif
+#ifdef VK_EXT_host_query_reset
+DEFINE_ALIAS_FUNCTION(deepcopy_VkPhysicalDeviceHostQueryResetFeatures, deepcopy_VkPhysicalDeviceHostQueryResetFeaturesEXT);
+
+#endif
+#ifdef VK_EXT_index_type_uint8
+void deepcopy_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceIndexTypeUint8FeaturesEXT* from,
+    VkPhysicalDeviceIndexTypeUint8FeaturesEXT* to);
+
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+void deepcopy_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* from,
+    VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* to);
+
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+void deepcopy_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* from,
+    VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* to);
+
+#endif
+#ifdef VK_NV_device_generated_commands
+void deepcopy_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* from,
+    VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* to);
+
+void deepcopy_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* from,
+    VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* to);
+
+void deepcopy_VkGraphicsShaderGroupCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkGraphicsShaderGroupCreateInfoNV* from,
+    VkGraphicsShaderGroupCreateInfoNV* to);
+
+void deepcopy_VkGraphicsPipelineShaderGroupsCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkGraphicsPipelineShaderGroupsCreateInfoNV* from,
+    VkGraphicsPipelineShaderGroupsCreateInfoNV* to);
+
+void deepcopy_VkBindShaderGroupIndirectCommandNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBindShaderGroupIndirectCommandNV* from,
+    VkBindShaderGroupIndirectCommandNV* to);
+
+void deepcopy_VkBindIndexBufferIndirectCommandNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBindIndexBufferIndirectCommandNV* from,
+    VkBindIndexBufferIndirectCommandNV* to);
+
+void deepcopy_VkBindVertexBufferIndirectCommandNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkBindVertexBufferIndirectCommandNV* from,
+    VkBindVertexBufferIndirectCommandNV* to);
+
+void deepcopy_VkSetStateFlagsIndirectCommandNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSetStateFlagsIndirectCommandNV* from,
+    VkSetStateFlagsIndirectCommandNV* to);
+
+void deepcopy_VkIndirectCommandsStreamNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkIndirectCommandsStreamNV* from,
+    VkIndirectCommandsStreamNV* to);
+
+void deepcopy_VkIndirectCommandsLayoutTokenNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkIndirectCommandsLayoutTokenNV* from,
+    VkIndirectCommandsLayoutTokenNV* to);
+
+void deepcopy_VkIndirectCommandsLayoutCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkIndirectCommandsLayoutCreateInfoNV* from,
+    VkIndirectCommandsLayoutCreateInfoNV* to);
+
+void deepcopy_VkGeneratedCommandsInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkGeneratedCommandsInfoNV* from,
+    VkGeneratedCommandsInfoNV* to);
+
+void deepcopy_VkGeneratedCommandsMemoryRequirementsInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkGeneratedCommandsMemoryRequirementsInfoNV* from,
+    VkGeneratedCommandsMemoryRequirementsInfoNV* to);
+
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+void deepcopy_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* from,
+    VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* to);
+
+void deepcopy_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* from,
+    VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* to);
+
+#endif
+#ifdef VK_QCOM_render_pass_transform
+void deepcopy_VkRenderPassTransformBeginInfoQCOM(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRenderPassTransformBeginInfoQCOM* from,
+    VkRenderPassTransformBeginInfoQCOM* to);
+
+void deepcopy_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceRenderPassTransformInfoQCOM* from,
+    VkCommandBufferInheritanceRenderPassTransformInfoQCOM* to);
+
+#endif
+#ifdef VK_EXT_device_memory_report
+void deepcopy_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* from,
+    VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* to);
+
+void deepcopy_VkDeviceMemoryReportCallbackDataEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDeviceMemoryReportCallbackDataEXT* from,
+    VkDeviceMemoryReportCallbackDataEXT* to);
+
+void deepcopy_VkDeviceDeviceMemoryReportCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDeviceDeviceMemoryReportCreateInfoEXT* from,
+    VkDeviceDeviceMemoryReportCreateInfoEXT* to);
+
+#endif
+#ifdef VK_EXT_robustness2
+void deepcopy_VkPhysicalDeviceRobustness2FeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRobustness2FeaturesEXT* from,
+    VkPhysicalDeviceRobustness2FeaturesEXT* to);
+
+void deepcopy_VkPhysicalDeviceRobustness2PropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRobustness2PropertiesEXT* from,
+    VkPhysicalDeviceRobustness2PropertiesEXT* to);
+
+#endif
+#ifdef VK_EXT_custom_border_color
+void deepcopy_VkSamplerCustomBorderColorCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkSamplerCustomBorderColorCreateInfoEXT* from,
+    VkSamplerCustomBorderColorCreateInfoEXT* to);
+
+void deepcopy_VkPhysicalDeviceCustomBorderColorPropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCustomBorderColorPropertiesEXT* from,
+    VkPhysicalDeviceCustomBorderColorPropertiesEXT* to);
+
+void deepcopy_VkPhysicalDeviceCustomBorderColorFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCustomBorderColorFeaturesEXT* from,
+    VkPhysicalDeviceCustomBorderColorFeaturesEXT* to);
+
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+void deepcopy_VkPhysicalDevicePrivateDataFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevicePrivateDataFeaturesEXT* from,
+    VkPhysicalDevicePrivateDataFeaturesEXT* to);
+
+void deepcopy_VkDevicePrivateDataCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDevicePrivateDataCreateInfoEXT* from,
+    VkDevicePrivateDataCreateInfoEXT* to);
+
+void deepcopy_VkPrivateDataSlotCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPrivateDataSlotCreateInfoEXT* from,
+    VkPrivateDataSlotCreateInfoEXT* to);
+
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+void deepcopy_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* from,
+    VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* to);
+
+#endif
+#ifdef VK_NV_device_diagnostics_config
+void deepcopy_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDiagnosticsConfigFeaturesNV* from,
+    VkPhysicalDeviceDiagnosticsConfigFeaturesNV* to);
+
+void deepcopy_VkDeviceDiagnosticsConfigCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDeviceDiagnosticsConfigCreateInfoNV* from,
+    VkDeviceDiagnosticsConfigCreateInfoNV* to);
+
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+void deepcopy_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* from,
+    VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* to);
+
+void deepcopy_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* from,
+    VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* to);
+
+void deepcopy_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPipelineFragmentShadingRateEnumStateCreateInfoNV* from,
+    VkPipelineFragmentShadingRateEnumStateCreateInfoNV* to);
+
+#endif
+#ifdef VK_EXT_fragment_density_map2
+void deepcopy_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* from,
+    VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* to);
+
+void deepcopy_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* from,
+    VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* to);
+
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+void deepcopy_VkCopyCommandTransformInfoQCOM(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyCommandTransformInfoQCOM* from,
+    VkCopyCommandTransformInfoQCOM* to);
+
+#endif
+#ifdef VK_EXT_image_robustness
+void deepcopy_VkPhysicalDeviceImageRobustnessFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageRobustnessFeaturesEXT* from,
+    VkPhysicalDeviceImageRobustnessFeaturesEXT* to);
+
+#endif
+#ifdef VK_EXT_4444_formats
+void deepcopy_VkPhysicalDevice4444FormatsFeaturesEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDevice4444FormatsFeaturesEXT* from,
+    VkPhysicalDevice4444FormatsFeaturesEXT* to);
+
+#endif
+#ifdef VK_EXT_directfb_surface
+void deepcopy_VkDirectFBSurfaceCreateInfoEXT(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDirectFBSurfaceCreateInfoEXT* from,
+    VkDirectFBSurfaceCreateInfoEXT* to);
+
+#endif
+#ifdef VK_GOOGLE_gfxstream
 void deepcopy_VkImportColorBufferGOOGLE(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportColorBufferGOOGLE* from,
     VkImportColorBufferGOOGLE* to);
 
+void deepcopy_VkImportBufferGOOGLE(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkImportBufferGOOGLE* from,
+    VkImportBufferGOOGLE* to);
+
 void deepcopy_VkImportPhysicalAddressGOOGLE(
-    Pool* pool,
+    Allocator* alloc,
+    VkStructureType rootType,
     const VkImportPhysicalAddressGOOGLE* from,
     VkImportPhysicalAddressGOOGLE* to);
 
 #endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
+#ifdef VK_KHR_acceleration_structure
+void deepcopy_VkDeviceOrHostAddressKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDeviceOrHostAddressKHR* from,
+    VkDeviceOrHostAddressKHR* to);
+
+void deepcopy_VkDeviceOrHostAddressConstKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkDeviceOrHostAddressConstKHR* from,
+    VkDeviceOrHostAddressConstKHR* to);
+
+void deepcopy_VkAccelerationStructureBuildRangeInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildRangeInfoKHR* from,
+    VkAccelerationStructureBuildRangeInfoKHR* to);
+
+void deepcopy_VkAccelerationStructureGeometryTrianglesDataKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryTrianglesDataKHR* from,
+    VkAccelerationStructureGeometryTrianglesDataKHR* to);
+
+void deepcopy_VkAccelerationStructureGeometryAabbsDataKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryAabbsDataKHR* from,
+    VkAccelerationStructureGeometryAabbsDataKHR* to);
+
+void deepcopy_VkAccelerationStructureGeometryInstancesDataKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryInstancesDataKHR* from,
+    VkAccelerationStructureGeometryInstancesDataKHR* to);
+
+void deepcopy_VkAccelerationStructureGeometryDataKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryDataKHR* from,
+    VkAccelerationStructureGeometryDataKHR* to);
+
+void deepcopy_VkAccelerationStructureGeometryKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryKHR* from,
+    VkAccelerationStructureGeometryKHR* to);
+
+void deepcopy_VkAccelerationStructureBuildGeometryInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildGeometryInfoKHR* from,
+    VkAccelerationStructureBuildGeometryInfoKHR* to);
+
+void deepcopy_VkAccelerationStructureCreateInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureCreateInfoKHR* from,
+    VkAccelerationStructureCreateInfoKHR* to);
+
+void deepcopy_VkWriteDescriptorSetAccelerationStructureKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetAccelerationStructureKHR* from,
+    VkWriteDescriptorSetAccelerationStructureKHR* to);
+
+void deepcopy_VkPhysicalDeviceAccelerationStructureFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceAccelerationStructureFeaturesKHR* from,
+    VkPhysicalDeviceAccelerationStructureFeaturesKHR* to);
+
+void deepcopy_VkPhysicalDeviceAccelerationStructurePropertiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceAccelerationStructurePropertiesKHR* from,
+    VkPhysicalDeviceAccelerationStructurePropertiesKHR* to);
+
+void deepcopy_VkAccelerationStructureDeviceAddressInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureDeviceAddressInfoKHR* from,
+    VkAccelerationStructureDeviceAddressInfoKHR* to);
+
+void deepcopy_VkAccelerationStructureVersionInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureVersionInfoKHR* from,
+    VkAccelerationStructureVersionInfoKHR* to);
+
+void deepcopy_VkCopyAccelerationStructureToMemoryInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyAccelerationStructureToMemoryInfoKHR* from,
+    VkCopyAccelerationStructureToMemoryInfoKHR* to);
+
+void deepcopy_VkCopyMemoryToAccelerationStructureInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyMemoryToAccelerationStructureInfoKHR* from,
+    VkCopyMemoryToAccelerationStructureInfoKHR* to);
+
+void deepcopy_VkCopyAccelerationStructureInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkCopyAccelerationStructureInfoKHR* from,
+    VkCopyAccelerationStructureInfoKHR* to);
+
+void deepcopy_VkAccelerationStructureBuildSizesInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildSizesInfoKHR* from,
+    VkAccelerationStructureBuildSizesInfoKHR* to);
+
 #endif
-#ifdef VK_GOOGLE_async_command_buffers
+#ifdef VK_KHR_ray_tracing_pipeline
+void deepcopy_VkRayTracingShaderGroupCreateInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRayTracingShaderGroupCreateInfoKHR* from,
+    VkRayTracingShaderGroupCreateInfoKHR* to);
+
+void deepcopy_VkRayTracingPipelineInterfaceCreateInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRayTracingPipelineInterfaceCreateInfoKHR* from,
+    VkRayTracingPipelineInterfaceCreateInfoKHR* to);
+
+void deepcopy_VkRayTracingPipelineCreateInfoKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkRayTracingPipelineCreateInfoKHR* from,
+    VkRayTracingPipelineCreateInfoKHR* to);
+
+void deepcopy_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPipelineFeaturesKHR* from,
+    VkPhysicalDeviceRayTracingPipelineFeaturesKHR* to);
+
+void deepcopy_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPipelinePropertiesKHR* from,
+    VkPhysicalDeviceRayTracingPipelinePropertiesKHR* to);
+
+void deepcopy_VkStridedDeviceAddressRegionKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkStridedDeviceAddressRegionKHR* from,
+    VkStridedDeviceAddressRegionKHR* to);
+
+void deepcopy_VkTraceRaysIndirectCommandKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkTraceRaysIndirectCommandKHR* from,
+    VkTraceRaysIndirectCommandKHR* to);
+
 #endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
-#endif
-#ifdef VK_GOOGLE_address_space_info
-#endif
-#ifdef VK_GOOGLE_free_memory_sync
+#ifdef VK_KHR_ray_query
+void deepcopy_VkPhysicalDeviceRayQueryFeaturesKHR(
+    Allocator* alloc,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayQueryFeaturesKHR* from,
+    VkPhysicalDeviceRayQueryFeaturesKHR* to);
+
 #endif
 
 } // namespace goldfish_vk
diff --git a/system/vulkan_enc/goldfish_vk_extension_structs_guest.cpp b/system/vulkan_enc/goldfish_vk_extension_structs_guest.cpp
index e4b1ca7..5984b4d 100644
--- a/system/vulkan_enc/goldfish_vk_extension_structs_guest.cpp
+++ b/system/vulkan_enc/goldfish_vk_extension_structs_guest.cpp
@@ -32,6 +32,8 @@
 #endif
 #ifdef VK_VERSION_1_1
 #endif
+#ifdef VK_VERSION_1_2
+#endif
 #ifdef VK_KHR_surface
 #endif
 #ifdef VK_KHR_swapchain
@@ -46,8 +48,6 @@
 #endif
 #ifdef VK_KHR_wayland_surface
 #endif
-#ifdef VK_KHR_mir_surface
-#endif
 #ifdef VK_KHR_android_surface
 #endif
 #ifdef VK_KHR_win32_surface
@@ -86,12 +86,16 @@
 #endif
 #ifdef VK_KHR_push_descriptor
 #endif
+#ifdef VK_KHR_shader_float16_int8
+#endif
 #ifdef VK_KHR_16bit_storage
 #endif
 #ifdef VK_KHR_incremental_present
 #endif
 #ifdef VK_KHR_descriptor_update_template
 #endif
+#ifdef VK_KHR_imageless_framebuffer
+#endif
 #ifdef VK_KHR_create_renderpass2
 #endif
 #ifdef VK_KHR_shared_presentable_image
@@ -104,6 +108,8 @@
 #endif
 #ifdef VK_KHR_external_fence_fd
 #endif
+#ifdef VK_KHR_performance_query
+#endif
 #ifdef VK_KHR_maintenance2
 #endif
 #ifdef VK_KHR_get_surface_capabilities2
@@ -126,12 +132,56 @@
 #endif
 #ifdef VK_KHR_bind_memory2
 #endif
+#ifdef VK_KHR_portability_subset
+#endif
 #ifdef VK_KHR_maintenance3
 #endif
 #ifdef VK_KHR_draw_indirect_count
 #endif
+#ifdef VK_KHR_shader_subgroup_extended_types
+#endif
 #ifdef VK_KHR_8bit_storage
 #endif
+#ifdef VK_KHR_shader_atomic_int64
+#endif
+#ifdef VK_KHR_shader_clock
+#endif
+#ifdef VK_KHR_driver_properties
+#endif
+#ifdef VK_KHR_shader_float_controls
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+#endif
+#ifdef VK_KHR_buffer_device_address
+#endif
+#ifdef VK_KHR_deferred_host_operations
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+#endif
+#ifdef VK_KHR_pipeline_library
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+#endif
 #ifdef VK_ANDROID_native_buffer
 #endif
 #ifdef VK_EXT_debug_report
@@ -154,6 +204,10 @@
 #endif
 #ifdef VK_NV_dedicated_allocation
 #endif
+#ifdef VK_EXT_transform_feedback
+#endif
+#ifdef VK_NVX_image_view_handle
+#endif
 #ifdef VK_AMD_draw_indirect_count
 #endif
 #ifdef VK_AMD_negative_viewport_height
@@ -168,6 +222,10 @@
 #endif
 #ifdef VK_AMD_shader_image_load_store_lod
 #endif
+#ifdef VK_GGP_stream_descriptor_surface
+#endif
+#ifdef VK_NV_corner_sampled_image
+#endif
 #ifdef VK_IMG_format_pvrtc
 #endif
 #ifdef VK_NV_external_memory_capabilities
@@ -186,9 +244,11 @@
 #endif
 #ifdef VK_EXT_shader_subgroup_vote
 #endif
-#ifdef VK_EXT_conditional_rendering
+#ifdef VK_EXT_texture_compression_astc_hdr
 #endif
-#ifdef VK_NVX_device_generated_commands
+#ifdef VK_EXT_astc_decode_mode
+#endif
+#ifdef VK_EXT_conditional_rendering
 #endif
 #ifdef VK_NV_clip_space_w_scaling
 #endif
@@ -216,6 +276,8 @@
 #endif
 #ifdef VK_EXT_conservative_rasterization
 #endif
+#ifdef VK_EXT_depth_clip_enable
+#endif
 #ifdef VK_EXT_swapchain_colorspace
 #endif
 #ifdef VK_EXT_hdr_metadata
@@ -224,6 +286,8 @@
 #endif
 #ifdef VK_MVK_macos_surface
 #endif
+#ifdef VK_MVK_moltenvk
+#endif
 #ifdef VK_EXT_external_memory_dma_buf
 #endif
 #ifdef VK_EXT_queue_family_foreign
@@ -240,6 +304,8 @@
 #endif
 #ifdef VK_AMD_shader_fragment_mask
 #endif
+#ifdef VK_EXT_inline_uniform_block
+#endif
 #ifdef VK_EXT_shader_stencil_export
 #endif
 #ifdef VK_EXT_sample_locations
@@ -252,41 +318,169 @@
 #endif
 #ifdef VK_NV_fill_rectangle
 #endif
+#ifdef VK_NV_shader_sm_builtins
+#endif
 #ifdef VK_EXT_post_depth_coverage
 #endif
+#ifdef VK_EXT_image_drm_format_modifier
+#endif
 #ifdef VK_EXT_validation_cache
 #endif
 #ifdef VK_EXT_descriptor_indexing
 #endif
 #ifdef VK_EXT_shader_viewport_index_layer
 #endif
+#ifdef VK_NV_shading_rate_image
+#endif
+#ifdef VK_NV_ray_tracing
+#endif
+#ifdef VK_NV_representative_fragment_test
+#endif
+#ifdef VK_EXT_filter_cubic
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
+#endif
 #ifdef VK_EXT_global_priority
 #endif
 #ifdef VK_EXT_external_memory_host
 #endif
 #ifdef VK_AMD_buffer_marker
 #endif
+#ifdef VK_AMD_pipeline_compiler_control
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+#endif
 #ifdef VK_AMD_shader_core_properties
 #endif
+#ifdef VK_AMD_memory_overallocation_behavior
+#endif
 #ifdef VK_EXT_vertex_attribute_divisor
 #endif
+#ifdef VK_GGP_frame_token
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+#endif
 #ifdef VK_NV_shader_subgroup_partitioned
 #endif
+#ifdef VK_NV_compute_shader_derivatives
+#endif
+#ifdef VK_NV_mesh_shader
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+#endif
+#ifdef VK_NV_shader_image_footprint
+#endif
+#ifdef VK_NV_scissor_exclusive
+#endif
 #ifdef VK_NV_device_diagnostic_checkpoints
 #endif
-#ifdef VK_GOOGLE_address_space
+#ifdef VK_INTEL_shader_integer_functions2
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_INTEL_performance_query
 #endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
+#ifdef VK_EXT_pci_bus_info
 #endif
-#ifdef VK_GOOGLE_async_command_buffers
+#ifdef VK_AMD_display_native_hdr
 #endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
+#ifdef VK_FUCHSIA_imagepipe_surface
 #endif
-#ifdef VK_GOOGLE_address_space_info
+#ifdef VK_EXT_metal_surface
 #endif
-#ifdef VK_GOOGLE_free_memory_sync
+#ifdef VK_EXT_fragment_density_map
+#endif
+#ifdef VK_EXT_scalar_block_layout
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+#endif
+#ifdef VK_AMD_shader_core_properties2
+#endif
+#ifdef VK_AMD_device_coherent_memory
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+#endif
+#ifdef VK_EXT_memory_budget
+#endif
+#ifdef VK_EXT_memory_priority
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+#endif
+#ifdef VK_EXT_buffer_device_address
+#endif
+#ifdef VK_EXT_tooling_info
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+#endif
+#ifdef VK_EXT_validation_features
+#endif
+#ifdef VK_NV_cooperative_matrix
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+#endif
+#ifdef VK_EXT_headless_surface
+#endif
+#ifdef VK_EXT_line_rasterization
+#endif
+#ifdef VK_EXT_shader_atomic_float
+#endif
+#ifdef VK_EXT_host_query_reset
+#endif
+#ifdef VK_EXT_index_type_uint8
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+#endif
+#ifdef VK_NV_device_generated_commands
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+#endif
+#ifdef VK_QCOM_render_pass_transform
+#endif
+#ifdef VK_EXT_device_memory_report
+#endif
+#ifdef VK_EXT_robustness2
+#endif
+#ifdef VK_EXT_custom_border_color
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+#endif
+#ifdef VK_NV_device_diagnostics_config
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+#endif
+#ifdef VK_EXT_fragment_density_map2
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+#endif
+#ifdef VK_EXT_image_robustness
+#endif
+#ifdef VK_EXT_4444_formats
+#endif
+#ifdef VK_EXT_directfb_surface
+#endif
+#ifdef VK_GOOGLE_gfxstream
+#endif
+#ifdef VK_KHR_acceleration_structure
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+#endif
+#ifdef VK_KHR_ray_query
 #endif
 uint32_t goldfish_vk_struct_type(
     const void* structExtension)
@@ -296,6 +490,7 @@
 }
 
 size_t goldfish_vk_extension_struct_size(
+    VkStructureType rootType,
     const void* structExtension)
 {
     if (!structExtension)
@@ -386,9 +581,9 @@
         {
             return sizeof(VkPhysicalDeviceMultiviewProperties);
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES:
         {
-            return sizeof(VkPhysicalDeviceVariablePointerFeatures);
+            return sizeof(VkPhysicalDeviceVariablePointersFeatures);
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES:
         {
@@ -458,9 +653,163 @@
         {
             return sizeof(VkPhysicalDeviceMaintenance3Properties);
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
         {
-            return sizeof(VkPhysicalDeviceShaderDrawParameterFeatures);
+            return sizeof(VkPhysicalDeviceShaderDrawParametersFeatures);
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceVulkan11Features);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceVulkan11Properties);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceVulkan12Features);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceVulkan12Properties);
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO:
+        {
+            return sizeof(VkImageFormatListCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES:
+        {
+            return sizeof(VkPhysicalDevice8BitStorageFeatures);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceDriverProperties);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceShaderAtomicInt64Features);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceShaderFloat16Int8Features);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceFloatControlsProperties);
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO:
+        {
+            return sizeof(VkDescriptorSetLayoutBindingFlagsCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceDescriptorIndexingFeatures);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceDescriptorIndexingProperties);
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO:
+        {
+            return sizeof(VkDescriptorSetVariableDescriptorCountAllocateInfo);
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT:
+        {
+            return sizeof(VkDescriptorSetVariableDescriptorCountLayoutSupport);
+        }
+        case VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE:
+        {
+            return sizeof(VkSubpassDescriptionDepthStencilResolve);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceDepthStencilResolveProperties);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceScalarBlockLayoutFeatures);
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO:
+        {
+            return sizeof(VkImageStencilUsageCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO:
+        {
+            return sizeof(VkSamplerReductionModeCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceSamplerFilterMinmaxProperties);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceVulkanMemoryModelFeatures);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceImagelessFramebufferFeatures);
+        }
+        case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO:
+        {
+            return sizeof(VkFramebufferAttachmentsCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO:
+        {
+            return sizeof(VkRenderPassAttachmentBeginInfo);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceUniformBufferStandardLayoutFeatures);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures);
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT:
+        {
+            return sizeof(VkAttachmentReferenceStencilLayout);
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT:
+        {
+            return sizeof(VkAttachmentDescriptionStencilLayout);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceHostQueryResetFeatures);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceTimelineSemaphoreFeatures);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceTimelineSemaphoreProperties);
+        }
+        case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO:
+        {
+            return sizeof(VkSemaphoreTypeCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
+        {
+            return sizeof(VkTimelineSemaphoreSubmitInfo);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceBufferDeviceAddressFeatures);
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO:
+        {
+            return sizeof(VkBufferOpaqueCaptureAddressCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO:
+        {
+            return sizeof(VkMemoryOpaqueCaptureAddressAllocateInfo);
         }
 #endif
 #ifdef VK_KHR_swapchain
@@ -543,16 +892,74 @@
             return sizeof(VkExportFenceWin32HandleInfoKHR);
         }
 #endif
-#ifdef VK_KHR_image_format_list
-        case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR:
+#ifdef VK_KHR_performance_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR:
         {
-            return sizeof(VkImageFormatListCreateInfoKHR);
+            return sizeof(VkPhysicalDevicePerformanceQueryFeaturesKHR);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR:
+        {
+            return sizeof(VkPhysicalDevicePerformanceQueryPropertiesKHR);
+        }
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR:
+        {
+            return sizeof(VkQueryPoolPerformanceCreateInfoKHR);
+        }
+        case VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR:
+        {
+            return sizeof(VkPerformanceQuerySubmitInfoKHR);
         }
 #endif
-#ifdef VK_KHR_8bit_storage
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR:
+#ifdef VK_KHR_portability_subset
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR:
         {
-            return sizeof(VkPhysicalDevice8BitStorageFeaturesKHR);
+            return sizeof(VkPhysicalDevicePortabilitySubsetFeaturesKHR);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR:
+        {
+            return sizeof(VkPhysicalDevicePortabilitySubsetPropertiesKHR);
+        }
+#endif
+#ifdef VK_KHR_shader_clock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceShaderClockFeaturesKHR);
+        }
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR);
+        }
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+        case VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR:
+        {
+            return sizeof(VkFragmentShadingRateAttachmentInfoKHR);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR:
+        {
+            return sizeof(VkPipelineFragmentShadingRateStateCreateInfoKHR);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceFragmentShadingRateFeaturesKHR);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceFragmentShadingRatePropertiesKHR);
+        }
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+        case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR:
+        {
+            return sizeof(VkSurfaceProtectedCapabilitiesKHR);
+        }
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR:
+        {
+            return sizeof(VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR);
         }
 #endif
 #ifdef VK_ANDROID_native_buffer
@@ -587,12 +994,32 @@
             return sizeof(VkDedicatedAllocationMemoryAllocateInfoNV);
         }
 #endif
+#ifdef VK_EXT_transform_feedback
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceTransformFeedbackFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceTransformFeedbackPropertiesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT:
+        {
+            return sizeof(VkPipelineRasterizationStateStreamCreateInfoEXT);
+        }
+#endif
 #ifdef VK_AMD_texture_gather_bias_lod
         case VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD:
         {
             return sizeof(VkTextureLODGatherFormatPropertiesAMD);
         }
 #endif
+#ifdef VK_NV_corner_sampled_image
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceCornerSampledImageFeaturesNV);
+        }
+#endif
 #ifdef VK_NV_external_memory
         case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV:
         {
@@ -625,6 +1052,22 @@
             return sizeof(VkValidationFlagsEXT);
         }
 #endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_astc_decode_mode
+        case VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT:
+        {
+            return sizeof(VkImageViewASTCDecodeModeEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceASTCDecodeFeaturesEXT);
+        }
+#endif
 #ifdef VK_EXT_conditional_rendering
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
         {
@@ -685,6 +1128,16 @@
             return sizeof(VkPipelineRasterizationConservativeStateCreateInfoEXT);
         }
 #endif
+#ifdef VK_EXT_depth_clip_enable
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceDepthClipEnableFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT:
+        {
+            return sizeof(VkPipelineRasterizationDepthClipStateCreateInfoEXT);
+        }
+#endif
 #ifdef VK_EXT_debug_utils
         case VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT:
         {
@@ -709,14 +1162,22 @@
             return sizeof(VkExternalFormatANDROID);
         }
 #endif
-#ifdef VK_EXT_sampler_filter_minmax
-        case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT:
+#ifdef VK_EXT_inline_uniform_block
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT:
         {
-            return sizeof(VkSamplerReductionModeCreateInfoEXT);
+            return sizeof(VkPhysicalDeviceInlineUniformBlockFeaturesEXT);
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT:
         {
-            return sizeof(VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT);
+            return sizeof(VkPhysicalDeviceInlineUniformBlockPropertiesEXT);
+        }
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT:
+        {
+            return sizeof(VkWriteDescriptorSetInlineUniformBlockEXT);
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT:
+        {
+            return sizeof(VkDescriptorPoolInlineUniformBlockCreateInfoEXT);
         }
 #endif
 #ifdef VK_EXT_sample_locations
@@ -763,32 +1224,86 @@
             return sizeof(VkPipelineCoverageModulationStateCreateInfoNV);
         }
 #endif
+#ifdef VK_NV_shader_sm_builtins
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV:
+        {
+            return sizeof(VkPhysicalDeviceShaderSMBuiltinsPropertiesNV);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceShaderSMBuiltinsFeaturesNV);
+        }
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+        case VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT:
+        {
+            return sizeof(VkDrmFormatModifierPropertiesListEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT:
+        {
+            return sizeof(VkPhysicalDeviceImageDrmFormatModifierInfoEXT);
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT:
+        {
+            return sizeof(VkImageDrmFormatModifierListCreateInfoEXT);
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT:
+        {
+            return sizeof(VkImageDrmFormatModifierExplicitCreateInfoEXT);
+        }
+#endif
 #ifdef VK_EXT_validation_cache
         case VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT:
         {
             return sizeof(VkShaderModuleValidationCacheCreateInfoEXT);
         }
 #endif
-#ifdef VK_EXT_descriptor_indexing
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT:
+#ifdef VK_NV_shading_rate_image
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV:
         {
-            return sizeof(VkDescriptorSetLayoutBindingFlagsCreateInfoEXT);
+            return sizeof(VkPipelineViewportShadingRateImageStateCreateInfoNV);
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV:
         {
-            return sizeof(VkPhysicalDeviceDescriptorIndexingFeaturesEXT);
+            return sizeof(VkPhysicalDeviceShadingRateImageFeaturesNV);
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV:
         {
-            return sizeof(VkPhysicalDeviceDescriptorIndexingPropertiesEXT);
+            return sizeof(VkPhysicalDeviceShadingRateImagePropertiesNV);
         }
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT:
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV:
         {
-            return sizeof(VkDescriptorSetVariableDescriptorCountAllocateInfoEXT);
+            return sizeof(VkPipelineViewportCoarseSampleOrderStateCreateInfoNV);
         }
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT:
+#endif
+#ifdef VK_NV_ray_tracing
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV:
         {
-            return sizeof(VkDescriptorSetVariableDescriptorCountLayoutSupportEXT);
+            return sizeof(VkWriteDescriptorSetAccelerationStructureNV);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV:
+        {
+            return sizeof(VkPhysicalDeviceRayTracingPropertiesNV);
+        }
+#endif
+#ifdef VK_NV_representative_fragment_test
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV:
+        {
+            return sizeof(VkPipelineRepresentativeFragmentTestStateCreateInfoNV);
+        }
+#endif
+#ifdef VK_EXT_filter_cubic
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT:
+        {
+            return sizeof(VkPhysicalDeviceImageViewImageFormatInfoEXT);
+        }
+        case VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT:
+        {
+            return sizeof(VkFilterCubicImageViewImageFormatPropertiesEXT);
         }
 #endif
 #ifdef VK_EXT_global_priority
@@ -807,12 +1322,24 @@
             return sizeof(VkPhysicalDeviceExternalMemoryHostPropertiesEXT);
         }
 #endif
+#ifdef VK_AMD_pipeline_compiler_control
+        case VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD:
+        {
+            return sizeof(VkPipelineCompilerControlCreateInfoAMD);
+        }
+#endif
 #ifdef VK_AMD_shader_core_properties
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD:
         {
             return sizeof(VkPhysicalDeviceShaderCorePropertiesAMD);
         }
 #endif
+#ifdef VK_AMD_memory_overallocation_behavior
+        case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD:
+        {
+            return sizeof(VkDeviceMemoryOverallocationCreateInfoAMD);
+        }
+#endif
 #ifdef VK_EXT_vertex_attribute_divisor
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT:
         {
@@ -822,6 +1349,60 @@
         {
             return sizeof(VkPipelineVertexInputDivisorStateCreateInfoEXT);
         }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT);
+        }
+#endif
+#ifdef VK_GGP_frame_token
+        case VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP:
+        {
+            return sizeof(VkPresentFrameTokenGGP);
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+        case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT:
+        {
+            return sizeof(VkPipelineCreationFeedbackCreateInfoEXT);
+        }
+#endif
+#ifdef VK_NV_compute_shader_derivatives
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceComputeShaderDerivativesFeaturesNV);
+        }
+#endif
+#ifdef VK_NV_mesh_shader
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceMeshShaderFeaturesNV);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV:
+        {
+            return sizeof(VkPhysicalDeviceMeshShaderPropertiesNV);
+        }
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV);
+        }
+#endif
+#ifdef VK_NV_shader_image_footprint
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceShaderImageFootprintFeaturesNV);
+        }
+#endif
+#ifdef VK_NV_scissor_exclusive
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV:
+        {
+            return sizeof(VkPipelineViewportExclusiveScissorStateCreateInfoNV);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceExclusiveScissorFeaturesNV);
+        }
 #endif
 #ifdef VK_NV_device_diagnostic_checkpoints
         case VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV:
@@ -829,16 +1410,1816 @@
             return sizeof(VkQueueFamilyCheckpointPropertiesNV);
         }
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_INTEL_shader_integer_functions2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL:
+        {
+            return sizeof(VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL);
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL:
+        {
+            return sizeof(VkQueryPoolPerformanceQueryCreateInfoINTEL);
+        }
+#endif
+#ifdef VK_EXT_pci_bus_info
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDevicePCIBusInfoPropertiesEXT);
+        }
+#endif
+#ifdef VK_AMD_display_native_hdr
+        case VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD:
+        {
+            return sizeof(VkDisplayNativeHdrSurfaceCapabilitiesAMD);
+        }
+        case VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD:
+        {
+            return sizeof(VkSwapchainDisplayNativeHdrCreateInfoAMD);
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
+                {
+                    return sizeof(VkPhysicalDeviceFragmentDensityMapFeaturesEXT);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO:
+                {
+                    return sizeof(VkPhysicalDeviceFragmentDensityMapFeaturesEXT);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    return sizeof(VkImportColorBufferGOOGLE);
+                    break;
+                }
+                default:
+                {
+                    return sizeof(VkPhysicalDeviceFragmentDensityMapFeaturesEXT);
+                    break;
+                }
+            }
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2:
+                {
+                    return sizeof(VkPhysicalDeviceFragmentDensityMapPropertiesEXT);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    return sizeof(VkImportPhysicalAddressGOOGLE);
+                    break;
+                }
+                default:
+                {
+                    return sizeof(VkPhysicalDeviceFragmentDensityMapPropertiesEXT);
+                    break;
+                }
+            }
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO:
+                {
+                    return sizeof(VkRenderPassFragmentDensityMapCreateInfoEXT);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2:
+                {
+                    return sizeof(VkRenderPassFragmentDensityMapCreateInfoEXT);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    return sizeof(VkImportBufferGOOGLE);
+                    break;
+                }
+                default:
+                {
+                    return sizeof(VkRenderPassFragmentDensityMapCreateInfoEXT);
+                    break;
+                }
+            }
+        }
+#endif
+#ifdef VK_EXT_subgroup_size_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceSubgroupSizeControlFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceSubgroupSizeControlPropertiesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT:
+        {
+            return sizeof(VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT);
+        }
+#endif
+#ifdef VK_AMD_shader_core_properties2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD:
+        {
+            return sizeof(VkPhysicalDeviceShaderCoreProperties2AMD);
+        }
+#endif
+#ifdef VK_AMD_device_coherent_memory
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD:
+        {
+            return sizeof(VkPhysicalDeviceCoherentMemoryFeaturesAMD);
+        }
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_memory_budget
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceMemoryBudgetPropertiesEXT);
+        }
+#endif
+#ifdef VK_EXT_memory_priority
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceMemoryPriorityFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT:
+        {
+            return sizeof(VkMemoryPriorityAllocateInfoEXT);
+        }
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV);
+        }
+#endif
+#ifdef VK_EXT_buffer_device_address
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceBufferDeviceAddressFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT:
+        {
+            return sizeof(VkBufferDeviceAddressCreateInfoEXT);
+        }
+#endif
+#ifdef VK_EXT_validation_features
+        case VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT:
+        {
+            return sizeof(VkValidationFeaturesEXT);
+        }
+#endif
+#ifdef VK_NV_cooperative_matrix
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceCooperativeMatrixFeaturesNV);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV:
+        {
+            return sizeof(VkPhysicalDeviceCooperativeMatrixPropertiesNV);
+        }
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceCoverageReductionModeFeaturesNV);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV:
+        {
+            return sizeof(VkPipelineCoverageReductionStateCreateInfoNV);
+        }
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceYcbcrImageArraysFeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT:
+        {
+            return sizeof(VkSurfaceFullScreenExclusiveInfoEXT);
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT:
+        {
+            return sizeof(VkSurfaceCapabilitiesFullScreenExclusiveEXT);
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT:
+        {
+            return sizeof(VkSurfaceFullScreenExclusiveWin32InfoEXT);
+        }
+#endif
+#ifdef VK_EXT_line_rasterization
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceLineRasterizationFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceLineRasterizationPropertiesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
+        {
+            return sizeof(VkPipelineRasterizationLineStateCreateInfoEXT);
+        }
+#endif
+#ifdef VK_EXT_shader_atomic_float
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceShaderAtomicFloatFeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_index_type_uint8
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceIndexTypeUint8FeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceExtendedDynamicStateFeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT);
+        }
+#endif
+#ifdef VK_NV_device_generated_commands
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV:
+        {
+            return sizeof(VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV);
+        }
+        case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV:
+        {
+            return sizeof(VkGraphicsPipelineShaderGroupsCreateInfoNV);
+        }
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT);
+        }
+#endif
+#ifdef VK_QCOM_render_pass_transform
+        case VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM:
+        {
+            return sizeof(VkRenderPassTransformBeginInfoQCOM);
+        }
+        case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM:
+        {
+            return sizeof(VkCommandBufferInheritanceRenderPassTransformInfoQCOM);
+        }
+#endif
+#ifdef VK_EXT_device_memory_report
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceDeviceMemoryReportFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT:
+        {
+            return sizeof(VkDeviceDeviceMemoryReportCreateInfoEXT);
+        }
+#endif
+#ifdef VK_EXT_robustness2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceRobustness2FeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceRobustness2PropertiesEXT);
+        }
+#endif
+#ifdef VK_EXT_custom_border_color
+        case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT:
+        {
+            return sizeof(VkSamplerCustomBorderColorCreateInfoEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceCustomBorderColorPropertiesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceCustomBorderColorFeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_private_data
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDevicePrivateDataFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT:
+        {
+            return sizeof(VkDevicePrivateDataCreateInfoEXT);
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT);
+        }
+#endif
+#ifdef VK_NV_device_diagnostics_config
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceDiagnosticsConfigFeaturesNV);
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV:
+        {
+            return sizeof(VkDeviceDiagnosticsConfigCreateInfoNV);
+        }
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV:
+        {
+            return sizeof(VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV:
+        {
+            return sizeof(VkPipelineFragmentShadingRateEnumStateCreateInfoNV);
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceFragmentDensityMap2FeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceFragmentDensityMap2PropertiesEXT);
+        }
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+        case VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM:
+        {
+            return sizeof(VkCopyCommandTransformInfoQCOM);
+        }
+#endif
+#ifdef VK_EXT_image_robustness
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceImageRobustnessFeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_4444_formats
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDevice4444FormatsFeaturesEXT);
+        }
+#endif
+#ifdef VK_GOOGLE_gfxstream
         case VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE:
         {
             return sizeof(VkImportColorBufferGOOGLE);
         }
+        case VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE:
+        {
+            return sizeof(VkImportBufferGOOGLE);
+        }
         case VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE:
         {
             return sizeof(VkImportPhysicalAddressGOOGLE);
         }
 #endif
+#ifdef VK_KHR_acceleration_structure
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR:
+        {
+            return sizeof(VkWriteDescriptorSetAccelerationStructureKHR);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceAccelerationStructureFeaturesKHR);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceAccelerationStructurePropertiesKHR);
+        }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceRayTracingPipelineFeaturesKHR);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceRayTracingPipelinePropertiesKHR);
+        }
+#endif
+#ifdef VK_KHR_ray_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceRayQueryFeaturesKHR);
+        }
+#endif
+        default:
+        {
+            return (size_t)0;
+        }
+    }
+}
+
+size_t goldfish_vk_extension_struct_size_with_stream_features(
+    uint32_t streamFeatures,
+    VkStructureType rootType,
+    const void* structExtension)
+{
+    if (!structExtension)
+    {
+        return (size_t)0;
+    }
+    uint32_t structType = (uint32_t)goldfish_vk_struct_type(structExtension);
+    switch(structType)
+    {
+#ifdef VK_VERSION_1_1
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceSubgroupProperties);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES:
+        {
+            return sizeof(VkPhysicalDevice16BitStorageFeatures);
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
+        {
+            return sizeof(VkMemoryDedicatedRequirements);
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
+        {
+            return sizeof(VkMemoryDedicatedAllocateInfo);
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO:
+        {
+            return sizeof(VkMemoryAllocateFlagsInfo);
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO:
+        {
+            return sizeof(VkDeviceGroupRenderPassBeginInfo);
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO:
+        {
+            return sizeof(VkDeviceGroupCommandBufferBeginInfo);
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO:
+        {
+            return sizeof(VkDeviceGroupSubmitInfo);
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO:
+        {
+            return sizeof(VkDeviceGroupBindSparseInfo);
+        }
+        case VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO:
+        {
+            return sizeof(VkBindBufferMemoryDeviceGroupInfo);
+        }
+        case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO:
+        {
+            return sizeof(VkBindImageMemoryDeviceGroupInfo);
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO:
+        {
+            return sizeof(VkDeviceGroupDeviceCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
+        {
+            return sizeof(VkPhysicalDeviceFeatures2);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDevicePointClippingProperties);
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO:
+        {
+            return sizeof(VkRenderPassInputAttachmentAspectCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO:
+        {
+            return sizeof(VkImageViewUsageCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO:
+        {
+            return sizeof(VkPipelineTessellationDomainOriginStateCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
+        {
+            return sizeof(VkRenderPassMultiviewCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceMultiviewFeatures);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceMultiviewProperties);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceVariablePointersFeatures);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceProtectedMemoryFeatures);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceProtectedMemoryProperties);
+        }
+        case VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO:
+        {
+            return sizeof(VkProtectedSubmitInfo);
+        }
+        case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO:
+        {
+            return sizeof(VkSamplerYcbcrConversionInfo);
+        }
+        case VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO:
+        {
+            return sizeof(VkBindImagePlaneMemoryInfo);
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO:
+        {
+            return sizeof(VkImagePlaneMemoryRequirementsInfo);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures);
+        }
+        case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES:
+        {
+            return sizeof(VkSamplerYcbcrConversionImageFormatProperties);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO:
+        {
+            return sizeof(VkPhysicalDeviceExternalImageFormatInfo);
+        }
+        case VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES:
+        {
+            return sizeof(VkExternalImageFormatProperties);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceIDProperties);
+        }
+        case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO:
+        {
+            return sizeof(VkExternalMemoryImageCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO:
+        {
+            return sizeof(VkExternalMemoryBufferCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
+        {
+            return sizeof(VkExportMemoryAllocateInfo);
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO:
+        {
+            return sizeof(VkExportFenceCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO:
+        {
+            return sizeof(VkExportSemaphoreCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceMaintenance3Properties);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceShaderDrawParametersFeatures);
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceVulkan11Features);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceVulkan11Properties);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceVulkan12Features);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceVulkan12Properties);
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO:
+        {
+            return sizeof(VkImageFormatListCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES:
+        {
+            return sizeof(VkPhysicalDevice8BitStorageFeatures);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceDriverProperties);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceShaderAtomicInt64Features);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES:
+        {
+            if (streamFeatures & VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT)
+            {
+                return sizeof(VkPhysicalDeviceShaderFloat16Int8Features);
+            }
+            else
+            {
+                return 0;
+            }
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceFloatControlsProperties);
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO:
+        {
+            return sizeof(VkDescriptorSetLayoutBindingFlagsCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceDescriptorIndexingFeatures);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceDescriptorIndexingProperties);
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO:
+        {
+            return sizeof(VkDescriptorSetVariableDescriptorCountAllocateInfo);
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT:
+        {
+            return sizeof(VkDescriptorSetVariableDescriptorCountLayoutSupport);
+        }
+        case VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE:
+        {
+            return sizeof(VkSubpassDescriptionDepthStencilResolve);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceDepthStencilResolveProperties);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceScalarBlockLayoutFeatures);
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO:
+        {
+            return sizeof(VkImageStencilUsageCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO:
+        {
+            return sizeof(VkSamplerReductionModeCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceSamplerFilterMinmaxProperties);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceVulkanMemoryModelFeatures);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceImagelessFramebufferFeatures);
+        }
+        case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO:
+        {
+            return sizeof(VkFramebufferAttachmentsCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO:
+        {
+            return sizeof(VkRenderPassAttachmentBeginInfo);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceUniformBufferStandardLayoutFeatures);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures);
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT:
+        {
+            return sizeof(VkAttachmentReferenceStencilLayout);
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT:
+        {
+            return sizeof(VkAttachmentDescriptionStencilLayout);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceHostQueryResetFeatures);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceTimelineSemaphoreFeatures);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES:
+        {
+            return sizeof(VkPhysicalDeviceTimelineSemaphoreProperties);
+        }
+        case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO:
+        {
+            return sizeof(VkSemaphoreTypeCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
+        {
+            return sizeof(VkTimelineSemaphoreSubmitInfo);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
+        {
+            return sizeof(VkPhysicalDeviceBufferDeviceAddressFeatures);
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO:
+        {
+            return sizeof(VkBufferOpaqueCaptureAddressCreateInfo);
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO:
+        {
+            return sizeof(VkMemoryOpaqueCaptureAddressAllocateInfo);
+        }
+#endif
+#ifdef VK_KHR_swapchain
+        case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
+        {
+            return sizeof(VkImageSwapchainCreateInfoKHR);
+        }
+        case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR:
+        {
+            return sizeof(VkBindImageMemorySwapchainInfoKHR);
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR:
+        {
+            return sizeof(VkDeviceGroupPresentInfoKHR);
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR:
+        {
+            return sizeof(VkDeviceGroupSwapchainCreateInfoKHR);
+        }
+#endif
+#ifdef VK_KHR_display_swapchain
+        case VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR:
+        {
+            return sizeof(VkDisplayPresentInfoKHR);
+        }
+#endif
+#ifdef VK_KHR_external_memory_win32
+        case VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR:
+        {
+            return sizeof(VkImportMemoryWin32HandleInfoKHR);
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR:
+        {
+            return sizeof(VkExportMemoryWin32HandleInfoKHR);
+        }
+#endif
+#ifdef VK_KHR_external_memory_fd
+        case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
+        {
+            return sizeof(VkImportMemoryFdInfoKHR);
+        }
+#endif
+#ifdef VK_KHR_win32_keyed_mutex
+        case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR:
+        {
+            return sizeof(VkWin32KeyedMutexAcquireReleaseInfoKHR);
+        }
+#endif
+#ifdef VK_KHR_external_semaphore_win32
+        case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR:
+        {
+            return sizeof(VkExportSemaphoreWin32HandleInfoKHR);
+        }
+        case VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR:
+        {
+            return sizeof(VkD3D12FenceSubmitInfoKHR);
+        }
+#endif
+#ifdef VK_KHR_push_descriptor
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR:
+        {
+            return sizeof(VkPhysicalDevicePushDescriptorPropertiesKHR);
+        }
+#endif
+#ifdef VK_KHR_incremental_present
+        case VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR:
+        {
+            return sizeof(VkPresentRegionsKHR);
+        }
+#endif
+#ifdef VK_KHR_shared_presentable_image
+        case VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR:
+        {
+            return sizeof(VkSharedPresentSurfaceCapabilitiesKHR);
+        }
+#endif
+#ifdef VK_KHR_external_fence_win32
+        case VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR:
+        {
+            return sizeof(VkExportFenceWin32HandleInfoKHR);
+        }
+#endif
+#ifdef VK_KHR_performance_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR:
+        {
+            return sizeof(VkPhysicalDevicePerformanceQueryFeaturesKHR);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR:
+        {
+            return sizeof(VkPhysicalDevicePerformanceQueryPropertiesKHR);
+        }
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR:
+        {
+            return sizeof(VkQueryPoolPerformanceCreateInfoKHR);
+        }
+        case VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR:
+        {
+            return sizeof(VkPerformanceQuerySubmitInfoKHR);
+        }
+#endif
+#ifdef VK_KHR_portability_subset
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR:
+        {
+            return sizeof(VkPhysicalDevicePortabilitySubsetFeaturesKHR);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR:
+        {
+            return sizeof(VkPhysicalDevicePortabilitySubsetPropertiesKHR);
+        }
+#endif
+#ifdef VK_KHR_shader_clock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceShaderClockFeaturesKHR);
+        }
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR);
+        }
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+        case VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR:
+        {
+            return sizeof(VkFragmentShadingRateAttachmentInfoKHR);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR:
+        {
+            return sizeof(VkPipelineFragmentShadingRateStateCreateInfoKHR);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceFragmentShadingRateFeaturesKHR);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceFragmentShadingRatePropertiesKHR);
+        }
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+        case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR:
+        {
+            return sizeof(VkSurfaceProtectedCapabilitiesKHR);
+        }
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR:
+        {
+            return sizeof(VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR);
+        }
+#endif
+#ifdef VK_ANDROID_native_buffer
+        case VK_STRUCTURE_TYPE_NATIVE_BUFFER_ANDROID:
+        {
+            return sizeof(VkNativeBufferANDROID);
+        }
+#endif
+#ifdef VK_EXT_debug_report
+        case VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT:
+        {
+            return sizeof(VkDebugReportCallbackCreateInfoEXT);
+        }
+#endif
+#ifdef VK_AMD_rasterization_order
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD:
+        {
+            return sizeof(VkPipelineRasterizationStateRasterizationOrderAMD);
+        }
+#endif
+#ifdef VK_NV_dedicated_allocation
+        case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV:
+        {
+            return sizeof(VkDedicatedAllocationImageCreateInfoNV);
+        }
+        case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV:
+        {
+            return sizeof(VkDedicatedAllocationBufferCreateInfoNV);
+        }
+        case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV:
+        {
+            return sizeof(VkDedicatedAllocationMemoryAllocateInfoNV);
+        }
+#endif
+#ifdef VK_EXT_transform_feedback
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceTransformFeedbackFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceTransformFeedbackPropertiesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT:
+        {
+            return sizeof(VkPipelineRasterizationStateStreamCreateInfoEXT);
+        }
+#endif
+#ifdef VK_AMD_texture_gather_bias_lod
+        case VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD:
+        {
+            return sizeof(VkTextureLODGatherFormatPropertiesAMD);
+        }
+#endif
+#ifdef VK_NV_corner_sampled_image
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceCornerSampledImageFeaturesNV);
+        }
+#endif
+#ifdef VK_NV_external_memory
+        case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV:
+        {
+            return sizeof(VkExternalMemoryImageCreateInfoNV);
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV:
+        {
+            return sizeof(VkExportMemoryAllocateInfoNV);
+        }
+#endif
+#ifdef VK_NV_external_memory_win32
+        case VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV:
+        {
+            return sizeof(VkImportMemoryWin32HandleInfoNV);
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV:
+        {
+            return sizeof(VkExportMemoryWin32HandleInfoNV);
+        }
+#endif
+#ifdef VK_NV_win32_keyed_mutex
+        case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV:
+        {
+            return sizeof(VkWin32KeyedMutexAcquireReleaseInfoNV);
+        }
+#endif
+#ifdef VK_EXT_validation_flags
+        case VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT:
+        {
+            return sizeof(VkValidationFlagsEXT);
+        }
+#endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_astc_decode_mode
+        case VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT:
+        {
+            return sizeof(VkImageViewASTCDecodeModeEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceASTCDecodeFeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_conditional_rendering
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceConditionalRenderingFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT:
+        {
+            return sizeof(VkCommandBufferInheritanceConditionalRenderingInfoEXT);
+        }
+#endif
+#ifdef VK_NV_clip_space_w_scaling
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV:
+        {
+            return sizeof(VkPipelineViewportWScalingStateCreateInfoNV);
+        }
+#endif
+#ifdef VK_EXT_display_control
+        case VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT:
+        {
+            return sizeof(VkSwapchainCounterCreateInfoEXT);
+        }
+#endif
+#ifdef VK_GOOGLE_display_timing
+        case VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE:
+        {
+            return sizeof(VkPresentTimesInfoGOOGLE);
+        }
+#endif
+#ifdef VK_NVX_multiview_per_view_attributes
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX:
+        {
+            return sizeof(VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX);
+        }
+#endif
+#ifdef VK_NV_viewport_swizzle
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV:
+        {
+            return sizeof(VkPipelineViewportSwizzleStateCreateInfoNV);
+        }
+#endif
+#ifdef VK_EXT_discard_rectangles
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceDiscardRectanglePropertiesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT:
+        {
+            return sizeof(VkPipelineDiscardRectangleStateCreateInfoEXT);
+        }
+#endif
+#ifdef VK_EXT_conservative_rasterization
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceConservativeRasterizationPropertiesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT:
+        {
+            return sizeof(VkPipelineRasterizationConservativeStateCreateInfoEXT);
+        }
+#endif
+#ifdef VK_EXT_depth_clip_enable
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceDepthClipEnableFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT:
+        {
+            return sizeof(VkPipelineRasterizationDepthClipStateCreateInfoEXT);
+        }
+#endif
+#ifdef VK_EXT_debug_utils
+        case VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT:
+        {
+            return sizeof(VkDebugUtilsMessengerCreateInfoEXT);
+        }
+#endif
+#ifdef VK_ANDROID_external_memory_android_hardware_buffer
+        case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID:
+        {
+            return sizeof(VkAndroidHardwareBufferUsageANDROID);
+        }
+        case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID:
+        {
+            return sizeof(VkAndroidHardwareBufferFormatPropertiesANDROID);
+        }
+        case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
+        {
+            return sizeof(VkImportAndroidHardwareBufferInfoANDROID);
+        }
+        case VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID:
+        {
+            return sizeof(VkExternalFormatANDROID);
+        }
+#endif
+#ifdef VK_EXT_inline_uniform_block
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceInlineUniformBlockFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceInlineUniformBlockPropertiesEXT);
+        }
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT:
+        {
+            return sizeof(VkWriteDescriptorSetInlineUniformBlockEXT);
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT:
+        {
+            return sizeof(VkDescriptorPoolInlineUniformBlockCreateInfoEXT);
+        }
+#endif
+#ifdef VK_EXT_sample_locations
+        case VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT:
+        {
+            return sizeof(VkSampleLocationsInfoEXT);
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT:
+        {
+            return sizeof(VkRenderPassSampleLocationsBeginInfoEXT);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT:
+        {
+            return sizeof(VkPipelineSampleLocationsStateCreateInfoEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceSampleLocationsPropertiesEXT);
+        }
+#endif
+#ifdef VK_EXT_blend_operation_advanced
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT:
+        {
+            return sizeof(VkPipelineColorBlendAdvancedStateCreateInfoEXT);
+        }
+#endif
+#ifdef VK_NV_fragment_coverage_to_color
+        case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV:
+        {
+            return sizeof(VkPipelineCoverageToColorStateCreateInfoNV);
+        }
+#endif
+#ifdef VK_NV_framebuffer_mixed_samples
+        case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV:
+        {
+            return sizeof(VkPipelineCoverageModulationStateCreateInfoNV);
+        }
+#endif
+#ifdef VK_NV_shader_sm_builtins
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV:
+        {
+            return sizeof(VkPhysicalDeviceShaderSMBuiltinsPropertiesNV);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceShaderSMBuiltinsFeaturesNV);
+        }
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+        case VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT:
+        {
+            return sizeof(VkDrmFormatModifierPropertiesListEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT:
+        {
+            return sizeof(VkPhysicalDeviceImageDrmFormatModifierInfoEXT);
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT:
+        {
+            return sizeof(VkImageDrmFormatModifierListCreateInfoEXT);
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT:
+        {
+            return sizeof(VkImageDrmFormatModifierExplicitCreateInfoEXT);
+        }
+#endif
+#ifdef VK_EXT_validation_cache
+        case VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT:
+        {
+            return sizeof(VkShaderModuleValidationCacheCreateInfoEXT);
+        }
+#endif
+#ifdef VK_NV_shading_rate_image
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV:
+        {
+            return sizeof(VkPipelineViewportShadingRateImageStateCreateInfoNV);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceShadingRateImageFeaturesNV);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV:
+        {
+            return sizeof(VkPhysicalDeviceShadingRateImagePropertiesNV);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV:
+        {
+            return sizeof(VkPipelineViewportCoarseSampleOrderStateCreateInfoNV);
+        }
+#endif
+#ifdef VK_NV_ray_tracing
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV:
+        {
+            return sizeof(VkWriteDescriptorSetAccelerationStructureNV);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV:
+        {
+            return sizeof(VkPhysicalDeviceRayTracingPropertiesNV);
+        }
+#endif
+#ifdef VK_NV_representative_fragment_test
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV:
+        {
+            return sizeof(VkPipelineRepresentativeFragmentTestStateCreateInfoNV);
+        }
+#endif
+#ifdef VK_EXT_filter_cubic
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT:
+        {
+            return sizeof(VkPhysicalDeviceImageViewImageFormatInfoEXT);
+        }
+        case VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT:
+        {
+            return sizeof(VkFilterCubicImageViewImageFormatPropertiesEXT);
+        }
+#endif
+#ifdef VK_EXT_global_priority
+        case VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT:
+        {
+            return sizeof(VkDeviceQueueGlobalPriorityCreateInfoEXT);
+        }
+#endif
+#ifdef VK_EXT_external_memory_host
+        case VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT:
+        {
+            return sizeof(VkImportMemoryHostPointerInfoEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceExternalMemoryHostPropertiesEXT);
+        }
+#endif
+#ifdef VK_AMD_pipeline_compiler_control
+        case VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD:
+        {
+            return sizeof(VkPipelineCompilerControlCreateInfoAMD);
+        }
+#endif
+#ifdef VK_AMD_shader_core_properties
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD:
+        {
+            return sizeof(VkPhysicalDeviceShaderCorePropertiesAMD);
+        }
+#endif
+#ifdef VK_AMD_memory_overallocation_behavior
+        case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD:
+        {
+            return sizeof(VkDeviceMemoryOverallocationCreateInfoAMD);
+        }
+#endif
+#ifdef VK_EXT_vertex_attribute_divisor
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT:
+        {
+            return sizeof(VkPipelineVertexInputDivisorStateCreateInfoEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT);
+        }
+#endif
+#ifdef VK_GGP_frame_token
+        case VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP:
+        {
+            return sizeof(VkPresentFrameTokenGGP);
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+        case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT:
+        {
+            return sizeof(VkPipelineCreationFeedbackCreateInfoEXT);
+        }
+#endif
+#ifdef VK_NV_compute_shader_derivatives
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceComputeShaderDerivativesFeaturesNV);
+        }
+#endif
+#ifdef VK_NV_mesh_shader
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceMeshShaderFeaturesNV);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV:
+        {
+            return sizeof(VkPhysicalDeviceMeshShaderPropertiesNV);
+        }
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV);
+        }
+#endif
+#ifdef VK_NV_shader_image_footprint
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceShaderImageFootprintFeaturesNV);
+        }
+#endif
+#ifdef VK_NV_scissor_exclusive
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV:
+        {
+            return sizeof(VkPipelineViewportExclusiveScissorStateCreateInfoNV);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceExclusiveScissorFeaturesNV);
+        }
+#endif
+#ifdef VK_NV_device_diagnostic_checkpoints
+        case VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV:
+        {
+            return sizeof(VkQueueFamilyCheckpointPropertiesNV);
+        }
+#endif
+#ifdef VK_INTEL_shader_integer_functions2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL:
+        {
+            return sizeof(VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL);
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL:
+        {
+            return sizeof(VkQueryPoolPerformanceQueryCreateInfoINTEL);
+        }
+#endif
+#ifdef VK_EXT_pci_bus_info
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDevicePCIBusInfoPropertiesEXT);
+        }
+#endif
+#ifdef VK_AMD_display_native_hdr
+        case VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD:
+        {
+            return sizeof(VkDisplayNativeHdrSurfaceCapabilitiesAMD);
+        }
+        case VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD:
+        {
+            return sizeof(VkSwapchainDisplayNativeHdrCreateInfoAMD);
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
+                {
+                    return sizeof(VkPhysicalDeviceFragmentDensityMapFeaturesEXT);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO:
+                {
+                    return sizeof(VkPhysicalDeviceFragmentDensityMapFeaturesEXT);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    return sizeof(VkImportColorBufferGOOGLE);
+                    break;
+                }
+                default:
+                {
+                    return sizeof(VkPhysicalDeviceFragmentDensityMapFeaturesEXT);
+                    break;
+                }
+            }
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2:
+                {
+                    return sizeof(VkPhysicalDeviceFragmentDensityMapPropertiesEXT);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    return sizeof(VkImportPhysicalAddressGOOGLE);
+                    break;
+                }
+                default:
+                {
+                    return sizeof(VkPhysicalDeviceFragmentDensityMapPropertiesEXT);
+                    break;
+                }
+            }
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO:
+                {
+                    return sizeof(VkRenderPassFragmentDensityMapCreateInfoEXT);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2:
+                {
+                    return sizeof(VkRenderPassFragmentDensityMapCreateInfoEXT);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    return sizeof(VkImportBufferGOOGLE);
+                    break;
+                }
+                default:
+                {
+                    return sizeof(VkRenderPassFragmentDensityMapCreateInfoEXT);
+                    break;
+                }
+            }
+        }
+#endif
+#ifdef VK_EXT_subgroup_size_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceSubgroupSizeControlFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceSubgroupSizeControlPropertiesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT:
+        {
+            return sizeof(VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT);
+        }
+#endif
+#ifdef VK_AMD_shader_core_properties2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD:
+        {
+            return sizeof(VkPhysicalDeviceShaderCoreProperties2AMD);
+        }
+#endif
+#ifdef VK_AMD_device_coherent_memory
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD:
+        {
+            return sizeof(VkPhysicalDeviceCoherentMemoryFeaturesAMD);
+        }
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_memory_budget
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceMemoryBudgetPropertiesEXT);
+        }
+#endif
+#ifdef VK_EXT_memory_priority
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceMemoryPriorityFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT:
+        {
+            return sizeof(VkMemoryPriorityAllocateInfoEXT);
+        }
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV);
+        }
+#endif
+#ifdef VK_EXT_buffer_device_address
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceBufferDeviceAddressFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT:
+        {
+            return sizeof(VkBufferDeviceAddressCreateInfoEXT);
+        }
+#endif
+#ifdef VK_EXT_validation_features
+        case VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT:
+        {
+            return sizeof(VkValidationFeaturesEXT);
+        }
+#endif
+#ifdef VK_NV_cooperative_matrix
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceCooperativeMatrixFeaturesNV);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV:
+        {
+            return sizeof(VkPhysicalDeviceCooperativeMatrixPropertiesNV);
+        }
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceCoverageReductionModeFeaturesNV);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV:
+        {
+            return sizeof(VkPipelineCoverageReductionStateCreateInfoNV);
+        }
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceYcbcrImageArraysFeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT:
+        {
+            return sizeof(VkSurfaceFullScreenExclusiveInfoEXT);
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT:
+        {
+            return sizeof(VkSurfaceCapabilitiesFullScreenExclusiveEXT);
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT:
+        {
+            return sizeof(VkSurfaceFullScreenExclusiveWin32InfoEXT);
+        }
+#endif
+#ifdef VK_EXT_line_rasterization
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceLineRasterizationFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceLineRasterizationPropertiesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
+        {
+            return sizeof(VkPipelineRasterizationLineStateCreateInfoEXT);
+        }
+#endif
+#ifdef VK_EXT_shader_atomic_float
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceShaderAtomicFloatFeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_index_type_uint8
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceIndexTypeUint8FeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceExtendedDynamicStateFeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT);
+        }
+#endif
+#ifdef VK_NV_device_generated_commands
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV:
+        {
+            return sizeof(VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV);
+        }
+        case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV:
+        {
+            return sizeof(VkGraphicsPipelineShaderGroupsCreateInfoNV);
+        }
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT);
+        }
+#endif
+#ifdef VK_QCOM_render_pass_transform
+        case VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM:
+        {
+            return sizeof(VkRenderPassTransformBeginInfoQCOM);
+        }
+        case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM:
+        {
+            return sizeof(VkCommandBufferInheritanceRenderPassTransformInfoQCOM);
+        }
+#endif
+#ifdef VK_EXT_device_memory_report
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceDeviceMemoryReportFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT:
+        {
+            return sizeof(VkDeviceDeviceMemoryReportCreateInfoEXT);
+        }
+#endif
+#ifdef VK_EXT_robustness2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceRobustness2FeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceRobustness2PropertiesEXT);
+        }
+#endif
+#ifdef VK_EXT_custom_border_color
+        case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT:
+        {
+            return sizeof(VkSamplerCustomBorderColorCreateInfoEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceCustomBorderColorPropertiesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceCustomBorderColorFeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_private_data
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDevicePrivateDataFeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT:
+        {
+            return sizeof(VkDevicePrivateDataCreateInfoEXT);
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT);
+        }
+#endif
+#ifdef VK_NV_device_diagnostics_config
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceDiagnosticsConfigFeaturesNV);
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV:
+        {
+            return sizeof(VkDeviceDiagnosticsConfigCreateInfoNV);
+        }
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV:
+        {
+            return sizeof(VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV:
+        {
+            return sizeof(VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV);
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV:
+        {
+            return sizeof(VkPipelineFragmentShadingRateEnumStateCreateInfoNV);
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceFragmentDensityMap2FeaturesEXT);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceFragmentDensityMap2PropertiesEXT);
+        }
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+        case VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM:
+        {
+            return sizeof(VkCopyCommandTransformInfoQCOM);
+        }
+#endif
+#ifdef VK_EXT_image_robustness
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDeviceImageRobustnessFeaturesEXT);
+        }
+#endif
+#ifdef VK_EXT_4444_formats
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+        {
+            return sizeof(VkPhysicalDevice4444FormatsFeaturesEXT);
+        }
+#endif
+#ifdef VK_GOOGLE_gfxstream
+        case VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE:
+        {
+            return sizeof(VkImportColorBufferGOOGLE);
+        }
+        case VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE:
+        {
+            return sizeof(VkImportBufferGOOGLE);
+        }
+        case VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE:
+        {
+            return sizeof(VkImportPhysicalAddressGOOGLE);
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR:
+        {
+            return sizeof(VkWriteDescriptorSetAccelerationStructureKHR);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceAccelerationStructureFeaturesKHR);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceAccelerationStructurePropertiesKHR);
+        }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceRayTracingPipelineFeaturesKHR);
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceRayTracingPipelinePropertiesKHR);
+        }
+#endif
+#ifdef VK_KHR_ray_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR:
+        {
+            return sizeof(VkPhysicalDeviceRayQueryFeaturesKHR);
+        }
+#endif
         default:
         {
             return (size_t)0;
diff --git a/system/vulkan_enc/goldfish_vk_extension_structs_guest.h b/system/vulkan_enc/goldfish_vk_extension_structs_guest.h
index 5a3fc66..6bae0b8 100644
--- a/system/vulkan_enc/goldfish_vk_extension_structs_guest.h
+++ b/system/vulkan_enc/goldfish_vk_extension_structs_guest.h
@@ -43,12 +43,20 @@
     const void* structExtension);
 
 size_t goldfish_vk_extension_struct_size(
+    VkStructureType rootType,
+    const void* structExtension);
+
+size_t goldfish_vk_extension_struct_size_with_stream_features(
+    uint32_t streamFeatures,
+    VkStructureType rootType,
     const void* structExtension);
 
 #ifdef VK_VERSION_1_0
 #endif
 #ifdef VK_VERSION_1_1
 #endif
+#ifdef VK_VERSION_1_2
+#endif
 #ifdef VK_KHR_surface
 #endif
 #ifdef VK_KHR_swapchain
@@ -63,8 +71,6 @@
 #endif
 #ifdef VK_KHR_wayland_surface
 #endif
-#ifdef VK_KHR_mir_surface
-#endif
 #ifdef VK_KHR_android_surface
 #endif
 #ifdef VK_KHR_win32_surface
@@ -103,12 +109,16 @@
 #endif
 #ifdef VK_KHR_push_descriptor
 #endif
+#ifdef VK_KHR_shader_float16_int8
+#endif
 #ifdef VK_KHR_16bit_storage
 #endif
 #ifdef VK_KHR_incremental_present
 #endif
 #ifdef VK_KHR_descriptor_update_template
 #endif
+#ifdef VK_KHR_imageless_framebuffer
+#endif
 #ifdef VK_KHR_create_renderpass2
 #endif
 #ifdef VK_KHR_shared_presentable_image
@@ -121,6 +131,8 @@
 #endif
 #ifdef VK_KHR_external_fence_fd
 #endif
+#ifdef VK_KHR_performance_query
+#endif
 #ifdef VK_KHR_maintenance2
 #endif
 #ifdef VK_KHR_get_surface_capabilities2
@@ -143,12 +155,56 @@
 #endif
 #ifdef VK_KHR_bind_memory2
 #endif
+#ifdef VK_KHR_portability_subset
+#endif
 #ifdef VK_KHR_maintenance3
 #endif
 #ifdef VK_KHR_draw_indirect_count
 #endif
+#ifdef VK_KHR_shader_subgroup_extended_types
+#endif
 #ifdef VK_KHR_8bit_storage
 #endif
+#ifdef VK_KHR_shader_atomic_int64
+#endif
+#ifdef VK_KHR_shader_clock
+#endif
+#ifdef VK_KHR_driver_properties
+#endif
+#ifdef VK_KHR_shader_float_controls
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+#endif
+#ifdef VK_KHR_buffer_device_address
+#endif
+#ifdef VK_KHR_deferred_host_operations
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+#endif
+#ifdef VK_KHR_pipeline_library
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+#endif
 #ifdef VK_ANDROID_native_buffer
 #endif
 #ifdef VK_EXT_debug_report
@@ -171,6 +227,10 @@
 #endif
 #ifdef VK_NV_dedicated_allocation
 #endif
+#ifdef VK_EXT_transform_feedback
+#endif
+#ifdef VK_NVX_image_view_handle
+#endif
 #ifdef VK_AMD_draw_indirect_count
 #endif
 #ifdef VK_AMD_negative_viewport_height
@@ -185,6 +245,10 @@
 #endif
 #ifdef VK_AMD_shader_image_load_store_lod
 #endif
+#ifdef VK_GGP_stream_descriptor_surface
+#endif
+#ifdef VK_NV_corner_sampled_image
+#endif
 #ifdef VK_IMG_format_pvrtc
 #endif
 #ifdef VK_NV_external_memory_capabilities
@@ -203,9 +267,11 @@
 #endif
 #ifdef VK_EXT_shader_subgroup_vote
 #endif
-#ifdef VK_EXT_conditional_rendering
+#ifdef VK_EXT_texture_compression_astc_hdr
 #endif
-#ifdef VK_NVX_device_generated_commands
+#ifdef VK_EXT_astc_decode_mode
+#endif
+#ifdef VK_EXT_conditional_rendering
 #endif
 #ifdef VK_NV_clip_space_w_scaling
 #endif
@@ -233,6 +299,8 @@
 #endif
 #ifdef VK_EXT_conservative_rasterization
 #endif
+#ifdef VK_EXT_depth_clip_enable
+#endif
 #ifdef VK_EXT_swapchain_colorspace
 #endif
 #ifdef VK_EXT_hdr_metadata
@@ -241,6 +309,8 @@
 #endif
 #ifdef VK_MVK_macos_surface
 #endif
+#ifdef VK_MVK_moltenvk
+#endif
 #ifdef VK_EXT_external_memory_dma_buf
 #endif
 #ifdef VK_EXT_queue_family_foreign
@@ -257,6 +327,8 @@
 #endif
 #ifdef VK_AMD_shader_fragment_mask
 #endif
+#ifdef VK_EXT_inline_uniform_block
+#endif
 #ifdef VK_EXT_shader_stencil_export
 #endif
 #ifdef VK_EXT_sample_locations
@@ -269,41 +341,169 @@
 #endif
 #ifdef VK_NV_fill_rectangle
 #endif
+#ifdef VK_NV_shader_sm_builtins
+#endif
 #ifdef VK_EXT_post_depth_coverage
 #endif
+#ifdef VK_EXT_image_drm_format_modifier
+#endif
 #ifdef VK_EXT_validation_cache
 #endif
 #ifdef VK_EXT_descriptor_indexing
 #endif
 #ifdef VK_EXT_shader_viewport_index_layer
 #endif
+#ifdef VK_NV_shading_rate_image
+#endif
+#ifdef VK_NV_ray_tracing
+#endif
+#ifdef VK_NV_representative_fragment_test
+#endif
+#ifdef VK_EXT_filter_cubic
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
+#endif
 #ifdef VK_EXT_global_priority
 #endif
 #ifdef VK_EXT_external_memory_host
 #endif
 #ifdef VK_AMD_buffer_marker
 #endif
+#ifdef VK_AMD_pipeline_compiler_control
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+#endif
 #ifdef VK_AMD_shader_core_properties
 #endif
+#ifdef VK_AMD_memory_overallocation_behavior
+#endif
 #ifdef VK_EXT_vertex_attribute_divisor
 #endif
+#ifdef VK_GGP_frame_token
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+#endif
 #ifdef VK_NV_shader_subgroup_partitioned
 #endif
+#ifdef VK_NV_compute_shader_derivatives
+#endif
+#ifdef VK_NV_mesh_shader
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+#endif
+#ifdef VK_NV_shader_image_footprint
+#endif
+#ifdef VK_NV_scissor_exclusive
+#endif
 #ifdef VK_NV_device_diagnostic_checkpoints
 #endif
-#ifdef VK_GOOGLE_address_space
+#ifdef VK_INTEL_shader_integer_functions2
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_INTEL_performance_query
 #endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
+#ifdef VK_EXT_pci_bus_info
 #endif
-#ifdef VK_GOOGLE_async_command_buffers
+#ifdef VK_AMD_display_native_hdr
 #endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
+#ifdef VK_FUCHSIA_imagepipe_surface
 #endif
-#ifdef VK_GOOGLE_address_space_info
+#ifdef VK_EXT_metal_surface
 #endif
-#ifdef VK_GOOGLE_free_memory_sync
+#ifdef VK_EXT_fragment_density_map
+#endif
+#ifdef VK_EXT_scalar_block_layout
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+#endif
+#ifdef VK_AMD_shader_core_properties2
+#endif
+#ifdef VK_AMD_device_coherent_memory
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+#endif
+#ifdef VK_EXT_memory_budget
+#endif
+#ifdef VK_EXT_memory_priority
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+#endif
+#ifdef VK_EXT_buffer_device_address
+#endif
+#ifdef VK_EXT_tooling_info
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+#endif
+#ifdef VK_EXT_validation_features
+#endif
+#ifdef VK_NV_cooperative_matrix
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+#endif
+#ifdef VK_EXT_headless_surface
+#endif
+#ifdef VK_EXT_line_rasterization
+#endif
+#ifdef VK_EXT_shader_atomic_float
+#endif
+#ifdef VK_EXT_host_query_reset
+#endif
+#ifdef VK_EXT_index_type_uint8
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+#endif
+#ifdef VK_NV_device_generated_commands
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+#endif
+#ifdef VK_QCOM_render_pass_transform
+#endif
+#ifdef VK_EXT_device_memory_report
+#endif
+#ifdef VK_EXT_robustness2
+#endif
+#ifdef VK_EXT_custom_border_color
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+#endif
+#ifdef VK_NV_device_diagnostics_config
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+#endif
+#ifdef VK_EXT_fragment_density_map2
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+#endif
+#ifdef VK_EXT_image_robustness
+#endif
+#ifdef VK_EXT_4444_formats
+#endif
+#ifdef VK_EXT_directfb_surface
+#endif
+#ifdef VK_GOOGLE_gfxstream
+#endif
+#ifdef VK_KHR_acceleration_structure
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+#endif
+#ifdef VK_KHR_ray_query
 #endif
 
 } // namespace goldfish_vk
diff --git a/system/vulkan_enc/goldfish_vk_handlemap_guest.cpp b/system/vulkan_enc/goldfish_vk_handlemap_guest.cpp
index 8e66554..269b50b 100644
--- a/system/vulkan_enc/goldfish_vk_handlemap_guest.cpp
+++ b/system/vulkan_enc/goldfish_vk_handlemap_guest.cpp
@@ -36,6 +36,151 @@
     void* structExtension_out);
 
 #ifdef VK_VERSION_1_0
+void handlemap_VkExtent2D(
+    VulkanHandleMapping* handlemap,
+    VkExtent2D* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkExtent3D(
+    VulkanHandleMapping* handlemap,
+    VkExtent3D* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkOffset2D(
+    VulkanHandleMapping* handlemap,
+    VkOffset2D* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkOffset3D(
+    VulkanHandleMapping* handlemap,
+    VkOffset3D* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkRect2D(
+    VulkanHandleMapping* handlemap,
+    VkRect2D* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    handlemap_VkOffset2D(handlemap, (VkOffset2D*)(&toMap->offset));
+    handlemap_VkExtent2D(handlemap, (VkExtent2D*)(&toMap->extent));
+}
+
+void handlemap_VkBaseInStructure(
+    VulkanHandleMapping* handlemap,
+    VkBaseInStructure* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkBaseOutStructure(
+    VulkanHandleMapping* handlemap,
+    VkBaseOutStructure* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkBufferMemoryBarrier(
+    VulkanHandleMapping* handlemap,
+    VkBufferMemoryBarrier* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->buffer);
+}
+
+void handlemap_VkDispatchIndirectCommand(
+    VulkanHandleMapping* handlemap,
+    VkDispatchIndirectCommand* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkDrawIndexedIndirectCommand(
+    VulkanHandleMapping* handlemap,
+    VkDrawIndexedIndirectCommand* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkDrawIndirectCommand(
+    VulkanHandleMapping* handlemap,
+    VkDrawIndirectCommand* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkImageSubresourceRange(
+    VulkanHandleMapping* handlemap,
+    VkImageSubresourceRange* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkImageMemoryBarrier(
+    VulkanHandleMapping* handlemap,
+    VkImageMemoryBarrier* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkImage((VkImage*)&toMap->image);
+    handlemap_VkImageSubresourceRange(handlemap, (VkImageSubresourceRange*)(&toMap->subresourceRange));
+}
+
+void handlemap_VkMemoryBarrier(
+    VulkanHandleMapping* handlemap,
+    VkMemoryBarrier* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkAllocationCallbacks(
+    VulkanHandleMapping* handlemap,
+    VkAllocationCallbacks* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
 void handlemap_VkApplicationInfo(
     VulkanHandleMapping* handlemap,
     VkApplicationInfo* toMap)
@@ -48,6 +193,23 @@
     }
 }
 
+void handlemap_VkFormatProperties(
+    VulkanHandleMapping* handlemap,
+    VkFormatProperties* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkImageFormatProperties(
+    VulkanHandleMapping* handlemap,
+    VkImageFormatProperties* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    handlemap_VkExtent3D(handlemap, (VkExtent3D*)(&toMap->maxExtent));
+}
+
 void handlemap_VkInstanceCreateInfo(
     VulkanHandleMapping* handlemap,
     VkInstanceCreateInfo* toMap)
@@ -64,9 +226,17 @@
     }
 }
 
-void handlemap_VkAllocationCallbacks(
+void handlemap_VkMemoryHeap(
     VulkanHandleMapping* handlemap,
-    VkAllocationCallbacks* toMap)
+    VkMemoryHeap* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkMemoryType(
+    VulkanHandleMapping* handlemap,
+    VkMemoryType* toMap)
 {
     (void)handlemap;
     (void)toMap;
@@ -80,31 +250,6 @@
     (void)toMap;
 }
 
-void handlemap_VkFormatProperties(
-    VulkanHandleMapping* handlemap,
-    VkFormatProperties* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-}
-
-void handlemap_VkExtent3D(
-    VulkanHandleMapping* handlemap,
-    VkExtent3D* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-}
-
-void handlemap_VkImageFormatProperties(
-    VulkanHandleMapping* handlemap,
-    VkImageFormatProperties* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    handlemap_VkExtent3D(handlemap, (VkExtent3D*)(&toMap->maxExtent));
-}
-
 void handlemap_VkPhysicalDeviceLimits(
     VulkanHandleMapping* handlemap,
     VkPhysicalDeviceLimits* toMap)
@@ -113,6 +258,22 @@
     (void)toMap;
 }
 
+void handlemap_VkPhysicalDeviceMemoryProperties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceMemoryProperties* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_TYPES; ++i)
+    {
+        handlemap_VkMemoryType(handlemap, (VkMemoryType*)(toMap->memoryTypes + i));
+    }
+    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_HEAPS; ++i)
+    {
+        handlemap_VkMemoryHeap(handlemap, (VkMemoryHeap*)(toMap->memoryHeaps + i));
+    }
+}
+
 void handlemap_VkPhysicalDeviceSparseProperties(
     VulkanHandleMapping* handlemap,
     VkPhysicalDeviceSparseProperties* toMap)
@@ -140,38 +301,6 @@
     handlemap_VkExtent3D(handlemap, (VkExtent3D*)(&toMap->minImageTransferGranularity));
 }
 
-void handlemap_VkMemoryType(
-    VulkanHandleMapping* handlemap,
-    VkMemoryType* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-}
-
-void handlemap_VkMemoryHeap(
-    VulkanHandleMapping* handlemap,
-    VkMemoryHeap* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-}
-
-void handlemap_VkPhysicalDeviceMemoryProperties(
-    VulkanHandleMapping* handlemap,
-    VkPhysicalDeviceMemoryProperties* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_TYPES; ++i)
-    {
-        handlemap_VkMemoryType(handlemap, (VkMemoryType*)(toMap->memoryTypes + i));
-    }
-    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_HEAPS; ++i)
-    {
-        handlemap_VkMemoryHeap(handlemap, (VkMemoryHeap*)(toMap->memoryHeaps + i));
-    }
-}
-
 void handlemap_VkDeviceQueueCreateInfo(
     VulkanHandleMapping* handlemap,
     VkDeviceQueueCreateInfo* toMap)
@@ -194,11 +323,14 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pQueueCreateInfos)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->queueCreateInfoCount; ++i)
+        if (toMap->pQueueCreateInfos)
         {
-            handlemap_VkDeviceQueueCreateInfo(handlemap, (VkDeviceQueueCreateInfo*)(toMap->pQueueCreateInfos + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->queueCreateInfoCount; ++i)
+            {
+                handlemap_VkDeviceQueueCreateInfo(handlemap, (VkDeviceQueueCreateInfo*)(toMap->pQueueCreateInfos + i));
+            }
         }
     }
     if (toMap->pEnabledFeatures)
@@ -247,18 +379,6 @@
     }
 }
 
-void handlemap_VkMemoryAllocateInfo(
-    VulkanHandleMapping* handlemap,
-    VkMemoryAllocateInfo* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-}
-
 void handlemap_VkMappedMemoryRange(
     VulkanHandleMapping* handlemap,
     VkMappedMemoryRange* toMap)
@@ -272,6 +392,18 @@
     handlemap->mapHandles_VkDeviceMemory((VkDeviceMemory*)&toMap->memory);
 }
 
+void handlemap_VkMemoryAllocateInfo(
+    VulkanHandleMapping* handlemap,
+    VkMemoryAllocateInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
 void handlemap_VkMemoryRequirements(
     VulkanHandleMapping* handlemap,
     VkMemoryRequirements* toMap)
@@ -280,24 +412,6 @@
     (void)toMap;
 }
 
-void handlemap_VkSparseImageFormatProperties(
-    VulkanHandleMapping* handlemap,
-    VkSparseImageFormatProperties* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    handlemap_VkExtent3D(handlemap, (VkExtent3D*)(&toMap->imageGranularity));
-}
-
-void handlemap_VkSparseImageMemoryRequirements(
-    VulkanHandleMapping* handlemap,
-    VkSparseImageMemoryRequirements* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    handlemap_VkSparseImageFormatProperties(handlemap, (VkSparseImageFormatProperties*)(&toMap->formatProperties));
-}
-
 void handlemap_VkSparseMemoryBind(
     VulkanHandleMapping* handlemap,
     VkSparseMemoryBind* toMap)
@@ -314,11 +428,14 @@
     (void)handlemap;
     (void)toMap;
     handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->buffer);
-    if (toMap->pBinds)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->bindCount; ++i)
+        if (toMap->pBinds)
         {
-            handlemap_VkSparseMemoryBind(handlemap, (VkSparseMemoryBind*)(toMap->pBinds + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->bindCount; ++i)
+            {
+                handlemap_VkSparseMemoryBind(handlemap, (VkSparseMemoryBind*)(toMap->pBinds + i));
+            }
         }
     }
 }
@@ -330,11 +447,14 @@
     (void)handlemap;
     (void)toMap;
     handlemap->mapHandles_VkImage((VkImage*)&toMap->image);
-    if (toMap->pBinds)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->bindCount; ++i)
+        if (toMap->pBinds)
         {
-            handlemap_VkSparseMemoryBind(handlemap, (VkSparseMemoryBind*)(toMap->pBinds + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->bindCount; ++i)
+            {
+                handlemap_VkSparseMemoryBind(handlemap, (VkSparseMemoryBind*)(toMap->pBinds + i));
+            }
         }
     }
 }
@@ -347,14 +467,6 @@
     (void)toMap;
 }
 
-void handlemap_VkOffset3D(
-    VulkanHandleMapping* handlemap,
-    VkOffset3D* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-}
-
 void handlemap_VkSparseImageMemoryBind(
     VulkanHandleMapping* handlemap,
     VkSparseImageMemoryBind* toMap)
@@ -374,11 +486,14 @@
     (void)handlemap;
     (void)toMap;
     handlemap->mapHandles_VkImage((VkImage*)&toMap->image);
-    if (toMap->pBinds)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->bindCount; ++i)
+        if (toMap->pBinds)
         {
-            handlemap_VkSparseImageMemoryBind(handlemap, (VkSparseImageMemoryBind*)(toMap->pBinds + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->bindCount; ++i)
+            {
+                handlemap_VkSparseImageMemoryBind(handlemap, (VkSparseImageMemoryBind*)(toMap->pBinds + i));
+            }
         }
     }
 }
@@ -397,25 +512,34 @@
     {
         handlemap->mapHandles_VkSemaphore((VkSemaphore*)toMap->pWaitSemaphores, toMap->waitSemaphoreCount);
     }
-    if (toMap->pBufferBinds)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->bufferBindCount; ++i)
+        if (toMap->pBufferBinds)
         {
-            handlemap_VkSparseBufferMemoryBindInfo(handlemap, (VkSparseBufferMemoryBindInfo*)(toMap->pBufferBinds + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->bufferBindCount; ++i)
+            {
+                handlemap_VkSparseBufferMemoryBindInfo(handlemap, (VkSparseBufferMemoryBindInfo*)(toMap->pBufferBinds + i));
+            }
         }
     }
-    if (toMap->pImageOpaqueBinds)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->imageOpaqueBindCount; ++i)
+        if (toMap->pImageOpaqueBinds)
         {
-            handlemap_VkSparseImageOpaqueMemoryBindInfo(handlemap, (VkSparseImageOpaqueMemoryBindInfo*)(toMap->pImageOpaqueBinds + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->imageOpaqueBindCount; ++i)
+            {
+                handlemap_VkSparseImageOpaqueMemoryBindInfo(handlemap, (VkSparseImageOpaqueMemoryBindInfo*)(toMap->pImageOpaqueBinds + i));
+            }
         }
     }
-    if (toMap->pImageBinds)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->imageBindCount; ++i)
+        if (toMap->pImageBinds)
         {
-            handlemap_VkSparseImageMemoryBindInfo(handlemap, (VkSparseImageMemoryBindInfo*)(toMap->pImageBinds + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->imageBindCount; ++i)
+            {
+                handlemap_VkSparseImageMemoryBindInfo(handlemap, (VkSparseImageMemoryBindInfo*)(toMap->pImageBinds + i));
+            }
         }
     }
     if (toMap->pSignalSemaphores)
@@ -424,6 +548,24 @@
     }
 }
 
+void handlemap_VkSparseImageFormatProperties(
+    VulkanHandleMapping* handlemap,
+    VkSparseImageFormatProperties* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    handlemap_VkExtent3D(handlemap, (VkExtent3D*)(&toMap->imageGranularity));
+}
+
+void handlemap_VkSparseImageMemoryRequirements(
+    VulkanHandleMapping* handlemap,
+    VkSparseImageMemoryRequirements* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    handlemap_VkSparseImageFormatProperties(handlemap, (VkSparseImageFormatProperties*)(&toMap->formatProperties));
+}
+
 void handlemap_VkFenceCreateInfo(
     VulkanHandleMapping* handlemap,
     VkFenceCreateInfo* toMap)
@@ -526,14 +668,6 @@
     (void)toMap;
 }
 
-void handlemap_VkImageSubresourceRange(
-    VulkanHandleMapping* handlemap,
-    VkImageSubresourceRange* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-}
-
 void handlemap_VkImageViewCreateInfo(
     VulkanHandleMapping* handlemap,
     VkImageViewCreateInfo* toMap)
@@ -587,11 +721,14 @@
 {
     (void)handlemap;
     (void)toMap;
-    if (toMap->pMapEntries)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->mapEntryCount; ++i)
+        if (toMap->pMapEntries)
         {
-            handlemap_VkSpecializationMapEntry(handlemap, (VkSpecializationMapEntry*)(toMap->pMapEntries + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->mapEntryCount; ++i)
+            {
+                handlemap_VkSpecializationMapEntry(handlemap, (VkSpecializationMapEntry*)(toMap->pMapEntries + i));
+            }
         }
     }
 }
@@ -613,6 +750,21 @@
     }
 }
 
+void handlemap_VkComputePipelineCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkComputePipelineCreateInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkPipelineShaderStageCreateInfo(handlemap, (VkPipelineShaderStageCreateInfo*)(&toMap->stage));
+    handlemap->mapHandles_VkPipelineLayout((VkPipelineLayout*)&toMap->layout);
+    handlemap->mapHandles_VkPipeline((VkPipeline*)&toMap->basePipelineHandle);
+}
+
 void handlemap_VkVertexInputBindingDescription(
     VulkanHandleMapping* handlemap,
     VkVertexInputBindingDescription* toMap)
@@ -639,18 +791,24 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pVertexBindingDescriptions)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->vertexBindingDescriptionCount; ++i)
+        if (toMap->pVertexBindingDescriptions)
         {
-            handlemap_VkVertexInputBindingDescription(handlemap, (VkVertexInputBindingDescription*)(toMap->pVertexBindingDescriptions + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->vertexBindingDescriptionCount; ++i)
+            {
+                handlemap_VkVertexInputBindingDescription(handlemap, (VkVertexInputBindingDescription*)(toMap->pVertexBindingDescriptions + i));
+            }
         }
     }
-    if (toMap->pVertexAttributeDescriptions)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->vertexAttributeDescriptionCount; ++i)
+        if (toMap->pVertexAttributeDescriptions)
         {
-            handlemap_VkVertexInputAttributeDescription(handlemap, (VkVertexInputAttributeDescription*)(toMap->pVertexAttributeDescriptions + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->vertexAttributeDescriptionCount; ++i)
+            {
+                handlemap_VkVertexInputAttributeDescription(handlemap, (VkVertexInputAttributeDescription*)(toMap->pVertexAttributeDescriptions + i));
+            }
         }
     }
 }
@@ -687,32 +845,6 @@
     (void)toMap;
 }
 
-void handlemap_VkOffset2D(
-    VulkanHandleMapping* handlemap,
-    VkOffset2D* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-}
-
-void handlemap_VkExtent2D(
-    VulkanHandleMapping* handlemap,
-    VkExtent2D* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-}
-
-void handlemap_VkRect2D(
-    VulkanHandleMapping* handlemap,
-    VkRect2D* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    handlemap_VkOffset2D(handlemap, (VkOffset2D*)(&toMap->offset));
-    handlemap_VkExtent2D(handlemap, (VkExtent2D*)(&toMap->extent));
-}
-
 void handlemap_VkPipelineViewportStateCreateInfo(
     VulkanHandleMapping* handlemap,
     VkPipelineViewportStateCreateInfo* toMap)
@@ -723,18 +855,24 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pViewports)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->viewportCount; ++i)
+        if (toMap->pViewports)
         {
-            handlemap_VkViewport(handlemap, (VkViewport*)(toMap->pViewports + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->viewportCount; ++i)
+            {
+                handlemap_VkViewport(handlemap, (VkViewport*)(toMap->pViewports + i));
+            }
         }
     }
-    if (toMap->pScissors)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->scissorCount; ++i)
+        if (toMap->pScissors)
         {
-            handlemap_VkRect2D(handlemap, (VkRect2D*)(toMap->pScissors + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->scissorCount; ++i)
+            {
+                handlemap_VkRect2D(handlemap, (VkRect2D*)(toMap->pScissors + i));
+            }
         }
     }
 }
@@ -803,11 +941,14 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pAttachments)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->attachmentCount; ++i)
+        if (toMap->pAttachments)
         {
-            handlemap_VkPipelineColorBlendAttachmentState(handlemap, (VkPipelineColorBlendAttachmentState*)(toMap->pAttachments + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->attachmentCount; ++i)
+            {
+                handlemap_VkPipelineColorBlendAttachmentState(handlemap, (VkPipelineColorBlendAttachmentState*)(toMap->pAttachments + i));
+            }
         }
     }
 }
@@ -834,11 +975,14 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pStages)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->stageCount; ++i)
+        if (toMap->pStages)
         {
-            handlemap_VkPipelineShaderStageCreateInfo(handlemap, (VkPipelineShaderStageCreateInfo*)(toMap->pStages + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->stageCount; ++i)
+            {
+                handlemap_VkPipelineShaderStageCreateInfo(handlemap, (VkPipelineShaderStageCreateInfo*)(toMap->pStages + i));
+            }
         }
     }
     if (toMap->pVertexInputState)
@@ -882,21 +1026,6 @@
     handlemap->mapHandles_VkPipeline((VkPipeline*)&toMap->basePipelineHandle);
 }
 
-void handlemap_VkComputePipelineCreateInfo(
-    VulkanHandleMapping* handlemap,
-    VkComputePipelineCreateInfo* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-    handlemap_VkPipelineShaderStageCreateInfo(handlemap, (VkPipelineShaderStageCreateInfo*)(&toMap->stage));
-    handlemap->mapHandles_VkPipelineLayout((VkPipelineLayout*)&toMap->layout);
-    handlemap->mapHandles_VkPipeline((VkPipeline*)&toMap->basePipelineHandle);
-}
-
 void handlemap_VkPushConstantRange(
     VulkanHandleMapping* handlemap,
     VkPushConstantRange* toMap)
@@ -919,11 +1048,14 @@
     {
         handlemap->mapHandles_VkDescriptorSetLayout((VkDescriptorSetLayout*)toMap->pSetLayouts, toMap->setLayoutCount);
     }
-    if (toMap->pPushConstantRanges)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->pushConstantRangeCount; ++i)
+        if (toMap->pPushConstantRanges)
         {
-            handlemap_VkPushConstantRange(handlemap, (VkPushConstantRange*)(toMap->pPushConstantRanges + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->pushConstantRangeCount; ++i)
+            {
+                handlemap_VkPushConstantRange(handlemap, (VkPushConstantRange*)(toMap->pPushConstantRanges + i));
+            }
         }
     }
 }
@@ -940,6 +1072,86 @@
     }
 }
 
+void handlemap_VkCopyDescriptorSet(
+    VulkanHandleMapping* handlemap,
+    VkCopyDescriptorSet* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkDescriptorSet((VkDescriptorSet*)&toMap->srcSet);
+    handlemap->mapHandles_VkDescriptorSet((VkDescriptorSet*)&toMap->dstSet);
+}
+
+void handlemap_VkDescriptorBufferInfo(
+    VulkanHandleMapping* handlemap,
+    VkDescriptorBufferInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->buffer);
+}
+
+void handlemap_VkDescriptorImageInfo(
+    VulkanHandleMapping* handlemap,
+    VkDescriptorImageInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    handlemap->mapHandles_VkSampler((VkSampler*)&toMap->sampler);
+    handlemap->mapHandles_VkImageView((VkImageView*)&toMap->imageView);
+}
+
+void handlemap_VkDescriptorPoolSize(
+    VulkanHandleMapping* handlemap,
+    VkDescriptorPoolSize* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkDescriptorPoolCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkDescriptorPoolCreateInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap)
+    {
+        if (toMap->pPoolSizes)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->poolSizeCount; ++i)
+            {
+                handlemap_VkDescriptorPoolSize(handlemap, (VkDescriptorPoolSize*)(toMap->pPoolSizes + i));
+            }
+        }
+    }
+}
+
+void handlemap_VkDescriptorSetAllocateInfo(
+    VulkanHandleMapping* handlemap,
+    VkDescriptorSetAllocateInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkDescriptorPool((VkDescriptorPool*)&toMap->descriptorPool);
+    if (toMap->pSetLayouts)
+    {
+        handlemap->mapHandles_VkDescriptorSetLayout((VkDescriptorSetLayout*)toMap->pSetLayouts, toMap->descriptorSetCount);
+    }
+}
+
 void handlemap_VkDescriptorSetLayoutBinding(
     VulkanHandleMapping* handlemap,
     VkDescriptorSetLayoutBinding* toMap)
@@ -962,78 +1174,18 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pBindings)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->bindingCount; ++i)
+        if (toMap->pBindings)
         {
-            handlemap_VkDescriptorSetLayoutBinding(handlemap, (VkDescriptorSetLayoutBinding*)(toMap->pBindings + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->bindingCount; ++i)
+            {
+                handlemap_VkDescriptorSetLayoutBinding(handlemap, (VkDescriptorSetLayoutBinding*)(toMap->pBindings + i));
+            }
         }
     }
 }
 
-void handlemap_VkDescriptorPoolSize(
-    VulkanHandleMapping* handlemap,
-    VkDescriptorPoolSize* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-}
-
-void handlemap_VkDescriptorPoolCreateInfo(
-    VulkanHandleMapping* handlemap,
-    VkDescriptorPoolCreateInfo* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-    if (toMap->pPoolSizes)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toMap->poolSizeCount; ++i)
-        {
-            handlemap_VkDescriptorPoolSize(handlemap, (VkDescriptorPoolSize*)(toMap->pPoolSizes + i));
-        }
-    }
-}
-
-void handlemap_VkDescriptorSetAllocateInfo(
-    VulkanHandleMapping* handlemap,
-    VkDescriptorSetAllocateInfo* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-    handlemap->mapHandles_VkDescriptorPool((VkDescriptorPool*)&toMap->descriptorPool);
-    if (toMap->pSetLayouts)
-    {
-        handlemap->mapHandles_VkDescriptorSetLayout((VkDescriptorSetLayout*)toMap->pSetLayouts, toMap->descriptorSetCount);
-    }
-}
-
-void handlemap_VkDescriptorImageInfo(
-    VulkanHandleMapping* handlemap,
-    VkDescriptorImageInfo* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    handlemap->mapHandles_VkSampler((VkSampler*)&toMap->sampler);
-    handlemap->mapHandles_VkImageView((VkImageView*)&toMap->imageView);
-}
-
-void handlemap_VkDescriptorBufferInfo(
-    VulkanHandleMapping* handlemap,
-    VkDescriptorBufferInfo* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->buffer);
-}
-
 void handlemap_VkWriteDescriptorSet(
     VulkanHandleMapping* handlemap,
     VkWriteDescriptorSet* toMap)
@@ -1045,18 +1197,24 @@
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
     handlemap->mapHandles_VkDescriptorSet((VkDescriptorSet*)&toMap->dstSet);
-    if (toMap->pImageInfo)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->descriptorCount; ++i)
+        if (toMap->pImageInfo)
         {
-            handlemap_VkDescriptorImageInfo(handlemap, (VkDescriptorImageInfo*)(toMap->pImageInfo + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->descriptorCount; ++i)
+            {
+                handlemap_VkDescriptorImageInfo(handlemap, (VkDescriptorImageInfo*)(toMap->pImageInfo + i));
+            }
         }
     }
-    if (toMap->pBufferInfo)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->descriptorCount; ++i)
+        if (toMap->pBufferInfo)
         {
-            handlemap_VkDescriptorBufferInfo(handlemap, (VkDescriptorBufferInfo*)(toMap->pBufferInfo + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->descriptorCount; ++i)
+            {
+                handlemap_VkDescriptorBufferInfo(handlemap, (VkDescriptorBufferInfo*)(toMap->pBufferInfo + i));
+            }
         }
     }
     if (toMap->pTexelBufferView)
@@ -1065,37 +1223,6 @@
     }
 }
 
-void handlemap_VkCopyDescriptorSet(
-    VulkanHandleMapping* handlemap,
-    VkCopyDescriptorSet* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-    handlemap->mapHandles_VkDescriptorSet((VkDescriptorSet*)&toMap->srcSet);
-    handlemap->mapHandles_VkDescriptorSet((VkDescriptorSet*)&toMap->dstSet);
-}
-
-void handlemap_VkFramebufferCreateInfo(
-    VulkanHandleMapping* handlemap,
-    VkFramebufferCreateInfo* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-    handlemap->mapHandles_VkRenderPass((VkRenderPass*)&toMap->renderPass);
-    if (toMap->pAttachments)
-    {
-        handlemap->mapHandles_VkImageView((VkImageView*)toMap->pAttachments, toMap->attachmentCount);
-    }
-}
-
 void handlemap_VkAttachmentDescription(
     VulkanHandleMapping* handlemap,
     VkAttachmentDescription* toMap)
@@ -1112,31 +1239,57 @@
     (void)toMap;
 }
 
+void handlemap_VkFramebufferCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkFramebufferCreateInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkRenderPass((VkRenderPass*)&toMap->renderPass);
+    if (toMap->pAttachments)
+    {
+        handlemap->mapHandles_VkImageView((VkImageView*)toMap->pAttachments, toMap->attachmentCount);
+    }
+}
+
 void handlemap_VkSubpassDescription(
     VulkanHandleMapping* handlemap,
     VkSubpassDescription* toMap)
 {
     (void)handlemap;
     (void)toMap;
-    if (toMap->pInputAttachments)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->inputAttachmentCount; ++i)
+        if (toMap->pInputAttachments)
         {
-            handlemap_VkAttachmentReference(handlemap, (VkAttachmentReference*)(toMap->pInputAttachments + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->inputAttachmentCount; ++i)
+            {
+                handlemap_VkAttachmentReference(handlemap, (VkAttachmentReference*)(toMap->pInputAttachments + i));
+            }
         }
     }
-    if (toMap->pColorAttachments)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->colorAttachmentCount; ++i)
+        if (toMap->pColorAttachments)
         {
-            handlemap_VkAttachmentReference(handlemap, (VkAttachmentReference*)(toMap->pColorAttachments + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->colorAttachmentCount; ++i)
+            {
+                handlemap_VkAttachmentReference(handlemap, (VkAttachmentReference*)(toMap->pColorAttachments + i));
+            }
         }
     }
-    if (toMap->pResolveAttachments)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->colorAttachmentCount; ++i)
+        if (toMap->pResolveAttachments)
         {
-            handlemap_VkAttachmentReference(handlemap, (VkAttachmentReference*)(toMap->pResolveAttachments + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->colorAttachmentCount; ++i)
+            {
+                handlemap_VkAttachmentReference(handlemap, (VkAttachmentReference*)(toMap->pResolveAttachments + i));
+            }
         }
     }
     if (toMap->pDepthStencilAttachment)
@@ -1163,25 +1316,34 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pAttachments)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->attachmentCount; ++i)
+        if (toMap->pAttachments)
         {
-            handlemap_VkAttachmentDescription(handlemap, (VkAttachmentDescription*)(toMap->pAttachments + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->attachmentCount; ++i)
+            {
+                handlemap_VkAttachmentDescription(handlemap, (VkAttachmentDescription*)(toMap->pAttachments + i));
+            }
         }
     }
-    if (toMap->pSubpasses)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->subpassCount; ++i)
+        if (toMap->pSubpasses)
         {
-            handlemap_VkSubpassDescription(handlemap, (VkSubpassDescription*)(toMap->pSubpasses + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->subpassCount; ++i)
+            {
+                handlemap_VkSubpassDescription(handlemap, (VkSubpassDescription*)(toMap->pSubpasses + i));
+            }
         }
     }
-    if (toMap->pDependencies)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->dependencyCount; ++i)
+        if (toMap->pDependencies)
         {
-            handlemap_VkSubpassDependency(handlemap, (VkSubpassDependency*)(toMap->pDependencies + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->dependencyCount; ++i)
+            {
+                handlemap_VkSubpassDependency(handlemap, (VkSubpassDependency*)(toMap->pDependencies + i));
+            }
         }
     }
 }
@@ -1257,37 +1419,6 @@
     (void)toMap;
 }
 
-void handlemap_VkImageCopy(
-    VulkanHandleMapping* handlemap,
-    VkImageCopy* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    handlemap_VkImageSubresourceLayers(handlemap, (VkImageSubresourceLayers*)(&toMap->srcSubresource));
-    handlemap_VkOffset3D(handlemap, (VkOffset3D*)(&toMap->srcOffset));
-    handlemap_VkImageSubresourceLayers(handlemap, (VkImageSubresourceLayers*)(&toMap->dstSubresource));
-    handlemap_VkOffset3D(handlemap, (VkOffset3D*)(&toMap->dstOffset));
-    handlemap_VkExtent3D(handlemap, (VkExtent3D*)(&toMap->extent));
-}
-
-void handlemap_VkImageBlit(
-    VulkanHandleMapping* handlemap,
-    VkImageBlit* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    handlemap_VkImageSubresourceLayers(handlemap, (VkImageSubresourceLayers*)(&toMap->srcSubresource));
-    for (uint32_t i = 0; i < (uint32_t)2; ++i)
-    {
-        handlemap_VkOffset3D(handlemap, (VkOffset3D*)(toMap->srcOffsets + i));
-    }
-    handlemap_VkImageSubresourceLayers(handlemap, (VkImageSubresourceLayers*)(&toMap->dstSubresource));
-    for (uint32_t i = 0; i < (uint32_t)2; ++i)
-    {
-        handlemap_VkOffset3D(handlemap, (VkOffset3D*)(toMap->dstOffsets + i));
-    }
-}
-
 void handlemap_VkBufferImageCopy(
     VulkanHandleMapping* handlemap,
     VkBufferImageCopy* toMap)
@@ -1343,6 +1474,37 @@
     handlemap_VkRect2D(handlemap, (VkRect2D*)(&toMap->rect));
 }
 
+void handlemap_VkImageBlit(
+    VulkanHandleMapping* handlemap,
+    VkImageBlit* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    handlemap_VkImageSubresourceLayers(handlemap, (VkImageSubresourceLayers*)(&toMap->srcSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        handlemap_VkOffset3D(handlemap, (VkOffset3D*)(toMap->srcOffsets + i));
+    }
+    handlemap_VkImageSubresourceLayers(handlemap, (VkImageSubresourceLayers*)(&toMap->dstSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        handlemap_VkOffset3D(handlemap, (VkOffset3D*)(toMap->dstOffsets + i));
+    }
+}
+
+void handlemap_VkImageCopy(
+    VulkanHandleMapping* handlemap,
+    VkImageCopy* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    handlemap_VkImageSubresourceLayers(handlemap, (VkImageSubresourceLayers*)(&toMap->srcSubresource));
+    handlemap_VkOffset3D(handlemap, (VkOffset3D*)(&toMap->srcOffset));
+    handlemap_VkImageSubresourceLayers(handlemap, (VkImageSubresourceLayers*)(&toMap->dstSubresource));
+    handlemap_VkOffset3D(handlemap, (VkOffset3D*)(&toMap->dstOffset));
+    handlemap_VkExtent3D(handlemap, (VkExtent3D*)(&toMap->extent));
+}
+
 void handlemap_VkImageResolve(
     VulkanHandleMapping* handlemap,
     VkImageResolve* toMap)
@@ -1356,45 +1518,6 @@
     handlemap_VkExtent3D(handlemap, (VkExtent3D*)(&toMap->extent));
 }
 
-void handlemap_VkMemoryBarrier(
-    VulkanHandleMapping* handlemap,
-    VkMemoryBarrier* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-}
-
-void handlemap_VkBufferMemoryBarrier(
-    VulkanHandleMapping* handlemap,
-    VkBufferMemoryBarrier* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->buffer);
-}
-
-void handlemap_VkImageMemoryBarrier(
-    VulkanHandleMapping* handlemap,
-    VkImageMemoryBarrier* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-    handlemap->mapHandles_VkImage((VkImage*)&toMap->image);
-    handlemap_VkImageSubresourceRange(handlemap, (VkImageSubresourceRange*)(&toMap->subresourceRange));
-}
-
 void handlemap_VkRenderPassBeginInfo(
     VulkanHandleMapping* handlemap,
     VkRenderPassBeginInfo* toMap)
@@ -1408,63 +1531,18 @@
     handlemap->mapHandles_VkRenderPass((VkRenderPass*)&toMap->renderPass);
     handlemap->mapHandles_VkFramebuffer((VkFramebuffer*)&toMap->framebuffer);
     handlemap_VkRect2D(handlemap, (VkRect2D*)(&toMap->renderArea));
-    if (toMap->pClearValues)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->clearValueCount; ++i)
+        if (toMap->pClearValues)
         {
-            handlemap_VkClearValue(handlemap, (VkClearValue*)(toMap->pClearValues + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->clearValueCount; ++i)
+            {
+                handlemap_VkClearValue(handlemap, (VkClearValue*)(toMap->pClearValues + i));
+            }
         }
     }
 }
 
-void handlemap_VkDispatchIndirectCommand(
-    VulkanHandleMapping* handlemap,
-    VkDispatchIndirectCommand* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-}
-
-void handlemap_VkDrawIndexedIndirectCommand(
-    VulkanHandleMapping* handlemap,
-    VkDrawIndexedIndirectCommand* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-}
-
-void handlemap_VkDrawIndirectCommand(
-    VulkanHandleMapping* handlemap,
-    VkDrawIndirectCommand* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-}
-
-void handlemap_VkBaseOutStructure(
-    VulkanHandleMapping* handlemap,
-    VkBaseOutStructure* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-}
-
-void handlemap_VkBaseInStructure(
-    VulkanHandleMapping* handlemap,
-    VkBaseInStructure* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-}
-
 #endif
 #ifdef VK_VERSION_1_1
 void handlemap_VkPhysicalDeviceSubgroupProperties(
@@ -1567,11 +1645,14 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pDeviceRenderAreas)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->deviceRenderAreaCount; ++i)
+        if (toMap->pDeviceRenderAreas)
         {
-            handlemap_VkRect2D(handlemap, (VkRect2D*)(toMap->pDeviceRenderAreas + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->deviceRenderAreaCount; ++i)
+            {
+                handlemap_VkRect2D(handlemap, (VkRect2D*)(toMap->pDeviceRenderAreas + i));
+            }
         }
     }
 }
@@ -1634,11 +1715,14 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pSplitInstanceBindRegions)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->splitInstanceBindRegionCount; ++i)
+        if (toMap->pSplitInstanceBindRegions)
         {
-            handlemap_VkRect2D(handlemap, (VkRect2D*)(toMap->pSplitInstanceBindRegions + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->splitInstanceBindRegionCount; ++i)
+            {
+                handlemap_VkRect2D(handlemap, (VkRect2D*)(toMap->pSplitInstanceBindRegions + i));
+            }
         }
     }
 }
@@ -1882,11 +1966,14 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pAspectReferences)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->aspectReferenceCount; ++i)
+        if (toMap->pAspectReferences)
         {
-            handlemap_VkInputAttachmentAspectReference(handlemap, (VkInputAttachmentAspectReference*)(toMap->pAspectReferences + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->aspectReferenceCount; ++i)
+            {
+                handlemap_VkInputAttachmentAspectReference(handlemap, (VkInputAttachmentAspectReference*)(toMap->pAspectReferences + i));
+            }
         }
     }
 }
@@ -1951,9 +2038,9 @@
     }
 }
 
-void handlemap_VkPhysicalDeviceVariablePointerFeatures(
+void handlemap_VkPhysicalDeviceVariablePointersFeatures(
     VulkanHandleMapping* handlemap,
-    VkPhysicalDeviceVariablePointerFeatures* toMap)
+    VkPhysicalDeviceVariablePointersFeatures* toMap)
 {
     (void)handlemap;
     (void)toMap;
@@ -2103,11 +2190,14 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pDescriptorUpdateEntries)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->descriptorUpdateEntryCount; ++i)
+        if (toMap->pDescriptorUpdateEntries)
         {
-            handlemap_VkDescriptorUpdateTemplateEntry(handlemap, (VkDescriptorUpdateTemplateEntry*)(toMap->pDescriptorUpdateEntries + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->descriptorUpdateEntryCount; ++i)
+            {
+                handlemap_VkDescriptorUpdateTemplateEntry(handlemap, (VkDescriptorUpdateTemplateEntry*)(toMap->pDescriptorUpdateEntries + i));
+            }
         }
     }
     handlemap->mapHandles_VkDescriptorSetLayout((VkDescriptorSetLayout*)&toMap->descriptorSetLayout);
@@ -2316,9 +2406,9 @@
     }
 }
 
-void handlemap_VkPhysicalDeviceShaderDrawParameterFeatures(
+void handlemap_VkPhysicalDeviceShaderDrawParametersFeatures(
     VulkanHandleMapping* handlemap,
-    VkPhysicalDeviceShaderDrawParameterFeatures* toMap)
+    VkPhysicalDeviceShaderDrawParametersFeatures* toMap)
 {
     (void)handlemap;
     (void)toMap;
@@ -2329,6 +2419,707 @@
 }
 
 #endif
+#ifdef VK_VERSION_1_2
+void handlemap_VkPhysicalDeviceVulkan11Features(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceVulkan11Features* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceVulkan11Properties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceVulkan11Properties* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceVulkan12Features(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceVulkan12Features* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkConformanceVersion(
+    VulkanHandleMapping* handlemap,
+    VkConformanceVersion* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkPhysicalDeviceVulkan12Properties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceVulkan12Properties* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkConformanceVersion(handlemap, (VkConformanceVersion*)(&toMap->conformanceVersion));
+}
+
+void handlemap_VkImageFormatListCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkImageFormatListCreateInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkAttachmentDescription2(
+    VulkanHandleMapping* handlemap,
+    VkAttachmentDescription2* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkAttachmentReference2(
+    VulkanHandleMapping* handlemap,
+    VkAttachmentReference2* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkSubpassDescription2(
+    VulkanHandleMapping* handlemap,
+    VkSubpassDescription2* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap)
+    {
+        if (toMap->pInputAttachments)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->inputAttachmentCount; ++i)
+            {
+                handlemap_VkAttachmentReference2(handlemap, (VkAttachmentReference2*)(toMap->pInputAttachments + i));
+            }
+        }
+    }
+    if (toMap)
+    {
+        if (toMap->pColorAttachments)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->colorAttachmentCount; ++i)
+            {
+                handlemap_VkAttachmentReference2(handlemap, (VkAttachmentReference2*)(toMap->pColorAttachments + i));
+            }
+        }
+    }
+    if (toMap)
+    {
+        if (toMap->pResolveAttachments)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->colorAttachmentCount; ++i)
+            {
+                handlemap_VkAttachmentReference2(handlemap, (VkAttachmentReference2*)(toMap->pResolveAttachments + i));
+            }
+        }
+    }
+    if (toMap->pDepthStencilAttachment)
+    {
+        handlemap_VkAttachmentReference2(handlemap, (VkAttachmentReference2*)(toMap->pDepthStencilAttachment));
+    }
+}
+
+void handlemap_VkSubpassDependency2(
+    VulkanHandleMapping* handlemap,
+    VkSubpassDependency2* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkRenderPassCreateInfo2(
+    VulkanHandleMapping* handlemap,
+    VkRenderPassCreateInfo2* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap)
+    {
+        if (toMap->pAttachments)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->attachmentCount; ++i)
+            {
+                handlemap_VkAttachmentDescription2(handlemap, (VkAttachmentDescription2*)(toMap->pAttachments + i));
+            }
+        }
+    }
+    if (toMap)
+    {
+        if (toMap->pSubpasses)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->subpassCount; ++i)
+            {
+                handlemap_VkSubpassDescription2(handlemap, (VkSubpassDescription2*)(toMap->pSubpasses + i));
+            }
+        }
+    }
+    if (toMap)
+    {
+        if (toMap->pDependencies)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->dependencyCount; ++i)
+            {
+                handlemap_VkSubpassDependency2(handlemap, (VkSubpassDependency2*)(toMap->pDependencies + i));
+            }
+        }
+    }
+}
+
+void handlemap_VkSubpassBeginInfo(
+    VulkanHandleMapping* handlemap,
+    VkSubpassBeginInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkSubpassEndInfo(
+    VulkanHandleMapping* handlemap,
+    VkSubpassEndInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDevice8BitStorageFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevice8BitStorageFeatures* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceDriverProperties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDriverProperties* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkConformanceVersion(handlemap, (VkConformanceVersion*)(&toMap->conformanceVersion));
+}
+
+void handlemap_VkPhysicalDeviceShaderAtomicInt64Features(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderAtomicInt64Features* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceShaderFloat16Int8Features(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderFloat16Int8Features* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceFloatControlsProperties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFloatControlsProperties* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkDescriptorSetLayoutBindingFlagsCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkDescriptorSetLayoutBindingFlagsCreateInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceDescriptorIndexingFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDescriptorIndexingFeatures* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceDescriptorIndexingProperties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDescriptorIndexingProperties* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkDescriptorSetVariableDescriptorCountAllocateInfo(
+    VulkanHandleMapping* handlemap,
+    VkDescriptorSetVariableDescriptorCountAllocateInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkDescriptorSetVariableDescriptorCountLayoutSupport(
+    VulkanHandleMapping* handlemap,
+    VkDescriptorSetVariableDescriptorCountLayoutSupport* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkSubpassDescriptionDepthStencilResolve(
+    VulkanHandleMapping* handlemap,
+    VkSubpassDescriptionDepthStencilResolve* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap->pDepthStencilResolveAttachment)
+    {
+        handlemap_VkAttachmentReference2(handlemap, (VkAttachmentReference2*)(toMap->pDepthStencilResolveAttachment));
+    }
+}
+
+void handlemap_VkPhysicalDeviceDepthStencilResolveProperties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDepthStencilResolveProperties* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceScalarBlockLayoutFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceScalarBlockLayoutFeatures* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkImageStencilUsageCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkImageStencilUsageCreateInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkSamplerReductionModeCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkSamplerReductionModeCreateInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceSamplerFilterMinmaxProperties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceSamplerFilterMinmaxProperties* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceVulkanMemoryModelFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceVulkanMemoryModelFeatures* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceImagelessFramebufferFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceImagelessFramebufferFeatures* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkFramebufferAttachmentImageInfo(
+    VulkanHandleMapping* handlemap,
+    VkFramebufferAttachmentImageInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkFramebufferAttachmentsCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkFramebufferAttachmentsCreateInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap)
+    {
+        if (toMap->pAttachmentImageInfos)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->attachmentImageInfoCount; ++i)
+            {
+                handlemap_VkFramebufferAttachmentImageInfo(handlemap, (VkFramebufferAttachmentImageInfo*)(toMap->pAttachmentImageInfos + i));
+            }
+        }
+    }
+}
+
+void handlemap_VkRenderPassAttachmentBeginInfo(
+    VulkanHandleMapping* handlemap,
+    VkRenderPassAttachmentBeginInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap->pAttachments)
+    {
+        handlemap->mapHandles_VkImageView((VkImageView*)toMap->pAttachments, toMap->attachmentCount);
+    }
+}
+
+void handlemap_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceUniformBufferStandardLayoutFeatures* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkAttachmentReferenceStencilLayout(
+    VulkanHandleMapping* handlemap,
+    VkAttachmentReferenceStencilLayout* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkAttachmentDescriptionStencilLayout(
+    VulkanHandleMapping* handlemap,
+    VkAttachmentDescriptionStencilLayout* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceHostQueryResetFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceHostQueryResetFeatures* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceTimelineSemaphoreFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceTimelineSemaphoreFeatures* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceTimelineSemaphoreProperties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceTimelineSemaphoreProperties* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkSemaphoreTypeCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkSemaphoreTypeCreateInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkTimelineSemaphoreSubmitInfo(
+    VulkanHandleMapping* handlemap,
+    VkTimelineSemaphoreSubmitInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkSemaphoreWaitInfo(
+    VulkanHandleMapping* handlemap,
+    VkSemaphoreWaitInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap->pSemaphores)
+    {
+        handlemap->mapHandles_VkSemaphore((VkSemaphore*)toMap->pSemaphores, toMap->semaphoreCount);
+    }
+}
+
+void handlemap_VkSemaphoreSignalInfo(
+    VulkanHandleMapping* handlemap,
+    VkSemaphoreSignalInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkSemaphore((VkSemaphore*)&toMap->semaphore);
+}
+
+void handlemap_VkPhysicalDeviceBufferDeviceAddressFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceBufferDeviceAddressFeatures* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkBufferDeviceAddressInfo(
+    VulkanHandleMapping* handlemap,
+    VkBufferDeviceAddressInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->buffer);
+}
+
+void handlemap_VkBufferOpaqueCaptureAddressCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkBufferOpaqueCaptureAddressCreateInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkMemoryOpaqueCaptureAddressAllocateInfo(
+    VulkanHandleMapping* handlemap,
+    VkMemoryOpaqueCaptureAddressAllocateInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkDeviceMemoryOpaqueCaptureAddressInfo(
+    VulkanHandleMapping* handlemap,
+    VkDeviceMemoryOpaqueCaptureAddressInfo* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkDeviceMemory((VkDeviceMemory*)&toMap->memory);
+}
+
+#endif
 #ifdef VK_KHR_surface
 void handlemap_VkSurfaceCapabilitiesKHR(
     VulkanHandleMapping* handlemap,
@@ -2465,17 +3256,6 @@
 
 #endif
 #ifdef VK_KHR_display
-void handlemap_VkDisplayPropertiesKHR(
-    VulkanHandleMapping* handlemap,
-    VkDisplayPropertiesKHR* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    handlemap->mapHandles_VkDisplayKHR((VkDisplayKHR*)&toMap->display);
-    handlemap_VkExtent2D(handlemap, (VkExtent2D*)(&toMap->physicalDimensions));
-    handlemap_VkExtent2D(handlemap, (VkExtent2D*)(&toMap->physicalResolution));
-}
-
 void handlemap_VkDisplayModeParametersKHR(
     VulkanHandleMapping* handlemap,
     VkDisplayModeParametersKHR* toMap)
@@ -2485,16 +3265,6 @@
     handlemap_VkExtent2D(handlemap, (VkExtent2D*)(&toMap->visibleRegion));
 }
 
-void handlemap_VkDisplayModePropertiesKHR(
-    VulkanHandleMapping* handlemap,
-    VkDisplayModePropertiesKHR* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    handlemap->mapHandles_VkDisplayModeKHR((VkDisplayModeKHR*)&toMap->displayMode);
-    handlemap_VkDisplayModeParametersKHR(handlemap, (VkDisplayModeParametersKHR*)(&toMap->parameters));
-}
-
 void handlemap_VkDisplayModeCreateInfoKHR(
     VulkanHandleMapping* handlemap,
     VkDisplayModeCreateInfoKHR* toMap)
@@ -2508,6 +3278,16 @@
     handlemap_VkDisplayModeParametersKHR(handlemap, (VkDisplayModeParametersKHR*)(&toMap->parameters));
 }
 
+void handlemap_VkDisplayModePropertiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkDisplayModePropertiesKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    handlemap->mapHandles_VkDisplayModeKHR((VkDisplayModeKHR*)&toMap->displayMode);
+    handlemap_VkDisplayModeParametersKHR(handlemap, (VkDisplayModeParametersKHR*)(&toMap->parameters));
+}
+
 void handlemap_VkDisplayPlaneCapabilitiesKHR(
     VulkanHandleMapping* handlemap,
     VkDisplayPlaneCapabilitiesKHR* toMap)
@@ -2533,6 +3313,17 @@
     handlemap->mapHandles_VkDisplayKHR((VkDisplayKHR*)&toMap->currentDisplay);
 }
 
+void handlemap_VkDisplayPropertiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkDisplayPropertiesKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    handlemap->mapHandles_VkDisplayKHR((VkDisplayKHR*)&toMap->display);
+    handlemap_VkExtent2D(handlemap, (VkExtent2D*)(&toMap->physicalDimensions));
+    handlemap_VkExtent2D(handlemap, (VkExtent2D*)(&toMap->physicalResolution));
+}
+
 void handlemap_VkDisplaySurfaceCreateInfoKHR(
     VulkanHandleMapping* handlemap,
     VkDisplaySurfaceCreateInfoKHR* toMap)
@@ -2606,20 +3397,6 @@
 }
 
 #endif
-#ifdef VK_KHR_mir_surface
-void handlemap_VkMirSurfaceCreateInfoKHR(
-    VulkanHandleMapping* handlemap,
-    VkMirSurfaceCreateInfoKHR* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-}
-
-#endif
 #ifdef VK_KHR_android_surface
 void handlemap_VkAndroidSurfaceCreateInfoKHR(
     VulkanHandleMapping* handlemap,
@@ -2876,6 +3653,8 @@
 }
 
 #endif
+#ifdef VK_KHR_shader_float16_int8
+#endif
 #ifdef VK_KHR_16bit_storage
 #endif
 #ifdef VK_KHR_incremental_present
@@ -2895,11 +3674,14 @@
 {
     (void)handlemap;
     (void)toMap;
-    if (toMap->pRectangles)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->rectangleCount; ++i)
+        if (toMap->pRectangles)
         {
-            handlemap_VkRectLayerKHR(handlemap, (VkRectLayerKHR*)(toMap->pRectangles + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->rectangleCount; ++i)
+            {
+                handlemap_VkRectLayerKHR(handlemap, (VkRectLayerKHR*)(toMap->pRectangles + i));
+            }
         }
     }
 }
@@ -2914,11 +3696,14 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pRegions)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->swapchainCount; ++i)
+        if (toMap->pRegions)
         {
-            handlemap_VkPresentRegionKHR(handlemap, (VkPresentRegionKHR*)(toMap->pRegions + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->swapchainCount; ++i)
+            {
+                handlemap_VkPresentRegionKHR(handlemap, (VkPresentRegionKHR*)(toMap->pRegions + i));
+            }
         }
     }
 }
@@ -2926,137 +3711,9 @@
 #endif
 #ifdef VK_KHR_descriptor_update_template
 #endif
+#ifdef VK_KHR_imageless_framebuffer
+#endif
 #ifdef VK_KHR_create_renderpass2
-void handlemap_VkAttachmentDescription2KHR(
-    VulkanHandleMapping* handlemap,
-    VkAttachmentDescription2KHR* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-}
-
-void handlemap_VkAttachmentReference2KHR(
-    VulkanHandleMapping* handlemap,
-    VkAttachmentReference2KHR* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-}
-
-void handlemap_VkSubpassDescription2KHR(
-    VulkanHandleMapping* handlemap,
-    VkSubpassDescription2KHR* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-    if (toMap->pInputAttachments)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toMap->inputAttachmentCount; ++i)
-        {
-            handlemap_VkAttachmentReference2KHR(handlemap, (VkAttachmentReference2KHR*)(toMap->pInputAttachments + i));
-        }
-    }
-    if (toMap->pColorAttachments)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toMap->colorAttachmentCount; ++i)
-        {
-            handlemap_VkAttachmentReference2KHR(handlemap, (VkAttachmentReference2KHR*)(toMap->pColorAttachments + i));
-        }
-    }
-    if (toMap->pResolveAttachments)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toMap->colorAttachmentCount; ++i)
-        {
-            handlemap_VkAttachmentReference2KHR(handlemap, (VkAttachmentReference2KHR*)(toMap->pResolveAttachments + i));
-        }
-    }
-    if (toMap->pDepthStencilAttachment)
-    {
-        handlemap_VkAttachmentReference2KHR(handlemap, (VkAttachmentReference2KHR*)(toMap->pDepthStencilAttachment));
-    }
-}
-
-void handlemap_VkSubpassDependency2KHR(
-    VulkanHandleMapping* handlemap,
-    VkSubpassDependency2KHR* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-}
-
-void handlemap_VkRenderPassCreateInfo2KHR(
-    VulkanHandleMapping* handlemap,
-    VkRenderPassCreateInfo2KHR* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-    if (toMap->pAttachments)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toMap->attachmentCount; ++i)
-        {
-            handlemap_VkAttachmentDescription2KHR(handlemap, (VkAttachmentDescription2KHR*)(toMap->pAttachments + i));
-        }
-    }
-    if (toMap->pSubpasses)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toMap->subpassCount; ++i)
-        {
-            handlemap_VkSubpassDescription2KHR(handlemap, (VkSubpassDescription2KHR*)(toMap->pSubpasses + i));
-        }
-    }
-    if (toMap->pDependencies)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toMap->dependencyCount; ++i)
-        {
-            handlemap_VkSubpassDependency2KHR(handlemap, (VkSubpassDependency2KHR*)(toMap->pDependencies + i));
-        }
-    }
-}
-
-void handlemap_VkSubpassBeginInfoKHR(
-    VulkanHandleMapping* handlemap,
-    VkSubpassBeginInfoKHR* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-}
-
-void handlemap_VkSubpassEndInfoKHR(
-    VulkanHandleMapping* handlemap,
-    VkSubpassEndInfoKHR* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-}
-
 #endif
 #ifdef VK_KHR_shared_presentable_image
 void handlemap_VkSharedPresentSurfaceCapabilitiesKHR(
@@ -3144,6 +3801,100 @@
 }
 
 #endif
+#ifdef VK_KHR_performance_query
+void handlemap_VkPhysicalDevicePerformanceQueryFeaturesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevicePerformanceQueryFeaturesKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDevicePerformanceQueryPropertiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevicePerformanceQueryPropertiesKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPerformanceCounterKHR(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceCounterKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPerformanceCounterDescriptionKHR(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceCounterDescriptionKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkQueryPoolPerformanceCreateInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkQueryPoolPerformanceCreateInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPerformanceCounterResultKHR(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceCounterResultKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkAcquireProfilingLockInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkAcquireProfilingLockInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPerformanceQuerySubmitInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceQuerySubmitInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
 #ifdef VK_KHR_maintenance2
 #endif
 #ifdef VK_KHR_get_surface_capabilities2
@@ -3265,9 +4016,27 @@
 #ifdef VK_KHR_get_memory_requirements2
 #endif
 #ifdef VK_KHR_image_format_list
-void handlemap_VkImageFormatListCreateInfoKHR(
+#endif
+#ifdef VK_KHR_sampler_ycbcr_conversion
+#endif
+#ifdef VK_KHR_bind_memory2
+#endif
+#ifdef VK_KHR_portability_subset
+void handlemap_VkPhysicalDevicePortabilitySubsetFeaturesKHR(
     VulkanHandleMapping* handlemap,
-    VkImageFormatListCreateInfoKHR* toMap)
+    VkPhysicalDevicePortabilitySubsetFeaturesKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDevicePortabilitySubsetPropertiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevicePortabilitySubsetPropertiesKHR* toMap)
 {
     (void)handlemap;
     (void)toMap;
@@ -3278,18 +4047,20 @@
 }
 
 #endif
-#ifdef VK_KHR_sampler_ycbcr_conversion
-#endif
-#ifdef VK_KHR_bind_memory2
-#endif
 #ifdef VK_KHR_maintenance3
 #endif
 #ifdef VK_KHR_draw_indirect_count
 #endif
+#ifdef VK_KHR_shader_subgroup_extended_types
+#endif
 #ifdef VK_KHR_8bit_storage
-void handlemap_VkPhysicalDevice8BitStorageFeaturesKHR(
+#endif
+#ifdef VK_KHR_shader_atomic_int64
+#endif
+#ifdef VK_KHR_shader_clock
+void handlemap_VkPhysicalDeviceShaderClockFeaturesKHR(
     VulkanHandleMapping* handlemap,
-    VkPhysicalDevice8BitStorageFeaturesKHR* toMap)
+    VkPhysicalDeviceShaderClockFeaturesKHR* toMap)
 {
     (void)handlemap;
     (void)toMap;
@@ -3300,6 +4071,462 @@
 }
 
 #endif
+#ifdef VK_KHR_driver_properties
+#endif
+#ifdef VK_KHR_shader_float_controls
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+void handlemap_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+void handlemap_VkFragmentShadingRateAttachmentInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkFragmentShadingRateAttachmentInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap->pFragmentShadingRateAttachment)
+    {
+        handlemap_VkAttachmentReference2(handlemap, (VkAttachmentReference2*)(toMap->pFragmentShadingRateAttachment));
+    }
+    handlemap_VkExtent2D(handlemap, (VkExtent2D*)(&toMap->shadingRateAttachmentTexelSize));
+}
+
+void handlemap_VkPipelineFragmentShadingRateStateCreateInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkPipelineFragmentShadingRateStateCreateInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkExtent2D(handlemap, (VkExtent2D*)(&toMap->fragmentSize));
+}
+
+void handlemap_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentShadingRateFeaturesKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentShadingRatePropertiesKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkExtent2D(handlemap, (VkExtent2D*)(&toMap->minFragmentShadingRateAttachmentTexelSize));
+    handlemap_VkExtent2D(handlemap, (VkExtent2D*)(&toMap->maxFragmentShadingRateAttachmentTexelSize));
+    handlemap_VkExtent2D(handlemap, (VkExtent2D*)(&toMap->maxFragmentSize));
+}
+
+void handlemap_VkPhysicalDeviceFragmentShadingRateKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentShadingRateKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkExtent2D(handlemap, (VkExtent2D*)(&toMap->fragmentSize));
+}
+
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+void handlemap_VkSurfaceProtectedCapabilitiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkSurfaceProtectedCapabilitiesKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+#endif
+#ifdef VK_KHR_buffer_device_address
+#endif
+#ifdef VK_KHR_deferred_host_operations
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+void handlemap_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPipelineInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkPipelineInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkPipeline((VkPipeline*)&toMap->pipeline);
+}
+
+void handlemap_VkPipelineExecutablePropertiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPipelineExecutablePropertiesKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPipelineExecutableInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkPipelineExecutableInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkPipeline((VkPipeline*)&toMap->pipeline);
+}
+
+void handlemap_VkPipelineExecutableStatisticValueKHR(
+    VulkanHandleMapping* handlemap,
+    VkPipelineExecutableStatisticValueKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkPipelineExecutableStatisticKHR(
+    VulkanHandleMapping* handlemap,
+    VkPipelineExecutableStatisticKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkPipelineExecutableStatisticValueKHR(handlemap, (VkPipelineExecutableStatisticValueKHR*)(&toMap->value));
+}
+
+void handlemap_VkPipelineExecutableInternalRepresentationKHR(
+    VulkanHandleMapping* handlemap,
+    VkPipelineExecutableInternalRepresentationKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_KHR_pipeline_library
+void handlemap_VkPipelineLibraryCreateInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkPipelineLibraryCreateInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap->pLibraries)
+    {
+        handlemap->mapHandles_VkPipeline((VkPipeline*)toMap->pLibraries, toMap->libraryCount);
+    }
+}
+
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+void handlemap_VkBufferCopy2KHR(
+    VulkanHandleMapping* handlemap,
+    VkBufferCopy2KHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkCopyBufferInfo2KHR(
+    VulkanHandleMapping* handlemap,
+    VkCopyBufferInfo2KHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->srcBuffer);
+    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->dstBuffer);
+    if (toMap)
+    {
+        if (toMap->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->regionCount; ++i)
+            {
+                handlemap_VkBufferCopy2KHR(handlemap, (VkBufferCopy2KHR*)(toMap->pRegions + i));
+            }
+        }
+    }
+}
+
+void handlemap_VkImageCopy2KHR(
+    VulkanHandleMapping* handlemap,
+    VkImageCopy2KHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkImageSubresourceLayers(handlemap, (VkImageSubresourceLayers*)(&toMap->srcSubresource));
+    handlemap_VkOffset3D(handlemap, (VkOffset3D*)(&toMap->srcOffset));
+    handlemap_VkImageSubresourceLayers(handlemap, (VkImageSubresourceLayers*)(&toMap->dstSubresource));
+    handlemap_VkOffset3D(handlemap, (VkOffset3D*)(&toMap->dstOffset));
+    handlemap_VkExtent3D(handlemap, (VkExtent3D*)(&toMap->extent));
+}
+
+void handlemap_VkCopyImageInfo2KHR(
+    VulkanHandleMapping* handlemap,
+    VkCopyImageInfo2KHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkImage((VkImage*)&toMap->srcImage);
+    handlemap->mapHandles_VkImage((VkImage*)&toMap->dstImage);
+    if (toMap)
+    {
+        if (toMap->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->regionCount; ++i)
+            {
+                handlemap_VkImageCopy2KHR(handlemap, (VkImageCopy2KHR*)(toMap->pRegions + i));
+            }
+        }
+    }
+}
+
+void handlemap_VkBufferImageCopy2KHR(
+    VulkanHandleMapping* handlemap,
+    VkBufferImageCopy2KHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkImageSubresourceLayers(handlemap, (VkImageSubresourceLayers*)(&toMap->imageSubresource));
+    handlemap_VkOffset3D(handlemap, (VkOffset3D*)(&toMap->imageOffset));
+    handlemap_VkExtent3D(handlemap, (VkExtent3D*)(&toMap->imageExtent));
+}
+
+void handlemap_VkCopyBufferToImageInfo2KHR(
+    VulkanHandleMapping* handlemap,
+    VkCopyBufferToImageInfo2KHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->srcBuffer);
+    handlemap->mapHandles_VkImage((VkImage*)&toMap->dstImage);
+    if (toMap)
+    {
+        if (toMap->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->regionCount; ++i)
+            {
+                handlemap_VkBufferImageCopy2KHR(handlemap, (VkBufferImageCopy2KHR*)(toMap->pRegions + i));
+            }
+        }
+    }
+}
+
+void handlemap_VkCopyImageToBufferInfo2KHR(
+    VulkanHandleMapping* handlemap,
+    VkCopyImageToBufferInfo2KHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkImage((VkImage*)&toMap->srcImage);
+    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->dstBuffer);
+    if (toMap)
+    {
+        if (toMap->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->regionCount; ++i)
+            {
+                handlemap_VkBufferImageCopy2KHR(handlemap, (VkBufferImageCopy2KHR*)(toMap->pRegions + i));
+            }
+        }
+    }
+}
+
+void handlemap_VkImageBlit2KHR(
+    VulkanHandleMapping* handlemap,
+    VkImageBlit2KHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkImageSubresourceLayers(handlemap, (VkImageSubresourceLayers*)(&toMap->srcSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        handlemap_VkOffset3D(handlemap, (VkOffset3D*)(toMap->srcOffsets + i));
+    }
+    handlemap_VkImageSubresourceLayers(handlemap, (VkImageSubresourceLayers*)(&toMap->dstSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        handlemap_VkOffset3D(handlemap, (VkOffset3D*)(toMap->dstOffsets + i));
+    }
+}
+
+void handlemap_VkBlitImageInfo2KHR(
+    VulkanHandleMapping* handlemap,
+    VkBlitImageInfo2KHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkImage((VkImage*)&toMap->srcImage);
+    handlemap->mapHandles_VkImage((VkImage*)&toMap->dstImage);
+    if (toMap)
+    {
+        if (toMap->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->regionCount; ++i)
+            {
+                handlemap_VkImageBlit2KHR(handlemap, (VkImageBlit2KHR*)(toMap->pRegions + i));
+            }
+        }
+    }
+}
+
+void handlemap_VkImageResolve2KHR(
+    VulkanHandleMapping* handlemap,
+    VkImageResolve2KHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkImageSubresourceLayers(handlemap, (VkImageSubresourceLayers*)(&toMap->srcSubresource));
+    handlemap_VkOffset3D(handlemap, (VkOffset3D*)(&toMap->srcOffset));
+    handlemap_VkImageSubresourceLayers(handlemap, (VkImageSubresourceLayers*)(&toMap->dstSubresource));
+    handlemap_VkOffset3D(handlemap, (VkOffset3D*)(&toMap->dstOffset));
+    handlemap_VkExtent3D(handlemap, (VkExtent3D*)(&toMap->extent));
+}
+
+void handlemap_VkResolveImageInfo2KHR(
+    VulkanHandleMapping* handlemap,
+    VkResolveImageInfo2KHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkImage((VkImage*)&toMap->srcImage);
+    handlemap->mapHandles_VkImage((VkImage*)&toMap->dstImage);
+    if (toMap)
+    {
+        if (toMap->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->regionCount; ++i)
+            {
+                handlemap_VkImageResolve2KHR(handlemap, (VkImageResolve2KHR*)(toMap->pRegions + i));
+            }
+        }
+    }
+}
+
+#endif
 #ifdef VK_ANDROID_native_buffer
 void handlemap_VkNativeBufferANDROID(
     VulkanHandleMapping* handlemap,
@@ -3432,6 +4659,72 @@
 }
 
 #endif
+#ifdef VK_EXT_transform_feedback
+void handlemap_VkPhysicalDeviceTransformFeedbackFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceTransformFeedbackFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceTransformFeedbackPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceTransformFeedbackPropertiesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPipelineRasterizationStateStreamCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkPipelineRasterizationStateStreamCreateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NVX_image_view_handle
+void handlemap_VkImageViewHandleInfoNVX(
+    VulkanHandleMapping* handlemap,
+    VkImageViewHandleInfoNVX* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkImageView((VkImageView*)&toMap->imageView);
+    handlemap->mapHandles_VkSampler((VkSampler*)&toMap->sampler);
+}
+
+void handlemap_VkImageViewAddressPropertiesNVX(
+    VulkanHandleMapping* handlemap,
+    VkImageViewAddressPropertiesNVX* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
 #ifdef VK_AMD_draw_indirect_count
 #endif
 #ifdef VK_AMD_negative_viewport_height
@@ -3475,6 +4768,34 @@
 #endif
 #ifdef VK_AMD_shader_image_load_store_lod
 #endif
+#ifdef VK_GGP_stream_descriptor_surface
+void handlemap_VkStreamDescriptorSurfaceCreateInfoGGP(
+    VulkanHandleMapping* handlemap,
+    VkStreamDescriptorSurfaceCreateInfoGGP* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_corner_sampled_image
+void handlemap_VkPhysicalDeviceCornerSampledImageFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceCornerSampledImageFeaturesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
 #ifdef VK_IMG_format_pvrtc
 #endif
 #ifdef VK_NV_external_memory_capabilities
@@ -3594,6 +4915,46 @@
 #endif
 #ifdef VK_EXT_shader_subgroup_vote
 #endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+void handlemap_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_astc_decode_mode
+void handlemap_VkImageViewASTCDecodeModeEXT(
+    VulkanHandleMapping* handlemap,
+    VkImageViewASTCDecodeModeEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceASTCDecodeFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceASTCDecodeFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
 #ifdef VK_EXT_conditional_rendering
 void handlemap_VkConditionalRenderingBeginInfoEXT(
     VulkanHandleMapping* handlemap,
@@ -3633,172 +4994,6 @@
 }
 
 #endif
-#ifdef VK_NVX_device_generated_commands
-void handlemap_VkDeviceGeneratedCommandsFeaturesNVX(
-    VulkanHandleMapping* handlemap,
-    VkDeviceGeneratedCommandsFeaturesNVX* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-}
-
-void handlemap_VkDeviceGeneratedCommandsLimitsNVX(
-    VulkanHandleMapping* handlemap,
-    VkDeviceGeneratedCommandsLimitsNVX* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-}
-
-void handlemap_VkIndirectCommandsTokenNVX(
-    VulkanHandleMapping* handlemap,
-    VkIndirectCommandsTokenNVX* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->buffer);
-}
-
-void handlemap_VkIndirectCommandsLayoutTokenNVX(
-    VulkanHandleMapping* handlemap,
-    VkIndirectCommandsLayoutTokenNVX* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-}
-
-void handlemap_VkIndirectCommandsLayoutCreateInfoNVX(
-    VulkanHandleMapping* handlemap,
-    VkIndirectCommandsLayoutCreateInfoNVX* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-    if (toMap->pTokens)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toMap->tokenCount; ++i)
-        {
-            handlemap_VkIndirectCommandsLayoutTokenNVX(handlemap, (VkIndirectCommandsLayoutTokenNVX*)(toMap->pTokens + i));
-        }
-    }
-}
-
-void handlemap_VkCmdProcessCommandsInfoNVX(
-    VulkanHandleMapping* handlemap,
-    VkCmdProcessCommandsInfoNVX* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-    handlemap->mapHandles_VkObjectTableNVX((VkObjectTableNVX*)&toMap->objectTable);
-    handlemap->mapHandles_VkIndirectCommandsLayoutNVX((VkIndirectCommandsLayoutNVX*)&toMap->indirectCommandsLayout);
-    if (toMap->pIndirectCommandsTokens)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toMap->indirectCommandsTokenCount; ++i)
-        {
-            handlemap_VkIndirectCommandsTokenNVX(handlemap, (VkIndirectCommandsTokenNVX*)(toMap->pIndirectCommandsTokens + i));
-        }
-    }
-    handlemap->mapHandles_VkCommandBuffer((VkCommandBuffer*)&toMap->targetCommandBuffer);
-    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->sequencesCountBuffer);
-    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->sequencesIndexBuffer);
-}
-
-void handlemap_VkCmdReserveSpaceForCommandsInfoNVX(
-    VulkanHandleMapping* handlemap,
-    VkCmdReserveSpaceForCommandsInfoNVX* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-    handlemap->mapHandles_VkObjectTableNVX((VkObjectTableNVX*)&toMap->objectTable);
-    handlemap->mapHandles_VkIndirectCommandsLayoutNVX((VkIndirectCommandsLayoutNVX*)&toMap->indirectCommandsLayout);
-}
-
-void handlemap_VkObjectTableCreateInfoNVX(
-    VulkanHandleMapping* handlemap,
-    VkObjectTableCreateInfoNVX* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-}
-
-void handlemap_VkObjectTableEntryNVX(
-    VulkanHandleMapping* handlemap,
-    VkObjectTableEntryNVX* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-}
-
-void handlemap_VkObjectTablePipelineEntryNVX(
-    VulkanHandleMapping* handlemap,
-    VkObjectTablePipelineEntryNVX* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    handlemap->mapHandles_VkPipeline((VkPipeline*)&toMap->pipeline);
-}
-
-void handlemap_VkObjectTableDescriptorSetEntryNVX(
-    VulkanHandleMapping* handlemap,
-    VkObjectTableDescriptorSetEntryNVX* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    handlemap->mapHandles_VkPipelineLayout((VkPipelineLayout*)&toMap->pipelineLayout);
-    handlemap->mapHandles_VkDescriptorSet((VkDescriptorSet*)&toMap->descriptorSet);
-}
-
-void handlemap_VkObjectTableVertexBufferEntryNVX(
-    VulkanHandleMapping* handlemap,
-    VkObjectTableVertexBufferEntryNVX* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->buffer);
-}
-
-void handlemap_VkObjectTableIndexBufferEntryNVX(
-    VulkanHandleMapping* handlemap,
-    VkObjectTableIndexBufferEntryNVX* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->buffer);
-}
-
-void handlemap_VkObjectTablePushConstantEntryNVX(
-    VulkanHandleMapping* handlemap,
-    VkObjectTablePushConstantEntryNVX* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    handlemap->mapHandles_VkPipelineLayout((VkPipelineLayout*)&toMap->pipelineLayout);
-}
-
-#endif
 #ifdef VK_NV_clip_space_w_scaling
 void handlemap_VkViewportWScalingNV(
     VulkanHandleMapping* handlemap,
@@ -3818,11 +5013,14 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pViewportWScalings)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->viewportCount; ++i)
+        if (toMap->pViewportWScalings)
         {
-            handlemap_VkViewportWScalingNV(handlemap, (VkViewportWScalingNV*)(toMap->pViewportWScalings + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->viewportCount; ++i)
+            {
+                handlemap_VkViewportWScalingNV(handlemap, (VkViewportWScalingNV*)(toMap->pViewportWScalings + i));
+            }
         }
     }
 }
@@ -3934,11 +5132,14 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pTimes)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->swapchainCount; ++i)
+        if (toMap->pTimes)
         {
-            handlemap_VkPresentTimeGOOGLE(handlemap, (VkPresentTimeGOOGLE*)(toMap->pTimes + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->swapchainCount; ++i)
+            {
+                handlemap_VkPresentTimeGOOGLE(handlemap, (VkPresentTimeGOOGLE*)(toMap->pTimes + i));
+            }
         }
     }
 }
@@ -3983,11 +5184,14 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pViewportSwizzles)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->viewportCount; ++i)
+        if (toMap->pViewportSwizzles)
         {
-            handlemap_VkViewportSwizzleNV(handlemap, (VkViewportSwizzleNV*)(toMap->pViewportSwizzles + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->viewportCount; ++i)
+            {
+                handlemap_VkViewportSwizzleNV(handlemap, (VkViewportSwizzleNV*)(toMap->pViewportSwizzles + i));
+            }
         }
     }
 }
@@ -4016,11 +5220,14 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pDiscardRectangles)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->discardRectangleCount; ++i)
+        if (toMap->pDiscardRectangles)
         {
-            handlemap_VkRect2D(handlemap, (VkRect2D*)(toMap->pDiscardRectangles + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->discardRectangleCount; ++i)
+            {
+                handlemap_VkRect2D(handlemap, (VkRect2D*)(toMap->pDiscardRectangles + i));
+            }
         }
     }
 }
@@ -4052,6 +5259,32 @@
 }
 
 #endif
+#ifdef VK_EXT_depth_clip_enable
+void handlemap_VkPhysicalDeviceDepthClipEnableFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDepthClipEnableFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPipelineRasterizationDepthClipStateCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkPipelineRasterizationDepthClipStateCreateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
 #ifdef VK_EXT_swapchain_colorspace
 #endif
 #ifdef VK_EXT_hdr_metadata
@@ -4108,35 +5341,13 @@
 }
 
 #endif
+#ifdef VK_MVK_moltenvk
+#endif
 #ifdef VK_EXT_external_memory_dma_buf
 #endif
 #ifdef VK_EXT_queue_family_foreign
 #endif
 #ifdef VK_EXT_debug_utils
-void handlemap_VkDebugUtilsObjectNameInfoEXT(
-    VulkanHandleMapping* handlemap,
-    VkDebugUtilsObjectNameInfoEXT* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-}
-
-void handlemap_VkDebugUtilsObjectTagInfoEXT(
-    VulkanHandleMapping* handlemap,
-    VkDebugUtilsObjectTagInfoEXT* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-}
-
 void handlemap_VkDebugUtilsLabelEXT(
     VulkanHandleMapping* handlemap,
     VkDebugUtilsLabelEXT* toMap)
@@ -4149,6 +5360,18 @@
     }
 }
 
+void handlemap_VkDebugUtilsObjectNameInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkDebugUtilsObjectNameInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
 void handlemap_VkDebugUtilsMessengerCallbackDataEXT(
     VulkanHandleMapping* handlemap,
     VkDebugUtilsMessengerCallbackDataEXT* toMap)
@@ -4159,25 +5382,34 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pQueueLabels)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->queueLabelCount; ++i)
+        if (toMap->pQueueLabels)
         {
-            handlemap_VkDebugUtilsLabelEXT(handlemap, (VkDebugUtilsLabelEXT*)(toMap->pQueueLabels + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->queueLabelCount; ++i)
+            {
+                handlemap_VkDebugUtilsLabelEXT(handlemap, (VkDebugUtilsLabelEXT*)(toMap->pQueueLabels + i));
+            }
         }
     }
-    if (toMap->pCmdBufLabels)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->cmdBufLabelCount; ++i)
+        if (toMap->pCmdBufLabels)
         {
-            handlemap_VkDebugUtilsLabelEXT(handlemap, (VkDebugUtilsLabelEXT*)(toMap->pCmdBufLabels + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->cmdBufLabelCount; ++i)
+            {
+                handlemap_VkDebugUtilsLabelEXT(handlemap, (VkDebugUtilsLabelEXT*)(toMap->pCmdBufLabels + i));
+            }
         }
     }
-    if (toMap->pObjects)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->objectCount; ++i)
+        if (toMap->pObjects)
         {
-            handlemap_VkDebugUtilsObjectNameInfoEXT(handlemap, (VkDebugUtilsObjectNameInfoEXT*)(toMap->pObjects + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->objectCount; ++i)
+            {
+                handlemap_VkDebugUtilsObjectNameInfoEXT(handlemap, (VkDebugUtilsObjectNameInfoEXT*)(toMap->pObjects + i));
+            }
         }
     }
 }
@@ -4194,6 +5426,18 @@
     }
 }
 
+void handlemap_VkDebugUtilsObjectTagInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkDebugUtilsObjectTagInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
 #endif
 #ifdef VK_ANDROID_external_memory_android_hardware_buffer
 void handlemap_VkAndroidHardwareBufferUsageANDROID(
@@ -4272,30 +5516,6 @@
 
 #endif
 #ifdef VK_EXT_sampler_filter_minmax
-void handlemap_VkSamplerReductionModeCreateInfoEXT(
-    VulkanHandleMapping* handlemap,
-    VkSamplerReductionModeCreateInfoEXT* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-}
-
-void handlemap_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(
-    VulkanHandleMapping* handlemap,
-    VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT* toMap)
-{
-    (void)handlemap;
-    (void)toMap;
-    if (toMap->pNext)
-    {
-        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
-    }
-}
-
 #endif
 #ifdef VK_AMD_gpu_shader_int16
 #endif
@@ -4303,6 +5523,56 @@
 #endif
 #ifdef VK_AMD_shader_fragment_mask
 #endif
+#ifdef VK_EXT_inline_uniform_block
+void handlemap_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceInlineUniformBlockFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceInlineUniformBlockPropertiesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkWriteDescriptorSetInlineUniformBlockEXT(
+    VulkanHandleMapping* handlemap,
+    VkWriteDescriptorSetInlineUniformBlockEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkDescriptorPoolInlineUniformBlockCreateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
 #ifdef VK_EXT_shader_stencil_export
 #endif
 #ifdef VK_EXT_sample_locations
@@ -4325,11 +5595,14 @@
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
     handlemap_VkExtent2D(handlemap, (VkExtent2D*)(&toMap->sampleLocationGridSize));
-    if (toMap->pSampleLocations)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->sampleLocationsCount; ++i)
+        if (toMap->pSampleLocations)
         {
-            handlemap_VkSampleLocationEXT(handlemap, (VkSampleLocationEXT*)(toMap->pSampleLocations + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->sampleLocationsCount; ++i)
+            {
+                handlemap_VkSampleLocationEXT(handlemap, (VkSampleLocationEXT*)(toMap->pSampleLocations + i));
+            }
         }
     }
 }
@@ -4362,18 +5635,24 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pAttachmentInitialSampleLocations)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->attachmentInitialSampleLocationsCount; ++i)
+        if (toMap->pAttachmentInitialSampleLocations)
         {
-            handlemap_VkAttachmentSampleLocationsEXT(handlemap, (VkAttachmentSampleLocationsEXT*)(toMap->pAttachmentInitialSampleLocations + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->attachmentInitialSampleLocationsCount; ++i)
+            {
+                handlemap_VkAttachmentSampleLocationsEXT(handlemap, (VkAttachmentSampleLocationsEXT*)(toMap->pAttachmentInitialSampleLocations + i));
+            }
         }
     }
-    if (toMap->pPostSubpassSampleLocations)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->postSubpassSampleLocationsCount; ++i)
+        if (toMap->pPostSubpassSampleLocations)
         {
-            handlemap_VkSubpassSampleLocationsEXT(handlemap, (VkSubpassSampleLocationsEXT*)(toMap->pPostSubpassSampleLocations + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->postSubpassSampleLocationsCount; ++i)
+            {
+                handlemap_VkSubpassSampleLocationsEXT(handlemap, (VkSubpassSampleLocationsEXT*)(toMap->pPostSubpassSampleLocations + i));
+            }
         }
     }
 }
@@ -4486,8 +5765,124 @@
 #endif
 #ifdef VK_NV_fill_rectangle
 #endif
+#ifdef VK_NV_shader_sm_builtins
+void handlemap_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
 #ifdef VK_EXT_post_depth_coverage
 #endif
+#ifdef VK_EXT_image_drm_format_modifier
+void handlemap_VkDrmFormatModifierPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkDrmFormatModifierPropertiesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkDrmFormatModifierPropertiesListEXT(
+    VulkanHandleMapping* handlemap,
+    VkDrmFormatModifierPropertiesListEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap)
+    {
+        if (toMap->pDrmFormatModifierProperties)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->drmFormatModifierCount; ++i)
+            {
+                handlemap_VkDrmFormatModifierPropertiesEXT(handlemap, (VkDrmFormatModifierPropertiesEXT*)(toMap->pDrmFormatModifierProperties + i));
+            }
+        }
+    }
+}
+
+void handlemap_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceImageDrmFormatModifierInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkImageDrmFormatModifierListCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkImageDrmFormatModifierListCreateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkImageDrmFormatModifierExplicitCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkImageDrmFormatModifierExplicitCreateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap)
+    {
+        if (toMap->pPlaneLayouts)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->drmFormatModifierPlaneCount; ++i)
+            {
+                handlemap_VkSubresourceLayout(handlemap, (VkSubresourceLayout*)(toMap->pPlaneLayouts + i));
+            }
+        }
+    }
+}
+
+void handlemap_VkImageDrmFormatModifierPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkImageDrmFormatModifierPropertiesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
 #ifdef VK_EXT_validation_cache
 void handlemap_VkValidationCacheCreateInfoEXT(
     VulkanHandleMapping* handlemap,
@@ -4516,9 +5911,43 @@
 
 #endif
 #ifdef VK_EXT_descriptor_indexing
-void handlemap_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(
+#endif
+#ifdef VK_EXT_shader_viewport_index_layer
+#endif
+#ifdef VK_NV_shading_rate_image
+void handlemap_VkShadingRatePaletteNV(
     VulkanHandleMapping* handlemap,
-    VkDescriptorSetLayoutBindingFlagsCreateInfoEXT* toMap)
+    VkShadingRatePaletteNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkPipelineViewportShadingRateImageStateCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkPipelineViewportShadingRateImageStateCreateInfoNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap)
+    {
+        if (toMap->pShadingRatePalettes)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->viewportCount; ++i)
+            {
+                handlemap_VkShadingRatePaletteNV(handlemap, (VkShadingRatePaletteNV*)(toMap->pShadingRatePalettes + i));
+            }
+        }
+    }
+}
+
+void handlemap_VkPhysicalDeviceShadingRateImageFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShadingRateImageFeaturesNV* toMap)
 {
     (void)handlemap;
     (void)toMap;
@@ -4528,9 +5957,72 @@
     }
 }
 
-void handlemap_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(
+void handlemap_VkPhysicalDeviceShadingRateImagePropertiesNV(
     VulkanHandleMapping* handlemap,
-    VkPhysicalDeviceDescriptorIndexingFeaturesEXT* toMap)
+    VkPhysicalDeviceShadingRateImagePropertiesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkExtent2D(handlemap, (VkExtent2D*)(&toMap->shadingRateTexelSize));
+}
+
+void handlemap_VkCoarseSampleLocationNV(
+    VulkanHandleMapping* handlemap,
+    VkCoarseSampleLocationNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkCoarseSampleOrderCustomNV(
+    VulkanHandleMapping* handlemap,
+    VkCoarseSampleOrderCustomNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap)
+    {
+        if (toMap->pSampleLocations)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->sampleLocationCount; ++i)
+            {
+                handlemap_VkCoarseSampleLocationNV(handlemap, (VkCoarseSampleLocationNV*)(toMap->pSampleLocations + i));
+            }
+        }
+    }
+}
+
+void handlemap_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap)
+    {
+        if (toMap->pCustomSampleOrders)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->customSampleOrderCount; ++i)
+            {
+                handlemap_VkCoarseSampleOrderCustomNV(handlemap, (VkCoarseSampleOrderCustomNV*)(toMap->pCustomSampleOrders + i));
+            }
+        }
+    }
+}
+
+#endif
+#ifdef VK_NV_ray_tracing
+void handlemap_VkRayTracingShaderGroupCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkRayTracingShaderGroupCreateInfoNV* toMap)
 {
     (void)handlemap;
     (void)toMap;
@@ -4540,9 +6032,172 @@
     }
 }
 
-void handlemap_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(
+void handlemap_VkRayTracingPipelineCreateInfoNV(
     VulkanHandleMapping* handlemap,
-    VkPhysicalDeviceDescriptorIndexingPropertiesEXT* toMap)
+    VkRayTracingPipelineCreateInfoNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap)
+    {
+        if (toMap->pStages)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->stageCount; ++i)
+            {
+                handlemap_VkPipelineShaderStageCreateInfo(handlemap, (VkPipelineShaderStageCreateInfo*)(toMap->pStages + i));
+            }
+        }
+    }
+    if (toMap)
+    {
+        if (toMap->pGroups)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->groupCount; ++i)
+            {
+                handlemap_VkRayTracingShaderGroupCreateInfoNV(handlemap, (VkRayTracingShaderGroupCreateInfoNV*)(toMap->pGroups + i));
+            }
+        }
+    }
+    handlemap->mapHandles_VkPipelineLayout((VkPipelineLayout*)&toMap->layout);
+    handlemap->mapHandles_VkPipeline((VkPipeline*)&toMap->basePipelineHandle);
+}
+
+void handlemap_VkGeometryTrianglesNV(
+    VulkanHandleMapping* handlemap,
+    VkGeometryTrianglesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->vertexData);
+    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->indexData);
+    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->transformData);
+}
+
+void handlemap_VkGeometryAABBNV(
+    VulkanHandleMapping* handlemap,
+    VkGeometryAABBNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->aabbData);
+}
+
+void handlemap_VkGeometryDataNV(
+    VulkanHandleMapping* handlemap,
+    VkGeometryDataNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    handlemap_VkGeometryTrianglesNV(handlemap, (VkGeometryTrianglesNV*)(&toMap->triangles));
+    handlemap_VkGeometryAABBNV(handlemap, (VkGeometryAABBNV*)(&toMap->aabbs));
+}
+
+void handlemap_VkGeometryNV(
+    VulkanHandleMapping* handlemap,
+    VkGeometryNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkGeometryDataNV(handlemap, (VkGeometryDataNV*)(&toMap->geometry));
+}
+
+void handlemap_VkAccelerationStructureInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureInfoNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap)
+    {
+        if (toMap->pGeometries)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->geometryCount; ++i)
+            {
+                handlemap_VkGeometryNV(handlemap, (VkGeometryNV*)(toMap->pGeometries + i));
+            }
+        }
+    }
+}
+
+void handlemap_VkAccelerationStructureCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureCreateInfoNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkAccelerationStructureInfoNV(handlemap, (VkAccelerationStructureInfoNV*)(&toMap->info));
+}
+
+void handlemap_VkBindAccelerationStructureMemoryInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkBindAccelerationStructureMemoryInfoNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkAccelerationStructureNV((VkAccelerationStructureNV*)&toMap->accelerationStructure);
+    handlemap->mapHandles_VkDeviceMemory((VkDeviceMemory*)&toMap->memory);
+}
+
+void handlemap_VkWriteDescriptorSetAccelerationStructureNV(
+    VulkanHandleMapping* handlemap,
+    VkWriteDescriptorSetAccelerationStructureNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap->pAccelerationStructures)
+    {
+        handlemap->mapHandles_VkAccelerationStructureNV((VkAccelerationStructureNV*)toMap->pAccelerationStructures, toMap->accelerationStructureCount);
+    }
+}
+
+void handlemap_VkAccelerationStructureMemoryRequirementsInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureMemoryRequirementsInfoNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkAccelerationStructureNV((VkAccelerationStructureNV*)&toMap->accelerationStructure);
+}
+
+void handlemap_VkPhysicalDeviceRayTracingPropertiesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceRayTracingPropertiesNV* toMap)
 {
     (void)handlemap;
     (void)toMap;
@@ -4552,9 +6207,36 @@
     }
 }
 
-void handlemap_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(
+void handlemap_VkTransformMatrixKHR(
     VulkanHandleMapping* handlemap,
-    VkDescriptorSetVariableDescriptorCountAllocateInfoEXT* toMap)
+    VkTransformMatrixKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkAabbPositionsKHR(
+    VulkanHandleMapping* handlemap,
+    VkAabbPositionsKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkAccelerationStructureInstanceKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureInstanceKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    handlemap_VkTransformMatrixKHR(handlemap, (VkTransformMatrixKHR*)(&toMap->transform));
+}
+
+#endif
+#ifdef VK_NV_representative_fragment_test
+void handlemap_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* toMap)
 {
     (void)handlemap;
     (void)toMap;
@@ -4564,9 +6246,9 @@
     }
 }
 
-void handlemap_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(
+void handlemap_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(
     VulkanHandleMapping* handlemap,
-    VkDescriptorSetVariableDescriptorCountLayoutSupportEXT* toMap)
+    VkPipelineRepresentativeFragmentTestStateCreateInfoNV* toMap)
 {
     (void)handlemap;
     (void)toMap;
@@ -4577,7 +6259,33 @@
 }
 
 #endif
-#ifdef VK_EXT_shader_viewport_index_layer
+#ifdef VK_EXT_filter_cubic
+void handlemap_VkPhysicalDeviceImageViewImageFormatInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceImageViewImageFormatInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkFilterCubicImageViewImageFormatPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkFilterCubicImageViewImageFormatPropertiesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
 #endif
 #ifdef VK_EXT_global_priority
 void handlemap_VkDeviceQueueGlobalPriorityCreateInfoEXT(
@@ -4633,6 +6341,34 @@
 #endif
 #ifdef VK_AMD_buffer_marker
 #endif
+#ifdef VK_AMD_pipeline_compiler_control
+void handlemap_VkPipelineCompilerControlCreateInfoAMD(
+    VulkanHandleMapping* handlemap,
+    VkPipelineCompilerControlCreateInfoAMD* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+void handlemap_VkCalibratedTimestampInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkCalibratedTimestampInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
 #ifdef VK_AMD_shader_core_properties
 void handlemap_VkPhysicalDeviceShaderCorePropertiesAMD(
     VulkanHandleMapping* handlemap,
@@ -4647,6 +6383,20 @@
 }
 
 #endif
+#ifdef VK_AMD_memory_overallocation_behavior
+void handlemap_VkDeviceMemoryOverallocationCreateInfoAMD(
+    VulkanHandleMapping* handlemap,
+    VkDeviceMemoryOverallocationCreateInfoAMD* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
 #ifdef VK_EXT_vertex_attribute_divisor
 void handlemap_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(
     VulkanHandleMapping* handlemap,
@@ -4678,11 +6428,76 @@
     {
         handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
     }
-    if (toMap->pVertexBindingDivisors)
+    if (toMap)
     {
-        for (uint32_t i = 0; i < (uint32_t)toMap->vertexBindingDivisorCount; ++i)
+        if (toMap->pVertexBindingDivisors)
         {
-            handlemap_VkVertexInputBindingDivisorDescriptionEXT(handlemap, (VkVertexInputBindingDivisorDescriptionEXT*)(toMap->pVertexBindingDivisors + i));
+            for (uint32_t i = 0; i < (uint32_t)toMap->vertexBindingDivisorCount; ++i)
+            {
+                handlemap_VkVertexInputBindingDivisorDescriptionEXT(handlemap, (VkVertexInputBindingDivisorDescriptionEXT*)(toMap->pVertexBindingDivisors + i));
+            }
+        }
+    }
+}
+
+void handlemap_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_GGP_frame_token
+void handlemap_VkPresentFrameTokenGGP(
+    VulkanHandleMapping* handlemap,
+    VkPresentFrameTokenGGP* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+void handlemap_VkPipelineCreationFeedbackEXT(
+    VulkanHandleMapping* handlemap,
+    VkPipelineCreationFeedbackEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkPipelineCreationFeedbackCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkPipelineCreationFeedbackCreateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap->pPipelineCreationFeedback)
+    {
+        handlemap_VkPipelineCreationFeedbackEXT(handlemap, (VkPipelineCreationFeedbackEXT*)(toMap->pPipelineCreationFeedback));
+    }
+    if (toMap)
+    {
+        if (toMap->pPipelineStageCreationFeedbacks)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->pipelineStageCreationFeedbackCount; ++i)
+            {
+                handlemap_VkPipelineCreationFeedbackEXT(handlemap, (VkPipelineCreationFeedbackEXT*)(toMap->pPipelineStageCreationFeedbacks + i));
+            }
         }
     }
 }
@@ -4690,6 +6505,118 @@
 #endif
 #ifdef VK_NV_shader_subgroup_partitioned
 #endif
+#ifdef VK_NV_compute_shader_derivatives
+void handlemap_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_mesh_shader
+void handlemap_VkPhysicalDeviceMeshShaderFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceMeshShaderFeaturesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceMeshShaderPropertiesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceMeshShaderPropertiesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkDrawMeshTasksIndirectCommandNV(
+    VulkanHandleMapping* handlemap,
+    VkDrawMeshTasksIndirectCommandNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+void handlemap_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_shader_image_footprint
+void handlemap_VkPhysicalDeviceShaderImageFootprintFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderImageFootprintFeaturesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_scissor_exclusive
+void handlemap_VkPipelineViewportExclusiveScissorStateCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkPipelineViewportExclusiveScissorStateCreateInfoNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap)
+    {
+        if (toMap->pExclusiveScissors)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->exclusiveScissorCount; ++i)
+            {
+                handlemap_VkRect2D(handlemap, (VkRect2D*)(toMap->pExclusiveScissors + i));
+            }
+        }
+    }
+}
+
+void handlemap_VkPhysicalDeviceExclusiveScissorFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceExclusiveScissorFeaturesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
 #ifdef VK_NV_device_diagnostic_checkpoints
 void handlemap_VkQueueFamilyCheckpointPropertiesNV(
     VulkanHandleMapping* handlemap,
@@ -4716,9 +6643,1226 @@
 }
 
 #endif
-#ifdef VK_GOOGLE_address_space
+#ifdef VK_INTEL_shader_integer_functions2
+void handlemap_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_INTEL_performance_query
+void handlemap_VkPerformanceValueDataINTEL(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceValueDataINTEL* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkPerformanceValueINTEL(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceValueINTEL* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    handlemap_VkPerformanceValueDataINTEL(handlemap, (VkPerformanceValueDataINTEL*)(&toMap->data));
+}
+
+void handlemap_VkInitializePerformanceApiInfoINTEL(
+    VulkanHandleMapping* handlemap,
+    VkInitializePerformanceApiInfoINTEL* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkQueryPoolPerformanceQueryCreateInfoINTEL(
+    VulkanHandleMapping* handlemap,
+    VkQueryPoolPerformanceQueryCreateInfoINTEL* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPerformanceMarkerInfoINTEL(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceMarkerInfoINTEL* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPerformanceStreamMarkerInfoINTEL(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceStreamMarkerInfoINTEL* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPerformanceOverrideInfoINTEL(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceOverrideInfoINTEL* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPerformanceConfigurationAcquireInfoINTEL(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceConfigurationAcquireInfoINTEL* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_pci_bus_info
+void handlemap_VkPhysicalDevicePCIBusInfoPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevicePCIBusInfoPropertiesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_AMD_display_native_hdr
+void handlemap_VkDisplayNativeHdrSurfaceCapabilitiesAMD(
+    VulkanHandleMapping* handlemap,
+    VkDisplayNativeHdrSurfaceCapabilitiesAMD* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkSwapchainDisplayNativeHdrCreateInfoAMD(
+    VulkanHandleMapping* handlemap,
+    VkSwapchainDisplayNativeHdrCreateInfoAMD* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+void handlemap_VkImagePipeSurfaceCreateInfoFUCHSIA(
+    VulkanHandleMapping* handlemap,
+    VkImagePipeSurfaceCreateInfoFUCHSIA* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_metal_surface
+void handlemap_VkMetalSurfaceCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkMetalSurfaceCreateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_fragment_density_map
+void handlemap_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentDensityMapFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentDensityMapPropertiesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkExtent2D(handlemap, (VkExtent2D*)(&toMap->minFragmentDensityTexelSize));
+    handlemap_VkExtent2D(handlemap, (VkExtent2D*)(&toMap->maxFragmentDensityTexelSize));
+}
+
+void handlemap_VkRenderPassFragmentDensityMapCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkRenderPassFragmentDensityMapCreateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkAttachmentReference(handlemap, (VkAttachmentReference*)(&toMap->fragmentDensityMapAttachment));
+}
+
+#endif
+#ifdef VK_EXT_scalar_block_layout
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+void handlemap_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_AMD_shader_core_properties2
+void handlemap_VkPhysicalDeviceShaderCoreProperties2AMD(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderCoreProperties2AMD* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_AMD_device_coherent_memory
+void handlemap_VkPhysicalDeviceCoherentMemoryFeaturesAMD(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceCoherentMemoryFeaturesAMD* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+void handlemap_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_memory_budget
+void handlemap_VkPhysicalDeviceMemoryBudgetPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceMemoryBudgetPropertiesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_memory_priority
+void handlemap_VkPhysicalDeviceMemoryPriorityFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceMemoryPriorityFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkMemoryPriorityAllocateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkMemoryPriorityAllocateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+void handlemap_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_buffer_device_address
+void handlemap_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkBufferDeviceAddressCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkBufferDeviceAddressCreateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_tooling_info
+void handlemap_VkPhysicalDeviceToolPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceToolPropertiesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+#endif
+#ifdef VK_EXT_validation_features
+void handlemap_VkValidationFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkValidationFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_cooperative_matrix
+void handlemap_VkCooperativeMatrixPropertiesNV(
+    VulkanHandleMapping* handlemap,
+    VkCooperativeMatrixPropertiesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceCooperativeMatrixFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceCooperativeMatrixFeaturesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceCooperativeMatrixPropertiesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceCooperativeMatrixPropertiesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+void handlemap_VkPhysicalDeviceCoverageReductionModeFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceCoverageReductionModeFeaturesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPipelineCoverageReductionStateCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkPipelineCoverageReductionStateCreateInfoNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkFramebufferMixedSamplesCombinationNV(
+    VulkanHandleMapping* handlemap,
+    VkFramebufferMixedSamplesCombinationNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+void handlemap_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+void handlemap_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+void handlemap_VkSurfaceFullScreenExclusiveInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkSurfaceFullScreenExclusiveInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkSurfaceCapabilitiesFullScreenExclusiveEXT(
+    VulkanHandleMapping* handlemap,
+    VkSurfaceCapabilitiesFullScreenExclusiveEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkSurfaceFullScreenExclusiveWin32InfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkSurfaceFullScreenExclusiveWin32InfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_headless_surface
+void handlemap_VkHeadlessSurfaceCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkHeadlessSurfaceCreateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_line_rasterization
+void handlemap_VkPhysicalDeviceLineRasterizationFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceLineRasterizationFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceLineRasterizationPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceLineRasterizationPropertiesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPipelineRasterizationLineStateCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkPipelineRasterizationLineStateCreateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_shader_atomic_float
+void handlemap_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_host_query_reset
+#endif
+#ifdef VK_EXT_index_type_uint8
+void handlemap_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceIndexTypeUint8FeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+void handlemap_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+void handlemap_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_device_generated_commands
+void handlemap_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkGraphicsShaderGroupCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkGraphicsShaderGroupCreateInfoNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap)
+    {
+        if (toMap->pStages)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->stageCount; ++i)
+            {
+                handlemap_VkPipelineShaderStageCreateInfo(handlemap, (VkPipelineShaderStageCreateInfo*)(toMap->pStages + i));
+            }
+        }
+    }
+    if (toMap->pVertexInputState)
+    {
+        handlemap_VkPipelineVertexInputStateCreateInfo(handlemap, (VkPipelineVertexInputStateCreateInfo*)(toMap->pVertexInputState));
+    }
+    if (toMap->pTessellationState)
+    {
+        handlemap_VkPipelineTessellationStateCreateInfo(handlemap, (VkPipelineTessellationStateCreateInfo*)(toMap->pTessellationState));
+    }
+}
+
+void handlemap_VkGraphicsPipelineShaderGroupsCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkGraphicsPipelineShaderGroupsCreateInfoNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap)
+    {
+        if (toMap->pGroups)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->groupCount; ++i)
+            {
+                handlemap_VkGraphicsShaderGroupCreateInfoNV(handlemap, (VkGraphicsShaderGroupCreateInfoNV*)(toMap->pGroups + i));
+            }
+        }
+    }
+    if (toMap->pPipelines)
+    {
+        handlemap->mapHandles_VkPipeline((VkPipeline*)toMap->pPipelines, toMap->pipelineCount);
+    }
+}
+
+void handlemap_VkBindShaderGroupIndirectCommandNV(
+    VulkanHandleMapping* handlemap,
+    VkBindShaderGroupIndirectCommandNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkBindIndexBufferIndirectCommandNV(
+    VulkanHandleMapping* handlemap,
+    VkBindIndexBufferIndirectCommandNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkBindVertexBufferIndirectCommandNV(
+    VulkanHandleMapping* handlemap,
+    VkBindVertexBufferIndirectCommandNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkSetStateFlagsIndirectCommandNV(
+    VulkanHandleMapping* handlemap,
+    VkSetStateFlagsIndirectCommandNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkIndirectCommandsStreamNV(
+    VulkanHandleMapping* handlemap,
+    VkIndirectCommandsStreamNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->buffer);
+}
+
+void handlemap_VkIndirectCommandsLayoutTokenNV(
+    VulkanHandleMapping* handlemap,
+    VkIndirectCommandsLayoutTokenNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkPipelineLayout((VkPipelineLayout*)&toMap->pushconstantPipelineLayout);
+}
+
+void handlemap_VkIndirectCommandsLayoutCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkIndirectCommandsLayoutCreateInfoNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap)
+    {
+        if (toMap->pTokens)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->tokenCount; ++i)
+            {
+                handlemap_VkIndirectCommandsLayoutTokenNV(handlemap, (VkIndirectCommandsLayoutTokenNV*)(toMap->pTokens + i));
+            }
+        }
+    }
+}
+
+void handlemap_VkGeneratedCommandsInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkGeneratedCommandsInfoNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkPipeline((VkPipeline*)&toMap->pipeline);
+    handlemap->mapHandles_VkIndirectCommandsLayoutNV((VkIndirectCommandsLayoutNV*)&toMap->indirectCommandsLayout);
+    if (toMap)
+    {
+        if (toMap->pStreams)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->streamCount; ++i)
+            {
+                handlemap_VkIndirectCommandsStreamNV(handlemap, (VkIndirectCommandsStreamNV*)(toMap->pStreams + i));
+            }
+        }
+    }
+    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->preprocessBuffer);
+    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->sequencesCountBuffer);
+    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->sequencesIndexBuffer);
+}
+
+void handlemap_VkGeneratedCommandsMemoryRequirementsInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkGeneratedCommandsMemoryRequirementsInfoNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkPipeline((VkPipeline*)&toMap->pipeline);
+    handlemap->mapHandles_VkIndirectCommandsLayoutNV((VkIndirectCommandsLayoutNV*)&toMap->indirectCommandsLayout);
+}
+
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+void handlemap_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_transform
+void handlemap_VkRenderPassTransformBeginInfoQCOM(
+    VulkanHandleMapping* handlemap,
+    VkRenderPassTransformBeginInfoQCOM* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(
+    VulkanHandleMapping* handlemap,
+    VkCommandBufferInheritanceRenderPassTransformInfoQCOM* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkRect2D(handlemap, (VkRect2D*)(&toMap->renderArea));
+}
+
+#endif
+#ifdef VK_EXT_device_memory_report
+void handlemap_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkDeviceMemoryReportCallbackDataEXT(
+    VulkanHandleMapping* handlemap,
+    VkDeviceMemoryReportCallbackDataEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkDeviceDeviceMemoryReportCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkDeviceDeviceMemoryReportCreateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_robustness2
+void handlemap_VkPhysicalDeviceRobustness2FeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceRobustness2FeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceRobustness2PropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceRobustness2PropertiesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_custom_border_color
+void handlemap_VkSamplerCustomBorderColorCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkSamplerCustomBorderColorCreateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkClearColorValue(handlemap, (VkClearColorValue*)(&toMap->customBorderColor));
+}
+
+void handlemap_VkPhysicalDeviceCustomBorderColorPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceCustomBorderColorPropertiesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceCustomBorderColorFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceCustomBorderColorFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+void handlemap_VkPhysicalDevicePrivateDataFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevicePrivateDataFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkDevicePrivateDataCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkDevicePrivateDataCreateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPrivateDataSlotCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkPrivateDataSlotCreateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+void handlemap_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_device_diagnostics_config
+void handlemap_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDiagnosticsConfigFeaturesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkDeviceDiagnosticsConfigCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkDeviceDiagnosticsConfigCreateInfoNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+void handlemap_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkPipelineFragmentShadingRateEnumStateCreateInfoNV* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_fragment_density_map2
+void handlemap_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+void handlemap_VkCopyCommandTransformInfoQCOM(
+    VulkanHandleMapping* handlemap,
+    VkCopyCommandTransformInfoQCOM* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_image_robustness
+void handlemap_VkPhysicalDeviceImageRobustnessFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceImageRobustnessFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_4444_formats
+void handlemap_VkPhysicalDevice4444FormatsFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevice4444FormatsFeaturesEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_directfb_surface
+void handlemap_VkDirectFBSurfaceCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkDirectFBSurfaceCreateInfoEXT* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+#endif
+#ifdef VK_GOOGLE_gfxstream
 void handlemap_VkImportColorBufferGOOGLE(
     VulkanHandleMapping* handlemap,
     VkImportColorBufferGOOGLE* toMap)
@@ -4731,6 +7875,18 @@
     }
 }
 
+void handlemap_VkImportBufferGOOGLE(
+    VulkanHandleMapping* handlemap,
+    VkImportBufferGOOGLE* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
 void handlemap_VkImportPhysicalAddressGOOGLE(
     VulkanHandleMapping* handlemap,
     VkImportPhysicalAddressGOOGLE* toMap)
@@ -4744,15 +7900,379 @@
 }
 
 #endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
+#ifdef VK_KHR_acceleration_structure
+void handlemap_VkDeviceOrHostAddressKHR(
+    VulkanHandleMapping* handlemap,
+    VkDeviceOrHostAddressKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkDeviceOrHostAddressConstKHR(
+    VulkanHandleMapping* handlemap,
+    VkDeviceOrHostAddressConstKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkAccelerationStructureBuildRangeInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureBuildRangeInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkAccelerationStructureGeometryTrianglesDataKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureGeometryTrianglesDataKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkDeviceOrHostAddressConstKHR(handlemap, (VkDeviceOrHostAddressConstKHR*)(&toMap->vertexData));
+    handlemap_VkDeviceOrHostAddressConstKHR(handlemap, (VkDeviceOrHostAddressConstKHR*)(&toMap->indexData));
+    handlemap_VkDeviceOrHostAddressConstKHR(handlemap, (VkDeviceOrHostAddressConstKHR*)(&toMap->transformData));
+}
+
+void handlemap_VkAccelerationStructureGeometryAabbsDataKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureGeometryAabbsDataKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkDeviceOrHostAddressConstKHR(handlemap, (VkDeviceOrHostAddressConstKHR*)(&toMap->data));
+}
+
+void handlemap_VkAccelerationStructureGeometryInstancesDataKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureGeometryInstancesDataKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkDeviceOrHostAddressConstKHR(handlemap, (VkDeviceOrHostAddressConstKHR*)(&toMap->data));
+}
+
+void handlemap_VkAccelerationStructureGeometryDataKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureGeometryDataKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    handlemap_VkAccelerationStructureGeometryTrianglesDataKHR(handlemap, (VkAccelerationStructureGeometryTrianglesDataKHR*)(&toMap->triangles));
+    handlemap_VkAccelerationStructureGeometryAabbsDataKHR(handlemap, (VkAccelerationStructureGeometryAabbsDataKHR*)(&toMap->aabbs));
+    handlemap_VkAccelerationStructureGeometryInstancesDataKHR(handlemap, (VkAccelerationStructureGeometryInstancesDataKHR*)(&toMap->instances));
+}
+
+void handlemap_VkAccelerationStructureGeometryKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureGeometryKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkAccelerationStructureGeometryDataKHR(handlemap, (VkAccelerationStructureGeometryDataKHR*)(&toMap->geometry));
+}
+
+void handlemap_VkAccelerationStructureBuildGeometryInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureBuildGeometryInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkAccelerationStructureKHR((VkAccelerationStructureKHR*)&toMap->srcAccelerationStructure);
+    handlemap->mapHandles_VkAccelerationStructureKHR((VkAccelerationStructureKHR*)&toMap->dstAccelerationStructure);
+    if (toMap)
+    {
+        if (toMap->pGeometries)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->geometryCount; ++i)
+            {
+                handlemap_VkAccelerationStructureGeometryKHR(handlemap, (VkAccelerationStructureGeometryKHR*)(toMap->pGeometries + i));
+            }
+        }
+    }
+    handlemap_VkDeviceOrHostAddressKHR(handlemap, (VkDeviceOrHostAddressKHR*)(&toMap->scratchData));
+}
+
+void handlemap_VkAccelerationStructureCreateInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureCreateInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkBuffer((VkBuffer*)&toMap->buffer);
+}
+
+void handlemap_VkWriteDescriptorSetAccelerationStructureKHR(
+    VulkanHandleMapping* handlemap,
+    VkWriteDescriptorSetAccelerationStructureKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap->pAccelerationStructures)
+    {
+        handlemap->mapHandles_VkAccelerationStructureKHR((VkAccelerationStructureKHR*)toMap->pAccelerationStructures, toMap->accelerationStructureCount);
+    }
+}
+
+void handlemap_VkPhysicalDeviceAccelerationStructureFeaturesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceAccelerationStructureFeaturesKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceAccelerationStructurePropertiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceAccelerationStructurePropertiesKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkAccelerationStructureDeviceAddressInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureDeviceAddressInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkAccelerationStructureKHR((VkAccelerationStructureKHR*)&toMap->accelerationStructure);
+}
+
+void handlemap_VkAccelerationStructureVersionInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureVersionInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkCopyAccelerationStructureToMemoryInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkCopyAccelerationStructureToMemoryInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkAccelerationStructureKHR((VkAccelerationStructureKHR*)&toMap->src);
+    handlemap_VkDeviceOrHostAddressKHR(handlemap, (VkDeviceOrHostAddressKHR*)(&toMap->dst));
+}
+
+void handlemap_VkCopyMemoryToAccelerationStructureInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkCopyMemoryToAccelerationStructureInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap_VkDeviceOrHostAddressConstKHR(handlemap, (VkDeviceOrHostAddressConstKHR*)(&toMap->src));
+    handlemap->mapHandles_VkAccelerationStructureKHR((VkAccelerationStructureKHR*)&toMap->dst);
+}
+
+void handlemap_VkCopyAccelerationStructureInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkCopyAccelerationStructureInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    handlemap->mapHandles_VkAccelerationStructureKHR((VkAccelerationStructureKHR*)&toMap->src);
+    handlemap->mapHandles_VkAccelerationStructureKHR((VkAccelerationStructureKHR*)&toMap->dst);
+}
+
+void handlemap_VkAccelerationStructureBuildSizesInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureBuildSizesInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
 #endif
-#ifdef VK_GOOGLE_async_command_buffers
+#ifdef VK_KHR_ray_tracing_pipeline
+void handlemap_VkRayTracingShaderGroupCreateInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkRayTracingShaderGroupCreateInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkRayTracingPipelineInterfaceCreateInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkRayTracingPipelineInterfaceCreateInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkRayTracingPipelineCreateInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkRayTracingPipelineCreateInfoKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+    if (toMap)
+    {
+        if (toMap->pStages)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->stageCount; ++i)
+            {
+                handlemap_VkPipelineShaderStageCreateInfo(handlemap, (VkPipelineShaderStageCreateInfo*)(toMap->pStages + i));
+            }
+        }
+    }
+    if (toMap)
+    {
+        if (toMap->pGroups)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toMap->groupCount; ++i)
+            {
+                handlemap_VkRayTracingShaderGroupCreateInfoKHR(handlemap, (VkRayTracingShaderGroupCreateInfoKHR*)(toMap->pGroups + i));
+            }
+        }
+    }
+    if (toMap->pLibraryInfo)
+    {
+        handlemap_VkPipelineLibraryCreateInfoKHR(handlemap, (VkPipelineLibraryCreateInfoKHR*)(toMap->pLibraryInfo));
+    }
+    if (toMap->pLibraryInterface)
+    {
+        handlemap_VkRayTracingPipelineInterfaceCreateInfoKHR(handlemap, (VkRayTracingPipelineInterfaceCreateInfoKHR*)(toMap->pLibraryInterface));
+    }
+    if (toMap->pDynamicState)
+    {
+        handlemap_VkPipelineDynamicStateCreateInfo(handlemap, (VkPipelineDynamicStateCreateInfo*)(toMap->pDynamicState));
+    }
+    handlemap->mapHandles_VkPipelineLayout((VkPipelineLayout*)&toMap->layout);
+    handlemap->mapHandles_VkPipeline((VkPipeline*)&toMap->basePipelineHandle);
+}
+
+void handlemap_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceRayTracingPipelineFeaturesKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceRayTracingPipelinePropertiesKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
+void handlemap_VkStridedDeviceAddressRegionKHR(
+    VulkanHandleMapping* handlemap,
+    VkStridedDeviceAddressRegionKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
+void handlemap_VkTraceRaysIndirectCommandKHR(
+    VulkanHandleMapping* handlemap,
+    VkTraceRaysIndirectCommandKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+}
+
 #endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
-#endif
-#ifdef VK_GOOGLE_address_space_info
-#endif
-#ifdef VK_GOOGLE_free_memory_sync
+#ifdef VK_KHR_ray_query
+void handlemap_VkPhysicalDeviceRayQueryFeaturesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceRayQueryFeaturesKHR* toMap)
+{
+    (void)handlemap;
+    (void)toMap;
+    if (toMap->pNext)
+    {
+        handlemap_extension_struct(handlemap, (void*)(toMap->pNext));
+    }
+}
+
 #endif
 void handlemap_extension_struct(
     VulkanHandleMapping* handlemap,
@@ -4866,9 +8386,9 @@
             handlemap_VkPhysicalDeviceMultiviewProperties(handlemap, reinterpret_cast<VkPhysicalDeviceMultiviewProperties*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES:
         {
-            handlemap_VkPhysicalDeviceVariablePointerFeatures(handlemap, reinterpret_cast<VkPhysicalDeviceVariablePointerFeatures*>(structExtension_out));
+            handlemap_VkPhysicalDeviceVariablePointersFeatures(handlemap, reinterpret_cast<VkPhysicalDeviceVariablePointersFeatures*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES:
@@ -4956,9 +8476,201 @@
             handlemap_VkPhysicalDeviceMaintenance3Properties(handlemap, reinterpret_cast<VkPhysicalDeviceMaintenance3Properties*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
         {
-            handlemap_VkPhysicalDeviceShaderDrawParameterFeatures(handlemap, reinterpret_cast<VkPhysicalDeviceShaderDrawParameterFeatures*>(structExtension_out));
+            handlemap_VkPhysicalDeviceShaderDrawParametersFeatures(handlemap, reinterpret_cast<VkPhysicalDeviceShaderDrawParametersFeatures*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
+        {
+            handlemap_VkPhysicalDeviceVulkan11Features(handlemap, reinterpret_cast<VkPhysicalDeviceVulkan11Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
+        {
+            handlemap_VkPhysicalDeviceVulkan11Properties(handlemap, reinterpret_cast<VkPhysicalDeviceVulkan11Properties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
+        {
+            handlemap_VkPhysicalDeviceVulkan12Features(handlemap, reinterpret_cast<VkPhysicalDeviceVulkan12Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
+        {
+            handlemap_VkPhysicalDeviceVulkan12Properties(handlemap, reinterpret_cast<VkPhysicalDeviceVulkan12Properties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO:
+        {
+            handlemap_VkImageFormatListCreateInfo(handlemap, reinterpret_cast<VkImageFormatListCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES:
+        {
+            handlemap_VkPhysicalDevice8BitStorageFeatures(handlemap, reinterpret_cast<VkPhysicalDevice8BitStorageFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES:
+        {
+            handlemap_VkPhysicalDeviceDriverProperties(handlemap, reinterpret_cast<VkPhysicalDeviceDriverProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES:
+        {
+            handlemap_VkPhysicalDeviceShaderAtomicInt64Features(handlemap, reinterpret_cast<VkPhysicalDeviceShaderAtomicInt64Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES:
+        {
+            handlemap_VkPhysicalDeviceShaderFloat16Int8Features(handlemap, reinterpret_cast<VkPhysicalDeviceShaderFloat16Int8Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES:
+        {
+            handlemap_VkPhysicalDeviceFloatControlsProperties(handlemap, reinterpret_cast<VkPhysicalDeviceFloatControlsProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO:
+        {
+            handlemap_VkDescriptorSetLayoutBindingFlagsCreateInfo(handlemap, reinterpret_cast<VkDescriptorSetLayoutBindingFlagsCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES:
+        {
+            handlemap_VkPhysicalDeviceDescriptorIndexingFeatures(handlemap, reinterpret_cast<VkPhysicalDeviceDescriptorIndexingFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES:
+        {
+            handlemap_VkPhysicalDeviceDescriptorIndexingProperties(handlemap, reinterpret_cast<VkPhysicalDeviceDescriptorIndexingProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO:
+        {
+            handlemap_VkDescriptorSetVariableDescriptorCountAllocateInfo(handlemap, reinterpret_cast<VkDescriptorSetVariableDescriptorCountAllocateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT:
+        {
+            handlemap_VkDescriptorSetVariableDescriptorCountLayoutSupport(handlemap, reinterpret_cast<VkDescriptorSetVariableDescriptorCountLayoutSupport*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE:
+        {
+            handlemap_VkSubpassDescriptionDepthStencilResolve(handlemap, reinterpret_cast<VkSubpassDescriptionDepthStencilResolve*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES:
+        {
+            handlemap_VkPhysicalDeviceDepthStencilResolveProperties(handlemap, reinterpret_cast<VkPhysicalDeviceDepthStencilResolveProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES:
+        {
+            handlemap_VkPhysicalDeviceScalarBlockLayoutFeatures(handlemap, reinterpret_cast<VkPhysicalDeviceScalarBlockLayoutFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO:
+        {
+            handlemap_VkImageStencilUsageCreateInfo(handlemap, reinterpret_cast<VkImageStencilUsageCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO:
+        {
+            handlemap_VkSamplerReductionModeCreateInfo(handlemap, reinterpret_cast<VkSamplerReductionModeCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES:
+        {
+            handlemap_VkPhysicalDeviceSamplerFilterMinmaxProperties(handlemap, reinterpret_cast<VkPhysicalDeviceSamplerFilterMinmaxProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
+        {
+            handlemap_VkPhysicalDeviceVulkanMemoryModelFeatures(handlemap, reinterpret_cast<VkPhysicalDeviceVulkanMemoryModelFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES:
+        {
+            handlemap_VkPhysicalDeviceImagelessFramebufferFeatures(handlemap, reinterpret_cast<VkPhysicalDeviceImagelessFramebufferFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO:
+        {
+            handlemap_VkFramebufferAttachmentsCreateInfo(handlemap, reinterpret_cast<VkFramebufferAttachmentsCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO:
+        {
+            handlemap_VkRenderPassAttachmentBeginInfo(handlemap, reinterpret_cast<VkRenderPassAttachmentBeginInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES:
+        {
+            handlemap_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(handlemap, reinterpret_cast<VkPhysicalDeviceUniformBufferStandardLayoutFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES:
+        {
+            handlemap_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(handlemap, reinterpret_cast<VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
+        {
+            handlemap_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(handlemap, reinterpret_cast<VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT:
+        {
+            handlemap_VkAttachmentReferenceStencilLayout(handlemap, reinterpret_cast<VkAttachmentReferenceStencilLayout*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT:
+        {
+            handlemap_VkAttachmentDescriptionStencilLayout(handlemap, reinterpret_cast<VkAttachmentDescriptionStencilLayout*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES:
+        {
+            handlemap_VkPhysicalDeviceHostQueryResetFeatures(handlemap, reinterpret_cast<VkPhysicalDeviceHostQueryResetFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES:
+        {
+            handlemap_VkPhysicalDeviceTimelineSemaphoreFeatures(handlemap, reinterpret_cast<VkPhysicalDeviceTimelineSemaphoreFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES:
+        {
+            handlemap_VkPhysicalDeviceTimelineSemaphoreProperties(handlemap, reinterpret_cast<VkPhysicalDeviceTimelineSemaphoreProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO:
+        {
+            handlemap_VkSemaphoreTypeCreateInfo(handlemap, reinterpret_cast<VkSemaphoreTypeCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
+        {
+            handlemap_VkTimelineSemaphoreSubmitInfo(handlemap, reinterpret_cast<VkTimelineSemaphoreSubmitInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
+        {
+            handlemap_VkPhysicalDeviceBufferDeviceAddressFeatures(handlemap, reinterpret_cast<VkPhysicalDeviceBufferDeviceAddressFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO:
+        {
+            handlemap_VkBufferOpaqueCaptureAddressCreateInfo(handlemap, reinterpret_cast<VkBufferOpaqueCaptureAddressCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO:
+        {
+            handlemap_VkMemoryOpaqueCaptureAddressAllocateInfo(handlemap, reinterpret_cast<VkMemoryOpaqueCaptureAddressAllocateInfo*>(structExtension_out));
             break;
         }
 #endif
@@ -5057,17 +8769,87 @@
             break;
         }
 #endif
-#ifdef VK_KHR_image_format_list
-        case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR:
+#ifdef VK_KHR_performance_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR:
         {
-            handlemap_VkImageFormatListCreateInfoKHR(handlemap, reinterpret_cast<VkImageFormatListCreateInfoKHR*>(structExtension_out));
+            handlemap_VkPhysicalDevicePerformanceQueryFeaturesKHR(handlemap, reinterpret_cast<VkPhysicalDevicePerformanceQueryFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR:
+        {
+            handlemap_VkPhysicalDevicePerformanceQueryPropertiesKHR(handlemap, reinterpret_cast<VkPhysicalDevicePerformanceQueryPropertiesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR:
+        {
+            handlemap_VkQueryPoolPerformanceCreateInfoKHR(handlemap, reinterpret_cast<VkQueryPoolPerformanceCreateInfoKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR:
+        {
+            handlemap_VkPerformanceQuerySubmitInfoKHR(handlemap, reinterpret_cast<VkPerformanceQuerySubmitInfoKHR*>(structExtension_out));
             break;
         }
 #endif
-#ifdef VK_KHR_8bit_storage
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR:
+#ifdef VK_KHR_portability_subset
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR:
         {
-            handlemap_VkPhysicalDevice8BitStorageFeaturesKHR(handlemap, reinterpret_cast<VkPhysicalDevice8BitStorageFeaturesKHR*>(structExtension_out));
+            handlemap_VkPhysicalDevicePortabilitySubsetFeaturesKHR(handlemap, reinterpret_cast<VkPhysicalDevicePortabilitySubsetFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR:
+        {
+            handlemap_VkPhysicalDevicePortabilitySubsetPropertiesKHR(handlemap, reinterpret_cast<VkPhysicalDevicePortabilitySubsetPropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_shader_clock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR:
+        {
+            handlemap_VkPhysicalDeviceShaderClockFeaturesKHR(handlemap, reinterpret_cast<VkPhysicalDeviceShaderClockFeaturesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR:
+        {
+            handlemap_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(handlemap, reinterpret_cast<VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+        case VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR:
+        {
+            handlemap_VkFragmentShadingRateAttachmentInfoKHR(handlemap, reinterpret_cast<VkFragmentShadingRateAttachmentInfoKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR:
+        {
+            handlemap_VkPipelineFragmentShadingRateStateCreateInfoKHR(handlemap, reinterpret_cast<VkPipelineFragmentShadingRateStateCreateInfoKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR:
+        {
+            handlemap_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(handlemap, reinterpret_cast<VkPhysicalDeviceFragmentShadingRateFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR:
+        {
+            handlemap_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(handlemap, reinterpret_cast<VkPhysicalDeviceFragmentShadingRatePropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+        case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR:
+        {
+            handlemap_VkSurfaceProtectedCapabilitiesKHR(handlemap, reinterpret_cast<VkSurfaceProtectedCapabilitiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR:
+        {
+            handlemap_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(handlemap, reinterpret_cast<VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR*>(structExtension_out));
             break;
         }
 #endif
@@ -5109,6 +8891,23 @@
             break;
         }
 #endif
+#ifdef VK_EXT_transform_feedback
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceTransformFeedbackFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceTransformFeedbackFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT:
+        {
+            handlemap_VkPhysicalDeviceTransformFeedbackPropertiesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceTransformFeedbackPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT:
+        {
+            handlemap_VkPipelineRasterizationStateStreamCreateInfoEXT(handlemap, reinterpret_cast<VkPipelineRasterizationStateStreamCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_AMD_texture_gather_bias_lod
         case VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD:
         {
@@ -5116,6 +8915,13 @@
             break;
         }
 #endif
+#ifdef VK_NV_corner_sampled_image
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV:
+        {
+            handlemap_VkPhysicalDeviceCornerSampledImageFeaturesNV(handlemap, reinterpret_cast<VkPhysicalDeviceCornerSampledImageFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_NV_external_memory
         case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV:
         {
@@ -5154,6 +8960,25 @@
             break;
         }
 #endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_astc_decode_mode
+        case VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT:
+        {
+            handlemap_VkImageViewASTCDecodeModeEXT(handlemap, reinterpret_cast<VkImageViewASTCDecodeModeEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceASTCDecodeFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceASTCDecodeFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_EXT_conditional_rendering
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
         {
@@ -5225,6 +9050,18 @@
             break;
         }
 #endif
+#ifdef VK_EXT_depth_clip_enable
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceDepthClipEnableFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceDepthClipEnableFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT:
+        {
+            handlemap_VkPipelineRasterizationDepthClipStateCreateInfoEXT(handlemap, reinterpret_cast<VkPipelineRasterizationDepthClipStateCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_EXT_debug_utils
         case VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT:
         {
@@ -5254,15 +9091,25 @@
             break;
         }
 #endif
-#ifdef VK_EXT_sampler_filter_minmax
-        case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT:
+#ifdef VK_EXT_inline_uniform_block
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT:
         {
-            handlemap_VkSamplerReductionModeCreateInfoEXT(handlemap, reinterpret_cast<VkSamplerReductionModeCreateInfoEXT*>(structExtension_out));
+            handlemap_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceInlineUniformBlockFeaturesEXT*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT:
         {
-            handlemap_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT*>(structExtension_out));
+            handlemap_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceInlineUniformBlockPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT:
+        {
+            handlemap_VkWriteDescriptorSetInlineUniformBlockEXT(handlemap, reinterpret_cast<VkWriteDescriptorSetInlineUniformBlockEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT:
+        {
+            handlemap_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(handlemap, reinterpret_cast<VkDescriptorPoolInlineUniformBlockCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
@@ -5319,6 +9166,40 @@
             break;
         }
 #endif
+#ifdef VK_NV_shader_sm_builtins
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV:
+        {
+            handlemap_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(handlemap, reinterpret_cast<VkPhysicalDeviceShaderSMBuiltinsPropertiesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV:
+        {
+            handlemap_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(handlemap, reinterpret_cast<VkPhysicalDeviceShaderSMBuiltinsFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+        case VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT:
+        {
+            handlemap_VkDrmFormatModifierPropertiesListEXT(handlemap, reinterpret_cast<VkDrmFormatModifierPropertiesListEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT:
+        {
+            handlemap_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(handlemap, reinterpret_cast<VkPhysicalDeviceImageDrmFormatModifierInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT:
+        {
+            handlemap_VkImageDrmFormatModifierListCreateInfoEXT(handlemap, reinterpret_cast<VkImageDrmFormatModifierListCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT:
+        {
+            handlemap_VkImageDrmFormatModifierExplicitCreateInfoEXT(handlemap, reinterpret_cast<VkImageDrmFormatModifierExplicitCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_EXT_validation_cache
         case VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT:
         {
@@ -5326,30 +9207,61 @@
             break;
         }
 #endif
-#ifdef VK_EXT_descriptor_indexing
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT:
+#ifdef VK_NV_shading_rate_image
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV:
         {
-            handlemap_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(handlemap, reinterpret_cast<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT*>(structExtension_out));
+            handlemap_VkPipelineViewportShadingRateImageStateCreateInfoNV(handlemap, reinterpret_cast<VkPipelineViewportShadingRateImageStateCreateInfoNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV:
         {
-            handlemap_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceDescriptorIndexingFeaturesEXT*>(structExtension_out));
+            handlemap_VkPhysicalDeviceShadingRateImageFeaturesNV(handlemap, reinterpret_cast<VkPhysicalDeviceShadingRateImageFeaturesNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV:
         {
-            handlemap_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceDescriptorIndexingPropertiesEXT*>(structExtension_out));
+            handlemap_VkPhysicalDeviceShadingRateImagePropertiesNV(handlemap, reinterpret_cast<VkPhysicalDeviceShadingRateImagePropertiesNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT:
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV:
         {
-            handlemap_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(handlemap, reinterpret_cast<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT*>(structExtension_out));
+            handlemap_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(handlemap, reinterpret_cast<VkPipelineViewportCoarseSampleOrderStateCreateInfoNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT:
+#endif
+#ifdef VK_NV_ray_tracing
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV:
         {
-            handlemap_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(handlemap, reinterpret_cast<VkDescriptorSetVariableDescriptorCountLayoutSupportEXT*>(structExtension_out));
+            handlemap_VkWriteDescriptorSetAccelerationStructureNV(handlemap, reinterpret_cast<VkWriteDescriptorSetAccelerationStructureNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV:
+        {
+            handlemap_VkPhysicalDeviceRayTracingPropertiesNV(handlemap, reinterpret_cast<VkPhysicalDeviceRayTracingPropertiesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_representative_fragment_test
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV:
+        {
+            handlemap_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(handlemap, reinterpret_cast<VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV:
+        {
+            handlemap_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(handlemap, reinterpret_cast<VkPipelineRepresentativeFragmentTestStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_filter_cubic
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT:
+        {
+            handlemap_VkPhysicalDeviceImageViewImageFormatInfoEXT(handlemap, reinterpret_cast<VkPhysicalDeviceImageViewImageFormatInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT:
+        {
+            handlemap_VkFilterCubicImageViewImageFormatPropertiesEXT(handlemap, reinterpret_cast<VkFilterCubicImageViewImageFormatPropertiesEXT*>(structExtension_out));
             break;
         }
 #endif
@@ -5372,6 +9284,13 @@
             break;
         }
 #endif
+#ifdef VK_AMD_pipeline_compiler_control
+        case VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD:
+        {
+            handlemap_VkPipelineCompilerControlCreateInfoAMD(handlemap, reinterpret_cast<VkPipelineCompilerControlCreateInfoAMD*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_AMD_shader_core_properties
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD:
         {
@@ -5379,6 +9298,13 @@
             break;
         }
 #endif
+#ifdef VK_AMD_memory_overallocation_behavior
+        case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD:
+        {
+            handlemap_VkDeviceMemoryOverallocationCreateInfoAMD(handlemap, reinterpret_cast<VkDeviceMemoryOverallocationCreateInfoAMD*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_EXT_vertex_attribute_divisor
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT:
         {
@@ -5390,6 +9316,70 @@
             handlemap_VkPipelineVertexInputDivisorStateCreateInfoEXT(handlemap, reinterpret_cast<VkPipelineVertexInputDivisorStateCreateInfoEXT*>(structExtension_out));
             break;
         }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_GGP_frame_token
+        case VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP:
+        {
+            handlemap_VkPresentFrameTokenGGP(handlemap, reinterpret_cast<VkPresentFrameTokenGGP*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+        case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT:
+        {
+            handlemap_VkPipelineCreationFeedbackCreateInfoEXT(handlemap, reinterpret_cast<VkPipelineCreationFeedbackCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_compute_shader_derivatives
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV:
+        {
+            handlemap_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(handlemap, reinterpret_cast<VkPhysicalDeviceComputeShaderDerivativesFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_mesh_shader
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV:
+        {
+            handlemap_VkPhysicalDeviceMeshShaderFeaturesNV(handlemap, reinterpret_cast<VkPhysicalDeviceMeshShaderFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV:
+        {
+            handlemap_VkPhysicalDeviceMeshShaderPropertiesNV(handlemap, reinterpret_cast<VkPhysicalDeviceMeshShaderPropertiesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV:
+        {
+            handlemap_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(handlemap, reinterpret_cast<VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_shader_image_footprint
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV:
+        {
+            handlemap_VkPhysicalDeviceShaderImageFootprintFeaturesNV(handlemap, reinterpret_cast<VkPhysicalDeviceShaderImageFootprintFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_scissor_exclusive
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV:
+        {
+            handlemap_VkPipelineViewportExclusiveScissorStateCreateInfoNV(handlemap, reinterpret_cast<VkPipelineViewportExclusiveScissorStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV:
+        {
+            handlemap_VkPhysicalDeviceExclusiveScissorFeaturesNV(handlemap, reinterpret_cast<VkPhysicalDeviceExclusiveScissorFeaturesNV*>(structExtension_out));
+            break;
+        }
 #endif
 #ifdef VK_NV_device_diagnostic_checkpoints
         case VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV:
@@ -5398,18 +9388,455 @@
             break;
         }
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_INTEL_shader_integer_functions2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL:
+        {
+            handlemap_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(handlemap, reinterpret_cast<VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL:
+        {
+            handlemap_VkQueryPoolPerformanceQueryCreateInfoINTEL(handlemap, reinterpret_cast<VkQueryPoolPerformanceQueryCreateInfoINTEL*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pci_bus_info
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT:
+        {
+            handlemap_VkPhysicalDevicePCIBusInfoPropertiesEXT(handlemap, reinterpret_cast<VkPhysicalDevicePCIBusInfoPropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_display_native_hdr
+        case VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD:
+        {
+            handlemap_VkDisplayNativeHdrSurfaceCapabilitiesAMD(handlemap, reinterpret_cast<VkDisplayNativeHdrSurfaceCapabilitiesAMD*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD:
+        {
+            handlemap_VkSwapchainDisplayNativeHdrCreateInfoAMD(handlemap, reinterpret_cast<VkSwapchainDisplayNativeHdrCreateInfoAMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT:
+        {
+            handlemap_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceFragmentDensityMapPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT:
+        {
+            handlemap_VkRenderPassFragmentDensityMapCreateInfoEXT(handlemap, reinterpret_cast<VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_subgroup_size_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceSubgroupSizeControlFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT:
+        {
+            handlemap_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceSubgroupSizeControlPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT:
+        {
+            handlemap_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(handlemap, reinterpret_cast<VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_shader_core_properties2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD:
+        {
+            handlemap_VkPhysicalDeviceShaderCoreProperties2AMD(handlemap, reinterpret_cast<VkPhysicalDeviceShaderCoreProperties2AMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_device_coherent_memory
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD:
+        {
+            handlemap_VkPhysicalDeviceCoherentMemoryFeaturesAMD(handlemap, reinterpret_cast<VkPhysicalDeviceCoherentMemoryFeaturesAMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_memory_budget
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT:
+        {
+            handlemap_VkPhysicalDeviceMemoryBudgetPropertiesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceMemoryBudgetPropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_memory_priority
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceMemoryPriorityFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceMemoryPriorityFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT:
+        {
+            handlemap_VkMemoryPriorityAllocateInfoEXT(handlemap, reinterpret_cast<VkMemoryPriorityAllocateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV:
+        {
+            handlemap_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(handlemap, reinterpret_cast<VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_buffer_device_address
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceBufferDeviceAddressFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT:
+        {
+            handlemap_VkBufferDeviceAddressCreateInfoEXT(handlemap, reinterpret_cast<VkBufferDeviceAddressCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_validation_features
+        case VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT:
+        {
+            handlemap_VkValidationFeaturesEXT(handlemap, reinterpret_cast<VkValidationFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_cooperative_matrix
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV:
+        {
+            handlemap_VkPhysicalDeviceCooperativeMatrixFeaturesNV(handlemap, reinterpret_cast<VkPhysicalDeviceCooperativeMatrixFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV:
+        {
+            handlemap_VkPhysicalDeviceCooperativeMatrixPropertiesNV(handlemap, reinterpret_cast<VkPhysicalDeviceCooperativeMatrixPropertiesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV:
+        {
+            handlemap_VkPhysicalDeviceCoverageReductionModeFeaturesNV(handlemap, reinterpret_cast<VkPhysicalDeviceCoverageReductionModeFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV:
+        {
+            handlemap_VkPipelineCoverageReductionStateCreateInfoNV(handlemap, reinterpret_cast<VkPipelineCoverageReductionStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceYcbcrImageArraysFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT:
+        {
+            handlemap_VkSurfaceFullScreenExclusiveInfoEXT(handlemap, reinterpret_cast<VkSurfaceFullScreenExclusiveInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT:
+        {
+            handlemap_VkSurfaceCapabilitiesFullScreenExclusiveEXT(handlemap, reinterpret_cast<VkSurfaceCapabilitiesFullScreenExclusiveEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT:
+        {
+            handlemap_VkSurfaceFullScreenExclusiveWin32InfoEXT(handlemap, reinterpret_cast<VkSurfaceFullScreenExclusiveWin32InfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_line_rasterization
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceLineRasterizationFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceLineRasterizationFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT:
+        {
+            handlemap_VkPhysicalDeviceLineRasterizationPropertiesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceLineRasterizationPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
+        {
+            handlemap_VkPipelineRasterizationLineStateCreateInfoEXT(handlemap, reinterpret_cast<VkPipelineRasterizationLineStateCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_atomic_float
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceShaderAtomicFloatFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_index_type_uint8
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceIndexTypeUint8FeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceExtendedDynamicStateFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_device_generated_commands
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV:
+        {
+            handlemap_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(handlemap, reinterpret_cast<VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV:
+        {
+            handlemap_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(handlemap, reinterpret_cast<VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV:
+        {
+            handlemap_VkGraphicsPipelineShaderGroupsCreateInfoNV(handlemap, reinterpret_cast<VkGraphicsPipelineShaderGroupsCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT:
+        {
+            handlemap_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_QCOM_render_pass_transform
+        case VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM:
+        {
+            handlemap_VkRenderPassTransformBeginInfoQCOM(handlemap, reinterpret_cast<VkRenderPassTransformBeginInfoQCOM*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM:
+        {
+            handlemap_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(handlemap, reinterpret_cast<VkCommandBufferInheritanceRenderPassTransformInfoQCOM*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_device_memory_report
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT:
+        {
+            handlemap_VkDeviceDeviceMemoryReportCreateInfoEXT(handlemap, reinterpret_cast<VkDeviceDeviceMemoryReportCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_robustness2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceRobustness2FeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceRobustness2FeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT:
+        {
+            handlemap_VkPhysicalDeviceRobustness2PropertiesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceRobustness2PropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_custom_border_color
+        case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT:
+        {
+            handlemap_VkSamplerCustomBorderColorCreateInfoEXT(handlemap, reinterpret_cast<VkSamplerCustomBorderColorCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT:
+        {
+            handlemap_VkPhysicalDeviceCustomBorderColorPropertiesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceCustomBorderColorPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceCustomBorderColorFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceCustomBorderColorFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_private_data
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDevicePrivateDataFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDevicePrivateDataFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT:
+        {
+            handlemap_VkDevicePrivateDataCreateInfoEXT(handlemap, reinterpret_cast<VkDevicePrivateDataCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_device_diagnostics_config
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV:
+        {
+            handlemap_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(handlemap, reinterpret_cast<VkPhysicalDeviceDiagnosticsConfigFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV:
+        {
+            handlemap_VkDeviceDiagnosticsConfigCreateInfoNV(handlemap, reinterpret_cast<VkDeviceDiagnosticsConfigCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV:
+        {
+            handlemap_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(handlemap, reinterpret_cast<VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV:
+        {
+            handlemap_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(handlemap, reinterpret_cast<VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV:
+        {
+            handlemap_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(handlemap, reinterpret_cast<VkPipelineFragmentShadingRateEnumStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceFragmentDensityMap2FeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT:
+        {
+            handlemap_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceFragmentDensityMap2PropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+        case VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM:
+        {
+            handlemap_VkCopyCommandTransformInfoQCOM(handlemap, reinterpret_cast<VkCopyCommandTransformInfoQCOM*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_image_robustness
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDeviceImageRobustnessFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDeviceImageRobustnessFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_4444_formats
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+        {
+            handlemap_VkPhysicalDevice4444FormatsFeaturesEXT(handlemap, reinterpret_cast<VkPhysicalDevice4444FormatsFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_GOOGLE_gfxstream
         case VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE:
         {
             handlemap_VkImportColorBufferGOOGLE(handlemap, reinterpret_cast<VkImportColorBufferGOOGLE*>(structExtension_out));
             break;
         }
+        case VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE:
+        {
+            handlemap_VkImportBufferGOOGLE(handlemap, reinterpret_cast<VkImportBufferGOOGLE*>(structExtension_out));
+            break;
+        }
         case VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE:
         {
             handlemap_VkImportPhysicalAddressGOOGLE(handlemap, reinterpret_cast<VkImportPhysicalAddressGOOGLE*>(structExtension_out));
             break;
         }
 #endif
+#ifdef VK_KHR_acceleration_structure
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR:
+        {
+            handlemap_VkWriteDescriptorSetAccelerationStructureKHR(handlemap, reinterpret_cast<VkWriteDescriptorSetAccelerationStructureKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR:
+        {
+            handlemap_VkPhysicalDeviceAccelerationStructureFeaturesKHR(handlemap, reinterpret_cast<VkPhysicalDeviceAccelerationStructureFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR:
+        {
+            handlemap_VkPhysicalDeviceAccelerationStructurePropertiesKHR(handlemap, reinterpret_cast<VkPhysicalDeviceAccelerationStructurePropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR:
+        {
+            handlemap_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(handlemap, reinterpret_cast<VkPhysicalDeviceRayTracingPipelineFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR:
+        {
+            handlemap_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(handlemap, reinterpret_cast<VkPhysicalDeviceRayTracingPipelinePropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_ray_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR:
+        {
+            handlemap_VkPhysicalDeviceRayQueryFeaturesKHR(handlemap, reinterpret_cast<VkPhysicalDeviceRayQueryFeaturesKHR*>(structExtension_out));
+            break;
+        }
+#endif
         default:
         {
             return;
diff --git a/system/vulkan_enc/goldfish_vk_handlemap_guest.h b/system/vulkan_enc/goldfish_vk_handlemap_guest.h
index 0dcedcb..9157ecb 100644
--- a/system/vulkan_enc/goldfish_vk_handlemap_guest.h
+++ b/system/vulkan_enc/goldfish_vk_handlemap_guest.h
@@ -42,38 +42,102 @@
 namespace goldfish_vk {
 
 #ifdef VK_VERSION_1_0
-void handlemap_VkApplicationInfo(
+void handlemap_VkExtent2D(
     VulkanHandleMapping* handlemap,
-    VkApplicationInfo* toMap);
-
-void handlemap_VkInstanceCreateInfo(
-    VulkanHandleMapping* handlemap,
-    VkInstanceCreateInfo* toMap);
-
-void handlemap_VkAllocationCallbacks(
-    VulkanHandleMapping* handlemap,
-    VkAllocationCallbacks* toMap);
-
-void handlemap_VkPhysicalDeviceFeatures(
-    VulkanHandleMapping* handlemap,
-    VkPhysicalDeviceFeatures* toMap);
-
-void handlemap_VkFormatProperties(
-    VulkanHandleMapping* handlemap,
-    VkFormatProperties* toMap);
+    VkExtent2D* toMap);
 
 void handlemap_VkExtent3D(
     VulkanHandleMapping* handlemap,
     VkExtent3D* toMap);
 
+void handlemap_VkOffset2D(
+    VulkanHandleMapping* handlemap,
+    VkOffset2D* toMap);
+
+void handlemap_VkOffset3D(
+    VulkanHandleMapping* handlemap,
+    VkOffset3D* toMap);
+
+void handlemap_VkRect2D(
+    VulkanHandleMapping* handlemap,
+    VkRect2D* toMap);
+
+void handlemap_VkBaseInStructure(
+    VulkanHandleMapping* handlemap,
+    VkBaseInStructure* toMap);
+
+void handlemap_VkBaseOutStructure(
+    VulkanHandleMapping* handlemap,
+    VkBaseOutStructure* toMap);
+
+void handlemap_VkBufferMemoryBarrier(
+    VulkanHandleMapping* handlemap,
+    VkBufferMemoryBarrier* toMap);
+
+void handlemap_VkDispatchIndirectCommand(
+    VulkanHandleMapping* handlemap,
+    VkDispatchIndirectCommand* toMap);
+
+void handlemap_VkDrawIndexedIndirectCommand(
+    VulkanHandleMapping* handlemap,
+    VkDrawIndexedIndirectCommand* toMap);
+
+void handlemap_VkDrawIndirectCommand(
+    VulkanHandleMapping* handlemap,
+    VkDrawIndirectCommand* toMap);
+
+void handlemap_VkImageSubresourceRange(
+    VulkanHandleMapping* handlemap,
+    VkImageSubresourceRange* toMap);
+
+void handlemap_VkImageMemoryBarrier(
+    VulkanHandleMapping* handlemap,
+    VkImageMemoryBarrier* toMap);
+
+void handlemap_VkMemoryBarrier(
+    VulkanHandleMapping* handlemap,
+    VkMemoryBarrier* toMap);
+
+void handlemap_VkAllocationCallbacks(
+    VulkanHandleMapping* handlemap,
+    VkAllocationCallbacks* toMap);
+
+void handlemap_VkApplicationInfo(
+    VulkanHandleMapping* handlemap,
+    VkApplicationInfo* toMap);
+
+void handlemap_VkFormatProperties(
+    VulkanHandleMapping* handlemap,
+    VkFormatProperties* toMap);
+
 void handlemap_VkImageFormatProperties(
     VulkanHandleMapping* handlemap,
     VkImageFormatProperties* toMap);
 
+void handlemap_VkInstanceCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkInstanceCreateInfo* toMap);
+
+void handlemap_VkMemoryHeap(
+    VulkanHandleMapping* handlemap,
+    VkMemoryHeap* toMap);
+
+void handlemap_VkMemoryType(
+    VulkanHandleMapping* handlemap,
+    VkMemoryType* toMap);
+
+void handlemap_VkPhysicalDeviceFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFeatures* toMap);
+
 void handlemap_VkPhysicalDeviceLimits(
     VulkanHandleMapping* handlemap,
     VkPhysicalDeviceLimits* toMap);
 
+void handlemap_VkPhysicalDeviceMemoryProperties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceMemoryProperties* toMap);
+
 void handlemap_VkPhysicalDeviceSparseProperties(
     VulkanHandleMapping* handlemap,
     VkPhysicalDeviceSparseProperties* toMap);
@@ -86,18 +150,6 @@
     VulkanHandleMapping* handlemap,
     VkQueueFamilyProperties* toMap);
 
-void handlemap_VkMemoryType(
-    VulkanHandleMapping* handlemap,
-    VkMemoryType* toMap);
-
-void handlemap_VkMemoryHeap(
-    VulkanHandleMapping* handlemap,
-    VkMemoryHeap* toMap);
-
-void handlemap_VkPhysicalDeviceMemoryProperties(
-    VulkanHandleMapping* handlemap,
-    VkPhysicalDeviceMemoryProperties* toMap);
-
 void handlemap_VkDeviceQueueCreateInfo(
     VulkanHandleMapping* handlemap,
     VkDeviceQueueCreateInfo* toMap);
@@ -118,26 +170,18 @@
     VulkanHandleMapping* handlemap,
     VkSubmitInfo* toMap);
 
-void handlemap_VkMemoryAllocateInfo(
-    VulkanHandleMapping* handlemap,
-    VkMemoryAllocateInfo* toMap);
-
 void handlemap_VkMappedMemoryRange(
     VulkanHandleMapping* handlemap,
     VkMappedMemoryRange* toMap);
 
+void handlemap_VkMemoryAllocateInfo(
+    VulkanHandleMapping* handlemap,
+    VkMemoryAllocateInfo* toMap);
+
 void handlemap_VkMemoryRequirements(
     VulkanHandleMapping* handlemap,
     VkMemoryRequirements* toMap);
 
-void handlemap_VkSparseImageFormatProperties(
-    VulkanHandleMapping* handlemap,
-    VkSparseImageFormatProperties* toMap);
-
-void handlemap_VkSparseImageMemoryRequirements(
-    VulkanHandleMapping* handlemap,
-    VkSparseImageMemoryRequirements* toMap);
-
 void handlemap_VkSparseMemoryBind(
     VulkanHandleMapping* handlemap,
     VkSparseMemoryBind* toMap);
@@ -154,10 +198,6 @@
     VulkanHandleMapping* handlemap,
     VkImageSubresource* toMap);
 
-void handlemap_VkOffset3D(
-    VulkanHandleMapping* handlemap,
-    VkOffset3D* toMap);
-
 void handlemap_VkSparseImageMemoryBind(
     VulkanHandleMapping* handlemap,
     VkSparseImageMemoryBind* toMap);
@@ -170,6 +210,14 @@
     VulkanHandleMapping* handlemap,
     VkBindSparseInfo* toMap);
 
+void handlemap_VkSparseImageFormatProperties(
+    VulkanHandleMapping* handlemap,
+    VkSparseImageFormatProperties* toMap);
+
+void handlemap_VkSparseImageMemoryRequirements(
+    VulkanHandleMapping* handlemap,
+    VkSparseImageMemoryRequirements* toMap);
+
 void handlemap_VkFenceCreateInfo(
     VulkanHandleMapping* handlemap,
     VkFenceCreateInfo* toMap);
@@ -206,10 +254,6 @@
     VulkanHandleMapping* handlemap,
     VkComponentMapping* toMap);
 
-void handlemap_VkImageSubresourceRange(
-    VulkanHandleMapping* handlemap,
-    VkImageSubresourceRange* toMap);
-
 void handlemap_VkImageViewCreateInfo(
     VulkanHandleMapping* handlemap,
     VkImageViewCreateInfo* toMap);
@@ -234,6 +278,10 @@
     VulkanHandleMapping* handlemap,
     VkPipelineShaderStageCreateInfo* toMap);
 
+void handlemap_VkComputePipelineCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkComputePipelineCreateInfo* toMap);
+
 void handlemap_VkVertexInputBindingDescription(
     VulkanHandleMapping* handlemap,
     VkVertexInputBindingDescription* toMap);
@@ -258,18 +306,6 @@
     VulkanHandleMapping* handlemap,
     VkViewport* toMap);
 
-void handlemap_VkOffset2D(
-    VulkanHandleMapping* handlemap,
-    VkOffset2D* toMap);
-
-void handlemap_VkExtent2D(
-    VulkanHandleMapping* handlemap,
-    VkExtent2D* toMap);
-
-void handlemap_VkRect2D(
-    VulkanHandleMapping* handlemap,
-    VkRect2D* toMap);
-
 void handlemap_VkPipelineViewportStateCreateInfo(
     VulkanHandleMapping* handlemap,
     VkPipelineViewportStateCreateInfo* toMap);
@@ -306,10 +342,6 @@
     VulkanHandleMapping* handlemap,
     VkGraphicsPipelineCreateInfo* toMap);
 
-void handlemap_VkComputePipelineCreateInfo(
-    VulkanHandleMapping* handlemap,
-    VkComputePipelineCreateInfo* toMap);
-
 void handlemap_VkPushConstantRange(
     VulkanHandleMapping* handlemap,
     VkPushConstantRange* toMap);
@@ -322,13 +354,17 @@
     VulkanHandleMapping* handlemap,
     VkSamplerCreateInfo* toMap);
 
-void handlemap_VkDescriptorSetLayoutBinding(
+void handlemap_VkCopyDescriptorSet(
     VulkanHandleMapping* handlemap,
-    VkDescriptorSetLayoutBinding* toMap);
+    VkCopyDescriptorSet* toMap);
 
-void handlemap_VkDescriptorSetLayoutCreateInfo(
+void handlemap_VkDescriptorBufferInfo(
     VulkanHandleMapping* handlemap,
-    VkDescriptorSetLayoutCreateInfo* toMap);
+    VkDescriptorBufferInfo* toMap);
+
+void handlemap_VkDescriptorImageInfo(
+    VulkanHandleMapping* handlemap,
+    VkDescriptorImageInfo* toMap);
 
 void handlemap_VkDescriptorPoolSize(
     VulkanHandleMapping* handlemap,
@@ -342,26 +378,18 @@
     VulkanHandleMapping* handlemap,
     VkDescriptorSetAllocateInfo* toMap);
 
-void handlemap_VkDescriptorImageInfo(
+void handlemap_VkDescriptorSetLayoutBinding(
     VulkanHandleMapping* handlemap,
-    VkDescriptorImageInfo* toMap);
+    VkDescriptorSetLayoutBinding* toMap);
 
-void handlemap_VkDescriptorBufferInfo(
+void handlemap_VkDescriptorSetLayoutCreateInfo(
     VulkanHandleMapping* handlemap,
-    VkDescriptorBufferInfo* toMap);
+    VkDescriptorSetLayoutCreateInfo* toMap);
 
 void handlemap_VkWriteDescriptorSet(
     VulkanHandleMapping* handlemap,
     VkWriteDescriptorSet* toMap);
 
-void handlemap_VkCopyDescriptorSet(
-    VulkanHandleMapping* handlemap,
-    VkCopyDescriptorSet* toMap);
-
-void handlemap_VkFramebufferCreateInfo(
-    VulkanHandleMapping* handlemap,
-    VkFramebufferCreateInfo* toMap);
-
 void handlemap_VkAttachmentDescription(
     VulkanHandleMapping* handlemap,
     VkAttachmentDescription* toMap);
@@ -370,6 +398,10 @@
     VulkanHandleMapping* handlemap,
     VkAttachmentReference* toMap);
 
+void handlemap_VkFramebufferCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkFramebufferCreateInfo* toMap);
+
 void handlemap_VkSubpassDescription(
     VulkanHandleMapping* handlemap,
     VkSubpassDescription* toMap);
@@ -406,14 +438,6 @@
     VulkanHandleMapping* handlemap,
     VkImageSubresourceLayers* toMap);
 
-void handlemap_VkImageCopy(
-    VulkanHandleMapping* handlemap,
-    VkImageCopy* toMap);
-
-void handlemap_VkImageBlit(
-    VulkanHandleMapping* handlemap,
-    VkImageBlit* toMap);
-
 void handlemap_VkBufferImageCopy(
     VulkanHandleMapping* handlemap,
     VkBufferImageCopy* toMap);
@@ -438,46 +462,22 @@
     VulkanHandleMapping* handlemap,
     VkClearRect* toMap);
 
+void handlemap_VkImageBlit(
+    VulkanHandleMapping* handlemap,
+    VkImageBlit* toMap);
+
+void handlemap_VkImageCopy(
+    VulkanHandleMapping* handlemap,
+    VkImageCopy* toMap);
+
 void handlemap_VkImageResolve(
     VulkanHandleMapping* handlemap,
     VkImageResolve* toMap);
 
-void handlemap_VkMemoryBarrier(
-    VulkanHandleMapping* handlemap,
-    VkMemoryBarrier* toMap);
-
-void handlemap_VkBufferMemoryBarrier(
-    VulkanHandleMapping* handlemap,
-    VkBufferMemoryBarrier* toMap);
-
-void handlemap_VkImageMemoryBarrier(
-    VulkanHandleMapping* handlemap,
-    VkImageMemoryBarrier* toMap);
-
 void handlemap_VkRenderPassBeginInfo(
     VulkanHandleMapping* handlemap,
     VkRenderPassBeginInfo* toMap);
 
-void handlemap_VkDispatchIndirectCommand(
-    VulkanHandleMapping* handlemap,
-    VkDispatchIndirectCommand* toMap);
-
-void handlemap_VkDrawIndexedIndirectCommand(
-    VulkanHandleMapping* handlemap,
-    VkDrawIndexedIndirectCommand* toMap);
-
-void handlemap_VkDrawIndirectCommand(
-    VulkanHandleMapping* handlemap,
-    VkDrawIndirectCommand* toMap);
-
-void handlemap_VkBaseOutStructure(
-    VulkanHandleMapping* handlemap,
-    VkBaseOutStructure* toMap);
-
-void handlemap_VkBaseInStructure(
-    VulkanHandleMapping* handlemap,
-    VkBaseInStructure* toMap);
-
 #endif
 #ifdef VK_VERSION_1_1
 void handlemap_VkPhysicalDeviceSubgroupProperties(
@@ -628,9 +628,11 @@
     VulkanHandleMapping* handlemap,
     VkPhysicalDeviceMultiviewProperties* toMap);
 
-void handlemap_VkPhysicalDeviceVariablePointerFeatures(
+void handlemap_VkPhysicalDeviceVariablePointersFeatures(
     VulkanHandleMapping* handlemap,
-    VkPhysicalDeviceVariablePointerFeatures* toMap);
+    VkPhysicalDeviceVariablePointersFeatures* toMap);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceVariablePointersFeatures, handlemap_VkPhysicalDeviceVariablePointerFeatures);
 
 void handlemap_VkPhysicalDeviceProtectedMemoryFeatures(
     VulkanHandleMapping* handlemap,
@@ -748,9 +750,217 @@
     VulkanHandleMapping* handlemap,
     VkDescriptorSetLayoutSupport* toMap);
 
-void handlemap_VkPhysicalDeviceShaderDrawParameterFeatures(
+void handlemap_VkPhysicalDeviceShaderDrawParametersFeatures(
     VulkanHandleMapping* handlemap,
-    VkPhysicalDeviceShaderDrawParameterFeatures* toMap);
+    VkPhysicalDeviceShaderDrawParametersFeatures* toMap);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceShaderDrawParametersFeatures, handlemap_VkPhysicalDeviceShaderDrawParameterFeatures);
+
+#endif
+#ifdef VK_VERSION_1_2
+void handlemap_VkPhysicalDeviceVulkan11Features(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceVulkan11Features* toMap);
+
+void handlemap_VkPhysicalDeviceVulkan11Properties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceVulkan11Properties* toMap);
+
+void handlemap_VkPhysicalDeviceVulkan12Features(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceVulkan12Features* toMap);
+
+void handlemap_VkConformanceVersion(
+    VulkanHandleMapping* handlemap,
+    VkConformanceVersion* toMap);
+
+void handlemap_VkPhysicalDeviceVulkan12Properties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceVulkan12Properties* toMap);
+
+void handlemap_VkImageFormatListCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkImageFormatListCreateInfo* toMap);
+
+void handlemap_VkAttachmentDescription2(
+    VulkanHandleMapping* handlemap,
+    VkAttachmentDescription2* toMap);
+
+void handlemap_VkAttachmentReference2(
+    VulkanHandleMapping* handlemap,
+    VkAttachmentReference2* toMap);
+
+void handlemap_VkSubpassDescription2(
+    VulkanHandleMapping* handlemap,
+    VkSubpassDescription2* toMap);
+
+void handlemap_VkSubpassDependency2(
+    VulkanHandleMapping* handlemap,
+    VkSubpassDependency2* toMap);
+
+void handlemap_VkRenderPassCreateInfo2(
+    VulkanHandleMapping* handlemap,
+    VkRenderPassCreateInfo2* toMap);
+
+void handlemap_VkSubpassBeginInfo(
+    VulkanHandleMapping* handlemap,
+    VkSubpassBeginInfo* toMap);
+
+void handlemap_VkSubpassEndInfo(
+    VulkanHandleMapping* handlemap,
+    VkSubpassEndInfo* toMap);
+
+void handlemap_VkPhysicalDevice8BitStorageFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevice8BitStorageFeatures* toMap);
+
+void handlemap_VkPhysicalDeviceDriverProperties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDriverProperties* toMap);
+
+void handlemap_VkPhysicalDeviceShaderAtomicInt64Features(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderAtomicInt64Features* toMap);
+
+void handlemap_VkPhysicalDeviceShaderFloat16Int8Features(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderFloat16Int8Features* toMap);
+
+void handlemap_VkPhysicalDeviceFloatControlsProperties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFloatControlsProperties* toMap);
+
+void handlemap_VkDescriptorSetLayoutBindingFlagsCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkDescriptorSetLayoutBindingFlagsCreateInfo* toMap);
+
+void handlemap_VkPhysicalDeviceDescriptorIndexingFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDescriptorIndexingFeatures* toMap);
+
+void handlemap_VkPhysicalDeviceDescriptorIndexingProperties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDescriptorIndexingProperties* toMap);
+
+void handlemap_VkDescriptorSetVariableDescriptorCountAllocateInfo(
+    VulkanHandleMapping* handlemap,
+    VkDescriptorSetVariableDescriptorCountAllocateInfo* toMap);
+
+void handlemap_VkDescriptorSetVariableDescriptorCountLayoutSupport(
+    VulkanHandleMapping* handlemap,
+    VkDescriptorSetVariableDescriptorCountLayoutSupport* toMap);
+
+void handlemap_VkSubpassDescriptionDepthStencilResolve(
+    VulkanHandleMapping* handlemap,
+    VkSubpassDescriptionDepthStencilResolve* toMap);
+
+void handlemap_VkPhysicalDeviceDepthStencilResolveProperties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDepthStencilResolveProperties* toMap);
+
+void handlemap_VkPhysicalDeviceScalarBlockLayoutFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceScalarBlockLayoutFeatures* toMap);
+
+void handlemap_VkImageStencilUsageCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkImageStencilUsageCreateInfo* toMap);
+
+void handlemap_VkSamplerReductionModeCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkSamplerReductionModeCreateInfo* toMap);
+
+void handlemap_VkPhysicalDeviceSamplerFilterMinmaxProperties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceSamplerFilterMinmaxProperties* toMap);
+
+void handlemap_VkPhysicalDeviceVulkanMemoryModelFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceVulkanMemoryModelFeatures* toMap);
+
+void handlemap_VkPhysicalDeviceImagelessFramebufferFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceImagelessFramebufferFeatures* toMap);
+
+void handlemap_VkFramebufferAttachmentImageInfo(
+    VulkanHandleMapping* handlemap,
+    VkFramebufferAttachmentImageInfo* toMap);
+
+void handlemap_VkFramebufferAttachmentsCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkFramebufferAttachmentsCreateInfo* toMap);
+
+void handlemap_VkRenderPassAttachmentBeginInfo(
+    VulkanHandleMapping* handlemap,
+    VkRenderPassAttachmentBeginInfo* toMap);
+
+void handlemap_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceUniformBufferStandardLayoutFeatures* toMap);
+
+void handlemap_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* toMap);
+
+void handlemap_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* toMap);
+
+void handlemap_VkAttachmentReferenceStencilLayout(
+    VulkanHandleMapping* handlemap,
+    VkAttachmentReferenceStencilLayout* toMap);
+
+void handlemap_VkAttachmentDescriptionStencilLayout(
+    VulkanHandleMapping* handlemap,
+    VkAttachmentDescriptionStencilLayout* toMap);
+
+void handlemap_VkPhysicalDeviceHostQueryResetFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceHostQueryResetFeatures* toMap);
+
+void handlemap_VkPhysicalDeviceTimelineSemaphoreFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceTimelineSemaphoreFeatures* toMap);
+
+void handlemap_VkPhysicalDeviceTimelineSemaphoreProperties(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceTimelineSemaphoreProperties* toMap);
+
+void handlemap_VkSemaphoreTypeCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkSemaphoreTypeCreateInfo* toMap);
+
+void handlemap_VkTimelineSemaphoreSubmitInfo(
+    VulkanHandleMapping* handlemap,
+    VkTimelineSemaphoreSubmitInfo* toMap);
+
+void handlemap_VkSemaphoreWaitInfo(
+    VulkanHandleMapping* handlemap,
+    VkSemaphoreWaitInfo* toMap);
+
+void handlemap_VkSemaphoreSignalInfo(
+    VulkanHandleMapping* handlemap,
+    VkSemaphoreSignalInfo* toMap);
+
+void handlemap_VkPhysicalDeviceBufferDeviceAddressFeatures(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceBufferDeviceAddressFeatures* toMap);
+
+void handlemap_VkBufferDeviceAddressInfo(
+    VulkanHandleMapping* handlemap,
+    VkBufferDeviceAddressInfo* toMap);
+
+void handlemap_VkBufferOpaqueCaptureAddressCreateInfo(
+    VulkanHandleMapping* handlemap,
+    VkBufferOpaqueCaptureAddressCreateInfo* toMap);
+
+void handlemap_VkMemoryOpaqueCaptureAddressAllocateInfo(
+    VulkanHandleMapping* handlemap,
+    VkMemoryOpaqueCaptureAddressAllocateInfo* toMap);
+
+void handlemap_VkDeviceMemoryOpaqueCaptureAddressInfo(
+    VulkanHandleMapping* handlemap,
+    VkDeviceMemoryOpaqueCaptureAddressInfo* toMap);
 
 #endif
 #ifdef VK_KHR_surface
@@ -798,22 +1008,18 @@
 
 #endif
 #ifdef VK_KHR_display
-void handlemap_VkDisplayPropertiesKHR(
-    VulkanHandleMapping* handlemap,
-    VkDisplayPropertiesKHR* toMap);
-
 void handlemap_VkDisplayModeParametersKHR(
     VulkanHandleMapping* handlemap,
     VkDisplayModeParametersKHR* toMap);
 
-void handlemap_VkDisplayModePropertiesKHR(
-    VulkanHandleMapping* handlemap,
-    VkDisplayModePropertiesKHR* toMap);
-
 void handlemap_VkDisplayModeCreateInfoKHR(
     VulkanHandleMapping* handlemap,
     VkDisplayModeCreateInfoKHR* toMap);
 
+void handlemap_VkDisplayModePropertiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkDisplayModePropertiesKHR* toMap);
+
 void handlemap_VkDisplayPlaneCapabilitiesKHR(
     VulkanHandleMapping* handlemap,
     VkDisplayPlaneCapabilitiesKHR* toMap);
@@ -822,6 +1028,10 @@
     VulkanHandleMapping* handlemap,
     VkDisplayPlanePropertiesKHR* toMap);
 
+void handlemap_VkDisplayPropertiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkDisplayPropertiesKHR* toMap);
+
 void handlemap_VkDisplaySurfaceCreateInfoKHR(
     VulkanHandleMapping* handlemap,
     VkDisplaySurfaceCreateInfoKHR* toMap);
@@ -851,12 +1061,6 @@
     VkWaylandSurfaceCreateInfoKHR* toMap);
 
 #endif
-#ifdef VK_KHR_mir_surface
-void handlemap_VkMirSurfaceCreateInfoKHR(
-    VulkanHandleMapping* handlemap,
-    VkMirSurfaceCreateInfoKHR* toMap);
-
-#endif
 #ifdef VK_KHR_android_surface
 void handlemap_VkAndroidSurfaceCreateInfoKHR(
     VulkanHandleMapping* handlemap,
@@ -872,20 +1076,80 @@
 #ifdef VK_KHR_sampler_mirror_clamp_to_edge
 #endif
 #ifdef VK_KHR_multiview
+DEFINE_ALIAS_FUNCTION(handlemap_VkRenderPassMultiviewCreateInfo, handlemap_VkRenderPassMultiviewCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceMultiviewFeatures, handlemap_VkPhysicalDeviceMultiviewFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceMultiviewProperties, handlemap_VkPhysicalDeviceMultiviewPropertiesKHR);
+
 #endif
 #ifdef VK_KHR_get_physical_device_properties2
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceFeatures2, handlemap_VkPhysicalDeviceFeatures2KHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceProperties2, handlemap_VkPhysicalDeviceProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkFormatProperties2, handlemap_VkFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkImageFormatProperties2, handlemap_VkImageFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceImageFormatInfo2, handlemap_VkPhysicalDeviceImageFormatInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkQueueFamilyProperties2, handlemap_VkQueueFamilyProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceMemoryProperties2, handlemap_VkPhysicalDeviceMemoryProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkSparseImageFormatProperties2, handlemap_VkSparseImageFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceSparseImageFormatInfo2, handlemap_VkPhysicalDeviceSparseImageFormatInfo2KHR);
+
 #endif
 #ifdef VK_KHR_device_group
+DEFINE_ALIAS_FUNCTION(handlemap_VkMemoryAllocateFlagsInfo, handlemap_VkMemoryAllocateFlagsInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkDeviceGroupRenderPassBeginInfo, handlemap_VkDeviceGroupRenderPassBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkDeviceGroupCommandBufferBeginInfo, handlemap_VkDeviceGroupCommandBufferBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkDeviceGroupSubmitInfo, handlemap_VkDeviceGroupSubmitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkDeviceGroupBindSparseInfo, handlemap_VkDeviceGroupBindSparseInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkBindBufferMemoryDeviceGroupInfo, handlemap_VkBindBufferMemoryDeviceGroupInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkBindImageMemoryDeviceGroupInfo, handlemap_VkBindImageMemoryDeviceGroupInfoKHR);
+
 #endif
 #ifdef VK_KHR_shader_draw_parameters
 #endif
 #ifdef VK_KHR_maintenance1
 #endif
 #ifdef VK_KHR_device_group_creation
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceGroupProperties, handlemap_VkPhysicalDeviceGroupPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkDeviceGroupDeviceCreateInfo, handlemap_VkDeviceGroupDeviceCreateInfoKHR);
+
 #endif
 #ifdef VK_KHR_external_memory_capabilities
+DEFINE_ALIAS_FUNCTION(handlemap_VkExternalMemoryProperties, handlemap_VkExternalMemoryPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceExternalImageFormatInfo, handlemap_VkPhysicalDeviceExternalImageFormatInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkExternalImageFormatProperties, handlemap_VkExternalImageFormatPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceExternalBufferInfo, handlemap_VkPhysicalDeviceExternalBufferInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkExternalBufferProperties, handlemap_VkExternalBufferPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceIDProperties, handlemap_VkPhysicalDeviceIDPropertiesKHR);
+
 #endif
 #ifdef VK_KHR_external_memory
+DEFINE_ALIAS_FUNCTION(handlemap_VkExternalMemoryImageCreateInfo, handlemap_VkExternalMemoryImageCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkExternalMemoryBufferCreateInfo, handlemap_VkExternalMemoryBufferCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkExportMemoryAllocateInfo, handlemap_VkExportMemoryAllocateInfoKHR);
+
 #endif
 #ifdef VK_KHR_external_memory_win32
 void handlemap_VkImportMemoryWin32HandleInfoKHR(
@@ -926,8 +1190,14 @@
 
 #endif
 #ifdef VK_KHR_external_semaphore_capabilities
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceExternalSemaphoreInfo, handlemap_VkPhysicalDeviceExternalSemaphoreInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkExternalSemaphoreProperties, handlemap_VkExternalSemaphorePropertiesKHR);
+
 #endif
 #ifdef VK_KHR_external_semaphore
+DEFINE_ALIAS_FUNCTION(handlemap_VkExportSemaphoreCreateInfo, handlemap_VkExportSemaphoreCreateInfoKHR);
+
 #endif
 #ifdef VK_KHR_external_semaphore_win32
 void handlemap_VkImportSemaphoreWin32HandleInfoKHR(
@@ -963,7 +1233,15 @@
     VkPhysicalDevicePushDescriptorPropertiesKHR* toMap);
 
 #endif
+#ifdef VK_KHR_shader_float16_int8
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceShaderFloat16Int8Features, handlemap_VkPhysicalDeviceShaderFloat16Int8FeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceShaderFloat16Int8Features, handlemap_VkPhysicalDeviceFloat16Int8FeaturesKHR);
+
+#endif
 #ifdef VK_KHR_16bit_storage
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDevice16BitStorageFeatures, handlemap_VkPhysicalDevice16BitStorageFeaturesKHR);
+
 #endif
 #ifdef VK_KHR_incremental_present
 void handlemap_VkRectLayerKHR(
@@ -980,35 +1258,35 @@
 
 #endif
 #ifdef VK_KHR_descriptor_update_template
+DEFINE_ALIAS_FUNCTION(handlemap_VkDescriptorUpdateTemplateEntry, handlemap_VkDescriptorUpdateTemplateEntryKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkDescriptorUpdateTemplateCreateInfo, handlemap_VkDescriptorUpdateTemplateCreateInfoKHR);
+
+#endif
+#ifdef VK_KHR_imageless_framebuffer
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceImagelessFramebufferFeatures, handlemap_VkPhysicalDeviceImagelessFramebufferFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkFramebufferAttachmentsCreateInfo, handlemap_VkFramebufferAttachmentsCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkFramebufferAttachmentImageInfo, handlemap_VkFramebufferAttachmentImageInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkRenderPassAttachmentBeginInfo, handlemap_VkRenderPassAttachmentBeginInfoKHR);
+
 #endif
 #ifdef VK_KHR_create_renderpass2
-void handlemap_VkAttachmentDescription2KHR(
-    VulkanHandleMapping* handlemap,
-    VkAttachmentDescription2KHR* toMap);
+DEFINE_ALIAS_FUNCTION(handlemap_VkRenderPassCreateInfo2, handlemap_VkRenderPassCreateInfo2KHR);
 
-void handlemap_VkAttachmentReference2KHR(
-    VulkanHandleMapping* handlemap,
-    VkAttachmentReference2KHR* toMap);
+DEFINE_ALIAS_FUNCTION(handlemap_VkAttachmentDescription2, handlemap_VkAttachmentDescription2KHR);
 
-void handlemap_VkSubpassDescription2KHR(
-    VulkanHandleMapping* handlemap,
-    VkSubpassDescription2KHR* toMap);
+DEFINE_ALIAS_FUNCTION(handlemap_VkAttachmentReference2, handlemap_VkAttachmentReference2KHR);
 
-void handlemap_VkSubpassDependency2KHR(
-    VulkanHandleMapping* handlemap,
-    VkSubpassDependency2KHR* toMap);
+DEFINE_ALIAS_FUNCTION(handlemap_VkSubpassDescription2, handlemap_VkSubpassDescription2KHR);
 
-void handlemap_VkRenderPassCreateInfo2KHR(
-    VulkanHandleMapping* handlemap,
-    VkRenderPassCreateInfo2KHR* toMap);
+DEFINE_ALIAS_FUNCTION(handlemap_VkSubpassDependency2, handlemap_VkSubpassDependency2KHR);
 
-void handlemap_VkSubpassBeginInfoKHR(
-    VulkanHandleMapping* handlemap,
-    VkSubpassBeginInfoKHR* toMap);
+DEFINE_ALIAS_FUNCTION(handlemap_VkSubpassBeginInfo, handlemap_VkSubpassBeginInfoKHR);
 
-void handlemap_VkSubpassEndInfoKHR(
-    VulkanHandleMapping* handlemap,
-    VkSubpassEndInfoKHR* toMap);
+DEFINE_ALIAS_FUNCTION(handlemap_VkSubpassEndInfo, handlemap_VkSubpassEndInfoKHR);
 
 #endif
 #ifdef VK_KHR_shared_presentable_image
@@ -1018,8 +1296,14 @@
 
 #endif
 #ifdef VK_KHR_external_fence_capabilities
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceExternalFenceInfo, handlemap_VkPhysicalDeviceExternalFenceInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkExternalFenceProperties, handlemap_VkExternalFencePropertiesKHR);
+
 #endif
 #ifdef VK_KHR_external_fence
+DEFINE_ALIAS_FUNCTION(handlemap_VkExportFenceCreateInfo, handlemap_VkExportFenceCreateInfoKHR);
+
 #endif
 #ifdef VK_KHR_external_fence_win32
 void handlemap_VkImportFenceWin32HandleInfoKHR(
@@ -1045,7 +1329,51 @@
     VkFenceGetFdInfoKHR* toMap);
 
 #endif
+#ifdef VK_KHR_performance_query
+void handlemap_VkPhysicalDevicePerformanceQueryFeaturesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevicePerformanceQueryFeaturesKHR* toMap);
+
+void handlemap_VkPhysicalDevicePerformanceQueryPropertiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevicePerformanceQueryPropertiesKHR* toMap);
+
+void handlemap_VkPerformanceCounterKHR(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceCounterKHR* toMap);
+
+void handlemap_VkPerformanceCounterDescriptionKHR(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceCounterDescriptionKHR* toMap);
+
+void handlemap_VkQueryPoolPerformanceCreateInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkQueryPoolPerformanceCreateInfoKHR* toMap);
+
+void handlemap_VkPerformanceCounterResultKHR(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceCounterResultKHR* toMap);
+
+void handlemap_VkAcquireProfilingLockInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkAcquireProfilingLockInfoKHR* toMap);
+
+void handlemap_VkPerformanceQuerySubmitInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceQuerySubmitInfoKHR* toMap);
+
+#endif
 #ifdef VK_KHR_maintenance2
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDevicePointClippingProperties, handlemap_VkPhysicalDevicePointClippingPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkRenderPassInputAttachmentAspectCreateInfo, handlemap_VkRenderPassInputAttachmentAspectCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkInputAttachmentAspectReference, handlemap_VkInputAttachmentAspectReferenceKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkImageViewUsageCreateInfo, handlemap_VkImageViewUsageCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPipelineTessellationDomainOriginStateCreateInfo, handlemap_VkPipelineTessellationDomainOriginStateCreateInfoKHR);
+
 #endif
 #ifdef VK_KHR_get_surface_capabilities2
 void handlemap_VkPhysicalDeviceSurfaceInfo2KHR(
@@ -1062,6 +1390,10 @@
 
 #endif
 #ifdef VK_KHR_variable_pointers
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceVariablePointersFeatures, handlemap_VkPhysicalDeviceVariablePointerFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceVariablePointersFeatures, handlemap_VkPhysicalDeviceVariablePointersFeaturesKHR);
+
 #endif
 #ifdef VK_KHR_get_display_properties2
 void handlemap_VkDisplayProperties2KHR(
@@ -1086,31 +1418,267 @@
 
 #endif
 #ifdef VK_KHR_dedicated_allocation
+DEFINE_ALIAS_FUNCTION(handlemap_VkMemoryDedicatedRequirements, handlemap_VkMemoryDedicatedRequirementsKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkMemoryDedicatedAllocateInfo, handlemap_VkMemoryDedicatedAllocateInfoKHR);
+
 #endif
 #ifdef VK_KHR_storage_buffer_storage_class
 #endif
 #ifdef VK_KHR_relaxed_block_layout
 #endif
 #ifdef VK_KHR_get_memory_requirements2
+DEFINE_ALIAS_FUNCTION(handlemap_VkBufferMemoryRequirementsInfo2, handlemap_VkBufferMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkImageMemoryRequirementsInfo2, handlemap_VkImageMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkImageSparseMemoryRequirementsInfo2, handlemap_VkImageSparseMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkMemoryRequirements2, handlemap_VkMemoryRequirements2KHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkSparseImageMemoryRequirements2, handlemap_VkSparseImageMemoryRequirements2KHR);
+
 #endif
 #ifdef VK_KHR_image_format_list
-void handlemap_VkImageFormatListCreateInfoKHR(
-    VulkanHandleMapping* handlemap,
-    VkImageFormatListCreateInfoKHR* toMap);
+DEFINE_ALIAS_FUNCTION(handlemap_VkImageFormatListCreateInfo, handlemap_VkImageFormatListCreateInfoKHR);
 
 #endif
 #ifdef VK_KHR_sampler_ycbcr_conversion
+DEFINE_ALIAS_FUNCTION(handlemap_VkSamplerYcbcrConversionCreateInfo, handlemap_VkSamplerYcbcrConversionCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkSamplerYcbcrConversionInfo, handlemap_VkSamplerYcbcrConversionInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkBindImagePlaneMemoryInfo, handlemap_VkBindImagePlaneMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkImagePlaneMemoryRequirementsInfo, handlemap_VkImagePlaneMemoryRequirementsInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceSamplerYcbcrConversionFeatures, handlemap_VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkSamplerYcbcrConversionImageFormatProperties, handlemap_VkSamplerYcbcrConversionImageFormatPropertiesKHR);
+
 #endif
 #ifdef VK_KHR_bind_memory2
+DEFINE_ALIAS_FUNCTION(handlemap_VkBindBufferMemoryInfo, handlemap_VkBindBufferMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkBindImageMemoryInfo, handlemap_VkBindImageMemoryInfoKHR);
+
+#endif
+#ifdef VK_KHR_portability_subset
+void handlemap_VkPhysicalDevicePortabilitySubsetFeaturesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevicePortabilitySubsetFeaturesKHR* toMap);
+
+void handlemap_VkPhysicalDevicePortabilitySubsetPropertiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevicePortabilitySubsetPropertiesKHR* toMap);
+
 #endif
 #ifdef VK_KHR_maintenance3
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceMaintenance3Properties, handlemap_VkPhysicalDeviceMaintenance3PropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkDescriptorSetLayoutSupport, handlemap_VkDescriptorSetLayoutSupportKHR);
+
 #endif
 #ifdef VK_KHR_draw_indirect_count
 #endif
+#ifdef VK_KHR_shader_subgroup_extended_types
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures, handlemap_VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR);
+
+#endif
 #ifdef VK_KHR_8bit_storage
-void handlemap_VkPhysicalDevice8BitStorageFeaturesKHR(
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDevice8BitStorageFeatures, handlemap_VkPhysicalDevice8BitStorageFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_shader_atomic_int64
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceShaderAtomicInt64Features, handlemap_VkPhysicalDeviceShaderAtomicInt64FeaturesKHR);
+
+#endif
+#ifdef VK_KHR_shader_clock
+void handlemap_VkPhysicalDeviceShaderClockFeaturesKHR(
     VulkanHandleMapping* handlemap,
-    VkPhysicalDevice8BitStorageFeaturesKHR* toMap);
+    VkPhysicalDeviceShaderClockFeaturesKHR* toMap);
+
+#endif
+#ifdef VK_KHR_driver_properties
+DEFINE_ALIAS_FUNCTION(handlemap_VkConformanceVersion, handlemap_VkConformanceVersionKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceDriverProperties, handlemap_VkPhysicalDeviceDriverPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_shader_float_controls
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceFloatControlsProperties, handlemap_VkPhysicalDeviceFloatControlsPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+DEFINE_ALIAS_FUNCTION(handlemap_VkSubpassDescriptionDepthStencilResolve, handlemap_VkSubpassDescriptionDepthStencilResolveKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceDepthStencilResolveProperties, handlemap_VkPhysicalDeviceDepthStencilResolvePropertiesKHR);
+
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceTimelineSemaphoreFeatures, handlemap_VkPhysicalDeviceTimelineSemaphoreFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceTimelineSemaphoreProperties, handlemap_VkPhysicalDeviceTimelineSemaphorePropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkSemaphoreTypeCreateInfo, handlemap_VkSemaphoreTypeCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkTimelineSemaphoreSubmitInfo, handlemap_VkTimelineSemaphoreSubmitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkSemaphoreWaitInfo, handlemap_VkSemaphoreWaitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkSemaphoreSignalInfo, handlemap_VkSemaphoreSignalInfoKHR);
+
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceVulkanMemoryModelFeatures, handlemap_VkPhysicalDeviceVulkanMemoryModelFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+void handlemap_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* toMap);
+
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+void handlemap_VkFragmentShadingRateAttachmentInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkFragmentShadingRateAttachmentInfoKHR* toMap);
+
+void handlemap_VkPipelineFragmentShadingRateStateCreateInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkPipelineFragmentShadingRateStateCreateInfoKHR* toMap);
+
+void handlemap_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentShadingRateFeaturesKHR* toMap);
+
+void handlemap_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentShadingRatePropertiesKHR* toMap);
+
+void handlemap_VkPhysicalDeviceFragmentShadingRateKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentShadingRateKHR* toMap);
+
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+void handlemap_VkSurfaceProtectedCapabilitiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkSurfaceProtectedCapabilitiesKHR* toMap);
+
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures, handlemap_VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkAttachmentReferenceStencilLayout, handlemap_VkAttachmentReferenceStencilLayoutKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkAttachmentDescriptionStencilLayout, handlemap_VkAttachmentDescriptionStencilLayoutKHR);
+
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceUniformBufferStandardLayoutFeatures, handlemap_VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_buffer_device_address
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceBufferDeviceAddressFeatures, handlemap_VkPhysicalDeviceBufferDeviceAddressFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkBufferDeviceAddressInfo, handlemap_VkBufferDeviceAddressInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkBufferOpaqueCaptureAddressCreateInfo, handlemap_VkBufferOpaqueCaptureAddressCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkMemoryOpaqueCaptureAddressAllocateInfo, handlemap_VkMemoryOpaqueCaptureAddressAllocateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkDeviceMemoryOpaqueCaptureAddressInfo, handlemap_VkDeviceMemoryOpaqueCaptureAddressInfoKHR);
+
+#endif
+#ifdef VK_KHR_deferred_host_operations
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+void handlemap_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* toMap);
+
+void handlemap_VkPipelineInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkPipelineInfoKHR* toMap);
+
+void handlemap_VkPipelineExecutablePropertiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPipelineExecutablePropertiesKHR* toMap);
+
+void handlemap_VkPipelineExecutableInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkPipelineExecutableInfoKHR* toMap);
+
+void handlemap_VkPipelineExecutableStatisticValueKHR(
+    VulkanHandleMapping* handlemap,
+    VkPipelineExecutableStatisticValueKHR* toMap);
+
+void handlemap_VkPipelineExecutableStatisticKHR(
+    VulkanHandleMapping* handlemap,
+    VkPipelineExecutableStatisticKHR* toMap);
+
+void handlemap_VkPipelineExecutableInternalRepresentationKHR(
+    VulkanHandleMapping* handlemap,
+    VkPipelineExecutableInternalRepresentationKHR* toMap);
+
+#endif
+#ifdef VK_KHR_pipeline_library
+void handlemap_VkPipelineLibraryCreateInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkPipelineLibraryCreateInfoKHR* toMap);
+
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+void handlemap_VkBufferCopy2KHR(
+    VulkanHandleMapping* handlemap,
+    VkBufferCopy2KHR* toMap);
+
+void handlemap_VkCopyBufferInfo2KHR(
+    VulkanHandleMapping* handlemap,
+    VkCopyBufferInfo2KHR* toMap);
+
+void handlemap_VkImageCopy2KHR(
+    VulkanHandleMapping* handlemap,
+    VkImageCopy2KHR* toMap);
+
+void handlemap_VkCopyImageInfo2KHR(
+    VulkanHandleMapping* handlemap,
+    VkCopyImageInfo2KHR* toMap);
+
+void handlemap_VkBufferImageCopy2KHR(
+    VulkanHandleMapping* handlemap,
+    VkBufferImageCopy2KHR* toMap);
+
+void handlemap_VkCopyBufferToImageInfo2KHR(
+    VulkanHandleMapping* handlemap,
+    VkCopyBufferToImageInfo2KHR* toMap);
+
+void handlemap_VkCopyImageToBufferInfo2KHR(
+    VulkanHandleMapping* handlemap,
+    VkCopyImageToBufferInfo2KHR* toMap);
+
+void handlemap_VkImageBlit2KHR(
+    VulkanHandleMapping* handlemap,
+    VkImageBlit2KHR* toMap);
+
+void handlemap_VkBlitImageInfo2KHR(
+    VulkanHandleMapping* handlemap,
+    VkBlitImageInfo2KHR* toMap);
+
+void handlemap_VkImageResolve2KHR(
+    VulkanHandleMapping* handlemap,
+    VkImageResolve2KHR* toMap);
+
+void handlemap_VkResolveImageInfo2KHR(
+    VulkanHandleMapping* handlemap,
+    VkResolveImageInfo2KHR* toMap);
 
 #endif
 #ifdef VK_ANDROID_native_buffer
@@ -1171,6 +1739,30 @@
     VkDedicatedAllocationMemoryAllocateInfoNV* toMap);
 
 #endif
+#ifdef VK_EXT_transform_feedback
+void handlemap_VkPhysicalDeviceTransformFeedbackFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceTransformFeedbackFeaturesEXT* toMap);
+
+void handlemap_VkPhysicalDeviceTransformFeedbackPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceTransformFeedbackPropertiesEXT* toMap);
+
+void handlemap_VkPipelineRasterizationStateStreamCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkPipelineRasterizationStateStreamCreateInfoEXT* toMap);
+
+#endif
+#ifdef VK_NVX_image_view_handle
+void handlemap_VkImageViewHandleInfoNVX(
+    VulkanHandleMapping* handlemap,
+    VkImageViewHandleInfoNVX* toMap);
+
+void handlemap_VkImageViewAddressPropertiesNVX(
+    VulkanHandleMapping* handlemap,
+    VkImageViewAddressPropertiesNVX* toMap);
+
+#endif
 #ifdef VK_AMD_draw_indirect_count
 #endif
 #ifdef VK_AMD_negative_viewport_height
@@ -1197,6 +1789,18 @@
 #endif
 #ifdef VK_AMD_shader_image_load_store_lod
 #endif
+#ifdef VK_GGP_stream_descriptor_surface
+void handlemap_VkStreamDescriptorSurfaceCreateInfoGGP(
+    VulkanHandleMapping* handlemap,
+    VkStreamDescriptorSurfaceCreateInfoGGP* toMap);
+
+#endif
+#ifdef VK_NV_corner_sampled_image
+void handlemap_VkPhysicalDeviceCornerSampledImageFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceCornerSampledImageFeaturesNV* toMap);
+
+#endif
 #ifdef VK_IMG_format_pvrtc
 #endif
 #ifdef VK_NV_external_memory_capabilities
@@ -1247,6 +1851,22 @@
 #endif
 #ifdef VK_EXT_shader_subgroup_vote
 #endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+void handlemap_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* toMap);
+
+#endif
+#ifdef VK_EXT_astc_decode_mode
+void handlemap_VkImageViewASTCDecodeModeEXT(
+    VulkanHandleMapping* handlemap,
+    VkImageViewASTCDecodeModeEXT* toMap);
+
+void handlemap_VkPhysicalDeviceASTCDecodeFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceASTCDecodeFeaturesEXT* toMap);
+
+#endif
 #ifdef VK_EXT_conditional_rendering
 void handlemap_VkConditionalRenderingBeginInfoEXT(
     VulkanHandleMapping* handlemap,
@@ -1261,64 +1881,6 @@
     VkCommandBufferInheritanceConditionalRenderingInfoEXT* toMap);
 
 #endif
-#ifdef VK_NVX_device_generated_commands
-void handlemap_VkDeviceGeneratedCommandsFeaturesNVX(
-    VulkanHandleMapping* handlemap,
-    VkDeviceGeneratedCommandsFeaturesNVX* toMap);
-
-void handlemap_VkDeviceGeneratedCommandsLimitsNVX(
-    VulkanHandleMapping* handlemap,
-    VkDeviceGeneratedCommandsLimitsNVX* toMap);
-
-void handlemap_VkIndirectCommandsTokenNVX(
-    VulkanHandleMapping* handlemap,
-    VkIndirectCommandsTokenNVX* toMap);
-
-void handlemap_VkIndirectCommandsLayoutTokenNVX(
-    VulkanHandleMapping* handlemap,
-    VkIndirectCommandsLayoutTokenNVX* toMap);
-
-void handlemap_VkIndirectCommandsLayoutCreateInfoNVX(
-    VulkanHandleMapping* handlemap,
-    VkIndirectCommandsLayoutCreateInfoNVX* toMap);
-
-void handlemap_VkCmdProcessCommandsInfoNVX(
-    VulkanHandleMapping* handlemap,
-    VkCmdProcessCommandsInfoNVX* toMap);
-
-void handlemap_VkCmdReserveSpaceForCommandsInfoNVX(
-    VulkanHandleMapping* handlemap,
-    VkCmdReserveSpaceForCommandsInfoNVX* toMap);
-
-void handlemap_VkObjectTableCreateInfoNVX(
-    VulkanHandleMapping* handlemap,
-    VkObjectTableCreateInfoNVX* toMap);
-
-void handlemap_VkObjectTableEntryNVX(
-    VulkanHandleMapping* handlemap,
-    VkObjectTableEntryNVX* toMap);
-
-void handlemap_VkObjectTablePipelineEntryNVX(
-    VulkanHandleMapping* handlemap,
-    VkObjectTablePipelineEntryNVX* toMap);
-
-void handlemap_VkObjectTableDescriptorSetEntryNVX(
-    VulkanHandleMapping* handlemap,
-    VkObjectTableDescriptorSetEntryNVX* toMap);
-
-void handlemap_VkObjectTableVertexBufferEntryNVX(
-    VulkanHandleMapping* handlemap,
-    VkObjectTableVertexBufferEntryNVX* toMap);
-
-void handlemap_VkObjectTableIndexBufferEntryNVX(
-    VulkanHandleMapping* handlemap,
-    VkObjectTableIndexBufferEntryNVX* toMap);
-
-void handlemap_VkObjectTablePushConstantEntryNVX(
-    VulkanHandleMapping* handlemap,
-    VkObjectTablePushConstantEntryNVX* toMap);
-
-#endif
 #ifdef VK_NV_clip_space_w_scaling
 void handlemap_VkViewportWScalingNV(
     VulkanHandleMapping* handlemap,
@@ -1417,6 +1979,16 @@
     VkPipelineRasterizationConservativeStateCreateInfoEXT* toMap);
 
 #endif
+#ifdef VK_EXT_depth_clip_enable
+void handlemap_VkPhysicalDeviceDepthClipEnableFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDepthClipEnableFeaturesEXT* toMap);
+
+void handlemap_VkPipelineRasterizationDepthClipStateCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkPipelineRasterizationDepthClipStateCreateInfoEXT* toMap);
+
+#endif
 #ifdef VK_EXT_swapchain_colorspace
 #endif
 #ifdef VK_EXT_hdr_metadata
@@ -1441,23 +2013,21 @@
     VkMacOSSurfaceCreateInfoMVK* toMap);
 
 #endif
+#ifdef VK_MVK_moltenvk
+#endif
 #ifdef VK_EXT_external_memory_dma_buf
 #endif
 #ifdef VK_EXT_queue_family_foreign
 #endif
 #ifdef VK_EXT_debug_utils
-void handlemap_VkDebugUtilsObjectNameInfoEXT(
-    VulkanHandleMapping* handlemap,
-    VkDebugUtilsObjectNameInfoEXT* toMap);
-
-void handlemap_VkDebugUtilsObjectTagInfoEXT(
-    VulkanHandleMapping* handlemap,
-    VkDebugUtilsObjectTagInfoEXT* toMap);
-
 void handlemap_VkDebugUtilsLabelEXT(
     VulkanHandleMapping* handlemap,
     VkDebugUtilsLabelEXT* toMap);
 
+void handlemap_VkDebugUtilsObjectNameInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkDebugUtilsObjectNameInfoEXT* toMap);
+
 void handlemap_VkDebugUtilsMessengerCallbackDataEXT(
     VulkanHandleMapping* handlemap,
     VkDebugUtilsMessengerCallbackDataEXT* toMap);
@@ -1466,6 +2036,10 @@
     VulkanHandleMapping* handlemap,
     VkDebugUtilsMessengerCreateInfoEXT* toMap);
 
+void handlemap_VkDebugUtilsObjectTagInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkDebugUtilsObjectTagInfoEXT* toMap);
+
 #endif
 #ifdef VK_ANDROID_external_memory_android_hardware_buffer
 void handlemap_VkAndroidHardwareBufferUsageANDROID(
@@ -1494,13 +2068,9 @@
 
 #endif
 #ifdef VK_EXT_sampler_filter_minmax
-void handlemap_VkSamplerReductionModeCreateInfoEXT(
-    VulkanHandleMapping* handlemap,
-    VkSamplerReductionModeCreateInfoEXT* toMap);
+DEFINE_ALIAS_FUNCTION(handlemap_VkSamplerReductionModeCreateInfo, handlemap_VkSamplerReductionModeCreateInfoEXT);
 
-void handlemap_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(
-    VulkanHandleMapping* handlemap,
-    VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT* toMap);
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceSamplerFilterMinmaxProperties, handlemap_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT);
 
 #endif
 #ifdef VK_AMD_gpu_shader_int16
@@ -1509,6 +2079,24 @@
 #endif
 #ifdef VK_AMD_shader_fragment_mask
 #endif
+#ifdef VK_EXT_inline_uniform_block
+void handlemap_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceInlineUniformBlockFeaturesEXT* toMap);
+
+void handlemap_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceInlineUniformBlockPropertiesEXT* toMap);
+
+void handlemap_VkWriteDescriptorSetInlineUniformBlockEXT(
+    VulkanHandleMapping* handlemap,
+    VkWriteDescriptorSetInlineUniformBlockEXT* toMap);
+
+void handlemap_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkDescriptorPoolInlineUniformBlockCreateInfoEXT* toMap);
+
+#endif
 #ifdef VK_EXT_shader_stencil_export
 #endif
 #ifdef VK_EXT_sample_locations
@@ -1573,8 +2161,44 @@
 #endif
 #ifdef VK_NV_fill_rectangle
 #endif
+#ifdef VK_NV_shader_sm_builtins
+void handlemap_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* toMap);
+
+void handlemap_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* toMap);
+
+#endif
 #ifdef VK_EXT_post_depth_coverage
 #endif
+#ifdef VK_EXT_image_drm_format_modifier
+void handlemap_VkDrmFormatModifierPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkDrmFormatModifierPropertiesEXT* toMap);
+
+void handlemap_VkDrmFormatModifierPropertiesListEXT(
+    VulkanHandleMapping* handlemap,
+    VkDrmFormatModifierPropertiesListEXT* toMap);
+
+void handlemap_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceImageDrmFormatModifierInfoEXT* toMap);
+
+void handlemap_VkImageDrmFormatModifierListCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkImageDrmFormatModifierListCreateInfoEXT* toMap);
+
+void handlemap_VkImageDrmFormatModifierExplicitCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkImageDrmFormatModifierExplicitCreateInfoEXT* toMap);
+
+void handlemap_VkImageDrmFormatModifierPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkImageDrmFormatModifierPropertiesEXT* toMap);
+
+#endif
 #ifdef VK_EXT_validation_cache
 void handlemap_VkValidationCacheCreateInfoEXT(
     VulkanHandleMapping* handlemap,
@@ -1586,29 +2210,139 @@
 
 #endif
 #ifdef VK_EXT_descriptor_indexing
-void handlemap_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(
-    VulkanHandleMapping* handlemap,
-    VkDescriptorSetLayoutBindingFlagsCreateInfoEXT* toMap);
+DEFINE_ALIAS_FUNCTION(handlemap_VkDescriptorSetLayoutBindingFlagsCreateInfo, handlemap_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT);
 
-void handlemap_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(
-    VulkanHandleMapping* handlemap,
-    VkPhysicalDeviceDescriptorIndexingFeaturesEXT* toMap);
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceDescriptorIndexingFeatures, handlemap_VkPhysicalDeviceDescriptorIndexingFeaturesEXT);
 
-void handlemap_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(
-    VulkanHandleMapping* handlemap,
-    VkPhysicalDeviceDescriptorIndexingPropertiesEXT* toMap);
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceDescriptorIndexingProperties, handlemap_VkPhysicalDeviceDescriptorIndexingPropertiesEXT);
 
-void handlemap_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(
-    VulkanHandleMapping* handlemap,
-    VkDescriptorSetVariableDescriptorCountAllocateInfoEXT* toMap);
+DEFINE_ALIAS_FUNCTION(handlemap_VkDescriptorSetVariableDescriptorCountAllocateInfo, handlemap_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT);
 
-void handlemap_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(
-    VulkanHandleMapping* handlemap,
-    VkDescriptorSetVariableDescriptorCountLayoutSupportEXT* toMap);
+DEFINE_ALIAS_FUNCTION(handlemap_VkDescriptorSetVariableDescriptorCountLayoutSupport, handlemap_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT);
 
 #endif
 #ifdef VK_EXT_shader_viewport_index_layer
 #endif
+#ifdef VK_NV_shading_rate_image
+void handlemap_VkShadingRatePaletteNV(
+    VulkanHandleMapping* handlemap,
+    VkShadingRatePaletteNV* toMap);
+
+void handlemap_VkPipelineViewportShadingRateImageStateCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkPipelineViewportShadingRateImageStateCreateInfoNV* toMap);
+
+void handlemap_VkPhysicalDeviceShadingRateImageFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShadingRateImageFeaturesNV* toMap);
+
+void handlemap_VkPhysicalDeviceShadingRateImagePropertiesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShadingRateImagePropertiesNV* toMap);
+
+void handlemap_VkCoarseSampleLocationNV(
+    VulkanHandleMapping* handlemap,
+    VkCoarseSampleLocationNV* toMap);
+
+void handlemap_VkCoarseSampleOrderCustomNV(
+    VulkanHandleMapping* handlemap,
+    VkCoarseSampleOrderCustomNV* toMap);
+
+void handlemap_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* toMap);
+
+#endif
+#ifdef VK_NV_ray_tracing
+void handlemap_VkRayTracingShaderGroupCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkRayTracingShaderGroupCreateInfoNV* toMap);
+
+void handlemap_VkRayTracingPipelineCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkRayTracingPipelineCreateInfoNV* toMap);
+
+void handlemap_VkGeometryTrianglesNV(
+    VulkanHandleMapping* handlemap,
+    VkGeometryTrianglesNV* toMap);
+
+void handlemap_VkGeometryAABBNV(
+    VulkanHandleMapping* handlemap,
+    VkGeometryAABBNV* toMap);
+
+void handlemap_VkGeometryDataNV(
+    VulkanHandleMapping* handlemap,
+    VkGeometryDataNV* toMap);
+
+void handlemap_VkGeometryNV(
+    VulkanHandleMapping* handlemap,
+    VkGeometryNV* toMap);
+
+void handlemap_VkAccelerationStructureInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureInfoNV* toMap);
+
+void handlemap_VkAccelerationStructureCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureCreateInfoNV* toMap);
+
+void handlemap_VkBindAccelerationStructureMemoryInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkBindAccelerationStructureMemoryInfoNV* toMap);
+
+void handlemap_VkWriteDescriptorSetAccelerationStructureNV(
+    VulkanHandleMapping* handlemap,
+    VkWriteDescriptorSetAccelerationStructureNV* toMap);
+
+void handlemap_VkAccelerationStructureMemoryRequirementsInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureMemoryRequirementsInfoNV* toMap);
+
+void handlemap_VkPhysicalDeviceRayTracingPropertiesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceRayTracingPropertiesNV* toMap);
+
+void handlemap_VkTransformMatrixKHR(
+    VulkanHandleMapping* handlemap,
+    VkTransformMatrixKHR* toMap);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkTransformMatrixKHR, handlemap_VkTransformMatrixNV);
+
+void handlemap_VkAabbPositionsKHR(
+    VulkanHandleMapping* handlemap,
+    VkAabbPositionsKHR* toMap);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkAabbPositionsKHR, handlemap_VkAabbPositionsNV);
+
+void handlemap_VkAccelerationStructureInstanceKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureInstanceKHR* toMap);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkAccelerationStructureInstanceKHR, handlemap_VkAccelerationStructureInstanceNV);
+
+#endif
+#ifdef VK_NV_representative_fragment_test
+void handlemap_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* toMap);
+
+void handlemap_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkPipelineRepresentativeFragmentTestStateCreateInfoNV* toMap);
+
+#endif
+#ifdef VK_EXT_filter_cubic
+void handlemap_VkPhysicalDeviceImageViewImageFormatInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceImageViewImageFormatInfoEXT* toMap);
+
+void handlemap_VkFilterCubicImageViewImageFormatPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkFilterCubicImageViewImageFormatPropertiesEXT* toMap);
+
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
+#endif
 #ifdef VK_EXT_global_priority
 void handlemap_VkDeviceQueueGlobalPriorityCreateInfoEXT(
     VulkanHandleMapping* handlemap,
@@ -1631,12 +2365,30 @@
 #endif
 #ifdef VK_AMD_buffer_marker
 #endif
+#ifdef VK_AMD_pipeline_compiler_control
+void handlemap_VkPipelineCompilerControlCreateInfoAMD(
+    VulkanHandleMapping* handlemap,
+    VkPipelineCompilerControlCreateInfoAMD* toMap);
+
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+void handlemap_VkCalibratedTimestampInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkCalibratedTimestampInfoEXT* toMap);
+
+#endif
 #ifdef VK_AMD_shader_core_properties
 void handlemap_VkPhysicalDeviceShaderCorePropertiesAMD(
     VulkanHandleMapping* handlemap,
     VkPhysicalDeviceShaderCorePropertiesAMD* toMap);
 
 #endif
+#ifdef VK_AMD_memory_overallocation_behavior
+void handlemap_VkDeviceMemoryOverallocationCreateInfoAMD(
+    VulkanHandleMapping* handlemap,
+    VkDeviceMemoryOverallocationCreateInfoAMD* toMap);
+
+#endif
 #ifdef VK_EXT_vertex_attribute_divisor
 void handlemap_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(
     VulkanHandleMapping* handlemap,
@@ -1650,9 +2402,71 @@
     VulkanHandleMapping* handlemap,
     VkPipelineVertexInputDivisorStateCreateInfoEXT* toMap);
 
+void handlemap_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* toMap);
+
+#endif
+#ifdef VK_GGP_frame_token
+void handlemap_VkPresentFrameTokenGGP(
+    VulkanHandleMapping* handlemap,
+    VkPresentFrameTokenGGP* toMap);
+
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+void handlemap_VkPipelineCreationFeedbackEXT(
+    VulkanHandleMapping* handlemap,
+    VkPipelineCreationFeedbackEXT* toMap);
+
+void handlemap_VkPipelineCreationFeedbackCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkPipelineCreationFeedbackCreateInfoEXT* toMap);
+
 #endif
 #ifdef VK_NV_shader_subgroup_partitioned
 #endif
+#ifdef VK_NV_compute_shader_derivatives
+void handlemap_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* toMap);
+
+#endif
+#ifdef VK_NV_mesh_shader
+void handlemap_VkPhysicalDeviceMeshShaderFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceMeshShaderFeaturesNV* toMap);
+
+void handlemap_VkPhysicalDeviceMeshShaderPropertiesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceMeshShaderPropertiesNV* toMap);
+
+void handlemap_VkDrawMeshTasksIndirectCommandNV(
+    VulkanHandleMapping* handlemap,
+    VkDrawMeshTasksIndirectCommandNV* toMap);
+
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+void handlemap_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* toMap);
+
+#endif
+#ifdef VK_NV_shader_image_footprint
+void handlemap_VkPhysicalDeviceShaderImageFootprintFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderImageFootprintFeaturesNV* toMap);
+
+#endif
+#ifdef VK_NV_scissor_exclusive
+void handlemap_VkPipelineViewportExclusiveScissorStateCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkPipelineViewportExclusiveScissorStateCreateInfoNV* toMap);
+
+void handlemap_VkPhysicalDeviceExclusiveScissorFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceExclusiveScissorFeaturesNV* toMap);
+
+#endif
 #ifdef VK_NV_device_diagnostic_checkpoints
 void handlemap_VkQueueFamilyCheckpointPropertiesNV(
     VulkanHandleMapping* handlemap,
@@ -1663,27 +2477,605 @@
     VkCheckpointDataNV* toMap);
 
 #endif
-#ifdef VK_GOOGLE_address_space
+#ifdef VK_INTEL_shader_integer_functions2
+void handlemap_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* toMap);
+
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_INTEL_performance_query
+void handlemap_VkPerformanceValueDataINTEL(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceValueDataINTEL* toMap);
+
+void handlemap_VkPerformanceValueINTEL(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceValueINTEL* toMap);
+
+void handlemap_VkInitializePerformanceApiInfoINTEL(
+    VulkanHandleMapping* handlemap,
+    VkInitializePerformanceApiInfoINTEL* toMap);
+
+void handlemap_VkQueryPoolPerformanceQueryCreateInfoINTEL(
+    VulkanHandleMapping* handlemap,
+    VkQueryPoolPerformanceQueryCreateInfoINTEL* toMap);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkQueryPoolPerformanceQueryCreateInfoINTEL, handlemap_VkQueryPoolCreateInfoINTEL);
+
+void handlemap_VkPerformanceMarkerInfoINTEL(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceMarkerInfoINTEL* toMap);
+
+void handlemap_VkPerformanceStreamMarkerInfoINTEL(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceStreamMarkerInfoINTEL* toMap);
+
+void handlemap_VkPerformanceOverrideInfoINTEL(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceOverrideInfoINTEL* toMap);
+
+void handlemap_VkPerformanceConfigurationAcquireInfoINTEL(
+    VulkanHandleMapping* handlemap,
+    VkPerformanceConfigurationAcquireInfoINTEL* toMap);
+
+#endif
+#ifdef VK_EXT_pci_bus_info
+void handlemap_VkPhysicalDevicePCIBusInfoPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevicePCIBusInfoPropertiesEXT* toMap);
+
+#endif
+#ifdef VK_AMD_display_native_hdr
+void handlemap_VkDisplayNativeHdrSurfaceCapabilitiesAMD(
+    VulkanHandleMapping* handlemap,
+    VkDisplayNativeHdrSurfaceCapabilitiesAMD* toMap);
+
+void handlemap_VkSwapchainDisplayNativeHdrCreateInfoAMD(
+    VulkanHandleMapping* handlemap,
+    VkSwapchainDisplayNativeHdrCreateInfoAMD* toMap);
+
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+void handlemap_VkImagePipeSurfaceCreateInfoFUCHSIA(
+    VulkanHandleMapping* handlemap,
+    VkImagePipeSurfaceCreateInfoFUCHSIA* toMap);
+
+#endif
+#ifdef VK_EXT_metal_surface
+void handlemap_VkMetalSurfaceCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkMetalSurfaceCreateInfoEXT* toMap);
+
+#endif
+#ifdef VK_EXT_fragment_density_map
+void handlemap_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentDensityMapFeaturesEXT* toMap);
+
+void handlemap_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentDensityMapPropertiesEXT* toMap);
+
+void handlemap_VkRenderPassFragmentDensityMapCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkRenderPassFragmentDensityMapCreateInfoEXT* toMap);
+
+#endif
+#ifdef VK_EXT_scalar_block_layout
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceScalarBlockLayoutFeatures, handlemap_VkPhysicalDeviceScalarBlockLayoutFeaturesEXT);
+
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+void handlemap_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* toMap);
+
+void handlemap_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* toMap);
+
+void handlemap_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* toMap);
+
+#endif
+#ifdef VK_AMD_shader_core_properties2
+void handlemap_VkPhysicalDeviceShaderCoreProperties2AMD(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderCoreProperties2AMD* toMap);
+
+#endif
+#ifdef VK_AMD_device_coherent_memory
+void handlemap_VkPhysicalDeviceCoherentMemoryFeaturesAMD(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceCoherentMemoryFeaturesAMD* toMap);
+
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+void handlemap_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* toMap);
+
+#endif
+#ifdef VK_EXT_memory_budget
+void handlemap_VkPhysicalDeviceMemoryBudgetPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceMemoryBudgetPropertiesEXT* toMap);
+
+#endif
+#ifdef VK_EXT_memory_priority
+void handlemap_VkPhysicalDeviceMemoryPriorityFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceMemoryPriorityFeaturesEXT* toMap);
+
+void handlemap_VkMemoryPriorityAllocateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkMemoryPriorityAllocateInfoEXT* toMap);
+
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+void handlemap_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* toMap);
+
+#endif
+#ifdef VK_EXT_buffer_device_address
+void handlemap_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* toMap);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT, handlemap_VkPhysicalDeviceBufferAddressFeaturesEXT);
+
+DEFINE_ALIAS_FUNCTION(handlemap_VkBufferDeviceAddressInfo, handlemap_VkBufferDeviceAddressInfoEXT);
+
+void handlemap_VkBufferDeviceAddressCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkBufferDeviceAddressCreateInfoEXT* toMap);
+
+#endif
+#ifdef VK_EXT_tooling_info
+void handlemap_VkPhysicalDeviceToolPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceToolPropertiesEXT* toMap);
+
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+DEFINE_ALIAS_FUNCTION(handlemap_VkImageStencilUsageCreateInfo, handlemap_VkImageStencilUsageCreateInfoEXT);
+
+#endif
+#ifdef VK_EXT_validation_features
+void handlemap_VkValidationFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkValidationFeaturesEXT* toMap);
+
+#endif
+#ifdef VK_NV_cooperative_matrix
+void handlemap_VkCooperativeMatrixPropertiesNV(
+    VulkanHandleMapping* handlemap,
+    VkCooperativeMatrixPropertiesNV* toMap);
+
+void handlemap_VkPhysicalDeviceCooperativeMatrixFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceCooperativeMatrixFeaturesNV* toMap);
+
+void handlemap_VkPhysicalDeviceCooperativeMatrixPropertiesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceCooperativeMatrixPropertiesNV* toMap);
+
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+void handlemap_VkPhysicalDeviceCoverageReductionModeFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceCoverageReductionModeFeaturesNV* toMap);
+
+void handlemap_VkPipelineCoverageReductionStateCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkPipelineCoverageReductionStateCreateInfoNV* toMap);
+
+void handlemap_VkFramebufferMixedSamplesCombinationNV(
+    VulkanHandleMapping* handlemap,
+    VkFramebufferMixedSamplesCombinationNV* toMap);
+
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+void handlemap_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* toMap);
+
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+void handlemap_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* toMap);
+
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+void handlemap_VkSurfaceFullScreenExclusiveInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkSurfaceFullScreenExclusiveInfoEXT* toMap);
+
+void handlemap_VkSurfaceCapabilitiesFullScreenExclusiveEXT(
+    VulkanHandleMapping* handlemap,
+    VkSurfaceCapabilitiesFullScreenExclusiveEXT* toMap);
+
+void handlemap_VkSurfaceFullScreenExclusiveWin32InfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkSurfaceFullScreenExclusiveWin32InfoEXT* toMap);
+
+#endif
+#ifdef VK_EXT_headless_surface
+void handlemap_VkHeadlessSurfaceCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkHeadlessSurfaceCreateInfoEXT* toMap);
+
+#endif
+#ifdef VK_EXT_line_rasterization
+void handlemap_VkPhysicalDeviceLineRasterizationFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceLineRasterizationFeaturesEXT* toMap);
+
+void handlemap_VkPhysicalDeviceLineRasterizationPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceLineRasterizationPropertiesEXT* toMap);
+
+void handlemap_VkPipelineRasterizationLineStateCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkPipelineRasterizationLineStateCreateInfoEXT* toMap);
+
+#endif
+#ifdef VK_EXT_shader_atomic_float
+void handlemap_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* toMap);
+
+#endif
+#ifdef VK_EXT_host_query_reset
+DEFINE_ALIAS_FUNCTION(handlemap_VkPhysicalDeviceHostQueryResetFeatures, handlemap_VkPhysicalDeviceHostQueryResetFeaturesEXT);
+
+#endif
+#ifdef VK_EXT_index_type_uint8
+void handlemap_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceIndexTypeUint8FeaturesEXT* toMap);
+
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+void handlemap_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* toMap);
+
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+void handlemap_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* toMap);
+
+#endif
+#ifdef VK_NV_device_generated_commands
+void handlemap_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* toMap);
+
+void handlemap_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* toMap);
+
+void handlemap_VkGraphicsShaderGroupCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkGraphicsShaderGroupCreateInfoNV* toMap);
+
+void handlemap_VkGraphicsPipelineShaderGroupsCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkGraphicsPipelineShaderGroupsCreateInfoNV* toMap);
+
+void handlemap_VkBindShaderGroupIndirectCommandNV(
+    VulkanHandleMapping* handlemap,
+    VkBindShaderGroupIndirectCommandNV* toMap);
+
+void handlemap_VkBindIndexBufferIndirectCommandNV(
+    VulkanHandleMapping* handlemap,
+    VkBindIndexBufferIndirectCommandNV* toMap);
+
+void handlemap_VkBindVertexBufferIndirectCommandNV(
+    VulkanHandleMapping* handlemap,
+    VkBindVertexBufferIndirectCommandNV* toMap);
+
+void handlemap_VkSetStateFlagsIndirectCommandNV(
+    VulkanHandleMapping* handlemap,
+    VkSetStateFlagsIndirectCommandNV* toMap);
+
+void handlemap_VkIndirectCommandsStreamNV(
+    VulkanHandleMapping* handlemap,
+    VkIndirectCommandsStreamNV* toMap);
+
+void handlemap_VkIndirectCommandsLayoutTokenNV(
+    VulkanHandleMapping* handlemap,
+    VkIndirectCommandsLayoutTokenNV* toMap);
+
+void handlemap_VkIndirectCommandsLayoutCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkIndirectCommandsLayoutCreateInfoNV* toMap);
+
+void handlemap_VkGeneratedCommandsInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkGeneratedCommandsInfoNV* toMap);
+
+void handlemap_VkGeneratedCommandsMemoryRequirementsInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkGeneratedCommandsMemoryRequirementsInfoNV* toMap);
+
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+void handlemap_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* toMap);
+
+void handlemap_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* toMap);
+
+#endif
+#ifdef VK_QCOM_render_pass_transform
+void handlemap_VkRenderPassTransformBeginInfoQCOM(
+    VulkanHandleMapping* handlemap,
+    VkRenderPassTransformBeginInfoQCOM* toMap);
+
+void handlemap_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(
+    VulkanHandleMapping* handlemap,
+    VkCommandBufferInheritanceRenderPassTransformInfoQCOM* toMap);
+
+#endif
+#ifdef VK_EXT_device_memory_report
+void handlemap_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* toMap);
+
+void handlemap_VkDeviceMemoryReportCallbackDataEXT(
+    VulkanHandleMapping* handlemap,
+    VkDeviceMemoryReportCallbackDataEXT* toMap);
+
+void handlemap_VkDeviceDeviceMemoryReportCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkDeviceDeviceMemoryReportCreateInfoEXT* toMap);
+
+#endif
+#ifdef VK_EXT_robustness2
+void handlemap_VkPhysicalDeviceRobustness2FeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceRobustness2FeaturesEXT* toMap);
+
+void handlemap_VkPhysicalDeviceRobustness2PropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceRobustness2PropertiesEXT* toMap);
+
+#endif
+#ifdef VK_EXT_custom_border_color
+void handlemap_VkSamplerCustomBorderColorCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkSamplerCustomBorderColorCreateInfoEXT* toMap);
+
+void handlemap_VkPhysicalDeviceCustomBorderColorPropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceCustomBorderColorPropertiesEXT* toMap);
+
+void handlemap_VkPhysicalDeviceCustomBorderColorFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceCustomBorderColorFeaturesEXT* toMap);
+
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+void handlemap_VkPhysicalDevicePrivateDataFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevicePrivateDataFeaturesEXT* toMap);
+
+void handlemap_VkDevicePrivateDataCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkDevicePrivateDataCreateInfoEXT* toMap);
+
+void handlemap_VkPrivateDataSlotCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkPrivateDataSlotCreateInfoEXT* toMap);
+
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+void handlemap_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* toMap);
+
+#endif
+#ifdef VK_NV_device_diagnostics_config
+void handlemap_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceDiagnosticsConfigFeaturesNV* toMap);
+
+void handlemap_VkDeviceDiagnosticsConfigCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkDeviceDiagnosticsConfigCreateInfoNV* toMap);
+
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+void handlemap_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* toMap);
+
+void handlemap_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* toMap);
+
+void handlemap_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(
+    VulkanHandleMapping* handlemap,
+    VkPipelineFragmentShadingRateEnumStateCreateInfoNV* toMap);
+
+#endif
+#ifdef VK_EXT_fragment_density_map2
+void handlemap_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* toMap);
+
+void handlemap_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* toMap);
+
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+void handlemap_VkCopyCommandTransformInfoQCOM(
+    VulkanHandleMapping* handlemap,
+    VkCopyCommandTransformInfoQCOM* toMap);
+
+#endif
+#ifdef VK_EXT_image_robustness
+void handlemap_VkPhysicalDeviceImageRobustnessFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceImageRobustnessFeaturesEXT* toMap);
+
+#endif
+#ifdef VK_EXT_4444_formats
+void handlemap_VkPhysicalDevice4444FormatsFeaturesEXT(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDevice4444FormatsFeaturesEXT* toMap);
+
+#endif
+#ifdef VK_EXT_directfb_surface
+void handlemap_VkDirectFBSurfaceCreateInfoEXT(
+    VulkanHandleMapping* handlemap,
+    VkDirectFBSurfaceCreateInfoEXT* toMap);
+
+#endif
+#ifdef VK_GOOGLE_gfxstream
 void handlemap_VkImportColorBufferGOOGLE(
     VulkanHandleMapping* handlemap,
     VkImportColorBufferGOOGLE* toMap);
 
+void handlemap_VkImportBufferGOOGLE(
+    VulkanHandleMapping* handlemap,
+    VkImportBufferGOOGLE* toMap);
+
 void handlemap_VkImportPhysicalAddressGOOGLE(
     VulkanHandleMapping* handlemap,
     VkImportPhysicalAddressGOOGLE* toMap);
 
 #endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
+#ifdef VK_KHR_acceleration_structure
+void handlemap_VkDeviceOrHostAddressKHR(
+    VulkanHandleMapping* handlemap,
+    VkDeviceOrHostAddressKHR* toMap);
+
+void handlemap_VkDeviceOrHostAddressConstKHR(
+    VulkanHandleMapping* handlemap,
+    VkDeviceOrHostAddressConstKHR* toMap);
+
+void handlemap_VkAccelerationStructureBuildRangeInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureBuildRangeInfoKHR* toMap);
+
+void handlemap_VkAccelerationStructureGeometryTrianglesDataKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureGeometryTrianglesDataKHR* toMap);
+
+void handlemap_VkAccelerationStructureGeometryAabbsDataKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureGeometryAabbsDataKHR* toMap);
+
+void handlemap_VkAccelerationStructureGeometryInstancesDataKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureGeometryInstancesDataKHR* toMap);
+
+void handlemap_VkAccelerationStructureGeometryDataKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureGeometryDataKHR* toMap);
+
+void handlemap_VkAccelerationStructureGeometryKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureGeometryKHR* toMap);
+
+void handlemap_VkAccelerationStructureBuildGeometryInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureBuildGeometryInfoKHR* toMap);
+
+void handlemap_VkAccelerationStructureCreateInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureCreateInfoKHR* toMap);
+
+void handlemap_VkWriteDescriptorSetAccelerationStructureKHR(
+    VulkanHandleMapping* handlemap,
+    VkWriteDescriptorSetAccelerationStructureKHR* toMap);
+
+void handlemap_VkPhysicalDeviceAccelerationStructureFeaturesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceAccelerationStructureFeaturesKHR* toMap);
+
+void handlemap_VkPhysicalDeviceAccelerationStructurePropertiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceAccelerationStructurePropertiesKHR* toMap);
+
+void handlemap_VkAccelerationStructureDeviceAddressInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureDeviceAddressInfoKHR* toMap);
+
+void handlemap_VkAccelerationStructureVersionInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureVersionInfoKHR* toMap);
+
+void handlemap_VkCopyAccelerationStructureToMemoryInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkCopyAccelerationStructureToMemoryInfoKHR* toMap);
+
+void handlemap_VkCopyMemoryToAccelerationStructureInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkCopyMemoryToAccelerationStructureInfoKHR* toMap);
+
+void handlemap_VkCopyAccelerationStructureInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkCopyAccelerationStructureInfoKHR* toMap);
+
+void handlemap_VkAccelerationStructureBuildSizesInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkAccelerationStructureBuildSizesInfoKHR* toMap);
+
 #endif
-#ifdef VK_GOOGLE_async_command_buffers
+#ifdef VK_KHR_ray_tracing_pipeline
+void handlemap_VkRayTracingShaderGroupCreateInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkRayTracingShaderGroupCreateInfoKHR* toMap);
+
+void handlemap_VkRayTracingPipelineInterfaceCreateInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkRayTracingPipelineInterfaceCreateInfoKHR* toMap);
+
+void handlemap_VkRayTracingPipelineCreateInfoKHR(
+    VulkanHandleMapping* handlemap,
+    VkRayTracingPipelineCreateInfoKHR* toMap);
+
+void handlemap_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceRayTracingPipelineFeaturesKHR* toMap);
+
+void handlemap_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceRayTracingPipelinePropertiesKHR* toMap);
+
+void handlemap_VkStridedDeviceAddressRegionKHR(
+    VulkanHandleMapping* handlemap,
+    VkStridedDeviceAddressRegionKHR* toMap);
+
+void handlemap_VkTraceRaysIndirectCommandKHR(
+    VulkanHandleMapping* handlemap,
+    VkTraceRaysIndirectCommandKHR* toMap);
+
 #endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
-#endif
-#ifdef VK_GOOGLE_address_space_info
-#endif
-#ifdef VK_GOOGLE_free_memory_sync
+#ifdef VK_KHR_ray_query
+void handlemap_VkPhysicalDeviceRayQueryFeaturesKHR(
+    VulkanHandleMapping* handlemap,
+    VkPhysicalDeviceRayQueryFeaturesKHR* toMap);
+
 #endif
 
 } // namespace goldfish_vk
diff --git a/system/vulkan_enc/goldfish_vk_marshaling_guest.cpp b/system/vulkan_enc/goldfish_vk_marshaling_guest.cpp
index 371e92e..5b1e964 100644
--- a/system/vulkan_enc/goldfish_vk_marshaling_guest.cpp
+++ b/system/vulkan_enc/goldfish_vk_marshaling_guest.cpp
@@ -33,25 +33,460 @@
 
 void marshal_extension_struct(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const void* structExtension);
 
 void unmarshal_extension_struct(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     void* structExtension_out);
 
 #ifdef VK_VERSION_1_0
+void marshal_VkExtent2D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExtent2D* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((uint32_t*)&forMarshaling->width, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->height, sizeof(uint32_t));
+}
+
+void unmarshal_VkExtent2D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkExtent2D* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((uint32_t*)&forUnmarshaling->width, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->height, sizeof(uint32_t));
+}
+
+void marshal_VkExtent3D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExtent3D* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((uint32_t*)&forMarshaling->width, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->height, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->depth, sizeof(uint32_t));
+}
+
+void unmarshal_VkExtent3D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkExtent3D* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((uint32_t*)&forUnmarshaling->width, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->height, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->depth, sizeof(uint32_t));
+}
+
+void marshal_VkOffset2D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkOffset2D* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((int32_t*)&forMarshaling->x, sizeof(int32_t));
+    vkStream->write((int32_t*)&forMarshaling->y, sizeof(int32_t));
+}
+
+void unmarshal_VkOffset2D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkOffset2D* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((int32_t*)&forUnmarshaling->x, sizeof(int32_t));
+    vkStream->read((int32_t*)&forUnmarshaling->y, sizeof(int32_t));
+}
+
+void marshal_VkOffset3D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkOffset3D* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((int32_t*)&forMarshaling->x, sizeof(int32_t));
+    vkStream->write((int32_t*)&forMarshaling->y, sizeof(int32_t));
+    vkStream->write((int32_t*)&forMarshaling->z, sizeof(int32_t));
+}
+
+void unmarshal_VkOffset3D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkOffset3D* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((int32_t*)&forUnmarshaling->x, sizeof(int32_t));
+    vkStream->read((int32_t*)&forUnmarshaling->y, sizeof(int32_t));
+    vkStream->read((int32_t*)&forUnmarshaling->z, sizeof(int32_t));
+}
+
+void marshal_VkRect2D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRect2D* forMarshaling)
+{
+    (void)rootType;
+    marshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forMarshaling->offset));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->extent));
+}
+
+void unmarshal_VkRect2D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRect2D* forUnmarshaling)
+{
+    (void)rootType;
+    unmarshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forUnmarshaling->offset));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->extent));
+}
+
+void marshal_VkBaseInStructure(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBaseInStructure* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+}
+
+void unmarshal_VkBaseInStructure(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBaseInStructure* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+}
+
+void marshal_VkBaseOutStructure(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBaseOutStructure* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+}
+
+void unmarshal_VkBaseOutStructure(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBaseOutStructure* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+}
+
+void marshal_VkBufferMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferMemoryBarrier* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkAccessFlags*)&forMarshaling->srcAccessMask, sizeof(VkAccessFlags));
+    vkStream->write((VkAccessFlags*)&forMarshaling->dstAccessMask, sizeof(VkAccessFlags));
+    vkStream->write((uint32_t*)&forMarshaling->srcQueueFamilyIndex, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->dstQueueFamilyIndex, sizeof(uint32_t));
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
+    vkStream->write((VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+}
+
+void unmarshal_VkBufferMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBufferMemoryBarrier* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkAccessFlags*)&forUnmarshaling->srcAccessMask, sizeof(VkAccessFlags));
+    vkStream->read((VkAccessFlags*)&forUnmarshaling->dstAccessMask, sizeof(VkAccessFlags));
+    vkStream->read((uint32_t*)&forUnmarshaling->srcQueueFamilyIndex, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->dstQueueFamilyIndex, sizeof(uint32_t));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_0, (VkBuffer*)&forUnmarshaling->buffer, 1);
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->offset, sizeof(VkDeviceSize));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->size, sizeof(VkDeviceSize));
+}
+
+void marshal_VkDispatchIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDispatchIndirectCommand* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((uint32_t*)&forMarshaling->x, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->y, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->z, sizeof(uint32_t));
+}
+
+void unmarshal_VkDispatchIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDispatchIndirectCommand* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((uint32_t*)&forUnmarshaling->x, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->y, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->z, sizeof(uint32_t));
+}
+
+void marshal_VkDrawIndexedIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrawIndexedIndirectCommand* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((uint32_t*)&forMarshaling->indexCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->instanceCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->firstIndex, sizeof(uint32_t));
+    vkStream->write((int32_t*)&forMarshaling->vertexOffset, sizeof(int32_t));
+    vkStream->write((uint32_t*)&forMarshaling->firstInstance, sizeof(uint32_t));
+}
+
+void unmarshal_VkDrawIndexedIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDrawIndexedIndirectCommand* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((uint32_t*)&forUnmarshaling->indexCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->instanceCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->firstIndex, sizeof(uint32_t));
+    vkStream->read((int32_t*)&forUnmarshaling->vertexOffset, sizeof(int32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->firstInstance, sizeof(uint32_t));
+}
+
+void marshal_VkDrawIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrawIndirectCommand* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((uint32_t*)&forMarshaling->vertexCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->instanceCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->firstVertex, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->firstInstance, sizeof(uint32_t));
+}
+
+void unmarshal_VkDrawIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDrawIndirectCommand* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((uint32_t*)&forUnmarshaling->vertexCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->instanceCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->firstVertex, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->firstInstance, sizeof(uint32_t));
+}
+
+void marshal_VkImageSubresourceRange(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageSubresourceRange* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkImageAspectFlags*)&forMarshaling->aspectMask, sizeof(VkImageAspectFlags));
+    vkStream->write((uint32_t*)&forMarshaling->baseMipLevel, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->levelCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->baseArrayLayer, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->layerCount, sizeof(uint32_t));
+}
+
+void unmarshal_VkImageSubresourceRange(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageSubresourceRange* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkImageAspectFlags*)&forUnmarshaling->aspectMask, sizeof(VkImageAspectFlags));
+    vkStream->read((uint32_t*)&forUnmarshaling->baseMipLevel, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->levelCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->baseArrayLayer, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->layerCount, sizeof(uint32_t));
+}
+
+void marshal_VkImageMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageMemoryBarrier* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkAccessFlags*)&forMarshaling->srcAccessMask, sizeof(VkAccessFlags));
+    vkStream->write((VkAccessFlags*)&forMarshaling->dstAccessMask, sizeof(VkAccessFlags));
+    vkStream->write((VkImageLayout*)&forMarshaling->oldLayout, sizeof(VkImageLayout));
+    vkStream->write((VkImageLayout*)&forMarshaling->newLayout, sizeof(VkImageLayout));
+    vkStream->write((uint32_t*)&forMarshaling->srcQueueFamilyIndex, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->dstQueueFamilyIndex, sizeof(uint32_t));
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    marshal_VkImageSubresourceRange(vkStream, rootType, (VkImageSubresourceRange*)(&forMarshaling->subresourceRange));
+}
+
+void unmarshal_VkImageMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageMemoryBarrier* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkAccessFlags*)&forUnmarshaling->srcAccessMask, sizeof(VkAccessFlags));
+    vkStream->read((VkAccessFlags*)&forUnmarshaling->dstAccessMask, sizeof(VkAccessFlags));
+    vkStream->read((VkImageLayout*)&forUnmarshaling->oldLayout, sizeof(VkImageLayout));
+    vkStream->read((VkImageLayout*)&forUnmarshaling->newLayout, sizeof(VkImageLayout));
+    vkStream->read((uint32_t*)&forUnmarshaling->srcQueueFamilyIndex, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->dstQueueFamilyIndex, sizeof(uint32_t));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_0, (VkImage*)&forUnmarshaling->image, 1);
+    unmarshal_VkImageSubresourceRange(vkStream, rootType, (VkImageSubresourceRange*)(&forUnmarshaling->subresourceRange));
+}
+
+void marshal_VkMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryBarrier* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkAccessFlags*)&forMarshaling->srcAccessMask, sizeof(VkAccessFlags));
+    vkStream->write((VkAccessFlags*)&forMarshaling->dstAccessMask, sizeof(VkAccessFlags));
+}
+
+void unmarshal_VkMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkMemoryBarrier* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkAccessFlags*)&forUnmarshaling->srcAccessMask, sizeof(VkAccessFlags));
+    vkStream->read((VkAccessFlags*)&forUnmarshaling->dstAccessMask, sizeof(VkAccessFlags));
+}
+
+void marshal_VkAllocationCallbacks(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAllocationCallbacks* forMarshaling)
+{
+    (void)rootType;
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pUserData;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->pUserData)
+    {
+        vkStream->write((void*)forMarshaling->pUserData, sizeof(uint8_t));
+    }
+    uint64_t cgen_var_1 = (uint64_t)forMarshaling->pfnAllocation;
+    vkStream->putBe64(cgen_var_1);
+    uint64_t cgen_var_2 = (uint64_t)forMarshaling->pfnReallocation;
+    vkStream->putBe64(cgen_var_2);
+    uint64_t cgen_var_3 = (uint64_t)forMarshaling->pfnFree;
+    vkStream->putBe64(cgen_var_3);
+    uint64_t cgen_var_4 = (uint64_t)forMarshaling->pfnInternalAllocation;
+    vkStream->putBe64(cgen_var_4);
+    uint64_t cgen_var_5 = (uint64_t)forMarshaling->pfnInternalFree;
+    vkStream->putBe64(cgen_var_5);
+}
+
+void unmarshal_VkAllocationCallbacks(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAllocationCallbacks* forUnmarshaling)
+{
+    (void)rootType;
+    // WARNING PTR CHECK
+    void* check_pUserData;
+    check_pUserData = (void*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pUserData)
+    {
+        if (!(check_pUserData))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pUserData inconsistent between guest and host\n");
+        }
+        vkStream->read((void*)forUnmarshaling->pUserData, sizeof(uint8_t));
+    }
+    forUnmarshaling->pfnAllocation = (PFN_vkAllocationFunction)vkStream->getBe64();
+    forUnmarshaling->pfnReallocation = (PFN_vkReallocationFunction)vkStream->getBe64();
+    forUnmarshaling->pfnFree = (PFN_vkFreeFunction)vkStream->getBe64();
+    forUnmarshaling->pfnInternalAllocation = (PFN_vkInternalAllocationNotification)vkStream->getBe64();
+    forUnmarshaling->pfnInternalFree = (PFN_vkInternalFreeNotification)vkStream->getBe64();
+}
+
 void marshal_VkApplicationInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkApplicationInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
     {
         // WARNING PTR CHECK
@@ -70,8 +505,8 @@
     if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
     {
         // WARNING PTR CHECK
-        uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pEngineName;
-        vkStream->putBe64(cgen_var_1);
+        uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pEngineName;
+        vkStream->putBe64(cgen_var_0);
         if (forMarshaling->pEngineName)
         {
             vkStream->putString(forMarshaling->pEngineName);
@@ -87,17 +522,16 @@
 
 void unmarshal_VkApplicationInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkApplicationInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
     {
         // WARNING PTR CHECK
@@ -139,25 +573,73 @@
     vkStream->read((uint32_t*)&forUnmarshaling->apiVersion, sizeof(uint32_t));
 }
 
+void marshal_VkFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFormatProperties* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkFormatFeatureFlags*)&forMarshaling->linearTilingFeatures, sizeof(VkFormatFeatureFlags));
+    vkStream->write((VkFormatFeatureFlags*)&forMarshaling->optimalTilingFeatures, sizeof(VkFormatFeatureFlags));
+    vkStream->write((VkFormatFeatureFlags*)&forMarshaling->bufferFeatures, sizeof(VkFormatFeatureFlags));
+}
+
+void unmarshal_VkFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkFormatProperties* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkFormatFeatureFlags*)&forUnmarshaling->linearTilingFeatures, sizeof(VkFormatFeatureFlags));
+    vkStream->read((VkFormatFeatureFlags*)&forUnmarshaling->optimalTilingFeatures, sizeof(VkFormatFeatureFlags));
+    vkStream->read((VkFormatFeatureFlags*)&forUnmarshaling->bufferFeatures, sizeof(VkFormatFeatureFlags));
+}
+
+void marshal_VkImageFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageFormatProperties* forMarshaling)
+{
+    (void)rootType;
+    marshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->maxExtent));
+    vkStream->write((uint32_t*)&forMarshaling->maxMipLevels, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxArrayLayers, sizeof(uint32_t));
+    vkStream->write((VkSampleCountFlags*)&forMarshaling->sampleCounts, sizeof(VkSampleCountFlags));
+    vkStream->write((VkDeviceSize*)&forMarshaling->maxResourceSize, sizeof(VkDeviceSize));
+}
+
+void unmarshal_VkImageFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageFormatProperties* forUnmarshaling)
+{
+    (void)rootType;
+    unmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forUnmarshaling->maxExtent));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxMipLevels, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxArrayLayers, sizeof(uint32_t));
+    vkStream->read((VkSampleCountFlags*)&forUnmarshaling->sampleCounts, sizeof(VkSampleCountFlags));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->maxResourceSize, sizeof(VkDeviceSize));
+}
+
 void marshal_VkInstanceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkInstanceCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkInstanceCreateFlags*)&forMarshaling->flags, sizeof(VkInstanceCreateFlags));
     // WARNING PTR CHECK
-    uint64_t cgen_var_4 = (uint64_t)(uintptr_t)forMarshaling->pApplicationInfo;
-    vkStream->putBe64(cgen_var_4);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pApplicationInfo;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pApplicationInfo)
     {
-        marshal_VkApplicationInfo(vkStream, (const VkApplicationInfo*)(forMarshaling->pApplicationInfo));
+        marshal_VkApplicationInfo(vkStream, rootType, (const VkApplicationInfo*)(forMarshaling->pApplicationInfo));
     }
     vkStream->write((uint32_t*)&forMarshaling->enabledLayerCount, sizeof(uint32_t));
     saveStringArray(vkStream, forMarshaling->ppEnabledLayerNames, forMarshaling->enabledLayerCount);
@@ -167,17 +649,16 @@
 
 void unmarshal_VkInstanceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkInstanceCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkInstanceCreateFlags*)&forUnmarshaling->flags, sizeof(VkInstanceCreateFlags));
     // WARNING PTR CHECK
     const VkApplicationInfo* check_pApplicationInfo;
@@ -188,7 +669,7 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pApplicationInfo inconsistent between guest and host\n");
         }
-        unmarshal_VkApplicationInfo(vkStream, (VkApplicationInfo*)(forUnmarshaling->pApplicationInfo));
+        unmarshal_VkApplicationInfo(vkStream, rootType, (VkApplicationInfo*)(forUnmarshaling->pApplicationInfo));
     }
     vkStream->read((uint32_t*)&forUnmarshaling->enabledLayerCount, sizeof(uint32_t));
     vkStream->loadStringArrayInPlace((char***)&forUnmarshaling->ppEnabledLayerNames);
@@ -196,55 +677,52 @@
     vkStream->loadStringArrayInPlace((char***)&forUnmarshaling->ppEnabledExtensionNames);
 }
 
-void marshal_VkAllocationCallbacks(
+void marshal_VkMemoryHeap(
     VulkanStreamGuest* vkStream,
-    const VkAllocationCallbacks* forMarshaling)
+    VkStructureType rootType,
+    const VkMemoryHeap* forMarshaling)
 {
-    // WARNING PTR CHECK
-    uint64_t cgen_var_6 = (uint64_t)(uintptr_t)forMarshaling->pUserData;
-    vkStream->putBe64(cgen_var_6);
-    if (forMarshaling->pUserData)
-    {
-        vkStream->write((void*)forMarshaling->pUserData, sizeof(uint8_t));
-    }
-    uint64_t cgen_var_7 = (uint64_t)forMarshaling->pfnAllocation;
-    vkStream->putBe64(cgen_var_7);
-    uint64_t cgen_var_8 = (uint64_t)forMarshaling->pfnReallocation;
-    vkStream->putBe64(cgen_var_8);
-    uint64_t cgen_var_9 = (uint64_t)forMarshaling->pfnFree;
-    vkStream->putBe64(cgen_var_9);
-    uint64_t cgen_var_10 = (uint64_t)forMarshaling->pfnInternalAllocation;
-    vkStream->putBe64(cgen_var_10);
-    uint64_t cgen_var_11 = (uint64_t)forMarshaling->pfnInternalFree;
-    vkStream->putBe64(cgen_var_11);
+    (void)rootType;
+    vkStream->write((VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    vkStream->write((VkMemoryHeapFlags*)&forMarshaling->flags, sizeof(VkMemoryHeapFlags));
 }
 
-void unmarshal_VkAllocationCallbacks(
+void unmarshal_VkMemoryHeap(
     VulkanStreamGuest* vkStream,
-    VkAllocationCallbacks* forUnmarshaling)
+    VkStructureType rootType,
+    VkMemoryHeap* forUnmarshaling)
 {
-    // WARNING PTR CHECK
-    void* check_pUserData;
-    check_pUserData = (void*)(uintptr_t)vkStream->getBe64();
-    if (forUnmarshaling->pUserData)
-    {
-        if (!(check_pUserData))
-        {
-            fprintf(stderr, "fatal: forUnmarshaling->pUserData inconsistent between guest and host\n");
-        }
-        vkStream->read((void*)forUnmarshaling->pUserData, sizeof(uint8_t));
-    }
-    forUnmarshaling->pfnAllocation = (PFN_vkAllocationFunction)vkStream->getBe64();
-    forUnmarshaling->pfnReallocation = (PFN_vkReallocationFunction)vkStream->getBe64();
-    forUnmarshaling->pfnFree = (PFN_vkFreeFunction)vkStream->getBe64();
-    forUnmarshaling->pfnInternalAllocation = (PFN_vkInternalAllocationNotification)vkStream->getBe64();
-    forUnmarshaling->pfnInternalFree = (PFN_vkInternalFreeNotification)vkStream->getBe64();
+    (void)rootType;
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->size, sizeof(VkDeviceSize));
+    vkStream->read((VkMemoryHeapFlags*)&forUnmarshaling->flags, sizeof(VkMemoryHeapFlags));
+}
+
+void marshal_VkMemoryType(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryType* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkMemoryPropertyFlags*)&forMarshaling->propertyFlags, sizeof(VkMemoryPropertyFlags));
+    vkStream->write((uint32_t*)&forMarshaling->heapIndex, sizeof(uint32_t));
+}
+
+void unmarshal_VkMemoryType(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkMemoryType* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkMemoryPropertyFlags*)&forUnmarshaling->propertyFlags, sizeof(VkMemoryPropertyFlags));
+    vkStream->read((uint32_t*)&forUnmarshaling->heapIndex, sizeof(uint32_t));
 }
 
 void marshal_VkPhysicalDeviceFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceFeatures* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkBool32*)&forMarshaling->robustBufferAccess, sizeof(VkBool32));
     vkStream->write((VkBool32*)&forMarshaling->fullDrawIndexUint32, sizeof(VkBool32));
     vkStream->write((VkBool32*)&forMarshaling->imageCubeArray, sizeof(VkBool32));
@@ -304,8 +782,10 @@
 
 void unmarshal_VkPhysicalDeviceFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceFeatures* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkBool32*)&forUnmarshaling->robustBufferAccess, sizeof(VkBool32));
     vkStream->read((VkBool32*)&forUnmarshaling->fullDrawIndexUint32, sizeof(VkBool32));
     vkStream->read((VkBool32*)&forUnmarshaling->imageCubeArray, sizeof(VkBool32));
@@ -363,68 +843,12 @@
     vkStream->read((VkBool32*)&forUnmarshaling->inheritedQueries, sizeof(VkBool32));
 }
 
-void marshal_VkFormatProperties(
-    VulkanStreamGuest* vkStream,
-    const VkFormatProperties* forMarshaling)
-{
-    vkStream->write((VkFormatFeatureFlags*)&forMarshaling->linearTilingFeatures, sizeof(VkFormatFeatureFlags));
-    vkStream->write((VkFormatFeatureFlags*)&forMarshaling->optimalTilingFeatures, sizeof(VkFormatFeatureFlags));
-    vkStream->write((VkFormatFeatureFlags*)&forMarshaling->bufferFeatures, sizeof(VkFormatFeatureFlags));
-}
-
-void unmarshal_VkFormatProperties(
-    VulkanStreamGuest* vkStream,
-    VkFormatProperties* forUnmarshaling)
-{
-    vkStream->read((VkFormatFeatureFlags*)&forUnmarshaling->linearTilingFeatures, sizeof(VkFormatFeatureFlags));
-    vkStream->read((VkFormatFeatureFlags*)&forUnmarshaling->optimalTilingFeatures, sizeof(VkFormatFeatureFlags));
-    vkStream->read((VkFormatFeatureFlags*)&forUnmarshaling->bufferFeatures, sizeof(VkFormatFeatureFlags));
-}
-
-void marshal_VkExtent3D(
-    VulkanStreamGuest* vkStream,
-    const VkExtent3D* forMarshaling)
-{
-    vkStream->write((uint32_t*)&forMarshaling->width, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->height, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->depth, sizeof(uint32_t));
-}
-
-void unmarshal_VkExtent3D(
-    VulkanStreamGuest* vkStream,
-    VkExtent3D* forUnmarshaling)
-{
-    vkStream->read((uint32_t*)&forUnmarshaling->width, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->height, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->depth, sizeof(uint32_t));
-}
-
-void marshal_VkImageFormatProperties(
-    VulkanStreamGuest* vkStream,
-    const VkImageFormatProperties* forMarshaling)
-{
-    marshal_VkExtent3D(vkStream, (VkExtent3D*)(&forMarshaling->maxExtent));
-    vkStream->write((uint32_t*)&forMarshaling->maxMipLevels, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxArrayLayers, sizeof(uint32_t));
-    vkStream->write((VkSampleCountFlags*)&forMarshaling->sampleCounts, sizeof(VkSampleCountFlags));
-    vkStream->write((VkDeviceSize*)&forMarshaling->maxResourceSize, sizeof(VkDeviceSize));
-}
-
-void unmarshal_VkImageFormatProperties(
-    VulkanStreamGuest* vkStream,
-    VkImageFormatProperties* forUnmarshaling)
-{
-    unmarshal_VkExtent3D(vkStream, (VkExtent3D*)(&forUnmarshaling->maxExtent));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxMipLevels, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxArrayLayers, sizeof(uint32_t));
-    vkStream->read((VkSampleCountFlags*)&forUnmarshaling->sampleCounts, sizeof(VkSampleCountFlags));
-    vkStream->read((VkDeviceSize*)&forUnmarshaling->maxResourceSize, sizeof(VkDeviceSize));
-}
-
 void marshal_VkPhysicalDeviceLimits(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceLimits* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->maxImageDimension1D, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->maxImageDimension2D, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->maxImageDimension3D, sizeof(uint32_t));
@@ -491,8 +915,8 @@
     vkStream->write((uint32_t*)forMarshaling->maxViewportDimensions, 2 * sizeof(uint32_t));
     vkStream->write((float*)forMarshaling->viewportBoundsRange, 2 * sizeof(float));
     vkStream->write((uint32_t*)&forMarshaling->viewportSubPixelBits, sizeof(uint32_t));
-    uint64_t cgen_var_18 = (uint64_t)forMarshaling->minMemoryMapAlignment;
-    vkStream->putBe64(cgen_var_18);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->minMemoryMapAlignment;
+    vkStream->putBe64(cgen_var_0);
     vkStream->write((VkDeviceSize*)&forMarshaling->minTexelBufferOffsetAlignment, sizeof(VkDeviceSize));
     vkStream->write((VkDeviceSize*)&forMarshaling->minUniformBufferOffsetAlignment, sizeof(VkDeviceSize));
     vkStream->write((VkDeviceSize*)&forMarshaling->minStorageBufferOffsetAlignment, sizeof(VkDeviceSize));
@@ -536,8 +960,10 @@
 
 void unmarshal_VkPhysicalDeviceLimits(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceLimits* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->maxImageDimension1D, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->maxImageDimension2D, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->maxImageDimension3D, sizeof(uint32_t));
@@ -646,10 +1072,48 @@
     vkStream->read((VkDeviceSize*)&forUnmarshaling->nonCoherentAtomSize, sizeof(VkDeviceSize));
 }
 
+void marshal_VkPhysicalDeviceMemoryProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryProperties* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((uint32_t*)&forMarshaling->memoryTypeCount, sizeof(uint32_t));
+    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_TYPES; ++i)
+    {
+        marshal_VkMemoryType(vkStream, rootType, (VkMemoryType*)(forMarshaling->memoryTypes + i));
+    }
+    vkStream->write((uint32_t*)&forMarshaling->memoryHeapCount, sizeof(uint32_t));
+    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_HEAPS; ++i)
+    {
+        marshal_VkMemoryHeap(vkStream, rootType, (VkMemoryHeap*)(forMarshaling->memoryHeaps + i));
+    }
+}
+
+void unmarshal_VkPhysicalDeviceMemoryProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceMemoryProperties* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((uint32_t*)&forUnmarshaling->memoryTypeCount, sizeof(uint32_t));
+    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_TYPES; ++i)
+    {
+        unmarshal_VkMemoryType(vkStream, rootType, (VkMemoryType*)(forUnmarshaling->memoryTypes + i));
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->memoryHeapCount, sizeof(uint32_t));
+    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_HEAPS; ++i)
+    {
+        unmarshal_VkMemoryHeap(vkStream, rootType, (VkMemoryHeap*)(forUnmarshaling->memoryHeaps + i));
+    }
+}
+
 void marshal_VkPhysicalDeviceSparseProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceSparseProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkBool32*)&forMarshaling->residencyStandard2DBlockShape, sizeof(VkBool32));
     vkStream->write((VkBool32*)&forMarshaling->residencyStandard2DMultisampleBlockShape, sizeof(VkBool32));
     vkStream->write((VkBool32*)&forMarshaling->residencyStandard3DBlockShape, sizeof(VkBool32));
@@ -659,8 +1123,10 @@
 
 void unmarshal_VkPhysicalDeviceSparseProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceSparseProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkBool32*)&forUnmarshaling->residencyStandard2DBlockShape, sizeof(VkBool32));
     vkStream->read((VkBool32*)&forUnmarshaling->residencyStandard2DMultisampleBlockShape, sizeof(VkBool32));
     vkStream->read((VkBool32*)&forUnmarshaling->residencyStandard3DBlockShape, sizeof(VkBool32));
@@ -670,8 +1136,10 @@
 
 void marshal_VkPhysicalDeviceProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->apiVersion, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->driverVersion, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->vendorID, sizeof(uint32_t));
@@ -679,14 +1147,16 @@
     vkStream->write((VkPhysicalDeviceType*)&forMarshaling->deviceType, sizeof(VkPhysicalDeviceType));
     vkStream->write((char*)forMarshaling->deviceName, VK_MAX_PHYSICAL_DEVICE_NAME_SIZE * sizeof(char));
     vkStream->write((uint8_t*)forMarshaling->pipelineCacheUUID, VK_UUID_SIZE * sizeof(uint8_t));
-    marshal_VkPhysicalDeviceLimits(vkStream, (VkPhysicalDeviceLimits*)(&forMarshaling->limits));
-    marshal_VkPhysicalDeviceSparseProperties(vkStream, (VkPhysicalDeviceSparseProperties*)(&forMarshaling->sparseProperties));
+    marshal_VkPhysicalDeviceLimits(vkStream, rootType, (VkPhysicalDeviceLimits*)(&forMarshaling->limits));
+    marshal_VkPhysicalDeviceSparseProperties(vkStream, rootType, (VkPhysicalDeviceSparseProperties*)(&forMarshaling->sparseProperties));
 }
 
 void unmarshal_VkPhysicalDeviceProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->apiVersion, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->driverVersion, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->vendorID, sizeof(uint32_t));
@@ -694,106 +1164,46 @@
     vkStream->read((VkPhysicalDeviceType*)&forUnmarshaling->deviceType, sizeof(VkPhysicalDeviceType));
     vkStream->read((char*)forUnmarshaling->deviceName, VK_MAX_PHYSICAL_DEVICE_NAME_SIZE * sizeof(char));
     vkStream->read((uint8_t*)forUnmarshaling->pipelineCacheUUID, VK_UUID_SIZE * sizeof(uint8_t));
-    unmarshal_VkPhysicalDeviceLimits(vkStream, (VkPhysicalDeviceLimits*)(&forUnmarshaling->limits));
-    unmarshal_VkPhysicalDeviceSparseProperties(vkStream, (VkPhysicalDeviceSparseProperties*)(&forUnmarshaling->sparseProperties));
+    unmarshal_VkPhysicalDeviceLimits(vkStream, rootType, (VkPhysicalDeviceLimits*)(&forUnmarshaling->limits));
+    unmarshal_VkPhysicalDeviceSparseProperties(vkStream, rootType, (VkPhysicalDeviceSparseProperties*)(&forUnmarshaling->sparseProperties));
 }
 
 void marshal_VkQueueFamilyProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkQueueFamilyProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkQueueFlags*)&forMarshaling->queueFlags, sizeof(VkQueueFlags));
     vkStream->write((uint32_t*)&forMarshaling->queueCount, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->timestampValidBits, sizeof(uint32_t));
-    marshal_VkExtent3D(vkStream, (VkExtent3D*)(&forMarshaling->minImageTransferGranularity));
+    marshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->minImageTransferGranularity));
 }
 
 void unmarshal_VkQueueFamilyProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkQueueFamilyProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkQueueFlags*)&forUnmarshaling->queueFlags, sizeof(VkQueueFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->queueCount, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->timestampValidBits, sizeof(uint32_t));
-    unmarshal_VkExtent3D(vkStream, (VkExtent3D*)(&forUnmarshaling->minImageTransferGranularity));
-}
-
-void marshal_VkMemoryType(
-    VulkanStreamGuest* vkStream,
-    const VkMemoryType* forMarshaling)
-{
-    vkStream->write((VkMemoryPropertyFlags*)&forMarshaling->propertyFlags, sizeof(VkMemoryPropertyFlags));
-    vkStream->write((uint32_t*)&forMarshaling->heapIndex, sizeof(uint32_t));
-}
-
-void unmarshal_VkMemoryType(
-    VulkanStreamGuest* vkStream,
-    VkMemoryType* forUnmarshaling)
-{
-    vkStream->read((VkMemoryPropertyFlags*)&forUnmarshaling->propertyFlags, sizeof(VkMemoryPropertyFlags));
-    vkStream->read((uint32_t*)&forUnmarshaling->heapIndex, sizeof(uint32_t));
-}
-
-void marshal_VkMemoryHeap(
-    VulkanStreamGuest* vkStream,
-    const VkMemoryHeap* forMarshaling)
-{
-    vkStream->write((VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
-    vkStream->write((VkMemoryHeapFlags*)&forMarshaling->flags, sizeof(VkMemoryHeapFlags));
-}
-
-void unmarshal_VkMemoryHeap(
-    VulkanStreamGuest* vkStream,
-    VkMemoryHeap* forUnmarshaling)
-{
-    vkStream->read((VkDeviceSize*)&forUnmarshaling->size, sizeof(VkDeviceSize));
-    vkStream->read((VkMemoryHeapFlags*)&forUnmarshaling->flags, sizeof(VkMemoryHeapFlags));
-}
-
-void marshal_VkPhysicalDeviceMemoryProperties(
-    VulkanStreamGuest* vkStream,
-    const VkPhysicalDeviceMemoryProperties* forMarshaling)
-{
-    vkStream->write((uint32_t*)&forMarshaling->memoryTypeCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_TYPES; ++i)
-    {
-        marshal_VkMemoryType(vkStream, (VkMemoryType*)(forMarshaling->memoryTypes + i));
-    }
-    vkStream->write((uint32_t*)&forMarshaling->memoryHeapCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_HEAPS; ++i)
-    {
-        marshal_VkMemoryHeap(vkStream, (VkMemoryHeap*)(forMarshaling->memoryHeaps + i));
-    }
-}
-
-void unmarshal_VkPhysicalDeviceMemoryProperties(
-    VulkanStreamGuest* vkStream,
-    VkPhysicalDeviceMemoryProperties* forUnmarshaling)
-{
-    vkStream->read((uint32_t*)&forUnmarshaling->memoryTypeCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_TYPES; ++i)
-    {
-        unmarshal_VkMemoryType(vkStream, (VkMemoryType*)(forUnmarshaling->memoryTypes + i));
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->memoryHeapCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_HEAPS; ++i)
-    {
-        unmarshal_VkMemoryHeap(vkStream, (VkMemoryHeap*)(forUnmarshaling->memoryHeaps + i));
-    }
+    unmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forUnmarshaling->minImageTransferGranularity));
 }
 
 void marshal_VkDeviceQueueCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceQueueCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkDeviceQueueCreateFlags*)&forMarshaling->flags, sizeof(VkDeviceQueueCreateFlags));
     vkStream->write((uint32_t*)&forMarshaling->queueFamilyIndex, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->queueCount, sizeof(uint32_t));
@@ -802,17 +1212,16 @@
 
 void unmarshal_VkDeviceQueueCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceQueueCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkDeviceQueueCreateFlags*)&forUnmarshaling->flags, sizeof(VkDeviceQueueCreateFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->queueFamilyIndex, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->queueCount, sizeof(uint32_t));
@@ -821,53 +1230,58 @@
 
 void marshal_VkDeviceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkDeviceCreateFlags*)&forMarshaling->flags, sizeof(VkDeviceCreateFlags));
     vkStream->write((uint32_t*)&forMarshaling->queueCreateInfoCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->queueCreateInfoCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkDeviceQueueCreateInfo(vkStream, (const VkDeviceQueueCreateInfo*)(forMarshaling->pQueueCreateInfos + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->queueCreateInfoCount; ++i)
+        {
+            marshal_VkDeviceQueueCreateInfo(vkStream, rootType, (const VkDeviceQueueCreateInfo*)(forMarshaling->pQueueCreateInfos + i));
+        }
     }
     vkStream->write((uint32_t*)&forMarshaling->enabledLayerCount, sizeof(uint32_t));
     saveStringArray(vkStream, forMarshaling->ppEnabledLayerNames, forMarshaling->enabledLayerCount);
     vkStream->write((uint32_t*)&forMarshaling->enabledExtensionCount, sizeof(uint32_t));
     saveStringArray(vkStream, forMarshaling->ppEnabledExtensionNames, forMarshaling->enabledExtensionCount);
     // WARNING PTR CHECK
-    uint64_t cgen_var_20 = (uint64_t)(uintptr_t)forMarshaling->pEnabledFeatures;
-    vkStream->putBe64(cgen_var_20);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pEnabledFeatures;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pEnabledFeatures)
     {
-        marshal_VkPhysicalDeviceFeatures(vkStream, (const VkPhysicalDeviceFeatures*)(forMarshaling->pEnabledFeatures));
+        marshal_VkPhysicalDeviceFeatures(vkStream, rootType, (const VkPhysicalDeviceFeatures*)(forMarshaling->pEnabledFeatures));
     }
 }
 
 void unmarshal_VkDeviceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkDeviceCreateFlags*)&forUnmarshaling->flags, sizeof(VkDeviceCreateFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->queueCreateInfoCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->queueCreateInfoCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkDeviceQueueCreateInfo(vkStream, (VkDeviceQueueCreateInfo*)(forUnmarshaling->pQueueCreateInfos + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->queueCreateInfoCount; ++i)
+        {
+            unmarshal_VkDeviceQueueCreateInfo(vkStream, rootType, (VkDeviceQueueCreateInfo*)(forUnmarshaling->pQueueCreateInfos + i));
+        }
     }
     vkStream->read((uint32_t*)&forUnmarshaling->enabledLayerCount, sizeof(uint32_t));
     vkStream->loadStringArrayInPlace((char***)&forUnmarshaling->ppEnabledLayerNames);
@@ -882,30 +1296,36 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pEnabledFeatures inconsistent between guest and host\n");
         }
-        unmarshal_VkPhysicalDeviceFeatures(vkStream, (VkPhysicalDeviceFeatures*)(forUnmarshaling->pEnabledFeatures));
+        unmarshal_VkPhysicalDeviceFeatures(vkStream, rootType, (VkPhysicalDeviceFeatures*)(forUnmarshaling->pEnabledFeatures));
     }
 }
 
 void marshal_VkExtensionProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExtensionProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((char*)forMarshaling->extensionName, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
     vkStream->write((uint32_t*)&forMarshaling->specVersion, sizeof(uint32_t));
 }
 
 void unmarshal_VkExtensionProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExtensionProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((char*)forUnmarshaling->extensionName, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
     vkStream->read((uint32_t*)&forUnmarshaling->specVersion, sizeof(uint32_t));
 }
 
 void marshal_VkLayerProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkLayerProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((char*)forMarshaling->layerName, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
     vkStream->write((uint32_t*)&forMarshaling->specVersion, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->implementationVersion, sizeof(uint32_t));
@@ -914,8 +1334,10 @@
 
 void unmarshal_VkLayerProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkLayerProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((char*)forUnmarshaling->layerName, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
     vkStream->read((uint32_t*)&forUnmarshaling->specVersion, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->implementationVersion, sizeof(uint32_t));
@@ -924,159 +1346,158 @@
 
 void marshal_VkSubmitInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSubmitInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->waitSemaphoreCount, sizeof(uint32_t));
     if (forMarshaling->waitSemaphoreCount)
     {
-        uint64_t* cgen_var_22;
-        vkStream->alloc((void**)&cgen_var_22, forMarshaling->waitSemaphoreCount * 8);
-        vkStream->handleMapping()->mapHandles_VkSemaphore_u64(forMarshaling->pWaitSemaphores, cgen_var_22, forMarshaling->waitSemaphoreCount);
-        vkStream->write((uint64_t*)cgen_var_22, forMarshaling->waitSemaphoreCount * 8);
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forMarshaling->waitSemaphoreCount * 8);
+        vkStream->handleMapping()->mapHandles_VkSemaphore_u64(forMarshaling->pWaitSemaphores, cgen_var_0, forMarshaling->waitSemaphoreCount);
+        vkStream->write((uint64_t*)cgen_var_0, forMarshaling->waitSemaphoreCount * 8);
     }
     vkStream->write((const VkPipelineStageFlags*)forMarshaling->pWaitDstStageMask, forMarshaling->waitSemaphoreCount * sizeof(const VkPipelineStageFlags));
     vkStream->write((uint32_t*)&forMarshaling->commandBufferCount, sizeof(uint32_t));
     if (forMarshaling->commandBufferCount)
     {
-        uint64_t* cgen_var_23;
-        vkStream->alloc((void**)&cgen_var_23, forMarshaling->commandBufferCount * 8);
-        vkStream->handleMapping()->mapHandles_VkCommandBuffer_u64(forMarshaling->pCommandBuffers, cgen_var_23, forMarshaling->commandBufferCount);
-        vkStream->write((uint64_t*)cgen_var_23, forMarshaling->commandBufferCount * 8);
+        uint64_t* cgen_var_1;
+        vkStream->alloc((void**)&cgen_var_1, forMarshaling->commandBufferCount * 8);
+        vkStream->handleMapping()->mapHandles_VkCommandBuffer_u64(forMarshaling->pCommandBuffers, cgen_var_1, forMarshaling->commandBufferCount);
+        vkStream->write((uint64_t*)cgen_var_1, forMarshaling->commandBufferCount * 8);
     }
     vkStream->write((uint32_t*)&forMarshaling->signalSemaphoreCount, sizeof(uint32_t));
     if (forMarshaling->signalSemaphoreCount)
     {
-        uint64_t* cgen_var_24;
-        vkStream->alloc((void**)&cgen_var_24, forMarshaling->signalSemaphoreCount * 8);
-        vkStream->handleMapping()->mapHandles_VkSemaphore_u64(forMarshaling->pSignalSemaphores, cgen_var_24, forMarshaling->signalSemaphoreCount);
-        vkStream->write((uint64_t*)cgen_var_24, forMarshaling->signalSemaphoreCount * 8);
+        uint64_t* cgen_var_2;
+        vkStream->alloc((void**)&cgen_var_2, forMarshaling->signalSemaphoreCount * 8);
+        vkStream->handleMapping()->mapHandles_VkSemaphore_u64(forMarshaling->pSignalSemaphores, cgen_var_2, forMarshaling->signalSemaphoreCount);
+        vkStream->write((uint64_t*)cgen_var_2, forMarshaling->signalSemaphoreCount * 8);
     }
 }
 
 void unmarshal_VkSubmitInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSubmitInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->waitSemaphoreCount, sizeof(uint32_t));
     if (forUnmarshaling->waitSemaphoreCount)
     {
-        uint64_t* cgen_var_25;
-        vkStream->alloc((void**)&cgen_var_25, forUnmarshaling->waitSemaphoreCount * 8);
-        vkStream->read((uint64_t*)cgen_var_25, forUnmarshaling->waitSemaphoreCount * 8);
-        vkStream->handleMapping()->mapHandles_u64_VkSemaphore(cgen_var_25, (VkSemaphore*)forUnmarshaling->pWaitSemaphores, forUnmarshaling->waitSemaphoreCount);
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forUnmarshaling->waitSemaphoreCount * 8);
+        vkStream->read((uint64_t*)cgen_var_0, forUnmarshaling->waitSemaphoreCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkSemaphore(cgen_var_0, (VkSemaphore*)forUnmarshaling->pWaitSemaphores, forUnmarshaling->waitSemaphoreCount);
     }
     vkStream->read((VkPipelineStageFlags*)forUnmarshaling->pWaitDstStageMask, forUnmarshaling->waitSemaphoreCount * sizeof(const VkPipelineStageFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->commandBufferCount, sizeof(uint32_t));
     if (forUnmarshaling->commandBufferCount)
     {
-        uint64_t* cgen_var_26;
-        vkStream->alloc((void**)&cgen_var_26, forUnmarshaling->commandBufferCount * 8);
-        vkStream->read((uint64_t*)cgen_var_26, forUnmarshaling->commandBufferCount * 8);
-        vkStream->handleMapping()->mapHandles_u64_VkCommandBuffer(cgen_var_26, (VkCommandBuffer*)forUnmarshaling->pCommandBuffers, forUnmarshaling->commandBufferCount);
+        uint64_t* cgen_var_1;
+        vkStream->alloc((void**)&cgen_var_1, forUnmarshaling->commandBufferCount * 8);
+        vkStream->read((uint64_t*)cgen_var_1, forUnmarshaling->commandBufferCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkCommandBuffer(cgen_var_1, (VkCommandBuffer*)forUnmarshaling->pCommandBuffers, forUnmarshaling->commandBufferCount);
     }
     vkStream->read((uint32_t*)&forUnmarshaling->signalSemaphoreCount, sizeof(uint32_t));
     if (forUnmarshaling->signalSemaphoreCount)
     {
-        uint64_t* cgen_var_27;
-        vkStream->alloc((void**)&cgen_var_27, forUnmarshaling->signalSemaphoreCount * 8);
-        vkStream->read((uint64_t*)cgen_var_27, forUnmarshaling->signalSemaphoreCount * 8);
-        vkStream->handleMapping()->mapHandles_u64_VkSemaphore(cgen_var_27, (VkSemaphore*)forUnmarshaling->pSignalSemaphores, forUnmarshaling->signalSemaphoreCount);
+        uint64_t* cgen_var_2;
+        vkStream->alloc((void**)&cgen_var_2, forUnmarshaling->signalSemaphoreCount * 8);
+        vkStream->read((uint64_t*)cgen_var_2, forUnmarshaling->signalSemaphoreCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkSemaphore(cgen_var_2, (VkSemaphore*)forUnmarshaling->pSignalSemaphores, forUnmarshaling->signalSemaphoreCount);
     }
 }
 
-void marshal_VkMemoryAllocateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkMemoryAllocateInfo* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkDeviceSize*)&forMarshaling->allocationSize, sizeof(VkDeviceSize));
-    vkStream->write((uint32_t*)&forMarshaling->memoryTypeIndex, sizeof(uint32_t));
-}
-
-void unmarshal_VkMemoryAllocateInfo(
-    VulkanStreamGuest* vkStream,
-    VkMemoryAllocateInfo* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkDeviceSize*)&forUnmarshaling->allocationSize, sizeof(VkDeviceSize));
-    vkStream->read((uint32_t*)&forUnmarshaling->memoryTypeIndex, sizeof(uint32_t));
-}
-
 void marshal_VkMappedMemoryRange(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMappedMemoryRange* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_28;
-    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_28, 1);
-    vkStream->write((uint64_t*)&cgen_var_28, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
     vkStream->write((VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
 }
 
 void unmarshal_VkMappedMemoryRange(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMappedMemoryRange* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_29;
-    vkStream->read((uint64_t*)&cgen_var_29, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_29, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_0, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
     vkStream->read((VkDeviceSize*)&forUnmarshaling->offset, sizeof(VkDeviceSize));
     vkStream->read((VkDeviceSize*)&forUnmarshaling->size, sizeof(VkDeviceSize));
 }
 
+void marshal_VkMemoryAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryAllocateInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDeviceSize*)&forMarshaling->allocationSize, sizeof(VkDeviceSize));
+    vkStream->write((uint32_t*)&forMarshaling->memoryTypeIndex, sizeof(uint32_t));
+}
+
+void unmarshal_VkMemoryAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkMemoryAllocateInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->allocationSize, sizeof(VkDeviceSize));
+    vkStream->read((uint32_t*)&forUnmarshaling->memoryTypeIndex, sizeof(uint32_t));
+}
+
 void marshal_VkMemoryRequirements(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryRequirements* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
     vkStream->write((VkDeviceSize*)&forMarshaling->alignment, sizeof(VkDeviceSize));
     vkStream->write((uint32_t*)&forMarshaling->memoryTypeBits, sizeof(uint32_t));
@@ -1084,36 +1505,350 @@
 
 void unmarshal_VkMemoryRequirements(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryRequirements* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkDeviceSize*)&forUnmarshaling->size, sizeof(VkDeviceSize));
     vkStream->read((VkDeviceSize*)&forUnmarshaling->alignment, sizeof(VkDeviceSize));
     vkStream->read((uint32_t*)&forUnmarshaling->memoryTypeBits, sizeof(uint32_t));
 }
 
+void marshal_VkSparseMemoryBind(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseMemoryBind* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkDeviceSize*)&forMarshaling->resourceOffset, sizeof(VkDeviceSize));
+    vkStream->write((VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((VkDeviceSize*)&forMarshaling->memoryOffset, sizeof(VkDeviceSize));
+    vkStream->write((VkSparseMemoryBindFlags*)&forMarshaling->flags, sizeof(VkSparseMemoryBindFlags));
+}
+
+void unmarshal_VkSparseMemoryBind(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSparseMemoryBind* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->resourceOffset, sizeof(VkDeviceSize));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->size, sizeof(VkDeviceSize));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_0, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->memoryOffset, sizeof(VkDeviceSize));
+    vkStream->read((VkSparseMemoryBindFlags*)&forUnmarshaling->flags, sizeof(VkSparseMemoryBindFlags));
+}
+
+void marshal_VkSparseBufferMemoryBindInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseBufferMemoryBindInfo* forMarshaling)
+{
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((uint32_t*)&forMarshaling->bindCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->bindCount; ++i)
+        {
+            marshal_VkSparseMemoryBind(vkStream, rootType, (const VkSparseMemoryBind*)(forMarshaling->pBinds + i));
+        }
+    }
+}
+
+void unmarshal_VkSparseBufferMemoryBindInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSparseBufferMemoryBindInfo* forUnmarshaling)
+{
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_0, (VkBuffer*)&forUnmarshaling->buffer, 1);
+    vkStream->read((uint32_t*)&forUnmarshaling->bindCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->bindCount; ++i)
+        {
+            unmarshal_VkSparseMemoryBind(vkStream, rootType, (VkSparseMemoryBind*)(forUnmarshaling->pBinds + i));
+        }
+    }
+}
+
+void marshal_VkSparseImageOpaqueMemoryBindInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageOpaqueMemoryBindInfo* forMarshaling)
+{
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((uint32_t*)&forMarshaling->bindCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->bindCount; ++i)
+        {
+            marshal_VkSparseMemoryBind(vkStream, rootType, (const VkSparseMemoryBind*)(forMarshaling->pBinds + i));
+        }
+    }
+}
+
+void unmarshal_VkSparseImageOpaqueMemoryBindInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSparseImageOpaqueMemoryBindInfo* forUnmarshaling)
+{
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_0, (VkImage*)&forUnmarshaling->image, 1);
+    vkStream->read((uint32_t*)&forUnmarshaling->bindCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->bindCount; ++i)
+        {
+            unmarshal_VkSparseMemoryBind(vkStream, rootType, (VkSparseMemoryBind*)(forUnmarshaling->pBinds + i));
+        }
+    }
+}
+
+void marshal_VkImageSubresource(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageSubresource* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkImageAspectFlags*)&forMarshaling->aspectMask, sizeof(VkImageAspectFlags));
+    vkStream->write((uint32_t*)&forMarshaling->mipLevel, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->arrayLayer, sizeof(uint32_t));
+}
+
+void unmarshal_VkImageSubresource(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageSubresource* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkImageAspectFlags*)&forUnmarshaling->aspectMask, sizeof(VkImageAspectFlags));
+    vkStream->read((uint32_t*)&forUnmarshaling->mipLevel, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->arrayLayer, sizeof(uint32_t));
+}
+
+void marshal_VkSparseImageMemoryBind(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageMemoryBind* forMarshaling)
+{
+    (void)rootType;
+    marshal_VkImageSubresource(vkStream, rootType, (VkImageSubresource*)(&forMarshaling->subresource));
+    marshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->offset));
+    marshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->extent));
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((VkDeviceSize*)&forMarshaling->memoryOffset, sizeof(VkDeviceSize));
+    vkStream->write((VkSparseMemoryBindFlags*)&forMarshaling->flags, sizeof(VkSparseMemoryBindFlags));
+}
+
+void unmarshal_VkSparseImageMemoryBind(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSparseImageMemoryBind* forUnmarshaling)
+{
+    (void)rootType;
+    unmarshal_VkImageSubresource(vkStream, rootType, (VkImageSubresource*)(&forUnmarshaling->subresource));
+    unmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forUnmarshaling->offset));
+    unmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forUnmarshaling->extent));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_0, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->memoryOffset, sizeof(VkDeviceSize));
+    vkStream->read((VkSparseMemoryBindFlags*)&forUnmarshaling->flags, sizeof(VkSparseMemoryBindFlags));
+}
+
+void marshal_VkSparseImageMemoryBindInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageMemoryBindInfo* forMarshaling)
+{
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((uint32_t*)&forMarshaling->bindCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->bindCount; ++i)
+        {
+            marshal_VkSparseImageMemoryBind(vkStream, rootType, (const VkSparseImageMemoryBind*)(forMarshaling->pBinds + i));
+        }
+    }
+}
+
+void unmarshal_VkSparseImageMemoryBindInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSparseImageMemoryBindInfo* forUnmarshaling)
+{
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_0, (VkImage*)&forUnmarshaling->image, 1);
+    vkStream->read((uint32_t*)&forUnmarshaling->bindCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->bindCount; ++i)
+        {
+            unmarshal_VkSparseImageMemoryBind(vkStream, rootType, (VkSparseImageMemoryBind*)(forUnmarshaling->pBinds + i));
+        }
+    }
+}
+
+void marshal_VkBindSparseInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindSparseInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->waitSemaphoreCount, sizeof(uint32_t));
+    if (forMarshaling->waitSemaphoreCount)
+    {
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forMarshaling->waitSemaphoreCount * 8);
+        vkStream->handleMapping()->mapHandles_VkSemaphore_u64(forMarshaling->pWaitSemaphores, cgen_var_0, forMarshaling->waitSemaphoreCount);
+        vkStream->write((uint64_t*)cgen_var_0, forMarshaling->waitSemaphoreCount * 8);
+    }
+    vkStream->write((uint32_t*)&forMarshaling->bufferBindCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->bufferBindCount; ++i)
+        {
+            marshal_VkSparseBufferMemoryBindInfo(vkStream, rootType, (const VkSparseBufferMemoryBindInfo*)(forMarshaling->pBufferBinds + i));
+        }
+    }
+    vkStream->write((uint32_t*)&forMarshaling->imageOpaqueBindCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->imageOpaqueBindCount; ++i)
+        {
+            marshal_VkSparseImageOpaqueMemoryBindInfo(vkStream, rootType, (const VkSparseImageOpaqueMemoryBindInfo*)(forMarshaling->pImageOpaqueBinds + i));
+        }
+    }
+    vkStream->write((uint32_t*)&forMarshaling->imageBindCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->imageBindCount; ++i)
+        {
+            marshal_VkSparseImageMemoryBindInfo(vkStream, rootType, (const VkSparseImageMemoryBindInfo*)(forMarshaling->pImageBinds + i));
+        }
+    }
+    vkStream->write((uint32_t*)&forMarshaling->signalSemaphoreCount, sizeof(uint32_t));
+    if (forMarshaling->signalSemaphoreCount)
+    {
+        uint64_t* cgen_var_1;
+        vkStream->alloc((void**)&cgen_var_1, forMarshaling->signalSemaphoreCount * 8);
+        vkStream->handleMapping()->mapHandles_VkSemaphore_u64(forMarshaling->pSignalSemaphores, cgen_var_1, forMarshaling->signalSemaphoreCount);
+        vkStream->write((uint64_t*)cgen_var_1, forMarshaling->signalSemaphoreCount * 8);
+    }
+}
+
+void unmarshal_VkBindSparseInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBindSparseInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->waitSemaphoreCount, sizeof(uint32_t));
+    if (forUnmarshaling->waitSemaphoreCount)
+    {
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forUnmarshaling->waitSemaphoreCount * 8);
+        vkStream->read((uint64_t*)cgen_var_0, forUnmarshaling->waitSemaphoreCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkSemaphore(cgen_var_0, (VkSemaphore*)forUnmarshaling->pWaitSemaphores, forUnmarshaling->waitSemaphoreCount);
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->bufferBindCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->bufferBindCount; ++i)
+        {
+            unmarshal_VkSparseBufferMemoryBindInfo(vkStream, rootType, (VkSparseBufferMemoryBindInfo*)(forUnmarshaling->pBufferBinds + i));
+        }
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->imageOpaqueBindCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->imageOpaqueBindCount; ++i)
+        {
+            unmarshal_VkSparseImageOpaqueMemoryBindInfo(vkStream, rootType, (VkSparseImageOpaqueMemoryBindInfo*)(forUnmarshaling->pImageOpaqueBinds + i));
+        }
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->imageBindCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->imageBindCount; ++i)
+        {
+            unmarshal_VkSparseImageMemoryBindInfo(vkStream, rootType, (VkSparseImageMemoryBindInfo*)(forUnmarshaling->pImageBinds + i));
+        }
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->signalSemaphoreCount, sizeof(uint32_t));
+    if (forUnmarshaling->signalSemaphoreCount)
+    {
+        uint64_t* cgen_var_1;
+        vkStream->alloc((void**)&cgen_var_1, forUnmarshaling->signalSemaphoreCount * 8);
+        vkStream->read((uint64_t*)cgen_var_1, forUnmarshaling->signalSemaphoreCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkSemaphore(cgen_var_1, (VkSemaphore*)forUnmarshaling->pSignalSemaphores, forUnmarshaling->signalSemaphoreCount);
+    }
+}
+
 void marshal_VkSparseImageFormatProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSparseImageFormatProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkImageAspectFlags*)&forMarshaling->aspectMask, sizeof(VkImageAspectFlags));
-    marshal_VkExtent3D(vkStream, (VkExtent3D*)(&forMarshaling->imageGranularity));
+    marshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->imageGranularity));
     vkStream->write((VkSparseImageFormatFlags*)&forMarshaling->flags, sizeof(VkSparseImageFormatFlags));
 }
 
 void unmarshal_VkSparseImageFormatProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSparseImageFormatProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkImageAspectFlags*)&forUnmarshaling->aspectMask, sizeof(VkImageAspectFlags));
-    unmarshal_VkExtent3D(vkStream, (VkExtent3D*)(&forUnmarshaling->imageGranularity));
+    unmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forUnmarshaling->imageGranularity));
     vkStream->read((VkSparseImageFormatFlags*)&forUnmarshaling->flags, sizeof(VkSparseImageFormatFlags));
 }
 
 void marshal_VkSparseImageMemoryRequirements(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSparseImageMemoryRequirements* forMarshaling)
 {
-    marshal_VkSparseImageFormatProperties(vkStream, (VkSparseImageFormatProperties*)(&forMarshaling->formatProperties));
+    (void)rootType;
+    marshal_VkSparseImageFormatProperties(vkStream, rootType, (VkSparseImageFormatProperties*)(&forMarshaling->formatProperties));
     vkStream->write((uint32_t*)&forMarshaling->imageMipTailFirstLod, sizeof(uint32_t));
     vkStream->write((VkDeviceSize*)&forMarshaling->imageMipTailSize, sizeof(VkDeviceSize));
     vkStream->write((VkDeviceSize*)&forMarshaling->imageMipTailOffset, sizeof(VkDeviceSize));
@@ -1122,385 +1857,119 @@
 
 void unmarshal_VkSparseImageMemoryRequirements(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSparseImageMemoryRequirements* forUnmarshaling)
 {
-    unmarshal_VkSparseImageFormatProperties(vkStream, (VkSparseImageFormatProperties*)(&forUnmarshaling->formatProperties));
+    (void)rootType;
+    unmarshal_VkSparseImageFormatProperties(vkStream, rootType, (VkSparseImageFormatProperties*)(&forUnmarshaling->formatProperties));
     vkStream->read((uint32_t*)&forUnmarshaling->imageMipTailFirstLod, sizeof(uint32_t));
     vkStream->read((VkDeviceSize*)&forUnmarshaling->imageMipTailSize, sizeof(VkDeviceSize));
     vkStream->read((VkDeviceSize*)&forUnmarshaling->imageMipTailOffset, sizeof(VkDeviceSize));
     vkStream->read((VkDeviceSize*)&forUnmarshaling->imageMipTailStride, sizeof(VkDeviceSize));
 }
 
-void marshal_VkSparseMemoryBind(
-    VulkanStreamGuest* vkStream,
-    const VkSparseMemoryBind* forMarshaling)
-{
-    vkStream->write((VkDeviceSize*)&forMarshaling->resourceOffset, sizeof(VkDeviceSize));
-    vkStream->write((VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
-    uint64_t cgen_var_30;
-    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_30, 1);
-    vkStream->write((uint64_t*)&cgen_var_30, 1 * 8);
-    vkStream->write((VkDeviceSize*)&forMarshaling->memoryOffset, sizeof(VkDeviceSize));
-    vkStream->write((VkSparseMemoryBindFlags*)&forMarshaling->flags, sizeof(VkSparseMemoryBindFlags));
-}
-
-void unmarshal_VkSparseMemoryBind(
-    VulkanStreamGuest* vkStream,
-    VkSparseMemoryBind* forUnmarshaling)
-{
-    vkStream->read((VkDeviceSize*)&forUnmarshaling->resourceOffset, sizeof(VkDeviceSize));
-    vkStream->read((VkDeviceSize*)&forUnmarshaling->size, sizeof(VkDeviceSize));
-    uint64_t cgen_var_31;
-    vkStream->read((uint64_t*)&cgen_var_31, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_31, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
-    vkStream->read((VkDeviceSize*)&forUnmarshaling->memoryOffset, sizeof(VkDeviceSize));
-    vkStream->read((VkSparseMemoryBindFlags*)&forUnmarshaling->flags, sizeof(VkSparseMemoryBindFlags));
-}
-
-void marshal_VkSparseBufferMemoryBindInfo(
-    VulkanStreamGuest* vkStream,
-    const VkSparseBufferMemoryBindInfo* forMarshaling)
-{
-    uint64_t cgen_var_32;
-    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_32, 1);
-    vkStream->write((uint64_t*)&cgen_var_32, 1 * 8);
-    vkStream->write((uint32_t*)&forMarshaling->bindCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->bindCount; ++i)
-    {
-        marshal_VkSparseMemoryBind(vkStream, (const VkSparseMemoryBind*)(forMarshaling->pBinds + i));
-    }
-}
-
-void unmarshal_VkSparseBufferMemoryBindInfo(
-    VulkanStreamGuest* vkStream,
-    VkSparseBufferMemoryBindInfo* forUnmarshaling)
-{
-    uint64_t cgen_var_33;
-    vkStream->read((uint64_t*)&cgen_var_33, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_33, (VkBuffer*)&forUnmarshaling->buffer, 1);
-    vkStream->read((uint32_t*)&forUnmarshaling->bindCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->bindCount; ++i)
-    {
-        unmarshal_VkSparseMemoryBind(vkStream, (VkSparseMemoryBind*)(forUnmarshaling->pBinds + i));
-    }
-}
-
-void marshal_VkSparseImageOpaqueMemoryBindInfo(
-    VulkanStreamGuest* vkStream,
-    const VkSparseImageOpaqueMemoryBindInfo* forMarshaling)
-{
-    uint64_t cgen_var_34;
-    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_34, 1);
-    vkStream->write((uint64_t*)&cgen_var_34, 1 * 8);
-    vkStream->write((uint32_t*)&forMarshaling->bindCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->bindCount; ++i)
-    {
-        marshal_VkSparseMemoryBind(vkStream, (const VkSparseMemoryBind*)(forMarshaling->pBinds + i));
-    }
-}
-
-void unmarshal_VkSparseImageOpaqueMemoryBindInfo(
-    VulkanStreamGuest* vkStream,
-    VkSparseImageOpaqueMemoryBindInfo* forUnmarshaling)
-{
-    uint64_t cgen_var_35;
-    vkStream->read((uint64_t*)&cgen_var_35, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_35, (VkImage*)&forUnmarshaling->image, 1);
-    vkStream->read((uint32_t*)&forUnmarshaling->bindCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->bindCount; ++i)
-    {
-        unmarshal_VkSparseMemoryBind(vkStream, (VkSparseMemoryBind*)(forUnmarshaling->pBinds + i));
-    }
-}
-
-void marshal_VkImageSubresource(
-    VulkanStreamGuest* vkStream,
-    const VkImageSubresource* forMarshaling)
-{
-    vkStream->write((VkImageAspectFlags*)&forMarshaling->aspectMask, sizeof(VkImageAspectFlags));
-    vkStream->write((uint32_t*)&forMarshaling->mipLevel, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->arrayLayer, sizeof(uint32_t));
-}
-
-void unmarshal_VkImageSubresource(
-    VulkanStreamGuest* vkStream,
-    VkImageSubresource* forUnmarshaling)
-{
-    vkStream->read((VkImageAspectFlags*)&forUnmarshaling->aspectMask, sizeof(VkImageAspectFlags));
-    vkStream->read((uint32_t*)&forUnmarshaling->mipLevel, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->arrayLayer, sizeof(uint32_t));
-}
-
-void marshal_VkOffset3D(
-    VulkanStreamGuest* vkStream,
-    const VkOffset3D* forMarshaling)
-{
-    vkStream->write((int32_t*)&forMarshaling->x, sizeof(int32_t));
-    vkStream->write((int32_t*)&forMarshaling->y, sizeof(int32_t));
-    vkStream->write((int32_t*)&forMarshaling->z, sizeof(int32_t));
-}
-
-void unmarshal_VkOffset3D(
-    VulkanStreamGuest* vkStream,
-    VkOffset3D* forUnmarshaling)
-{
-    vkStream->read((int32_t*)&forUnmarshaling->x, sizeof(int32_t));
-    vkStream->read((int32_t*)&forUnmarshaling->y, sizeof(int32_t));
-    vkStream->read((int32_t*)&forUnmarshaling->z, sizeof(int32_t));
-}
-
-void marshal_VkSparseImageMemoryBind(
-    VulkanStreamGuest* vkStream,
-    const VkSparseImageMemoryBind* forMarshaling)
-{
-    marshal_VkImageSubresource(vkStream, (VkImageSubresource*)(&forMarshaling->subresource));
-    marshal_VkOffset3D(vkStream, (VkOffset3D*)(&forMarshaling->offset));
-    marshal_VkExtent3D(vkStream, (VkExtent3D*)(&forMarshaling->extent));
-    uint64_t cgen_var_36;
-    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_36, 1);
-    vkStream->write((uint64_t*)&cgen_var_36, 1 * 8);
-    vkStream->write((VkDeviceSize*)&forMarshaling->memoryOffset, sizeof(VkDeviceSize));
-    vkStream->write((VkSparseMemoryBindFlags*)&forMarshaling->flags, sizeof(VkSparseMemoryBindFlags));
-}
-
-void unmarshal_VkSparseImageMemoryBind(
-    VulkanStreamGuest* vkStream,
-    VkSparseImageMemoryBind* forUnmarshaling)
-{
-    unmarshal_VkImageSubresource(vkStream, (VkImageSubresource*)(&forUnmarshaling->subresource));
-    unmarshal_VkOffset3D(vkStream, (VkOffset3D*)(&forUnmarshaling->offset));
-    unmarshal_VkExtent3D(vkStream, (VkExtent3D*)(&forUnmarshaling->extent));
-    uint64_t cgen_var_37;
-    vkStream->read((uint64_t*)&cgen_var_37, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_37, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
-    vkStream->read((VkDeviceSize*)&forUnmarshaling->memoryOffset, sizeof(VkDeviceSize));
-    vkStream->read((VkSparseMemoryBindFlags*)&forUnmarshaling->flags, sizeof(VkSparseMemoryBindFlags));
-}
-
-void marshal_VkSparseImageMemoryBindInfo(
-    VulkanStreamGuest* vkStream,
-    const VkSparseImageMemoryBindInfo* forMarshaling)
-{
-    uint64_t cgen_var_38;
-    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_38, 1);
-    vkStream->write((uint64_t*)&cgen_var_38, 1 * 8);
-    vkStream->write((uint32_t*)&forMarshaling->bindCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->bindCount; ++i)
-    {
-        marshal_VkSparseImageMemoryBind(vkStream, (const VkSparseImageMemoryBind*)(forMarshaling->pBinds + i));
-    }
-}
-
-void unmarshal_VkSparseImageMemoryBindInfo(
-    VulkanStreamGuest* vkStream,
-    VkSparseImageMemoryBindInfo* forUnmarshaling)
-{
-    uint64_t cgen_var_39;
-    vkStream->read((uint64_t*)&cgen_var_39, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_39, (VkImage*)&forUnmarshaling->image, 1);
-    vkStream->read((uint32_t*)&forUnmarshaling->bindCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->bindCount; ++i)
-    {
-        unmarshal_VkSparseImageMemoryBind(vkStream, (VkSparseImageMemoryBind*)(forUnmarshaling->pBinds + i));
-    }
-}
-
-void marshal_VkBindSparseInfo(
-    VulkanStreamGuest* vkStream,
-    const VkBindSparseInfo* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((uint32_t*)&forMarshaling->waitSemaphoreCount, sizeof(uint32_t));
-    if (forMarshaling->waitSemaphoreCount)
-    {
-        uint64_t* cgen_var_40;
-        vkStream->alloc((void**)&cgen_var_40, forMarshaling->waitSemaphoreCount * 8);
-        vkStream->handleMapping()->mapHandles_VkSemaphore_u64(forMarshaling->pWaitSemaphores, cgen_var_40, forMarshaling->waitSemaphoreCount);
-        vkStream->write((uint64_t*)cgen_var_40, forMarshaling->waitSemaphoreCount * 8);
-    }
-    vkStream->write((uint32_t*)&forMarshaling->bufferBindCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->bufferBindCount; ++i)
-    {
-        marshal_VkSparseBufferMemoryBindInfo(vkStream, (const VkSparseBufferMemoryBindInfo*)(forMarshaling->pBufferBinds + i));
-    }
-    vkStream->write((uint32_t*)&forMarshaling->imageOpaqueBindCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->imageOpaqueBindCount; ++i)
-    {
-        marshal_VkSparseImageOpaqueMemoryBindInfo(vkStream, (const VkSparseImageOpaqueMemoryBindInfo*)(forMarshaling->pImageOpaqueBinds + i));
-    }
-    vkStream->write((uint32_t*)&forMarshaling->imageBindCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->imageBindCount; ++i)
-    {
-        marshal_VkSparseImageMemoryBindInfo(vkStream, (const VkSparseImageMemoryBindInfo*)(forMarshaling->pImageBinds + i));
-    }
-    vkStream->write((uint32_t*)&forMarshaling->signalSemaphoreCount, sizeof(uint32_t));
-    if (forMarshaling->signalSemaphoreCount)
-    {
-        uint64_t* cgen_var_41;
-        vkStream->alloc((void**)&cgen_var_41, forMarshaling->signalSemaphoreCount * 8);
-        vkStream->handleMapping()->mapHandles_VkSemaphore_u64(forMarshaling->pSignalSemaphores, cgen_var_41, forMarshaling->signalSemaphoreCount);
-        vkStream->write((uint64_t*)cgen_var_41, forMarshaling->signalSemaphoreCount * 8);
-    }
-}
-
-void unmarshal_VkBindSparseInfo(
-    VulkanStreamGuest* vkStream,
-    VkBindSparseInfo* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->waitSemaphoreCount, sizeof(uint32_t));
-    if (forUnmarshaling->waitSemaphoreCount)
-    {
-        uint64_t* cgen_var_42;
-        vkStream->alloc((void**)&cgen_var_42, forUnmarshaling->waitSemaphoreCount * 8);
-        vkStream->read((uint64_t*)cgen_var_42, forUnmarshaling->waitSemaphoreCount * 8);
-        vkStream->handleMapping()->mapHandles_u64_VkSemaphore(cgen_var_42, (VkSemaphore*)forUnmarshaling->pWaitSemaphores, forUnmarshaling->waitSemaphoreCount);
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->bufferBindCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->bufferBindCount; ++i)
-    {
-        unmarshal_VkSparseBufferMemoryBindInfo(vkStream, (VkSparseBufferMemoryBindInfo*)(forUnmarshaling->pBufferBinds + i));
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->imageOpaqueBindCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->imageOpaqueBindCount; ++i)
-    {
-        unmarshal_VkSparseImageOpaqueMemoryBindInfo(vkStream, (VkSparseImageOpaqueMemoryBindInfo*)(forUnmarshaling->pImageOpaqueBinds + i));
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->imageBindCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->imageBindCount; ++i)
-    {
-        unmarshal_VkSparseImageMemoryBindInfo(vkStream, (VkSparseImageMemoryBindInfo*)(forUnmarshaling->pImageBinds + i));
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->signalSemaphoreCount, sizeof(uint32_t));
-    if (forUnmarshaling->signalSemaphoreCount)
-    {
-        uint64_t* cgen_var_43;
-        vkStream->alloc((void**)&cgen_var_43, forUnmarshaling->signalSemaphoreCount * 8);
-        vkStream->read((uint64_t*)cgen_var_43, forUnmarshaling->signalSemaphoreCount * 8);
-        vkStream->handleMapping()->mapHandles_u64_VkSemaphore(cgen_var_43, (VkSemaphore*)forUnmarshaling->pSignalSemaphores, forUnmarshaling->signalSemaphoreCount);
-    }
-}
-
 void marshal_VkFenceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkFenceCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkFenceCreateFlags*)&forMarshaling->flags, sizeof(VkFenceCreateFlags));
 }
 
 void unmarshal_VkFenceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkFenceCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkFenceCreateFlags*)&forUnmarshaling->flags, sizeof(VkFenceCreateFlags));
 }
 
 void marshal_VkSemaphoreCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSemaphoreCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkSemaphoreCreateFlags*)&forMarshaling->flags, sizeof(VkSemaphoreCreateFlags));
 }
 
 void unmarshal_VkSemaphoreCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSemaphoreCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkSemaphoreCreateFlags*)&forUnmarshaling->flags, sizeof(VkSemaphoreCreateFlags));
 }
 
 void marshal_VkEventCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkEventCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkEventCreateFlags*)&forMarshaling->flags, sizeof(VkEventCreateFlags));
 }
 
 void unmarshal_VkEventCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkEventCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkEventCreateFlags*)&forUnmarshaling->flags, sizeof(VkEventCreateFlags));
 }
 
 void marshal_VkQueryPoolCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkQueryPoolCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkQueryPoolCreateFlags*)&forMarshaling->flags, sizeof(VkQueryPoolCreateFlags));
     vkStream->write((VkQueryType*)&forMarshaling->queryType, sizeof(VkQueryType));
     vkStream->write((uint32_t*)&forMarshaling->queryCount, sizeof(uint32_t));
@@ -1509,17 +1978,16 @@
 
 void unmarshal_VkQueryPoolCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkQueryPoolCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkQueryPoolCreateFlags*)&forUnmarshaling->flags, sizeof(VkQueryPoolCreateFlags));
     vkStream->read((VkQueryType*)&forUnmarshaling->queryType, sizeof(VkQueryType));
     vkStream->read((uint32_t*)&forUnmarshaling->queryCount, sizeof(uint32_t));
@@ -1528,24 +1996,24 @@
 
 void marshal_VkBufferCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBufferCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBufferCreateFlags*)&forMarshaling->flags, sizeof(VkBufferCreateFlags));
     vkStream->write((VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
     vkStream->write((VkBufferUsageFlags*)&forMarshaling->usage, sizeof(VkBufferUsageFlags));
     vkStream->write((VkSharingMode*)&forMarshaling->sharingMode, sizeof(VkSharingMode));
     vkStream->write((uint32_t*)&forMarshaling->queueFamilyIndexCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_44 = (uint64_t)(uintptr_t)forMarshaling->pQueueFamilyIndices;
-    vkStream->putBe64(cgen_var_44);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pQueueFamilyIndices;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pQueueFamilyIndices)
     {
         vkStream->write((const uint32_t*)forMarshaling->pQueueFamilyIndices, forMarshaling->queueFamilyIndexCount * sizeof(const uint32_t));
@@ -1554,17 +2022,16 @@
 
 void unmarshal_VkBufferCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBufferCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBufferCreateFlags*)&forUnmarshaling->flags, sizeof(VkBufferCreateFlags));
     vkStream->read((VkDeviceSize*)&forUnmarshaling->size, sizeof(VkDeviceSize));
     vkStream->read((VkBufferUsageFlags*)&forUnmarshaling->usage, sizeof(VkBufferUsageFlags));
@@ -1585,20 +2052,20 @@
 
 void marshal_VkBufferViewCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBufferViewCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBufferViewCreateFlags*)&forMarshaling->flags, sizeof(VkBufferViewCreateFlags));
-    uint64_t cgen_var_46;
-    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_46, 1);
-    vkStream->write((uint64_t*)&cgen_var_46, 1 * 8);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((VkFormat*)&forMarshaling->format, sizeof(VkFormat));
     vkStream->write((VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
     vkStream->write((VkDeviceSize*)&forMarshaling->range, sizeof(VkDeviceSize));
@@ -1606,21 +2073,20 @@
 
 void unmarshal_VkBufferViewCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBufferViewCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBufferViewCreateFlags*)&forUnmarshaling->flags, sizeof(VkBufferViewCreateFlags));
-    uint64_t cgen_var_47;
-    vkStream->read((uint64_t*)&cgen_var_47, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_47, (VkBuffer*)&forUnmarshaling->buffer, 1);
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_0, (VkBuffer*)&forUnmarshaling->buffer, 1);
     vkStream->read((VkFormat*)&forUnmarshaling->format, sizeof(VkFormat));
     vkStream->read((VkDeviceSize*)&forUnmarshaling->offset, sizeof(VkDeviceSize));
     vkStream->read((VkDeviceSize*)&forUnmarshaling->range, sizeof(VkDeviceSize));
@@ -1628,20 +2094,20 @@
 
 void marshal_VkImageCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkImageCreateFlags*)&forMarshaling->flags, sizeof(VkImageCreateFlags));
     vkStream->write((VkImageType*)&forMarshaling->imageType, sizeof(VkImageType));
     vkStream->write((VkFormat*)&forMarshaling->format, sizeof(VkFormat));
-    marshal_VkExtent3D(vkStream, (VkExtent3D*)(&forMarshaling->extent));
+    marshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->extent));
     vkStream->write((uint32_t*)&forMarshaling->mipLevels, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->arrayLayers, sizeof(uint32_t));
     vkStream->write((VkSampleCountFlagBits*)&forMarshaling->samples, sizeof(VkSampleCountFlagBits));
@@ -1650,8 +2116,8 @@
     vkStream->write((VkSharingMode*)&forMarshaling->sharingMode, sizeof(VkSharingMode));
     vkStream->write((uint32_t*)&forMarshaling->queueFamilyIndexCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_48 = (uint64_t)(uintptr_t)forMarshaling->pQueueFamilyIndices;
-    vkStream->putBe64(cgen_var_48);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pQueueFamilyIndices;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pQueueFamilyIndices)
     {
         vkStream->write((const uint32_t*)forMarshaling->pQueueFamilyIndices, forMarshaling->queueFamilyIndexCount * sizeof(const uint32_t));
@@ -1661,21 +2127,20 @@
 
 void unmarshal_VkImageCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkImageCreateFlags*)&forUnmarshaling->flags, sizeof(VkImageCreateFlags));
     vkStream->read((VkImageType*)&forUnmarshaling->imageType, sizeof(VkImageType));
     vkStream->read((VkFormat*)&forUnmarshaling->format, sizeof(VkFormat));
-    unmarshal_VkExtent3D(vkStream, (VkExtent3D*)(&forUnmarshaling->extent));
+    unmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forUnmarshaling->extent));
     vkStream->read((uint32_t*)&forUnmarshaling->mipLevels, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->arrayLayers, sizeof(uint32_t));
     vkStream->read((VkSampleCountFlagBits*)&forUnmarshaling->samples, sizeof(VkSampleCountFlagBits));
@@ -1699,8 +2164,10 @@
 
 void marshal_VkSubresourceLayout(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSubresourceLayout* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
     vkStream->write((VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
     vkStream->write((VkDeviceSize*)&forMarshaling->rowPitch, sizeof(VkDeviceSize));
@@ -1710,8 +2177,10 @@
 
 void unmarshal_VkSubresourceLayout(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSubresourceLayout* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkDeviceSize*)&forUnmarshaling->offset, sizeof(VkDeviceSize));
     vkStream->read((VkDeviceSize*)&forUnmarshaling->size, sizeof(VkDeviceSize));
     vkStream->read((VkDeviceSize*)&forUnmarshaling->rowPitch, sizeof(VkDeviceSize));
@@ -1721,8 +2190,10 @@
 
 void marshal_VkComponentMapping(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkComponentMapping* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkComponentSwizzle*)&forMarshaling->r, sizeof(VkComponentSwizzle));
     vkStream->write((VkComponentSwizzle*)&forMarshaling->g, sizeof(VkComponentSwizzle));
     vkStream->write((VkComponentSwizzle*)&forMarshaling->b, sizeof(VkComponentSwizzle));
@@ -1731,112 +2202,90 @@
 
 void unmarshal_VkComponentMapping(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkComponentMapping* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkComponentSwizzle*)&forUnmarshaling->r, sizeof(VkComponentSwizzle));
     vkStream->read((VkComponentSwizzle*)&forUnmarshaling->g, sizeof(VkComponentSwizzle));
     vkStream->read((VkComponentSwizzle*)&forUnmarshaling->b, sizeof(VkComponentSwizzle));
     vkStream->read((VkComponentSwizzle*)&forUnmarshaling->a, sizeof(VkComponentSwizzle));
 }
 
-void marshal_VkImageSubresourceRange(
-    VulkanStreamGuest* vkStream,
-    const VkImageSubresourceRange* forMarshaling)
-{
-    vkStream->write((VkImageAspectFlags*)&forMarshaling->aspectMask, sizeof(VkImageAspectFlags));
-    vkStream->write((uint32_t*)&forMarshaling->baseMipLevel, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->levelCount, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->baseArrayLayer, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->layerCount, sizeof(uint32_t));
-}
-
-void unmarshal_VkImageSubresourceRange(
-    VulkanStreamGuest* vkStream,
-    VkImageSubresourceRange* forUnmarshaling)
-{
-    vkStream->read((VkImageAspectFlags*)&forUnmarshaling->aspectMask, sizeof(VkImageAspectFlags));
-    vkStream->read((uint32_t*)&forUnmarshaling->baseMipLevel, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->levelCount, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->baseArrayLayer, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->layerCount, sizeof(uint32_t));
-}
-
 void marshal_VkImageViewCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageViewCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkImageViewCreateFlags*)&forMarshaling->flags, sizeof(VkImageViewCreateFlags));
-    uint64_t cgen_var_50;
-    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_50, 1);
-    vkStream->write((uint64_t*)&cgen_var_50, 1 * 8);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((VkImageViewType*)&forMarshaling->viewType, sizeof(VkImageViewType));
     vkStream->write((VkFormat*)&forMarshaling->format, sizeof(VkFormat));
-    marshal_VkComponentMapping(vkStream, (VkComponentMapping*)(&forMarshaling->components));
-    marshal_VkImageSubresourceRange(vkStream, (VkImageSubresourceRange*)(&forMarshaling->subresourceRange));
+    marshal_VkComponentMapping(vkStream, rootType, (VkComponentMapping*)(&forMarshaling->components));
+    marshal_VkImageSubresourceRange(vkStream, rootType, (VkImageSubresourceRange*)(&forMarshaling->subresourceRange));
 }
 
 void unmarshal_VkImageViewCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageViewCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkImageViewCreateFlags*)&forUnmarshaling->flags, sizeof(VkImageViewCreateFlags));
-    uint64_t cgen_var_51;
-    vkStream->read((uint64_t*)&cgen_var_51, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_51, (VkImage*)&forUnmarshaling->image, 1);
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_0, (VkImage*)&forUnmarshaling->image, 1);
     vkStream->read((VkImageViewType*)&forUnmarshaling->viewType, sizeof(VkImageViewType));
     vkStream->read((VkFormat*)&forUnmarshaling->format, sizeof(VkFormat));
-    unmarshal_VkComponentMapping(vkStream, (VkComponentMapping*)(&forUnmarshaling->components));
-    unmarshal_VkImageSubresourceRange(vkStream, (VkImageSubresourceRange*)(&forUnmarshaling->subresourceRange));
+    unmarshal_VkComponentMapping(vkStream, rootType, (VkComponentMapping*)(&forUnmarshaling->components));
+    unmarshal_VkImageSubresourceRange(vkStream, rootType, (VkImageSubresourceRange*)(&forUnmarshaling->subresourceRange));
 }
 
 void marshal_VkShaderModuleCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkShaderModuleCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkShaderModuleCreateFlags*)&forMarshaling->flags, sizeof(VkShaderModuleCreateFlags));
-    uint64_t cgen_var_52 = (uint64_t)forMarshaling->codeSize;
-    vkStream->putBe64(cgen_var_52);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->codeSize;
+    vkStream->putBe64(cgen_var_0);
     vkStream->write((const uint32_t*)forMarshaling->pCode, (forMarshaling->codeSize / 4) * sizeof(const uint32_t));
 }
 
 void unmarshal_VkShaderModuleCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkShaderModuleCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkShaderModuleCreateFlags*)&forUnmarshaling->flags, sizeof(VkShaderModuleCreateFlags));
     forUnmarshaling->codeSize = (size_t)vkStream->getBe64();
     vkStream->read((uint32_t*)forUnmarshaling->pCode, (forUnmarshaling->codeSize / 4) * sizeof(const uint32_t));
@@ -1844,35 +2293,34 @@
 
 void marshal_VkPipelineCacheCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineCacheCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineCacheCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineCacheCreateFlags));
-    uint64_t cgen_var_54 = (uint64_t)forMarshaling->initialDataSize;
-    vkStream->putBe64(cgen_var_54);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->initialDataSize;
+    vkStream->putBe64(cgen_var_0);
     vkStream->write((const void*)forMarshaling->pInitialData, forMarshaling->initialDataSize * sizeof(const uint8_t));
 }
 
 void unmarshal_VkPipelineCacheCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineCacheCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineCacheCreateFlags*)&forUnmarshaling->flags, sizeof(VkPipelineCacheCreateFlags));
     forUnmarshaling->initialDataSize = (size_t)vkStream->getBe64();
     vkStream->read((void*)forUnmarshaling->pInitialData, forUnmarshaling->initialDataSize * sizeof(const uint8_t));
@@ -1880,18 +2328,22 @@
 
 void marshal_VkSpecializationMapEntry(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSpecializationMapEntry* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->constantID, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->offset, sizeof(uint32_t));
-    uint64_t cgen_var_56 = (uint64_t)forMarshaling->size;
-    vkStream->putBe64(cgen_var_56);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->size;
+    vkStream->putBe64(cgen_var_0);
 }
 
 void unmarshal_VkSpecializationMapEntry(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSpecializationMapEntry* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->constantID, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->offset, sizeof(uint32_t));
     forUnmarshaling->size = (size_t)vkStream->getBe64();
@@ -1899,26 +2351,36 @@
 
 void marshal_VkSpecializationInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSpecializationInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->mapEntryCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->mapEntryCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkSpecializationMapEntry(vkStream, (const VkSpecializationMapEntry*)(forMarshaling->pMapEntries + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->mapEntryCount; ++i)
+        {
+            marshal_VkSpecializationMapEntry(vkStream, rootType, (const VkSpecializationMapEntry*)(forMarshaling->pMapEntries + i));
+        }
     }
-    uint64_t cgen_var_58 = (uint64_t)forMarshaling->dataSize;
-    vkStream->putBe64(cgen_var_58);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->dataSize;
+    vkStream->putBe64(cgen_var_0);
     vkStream->write((const void*)forMarshaling->pData, forMarshaling->dataSize * sizeof(const uint8_t));
 }
 
 void unmarshal_VkSpecializationInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSpecializationInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->mapEntryCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->mapEntryCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkSpecializationMapEntry(vkStream, (VkSpecializationMapEntry*)(forUnmarshaling->pMapEntries + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->mapEntryCount; ++i)
+        {
+            unmarshal_VkSpecializationMapEntry(vkStream, rootType, (VkSpecializationMapEntry*)(forUnmarshaling->pMapEntries + i));
+        }
     }
     forUnmarshaling->dataSize = (size_t)vkStream->getBe64();
     vkStream->read((void*)forUnmarshaling->pData, forUnmarshaling->dataSize * sizeof(const uint8_t));
@@ -1926,49 +2388,48 @@
 
 void marshal_VkPipelineShaderStageCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineShaderStageCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineShaderStageCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineShaderStageCreateFlags));
     vkStream->write((VkShaderStageFlagBits*)&forMarshaling->stage, sizeof(VkShaderStageFlagBits));
-    uint64_t cgen_var_60;
-    vkStream->handleMapping()->mapHandles_VkShaderModule_u64(&forMarshaling->module, &cgen_var_60, 1);
-    vkStream->write((uint64_t*)&cgen_var_60, 1 * 8);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkShaderModule_u64(&forMarshaling->module, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->putString(forMarshaling->pName);
     // WARNING PTR CHECK
-    uint64_t cgen_var_61 = (uint64_t)(uintptr_t)forMarshaling->pSpecializationInfo;
-    vkStream->putBe64(cgen_var_61);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pSpecializationInfo;
+    vkStream->putBe64(cgen_var_1);
     if (forMarshaling->pSpecializationInfo)
     {
-        marshal_VkSpecializationInfo(vkStream, (const VkSpecializationInfo*)(forMarshaling->pSpecializationInfo));
+        marshal_VkSpecializationInfo(vkStream, rootType, (const VkSpecializationInfo*)(forMarshaling->pSpecializationInfo));
     }
 }
 
 void unmarshal_VkPipelineShaderStageCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineShaderStageCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineShaderStageCreateFlags*)&forUnmarshaling->flags, sizeof(VkPipelineShaderStageCreateFlags));
     vkStream->read((VkShaderStageFlagBits*)&forUnmarshaling->stage, sizeof(VkShaderStageFlagBits));
-    uint64_t cgen_var_62;
-    vkStream->read((uint64_t*)&cgen_var_62, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkShaderModule(&cgen_var_62, (VkShaderModule*)&forUnmarshaling->module, 1);
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkShaderModule(&cgen_var_0, (VkShaderModule*)&forUnmarshaling->module, 1);
     vkStream->loadStringInPlace((char**)&forUnmarshaling->pName);
     // WARNING PTR CHECK
     const VkSpecializationInfo* check_pSpecializationInfo;
@@ -1979,14 +2440,62 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pSpecializationInfo inconsistent between guest and host\n");
         }
-        unmarshal_VkSpecializationInfo(vkStream, (VkSpecializationInfo*)(forUnmarshaling->pSpecializationInfo));
+        unmarshal_VkSpecializationInfo(vkStream, rootType, (VkSpecializationInfo*)(forUnmarshaling->pSpecializationInfo));
     }
 }
 
+void marshal_VkComputePipelineCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkComputePipelineCreateInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkPipelineCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineCreateFlags));
+    marshal_VkPipelineShaderStageCreateInfo(vkStream, rootType, (VkPipelineShaderStageCreateInfo*)(&forMarshaling->stage));
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkPipelineLayout_u64(&forMarshaling->layout, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkPipeline_u64(&forMarshaling->basePipelineHandle, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->write((int32_t*)&forMarshaling->basePipelineIndex, sizeof(int32_t));
+}
+
+void unmarshal_VkComputePipelineCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkComputePipelineCreateInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkPipelineCreateFlags*)&forUnmarshaling->flags, sizeof(VkPipelineCreateFlags));
+    unmarshal_VkPipelineShaderStageCreateInfo(vkStream, rootType, (VkPipelineShaderStageCreateInfo*)(&forUnmarshaling->stage));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkPipelineLayout(&cgen_var_0, (VkPipelineLayout*)&forUnmarshaling->layout, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkPipeline(&cgen_var_1, (VkPipeline*)&forUnmarshaling->basePipelineHandle, 1);
+    vkStream->read((int32_t*)&forUnmarshaling->basePipelineIndex, sizeof(int32_t));
+}
+
 void marshal_VkVertexInputBindingDescription(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkVertexInputBindingDescription* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->binding, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->stride, sizeof(uint32_t));
     vkStream->write((VkVertexInputRate*)&forMarshaling->inputRate, sizeof(VkVertexInputRate));
@@ -1994,8 +2503,10 @@
 
 void unmarshal_VkVertexInputBindingDescription(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkVertexInputBindingDescription* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->binding, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->stride, sizeof(uint32_t));
     vkStream->read((VkVertexInputRate*)&forUnmarshaling->inputRate, sizeof(VkVertexInputRate));
@@ -2003,8 +2514,10 @@
 
 void marshal_VkVertexInputAttributeDescription(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkVertexInputAttributeDescription* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->location, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->binding, sizeof(uint32_t));
     vkStream->write((VkFormat*)&forMarshaling->format, sizeof(VkFormat));
@@ -2013,8 +2526,10 @@
 
 void unmarshal_VkVertexInputAttributeDescription(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkVertexInputAttributeDescription* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->location, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->binding, sizeof(uint32_t));
     vkStream->read((VkFormat*)&forUnmarshaling->format, sizeof(VkFormat));
@@ -2023,67 +2538,78 @@
 
 void marshal_VkPipelineVertexInputStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineVertexInputStateCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineVertexInputStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineVertexInputStateCreateFlags));
     vkStream->write((uint32_t*)&forMarshaling->vertexBindingDescriptionCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->vertexBindingDescriptionCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkVertexInputBindingDescription(vkStream, (const VkVertexInputBindingDescription*)(forMarshaling->pVertexBindingDescriptions + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->vertexBindingDescriptionCount; ++i)
+        {
+            marshal_VkVertexInputBindingDescription(vkStream, rootType, (const VkVertexInputBindingDescription*)(forMarshaling->pVertexBindingDescriptions + i));
+        }
     }
     vkStream->write((uint32_t*)&forMarshaling->vertexAttributeDescriptionCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->vertexAttributeDescriptionCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkVertexInputAttributeDescription(vkStream, (const VkVertexInputAttributeDescription*)(forMarshaling->pVertexAttributeDescriptions + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->vertexAttributeDescriptionCount; ++i)
+        {
+            marshal_VkVertexInputAttributeDescription(vkStream, rootType, (const VkVertexInputAttributeDescription*)(forMarshaling->pVertexAttributeDescriptions + i));
+        }
     }
 }
 
 void unmarshal_VkPipelineVertexInputStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineVertexInputStateCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineVertexInputStateCreateFlags*)&forUnmarshaling->flags, sizeof(VkPipelineVertexInputStateCreateFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->vertexBindingDescriptionCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->vertexBindingDescriptionCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkVertexInputBindingDescription(vkStream, (VkVertexInputBindingDescription*)(forUnmarshaling->pVertexBindingDescriptions + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->vertexBindingDescriptionCount; ++i)
+        {
+            unmarshal_VkVertexInputBindingDescription(vkStream, rootType, (VkVertexInputBindingDescription*)(forUnmarshaling->pVertexBindingDescriptions + i));
+        }
     }
     vkStream->read((uint32_t*)&forUnmarshaling->vertexAttributeDescriptionCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->vertexAttributeDescriptionCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkVertexInputAttributeDescription(vkStream, (VkVertexInputAttributeDescription*)(forUnmarshaling->pVertexAttributeDescriptions + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->vertexAttributeDescriptionCount; ++i)
+        {
+            unmarshal_VkVertexInputAttributeDescription(vkStream, rootType, (VkVertexInputAttributeDescription*)(forUnmarshaling->pVertexAttributeDescriptions + i));
+        }
     }
 }
 
 void marshal_VkPipelineInputAssemblyStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineInputAssemblyStateCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineInputAssemblyStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineInputAssemblyStateCreateFlags));
     vkStream->write((VkPrimitiveTopology*)&forMarshaling->topology, sizeof(VkPrimitiveTopology));
     vkStream->write((VkBool32*)&forMarshaling->primitiveRestartEnable, sizeof(VkBool32));
@@ -2091,17 +2617,16 @@
 
 void unmarshal_VkPipelineInputAssemblyStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineInputAssemblyStateCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineInputAssemblyStateCreateFlags*)&forUnmarshaling->flags, sizeof(VkPipelineInputAssemblyStateCreateFlags));
     vkStream->read((VkPrimitiveTopology*)&forUnmarshaling->topology, sizeof(VkPrimitiveTopology));
     vkStream->read((VkBool32*)&forUnmarshaling->primitiveRestartEnable, sizeof(VkBool32));
@@ -2109,41 +2634,42 @@
 
 void marshal_VkPipelineTessellationStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineTessellationStateCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineTessellationStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineTessellationStateCreateFlags));
     vkStream->write((uint32_t*)&forMarshaling->patchControlPoints, sizeof(uint32_t));
 }
 
 void unmarshal_VkPipelineTessellationStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineTessellationStateCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineTessellationStateCreateFlags*)&forUnmarshaling->flags, sizeof(VkPipelineTessellationStateCreateFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->patchControlPoints, sizeof(uint32_t));
 }
 
 void marshal_VkViewport(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkViewport* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((float*)&forMarshaling->x, sizeof(float));
     vkStream->write((float*)&forMarshaling->y, sizeof(float));
     vkStream->write((float*)&forMarshaling->width, sizeof(float));
@@ -2154,8 +2680,10 @@
 
 void unmarshal_VkViewport(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkViewport* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((float*)&forUnmarshaling->x, sizeof(float));
     vkStream->read((float*)&forUnmarshaling->y, sizeof(float));
     vkStream->read((float*)&forUnmarshaling->width, sizeof(float));
@@ -2164,104 +2692,61 @@
     vkStream->read((float*)&forUnmarshaling->maxDepth, sizeof(float));
 }
 
-void marshal_VkOffset2D(
-    VulkanStreamGuest* vkStream,
-    const VkOffset2D* forMarshaling)
-{
-    vkStream->write((int32_t*)&forMarshaling->x, sizeof(int32_t));
-    vkStream->write((int32_t*)&forMarshaling->y, sizeof(int32_t));
-}
-
-void unmarshal_VkOffset2D(
-    VulkanStreamGuest* vkStream,
-    VkOffset2D* forUnmarshaling)
-{
-    vkStream->read((int32_t*)&forUnmarshaling->x, sizeof(int32_t));
-    vkStream->read((int32_t*)&forUnmarshaling->y, sizeof(int32_t));
-}
-
-void marshal_VkExtent2D(
-    VulkanStreamGuest* vkStream,
-    const VkExtent2D* forMarshaling)
-{
-    vkStream->write((uint32_t*)&forMarshaling->width, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->height, sizeof(uint32_t));
-}
-
-void unmarshal_VkExtent2D(
-    VulkanStreamGuest* vkStream,
-    VkExtent2D* forUnmarshaling)
-{
-    vkStream->read((uint32_t*)&forUnmarshaling->width, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->height, sizeof(uint32_t));
-}
-
-void marshal_VkRect2D(
-    VulkanStreamGuest* vkStream,
-    const VkRect2D* forMarshaling)
-{
-    marshal_VkOffset2D(vkStream, (VkOffset2D*)(&forMarshaling->offset));
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->extent));
-}
-
-void unmarshal_VkRect2D(
-    VulkanStreamGuest* vkStream,
-    VkRect2D* forUnmarshaling)
-{
-    unmarshal_VkOffset2D(vkStream, (VkOffset2D*)(&forUnmarshaling->offset));
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->extent));
-}
-
 void marshal_VkPipelineViewportStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineViewportStateCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineViewportStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineViewportStateCreateFlags));
     vkStream->write((uint32_t*)&forMarshaling->viewportCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_64 = (uint64_t)(uintptr_t)forMarshaling->pViewports;
-    vkStream->putBe64(cgen_var_64);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pViewports;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pViewports)
     {
-        for (uint32_t i = 0; i < (uint32_t)forMarshaling->viewportCount; ++i)
+        if (forMarshaling)
         {
-            marshal_VkViewport(vkStream, (const VkViewport*)(forMarshaling->pViewports + i));
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->viewportCount; ++i)
+            {
+                marshal_VkViewport(vkStream, rootType, (const VkViewport*)(forMarshaling->pViewports + i));
+            }
         }
     }
     vkStream->write((uint32_t*)&forMarshaling->scissorCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_65 = (uint64_t)(uintptr_t)forMarshaling->pScissors;
-    vkStream->putBe64(cgen_var_65);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pScissors;
+    vkStream->putBe64(cgen_var_1);
     if (forMarshaling->pScissors)
     {
-        for (uint32_t i = 0; i < (uint32_t)forMarshaling->scissorCount; ++i)
+        if (forMarshaling)
         {
-            marshal_VkRect2D(vkStream, (const VkRect2D*)(forMarshaling->pScissors + i));
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->scissorCount; ++i)
+            {
+                marshal_VkRect2D(vkStream, rootType, (const VkRect2D*)(forMarshaling->pScissors + i));
+            }
         }
     }
 }
 
 void unmarshal_VkPipelineViewportStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineViewportStateCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineViewportStateCreateFlags*)&forUnmarshaling->flags, sizeof(VkPipelineViewportStateCreateFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->viewportCount, sizeof(uint32_t));
     // WARNING PTR CHECK
@@ -2273,9 +2758,12 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pViewports inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->viewportCount; ++i)
+        if (forUnmarshaling)
         {
-            unmarshal_VkViewport(vkStream, (VkViewport*)(forUnmarshaling->pViewports + i));
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->viewportCount; ++i)
+            {
+                unmarshal_VkViewport(vkStream, rootType, (VkViewport*)(forUnmarshaling->pViewports + i));
+            }
         }
     }
     vkStream->read((uint32_t*)&forUnmarshaling->scissorCount, sizeof(uint32_t));
@@ -2288,25 +2776,28 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pScissors inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->scissorCount; ++i)
+        if (forUnmarshaling)
         {
-            unmarshal_VkRect2D(vkStream, (VkRect2D*)(forUnmarshaling->pScissors + i));
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->scissorCount; ++i)
+            {
+                unmarshal_VkRect2D(vkStream, rootType, (VkRect2D*)(forUnmarshaling->pScissors + i));
+            }
         }
     }
 }
 
 void marshal_VkPipelineRasterizationStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineRasterizationStateCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineRasterizationStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineRasterizationStateCreateFlags));
     vkStream->write((VkBool32*)&forMarshaling->depthClampEnable, sizeof(VkBool32));
     vkStream->write((VkBool32*)&forMarshaling->rasterizerDiscardEnable, sizeof(VkBool32));
@@ -2322,17 +2813,16 @@
 
 void unmarshal_VkPipelineRasterizationStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineRasterizationStateCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineRasterizationStateCreateFlags*)&forUnmarshaling->flags, sizeof(VkPipelineRasterizationStateCreateFlags));
     vkStream->read((VkBool32*)&forUnmarshaling->depthClampEnable, sizeof(VkBool32));
     vkStream->read((VkBool32*)&forUnmarshaling->rasterizerDiscardEnable, sizeof(VkBool32));
@@ -2348,23 +2838,23 @@
 
 void marshal_VkPipelineMultisampleStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineMultisampleStateCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineMultisampleStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineMultisampleStateCreateFlags));
     vkStream->write((VkSampleCountFlagBits*)&forMarshaling->rasterizationSamples, sizeof(VkSampleCountFlagBits));
     vkStream->write((VkBool32*)&forMarshaling->sampleShadingEnable, sizeof(VkBool32));
     vkStream->write((float*)&forMarshaling->minSampleShading, sizeof(float));
     // WARNING PTR CHECK
-    uint64_t cgen_var_68 = (uint64_t)(uintptr_t)forMarshaling->pSampleMask;
-    vkStream->putBe64(cgen_var_68);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pSampleMask;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pSampleMask)
     {
         vkStream->write((const VkSampleMask*)forMarshaling->pSampleMask, (((forMarshaling->rasterizationSamples) + 31) / 32) * sizeof(const VkSampleMask));
@@ -2375,17 +2865,16 @@
 
 void unmarshal_VkPipelineMultisampleStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineMultisampleStateCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineMultisampleStateCreateFlags*)&forUnmarshaling->flags, sizeof(VkPipelineMultisampleStateCreateFlags));
     vkStream->read((VkSampleCountFlagBits*)&forUnmarshaling->rasterizationSamples, sizeof(VkSampleCountFlagBits));
     vkStream->read((VkBool32*)&forUnmarshaling->sampleShadingEnable, sizeof(VkBool32));
@@ -2407,8 +2896,10 @@
 
 void marshal_VkStencilOpState(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkStencilOpState* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStencilOp*)&forMarshaling->failOp, sizeof(VkStencilOp));
     vkStream->write((VkStencilOp*)&forMarshaling->passOp, sizeof(VkStencilOp));
     vkStream->write((VkStencilOp*)&forMarshaling->depthFailOp, sizeof(VkStencilOp));
@@ -2420,8 +2911,10 @@
 
 void unmarshal_VkStencilOpState(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkStencilOpState* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStencilOp*)&forUnmarshaling->failOp, sizeof(VkStencilOp));
     vkStream->read((VkStencilOp*)&forUnmarshaling->passOp, sizeof(VkStencilOp));
     vkStream->read((VkStencilOp*)&forUnmarshaling->depthFailOp, sizeof(VkStencilOp));
@@ -2433,57 +2926,58 @@
 
 void marshal_VkPipelineDepthStencilStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineDepthStencilStateCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineDepthStencilStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineDepthStencilStateCreateFlags));
     vkStream->write((VkBool32*)&forMarshaling->depthTestEnable, sizeof(VkBool32));
     vkStream->write((VkBool32*)&forMarshaling->depthWriteEnable, sizeof(VkBool32));
     vkStream->write((VkCompareOp*)&forMarshaling->depthCompareOp, sizeof(VkCompareOp));
     vkStream->write((VkBool32*)&forMarshaling->depthBoundsTestEnable, sizeof(VkBool32));
     vkStream->write((VkBool32*)&forMarshaling->stencilTestEnable, sizeof(VkBool32));
-    marshal_VkStencilOpState(vkStream, (VkStencilOpState*)(&forMarshaling->front));
-    marshal_VkStencilOpState(vkStream, (VkStencilOpState*)(&forMarshaling->back));
+    marshal_VkStencilOpState(vkStream, rootType, (VkStencilOpState*)(&forMarshaling->front));
+    marshal_VkStencilOpState(vkStream, rootType, (VkStencilOpState*)(&forMarshaling->back));
     vkStream->write((float*)&forMarshaling->minDepthBounds, sizeof(float));
     vkStream->write((float*)&forMarshaling->maxDepthBounds, sizeof(float));
 }
 
 void unmarshal_VkPipelineDepthStencilStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineDepthStencilStateCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineDepthStencilStateCreateFlags*)&forUnmarshaling->flags, sizeof(VkPipelineDepthStencilStateCreateFlags));
     vkStream->read((VkBool32*)&forUnmarshaling->depthTestEnable, sizeof(VkBool32));
     vkStream->read((VkBool32*)&forUnmarshaling->depthWriteEnable, sizeof(VkBool32));
     vkStream->read((VkCompareOp*)&forUnmarshaling->depthCompareOp, sizeof(VkCompareOp));
     vkStream->read((VkBool32*)&forUnmarshaling->depthBoundsTestEnable, sizeof(VkBool32));
     vkStream->read((VkBool32*)&forUnmarshaling->stencilTestEnable, sizeof(VkBool32));
-    unmarshal_VkStencilOpState(vkStream, (VkStencilOpState*)(&forUnmarshaling->front));
-    unmarshal_VkStencilOpState(vkStream, (VkStencilOpState*)(&forUnmarshaling->back));
+    unmarshal_VkStencilOpState(vkStream, rootType, (VkStencilOpState*)(&forUnmarshaling->front));
+    unmarshal_VkStencilOpState(vkStream, rootType, (VkStencilOpState*)(&forUnmarshaling->back));
     vkStream->read((float*)&forUnmarshaling->minDepthBounds, sizeof(float));
     vkStream->read((float*)&forUnmarshaling->maxDepthBounds, sizeof(float));
 }
 
 void marshal_VkPipelineColorBlendAttachmentState(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineColorBlendAttachmentState* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkBool32*)&forMarshaling->blendEnable, sizeof(VkBool32));
     vkStream->write((VkBlendFactor*)&forMarshaling->srcColorBlendFactor, sizeof(VkBlendFactor));
     vkStream->write((VkBlendFactor*)&forMarshaling->dstColorBlendFactor, sizeof(VkBlendFactor));
@@ -2496,8 +2990,10 @@
 
 void unmarshal_VkPipelineColorBlendAttachmentState(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineColorBlendAttachmentState* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkBool32*)&forUnmarshaling->blendEnable, sizeof(VkBool32));
     vkStream->read((VkBlendFactor*)&forUnmarshaling->srcColorBlendFactor, sizeof(VkBlendFactor));
     vkStream->read((VkBlendFactor*)&forUnmarshaling->dstColorBlendFactor, sizeof(VkBlendFactor));
@@ -2510,63 +3006,68 @@
 
 void marshal_VkPipelineColorBlendStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineColorBlendStateCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineColorBlendStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineColorBlendStateCreateFlags));
     vkStream->write((VkBool32*)&forMarshaling->logicOpEnable, sizeof(VkBool32));
     vkStream->write((VkLogicOp*)&forMarshaling->logicOp, sizeof(VkLogicOp));
     vkStream->write((uint32_t*)&forMarshaling->attachmentCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->attachmentCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkPipelineColorBlendAttachmentState(vkStream, (const VkPipelineColorBlendAttachmentState*)(forMarshaling->pAttachments + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->attachmentCount; ++i)
+        {
+            marshal_VkPipelineColorBlendAttachmentState(vkStream, rootType, (const VkPipelineColorBlendAttachmentState*)(forMarshaling->pAttachments + i));
+        }
     }
     vkStream->write((float*)forMarshaling->blendConstants, 4 * sizeof(float));
 }
 
 void unmarshal_VkPipelineColorBlendStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineColorBlendStateCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineColorBlendStateCreateFlags*)&forUnmarshaling->flags, sizeof(VkPipelineColorBlendStateCreateFlags));
     vkStream->read((VkBool32*)&forUnmarshaling->logicOpEnable, sizeof(VkBool32));
     vkStream->read((VkLogicOp*)&forUnmarshaling->logicOp, sizeof(VkLogicOp));
     vkStream->read((uint32_t*)&forUnmarshaling->attachmentCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->attachmentCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkPipelineColorBlendAttachmentState(vkStream, (VkPipelineColorBlendAttachmentState*)(forUnmarshaling->pAttachments + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->attachmentCount; ++i)
+        {
+            unmarshal_VkPipelineColorBlendAttachmentState(vkStream, rootType, (VkPipelineColorBlendAttachmentState*)(forUnmarshaling->pAttachments + i));
+        }
     }
     vkStream->read((float*)forUnmarshaling->blendConstants, 4 * sizeof(float));
 }
 
 void marshal_VkPipelineDynamicStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineDynamicStateCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineDynamicStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineDynamicStateCreateFlags));
     vkStream->write((uint32_t*)&forMarshaling->dynamicStateCount, sizeof(uint32_t));
     vkStream->write((const VkDynamicState*)forMarshaling->pDynamicStates, forMarshaling->dynamicStateCount * sizeof(const VkDynamicState));
@@ -2574,17 +3075,16 @@
 
 void unmarshal_VkPipelineDynamicStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineDynamicStateCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineDynamicStateCreateFlags*)&forUnmarshaling->flags, sizeof(VkPipelineDynamicStateCreateFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->dynamicStateCount, sizeof(uint32_t));
     vkStream->read((VkDynamicState*)forUnmarshaling->pDynamicStates, forUnmarshaling->dynamicStateCount * sizeof(const VkDynamicState));
@@ -2592,140 +3092,145 @@
 
 void marshal_VkGraphicsPipelineCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkGraphicsPipelineCreateInfo* forMarshaling)
 {
+    (void)rootType;
     uint32_t hasRasterization = 1;
     if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT)
     {
         hasRasterization = (((0 == forMarshaling->pRasterizationState)) ? (0) : (!((*(forMarshaling->pRasterizationState)).rasterizerDiscardEnable)));
-        uint32_t cgen_var_70 = (uint32_t)hasRasterization;
-        vkStream->putBe32(cgen_var_70);
+        uint32_t cgen_var_0 = (uint32_t)hasRasterization;
+        vkStream->putBe32(cgen_var_0);
     }
     uint32_t hasTessellation = 1;
     if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT)
     {
         hasTessellation = arrayany(forMarshaling->pStages, 0, forMarshaling->stageCount, [](VkPipelineShaderStageCreateInfo s) { return ((s.stage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) || (s.stage == VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)); });
-        uint32_t cgen_var_71 = (uint32_t)hasTessellation;
-        vkStream->putBe32(cgen_var_71);
+        uint32_t cgen_var_0 = (uint32_t)hasTessellation;
+        vkStream->putBe32(cgen_var_0);
     }
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineCreateFlags));
     vkStream->write((uint32_t*)&forMarshaling->stageCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->stageCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkPipelineShaderStageCreateInfo(vkStream, (const VkPipelineShaderStageCreateInfo*)(forMarshaling->pStages + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->stageCount; ++i)
+        {
+            marshal_VkPipelineShaderStageCreateInfo(vkStream, rootType, (const VkPipelineShaderStageCreateInfo*)(forMarshaling->pStages + i));
+        }
     }
     // WARNING PTR CHECK
     if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT)
     {
-        uint64_t cgen_var_72 = (uint64_t)(uintptr_t)forMarshaling->pVertexInputState;
-        vkStream->putBe64(cgen_var_72);
+        uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pVertexInputState;
+        vkStream->putBe64(cgen_var_0);
     }
     if ((!(vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || forMarshaling->pVertexInputState))
     {
-        marshal_VkPipelineVertexInputStateCreateInfo(vkStream, (const VkPipelineVertexInputStateCreateInfo*)(forMarshaling->pVertexInputState));
+        marshal_VkPipelineVertexInputStateCreateInfo(vkStream, rootType, (const VkPipelineVertexInputStateCreateInfo*)(forMarshaling->pVertexInputState));
     }
     // WARNING PTR CHECK
     if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT)
     {
-        uint64_t cgen_var_73 = (uint64_t)(uintptr_t)forMarshaling->pInputAssemblyState;
-        vkStream->putBe64(cgen_var_73);
+        uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pInputAssemblyState;
+        vkStream->putBe64(cgen_var_0);
     }
     if ((!(vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || forMarshaling->pInputAssemblyState))
     {
-        marshal_VkPipelineInputAssemblyStateCreateInfo(vkStream, (const VkPipelineInputAssemblyStateCreateInfo*)(forMarshaling->pInputAssemblyState));
+        marshal_VkPipelineInputAssemblyStateCreateInfo(vkStream, rootType, (const VkPipelineInputAssemblyStateCreateInfo*)(forMarshaling->pInputAssemblyState));
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_74 = (uint64_t)(uintptr_t)forMarshaling->pTessellationState;
-    vkStream->putBe64(cgen_var_74);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pTessellationState;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pTessellationState)
     {
         if (hasTessellation)
         {
-            marshal_VkPipelineTessellationStateCreateInfo(vkStream, (const VkPipelineTessellationStateCreateInfo*)(forMarshaling->pTessellationState));
+            marshal_VkPipelineTessellationStateCreateInfo(vkStream, rootType, (const VkPipelineTessellationStateCreateInfo*)(forMarshaling->pTessellationState));
         }
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_75 = (uint64_t)(uintptr_t)forMarshaling->pViewportState;
-    vkStream->putBe64(cgen_var_75);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pViewportState;
+    vkStream->putBe64(cgen_var_1);
     if (forMarshaling->pViewportState)
     {
         if (hasRasterization)
         {
-            marshal_VkPipelineViewportStateCreateInfo(vkStream, (const VkPipelineViewportStateCreateInfo*)(forMarshaling->pViewportState));
+            marshal_VkPipelineViewportStateCreateInfo(vkStream, rootType, (const VkPipelineViewportStateCreateInfo*)(forMarshaling->pViewportState));
         }
     }
     // WARNING PTR CHECK
     if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT)
     {
-        uint64_t cgen_var_76 = (uint64_t)(uintptr_t)forMarshaling->pRasterizationState;
-        vkStream->putBe64(cgen_var_76);
+        uint64_t cgen_var_1_0 = (uint64_t)(uintptr_t)forMarshaling->pRasterizationState;
+        vkStream->putBe64(cgen_var_1_0);
     }
     if ((!(vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || forMarshaling->pRasterizationState))
     {
-        marshal_VkPipelineRasterizationStateCreateInfo(vkStream, (const VkPipelineRasterizationStateCreateInfo*)(forMarshaling->pRasterizationState));
+        marshal_VkPipelineRasterizationStateCreateInfo(vkStream, rootType, (const VkPipelineRasterizationStateCreateInfo*)(forMarshaling->pRasterizationState));
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_77 = (uint64_t)(uintptr_t)forMarshaling->pMultisampleState;
-    vkStream->putBe64(cgen_var_77);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)forMarshaling->pMultisampleState;
+    vkStream->putBe64(cgen_var_2);
     if (forMarshaling->pMultisampleState)
     {
         if (hasRasterization)
         {
-            marshal_VkPipelineMultisampleStateCreateInfo(vkStream, (const VkPipelineMultisampleStateCreateInfo*)(forMarshaling->pMultisampleState));
+            marshal_VkPipelineMultisampleStateCreateInfo(vkStream, rootType, (const VkPipelineMultisampleStateCreateInfo*)(forMarshaling->pMultisampleState));
         }
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_78 = (uint64_t)(uintptr_t)forMarshaling->pDepthStencilState;
-    vkStream->putBe64(cgen_var_78);
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)forMarshaling->pDepthStencilState;
+    vkStream->putBe64(cgen_var_3);
     if (forMarshaling->pDepthStencilState)
     {
         if (hasRasterization)
         {
-            marshal_VkPipelineDepthStencilStateCreateInfo(vkStream, (const VkPipelineDepthStencilStateCreateInfo*)(forMarshaling->pDepthStencilState));
+            marshal_VkPipelineDepthStencilStateCreateInfo(vkStream, rootType, (const VkPipelineDepthStencilStateCreateInfo*)(forMarshaling->pDepthStencilState));
         }
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_79 = (uint64_t)(uintptr_t)forMarshaling->pColorBlendState;
-    vkStream->putBe64(cgen_var_79);
+    uint64_t cgen_var_4 = (uint64_t)(uintptr_t)forMarshaling->pColorBlendState;
+    vkStream->putBe64(cgen_var_4);
     if (forMarshaling->pColorBlendState)
     {
         if (hasRasterization)
         {
-            marshal_VkPipelineColorBlendStateCreateInfo(vkStream, (const VkPipelineColorBlendStateCreateInfo*)(forMarshaling->pColorBlendState));
+            marshal_VkPipelineColorBlendStateCreateInfo(vkStream, rootType, (const VkPipelineColorBlendStateCreateInfo*)(forMarshaling->pColorBlendState));
         }
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_80 = (uint64_t)(uintptr_t)forMarshaling->pDynamicState;
-    vkStream->putBe64(cgen_var_80);
+    uint64_t cgen_var_5 = (uint64_t)(uintptr_t)forMarshaling->pDynamicState;
+    vkStream->putBe64(cgen_var_5);
     if (forMarshaling->pDynamicState)
     {
-        marshal_VkPipelineDynamicStateCreateInfo(vkStream, (const VkPipelineDynamicStateCreateInfo*)(forMarshaling->pDynamicState));
+        marshal_VkPipelineDynamicStateCreateInfo(vkStream, rootType, (const VkPipelineDynamicStateCreateInfo*)(forMarshaling->pDynamicState));
     }
-    uint64_t cgen_var_81;
-    vkStream->handleMapping()->mapHandles_VkPipelineLayout_u64(&forMarshaling->layout, &cgen_var_81, 1);
-    vkStream->write((uint64_t*)&cgen_var_81, 1 * 8);
-    uint64_t cgen_var_82;
-    vkStream->handleMapping()->mapHandles_VkRenderPass_u64(&forMarshaling->renderPass, &cgen_var_82, 1);
-    vkStream->write((uint64_t*)&cgen_var_82, 1 * 8);
+    uint64_t cgen_var_6;
+    vkStream->handleMapping()->mapHandles_VkPipelineLayout_u64(&forMarshaling->layout, &cgen_var_6, 1);
+    vkStream->write((uint64_t*)&cgen_var_6, 1 * 8);
+    uint64_t cgen_var_7;
+    vkStream->handleMapping()->mapHandles_VkRenderPass_u64(&forMarshaling->renderPass, &cgen_var_7, 1);
+    vkStream->write((uint64_t*)&cgen_var_7, 1 * 8);
     vkStream->write((uint32_t*)&forMarshaling->subpass, sizeof(uint32_t));
-    uint64_t cgen_var_83;
-    vkStream->handleMapping()->mapHandles_VkPipeline_u64(&forMarshaling->basePipelineHandle, &cgen_var_83, 1);
-    vkStream->write((uint64_t*)&cgen_var_83, 1 * 8);
+    uint64_t cgen_var_8;
+    vkStream->handleMapping()->mapHandles_VkPipeline_u64(&forMarshaling->basePipelineHandle, &cgen_var_8, 1);
+    vkStream->write((uint64_t*)&cgen_var_8, 1 * 8);
     vkStream->write((int32_t*)&forMarshaling->basePipelineIndex, sizeof(int32_t));
 }
 
 void unmarshal_VkGraphicsPipelineCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkGraphicsPipelineCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     uint32_t hasRasterization = 1;
     if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT)
     {
@@ -2737,19 +3242,19 @@
         hasTessellation = (const uint32_t)vkStream->getBe32();
     }
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineCreateFlags*)&forUnmarshaling->flags, sizeof(VkPipelineCreateFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->stageCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->stageCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkPipelineShaderStageCreateInfo(vkStream, (VkPipelineShaderStageCreateInfo*)(forUnmarshaling->pStages + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->stageCount; ++i)
+        {
+            unmarshal_VkPipelineShaderStageCreateInfo(vkStream, rootType, (VkPipelineShaderStageCreateInfo*)(forUnmarshaling->pStages + i));
+        }
     }
     // WARNING PTR CHECK
     const VkPipelineVertexInputStateCreateInfo* check_pVertexInputState;
@@ -2759,7 +3264,7 @@
     }
     if ((!(vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || forUnmarshaling->pVertexInputState))
     {
-        unmarshal_VkPipelineVertexInputStateCreateInfo(vkStream, (VkPipelineVertexInputStateCreateInfo*)(forUnmarshaling->pVertexInputState));
+        unmarshal_VkPipelineVertexInputStateCreateInfo(vkStream, rootType, (VkPipelineVertexInputStateCreateInfo*)(forUnmarshaling->pVertexInputState));
     }
     // WARNING PTR CHECK
     const VkPipelineInputAssemblyStateCreateInfo* check_pInputAssemblyState;
@@ -2769,7 +3274,7 @@
     }
     if ((!(vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || forUnmarshaling->pInputAssemblyState))
     {
-        unmarshal_VkPipelineInputAssemblyStateCreateInfo(vkStream, (VkPipelineInputAssemblyStateCreateInfo*)(forUnmarshaling->pInputAssemblyState));
+        unmarshal_VkPipelineInputAssemblyStateCreateInfo(vkStream, rootType, (VkPipelineInputAssemblyStateCreateInfo*)(forUnmarshaling->pInputAssemblyState));
     }
     // WARNING PTR CHECK
     const VkPipelineTessellationStateCreateInfo* check_pTessellationState;
@@ -2782,7 +3287,7 @@
         }
         if (hasTessellation)
         {
-            unmarshal_VkPipelineTessellationStateCreateInfo(vkStream, (VkPipelineTessellationStateCreateInfo*)(forUnmarshaling->pTessellationState));
+            unmarshal_VkPipelineTessellationStateCreateInfo(vkStream, rootType, (VkPipelineTessellationStateCreateInfo*)(forUnmarshaling->pTessellationState));
         }
         else
         {
@@ -2800,7 +3305,7 @@
         }
         if (hasRasterization)
         {
-            unmarshal_VkPipelineViewportStateCreateInfo(vkStream, (VkPipelineViewportStateCreateInfo*)(forUnmarshaling->pViewportState));
+            unmarshal_VkPipelineViewportStateCreateInfo(vkStream, rootType, (VkPipelineViewportStateCreateInfo*)(forUnmarshaling->pViewportState));
         }
         else
         {
@@ -2815,7 +3320,7 @@
     }
     if ((!(vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || forUnmarshaling->pRasterizationState))
     {
-        unmarshal_VkPipelineRasterizationStateCreateInfo(vkStream, (VkPipelineRasterizationStateCreateInfo*)(forUnmarshaling->pRasterizationState));
+        unmarshal_VkPipelineRasterizationStateCreateInfo(vkStream, rootType, (VkPipelineRasterizationStateCreateInfo*)(forUnmarshaling->pRasterizationState));
     }
     // WARNING PTR CHECK
     const VkPipelineMultisampleStateCreateInfo* check_pMultisampleState;
@@ -2828,7 +3333,7 @@
         }
         if (hasRasterization)
         {
-            unmarshal_VkPipelineMultisampleStateCreateInfo(vkStream, (VkPipelineMultisampleStateCreateInfo*)(forUnmarshaling->pMultisampleState));
+            unmarshal_VkPipelineMultisampleStateCreateInfo(vkStream, rootType, (VkPipelineMultisampleStateCreateInfo*)(forUnmarshaling->pMultisampleState));
         }
         else
         {
@@ -2846,7 +3351,7 @@
         }
         if (hasRasterization)
         {
-            unmarshal_VkPipelineDepthStencilStateCreateInfo(vkStream, (VkPipelineDepthStencilStateCreateInfo*)(forUnmarshaling->pDepthStencilState));
+            unmarshal_VkPipelineDepthStencilStateCreateInfo(vkStream, rootType, (VkPipelineDepthStencilStateCreateInfo*)(forUnmarshaling->pDepthStencilState));
         }
         else
         {
@@ -2864,7 +3369,7 @@
         }
         if (hasRasterization)
         {
-            unmarshal_VkPipelineColorBlendStateCreateInfo(vkStream, (VkPipelineColorBlendStateCreateInfo*)(forUnmarshaling->pColorBlendState));
+            unmarshal_VkPipelineColorBlendStateCreateInfo(vkStream, rootType, (VkPipelineColorBlendStateCreateInfo*)(forUnmarshaling->pColorBlendState));
         }
         else
         {
@@ -2880,72 +3385,27 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pDynamicState inconsistent between guest and host\n");
         }
-        unmarshal_VkPipelineDynamicStateCreateInfo(vkStream, (VkPipelineDynamicStateCreateInfo*)(forUnmarshaling->pDynamicState));
+        unmarshal_VkPipelineDynamicStateCreateInfo(vkStream, rootType, (VkPipelineDynamicStateCreateInfo*)(forUnmarshaling->pDynamicState));
     }
-    uint64_t cgen_var_95;
-    vkStream->read((uint64_t*)&cgen_var_95, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkPipelineLayout(&cgen_var_95, (VkPipelineLayout*)&forUnmarshaling->layout, 1);
-    uint64_t cgen_var_96;
-    vkStream->read((uint64_t*)&cgen_var_96, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkRenderPass(&cgen_var_96, (VkRenderPass*)&forUnmarshaling->renderPass, 1);
+    uint64_t cgen_var_6;
+    vkStream->read((uint64_t*)&cgen_var_6, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkPipelineLayout(&cgen_var_6, (VkPipelineLayout*)&forUnmarshaling->layout, 1);
+    uint64_t cgen_var_7;
+    vkStream->read((uint64_t*)&cgen_var_7, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkRenderPass(&cgen_var_7, (VkRenderPass*)&forUnmarshaling->renderPass, 1);
     vkStream->read((uint32_t*)&forUnmarshaling->subpass, sizeof(uint32_t));
-    uint64_t cgen_var_97;
-    vkStream->read((uint64_t*)&cgen_var_97, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkPipeline(&cgen_var_97, (VkPipeline*)&forUnmarshaling->basePipelineHandle, 1);
-    vkStream->read((int32_t*)&forUnmarshaling->basePipelineIndex, sizeof(int32_t));
-}
-
-void marshal_VkComputePipelineCreateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkComputePipelineCreateInfo* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkPipelineCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineCreateFlags));
-    marshal_VkPipelineShaderStageCreateInfo(vkStream, (VkPipelineShaderStageCreateInfo*)(&forMarshaling->stage));
-    uint64_t cgen_var_98;
-    vkStream->handleMapping()->mapHandles_VkPipelineLayout_u64(&forMarshaling->layout, &cgen_var_98, 1);
-    vkStream->write((uint64_t*)&cgen_var_98, 1 * 8);
-    uint64_t cgen_var_99;
-    vkStream->handleMapping()->mapHandles_VkPipeline_u64(&forMarshaling->basePipelineHandle, &cgen_var_99, 1);
-    vkStream->write((uint64_t*)&cgen_var_99, 1 * 8);
-    vkStream->write((int32_t*)&forMarshaling->basePipelineIndex, sizeof(int32_t));
-}
-
-void unmarshal_VkComputePipelineCreateInfo(
-    VulkanStreamGuest* vkStream,
-    VkComputePipelineCreateInfo* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkPipelineCreateFlags*)&forUnmarshaling->flags, sizeof(VkPipelineCreateFlags));
-    unmarshal_VkPipelineShaderStageCreateInfo(vkStream, (VkPipelineShaderStageCreateInfo*)(&forUnmarshaling->stage));
-    uint64_t cgen_var_100;
-    vkStream->read((uint64_t*)&cgen_var_100, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkPipelineLayout(&cgen_var_100, (VkPipelineLayout*)&forUnmarshaling->layout, 1);
-    uint64_t cgen_var_101;
-    vkStream->read((uint64_t*)&cgen_var_101, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkPipeline(&cgen_var_101, (VkPipeline*)&forUnmarshaling->basePipelineHandle, 1);
+    uint64_t cgen_var_8;
+    vkStream->read((uint64_t*)&cgen_var_8, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkPipeline(&cgen_var_8, (VkPipeline*)&forUnmarshaling->basePipelineHandle, 1);
     vkStream->read((int32_t*)&forUnmarshaling->basePipelineIndex, sizeof(int32_t));
 }
 
 void marshal_VkPushConstantRange(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPushConstantRange* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkShaderStageFlags*)&forMarshaling->stageFlags, sizeof(VkShaderStageFlags));
     vkStream->write((uint32_t*)&forMarshaling->offset, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->size, sizeof(uint32_t));
@@ -2953,8 +3413,10 @@
 
 void unmarshal_VkPushConstantRange(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPushConstantRange* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkShaderStageFlags*)&forUnmarshaling->stageFlags, sizeof(VkShaderStageFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->offset, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->size, sizeof(uint32_t));
@@ -2962,73 +3424,78 @@
 
 void marshal_VkPipelineLayoutCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineLayoutCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineLayoutCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineLayoutCreateFlags));
     vkStream->write((uint32_t*)&forMarshaling->setLayoutCount, sizeof(uint32_t));
     if (forMarshaling->setLayoutCount)
     {
-        uint64_t* cgen_var_102;
-        vkStream->alloc((void**)&cgen_var_102, forMarshaling->setLayoutCount * 8);
-        vkStream->handleMapping()->mapHandles_VkDescriptorSetLayout_u64(forMarshaling->pSetLayouts, cgen_var_102, forMarshaling->setLayoutCount);
-        vkStream->write((uint64_t*)cgen_var_102, forMarshaling->setLayoutCount * 8);
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forMarshaling->setLayoutCount * 8);
+        vkStream->handleMapping()->mapHandles_VkDescriptorSetLayout_u64(forMarshaling->pSetLayouts, cgen_var_0, forMarshaling->setLayoutCount);
+        vkStream->write((uint64_t*)cgen_var_0, forMarshaling->setLayoutCount * 8);
     }
     vkStream->write((uint32_t*)&forMarshaling->pushConstantRangeCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->pushConstantRangeCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkPushConstantRange(vkStream, (const VkPushConstantRange*)(forMarshaling->pPushConstantRanges + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->pushConstantRangeCount; ++i)
+        {
+            marshal_VkPushConstantRange(vkStream, rootType, (const VkPushConstantRange*)(forMarshaling->pPushConstantRanges + i));
+        }
     }
 }
 
 void unmarshal_VkPipelineLayoutCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineLayoutCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineLayoutCreateFlags*)&forUnmarshaling->flags, sizeof(VkPipelineLayoutCreateFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->setLayoutCount, sizeof(uint32_t));
     if (forUnmarshaling->setLayoutCount)
     {
-        uint64_t* cgen_var_103;
-        vkStream->alloc((void**)&cgen_var_103, forUnmarshaling->setLayoutCount * 8);
-        vkStream->read((uint64_t*)cgen_var_103, forUnmarshaling->setLayoutCount * 8);
-        vkStream->handleMapping()->mapHandles_u64_VkDescriptorSetLayout(cgen_var_103, (VkDescriptorSetLayout*)forUnmarshaling->pSetLayouts, forUnmarshaling->setLayoutCount);
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forUnmarshaling->setLayoutCount * 8);
+        vkStream->read((uint64_t*)cgen_var_0, forUnmarshaling->setLayoutCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkDescriptorSetLayout(cgen_var_0, (VkDescriptorSetLayout*)forUnmarshaling->pSetLayouts, forUnmarshaling->setLayoutCount);
     }
     vkStream->read((uint32_t*)&forUnmarshaling->pushConstantRangeCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->pushConstantRangeCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkPushConstantRange(vkStream, (VkPushConstantRange*)(forUnmarshaling->pPushConstantRanges + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->pushConstantRangeCount; ++i)
+        {
+            unmarshal_VkPushConstantRange(vkStream, rootType, (VkPushConstantRange*)(forUnmarshaling->pPushConstantRanges + i));
+        }
     }
 }
 
 void marshal_VkSamplerCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSamplerCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkSamplerCreateFlags*)&forMarshaling->flags, sizeof(VkSamplerCreateFlags));
     vkStream->write((VkFilter*)&forMarshaling->magFilter, sizeof(VkFilter));
     vkStream->write((VkFilter*)&forMarshaling->minFilter, sizeof(VkFilter));
@@ -3049,17 +3516,16 @@
 
 void unmarshal_VkSamplerCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSamplerCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkSamplerCreateFlags*)&forUnmarshaling->flags, sizeof(VkSamplerCreateFlags));
     vkStream->read((VkFilter*)&forUnmarshaling->magFilter, sizeof(VkFilter));
     vkStream->read((VkFilter*)&forUnmarshaling->minFilter, sizeof(VkFilter));
@@ -3078,33 +3544,261 @@
     vkStream->read((VkBool32*)&forUnmarshaling->unnormalizedCoordinates, sizeof(VkBool32));
 }
 
+void marshal_VkCopyDescriptorSet(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyDescriptorSet* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkDescriptorSet_u64(&forMarshaling->srcSet, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((uint32_t*)&forMarshaling->srcBinding, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->srcArrayElement, sizeof(uint32_t));
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkDescriptorSet_u64(&forMarshaling->dstSet, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->write((uint32_t*)&forMarshaling->dstBinding, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->dstArrayElement, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->descriptorCount, sizeof(uint32_t));
+}
+
+void unmarshal_VkCopyDescriptorSet(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCopyDescriptorSet* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDescriptorSet(&cgen_var_0, (VkDescriptorSet*)&forUnmarshaling->srcSet, 1);
+    vkStream->read((uint32_t*)&forUnmarshaling->srcBinding, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->srcArrayElement, sizeof(uint32_t));
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDescriptorSet(&cgen_var_1, (VkDescriptorSet*)&forUnmarshaling->dstSet, 1);
+    vkStream->read((uint32_t*)&forUnmarshaling->dstBinding, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->dstArrayElement, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->descriptorCount, sizeof(uint32_t));
+}
+
+void marshal_VkDescriptorBufferInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorBufferInfo* forMarshaling)
+{
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
+    vkStream->write((VkDeviceSize*)&forMarshaling->range, sizeof(VkDeviceSize));
+}
+
+void unmarshal_VkDescriptorBufferInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorBufferInfo* forUnmarshaling)
+{
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_0, (VkBuffer*)&forUnmarshaling->buffer, 1);
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->offset, sizeof(VkDeviceSize));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->range, sizeof(VkDeviceSize));
+}
+
+void marshal_VkDescriptorImageInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorImageInfo* forMarshaling)
+{
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkSampler_u64(&forMarshaling->sampler, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkImageView_u64(&forMarshaling->imageView, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->write((VkImageLayout*)&forMarshaling->imageLayout, sizeof(VkImageLayout));
+}
+
+void unmarshal_VkDescriptorImageInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorImageInfo* forUnmarshaling)
+{
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkSampler(&cgen_var_0, (VkSampler*)&forUnmarshaling->sampler, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImageView(&cgen_var_1, (VkImageView*)&forUnmarshaling->imageView, 1);
+    vkStream->read((VkImageLayout*)&forUnmarshaling->imageLayout, sizeof(VkImageLayout));
+}
+
+void marshal_VkDescriptorPoolSize(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorPoolSize* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkDescriptorType*)&forMarshaling->type, sizeof(VkDescriptorType));
+    vkStream->write((uint32_t*)&forMarshaling->descriptorCount, sizeof(uint32_t));
+}
+
+void unmarshal_VkDescriptorPoolSize(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorPoolSize* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkDescriptorType*)&forUnmarshaling->type, sizeof(VkDescriptorType));
+    vkStream->read((uint32_t*)&forUnmarshaling->descriptorCount, sizeof(uint32_t));
+}
+
+void marshal_VkDescriptorPoolCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorPoolCreateInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDescriptorPoolCreateFlags*)&forMarshaling->flags, sizeof(VkDescriptorPoolCreateFlags));
+    vkStream->write((uint32_t*)&forMarshaling->maxSets, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->poolSizeCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->poolSizeCount; ++i)
+        {
+            marshal_VkDescriptorPoolSize(vkStream, rootType, (const VkDescriptorPoolSize*)(forMarshaling->pPoolSizes + i));
+        }
+    }
+}
+
+void unmarshal_VkDescriptorPoolCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorPoolCreateInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDescriptorPoolCreateFlags*)&forUnmarshaling->flags, sizeof(VkDescriptorPoolCreateFlags));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxSets, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->poolSizeCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->poolSizeCount; ++i)
+        {
+            unmarshal_VkDescriptorPoolSize(vkStream, rootType, (VkDescriptorPoolSize*)(forUnmarshaling->pPoolSizes + i));
+        }
+    }
+}
+
+void marshal_VkDescriptorSetAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetAllocateInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkDescriptorPool_u64(&forMarshaling->descriptorPool, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((uint32_t*)&forMarshaling->descriptorSetCount, sizeof(uint32_t));
+    if (forMarshaling->descriptorSetCount)
+    {
+        uint64_t* cgen_var_1;
+        vkStream->alloc((void**)&cgen_var_1, forMarshaling->descriptorSetCount * 8);
+        vkStream->handleMapping()->mapHandles_VkDescriptorSetLayout_u64(forMarshaling->pSetLayouts, cgen_var_1, forMarshaling->descriptorSetCount);
+        vkStream->write((uint64_t*)cgen_var_1, forMarshaling->descriptorSetCount * 8);
+    }
+}
+
+void unmarshal_VkDescriptorSetAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorSetAllocateInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDescriptorPool(&cgen_var_0, (VkDescriptorPool*)&forUnmarshaling->descriptorPool, 1);
+    vkStream->read((uint32_t*)&forUnmarshaling->descriptorSetCount, sizeof(uint32_t));
+    if (forUnmarshaling->descriptorSetCount)
+    {
+        uint64_t* cgen_var_1;
+        vkStream->alloc((void**)&cgen_var_1, forUnmarshaling->descriptorSetCount * 8);
+        vkStream->read((uint64_t*)cgen_var_1, forUnmarshaling->descriptorSetCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkDescriptorSetLayout(cgen_var_1, (VkDescriptorSetLayout*)forUnmarshaling->pSetLayouts, forUnmarshaling->descriptorSetCount);
+    }
+}
+
 void marshal_VkDescriptorSetLayoutBinding(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDescriptorSetLayoutBinding* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->binding, sizeof(uint32_t));
     vkStream->write((VkDescriptorType*)&forMarshaling->descriptorType, sizeof(VkDescriptorType));
     vkStream->write((uint32_t*)&forMarshaling->descriptorCount, sizeof(uint32_t));
     vkStream->write((VkShaderStageFlags*)&forMarshaling->stageFlags, sizeof(VkShaderStageFlags));
     // WARNING PTR CHECK
-    uint64_t cgen_var_104 = (uint64_t)(uintptr_t)forMarshaling->pImmutableSamplers;
-    vkStream->putBe64(cgen_var_104);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pImmutableSamplers;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pImmutableSamplers)
     {
         if (forMarshaling->descriptorCount)
         {
-            uint64_t* cgen_var_105;
-            vkStream->alloc((void**)&cgen_var_105, forMarshaling->descriptorCount * 8);
-            vkStream->handleMapping()->mapHandles_VkSampler_u64(forMarshaling->pImmutableSamplers, cgen_var_105, forMarshaling->descriptorCount);
-            vkStream->write((uint64_t*)cgen_var_105, forMarshaling->descriptorCount * 8);
+            uint64_t* cgen_var_0_0;
+            vkStream->alloc((void**)&cgen_var_0_0, forMarshaling->descriptorCount * 8);
+            vkStream->handleMapping()->mapHandles_VkSampler_u64(forMarshaling->pImmutableSamplers, cgen_var_0_0, forMarshaling->descriptorCount);
+            vkStream->write((uint64_t*)cgen_var_0_0, forMarshaling->descriptorCount * 8);
         }
     }
 }
 
 void unmarshal_VkDescriptorSetLayoutBinding(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDescriptorSetLayoutBinding* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->binding, sizeof(uint32_t));
     vkStream->read((VkDescriptorType*)&forUnmarshaling->descriptorType, sizeof(VkDescriptorType));
     vkStream->read((uint32_t*)&forUnmarshaling->descriptorCount, sizeof(uint32_t));
@@ -3120,271 +3814,124 @@
         }
         if (forUnmarshaling->descriptorCount)
         {
-            uint64_t* cgen_var_107;
-            vkStream->alloc((void**)&cgen_var_107, forUnmarshaling->descriptorCount * 8);
-            vkStream->read((uint64_t*)cgen_var_107, forUnmarshaling->descriptorCount * 8);
-            vkStream->handleMapping()->mapHandles_u64_VkSampler(cgen_var_107, (VkSampler*)forUnmarshaling->pImmutableSamplers, forUnmarshaling->descriptorCount);
+            uint64_t* cgen_var_0_0;
+            vkStream->alloc((void**)&cgen_var_0_0, forUnmarshaling->descriptorCount * 8);
+            vkStream->read((uint64_t*)cgen_var_0_0, forUnmarshaling->descriptorCount * 8);
+            vkStream->handleMapping()->mapHandles_u64_VkSampler(cgen_var_0_0, (VkSampler*)forUnmarshaling->pImmutableSamplers, forUnmarshaling->descriptorCount);
         }
     }
 }
 
 void marshal_VkDescriptorSetLayoutCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDescriptorSetLayoutCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkDescriptorSetLayoutCreateFlags*)&forMarshaling->flags, sizeof(VkDescriptorSetLayoutCreateFlags));
     vkStream->write((uint32_t*)&forMarshaling->bindingCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->bindingCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkDescriptorSetLayoutBinding(vkStream, (const VkDescriptorSetLayoutBinding*)(forMarshaling->pBindings + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->bindingCount; ++i)
+        {
+            marshal_VkDescriptorSetLayoutBinding(vkStream, rootType, (const VkDescriptorSetLayoutBinding*)(forMarshaling->pBindings + i));
+        }
     }
 }
 
 void unmarshal_VkDescriptorSetLayoutCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDescriptorSetLayoutCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkDescriptorSetLayoutCreateFlags*)&forUnmarshaling->flags, sizeof(VkDescriptorSetLayoutCreateFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->bindingCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->bindingCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkDescriptorSetLayoutBinding(vkStream, (VkDescriptorSetLayoutBinding*)(forUnmarshaling->pBindings + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->bindingCount; ++i)
+        {
+            unmarshal_VkDescriptorSetLayoutBinding(vkStream, rootType, (VkDescriptorSetLayoutBinding*)(forUnmarshaling->pBindings + i));
+        }
     }
 }
 
-void marshal_VkDescriptorPoolSize(
-    VulkanStreamGuest* vkStream,
-    const VkDescriptorPoolSize* forMarshaling)
-{
-    vkStream->write((VkDescriptorType*)&forMarshaling->type, sizeof(VkDescriptorType));
-    vkStream->write((uint32_t*)&forMarshaling->descriptorCount, sizeof(uint32_t));
-}
-
-void unmarshal_VkDescriptorPoolSize(
-    VulkanStreamGuest* vkStream,
-    VkDescriptorPoolSize* forUnmarshaling)
-{
-    vkStream->read((VkDescriptorType*)&forUnmarshaling->type, sizeof(VkDescriptorType));
-    vkStream->read((uint32_t*)&forUnmarshaling->descriptorCount, sizeof(uint32_t));
-}
-
-void marshal_VkDescriptorPoolCreateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkDescriptorPoolCreateInfo* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkDescriptorPoolCreateFlags*)&forMarshaling->flags, sizeof(VkDescriptorPoolCreateFlags));
-    vkStream->write((uint32_t*)&forMarshaling->maxSets, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->poolSizeCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->poolSizeCount; ++i)
-    {
-        marshal_VkDescriptorPoolSize(vkStream, (const VkDescriptorPoolSize*)(forMarshaling->pPoolSizes + i));
-    }
-}
-
-void unmarshal_VkDescriptorPoolCreateInfo(
-    VulkanStreamGuest* vkStream,
-    VkDescriptorPoolCreateInfo* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkDescriptorPoolCreateFlags*)&forUnmarshaling->flags, sizeof(VkDescriptorPoolCreateFlags));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxSets, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->poolSizeCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->poolSizeCount; ++i)
-    {
-        unmarshal_VkDescriptorPoolSize(vkStream, (VkDescriptorPoolSize*)(forUnmarshaling->pPoolSizes + i));
-    }
-}
-
-void marshal_VkDescriptorSetAllocateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkDescriptorSetAllocateInfo* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    uint64_t cgen_var_108;
-    vkStream->handleMapping()->mapHandles_VkDescriptorPool_u64(&forMarshaling->descriptorPool, &cgen_var_108, 1);
-    vkStream->write((uint64_t*)&cgen_var_108, 1 * 8);
-    vkStream->write((uint32_t*)&forMarshaling->descriptorSetCount, sizeof(uint32_t));
-    if (forMarshaling->descriptorSetCount)
-    {
-        uint64_t* cgen_var_109;
-        vkStream->alloc((void**)&cgen_var_109, forMarshaling->descriptorSetCount * 8);
-        vkStream->handleMapping()->mapHandles_VkDescriptorSetLayout_u64(forMarshaling->pSetLayouts, cgen_var_109, forMarshaling->descriptorSetCount);
-        vkStream->write((uint64_t*)cgen_var_109, forMarshaling->descriptorSetCount * 8);
-    }
-}
-
-void unmarshal_VkDescriptorSetAllocateInfo(
-    VulkanStreamGuest* vkStream,
-    VkDescriptorSetAllocateInfo* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    uint64_t cgen_var_110;
-    vkStream->read((uint64_t*)&cgen_var_110, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDescriptorPool(&cgen_var_110, (VkDescriptorPool*)&forUnmarshaling->descriptorPool, 1);
-    vkStream->read((uint32_t*)&forUnmarshaling->descriptorSetCount, sizeof(uint32_t));
-    if (forUnmarshaling->descriptorSetCount)
-    {
-        uint64_t* cgen_var_111;
-        vkStream->alloc((void**)&cgen_var_111, forUnmarshaling->descriptorSetCount * 8);
-        vkStream->read((uint64_t*)cgen_var_111, forUnmarshaling->descriptorSetCount * 8);
-        vkStream->handleMapping()->mapHandles_u64_VkDescriptorSetLayout(cgen_var_111, (VkDescriptorSetLayout*)forUnmarshaling->pSetLayouts, forUnmarshaling->descriptorSetCount);
-    }
-}
-
-void marshal_VkDescriptorImageInfo(
-    VulkanStreamGuest* vkStream,
-    const VkDescriptorImageInfo* forMarshaling)
-{
-    uint64_t cgen_var_112;
-    vkStream->handleMapping()->mapHandles_VkSampler_u64(&forMarshaling->sampler, &cgen_var_112, 1);
-    vkStream->write((uint64_t*)&cgen_var_112, 1 * 8);
-    uint64_t cgen_var_113;
-    vkStream->handleMapping()->mapHandles_VkImageView_u64(&forMarshaling->imageView, &cgen_var_113, 1);
-    vkStream->write((uint64_t*)&cgen_var_113, 1 * 8);
-    vkStream->write((VkImageLayout*)&forMarshaling->imageLayout, sizeof(VkImageLayout));
-}
-
-void unmarshal_VkDescriptorImageInfo(
-    VulkanStreamGuest* vkStream,
-    VkDescriptorImageInfo* forUnmarshaling)
-{
-    uint64_t cgen_var_114;
-    vkStream->read((uint64_t*)&cgen_var_114, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkSampler(&cgen_var_114, (VkSampler*)&forUnmarshaling->sampler, 1);
-    uint64_t cgen_var_115;
-    vkStream->read((uint64_t*)&cgen_var_115, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkImageView(&cgen_var_115, (VkImageView*)&forUnmarshaling->imageView, 1);
-    vkStream->read((VkImageLayout*)&forUnmarshaling->imageLayout, sizeof(VkImageLayout));
-}
-
-void marshal_VkDescriptorBufferInfo(
-    VulkanStreamGuest* vkStream,
-    const VkDescriptorBufferInfo* forMarshaling)
-{
-    uint64_t cgen_var_116;
-    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_116, 1);
-    vkStream->write((uint64_t*)&cgen_var_116, 1 * 8);
-    vkStream->write((VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
-    vkStream->write((VkDeviceSize*)&forMarshaling->range, sizeof(VkDeviceSize));
-}
-
-void unmarshal_VkDescriptorBufferInfo(
-    VulkanStreamGuest* vkStream,
-    VkDescriptorBufferInfo* forUnmarshaling)
-{
-    uint64_t cgen_var_117;
-    vkStream->read((uint64_t*)&cgen_var_117, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_117, (VkBuffer*)&forUnmarshaling->buffer, 1);
-    vkStream->read((VkDeviceSize*)&forUnmarshaling->offset, sizeof(VkDeviceSize));
-    vkStream->read((VkDeviceSize*)&forUnmarshaling->range, sizeof(VkDeviceSize));
-}
-
 void marshal_VkWriteDescriptorSet(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkWriteDescriptorSet* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_118;
-    vkStream->handleMapping()->mapHandles_VkDescriptorSet_u64(&forMarshaling->dstSet, &cgen_var_118, 1);
-    vkStream->write((uint64_t*)&cgen_var_118, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkDescriptorSet_u64(&forMarshaling->dstSet, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((uint32_t*)&forMarshaling->dstBinding, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->dstArrayElement, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->descriptorCount, sizeof(uint32_t));
     vkStream->write((VkDescriptorType*)&forMarshaling->descriptorType, sizeof(VkDescriptorType));
     // WARNING PTR CHECK
-    uint64_t cgen_var_119 = (uint64_t)(uintptr_t)forMarshaling->pImageInfo;
-    vkStream->putBe64(cgen_var_119);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pImageInfo;
+    vkStream->putBe64(cgen_var_1);
     if (forMarshaling->pImageInfo)
     {
         if ((!(vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || ((VK_DESCRIPTOR_TYPE_SAMPLER == forMarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == forMarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE == forMarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_STORAGE_IMAGE == forMarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT == forMarshaling->descriptorType))))
         {
-            for (uint32_t i = 0; i < (uint32_t)forMarshaling->descriptorCount; ++i)
+            if (forMarshaling)
             {
-                marshal_VkDescriptorImageInfo(vkStream, (const VkDescriptorImageInfo*)(forMarshaling->pImageInfo + i));
+                for (uint32_t i = 0; i < (uint32_t)forMarshaling->descriptorCount; ++i)
+                {
+                    marshal_VkDescriptorImageInfo(vkStream, rootType, (const VkDescriptorImageInfo*)(forMarshaling->pImageInfo + i));
+                }
             }
         }
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_120 = (uint64_t)(uintptr_t)forMarshaling->pBufferInfo;
-    vkStream->putBe64(cgen_var_120);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)forMarshaling->pBufferInfo;
+    vkStream->putBe64(cgen_var_2);
     if (forMarshaling->pBufferInfo)
     {
         if ((!(vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || ((VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == forMarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC == forMarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER == forMarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC == forMarshaling->descriptorType))))
         {
-            for (uint32_t i = 0; i < (uint32_t)forMarshaling->descriptorCount; ++i)
+            if (forMarshaling)
             {
-                marshal_VkDescriptorBufferInfo(vkStream, (const VkDescriptorBufferInfo*)(forMarshaling->pBufferInfo + i));
+                for (uint32_t i = 0; i < (uint32_t)forMarshaling->descriptorCount; ++i)
+                {
+                    marshal_VkDescriptorBufferInfo(vkStream, rootType, (const VkDescriptorBufferInfo*)(forMarshaling->pBufferInfo + i));
+                }
             }
         }
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_121 = (uint64_t)(uintptr_t)forMarshaling->pTexelBufferView;
-    vkStream->putBe64(cgen_var_121);
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)forMarshaling->pTexelBufferView;
+    vkStream->putBe64(cgen_var_3);
     if (forMarshaling->pTexelBufferView)
     {
         if ((!(vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || ((VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == forMarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER == forMarshaling->descriptorType))))
         {
             if (forMarshaling->descriptorCount)
             {
-                uint64_t* cgen_var_122;
-                vkStream->alloc((void**)&cgen_var_122, forMarshaling->descriptorCount * 8);
-                vkStream->handleMapping()->mapHandles_VkBufferView_u64(forMarshaling->pTexelBufferView, cgen_var_122, forMarshaling->descriptorCount);
-                vkStream->write((uint64_t*)cgen_var_122, forMarshaling->descriptorCount * 8);
+                uint64_t* cgen_var_3_0;
+                vkStream->alloc((void**)&cgen_var_3_0, forMarshaling->descriptorCount * 8);
+                vkStream->handleMapping()->mapHandles_VkBufferView_u64(forMarshaling->pTexelBufferView, cgen_var_3_0, forMarshaling->descriptorCount);
+                vkStream->write((uint64_t*)cgen_var_3_0, forMarshaling->descriptorCount * 8);
             }
         }
     }
@@ -3392,20 +3939,19 @@
 
 void unmarshal_VkWriteDescriptorSet(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkWriteDescriptorSet* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_123;
-    vkStream->read((uint64_t*)&cgen_var_123, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDescriptorSet(&cgen_var_123, (VkDescriptorSet*)&forUnmarshaling->dstSet, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDescriptorSet(&cgen_var_0, (VkDescriptorSet*)&forUnmarshaling->dstSet, 1);
     vkStream->read((uint32_t*)&forUnmarshaling->dstBinding, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->dstArrayElement, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->descriptorCount, sizeof(uint32_t));
@@ -3421,9 +3967,12 @@
         }
         if ((!(vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || ((VK_DESCRIPTOR_TYPE_SAMPLER == forUnmarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == forUnmarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE == forUnmarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_STORAGE_IMAGE == forUnmarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT == forUnmarshaling->descriptorType))))
         {
-            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->descriptorCount; ++i)
+            if (forUnmarshaling)
             {
-                unmarshal_VkDescriptorImageInfo(vkStream, (VkDescriptorImageInfo*)(forUnmarshaling->pImageInfo + i));
+                for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->descriptorCount; ++i)
+                {
+                    unmarshal_VkDescriptorImageInfo(vkStream, rootType, (VkDescriptorImageInfo*)(forUnmarshaling->pImageInfo + i));
+                }
             }
         }
         else
@@ -3442,9 +3991,12 @@
         }
         if ((!(vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || ((VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == forUnmarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC == forUnmarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER == forUnmarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC == forUnmarshaling->descriptorType))))
         {
-            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->descriptorCount; ++i)
+            if (forUnmarshaling)
             {
-                unmarshal_VkDescriptorBufferInfo(vkStream, (VkDescriptorBufferInfo*)(forUnmarshaling->pBufferInfo + i));
+                for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->descriptorCount; ++i)
+                {
+                    unmarshal_VkDescriptorBufferInfo(vkStream, rootType, (VkDescriptorBufferInfo*)(forUnmarshaling->pBufferInfo + i));
+                }
             }
         }
         else
@@ -3465,10 +4017,10 @@
         {
             if (forUnmarshaling->descriptorCount)
             {
-                uint64_t* cgen_var_127;
-                vkStream->alloc((void**)&cgen_var_127, forUnmarshaling->descriptorCount * 8);
-                vkStream->read((uint64_t*)cgen_var_127, forUnmarshaling->descriptorCount * 8);
-                vkStream->handleMapping()->mapHandles_u64_VkBufferView(cgen_var_127, (VkBufferView*)forUnmarshaling->pTexelBufferView, forUnmarshaling->descriptorCount);
+                uint64_t* cgen_var_3_0;
+                vkStream->alloc((void**)&cgen_var_3_0, forUnmarshaling->descriptorCount * 8);
+                vkStream->read((uint64_t*)cgen_var_3_0, forUnmarshaling->descriptorCount * 8);
+                vkStream->handleMapping()->mapHandles_u64_VkBufferView(cgen_var_3_0, (VkBufferView*)forUnmarshaling->pTexelBufferView, forUnmarshaling->descriptorCount);
             }
         }
         else
@@ -3478,120 +4030,12 @@
     }
 }
 
-void marshal_VkCopyDescriptorSet(
-    VulkanStreamGuest* vkStream,
-    const VkCopyDescriptorSet* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    uint64_t cgen_var_128;
-    vkStream->handleMapping()->mapHandles_VkDescriptorSet_u64(&forMarshaling->srcSet, &cgen_var_128, 1);
-    vkStream->write((uint64_t*)&cgen_var_128, 1 * 8);
-    vkStream->write((uint32_t*)&forMarshaling->srcBinding, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->srcArrayElement, sizeof(uint32_t));
-    uint64_t cgen_var_129;
-    vkStream->handleMapping()->mapHandles_VkDescriptorSet_u64(&forMarshaling->dstSet, &cgen_var_129, 1);
-    vkStream->write((uint64_t*)&cgen_var_129, 1 * 8);
-    vkStream->write((uint32_t*)&forMarshaling->dstBinding, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->dstArrayElement, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->descriptorCount, sizeof(uint32_t));
-}
-
-void unmarshal_VkCopyDescriptorSet(
-    VulkanStreamGuest* vkStream,
-    VkCopyDescriptorSet* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    uint64_t cgen_var_130;
-    vkStream->read((uint64_t*)&cgen_var_130, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDescriptorSet(&cgen_var_130, (VkDescriptorSet*)&forUnmarshaling->srcSet, 1);
-    vkStream->read((uint32_t*)&forUnmarshaling->srcBinding, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->srcArrayElement, sizeof(uint32_t));
-    uint64_t cgen_var_131;
-    vkStream->read((uint64_t*)&cgen_var_131, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDescriptorSet(&cgen_var_131, (VkDescriptorSet*)&forUnmarshaling->dstSet, 1);
-    vkStream->read((uint32_t*)&forUnmarshaling->dstBinding, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->dstArrayElement, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->descriptorCount, sizeof(uint32_t));
-}
-
-void marshal_VkFramebufferCreateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkFramebufferCreateInfo* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkFramebufferCreateFlags*)&forMarshaling->flags, sizeof(VkFramebufferCreateFlags));
-    uint64_t cgen_var_132;
-    vkStream->handleMapping()->mapHandles_VkRenderPass_u64(&forMarshaling->renderPass, &cgen_var_132, 1);
-    vkStream->write((uint64_t*)&cgen_var_132, 1 * 8);
-    vkStream->write((uint32_t*)&forMarshaling->attachmentCount, sizeof(uint32_t));
-    if (forMarshaling->attachmentCount)
-    {
-        uint64_t* cgen_var_133;
-        vkStream->alloc((void**)&cgen_var_133, forMarshaling->attachmentCount * 8);
-        vkStream->handleMapping()->mapHandles_VkImageView_u64(forMarshaling->pAttachments, cgen_var_133, forMarshaling->attachmentCount);
-        vkStream->write((uint64_t*)cgen_var_133, forMarshaling->attachmentCount * 8);
-    }
-    vkStream->write((uint32_t*)&forMarshaling->width, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->height, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->layers, sizeof(uint32_t));
-}
-
-void unmarshal_VkFramebufferCreateInfo(
-    VulkanStreamGuest* vkStream,
-    VkFramebufferCreateInfo* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkFramebufferCreateFlags*)&forUnmarshaling->flags, sizeof(VkFramebufferCreateFlags));
-    uint64_t cgen_var_134;
-    vkStream->read((uint64_t*)&cgen_var_134, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkRenderPass(&cgen_var_134, (VkRenderPass*)&forUnmarshaling->renderPass, 1);
-    vkStream->read((uint32_t*)&forUnmarshaling->attachmentCount, sizeof(uint32_t));
-    if (forUnmarshaling->attachmentCount)
-    {
-        uint64_t* cgen_var_135;
-        vkStream->alloc((void**)&cgen_var_135, forUnmarshaling->attachmentCount * 8);
-        vkStream->read((uint64_t*)cgen_var_135, forUnmarshaling->attachmentCount * 8);
-        vkStream->handleMapping()->mapHandles_u64_VkImageView(cgen_var_135, (VkImageView*)forUnmarshaling->pAttachments, forUnmarshaling->attachmentCount);
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->width, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->height, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->layers, sizeof(uint32_t));
-}
-
 void marshal_VkAttachmentDescription(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkAttachmentDescription* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkAttachmentDescriptionFlags*)&forMarshaling->flags, sizeof(VkAttachmentDescriptionFlags));
     vkStream->write((VkFormat*)&forMarshaling->format, sizeof(VkFormat));
     vkStream->write((VkSampleCountFlagBits*)&forMarshaling->samples, sizeof(VkSampleCountFlagBits));
@@ -3605,8 +4049,10 @@
 
 void unmarshal_VkAttachmentDescription(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkAttachmentDescription* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkAttachmentDescriptionFlags*)&forUnmarshaling->flags, sizeof(VkAttachmentDescriptionFlags));
     vkStream->read((VkFormat*)&forUnmarshaling->format, sizeof(VkFormat));
     vkStream->read((VkSampleCountFlagBits*)&forUnmarshaling->samples, sizeof(VkSampleCountFlagBits));
@@ -3620,52 +4066,125 @@
 
 void marshal_VkAttachmentReference(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkAttachmentReference* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->attachment, sizeof(uint32_t));
     vkStream->write((VkImageLayout*)&forMarshaling->layout, sizeof(VkImageLayout));
 }
 
 void unmarshal_VkAttachmentReference(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkAttachmentReference* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->attachment, sizeof(uint32_t));
     vkStream->read((VkImageLayout*)&forUnmarshaling->layout, sizeof(VkImageLayout));
 }
 
+void marshal_VkFramebufferCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFramebufferCreateInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkFramebufferCreateFlags*)&forMarshaling->flags, sizeof(VkFramebufferCreateFlags));
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkRenderPass_u64(&forMarshaling->renderPass, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((uint32_t*)&forMarshaling->attachmentCount, sizeof(uint32_t));
+    if (forMarshaling->attachmentCount)
+    {
+        uint64_t* cgen_var_1;
+        vkStream->alloc((void**)&cgen_var_1, forMarshaling->attachmentCount * 8);
+        vkStream->handleMapping()->mapHandles_VkImageView_u64(forMarshaling->pAttachments, cgen_var_1, forMarshaling->attachmentCount);
+        vkStream->write((uint64_t*)cgen_var_1, forMarshaling->attachmentCount * 8);
+    }
+    vkStream->write((uint32_t*)&forMarshaling->width, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->height, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->layers, sizeof(uint32_t));
+}
+
+void unmarshal_VkFramebufferCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkFramebufferCreateInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkFramebufferCreateFlags*)&forUnmarshaling->flags, sizeof(VkFramebufferCreateFlags));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkRenderPass(&cgen_var_0, (VkRenderPass*)&forUnmarshaling->renderPass, 1);
+    vkStream->read((uint32_t*)&forUnmarshaling->attachmentCount, sizeof(uint32_t));
+    if (forUnmarshaling->attachmentCount)
+    {
+        uint64_t* cgen_var_1;
+        vkStream->alloc((void**)&cgen_var_1, forUnmarshaling->attachmentCount * 8);
+        vkStream->read((uint64_t*)cgen_var_1, forUnmarshaling->attachmentCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkImageView(cgen_var_1, (VkImageView*)forUnmarshaling->pAttachments, forUnmarshaling->attachmentCount);
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->width, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->height, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->layers, sizeof(uint32_t));
+}
+
 void marshal_VkSubpassDescription(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSubpassDescription* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkSubpassDescriptionFlags*)&forMarshaling->flags, sizeof(VkSubpassDescriptionFlags));
     vkStream->write((VkPipelineBindPoint*)&forMarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
     vkStream->write((uint32_t*)&forMarshaling->inputAttachmentCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->inputAttachmentCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkAttachmentReference(vkStream, (const VkAttachmentReference*)(forMarshaling->pInputAttachments + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->inputAttachmentCount; ++i)
+        {
+            marshal_VkAttachmentReference(vkStream, rootType, (const VkAttachmentReference*)(forMarshaling->pInputAttachments + i));
+        }
     }
     vkStream->write((uint32_t*)&forMarshaling->colorAttachmentCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->colorAttachmentCount; ++i)
-    {
-        marshal_VkAttachmentReference(vkStream, (const VkAttachmentReference*)(forMarshaling->pColorAttachments + i));
-    }
-    // WARNING PTR CHECK
-    uint64_t cgen_var_136 = (uint64_t)(uintptr_t)forMarshaling->pResolveAttachments;
-    vkStream->putBe64(cgen_var_136);
-    if (forMarshaling->pResolveAttachments)
+    if (forMarshaling)
     {
         for (uint32_t i = 0; i < (uint32_t)forMarshaling->colorAttachmentCount; ++i)
         {
-            marshal_VkAttachmentReference(vkStream, (const VkAttachmentReference*)(forMarshaling->pResolveAttachments + i));
+            marshal_VkAttachmentReference(vkStream, rootType, (const VkAttachmentReference*)(forMarshaling->pColorAttachments + i));
         }
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_137 = (uint64_t)(uintptr_t)forMarshaling->pDepthStencilAttachment;
-    vkStream->putBe64(cgen_var_137);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pResolveAttachments;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->pResolveAttachments)
+    {
+        if (forMarshaling)
+        {
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->colorAttachmentCount; ++i)
+            {
+                marshal_VkAttachmentReference(vkStream, rootType, (const VkAttachmentReference*)(forMarshaling->pResolveAttachments + i));
+            }
+        }
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pDepthStencilAttachment;
+    vkStream->putBe64(cgen_var_1);
     if (forMarshaling->pDepthStencilAttachment)
     {
-        marshal_VkAttachmentReference(vkStream, (const VkAttachmentReference*)(forMarshaling->pDepthStencilAttachment));
+        marshal_VkAttachmentReference(vkStream, rootType, (const VkAttachmentReference*)(forMarshaling->pDepthStencilAttachment));
     }
     vkStream->write((uint32_t*)&forMarshaling->preserveAttachmentCount, sizeof(uint32_t));
     vkStream->write((const uint32_t*)forMarshaling->pPreserveAttachments, forMarshaling->preserveAttachmentCount * sizeof(const uint32_t));
@@ -3673,19 +4192,27 @@
 
 void unmarshal_VkSubpassDescription(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSubpassDescription* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkSubpassDescriptionFlags*)&forUnmarshaling->flags, sizeof(VkSubpassDescriptionFlags));
     vkStream->read((VkPipelineBindPoint*)&forUnmarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
     vkStream->read((uint32_t*)&forUnmarshaling->inputAttachmentCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->inputAttachmentCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkAttachmentReference(vkStream, (VkAttachmentReference*)(forUnmarshaling->pInputAttachments + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->inputAttachmentCount; ++i)
+        {
+            unmarshal_VkAttachmentReference(vkStream, rootType, (VkAttachmentReference*)(forUnmarshaling->pInputAttachments + i));
+        }
     }
     vkStream->read((uint32_t*)&forUnmarshaling->colorAttachmentCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->colorAttachmentCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkAttachmentReference(vkStream, (VkAttachmentReference*)(forUnmarshaling->pColorAttachments + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->colorAttachmentCount; ++i)
+        {
+            unmarshal_VkAttachmentReference(vkStream, rootType, (VkAttachmentReference*)(forUnmarshaling->pColorAttachments + i));
+        }
     }
     // WARNING PTR CHECK
     const VkAttachmentReference* check_pResolveAttachments;
@@ -3696,9 +4223,12 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pResolveAttachments inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->colorAttachmentCount; ++i)
+        if (forUnmarshaling)
         {
-            unmarshal_VkAttachmentReference(vkStream, (VkAttachmentReference*)(forUnmarshaling->pResolveAttachments + i));
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->colorAttachmentCount; ++i)
+            {
+                unmarshal_VkAttachmentReference(vkStream, rootType, (VkAttachmentReference*)(forUnmarshaling->pResolveAttachments + i));
+            }
         }
     }
     // WARNING PTR CHECK
@@ -3710,7 +4240,7 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pDepthStencilAttachment inconsistent between guest and host\n");
         }
-        unmarshal_VkAttachmentReference(vkStream, (VkAttachmentReference*)(forUnmarshaling->pDepthStencilAttachment));
+        unmarshal_VkAttachmentReference(vkStream, rootType, (VkAttachmentReference*)(forUnmarshaling->pDepthStencilAttachment));
     }
     vkStream->read((uint32_t*)&forUnmarshaling->preserveAttachmentCount, sizeof(uint32_t));
     vkStream->read((uint32_t*)forUnmarshaling->pPreserveAttachments, forUnmarshaling->preserveAttachmentCount * sizeof(const uint32_t));
@@ -3718,8 +4248,10 @@
 
 void marshal_VkSubpassDependency(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSubpassDependency* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->srcSubpass, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->dstSubpass, sizeof(uint32_t));
     vkStream->write((VkPipelineStageFlags*)&forMarshaling->srcStageMask, sizeof(VkPipelineStageFlags));
@@ -3731,8 +4263,10 @@
 
 void unmarshal_VkSubpassDependency(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSubpassDependency* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->srcSubpass, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->dstSubpass, sizeof(uint32_t));
     vkStream->read((VkPipelineStageFlags*)&forUnmarshaling->srcStageMask, sizeof(VkPipelineStageFlags));
@@ -3744,156 +4278,171 @@
 
 void marshal_VkRenderPassCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkRenderPassCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkRenderPassCreateFlags*)&forMarshaling->flags, sizeof(VkRenderPassCreateFlags));
     vkStream->write((uint32_t*)&forMarshaling->attachmentCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->attachmentCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkAttachmentDescription(vkStream, (const VkAttachmentDescription*)(forMarshaling->pAttachments + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->attachmentCount; ++i)
+        {
+            marshal_VkAttachmentDescription(vkStream, rootType, (const VkAttachmentDescription*)(forMarshaling->pAttachments + i));
+        }
     }
     vkStream->write((uint32_t*)&forMarshaling->subpassCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->subpassCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkSubpassDescription(vkStream, (const VkSubpassDescription*)(forMarshaling->pSubpasses + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->subpassCount; ++i)
+        {
+            marshal_VkSubpassDescription(vkStream, rootType, (const VkSubpassDescription*)(forMarshaling->pSubpasses + i));
+        }
     }
     vkStream->write((uint32_t*)&forMarshaling->dependencyCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->dependencyCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkSubpassDependency(vkStream, (const VkSubpassDependency*)(forMarshaling->pDependencies + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->dependencyCount; ++i)
+        {
+            marshal_VkSubpassDependency(vkStream, rootType, (const VkSubpassDependency*)(forMarshaling->pDependencies + i));
+        }
     }
 }
 
 void unmarshal_VkRenderPassCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkRenderPassCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkRenderPassCreateFlags*)&forUnmarshaling->flags, sizeof(VkRenderPassCreateFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->attachmentCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->attachmentCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkAttachmentDescription(vkStream, (VkAttachmentDescription*)(forUnmarshaling->pAttachments + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->attachmentCount; ++i)
+        {
+            unmarshal_VkAttachmentDescription(vkStream, rootType, (VkAttachmentDescription*)(forUnmarshaling->pAttachments + i));
+        }
     }
     vkStream->read((uint32_t*)&forUnmarshaling->subpassCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->subpassCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkSubpassDescription(vkStream, (VkSubpassDescription*)(forUnmarshaling->pSubpasses + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->subpassCount; ++i)
+        {
+            unmarshal_VkSubpassDescription(vkStream, rootType, (VkSubpassDescription*)(forUnmarshaling->pSubpasses + i));
+        }
     }
     vkStream->read((uint32_t*)&forUnmarshaling->dependencyCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->dependencyCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkSubpassDependency(vkStream, (VkSubpassDependency*)(forUnmarshaling->pDependencies + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->dependencyCount; ++i)
+        {
+            unmarshal_VkSubpassDependency(vkStream, rootType, (VkSubpassDependency*)(forUnmarshaling->pDependencies + i));
+        }
     }
 }
 
 void marshal_VkCommandPoolCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkCommandPoolCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkCommandPoolCreateFlags*)&forMarshaling->flags, sizeof(VkCommandPoolCreateFlags));
     vkStream->write((uint32_t*)&forMarshaling->queueFamilyIndex, sizeof(uint32_t));
 }
 
 void unmarshal_VkCommandPoolCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkCommandPoolCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkCommandPoolCreateFlags*)&forUnmarshaling->flags, sizeof(VkCommandPoolCreateFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->queueFamilyIndex, sizeof(uint32_t));
 }
 
 void marshal_VkCommandBufferAllocateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkCommandBufferAllocateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_140;
-    vkStream->handleMapping()->mapHandles_VkCommandPool_u64(&forMarshaling->commandPool, &cgen_var_140, 1);
-    vkStream->write((uint64_t*)&cgen_var_140, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkCommandPool_u64(&forMarshaling->commandPool, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((VkCommandBufferLevel*)&forMarshaling->level, sizeof(VkCommandBufferLevel));
     vkStream->write((uint32_t*)&forMarshaling->commandBufferCount, sizeof(uint32_t));
 }
 
 void unmarshal_VkCommandBufferAllocateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkCommandBufferAllocateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_141;
-    vkStream->read((uint64_t*)&cgen_var_141, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkCommandPool(&cgen_var_141, (VkCommandPool*)&forUnmarshaling->commandPool, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkCommandPool(&cgen_var_0, (VkCommandPool*)&forUnmarshaling->commandPool, 1);
     vkStream->read((VkCommandBufferLevel*)&forUnmarshaling->level, sizeof(VkCommandBufferLevel));
     vkStream->read((uint32_t*)&forUnmarshaling->commandBufferCount, sizeof(uint32_t));
 }
 
 void marshal_VkCommandBufferInheritanceInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkCommandBufferInheritanceInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_142;
-    vkStream->handleMapping()->mapHandles_VkRenderPass_u64(&forMarshaling->renderPass, &cgen_var_142, 1);
-    vkStream->write((uint64_t*)&cgen_var_142, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkRenderPass_u64(&forMarshaling->renderPass, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((uint32_t*)&forMarshaling->subpass, sizeof(uint32_t));
-    uint64_t cgen_var_143;
-    vkStream->handleMapping()->mapHandles_VkFramebuffer_u64(&forMarshaling->framebuffer, &cgen_var_143, 1);
-    vkStream->write((uint64_t*)&cgen_var_143, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkFramebuffer_u64(&forMarshaling->framebuffer, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
     vkStream->write((VkBool32*)&forMarshaling->occlusionQueryEnable, sizeof(VkBool32));
     vkStream->write((VkQueryControlFlags*)&forMarshaling->queryFlags, sizeof(VkQueryControlFlags));
     vkStream->write((VkQueryPipelineStatisticFlags*)&forMarshaling->pipelineStatistics, sizeof(VkQueryPipelineStatisticFlags));
@@ -3901,24 +4450,23 @@
 
 void unmarshal_VkCommandBufferInheritanceInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkCommandBufferInheritanceInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_144;
-    vkStream->read((uint64_t*)&cgen_var_144, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkRenderPass(&cgen_var_144, (VkRenderPass*)&forUnmarshaling->renderPass, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkRenderPass(&cgen_var_0, (VkRenderPass*)&forUnmarshaling->renderPass, 1);
     vkStream->read((uint32_t*)&forUnmarshaling->subpass, sizeof(uint32_t));
-    uint64_t cgen_var_145;
-    vkStream->read((uint64_t*)&cgen_var_145, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkFramebuffer(&cgen_var_145, (VkFramebuffer*)&forUnmarshaling->framebuffer, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkFramebuffer(&cgen_var_1, (VkFramebuffer*)&forUnmarshaling->framebuffer, 1);
     vkStream->read((VkBool32*)&forUnmarshaling->occlusionQueryEnable, sizeof(VkBool32));
     vkStream->read((VkQueryControlFlags*)&forUnmarshaling->queryFlags, sizeof(VkQueryControlFlags));
     vkStream->read((VkQueryPipelineStatisticFlags*)&forUnmarshaling->pipelineStatistics, sizeof(VkQueryPipelineStatisticFlags));
@@ -3926,39 +4474,38 @@
 
 void marshal_VkCommandBufferBeginInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkCommandBufferBeginInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkCommandBufferUsageFlags*)&forMarshaling->flags, sizeof(VkCommandBufferUsageFlags));
     // WARNING PTR CHECK
-    uint64_t cgen_var_146 = (uint64_t)(uintptr_t)forMarshaling->pInheritanceInfo;
-    vkStream->putBe64(cgen_var_146);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pInheritanceInfo;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pInheritanceInfo)
     {
-        marshal_VkCommandBufferInheritanceInfo(vkStream, (const VkCommandBufferInheritanceInfo*)(forMarshaling->pInheritanceInfo));
+        marshal_VkCommandBufferInheritanceInfo(vkStream, rootType, (const VkCommandBufferInheritanceInfo*)(forMarshaling->pInheritanceInfo));
     }
 }
 
 void unmarshal_VkCommandBufferBeginInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkCommandBufferBeginInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkCommandBufferUsageFlags*)&forUnmarshaling->flags, sizeof(VkCommandBufferUsageFlags));
     // WARNING PTR CHECK
     const VkCommandBufferInheritanceInfo* check_pInheritanceInfo;
@@ -3969,14 +4516,16 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pInheritanceInfo inconsistent between guest and host\n");
         }
-        unmarshal_VkCommandBufferInheritanceInfo(vkStream, (VkCommandBufferInheritanceInfo*)(forUnmarshaling->pInheritanceInfo));
+        unmarshal_VkCommandBufferInheritanceInfo(vkStream, rootType, (VkCommandBufferInheritanceInfo*)(forUnmarshaling->pInheritanceInfo));
     }
 }
 
 void marshal_VkBufferCopy(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBufferCopy* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkDeviceSize*)&forMarshaling->srcOffset, sizeof(VkDeviceSize));
     vkStream->write((VkDeviceSize*)&forMarshaling->dstOffset, sizeof(VkDeviceSize));
     vkStream->write((VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
@@ -3984,8 +4533,10 @@
 
 void unmarshal_VkBufferCopy(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBufferCopy* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkDeviceSize*)&forUnmarshaling->srcOffset, sizeof(VkDeviceSize));
     vkStream->read((VkDeviceSize*)&forUnmarshaling->dstOffset, sizeof(VkDeviceSize));
     vkStream->read((VkDeviceSize*)&forUnmarshaling->size, sizeof(VkDeviceSize));
@@ -3993,8 +4544,10 @@
 
 void marshal_VkImageSubresourceLayers(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageSubresourceLayers* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkImageAspectFlags*)&forMarshaling->aspectMask, sizeof(VkImageAspectFlags));
     vkStream->write((uint32_t*)&forMarshaling->mipLevel, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->baseArrayLayer, sizeof(uint32_t));
@@ -4003,375 +4556,286 @@
 
 void unmarshal_VkImageSubresourceLayers(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageSubresourceLayers* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkImageAspectFlags*)&forUnmarshaling->aspectMask, sizeof(VkImageAspectFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->mipLevel, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->baseArrayLayer, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->layerCount, sizeof(uint32_t));
 }
 
-void marshal_VkImageCopy(
-    VulkanStreamGuest* vkStream,
-    const VkImageCopy* forMarshaling)
-{
-    marshal_VkImageSubresourceLayers(vkStream, (VkImageSubresourceLayers*)(&forMarshaling->srcSubresource));
-    marshal_VkOffset3D(vkStream, (VkOffset3D*)(&forMarshaling->srcOffset));
-    marshal_VkImageSubresourceLayers(vkStream, (VkImageSubresourceLayers*)(&forMarshaling->dstSubresource));
-    marshal_VkOffset3D(vkStream, (VkOffset3D*)(&forMarshaling->dstOffset));
-    marshal_VkExtent3D(vkStream, (VkExtent3D*)(&forMarshaling->extent));
-}
-
-void unmarshal_VkImageCopy(
-    VulkanStreamGuest* vkStream,
-    VkImageCopy* forUnmarshaling)
-{
-    unmarshal_VkImageSubresourceLayers(vkStream, (VkImageSubresourceLayers*)(&forUnmarshaling->srcSubresource));
-    unmarshal_VkOffset3D(vkStream, (VkOffset3D*)(&forUnmarshaling->srcOffset));
-    unmarshal_VkImageSubresourceLayers(vkStream, (VkImageSubresourceLayers*)(&forUnmarshaling->dstSubresource));
-    unmarshal_VkOffset3D(vkStream, (VkOffset3D*)(&forUnmarshaling->dstOffset));
-    unmarshal_VkExtent3D(vkStream, (VkExtent3D*)(&forUnmarshaling->extent));
-}
-
-void marshal_VkImageBlit(
-    VulkanStreamGuest* vkStream,
-    const VkImageBlit* forMarshaling)
-{
-    marshal_VkImageSubresourceLayers(vkStream, (VkImageSubresourceLayers*)(&forMarshaling->srcSubresource));
-    for (uint32_t i = 0; i < (uint32_t)2; ++i)
-    {
-        marshal_VkOffset3D(vkStream, (VkOffset3D*)(forMarshaling->srcOffsets + i));
-    }
-    marshal_VkImageSubresourceLayers(vkStream, (VkImageSubresourceLayers*)(&forMarshaling->dstSubresource));
-    for (uint32_t i = 0; i < (uint32_t)2; ++i)
-    {
-        marshal_VkOffset3D(vkStream, (VkOffset3D*)(forMarshaling->dstOffsets + i));
-    }
-}
-
-void unmarshal_VkImageBlit(
-    VulkanStreamGuest* vkStream,
-    VkImageBlit* forUnmarshaling)
-{
-    unmarshal_VkImageSubresourceLayers(vkStream, (VkImageSubresourceLayers*)(&forUnmarshaling->srcSubresource));
-    for (uint32_t i = 0; i < (uint32_t)2; ++i)
-    {
-        unmarshal_VkOffset3D(vkStream, (VkOffset3D*)(forUnmarshaling->srcOffsets + i));
-    }
-    unmarshal_VkImageSubresourceLayers(vkStream, (VkImageSubresourceLayers*)(&forUnmarshaling->dstSubresource));
-    for (uint32_t i = 0; i < (uint32_t)2; ++i)
-    {
-        unmarshal_VkOffset3D(vkStream, (VkOffset3D*)(forUnmarshaling->dstOffsets + i));
-    }
-}
-
 void marshal_VkBufferImageCopy(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBufferImageCopy* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkDeviceSize*)&forMarshaling->bufferOffset, sizeof(VkDeviceSize));
     vkStream->write((uint32_t*)&forMarshaling->bufferRowLength, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->bufferImageHeight, sizeof(uint32_t));
-    marshal_VkImageSubresourceLayers(vkStream, (VkImageSubresourceLayers*)(&forMarshaling->imageSubresource));
-    marshal_VkOffset3D(vkStream, (VkOffset3D*)(&forMarshaling->imageOffset));
-    marshal_VkExtent3D(vkStream, (VkExtent3D*)(&forMarshaling->imageExtent));
+    marshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->imageSubresource));
+    marshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->imageOffset));
+    marshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->imageExtent));
 }
 
 void unmarshal_VkBufferImageCopy(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBufferImageCopy* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkDeviceSize*)&forUnmarshaling->bufferOffset, sizeof(VkDeviceSize));
     vkStream->read((uint32_t*)&forUnmarshaling->bufferRowLength, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->bufferImageHeight, sizeof(uint32_t));
-    unmarshal_VkImageSubresourceLayers(vkStream, (VkImageSubresourceLayers*)(&forUnmarshaling->imageSubresource));
-    unmarshal_VkOffset3D(vkStream, (VkOffset3D*)(&forUnmarshaling->imageOffset));
-    unmarshal_VkExtent3D(vkStream, (VkExtent3D*)(&forUnmarshaling->imageExtent));
+    unmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forUnmarshaling->imageSubresource));
+    unmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forUnmarshaling->imageOffset));
+    unmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forUnmarshaling->imageExtent));
 }
 
 void marshal_VkClearColorValue(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkClearColorValue* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((float*)forMarshaling->float32, 4 * sizeof(float));
 }
 
 void unmarshal_VkClearColorValue(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkClearColorValue* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((float*)forUnmarshaling->float32, 4 * sizeof(float));
 }
 
 void marshal_VkClearDepthStencilValue(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkClearDepthStencilValue* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((float*)&forMarshaling->depth, sizeof(float));
     vkStream->write((uint32_t*)&forMarshaling->stencil, sizeof(uint32_t));
 }
 
 void unmarshal_VkClearDepthStencilValue(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkClearDepthStencilValue* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((float*)&forUnmarshaling->depth, sizeof(float));
     vkStream->read((uint32_t*)&forUnmarshaling->stencil, sizeof(uint32_t));
 }
 
 void marshal_VkClearValue(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkClearValue* forMarshaling)
 {
-    marshal_VkClearColorValue(vkStream, (VkClearColorValue*)(&forMarshaling->color));
+    (void)rootType;
+    marshal_VkClearColorValue(vkStream, rootType, (VkClearColorValue*)(&forMarshaling->color));
 }
 
 void unmarshal_VkClearValue(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkClearValue* forUnmarshaling)
 {
-    unmarshal_VkClearColorValue(vkStream, (VkClearColorValue*)(&forUnmarshaling->color));
+    (void)rootType;
+    unmarshal_VkClearColorValue(vkStream, rootType, (VkClearColorValue*)(&forUnmarshaling->color));
 }
 
 void marshal_VkClearAttachment(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkClearAttachment* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkImageAspectFlags*)&forMarshaling->aspectMask, sizeof(VkImageAspectFlags));
     vkStream->write((uint32_t*)&forMarshaling->colorAttachment, sizeof(uint32_t));
-    marshal_VkClearValue(vkStream, (VkClearValue*)(&forMarshaling->clearValue));
+    marshal_VkClearValue(vkStream, rootType, (VkClearValue*)(&forMarshaling->clearValue));
 }
 
 void unmarshal_VkClearAttachment(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkClearAttachment* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkImageAspectFlags*)&forUnmarshaling->aspectMask, sizeof(VkImageAspectFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->colorAttachment, sizeof(uint32_t));
-    unmarshal_VkClearValue(vkStream, (VkClearValue*)(&forUnmarshaling->clearValue));
+    unmarshal_VkClearValue(vkStream, rootType, (VkClearValue*)(&forUnmarshaling->clearValue));
 }
 
 void marshal_VkClearRect(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkClearRect* forMarshaling)
 {
-    marshal_VkRect2D(vkStream, (VkRect2D*)(&forMarshaling->rect));
+    (void)rootType;
+    marshal_VkRect2D(vkStream, rootType, (VkRect2D*)(&forMarshaling->rect));
     vkStream->write((uint32_t*)&forMarshaling->baseArrayLayer, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->layerCount, sizeof(uint32_t));
 }
 
 void unmarshal_VkClearRect(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkClearRect* forUnmarshaling)
 {
-    unmarshal_VkRect2D(vkStream, (VkRect2D*)(&forUnmarshaling->rect));
+    (void)rootType;
+    unmarshal_VkRect2D(vkStream, rootType, (VkRect2D*)(&forUnmarshaling->rect));
     vkStream->read((uint32_t*)&forUnmarshaling->baseArrayLayer, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->layerCount, sizeof(uint32_t));
 }
 
+void marshal_VkImageBlit(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageBlit* forMarshaling)
+{
+    (void)rootType;
+    marshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->srcSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        marshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(forMarshaling->srcOffsets + i));
+    }
+    marshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->dstSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        marshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(forMarshaling->dstOffsets + i));
+    }
+}
+
+void unmarshal_VkImageBlit(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageBlit* forUnmarshaling)
+{
+    (void)rootType;
+    unmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forUnmarshaling->srcSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        unmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(forUnmarshaling->srcOffsets + i));
+    }
+    unmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forUnmarshaling->dstSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        unmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(forUnmarshaling->dstOffsets + i));
+    }
+}
+
+void marshal_VkImageCopy(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageCopy* forMarshaling)
+{
+    (void)rootType;
+    marshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->srcSubresource));
+    marshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->srcOffset));
+    marshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->dstSubresource));
+    marshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->dstOffset));
+    marshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->extent));
+}
+
+void unmarshal_VkImageCopy(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageCopy* forUnmarshaling)
+{
+    (void)rootType;
+    unmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forUnmarshaling->srcSubresource));
+    unmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forUnmarshaling->srcOffset));
+    unmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forUnmarshaling->dstSubresource));
+    unmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forUnmarshaling->dstOffset));
+    unmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forUnmarshaling->extent));
+}
+
 void marshal_VkImageResolve(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageResolve* forMarshaling)
 {
-    marshal_VkImageSubresourceLayers(vkStream, (VkImageSubresourceLayers*)(&forMarshaling->srcSubresource));
-    marshal_VkOffset3D(vkStream, (VkOffset3D*)(&forMarshaling->srcOffset));
-    marshal_VkImageSubresourceLayers(vkStream, (VkImageSubresourceLayers*)(&forMarshaling->dstSubresource));
-    marshal_VkOffset3D(vkStream, (VkOffset3D*)(&forMarshaling->dstOffset));
-    marshal_VkExtent3D(vkStream, (VkExtent3D*)(&forMarshaling->extent));
+    (void)rootType;
+    marshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->srcSubresource));
+    marshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->srcOffset));
+    marshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->dstSubresource));
+    marshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->dstOffset));
+    marshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->extent));
 }
 
 void unmarshal_VkImageResolve(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageResolve* forUnmarshaling)
 {
-    unmarshal_VkImageSubresourceLayers(vkStream, (VkImageSubresourceLayers*)(&forUnmarshaling->srcSubresource));
-    unmarshal_VkOffset3D(vkStream, (VkOffset3D*)(&forUnmarshaling->srcOffset));
-    unmarshal_VkImageSubresourceLayers(vkStream, (VkImageSubresourceLayers*)(&forUnmarshaling->dstSubresource));
-    unmarshal_VkOffset3D(vkStream, (VkOffset3D*)(&forUnmarshaling->dstOffset));
-    unmarshal_VkExtent3D(vkStream, (VkExtent3D*)(&forUnmarshaling->extent));
-}
-
-void marshal_VkMemoryBarrier(
-    VulkanStreamGuest* vkStream,
-    const VkMemoryBarrier* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkAccessFlags*)&forMarshaling->srcAccessMask, sizeof(VkAccessFlags));
-    vkStream->write((VkAccessFlags*)&forMarshaling->dstAccessMask, sizeof(VkAccessFlags));
-}
-
-void unmarshal_VkMemoryBarrier(
-    VulkanStreamGuest* vkStream,
-    VkMemoryBarrier* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkAccessFlags*)&forUnmarshaling->srcAccessMask, sizeof(VkAccessFlags));
-    vkStream->read((VkAccessFlags*)&forUnmarshaling->dstAccessMask, sizeof(VkAccessFlags));
-}
-
-void marshal_VkBufferMemoryBarrier(
-    VulkanStreamGuest* vkStream,
-    const VkBufferMemoryBarrier* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkAccessFlags*)&forMarshaling->srcAccessMask, sizeof(VkAccessFlags));
-    vkStream->write((VkAccessFlags*)&forMarshaling->dstAccessMask, sizeof(VkAccessFlags));
-    vkStream->write((uint32_t*)&forMarshaling->srcQueueFamilyIndex, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->dstQueueFamilyIndex, sizeof(uint32_t));
-    uint64_t cgen_var_148;
-    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_148, 1);
-    vkStream->write((uint64_t*)&cgen_var_148, 1 * 8);
-    vkStream->write((VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
-    vkStream->write((VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
-}
-
-void unmarshal_VkBufferMemoryBarrier(
-    VulkanStreamGuest* vkStream,
-    VkBufferMemoryBarrier* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkAccessFlags*)&forUnmarshaling->srcAccessMask, sizeof(VkAccessFlags));
-    vkStream->read((VkAccessFlags*)&forUnmarshaling->dstAccessMask, sizeof(VkAccessFlags));
-    vkStream->read((uint32_t*)&forUnmarshaling->srcQueueFamilyIndex, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->dstQueueFamilyIndex, sizeof(uint32_t));
-    uint64_t cgen_var_149;
-    vkStream->read((uint64_t*)&cgen_var_149, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_149, (VkBuffer*)&forUnmarshaling->buffer, 1);
-    vkStream->read((VkDeviceSize*)&forUnmarshaling->offset, sizeof(VkDeviceSize));
-    vkStream->read((VkDeviceSize*)&forUnmarshaling->size, sizeof(VkDeviceSize));
-}
-
-void marshal_VkImageMemoryBarrier(
-    VulkanStreamGuest* vkStream,
-    const VkImageMemoryBarrier* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkAccessFlags*)&forMarshaling->srcAccessMask, sizeof(VkAccessFlags));
-    vkStream->write((VkAccessFlags*)&forMarshaling->dstAccessMask, sizeof(VkAccessFlags));
-    vkStream->write((VkImageLayout*)&forMarshaling->oldLayout, sizeof(VkImageLayout));
-    vkStream->write((VkImageLayout*)&forMarshaling->newLayout, sizeof(VkImageLayout));
-    vkStream->write((uint32_t*)&forMarshaling->srcQueueFamilyIndex, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->dstQueueFamilyIndex, sizeof(uint32_t));
-    uint64_t cgen_var_150;
-    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_150, 1);
-    vkStream->write((uint64_t*)&cgen_var_150, 1 * 8);
-    marshal_VkImageSubresourceRange(vkStream, (VkImageSubresourceRange*)(&forMarshaling->subresourceRange));
-}
-
-void unmarshal_VkImageMemoryBarrier(
-    VulkanStreamGuest* vkStream,
-    VkImageMemoryBarrier* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkAccessFlags*)&forUnmarshaling->srcAccessMask, sizeof(VkAccessFlags));
-    vkStream->read((VkAccessFlags*)&forUnmarshaling->dstAccessMask, sizeof(VkAccessFlags));
-    vkStream->read((VkImageLayout*)&forUnmarshaling->oldLayout, sizeof(VkImageLayout));
-    vkStream->read((VkImageLayout*)&forUnmarshaling->newLayout, sizeof(VkImageLayout));
-    vkStream->read((uint32_t*)&forUnmarshaling->srcQueueFamilyIndex, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->dstQueueFamilyIndex, sizeof(uint32_t));
-    uint64_t cgen_var_151;
-    vkStream->read((uint64_t*)&cgen_var_151, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_151, (VkImage*)&forUnmarshaling->image, 1);
-    unmarshal_VkImageSubresourceRange(vkStream, (VkImageSubresourceRange*)(&forUnmarshaling->subresourceRange));
+    (void)rootType;
+    unmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forUnmarshaling->srcSubresource));
+    unmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forUnmarshaling->srcOffset));
+    unmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forUnmarshaling->dstSubresource));
+    unmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forUnmarshaling->dstOffset));
+    unmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forUnmarshaling->extent));
 }
 
 void marshal_VkRenderPassBeginInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkRenderPassBeginInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_152;
-    vkStream->handleMapping()->mapHandles_VkRenderPass_u64(&forMarshaling->renderPass, &cgen_var_152, 1);
-    vkStream->write((uint64_t*)&cgen_var_152, 1 * 8);
-    uint64_t cgen_var_153;
-    vkStream->handleMapping()->mapHandles_VkFramebuffer_u64(&forMarshaling->framebuffer, &cgen_var_153, 1);
-    vkStream->write((uint64_t*)&cgen_var_153, 1 * 8);
-    marshal_VkRect2D(vkStream, (VkRect2D*)(&forMarshaling->renderArea));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkRenderPass_u64(&forMarshaling->renderPass, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkFramebuffer_u64(&forMarshaling->framebuffer, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    marshal_VkRect2D(vkStream, rootType, (VkRect2D*)(&forMarshaling->renderArea));
     vkStream->write((uint32_t*)&forMarshaling->clearValueCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_154 = (uint64_t)(uintptr_t)forMarshaling->pClearValues;
-    vkStream->putBe64(cgen_var_154);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)forMarshaling->pClearValues;
+    vkStream->putBe64(cgen_var_2);
     if (forMarshaling->pClearValues)
     {
-        for (uint32_t i = 0; i < (uint32_t)forMarshaling->clearValueCount; ++i)
+        if (forMarshaling)
         {
-            marshal_VkClearValue(vkStream, (const VkClearValue*)(forMarshaling->pClearValues + i));
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->clearValueCount; ++i)
+            {
+                marshal_VkClearValue(vkStream, rootType, (const VkClearValue*)(forMarshaling->pClearValues + i));
+            }
         }
     }
 }
 
 void unmarshal_VkRenderPassBeginInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkRenderPassBeginInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_155;
-    vkStream->read((uint64_t*)&cgen_var_155, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkRenderPass(&cgen_var_155, (VkRenderPass*)&forUnmarshaling->renderPass, 1);
-    uint64_t cgen_var_156;
-    vkStream->read((uint64_t*)&cgen_var_156, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkFramebuffer(&cgen_var_156, (VkFramebuffer*)&forUnmarshaling->framebuffer, 1);
-    unmarshal_VkRect2D(vkStream, (VkRect2D*)(&forUnmarshaling->renderArea));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkRenderPass(&cgen_var_0, (VkRenderPass*)&forUnmarshaling->renderPass, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkFramebuffer(&cgen_var_1, (VkFramebuffer*)&forUnmarshaling->framebuffer, 1);
+    unmarshal_VkRect2D(vkStream, rootType, (VkRect2D*)(&forUnmarshaling->renderArea));
     vkStream->read((uint32_t*)&forUnmarshaling->clearValueCount, sizeof(uint32_t));
     // WARNING PTR CHECK
     const VkClearValue* check_pClearValues;
@@ -4382,145 +4846,30 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pClearValues inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->clearValueCount; ++i)
+        if (forUnmarshaling)
         {
-            unmarshal_VkClearValue(vkStream, (VkClearValue*)(forUnmarshaling->pClearValues + i));
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->clearValueCount; ++i)
+            {
+                unmarshal_VkClearValue(vkStream, rootType, (VkClearValue*)(forUnmarshaling->pClearValues + i));
+            }
         }
     }
 }
 
-void marshal_VkDispatchIndirectCommand(
-    VulkanStreamGuest* vkStream,
-    const VkDispatchIndirectCommand* forMarshaling)
-{
-    vkStream->write((uint32_t*)&forMarshaling->x, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->y, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->z, sizeof(uint32_t));
-}
-
-void unmarshal_VkDispatchIndirectCommand(
-    VulkanStreamGuest* vkStream,
-    VkDispatchIndirectCommand* forUnmarshaling)
-{
-    vkStream->read((uint32_t*)&forUnmarshaling->x, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->y, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->z, sizeof(uint32_t));
-}
-
-void marshal_VkDrawIndexedIndirectCommand(
-    VulkanStreamGuest* vkStream,
-    const VkDrawIndexedIndirectCommand* forMarshaling)
-{
-    vkStream->write((uint32_t*)&forMarshaling->indexCount, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->instanceCount, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->firstIndex, sizeof(uint32_t));
-    vkStream->write((int32_t*)&forMarshaling->vertexOffset, sizeof(int32_t));
-    vkStream->write((uint32_t*)&forMarshaling->firstInstance, sizeof(uint32_t));
-}
-
-void unmarshal_VkDrawIndexedIndirectCommand(
-    VulkanStreamGuest* vkStream,
-    VkDrawIndexedIndirectCommand* forUnmarshaling)
-{
-    vkStream->read((uint32_t*)&forUnmarshaling->indexCount, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->instanceCount, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->firstIndex, sizeof(uint32_t));
-    vkStream->read((int32_t*)&forUnmarshaling->vertexOffset, sizeof(int32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->firstInstance, sizeof(uint32_t));
-}
-
-void marshal_VkDrawIndirectCommand(
-    VulkanStreamGuest* vkStream,
-    const VkDrawIndirectCommand* forMarshaling)
-{
-    vkStream->write((uint32_t*)&forMarshaling->vertexCount, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->instanceCount, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->firstVertex, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->firstInstance, sizeof(uint32_t));
-}
-
-void unmarshal_VkDrawIndirectCommand(
-    VulkanStreamGuest* vkStream,
-    VkDrawIndirectCommand* forUnmarshaling)
-{
-    vkStream->read((uint32_t*)&forUnmarshaling->vertexCount, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->instanceCount, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->firstVertex, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->firstInstance, sizeof(uint32_t));
-}
-
-void marshal_VkBaseOutStructure(
-    VulkanStreamGuest* vkStream,
-    const VkBaseOutStructure* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((VkBaseOutStructure*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-}
-
-void unmarshal_VkBaseOutStructure(
-    VulkanStreamGuest* vkStream,
-    VkBaseOutStructure* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((VkBaseOutStructure*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-}
-
-void marshal_VkBaseInStructure(
-    VulkanStreamGuest* vkStream,
-    const VkBaseInStructure* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const VkBaseInStructure*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-}
-
-void unmarshal_VkBaseInStructure(
-    VulkanStreamGuest* vkStream,
-    VkBaseInStructure* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((VkBaseInStructure*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-}
-
 #endif
 #ifdef VK_VERSION_1_1
 void marshal_VkPhysicalDeviceSubgroupProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceSubgroupProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->subgroupSize, sizeof(uint32_t));
     vkStream->write((VkShaderStageFlags*)&forMarshaling->supportedStages, sizeof(VkShaderStageFlags));
     vkStream->write((VkSubgroupFeatureFlags*)&forMarshaling->supportedOperations, sizeof(VkSubgroupFeatureFlags));
@@ -4529,17 +4878,16 @@
 
 void unmarshal_VkPhysicalDeviceSubgroupProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceSubgroupProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->subgroupSize, sizeof(uint32_t));
     vkStream->read((VkShaderStageFlags*)&forUnmarshaling->supportedStages, sizeof(VkShaderStageFlags));
     vkStream->read((VkSubgroupFeatureFlags*)&forUnmarshaling->supportedOperations, sizeof(VkSubgroupFeatureFlags));
@@ -4548,102 +4896,100 @@
 
 void marshal_VkBindBufferMemoryInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBindBufferMemoryInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_158;
-    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_158, 1);
-    vkStream->write((uint64_t*)&cgen_var_158, 1 * 8);
-    uint64_t cgen_var_159;
-    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_159, 1);
-    vkStream->write((uint64_t*)&cgen_var_159, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
     vkStream->write((VkDeviceSize*)&forMarshaling->memoryOffset, sizeof(VkDeviceSize));
 }
 
 void unmarshal_VkBindBufferMemoryInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBindBufferMemoryInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_160;
-    vkStream->read((uint64_t*)&cgen_var_160, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_160, (VkBuffer*)&forUnmarshaling->buffer, 1);
-    uint64_t cgen_var_161;
-    vkStream->read((uint64_t*)&cgen_var_161, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_161, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_0, (VkBuffer*)&forUnmarshaling->buffer, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_1, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
     vkStream->read((VkDeviceSize*)&forUnmarshaling->memoryOffset, sizeof(VkDeviceSize));
 }
 
 void marshal_VkBindImageMemoryInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBindImageMemoryInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_162;
-    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_162, 1);
-    vkStream->write((uint64_t*)&cgen_var_162, 1 * 8);
-    uint64_t cgen_var_163;
-    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_163, 1);
-    vkStream->write((uint64_t*)&cgen_var_163, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
     vkStream->write((VkDeviceSize*)&forMarshaling->memoryOffset, sizeof(VkDeviceSize));
 }
 
 void unmarshal_VkBindImageMemoryInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBindImageMemoryInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_164;
-    vkStream->read((uint64_t*)&cgen_var_164, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_164, (VkImage*)&forUnmarshaling->image, 1);
-    uint64_t cgen_var_165;
-    vkStream->read((uint64_t*)&cgen_var_165, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_165, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_0, (VkImage*)&forUnmarshaling->image, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_1, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
     vkStream->read((VkDeviceSize*)&forUnmarshaling->memoryOffset, sizeof(VkDeviceSize));
 }
 
 void marshal_VkPhysicalDevice16BitStorageFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDevice16BitStorageFeatures* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->storageBuffer16BitAccess, sizeof(VkBool32));
     vkStream->write((VkBool32*)&forMarshaling->uniformAndStorageBuffer16BitAccess, sizeof(VkBool32));
     vkStream->write((VkBool32*)&forMarshaling->storagePushConstant16, sizeof(VkBool32));
@@ -4652,17 +4998,16 @@
 
 void unmarshal_VkPhysicalDevice16BitStorageFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDevice16BitStorageFeatures* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->storageBuffer16BitAccess, sizeof(VkBool32));
     vkStream->read((VkBool32*)&forUnmarshaling->uniformAndStorageBuffer16BitAccess, sizeof(VkBool32));
     vkStream->read((VkBool32*)&forUnmarshaling->storagePushConstant16, sizeof(VkBool32));
@@ -4671,195 +5016,196 @@
 
 void marshal_VkMemoryDedicatedRequirements(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryDedicatedRequirements* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->prefersDedicatedAllocation, sizeof(VkBool32));
     vkStream->write((VkBool32*)&forMarshaling->requiresDedicatedAllocation, sizeof(VkBool32));
 }
 
 void unmarshal_VkMemoryDedicatedRequirements(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryDedicatedRequirements* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->prefersDedicatedAllocation, sizeof(VkBool32));
     vkStream->read((VkBool32*)&forUnmarshaling->requiresDedicatedAllocation, sizeof(VkBool32));
 }
 
 void marshal_VkMemoryDedicatedAllocateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryDedicatedAllocateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_166;
-    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_166, 1);
-    vkStream->write((uint64_t*)&cgen_var_166, 1 * 8);
-    uint64_t cgen_var_167;
-    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_167, 1);
-    vkStream->write((uint64_t*)&cgen_var_167, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
 }
 
 void unmarshal_VkMemoryDedicatedAllocateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryDedicatedAllocateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_168;
-    vkStream->read((uint64_t*)&cgen_var_168, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_168, (VkImage*)&forUnmarshaling->image, 1);
-    uint64_t cgen_var_169;
-    vkStream->read((uint64_t*)&cgen_var_169, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_169, (VkBuffer*)&forUnmarshaling->buffer, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_0, (VkImage*)&forUnmarshaling->image, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_1, (VkBuffer*)&forUnmarshaling->buffer, 1);
 }
 
 void marshal_VkMemoryAllocateFlagsInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryAllocateFlagsInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkMemoryAllocateFlags*)&forMarshaling->flags, sizeof(VkMemoryAllocateFlags));
     vkStream->write((uint32_t*)&forMarshaling->deviceMask, sizeof(uint32_t));
 }
 
 void unmarshal_VkMemoryAllocateFlagsInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryAllocateFlagsInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkMemoryAllocateFlags*)&forUnmarshaling->flags, sizeof(VkMemoryAllocateFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->deviceMask, sizeof(uint32_t));
 }
 
 void marshal_VkDeviceGroupRenderPassBeginInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceGroupRenderPassBeginInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->deviceMask, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->deviceRenderAreaCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->deviceRenderAreaCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkRect2D(vkStream, (const VkRect2D*)(forMarshaling->pDeviceRenderAreas + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->deviceRenderAreaCount; ++i)
+        {
+            marshal_VkRect2D(vkStream, rootType, (const VkRect2D*)(forMarshaling->pDeviceRenderAreas + i));
+        }
     }
 }
 
 void unmarshal_VkDeviceGroupRenderPassBeginInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceGroupRenderPassBeginInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->deviceMask, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->deviceRenderAreaCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->deviceRenderAreaCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkRect2D(vkStream, (VkRect2D*)(forUnmarshaling->pDeviceRenderAreas + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->deviceRenderAreaCount; ++i)
+        {
+            unmarshal_VkRect2D(vkStream, rootType, (VkRect2D*)(forUnmarshaling->pDeviceRenderAreas + i));
+        }
     }
 }
 
 void marshal_VkDeviceGroupCommandBufferBeginInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceGroupCommandBufferBeginInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->deviceMask, sizeof(uint32_t));
 }
 
 void unmarshal_VkDeviceGroupCommandBufferBeginInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceGroupCommandBufferBeginInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->deviceMask, sizeof(uint32_t));
 }
 
 void marshal_VkDeviceGroupSubmitInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceGroupSubmitInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->waitSemaphoreCount, sizeof(uint32_t));
     vkStream->write((const uint32_t*)forMarshaling->pWaitSemaphoreDeviceIndices, forMarshaling->waitSemaphoreCount * sizeof(const uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->commandBufferCount, sizeof(uint32_t));
@@ -4870,17 +5216,16 @@
 
 void unmarshal_VkDeviceGroupSubmitInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceGroupSubmitInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->waitSemaphoreCount, sizeof(uint32_t));
     vkStream->read((uint32_t*)forUnmarshaling->pWaitSemaphoreDeviceIndices, forUnmarshaling->waitSemaphoreCount * sizeof(const uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->commandBufferCount, sizeof(uint32_t));
@@ -4891,125 +5236,128 @@
 
 void marshal_VkDeviceGroupBindSparseInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceGroupBindSparseInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->resourceDeviceIndex, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->memoryDeviceIndex, sizeof(uint32_t));
 }
 
 void unmarshal_VkDeviceGroupBindSparseInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceGroupBindSparseInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->resourceDeviceIndex, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->memoryDeviceIndex, sizeof(uint32_t));
 }
 
 void marshal_VkBindBufferMemoryDeviceGroupInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBindBufferMemoryDeviceGroupInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->deviceIndexCount, sizeof(uint32_t));
     vkStream->write((const uint32_t*)forMarshaling->pDeviceIndices, forMarshaling->deviceIndexCount * sizeof(const uint32_t));
 }
 
 void unmarshal_VkBindBufferMemoryDeviceGroupInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBindBufferMemoryDeviceGroupInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->deviceIndexCount, sizeof(uint32_t));
     vkStream->read((uint32_t*)forUnmarshaling->pDeviceIndices, forUnmarshaling->deviceIndexCount * sizeof(const uint32_t));
 }
 
 void marshal_VkBindImageMemoryDeviceGroupInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBindImageMemoryDeviceGroupInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->deviceIndexCount, sizeof(uint32_t));
     vkStream->write((const uint32_t*)forMarshaling->pDeviceIndices, forMarshaling->deviceIndexCount * sizeof(const uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->splitInstanceBindRegionCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->splitInstanceBindRegionCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkRect2D(vkStream, (const VkRect2D*)(forMarshaling->pSplitInstanceBindRegions + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->splitInstanceBindRegionCount; ++i)
+        {
+            marshal_VkRect2D(vkStream, rootType, (const VkRect2D*)(forMarshaling->pSplitInstanceBindRegions + i));
+        }
     }
 }
 
 void unmarshal_VkBindImageMemoryDeviceGroupInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBindImageMemoryDeviceGroupInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->deviceIndexCount, sizeof(uint32_t));
     vkStream->read((uint32_t*)forUnmarshaling->pDeviceIndices, forUnmarshaling->deviceIndexCount * sizeof(const uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->splitInstanceBindRegionCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->splitInstanceBindRegionCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkRect2D(vkStream, (VkRect2D*)(forUnmarshaling->pSplitInstanceBindRegions + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->splitInstanceBindRegionCount; ++i)
+        {
+            unmarshal_VkRect2D(vkStream, rootType, (VkRect2D*)(forUnmarshaling->pSplitInstanceBindRegions + i));
+        }
     }
 }
 
 void marshal_VkPhysicalDeviceGroupProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceGroupProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->physicalDeviceCount, sizeof(uint32_t));
     vkStream->write((VkPhysicalDevice*)forMarshaling->physicalDevices, VK_MAX_DEVICE_GROUP_SIZE * sizeof(VkPhysicalDevice));
     vkStream->write((VkBool32*)&forMarshaling->subsetAllocation, sizeof(VkBool32));
@@ -5017,17 +5365,16 @@
 
 void unmarshal_VkPhysicalDeviceGroupProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceGroupProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->physicalDeviceCount, sizeof(uint32_t));
     vkStream->read((VkPhysicalDevice*)forUnmarshaling->physicalDevices, VK_MAX_DEVICE_GROUP_SIZE * sizeof(VkPhysicalDevice));
     vkStream->read((VkBool32*)&forUnmarshaling->subsetAllocation, sizeof(VkBool32));
@@ -5035,352 +5382,342 @@
 
 void marshal_VkDeviceGroupDeviceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceGroupDeviceCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->physicalDeviceCount, sizeof(uint32_t));
     if (forMarshaling->physicalDeviceCount)
     {
-        uint64_t* cgen_var_170;
-        vkStream->alloc((void**)&cgen_var_170, forMarshaling->physicalDeviceCount * 8);
-        vkStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(forMarshaling->pPhysicalDevices, cgen_var_170, forMarshaling->physicalDeviceCount);
-        vkStream->write((uint64_t*)cgen_var_170, forMarshaling->physicalDeviceCount * 8);
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forMarshaling->physicalDeviceCount * 8);
+        vkStream->handleMapping()->mapHandles_VkPhysicalDevice_u64(forMarshaling->pPhysicalDevices, cgen_var_0, forMarshaling->physicalDeviceCount);
+        vkStream->write((uint64_t*)cgen_var_0, forMarshaling->physicalDeviceCount * 8);
     }
 }
 
 void unmarshal_VkDeviceGroupDeviceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceGroupDeviceCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->physicalDeviceCount, sizeof(uint32_t));
     if (forUnmarshaling->physicalDeviceCount)
     {
-        uint64_t* cgen_var_171;
-        vkStream->alloc((void**)&cgen_var_171, forUnmarshaling->physicalDeviceCount * 8);
-        vkStream->read((uint64_t*)cgen_var_171, forUnmarshaling->physicalDeviceCount * 8);
-        vkStream->handleMapping()->mapHandles_u64_VkPhysicalDevice(cgen_var_171, (VkPhysicalDevice*)forUnmarshaling->pPhysicalDevices, forUnmarshaling->physicalDeviceCount);
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forUnmarshaling->physicalDeviceCount * 8);
+        vkStream->read((uint64_t*)cgen_var_0, forUnmarshaling->physicalDeviceCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkPhysicalDevice(cgen_var_0, (VkPhysicalDevice*)forUnmarshaling->pPhysicalDevices, forUnmarshaling->physicalDeviceCount);
     }
 }
 
 void marshal_VkBufferMemoryRequirementsInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBufferMemoryRequirementsInfo2* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_172;
-    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_172, 1);
-    vkStream->write((uint64_t*)&cgen_var_172, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
 }
 
 void unmarshal_VkBufferMemoryRequirementsInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBufferMemoryRequirementsInfo2* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_173;
-    vkStream->read((uint64_t*)&cgen_var_173, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_173, (VkBuffer*)&forUnmarshaling->buffer, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_0, (VkBuffer*)&forUnmarshaling->buffer, 1);
 }
 
 void marshal_VkImageMemoryRequirementsInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageMemoryRequirementsInfo2* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_174;
-    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_174, 1);
-    vkStream->write((uint64_t*)&cgen_var_174, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
 }
 
 void unmarshal_VkImageMemoryRequirementsInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageMemoryRequirementsInfo2* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_175;
-    vkStream->read((uint64_t*)&cgen_var_175, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_175, (VkImage*)&forUnmarshaling->image, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_0, (VkImage*)&forUnmarshaling->image, 1);
 }
 
 void marshal_VkImageSparseMemoryRequirementsInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageSparseMemoryRequirementsInfo2* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_176;
-    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_176, 1);
-    vkStream->write((uint64_t*)&cgen_var_176, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
 }
 
 void unmarshal_VkImageSparseMemoryRequirementsInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageSparseMemoryRequirementsInfo2* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_177;
-    vkStream->read((uint64_t*)&cgen_var_177, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_177, (VkImage*)&forUnmarshaling->image, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_0, (VkImage*)&forUnmarshaling->image, 1);
 }
 
 void marshal_VkMemoryRequirements2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryRequirements2* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkMemoryRequirements(vkStream, (VkMemoryRequirements*)(&forMarshaling->memoryRequirements));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkMemoryRequirements(vkStream, rootType, (VkMemoryRequirements*)(&forMarshaling->memoryRequirements));
 }
 
 void unmarshal_VkMemoryRequirements2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryRequirements2* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkMemoryRequirements(vkStream, (VkMemoryRequirements*)(&forUnmarshaling->memoryRequirements));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkMemoryRequirements(vkStream, rootType, (VkMemoryRequirements*)(&forUnmarshaling->memoryRequirements));
 }
 
 void marshal_VkSparseImageMemoryRequirements2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSparseImageMemoryRequirements2* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkSparseImageMemoryRequirements(vkStream, (VkSparseImageMemoryRequirements*)(&forMarshaling->memoryRequirements));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkSparseImageMemoryRequirements(vkStream, rootType, (VkSparseImageMemoryRequirements*)(&forMarshaling->memoryRequirements));
 }
 
 void unmarshal_VkSparseImageMemoryRequirements2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSparseImageMemoryRequirements2* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkSparseImageMemoryRequirements(vkStream, (VkSparseImageMemoryRequirements*)(&forUnmarshaling->memoryRequirements));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkSparseImageMemoryRequirements(vkStream, rootType, (VkSparseImageMemoryRequirements*)(&forUnmarshaling->memoryRequirements));
 }
 
 void marshal_VkPhysicalDeviceFeatures2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceFeatures2* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkPhysicalDeviceFeatures(vkStream, (VkPhysicalDeviceFeatures*)(&forMarshaling->features));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkPhysicalDeviceFeatures(vkStream, rootType, (VkPhysicalDeviceFeatures*)(&forMarshaling->features));
 }
 
 void unmarshal_VkPhysicalDeviceFeatures2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceFeatures2* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkPhysicalDeviceFeatures(vkStream, (VkPhysicalDeviceFeatures*)(&forUnmarshaling->features));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkPhysicalDeviceFeatures(vkStream, rootType, (VkPhysicalDeviceFeatures*)(&forUnmarshaling->features));
 }
 
 void marshal_VkPhysicalDeviceProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceProperties2* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkPhysicalDeviceProperties(vkStream, (VkPhysicalDeviceProperties*)(&forMarshaling->properties));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkPhysicalDeviceProperties(vkStream, rootType, (VkPhysicalDeviceProperties*)(&forMarshaling->properties));
 }
 
 void unmarshal_VkPhysicalDeviceProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceProperties2* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkPhysicalDeviceProperties(vkStream, (VkPhysicalDeviceProperties*)(&forUnmarshaling->properties));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkPhysicalDeviceProperties(vkStream, rootType, (VkPhysicalDeviceProperties*)(&forUnmarshaling->properties));
 }
 
 void marshal_VkFormatProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkFormatProperties2* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkFormatProperties(vkStream, (VkFormatProperties*)(&forMarshaling->formatProperties));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkFormatProperties(vkStream, rootType, (VkFormatProperties*)(&forMarshaling->formatProperties));
 }
 
 void unmarshal_VkFormatProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkFormatProperties2* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkFormatProperties(vkStream, (VkFormatProperties*)(&forUnmarshaling->formatProperties));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkFormatProperties(vkStream, rootType, (VkFormatProperties*)(&forUnmarshaling->formatProperties));
 }
 
 void marshal_VkImageFormatProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageFormatProperties2* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkImageFormatProperties(vkStream, (VkImageFormatProperties*)(&forMarshaling->imageFormatProperties));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkImageFormatProperties(vkStream, rootType, (VkImageFormatProperties*)(&forMarshaling->imageFormatProperties));
 }
 
 void unmarshal_VkImageFormatProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageFormatProperties2* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkImageFormatProperties(vkStream, (VkImageFormatProperties*)(&forUnmarshaling->imageFormatProperties));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkImageFormatProperties(vkStream, rootType, (VkImageFormatProperties*)(&forUnmarshaling->imageFormatProperties));
 }
 
 void marshal_VkPhysicalDeviceImageFormatInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceImageFormatInfo2* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkFormat*)&forMarshaling->format, sizeof(VkFormat));
     vkStream->write((VkImageType*)&forMarshaling->type, sizeof(VkImageType));
     vkStream->write((VkImageTiling*)&forMarshaling->tiling, sizeof(VkImageTiling));
@@ -5390,17 +5727,16 @@
 
 void unmarshal_VkPhysicalDeviceImageFormatInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceImageFormatInfo2* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkFormat*)&forUnmarshaling->format, sizeof(VkFormat));
     vkStream->read((VkImageType*)&forUnmarshaling->type, sizeof(VkImageType));
     vkStream->read((VkImageTiling*)&forUnmarshaling->tiling, sizeof(VkImageTiling));
@@ -5410,109 +5746,106 @@
 
 void marshal_VkQueueFamilyProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkQueueFamilyProperties2* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkQueueFamilyProperties(vkStream, (VkQueueFamilyProperties*)(&forMarshaling->queueFamilyProperties));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkQueueFamilyProperties(vkStream, rootType, (VkQueueFamilyProperties*)(&forMarshaling->queueFamilyProperties));
 }
 
 void unmarshal_VkQueueFamilyProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkQueueFamilyProperties2* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkQueueFamilyProperties(vkStream, (VkQueueFamilyProperties*)(&forUnmarshaling->queueFamilyProperties));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkQueueFamilyProperties(vkStream, rootType, (VkQueueFamilyProperties*)(&forUnmarshaling->queueFamilyProperties));
 }
 
 void marshal_VkPhysicalDeviceMemoryProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceMemoryProperties2* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkPhysicalDeviceMemoryProperties(vkStream, (VkPhysicalDeviceMemoryProperties*)(&forMarshaling->memoryProperties));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkPhysicalDeviceMemoryProperties(vkStream, rootType, (VkPhysicalDeviceMemoryProperties*)(&forMarshaling->memoryProperties));
 }
 
 void unmarshal_VkPhysicalDeviceMemoryProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceMemoryProperties2* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkPhysicalDeviceMemoryProperties(vkStream, (VkPhysicalDeviceMemoryProperties*)(&forUnmarshaling->memoryProperties));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkPhysicalDeviceMemoryProperties(vkStream, rootType, (VkPhysicalDeviceMemoryProperties*)(&forUnmarshaling->memoryProperties));
 }
 
 void marshal_VkSparseImageFormatProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSparseImageFormatProperties2* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkSparseImageFormatProperties(vkStream, (VkSparseImageFormatProperties*)(&forMarshaling->properties));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkSparseImageFormatProperties(vkStream, rootType, (VkSparseImageFormatProperties*)(&forMarshaling->properties));
 }
 
 void unmarshal_VkSparseImageFormatProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSparseImageFormatProperties2* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkSparseImageFormatProperties(vkStream, (VkSparseImageFormatProperties*)(&forUnmarshaling->properties));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkSparseImageFormatProperties(vkStream, rootType, (VkSparseImageFormatProperties*)(&forUnmarshaling->properties));
 }
 
 void marshal_VkPhysicalDeviceSparseImageFormatInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceSparseImageFormatInfo2* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkFormat*)&forMarshaling->format, sizeof(VkFormat));
     vkStream->write((VkImageType*)&forMarshaling->type, sizeof(VkImageType));
     vkStream->write((VkSampleCountFlagBits*)&forMarshaling->samples, sizeof(VkSampleCountFlagBits));
@@ -5522,17 +5855,16 @@
 
 void unmarshal_VkPhysicalDeviceSparseImageFormatInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceSparseImageFormatInfo2* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkFormat*)&forUnmarshaling->format, sizeof(VkFormat));
     vkStream->read((VkImageType*)&forUnmarshaling->type, sizeof(VkImageType));
     vkStream->read((VkSampleCountFlagBits*)&forUnmarshaling->samples, sizeof(VkSampleCountFlagBits));
@@ -5542,39 +5874,40 @@
 
 void marshal_VkPhysicalDevicePointClippingProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDevicePointClippingProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPointClippingBehavior*)&forMarshaling->pointClippingBehavior, sizeof(VkPointClippingBehavior));
 }
 
 void unmarshal_VkPhysicalDevicePointClippingProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDevicePointClippingProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPointClippingBehavior*)&forUnmarshaling->pointClippingBehavior, sizeof(VkPointClippingBehavior));
 }
 
 void marshal_VkInputAttachmentAspectReference(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkInputAttachmentAspectReference* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->subpass, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->inputAttachmentIndex, sizeof(uint32_t));
     vkStream->write((VkImageAspectFlags*)&forMarshaling->aspectMask, sizeof(VkImageAspectFlags));
@@ -5582,8 +5915,10 @@
 
 void unmarshal_VkInputAttachmentAspectReference(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkInputAttachmentAspectReference* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->subpass, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->inputAttachmentIndex, sizeof(uint32_t));
     vkStream->read((VkImageAspectFlags*)&forUnmarshaling->aspectMask, sizeof(VkImageAspectFlags));
@@ -5591,117 +5926,120 @@
 
 void marshal_VkRenderPassInputAttachmentAspectCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkRenderPassInputAttachmentAspectCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->aspectReferenceCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->aspectReferenceCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkInputAttachmentAspectReference(vkStream, (const VkInputAttachmentAspectReference*)(forMarshaling->pAspectReferences + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->aspectReferenceCount; ++i)
+        {
+            marshal_VkInputAttachmentAspectReference(vkStream, rootType, (const VkInputAttachmentAspectReference*)(forMarshaling->pAspectReferences + i));
+        }
     }
 }
 
 void unmarshal_VkRenderPassInputAttachmentAspectCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkRenderPassInputAttachmentAspectCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->aspectReferenceCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->aspectReferenceCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkInputAttachmentAspectReference(vkStream, (VkInputAttachmentAspectReference*)(forUnmarshaling->pAspectReferences + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->aspectReferenceCount; ++i)
+        {
+            unmarshal_VkInputAttachmentAspectReference(vkStream, rootType, (VkInputAttachmentAspectReference*)(forUnmarshaling->pAspectReferences + i));
+        }
     }
 }
 
 void marshal_VkImageViewUsageCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageViewUsageCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkImageUsageFlags*)&forMarshaling->usage, sizeof(VkImageUsageFlags));
 }
 
 void unmarshal_VkImageViewUsageCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageViewUsageCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkImageUsageFlags*)&forUnmarshaling->usage, sizeof(VkImageUsageFlags));
 }
 
 void marshal_VkPipelineTessellationDomainOriginStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineTessellationDomainOriginStateCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkTessellationDomainOrigin*)&forMarshaling->domainOrigin, sizeof(VkTessellationDomainOrigin));
 }
 
 void unmarshal_VkPipelineTessellationDomainOriginStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineTessellationDomainOriginStateCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkTessellationDomainOrigin*)&forUnmarshaling->domainOrigin, sizeof(VkTessellationDomainOrigin));
 }
 
 void marshal_VkRenderPassMultiviewCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkRenderPassMultiviewCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->subpassCount, sizeof(uint32_t));
     vkStream->write((const uint32_t*)forMarshaling->pViewMasks, forMarshaling->subpassCount * sizeof(const uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->dependencyCount, sizeof(uint32_t));
@@ -5712,17 +6050,16 @@
 
 void unmarshal_VkRenderPassMultiviewCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkRenderPassMultiviewCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->subpassCount, sizeof(uint32_t));
     vkStream->read((uint32_t*)forUnmarshaling->pViewMasks, forUnmarshaling->subpassCount * sizeof(const uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->dependencyCount, sizeof(uint32_t));
@@ -5733,16 +6070,16 @@
 
 void marshal_VkPhysicalDeviceMultiviewFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceMultiviewFeatures* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->multiview, sizeof(VkBool32));
     vkStream->write((VkBool32*)&forMarshaling->multiviewGeometryShader, sizeof(VkBool32));
     vkStream->write((VkBool32*)&forMarshaling->multiviewTessellationShader, sizeof(VkBool32));
@@ -5750,17 +6087,16 @@
 
 void unmarshal_VkPhysicalDeviceMultiviewFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceMultiviewFeatures* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->multiview, sizeof(VkBool32));
     vkStream->read((VkBool32*)&forUnmarshaling->multiviewGeometryShader, sizeof(VkBool32));
     vkStream->read((VkBool32*)&forUnmarshaling->multiviewTessellationShader, sizeof(VkBool32));
@@ -5768,144 +6104,140 @@
 
 void marshal_VkPhysicalDeviceMultiviewProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceMultiviewProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->maxMultiviewViewCount, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->maxMultiviewInstanceIndex, sizeof(uint32_t));
 }
 
 void unmarshal_VkPhysicalDeviceMultiviewProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceMultiviewProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->maxMultiviewViewCount, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->maxMultiviewInstanceIndex, sizeof(uint32_t));
 }
 
-void marshal_VkPhysicalDeviceVariablePointerFeatures(
+void marshal_VkPhysicalDeviceVariablePointersFeatures(
     VulkanStreamGuest* vkStream,
-    const VkPhysicalDeviceVariablePointerFeatures* forMarshaling)
+    VkStructureType rootType,
+    const VkPhysicalDeviceVariablePointersFeatures* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->variablePointersStorageBuffer, sizeof(VkBool32));
     vkStream->write((VkBool32*)&forMarshaling->variablePointers, sizeof(VkBool32));
 }
 
-void unmarshal_VkPhysicalDeviceVariablePointerFeatures(
+void unmarshal_VkPhysicalDeviceVariablePointersFeatures(
     VulkanStreamGuest* vkStream,
-    VkPhysicalDeviceVariablePointerFeatures* forUnmarshaling)
+    VkStructureType rootType,
+    VkPhysicalDeviceVariablePointersFeatures* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->variablePointersStorageBuffer, sizeof(VkBool32));
     vkStream->read((VkBool32*)&forUnmarshaling->variablePointers, sizeof(VkBool32));
 }
 
 void marshal_VkPhysicalDeviceProtectedMemoryFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceProtectedMemoryFeatures* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->protectedMemory, sizeof(VkBool32));
 }
 
 void unmarshal_VkPhysicalDeviceProtectedMemoryFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceProtectedMemoryFeatures* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->protectedMemory, sizeof(VkBool32));
 }
 
 void marshal_VkPhysicalDeviceProtectedMemoryProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceProtectedMemoryProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->protectedNoFault, sizeof(VkBool32));
 }
 
 void unmarshal_VkPhysicalDeviceProtectedMemoryProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceProtectedMemoryProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->protectedNoFault, sizeof(VkBool32));
 }
 
 void marshal_VkDeviceQueueInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceQueueInfo2* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkDeviceQueueCreateFlags*)&forMarshaling->flags, sizeof(VkDeviceQueueCreateFlags));
     vkStream->write((uint32_t*)&forMarshaling->queueFamilyIndex, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->queueIndex, sizeof(uint32_t));
@@ -5913,17 +6245,16 @@
 
 void unmarshal_VkDeviceQueueInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceQueueInfo2* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkDeviceQueueCreateFlags*)&forUnmarshaling->flags, sizeof(VkDeviceQueueCreateFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->queueFamilyIndex, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->queueIndex, sizeof(uint32_t));
@@ -5931,51 +6262,50 @@
 
 void marshal_VkProtectedSubmitInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkProtectedSubmitInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->protectedSubmit, sizeof(VkBool32));
 }
 
 void unmarshal_VkProtectedSubmitInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkProtectedSubmitInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->protectedSubmit, sizeof(VkBool32));
 }
 
 void marshal_VkSamplerYcbcrConversionCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSamplerYcbcrConversionCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkFormat*)&forMarshaling->format, sizeof(VkFormat));
     vkStream->write((VkSamplerYcbcrModelConversion*)&forMarshaling->ycbcrModel, sizeof(VkSamplerYcbcrModelConversion));
     vkStream->write((VkSamplerYcbcrRange*)&forMarshaling->ycbcrRange, sizeof(VkSamplerYcbcrRange));
-    marshal_VkComponentMapping(vkStream, (VkComponentMapping*)(&forMarshaling->components));
+    marshal_VkComponentMapping(vkStream, rootType, (VkComponentMapping*)(&forMarshaling->components));
     vkStream->write((VkChromaLocation*)&forMarshaling->xChromaOffset, sizeof(VkChromaLocation));
     vkStream->write((VkChromaLocation*)&forMarshaling->yChromaOffset, sizeof(VkChromaLocation));
     vkStream->write((VkFilter*)&forMarshaling->chromaFilter, sizeof(VkFilter));
@@ -5984,21 +6314,20 @@
 
 void unmarshal_VkSamplerYcbcrConversionCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSamplerYcbcrConversionCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkFormat*)&forUnmarshaling->format, sizeof(VkFormat));
     vkStream->read((VkSamplerYcbcrModelConversion*)&forUnmarshaling->ycbcrModel, sizeof(VkSamplerYcbcrModelConversion));
     vkStream->read((VkSamplerYcbcrRange*)&forUnmarshaling->ycbcrRange, sizeof(VkSamplerYcbcrRange));
-    unmarshal_VkComponentMapping(vkStream, (VkComponentMapping*)(&forUnmarshaling->components));
+    unmarshal_VkComponentMapping(vkStream, rootType, (VkComponentMapping*)(&forUnmarshaling->components));
     vkStream->read((VkChromaLocation*)&forUnmarshaling->xChromaOffset, sizeof(VkChromaLocation));
     vkStream->read((VkChromaLocation*)&forUnmarshaling->yChromaOffset, sizeof(VkChromaLocation));
     vkStream->read((VkFilter*)&forUnmarshaling->chromaFilter, sizeof(VkFilter));
@@ -6007,181 +6336,180 @@
 
 void marshal_VkSamplerYcbcrConversionInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSamplerYcbcrConversionInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_178;
-    vkStream->handleMapping()->mapHandles_VkSamplerYcbcrConversion_u64(&forMarshaling->conversion, &cgen_var_178, 1);
-    vkStream->write((uint64_t*)&cgen_var_178, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkSamplerYcbcrConversion_u64(&forMarshaling->conversion, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
 }
 
 void unmarshal_VkSamplerYcbcrConversionInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSamplerYcbcrConversionInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_179;
-    vkStream->read((uint64_t*)&cgen_var_179, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkSamplerYcbcrConversion(&cgen_var_179, (VkSamplerYcbcrConversion*)&forUnmarshaling->conversion, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkSamplerYcbcrConversion(&cgen_var_0, (VkSamplerYcbcrConversion*)&forUnmarshaling->conversion, 1);
 }
 
 void marshal_VkBindImagePlaneMemoryInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBindImagePlaneMemoryInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkImageAspectFlagBits*)&forMarshaling->planeAspect, sizeof(VkImageAspectFlagBits));
 }
 
 void unmarshal_VkBindImagePlaneMemoryInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBindImagePlaneMemoryInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkImageAspectFlagBits*)&forUnmarshaling->planeAspect, sizeof(VkImageAspectFlagBits));
 }
 
 void marshal_VkImagePlaneMemoryRequirementsInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImagePlaneMemoryRequirementsInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkImageAspectFlagBits*)&forMarshaling->planeAspect, sizeof(VkImageAspectFlagBits));
 }
 
 void unmarshal_VkImagePlaneMemoryRequirementsInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImagePlaneMemoryRequirementsInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkImageAspectFlagBits*)&forUnmarshaling->planeAspect, sizeof(VkImageAspectFlagBits));
 }
 
 void marshal_VkPhysicalDeviceSamplerYcbcrConversionFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceSamplerYcbcrConversionFeatures* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->samplerYcbcrConversion, sizeof(VkBool32));
 }
 
 void unmarshal_VkPhysicalDeviceSamplerYcbcrConversionFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceSamplerYcbcrConversionFeatures* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->samplerYcbcrConversion, sizeof(VkBool32));
 }
 
 void marshal_VkSamplerYcbcrConversionImageFormatProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSamplerYcbcrConversionImageFormatProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->combinedImageSamplerDescriptorCount, sizeof(uint32_t));
 }
 
 void unmarshal_VkSamplerYcbcrConversionImageFormatProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSamplerYcbcrConversionImageFormatProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->combinedImageSamplerDescriptorCount, sizeof(uint32_t));
 }
 
 void marshal_VkDescriptorUpdateTemplateEntry(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDescriptorUpdateTemplateEntry* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->dstBinding, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->dstArrayElement, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->descriptorCount, sizeof(uint32_t));
     vkStream->write((VkDescriptorType*)&forMarshaling->descriptorType, sizeof(VkDescriptorType));
-    uint64_t cgen_var_180 = (uint64_t)forMarshaling->offset;
-    vkStream->putBe64(cgen_var_180);
-    uint64_t cgen_var_181 = (uint64_t)forMarshaling->stride;
-    vkStream->putBe64(cgen_var_181);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->offset;
+    vkStream->putBe64(cgen_var_0);
+    uint64_t cgen_var_1 = (uint64_t)forMarshaling->stride;
+    vkStream->putBe64(cgen_var_1);
 }
 
 void unmarshal_VkDescriptorUpdateTemplateEntry(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDescriptorUpdateTemplateEntry* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->dstBinding, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->dstArrayElement, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->descriptorCount, sizeof(uint32_t));
@@ -6192,67 +6520,74 @@
 
 void marshal_VkDescriptorUpdateTemplateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDescriptorUpdateTemplateCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkDescriptorUpdateTemplateCreateFlags*)&forMarshaling->flags, sizeof(VkDescriptorUpdateTemplateCreateFlags));
     vkStream->write((uint32_t*)&forMarshaling->descriptorUpdateEntryCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->descriptorUpdateEntryCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkDescriptorUpdateTemplateEntry(vkStream, (const VkDescriptorUpdateTemplateEntry*)(forMarshaling->pDescriptorUpdateEntries + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->descriptorUpdateEntryCount; ++i)
+        {
+            marshal_VkDescriptorUpdateTemplateEntry(vkStream, rootType, (const VkDescriptorUpdateTemplateEntry*)(forMarshaling->pDescriptorUpdateEntries + i));
+        }
     }
     vkStream->write((VkDescriptorUpdateTemplateType*)&forMarshaling->templateType, sizeof(VkDescriptorUpdateTemplateType));
-    uint64_t cgen_var_184;
-    vkStream->handleMapping()->mapHandles_VkDescriptorSetLayout_u64(&forMarshaling->descriptorSetLayout, &cgen_var_184, 1);
-    vkStream->write((uint64_t*)&cgen_var_184, 1 * 8);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkDescriptorSetLayout_u64(&forMarshaling->descriptorSetLayout, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((VkPipelineBindPoint*)&forMarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
-    uint64_t cgen_var_185;
-    vkStream->handleMapping()->mapHandles_VkPipelineLayout_u64(&forMarshaling->pipelineLayout, &cgen_var_185, 1);
-    vkStream->write((uint64_t*)&cgen_var_185, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkPipelineLayout_u64(&forMarshaling->pipelineLayout, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
     vkStream->write((uint32_t*)&forMarshaling->set, sizeof(uint32_t));
 }
 
 void unmarshal_VkDescriptorUpdateTemplateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDescriptorUpdateTemplateCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkDescriptorUpdateTemplateCreateFlags*)&forUnmarshaling->flags, sizeof(VkDescriptorUpdateTemplateCreateFlags));
     vkStream->read((uint32_t*)&forUnmarshaling->descriptorUpdateEntryCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->descriptorUpdateEntryCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkDescriptorUpdateTemplateEntry(vkStream, (VkDescriptorUpdateTemplateEntry*)(forUnmarshaling->pDescriptorUpdateEntries + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->descriptorUpdateEntryCount; ++i)
+        {
+            unmarshal_VkDescriptorUpdateTemplateEntry(vkStream, rootType, (VkDescriptorUpdateTemplateEntry*)(forUnmarshaling->pDescriptorUpdateEntries + i));
+        }
     }
     vkStream->read((VkDescriptorUpdateTemplateType*)&forUnmarshaling->templateType, sizeof(VkDescriptorUpdateTemplateType));
-    uint64_t cgen_var_186;
-    vkStream->read((uint64_t*)&cgen_var_186, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDescriptorSetLayout(&cgen_var_186, (VkDescriptorSetLayout*)&forUnmarshaling->descriptorSetLayout, 1);
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDescriptorSetLayout(&cgen_var_0, (VkDescriptorSetLayout*)&forUnmarshaling->descriptorSetLayout, 1);
     vkStream->read((VkPipelineBindPoint*)&forUnmarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
-    uint64_t cgen_var_187;
-    vkStream->read((uint64_t*)&cgen_var_187, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkPipelineLayout(&cgen_var_187, (VkPipelineLayout*)&forUnmarshaling->pipelineLayout, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkPipelineLayout(&cgen_var_1, (VkPipelineLayout*)&forUnmarshaling->pipelineLayout, 1);
     vkStream->read((uint32_t*)&forUnmarshaling->set, sizeof(uint32_t));
 }
 
 void marshal_VkExternalMemoryProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalMemoryProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkExternalMemoryFeatureFlags*)&forMarshaling->externalMemoryFeatures, sizeof(VkExternalMemoryFeatureFlags));
     vkStream->write((VkExternalMemoryHandleTypeFlags*)&forMarshaling->exportFromImportedHandleTypes, sizeof(VkExternalMemoryHandleTypeFlags));
     vkStream->write((VkExternalMemoryHandleTypeFlags*)&forMarshaling->compatibleHandleTypes, sizeof(VkExternalMemoryHandleTypeFlags));
@@ -6260,8 +6595,10 @@
 
 void unmarshal_VkExternalMemoryProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalMemoryProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkExternalMemoryFeatureFlags*)&forUnmarshaling->externalMemoryFeatures, sizeof(VkExternalMemoryFeatureFlags));
     vkStream->read((VkExternalMemoryHandleTypeFlags*)&forUnmarshaling->exportFromImportedHandleTypes, sizeof(VkExternalMemoryHandleTypeFlags));
     vkStream->read((VkExternalMemoryHandleTypeFlags*)&forUnmarshaling->compatibleHandleTypes, sizeof(VkExternalMemoryHandleTypeFlags));
@@ -6269,78 +6606,76 @@
 
 void marshal_VkPhysicalDeviceExternalImageFormatInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalImageFormatInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkExternalMemoryHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
 }
 
 void unmarshal_VkPhysicalDeviceExternalImageFormatInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceExternalImageFormatInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkExternalMemoryHandleTypeFlagBits*)&forUnmarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
 }
 
 void marshal_VkExternalImageFormatProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalImageFormatProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkExternalMemoryProperties(vkStream, (VkExternalMemoryProperties*)(&forMarshaling->externalMemoryProperties));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkExternalMemoryProperties(vkStream, rootType, (VkExternalMemoryProperties*)(&forMarshaling->externalMemoryProperties));
 }
 
 void unmarshal_VkExternalImageFormatProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalImageFormatProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkExternalMemoryProperties(vkStream, (VkExternalMemoryProperties*)(&forUnmarshaling->externalMemoryProperties));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkExternalMemoryProperties(vkStream, rootType, (VkExternalMemoryProperties*)(&forUnmarshaling->externalMemoryProperties));
 }
 
 void marshal_VkPhysicalDeviceExternalBufferInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalBufferInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBufferCreateFlags*)&forMarshaling->flags, sizeof(VkBufferCreateFlags));
     vkStream->write((VkBufferUsageFlags*)&forMarshaling->usage, sizeof(VkBufferUsageFlags));
     vkStream->write((VkExternalMemoryHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
@@ -6348,17 +6683,16 @@
 
 void unmarshal_VkPhysicalDeviceExternalBufferInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceExternalBufferInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBufferCreateFlags*)&forUnmarshaling->flags, sizeof(VkBufferCreateFlags));
     vkStream->read((VkBufferUsageFlags*)&forUnmarshaling->usage, sizeof(VkBufferUsageFlags));
     vkStream->read((VkExternalMemoryHandleTypeFlagBits*)&forUnmarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
@@ -6366,47 +6700,46 @@
 
 void marshal_VkExternalBufferProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalBufferProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkExternalMemoryProperties(vkStream, (VkExternalMemoryProperties*)(&forMarshaling->externalMemoryProperties));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkExternalMemoryProperties(vkStream, rootType, (VkExternalMemoryProperties*)(&forMarshaling->externalMemoryProperties));
 }
 
 void unmarshal_VkExternalBufferProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalBufferProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkExternalMemoryProperties(vkStream, (VkExternalMemoryProperties*)(&forUnmarshaling->externalMemoryProperties));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkExternalMemoryProperties(vkStream, rootType, (VkExternalMemoryProperties*)(&forUnmarshaling->externalMemoryProperties));
 }
 
 void marshal_VkPhysicalDeviceIDProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceIDProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint8_t*)forMarshaling->deviceUUID, VK_UUID_SIZE * sizeof(uint8_t));
     vkStream->write((uint8_t*)forMarshaling->driverUUID, VK_UUID_SIZE * sizeof(uint8_t));
     vkStream->write((uint8_t*)forMarshaling->deviceLUID, VK_LUID_SIZE * sizeof(uint8_t));
@@ -6416,17 +6749,16 @@
 
 void unmarshal_VkPhysicalDeviceIDProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceIDProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint8_t*)forUnmarshaling->deviceUUID, VK_UUID_SIZE * sizeof(uint8_t));
     vkStream->read((uint8_t*)forUnmarshaling->driverUUID, VK_UUID_SIZE * sizeof(uint8_t));
     vkStream->read((uint8_t*)forUnmarshaling->deviceLUID, VK_LUID_SIZE * sizeof(uint8_t));
@@ -6436,140 +6768,136 @@
 
 void marshal_VkExternalMemoryImageCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalMemoryImageCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkExternalMemoryHandleTypeFlags*)&forMarshaling->handleTypes, sizeof(VkExternalMemoryHandleTypeFlags));
 }
 
 void unmarshal_VkExternalMemoryImageCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalMemoryImageCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkExternalMemoryHandleTypeFlags*)&forUnmarshaling->handleTypes, sizeof(VkExternalMemoryHandleTypeFlags));
 }
 
 void marshal_VkExternalMemoryBufferCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalMemoryBufferCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkExternalMemoryHandleTypeFlags*)&forMarshaling->handleTypes, sizeof(VkExternalMemoryHandleTypeFlags));
 }
 
 void unmarshal_VkExternalMemoryBufferCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalMemoryBufferCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkExternalMemoryHandleTypeFlags*)&forUnmarshaling->handleTypes, sizeof(VkExternalMemoryHandleTypeFlags));
 }
 
 void marshal_VkExportMemoryAllocateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExportMemoryAllocateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkExternalMemoryHandleTypeFlags*)&forMarshaling->handleTypes, sizeof(VkExternalMemoryHandleTypeFlags));
 }
 
 void unmarshal_VkExportMemoryAllocateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExportMemoryAllocateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkExternalMemoryHandleTypeFlags*)&forUnmarshaling->handleTypes, sizeof(VkExternalMemoryHandleTypeFlags));
 }
 
 void marshal_VkPhysicalDeviceExternalFenceInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalFenceInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkExternalFenceHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalFenceHandleTypeFlagBits));
 }
 
 void unmarshal_VkPhysicalDeviceExternalFenceInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceExternalFenceInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkExternalFenceHandleTypeFlagBits*)&forUnmarshaling->handleType, sizeof(VkExternalFenceHandleTypeFlagBits));
 }
 
 void marshal_VkExternalFenceProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalFenceProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkExternalFenceHandleTypeFlags*)&forMarshaling->exportFromImportedHandleTypes, sizeof(VkExternalFenceHandleTypeFlags));
     vkStream->write((VkExternalFenceHandleTypeFlags*)&forMarshaling->compatibleHandleTypes, sizeof(VkExternalFenceHandleTypeFlags));
     vkStream->write((VkExternalFenceFeatureFlags*)&forMarshaling->externalFenceFeatures, sizeof(VkExternalFenceFeatureFlags));
@@ -6577,17 +6905,16 @@
 
 void unmarshal_VkExternalFenceProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalFenceProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkExternalFenceHandleTypeFlags*)&forUnmarshaling->exportFromImportedHandleTypes, sizeof(VkExternalFenceHandleTypeFlags));
     vkStream->read((VkExternalFenceHandleTypeFlags*)&forUnmarshaling->compatibleHandleTypes, sizeof(VkExternalFenceHandleTypeFlags));
     vkStream->read((VkExternalFenceFeatureFlags*)&forUnmarshaling->externalFenceFeatures, sizeof(VkExternalFenceFeatureFlags));
@@ -6595,109 +6922,106 @@
 
 void marshal_VkExportFenceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExportFenceCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkExternalFenceHandleTypeFlags*)&forMarshaling->handleTypes, sizeof(VkExternalFenceHandleTypeFlags));
 }
 
 void unmarshal_VkExportFenceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExportFenceCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkExternalFenceHandleTypeFlags*)&forUnmarshaling->handleTypes, sizeof(VkExternalFenceHandleTypeFlags));
 }
 
 void marshal_VkExportSemaphoreCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExportSemaphoreCreateInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkExternalSemaphoreHandleTypeFlags*)&forMarshaling->handleTypes, sizeof(VkExternalSemaphoreHandleTypeFlags));
 }
 
 void unmarshal_VkExportSemaphoreCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExportSemaphoreCreateInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkExternalSemaphoreHandleTypeFlags*)&forUnmarshaling->handleTypes, sizeof(VkExternalSemaphoreHandleTypeFlags));
 }
 
 void marshal_VkPhysicalDeviceExternalSemaphoreInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalSemaphoreInfo* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkExternalSemaphoreHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalSemaphoreHandleTypeFlagBits));
 }
 
 void unmarshal_VkPhysicalDeviceExternalSemaphoreInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceExternalSemaphoreInfo* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkExternalSemaphoreHandleTypeFlagBits*)&forUnmarshaling->handleType, sizeof(VkExternalSemaphoreHandleTypeFlagBits));
 }
 
 void marshal_VkExternalSemaphoreProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalSemaphoreProperties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkExternalSemaphoreHandleTypeFlags*)&forMarshaling->exportFromImportedHandleTypes, sizeof(VkExternalSemaphoreHandleTypeFlags));
     vkStream->write((VkExternalSemaphoreHandleTypeFlags*)&forMarshaling->compatibleHandleTypes, sizeof(VkExternalSemaphoreHandleTypeFlags));
     vkStream->write((VkExternalSemaphoreFeatureFlags*)&forMarshaling->externalSemaphoreFeatures, sizeof(VkExternalSemaphoreFeatureFlags));
@@ -6705,17 +7029,16 @@
 
 void unmarshal_VkExternalSemaphoreProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalSemaphoreProperties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkExternalSemaphoreHandleTypeFlags*)&forUnmarshaling->exportFromImportedHandleTypes, sizeof(VkExternalSemaphoreHandleTypeFlags));
     vkStream->read((VkExternalSemaphoreHandleTypeFlags*)&forUnmarshaling->compatibleHandleTypes, sizeof(VkExternalSemaphoreHandleTypeFlags));
     vkStream->read((VkExternalSemaphoreFeatureFlags*)&forUnmarshaling->externalSemaphoreFeatures, sizeof(VkExternalSemaphoreFeatureFlags));
@@ -6723,110 +7046,2351 @@
 
 void marshal_VkPhysicalDeviceMaintenance3Properties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceMaintenance3Properties* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->maxPerSetDescriptors, sizeof(uint32_t));
     vkStream->write((VkDeviceSize*)&forMarshaling->maxMemoryAllocationSize, sizeof(VkDeviceSize));
 }
 
 void unmarshal_VkPhysicalDeviceMaintenance3Properties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceMaintenance3Properties* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->maxPerSetDescriptors, sizeof(uint32_t));
     vkStream->read((VkDeviceSize*)&forUnmarshaling->maxMemoryAllocationSize, sizeof(VkDeviceSize));
 }
 
 void marshal_VkDescriptorSetLayoutSupport(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDescriptorSetLayoutSupport* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->supported, sizeof(VkBool32));
 }
 
 void unmarshal_VkDescriptorSetLayoutSupport(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDescriptorSetLayoutSupport* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->supported, sizeof(VkBool32));
 }
 
-void marshal_VkPhysicalDeviceShaderDrawParameterFeatures(
+void marshal_VkPhysicalDeviceShaderDrawParametersFeatures(
     VulkanStreamGuest* vkStream,
-    const VkPhysicalDeviceShaderDrawParameterFeatures* forMarshaling)
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderDrawParametersFeatures* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->shaderDrawParameters, sizeof(VkBool32));
 }
 
-void unmarshal_VkPhysicalDeviceShaderDrawParameterFeatures(
+void unmarshal_VkPhysicalDeviceShaderDrawParametersFeatures(
     VulkanStreamGuest* vkStream,
-    VkPhysicalDeviceShaderDrawParameterFeatures* forUnmarshaling)
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderDrawParametersFeatures* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->shaderDrawParameters, sizeof(VkBool32));
 }
 
 #endif
+#ifdef VK_VERSION_1_2
+void marshal_VkPhysicalDeviceVulkan11Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan11Features* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->storageBuffer16BitAccess, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->uniformAndStorageBuffer16BitAccess, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->storagePushConstant16, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->storageInputOutput16, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->multiview, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->multiviewGeometryShader, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->multiviewTessellationShader, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->variablePointersStorageBuffer, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->variablePointers, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->protectedMemory, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->samplerYcbcrConversion, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderDrawParameters, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceVulkan11Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceVulkan11Features* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->storageBuffer16BitAccess, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->uniformAndStorageBuffer16BitAccess, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->storagePushConstant16, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->storageInputOutput16, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->multiview, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->multiviewGeometryShader, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->multiviewTessellationShader, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->variablePointersStorageBuffer, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->variablePointers, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->protectedMemory, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->samplerYcbcrConversion, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderDrawParameters, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceVulkan11Properties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan11Properties* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint8_t*)forMarshaling->deviceUUID, VK_UUID_SIZE * sizeof(uint8_t));
+    vkStream->write((uint8_t*)forMarshaling->driverUUID, VK_UUID_SIZE * sizeof(uint8_t));
+    vkStream->write((uint8_t*)forMarshaling->deviceLUID, VK_LUID_SIZE * sizeof(uint8_t));
+    vkStream->write((uint32_t*)&forMarshaling->deviceNodeMask, sizeof(uint32_t));
+    vkStream->write((VkBool32*)&forMarshaling->deviceLUIDValid, sizeof(VkBool32));
+    vkStream->write((uint32_t*)&forMarshaling->subgroupSize, sizeof(uint32_t));
+    vkStream->write((VkShaderStageFlags*)&forMarshaling->subgroupSupportedStages, sizeof(VkShaderStageFlags));
+    vkStream->write((VkSubgroupFeatureFlags*)&forMarshaling->subgroupSupportedOperations, sizeof(VkSubgroupFeatureFlags));
+    vkStream->write((VkBool32*)&forMarshaling->subgroupQuadOperationsInAllStages, sizeof(VkBool32));
+    vkStream->write((VkPointClippingBehavior*)&forMarshaling->pointClippingBehavior, sizeof(VkPointClippingBehavior));
+    vkStream->write((uint32_t*)&forMarshaling->maxMultiviewViewCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxMultiviewInstanceIndex, sizeof(uint32_t));
+    vkStream->write((VkBool32*)&forMarshaling->protectedNoFault, sizeof(VkBool32));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerSetDescriptors, sizeof(uint32_t));
+    vkStream->write((VkDeviceSize*)&forMarshaling->maxMemoryAllocationSize, sizeof(VkDeviceSize));
+}
+
+void unmarshal_VkPhysicalDeviceVulkan11Properties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceVulkan11Properties* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint8_t*)forUnmarshaling->deviceUUID, VK_UUID_SIZE * sizeof(uint8_t));
+    vkStream->read((uint8_t*)forUnmarshaling->driverUUID, VK_UUID_SIZE * sizeof(uint8_t));
+    vkStream->read((uint8_t*)forUnmarshaling->deviceLUID, VK_LUID_SIZE * sizeof(uint8_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->deviceNodeMask, sizeof(uint32_t));
+    vkStream->read((VkBool32*)&forUnmarshaling->deviceLUIDValid, sizeof(VkBool32));
+    vkStream->read((uint32_t*)&forUnmarshaling->subgroupSize, sizeof(uint32_t));
+    vkStream->read((VkShaderStageFlags*)&forUnmarshaling->subgroupSupportedStages, sizeof(VkShaderStageFlags));
+    vkStream->read((VkSubgroupFeatureFlags*)&forUnmarshaling->subgroupSupportedOperations, sizeof(VkSubgroupFeatureFlags));
+    vkStream->read((VkBool32*)&forUnmarshaling->subgroupQuadOperationsInAllStages, sizeof(VkBool32));
+    vkStream->read((VkPointClippingBehavior*)&forUnmarshaling->pointClippingBehavior, sizeof(VkPointClippingBehavior));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxMultiviewViewCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxMultiviewInstanceIndex, sizeof(uint32_t));
+    vkStream->read((VkBool32*)&forUnmarshaling->protectedNoFault, sizeof(VkBool32));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerSetDescriptors, sizeof(uint32_t));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->maxMemoryAllocationSize, sizeof(VkDeviceSize));
+}
+
+void marshal_VkPhysicalDeviceVulkan12Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan12Features* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->samplerMirrorClampToEdge, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->drawIndirectCount, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->storageBuffer8BitAccess, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->uniformAndStorageBuffer8BitAccess, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->storagePushConstant8, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderBufferInt64Atomics, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSharedInt64Atomics, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderFloat16, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderInt8, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderInputAttachmentArrayDynamicIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderUniformTexelBufferArrayDynamicIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderStorageTexelBufferArrayDynamicIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderUniformBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSampledImageArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderStorageBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderStorageImageArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderInputAttachmentArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderUniformTexelBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderStorageTexelBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingUniformBufferUpdateAfterBind, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingSampledImageUpdateAfterBind, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingStorageImageUpdateAfterBind, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingStorageBufferUpdateAfterBind, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingUniformTexelBufferUpdateAfterBind, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingStorageTexelBufferUpdateAfterBind, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingUpdateUnusedWhilePending, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingPartiallyBound, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingVariableDescriptorCount, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->runtimeDescriptorArray, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->samplerFilterMinmax, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->scalarBlockLayout, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->imagelessFramebuffer, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->uniformBufferStandardLayout, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSubgroupExtendedTypes, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->separateDepthStencilLayouts, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->hostQueryReset, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->timelineSemaphore, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->bufferDeviceAddress, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->bufferDeviceAddressCaptureReplay, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->bufferDeviceAddressMultiDevice, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->vulkanMemoryModel, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->vulkanMemoryModelDeviceScope, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->vulkanMemoryModelAvailabilityVisibilityChains, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderOutputViewportIndex, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderOutputLayer, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->subgroupBroadcastDynamicId, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceVulkan12Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceVulkan12Features* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->samplerMirrorClampToEdge, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->drawIndirectCount, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->storageBuffer8BitAccess, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->uniformAndStorageBuffer8BitAccess, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->storagePushConstant8, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderBufferInt64Atomics, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSharedInt64Atomics, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderFloat16, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderInt8, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderInputAttachmentArrayDynamicIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderUniformTexelBufferArrayDynamicIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageTexelBufferArrayDynamicIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderUniformBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSampledImageArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageImageArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderInputAttachmentArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderUniformTexelBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageTexelBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingUniformBufferUpdateAfterBind, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingSampledImageUpdateAfterBind, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingStorageImageUpdateAfterBind, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingStorageBufferUpdateAfterBind, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingUniformTexelBufferUpdateAfterBind, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingStorageTexelBufferUpdateAfterBind, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingUpdateUnusedWhilePending, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingPartiallyBound, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingVariableDescriptorCount, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->runtimeDescriptorArray, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->samplerFilterMinmax, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->scalarBlockLayout, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->imagelessFramebuffer, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->uniformBufferStandardLayout, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSubgroupExtendedTypes, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->separateDepthStencilLayouts, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->hostQueryReset, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->timelineSemaphore, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->bufferDeviceAddress, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->bufferDeviceAddressCaptureReplay, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->bufferDeviceAddressMultiDevice, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->vulkanMemoryModel, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->vulkanMemoryModelDeviceScope, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->vulkanMemoryModelAvailabilityVisibilityChains, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderOutputViewportIndex, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderOutputLayer, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->subgroupBroadcastDynamicId, sizeof(VkBool32));
+}
+
+void marshal_VkConformanceVersion(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkConformanceVersion* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((uint8_t*)&forMarshaling->major, sizeof(uint8_t));
+    vkStream->write((uint8_t*)&forMarshaling->minor, sizeof(uint8_t));
+    vkStream->write((uint8_t*)&forMarshaling->subminor, sizeof(uint8_t));
+    vkStream->write((uint8_t*)&forMarshaling->patch, sizeof(uint8_t));
+}
+
+void unmarshal_VkConformanceVersion(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkConformanceVersion* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((uint8_t*)&forUnmarshaling->major, sizeof(uint8_t));
+    vkStream->read((uint8_t*)&forUnmarshaling->minor, sizeof(uint8_t));
+    vkStream->read((uint8_t*)&forUnmarshaling->subminor, sizeof(uint8_t));
+    vkStream->read((uint8_t*)&forUnmarshaling->patch, sizeof(uint8_t));
+}
+
+void marshal_VkPhysicalDeviceVulkan12Properties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan12Properties* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDriverId*)&forMarshaling->driverID, sizeof(VkDriverId));
+    vkStream->write((char*)forMarshaling->driverName, VK_MAX_DRIVER_NAME_SIZE * sizeof(char));
+    vkStream->write((char*)forMarshaling->driverInfo, VK_MAX_DRIVER_INFO_SIZE * sizeof(char));
+    marshal_VkConformanceVersion(vkStream, rootType, (VkConformanceVersion*)(&forMarshaling->conformanceVersion));
+    vkStream->write((VkShaderFloatControlsIndependence*)&forMarshaling->denormBehaviorIndependence, sizeof(VkShaderFloatControlsIndependence));
+    vkStream->write((VkShaderFloatControlsIndependence*)&forMarshaling->roundingModeIndependence, sizeof(VkShaderFloatControlsIndependence));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSignedZeroInfNanPreserveFloat16, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSignedZeroInfNanPreserveFloat32, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSignedZeroInfNanPreserveFloat64, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderDenormPreserveFloat16, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderDenormPreserveFloat32, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderDenormPreserveFloat64, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderDenormFlushToZeroFloat16, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderDenormFlushToZeroFloat32, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderDenormFlushToZeroFloat64, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderRoundingModeRTEFloat16, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderRoundingModeRTEFloat32, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderRoundingModeRTEFloat64, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderRoundingModeRTZFloat16, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderRoundingModeRTZFloat32, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderRoundingModeRTZFloat64, sizeof(VkBool32));
+    vkStream->write((uint32_t*)&forMarshaling->maxUpdateAfterBindDescriptorsInAllPools, sizeof(uint32_t));
+    vkStream->write((VkBool32*)&forMarshaling->shaderUniformBufferArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSampledImageArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderStorageBufferArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderStorageImageArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderInputAttachmentArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->robustBufferAccessUpdateAfterBind, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->quadDivergentImplicitLod, sizeof(VkBool32));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindSamplers, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindUniformBuffers, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindStorageBuffers, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindSampledImages, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindStorageImages, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindInputAttachments, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageUpdateAfterBindResources, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindSamplers, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindUniformBuffers, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindStorageBuffers, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindSampledImages, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindStorageImages, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindInputAttachments, sizeof(uint32_t));
+    vkStream->write((VkResolveModeFlags*)&forMarshaling->supportedDepthResolveModes, sizeof(VkResolveModeFlags));
+    vkStream->write((VkResolveModeFlags*)&forMarshaling->supportedStencilResolveModes, sizeof(VkResolveModeFlags));
+    vkStream->write((VkBool32*)&forMarshaling->independentResolveNone, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->independentResolve, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->filterMinmaxSingleComponentFormats, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->filterMinmaxImageComponentMapping, sizeof(VkBool32));
+    vkStream->write((uint64_t*)&forMarshaling->maxTimelineSemaphoreValueDifference, sizeof(uint64_t));
+    vkStream->write((VkSampleCountFlags*)&forMarshaling->framebufferIntegerColorSampleCounts, sizeof(VkSampleCountFlags));
+}
+
+void unmarshal_VkPhysicalDeviceVulkan12Properties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceVulkan12Properties* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDriverId*)&forUnmarshaling->driverID, sizeof(VkDriverId));
+    vkStream->read((char*)forUnmarshaling->driverName, VK_MAX_DRIVER_NAME_SIZE * sizeof(char));
+    vkStream->read((char*)forUnmarshaling->driverInfo, VK_MAX_DRIVER_INFO_SIZE * sizeof(char));
+    unmarshal_VkConformanceVersion(vkStream, rootType, (VkConformanceVersion*)(&forUnmarshaling->conformanceVersion));
+    vkStream->read((VkShaderFloatControlsIndependence*)&forUnmarshaling->denormBehaviorIndependence, sizeof(VkShaderFloatControlsIndependence));
+    vkStream->read((VkShaderFloatControlsIndependence*)&forUnmarshaling->roundingModeIndependence, sizeof(VkShaderFloatControlsIndependence));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSignedZeroInfNanPreserveFloat16, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSignedZeroInfNanPreserveFloat32, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSignedZeroInfNanPreserveFloat64, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderDenormPreserveFloat16, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderDenormPreserveFloat32, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderDenormPreserveFloat64, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderDenormFlushToZeroFloat16, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderDenormFlushToZeroFloat32, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderDenormFlushToZeroFloat64, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderRoundingModeRTEFloat16, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderRoundingModeRTEFloat32, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderRoundingModeRTEFloat64, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderRoundingModeRTZFloat16, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderRoundingModeRTZFloat32, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderRoundingModeRTZFloat64, sizeof(VkBool32));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxUpdateAfterBindDescriptorsInAllPools, sizeof(uint32_t));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderUniformBufferArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSampledImageArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageBufferArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageImageArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderInputAttachmentArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->robustBufferAccessUpdateAfterBind, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->quadDivergentImplicitLod, sizeof(VkBool32));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindSamplers, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindUniformBuffers, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindStorageBuffers, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindSampledImages, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindStorageImages, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindInputAttachments, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageUpdateAfterBindResources, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindSamplers, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindUniformBuffers, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindStorageBuffers, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindSampledImages, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindStorageImages, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindInputAttachments, sizeof(uint32_t));
+    vkStream->read((VkResolveModeFlags*)&forUnmarshaling->supportedDepthResolveModes, sizeof(VkResolveModeFlags));
+    vkStream->read((VkResolveModeFlags*)&forUnmarshaling->supportedStencilResolveModes, sizeof(VkResolveModeFlags));
+    vkStream->read((VkBool32*)&forUnmarshaling->independentResolveNone, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->independentResolve, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->filterMinmaxSingleComponentFormats, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->filterMinmaxImageComponentMapping, sizeof(VkBool32));
+    vkStream->read((uint64_t*)&forUnmarshaling->maxTimelineSemaphoreValueDifference, sizeof(uint64_t));
+    vkStream->read((VkSampleCountFlags*)&forUnmarshaling->framebufferIntegerColorSampleCounts, sizeof(VkSampleCountFlags));
+}
+
+void marshal_VkImageFormatListCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageFormatListCreateInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->viewFormatCount, sizeof(uint32_t));
+    vkStream->write((const VkFormat*)forMarshaling->pViewFormats, forMarshaling->viewFormatCount * sizeof(const VkFormat));
+}
+
+void unmarshal_VkImageFormatListCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageFormatListCreateInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->viewFormatCount, sizeof(uint32_t));
+    vkStream->read((VkFormat*)forUnmarshaling->pViewFormats, forUnmarshaling->viewFormatCount * sizeof(const VkFormat));
+}
+
+void marshal_VkAttachmentDescription2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentDescription2* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkAttachmentDescriptionFlags*)&forMarshaling->flags, sizeof(VkAttachmentDescriptionFlags));
+    vkStream->write((VkFormat*)&forMarshaling->format, sizeof(VkFormat));
+    vkStream->write((VkSampleCountFlagBits*)&forMarshaling->samples, sizeof(VkSampleCountFlagBits));
+    vkStream->write((VkAttachmentLoadOp*)&forMarshaling->loadOp, sizeof(VkAttachmentLoadOp));
+    vkStream->write((VkAttachmentStoreOp*)&forMarshaling->storeOp, sizeof(VkAttachmentStoreOp));
+    vkStream->write((VkAttachmentLoadOp*)&forMarshaling->stencilLoadOp, sizeof(VkAttachmentLoadOp));
+    vkStream->write((VkAttachmentStoreOp*)&forMarshaling->stencilStoreOp, sizeof(VkAttachmentStoreOp));
+    vkStream->write((VkImageLayout*)&forMarshaling->initialLayout, sizeof(VkImageLayout));
+    vkStream->write((VkImageLayout*)&forMarshaling->finalLayout, sizeof(VkImageLayout));
+}
+
+void unmarshal_VkAttachmentDescription2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAttachmentDescription2* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkAttachmentDescriptionFlags*)&forUnmarshaling->flags, sizeof(VkAttachmentDescriptionFlags));
+    vkStream->read((VkFormat*)&forUnmarshaling->format, sizeof(VkFormat));
+    vkStream->read((VkSampleCountFlagBits*)&forUnmarshaling->samples, sizeof(VkSampleCountFlagBits));
+    vkStream->read((VkAttachmentLoadOp*)&forUnmarshaling->loadOp, sizeof(VkAttachmentLoadOp));
+    vkStream->read((VkAttachmentStoreOp*)&forUnmarshaling->storeOp, sizeof(VkAttachmentStoreOp));
+    vkStream->read((VkAttachmentLoadOp*)&forUnmarshaling->stencilLoadOp, sizeof(VkAttachmentLoadOp));
+    vkStream->read((VkAttachmentStoreOp*)&forUnmarshaling->stencilStoreOp, sizeof(VkAttachmentStoreOp));
+    vkStream->read((VkImageLayout*)&forUnmarshaling->initialLayout, sizeof(VkImageLayout));
+    vkStream->read((VkImageLayout*)&forUnmarshaling->finalLayout, sizeof(VkImageLayout));
+}
+
+void marshal_VkAttachmentReference2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentReference2* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->attachment, sizeof(uint32_t));
+    vkStream->write((VkImageLayout*)&forMarshaling->layout, sizeof(VkImageLayout));
+    vkStream->write((VkImageAspectFlags*)&forMarshaling->aspectMask, sizeof(VkImageAspectFlags));
+}
+
+void unmarshal_VkAttachmentReference2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAttachmentReference2* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->attachment, sizeof(uint32_t));
+    vkStream->read((VkImageLayout*)&forUnmarshaling->layout, sizeof(VkImageLayout));
+    vkStream->read((VkImageAspectFlags*)&forUnmarshaling->aspectMask, sizeof(VkImageAspectFlags));
+}
+
+void marshal_VkSubpassDescription2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassDescription2* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkSubpassDescriptionFlags*)&forMarshaling->flags, sizeof(VkSubpassDescriptionFlags));
+    vkStream->write((VkPipelineBindPoint*)&forMarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    vkStream->write((uint32_t*)&forMarshaling->viewMask, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->inputAttachmentCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->inputAttachmentCount; ++i)
+        {
+            marshal_VkAttachmentReference2(vkStream, rootType, (const VkAttachmentReference2*)(forMarshaling->pInputAttachments + i));
+        }
+    }
+    vkStream->write((uint32_t*)&forMarshaling->colorAttachmentCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->colorAttachmentCount; ++i)
+        {
+            marshal_VkAttachmentReference2(vkStream, rootType, (const VkAttachmentReference2*)(forMarshaling->pColorAttachments + i));
+        }
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pResolveAttachments;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->pResolveAttachments)
+    {
+        if (forMarshaling)
+        {
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->colorAttachmentCount; ++i)
+            {
+                marshal_VkAttachmentReference2(vkStream, rootType, (const VkAttachmentReference2*)(forMarshaling->pResolveAttachments + i));
+            }
+        }
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pDepthStencilAttachment;
+    vkStream->putBe64(cgen_var_1);
+    if (forMarshaling->pDepthStencilAttachment)
+    {
+        marshal_VkAttachmentReference2(vkStream, rootType, (const VkAttachmentReference2*)(forMarshaling->pDepthStencilAttachment));
+    }
+    vkStream->write((uint32_t*)&forMarshaling->preserveAttachmentCount, sizeof(uint32_t));
+    vkStream->write((const uint32_t*)forMarshaling->pPreserveAttachments, forMarshaling->preserveAttachmentCount * sizeof(const uint32_t));
+}
+
+void unmarshal_VkSubpassDescription2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSubpassDescription2* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkSubpassDescriptionFlags*)&forUnmarshaling->flags, sizeof(VkSubpassDescriptionFlags));
+    vkStream->read((VkPipelineBindPoint*)&forUnmarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    vkStream->read((uint32_t*)&forUnmarshaling->viewMask, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->inputAttachmentCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->inputAttachmentCount; ++i)
+        {
+            unmarshal_VkAttachmentReference2(vkStream, rootType, (VkAttachmentReference2*)(forUnmarshaling->pInputAttachments + i));
+        }
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->colorAttachmentCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->colorAttachmentCount; ++i)
+        {
+            unmarshal_VkAttachmentReference2(vkStream, rootType, (VkAttachmentReference2*)(forUnmarshaling->pColorAttachments + i));
+        }
+    }
+    // WARNING PTR CHECK
+    const VkAttachmentReference2* check_pResolveAttachments;
+    check_pResolveAttachments = (const VkAttachmentReference2*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pResolveAttachments)
+    {
+        if (!(check_pResolveAttachments))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pResolveAttachments inconsistent between guest and host\n");
+        }
+        if (forUnmarshaling)
+        {
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->colorAttachmentCount; ++i)
+            {
+                unmarshal_VkAttachmentReference2(vkStream, rootType, (VkAttachmentReference2*)(forUnmarshaling->pResolveAttachments + i));
+            }
+        }
+    }
+    // WARNING PTR CHECK
+    const VkAttachmentReference2* check_pDepthStencilAttachment;
+    check_pDepthStencilAttachment = (const VkAttachmentReference2*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pDepthStencilAttachment)
+    {
+        if (!(check_pDepthStencilAttachment))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pDepthStencilAttachment inconsistent between guest and host\n");
+        }
+        unmarshal_VkAttachmentReference2(vkStream, rootType, (VkAttachmentReference2*)(forUnmarshaling->pDepthStencilAttachment));
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->preserveAttachmentCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)forUnmarshaling->pPreserveAttachments, forUnmarshaling->preserveAttachmentCount * sizeof(const uint32_t));
+}
+
+void marshal_VkSubpassDependency2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassDependency2* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->srcSubpass, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->dstSubpass, sizeof(uint32_t));
+    vkStream->write((VkPipelineStageFlags*)&forMarshaling->srcStageMask, sizeof(VkPipelineStageFlags));
+    vkStream->write((VkPipelineStageFlags*)&forMarshaling->dstStageMask, sizeof(VkPipelineStageFlags));
+    vkStream->write((VkAccessFlags*)&forMarshaling->srcAccessMask, sizeof(VkAccessFlags));
+    vkStream->write((VkAccessFlags*)&forMarshaling->dstAccessMask, sizeof(VkAccessFlags));
+    vkStream->write((VkDependencyFlags*)&forMarshaling->dependencyFlags, sizeof(VkDependencyFlags));
+    vkStream->write((int32_t*)&forMarshaling->viewOffset, sizeof(int32_t));
+}
+
+void unmarshal_VkSubpassDependency2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSubpassDependency2* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->srcSubpass, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->dstSubpass, sizeof(uint32_t));
+    vkStream->read((VkPipelineStageFlags*)&forUnmarshaling->srcStageMask, sizeof(VkPipelineStageFlags));
+    vkStream->read((VkPipelineStageFlags*)&forUnmarshaling->dstStageMask, sizeof(VkPipelineStageFlags));
+    vkStream->read((VkAccessFlags*)&forUnmarshaling->srcAccessMask, sizeof(VkAccessFlags));
+    vkStream->read((VkAccessFlags*)&forUnmarshaling->dstAccessMask, sizeof(VkAccessFlags));
+    vkStream->read((VkDependencyFlags*)&forUnmarshaling->dependencyFlags, sizeof(VkDependencyFlags));
+    vkStream->read((int32_t*)&forUnmarshaling->viewOffset, sizeof(int32_t));
+}
+
+void marshal_VkRenderPassCreateInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassCreateInfo2* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkRenderPassCreateFlags*)&forMarshaling->flags, sizeof(VkRenderPassCreateFlags));
+    vkStream->write((uint32_t*)&forMarshaling->attachmentCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->attachmentCount; ++i)
+        {
+            marshal_VkAttachmentDescription2(vkStream, rootType, (const VkAttachmentDescription2*)(forMarshaling->pAttachments + i));
+        }
+    }
+    vkStream->write((uint32_t*)&forMarshaling->subpassCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->subpassCount; ++i)
+        {
+            marshal_VkSubpassDescription2(vkStream, rootType, (const VkSubpassDescription2*)(forMarshaling->pSubpasses + i));
+        }
+    }
+    vkStream->write((uint32_t*)&forMarshaling->dependencyCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->dependencyCount; ++i)
+        {
+            marshal_VkSubpassDependency2(vkStream, rootType, (const VkSubpassDependency2*)(forMarshaling->pDependencies + i));
+        }
+    }
+    vkStream->write((uint32_t*)&forMarshaling->correlatedViewMaskCount, sizeof(uint32_t));
+    vkStream->write((const uint32_t*)forMarshaling->pCorrelatedViewMasks, forMarshaling->correlatedViewMaskCount * sizeof(const uint32_t));
+}
+
+void unmarshal_VkRenderPassCreateInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRenderPassCreateInfo2* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkRenderPassCreateFlags*)&forUnmarshaling->flags, sizeof(VkRenderPassCreateFlags));
+    vkStream->read((uint32_t*)&forUnmarshaling->attachmentCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->attachmentCount; ++i)
+        {
+            unmarshal_VkAttachmentDescription2(vkStream, rootType, (VkAttachmentDescription2*)(forUnmarshaling->pAttachments + i));
+        }
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->subpassCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->subpassCount; ++i)
+        {
+            unmarshal_VkSubpassDescription2(vkStream, rootType, (VkSubpassDescription2*)(forUnmarshaling->pSubpasses + i));
+        }
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->dependencyCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->dependencyCount; ++i)
+        {
+            unmarshal_VkSubpassDependency2(vkStream, rootType, (VkSubpassDependency2*)(forUnmarshaling->pDependencies + i));
+        }
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->correlatedViewMaskCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)forUnmarshaling->pCorrelatedViewMasks, forUnmarshaling->correlatedViewMaskCount * sizeof(const uint32_t));
+}
+
+void marshal_VkSubpassBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassBeginInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkSubpassContents*)&forMarshaling->contents, sizeof(VkSubpassContents));
+}
+
+void unmarshal_VkSubpassBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSubpassBeginInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkSubpassContents*)&forUnmarshaling->contents, sizeof(VkSubpassContents));
+}
+
+void marshal_VkSubpassEndInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassEndInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+}
+
+void unmarshal_VkSubpassEndInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSubpassEndInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+}
+
+void marshal_VkPhysicalDevice8BitStorageFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevice8BitStorageFeatures* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->storageBuffer8BitAccess, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->uniformAndStorageBuffer8BitAccess, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->storagePushConstant8, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDevice8BitStorageFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevice8BitStorageFeatures* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->storageBuffer8BitAccess, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->uniformAndStorageBuffer8BitAccess, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->storagePushConstant8, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceDriverProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDriverProperties* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDriverId*)&forMarshaling->driverID, sizeof(VkDriverId));
+    vkStream->write((char*)forMarshaling->driverName, VK_MAX_DRIVER_NAME_SIZE * sizeof(char));
+    vkStream->write((char*)forMarshaling->driverInfo, VK_MAX_DRIVER_INFO_SIZE * sizeof(char));
+    marshal_VkConformanceVersion(vkStream, rootType, (VkConformanceVersion*)(&forMarshaling->conformanceVersion));
+}
+
+void unmarshal_VkPhysicalDeviceDriverProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDriverProperties* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDriverId*)&forUnmarshaling->driverID, sizeof(VkDriverId));
+    vkStream->read((char*)forUnmarshaling->driverName, VK_MAX_DRIVER_NAME_SIZE * sizeof(char));
+    vkStream->read((char*)forUnmarshaling->driverInfo, VK_MAX_DRIVER_INFO_SIZE * sizeof(char));
+    unmarshal_VkConformanceVersion(vkStream, rootType, (VkConformanceVersion*)(&forUnmarshaling->conformanceVersion));
+}
+
+void marshal_VkPhysicalDeviceShaderAtomicInt64Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderAtomicInt64Features* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->shaderBufferInt64Atomics, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSharedInt64Atomics, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceShaderAtomicInt64Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderAtomicInt64Features* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderBufferInt64Atomics, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSharedInt64Atomics, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceShaderFloat16Int8Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderFloat16Int8Features* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->shaderFloat16, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderInt8, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceShaderFloat16Int8Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderFloat16Int8Features* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderFloat16, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderInt8, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceFloatControlsProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFloatControlsProperties* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkShaderFloatControlsIndependence*)&forMarshaling->denormBehaviorIndependence, sizeof(VkShaderFloatControlsIndependence));
+    vkStream->write((VkShaderFloatControlsIndependence*)&forMarshaling->roundingModeIndependence, sizeof(VkShaderFloatControlsIndependence));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSignedZeroInfNanPreserveFloat16, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSignedZeroInfNanPreserveFloat32, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSignedZeroInfNanPreserveFloat64, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderDenormPreserveFloat16, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderDenormPreserveFloat32, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderDenormPreserveFloat64, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderDenormFlushToZeroFloat16, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderDenormFlushToZeroFloat32, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderDenormFlushToZeroFloat64, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderRoundingModeRTEFloat16, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderRoundingModeRTEFloat32, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderRoundingModeRTEFloat64, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderRoundingModeRTZFloat16, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderRoundingModeRTZFloat32, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderRoundingModeRTZFloat64, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceFloatControlsProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFloatControlsProperties* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkShaderFloatControlsIndependence*)&forUnmarshaling->denormBehaviorIndependence, sizeof(VkShaderFloatControlsIndependence));
+    vkStream->read((VkShaderFloatControlsIndependence*)&forUnmarshaling->roundingModeIndependence, sizeof(VkShaderFloatControlsIndependence));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSignedZeroInfNanPreserveFloat16, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSignedZeroInfNanPreserveFloat32, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSignedZeroInfNanPreserveFloat64, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderDenormPreserveFloat16, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderDenormPreserveFloat32, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderDenormPreserveFloat64, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderDenormFlushToZeroFloat16, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderDenormFlushToZeroFloat32, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderDenormFlushToZeroFloat64, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderRoundingModeRTEFloat16, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderRoundingModeRTEFloat32, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderRoundingModeRTEFloat64, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderRoundingModeRTZFloat16, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderRoundingModeRTZFloat32, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderRoundingModeRTZFloat64, sizeof(VkBool32));
+}
+
+void marshal_VkDescriptorSetLayoutBindingFlagsCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutBindingFlagsCreateInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->bindingCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pBindingFlags;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->pBindingFlags)
+    {
+        vkStream->write((const VkDescriptorBindingFlags*)forMarshaling->pBindingFlags, forMarshaling->bindingCount * sizeof(const VkDescriptorBindingFlags));
+    }
+}
+
+void unmarshal_VkDescriptorSetLayoutBindingFlagsCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorSetLayoutBindingFlagsCreateInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->bindingCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    const VkDescriptorBindingFlags* check_pBindingFlags;
+    check_pBindingFlags = (const VkDescriptorBindingFlags*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pBindingFlags)
+    {
+        if (!(check_pBindingFlags))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pBindingFlags inconsistent between guest and host\n");
+        }
+        vkStream->read((VkDescriptorBindingFlags*)forUnmarshaling->pBindingFlags, forUnmarshaling->bindingCount * sizeof(const VkDescriptorBindingFlags));
+    }
+}
+
+void marshal_VkPhysicalDeviceDescriptorIndexingFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDescriptorIndexingFeatures* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->shaderInputAttachmentArrayDynamicIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderUniformTexelBufferArrayDynamicIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderStorageTexelBufferArrayDynamicIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderUniformBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSampledImageArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderStorageBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderStorageImageArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderInputAttachmentArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderUniformTexelBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderStorageTexelBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingUniformBufferUpdateAfterBind, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingSampledImageUpdateAfterBind, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingStorageImageUpdateAfterBind, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingStorageBufferUpdateAfterBind, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingUniformTexelBufferUpdateAfterBind, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingStorageTexelBufferUpdateAfterBind, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingUpdateUnusedWhilePending, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingPartiallyBound, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingVariableDescriptorCount, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->runtimeDescriptorArray, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceDescriptorIndexingFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDescriptorIndexingFeatures* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderInputAttachmentArrayDynamicIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderUniformTexelBufferArrayDynamicIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageTexelBufferArrayDynamicIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderUniformBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSampledImageArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageImageArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderInputAttachmentArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderUniformTexelBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageTexelBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingUniformBufferUpdateAfterBind, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingSampledImageUpdateAfterBind, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingStorageImageUpdateAfterBind, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingStorageBufferUpdateAfterBind, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingUniformTexelBufferUpdateAfterBind, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingStorageTexelBufferUpdateAfterBind, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingUpdateUnusedWhilePending, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingPartiallyBound, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingVariableDescriptorCount, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->runtimeDescriptorArray, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceDescriptorIndexingProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDescriptorIndexingProperties* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->maxUpdateAfterBindDescriptorsInAllPools, sizeof(uint32_t));
+    vkStream->write((VkBool32*)&forMarshaling->shaderUniformBufferArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSampledImageArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderStorageBufferArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderStorageImageArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderInputAttachmentArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->robustBufferAccessUpdateAfterBind, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->quadDivergentImplicitLod, sizeof(VkBool32));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindSamplers, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindUniformBuffers, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindStorageBuffers, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindSampledImages, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindStorageImages, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindInputAttachments, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageUpdateAfterBindResources, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindSamplers, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindUniformBuffers, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindStorageBuffers, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindSampledImages, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindStorageImages, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindInputAttachments, sizeof(uint32_t));
+}
+
+void unmarshal_VkPhysicalDeviceDescriptorIndexingProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDescriptorIndexingProperties* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxUpdateAfterBindDescriptorsInAllPools, sizeof(uint32_t));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderUniformBufferArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSampledImageArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageBufferArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageImageArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderInputAttachmentArrayNonUniformIndexingNative, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->robustBufferAccessUpdateAfterBind, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->quadDivergentImplicitLod, sizeof(VkBool32));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindSamplers, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindUniformBuffers, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindStorageBuffers, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindSampledImages, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindStorageImages, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindInputAttachments, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageUpdateAfterBindResources, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindSamplers, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindUniformBuffers, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindStorageBuffers, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindSampledImages, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindStorageImages, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindInputAttachments, sizeof(uint32_t));
+}
+
+void marshal_VkDescriptorSetVariableDescriptorCountAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetVariableDescriptorCountAllocateInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->descriptorSetCount, sizeof(uint32_t));
+    vkStream->write((const uint32_t*)forMarshaling->pDescriptorCounts, forMarshaling->descriptorSetCount * sizeof(const uint32_t));
+}
+
+void unmarshal_VkDescriptorSetVariableDescriptorCountAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorSetVariableDescriptorCountAllocateInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->descriptorSetCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)forUnmarshaling->pDescriptorCounts, forUnmarshaling->descriptorSetCount * sizeof(const uint32_t));
+}
+
+void marshal_VkDescriptorSetVariableDescriptorCountLayoutSupport(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetVariableDescriptorCountLayoutSupport* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->maxVariableDescriptorCount, sizeof(uint32_t));
+}
+
+void unmarshal_VkDescriptorSetVariableDescriptorCountLayoutSupport(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorSetVariableDescriptorCountLayoutSupport* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxVariableDescriptorCount, sizeof(uint32_t));
+}
+
+void marshal_VkSubpassDescriptionDepthStencilResolve(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassDescriptionDepthStencilResolve* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkResolveModeFlagBits*)&forMarshaling->depthResolveMode, sizeof(VkResolveModeFlagBits));
+    vkStream->write((VkResolveModeFlagBits*)&forMarshaling->stencilResolveMode, sizeof(VkResolveModeFlagBits));
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pDepthStencilResolveAttachment;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->pDepthStencilResolveAttachment)
+    {
+        marshal_VkAttachmentReference2(vkStream, rootType, (const VkAttachmentReference2*)(forMarshaling->pDepthStencilResolveAttachment));
+    }
+}
+
+void unmarshal_VkSubpassDescriptionDepthStencilResolve(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSubpassDescriptionDepthStencilResolve* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkResolveModeFlagBits*)&forUnmarshaling->depthResolveMode, sizeof(VkResolveModeFlagBits));
+    vkStream->read((VkResolveModeFlagBits*)&forUnmarshaling->stencilResolveMode, sizeof(VkResolveModeFlagBits));
+    // WARNING PTR CHECK
+    const VkAttachmentReference2* check_pDepthStencilResolveAttachment;
+    check_pDepthStencilResolveAttachment = (const VkAttachmentReference2*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pDepthStencilResolveAttachment)
+    {
+        if (!(check_pDepthStencilResolveAttachment))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pDepthStencilResolveAttachment inconsistent between guest and host\n");
+        }
+        unmarshal_VkAttachmentReference2(vkStream, rootType, (VkAttachmentReference2*)(forUnmarshaling->pDepthStencilResolveAttachment));
+    }
+}
+
+void marshal_VkPhysicalDeviceDepthStencilResolveProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDepthStencilResolveProperties* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkResolveModeFlags*)&forMarshaling->supportedDepthResolveModes, sizeof(VkResolveModeFlags));
+    vkStream->write((VkResolveModeFlags*)&forMarshaling->supportedStencilResolveModes, sizeof(VkResolveModeFlags));
+    vkStream->write((VkBool32*)&forMarshaling->independentResolveNone, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->independentResolve, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceDepthStencilResolveProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDepthStencilResolveProperties* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkResolveModeFlags*)&forUnmarshaling->supportedDepthResolveModes, sizeof(VkResolveModeFlags));
+    vkStream->read((VkResolveModeFlags*)&forUnmarshaling->supportedStencilResolveModes, sizeof(VkResolveModeFlags));
+    vkStream->read((VkBool32*)&forUnmarshaling->independentResolveNone, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->independentResolve, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceScalarBlockLayoutFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceScalarBlockLayoutFeatures* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->scalarBlockLayout, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceScalarBlockLayoutFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceScalarBlockLayoutFeatures* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->scalarBlockLayout, sizeof(VkBool32));
+}
+
+void marshal_VkImageStencilUsageCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageStencilUsageCreateInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkImageUsageFlags*)&forMarshaling->stencilUsage, sizeof(VkImageUsageFlags));
+}
+
+void unmarshal_VkImageStencilUsageCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageStencilUsageCreateInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkImageUsageFlags*)&forUnmarshaling->stencilUsage, sizeof(VkImageUsageFlags));
+}
+
+void marshal_VkSamplerReductionModeCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSamplerReductionModeCreateInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkSamplerReductionMode*)&forMarshaling->reductionMode, sizeof(VkSamplerReductionMode));
+}
+
+void unmarshal_VkSamplerReductionModeCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSamplerReductionModeCreateInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkSamplerReductionMode*)&forUnmarshaling->reductionMode, sizeof(VkSamplerReductionMode));
+}
+
+void marshal_VkPhysicalDeviceSamplerFilterMinmaxProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSamplerFilterMinmaxProperties* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->filterMinmaxSingleComponentFormats, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->filterMinmaxImageComponentMapping, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceSamplerFilterMinmaxProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceSamplerFilterMinmaxProperties* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->filterMinmaxSingleComponentFormats, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->filterMinmaxImageComponentMapping, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceVulkanMemoryModelFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkanMemoryModelFeatures* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->vulkanMemoryModel, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->vulkanMemoryModelDeviceScope, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->vulkanMemoryModelAvailabilityVisibilityChains, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceVulkanMemoryModelFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceVulkanMemoryModelFeatures* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->vulkanMemoryModel, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->vulkanMemoryModelDeviceScope, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->vulkanMemoryModelAvailabilityVisibilityChains, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceImagelessFramebufferFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImagelessFramebufferFeatures* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->imagelessFramebuffer, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceImagelessFramebufferFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceImagelessFramebufferFeatures* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->imagelessFramebuffer, sizeof(VkBool32));
+}
+
+void marshal_VkFramebufferAttachmentImageInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFramebufferAttachmentImageInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkImageCreateFlags*)&forMarshaling->flags, sizeof(VkImageCreateFlags));
+    vkStream->write((VkImageUsageFlags*)&forMarshaling->usage, sizeof(VkImageUsageFlags));
+    vkStream->write((uint32_t*)&forMarshaling->width, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->height, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->layerCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->viewFormatCount, sizeof(uint32_t));
+    vkStream->write((const VkFormat*)forMarshaling->pViewFormats, forMarshaling->viewFormatCount * sizeof(const VkFormat));
+}
+
+void unmarshal_VkFramebufferAttachmentImageInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkFramebufferAttachmentImageInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkImageCreateFlags*)&forUnmarshaling->flags, sizeof(VkImageCreateFlags));
+    vkStream->read((VkImageUsageFlags*)&forUnmarshaling->usage, sizeof(VkImageUsageFlags));
+    vkStream->read((uint32_t*)&forUnmarshaling->width, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->height, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->layerCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->viewFormatCount, sizeof(uint32_t));
+    vkStream->read((VkFormat*)forUnmarshaling->pViewFormats, forUnmarshaling->viewFormatCount * sizeof(const VkFormat));
+}
+
+void marshal_VkFramebufferAttachmentsCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFramebufferAttachmentsCreateInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->attachmentImageInfoCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->attachmentImageInfoCount; ++i)
+        {
+            marshal_VkFramebufferAttachmentImageInfo(vkStream, rootType, (const VkFramebufferAttachmentImageInfo*)(forMarshaling->pAttachmentImageInfos + i));
+        }
+    }
+}
+
+void unmarshal_VkFramebufferAttachmentsCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkFramebufferAttachmentsCreateInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->attachmentImageInfoCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->attachmentImageInfoCount; ++i)
+        {
+            unmarshal_VkFramebufferAttachmentImageInfo(vkStream, rootType, (VkFramebufferAttachmentImageInfo*)(forUnmarshaling->pAttachmentImageInfos + i));
+        }
+    }
+}
+
+void marshal_VkRenderPassAttachmentBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassAttachmentBeginInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->attachmentCount, sizeof(uint32_t));
+    if (forMarshaling->attachmentCount)
+    {
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forMarshaling->attachmentCount * 8);
+        vkStream->handleMapping()->mapHandles_VkImageView_u64(forMarshaling->pAttachments, cgen_var_0, forMarshaling->attachmentCount);
+        vkStream->write((uint64_t*)cgen_var_0, forMarshaling->attachmentCount * 8);
+    }
+}
+
+void unmarshal_VkRenderPassAttachmentBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRenderPassAttachmentBeginInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->attachmentCount, sizeof(uint32_t));
+    if (forUnmarshaling->attachmentCount)
+    {
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forUnmarshaling->attachmentCount * 8);
+        vkStream->read((uint64_t*)cgen_var_0, forUnmarshaling->attachmentCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkImageView(cgen_var_0, (VkImageView*)forUnmarshaling->pAttachments, forUnmarshaling->attachmentCount);
+    }
+}
+
+void marshal_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceUniformBufferStandardLayoutFeatures* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->uniformBufferStandardLayout, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceUniformBufferStandardLayoutFeatures* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->uniformBufferStandardLayout, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->shaderSubgroupExtendedTypes, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSubgroupExtendedTypes, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->separateDepthStencilLayouts, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->separateDepthStencilLayouts, sizeof(VkBool32));
+}
+
+void marshal_VkAttachmentReferenceStencilLayout(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentReferenceStencilLayout* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkImageLayout*)&forMarshaling->stencilLayout, sizeof(VkImageLayout));
+}
+
+void unmarshal_VkAttachmentReferenceStencilLayout(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAttachmentReferenceStencilLayout* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkImageLayout*)&forUnmarshaling->stencilLayout, sizeof(VkImageLayout));
+}
+
+void marshal_VkAttachmentDescriptionStencilLayout(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentDescriptionStencilLayout* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkImageLayout*)&forMarshaling->stencilInitialLayout, sizeof(VkImageLayout));
+    vkStream->write((VkImageLayout*)&forMarshaling->stencilFinalLayout, sizeof(VkImageLayout));
+}
+
+void unmarshal_VkAttachmentDescriptionStencilLayout(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAttachmentDescriptionStencilLayout* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkImageLayout*)&forUnmarshaling->stencilInitialLayout, sizeof(VkImageLayout));
+    vkStream->read((VkImageLayout*)&forUnmarshaling->stencilFinalLayout, sizeof(VkImageLayout));
+}
+
+void marshal_VkPhysicalDeviceHostQueryResetFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceHostQueryResetFeatures* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->hostQueryReset, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceHostQueryResetFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceHostQueryResetFeatures* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->hostQueryReset, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceTimelineSemaphoreFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTimelineSemaphoreFeatures* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->timelineSemaphore, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceTimelineSemaphoreFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceTimelineSemaphoreFeatures* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->timelineSemaphore, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceTimelineSemaphoreProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTimelineSemaphoreProperties* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint64_t*)&forMarshaling->maxTimelineSemaphoreValueDifference, sizeof(uint64_t));
+}
+
+void unmarshal_VkPhysicalDeviceTimelineSemaphoreProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceTimelineSemaphoreProperties* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint64_t*)&forUnmarshaling->maxTimelineSemaphoreValueDifference, sizeof(uint64_t));
+}
+
+void marshal_VkSemaphoreTypeCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreTypeCreateInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkSemaphoreType*)&forMarshaling->semaphoreType, sizeof(VkSemaphoreType));
+    vkStream->write((uint64_t*)&forMarshaling->initialValue, sizeof(uint64_t));
+}
+
+void unmarshal_VkSemaphoreTypeCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSemaphoreTypeCreateInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkSemaphoreType*)&forUnmarshaling->semaphoreType, sizeof(VkSemaphoreType));
+    vkStream->read((uint64_t*)&forUnmarshaling->initialValue, sizeof(uint64_t));
+}
+
+void marshal_VkTimelineSemaphoreSubmitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkTimelineSemaphoreSubmitInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->waitSemaphoreValueCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pWaitSemaphoreValues;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->pWaitSemaphoreValues)
+    {
+        vkStream->write((const uint64_t*)forMarshaling->pWaitSemaphoreValues, forMarshaling->waitSemaphoreValueCount * sizeof(const uint64_t));
+    }
+    vkStream->write((uint32_t*)&forMarshaling->signalSemaphoreValueCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pSignalSemaphoreValues;
+    vkStream->putBe64(cgen_var_1);
+    if (forMarshaling->pSignalSemaphoreValues)
+    {
+        vkStream->write((const uint64_t*)forMarshaling->pSignalSemaphoreValues, forMarshaling->signalSemaphoreValueCount * sizeof(const uint64_t));
+    }
+}
+
+void unmarshal_VkTimelineSemaphoreSubmitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkTimelineSemaphoreSubmitInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->waitSemaphoreValueCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    const uint64_t* check_pWaitSemaphoreValues;
+    check_pWaitSemaphoreValues = (const uint64_t*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pWaitSemaphoreValues)
+    {
+        if (!(check_pWaitSemaphoreValues))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pWaitSemaphoreValues inconsistent between guest and host\n");
+        }
+        vkStream->read((uint64_t*)forUnmarshaling->pWaitSemaphoreValues, forUnmarshaling->waitSemaphoreValueCount * sizeof(const uint64_t));
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->signalSemaphoreValueCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    const uint64_t* check_pSignalSemaphoreValues;
+    check_pSignalSemaphoreValues = (const uint64_t*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pSignalSemaphoreValues)
+    {
+        if (!(check_pSignalSemaphoreValues))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pSignalSemaphoreValues inconsistent between guest and host\n");
+        }
+        vkStream->read((uint64_t*)forUnmarshaling->pSignalSemaphoreValues, forUnmarshaling->signalSemaphoreValueCount * sizeof(const uint64_t));
+    }
+}
+
+void marshal_VkSemaphoreWaitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreWaitInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkSemaphoreWaitFlags*)&forMarshaling->flags, sizeof(VkSemaphoreWaitFlags));
+    vkStream->write((uint32_t*)&forMarshaling->semaphoreCount, sizeof(uint32_t));
+    if (forMarshaling->semaphoreCount)
+    {
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forMarshaling->semaphoreCount * 8);
+        vkStream->handleMapping()->mapHandles_VkSemaphore_u64(forMarshaling->pSemaphores, cgen_var_0, forMarshaling->semaphoreCount);
+        vkStream->write((uint64_t*)cgen_var_0, forMarshaling->semaphoreCount * 8);
+    }
+    vkStream->write((const uint64_t*)forMarshaling->pValues, forMarshaling->semaphoreCount * sizeof(const uint64_t));
+}
+
+void unmarshal_VkSemaphoreWaitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSemaphoreWaitInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkSemaphoreWaitFlags*)&forUnmarshaling->flags, sizeof(VkSemaphoreWaitFlags));
+    vkStream->read((uint32_t*)&forUnmarshaling->semaphoreCount, sizeof(uint32_t));
+    if (forUnmarshaling->semaphoreCount)
+    {
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forUnmarshaling->semaphoreCount * 8);
+        vkStream->read((uint64_t*)cgen_var_0, forUnmarshaling->semaphoreCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkSemaphore(cgen_var_0, (VkSemaphore*)forUnmarshaling->pSemaphores, forUnmarshaling->semaphoreCount);
+    }
+    vkStream->read((uint64_t*)forUnmarshaling->pValues, forUnmarshaling->semaphoreCount * sizeof(const uint64_t));
+}
+
+void marshal_VkSemaphoreSignalInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreSignalInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkSemaphore_u64(&forMarshaling->semaphore, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((uint64_t*)&forMarshaling->value, sizeof(uint64_t));
+}
+
+void unmarshal_VkSemaphoreSignalInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSemaphoreSignalInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkSemaphore(&cgen_var_0, (VkSemaphore*)&forUnmarshaling->semaphore, 1);
+    vkStream->read((uint64_t*)&forUnmarshaling->value, sizeof(uint64_t));
+}
+
+void marshal_VkPhysicalDeviceBufferDeviceAddressFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBufferDeviceAddressFeatures* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->bufferDeviceAddress, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->bufferDeviceAddressCaptureReplay, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->bufferDeviceAddressMultiDevice, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceBufferDeviceAddressFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceBufferDeviceAddressFeatures* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->bufferDeviceAddress, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->bufferDeviceAddressCaptureReplay, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->bufferDeviceAddressMultiDevice, sizeof(VkBool32));
+}
+
+void marshal_VkBufferDeviceAddressInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferDeviceAddressInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+}
+
+void unmarshal_VkBufferDeviceAddressInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBufferDeviceAddressInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_0, (VkBuffer*)&forUnmarshaling->buffer, 1);
+}
+
+void marshal_VkBufferOpaqueCaptureAddressCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferOpaqueCaptureAddressCreateInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint64_t*)&forMarshaling->opaqueCaptureAddress, sizeof(uint64_t));
+}
+
+void unmarshal_VkBufferOpaqueCaptureAddressCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBufferOpaqueCaptureAddressCreateInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint64_t*)&forUnmarshaling->opaqueCaptureAddress, sizeof(uint64_t));
+}
+
+void marshal_VkMemoryOpaqueCaptureAddressAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryOpaqueCaptureAddressAllocateInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint64_t*)&forMarshaling->opaqueCaptureAddress, sizeof(uint64_t));
+}
+
+void unmarshal_VkMemoryOpaqueCaptureAddressAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkMemoryOpaqueCaptureAddressAllocateInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint64_t*)&forUnmarshaling->opaqueCaptureAddress, sizeof(uint64_t));
+}
+
+void marshal_VkDeviceMemoryOpaqueCaptureAddressInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceMemoryOpaqueCaptureAddressInfo* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+}
+
+void unmarshal_VkDeviceMemoryOpaqueCaptureAddressInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDeviceMemoryOpaqueCaptureAddressInfo* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_0, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
+}
+
+#endif
 #ifdef VK_KHR_surface
 void marshal_VkSurfaceCapabilitiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSurfaceCapabilitiesKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->minImageCount, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->maxImageCount, sizeof(uint32_t));
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->currentExtent));
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->minImageExtent));
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->maxImageExtent));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->currentExtent));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->minImageExtent));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxImageExtent));
     vkStream->write((uint32_t*)&forMarshaling->maxImageArrayLayers, sizeof(uint32_t));
     vkStream->write((VkSurfaceTransformFlagsKHR*)&forMarshaling->supportedTransforms, sizeof(VkSurfaceTransformFlagsKHR));
     vkStream->write((VkSurfaceTransformFlagBitsKHR*)&forMarshaling->currentTransform, sizeof(VkSurfaceTransformFlagBitsKHR));
@@ -6836,13 +9400,15 @@
 
 void unmarshal_VkSurfaceCapabilitiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSurfaceCapabilitiesKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->minImageCount, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->maxImageCount, sizeof(uint32_t));
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->currentExtent));
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->minImageExtent));
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->maxImageExtent));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->currentExtent));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->minImageExtent));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->maxImageExtent));
     vkStream->read((uint32_t*)&forUnmarshaling->maxImageArrayLayers, sizeof(uint32_t));
     vkStream->read((VkSurfaceTransformFlagsKHR*)&forUnmarshaling->supportedTransforms, sizeof(VkSurfaceTransformFlagsKHR));
     vkStream->read((VkSurfaceTransformFlagBitsKHR*)&forUnmarshaling->currentTransform, sizeof(VkSurfaceTransformFlagBitsKHR));
@@ -6852,16 +9418,20 @@
 
 void marshal_VkSurfaceFormatKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSurfaceFormatKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkFormat*)&forMarshaling->format, sizeof(VkFormat));
     vkStream->write((VkColorSpaceKHR*)&forMarshaling->colorSpace, sizeof(VkColorSpaceKHR));
 }
 
 void unmarshal_VkSurfaceFormatKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSurfaceFormatKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkFormat*)&forUnmarshaling->format, sizeof(VkFormat));
     vkStream->read((VkColorSpaceKHR*)&forUnmarshaling->colorSpace, sizeof(VkColorSpaceKHR));
 }
@@ -6870,31 +9440,31 @@
 #ifdef VK_KHR_swapchain
 void marshal_VkSwapchainCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSwapchainCreateInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkSwapchainCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkSwapchainCreateFlagsKHR));
-    uint64_t cgen_var_188;
-    vkStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&forMarshaling->surface, &cgen_var_188, 1);
-    vkStream->write((uint64_t*)&cgen_var_188, 1 * 8);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&forMarshaling->surface, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((uint32_t*)&forMarshaling->minImageCount, sizeof(uint32_t));
     vkStream->write((VkFormat*)&forMarshaling->imageFormat, sizeof(VkFormat));
     vkStream->write((VkColorSpaceKHR*)&forMarshaling->imageColorSpace, sizeof(VkColorSpaceKHR));
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->imageExtent));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->imageExtent));
     vkStream->write((uint32_t*)&forMarshaling->imageArrayLayers, sizeof(uint32_t));
     vkStream->write((VkImageUsageFlags*)&forMarshaling->imageUsage, sizeof(VkImageUsageFlags));
     vkStream->write((VkSharingMode*)&forMarshaling->imageSharingMode, sizeof(VkSharingMode));
     vkStream->write((uint32_t*)&forMarshaling->queueFamilyIndexCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_189 = (uint64_t)(uintptr_t)forMarshaling->pQueueFamilyIndices;
-    vkStream->putBe64(cgen_var_189);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pQueueFamilyIndices;
+    vkStream->putBe64(cgen_var_1);
     if (forMarshaling->pQueueFamilyIndices)
     {
         vkStream->write((const uint32_t*)forMarshaling->pQueueFamilyIndices, forMarshaling->queueFamilyIndexCount * sizeof(const uint32_t));
@@ -6903,32 +9473,31 @@
     vkStream->write((VkCompositeAlphaFlagBitsKHR*)&forMarshaling->compositeAlpha, sizeof(VkCompositeAlphaFlagBitsKHR));
     vkStream->write((VkPresentModeKHR*)&forMarshaling->presentMode, sizeof(VkPresentModeKHR));
     vkStream->write((VkBool32*)&forMarshaling->clipped, sizeof(VkBool32));
-    uint64_t cgen_var_190;
-    vkStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&forMarshaling->oldSwapchain, &cgen_var_190, 1);
-    vkStream->write((uint64_t*)&cgen_var_190, 1 * 8);
+    uint64_t cgen_var_2;
+    vkStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&forMarshaling->oldSwapchain, &cgen_var_2, 1);
+    vkStream->write((uint64_t*)&cgen_var_2, 1 * 8);
 }
 
 void unmarshal_VkSwapchainCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSwapchainCreateInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkSwapchainCreateFlagsKHR*)&forUnmarshaling->flags, sizeof(VkSwapchainCreateFlagsKHR));
-    uint64_t cgen_var_191;
-    vkStream->read((uint64_t*)&cgen_var_191, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_191, (VkSurfaceKHR*)&forUnmarshaling->surface, 1);
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_0, (VkSurfaceKHR*)&forUnmarshaling->surface, 1);
     vkStream->read((uint32_t*)&forUnmarshaling->minImageCount, sizeof(uint32_t));
     vkStream->read((VkFormat*)&forUnmarshaling->imageFormat, sizeof(VkFormat));
     vkStream->read((VkColorSpaceKHR*)&forUnmarshaling->imageColorSpace, sizeof(VkColorSpaceKHR));
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->imageExtent));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->imageExtent));
     vkStream->read((uint32_t*)&forUnmarshaling->imageArrayLayers, sizeof(uint32_t));
     vkStream->read((VkImageUsageFlags*)&forUnmarshaling->imageUsage, sizeof(VkImageUsageFlags));
     vkStream->read((VkSharingMode*)&forUnmarshaling->imageSharingMode, sizeof(VkSharingMode));
@@ -6948,43 +9517,43 @@
     vkStream->read((VkCompositeAlphaFlagBitsKHR*)&forUnmarshaling->compositeAlpha, sizeof(VkCompositeAlphaFlagBitsKHR));
     vkStream->read((VkPresentModeKHR*)&forUnmarshaling->presentMode, sizeof(VkPresentModeKHR));
     vkStream->read((VkBool32*)&forUnmarshaling->clipped, sizeof(VkBool32));
-    uint64_t cgen_var_193;
-    vkStream->read((uint64_t*)&cgen_var_193, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkSwapchainKHR(&cgen_var_193, (VkSwapchainKHR*)&forUnmarshaling->oldSwapchain, 1);
+    uint64_t cgen_var_2;
+    vkStream->read((uint64_t*)&cgen_var_2, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkSwapchainKHR(&cgen_var_2, (VkSwapchainKHR*)&forUnmarshaling->oldSwapchain, 1);
 }
 
 void marshal_VkPresentInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPresentInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->waitSemaphoreCount, sizeof(uint32_t));
     if (forMarshaling->waitSemaphoreCount)
     {
-        uint64_t* cgen_var_194;
-        vkStream->alloc((void**)&cgen_var_194, forMarshaling->waitSemaphoreCount * 8);
-        vkStream->handleMapping()->mapHandles_VkSemaphore_u64(forMarshaling->pWaitSemaphores, cgen_var_194, forMarshaling->waitSemaphoreCount);
-        vkStream->write((uint64_t*)cgen_var_194, forMarshaling->waitSemaphoreCount * 8);
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forMarshaling->waitSemaphoreCount * 8);
+        vkStream->handleMapping()->mapHandles_VkSemaphore_u64(forMarshaling->pWaitSemaphores, cgen_var_0, forMarshaling->waitSemaphoreCount);
+        vkStream->write((uint64_t*)cgen_var_0, forMarshaling->waitSemaphoreCount * 8);
     }
     vkStream->write((uint32_t*)&forMarshaling->swapchainCount, sizeof(uint32_t));
     if (forMarshaling->swapchainCount)
     {
-        uint64_t* cgen_var_195;
-        vkStream->alloc((void**)&cgen_var_195, forMarshaling->swapchainCount * 8);
-        vkStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(forMarshaling->pSwapchains, cgen_var_195, forMarshaling->swapchainCount);
-        vkStream->write((uint64_t*)cgen_var_195, forMarshaling->swapchainCount * 8);
+        uint64_t* cgen_var_1;
+        vkStream->alloc((void**)&cgen_var_1, forMarshaling->swapchainCount * 8);
+        vkStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(forMarshaling->pSwapchains, cgen_var_1, forMarshaling->swapchainCount);
+        vkStream->write((uint64_t*)cgen_var_1, forMarshaling->swapchainCount * 8);
     }
     vkStream->write((const uint32_t*)forMarshaling->pImageIndices, forMarshaling->swapchainCount * sizeof(const uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_196 = (uint64_t)(uintptr_t)forMarshaling->pResults;
-    vkStream->putBe64(cgen_var_196);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)forMarshaling->pResults;
+    vkStream->putBe64(cgen_var_2);
     if (forMarshaling->pResults)
     {
         vkStream->write((VkResult*)forMarshaling->pResults, forMarshaling->swapchainCount * sizeof(VkResult));
@@ -6993,32 +9562,31 @@
 
 void unmarshal_VkPresentInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPresentInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->waitSemaphoreCount, sizeof(uint32_t));
     if (forUnmarshaling->waitSemaphoreCount)
     {
-        uint64_t* cgen_var_197;
-        vkStream->alloc((void**)&cgen_var_197, forUnmarshaling->waitSemaphoreCount * 8);
-        vkStream->read((uint64_t*)cgen_var_197, forUnmarshaling->waitSemaphoreCount * 8);
-        vkStream->handleMapping()->mapHandles_u64_VkSemaphore(cgen_var_197, (VkSemaphore*)forUnmarshaling->pWaitSemaphores, forUnmarshaling->waitSemaphoreCount);
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forUnmarshaling->waitSemaphoreCount * 8);
+        vkStream->read((uint64_t*)cgen_var_0, forUnmarshaling->waitSemaphoreCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkSemaphore(cgen_var_0, (VkSemaphore*)forUnmarshaling->pWaitSemaphores, forUnmarshaling->waitSemaphoreCount);
     }
     vkStream->read((uint32_t*)&forUnmarshaling->swapchainCount, sizeof(uint32_t));
     if (forUnmarshaling->swapchainCount)
     {
-        uint64_t* cgen_var_198;
-        vkStream->alloc((void**)&cgen_var_198, forUnmarshaling->swapchainCount * 8);
-        vkStream->read((uint64_t*)cgen_var_198, forUnmarshaling->swapchainCount * 8);
-        vkStream->handleMapping()->mapHandles_u64_VkSwapchainKHR(cgen_var_198, (VkSwapchainKHR*)forUnmarshaling->pSwapchains, forUnmarshaling->swapchainCount);
+        uint64_t* cgen_var_1;
+        vkStream->alloc((void**)&cgen_var_1, forUnmarshaling->swapchainCount * 8);
+        vkStream->read((uint64_t*)cgen_var_1, forUnmarshaling->swapchainCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkSwapchainKHR(cgen_var_1, (VkSwapchainKHR*)forUnmarshaling->pSwapchains, forUnmarshaling->swapchainCount);
     }
     vkStream->read((uint32_t*)forUnmarshaling->pImageIndices, forUnmarshaling->swapchainCount * sizeof(const uint32_t));
     // WARNING PTR CHECK
@@ -7036,172 +9604,168 @@
 
 void marshal_VkImageSwapchainCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageSwapchainCreateInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_200;
-    vkStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&forMarshaling->swapchain, &cgen_var_200, 1);
-    vkStream->write((uint64_t*)&cgen_var_200, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&forMarshaling->swapchain, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
 }
 
 void unmarshal_VkImageSwapchainCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageSwapchainCreateInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_201;
-    vkStream->read((uint64_t*)&cgen_var_201, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkSwapchainKHR(&cgen_var_201, (VkSwapchainKHR*)&forUnmarshaling->swapchain, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkSwapchainKHR(&cgen_var_0, (VkSwapchainKHR*)&forUnmarshaling->swapchain, 1);
 }
 
 void marshal_VkBindImageMemorySwapchainInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBindImageMemorySwapchainInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_202;
-    vkStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&forMarshaling->swapchain, &cgen_var_202, 1);
-    vkStream->write((uint64_t*)&cgen_var_202, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&forMarshaling->swapchain, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((uint32_t*)&forMarshaling->imageIndex, sizeof(uint32_t));
 }
 
 void unmarshal_VkBindImageMemorySwapchainInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBindImageMemorySwapchainInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_203;
-    vkStream->read((uint64_t*)&cgen_var_203, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkSwapchainKHR(&cgen_var_203, (VkSwapchainKHR*)&forUnmarshaling->swapchain, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkSwapchainKHR(&cgen_var_0, (VkSwapchainKHR*)&forUnmarshaling->swapchain, 1);
     vkStream->read((uint32_t*)&forUnmarshaling->imageIndex, sizeof(uint32_t));
 }
 
 void marshal_VkAcquireNextImageInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkAcquireNextImageInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_204;
-    vkStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&forMarshaling->swapchain, &cgen_var_204, 1);
-    vkStream->write((uint64_t*)&cgen_var_204, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkSwapchainKHR_u64(&forMarshaling->swapchain, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((uint64_t*)&forMarshaling->timeout, sizeof(uint64_t));
-    uint64_t cgen_var_205;
-    vkStream->handleMapping()->mapHandles_VkSemaphore_u64(&forMarshaling->semaphore, &cgen_var_205, 1);
-    vkStream->write((uint64_t*)&cgen_var_205, 1 * 8);
-    uint64_t cgen_var_206;
-    vkStream->handleMapping()->mapHandles_VkFence_u64(&forMarshaling->fence, &cgen_var_206, 1);
-    vkStream->write((uint64_t*)&cgen_var_206, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkSemaphore_u64(&forMarshaling->semaphore, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    uint64_t cgen_var_2;
+    vkStream->handleMapping()->mapHandles_VkFence_u64(&forMarshaling->fence, &cgen_var_2, 1);
+    vkStream->write((uint64_t*)&cgen_var_2, 1 * 8);
     vkStream->write((uint32_t*)&forMarshaling->deviceMask, sizeof(uint32_t));
 }
 
 void unmarshal_VkAcquireNextImageInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkAcquireNextImageInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_207;
-    vkStream->read((uint64_t*)&cgen_var_207, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkSwapchainKHR(&cgen_var_207, (VkSwapchainKHR*)&forUnmarshaling->swapchain, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkSwapchainKHR(&cgen_var_0, (VkSwapchainKHR*)&forUnmarshaling->swapchain, 1);
     vkStream->read((uint64_t*)&forUnmarshaling->timeout, sizeof(uint64_t));
-    uint64_t cgen_var_208;
-    vkStream->read((uint64_t*)&cgen_var_208, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkSemaphore(&cgen_var_208, (VkSemaphore*)&forUnmarshaling->semaphore, 1);
-    uint64_t cgen_var_209;
-    vkStream->read((uint64_t*)&cgen_var_209, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkFence(&cgen_var_209, (VkFence*)&forUnmarshaling->fence, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkSemaphore(&cgen_var_1, (VkSemaphore*)&forUnmarshaling->semaphore, 1);
+    uint64_t cgen_var_2;
+    vkStream->read((uint64_t*)&cgen_var_2, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkFence(&cgen_var_2, (VkFence*)&forUnmarshaling->fence, 1);
     vkStream->read((uint32_t*)&forUnmarshaling->deviceMask, sizeof(uint32_t));
 }
 
 void marshal_VkDeviceGroupPresentCapabilitiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceGroupPresentCapabilitiesKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)forMarshaling->presentMask, VK_MAX_DEVICE_GROUP_SIZE * sizeof(uint32_t));
     vkStream->write((VkDeviceGroupPresentModeFlagsKHR*)&forMarshaling->modes, sizeof(VkDeviceGroupPresentModeFlagsKHR));
 }
 
 void unmarshal_VkDeviceGroupPresentCapabilitiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceGroupPresentCapabilitiesKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)forUnmarshaling->presentMask, VK_MAX_DEVICE_GROUP_SIZE * sizeof(uint32_t));
     vkStream->read((VkDeviceGroupPresentModeFlagsKHR*)&forUnmarshaling->modes, sizeof(VkDeviceGroupPresentModeFlagsKHR));
 }
 
 void marshal_VkDeviceGroupPresentInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceGroupPresentInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->swapchainCount, sizeof(uint32_t));
     vkStream->write((const uint32_t*)forMarshaling->pDeviceMasks, forMarshaling->swapchainCount * sizeof(const uint32_t));
     vkStream->write((VkDeviceGroupPresentModeFlagBitsKHR*)&forMarshaling->mode, sizeof(VkDeviceGroupPresentModeFlagBitsKHR));
@@ -7209,17 +9773,16 @@
 
 void unmarshal_VkDeviceGroupPresentInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceGroupPresentInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->swapchainCount, sizeof(uint32_t));
     vkStream->read((uint32_t*)forUnmarshaling->pDeviceMasks, forUnmarshaling->swapchainCount * sizeof(const uint32_t));
     vkStream->read((VkDeviceGroupPresentModeFlagBitsKHR*)&forUnmarshaling->mode, sizeof(VkDeviceGroupPresentModeFlagBitsKHR));
@@ -7227,47 +9790,182 @@
 
 void marshal_VkDeviceGroupSwapchainCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceGroupSwapchainCreateInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkDeviceGroupPresentModeFlagsKHR*)&forMarshaling->modes, sizeof(VkDeviceGroupPresentModeFlagsKHR));
 }
 
 void unmarshal_VkDeviceGroupSwapchainCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceGroupSwapchainCreateInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkDeviceGroupPresentModeFlagsKHR*)&forUnmarshaling->modes, sizeof(VkDeviceGroupPresentModeFlagsKHR));
 }
 
 #endif
 #ifdef VK_KHR_display
+void marshal_VkDisplayModeParametersKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayModeParametersKHR* forMarshaling)
+{
+    (void)rootType;
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->visibleRegion));
+    vkStream->write((uint32_t*)&forMarshaling->refreshRate, sizeof(uint32_t));
+}
+
+void unmarshal_VkDisplayModeParametersKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDisplayModeParametersKHR* forUnmarshaling)
+{
+    (void)rootType;
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->visibleRegion));
+    vkStream->read((uint32_t*)&forUnmarshaling->refreshRate, sizeof(uint32_t));
+}
+
+void marshal_VkDisplayModeCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayModeCreateInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDisplayModeCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkDisplayModeCreateFlagsKHR));
+    marshal_VkDisplayModeParametersKHR(vkStream, rootType, (VkDisplayModeParametersKHR*)(&forMarshaling->parameters));
+}
+
+void unmarshal_VkDisplayModeCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDisplayModeCreateInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDisplayModeCreateFlagsKHR*)&forUnmarshaling->flags, sizeof(VkDisplayModeCreateFlagsKHR));
+    unmarshal_VkDisplayModeParametersKHR(vkStream, rootType, (VkDisplayModeParametersKHR*)(&forUnmarshaling->parameters));
+}
+
+void marshal_VkDisplayModePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayModePropertiesKHR* forMarshaling)
+{
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkDisplayModeKHR_u64(&forMarshaling->displayMode, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    marshal_VkDisplayModeParametersKHR(vkStream, rootType, (VkDisplayModeParametersKHR*)(&forMarshaling->parameters));
+}
+
+void unmarshal_VkDisplayModePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDisplayModePropertiesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDisplayModeKHR(&cgen_var_0, (VkDisplayModeKHR*)&forUnmarshaling->displayMode, 1);
+    unmarshal_VkDisplayModeParametersKHR(vkStream, rootType, (VkDisplayModeParametersKHR*)(&forUnmarshaling->parameters));
+}
+
+void marshal_VkDisplayPlaneCapabilitiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPlaneCapabilitiesKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkDisplayPlaneAlphaFlagsKHR*)&forMarshaling->supportedAlpha, sizeof(VkDisplayPlaneAlphaFlagsKHR));
+    marshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forMarshaling->minSrcPosition));
+    marshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forMarshaling->maxSrcPosition));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->minSrcExtent));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxSrcExtent));
+    marshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forMarshaling->minDstPosition));
+    marshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forMarshaling->maxDstPosition));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->minDstExtent));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxDstExtent));
+}
+
+void unmarshal_VkDisplayPlaneCapabilitiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDisplayPlaneCapabilitiesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkDisplayPlaneAlphaFlagsKHR*)&forUnmarshaling->supportedAlpha, sizeof(VkDisplayPlaneAlphaFlagsKHR));
+    unmarshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forUnmarshaling->minSrcPosition));
+    unmarshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forUnmarshaling->maxSrcPosition));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->minSrcExtent));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->maxSrcExtent));
+    unmarshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forUnmarshaling->minDstPosition));
+    unmarshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forUnmarshaling->maxDstPosition));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->minDstExtent));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->maxDstExtent));
+}
+
+void marshal_VkDisplayPlanePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPlanePropertiesKHR* forMarshaling)
+{
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkDisplayKHR_u64(&forMarshaling->currentDisplay, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((uint32_t*)&forMarshaling->currentStackIndex, sizeof(uint32_t));
+}
+
+void unmarshal_VkDisplayPlanePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDisplayPlanePropertiesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDisplayKHR(&cgen_var_0, (VkDisplayKHR*)&forUnmarshaling->currentDisplay, 1);
+    vkStream->read((uint32_t*)&forUnmarshaling->currentStackIndex, sizeof(uint32_t));
+}
+
 void marshal_VkDisplayPropertiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayPropertiesKHR* forMarshaling)
 {
-    uint64_t cgen_var_210;
-    vkStream->handleMapping()->mapHandles_VkDisplayKHR_u64(&forMarshaling->display, &cgen_var_210, 1);
-    vkStream->write((uint64_t*)&cgen_var_210, 1 * 8);
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkDisplayKHR_u64(&forMarshaling->display, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->putString(forMarshaling->displayName);
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->physicalDimensions));
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->physicalResolution));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->physicalDimensions));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->physicalResolution));
     vkStream->write((VkSurfaceTransformFlagsKHR*)&forMarshaling->supportedTransforms, sizeof(VkSurfaceTransformFlagsKHR));
     vkStream->write((VkBool32*)&forMarshaling->planeReorderPossible, sizeof(VkBool32));
     vkStream->write((VkBool32*)&forMarshaling->persistentContent, sizeof(VkBool32));
@@ -7275,221 +9973,102 @@
 
 void unmarshal_VkDisplayPropertiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayPropertiesKHR* forUnmarshaling)
 {
-    uint64_t cgen_var_211;
-    vkStream->read((uint64_t*)&cgen_var_211, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDisplayKHR(&cgen_var_211, (VkDisplayKHR*)&forUnmarshaling->display, 1);
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDisplayKHR(&cgen_var_0, (VkDisplayKHR*)&forUnmarshaling->display, 1);
     vkStream->loadStringInPlace((char**)&forUnmarshaling->displayName);
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->physicalDimensions));
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->physicalResolution));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->physicalDimensions));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->physicalResolution));
     vkStream->read((VkSurfaceTransformFlagsKHR*)&forUnmarshaling->supportedTransforms, sizeof(VkSurfaceTransformFlagsKHR));
     vkStream->read((VkBool32*)&forUnmarshaling->planeReorderPossible, sizeof(VkBool32));
     vkStream->read((VkBool32*)&forUnmarshaling->persistentContent, sizeof(VkBool32));
 }
 
-void marshal_VkDisplayModeParametersKHR(
-    VulkanStreamGuest* vkStream,
-    const VkDisplayModeParametersKHR* forMarshaling)
-{
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->visibleRegion));
-    vkStream->write((uint32_t*)&forMarshaling->refreshRate, sizeof(uint32_t));
-}
-
-void unmarshal_VkDisplayModeParametersKHR(
-    VulkanStreamGuest* vkStream,
-    VkDisplayModeParametersKHR* forUnmarshaling)
-{
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->visibleRegion));
-    vkStream->read((uint32_t*)&forUnmarshaling->refreshRate, sizeof(uint32_t));
-}
-
-void marshal_VkDisplayModePropertiesKHR(
-    VulkanStreamGuest* vkStream,
-    const VkDisplayModePropertiesKHR* forMarshaling)
-{
-    uint64_t cgen_var_212;
-    vkStream->handleMapping()->mapHandles_VkDisplayModeKHR_u64(&forMarshaling->displayMode, &cgen_var_212, 1);
-    vkStream->write((uint64_t*)&cgen_var_212, 1 * 8);
-    marshal_VkDisplayModeParametersKHR(vkStream, (VkDisplayModeParametersKHR*)(&forMarshaling->parameters));
-}
-
-void unmarshal_VkDisplayModePropertiesKHR(
-    VulkanStreamGuest* vkStream,
-    VkDisplayModePropertiesKHR* forUnmarshaling)
-{
-    uint64_t cgen_var_213;
-    vkStream->read((uint64_t*)&cgen_var_213, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDisplayModeKHR(&cgen_var_213, (VkDisplayModeKHR*)&forUnmarshaling->displayMode, 1);
-    unmarshal_VkDisplayModeParametersKHR(vkStream, (VkDisplayModeParametersKHR*)(&forUnmarshaling->parameters));
-}
-
-void marshal_VkDisplayModeCreateInfoKHR(
-    VulkanStreamGuest* vkStream,
-    const VkDisplayModeCreateInfoKHR* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkDisplayModeCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkDisplayModeCreateFlagsKHR));
-    marshal_VkDisplayModeParametersKHR(vkStream, (VkDisplayModeParametersKHR*)(&forMarshaling->parameters));
-}
-
-void unmarshal_VkDisplayModeCreateInfoKHR(
-    VulkanStreamGuest* vkStream,
-    VkDisplayModeCreateInfoKHR* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkDisplayModeCreateFlagsKHR*)&forUnmarshaling->flags, sizeof(VkDisplayModeCreateFlagsKHR));
-    unmarshal_VkDisplayModeParametersKHR(vkStream, (VkDisplayModeParametersKHR*)(&forUnmarshaling->parameters));
-}
-
-void marshal_VkDisplayPlaneCapabilitiesKHR(
-    VulkanStreamGuest* vkStream,
-    const VkDisplayPlaneCapabilitiesKHR* forMarshaling)
-{
-    vkStream->write((VkDisplayPlaneAlphaFlagsKHR*)&forMarshaling->supportedAlpha, sizeof(VkDisplayPlaneAlphaFlagsKHR));
-    marshal_VkOffset2D(vkStream, (VkOffset2D*)(&forMarshaling->minSrcPosition));
-    marshal_VkOffset2D(vkStream, (VkOffset2D*)(&forMarshaling->maxSrcPosition));
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->minSrcExtent));
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->maxSrcExtent));
-    marshal_VkOffset2D(vkStream, (VkOffset2D*)(&forMarshaling->minDstPosition));
-    marshal_VkOffset2D(vkStream, (VkOffset2D*)(&forMarshaling->maxDstPosition));
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->minDstExtent));
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->maxDstExtent));
-}
-
-void unmarshal_VkDisplayPlaneCapabilitiesKHR(
-    VulkanStreamGuest* vkStream,
-    VkDisplayPlaneCapabilitiesKHR* forUnmarshaling)
-{
-    vkStream->read((VkDisplayPlaneAlphaFlagsKHR*)&forUnmarshaling->supportedAlpha, sizeof(VkDisplayPlaneAlphaFlagsKHR));
-    unmarshal_VkOffset2D(vkStream, (VkOffset2D*)(&forUnmarshaling->minSrcPosition));
-    unmarshal_VkOffset2D(vkStream, (VkOffset2D*)(&forUnmarshaling->maxSrcPosition));
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->minSrcExtent));
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->maxSrcExtent));
-    unmarshal_VkOffset2D(vkStream, (VkOffset2D*)(&forUnmarshaling->minDstPosition));
-    unmarshal_VkOffset2D(vkStream, (VkOffset2D*)(&forUnmarshaling->maxDstPosition));
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->minDstExtent));
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->maxDstExtent));
-}
-
-void marshal_VkDisplayPlanePropertiesKHR(
-    VulkanStreamGuest* vkStream,
-    const VkDisplayPlanePropertiesKHR* forMarshaling)
-{
-    uint64_t cgen_var_214;
-    vkStream->handleMapping()->mapHandles_VkDisplayKHR_u64(&forMarshaling->currentDisplay, &cgen_var_214, 1);
-    vkStream->write((uint64_t*)&cgen_var_214, 1 * 8);
-    vkStream->write((uint32_t*)&forMarshaling->currentStackIndex, sizeof(uint32_t));
-}
-
-void unmarshal_VkDisplayPlanePropertiesKHR(
-    VulkanStreamGuest* vkStream,
-    VkDisplayPlanePropertiesKHR* forUnmarshaling)
-{
-    uint64_t cgen_var_215;
-    vkStream->read((uint64_t*)&cgen_var_215, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDisplayKHR(&cgen_var_215, (VkDisplayKHR*)&forUnmarshaling->currentDisplay, 1);
-    vkStream->read((uint32_t*)&forUnmarshaling->currentStackIndex, sizeof(uint32_t));
-}
-
 void marshal_VkDisplaySurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplaySurfaceCreateInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkDisplaySurfaceCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkDisplaySurfaceCreateFlagsKHR));
-    uint64_t cgen_var_216;
-    vkStream->handleMapping()->mapHandles_VkDisplayModeKHR_u64(&forMarshaling->displayMode, &cgen_var_216, 1);
-    vkStream->write((uint64_t*)&cgen_var_216, 1 * 8);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkDisplayModeKHR_u64(&forMarshaling->displayMode, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((uint32_t*)&forMarshaling->planeIndex, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->planeStackIndex, sizeof(uint32_t));
     vkStream->write((VkSurfaceTransformFlagBitsKHR*)&forMarshaling->transform, sizeof(VkSurfaceTransformFlagBitsKHR));
     vkStream->write((float*)&forMarshaling->globalAlpha, sizeof(float));
     vkStream->write((VkDisplayPlaneAlphaFlagBitsKHR*)&forMarshaling->alphaMode, sizeof(VkDisplayPlaneAlphaFlagBitsKHR));
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->imageExtent));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->imageExtent));
 }
 
 void unmarshal_VkDisplaySurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplaySurfaceCreateInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkDisplaySurfaceCreateFlagsKHR*)&forUnmarshaling->flags, sizeof(VkDisplaySurfaceCreateFlagsKHR));
-    uint64_t cgen_var_217;
-    vkStream->read((uint64_t*)&cgen_var_217, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDisplayModeKHR(&cgen_var_217, (VkDisplayModeKHR*)&forUnmarshaling->displayMode, 1);
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDisplayModeKHR(&cgen_var_0, (VkDisplayModeKHR*)&forUnmarshaling->displayMode, 1);
     vkStream->read((uint32_t*)&forUnmarshaling->planeIndex, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->planeStackIndex, sizeof(uint32_t));
     vkStream->read((VkSurfaceTransformFlagBitsKHR*)&forUnmarshaling->transform, sizeof(VkSurfaceTransformFlagBitsKHR));
     vkStream->read((float*)&forUnmarshaling->globalAlpha, sizeof(float));
     vkStream->read((VkDisplayPlaneAlphaFlagBitsKHR*)&forUnmarshaling->alphaMode, sizeof(VkDisplayPlaneAlphaFlagBitsKHR));
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->imageExtent));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->imageExtent));
 }
 
 #endif
 #ifdef VK_KHR_display_swapchain
 void marshal_VkDisplayPresentInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayPresentInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkRect2D(vkStream, (VkRect2D*)(&forMarshaling->srcRect));
-    marshal_VkRect2D(vkStream, (VkRect2D*)(&forMarshaling->dstRect));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkRect2D(vkStream, rootType, (VkRect2D*)(&forMarshaling->srcRect));
+    marshal_VkRect2D(vkStream, rootType, (VkRect2D*)(&forMarshaling->dstRect));
     vkStream->write((VkBool32*)&forMarshaling->persistent, sizeof(VkBool32));
 }
 
 void unmarshal_VkDisplayPresentInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayPresentInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkRect2D(vkStream, (VkRect2D*)(&forUnmarshaling->srcRect));
-    unmarshal_VkRect2D(vkStream, (VkRect2D*)(&forUnmarshaling->dstRect));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkRect2D(vkStream, rootType, (VkRect2D*)(&forUnmarshaling->srcRect));
+    unmarshal_VkRect2D(vkStream, rootType, (VkRect2D*)(&forUnmarshaling->dstRect));
     vkStream->read((VkBool32*)&forUnmarshaling->persistent, sizeof(VkBool32));
 }
 
@@ -7497,20 +10076,20 @@
 #ifdef VK_KHR_xlib_surface
 void marshal_VkXlibSurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkXlibSurfaceCreateInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkXlibSurfaceCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkXlibSurfaceCreateFlagsKHR));
     // WARNING PTR CHECK
-    uint64_t cgen_var_218 = (uint64_t)(uintptr_t)forMarshaling->dpy;
-    vkStream->putBe64(cgen_var_218);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->dpy;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->dpy)
     {
         vkStream->write((Display*)forMarshaling->dpy, sizeof(Display));
@@ -7520,17 +10099,16 @@
 
 void unmarshal_VkXlibSurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkXlibSurfaceCreateInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkXlibSurfaceCreateFlagsKHR*)&forUnmarshaling->flags, sizeof(VkXlibSurfaceCreateFlagsKHR));
     // WARNING PTR CHECK
     Display* check_dpy;
@@ -7550,20 +10128,20 @@
 #ifdef VK_KHR_xcb_surface
 void marshal_VkXcbSurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkXcbSurfaceCreateInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkXcbSurfaceCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkXcbSurfaceCreateFlagsKHR));
     // WARNING PTR CHECK
-    uint64_t cgen_var_220 = (uint64_t)(uintptr_t)forMarshaling->connection;
-    vkStream->putBe64(cgen_var_220);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->connection;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->connection)
     {
         vkStream->write((xcb_connection_t*)forMarshaling->connection, sizeof(xcb_connection_t));
@@ -7573,17 +10151,16 @@
 
 void unmarshal_VkXcbSurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkXcbSurfaceCreateInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkXcbSurfaceCreateFlagsKHR*)&forUnmarshaling->flags, sizeof(VkXcbSurfaceCreateFlagsKHR));
     // WARNING PTR CHECK
     xcb_connection_t* check_connection;
@@ -7603,27 +10180,27 @@
 #ifdef VK_KHR_wayland_surface
 void marshal_VkWaylandSurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkWaylandSurfaceCreateInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkWaylandSurfaceCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkWaylandSurfaceCreateFlagsKHR));
     // WARNING PTR CHECK
-    uint64_t cgen_var_222 = (uint64_t)(uintptr_t)forMarshaling->display;
-    vkStream->putBe64(cgen_var_222);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->display;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->display)
     {
         vkStream->write((wl_display*)forMarshaling->display, sizeof(wl_display));
     }
     // WARNING PTR CHECK
-    uint64_t cgen_var_223 = (uint64_t)(uintptr_t)forMarshaling->surface;
-    vkStream->putBe64(cgen_var_223);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->surface;
+    vkStream->putBe64(cgen_var_1);
     if (forMarshaling->surface)
     {
         vkStream->write((wl_surface*)forMarshaling->surface, sizeof(wl_surface));
@@ -7632,17 +10209,16 @@
 
 void unmarshal_VkWaylandSurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkWaylandSurfaceCreateInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkWaylandSurfaceCreateFlagsKHR*)&forUnmarshaling->flags, sizeof(VkWaylandSurfaceCreateFlagsKHR));
     // WARNING PTR CHECK
     wl_display* check_display;
@@ -7669,92 +10245,23 @@
 }
 
 #endif
-#ifdef VK_KHR_mir_surface
-void marshal_VkMirSurfaceCreateInfoKHR(
-    VulkanStreamGuest* vkStream,
-    const VkMirSurfaceCreateInfoKHR* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkMirSurfaceCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkMirSurfaceCreateFlagsKHR));
-    // WARNING PTR CHECK
-    uint64_t cgen_var_226 = (uint64_t)(uintptr_t)forMarshaling->connection;
-    vkStream->putBe64(cgen_var_226);
-    if (forMarshaling->connection)
-    {
-        vkStream->write((MirConnection*)forMarshaling->connection, sizeof(MirConnection));
-    }
-    // WARNING PTR CHECK
-    uint64_t cgen_var_227 = (uint64_t)(uintptr_t)forMarshaling->mirSurface;
-    vkStream->putBe64(cgen_var_227);
-    if (forMarshaling->mirSurface)
-    {
-        vkStream->write((MirSurface*)forMarshaling->mirSurface, sizeof(MirSurface));
-    }
-}
-
-void unmarshal_VkMirSurfaceCreateInfoKHR(
-    VulkanStreamGuest* vkStream,
-    VkMirSurfaceCreateInfoKHR* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkMirSurfaceCreateFlagsKHR*)&forUnmarshaling->flags, sizeof(VkMirSurfaceCreateFlagsKHR));
-    // WARNING PTR CHECK
-    MirConnection* check_connection;
-    check_connection = (MirConnection*)(uintptr_t)vkStream->getBe64();
-    if (forUnmarshaling->connection)
-    {
-        if (!(check_connection))
-        {
-            fprintf(stderr, "fatal: forUnmarshaling->connection inconsistent between guest and host\n");
-        }
-        vkStream->read((MirConnection*)forUnmarshaling->connection, sizeof(MirConnection));
-    }
-    // WARNING PTR CHECK
-    MirSurface* check_mirSurface;
-    check_mirSurface = (MirSurface*)(uintptr_t)vkStream->getBe64();
-    if (forUnmarshaling->mirSurface)
-    {
-        if (!(check_mirSurface))
-        {
-            fprintf(stderr, "fatal: forUnmarshaling->mirSurface inconsistent between guest and host\n");
-        }
-        vkStream->read((MirSurface*)forUnmarshaling->mirSurface, sizeof(MirSurface));
-    }
-}
-
-#endif
 #ifdef VK_KHR_android_surface
 void marshal_VkAndroidSurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkAndroidSurfaceCreateInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkAndroidSurfaceCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkAndroidSurfaceCreateFlagsKHR));
     // WARNING PTR CHECK
-    uint64_t cgen_var_230 = (uint64_t)(uintptr_t)forMarshaling->window;
-    vkStream->putBe64(cgen_var_230);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->window;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->window)
     {
         vkStream->write((ANativeWindow*)forMarshaling->window, sizeof(ANativeWindow));
@@ -7763,17 +10270,16 @@
 
 void unmarshal_VkAndroidSurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkAndroidSurfaceCreateInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkAndroidSurfaceCreateFlagsKHR*)&forUnmarshaling->flags, sizeof(VkAndroidSurfaceCreateFlagsKHR));
     // WARNING PTR CHECK
     ANativeWindow* check_window;
@@ -7792,16 +10298,16 @@
 #ifdef VK_KHR_win32_surface
 void marshal_VkWin32SurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkWin32SurfaceCreateInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkWin32SurfaceCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkWin32SurfaceCreateFlagsKHR));
     vkStream->write((HINSTANCE*)&forMarshaling->hinstance, sizeof(HINSTANCE));
     vkStream->write((HWND*)&forMarshaling->hwnd, sizeof(HWND));
@@ -7809,17 +10315,16 @@
 
 void unmarshal_VkWin32SurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkWin32SurfaceCreateInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkWin32SurfaceCreateFlagsKHR*)&forUnmarshaling->flags, sizeof(VkWin32SurfaceCreateFlagsKHR));
     vkStream->read((HINSTANCE*)&forUnmarshaling->hinstance, sizeof(HINSTANCE));
     vkStream->read((HWND*)&forUnmarshaling->hwnd, sizeof(HWND));
@@ -7847,16 +10352,16 @@
 #ifdef VK_KHR_external_memory_win32
 void marshal_VkImportMemoryWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportMemoryWin32HandleInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkExternalMemoryHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
     vkStream->write((HANDLE*)&forMarshaling->handle, sizeof(HANDLE));
     vkStream->write((LPCWSTR*)&forMarshaling->name, sizeof(LPCWSTR));
@@ -7864,17 +10369,16 @@
 
 void unmarshal_VkImportMemoryWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportMemoryWin32HandleInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkExternalMemoryHandleTypeFlagBits*)&forUnmarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
     vkStream->read((HANDLE*)&forUnmarshaling->handle, sizeof(HANDLE));
     vkStream->read((LPCWSTR*)&forUnmarshaling->name, sizeof(LPCWSTR));
@@ -7882,19 +10386,19 @@
 
 void marshal_VkExportMemoryWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExportMemoryWin32HandleInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     // WARNING PTR CHECK
-    uint64_t cgen_var_232 = (uint64_t)(uintptr_t)forMarshaling->pAttributes;
-    vkStream->putBe64(cgen_var_232);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pAttributes;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pAttributes)
     {
         vkStream->write((const SECURITY_ATTRIBUTES*)forMarshaling->pAttributes, sizeof(const SECURITY_ATTRIBUTES));
@@ -7905,17 +10409,16 @@
 
 void unmarshal_VkExportMemoryWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExportMemoryWin32HandleInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     // WARNING PTR CHECK
     const SECURITY_ATTRIBUTES* check_pAttributes;
     check_pAttributes = (const SECURITY_ATTRIBUTES*)(uintptr_t)vkStream->getBe64();
@@ -7933,69 +10436,67 @@
 
 void marshal_VkMemoryWin32HandlePropertiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryWin32HandlePropertiesKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->memoryTypeBits, sizeof(uint32_t));
 }
 
 void unmarshal_VkMemoryWin32HandlePropertiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryWin32HandlePropertiesKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->memoryTypeBits, sizeof(uint32_t));
 }
 
 void marshal_VkMemoryGetWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryGetWin32HandleInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_234;
-    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_234, 1);
-    vkStream->write((uint64_t*)&cgen_var_234, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((VkExternalMemoryHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
 }
 
 void unmarshal_VkMemoryGetWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryGetWin32HandleInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_235;
-    vkStream->read((uint64_t*)&cgen_var_235, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_235, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_0, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
     vkStream->read((VkExternalMemoryHandleTypeFlagBits*)&forUnmarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
 }
 
@@ -8003,102 +10504,99 @@
 #ifdef VK_KHR_external_memory_fd
 void marshal_VkImportMemoryFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportMemoryFdInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkExternalMemoryHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
     vkStream->write((int*)&forMarshaling->fd, sizeof(int));
 }
 
 void unmarshal_VkImportMemoryFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportMemoryFdInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkExternalMemoryHandleTypeFlagBits*)&forUnmarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
     vkStream->read((int*)&forUnmarshaling->fd, sizeof(int));
 }
 
 void marshal_VkMemoryFdPropertiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryFdPropertiesKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->memoryTypeBits, sizeof(uint32_t));
 }
 
 void unmarshal_VkMemoryFdPropertiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryFdPropertiesKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->memoryTypeBits, sizeof(uint32_t));
 }
 
 void marshal_VkMemoryGetFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryGetFdInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_236;
-    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_236, 1);
-    vkStream->write((uint64_t*)&cgen_var_236, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((VkExternalMemoryHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
 }
 
 void unmarshal_VkMemoryGetFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryGetFdInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_237;
-    vkStream->read((uint64_t*)&cgen_var_237, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_237, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_0, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
     vkStream->read((VkExternalMemoryHandleTypeFlagBits*)&forUnmarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
 }
 
@@ -8106,67 +10604,66 @@
 #ifdef VK_KHR_win32_keyed_mutex
 void marshal_VkWin32KeyedMutexAcquireReleaseInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkWin32KeyedMutexAcquireReleaseInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->acquireCount, sizeof(uint32_t));
     if (forMarshaling->acquireCount)
     {
-        uint64_t* cgen_var_238;
-        vkStream->alloc((void**)&cgen_var_238, forMarshaling->acquireCount * 8);
-        vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(forMarshaling->pAcquireSyncs, cgen_var_238, forMarshaling->acquireCount);
-        vkStream->write((uint64_t*)cgen_var_238, forMarshaling->acquireCount * 8);
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forMarshaling->acquireCount * 8);
+        vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(forMarshaling->pAcquireSyncs, cgen_var_0, forMarshaling->acquireCount);
+        vkStream->write((uint64_t*)cgen_var_0, forMarshaling->acquireCount * 8);
     }
     vkStream->write((const uint64_t*)forMarshaling->pAcquireKeys, forMarshaling->acquireCount * sizeof(const uint64_t));
     vkStream->write((const uint32_t*)forMarshaling->pAcquireTimeouts, forMarshaling->acquireCount * sizeof(const uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->releaseCount, sizeof(uint32_t));
     if (forMarshaling->releaseCount)
     {
-        uint64_t* cgen_var_239;
-        vkStream->alloc((void**)&cgen_var_239, forMarshaling->releaseCount * 8);
-        vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(forMarshaling->pReleaseSyncs, cgen_var_239, forMarshaling->releaseCount);
-        vkStream->write((uint64_t*)cgen_var_239, forMarshaling->releaseCount * 8);
+        uint64_t* cgen_var_1;
+        vkStream->alloc((void**)&cgen_var_1, forMarshaling->releaseCount * 8);
+        vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(forMarshaling->pReleaseSyncs, cgen_var_1, forMarshaling->releaseCount);
+        vkStream->write((uint64_t*)cgen_var_1, forMarshaling->releaseCount * 8);
     }
     vkStream->write((const uint64_t*)forMarshaling->pReleaseKeys, forMarshaling->releaseCount * sizeof(const uint64_t));
 }
 
 void unmarshal_VkWin32KeyedMutexAcquireReleaseInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkWin32KeyedMutexAcquireReleaseInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->acquireCount, sizeof(uint32_t));
     if (forUnmarshaling->acquireCount)
     {
-        uint64_t* cgen_var_240;
-        vkStream->alloc((void**)&cgen_var_240, forUnmarshaling->acquireCount * 8);
-        vkStream->read((uint64_t*)cgen_var_240, forUnmarshaling->acquireCount * 8);
-        vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(cgen_var_240, (VkDeviceMemory*)forUnmarshaling->pAcquireSyncs, forUnmarshaling->acquireCount);
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forUnmarshaling->acquireCount * 8);
+        vkStream->read((uint64_t*)cgen_var_0, forUnmarshaling->acquireCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(cgen_var_0, (VkDeviceMemory*)forUnmarshaling->pAcquireSyncs, forUnmarshaling->acquireCount);
     }
     vkStream->read((uint64_t*)forUnmarshaling->pAcquireKeys, forUnmarshaling->acquireCount * sizeof(const uint64_t));
     vkStream->read((uint32_t*)forUnmarshaling->pAcquireTimeouts, forUnmarshaling->acquireCount * sizeof(const uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->releaseCount, sizeof(uint32_t));
     if (forUnmarshaling->releaseCount)
     {
-        uint64_t* cgen_var_241;
-        vkStream->alloc((void**)&cgen_var_241, forUnmarshaling->releaseCount * 8);
-        vkStream->read((uint64_t*)cgen_var_241, forUnmarshaling->releaseCount * 8);
-        vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(cgen_var_241, (VkDeviceMemory*)forUnmarshaling->pReleaseSyncs, forUnmarshaling->releaseCount);
+        uint64_t* cgen_var_1;
+        vkStream->alloc((void**)&cgen_var_1, forUnmarshaling->releaseCount * 8);
+        vkStream->read((uint64_t*)cgen_var_1, forUnmarshaling->releaseCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(cgen_var_1, (VkDeviceMemory*)forUnmarshaling->pReleaseSyncs, forUnmarshaling->releaseCount);
     }
     vkStream->read((uint64_t*)forUnmarshaling->pReleaseKeys, forUnmarshaling->releaseCount * sizeof(const uint64_t));
 }
@@ -8179,19 +10676,19 @@
 #ifdef VK_KHR_external_semaphore_win32
 void marshal_VkImportSemaphoreWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportSemaphoreWin32HandleInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_242;
-    vkStream->handleMapping()->mapHandles_VkSemaphore_u64(&forMarshaling->semaphore, &cgen_var_242, 1);
-    vkStream->write((uint64_t*)&cgen_var_242, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkSemaphore_u64(&forMarshaling->semaphore, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((VkSemaphoreImportFlags*)&forMarshaling->flags, sizeof(VkSemaphoreImportFlags));
     vkStream->write((VkExternalSemaphoreHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalSemaphoreHandleTypeFlagBits));
     vkStream->write((HANDLE*)&forMarshaling->handle, sizeof(HANDLE));
@@ -8200,20 +10697,19 @@
 
 void unmarshal_VkImportSemaphoreWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportSemaphoreWin32HandleInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_243;
-    vkStream->read((uint64_t*)&cgen_var_243, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkSemaphore(&cgen_var_243, (VkSemaphore*)&forUnmarshaling->semaphore, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkSemaphore(&cgen_var_0, (VkSemaphore*)&forUnmarshaling->semaphore, 1);
     vkStream->read((VkSemaphoreImportFlags*)&forUnmarshaling->flags, sizeof(VkSemaphoreImportFlags));
     vkStream->read((VkExternalSemaphoreHandleTypeFlagBits*)&forUnmarshaling->handleType, sizeof(VkExternalSemaphoreHandleTypeFlagBits));
     vkStream->read((HANDLE*)&forUnmarshaling->handle, sizeof(HANDLE));
@@ -8222,19 +10718,19 @@
 
 void marshal_VkExportSemaphoreWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExportSemaphoreWin32HandleInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     // WARNING PTR CHECK
-    uint64_t cgen_var_244 = (uint64_t)(uintptr_t)forMarshaling->pAttributes;
-    vkStream->putBe64(cgen_var_244);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pAttributes;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pAttributes)
     {
         vkStream->write((const SECURITY_ATTRIBUTES*)forMarshaling->pAttributes, sizeof(const SECURITY_ATTRIBUTES));
@@ -8245,17 +10741,16 @@
 
 void unmarshal_VkExportSemaphoreWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExportSemaphoreWin32HandleInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     // WARNING PTR CHECK
     const SECURITY_ATTRIBUTES* check_pAttributes;
     check_pAttributes = (const SECURITY_ATTRIBUTES*)(uintptr_t)vkStream->getBe64();
@@ -8273,28 +10768,28 @@
 
 void marshal_VkD3D12FenceSubmitInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkD3D12FenceSubmitInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->waitSemaphoreValuesCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_246 = (uint64_t)(uintptr_t)forMarshaling->pWaitSemaphoreValues;
-    vkStream->putBe64(cgen_var_246);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pWaitSemaphoreValues;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pWaitSemaphoreValues)
     {
         vkStream->write((const uint64_t*)forMarshaling->pWaitSemaphoreValues, forMarshaling->waitSemaphoreValuesCount * sizeof(const uint64_t));
     }
     vkStream->write((uint32_t*)&forMarshaling->signalSemaphoreValuesCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_247 = (uint64_t)(uintptr_t)forMarshaling->pSignalSemaphoreValues;
-    vkStream->putBe64(cgen_var_247);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pSignalSemaphoreValues;
+    vkStream->putBe64(cgen_var_1);
     if (forMarshaling->pSignalSemaphoreValues)
     {
         vkStream->write((const uint64_t*)forMarshaling->pSignalSemaphoreValues, forMarshaling->signalSemaphoreValuesCount * sizeof(const uint64_t));
@@ -8303,17 +10798,16 @@
 
 void unmarshal_VkD3D12FenceSubmitInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkD3D12FenceSubmitInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->waitSemaphoreValuesCount, sizeof(uint32_t));
     // WARNING PTR CHECK
     const uint64_t* check_pWaitSemaphoreValues;
@@ -8342,38 +10836,37 @@
 
 void marshal_VkSemaphoreGetWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSemaphoreGetWin32HandleInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_250;
-    vkStream->handleMapping()->mapHandles_VkSemaphore_u64(&forMarshaling->semaphore, &cgen_var_250, 1);
-    vkStream->write((uint64_t*)&cgen_var_250, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkSemaphore_u64(&forMarshaling->semaphore, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((VkExternalSemaphoreHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalSemaphoreHandleTypeFlagBits));
 }
 
 void unmarshal_VkSemaphoreGetWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSemaphoreGetWin32HandleInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_251;
-    vkStream->read((uint64_t*)&cgen_var_251, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkSemaphore(&cgen_var_251, (VkSemaphore*)&forUnmarshaling->semaphore, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkSemaphore(&cgen_var_0, (VkSemaphore*)&forUnmarshaling->semaphore, 1);
     vkStream->read((VkExternalSemaphoreHandleTypeFlagBits*)&forUnmarshaling->handleType, sizeof(VkExternalSemaphoreHandleTypeFlagBits));
 }
 
@@ -8381,19 +10874,19 @@
 #ifdef VK_KHR_external_semaphore_fd
 void marshal_VkImportSemaphoreFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportSemaphoreFdInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_252;
-    vkStream->handleMapping()->mapHandles_VkSemaphore_u64(&forMarshaling->semaphore, &cgen_var_252, 1);
-    vkStream->write((uint64_t*)&cgen_var_252, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkSemaphore_u64(&forMarshaling->semaphore, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((VkSemaphoreImportFlags*)&forMarshaling->flags, sizeof(VkSemaphoreImportFlags));
     vkStream->write((VkExternalSemaphoreHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalSemaphoreHandleTypeFlagBits));
     vkStream->write((int*)&forMarshaling->fd, sizeof(int));
@@ -8401,20 +10894,19 @@
 
 void unmarshal_VkImportSemaphoreFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportSemaphoreFdInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_253;
-    vkStream->read((uint64_t*)&cgen_var_253, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkSemaphore(&cgen_var_253, (VkSemaphore*)&forUnmarshaling->semaphore, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkSemaphore(&cgen_var_0, (VkSemaphore*)&forUnmarshaling->semaphore, 1);
     vkStream->read((VkSemaphoreImportFlags*)&forUnmarshaling->flags, sizeof(VkSemaphoreImportFlags));
     vkStream->read((VkExternalSemaphoreHandleTypeFlagBits*)&forUnmarshaling->handleType, sizeof(VkExternalSemaphoreHandleTypeFlagBits));
     vkStream->read((int*)&forUnmarshaling->fd, sizeof(int));
@@ -8422,38 +10914,37 @@
 
 void marshal_VkSemaphoreGetFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSemaphoreGetFdInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_254;
-    vkStream->handleMapping()->mapHandles_VkSemaphore_u64(&forMarshaling->semaphore, &cgen_var_254, 1);
-    vkStream->write((uint64_t*)&cgen_var_254, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkSemaphore_u64(&forMarshaling->semaphore, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((VkExternalSemaphoreHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalSemaphoreHandleTypeFlagBits));
 }
 
 void unmarshal_VkSemaphoreGetFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSemaphoreGetFdInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_255;
-    vkStream->read((uint64_t*)&cgen_var_255, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkSemaphore(&cgen_var_255, (VkSemaphore*)&forUnmarshaling->semaphore, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkSemaphore(&cgen_var_0, (VkSemaphore*)&forUnmarshaling->semaphore, 1);
     vkStream->read((VkExternalSemaphoreHandleTypeFlagBits*)&forUnmarshaling->handleType, sizeof(VkExternalSemaphoreHandleTypeFlagBits));
 }
 
@@ -8461,78 +10952,90 @@
 #ifdef VK_KHR_push_descriptor
 void marshal_VkPhysicalDevicePushDescriptorPropertiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDevicePushDescriptorPropertiesKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->maxPushDescriptors, sizeof(uint32_t));
 }
 
 void unmarshal_VkPhysicalDevicePushDescriptorPropertiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDevicePushDescriptorPropertiesKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->maxPushDescriptors, sizeof(uint32_t));
 }
 
 #endif
+#ifdef VK_KHR_shader_float16_int8
+#endif
 #ifdef VK_KHR_16bit_storage
 #endif
 #ifdef VK_KHR_incremental_present
 void marshal_VkRectLayerKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkRectLayerKHR* forMarshaling)
 {
-    marshal_VkOffset2D(vkStream, (VkOffset2D*)(&forMarshaling->offset));
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->extent));
+    (void)rootType;
+    marshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forMarshaling->offset));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->extent));
     vkStream->write((uint32_t*)&forMarshaling->layer, sizeof(uint32_t));
 }
 
 void unmarshal_VkRectLayerKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkRectLayerKHR* forUnmarshaling)
 {
-    unmarshal_VkOffset2D(vkStream, (VkOffset2D*)(&forUnmarshaling->offset));
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->extent));
+    (void)rootType;
+    unmarshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forUnmarshaling->offset));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->extent));
     vkStream->read((uint32_t*)&forUnmarshaling->layer, sizeof(uint32_t));
 }
 
 void marshal_VkPresentRegionKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPresentRegionKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->rectangleCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_256 = (uint64_t)(uintptr_t)forMarshaling->pRectangles;
-    vkStream->putBe64(cgen_var_256);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pRectangles;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pRectangles)
     {
-        for (uint32_t i = 0; i < (uint32_t)forMarshaling->rectangleCount; ++i)
+        if (forMarshaling)
         {
-            marshal_VkRectLayerKHR(vkStream, (const VkRectLayerKHR*)(forMarshaling->pRectangles + i));
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->rectangleCount; ++i)
+            {
+                marshal_VkRectLayerKHR(vkStream, rootType, (const VkRectLayerKHR*)(forMarshaling->pRectangles + i));
+            }
         }
     }
 }
 
 void unmarshal_VkPresentRegionKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPresentRegionKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->rectangleCount, sizeof(uint32_t));
     // WARNING PTR CHECK
     const VkRectLayerKHR* check_pRectangles;
@@ -8543,51 +11046,56 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pRectangles inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->rectangleCount; ++i)
+        if (forUnmarshaling)
         {
-            unmarshal_VkRectLayerKHR(vkStream, (VkRectLayerKHR*)(forUnmarshaling->pRectangles + i));
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->rectangleCount; ++i)
+            {
+                unmarshal_VkRectLayerKHR(vkStream, rootType, (VkRectLayerKHR*)(forUnmarshaling->pRectangles + i));
+            }
         }
     }
 }
 
 void marshal_VkPresentRegionsKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPresentRegionsKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->swapchainCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_258 = (uint64_t)(uintptr_t)forMarshaling->pRegions;
-    vkStream->putBe64(cgen_var_258);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pRegions;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pRegions)
     {
-        for (uint32_t i = 0; i < (uint32_t)forMarshaling->swapchainCount; ++i)
+        if (forMarshaling)
         {
-            marshal_VkPresentRegionKHR(vkStream, (const VkPresentRegionKHR*)(forMarshaling->pRegions + i));
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->swapchainCount; ++i)
+            {
+                marshal_VkPresentRegionKHR(vkStream, rootType, (const VkPresentRegionKHR*)(forMarshaling->pRegions + i));
+            }
         }
     }
 }
 
 void unmarshal_VkPresentRegionsKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPresentRegionsKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->swapchainCount, sizeof(uint32_t));
     // WARNING PTR CHECK
     const VkPresentRegionKHR* check_pRegions;
@@ -8598,9 +11106,12 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pRegions inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->swapchainCount; ++i)
+        if (forUnmarshaling)
         {
-            unmarshal_VkPresentRegionKHR(vkStream, (VkPresentRegionKHR*)(forUnmarshaling->pRegions + i));
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->swapchainCount; ++i)
+            {
+                unmarshal_VkPresentRegionKHR(vkStream, rootType, (VkPresentRegionKHR*)(forUnmarshaling->pRegions + i));
+            }
         }
     }
 }
@@ -8608,390 +11119,38 @@
 #endif
 #ifdef VK_KHR_descriptor_update_template
 #endif
+#ifdef VK_KHR_imageless_framebuffer
+#endif
 #ifdef VK_KHR_create_renderpass2
-void marshal_VkAttachmentDescription2KHR(
-    VulkanStreamGuest* vkStream,
-    const VkAttachmentDescription2KHR* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkAttachmentDescriptionFlags*)&forMarshaling->flags, sizeof(VkAttachmentDescriptionFlags));
-    vkStream->write((VkFormat*)&forMarshaling->format, sizeof(VkFormat));
-    vkStream->write((VkSampleCountFlagBits*)&forMarshaling->samples, sizeof(VkSampleCountFlagBits));
-    vkStream->write((VkAttachmentLoadOp*)&forMarshaling->loadOp, sizeof(VkAttachmentLoadOp));
-    vkStream->write((VkAttachmentStoreOp*)&forMarshaling->storeOp, sizeof(VkAttachmentStoreOp));
-    vkStream->write((VkAttachmentLoadOp*)&forMarshaling->stencilLoadOp, sizeof(VkAttachmentLoadOp));
-    vkStream->write((VkAttachmentStoreOp*)&forMarshaling->stencilStoreOp, sizeof(VkAttachmentStoreOp));
-    vkStream->write((VkImageLayout*)&forMarshaling->initialLayout, sizeof(VkImageLayout));
-    vkStream->write((VkImageLayout*)&forMarshaling->finalLayout, sizeof(VkImageLayout));
-}
-
-void unmarshal_VkAttachmentDescription2KHR(
-    VulkanStreamGuest* vkStream,
-    VkAttachmentDescription2KHR* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkAttachmentDescriptionFlags*)&forUnmarshaling->flags, sizeof(VkAttachmentDescriptionFlags));
-    vkStream->read((VkFormat*)&forUnmarshaling->format, sizeof(VkFormat));
-    vkStream->read((VkSampleCountFlagBits*)&forUnmarshaling->samples, sizeof(VkSampleCountFlagBits));
-    vkStream->read((VkAttachmentLoadOp*)&forUnmarshaling->loadOp, sizeof(VkAttachmentLoadOp));
-    vkStream->read((VkAttachmentStoreOp*)&forUnmarshaling->storeOp, sizeof(VkAttachmentStoreOp));
-    vkStream->read((VkAttachmentLoadOp*)&forUnmarshaling->stencilLoadOp, sizeof(VkAttachmentLoadOp));
-    vkStream->read((VkAttachmentStoreOp*)&forUnmarshaling->stencilStoreOp, sizeof(VkAttachmentStoreOp));
-    vkStream->read((VkImageLayout*)&forUnmarshaling->initialLayout, sizeof(VkImageLayout));
-    vkStream->read((VkImageLayout*)&forUnmarshaling->finalLayout, sizeof(VkImageLayout));
-}
-
-void marshal_VkAttachmentReference2KHR(
-    VulkanStreamGuest* vkStream,
-    const VkAttachmentReference2KHR* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((uint32_t*)&forMarshaling->attachment, sizeof(uint32_t));
-    vkStream->write((VkImageLayout*)&forMarshaling->layout, sizeof(VkImageLayout));
-    vkStream->write((VkImageAspectFlags*)&forMarshaling->aspectMask, sizeof(VkImageAspectFlags));
-}
-
-void unmarshal_VkAttachmentReference2KHR(
-    VulkanStreamGuest* vkStream,
-    VkAttachmentReference2KHR* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->attachment, sizeof(uint32_t));
-    vkStream->read((VkImageLayout*)&forUnmarshaling->layout, sizeof(VkImageLayout));
-    vkStream->read((VkImageAspectFlags*)&forUnmarshaling->aspectMask, sizeof(VkImageAspectFlags));
-}
-
-void marshal_VkSubpassDescription2KHR(
-    VulkanStreamGuest* vkStream,
-    const VkSubpassDescription2KHR* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkSubpassDescriptionFlags*)&forMarshaling->flags, sizeof(VkSubpassDescriptionFlags));
-    vkStream->write((VkPipelineBindPoint*)&forMarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
-    vkStream->write((uint32_t*)&forMarshaling->viewMask, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->inputAttachmentCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->inputAttachmentCount; ++i)
-    {
-        marshal_VkAttachmentReference2KHR(vkStream, (const VkAttachmentReference2KHR*)(forMarshaling->pInputAttachments + i));
-    }
-    vkStream->write((uint32_t*)&forMarshaling->colorAttachmentCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->colorAttachmentCount; ++i)
-    {
-        marshal_VkAttachmentReference2KHR(vkStream, (const VkAttachmentReference2KHR*)(forMarshaling->pColorAttachments + i));
-    }
-    // WARNING PTR CHECK
-    uint64_t cgen_var_260 = (uint64_t)(uintptr_t)forMarshaling->pResolveAttachments;
-    vkStream->putBe64(cgen_var_260);
-    if (forMarshaling->pResolveAttachments)
-    {
-        for (uint32_t i = 0; i < (uint32_t)forMarshaling->colorAttachmentCount; ++i)
-        {
-            marshal_VkAttachmentReference2KHR(vkStream, (const VkAttachmentReference2KHR*)(forMarshaling->pResolveAttachments + i));
-        }
-    }
-    // WARNING PTR CHECK
-    uint64_t cgen_var_261 = (uint64_t)(uintptr_t)forMarshaling->pDepthStencilAttachment;
-    vkStream->putBe64(cgen_var_261);
-    if (forMarshaling->pDepthStencilAttachment)
-    {
-        marshal_VkAttachmentReference2KHR(vkStream, (const VkAttachmentReference2KHR*)(forMarshaling->pDepthStencilAttachment));
-    }
-    vkStream->write((uint32_t*)&forMarshaling->preserveAttachmentCount, sizeof(uint32_t));
-    vkStream->write((const uint32_t*)forMarshaling->pPreserveAttachments, forMarshaling->preserveAttachmentCount * sizeof(const uint32_t));
-}
-
-void unmarshal_VkSubpassDescription2KHR(
-    VulkanStreamGuest* vkStream,
-    VkSubpassDescription2KHR* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkSubpassDescriptionFlags*)&forUnmarshaling->flags, sizeof(VkSubpassDescriptionFlags));
-    vkStream->read((VkPipelineBindPoint*)&forUnmarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
-    vkStream->read((uint32_t*)&forUnmarshaling->viewMask, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->inputAttachmentCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->inputAttachmentCount; ++i)
-    {
-        unmarshal_VkAttachmentReference2KHR(vkStream, (VkAttachmentReference2KHR*)(forUnmarshaling->pInputAttachments + i));
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->colorAttachmentCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->colorAttachmentCount; ++i)
-    {
-        unmarshal_VkAttachmentReference2KHR(vkStream, (VkAttachmentReference2KHR*)(forUnmarshaling->pColorAttachments + i));
-    }
-    // WARNING PTR CHECK
-    const VkAttachmentReference2KHR* check_pResolveAttachments;
-    check_pResolveAttachments = (const VkAttachmentReference2KHR*)(uintptr_t)vkStream->getBe64();
-    if (forUnmarshaling->pResolveAttachments)
-    {
-        if (!(check_pResolveAttachments))
-        {
-            fprintf(stderr, "fatal: forUnmarshaling->pResolveAttachments inconsistent between guest and host\n");
-        }
-        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->colorAttachmentCount; ++i)
-        {
-            unmarshal_VkAttachmentReference2KHR(vkStream, (VkAttachmentReference2KHR*)(forUnmarshaling->pResolveAttachments + i));
-        }
-    }
-    // WARNING PTR CHECK
-    const VkAttachmentReference2KHR* check_pDepthStencilAttachment;
-    check_pDepthStencilAttachment = (const VkAttachmentReference2KHR*)(uintptr_t)vkStream->getBe64();
-    if (forUnmarshaling->pDepthStencilAttachment)
-    {
-        if (!(check_pDepthStencilAttachment))
-        {
-            fprintf(stderr, "fatal: forUnmarshaling->pDepthStencilAttachment inconsistent between guest and host\n");
-        }
-        unmarshal_VkAttachmentReference2KHR(vkStream, (VkAttachmentReference2KHR*)(forUnmarshaling->pDepthStencilAttachment));
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->preserveAttachmentCount, sizeof(uint32_t));
-    vkStream->read((uint32_t*)forUnmarshaling->pPreserveAttachments, forUnmarshaling->preserveAttachmentCount * sizeof(const uint32_t));
-}
-
-void marshal_VkSubpassDependency2KHR(
-    VulkanStreamGuest* vkStream,
-    const VkSubpassDependency2KHR* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((uint32_t*)&forMarshaling->srcSubpass, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->dstSubpass, sizeof(uint32_t));
-    vkStream->write((VkPipelineStageFlags*)&forMarshaling->srcStageMask, sizeof(VkPipelineStageFlags));
-    vkStream->write((VkPipelineStageFlags*)&forMarshaling->dstStageMask, sizeof(VkPipelineStageFlags));
-    vkStream->write((VkAccessFlags*)&forMarshaling->srcAccessMask, sizeof(VkAccessFlags));
-    vkStream->write((VkAccessFlags*)&forMarshaling->dstAccessMask, sizeof(VkAccessFlags));
-    vkStream->write((VkDependencyFlags*)&forMarshaling->dependencyFlags, sizeof(VkDependencyFlags));
-    vkStream->write((int32_t*)&forMarshaling->viewOffset, sizeof(int32_t));
-}
-
-void unmarshal_VkSubpassDependency2KHR(
-    VulkanStreamGuest* vkStream,
-    VkSubpassDependency2KHR* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->srcSubpass, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->dstSubpass, sizeof(uint32_t));
-    vkStream->read((VkPipelineStageFlags*)&forUnmarshaling->srcStageMask, sizeof(VkPipelineStageFlags));
-    vkStream->read((VkPipelineStageFlags*)&forUnmarshaling->dstStageMask, sizeof(VkPipelineStageFlags));
-    vkStream->read((VkAccessFlags*)&forUnmarshaling->srcAccessMask, sizeof(VkAccessFlags));
-    vkStream->read((VkAccessFlags*)&forUnmarshaling->dstAccessMask, sizeof(VkAccessFlags));
-    vkStream->read((VkDependencyFlags*)&forUnmarshaling->dependencyFlags, sizeof(VkDependencyFlags));
-    vkStream->read((int32_t*)&forUnmarshaling->viewOffset, sizeof(int32_t));
-}
-
-void marshal_VkRenderPassCreateInfo2KHR(
-    VulkanStreamGuest* vkStream,
-    const VkRenderPassCreateInfo2KHR* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkRenderPassCreateFlags*)&forMarshaling->flags, sizeof(VkRenderPassCreateFlags));
-    vkStream->write((uint32_t*)&forMarshaling->attachmentCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->attachmentCount; ++i)
-    {
-        marshal_VkAttachmentDescription2KHR(vkStream, (const VkAttachmentDescription2KHR*)(forMarshaling->pAttachments + i));
-    }
-    vkStream->write((uint32_t*)&forMarshaling->subpassCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->subpassCount; ++i)
-    {
-        marshal_VkSubpassDescription2KHR(vkStream, (const VkSubpassDescription2KHR*)(forMarshaling->pSubpasses + i));
-    }
-    vkStream->write((uint32_t*)&forMarshaling->dependencyCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->dependencyCount; ++i)
-    {
-        marshal_VkSubpassDependency2KHR(vkStream, (const VkSubpassDependency2KHR*)(forMarshaling->pDependencies + i));
-    }
-    vkStream->write((uint32_t*)&forMarshaling->correlatedViewMaskCount, sizeof(uint32_t));
-    vkStream->write((const uint32_t*)forMarshaling->pCorrelatedViewMasks, forMarshaling->correlatedViewMaskCount * sizeof(const uint32_t));
-}
-
-void unmarshal_VkRenderPassCreateInfo2KHR(
-    VulkanStreamGuest* vkStream,
-    VkRenderPassCreateInfo2KHR* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkRenderPassCreateFlags*)&forUnmarshaling->flags, sizeof(VkRenderPassCreateFlags));
-    vkStream->read((uint32_t*)&forUnmarshaling->attachmentCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->attachmentCount; ++i)
-    {
-        unmarshal_VkAttachmentDescription2KHR(vkStream, (VkAttachmentDescription2KHR*)(forUnmarshaling->pAttachments + i));
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->subpassCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->subpassCount; ++i)
-    {
-        unmarshal_VkSubpassDescription2KHR(vkStream, (VkSubpassDescription2KHR*)(forUnmarshaling->pSubpasses + i));
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->dependencyCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->dependencyCount; ++i)
-    {
-        unmarshal_VkSubpassDependency2KHR(vkStream, (VkSubpassDependency2KHR*)(forUnmarshaling->pDependencies + i));
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->correlatedViewMaskCount, sizeof(uint32_t));
-    vkStream->read((uint32_t*)forUnmarshaling->pCorrelatedViewMasks, forUnmarshaling->correlatedViewMaskCount * sizeof(const uint32_t));
-}
-
-void marshal_VkSubpassBeginInfoKHR(
-    VulkanStreamGuest* vkStream,
-    const VkSubpassBeginInfoKHR* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkSubpassContents*)&forMarshaling->contents, sizeof(VkSubpassContents));
-}
-
-void unmarshal_VkSubpassBeginInfoKHR(
-    VulkanStreamGuest* vkStream,
-    VkSubpassBeginInfoKHR* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkSubpassContents*)&forUnmarshaling->contents, sizeof(VkSubpassContents));
-}
-
-void marshal_VkSubpassEndInfoKHR(
-    VulkanStreamGuest* vkStream,
-    const VkSubpassEndInfoKHR* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-}
-
-void unmarshal_VkSubpassEndInfoKHR(
-    VulkanStreamGuest* vkStream,
-    VkSubpassEndInfoKHR* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-}
-
 #endif
 #ifdef VK_KHR_shared_presentable_image
 void marshal_VkSharedPresentSurfaceCapabilitiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSharedPresentSurfaceCapabilitiesKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkImageUsageFlags*)&forMarshaling->sharedPresentSupportedUsageFlags, sizeof(VkImageUsageFlags));
 }
 
 void unmarshal_VkSharedPresentSurfaceCapabilitiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSharedPresentSurfaceCapabilitiesKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkImageUsageFlags*)&forUnmarshaling->sharedPresentSupportedUsageFlags, sizeof(VkImageUsageFlags));
 }
 
@@ -9003,19 +11162,19 @@
 #ifdef VK_KHR_external_fence_win32
 void marshal_VkImportFenceWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportFenceWin32HandleInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_264;
-    vkStream->handleMapping()->mapHandles_VkFence_u64(&forMarshaling->fence, &cgen_var_264, 1);
-    vkStream->write((uint64_t*)&cgen_var_264, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkFence_u64(&forMarshaling->fence, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((VkFenceImportFlags*)&forMarshaling->flags, sizeof(VkFenceImportFlags));
     vkStream->write((VkExternalFenceHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalFenceHandleTypeFlagBits));
     vkStream->write((HANDLE*)&forMarshaling->handle, sizeof(HANDLE));
@@ -9024,20 +11183,19 @@
 
 void unmarshal_VkImportFenceWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportFenceWin32HandleInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_265;
-    vkStream->read((uint64_t*)&cgen_var_265, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkFence(&cgen_var_265, (VkFence*)&forUnmarshaling->fence, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkFence(&cgen_var_0, (VkFence*)&forUnmarshaling->fence, 1);
     vkStream->read((VkFenceImportFlags*)&forUnmarshaling->flags, sizeof(VkFenceImportFlags));
     vkStream->read((VkExternalFenceHandleTypeFlagBits*)&forUnmarshaling->handleType, sizeof(VkExternalFenceHandleTypeFlagBits));
     vkStream->read((HANDLE*)&forUnmarshaling->handle, sizeof(HANDLE));
@@ -9046,19 +11204,19 @@
 
 void marshal_VkExportFenceWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExportFenceWin32HandleInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     // WARNING PTR CHECK
-    uint64_t cgen_var_266 = (uint64_t)(uintptr_t)forMarshaling->pAttributes;
-    vkStream->putBe64(cgen_var_266);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pAttributes;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pAttributes)
     {
         vkStream->write((const SECURITY_ATTRIBUTES*)forMarshaling->pAttributes, sizeof(const SECURITY_ATTRIBUTES));
@@ -9069,17 +11227,16 @@
 
 void unmarshal_VkExportFenceWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExportFenceWin32HandleInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     // WARNING PTR CHECK
     const SECURITY_ATTRIBUTES* check_pAttributes;
     check_pAttributes = (const SECURITY_ATTRIBUTES*)(uintptr_t)vkStream->getBe64();
@@ -9097,38 +11254,37 @@
 
 void marshal_VkFenceGetWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkFenceGetWin32HandleInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_268;
-    vkStream->handleMapping()->mapHandles_VkFence_u64(&forMarshaling->fence, &cgen_var_268, 1);
-    vkStream->write((uint64_t*)&cgen_var_268, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkFence_u64(&forMarshaling->fence, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((VkExternalFenceHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalFenceHandleTypeFlagBits));
 }
 
 void unmarshal_VkFenceGetWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkFenceGetWin32HandleInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_269;
-    vkStream->read((uint64_t*)&cgen_var_269, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkFence(&cgen_var_269, (VkFence*)&forUnmarshaling->fence, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkFence(&cgen_var_0, (VkFence*)&forUnmarshaling->fence, 1);
     vkStream->read((VkExternalFenceHandleTypeFlagBits*)&forUnmarshaling->handleType, sizeof(VkExternalFenceHandleTypeFlagBits));
 }
 
@@ -9136,19 +11292,19 @@
 #ifdef VK_KHR_external_fence_fd
 void marshal_VkImportFenceFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportFenceFdInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_270;
-    vkStream->handleMapping()->mapHandles_VkFence_u64(&forMarshaling->fence, &cgen_var_270, 1);
-    vkStream->write((uint64_t*)&cgen_var_270, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkFence_u64(&forMarshaling->fence, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((VkFenceImportFlags*)&forMarshaling->flags, sizeof(VkFenceImportFlags));
     vkStream->write((VkExternalFenceHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalFenceHandleTypeFlagBits));
     vkStream->write((int*)&forMarshaling->fd, sizeof(int));
@@ -9156,20 +11312,19 @@
 
 void unmarshal_VkImportFenceFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportFenceFdInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_271;
-    vkStream->read((uint64_t*)&cgen_var_271, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkFence(&cgen_var_271, (VkFence*)&forUnmarshaling->fence, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkFence(&cgen_var_0, (VkFence*)&forUnmarshaling->fence, 1);
     vkStream->read((VkFenceImportFlags*)&forUnmarshaling->flags, sizeof(VkFenceImportFlags));
     vkStream->read((VkExternalFenceHandleTypeFlagBits*)&forUnmarshaling->handleType, sizeof(VkExternalFenceHandleTypeFlagBits));
     vkStream->read((int*)&forUnmarshaling->fd, sizeof(int));
@@ -9177,140 +11332,386 @@
 
 void marshal_VkFenceGetFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkFenceGetFdInfoKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_272;
-    vkStream->handleMapping()->mapHandles_VkFence_u64(&forMarshaling->fence, &cgen_var_272, 1);
-    vkStream->write((uint64_t*)&cgen_var_272, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkFence_u64(&forMarshaling->fence, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((VkExternalFenceHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalFenceHandleTypeFlagBits));
 }
 
 void unmarshal_VkFenceGetFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkFenceGetFdInfoKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_273;
-    vkStream->read((uint64_t*)&cgen_var_273, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkFence(&cgen_var_273, (VkFence*)&forUnmarshaling->fence, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkFence(&cgen_var_0, (VkFence*)&forUnmarshaling->fence, 1);
     vkStream->read((VkExternalFenceHandleTypeFlagBits*)&forUnmarshaling->handleType, sizeof(VkExternalFenceHandleTypeFlagBits));
 }
 
 #endif
+#ifdef VK_KHR_performance_query
+void marshal_VkPhysicalDevicePerformanceQueryFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePerformanceQueryFeaturesKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->performanceCounterQueryPools, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->performanceCounterMultipleQueryPools, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDevicePerformanceQueryFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevicePerformanceQueryFeaturesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->performanceCounterQueryPools, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->performanceCounterMultipleQueryPools, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDevicePerformanceQueryPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePerformanceQueryPropertiesKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->allowCommandBufferQueryCopies, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDevicePerformanceQueryPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevicePerformanceQueryPropertiesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->allowCommandBufferQueryCopies, sizeof(VkBool32));
+}
+
+void marshal_VkPerformanceCounterKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceCounterKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkPerformanceCounterUnitKHR*)&forMarshaling->unit, sizeof(VkPerformanceCounterUnitKHR));
+    vkStream->write((VkPerformanceCounterScopeKHR*)&forMarshaling->scope, sizeof(VkPerformanceCounterScopeKHR));
+    vkStream->write((VkPerformanceCounterStorageKHR*)&forMarshaling->storage, sizeof(VkPerformanceCounterStorageKHR));
+    vkStream->write((uint8_t*)forMarshaling->uuid, VK_UUID_SIZE * sizeof(uint8_t));
+}
+
+void unmarshal_VkPerformanceCounterKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceCounterKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkPerformanceCounterUnitKHR*)&forUnmarshaling->unit, sizeof(VkPerformanceCounterUnitKHR));
+    vkStream->read((VkPerformanceCounterScopeKHR*)&forUnmarshaling->scope, sizeof(VkPerformanceCounterScopeKHR));
+    vkStream->read((VkPerformanceCounterStorageKHR*)&forUnmarshaling->storage, sizeof(VkPerformanceCounterStorageKHR));
+    vkStream->read((uint8_t*)forUnmarshaling->uuid, VK_UUID_SIZE * sizeof(uint8_t));
+}
+
+void marshal_VkPerformanceCounterDescriptionKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceCounterDescriptionKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkPerformanceCounterDescriptionFlagsKHR*)&forMarshaling->flags, sizeof(VkPerformanceCounterDescriptionFlagsKHR));
+    vkStream->write((char*)forMarshaling->name, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->write((char*)forMarshaling->category, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->write((char*)forMarshaling->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+}
+
+void unmarshal_VkPerformanceCounterDescriptionKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceCounterDescriptionKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkPerformanceCounterDescriptionFlagsKHR*)&forUnmarshaling->flags, sizeof(VkPerformanceCounterDescriptionFlagsKHR));
+    vkStream->read((char*)forUnmarshaling->name, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->read((char*)forUnmarshaling->category, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->read((char*)forUnmarshaling->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+}
+
+void marshal_VkQueryPoolPerformanceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkQueryPoolPerformanceCreateInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->queueFamilyIndex, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->counterIndexCount, sizeof(uint32_t));
+    vkStream->write((const uint32_t*)forMarshaling->pCounterIndices, forMarshaling->counterIndexCount * sizeof(const uint32_t));
+}
+
+void unmarshal_VkQueryPoolPerformanceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkQueryPoolPerformanceCreateInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->queueFamilyIndex, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->counterIndexCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)forUnmarshaling->pCounterIndices, forUnmarshaling->counterIndexCount * sizeof(const uint32_t));
+}
+
+void marshal_VkPerformanceCounterResultKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceCounterResultKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((int32_t*)&forMarshaling->int32, sizeof(int32_t));
+}
+
+void unmarshal_VkPerformanceCounterResultKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceCounterResultKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((int32_t*)&forUnmarshaling->int32, sizeof(int32_t));
+}
+
+void marshal_VkAcquireProfilingLockInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAcquireProfilingLockInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkAcquireProfilingLockFlagsKHR*)&forMarshaling->flags, sizeof(VkAcquireProfilingLockFlagsKHR));
+    vkStream->write((uint64_t*)&forMarshaling->timeout, sizeof(uint64_t));
+}
+
+void unmarshal_VkAcquireProfilingLockInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAcquireProfilingLockInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkAcquireProfilingLockFlagsKHR*)&forUnmarshaling->flags, sizeof(VkAcquireProfilingLockFlagsKHR));
+    vkStream->read((uint64_t*)&forUnmarshaling->timeout, sizeof(uint64_t));
+}
+
+void marshal_VkPerformanceQuerySubmitInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceQuerySubmitInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->counterPassIndex, sizeof(uint32_t));
+}
+
+void unmarshal_VkPerformanceQuerySubmitInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceQuerySubmitInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->counterPassIndex, sizeof(uint32_t));
+}
+
+#endif
 #ifdef VK_KHR_maintenance2
 #endif
 #ifdef VK_KHR_get_surface_capabilities2
 void marshal_VkPhysicalDeviceSurfaceInfo2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceSurfaceInfo2KHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_274;
-    vkStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&forMarshaling->surface, &cgen_var_274, 1);
-    vkStream->write((uint64_t*)&cgen_var_274, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkSurfaceKHR_u64(&forMarshaling->surface, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
 }
 
 void unmarshal_VkPhysicalDeviceSurfaceInfo2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceSurfaceInfo2KHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_275;
-    vkStream->read((uint64_t*)&cgen_var_275, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_275, (VkSurfaceKHR*)&forUnmarshaling->surface, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkSurfaceKHR(&cgen_var_0, (VkSurfaceKHR*)&forUnmarshaling->surface, 1);
 }
 
 void marshal_VkSurfaceCapabilities2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSurfaceCapabilities2KHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkSurfaceCapabilitiesKHR(vkStream, (VkSurfaceCapabilitiesKHR*)(&forMarshaling->surfaceCapabilities));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkSurfaceCapabilitiesKHR(vkStream, rootType, (VkSurfaceCapabilitiesKHR*)(&forMarshaling->surfaceCapabilities));
 }
 
 void unmarshal_VkSurfaceCapabilities2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSurfaceCapabilities2KHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkSurfaceCapabilitiesKHR(vkStream, (VkSurfaceCapabilitiesKHR*)(&forUnmarshaling->surfaceCapabilities));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkSurfaceCapabilitiesKHR(vkStream, rootType, (VkSurfaceCapabilitiesKHR*)(&forUnmarshaling->surfaceCapabilities));
 }
 
 void marshal_VkSurfaceFormat2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSurfaceFormat2KHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkSurfaceFormatKHR(vkStream, (VkSurfaceFormatKHR*)(&forMarshaling->surfaceFormat));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkSurfaceFormatKHR(vkStream, rootType, (VkSurfaceFormatKHR*)(&forMarshaling->surfaceFormat));
 }
 
 void unmarshal_VkSurfaceFormat2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSurfaceFormat2KHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkSurfaceFormatKHR(vkStream, (VkSurfaceFormatKHR*)(&forUnmarshaling->surfaceFormat));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkSurfaceFormatKHR(vkStream, rootType, (VkSurfaceFormatKHR*)(&forUnmarshaling->surfaceFormat));
 }
 
 #endif
@@ -9319,163 +11720,158 @@
 #ifdef VK_KHR_get_display_properties2
 void marshal_VkDisplayProperties2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayProperties2KHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkDisplayPropertiesKHR(vkStream, (VkDisplayPropertiesKHR*)(&forMarshaling->displayProperties));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkDisplayPropertiesKHR(vkStream, rootType, (VkDisplayPropertiesKHR*)(&forMarshaling->displayProperties));
 }
 
 void unmarshal_VkDisplayProperties2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayProperties2KHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkDisplayPropertiesKHR(vkStream, (VkDisplayPropertiesKHR*)(&forUnmarshaling->displayProperties));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkDisplayPropertiesKHR(vkStream, rootType, (VkDisplayPropertiesKHR*)(&forUnmarshaling->displayProperties));
 }
 
 void marshal_VkDisplayPlaneProperties2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayPlaneProperties2KHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkDisplayPlanePropertiesKHR(vkStream, (VkDisplayPlanePropertiesKHR*)(&forMarshaling->displayPlaneProperties));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkDisplayPlanePropertiesKHR(vkStream, rootType, (VkDisplayPlanePropertiesKHR*)(&forMarshaling->displayPlaneProperties));
 }
 
 void unmarshal_VkDisplayPlaneProperties2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayPlaneProperties2KHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkDisplayPlanePropertiesKHR(vkStream, (VkDisplayPlanePropertiesKHR*)(&forUnmarshaling->displayPlaneProperties));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkDisplayPlanePropertiesKHR(vkStream, rootType, (VkDisplayPlanePropertiesKHR*)(&forUnmarshaling->displayPlaneProperties));
 }
 
 void marshal_VkDisplayModeProperties2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayModeProperties2KHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkDisplayModePropertiesKHR(vkStream, (VkDisplayModePropertiesKHR*)(&forMarshaling->displayModeProperties));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkDisplayModePropertiesKHR(vkStream, rootType, (VkDisplayModePropertiesKHR*)(&forMarshaling->displayModeProperties));
 }
 
 void unmarshal_VkDisplayModeProperties2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayModeProperties2KHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkDisplayModePropertiesKHR(vkStream, (VkDisplayModePropertiesKHR*)(&forUnmarshaling->displayModeProperties));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkDisplayModePropertiesKHR(vkStream, rootType, (VkDisplayModePropertiesKHR*)(&forUnmarshaling->displayModeProperties));
 }
 
 void marshal_VkDisplayPlaneInfo2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayPlaneInfo2KHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_276;
-    vkStream->handleMapping()->mapHandles_VkDisplayModeKHR_u64(&forMarshaling->mode, &cgen_var_276, 1);
-    vkStream->write((uint64_t*)&cgen_var_276, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkDisplayModeKHR_u64(&forMarshaling->mode, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((uint32_t*)&forMarshaling->planeIndex, sizeof(uint32_t));
 }
 
 void unmarshal_VkDisplayPlaneInfo2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayPlaneInfo2KHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_277;
-    vkStream->read((uint64_t*)&cgen_var_277, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDisplayModeKHR(&cgen_var_277, (VkDisplayModeKHR*)&forUnmarshaling->mode, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDisplayModeKHR(&cgen_var_0, (VkDisplayModeKHR*)&forUnmarshaling->mode, 1);
     vkStream->read((uint32_t*)&forUnmarshaling->planeIndex, sizeof(uint32_t));
 }
 
 void marshal_VkDisplayPlaneCapabilities2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayPlaneCapabilities2KHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkDisplayPlaneCapabilitiesKHR(vkStream, (VkDisplayPlaneCapabilitiesKHR*)(&forMarshaling->capabilities));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkDisplayPlaneCapabilitiesKHR(vkStream, rootType, (VkDisplayPlaneCapabilitiesKHR*)(&forMarshaling->capabilities));
 }
 
 void unmarshal_VkDisplayPlaneCapabilities2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayPlaneCapabilities2KHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkDisplayPlaneCapabilitiesKHR(vkStream, (VkDisplayPlaneCapabilitiesKHR*)(&forUnmarshaling->capabilities));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkDisplayPlaneCapabilitiesKHR(vkStream, rootType, (VkDisplayPlaneCapabilitiesKHR*)(&forUnmarshaling->capabilities));
 }
 
 #endif
@@ -9488,101 +11884,1290 @@
 #ifdef VK_KHR_get_memory_requirements2
 #endif
 #ifdef VK_KHR_image_format_list
-void marshal_VkImageFormatListCreateInfoKHR(
-    VulkanStreamGuest* vkStream,
-    const VkImageFormatListCreateInfoKHR* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((uint32_t*)&forMarshaling->viewFormatCount, sizeof(uint32_t));
-    vkStream->write((const VkFormat*)forMarshaling->pViewFormats, forMarshaling->viewFormatCount * sizeof(const VkFormat));
-}
-
-void unmarshal_VkImageFormatListCreateInfoKHR(
-    VulkanStreamGuest* vkStream,
-    VkImageFormatListCreateInfoKHR* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->viewFormatCount, sizeof(uint32_t));
-    vkStream->read((VkFormat*)forUnmarshaling->pViewFormats, forUnmarshaling->viewFormatCount * sizeof(const VkFormat));
-}
-
 #endif
 #ifdef VK_KHR_sampler_ycbcr_conversion
 #endif
 #ifdef VK_KHR_bind_memory2
 #endif
+#ifdef VK_KHR_portability_subset
+void marshal_VkPhysicalDevicePortabilitySubsetFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePortabilitySubsetFeaturesKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->constantAlphaColorBlendFactors, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->events, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->imageViewFormatReinterpretation, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->imageViewFormatSwizzle, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->imageView2DOn3DImage, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->multisampleArrayImage, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->mutableComparisonSamplers, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->pointPolygons, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->samplerMipLodBias, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->separateStencilMaskRef, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSampleRateInterpolationFunctions, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->tessellationIsolines, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->tessellationPointMode, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->triangleFans, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->vertexAttributeAccessBeyondStride, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDevicePortabilitySubsetFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevicePortabilitySubsetFeaturesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->constantAlphaColorBlendFactors, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->events, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->imageViewFormatReinterpretation, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->imageViewFormatSwizzle, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->imageView2DOn3DImage, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->multisampleArrayImage, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->mutableComparisonSamplers, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->pointPolygons, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->samplerMipLodBias, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->separateStencilMaskRef, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSampleRateInterpolationFunctions, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->tessellationIsolines, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->tessellationPointMode, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->triangleFans, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->vertexAttributeAccessBeyondStride, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDevicePortabilitySubsetPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePortabilitySubsetPropertiesKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->minVertexInputBindingStrideAlignment, sizeof(uint32_t));
+}
+
+void unmarshal_VkPhysicalDevicePortabilitySubsetPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevicePortabilitySubsetPropertiesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->minVertexInputBindingStrideAlignment, sizeof(uint32_t));
+}
+
+#endif
 #ifdef VK_KHR_maintenance3
 #endif
 #ifdef VK_KHR_draw_indirect_count
 #endif
+#ifdef VK_KHR_shader_subgroup_extended_types
+#endif
 #ifdef VK_KHR_8bit_storage
-void marshal_VkPhysicalDevice8BitStorageFeaturesKHR(
+#endif
+#ifdef VK_KHR_shader_atomic_int64
+#endif
+#ifdef VK_KHR_shader_clock
+void marshal_VkPhysicalDeviceShaderClockFeaturesKHR(
     VulkanStreamGuest* vkStream,
-    const VkPhysicalDevice8BitStorageFeaturesKHR* forMarshaling)
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderClockFeaturesKHR* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    vkStream->write((VkBool32*)&forMarshaling->storageBuffer8BitAccess, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->uniformAndStorageBuffer8BitAccess, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->storagePushConstant8, sizeof(VkBool32));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->shaderSubgroupClock, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderDeviceClock, sizeof(VkBool32));
 }
 
-void unmarshal_VkPhysicalDevice8BitStorageFeaturesKHR(
+void unmarshal_VkPhysicalDeviceShaderClockFeaturesKHR(
     VulkanStreamGuest* vkStream,
-    VkPhysicalDevice8BitStorageFeaturesKHR* forUnmarshaling)
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderClockFeaturesKHR* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    vkStream->read((VkBool32*)&forUnmarshaling->storageBuffer8BitAccess, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->uniformAndStorageBuffer8BitAccess, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->storagePushConstant8, sizeof(VkBool32));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSubgroupClock, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderDeviceClock, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_KHR_driver_properties
+#endif
+#ifdef VK_KHR_shader_float_controls
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+void marshal_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->shaderTerminateInvocation, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderTerminateInvocation, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+void marshal_VkFragmentShadingRateAttachmentInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFragmentShadingRateAttachmentInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkAttachmentReference2(vkStream, rootType, (const VkAttachmentReference2*)(forMarshaling->pFragmentShadingRateAttachment));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->shadingRateAttachmentTexelSize));
+}
+
+void unmarshal_VkFragmentShadingRateAttachmentInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkFragmentShadingRateAttachmentInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkAttachmentReference2(vkStream, rootType, (VkAttachmentReference2*)(forUnmarshaling->pFragmentShadingRateAttachment));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->shadingRateAttachmentTexelSize));
+}
+
+void marshal_VkPipelineFragmentShadingRateStateCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineFragmentShadingRateStateCreateInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->fragmentSize));
+    vkStream->write((VkFragmentShadingRateCombinerOpKHR*)forMarshaling->combinerOps, 2 * sizeof(VkFragmentShadingRateCombinerOpKHR));
+}
+
+void unmarshal_VkPipelineFragmentShadingRateStateCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineFragmentShadingRateStateCreateInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->fragmentSize));
+    vkStream->read((VkFragmentShadingRateCombinerOpKHR*)forUnmarshaling->combinerOps, 2 * sizeof(VkFragmentShadingRateCombinerOpKHR));
+}
+
+void marshal_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateFeaturesKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->pipelineFragmentShadingRate, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->primitiveFragmentShadingRate, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->attachmentFragmentShadingRate, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentShadingRateFeaturesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->pipelineFragmentShadingRate, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->primitiveFragmentShadingRate, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->attachmentFragmentShadingRate, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRatePropertiesKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->minFragmentShadingRateAttachmentTexelSize));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxFragmentShadingRateAttachmentTexelSize));
+    vkStream->write((uint32_t*)&forMarshaling->maxFragmentShadingRateAttachmentTexelSizeAspectRatio, sizeof(uint32_t));
+    vkStream->write((VkBool32*)&forMarshaling->primitiveFragmentShadingRateWithMultipleViewports, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->layeredShadingRateAttachments, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->fragmentShadingRateNonTrivialCombinerOps, sizeof(VkBool32));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxFragmentSize));
+    vkStream->write((uint32_t*)&forMarshaling->maxFragmentSizeAspectRatio, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxFragmentShadingRateCoverageSamples, sizeof(uint32_t));
+    vkStream->write((VkSampleCountFlagBits*)&forMarshaling->maxFragmentShadingRateRasterizationSamples, sizeof(VkSampleCountFlagBits));
+    vkStream->write((VkBool32*)&forMarshaling->fragmentShadingRateWithShaderDepthStencilWrites, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->fragmentShadingRateWithSampleMask, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->fragmentShadingRateWithShaderSampleMask, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->fragmentShadingRateWithConservativeRasterization, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->fragmentShadingRateWithFragmentShaderInterlock, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->fragmentShadingRateWithCustomSampleLocations, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->fragmentShadingRateStrictMultiplyCombiner, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentShadingRatePropertiesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->minFragmentShadingRateAttachmentTexelSize));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->maxFragmentShadingRateAttachmentTexelSize));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxFragmentShadingRateAttachmentTexelSizeAspectRatio, sizeof(uint32_t));
+    vkStream->read((VkBool32*)&forUnmarshaling->primitiveFragmentShadingRateWithMultipleViewports, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->layeredShadingRateAttachments, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentShadingRateNonTrivialCombinerOps, sizeof(VkBool32));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->maxFragmentSize));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxFragmentSizeAspectRatio, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxFragmentShadingRateCoverageSamples, sizeof(uint32_t));
+    vkStream->read((VkSampleCountFlagBits*)&forUnmarshaling->maxFragmentShadingRateRasterizationSamples, sizeof(VkSampleCountFlagBits));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentShadingRateWithShaderDepthStencilWrites, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentShadingRateWithSampleMask, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentShadingRateWithShaderSampleMask, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentShadingRateWithConservativeRasterization, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentShadingRateWithFragmentShaderInterlock, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentShadingRateWithCustomSampleLocations, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentShadingRateStrictMultiplyCombiner, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceFragmentShadingRateKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkSampleCountFlags*)&forMarshaling->sampleCounts, sizeof(VkSampleCountFlags));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->fragmentSize));
+}
+
+void unmarshal_VkPhysicalDeviceFragmentShadingRateKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentShadingRateKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkSampleCountFlags*)&forUnmarshaling->sampleCounts, sizeof(VkSampleCountFlags));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->fragmentSize));
+}
+
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+void marshal_VkSurfaceProtectedCapabilitiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceProtectedCapabilitiesKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->supportsProtected, sizeof(VkBool32));
+}
+
+void unmarshal_VkSurfaceProtectedCapabilitiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSurfaceProtectedCapabilitiesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->supportsProtected, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+#endif
+#ifdef VK_KHR_buffer_device_address
+#endif
+#ifdef VK_KHR_deferred_host_operations
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+void marshal_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->pipelineExecutableInfo, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->pipelineExecutableInfo, sizeof(VkBool32));
+}
+
+void marshal_VkPipelineInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkPipeline_u64(&forMarshaling->pipeline, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+}
+
+void unmarshal_VkPipelineInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkPipeline(&cgen_var_0, (VkPipeline*)&forUnmarshaling->pipeline, 1);
+}
+
+void marshal_VkPipelineExecutablePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutablePropertiesKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkShaderStageFlags*)&forMarshaling->stages, sizeof(VkShaderStageFlags));
+    vkStream->write((char*)forMarshaling->name, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->write((char*)forMarshaling->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->write((uint32_t*)&forMarshaling->subgroupSize, sizeof(uint32_t));
+}
+
+void unmarshal_VkPipelineExecutablePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineExecutablePropertiesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkShaderStageFlags*)&forUnmarshaling->stages, sizeof(VkShaderStageFlags));
+    vkStream->read((char*)forUnmarshaling->name, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->read((char*)forUnmarshaling->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->read((uint32_t*)&forUnmarshaling->subgroupSize, sizeof(uint32_t));
+}
+
+void marshal_VkPipelineExecutableInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutableInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkPipeline_u64(&forMarshaling->pipeline, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((uint32_t*)&forMarshaling->executableIndex, sizeof(uint32_t));
+}
+
+void unmarshal_VkPipelineExecutableInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineExecutableInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkPipeline(&cgen_var_0, (VkPipeline*)&forUnmarshaling->pipeline, 1);
+    vkStream->read((uint32_t*)&forUnmarshaling->executableIndex, sizeof(uint32_t));
+}
+
+void marshal_VkPipelineExecutableStatisticValueKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutableStatisticValueKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkBool32*)&forMarshaling->b32, sizeof(VkBool32));
+}
+
+void unmarshal_VkPipelineExecutableStatisticValueKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineExecutableStatisticValueKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkBool32*)&forUnmarshaling->b32, sizeof(VkBool32));
+}
+
+void marshal_VkPipelineExecutableStatisticKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutableStatisticKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((char*)forMarshaling->name, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->write((char*)forMarshaling->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->write((VkPipelineExecutableStatisticFormatKHR*)&forMarshaling->format, sizeof(VkPipelineExecutableStatisticFormatKHR));
+    marshal_VkPipelineExecutableStatisticValueKHR(vkStream, rootType, (VkPipelineExecutableStatisticValueKHR*)(&forMarshaling->value));
+}
+
+void unmarshal_VkPipelineExecutableStatisticKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineExecutableStatisticKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((char*)forUnmarshaling->name, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->read((char*)forUnmarshaling->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->read((VkPipelineExecutableStatisticFormatKHR*)&forUnmarshaling->format, sizeof(VkPipelineExecutableStatisticFormatKHR));
+    unmarshal_VkPipelineExecutableStatisticValueKHR(vkStream, rootType, (VkPipelineExecutableStatisticValueKHR*)(&forUnmarshaling->value));
+}
+
+void marshal_VkPipelineExecutableInternalRepresentationKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutableInternalRepresentationKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((char*)forMarshaling->name, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->write((char*)forMarshaling->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->write((VkBool32*)&forMarshaling->isText, sizeof(VkBool32));
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->dataSize;
+    vkStream->putBe64(cgen_var_0);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pData;
+    vkStream->putBe64(cgen_var_1);
+    if (forMarshaling->pData)
+    {
+        vkStream->write((void*)forMarshaling->pData, forMarshaling->dataSize * sizeof(uint8_t));
+    }
+}
+
+void unmarshal_VkPipelineExecutableInternalRepresentationKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineExecutableInternalRepresentationKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((char*)forUnmarshaling->name, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->read((char*)forUnmarshaling->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->read((VkBool32*)&forUnmarshaling->isText, sizeof(VkBool32));
+    forUnmarshaling->dataSize = (size_t)vkStream->getBe64();
+    // WARNING PTR CHECK
+    void* check_pData;
+    check_pData = (void*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pData)
+    {
+        if (!(check_pData))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pData inconsistent between guest and host\n");
+        }
+        vkStream->read((void*)forUnmarshaling->pData, forUnmarshaling->dataSize * sizeof(uint8_t));
+    }
+}
+
+#endif
+#ifdef VK_KHR_pipeline_library
+void marshal_VkPipelineLibraryCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineLibraryCreateInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->libraryCount, sizeof(uint32_t));
+    if (forMarshaling->libraryCount)
+    {
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forMarshaling->libraryCount * 8);
+        vkStream->handleMapping()->mapHandles_VkPipeline_u64(forMarshaling->pLibraries, cgen_var_0, forMarshaling->libraryCount);
+        vkStream->write((uint64_t*)cgen_var_0, forMarshaling->libraryCount * 8);
+    }
+}
+
+void unmarshal_VkPipelineLibraryCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineLibraryCreateInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->libraryCount, sizeof(uint32_t));
+    if (forUnmarshaling->libraryCount)
+    {
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forUnmarshaling->libraryCount * 8);
+        vkStream->read((uint64_t*)cgen_var_0, forUnmarshaling->libraryCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkPipeline(cgen_var_0, (VkPipeline*)forUnmarshaling->pLibraries, forUnmarshaling->libraryCount);
+    }
+}
+
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+void marshal_VkBufferCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferCopy2KHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDeviceSize*)&forMarshaling->srcOffset, sizeof(VkDeviceSize));
+    vkStream->write((VkDeviceSize*)&forMarshaling->dstOffset, sizeof(VkDeviceSize));
+    vkStream->write((VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+}
+
+void unmarshal_VkBufferCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBufferCopy2KHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->srcOffset, sizeof(VkDeviceSize));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->dstOffset, sizeof(VkDeviceSize));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->size, sizeof(VkDeviceSize));
+}
+
+void marshal_VkCopyBufferInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyBufferInfo2KHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->srcBuffer, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->dstBuffer, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->write((uint32_t*)&forMarshaling->regionCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->regionCount; ++i)
+        {
+            marshal_VkBufferCopy2KHR(vkStream, rootType, (const VkBufferCopy2KHR*)(forMarshaling->pRegions + i));
+        }
+    }
+}
+
+void unmarshal_VkCopyBufferInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCopyBufferInfo2KHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_0, (VkBuffer*)&forUnmarshaling->srcBuffer, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_1, (VkBuffer*)&forUnmarshaling->dstBuffer, 1);
+    vkStream->read((uint32_t*)&forUnmarshaling->regionCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->regionCount; ++i)
+        {
+            unmarshal_VkBufferCopy2KHR(vkStream, rootType, (VkBufferCopy2KHR*)(forUnmarshaling->pRegions + i));
+        }
+    }
+}
+
+void marshal_VkImageCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageCopy2KHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->srcSubresource));
+    marshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->srcOffset));
+    marshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->dstSubresource));
+    marshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->dstOffset));
+    marshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->extent));
+}
+
+void unmarshal_VkImageCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageCopy2KHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forUnmarshaling->srcSubresource));
+    unmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forUnmarshaling->srcOffset));
+    unmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forUnmarshaling->dstSubresource));
+    unmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forUnmarshaling->dstOffset));
+    unmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forUnmarshaling->extent));
+}
+
+void marshal_VkCopyImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyImageInfo2KHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->srcImage, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((VkImageLayout*)&forMarshaling->srcImageLayout, sizeof(VkImageLayout));
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->dstImage, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->write((VkImageLayout*)&forMarshaling->dstImageLayout, sizeof(VkImageLayout));
+    vkStream->write((uint32_t*)&forMarshaling->regionCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->regionCount; ++i)
+        {
+            marshal_VkImageCopy2KHR(vkStream, rootType, (const VkImageCopy2KHR*)(forMarshaling->pRegions + i));
+        }
+    }
+}
+
+void unmarshal_VkCopyImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCopyImageInfo2KHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_0, (VkImage*)&forUnmarshaling->srcImage, 1);
+    vkStream->read((VkImageLayout*)&forUnmarshaling->srcImageLayout, sizeof(VkImageLayout));
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_1, (VkImage*)&forUnmarshaling->dstImage, 1);
+    vkStream->read((VkImageLayout*)&forUnmarshaling->dstImageLayout, sizeof(VkImageLayout));
+    vkStream->read((uint32_t*)&forUnmarshaling->regionCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->regionCount; ++i)
+        {
+            unmarshal_VkImageCopy2KHR(vkStream, rootType, (VkImageCopy2KHR*)(forUnmarshaling->pRegions + i));
+        }
+    }
+}
+
+void marshal_VkBufferImageCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferImageCopy2KHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDeviceSize*)&forMarshaling->bufferOffset, sizeof(VkDeviceSize));
+    vkStream->write((uint32_t*)&forMarshaling->bufferRowLength, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->bufferImageHeight, sizeof(uint32_t));
+    marshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->imageSubresource));
+    marshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->imageOffset));
+    marshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->imageExtent));
+}
+
+void unmarshal_VkBufferImageCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBufferImageCopy2KHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->bufferOffset, sizeof(VkDeviceSize));
+    vkStream->read((uint32_t*)&forUnmarshaling->bufferRowLength, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->bufferImageHeight, sizeof(uint32_t));
+    unmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forUnmarshaling->imageSubresource));
+    unmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forUnmarshaling->imageOffset));
+    unmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forUnmarshaling->imageExtent));
+}
+
+void marshal_VkCopyBufferToImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyBufferToImageInfo2KHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->srcBuffer, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->dstImage, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->write((VkImageLayout*)&forMarshaling->dstImageLayout, sizeof(VkImageLayout));
+    vkStream->write((uint32_t*)&forMarshaling->regionCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->regionCount; ++i)
+        {
+            marshal_VkBufferImageCopy2KHR(vkStream, rootType, (const VkBufferImageCopy2KHR*)(forMarshaling->pRegions + i));
+        }
+    }
+}
+
+void unmarshal_VkCopyBufferToImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCopyBufferToImageInfo2KHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_0, (VkBuffer*)&forUnmarshaling->srcBuffer, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_1, (VkImage*)&forUnmarshaling->dstImage, 1);
+    vkStream->read((VkImageLayout*)&forUnmarshaling->dstImageLayout, sizeof(VkImageLayout));
+    vkStream->read((uint32_t*)&forUnmarshaling->regionCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->regionCount; ++i)
+        {
+            unmarshal_VkBufferImageCopy2KHR(vkStream, rootType, (VkBufferImageCopy2KHR*)(forUnmarshaling->pRegions + i));
+        }
+    }
+}
+
+void marshal_VkCopyImageToBufferInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyImageToBufferInfo2KHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->srcImage, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((VkImageLayout*)&forMarshaling->srcImageLayout, sizeof(VkImageLayout));
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->dstBuffer, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->write((uint32_t*)&forMarshaling->regionCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->regionCount; ++i)
+        {
+            marshal_VkBufferImageCopy2KHR(vkStream, rootType, (const VkBufferImageCopy2KHR*)(forMarshaling->pRegions + i));
+        }
+    }
+}
+
+void unmarshal_VkCopyImageToBufferInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCopyImageToBufferInfo2KHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_0, (VkImage*)&forUnmarshaling->srcImage, 1);
+    vkStream->read((VkImageLayout*)&forUnmarshaling->srcImageLayout, sizeof(VkImageLayout));
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_1, (VkBuffer*)&forUnmarshaling->dstBuffer, 1);
+    vkStream->read((uint32_t*)&forUnmarshaling->regionCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->regionCount; ++i)
+        {
+            unmarshal_VkBufferImageCopy2KHR(vkStream, rootType, (VkBufferImageCopy2KHR*)(forUnmarshaling->pRegions + i));
+        }
+    }
+}
+
+void marshal_VkImageBlit2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageBlit2KHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->srcSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        marshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(forMarshaling->srcOffsets + i));
+    }
+    marshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->dstSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        marshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(forMarshaling->dstOffsets + i));
+    }
+}
+
+void unmarshal_VkImageBlit2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageBlit2KHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forUnmarshaling->srcSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        unmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(forUnmarshaling->srcOffsets + i));
+    }
+    unmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forUnmarshaling->dstSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        unmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(forUnmarshaling->dstOffsets + i));
+    }
+}
+
+void marshal_VkBlitImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBlitImageInfo2KHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->srcImage, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((VkImageLayout*)&forMarshaling->srcImageLayout, sizeof(VkImageLayout));
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->dstImage, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->write((VkImageLayout*)&forMarshaling->dstImageLayout, sizeof(VkImageLayout));
+    vkStream->write((uint32_t*)&forMarshaling->regionCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->regionCount; ++i)
+        {
+            marshal_VkImageBlit2KHR(vkStream, rootType, (const VkImageBlit2KHR*)(forMarshaling->pRegions + i));
+        }
+    }
+    vkStream->write((VkFilter*)&forMarshaling->filter, sizeof(VkFilter));
+}
+
+void unmarshal_VkBlitImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBlitImageInfo2KHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_0, (VkImage*)&forUnmarshaling->srcImage, 1);
+    vkStream->read((VkImageLayout*)&forUnmarshaling->srcImageLayout, sizeof(VkImageLayout));
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_1, (VkImage*)&forUnmarshaling->dstImage, 1);
+    vkStream->read((VkImageLayout*)&forUnmarshaling->dstImageLayout, sizeof(VkImageLayout));
+    vkStream->read((uint32_t*)&forUnmarshaling->regionCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->regionCount; ++i)
+        {
+            unmarshal_VkImageBlit2KHR(vkStream, rootType, (VkImageBlit2KHR*)(forUnmarshaling->pRegions + i));
+        }
+    }
+    vkStream->read((VkFilter*)&forUnmarshaling->filter, sizeof(VkFilter));
+}
+
+void marshal_VkImageResolve2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageResolve2KHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->srcSubresource));
+    marshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->srcOffset));
+    marshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->dstSubresource));
+    marshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->dstOffset));
+    marshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->extent));
+}
+
+void unmarshal_VkImageResolve2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageResolve2KHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forUnmarshaling->srcSubresource));
+    unmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forUnmarshaling->srcOffset));
+    unmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forUnmarshaling->dstSubresource));
+    unmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forUnmarshaling->dstOffset));
+    unmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forUnmarshaling->extent));
+}
+
+void marshal_VkResolveImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkResolveImageInfo2KHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->srcImage, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((VkImageLayout*)&forMarshaling->srcImageLayout, sizeof(VkImageLayout));
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->dstImage, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->write((VkImageLayout*)&forMarshaling->dstImageLayout, sizeof(VkImageLayout));
+    vkStream->write((uint32_t*)&forMarshaling->regionCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->regionCount; ++i)
+        {
+            marshal_VkImageResolve2KHR(vkStream, rootType, (const VkImageResolve2KHR*)(forMarshaling->pRegions + i));
+        }
+    }
+}
+
+void unmarshal_VkResolveImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkResolveImageInfo2KHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_0, (VkImage*)&forUnmarshaling->srcImage, 1);
+    vkStream->read((VkImageLayout*)&forUnmarshaling->srcImageLayout, sizeof(VkImageLayout));
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_1, (VkImage*)&forUnmarshaling->dstImage, 1);
+    vkStream->read((VkImageLayout*)&forUnmarshaling->dstImageLayout, sizeof(VkImageLayout));
+    vkStream->read((uint32_t*)&forUnmarshaling->regionCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->regionCount; ++i)
+        {
+            unmarshal_VkImageResolve2KHR(vkStream, rootType, (VkImageResolve2KHR*)(forUnmarshaling->pRegions + i));
+        }
+    }
 }
 
 #endif
 #ifdef VK_ANDROID_native_buffer
 void marshal_VkNativeBufferANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkNativeBufferANDROID* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     // WARNING PTR CHECK
-    uint64_t cgen_var_278 = (uint64_t)(uintptr_t)forMarshaling->handle;
-    vkStream->putBe64(cgen_var_278);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->handle;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->handle)
     {
         vkStream->write((const uint32_t*)forMarshaling->handle, sizeof(const uint32_t));
@@ -9596,17 +13181,16 @@
 
 void unmarshal_VkNativeBufferANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkNativeBufferANDROID* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     // WARNING PTR CHECK
     const uint32_t* check_handle;
     check_handle = (const uint32_t*)(uintptr_t)vkStream->getBe64();
@@ -9629,22 +13213,22 @@
 #ifdef VK_EXT_debug_report
 void marshal_VkDebugReportCallbackCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDebugReportCallbackCreateInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkDebugReportFlagsEXT*)&forMarshaling->flags, sizeof(VkDebugReportFlagsEXT));
-    uint64_t cgen_var_280 = (uint64_t)forMarshaling->pfnCallback;
-    vkStream->putBe64(cgen_var_280);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->pfnCallback;
+    vkStream->putBe64(cgen_var_0);
     // WARNING PTR CHECK
-    uint64_t cgen_var_281 = (uint64_t)(uintptr_t)forMarshaling->pUserData;
-    vkStream->putBe64(cgen_var_281);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pUserData;
+    vkStream->putBe64(cgen_var_1);
     if (forMarshaling->pUserData)
     {
         vkStream->write((void*)forMarshaling->pUserData, sizeof(uint8_t));
@@ -9653,17 +13237,16 @@
 
 void unmarshal_VkDebugReportCallbackCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDebugReportCallbackCreateInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkDebugReportFlagsEXT*)&forUnmarshaling->flags, sizeof(VkDebugReportFlagsEXT));
     forUnmarshaling->pfnCallback = (PFN_vkDebugReportCallbackEXT)vkStream->getBe64();
     // WARNING PTR CHECK
@@ -9689,32 +13272,31 @@
 #ifdef VK_AMD_rasterization_order
 void marshal_VkPipelineRasterizationStateRasterizationOrderAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineRasterizationStateRasterizationOrderAMD* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkRasterizationOrderAMD*)&forMarshaling->rasterizationOrder, sizeof(VkRasterizationOrderAMD));
 }
 
 void unmarshal_VkPipelineRasterizationStateRasterizationOrderAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineRasterizationStateRasterizationOrderAMD* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkRasterizationOrderAMD*)&forUnmarshaling->rasterizationOrder, sizeof(VkRasterizationOrderAMD));
 }
 
@@ -9726,16 +13308,16 @@
 #ifdef VK_EXT_debug_marker
 void marshal_VkDebugMarkerObjectNameInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDebugMarkerObjectNameInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkDebugReportObjectTypeEXT*)&forMarshaling->objectType, sizeof(VkDebugReportObjectTypeEXT));
     vkStream->write((uint64_t*)&forMarshaling->object, sizeof(uint64_t));
     vkStream->putString(forMarshaling->pObjectName);
@@ -9743,17 +13325,16 @@
 
 void unmarshal_VkDebugMarkerObjectNameInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDebugMarkerObjectNameInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkDebugReportObjectTypeEXT*)&forUnmarshaling->objectType, sizeof(VkDebugReportObjectTypeEXT));
     vkStream->read((uint64_t*)&forUnmarshaling->object, sizeof(uint64_t));
     vkStream->loadStringInPlace((char**)&forUnmarshaling->pObjectName);
@@ -9761,37 +13342,36 @@
 
 void marshal_VkDebugMarkerObjectTagInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDebugMarkerObjectTagInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkDebugReportObjectTypeEXT*)&forMarshaling->objectType, sizeof(VkDebugReportObjectTypeEXT));
     vkStream->write((uint64_t*)&forMarshaling->object, sizeof(uint64_t));
     vkStream->write((uint64_t*)&forMarshaling->tagName, sizeof(uint64_t));
-    uint64_t cgen_var_284 = (uint64_t)forMarshaling->tagSize;
-    vkStream->putBe64(cgen_var_284);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->tagSize;
+    vkStream->putBe64(cgen_var_0);
     vkStream->write((const void*)forMarshaling->pTag, forMarshaling->tagSize * sizeof(const uint8_t));
 }
 
 void unmarshal_VkDebugMarkerObjectTagInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDebugMarkerObjectTagInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkDebugReportObjectTypeEXT*)&forUnmarshaling->objectType, sizeof(VkDebugReportObjectTypeEXT));
     vkStream->read((uint64_t*)&forUnmarshaling->object, sizeof(uint64_t));
     vkStream->read((uint64_t*)&forUnmarshaling->tagName, sizeof(uint64_t));
@@ -9801,33 +13381,32 @@
 
 void marshal_VkDebugMarkerMarkerInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDebugMarkerMarkerInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->putString(forMarshaling->pMarkerName);
     vkStream->write((float*)forMarshaling->color, 4 * sizeof(float));
 }
 
 void unmarshal_VkDebugMarkerMarkerInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDebugMarkerMarkerInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->loadStringInPlace((char**)&forUnmarshaling->pMarkerName);
     vkStream->read((float*)forUnmarshaling->color, 4 * sizeof(float));
 }
@@ -9838,105 +13417,292 @@
 #ifdef VK_NV_dedicated_allocation
 void marshal_VkDedicatedAllocationImageCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDedicatedAllocationImageCreateInfoNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->dedicatedAllocation, sizeof(VkBool32));
 }
 
 void unmarshal_VkDedicatedAllocationImageCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDedicatedAllocationImageCreateInfoNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->dedicatedAllocation, sizeof(VkBool32));
 }
 
 void marshal_VkDedicatedAllocationBufferCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDedicatedAllocationBufferCreateInfoNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->dedicatedAllocation, sizeof(VkBool32));
 }
 
 void unmarshal_VkDedicatedAllocationBufferCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDedicatedAllocationBufferCreateInfoNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->dedicatedAllocation, sizeof(VkBool32));
 }
 
 void marshal_VkDedicatedAllocationMemoryAllocateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDedicatedAllocationMemoryAllocateInfoNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_286;
-    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_286, 1);
-    vkStream->write((uint64_t*)&cgen_var_286, 1 * 8);
-    uint64_t cgen_var_287;
-    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_287, 1);
-    vkStream->write((uint64_t*)&cgen_var_287, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkImage_u64(&forMarshaling->image, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
 }
 
 void unmarshal_VkDedicatedAllocationMemoryAllocateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDedicatedAllocationMemoryAllocateInfoNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_288;
-    vkStream->read((uint64_t*)&cgen_var_288, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_288, (VkImage*)&forUnmarshaling->image, 1);
-    uint64_t cgen_var_289;
-    vkStream->read((uint64_t*)&cgen_var_289, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_289, (VkBuffer*)&forUnmarshaling->buffer, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImage(&cgen_var_0, (VkImage*)&forUnmarshaling->image, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_1, (VkBuffer*)&forUnmarshaling->buffer, 1);
+}
+
+#endif
+#ifdef VK_EXT_transform_feedback
+void marshal_VkPhysicalDeviceTransformFeedbackFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTransformFeedbackFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->transformFeedback, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->geometryStreams, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceTransformFeedbackFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceTransformFeedbackFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->transformFeedback, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->geometryStreams, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceTransformFeedbackPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTransformFeedbackPropertiesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->maxTransformFeedbackStreams, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxTransformFeedbackBuffers, sizeof(uint32_t));
+    vkStream->write((VkDeviceSize*)&forMarshaling->maxTransformFeedbackBufferSize, sizeof(VkDeviceSize));
+    vkStream->write((uint32_t*)&forMarshaling->maxTransformFeedbackStreamDataSize, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxTransformFeedbackBufferDataSize, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxTransformFeedbackBufferDataStride, sizeof(uint32_t));
+    vkStream->write((VkBool32*)&forMarshaling->transformFeedbackQueries, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->transformFeedbackStreamsLinesTriangles, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->transformFeedbackRasterizationStreamSelect, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->transformFeedbackDraw, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceTransformFeedbackPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceTransformFeedbackPropertiesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxTransformFeedbackStreams, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxTransformFeedbackBuffers, sizeof(uint32_t));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->maxTransformFeedbackBufferSize, sizeof(VkDeviceSize));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxTransformFeedbackStreamDataSize, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxTransformFeedbackBufferDataSize, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxTransformFeedbackBufferDataStride, sizeof(uint32_t));
+    vkStream->read((VkBool32*)&forUnmarshaling->transformFeedbackQueries, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->transformFeedbackStreamsLinesTriangles, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->transformFeedbackRasterizationStreamSelect, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->transformFeedbackDraw, sizeof(VkBool32));
+}
+
+void marshal_VkPipelineRasterizationStateStreamCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateStreamCreateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkPipelineRasterizationStateStreamCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkPipelineRasterizationStateStreamCreateFlagsEXT));
+    vkStream->write((uint32_t*)&forMarshaling->rasterizationStream, sizeof(uint32_t));
+}
+
+void unmarshal_VkPipelineRasterizationStateStreamCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineRasterizationStateStreamCreateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkPipelineRasterizationStateStreamCreateFlagsEXT*)&forUnmarshaling->flags, sizeof(VkPipelineRasterizationStateStreamCreateFlagsEXT));
+    vkStream->read((uint32_t*)&forUnmarshaling->rasterizationStream, sizeof(uint32_t));
+}
+
+#endif
+#ifdef VK_NVX_image_view_handle
+void marshal_VkImageViewHandleInfoNVX(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageViewHandleInfoNVX* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkImageView_u64(&forMarshaling->imageView, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((VkDescriptorType*)&forMarshaling->descriptorType, sizeof(VkDescriptorType));
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkSampler_u64(&forMarshaling->sampler, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+}
+
+void unmarshal_VkImageViewHandleInfoNVX(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageViewHandleInfoNVX* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkImageView(&cgen_var_0, (VkImageView*)&forUnmarshaling->imageView, 1);
+    vkStream->read((VkDescriptorType*)&forUnmarshaling->descriptorType, sizeof(VkDescriptorType));
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkSampler(&cgen_var_1, (VkSampler*)&forUnmarshaling->sampler, 1);
+}
+
+void marshal_VkImageViewAddressPropertiesNVX(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageViewAddressPropertiesNVX* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDeviceAddress*)&forMarshaling->deviceAddress, sizeof(VkDeviceAddress));
+    vkStream->write((VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+}
+
+void unmarshal_VkImageViewAddressPropertiesNVX(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageViewAddressPropertiesNVX* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDeviceAddress*)&forUnmarshaling->deviceAddress, sizeof(VkDeviceAddress));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->size, sizeof(VkDeviceSize));
 }
 
 #endif
@@ -9951,32 +13717,31 @@
 #ifdef VK_AMD_texture_gather_bias_lod
 void marshal_VkTextureLODGatherFormatPropertiesAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkTextureLODGatherFormatPropertiesAMD* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->supportsTextureGatherLODBiasAMD, sizeof(VkBool32));
 }
 
 void unmarshal_VkTextureLODGatherFormatPropertiesAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkTextureLODGatherFormatPropertiesAMD* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->supportsTextureGatherLODBiasAMD, sizeof(VkBool32));
 }
 
@@ -9984,21 +13749,25 @@
 #ifdef VK_AMD_shader_info
 void marshal_VkShaderResourceUsageAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkShaderResourceUsageAMD* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->numUsedVgprs, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->numUsedSgprs, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->ldsSizePerLocalWorkGroup, sizeof(uint32_t));
-    uint64_t cgen_var_290 = (uint64_t)forMarshaling->ldsUsageSizeInBytes;
-    vkStream->putBe64(cgen_var_290);
-    uint64_t cgen_var_291 = (uint64_t)forMarshaling->scratchMemUsageInBytes;
-    vkStream->putBe64(cgen_var_291);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->ldsUsageSizeInBytes;
+    vkStream->putBe64(cgen_var_0);
+    uint64_t cgen_var_1 = (uint64_t)forMarshaling->scratchMemUsageInBytes;
+    vkStream->putBe64(cgen_var_1);
 }
 
 void unmarshal_VkShaderResourceUsageAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkShaderResourceUsageAMD* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->numUsedVgprs, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->numUsedSgprs, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->ldsSizePerLocalWorkGroup, sizeof(uint32_t));
@@ -10008,10 +13777,12 @@
 
 void marshal_VkShaderStatisticsInfoAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkShaderStatisticsInfoAMD* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkShaderStageFlags*)&forMarshaling->shaderStageMask, sizeof(VkShaderStageFlags));
-    marshal_VkShaderResourceUsageAMD(vkStream, (VkShaderResourceUsageAMD*)(&forMarshaling->resourceUsage));
+    marshal_VkShaderResourceUsageAMD(vkStream, rootType, (VkShaderResourceUsageAMD*)(&forMarshaling->resourceUsage));
     vkStream->write((uint32_t*)&forMarshaling->numPhysicalVgprs, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->numPhysicalSgprs, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->numAvailableVgprs, sizeof(uint32_t));
@@ -10021,10 +13792,12 @@
 
 void unmarshal_VkShaderStatisticsInfoAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkShaderStatisticsInfoAMD* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkShaderStageFlags*)&forUnmarshaling->shaderStageMask, sizeof(VkShaderStageFlags));
-    unmarshal_VkShaderResourceUsageAMD(vkStream, (VkShaderResourceUsageAMD*)(&forUnmarshaling->resourceUsage));
+    unmarshal_VkShaderResourceUsageAMD(vkStream, rootType, (VkShaderResourceUsageAMD*)(&forUnmarshaling->resourceUsage));
     vkStream->read((uint32_t*)&forUnmarshaling->numPhysicalVgprs, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->numPhysicalSgprs, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->numAvailableVgprs, sizeof(uint32_t));
@@ -10035,14 +13808,82 @@
 #endif
 #ifdef VK_AMD_shader_image_load_store_lod
 #endif
+#ifdef VK_GGP_stream_descriptor_surface
+void marshal_VkStreamDescriptorSurfaceCreateInfoGGP(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkStreamDescriptorSurfaceCreateInfoGGP* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkStreamDescriptorSurfaceCreateFlagsGGP*)&forMarshaling->flags, sizeof(VkStreamDescriptorSurfaceCreateFlagsGGP));
+    vkStream->write((GgpStreamDescriptor*)&forMarshaling->streamDescriptor, sizeof(GgpStreamDescriptor));
+}
+
+void unmarshal_VkStreamDescriptorSurfaceCreateInfoGGP(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkStreamDescriptorSurfaceCreateInfoGGP* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkStreamDescriptorSurfaceCreateFlagsGGP*)&forUnmarshaling->flags, sizeof(VkStreamDescriptorSurfaceCreateFlagsGGP));
+    vkStream->read((GgpStreamDescriptor*)&forUnmarshaling->streamDescriptor, sizeof(GgpStreamDescriptor));
+}
+
+#endif
+#ifdef VK_NV_corner_sampled_image
+void marshal_VkPhysicalDeviceCornerSampledImageFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCornerSampledImageFeaturesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->cornerSampledImage, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceCornerSampledImageFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceCornerSampledImageFeaturesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->cornerSampledImage, sizeof(VkBool32));
+}
+
+#endif
 #ifdef VK_IMG_format_pvrtc
 #endif
 #ifdef VK_NV_external_memory_capabilities
 void marshal_VkExternalImageFormatPropertiesNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalImageFormatPropertiesNV* forMarshaling)
 {
-    marshal_VkImageFormatProperties(vkStream, (VkImageFormatProperties*)(&forMarshaling->imageFormatProperties));
+    (void)rootType;
+    marshal_VkImageFormatProperties(vkStream, rootType, (VkImageFormatProperties*)(&forMarshaling->imageFormatProperties));
     vkStream->write((VkExternalMemoryFeatureFlagsNV*)&forMarshaling->externalMemoryFeatures, sizeof(VkExternalMemoryFeatureFlagsNV));
     vkStream->write((VkExternalMemoryHandleTypeFlagsNV*)&forMarshaling->exportFromImportedHandleTypes, sizeof(VkExternalMemoryHandleTypeFlagsNV));
     vkStream->write((VkExternalMemoryHandleTypeFlagsNV*)&forMarshaling->compatibleHandleTypes, sizeof(VkExternalMemoryHandleTypeFlagsNV));
@@ -10050,9 +13891,11 @@
 
 void unmarshal_VkExternalImageFormatPropertiesNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalImageFormatPropertiesNV* forUnmarshaling)
 {
-    unmarshal_VkImageFormatProperties(vkStream, (VkImageFormatProperties*)(&forUnmarshaling->imageFormatProperties));
+    (void)rootType;
+    unmarshal_VkImageFormatProperties(vkStream, rootType, (VkImageFormatProperties*)(&forUnmarshaling->imageFormatProperties));
     vkStream->read((VkExternalMemoryFeatureFlagsNV*)&forUnmarshaling->externalMemoryFeatures, sizeof(VkExternalMemoryFeatureFlagsNV));
     vkStream->read((VkExternalMemoryHandleTypeFlagsNV*)&forUnmarshaling->exportFromImportedHandleTypes, sizeof(VkExternalMemoryHandleTypeFlagsNV));
     vkStream->read((VkExternalMemoryHandleTypeFlagsNV*)&forUnmarshaling->compatibleHandleTypes, sizeof(VkExternalMemoryHandleTypeFlagsNV));
@@ -10062,63 +13905,61 @@
 #ifdef VK_NV_external_memory
 void marshal_VkExternalMemoryImageCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalMemoryImageCreateInfoNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkExternalMemoryHandleTypeFlagsNV*)&forMarshaling->handleTypes, sizeof(VkExternalMemoryHandleTypeFlagsNV));
 }
 
 void unmarshal_VkExternalMemoryImageCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalMemoryImageCreateInfoNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkExternalMemoryHandleTypeFlagsNV*)&forUnmarshaling->handleTypes, sizeof(VkExternalMemoryHandleTypeFlagsNV));
 }
 
 void marshal_VkExportMemoryAllocateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExportMemoryAllocateInfoNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkExternalMemoryHandleTypeFlagsNV*)&forMarshaling->handleTypes, sizeof(VkExternalMemoryHandleTypeFlagsNV));
 }
 
 void unmarshal_VkExportMemoryAllocateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExportMemoryAllocateInfoNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkExternalMemoryHandleTypeFlagsNV*)&forUnmarshaling->handleTypes, sizeof(VkExternalMemoryHandleTypeFlagsNV));
 }
 
@@ -10126,52 +13967,51 @@
 #ifdef VK_NV_external_memory_win32
 void marshal_VkImportMemoryWin32HandleInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportMemoryWin32HandleInfoNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkExternalMemoryHandleTypeFlagsNV*)&forMarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagsNV));
     vkStream->write((HANDLE*)&forMarshaling->handle, sizeof(HANDLE));
 }
 
 void unmarshal_VkImportMemoryWin32HandleInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportMemoryWin32HandleInfoNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkExternalMemoryHandleTypeFlagsNV*)&forUnmarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagsNV));
     vkStream->read((HANDLE*)&forUnmarshaling->handle, sizeof(HANDLE));
 }
 
 void marshal_VkExportMemoryWin32HandleInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExportMemoryWin32HandleInfoNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     // WARNING PTR CHECK
-    uint64_t cgen_var_294 = (uint64_t)(uintptr_t)forMarshaling->pAttributes;
-    vkStream->putBe64(cgen_var_294);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pAttributes;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pAttributes)
     {
         vkStream->write((const SECURITY_ATTRIBUTES*)forMarshaling->pAttributes, sizeof(const SECURITY_ATTRIBUTES));
@@ -10181,17 +14021,16 @@
 
 void unmarshal_VkExportMemoryWin32HandleInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExportMemoryWin32HandleInfoNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     // WARNING PTR CHECK
     const SECURITY_ATTRIBUTES* check_pAttributes;
     check_pAttributes = (const SECURITY_ATTRIBUTES*)(uintptr_t)vkStream->getBe64();
@@ -10210,67 +14049,66 @@
 #ifdef VK_NV_win32_keyed_mutex
 void marshal_VkWin32KeyedMutexAcquireReleaseInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkWin32KeyedMutexAcquireReleaseInfoNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->acquireCount, sizeof(uint32_t));
     if (forMarshaling->acquireCount)
     {
-        uint64_t* cgen_var_296;
-        vkStream->alloc((void**)&cgen_var_296, forMarshaling->acquireCount * 8);
-        vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(forMarshaling->pAcquireSyncs, cgen_var_296, forMarshaling->acquireCount);
-        vkStream->write((uint64_t*)cgen_var_296, forMarshaling->acquireCount * 8);
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forMarshaling->acquireCount * 8);
+        vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(forMarshaling->pAcquireSyncs, cgen_var_0, forMarshaling->acquireCount);
+        vkStream->write((uint64_t*)cgen_var_0, forMarshaling->acquireCount * 8);
     }
     vkStream->write((const uint64_t*)forMarshaling->pAcquireKeys, forMarshaling->acquireCount * sizeof(const uint64_t));
     vkStream->write((const uint32_t*)forMarshaling->pAcquireTimeoutMilliseconds, forMarshaling->acquireCount * sizeof(const uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->releaseCount, sizeof(uint32_t));
     if (forMarshaling->releaseCount)
     {
-        uint64_t* cgen_var_297;
-        vkStream->alloc((void**)&cgen_var_297, forMarshaling->releaseCount * 8);
-        vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(forMarshaling->pReleaseSyncs, cgen_var_297, forMarshaling->releaseCount);
-        vkStream->write((uint64_t*)cgen_var_297, forMarshaling->releaseCount * 8);
+        uint64_t* cgen_var_1;
+        vkStream->alloc((void**)&cgen_var_1, forMarshaling->releaseCount * 8);
+        vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(forMarshaling->pReleaseSyncs, cgen_var_1, forMarshaling->releaseCount);
+        vkStream->write((uint64_t*)cgen_var_1, forMarshaling->releaseCount * 8);
     }
     vkStream->write((const uint64_t*)forMarshaling->pReleaseKeys, forMarshaling->releaseCount * sizeof(const uint64_t));
 }
 
 void unmarshal_VkWin32KeyedMutexAcquireReleaseInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkWin32KeyedMutexAcquireReleaseInfoNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->acquireCount, sizeof(uint32_t));
     if (forUnmarshaling->acquireCount)
     {
-        uint64_t* cgen_var_298;
-        vkStream->alloc((void**)&cgen_var_298, forUnmarshaling->acquireCount * 8);
-        vkStream->read((uint64_t*)cgen_var_298, forUnmarshaling->acquireCount * 8);
-        vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(cgen_var_298, (VkDeviceMemory*)forUnmarshaling->pAcquireSyncs, forUnmarshaling->acquireCount);
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forUnmarshaling->acquireCount * 8);
+        vkStream->read((uint64_t*)cgen_var_0, forUnmarshaling->acquireCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(cgen_var_0, (VkDeviceMemory*)forUnmarshaling->pAcquireSyncs, forUnmarshaling->acquireCount);
     }
     vkStream->read((uint64_t*)forUnmarshaling->pAcquireKeys, forUnmarshaling->acquireCount * sizeof(const uint64_t));
     vkStream->read((uint32_t*)forUnmarshaling->pAcquireTimeoutMilliseconds, forUnmarshaling->acquireCount * sizeof(const uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->releaseCount, sizeof(uint32_t));
     if (forUnmarshaling->releaseCount)
     {
-        uint64_t* cgen_var_299;
-        vkStream->alloc((void**)&cgen_var_299, forUnmarshaling->releaseCount * 8);
-        vkStream->read((uint64_t*)cgen_var_299, forUnmarshaling->releaseCount * 8);
-        vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(cgen_var_299, (VkDeviceMemory*)forUnmarshaling->pReleaseSyncs, forUnmarshaling->releaseCount);
+        uint64_t* cgen_var_1;
+        vkStream->alloc((void**)&cgen_var_1, forUnmarshaling->releaseCount * 8);
+        vkStream->read((uint64_t*)cgen_var_1, forUnmarshaling->releaseCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(cgen_var_1, (VkDeviceMemory*)forUnmarshaling->pReleaseSyncs, forUnmarshaling->releaseCount);
     }
     vkStream->read((uint64_t*)forUnmarshaling->pReleaseKeys, forUnmarshaling->releaseCount * sizeof(const uint64_t));
 }
@@ -10279,33 +14117,32 @@
 #ifdef VK_EXT_validation_flags
 void marshal_VkValidationFlagsEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkValidationFlagsEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->disabledValidationCheckCount, sizeof(uint32_t));
     vkStream->write((const VkValidationCheckEXT*)forMarshaling->pDisabledValidationChecks, forMarshaling->disabledValidationCheckCount * sizeof(const VkValidationCheckEXT));
 }
 
 void unmarshal_VkValidationFlagsEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkValidationFlagsEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->disabledValidationCheckCount, sizeof(uint32_t));
     vkStream->read((VkValidationCheckEXT*)forUnmarshaling->pDisabledValidationChecks, forUnmarshaling->disabledValidationCheckCount * sizeof(const VkValidationCheckEXT));
 }
@@ -10314,20 +14151,20 @@
 #ifdef VK_NN_vi_surface
 void marshal_VkViSurfaceCreateInfoNN(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkViSurfaceCreateInfoNN* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkViSurfaceCreateFlagsNN*)&forMarshaling->flags, sizeof(VkViSurfaceCreateFlagsNN));
     // WARNING PTR CHECK
-    uint64_t cgen_var_300 = (uint64_t)(uintptr_t)forMarshaling->window;
-    vkStream->putBe64(cgen_var_300);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->window;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->window)
     {
         vkStream->write((void*)forMarshaling->window, sizeof(uint8_t));
@@ -10336,17 +14173,16 @@
 
 void unmarshal_VkViSurfaceCreateInfoNN(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkViSurfaceCreateInfoNN* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkViSurfaceCreateFlagsNN*)&forUnmarshaling->flags, sizeof(VkViSurfaceCreateFlagsNN));
     // WARNING PTR CHECK
     void* check_window;
@@ -10366,625 +14202,264 @@
 #endif
 #ifdef VK_EXT_shader_subgroup_vote
 #endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+void marshal_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->textureCompressionASTC_HDR, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->textureCompressionASTC_HDR, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_EXT_astc_decode_mode
+void marshal_VkImageViewASTCDecodeModeEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageViewASTCDecodeModeEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkFormat*)&forMarshaling->decodeMode, sizeof(VkFormat));
+}
+
+void unmarshal_VkImageViewASTCDecodeModeEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageViewASTCDecodeModeEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkFormat*)&forUnmarshaling->decodeMode, sizeof(VkFormat));
+}
+
+void marshal_VkPhysicalDeviceASTCDecodeFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceASTCDecodeFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->decodeModeSharedExponent, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceASTCDecodeFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceASTCDecodeFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->decodeModeSharedExponent, sizeof(VkBool32));
+}
+
+#endif
 #ifdef VK_EXT_conditional_rendering
 void marshal_VkConditionalRenderingBeginInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkConditionalRenderingBeginInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_302;
-    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_302, 1);
-    vkStream->write((uint64_t*)&cgen_var_302, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
     vkStream->write((VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
     vkStream->write((VkConditionalRenderingFlagsEXT*)&forMarshaling->flags, sizeof(VkConditionalRenderingFlagsEXT));
 }
 
 void unmarshal_VkConditionalRenderingBeginInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkConditionalRenderingBeginInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_303;
-    vkStream->read((uint64_t*)&cgen_var_303, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_303, (VkBuffer*)&forUnmarshaling->buffer, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_0, (VkBuffer*)&forUnmarshaling->buffer, 1);
     vkStream->read((VkDeviceSize*)&forUnmarshaling->offset, sizeof(VkDeviceSize));
     vkStream->read((VkConditionalRenderingFlagsEXT*)&forUnmarshaling->flags, sizeof(VkConditionalRenderingFlagsEXT));
 }
 
 void marshal_VkPhysicalDeviceConditionalRenderingFeaturesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceConditionalRenderingFeaturesEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->conditionalRendering, sizeof(VkBool32));
     vkStream->write((VkBool32*)&forMarshaling->inheritedConditionalRendering, sizeof(VkBool32));
 }
 
 void unmarshal_VkPhysicalDeviceConditionalRenderingFeaturesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceConditionalRenderingFeaturesEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->conditionalRendering, sizeof(VkBool32));
     vkStream->read((VkBool32*)&forUnmarshaling->inheritedConditionalRendering, sizeof(VkBool32));
 }
 
 void marshal_VkCommandBufferInheritanceConditionalRenderingInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkCommandBufferInheritanceConditionalRenderingInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->conditionalRenderingEnable, sizeof(VkBool32));
 }
 
 void unmarshal_VkCommandBufferInheritanceConditionalRenderingInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkCommandBufferInheritanceConditionalRenderingInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->conditionalRenderingEnable, sizeof(VkBool32));
 }
 
 #endif
-#ifdef VK_NVX_device_generated_commands
-void marshal_VkDeviceGeneratedCommandsFeaturesNVX(
-    VulkanStreamGuest* vkStream,
-    const VkDeviceGeneratedCommandsFeaturesNVX* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkBool32*)&forMarshaling->computeBindingPointSupport, sizeof(VkBool32));
-}
-
-void unmarshal_VkDeviceGeneratedCommandsFeaturesNVX(
-    VulkanStreamGuest* vkStream,
-    VkDeviceGeneratedCommandsFeaturesNVX* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkBool32*)&forUnmarshaling->computeBindingPointSupport, sizeof(VkBool32));
-}
-
-void marshal_VkDeviceGeneratedCommandsLimitsNVX(
-    VulkanStreamGuest* vkStream,
-    const VkDeviceGeneratedCommandsLimitsNVX* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((uint32_t*)&forMarshaling->maxIndirectCommandsLayoutTokenCount, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxObjectEntryCounts, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->minSequenceCountBufferOffsetAlignment, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->minSequenceIndexBufferOffsetAlignment, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->minCommandsTokenBufferOffsetAlignment, sizeof(uint32_t));
-}
-
-void unmarshal_VkDeviceGeneratedCommandsLimitsNVX(
-    VulkanStreamGuest* vkStream,
-    VkDeviceGeneratedCommandsLimitsNVX* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->maxIndirectCommandsLayoutTokenCount, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxObjectEntryCounts, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->minSequenceCountBufferOffsetAlignment, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->minSequenceIndexBufferOffsetAlignment, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->minCommandsTokenBufferOffsetAlignment, sizeof(uint32_t));
-}
-
-void marshal_VkIndirectCommandsTokenNVX(
-    VulkanStreamGuest* vkStream,
-    const VkIndirectCommandsTokenNVX* forMarshaling)
-{
-    vkStream->write((VkIndirectCommandsTokenTypeNVX*)&forMarshaling->tokenType, sizeof(VkIndirectCommandsTokenTypeNVX));
-    uint64_t cgen_var_304;
-    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_304, 1);
-    vkStream->write((uint64_t*)&cgen_var_304, 1 * 8);
-    vkStream->write((VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
-}
-
-void unmarshal_VkIndirectCommandsTokenNVX(
-    VulkanStreamGuest* vkStream,
-    VkIndirectCommandsTokenNVX* forUnmarshaling)
-{
-    vkStream->read((VkIndirectCommandsTokenTypeNVX*)&forUnmarshaling->tokenType, sizeof(VkIndirectCommandsTokenTypeNVX));
-    uint64_t cgen_var_305;
-    vkStream->read((uint64_t*)&cgen_var_305, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_305, (VkBuffer*)&forUnmarshaling->buffer, 1);
-    vkStream->read((VkDeviceSize*)&forUnmarshaling->offset, sizeof(VkDeviceSize));
-}
-
-void marshal_VkIndirectCommandsLayoutTokenNVX(
-    VulkanStreamGuest* vkStream,
-    const VkIndirectCommandsLayoutTokenNVX* forMarshaling)
-{
-    vkStream->write((VkIndirectCommandsTokenTypeNVX*)&forMarshaling->tokenType, sizeof(VkIndirectCommandsTokenTypeNVX));
-    vkStream->write((uint32_t*)&forMarshaling->bindingUnit, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->dynamicCount, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->divisor, sizeof(uint32_t));
-}
-
-void unmarshal_VkIndirectCommandsLayoutTokenNVX(
-    VulkanStreamGuest* vkStream,
-    VkIndirectCommandsLayoutTokenNVX* forUnmarshaling)
-{
-    vkStream->read((VkIndirectCommandsTokenTypeNVX*)&forUnmarshaling->tokenType, sizeof(VkIndirectCommandsTokenTypeNVX));
-    vkStream->read((uint32_t*)&forUnmarshaling->bindingUnit, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->dynamicCount, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->divisor, sizeof(uint32_t));
-}
-
-void marshal_VkIndirectCommandsLayoutCreateInfoNVX(
-    VulkanStreamGuest* vkStream,
-    const VkIndirectCommandsLayoutCreateInfoNVX* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkPipelineBindPoint*)&forMarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
-    vkStream->write((VkIndirectCommandsLayoutUsageFlagsNVX*)&forMarshaling->flags, sizeof(VkIndirectCommandsLayoutUsageFlagsNVX));
-    vkStream->write((uint32_t*)&forMarshaling->tokenCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->tokenCount; ++i)
-    {
-        marshal_VkIndirectCommandsLayoutTokenNVX(vkStream, (const VkIndirectCommandsLayoutTokenNVX*)(forMarshaling->pTokens + i));
-    }
-}
-
-void unmarshal_VkIndirectCommandsLayoutCreateInfoNVX(
-    VulkanStreamGuest* vkStream,
-    VkIndirectCommandsLayoutCreateInfoNVX* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkPipelineBindPoint*)&forUnmarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
-    vkStream->read((VkIndirectCommandsLayoutUsageFlagsNVX*)&forUnmarshaling->flags, sizeof(VkIndirectCommandsLayoutUsageFlagsNVX));
-    vkStream->read((uint32_t*)&forUnmarshaling->tokenCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->tokenCount; ++i)
-    {
-        unmarshal_VkIndirectCommandsLayoutTokenNVX(vkStream, (VkIndirectCommandsLayoutTokenNVX*)(forUnmarshaling->pTokens + i));
-    }
-}
-
-void marshal_VkCmdProcessCommandsInfoNVX(
-    VulkanStreamGuest* vkStream,
-    const VkCmdProcessCommandsInfoNVX* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    uint64_t cgen_var_306;
-    vkStream->handleMapping()->mapHandles_VkObjectTableNVX_u64(&forMarshaling->objectTable, &cgen_var_306, 1);
-    vkStream->write((uint64_t*)&cgen_var_306, 1 * 8);
-    uint64_t cgen_var_307;
-    vkStream->handleMapping()->mapHandles_VkIndirectCommandsLayoutNVX_u64(&forMarshaling->indirectCommandsLayout, &cgen_var_307, 1);
-    vkStream->write((uint64_t*)&cgen_var_307, 1 * 8);
-    vkStream->write((uint32_t*)&forMarshaling->indirectCommandsTokenCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->indirectCommandsTokenCount; ++i)
-    {
-        marshal_VkIndirectCommandsTokenNVX(vkStream, (const VkIndirectCommandsTokenNVX*)(forMarshaling->pIndirectCommandsTokens + i));
-    }
-    vkStream->write((uint32_t*)&forMarshaling->maxSequencesCount, sizeof(uint32_t));
-    uint64_t cgen_var_308;
-    vkStream->handleMapping()->mapHandles_VkCommandBuffer_u64(&forMarshaling->targetCommandBuffer, &cgen_var_308, 1);
-    vkStream->write((uint64_t*)&cgen_var_308, 1 * 8);
-    uint64_t cgen_var_309;
-    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->sequencesCountBuffer, &cgen_var_309, 1);
-    vkStream->write((uint64_t*)&cgen_var_309, 1 * 8);
-    vkStream->write((VkDeviceSize*)&forMarshaling->sequencesCountOffset, sizeof(VkDeviceSize));
-    uint64_t cgen_var_310;
-    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->sequencesIndexBuffer, &cgen_var_310, 1);
-    vkStream->write((uint64_t*)&cgen_var_310, 1 * 8);
-    vkStream->write((VkDeviceSize*)&forMarshaling->sequencesIndexOffset, sizeof(VkDeviceSize));
-}
-
-void unmarshal_VkCmdProcessCommandsInfoNVX(
-    VulkanStreamGuest* vkStream,
-    VkCmdProcessCommandsInfoNVX* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    uint64_t cgen_var_311;
-    vkStream->read((uint64_t*)&cgen_var_311, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkObjectTableNVX(&cgen_var_311, (VkObjectTableNVX*)&forUnmarshaling->objectTable, 1);
-    uint64_t cgen_var_312;
-    vkStream->read((uint64_t*)&cgen_var_312, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkIndirectCommandsLayoutNVX(&cgen_var_312, (VkIndirectCommandsLayoutNVX*)&forUnmarshaling->indirectCommandsLayout, 1);
-    vkStream->read((uint32_t*)&forUnmarshaling->indirectCommandsTokenCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->indirectCommandsTokenCount; ++i)
-    {
-        unmarshal_VkIndirectCommandsTokenNVX(vkStream, (VkIndirectCommandsTokenNVX*)(forUnmarshaling->pIndirectCommandsTokens + i));
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->maxSequencesCount, sizeof(uint32_t));
-    uint64_t cgen_var_313;
-    vkStream->read((uint64_t*)&cgen_var_313, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkCommandBuffer(&cgen_var_313, (VkCommandBuffer*)&forUnmarshaling->targetCommandBuffer, 1);
-    uint64_t cgen_var_314;
-    vkStream->read((uint64_t*)&cgen_var_314, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_314, (VkBuffer*)&forUnmarshaling->sequencesCountBuffer, 1);
-    vkStream->read((VkDeviceSize*)&forUnmarshaling->sequencesCountOffset, sizeof(VkDeviceSize));
-    uint64_t cgen_var_315;
-    vkStream->read((uint64_t*)&cgen_var_315, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_315, (VkBuffer*)&forUnmarshaling->sequencesIndexBuffer, 1);
-    vkStream->read((VkDeviceSize*)&forUnmarshaling->sequencesIndexOffset, sizeof(VkDeviceSize));
-}
-
-void marshal_VkCmdReserveSpaceForCommandsInfoNVX(
-    VulkanStreamGuest* vkStream,
-    const VkCmdReserveSpaceForCommandsInfoNVX* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    uint64_t cgen_var_316;
-    vkStream->handleMapping()->mapHandles_VkObjectTableNVX_u64(&forMarshaling->objectTable, &cgen_var_316, 1);
-    vkStream->write((uint64_t*)&cgen_var_316, 1 * 8);
-    uint64_t cgen_var_317;
-    vkStream->handleMapping()->mapHandles_VkIndirectCommandsLayoutNVX_u64(&forMarshaling->indirectCommandsLayout, &cgen_var_317, 1);
-    vkStream->write((uint64_t*)&cgen_var_317, 1 * 8);
-    vkStream->write((uint32_t*)&forMarshaling->maxSequencesCount, sizeof(uint32_t));
-}
-
-void unmarshal_VkCmdReserveSpaceForCommandsInfoNVX(
-    VulkanStreamGuest* vkStream,
-    VkCmdReserveSpaceForCommandsInfoNVX* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    uint64_t cgen_var_318;
-    vkStream->read((uint64_t*)&cgen_var_318, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkObjectTableNVX(&cgen_var_318, (VkObjectTableNVX*)&forUnmarshaling->objectTable, 1);
-    uint64_t cgen_var_319;
-    vkStream->read((uint64_t*)&cgen_var_319, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkIndirectCommandsLayoutNVX(&cgen_var_319, (VkIndirectCommandsLayoutNVX*)&forUnmarshaling->indirectCommandsLayout, 1);
-    vkStream->read((uint32_t*)&forUnmarshaling->maxSequencesCount, sizeof(uint32_t));
-}
-
-void marshal_VkObjectTableCreateInfoNVX(
-    VulkanStreamGuest* vkStream,
-    const VkObjectTableCreateInfoNVX* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((uint32_t*)&forMarshaling->objectCount, sizeof(uint32_t));
-    vkStream->write((const VkObjectEntryTypeNVX*)forMarshaling->pObjectEntryTypes, forMarshaling->objectCount * sizeof(const VkObjectEntryTypeNVX));
-    vkStream->write((const uint32_t*)forMarshaling->pObjectEntryCounts, forMarshaling->objectCount * sizeof(const uint32_t));
-    vkStream->write((const VkObjectEntryUsageFlagsNVX*)forMarshaling->pObjectEntryUsageFlags, forMarshaling->objectCount * sizeof(const VkObjectEntryUsageFlagsNVX));
-    vkStream->write((uint32_t*)&forMarshaling->maxUniformBuffersPerDescriptor, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxStorageBuffersPerDescriptor, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxStorageImagesPerDescriptor, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxSampledImagesPerDescriptor, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxPipelineLayouts, sizeof(uint32_t));
-}
-
-void unmarshal_VkObjectTableCreateInfoNVX(
-    VulkanStreamGuest* vkStream,
-    VkObjectTableCreateInfoNVX* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->objectCount, sizeof(uint32_t));
-    vkStream->read((VkObjectEntryTypeNVX*)forUnmarshaling->pObjectEntryTypes, forUnmarshaling->objectCount * sizeof(const VkObjectEntryTypeNVX));
-    vkStream->read((uint32_t*)forUnmarshaling->pObjectEntryCounts, forUnmarshaling->objectCount * sizeof(const uint32_t));
-    vkStream->read((VkObjectEntryUsageFlagsNVX*)forUnmarshaling->pObjectEntryUsageFlags, forUnmarshaling->objectCount * sizeof(const VkObjectEntryUsageFlagsNVX));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxUniformBuffersPerDescriptor, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxStorageBuffersPerDescriptor, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxStorageImagesPerDescriptor, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxSampledImagesPerDescriptor, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxPipelineLayouts, sizeof(uint32_t));
-}
-
-void marshal_VkObjectTableEntryNVX(
-    VulkanStreamGuest* vkStream,
-    const VkObjectTableEntryNVX* forMarshaling)
-{
-    vkStream->write((VkObjectEntryTypeNVX*)&forMarshaling->type, sizeof(VkObjectEntryTypeNVX));
-    vkStream->write((VkObjectEntryUsageFlagsNVX*)&forMarshaling->flags, sizeof(VkObjectEntryUsageFlagsNVX));
-}
-
-void unmarshal_VkObjectTableEntryNVX(
-    VulkanStreamGuest* vkStream,
-    VkObjectTableEntryNVX* forUnmarshaling)
-{
-    vkStream->read((VkObjectEntryTypeNVX*)&forUnmarshaling->type, sizeof(VkObjectEntryTypeNVX));
-    vkStream->read((VkObjectEntryUsageFlagsNVX*)&forUnmarshaling->flags, sizeof(VkObjectEntryUsageFlagsNVX));
-}
-
-void marshal_VkObjectTablePipelineEntryNVX(
-    VulkanStreamGuest* vkStream,
-    const VkObjectTablePipelineEntryNVX* forMarshaling)
-{
-    vkStream->write((VkObjectEntryTypeNVX*)&forMarshaling->type, sizeof(VkObjectEntryTypeNVX));
-    vkStream->write((VkObjectEntryUsageFlagsNVX*)&forMarshaling->flags, sizeof(VkObjectEntryUsageFlagsNVX));
-    uint64_t cgen_var_320;
-    vkStream->handleMapping()->mapHandles_VkPipeline_u64(&forMarshaling->pipeline, &cgen_var_320, 1);
-    vkStream->write((uint64_t*)&cgen_var_320, 1 * 8);
-}
-
-void unmarshal_VkObjectTablePipelineEntryNVX(
-    VulkanStreamGuest* vkStream,
-    VkObjectTablePipelineEntryNVX* forUnmarshaling)
-{
-    vkStream->read((VkObjectEntryTypeNVX*)&forUnmarshaling->type, sizeof(VkObjectEntryTypeNVX));
-    vkStream->read((VkObjectEntryUsageFlagsNVX*)&forUnmarshaling->flags, sizeof(VkObjectEntryUsageFlagsNVX));
-    uint64_t cgen_var_321;
-    vkStream->read((uint64_t*)&cgen_var_321, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkPipeline(&cgen_var_321, (VkPipeline*)&forUnmarshaling->pipeline, 1);
-}
-
-void marshal_VkObjectTableDescriptorSetEntryNVX(
-    VulkanStreamGuest* vkStream,
-    const VkObjectTableDescriptorSetEntryNVX* forMarshaling)
-{
-    vkStream->write((VkObjectEntryTypeNVX*)&forMarshaling->type, sizeof(VkObjectEntryTypeNVX));
-    vkStream->write((VkObjectEntryUsageFlagsNVX*)&forMarshaling->flags, sizeof(VkObjectEntryUsageFlagsNVX));
-    uint64_t cgen_var_322;
-    vkStream->handleMapping()->mapHandles_VkPipelineLayout_u64(&forMarshaling->pipelineLayout, &cgen_var_322, 1);
-    vkStream->write((uint64_t*)&cgen_var_322, 1 * 8);
-    uint64_t cgen_var_323;
-    vkStream->handleMapping()->mapHandles_VkDescriptorSet_u64(&forMarshaling->descriptorSet, &cgen_var_323, 1);
-    vkStream->write((uint64_t*)&cgen_var_323, 1 * 8);
-}
-
-void unmarshal_VkObjectTableDescriptorSetEntryNVX(
-    VulkanStreamGuest* vkStream,
-    VkObjectTableDescriptorSetEntryNVX* forUnmarshaling)
-{
-    vkStream->read((VkObjectEntryTypeNVX*)&forUnmarshaling->type, sizeof(VkObjectEntryTypeNVX));
-    vkStream->read((VkObjectEntryUsageFlagsNVX*)&forUnmarshaling->flags, sizeof(VkObjectEntryUsageFlagsNVX));
-    uint64_t cgen_var_324;
-    vkStream->read((uint64_t*)&cgen_var_324, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkPipelineLayout(&cgen_var_324, (VkPipelineLayout*)&forUnmarshaling->pipelineLayout, 1);
-    uint64_t cgen_var_325;
-    vkStream->read((uint64_t*)&cgen_var_325, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDescriptorSet(&cgen_var_325, (VkDescriptorSet*)&forUnmarshaling->descriptorSet, 1);
-}
-
-void marshal_VkObjectTableVertexBufferEntryNVX(
-    VulkanStreamGuest* vkStream,
-    const VkObjectTableVertexBufferEntryNVX* forMarshaling)
-{
-    vkStream->write((VkObjectEntryTypeNVX*)&forMarshaling->type, sizeof(VkObjectEntryTypeNVX));
-    vkStream->write((VkObjectEntryUsageFlagsNVX*)&forMarshaling->flags, sizeof(VkObjectEntryUsageFlagsNVX));
-    uint64_t cgen_var_326;
-    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_326, 1);
-    vkStream->write((uint64_t*)&cgen_var_326, 1 * 8);
-}
-
-void unmarshal_VkObjectTableVertexBufferEntryNVX(
-    VulkanStreamGuest* vkStream,
-    VkObjectTableVertexBufferEntryNVX* forUnmarshaling)
-{
-    vkStream->read((VkObjectEntryTypeNVX*)&forUnmarshaling->type, sizeof(VkObjectEntryTypeNVX));
-    vkStream->read((VkObjectEntryUsageFlagsNVX*)&forUnmarshaling->flags, sizeof(VkObjectEntryUsageFlagsNVX));
-    uint64_t cgen_var_327;
-    vkStream->read((uint64_t*)&cgen_var_327, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_327, (VkBuffer*)&forUnmarshaling->buffer, 1);
-}
-
-void marshal_VkObjectTableIndexBufferEntryNVX(
-    VulkanStreamGuest* vkStream,
-    const VkObjectTableIndexBufferEntryNVX* forMarshaling)
-{
-    vkStream->write((VkObjectEntryTypeNVX*)&forMarshaling->type, sizeof(VkObjectEntryTypeNVX));
-    vkStream->write((VkObjectEntryUsageFlagsNVX*)&forMarshaling->flags, sizeof(VkObjectEntryUsageFlagsNVX));
-    uint64_t cgen_var_328;
-    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_328, 1);
-    vkStream->write((uint64_t*)&cgen_var_328, 1 * 8);
-    vkStream->write((VkIndexType*)&forMarshaling->indexType, sizeof(VkIndexType));
-}
-
-void unmarshal_VkObjectTableIndexBufferEntryNVX(
-    VulkanStreamGuest* vkStream,
-    VkObjectTableIndexBufferEntryNVX* forUnmarshaling)
-{
-    vkStream->read((VkObjectEntryTypeNVX*)&forUnmarshaling->type, sizeof(VkObjectEntryTypeNVX));
-    vkStream->read((VkObjectEntryUsageFlagsNVX*)&forUnmarshaling->flags, sizeof(VkObjectEntryUsageFlagsNVX));
-    uint64_t cgen_var_329;
-    vkStream->read((uint64_t*)&cgen_var_329, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_329, (VkBuffer*)&forUnmarshaling->buffer, 1);
-    vkStream->read((VkIndexType*)&forUnmarshaling->indexType, sizeof(VkIndexType));
-}
-
-void marshal_VkObjectTablePushConstantEntryNVX(
-    VulkanStreamGuest* vkStream,
-    const VkObjectTablePushConstantEntryNVX* forMarshaling)
-{
-    vkStream->write((VkObjectEntryTypeNVX*)&forMarshaling->type, sizeof(VkObjectEntryTypeNVX));
-    vkStream->write((VkObjectEntryUsageFlagsNVX*)&forMarshaling->flags, sizeof(VkObjectEntryUsageFlagsNVX));
-    uint64_t cgen_var_330;
-    vkStream->handleMapping()->mapHandles_VkPipelineLayout_u64(&forMarshaling->pipelineLayout, &cgen_var_330, 1);
-    vkStream->write((uint64_t*)&cgen_var_330, 1 * 8);
-    vkStream->write((VkShaderStageFlags*)&forMarshaling->stageFlags, sizeof(VkShaderStageFlags));
-}
-
-void unmarshal_VkObjectTablePushConstantEntryNVX(
-    VulkanStreamGuest* vkStream,
-    VkObjectTablePushConstantEntryNVX* forUnmarshaling)
-{
-    vkStream->read((VkObjectEntryTypeNVX*)&forUnmarshaling->type, sizeof(VkObjectEntryTypeNVX));
-    vkStream->read((VkObjectEntryUsageFlagsNVX*)&forUnmarshaling->flags, sizeof(VkObjectEntryUsageFlagsNVX));
-    uint64_t cgen_var_331;
-    vkStream->read((uint64_t*)&cgen_var_331, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkPipelineLayout(&cgen_var_331, (VkPipelineLayout*)&forUnmarshaling->pipelineLayout, 1);
-    vkStream->read((VkShaderStageFlags*)&forUnmarshaling->stageFlags, sizeof(VkShaderStageFlags));
-}
-
-#endif
 #ifdef VK_NV_clip_space_w_scaling
 void marshal_VkViewportWScalingNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkViewportWScalingNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((float*)&forMarshaling->xcoeff, sizeof(float));
     vkStream->write((float*)&forMarshaling->ycoeff, sizeof(float));
 }
 
 void unmarshal_VkViewportWScalingNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkViewportWScalingNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((float*)&forUnmarshaling->xcoeff, sizeof(float));
     vkStream->read((float*)&forUnmarshaling->ycoeff, sizeof(float));
 }
 
 void marshal_VkPipelineViewportWScalingStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineViewportWScalingStateCreateInfoNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->viewportWScalingEnable, sizeof(VkBool32));
     vkStream->write((uint32_t*)&forMarshaling->viewportCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_332 = (uint64_t)(uintptr_t)forMarshaling->pViewportWScalings;
-    vkStream->putBe64(cgen_var_332);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pViewportWScalings;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pViewportWScalings)
     {
-        for (uint32_t i = 0; i < (uint32_t)forMarshaling->viewportCount; ++i)
+        if (forMarshaling)
         {
-            marshal_VkViewportWScalingNV(vkStream, (const VkViewportWScalingNV*)(forMarshaling->pViewportWScalings + i));
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->viewportCount; ++i)
+            {
+                marshal_VkViewportWScalingNV(vkStream, rootType, (const VkViewportWScalingNV*)(forMarshaling->pViewportWScalings + i));
+            }
         }
     }
 }
 
 void unmarshal_VkPipelineViewportWScalingStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineViewportWScalingStateCreateInfoNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->viewportWScalingEnable, sizeof(VkBool32));
     vkStream->read((uint32_t*)&forUnmarshaling->viewportCount, sizeof(uint32_t));
     // WARNING PTR CHECK
@@ -10996,9 +14471,12 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pViewportWScalings inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->viewportCount; ++i)
+        if (forUnmarshaling)
         {
-            unmarshal_VkViewportWScalingNV(vkStream, (VkViewportWScalingNV*)(forUnmarshaling->pViewportWScalings + i));
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->viewportCount; ++i)
+            {
+                unmarshal_VkViewportWScalingNV(vkStream, rootType, (VkViewportWScalingNV*)(forUnmarshaling->pViewportWScalings + i));
+            }
         }
     }
 }
@@ -11011,21 +14489,21 @@
 #ifdef VK_EXT_display_surface_counter
 void marshal_VkSurfaceCapabilities2EXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSurfaceCapabilities2EXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->minImageCount, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->maxImageCount, sizeof(uint32_t));
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->currentExtent));
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->minImageExtent));
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->maxImageExtent));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->currentExtent));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->minImageExtent));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxImageExtent));
     vkStream->write((uint32_t*)&forMarshaling->maxImageArrayLayers, sizeof(uint32_t));
     vkStream->write((VkSurfaceTransformFlagsKHR*)&forMarshaling->supportedTransforms, sizeof(VkSurfaceTransformFlagsKHR));
     vkStream->write((VkSurfaceTransformFlagBitsKHR*)&forMarshaling->currentTransform, sizeof(VkSurfaceTransformFlagBitsKHR));
@@ -11036,22 +14514,21 @@
 
 void unmarshal_VkSurfaceCapabilities2EXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSurfaceCapabilities2EXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->minImageCount, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->maxImageCount, sizeof(uint32_t));
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->currentExtent));
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->minImageExtent));
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->maxImageExtent));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->currentExtent));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->minImageExtent));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->maxImageExtent));
     vkStream->read((uint32_t*)&forUnmarshaling->maxImageArrayLayers, sizeof(uint32_t));
     vkStream->read((VkSurfaceTransformFlagsKHR*)&forUnmarshaling->supportedTransforms, sizeof(VkSurfaceTransformFlagsKHR));
     vkStream->read((VkSurfaceTransformFlagBitsKHR*)&forUnmarshaling->currentTransform, sizeof(VkSurfaceTransformFlagBitsKHR));
@@ -11064,125 +14541,121 @@
 #ifdef VK_EXT_display_control
 void marshal_VkDisplayPowerInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayPowerInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkDisplayPowerStateEXT*)&forMarshaling->powerState, sizeof(VkDisplayPowerStateEXT));
 }
 
 void unmarshal_VkDisplayPowerInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayPowerInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkDisplayPowerStateEXT*)&forUnmarshaling->powerState, sizeof(VkDisplayPowerStateEXT));
 }
 
 void marshal_VkDeviceEventInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceEventInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkDeviceEventTypeEXT*)&forMarshaling->deviceEvent, sizeof(VkDeviceEventTypeEXT));
 }
 
 void unmarshal_VkDeviceEventInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceEventInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkDeviceEventTypeEXT*)&forUnmarshaling->deviceEvent, sizeof(VkDeviceEventTypeEXT));
 }
 
 void marshal_VkDisplayEventInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayEventInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkDisplayEventTypeEXT*)&forMarshaling->displayEvent, sizeof(VkDisplayEventTypeEXT));
 }
 
 void unmarshal_VkDisplayEventInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayEventInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkDisplayEventTypeEXT*)&forUnmarshaling->displayEvent, sizeof(VkDisplayEventTypeEXT));
 }
 
 void marshal_VkSwapchainCounterCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSwapchainCounterCreateInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkSurfaceCounterFlagsEXT*)&forMarshaling->surfaceCounters, sizeof(VkSurfaceCounterFlagsEXT));
 }
 
 void unmarshal_VkSwapchainCounterCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSwapchainCounterCreateInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkSurfaceCounterFlagsEXT*)&forUnmarshaling->surfaceCounters, sizeof(VkSurfaceCounterFlagsEXT));
 }
 
@@ -11190,22 +14663,28 @@
 #ifdef VK_GOOGLE_display_timing
 void marshal_VkRefreshCycleDurationGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkRefreshCycleDurationGOOGLE* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint64_t*)&forMarshaling->refreshDuration, sizeof(uint64_t));
 }
 
 void unmarshal_VkRefreshCycleDurationGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkRefreshCycleDurationGOOGLE* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint64_t*)&forUnmarshaling->refreshDuration, sizeof(uint64_t));
 }
 
 void marshal_VkPastPresentationTimingGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPastPresentationTimingGOOGLE* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->presentID, sizeof(uint32_t));
     vkStream->write((uint64_t*)&forMarshaling->desiredPresentTime, sizeof(uint64_t));
     vkStream->write((uint64_t*)&forMarshaling->actualPresentTime, sizeof(uint64_t));
@@ -11215,8 +14694,10 @@
 
 void unmarshal_VkPastPresentationTimingGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPastPresentationTimingGOOGLE* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->presentID, sizeof(uint32_t));
     vkStream->read((uint64_t*)&forUnmarshaling->desiredPresentTime, sizeof(uint64_t));
     vkStream->read((uint64_t*)&forUnmarshaling->actualPresentTime, sizeof(uint64_t));
@@ -11226,58 +14707,64 @@
 
 void marshal_VkPresentTimeGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPresentTimeGOOGLE* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->presentID, sizeof(uint32_t));
     vkStream->write((uint64_t*)&forMarshaling->desiredPresentTime, sizeof(uint64_t));
 }
 
 void unmarshal_VkPresentTimeGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPresentTimeGOOGLE* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->presentID, sizeof(uint32_t));
     vkStream->read((uint64_t*)&forUnmarshaling->desiredPresentTime, sizeof(uint64_t));
 }
 
 void marshal_VkPresentTimesInfoGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPresentTimesInfoGOOGLE* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->swapchainCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_334 = (uint64_t)(uintptr_t)forMarshaling->pTimes;
-    vkStream->putBe64(cgen_var_334);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pTimes;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pTimes)
     {
-        for (uint32_t i = 0; i < (uint32_t)forMarshaling->swapchainCount; ++i)
+        if (forMarshaling)
         {
-            marshal_VkPresentTimeGOOGLE(vkStream, (const VkPresentTimeGOOGLE*)(forMarshaling->pTimes + i));
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->swapchainCount; ++i)
+            {
+                marshal_VkPresentTimeGOOGLE(vkStream, rootType, (const VkPresentTimeGOOGLE*)(forMarshaling->pTimes + i));
+            }
         }
     }
 }
 
 void unmarshal_VkPresentTimesInfoGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPresentTimesInfoGOOGLE* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->swapchainCount, sizeof(uint32_t));
     // WARNING PTR CHECK
     const VkPresentTimeGOOGLE* check_pTimes;
@@ -11288,9 +14775,12 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pTimes inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->swapchainCount; ++i)
+        if (forUnmarshaling)
         {
-            unmarshal_VkPresentTimeGOOGLE(vkStream, (VkPresentTimeGOOGLE*)(forUnmarshaling->pTimes + i));
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->swapchainCount; ++i)
+            {
+                unmarshal_VkPresentTimeGOOGLE(vkStream, rootType, (VkPresentTimeGOOGLE*)(forUnmarshaling->pTimes + i));
+            }
         }
     }
 }
@@ -11305,32 +14795,31 @@
 #ifdef VK_NVX_multiview_per_view_attributes
 void marshal_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->perViewPositionAllComponents, sizeof(VkBool32));
 }
 
 void unmarshal_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->perViewPositionAllComponents, sizeof(VkBool32));
 }
 
@@ -11338,8 +14827,10 @@
 #ifdef VK_NV_viewport_swizzle
 void marshal_VkViewportSwizzleNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkViewportSwizzleNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkViewportCoordinateSwizzleNV*)&forMarshaling->x, sizeof(VkViewportCoordinateSwizzleNV));
     vkStream->write((VkViewportCoordinateSwizzleNV*)&forMarshaling->y, sizeof(VkViewportCoordinateSwizzleNV));
     vkStream->write((VkViewportCoordinateSwizzleNV*)&forMarshaling->z, sizeof(VkViewportCoordinateSwizzleNV));
@@ -11348,8 +14839,10 @@
 
 void unmarshal_VkViewportSwizzleNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkViewportSwizzleNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkViewportCoordinateSwizzleNV*)&forUnmarshaling->x, sizeof(VkViewportCoordinateSwizzleNV));
     vkStream->read((VkViewportCoordinateSwizzleNV*)&forUnmarshaling->y, sizeof(VkViewportCoordinateSwizzleNV));
     vkStream->read((VkViewportCoordinateSwizzleNV*)&forUnmarshaling->z, sizeof(VkViewportCoordinateSwizzleNV));
@@ -11358,43 +14851,45 @@
 
 void marshal_VkPipelineViewportSwizzleStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineViewportSwizzleStateCreateInfoNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineViewportSwizzleStateCreateFlagsNV*)&forMarshaling->flags, sizeof(VkPipelineViewportSwizzleStateCreateFlagsNV));
     vkStream->write((uint32_t*)&forMarshaling->viewportCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_336 = (uint64_t)(uintptr_t)forMarshaling->pViewportSwizzles;
-    vkStream->putBe64(cgen_var_336);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pViewportSwizzles;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pViewportSwizzles)
     {
-        for (uint32_t i = 0; i < (uint32_t)forMarshaling->viewportCount; ++i)
+        if (forMarshaling)
         {
-            marshal_VkViewportSwizzleNV(vkStream, (const VkViewportSwizzleNV*)(forMarshaling->pViewportSwizzles + i));
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->viewportCount; ++i)
+            {
+                marshal_VkViewportSwizzleNV(vkStream, rootType, (const VkViewportSwizzleNV*)(forMarshaling->pViewportSwizzles + i));
+            }
         }
     }
 }
 
 void unmarshal_VkPipelineViewportSwizzleStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineViewportSwizzleStateCreateInfoNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineViewportSwizzleStateCreateFlagsNV*)&forUnmarshaling->flags, sizeof(VkPipelineViewportSwizzleStateCreateFlagsNV));
     vkStream->read((uint32_t*)&forUnmarshaling->viewportCount, sizeof(uint32_t));
     // WARNING PTR CHECK
@@ -11406,9 +14901,12 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pViewportSwizzles inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->viewportCount; ++i)
+        if (forUnmarshaling)
         {
-            unmarshal_VkViewportSwizzleNV(vkStream, (VkViewportSwizzleNV*)(forUnmarshaling->pViewportSwizzles + i));
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->viewportCount; ++i)
+            {
+                unmarshal_VkViewportSwizzleNV(vkStream, rootType, (VkViewportSwizzleNV*)(forUnmarshaling->pViewportSwizzles + i));
+            }
         }
     }
 }
@@ -11417,75 +14915,76 @@
 #ifdef VK_EXT_discard_rectangles
 void marshal_VkPhysicalDeviceDiscardRectanglePropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceDiscardRectanglePropertiesEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->maxDiscardRectangles, sizeof(uint32_t));
 }
 
 void unmarshal_VkPhysicalDeviceDiscardRectanglePropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceDiscardRectanglePropertiesEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->maxDiscardRectangles, sizeof(uint32_t));
 }
 
 void marshal_VkPipelineDiscardRectangleStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineDiscardRectangleStateCreateInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineDiscardRectangleStateCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkPipelineDiscardRectangleStateCreateFlagsEXT));
     vkStream->write((VkDiscardRectangleModeEXT*)&forMarshaling->discardRectangleMode, sizeof(VkDiscardRectangleModeEXT));
     vkStream->write((uint32_t*)&forMarshaling->discardRectangleCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_338 = (uint64_t)(uintptr_t)forMarshaling->pDiscardRectangles;
-    vkStream->putBe64(cgen_var_338);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pDiscardRectangles;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pDiscardRectangles)
     {
-        for (uint32_t i = 0; i < (uint32_t)forMarshaling->discardRectangleCount; ++i)
+        if (forMarshaling)
         {
-            marshal_VkRect2D(vkStream, (const VkRect2D*)(forMarshaling->pDiscardRectangles + i));
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->discardRectangleCount; ++i)
+            {
+                marshal_VkRect2D(vkStream, rootType, (const VkRect2D*)(forMarshaling->pDiscardRectangles + i));
+            }
         }
     }
 }
 
 void unmarshal_VkPipelineDiscardRectangleStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineDiscardRectangleStateCreateInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineDiscardRectangleStateCreateFlagsEXT*)&forUnmarshaling->flags, sizeof(VkPipelineDiscardRectangleStateCreateFlagsEXT));
     vkStream->read((VkDiscardRectangleModeEXT*)&forUnmarshaling->discardRectangleMode, sizeof(VkDiscardRectangleModeEXT));
     vkStream->read((uint32_t*)&forUnmarshaling->discardRectangleCount, sizeof(uint32_t));
@@ -11498,9 +14997,12 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pDiscardRectangles inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->discardRectangleCount; ++i)
+        if (forUnmarshaling)
         {
-            unmarshal_VkRect2D(vkStream, (VkRect2D*)(forUnmarshaling->pDiscardRectangles + i));
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->discardRectangleCount; ++i)
+            {
+                unmarshal_VkRect2D(vkStream, rootType, (VkRect2D*)(forUnmarshaling->pDiscardRectangles + i));
+            }
         }
     }
 }
@@ -11509,16 +15011,16 @@
 #ifdef VK_EXT_conservative_rasterization
 void marshal_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceConservativeRasterizationPropertiesEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((float*)&forMarshaling->primitiveOverestimationSize, sizeof(float));
     vkStream->write((float*)&forMarshaling->maxExtraPrimitiveOverestimationSize, sizeof(float));
     vkStream->write((float*)&forMarshaling->extraPrimitiveOverestimationSizeGranularity, sizeof(float));
@@ -11532,17 +15034,16 @@
 
 void unmarshal_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceConservativeRasterizationPropertiesEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((float*)&forUnmarshaling->primitiveOverestimationSize, sizeof(float));
     vkStream->read((float*)&forUnmarshaling->maxExtraPrimitiveOverestimationSize, sizeof(float));
     vkStream->read((float*)&forUnmarshaling->extraPrimitiveOverestimationSizeGranularity, sizeof(float));
@@ -11556,16 +15057,16 @@
 
 void marshal_VkPipelineRasterizationConservativeStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineRasterizationConservativeStateCreateInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineRasterizationConservativeStateCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkPipelineRasterizationConservativeStateCreateFlagsEXT));
     vkStream->write((VkConservativeRasterizationModeEXT*)&forMarshaling->conservativeRasterizationMode, sizeof(VkConservativeRasterizationModeEXT));
     vkStream->write((float*)&forMarshaling->extraPrimitiveOverestimationSize, sizeof(float));
@@ -11573,58 +15074,125 @@
 
 void unmarshal_VkPipelineRasterizationConservativeStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineRasterizationConservativeStateCreateInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineRasterizationConservativeStateCreateFlagsEXT*)&forUnmarshaling->flags, sizeof(VkPipelineRasterizationConservativeStateCreateFlagsEXT));
     vkStream->read((VkConservativeRasterizationModeEXT*)&forUnmarshaling->conservativeRasterizationMode, sizeof(VkConservativeRasterizationModeEXT));
     vkStream->read((float*)&forUnmarshaling->extraPrimitiveOverestimationSize, sizeof(float));
 }
 
 #endif
+#ifdef VK_EXT_depth_clip_enable
+void marshal_VkPhysicalDeviceDepthClipEnableFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDepthClipEnableFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->depthClipEnable, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceDepthClipEnableFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDepthClipEnableFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->depthClipEnable, sizeof(VkBool32));
+}
+
+void marshal_VkPipelineRasterizationDepthClipStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationDepthClipStateCreateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkPipelineRasterizationDepthClipStateCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkPipelineRasterizationDepthClipStateCreateFlagsEXT));
+    vkStream->write((VkBool32*)&forMarshaling->depthClipEnable, sizeof(VkBool32));
+}
+
+void unmarshal_VkPipelineRasterizationDepthClipStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineRasterizationDepthClipStateCreateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkPipelineRasterizationDepthClipStateCreateFlagsEXT*)&forUnmarshaling->flags, sizeof(VkPipelineRasterizationDepthClipStateCreateFlagsEXT));
+    vkStream->read((VkBool32*)&forUnmarshaling->depthClipEnable, sizeof(VkBool32));
+}
+
+#endif
 #ifdef VK_EXT_swapchain_colorspace
 #endif
 #ifdef VK_EXT_hdr_metadata
 void marshal_VkXYColorEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkXYColorEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((float*)&forMarshaling->x, sizeof(float));
     vkStream->write((float*)&forMarshaling->y, sizeof(float));
 }
 
 void unmarshal_VkXYColorEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkXYColorEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((float*)&forUnmarshaling->x, sizeof(float));
     vkStream->read((float*)&forUnmarshaling->y, sizeof(float));
 }
 
 void marshal_VkHdrMetadataEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkHdrMetadataEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkXYColorEXT(vkStream, (VkXYColorEXT*)(&forMarshaling->displayPrimaryRed));
-    marshal_VkXYColorEXT(vkStream, (VkXYColorEXT*)(&forMarshaling->displayPrimaryGreen));
-    marshal_VkXYColorEXT(vkStream, (VkXYColorEXT*)(&forMarshaling->displayPrimaryBlue));
-    marshal_VkXYColorEXT(vkStream, (VkXYColorEXT*)(&forMarshaling->whitePoint));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkXYColorEXT(vkStream, rootType, (VkXYColorEXT*)(&forMarshaling->displayPrimaryRed));
+    marshal_VkXYColorEXT(vkStream, rootType, (VkXYColorEXT*)(&forMarshaling->displayPrimaryGreen));
+    marshal_VkXYColorEXT(vkStream, rootType, (VkXYColorEXT*)(&forMarshaling->displayPrimaryBlue));
+    marshal_VkXYColorEXT(vkStream, rootType, (VkXYColorEXT*)(&forMarshaling->whitePoint));
     vkStream->write((float*)&forMarshaling->maxLuminance, sizeof(float));
     vkStream->write((float*)&forMarshaling->minLuminance, sizeof(float));
     vkStream->write((float*)&forMarshaling->maxContentLightLevel, sizeof(float));
@@ -11633,21 +15201,20 @@
 
 void unmarshal_VkHdrMetadataEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkHdrMetadataEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkXYColorEXT(vkStream, (VkXYColorEXT*)(&forUnmarshaling->displayPrimaryRed));
-    unmarshal_VkXYColorEXT(vkStream, (VkXYColorEXT*)(&forUnmarshaling->displayPrimaryGreen));
-    unmarshal_VkXYColorEXT(vkStream, (VkXYColorEXT*)(&forUnmarshaling->displayPrimaryBlue));
-    unmarshal_VkXYColorEXT(vkStream, (VkXYColorEXT*)(&forUnmarshaling->whitePoint));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkXYColorEXT(vkStream, rootType, (VkXYColorEXT*)(&forUnmarshaling->displayPrimaryRed));
+    unmarshal_VkXYColorEXT(vkStream, rootType, (VkXYColorEXT*)(&forUnmarshaling->displayPrimaryGreen));
+    unmarshal_VkXYColorEXT(vkStream, rootType, (VkXYColorEXT*)(&forUnmarshaling->displayPrimaryBlue));
+    unmarshal_VkXYColorEXT(vkStream, rootType, (VkXYColorEXT*)(&forUnmarshaling->whitePoint));
     vkStream->read((float*)&forUnmarshaling->maxLuminance, sizeof(float));
     vkStream->read((float*)&forUnmarshaling->minLuminance, sizeof(float));
     vkStream->read((float*)&forUnmarshaling->maxContentLightLevel, sizeof(float));
@@ -11658,20 +15225,20 @@
 #ifdef VK_MVK_ios_surface
 void marshal_VkIOSSurfaceCreateInfoMVK(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkIOSSurfaceCreateInfoMVK* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkIOSSurfaceCreateFlagsMVK*)&forMarshaling->flags, sizeof(VkIOSSurfaceCreateFlagsMVK));
     // WARNING PTR CHECK
-    uint64_t cgen_var_340 = (uint64_t)(uintptr_t)forMarshaling->pView;
-    vkStream->putBe64(cgen_var_340);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pView;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pView)
     {
         vkStream->write((const void*)forMarshaling->pView, sizeof(const uint8_t));
@@ -11680,17 +15247,16 @@
 
 void unmarshal_VkIOSSurfaceCreateInfoMVK(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkIOSSurfaceCreateInfoMVK* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkIOSSurfaceCreateFlagsMVK*)&forUnmarshaling->flags, sizeof(VkIOSSurfaceCreateFlagsMVK));
     // WARNING PTR CHECK
     const void* check_pView;
@@ -11709,20 +15275,20 @@
 #ifdef VK_MVK_macos_surface
 void marshal_VkMacOSSurfaceCreateInfoMVK(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMacOSSurfaceCreateInfoMVK* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkMacOSSurfaceCreateFlagsMVK*)&forMarshaling->flags, sizeof(VkMacOSSurfaceCreateFlagsMVK));
     // WARNING PTR CHECK
-    uint64_t cgen_var_342 = (uint64_t)(uintptr_t)forMarshaling->pView;
-    vkStream->putBe64(cgen_var_342);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pView;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pView)
     {
         vkStream->write((const void*)forMarshaling->pView, sizeof(const uint8_t));
@@ -11731,17 +15297,16 @@
 
 void unmarshal_VkMacOSSurfaceCreateInfoMVK(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMacOSSurfaceCreateInfoMVK* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkMacOSSurfaceCreateFlagsMVK*)&forUnmarshaling->flags, sizeof(VkMacOSSurfaceCreateFlagsMVK));
     // WARNING PTR CHECK
     const void* check_pView;
@@ -11757,30 +15322,64 @@
 }
 
 #endif
+#ifdef VK_MVK_moltenvk
+#endif
 #ifdef VK_EXT_external_memory_dma_buf
 #endif
 #ifdef VK_EXT_queue_family_foreign
 #endif
 #ifdef VK_EXT_debug_utils
+void marshal_VkDebugUtilsLabelEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugUtilsLabelEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->putString(forMarshaling->pLabelName);
+    vkStream->write((float*)forMarshaling->color, 4 * sizeof(float));
+}
+
+void unmarshal_VkDebugUtilsLabelEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDebugUtilsLabelEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->loadStringInPlace((char**)&forUnmarshaling->pLabelName);
+    vkStream->read((float*)forUnmarshaling->color, 4 * sizeof(float));
+}
+
 void marshal_VkDebugUtilsObjectNameInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDebugUtilsObjectNameInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkObjectType*)&forMarshaling->objectType, sizeof(VkObjectType));
     vkStream->write((uint64_t*)&forMarshaling->objectHandle, sizeof(uint64_t));
     if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
     {
         // WARNING PTR CHECK
-        uint64_t cgen_var_344 = (uint64_t)(uintptr_t)forMarshaling->pObjectName;
-        vkStream->putBe64(cgen_var_344);
+        uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pObjectName;
+        vkStream->putBe64(cgen_var_0);
         if (forMarshaling->pObjectName)
         {
             vkStream->putString(forMarshaling->pObjectName);
@@ -11794,17 +15393,16 @@
 
 void unmarshal_VkDebugUtilsObjectNameInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDebugUtilsObjectNameInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkObjectType*)&forUnmarshaling->objectType, sizeof(VkObjectType));
     vkStream->read((uint64_t*)&forUnmarshaling->objectHandle, sizeof(uint64_t));
     if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
@@ -11827,97 +15425,24 @@
     }
 }
 
-void marshal_VkDebugUtilsObjectTagInfoEXT(
-    VulkanStreamGuest* vkStream,
-    const VkDebugUtilsObjectTagInfoEXT* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkObjectType*)&forMarshaling->objectType, sizeof(VkObjectType));
-    vkStream->write((uint64_t*)&forMarshaling->objectHandle, sizeof(uint64_t));
-    vkStream->write((uint64_t*)&forMarshaling->tagName, sizeof(uint64_t));
-    uint64_t cgen_var_346 = (uint64_t)forMarshaling->tagSize;
-    vkStream->putBe64(cgen_var_346);
-    vkStream->write((const void*)forMarshaling->pTag, forMarshaling->tagSize * sizeof(const uint8_t));
-}
-
-void unmarshal_VkDebugUtilsObjectTagInfoEXT(
-    VulkanStreamGuest* vkStream,
-    VkDebugUtilsObjectTagInfoEXT* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkObjectType*)&forUnmarshaling->objectType, sizeof(VkObjectType));
-    vkStream->read((uint64_t*)&forUnmarshaling->objectHandle, sizeof(uint64_t));
-    vkStream->read((uint64_t*)&forUnmarshaling->tagName, sizeof(uint64_t));
-    forUnmarshaling->tagSize = (size_t)vkStream->getBe64();
-    vkStream->read((void*)forUnmarshaling->pTag, forUnmarshaling->tagSize * sizeof(const uint8_t));
-}
-
-void marshal_VkDebugUtilsLabelEXT(
-    VulkanStreamGuest* vkStream,
-    const VkDebugUtilsLabelEXT* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->putString(forMarshaling->pLabelName);
-    vkStream->write((float*)forMarshaling->color, 4 * sizeof(float));
-}
-
-void unmarshal_VkDebugUtilsLabelEXT(
-    VulkanStreamGuest* vkStream,
-    VkDebugUtilsLabelEXT* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->loadStringInPlace((char**)&forUnmarshaling->pLabelName);
-    vkStream->read((float*)forUnmarshaling->color, 4 * sizeof(float));
-}
-
 void marshal_VkDebugUtilsMessengerCallbackDataEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDebugUtilsMessengerCallbackDataEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkDebugUtilsMessengerCallbackDataFlagsEXT*)&forMarshaling->flags, sizeof(VkDebugUtilsMessengerCallbackDataFlagsEXT));
     if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
     {
         // WARNING PTR CHECK
-        uint64_t cgen_var_348 = (uint64_t)(uintptr_t)forMarshaling->pMessageIdName;
-        vkStream->putBe64(cgen_var_348);
+        uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pMessageIdName;
+        vkStream->putBe64(cgen_var_0);
         if (forMarshaling->pMessageIdName)
         {
             vkStream->putString(forMarshaling->pMessageIdName);
@@ -11931,52 +15456,60 @@
     vkStream->putString(forMarshaling->pMessage);
     vkStream->write((uint32_t*)&forMarshaling->queueLabelCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_349 = (uint64_t)(uintptr_t)forMarshaling->pQueueLabels;
-    vkStream->putBe64(cgen_var_349);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pQueueLabels;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pQueueLabels)
     {
-        for (uint32_t i = 0; i < (uint32_t)forMarshaling->queueLabelCount; ++i)
+        if (forMarshaling)
         {
-            marshal_VkDebugUtilsLabelEXT(vkStream, (VkDebugUtilsLabelEXT*)(forMarshaling->pQueueLabels + i));
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->queueLabelCount; ++i)
+            {
+                marshal_VkDebugUtilsLabelEXT(vkStream, rootType, (VkDebugUtilsLabelEXT*)(forMarshaling->pQueueLabels + i));
+            }
         }
     }
     vkStream->write((uint32_t*)&forMarshaling->cmdBufLabelCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_350 = (uint64_t)(uintptr_t)forMarshaling->pCmdBufLabels;
-    vkStream->putBe64(cgen_var_350);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pCmdBufLabels;
+    vkStream->putBe64(cgen_var_1);
     if (forMarshaling->pCmdBufLabels)
     {
-        for (uint32_t i = 0; i < (uint32_t)forMarshaling->cmdBufLabelCount; ++i)
+        if (forMarshaling)
         {
-            marshal_VkDebugUtilsLabelEXT(vkStream, (VkDebugUtilsLabelEXT*)(forMarshaling->pCmdBufLabels + i));
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->cmdBufLabelCount; ++i)
+            {
+                marshal_VkDebugUtilsLabelEXT(vkStream, rootType, (VkDebugUtilsLabelEXT*)(forMarshaling->pCmdBufLabels + i));
+            }
         }
     }
     vkStream->write((uint32_t*)&forMarshaling->objectCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_351 = (uint64_t)(uintptr_t)forMarshaling->pObjects;
-    vkStream->putBe64(cgen_var_351);
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)forMarshaling->pObjects;
+    vkStream->putBe64(cgen_var_2);
     if (forMarshaling->pObjects)
     {
-        for (uint32_t i = 0; i < (uint32_t)forMarshaling->objectCount; ++i)
+        if (forMarshaling)
         {
-            marshal_VkDebugUtilsObjectNameInfoEXT(vkStream, (VkDebugUtilsObjectNameInfoEXT*)(forMarshaling->pObjects + i));
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->objectCount; ++i)
+            {
+                marshal_VkDebugUtilsObjectNameInfoEXT(vkStream, rootType, (VkDebugUtilsObjectNameInfoEXT*)(forMarshaling->pObjects + i));
+            }
         }
     }
 }
 
 void unmarshal_VkDebugUtilsMessengerCallbackDataEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDebugUtilsMessengerCallbackDataEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkDebugUtilsMessengerCallbackDataFlagsEXT*)&forUnmarshaling->flags, sizeof(VkDebugUtilsMessengerCallbackDataFlagsEXT));
     if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
     {
@@ -12008,9 +15541,12 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pQueueLabels inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->queueLabelCount; ++i)
+        if (forUnmarshaling)
         {
-            unmarshal_VkDebugUtilsLabelEXT(vkStream, (VkDebugUtilsLabelEXT*)(forUnmarshaling->pQueueLabels + i));
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->queueLabelCount; ++i)
+            {
+                unmarshal_VkDebugUtilsLabelEXT(vkStream, rootType, (VkDebugUtilsLabelEXT*)(forUnmarshaling->pQueueLabels + i));
+            }
         }
     }
     vkStream->read((uint32_t*)&forUnmarshaling->cmdBufLabelCount, sizeof(uint32_t));
@@ -12023,9 +15559,12 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pCmdBufLabels inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->cmdBufLabelCount; ++i)
+        if (forUnmarshaling)
         {
-            unmarshal_VkDebugUtilsLabelEXT(vkStream, (VkDebugUtilsLabelEXT*)(forUnmarshaling->pCmdBufLabels + i));
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->cmdBufLabelCount; ++i)
+            {
+                unmarshal_VkDebugUtilsLabelEXT(vkStream, rootType, (VkDebugUtilsLabelEXT*)(forUnmarshaling->pCmdBufLabels + i));
+            }
         }
     }
     vkStream->read((uint32_t*)&forUnmarshaling->objectCount, sizeof(uint32_t));
@@ -12038,33 +15577,36 @@
         {
             fprintf(stderr, "fatal: forUnmarshaling->pObjects inconsistent between guest and host\n");
         }
-        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->objectCount; ++i)
+        if (forUnmarshaling)
         {
-            unmarshal_VkDebugUtilsObjectNameInfoEXT(vkStream, (VkDebugUtilsObjectNameInfoEXT*)(forUnmarshaling->pObjects + i));
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->objectCount; ++i)
+            {
+                unmarshal_VkDebugUtilsObjectNameInfoEXT(vkStream, rootType, (VkDebugUtilsObjectNameInfoEXT*)(forUnmarshaling->pObjects + i));
+            }
         }
     }
 }
 
 void marshal_VkDebugUtilsMessengerCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDebugUtilsMessengerCreateInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkDebugUtilsMessengerCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkDebugUtilsMessengerCreateFlagsEXT));
     vkStream->write((VkDebugUtilsMessageSeverityFlagsEXT*)&forMarshaling->messageSeverity, sizeof(VkDebugUtilsMessageSeverityFlagsEXT));
     vkStream->write((VkDebugUtilsMessageTypeFlagsEXT*)&forMarshaling->messageType, sizeof(VkDebugUtilsMessageTypeFlagsEXT));
-    uint64_t cgen_var_356 = (uint64_t)forMarshaling->pfnUserCallback;
-    vkStream->putBe64(cgen_var_356);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->pfnUserCallback;
+    vkStream->putBe64(cgen_var_0);
     // WARNING PTR CHECK
-    uint64_t cgen_var_357 = (uint64_t)(uintptr_t)forMarshaling->pUserData;
-    vkStream->putBe64(cgen_var_357);
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pUserData;
+    vkStream->putBe64(cgen_var_1);
     if (forMarshaling->pUserData)
     {
         vkStream->write((void*)forMarshaling->pUserData, sizeof(uint8_t));
@@ -12073,17 +15615,16 @@
 
 void unmarshal_VkDebugUtilsMessengerCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDebugUtilsMessengerCreateInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkDebugUtilsMessengerCreateFlagsEXT*)&forUnmarshaling->flags, sizeof(VkDebugUtilsMessengerCreateFlagsEXT));
     vkStream->read((VkDebugUtilsMessageSeverityFlagsEXT*)&forUnmarshaling->messageSeverity, sizeof(VkDebugUtilsMessageSeverityFlagsEXT));
     vkStream->read((VkDebugUtilsMessageTypeFlagsEXT*)&forUnmarshaling->messageType, sizeof(VkDebugUtilsMessageTypeFlagsEXT));
@@ -12101,88 +15642,125 @@
     }
 }
 
+void marshal_VkDebugUtilsObjectTagInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugUtilsObjectTagInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkObjectType*)&forMarshaling->objectType, sizeof(VkObjectType));
+    vkStream->write((uint64_t*)&forMarshaling->objectHandle, sizeof(uint64_t));
+    vkStream->write((uint64_t*)&forMarshaling->tagName, sizeof(uint64_t));
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->tagSize;
+    vkStream->putBe64(cgen_var_0);
+    vkStream->write((const void*)forMarshaling->pTag, forMarshaling->tagSize * sizeof(const uint8_t));
+}
+
+void unmarshal_VkDebugUtilsObjectTagInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDebugUtilsObjectTagInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkObjectType*)&forUnmarshaling->objectType, sizeof(VkObjectType));
+    vkStream->read((uint64_t*)&forUnmarshaling->objectHandle, sizeof(uint64_t));
+    vkStream->read((uint64_t*)&forUnmarshaling->tagName, sizeof(uint64_t));
+    forUnmarshaling->tagSize = (size_t)vkStream->getBe64();
+    vkStream->read((void*)forUnmarshaling->pTag, forUnmarshaling->tagSize * sizeof(const uint8_t));
+}
+
 #endif
 #ifdef VK_ANDROID_external_memory_android_hardware_buffer
 void marshal_VkAndroidHardwareBufferUsageANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkAndroidHardwareBufferUsageANDROID* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint64_t*)&forMarshaling->androidHardwareBufferUsage, sizeof(uint64_t));
 }
 
 void unmarshal_VkAndroidHardwareBufferUsageANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkAndroidHardwareBufferUsageANDROID* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint64_t*)&forUnmarshaling->androidHardwareBufferUsage, sizeof(uint64_t));
 }
 
 void marshal_VkAndroidHardwareBufferPropertiesANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkAndroidHardwareBufferPropertiesANDROID* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkDeviceSize*)&forMarshaling->allocationSize, sizeof(VkDeviceSize));
     vkStream->write((uint32_t*)&forMarshaling->memoryTypeBits, sizeof(uint32_t));
 }
 
 void unmarshal_VkAndroidHardwareBufferPropertiesANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkAndroidHardwareBufferPropertiesANDROID* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkDeviceSize*)&forUnmarshaling->allocationSize, sizeof(VkDeviceSize));
     vkStream->read((uint32_t*)&forUnmarshaling->memoryTypeBits, sizeof(uint32_t));
 }
 
 void marshal_VkAndroidHardwareBufferFormatPropertiesANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkAndroidHardwareBufferFormatPropertiesANDROID* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkFormat*)&forMarshaling->format, sizeof(VkFormat));
     vkStream->write((uint64_t*)&forMarshaling->externalFormat, sizeof(uint64_t));
     vkStream->write((VkFormatFeatureFlags*)&forMarshaling->formatFeatures, sizeof(VkFormatFeatureFlags));
-    marshal_VkComponentMapping(vkStream, (VkComponentMapping*)(&forMarshaling->samplerYcbcrConversionComponents));
+    marshal_VkComponentMapping(vkStream, rootType, (VkComponentMapping*)(&forMarshaling->samplerYcbcrConversionComponents));
     vkStream->write((VkSamplerYcbcrModelConversion*)&forMarshaling->suggestedYcbcrModel, sizeof(VkSamplerYcbcrModelConversion));
     vkStream->write((VkSamplerYcbcrRange*)&forMarshaling->suggestedYcbcrRange, sizeof(VkSamplerYcbcrRange));
     vkStream->write((VkChromaLocation*)&forMarshaling->suggestedXChromaOffset, sizeof(VkChromaLocation));
@@ -12191,21 +15769,20 @@
 
 void unmarshal_VkAndroidHardwareBufferFormatPropertiesANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkAndroidHardwareBufferFormatPropertiesANDROID* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkFormat*)&forUnmarshaling->format, sizeof(VkFormat));
     vkStream->read((uint64_t*)&forUnmarshaling->externalFormat, sizeof(uint64_t));
     vkStream->read((VkFormatFeatureFlags*)&forUnmarshaling->formatFeatures, sizeof(VkFormatFeatureFlags));
-    unmarshal_VkComponentMapping(vkStream, (VkComponentMapping*)(&forUnmarshaling->samplerYcbcrConversionComponents));
+    unmarshal_VkComponentMapping(vkStream, rootType, (VkComponentMapping*)(&forUnmarshaling->samplerYcbcrConversionComponents));
     vkStream->read((VkSamplerYcbcrModelConversion*)&forUnmarshaling->suggestedYcbcrModel, sizeof(VkSamplerYcbcrModelConversion));
     vkStream->read((VkSamplerYcbcrRange*)&forUnmarshaling->suggestedYcbcrRange, sizeof(VkSamplerYcbcrRange));
     vkStream->read((VkChromaLocation*)&forUnmarshaling->suggestedXChromaOffset, sizeof(VkChromaLocation));
@@ -12214,167 +15791,100 @@
 
 void marshal_VkImportAndroidHardwareBufferInfoANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportAndroidHardwareBufferInfoANDROID* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((AHardwareBuffer*)forMarshaling->buffer, sizeof(AHardwareBuffer));
 }
 
 void unmarshal_VkImportAndroidHardwareBufferInfoANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportAndroidHardwareBufferInfoANDROID* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((AHardwareBuffer*)forUnmarshaling->buffer, sizeof(AHardwareBuffer));
 }
 
 void marshal_VkMemoryGetAndroidHardwareBufferInfoANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryGetAndroidHardwareBufferInfoANDROID* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_360;
-    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_360, 1);
-    vkStream->write((uint64_t*)&cgen_var_360, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
 }
 
 void unmarshal_VkMemoryGetAndroidHardwareBufferInfoANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryGetAndroidHardwareBufferInfoANDROID* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_361;
-    vkStream->read((uint64_t*)&cgen_var_361, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_361, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_0, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
 }
 
 void marshal_VkExternalFormatANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalFormatANDROID* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint64_t*)&forMarshaling->externalFormat, sizeof(uint64_t));
 }
 
 void unmarshal_VkExternalFormatANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalFormatANDROID* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint64_t*)&forUnmarshaling->externalFormat, sizeof(uint64_t));
 }
 
 #endif
 #ifdef VK_EXT_sampler_filter_minmax
-void marshal_VkSamplerReductionModeCreateInfoEXT(
-    VulkanStreamGuest* vkStream,
-    const VkSamplerReductionModeCreateInfoEXT* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkSamplerReductionModeEXT*)&forMarshaling->reductionMode, sizeof(VkSamplerReductionModeEXT));
-}
-
-void unmarshal_VkSamplerReductionModeCreateInfoEXT(
-    VulkanStreamGuest* vkStream,
-    VkSamplerReductionModeCreateInfoEXT* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkSamplerReductionModeEXT*)&forUnmarshaling->reductionMode, sizeof(VkSamplerReductionModeEXT));
-}
-
-void marshal_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(
-    VulkanStreamGuest* vkStream,
-    const VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
-    {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
-    }
-    vkStream->write((VkBool32*)&forMarshaling->filterMinmaxSingleComponentFormats, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->filterMinmaxImageComponentMapping, sizeof(VkBool32));
-}
-
-void unmarshal_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(
-    VulkanStreamGuest* vkStream,
-    VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((VkBool32*)&forUnmarshaling->filterMinmaxSingleComponentFormats, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->filterMinmaxImageComponentMapping, sizeof(VkBool32));
-}
-
 #endif
 #ifdef VK_AMD_gpu_shader_int16
 #endif
@@ -12382,196 +15892,357 @@
 #endif
 #ifdef VK_AMD_shader_fragment_mask
 #endif
+#ifdef VK_EXT_inline_uniform_block
+void marshal_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceInlineUniformBlockFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->inlineUniformBlock, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingInlineUniformBlockUpdateAfterBind, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceInlineUniformBlockFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->inlineUniformBlock, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingInlineUniformBlockUpdateAfterBind, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceInlineUniformBlockPropertiesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->maxInlineUniformBlockSize, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorInlineUniformBlocks, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetInlineUniformBlocks, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindInlineUniformBlocks, sizeof(uint32_t));
+}
+
+void unmarshal_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceInlineUniformBlockPropertiesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxInlineUniformBlockSize, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorInlineUniformBlocks, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetInlineUniformBlocks, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindInlineUniformBlocks, sizeof(uint32_t));
+}
+
+void marshal_VkWriteDescriptorSetInlineUniformBlockEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetInlineUniformBlockEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->dataSize, sizeof(uint32_t));
+    vkStream->write((const void*)forMarshaling->pData, forMarshaling->dataSize * sizeof(const uint8_t));
+}
+
+void unmarshal_VkWriteDescriptorSetInlineUniformBlockEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkWriteDescriptorSetInlineUniformBlockEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->dataSize, sizeof(uint32_t));
+    vkStream->read((void*)forUnmarshaling->pData, forUnmarshaling->dataSize * sizeof(const uint8_t));
+}
+
+void marshal_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorPoolInlineUniformBlockCreateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->maxInlineUniformBlockBindings, sizeof(uint32_t));
+}
+
+void unmarshal_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorPoolInlineUniformBlockCreateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxInlineUniformBlockBindings, sizeof(uint32_t));
+}
+
+#endif
 #ifdef VK_EXT_shader_stencil_export
 #endif
 #ifdef VK_EXT_sample_locations
 void marshal_VkSampleLocationEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSampleLocationEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((float*)&forMarshaling->x, sizeof(float));
     vkStream->write((float*)&forMarshaling->y, sizeof(float));
 }
 
 void unmarshal_VkSampleLocationEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSampleLocationEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((float*)&forUnmarshaling->x, sizeof(float));
     vkStream->read((float*)&forUnmarshaling->y, sizeof(float));
 }
 
 void marshal_VkSampleLocationsInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSampleLocationsInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkSampleCountFlagBits*)&forMarshaling->sampleLocationsPerPixel, sizeof(VkSampleCountFlagBits));
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->sampleLocationGridSize));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->sampleLocationGridSize));
     vkStream->write((uint32_t*)&forMarshaling->sampleLocationsCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->sampleLocationsCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkSampleLocationEXT(vkStream, (const VkSampleLocationEXT*)(forMarshaling->pSampleLocations + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->sampleLocationsCount; ++i)
+        {
+            marshal_VkSampleLocationEXT(vkStream, rootType, (const VkSampleLocationEXT*)(forMarshaling->pSampleLocations + i));
+        }
     }
 }
 
 void unmarshal_VkSampleLocationsInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSampleLocationsInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkSampleCountFlagBits*)&forUnmarshaling->sampleLocationsPerPixel, sizeof(VkSampleCountFlagBits));
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->sampleLocationGridSize));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->sampleLocationGridSize));
     vkStream->read((uint32_t*)&forUnmarshaling->sampleLocationsCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->sampleLocationsCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkSampleLocationEXT(vkStream, (VkSampleLocationEXT*)(forUnmarshaling->pSampleLocations + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->sampleLocationsCount; ++i)
+        {
+            unmarshal_VkSampleLocationEXT(vkStream, rootType, (VkSampleLocationEXT*)(forUnmarshaling->pSampleLocations + i));
+        }
     }
 }
 
 void marshal_VkAttachmentSampleLocationsEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkAttachmentSampleLocationsEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->attachmentIndex, sizeof(uint32_t));
-    marshal_VkSampleLocationsInfoEXT(vkStream, (VkSampleLocationsInfoEXT*)(&forMarshaling->sampleLocationsInfo));
+    marshal_VkSampleLocationsInfoEXT(vkStream, rootType, (VkSampleLocationsInfoEXT*)(&forMarshaling->sampleLocationsInfo));
 }
 
 void unmarshal_VkAttachmentSampleLocationsEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkAttachmentSampleLocationsEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->attachmentIndex, sizeof(uint32_t));
-    unmarshal_VkSampleLocationsInfoEXT(vkStream, (VkSampleLocationsInfoEXT*)(&forUnmarshaling->sampleLocationsInfo));
+    unmarshal_VkSampleLocationsInfoEXT(vkStream, rootType, (VkSampleLocationsInfoEXT*)(&forUnmarshaling->sampleLocationsInfo));
 }
 
 void marshal_VkSubpassSampleLocationsEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSubpassSampleLocationsEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->subpassIndex, sizeof(uint32_t));
-    marshal_VkSampleLocationsInfoEXT(vkStream, (VkSampleLocationsInfoEXT*)(&forMarshaling->sampleLocationsInfo));
+    marshal_VkSampleLocationsInfoEXT(vkStream, rootType, (VkSampleLocationsInfoEXT*)(&forMarshaling->sampleLocationsInfo));
 }
 
 void unmarshal_VkSubpassSampleLocationsEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSubpassSampleLocationsEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->subpassIndex, sizeof(uint32_t));
-    unmarshal_VkSampleLocationsInfoEXT(vkStream, (VkSampleLocationsInfoEXT*)(&forUnmarshaling->sampleLocationsInfo));
+    unmarshal_VkSampleLocationsInfoEXT(vkStream, rootType, (VkSampleLocationsInfoEXT*)(&forUnmarshaling->sampleLocationsInfo));
 }
 
 void marshal_VkRenderPassSampleLocationsBeginInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkRenderPassSampleLocationsBeginInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->attachmentInitialSampleLocationsCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->attachmentInitialSampleLocationsCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkAttachmentSampleLocationsEXT(vkStream, (const VkAttachmentSampleLocationsEXT*)(forMarshaling->pAttachmentInitialSampleLocations + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->attachmentInitialSampleLocationsCount; ++i)
+        {
+            marshal_VkAttachmentSampleLocationsEXT(vkStream, rootType, (const VkAttachmentSampleLocationsEXT*)(forMarshaling->pAttachmentInitialSampleLocations + i));
+        }
     }
     vkStream->write((uint32_t*)&forMarshaling->postSubpassSampleLocationsCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->postSubpassSampleLocationsCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkSubpassSampleLocationsEXT(vkStream, (const VkSubpassSampleLocationsEXT*)(forMarshaling->pPostSubpassSampleLocations + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->postSubpassSampleLocationsCount; ++i)
+        {
+            marshal_VkSubpassSampleLocationsEXT(vkStream, rootType, (const VkSubpassSampleLocationsEXT*)(forMarshaling->pPostSubpassSampleLocations + i));
+        }
     }
 }
 
 void unmarshal_VkRenderPassSampleLocationsBeginInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkRenderPassSampleLocationsBeginInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->attachmentInitialSampleLocationsCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->attachmentInitialSampleLocationsCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkAttachmentSampleLocationsEXT(vkStream, (VkAttachmentSampleLocationsEXT*)(forUnmarshaling->pAttachmentInitialSampleLocations + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->attachmentInitialSampleLocationsCount; ++i)
+        {
+            unmarshal_VkAttachmentSampleLocationsEXT(vkStream, rootType, (VkAttachmentSampleLocationsEXT*)(forUnmarshaling->pAttachmentInitialSampleLocations + i));
+        }
     }
     vkStream->read((uint32_t*)&forUnmarshaling->postSubpassSampleLocationsCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->postSubpassSampleLocationsCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkSubpassSampleLocationsEXT(vkStream, (VkSubpassSampleLocationsEXT*)(forUnmarshaling->pPostSubpassSampleLocations + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->postSubpassSampleLocationsCount; ++i)
+        {
+            unmarshal_VkSubpassSampleLocationsEXT(vkStream, rootType, (VkSubpassSampleLocationsEXT*)(forUnmarshaling->pPostSubpassSampleLocations + i));
+        }
     }
 }
 
 void marshal_VkPipelineSampleLocationsStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineSampleLocationsStateCreateInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->sampleLocationsEnable, sizeof(VkBool32));
-    marshal_VkSampleLocationsInfoEXT(vkStream, (VkSampleLocationsInfoEXT*)(&forMarshaling->sampleLocationsInfo));
+    marshal_VkSampleLocationsInfoEXT(vkStream, rootType, (VkSampleLocationsInfoEXT*)(&forMarshaling->sampleLocationsInfo));
 }
 
 void unmarshal_VkPipelineSampleLocationsStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineSampleLocationsStateCreateInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->sampleLocationsEnable, sizeof(VkBool32));
-    unmarshal_VkSampleLocationsInfoEXT(vkStream, (VkSampleLocationsInfoEXT*)(&forUnmarshaling->sampleLocationsInfo));
+    unmarshal_VkSampleLocationsInfoEXT(vkStream, rootType, (VkSampleLocationsInfoEXT*)(&forUnmarshaling->sampleLocationsInfo));
 }
 
 void marshal_VkPhysicalDeviceSampleLocationsPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceSampleLocationsPropertiesEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkSampleCountFlags*)&forMarshaling->sampleLocationSampleCounts, sizeof(VkSampleCountFlags));
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->maxSampleLocationGridSize));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxSampleLocationGridSize));
     vkStream->write((float*)forMarshaling->sampleLocationCoordinateRange, 2 * sizeof(float));
     vkStream->write((uint32_t*)&forMarshaling->sampleLocationSubPixelBits, sizeof(uint32_t));
     vkStream->write((VkBool32*)&forMarshaling->variableSampleLocations, sizeof(VkBool32));
@@ -12579,19 +16250,18 @@
 
 void unmarshal_VkPhysicalDeviceSampleLocationsPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceSampleLocationsPropertiesEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkSampleCountFlags*)&forUnmarshaling->sampleLocationSampleCounts, sizeof(VkSampleCountFlags));
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->maxSampleLocationGridSize));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->maxSampleLocationGridSize));
     vkStream->read((float*)forUnmarshaling->sampleLocationCoordinateRange, 2 * sizeof(float));
     vkStream->read((uint32_t*)&forUnmarshaling->sampleLocationSubPixelBits, sizeof(uint32_t));
     vkStream->read((VkBool32*)&forUnmarshaling->variableSampleLocations, sizeof(VkBool32));
@@ -12599,80 +16269,78 @@
 
 void marshal_VkMultisamplePropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMultisamplePropertiesEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    marshal_VkExtent2D(vkStream, (VkExtent2D*)(&forMarshaling->maxSampleLocationGridSize));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxSampleLocationGridSize));
 }
 
 void unmarshal_VkMultisamplePropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMultisamplePropertiesEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    unmarshal_VkExtent2D(vkStream, (VkExtent2D*)(&forUnmarshaling->maxSampleLocationGridSize));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->maxSampleLocationGridSize));
 }
 
 #endif
 #ifdef VK_EXT_blend_operation_advanced
 void marshal_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->advancedBlendCoherentOperations, sizeof(VkBool32));
 }
 
 void unmarshal_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->advancedBlendCoherentOperations, sizeof(VkBool32));
 }
 
 void marshal_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->advancedBlendMaxColorAttachments, sizeof(uint32_t));
     vkStream->write((VkBool32*)&forMarshaling->advancedBlendIndependentBlend, sizeof(VkBool32));
     vkStream->write((VkBool32*)&forMarshaling->advancedBlendNonPremultipliedSrcColor, sizeof(VkBool32));
@@ -12683,17 +16351,16 @@
 
 void unmarshal_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->advancedBlendMaxColorAttachments, sizeof(uint32_t));
     vkStream->read((VkBool32*)&forUnmarshaling->advancedBlendIndependentBlend, sizeof(VkBool32));
     vkStream->read((VkBool32*)&forUnmarshaling->advancedBlendNonPremultipliedSrcColor, sizeof(VkBool32));
@@ -12704,16 +16371,16 @@
 
 void marshal_VkPipelineColorBlendAdvancedStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineColorBlendAdvancedStateCreateInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkBool32*)&forMarshaling->srcPremultiplied, sizeof(VkBool32));
     vkStream->write((VkBool32*)&forMarshaling->dstPremultiplied, sizeof(VkBool32));
     vkStream->write((VkBlendOverlapEXT*)&forMarshaling->blendOverlap, sizeof(VkBlendOverlapEXT));
@@ -12721,17 +16388,16 @@
 
 void unmarshal_VkPipelineColorBlendAdvancedStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineColorBlendAdvancedStateCreateInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkBool32*)&forUnmarshaling->srcPremultiplied, sizeof(VkBool32));
     vkStream->read((VkBool32*)&forUnmarshaling->dstPremultiplied, sizeof(VkBool32));
     vkStream->read((VkBlendOverlapEXT*)&forUnmarshaling->blendOverlap, sizeof(VkBlendOverlapEXT));
@@ -12741,16 +16407,16 @@
 #ifdef VK_NV_fragment_coverage_to_color
 void marshal_VkPipelineCoverageToColorStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineCoverageToColorStateCreateInfoNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineCoverageToColorStateCreateFlagsNV*)&forMarshaling->flags, sizeof(VkPipelineCoverageToColorStateCreateFlagsNV));
     vkStream->write((VkBool32*)&forMarshaling->coverageToColorEnable, sizeof(VkBool32));
     vkStream->write((uint32_t*)&forMarshaling->coverageToColorLocation, sizeof(uint32_t));
@@ -12758,17 +16424,16 @@
 
 void unmarshal_VkPipelineCoverageToColorStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineCoverageToColorStateCreateInfoNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineCoverageToColorStateCreateFlagsNV*)&forUnmarshaling->flags, sizeof(VkPipelineCoverageToColorStateCreateFlagsNV));
     vkStream->read((VkBool32*)&forUnmarshaling->coverageToColorEnable, sizeof(VkBool32));
     vkStream->read((uint32_t*)&forUnmarshaling->coverageToColorLocation, sizeof(uint32_t));
@@ -12778,23 +16443,23 @@
 #ifdef VK_NV_framebuffer_mixed_samples
 void marshal_VkPipelineCoverageModulationStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineCoverageModulationStateCreateInfoNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineCoverageModulationStateCreateFlagsNV*)&forMarshaling->flags, sizeof(VkPipelineCoverageModulationStateCreateFlagsNV));
     vkStream->write((VkCoverageModulationModeNV*)&forMarshaling->coverageModulationMode, sizeof(VkCoverageModulationModeNV));
     vkStream->write((VkBool32*)&forMarshaling->coverageModulationTableEnable, sizeof(VkBool32));
     vkStream->write((uint32_t*)&forMarshaling->coverageModulationTableCount, sizeof(uint32_t));
     // WARNING PTR CHECK
-    uint64_t cgen_var_362 = (uint64_t)(uintptr_t)forMarshaling->pCoverageModulationTable;
-    vkStream->putBe64(cgen_var_362);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pCoverageModulationTable;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pCoverageModulationTable)
     {
         vkStream->write((const float*)forMarshaling->pCoverageModulationTable, forMarshaling->coverageModulationTableCount * sizeof(const float));
@@ -12803,17 +16468,16 @@
 
 void unmarshal_VkPipelineCoverageModulationStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineCoverageModulationStateCreateInfoNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineCoverageModulationStateCreateFlagsNV*)&forUnmarshaling->flags, sizeof(VkPipelineCoverageModulationStateCreateFlagsNV));
     vkStream->read((VkCoverageModulationModeNV*)&forUnmarshaling->coverageModulationMode, sizeof(VkCoverageModulationModeNV));
     vkStream->read((VkBool32*)&forUnmarshaling->coverageModulationTableEnable, sizeof(VkBool32));
@@ -12834,40 +16498,347 @@
 #endif
 #ifdef VK_NV_fill_rectangle
 #endif
+#ifdef VK_NV_shader_sm_builtins
+void marshal_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->shaderSMCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->shaderWarpsPerSM, sizeof(uint32_t));
+}
+
+void unmarshal_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->shaderSMCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->shaderWarpsPerSM, sizeof(uint32_t));
+}
+
+void marshal_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->shaderSMBuiltins, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSMBuiltins, sizeof(VkBool32));
+}
+
+#endif
 #ifdef VK_EXT_post_depth_coverage
 #endif
+#ifdef VK_EXT_image_drm_format_modifier
+void marshal_VkDrmFormatModifierPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrmFormatModifierPropertiesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((uint64_t*)&forMarshaling->drmFormatModifier, sizeof(uint64_t));
+    vkStream->write((uint32_t*)&forMarshaling->drmFormatModifierPlaneCount, sizeof(uint32_t));
+    vkStream->write((VkFormatFeatureFlags*)&forMarshaling->drmFormatModifierTilingFeatures, sizeof(VkFormatFeatureFlags));
+}
+
+void unmarshal_VkDrmFormatModifierPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDrmFormatModifierPropertiesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((uint64_t*)&forUnmarshaling->drmFormatModifier, sizeof(uint64_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->drmFormatModifierPlaneCount, sizeof(uint32_t));
+    vkStream->read((VkFormatFeatureFlags*)&forUnmarshaling->drmFormatModifierTilingFeatures, sizeof(VkFormatFeatureFlags));
+}
+
+void marshal_VkDrmFormatModifierPropertiesListEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrmFormatModifierPropertiesListEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->drmFormatModifierCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pDrmFormatModifierProperties;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->pDrmFormatModifierProperties)
+    {
+        if (forMarshaling)
+        {
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->drmFormatModifierCount; ++i)
+            {
+                marshal_VkDrmFormatModifierPropertiesEXT(vkStream, rootType, (VkDrmFormatModifierPropertiesEXT*)(forMarshaling->pDrmFormatModifierProperties + i));
+            }
+        }
+    }
+}
+
+void unmarshal_VkDrmFormatModifierPropertiesListEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDrmFormatModifierPropertiesListEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->drmFormatModifierCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    VkDrmFormatModifierPropertiesEXT* check_pDrmFormatModifierProperties;
+    check_pDrmFormatModifierProperties = (VkDrmFormatModifierPropertiesEXT*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pDrmFormatModifierProperties)
+    {
+        if (!(check_pDrmFormatModifierProperties))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pDrmFormatModifierProperties inconsistent between guest and host\n");
+        }
+        if (forUnmarshaling)
+        {
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->drmFormatModifierCount; ++i)
+            {
+                unmarshal_VkDrmFormatModifierPropertiesEXT(vkStream, rootType, (VkDrmFormatModifierPropertiesEXT*)(forUnmarshaling->pDrmFormatModifierProperties + i));
+            }
+        }
+    }
+}
+
+void marshal_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageDrmFormatModifierInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint64_t*)&forMarshaling->drmFormatModifier, sizeof(uint64_t));
+    vkStream->write((VkSharingMode*)&forMarshaling->sharingMode, sizeof(VkSharingMode));
+    vkStream->write((uint32_t*)&forMarshaling->queueFamilyIndexCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pQueueFamilyIndices;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->pQueueFamilyIndices)
+    {
+        vkStream->write((const uint32_t*)forMarshaling->pQueueFamilyIndices, forMarshaling->queueFamilyIndexCount * sizeof(const uint32_t));
+    }
+}
+
+void unmarshal_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceImageDrmFormatModifierInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint64_t*)&forUnmarshaling->drmFormatModifier, sizeof(uint64_t));
+    vkStream->read((VkSharingMode*)&forUnmarshaling->sharingMode, sizeof(VkSharingMode));
+    vkStream->read((uint32_t*)&forUnmarshaling->queueFamilyIndexCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    const uint32_t* check_pQueueFamilyIndices;
+    check_pQueueFamilyIndices = (const uint32_t*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pQueueFamilyIndices)
+    {
+        if (!(check_pQueueFamilyIndices))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pQueueFamilyIndices inconsistent between guest and host\n");
+        }
+        vkStream->read((uint32_t*)forUnmarshaling->pQueueFamilyIndices, forUnmarshaling->queueFamilyIndexCount * sizeof(const uint32_t));
+    }
+}
+
+void marshal_VkImageDrmFormatModifierListCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierListCreateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->drmFormatModifierCount, sizeof(uint32_t));
+    vkStream->write((const uint64_t*)forMarshaling->pDrmFormatModifiers, forMarshaling->drmFormatModifierCount * sizeof(const uint64_t));
+}
+
+void unmarshal_VkImageDrmFormatModifierListCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageDrmFormatModifierListCreateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->drmFormatModifierCount, sizeof(uint32_t));
+    vkStream->read((uint64_t*)forUnmarshaling->pDrmFormatModifiers, forUnmarshaling->drmFormatModifierCount * sizeof(const uint64_t));
+}
+
+void marshal_VkImageDrmFormatModifierExplicitCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierExplicitCreateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint64_t*)&forMarshaling->drmFormatModifier, sizeof(uint64_t));
+    vkStream->write((uint32_t*)&forMarshaling->drmFormatModifierPlaneCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->drmFormatModifierPlaneCount; ++i)
+        {
+            marshal_VkSubresourceLayout(vkStream, rootType, (const VkSubresourceLayout*)(forMarshaling->pPlaneLayouts + i));
+        }
+    }
+}
+
+void unmarshal_VkImageDrmFormatModifierExplicitCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageDrmFormatModifierExplicitCreateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint64_t*)&forUnmarshaling->drmFormatModifier, sizeof(uint64_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->drmFormatModifierPlaneCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->drmFormatModifierPlaneCount; ++i)
+        {
+            unmarshal_VkSubresourceLayout(vkStream, rootType, (VkSubresourceLayout*)(forUnmarshaling->pPlaneLayouts + i));
+        }
+    }
+}
+
+void marshal_VkImageDrmFormatModifierPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierPropertiesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint64_t*)&forMarshaling->drmFormatModifier, sizeof(uint64_t));
+}
+
+void unmarshal_VkImageDrmFormatModifierPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageDrmFormatModifierPropertiesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint64_t*)&forUnmarshaling->drmFormatModifier, sizeof(uint64_t));
+}
+
+#endif
 #ifdef VK_EXT_validation_cache
 void marshal_VkValidationCacheCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkValidationCacheCreateInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkValidationCacheCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkValidationCacheCreateFlagsEXT));
-    uint64_t cgen_var_364 = (uint64_t)forMarshaling->initialDataSize;
-    vkStream->putBe64(cgen_var_364);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->initialDataSize;
+    vkStream->putBe64(cgen_var_0);
     vkStream->write((const void*)forMarshaling->pInitialData, forMarshaling->initialDataSize * sizeof(const uint8_t));
 }
 
 void unmarshal_VkValidationCacheCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkValidationCacheCreateInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkValidationCacheCreateFlagsEXT*)&forUnmarshaling->flags, sizeof(VkValidationCacheCreateFlagsEXT));
     forUnmarshaling->initialDataSize = (size_t)vkStream->getBe64();
     vkStream->read((void*)forUnmarshaling->pInitialData, forUnmarshaling->initialDataSize * sizeof(const uint8_t));
@@ -12875,314 +16846,1085 @@
 
 void marshal_VkShaderModuleValidationCacheCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkShaderModuleValidationCacheCreateInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    uint64_t cgen_var_366;
-    vkStream->handleMapping()->mapHandles_VkValidationCacheEXT_u64(&forMarshaling->validationCache, &cgen_var_366, 1);
-    vkStream->write((uint64_t*)&cgen_var_366, 1 * 8);
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkValidationCacheEXT_u64(&forMarshaling->validationCache, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
 }
 
 void unmarshal_VkShaderModuleValidationCacheCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkShaderModuleValidationCacheCreateInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    uint64_t cgen_var_367;
-    vkStream->read((uint64_t*)&cgen_var_367, 1 * 8);
-    vkStream->handleMapping()->mapHandles_u64_VkValidationCacheEXT(&cgen_var_367, (VkValidationCacheEXT*)&forUnmarshaling->validationCache, 1);
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkValidationCacheEXT(&cgen_var_0, (VkValidationCacheEXT*)&forUnmarshaling->validationCache, 1);
 }
 
 #endif
 #ifdef VK_EXT_descriptor_indexing
-void marshal_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(
+#endif
+#ifdef VK_EXT_shader_viewport_index_layer
+#endif
+#ifdef VK_NV_shading_rate_image
+void marshal_VkShadingRatePaletteNV(
     VulkanStreamGuest* vkStream,
-    const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT* forMarshaling)
+    VkStructureType rootType,
+    const VkShadingRatePaletteNV* forMarshaling)
 {
+    (void)rootType;
+    vkStream->write((uint32_t*)&forMarshaling->shadingRatePaletteEntryCount, sizeof(uint32_t));
+    vkStream->write((const VkShadingRatePaletteEntryNV*)forMarshaling->pShadingRatePaletteEntries, forMarshaling->shadingRatePaletteEntryCount * sizeof(const VkShadingRatePaletteEntryNV));
+}
+
+void unmarshal_VkShadingRatePaletteNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkShadingRatePaletteNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((uint32_t*)&forUnmarshaling->shadingRatePaletteEntryCount, sizeof(uint32_t));
+    vkStream->read((VkShadingRatePaletteEntryNV*)forUnmarshaling->pShadingRatePaletteEntries, forUnmarshaling->shadingRatePaletteEntryCount * sizeof(const VkShadingRatePaletteEntryNV));
+}
+
+void marshal_VkPipelineViewportShadingRateImageStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportShadingRateImageStateCreateInfoNV* forMarshaling)
+{
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    vkStream->write((uint32_t*)&forMarshaling->bindingCount, sizeof(uint32_t));
-    vkStream->write((const VkDescriptorBindingFlagsEXT*)forMarshaling->pBindingFlags, forMarshaling->bindingCount * sizeof(const VkDescriptorBindingFlagsEXT));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->shadingRateImageEnable, sizeof(VkBool32));
+    vkStream->write((uint32_t*)&forMarshaling->viewportCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pShadingRatePalettes;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->pShadingRatePalettes)
+    {
+        if (forMarshaling)
+        {
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->viewportCount; ++i)
+            {
+                marshal_VkShadingRatePaletteNV(vkStream, rootType, (const VkShadingRatePaletteNV*)(forMarshaling->pShadingRatePalettes + i));
+            }
+        }
+    }
 }
 
-void unmarshal_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(
+void unmarshal_VkPipelineViewportShadingRateImageStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
-    VkDescriptorSetLayoutBindingFlagsCreateInfoEXT* forUnmarshaling)
+    VkStructureType rootType,
+    VkPipelineViewportShadingRateImageStateCreateInfoNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    vkStream->read((uint32_t*)&forUnmarshaling->bindingCount, sizeof(uint32_t));
-    vkStream->read((VkDescriptorBindingFlagsEXT*)forUnmarshaling->pBindingFlags, forUnmarshaling->bindingCount * sizeof(const VkDescriptorBindingFlagsEXT));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->shadingRateImageEnable, sizeof(VkBool32));
+    vkStream->read((uint32_t*)&forUnmarshaling->viewportCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    const VkShadingRatePaletteNV* check_pShadingRatePalettes;
+    check_pShadingRatePalettes = (const VkShadingRatePaletteNV*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pShadingRatePalettes)
+    {
+        if (!(check_pShadingRatePalettes))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pShadingRatePalettes inconsistent between guest and host\n");
+        }
+        if (forUnmarshaling)
+        {
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->viewportCount; ++i)
+            {
+                unmarshal_VkShadingRatePaletteNV(vkStream, rootType, (VkShadingRatePaletteNV*)(forUnmarshaling->pShadingRatePalettes + i));
+            }
+        }
+    }
 }
 
-void marshal_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(
+void marshal_VkPhysicalDeviceShadingRateImageFeaturesNV(
     VulkanStreamGuest* vkStream,
-    const VkPhysicalDeviceDescriptorIndexingFeaturesEXT* forMarshaling)
+    VkStructureType rootType,
+    const VkPhysicalDeviceShadingRateImageFeaturesNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    vkStream->write((VkBool32*)&forMarshaling->shaderInputAttachmentArrayDynamicIndexing, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->shaderUniformTexelBufferArrayDynamicIndexing, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->shaderStorageTexelBufferArrayDynamicIndexing, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->shaderUniformBufferArrayNonUniformIndexing, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->shaderSampledImageArrayNonUniformIndexing, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->shaderStorageBufferArrayNonUniformIndexing, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->shaderStorageImageArrayNonUniformIndexing, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->shaderInputAttachmentArrayNonUniformIndexing, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->shaderUniformTexelBufferArrayNonUniformIndexing, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->shaderStorageTexelBufferArrayNonUniformIndexing, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingUniformBufferUpdateAfterBind, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingSampledImageUpdateAfterBind, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingStorageImageUpdateAfterBind, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingStorageBufferUpdateAfterBind, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingUniformTexelBufferUpdateAfterBind, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingStorageTexelBufferUpdateAfterBind, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingUpdateUnusedWhilePending, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingPartiallyBound, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingVariableDescriptorCount, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->runtimeDescriptorArray, sizeof(VkBool32));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->shadingRateImage, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shadingRateCoarseSampleOrder, sizeof(VkBool32));
 }
 
-void unmarshal_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(
+void unmarshal_VkPhysicalDeviceShadingRateImageFeaturesNV(
     VulkanStreamGuest* vkStream,
-    VkPhysicalDeviceDescriptorIndexingFeaturesEXT* forUnmarshaling)
+    VkStructureType rootType,
+    VkPhysicalDeviceShadingRateImageFeaturesNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    vkStream->read((VkBool32*)&forUnmarshaling->shaderInputAttachmentArrayDynamicIndexing, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->shaderUniformTexelBufferArrayDynamicIndexing, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageTexelBufferArrayDynamicIndexing, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->shaderUniformBufferArrayNonUniformIndexing, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->shaderSampledImageArrayNonUniformIndexing, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageBufferArrayNonUniformIndexing, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageImageArrayNonUniformIndexing, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->shaderInputAttachmentArrayNonUniformIndexing, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->shaderUniformTexelBufferArrayNonUniformIndexing, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageTexelBufferArrayNonUniformIndexing, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingUniformBufferUpdateAfterBind, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingSampledImageUpdateAfterBind, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingStorageImageUpdateAfterBind, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingStorageBufferUpdateAfterBind, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingUniformTexelBufferUpdateAfterBind, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingStorageTexelBufferUpdateAfterBind, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingUpdateUnusedWhilePending, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingPartiallyBound, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingVariableDescriptorCount, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->runtimeDescriptorArray, sizeof(VkBool32));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->shadingRateImage, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shadingRateCoarseSampleOrder, sizeof(VkBool32));
 }
 
-void marshal_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(
+void marshal_VkPhysicalDeviceShadingRateImagePropertiesNV(
     VulkanStreamGuest* vkStream,
-    const VkPhysicalDeviceDescriptorIndexingPropertiesEXT* forMarshaling)
+    VkStructureType rootType,
+    const VkPhysicalDeviceShadingRateImagePropertiesNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    vkStream->write((uint32_t*)&forMarshaling->maxUpdateAfterBindDescriptorsInAllPools, sizeof(uint32_t));
-    vkStream->write((VkBool32*)&forMarshaling->shaderUniformBufferArrayNonUniformIndexingNative, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->shaderSampledImageArrayNonUniformIndexingNative, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->shaderStorageBufferArrayNonUniformIndexingNative, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->shaderStorageImageArrayNonUniformIndexingNative, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->shaderInputAttachmentArrayNonUniformIndexingNative, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->robustBufferAccessUpdateAfterBind, sizeof(VkBool32));
-    vkStream->write((VkBool32*)&forMarshaling->quadDivergentImplicitLod, sizeof(VkBool32));
-    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindSamplers, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindUniformBuffers, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindStorageBuffers, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindSampledImages, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindStorageImages, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindInputAttachments, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxPerStageUpdateAfterBindResources, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindSamplers, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindUniformBuffers, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindStorageBuffers, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindSampledImages, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindStorageImages, sizeof(uint32_t));
-    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindInputAttachments, sizeof(uint32_t));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->shadingRateTexelSize));
+    vkStream->write((uint32_t*)&forMarshaling->shadingRatePaletteSize, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->shadingRateMaxCoarseSamples, sizeof(uint32_t));
 }
 
-void unmarshal_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(
+void unmarshal_VkPhysicalDeviceShadingRateImagePropertiesNV(
     VulkanStreamGuest* vkStream,
-    VkPhysicalDeviceDescriptorIndexingPropertiesEXT* forUnmarshaling)
+    VkStructureType rootType,
+    VkPhysicalDeviceShadingRateImagePropertiesNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    vkStream->read((uint32_t*)&forUnmarshaling->maxUpdateAfterBindDescriptorsInAllPools, sizeof(uint32_t));
-    vkStream->read((VkBool32*)&forUnmarshaling->shaderUniformBufferArrayNonUniformIndexingNative, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->shaderSampledImageArrayNonUniformIndexingNative, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageBufferArrayNonUniformIndexingNative, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->shaderStorageImageArrayNonUniformIndexingNative, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->shaderInputAttachmentArrayNonUniformIndexingNative, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->robustBufferAccessUpdateAfterBind, sizeof(VkBool32));
-    vkStream->read((VkBool32*)&forUnmarshaling->quadDivergentImplicitLod, sizeof(VkBool32));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindSamplers, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindUniformBuffers, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindStorageBuffers, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindSampledImages, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindStorageImages, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindInputAttachments, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageUpdateAfterBindResources, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindSamplers, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindUniformBuffers, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindStorageBuffers, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindSampledImages, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindStorageImages, sizeof(uint32_t));
-    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindInputAttachments, sizeof(uint32_t));
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->shadingRateTexelSize));
+    vkStream->read((uint32_t*)&forUnmarshaling->shadingRatePaletteSize, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->shadingRateMaxCoarseSamples, sizeof(uint32_t));
 }
 
-void marshal_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(
+void marshal_VkCoarseSampleLocationNV(
     VulkanStreamGuest* vkStream,
-    const VkDescriptorSetVariableDescriptorCountAllocateInfoEXT* forMarshaling)
+    VkStructureType rootType,
+    const VkCoarseSampleLocationNV* forMarshaling)
 {
+    (void)rootType;
+    vkStream->write((uint32_t*)&forMarshaling->pixelX, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->pixelY, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->sample, sizeof(uint32_t));
+}
+
+void unmarshal_VkCoarseSampleLocationNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCoarseSampleLocationNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((uint32_t*)&forUnmarshaling->pixelX, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->pixelY, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->sample, sizeof(uint32_t));
+}
+
+void marshal_VkCoarseSampleOrderCustomNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCoarseSampleOrderCustomNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkShadingRatePaletteEntryNV*)&forMarshaling->shadingRate, sizeof(VkShadingRatePaletteEntryNV));
+    vkStream->write((uint32_t*)&forMarshaling->sampleCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->sampleLocationCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->sampleLocationCount; ++i)
+        {
+            marshal_VkCoarseSampleLocationNV(vkStream, rootType, (const VkCoarseSampleLocationNV*)(forMarshaling->pSampleLocations + i));
+        }
+    }
+}
+
+void unmarshal_VkCoarseSampleOrderCustomNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCoarseSampleOrderCustomNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkShadingRatePaletteEntryNV*)&forUnmarshaling->shadingRate, sizeof(VkShadingRatePaletteEntryNV));
+    vkStream->read((uint32_t*)&forUnmarshaling->sampleCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->sampleLocationCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->sampleLocationCount; ++i)
+        {
+            unmarshal_VkCoarseSampleLocationNV(vkStream, rootType, (VkCoarseSampleLocationNV*)(forUnmarshaling->pSampleLocations + i));
+        }
+    }
+}
+
+void marshal_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* forMarshaling)
+{
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
-    vkStream->write((uint32_t*)&forMarshaling->descriptorSetCount, sizeof(uint32_t));
-    vkStream->write((const uint32_t*)forMarshaling->pDescriptorCounts, forMarshaling->descriptorSetCount * sizeof(const uint32_t));
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkCoarseSampleOrderTypeNV*)&forMarshaling->sampleOrderType, sizeof(VkCoarseSampleOrderTypeNV));
+    vkStream->write((uint32_t*)&forMarshaling->customSampleOrderCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->customSampleOrderCount; ++i)
+        {
+            marshal_VkCoarseSampleOrderCustomNV(vkStream, rootType, (const VkCoarseSampleOrderCustomNV*)(forMarshaling->pCustomSampleOrders + i));
+        }
+    }
 }
 
-void unmarshal_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(
+void unmarshal_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
-    VkDescriptorSetVariableDescriptorCountAllocateInfoEXT* forUnmarshaling)
+    VkStructureType rootType,
+    VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
-    vkStream->read((uint32_t*)&forUnmarshaling->descriptorSetCount, sizeof(uint32_t));
-    vkStream->read((uint32_t*)forUnmarshaling->pDescriptorCounts, forUnmarshaling->descriptorSetCount * sizeof(const uint32_t));
-}
-
-void marshal_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(
-    VulkanStreamGuest* vkStream,
-    const VkDescriptorSetVariableDescriptorCountLayoutSupportEXT* forMarshaling)
-{
-    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkCoarseSampleOrderTypeNV*)&forUnmarshaling->sampleOrderType, sizeof(VkCoarseSampleOrderTypeNV));
+    vkStream->read((uint32_t*)&forUnmarshaling->customSampleOrderCount, sizeof(uint32_t));
+    if (forUnmarshaling)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->customSampleOrderCount; ++i)
+        {
+            unmarshal_VkCoarseSampleOrderCustomNV(vkStream, rootType, (VkCoarseSampleOrderCustomNV*)(forUnmarshaling->pCustomSampleOrders + i));
+        }
     }
-    vkStream->write((uint32_t*)&forMarshaling->maxVariableDescriptorCount, sizeof(uint32_t));
-}
-
-void unmarshal_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(
-    VulkanStreamGuest* vkStream,
-    VkDescriptorSetVariableDescriptorCountLayoutSupportEXT* forUnmarshaling)
-{
-    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
-    {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
-    }
-    vkStream->read((uint32_t*)&forUnmarshaling->maxVariableDescriptorCount, sizeof(uint32_t));
 }
 
 #endif
-#ifdef VK_EXT_shader_viewport_index_layer
+#ifdef VK_NV_ray_tracing
+void marshal_VkRayTracingShaderGroupCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingShaderGroupCreateInfoNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkRayTracingShaderGroupTypeKHR*)&forMarshaling->type, sizeof(VkRayTracingShaderGroupTypeKHR));
+    vkStream->write((uint32_t*)&forMarshaling->generalShader, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->closestHitShader, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->anyHitShader, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->intersectionShader, sizeof(uint32_t));
+}
+
+void unmarshal_VkRayTracingShaderGroupCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRayTracingShaderGroupCreateInfoNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkRayTracingShaderGroupTypeKHR*)&forUnmarshaling->type, sizeof(VkRayTracingShaderGroupTypeKHR));
+    vkStream->read((uint32_t*)&forUnmarshaling->generalShader, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->closestHitShader, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->anyHitShader, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->intersectionShader, sizeof(uint32_t));
+}
+
+void marshal_VkRayTracingPipelineCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingPipelineCreateInfoNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkPipelineCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineCreateFlags));
+    vkStream->write((uint32_t*)&forMarshaling->stageCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->stageCount; ++i)
+        {
+            marshal_VkPipelineShaderStageCreateInfo(vkStream, rootType, (const VkPipelineShaderStageCreateInfo*)(forMarshaling->pStages + i));
+        }
+    }
+    vkStream->write((uint32_t*)&forMarshaling->groupCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->groupCount; ++i)
+        {
+            marshal_VkRayTracingShaderGroupCreateInfoNV(vkStream, rootType, (const VkRayTracingShaderGroupCreateInfoNV*)(forMarshaling->pGroups + i));
+        }
+    }
+    vkStream->write((uint32_t*)&forMarshaling->maxRecursionDepth, sizeof(uint32_t));
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkPipelineLayout_u64(&forMarshaling->layout, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkPipeline_u64(&forMarshaling->basePipelineHandle, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->write((int32_t*)&forMarshaling->basePipelineIndex, sizeof(int32_t));
+}
+
+void unmarshal_VkRayTracingPipelineCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRayTracingPipelineCreateInfoNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkPipelineCreateFlags*)&forUnmarshaling->flags, sizeof(VkPipelineCreateFlags));
+    vkStream->read((uint32_t*)&forUnmarshaling->stageCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->stageCount; ++i)
+        {
+            unmarshal_VkPipelineShaderStageCreateInfo(vkStream, rootType, (VkPipelineShaderStageCreateInfo*)(forUnmarshaling->pStages + i));
+        }
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->groupCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->groupCount; ++i)
+        {
+            unmarshal_VkRayTracingShaderGroupCreateInfoNV(vkStream, rootType, (VkRayTracingShaderGroupCreateInfoNV*)(forUnmarshaling->pGroups + i));
+        }
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->maxRecursionDepth, sizeof(uint32_t));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkPipelineLayout(&cgen_var_0, (VkPipelineLayout*)&forUnmarshaling->layout, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkPipeline(&cgen_var_1, (VkPipeline*)&forUnmarshaling->basePipelineHandle, 1);
+    vkStream->read((int32_t*)&forUnmarshaling->basePipelineIndex, sizeof(int32_t));
+}
+
+void marshal_VkGeometryTrianglesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeometryTrianglesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->vertexData, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((VkDeviceSize*)&forMarshaling->vertexOffset, sizeof(VkDeviceSize));
+    vkStream->write((uint32_t*)&forMarshaling->vertexCount, sizeof(uint32_t));
+    vkStream->write((VkDeviceSize*)&forMarshaling->vertexStride, sizeof(VkDeviceSize));
+    vkStream->write((VkFormat*)&forMarshaling->vertexFormat, sizeof(VkFormat));
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->indexData, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->write((VkDeviceSize*)&forMarshaling->indexOffset, sizeof(VkDeviceSize));
+    vkStream->write((uint32_t*)&forMarshaling->indexCount, sizeof(uint32_t));
+    vkStream->write((VkIndexType*)&forMarshaling->indexType, sizeof(VkIndexType));
+    uint64_t cgen_var_2;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->transformData, &cgen_var_2, 1);
+    vkStream->write((uint64_t*)&cgen_var_2, 1 * 8);
+    vkStream->write((VkDeviceSize*)&forMarshaling->transformOffset, sizeof(VkDeviceSize));
+}
+
+void unmarshal_VkGeometryTrianglesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkGeometryTrianglesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_0, (VkBuffer*)&forUnmarshaling->vertexData, 1);
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->vertexOffset, sizeof(VkDeviceSize));
+    vkStream->read((uint32_t*)&forUnmarshaling->vertexCount, sizeof(uint32_t));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->vertexStride, sizeof(VkDeviceSize));
+    vkStream->read((VkFormat*)&forUnmarshaling->vertexFormat, sizeof(VkFormat));
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_1, (VkBuffer*)&forUnmarshaling->indexData, 1);
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->indexOffset, sizeof(VkDeviceSize));
+    vkStream->read((uint32_t*)&forUnmarshaling->indexCount, sizeof(uint32_t));
+    vkStream->read((VkIndexType*)&forUnmarshaling->indexType, sizeof(VkIndexType));
+    uint64_t cgen_var_2;
+    vkStream->read((uint64_t*)&cgen_var_2, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_2, (VkBuffer*)&forUnmarshaling->transformData, 1);
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->transformOffset, sizeof(VkDeviceSize));
+}
+
+void marshal_VkGeometryAABBNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeometryAABBNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->aabbData, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((uint32_t*)&forMarshaling->numAABBs, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->stride, sizeof(uint32_t));
+    vkStream->write((VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
+}
+
+void unmarshal_VkGeometryAABBNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkGeometryAABBNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_0, (VkBuffer*)&forUnmarshaling->aabbData, 1);
+    vkStream->read((uint32_t*)&forUnmarshaling->numAABBs, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->stride, sizeof(uint32_t));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->offset, sizeof(VkDeviceSize));
+}
+
+void marshal_VkGeometryDataNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeometryDataNV* forMarshaling)
+{
+    (void)rootType;
+    marshal_VkGeometryTrianglesNV(vkStream, rootType, (VkGeometryTrianglesNV*)(&forMarshaling->triangles));
+    marshal_VkGeometryAABBNV(vkStream, rootType, (VkGeometryAABBNV*)(&forMarshaling->aabbs));
+}
+
+void unmarshal_VkGeometryDataNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkGeometryDataNV* forUnmarshaling)
+{
+    (void)rootType;
+    unmarshal_VkGeometryTrianglesNV(vkStream, rootType, (VkGeometryTrianglesNV*)(&forUnmarshaling->triangles));
+    unmarshal_VkGeometryAABBNV(vkStream, rootType, (VkGeometryAABBNV*)(&forUnmarshaling->aabbs));
+}
+
+void marshal_VkGeometryNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeometryNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkGeometryTypeKHR*)&forMarshaling->geometryType, sizeof(VkGeometryTypeKHR));
+    marshal_VkGeometryDataNV(vkStream, rootType, (VkGeometryDataNV*)(&forMarshaling->geometry));
+    vkStream->write((VkGeometryFlagsKHR*)&forMarshaling->flags, sizeof(VkGeometryFlagsKHR));
+}
+
+void unmarshal_VkGeometryNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkGeometryNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkGeometryTypeKHR*)&forUnmarshaling->geometryType, sizeof(VkGeometryTypeKHR));
+    unmarshal_VkGeometryDataNV(vkStream, rootType, (VkGeometryDataNV*)(&forUnmarshaling->geometry));
+    vkStream->read((VkGeometryFlagsKHR*)&forUnmarshaling->flags, sizeof(VkGeometryFlagsKHR));
+}
+
+void marshal_VkAccelerationStructureInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureInfoNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkAccelerationStructureTypeNV*)&forMarshaling->type, sizeof(VkAccelerationStructureTypeNV));
+    vkStream->write((VkBuildAccelerationStructureFlagsNV*)&forMarshaling->flags, sizeof(VkBuildAccelerationStructureFlagsNV));
+    vkStream->write((uint32_t*)&forMarshaling->instanceCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->geometryCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->geometryCount; ++i)
+        {
+            marshal_VkGeometryNV(vkStream, rootType, (const VkGeometryNV*)(forMarshaling->pGeometries + i));
+        }
+    }
+}
+
+void unmarshal_VkAccelerationStructureInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureInfoNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkAccelerationStructureTypeNV*)&forUnmarshaling->type, sizeof(VkAccelerationStructureTypeNV));
+    vkStream->read((VkBuildAccelerationStructureFlagsNV*)&forUnmarshaling->flags, sizeof(VkBuildAccelerationStructureFlagsNV));
+    vkStream->read((uint32_t*)&forUnmarshaling->instanceCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->geometryCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->geometryCount; ++i)
+        {
+            unmarshal_VkGeometryNV(vkStream, rootType, (VkGeometryNV*)(forUnmarshaling->pGeometries + i));
+        }
+    }
+}
+
+void marshal_VkAccelerationStructureCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureCreateInfoNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDeviceSize*)&forMarshaling->compactedSize, sizeof(VkDeviceSize));
+    marshal_VkAccelerationStructureInfoNV(vkStream, rootType, (VkAccelerationStructureInfoNV*)(&forMarshaling->info));
+}
+
+void unmarshal_VkAccelerationStructureCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureCreateInfoNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->compactedSize, sizeof(VkDeviceSize));
+    unmarshal_VkAccelerationStructureInfoNV(vkStream, rootType, (VkAccelerationStructureInfoNV*)(&forUnmarshaling->info));
+}
+
+void marshal_VkBindAccelerationStructureMemoryInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindAccelerationStructureMemoryInfoNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkAccelerationStructureNV_u64(&forMarshaling->accelerationStructure, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkDeviceMemory_u64(&forMarshaling->memory, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->write((VkDeviceSize*)&forMarshaling->memoryOffset, sizeof(VkDeviceSize));
+    vkStream->write((uint32_t*)&forMarshaling->deviceIndexCount, sizeof(uint32_t));
+    vkStream->write((const uint32_t*)forMarshaling->pDeviceIndices, forMarshaling->deviceIndexCount * sizeof(const uint32_t));
+}
+
+void unmarshal_VkBindAccelerationStructureMemoryInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBindAccelerationStructureMemoryInfoNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkAccelerationStructureNV(&cgen_var_0, (VkAccelerationStructureNV*)&forUnmarshaling->accelerationStructure, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkDeviceMemory(&cgen_var_1, (VkDeviceMemory*)&forUnmarshaling->memory, 1);
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->memoryOffset, sizeof(VkDeviceSize));
+    vkStream->read((uint32_t*)&forUnmarshaling->deviceIndexCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)forUnmarshaling->pDeviceIndices, forUnmarshaling->deviceIndexCount * sizeof(const uint32_t));
+}
+
+void marshal_VkWriteDescriptorSetAccelerationStructureNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetAccelerationStructureNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->accelerationStructureCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pAccelerationStructures;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->pAccelerationStructures)
+    {
+        if (forMarshaling->accelerationStructureCount)
+        {
+            uint64_t* cgen_var_0_0;
+            vkStream->alloc((void**)&cgen_var_0_0, forMarshaling->accelerationStructureCount * 8);
+            vkStream->handleMapping()->mapHandles_VkAccelerationStructureNV_u64(forMarshaling->pAccelerationStructures, cgen_var_0_0, forMarshaling->accelerationStructureCount);
+            vkStream->write((uint64_t*)cgen_var_0_0, forMarshaling->accelerationStructureCount * 8);
+        }
+    }
+}
+
+void unmarshal_VkWriteDescriptorSetAccelerationStructureNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkWriteDescriptorSetAccelerationStructureNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->accelerationStructureCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    const VkAccelerationStructureNV* check_pAccelerationStructures;
+    check_pAccelerationStructures = (const VkAccelerationStructureNV*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pAccelerationStructures)
+    {
+        if (!(check_pAccelerationStructures))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pAccelerationStructures inconsistent between guest and host\n");
+        }
+        if (forUnmarshaling->accelerationStructureCount)
+        {
+            uint64_t* cgen_var_0_0;
+            vkStream->alloc((void**)&cgen_var_0_0, forUnmarshaling->accelerationStructureCount * 8);
+            vkStream->read((uint64_t*)cgen_var_0_0, forUnmarshaling->accelerationStructureCount * 8);
+            vkStream->handleMapping()->mapHandles_u64_VkAccelerationStructureNV(cgen_var_0_0, (VkAccelerationStructureNV*)forUnmarshaling->pAccelerationStructures, forUnmarshaling->accelerationStructureCount);
+        }
+    }
+}
+
+void marshal_VkAccelerationStructureMemoryRequirementsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureMemoryRequirementsInfoNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkAccelerationStructureMemoryRequirementsTypeNV*)&forMarshaling->type, sizeof(VkAccelerationStructureMemoryRequirementsTypeNV));
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkAccelerationStructureNV_u64(&forMarshaling->accelerationStructure, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+}
+
+void unmarshal_VkAccelerationStructureMemoryRequirementsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureMemoryRequirementsInfoNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkAccelerationStructureMemoryRequirementsTypeNV*)&forUnmarshaling->type, sizeof(VkAccelerationStructureMemoryRequirementsTypeNV));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkAccelerationStructureNV(&cgen_var_0, (VkAccelerationStructureNV*)&forUnmarshaling->accelerationStructure, 1);
+}
+
+void marshal_VkPhysicalDeviceRayTracingPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPropertiesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->shaderGroupHandleSize, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxRecursionDepth, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxShaderGroupStride, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->shaderGroupBaseAlignment, sizeof(uint32_t));
+    vkStream->write((uint64_t*)&forMarshaling->maxGeometryCount, sizeof(uint64_t));
+    vkStream->write((uint64_t*)&forMarshaling->maxInstanceCount, sizeof(uint64_t));
+    vkStream->write((uint64_t*)&forMarshaling->maxTriangleCount, sizeof(uint64_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetAccelerationStructures, sizeof(uint32_t));
+}
+
+void unmarshal_VkPhysicalDeviceRayTracingPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceRayTracingPropertiesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->shaderGroupHandleSize, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxRecursionDepth, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxShaderGroupStride, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->shaderGroupBaseAlignment, sizeof(uint32_t));
+    vkStream->read((uint64_t*)&forUnmarshaling->maxGeometryCount, sizeof(uint64_t));
+    vkStream->read((uint64_t*)&forUnmarshaling->maxInstanceCount, sizeof(uint64_t));
+    vkStream->read((uint64_t*)&forUnmarshaling->maxTriangleCount, sizeof(uint64_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetAccelerationStructures, sizeof(uint32_t));
+}
+
+void marshal_VkTransformMatrixKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkTransformMatrixKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((float*)forMarshaling->matrix, ((3)*(4)) * sizeof(float));
+}
+
+void unmarshal_VkTransformMatrixKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkTransformMatrixKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((float*)forUnmarshaling->matrix, ((3)*(4)) * sizeof(float));
+}
+
+void marshal_VkAabbPositionsKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAabbPositionsKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((float*)&forMarshaling->minX, sizeof(float));
+    vkStream->write((float*)&forMarshaling->minY, sizeof(float));
+    vkStream->write((float*)&forMarshaling->minZ, sizeof(float));
+    vkStream->write((float*)&forMarshaling->maxX, sizeof(float));
+    vkStream->write((float*)&forMarshaling->maxY, sizeof(float));
+    vkStream->write((float*)&forMarshaling->maxZ, sizeof(float));
+}
+
+void unmarshal_VkAabbPositionsKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAabbPositionsKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((float*)&forUnmarshaling->minX, sizeof(float));
+    vkStream->read((float*)&forUnmarshaling->minY, sizeof(float));
+    vkStream->read((float*)&forUnmarshaling->minZ, sizeof(float));
+    vkStream->read((float*)&forUnmarshaling->maxX, sizeof(float));
+    vkStream->read((float*)&forUnmarshaling->maxY, sizeof(float));
+    vkStream->read((float*)&forUnmarshaling->maxZ, sizeof(float));
+}
+
+void marshal_VkAccelerationStructureInstanceKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureInstanceKHR* forMarshaling)
+{
+    (void)rootType;
+    
+    typedef struct VkAccelerationStructureInstanceKHRWithoutBitFields {
+        VkTransformMatrixKHR          transform;
+        uint32_t                      dwords[2];
+        uint64_t                      accelerationStructureReference;
+    } VkAccelerationStructureInstanceKHRWithoutBitFields;
+    
+    const VkAccelerationStructureInstanceKHRWithoutBitFields* forMarshaling_new = (const VkAccelerationStructureInstanceKHRWithoutBitFields*)(forMarshaling);
+    marshal_VkTransformMatrixKHR(vkStream, rootType, (VkTransformMatrixKHR*)(&forMarshaling_new->transform));
+    for (uint32_t i = 0; i < 2; i++) {
+        vkStream->write((uint32_t*)&(forMarshaling_new->dwords[i]), sizeof(uint32_t));
+    }
+    vkStream->write((uint64_t*)&forMarshaling_new->accelerationStructureReference, sizeof(uint64_t));
+    
+}
+
+void unmarshal_VkAccelerationStructureInstanceKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureInstanceKHR* forUnmarshaling)
+{
+    (void)rootType;
+    
+    typedef struct VkAccelerationStructureInstanceKHRWithoutBitFields {
+        VkTransformMatrixKHR          transform;
+        uint32_t                      dwords[2];
+        uint64_t                      accelerationStructureReference;
+    } VkAccelerationStructureInstanceKHRWithoutBitFields;
+    
+    VkAccelerationStructureInstanceKHRWithoutBitFields* forUnmarshaling_new = (VkAccelerationStructureInstanceKHRWithoutBitFields*)(forUnmarshaling);
+    unmarshal_VkTransformMatrixKHR(vkStream, rootType, (VkTransformMatrixKHR*)(&forUnmarshaling_new->transform));
+    for (uint32_t i = 0; i < 2; i++) {
+        vkStream->read((uint32_t*)&(forUnmarshaling_new->dwords[i]), sizeof(uint32_t));
+    }
+    vkStream->read((uint64_t*)&forUnmarshaling_new->accelerationStructureReference, sizeof(uint64_t));
+    
+}
+
+#endif
+#ifdef VK_NV_representative_fragment_test
+void marshal_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->representativeFragmentTest, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->representativeFragmentTest, sizeof(VkBool32));
+}
+
+void marshal_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRepresentativeFragmentTestStateCreateInfoNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->representativeFragmentTestEnable, sizeof(VkBool32));
+}
+
+void unmarshal_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineRepresentativeFragmentTestStateCreateInfoNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->representativeFragmentTestEnable, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_EXT_filter_cubic
+void marshal_VkPhysicalDeviceImageViewImageFormatInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageViewImageFormatInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkImageViewType*)&forMarshaling->imageViewType, sizeof(VkImageViewType));
+}
+
+void unmarshal_VkPhysicalDeviceImageViewImageFormatInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceImageViewImageFormatInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkImageViewType*)&forUnmarshaling->imageViewType, sizeof(VkImageViewType));
+}
+
+void marshal_VkFilterCubicImageViewImageFormatPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFilterCubicImageViewImageFormatPropertiesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->filterCubic, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->filterCubicMinmax, sizeof(VkBool32));
+}
+
+void unmarshal_VkFilterCubicImageViewImageFormatPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkFilterCubicImageViewImageFormatPropertiesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->filterCubic, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->filterCubicMinmax, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
 #endif
 #ifdef VK_EXT_global_priority
 void marshal_VkDeviceQueueGlobalPriorityCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceQueueGlobalPriorityCreateInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkQueueGlobalPriorityEXT*)&forMarshaling->globalPriority, sizeof(VkQueueGlobalPriorityEXT));
 }
 
 void unmarshal_VkDeviceQueueGlobalPriorityCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceQueueGlobalPriorityCreateInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkQueueGlobalPriorityEXT*)&forUnmarshaling->globalPriority, sizeof(VkQueueGlobalPriorityEXT));
 }
 
@@ -13190,20 +17932,20 @@
 #ifdef VK_EXT_external_memory_host
 void marshal_VkImportMemoryHostPointerInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportMemoryHostPointerInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkExternalMemoryHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
     // WARNING PTR CHECK
-    uint64_t cgen_var_368 = (uint64_t)(uintptr_t)forMarshaling->pHostPointer;
-    vkStream->putBe64(cgen_var_368);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pHostPointer;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pHostPointer)
     {
         vkStream->write((void*)forMarshaling->pHostPointer, sizeof(uint8_t));
@@ -13212,17 +17954,16 @@
 
 void unmarshal_VkImportMemoryHostPointerInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportMemoryHostPointerInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkExternalMemoryHandleTypeFlagBits*)&forUnmarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
     // WARNING PTR CHECK
     void* check_pHostPointer;
@@ -13239,82 +17980,144 @@
 
 void marshal_VkMemoryHostPointerPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryHostPointerPropertiesEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->memoryTypeBits, sizeof(uint32_t));
 }
 
 void unmarshal_VkMemoryHostPointerPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryHostPointerPropertiesEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->memoryTypeBits, sizeof(uint32_t));
 }
 
 void marshal_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalMemoryHostPropertiesEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkDeviceSize*)&forMarshaling->minImportedHostPointerAlignment, sizeof(VkDeviceSize));
 }
 
 void unmarshal_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceExternalMemoryHostPropertiesEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkDeviceSize*)&forUnmarshaling->minImportedHostPointerAlignment, sizeof(VkDeviceSize));
 }
 
 #endif
 #ifdef VK_AMD_buffer_marker
 #endif
+#ifdef VK_AMD_pipeline_compiler_control
+void marshal_VkPipelineCompilerControlCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCompilerControlCreateInfoAMD* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkPipelineCompilerControlFlagsAMD*)&forMarshaling->compilerControlFlags, sizeof(VkPipelineCompilerControlFlagsAMD));
+}
+
+void unmarshal_VkPipelineCompilerControlCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineCompilerControlCreateInfoAMD* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkPipelineCompilerControlFlagsAMD*)&forUnmarshaling->compilerControlFlags, sizeof(VkPipelineCompilerControlFlagsAMD));
+}
+
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+void marshal_VkCalibratedTimestampInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCalibratedTimestampInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkTimeDomainEXT*)&forMarshaling->timeDomain, sizeof(VkTimeDomainEXT));
+}
+
+void unmarshal_VkCalibratedTimestampInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCalibratedTimestampInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkTimeDomainEXT*)&forUnmarshaling->timeDomain, sizeof(VkTimeDomainEXT));
+}
+
+#endif
 #ifdef VK_AMD_shader_core_properties
 void marshal_VkPhysicalDeviceShaderCorePropertiesAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceShaderCorePropertiesAMD* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->shaderEngineCount, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->shaderArraysPerEngineCount, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->computeUnitsPerShaderArray, sizeof(uint32_t));
@@ -13333,17 +18136,16 @@
 
 void unmarshal_VkPhysicalDeviceShaderCorePropertiesAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceShaderCorePropertiesAMD* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->shaderEngineCount, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->shaderArraysPerEngineCount, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->computeUnitsPerShaderArray, sizeof(uint32_t));
@@ -13361,144 +18163,613 @@
 }
 
 #endif
+#ifdef VK_AMD_memory_overallocation_behavior
+void marshal_VkDeviceMemoryOverallocationCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceMemoryOverallocationCreateInfoAMD* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkMemoryOverallocationBehaviorAMD*)&forMarshaling->overallocationBehavior, sizeof(VkMemoryOverallocationBehaviorAMD));
+}
+
+void unmarshal_VkDeviceMemoryOverallocationCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDeviceMemoryOverallocationCreateInfoAMD* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkMemoryOverallocationBehaviorAMD*)&forUnmarshaling->overallocationBehavior, sizeof(VkMemoryOverallocationBehaviorAMD));
+}
+
+#endif
 #ifdef VK_EXT_vertex_attribute_divisor
 void marshal_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->maxVertexAttribDivisor, sizeof(uint32_t));
 }
 
 void unmarshal_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->maxVertexAttribDivisor, sizeof(uint32_t));
 }
 
 void marshal_VkVertexInputBindingDivisorDescriptionEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkVertexInputBindingDivisorDescriptionEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((uint32_t*)&forMarshaling->binding, sizeof(uint32_t));
     vkStream->write((uint32_t*)&forMarshaling->divisor, sizeof(uint32_t));
 }
 
 void unmarshal_VkVertexInputBindingDivisorDescriptionEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkVertexInputBindingDivisorDescriptionEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((uint32_t*)&forUnmarshaling->binding, sizeof(uint32_t));
     vkStream->read((uint32_t*)&forUnmarshaling->divisor, sizeof(uint32_t));
 }
 
 void marshal_VkPipelineVertexInputDivisorStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineVertexInputDivisorStateCreateInfoEXT* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((const void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->vertexBindingDivisorCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forMarshaling->vertexBindingDivisorCount; ++i)
+    if (forMarshaling)
     {
-        marshal_VkVertexInputBindingDivisorDescriptionEXT(vkStream, (const VkVertexInputBindingDivisorDescriptionEXT*)(forMarshaling->pVertexBindingDivisors + i));
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->vertexBindingDivisorCount; ++i)
+        {
+            marshal_VkVertexInputBindingDivisorDescriptionEXT(vkStream, rootType, (const VkVertexInputBindingDivisorDescriptionEXT*)(forMarshaling->pVertexBindingDivisors + i));
+        }
     }
 }
 
 void unmarshal_VkPipelineVertexInputDivisorStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineVertexInputDivisorStateCreateInfoEXT* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->vertexBindingDivisorCount, sizeof(uint32_t));
-    for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->vertexBindingDivisorCount; ++i)
+    if (forUnmarshaling)
     {
-        unmarshal_VkVertexInputBindingDivisorDescriptionEXT(vkStream, (VkVertexInputBindingDivisorDescriptionEXT*)(forUnmarshaling->pVertexBindingDivisors + i));
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->vertexBindingDivisorCount; ++i)
+        {
+            unmarshal_VkVertexInputBindingDivisorDescriptionEXT(vkStream, rootType, (VkVertexInputBindingDivisorDescriptionEXT*)(forUnmarshaling->pVertexBindingDivisors + i));
+        }
+    }
+}
+
+void marshal_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->vertexAttributeInstanceRateDivisor, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->vertexAttributeInstanceRateZeroDivisor, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->vertexAttributeInstanceRateDivisor, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->vertexAttributeInstanceRateZeroDivisor, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_GGP_frame_token
+void marshal_VkPresentFrameTokenGGP(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPresentFrameTokenGGP* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((GgpFrameToken*)&forMarshaling->frameToken, sizeof(GgpFrameToken));
+}
+
+void unmarshal_VkPresentFrameTokenGGP(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPresentFrameTokenGGP* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((GgpFrameToken*)&forUnmarshaling->frameToken, sizeof(GgpFrameToken));
+}
+
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+void marshal_VkPipelineCreationFeedbackEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCreationFeedbackEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkPipelineCreationFeedbackFlagsEXT*)&forMarshaling->flags, sizeof(VkPipelineCreationFeedbackFlagsEXT));
+    vkStream->write((uint64_t*)&forMarshaling->duration, sizeof(uint64_t));
+}
+
+void unmarshal_VkPipelineCreationFeedbackEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineCreationFeedbackEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkPipelineCreationFeedbackFlagsEXT*)&forUnmarshaling->flags, sizeof(VkPipelineCreationFeedbackFlagsEXT));
+    vkStream->read((uint64_t*)&forUnmarshaling->duration, sizeof(uint64_t));
+}
+
+void marshal_VkPipelineCreationFeedbackCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCreationFeedbackCreateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkPipelineCreationFeedbackEXT(vkStream, rootType, (VkPipelineCreationFeedbackEXT*)(forMarshaling->pPipelineCreationFeedback));
+    vkStream->write((uint32_t*)&forMarshaling->pipelineStageCreationFeedbackCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->pipelineStageCreationFeedbackCount; ++i)
+        {
+            marshal_VkPipelineCreationFeedbackEXT(vkStream, rootType, (VkPipelineCreationFeedbackEXT*)(forMarshaling->pPipelineStageCreationFeedbacks + i));
+        }
+    }
+}
+
+void unmarshal_VkPipelineCreationFeedbackCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineCreationFeedbackCreateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkPipelineCreationFeedbackEXT(vkStream, rootType, (VkPipelineCreationFeedbackEXT*)(forUnmarshaling->pPipelineCreationFeedback));
+    vkStream->read((uint32_t*)&forUnmarshaling->pipelineStageCreationFeedbackCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->pipelineStageCreationFeedbackCount; ++i)
+        {
+            unmarshal_VkPipelineCreationFeedbackEXT(vkStream, rootType, (VkPipelineCreationFeedbackEXT*)(forUnmarshaling->pPipelineStageCreationFeedbacks + i));
+        }
     }
 }
 
 #endif
 #ifdef VK_NV_shader_subgroup_partitioned
 #endif
+#ifdef VK_NV_compute_shader_derivatives
+void marshal_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->computeDerivativeGroupQuads, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->computeDerivativeGroupLinear, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->computeDerivativeGroupQuads, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->computeDerivativeGroupLinear, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_NV_mesh_shader
+void marshal_VkPhysicalDeviceMeshShaderFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMeshShaderFeaturesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->taskShader, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->meshShader, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceMeshShaderFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceMeshShaderFeaturesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->taskShader, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->meshShader, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceMeshShaderPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMeshShaderPropertiesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->maxDrawMeshTasksCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxTaskWorkGroupInvocations, sizeof(uint32_t));
+    vkStream->write((uint32_t*)forMarshaling->maxTaskWorkGroupSize, 3 * sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxTaskTotalMemorySize, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxTaskOutputCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxMeshWorkGroupInvocations, sizeof(uint32_t));
+    vkStream->write((uint32_t*)forMarshaling->maxMeshWorkGroupSize, 3 * sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxMeshTotalMemorySize, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxMeshOutputVertices, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxMeshOutputPrimitives, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxMeshMultiviewViewCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->meshOutputPerVertexGranularity, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->meshOutputPerPrimitiveGranularity, sizeof(uint32_t));
+}
+
+void unmarshal_VkPhysicalDeviceMeshShaderPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceMeshShaderPropertiesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDrawMeshTasksCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxTaskWorkGroupInvocations, sizeof(uint32_t));
+    vkStream->read((uint32_t*)forUnmarshaling->maxTaskWorkGroupSize, 3 * sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxTaskTotalMemorySize, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxTaskOutputCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxMeshWorkGroupInvocations, sizeof(uint32_t));
+    vkStream->read((uint32_t*)forUnmarshaling->maxMeshWorkGroupSize, 3 * sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxMeshTotalMemorySize, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxMeshOutputVertices, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxMeshOutputPrimitives, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxMeshMultiviewViewCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->meshOutputPerVertexGranularity, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->meshOutputPerPrimitiveGranularity, sizeof(uint32_t));
+}
+
+void marshal_VkDrawMeshTasksIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrawMeshTasksIndirectCommandNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((uint32_t*)&forMarshaling->taskCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->firstTask, sizeof(uint32_t));
+}
+
+void unmarshal_VkDrawMeshTasksIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDrawMeshTasksIndirectCommandNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((uint32_t*)&forUnmarshaling->taskCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->firstTask, sizeof(uint32_t));
+}
+
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+void marshal_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->fragmentShaderBarycentric, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentShaderBarycentric, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_NV_shader_image_footprint
+void marshal_VkPhysicalDeviceShaderImageFootprintFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderImageFootprintFeaturesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->imageFootprint, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceShaderImageFootprintFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderImageFootprintFeaturesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->imageFootprint, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_NV_scissor_exclusive
+void marshal_VkPipelineViewportExclusiveScissorStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportExclusiveScissorStateCreateInfoNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->exclusiveScissorCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pExclusiveScissors;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->pExclusiveScissors)
+    {
+        if (forMarshaling)
+        {
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->exclusiveScissorCount; ++i)
+            {
+                marshal_VkRect2D(vkStream, rootType, (const VkRect2D*)(forMarshaling->pExclusiveScissors + i));
+            }
+        }
+    }
+}
+
+void unmarshal_VkPipelineViewportExclusiveScissorStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineViewportExclusiveScissorStateCreateInfoNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->exclusiveScissorCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    const VkRect2D* check_pExclusiveScissors;
+    check_pExclusiveScissors = (const VkRect2D*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pExclusiveScissors)
+    {
+        if (!(check_pExclusiveScissors))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pExclusiveScissors inconsistent between guest and host\n");
+        }
+        if (forUnmarshaling)
+        {
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->exclusiveScissorCount; ++i)
+            {
+                unmarshal_VkRect2D(vkStream, rootType, (VkRect2D*)(forUnmarshaling->pExclusiveScissors + i));
+            }
+        }
+    }
+}
+
+void marshal_VkPhysicalDeviceExclusiveScissorFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExclusiveScissorFeaturesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->exclusiveScissor, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceExclusiveScissorFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceExclusiveScissorFeaturesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->exclusiveScissor, sizeof(VkBool32));
+}
+
+#endif
 #ifdef VK_NV_device_diagnostic_checkpoints
 void marshal_VkQueueFamilyCheckpointPropertiesNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkQueueFamilyCheckpointPropertiesNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineStageFlags*)&forMarshaling->checkpointExecutionStageMask, sizeof(VkPipelineStageFlags));
 }
 
 void unmarshal_VkQueueFamilyCheckpointPropertiesNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkQueueFamilyCheckpointPropertiesNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineStageFlags*)&forUnmarshaling->checkpointExecutionStageMask, sizeof(VkPipelineStageFlags));
 }
 
 void marshal_VkCheckpointDataNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkCheckpointDataNV* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((VkPipelineStageFlagBits*)&forMarshaling->stage, sizeof(VkPipelineStageFlagBits));
     // WARNING PTR CHECK
-    uint64_t cgen_var_370 = (uint64_t)(uintptr_t)forMarshaling->pCheckpointMarker;
-    vkStream->putBe64(cgen_var_370);
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pCheckpointMarker;
+    vkStream->putBe64(cgen_var_0);
     if (forMarshaling->pCheckpointMarker)
     {
         vkStream->write((void*)forMarshaling->pCheckpointMarker, sizeof(uint8_t));
@@ -13507,17 +18778,16 @@
 
 void unmarshal_VkCheckpointDataNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkCheckpointDataNV* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((VkPipelineStageFlagBits*)&forUnmarshaling->stage, sizeof(VkPipelineStageFlagBits));
     // WARNING PTR CHECK
     void* check_pCheckpointMarker;
@@ -13533,52 +18803,3271 @@
 }
 
 #endif
-#ifdef VK_GOOGLE_address_space
+#ifdef VK_INTEL_shader_integer_functions2
+void marshal_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->shaderIntegerFunctions2, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderIntegerFunctions2, sizeof(VkBool32));
+}
+
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_INTEL_performance_query
+void marshal_VkPerformanceValueDataINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceValueDataINTEL* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((uint32_t*)&forMarshaling->value32, sizeof(uint32_t));
+}
+
+void unmarshal_VkPerformanceValueDataINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceValueDataINTEL* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((uint32_t*)&forUnmarshaling->value32, sizeof(uint32_t));
+}
+
+void marshal_VkPerformanceValueINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceValueINTEL* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkPerformanceValueTypeINTEL*)&forMarshaling->type, sizeof(VkPerformanceValueTypeINTEL));
+    marshal_VkPerformanceValueDataINTEL(vkStream, rootType, (VkPerformanceValueDataINTEL*)(&forMarshaling->data));
+}
+
+void unmarshal_VkPerformanceValueINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceValueINTEL* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkPerformanceValueTypeINTEL*)&forUnmarshaling->type, sizeof(VkPerformanceValueTypeINTEL));
+    unmarshal_VkPerformanceValueDataINTEL(vkStream, rootType, (VkPerformanceValueDataINTEL*)(&forUnmarshaling->data));
+}
+
+void marshal_VkInitializePerformanceApiInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkInitializePerformanceApiInfoINTEL* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pUserData;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->pUserData)
+    {
+        vkStream->write((void*)forMarshaling->pUserData, sizeof(uint8_t));
+    }
+}
+
+void unmarshal_VkInitializePerformanceApiInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkInitializePerformanceApiInfoINTEL* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    // WARNING PTR CHECK
+    void* check_pUserData;
+    check_pUserData = (void*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pUserData)
+    {
+        if (!(check_pUserData))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pUserData inconsistent between guest and host\n");
+        }
+        vkStream->read((void*)forUnmarshaling->pUserData, sizeof(uint8_t));
+    }
+}
+
+void marshal_VkQueryPoolPerformanceQueryCreateInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkQueryPoolPerformanceQueryCreateInfoINTEL* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkQueryPoolSamplingModeINTEL*)&forMarshaling->performanceCountersSampling, sizeof(VkQueryPoolSamplingModeINTEL));
+}
+
+void unmarshal_VkQueryPoolPerformanceQueryCreateInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkQueryPoolPerformanceQueryCreateInfoINTEL* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkQueryPoolSamplingModeINTEL*)&forUnmarshaling->performanceCountersSampling, sizeof(VkQueryPoolSamplingModeINTEL));
+}
+
+void marshal_VkPerformanceMarkerInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceMarkerInfoINTEL* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint64_t*)&forMarshaling->marker, sizeof(uint64_t));
+}
+
+void unmarshal_VkPerformanceMarkerInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceMarkerInfoINTEL* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint64_t*)&forUnmarshaling->marker, sizeof(uint64_t));
+}
+
+void marshal_VkPerformanceStreamMarkerInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceStreamMarkerInfoINTEL* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->marker, sizeof(uint32_t));
+}
+
+void unmarshal_VkPerformanceStreamMarkerInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceStreamMarkerInfoINTEL* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->marker, sizeof(uint32_t));
+}
+
+void marshal_VkPerformanceOverrideInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceOverrideInfoINTEL* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkPerformanceOverrideTypeINTEL*)&forMarshaling->type, sizeof(VkPerformanceOverrideTypeINTEL));
+    vkStream->write((VkBool32*)&forMarshaling->enable, sizeof(VkBool32));
+    vkStream->write((uint64_t*)&forMarshaling->parameter, sizeof(uint64_t));
+}
+
+void unmarshal_VkPerformanceOverrideInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceOverrideInfoINTEL* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkPerformanceOverrideTypeINTEL*)&forUnmarshaling->type, sizeof(VkPerformanceOverrideTypeINTEL));
+    vkStream->read((VkBool32*)&forUnmarshaling->enable, sizeof(VkBool32));
+    vkStream->read((uint64_t*)&forUnmarshaling->parameter, sizeof(uint64_t));
+}
+
+void marshal_VkPerformanceConfigurationAcquireInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceConfigurationAcquireInfoINTEL* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkPerformanceConfigurationTypeINTEL*)&forMarshaling->type, sizeof(VkPerformanceConfigurationTypeINTEL));
+}
+
+void unmarshal_VkPerformanceConfigurationAcquireInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceConfigurationAcquireInfoINTEL* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkPerformanceConfigurationTypeINTEL*)&forUnmarshaling->type, sizeof(VkPerformanceConfigurationTypeINTEL));
+}
+
+#endif
+#ifdef VK_EXT_pci_bus_info
+void marshal_VkPhysicalDevicePCIBusInfoPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePCIBusInfoPropertiesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->pciDomain, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->pciBus, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->pciDevice, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->pciFunction, sizeof(uint32_t));
+}
+
+void unmarshal_VkPhysicalDevicePCIBusInfoPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevicePCIBusInfoPropertiesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->pciDomain, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->pciBus, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->pciDevice, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->pciFunction, sizeof(uint32_t));
+}
+
+#endif
+#ifdef VK_AMD_display_native_hdr
+void marshal_VkDisplayNativeHdrSurfaceCapabilitiesAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayNativeHdrSurfaceCapabilitiesAMD* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->localDimmingSupport, sizeof(VkBool32));
+}
+
+void unmarshal_VkDisplayNativeHdrSurfaceCapabilitiesAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDisplayNativeHdrSurfaceCapabilitiesAMD* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->localDimmingSupport, sizeof(VkBool32));
+}
+
+void marshal_VkSwapchainDisplayNativeHdrCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSwapchainDisplayNativeHdrCreateInfoAMD* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->localDimmingEnable, sizeof(VkBool32));
+}
+
+void unmarshal_VkSwapchainDisplayNativeHdrCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSwapchainDisplayNativeHdrCreateInfoAMD* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->localDimmingEnable, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+void marshal_VkImagePipeSurfaceCreateInfoFUCHSIA(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImagePipeSurfaceCreateInfoFUCHSIA* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkImagePipeSurfaceCreateFlagsFUCHSIA*)&forMarshaling->flags, sizeof(VkImagePipeSurfaceCreateFlagsFUCHSIA));
+    vkStream->write((zx_handle_t*)&forMarshaling->imagePipeHandle, sizeof(zx_handle_t));
+}
+
+void unmarshal_VkImagePipeSurfaceCreateInfoFUCHSIA(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImagePipeSurfaceCreateInfoFUCHSIA* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkImagePipeSurfaceCreateFlagsFUCHSIA*)&forUnmarshaling->flags, sizeof(VkImagePipeSurfaceCreateFlagsFUCHSIA));
+    vkStream->read((zx_handle_t*)&forUnmarshaling->imagePipeHandle, sizeof(zx_handle_t));
+}
+
+#endif
+#ifdef VK_EXT_metal_surface
+void marshal_VkMetalSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMetalSurfaceCreateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkMetalSurfaceCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkMetalSurfaceCreateFlagsEXT));
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pLayer;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->pLayer)
+    {
+        vkStream->write((const CAMetalLayer*)forMarshaling->pLayer, sizeof(const CAMetalLayer));
+    }
+}
+
+void unmarshal_VkMetalSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkMetalSurfaceCreateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkMetalSurfaceCreateFlagsEXT*)&forUnmarshaling->flags, sizeof(VkMetalSurfaceCreateFlagsEXT));
+    // WARNING PTR CHECK
+    const CAMetalLayer* check_pLayer;
+    check_pLayer = (const CAMetalLayer*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pLayer)
+    {
+        if (!(check_pLayer))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pLayer inconsistent between guest and host\n");
+        }
+        vkStream->read((CAMetalLayer*)forUnmarshaling->pLayer, sizeof(const CAMetalLayer));
+    }
+}
+
+#endif
+#ifdef VK_EXT_fragment_density_map
+void marshal_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMapFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->fragmentDensityMap, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->fragmentDensityMapDynamic, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->fragmentDensityMapNonSubsampledImages, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentDensityMapFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    forUnmarshaling->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentDensityMap, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentDensityMapDynamic, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentDensityMapNonSubsampledImages, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMapPropertiesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->minFragmentDensityTexelSize));
+    marshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxFragmentDensityTexelSize));
+    vkStream->write((VkBool32*)&forMarshaling->fragmentDensityInvocations, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentDensityMapPropertiesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    forUnmarshaling->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->minFragmentDensityTexelSize));
+    unmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forUnmarshaling->maxFragmentDensityTexelSize));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentDensityInvocations, sizeof(VkBool32));
+}
+
+void marshal_VkRenderPassFragmentDensityMapCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassFragmentDensityMapCreateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkAttachmentReference(vkStream, rootType, (VkAttachmentReference*)(&forMarshaling->fragmentDensityMapAttachment));
+}
+
+void unmarshal_VkRenderPassFragmentDensityMapCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRenderPassFragmentDensityMapCreateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    forUnmarshaling->sType = VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkAttachmentReference(vkStream, rootType, (VkAttachmentReference*)(&forUnmarshaling->fragmentDensityMapAttachment));
+}
+
+#endif
+#ifdef VK_EXT_scalar_block_layout
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+void marshal_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->subgroupSizeControl, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->computeFullSubgroups, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->subgroupSizeControl, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->computeFullSubgroups, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->minSubgroupSize, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxSubgroupSize, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxComputeWorkgroupSubgroups, sizeof(uint32_t));
+    vkStream->write((VkShaderStageFlags*)&forMarshaling->requiredSubgroupSizeStages, sizeof(VkShaderStageFlags));
+}
+
+void unmarshal_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->minSubgroupSize, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxSubgroupSize, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxComputeWorkgroupSubgroups, sizeof(uint32_t));
+    vkStream->read((VkShaderStageFlags*)&forUnmarshaling->requiredSubgroupSizeStages, sizeof(VkShaderStageFlags));
+}
+
+void marshal_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->requiredSubgroupSize, sizeof(uint32_t));
+}
+
+void unmarshal_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->requiredSubgroupSize, sizeof(uint32_t));
+}
+
+#endif
+#ifdef VK_AMD_shader_core_properties2
+void marshal_VkPhysicalDeviceShaderCoreProperties2AMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderCoreProperties2AMD* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkShaderCorePropertiesFlagsAMD*)&forMarshaling->shaderCoreFeatures, sizeof(VkShaderCorePropertiesFlagsAMD));
+    vkStream->write((uint32_t*)&forMarshaling->activeComputeUnitCount, sizeof(uint32_t));
+}
+
+void unmarshal_VkPhysicalDeviceShaderCoreProperties2AMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderCoreProperties2AMD* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkShaderCorePropertiesFlagsAMD*)&forUnmarshaling->shaderCoreFeatures, sizeof(VkShaderCorePropertiesFlagsAMD));
+    vkStream->read((uint32_t*)&forUnmarshaling->activeComputeUnitCount, sizeof(uint32_t));
+}
+
+#endif
+#ifdef VK_AMD_device_coherent_memory
+void marshal_VkPhysicalDeviceCoherentMemoryFeaturesAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCoherentMemoryFeaturesAMD* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->deviceCoherentMemory, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceCoherentMemoryFeaturesAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceCoherentMemoryFeaturesAMD* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->deviceCoherentMemory, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+void marshal_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->shaderImageInt64Atomics, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->sparseImageInt64Atomics, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderImageInt64Atomics, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->sparseImageInt64Atomics, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_EXT_memory_budget
+void marshal_VkPhysicalDeviceMemoryBudgetPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryBudgetPropertiesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDeviceSize*)forMarshaling->heapBudget, VK_MAX_MEMORY_HEAPS * sizeof(VkDeviceSize));
+    vkStream->write((VkDeviceSize*)forMarshaling->heapUsage, VK_MAX_MEMORY_HEAPS * sizeof(VkDeviceSize));
+}
+
+void unmarshal_VkPhysicalDeviceMemoryBudgetPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceMemoryBudgetPropertiesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDeviceSize*)forUnmarshaling->heapBudget, VK_MAX_MEMORY_HEAPS * sizeof(VkDeviceSize));
+    vkStream->read((VkDeviceSize*)forUnmarshaling->heapUsage, VK_MAX_MEMORY_HEAPS * sizeof(VkDeviceSize));
+}
+
+#endif
+#ifdef VK_EXT_memory_priority
+void marshal_VkPhysicalDeviceMemoryPriorityFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryPriorityFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->memoryPriority, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceMemoryPriorityFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceMemoryPriorityFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->memoryPriority, sizeof(VkBool32));
+}
+
+void marshal_VkMemoryPriorityAllocateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryPriorityAllocateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((float*)&forMarshaling->priority, sizeof(float));
+}
+
+void unmarshal_VkMemoryPriorityAllocateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkMemoryPriorityAllocateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((float*)&forUnmarshaling->priority, sizeof(float));
+}
+
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+void marshal_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->dedicatedAllocationImageAliasing, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->dedicatedAllocationImageAliasing, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_EXT_buffer_device_address
+void marshal_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->bufferDeviceAddress, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->bufferDeviceAddressCaptureReplay, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->bufferDeviceAddressMultiDevice, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->bufferDeviceAddress, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->bufferDeviceAddressCaptureReplay, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->bufferDeviceAddressMultiDevice, sizeof(VkBool32));
+}
+
+void marshal_VkBufferDeviceAddressCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferDeviceAddressCreateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDeviceAddress*)&forMarshaling->deviceAddress, sizeof(VkDeviceAddress));
+}
+
+void unmarshal_VkBufferDeviceAddressCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBufferDeviceAddressCreateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDeviceAddress*)&forUnmarshaling->deviceAddress, sizeof(VkDeviceAddress));
+}
+
+#endif
+#ifdef VK_EXT_tooling_info
+void marshal_VkPhysicalDeviceToolPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceToolPropertiesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((char*)forMarshaling->name, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
+    vkStream->write((char*)forMarshaling->version, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
+    vkStream->write((VkToolPurposeFlagsEXT*)&forMarshaling->purposes, sizeof(VkToolPurposeFlagsEXT));
+    vkStream->write((char*)forMarshaling->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->write((char*)forMarshaling->layer, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
+}
+
+void unmarshal_VkPhysicalDeviceToolPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceToolPropertiesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((char*)forUnmarshaling->name, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
+    vkStream->read((char*)forUnmarshaling->version, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
+    vkStream->read((VkToolPurposeFlagsEXT*)&forUnmarshaling->purposes, sizeof(VkToolPurposeFlagsEXT));
+    vkStream->read((char*)forUnmarshaling->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    vkStream->read((char*)forUnmarshaling->layer, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
+}
+
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+#endif
+#ifdef VK_EXT_validation_features
+void marshal_VkValidationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkValidationFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->enabledValidationFeatureCount, sizeof(uint32_t));
+    vkStream->write((const VkValidationFeatureEnableEXT*)forMarshaling->pEnabledValidationFeatures, forMarshaling->enabledValidationFeatureCount * sizeof(const VkValidationFeatureEnableEXT));
+    vkStream->write((uint32_t*)&forMarshaling->disabledValidationFeatureCount, sizeof(uint32_t));
+    vkStream->write((const VkValidationFeatureDisableEXT*)forMarshaling->pDisabledValidationFeatures, forMarshaling->disabledValidationFeatureCount * sizeof(const VkValidationFeatureDisableEXT));
+}
+
+void unmarshal_VkValidationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkValidationFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->enabledValidationFeatureCount, sizeof(uint32_t));
+    vkStream->read((VkValidationFeatureEnableEXT*)forUnmarshaling->pEnabledValidationFeatures, forUnmarshaling->enabledValidationFeatureCount * sizeof(const VkValidationFeatureEnableEXT));
+    vkStream->read((uint32_t*)&forUnmarshaling->disabledValidationFeatureCount, sizeof(uint32_t));
+    vkStream->read((VkValidationFeatureDisableEXT*)forUnmarshaling->pDisabledValidationFeatures, forUnmarshaling->disabledValidationFeatureCount * sizeof(const VkValidationFeatureDisableEXT));
+}
+
+#endif
+#ifdef VK_NV_cooperative_matrix
+void marshal_VkCooperativeMatrixPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCooperativeMatrixPropertiesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->MSize, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->NSize, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->KSize, sizeof(uint32_t));
+    vkStream->write((VkComponentTypeNV*)&forMarshaling->AType, sizeof(VkComponentTypeNV));
+    vkStream->write((VkComponentTypeNV*)&forMarshaling->BType, sizeof(VkComponentTypeNV));
+    vkStream->write((VkComponentTypeNV*)&forMarshaling->CType, sizeof(VkComponentTypeNV));
+    vkStream->write((VkComponentTypeNV*)&forMarshaling->DType, sizeof(VkComponentTypeNV));
+    vkStream->write((VkScopeNV*)&forMarshaling->scope, sizeof(VkScopeNV));
+}
+
+void unmarshal_VkCooperativeMatrixPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCooperativeMatrixPropertiesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->MSize, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->NSize, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->KSize, sizeof(uint32_t));
+    vkStream->read((VkComponentTypeNV*)&forUnmarshaling->AType, sizeof(VkComponentTypeNV));
+    vkStream->read((VkComponentTypeNV*)&forUnmarshaling->BType, sizeof(VkComponentTypeNV));
+    vkStream->read((VkComponentTypeNV*)&forUnmarshaling->CType, sizeof(VkComponentTypeNV));
+    vkStream->read((VkComponentTypeNV*)&forUnmarshaling->DType, sizeof(VkComponentTypeNV));
+    vkStream->read((VkScopeNV*)&forUnmarshaling->scope, sizeof(VkScopeNV));
+}
+
+void marshal_VkPhysicalDeviceCooperativeMatrixFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCooperativeMatrixFeaturesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->cooperativeMatrix, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->cooperativeMatrixRobustBufferAccess, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceCooperativeMatrixFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceCooperativeMatrixFeaturesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->cooperativeMatrix, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->cooperativeMatrixRobustBufferAccess, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceCooperativeMatrixPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCooperativeMatrixPropertiesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkShaderStageFlags*)&forMarshaling->cooperativeMatrixSupportedStages, sizeof(VkShaderStageFlags));
+}
+
+void unmarshal_VkPhysicalDeviceCooperativeMatrixPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceCooperativeMatrixPropertiesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkShaderStageFlags*)&forUnmarshaling->cooperativeMatrixSupportedStages, sizeof(VkShaderStageFlags));
+}
+
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+void marshal_VkPhysicalDeviceCoverageReductionModeFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCoverageReductionModeFeaturesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->coverageReductionMode, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceCoverageReductionModeFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceCoverageReductionModeFeaturesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->coverageReductionMode, sizeof(VkBool32));
+}
+
+void marshal_VkPipelineCoverageReductionStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCoverageReductionStateCreateInfoNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkPipelineCoverageReductionStateCreateFlagsNV*)&forMarshaling->flags, sizeof(VkPipelineCoverageReductionStateCreateFlagsNV));
+    vkStream->write((VkCoverageReductionModeNV*)&forMarshaling->coverageReductionMode, sizeof(VkCoverageReductionModeNV));
+}
+
+void unmarshal_VkPipelineCoverageReductionStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineCoverageReductionStateCreateInfoNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkPipelineCoverageReductionStateCreateFlagsNV*)&forUnmarshaling->flags, sizeof(VkPipelineCoverageReductionStateCreateFlagsNV));
+    vkStream->read((VkCoverageReductionModeNV*)&forUnmarshaling->coverageReductionMode, sizeof(VkCoverageReductionModeNV));
+}
+
+void marshal_VkFramebufferMixedSamplesCombinationNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFramebufferMixedSamplesCombinationNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkCoverageReductionModeNV*)&forMarshaling->coverageReductionMode, sizeof(VkCoverageReductionModeNV));
+    vkStream->write((VkSampleCountFlagBits*)&forMarshaling->rasterizationSamples, sizeof(VkSampleCountFlagBits));
+    vkStream->write((VkSampleCountFlags*)&forMarshaling->depthStencilSamples, sizeof(VkSampleCountFlags));
+    vkStream->write((VkSampleCountFlags*)&forMarshaling->colorSamples, sizeof(VkSampleCountFlags));
+}
+
+void unmarshal_VkFramebufferMixedSamplesCombinationNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkFramebufferMixedSamplesCombinationNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkCoverageReductionModeNV*)&forUnmarshaling->coverageReductionMode, sizeof(VkCoverageReductionModeNV));
+    vkStream->read((VkSampleCountFlagBits*)&forUnmarshaling->rasterizationSamples, sizeof(VkSampleCountFlagBits));
+    vkStream->read((VkSampleCountFlags*)&forUnmarshaling->depthStencilSamples, sizeof(VkSampleCountFlags));
+    vkStream->read((VkSampleCountFlags*)&forUnmarshaling->colorSamples, sizeof(VkSampleCountFlags));
+}
+
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+void marshal_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->fragmentShaderSampleInterlock, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->fragmentShaderPixelInterlock, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->fragmentShaderShadingRateInterlock, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentShaderSampleInterlock, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentShaderPixelInterlock, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentShaderShadingRateInterlock, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+void marshal_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->ycbcrImageArrays, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->ycbcrImageArrays, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+void marshal_VkSurfaceFullScreenExclusiveInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceFullScreenExclusiveInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkFullScreenExclusiveEXT*)&forMarshaling->fullScreenExclusive, sizeof(VkFullScreenExclusiveEXT));
+}
+
+void unmarshal_VkSurfaceFullScreenExclusiveInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSurfaceFullScreenExclusiveInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkFullScreenExclusiveEXT*)&forUnmarshaling->fullScreenExclusive, sizeof(VkFullScreenExclusiveEXT));
+}
+
+void marshal_VkSurfaceCapabilitiesFullScreenExclusiveEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceCapabilitiesFullScreenExclusiveEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->fullScreenExclusiveSupported, sizeof(VkBool32));
+}
+
+void unmarshal_VkSurfaceCapabilitiesFullScreenExclusiveEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSurfaceCapabilitiesFullScreenExclusiveEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->fullScreenExclusiveSupported, sizeof(VkBool32));
+}
+
+void marshal_VkSurfaceFullScreenExclusiveWin32InfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceFullScreenExclusiveWin32InfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((HMONITOR*)&forMarshaling->hmonitor, sizeof(HMONITOR));
+}
+
+void unmarshal_VkSurfaceFullScreenExclusiveWin32InfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSurfaceFullScreenExclusiveWin32InfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((HMONITOR*)&forUnmarshaling->hmonitor, sizeof(HMONITOR));
+}
+
+#endif
+#ifdef VK_EXT_headless_surface
+void marshal_VkHeadlessSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkHeadlessSurfaceCreateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkHeadlessSurfaceCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkHeadlessSurfaceCreateFlagsEXT));
+}
+
+void unmarshal_VkHeadlessSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkHeadlessSurfaceCreateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkHeadlessSurfaceCreateFlagsEXT*)&forUnmarshaling->flags, sizeof(VkHeadlessSurfaceCreateFlagsEXT));
+}
+
+#endif
+#ifdef VK_EXT_line_rasterization
+void marshal_VkPhysicalDeviceLineRasterizationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLineRasterizationFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->rectangularLines, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->bresenhamLines, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->smoothLines, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->stippledRectangularLines, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->stippledBresenhamLines, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->stippledSmoothLines, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceLineRasterizationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceLineRasterizationFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->rectangularLines, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->bresenhamLines, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->smoothLines, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->stippledRectangularLines, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->stippledBresenhamLines, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->stippledSmoothLines, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceLineRasterizationPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLineRasterizationPropertiesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->lineSubPixelPrecisionBits, sizeof(uint32_t));
+}
+
+void unmarshal_VkPhysicalDeviceLineRasterizationPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceLineRasterizationPropertiesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->lineSubPixelPrecisionBits, sizeof(uint32_t));
+}
+
+void marshal_VkPipelineRasterizationLineStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationLineStateCreateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkLineRasterizationModeEXT*)&forMarshaling->lineRasterizationMode, sizeof(VkLineRasterizationModeEXT));
+    vkStream->write((VkBool32*)&forMarshaling->stippledLineEnable, sizeof(VkBool32));
+    vkStream->write((uint32_t*)&forMarshaling->lineStippleFactor, sizeof(uint32_t));
+    vkStream->write((uint16_t*)&forMarshaling->lineStipplePattern, sizeof(uint16_t));
+}
+
+void unmarshal_VkPipelineRasterizationLineStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineRasterizationLineStateCreateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkLineRasterizationModeEXT*)&forUnmarshaling->lineRasterizationMode, sizeof(VkLineRasterizationModeEXT));
+    vkStream->read((VkBool32*)&forUnmarshaling->stippledLineEnable, sizeof(VkBool32));
+    vkStream->read((uint32_t*)&forUnmarshaling->lineStippleFactor, sizeof(uint32_t));
+    vkStream->read((uint16_t*)&forUnmarshaling->lineStipplePattern, sizeof(uint16_t));
+}
+
+#endif
+#ifdef VK_EXT_shader_atomic_float
+void marshal_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->shaderBufferFloat32Atomics, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderBufferFloat32AtomicAdd, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderBufferFloat64Atomics, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderBufferFloat64AtomicAdd, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSharedFloat32Atomics, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSharedFloat32AtomicAdd, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSharedFloat64Atomics, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderSharedFloat64AtomicAdd, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderImageFloat32Atomics, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->shaderImageFloat32AtomicAdd, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->sparseImageFloat32Atomics, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->sparseImageFloat32AtomicAdd, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderBufferFloat32Atomics, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderBufferFloat32AtomicAdd, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderBufferFloat64Atomics, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderBufferFloat64AtomicAdd, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSharedFloat32Atomics, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSharedFloat32AtomicAdd, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSharedFloat64Atomics, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderSharedFloat64AtomicAdd, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderImageFloat32Atomics, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderImageFloat32AtomicAdd, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->sparseImageFloat32Atomics, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->sparseImageFloat32AtomicAdd, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_EXT_host_query_reset
+#endif
+#ifdef VK_EXT_index_type_uint8
+void marshal_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceIndexTypeUint8FeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->indexTypeUint8, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceIndexTypeUint8FeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->indexTypeUint8, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+void marshal_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->extendedDynamicState, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->extendedDynamicState, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+void marshal_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->shaderDemoteToHelperInvocation, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->shaderDemoteToHelperInvocation, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_NV_device_generated_commands
+void marshal_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->maxGraphicsShaderGroupCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxIndirectSequenceCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxIndirectCommandsTokenCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxIndirectCommandsStreamCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxIndirectCommandsTokenOffset, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxIndirectCommandsStreamStride, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->minSequencesCountBufferOffsetAlignment, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->minSequencesIndexBufferOffsetAlignment, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->minIndirectCommandsBufferOffsetAlignment, sizeof(uint32_t));
+}
+
+void unmarshal_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxGraphicsShaderGroupCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxIndirectSequenceCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxIndirectCommandsTokenCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxIndirectCommandsStreamCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxIndirectCommandsTokenOffset, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxIndirectCommandsStreamStride, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->minSequencesCountBufferOffsetAlignment, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->minSequencesIndexBufferOffsetAlignment, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->minIndirectCommandsBufferOffsetAlignment, sizeof(uint32_t));
+}
+
+void marshal_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->deviceGeneratedCommands, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->deviceGeneratedCommands, sizeof(VkBool32));
+}
+
+void marshal_VkGraphicsShaderGroupCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGraphicsShaderGroupCreateInfoNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->stageCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->stageCount; ++i)
+        {
+            marshal_VkPipelineShaderStageCreateInfo(vkStream, rootType, (const VkPipelineShaderStageCreateInfo*)(forMarshaling->pStages + i));
+        }
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pVertexInputState;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->pVertexInputState)
+    {
+        marshal_VkPipelineVertexInputStateCreateInfo(vkStream, rootType, (const VkPipelineVertexInputStateCreateInfo*)(forMarshaling->pVertexInputState));
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pTessellationState;
+    vkStream->putBe64(cgen_var_1);
+    if (forMarshaling->pTessellationState)
+    {
+        marshal_VkPipelineTessellationStateCreateInfo(vkStream, rootType, (const VkPipelineTessellationStateCreateInfo*)(forMarshaling->pTessellationState));
+    }
+}
+
+void unmarshal_VkGraphicsShaderGroupCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkGraphicsShaderGroupCreateInfoNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->stageCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->stageCount; ++i)
+        {
+            unmarshal_VkPipelineShaderStageCreateInfo(vkStream, rootType, (VkPipelineShaderStageCreateInfo*)(forUnmarshaling->pStages + i));
+        }
+    }
+    // WARNING PTR CHECK
+    const VkPipelineVertexInputStateCreateInfo* check_pVertexInputState;
+    check_pVertexInputState = (const VkPipelineVertexInputStateCreateInfo*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pVertexInputState)
+    {
+        if (!(check_pVertexInputState))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pVertexInputState inconsistent between guest and host\n");
+        }
+        unmarshal_VkPipelineVertexInputStateCreateInfo(vkStream, rootType, (VkPipelineVertexInputStateCreateInfo*)(forUnmarshaling->pVertexInputState));
+    }
+    // WARNING PTR CHECK
+    const VkPipelineTessellationStateCreateInfo* check_pTessellationState;
+    check_pTessellationState = (const VkPipelineTessellationStateCreateInfo*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pTessellationState)
+    {
+        if (!(check_pTessellationState))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pTessellationState inconsistent between guest and host\n");
+        }
+        unmarshal_VkPipelineTessellationStateCreateInfo(vkStream, rootType, (VkPipelineTessellationStateCreateInfo*)(forUnmarshaling->pTessellationState));
+    }
+}
+
+void marshal_VkGraphicsPipelineShaderGroupsCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGraphicsPipelineShaderGroupsCreateInfoNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->groupCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->groupCount; ++i)
+        {
+            marshal_VkGraphicsShaderGroupCreateInfoNV(vkStream, rootType, (const VkGraphicsShaderGroupCreateInfoNV*)(forMarshaling->pGroups + i));
+        }
+    }
+    vkStream->write((uint32_t*)&forMarshaling->pipelineCount, sizeof(uint32_t));
+    if (forMarshaling->pipelineCount)
+    {
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forMarshaling->pipelineCount * 8);
+        vkStream->handleMapping()->mapHandles_VkPipeline_u64(forMarshaling->pPipelines, cgen_var_0, forMarshaling->pipelineCount);
+        vkStream->write((uint64_t*)cgen_var_0, forMarshaling->pipelineCount * 8);
+    }
+}
+
+void unmarshal_VkGraphicsPipelineShaderGroupsCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkGraphicsPipelineShaderGroupsCreateInfoNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->groupCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->groupCount; ++i)
+        {
+            unmarshal_VkGraphicsShaderGroupCreateInfoNV(vkStream, rootType, (VkGraphicsShaderGroupCreateInfoNV*)(forUnmarshaling->pGroups + i));
+        }
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->pipelineCount, sizeof(uint32_t));
+    if (forUnmarshaling->pipelineCount)
+    {
+        uint64_t* cgen_var_0;
+        vkStream->alloc((void**)&cgen_var_0, forUnmarshaling->pipelineCount * 8);
+        vkStream->read((uint64_t*)cgen_var_0, forUnmarshaling->pipelineCount * 8);
+        vkStream->handleMapping()->mapHandles_u64_VkPipeline(cgen_var_0, (VkPipeline*)forUnmarshaling->pPipelines, forUnmarshaling->pipelineCount);
+    }
+}
+
+void marshal_VkBindShaderGroupIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindShaderGroupIndirectCommandNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((uint32_t*)&forMarshaling->groupIndex, sizeof(uint32_t));
+}
+
+void unmarshal_VkBindShaderGroupIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBindShaderGroupIndirectCommandNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((uint32_t*)&forUnmarshaling->groupIndex, sizeof(uint32_t));
+}
+
+void marshal_VkBindIndexBufferIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindIndexBufferIndirectCommandNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkDeviceAddress*)&forMarshaling->bufferAddress, sizeof(VkDeviceAddress));
+    vkStream->write((uint32_t*)&forMarshaling->size, sizeof(uint32_t));
+    vkStream->write((VkIndexType*)&forMarshaling->indexType, sizeof(VkIndexType));
+}
+
+void unmarshal_VkBindIndexBufferIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBindIndexBufferIndirectCommandNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkDeviceAddress*)&forUnmarshaling->bufferAddress, sizeof(VkDeviceAddress));
+    vkStream->read((uint32_t*)&forUnmarshaling->size, sizeof(uint32_t));
+    vkStream->read((VkIndexType*)&forUnmarshaling->indexType, sizeof(VkIndexType));
+}
+
+void marshal_VkBindVertexBufferIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindVertexBufferIndirectCommandNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkDeviceAddress*)&forMarshaling->bufferAddress, sizeof(VkDeviceAddress));
+    vkStream->write((uint32_t*)&forMarshaling->size, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->stride, sizeof(uint32_t));
+}
+
+void unmarshal_VkBindVertexBufferIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBindVertexBufferIndirectCommandNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkDeviceAddress*)&forUnmarshaling->bufferAddress, sizeof(VkDeviceAddress));
+    vkStream->read((uint32_t*)&forUnmarshaling->size, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->stride, sizeof(uint32_t));
+}
+
+void marshal_VkSetStateFlagsIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSetStateFlagsIndirectCommandNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((uint32_t*)&forMarshaling->data, sizeof(uint32_t));
+}
+
+void unmarshal_VkSetStateFlagsIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSetStateFlagsIndirectCommandNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((uint32_t*)&forUnmarshaling->data, sizeof(uint32_t));
+}
+
+void marshal_VkIndirectCommandsStreamNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkIndirectCommandsStreamNV* forMarshaling)
+{
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
+}
+
+void unmarshal_VkIndirectCommandsStreamNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkIndirectCommandsStreamNV* forUnmarshaling)
+{
+    (void)rootType;
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_0, (VkBuffer*)&forUnmarshaling->buffer, 1);
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->offset, sizeof(VkDeviceSize));
+}
+
+void marshal_VkIndirectCommandsLayoutTokenNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkIndirectCommandsLayoutTokenNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkIndirectCommandsTokenTypeNV*)&forMarshaling->tokenType, sizeof(VkIndirectCommandsTokenTypeNV));
+    vkStream->write((uint32_t*)&forMarshaling->stream, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->offset, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->vertexBindingUnit, sizeof(uint32_t));
+    vkStream->write((VkBool32*)&forMarshaling->vertexDynamicStride, sizeof(VkBool32));
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkPipelineLayout_u64(&forMarshaling->pushconstantPipelineLayout, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((VkShaderStageFlags*)&forMarshaling->pushconstantShaderStageFlags, sizeof(VkShaderStageFlags));
+    vkStream->write((uint32_t*)&forMarshaling->pushconstantOffset, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->pushconstantSize, sizeof(uint32_t));
+    vkStream->write((VkIndirectStateFlagsNV*)&forMarshaling->indirectStateFlags, sizeof(VkIndirectStateFlagsNV));
+    vkStream->write((uint32_t*)&forMarshaling->indexTypeCount, sizeof(uint32_t));
+    vkStream->write((const VkIndexType*)forMarshaling->pIndexTypes, forMarshaling->indexTypeCount * sizeof(const VkIndexType));
+    vkStream->write((const uint32_t*)forMarshaling->pIndexTypeValues, forMarshaling->indexTypeCount * sizeof(const uint32_t));
+}
+
+void unmarshal_VkIndirectCommandsLayoutTokenNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkIndirectCommandsLayoutTokenNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkIndirectCommandsTokenTypeNV*)&forUnmarshaling->tokenType, sizeof(VkIndirectCommandsTokenTypeNV));
+    vkStream->read((uint32_t*)&forUnmarshaling->stream, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->offset, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->vertexBindingUnit, sizeof(uint32_t));
+    vkStream->read((VkBool32*)&forUnmarshaling->vertexDynamicStride, sizeof(VkBool32));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkPipelineLayout(&cgen_var_0, (VkPipelineLayout*)&forUnmarshaling->pushconstantPipelineLayout, 1);
+    vkStream->read((VkShaderStageFlags*)&forUnmarshaling->pushconstantShaderStageFlags, sizeof(VkShaderStageFlags));
+    vkStream->read((uint32_t*)&forUnmarshaling->pushconstantOffset, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->pushconstantSize, sizeof(uint32_t));
+    vkStream->read((VkIndirectStateFlagsNV*)&forUnmarshaling->indirectStateFlags, sizeof(VkIndirectStateFlagsNV));
+    vkStream->read((uint32_t*)&forUnmarshaling->indexTypeCount, sizeof(uint32_t));
+    vkStream->read((VkIndexType*)forUnmarshaling->pIndexTypes, forUnmarshaling->indexTypeCount * sizeof(const VkIndexType));
+    vkStream->read((uint32_t*)forUnmarshaling->pIndexTypeValues, forUnmarshaling->indexTypeCount * sizeof(const uint32_t));
+}
+
+void marshal_VkIndirectCommandsLayoutCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkIndirectCommandsLayoutCreateInfoNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkIndirectCommandsLayoutUsageFlagsNV*)&forMarshaling->flags, sizeof(VkIndirectCommandsLayoutUsageFlagsNV));
+    vkStream->write((VkPipelineBindPoint*)&forMarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    vkStream->write((uint32_t*)&forMarshaling->tokenCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->tokenCount; ++i)
+        {
+            marshal_VkIndirectCommandsLayoutTokenNV(vkStream, rootType, (const VkIndirectCommandsLayoutTokenNV*)(forMarshaling->pTokens + i));
+        }
+    }
+    vkStream->write((uint32_t*)&forMarshaling->streamCount, sizeof(uint32_t));
+    vkStream->write((const uint32_t*)forMarshaling->pStreamStrides, forMarshaling->streamCount * sizeof(const uint32_t));
+}
+
+void unmarshal_VkIndirectCommandsLayoutCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkIndirectCommandsLayoutCreateInfoNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkIndirectCommandsLayoutUsageFlagsNV*)&forUnmarshaling->flags, sizeof(VkIndirectCommandsLayoutUsageFlagsNV));
+    vkStream->read((VkPipelineBindPoint*)&forUnmarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    vkStream->read((uint32_t*)&forUnmarshaling->tokenCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->tokenCount; ++i)
+        {
+            unmarshal_VkIndirectCommandsLayoutTokenNV(vkStream, rootType, (VkIndirectCommandsLayoutTokenNV*)(forUnmarshaling->pTokens + i));
+        }
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->streamCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)forUnmarshaling->pStreamStrides, forUnmarshaling->streamCount * sizeof(const uint32_t));
+}
+
+void marshal_VkGeneratedCommandsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeneratedCommandsInfoNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkPipelineBindPoint*)&forMarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkPipeline_u64(&forMarshaling->pipeline, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkIndirectCommandsLayoutNV_u64(&forMarshaling->indirectCommandsLayout, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->write((uint32_t*)&forMarshaling->streamCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->streamCount; ++i)
+        {
+            marshal_VkIndirectCommandsStreamNV(vkStream, rootType, (const VkIndirectCommandsStreamNV*)(forMarshaling->pStreams + i));
+        }
+    }
+    vkStream->write((uint32_t*)&forMarshaling->sequencesCount, sizeof(uint32_t));
+    uint64_t cgen_var_2;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->preprocessBuffer, &cgen_var_2, 1);
+    vkStream->write((uint64_t*)&cgen_var_2, 1 * 8);
+    vkStream->write((VkDeviceSize*)&forMarshaling->preprocessOffset, sizeof(VkDeviceSize));
+    vkStream->write((VkDeviceSize*)&forMarshaling->preprocessSize, sizeof(VkDeviceSize));
+    uint64_t cgen_var_3;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->sequencesCountBuffer, &cgen_var_3, 1);
+    vkStream->write((uint64_t*)&cgen_var_3, 1 * 8);
+    vkStream->write((VkDeviceSize*)&forMarshaling->sequencesCountOffset, sizeof(VkDeviceSize));
+    uint64_t cgen_var_4;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->sequencesIndexBuffer, &cgen_var_4, 1);
+    vkStream->write((uint64_t*)&cgen_var_4, 1 * 8);
+    vkStream->write((VkDeviceSize*)&forMarshaling->sequencesIndexOffset, sizeof(VkDeviceSize));
+}
+
+void unmarshal_VkGeneratedCommandsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkGeneratedCommandsInfoNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkPipelineBindPoint*)&forUnmarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkPipeline(&cgen_var_0, (VkPipeline*)&forUnmarshaling->pipeline, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkIndirectCommandsLayoutNV(&cgen_var_1, (VkIndirectCommandsLayoutNV*)&forUnmarshaling->indirectCommandsLayout, 1);
+    vkStream->read((uint32_t*)&forUnmarshaling->streamCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->streamCount; ++i)
+        {
+            unmarshal_VkIndirectCommandsStreamNV(vkStream, rootType, (VkIndirectCommandsStreamNV*)(forUnmarshaling->pStreams + i));
+        }
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->sequencesCount, sizeof(uint32_t));
+    uint64_t cgen_var_2;
+    vkStream->read((uint64_t*)&cgen_var_2, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_2, (VkBuffer*)&forUnmarshaling->preprocessBuffer, 1);
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->preprocessOffset, sizeof(VkDeviceSize));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->preprocessSize, sizeof(VkDeviceSize));
+    uint64_t cgen_var_3;
+    vkStream->read((uint64_t*)&cgen_var_3, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_3, (VkBuffer*)&forUnmarshaling->sequencesCountBuffer, 1);
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->sequencesCountOffset, sizeof(VkDeviceSize));
+    uint64_t cgen_var_4;
+    vkStream->read((uint64_t*)&cgen_var_4, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_4, (VkBuffer*)&forUnmarshaling->sequencesIndexBuffer, 1);
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->sequencesIndexOffset, sizeof(VkDeviceSize));
+}
+
+void marshal_VkGeneratedCommandsMemoryRequirementsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeneratedCommandsMemoryRequirementsInfoNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkPipelineBindPoint*)&forMarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkPipeline_u64(&forMarshaling->pipeline, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkIndirectCommandsLayoutNV_u64(&forMarshaling->indirectCommandsLayout, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->write((uint32_t*)&forMarshaling->maxSequencesCount, sizeof(uint32_t));
+}
+
+void unmarshal_VkGeneratedCommandsMemoryRequirementsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkGeneratedCommandsMemoryRequirementsInfoNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkPipelineBindPoint*)&forUnmarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkPipeline(&cgen_var_0, (VkPipeline*)&forUnmarshaling->pipeline, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkIndirectCommandsLayoutNV(&cgen_var_1, (VkIndirectCommandsLayoutNV*)&forUnmarshaling->indirectCommandsLayout, 1);
+    vkStream->read((uint32_t*)&forUnmarshaling->maxSequencesCount, sizeof(uint32_t));
+}
+
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+void marshal_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->texelBufferAlignment, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->texelBufferAlignment, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDeviceSize*)&forMarshaling->storageTexelBufferOffsetAlignmentBytes, sizeof(VkDeviceSize));
+    vkStream->write((VkBool32*)&forMarshaling->storageTexelBufferOffsetSingleTexelAlignment, sizeof(VkBool32));
+    vkStream->write((VkDeviceSize*)&forMarshaling->uniformTexelBufferOffsetAlignmentBytes, sizeof(VkDeviceSize));
+    vkStream->write((VkBool32*)&forMarshaling->uniformTexelBufferOffsetSingleTexelAlignment, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->storageTexelBufferOffsetAlignmentBytes, sizeof(VkDeviceSize));
+    vkStream->read((VkBool32*)&forUnmarshaling->storageTexelBufferOffsetSingleTexelAlignment, sizeof(VkBool32));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->uniformTexelBufferOffsetAlignmentBytes, sizeof(VkDeviceSize));
+    vkStream->read((VkBool32*)&forUnmarshaling->uniformTexelBufferOffsetSingleTexelAlignment, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_transform
+void marshal_VkRenderPassTransformBeginInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassTransformBeginInfoQCOM* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkSurfaceTransformFlagBitsKHR*)&forMarshaling->transform, sizeof(VkSurfaceTransformFlagBitsKHR));
+}
+
+void unmarshal_VkRenderPassTransformBeginInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRenderPassTransformBeginInfoQCOM* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkSurfaceTransformFlagBitsKHR*)&forUnmarshaling->transform, sizeof(VkSurfaceTransformFlagBitsKHR));
+}
+
+void marshal_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceRenderPassTransformInfoQCOM* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkSurfaceTransformFlagBitsKHR*)&forMarshaling->transform, sizeof(VkSurfaceTransformFlagBitsKHR));
+    marshal_VkRect2D(vkStream, rootType, (VkRect2D*)(&forMarshaling->renderArea));
+}
+
+void unmarshal_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCommandBufferInheritanceRenderPassTransformInfoQCOM* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkSurfaceTransformFlagBitsKHR*)&forUnmarshaling->transform, sizeof(VkSurfaceTransformFlagBitsKHR));
+    unmarshal_VkRect2D(vkStream, rootType, (VkRect2D*)(&forUnmarshaling->renderArea));
+}
+
+#endif
+#ifdef VK_EXT_device_memory_report
+void marshal_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->deviceMemoryReport, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->deviceMemoryReport, sizeof(VkBool32));
+}
+
+void marshal_VkDeviceMemoryReportCallbackDataEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceMemoryReportCallbackDataEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDeviceMemoryReportFlagsEXT*)&forMarshaling->flags, sizeof(VkDeviceMemoryReportFlagsEXT));
+    vkStream->write((VkDeviceMemoryReportEventTypeEXT*)&forMarshaling->type, sizeof(VkDeviceMemoryReportEventTypeEXT));
+    vkStream->write((uint64_t*)&forMarshaling->memoryObjectId, sizeof(uint64_t));
+    vkStream->write((VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    vkStream->write((VkObjectType*)&forMarshaling->objectType, sizeof(VkObjectType));
+    vkStream->write((uint64_t*)&forMarshaling->objectHandle, sizeof(uint64_t));
+    vkStream->write((uint32_t*)&forMarshaling->heapIndex, sizeof(uint32_t));
+}
+
+void unmarshal_VkDeviceMemoryReportCallbackDataEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDeviceMemoryReportCallbackDataEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDeviceMemoryReportFlagsEXT*)&forUnmarshaling->flags, sizeof(VkDeviceMemoryReportFlagsEXT));
+    vkStream->read((VkDeviceMemoryReportEventTypeEXT*)&forUnmarshaling->type, sizeof(VkDeviceMemoryReportEventTypeEXT));
+    vkStream->read((uint64_t*)&forUnmarshaling->memoryObjectId, sizeof(uint64_t));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->size, sizeof(VkDeviceSize));
+    vkStream->read((VkObjectType*)&forUnmarshaling->objectType, sizeof(VkObjectType));
+    vkStream->read((uint64_t*)&forUnmarshaling->objectHandle, sizeof(uint64_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->heapIndex, sizeof(uint32_t));
+}
+
+void marshal_VkDeviceDeviceMemoryReportCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceDeviceMemoryReportCreateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDeviceMemoryReportFlagsEXT*)&forMarshaling->flags, sizeof(VkDeviceMemoryReportFlagsEXT));
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->pfnUserCallback;
+    vkStream->putBe64(cgen_var_0);
+    vkStream->write((void*)forMarshaling->pUserData, sizeof(uint8_t));
+}
+
+void unmarshal_VkDeviceDeviceMemoryReportCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDeviceDeviceMemoryReportCreateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDeviceMemoryReportFlagsEXT*)&forUnmarshaling->flags, sizeof(VkDeviceMemoryReportFlagsEXT));
+    forUnmarshaling->pfnUserCallback = (PFN_vkDeviceMemoryReportCallbackEXT)vkStream->getBe64();
+    vkStream->read((void*)forUnmarshaling->pUserData, sizeof(uint8_t));
+}
+
+#endif
+#ifdef VK_EXT_robustness2
+void marshal_VkPhysicalDeviceRobustness2FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRobustness2FeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->robustBufferAccess2, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->robustImageAccess2, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->nullDescriptor, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceRobustness2FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceRobustness2FeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->robustBufferAccess2, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->robustImageAccess2, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->nullDescriptor, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceRobustness2PropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRobustness2PropertiesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDeviceSize*)&forMarshaling->robustStorageBufferAccessSizeAlignment, sizeof(VkDeviceSize));
+    vkStream->write((VkDeviceSize*)&forMarshaling->robustUniformBufferAccessSizeAlignment, sizeof(VkDeviceSize));
+}
+
+void unmarshal_VkPhysicalDeviceRobustness2PropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceRobustness2PropertiesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->robustStorageBufferAccessSizeAlignment, sizeof(VkDeviceSize));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->robustUniformBufferAccessSizeAlignment, sizeof(VkDeviceSize));
+}
+
+#endif
+#ifdef VK_EXT_custom_border_color
+void marshal_VkSamplerCustomBorderColorCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSamplerCustomBorderColorCreateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkClearColorValue(vkStream, rootType, (VkClearColorValue*)(&forMarshaling->customBorderColor));
+    vkStream->write((VkFormat*)&forMarshaling->format, sizeof(VkFormat));
+}
+
+void unmarshal_VkSamplerCustomBorderColorCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSamplerCustomBorderColorCreateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkClearColorValue(vkStream, rootType, (VkClearColorValue*)(&forUnmarshaling->customBorderColor));
+    vkStream->read((VkFormat*)&forUnmarshaling->format, sizeof(VkFormat));
+}
+
+void marshal_VkPhysicalDeviceCustomBorderColorPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCustomBorderColorPropertiesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->maxCustomBorderColorSamplers, sizeof(uint32_t));
+}
+
+void unmarshal_VkPhysicalDeviceCustomBorderColorPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceCustomBorderColorPropertiesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxCustomBorderColorSamplers, sizeof(uint32_t));
+}
+
+void marshal_VkPhysicalDeviceCustomBorderColorFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCustomBorderColorFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->customBorderColors, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->customBorderColorWithoutFormat, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceCustomBorderColorFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceCustomBorderColorFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->customBorderColors, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->customBorderColorWithoutFormat, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+void marshal_VkPhysicalDevicePrivateDataFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePrivateDataFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->privateData, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDevicePrivateDataFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevicePrivateDataFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->privateData, sizeof(VkBool32));
+}
+
+void marshal_VkDevicePrivateDataCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDevicePrivateDataCreateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->privateDataSlotRequestCount, sizeof(uint32_t));
+}
+
+void unmarshal_VkDevicePrivateDataCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDevicePrivateDataCreateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->privateDataSlotRequestCount, sizeof(uint32_t));
+}
+
+void marshal_VkPrivateDataSlotCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPrivateDataSlotCreateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkPrivateDataSlotCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkPrivateDataSlotCreateFlagsEXT));
+}
+
+void unmarshal_VkPrivateDataSlotCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPrivateDataSlotCreateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkPrivateDataSlotCreateFlagsEXT*)&forUnmarshaling->flags, sizeof(VkPrivateDataSlotCreateFlagsEXT));
+}
+
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+void marshal_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->pipelineCreationCacheControl, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->pipelineCreationCacheControl, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_NV_device_diagnostics_config
+void marshal_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDiagnosticsConfigFeaturesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->diagnosticsConfig, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDiagnosticsConfigFeaturesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->diagnosticsConfig, sizeof(VkBool32));
+}
+
+void marshal_VkDeviceDiagnosticsConfigCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceDiagnosticsConfigCreateInfoNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDeviceDiagnosticsConfigFlagsNV*)&forMarshaling->flags, sizeof(VkDeviceDiagnosticsConfigFlagsNV));
+}
+
+void unmarshal_VkDeviceDiagnosticsConfigCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDeviceDiagnosticsConfigCreateInfoNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDeviceDiagnosticsConfigFlagsNV*)&forUnmarshaling->flags, sizeof(VkDeviceDiagnosticsConfigFlagsNV));
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+void marshal_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->fragmentShadingRateEnums, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->supersampleFragmentShadingRates, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->noInvocationFragmentShadingRates, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentShadingRateEnums, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->supersampleFragmentShadingRates, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->noInvocationFragmentShadingRates, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkSampleCountFlagBits*)&forMarshaling->maxFragmentShadingRateInvocationCount, sizeof(VkSampleCountFlagBits));
+}
+
+void unmarshal_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkSampleCountFlagBits*)&forUnmarshaling->maxFragmentShadingRateInvocationCount, sizeof(VkSampleCountFlagBits));
+}
+
+void marshal_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineFragmentShadingRateEnumStateCreateInfoNV* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkFragmentShadingRateTypeNV*)&forMarshaling->shadingRateType, sizeof(VkFragmentShadingRateTypeNV));
+    vkStream->write((VkFragmentShadingRateNV*)&forMarshaling->shadingRate, sizeof(VkFragmentShadingRateNV));
+    vkStream->write((VkFragmentShadingRateCombinerOpKHR*)forMarshaling->combinerOps, 2 * sizeof(VkFragmentShadingRateCombinerOpKHR));
+}
+
+void unmarshal_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineFragmentShadingRateEnumStateCreateInfoNV* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkFragmentShadingRateTypeNV*)&forUnmarshaling->shadingRateType, sizeof(VkFragmentShadingRateTypeNV));
+    vkStream->read((VkFragmentShadingRateNV*)&forUnmarshaling->shadingRate, sizeof(VkFragmentShadingRateNV));
+    vkStream->read((VkFragmentShadingRateCombinerOpKHR*)forUnmarshaling->combinerOps, 2 * sizeof(VkFragmentShadingRateCombinerOpKHR));
+}
+
+#endif
+#ifdef VK_EXT_fragment_density_map2
+void marshal_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->fragmentDensityMapDeferred, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->fragmentDensityMapDeferred, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->subsampledLoads, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->subsampledCoarseReconstructionEarlyAccess, sizeof(VkBool32));
+    vkStream->write((uint32_t*)&forMarshaling->maxSubsampledArrayLayers, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetSubsampledSamplers, sizeof(uint32_t));
+}
+
+void unmarshal_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->subsampledLoads, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->subsampledCoarseReconstructionEarlyAccess, sizeof(VkBool32));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxSubsampledArrayLayers, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetSubsampledSamplers, sizeof(uint32_t));
+}
+
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+void marshal_VkCopyCommandTransformInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyCommandTransformInfoQCOM* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkSurfaceTransformFlagBitsKHR*)&forMarshaling->transform, sizeof(VkSurfaceTransformFlagBitsKHR));
+}
+
+void unmarshal_VkCopyCommandTransformInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCopyCommandTransformInfoQCOM* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkSurfaceTransformFlagBitsKHR*)&forUnmarshaling->transform, sizeof(VkSurfaceTransformFlagBitsKHR));
+}
+
+#endif
+#ifdef VK_EXT_image_robustness
+void marshal_VkPhysicalDeviceImageRobustnessFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageRobustnessFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->robustImageAccess, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceImageRobustnessFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceImageRobustnessFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->robustImageAccess, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_EXT_4444_formats
+void marshal_VkPhysicalDevice4444FormatsFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevice4444FormatsFeaturesEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->formatA4R4G4B4, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->formatA4B4G4R4, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDevice4444FormatsFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevice4444FormatsFeaturesEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->formatA4R4G4B4, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->formatA4B4G4R4, sizeof(VkBool32));
+}
+
+#endif
+#ifdef VK_EXT_directfb_surface
+void marshal_VkDirectFBSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDirectFBSurfaceCreateInfoEXT* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDirectFBSurfaceCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkDirectFBSurfaceCreateFlagsEXT));
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->dfb;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->dfb)
+    {
+        vkStream->write((IDirectFB*)forMarshaling->dfb, sizeof(IDirectFB));
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->surface;
+    vkStream->putBe64(cgen_var_1);
+    if (forMarshaling->surface)
+    {
+        vkStream->write((IDirectFBSurface*)forMarshaling->surface, sizeof(IDirectFBSurface));
+    }
+}
+
+void unmarshal_VkDirectFBSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDirectFBSurfaceCreateInfoEXT* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDirectFBSurfaceCreateFlagsEXT*)&forUnmarshaling->flags, sizeof(VkDirectFBSurfaceCreateFlagsEXT));
+    // WARNING PTR CHECK
+    IDirectFB* check_dfb;
+    check_dfb = (IDirectFB*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->dfb)
+    {
+        if (!(check_dfb))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->dfb inconsistent between guest and host\n");
+        }
+        vkStream->read((IDirectFB*)forUnmarshaling->dfb, sizeof(IDirectFB));
+    }
+    // WARNING PTR CHECK
+    IDirectFBSurface* check_surface;
+    check_surface = (IDirectFBSurface*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->surface)
+    {
+        if (!(check_surface))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->surface inconsistent between guest and host\n");
+        }
+        vkStream->read((IDirectFBSurface*)forUnmarshaling->surface, sizeof(IDirectFBSurface));
+    }
+}
+
+#endif
+#ifdef VK_GOOGLE_gfxstream
 void marshal_VkImportColorBufferGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportColorBufferGOOGLE* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint32_t*)&forMarshaling->colorBuffer, sizeof(uint32_t));
 }
 
 void unmarshal_VkImportColorBufferGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportColorBufferGOOGLE* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    forUnmarshaling->sType = VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint32_t*)&forUnmarshaling->colorBuffer, sizeof(uint32_t));
 }
 
+void marshal_VkImportBufferGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportBufferGOOGLE* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->buffer, sizeof(uint32_t));
+}
+
+void unmarshal_VkImportBufferGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImportBufferGOOGLE* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    forUnmarshaling->sType = VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->buffer, sizeof(uint32_t));
+}
+
 void marshal_VkImportPhysicalAddressGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportPhysicalAddressGOOGLE* forMarshaling)
 {
+    (void)rootType;
     vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size = goldfish_vk_extension_struct_size(forMarshaling->pNext);
-    vkStream->putBe32(pNext_size);
-    if (pNext_size)
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        vkStream->write((void*)forMarshaling->pNext, sizeof(VkStructureType));
-        marshal_extension_struct(vkStream, forMarshaling->pNext);
+        rootType = forMarshaling->sType;
     }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
     vkStream->write((uint64_t*)&forMarshaling->physicalAddress, sizeof(uint64_t));
     vkStream->write((VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
     vkStream->write((VkFormat*)&forMarshaling->format, sizeof(VkFormat));
@@ -13588,17 +22077,17 @@
 
 void unmarshal_VkImportPhysicalAddressGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportPhysicalAddressGOOGLE* forUnmarshaling)
 {
+    (void)rootType;
     vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
-    size_t pNext_size;
-    pNext_size = vkStream->getBe32();
-    if (pNext_size)
+    forUnmarshaling->sType = VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE;
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
     {
-        uint64_t pNext_placeholder;
-        vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
-        unmarshal_extension_struct(vkStream, (void*)(forUnmarshaling->pNext));
+        rootType = forUnmarshaling->sType;
     }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
     vkStream->read((uint64_t*)&forUnmarshaling->physicalAddress, sizeof(uint64_t));
     vkStream->read((VkDeviceSize*)&forUnmarshaling->size, sizeof(VkDeviceSize));
     vkStream->read((VkFormat*)&forUnmarshaling->format, sizeof(VkFormat));
@@ -13607,20 +22096,1112 @@
 }
 
 #endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
+#ifdef VK_KHR_acceleration_structure
+void marshal_VkDeviceOrHostAddressKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceOrHostAddressKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkDeviceAddress*)&forMarshaling->deviceAddress, sizeof(VkDeviceAddress));
+}
+
+void unmarshal_VkDeviceOrHostAddressKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDeviceOrHostAddressKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkDeviceAddress*)&forUnmarshaling->deviceAddress, sizeof(VkDeviceAddress));
+}
+
+void marshal_VkDeviceOrHostAddressConstKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceOrHostAddressConstKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkDeviceAddress*)&forMarshaling->deviceAddress, sizeof(VkDeviceAddress));
+}
+
+void unmarshal_VkDeviceOrHostAddressConstKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDeviceOrHostAddressConstKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkDeviceAddress*)&forUnmarshaling->deviceAddress, sizeof(VkDeviceAddress));
+}
+
+void marshal_VkAccelerationStructureBuildRangeInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildRangeInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((uint32_t*)&forMarshaling->primitiveCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->primitiveOffset, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->firstVertex, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->transformOffset, sizeof(uint32_t));
+}
+
+void unmarshal_VkAccelerationStructureBuildRangeInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureBuildRangeInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((uint32_t*)&forUnmarshaling->primitiveCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->primitiveOffset, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->firstVertex, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->transformOffset, sizeof(uint32_t));
+}
+
+void marshal_VkAccelerationStructureGeometryTrianglesDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryTrianglesDataKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkFormat*)&forMarshaling->vertexFormat, sizeof(VkFormat));
+    marshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forMarshaling->vertexData));
+    vkStream->write((VkDeviceSize*)&forMarshaling->vertexStride, sizeof(VkDeviceSize));
+    vkStream->write((uint32_t*)&forMarshaling->maxVertex, sizeof(uint32_t));
+    vkStream->write((VkIndexType*)&forMarshaling->indexType, sizeof(VkIndexType));
+    marshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forMarshaling->indexData));
+    marshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forMarshaling->transformData));
+}
+
+void unmarshal_VkAccelerationStructureGeometryTrianglesDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureGeometryTrianglesDataKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkFormat*)&forUnmarshaling->vertexFormat, sizeof(VkFormat));
+    unmarshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forUnmarshaling->vertexData));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->vertexStride, sizeof(VkDeviceSize));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxVertex, sizeof(uint32_t));
+    vkStream->read((VkIndexType*)&forUnmarshaling->indexType, sizeof(VkIndexType));
+    unmarshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forUnmarshaling->indexData));
+    unmarshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forUnmarshaling->transformData));
+}
+
+void marshal_VkAccelerationStructureGeometryAabbsDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryAabbsDataKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forMarshaling->data));
+    vkStream->write((VkDeviceSize*)&forMarshaling->stride, sizeof(VkDeviceSize));
+}
+
+void unmarshal_VkAccelerationStructureGeometryAabbsDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureGeometryAabbsDataKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forUnmarshaling->data));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->stride, sizeof(VkDeviceSize));
+}
+
+void marshal_VkAccelerationStructureGeometryInstancesDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryInstancesDataKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->arrayOfPointers, sizeof(VkBool32));
+    marshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forMarshaling->data));
+}
+
+void unmarshal_VkAccelerationStructureGeometryInstancesDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureGeometryInstancesDataKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->arrayOfPointers, sizeof(VkBool32));
+    unmarshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forUnmarshaling->data));
+}
+
+void marshal_VkAccelerationStructureGeometryDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryDataKHR* forMarshaling)
+{
+    (void)rootType;
+    marshal_VkAccelerationStructureGeometryTrianglesDataKHR(vkStream, rootType, (VkAccelerationStructureGeometryTrianglesDataKHR*)(&forMarshaling->triangles));
+}
+
+void unmarshal_VkAccelerationStructureGeometryDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureGeometryDataKHR* forUnmarshaling)
+{
+    (void)rootType;
+    unmarshal_VkAccelerationStructureGeometryTrianglesDataKHR(vkStream, rootType, (VkAccelerationStructureGeometryTrianglesDataKHR*)(&forUnmarshaling->triangles));
+}
+
+void marshal_VkAccelerationStructureGeometryKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkGeometryTypeKHR*)&forMarshaling->geometryType, sizeof(VkGeometryTypeKHR));
+    marshal_VkAccelerationStructureGeometryDataKHR(vkStream, rootType, (VkAccelerationStructureGeometryDataKHR*)(&forMarshaling->geometry));
+    vkStream->write((VkGeometryFlagsKHR*)&forMarshaling->flags, sizeof(VkGeometryFlagsKHR));
+}
+
+void unmarshal_VkAccelerationStructureGeometryKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureGeometryKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkGeometryTypeKHR*)&forUnmarshaling->geometryType, sizeof(VkGeometryTypeKHR));
+    unmarshal_VkAccelerationStructureGeometryDataKHR(vkStream, rootType, (VkAccelerationStructureGeometryDataKHR*)(&forUnmarshaling->geometry));
+    vkStream->read((VkGeometryFlagsKHR*)&forUnmarshaling->flags, sizeof(VkGeometryFlagsKHR));
+}
+
+void marshal_VkAccelerationStructureBuildGeometryInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildGeometryInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkAccelerationStructureTypeKHR*)&forMarshaling->type, sizeof(VkAccelerationStructureTypeKHR));
+    vkStream->write((VkBuildAccelerationStructureFlagsKHR*)&forMarshaling->flags, sizeof(VkBuildAccelerationStructureFlagsKHR));
+    vkStream->write((VkBuildAccelerationStructureModeKHR*)&forMarshaling->mode, sizeof(VkBuildAccelerationStructureModeKHR));
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkAccelerationStructureKHR_u64(&forMarshaling->srcAccelerationStructure, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkAccelerationStructureKHR_u64(&forMarshaling->dstAccelerationStructure, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->write((uint32_t*)&forMarshaling->geometryCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)forMarshaling->pGeometries;
+    vkStream->putBe64(cgen_var_2);
+    if (forMarshaling->pGeometries)
+    {
+        if (forMarshaling)
+        {
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->geometryCount; ++i)
+            {
+                marshal_VkAccelerationStructureGeometryKHR(vkStream, rootType, (const VkAccelerationStructureGeometryKHR*)(forMarshaling->pGeometries + i));
+            }
+        }
+    }
+    marshal_VkDeviceOrHostAddressKHR(vkStream, rootType, (VkDeviceOrHostAddressKHR*)(&forMarshaling->scratchData));
+}
+
+void unmarshal_VkAccelerationStructureBuildGeometryInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureBuildGeometryInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkAccelerationStructureTypeKHR*)&forUnmarshaling->type, sizeof(VkAccelerationStructureTypeKHR));
+    vkStream->read((VkBuildAccelerationStructureFlagsKHR*)&forUnmarshaling->flags, sizeof(VkBuildAccelerationStructureFlagsKHR));
+    vkStream->read((VkBuildAccelerationStructureModeKHR*)&forUnmarshaling->mode, sizeof(VkBuildAccelerationStructureModeKHR));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkAccelerationStructureKHR(&cgen_var_0, (VkAccelerationStructureKHR*)&forUnmarshaling->srcAccelerationStructure, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkAccelerationStructureKHR(&cgen_var_1, (VkAccelerationStructureKHR*)&forUnmarshaling->dstAccelerationStructure, 1);
+    vkStream->read((uint32_t*)&forUnmarshaling->geometryCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    const VkAccelerationStructureGeometryKHR* check_pGeometries;
+    check_pGeometries = (const VkAccelerationStructureGeometryKHR*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pGeometries)
+    {
+        if (!(check_pGeometries))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pGeometries inconsistent between guest and host\n");
+        }
+        if (forUnmarshaling)
+        {
+            for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->geometryCount; ++i)
+            {
+                unmarshal_VkAccelerationStructureGeometryKHR(vkStream, rootType, (VkAccelerationStructureGeometryKHR*)(forUnmarshaling->pGeometries + i));
+            }
+        }
+    }
+    unmarshal_VkDeviceOrHostAddressKHR(vkStream, rootType, (VkDeviceOrHostAddressKHR*)(&forUnmarshaling->scratchData));
+}
+
+void marshal_VkAccelerationStructureCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureCreateInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkAccelerationStructureCreateFlagsKHR*)&forMarshaling->createFlags, sizeof(VkAccelerationStructureCreateFlagsKHR));
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkBuffer_u64(&forMarshaling->buffer, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
+    vkStream->write((VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    vkStream->write((VkAccelerationStructureTypeKHR*)&forMarshaling->type, sizeof(VkAccelerationStructureTypeKHR));
+    vkStream->write((VkDeviceAddress*)&forMarshaling->deviceAddress, sizeof(VkDeviceAddress));
+}
+
+void unmarshal_VkAccelerationStructureCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureCreateInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkAccelerationStructureCreateFlagsKHR*)&forUnmarshaling->createFlags, sizeof(VkAccelerationStructureCreateFlagsKHR));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkBuffer(&cgen_var_0, (VkBuffer*)&forUnmarshaling->buffer, 1);
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->offset, sizeof(VkDeviceSize));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->size, sizeof(VkDeviceSize));
+    vkStream->read((VkAccelerationStructureTypeKHR*)&forUnmarshaling->type, sizeof(VkAccelerationStructureTypeKHR));
+    vkStream->read((VkDeviceAddress*)&forUnmarshaling->deviceAddress, sizeof(VkDeviceAddress));
+}
+
+void marshal_VkWriteDescriptorSetAccelerationStructureKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetAccelerationStructureKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->accelerationStructureCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pAccelerationStructures;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->pAccelerationStructures)
+    {
+        if (forMarshaling->accelerationStructureCount)
+        {
+            uint64_t* cgen_var_0_0;
+            vkStream->alloc((void**)&cgen_var_0_0, forMarshaling->accelerationStructureCount * 8);
+            vkStream->handleMapping()->mapHandles_VkAccelerationStructureKHR_u64(forMarshaling->pAccelerationStructures, cgen_var_0_0, forMarshaling->accelerationStructureCount);
+            vkStream->write((uint64_t*)cgen_var_0_0, forMarshaling->accelerationStructureCount * 8);
+        }
+    }
+}
+
+void unmarshal_VkWriteDescriptorSetAccelerationStructureKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkWriteDescriptorSetAccelerationStructureKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->accelerationStructureCount, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    const VkAccelerationStructureKHR* check_pAccelerationStructures;
+    check_pAccelerationStructures = (const VkAccelerationStructureKHR*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pAccelerationStructures)
+    {
+        if (!(check_pAccelerationStructures))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pAccelerationStructures inconsistent between guest and host\n");
+        }
+        if (forUnmarshaling->accelerationStructureCount)
+        {
+            uint64_t* cgen_var_0_0;
+            vkStream->alloc((void**)&cgen_var_0_0, forUnmarshaling->accelerationStructureCount * 8);
+            vkStream->read((uint64_t*)cgen_var_0_0, forUnmarshaling->accelerationStructureCount * 8);
+            vkStream->handleMapping()->mapHandles_u64_VkAccelerationStructureKHR(cgen_var_0_0, (VkAccelerationStructureKHR*)forUnmarshaling->pAccelerationStructures, forUnmarshaling->accelerationStructureCount);
+        }
+    }
+}
+
+void marshal_VkPhysicalDeviceAccelerationStructureFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceAccelerationStructureFeaturesKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->accelerationStructure, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->accelerationStructureCaptureReplay, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->accelerationStructureIndirectBuild, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->accelerationStructureHostCommands, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->descriptorBindingAccelerationStructureUpdateAfterBind, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceAccelerationStructureFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceAccelerationStructureFeaturesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->accelerationStructure, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->accelerationStructureCaptureReplay, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->accelerationStructureIndirectBuild, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->accelerationStructureHostCommands, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->descriptorBindingAccelerationStructureUpdateAfterBind, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceAccelerationStructurePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceAccelerationStructurePropertiesKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint64_t*)&forMarshaling->maxGeometryCount, sizeof(uint64_t));
+    vkStream->write((uint64_t*)&forMarshaling->maxInstanceCount, sizeof(uint64_t));
+    vkStream->write((uint64_t*)&forMarshaling->maxPrimitiveCount, sizeof(uint64_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorAccelerationStructures, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindAccelerationStructures, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetAccelerationStructures, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindAccelerationStructures, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->minAccelerationStructureScratchOffsetAlignment, sizeof(uint32_t));
+}
+
+void unmarshal_VkPhysicalDeviceAccelerationStructurePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceAccelerationStructurePropertiesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint64_t*)&forUnmarshaling->maxGeometryCount, sizeof(uint64_t));
+    vkStream->read((uint64_t*)&forUnmarshaling->maxInstanceCount, sizeof(uint64_t));
+    vkStream->read((uint64_t*)&forUnmarshaling->maxPrimitiveCount, sizeof(uint64_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorAccelerationStructures, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPerStageDescriptorUpdateAfterBindAccelerationStructures, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetAccelerationStructures, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxDescriptorSetUpdateAfterBindAccelerationStructures, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->minAccelerationStructureScratchOffsetAlignment, sizeof(uint32_t));
+}
+
+void marshal_VkAccelerationStructureDeviceAddressInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureDeviceAddressInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkAccelerationStructureKHR_u64(&forMarshaling->accelerationStructure, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+}
+
+void unmarshal_VkAccelerationStructureDeviceAddressInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureDeviceAddressInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkAccelerationStructureKHR(&cgen_var_0, (VkAccelerationStructureKHR*)&forUnmarshaling->accelerationStructure, 1);
+}
+
+void marshal_VkAccelerationStructureVersionInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureVersionInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((const uint8_t*)forMarshaling->pVersionData, 2*VK_UUID_SIZE * sizeof(const uint8_t));
+}
+
+void unmarshal_VkAccelerationStructureVersionInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureVersionInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint8_t*)forUnmarshaling->pVersionData, 2*VK_UUID_SIZE * sizeof(const uint8_t));
+}
+
+void marshal_VkCopyAccelerationStructureToMemoryInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyAccelerationStructureToMemoryInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkAccelerationStructureKHR_u64(&forMarshaling->src, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    marshal_VkDeviceOrHostAddressKHR(vkStream, rootType, (VkDeviceOrHostAddressKHR*)(&forMarshaling->dst));
+    vkStream->write((VkCopyAccelerationStructureModeKHR*)&forMarshaling->mode, sizeof(VkCopyAccelerationStructureModeKHR));
+}
+
+void unmarshal_VkCopyAccelerationStructureToMemoryInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCopyAccelerationStructureToMemoryInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkAccelerationStructureKHR(&cgen_var_0, (VkAccelerationStructureKHR*)&forUnmarshaling->src, 1);
+    unmarshal_VkDeviceOrHostAddressKHR(vkStream, rootType, (VkDeviceOrHostAddressKHR*)(&forUnmarshaling->dst));
+    vkStream->read((VkCopyAccelerationStructureModeKHR*)&forUnmarshaling->mode, sizeof(VkCopyAccelerationStructureModeKHR));
+}
+
+void marshal_VkCopyMemoryToAccelerationStructureInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyMemoryToAccelerationStructureInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    marshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forMarshaling->src));
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkAccelerationStructureKHR_u64(&forMarshaling->dst, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->write((VkCopyAccelerationStructureModeKHR*)&forMarshaling->mode, sizeof(VkCopyAccelerationStructureModeKHR));
+}
+
+void unmarshal_VkCopyMemoryToAccelerationStructureInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCopyMemoryToAccelerationStructureInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    unmarshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forUnmarshaling->src));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkAccelerationStructureKHR(&cgen_var_0, (VkAccelerationStructureKHR*)&forUnmarshaling->dst, 1);
+    vkStream->read((VkCopyAccelerationStructureModeKHR*)&forUnmarshaling->mode, sizeof(VkCopyAccelerationStructureModeKHR));
+}
+
+void marshal_VkCopyAccelerationStructureInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyAccelerationStructureInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    uint64_t cgen_var_0;
+    vkStream->handleMapping()->mapHandles_VkAccelerationStructureKHR_u64(&forMarshaling->src, &cgen_var_0, 1);
+    vkStream->write((uint64_t*)&cgen_var_0, 1 * 8);
+    uint64_t cgen_var_1;
+    vkStream->handleMapping()->mapHandles_VkAccelerationStructureKHR_u64(&forMarshaling->dst, &cgen_var_1, 1);
+    vkStream->write((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->write((VkCopyAccelerationStructureModeKHR*)&forMarshaling->mode, sizeof(VkCopyAccelerationStructureModeKHR));
+}
+
+void unmarshal_VkCopyAccelerationStructureInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCopyAccelerationStructureInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    uint64_t cgen_var_0;
+    vkStream->read((uint64_t*)&cgen_var_0, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkAccelerationStructureKHR(&cgen_var_0, (VkAccelerationStructureKHR*)&forUnmarshaling->src, 1);
+    uint64_t cgen_var_1;
+    vkStream->read((uint64_t*)&cgen_var_1, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkAccelerationStructureKHR(&cgen_var_1, (VkAccelerationStructureKHR*)&forUnmarshaling->dst, 1);
+    vkStream->read((VkCopyAccelerationStructureModeKHR*)&forUnmarshaling->mode, sizeof(VkCopyAccelerationStructureModeKHR));
+}
+
+void marshal_VkAccelerationStructureBuildSizesInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildSizesInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkDeviceSize*)&forMarshaling->accelerationStructureSize, sizeof(VkDeviceSize));
+    vkStream->write((VkDeviceSize*)&forMarshaling->updateScratchSize, sizeof(VkDeviceSize));
+    vkStream->write((VkDeviceSize*)&forMarshaling->buildScratchSize, sizeof(VkDeviceSize));
+}
+
+void unmarshal_VkAccelerationStructureBuildSizesInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureBuildSizesInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->accelerationStructureSize, sizeof(VkDeviceSize));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->updateScratchSize, sizeof(VkDeviceSize));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->buildScratchSize, sizeof(VkDeviceSize));
+}
+
 #endif
-#ifdef VK_GOOGLE_async_command_buffers
+#ifdef VK_KHR_ray_tracing_pipeline
+void marshal_VkRayTracingShaderGroupCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingShaderGroupCreateInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkRayTracingShaderGroupTypeKHR*)&forMarshaling->type, sizeof(VkRayTracingShaderGroupTypeKHR));
+    vkStream->write((uint32_t*)&forMarshaling->generalShader, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->closestHitShader, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->anyHitShader, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->intersectionShader, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pShaderGroupCaptureReplayHandle;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->pShaderGroupCaptureReplayHandle)
+    {
+        vkStream->write((const void*)forMarshaling->pShaderGroupCaptureReplayHandle, sizeof(const uint8_t));
+    }
+}
+
+void unmarshal_VkRayTracingShaderGroupCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRayTracingShaderGroupCreateInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkRayTracingShaderGroupTypeKHR*)&forUnmarshaling->type, sizeof(VkRayTracingShaderGroupTypeKHR));
+    vkStream->read((uint32_t*)&forUnmarshaling->generalShader, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->closestHitShader, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->anyHitShader, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->intersectionShader, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    const void* check_pShaderGroupCaptureReplayHandle;
+    check_pShaderGroupCaptureReplayHandle = (const void*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pShaderGroupCaptureReplayHandle)
+    {
+        if (!(check_pShaderGroupCaptureReplayHandle))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pShaderGroupCaptureReplayHandle inconsistent between guest and host\n");
+        }
+        vkStream->read((void*)forUnmarshaling->pShaderGroupCaptureReplayHandle, sizeof(const uint8_t));
+    }
+}
+
+void marshal_VkRayTracingPipelineInterfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingPipelineInterfaceCreateInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->maxPipelineRayPayloadSize, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxPipelineRayHitAttributeSize, sizeof(uint32_t));
+}
+
+void unmarshal_VkRayTracingPipelineInterfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRayTracingPipelineInterfaceCreateInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPipelineRayPayloadSize, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPipelineRayHitAttributeSize, sizeof(uint32_t));
+}
+
+void marshal_VkRayTracingPipelineCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingPipelineCreateInfoKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkPipelineCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineCreateFlags));
+    vkStream->write((uint32_t*)&forMarshaling->stageCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->stageCount; ++i)
+        {
+            marshal_VkPipelineShaderStageCreateInfo(vkStream, rootType, (const VkPipelineShaderStageCreateInfo*)(forMarshaling->pStages + i));
+        }
+    }
+    vkStream->write((uint32_t*)&forMarshaling->groupCount, sizeof(uint32_t));
+    if (forMarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->groupCount; ++i)
+        {
+            marshal_VkRayTracingShaderGroupCreateInfoKHR(vkStream, rootType, (const VkRayTracingShaderGroupCreateInfoKHR*)(forMarshaling->pGroups + i));
+        }
+    }
+    vkStream->write((uint32_t*)&forMarshaling->maxPipelineRayRecursionDepth, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pLibraryInfo;
+    vkStream->putBe64(cgen_var_0);
+    if (forMarshaling->pLibraryInfo)
+    {
+        marshal_VkPipelineLibraryCreateInfoKHR(vkStream, rootType, (const VkPipelineLibraryCreateInfoKHR*)(forMarshaling->pLibraryInfo));
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pLibraryInterface;
+    vkStream->putBe64(cgen_var_1);
+    if (forMarshaling->pLibraryInterface)
+    {
+        marshal_VkRayTracingPipelineInterfaceCreateInfoKHR(vkStream, rootType, (const VkRayTracingPipelineInterfaceCreateInfoKHR*)(forMarshaling->pLibraryInterface));
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)forMarshaling->pDynamicState;
+    vkStream->putBe64(cgen_var_2);
+    if (forMarshaling->pDynamicState)
+    {
+        marshal_VkPipelineDynamicStateCreateInfo(vkStream, rootType, (const VkPipelineDynamicStateCreateInfo*)(forMarshaling->pDynamicState));
+    }
+    uint64_t cgen_var_3;
+    vkStream->handleMapping()->mapHandles_VkPipelineLayout_u64(&forMarshaling->layout, &cgen_var_3, 1);
+    vkStream->write((uint64_t*)&cgen_var_3, 1 * 8);
+    uint64_t cgen_var_4;
+    vkStream->handleMapping()->mapHandles_VkPipeline_u64(&forMarshaling->basePipelineHandle, &cgen_var_4, 1);
+    vkStream->write((uint64_t*)&cgen_var_4, 1 * 8);
+    vkStream->write((int32_t*)&forMarshaling->basePipelineIndex, sizeof(int32_t));
+}
+
+void unmarshal_VkRayTracingPipelineCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRayTracingPipelineCreateInfoKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkPipelineCreateFlags*)&forUnmarshaling->flags, sizeof(VkPipelineCreateFlags));
+    vkStream->read((uint32_t*)&forUnmarshaling->stageCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->stageCount; ++i)
+        {
+            unmarshal_VkPipelineShaderStageCreateInfo(vkStream, rootType, (VkPipelineShaderStageCreateInfo*)(forUnmarshaling->pStages + i));
+        }
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->groupCount, sizeof(uint32_t));
+    if (forUnmarshaling)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forUnmarshaling->groupCount; ++i)
+        {
+            unmarshal_VkRayTracingShaderGroupCreateInfoKHR(vkStream, rootType, (VkRayTracingShaderGroupCreateInfoKHR*)(forUnmarshaling->pGroups + i));
+        }
+    }
+    vkStream->read((uint32_t*)&forUnmarshaling->maxPipelineRayRecursionDepth, sizeof(uint32_t));
+    // WARNING PTR CHECK
+    const VkPipelineLibraryCreateInfoKHR* check_pLibraryInfo;
+    check_pLibraryInfo = (const VkPipelineLibraryCreateInfoKHR*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pLibraryInfo)
+    {
+        if (!(check_pLibraryInfo))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pLibraryInfo inconsistent between guest and host\n");
+        }
+        unmarshal_VkPipelineLibraryCreateInfoKHR(vkStream, rootType, (VkPipelineLibraryCreateInfoKHR*)(forUnmarshaling->pLibraryInfo));
+    }
+    // WARNING PTR CHECK
+    const VkRayTracingPipelineInterfaceCreateInfoKHR* check_pLibraryInterface;
+    check_pLibraryInterface = (const VkRayTracingPipelineInterfaceCreateInfoKHR*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pLibraryInterface)
+    {
+        if (!(check_pLibraryInterface))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pLibraryInterface inconsistent between guest and host\n");
+        }
+        unmarshal_VkRayTracingPipelineInterfaceCreateInfoKHR(vkStream, rootType, (VkRayTracingPipelineInterfaceCreateInfoKHR*)(forUnmarshaling->pLibraryInterface));
+    }
+    // WARNING PTR CHECK
+    const VkPipelineDynamicStateCreateInfo* check_pDynamicState;
+    check_pDynamicState = (const VkPipelineDynamicStateCreateInfo*)(uintptr_t)vkStream->getBe64();
+    if (forUnmarshaling->pDynamicState)
+    {
+        if (!(check_pDynamicState))
+        {
+            fprintf(stderr, "fatal: forUnmarshaling->pDynamicState inconsistent between guest and host\n");
+        }
+        unmarshal_VkPipelineDynamicStateCreateInfo(vkStream, rootType, (VkPipelineDynamicStateCreateInfo*)(forUnmarshaling->pDynamicState));
+    }
+    uint64_t cgen_var_3;
+    vkStream->read((uint64_t*)&cgen_var_3, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkPipelineLayout(&cgen_var_3, (VkPipelineLayout*)&forUnmarshaling->layout, 1);
+    uint64_t cgen_var_4;
+    vkStream->read((uint64_t*)&cgen_var_4, 1 * 8);
+    vkStream->handleMapping()->mapHandles_u64_VkPipeline(&cgen_var_4, (VkPipeline*)&forUnmarshaling->basePipelineHandle, 1);
+    vkStream->read((int32_t*)&forUnmarshaling->basePipelineIndex, sizeof(int32_t));
+}
+
+void marshal_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPipelineFeaturesKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->rayTracingPipeline, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->rayTracingPipelineShaderGroupHandleCaptureReplay, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->rayTracingPipelineShaderGroupHandleCaptureReplayMixed, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->rayTracingPipelineTraceRaysIndirect, sizeof(VkBool32));
+    vkStream->write((VkBool32*)&forMarshaling->rayTraversalPrimitiveCulling, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceRayTracingPipelineFeaturesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->rayTracingPipeline, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->rayTracingPipelineShaderGroupHandleCaptureReplay, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->rayTracingPipelineShaderGroupHandleCaptureReplayMixed, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->rayTracingPipelineTraceRaysIndirect, sizeof(VkBool32));
+    vkStream->read((VkBool32*)&forUnmarshaling->rayTraversalPrimitiveCulling, sizeof(VkBool32));
+}
+
+void marshal_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPipelinePropertiesKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((uint32_t*)&forMarshaling->shaderGroupHandleSize, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxRayRecursionDepth, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxShaderGroupStride, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->shaderGroupBaseAlignment, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->shaderGroupHandleCaptureReplaySize, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxRayDispatchInvocationCount, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->shaderGroupHandleAlignment, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->maxRayHitAttributeSize, sizeof(uint32_t));
+}
+
+void unmarshal_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceRayTracingPipelinePropertiesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((uint32_t*)&forUnmarshaling->shaderGroupHandleSize, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxRayRecursionDepth, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxShaderGroupStride, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->shaderGroupBaseAlignment, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->shaderGroupHandleCaptureReplaySize, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxRayDispatchInvocationCount, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->shaderGroupHandleAlignment, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->maxRayHitAttributeSize, sizeof(uint32_t));
+}
+
+void marshal_VkStridedDeviceAddressRegionKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkStridedDeviceAddressRegionKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkDeviceAddress*)&forMarshaling->deviceAddress, sizeof(VkDeviceAddress));
+    vkStream->write((VkDeviceSize*)&forMarshaling->stride, sizeof(VkDeviceSize));
+    vkStream->write((VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+}
+
+void unmarshal_VkStridedDeviceAddressRegionKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkStridedDeviceAddressRegionKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkDeviceAddress*)&forUnmarshaling->deviceAddress, sizeof(VkDeviceAddress));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->stride, sizeof(VkDeviceSize));
+    vkStream->read((VkDeviceSize*)&forUnmarshaling->size, sizeof(VkDeviceSize));
+}
+
+void marshal_VkTraceRaysIndirectCommandKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkTraceRaysIndirectCommandKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((uint32_t*)&forMarshaling->width, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->height, sizeof(uint32_t));
+    vkStream->write((uint32_t*)&forMarshaling->depth, sizeof(uint32_t));
+}
+
+void unmarshal_VkTraceRaysIndirectCommandKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkTraceRaysIndirectCommandKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((uint32_t*)&forUnmarshaling->width, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->height, sizeof(uint32_t));
+    vkStream->read((uint32_t*)&forUnmarshaling->depth, sizeof(uint32_t));
+}
+
 #endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
-#endif
-#ifdef VK_GOOGLE_address_space_info
-#endif
-#ifdef VK_GOOGLE_free_memory_sync
+#ifdef VK_KHR_ray_query
+void marshal_VkPhysicalDeviceRayQueryFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayQueryFeaturesKHR* forMarshaling)
+{
+    (void)rootType;
+    vkStream->write((VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    marshal_extension_struct(vkStream, rootType, forMarshaling->pNext);
+    vkStream->write((VkBool32*)&forMarshaling->rayQuery, sizeof(VkBool32));
+}
+
+void unmarshal_VkPhysicalDeviceRayQueryFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceRayQueryFeaturesKHR* forUnmarshaling)
+{
+    (void)rootType;
+    vkStream->read((VkStructureType*)&forUnmarshaling->sType, sizeof(VkStructureType));
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forUnmarshaling->sType;
+    }
+    unmarshal_extension_struct(vkStream, rootType, (void*)(forUnmarshaling->pNext));
+    vkStream->read((VkBool32*)&forUnmarshaling->rayQuery, sizeof(VkBool32));
+}
+
 #endif
 void marshal_extension_struct(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const void* structExtension)
 {
+    VkInstanceCreateInfo* structAccess = (VkInstanceCreateInfo*)(structExtension);
+    size_t currExtSize = goldfish_vk_extension_struct_size_with_stream_features(vkStream->getFeatureBits(), rootType, structExtension);
+    if (!currExtSize && structExtension)
+    {
+        // unknown struct extension; skip and call on its pNext field
+        marshal_extension_struct(vkStream, rootType, (void*)structAccess->pNext);
+        return;
+    }
+    else
+    {
+        // known or null extension struct
+        vkStream->putBe32(currExtSize);
+        if (!currExtSize)
+        {
+            // exit if this was a null extension struct (size == 0 in this branch)
+            return;
+        }
+    }
+    vkStream->write(structExtension, sizeof(VkStructureType));
     if (!structExtension)
     {
         return;
@@ -13631,659 +23212,1650 @@
 #ifdef VK_VERSION_1_1
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES:
         {
-            marshal_VkPhysicalDeviceSubgroupProperties(vkStream, reinterpret_cast<const VkPhysicalDeviceSubgroupProperties*>(structExtension));
+            marshal_VkPhysicalDeviceSubgroupProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceSubgroupProperties*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES:
         {
-            marshal_VkPhysicalDevice16BitStorageFeatures(vkStream, reinterpret_cast<const VkPhysicalDevice16BitStorageFeatures*>(structExtension));
+            marshal_VkPhysicalDevice16BitStorageFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDevice16BitStorageFeatures*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
         {
-            marshal_VkMemoryDedicatedRequirements(vkStream, reinterpret_cast<const VkMemoryDedicatedRequirements*>(structExtension));
+            marshal_VkMemoryDedicatedRequirements(vkStream, rootType, reinterpret_cast<const VkMemoryDedicatedRequirements*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
         {
-            marshal_VkMemoryDedicatedAllocateInfo(vkStream, reinterpret_cast<const VkMemoryDedicatedAllocateInfo*>(structExtension));
+            marshal_VkMemoryDedicatedAllocateInfo(vkStream, rootType, reinterpret_cast<const VkMemoryDedicatedAllocateInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO:
         {
-            marshal_VkMemoryAllocateFlagsInfo(vkStream, reinterpret_cast<const VkMemoryAllocateFlagsInfo*>(structExtension));
+            marshal_VkMemoryAllocateFlagsInfo(vkStream, rootType, reinterpret_cast<const VkMemoryAllocateFlagsInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO:
         {
-            marshal_VkDeviceGroupRenderPassBeginInfo(vkStream, reinterpret_cast<const VkDeviceGroupRenderPassBeginInfo*>(structExtension));
+            marshal_VkDeviceGroupRenderPassBeginInfo(vkStream, rootType, reinterpret_cast<const VkDeviceGroupRenderPassBeginInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO:
         {
-            marshal_VkDeviceGroupCommandBufferBeginInfo(vkStream, reinterpret_cast<const VkDeviceGroupCommandBufferBeginInfo*>(structExtension));
+            marshal_VkDeviceGroupCommandBufferBeginInfo(vkStream, rootType, reinterpret_cast<const VkDeviceGroupCommandBufferBeginInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO:
         {
-            marshal_VkDeviceGroupSubmitInfo(vkStream, reinterpret_cast<const VkDeviceGroupSubmitInfo*>(structExtension));
+            marshal_VkDeviceGroupSubmitInfo(vkStream, rootType, reinterpret_cast<const VkDeviceGroupSubmitInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO:
         {
-            marshal_VkDeviceGroupBindSparseInfo(vkStream, reinterpret_cast<const VkDeviceGroupBindSparseInfo*>(structExtension));
+            marshal_VkDeviceGroupBindSparseInfo(vkStream, rootType, reinterpret_cast<const VkDeviceGroupBindSparseInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO:
         {
-            marshal_VkBindBufferMemoryDeviceGroupInfo(vkStream, reinterpret_cast<const VkBindBufferMemoryDeviceGroupInfo*>(structExtension));
+            marshal_VkBindBufferMemoryDeviceGroupInfo(vkStream, rootType, reinterpret_cast<const VkBindBufferMemoryDeviceGroupInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO:
         {
-            marshal_VkBindImageMemoryDeviceGroupInfo(vkStream, reinterpret_cast<const VkBindImageMemoryDeviceGroupInfo*>(structExtension));
+            marshal_VkBindImageMemoryDeviceGroupInfo(vkStream, rootType, reinterpret_cast<const VkBindImageMemoryDeviceGroupInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO:
         {
-            marshal_VkDeviceGroupDeviceCreateInfo(vkStream, reinterpret_cast<const VkDeviceGroupDeviceCreateInfo*>(structExtension));
+            marshal_VkDeviceGroupDeviceCreateInfo(vkStream, rootType, reinterpret_cast<const VkDeviceGroupDeviceCreateInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
         {
-            marshal_VkPhysicalDeviceFeatures2(vkStream, reinterpret_cast<const VkPhysicalDeviceFeatures2*>(structExtension));
+            marshal_VkPhysicalDeviceFeatures2(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFeatures2*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES:
         {
-            marshal_VkPhysicalDevicePointClippingProperties(vkStream, reinterpret_cast<const VkPhysicalDevicePointClippingProperties*>(structExtension));
+            marshal_VkPhysicalDevicePointClippingProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePointClippingProperties*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO:
         {
-            marshal_VkRenderPassInputAttachmentAspectCreateInfo(vkStream, reinterpret_cast<const VkRenderPassInputAttachmentAspectCreateInfo*>(structExtension));
+            marshal_VkRenderPassInputAttachmentAspectCreateInfo(vkStream, rootType, reinterpret_cast<const VkRenderPassInputAttachmentAspectCreateInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO:
         {
-            marshal_VkImageViewUsageCreateInfo(vkStream, reinterpret_cast<const VkImageViewUsageCreateInfo*>(structExtension));
+            marshal_VkImageViewUsageCreateInfo(vkStream, rootType, reinterpret_cast<const VkImageViewUsageCreateInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO:
         {
-            marshal_VkPipelineTessellationDomainOriginStateCreateInfo(vkStream, reinterpret_cast<const VkPipelineTessellationDomainOriginStateCreateInfo*>(structExtension));
+            marshal_VkPipelineTessellationDomainOriginStateCreateInfo(vkStream, rootType, reinterpret_cast<const VkPipelineTessellationDomainOriginStateCreateInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
         {
-            marshal_VkRenderPassMultiviewCreateInfo(vkStream, reinterpret_cast<const VkRenderPassMultiviewCreateInfo*>(structExtension));
+            marshal_VkRenderPassMultiviewCreateInfo(vkStream, rootType, reinterpret_cast<const VkRenderPassMultiviewCreateInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES:
         {
-            marshal_VkPhysicalDeviceMultiviewFeatures(vkStream, reinterpret_cast<const VkPhysicalDeviceMultiviewFeatures*>(structExtension));
+            marshal_VkPhysicalDeviceMultiviewFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceMultiviewFeatures*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES:
         {
-            marshal_VkPhysicalDeviceMultiviewProperties(vkStream, reinterpret_cast<const VkPhysicalDeviceMultiviewProperties*>(structExtension));
+            marshal_VkPhysicalDeviceMultiviewProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceMultiviewProperties*>(structExtension));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES:
         {
-            marshal_VkPhysicalDeviceVariablePointerFeatures(vkStream, reinterpret_cast<const VkPhysicalDeviceVariablePointerFeatures*>(structExtension));
+            marshal_VkPhysicalDeviceVariablePointersFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceVariablePointersFeatures*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES:
         {
-            marshal_VkPhysicalDeviceProtectedMemoryFeatures(vkStream, reinterpret_cast<const VkPhysicalDeviceProtectedMemoryFeatures*>(structExtension));
+            marshal_VkPhysicalDeviceProtectedMemoryFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceProtectedMemoryFeatures*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES:
         {
-            marshal_VkPhysicalDeviceProtectedMemoryProperties(vkStream, reinterpret_cast<const VkPhysicalDeviceProtectedMemoryProperties*>(structExtension));
+            marshal_VkPhysicalDeviceProtectedMemoryProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceProtectedMemoryProperties*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO:
         {
-            marshal_VkProtectedSubmitInfo(vkStream, reinterpret_cast<const VkProtectedSubmitInfo*>(structExtension));
+            marshal_VkProtectedSubmitInfo(vkStream, rootType, reinterpret_cast<const VkProtectedSubmitInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO:
         {
-            marshal_VkSamplerYcbcrConversionInfo(vkStream, reinterpret_cast<const VkSamplerYcbcrConversionInfo*>(structExtension));
+            marshal_VkSamplerYcbcrConversionInfo(vkStream, rootType, reinterpret_cast<const VkSamplerYcbcrConversionInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO:
         {
-            marshal_VkBindImagePlaneMemoryInfo(vkStream, reinterpret_cast<const VkBindImagePlaneMemoryInfo*>(structExtension));
+            marshal_VkBindImagePlaneMemoryInfo(vkStream, rootType, reinterpret_cast<const VkBindImagePlaneMemoryInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO:
         {
-            marshal_VkImagePlaneMemoryRequirementsInfo(vkStream, reinterpret_cast<const VkImagePlaneMemoryRequirementsInfo*>(structExtension));
+            marshal_VkImagePlaneMemoryRequirementsInfo(vkStream, rootType, reinterpret_cast<const VkImagePlaneMemoryRequirementsInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES:
         {
-            marshal_VkPhysicalDeviceSamplerYcbcrConversionFeatures(vkStream, reinterpret_cast<const VkPhysicalDeviceSamplerYcbcrConversionFeatures*>(structExtension));
+            marshal_VkPhysicalDeviceSamplerYcbcrConversionFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceSamplerYcbcrConversionFeatures*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES:
         {
-            marshal_VkSamplerYcbcrConversionImageFormatProperties(vkStream, reinterpret_cast<const VkSamplerYcbcrConversionImageFormatProperties*>(structExtension));
+            marshal_VkSamplerYcbcrConversionImageFormatProperties(vkStream, rootType, reinterpret_cast<const VkSamplerYcbcrConversionImageFormatProperties*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO:
         {
-            marshal_VkPhysicalDeviceExternalImageFormatInfo(vkStream, reinterpret_cast<const VkPhysicalDeviceExternalImageFormatInfo*>(structExtension));
+            marshal_VkPhysicalDeviceExternalImageFormatInfo(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceExternalImageFormatInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES:
         {
-            marshal_VkExternalImageFormatProperties(vkStream, reinterpret_cast<const VkExternalImageFormatProperties*>(structExtension));
+            marshal_VkExternalImageFormatProperties(vkStream, rootType, reinterpret_cast<const VkExternalImageFormatProperties*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES:
         {
-            marshal_VkPhysicalDeviceIDProperties(vkStream, reinterpret_cast<const VkPhysicalDeviceIDProperties*>(structExtension));
+            marshal_VkPhysicalDeviceIDProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceIDProperties*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO:
         {
-            marshal_VkExternalMemoryImageCreateInfo(vkStream, reinterpret_cast<const VkExternalMemoryImageCreateInfo*>(structExtension));
+            marshal_VkExternalMemoryImageCreateInfo(vkStream, rootType, reinterpret_cast<const VkExternalMemoryImageCreateInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO:
         {
-            marshal_VkExternalMemoryBufferCreateInfo(vkStream, reinterpret_cast<const VkExternalMemoryBufferCreateInfo*>(structExtension));
+            marshal_VkExternalMemoryBufferCreateInfo(vkStream, rootType, reinterpret_cast<const VkExternalMemoryBufferCreateInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
         {
-            marshal_VkExportMemoryAllocateInfo(vkStream, reinterpret_cast<const VkExportMemoryAllocateInfo*>(structExtension));
+            marshal_VkExportMemoryAllocateInfo(vkStream, rootType, reinterpret_cast<const VkExportMemoryAllocateInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO:
         {
-            marshal_VkExportFenceCreateInfo(vkStream, reinterpret_cast<const VkExportFenceCreateInfo*>(structExtension));
+            marshal_VkExportFenceCreateInfo(vkStream, rootType, reinterpret_cast<const VkExportFenceCreateInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO:
         {
-            marshal_VkExportSemaphoreCreateInfo(vkStream, reinterpret_cast<const VkExportSemaphoreCreateInfo*>(structExtension));
+            marshal_VkExportSemaphoreCreateInfo(vkStream, rootType, reinterpret_cast<const VkExportSemaphoreCreateInfo*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES:
         {
-            marshal_VkPhysicalDeviceMaintenance3Properties(vkStream, reinterpret_cast<const VkPhysicalDeviceMaintenance3Properties*>(structExtension));
+            marshal_VkPhysicalDeviceMaintenance3Properties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceMaintenance3Properties*>(structExtension));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
         {
-            marshal_VkPhysicalDeviceShaderDrawParameterFeatures(vkStream, reinterpret_cast<const VkPhysicalDeviceShaderDrawParameterFeatures*>(structExtension));
+            marshal_VkPhysicalDeviceShaderDrawParametersFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderDrawParametersFeatures*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
+        {
+            marshal_VkPhysicalDeviceVulkan11Features(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceVulkan11Features*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
+        {
+            marshal_VkPhysicalDeviceVulkan11Properties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceVulkan11Properties*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
+        {
+            marshal_VkPhysicalDeviceVulkan12Features(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceVulkan12Features*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
+        {
+            marshal_VkPhysicalDeviceVulkan12Properties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceVulkan12Properties*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO:
+        {
+            marshal_VkImageFormatListCreateInfo(vkStream, rootType, reinterpret_cast<const VkImageFormatListCreateInfo*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES:
+        {
+            marshal_VkPhysicalDevice8BitStorageFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDevice8BitStorageFeatures*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES:
+        {
+            marshal_VkPhysicalDeviceDriverProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDriverProperties*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES:
+        {
+            marshal_VkPhysicalDeviceShaderAtomicInt64Features(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderAtomicInt64Features*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES:
+        {
+            marshal_VkPhysicalDeviceShaderFloat16Int8Features(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderFloat16Int8Features*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES:
+        {
+            marshal_VkPhysicalDeviceFloatControlsProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFloatControlsProperties*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO:
+        {
+            marshal_VkDescriptorSetLayoutBindingFlagsCreateInfo(vkStream, rootType, reinterpret_cast<const VkDescriptorSetLayoutBindingFlagsCreateInfo*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES:
+        {
+            marshal_VkPhysicalDeviceDescriptorIndexingFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDescriptorIndexingFeatures*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES:
+        {
+            marshal_VkPhysicalDeviceDescriptorIndexingProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDescriptorIndexingProperties*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO:
+        {
+            marshal_VkDescriptorSetVariableDescriptorCountAllocateInfo(vkStream, rootType, reinterpret_cast<const VkDescriptorSetVariableDescriptorCountAllocateInfo*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT:
+        {
+            marshal_VkDescriptorSetVariableDescriptorCountLayoutSupport(vkStream, rootType, reinterpret_cast<const VkDescriptorSetVariableDescriptorCountLayoutSupport*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE:
+        {
+            marshal_VkSubpassDescriptionDepthStencilResolve(vkStream, rootType, reinterpret_cast<const VkSubpassDescriptionDepthStencilResolve*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES:
+        {
+            marshal_VkPhysicalDeviceDepthStencilResolveProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDepthStencilResolveProperties*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES:
+        {
+            marshal_VkPhysicalDeviceScalarBlockLayoutFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceScalarBlockLayoutFeatures*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO:
+        {
+            marshal_VkImageStencilUsageCreateInfo(vkStream, rootType, reinterpret_cast<const VkImageStencilUsageCreateInfo*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO:
+        {
+            marshal_VkSamplerReductionModeCreateInfo(vkStream, rootType, reinterpret_cast<const VkSamplerReductionModeCreateInfo*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES:
+        {
+            marshal_VkPhysicalDeviceSamplerFilterMinmaxProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceSamplerFilterMinmaxProperties*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
+        {
+            marshal_VkPhysicalDeviceVulkanMemoryModelFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceVulkanMemoryModelFeatures*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES:
+        {
+            marshal_VkPhysicalDeviceImagelessFramebufferFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceImagelessFramebufferFeatures*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO:
+        {
+            marshal_VkFramebufferAttachmentsCreateInfo(vkStream, rootType, reinterpret_cast<const VkFramebufferAttachmentsCreateInfo*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO:
+        {
+            marshal_VkRenderPassAttachmentBeginInfo(vkStream, rootType, reinterpret_cast<const VkRenderPassAttachmentBeginInfo*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES:
+        {
+            marshal_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceUniformBufferStandardLayoutFeatures*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES:
+        {
+            marshal_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
+        {
+            marshal_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT:
+        {
+            marshal_VkAttachmentReferenceStencilLayout(vkStream, rootType, reinterpret_cast<const VkAttachmentReferenceStencilLayout*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT:
+        {
+            marshal_VkAttachmentDescriptionStencilLayout(vkStream, rootType, reinterpret_cast<const VkAttachmentDescriptionStencilLayout*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES:
+        {
+            marshal_VkPhysicalDeviceHostQueryResetFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceHostQueryResetFeatures*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES:
+        {
+            marshal_VkPhysicalDeviceTimelineSemaphoreFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceTimelineSemaphoreFeatures*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES:
+        {
+            marshal_VkPhysicalDeviceTimelineSemaphoreProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceTimelineSemaphoreProperties*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO:
+        {
+            marshal_VkSemaphoreTypeCreateInfo(vkStream, rootType, reinterpret_cast<const VkSemaphoreTypeCreateInfo*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
+        {
+            marshal_VkTimelineSemaphoreSubmitInfo(vkStream, rootType, reinterpret_cast<const VkTimelineSemaphoreSubmitInfo*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
+        {
+            marshal_VkPhysicalDeviceBufferDeviceAddressFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceBufferDeviceAddressFeatures*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO:
+        {
+            marshal_VkBufferOpaqueCaptureAddressCreateInfo(vkStream, rootType, reinterpret_cast<const VkBufferOpaqueCaptureAddressCreateInfo*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO:
+        {
+            marshal_VkMemoryOpaqueCaptureAddressAllocateInfo(vkStream, rootType, reinterpret_cast<const VkMemoryOpaqueCaptureAddressAllocateInfo*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_KHR_swapchain
         case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
         {
-            marshal_VkImageSwapchainCreateInfoKHR(vkStream, reinterpret_cast<const VkImageSwapchainCreateInfoKHR*>(structExtension));
+            marshal_VkImageSwapchainCreateInfoKHR(vkStream, rootType, reinterpret_cast<const VkImageSwapchainCreateInfoKHR*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR:
         {
-            marshal_VkBindImageMemorySwapchainInfoKHR(vkStream, reinterpret_cast<const VkBindImageMemorySwapchainInfoKHR*>(structExtension));
+            marshal_VkBindImageMemorySwapchainInfoKHR(vkStream, rootType, reinterpret_cast<const VkBindImageMemorySwapchainInfoKHR*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR:
         {
-            marshal_VkDeviceGroupPresentInfoKHR(vkStream, reinterpret_cast<const VkDeviceGroupPresentInfoKHR*>(structExtension));
+            marshal_VkDeviceGroupPresentInfoKHR(vkStream, rootType, reinterpret_cast<const VkDeviceGroupPresentInfoKHR*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR:
         {
-            marshal_VkDeviceGroupSwapchainCreateInfoKHR(vkStream, reinterpret_cast<const VkDeviceGroupSwapchainCreateInfoKHR*>(structExtension));
+            marshal_VkDeviceGroupSwapchainCreateInfoKHR(vkStream, rootType, reinterpret_cast<const VkDeviceGroupSwapchainCreateInfoKHR*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_KHR_display_swapchain
         case VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR:
         {
-            marshal_VkDisplayPresentInfoKHR(vkStream, reinterpret_cast<const VkDisplayPresentInfoKHR*>(structExtension));
+            marshal_VkDisplayPresentInfoKHR(vkStream, rootType, reinterpret_cast<const VkDisplayPresentInfoKHR*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_KHR_external_memory_win32
         case VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR:
         {
-            marshal_VkImportMemoryWin32HandleInfoKHR(vkStream, reinterpret_cast<const VkImportMemoryWin32HandleInfoKHR*>(structExtension));
+            marshal_VkImportMemoryWin32HandleInfoKHR(vkStream, rootType, reinterpret_cast<const VkImportMemoryWin32HandleInfoKHR*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR:
         {
-            marshal_VkExportMemoryWin32HandleInfoKHR(vkStream, reinterpret_cast<const VkExportMemoryWin32HandleInfoKHR*>(structExtension));
+            marshal_VkExportMemoryWin32HandleInfoKHR(vkStream, rootType, reinterpret_cast<const VkExportMemoryWin32HandleInfoKHR*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_KHR_external_memory_fd
         case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
         {
-            marshal_VkImportMemoryFdInfoKHR(vkStream, reinterpret_cast<const VkImportMemoryFdInfoKHR*>(structExtension));
+            marshal_VkImportMemoryFdInfoKHR(vkStream, rootType, reinterpret_cast<const VkImportMemoryFdInfoKHR*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_KHR_win32_keyed_mutex
         case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR:
         {
-            marshal_VkWin32KeyedMutexAcquireReleaseInfoKHR(vkStream, reinterpret_cast<const VkWin32KeyedMutexAcquireReleaseInfoKHR*>(structExtension));
+            marshal_VkWin32KeyedMutexAcquireReleaseInfoKHR(vkStream, rootType, reinterpret_cast<const VkWin32KeyedMutexAcquireReleaseInfoKHR*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_KHR_external_semaphore_win32
         case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR:
         {
-            marshal_VkExportSemaphoreWin32HandleInfoKHR(vkStream, reinterpret_cast<const VkExportSemaphoreWin32HandleInfoKHR*>(structExtension));
+            marshal_VkExportSemaphoreWin32HandleInfoKHR(vkStream, rootType, reinterpret_cast<const VkExportSemaphoreWin32HandleInfoKHR*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR:
         {
-            marshal_VkD3D12FenceSubmitInfoKHR(vkStream, reinterpret_cast<const VkD3D12FenceSubmitInfoKHR*>(structExtension));
+            marshal_VkD3D12FenceSubmitInfoKHR(vkStream, rootType, reinterpret_cast<const VkD3D12FenceSubmitInfoKHR*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_KHR_push_descriptor
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR:
         {
-            marshal_VkPhysicalDevicePushDescriptorPropertiesKHR(vkStream, reinterpret_cast<const VkPhysicalDevicePushDescriptorPropertiesKHR*>(structExtension));
+            marshal_VkPhysicalDevicePushDescriptorPropertiesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePushDescriptorPropertiesKHR*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_KHR_incremental_present
         case VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR:
         {
-            marshal_VkPresentRegionsKHR(vkStream, reinterpret_cast<const VkPresentRegionsKHR*>(structExtension));
+            marshal_VkPresentRegionsKHR(vkStream, rootType, reinterpret_cast<const VkPresentRegionsKHR*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_KHR_shared_presentable_image
         case VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR:
         {
-            marshal_VkSharedPresentSurfaceCapabilitiesKHR(vkStream, reinterpret_cast<const VkSharedPresentSurfaceCapabilitiesKHR*>(structExtension));
+            marshal_VkSharedPresentSurfaceCapabilitiesKHR(vkStream, rootType, reinterpret_cast<const VkSharedPresentSurfaceCapabilitiesKHR*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_KHR_external_fence_win32
         case VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR:
         {
-            marshal_VkExportFenceWin32HandleInfoKHR(vkStream, reinterpret_cast<const VkExportFenceWin32HandleInfoKHR*>(structExtension));
+            marshal_VkExportFenceWin32HandleInfoKHR(vkStream, rootType, reinterpret_cast<const VkExportFenceWin32HandleInfoKHR*>(structExtension));
             break;
         }
 #endif
-#ifdef VK_KHR_image_format_list
-        case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR:
+#ifdef VK_KHR_performance_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR:
         {
-            marshal_VkImageFormatListCreateInfoKHR(vkStream, reinterpret_cast<const VkImageFormatListCreateInfoKHR*>(structExtension));
+            marshal_VkPhysicalDevicePerformanceQueryFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePerformanceQueryFeaturesKHR*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR:
+        {
+            marshal_VkPhysicalDevicePerformanceQueryPropertiesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePerformanceQueryPropertiesKHR*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR:
+        {
+            marshal_VkQueryPoolPerformanceCreateInfoKHR(vkStream, rootType, reinterpret_cast<const VkQueryPoolPerformanceCreateInfoKHR*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR:
+        {
+            marshal_VkPerformanceQuerySubmitInfoKHR(vkStream, rootType, reinterpret_cast<const VkPerformanceQuerySubmitInfoKHR*>(structExtension));
             break;
         }
 #endif
-#ifdef VK_KHR_8bit_storage
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR:
+#ifdef VK_KHR_portability_subset
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR:
         {
-            marshal_VkPhysicalDevice8BitStorageFeaturesKHR(vkStream, reinterpret_cast<const VkPhysicalDevice8BitStorageFeaturesKHR*>(structExtension));
+            marshal_VkPhysicalDevicePortabilitySubsetFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePortabilitySubsetFeaturesKHR*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR:
+        {
+            marshal_VkPhysicalDevicePortabilitySubsetPropertiesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePortabilitySubsetPropertiesKHR*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_KHR_shader_clock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR:
+        {
+            marshal_VkPhysicalDeviceShaderClockFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderClockFeaturesKHR*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR:
+        {
+            marshal_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+        case VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR:
+        {
+            marshal_VkFragmentShadingRateAttachmentInfoKHR(vkStream, rootType, reinterpret_cast<const VkFragmentShadingRateAttachmentInfoKHR*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR:
+        {
+            marshal_VkPipelineFragmentShadingRateStateCreateInfoKHR(vkStream, rootType, reinterpret_cast<const VkPipelineFragmentShadingRateStateCreateInfoKHR*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR:
+        {
+            marshal_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShadingRateFeaturesKHR*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR:
+        {
+            marshal_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShadingRatePropertiesKHR*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+        case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR:
+        {
+            marshal_VkSurfaceProtectedCapabilitiesKHR(vkStream, rootType, reinterpret_cast<const VkSurfaceProtectedCapabilitiesKHR*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR:
+        {
+            marshal_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_ANDROID_native_buffer
         case VK_STRUCTURE_TYPE_NATIVE_BUFFER_ANDROID:
         {
-            marshal_VkNativeBufferANDROID(vkStream, reinterpret_cast<const VkNativeBufferANDROID*>(structExtension));
+            marshal_VkNativeBufferANDROID(vkStream, rootType, reinterpret_cast<const VkNativeBufferANDROID*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_EXT_debug_report
         case VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT:
         {
-            marshal_VkDebugReportCallbackCreateInfoEXT(vkStream, reinterpret_cast<const VkDebugReportCallbackCreateInfoEXT*>(structExtension));
+            marshal_VkDebugReportCallbackCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkDebugReportCallbackCreateInfoEXT*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_AMD_rasterization_order
         case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD:
         {
-            marshal_VkPipelineRasterizationStateRasterizationOrderAMD(vkStream, reinterpret_cast<const VkPipelineRasterizationStateRasterizationOrderAMD*>(structExtension));
+            marshal_VkPipelineRasterizationStateRasterizationOrderAMD(vkStream, rootType, reinterpret_cast<const VkPipelineRasterizationStateRasterizationOrderAMD*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_NV_dedicated_allocation
         case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV:
         {
-            marshal_VkDedicatedAllocationImageCreateInfoNV(vkStream, reinterpret_cast<const VkDedicatedAllocationImageCreateInfoNV*>(structExtension));
+            marshal_VkDedicatedAllocationImageCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkDedicatedAllocationImageCreateInfoNV*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV:
         {
-            marshal_VkDedicatedAllocationBufferCreateInfoNV(vkStream, reinterpret_cast<const VkDedicatedAllocationBufferCreateInfoNV*>(structExtension));
+            marshal_VkDedicatedAllocationBufferCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkDedicatedAllocationBufferCreateInfoNV*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV:
         {
-            marshal_VkDedicatedAllocationMemoryAllocateInfoNV(vkStream, reinterpret_cast<const VkDedicatedAllocationMemoryAllocateInfoNV*>(structExtension));
+            marshal_VkDedicatedAllocationMemoryAllocateInfoNV(vkStream, rootType, reinterpret_cast<const VkDedicatedAllocationMemoryAllocateInfoNV*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_transform_feedback
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceTransformFeedbackFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceTransformFeedbackFeaturesEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT:
+        {
+            marshal_VkPhysicalDeviceTransformFeedbackPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceTransformFeedbackPropertiesEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT:
+        {
+            marshal_VkPipelineRasterizationStateStreamCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineRasterizationStateStreamCreateInfoEXT*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_AMD_texture_gather_bias_lod
         case VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD:
         {
-            marshal_VkTextureLODGatherFormatPropertiesAMD(vkStream, reinterpret_cast<const VkTextureLODGatherFormatPropertiesAMD*>(structExtension));
+            marshal_VkTextureLODGatherFormatPropertiesAMD(vkStream, rootType, reinterpret_cast<const VkTextureLODGatherFormatPropertiesAMD*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_NV_corner_sampled_image
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV:
+        {
+            marshal_VkPhysicalDeviceCornerSampledImageFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceCornerSampledImageFeaturesNV*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_NV_external_memory
         case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV:
         {
-            marshal_VkExternalMemoryImageCreateInfoNV(vkStream, reinterpret_cast<const VkExternalMemoryImageCreateInfoNV*>(structExtension));
+            marshal_VkExternalMemoryImageCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkExternalMemoryImageCreateInfoNV*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV:
         {
-            marshal_VkExportMemoryAllocateInfoNV(vkStream, reinterpret_cast<const VkExportMemoryAllocateInfoNV*>(structExtension));
+            marshal_VkExportMemoryAllocateInfoNV(vkStream, rootType, reinterpret_cast<const VkExportMemoryAllocateInfoNV*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_NV_external_memory_win32
         case VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV:
         {
-            marshal_VkImportMemoryWin32HandleInfoNV(vkStream, reinterpret_cast<const VkImportMemoryWin32HandleInfoNV*>(structExtension));
+            marshal_VkImportMemoryWin32HandleInfoNV(vkStream, rootType, reinterpret_cast<const VkImportMemoryWin32HandleInfoNV*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV:
         {
-            marshal_VkExportMemoryWin32HandleInfoNV(vkStream, reinterpret_cast<const VkExportMemoryWin32HandleInfoNV*>(structExtension));
+            marshal_VkExportMemoryWin32HandleInfoNV(vkStream, rootType, reinterpret_cast<const VkExportMemoryWin32HandleInfoNV*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_NV_win32_keyed_mutex
         case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV:
         {
-            marshal_VkWin32KeyedMutexAcquireReleaseInfoNV(vkStream, reinterpret_cast<const VkWin32KeyedMutexAcquireReleaseInfoNV*>(structExtension));
+            marshal_VkWin32KeyedMutexAcquireReleaseInfoNV(vkStream, rootType, reinterpret_cast<const VkWin32KeyedMutexAcquireReleaseInfoNV*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_EXT_validation_flags
         case VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT:
         {
-            marshal_VkValidationFlagsEXT(vkStream, reinterpret_cast<const VkValidationFlagsEXT*>(structExtension));
+            marshal_VkValidationFlagsEXT(vkStream, rootType, reinterpret_cast<const VkValidationFlagsEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_astc_decode_mode
+        case VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT:
+        {
+            marshal_VkImageViewASTCDecodeModeEXT(vkStream, rootType, reinterpret_cast<const VkImageViewASTCDecodeModeEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceASTCDecodeFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceASTCDecodeFeaturesEXT*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_EXT_conditional_rendering
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
         {
-            marshal_VkPhysicalDeviceConditionalRenderingFeaturesEXT(vkStream, reinterpret_cast<const VkPhysicalDeviceConditionalRenderingFeaturesEXT*>(structExtension));
+            marshal_VkPhysicalDeviceConditionalRenderingFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceConditionalRenderingFeaturesEXT*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT:
         {
-            marshal_VkCommandBufferInheritanceConditionalRenderingInfoEXT(vkStream, reinterpret_cast<const VkCommandBufferInheritanceConditionalRenderingInfoEXT*>(structExtension));
+            marshal_VkCommandBufferInheritanceConditionalRenderingInfoEXT(vkStream, rootType, reinterpret_cast<const VkCommandBufferInheritanceConditionalRenderingInfoEXT*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_NV_clip_space_w_scaling
         case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV:
         {
-            marshal_VkPipelineViewportWScalingStateCreateInfoNV(vkStream, reinterpret_cast<const VkPipelineViewportWScalingStateCreateInfoNV*>(structExtension));
+            marshal_VkPipelineViewportWScalingStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineViewportWScalingStateCreateInfoNV*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_EXT_display_control
         case VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT:
         {
-            marshal_VkSwapchainCounterCreateInfoEXT(vkStream, reinterpret_cast<const VkSwapchainCounterCreateInfoEXT*>(structExtension));
+            marshal_VkSwapchainCounterCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkSwapchainCounterCreateInfoEXT*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_GOOGLE_display_timing
         case VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE:
         {
-            marshal_VkPresentTimesInfoGOOGLE(vkStream, reinterpret_cast<const VkPresentTimesInfoGOOGLE*>(structExtension));
+            marshal_VkPresentTimesInfoGOOGLE(vkStream, rootType, reinterpret_cast<const VkPresentTimesInfoGOOGLE*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_NVX_multiview_per_view_attributes
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX:
         {
-            marshal_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(vkStream, reinterpret_cast<const VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX*>(structExtension));
+            marshal_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_NV_viewport_swizzle
         case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV:
         {
-            marshal_VkPipelineViewportSwizzleStateCreateInfoNV(vkStream, reinterpret_cast<const VkPipelineViewportSwizzleStateCreateInfoNV*>(structExtension));
+            marshal_VkPipelineViewportSwizzleStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineViewportSwizzleStateCreateInfoNV*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_EXT_discard_rectangles
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT:
         {
-            marshal_VkPhysicalDeviceDiscardRectanglePropertiesEXT(vkStream, reinterpret_cast<const VkPhysicalDeviceDiscardRectanglePropertiesEXT*>(structExtension));
+            marshal_VkPhysicalDeviceDiscardRectanglePropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDiscardRectanglePropertiesEXT*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT:
         {
-            marshal_VkPipelineDiscardRectangleStateCreateInfoEXT(vkStream, reinterpret_cast<const VkPipelineDiscardRectangleStateCreateInfoEXT*>(structExtension));
+            marshal_VkPipelineDiscardRectangleStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineDiscardRectangleStateCreateInfoEXT*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_EXT_conservative_rasterization
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT:
         {
-            marshal_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(vkStream, reinterpret_cast<const VkPhysicalDeviceConservativeRasterizationPropertiesEXT*>(structExtension));
+            marshal_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceConservativeRasterizationPropertiesEXT*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT:
         {
-            marshal_VkPipelineRasterizationConservativeStateCreateInfoEXT(vkStream, reinterpret_cast<const VkPipelineRasterizationConservativeStateCreateInfoEXT*>(structExtension));
+            marshal_VkPipelineRasterizationConservativeStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineRasterizationConservativeStateCreateInfoEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_depth_clip_enable
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceDepthClipEnableFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDepthClipEnableFeaturesEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT:
+        {
+            marshal_VkPipelineRasterizationDepthClipStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineRasterizationDepthClipStateCreateInfoEXT*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_EXT_debug_utils
         case VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT:
         {
-            marshal_VkDebugUtilsMessengerCreateInfoEXT(vkStream, reinterpret_cast<const VkDebugUtilsMessengerCreateInfoEXT*>(structExtension));
+            marshal_VkDebugUtilsMessengerCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkDebugUtilsMessengerCreateInfoEXT*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_ANDROID_external_memory_android_hardware_buffer
         case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID:
         {
-            marshal_VkAndroidHardwareBufferUsageANDROID(vkStream, reinterpret_cast<const VkAndroidHardwareBufferUsageANDROID*>(structExtension));
+            marshal_VkAndroidHardwareBufferUsageANDROID(vkStream, rootType, reinterpret_cast<const VkAndroidHardwareBufferUsageANDROID*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID:
         {
-            marshal_VkAndroidHardwareBufferFormatPropertiesANDROID(vkStream, reinterpret_cast<const VkAndroidHardwareBufferFormatPropertiesANDROID*>(structExtension));
+            marshal_VkAndroidHardwareBufferFormatPropertiesANDROID(vkStream, rootType, reinterpret_cast<const VkAndroidHardwareBufferFormatPropertiesANDROID*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
         {
-            marshal_VkImportAndroidHardwareBufferInfoANDROID(vkStream, reinterpret_cast<const VkImportAndroidHardwareBufferInfoANDROID*>(structExtension));
+            marshal_VkImportAndroidHardwareBufferInfoANDROID(vkStream, rootType, reinterpret_cast<const VkImportAndroidHardwareBufferInfoANDROID*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID:
         {
-            marshal_VkExternalFormatANDROID(vkStream, reinterpret_cast<const VkExternalFormatANDROID*>(structExtension));
+            marshal_VkExternalFormatANDROID(vkStream, rootType, reinterpret_cast<const VkExternalFormatANDROID*>(structExtension));
             break;
         }
 #endif
-#ifdef VK_EXT_sampler_filter_minmax
-        case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT:
+#ifdef VK_EXT_inline_uniform_block
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT:
         {
-            marshal_VkSamplerReductionModeCreateInfoEXT(vkStream, reinterpret_cast<const VkSamplerReductionModeCreateInfoEXT*>(structExtension));
+            marshal_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceInlineUniformBlockFeaturesEXT*>(structExtension));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT:
         {
-            marshal_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(vkStream, reinterpret_cast<const VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT*>(structExtension));
+            marshal_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceInlineUniformBlockPropertiesEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT:
+        {
+            marshal_VkWriteDescriptorSetInlineUniformBlockEXT(vkStream, rootType, reinterpret_cast<const VkWriteDescriptorSetInlineUniformBlockEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT:
+        {
+            marshal_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkDescriptorPoolInlineUniformBlockCreateInfoEXT*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_EXT_sample_locations
         case VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT:
         {
-            marshal_VkSampleLocationsInfoEXT(vkStream, reinterpret_cast<const VkSampleLocationsInfoEXT*>(structExtension));
+            marshal_VkSampleLocationsInfoEXT(vkStream, rootType, reinterpret_cast<const VkSampleLocationsInfoEXT*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT:
         {
-            marshal_VkRenderPassSampleLocationsBeginInfoEXT(vkStream, reinterpret_cast<const VkRenderPassSampleLocationsBeginInfoEXT*>(structExtension));
+            marshal_VkRenderPassSampleLocationsBeginInfoEXT(vkStream, rootType, reinterpret_cast<const VkRenderPassSampleLocationsBeginInfoEXT*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT:
         {
-            marshal_VkPipelineSampleLocationsStateCreateInfoEXT(vkStream, reinterpret_cast<const VkPipelineSampleLocationsStateCreateInfoEXT*>(structExtension));
+            marshal_VkPipelineSampleLocationsStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineSampleLocationsStateCreateInfoEXT*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT:
         {
-            marshal_VkPhysicalDeviceSampleLocationsPropertiesEXT(vkStream, reinterpret_cast<const VkPhysicalDeviceSampleLocationsPropertiesEXT*>(structExtension));
+            marshal_VkPhysicalDeviceSampleLocationsPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceSampleLocationsPropertiesEXT*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_EXT_blend_operation_advanced
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT:
         {
-            marshal_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(vkStream, reinterpret_cast<const VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*>(structExtension));
+            marshal_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT:
         {
-            marshal_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(vkStream, reinterpret_cast<const VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT*>(structExtension));
+            marshal_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT:
         {
-            marshal_VkPipelineColorBlendAdvancedStateCreateInfoEXT(vkStream, reinterpret_cast<const VkPipelineColorBlendAdvancedStateCreateInfoEXT*>(structExtension));
+            marshal_VkPipelineColorBlendAdvancedStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineColorBlendAdvancedStateCreateInfoEXT*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_NV_fragment_coverage_to_color
         case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV:
         {
-            marshal_VkPipelineCoverageToColorStateCreateInfoNV(vkStream, reinterpret_cast<const VkPipelineCoverageToColorStateCreateInfoNV*>(structExtension));
+            marshal_VkPipelineCoverageToColorStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineCoverageToColorStateCreateInfoNV*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_NV_framebuffer_mixed_samples
         case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV:
         {
-            marshal_VkPipelineCoverageModulationStateCreateInfoNV(vkStream, reinterpret_cast<const VkPipelineCoverageModulationStateCreateInfoNV*>(structExtension));
+            marshal_VkPipelineCoverageModulationStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineCoverageModulationStateCreateInfoNV*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_NV_shader_sm_builtins
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV:
+        {
+            marshal_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderSMBuiltinsPropertiesNV*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV:
+        {
+            marshal_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderSMBuiltinsFeaturesNV*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+        case VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT:
+        {
+            marshal_VkDrmFormatModifierPropertiesListEXT(vkStream, rootType, reinterpret_cast<const VkDrmFormatModifierPropertiesListEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT:
+        {
+            marshal_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceImageDrmFormatModifierInfoEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT:
+        {
+            marshal_VkImageDrmFormatModifierListCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkImageDrmFormatModifierListCreateInfoEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT:
+        {
+            marshal_VkImageDrmFormatModifierExplicitCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkImageDrmFormatModifierExplicitCreateInfoEXT*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_EXT_validation_cache
         case VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT:
         {
-            marshal_VkShaderModuleValidationCacheCreateInfoEXT(vkStream, reinterpret_cast<const VkShaderModuleValidationCacheCreateInfoEXT*>(structExtension));
+            marshal_VkShaderModuleValidationCacheCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkShaderModuleValidationCacheCreateInfoEXT*>(structExtension));
             break;
         }
 #endif
-#ifdef VK_EXT_descriptor_indexing
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT:
+#ifdef VK_NV_shading_rate_image
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV:
         {
-            marshal_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(vkStream, reinterpret_cast<const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT*>(structExtension));
+            marshal_VkPipelineViewportShadingRateImageStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineViewportShadingRateImageStateCreateInfoNV*>(structExtension));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV:
         {
-            marshal_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(vkStream, reinterpret_cast<const VkPhysicalDeviceDescriptorIndexingFeaturesEXT*>(structExtension));
+            marshal_VkPhysicalDeviceShadingRateImageFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShadingRateImageFeaturesNV*>(structExtension));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV:
         {
-            marshal_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(vkStream, reinterpret_cast<const VkPhysicalDeviceDescriptorIndexingPropertiesEXT*>(structExtension));
+            marshal_VkPhysicalDeviceShadingRateImagePropertiesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShadingRateImagePropertiesNV*>(structExtension));
             break;
         }
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT:
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV:
         {
-            marshal_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(vkStream, reinterpret_cast<const VkDescriptorSetVariableDescriptorCountAllocateInfoEXT*>(structExtension));
+            marshal_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineViewportCoarseSampleOrderStateCreateInfoNV*>(structExtension));
             break;
         }
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT:
+#endif
+#ifdef VK_NV_ray_tracing
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV:
         {
-            marshal_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(vkStream, reinterpret_cast<const VkDescriptorSetVariableDescriptorCountLayoutSupportEXT*>(structExtension));
+            marshal_VkWriteDescriptorSetAccelerationStructureNV(vkStream, rootType, reinterpret_cast<const VkWriteDescriptorSetAccelerationStructureNV*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV:
+        {
+            marshal_VkPhysicalDeviceRayTracingPropertiesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceRayTracingPropertiesNV*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_NV_representative_fragment_test
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV:
+        {
+            marshal_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV:
+        {
+            marshal_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineRepresentativeFragmentTestStateCreateInfoNV*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_filter_cubic
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT:
+        {
+            marshal_VkPhysicalDeviceImageViewImageFormatInfoEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceImageViewImageFormatInfoEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT:
+        {
+            marshal_VkFilterCubicImageViewImageFormatPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkFilterCubicImageViewImageFormatPropertiesEXT*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_EXT_global_priority
         case VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT:
         {
-            marshal_VkDeviceQueueGlobalPriorityCreateInfoEXT(vkStream, reinterpret_cast<const VkDeviceQueueGlobalPriorityCreateInfoEXT*>(structExtension));
+            marshal_VkDeviceQueueGlobalPriorityCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkDeviceQueueGlobalPriorityCreateInfoEXT*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_EXT_external_memory_host
         case VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT:
         {
-            marshal_VkImportMemoryHostPointerInfoEXT(vkStream, reinterpret_cast<const VkImportMemoryHostPointerInfoEXT*>(structExtension));
+            marshal_VkImportMemoryHostPointerInfoEXT(vkStream, rootType, reinterpret_cast<const VkImportMemoryHostPointerInfoEXT*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT:
         {
-            marshal_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(vkStream, reinterpret_cast<const VkPhysicalDeviceExternalMemoryHostPropertiesEXT*>(structExtension));
+            marshal_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceExternalMemoryHostPropertiesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_AMD_pipeline_compiler_control
+        case VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD:
+        {
+            marshal_VkPipelineCompilerControlCreateInfoAMD(vkStream, rootType, reinterpret_cast<const VkPipelineCompilerControlCreateInfoAMD*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_AMD_shader_core_properties
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD:
         {
-            marshal_VkPhysicalDeviceShaderCorePropertiesAMD(vkStream, reinterpret_cast<const VkPhysicalDeviceShaderCorePropertiesAMD*>(structExtension));
+            marshal_VkPhysicalDeviceShaderCorePropertiesAMD(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderCorePropertiesAMD*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_AMD_memory_overallocation_behavior
+        case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD:
+        {
+            marshal_VkDeviceMemoryOverallocationCreateInfoAMD(vkStream, rootType, reinterpret_cast<const VkDeviceMemoryOverallocationCreateInfoAMD*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_EXT_vertex_attribute_divisor
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT:
         {
-            marshal_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(vkStream, reinterpret_cast<const VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT*>(structExtension));
+            marshal_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT:
         {
-            marshal_VkPipelineVertexInputDivisorStateCreateInfoEXT(vkStream, reinterpret_cast<const VkPipelineVertexInputDivisorStateCreateInfoEXT*>(structExtension));
+            marshal_VkPipelineVertexInputDivisorStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineVertexInputDivisorStateCreateInfoEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_GGP_frame_token
+        case VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP:
+        {
+            marshal_VkPresentFrameTokenGGP(vkStream, rootType, reinterpret_cast<const VkPresentFrameTokenGGP*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+        case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT:
+        {
+            marshal_VkPipelineCreationFeedbackCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineCreationFeedbackCreateInfoEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_NV_compute_shader_derivatives
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV:
+        {
+            marshal_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceComputeShaderDerivativesFeaturesNV*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_NV_mesh_shader
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV:
+        {
+            marshal_VkPhysicalDeviceMeshShaderFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceMeshShaderFeaturesNV*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV:
+        {
+            marshal_VkPhysicalDeviceMeshShaderPropertiesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceMeshShaderPropertiesNV*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV:
+        {
+            marshal_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_NV_shader_image_footprint
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV:
+        {
+            marshal_VkPhysicalDeviceShaderImageFootprintFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderImageFootprintFeaturesNV*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_NV_scissor_exclusive
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV:
+        {
+            marshal_VkPipelineViewportExclusiveScissorStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineViewportExclusiveScissorStateCreateInfoNV*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV:
+        {
+            marshal_VkPhysicalDeviceExclusiveScissorFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceExclusiveScissorFeaturesNV*>(structExtension));
             break;
         }
 #endif
 #ifdef VK_NV_device_diagnostic_checkpoints
         case VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV:
         {
-            marshal_VkQueueFamilyCheckpointPropertiesNV(vkStream, reinterpret_cast<const VkQueueFamilyCheckpointPropertiesNV*>(structExtension));
+            marshal_VkQueueFamilyCheckpointPropertiesNV(vkStream, rootType, reinterpret_cast<const VkQueueFamilyCheckpointPropertiesNV*>(structExtension));
             break;
         }
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_INTEL_shader_integer_functions2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL:
+        {
+            marshal_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL:
+        {
+            marshal_VkQueryPoolPerformanceQueryCreateInfoINTEL(vkStream, rootType, reinterpret_cast<const VkQueryPoolPerformanceQueryCreateInfoINTEL*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pci_bus_info
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT:
+        {
+            marshal_VkPhysicalDevicePCIBusInfoPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePCIBusInfoPropertiesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_AMD_display_native_hdr
+        case VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD:
+        {
+            marshal_VkDisplayNativeHdrSurfaceCapabilitiesAMD(vkStream, rootType, reinterpret_cast<const VkDisplayNativeHdrSurfaceCapabilitiesAMD*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD:
+        {
+            marshal_VkSwapchainDisplayNativeHdrCreateInfoAMD(vkStream, rootType, reinterpret_cast<const VkSwapchainDisplayNativeHdrCreateInfoAMD*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
+                {
+                    marshal_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension));
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO:
+                {
+                    marshal_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension));
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    marshal_VkImportColorBufferGOOGLE(vkStream, rootType, reinterpret_cast<const VkImportColorBufferGOOGLE*>(structExtension));
+                    break;
+                }
+                default:
+                {
+                    marshal_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension));
+                    break;
+                }
+            }
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2:
+                {
+                    marshal_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapPropertiesEXT*>(structExtension));
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    marshal_VkImportPhysicalAddressGOOGLE(vkStream, rootType, reinterpret_cast<const VkImportPhysicalAddressGOOGLE*>(structExtension));
+                    break;
+                }
+                default:
+                {
+                    marshal_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapPropertiesEXT*>(structExtension));
+                    break;
+                }
+            }
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO:
+                {
+                    marshal_VkRenderPassFragmentDensityMapCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension));
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2:
+                {
+                    marshal_VkRenderPassFragmentDensityMapCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension));
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    marshal_VkImportBufferGOOGLE(vkStream, rootType, reinterpret_cast<const VkImportBufferGOOGLE*>(structExtension));
+                    break;
+                }
+                default:
+                {
+                    marshal_VkRenderPassFragmentDensityMapCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension));
+                    break;
+                }
+            }
+            break;
+        }
+#endif
+#ifdef VK_EXT_subgroup_size_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT:
+        {
+            marshal_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT:
+        {
+            marshal_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_AMD_shader_core_properties2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD:
+        {
+            marshal_VkPhysicalDeviceShaderCoreProperties2AMD(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderCoreProperties2AMD*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_AMD_device_coherent_memory
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD:
+        {
+            marshal_VkPhysicalDeviceCoherentMemoryFeaturesAMD(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceCoherentMemoryFeaturesAMD*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_memory_budget
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT:
+        {
+            marshal_VkPhysicalDeviceMemoryBudgetPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceMemoryBudgetPropertiesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_memory_priority
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceMemoryPriorityFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceMemoryPriorityFeaturesEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT:
+        {
+            marshal_VkMemoryPriorityAllocateInfoEXT(vkStream, rootType, reinterpret_cast<const VkMemoryPriorityAllocateInfoEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV:
+        {
+            marshal_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_buffer_device_address
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceBufferDeviceAddressFeaturesEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT:
+        {
+            marshal_VkBufferDeviceAddressCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkBufferDeviceAddressCreateInfoEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_validation_features
+        case VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT:
+        {
+            marshal_VkValidationFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkValidationFeaturesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_NV_cooperative_matrix
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV:
+        {
+            marshal_VkPhysicalDeviceCooperativeMatrixFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceCooperativeMatrixFeaturesNV*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV:
+        {
+            marshal_VkPhysicalDeviceCooperativeMatrixPropertiesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceCooperativeMatrixPropertiesNV*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV:
+        {
+            marshal_VkPhysicalDeviceCoverageReductionModeFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceCoverageReductionModeFeaturesNV*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV:
+        {
+            marshal_VkPipelineCoverageReductionStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineCoverageReductionStateCreateInfoNV*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceYcbcrImageArraysFeaturesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT:
+        {
+            marshal_VkSurfaceFullScreenExclusiveInfoEXT(vkStream, rootType, reinterpret_cast<const VkSurfaceFullScreenExclusiveInfoEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT:
+        {
+            marshal_VkSurfaceCapabilitiesFullScreenExclusiveEXT(vkStream, rootType, reinterpret_cast<const VkSurfaceCapabilitiesFullScreenExclusiveEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT:
+        {
+            marshal_VkSurfaceFullScreenExclusiveWin32InfoEXT(vkStream, rootType, reinterpret_cast<const VkSurfaceFullScreenExclusiveWin32InfoEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_line_rasterization
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceLineRasterizationFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceLineRasterizationFeaturesEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT:
+        {
+            marshal_VkPhysicalDeviceLineRasterizationPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceLineRasterizationPropertiesEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
+        {
+            marshal_VkPipelineRasterizationLineStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineRasterizationLineStateCreateInfoEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_atomic_float
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderAtomicFloatFeaturesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_index_type_uint8
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceIndexTypeUint8FeaturesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceExtendedDynamicStateFeaturesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_NV_device_generated_commands
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV:
+        {
+            marshal_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV:
+        {
+            marshal_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV:
+        {
+            marshal_VkGraphicsPipelineShaderGroupsCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkGraphicsPipelineShaderGroupsCreateInfoNV*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT:
+        {
+            marshal_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_QCOM_render_pass_transform
+        case VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM:
+        {
+            marshal_VkRenderPassTransformBeginInfoQCOM(vkStream, rootType, reinterpret_cast<const VkRenderPassTransformBeginInfoQCOM*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM:
+        {
+            marshal_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(vkStream, rootType, reinterpret_cast<const VkCommandBufferInheritanceRenderPassTransformInfoQCOM*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_device_memory_report
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDeviceMemoryReportFeaturesEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT:
+        {
+            marshal_VkDeviceDeviceMemoryReportCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkDeviceDeviceMemoryReportCreateInfoEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_robustness2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceRobustness2FeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceRobustness2FeaturesEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT:
+        {
+            marshal_VkPhysicalDeviceRobustness2PropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceRobustness2PropertiesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_custom_border_color
+        case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT:
+        {
+            marshal_VkSamplerCustomBorderColorCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkSamplerCustomBorderColorCreateInfoEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT:
+        {
+            marshal_VkPhysicalDeviceCustomBorderColorPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceCustomBorderColorPropertiesEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceCustomBorderColorFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceCustomBorderColorFeaturesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_private_data
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDevicePrivateDataFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePrivateDataFeaturesEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT:
+        {
+            marshal_VkDevicePrivateDataCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkDevicePrivateDataCreateInfoEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_NV_device_diagnostics_config
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV:
+        {
+            marshal_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDiagnosticsConfigFeaturesNV*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV:
+        {
+            marshal_VkDeviceDiagnosticsConfigCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkDeviceDiagnosticsConfigCreateInfoNV*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV:
+        {
+            marshal_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV:
+        {
+            marshal_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV:
+        {
+            marshal_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineFragmentShadingRateEnumStateCreateInfoNV*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMap2FeaturesEXT*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT:
+        {
+            marshal_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMap2PropertiesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+        case VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM:
+        {
+            marshal_VkCopyCommandTransformInfoQCOM(vkStream, rootType, reinterpret_cast<const VkCopyCommandTransformInfoQCOM*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_image_robustness
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDeviceImageRobustnessFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceImageRobustnessFeaturesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_EXT_4444_formats
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+        {
+            marshal_VkPhysicalDevice4444FormatsFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDevice4444FormatsFeaturesEXT*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_GOOGLE_gfxstream
         case VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE:
         {
-            marshal_VkImportColorBufferGOOGLE(vkStream, reinterpret_cast<const VkImportColorBufferGOOGLE*>(structExtension));
+            marshal_VkImportColorBufferGOOGLE(vkStream, rootType, reinterpret_cast<const VkImportColorBufferGOOGLE*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE:
+        {
+            marshal_VkImportBufferGOOGLE(vkStream, rootType, reinterpret_cast<const VkImportBufferGOOGLE*>(structExtension));
             break;
         }
         case VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE:
         {
-            marshal_VkImportPhysicalAddressGOOGLE(vkStream, reinterpret_cast<const VkImportPhysicalAddressGOOGLE*>(structExtension));
+            marshal_VkImportPhysicalAddressGOOGLE(vkStream, rootType, reinterpret_cast<const VkImportPhysicalAddressGOOGLE*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR:
+        {
+            marshal_VkWriteDescriptorSetAccelerationStructureKHR(vkStream, rootType, reinterpret_cast<const VkWriteDescriptorSetAccelerationStructureKHR*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR:
+        {
+            marshal_VkPhysicalDeviceAccelerationStructureFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceAccelerationStructureFeaturesKHR*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR:
+        {
+            marshal_VkPhysicalDeviceAccelerationStructurePropertiesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceAccelerationStructurePropertiesKHR*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR:
+        {
+            marshal_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceRayTracingPipelineFeaturesKHR*>(structExtension));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR:
+        {
+            marshal_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceRayTracingPipelinePropertiesKHR*>(structExtension));
+            break;
+        }
+#endif
+#ifdef VK_KHR_ray_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR:
+        {
+            marshal_VkPhysicalDeviceRayQueryFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceRayQueryFeaturesKHR*>(structExtension));
             break;
         }
 #endif
         default:
         {
-            return;
+            // fatal; the switch is only taken if the extension struct is known
+            abort();
         }
     }
 }
 
 void unmarshal_extension_struct(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     void* structExtension_out)
 {
+    VkInstanceCreateInfo* structAccess = (VkInstanceCreateInfo*)(structExtension_out);
+    size_t currExtSize = goldfish_vk_extension_struct_size_with_stream_features(vkStream->getFeatureBits(), rootType, structExtension_out);
+    if (!currExtSize && structExtension_out)
+    {
+        // unknown struct extension; skip and call on its pNext field
+        unmarshal_extension_struct(vkStream, rootType, (void*)structAccess->pNext);
+        return;
+    }
+    else
+    {
+        // known or null extension struct
+        vkStream->getBe32();
+        if (!currExtSize)
+        {
+            // exit if this was a null extension struct (size == 0 in this branch)
+            return;
+        }
+    }
+    uint64_t pNext_placeholder;
+    vkStream->read((void*)(&pNext_placeholder), sizeof(VkStructureType));
+    (void)pNext_placeholder;
     if (!structExtension_out)
     {
         return;
@@ -14294,651 +24866,1620 @@
 #ifdef VK_VERSION_1_1
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES:
         {
-            unmarshal_VkPhysicalDeviceSubgroupProperties(vkStream, reinterpret_cast<VkPhysicalDeviceSubgroupProperties*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceSubgroupProperties(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceSubgroupProperties*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES:
         {
-            unmarshal_VkPhysicalDevice16BitStorageFeatures(vkStream, reinterpret_cast<VkPhysicalDevice16BitStorageFeatures*>(structExtension_out));
+            unmarshal_VkPhysicalDevice16BitStorageFeatures(vkStream, rootType, reinterpret_cast<VkPhysicalDevice16BitStorageFeatures*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
         {
-            unmarshal_VkMemoryDedicatedRequirements(vkStream, reinterpret_cast<VkMemoryDedicatedRequirements*>(structExtension_out));
+            unmarshal_VkMemoryDedicatedRequirements(vkStream, rootType, reinterpret_cast<VkMemoryDedicatedRequirements*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
         {
-            unmarshal_VkMemoryDedicatedAllocateInfo(vkStream, reinterpret_cast<VkMemoryDedicatedAllocateInfo*>(structExtension_out));
+            unmarshal_VkMemoryDedicatedAllocateInfo(vkStream, rootType, reinterpret_cast<VkMemoryDedicatedAllocateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO:
         {
-            unmarshal_VkMemoryAllocateFlagsInfo(vkStream, reinterpret_cast<VkMemoryAllocateFlagsInfo*>(structExtension_out));
+            unmarshal_VkMemoryAllocateFlagsInfo(vkStream, rootType, reinterpret_cast<VkMemoryAllocateFlagsInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO:
         {
-            unmarshal_VkDeviceGroupRenderPassBeginInfo(vkStream, reinterpret_cast<VkDeviceGroupRenderPassBeginInfo*>(structExtension_out));
+            unmarshal_VkDeviceGroupRenderPassBeginInfo(vkStream, rootType, reinterpret_cast<VkDeviceGroupRenderPassBeginInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO:
         {
-            unmarshal_VkDeviceGroupCommandBufferBeginInfo(vkStream, reinterpret_cast<VkDeviceGroupCommandBufferBeginInfo*>(structExtension_out));
+            unmarshal_VkDeviceGroupCommandBufferBeginInfo(vkStream, rootType, reinterpret_cast<VkDeviceGroupCommandBufferBeginInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO:
         {
-            unmarshal_VkDeviceGroupSubmitInfo(vkStream, reinterpret_cast<VkDeviceGroupSubmitInfo*>(structExtension_out));
+            unmarshal_VkDeviceGroupSubmitInfo(vkStream, rootType, reinterpret_cast<VkDeviceGroupSubmitInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO:
         {
-            unmarshal_VkDeviceGroupBindSparseInfo(vkStream, reinterpret_cast<VkDeviceGroupBindSparseInfo*>(structExtension_out));
+            unmarshal_VkDeviceGroupBindSparseInfo(vkStream, rootType, reinterpret_cast<VkDeviceGroupBindSparseInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO:
         {
-            unmarshal_VkBindBufferMemoryDeviceGroupInfo(vkStream, reinterpret_cast<VkBindBufferMemoryDeviceGroupInfo*>(structExtension_out));
+            unmarshal_VkBindBufferMemoryDeviceGroupInfo(vkStream, rootType, reinterpret_cast<VkBindBufferMemoryDeviceGroupInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO:
         {
-            unmarshal_VkBindImageMemoryDeviceGroupInfo(vkStream, reinterpret_cast<VkBindImageMemoryDeviceGroupInfo*>(structExtension_out));
+            unmarshal_VkBindImageMemoryDeviceGroupInfo(vkStream, rootType, reinterpret_cast<VkBindImageMemoryDeviceGroupInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO:
         {
-            unmarshal_VkDeviceGroupDeviceCreateInfo(vkStream, reinterpret_cast<VkDeviceGroupDeviceCreateInfo*>(structExtension_out));
+            unmarshal_VkDeviceGroupDeviceCreateInfo(vkStream, rootType, reinterpret_cast<VkDeviceGroupDeviceCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
         {
-            unmarshal_VkPhysicalDeviceFeatures2(vkStream, reinterpret_cast<VkPhysicalDeviceFeatures2*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceFeatures2(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceFeatures2*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES:
         {
-            unmarshal_VkPhysicalDevicePointClippingProperties(vkStream, reinterpret_cast<VkPhysicalDevicePointClippingProperties*>(structExtension_out));
+            unmarshal_VkPhysicalDevicePointClippingProperties(vkStream, rootType, reinterpret_cast<VkPhysicalDevicePointClippingProperties*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO:
         {
-            unmarshal_VkRenderPassInputAttachmentAspectCreateInfo(vkStream, reinterpret_cast<VkRenderPassInputAttachmentAspectCreateInfo*>(structExtension_out));
+            unmarshal_VkRenderPassInputAttachmentAspectCreateInfo(vkStream, rootType, reinterpret_cast<VkRenderPassInputAttachmentAspectCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO:
         {
-            unmarshal_VkImageViewUsageCreateInfo(vkStream, reinterpret_cast<VkImageViewUsageCreateInfo*>(structExtension_out));
+            unmarshal_VkImageViewUsageCreateInfo(vkStream, rootType, reinterpret_cast<VkImageViewUsageCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO:
         {
-            unmarshal_VkPipelineTessellationDomainOriginStateCreateInfo(vkStream, reinterpret_cast<VkPipelineTessellationDomainOriginStateCreateInfo*>(structExtension_out));
+            unmarshal_VkPipelineTessellationDomainOriginStateCreateInfo(vkStream, rootType, reinterpret_cast<VkPipelineTessellationDomainOriginStateCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
         {
-            unmarshal_VkRenderPassMultiviewCreateInfo(vkStream, reinterpret_cast<VkRenderPassMultiviewCreateInfo*>(structExtension_out));
+            unmarshal_VkRenderPassMultiviewCreateInfo(vkStream, rootType, reinterpret_cast<VkRenderPassMultiviewCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES:
         {
-            unmarshal_VkPhysicalDeviceMultiviewFeatures(vkStream, reinterpret_cast<VkPhysicalDeviceMultiviewFeatures*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceMultiviewFeatures(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceMultiviewFeatures*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES:
         {
-            unmarshal_VkPhysicalDeviceMultiviewProperties(vkStream, reinterpret_cast<VkPhysicalDeviceMultiviewProperties*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceMultiviewProperties(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceMultiviewProperties*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES:
         {
-            unmarshal_VkPhysicalDeviceVariablePointerFeatures(vkStream, reinterpret_cast<VkPhysicalDeviceVariablePointerFeatures*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceVariablePointersFeatures(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceVariablePointersFeatures*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES:
         {
-            unmarshal_VkPhysicalDeviceProtectedMemoryFeatures(vkStream, reinterpret_cast<VkPhysicalDeviceProtectedMemoryFeatures*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceProtectedMemoryFeatures(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceProtectedMemoryFeatures*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES:
         {
-            unmarshal_VkPhysicalDeviceProtectedMemoryProperties(vkStream, reinterpret_cast<VkPhysicalDeviceProtectedMemoryProperties*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceProtectedMemoryProperties(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceProtectedMemoryProperties*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO:
         {
-            unmarshal_VkProtectedSubmitInfo(vkStream, reinterpret_cast<VkProtectedSubmitInfo*>(structExtension_out));
+            unmarshal_VkProtectedSubmitInfo(vkStream, rootType, reinterpret_cast<VkProtectedSubmitInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO:
         {
-            unmarshal_VkSamplerYcbcrConversionInfo(vkStream, reinterpret_cast<VkSamplerYcbcrConversionInfo*>(structExtension_out));
+            unmarshal_VkSamplerYcbcrConversionInfo(vkStream, rootType, reinterpret_cast<VkSamplerYcbcrConversionInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO:
         {
-            unmarshal_VkBindImagePlaneMemoryInfo(vkStream, reinterpret_cast<VkBindImagePlaneMemoryInfo*>(structExtension_out));
+            unmarshal_VkBindImagePlaneMemoryInfo(vkStream, rootType, reinterpret_cast<VkBindImagePlaneMemoryInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO:
         {
-            unmarshal_VkImagePlaneMemoryRequirementsInfo(vkStream, reinterpret_cast<VkImagePlaneMemoryRequirementsInfo*>(structExtension_out));
+            unmarshal_VkImagePlaneMemoryRequirementsInfo(vkStream, rootType, reinterpret_cast<VkImagePlaneMemoryRequirementsInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES:
         {
-            unmarshal_VkPhysicalDeviceSamplerYcbcrConversionFeatures(vkStream, reinterpret_cast<VkPhysicalDeviceSamplerYcbcrConversionFeatures*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceSamplerYcbcrConversionFeatures(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceSamplerYcbcrConversionFeatures*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES:
         {
-            unmarshal_VkSamplerYcbcrConversionImageFormatProperties(vkStream, reinterpret_cast<VkSamplerYcbcrConversionImageFormatProperties*>(structExtension_out));
+            unmarshal_VkSamplerYcbcrConversionImageFormatProperties(vkStream, rootType, reinterpret_cast<VkSamplerYcbcrConversionImageFormatProperties*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO:
         {
-            unmarshal_VkPhysicalDeviceExternalImageFormatInfo(vkStream, reinterpret_cast<VkPhysicalDeviceExternalImageFormatInfo*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceExternalImageFormatInfo(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceExternalImageFormatInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES:
         {
-            unmarshal_VkExternalImageFormatProperties(vkStream, reinterpret_cast<VkExternalImageFormatProperties*>(structExtension_out));
+            unmarshal_VkExternalImageFormatProperties(vkStream, rootType, reinterpret_cast<VkExternalImageFormatProperties*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES:
         {
-            unmarshal_VkPhysicalDeviceIDProperties(vkStream, reinterpret_cast<VkPhysicalDeviceIDProperties*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceIDProperties(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceIDProperties*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO:
         {
-            unmarshal_VkExternalMemoryImageCreateInfo(vkStream, reinterpret_cast<VkExternalMemoryImageCreateInfo*>(structExtension_out));
+            unmarshal_VkExternalMemoryImageCreateInfo(vkStream, rootType, reinterpret_cast<VkExternalMemoryImageCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO:
         {
-            unmarshal_VkExternalMemoryBufferCreateInfo(vkStream, reinterpret_cast<VkExternalMemoryBufferCreateInfo*>(structExtension_out));
+            unmarshal_VkExternalMemoryBufferCreateInfo(vkStream, rootType, reinterpret_cast<VkExternalMemoryBufferCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
         {
-            unmarshal_VkExportMemoryAllocateInfo(vkStream, reinterpret_cast<VkExportMemoryAllocateInfo*>(structExtension_out));
+            unmarshal_VkExportMemoryAllocateInfo(vkStream, rootType, reinterpret_cast<VkExportMemoryAllocateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO:
         {
-            unmarshal_VkExportFenceCreateInfo(vkStream, reinterpret_cast<VkExportFenceCreateInfo*>(structExtension_out));
+            unmarshal_VkExportFenceCreateInfo(vkStream, rootType, reinterpret_cast<VkExportFenceCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO:
         {
-            unmarshal_VkExportSemaphoreCreateInfo(vkStream, reinterpret_cast<VkExportSemaphoreCreateInfo*>(structExtension_out));
+            unmarshal_VkExportSemaphoreCreateInfo(vkStream, rootType, reinterpret_cast<VkExportSemaphoreCreateInfo*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES:
         {
-            unmarshal_VkPhysicalDeviceMaintenance3Properties(vkStream, reinterpret_cast<VkPhysicalDeviceMaintenance3Properties*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceMaintenance3Properties(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceMaintenance3Properties*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
         {
-            unmarshal_VkPhysicalDeviceShaderDrawParameterFeatures(vkStream, reinterpret_cast<VkPhysicalDeviceShaderDrawParameterFeatures*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceShaderDrawParametersFeatures(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceShaderDrawParametersFeatures*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
+        {
+            unmarshal_VkPhysicalDeviceVulkan11Features(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceVulkan11Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
+        {
+            unmarshal_VkPhysicalDeviceVulkan11Properties(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceVulkan11Properties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
+        {
+            unmarshal_VkPhysicalDeviceVulkan12Features(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceVulkan12Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
+        {
+            unmarshal_VkPhysicalDeviceVulkan12Properties(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceVulkan12Properties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO:
+        {
+            unmarshal_VkImageFormatListCreateInfo(vkStream, rootType, reinterpret_cast<VkImageFormatListCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES:
+        {
+            unmarshal_VkPhysicalDevice8BitStorageFeatures(vkStream, rootType, reinterpret_cast<VkPhysicalDevice8BitStorageFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES:
+        {
+            unmarshal_VkPhysicalDeviceDriverProperties(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceDriverProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES:
+        {
+            unmarshal_VkPhysicalDeviceShaderAtomicInt64Features(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceShaderAtomicInt64Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES:
+        {
+            unmarshal_VkPhysicalDeviceShaderFloat16Int8Features(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceShaderFloat16Int8Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES:
+        {
+            unmarshal_VkPhysicalDeviceFloatControlsProperties(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceFloatControlsProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO:
+        {
+            unmarshal_VkDescriptorSetLayoutBindingFlagsCreateInfo(vkStream, rootType, reinterpret_cast<VkDescriptorSetLayoutBindingFlagsCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES:
+        {
+            unmarshal_VkPhysicalDeviceDescriptorIndexingFeatures(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceDescriptorIndexingFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES:
+        {
+            unmarshal_VkPhysicalDeviceDescriptorIndexingProperties(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceDescriptorIndexingProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO:
+        {
+            unmarshal_VkDescriptorSetVariableDescriptorCountAllocateInfo(vkStream, rootType, reinterpret_cast<VkDescriptorSetVariableDescriptorCountAllocateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT:
+        {
+            unmarshal_VkDescriptorSetVariableDescriptorCountLayoutSupport(vkStream, rootType, reinterpret_cast<VkDescriptorSetVariableDescriptorCountLayoutSupport*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE:
+        {
+            unmarshal_VkSubpassDescriptionDepthStencilResolve(vkStream, rootType, reinterpret_cast<VkSubpassDescriptionDepthStencilResolve*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES:
+        {
+            unmarshal_VkPhysicalDeviceDepthStencilResolveProperties(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceDepthStencilResolveProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES:
+        {
+            unmarshal_VkPhysicalDeviceScalarBlockLayoutFeatures(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceScalarBlockLayoutFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO:
+        {
+            unmarshal_VkImageStencilUsageCreateInfo(vkStream, rootType, reinterpret_cast<VkImageStencilUsageCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO:
+        {
+            unmarshal_VkSamplerReductionModeCreateInfo(vkStream, rootType, reinterpret_cast<VkSamplerReductionModeCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES:
+        {
+            unmarshal_VkPhysicalDeviceSamplerFilterMinmaxProperties(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceSamplerFilterMinmaxProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
+        {
+            unmarshal_VkPhysicalDeviceVulkanMemoryModelFeatures(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceVulkanMemoryModelFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES:
+        {
+            unmarshal_VkPhysicalDeviceImagelessFramebufferFeatures(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceImagelessFramebufferFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO:
+        {
+            unmarshal_VkFramebufferAttachmentsCreateInfo(vkStream, rootType, reinterpret_cast<VkFramebufferAttachmentsCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO:
+        {
+            unmarshal_VkRenderPassAttachmentBeginInfo(vkStream, rootType, reinterpret_cast<VkRenderPassAttachmentBeginInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES:
+        {
+            unmarshal_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceUniformBufferStandardLayoutFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES:
+        {
+            unmarshal_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
+        {
+            unmarshal_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT:
+        {
+            unmarshal_VkAttachmentReferenceStencilLayout(vkStream, rootType, reinterpret_cast<VkAttachmentReferenceStencilLayout*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT:
+        {
+            unmarshal_VkAttachmentDescriptionStencilLayout(vkStream, rootType, reinterpret_cast<VkAttachmentDescriptionStencilLayout*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES:
+        {
+            unmarshal_VkPhysicalDeviceHostQueryResetFeatures(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceHostQueryResetFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES:
+        {
+            unmarshal_VkPhysicalDeviceTimelineSemaphoreFeatures(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceTimelineSemaphoreFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES:
+        {
+            unmarshal_VkPhysicalDeviceTimelineSemaphoreProperties(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceTimelineSemaphoreProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO:
+        {
+            unmarshal_VkSemaphoreTypeCreateInfo(vkStream, rootType, reinterpret_cast<VkSemaphoreTypeCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
+        {
+            unmarshal_VkTimelineSemaphoreSubmitInfo(vkStream, rootType, reinterpret_cast<VkTimelineSemaphoreSubmitInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
+        {
+            unmarshal_VkPhysicalDeviceBufferDeviceAddressFeatures(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceBufferDeviceAddressFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO:
+        {
+            unmarshal_VkBufferOpaqueCaptureAddressCreateInfo(vkStream, rootType, reinterpret_cast<VkBufferOpaqueCaptureAddressCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO:
+        {
+            unmarshal_VkMemoryOpaqueCaptureAddressAllocateInfo(vkStream, rootType, reinterpret_cast<VkMemoryOpaqueCaptureAddressAllocateInfo*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_swapchain
         case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
         {
-            unmarshal_VkImageSwapchainCreateInfoKHR(vkStream, reinterpret_cast<VkImageSwapchainCreateInfoKHR*>(structExtension_out));
+            unmarshal_VkImageSwapchainCreateInfoKHR(vkStream, rootType, reinterpret_cast<VkImageSwapchainCreateInfoKHR*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR:
         {
-            unmarshal_VkBindImageMemorySwapchainInfoKHR(vkStream, reinterpret_cast<VkBindImageMemorySwapchainInfoKHR*>(structExtension_out));
+            unmarshal_VkBindImageMemorySwapchainInfoKHR(vkStream, rootType, reinterpret_cast<VkBindImageMemorySwapchainInfoKHR*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR:
         {
-            unmarshal_VkDeviceGroupPresentInfoKHR(vkStream, reinterpret_cast<VkDeviceGroupPresentInfoKHR*>(structExtension_out));
+            unmarshal_VkDeviceGroupPresentInfoKHR(vkStream, rootType, reinterpret_cast<VkDeviceGroupPresentInfoKHR*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR:
         {
-            unmarshal_VkDeviceGroupSwapchainCreateInfoKHR(vkStream, reinterpret_cast<VkDeviceGroupSwapchainCreateInfoKHR*>(structExtension_out));
+            unmarshal_VkDeviceGroupSwapchainCreateInfoKHR(vkStream, rootType, reinterpret_cast<VkDeviceGroupSwapchainCreateInfoKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_display_swapchain
         case VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR:
         {
-            unmarshal_VkDisplayPresentInfoKHR(vkStream, reinterpret_cast<VkDisplayPresentInfoKHR*>(structExtension_out));
+            unmarshal_VkDisplayPresentInfoKHR(vkStream, rootType, reinterpret_cast<VkDisplayPresentInfoKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_external_memory_win32
         case VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR:
         {
-            unmarshal_VkImportMemoryWin32HandleInfoKHR(vkStream, reinterpret_cast<VkImportMemoryWin32HandleInfoKHR*>(structExtension_out));
+            unmarshal_VkImportMemoryWin32HandleInfoKHR(vkStream, rootType, reinterpret_cast<VkImportMemoryWin32HandleInfoKHR*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR:
         {
-            unmarshal_VkExportMemoryWin32HandleInfoKHR(vkStream, reinterpret_cast<VkExportMemoryWin32HandleInfoKHR*>(structExtension_out));
+            unmarshal_VkExportMemoryWin32HandleInfoKHR(vkStream, rootType, reinterpret_cast<VkExportMemoryWin32HandleInfoKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_external_memory_fd
         case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
         {
-            unmarshal_VkImportMemoryFdInfoKHR(vkStream, reinterpret_cast<VkImportMemoryFdInfoKHR*>(structExtension_out));
+            unmarshal_VkImportMemoryFdInfoKHR(vkStream, rootType, reinterpret_cast<VkImportMemoryFdInfoKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_win32_keyed_mutex
         case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR:
         {
-            unmarshal_VkWin32KeyedMutexAcquireReleaseInfoKHR(vkStream, reinterpret_cast<VkWin32KeyedMutexAcquireReleaseInfoKHR*>(structExtension_out));
+            unmarshal_VkWin32KeyedMutexAcquireReleaseInfoKHR(vkStream, rootType, reinterpret_cast<VkWin32KeyedMutexAcquireReleaseInfoKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_external_semaphore_win32
         case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR:
         {
-            unmarshal_VkExportSemaphoreWin32HandleInfoKHR(vkStream, reinterpret_cast<VkExportSemaphoreWin32HandleInfoKHR*>(structExtension_out));
+            unmarshal_VkExportSemaphoreWin32HandleInfoKHR(vkStream, rootType, reinterpret_cast<VkExportSemaphoreWin32HandleInfoKHR*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR:
         {
-            unmarshal_VkD3D12FenceSubmitInfoKHR(vkStream, reinterpret_cast<VkD3D12FenceSubmitInfoKHR*>(structExtension_out));
+            unmarshal_VkD3D12FenceSubmitInfoKHR(vkStream, rootType, reinterpret_cast<VkD3D12FenceSubmitInfoKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_push_descriptor
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR:
         {
-            unmarshal_VkPhysicalDevicePushDescriptorPropertiesKHR(vkStream, reinterpret_cast<VkPhysicalDevicePushDescriptorPropertiesKHR*>(structExtension_out));
+            unmarshal_VkPhysicalDevicePushDescriptorPropertiesKHR(vkStream, rootType, reinterpret_cast<VkPhysicalDevicePushDescriptorPropertiesKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_incremental_present
         case VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR:
         {
-            unmarshal_VkPresentRegionsKHR(vkStream, reinterpret_cast<VkPresentRegionsKHR*>(structExtension_out));
+            unmarshal_VkPresentRegionsKHR(vkStream, rootType, reinterpret_cast<VkPresentRegionsKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_shared_presentable_image
         case VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR:
         {
-            unmarshal_VkSharedPresentSurfaceCapabilitiesKHR(vkStream, reinterpret_cast<VkSharedPresentSurfaceCapabilitiesKHR*>(structExtension_out));
+            unmarshal_VkSharedPresentSurfaceCapabilitiesKHR(vkStream, rootType, reinterpret_cast<VkSharedPresentSurfaceCapabilitiesKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_KHR_external_fence_win32
         case VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR:
         {
-            unmarshal_VkExportFenceWin32HandleInfoKHR(vkStream, reinterpret_cast<VkExportFenceWin32HandleInfoKHR*>(structExtension_out));
+            unmarshal_VkExportFenceWin32HandleInfoKHR(vkStream, rootType, reinterpret_cast<VkExportFenceWin32HandleInfoKHR*>(structExtension_out));
             break;
         }
 #endif
-#ifdef VK_KHR_image_format_list
-        case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR:
+#ifdef VK_KHR_performance_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR:
         {
-            unmarshal_VkImageFormatListCreateInfoKHR(vkStream, reinterpret_cast<VkImageFormatListCreateInfoKHR*>(structExtension_out));
+            unmarshal_VkPhysicalDevicePerformanceQueryFeaturesKHR(vkStream, rootType, reinterpret_cast<VkPhysicalDevicePerformanceQueryFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR:
+        {
+            unmarshal_VkPhysicalDevicePerformanceQueryPropertiesKHR(vkStream, rootType, reinterpret_cast<VkPhysicalDevicePerformanceQueryPropertiesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR:
+        {
+            unmarshal_VkQueryPoolPerformanceCreateInfoKHR(vkStream, rootType, reinterpret_cast<VkQueryPoolPerformanceCreateInfoKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR:
+        {
+            unmarshal_VkPerformanceQuerySubmitInfoKHR(vkStream, rootType, reinterpret_cast<VkPerformanceQuerySubmitInfoKHR*>(structExtension_out));
             break;
         }
 #endif
-#ifdef VK_KHR_8bit_storage
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR:
+#ifdef VK_KHR_portability_subset
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR:
         {
-            unmarshal_VkPhysicalDevice8BitStorageFeaturesKHR(vkStream, reinterpret_cast<VkPhysicalDevice8BitStorageFeaturesKHR*>(structExtension_out));
+            unmarshal_VkPhysicalDevicePortabilitySubsetFeaturesKHR(vkStream, rootType, reinterpret_cast<VkPhysicalDevicePortabilitySubsetFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR:
+        {
+            unmarshal_VkPhysicalDevicePortabilitySubsetPropertiesKHR(vkStream, rootType, reinterpret_cast<VkPhysicalDevicePortabilitySubsetPropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_shader_clock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR:
+        {
+            unmarshal_VkPhysicalDeviceShaderClockFeaturesKHR(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceShaderClockFeaturesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR:
+        {
+            unmarshal_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+        case VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR:
+        {
+            unmarshal_VkFragmentShadingRateAttachmentInfoKHR(vkStream, rootType, reinterpret_cast<VkFragmentShadingRateAttachmentInfoKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR:
+        {
+            unmarshal_VkPipelineFragmentShadingRateStateCreateInfoKHR(vkStream, rootType, reinterpret_cast<VkPipelineFragmentShadingRateStateCreateInfoKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR:
+        {
+            unmarshal_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceFragmentShadingRateFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR:
+        {
+            unmarshal_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceFragmentShadingRatePropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+        case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR:
+        {
+            unmarshal_VkSurfaceProtectedCapabilitiesKHR(vkStream, rootType, reinterpret_cast<VkSurfaceProtectedCapabilitiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR:
+        {
+            unmarshal_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(vkStream, rootType, reinterpret_cast<VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_ANDROID_native_buffer
         case VK_STRUCTURE_TYPE_NATIVE_BUFFER_ANDROID:
         {
-            unmarshal_VkNativeBufferANDROID(vkStream, reinterpret_cast<VkNativeBufferANDROID*>(structExtension_out));
+            unmarshal_VkNativeBufferANDROID(vkStream, rootType, reinterpret_cast<VkNativeBufferANDROID*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_debug_report
         case VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT:
         {
-            unmarshal_VkDebugReportCallbackCreateInfoEXT(vkStream, reinterpret_cast<VkDebugReportCallbackCreateInfoEXT*>(structExtension_out));
+            unmarshal_VkDebugReportCallbackCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkDebugReportCallbackCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_AMD_rasterization_order
         case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD:
         {
-            unmarshal_VkPipelineRasterizationStateRasterizationOrderAMD(vkStream, reinterpret_cast<VkPipelineRasterizationStateRasterizationOrderAMD*>(structExtension_out));
+            unmarshal_VkPipelineRasterizationStateRasterizationOrderAMD(vkStream, rootType, reinterpret_cast<VkPipelineRasterizationStateRasterizationOrderAMD*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_dedicated_allocation
         case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV:
         {
-            unmarshal_VkDedicatedAllocationImageCreateInfoNV(vkStream, reinterpret_cast<VkDedicatedAllocationImageCreateInfoNV*>(structExtension_out));
+            unmarshal_VkDedicatedAllocationImageCreateInfoNV(vkStream, rootType, reinterpret_cast<VkDedicatedAllocationImageCreateInfoNV*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV:
         {
-            unmarshal_VkDedicatedAllocationBufferCreateInfoNV(vkStream, reinterpret_cast<VkDedicatedAllocationBufferCreateInfoNV*>(structExtension_out));
+            unmarshal_VkDedicatedAllocationBufferCreateInfoNV(vkStream, rootType, reinterpret_cast<VkDedicatedAllocationBufferCreateInfoNV*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV:
         {
-            unmarshal_VkDedicatedAllocationMemoryAllocateInfoNV(vkStream, reinterpret_cast<VkDedicatedAllocationMemoryAllocateInfoNV*>(structExtension_out));
+            unmarshal_VkDedicatedAllocationMemoryAllocateInfoNV(vkStream, rootType, reinterpret_cast<VkDedicatedAllocationMemoryAllocateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_transform_feedback
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceTransformFeedbackFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceTransformFeedbackFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceTransformFeedbackPropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceTransformFeedbackPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT:
+        {
+            unmarshal_VkPipelineRasterizationStateStreamCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkPipelineRasterizationStateStreamCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_AMD_texture_gather_bias_lod
         case VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD:
         {
-            unmarshal_VkTextureLODGatherFormatPropertiesAMD(vkStream, reinterpret_cast<VkTextureLODGatherFormatPropertiesAMD*>(structExtension_out));
+            unmarshal_VkTextureLODGatherFormatPropertiesAMD(vkStream, rootType, reinterpret_cast<VkTextureLODGatherFormatPropertiesAMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_corner_sampled_image
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV:
+        {
+            unmarshal_VkPhysicalDeviceCornerSampledImageFeaturesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceCornerSampledImageFeaturesNV*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_external_memory
         case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV:
         {
-            unmarshal_VkExternalMemoryImageCreateInfoNV(vkStream, reinterpret_cast<VkExternalMemoryImageCreateInfoNV*>(structExtension_out));
+            unmarshal_VkExternalMemoryImageCreateInfoNV(vkStream, rootType, reinterpret_cast<VkExternalMemoryImageCreateInfoNV*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV:
         {
-            unmarshal_VkExportMemoryAllocateInfoNV(vkStream, reinterpret_cast<VkExportMemoryAllocateInfoNV*>(structExtension_out));
+            unmarshal_VkExportMemoryAllocateInfoNV(vkStream, rootType, reinterpret_cast<VkExportMemoryAllocateInfoNV*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_external_memory_win32
         case VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV:
         {
-            unmarshal_VkImportMemoryWin32HandleInfoNV(vkStream, reinterpret_cast<VkImportMemoryWin32HandleInfoNV*>(structExtension_out));
+            unmarshal_VkImportMemoryWin32HandleInfoNV(vkStream, rootType, reinterpret_cast<VkImportMemoryWin32HandleInfoNV*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV:
         {
-            unmarshal_VkExportMemoryWin32HandleInfoNV(vkStream, reinterpret_cast<VkExportMemoryWin32HandleInfoNV*>(structExtension_out));
+            unmarshal_VkExportMemoryWin32HandleInfoNV(vkStream, rootType, reinterpret_cast<VkExportMemoryWin32HandleInfoNV*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_win32_keyed_mutex
         case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV:
         {
-            unmarshal_VkWin32KeyedMutexAcquireReleaseInfoNV(vkStream, reinterpret_cast<VkWin32KeyedMutexAcquireReleaseInfoNV*>(structExtension_out));
+            unmarshal_VkWin32KeyedMutexAcquireReleaseInfoNV(vkStream, rootType, reinterpret_cast<VkWin32KeyedMutexAcquireReleaseInfoNV*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_validation_flags
         case VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT:
         {
-            unmarshal_VkValidationFlagsEXT(vkStream, reinterpret_cast<VkValidationFlagsEXT*>(structExtension_out));
+            unmarshal_VkValidationFlagsEXT(vkStream, rootType, reinterpret_cast<VkValidationFlagsEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_astc_decode_mode
+        case VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT:
+        {
+            unmarshal_VkImageViewASTCDecodeModeEXT(vkStream, rootType, reinterpret_cast<VkImageViewASTCDecodeModeEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceASTCDecodeFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceASTCDecodeFeaturesEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_conditional_rendering
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
         {
-            unmarshal_VkPhysicalDeviceConditionalRenderingFeaturesEXT(vkStream, reinterpret_cast<VkPhysicalDeviceConditionalRenderingFeaturesEXT*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceConditionalRenderingFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceConditionalRenderingFeaturesEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT:
         {
-            unmarshal_VkCommandBufferInheritanceConditionalRenderingInfoEXT(vkStream, reinterpret_cast<VkCommandBufferInheritanceConditionalRenderingInfoEXT*>(structExtension_out));
+            unmarshal_VkCommandBufferInheritanceConditionalRenderingInfoEXT(vkStream, rootType, reinterpret_cast<VkCommandBufferInheritanceConditionalRenderingInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_clip_space_w_scaling
         case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV:
         {
-            unmarshal_VkPipelineViewportWScalingStateCreateInfoNV(vkStream, reinterpret_cast<VkPipelineViewportWScalingStateCreateInfoNV*>(structExtension_out));
+            unmarshal_VkPipelineViewportWScalingStateCreateInfoNV(vkStream, rootType, reinterpret_cast<VkPipelineViewportWScalingStateCreateInfoNV*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_display_control
         case VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT:
         {
-            unmarshal_VkSwapchainCounterCreateInfoEXT(vkStream, reinterpret_cast<VkSwapchainCounterCreateInfoEXT*>(structExtension_out));
+            unmarshal_VkSwapchainCounterCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkSwapchainCounterCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_GOOGLE_display_timing
         case VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE:
         {
-            unmarshal_VkPresentTimesInfoGOOGLE(vkStream, reinterpret_cast<VkPresentTimesInfoGOOGLE*>(structExtension_out));
+            unmarshal_VkPresentTimesInfoGOOGLE(vkStream, rootType, reinterpret_cast<VkPresentTimesInfoGOOGLE*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NVX_multiview_per_view_attributes
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX:
         {
-            unmarshal_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(vkStream, reinterpret_cast<VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_viewport_swizzle
         case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV:
         {
-            unmarshal_VkPipelineViewportSwizzleStateCreateInfoNV(vkStream, reinterpret_cast<VkPipelineViewportSwizzleStateCreateInfoNV*>(structExtension_out));
+            unmarshal_VkPipelineViewportSwizzleStateCreateInfoNV(vkStream, rootType, reinterpret_cast<VkPipelineViewportSwizzleStateCreateInfoNV*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_discard_rectangles
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT:
         {
-            unmarshal_VkPhysicalDeviceDiscardRectanglePropertiesEXT(vkStream, reinterpret_cast<VkPhysicalDeviceDiscardRectanglePropertiesEXT*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceDiscardRectanglePropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceDiscardRectanglePropertiesEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT:
         {
-            unmarshal_VkPipelineDiscardRectangleStateCreateInfoEXT(vkStream, reinterpret_cast<VkPipelineDiscardRectangleStateCreateInfoEXT*>(structExtension_out));
+            unmarshal_VkPipelineDiscardRectangleStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkPipelineDiscardRectangleStateCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_conservative_rasterization
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT:
         {
-            unmarshal_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(vkStream, reinterpret_cast<VkPhysicalDeviceConservativeRasterizationPropertiesEXT*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceConservativeRasterizationPropertiesEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT:
         {
-            unmarshal_VkPipelineRasterizationConservativeStateCreateInfoEXT(vkStream, reinterpret_cast<VkPipelineRasterizationConservativeStateCreateInfoEXT*>(structExtension_out));
+            unmarshal_VkPipelineRasterizationConservativeStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkPipelineRasterizationConservativeStateCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_depth_clip_enable
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceDepthClipEnableFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceDepthClipEnableFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT:
+        {
+            unmarshal_VkPipelineRasterizationDepthClipStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkPipelineRasterizationDepthClipStateCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_debug_utils
         case VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT:
         {
-            unmarshal_VkDebugUtilsMessengerCreateInfoEXT(vkStream, reinterpret_cast<VkDebugUtilsMessengerCreateInfoEXT*>(structExtension_out));
+            unmarshal_VkDebugUtilsMessengerCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkDebugUtilsMessengerCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_ANDROID_external_memory_android_hardware_buffer
         case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID:
         {
-            unmarshal_VkAndroidHardwareBufferUsageANDROID(vkStream, reinterpret_cast<VkAndroidHardwareBufferUsageANDROID*>(structExtension_out));
+            unmarshal_VkAndroidHardwareBufferUsageANDROID(vkStream, rootType, reinterpret_cast<VkAndroidHardwareBufferUsageANDROID*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID:
         {
-            unmarshal_VkAndroidHardwareBufferFormatPropertiesANDROID(vkStream, reinterpret_cast<VkAndroidHardwareBufferFormatPropertiesANDROID*>(structExtension_out));
+            unmarshal_VkAndroidHardwareBufferFormatPropertiesANDROID(vkStream, rootType, reinterpret_cast<VkAndroidHardwareBufferFormatPropertiesANDROID*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
         {
-            unmarshal_VkImportAndroidHardwareBufferInfoANDROID(vkStream, reinterpret_cast<VkImportAndroidHardwareBufferInfoANDROID*>(structExtension_out));
+            unmarshal_VkImportAndroidHardwareBufferInfoANDROID(vkStream, rootType, reinterpret_cast<VkImportAndroidHardwareBufferInfoANDROID*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID:
         {
-            unmarshal_VkExternalFormatANDROID(vkStream, reinterpret_cast<VkExternalFormatANDROID*>(structExtension_out));
+            unmarshal_VkExternalFormatANDROID(vkStream, rootType, reinterpret_cast<VkExternalFormatANDROID*>(structExtension_out));
             break;
         }
 #endif
-#ifdef VK_EXT_sampler_filter_minmax
-        case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT:
+#ifdef VK_EXT_inline_uniform_block
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT:
         {
-            unmarshal_VkSamplerReductionModeCreateInfoEXT(vkStream, reinterpret_cast<VkSamplerReductionModeCreateInfoEXT*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceInlineUniformBlockFeaturesEXT*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT:
         {
-            unmarshal_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(vkStream, reinterpret_cast<VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceInlineUniformBlockPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT:
+        {
+            unmarshal_VkWriteDescriptorSetInlineUniformBlockEXT(vkStream, rootType, reinterpret_cast<VkWriteDescriptorSetInlineUniformBlockEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT:
+        {
+            unmarshal_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkDescriptorPoolInlineUniformBlockCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_sample_locations
         case VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT:
         {
-            unmarshal_VkSampleLocationsInfoEXT(vkStream, reinterpret_cast<VkSampleLocationsInfoEXT*>(structExtension_out));
+            unmarshal_VkSampleLocationsInfoEXT(vkStream, rootType, reinterpret_cast<VkSampleLocationsInfoEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT:
         {
-            unmarshal_VkRenderPassSampleLocationsBeginInfoEXT(vkStream, reinterpret_cast<VkRenderPassSampleLocationsBeginInfoEXT*>(structExtension_out));
+            unmarshal_VkRenderPassSampleLocationsBeginInfoEXT(vkStream, rootType, reinterpret_cast<VkRenderPassSampleLocationsBeginInfoEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT:
         {
-            unmarshal_VkPipelineSampleLocationsStateCreateInfoEXT(vkStream, reinterpret_cast<VkPipelineSampleLocationsStateCreateInfoEXT*>(structExtension_out));
+            unmarshal_VkPipelineSampleLocationsStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkPipelineSampleLocationsStateCreateInfoEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT:
         {
-            unmarshal_VkPhysicalDeviceSampleLocationsPropertiesEXT(vkStream, reinterpret_cast<VkPhysicalDeviceSampleLocationsPropertiesEXT*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceSampleLocationsPropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceSampleLocationsPropertiesEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_blend_operation_advanced
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT:
         {
-            unmarshal_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(vkStream, reinterpret_cast<VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT:
         {
-            unmarshal_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(vkStream, reinterpret_cast<VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT:
         {
-            unmarshal_VkPipelineColorBlendAdvancedStateCreateInfoEXT(vkStream, reinterpret_cast<VkPipelineColorBlendAdvancedStateCreateInfoEXT*>(structExtension_out));
+            unmarshal_VkPipelineColorBlendAdvancedStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkPipelineColorBlendAdvancedStateCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_fragment_coverage_to_color
         case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV:
         {
-            unmarshal_VkPipelineCoverageToColorStateCreateInfoNV(vkStream, reinterpret_cast<VkPipelineCoverageToColorStateCreateInfoNV*>(structExtension_out));
+            unmarshal_VkPipelineCoverageToColorStateCreateInfoNV(vkStream, rootType, reinterpret_cast<VkPipelineCoverageToColorStateCreateInfoNV*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_framebuffer_mixed_samples
         case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV:
         {
-            unmarshal_VkPipelineCoverageModulationStateCreateInfoNV(vkStream, reinterpret_cast<VkPipelineCoverageModulationStateCreateInfoNV*>(structExtension_out));
+            unmarshal_VkPipelineCoverageModulationStateCreateInfoNV(vkStream, rootType, reinterpret_cast<VkPipelineCoverageModulationStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_shader_sm_builtins
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV:
+        {
+            unmarshal_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceShaderSMBuiltinsPropertiesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV:
+        {
+            unmarshal_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceShaderSMBuiltinsFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+        case VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT:
+        {
+            unmarshal_VkDrmFormatModifierPropertiesListEXT(vkStream, rootType, reinterpret_cast<VkDrmFormatModifierPropertiesListEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT:
+        {
+            unmarshal_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceImageDrmFormatModifierInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT:
+        {
+            unmarshal_VkImageDrmFormatModifierListCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkImageDrmFormatModifierListCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT:
+        {
+            unmarshal_VkImageDrmFormatModifierExplicitCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkImageDrmFormatModifierExplicitCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_validation_cache
         case VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT:
         {
-            unmarshal_VkShaderModuleValidationCacheCreateInfoEXT(vkStream, reinterpret_cast<VkShaderModuleValidationCacheCreateInfoEXT*>(structExtension_out));
+            unmarshal_VkShaderModuleValidationCacheCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkShaderModuleValidationCacheCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
-#ifdef VK_EXT_descriptor_indexing
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT:
+#ifdef VK_NV_shading_rate_image
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV:
         {
-            unmarshal_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(vkStream, reinterpret_cast<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT*>(structExtension_out));
+            unmarshal_VkPipelineViewportShadingRateImageStateCreateInfoNV(vkStream, rootType, reinterpret_cast<VkPipelineViewportShadingRateImageStateCreateInfoNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV:
         {
-            unmarshal_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(vkStream, reinterpret_cast<VkPhysicalDeviceDescriptorIndexingFeaturesEXT*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceShadingRateImageFeaturesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceShadingRateImageFeaturesNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV:
         {
-            unmarshal_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(vkStream, reinterpret_cast<VkPhysicalDeviceDescriptorIndexingPropertiesEXT*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceShadingRateImagePropertiesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceShadingRateImagePropertiesNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT:
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV:
         {
-            unmarshal_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(vkStream, reinterpret_cast<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT*>(structExtension_out));
+            unmarshal_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(vkStream, rootType, reinterpret_cast<VkPipelineViewportCoarseSampleOrderStateCreateInfoNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT:
+#endif
+#ifdef VK_NV_ray_tracing
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV:
         {
-            unmarshal_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(vkStream, reinterpret_cast<VkDescriptorSetVariableDescriptorCountLayoutSupportEXT*>(structExtension_out));
+            unmarshal_VkWriteDescriptorSetAccelerationStructureNV(vkStream, rootType, reinterpret_cast<VkWriteDescriptorSetAccelerationStructureNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV:
+        {
+            unmarshal_VkPhysicalDeviceRayTracingPropertiesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceRayTracingPropertiesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_representative_fragment_test
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV:
+        {
+            unmarshal_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV:
+        {
+            unmarshal_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(vkStream, rootType, reinterpret_cast<VkPipelineRepresentativeFragmentTestStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_filter_cubic
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT:
+        {
+            unmarshal_VkPhysicalDeviceImageViewImageFormatInfoEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceImageViewImageFormatInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT:
+        {
+            unmarshal_VkFilterCubicImageViewImageFormatPropertiesEXT(vkStream, rootType, reinterpret_cast<VkFilterCubicImageViewImageFormatPropertiesEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_global_priority
         case VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT:
         {
-            unmarshal_VkDeviceQueueGlobalPriorityCreateInfoEXT(vkStream, reinterpret_cast<VkDeviceQueueGlobalPriorityCreateInfoEXT*>(structExtension_out));
+            unmarshal_VkDeviceQueueGlobalPriorityCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkDeviceQueueGlobalPriorityCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_external_memory_host
         case VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT:
         {
-            unmarshal_VkImportMemoryHostPointerInfoEXT(vkStream, reinterpret_cast<VkImportMemoryHostPointerInfoEXT*>(structExtension_out));
+            unmarshal_VkImportMemoryHostPointerInfoEXT(vkStream, rootType, reinterpret_cast<VkImportMemoryHostPointerInfoEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT:
         {
-            unmarshal_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(vkStream, reinterpret_cast<VkPhysicalDeviceExternalMemoryHostPropertiesEXT*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceExternalMemoryHostPropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_pipeline_compiler_control
+        case VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD:
+        {
+            unmarshal_VkPipelineCompilerControlCreateInfoAMD(vkStream, rootType, reinterpret_cast<VkPipelineCompilerControlCreateInfoAMD*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_AMD_shader_core_properties
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD:
         {
-            unmarshal_VkPhysicalDeviceShaderCorePropertiesAMD(vkStream, reinterpret_cast<VkPhysicalDeviceShaderCorePropertiesAMD*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceShaderCorePropertiesAMD(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceShaderCorePropertiesAMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_memory_overallocation_behavior
+        case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD:
+        {
+            unmarshal_VkDeviceMemoryOverallocationCreateInfoAMD(vkStream, rootType, reinterpret_cast<VkDeviceMemoryOverallocationCreateInfoAMD*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_EXT_vertex_attribute_divisor
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT:
         {
-            unmarshal_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(vkStream, reinterpret_cast<VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT*>(structExtension_out));
+            unmarshal_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT:
         {
-            unmarshal_VkPipelineVertexInputDivisorStateCreateInfoEXT(vkStream, reinterpret_cast<VkPipelineVertexInputDivisorStateCreateInfoEXT*>(structExtension_out));
+            unmarshal_VkPipelineVertexInputDivisorStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkPipelineVertexInputDivisorStateCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_GGP_frame_token
+        case VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP:
+        {
+            unmarshal_VkPresentFrameTokenGGP(vkStream, rootType, reinterpret_cast<VkPresentFrameTokenGGP*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+        case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT:
+        {
+            unmarshal_VkPipelineCreationFeedbackCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkPipelineCreationFeedbackCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_compute_shader_derivatives
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV:
+        {
+            unmarshal_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceComputeShaderDerivativesFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_mesh_shader
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV:
+        {
+            unmarshal_VkPhysicalDeviceMeshShaderFeaturesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceMeshShaderFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV:
+        {
+            unmarshal_VkPhysicalDeviceMeshShaderPropertiesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceMeshShaderPropertiesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV:
+        {
+            unmarshal_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_shader_image_footprint
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV:
+        {
+            unmarshal_VkPhysicalDeviceShaderImageFootprintFeaturesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceShaderImageFootprintFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_scissor_exclusive
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV:
+        {
+            unmarshal_VkPipelineViewportExclusiveScissorStateCreateInfoNV(vkStream, rootType, reinterpret_cast<VkPipelineViewportExclusiveScissorStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV:
+        {
+            unmarshal_VkPhysicalDeviceExclusiveScissorFeaturesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceExclusiveScissorFeaturesNV*>(structExtension_out));
             break;
         }
 #endif
 #ifdef VK_NV_device_diagnostic_checkpoints
         case VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV:
         {
-            unmarshal_VkQueueFamilyCheckpointPropertiesNV(vkStream, reinterpret_cast<VkQueueFamilyCheckpointPropertiesNV*>(structExtension_out));
+            unmarshal_VkQueueFamilyCheckpointPropertiesNV(vkStream, rootType, reinterpret_cast<VkQueueFamilyCheckpointPropertiesNV*>(structExtension_out));
             break;
         }
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_INTEL_shader_integer_functions2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL:
+        {
+            unmarshal_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL:
+        {
+            unmarshal_VkQueryPoolPerformanceQueryCreateInfoINTEL(vkStream, rootType, reinterpret_cast<VkQueryPoolPerformanceQueryCreateInfoINTEL*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pci_bus_info
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT:
+        {
+            unmarshal_VkPhysicalDevicePCIBusInfoPropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDevicePCIBusInfoPropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_display_native_hdr
+        case VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD:
+        {
+            unmarshal_VkDisplayNativeHdrSurfaceCapabilitiesAMD(vkStream, rootType, reinterpret_cast<VkDisplayNativeHdrSurfaceCapabilitiesAMD*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD:
+        {
+            unmarshal_VkSwapchainDisplayNativeHdrCreateInfoAMD(vkStream, rootType, reinterpret_cast<VkSwapchainDisplayNativeHdrCreateInfoAMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
+                {
+                    unmarshal_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension_out));
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO:
+                {
+                    unmarshal_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension_out));
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    unmarshal_VkImportColorBufferGOOGLE(vkStream, rootType, reinterpret_cast<VkImportColorBufferGOOGLE*>(structExtension_out));
+                    break;
+                }
+                default:
+                {
+                    unmarshal_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension_out));
+                    break;
+                }
+            }
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2:
+                {
+                    unmarshal_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceFragmentDensityMapPropertiesEXT*>(structExtension_out));
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    unmarshal_VkImportPhysicalAddressGOOGLE(vkStream, rootType, reinterpret_cast<VkImportPhysicalAddressGOOGLE*>(structExtension_out));
+                    break;
+                }
+                default:
+                {
+                    unmarshal_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceFragmentDensityMapPropertiesEXT*>(structExtension_out));
+                    break;
+                }
+            }
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO:
+                {
+                    unmarshal_VkRenderPassFragmentDensityMapCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension_out));
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2:
+                {
+                    unmarshal_VkRenderPassFragmentDensityMapCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension_out));
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    unmarshal_VkImportBufferGOOGLE(vkStream, rootType, reinterpret_cast<VkImportBufferGOOGLE*>(structExtension_out));
+                    break;
+                }
+                default:
+                {
+                    unmarshal_VkRenderPassFragmentDensityMapCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension_out));
+                    break;
+                }
+            }
+            break;
+        }
+#endif
+#ifdef VK_EXT_subgroup_size_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceSubgroupSizeControlFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceSubgroupSizeControlPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT:
+        {
+            unmarshal_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_shader_core_properties2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD:
+        {
+            unmarshal_VkPhysicalDeviceShaderCoreProperties2AMD(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceShaderCoreProperties2AMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_device_coherent_memory
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD:
+        {
+            unmarshal_VkPhysicalDeviceCoherentMemoryFeaturesAMD(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceCoherentMemoryFeaturesAMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_memory_budget
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceMemoryBudgetPropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceMemoryBudgetPropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_memory_priority
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceMemoryPriorityFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceMemoryPriorityFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT:
+        {
+            unmarshal_VkMemoryPriorityAllocateInfoEXT(vkStream, rootType, reinterpret_cast<VkMemoryPriorityAllocateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV:
+        {
+            unmarshal_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_buffer_device_address
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceBufferDeviceAddressFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT:
+        {
+            unmarshal_VkBufferDeviceAddressCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkBufferDeviceAddressCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_validation_features
+        case VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT:
+        {
+            unmarshal_VkValidationFeaturesEXT(vkStream, rootType, reinterpret_cast<VkValidationFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_cooperative_matrix
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV:
+        {
+            unmarshal_VkPhysicalDeviceCooperativeMatrixFeaturesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceCooperativeMatrixFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV:
+        {
+            unmarshal_VkPhysicalDeviceCooperativeMatrixPropertiesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceCooperativeMatrixPropertiesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV:
+        {
+            unmarshal_VkPhysicalDeviceCoverageReductionModeFeaturesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceCoverageReductionModeFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV:
+        {
+            unmarshal_VkPipelineCoverageReductionStateCreateInfoNV(vkStream, rootType, reinterpret_cast<VkPipelineCoverageReductionStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceYcbcrImageArraysFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT:
+        {
+            unmarshal_VkSurfaceFullScreenExclusiveInfoEXT(vkStream, rootType, reinterpret_cast<VkSurfaceFullScreenExclusiveInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT:
+        {
+            unmarshal_VkSurfaceCapabilitiesFullScreenExclusiveEXT(vkStream, rootType, reinterpret_cast<VkSurfaceCapabilitiesFullScreenExclusiveEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT:
+        {
+            unmarshal_VkSurfaceFullScreenExclusiveWin32InfoEXT(vkStream, rootType, reinterpret_cast<VkSurfaceFullScreenExclusiveWin32InfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_line_rasterization
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceLineRasterizationFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceLineRasterizationFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceLineRasterizationPropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceLineRasterizationPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
+        {
+            unmarshal_VkPipelineRasterizationLineStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkPipelineRasterizationLineStateCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_atomic_float
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceShaderAtomicFloatFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_index_type_uint8
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceIndexTypeUint8FeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceExtendedDynamicStateFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_device_generated_commands
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV:
+        {
+            unmarshal_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV:
+        {
+            unmarshal_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV:
+        {
+            unmarshal_VkGraphicsPipelineShaderGroupsCreateInfoNV(vkStream, rootType, reinterpret_cast<VkGraphicsPipelineShaderGroupsCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_QCOM_render_pass_transform
+        case VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM:
+        {
+            unmarshal_VkRenderPassTransformBeginInfoQCOM(vkStream, rootType, reinterpret_cast<VkRenderPassTransformBeginInfoQCOM*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM:
+        {
+            unmarshal_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(vkStream, rootType, reinterpret_cast<VkCommandBufferInheritanceRenderPassTransformInfoQCOM*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_device_memory_report
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT:
+        {
+            unmarshal_VkDeviceDeviceMemoryReportCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkDeviceDeviceMemoryReportCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_robustness2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceRobustness2FeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceRobustness2FeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceRobustness2PropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceRobustness2PropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_custom_border_color
+        case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT:
+        {
+            unmarshal_VkSamplerCustomBorderColorCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkSamplerCustomBorderColorCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceCustomBorderColorPropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceCustomBorderColorPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceCustomBorderColorFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceCustomBorderColorFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_private_data
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDevicePrivateDataFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDevicePrivateDataFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT:
+        {
+            unmarshal_VkDevicePrivateDataCreateInfoEXT(vkStream, rootType, reinterpret_cast<VkDevicePrivateDataCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_device_diagnostics_config
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV:
+        {
+            unmarshal_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceDiagnosticsConfigFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV:
+        {
+            unmarshal_VkDeviceDiagnosticsConfigCreateInfoNV(vkStream, rootType, reinterpret_cast<VkDeviceDiagnosticsConfigCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV:
+        {
+            unmarshal_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV:
+        {
+            unmarshal_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV:
+        {
+            unmarshal_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(vkStream, rootType, reinterpret_cast<VkPipelineFragmentShadingRateEnumStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceFragmentDensityMap2FeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceFragmentDensityMap2PropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+        case VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM:
+        {
+            unmarshal_VkCopyCommandTransformInfoQCOM(vkStream, rootType, reinterpret_cast<VkCopyCommandTransformInfoQCOM*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_image_robustness
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDeviceImageRobustnessFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceImageRobustnessFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_4444_formats
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+        {
+            unmarshal_VkPhysicalDevice4444FormatsFeaturesEXT(vkStream, rootType, reinterpret_cast<VkPhysicalDevice4444FormatsFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_GOOGLE_gfxstream
         case VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE:
         {
-            unmarshal_VkImportColorBufferGOOGLE(vkStream, reinterpret_cast<VkImportColorBufferGOOGLE*>(structExtension_out));
+            unmarshal_VkImportColorBufferGOOGLE(vkStream, rootType, reinterpret_cast<VkImportColorBufferGOOGLE*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE:
+        {
+            unmarshal_VkImportBufferGOOGLE(vkStream, rootType, reinterpret_cast<VkImportBufferGOOGLE*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE:
         {
-            unmarshal_VkImportPhysicalAddressGOOGLE(vkStream, reinterpret_cast<VkImportPhysicalAddressGOOGLE*>(structExtension_out));
+            unmarshal_VkImportPhysicalAddressGOOGLE(vkStream, rootType, reinterpret_cast<VkImportPhysicalAddressGOOGLE*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR:
+        {
+            unmarshal_VkWriteDescriptorSetAccelerationStructureKHR(vkStream, rootType, reinterpret_cast<VkWriteDescriptorSetAccelerationStructureKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR:
+        {
+            unmarshal_VkPhysicalDeviceAccelerationStructureFeaturesKHR(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceAccelerationStructureFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR:
+        {
+            unmarshal_VkPhysicalDeviceAccelerationStructurePropertiesKHR(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceAccelerationStructurePropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR:
+        {
+            unmarshal_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceRayTracingPipelineFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR:
+        {
+            unmarshal_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceRayTracingPipelinePropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_ray_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR:
+        {
+            unmarshal_VkPhysicalDeviceRayQueryFeaturesKHR(vkStream, rootType, reinterpret_cast<VkPhysicalDeviceRayQueryFeaturesKHR*>(structExtension_out));
             break;
         }
 #endif
         default:
         {
-            return;
+            // fatal; the switch is only taken if the extension struct is known
+            abort();
         }
     }
 }
@@ -15738,16 +27279,6 @@
             return "OP_vkGetPhysicalDeviceWaylandPresentationSupportKHR";
         }
 #endif
-#ifdef VK_KHR_mir_surface
-        case OP_vkCreateMirSurfaceKHR:
-        {
-            return "OP_vkCreateMirSurfaceKHR";
-        }
-        case OP_vkGetPhysicalDeviceMirPresentationSupportKHR:
-        {
-            return "OP_vkGetPhysicalDeviceMirPresentationSupportKHR";
-        }
-#endif
 #ifdef VK_KHR_android_surface
         case OP_vkCreateAndroidSurfaceKHR:
         {
@@ -16118,44 +27649,6 @@
             return "OP_vkCmdEndConditionalRenderingEXT";
         }
 #endif
-#ifdef VK_NVX_device_generated_commands
-        case OP_vkCmdProcessCommandsNVX:
-        {
-            return "OP_vkCmdProcessCommandsNVX";
-        }
-        case OP_vkCmdReserveSpaceForCommandsNVX:
-        {
-            return "OP_vkCmdReserveSpaceForCommandsNVX";
-        }
-        case OP_vkCreateIndirectCommandsLayoutNVX:
-        {
-            return "OP_vkCreateIndirectCommandsLayoutNVX";
-        }
-        case OP_vkDestroyIndirectCommandsLayoutNVX:
-        {
-            return "OP_vkDestroyIndirectCommandsLayoutNVX";
-        }
-        case OP_vkCreateObjectTableNVX:
-        {
-            return "OP_vkCreateObjectTableNVX";
-        }
-        case OP_vkDestroyObjectTableNVX:
-        {
-            return "OP_vkDestroyObjectTableNVX";
-        }
-        case OP_vkRegisterObjectsNVX:
-        {
-            return "OP_vkRegisterObjectsNVX";
-        }
-        case OP_vkUnregisterObjectsNVX:
-        {
-            return "OP_vkUnregisterObjectsNVX";
-        }
-        case OP_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX:
-        {
-            return "OP_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX";
-        }
-#endif
 #ifdef VK_NV_clip_space_w_scaling
         case OP_vkCmdSetViewportWScalingNV:
         {
@@ -16342,13 +27835,11 @@
             return "OP_vkGetQueueCheckpointDataNV";
         }
 #endif
-#ifdef VK_GOOGLE_address_space
+#ifdef VK_GOOGLE_gfxstream
         case OP_vkMapMemoryIntoAddressSpaceGOOGLE:
         {
             return "OP_vkMapMemoryIntoAddressSpaceGOOGLE";
         }
-#endif
-#ifdef VK_GOOGLE_color_buffer
         case OP_vkRegisterImageColorBufferGOOGLE:
         {
             return "OP_vkRegisterImageColorBufferGOOGLE";
@@ -16357,14 +27848,10 @@
         {
             return "OP_vkRegisterBufferColorBufferGOOGLE";
         }
-#endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
         case OP_vkUpdateDescriptorSetWithTemplateSizedGOOGLE:
         {
             return "OP_vkUpdateDescriptorSetWithTemplateSizedGOOGLE";
         }
-#endif
-#ifdef VK_GOOGLE_async_command_buffers
         case OP_vkBeginCommandBufferAsyncGOOGLE:
         {
             return "OP_vkBeginCommandBufferAsyncGOOGLE";
@@ -16381,8 +27868,6 @@
         {
             return "OP_vkCommandBufferHostSyncGOOGLE";
         }
-#endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
         case OP_vkCreateImageWithRequirementsGOOGLE:
         {
             return "OP_vkCreateImageWithRequirementsGOOGLE";
@@ -16391,18 +27876,912 @@
         {
             return "OP_vkCreateBufferWithRequirementsGOOGLE";
         }
-#endif
-#ifdef VK_GOOGLE_address_space_info
         case OP_vkGetMemoryHostAddressInfoGOOGLE:
         {
             return "OP_vkGetMemoryHostAddressInfoGOOGLE";
         }
-#endif
-#ifdef VK_GOOGLE_free_memory_sync
         case OP_vkFreeMemorySyncGOOGLE:
         {
             return "OP_vkFreeMemorySyncGOOGLE";
         }
+        case OP_vkQueueHostSyncGOOGLE:
+        {
+            return "OP_vkQueueHostSyncGOOGLE";
+        }
+        case OP_vkQueueSubmitAsyncGOOGLE:
+        {
+            return "OP_vkQueueSubmitAsyncGOOGLE";
+        }
+        case OP_vkQueueWaitIdleAsyncGOOGLE:
+        {
+            return "OP_vkQueueWaitIdleAsyncGOOGLE";
+        }
+        case OP_vkQueueBindSparseAsyncGOOGLE:
+        {
+            return "OP_vkQueueBindSparseAsyncGOOGLE";
+        }
+        case OP_vkGetLinearImageLayoutGOOGLE:
+        {
+            return "OP_vkGetLinearImageLayoutGOOGLE";
+        }
+#endif
+#ifdef VK_MVK_moltenvk
+        case OP_vkGetMTLDeviceMVK:
+        {
+            return "OP_vkGetMTLDeviceMVK";
+        }
+        case OP_vkSetMTLTextureMVK:
+        {
+            return "OP_vkSetMTLTextureMVK";
+        }
+        case OP_vkGetMTLTextureMVK:
+        {
+            return "OP_vkGetMTLTextureMVK";
+        }
+        case OP_vkGetMTLBufferMVK:
+        {
+            return "OP_vkGetMTLBufferMVK";
+        }
+        case OP_vkUseIOSurfaceMVK:
+        {
+            return "OP_vkUseIOSurfaceMVK";
+        }
+        case OP_vkGetIOSurfaceMVK:
+        {
+            return "OP_vkGetIOSurfaceMVK";
+        }
+#endif
+#ifdef VK_GOOGLE_gfxstream
+        case OP_vkQueueFlushCommandsGOOGLE:
+        {
+            return "OP_vkQueueFlushCommandsGOOGLE";
+        }
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+        case OP_vkAcquireFullScreenExclusiveModeEXT:
+        {
+            return "OP_vkAcquireFullScreenExclusiveModeEXT";
+        }
+#endif
+#ifdef VK_NV_ray_tracing
+        case OP_vkBindAccelerationStructureMemoryNV:
+        {
+            return "OP_vkBindAccelerationStructureMemoryNV";
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case OP_vkInitializePerformanceApiINTEL:
+        {
+            return "OP_vkInitializePerformanceApiINTEL";
+        }
+#endif
+#ifdef VK_KHR_deferred_host_operations
+        case OP_vkGetDeferredOperationMaxConcurrencyKHR:
+        {
+            return "OP_vkGetDeferredOperationMaxConcurrencyKHR";
+        }
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+        case OP_vkGetCalibratedTimestampsEXT:
+        {
+            return "OP_vkGetCalibratedTimestampsEXT";
+        }
+#endif
+#ifdef VK_NV_ray_tracing
+        case OP_vkCreateRayTracingPipelinesNV:
+        {
+            return "OP_vkCreateRayTracingPipelinesNV";
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case OP_vkCmdCopyMemoryToAccelerationStructureKHR:
+        {
+            return "OP_vkCmdCopyMemoryToAccelerationStructureKHR";
+        }
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+        case OP_vkCmdSetFragmentShadingRateKHR:
+        {
+            return "OP_vkCmdSetFragmentShadingRateKHR";
+        }
+#endif
+#ifdef VK_NVX_image_view_handle
+        case OP_vkGetImageViewHandleNVX:
+        {
+            return "OP_vkGetImageViewHandleNVX";
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case OP_vkCmdSetScissorWithCountEXT:
+        {
+            return "OP_vkCmdSetScissorWithCountEXT";
+        }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+        case OP_vkGetRayTracingShaderGroupStackSizeKHR:
+        {
+            return "OP_vkGetRayTracingShaderGroupStackSizeKHR";
+        }
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+        case OP_vkGetDeviceGroupSurfacePresentModes2EXT:
+        {
+            return "OP_vkGetDeviceGroupSurfacePresentModes2EXT";
+        }
+#endif
+#ifdef VK_NV_mesh_shader
+        case OP_vkCmdDrawMeshTasksNV:
+        {
+            return "OP_vkCmdDrawMeshTasksNV";
+        }
+#endif
+#ifdef VK_NV_ray_tracing
+        case OP_vkCmdWriteAccelerationStructuresPropertiesNV:
+        {
+            return "OP_vkCmdWriteAccelerationStructuresPropertiesNV";
+        }
+#endif
+#ifdef VK_EXT_private_data
+        case OP_vkDestroyPrivateDataSlotEXT:
+        {
+            return "OP_vkDestroyPrivateDataSlotEXT";
+        }
+#endif
+#ifdef VK_NV_ray_tracing
+        case OP_vkCmdTraceRaysNV:
+        {
+            return "OP_vkCmdTraceRaysNV";
+        }
+#endif
+#ifdef VK_NVX_image_view_handle
+        case OP_vkGetImageViewAddressNVX:
+        {
+            return "OP_vkGetImageViewAddressNVX";
+        }
+#endif
+#ifdef VK_NV_ray_tracing
+        case OP_vkCmdCopyAccelerationStructureNV:
+        {
+            return "OP_vkCmdCopyAccelerationStructureNV";
+        }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+        case OP_vkCmdTraceRaysIndirectKHR:
+        {
+            return "OP_vkCmdTraceRaysIndirectKHR";
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case OP_vkCmdSetFrontFaceEXT:
+        {
+            return "OP_vkCmdSetFrontFaceEXT";
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case OP_vkGetPerformanceParameterINTEL:
+        {
+            return "OP_vkGetPerformanceParameterINTEL";
+        }
+#endif
+#ifdef VK_GOOGLE_gfxstream
+        case OP_vkCollectDescriptorPoolIdsGOOGLE:
+        {
+            return "OP_vkCollectDescriptorPoolIdsGOOGLE";
+        }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+        case OP_vkCmdTraceRaysKHR:
+        {
+            return "OP_vkCmdTraceRaysKHR";
+        }
+#endif
+#ifdef VK_NV_shading_rate_image
+        case OP_vkCmdSetViewportShadingRatePaletteNV:
+        {
+            return "OP_vkCmdSetViewportShadingRatePaletteNV";
+        }
+#endif
+#ifdef VK_KHR_deferred_host_operations
+        case OP_vkDestroyDeferredOperationKHR:
+        {
+            return "OP_vkDestroyDeferredOperationKHR";
+        }
+        case OP_vkDeferredOperationJoinKHR:
+        {
+            return "OP_vkDeferredOperationJoinKHR";
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case OP_vkCmdSetDepthWriteEnableEXT:
+        {
+            return "OP_vkCmdSetDepthWriteEnableEXT";
+        }
+#endif
+#ifdef VK_KHR_buffer_device_address
+        case OP_vkGetBufferDeviceAddressKHR:
+        {
+            return "OP_vkGetBufferDeviceAddressKHR";
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case OP_vkGetAccelerationStructureBuildSizesKHR:
+        {
+            return "OP_vkGetAccelerationStructureBuildSizesKHR";
+        }
+#endif
+#ifdef VK_NV_ray_tracing
+        case OP_vkGetAccelerationStructureMemoryRequirementsNV:
+        {
+            return "OP_vkGetAccelerationStructureMemoryRequirementsNV";
+        }
+#endif
+#ifdef VK_EXT_directfb_surface
+        case OP_vkCreateDirectFBSurfaceEXT:
+        {
+            return "OP_vkCreateDirectFBSurfaceEXT";
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case OP_vkCmdEndRenderPass2:
+        {
+            return "OP_vkCmdEndRenderPass2";
+        }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+        case OP_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR:
+        {
+            return "OP_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR";
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case OP_vkGetBufferDeviceAddress:
+        {
+            return "OP_vkGetBufferDeviceAddress";
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case OP_vkCmdSetDepthBoundsTestEnableEXT:
+        {
+            return "OP_vkCmdSetDepthBoundsTestEnableEXT";
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case OP_vkGetAccelerationStructureDeviceAddressKHR:
+        {
+            return "OP_vkGetAccelerationStructureDeviceAddressKHR";
+        }
+        case OP_vkCmdCopyAccelerationStructureToMemoryKHR:
+        {
+            return "OP_vkCmdCopyAccelerationStructureToMemoryKHR";
+        }
+#endif
+#ifdef VK_NV_mesh_shader
+        case OP_vkCmdDrawMeshTasksIndirectCountNV:
+        {
+            return "OP_vkCmdDrawMeshTasksIndirectCountNV";
+        }
+#endif
+#ifdef VK_KHR_performance_query
+        case OP_vkReleaseProfilingLockKHR:
+        {
+            return "OP_vkReleaseProfilingLockKHR";
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case OP_vkDestroyAccelerationStructureKHR:
+        {
+            return "OP_vkDestroyAccelerationStructureKHR";
+        }
+#endif
+#ifdef VK_EXT_buffer_device_address
+        case OP_vkGetBufferDeviceAddressEXT:
+        {
+            return "OP_vkGetBufferDeviceAddressEXT";
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case OP_vkWaitSemaphores:
+        {
+            return "OP_vkWaitSemaphores";
+        }
+#endif
+#ifdef VK_EXT_private_data
+        case OP_vkSetPrivateDataEXT:
+        {
+            return "OP_vkSetPrivateDataEXT";
+        }
+#endif
+#ifdef VK_NV_scissor_exclusive
+        case OP_vkCmdSetExclusiveScissorNV:
+        {
+            return "OP_vkCmdSetExclusiveScissorNV";
+        }
+#endif
+#ifdef VK_KHR_copy_commands2
+        case OP_vkCmdCopyImage2KHR:
+        {
+            return "OP_vkCmdCopyImage2KHR";
+        }
+#endif
+#ifdef VK_KHR_timeline_semaphore
+        case OP_vkGetSemaphoreCounterValueKHR:
+        {
+            return "OP_vkGetSemaphoreCounterValueKHR";
+        }
+#endif
+#ifdef VK_NV_ray_tracing
+        case OP_vkGetRayTracingShaderGroupHandlesNV:
+        {
+            return "OP_vkGetRayTracingShaderGroupHandlesNV";
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case OP_vkGetBufferOpaqueCaptureAddress:
+        {
+            return "OP_vkGetBufferOpaqueCaptureAddress";
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case OP_vkCmdSetDepthTestEnableEXT:
+        {
+            return "OP_vkCmdSetDepthTestEnableEXT";
+        }
+#endif
+#ifdef VK_NV_device_generated_commands
+        case OP_vkCmdExecuteGeneratedCommandsNV:
+        {
+            return "OP_vkCmdExecuteGeneratedCommandsNV";
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case OP_vkCmdBeginRenderPass2:
+        {
+            return "OP_vkCmdBeginRenderPass2";
+        }
+#endif
+#ifdef VK_EXT_private_data
+        case OP_vkCreatePrivateDataSlotEXT:
+        {
+            return "OP_vkCreatePrivateDataSlotEXT";
+        }
+#endif
+#ifdef VK_NV_shading_rate_image
+        case OP_vkCmdSetCoarseSampleOrderNV:
+        {
+            return "OP_vkCmdSetCoarseSampleOrderNV";
+        }
+        case OP_vkCmdBindShadingRateImageNV:
+        {
+            return "OP_vkCmdBindShadingRateImageNV";
+        }
+#endif
+#ifdef VK_KHR_performance_query
+        case OP_vkAcquireProfilingLockKHR:
+        {
+            return "OP_vkAcquireProfilingLockKHR";
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case OP_vkCopyAccelerationStructureKHR:
+        {
+            return "OP_vkCopyAccelerationStructureKHR";
+        }
+#endif
+#ifdef VK_GGP_stream_descriptor_surface
+        case OP_vkCreateStreamDescriptorSurfaceGGP:
+        {
+            return "OP_vkCreateStreamDescriptorSurfaceGGP";
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case OP_vkBuildAccelerationStructuresKHR:
+        {
+            return "OP_vkBuildAccelerationStructuresKHR";
+        }
+#endif
+#ifdef VK_EXT_host_query_reset
+        case OP_vkResetQueryPoolEXT:
+        {
+            return "OP_vkResetQueryPoolEXT";
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case OP_vkCmdBindVertexBuffers2EXT:
+        {
+            return "OP_vkCmdBindVertexBuffers2EXT";
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case OP_vkCmdNextSubpass2:
+        {
+            return "OP_vkCmdNextSubpass2";
+        }
+        case OP_vkCmdDrawIndexedIndirectCount:
+        {
+            return "OP_vkCmdDrawIndexedIndirectCount";
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case OP_vkAcquirePerformanceConfigurationINTEL:
+        {
+            return "OP_vkAcquirePerformanceConfigurationINTEL";
+        }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+        case OP_vkCreateRayTracingPipelinesKHR:
+        {
+            return "OP_vkCreateRayTracingPipelinesKHR";
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case OP_vkCmdSetDepthCompareOpEXT:
+        {
+            return "OP_vkCmdSetDepthCompareOpEXT";
+        }
+#endif
+#ifdef VK_KHR_copy_commands2
+        case OP_vkCmdCopyBuffer2KHR:
+        {
+            return "OP_vkCmdCopyBuffer2KHR";
+        }
+        case OP_vkCmdCopyBufferToImage2KHR:
+        {
+            return "OP_vkCmdCopyBufferToImage2KHR";
+        }
+#endif
+#ifdef VK_NV_device_generated_commands
+        case OP_vkGetGeneratedCommandsMemoryRequirementsNV:
+        {
+            return "OP_vkGetGeneratedCommandsMemoryRequirementsNV";
+        }
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+        case OP_vkGetImageDrmFormatModifierPropertiesEXT:
+        {
+            return "OP_vkGetImageDrmFormatModifierPropertiesEXT";
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case OP_vkCmdSetPerformanceOverrideINTEL:
+        {
+            return "OP_vkCmdSetPerformanceOverrideINTEL";
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case OP_vkResetQueryPool:
+        {
+            return "OP_vkResetQueryPool";
+        }
+#endif
+#ifdef VK_KHR_copy_commands2
+        case OP_vkCmdCopyImageToBuffer2KHR:
+        {
+            return "OP_vkCmdCopyImageToBuffer2KHR";
+        }
+#endif
+#ifdef VK_NV_ray_tracing
+        case OP_vkDestroyAccelerationStructureNV:
+        {
+            return "OP_vkDestroyAccelerationStructureNV";
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case OP_vkReleasePerformanceConfigurationINTEL:
+        {
+            return "OP_vkReleasePerformanceConfigurationINTEL";
+        }
+#endif
+#ifdef VK_KHR_copy_commands2
+        case OP_vkCmdResolveImage2KHR:
+        {
+            return "OP_vkCmdResolveImage2KHR";
+        }
+#endif
+#ifdef VK_EXT_metal_surface
+        case OP_vkCreateMetalSurfaceEXT:
+        {
+            return "OP_vkCreateMetalSurfaceEXT";
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case OP_vkCopyAccelerationStructureToMemoryKHR:
+        {
+            return "OP_vkCopyAccelerationStructureToMemoryKHR";
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case OP_vkCmdSetViewportWithCountEXT:
+        {
+            return "OP_vkCmdSetViewportWithCountEXT";
+        }
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+        case OP_vkReleaseFullScreenExclusiveModeEXT:
+        {
+            return "OP_vkReleaseFullScreenExclusiveModeEXT";
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case OP_vkCmdBuildAccelerationStructuresIndirectKHR:
+        {
+            return "OP_vkCmdBuildAccelerationStructuresIndirectKHR";
+        }
+        case OP_vkCreateAccelerationStructureKHR:
+        {
+            return "OP_vkCreateAccelerationStructureKHR";
+        }
+#endif
+#ifdef VK_NV_ray_tracing
+        case OP_vkCreateAccelerationStructureNV:
+        {
+            return "OP_vkCreateAccelerationStructureNV";
+        }
+#endif
+#ifdef VK_KHR_copy_commands2
+        case OP_vkCmdBlitImage2KHR:
+        {
+            return "OP_vkCmdBlitImage2KHR";
+        }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+        case OP_vkCmdSetRayTracingPipelineStackSizeKHR:
+        {
+            return "OP_vkCmdSetRayTracingPipelineStackSizeKHR";
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case OP_vkGetDeviceMemoryOpaqueCaptureAddress:
+        {
+            return "OP_vkGetDeviceMemoryOpaqueCaptureAddress";
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case OP_vkCmdSetPrimitiveTopologyEXT:
+        {
+            return "OP_vkCmdSetPrimitiveTopologyEXT";
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case OP_vkCmdSetPerformanceStreamMarkerINTEL:
+        {
+            return "OP_vkCmdSetPerformanceStreamMarkerINTEL";
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case OP_vkCopyMemoryToAccelerationStructureKHR:
+        {
+            return "OP_vkCopyMemoryToAccelerationStructureKHR";
+        }
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+        case OP_vkCreateImagePipeSurfaceFUCHSIA:
+        {
+            return "OP_vkCreateImagePipeSurfaceFUCHSIA";
+        }
+#endif
+#ifdef VK_KHR_deferred_host_operations
+        case OP_vkGetDeferredOperationResultKHR:
+        {
+            return "OP_vkGetDeferredOperationResultKHR";
+        }
+#endif
+#ifdef VK_EXT_line_rasterization
+        case OP_vkCmdSetLineStippleEXT:
+        {
+            return "OP_vkCmdSetLineStippleEXT";
+        }
+#endif
+#ifdef VK_KHR_timeline_semaphore
+        case OP_vkWaitSemaphoresKHR:
+        {
+            return "OP_vkWaitSemaphoresKHR";
+        }
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+        case OP_vkCmdSetFragmentShadingRateEnumNV:
+        {
+            return "OP_vkCmdSetFragmentShadingRateEnumNV";
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case OP_vkCmdSetCullModeEXT:
+        {
+            return "OP_vkCmdSetCullModeEXT";
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case OP_vkGetDeviceAccelerationStructureCompatibilityKHR:
+        {
+            return "OP_vkGetDeviceAccelerationStructureCompatibilityKHR";
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case OP_vkGetSemaphoreCounterValue:
+        {
+            return "OP_vkGetSemaphoreCounterValue";
+        }
+#endif
+#ifdef VK_AMD_display_native_hdr
+        case OP_vkSetLocalDimmingAMD:
+        {
+            return "OP_vkSetLocalDimmingAMD";
+        }
+#endif
+#ifdef VK_EXT_transform_feedback
+        case OP_vkCmdBindTransformFeedbackBuffersEXT:
+        {
+            return "OP_vkCmdBindTransformFeedbackBuffersEXT";
+        }
+#endif
+#ifdef VK_GOOGLE_gfxstream
+        case OP_vkQueueCommitDescriptorSetUpdatesGOOGLE:
+        {
+            return "OP_vkQueueCommitDescriptorSetUpdatesGOOGLE";
+        }
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+        case OP_vkGetPhysicalDeviceSurfacePresentModes2EXT:
+        {
+            return "OP_vkGetPhysicalDeviceSurfacePresentModes2EXT";
+        }
+#endif
+#ifdef VK_NV_ray_tracing
+        case OP_vkCmdBuildAccelerationStructureNV:
+        {
+            return "OP_vkCmdBuildAccelerationStructureNV";
+        }
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+        case OP_vkGetPipelineExecutablePropertiesKHR:
+        {
+            return "OP_vkGetPipelineExecutablePropertiesKHR";
+        }
+#endif
+#ifdef VK_NV_ray_tracing
+        case OP_vkGetAccelerationStructureHandleNV:
+        {
+            return "OP_vkGetAccelerationStructureHandleNV";
+        }
+#endif
+#ifdef VK_KHR_timeline_semaphore
+        case OP_vkSignalSemaphoreKHR:
+        {
+            return "OP_vkSignalSemaphoreKHR";
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case OP_vkCmdSetPerformanceMarkerINTEL:
+        {
+            return "OP_vkCmdSetPerformanceMarkerINTEL";
+        }
+#endif
+#ifdef VK_NV_device_generated_commands
+        case OP_vkCmdBindPipelineShaderGroupNV:
+        {
+            return "OP_vkCmdBindPipelineShaderGroupNV";
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case OP_vkSignalSemaphore:
+        {
+            return "OP_vkSignalSemaphore";
+        }
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+        case OP_vkGetPipelineExecutableStatisticsKHR:
+        {
+            return "OP_vkGetPipelineExecutableStatisticsKHR";
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case OP_vkCmdWriteAccelerationStructuresPropertiesKHR:
+        {
+            return "OP_vkCmdWriteAccelerationStructuresPropertiesKHR";
+        }
+#endif
+#ifdef VK_NV_ray_tracing
+        case OP_vkGetRayTracingShaderGroupHandlesKHR:
+        {
+            return "OP_vkGetRayTracingShaderGroupHandlesKHR";
+        }
+#endif
+#ifdef VK_EXT_transform_feedback
+        case OP_vkCmdEndTransformFeedbackEXT:
+        {
+            return "OP_vkCmdEndTransformFeedbackEXT";
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case OP_vkCmdBuildAccelerationStructuresKHR:
+        {
+            return "OP_vkCmdBuildAccelerationStructuresKHR";
+        }
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+        case OP_vkGetPhysicalDeviceFragmentShadingRatesKHR:
+        {
+            return "OP_vkGetPhysicalDeviceFragmentShadingRatesKHR";
+        }
+#endif
+#ifdef VK_NV_mesh_shader
+        case OP_vkCmdDrawMeshTasksIndirectNV:
+        {
+            return "OP_vkCmdDrawMeshTasksIndirectNV";
+        }
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+        case OP_vkGetPipelineExecutableInternalRepresentationsKHR:
+        {
+            return "OP_vkGetPipelineExecutableInternalRepresentationsKHR";
+        }
+#endif
+#ifdef VK_KHR_deferred_host_operations
+        case OP_vkCreateDeferredOperationKHR:
+        {
+            return "OP_vkCreateDeferredOperationKHR";
+        }
+#endif
+#ifdef VK_EXT_transform_feedback
+        case OP_vkCmdBeginQueryIndexedEXT:
+        {
+            return "OP_vkCmdBeginQueryIndexedEXT";
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case OP_vkCmdSetStencilOpEXT:
+        {
+            return "OP_vkCmdSetStencilOpEXT";
+        }
+#endif
+#ifdef VK_NV_ray_tracing
+        case OP_vkCompileDeferredNV:
+        {
+            return "OP_vkCompileDeferredNV";
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case OP_vkCmdCopyAccelerationStructureKHR:
+        {
+            return "OP_vkCmdCopyAccelerationStructureKHR";
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case OP_vkCreateRenderPass2:
+        {
+            return "OP_vkCreateRenderPass2";
+        }
+#endif
+#ifdef VK_EXT_transform_feedback
+        case OP_vkCmdEndQueryIndexedEXT:
+        {
+            return "OP_vkCmdEndQueryIndexedEXT";
+        }
+#endif
+#ifdef VK_KHR_performance_query
+        case OP_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR:
+        {
+            return "OP_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR";
+        }
+#endif
+#ifdef VK_EXT_tooling_info
+        case OP_vkGetPhysicalDeviceToolPropertiesEXT:
+        {
+            return "OP_vkGetPhysicalDeviceToolPropertiesEXT";
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case OP_vkCmdDrawIndirectCount:
+        {
+            return "OP_vkCmdDrawIndirectCount";
+        }
+#endif
+#ifdef VK_EXT_transform_feedback
+        case OP_vkCmdDrawIndirectByteCountEXT:
+        {
+            return "OP_vkCmdDrawIndirectByteCountEXT";
+        }
+#endif
+#ifdef VK_NV_device_generated_commands
+        case OP_vkCreateIndirectCommandsLayoutNV:
+        {
+            return "OP_vkCreateIndirectCommandsLayoutNV";
+        }
+#endif
+#ifdef VK_EXT_directfb_surface
+        case OP_vkGetPhysicalDeviceDirectFBPresentationSupportEXT:
+        {
+            return "OP_vkGetPhysicalDeviceDirectFBPresentationSupportEXT";
+        }
+#endif
+#ifdef VK_KHR_buffer_device_address
+        case OP_vkGetBufferOpaqueCaptureAddressKHR:
+        {
+            return "OP_vkGetBufferOpaqueCaptureAddressKHR";
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case OP_vkCmdSetStencilTestEnableEXT:
+        {
+            return "OP_vkCmdSetStencilTestEnableEXT";
+        }
+#endif
+#ifdef VK_NV_cooperative_matrix
+        case OP_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV:
+        {
+            return "OP_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV";
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case OP_vkWriteAccelerationStructuresPropertiesKHR:
+        {
+            return "OP_vkWriteAccelerationStructuresPropertiesKHR";
+        }
+#endif
+#ifdef VK_EXT_private_data
+        case OP_vkGetPrivateDataEXT:
+        {
+            return "OP_vkGetPrivateDataEXT";
+        }
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+        case OP_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV:
+        {
+            return "OP_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV";
+        }
+#endif
+#ifdef VK_NV_device_generated_commands
+        case OP_vkDestroyIndirectCommandsLayoutNV:
+        {
+            return "OP_vkDestroyIndirectCommandsLayoutNV";
+        }
+#endif
+#ifdef VK_EXT_transform_feedback
+        case OP_vkCmdBeginTransformFeedbackEXT:
+        {
+            return "OP_vkCmdBeginTransformFeedbackEXT";
+        }
+#endif
+#ifdef VK_KHR_buffer_device_address
+        case OP_vkGetDeviceMemoryOpaqueCaptureAddressKHR:
+        {
+            return "OP_vkGetDeviceMemoryOpaqueCaptureAddressKHR";
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case OP_vkQueueSetPerformanceConfigurationINTEL:
+        {
+            return "OP_vkQueueSetPerformanceConfigurationINTEL";
+        }
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+        case OP_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT:
+        {
+            return "OP_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT";
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case OP_vkUninitializePerformanceApiINTEL:
+        {
+            return "OP_vkUninitializePerformanceApiINTEL";
+        }
+#endif
+#ifdef VK_NV_device_generated_commands
+        case OP_vkCmdPreprocessGeneratedCommandsNV:
+        {
+            return "OP_vkCmdPreprocessGeneratedCommandsNV";
+        }
+#endif
+#ifdef VK_EXT_headless_surface
+        case OP_vkCreateHeadlessSurfaceEXT:
+        {
+            return "OP_vkCreateHeadlessSurfaceEXT";
+        }
+#endif
+#ifdef VK_KHR_performance_query
+        case OP_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR:
+        {
+            return "OP_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR";
+        }
 #endif
         default:
         {
diff --git a/system/vulkan_enc/goldfish_vk_marshaling_guest.h b/system/vulkan_enc/goldfish_vk_marshaling_guest.h
index 993c890..86c6b4e 100644
--- a/system/vulkan_enc/goldfish_vk_marshaling_guest.h
+++ b/system/vulkan_enc/goldfish_vk_marshaling_guest.h
@@ -44,303 +44,489 @@
 namespace goldfish_vk {
 
 #ifdef VK_VERSION_1_0
-void marshal_VkApplicationInfo(
+void marshal_VkExtent2D(
     VulkanStreamGuest* vkStream,
-    const VkApplicationInfo* forMarshaling);
+    VkStructureType rootType,
+    const VkExtent2D* forMarshaling);
 
-void unmarshal_VkApplicationInfo(
+void unmarshal_VkExtent2D(
     VulkanStreamGuest* vkStream,
-    VkApplicationInfo* forUnmarshaling);
+    VkStructureType rootType,
+    VkExtent2D* forUnmarshaling);
 
-void marshal_VkInstanceCreateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkInstanceCreateInfo* forMarshaling);
-
-void unmarshal_VkInstanceCreateInfo(
-    VulkanStreamGuest* vkStream,
-    VkInstanceCreateInfo* forUnmarshaling);
-
-void marshal_VkAllocationCallbacks(
-    VulkanStreamGuest* vkStream,
-    const VkAllocationCallbacks* forMarshaling);
-
-void unmarshal_VkAllocationCallbacks(
-    VulkanStreamGuest* vkStream,
-    VkAllocationCallbacks* forUnmarshaling);
-
-#define OP_vkCreateInstance 20000
-#define OP_vkDestroyInstance 20001
-#define OP_vkEnumeratePhysicalDevices 20002
-void marshal_VkPhysicalDeviceFeatures(
-    VulkanStreamGuest* vkStream,
-    const VkPhysicalDeviceFeatures* forMarshaling);
-
-void unmarshal_VkPhysicalDeviceFeatures(
-    VulkanStreamGuest* vkStream,
-    VkPhysicalDeviceFeatures* forUnmarshaling);
-
-#define OP_vkGetPhysicalDeviceFeatures 20003
-void marshal_VkFormatProperties(
-    VulkanStreamGuest* vkStream,
-    const VkFormatProperties* forMarshaling);
-
-void unmarshal_VkFormatProperties(
-    VulkanStreamGuest* vkStream,
-    VkFormatProperties* forUnmarshaling);
-
-#define OP_vkGetPhysicalDeviceFormatProperties 20004
 void marshal_VkExtent3D(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExtent3D* forMarshaling);
 
 void unmarshal_VkExtent3D(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExtent3D* forUnmarshaling);
 
+void marshal_VkOffset2D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkOffset2D* forMarshaling);
+
+void unmarshal_VkOffset2D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkOffset2D* forUnmarshaling);
+
+void marshal_VkOffset3D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkOffset3D* forMarshaling);
+
+void unmarshal_VkOffset3D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkOffset3D* forUnmarshaling);
+
+void marshal_VkRect2D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRect2D* forMarshaling);
+
+void unmarshal_VkRect2D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRect2D* forUnmarshaling);
+
+void marshal_VkBaseInStructure(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBaseInStructure* forMarshaling);
+
+void unmarshal_VkBaseInStructure(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBaseInStructure* forUnmarshaling);
+
+void marshal_VkBaseOutStructure(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBaseOutStructure* forMarshaling);
+
+void unmarshal_VkBaseOutStructure(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBaseOutStructure* forUnmarshaling);
+
+void marshal_VkBufferMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferMemoryBarrier* forMarshaling);
+
+void unmarshal_VkBufferMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBufferMemoryBarrier* forUnmarshaling);
+
+void marshal_VkDispatchIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDispatchIndirectCommand* forMarshaling);
+
+void unmarshal_VkDispatchIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDispatchIndirectCommand* forUnmarshaling);
+
+void marshal_VkDrawIndexedIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrawIndexedIndirectCommand* forMarshaling);
+
+void unmarshal_VkDrawIndexedIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDrawIndexedIndirectCommand* forUnmarshaling);
+
+void marshal_VkDrawIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrawIndirectCommand* forMarshaling);
+
+void unmarshal_VkDrawIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDrawIndirectCommand* forUnmarshaling);
+
+void marshal_VkImageSubresourceRange(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageSubresourceRange* forMarshaling);
+
+void unmarshal_VkImageSubresourceRange(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageSubresourceRange* forUnmarshaling);
+
+void marshal_VkImageMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageMemoryBarrier* forMarshaling);
+
+void unmarshal_VkImageMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageMemoryBarrier* forUnmarshaling);
+
+void marshal_VkMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryBarrier* forMarshaling);
+
+void unmarshal_VkMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkMemoryBarrier* forUnmarshaling);
+
+void marshal_VkAllocationCallbacks(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAllocationCallbacks* forMarshaling);
+
+void unmarshal_VkAllocationCallbacks(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAllocationCallbacks* forUnmarshaling);
+
+void marshal_VkApplicationInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkApplicationInfo* forMarshaling);
+
+void unmarshal_VkApplicationInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkApplicationInfo* forUnmarshaling);
+
+void marshal_VkFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFormatProperties* forMarshaling);
+
+void unmarshal_VkFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkFormatProperties* forUnmarshaling);
+
 void marshal_VkImageFormatProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageFormatProperties* forMarshaling);
 
 void unmarshal_VkImageFormatProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageFormatProperties* forUnmarshaling);
 
-#define OP_vkGetPhysicalDeviceImageFormatProperties 20005
-void marshal_VkPhysicalDeviceLimits(
+void marshal_VkInstanceCreateInfo(
     VulkanStreamGuest* vkStream,
-    const VkPhysicalDeviceLimits* forMarshaling);
+    VkStructureType rootType,
+    const VkInstanceCreateInfo* forMarshaling);
 
-void unmarshal_VkPhysicalDeviceLimits(
+void unmarshal_VkInstanceCreateInfo(
     VulkanStreamGuest* vkStream,
-    VkPhysicalDeviceLimits* forUnmarshaling);
-
-void marshal_VkPhysicalDeviceSparseProperties(
-    VulkanStreamGuest* vkStream,
-    const VkPhysicalDeviceSparseProperties* forMarshaling);
-
-void unmarshal_VkPhysicalDeviceSparseProperties(
-    VulkanStreamGuest* vkStream,
-    VkPhysicalDeviceSparseProperties* forUnmarshaling);
-
-void marshal_VkPhysicalDeviceProperties(
-    VulkanStreamGuest* vkStream,
-    const VkPhysicalDeviceProperties* forMarshaling);
-
-void unmarshal_VkPhysicalDeviceProperties(
-    VulkanStreamGuest* vkStream,
-    VkPhysicalDeviceProperties* forUnmarshaling);
-
-#define OP_vkGetPhysicalDeviceProperties 20006
-void marshal_VkQueueFamilyProperties(
-    VulkanStreamGuest* vkStream,
-    const VkQueueFamilyProperties* forMarshaling);
-
-void unmarshal_VkQueueFamilyProperties(
-    VulkanStreamGuest* vkStream,
-    VkQueueFamilyProperties* forUnmarshaling);
-
-#define OP_vkGetPhysicalDeviceQueueFamilyProperties 20007
-void marshal_VkMemoryType(
-    VulkanStreamGuest* vkStream,
-    const VkMemoryType* forMarshaling);
-
-void unmarshal_VkMemoryType(
-    VulkanStreamGuest* vkStream,
-    VkMemoryType* forUnmarshaling);
+    VkStructureType rootType,
+    VkInstanceCreateInfo* forUnmarshaling);
 
 void marshal_VkMemoryHeap(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryHeap* forMarshaling);
 
 void unmarshal_VkMemoryHeap(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryHeap* forUnmarshaling);
 
+void marshal_VkMemoryType(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryType* forMarshaling);
+
+void unmarshal_VkMemoryType(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkMemoryType* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFeatures* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFeatures* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceLimits(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLimits* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceLimits(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceLimits* forUnmarshaling);
+
 void marshal_VkPhysicalDeviceMemoryProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceMemoryProperties* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceMemoryProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceMemoryProperties* forUnmarshaling);
 
+void marshal_VkPhysicalDeviceSparseProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSparseProperties* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceSparseProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceSparseProperties* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProperties* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceProperties* forUnmarshaling);
+
+void marshal_VkQueueFamilyProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkQueueFamilyProperties* forMarshaling);
+
+void unmarshal_VkQueueFamilyProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkQueueFamilyProperties* forUnmarshaling);
+
+#define OP_vkCreateInstance 20000
+#define OP_vkDestroyInstance 20001
+#define OP_vkEnumeratePhysicalDevices 20002
+#define OP_vkGetPhysicalDeviceFeatures 20003
+#define OP_vkGetPhysicalDeviceFormatProperties 20004
+#define OP_vkGetPhysicalDeviceImageFormatProperties 20005
+#define OP_vkGetPhysicalDeviceProperties 20006
+#define OP_vkGetPhysicalDeviceQueueFamilyProperties 20007
 #define OP_vkGetPhysicalDeviceMemoryProperties 20008
 #define OP_vkGetInstanceProcAddr 20009
 #define OP_vkGetDeviceProcAddr 20010
 void marshal_VkDeviceQueueCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceQueueCreateInfo* forMarshaling);
 
 void unmarshal_VkDeviceQueueCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceQueueCreateInfo* forUnmarshaling);
 
 void marshal_VkDeviceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceCreateInfo* forMarshaling);
 
 void unmarshal_VkDeviceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceCreateInfo* forUnmarshaling);
 
 #define OP_vkCreateDevice 20011
 #define OP_vkDestroyDevice 20012
 void marshal_VkExtensionProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExtensionProperties* forMarshaling);
 
 void unmarshal_VkExtensionProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExtensionProperties* forUnmarshaling);
 
 #define OP_vkEnumerateInstanceExtensionProperties 20013
 #define OP_vkEnumerateDeviceExtensionProperties 20014
 void marshal_VkLayerProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkLayerProperties* forMarshaling);
 
 void unmarshal_VkLayerProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkLayerProperties* forUnmarshaling);
 
 #define OP_vkEnumerateInstanceLayerProperties 20015
 #define OP_vkEnumerateDeviceLayerProperties 20016
-#define OP_vkGetDeviceQueue 20017
 void marshal_VkSubmitInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSubmitInfo* forMarshaling);
 
 void unmarshal_VkSubmitInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSubmitInfo* forUnmarshaling);
 
+#define OP_vkGetDeviceQueue 20017
 #define OP_vkQueueSubmit 20018
 #define OP_vkQueueWaitIdle 20019
 #define OP_vkDeviceWaitIdle 20020
+void marshal_VkMappedMemoryRange(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMappedMemoryRange* forMarshaling);
+
+void unmarshal_VkMappedMemoryRange(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkMappedMemoryRange* forUnmarshaling);
+
 void marshal_VkMemoryAllocateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryAllocateInfo* forMarshaling);
 
 void unmarshal_VkMemoryAllocateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryAllocateInfo* forUnmarshaling);
 
 #define OP_vkAllocateMemory 20021
 #define OP_vkFreeMemory 20022
 #define OP_vkMapMemory 20023
 #define OP_vkUnmapMemory 20024
-void marshal_VkMappedMemoryRange(
-    VulkanStreamGuest* vkStream,
-    const VkMappedMemoryRange* forMarshaling);
-
-void unmarshal_VkMappedMemoryRange(
-    VulkanStreamGuest* vkStream,
-    VkMappedMemoryRange* forUnmarshaling);
-
 #define OP_vkFlushMappedMemoryRanges 20025
 #define OP_vkInvalidateMappedMemoryRanges 20026
 #define OP_vkGetDeviceMemoryCommitment 20027
-#define OP_vkBindBufferMemory 20028
-#define OP_vkBindImageMemory 20029
 void marshal_VkMemoryRequirements(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryRequirements* forMarshaling);
 
 void unmarshal_VkMemoryRequirements(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryRequirements* forUnmarshaling);
 
+#define OP_vkBindBufferMemory 20028
+#define OP_vkBindImageMemory 20029
 #define OP_vkGetBufferMemoryRequirements 20030
 #define OP_vkGetImageMemoryRequirements 20031
-void marshal_VkSparseImageFormatProperties(
-    VulkanStreamGuest* vkStream,
-    const VkSparseImageFormatProperties* forMarshaling);
-
-void unmarshal_VkSparseImageFormatProperties(
-    VulkanStreamGuest* vkStream,
-    VkSparseImageFormatProperties* forUnmarshaling);
-
-void marshal_VkSparseImageMemoryRequirements(
-    VulkanStreamGuest* vkStream,
-    const VkSparseImageMemoryRequirements* forMarshaling);
-
-void unmarshal_VkSparseImageMemoryRequirements(
-    VulkanStreamGuest* vkStream,
-    VkSparseImageMemoryRequirements* forUnmarshaling);
-
-#define OP_vkGetImageSparseMemoryRequirements 20032
-#define OP_vkGetPhysicalDeviceSparseImageFormatProperties 20033
 void marshal_VkSparseMemoryBind(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSparseMemoryBind* forMarshaling);
 
 void unmarshal_VkSparseMemoryBind(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSparseMemoryBind* forUnmarshaling);
 
 void marshal_VkSparseBufferMemoryBindInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSparseBufferMemoryBindInfo* forMarshaling);
 
 void unmarshal_VkSparseBufferMemoryBindInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSparseBufferMemoryBindInfo* forUnmarshaling);
 
 void marshal_VkSparseImageOpaqueMemoryBindInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSparseImageOpaqueMemoryBindInfo* forMarshaling);
 
 void unmarshal_VkSparseImageOpaqueMemoryBindInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSparseImageOpaqueMemoryBindInfo* forUnmarshaling);
 
 void marshal_VkImageSubresource(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageSubresource* forMarshaling);
 
 void unmarshal_VkImageSubresource(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageSubresource* forUnmarshaling);
 
-void marshal_VkOffset3D(
-    VulkanStreamGuest* vkStream,
-    const VkOffset3D* forMarshaling);
-
-void unmarshal_VkOffset3D(
-    VulkanStreamGuest* vkStream,
-    VkOffset3D* forUnmarshaling);
-
 void marshal_VkSparseImageMemoryBind(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSparseImageMemoryBind* forMarshaling);
 
 void unmarshal_VkSparseImageMemoryBind(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSparseImageMemoryBind* forUnmarshaling);
 
 void marshal_VkSparseImageMemoryBindInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSparseImageMemoryBindInfo* forMarshaling);
 
 void unmarshal_VkSparseImageMemoryBindInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSparseImageMemoryBindInfo* forUnmarshaling);
 
 void marshal_VkBindSparseInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBindSparseInfo* forMarshaling);
 
 void unmarshal_VkBindSparseInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBindSparseInfo* forUnmarshaling);
 
+void marshal_VkSparseImageFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageFormatProperties* forMarshaling);
+
+void unmarshal_VkSparseImageFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSparseImageFormatProperties* forUnmarshaling);
+
+void marshal_VkSparseImageMemoryRequirements(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageMemoryRequirements* forMarshaling);
+
+void unmarshal_VkSparseImageMemoryRequirements(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSparseImageMemoryRequirements* forUnmarshaling);
+
+#define OP_vkGetImageSparseMemoryRequirements 20032
+#define OP_vkGetPhysicalDeviceSparseImageFormatProperties 20033
 #define OP_vkQueueBindSparse 20034
 void marshal_VkFenceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkFenceCreateInfo* forMarshaling);
 
 void unmarshal_VkFenceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkFenceCreateInfo* forUnmarshaling);
 
 #define OP_vkCreateFence 20035
@@ -350,20 +536,24 @@
 #define OP_vkWaitForFences 20039
 void marshal_VkSemaphoreCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSemaphoreCreateInfo* forMarshaling);
 
 void unmarshal_VkSemaphoreCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSemaphoreCreateInfo* forUnmarshaling);
 
 #define OP_vkCreateSemaphore 20040
 #define OP_vkDestroySemaphore 20041
 void marshal_VkEventCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkEventCreateInfo* forMarshaling);
 
 void unmarshal_VkEventCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkEventCreateInfo* forUnmarshaling);
 
 #define OP_vkCreateEvent 20042
@@ -373,10 +563,12 @@
 #define OP_vkResetEvent 20046
 void marshal_VkQueryPoolCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkQueryPoolCreateInfo* forMarshaling);
 
 void unmarshal_VkQueryPoolCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkQueryPoolCreateInfo* forUnmarshaling);
 
 #define OP_vkCreateQueryPool 20047
@@ -384,85 +576,93 @@
 #define OP_vkGetQueryPoolResults 20049
 void marshal_VkBufferCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBufferCreateInfo* forMarshaling);
 
 void unmarshal_VkBufferCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBufferCreateInfo* forUnmarshaling);
 
 #define OP_vkCreateBuffer 20050
 #define OP_vkDestroyBuffer 20051
 void marshal_VkBufferViewCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBufferViewCreateInfo* forMarshaling);
 
 void unmarshal_VkBufferViewCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBufferViewCreateInfo* forUnmarshaling);
 
 #define OP_vkCreateBufferView 20052
 #define OP_vkDestroyBufferView 20053
 void marshal_VkImageCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageCreateInfo* forMarshaling);
 
 void unmarshal_VkImageCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageCreateInfo* forUnmarshaling);
 
-#define OP_vkCreateImage 20054
-#define OP_vkDestroyImage 20055
 void marshal_VkSubresourceLayout(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSubresourceLayout* forMarshaling);
 
 void unmarshal_VkSubresourceLayout(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSubresourceLayout* forUnmarshaling);
 
+#define OP_vkCreateImage 20054
+#define OP_vkDestroyImage 20055
 #define OP_vkGetImageSubresourceLayout 20056
 void marshal_VkComponentMapping(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkComponentMapping* forMarshaling);
 
 void unmarshal_VkComponentMapping(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkComponentMapping* forUnmarshaling);
 
-void marshal_VkImageSubresourceRange(
-    VulkanStreamGuest* vkStream,
-    const VkImageSubresourceRange* forMarshaling);
-
-void unmarshal_VkImageSubresourceRange(
-    VulkanStreamGuest* vkStream,
-    VkImageSubresourceRange* forUnmarshaling);
-
 void marshal_VkImageViewCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageViewCreateInfo* forMarshaling);
 
 void unmarshal_VkImageViewCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageViewCreateInfo* forUnmarshaling);
 
 #define OP_vkCreateImageView 20057
 #define OP_vkDestroyImageView 20058
 void marshal_VkShaderModuleCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkShaderModuleCreateInfo* forMarshaling);
 
 void unmarshal_VkShaderModuleCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkShaderModuleCreateInfo* forUnmarshaling);
 
 #define OP_vkCreateShaderModule 20059
 #define OP_vkDestroyShaderModule 20060
 void marshal_VkPipelineCacheCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineCacheCreateInfo* forMarshaling);
 
 void unmarshal_VkPipelineCacheCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineCacheCreateInfo* forUnmarshaling);
 
 #define OP_vkCreatePipelineCache 20061
@@ -471,350 +671,402 @@
 #define OP_vkMergePipelineCaches 20064
 void marshal_VkSpecializationMapEntry(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSpecializationMapEntry* forMarshaling);
 
 void unmarshal_VkSpecializationMapEntry(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSpecializationMapEntry* forUnmarshaling);
 
 void marshal_VkSpecializationInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSpecializationInfo* forMarshaling);
 
 void unmarshal_VkSpecializationInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSpecializationInfo* forUnmarshaling);
 
 void marshal_VkPipelineShaderStageCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineShaderStageCreateInfo* forMarshaling);
 
 void unmarshal_VkPipelineShaderStageCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineShaderStageCreateInfo* forUnmarshaling);
 
-void marshal_VkVertexInputBindingDescription(
-    VulkanStreamGuest* vkStream,
-    const VkVertexInputBindingDescription* forMarshaling);
-
-void unmarshal_VkVertexInputBindingDescription(
-    VulkanStreamGuest* vkStream,
-    VkVertexInputBindingDescription* forUnmarshaling);
-
-void marshal_VkVertexInputAttributeDescription(
-    VulkanStreamGuest* vkStream,
-    const VkVertexInputAttributeDescription* forMarshaling);
-
-void unmarshal_VkVertexInputAttributeDescription(
-    VulkanStreamGuest* vkStream,
-    VkVertexInputAttributeDescription* forUnmarshaling);
-
-void marshal_VkPipelineVertexInputStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkPipelineVertexInputStateCreateInfo* forMarshaling);
-
-void unmarshal_VkPipelineVertexInputStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    VkPipelineVertexInputStateCreateInfo* forUnmarshaling);
-
-void marshal_VkPipelineInputAssemblyStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkPipelineInputAssemblyStateCreateInfo* forMarshaling);
-
-void unmarshal_VkPipelineInputAssemblyStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    VkPipelineInputAssemblyStateCreateInfo* forUnmarshaling);
-
-void marshal_VkPipelineTessellationStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkPipelineTessellationStateCreateInfo* forMarshaling);
-
-void unmarshal_VkPipelineTessellationStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    VkPipelineTessellationStateCreateInfo* forUnmarshaling);
-
-void marshal_VkViewport(
-    VulkanStreamGuest* vkStream,
-    const VkViewport* forMarshaling);
-
-void unmarshal_VkViewport(
-    VulkanStreamGuest* vkStream,
-    VkViewport* forUnmarshaling);
-
-void marshal_VkOffset2D(
-    VulkanStreamGuest* vkStream,
-    const VkOffset2D* forMarshaling);
-
-void unmarshal_VkOffset2D(
-    VulkanStreamGuest* vkStream,
-    VkOffset2D* forUnmarshaling);
-
-void marshal_VkExtent2D(
-    VulkanStreamGuest* vkStream,
-    const VkExtent2D* forMarshaling);
-
-void unmarshal_VkExtent2D(
-    VulkanStreamGuest* vkStream,
-    VkExtent2D* forUnmarshaling);
-
-void marshal_VkRect2D(
-    VulkanStreamGuest* vkStream,
-    const VkRect2D* forMarshaling);
-
-void unmarshal_VkRect2D(
-    VulkanStreamGuest* vkStream,
-    VkRect2D* forUnmarshaling);
-
-void marshal_VkPipelineViewportStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkPipelineViewportStateCreateInfo* forMarshaling);
-
-void unmarshal_VkPipelineViewportStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    VkPipelineViewportStateCreateInfo* forUnmarshaling);
-
-void marshal_VkPipelineRasterizationStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkPipelineRasterizationStateCreateInfo* forMarshaling);
-
-void unmarshal_VkPipelineRasterizationStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    VkPipelineRasterizationStateCreateInfo* forUnmarshaling);
-
-void marshal_VkPipelineMultisampleStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkPipelineMultisampleStateCreateInfo* forMarshaling);
-
-void unmarshal_VkPipelineMultisampleStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    VkPipelineMultisampleStateCreateInfo* forUnmarshaling);
-
-void marshal_VkStencilOpState(
-    VulkanStreamGuest* vkStream,
-    const VkStencilOpState* forMarshaling);
-
-void unmarshal_VkStencilOpState(
-    VulkanStreamGuest* vkStream,
-    VkStencilOpState* forUnmarshaling);
-
-void marshal_VkPipelineDepthStencilStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkPipelineDepthStencilStateCreateInfo* forMarshaling);
-
-void unmarshal_VkPipelineDepthStencilStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    VkPipelineDepthStencilStateCreateInfo* forUnmarshaling);
-
-void marshal_VkPipelineColorBlendAttachmentState(
-    VulkanStreamGuest* vkStream,
-    const VkPipelineColorBlendAttachmentState* forMarshaling);
-
-void unmarshal_VkPipelineColorBlendAttachmentState(
-    VulkanStreamGuest* vkStream,
-    VkPipelineColorBlendAttachmentState* forUnmarshaling);
-
-void marshal_VkPipelineColorBlendStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkPipelineColorBlendStateCreateInfo* forMarshaling);
-
-void unmarshal_VkPipelineColorBlendStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    VkPipelineColorBlendStateCreateInfo* forUnmarshaling);
-
-void marshal_VkPipelineDynamicStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkPipelineDynamicStateCreateInfo* forMarshaling);
-
-void unmarshal_VkPipelineDynamicStateCreateInfo(
-    VulkanStreamGuest* vkStream,
-    VkPipelineDynamicStateCreateInfo* forUnmarshaling);
-
-void marshal_VkGraphicsPipelineCreateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkGraphicsPipelineCreateInfo* forMarshaling);
-
-void unmarshal_VkGraphicsPipelineCreateInfo(
-    VulkanStreamGuest* vkStream,
-    VkGraphicsPipelineCreateInfo* forUnmarshaling);
-
-#define OP_vkCreateGraphicsPipelines 20065
 void marshal_VkComputePipelineCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkComputePipelineCreateInfo* forMarshaling);
 
 void unmarshal_VkComputePipelineCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkComputePipelineCreateInfo* forUnmarshaling);
 
+void marshal_VkVertexInputBindingDescription(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkVertexInputBindingDescription* forMarshaling);
+
+void unmarshal_VkVertexInputBindingDescription(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkVertexInputBindingDescription* forUnmarshaling);
+
+void marshal_VkVertexInputAttributeDescription(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkVertexInputAttributeDescription* forMarshaling);
+
+void unmarshal_VkVertexInputAttributeDescription(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkVertexInputAttributeDescription* forUnmarshaling);
+
+void marshal_VkPipelineVertexInputStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineVertexInputStateCreateInfo* forMarshaling);
+
+void unmarshal_VkPipelineVertexInputStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineVertexInputStateCreateInfo* forUnmarshaling);
+
+void marshal_VkPipelineInputAssemblyStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineInputAssemblyStateCreateInfo* forMarshaling);
+
+void unmarshal_VkPipelineInputAssemblyStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineInputAssemblyStateCreateInfo* forUnmarshaling);
+
+void marshal_VkPipelineTessellationStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineTessellationStateCreateInfo* forMarshaling);
+
+void unmarshal_VkPipelineTessellationStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineTessellationStateCreateInfo* forUnmarshaling);
+
+void marshal_VkViewport(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkViewport* forMarshaling);
+
+void unmarshal_VkViewport(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkViewport* forUnmarshaling);
+
+void marshal_VkPipelineViewportStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportStateCreateInfo* forMarshaling);
+
+void unmarshal_VkPipelineViewportStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineViewportStateCreateInfo* forUnmarshaling);
+
+void marshal_VkPipelineRasterizationStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateCreateInfo* forMarshaling);
+
+void unmarshal_VkPipelineRasterizationStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineRasterizationStateCreateInfo* forUnmarshaling);
+
+void marshal_VkPipelineMultisampleStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineMultisampleStateCreateInfo* forMarshaling);
+
+void unmarshal_VkPipelineMultisampleStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineMultisampleStateCreateInfo* forUnmarshaling);
+
+void marshal_VkStencilOpState(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkStencilOpState* forMarshaling);
+
+void unmarshal_VkStencilOpState(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkStencilOpState* forUnmarshaling);
+
+void marshal_VkPipelineDepthStencilStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineDepthStencilStateCreateInfo* forMarshaling);
+
+void unmarshal_VkPipelineDepthStencilStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineDepthStencilStateCreateInfo* forUnmarshaling);
+
+void marshal_VkPipelineColorBlendAttachmentState(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineColorBlendAttachmentState* forMarshaling);
+
+void unmarshal_VkPipelineColorBlendAttachmentState(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineColorBlendAttachmentState* forUnmarshaling);
+
+void marshal_VkPipelineColorBlendStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineColorBlendStateCreateInfo* forMarshaling);
+
+void unmarshal_VkPipelineColorBlendStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineColorBlendStateCreateInfo* forUnmarshaling);
+
+void marshal_VkPipelineDynamicStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineDynamicStateCreateInfo* forMarshaling);
+
+void unmarshal_VkPipelineDynamicStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineDynamicStateCreateInfo* forUnmarshaling);
+
+void marshal_VkGraphicsPipelineCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGraphicsPipelineCreateInfo* forMarshaling);
+
+void unmarshal_VkGraphicsPipelineCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkGraphicsPipelineCreateInfo* forUnmarshaling);
+
+#define OP_vkCreateGraphicsPipelines 20065
 #define OP_vkCreateComputePipelines 20066
 #define OP_vkDestroyPipeline 20067
 void marshal_VkPushConstantRange(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPushConstantRange* forMarshaling);
 
 void unmarshal_VkPushConstantRange(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPushConstantRange* forUnmarshaling);
 
 void marshal_VkPipelineLayoutCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineLayoutCreateInfo* forMarshaling);
 
 void unmarshal_VkPipelineLayoutCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineLayoutCreateInfo* forUnmarshaling);
 
 #define OP_vkCreatePipelineLayout 20068
 #define OP_vkDestroyPipelineLayout 20069
 void marshal_VkSamplerCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSamplerCreateInfo* forMarshaling);
 
 void unmarshal_VkSamplerCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSamplerCreateInfo* forUnmarshaling);
 
 #define OP_vkCreateSampler 20070
 #define OP_vkDestroySampler 20071
-void marshal_VkDescriptorSetLayoutBinding(
-    VulkanStreamGuest* vkStream,
-    const VkDescriptorSetLayoutBinding* forMarshaling);
-
-void unmarshal_VkDescriptorSetLayoutBinding(
-    VulkanStreamGuest* vkStream,
-    VkDescriptorSetLayoutBinding* forUnmarshaling);
-
-void marshal_VkDescriptorSetLayoutCreateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkDescriptorSetLayoutCreateInfo* forMarshaling);
-
-void unmarshal_VkDescriptorSetLayoutCreateInfo(
-    VulkanStreamGuest* vkStream,
-    VkDescriptorSetLayoutCreateInfo* forUnmarshaling);
-
-#define OP_vkCreateDescriptorSetLayout 20072
-#define OP_vkDestroyDescriptorSetLayout 20073
-void marshal_VkDescriptorPoolSize(
-    VulkanStreamGuest* vkStream,
-    const VkDescriptorPoolSize* forMarshaling);
-
-void unmarshal_VkDescriptorPoolSize(
-    VulkanStreamGuest* vkStream,
-    VkDescriptorPoolSize* forUnmarshaling);
-
-void marshal_VkDescriptorPoolCreateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkDescriptorPoolCreateInfo* forMarshaling);
-
-void unmarshal_VkDescriptorPoolCreateInfo(
-    VulkanStreamGuest* vkStream,
-    VkDescriptorPoolCreateInfo* forUnmarshaling);
-
-#define OP_vkCreateDescriptorPool 20074
-#define OP_vkDestroyDescriptorPool 20075
-#define OP_vkResetDescriptorPool 20076
-void marshal_VkDescriptorSetAllocateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkDescriptorSetAllocateInfo* forMarshaling);
-
-void unmarshal_VkDescriptorSetAllocateInfo(
-    VulkanStreamGuest* vkStream,
-    VkDescriptorSetAllocateInfo* forUnmarshaling);
-
-#define OP_vkAllocateDescriptorSets 20077
-#define OP_vkFreeDescriptorSets 20078
-void marshal_VkDescriptorImageInfo(
-    VulkanStreamGuest* vkStream,
-    const VkDescriptorImageInfo* forMarshaling);
-
-void unmarshal_VkDescriptorImageInfo(
-    VulkanStreamGuest* vkStream,
-    VkDescriptorImageInfo* forUnmarshaling);
-
-void marshal_VkDescriptorBufferInfo(
-    VulkanStreamGuest* vkStream,
-    const VkDescriptorBufferInfo* forMarshaling);
-
-void unmarshal_VkDescriptorBufferInfo(
-    VulkanStreamGuest* vkStream,
-    VkDescriptorBufferInfo* forUnmarshaling);
-
-void marshal_VkWriteDescriptorSet(
-    VulkanStreamGuest* vkStream,
-    const VkWriteDescriptorSet* forMarshaling);
-
-void unmarshal_VkWriteDescriptorSet(
-    VulkanStreamGuest* vkStream,
-    VkWriteDescriptorSet* forUnmarshaling);
-
 void marshal_VkCopyDescriptorSet(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkCopyDescriptorSet* forMarshaling);
 
 void unmarshal_VkCopyDescriptorSet(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkCopyDescriptorSet* forUnmarshaling);
 
+void marshal_VkDescriptorBufferInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorBufferInfo* forMarshaling);
+
+void unmarshal_VkDescriptorBufferInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorBufferInfo* forUnmarshaling);
+
+void marshal_VkDescriptorImageInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorImageInfo* forMarshaling);
+
+void unmarshal_VkDescriptorImageInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorImageInfo* forUnmarshaling);
+
+void marshal_VkDescriptorPoolSize(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorPoolSize* forMarshaling);
+
+void unmarshal_VkDescriptorPoolSize(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorPoolSize* forUnmarshaling);
+
+void marshal_VkDescriptorPoolCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorPoolCreateInfo* forMarshaling);
+
+void unmarshal_VkDescriptorPoolCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorPoolCreateInfo* forUnmarshaling);
+
+void marshal_VkDescriptorSetAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetAllocateInfo* forMarshaling);
+
+void unmarshal_VkDescriptorSetAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorSetAllocateInfo* forUnmarshaling);
+
+void marshal_VkDescriptorSetLayoutBinding(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutBinding* forMarshaling);
+
+void unmarshal_VkDescriptorSetLayoutBinding(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorSetLayoutBinding* forUnmarshaling);
+
+void marshal_VkDescriptorSetLayoutCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutCreateInfo* forMarshaling);
+
+void unmarshal_VkDescriptorSetLayoutCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorSetLayoutCreateInfo* forUnmarshaling);
+
+void marshal_VkWriteDescriptorSet(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWriteDescriptorSet* forMarshaling);
+
+void unmarshal_VkWriteDescriptorSet(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkWriteDescriptorSet* forUnmarshaling);
+
+#define OP_vkCreateDescriptorSetLayout 20072
+#define OP_vkDestroyDescriptorSetLayout 20073
+#define OP_vkCreateDescriptorPool 20074
+#define OP_vkDestroyDescriptorPool 20075
+#define OP_vkResetDescriptorPool 20076
+#define OP_vkAllocateDescriptorSets 20077
+#define OP_vkFreeDescriptorSets 20078
 #define OP_vkUpdateDescriptorSets 20079
-void marshal_VkFramebufferCreateInfo(
-    VulkanStreamGuest* vkStream,
-    const VkFramebufferCreateInfo* forMarshaling);
-
-void unmarshal_VkFramebufferCreateInfo(
-    VulkanStreamGuest* vkStream,
-    VkFramebufferCreateInfo* forUnmarshaling);
-
-#define OP_vkCreateFramebuffer 20080
-#define OP_vkDestroyFramebuffer 20081
 void marshal_VkAttachmentDescription(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkAttachmentDescription* forMarshaling);
 
 void unmarshal_VkAttachmentDescription(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkAttachmentDescription* forUnmarshaling);
 
 void marshal_VkAttachmentReference(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkAttachmentReference* forMarshaling);
 
 void unmarshal_VkAttachmentReference(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkAttachmentReference* forUnmarshaling);
 
+void marshal_VkFramebufferCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFramebufferCreateInfo* forMarshaling);
+
+void unmarshal_VkFramebufferCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkFramebufferCreateInfo* forUnmarshaling);
+
 void marshal_VkSubpassDescription(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSubpassDescription* forMarshaling);
 
 void unmarshal_VkSubpassDescription(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSubpassDescription* forUnmarshaling);
 
 void marshal_VkSubpassDependency(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSubpassDependency* forMarshaling);
 
 void unmarshal_VkSubpassDependency(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSubpassDependency* forUnmarshaling);
 
 void marshal_VkRenderPassCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkRenderPassCreateInfo* forMarshaling);
 
 void unmarshal_VkRenderPassCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkRenderPassCreateInfo* forUnmarshaling);
 
+#define OP_vkCreateFramebuffer 20080
+#define OP_vkDestroyFramebuffer 20081
 #define OP_vkCreateRenderPass 20082
 #define OP_vkDestroyRenderPass 20083
 #define OP_vkGetRenderAreaGranularity 20084
 void marshal_VkCommandPoolCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkCommandPoolCreateInfo* forMarshaling);
 
 void unmarshal_VkCommandPoolCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkCommandPoolCreateInfo* forUnmarshaling);
 
 #define OP_vkCreateCommandPool 20085
@@ -822,33 +1074,159 @@
 #define OP_vkResetCommandPool 20087
 void marshal_VkCommandBufferAllocateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkCommandBufferAllocateInfo* forMarshaling);
 
 void unmarshal_VkCommandBufferAllocateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkCommandBufferAllocateInfo* forUnmarshaling);
 
-#define OP_vkAllocateCommandBuffers 20088
-#define OP_vkFreeCommandBuffers 20089
 void marshal_VkCommandBufferInheritanceInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkCommandBufferInheritanceInfo* forMarshaling);
 
 void unmarshal_VkCommandBufferInheritanceInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkCommandBufferInheritanceInfo* forUnmarshaling);
 
 void marshal_VkCommandBufferBeginInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkCommandBufferBeginInfo* forMarshaling);
 
 void unmarshal_VkCommandBufferBeginInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkCommandBufferBeginInfo* forUnmarshaling);
 
+#define OP_vkAllocateCommandBuffers 20088
+#define OP_vkFreeCommandBuffers 20089
 #define OP_vkBeginCommandBuffer 20090
 #define OP_vkEndCommandBuffer 20091
 #define OP_vkResetCommandBuffer 20092
+void marshal_VkBufferCopy(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferCopy* forMarshaling);
+
+void unmarshal_VkBufferCopy(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBufferCopy* forUnmarshaling);
+
+void marshal_VkImageSubresourceLayers(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageSubresourceLayers* forMarshaling);
+
+void unmarshal_VkImageSubresourceLayers(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageSubresourceLayers* forUnmarshaling);
+
+void marshal_VkBufferImageCopy(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferImageCopy* forMarshaling);
+
+void unmarshal_VkBufferImageCopy(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBufferImageCopy* forUnmarshaling);
+
+void marshal_VkClearColorValue(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkClearColorValue* forMarshaling);
+
+void unmarshal_VkClearColorValue(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkClearColorValue* forUnmarshaling);
+
+void marshal_VkClearDepthStencilValue(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkClearDepthStencilValue* forMarshaling);
+
+void unmarshal_VkClearDepthStencilValue(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkClearDepthStencilValue* forUnmarshaling);
+
+void marshal_VkClearValue(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkClearValue* forMarshaling);
+
+void unmarshal_VkClearValue(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkClearValue* forUnmarshaling);
+
+void marshal_VkClearAttachment(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkClearAttachment* forMarshaling);
+
+void unmarshal_VkClearAttachment(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkClearAttachment* forUnmarshaling);
+
+void marshal_VkClearRect(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkClearRect* forMarshaling);
+
+void unmarshal_VkClearRect(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkClearRect* forUnmarshaling);
+
+void marshal_VkImageBlit(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageBlit* forMarshaling);
+
+void unmarshal_VkImageBlit(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageBlit* forUnmarshaling);
+
+void marshal_VkImageCopy(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageCopy* forMarshaling);
+
+void unmarshal_VkImageCopy(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageCopy* forUnmarshaling);
+
+void marshal_VkImageResolve(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageResolve* forMarshaling);
+
+void unmarshal_VkImageResolve(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageResolve* forUnmarshaling);
+
+void marshal_VkRenderPassBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassBeginInfo* forMarshaling);
+
+void unmarshal_VkRenderPassBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRenderPassBeginInfo* forUnmarshaling);
+
 #define OP_vkCmdBindPipeline 20093
 #define OP_vkCmdSetViewport 20094
 #define OP_vkCmdSetScissor 20095
@@ -868,131 +1246,19 @@
 #define OP_vkCmdDrawIndexedIndirect 20109
 #define OP_vkCmdDispatch 20110
 #define OP_vkCmdDispatchIndirect 20111
-void marshal_VkBufferCopy(
-    VulkanStreamGuest* vkStream,
-    const VkBufferCopy* forMarshaling);
-
-void unmarshal_VkBufferCopy(
-    VulkanStreamGuest* vkStream,
-    VkBufferCopy* forUnmarshaling);
-
 #define OP_vkCmdCopyBuffer 20112
-void marshal_VkImageSubresourceLayers(
-    VulkanStreamGuest* vkStream,
-    const VkImageSubresourceLayers* forMarshaling);
-
-void unmarshal_VkImageSubresourceLayers(
-    VulkanStreamGuest* vkStream,
-    VkImageSubresourceLayers* forUnmarshaling);
-
-void marshal_VkImageCopy(
-    VulkanStreamGuest* vkStream,
-    const VkImageCopy* forMarshaling);
-
-void unmarshal_VkImageCopy(
-    VulkanStreamGuest* vkStream,
-    VkImageCopy* forUnmarshaling);
-
 #define OP_vkCmdCopyImage 20113
-void marshal_VkImageBlit(
-    VulkanStreamGuest* vkStream,
-    const VkImageBlit* forMarshaling);
-
-void unmarshal_VkImageBlit(
-    VulkanStreamGuest* vkStream,
-    VkImageBlit* forUnmarshaling);
-
 #define OP_vkCmdBlitImage 20114
-void marshal_VkBufferImageCopy(
-    VulkanStreamGuest* vkStream,
-    const VkBufferImageCopy* forMarshaling);
-
-void unmarshal_VkBufferImageCopy(
-    VulkanStreamGuest* vkStream,
-    VkBufferImageCopy* forUnmarshaling);
-
 #define OP_vkCmdCopyBufferToImage 20115
 #define OP_vkCmdCopyImageToBuffer 20116
 #define OP_vkCmdUpdateBuffer 20117
 #define OP_vkCmdFillBuffer 20118
-void marshal_VkClearColorValue(
-    VulkanStreamGuest* vkStream,
-    const VkClearColorValue* forMarshaling);
-
-void unmarshal_VkClearColorValue(
-    VulkanStreamGuest* vkStream,
-    VkClearColorValue* forUnmarshaling);
-
 #define OP_vkCmdClearColorImage 20119
-void marshal_VkClearDepthStencilValue(
-    VulkanStreamGuest* vkStream,
-    const VkClearDepthStencilValue* forMarshaling);
-
-void unmarshal_VkClearDepthStencilValue(
-    VulkanStreamGuest* vkStream,
-    VkClearDepthStencilValue* forUnmarshaling);
-
 #define OP_vkCmdClearDepthStencilImage 20120
-void marshal_VkClearValue(
-    VulkanStreamGuest* vkStream,
-    const VkClearValue* forMarshaling);
-
-void unmarshal_VkClearValue(
-    VulkanStreamGuest* vkStream,
-    VkClearValue* forUnmarshaling);
-
-void marshal_VkClearAttachment(
-    VulkanStreamGuest* vkStream,
-    const VkClearAttachment* forMarshaling);
-
-void unmarshal_VkClearAttachment(
-    VulkanStreamGuest* vkStream,
-    VkClearAttachment* forUnmarshaling);
-
-void marshal_VkClearRect(
-    VulkanStreamGuest* vkStream,
-    const VkClearRect* forMarshaling);
-
-void unmarshal_VkClearRect(
-    VulkanStreamGuest* vkStream,
-    VkClearRect* forUnmarshaling);
-
 #define OP_vkCmdClearAttachments 20121
-void marshal_VkImageResolve(
-    VulkanStreamGuest* vkStream,
-    const VkImageResolve* forMarshaling);
-
-void unmarshal_VkImageResolve(
-    VulkanStreamGuest* vkStream,
-    VkImageResolve* forUnmarshaling);
-
 #define OP_vkCmdResolveImage 20122
 #define OP_vkCmdSetEvent 20123
 #define OP_vkCmdResetEvent 20124
-void marshal_VkMemoryBarrier(
-    VulkanStreamGuest* vkStream,
-    const VkMemoryBarrier* forMarshaling);
-
-void unmarshal_VkMemoryBarrier(
-    VulkanStreamGuest* vkStream,
-    VkMemoryBarrier* forUnmarshaling);
-
-void marshal_VkBufferMemoryBarrier(
-    VulkanStreamGuest* vkStream,
-    const VkBufferMemoryBarrier* forMarshaling);
-
-void unmarshal_VkBufferMemoryBarrier(
-    VulkanStreamGuest* vkStream,
-    VkBufferMemoryBarrier* forUnmarshaling);
-
-void marshal_VkImageMemoryBarrier(
-    VulkanStreamGuest* vkStream,
-    const VkImageMemoryBarrier* forMarshaling);
-
-void unmarshal_VkImageMemoryBarrier(
-    VulkanStreamGuest* vkStream,
-    VkImageMemoryBarrier* forUnmarshaling);
-
 #define OP_vkCmdWaitEvents 20125
 #define OP_vkCmdPipelineBarrier 20126
 #define OP_vkCmdBeginQuery 20127
@@ -1001,149 +1267,123 @@
 #define OP_vkCmdWriteTimestamp 20130
 #define OP_vkCmdCopyQueryPoolResults 20131
 #define OP_vkCmdPushConstants 20132
-void marshal_VkRenderPassBeginInfo(
-    VulkanStreamGuest* vkStream,
-    const VkRenderPassBeginInfo* forMarshaling);
-
-void unmarshal_VkRenderPassBeginInfo(
-    VulkanStreamGuest* vkStream,
-    VkRenderPassBeginInfo* forUnmarshaling);
-
 #define OP_vkCmdBeginRenderPass 20133
 #define OP_vkCmdNextSubpass 20134
 #define OP_vkCmdEndRenderPass 20135
 #define OP_vkCmdExecuteCommands 20136
-void marshal_VkDispatchIndirectCommand(
-    VulkanStreamGuest* vkStream,
-    const VkDispatchIndirectCommand* forMarshaling);
-
-void unmarshal_VkDispatchIndirectCommand(
-    VulkanStreamGuest* vkStream,
-    VkDispatchIndirectCommand* forUnmarshaling);
-
-void marshal_VkDrawIndexedIndirectCommand(
-    VulkanStreamGuest* vkStream,
-    const VkDrawIndexedIndirectCommand* forMarshaling);
-
-void unmarshal_VkDrawIndexedIndirectCommand(
-    VulkanStreamGuest* vkStream,
-    VkDrawIndexedIndirectCommand* forUnmarshaling);
-
-void marshal_VkDrawIndirectCommand(
-    VulkanStreamGuest* vkStream,
-    const VkDrawIndirectCommand* forMarshaling);
-
-void unmarshal_VkDrawIndirectCommand(
-    VulkanStreamGuest* vkStream,
-    VkDrawIndirectCommand* forUnmarshaling);
-
-void marshal_VkBaseOutStructure(
-    VulkanStreamGuest* vkStream,
-    const VkBaseOutStructure* forMarshaling);
-
-void unmarshal_VkBaseOutStructure(
-    VulkanStreamGuest* vkStream,
-    VkBaseOutStructure* forUnmarshaling);
-
-void marshal_VkBaseInStructure(
-    VulkanStreamGuest* vkStream,
-    const VkBaseInStructure* forMarshaling);
-
-void unmarshal_VkBaseInStructure(
-    VulkanStreamGuest* vkStream,
-    VkBaseInStructure* forUnmarshaling);
-
 #endif
 #ifdef VK_VERSION_1_1
 #define OP_vkEnumerateInstanceVersion 20137
 void marshal_VkPhysicalDeviceSubgroupProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceSubgroupProperties* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceSubgroupProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceSubgroupProperties* forUnmarshaling);
 
 void marshal_VkBindBufferMemoryInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBindBufferMemoryInfo* forMarshaling);
 
 void unmarshal_VkBindBufferMemoryInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBindBufferMemoryInfo* forUnmarshaling);
 
 void marshal_VkBindImageMemoryInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBindImageMemoryInfo* forMarshaling);
 
 void unmarshal_VkBindImageMemoryInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBindImageMemoryInfo* forUnmarshaling);
 
 #define OP_vkBindBufferMemory2 20138
 #define OP_vkBindImageMemory2 20139
 void marshal_VkPhysicalDevice16BitStorageFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDevice16BitStorageFeatures* forMarshaling);
 
 void unmarshal_VkPhysicalDevice16BitStorageFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDevice16BitStorageFeatures* forUnmarshaling);
 
 void marshal_VkMemoryDedicatedRequirements(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryDedicatedRequirements* forMarshaling);
 
 void unmarshal_VkMemoryDedicatedRequirements(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryDedicatedRequirements* forUnmarshaling);
 
 void marshal_VkMemoryDedicatedAllocateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryDedicatedAllocateInfo* forMarshaling);
 
 void unmarshal_VkMemoryDedicatedAllocateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryDedicatedAllocateInfo* forUnmarshaling);
 
 void marshal_VkMemoryAllocateFlagsInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryAllocateFlagsInfo* forMarshaling);
 
 void unmarshal_VkMemoryAllocateFlagsInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryAllocateFlagsInfo* forUnmarshaling);
 
 void marshal_VkDeviceGroupRenderPassBeginInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceGroupRenderPassBeginInfo* forMarshaling);
 
 void unmarshal_VkDeviceGroupRenderPassBeginInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceGroupRenderPassBeginInfo* forUnmarshaling);
 
 void marshal_VkDeviceGroupCommandBufferBeginInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceGroupCommandBufferBeginInfo* forMarshaling);
 
 void unmarshal_VkDeviceGroupCommandBufferBeginInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceGroupCommandBufferBeginInfo* forUnmarshaling);
 
 void marshal_VkDeviceGroupSubmitInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceGroupSubmitInfo* forMarshaling);
 
 void unmarshal_VkDeviceGroupSubmitInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceGroupSubmitInfo* forUnmarshaling);
 
 void marshal_VkDeviceGroupBindSparseInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceGroupBindSparseInfo* forMarshaling);
 
 void unmarshal_VkDeviceGroupBindSparseInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceGroupBindSparseInfo* forUnmarshaling);
 
 #define OP_vkGetDeviceGroupPeerMemoryFeatures 20140
@@ -1151,75 +1391,93 @@
 #define OP_vkCmdDispatchBase 20142
 void marshal_VkBindBufferMemoryDeviceGroupInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBindBufferMemoryDeviceGroupInfo* forMarshaling);
 
 void unmarshal_VkBindBufferMemoryDeviceGroupInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBindBufferMemoryDeviceGroupInfo* forUnmarshaling);
 
 void marshal_VkBindImageMemoryDeviceGroupInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBindImageMemoryDeviceGroupInfo* forMarshaling);
 
 void unmarshal_VkBindImageMemoryDeviceGroupInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBindImageMemoryDeviceGroupInfo* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceGroupProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceGroupProperties* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceGroupProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceGroupProperties* forUnmarshaling);
 
 void marshal_VkDeviceGroupDeviceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceGroupDeviceCreateInfo* forMarshaling);
 
 void unmarshal_VkDeviceGroupDeviceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceGroupDeviceCreateInfo* forUnmarshaling);
 
 #define OP_vkEnumeratePhysicalDeviceGroups 20143
 void marshal_VkBufferMemoryRequirementsInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBufferMemoryRequirementsInfo2* forMarshaling);
 
 void unmarshal_VkBufferMemoryRequirementsInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBufferMemoryRequirementsInfo2* forUnmarshaling);
 
 void marshal_VkImageMemoryRequirementsInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageMemoryRequirementsInfo2* forMarshaling);
 
 void unmarshal_VkImageMemoryRequirementsInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageMemoryRequirementsInfo2* forUnmarshaling);
 
 void marshal_VkImageSparseMemoryRequirementsInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageSparseMemoryRequirementsInfo2* forMarshaling);
 
 void unmarshal_VkImageSparseMemoryRequirementsInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageSparseMemoryRequirementsInfo2* forUnmarshaling);
 
 void marshal_VkMemoryRequirements2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryRequirements2* forMarshaling);
 
 void unmarshal_VkMemoryRequirements2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryRequirements2* forUnmarshaling);
 
 void marshal_VkSparseImageMemoryRequirements2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSparseImageMemoryRequirements2* forMarshaling);
 
 void unmarshal_VkSparseImageMemoryRequirements2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSparseImageMemoryRequirements2* forUnmarshaling);
 
 #define OP_vkGetImageMemoryRequirements2 20144
@@ -1227,74 +1485,92 @@
 #define OP_vkGetImageSparseMemoryRequirements2 20146
 void marshal_VkPhysicalDeviceFeatures2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceFeatures2* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceFeatures2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceFeatures2* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceProperties2* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceProperties2* forUnmarshaling);
 
 void marshal_VkFormatProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkFormatProperties2* forMarshaling);
 
 void unmarshal_VkFormatProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkFormatProperties2* forUnmarshaling);
 
 void marshal_VkImageFormatProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageFormatProperties2* forMarshaling);
 
 void unmarshal_VkImageFormatProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageFormatProperties2* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceImageFormatInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceImageFormatInfo2* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceImageFormatInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceImageFormatInfo2* forUnmarshaling);
 
 void marshal_VkQueueFamilyProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkQueueFamilyProperties2* forMarshaling);
 
 void unmarshal_VkQueueFamilyProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkQueueFamilyProperties2* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceMemoryProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceMemoryProperties2* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceMemoryProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceMemoryProperties2* forUnmarshaling);
 
 void marshal_VkSparseImageFormatProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSparseImageFormatProperties2* forMarshaling);
 
 void unmarshal_VkSparseImageFormatProperties2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSparseImageFormatProperties2* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceSparseImageFormatInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceSparseImageFormatInfo2* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceSparseImageFormatInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceSparseImageFormatInfo2* forUnmarshaling);
 
 #define OP_vkGetPhysicalDeviceFeatures2 20147
@@ -1307,173 +1583,219 @@
 #define OP_vkTrimCommandPool 20154
 void marshal_VkPhysicalDevicePointClippingProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDevicePointClippingProperties* forMarshaling);
 
 void unmarshal_VkPhysicalDevicePointClippingProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDevicePointClippingProperties* forUnmarshaling);
 
 void marshal_VkInputAttachmentAspectReference(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkInputAttachmentAspectReference* forMarshaling);
 
 void unmarshal_VkInputAttachmentAspectReference(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkInputAttachmentAspectReference* forUnmarshaling);
 
 void marshal_VkRenderPassInputAttachmentAspectCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkRenderPassInputAttachmentAspectCreateInfo* forMarshaling);
 
 void unmarshal_VkRenderPassInputAttachmentAspectCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkRenderPassInputAttachmentAspectCreateInfo* forUnmarshaling);
 
 void marshal_VkImageViewUsageCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageViewUsageCreateInfo* forMarshaling);
 
 void unmarshal_VkImageViewUsageCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageViewUsageCreateInfo* forUnmarshaling);
 
 void marshal_VkPipelineTessellationDomainOriginStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineTessellationDomainOriginStateCreateInfo* forMarshaling);
 
 void unmarshal_VkPipelineTessellationDomainOriginStateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineTessellationDomainOriginStateCreateInfo* forUnmarshaling);
 
 void marshal_VkRenderPassMultiviewCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkRenderPassMultiviewCreateInfo* forMarshaling);
 
 void unmarshal_VkRenderPassMultiviewCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkRenderPassMultiviewCreateInfo* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceMultiviewFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceMultiviewFeatures* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceMultiviewFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceMultiviewFeatures* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceMultiviewProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceMultiviewProperties* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceMultiviewProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceMultiviewProperties* forUnmarshaling);
 
-void marshal_VkPhysicalDeviceVariablePointerFeatures(
+void marshal_VkPhysicalDeviceVariablePointersFeatures(
     VulkanStreamGuest* vkStream,
-    const VkPhysicalDeviceVariablePointerFeatures* forMarshaling);
+    VkStructureType rootType,
+    const VkPhysicalDeviceVariablePointersFeatures* forMarshaling);
 
-void unmarshal_VkPhysicalDeviceVariablePointerFeatures(
+void unmarshal_VkPhysicalDeviceVariablePointersFeatures(
     VulkanStreamGuest* vkStream,
-    VkPhysicalDeviceVariablePointerFeatures* forUnmarshaling);
+    VkStructureType rootType,
+    VkPhysicalDeviceVariablePointersFeatures* forUnmarshaling);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceVariablePointersFeatures, marshal_VkPhysicalDeviceVariablePointerFeatures);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceVariablePointersFeatures, unmarshal_VkPhysicalDeviceVariablePointerFeatures);
 
 void marshal_VkPhysicalDeviceProtectedMemoryFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceProtectedMemoryFeatures* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceProtectedMemoryFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceProtectedMemoryFeatures* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceProtectedMemoryProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceProtectedMemoryProperties* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceProtectedMemoryProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceProtectedMemoryProperties* forUnmarshaling);
 
 void marshal_VkDeviceQueueInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceQueueInfo2* forMarshaling);
 
 void unmarshal_VkDeviceQueueInfo2(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceQueueInfo2* forUnmarshaling);
 
 void marshal_VkProtectedSubmitInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkProtectedSubmitInfo* forMarshaling);
 
 void unmarshal_VkProtectedSubmitInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkProtectedSubmitInfo* forUnmarshaling);
 
 #define OP_vkGetDeviceQueue2 20155
 void marshal_VkSamplerYcbcrConversionCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSamplerYcbcrConversionCreateInfo* forMarshaling);
 
 void unmarshal_VkSamplerYcbcrConversionCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSamplerYcbcrConversionCreateInfo* forUnmarshaling);
 
 void marshal_VkSamplerYcbcrConversionInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSamplerYcbcrConversionInfo* forMarshaling);
 
 void unmarshal_VkSamplerYcbcrConversionInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSamplerYcbcrConversionInfo* forUnmarshaling);
 
 void marshal_VkBindImagePlaneMemoryInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBindImagePlaneMemoryInfo* forMarshaling);
 
 void unmarshal_VkBindImagePlaneMemoryInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBindImagePlaneMemoryInfo* forUnmarshaling);
 
 void marshal_VkImagePlaneMemoryRequirementsInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImagePlaneMemoryRequirementsInfo* forMarshaling);
 
 void unmarshal_VkImagePlaneMemoryRequirementsInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImagePlaneMemoryRequirementsInfo* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceSamplerYcbcrConversionFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceSamplerYcbcrConversionFeatures* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceSamplerYcbcrConversionFeatures(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceSamplerYcbcrConversionFeatures* forUnmarshaling);
 
 void marshal_VkSamplerYcbcrConversionImageFormatProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSamplerYcbcrConversionImageFormatProperties* forMarshaling);
 
 void unmarshal_VkSamplerYcbcrConversionImageFormatProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSamplerYcbcrConversionImageFormatProperties* forUnmarshaling);
 
 #define OP_vkCreateSamplerYcbcrConversion 20156
 #define OP_vkDestroySamplerYcbcrConversion 20157
 void marshal_VkDescriptorUpdateTemplateEntry(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDescriptorUpdateTemplateEntry* forMarshaling);
 
 void unmarshal_VkDescriptorUpdateTemplateEntry(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDescriptorUpdateTemplateEntry* forUnmarshaling);
 
 void marshal_VkDescriptorUpdateTemplateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDescriptorUpdateTemplateCreateInfo* forMarshaling);
 
 void unmarshal_VkDescriptorUpdateTemplateCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDescriptorUpdateTemplateCreateInfo* forUnmarshaling);
 
 #define OP_vkCreateDescriptorUpdateTemplate 20158
@@ -1481,244 +1803,829 @@
 #define OP_vkUpdateDescriptorSetWithTemplate 20160
 void marshal_VkExternalMemoryProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalMemoryProperties* forMarshaling);
 
 void unmarshal_VkExternalMemoryProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalMemoryProperties* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceExternalImageFormatInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalImageFormatInfo* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceExternalImageFormatInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceExternalImageFormatInfo* forUnmarshaling);
 
 void marshal_VkExternalImageFormatProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalImageFormatProperties* forMarshaling);
 
 void unmarshal_VkExternalImageFormatProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalImageFormatProperties* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceExternalBufferInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalBufferInfo* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceExternalBufferInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceExternalBufferInfo* forUnmarshaling);
 
 void marshal_VkExternalBufferProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalBufferProperties* forMarshaling);
 
 void unmarshal_VkExternalBufferProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalBufferProperties* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceIDProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceIDProperties* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceIDProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceIDProperties* forUnmarshaling);
 
 #define OP_vkGetPhysicalDeviceExternalBufferProperties 20161
 void marshal_VkExternalMemoryImageCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalMemoryImageCreateInfo* forMarshaling);
 
 void unmarshal_VkExternalMemoryImageCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalMemoryImageCreateInfo* forUnmarshaling);
 
 void marshal_VkExternalMemoryBufferCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalMemoryBufferCreateInfo* forMarshaling);
 
 void unmarshal_VkExternalMemoryBufferCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalMemoryBufferCreateInfo* forUnmarshaling);
 
 void marshal_VkExportMemoryAllocateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExportMemoryAllocateInfo* forMarshaling);
 
 void unmarshal_VkExportMemoryAllocateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExportMemoryAllocateInfo* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceExternalFenceInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalFenceInfo* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceExternalFenceInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceExternalFenceInfo* forUnmarshaling);
 
 void marshal_VkExternalFenceProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalFenceProperties* forMarshaling);
 
 void unmarshal_VkExternalFenceProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalFenceProperties* forUnmarshaling);
 
 #define OP_vkGetPhysicalDeviceExternalFenceProperties 20162
 void marshal_VkExportFenceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExportFenceCreateInfo* forMarshaling);
 
 void unmarshal_VkExportFenceCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExportFenceCreateInfo* forUnmarshaling);
 
 void marshal_VkExportSemaphoreCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExportSemaphoreCreateInfo* forMarshaling);
 
 void unmarshal_VkExportSemaphoreCreateInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExportSemaphoreCreateInfo* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceExternalSemaphoreInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalSemaphoreInfo* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceExternalSemaphoreInfo(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceExternalSemaphoreInfo* forUnmarshaling);
 
 void marshal_VkExternalSemaphoreProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalSemaphoreProperties* forMarshaling);
 
 void unmarshal_VkExternalSemaphoreProperties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalSemaphoreProperties* forUnmarshaling);
 
 #define OP_vkGetPhysicalDeviceExternalSemaphoreProperties 20163
 void marshal_VkPhysicalDeviceMaintenance3Properties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceMaintenance3Properties* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceMaintenance3Properties(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceMaintenance3Properties* forUnmarshaling);
 
 void marshal_VkDescriptorSetLayoutSupport(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDescriptorSetLayoutSupport* forMarshaling);
 
 void unmarshal_VkDescriptorSetLayoutSupport(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDescriptorSetLayoutSupport* forUnmarshaling);
 
 #define OP_vkGetDescriptorSetLayoutSupport 20164
-void marshal_VkPhysicalDeviceShaderDrawParameterFeatures(
+void marshal_VkPhysicalDeviceShaderDrawParametersFeatures(
     VulkanStreamGuest* vkStream,
-    const VkPhysicalDeviceShaderDrawParameterFeatures* forMarshaling);
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderDrawParametersFeatures* forMarshaling);
 
-void unmarshal_VkPhysicalDeviceShaderDrawParameterFeatures(
+void unmarshal_VkPhysicalDeviceShaderDrawParametersFeatures(
     VulkanStreamGuest* vkStream,
-    VkPhysicalDeviceShaderDrawParameterFeatures* forUnmarshaling);
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderDrawParametersFeatures* forUnmarshaling);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceShaderDrawParametersFeatures, marshal_VkPhysicalDeviceShaderDrawParameterFeatures);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceShaderDrawParametersFeatures, unmarshal_VkPhysicalDeviceShaderDrawParameterFeatures);
 
 #endif
+#ifdef VK_VERSION_1_2
+void marshal_VkPhysicalDeviceVulkan11Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan11Features* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceVulkan11Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceVulkan11Features* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceVulkan11Properties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan11Properties* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceVulkan11Properties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceVulkan11Properties* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceVulkan12Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan12Features* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceVulkan12Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceVulkan12Features* forUnmarshaling);
+
+void marshal_VkConformanceVersion(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkConformanceVersion* forMarshaling);
+
+void unmarshal_VkConformanceVersion(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkConformanceVersion* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceVulkan12Properties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan12Properties* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceVulkan12Properties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceVulkan12Properties* forUnmarshaling);
+
+void marshal_VkImageFormatListCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageFormatListCreateInfo* forMarshaling);
+
+void unmarshal_VkImageFormatListCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageFormatListCreateInfo* forUnmarshaling);
+
+#define OP_vkCmdDrawIndirectCount 282774587
+#define OP_vkCmdDrawIndexedIndirectCount 245204359
+void marshal_VkAttachmentDescription2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentDescription2* forMarshaling);
+
+void unmarshal_VkAttachmentDescription2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAttachmentDescription2* forUnmarshaling);
+
+void marshal_VkAttachmentReference2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentReference2* forMarshaling);
+
+void unmarshal_VkAttachmentReference2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAttachmentReference2* forUnmarshaling);
+
+void marshal_VkSubpassDescription2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassDescription2* forMarshaling);
+
+void unmarshal_VkSubpassDescription2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSubpassDescription2* forUnmarshaling);
+
+void marshal_VkSubpassDependency2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassDependency2* forMarshaling);
+
+void unmarshal_VkSubpassDependency2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSubpassDependency2* forUnmarshaling);
+
+void marshal_VkRenderPassCreateInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassCreateInfo2* forMarshaling);
+
+void unmarshal_VkRenderPassCreateInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRenderPassCreateInfo2* forUnmarshaling);
+
+void marshal_VkSubpassBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassBeginInfo* forMarshaling);
+
+void unmarshal_VkSubpassBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSubpassBeginInfo* forUnmarshaling);
+
+void marshal_VkSubpassEndInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassEndInfo* forMarshaling);
+
+void unmarshal_VkSubpassEndInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSubpassEndInfo* forUnmarshaling);
+
+#define OP_vkCreateRenderPass2 279590827
+#define OP_vkCmdBeginRenderPass2 235222847
+#define OP_vkCmdNextSubpass2 244873750
+#define OP_vkCmdEndRenderPass2 221297834
+void marshal_VkPhysicalDevice8BitStorageFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevice8BitStorageFeatures* forMarshaling);
+
+void unmarshal_VkPhysicalDevice8BitStorageFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevice8BitStorageFeatures* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceDriverProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDriverProperties* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceDriverProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDriverProperties* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceShaderAtomicInt64Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderAtomicInt64Features* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceShaderAtomicInt64Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderAtomicInt64Features* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceShaderFloat16Int8Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderFloat16Int8Features* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceShaderFloat16Int8Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderFloat16Int8Features* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceFloatControlsProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFloatControlsProperties* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceFloatControlsProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFloatControlsProperties* forUnmarshaling);
+
+void marshal_VkDescriptorSetLayoutBindingFlagsCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutBindingFlagsCreateInfo* forMarshaling);
+
+void unmarshal_VkDescriptorSetLayoutBindingFlagsCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorSetLayoutBindingFlagsCreateInfo* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceDescriptorIndexingFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDescriptorIndexingFeatures* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceDescriptorIndexingFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDescriptorIndexingFeatures* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceDescriptorIndexingProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDescriptorIndexingProperties* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceDescriptorIndexingProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDescriptorIndexingProperties* forUnmarshaling);
+
+void marshal_VkDescriptorSetVariableDescriptorCountAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetVariableDescriptorCountAllocateInfo* forMarshaling);
+
+void unmarshal_VkDescriptorSetVariableDescriptorCountAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorSetVariableDescriptorCountAllocateInfo* forUnmarshaling);
+
+void marshal_VkDescriptorSetVariableDescriptorCountLayoutSupport(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetVariableDescriptorCountLayoutSupport* forMarshaling);
+
+void unmarshal_VkDescriptorSetVariableDescriptorCountLayoutSupport(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorSetVariableDescriptorCountLayoutSupport* forUnmarshaling);
+
+void marshal_VkSubpassDescriptionDepthStencilResolve(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassDescriptionDepthStencilResolve* forMarshaling);
+
+void unmarshal_VkSubpassDescriptionDepthStencilResolve(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSubpassDescriptionDepthStencilResolve* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceDepthStencilResolveProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDepthStencilResolveProperties* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceDepthStencilResolveProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDepthStencilResolveProperties* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceScalarBlockLayoutFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceScalarBlockLayoutFeatures* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceScalarBlockLayoutFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceScalarBlockLayoutFeatures* forUnmarshaling);
+
+void marshal_VkImageStencilUsageCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageStencilUsageCreateInfo* forMarshaling);
+
+void unmarshal_VkImageStencilUsageCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageStencilUsageCreateInfo* forUnmarshaling);
+
+void marshal_VkSamplerReductionModeCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSamplerReductionModeCreateInfo* forMarshaling);
+
+void unmarshal_VkSamplerReductionModeCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSamplerReductionModeCreateInfo* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceSamplerFilterMinmaxProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSamplerFilterMinmaxProperties* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceSamplerFilterMinmaxProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceSamplerFilterMinmaxProperties* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceVulkanMemoryModelFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkanMemoryModelFeatures* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceVulkanMemoryModelFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceVulkanMemoryModelFeatures* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceImagelessFramebufferFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImagelessFramebufferFeatures* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceImagelessFramebufferFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceImagelessFramebufferFeatures* forUnmarshaling);
+
+void marshal_VkFramebufferAttachmentImageInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFramebufferAttachmentImageInfo* forMarshaling);
+
+void unmarshal_VkFramebufferAttachmentImageInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkFramebufferAttachmentImageInfo* forUnmarshaling);
+
+void marshal_VkFramebufferAttachmentsCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFramebufferAttachmentsCreateInfo* forMarshaling);
+
+void unmarshal_VkFramebufferAttachmentsCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkFramebufferAttachmentsCreateInfo* forUnmarshaling);
+
+void marshal_VkRenderPassAttachmentBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassAttachmentBeginInfo* forMarshaling);
+
+void unmarshal_VkRenderPassAttachmentBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRenderPassAttachmentBeginInfo* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceUniformBufferStandardLayoutFeatures* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceUniformBufferStandardLayoutFeatures* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* forUnmarshaling);
+
+void marshal_VkAttachmentReferenceStencilLayout(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentReferenceStencilLayout* forMarshaling);
+
+void unmarshal_VkAttachmentReferenceStencilLayout(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAttachmentReferenceStencilLayout* forUnmarshaling);
+
+void marshal_VkAttachmentDescriptionStencilLayout(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentDescriptionStencilLayout* forMarshaling);
+
+void unmarshal_VkAttachmentDescriptionStencilLayout(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAttachmentDescriptionStencilLayout* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceHostQueryResetFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceHostQueryResetFeatures* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceHostQueryResetFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceHostQueryResetFeatures* forUnmarshaling);
+
+#define OP_vkResetQueryPool 252097672
+void marshal_VkPhysicalDeviceTimelineSemaphoreFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTimelineSemaphoreFeatures* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceTimelineSemaphoreFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceTimelineSemaphoreFeatures* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceTimelineSemaphoreProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTimelineSemaphoreProperties* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceTimelineSemaphoreProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceTimelineSemaphoreProperties* forUnmarshaling);
+
+void marshal_VkSemaphoreTypeCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreTypeCreateInfo* forMarshaling);
+
+void unmarshal_VkSemaphoreTypeCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSemaphoreTypeCreateInfo* forUnmarshaling);
+
+void marshal_VkTimelineSemaphoreSubmitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkTimelineSemaphoreSubmitInfo* forMarshaling);
+
+void unmarshal_VkTimelineSemaphoreSubmitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkTimelineSemaphoreSubmitInfo* forUnmarshaling);
+
+void marshal_VkSemaphoreWaitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreWaitInfo* forMarshaling);
+
+void unmarshal_VkSemaphoreWaitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSemaphoreWaitInfo* forUnmarshaling);
+
+void marshal_VkSemaphoreSignalInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreSignalInfo* forMarshaling);
+
+void unmarshal_VkSemaphoreSignalInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSemaphoreSignalInfo* forUnmarshaling);
+
+#define OP_vkGetSemaphoreCounterValue 267066974
+#define OP_vkWaitSemaphores 224777382
+#define OP_vkSignalSemaphore 271024127
+void marshal_VkPhysicalDeviceBufferDeviceAddressFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBufferDeviceAddressFeatures* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceBufferDeviceAddressFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceBufferDeviceAddressFeatures* forUnmarshaling);
+
+void marshal_VkBufferDeviceAddressInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferDeviceAddressInfo* forMarshaling);
+
+void unmarshal_VkBufferDeviceAddressInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBufferDeviceAddressInfo* forUnmarshaling);
+
+void marshal_VkBufferOpaqueCaptureAddressCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferOpaqueCaptureAddressCreateInfo* forMarshaling);
+
+void unmarshal_VkBufferOpaqueCaptureAddressCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBufferOpaqueCaptureAddressCreateInfo* forUnmarshaling);
+
+void marshal_VkMemoryOpaqueCaptureAddressAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryOpaqueCaptureAddressAllocateInfo* forMarshaling);
+
+void unmarshal_VkMemoryOpaqueCaptureAddressAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkMemoryOpaqueCaptureAddressAllocateInfo* forUnmarshaling);
+
+void marshal_VkDeviceMemoryOpaqueCaptureAddressInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceMemoryOpaqueCaptureAddressInfo* forMarshaling);
+
+void unmarshal_VkDeviceMemoryOpaqueCaptureAddressInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDeviceMemoryOpaqueCaptureAddressInfo* forUnmarshaling);
+
+#define OP_vkGetBufferDeviceAddress 222632266
+#define OP_vkGetBufferOpaqueCaptureAddress 230188231
+#define OP_vkGetDeviceMemoryOpaqueCaptureAddress 260620079
+#endif
 #ifdef VK_KHR_surface
-#define OP_vkDestroySurfaceKHR 20165
-#define OP_vkGetPhysicalDeviceSurfaceSupportKHR 20166
 void marshal_VkSurfaceCapabilitiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSurfaceCapabilitiesKHR* forMarshaling);
 
 void unmarshal_VkSurfaceCapabilitiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSurfaceCapabilitiesKHR* forUnmarshaling);
 
-#define OP_vkGetPhysicalDeviceSurfaceCapabilitiesKHR 20167
 void marshal_VkSurfaceFormatKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSurfaceFormatKHR* forMarshaling);
 
 void unmarshal_VkSurfaceFormatKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSurfaceFormatKHR* forUnmarshaling);
 
+#define OP_vkDestroySurfaceKHR 20165
+#define OP_vkGetPhysicalDeviceSurfaceSupportKHR 20166
+#define OP_vkGetPhysicalDeviceSurfaceCapabilitiesKHR 20167
 #define OP_vkGetPhysicalDeviceSurfaceFormatsKHR 20168
 #define OP_vkGetPhysicalDeviceSurfacePresentModesKHR 20169
 #endif
 #ifdef VK_KHR_swapchain
 void marshal_VkSwapchainCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSwapchainCreateInfoKHR* forMarshaling);
 
 void unmarshal_VkSwapchainCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSwapchainCreateInfoKHR* forUnmarshaling);
 
+void marshal_VkPresentInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPresentInfoKHR* forMarshaling);
+
+void unmarshal_VkPresentInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPresentInfoKHR* forUnmarshaling);
+
 #define OP_vkCreateSwapchainKHR 20170
 #define OP_vkDestroySwapchainKHR 20171
 #define OP_vkGetSwapchainImagesKHR 20172
 #define OP_vkAcquireNextImageKHR 20173
-void marshal_VkPresentInfoKHR(
-    VulkanStreamGuest* vkStream,
-    const VkPresentInfoKHR* forMarshaling);
-
-void unmarshal_VkPresentInfoKHR(
-    VulkanStreamGuest* vkStream,
-    VkPresentInfoKHR* forUnmarshaling);
-
 #define OP_vkQueuePresentKHR 20174
 void marshal_VkImageSwapchainCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImageSwapchainCreateInfoKHR* forMarshaling);
 
 void unmarshal_VkImageSwapchainCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImageSwapchainCreateInfoKHR* forUnmarshaling);
 
 void marshal_VkBindImageMemorySwapchainInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkBindImageMemorySwapchainInfoKHR* forMarshaling);
 
 void unmarshal_VkBindImageMemorySwapchainInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkBindImageMemorySwapchainInfoKHR* forUnmarshaling);
 
 void marshal_VkAcquireNextImageInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkAcquireNextImageInfoKHR* forMarshaling);
 
 void unmarshal_VkAcquireNextImageInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkAcquireNextImageInfoKHR* forUnmarshaling);
 
 void marshal_VkDeviceGroupPresentCapabilitiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceGroupPresentCapabilitiesKHR* forMarshaling);
 
 void unmarshal_VkDeviceGroupPresentCapabilitiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceGroupPresentCapabilitiesKHR* forUnmarshaling);
 
 void marshal_VkDeviceGroupPresentInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceGroupPresentInfoKHR* forMarshaling);
 
 void unmarshal_VkDeviceGroupPresentInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceGroupPresentInfoKHR* forUnmarshaling);
 
 void marshal_VkDeviceGroupSwapchainCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceGroupSwapchainCreateInfoKHR* forMarshaling);
 
 void unmarshal_VkDeviceGroupSwapchainCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceGroupSwapchainCreateInfoKHR* forUnmarshaling);
 
 #define OP_vkGetDeviceGroupPresentCapabilitiesKHR 20175
@@ -1727,60 +2634,74 @@
 #define OP_vkAcquireNextImage2KHR 20178
 #endif
 #ifdef VK_KHR_display
-void marshal_VkDisplayPropertiesKHR(
-    VulkanStreamGuest* vkStream,
-    const VkDisplayPropertiesKHR* forMarshaling);
-
-void unmarshal_VkDisplayPropertiesKHR(
-    VulkanStreamGuest* vkStream,
-    VkDisplayPropertiesKHR* forUnmarshaling);
-
 void marshal_VkDisplayModeParametersKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayModeParametersKHR* forMarshaling);
 
 void unmarshal_VkDisplayModeParametersKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayModeParametersKHR* forUnmarshaling);
 
-void marshal_VkDisplayModePropertiesKHR(
-    VulkanStreamGuest* vkStream,
-    const VkDisplayModePropertiesKHR* forMarshaling);
-
-void unmarshal_VkDisplayModePropertiesKHR(
-    VulkanStreamGuest* vkStream,
-    VkDisplayModePropertiesKHR* forUnmarshaling);
-
 void marshal_VkDisplayModeCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayModeCreateInfoKHR* forMarshaling);
 
 void unmarshal_VkDisplayModeCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayModeCreateInfoKHR* forUnmarshaling);
 
+void marshal_VkDisplayModePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayModePropertiesKHR* forMarshaling);
+
+void unmarshal_VkDisplayModePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDisplayModePropertiesKHR* forUnmarshaling);
+
 void marshal_VkDisplayPlaneCapabilitiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayPlaneCapabilitiesKHR* forMarshaling);
 
 void unmarshal_VkDisplayPlaneCapabilitiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayPlaneCapabilitiesKHR* forUnmarshaling);
 
 void marshal_VkDisplayPlanePropertiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayPlanePropertiesKHR* forMarshaling);
 
 void unmarshal_VkDisplayPlanePropertiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayPlanePropertiesKHR* forUnmarshaling);
 
+void marshal_VkDisplayPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPropertiesKHR* forMarshaling);
+
+void unmarshal_VkDisplayPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDisplayPropertiesKHR* forUnmarshaling);
+
 void marshal_VkDisplaySurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplaySurfaceCreateInfoKHR* forMarshaling);
 
 void unmarshal_VkDisplaySurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplaySurfaceCreateInfoKHR* forUnmarshaling);
 
 #define OP_vkGetPhysicalDeviceDisplayPropertiesKHR 20179
@@ -1794,10 +2715,12 @@
 #ifdef VK_KHR_display_swapchain
 void marshal_VkDisplayPresentInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayPresentInfoKHR* forMarshaling);
 
 void unmarshal_VkDisplayPresentInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayPresentInfoKHR* forUnmarshaling);
 
 #define OP_vkCreateSharedSwapchainsKHR 20186
@@ -1805,10 +2728,12 @@
 #ifdef VK_KHR_xlib_surface
 void marshal_VkXlibSurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkXlibSurfaceCreateInfoKHR* forMarshaling);
 
 void unmarshal_VkXlibSurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkXlibSurfaceCreateInfoKHR* forUnmarshaling);
 
 #define OP_vkCreateXlibSurfaceKHR 20187
@@ -1817,10 +2742,12 @@
 #ifdef VK_KHR_xcb_surface
 void marshal_VkXcbSurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkXcbSurfaceCreateInfoKHR* forMarshaling);
 
 void unmarshal_VkXcbSurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkXcbSurfaceCreateInfoKHR* forUnmarshaling);
 
 #define OP_vkCreateXcbSurfaceKHR 20189
@@ -1829,34 +2756,26 @@
 #ifdef VK_KHR_wayland_surface
 void marshal_VkWaylandSurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkWaylandSurfaceCreateInfoKHR* forMarshaling);
 
 void unmarshal_VkWaylandSurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkWaylandSurfaceCreateInfoKHR* forUnmarshaling);
 
 #define OP_vkCreateWaylandSurfaceKHR 20191
 #define OP_vkGetPhysicalDeviceWaylandPresentationSupportKHR 20192
 #endif
-#ifdef VK_KHR_mir_surface
-void marshal_VkMirSurfaceCreateInfoKHR(
-    VulkanStreamGuest* vkStream,
-    const VkMirSurfaceCreateInfoKHR* forMarshaling);
-
-void unmarshal_VkMirSurfaceCreateInfoKHR(
-    VulkanStreamGuest* vkStream,
-    VkMirSurfaceCreateInfoKHR* forUnmarshaling);
-
-#define OP_vkCreateMirSurfaceKHR 20193
-#define OP_vkGetPhysicalDeviceMirPresentationSupportKHR 20194
-#endif
 #ifdef VK_KHR_android_surface
 void marshal_VkAndroidSurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkAndroidSurfaceCreateInfoKHR* forMarshaling);
 
 void unmarshal_VkAndroidSurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkAndroidSurfaceCreateInfoKHR* forUnmarshaling);
 
 #define OP_vkCreateAndroidSurfaceKHR 20195
@@ -1864,10 +2783,12 @@
 #ifdef VK_KHR_win32_surface
 void marshal_VkWin32SurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkWin32SurfaceCreateInfoKHR* forMarshaling);
 
 void unmarshal_VkWin32SurfaceCreateInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkWin32SurfaceCreateInfoKHR* forUnmarshaling);
 
 #define OP_vkCreateWin32SurfaceKHR 20196
@@ -1876,8 +2797,56 @@
 #ifdef VK_KHR_sampler_mirror_clamp_to_edge
 #endif
 #ifdef VK_KHR_multiview
+DEFINE_ALIAS_FUNCTION(marshal_VkRenderPassMultiviewCreateInfo, marshal_VkRenderPassMultiviewCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkRenderPassMultiviewCreateInfo, unmarshal_VkRenderPassMultiviewCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceMultiviewFeatures, marshal_VkPhysicalDeviceMultiviewFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceMultiviewFeatures, unmarshal_VkPhysicalDeviceMultiviewFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceMultiviewProperties, marshal_VkPhysicalDeviceMultiviewPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceMultiviewProperties, unmarshal_VkPhysicalDeviceMultiviewPropertiesKHR);
+
 #endif
 #ifdef VK_KHR_get_physical_device_properties2
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceFeatures2, marshal_VkPhysicalDeviceFeatures2KHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceFeatures2, unmarshal_VkPhysicalDeviceFeatures2KHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceProperties2, marshal_VkPhysicalDeviceProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceProperties2, unmarshal_VkPhysicalDeviceProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkFormatProperties2, marshal_VkFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkFormatProperties2, unmarshal_VkFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkImageFormatProperties2, marshal_VkImageFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkImageFormatProperties2, unmarshal_VkImageFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceImageFormatInfo2, marshal_VkPhysicalDeviceImageFormatInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceImageFormatInfo2, unmarshal_VkPhysicalDeviceImageFormatInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkQueueFamilyProperties2, marshal_VkQueueFamilyProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkQueueFamilyProperties2, unmarshal_VkQueueFamilyProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceMemoryProperties2, marshal_VkPhysicalDeviceMemoryProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceMemoryProperties2, unmarshal_VkPhysicalDeviceMemoryProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkSparseImageFormatProperties2, marshal_VkSparseImageFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkSparseImageFormatProperties2, unmarshal_VkSparseImageFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceSparseImageFormatInfo2, marshal_VkPhysicalDeviceSparseImageFormatInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceSparseImageFormatInfo2, unmarshal_VkPhysicalDeviceSparseImageFormatInfo2KHR);
+
 #define OP_vkGetPhysicalDeviceFeatures2KHR 20198
 #define OP_vkGetPhysicalDeviceProperties2KHR 20199
 #define OP_vkGetPhysicalDeviceFormatProperties2KHR 20200
@@ -1887,9 +2856,37 @@
 #define OP_vkGetPhysicalDeviceSparseImageFormatProperties2KHR 20204
 #endif
 #ifdef VK_KHR_device_group
+DEFINE_ALIAS_FUNCTION(marshal_VkMemoryAllocateFlagsInfo, marshal_VkMemoryAllocateFlagsInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkMemoryAllocateFlagsInfo, unmarshal_VkMemoryAllocateFlagsInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkDeviceGroupRenderPassBeginInfo, marshal_VkDeviceGroupRenderPassBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkDeviceGroupRenderPassBeginInfo, unmarshal_VkDeviceGroupRenderPassBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkDeviceGroupCommandBufferBeginInfo, marshal_VkDeviceGroupCommandBufferBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkDeviceGroupCommandBufferBeginInfo, unmarshal_VkDeviceGroupCommandBufferBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkDeviceGroupSubmitInfo, marshal_VkDeviceGroupSubmitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkDeviceGroupSubmitInfo, unmarshal_VkDeviceGroupSubmitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkDeviceGroupBindSparseInfo, marshal_VkDeviceGroupBindSparseInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkDeviceGroupBindSparseInfo, unmarshal_VkDeviceGroupBindSparseInfoKHR);
+
 #define OP_vkGetDeviceGroupPeerMemoryFeaturesKHR 20205
 #define OP_vkCmdSetDeviceMaskKHR 20206
 #define OP_vkCmdDispatchBaseKHR 20207
+DEFINE_ALIAS_FUNCTION(marshal_VkBindBufferMemoryDeviceGroupInfo, marshal_VkBindBufferMemoryDeviceGroupInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkBindBufferMemoryDeviceGroupInfo, unmarshal_VkBindBufferMemoryDeviceGroupInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkBindImageMemoryDeviceGroupInfo, marshal_VkBindImageMemoryDeviceGroupInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkBindImageMemoryDeviceGroupInfo, unmarshal_VkBindImageMemoryDeviceGroupInfoKHR);
+
 #endif
 #ifdef VK_KHR_shader_draw_parameters
 #endif
@@ -1897,44 +2894,96 @@
 #define OP_vkTrimCommandPoolKHR 20208
 #endif
 #ifdef VK_KHR_device_group_creation
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceGroupProperties, marshal_VkPhysicalDeviceGroupPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceGroupProperties, unmarshal_VkPhysicalDeviceGroupPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkDeviceGroupDeviceCreateInfo, marshal_VkDeviceGroupDeviceCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkDeviceGroupDeviceCreateInfo, unmarshal_VkDeviceGroupDeviceCreateInfoKHR);
+
 #define OP_vkEnumeratePhysicalDeviceGroupsKHR 20209
 #endif
 #ifdef VK_KHR_external_memory_capabilities
+DEFINE_ALIAS_FUNCTION(marshal_VkExternalMemoryProperties, marshal_VkExternalMemoryPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkExternalMemoryProperties, unmarshal_VkExternalMemoryPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceExternalImageFormatInfo, marshal_VkPhysicalDeviceExternalImageFormatInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceExternalImageFormatInfo, unmarshal_VkPhysicalDeviceExternalImageFormatInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkExternalImageFormatProperties, marshal_VkExternalImageFormatPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkExternalImageFormatProperties, unmarshal_VkExternalImageFormatPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceExternalBufferInfo, marshal_VkPhysicalDeviceExternalBufferInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceExternalBufferInfo, unmarshal_VkPhysicalDeviceExternalBufferInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkExternalBufferProperties, marshal_VkExternalBufferPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkExternalBufferProperties, unmarshal_VkExternalBufferPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceIDProperties, marshal_VkPhysicalDeviceIDPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceIDProperties, unmarshal_VkPhysicalDeviceIDPropertiesKHR);
+
 #define OP_vkGetPhysicalDeviceExternalBufferPropertiesKHR 20210
 #endif
 #ifdef VK_KHR_external_memory
+DEFINE_ALIAS_FUNCTION(marshal_VkExternalMemoryImageCreateInfo, marshal_VkExternalMemoryImageCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkExternalMemoryImageCreateInfo, unmarshal_VkExternalMemoryImageCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkExternalMemoryBufferCreateInfo, marshal_VkExternalMemoryBufferCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkExternalMemoryBufferCreateInfo, unmarshal_VkExternalMemoryBufferCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkExportMemoryAllocateInfo, marshal_VkExportMemoryAllocateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkExportMemoryAllocateInfo, unmarshal_VkExportMemoryAllocateInfoKHR);
+
 #endif
 #ifdef VK_KHR_external_memory_win32
 void marshal_VkImportMemoryWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportMemoryWin32HandleInfoKHR* forMarshaling);
 
 void unmarshal_VkImportMemoryWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportMemoryWin32HandleInfoKHR* forUnmarshaling);
 
 void marshal_VkExportMemoryWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExportMemoryWin32HandleInfoKHR* forMarshaling);
 
 void unmarshal_VkExportMemoryWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExportMemoryWin32HandleInfoKHR* forUnmarshaling);
 
 void marshal_VkMemoryWin32HandlePropertiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryWin32HandlePropertiesKHR* forMarshaling);
 
 void unmarshal_VkMemoryWin32HandlePropertiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryWin32HandlePropertiesKHR* forUnmarshaling);
 
 void marshal_VkMemoryGetWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryGetWin32HandleInfoKHR* forMarshaling);
 
 void unmarshal_VkMemoryGetWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryGetWin32HandleInfoKHR* forUnmarshaling);
 
 #define OP_vkGetMemoryWin32HandleKHR 20211
@@ -1943,26 +2992,32 @@
 #ifdef VK_KHR_external_memory_fd
 void marshal_VkImportMemoryFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportMemoryFdInfoKHR* forMarshaling);
 
 void unmarshal_VkImportMemoryFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportMemoryFdInfoKHR* forUnmarshaling);
 
 void marshal_VkMemoryFdPropertiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryFdPropertiesKHR* forMarshaling);
 
 void unmarshal_VkMemoryFdPropertiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryFdPropertiesKHR* forUnmarshaling);
 
 void marshal_VkMemoryGetFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryGetFdInfoKHR* forMarshaling);
 
 void unmarshal_VkMemoryGetFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryGetFdInfoKHR* forUnmarshaling);
 
 #define OP_vkGetMemoryFdKHR 20213
@@ -1971,49 +3026,71 @@
 #ifdef VK_KHR_win32_keyed_mutex
 void marshal_VkWin32KeyedMutexAcquireReleaseInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkWin32KeyedMutexAcquireReleaseInfoKHR* forMarshaling);
 
 void unmarshal_VkWin32KeyedMutexAcquireReleaseInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkWin32KeyedMutexAcquireReleaseInfoKHR* forUnmarshaling);
 
 #endif
 #ifdef VK_KHR_external_semaphore_capabilities
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceExternalSemaphoreInfo, marshal_VkPhysicalDeviceExternalSemaphoreInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceExternalSemaphoreInfo, unmarshal_VkPhysicalDeviceExternalSemaphoreInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkExternalSemaphoreProperties, marshal_VkExternalSemaphorePropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkExternalSemaphoreProperties, unmarshal_VkExternalSemaphorePropertiesKHR);
+
 #define OP_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR 20215
 #endif
 #ifdef VK_KHR_external_semaphore
+DEFINE_ALIAS_FUNCTION(marshal_VkExportSemaphoreCreateInfo, marshal_VkExportSemaphoreCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkExportSemaphoreCreateInfo, unmarshal_VkExportSemaphoreCreateInfoKHR);
+
 #endif
 #ifdef VK_KHR_external_semaphore_win32
 void marshal_VkImportSemaphoreWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportSemaphoreWin32HandleInfoKHR* forMarshaling);
 
 void unmarshal_VkImportSemaphoreWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportSemaphoreWin32HandleInfoKHR* forUnmarshaling);
 
 void marshal_VkExportSemaphoreWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExportSemaphoreWin32HandleInfoKHR* forMarshaling);
 
 void unmarshal_VkExportSemaphoreWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExportSemaphoreWin32HandleInfoKHR* forUnmarshaling);
 
 void marshal_VkD3D12FenceSubmitInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkD3D12FenceSubmitInfoKHR* forMarshaling);
 
 void unmarshal_VkD3D12FenceSubmitInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkD3D12FenceSubmitInfoKHR* forUnmarshaling);
 
 void marshal_VkSemaphoreGetWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSemaphoreGetWin32HandleInfoKHR* forMarshaling);
 
 void unmarshal_VkSemaphoreGetWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSemaphoreGetWin32HandleInfoKHR* forUnmarshaling);
 
 #define OP_vkImportSemaphoreWin32HandleKHR 20216
@@ -2022,18 +3099,22 @@
 #ifdef VK_KHR_external_semaphore_fd
 void marshal_VkImportSemaphoreFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportSemaphoreFdInfoKHR* forMarshaling);
 
 void unmarshal_VkImportSemaphoreFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportSemaphoreFdInfoKHR* forUnmarshaling);
 
 void marshal_VkSemaphoreGetFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSemaphoreGetFdInfoKHR* forMarshaling);
 
 void unmarshal_VkSemaphoreGetFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSemaphoreGetFdInfoKHR* forUnmarshaling);
 
 #define OP_vkImportSemaphoreFdKHR 20218
@@ -2042,149 +3123,189 @@
 #ifdef VK_KHR_push_descriptor
 void marshal_VkPhysicalDevicePushDescriptorPropertiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDevicePushDescriptorPropertiesKHR* forMarshaling);
 
 void unmarshal_VkPhysicalDevicePushDescriptorPropertiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDevicePushDescriptorPropertiesKHR* forUnmarshaling);
 
 #define OP_vkCmdPushDescriptorSetKHR 20220
 #define OP_vkCmdPushDescriptorSetWithTemplateKHR 20221
 #endif
+#ifdef VK_KHR_shader_float16_int8
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceShaderFloat16Int8Features, marshal_VkPhysicalDeviceShaderFloat16Int8FeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceShaderFloat16Int8Features, unmarshal_VkPhysicalDeviceShaderFloat16Int8FeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceShaderFloat16Int8Features, marshal_VkPhysicalDeviceFloat16Int8FeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceShaderFloat16Int8Features, unmarshal_VkPhysicalDeviceFloat16Int8FeaturesKHR);
+
+#endif
 #ifdef VK_KHR_16bit_storage
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDevice16BitStorageFeatures, marshal_VkPhysicalDevice16BitStorageFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDevice16BitStorageFeatures, unmarshal_VkPhysicalDevice16BitStorageFeaturesKHR);
+
 #endif
 #ifdef VK_KHR_incremental_present
 void marshal_VkRectLayerKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkRectLayerKHR* forMarshaling);
 
 void unmarshal_VkRectLayerKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkRectLayerKHR* forUnmarshaling);
 
 void marshal_VkPresentRegionKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPresentRegionKHR* forMarshaling);
 
 void unmarshal_VkPresentRegionKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPresentRegionKHR* forUnmarshaling);
 
 void marshal_VkPresentRegionsKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPresentRegionsKHR* forMarshaling);
 
 void unmarshal_VkPresentRegionsKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPresentRegionsKHR* forUnmarshaling);
 
 #endif
 #ifdef VK_KHR_descriptor_update_template
+DEFINE_ALIAS_FUNCTION(marshal_VkDescriptorUpdateTemplateEntry, marshal_VkDescriptorUpdateTemplateEntryKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkDescriptorUpdateTemplateEntry, unmarshal_VkDescriptorUpdateTemplateEntryKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkDescriptorUpdateTemplateCreateInfo, marshal_VkDescriptorUpdateTemplateCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkDescriptorUpdateTemplateCreateInfo, unmarshal_VkDescriptorUpdateTemplateCreateInfoKHR);
+
 #define OP_vkCreateDescriptorUpdateTemplateKHR 20222
 #define OP_vkDestroyDescriptorUpdateTemplateKHR 20223
 #define OP_vkUpdateDescriptorSetWithTemplateKHR 20224
 #endif
+#ifdef VK_KHR_imageless_framebuffer
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceImagelessFramebufferFeatures, marshal_VkPhysicalDeviceImagelessFramebufferFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceImagelessFramebufferFeatures, unmarshal_VkPhysicalDeviceImagelessFramebufferFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkFramebufferAttachmentsCreateInfo, marshal_VkFramebufferAttachmentsCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkFramebufferAttachmentsCreateInfo, unmarshal_VkFramebufferAttachmentsCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkFramebufferAttachmentImageInfo, marshal_VkFramebufferAttachmentImageInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkFramebufferAttachmentImageInfo, unmarshal_VkFramebufferAttachmentImageInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkRenderPassAttachmentBeginInfo, marshal_VkRenderPassAttachmentBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkRenderPassAttachmentBeginInfo, unmarshal_VkRenderPassAttachmentBeginInfoKHR);
+
+#endif
 #ifdef VK_KHR_create_renderpass2
-void marshal_VkAttachmentDescription2KHR(
-    VulkanStreamGuest* vkStream,
-    const VkAttachmentDescription2KHR* forMarshaling);
+DEFINE_ALIAS_FUNCTION(marshal_VkRenderPassCreateInfo2, marshal_VkRenderPassCreateInfo2KHR);
 
-void unmarshal_VkAttachmentDescription2KHR(
-    VulkanStreamGuest* vkStream,
-    VkAttachmentDescription2KHR* forUnmarshaling);
+DEFINE_ALIAS_FUNCTION(unmarshal_VkRenderPassCreateInfo2, unmarshal_VkRenderPassCreateInfo2KHR);
 
-void marshal_VkAttachmentReference2KHR(
-    VulkanStreamGuest* vkStream,
-    const VkAttachmentReference2KHR* forMarshaling);
+DEFINE_ALIAS_FUNCTION(marshal_VkAttachmentDescription2, marshal_VkAttachmentDescription2KHR);
 
-void unmarshal_VkAttachmentReference2KHR(
-    VulkanStreamGuest* vkStream,
-    VkAttachmentReference2KHR* forUnmarshaling);
+DEFINE_ALIAS_FUNCTION(unmarshal_VkAttachmentDescription2, unmarshal_VkAttachmentDescription2KHR);
 
-void marshal_VkSubpassDescription2KHR(
-    VulkanStreamGuest* vkStream,
-    const VkSubpassDescription2KHR* forMarshaling);
+DEFINE_ALIAS_FUNCTION(marshal_VkAttachmentReference2, marshal_VkAttachmentReference2KHR);
 
-void unmarshal_VkSubpassDescription2KHR(
-    VulkanStreamGuest* vkStream,
-    VkSubpassDescription2KHR* forUnmarshaling);
+DEFINE_ALIAS_FUNCTION(unmarshal_VkAttachmentReference2, unmarshal_VkAttachmentReference2KHR);
 
-void marshal_VkSubpassDependency2KHR(
-    VulkanStreamGuest* vkStream,
-    const VkSubpassDependency2KHR* forMarshaling);
+DEFINE_ALIAS_FUNCTION(marshal_VkSubpassDescription2, marshal_VkSubpassDescription2KHR);
 
-void unmarshal_VkSubpassDependency2KHR(
-    VulkanStreamGuest* vkStream,
-    VkSubpassDependency2KHR* forUnmarshaling);
+DEFINE_ALIAS_FUNCTION(unmarshal_VkSubpassDescription2, unmarshal_VkSubpassDescription2KHR);
 
-void marshal_VkRenderPassCreateInfo2KHR(
-    VulkanStreamGuest* vkStream,
-    const VkRenderPassCreateInfo2KHR* forMarshaling);
+DEFINE_ALIAS_FUNCTION(marshal_VkSubpassDependency2, marshal_VkSubpassDependency2KHR);
 
-void unmarshal_VkRenderPassCreateInfo2KHR(
-    VulkanStreamGuest* vkStream,
-    VkRenderPassCreateInfo2KHR* forUnmarshaling);
+DEFINE_ALIAS_FUNCTION(unmarshal_VkSubpassDependency2, unmarshal_VkSubpassDependency2KHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkSubpassBeginInfo, marshal_VkSubpassBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkSubpassBeginInfo, unmarshal_VkSubpassBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkSubpassEndInfo, marshal_VkSubpassEndInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkSubpassEndInfo, unmarshal_VkSubpassEndInfoKHR);
 
 #define OP_vkCreateRenderPass2KHR 20225
-void marshal_VkSubpassBeginInfoKHR(
-    VulkanStreamGuest* vkStream,
-    const VkSubpassBeginInfoKHR* forMarshaling);
-
-void unmarshal_VkSubpassBeginInfoKHR(
-    VulkanStreamGuest* vkStream,
-    VkSubpassBeginInfoKHR* forUnmarshaling);
-
 #define OP_vkCmdBeginRenderPass2KHR 20226
-void marshal_VkSubpassEndInfoKHR(
-    VulkanStreamGuest* vkStream,
-    const VkSubpassEndInfoKHR* forMarshaling);
-
-void unmarshal_VkSubpassEndInfoKHR(
-    VulkanStreamGuest* vkStream,
-    VkSubpassEndInfoKHR* forUnmarshaling);
-
 #define OP_vkCmdNextSubpass2KHR 20227
 #define OP_vkCmdEndRenderPass2KHR 20228
 #endif
 #ifdef VK_KHR_shared_presentable_image
 void marshal_VkSharedPresentSurfaceCapabilitiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSharedPresentSurfaceCapabilitiesKHR* forMarshaling);
 
 void unmarshal_VkSharedPresentSurfaceCapabilitiesKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSharedPresentSurfaceCapabilitiesKHR* forUnmarshaling);
 
 #define OP_vkGetSwapchainStatusKHR 20229
 #endif
 #ifdef VK_KHR_external_fence_capabilities
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceExternalFenceInfo, marshal_VkPhysicalDeviceExternalFenceInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceExternalFenceInfo, unmarshal_VkPhysicalDeviceExternalFenceInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkExternalFenceProperties, marshal_VkExternalFencePropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkExternalFenceProperties, unmarshal_VkExternalFencePropertiesKHR);
+
 #define OP_vkGetPhysicalDeviceExternalFencePropertiesKHR 20230
 #endif
 #ifdef VK_KHR_external_fence
+DEFINE_ALIAS_FUNCTION(marshal_VkExportFenceCreateInfo, marshal_VkExportFenceCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkExportFenceCreateInfo, unmarshal_VkExportFenceCreateInfoKHR);
+
 #endif
 #ifdef VK_KHR_external_fence_win32
 void marshal_VkImportFenceWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportFenceWin32HandleInfoKHR* forMarshaling);
 
 void unmarshal_VkImportFenceWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportFenceWin32HandleInfoKHR* forUnmarshaling);
 
 void marshal_VkExportFenceWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExportFenceWin32HandleInfoKHR* forMarshaling);
 
 void unmarshal_VkExportFenceWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExportFenceWin32HandleInfoKHR* forUnmarshaling);
 
 void marshal_VkFenceGetWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkFenceGetWin32HandleInfoKHR* forMarshaling);
 
 void unmarshal_VkFenceGetWin32HandleInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkFenceGetWin32HandleInfoKHR* forUnmarshaling);
 
 #define OP_vkImportFenceWin32HandleKHR 20231
@@ -2193,94 +3314,228 @@
 #ifdef VK_KHR_external_fence_fd
 void marshal_VkImportFenceFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportFenceFdInfoKHR* forMarshaling);
 
 void unmarshal_VkImportFenceFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportFenceFdInfoKHR* forUnmarshaling);
 
 void marshal_VkFenceGetFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkFenceGetFdInfoKHR* forMarshaling);
 
 void unmarshal_VkFenceGetFdInfoKHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkFenceGetFdInfoKHR* forUnmarshaling);
 
 #define OP_vkImportFenceFdKHR 20233
 #define OP_vkGetFenceFdKHR 20234
 #endif
+#ifdef VK_KHR_performance_query
+void marshal_VkPhysicalDevicePerformanceQueryFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePerformanceQueryFeaturesKHR* forMarshaling);
+
+void unmarshal_VkPhysicalDevicePerformanceQueryFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevicePerformanceQueryFeaturesKHR* forUnmarshaling);
+
+void marshal_VkPhysicalDevicePerformanceQueryPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePerformanceQueryPropertiesKHR* forMarshaling);
+
+void unmarshal_VkPhysicalDevicePerformanceQueryPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevicePerformanceQueryPropertiesKHR* forUnmarshaling);
+
+void marshal_VkPerformanceCounterKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceCounterKHR* forMarshaling);
+
+void unmarshal_VkPerformanceCounterKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceCounterKHR* forUnmarshaling);
+
+void marshal_VkPerformanceCounterDescriptionKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceCounterDescriptionKHR* forMarshaling);
+
+void unmarshal_VkPerformanceCounterDescriptionKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceCounterDescriptionKHR* forUnmarshaling);
+
+void marshal_VkQueryPoolPerformanceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkQueryPoolPerformanceCreateInfoKHR* forMarshaling);
+
+void unmarshal_VkQueryPoolPerformanceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkQueryPoolPerformanceCreateInfoKHR* forUnmarshaling);
+
+void marshal_VkPerformanceCounterResultKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceCounterResultKHR* forMarshaling);
+
+void unmarshal_VkPerformanceCounterResultKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceCounterResultKHR* forUnmarshaling);
+
+void marshal_VkAcquireProfilingLockInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAcquireProfilingLockInfoKHR* forMarshaling);
+
+void unmarshal_VkAcquireProfilingLockInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAcquireProfilingLockInfoKHR* forUnmarshaling);
+
+void marshal_VkPerformanceQuerySubmitInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceQuerySubmitInfoKHR* forMarshaling);
+
+void unmarshal_VkPerformanceQuerySubmitInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceQuerySubmitInfoKHR* forUnmarshaling);
+
+#define OP_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR 299033148
+#define OP_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR 282029987
+#define OP_vkAcquireProfilingLockKHR 238952296
+#define OP_vkReleaseProfilingLockKHR 223904011
+#endif
 #ifdef VK_KHR_maintenance2
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDevicePointClippingProperties, marshal_VkPhysicalDevicePointClippingPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDevicePointClippingProperties, unmarshal_VkPhysicalDevicePointClippingPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkRenderPassInputAttachmentAspectCreateInfo, marshal_VkRenderPassInputAttachmentAspectCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkRenderPassInputAttachmentAspectCreateInfo, unmarshal_VkRenderPassInputAttachmentAspectCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkInputAttachmentAspectReference, marshal_VkInputAttachmentAspectReferenceKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkInputAttachmentAspectReference, unmarshal_VkInputAttachmentAspectReferenceKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkImageViewUsageCreateInfo, marshal_VkImageViewUsageCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkImageViewUsageCreateInfo, unmarshal_VkImageViewUsageCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPipelineTessellationDomainOriginStateCreateInfo, marshal_VkPipelineTessellationDomainOriginStateCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPipelineTessellationDomainOriginStateCreateInfo, unmarshal_VkPipelineTessellationDomainOriginStateCreateInfoKHR);
+
 #endif
 #ifdef VK_KHR_get_surface_capabilities2
 void marshal_VkPhysicalDeviceSurfaceInfo2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceSurfaceInfo2KHR* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceSurfaceInfo2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceSurfaceInfo2KHR* forUnmarshaling);
 
 void marshal_VkSurfaceCapabilities2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSurfaceCapabilities2KHR* forMarshaling);
 
 void unmarshal_VkSurfaceCapabilities2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSurfaceCapabilities2KHR* forUnmarshaling);
 
 void marshal_VkSurfaceFormat2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSurfaceFormat2KHR* forMarshaling);
 
 void unmarshal_VkSurfaceFormat2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSurfaceFormat2KHR* forUnmarshaling);
 
 #define OP_vkGetPhysicalDeviceSurfaceCapabilities2KHR 20235
 #define OP_vkGetPhysicalDeviceSurfaceFormats2KHR 20236
 #endif
 #ifdef VK_KHR_variable_pointers
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceVariablePointersFeatures, marshal_VkPhysicalDeviceVariablePointerFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceVariablePointersFeatures, unmarshal_VkPhysicalDeviceVariablePointerFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceVariablePointersFeatures, marshal_VkPhysicalDeviceVariablePointersFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceVariablePointersFeatures, unmarshal_VkPhysicalDeviceVariablePointersFeaturesKHR);
+
 #endif
 #ifdef VK_KHR_get_display_properties2
 void marshal_VkDisplayProperties2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayProperties2KHR* forMarshaling);
 
 void unmarshal_VkDisplayProperties2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayProperties2KHR* forUnmarshaling);
 
 void marshal_VkDisplayPlaneProperties2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayPlaneProperties2KHR* forMarshaling);
 
 void unmarshal_VkDisplayPlaneProperties2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayPlaneProperties2KHR* forUnmarshaling);
 
 void marshal_VkDisplayModeProperties2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayModeProperties2KHR* forMarshaling);
 
 void unmarshal_VkDisplayModeProperties2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayModeProperties2KHR* forUnmarshaling);
 
 void marshal_VkDisplayPlaneInfo2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayPlaneInfo2KHR* forMarshaling);
 
 void unmarshal_VkDisplayPlaneInfo2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayPlaneInfo2KHR* forUnmarshaling);
 
 void marshal_VkDisplayPlaneCapabilities2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayPlaneCapabilities2KHR* forMarshaling);
 
 void unmarshal_VkDisplayPlaneCapabilities2KHR(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayPlaneCapabilities2KHR* forUnmarshaling);
 
 #define OP_vkGetPhysicalDeviceDisplayProperties2KHR 20237
@@ -2289,58 +3544,568 @@
 #define OP_vkGetDisplayPlaneCapabilities2KHR 20240
 #endif
 #ifdef VK_KHR_dedicated_allocation
+DEFINE_ALIAS_FUNCTION(marshal_VkMemoryDedicatedRequirements, marshal_VkMemoryDedicatedRequirementsKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkMemoryDedicatedRequirements, unmarshal_VkMemoryDedicatedRequirementsKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkMemoryDedicatedAllocateInfo, marshal_VkMemoryDedicatedAllocateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkMemoryDedicatedAllocateInfo, unmarshal_VkMemoryDedicatedAllocateInfoKHR);
+
 #endif
 #ifdef VK_KHR_storage_buffer_storage_class
 #endif
 #ifdef VK_KHR_relaxed_block_layout
 #endif
 #ifdef VK_KHR_get_memory_requirements2
+DEFINE_ALIAS_FUNCTION(marshal_VkBufferMemoryRequirementsInfo2, marshal_VkBufferMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkBufferMemoryRequirementsInfo2, unmarshal_VkBufferMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkImageMemoryRequirementsInfo2, marshal_VkImageMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkImageMemoryRequirementsInfo2, unmarshal_VkImageMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkImageSparseMemoryRequirementsInfo2, marshal_VkImageSparseMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkImageSparseMemoryRequirementsInfo2, unmarshal_VkImageSparseMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkMemoryRequirements2, marshal_VkMemoryRequirements2KHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkMemoryRequirements2, unmarshal_VkMemoryRequirements2KHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkSparseImageMemoryRequirements2, marshal_VkSparseImageMemoryRequirements2KHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkSparseImageMemoryRequirements2, unmarshal_VkSparseImageMemoryRequirements2KHR);
+
 #define OP_vkGetImageMemoryRequirements2KHR 20241
 #define OP_vkGetBufferMemoryRequirements2KHR 20242
 #define OP_vkGetImageSparseMemoryRequirements2KHR 20243
 #endif
 #ifdef VK_KHR_image_format_list
-void marshal_VkImageFormatListCreateInfoKHR(
-    VulkanStreamGuest* vkStream,
-    const VkImageFormatListCreateInfoKHR* forMarshaling);
+DEFINE_ALIAS_FUNCTION(marshal_VkImageFormatListCreateInfo, marshal_VkImageFormatListCreateInfoKHR);
 
-void unmarshal_VkImageFormatListCreateInfoKHR(
-    VulkanStreamGuest* vkStream,
-    VkImageFormatListCreateInfoKHR* forUnmarshaling);
+DEFINE_ALIAS_FUNCTION(unmarshal_VkImageFormatListCreateInfo, unmarshal_VkImageFormatListCreateInfoKHR);
 
 #endif
 #ifdef VK_KHR_sampler_ycbcr_conversion
+DEFINE_ALIAS_FUNCTION(marshal_VkSamplerYcbcrConversionCreateInfo, marshal_VkSamplerYcbcrConversionCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkSamplerYcbcrConversionCreateInfo, unmarshal_VkSamplerYcbcrConversionCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkSamplerYcbcrConversionInfo, marshal_VkSamplerYcbcrConversionInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkSamplerYcbcrConversionInfo, unmarshal_VkSamplerYcbcrConversionInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkBindImagePlaneMemoryInfo, marshal_VkBindImagePlaneMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkBindImagePlaneMemoryInfo, unmarshal_VkBindImagePlaneMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkImagePlaneMemoryRequirementsInfo, marshal_VkImagePlaneMemoryRequirementsInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkImagePlaneMemoryRequirementsInfo, unmarshal_VkImagePlaneMemoryRequirementsInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceSamplerYcbcrConversionFeatures, marshal_VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceSamplerYcbcrConversionFeatures, unmarshal_VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkSamplerYcbcrConversionImageFormatProperties, marshal_VkSamplerYcbcrConversionImageFormatPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkSamplerYcbcrConversionImageFormatProperties, unmarshal_VkSamplerYcbcrConversionImageFormatPropertiesKHR);
+
 #define OP_vkCreateSamplerYcbcrConversionKHR 20244
 #define OP_vkDestroySamplerYcbcrConversionKHR 20245
 #endif
 #ifdef VK_KHR_bind_memory2
+DEFINE_ALIAS_FUNCTION(marshal_VkBindBufferMemoryInfo, marshal_VkBindBufferMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkBindBufferMemoryInfo, unmarshal_VkBindBufferMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkBindImageMemoryInfo, marshal_VkBindImageMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkBindImageMemoryInfo, unmarshal_VkBindImageMemoryInfoKHR);
+
 #define OP_vkBindBufferMemory2KHR 20246
 #define OP_vkBindImageMemory2KHR 20247
 #endif
+#ifdef VK_KHR_portability_subset
+void marshal_VkPhysicalDevicePortabilitySubsetFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePortabilitySubsetFeaturesKHR* forMarshaling);
+
+void unmarshal_VkPhysicalDevicePortabilitySubsetFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevicePortabilitySubsetFeaturesKHR* forUnmarshaling);
+
+void marshal_VkPhysicalDevicePortabilitySubsetPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePortabilitySubsetPropertiesKHR* forMarshaling);
+
+void unmarshal_VkPhysicalDevicePortabilitySubsetPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevicePortabilitySubsetPropertiesKHR* forUnmarshaling);
+
+#endif
 #ifdef VK_KHR_maintenance3
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceMaintenance3Properties, marshal_VkPhysicalDeviceMaintenance3PropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceMaintenance3Properties, unmarshal_VkPhysicalDeviceMaintenance3PropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkDescriptorSetLayoutSupport, marshal_VkDescriptorSetLayoutSupportKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkDescriptorSetLayoutSupport, unmarshal_VkDescriptorSetLayoutSupportKHR);
+
 #define OP_vkGetDescriptorSetLayoutSupportKHR 20248
 #endif
 #ifdef VK_KHR_draw_indirect_count
 #define OP_vkCmdDrawIndirectCountKHR 20249
 #define OP_vkCmdDrawIndexedIndirectCountKHR 20250
 #endif
+#ifdef VK_KHR_shader_subgroup_extended_types
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures, marshal_VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures, unmarshal_VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR);
+
+#endif
 #ifdef VK_KHR_8bit_storage
-void marshal_VkPhysicalDevice8BitStorageFeaturesKHR(
-    VulkanStreamGuest* vkStream,
-    const VkPhysicalDevice8BitStorageFeaturesKHR* forMarshaling);
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDevice8BitStorageFeatures, marshal_VkPhysicalDevice8BitStorageFeaturesKHR);
 
-void unmarshal_VkPhysicalDevice8BitStorageFeaturesKHR(
-    VulkanStreamGuest* vkStream,
-    VkPhysicalDevice8BitStorageFeaturesKHR* forUnmarshaling);
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDevice8BitStorageFeatures, unmarshal_VkPhysicalDevice8BitStorageFeaturesKHR);
 
 #endif
+#ifdef VK_KHR_shader_atomic_int64
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceShaderAtomicInt64Features, marshal_VkPhysicalDeviceShaderAtomicInt64FeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceShaderAtomicInt64Features, unmarshal_VkPhysicalDeviceShaderAtomicInt64FeaturesKHR);
+
+#endif
+#ifdef VK_KHR_shader_clock
+void marshal_VkPhysicalDeviceShaderClockFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderClockFeaturesKHR* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceShaderClockFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderClockFeaturesKHR* forUnmarshaling);
+
+#endif
+#ifdef VK_KHR_driver_properties
+DEFINE_ALIAS_FUNCTION(marshal_VkConformanceVersion, marshal_VkConformanceVersionKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkConformanceVersion, unmarshal_VkConformanceVersionKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceDriverProperties, marshal_VkPhysicalDeviceDriverPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceDriverProperties, unmarshal_VkPhysicalDeviceDriverPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_shader_float_controls
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceFloatControlsProperties, marshal_VkPhysicalDeviceFloatControlsPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceFloatControlsProperties, unmarshal_VkPhysicalDeviceFloatControlsPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+DEFINE_ALIAS_FUNCTION(marshal_VkSubpassDescriptionDepthStencilResolve, marshal_VkSubpassDescriptionDepthStencilResolveKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkSubpassDescriptionDepthStencilResolve, unmarshal_VkSubpassDescriptionDepthStencilResolveKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceDepthStencilResolveProperties, marshal_VkPhysicalDeviceDepthStencilResolvePropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceDepthStencilResolveProperties, unmarshal_VkPhysicalDeviceDepthStencilResolvePropertiesKHR);
+
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceTimelineSemaphoreFeatures, marshal_VkPhysicalDeviceTimelineSemaphoreFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceTimelineSemaphoreFeatures, unmarshal_VkPhysicalDeviceTimelineSemaphoreFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceTimelineSemaphoreProperties, marshal_VkPhysicalDeviceTimelineSemaphorePropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceTimelineSemaphoreProperties, unmarshal_VkPhysicalDeviceTimelineSemaphorePropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkSemaphoreTypeCreateInfo, marshal_VkSemaphoreTypeCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkSemaphoreTypeCreateInfo, unmarshal_VkSemaphoreTypeCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkTimelineSemaphoreSubmitInfo, marshal_VkTimelineSemaphoreSubmitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkTimelineSemaphoreSubmitInfo, unmarshal_VkTimelineSemaphoreSubmitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkSemaphoreWaitInfo, marshal_VkSemaphoreWaitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkSemaphoreWaitInfo, unmarshal_VkSemaphoreWaitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkSemaphoreSignalInfo, marshal_VkSemaphoreSignalInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkSemaphoreSignalInfo, unmarshal_VkSemaphoreSignalInfoKHR);
+
+#define OP_vkGetSemaphoreCounterValueKHR 229059496
+#define OP_vkWaitSemaphoresKHR 263904357
+#define OP_vkSignalSemaphoreKHR 269919108
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceVulkanMemoryModelFeatures, marshal_VkPhysicalDeviceVulkanMemoryModelFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceVulkanMemoryModelFeatures, unmarshal_VkPhysicalDeviceVulkanMemoryModelFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+void marshal_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* forUnmarshaling);
+
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+void marshal_VkFragmentShadingRateAttachmentInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFragmentShadingRateAttachmentInfoKHR* forMarshaling);
+
+void unmarshal_VkFragmentShadingRateAttachmentInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkFragmentShadingRateAttachmentInfoKHR* forUnmarshaling);
+
+void marshal_VkPipelineFragmentShadingRateStateCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineFragmentShadingRateStateCreateInfoKHR* forMarshaling);
+
+void unmarshal_VkPipelineFragmentShadingRateStateCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineFragmentShadingRateStateCreateInfoKHR* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateFeaturesKHR* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentShadingRateFeaturesKHR* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRatePropertiesKHR* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentShadingRatePropertiesKHR* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceFragmentShadingRateKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateKHR* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceFragmentShadingRateKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentShadingRateKHR* forUnmarshaling);
+
+#define OP_vkGetPhysicalDeviceFragmentShadingRatesKHR 272978593
+#define OP_vkCmdSetFragmentShadingRateKHR 204060280
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+void marshal_VkSurfaceProtectedCapabilitiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceProtectedCapabilitiesKHR* forMarshaling);
+
+void unmarshal_VkSurfaceProtectedCapabilitiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSurfaceProtectedCapabilitiesKHR* forUnmarshaling);
+
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures, marshal_VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures, unmarshal_VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkAttachmentReferenceStencilLayout, marshal_VkAttachmentReferenceStencilLayoutKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkAttachmentReferenceStencilLayout, unmarshal_VkAttachmentReferenceStencilLayoutKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkAttachmentDescriptionStencilLayout, marshal_VkAttachmentDescriptionStencilLayoutKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkAttachmentDescriptionStencilLayout, unmarshal_VkAttachmentDescriptionStencilLayoutKHR);
+
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceUniformBufferStandardLayoutFeatures, marshal_VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceUniformBufferStandardLayoutFeatures, unmarshal_VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_buffer_device_address
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceBufferDeviceAddressFeatures, marshal_VkPhysicalDeviceBufferDeviceAddressFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceBufferDeviceAddressFeatures, unmarshal_VkPhysicalDeviceBufferDeviceAddressFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkBufferDeviceAddressInfo, marshal_VkBufferDeviceAddressInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkBufferDeviceAddressInfo, unmarshal_VkBufferDeviceAddressInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkBufferOpaqueCaptureAddressCreateInfo, marshal_VkBufferOpaqueCaptureAddressCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkBufferOpaqueCaptureAddressCreateInfo, unmarshal_VkBufferOpaqueCaptureAddressCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkMemoryOpaqueCaptureAddressAllocateInfo, marshal_VkMemoryOpaqueCaptureAddressAllocateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkMemoryOpaqueCaptureAddressAllocateInfo, unmarshal_VkMemoryOpaqueCaptureAddressAllocateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkDeviceMemoryOpaqueCaptureAddressInfo, marshal_VkDeviceMemoryOpaqueCaptureAddressInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkDeviceMemoryOpaqueCaptureAddressInfo, unmarshal_VkDeviceMemoryOpaqueCaptureAddressInfoKHR);
+
+#define OP_vkGetBufferDeviceAddressKHR 219261480
+#define OP_vkGetBufferOpaqueCaptureAddressKHR 285631711
+#define OP_vkGetDeviceMemoryOpaqueCaptureAddressKHR 294671624
+#endif
+#ifdef VK_KHR_deferred_host_operations
+#define OP_vkCreateDeferredOperationKHR 274342644
+#define OP_vkDestroyDeferredOperationKHR 215419514
+#define OP_vkGetDeferredOperationMaxConcurrencyKHR 203387076
+#define OP_vkGetDeferredOperationResultKHR 263822960
+#define OP_vkDeferredOperationJoinKHR 218492930
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+void marshal_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* forMarshaling);
+
+void unmarshal_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* forUnmarshaling);
+
+void marshal_VkPipelineInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineInfoKHR* forMarshaling);
+
+void unmarshal_VkPipelineInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineInfoKHR* forUnmarshaling);
+
+void marshal_VkPipelineExecutablePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutablePropertiesKHR* forMarshaling);
+
+void unmarshal_VkPipelineExecutablePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineExecutablePropertiesKHR* forUnmarshaling);
+
+void marshal_VkPipelineExecutableInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutableInfoKHR* forMarshaling);
+
+void unmarshal_VkPipelineExecutableInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineExecutableInfoKHR* forUnmarshaling);
+
+void marshal_VkPipelineExecutableStatisticValueKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutableStatisticValueKHR* forMarshaling);
+
+void unmarshal_VkPipelineExecutableStatisticValueKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineExecutableStatisticValueKHR* forUnmarshaling);
+
+void marshal_VkPipelineExecutableStatisticKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutableStatisticKHR* forMarshaling);
+
+void unmarshal_VkPipelineExecutableStatisticKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineExecutableStatisticKHR* forUnmarshaling);
+
+void marshal_VkPipelineExecutableInternalRepresentationKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutableInternalRepresentationKHR* forMarshaling);
+
+void unmarshal_VkPipelineExecutableInternalRepresentationKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineExecutableInternalRepresentationKHR* forUnmarshaling);
+
+#define OP_vkGetPipelineExecutablePropertiesKHR 269458798
+#define OP_vkGetPipelineExecutableStatisticsKHR 271191699
+#define OP_vkGetPipelineExecutableInternalRepresentationsKHR 274148497
+#endif
+#ifdef VK_KHR_pipeline_library
+void marshal_VkPipelineLibraryCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineLibraryCreateInfoKHR* forMarshaling);
+
+void unmarshal_VkPipelineLibraryCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineLibraryCreateInfoKHR* forUnmarshaling);
+
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+void marshal_VkBufferCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferCopy2KHR* forMarshaling);
+
+void unmarshal_VkBufferCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBufferCopy2KHR* forUnmarshaling);
+
+void marshal_VkCopyBufferInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyBufferInfo2KHR* forMarshaling);
+
+void unmarshal_VkCopyBufferInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCopyBufferInfo2KHR* forUnmarshaling);
+
+void marshal_VkImageCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageCopy2KHR* forMarshaling);
+
+void unmarshal_VkImageCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageCopy2KHR* forUnmarshaling);
+
+void marshal_VkCopyImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyImageInfo2KHR* forMarshaling);
+
+void unmarshal_VkCopyImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCopyImageInfo2KHR* forUnmarshaling);
+
+void marshal_VkBufferImageCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferImageCopy2KHR* forMarshaling);
+
+void unmarshal_VkBufferImageCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBufferImageCopy2KHR* forUnmarshaling);
+
+void marshal_VkCopyBufferToImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyBufferToImageInfo2KHR* forMarshaling);
+
+void unmarshal_VkCopyBufferToImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCopyBufferToImageInfo2KHR* forUnmarshaling);
+
+void marshal_VkCopyImageToBufferInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyImageToBufferInfo2KHR* forMarshaling);
+
+void unmarshal_VkCopyImageToBufferInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCopyImageToBufferInfo2KHR* forUnmarshaling);
+
+void marshal_VkImageBlit2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageBlit2KHR* forMarshaling);
+
+void unmarshal_VkImageBlit2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageBlit2KHR* forUnmarshaling);
+
+void marshal_VkBlitImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBlitImageInfo2KHR* forMarshaling);
+
+void unmarshal_VkBlitImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBlitImageInfo2KHR* forUnmarshaling);
+
+void marshal_VkImageResolve2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageResolve2KHR* forMarshaling);
+
+void unmarshal_VkImageResolve2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageResolve2KHR* forUnmarshaling);
+
+void marshal_VkResolveImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkResolveImageInfo2KHR* forMarshaling);
+
+void unmarshal_VkResolveImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkResolveImageInfo2KHR* forUnmarshaling);
+
+#define OP_vkCmdCopyBuffer2KHR 247893766
+#define OP_vkCmdCopyImage2KHR 227008250
+#define OP_vkCmdCopyBufferToImage2KHR 248841963
+#define OP_vkCmdCopyImageToBuffer2KHR 252249060
+#define OP_vkCmdBlitImage2KHR 259838288
+#define OP_vkCmdResolveImage2KHR 254857232
+#endif
 #ifdef VK_ANDROID_native_buffer
 void marshal_VkNativeBufferANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkNativeBufferANDROID* forMarshaling);
 
 void unmarshal_VkNativeBufferANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkNativeBufferANDROID* forUnmarshaling);
 
 #define OP_vkGetSwapchainGrallocUsageANDROID 20251
@@ -2350,10 +4115,12 @@
 #ifdef VK_EXT_debug_report
 void marshal_VkDebugReportCallbackCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDebugReportCallbackCreateInfoEXT* forMarshaling);
 
 void unmarshal_VkDebugReportCallbackCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDebugReportCallbackCreateInfoEXT* forUnmarshaling);
 
 #define OP_vkCreateDebugReportCallbackEXT 20254
@@ -2369,10 +4136,12 @@
 #ifdef VK_AMD_rasterization_order
 void marshal_VkPipelineRasterizationStateRasterizationOrderAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineRasterizationStateRasterizationOrderAMD* forMarshaling);
 
 void unmarshal_VkPipelineRasterizationStateRasterizationOrderAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineRasterizationStateRasterizationOrderAMD* forUnmarshaling);
 
 #endif
@@ -2383,26 +4152,32 @@
 #ifdef VK_EXT_debug_marker
 void marshal_VkDebugMarkerObjectNameInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDebugMarkerObjectNameInfoEXT* forMarshaling);
 
 void unmarshal_VkDebugMarkerObjectNameInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDebugMarkerObjectNameInfoEXT* forUnmarshaling);
 
 void marshal_VkDebugMarkerObjectTagInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDebugMarkerObjectTagInfoEXT* forMarshaling);
 
 void unmarshal_VkDebugMarkerObjectTagInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDebugMarkerObjectTagInfoEXT* forUnmarshaling);
 
 void marshal_VkDebugMarkerMarkerInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDebugMarkerMarkerInfoEXT* forMarshaling);
 
 void unmarshal_VkDebugMarkerMarkerInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDebugMarkerMarkerInfoEXT* forUnmarshaling);
 
 #define OP_vkDebugMarkerSetObjectTagEXT 20257
@@ -2416,29 +4191,97 @@
 #ifdef VK_NV_dedicated_allocation
 void marshal_VkDedicatedAllocationImageCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDedicatedAllocationImageCreateInfoNV* forMarshaling);
 
 void unmarshal_VkDedicatedAllocationImageCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDedicatedAllocationImageCreateInfoNV* forUnmarshaling);
 
 void marshal_VkDedicatedAllocationBufferCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDedicatedAllocationBufferCreateInfoNV* forMarshaling);
 
 void unmarshal_VkDedicatedAllocationBufferCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDedicatedAllocationBufferCreateInfoNV* forUnmarshaling);
 
 void marshal_VkDedicatedAllocationMemoryAllocateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDedicatedAllocationMemoryAllocateInfoNV* forMarshaling);
 
 void unmarshal_VkDedicatedAllocationMemoryAllocateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDedicatedAllocationMemoryAllocateInfoNV* forUnmarshaling);
 
 #endif
+#ifdef VK_EXT_transform_feedback
+void marshal_VkPhysicalDeviceTransformFeedbackFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTransformFeedbackFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceTransformFeedbackFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceTransformFeedbackFeaturesEXT* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceTransformFeedbackPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTransformFeedbackPropertiesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceTransformFeedbackPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceTransformFeedbackPropertiesEXT* forUnmarshaling);
+
+void marshal_VkPipelineRasterizationStateStreamCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateStreamCreateInfoEXT* forMarshaling);
+
+void unmarshal_VkPipelineRasterizationStateStreamCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineRasterizationStateStreamCreateInfoEXT* forUnmarshaling);
+
+#define OP_vkCmdBindTransformFeedbackBuffersEXT 267779978
+#define OP_vkCmdBeginTransformFeedbackEXT 294396901
+#define OP_vkCmdEndTransformFeedbackEXT 272333731
+#define OP_vkCmdBeginQueryIndexedEXT 275810601
+#define OP_vkCmdEndQueryIndexedEXT 279821337
+#define OP_vkCmdDrawIndirectByteCountEXT 285235943
+#endif
+#ifdef VK_NVX_image_view_handle
+void marshal_VkImageViewHandleInfoNVX(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageViewHandleInfoNVX* forMarshaling);
+
+void unmarshal_VkImageViewHandleInfoNVX(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageViewHandleInfoNVX* forUnmarshaling);
+
+void marshal_VkImageViewAddressPropertiesNVX(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageViewAddressPropertiesNVX* forMarshaling);
+
+void unmarshal_VkImageViewAddressPropertiesNVX(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageViewAddressPropertiesNVX* forUnmarshaling);
+
+#define OP_vkGetImageViewHandleNVX 204379647
+#define OP_vkGetImageViewAddressNVX 210668576
+#endif
 #ifdef VK_AMD_draw_indirect_count
 #define OP_vkCmdDrawIndirectCountAMD 20262
 #define OP_vkCmdDrawIndexedIndirectCountAMD 20263
@@ -2452,43 +4295,76 @@
 #ifdef VK_AMD_texture_gather_bias_lod
 void marshal_VkTextureLODGatherFormatPropertiesAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkTextureLODGatherFormatPropertiesAMD* forMarshaling);
 
 void unmarshal_VkTextureLODGatherFormatPropertiesAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkTextureLODGatherFormatPropertiesAMD* forUnmarshaling);
 
 #endif
 #ifdef VK_AMD_shader_info
 void marshal_VkShaderResourceUsageAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkShaderResourceUsageAMD* forMarshaling);
 
 void unmarshal_VkShaderResourceUsageAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkShaderResourceUsageAMD* forUnmarshaling);
 
 void marshal_VkShaderStatisticsInfoAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkShaderStatisticsInfoAMD* forMarshaling);
 
 void unmarshal_VkShaderStatisticsInfoAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkShaderStatisticsInfoAMD* forUnmarshaling);
 
 #define OP_vkGetShaderInfoAMD 20264
 #endif
 #ifdef VK_AMD_shader_image_load_store_lod
 #endif
+#ifdef VK_GGP_stream_descriptor_surface
+void marshal_VkStreamDescriptorSurfaceCreateInfoGGP(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkStreamDescriptorSurfaceCreateInfoGGP* forMarshaling);
+
+void unmarshal_VkStreamDescriptorSurfaceCreateInfoGGP(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkStreamDescriptorSurfaceCreateInfoGGP* forUnmarshaling);
+
+#define OP_vkCreateStreamDescriptorSurfaceGGP 241902685
+#endif
+#ifdef VK_NV_corner_sampled_image
+void marshal_VkPhysicalDeviceCornerSampledImageFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCornerSampledImageFeaturesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceCornerSampledImageFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceCornerSampledImageFeaturesNV* forUnmarshaling);
+
+#endif
 #ifdef VK_IMG_format_pvrtc
 #endif
 #ifdef VK_NV_external_memory_capabilities
 void marshal_VkExternalImageFormatPropertiesNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalImageFormatPropertiesNV* forMarshaling);
 
 void unmarshal_VkExternalImageFormatPropertiesNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalImageFormatPropertiesNV* forUnmarshaling);
 
 #define OP_vkGetPhysicalDeviceExternalImageFormatPropertiesNV 20265
@@ -2496,36 +4372,44 @@
 #ifdef VK_NV_external_memory
 void marshal_VkExternalMemoryImageCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalMemoryImageCreateInfoNV* forMarshaling);
 
 void unmarshal_VkExternalMemoryImageCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalMemoryImageCreateInfoNV* forUnmarshaling);
 
 void marshal_VkExportMemoryAllocateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExportMemoryAllocateInfoNV* forMarshaling);
 
 void unmarshal_VkExportMemoryAllocateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExportMemoryAllocateInfoNV* forUnmarshaling);
 
 #endif
 #ifdef VK_NV_external_memory_win32
 void marshal_VkImportMemoryWin32HandleInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportMemoryWin32HandleInfoNV* forMarshaling);
 
 void unmarshal_VkImportMemoryWin32HandleInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportMemoryWin32HandleInfoNV* forUnmarshaling);
 
 void marshal_VkExportMemoryWin32HandleInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExportMemoryWin32HandleInfoNV* forMarshaling);
 
 void unmarshal_VkExportMemoryWin32HandleInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExportMemoryWin32HandleInfoNV* forUnmarshaling);
 
 #define OP_vkGetMemoryWin32HandleNV 20266
@@ -2533,30 +4417,36 @@
 #ifdef VK_NV_win32_keyed_mutex
 void marshal_VkWin32KeyedMutexAcquireReleaseInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkWin32KeyedMutexAcquireReleaseInfoNV* forMarshaling);
 
 void unmarshal_VkWin32KeyedMutexAcquireReleaseInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkWin32KeyedMutexAcquireReleaseInfoNV* forUnmarshaling);
 
 #endif
 #ifdef VK_EXT_validation_flags
 void marshal_VkValidationFlagsEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkValidationFlagsEXT* forMarshaling);
 
 void unmarshal_VkValidationFlagsEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkValidationFlagsEXT* forUnmarshaling);
 
 #endif
 #ifdef VK_NN_vi_surface
 void marshal_VkViSurfaceCreateInfoNN(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkViSurfaceCreateInfoNN* forMarshaling);
 
 void unmarshal_VkViSurfaceCreateInfoNN(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkViSurfaceCreateInfoNN* forUnmarshaling);
 
 #define OP_vkCreateViSurfaceNN 20267
@@ -2565,172 +4455,93 @@
 #endif
 #ifdef VK_EXT_shader_subgroup_vote
 #endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+void marshal_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_astc_decode_mode
+void marshal_VkImageViewASTCDecodeModeEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageViewASTCDecodeModeEXT* forMarshaling);
+
+void unmarshal_VkImageViewASTCDecodeModeEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageViewASTCDecodeModeEXT* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceASTCDecodeFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceASTCDecodeFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceASTCDecodeFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceASTCDecodeFeaturesEXT* forUnmarshaling);
+
+#endif
 #ifdef VK_EXT_conditional_rendering
 void marshal_VkConditionalRenderingBeginInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkConditionalRenderingBeginInfoEXT* forMarshaling);
 
 void unmarshal_VkConditionalRenderingBeginInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkConditionalRenderingBeginInfoEXT* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceConditionalRenderingFeaturesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceConditionalRenderingFeaturesEXT* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceConditionalRenderingFeaturesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceConditionalRenderingFeaturesEXT* forUnmarshaling);
 
 void marshal_VkCommandBufferInheritanceConditionalRenderingInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkCommandBufferInheritanceConditionalRenderingInfoEXT* forMarshaling);
 
 void unmarshal_VkCommandBufferInheritanceConditionalRenderingInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkCommandBufferInheritanceConditionalRenderingInfoEXT* forUnmarshaling);
 
 #define OP_vkCmdBeginConditionalRenderingEXT 20268
 #define OP_vkCmdEndConditionalRenderingEXT 20269
 #endif
-#ifdef VK_NVX_device_generated_commands
-void marshal_VkDeviceGeneratedCommandsFeaturesNVX(
-    VulkanStreamGuest* vkStream,
-    const VkDeviceGeneratedCommandsFeaturesNVX* forMarshaling);
-
-void unmarshal_VkDeviceGeneratedCommandsFeaturesNVX(
-    VulkanStreamGuest* vkStream,
-    VkDeviceGeneratedCommandsFeaturesNVX* forUnmarshaling);
-
-void marshal_VkDeviceGeneratedCommandsLimitsNVX(
-    VulkanStreamGuest* vkStream,
-    const VkDeviceGeneratedCommandsLimitsNVX* forMarshaling);
-
-void unmarshal_VkDeviceGeneratedCommandsLimitsNVX(
-    VulkanStreamGuest* vkStream,
-    VkDeviceGeneratedCommandsLimitsNVX* forUnmarshaling);
-
-void marshal_VkIndirectCommandsTokenNVX(
-    VulkanStreamGuest* vkStream,
-    const VkIndirectCommandsTokenNVX* forMarshaling);
-
-void unmarshal_VkIndirectCommandsTokenNVX(
-    VulkanStreamGuest* vkStream,
-    VkIndirectCommandsTokenNVX* forUnmarshaling);
-
-void marshal_VkIndirectCommandsLayoutTokenNVX(
-    VulkanStreamGuest* vkStream,
-    const VkIndirectCommandsLayoutTokenNVX* forMarshaling);
-
-void unmarshal_VkIndirectCommandsLayoutTokenNVX(
-    VulkanStreamGuest* vkStream,
-    VkIndirectCommandsLayoutTokenNVX* forUnmarshaling);
-
-void marshal_VkIndirectCommandsLayoutCreateInfoNVX(
-    VulkanStreamGuest* vkStream,
-    const VkIndirectCommandsLayoutCreateInfoNVX* forMarshaling);
-
-void unmarshal_VkIndirectCommandsLayoutCreateInfoNVX(
-    VulkanStreamGuest* vkStream,
-    VkIndirectCommandsLayoutCreateInfoNVX* forUnmarshaling);
-
-void marshal_VkCmdProcessCommandsInfoNVX(
-    VulkanStreamGuest* vkStream,
-    const VkCmdProcessCommandsInfoNVX* forMarshaling);
-
-void unmarshal_VkCmdProcessCommandsInfoNVX(
-    VulkanStreamGuest* vkStream,
-    VkCmdProcessCommandsInfoNVX* forUnmarshaling);
-
-void marshal_VkCmdReserveSpaceForCommandsInfoNVX(
-    VulkanStreamGuest* vkStream,
-    const VkCmdReserveSpaceForCommandsInfoNVX* forMarshaling);
-
-void unmarshal_VkCmdReserveSpaceForCommandsInfoNVX(
-    VulkanStreamGuest* vkStream,
-    VkCmdReserveSpaceForCommandsInfoNVX* forUnmarshaling);
-
-void marshal_VkObjectTableCreateInfoNVX(
-    VulkanStreamGuest* vkStream,
-    const VkObjectTableCreateInfoNVX* forMarshaling);
-
-void unmarshal_VkObjectTableCreateInfoNVX(
-    VulkanStreamGuest* vkStream,
-    VkObjectTableCreateInfoNVX* forUnmarshaling);
-
-void marshal_VkObjectTableEntryNVX(
-    VulkanStreamGuest* vkStream,
-    const VkObjectTableEntryNVX* forMarshaling);
-
-void unmarshal_VkObjectTableEntryNVX(
-    VulkanStreamGuest* vkStream,
-    VkObjectTableEntryNVX* forUnmarshaling);
-
-void marshal_VkObjectTablePipelineEntryNVX(
-    VulkanStreamGuest* vkStream,
-    const VkObjectTablePipelineEntryNVX* forMarshaling);
-
-void unmarshal_VkObjectTablePipelineEntryNVX(
-    VulkanStreamGuest* vkStream,
-    VkObjectTablePipelineEntryNVX* forUnmarshaling);
-
-void marshal_VkObjectTableDescriptorSetEntryNVX(
-    VulkanStreamGuest* vkStream,
-    const VkObjectTableDescriptorSetEntryNVX* forMarshaling);
-
-void unmarshal_VkObjectTableDescriptorSetEntryNVX(
-    VulkanStreamGuest* vkStream,
-    VkObjectTableDescriptorSetEntryNVX* forUnmarshaling);
-
-void marshal_VkObjectTableVertexBufferEntryNVX(
-    VulkanStreamGuest* vkStream,
-    const VkObjectTableVertexBufferEntryNVX* forMarshaling);
-
-void unmarshal_VkObjectTableVertexBufferEntryNVX(
-    VulkanStreamGuest* vkStream,
-    VkObjectTableVertexBufferEntryNVX* forUnmarshaling);
-
-void marshal_VkObjectTableIndexBufferEntryNVX(
-    VulkanStreamGuest* vkStream,
-    const VkObjectTableIndexBufferEntryNVX* forMarshaling);
-
-void unmarshal_VkObjectTableIndexBufferEntryNVX(
-    VulkanStreamGuest* vkStream,
-    VkObjectTableIndexBufferEntryNVX* forUnmarshaling);
-
-void marshal_VkObjectTablePushConstantEntryNVX(
-    VulkanStreamGuest* vkStream,
-    const VkObjectTablePushConstantEntryNVX* forMarshaling);
-
-void unmarshal_VkObjectTablePushConstantEntryNVX(
-    VulkanStreamGuest* vkStream,
-    VkObjectTablePushConstantEntryNVX* forUnmarshaling);
-
-#define OP_vkCmdProcessCommandsNVX 20270
-#define OP_vkCmdReserveSpaceForCommandsNVX 20271
-#define OP_vkCreateIndirectCommandsLayoutNVX 20272
-#define OP_vkDestroyIndirectCommandsLayoutNVX 20273
-#define OP_vkCreateObjectTableNVX 20274
-#define OP_vkDestroyObjectTableNVX 20275
-#define OP_vkRegisterObjectsNVX 20276
-#define OP_vkUnregisterObjectsNVX 20277
-#define OP_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX 20278
-#endif
 #ifdef VK_NV_clip_space_w_scaling
 void marshal_VkViewportWScalingNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkViewportWScalingNV* forMarshaling);
 
 void unmarshal_VkViewportWScalingNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkViewportWScalingNV* forUnmarshaling);
 
 void marshal_VkPipelineViewportWScalingStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineViewportWScalingStateCreateInfoNV* forMarshaling);
 
 void unmarshal_VkPipelineViewportWScalingStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineViewportWScalingStateCreateInfoNV* forUnmarshaling);
 
 #define OP_vkCmdSetViewportWScalingNV 20279
@@ -2745,10 +4556,12 @@
 #ifdef VK_EXT_display_surface_counter
 void marshal_VkSurfaceCapabilities2EXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSurfaceCapabilities2EXT* forMarshaling);
 
 void unmarshal_VkSurfaceCapabilities2EXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSurfaceCapabilities2EXT* forUnmarshaling);
 
 #define OP_vkGetPhysicalDeviceSurfaceCapabilities2EXT 20283
@@ -2756,34 +4569,42 @@
 #ifdef VK_EXT_display_control
 void marshal_VkDisplayPowerInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayPowerInfoEXT* forMarshaling);
 
 void unmarshal_VkDisplayPowerInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayPowerInfoEXT* forUnmarshaling);
 
 void marshal_VkDeviceEventInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceEventInfoEXT* forMarshaling);
 
 void unmarshal_VkDeviceEventInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceEventInfoEXT* forUnmarshaling);
 
 void marshal_VkDisplayEventInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDisplayEventInfoEXT* forMarshaling);
 
 void unmarshal_VkDisplayEventInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDisplayEventInfoEXT* forUnmarshaling);
 
 void marshal_VkSwapchainCounterCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSwapchainCounterCreateInfoEXT* forMarshaling);
 
 void unmarshal_VkSwapchainCounterCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSwapchainCounterCreateInfoEXT* forUnmarshaling);
 
 #define OP_vkDisplayPowerControlEXT 20284
@@ -2794,34 +4615,42 @@
 #ifdef VK_GOOGLE_display_timing
 void marshal_VkRefreshCycleDurationGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkRefreshCycleDurationGOOGLE* forMarshaling);
 
 void unmarshal_VkRefreshCycleDurationGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkRefreshCycleDurationGOOGLE* forUnmarshaling);
 
 void marshal_VkPastPresentationTimingGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPastPresentationTimingGOOGLE* forMarshaling);
 
 void unmarshal_VkPastPresentationTimingGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPastPresentationTimingGOOGLE* forUnmarshaling);
 
 void marshal_VkPresentTimeGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPresentTimeGOOGLE* forMarshaling);
 
 void unmarshal_VkPresentTimeGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPresentTimeGOOGLE* forUnmarshaling);
 
 void marshal_VkPresentTimesInfoGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPresentTimesInfoGOOGLE* forMarshaling);
 
 void unmarshal_VkPresentTimesInfoGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPresentTimesInfoGOOGLE* forUnmarshaling);
 
 #define OP_vkGetRefreshCycleDurationGOOGLE 20288
@@ -2836,46 +4665,56 @@
 #ifdef VK_NVX_multiview_per_view_attributes
 void marshal_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX* forUnmarshaling);
 
 #endif
 #ifdef VK_NV_viewport_swizzle
 void marshal_VkViewportSwizzleNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkViewportSwizzleNV* forMarshaling);
 
 void unmarshal_VkViewportSwizzleNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkViewportSwizzleNV* forUnmarshaling);
 
 void marshal_VkPipelineViewportSwizzleStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineViewportSwizzleStateCreateInfoNV* forMarshaling);
 
 void unmarshal_VkPipelineViewportSwizzleStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineViewportSwizzleStateCreateInfoNV* forUnmarshaling);
 
 #endif
 #ifdef VK_EXT_discard_rectangles
 void marshal_VkPhysicalDeviceDiscardRectanglePropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceDiscardRectanglePropertiesEXT* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceDiscardRectanglePropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceDiscardRectanglePropertiesEXT* forUnmarshaling);
 
 void marshal_VkPipelineDiscardRectangleStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineDiscardRectangleStateCreateInfoEXT* forMarshaling);
 
 void unmarshal_VkPipelineDiscardRectangleStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineDiscardRectangleStateCreateInfoEXT* forUnmarshaling);
 
 #define OP_vkCmdSetDiscardRectangleEXT 20290
@@ -2883,38 +4722,68 @@
 #ifdef VK_EXT_conservative_rasterization
 void marshal_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceConservativeRasterizationPropertiesEXT* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceConservativeRasterizationPropertiesEXT* forUnmarshaling);
 
 void marshal_VkPipelineRasterizationConservativeStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineRasterizationConservativeStateCreateInfoEXT* forMarshaling);
 
 void unmarshal_VkPipelineRasterizationConservativeStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineRasterizationConservativeStateCreateInfoEXT* forUnmarshaling);
 
 #endif
+#ifdef VK_EXT_depth_clip_enable
+void marshal_VkPhysicalDeviceDepthClipEnableFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDepthClipEnableFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceDepthClipEnableFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDepthClipEnableFeaturesEXT* forUnmarshaling);
+
+void marshal_VkPipelineRasterizationDepthClipStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationDepthClipStateCreateInfoEXT* forMarshaling);
+
+void unmarshal_VkPipelineRasterizationDepthClipStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineRasterizationDepthClipStateCreateInfoEXT* forUnmarshaling);
+
+#endif
 #ifdef VK_EXT_swapchain_colorspace
 #endif
 #ifdef VK_EXT_hdr_metadata
 void marshal_VkXYColorEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkXYColorEXT* forMarshaling);
 
 void unmarshal_VkXYColorEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkXYColorEXT* forUnmarshaling);
 
 void marshal_VkHdrMetadataEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkHdrMetadataEXT* forMarshaling);
 
 void unmarshal_VkHdrMetadataEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkHdrMetadataEXT* forUnmarshaling);
 
 #define OP_vkSetHdrMetadataEXT 20291
@@ -2922,10 +4791,12 @@
 #ifdef VK_MVK_ios_surface
 void marshal_VkIOSSurfaceCreateInfoMVK(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkIOSSurfaceCreateInfoMVK* forMarshaling);
 
 void unmarshal_VkIOSSurfaceCreateInfoMVK(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkIOSSurfaceCreateInfoMVK* forUnmarshaling);
 
 #define OP_vkCreateIOSSurfaceMVK 20292
@@ -2933,59 +4804,79 @@
 #ifdef VK_MVK_macos_surface
 void marshal_VkMacOSSurfaceCreateInfoMVK(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMacOSSurfaceCreateInfoMVK* forMarshaling);
 
 void unmarshal_VkMacOSSurfaceCreateInfoMVK(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMacOSSurfaceCreateInfoMVK* forUnmarshaling);
 
 #define OP_vkCreateMacOSSurfaceMVK 20293
 #endif
+#ifdef VK_MVK_moltenvk
+#define OP_vkGetMTLDeviceMVK 20334
+#define OP_vkSetMTLTextureMVK 20335
+#define OP_vkGetMTLTextureMVK 20336
+#define OP_vkGetMTLBufferMVK 20337
+#define OP_vkUseIOSurfaceMVK 20338
+#define OP_vkGetIOSurfaceMVK 20339
+#endif
 #ifdef VK_EXT_external_memory_dma_buf
 #endif
 #ifdef VK_EXT_queue_family_foreign
 #endif
 #ifdef VK_EXT_debug_utils
-void marshal_VkDebugUtilsObjectNameInfoEXT(
-    VulkanStreamGuest* vkStream,
-    const VkDebugUtilsObjectNameInfoEXT* forMarshaling);
-
-void unmarshal_VkDebugUtilsObjectNameInfoEXT(
-    VulkanStreamGuest* vkStream,
-    VkDebugUtilsObjectNameInfoEXT* forUnmarshaling);
-
-void marshal_VkDebugUtilsObjectTagInfoEXT(
-    VulkanStreamGuest* vkStream,
-    const VkDebugUtilsObjectTagInfoEXT* forMarshaling);
-
-void unmarshal_VkDebugUtilsObjectTagInfoEXT(
-    VulkanStreamGuest* vkStream,
-    VkDebugUtilsObjectTagInfoEXT* forUnmarshaling);
-
 void marshal_VkDebugUtilsLabelEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDebugUtilsLabelEXT* forMarshaling);
 
 void unmarshal_VkDebugUtilsLabelEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDebugUtilsLabelEXT* forUnmarshaling);
 
+void marshal_VkDebugUtilsObjectNameInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugUtilsObjectNameInfoEXT* forMarshaling);
+
+void unmarshal_VkDebugUtilsObjectNameInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDebugUtilsObjectNameInfoEXT* forUnmarshaling);
+
 void marshal_VkDebugUtilsMessengerCallbackDataEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDebugUtilsMessengerCallbackDataEXT* forMarshaling);
 
 void unmarshal_VkDebugUtilsMessengerCallbackDataEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDebugUtilsMessengerCallbackDataEXT* forUnmarshaling);
 
 void marshal_VkDebugUtilsMessengerCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDebugUtilsMessengerCreateInfoEXT* forMarshaling);
 
 void unmarshal_VkDebugUtilsMessengerCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDebugUtilsMessengerCreateInfoEXT* forUnmarshaling);
 
+void marshal_VkDebugUtilsObjectTagInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugUtilsObjectTagInfoEXT* forMarshaling);
+
+void unmarshal_VkDebugUtilsObjectTagInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDebugUtilsObjectTagInfoEXT* forUnmarshaling);
+
 #define OP_vkSetDebugUtilsObjectNameEXT 20294
 #define OP_vkSetDebugUtilsObjectTagEXT 20295
 #define OP_vkQueueBeginDebugUtilsLabelEXT 20296
@@ -3001,71 +4892,75 @@
 #ifdef VK_ANDROID_external_memory_android_hardware_buffer
 void marshal_VkAndroidHardwareBufferUsageANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkAndroidHardwareBufferUsageANDROID* forMarshaling);
 
 void unmarshal_VkAndroidHardwareBufferUsageANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkAndroidHardwareBufferUsageANDROID* forUnmarshaling);
 
 void marshal_VkAndroidHardwareBufferPropertiesANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkAndroidHardwareBufferPropertiesANDROID* forMarshaling);
 
 void unmarshal_VkAndroidHardwareBufferPropertiesANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkAndroidHardwareBufferPropertiesANDROID* forUnmarshaling);
 
 void marshal_VkAndroidHardwareBufferFormatPropertiesANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkAndroidHardwareBufferFormatPropertiesANDROID* forMarshaling);
 
 void unmarshal_VkAndroidHardwareBufferFormatPropertiesANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkAndroidHardwareBufferFormatPropertiesANDROID* forUnmarshaling);
 
 void marshal_VkImportAndroidHardwareBufferInfoANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportAndroidHardwareBufferInfoANDROID* forMarshaling);
 
 void unmarshal_VkImportAndroidHardwareBufferInfoANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportAndroidHardwareBufferInfoANDROID* forUnmarshaling);
 
 void marshal_VkMemoryGetAndroidHardwareBufferInfoANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryGetAndroidHardwareBufferInfoANDROID* forMarshaling);
 
 void unmarshal_VkMemoryGetAndroidHardwareBufferInfoANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryGetAndroidHardwareBufferInfoANDROID* forUnmarshaling);
 
 void marshal_VkExternalFormatANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkExternalFormatANDROID* forMarshaling);
 
 void unmarshal_VkExternalFormatANDROID(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkExternalFormatANDROID* forUnmarshaling);
 
 #define OP_vkGetAndroidHardwareBufferPropertiesANDROID 20305
 #define OP_vkGetMemoryAndroidHardwareBufferANDROID 20306
 #endif
 #ifdef VK_EXT_sampler_filter_minmax
-void marshal_VkSamplerReductionModeCreateInfoEXT(
-    VulkanStreamGuest* vkStream,
-    const VkSamplerReductionModeCreateInfoEXT* forMarshaling);
+DEFINE_ALIAS_FUNCTION(marshal_VkSamplerReductionModeCreateInfo, marshal_VkSamplerReductionModeCreateInfoEXT);
 
-void unmarshal_VkSamplerReductionModeCreateInfoEXT(
-    VulkanStreamGuest* vkStream,
-    VkSamplerReductionModeCreateInfoEXT* forUnmarshaling);
+DEFINE_ALIAS_FUNCTION(unmarshal_VkSamplerReductionModeCreateInfo, unmarshal_VkSamplerReductionModeCreateInfoEXT);
 
-void marshal_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(
-    VulkanStreamGuest* vkStream,
-    const VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT* forMarshaling);
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceSamplerFilterMinmaxProperties, marshal_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT);
 
-void unmarshal_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(
-    VulkanStreamGuest* vkStream,
-    VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT* forUnmarshaling);
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceSamplerFilterMinmaxProperties, unmarshal_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT);
 
 #endif
 #ifdef VK_AMD_gpu_shader_int16
@@ -3074,71 +4969,129 @@
 #endif
 #ifdef VK_AMD_shader_fragment_mask
 #endif
+#ifdef VK_EXT_inline_uniform_block
+void marshal_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceInlineUniformBlockFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceInlineUniformBlockFeaturesEXT* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceInlineUniformBlockPropertiesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceInlineUniformBlockPropertiesEXT* forUnmarshaling);
+
+void marshal_VkWriteDescriptorSetInlineUniformBlockEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetInlineUniformBlockEXT* forMarshaling);
+
+void unmarshal_VkWriteDescriptorSetInlineUniformBlockEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkWriteDescriptorSetInlineUniformBlockEXT* forUnmarshaling);
+
+void marshal_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorPoolInlineUniformBlockCreateInfoEXT* forMarshaling);
+
+void unmarshal_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDescriptorPoolInlineUniformBlockCreateInfoEXT* forUnmarshaling);
+
+#endif
 #ifdef VK_EXT_shader_stencil_export
 #endif
 #ifdef VK_EXT_sample_locations
 void marshal_VkSampleLocationEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSampleLocationEXT* forMarshaling);
 
 void unmarshal_VkSampleLocationEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSampleLocationEXT* forUnmarshaling);
 
 void marshal_VkSampleLocationsInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSampleLocationsInfoEXT* forMarshaling);
 
 void unmarshal_VkSampleLocationsInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSampleLocationsInfoEXT* forUnmarshaling);
 
 void marshal_VkAttachmentSampleLocationsEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkAttachmentSampleLocationsEXT* forMarshaling);
 
 void unmarshal_VkAttachmentSampleLocationsEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkAttachmentSampleLocationsEXT* forUnmarshaling);
 
 void marshal_VkSubpassSampleLocationsEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkSubpassSampleLocationsEXT* forMarshaling);
 
 void unmarshal_VkSubpassSampleLocationsEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkSubpassSampleLocationsEXT* forUnmarshaling);
 
 void marshal_VkRenderPassSampleLocationsBeginInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkRenderPassSampleLocationsBeginInfoEXT* forMarshaling);
 
 void unmarshal_VkRenderPassSampleLocationsBeginInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkRenderPassSampleLocationsBeginInfoEXT* forUnmarshaling);
 
 void marshal_VkPipelineSampleLocationsStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineSampleLocationsStateCreateInfoEXT* forMarshaling);
 
 void unmarshal_VkPipelineSampleLocationsStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineSampleLocationsStateCreateInfoEXT* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceSampleLocationsPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceSampleLocationsPropertiesEXT* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceSampleLocationsPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceSampleLocationsPropertiesEXT* forUnmarshaling);
 
 void marshal_VkMultisamplePropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMultisamplePropertiesEXT* forMarshaling);
 
 void unmarshal_VkMultisamplePropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMultisamplePropertiesEXT* forUnmarshaling);
 
 #define OP_vkCmdSetSampleLocationsEXT 20307
@@ -3147,68 +5100,167 @@
 #ifdef VK_EXT_blend_operation_advanced
 void marshal_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT* forUnmarshaling);
 
 void marshal_VkPipelineColorBlendAdvancedStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineColorBlendAdvancedStateCreateInfoEXT* forMarshaling);
 
 void unmarshal_VkPipelineColorBlendAdvancedStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineColorBlendAdvancedStateCreateInfoEXT* forUnmarshaling);
 
 #endif
 #ifdef VK_NV_fragment_coverage_to_color
 void marshal_VkPipelineCoverageToColorStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineCoverageToColorStateCreateInfoNV* forMarshaling);
 
 void unmarshal_VkPipelineCoverageToColorStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineCoverageToColorStateCreateInfoNV* forUnmarshaling);
 
 #endif
 #ifdef VK_NV_framebuffer_mixed_samples
 void marshal_VkPipelineCoverageModulationStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineCoverageModulationStateCreateInfoNV* forMarshaling);
 
 void unmarshal_VkPipelineCoverageModulationStateCreateInfoNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineCoverageModulationStateCreateInfoNV* forUnmarshaling);
 
 #endif
 #ifdef VK_NV_fill_rectangle
 #endif
+#ifdef VK_NV_shader_sm_builtins
+void marshal_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* forUnmarshaling);
+
+#endif
 #ifdef VK_EXT_post_depth_coverage
 #endif
+#ifdef VK_EXT_image_drm_format_modifier
+void marshal_VkDrmFormatModifierPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrmFormatModifierPropertiesEXT* forMarshaling);
+
+void unmarshal_VkDrmFormatModifierPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDrmFormatModifierPropertiesEXT* forUnmarshaling);
+
+void marshal_VkDrmFormatModifierPropertiesListEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrmFormatModifierPropertiesListEXT* forMarshaling);
+
+void unmarshal_VkDrmFormatModifierPropertiesListEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDrmFormatModifierPropertiesListEXT* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageDrmFormatModifierInfoEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceImageDrmFormatModifierInfoEXT* forUnmarshaling);
+
+void marshal_VkImageDrmFormatModifierListCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierListCreateInfoEXT* forMarshaling);
+
+void unmarshal_VkImageDrmFormatModifierListCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageDrmFormatModifierListCreateInfoEXT* forUnmarshaling);
+
+void marshal_VkImageDrmFormatModifierExplicitCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierExplicitCreateInfoEXT* forMarshaling);
+
+void unmarshal_VkImageDrmFormatModifierExplicitCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageDrmFormatModifierExplicitCreateInfoEXT* forUnmarshaling);
+
+void marshal_VkImageDrmFormatModifierPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierPropertiesEXT* forMarshaling);
+
+void unmarshal_VkImageDrmFormatModifierPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImageDrmFormatModifierPropertiesEXT* forUnmarshaling);
+
+#define OP_vkGetImageDrmFormatModifierPropertiesEXT 251301237
+#endif
 #ifdef VK_EXT_validation_cache
 void marshal_VkValidationCacheCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkValidationCacheCreateInfoEXT* forMarshaling);
 
 void unmarshal_VkValidationCacheCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkValidationCacheCreateInfoEXT* forUnmarshaling);
 
 void marshal_VkShaderModuleValidationCacheCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkShaderModuleValidationCacheCreateInfoEXT* forMarshaling);
 
 void unmarshal_VkShaderModuleValidationCacheCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkShaderModuleValidationCacheCreateInfoEXT* forUnmarshaling);
 
 #define OP_vkCreateValidationCacheEXT 20309
@@ -3217,82 +5269,368 @@
 #define OP_vkGetValidationCacheDataEXT 20312
 #endif
 #ifdef VK_EXT_descriptor_indexing
-void marshal_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(
-    VulkanStreamGuest* vkStream,
-    const VkDescriptorSetLayoutBindingFlagsCreateInfoEXT* forMarshaling);
+DEFINE_ALIAS_FUNCTION(marshal_VkDescriptorSetLayoutBindingFlagsCreateInfo, marshal_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT);
 
-void unmarshal_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(
-    VulkanStreamGuest* vkStream,
-    VkDescriptorSetLayoutBindingFlagsCreateInfoEXT* forUnmarshaling);
+DEFINE_ALIAS_FUNCTION(unmarshal_VkDescriptorSetLayoutBindingFlagsCreateInfo, unmarshal_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT);
 
-void marshal_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(
-    VulkanStreamGuest* vkStream,
-    const VkPhysicalDeviceDescriptorIndexingFeaturesEXT* forMarshaling);
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceDescriptorIndexingFeatures, marshal_VkPhysicalDeviceDescriptorIndexingFeaturesEXT);
 
-void unmarshal_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(
-    VulkanStreamGuest* vkStream,
-    VkPhysicalDeviceDescriptorIndexingFeaturesEXT* forUnmarshaling);
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceDescriptorIndexingFeatures, unmarshal_VkPhysicalDeviceDescriptorIndexingFeaturesEXT);
 
-void marshal_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(
-    VulkanStreamGuest* vkStream,
-    const VkPhysicalDeviceDescriptorIndexingPropertiesEXT* forMarshaling);
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceDescriptorIndexingProperties, marshal_VkPhysicalDeviceDescriptorIndexingPropertiesEXT);
 
-void unmarshal_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(
-    VulkanStreamGuest* vkStream,
-    VkPhysicalDeviceDescriptorIndexingPropertiesEXT* forUnmarshaling);
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceDescriptorIndexingProperties, unmarshal_VkPhysicalDeviceDescriptorIndexingPropertiesEXT);
 
-void marshal_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(
-    VulkanStreamGuest* vkStream,
-    const VkDescriptorSetVariableDescriptorCountAllocateInfoEXT* forMarshaling);
+DEFINE_ALIAS_FUNCTION(marshal_VkDescriptorSetVariableDescriptorCountAllocateInfo, marshal_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT);
 
-void unmarshal_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(
-    VulkanStreamGuest* vkStream,
-    VkDescriptorSetVariableDescriptorCountAllocateInfoEXT* forUnmarshaling);
+DEFINE_ALIAS_FUNCTION(unmarshal_VkDescriptorSetVariableDescriptorCountAllocateInfo, unmarshal_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT);
 
-void marshal_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(
-    VulkanStreamGuest* vkStream,
-    const VkDescriptorSetVariableDescriptorCountLayoutSupportEXT* forMarshaling);
+DEFINE_ALIAS_FUNCTION(marshal_VkDescriptorSetVariableDescriptorCountLayoutSupport, marshal_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT);
 
-void unmarshal_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(
-    VulkanStreamGuest* vkStream,
-    VkDescriptorSetVariableDescriptorCountLayoutSupportEXT* forUnmarshaling);
+DEFINE_ALIAS_FUNCTION(unmarshal_VkDescriptorSetVariableDescriptorCountLayoutSupport, unmarshal_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT);
 
 #endif
 #ifdef VK_EXT_shader_viewport_index_layer
 #endif
+#ifdef VK_NV_shading_rate_image
+void marshal_VkShadingRatePaletteNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkShadingRatePaletteNV* forMarshaling);
+
+void unmarshal_VkShadingRatePaletteNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkShadingRatePaletteNV* forUnmarshaling);
+
+void marshal_VkPipelineViewportShadingRateImageStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportShadingRateImageStateCreateInfoNV* forMarshaling);
+
+void unmarshal_VkPipelineViewportShadingRateImageStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineViewportShadingRateImageStateCreateInfoNV* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceShadingRateImageFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShadingRateImageFeaturesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceShadingRateImageFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShadingRateImageFeaturesNV* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceShadingRateImagePropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShadingRateImagePropertiesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceShadingRateImagePropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShadingRateImagePropertiesNV* forUnmarshaling);
+
+void marshal_VkCoarseSampleLocationNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCoarseSampleLocationNV* forMarshaling);
+
+void unmarshal_VkCoarseSampleLocationNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCoarseSampleLocationNV* forUnmarshaling);
+
+void marshal_VkCoarseSampleOrderCustomNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCoarseSampleOrderCustomNV* forMarshaling);
+
+void unmarshal_VkCoarseSampleOrderCustomNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCoarseSampleOrderCustomNV* forUnmarshaling);
+
+void marshal_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* forMarshaling);
+
+void unmarshal_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* forUnmarshaling);
+
+#define OP_vkCmdBindShadingRateImageNV 238618340
+#define OP_vkCmdSetViewportShadingRatePaletteNV 215295078
+#define OP_vkCmdSetCoarseSampleOrderNV 236858637
+#endif
+#ifdef VK_NV_ray_tracing
+void marshal_VkRayTracingShaderGroupCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingShaderGroupCreateInfoNV* forMarshaling);
+
+void unmarshal_VkRayTracingShaderGroupCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRayTracingShaderGroupCreateInfoNV* forUnmarshaling);
+
+void marshal_VkRayTracingPipelineCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingPipelineCreateInfoNV* forMarshaling);
+
+void unmarshal_VkRayTracingPipelineCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRayTracingPipelineCreateInfoNV* forUnmarshaling);
+
+void marshal_VkGeometryTrianglesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeometryTrianglesNV* forMarshaling);
+
+void unmarshal_VkGeometryTrianglesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkGeometryTrianglesNV* forUnmarshaling);
+
+void marshal_VkGeometryAABBNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeometryAABBNV* forMarshaling);
+
+void unmarshal_VkGeometryAABBNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkGeometryAABBNV* forUnmarshaling);
+
+void marshal_VkGeometryDataNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeometryDataNV* forMarshaling);
+
+void unmarshal_VkGeometryDataNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkGeometryDataNV* forUnmarshaling);
+
+void marshal_VkGeometryNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeometryNV* forMarshaling);
+
+void unmarshal_VkGeometryNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkGeometryNV* forUnmarshaling);
+
+void marshal_VkAccelerationStructureInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureInfoNV* forMarshaling);
+
+void unmarshal_VkAccelerationStructureInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureInfoNV* forUnmarshaling);
+
+void marshal_VkAccelerationStructureCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureCreateInfoNV* forMarshaling);
+
+void unmarshal_VkAccelerationStructureCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureCreateInfoNV* forUnmarshaling);
+
+void marshal_VkBindAccelerationStructureMemoryInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindAccelerationStructureMemoryInfoNV* forMarshaling);
+
+void unmarshal_VkBindAccelerationStructureMemoryInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBindAccelerationStructureMemoryInfoNV* forUnmarshaling);
+
+void marshal_VkWriteDescriptorSetAccelerationStructureNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetAccelerationStructureNV* forMarshaling);
+
+void unmarshal_VkWriteDescriptorSetAccelerationStructureNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkWriteDescriptorSetAccelerationStructureNV* forUnmarshaling);
+
+void marshal_VkAccelerationStructureMemoryRequirementsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureMemoryRequirementsInfoNV* forMarshaling);
+
+void unmarshal_VkAccelerationStructureMemoryRequirementsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureMemoryRequirementsInfoNV* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceRayTracingPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPropertiesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceRayTracingPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceRayTracingPropertiesNV* forUnmarshaling);
+
+void marshal_VkTransformMatrixKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkTransformMatrixKHR* forMarshaling);
+
+void unmarshal_VkTransformMatrixKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkTransformMatrixKHR* forUnmarshaling);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkTransformMatrixKHR, marshal_VkTransformMatrixNV);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkTransformMatrixKHR, unmarshal_VkTransformMatrixNV);
+
+void marshal_VkAabbPositionsKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAabbPositionsKHR* forMarshaling);
+
+void unmarshal_VkAabbPositionsKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAabbPositionsKHR* forUnmarshaling);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkAabbPositionsKHR, marshal_VkAabbPositionsNV);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkAabbPositionsKHR, unmarshal_VkAabbPositionsNV);
+
+void marshal_VkAccelerationStructureInstanceKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureInstanceKHR* forMarshaling);
+
+void unmarshal_VkAccelerationStructureInstanceKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureInstanceKHR* forUnmarshaling);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkAccelerationStructureInstanceKHR, marshal_VkAccelerationStructureInstanceNV);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkAccelerationStructureInstanceKHR, unmarshal_VkAccelerationStructureInstanceNV);
+
+#define OP_vkCreateAccelerationStructureNV 259713020
+#define OP_vkDestroyAccelerationStructureNV 252775746
+#define OP_vkGetAccelerationStructureMemoryRequirementsNV 220234370
+#define OP_vkBindAccelerationStructureMemoryNV 202856743
+#define OP_vkCmdBuildAccelerationStructureNV 269050897
+#define OP_vkCmdCopyAccelerationStructureNV 211075498
+#define OP_vkCmdTraceRaysNV 210219912
+#define OP_vkCreateRayTracingPipelinesNV 203653638
+#define OP_vkGetRayTracingShaderGroupHandlesKHR 271962641
+#define OP_vkGetRayTracingShaderGroupHandlesNV 230045846
+#define OP_vkGetAccelerationStructureHandleNV 269898134
+#define OP_vkCmdWriteAccelerationStructuresPropertiesNV 207954431
+#define OP_vkCompileDeferredNV 278731610
+#endif
+#ifdef VK_NV_representative_fragment_test
+void marshal_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* forUnmarshaling);
+
+void marshal_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRepresentativeFragmentTestStateCreateInfoNV* forMarshaling);
+
+void unmarshal_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineRepresentativeFragmentTestStateCreateInfoNV* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_filter_cubic
+void marshal_VkPhysicalDeviceImageViewImageFormatInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageViewImageFormatInfoEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceImageViewImageFormatInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceImageViewImageFormatInfoEXT* forUnmarshaling);
+
+void marshal_VkFilterCubicImageViewImageFormatPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFilterCubicImageViewImageFormatPropertiesEXT* forMarshaling);
+
+void unmarshal_VkFilterCubicImageViewImageFormatPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkFilterCubicImageViewImageFormatPropertiesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
+#endif
 #ifdef VK_EXT_global_priority
 void marshal_VkDeviceQueueGlobalPriorityCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkDeviceQueueGlobalPriorityCreateInfoEXT* forMarshaling);
 
 void unmarshal_VkDeviceQueueGlobalPriorityCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkDeviceQueueGlobalPriorityCreateInfoEXT* forUnmarshaling);
 
 #endif
 #ifdef VK_EXT_external_memory_host
 void marshal_VkImportMemoryHostPointerInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportMemoryHostPointerInfoEXT* forMarshaling);
 
 void unmarshal_VkImportMemoryHostPointerInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportMemoryHostPointerInfoEXT* forUnmarshaling);
 
 void marshal_VkMemoryHostPointerPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkMemoryHostPointerPropertiesEXT* forMarshaling);
 
 void unmarshal_VkMemoryHostPointerPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkMemoryHostPointerPropertiesEXT* forUnmarshaling);
 
 void marshal_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceExternalMemoryHostPropertiesEXT* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceExternalMemoryHostPropertiesEXT* forUnmarshaling);
 
 #define OP_vkGetMemoryHostPointerPropertiesEXT 20313
@@ -3300,108 +5638,1680 @@
 #ifdef VK_AMD_buffer_marker
 #define OP_vkCmdWriteBufferMarkerAMD 20314
 #endif
+#ifdef VK_AMD_pipeline_compiler_control
+void marshal_VkPipelineCompilerControlCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCompilerControlCreateInfoAMD* forMarshaling);
+
+void unmarshal_VkPipelineCompilerControlCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineCompilerControlCreateInfoAMD* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+void marshal_VkCalibratedTimestampInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCalibratedTimestampInfoEXT* forMarshaling);
+
+void unmarshal_VkCalibratedTimestampInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCalibratedTimestampInfoEXT* forUnmarshaling);
+
+#define OP_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT 295643221
+#define OP_vkGetCalibratedTimestampsEXT 203583186
+#endif
 #ifdef VK_AMD_shader_core_properties
 void marshal_VkPhysicalDeviceShaderCorePropertiesAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceShaderCorePropertiesAMD* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceShaderCorePropertiesAMD(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceShaderCorePropertiesAMD* forUnmarshaling);
 
 #endif
+#ifdef VK_AMD_memory_overallocation_behavior
+void marshal_VkDeviceMemoryOverallocationCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceMemoryOverallocationCreateInfoAMD* forMarshaling);
+
+void unmarshal_VkDeviceMemoryOverallocationCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDeviceMemoryOverallocationCreateInfoAMD* forUnmarshaling);
+
+#endif
 #ifdef VK_EXT_vertex_attribute_divisor
 void marshal_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT* forMarshaling);
 
 void unmarshal_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT* forUnmarshaling);
 
 void marshal_VkVertexInputBindingDivisorDescriptionEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkVertexInputBindingDivisorDescriptionEXT* forMarshaling);
 
 void unmarshal_VkVertexInputBindingDivisorDescriptionEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkVertexInputBindingDivisorDescriptionEXT* forUnmarshaling);
 
 void marshal_VkPipelineVertexInputDivisorStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkPipelineVertexInputDivisorStateCreateInfoEXT* forMarshaling);
 
 void unmarshal_VkPipelineVertexInputDivisorStateCreateInfoEXT(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkPipelineVertexInputDivisorStateCreateInfoEXT* forUnmarshaling);
 
+void marshal_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_GGP_frame_token
+void marshal_VkPresentFrameTokenGGP(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPresentFrameTokenGGP* forMarshaling);
+
+void unmarshal_VkPresentFrameTokenGGP(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPresentFrameTokenGGP* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+void marshal_VkPipelineCreationFeedbackEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCreationFeedbackEXT* forMarshaling);
+
+void unmarshal_VkPipelineCreationFeedbackEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineCreationFeedbackEXT* forUnmarshaling);
+
+void marshal_VkPipelineCreationFeedbackCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCreationFeedbackCreateInfoEXT* forMarshaling);
+
+void unmarshal_VkPipelineCreationFeedbackCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineCreationFeedbackCreateInfoEXT* forUnmarshaling);
+
 #endif
 #ifdef VK_NV_shader_subgroup_partitioned
 #endif
+#ifdef VK_NV_compute_shader_derivatives
+void marshal_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* forUnmarshaling);
+
+#endif
+#ifdef VK_NV_mesh_shader
+void marshal_VkPhysicalDeviceMeshShaderFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMeshShaderFeaturesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceMeshShaderFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceMeshShaderFeaturesNV* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceMeshShaderPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMeshShaderPropertiesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceMeshShaderPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceMeshShaderPropertiesNV* forUnmarshaling);
+
+void marshal_VkDrawMeshTasksIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrawMeshTasksIndirectCommandNV* forMarshaling);
+
+void unmarshal_VkDrawMeshTasksIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDrawMeshTasksIndirectCommandNV* forUnmarshaling);
+
+#define OP_vkCmdDrawMeshTasksNV 207334931
+#define OP_vkCmdDrawMeshTasksIndirectNV 274079208
+#define OP_vkCmdDrawMeshTasksIndirectCountNV 223801967
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+void marshal_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* forUnmarshaling);
+
+#endif
+#ifdef VK_NV_shader_image_footprint
+void marshal_VkPhysicalDeviceShaderImageFootprintFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderImageFootprintFeaturesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceShaderImageFootprintFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderImageFootprintFeaturesNV* forUnmarshaling);
+
+#endif
+#ifdef VK_NV_scissor_exclusive
+void marshal_VkPipelineViewportExclusiveScissorStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportExclusiveScissorStateCreateInfoNV* forMarshaling);
+
+void unmarshal_VkPipelineViewportExclusiveScissorStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineViewportExclusiveScissorStateCreateInfoNV* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceExclusiveScissorFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExclusiveScissorFeaturesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceExclusiveScissorFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceExclusiveScissorFeaturesNV* forUnmarshaling);
+
+#define OP_vkCmdSetExclusiveScissorNV 225408194
+#endif
 #ifdef VK_NV_device_diagnostic_checkpoints
 void marshal_VkQueueFamilyCheckpointPropertiesNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkQueueFamilyCheckpointPropertiesNV* forMarshaling);
 
 void unmarshal_VkQueueFamilyCheckpointPropertiesNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkQueueFamilyCheckpointPropertiesNV* forUnmarshaling);
 
 void marshal_VkCheckpointDataNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkCheckpointDataNV* forMarshaling);
 
 void unmarshal_VkCheckpointDataNV(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkCheckpointDataNV* forUnmarshaling);
 
 #define OP_vkCmdSetCheckpointNV 20315
 #define OP_vkGetQueueCheckpointDataNV 20316
 #endif
-#ifdef VK_GOOGLE_address_space
-#define OP_vkMapMemoryIntoAddressSpaceGOOGLE 20317
+#ifdef VK_INTEL_shader_integer_functions2
+void marshal_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* forUnmarshaling);
+
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_INTEL_performance_query
+void marshal_VkPerformanceValueDataINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceValueDataINTEL* forMarshaling);
+
+void unmarshal_VkPerformanceValueDataINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceValueDataINTEL* forUnmarshaling);
+
+void marshal_VkPerformanceValueINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceValueINTEL* forMarshaling);
+
+void unmarshal_VkPerformanceValueINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceValueINTEL* forUnmarshaling);
+
+void marshal_VkInitializePerformanceApiInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkInitializePerformanceApiInfoINTEL* forMarshaling);
+
+void unmarshal_VkInitializePerformanceApiInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkInitializePerformanceApiInfoINTEL* forUnmarshaling);
+
+void marshal_VkQueryPoolPerformanceQueryCreateInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkQueryPoolPerformanceQueryCreateInfoINTEL* forMarshaling);
+
+void unmarshal_VkQueryPoolPerformanceQueryCreateInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkQueryPoolPerformanceQueryCreateInfoINTEL* forUnmarshaling);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkQueryPoolPerformanceQueryCreateInfoINTEL, marshal_VkQueryPoolCreateInfoINTEL);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkQueryPoolPerformanceQueryCreateInfoINTEL, unmarshal_VkQueryPoolCreateInfoINTEL);
+
+void marshal_VkPerformanceMarkerInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceMarkerInfoINTEL* forMarshaling);
+
+void unmarshal_VkPerformanceMarkerInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceMarkerInfoINTEL* forUnmarshaling);
+
+void marshal_VkPerformanceStreamMarkerInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceStreamMarkerInfoINTEL* forMarshaling);
+
+void unmarshal_VkPerformanceStreamMarkerInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceStreamMarkerInfoINTEL* forUnmarshaling);
+
+void marshal_VkPerformanceOverrideInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceOverrideInfoINTEL* forMarshaling);
+
+void unmarshal_VkPerformanceOverrideInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceOverrideInfoINTEL* forUnmarshaling);
+
+void marshal_VkPerformanceConfigurationAcquireInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceConfigurationAcquireInfoINTEL* forMarshaling);
+
+void unmarshal_VkPerformanceConfigurationAcquireInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPerformanceConfigurationAcquireInfoINTEL* forUnmarshaling);
+
+#define OP_vkInitializePerformanceApiINTEL 203336121
+#define OP_vkUninitializePerformanceApiINTEL 296137321
+#define OP_vkCmdSetPerformanceMarkerINTEL 270016385
+#define OP_vkCmdSetPerformanceStreamMarkerINTEL 261519634
+#define OP_vkCmdSetPerformanceOverrideINTEL 251310287
+#define OP_vkAcquirePerformanceConfigurationINTEL 245737492
+#define OP_vkReleasePerformanceConfigurationINTEL 252877217
+#define OP_vkQueueSetPerformanceConfigurationINTEL 294947726
+#define OP_vkGetPerformanceParameterINTEL 213620482
+#endif
+#ifdef VK_EXT_pci_bus_info
+void marshal_VkPhysicalDevicePCIBusInfoPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePCIBusInfoPropertiesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDevicePCIBusInfoPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevicePCIBusInfoPropertiesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_AMD_display_native_hdr
+void marshal_VkDisplayNativeHdrSurfaceCapabilitiesAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayNativeHdrSurfaceCapabilitiesAMD* forMarshaling);
+
+void unmarshal_VkDisplayNativeHdrSurfaceCapabilitiesAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDisplayNativeHdrSurfaceCapabilitiesAMD* forUnmarshaling);
+
+void marshal_VkSwapchainDisplayNativeHdrCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSwapchainDisplayNativeHdrCreateInfoAMD* forMarshaling);
+
+void unmarshal_VkSwapchainDisplayNativeHdrCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSwapchainDisplayNativeHdrCreateInfoAMD* forUnmarshaling);
+
+#define OP_vkSetLocalDimmingAMD 267533472
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+void marshal_VkImagePipeSurfaceCreateInfoFUCHSIA(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImagePipeSurfaceCreateInfoFUCHSIA* forMarshaling);
+
+void unmarshal_VkImagePipeSurfaceCreateInfoFUCHSIA(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImagePipeSurfaceCreateInfoFUCHSIA* forUnmarshaling);
+
+#define OP_vkCreateImagePipeSurfaceFUCHSIA 261626137
+#endif
+#ifdef VK_EXT_metal_surface
+void marshal_VkMetalSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMetalSurfaceCreateInfoEXT* forMarshaling);
+
+void unmarshal_VkMetalSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkMetalSurfaceCreateInfoEXT* forUnmarshaling);
+
+#define OP_vkCreateMetalSurfaceEXT 254915953
+#endif
+#ifdef VK_EXT_fragment_density_map
+void marshal_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMapFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentDensityMapFeaturesEXT* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMapPropertiesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentDensityMapPropertiesEXT* forUnmarshaling);
+
+void marshal_VkRenderPassFragmentDensityMapCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassFragmentDensityMapCreateInfoEXT* forMarshaling);
+
+void unmarshal_VkRenderPassFragmentDensityMapCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRenderPassFragmentDensityMapCreateInfoEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_scalar_block_layout
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceScalarBlockLayoutFeatures, marshal_VkPhysicalDeviceScalarBlockLayoutFeaturesEXT);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceScalarBlockLayoutFeatures, unmarshal_VkPhysicalDeviceScalarBlockLayoutFeaturesEXT);
+
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+void marshal_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* forUnmarshaling);
+
+void marshal_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* forMarshaling);
+
+void unmarshal_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_AMD_shader_core_properties2
+void marshal_VkPhysicalDeviceShaderCoreProperties2AMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderCoreProperties2AMD* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceShaderCoreProperties2AMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderCoreProperties2AMD* forUnmarshaling);
+
+#endif
+#ifdef VK_AMD_device_coherent_memory
+void marshal_VkPhysicalDeviceCoherentMemoryFeaturesAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCoherentMemoryFeaturesAMD* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceCoherentMemoryFeaturesAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceCoherentMemoryFeaturesAMD* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+void marshal_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_memory_budget
+void marshal_VkPhysicalDeviceMemoryBudgetPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryBudgetPropertiesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceMemoryBudgetPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceMemoryBudgetPropertiesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_memory_priority
+void marshal_VkPhysicalDeviceMemoryPriorityFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryPriorityFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceMemoryPriorityFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceMemoryPriorityFeaturesEXT* forUnmarshaling);
+
+void marshal_VkMemoryPriorityAllocateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryPriorityAllocateInfoEXT* forMarshaling);
+
+void unmarshal_VkMemoryPriorityAllocateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkMemoryPriorityAllocateInfoEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+void marshal_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_buffer_device_address
+void marshal_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* forUnmarshaling);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT, marshal_VkPhysicalDeviceBufferAddressFeaturesEXT);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT, unmarshal_VkPhysicalDeviceBufferAddressFeaturesEXT);
+
+DEFINE_ALIAS_FUNCTION(marshal_VkBufferDeviceAddressInfo, marshal_VkBufferDeviceAddressInfoEXT);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkBufferDeviceAddressInfo, unmarshal_VkBufferDeviceAddressInfoEXT);
+
+void marshal_VkBufferDeviceAddressCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferDeviceAddressCreateInfoEXT* forMarshaling);
+
+void unmarshal_VkBufferDeviceAddressCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBufferDeviceAddressCreateInfoEXT* forUnmarshaling);
+
+#define OP_vkGetBufferDeviceAddressEXT 224361693
+#endif
+#ifdef VK_EXT_tooling_info
+void marshal_VkPhysicalDeviceToolPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceToolPropertiesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceToolPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceToolPropertiesEXT* forUnmarshaling);
+
+#define OP_vkGetPhysicalDeviceToolPropertiesEXT 282247593
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+DEFINE_ALIAS_FUNCTION(marshal_VkImageStencilUsageCreateInfo, marshal_VkImageStencilUsageCreateInfoEXT);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkImageStencilUsageCreateInfo, unmarshal_VkImageStencilUsageCreateInfoEXT);
+
+#endif
+#ifdef VK_EXT_validation_features
+void marshal_VkValidationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkValidationFeaturesEXT* forMarshaling);
+
+void unmarshal_VkValidationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkValidationFeaturesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_NV_cooperative_matrix
+void marshal_VkCooperativeMatrixPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCooperativeMatrixPropertiesNV* forMarshaling);
+
+void unmarshal_VkCooperativeMatrixPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCooperativeMatrixPropertiesNV* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceCooperativeMatrixFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCooperativeMatrixFeaturesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceCooperativeMatrixFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceCooperativeMatrixFeaturesNV* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceCooperativeMatrixPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCooperativeMatrixPropertiesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceCooperativeMatrixPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceCooperativeMatrixPropertiesNV* forUnmarshaling);
+
+#define OP_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV 287711429
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+void marshal_VkPhysicalDeviceCoverageReductionModeFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCoverageReductionModeFeaturesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceCoverageReductionModeFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceCoverageReductionModeFeaturesNV* forUnmarshaling);
+
+void marshal_VkPipelineCoverageReductionStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCoverageReductionStateCreateInfoNV* forMarshaling);
+
+void unmarshal_VkPipelineCoverageReductionStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineCoverageReductionStateCreateInfoNV* forUnmarshaling);
+
+void marshal_VkFramebufferMixedSamplesCombinationNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFramebufferMixedSamplesCombinationNV* forMarshaling);
+
+void unmarshal_VkFramebufferMixedSamplesCombinationNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkFramebufferMixedSamplesCombinationNV* forUnmarshaling);
+
+#define OP_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV 292032159
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+void marshal_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+void marshal_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+void marshal_VkSurfaceFullScreenExclusiveInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceFullScreenExclusiveInfoEXT* forMarshaling);
+
+void unmarshal_VkSurfaceFullScreenExclusiveInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSurfaceFullScreenExclusiveInfoEXT* forUnmarshaling);
+
+void marshal_VkSurfaceCapabilitiesFullScreenExclusiveEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceCapabilitiesFullScreenExclusiveEXT* forMarshaling);
+
+void unmarshal_VkSurfaceCapabilitiesFullScreenExclusiveEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSurfaceCapabilitiesFullScreenExclusiveEXT* forUnmarshaling);
+
+#define OP_vkGetPhysicalDeviceSurfacePresentModes2EXT 268126279
+#define OP_vkAcquireFullScreenExclusiveModeEXT 200946668
+#define OP_vkReleaseFullScreenExclusiveModeEXT 257629142
+void marshal_VkSurfaceFullScreenExclusiveWin32InfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceFullScreenExclusiveWin32InfoEXT* forMarshaling);
+
+void unmarshal_VkSurfaceFullScreenExclusiveWin32InfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSurfaceFullScreenExclusiveWin32InfoEXT* forUnmarshaling);
+
+#define OP_vkGetDeviceGroupSurfacePresentModes2EXT 206369543
+#endif
+#ifdef VK_EXT_headless_surface
+void marshal_VkHeadlessSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkHeadlessSurfaceCreateInfoEXT* forMarshaling);
+
+void unmarshal_VkHeadlessSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkHeadlessSurfaceCreateInfoEXT* forUnmarshaling);
+
+#define OP_vkCreateHeadlessSurfaceEXT 298411290
+#endif
+#ifdef VK_EXT_line_rasterization
+void marshal_VkPhysicalDeviceLineRasterizationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLineRasterizationFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceLineRasterizationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceLineRasterizationFeaturesEXT* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceLineRasterizationPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLineRasterizationPropertiesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceLineRasterizationPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceLineRasterizationPropertiesEXT* forUnmarshaling);
+
+void marshal_VkPipelineRasterizationLineStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationLineStateCreateInfoEXT* forMarshaling);
+
+void unmarshal_VkPipelineRasterizationLineStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineRasterizationLineStateCreateInfoEXT* forUnmarshaling);
+
+#define OP_vkCmdSetLineStippleEXT 263855692
+#endif
+#ifdef VK_EXT_shader_atomic_float
+void marshal_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_host_query_reset
+DEFINE_ALIAS_FUNCTION(marshal_VkPhysicalDeviceHostQueryResetFeatures, marshal_VkPhysicalDeviceHostQueryResetFeaturesEXT);
+
+DEFINE_ALIAS_FUNCTION(unmarshal_VkPhysicalDeviceHostQueryResetFeatures, unmarshal_VkPhysicalDeviceHostQueryResetFeaturesEXT);
+
+#define OP_vkResetQueryPoolEXT 242995959
+#endif
+#ifdef VK_EXT_index_type_uint8
+void marshal_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceIndexTypeUint8FeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceIndexTypeUint8FeaturesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+void marshal_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* forUnmarshaling);
+
+#define OP_vkCmdSetCullModeEXT 266285895
+#define OP_vkCmdSetFrontFaceEXT 212644406
+#define OP_vkCmdSetPrimitiveTopologyEXT 260783979
+#define OP_vkCmdSetViewportWithCountEXT 257105245
+#define OP_vkCmdSetScissorWithCountEXT 204588120
+#define OP_vkCmdBindVertexBuffers2EXT 243419921
+#define OP_vkCmdSetDepthTestEnableEXT 233771166
+#define OP_vkCmdSetDepthWriteEnableEXT 218663304
+#define OP_vkCmdSetDepthCompareOpEXT 247761589
+#define OP_vkCmdSetDepthBoundsTestEnableEXT 223213519
+#define OP_vkCmdSetStencilTestEnableEXT 286438749
+#define OP_vkCmdSetStencilOpEXT 277159578
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+void marshal_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_NV_device_generated_commands
+void marshal_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* forUnmarshaling);
+
+void marshal_VkGraphicsShaderGroupCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGraphicsShaderGroupCreateInfoNV* forMarshaling);
+
+void unmarshal_VkGraphicsShaderGroupCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkGraphicsShaderGroupCreateInfoNV* forUnmarshaling);
+
+void marshal_VkGraphicsPipelineShaderGroupsCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGraphicsPipelineShaderGroupsCreateInfoNV* forMarshaling);
+
+void unmarshal_VkGraphicsPipelineShaderGroupsCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkGraphicsPipelineShaderGroupsCreateInfoNV* forUnmarshaling);
+
+void marshal_VkBindShaderGroupIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindShaderGroupIndirectCommandNV* forMarshaling);
+
+void unmarshal_VkBindShaderGroupIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBindShaderGroupIndirectCommandNV* forUnmarshaling);
+
+void marshal_VkBindIndexBufferIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindIndexBufferIndirectCommandNV* forMarshaling);
+
+void unmarshal_VkBindIndexBufferIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBindIndexBufferIndirectCommandNV* forUnmarshaling);
+
+void marshal_VkBindVertexBufferIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindVertexBufferIndirectCommandNV* forMarshaling);
+
+void unmarshal_VkBindVertexBufferIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkBindVertexBufferIndirectCommandNV* forUnmarshaling);
+
+void marshal_VkSetStateFlagsIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSetStateFlagsIndirectCommandNV* forMarshaling);
+
+void unmarshal_VkSetStateFlagsIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSetStateFlagsIndirectCommandNV* forUnmarshaling);
+
+void marshal_VkIndirectCommandsStreamNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkIndirectCommandsStreamNV* forMarshaling);
+
+void unmarshal_VkIndirectCommandsStreamNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkIndirectCommandsStreamNV* forUnmarshaling);
+
+void marshal_VkIndirectCommandsLayoutTokenNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkIndirectCommandsLayoutTokenNV* forMarshaling);
+
+void unmarshal_VkIndirectCommandsLayoutTokenNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkIndirectCommandsLayoutTokenNV* forUnmarshaling);
+
+void marshal_VkIndirectCommandsLayoutCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkIndirectCommandsLayoutCreateInfoNV* forMarshaling);
+
+void unmarshal_VkIndirectCommandsLayoutCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkIndirectCommandsLayoutCreateInfoNV* forUnmarshaling);
+
+void marshal_VkGeneratedCommandsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeneratedCommandsInfoNV* forMarshaling);
+
+void unmarshal_VkGeneratedCommandsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkGeneratedCommandsInfoNV* forUnmarshaling);
+
+void marshal_VkGeneratedCommandsMemoryRequirementsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeneratedCommandsMemoryRequirementsInfoNV* forMarshaling);
+
+void unmarshal_VkGeneratedCommandsMemoryRequirementsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkGeneratedCommandsMemoryRequirementsInfoNV* forUnmarshaling);
+
+#define OP_vkGetGeneratedCommandsMemoryRequirementsNV 249047049
+#define OP_vkCmdPreprocessGeneratedCommandsNV 297624330
+#define OP_vkCmdExecuteGeneratedCommandsNV 234711184
+#define OP_vkCmdBindPipelineShaderGroupNV 270362239
+#define OP_vkCreateIndirectCommandsLayoutNV 285310710
+#define OP_vkDestroyIndirectCommandsLayoutNV 292584135
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+void marshal_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_QCOM_render_pass_transform
+void marshal_VkRenderPassTransformBeginInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassTransformBeginInfoQCOM* forMarshaling);
+
+void unmarshal_VkRenderPassTransformBeginInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRenderPassTransformBeginInfoQCOM* forUnmarshaling);
+
+void marshal_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceRenderPassTransformInfoQCOM* forMarshaling);
+
+void unmarshal_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCommandBufferInheritanceRenderPassTransformInfoQCOM* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_device_memory_report
+void marshal_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* forUnmarshaling);
+
+void marshal_VkDeviceMemoryReportCallbackDataEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceMemoryReportCallbackDataEXT* forMarshaling);
+
+void unmarshal_VkDeviceMemoryReportCallbackDataEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDeviceMemoryReportCallbackDataEXT* forUnmarshaling);
+
+void marshal_VkDeviceDeviceMemoryReportCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceDeviceMemoryReportCreateInfoEXT* forMarshaling);
+
+void unmarshal_VkDeviceDeviceMemoryReportCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDeviceDeviceMemoryReportCreateInfoEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_robustness2
+void marshal_VkPhysicalDeviceRobustness2FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRobustness2FeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceRobustness2FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceRobustness2FeaturesEXT* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceRobustness2PropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRobustness2PropertiesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceRobustness2PropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceRobustness2PropertiesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_custom_border_color
+void marshal_VkSamplerCustomBorderColorCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSamplerCustomBorderColorCreateInfoEXT* forMarshaling);
+
+void unmarshal_VkSamplerCustomBorderColorCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkSamplerCustomBorderColorCreateInfoEXT* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceCustomBorderColorPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCustomBorderColorPropertiesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceCustomBorderColorPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceCustomBorderColorPropertiesEXT* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceCustomBorderColorFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCustomBorderColorFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceCustomBorderColorFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceCustomBorderColorFeaturesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+void marshal_VkPhysicalDevicePrivateDataFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePrivateDataFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDevicePrivateDataFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevicePrivateDataFeaturesEXT* forUnmarshaling);
+
+void marshal_VkDevicePrivateDataCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDevicePrivateDataCreateInfoEXT* forMarshaling);
+
+void unmarshal_VkDevicePrivateDataCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDevicePrivateDataCreateInfoEXT* forUnmarshaling);
+
+void marshal_VkPrivateDataSlotCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPrivateDataSlotCreateInfoEXT* forMarshaling);
+
+void unmarshal_VkPrivateDataSlotCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPrivateDataSlotCreateInfoEXT* forUnmarshaling);
+
+#define OP_vkCreatePrivateDataSlotEXT 236374049
+#define OP_vkDestroyPrivateDataSlotEXT 208891309
+#define OP_vkSetPrivateDataEXT 225259406
+#define OP_vkGetPrivateDataEXT 291399427
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+void marshal_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_NV_device_diagnostics_config
+void marshal_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDiagnosticsConfigFeaturesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceDiagnosticsConfigFeaturesNV* forUnmarshaling);
+
+void marshal_VkDeviceDiagnosticsConfigCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceDiagnosticsConfigCreateInfoNV* forMarshaling);
+
+void unmarshal_VkDeviceDiagnosticsConfigCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDeviceDiagnosticsConfigCreateInfoNV* forUnmarshaling);
+
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+void marshal_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* forUnmarshaling);
+
+void marshal_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineFragmentShadingRateEnumStateCreateInfoNV* forMarshaling);
+
+void unmarshal_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPipelineFragmentShadingRateEnumStateCreateInfoNV* forUnmarshaling);
+
+#define OP_vkCmdSetFragmentShadingRateEnumNV 264649847
+#endif
+#ifdef VK_EXT_fragment_density_map2
+void marshal_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+void marshal_VkCopyCommandTransformInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyCommandTransformInfoQCOM* forMarshaling);
+
+void unmarshal_VkCopyCommandTransformInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCopyCommandTransformInfoQCOM* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_image_robustness
+void marshal_VkPhysicalDeviceImageRobustnessFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageRobustnessFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceImageRobustnessFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceImageRobustnessFeaturesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_4444_formats
+void marshal_VkPhysicalDevice4444FormatsFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevice4444FormatsFeaturesEXT* forMarshaling);
+
+void unmarshal_VkPhysicalDevice4444FormatsFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDevice4444FormatsFeaturesEXT* forUnmarshaling);
+
+#endif
+#ifdef VK_EXT_directfb_surface
+void marshal_VkDirectFBSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDirectFBSurfaceCreateInfoEXT* forMarshaling);
+
+void unmarshal_VkDirectFBSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDirectFBSurfaceCreateInfoEXT* forUnmarshaling);
+
+#define OP_vkCreateDirectFBSurfaceEXT 220792403
+#define OP_vkGetPhysicalDeviceDirectFBPresentationSupportEXT 285441990
+#endif
+#ifdef VK_GOOGLE_gfxstream
 void marshal_VkImportColorBufferGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportColorBufferGOOGLE* forMarshaling);
 
 void unmarshal_VkImportColorBufferGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportColorBufferGOOGLE* forUnmarshaling);
 
+void marshal_VkImportBufferGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportBufferGOOGLE* forMarshaling);
+
+void unmarshal_VkImportBufferGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkImportBufferGOOGLE* forUnmarshaling);
+
 void marshal_VkImportPhysicalAddressGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     const VkImportPhysicalAddressGOOGLE* forMarshaling);
 
 void unmarshal_VkImportPhysicalAddressGOOGLE(
     VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
     VkImportPhysicalAddressGOOGLE* forUnmarshaling);
 
 #define OP_vkRegisterImageColorBufferGOOGLE 20318
 #define OP_vkRegisterBufferColorBufferGOOGLE 20319
-#endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
+#define OP_vkMapMemoryIntoAddressSpaceGOOGLE 20317
 #define OP_vkUpdateDescriptorSetWithTemplateSizedGOOGLE 20320
-#endif
-#ifdef VK_GOOGLE_async_command_buffers
 #define OP_vkBeginCommandBufferAsyncGOOGLE 20321
 #define OP_vkEndCommandBufferAsyncGOOGLE 20322
 #define OP_vkResetCommandBufferAsyncGOOGLE 20323
 #define OP_vkCommandBufferHostSyncGOOGLE 20324
-#endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
 #define OP_vkCreateImageWithRequirementsGOOGLE 20325
 #define OP_vkCreateBufferWithRequirementsGOOGLE 20326
-#endif
-#ifdef VK_GOOGLE_address_space_info
 #define OP_vkGetMemoryHostAddressInfoGOOGLE 20327
-#endif
-#ifdef VK_GOOGLE_free_memory_sync
 #define OP_vkFreeMemorySyncGOOGLE 20328
+#define OP_vkQueueHostSyncGOOGLE 20329
+#define OP_vkQueueSubmitAsyncGOOGLE 20330
+#define OP_vkQueueWaitIdleAsyncGOOGLE 20331
+#define OP_vkQueueBindSparseAsyncGOOGLE 20332
+#define OP_vkGetLinearImageLayoutGOOGLE 20333
+#define OP_vkQueueFlushCommandsGOOGLE 20340
+#define OP_vkQueueCommitDescriptorSetUpdatesGOOGLE 267932433
+#define OP_vkCollectDescriptorPoolIdsGOOGLE 213659202
+#endif
+#ifdef VK_KHR_acceleration_structure
+void marshal_VkDeviceOrHostAddressKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceOrHostAddressKHR* forMarshaling);
+
+void unmarshal_VkDeviceOrHostAddressKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDeviceOrHostAddressKHR* forUnmarshaling);
+
+void marshal_VkDeviceOrHostAddressConstKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceOrHostAddressConstKHR* forMarshaling);
+
+void unmarshal_VkDeviceOrHostAddressConstKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkDeviceOrHostAddressConstKHR* forUnmarshaling);
+
+void marshal_VkAccelerationStructureBuildRangeInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildRangeInfoKHR* forMarshaling);
+
+void unmarshal_VkAccelerationStructureBuildRangeInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureBuildRangeInfoKHR* forUnmarshaling);
+
+void marshal_VkAccelerationStructureGeometryTrianglesDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryTrianglesDataKHR* forMarshaling);
+
+void unmarshal_VkAccelerationStructureGeometryTrianglesDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureGeometryTrianglesDataKHR* forUnmarshaling);
+
+void marshal_VkAccelerationStructureGeometryAabbsDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryAabbsDataKHR* forMarshaling);
+
+void unmarshal_VkAccelerationStructureGeometryAabbsDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureGeometryAabbsDataKHR* forUnmarshaling);
+
+void marshal_VkAccelerationStructureGeometryInstancesDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryInstancesDataKHR* forMarshaling);
+
+void unmarshal_VkAccelerationStructureGeometryInstancesDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureGeometryInstancesDataKHR* forUnmarshaling);
+
+void marshal_VkAccelerationStructureGeometryDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryDataKHR* forMarshaling);
+
+void unmarshal_VkAccelerationStructureGeometryDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureGeometryDataKHR* forUnmarshaling);
+
+void marshal_VkAccelerationStructureGeometryKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryKHR* forMarshaling);
+
+void unmarshal_VkAccelerationStructureGeometryKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureGeometryKHR* forUnmarshaling);
+
+void marshal_VkAccelerationStructureBuildGeometryInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildGeometryInfoKHR* forMarshaling);
+
+void unmarshal_VkAccelerationStructureBuildGeometryInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureBuildGeometryInfoKHR* forUnmarshaling);
+
+void marshal_VkAccelerationStructureCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureCreateInfoKHR* forMarshaling);
+
+void unmarshal_VkAccelerationStructureCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureCreateInfoKHR* forUnmarshaling);
+
+void marshal_VkWriteDescriptorSetAccelerationStructureKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetAccelerationStructureKHR* forMarshaling);
+
+void unmarshal_VkWriteDescriptorSetAccelerationStructureKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkWriteDescriptorSetAccelerationStructureKHR* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceAccelerationStructureFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceAccelerationStructureFeaturesKHR* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceAccelerationStructureFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceAccelerationStructureFeaturesKHR* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceAccelerationStructurePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceAccelerationStructurePropertiesKHR* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceAccelerationStructurePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceAccelerationStructurePropertiesKHR* forUnmarshaling);
+
+void marshal_VkAccelerationStructureDeviceAddressInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureDeviceAddressInfoKHR* forMarshaling);
+
+void unmarshal_VkAccelerationStructureDeviceAddressInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureDeviceAddressInfoKHR* forUnmarshaling);
+
+void marshal_VkAccelerationStructureVersionInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureVersionInfoKHR* forMarshaling);
+
+void unmarshal_VkAccelerationStructureVersionInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureVersionInfoKHR* forUnmarshaling);
+
+void marshal_VkCopyAccelerationStructureToMemoryInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyAccelerationStructureToMemoryInfoKHR* forMarshaling);
+
+void unmarshal_VkCopyAccelerationStructureToMemoryInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCopyAccelerationStructureToMemoryInfoKHR* forUnmarshaling);
+
+void marshal_VkCopyMemoryToAccelerationStructureInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyMemoryToAccelerationStructureInfoKHR* forMarshaling);
+
+void unmarshal_VkCopyMemoryToAccelerationStructureInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCopyMemoryToAccelerationStructureInfoKHR* forUnmarshaling);
+
+void marshal_VkCopyAccelerationStructureInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyAccelerationStructureInfoKHR* forMarshaling);
+
+void unmarshal_VkCopyAccelerationStructureInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkCopyAccelerationStructureInfoKHR* forUnmarshaling);
+
+void marshal_VkAccelerationStructureBuildSizesInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildSizesInfoKHR* forMarshaling);
+
+void unmarshal_VkAccelerationStructureBuildSizesInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkAccelerationStructureBuildSizesInfoKHR* forUnmarshaling);
+
+#define OP_vkCreateAccelerationStructureKHR 259403971
+#define OP_vkDestroyAccelerationStructureKHR 223971120
+#define OP_vkCmdBuildAccelerationStructuresKHR 272943905
+#define OP_vkCmdBuildAccelerationStructuresIndirectKHR 258066143
+#define OP_vkBuildAccelerationStructuresKHR 241919567
+#define OP_vkCopyAccelerationStructureKHR 241495016
+#define OP_vkCopyAccelerationStructureToMemoryKHR 256139578
+#define OP_vkCopyMemoryToAccelerationStructureKHR 261558680
+#define OP_vkWriteAccelerationStructuresPropertiesKHR 289745796
+#define OP_vkCmdCopyAccelerationStructureKHR 279460332
+#define OP_vkCmdCopyAccelerationStructureToMemoryKHR 223539733
+#define OP_vkCmdCopyMemoryToAccelerationStructureKHR 203733963
+#define OP_vkGetAccelerationStructureDeviceAddressKHR 223466148
+#define OP_vkCmdWriteAccelerationStructuresPropertiesKHR 271696183
+#define OP_vkGetDeviceAccelerationStructureCompatibilityKHR 266386590
+#define OP_vkGetAccelerationStructureBuildSizesKHR 219720024
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+void marshal_VkRayTracingShaderGroupCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingShaderGroupCreateInfoKHR* forMarshaling);
+
+void unmarshal_VkRayTracingShaderGroupCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRayTracingShaderGroupCreateInfoKHR* forUnmarshaling);
+
+void marshal_VkRayTracingPipelineInterfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingPipelineInterfaceCreateInfoKHR* forMarshaling);
+
+void unmarshal_VkRayTracingPipelineInterfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRayTracingPipelineInterfaceCreateInfoKHR* forUnmarshaling);
+
+void marshal_VkRayTracingPipelineCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingPipelineCreateInfoKHR* forMarshaling);
+
+void unmarshal_VkRayTracingPipelineCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkRayTracingPipelineCreateInfoKHR* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPipelineFeaturesKHR* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceRayTracingPipelineFeaturesKHR* forUnmarshaling);
+
+void marshal_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPipelinePropertiesKHR* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceRayTracingPipelinePropertiesKHR* forUnmarshaling);
+
+void marshal_VkStridedDeviceAddressRegionKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkStridedDeviceAddressRegionKHR* forMarshaling);
+
+void unmarshal_VkStridedDeviceAddressRegionKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkStridedDeviceAddressRegionKHR* forUnmarshaling);
+
+void marshal_VkTraceRaysIndirectCommandKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkTraceRaysIndirectCommandKHR* forMarshaling);
+
+void unmarshal_VkTraceRaysIndirectCommandKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkTraceRaysIndirectCommandKHR* forUnmarshaling);
+
+#define OP_vkCmdTraceRaysKHR 213680716
+#define OP_vkCreateRayTracingPipelinesKHR 247628685
+#define OP_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR 221334934
+#define OP_vkCmdTraceRaysIndirectKHR 211788517
+#define OP_vkGetRayTracingShaderGroupStackSizeKHR 205271933
+#define OP_vkCmdSetRayTracingPipelineStackSizeKHR 260219604
+#endif
+#ifdef VK_KHR_ray_query
+void marshal_VkPhysicalDeviceRayQueryFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayQueryFeaturesKHR* forMarshaling);
+
+void unmarshal_VkPhysicalDeviceRayQueryFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    VkPhysicalDeviceRayQueryFeaturesKHR* forUnmarshaling);
+
 #endif
 const char* api_opcode_to_string(
     const uint32_t opcode);
 
+#define OP_vkFirst_old 20000
+#define OP_vkLast_old 30000
+#define OP_vkFirst 200000000
+#define OP_vkLast 300000000
 
 } // namespace goldfish_vk
diff --git a/system/vulkan_enc/goldfish_vk_private_defs.h b/system/vulkan_enc/goldfish_vk_private_defs.h
index 9e673c0..bf57d8a 100644
--- a/system/vulkan_enc/goldfish_vk_private_defs.h
+++ b/system/vulkan_enc/goldfish_vk_private_defs.h
@@ -85,16 +85,15 @@
 typedef VkResult (VKAPI_PTR *PFN_vkAcquireImageANDROID)(VkDevice device, VkImage image, int nativeFenceFd, VkSemaphore semaphore, VkFence fence);
 typedef VkResult (VKAPI_PTR *PFN_vkQueueSignalReleaseImageANDROID)(VkQueue queue, uint32_t waitSemaphoreCount, const VkSemaphore* pWaitSemaphores, VkImage image, int* pNativeFenceFd);
 
-#define VK_GOOGLE_address_space 1
-
 typedef VkResult (VKAPI_PTR *PFN_vkMapMemoryIntoAddressSpaceGOOGLE)(VkDevice device, VkDeviceMemory memory, uint64_t* pAddress);
 
-#define VK_GOOGLE_color_buffer 1
-#define VK_GOOGLE_COLOR_BUFFER_EXTENSION_NUMBER 219
+#define VK_GOOGLE_gfxstream 1
+#define VK_GOOGLE_GFXSTREAM_EXTENSION_NUMBER 386
 
-#define VK_GOOGLE_COLOR_BUFFER_ENUM(type,id)    ((type)(1000000000 + (1000 * (VK_GOOGLE_COLOR_BUFFER_EXTENSION_NUMBER - 1)) + (id)))
-#define VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE   VK_GOOGLE_COLOR_BUFFER_ENUM(VkStructureType, 0)
-#define VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE   VK_GOOGLE_COLOR_BUFFER_ENUM(VkStructureType, 1)
+#define VK_GOOGLE_GFXSTREAM_ENUM(type,id)    ((type)(1000000000 + (1000 * (VK_GOOGLE_GFXSTREAM_EXTENSION_NUMBER - 1)) + (id)))
+#define VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE   VK_GOOGLE_GFXSTREAM_ENUM(VkStructureType, 0)
+#define VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE   VK_GOOGLE_GFXSTREAM_ENUM(VkStructureType, 1)
+#define VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE   VK_GOOGLE_GFXSTREAM_ENUM(VkStructureType, 2)
 
 typedef struct {
     VkStructureType sType; // must be VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE
@@ -112,15 +111,17 @@
     uint32_t tilingParameter;
 } VkImportPhysicalAddressGOOGLE;
 
+typedef struct {
+    VkStructureType sType; // must be VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE
+    const void* pNext;
+    uint32_t buffer;
+} VkImportBufferGOOGLE;
+
 typedef VkResult (VKAPI_PTR *PFN_vkRegisterImageColorBufferGOOGLE)(VkDevice device, VkImage image, uint32_t colorBuffer);
 typedef VkResult (VKAPI_PTR *PFN_vkRegisterBufferColorBufferGOOGLE)(VkDevice device, VkBuffer image, uint32_t colorBuffer);
 
-#define VK_GOOGLE_address_space_info 1
-
 typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryHostAddressInfoGOOGLE)(VkDevice device, VkDeviceMemory memory, uint64_t* pAddress, uint64_t* pSize);
 
-#define VK_GOOGLE_free_memory_sync 1
-
 typedef VkResult (VKAPI_PTR *PFN_vkFreeMemorySyncGOOGLE)(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocationCallbacks);
 
 #define VK_ANDROID_external_memory_android_hardware_buffer 1
@@ -275,6 +276,13 @@
      *   OpenGL ES: GL_STENCIL_INDEX8
      */
     AHARDWAREBUFFER_FORMAT_S8_UINT                  = 0x35,
+    /**
+     * YUV 420 888 format.
+     * Must have an even width and height. Can be accessed in OpenGL
+     * shaders through an external sampler. Does not support mip-maps
+     * cube-maps or multi-layered textures.
+     */
+    AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420             = 0x23,
 };
 /**
  * Buffer usage flags, specifying how the buffer will be accessed.
@@ -438,8 +446,6 @@
     uint32_t needHostSync,
     uint32_t sequenceNumber);
 
-#define VK_GOOGLE_create_resources_with_requirements 1
-
 typedef void (VKAPI_PTR *PFN_vkCreateImageWithRequirementsGOOGLE)(
     VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage, VkMemoryRequirements* pMemoryRequirements);
 
@@ -470,6 +476,13 @@
     uint32_t                     index;
 } VkBufferCollectionImageCreateInfoFUCHSIA;
 
+typedef struct VkBufferCollectionBufferCreateInfoFUCHSIA {
+    VkStructureType              sType;
+    const void*                  pNext;
+    VkBufferCollectionFUCHSIA    collection;
+    uint32_t                     index;
+} VkBufferCollectionBufferCreateInfoFUCHSIA;
+
 typedef struct VkBufferCollectionPropertiesFUCHSIA {
     VkStructureType    sType;
     void*              pNext;
@@ -481,6 +494,8 @@
     ((VkStructureType)1001004004)
 #define VK_STRUCTURE_TYPE_BUFFER_COLLECTION_IMAGE_CREATE_INFO_FUCHSIA \
     ((VkStructureType)1001004005)
+#define VK_STRUCTURE_TYPE_BUFFER_COLLECTION_BUFFER_CREATE_INFO_FUCHSIA \
+    ((VkStructureType)1001004008)
 #endif  // VK_FUCHSIA_buffer_collection
 
 #ifndef VK_FUCHSIA_external_memory
@@ -488,6 +503,14 @@
 #define VK_FUCHSIA_EXTERNAL_MEMORY_SPEC_VERSION 1
 #define VK_FUCHSIA_EXTERNAL_MEMORY_EXTENSION_NAME "VK_FUCHSIA_external_memory"
 
+typedef struct VkBufferConstraintsInfoFUCHSIA {
+    VkStructureType sType;
+    const void* pNext;
+    const VkBufferCreateInfo* pBufferCreateInfo;
+    VkFormatFeatureFlags requiredFormatFeatures;
+    uint32_t minCount;
+} VkBufferConstraintsInfoFUCHSIA;
+
 typedef struct VkImportMemoryZirconHandleInfoFUCHSIA {
     VkStructureType                       sType;
     const void*                           pNext;
@@ -508,14 +531,46 @@
     VkExternalMemoryHandleTypeFlagBits    handleType;
 } VkMemoryGetZirconHandleInfoFUCHSIA;
 
+#define VK_STRUCTURE_TYPE_BUFFER_COLLECTION_BUFFER_CREATE_INFO_FUCHSIA \
+    ((VkStructureType)1001004008)
+
+#if VK_HEADER_VERSION < 174
+#define VK_STRUCTURE_TYPE_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA \
+    ((VkStructureType)1000364000)
+#define VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA \
+    ((VkExternalMemoryHandleTypeFlagBits)0x00000800)
+#endif
+
+// Deprecated
 #define VK_STRUCTURE_TYPE_TEMP_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA \
     ((VkStructureType)1001005000)
-#define VK_STRUCTURE_TYPE_TEMP_MEMORY_ZIRCON_HANDLE_PROPERTIES_FUCHSIA \
-    ((VkStructureType)1001005001)
 #define VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA \
     ((VkExternalMemoryHandleTypeFlagBits)0x00100000)
+
+#else // VK_FUCHSIA_external_memory
+
+// For backward compatibility
+#if VK_HEADER_VERSION >= 174
+#define VK_STRUCTURE_TYPE_TEMP_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA \
+    ((VkStructureType)1001005000)
+#define VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA \
+    ((VkExternalMemoryHandleTypeFlagBits)0x00100000)
+#endif  // VK_HEADER_VERSION >= 174
+
+// For forward compatibility
+#ifndef VK_STRUCTURE_TYPE_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA
+#define VK_STRUCTURE_TYPE_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA ((VkStructureType)1000364000)
+#endif  // VK_STRUCTURE_TYPE_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA
+
+// For forward compatibility
+#ifndef VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA
+#define VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA \
+    ((VkExternalMemoryHandleTypeFlagBits)0x00000800)
+#endif  // VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA
+
 #endif  // VK_FUCHSIA_external_memory
 
+
 #ifndef VK_FUCHSIA_external_semaphore
 #define VK_FUCHSIA_external_semaphore 1
 #define VK_FUCHSIA_EXTERNAL_SEMAPHORE_SPEC_VERSION 1
@@ -527,7 +582,11 @@
     VkSemaphore                              semaphore;
     VkSemaphoreImportFlags                   flags;
     VkExternalSemaphoreHandleTypeFlagBits    handleType;
+#if VK_HEADER_VERSION < 174
     uint32_t                                 handle;
+#else // VK_HEADER_VERSION >= 174
+    uint32_t                                 zirconHandle;
+#endif // VK_HEADER_VERSION < 174
 } VkImportSemaphoreZirconHandleInfoFUCHSIA;
 
 typedef struct VkSemaphoreGetZirconHandleInfoFUCHSIA {
@@ -537,16 +596,89 @@
     VkExternalSemaphoreHandleTypeFlagBits    handleType;
 } VkSemaphoreGetZirconHandleInfoFUCHSIA;
 
+#if VK_HEADER_VERSION < 174
+#define VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA \
+    ((VkExternalMemoryHandleTypeFlagBits)0x00000080)
+#endif
+
+// Deprecated
 #define VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA \
     ((VkExternalSemaphoreHandleTypeFlagBits)0x00100000)
+
+#else // VK_FUCHSIA_external_semaphore
+
+// For backward compatibility
+#if VK_HEADER_VERSION >= 174
+#define VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA \
+    ((VkExternalSemaphoreHandleTypeFlagBits)0x00100000)
+#endif  // VK_HEADER_VERSION >= 174
+
+// For forward compatibility
+#ifndef VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA
+#define VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_ZIRCON_EVENT_BIT_FUCHSIA \
+    ((VkExternalMemoryHandleTypeFlagBits)0x00000080)
+#endif  // VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TEMP_ZIRCON_EVENT_BIT_FUCHSIA
+
 #endif  // VK_FUCHSIA_external_semaphore
 
+
 // VulkanStream features
 #define VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT (1 << 0)
 #define VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT (1 << 1)
+#define VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT (1 << 2)
+#define VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT (1 << 3)
 
 #define VK_YCBCR_CONVERSION_DO_NOTHING ((VkSamplerYcbcrConversion)0x1111111111111111)
 
+// Stuff we advertised but didn't define the structs for it yet because
+// we also needed to update our vulkan headers and xml
+
+#ifndef VK_VERSION_1_2
+
+typedef struct VkPhysicalDeviceShaderFloat16Int8Features {
+    VkStructureType    sType;
+    void*              pNext;
+    VkBool32           shaderFloat16;
+    VkBool32           shaderInt8;
+} VkPhysicalDeviceShaderFloat16Int8Features;
+
+
+#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES \
+    ((VkStructureType)1000082000)
+
+#endif
+
+#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR \
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES
+
+#define VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR \
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES
+
+#ifndef VK_KHR_shader_float16_int8
+
+#define VK_KHR_shader_float16_int8 1
+#define VK_KHR_SHADER_FLOAT16_INT8_SPEC_VERSION 1
+#define VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME "VK_KHR_shader_float16_int8"
+typedef VkPhysicalDeviceShaderFloat16Int8Features VkPhysicalDeviceShaderFloat16Int8FeaturesKHR;
+typedef VkPhysicalDeviceShaderFloat16Int8Features VkPhysicalDeviceFloat16Int8FeaturesKHR;
+
+#endif
+
+#define VK_GOOGLE_gfxstream 1
+
+typedef void (VKAPI_PTR *PFN_vkQueueHostSyncGOOGLE)(
+    VkQueue queue, uint32_t needHostSync, uint32_t sequenceNumber);
+typedef void (VKAPI_PTR *PFN_vkQueueSubmitAsyncGOOGLE)(
+    VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence);
+typedef void (VKAPI_PTR *PFN_vkQueueWaitIdleAsyncGOOGLE)(VkQueue queue);
+typedef void (VKAPI_PTR *PFN_vkQueueBindSparseAsyncGOOGLE)(
+    VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence);
+
+typedef VkResult (VKAPI_PTR *PFN_vkGetLinearImageLayoutGOOGLE)(VkDevice device, VkFormat format, VkDeviceSize* pOffset, VkDeviceSize* pRowPitchAlignment);
+
+typedef void (VKAPI_PTR *PFN_vkQueueFlushCommandsGOOGLE)(VkQueue queue, VkDeviceSize dataSize, const void* pData);
+typedef void (VKAPI_PTR *PFN_vkQueueCommitDescriptorSetUpdatesGOOGLE)(VkQueue queue, uint32_t descriptorPoolCount, const VkDescriptorPool* pDescriptorPools, uint32_t descriptorSetCount, const VkDescriptorSetLayout* pDescriptorSetLayouts, const uint64_t* pDescriptorSetPoolIds, const uint32_t* pDescriptorSetWhichPool, const uint32_t* pDescriptorSetPendingAllocation, const uint32_t* pDescriptorWriteStartingIndices, uint32_t pendingDescriptorWriteCount, const VkWriteDescriptorSet* pPendingDescriptorWrites);
+
 #ifdef __cplusplus
 } // extern "C"
 #endif
@@ -559,4 +691,10 @@
     return std::find_if(arr + begin, e, func) != e;
 }
 
+#define DEFINE_ALIAS_FUNCTION(ORIGINAL_FN, ALIAS_FN) \
+template <typename... Args> \
+inline auto ALIAS_FN(Args&&... args) -> decltype(ORIGINAL_FN(std::forward<Args>(args)...)) { \
+  return ORIGINAL_FN(std::forward<Args>(args)...); \
+}
+
 #endif
diff --git a/system/vulkan_enc/goldfish_vk_reserved_marshaling_guest.cpp b/system/vulkan_enc/goldfish_vk_reserved_marshaling_guest.cpp
new file mode 100644
index 0000000..e4bf00d
--- /dev/null
+++ b/system/vulkan_enc/goldfish_vk_reserved_marshaling_guest.cpp
@@ -0,0 +1,17182 @@
+// Copyright (C) 2018 The Android Open Source Project
+// Copyright (C) 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Autogenerated module goldfish_vk_reserved_marshaling_guest
+// (impl) generated by android/android-emugl/host/libs/libOpenglRender/vulkan-registry/xml/genvk.py -registry android/android-emugl/host/libs/libOpenglRender/vulkan-registry/xml/vk.xml cereal -o android/android-emugl/host/libs/libOpenglRender/vulkan/cereal
+// Please do not modify directly;
+// re-run android/scripts/generate-vulkan-sources.sh,
+// or directly from Python by defining:
+// VULKAN_REGISTRY_XML_DIR : Directory containing genvk.py and vk.xml
+// CEREAL_OUTPUT_DIR: Where to put the generated sources.
+// python3 $VULKAN_REGISTRY_XML_DIR/genvk.py -registry $VULKAN_REGISTRY_XML_DIR/vk.xml cereal -o $CEREAL_OUTPUT_DIR
+
+#include "goldfish_vk_reserved_marshaling_guest.h"
+
+
+#include "goldfish_vk_extension_structs_guest.h"
+#include "goldfish_vk_private_defs.h"
+
+#include "Resources.h"
+
+
+namespace goldfish_vk {
+
+void reservedmarshal_extension_struct(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const void* structExtension,
+    uint8_t** ptr);
+
+void reservedunmarshal_extension_struct(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    void* structExtension_out,
+    uint8_t** ptr);
+
+#ifdef VK_VERSION_1_0
+void reservedmarshal_VkExtent2D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExtent2D* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->width, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->height, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkExtent3D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExtent3D* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->width, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->height, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->depth, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkOffset2D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkOffset2D* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (int32_t*)&forMarshaling->x, sizeof(int32_t));
+    *ptr += sizeof(int32_t);
+    memcpy(*ptr, (int32_t*)&forMarshaling->y, sizeof(int32_t));
+    *ptr += sizeof(int32_t);
+}
+
+void reservedmarshal_VkOffset3D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkOffset3D* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (int32_t*)&forMarshaling->x, sizeof(int32_t));
+    *ptr += sizeof(int32_t);
+    memcpy(*ptr, (int32_t*)&forMarshaling->y, sizeof(int32_t));
+    *ptr += sizeof(int32_t);
+    memcpy(*ptr, (int32_t*)&forMarshaling->z, sizeof(int32_t));
+    *ptr += sizeof(int32_t);
+}
+
+void reservedmarshal_VkRect2D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRect2D* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    reservedmarshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forMarshaling->offset), ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->extent), ptr);
+}
+
+void reservedmarshal_VkBaseInStructure(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBaseInStructure* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+}
+
+void reservedmarshal_VkBaseOutStructure(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBaseOutStructure* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+}
+
+void reservedmarshal_VkBufferMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferMemoryBarrier* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkAccessFlags*)&forMarshaling->srcAccessMask, sizeof(VkAccessFlags));
+    *ptr += sizeof(VkAccessFlags);
+    memcpy(*ptr, (VkAccessFlags*)&forMarshaling->dstAccessMask, sizeof(VkAccessFlags));
+    *ptr += sizeof(VkAccessFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->srcQueueFamilyIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->dstQueueFamilyIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&forMarshaling->buffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkDispatchIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDispatchIndirectCommand* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->x, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->y, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->z, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkDrawIndexedIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrawIndexedIndirectCommand* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->indexCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->instanceCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->firstIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (int32_t*)&forMarshaling->vertexOffset, sizeof(int32_t));
+    *ptr += sizeof(int32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->firstInstance, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkDrawIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrawIndirectCommand* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->vertexCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->instanceCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->firstVertex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->firstInstance, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkImageSubresourceRange(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageSubresourceRange* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkImageAspectFlags*)&forMarshaling->aspectMask, sizeof(VkImageAspectFlags));
+    *ptr += sizeof(VkImageAspectFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->baseMipLevel, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->levelCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->baseArrayLayer, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->layerCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkImageMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageMemoryBarrier* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkAccessFlags*)&forMarshaling->srcAccessMask, sizeof(VkAccessFlags));
+    *ptr += sizeof(VkAccessFlags);
+    memcpy(*ptr, (VkAccessFlags*)&forMarshaling->dstAccessMask, sizeof(VkAccessFlags));
+    *ptr += sizeof(VkAccessFlags);
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->oldLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->newLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->srcQueueFamilyIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->dstQueueFamilyIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&forMarshaling->image));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    reservedmarshal_VkImageSubresourceRange(vkStream, rootType, (VkImageSubresourceRange*)(&forMarshaling->subresourceRange), ptr);
+}
+
+void reservedmarshal_VkMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryBarrier* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkAccessFlags*)&forMarshaling->srcAccessMask, sizeof(VkAccessFlags));
+    *ptr += sizeof(VkAccessFlags);
+    memcpy(*ptr, (VkAccessFlags*)&forMarshaling->dstAccessMask, sizeof(VkAccessFlags));
+    *ptr += sizeof(VkAccessFlags);
+}
+
+void reservedmarshal_VkAllocationCallbacks(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAllocationCallbacks* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pUserData;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pUserData)
+    {
+        memcpy(*ptr, (void*)forMarshaling->pUserData, sizeof(uint8_t));
+        *ptr += sizeof(uint8_t);
+    }
+    uint64_t cgen_var_1 = (uint64_t)forMarshaling->pfnAllocation;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    uint64_t cgen_var_2 = (uint64_t)forMarshaling->pfnReallocation;
+    memcpy((*ptr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    uint64_t cgen_var_3 = (uint64_t)forMarshaling->pfnFree;
+    memcpy((*ptr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    uint64_t cgen_var_4 = (uint64_t)forMarshaling->pfnInternalAllocation;
+    memcpy((*ptr), &cgen_var_4, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    uint64_t cgen_var_5 = (uint64_t)forMarshaling->pfnInternalFree;
+    memcpy((*ptr), &cgen_var_5, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+}
+
+void reservedmarshal_VkApplicationInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkApplicationInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
+    {
+        // WARNING PTR CHECK
+        uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pApplicationName;
+        memcpy((*ptr), &cgen_var_0, 8);
+        android::base::Stream::toBe64((uint8_t*)(*ptr));
+        *ptr += 8;
+        if (forMarshaling->pApplicationName)
+        {
+            {
+                uint32_t l = forMarshaling->pApplicationName ? strlen(forMarshaling->pApplicationName): 0;
+                memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+                android::base::Stream::toBe32((uint8_t*)*ptr);
+                *ptr += sizeof(uint32_t);
+                memcpy(*ptr, (char*)forMarshaling->pApplicationName, l);
+                *ptr += l;
+            }
+        }
+    }
+    else
+    {
+        {
+            uint32_t l = forMarshaling->pApplicationName ? strlen(forMarshaling->pApplicationName): 0;
+            memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+            android::base::Stream::toBe32((uint8_t*)*ptr);
+            *ptr += sizeof(uint32_t);
+            memcpy(*ptr, (char*)forMarshaling->pApplicationName, l);
+            *ptr += l;
+        }
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->applicationVersion, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
+    {
+        // WARNING PTR CHECK
+        uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pEngineName;
+        memcpy((*ptr), &cgen_var_0, 8);
+        android::base::Stream::toBe64((uint8_t*)(*ptr));
+        *ptr += 8;
+        if (forMarshaling->pEngineName)
+        {
+            {
+                uint32_t l = forMarshaling->pEngineName ? strlen(forMarshaling->pEngineName): 0;
+                memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+                android::base::Stream::toBe32((uint8_t*)*ptr);
+                *ptr += sizeof(uint32_t);
+                memcpy(*ptr, (char*)forMarshaling->pEngineName, l);
+                *ptr += l;
+            }
+        }
+    }
+    else
+    {
+        {
+            uint32_t l = forMarshaling->pEngineName ? strlen(forMarshaling->pEngineName): 0;
+            memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+            android::base::Stream::toBe32((uint8_t*)*ptr);
+            *ptr += sizeof(uint32_t);
+            memcpy(*ptr, (char*)forMarshaling->pEngineName, l);
+            *ptr += l;
+        }
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->engineVersion, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->apiVersion, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFormatProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkFormatFeatureFlags*)&forMarshaling->linearTilingFeatures, sizeof(VkFormatFeatureFlags));
+    *ptr += sizeof(VkFormatFeatureFlags);
+    memcpy(*ptr, (VkFormatFeatureFlags*)&forMarshaling->optimalTilingFeatures, sizeof(VkFormatFeatureFlags));
+    *ptr += sizeof(VkFormatFeatureFlags);
+    memcpy(*ptr, (VkFormatFeatureFlags*)&forMarshaling->bufferFeatures, sizeof(VkFormatFeatureFlags));
+    *ptr += sizeof(VkFormatFeatureFlags);
+}
+
+void reservedmarshal_VkImageFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageFormatProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    reservedmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->maxExtent), ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxMipLevels, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxArrayLayers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkSampleCountFlags*)&forMarshaling->sampleCounts, sizeof(VkSampleCountFlags));
+    *ptr += sizeof(VkSampleCountFlags);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->maxResourceSize, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkInstanceCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkInstanceCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkInstanceCreateFlags*)&forMarshaling->flags, sizeof(VkInstanceCreateFlags));
+    *ptr += sizeof(VkInstanceCreateFlags);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pApplicationInfo;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pApplicationInfo)
+    {
+        reservedmarshal_VkApplicationInfo(vkStream, rootType, (const VkApplicationInfo*)(forMarshaling->pApplicationInfo), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->enabledLayerCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    {
+        uint32_t c = 0;
+        if (forMarshaling)
+        {
+            c = forMarshaling->enabledLayerCount;
+        }
+        memcpy(*ptr, (uint32_t*)&c, sizeof(uint32_t));
+        android::base::Stream::toBe32((uint8_t*)*ptr);
+        *ptr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < c; ++i)
+        {
+            uint32_t l = forMarshaling->ppEnabledLayerNames ? strlen(forMarshaling->ppEnabledLayerNames[i]): 0;
+            memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+            android::base::Stream::toBe32((uint8_t*)*ptr);
+            *ptr += sizeof(uint32_t);
+            if (l)
+            {
+                memcpy(*ptr, (char*)(forMarshaling->ppEnabledLayerNames[i]), l);
+                *ptr += l;
+            }
+        }
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->enabledExtensionCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    {
+        uint32_t c = 0;
+        if (forMarshaling)
+        {
+            c = forMarshaling->enabledExtensionCount;
+        }
+        memcpy(*ptr, (uint32_t*)&c, sizeof(uint32_t));
+        android::base::Stream::toBe32((uint8_t*)*ptr);
+        *ptr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < c; ++i)
+        {
+            uint32_t l = forMarshaling->ppEnabledExtensionNames ? strlen(forMarshaling->ppEnabledExtensionNames[i]): 0;
+            memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+            android::base::Stream::toBe32((uint8_t*)*ptr);
+            *ptr += sizeof(uint32_t);
+            if (l)
+            {
+                memcpy(*ptr, (char*)(forMarshaling->ppEnabledExtensionNames[i]), l);
+                *ptr += l;
+            }
+        }
+    }
+}
+
+void reservedmarshal_VkMemoryHeap(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryHeap* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkMemoryHeapFlags*)&forMarshaling->flags, sizeof(VkMemoryHeapFlags));
+    *ptr += sizeof(VkMemoryHeapFlags);
+}
+
+void reservedmarshal_VkMemoryType(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryType* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkMemoryPropertyFlags*)&forMarshaling->propertyFlags, sizeof(VkMemoryPropertyFlags));
+    *ptr += sizeof(VkMemoryPropertyFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->heapIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPhysicalDeviceFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkBool32*)&forMarshaling->robustBufferAccess, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fullDrawIndexUint32, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->imageCubeArray, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->independentBlend, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->geometryShader, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->tessellationShader, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->sampleRateShading, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->dualSrcBlend, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->logicOp, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->multiDrawIndirect, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->drawIndirectFirstInstance, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->depthClamp, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->depthBiasClamp, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fillModeNonSolid, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->depthBounds, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->wideLines, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->largePoints, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->alphaToOne, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->multiViewport, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->samplerAnisotropy, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->textureCompressionETC2, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->textureCompressionASTC_LDR, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->textureCompressionBC, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->occlusionQueryPrecise, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->pipelineStatisticsQuery, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->vertexPipelineStoresAndAtomics, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentStoresAndAtomics, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderTessellationAndGeometryPointSize, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderImageGatherExtended, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageImageExtendedFormats, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageImageMultisample, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageImageReadWithoutFormat, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageImageWriteWithoutFormat, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderUniformBufferArrayDynamicIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSampledImageArrayDynamicIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageBufferArrayDynamicIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageImageArrayDynamicIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderClipDistance, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderCullDistance, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderFloat64, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderInt64, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderInt16, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderResourceResidency, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderResourceMinLod, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->sparseBinding, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->sparseResidencyBuffer, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->sparseResidencyImage2D, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->sparseResidencyImage3D, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->sparseResidency2Samples, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->sparseResidency4Samples, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->sparseResidency8Samples, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->sparseResidency16Samples, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->sparseResidencyAliased, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->variableMultisampleRate, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->inheritedQueries, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceLimits(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLimits* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxImageDimension1D, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxImageDimension2D, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxImageDimension3D, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxImageDimensionCube, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxImageArrayLayers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTexelBufferElements, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxUniformBufferRange, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxStorageBufferRange, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPushConstantsSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxMemoryAllocationCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxSamplerAllocationCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->bufferImageGranularity, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->sparseAddressSpaceSize, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxBoundDescriptorSets, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorSamplers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorUniformBuffers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorStorageBuffers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorSampledImages, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorStorageImages, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorInputAttachments, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageResources, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetSamplers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUniformBuffers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUniformBuffersDynamic, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetStorageBuffers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetStorageBuffersDynamic, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetSampledImages, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetStorageImages, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetInputAttachments, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxVertexInputAttributes, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxVertexInputBindings, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxVertexInputAttributeOffset, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxVertexInputBindingStride, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxVertexOutputComponents, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTessellationGenerationLevel, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTessellationPatchSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTessellationControlPerVertexInputComponents, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTessellationControlPerVertexOutputComponents, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTessellationControlPerPatchOutputComponents, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTessellationControlTotalOutputComponents, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTessellationEvaluationInputComponents, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTessellationEvaluationOutputComponents, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxGeometryShaderInvocations, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxGeometryInputComponents, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxGeometryOutputComponents, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxGeometryOutputVertices, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxGeometryTotalOutputComponents, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxFragmentInputComponents, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxFragmentOutputAttachments, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxFragmentDualSrcAttachments, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxFragmentCombinedOutputResources, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxComputeSharedMemorySize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)forMarshaling->maxComputeWorkGroupCount, 3 * sizeof(uint32_t));
+    *ptr += 3 * sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxComputeWorkGroupInvocations, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)forMarshaling->maxComputeWorkGroupSize, 3 * sizeof(uint32_t));
+    *ptr += 3 * sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->subPixelPrecisionBits, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->subTexelPrecisionBits, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->mipmapPrecisionBits, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDrawIndexedIndexValue, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDrawIndirectCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (float*)&forMarshaling->maxSamplerLodBias, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->maxSamplerAnisotropy, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxViewports, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)forMarshaling->maxViewportDimensions, 2 * sizeof(uint32_t));
+    *ptr += 2 * sizeof(uint32_t);
+    memcpy(*ptr, (float*)forMarshaling->viewportBoundsRange, 2 * sizeof(float));
+    *ptr += 2 * sizeof(float);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->viewportSubPixelBits, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->minMemoryMapAlignment;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->minTexelBufferOffsetAlignment, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->minUniformBufferOffsetAlignment, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->minStorageBufferOffsetAlignment, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (int32_t*)&forMarshaling->minTexelOffset, sizeof(int32_t));
+    *ptr += sizeof(int32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTexelOffset, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (int32_t*)&forMarshaling->minTexelGatherOffset, sizeof(int32_t));
+    *ptr += sizeof(int32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTexelGatherOffset, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (float*)&forMarshaling->minInterpolationOffset, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->maxInterpolationOffset, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->subPixelInterpolationOffsetBits, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxFramebufferWidth, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxFramebufferHeight, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxFramebufferLayers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkSampleCountFlags*)&forMarshaling->framebufferColorSampleCounts, sizeof(VkSampleCountFlags));
+    *ptr += sizeof(VkSampleCountFlags);
+    memcpy(*ptr, (VkSampleCountFlags*)&forMarshaling->framebufferDepthSampleCounts, sizeof(VkSampleCountFlags));
+    *ptr += sizeof(VkSampleCountFlags);
+    memcpy(*ptr, (VkSampleCountFlags*)&forMarshaling->framebufferStencilSampleCounts, sizeof(VkSampleCountFlags));
+    *ptr += sizeof(VkSampleCountFlags);
+    memcpy(*ptr, (VkSampleCountFlags*)&forMarshaling->framebufferNoAttachmentsSampleCounts, sizeof(VkSampleCountFlags));
+    *ptr += sizeof(VkSampleCountFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxColorAttachments, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkSampleCountFlags*)&forMarshaling->sampledImageColorSampleCounts, sizeof(VkSampleCountFlags));
+    *ptr += sizeof(VkSampleCountFlags);
+    memcpy(*ptr, (VkSampleCountFlags*)&forMarshaling->sampledImageIntegerSampleCounts, sizeof(VkSampleCountFlags));
+    *ptr += sizeof(VkSampleCountFlags);
+    memcpy(*ptr, (VkSampleCountFlags*)&forMarshaling->sampledImageDepthSampleCounts, sizeof(VkSampleCountFlags));
+    *ptr += sizeof(VkSampleCountFlags);
+    memcpy(*ptr, (VkSampleCountFlags*)&forMarshaling->sampledImageStencilSampleCounts, sizeof(VkSampleCountFlags));
+    *ptr += sizeof(VkSampleCountFlags);
+    memcpy(*ptr, (VkSampleCountFlags*)&forMarshaling->storageImageSampleCounts, sizeof(VkSampleCountFlags));
+    *ptr += sizeof(VkSampleCountFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxSampleMaskWords, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->timestampComputeAndGraphics, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (float*)&forMarshaling->timestampPeriod, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxClipDistances, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxCullDistances, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxCombinedClipAndCullDistances, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->discreteQueuePriorities, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (float*)forMarshaling->pointSizeRange, 2 * sizeof(float));
+    *ptr += 2 * sizeof(float);
+    memcpy(*ptr, (float*)forMarshaling->lineWidthRange, 2 * sizeof(float));
+    *ptr += 2 * sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->pointSizeGranularity, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->lineWidthGranularity, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->strictLines, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->standardSampleLocations, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->optimalBufferCopyOffsetAlignment, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->optimalBufferCopyRowPitchAlignment, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->nonCoherentAtomSize, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkPhysicalDeviceMemoryProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->memoryTypeCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_TYPES; ++i)
+    {
+        reservedmarshal_VkMemoryType(vkStream, rootType, (VkMemoryType*)(forMarshaling->memoryTypes + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->memoryHeapCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_HEAPS; ++i)
+    {
+        reservedmarshal_VkMemoryHeap(vkStream, rootType, (VkMemoryHeap*)(forMarshaling->memoryHeaps + i), ptr);
+    }
+}
+
+void reservedmarshal_VkPhysicalDeviceSparseProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSparseProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkBool32*)&forMarshaling->residencyStandard2DBlockShape, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->residencyStandard2DMultisampleBlockShape, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->residencyStandard3DBlockShape, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->residencyAlignedMipSize, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->residencyNonResidentStrict, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->apiVersion, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->driverVersion, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->vendorID, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->deviceID, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkPhysicalDeviceType*)&forMarshaling->deviceType, sizeof(VkPhysicalDeviceType));
+    *ptr += sizeof(VkPhysicalDeviceType);
+    memcpy(*ptr, (char*)forMarshaling->deviceName, VK_MAX_PHYSICAL_DEVICE_NAME_SIZE * sizeof(char));
+    *ptr += VK_MAX_PHYSICAL_DEVICE_NAME_SIZE * sizeof(char);
+    memcpy(*ptr, (uint8_t*)forMarshaling->pipelineCacheUUID, VK_UUID_SIZE * sizeof(uint8_t));
+    *ptr += VK_UUID_SIZE * sizeof(uint8_t);
+    reservedmarshal_VkPhysicalDeviceLimits(vkStream, rootType, (VkPhysicalDeviceLimits*)(&forMarshaling->limits), ptr);
+    reservedmarshal_VkPhysicalDeviceSparseProperties(vkStream, rootType, (VkPhysicalDeviceSparseProperties*)(&forMarshaling->sparseProperties), ptr);
+}
+
+void reservedmarshal_VkQueueFamilyProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkQueueFamilyProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkQueueFlags*)&forMarshaling->queueFlags, sizeof(VkQueueFlags));
+    *ptr += sizeof(VkQueueFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->queueCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->timestampValidBits, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    reservedmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->minImageTransferGranularity), ptr);
+}
+
+void reservedmarshal_VkDeviceQueueCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceQueueCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceQueueCreateFlags*)&forMarshaling->flags, sizeof(VkDeviceQueueCreateFlags));
+    *ptr += sizeof(VkDeviceQueueCreateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->queueFamilyIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->queueCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const float*)forMarshaling->pQueuePriorities, forMarshaling->queueCount * sizeof(const float));
+    *ptr += forMarshaling->queueCount * sizeof(const float);
+}
+
+void reservedmarshal_VkDeviceCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceCreateFlags*)&forMarshaling->flags, sizeof(VkDeviceCreateFlags));
+    *ptr += sizeof(VkDeviceCreateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->queueCreateInfoCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->queueCreateInfoCount; ++i)
+    {
+        reservedmarshal_VkDeviceQueueCreateInfo(vkStream, rootType, (const VkDeviceQueueCreateInfo*)(forMarshaling->pQueueCreateInfos + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->enabledLayerCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    {
+        uint32_t c = 0;
+        if (forMarshaling)
+        {
+            c = forMarshaling->enabledLayerCount;
+        }
+        memcpy(*ptr, (uint32_t*)&c, sizeof(uint32_t));
+        android::base::Stream::toBe32((uint8_t*)*ptr);
+        *ptr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < c; ++i)
+        {
+            uint32_t l = forMarshaling->ppEnabledLayerNames ? strlen(forMarshaling->ppEnabledLayerNames[i]): 0;
+            memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+            android::base::Stream::toBe32((uint8_t*)*ptr);
+            *ptr += sizeof(uint32_t);
+            if (l)
+            {
+                memcpy(*ptr, (char*)(forMarshaling->ppEnabledLayerNames[i]), l);
+                *ptr += l;
+            }
+        }
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->enabledExtensionCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    {
+        uint32_t c = 0;
+        if (forMarshaling)
+        {
+            c = forMarshaling->enabledExtensionCount;
+        }
+        memcpy(*ptr, (uint32_t*)&c, sizeof(uint32_t));
+        android::base::Stream::toBe32((uint8_t*)*ptr);
+        *ptr += sizeof(uint32_t);
+        for (uint32_t i = 0; i < c; ++i)
+        {
+            uint32_t l = forMarshaling->ppEnabledExtensionNames ? strlen(forMarshaling->ppEnabledExtensionNames[i]): 0;
+            memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+            android::base::Stream::toBe32((uint8_t*)*ptr);
+            *ptr += sizeof(uint32_t);
+            if (l)
+            {
+                memcpy(*ptr, (char*)(forMarshaling->ppEnabledExtensionNames[i]), l);
+                *ptr += l;
+            }
+        }
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pEnabledFeatures;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pEnabledFeatures)
+    {
+        reservedmarshal_VkPhysicalDeviceFeatures(vkStream, rootType, (const VkPhysicalDeviceFeatures*)(forMarshaling->pEnabledFeatures), ptr);
+    }
+}
+
+void reservedmarshal_VkExtensionProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExtensionProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (char*)forMarshaling->extensionName, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
+    *ptr += VK_MAX_EXTENSION_NAME_SIZE * sizeof(char);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->specVersion, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkLayerProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkLayerProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (char*)forMarshaling->layerName, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
+    *ptr += VK_MAX_EXTENSION_NAME_SIZE * sizeof(char);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->specVersion, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->implementationVersion, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (char*)forMarshaling->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    *ptr += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+}
+
+void reservedmarshal_VkSubmitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubmitInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->waitSemaphoreCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->waitSemaphoreCount)
+    {
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->waitSemaphoreCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkSemaphore(forMarshaling->pWaitSemaphores[k]);
+                memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->waitSemaphoreCount;
+    }
+    memcpy(*ptr, (const VkPipelineStageFlags*)forMarshaling->pWaitDstStageMask, forMarshaling->waitSemaphoreCount * sizeof(const VkPipelineStageFlags));
+    *ptr += forMarshaling->waitSemaphoreCount * sizeof(const VkPipelineStageFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->commandBufferCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->commandBufferCount)
+    {
+        uint8_t* cgen_var_1_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->commandBufferCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkCommandBuffer(forMarshaling->pCommandBuffers[k]);
+                memcpy(cgen_var_1_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->commandBufferCount;
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->signalSemaphoreCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->signalSemaphoreCount)
+    {
+        uint8_t* cgen_var_2_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->signalSemaphoreCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkSemaphore(forMarshaling->pSignalSemaphores[k]);
+                memcpy(cgen_var_2_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->signalSemaphoreCount;
+    }
+}
+
+void reservedmarshal_VkMappedMemoryRange(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMappedMemoryRange* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDeviceMemory((*&forMarshaling->memory));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkMemoryAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryAllocateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->allocationSize, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->memoryTypeIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkMemoryRequirements(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryRequirements* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->alignment, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->memoryTypeBits, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkSparseMemoryBind(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseMemoryBind* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->resourceOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDeviceMemory((*&forMarshaling->memory));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->memoryOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkSparseMemoryBindFlags*)&forMarshaling->flags, sizeof(VkSparseMemoryBindFlags));
+    *ptr += sizeof(VkSparseMemoryBindFlags);
+}
+
+void reservedmarshal_VkSparseBufferMemoryBindInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseBufferMemoryBindInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&forMarshaling->buffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->bindCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->bindCount; ++i)
+    {
+        reservedmarshal_VkSparseMemoryBind(vkStream, rootType, (const VkSparseMemoryBind*)(forMarshaling->pBinds + i), ptr);
+    }
+}
+
+void reservedmarshal_VkSparseImageOpaqueMemoryBindInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageOpaqueMemoryBindInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&forMarshaling->image));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->bindCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->bindCount; ++i)
+    {
+        reservedmarshal_VkSparseMemoryBind(vkStream, rootType, (const VkSparseMemoryBind*)(forMarshaling->pBinds + i), ptr);
+    }
+}
+
+void reservedmarshal_VkImageSubresource(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageSubresource* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkImageAspectFlags*)&forMarshaling->aspectMask, sizeof(VkImageAspectFlags));
+    *ptr += sizeof(VkImageAspectFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->mipLevel, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->arrayLayer, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkSparseImageMemoryBind(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageMemoryBind* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    reservedmarshal_VkImageSubresource(vkStream, rootType, (VkImageSubresource*)(&forMarshaling->subresource), ptr);
+    reservedmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->offset), ptr);
+    reservedmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->extent), ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDeviceMemory((*&forMarshaling->memory));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->memoryOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkSparseMemoryBindFlags*)&forMarshaling->flags, sizeof(VkSparseMemoryBindFlags));
+    *ptr += sizeof(VkSparseMemoryBindFlags);
+}
+
+void reservedmarshal_VkSparseImageMemoryBindInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageMemoryBindInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&forMarshaling->image));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->bindCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->bindCount; ++i)
+    {
+        reservedmarshal_VkSparseImageMemoryBind(vkStream, rootType, (const VkSparseImageMemoryBind*)(forMarshaling->pBinds + i), ptr);
+    }
+}
+
+void reservedmarshal_VkBindSparseInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindSparseInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->waitSemaphoreCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->waitSemaphoreCount)
+    {
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->waitSemaphoreCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkSemaphore(forMarshaling->pWaitSemaphores[k]);
+                memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->waitSemaphoreCount;
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->bufferBindCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->bufferBindCount; ++i)
+    {
+        reservedmarshal_VkSparseBufferMemoryBindInfo(vkStream, rootType, (const VkSparseBufferMemoryBindInfo*)(forMarshaling->pBufferBinds + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->imageOpaqueBindCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->imageOpaqueBindCount; ++i)
+    {
+        reservedmarshal_VkSparseImageOpaqueMemoryBindInfo(vkStream, rootType, (const VkSparseImageOpaqueMemoryBindInfo*)(forMarshaling->pImageOpaqueBinds + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->imageBindCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->imageBindCount; ++i)
+    {
+        reservedmarshal_VkSparseImageMemoryBindInfo(vkStream, rootType, (const VkSparseImageMemoryBindInfo*)(forMarshaling->pImageBinds + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->signalSemaphoreCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->signalSemaphoreCount)
+    {
+        uint8_t* cgen_var_1_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->signalSemaphoreCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkSemaphore(forMarshaling->pSignalSemaphores[k]);
+                memcpy(cgen_var_1_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->signalSemaphoreCount;
+    }
+}
+
+void reservedmarshal_VkSparseImageFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageFormatProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkImageAspectFlags*)&forMarshaling->aspectMask, sizeof(VkImageAspectFlags));
+    *ptr += sizeof(VkImageAspectFlags);
+    reservedmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->imageGranularity), ptr);
+    memcpy(*ptr, (VkSparseImageFormatFlags*)&forMarshaling->flags, sizeof(VkSparseImageFormatFlags));
+    *ptr += sizeof(VkSparseImageFormatFlags);
+}
+
+void reservedmarshal_VkSparseImageMemoryRequirements(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageMemoryRequirements* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    reservedmarshal_VkSparseImageFormatProperties(vkStream, rootType, (VkSparseImageFormatProperties*)(&forMarshaling->formatProperties), ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->imageMipTailFirstLod, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->imageMipTailSize, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->imageMipTailOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->imageMipTailStride, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkFenceCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFenceCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkFenceCreateFlags*)&forMarshaling->flags, sizeof(VkFenceCreateFlags));
+    *ptr += sizeof(VkFenceCreateFlags);
+}
+
+void reservedmarshal_VkSemaphoreCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkSemaphoreCreateFlags*)&forMarshaling->flags, sizeof(VkSemaphoreCreateFlags));
+    *ptr += sizeof(VkSemaphoreCreateFlags);
+}
+
+void reservedmarshal_VkEventCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkEventCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkEventCreateFlags*)&forMarshaling->flags, sizeof(VkEventCreateFlags));
+    *ptr += sizeof(VkEventCreateFlags);
+}
+
+void reservedmarshal_VkQueryPoolCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkQueryPoolCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkQueryPoolCreateFlags*)&forMarshaling->flags, sizeof(VkQueryPoolCreateFlags));
+    *ptr += sizeof(VkQueryPoolCreateFlags);
+    memcpy(*ptr, (VkQueryType*)&forMarshaling->queryType, sizeof(VkQueryType));
+    *ptr += sizeof(VkQueryType);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->queryCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkQueryPipelineStatisticFlags*)&forMarshaling->pipelineStatistics, sizeof(VkQueryPipelineStatisticFlags));
+    *ptr += sizeof(VkQueryPipelineStatisticFlags);
+}
+
+void reservedmarshal_VkBufferCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBufferCreateFlags*)&forMarshaling->flags, sizeof(VkBufferCreateFlags));
+    *ptr += sizeof(VkBufferCreateFlags);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkBufferUsageFlags*)&forMarshaling->usage, sizeof(VkBufferUsageFlags));
+    *ptr += sizeof(VkBufferUsageFlags);
+    memcpy(*ptr, (VkSharingMode*)&forMarshaling->sharingMode, sizeof(VkSharingMode));
+    *ptr += sizeof(VkSharingMode);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->queueFamilyIndexCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pQueueFamilyIndices;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pQueueFamilyIndices)
+    {
+        memcpy(*ptr, (const uint32_t*)forMarshaling->pQueueFamilyIndices, forMarshaling->queueFamilyIndexCount * sizeof(const uint32_t));
+        *ptr += forMarshaling->queueFamilyIndexCount * sizeof(const uint32_t);
+    }
+}
+
+void reservedmarshal_VkBufferViewCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferViewCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBufferViewCreateFlags*)&forMarshaling->flags, sizeof(VkBufferViewCreateFlags));
+    *ptr += sizeof(VkBufferViewCreateFlags);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&forMarshaling->buffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkFormat*)&forMarshaling->format, sizeof(VkFormat));
+    *ptr += sizeof(VkFormat);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->range, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkImageCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkImageCreateFlags*)&forMarshaling->flags, sizeof(VkImageCreateFlags));
+    *ptr += sizeof(VkImageCreateFlags);
+    memcpy(*ptr, (VkImageType*)&forMarshaling->imageType, sizeof(VkImageType));
+    *ptr += sizeof(VkImageType);
+    memcpy(*ptr, (VkFormat*)&forMarshaling->format, sizeof(VkFormat));
+    *ptr += sizeof(VkFormat);
+    reservedmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->extent), ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->mipLevels, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->arrayLayers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkSampleCountFlagBits*)&forMarshaling->samples, sizeof(VkSampleCountFlagBits));
+    *ptr += sizeof(VkSampleCountFlagBits);
+    memcpy(*ptr, (VkImageTiling*)&forMarshaling->tiling, sizeof(VkImageTiling));
+    *ptr += sizeof(VkImageTiling);
+    memcpy(*ptr, (VkImageUsageFlags*)&forMarshaling->usage, sizeof(VkImageUsageFlags));
+    *ptr += sizeof(VkImageUsageFlags);
+    memcpy(*ptr, (VkSharingMode*)&forMarshaling->sharingMode, sizeof(VkSharingMode));
+    *ptr += sizeof(VkSharingMode);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->queueFamilyIndexCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pQueueFamilyIndices;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pQueueFamilyIndices)
+    {
+        memcpy(*ptr, (const uint32_t*)forMarshaling->pQueueFamilyIndices, forMarshaling->queueFamilyIndexCount * sizeof(const uint32_t));
+        *ptr += forMarshaling->queueFamilyIndexCount * sizeof(const uint32_t);
+    }
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->initialLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+}
+
+void reservedmarshal_VkSubresourceLayout(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubresourceLayout* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->rowPitch, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->arrayPitch, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->depthPitch, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkComponentMapping(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkComponentMapping* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkComponentSwizzle*)&forMarshaling->r, sizeof(VkComponentSwizzle));
+    *ptr += sizeof(VkComponentSwizzle);
+    memcpy(*ptr, (VkComponentSwizzle*)&forMarshaling->g, sizeof(VkComponentSwizzle));
+    *ptr += sizeof(VkComponentSwizzle);
+    memcpy(*ptr, (VkComponentSwizzle*)&forMarshaling->b, sizeof(VkComponentSwizzle));
+    *ptr += sizeof(VkComponentSwizzle);
+    memcpy(*ptr, (VkComponentSwizzle*)&forMarshaling->a, sizeof(VkComponentSwizzle));
+    *ptr += sizeof(VkComponentSwizzle);
+}
+
+void reservedmarshal_VkImageViewCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageViewCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkImageViewCreateFlags*)&forMarshaling->flags, sizeof(VkImageViewCreateFlags));
+    *ptr += sizeof(VkImageViewCreateFlags);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&forMarshaling->image));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkImageViewType*)&forMarshaling->viewType, sizeof(VkImageViewType));
+    *ptr += sizeof(VkImageViewType);
+    memcpy(*ptr, (VkFormat*)&forMarshaling->format, sizeof(VkFormat));
+    *ptr += sizeof(VkFormat);
+    reservedmarshal_VkComponentMapping(vkStream, rootType, (VkComponentMapping*)(&forMarshaling->components), ptr);
+    reservedmarshal_VkImageSubresourceRange(vkStream, rootType, (VkImageSubresourceRange*)(&forMarshaling->subresourceRange), ptr);
+}
+
+void reservedmarshal_VkShaderModuleCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkShaderModuleCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkShaderModuleCreateFlags*)&forMarshaling->flags, sizeof(VkShaderModuleCreateFlags));
+    *ptr += sizeof(VkShaderModuleCreateFlags);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->codeSize;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pCode, (forMarshaling->codeSize / 4) * sizeof(const uint32_t));
+    *ptr += (forMarshaling->codeSize / 4) * sizeof(const uint32_t);
+}
+
+void reservedmarshal_VkPipelineCacheCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCacheCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineCacheCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineCacheCreateFlags));
+    *ptr += sizeof(VkPipelineCacheCreateFlags);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->initialDataSize;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    memcpy(*ptr, (const void*)forMarshaling->pInitialData, forMarshaling->initialDataSize * sizeof(const uint8_t));
+    *ptr += forMarshaling->initialDataSize * sizeof(const uint8_t);
+}
+
+void reservedmarshal_VkSpecializationMapEntry(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSpecializationMapEntry* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->constantID, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->offset, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->size;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+}
+
+void reservedmarshal_VkSpecializationInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSpecializationInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->mapEntryCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->mapEntryCount; ++i)
+    {
+        reservedmarshal_VkSpecializationMapEntry(vkStream, rootType, (const VkSpecializationMapEntry*)(forMarshaling->pMapEntries + i), ptr);
+    }
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->dataSize;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    memcpy(*ptr, (const void*)forMarshaling->pData, forMarshaling->dataSize * sizeof(const uint8_t));
+    *ptr += forMarshaling->dataSize * sizeof(const uint8_t);
+}
+
+void reservedmarshal_VkPipelineShaderStageCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineShaderStageCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineShaderStageCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineShaderStageCreateFlags));
+    *ptr += sizeof(VkPipelineShaderStageCreateFlags);
+    memcpy(*ptr, (VkShaderStageFlagBits*)&forMarshaling->stage, sizeof(VkShaderStageFlagBits));
+    *ptr += sizeof(VkShaderStageFlagBits);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkShaderModule((*&forMarshaling->module));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    {
+        uint32_t l = forMarshaling->pName ? strlen(forMarshaling->pName): 0;
+        memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+        android::base::Stream::toBe32((uint8_t*)*ptr);
+        *ptr += sizeof(uint32_t);
+        memcpy(*ptr, (char*)forMarshaling->pName, l);
+        *ptr += l;
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pSpecializationInfo;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pSpecializationInfo)
+    {
+        reservedmarshal_VkSpecializationInfo(vkStream, rootType, (const VkSpecializationInfo*)(forMarshaling->pSpecializationInfo), ptr);
+    }
+}
+
+void reservedmarshal_VkComputePipelineCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkComputePipelineCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineCreateFlags));
+    *ptr += sizeof(VkPipelineCreateFlags);
+    reservedmarshal_VkPipelineShaderStageCreateInfo(vkStream, rootType, (VkPipelineShaderStageCreateInfo*)(&forMarshaling->stage), ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPipelineLayout((*&forMarshaling->layout));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipeline((*&forMarshaling->basePipelineHandle));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (int32_t*)&forMarshaling->basePipelineIndex, sizeof(int32_t));
+    *ptr += sizeof(int32_t);
+}
+
+void reservedmarshal_VkVertexInputBindingDescription(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkVertexInputBindingDescription* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->binding, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->stride, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkVertexInputRate*)&forMarshaling->inputRate, sizeof(VkVertexInputRate));
+    *ptr += sizeof(VkVertexInputRate);
+}
+
+void reservedmarshal_VkVertexInputAttributeDescription(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkVertexInputAttributeDescription* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->location, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->binding, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkFormat*)&forMarshaling->format, sizeof(VkFormat));
+    *ptr += sizeof(VkFormat);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->offset, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPipelineVertexInputStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineVertexInputStateCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineVertexInputStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineVertexInputStateCreateFlags));
+    *ptr += sizeof(VkPipelineVertexInputStateCreateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->vertexBindingDescriptionCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->vertexBindingDescriptionCount; ++i)
+    {
+        reservedmarshal_VkVertexInputBindingDescription(vkStream, rootType, (const VkVertexInputBindingDescription*)(forMarshaling->pVertexBindingDescriptions + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->vertexAttributeDescriptionCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->vertexAttributeDescriptionCount; ++i)
+    {
+        reservedmarshal_VkVertexInputAttributeDescription(vkStream, rootType, (const VkVertexInputAttributeDescription*)(forMarshaling->pVertexAttributeDescriptions + i), ptr);
+    }
+}
+
+void reservedmarshal_VkPipelineInputAssemblyStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineInputAssemblyStateCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineInputAssemblyStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineInputAssemblyStateCreateFlags));
+    *ptr += sizeof(VkPipelineInputAssemblyStateCreateFlags);
+    memcpy(*ptr, (VkPrimitiveTopology*)&forMarshaling->topology, sizeof(VkPrimitiveTopology));
+    *ptr += sizeof(VkPrimitiveTopology);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->primitiveRestartEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPipelineTessellationStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineTessellationStateCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineTessellationStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineTessellationStateCreateFlags));
+    *ptr += sizeof(VkPipelineTessellationStateCreateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->patchControlPoints, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkViewport(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkViewport* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (float*)&forMarshaling->x, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->y, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->width, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->height, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->minDepth, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->maxDepth, sizeof(float));
+    *ptr += sizeof(float);
+}
+
+void reservedmarshal_VkPipelineViewportStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportStateCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineViewportStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineViewportStateCreateFlags));
+    *ptr += sizeof(VkPipelineViewportStateCreateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->viewportCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pViewports;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pViewports)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->viewportCount; ++i)
+        {
+            reservedmarshal_VkViewport(vkStream, rootType, (const VkViewport*)(forMarshaling->pViewports + i), ptr);
+        }
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->scissorCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pScissors;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pScissors)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->scissorCount; ++i)
+        {
+            reservedmarshal_VkRect2D(vkStream, rootType, (const VkRect2D*)(forMarshaling->pScissors + i), ptr);
+        }
+    }
+}
+
+void reservedmarshal_VkPipelineRasterizationStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineRasterizationStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineRasterizationStateCreateFlags));
+    *ptr += sizeof(VkPipelineRasterizationStateCreateFlags);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->depthClampEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->rasterizerDiscardEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkPolygonMode*)&forMarshaling->polygonMode, sizeof(VkPolygonMode));
+    *ptr += sizeof(VkPolygonMode);
+    memcpy(*ptr, (VkCullModeFlags*)&forMarshaling->cullMode, sizeof(VkCullModeFlags));
+    *ptr += sizeof(VkCullModeFlags);
+    memcpy(*ptr, (VkFrontFace*)&forMarshaling->frontFace, sizeof(VkFrontFace));
+    *ptr += sizeof(VkFrontFace);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->depthBiasEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (float*)&forMarshaling->depthBiasConstantFactor, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->depthBiasClamp, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->depthBiasSlopeFactor, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->lineWidth, sizeof(float));
+    *ptr += sizeof(float);
+}
+
+void reservedmarshal_VkPipelineMultisampleStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineMultisampleStateCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineMultisampleStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineMultisampleStateCreateFlags));
+    *ptr += sizeof(VkPipelineMultisampleStateCreateFlags);
+    memcpy(*ptr, (VkSampleCountFlagBits*)&forMarshaling->rasterizationSamples, sizeof(VkSampleCountFlagBits));
+    *ptr += sizeof(VkSampleCountFlagBits);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->sampleShadingEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (float*)&forMarshaling->minSampleShading, sizeof(float));
+    *ptr += sizeof(float);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pSampleMask;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pSampleMask)
+    {
+        memcpy(*ptr, (const VkSampleMask*)forMarshaling->pSampleMask, (((forMarshaling->rasterizationSamples) + 31) / 32) * sizeof(const VkSampleMask));
+        *ptr += (((forMarshaling->rasterizationSamples) + 31) / 32) * sizeof(const VkSampleMask);
+    }
+    memcpy(*ptr, (VkBool32*)&forMarshaling->alphaToCoverageEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->alphaToOneEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkStencilOpState(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkStencilOpState* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStencilOp*)&forMarshaling->failOp, sizeof(VkStencilOp));
+    *ptr += sizeof(VkStencilOp);
+    memcpy(*ptr, (VkStencilOp*)&forMarshaling->passOp, sizeof(VkStencilOp));
+    *ptr += sizeof(VkStencilOp);
+    memcpy(*ptr, (VkStencilOp*)&forMarshaling->depthFailOp, sizeof(VkStencilOp));
+    *ptr += sizeof(VkStencilOp);
+    memcpy(*ptr, (VkCompareOp*)&forMarshaling->compareOp, sizeof(VkCompareOp));
+    *ptr += sizeof(VkCompareOp);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->compareMask, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->writeMask, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->reference, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPipelineDepthStencilStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineDepthStencilStateCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineDepthStencilStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineDepthStencilStateCreateFlags));
+    *ptr += sizeof(VkPipelineDepthStencilStateCreateFlags);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->depthTestEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->depthWriteEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkCompareOp*)&forMarshaling->depthCompareOp, sizeof(VkCompareOp));
+    *ptr += sizeof(VkCompareOp);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->depthBoundsTestEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->stencilTestEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    reservedmarshal_VkStencilOpState(vkStream, rootType, (VkStencilOpState*)(&forMarshaling->front), ptr);
+    reservedmarshal_VkStencilOpState(vkStream, rootType, (VkStencilOpState*)(&forMarshaling->back), ptr);
+    memcpy(*ptr, (float*)&forMarshaling->minDepthBounds, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->maxDepthBounds, sizeof(float));
+    *ptr += sizeof(float);
+}
+
+void reservedmarshal_VkPipelineColorBlendAttachmentState(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineColorBlendAttachmentState* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkBool32*)&forMarshaling->blendEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBlendFactor*)&forMarshaling->srcColorBlendFactor, sizeof(VkBlendFactor));
+    *ptr += sizeof(VkBlendFactor);
+    memcpy(*ptr, (VkBlendFactor*)&forMarshaling->dstColorBlendFactor, sizeof(VkBlendFactor));
+    *ptr += sizeof(VkBlendFactor);
+    memcpy(*ptr, (VkBlendOp*)&forMarshaling->colorBlendOp, sizeof(VkBlendOp));
+    *ptr += sizeof(VkBlendOp);
+    memcpy(*ptr, (VkBlendFactor*)&forMarshaling->srcAlphaBlendFactor, sizeof(VkBlendFactor));
+    *ptr += sizeof(VkBlendFactor);
+    memcpy(*ptr, (VkBlendFactor*)&forMarshaling->dstAlphaBlendFactor, sizeof(VkBlendFactor));
+    *ptr += sizeof(VkBlendFactor);
+    memcpy(*ptr, (VkBlendOp*)&forMarshaling->alphaBlendOp, sizeof(VkBlendOp));
+    *ptr += sizeof(VkBlendOp);
+    memcpy(*ptr, (VkColorComponentFlags*)&forMarshaling->colorWriteMask, sizeof(VkColorComponentFlags));
+    *ptr += sizeof(VkColorComponentFlags);
+}
+
+void reservedmarshal_VkPipelineColorBlendStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineColorBlendStateCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineColorBlendStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineColorBlendStateCreateFlags));
+    *ptr += sizeof(VkPipelineColorBlendStateCreateFlags);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->logicOpEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkLogicOp*)&forMarshaling->logicOp, sizeof(VkLogicOp));
+    *ptr += sizeof(VkLogicOp);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->attachmentCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->attachmentCount; ++i)
+    {
+        reservedmarshal_VkPipelineColorBlendAttachmentState(vkStream, rootType, (const VkPipelineColorBlendAttachmentState*)(forMarshaling->pAttachments + i), ptr);
+    }
+    memcpy(*ptr, (float*)forMarshaling->blendConstants, 4 * sizeof(float));
+    *ptr += 4 * sizeof(float);
+}
+
+void reservedmarshal_VkPipelineDynamicStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineDynamicStateCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineDynamicStateCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineDynamicStateCreateFlags));
+    *ptr += sizeof(VkPipelineDynamicStateCreateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->dynamicStateCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const VkDynamicState*)forMarshaling->pDynamicStates, forMarshaling->dynamicStateCount * sizeof(const VkDynamicState));
+    *ptr += forMarshaling->dynamicStateCount * sizeof(const VkDynamicState);
+}
+
+void reservedmarshal_VkGraphicsPipelineCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGraphicsPipelineCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    uint32_t hasRasterization = 1;
+    if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT)
+    {
+        hasRasterization = (((0 == forMarshaling->pRasterizationState)) ? (0) : (!((*(forMarshaling->pRasterizationState)).rasterizerDiscardEnable)));
+        uint32_t cgen_var_0 = (uint32_t)hasRasterization;
+        memcpy((*ptr), &cgen_var_0, 4);
+        android::base::Stream::toBe32((uint8_t*)(*ptr));
+        *ptr += 4;
+    }
+    uint32_t hasTessellation = 1;
+    if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT)
+    {
+        hasTessellation = arrayany(forMarshaling->pStages, 0, forMarshaling->stageCount, [](VkPipelineShaderStageCreateInfo s) { return ((s.stage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) || (s.stage == VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)); });
+        uint32_t cgen_var_0 = (uint32_t)hasTessellation;
+        memcpy((*ptr), &cgen_var_0, 4);
+        android::base::Stream::toBe32((uint8_t*)(*ptr));
+        *ptr += 4;
+    }
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineCreateFlags));
+    *ptr += sizeof(VkPipelineCreateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->stageCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->stageCount; ++i)
+    {
+        reservedmarshal_VkPipelineShaderStageCreateInfo(vkStream, rootType, (const VkPipelineShaderStageCreateInfo*)(forMarshaling->pStages + i), ptr);
+    }
+    // WARNING PTR CHECK
+    if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT)
+    {
+        uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pVertexInputState;
+        memcpy((*ptr), &cgen_var_0, 8);
+        android::base::Stream::toBe64((uint8_t*)(*ptr));
+        *ptr += 8;
+    }
+    if ((!(vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || forMarshaling->pVertexInputState))
+    {
+        reservedmarshal_VkPipelineVertexInputStateCreateInfo(vkStream, rootType, (const VkPipelineVertexInputStateCreateInfo*)(forMarshaling->pVertexInputState), ptr);
+    }
+    // WARNING PTR CHECK
+    if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT)
+    {
+        uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pInputAssemblyState;
+        memcpy((*ptr), &cgen_var_0, 8);
+        android::base::Stream::toBe64((uint8_t*)(*ptr));
+        *ptr += 8;
+    }
+    if ((!(vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || forMarshaling->pInputAssemblyState))
+    {
+        reservedmarshal_VkPipelineInputAssemblyStateCreateInfo(vkStream, rootType, (const VkPipelineInputAssemblyStateCreateInfo*)(forMarshaling->pInputAssemblyState), ptr);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pTessellationState;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pTessellationState)
+    {
+        if (hasTessellation)
+        {
+            reservedmarshal_VkPipelineTessellationStateCreateInfo(vkStream, rootType, (const VkPipelineTessellationStateCreateInfo*)(forMarshaling->pTessellationState), ptr);
+        }
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pViewportState;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pViewportState)
+    {
+        if (hasRasterization)
+        {
+            reservedmarshal_VkPipelineViewportStateCreateInfo(vkStream, rootType, (const VkPipelineViewportStateCreateInfo*)(forMarshaling->pViewportState), ptr);
+        }
+    }
+    // WARNING PTR CHECK
+    if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT)
+    {
+        uint64_t cgen_var_1_0 = (uint64_t)(uintptr_t)forMarshaling->pRasterizationState;
+        memcpy((*ptr), &cgen_var_1_0, 8);
+        android::base::Stream::toBe64((uint8_t*)(*ptr));
+        *ptr += 8;
+    }
+    if ((!(vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || forMarshaling->pRasterizationState))
+    {
+        reservedmarshal_VkPipelineRasterizationStateCreateInfo(vkStream, rootType, (const VkPipelineRasterizationStateCreateInfo*)(forMarshaling->pRasterizationState), ptr);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)forMarshaling->pMultisampleState;
+    memcpy((*ptr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pMultisampleState)
+    {
+        if (hasRasterization)
+        {
+            reservedmarshal_VkPipelineMultisampleStateCreateInfo(vkStream, rootType, (const VkPipelineMultisampleStateCreateInfo*)(forMarshaling->pMultisampleState), ptr);
+        }
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)forMarshaling->pDepthStencilState;
+    memcpy((*ptr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pDepthStencilState)
+    {
+        if (hasRasterization)
+        {
+            reservedmarshal_VkPipelineDepthStencilStateCreateInfo(vkStream, rootType, (const VkPipelineDepthStencilStateCreateInfo*)(forMarshaling->pDepthStencilState), ptr);
+        }
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_4 = (uint64_t)(uintptr_t)forMarshaling->pColorBlendState;
+    memcpy((*ptr), &cgen_var_4, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pColorBlendState)
+    {
+        if (hasRasterization)
+        {
+            reservedmarshal_VkPipelineColorBlendStateCreateInfo(vkStream, rootType, (const VkPipelineColorBlendStateCreateInfo*)(forMarshaling->pColorBlendState), ptr);
+        }
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_5 = (uint64_t)(uintptr_t)forMarshaling->pDynamicState;
+    memcpy((*ptr), &cgen_var_5, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pDynamicState)
+    {
+        reservedmarshal_VkPipelineDynamicStateCreateInfo(vkStream, rootType, (const VkPipelineDynamicStateCreateInfo*)(forMarshaling->pDynamicState), ptr);
+    }
+    uint64_t cgen_var_6;
+    *&cgen_var_6 = get_host_u64_VkPipelineLayout((*&forMarshaling->layout));
+    memcpy(*ptr, (uint64_t*)&cgen_var_6, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_7;
+    *&cgen_var_7 = get_host_u64_VkRenderPass((*&forMarshaling->renderPass));
+    memcpy(*ptr, (uint64_t*)&cgen_var_7, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->subpass, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    uint64_t cgen_var_8;
+    *&cgen_var_8 = get_host_u64_VkPipeline((*&forMarshaling->basePipelineHandle));
+    memcpy(*ptr, (uint64_t*)&cgen_var_8, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (int32_t*)&forMarshaling->basePipelineIndex, sizeof(int32_t));
+    *ptr += sizeof(int32_t);
+}
+
+void reservedmarshal_VkPushConstantRange(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPushConstantRange* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkShaderStageFlags*)&forMarshaling->stageFlags, sizeof(VkShaderStageFlags));
+    *ptr += sizeof(VkShaderStageFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->offset, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->size, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPipelineLayoutCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineLayoutCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineLayoutCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineLayoutCreateFlags));
+    *ptr += sizeof(VkPipelineLayoutCreateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->setLayoutCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->setLayoutCount)
+    {
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->setLayoutCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkDescriptorSetLayout(forMarshaling->pSetLayouts[k]);
+                memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->setLayoutCount;
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->pushConstantRangeCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->pushConstantRangeCount; ++i)
+    {
+        reservedmarshal_VkPushConstantRange(vkStream, rootType, (const VkPushConstantRange*)(forMarshaling->pPushConstantRanges + i), ptr);
+    }
+}
+
+void reservedmarshal_VkSamplerCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSamplerCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkSamplerCreateFlags*)&forMarshaling->flags, sizeof(VkSamplerCreateFlags));
+    *ptr += sizeof(VkSamplerCreateFlags);
+    memcpy(*ptr, (VkFilter*)&forMarshaling->magFilter, sizeof(VkFilter));
+    *ptr += sizeof(VkFilter);
+    memcpy(*ptr, (VkFilter*)&forMarshaling->minFilter, sizeof(VkFilter));
+    *ptr += sizeof(VkFilter);
+    memcpy(*ptr, (VkSamplerMipmapMode*)&forMarshaling->mipmapMode, sizeof(VkSamplerMipmapMode));
+    *ptr += sizeof(VkSamplerMipmapMode);
+    memcpy(*ptr, (VkSamplerAddressMode*)&forMarshaling->addressModeU, sizeof(VkSamplerAddressMode));
+    *ptr += sizeof(VkSamplerAddressMode);
+    memcpy(*ptr, (VkSamplerAddressMode*)&forMarshaling->addressModeV, sizeof(VkSamplerAddressMode));
+    *ptr += sizeof(VkSamplerAddressMode);
+    memcpy(*ptr, (VkSamplerAddressMode*)&forMarshaling->addressModeW, sizeof(VkSamplerAddressMode));
+    *ptr += sizeof(VkSamplerAddressMode);
+    memcpy(*ptr, (float*)&forMarshaling->mipLodBias, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->anisotropyEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (float*)&forMarshaling->maxAnisotropy, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->compareEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkCompareOp*)&forMarshaling->compareOp, sizeof(VkCompareOp));
+    *ptr += sizeof(VkCompareOp);
+    memcpy(*ptr, (float*)&forMarshaling->minLod, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->maxLod, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (VkBorderColor*)&forMarshaling->borderColor, sizeof(VkBorderColor));
+    *ptr += sizeof(VkBorderColor);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->unnormalizedCoordinates, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkCopyDescriptorSet(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyDescriptorSet* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDescriptorSet((*&forMarshaling->srcSet));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->srcBinding, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->srcArrayElement, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDescriptorSet((*&forMarshaling->dstSet));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->dstBinding, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->dstArrayElement, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->descriptorCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkDescriptorBufferInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorBufferInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&forMarshaling->buffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->range, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkDescriptorImageInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorImageInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkSampler((*&forMarshaling->sampler));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImageView((*&forMarshaling->imageView));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->imageLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+}
+
+void reservedmarshal_VkDescriptorPoolSize(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorPoolSize* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkDescriptorType*)&forMarshaling->type, sizeof(VkDescriptorType));
+    *ptr += sizeof(VkDescriptorType);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->descriptorCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkDescriptorPoolCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorPoolCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDescriptorPoolCreateFlags*)&forMarshaling->flags, sizeof(VkDescriptorPoolCreateFlags));
+    *ptr += sizeof(VkDescriptorPoolCreateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxSets, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->poolSizeCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->poolSizeCount; ++i)
+    {
+        reservedmarshal_VkDescriptorPoolSize(vkStream, rootType, (const VkDescriptorPoolSize*)(forMarshaling->pPoolSizes + i), ptr);
+    }
+}
+
+void reservedmarshal_VkDescriptorSetAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetAllocateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDescriptorPool((*&forMarshaling->descriptorPool));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->descriptorSetCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->descriptorSetCount)
+    {
+        uint8_t* cgen_var_1_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->descriptorSetCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkDescriptorSetLayout(forMarshaling->pSetLayouts[k]);
+                memcpy(cgen_var_1_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->descriptorSetCount;
+    }
+}
+
+void reservedmarshal_VkDescriptorSetLayoutBinding(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutBinding* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->binding, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkDescriptorType*)&forMarshaling->descriptorType, sizeof(VkDescriptorType));
+    *ptr += sizeof(VkDescriptorType);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->descriptorCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkShaderStageFlags*)&forMarshaling->stageFlags, sizeof(VkShaderStageFlags));
+    *ptr += sizeof(VkShaderStageFlags);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pImmutableSamplers;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pImmutableSamplers)
+    {
+        if (forMarshaling->descriptorCount)
+        {
+            uint8_t* cgen_var_0_0_ptr = (uint8_t*)(*ptr);
+            if (forMarshaling)
+            {
+                for (uint32_t k = 0; k < forMarshaling->descriptorCount; ++k)
+                {
+                    uint64_t tmpval = get_host_u64_VkSampler(forMarshaling->pImmutableSamplers[k]);
+                    memcpy(cgen_var_0_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+                }
+            }
+            *ptr += 8 * forMarshaling->descriptorCount;
+        }
+    }
+}
+
+void reservedmarshal_VkDescriptorSetLayoutCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDescriptorSetLayoutCreateFlags*)&forMarshaling->flags, sizeof(VkDescriptorSetLayoutCreateFlags));
+    *ptr += sizeof(VkDescriptorSetLayoutCreateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->bindingCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->bindingCount; ++i)
+    {
+        reservedmarshal_VkDescriptorSetLayoutBinding(vkStream, rootType, (const VkDescriptorSetLayoutBinding*)(forMarshaling->pBindings + i), ptr);
+    }
+}
+
+void reservedmarshal_VkWriteDescriptorSet(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWriteDescriptorSet* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDescriptorSet((*&forMarshaling->dstSet));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->dstBinding, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->dstArrayElement, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->descriptorCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkDescriptorType*)&forMarshaling->descriptorType, sizeof(VkDescriptorType));
+    *ptr += sizeof(VkDescriptorType);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pImageInfo;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pImageInfo)
+    {
+        if ((!(vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || ((VK_DESCRIPTOR_TYPE_SAMPLER == forMarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == forMarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE == forMarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_STORAGE_IMAGE == forMarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT == forMarshaling->descriptorType))))
+        {
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->descriptorCount; ++i)
+            {
+                reservedmarshal_VkDescriptorImageInfo(vkStream, rootType, (const VkDescriptorImageInfo*)(forMarshaling->pImageInfo + i), ptr);
+            }
+        }
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)forMarshaling->pBufferInfo;
+    memcpy((*ptr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pBufferInfo)
+    {
+        if ((!(vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || ((VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == forMarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC == forMarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER == forMarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC == forMarshaling->descriptorType))))
+        {
+            for (uint32_t i = 0; i < (uint32_t)forMarshaling->descriptorCount; ++i)
+            {
+                reservedmarshal_VkDescriptorBufferInfo(vkStream, rootType, (const VkDescriptorBufferInfo*)(forMarshaling->pBufferInfo + i), ptr);
+            }
+        }
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_3 = (uint64_t)(uintptr_t)forMarshaling->pTexelBufferView;
+    memcpy((*ptr), &cgen_var_3, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pTexelBufferView)
+    {
+        if ((!(vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT) || ((VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == forMarshaling->descriptorType) || (VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER == forMarshaling->descriptorType))))
+        {
+            if (forMarshaling->descriptorCount)
+            {
+                uint8_t* cgen_var_3_0_ptr = (uint8_t*)(*ptr);
+                if (forMarshaling)
+                {
+                    for (uint32_t k = 0; k < forMarshaling->descriptorCount; ++k)
+                    {
+                        uint64_t tmpval = get_host_u64_VkBufferView(forMarshaling->pTexelBufferView[k]);
+                        memcpy(cgen_var_3_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+                    }
+                }
+                *ptr += 8 * forMarshaling->descriptorCount;
+            }
+        }
+    }
+}
+
+void reservedmarshal_VkAttachmentDescription(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentDescription* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkAttachmentDescriptionFlags*)&forMarshaling->flags, sizeof(VkAttachmentDescriptionFlags));
+    *ptr += sizeof(VkAttachmentDescriptionFlags);
+    memcpy(*ptr, (VkFormat*)&forMarshaling->format, sizeof(VkFormat));
+    *ptr += sizeof(VkFormat);
+    memcpy(*ptr, (VkSampleCountFlagBits*)&forMarshaling->samples, sizeof(VkSampleCountFlagBits));
+    *ptr += sizeof(VkSampleCountFlagBits);
+    memcpy(*ptr, (VkAttachmentLoadOp*)&forMarshaling->loadOp, sizeof(VkAttachmentLoadOp));
+    *ptr += sizeof(VkAttachmentLoadOp);
+    memcpy(*ptr, (VkAttachmentStoreOp*)&forMarshaling->storeOp, sizeof(VkAttachmentStoreOp));
+    *ptr += sizeof(VkAttachmentStoreOp);
+    memcpy(*ptr, (VkAttachmentLoadOp*)&forMarshaling->stencilLoadOp, sizeof(VkAttachmentLoadOp));
+    *ptr += sizeof(VkAttachmentLoadOp);
+    memcpy(*ptr, (VkAttachmentStoreOp*)&forMarshaling->stencilStoreOp, sizeof(VkAttachmentStoreOp));
+    *ptr += sizeof(VkAttachmentStoreOp);
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->initialLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->finalLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+}
+
+void reservedmarshal_VkAttachmentReference(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentReference* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->attachment, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->layout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+}
+
+void reservedmarshal_VkFramebufferCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFramebufferCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkFramebufferCreateFlags*)&forMarshaling->flags, sizeof(VkFramebufferCreateFlags));
+    *ptr += sizeof(VkFramebufferCreateFlags);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkRenderPass((*&forMarshaling->renderPass));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->attachmentCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->attachmentCount)
+    {
+        uint8_t* cgen_var_1_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->attachmentCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkImageView(forMarshaling->pAttachments[k]);
+                memcpy(cgen_var_1_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->attachmentCount;
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->width, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->height, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->layers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkSubpassDescription(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassDescription* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkSubpassDescriptionFlags*)&forMarshaling->flags, sizeof(VkSubpassDescriptionFlags));
+    *ptr += sizeof(VkSubpassDescriptionFlags);
+    memcpy(*ptr, (VkPipelineBindPoint*)&forMarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    *ptr += sizeof(VkPipelineBindPoint);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->inputAttachmentCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->inputAttachmentCount; ++i)
+    {
+        reservedmarshal_VkAttachmentReference(vkStream, rootType, (const VkAttachmentReference*)(forMarshaling->pInputAttachments + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->colorAttachmentCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->colorAttachmentCount; ++i)
+    {
+        reservedmarshal_VkAttachmentReference(vkStream, rootType, (const VkAttachmentReference*)(forMarshaling->pColorAttachments + i), ptr);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pResolveAttachments;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pResolveAttachments)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->colorAttachmentCount; ++i)
+        {
+            reservedmarshal_VkAttachmentReference(vkStream, rootType, (const VkAttachmentReference*)(forMarshaling->pResolveAttachments + i), ptr);
+        }
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pDepthStencilAttachment;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pDepthStencilAttachment)
+    {
+        reservedmarshal_VkAttachmentReference(vkStream, rootType, (const VkAttachmentReference*)(forMarshaling->pDepthStencilAttachment), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->preserveAttachmentCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pPreserveAttachments, forMarshaling->preserveAttachmentCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->preserveAttachmentCount * sizeof(const uint32_t);
+}
+
+void reservedmarshal_VkSubpassDependency(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassDependency* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->srcSubpass, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->dstSubpass, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkPipelineStageFlags*)&forMarshaling->srcStageMask, sizeof(VkPipelineStageFlags));
+    *ptr += sizeof(VkPipelineStageFlags);
+    memcpy(*ptr, (VkPipelineStageFlags*)&forMarshaling->dstStageMask, sizeof(VkPipelineStageFlags));
+    *ptr += sizeof(VkPipelineStageFlags);
+    memcpy(*ptr, (VkAccessFlags*)&forMarshaling->srcAccessMask, sizeof(VkAccessFlags));
+    *ptr += sizeof(VkAccessFlags);
+    memcpy(*ptr, (VkAccessFlags*)&forMarshaling->dstAccessMask, sizeof(VkAccessFlags));
+    *ptr += sizeof(VkAccessFlags);
+    memcpy(*ptr, (VkDependencyFlags*)&forMarshaling->dependencyFlags, sizeof(VkDependencyFlags));
+    *ptr += sizeof(VkDependencyFlags);
+}
+
+void reservedmarshal_VkRenderPassCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkRenderPassCreateFlags*)&forMarshaling->flags, sizeof(VkRenderPassCreateFlags));
+    *ptr += sizeof(VkRenderPassCreateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->attachmentCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->attachmentCount; ++i)
+    {
+        reservedmarshal_VkAttachmentDescription(vkStream, rootType, (const VkAttachmentDescription*)(forMarshaling->pAttachments + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->subpassCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->subpassCount; ++i)
+    {
+        reservedmarshal_VkSubpassDescription(vkStream, rootType, (const VkSubpassDescription*)(forMarshaling->pSubpasses + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->dependencyCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->dependencyCount; ++i)
+    {
+        reservedmarshal_VkSubpassDependency(vkStream, rootType, (const VkSubpassDependency*)(forMarshaling->pDependencies + i), ptr);
+    }
+}
+
+void reservedmarshal_VkCommandPoolCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCommandPoolCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkCommandPoolCreateFlags*)&forMarshaling->flags, sizeof(VkCommandPoolCreateFlags));
+    *ptr += sizeof(VkCommandPoolCreateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->queueFamilyIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkCommandBufferAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCommandBufferAllocateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkCommandPool((*&forMarshaling->commandPool));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkCommandBufferLevel*)&forMarshaling->level, sizeof(VkCommandBufferLevel));
+    *ptr += sizeof(VkCommandBufferLevel);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->commandBufferCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkCommandBufferInheritanceInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkRenderPass((*&forMarshaling->renderPass));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->subpass, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkFramebuffer((*&forMarshaling->framebuffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkBool32*)&forMarshaling->occlusionQueryEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkQueryControlFlags*)&forMarshaling->queryFlags, sizeof(VkQueryControlFlags));
+    *ptr += sizeof(VkQueryControlFlags);
+    memcpy(*ptr, (VkQueryPipelineStatisticFlags*)&forMarshaling->pipelineStatistics, sizeof(VkQueryPipelineStatisticFlags));
+    *ptr += sizeof(VkQueryPipelineStatisticFlags);
+}
+
+void reservedmarshal_VkCommandBufferBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCommandBufferBeginInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkCommandBufferUsageFlags*)&forMarshaling->flags, sizeof(VkCommandBufferUsageFlags));
+    *ptr += sizeof(VkCommandBufferUsageFlags);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pInheritanceInfo;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pInheritanceInfo)
+    {
+        reservedmarshal_VkCommandBufferInheritanceInfo(vkStream, rootType, (const VkCommandBufferInheritanceInfo*)(forMarshaling->pInheritanceInfo), ptr);
+    }
+}
+
+void reservedmarshal_VkBufferCopy(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferCopy* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->srcOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->dstOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkImageSubresourceLayers(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageSubresourceLayers* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkImageAspectFlags*)&forMarshaling->aspectMask, sizeof(VkImageAspectFlags));
+    *ptr += sizeof(VkImageAspectFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->mipLevel, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->baseArrayLayer, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->layerCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkBufferImageCopy(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferImageCopy* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->bufferOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->bufferRowLength, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->bufferImageHeight, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    reservedmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->imageSubresource), ptr);
+    reservedmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->imageOffset), ptr);
+    reservedmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->imageExtent), ptr);
+}
+
+void reservedmarshal_VkClearColorValue(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkClearColorValue* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (float*)forMarshaling->float32, 4 * sizeof(float));
+    *ptr += 4 * sizeof(float);
+}
+
+void reservedmarshal_VkClearDepthStencilValue(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkClearDepthStencilValue* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (float*)&forMarshaling->depth, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->stencil, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkClearValue(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkClearValue* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    reservedmarshal_VkClearColorValue(vkStream, rootType, (VkClearColorValue*)(&forMarshaling->color), ptr);
+}
+
+void reservedmarshal_VkClearAttachment(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkClearAttachment* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkImageAspectFlags*)&forMarshaling->aspectMask, sizeof(VkImageAspectFlags));
+    *ptr += sizeof(VkImageAspectFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->colorAttachment, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    reservedmarshal_VkClearValue(vkStream, rootType, (VkClearValue*)(&forMarshaling->clearValue), ptr);
+}
+
+void reservedmarshal_VkClearRect(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkClearRect* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    reservedmarshal_VkRect2D(vkStream, rootType, (VkRect2D*)(&forMarshaling->rect), ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->baseArrayLayer, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->layerCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkImageBlit(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageBlit* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    reservedmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->srcSubresource), ptr);
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        reservedmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(forMarshaling->srcOffsets + i), ptr);
+    }
+    reservedmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->dstSubresource), ptr);
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        reservedmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(forMarshaling->dstOffsets + i), ptr);
+    }
+}
+
+void reservedmarshal_VkImageCopy(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageCopy* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    reservedmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->srcSubresource), ptr);
+    reservedmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->srcOffset), ptr);
+    reservedmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->dstSubresource), ptr);
+    reservedmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->dstOffset), ptr);
+    reservedmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->extent), ptr);
+}
+
+void reservedmarshal_VkImageResolve(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageResolve* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    reservedmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->srcSubresource), ptr);
+    reservedmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->srcOffset), ptr);
+    reservedmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->dstSubresource), ptr);
+    reservedmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->dstOffset), ptr);
+    reservedmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->extent), ptr);
+}
+
+void reservedmarshal_VkRenderPassBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassBeginInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkRenderPass((*&forMarshaling->renderPass));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkFramebuffer((*&forMarshaling->framebuffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    reservedmarshal_VkRect2D(vkStream, rootType, (VkRect2D*)(&forMarshaling->renderArea), ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->clearValueCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)forMarshaling->pClearValues;
+    memcpy((*ptr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pClearValues)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->clearValueCount; ++i)
+        {
+            reservedmarshal_VkClearValue(vkStream, rootType, (const VkClearValue*)(forMarshaling->pClearValues + i), ptr);
+        }
+    }
+}
+
+#endif
+#ifdef VK_VERSION_1_1
+void reservedmarshal_VkPhysicalDeviceSubgroupProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->subgroupSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkShaderStageFlags*)&forMarshaling->supportedStages, sizeof(VkShaderStageFlags));
+    *ptr += sizeof(VkShaderStageFlags);
+    memcpy(*ptr, (VkSubgroupFeatureFlags*)&forMarshaling->supportedOperations, sizeof(VkSubgroupFeatureFlags));
+    *ptr += sizeof(VkSubgroupFeatureFlags);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->quadOperationsInAllStages, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkBindBufferMemoryInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindBufferMemoryInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&forMarshaling->buffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDeviceMemory((*&forMarshaling->memory));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->memoryOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkBindImageMemoryInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindImageMemoryInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&forMarshaling->image));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDeviceMemory((*&forMarshaling->memory));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->memoryOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkPhysicalDevice16BitStorageFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevice16BitStorageFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->storageBuffer16BitAccess, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->uniformAndStorageBuffer16BitAccess, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->storagePushConstant16, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->storageInputOutput16, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkMemoryDedicatedRequirements(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryDedicatedRequirements* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->prefersDedicatedAllocation, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->requiresDedicatedAllocation, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkMemoryDedicatedAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryDedicatedAllocateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&forMarshaling->image));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&forMarshaling->buffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+}
+
+void reservedmarshal_VkMemoryAllocateFlagsInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryAllocateFlagsInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkMemoryAllocateFlags*)&forMarshaling->flags, sizeof(VkMemoryAllocateFlags));
+    *ptr += sizeof(VkMemoryAllocateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->deviceMask, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkDeviceGroupRenderPassBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceGroupRenderPassBeginInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->deviceMask, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->deviceRenderAreaCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->deviceRenderAreaCount; ++i)
+    {
+        reservedmarshal_VkRect2D(vkStream, rootType, (const VkRect2D*)(forMarshaling->pDeviceRenderAreas + i), ptr);
+    }
+}
+
+void reservedmarshal_VkDeviceGroupCommandBufferBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceGroupCommandBufferBeginInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->deviceMask, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkDeviceGroupSubmitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceGroupSubmitInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->waitSemaphoreCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pWaitSemaphoreDeviceIndices, forMarshaling->waitSemaphoreCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->waitSemaphoreCount * sizeof(const uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->commandBufferCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pCommandBufferDeviceMasks, forMarshaling->commandBufferCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->commandBufferCount * sizeof(const uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->signalSemaphoreCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pSignalSemaphoreDeviceIndices, forMarshaling->signalSemaphoreCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->signalSemaphoreCount * sizeof(const uint32_t);
+}
+
+void reservedmarshal_VkDeviceGroupBindSparseInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceGroupBindSparseInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->resourceDeviceIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->memoryDeviceIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkBindBufferMemoryDeviceGroupInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindBufferMemoryDeviceGroupInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->deviceIndexCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pDeviceIndices, forMarshaling->deviceIndexCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->deviceIndexCount * sizeof(const uint32_t);
+}
+
+void reservedmarshal_VkBindImageMemoryDeviceGroupInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindImageMemoryDeviceGroupInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->deviceIndexCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pDeviceIndices, forMarshaling->deviceIndexCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->deviceIndexCount * sizeof(const uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->splitInstanceBindRegionCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->splitInstanceBindRegionCount; ++i)
+    {
+        reservedmarshal_VkRect2D(vkStream, rootType, (const VkRect2D*)(forMarshaling->pSplitInstanceBindRegions + i), ptr);
+    }
+}
+
+void reservedmarshal_VkPhysicalDeviceGroupProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceGroupProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->physicalDeviceCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkPhysicalDevice*)forMarshaling->physicalDevices, VK_MAX_DEVICE_GROUP_SIZE * sizeof(VkPhysicalDevice));
+    *ptr += VK_MAX_DEVICE_GROUP_SIZE * sizeof(VkPhysicalDevice);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->subsetAllocation, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkDeviceGroupDeviceCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceGroupDeviceCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->physicalDeviceCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->physicalDeviceCount)
+    {
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->physicalDeviceCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkPhysicalDevice(forMarshaling->pPhysicalDevices[k]);
+                memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->physicalDeviceCount;
+    }
+}
+
+void reservedmarshal_VkBufferMemoryRequirementsInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferMemoryRequirementsInfo2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&forMarshaling->buffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+}
+
+void reservedmarshal_VkImageMemoryRequirementsInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageMemoryRequirementsInfo2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&forMarshaling->image));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+}
+
+void reservedmarshal_VkImageSparseMemoryRequirementsInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageSparseMemoryRequirementsInfo2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&forMarshaling->image));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+}
+
+void reservedmarshal_VkMemoryRequirements2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryRequirements2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkMemoryRequirements(vkStream, rootType, (VkMemoryRequirements*)(&forMarshaling->memoryRequirements), ptr);
+}
+
+void reservedmarshal_VkSparseImageMemoryRequirements2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageMemoryRequirements2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkSparseImageMemoryRequirements(vkStream, rootType, (VkSparseImageMemoryRequirements*)(&forMarshaling->memoryRequirements), ptr);
+}
+
+void reservedmarshal_VkPhysicalDeviceFeatures2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFeatures2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkPhysicalDeviceFeatures(vkStream, rootType, (VkPhysicalDeviceFeatures*)(&forMarshaling->features), ptr);
+}
+
+void reservedmarshal_VkPhysicalDeviceProperties2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProperties2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkPhysicalDeviceProperties(vkStream, rootType, (VkPhysicalDeviceProperties*)(&forMarshaling->properties), ptr);
+}
+
+void reservedmarshal_VkFormatProperties2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFormatProperties2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkFormatProperties(vkStream, rootType, (VkFormatProperties*)(&forMarshaling->formatProperties), ptr);
+}
+
+void reservedmarshal_VkImageFormatProperties2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageFormatProperties2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkImageFormatProperties(vkStream, rootType, (VkImageFormatProperties*)(&forMarshaling->imageFormatProperties), ptr);
+}
+
+void reservedmarshal_VkPhysicalDeviceImageFormatInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageFormatInfo2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkFormat*)&forMarshaling->format, sizeof(VkFormat));
+    *ptr += sizeof(VkFormat);
+    memcpy(*ptr, (VkImageType*)&forMarshaling->type, sizeof(VkImageType));
+    *ptr += sizeof(VkImageType);
+    memcpy(*ptr, (VkImageTiling*)&forMarshaling->tiling, sizeof(VkImageTiling));
+    *ptr += sizeof(VkImageTiling);
+    memcpy(*ptr, (VkImageUsageFlags*)&forMarshaling->usage, sizeof(VkImageUsageFlags));
+    *ptr += sizeof(VkImageUsageFlags);
+    memcpy(*ptr, (VkImageCreateFlags*)&forMarshaling->flags, sizeof(VkImageCreateFlags));
+    *ptr += sizeof(VkImageCreateFlags);
+}
+
+void reservedmarshal_VkQueueFamilyProperties2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkQueueFamilyProperties2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkQueueFamilyProperties(vkStream, rootType, (VkQueueFamilyProperties*)(&forMarshaling->queueFamilyProperties), ptr);
+}
+
+void reservedmarshal_VkPhysicalDeviceMemoryProperties2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryProperties2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkPhysicalDeviceMemoryProperties(vkStream, rootType, (VkPhysicalDeviceMemoryProperties*)(&forMarshaling->memoryProperties), ptr);
+}
+
+void reservedmarshal_VkSparseImageFormatProperties2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageFormatProperties2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkSparseImageFormatProperties(vkStream, rootType, (VkSparseImageFormatProperties*)(&forMarshaling->properties), ptr);
+}
+
+void reservedmarshal_VkPhysicalDeviceSparseImageFormatInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSparseImageFormatInfo2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkFormat*)&forMarshaling->format, sizeof(VkFormat));
+    *ptr += sizeof(VkFormat);
+    memcpy(*ptr, (VkImageType*)&forMarshaling->type, sizeof(VkImageType));
+    *ptr += sizeof(VkImageType);
+    memcpy(*ptr, (VkSampleCountFlagBits*)&forMarshaling->samples, sizeof(VkSampleCountFlagBits));
+    *ptr += sizeof(VkSampleCountFlagBits);
+    memcpy(*ptr, (VkImageUsageFlags*)&forMarshaling->usage, sizeof(VkImageUsageFlags));
+    *ptr += sizeof(VkImageUsageFlags);
+    memcpy(*ptr, (VkImageTiling*)&forMarshaling->tiling, sizeof(VkImageTiling));
+    *ptr += sizeof(VkImageTiling);
+}
+
+void reservedmarshal_VkPhysicalDevicePointClippingProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePointClippingProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPointClippingBehavior*)&forMarshaling->pointClippingBehavior, sizeof(VkPointClippingBehavior));
+    *ptr += sizeof(VkPointClippingBehavior);
+}
+
+void reservedmarshal_VkInputAttachmentAspectReference(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkInputAttachmentAspectReference* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->subpass, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->inputAttachmentIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkImageAspectFlags*)&forMarshaling->aspectMask, sizeof(VkImageAspectFlags));
+    *ptr += sizeof(VkImageAspectFlags);
+}
+
+void reservedmarshal_VkRenderPassInputAttachmentAspectCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassInputAttachmentAspectCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->aspectReferenceCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->aspectReferenceCount; ++i)
+    {
+        reservedmarshal_VkInputAttachmentAspectReference(vkStream, rootType, (const VkInputAttachmentAspectReference*)(forMarshaling->pAspectReferences + i), ptr);
+    }
+}
+
+void reservedmarshal_VkImageViewUsageCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageViewUsageCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkImageUsageFlags*)&forMarshaling->usage, sizeof(VkImageUsageFlags));
+    *ptr += sizeof(VkImageUsageFlags);
+}
+
+void reservedmarshal_VkPipelineTessellationDomainOriginStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineTessellationDomainOriginStateCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkTessellationDomainOrigin*)&forMarshaling->domainOrigin, sizeof(VkTessellationDomainOrigin));
+    *ptr += sizeof(VkTessellationDomainOrigin);
+}
+
+void reservedmarshal_VkRenderPassMultiviewCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassMultiviewCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->subpassCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pViewMasks, forMarshaling->subpassCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->subpassCount * sizeof(const uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->dependencyCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const int32_t*)forMarshaling->pViewOffsets, forMarshaling->dependencyCount * sizeof(const int32_t));
+    *ptr += forMarshaling->dependencyCount * sizeof(const int32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->correlationMaskCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pCorrelationMasks, forMarshaling->correlationMaskCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->correlationMaskCount * sizeof(const uint32_t);
+}
+
+void reservedmarshal_VkPhysicalDeviceMultiviewFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMultiviewFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->multiview, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->multiviewGeometryShader, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->multiviewTessellationShader, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceMultiviewProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMultiviewProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxMultiviewViewCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxMultiviewInstanceIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPhysicalDeviceVariablePointersFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVariablePointersFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->variablePointersStorageBuffer, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->variablePointers, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceProtectedMemoryFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProtectedMemoryFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->protectedMemory, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceProtectedMemoryProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProtectedMemoryProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->protectedNoFault, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkDeviceQueueInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceQueueInfo2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceQueueCreateFlags*)&forMarshaling->flags, sizeof(VkDeviceQueueCreateFlags));
+    *ptr += sizeof(VkDeviceQueueCreateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->queueFamilyIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->queueIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkProtectedSubmitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkProtectedSubmitInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->protectedSubmit, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkSamplerYcbcrConversionCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSamplerYcbcrConversionCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkFormat*)&forMarshaling->format, sizeof(VkFormat));
+    *ptr += sizeof(VkFormat);
+    memcpy(*ptr, (VkSamplerYcbcrModelConversion*)&forMarshaling->ycbcrModel, sizeof(VkSamplerYcbcrModelConversion));
+    *ptr += sizeof(VkSamplerYcbcrModelConversion);
+    memcpy(*ptr, (VkSamplerYcbcrRange*)&forMarshaling->ycbcrRange, sizeof(VkSamplerYcbcrRange));
+    *ptr += sizeof(VkSamplerYcbcrRange);
+    reservedmarshal_VkComponentMapping(vkStream, rootType, (VkComponentMapping*)(&forMarshaling->components), ptr);
+    memcpy(*ptr, (VkChromaLocation*)&forMarshaling->xChromaOffset, sizeof(VkChromaLocation));
+    *ptr += sizeof(VkChromaLocation);
+    memcpy(*ptr, (VkChromaLocation*)&forMarshaling->yChromaOffset, sizeof(VkChromaLocation));
+    *ptr += sizeof(VkChromaLocation);
+    memcpy(*ptr, (VkFilter*)&forMarshaling->chromaFilter, sizeof(VkFilter));
+    *ptr += sizeof(VkFilter);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->forceExplicitReconstruction, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkSamplerYcbcrConversionInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSamplerYcbcrConversionInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkSamplerYcbcrConversion((*&forMarshaling->conversion));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+}
+
+void reservedmarshal_VkBindImagePlaneMemoryInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindImagePlaneMemoryInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkImageAspectFlagBits*)&forMarshaling->planeAspect, sizeof(VkImageAspectFlagBits));
+    *ptr += sizeof(VkImageAspectFlagBits);
+}
+
+void reservedmarshal_VkImagePlaneMemoryRequirementsInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImagePlaneMemoryRequirementsInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkImageAspectFlagBits*)&forMarshaling->planeAspect, sizeof(VkImageAspectFlagBits));
+    *ptr += sizeof(VkImageAspectFlagBits);
+}
+
+void reservedmarshal_VkPhysicalDeviceSamplerYcbcrConversionFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSamplerYcbcrConversionFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->samplerYcbcrConversion, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkSamplerYcbcrConversionImageFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSamplerYcbcrConversionImageFormatProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->combinedImageSamplerDescriptorCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkDescriptorUpdateTemplateEntry(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorUpdateTemplateEntry* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->dstBinding, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->dstArrayElement, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->descriptorCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkDescriptorType*)&forMarshaling->descriptorType, sizeof(VkDescriptorType));
+    *ptr += sizeof(VkDescriptorType);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->offset;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    uint64_t cgen_var_1 = (uint64_t)forMarshaling->stride;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+}
+
+void reservedmarshal_VkDescriptorUpdateTemplateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorUpdateTemplateCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDescriptorUpdateTemplateCreateFlags*)&forMarshaling->flags, sizeof(VkDescriptorUpdateTemplateCreateFlags));
+    *ptr += sizeof(VkDescriptorUpdateTemplateCreateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->descriptorUpdateEntryCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->descriptorUpdateEntryCount; ++i)
+    {
+        reservedmarshal_VkDescriptorUpdateTemplateEntry(vkStream, rootType, (const VkDescriptorUpdateTemplateEntry*)(forMarshaling->pDescriptorUpdateEntries + i), ptr);
+    }
+    memcpy(*ptr, (VkDescriptorUpdateTemplateType*)&forMarshaling->templateType, sizeof(VkDescriptorUpdateTemplateType));
+    *ptr += sizeof(VkDescriptorUpdateTemplateType);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDescriptorSetLayout((*&forMarshaling->descriptorSetLayout));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkPipelineBindPoint*)&forMarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    *ptr += sizeof(VkPipelineBindPoint);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipelineLayout((*&forMarshaling->pipelineLayout));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->set, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkExternalMemoryProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalMemoryProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkExternalMemoryFeatureFlags*)&forMarshaling->externalMemoryFeatures, sizeof(VkExternalMemoryFeatureFlags));
+    *ptr += sizeof(VkExternalMemoryFeatureFlags);
+    memcpy(*ptr, (VkExternalMemoryHandleTypeFlags*)&forMarshaling->exportFromImportedHandleTypes, sizeof(VkExternalMemoryHandleTypeFlags));
+    *ptr += sizeof(VkExternalMemoryHandleTypeFlags);
+    memcpy(*ptr, (VkExternalMemoryHandleTypeFlags*)&forMarshaling->compatibleHandleTypes, sizeof(VkExternalMemoryHandleTypeFlags));
+    *ptr += sizeof(VkExternalMemoryHandleTypeFlags);
+}
+
+void reservedmarshal_VkPhysicalDeviceExternalImageFormatInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalImageFormatInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkExternalMemoryHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
+    *ptr += sizeof(VkExternalMemoryHandleTypeFlagBits);
+}
+
+void reservedmarshal_VkExternalImageFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalImageFormatProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkExternalMemoryProperties(vkStream, rootType, (VkExternalMemoryProperties*)(&forMarshaling->externalMemoryProperties), ptr);
+}
+
+void reservedmarshal_VkPhysicalDeviceExternalBufferInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalBufferInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBufferCreateFlags*)&forMarshaling->flags, sizeof(VkBufferCreateFlags));
+    *ptr += sizeof(VkBufferCreateFlags);
+    memcpy(*ptr, (VkBufferUsageFlags*)&forMarshaling->usage, sizeof(VkBufferUsageFlags));
+    *ptr += sizeof(VkBufferUsageFlags);
+    memcpy(*ptr, (VkExternalMemoryHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
+    *ptr += sizeof(VkExternalMemoryHandleTypeFlagBits);
+}
+
+void reservedmarshal_VkExternalBufferProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalBufferProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkExternalMemoryProperties(vkStream, rootType, (VkExternalMemoryProperties*)(&forMarshaling->externalMemoryProperties), ptr);
+}
+
+void reservedmarshal_VkPhysicalDeviceIDProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceIDProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint8_t*)forMarshaling->deviceUUID, VK_UUID_SIZE * sizeof(uint8_t));
+    *ptr += VK_UUID_SIZE * sizeof(uint8_t);
+    memcpy(*ptr, (uint8_t*)forMarshaling->driverUUID, VK_UUID_SIZE * sizeof(uint8_t));
+    *ptr += VK_UUID_SIZE * sizeof(uint8_t);
+    memcpy(*ptr, (uint8_t*)forMarshaling->deviceLUID, VK_LUID_SIZE * sizeof(uint8_t));
+    *ptr += VK_LUID_SIZE * sizeof(uint8_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->deviceNodeMask, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->deviceLUIDValid, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkExternalMemoryImageCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalMemoryImageCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkExternalMemoryHandleTypeFlags*)&forMarshaling->handleTypes, sizeof(VkExternalMemoryHandleTypeFlags));
+    *ptr += sizeof(VkExternalMemoryHandleTypeFlags);
+}
+
+void reservedmarshal_VkExternalMemoryBufferCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalMemoryBufferCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkExternalMemoryHandleTypeFlags*)&forMarshaling->handleTypes, sizeof(VkExternalMemoryHandleTypeFlags));
+    *ptr += sizeof(VkExternalMemoryHandleTypeFlags);
+}
+
+void reservedmarshal_VkExportMemoryAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExportMemoryAllocateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkExternalMemoryHandleTypeFlags*)&forMarshaling->handleTypes, sizeof(VkExternalMemoryHandleTypeFlags));
+    *ptr += sizeof(VkExternalMemoryHandleTypeFlags);
+}
+
+void reservedmarshal_VkPhysicalDeviceExternalFenceInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalFenceInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkExternalFenceHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalFenceHandleTypeFlagBits));
+    *ptr += sizeof(VkExternalFenceHandleTypeFlagBits);
+}
+
+void reservedmarshal_VkExternalFenceProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalFenceProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkExternalFenceHandleTypeFlags*)&forMarshaling->exportFromImportedHandleTypes, sizeof(VkExternalFenceHandleTypeFlags));
+    *ptr += sizeof(VkExternalFenceHandleTypeFlags);
+    memcpy(*ptr, (VkExternalFenceHandleTypeFlags*)&forMarshaling->compatibleHandleTypes, sizeof(VkExternalFenceHandleTypeFlags));
+    *ptr += sizeof(VkExternalFenceHandleTypeFlags);
+    memcpy(*ptr, (VkExternalFenceFeatureFlags*)&forMarshaling->externalFenceFeatures, sizeof(VkExternalFenceFeatureFlags));
+    *ptr += sizeof(VkExternalFenceFeatureFlags);
+}
+
+void reservedmarshal_VkExportFenceCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExportFenceCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkExternalFenceHandleTypeFlags*)&forMarshaling->handleTypes, sizeof(VkExternalFenceHandleTypeFlags));
+    *ptr += sizeof(VkExternalFenceHandleTypeFlags);
+}
+
+void reservedmarshal_VkExportSemaphoreCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExportSemaphoreCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkExternalSemaphoreHandleTypeFlags*)&forMarshaling->handleTypes, sizeof(VkExternalSemaphoreHandleTypeFlags));
+    *ptr += sizeof(VkExternalSemaphoreHandleTypeFlags);
+}
+
+void reservedmarshal_VkPhysicalDeviceExternalSemaphoreInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalSemaphoreInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkExternalSemaphoreHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalSemaphoreHandleTypeFlagBits));
+    *ptr += sizeof(VkExternalSemaphoreHandleTypeFlagBits);
+}
+
+void reservedmarshal_VkExternalSemaphoreProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalSemaphoreProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkExternalSemaphoreHandleTypeFlags*)&forMarshaling->exportFromImportedHandleTypes, sizeof(VkExternalSemaphoreHandleTypeFlags));
+    *ptr += sizeof(VkExternalSemaphoreHandleTypeFlags);
+    memcpy(*ptr, (VkExternalSemaphoreHandleTypeFlags*)&forMarshaling->compatibleHandleTypes, sizeof(VkExternalSemaphoreHandleTypeFlags));
+    *ptr += sizeof(VkExternalSemaphoreHandleTypeFlags);
+    memcpy(*ptr, (VkExternalSemaphoreFeatureFlags*)&forMarshaling->externalSemaphoreFeatures, sizeof(VkExternalSemaphoreFeatureFlags));
+    *ptr += sizeof(VkExternalSemaphoreFeatureFlags);
+}
+
+void reservedmarshal_VkPhysicalDeviceMaintenance3Properties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMaintenance3Properties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerSetDescriptors, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->maxMemoryAllocationSize, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkDescriptorSetLayoutSupport(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutSupport* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->supported, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceShaderDrawParametersFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderDrawParametersFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderDrawParameters, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_VERSION_1_2
+void reservedmarshal_VkPhysicalDeviceVulkan11Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan11Features* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->storageBuffer16BitAccess, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->uniformAndStorageBuffer16BitAccess, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->storagePushConstant16, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->storageInputOutput16, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->multiview, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->multiviewGeometryShader, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->multiviewTessellationShader, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->variablePointersStorageBuffer, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->variablePointers, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->protectedMemory, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->samplerYcbcrConversion, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderDrawParameters, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceVulkan11Properties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan11Properties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint8_t*)forMarshaling->deviceUUID, VK_UUID_SIZE * sizeof(uint8_t));
+    *ptr += VK_UUID_SIZE * sizeof(uint8_t);
+    memcpy(*ptr, (uint8_t*)forMarshaling->driverUUID, VK_UUID_SIZE * sizeof(uint8_t));
+    *ptr += VK_UUID_SIZE * sizeof(uint8_t);
+    memcpy(*ptr, (uint8_t*)forMarshaling->deviceLUID, VK_LUID_SIZE * sizeof(uint8_t));
+    *ptr += VK_LUID_SIZE * sizeof(uint8_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->deviceNodeMask, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->deviceLUIDValid, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->subgroupSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkShaderStageFlags*)&forMarshaling->subgroupSupportedStages, sizeof(VkShaderStageFlags));
+    *ptr += sizeof(VkShaderStageFlags);
+    memcpy(*ptr, (VkSubgroupFeatureFlags*)&forMarshaling->subgroupSupportedOperations, sizeof(VkSubgroupFeatureFlags));
+    *ptr += sizeof(VkSubgroupFeatureFlags);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->subgroupQuadOperationsInAllStages, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkPointClippingBehavior*)&forMarshaling->pointClippingBehavior, sizeof(VkPointClippingBehavior));
+    *ptr += sizeof(VkPointClippingBehavior);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxMultiviewViewCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxMultiviewInstanceIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->protectedNoFault, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerSetDescriptors, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->maxMemoryAllocationSize, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkPhysicalDeviceVulkan12Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan12Features* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->samplerMirrorClampToEdge, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->drawIndirectCount, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->storageBuffer8BitAccess, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->uniformAndStorageBuffer8BitAccess, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->storagePushConstant8, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderBufferInt64Atomics, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSharedInt64Atomics, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderFloat16, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderInt8, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderInputAttachmentArrayDynamicIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderUniformTexelBufferArrayDynamicIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageTexelBufferArrayDynamicIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderUniformBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSampledImageArrayNonUniformIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageImageArrayNonUniformIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderInputAttachmentArrayNonUniformIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderUniformTexelBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageTexelBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingUniformBufferUpdateAfterBind, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingSampledImageUpdateAfterBind, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingStorageImageUpdateAfterBind, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingStorageBufferUpdateAfterBind, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingUniformTexelBufferUpdateAfterBind, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingStorageTexelBufferUpdateAfterBind, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingUpdateUnusedWhilePending, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingPartiallyBound, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingVariableDescriptorCount, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->runtimeDescriptorArray, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->samplerFilterMinmax, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->scalarBlockLayout, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->imagelessFramebuffer, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->uniformBufferStandardLayout, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSubgroupExtendedTypes, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->separateDepthStencilLayouts, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->hostQueryReset, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->timelineSemaphore, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->bufferDeviceAddress, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->bufferDeviceAddressCaptureReplay, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->bufferDeviceAddressMultiDevice, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->vulkanMemoryModel, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->vulkanMemoryModelDeviceScope, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->vulkanMemoryModelAvailabilityVisibilityChains, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderOutputViewportIndex, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderOutputLayer, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->subgroupBroadcastDynamicId, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkConformanceVersion(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkConformanceVersion* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint8_t*)&forMarshaling->major, sizeof(uint8_t));
+    *ptr += sizeof(uint8_t);
+    memcpy(*ptr, (uint8_t*)&forMarshaling->minor, sizeof(uint8_t));
+    *ptr += sizeof(uint8_t);
+    memcpy(*ptr, (uint8_t*)&forMarshaling->subminor, sizeof(uint8_t));
+    *ptr += sizeof(uint8_t);
+    memcpy(*ptr, (uint8_t*)&forMarshaling->patch, sizeof(uint8_t));
+    *ptr += sizeof(uint8_t);
+}
+
+void reservedmarshal_VkPhysicalDeviceVulkan12Properties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan12Properties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDriverId*)&forMarshaling->driverID, sizeof(VkDriverId));
+    *ptr += sizeof(VkDriverId);
+    memcpy(*ptr, (char*)forMarshaling->driverName, VK_MAX_DRIVER_NAME_SIZE * sizeof(char));
+    *ptr += VK_MAX_DRIVER_NAME_SIZE * sizeof(char);
+    memcpy(*ptr, (char*)forMarshaling->driverInfo, VK_MAX_DRIVER_INFO_SIZE * sizeof(char));
+    *ptr += VK_MAX_DRIVER_INFO_SIZE * sizeof(char);
+    reservedmarshal_VkConformanceVersion(vkStream, rootType, (VkConformanceVersion*)(&forMarshaling->conformanceVersion), ptr);
+    memcpy(*ptr, (VkShaderFloatControlsIndependence*)&forMarshaling->denormBehaviorIndependence, sizeof(VkShaderFloatControlsIndependence));
+    *ptr += sizeof(VkShaderFloatControlsIndependence);
+    memcpy(*ptr, (VkShaderFloatControlsIndependence*)&forMarshaling->roundingModeIndependence, sizeof(VkShaderFloatControlsIndependence));
+    *ptr += sizeof(VkShaderFloatControlsIndependence);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSignedZeroInfNanPreserveFloat16, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSignedZeroInfNanPreserveFloat32, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSignedZeroInfNanPreserveFloat64, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderDenormPreserveFloat16, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderDenormPreserveFloat32, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderDenormPreserveFloat64, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderDenormFlushToZeroFloat16, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderDenormFlushToZeroFloat32, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderDenormFlushToZeroFloat64, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderRoundingModeRTEFloat16, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderRoundingModeRTEFloat32, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderRoundingModeRTEFloat64, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderRoundingModeRTZFloat16, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderRoundingModeRTZFloat32, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderRoundingModeRTZFloat64, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxUpdateAfterBindDescriptorsInAllPools, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderUniformBufferArrayNonUniformIndexingNative, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSampledImageArrayNonUniformIndexingNative, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageBufferArrayNonUniformIndexingNative, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageImageArrayNonUniformIndexingNative, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderInputAttachmentArrayNonUniformIndexingNative, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->robustBufferAccessUpdateAfterBind, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->quadDivergentImplicitLod, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindSamplers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindUniformBuffers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindStorageBuffers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindSampledImages, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindStorageImages, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindInputAttachments, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageUpdateAfterBindResources, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindSamplers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindUniformBuffers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindStorageBuffers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindSampledImages, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindStorageImages, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindInputAttachments, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkResolveModeFlags*)&forMarshaling->supportedDepthResolveModes, sizeof(VkResolveModeFlags));
+    *ptr += sizeof(VkResolveModeFlags);
+    memcpy(*ptr, (VkResolveModeFlags*)&forMarshaling->supportedStencilResolveModes, sizeof(VkResolveModeFlags));
+    *ptr += sizeof(VkResolveModeFlags);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->independentResolveNone, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->independentResolve, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->filterMinmaxSingleComponentFormats, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->filterMinmaxImageComponentMapping, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->maxTimelineSemaphoreValueDifference, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (VkSampleCountFlags*)&forMarshaling->framebufferIntegerColorSampleCounts, sizeof(VkSampleCountFlags));
+    *ptr += sizeof(VkSampleCountFlags);
+}
+
+void reservedmarshal_VkImageFormatListCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageFormatListCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->viewFormatCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const VkFormat*)forMarshaling->pViewFormats, forMarshaling->viewFormatCount * sizeof(const VkFormat));
+    *ptr += forMarshaling->viewFormatCount * sizeof(const VkFormat);
+}
+
+void reservedmarshal_VkAttachmentDescription2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentDescription2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkAttachmentDescriptionFlags*)&forMarshaling->flags, sizeof(VkAttachmentDescriptionFlags));
+    *ptr += sizeof(VkAttachmentDescriptionFlags);
+    memcpy(*ptr, (VkFormat*)&forMarshaling->format, sizeof(VkFormat));
+    *ptr += sizeof(VkFormat);
+    memcpy(*ptr, (VkSampleCountFlagBits*)&forMarshaling->samples, sizeof(VkSampleCountFlagBits));
+    *ptr += sizeof(VkSampleCountFlagBits);
+    memcpy(*ptr, (VkAttachmentLoadOp*)&forMarshaling->loadOp, sizeof(VkAttachmentLoadOp));
+    *ptr += sizeof(VkAttachmentLoadOp);
+    memcpy(*ptr, (VkAttachmentStoreOp*)&forMarshaling->storeOp, sizeof(VkAttachmentStoreOp));
+    *ptr += sizeof(VkAttachmentStoreOp);
+    memcpy(*ptr, (VkAttachmentLoadOp*)&forMarshaling->stencilLoadOp, sizeof(VkAttachmentLoadOp));
+    *ptr += sizeof(VkAttachmentLoadOp);
+    memcpy(*ptr, (VkAttachmentStoreOp*)&forMarshaling->stencilStoreOp, sizeof(VkAttachmentStoreOp));
+    *ptr += sizeof(VkAttachmentStoreOp);
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->initialLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->finalLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+}
+
+void reservedmarshal_VkAttachmentReference2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentReference2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->attachment, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->layout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+    memcpy(*ptr, (VkImageAspectFlags*)&forMarshaling->aspectMask, sizeof(VkImageAspectFlags));
+    *ptr += sizeof(VkImageAspectFlags);
+}
+
+void reservedmarshal_VkSubpassDescription2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassDescription2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkSubpassDescriptionFlags*)&forMarshaling->flags, sizeof(VkSubpassDescriptionFlags));
+    *ptr += sizeof(VkSubpassDescriptionFlags);
+    memcpy(*ptr, (VkPipelineBindPoint*)&forMarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    *ptr += sizeof(VkPipelineBindPoint);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->viewMask, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->inputAttachmentCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->inputAttachmentCount; ++i)
+    {
+        reservedmarshal_VkAttachmentReference2(vkStream, rootType, (const VkAttachmentReference2*)(forMarshaling->pInputAttachments + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->colorAttachmentCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->colorAttachmentCount; ++i)
+    {
+        reservedmarshal_VkAttachmentReference2(vkStream, rootType, (const VkAttachmentReference2*)(forMarshaling->pColorAttachments + i), ptr);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pResolveAttachments;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pResolveAttachments)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->colorAttachmentCount; ++i)
+        {
+            reservedmarshal_VkAttachmentReference2(vkStream, rootType, (const VkAttachmentReference2*)(forMarshaling->pResolveAttachments + i), ptr);
+        }
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pDepthStencilAttachment;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pDepthStencilAttachment)
+    {
+        reservedmarshal_VkAttachmentReference2(vkStream, rootType, (const VkAttachmentReference2*)(forMarshaling->pDepthStencilAttachment), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->preserveAttachmentCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pPreserveAttachments, forMarshaling->preserveAttachmentCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->preserveAttachmentCount * sizeof(const uint32_t);
+}
+
+void reservedmarshal_VkSubpassDependency2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassDependency2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->srcSubpass, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->dstSubpass, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkPipelineStageFlags*)&forMarshaling->srcStageMask, sizeof(VkPipelineStageFlags));
+    *ptr += sizeof(VkPipelineStageFlags);
+    memcpy(*ptr, (VkPipelineStageFlags*)&forMarshaling->dstStageMask, sizeof(VkPipelineStageFlags));
+    *ptr += sizeof(VkPipelineStageFlags);
+    memcpy(*ptr, (VkAccessFlags*)&forMarshaling->srcAccessMask, sizeof(VkAccessFlags));
+    *ptr += sizeof(VkAccessFlags);
+    memcpy(*ptr, (VkAccessFlags*)&forMarshaling->dstAccessMask, sizeof(VkAccessFlags));
+    *ptr += sizeof(VkAccessFlags);
+    memcpy(*ptr, (VkDependencyFlags*)&forMarshaling->dependencyFlags, sizeof(VkDependencyFlags));
+    *ptr += sizeof(VkDependencyFlags);
+    memcpy(*ptr, (int32_t*)&forMarshaling->viewOffset, sizeof(int32_t));
+    *ptr += sizeof(int32_t);
+}
+
+void reservedmarshal_VkRenderPassCreateInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassCreateInfo2* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkRenderPassCreateFlags*)&forMarshaling->flags, sizeof(VkRenderPassCreateFlags));
+    *ptr += sizeof(VkRenderPassCreateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->attachmentCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->attachmentCount; ++i)
+    {
+        reservedmarshal_VkAttachmentDescription2(vkStream, rootType, (const VkAttachmentDescription2*)(forMarshaling->pAttachments + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->subpassCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->subpassCount; ++i)
+    {
+        reservedmarshal_VkSubpassDescription2(vkStream, rootType, (const VkSubpassDescription2*)(forMarshaling->pSubpasses + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->dependencyCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->dependencyCount; ++i)
+    {
+        reservedmarshal_VkSubpassDependency2(vkStream, rootType, (const VkSubpassDependency2*)(forMarshaling->pDependencies + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->correlatedViewMaskCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pCorrelatedViewMasks, forMarshaling->correlatedViewMaskCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->correlatedViewMaskCount * sizeof(const uint32_t);
+}
+
+void reservedmarshal_VkSubpassBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassBeginInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkSubpassContents*)&forMarshaling->contents, sizeof(VkSubpassContents));
+    *ptr += sizeof(VkSubpassContents);
+}
+
+void reservedmarshal_VkSubpassEndInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassEndInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+}
+
+void reservedmarshal_VkPhysicalDevice8BitStorageFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevice8BitStorageFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->storageBuffer8BitAccess, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->uniformAndStorageBuffer8BitAccess, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->storagePushConstant8, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceDriverProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDriverProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDriverId*)&forMarshaling->driverID, sizeof(VkDriverId));
+    *ptr += sizeof(VkDriverId);
+    memcpy(*ptr, (char*)forMarshaling->driverName, VK_MAX_DRIVER_NAME_SIZE * sizeof(char));
+    *ptr += VK_MAX_DRIVER_NAME_SIZE * sizeof(char);
+    memcpy(*ptr, (char*)forMarshaling->driverInfo, VK_MAX_DRIVER_INFO_SIZE * sizeof(char));
+    *ptr += VK_MAX_DRIVER_INFO_SIZE * sizeof(char);
+    reservedmarshal_VkConformanceVersion(vkStream, rootType, (VkConformanceVersion*)(&forMarshaling->conformanceVersion), ptr);
+}
+
+void reservedmarshal_VkPhysicalDeviceShaderAtomicInt64Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderAtomicInt64Features* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderBufferInt64Atomics, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSharedInt64Atomics, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceShaderFloat16Int8Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderFloat16Int8Features* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderFloat16, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderInt8, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceFloatControlsProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFloatControlsProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkShaderFloatControlsIndependence*)&forMarshaling->denormBehaviorIndependence, sizeof(VkShaderFloatControlsIndependence));
+    *ptr += sizeof(VkShaderFloatControlsIndependence);
+    memcpy(*ptr, (VkShaderFloatControlsIndependence*)&forMarshaling->roundingModeIndependence, sizeof(VkShaderFloatControlsIndependence));
+    *ptr += sizeof(VkShaderFloatControlsIndependence);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSignedZeroInfNanPreserveFloat16, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSignedZeroInfNanPreserveFloat32, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSignedZeroInfNanPreserveFloat64, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderDenormPreserveFloat16, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderDenormPreserveFloat32, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderDenormPreserveFloat64, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderDenormFlushToZeroFloat16, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderDenormFlushToZeroFloat32, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderDenormFlushToZeroFloat64, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderRoundingModeRTEFloat16, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderRoundingModeRTEFloat32, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderRoundingModeRTEFloat64, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderRoundingModeRTZFloat16, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderRoundingModeRTZFloat32, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderRoundingModeRTZFloat64, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkDescriptorSetLayoutBindingFlagsCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutBindingFlagsCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->bindingCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pBindingFlags;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pBindingFlags)
+    {
+        memcpy(*ptr, (const VkDescriptorBindingFlags*)forMarshaling->pBindingFlags, forMarshaling->bindingCount * sizeof(const VkDescriptorBindingFlags));
+        *ptr += forMarshaling->bindingCount * sizeof(const VkDescriptorBindingFlags);
+    }
+}
+
+void reservedmarshal_VkPhysicalDeviceDescriptorIndexingFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDescriptorIndexingFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderInputAttachmentArrayDynamicIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderUniformTexelBufferArrayDynamicIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageTexelBufferArrayDynamicIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderUniformBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSampledImageArrayNonUniformIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageImageArrayNonUniformIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderInputAttachmentArrayNonUniformIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderUniformTexelBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageTexelBufferArrayNonUniformIndexing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingUniformBufferUpdateAfterBind, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingSampledImageUpdateAfterBind, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingStorageImageUpdateAfterBind, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingStorageBufferUpdateAfterBind, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingUniformTexelBufferUpdateAfterBind, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingStorageTexelBufferUpdateAfterBind, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingUpdateUnusedWhilePending, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingPartiallyBound, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingVariableDescriptorCount, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->runtimeDescriptorArray, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceDescriptorIndexingProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDescriptorIndexingProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxUpdateAfterBindDescriptorsInAllPools, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderUniformBufferArrayNonUniformIndexingNative, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSampledImageArrayNonUniformIndexingNative, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageBufferArrayNonUniformIndexingNative, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderStorageImageArrayNonUniformIndexingNative, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderInputAttachmentArrayNonUniformIndexingNative, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->robustBufferAccessUpdateAfterBind, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->quadDivergentImplicitLod, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindSamplers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindUniformBuffers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindStorageBuffers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindSampledImages, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindStorageImages, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindInputAttachments, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageUpdateAfterBindResources, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindSamplers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindUniformBuffers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindStorageBuffers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindSampledImages, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindStorageImages, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindInputAttachments, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkDescriptorSetVariableDescriptorCountAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetVariableDescriptorCountAllocateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->descriptorSetCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pDescriptorCounts, forMarshaling->descriptorSetCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->descriptorSetCount * sizeof(const uint32_t);
+}
+
+void reservedmarshal_VkDescriptorSetVariableDescriptorCountLayoutSupport(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetVariableDescriptorCountLayoutSupport* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxVariableDescriptorCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkSubpassDescriptionDepthStencilResolve(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassDescriptionDepthStencilResolve* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkResolveModeFlagBits*)&forMarshaling->depthResolveMode, sizeof(VkResolveModeFlagBits));
+    *ptr += sizeof(VkResolveModeFlagBits);
+    memcpy(*ptr, (VkResolveModeFlagBits*)&forMarshaling->stencilResolveMode, sizeof(VkResolveModeFlagBits));
+    *ptr += sizeof(VkResolveModeFlagBits);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pDepthStencilResolveAttachment;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pDepthStencilResolveAttachment)
+    {
+        reservedmarshal_VkAttachmentReference2(vkStream, rootType, (const VkAttachmentReference2*)(forMarshaling->pDepthStencilResolveAttachment), ptr);
+    }
+}
+
+void reservedmarshal_VkPhysicalDeviceDepthStencilResolveProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDepthStencilResolveProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkResolveModeFlags*)&forMarshaling->supportedDepthResolveModes, sizeof(VkResolveModeFlags));
+    *ptr += sizeof(VkResolveModeFlags);
+    memcpy(*ptr, (VkResolveModeFlags*)&forMarshaling->supportedStencilResolveModes, sizeof(VkResolveModeFlags));
+    *ptr += sizeof(VkResolveModeFlags);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->independentResolveNone, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->independentResolve, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceScalarBlockLayoutFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceScalarBlockLayoutFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->scalarBlockLayout, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkImageStencilUsageCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageStencilUsageCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkImageUsageFlags*)&forMarshaling->stencilUsage, sizeof(VkImageUsageFlags));
+    *ptr += sizeof(VkImageUsageFlags);
+}
+
+void reservedmarshal_VkSamplerReductionModeCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSamplerReductionModeCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkSamplerReductionMode*)&forMarshaling->reductionMode, sizeof(VkSamplerReductionMode));
+    *ptr += sizeof(VkSamplerReductionMode);
+}
+
+void reservedmarshal_VkPhysicalDeviceSamplerFilterMinmaxProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSamplerFilterMinmaxProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->filterMinmaxSingleComponentFormats, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->filterMinmaxImageComponentMapping, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceVulkanMemoryModelFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkanMemoryModelFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->vulkanMemoryModel, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->vulkanMemoryModelDeviceScope, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->vulkanMemoryModelAvailabilityVisibilityChains, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceImagelessFramebufferFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImagelessFramebufferFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->imagelessFramebuffer, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkFramebufferAttachmentImageInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFramebufferAttachmentImageInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkImageCreateFlags*)&forMarshaling->flags, sizeof(VkImageCreateFlags));
+    *ptr += sizeof(VkImageCreateFlags);
+    memcpy(*ptr, (VkImageUsageFlags*)&forMarshaling->usage, sizeof(VkImageUsageFlags));
+    *ptr += sizeof(VkImageUsageFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->width, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->height, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->layerCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->viewFormatCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const VkFormat*)forMarshaling->pViewFormats, forMarshaling->viewFormatCount * sizeof(const VkFormat));
+    *ptr += forMarshaling->viewFormatCount * sizeof(const VkFormat);
+}
+
+void reservedmarshal_VkFramebufferAttachmentsCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFramebufferAttachmentsCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->attachmentImageInfoCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->attachmentImageInfoCount; ++i)
+    {
+        reservedmarshal_VkFramebufferAttachmentImageInfo(vkStream, rootType, (const VkFramebufferAttachmentImageInfo*)(forMarshaling->pAttachmentImageInfos + i), ptr);
+    }
+}
+
+void reservedmarshal_VkRenderPassAttachmentBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassAttachmentBeginInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->attachmentCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->attachmentCount)
+    {
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->attachmentCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkImageView(forMarshaling->pAttachments[k]);
+                memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->attachmentCount;
+    }
+}
+
+void reservedmarshal_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceUniformBufferStandardLayoutFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->uniformBufferStandardLayout, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSubgroupExtendedTypes, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->separateDepthStencilLayouts, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkAttachmentReferenceStencilLayout(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentReferenceStencilLayout* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->stencilLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+}
+
+void reservedmarshal_VkAttachmentDescriptionStencilLayout(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentDescriptionStencilLayout* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->stencilInitialLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->stencilFinalLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+}
+
+void reservedmarshal_VkPhysicalDeviceHostQueryResetFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceHostQueryResetFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->hostQueryReset, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceTimelineSemaphoreFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTimelineSemaphoreFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->timelineSemaphore, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceTimelineSemaphoreProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTimelineSemaphoreProperties* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->maxTimelineSemaphoreValueDifference, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+}
+
+void reservedmarshal_VkSemaphoreTypeCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreTypeCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkSemaphoreType*)&forMarshaling->semaphoreType, sizeof(VkSemaphoreType));
+    *ptr += sizeof(VkSemaphoreType);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->initialValue, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+}
+
+void reservedmarshal_VkTimelineSemaphoreSubmitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkTimelineSemaphoreSubmitInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->waitSemaphoreValueCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pWaitSemaphoreValues;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pWaitSemaphoreValues)
+    {
+        memcpy(*ptr, (const uint64_t*)forMarshaling->pWaitSemaphoreValues, forMarshaling->waitSemaphoreValueCount * sizeof(const uint64_t));
+        *ptr += forMarshaling->waitSemaphoreValueCount * sizeof(const uint64_t);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->signalSemaphoreValueCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pSignalSemaphoreValues;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pSignalSemaphoreValues)
+    {
+        memcpy(*ptr, (const uint64_t*)forMarshaling->pSignalSemaphoreValues, forMarshaling->signalSemaphoreValueCount * sizeof(const uint64_t));
+        *ptr += forMarshaling->signalSemaphoreValueCount * sizeof(const uint64_t);
+    }
+}
+
+void reservedmarshal_VkSemaphoreWaitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreWaitInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkSemaphoreWaitFlags*)&forMarshaling->flags, sizeof(VkSemaphoreWaitFlags));
+    *ptr += sizeof(VkSemaphoreWaitFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->semaphoreCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->semaphoreCount)
+    {
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->semaphoreCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkSemaphore(forMarshaling->pSemaphores[k]);
+                memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->semaphoreCount;
+    }
+    memcpy(*ptr, (const uint64_t*)forMarshaling->pValues, forMarshaling->semaphoreCount * sizeof(const uint64_t));
+    *ptr += forMarshaling->semaphoreCount * sizeof(const uint64_t);
+}
+
+void reservedmarshal_VkSemaphoreSignalInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreSignalInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkSemaphore((*&forMarshaling->semaphore));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint64_t*)&forMarshaling->value, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+}
+
+void reservedmarshal_VkPhysicalDeviceBufferDeviceAddressFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBufferDeviceAddressFeatures* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->bufferDeviceAddress, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->bufferDeviceAddressCaptureReplay, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->bufferDeviceAddressMultiDevice, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkBufferDeviceAddressInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferDeviceAddressInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&forMarshaling->buffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+}
+
+void reservedmarshal_VkBufferOpaqueCaptureAddressCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferOpaqueCaptureAddressCreateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->opaqueCaptureAddress, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+}
+
+void reservedmarshal_VkMemoryOpaqueCaptureAddressAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryOpaqueCaptureAddressAllocateInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->opaqueCaptureAddress, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+}
+
+void reservedmarshal_VkDeviceMemoryOpaqueCaptureAddressInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceMemoryOpaqueCaptureAddressInfo* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDeviceMemory((*&forMarshaling->memory));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+}
+
+#endif
+#ifdef VK_KHR_surface
+void reservedmarshal_VkSurfaceCapabilitiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceCapabilitiesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->minImageCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxImageCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->currentExtent), ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->minImageExtent), ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxImageExtent), ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxImageArrayLayers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkSurfaceTransformFlagsKHR*)&forMarshaling->supportedTransforms, sizeof(VkSurfaceTransformFlagsKHR));
+    *ptr += sizeof(VkSurfaceTransformFlagsKHR);
+    memcpy(*ptr, (VkSurfaceTransformFlagBitsKHR*)&forMarshaling->currentTransform, sizeof(VkSurfaceTransformFlagBitsKHR));
+    *ptr += sizeof(VkSurfaceTransformFlagBitsKHR);
+    memcpy(*ptr, (VkCompositeAlphaFlagsKHR*)&forMarshaling->supportedCompositeAlpha, sizeof(VkCompositeAlphaFlagsKHR));
+    *ptr += sizeof(VkCompositeAlphaFlagsKHR);
+    memcpy(*ptr, (VkImageUsageFlags*)&forMarshaling->supportedUsageFlags, sizeof(VkImageUsageFlags));
+    *ptr += sizeof(VkImageUsageFlags);
+}
+
+void reservedmarshal_VkSurfaceFormatKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceFormatKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkFormat*)&forMarshaling->format, sizeof(VkFormat));
+    *ptr += sizeof(VkFormat);
+    memcpy(*ptr, (VkColorSpaceKHR*)&forMarshaling->colorSpace, sizeof(VkColorSpaceKHR));
+    *ptr += sizeof(VkColorSpaceKHR);
+}
+
+#endif
+#ifdef VK_KHR_swapchain
+void reservedmarshal_VkSwapchainCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSwapchainCreateInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkSwapchainCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkSwapchainCreateFlagsKHR));
+    *ptr += sizeof(VkSwapchainCreateFlagsKHR);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkSurfaceKHR((*&forMarshaling->surface));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->minImageCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkFormat*)&forMarshaling->imageFormat, sizeof(VkFormat));
+    *ptr += sizeof(VkFormat);
+    memcpy(*ptr, (VkColorSpaceKHR*)&forMarshaling->imageColorSpace, sizeof(VkColorSpaceKHR));
+    *ptr += sizeof(VkColorSpaceKHR);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->imageExtent), ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->imageArrayLayers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkImageUsageFlags*)&forMarshaling->imageUsage, sizeof(VkImageUsageFlags));
+    *ptr += sizeof(VkImageUsageFlags);
+    memcpy(*ptr, (VkSharingMode*)&forMarshaling->imageSharingMode, sizeof(VkSharingMode));
+    *ptr += sizeof(VkSharingMode);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->queueFamilyIndexCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pQueueFamilyIndices;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pQueueFamilyIndices)
+    {
+        memcpy(*ptr, (const uint32_t*)forMarshaling->pQueueFamilyIndices, forMarshaling->queueFamilyIndexCount * sizeof(const uint32_t));
+        *ptr += forMarshaling->queueFamilyIndexCount * sizeof(const uint32_t);
+    }
+    memcpy(*ptr, (VkSurfaceTransformFlagBitsKHR*)&forMarshaling->preTransform, sizeof(VkSurfaceTransformFlagBitsKHR));
+    *ptr += sizeof(VkSurfaceTransformFlagBitsKHR);
+    memcpy(*ptr, (VkCompositeAlphaFlagBitsKHR*)&forMarshaling->compositeAlpha, sizeof(VkCompositeAlphaFlagBitsKHR));
+    *ptr += sizeof(VkCompositeAlphaFlagBitsKHR);
+    memcpy(*ptr, (VkPresentModeKHR*)&forMarshaling->presentMode, sizeof(VkPresentModeKHR));
+    *ptr += sizeof(VkPresentModeKHR);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->clipped, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = get_host_u64_VkSwapchainKHR((*&forMarshaling->oldSwapchain));
+    memcpy(*ptr, (uint64_t*)&cgen_var_2, 1 * 8);
+    *ptr += 1 * 8;
+}
+
+void reservedmarshal_VkPresentInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPresentInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->waitSemaphoreCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->waitSemaphoreCount)
+    {
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->waitSemaphoreCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkSemaphore(forMarshaling->pWaitSemaphores[k]);
+                memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->waitSemaphoreCount;
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->swapchainCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->swapchainCount)
+    {
+        uint8_t* cgen_var_1_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->swapchainCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkSwapchainKHR(forMarshaling->pSwapchains[k]);
+                memcpy(cgen_var_1_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->swapchainCount;
+    }
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pImageIndices, forMarshaling->swapchainCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->swapchainCount * sizeof(const uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)forMarshaling->pResults;
+    memcpy((*ptr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pResults)
+    {
+        memcpy(*ptr, (VkResult*)forMarshaling->pResults, forMarshaling->swapchainCount * sizeof(VkResult));
+        *ptr += forMarshaling->swapchainCount * sizeof(VkResult);
+    }
+}
+
+void reservedmarshal_VkImageSwapchainCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageSwapchainCreateInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkSwapchainKHR((*&forMarshaling->swapchain));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+}
+
+void reservedmarshal_VkBindImageMemorySwapchainInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindImageMemorySwapchainInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkSwapchainKHR((*&forMarshaling->swapchain));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->imageIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkAcquireNextImageInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAcquireNextImageInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkSwapchainKHR((*&forMarshaling->swapchain));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint64_t*)&forMarshaling->timeout, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSemaphore((*&forMarshaling->semaphore));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = get_host_u64_VkFence((*&forMarshaling->fence));
+    memcpy(*ptr, (uint64_t*)&cgen_var_2, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->deviceMask, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkDeviceGroupPresentCapabilitiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceGroupPresentCapabilitiesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)forMarshaling->presentMask, VK_MAX_DEVICE_GROUP_SIZE * sizeof(uint32_t));
+    *ptr += VK_MAX_DEVICE_GROUP_SIZE * sizeof(uint32_t);
+    memcpy(*ptr, (VkDeviceGroupPresentModeFlagsKHR*)&forMarshaling->modes, sizeof(VkDeviceGroupPresentModeFlagsKHR));
+    *ptr += sizeof(VkDeviceGroupPresentModeFlagsKHR);
+}
+
+void reservedmarshal_VkDeviceGroupPresentInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceGroupPresentInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->swapchainCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pDeviceMasks, forMarshaling->swapchainCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->swapchainCount * sizeof(const uint32_t);
+    memcpy(*ptr, (VkDeviceGroupPresentModeFlagBitsKHR*)&forMarshaling->mode, sizeof(VkDeviceGroupPresentModeFlagBitsKHR));
+    *ptr += sizeof(VkDeviceGroupPresentModeFlagBitsKHR);
+}
+
+void reservedmarshal_VkDeviceGroupSwapchainCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceGroupSwapchainCreateInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceGroupPresentModeFlagsKHR*)&forMarshaling->modes, sizeof(VkDeviceGroupPresentModeFlagsKHR));
+    *ptr += sizeof(VkDeviceGroupPresentModeFlagsKHR);
+}
+
+#endif
+#ifdef VK_KHR_display
+void reservedmarshal_VkDisplayModeParametersKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayModeParametersKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->visibleRegion), ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->refreshRate, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkDisplayModeCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayModeCreateInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDisplayModeCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkDisplayModeCreateFlagsKHR));
+    *ptr += sizeof(VkDisplayModeCreateFlagsKHR);
+    reservedmarshal_VkDisplayModeParametersKHR(vkStream, rootType, (VkDisplayModeParametersKHR*)(&forMarshaling->parameters), ptr);
+}
+
+void reservedmarshal_VkDisplayModePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayModePropertiesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDisplayModeKHR((*&forMarshaling->displayMode));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    reservedmarshal_VkDisplayModeParametersKHR(vkStream, rootType, (VkDisplayModeParametersKHR*)(&forMarshaling->parameters), ptr);
+}
+
+void reservedmarshal_VkDisplayPlaneCapabilitiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPlaneCapabilitiesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkDisplayPlaneAlphaFlagsKHR*)&forMarshaling->supportedAlpha, sizeof(VkDisplayPlaneAlphaFlagsKHR));
+    *ptr += sizeof(VkDisplayPlaneAlphaFlagsKHR);
+    reservedmarshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forMarshaling->minSrcPosition), ptr);
+    reservedmarshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forMarshaling->maxSrcPosition), ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->minSrcExtent), ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxSrcExtent), ptr);
+    reservedmarshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forMarshaling->minDstPosition), ptr);
+    reservedmarshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forMarshaling->maxDstPosition), ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->minDstExtent), ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxDstExtent), ptr);
+}
+
+void reservedmarshal_VkDisplayPlanePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPlanePropertiesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDisplayKHR((*&forMarshaling->currentDisplay));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->currentStackIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkDisplayPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPropertiesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDisplayKHR((*&forMarshaling->display));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    {
+        uint32_t l = forMarshaling->displayName ? strlen(forMarshaling->displayName): 0;
+        memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+        android::base::Stream::toBe32((uint8_t*)*ptr);
+        *ptr += sizeof(uint32_t);
+        memcpy(*ptr, (char*)forMarshaling->displayName, l);
+        *ptr += l;
+    }
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->physicalDimensions), ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->physicalResolution), ptr);
+    memcpy(*ptr, (VkSurfaceTransformFlagsKHR*)&forMarshaling->supportedTransforms, sizeof(VkSurfaceTransformFlagsKHR));
+    *ptr += sizeof(VkSurfaceTransformFlagsKHR);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->planeReorderPossible, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->persistentContent, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkDisplaySurfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplaySurfaceCreateInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDisplaySurfaceCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkDisplaySurfaceCreateFlagsKHR));
+    *ptr += sizeof(VkDisplaySurfaceCreateFlagsKHR);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDisplayModeKHR((*&forMarshaling->displayMode));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->planeIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->planeStackIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkSurfaceTransformFlagBitsKHR*)&forMarshaling->transform, sizeof(VkSurfaceTransformFlagBitsKHR));
+    *ptr += sizeof(VkSurfaceTransformFlagBitsKHR);
+    memcpy(*ptr, (float*)&forMarshaling->globalAlpha, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (VkDisplayPlaneAlphaFlagBitsKHR*)&forMarshaling->alphaMode, sizeof(VkDisplayPlaneAlphaFlagBitsKHR));
+    *ptr += sizeof(VkDisplayPlaneAlphaFlagBitsKHR);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->imageExtent), ptr);
+}
+
+#endif
+#ifdef VK_KHR_display_swapchain
+void reservedmarshal_VkDisplayPresentInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPresentInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkRect2D(vkStream, rootType, (VkRect2D*)(&forMarshaling->srcRect), ptr);
+    reservedmarshal_VkRect2D(vkStream, rootType, (VkRect2D*)(&forMarshaling->dstRect), ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->persistent, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_KHR_xlib_surface
+void reservedmarshal_VkXlibSurfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkXlibSurfaceCreateInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkXlibSurfaceCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkXlibSurfaceCreateFlagsKHR));
+    *ptr += sizeof(VkXlibSurfaceCreateFlagsKHR);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->dpy;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->dpy)
+    {
+        memcpy(*ptr, (Display*)forMarshaling->dpy, sizeof(Display));
+        *ptr += sizeof(Display);
+    }
+    memcpy(*ptr, (Window*)&forMarshaling->window, sizeof(Window));
+    *ptr += sizeof(Window);
+}
+
+#endif
+#ifdef VK_KHR_xcb_surface
+void reservedmarshal_VkXcbSurfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkXcbSurfaceCreateInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkXcbSurfaceCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkXcbSurfaceCreateFlagsKHR));
+    *ptr += sizeof(VkXcbSurfaceCreateFlagsKHR);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->connection;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->connection)
+    {
+        memcpy(*ptr, (xcb_connection_t*)forMarshaling->connection, sizeof(xcb_connection_t));
+        *ptr += sizeof(xcb_connection_t);
+    }
+    memcpy(*ptr, (xcb_window_t*)&forMarshaling->window, sizeof(xcb_window_t));
+    *ptr += sizeof(xcb_window_t);
+}
+
+#endif
+#ifdef VK_KHR_wayland_surface
+void reservedmarshal_VkWaylandSurfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWaylandSurfaceCreateInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkWaylandSurfaceCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkWaylandSurfaceCreateFlagsKHR));
+    *ptr += sizeof(VkWaylandSurfaceCreateFlagsKHR);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->display;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->display)
+    {
+        memcpy(*ptr, (wl_display*)forMarshaling->display, sizeof(wl_display));
+        *ptr += sizeof(wl_display);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->surface;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->surface)
+    {
+        memcpy(*ptr, (wl_surface*)forMarshaling->surface, sizeof(wl_surface));
+        *ptr += sizeof(wl_surface);
+    }
+}
+
+#endif
+#ifdef VK_KHR_android_surface
+void reservedmarshal_VkAndroidSurfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAndroidSurfaceCreateInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkAndroidSurfaceCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkAndroidSurfaceCreateFlagsKHR));
+    *ptr += sizeof(VkAndroidSurfaceCreateFlagsKHR);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->window;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->window)
+    {
+        memcpy(*ptr, (ANativeWindow*)forMarshaling->window, sizeof(ANativeWindow));
+        *ptr += sizeof(ANativeWindow);
+    }
+}
+
+#endif
+#ifdef VK_KHR_win32_surface
+void reservedmarshal_VkWin32SurfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWin32SurfaceCreateInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkWin32SurfaceCreateFlagsKHR*)&forMarshaling->flags, sizeof(VkWin32SurfaceCreateFlagsKHR));
+    *ptr += sizeof(VkWin32SurfaceCreateFlagsKHR);
+    memcpy(*ptr, (HINSTANCE*)&forMarshaling->hinstance, sizeof(HINSTANCE));
+    *ptr += sizeof(HINSTANCE);
+    memcpy(*ptr, (HWND*)&forMarshaling->hwnd, sizeof(HWND));
+    *ptr += sizeof(HWND);
+}
+
+#endif
+#ifdef VK_KHR_sampler_mirror_clamp_to_edge
+#endif
+#ifdef VK_KHR_multiview
+#endif
+#ifdef VK_KHR_get_physical_device_properties2
+#endif
+#ifdef VK_KHR_device_group
+#endif
+#ifdef VK_KHR_shader_draw_parameters
+#endif
+#ifdef VK_KHR_maintenance1
+#endif
+#ifdef VK_KHR_device_group_creation
+#endif
+#ifdef VK_KHR_external_memory_capabilities
+#endif
+#ifdef VK_KHR_external_memory
+#endif
+#ifdef VK_KHR_external_memory_win32
+void reservedmarshal_VkImportMemoryWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportMemoryWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkExternalMemoryHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
+    *ptr += sizeof(VkExternalMemoryHandleTypeFlagBits);
+    memcpy(*ptr, (HANDLE*)&forMarshaling->handle, sizeof(HANDLE));
+    *ptr += sizeof(HANDLE);
+    memcpy(*ptr, (LPCWSTR*)&forMarshaling->name, sizeof(LPCWSTR));
+    *ptr += sizeof(LPCWSTR);
+}
+
+void reservedmarshal_VkExportMemoryWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExportMemoryWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pAttributes;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pAttributes)
+    {
+        memcpy(*ptr, (const SECURITY_ATTRIBUTES*)forMarshaling->pAttributes, sizeof(const SECURITY_ATTRIBUTES));
+        *ptr += sizeof(const SECURITY_ATTRIBUTES);
+    }
+    memcpy(*ptr, (DWORD*)&forMarshaling->dwAccess, sizeof(DWORD));
+    *ptr += sizeof(DWORD);
+    memcpy(*ptr, (LPCWSTR*)&forMarshaling->name, sizeof(LPCWSTR));
+    *ptr += sizeof(LPCWSTR);
+}
+
+void reservedmarshal_VkMemoryWin32HandlePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryWin32HandlePropertiesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->memoryTypeBits, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkMemoryGetWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryGetWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDeviceMemory((*&forMarshaling->memory));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkExternalMemoryHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
+    *ptr += sizeof(VkExternalMemoryHandleTypeFlagBits);
+}
+
+#endif
+#ifdef VK_KHR_external_memory_fd
+void reservedmarshal_VkImportMemoryFdInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportMemoryFdInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkExternalMemoryHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
+    *ptr += sizeof(VkExternalMemoryHandleTypeFlagBits);
+    memcpy(*ptr, (int*)&forMarshaling->fd, sizeof(int));
+    *ptr += sizeof(int);
+}
+
+void reservedmarshal_VkMemoryFdPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryFdPropertiesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->memoryTypeBits, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkMemoryGetFdInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryGetFdInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDeviceMemory((*&forMarshaling->memory));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkExternalMemoryHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
+    *ptr += sizeof(VkExternalMemoryHandleTypeFlagBits);
+}
+
+#endif
+#ifdef VK_KHR_win32_keyed_mutex
+void reservedmarshal_VkWin32KeyedMutexAcquireReleaseInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWin32KeyedMutexAcquireReleaseInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->acquireCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->acquireCount)
+    {
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->acquireCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkDeviceMemory(forMarshaling->pAcquireSyncs[k]);
+                memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->acquireCount;
+    }
+    memcpy(*ptr, (const uint64_t*)forMarshaling->pAcquireKeys, forMarshaling->acquireCount * sizeof(const uint64_t));
+    *ptr += forMarshaling->acquireCount * sizeof(const uint64_t);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pAcquireTimeouts, forMarshaling->acquireCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->acquireCount * sizeof(const uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->releaseCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->releaseCount)
+    {
+        uint8_t* cgen_var_1_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->releaseCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkDeviceMemory(forMarshaling->pReleaseSyncs[k]);
+                memcpy(cgen_var_1_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->releaseCount;
+    }
+    memcpy(*ptr, (const uint64_t*)forMarshaling->pReleaseKeys, forMarshaling->releaseCount * sizeof(const uint64_t));
+    *ptr += forMarshaling->releaseCount * sizeof(const uint64_t);
+}
+
+#endif
+#ifdef VK_KHR_external_semaphore_capabilities
+#endif
+#ifdef VK_KHR_external_semaphore
+#endif
+#ifdef VK_KHR_external_semaphore_win32
+void reservedmarshal_VkImportSemaphoreWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportSemaphoreWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkSemaphore((*&forMarshaling->semaphore));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkSemaphoreImportFlags*)&forMarshaling->flags, sizeof(VkSemaphoreImportFlags));
+    *ptr += sizeof(VkSemaphoreImportFlags);
+    memcpy(*ptr, (VkExternalSemaphoreHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalSemaphoreHandleTypeFlagBits));
+    *ptr += sizeof(VkExternalSemaphoreHandleTypeFlagBits);
+    memcpy(*ptr, (HANDLE*)&forMarshaling->handle, sizeof(HANDLE));
+    *ptr += sizeof(HANDLE);
+    memcpy(*ptr, (LPCWSTR*)&forMarshaling->name, sizeof(LPCWSTR));
+    *ptr += sizeof(LPCWSTR);
+}
+
+void reservedmarshal_VkExportSemaphoreWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExportSemaphoreWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pAttributes;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pAttributes)
+    {
+        memcpy(*ptr, (const SECURITY_ATTRIBUTES*)forMarshaling->pAttributes, sizeof(const SECURITY_ATTRIBUTES));
+        *ptr += sizeof(const SECURITY_ATTRIBUTES);
+    }
+    memcpy(*ptr, (DWORD*)&forMarshaling->dwAccess, sizeof(DWORD));
+    *ptr += sizeof(DWORD);
+    memcpy(*ptr, (LPCWSTR*)&forMarshaling->name, sizeof(LPCWSTR));
+    *ptr += sizeof(LPCWSTR);
+}
+
+void reservedmarshal_VkD3D12FenceSubmitInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkD3D12FenceSubmitInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->waitSemaphoreValuesCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pWaitSemaphoreValues;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pWaitSemaphoreValues)
+    {
+        memcpy(*ptr, (const uint64_t*)forMarshaling->pWaitSemaphoreValues, forMarshaling->waitSemaphoreValuesCount * sizeof(const uint64_t));
+        *ptr += forMarshaling->waitSemaphoreValuesCount * sizeof(const uint64_t);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->signalSemaphoreValuesCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pSignalSemaphoreValues;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pSignalSemaphoreValues)
+    {
+        memcpy(*ptr, (const uint64_t*)forMarshaling->pSignalSemaphoreValues, forMarshaling->signalSemaphoreValuesCount * sizeof(const uint64_t));
+        *ptr += forMarshaling->signalSemaphoreValuesCount * sizeof(const uint64_t);
+    }
+}
+
+void reservedmarshal_VkSemaphoreGetWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreGetWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkSemaphore((*&forMarshaling->semaphore));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkExternalSemaphoreHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalSemaphoreHandleTypeFlagBits));
+    *ptr += sizeof(VkExternalSemaphoreHandleTypeFlagBits);
+}
+
+#endif
+#ifdef VK_KHR_external_semaphore_fd
+void reservedmarshal_VkImportSemaphoreFdInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportSemaphoreFdInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkSemaphore((*&forMarshaling->semaphore));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkSemaphoreImportFlags*)&forMarshaling->flags, sizeof(VkSemaphoreImportFlags));
+    *ptr += sizeof(VkSemaphoreImportFlags);
+    memcpy(*ptr, (VkExternalSemaphoreHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalSemaphoreHandleTypeFlagBits));
+    *ptr += sizeof(VkExternalSemaphoreHandleTypeFlagBits);
+    memcpy(*ptr, (int*)&forMarshaling->fd, sizeof(int));
+    *ptr += sizeof(int);
+}
+
+void reservedmarshal_VkSemaphoreGetFdInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreGetFdInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkSemaphore((*&forMarshaling->semaphore));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkExternalSemaphoreHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalSemaphoreHandleTypeFlagBits));
+    *ptr += sizeof(VkExternalSemaphoreHandleTypeFlagBits);
+}
+
+#endif
+#ifdef VK_KHR_push_descriptor
+void reservedmarshal_VkPhysicalDevicePushDescriptorPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePushDescriptorPropertiesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPushDescriptors, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_KHR_shader_float16_int8
+#endif
+#ifdef VK_KHR_16bit_storage
+#endif
+#ifdef VK_KHR_incremental_present
+void reservedmarshal_VkRectLayerKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRectLayerKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    reservedmarshal_VkOffset2D(vkStream, rootType, (VkOffset2D*)(&forMarshaling->offset), ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->extent), ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->layer, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPresentRegionKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPresentRegionKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->rectangleCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pRectangles;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pRectangles)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->rectangleCount; ++i)
+        {
+            reservedmarshal_VkRectLayerKHR(vkStream, rootType, (const VkRectLayerKHR*)(forMarshaling->pRectangles + i), ptr);
+        }
+    }
+}
+
+void reservedmarshal_VkPresentRegionsKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPresentRegionsKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->swapchainCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pRegions;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pRegions)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->swapchainCount; ++i)
+        {
+            reservedmarshal_VkPresentRegionKHR(vkStream, rootType, (const VkPresentRegionKHR*)(forMarshaling->pRegions + i), ptr);
+        }
+    }
+}
+
+#endif
+#ifdef VK_KHR_descriptor_update_template
+#endif
+#ifdef VK_KHR_imageless_framebuffer
+#endif
+#ifdef VK_KHR_create_renderpass2
+#endif
+#ifdef VK_KHR_shared_presentable_image
+void reservedmarshal_VkSharedPresentSurfaceCapabilitiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSharedPresentSurfaceCapabilitiesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkImageUsageFlags*)&forMarshaling->sharedPresentSupportedUsageFlags, sizeof(VkImageUsageFlags));
+    *ptr += sizeof(VkImageUsageFlags);
+}
+
+#endif
+#ifdef VK_KHR_external_fence_capabilities
+#endif
+#ifdef VK_KHR_external_fence
+#endif
+#ifdef VK_KHR_external_fence_win32
+void reservedmarshal_VkImportFenceWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportFenceWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkFence((*&forMarshaling->fence));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkFenceImportFlags*)&forMarshaling->flags, sizeof(VkFenceImportFlags));
+    *ptr += sizeof(VkFenceImportFlags);
+    memcpy(*ptr, (VkExternalFenceHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalFenceHandleTypeFlagBits));
+    *ptr += sizeof(VkExternalFenceHandleTypeFlagBits);
+    memcpy(*ptr, (HANDLE*)&forMarshaling->handle, sizeof(HANDLE));
+    *ptr += sizeof(HANDLE);
+    memcpy(*ptr, (LPCWSTR*)&forMarshaling->name, sizeof(LPCWSTR));
+    *ptr += sizeof(LPCWSTR);
+}
+
+void reservedmarshal_VkExportFenceWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExportFenceWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pAttributes;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pAttributes)
+    {
+        memcpy(*ptr, (const SECURITY_ATTRIBUTES*)forMarshaling->pAttributes, sizeof(const SECURITY_ATTRIBUTES));
+        *ptr += sizeof(const SECURITY_ATTRIBUTES);
+    }
+    memcpy(*ptr, (DWORD*)&forMarshaling->dwAccess, sizeof(DWORD));
+    *ptr += sizeof(DWORD);
+    memcpy(*ptr, (LPCWSTR*)&forMarshaling->name, sizeof(LPCWSTR));
+    *ptr += sizeof(LPCWSTR);
+}
+
+void reservedmarshal_VkFenceGetWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFenceGetWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkFence((*&forMarshaling->fence));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkExternalFenceHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalFenceHandleTypeFlagBits));
+    *ptr += sizeof(VkExternalFenceHandleTypeFlagBits);
+}
+
+#endif
+#ifdef VK_KHR_external_fence_fd
+void reservedmarshal_VkImportFenceFdInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportFenceFdInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkFence((*&forMarshaling->fence));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkFenceImportFlags*)&forMarshaling->flags, sizeof(VkFenceImportFlags));
+    *ptr += sizeof(VkFenceImportFlags);
+    memcpy(*ptr, (VkExternalFenceHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalFenceHandleTypeFlagBits));
+    *ptr += sizeof(VkExternalFenceHandleTypeFlagBits);
+    memcpy(*ptr, (int*)&forMarshaling->fd, sizeof(int));
+    *ptr += sizeof(int);
+}
+
+void reservedmarshal_VkFenceGetFdInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFenceGetFdInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkFence((*&forMarshaling->fence));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkExternalFenceHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalFenceHandleTypeFlagBits));
+    *ptr += sizeof(VkExternalFenceHandleTypeFlagBits);
+}
+
+#endif
+#ifdef VK_KHR_performance_query
+void reservedmarshal_VkPhysicalDevicePerformanceQueryFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePerformanceQueryFeaturesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->performanceCounterQueryPools, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->performanceCounterMultipleQueryPools, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDevicePerformanceQueryPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePerformanceQueryPropertiesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->allowCommandBufferQueryCopies, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPerformanceCounterKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceCounterKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPerformanceCounterUnitKHR*)&forMarshaling->unit, sizeof(VkPerformanceCounterUnitKHR));
+    *ptr += sizeof(VkPerformanceCounterUnitKHR);
+    memcpy(*ptr, (VkPerformanceCounterScopeKHR*)&forMarshaling->scope, sizeof(VkPerformanceCounterScopeKHR));
+    *ptr += sizeof(VkPerformanceCounterScopeKHR);
+    memcpy(*ptr, (VkPerformanceCounterStorageKHR*)&forMarshaling->storage, sizeof(VkPerformanceCounterStorageKHR));
+    *ptr += sizeof(VkPerformanceCounterStorageKHR);
+    memcpy(*ptr, (uint8_t*)forMarshaling->uuid, VK_UUID_SIZE * sizeof(uint8_t));
+    *ptr += VK_UUID_SIZE * sizeof(uint8_t);
+}
+
+void reservedmarshal_VkPerformanceCounterDescriptionKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceCounterDescriptionKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPerformanceCounterDescriptionFlagsKHR*)&forMarshaling->flags, sizeof(VkPerformanceCounterDescriptionFlagsKHR));
+    *ptr += sizeof(VkPerformanceCounterDescriptionFlagsKHR);
+    memcpy(*ptr, (char*)forMarshaling->name, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    *ptr += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    memcpy(*ptr, (char*)forMarshaling->category, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    *ptr += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    memcpy(*ptr, (char*)forMarshaling->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    *ptr += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+}
+
+void reservedmarshal_VkQueryPoolPerformanceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkQueryPoolPerformanceCreateInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->queueFamilyIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->counterIndexCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pCounterIndices, forMarshaling->counterIndexCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->counterIndexCount * sizeof(const uint32_t);
+}
+
+void reservedmarshal_VkPerformanceCounterResultKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceCounterResultKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (int32_t*)&forMarshaling->int32, sizeof(int32_t));
+    *ptr += sizeof(int32_t);
+}
+
+void reservedmarshal_VkAcquireProfilingLockInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAcquireProfilingLockInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkAcquireProfilingLockFlagsKHR*)&forMarshaling->flags, sizeof(VkAcquireProfilingLockFlagsKHR));
+    *ptr += sizeof(VkAcquireProfilingLockFlagsKHR);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->timeout, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+}
+
+void reservedmarshal_VkPerformanceQuerySubmitInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceQuerySubmitInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->counterPassIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_KHR_maintenance2
+#endif
+#ifdef VK_KHR_get_surface_capabilities2
+void reservedmarshal_VkPhysicalDeviceSurfaceInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSurfaceInfo2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkSurfaceKHR((*&forMarshaling->surface));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+}
+
+void reservedmarshal_VkSurfaceCapabilities2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceCapabilities2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkSurfaceCapabilitiesKHR(vkStream, rootType, (VkSurfaceCapabilitiesKHR*)(&forMarshaling->surfaceCapabilities), ptr);
+}
+
+void reservedmarshal_VkSurfaceFormat2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceFormat2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkSurfaceFormatKHR(vkStream, rootType, (VkSurfaceFormatKHR*)(&forMarshaling->surfaceFormat), ptr);
+}
+
+#endif
+#ifdef VK_KHR_variable_pointers
+#endif
+#ifdef VK_KHR_get_display_properties2
+void reservedmarshal_VkDisplayProperties2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayProperties2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkDisplayPropertiesKHR(vkStream, rootType, (VkDisplayPropertiesKHR*)(&forMarshaling->displayProperties), ptr);
+}
+
+void reservedmarshal_VkDisplayPlaneProperties2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPlaneProperties2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkDisplayPlanePropertiesKHR(vkStream, rootType, (VkDisplayPlanePropertiesKHR*)(&forMarshaling->displayPlaneProperties), ptr);
+}
+
+void reservedmarshal_VkDisplayModeProperties2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayModeProperties2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkDisplayModePropertiesKHR(vkStream, rootType, (VkDisplayModePropertiesKHR*)(&forMarshaling->displayModeProperties), ptr);
+}
+
+void reservedmarshal_VkDisplayPlaneInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPlaneInfo2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDisplayModeKHR((*&forMarshaling->mode));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->planeIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkDisplayPlaneCapabilities2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPlaneCapabilities2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkDisplayPlaneCapabilitiesKHR(vkStream, rootType, (VkDisplayPlaneCapabilitiesKHR*)(&forMarshaling->capabilities), ptr);
+}
+
+#endif
+#ifdef VK_KHR_dedicated_allocation
+#endif
+#ifdef VK_KHR_storage_buffer_storage_class
+#endif
+#ifdef VK_KHR_relaxed_block_layout
+#endif
+#ifdef VK_KHR_get_memory_requirements2
+#endif
+#ifdef VK_KHR_image_format_list
+#endif
+#ifdef VK_KHR_sampler_ycbcr_conversion
+#endif
+#ifdef VK_KHR_bind_memory2
+#endif
+#ifdef VK_KHR_portability_subset
+void reservedmarshal_VkPhysicalDevicePortabilitySubsetFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePortabilitySubsetFeaturesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->constantAlphaColorBlendFactors, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->events, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->imageViewFormatReinterpretation, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->imageViewFormatSwizzle, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->imageView2DOn3DImage, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->multisampleArrayImage, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->mutableComparisonSamplers, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->pointPolygons, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->samplerMipLodBias, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->separateStencilMaskRef, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSampleRateInterpolationFunctions, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->tessellationIsolines, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->tessellationPointMode, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->triangleFans, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->vertexAttributeAccessBeyondStride, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDevicePortabilitySubsetPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePortabilitySubsetPropertiesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->minVertexInputBindingStrideAlignment, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_KHR_maintenance3
+#endif
+#ifdef VK_KHR_draw_indirect_count
+#endif
+#ifdef VK_KHR_shader_subgroup_extended_types
+#endif
+#ifdef VK_KHR_8bit_storage
+#endif
+#ifdef VK_KHR_shader_atomic_int64
+#endif
+#ifdef VK_KHR_shader_clock
+void reservedmarshal_VkPhysicalDeviceShaderClockFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderClockFeaturesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSubgroupClock, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderDeviceClock, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_KHR_driver_properties
+#endif
+#ifdef VK_KHR_shader_float_controls
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+void reservedmarshal_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderTerminateInvocation, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+void reservedmarshal_VkFragmentShadingRateAttachmentInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFragmentShadingRateAttachmentInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkAttachmentReference2(vkStream, rootType, (const VkAttachmentReference2*)(forMarshaling->pFragmentShadingRateAttachment), ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->shadingRateAttachmentTexelSize), ptr);
+}
+
+void reservedmarshal_VkPipelineFragmentShadingRateStateCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineFragmentShadingRateStateCreateInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->fragmentSize), ptr);
+    memcpy(*ptr, (VkFragmentShadingRateCombinerOpKHR*)forMarshaling->combinerOps, 2 * sizeof(VkFragmentShadingRateCombinerOpKHR));
+    *ptr += 2 * sizeof(VkFragmentShadingRateCombinerOpKHR);
+}
+
+void reservedmarshal_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateFeaturesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->pipelineFragmentShadingRate, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->primitiveFragmentShadingRate, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->attachmentFragmentShadingRate, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRatePropertiesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->minFragmentShadingRateAttachmentTexelSize), ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxFragmentShadingRateAttachmentTexelSize), ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxFragmentShadingRateAttachmentTexelSizeAspectRatio, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->primitiveFragmentShadingRateWithMultipleViewports, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->layeredShadingRateAttachments, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentShadingRateNonTrivialCombinerOps, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxFragmentSize), ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxFragmentSizeAspectRatio, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxFragmentShadingRateCoverageSamples, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkSampleCountFlagBits*)&forMarshaling->maxFragmentShadingRateRasterizationSamples, sizeof(VkSampleCountFlagBits));
+    *ptr += sizeof(VkSampleCountFlagBits);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentShadingRateWithShaderDepthStencilWrites, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentShadingRateWithSampleMask, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentShadingRateWithShaderSampleMask, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentShadingRateWithConservativeRasterization, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentShadingRateWithFragmentShaderInterlock, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentShadingRateWithCustomSampleLocations, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentShadingRateStrictMultiplyCombiner, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceFragmentShadingRateKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkSampleCountFlags*)&forMarshaling->sampleCounts, sizeof(VkSampleCountFlags));
+    *ptr += sizeof(VkSampleCountFlags);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->fragmentSize), ptr);
+}
+
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+void reservedmarshal_VkSurfaceProtectedCapabilitiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceProtectedCapabilitiesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->supportsProtected, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+#endif
+#ifdef VK_KHR_buffer_device_address
+#endif
+#ifdef VK_KHR_deferred_host_operations
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+void reservedmarshal_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->pipelineExecutableInfo, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPipelineInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPipeline((*&forMarshaling->pipeline));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+}
+
+void reservedmarshal_VkPipelineExecutablePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutablePropertiesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkShaderStageFlags*)&forMarshaling->stages, sizeof(VkShaderStageFlags));
+    *ptr += sizeof(VkShaderStageFlags);
+    memcpy(*ptr, (char*)forMarshaling->name, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    *ptr += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    memcpy(*ptr, (char*)forMarshaling->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    *ptr += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->subgroupSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPipelineExecutableInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutableInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPipeline((*&forMarshaling->pipeline));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->executableIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPipelineExecutableStatisticValueKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutableStatisticValueKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkBool32*)&forMarshaling->b32, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPipelineExecutableStatisticKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutableStatisticKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (char*)forMarshaling->name, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    *ptr += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    memcpy(*ptr, (char*)forMarshaling->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    *ptr += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    memcpy(*ptr, (VkPipelineExecutableStatisticFormatKHR*)&forMarshaling->format, sizeof(VkPipelineExecutableStatisticFormatKHR));
+    *ptr += sizeof(VkPipelineExecutableStatisticFormatKHR);
+    reservedmarshal_VkPipelineExecutableStatisticValueKHR(vkStream, rootType, (VkPipelineExecutableStatisticValueKHR*)(&forMarshaling->value), ptr);
+}
+
+void reservedmarshal_VkPipelineExecutableInternalRepresentationKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutableInternalRepresentationKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (char*)forMarshaling->name, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    *ptr += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    memcpy(*ptr, (char*)forMarshaling->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    *ptr += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->isText, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->dataSize;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pData;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pData)
+    {
+        memcpy(*ptr, (void*)forMarshaling->pData, forMarshaling->dataSize * sizeof(uint8_t));
+        *ptr += forMarshaling->dataSize * sizeof(uint8_t);
+    }
+}
+
+#endif
+#ifdef VK_KHR_pipeline_library
+void reservedmarshal_VkPipelineLibraryCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineLibraryCreateInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->libraryCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->libraryCount)
+    {
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->libraryCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkPipeline(forMarshaling->pLibraries[k]);
+                memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->libraryCount;
+    }
+}
+
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+void reservedmarshal_VkBufferCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferCopy2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->srcOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->dstOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkCopyBufferInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyBufferInfo2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&forMarshaling->srcBuffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&forMarshaling->dstBuffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->regionCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->regionCount; ++i)
+    {
+        reservedmarshal_VkBufferCopy2KHR(vkStream, rootType, (const VkBufferCopy2KHR*)(forMarshaling->pRegions + i), ptr);
+    }
+}
+
+void reservedmarshal_VkImageCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageCopy2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->srcSubresource), ptr);
+    reservedmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->srcOffset), ptr);
+    reservedmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->dstSubresource), ptr);
+    reservedmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->dstOffset), ptr);
+    reservedmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->extent), ptr);
+}
+
+void reservedmarshal_VkCopyImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyImageInfo2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&forMarshaling->srcImage));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->srcImageLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImage((*&forMarshaling->dstImage));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->dstImageLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->regionCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->regionCount; ++i)
+    {
+        reservedmarshal_VkImageCopy2KHR(vkStream, rootType, (const VkImageCopy2KHR*)(forMarshaling->pRegions + i), ptr);
+    }
+}
+
+void reservedmarshal_VkBufferImageCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferImageCopy2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->bufferOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->bufferRowLength, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->bufferImageHeight, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    reservedmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->imageSubresource), ptr);
+    reservedmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->imageOffset), ptr);
+    reservedmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->imageExtent), ptr);
+}
+
+void reservedmarshal_VkCopyBufferToImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyBufferToImageInfo2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&forMarshaling->srcBuffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImage((*&forMarshaling->dstImage));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->dstImageLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->regionCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->regionCount; ++i)
+    {
+        reservedmarshal_VkBufferImageCopy2KHR(vkStream, rootType, (const VkBufferImageCopy2KHR*)(forMarshaling->pRegions + i), ptr);
+    }
+}
+
+void reservedmarshal_VkCopyImageToBufferInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyImageToBufferInfo2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&forMarshaling->srcImage));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->srcImageLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&forMarshaling->dstBuffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->regionCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->regionCount; ++i)
+    {
+        reservedmarshal_VkBufferImageCopy2KHR(vkStream, rootType, (const VkBufferImageCopy2KHR*)(forMarshaling->pRegions + i), ptr);
+    }
+}
+
+void reservedmarshal_VkImageBlit2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageBlit2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->srcSubresource), ptr);
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        reservedmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(forMarshaling->srcOffsets + i), ptr);
+    }
+    reservedmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->dstSubresource), ptr);
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        reservedmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(forMarshaling->dstOffsets + i), ptr);
+    }
+}
+
+void reservedmarshal_VkBlitImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBlitImageInfo2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&forMarshaling->srcImage));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->srcImageLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImage((*&forMarshaling->dstImage));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->dstImageLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->regionCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->regionCount; ++i)
+    {
+        reservedmarshal_VkImageBlit2KHR(vkStream, rootType, (const VkImageBlit2KHR*)(forMarshaling->pRegions + i), ptr);
+    }
+    memcpy(*ptr, (VkFilter*)&forMarshaling->filter, sizeof(VkFilter));
+    *ptr += sizeof(VkFilter);
+}
+
+void reservedmarshal_VkImageResolve2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageResolve2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->srcSubresource), ptr);
+    reservedmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->srcOffset), ptr);
+    reservedmarshal_VkImageSubresourceLayers(vkStream, rootType, (VkImageSubresourceLayers*)(&forMarshaling->dstSubresource), ptr);
+    reservedmarshal_VkOffset3D(vkStream, rootType, (VkOffset3D*)(&forMarshaling->dstOffset), ptr);
+    reservedmarshal_VkExtent3D(vkStream, rootType, (VkExtent3D*)(&forMarshaling->extent), ptr);
+}
+
+void reservedmarshal_VkResolveImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkResolveImageInfo2KHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&forMarshaling->srcImage));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->srcImageLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkImage((*&forMarshaling->dstImage));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkImageLayout*)&forMarshaling->dstImageLayout, sizeof(VkImageLayout));
+    *ptr += sizeof(VkImageLayout);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->regionCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->regionCount; ++i)
+    {
+        reservedmarshal_VkImageResolve2KHR(vkStream, rootType, (const VkImageResolve2KHR*)(forMarshaling->pRegions + i), ptr);
+    }
+}
+
+#endif
+#ifdef VK_ANDROID_native_buffer
+void reservedmarshal_VkNativeBufferANDROID(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkNativeBufferANDROID* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->handle;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->handle)
+    {
+        memcpy(*ptr, (const uint32_t*)forMarshaling->handle, sizeof(const uint32_t));
+        *ptr += sizeof(const uint32_t);
+    }
+    memcpy(*ptr, (int*)&forMarshaling->stride, sizeof(int));
+    *ptr += sizeof(int);
+    memcpy(*ptr, (int*)&forMarshaling->format, sizeof(int));
+    *ptr += sizeof(int);
+    memcpy(*ptr, (int*)&forMarshaling->usage, sizeof(int));
+    *ptr += sizeof(int);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->consumer, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->producer, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+}
+
+#endif
+#ifdef VK_EXT_debug_report
+void reservedmarshal_VkDebugReportCallbackCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugReportCallbackCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDebugReportFlagsEXT*)&forMarshaling->flags, sizeof(VkDebugReportFlagsEXT));
+    *ptr += sizeof(VkDebugReportFlagsEXT);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->pfnCallback;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pUserData;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pUserData)
+    {
+        memcpy(*ptr, (void*)forMarshaling->pUserData, sizeof(uint8_t));
+        *ptr += sizeof(uint8_t);
+    }
+}
+
+#endif
+#ifdef VK_NV_glsl_shader
+#endif
+#ifdef VK_EXT_depth_range_unrestricted
+#endif
+#ifdef VK_IMG_filter_cubic
+#endif
+#ifdef VK_AMD_rasterization_order
+void reservedmarshal_VkPipelineRasterizationStateRasterizationOrderAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateRasterizationOrderAMD* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkRasterizationOrderAMD*)&forMarshaling->rasterizationOrder, sizeof(VkRasterizationOrderAMD));
+    *ptr += sizeof(VkRasterizationOrderAMD);
+}
+
+#endif
+#ifdef VK_AMD_shader_trinary_minmax
+#endif
+#ifdef VK_AMD_shader_explicit_vertex_parameter
+#endif
+#ifdef VK_EXT_debug_marker
+void reservedmarshal_VkDebugMarkerObjectNameInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugMarkerObjectNameInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDebugReportObjectTypeEXT*)&forMarshaling->objectType, sizeof(VkDebugReportObjectTypeEXT));
+    *ptr += sizeof(VkDebugReportObjectTypeEXT);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->object, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    {
+        uint32_t l = forMarshaling->pObjectName ? strlen(forMarshaling->pObjectName): 0;
+        memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+        android::base::Stream::toBe32((uint8_t*)*ptr);
+        *ptr += sizeof(uint32_t);
+        memcpy(*ptr, (char*)forMarshaling->pObjectName, l);
+        *ptr += l;
+    }
+}
+
+void reservedmarshal_VkDebugMarkerObjectTagInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugMarkerObjectTagInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDebugReportObjectTypeEXT*)&forMarshaling->objectType, sizeof(VkDebugReportObjectTypeEXT));
+    *ptr += sizeof(VkDebugReportObjectTypeEXT);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->object, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->tagName, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->tagSize;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    memcpy(*ptr, (const void*)forMarshaling->pTag, forMarshaling->tagSize * sizeof(const uint8_t));
+    *ptr += forMarshaling->tagSize * sizeof(const uint8_t);
+}
+
+void reservedmarshal_VkDebugMarkerMarkerInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugMarkerMarkerInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    {
+        uint32_t l = forMarshaling->pMarkerName ? strlen(forMarshaling->pMarkerName): 0;
+        memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+        android::base::Stream::toBe32((uint8_t*)*ptr);
+        *ptr += sizeof(uint32_t);
+        memcpy(*ptr, (char*)forMarshaling->pMarkerName, l);
+        *ptr += l;
+    }
+    memcpy(*ptr, (float*)forMarshaling->color, 4 * sizeof(float));
+    *ptr += 4 * sizeof(float);
+}
+
+#endif
+#ifdef VK_AMD_gcn_shader
+#endif
+#ifdef VK_NV_dedicated_allocation
+void reservedmarshal_VkDedicatedAllocationImageCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDedicatedAllocationImageCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->dedicatedAllocation, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkDedicatedAllocationBufferCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDedicatedAllocationBufferCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->dedicatedAllocation, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkDedicatedAllocationMemoryAllocateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDedicatedAllocationMemoryAllocateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImage((*&forMarshaling->image));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&forMarshaling->buffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+}
+
+#endif
+#ifdef VK_EXT_transform_feedback
+void reservedmarshal_VkPhysicalDeviceTransformFeedbackFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTransformFeedbackFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->transformFeedback, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->geometryStreams, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceTransformFeedbackPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTransformFeedbackPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTransformFeedbackStreams, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTransformFeedbackBuffers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->maxTransformFeedbackBufferSize, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTransformFeedbackStreamDataSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTransformFeedbackBufferDataSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTransformFeedbackBufferDataStride, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->transformFeedbackQueries, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->transformFeedbackStreamsLinesTriangles, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->transformFeedbackRasterizationStreamSelect, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->transformFeedbackDraw, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPipelineRasterizationStateStreamCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateStreamCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineRasterizationStateStreamCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkPipelineRasterizationStateStreamCreateFlagsEXT));
+    *ptr += sizeof(VkPipelineRasterizationStateStreamCreateFlagsEXT);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->rasterizationStream, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_NVX_image_view_handle
+void reservedmarshal_VkImageViewHandleInfoNVX(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageViewHandleInfoNVX* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkImageView((*&forMarshaling->imageView));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDescriptorType*)&forMarshaling->descriptorType, sizeof(VkDescriptorType));
+    *ptr += sizeof(VkDescriptorType);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkSampler((*&forMarshaling->sampler));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+}
+
+void reservedmarshal_VkImageViewAddressPropertiesNVX(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageViewAddressPropertiesNVX* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceAddress*)&forMarshaling->deviceAddress, sizeof(VkDeviceAddress));
+    *ptr += sizeof(VkDeviceAddress);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+#endif
+#ifdef VK_AMD_draw_indirect_count
+#endif
+#ifdef VK_AMD_negative_viewport_height
+#endif
+#ifdef VK_AMD_gpu_shader_half_float
+#endif
+#ifdef VK_AMD_shader_ballot
+#endif
+#ifdef VK_AMD_texture_gather_bias_lod
+void reservedmarshal_VkTextureLODGatherFormatPropertiesAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkTextureLODGatherFormatPropertiesAMD* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->supportsTextureGatherLODBiasAMD, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_AMD_shader_info
+void reservedmarshal_VkShaderResourceUsageAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkShaderResourceUsageAMD* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->numUsedVgprs, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->numUsedSgprs, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->ldsSizePerLocalWorkGroup, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->ldsUsageSizeInBytes;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    uint64_t cgen_var_1 = (uint64_t)forMarshaling->scratchMemUsageInBytes;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+}
+
+void reservedmarshal_VkShaderStatisticsInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkShaderStatisticsInfoAMD* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkShaderStageFlags*)&forMarshaling->shaderStageMask, sizeof(VkShaderStageFlags));
+    *ptr += sizeof(VkShaderStageFlags);
+    reservedmarshal_VkShaderResourceUsageAMD(vkStream, rootType, (VkShaderResourceUsageAMD*)(&forMarshaling->resourceUsage), ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->numPhysicalVgprs, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->numPhysicalSgprs, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->numAvailableVgprs, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->numAvailableSgprs, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)forMarshaling->computeWorkGroupSize, 3 * sizeof(uint32_t));
+    *ptr += 3 * sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_AMD_shader_image_load_store_lod
+#endif
+#ifdef VK_GGP_stream_descriptor_surface
+void reservedmarshal_VkStreamDescriptorSurfaceCreateInfoGGP(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkStreamDescriptorSurfaceCreateInfoGGP* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkStreamDescriptorSurfaceCreateFlagsGGP*)&forMarshaling->flags, sizeof(VkStreamDescriptorSurfaceCreateFlagsGGP));
+    *ptr += sizeof(VkStreamDescriptorSurfaceCreateFlagsGGP);
+    memcpy(*ptr, (GgpStreamDescriptor*)&forMarshaling->streamDescriptor, sizeof(GgpStreamDescriptor));
+    *ptr += sizeof(GgpStreamDescriptor);
+}
+
+#endif
+#ifdef VK_NV_corner_sampled_image
+void reservedmarshal_VkPhysicalDeviceCornerSampledImageFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCornerSampledImageFeaturesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->cornerSampledImage, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_IMG_format_pvrtc
+#endif
+#ifdef VK_NV_external_memory_capabilities
+void reservedmarshal_VkExternalImageFormatPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalImageFormatPropertiesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    reservedmarshal_VkImageFormatProperties(vkStream, rootType, (VkImageFormatProperties*)(&forMarshaling->imageFormatProperties), ptr);
+    memcpy(*ptr, (VkExternalMemoryFeatureFlagsNV*)&forMarshaling->externalMemoryFeatures, sizeof(VkExternalMemoryFeatureFlagsNV));
+    *ptr += sizeof(VkExternalMemoryFeatureFlagsNV);
+    memcpy(*ptr, (VkExternalMemoryHandleTypeFlagsNV*)&forMarshaling->exportFromImportedHandleTypes, sizeof(VkExternalMemoryHandleTypeFlagsNV));
+    *ptr += sizeof(VkExternalMemoryHandleTypeFlagsNV);
+    memcpy(*ptr, (VkExternalMemoryHandleTypeFlagsNV*)&forMarshaling->compatibleHandleTypes, sizeof(VkExternalMemoryHandleTypeFlagsNV));
+    *ptr += sizeof(VkExternalMemoryHandleTypeFlagsNV);
+}
+
+#endif
+#ifdef VK_NV_external_memory
+void reservedmarshal_VkExternalMemoryImageCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalMemoryImageCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkExternalMemoryHandleTypeFlagsNV*)&forMarshaling->handleTypes, sizeof(VkExternalMemoryHandleTypeFlagsNV));
+    *ptr += sizeof(VkExternalMemoryHandleTypeFlagsNV);
+}
+
+void reservedmarshal_VkExportMemoryAllocateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExportMemoryAllocateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkExternalMemoryHandleTypeFlagsNV*)&forMarshaling->handleTypes, sizeof(VkExternalMemoryHandleTypeFlagsNV));
+    *ptr += sizeof(VkExternalMemoryHandleTypeFlagsNV);
+}
+
+#endif
+#ifdef VK_NV_external_memory_win32
+void reservedmarshal_VkImportMemoryWin32HandleInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportMemoryWin32HandleInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkExternalMemoryHandleTypeFlagsNV*)&forMarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagsNV));
+    *ptr += sizeof(VkExternalMemoryHandleTypeFlagsNV);
+    memcpy(*ptr, (HANDLE*)&forMarshaling->handle, sizeof(HANDLE));
+    *ptr += sizeof(HANDLE);
+}
+
+void reservedmarshal_VkExportMemoryWin32HandleInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExportMemoryWin32HandleInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pAttributes;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pAttributes)
+    {
+        memcpy(*ptr, (const SECURITY_ATTRIBUTES*)forMarshaling->pAttributes, sizeof(const SECURITY_ATTRIBUTES));
+        *ptr += sizeof(const SECURITY_ATTRIBUTES);
+    }
+    memcpy(*ptr, (DWORD*)&forMarshaling->dwAccess, sizeof(DWORD));
+    *ptr += sizeof(DWORD);
+}
+
+#endif
+#ifdef VK_NV_win32_keyed_mutex
+void reservedmarshal_VkWin32KeyedMutexAcquireReleaseInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWin32KeyedMutexAcquireReleaseInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->acquireCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->acquireCount)
+    {
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->acquireCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkDeviceMemory(forMarshaling->pAcquireSyncs[k]);
+                memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->acquireCount;
+    }
+    memcpy(*ptr, (const uint64_t*)forMarshaling->pAcquireKeys, forMarshaling->acquireCount * sizeof(const uint64_t));
+    *ptr += forMarshaling->acquireCount * sizeof(const uint64_t);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pAcquireTimeoutMilliseconds, forMarshaling->acquireCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->acquireCount * sizeof(const uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->releaseCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->releaseCount)
+    {
+        uint8_t* cgen_var_1_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->releaseCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkDeviceMemory(forMarshaling->pReleaseSyncs[k]);
+                memcpy(cgen_var_1_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->releaseCount;
+    }
+    memcpy(*ptr, (const uint64_t*)forMarshaling->pReleaseKeys, forMarshaling->releaseCount * sizeof(const uint64_t));
+    *ptr += forMarshaling->releaseCount * sizeof(const uint64_t);
+}
+
+#endif
+#ifdef VK_EXT_validation_flags
+void reservedmarshal_VkValidationFlagsEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkValidationFlagsEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->disabledValidationCheckCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const VkValidationCheckEXT*)forMarshaling->pDisabledValidationChecks, forMarshaling->disabledValidationCheckCount * sizeof(const VkValidationCheckEXT));
+    *ptr += forMarshaling->disabledValidationCheckCount * sizeof(const VkValidationCheckEXT);
+}
+
+#endif
+#ifdef VK_NN_vi_surface
+void reservedmarshal_VkViSurfaceCreateInfoNN(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkViSurfaceCreateInfoNN* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkViSurfaceCreateFlagsNN*)&forMarshaling->flags, sizeof(VkViSurfaceCreateFlagsNN));
+    *ptr += sizeof(VkViSurfaceCreateFlagsNN);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->window;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->window)
+    {
+        memcpy(*ptr, (void*)forMarshaling->window, sizeof(uint8_t));
+        *ptr += sizeof(uint8_t);
+    }
+}
+
+#endif
+#ifdef VK_EXT_shader_subgroup_ballot
+#endif
+#ifdef VK_EXT_shader_subgroup_vote
+#endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+void reservedmarshal_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->textureCompressionASTC_HDR, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_astc_decode_mode
+void reservedmarshal_VkImageViewASTCDecodeModeEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageViewASTCDecodeModeEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkFormat*)&forMarshaling->decodeMode, sizeof(VkFormat));
+    *ptr += sizeof(VkFormat);
+}
+
+void reservedmarshal_VkPhysicalDeviceASTCDecodeFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceASTCDecodeFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->decodeModeSharedExponent, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_conditional_rendering
+void reservedmarshal_VkConditionalRenderingBeginInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkConditionalRenderingBeginInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&forMarshaling->buffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkConditionalRenderingFlagsEXT*)&forMarshaling->flags, sizeof(VkConditionalRenderingFlagsEXT));
+    *ptr += sizeof(VkConditionalRenderingFlagsEXT);
+}
+
+void reservedmarshal_VkPhysicalDeviceConditionalRenderingFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceConditionalRenderingFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->conditionalRendering, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->inheritedConditionalRendering, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkCommandBufferInheritanceConditionalRenderingInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceConditionalRenderingInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->conditionalRenderingEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_NV_clip_space_w_scaling
+void reservedmarshal_VkViewportWScalingNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkViewportWScalingNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (float*)&forMarshaling->xcoeff, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->ycoeff, sizeof(float));
+    *ptr += sizeof(float);
+}
+
+void reservedmarshal_VkPipelineViewportWScalingStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportWScalingStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->viewportWScalingEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->viewportCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pViewportWScalings;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pViewportWScalings)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->viewportCount; ++i)
+        {
+            reservedmarshal_VkViewportWScalingNV(vkStream, rootType, (const VkViewportWScalingNV*)(forMarshaling->pViewportWScalings + i), ptr);
+        }
+    }
+}
+
+#endif
+#ifdef VK_EXT_direct_mode_display
+#endif
+#ifdef VK_EXT_acquire_xlib_display
+#endif
+#ifdef VK_EXT_display_surface_counter
+void reservedmarshal_VkSurfaceCapabilities2EXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceCapabilities2EXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->minImageCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxImageCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->currentExtent), ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->minImageExtent), ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxImageExtent), ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxImageArrayLayers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkSurfaceTransformFlagsKHR*)&forMarshaling->supportedTransforms, sizeof(VkSurfaceTransformFlagsKHR));
+    *ptr += sizeof(VkSurfaceTransformFlagsKHR);
+    memcpy(*ptr, (VkSurfaceTransformFlagBitsKHR*)&forMarshaling->currentTransform, sizeof(VkSurfaceTransformFlagBitsKHR));
+    *ptr += sizeof(VkSurfaceTransformFlagBitsKHR);
+    memcpy(*ptr, (VkCompositeAlphaFlagsKHR*)&forMarshaling->supportedCompositeAlpha, sizeof(VkCompositeAlphaFlagsKHR));
+    *ptr += sizeof(VkCompositeAlphaFlagsKHR);
+    memcpy(*ptr, (VkImageUsageFlags*)&forMarshaling->supportedUsageFlags, sizeof(VkImageUsageFlags));
+    *ptr += sizeof(VkImageUsageFlags);
+    memcpy(*ptr, (VkSurfaceCounterFlagsEXT*)&forMarshaling->supportedSurfaceCounters, sizeof(VkSurfaceCounterFlagsEXT));
+    *ptr += sizeof(VkSurfaceCounterFlagsEXT);
+}
+
+#endif
+#ifdef VK_EXT_display_control
+void reservedmarshal_VkDisplayPowerInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPowerInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDisplayPowerStateEXT*)&forMarshaling->powerState, sizeof(VkDisplayPowerStateEXT));
+    *ptr += sizeof(VkDisplayPowerStateEXT);
+}
+
+void reservedmarshal_VkDeviceEventInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceEventInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceEventTypeEXT*)&forMarshaling->deviceEvent, sizeof(VkDeviceEventTypeEXT));
+    *ptr += sizeof(VkDeviceEventTypeEXT);
+}
+
+void reservedmarshal_VkDisplayEventInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayEventInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDisplayEventTypeEXT*)&forMarshaling->displayEvent, sizeof(VkDisplayEventTypeEXT));
+    *ptr += sizeof(VkDisplayEventTypeEXT);
+}
+
+void reservedmarshal_VkSwapchainCounterCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSwapchainCounterCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkSurfaceCounterFlagsEXT*)&forMarshaling->surfaceCounters, sizeof(VkSurfaceCounterFlagsEXT));
+    *ptr += sizeof(VkSurfaceCounterFlagsEXT);
+}
+
+#endif
+#ifdef VK_GOOGLE_display_timing
+void reservedmarshal_VkRefreshCycleDurationGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRefreshCycleDurationGOOGLE* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint64_t*)&forMarshaling->refreshDuration, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+}
+
+void reservedmarshal_VkPastPresentationTimingGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPastPresentationTimingGOOGLE* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->presentID, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->desiredPresentTime, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->actualPresentTime, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->earliestPresentTime, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->presentMargin, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+}
+
+void reservedmarshal_VkPresentTimeGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPresentTimeGOOGLE* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->presentID, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->desiredPresentTime, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+}
+
+void reservedmarshal_VkPresentTimesInfoGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPresentTimesInfoGOOGLE* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->swapchainCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pTimes;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pTimes)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->swapchainCount; ++i)
+        {
+            reservedmarshal_VkPresentTimeGOOGLE(vkStream, rootType, (const VkPresentTimeGOOGLE*)(forMarshaling->pTimes + i), ptr);
+        }
+    }
+}
+
+#endif
+#ifdef VK_NV_sample_mask_override_coverage
+#endif
+#ifdef VK_NV_geometry_shader_passthrough
+#endif
+#ifdef VK_NV_viewport_array2
+#endif
+#ifdef VK_NVX_multiview_per_view_attributes
+void reservedmarshal_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->perViewPositionAllComponents, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_NV_viewport_swizzle
+void reservedmarshal_VkViewportSwizzleNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkViewportSwizzleNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkViewportCoordinateSwizzleNV*)&forMarshaling->x, sizeof(VkViewportCoordinateSwizzleNV));
+    *ptr += sizeof(VkViewportCoordinateSwizzleNV);
+    memcpy(*ptr, (VkViewportCoordinateSwizzleNV*)&forMarshaling->y, sizeof(VkViewportCoordinateSwizzleNV));
+    *ptr += sizeof(VkViewportCoordinateSwizzleNV);
+    memcpy(*ptr, (VkViewportCoordinateSwizzleNV*)&forMarshaling->z, sizeof(VkViewportCoordinateSwizzleNV));
+    *ptr += sizeof(VkViewportCoordinateSwizzleNV);
+    memcpy(*ptr, (VkViewportCoordinateSwizzleNV*)&forMarshaling->w, sizeof(VkViewportCoordinateSwizzleNV));
+    *ptr += sizeof(VkViewportCoordinateSwizzleNV);
+}
+
+void reservedmarshal_VkPipelineViewportSwizzleStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportSwizzleStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineViewportSwizzleStateCreateFlagsNV*)&forMarshaling->flags, sizeof(VkPipelineViewportSwizzleStateCreateFlagsNV));
+    *ptr += sizeof(VkPipelineViewportSwizzleStateCreateFlagsNV);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->viewportCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pViewportSwizzles;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pViewportSwizzles)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->viewportCount; ++i)
+        {
+            reservedmarshal_VkViewportSwizzleNV(vkStream, rootType, (const VkViewportSwizzleNV*)(forMarshaling->pViewportSwizzles + i), ptr);
+        }
+    }
+}
+
+#endif
+#ifdef VK_EXT_discard_rectangles
+void reservedmarshal_VkPhysicalDeviceDiscardRectanglePropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDiscardRectanglePropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDiscardRectangles, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPipelineDiscardRectangleStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineDiscardRectangleStateCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineDiscardRectangleStateCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkPipelineDiscardRectangleStateCreateFlagsEXT));
+    *ptr += sizeof(VkPipelineDiscardRectangleStateCreateFlagsEXT);
+    memcpy(*ptr, (VkDiscardRectangleModeEXT*)&forMarshaling->discardRectangleMode, sizeof(VkDiscardRectangleModeEXT));
+    *ptr += sizeof(VkDiscardRectangleModeEXT);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->discardRectangleCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pDiscardRectangles;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pDiscardRectangles)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->discardRectangleCount; ++i)
+        {
+            reservedmarshal_VkRect2D(vkStream, rootType, (const VkRect2D*)(forMarshaling->pDiscardRectangles + i), ptr);
+        }
+    }
+}
+
+#endif
+#ifdef VK_EXT_conservative_rasterization
+void reservedmarshal_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceConservativeRasterizationPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (float*)&forMarshaling->primitiveOverestimationSize, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->maxExtraPrimitiveOverestimationSize, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->extraPrimitiveOverestimationSizeGranularity, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->primitiveUnderestimation, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->conservativePointAndLineRasterization, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->degenerateTrianglesRasterized, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->degenerateLinesRasterized, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fullyCoveredFragmentShaderInputVariable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->conservativeRasterizationPostDepthCoverage, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPipelineRasterizationConservativeStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationConservativeStateCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineRasterizationConservativeStateCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkPipelineRasterizationConservativeStateCreateFlagsEXT));
+    *ptr += sizeof(VkPipelineRasterizationConservativeStateCreateFlagsEXT);
+    memcpy(*ptr, (VkConservativeRasterizationModeEXT*)&forMarshaling->conservativeRasterizationMode, sizeof(VkConservativeRasterizationModeEXT));
+    *ptr += sizeof(VkConservativeRasterizationModeEXT);
+    memcpy(*ptr, (float*)&forMarshaling->extraPrimitiveOverestimationSize, sizeof(float));
+    *ptr += sizeof(float);
+}
+
+#endif
+#ifdef VK_EXT_depth_clip_enable
+void reservedmarshal_VkPhysicalDeviceDepthClipEnableFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDepthClipEnableFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->depthClipEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPipelineRasterizationDepthClipStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationDepthClipStateCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineRasterizationDepthClipStateCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkPipelineRasterizationDepthClipStateCreateFlagsEXT));
+    *ptr += sizeof(VkPipelineRasterizationDepthClipStateCreateFlagsEXT);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->depthClipEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_swapchain_colorspace
+#endif
+#ifdef VK_EXT_hdr_metadata
+void reservedmarshal_VkXYColorEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkXYColorEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (float*)&forMarshaling->x, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->y, sizeof(float));
+    *ptr += sizeof(float);
+}
+
+void reservedmarshal_VkHdrMetadataEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkHdrMetadataEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkXYColorEXT(vkStream, rootType, (VkXYColorEXT*)(&forMarshaling->displayPrimaryRed), ptr);
+    reservedmarshal_VkXYColorEXT(vkStream, rootType, (VkXYColorEXT*)(&forMarshaling->displayPrimaryGreen), ptr);
+    reservedmarshal_VkXYColorEXT(vkStream, rootType, (VkXYColorEXT*)(&forMarshaling->displayPrimaryBlue), ptr);
+    reservedmarshal_VkXYColorEXT(vkStream, rootType, (VkXYColorEXT*)(&forMarshaling->whitePoint), ptr);
+    memcpy(*ptr, (float*)&forMarshaling->maxLuminance, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->minLuminance, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->maxContentLightLevel, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->maxFrameAverageLightLevel, sizeof(float));
+    *ptr += sizeof(float);
+}
+
+#endif
+#ifdef VK_MVK_ios_surface
+void reservedmarshal_VkIOSSurfaceCreateInfoMVK(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkIOSSurfaceCreateInfoMVK* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkIOSSurfaceCreateFlagsMVK*)&forMarshaling->flags, sizeof(VkIOSSurfaceCreateFlagsMVK));
+    *ptr += sizeof(VkIOSSurfaceCreateFlagsMVK);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pView;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pView)
+    {
+        memcpy(*ptr, (const void*)forMarshaling->pView, sizeof(const uint8_t));
+        *ptr += sizeof(const uint8_t);
+    }
+}
+
+#endif
+#ifdef VK_MVK_macos_surface
+void reservedmarshal_VkMacOSSurfaceCreateInfoMVK(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMacOSSurfaceCreateInfoMVK* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkMacOSSurfaceCreateFlagsMVK*)&forMarshaling->flags, sizeof(VkMacOSSurfaceCreateFlagsMVK));
+    *ptr += sizeof(VkMacOSSurfaceCreateFlagsMVK);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pView;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pView)
+    {
+        memcpy(*ptr, (const void*)forMarshaling->pView, sizeof(const uint8_t));
+        *ptr += sizeof(const uint8_t);
+    }
+}
+
+#endif
+#ifdef VK_MVK_moltenvk
+#endif
+#ifdef VK_EXT_external_memory_dma_buf
+#endif
+#ifdef VK_EXT_queue_family_foreign
+#endif
+#ifdef VK_EXT_debug_utils
+void reservedmarshal_VkDebugUtilsLabelEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugUtilsLabelEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    {
+        uint32_t l = forMarshaling->pLabelName ? strlen(forMarshaling->pLabelName): 0;
+        memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+        android::base::Stream::toBe32((uint8_t*)*ptr);
+        *ptr += sizeof(uint32_t);
+        memcpy(*ptr, (char*)forMarshaling->pLabelName, l);
+        *ptr += l;
+    }
+    memcpy(*ptr, (float*)forMarshaling->color, 4 * sizeof(float));
+    *ptr += 4 * sizeof(float);
+}
+
+void reservedmarshal_VkDebugUtilsObjectNameInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugUtilsObjectNameInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkObjectType*)&forMarshaling->objectType, sizeof(VkObjectType));
+    *ptr += sizeof(VkObjectType);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->objectHandle, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
+    {
+        // WARNING PTR CHECK
+        uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pObjectName;
+        memcpy((*ptr), &cgen_var_0, 8);
+        android::base::Stream::toBe64((uint8_t*)(*ptr));
+        *ptr += 8;
+        if (forMarshaling->pObjectName)
+        {
+            {
+                uint32_t l = forMarshaling->pObjectName ? strlen(forMarshaling->pObjectName): 0;
+                memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+                android::base::Stream::toBe32((uint8_t*)*ptr);
+                *ptr += sizeof(uint32_t);
+                memcpy(*ptr, (char*)forMarshaling->pObjectName, l);
+                *ptr += l;
+            }
+        }
+    }
+    else
+    {
+        {
+            uint32_t l = forMarshaling->pObjectName ? strlen(forMarshaling->pObjectName): 0;
+            memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+            android::base::Stream::toBe32((uint8_t*)*ptr);
+            *ptr += sizeof(uint32_t);
+            memcpy(*ptr, (char*)forMarshaling->pObjectName, l);
+            *ptr += l;
+        }
+    }
+}
+
+void reservedmarshal_VkDebugUtilsMessengerCallbackDataEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugUtilsMessengerCallbackDataEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDebugUtilsMessengerCallbackDataFlagsEXT*)&forMarshaling->flags, sizeof(VkDebugUtilsMessengerCallbackDataFlagsEXT));
+    *ptr += sizeof(VkDebugUtilsMessengerCallbackDataFlagsEXT);
+    if (vkStream->getFeatureBits() & VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT)
+    {
+        // WARNING PTR CHECK
+        uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pMessageIdName;
+        memcpy((*ptr), &cgen_var_0, 8);
+        android::base::Stream::toBe64((uint8_t*)(*ptr));
+        *ptr += 8;
+        if (forMarshaling->pMessageIdName)
+        {
+            {
+                uint32_t l = forMarshaling->pMessageIdName ? strlen(forMarshaling->pMessageIdName): 0;
+                memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+                android::base::Stream::toBe32((uint8_t*)*ptr);
+                *ptr += sizeof(uint32_t);
+                memcpy(*ptr, (char*)forMarshaling->pMessageIdName, l);
+                *ptr += l;
+            }
+        }
+    }
+    else
+    {
+        {
+            uint32_t l = forMarshaling->pMessageIdName ? strlen(forMarshaling->pMessageIdName): 0;
+            memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+            android::base::Stream::toBe32((uint8_t*)*ptr);
+            *ptr += sizeof(uint32_t);
+            memcpy(*ptr, (char*)forMarshaling->pMessageIdName, l);
+            *ptr += l;
+        }
+    }
+    memcpy(*ptr, (int32_t*)&forMarshaling->messageIdNumber, sizeof(int32_t));
+    *ptr += sizeof(int32_t);
+    {
+        uint32_t l = forMarshaling->pMessage ? strlen(forMarshaling->pMessage): 0;
+        memcpy(*ptr, (uint32_t*)&l, sizeof(uint32_t));
+        android::base::Stream::toBe32((uint8_t*)*ptr);
+        *ptr += sizeof(uint32_t);
+        memcpy(*ptr, (char*)forMarshaling->pMessage, l);
+        *ptr += l;
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->queueLabelCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pQueueLabels;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pQueueLabels)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->queueLabelCount; ++i)
+        {
+            reservedmarshal_VkDebugUtilsLabelEXT(vkStream, rootType, (VkDebugUtilsLabelEXT*)(forMarshaling->pQueueLabels + i), ptr);
+        }
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->cmdBufLabelCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pCmdBufLabels;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pCmdBufLabels)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->cmdBufLabelCount; ++i)
+        {
+            reservedmarshal_VkDebugUtilsLabelEXT(vkStream, rootType, (VkDebugUtilsLabelEXT*)(forMarshaling->pCmdBufLabels + i), ptr);
+        }
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->objectCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)forMarshaling->pObjects;
+    memcpy((*ptr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pObjects)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->objectCount; ++i)
+        {
+            reservedmarshal_VkDebugUtilsObjectNameInfoEXT(vkStream, rootType, (VkDebugUtilsObjectNameInfoEXT*)(forMarshaling->pObjects + i), ptr);
+        }
+    }
+}
+
+void reservedmarshal_VkDebugUtilsMessengerCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugUtilsMessengerCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDebugUtilsMessengerCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkDebugUtilsMessengerCreateFlagsEXT));
+    *ptr += sizeof(VkDebugUtilsMessengerCreateFlagsEXT);
+    memcpy(*ptr, (VkDebugUtilsMessageSeverityFlagsEXT*)&forMarshaling->messageSeverity, sizeof(VkDebugUtilsMessageSeverityFlagsEXT));
+    *ptr += sizeof(VkDebugUtilsMessageSeverityFlagsEXT);
+    memcpy(*ptr, (VkDebugUtilsMessageTypeFlagsEXT*)&forMarshaling->messageType, sizeof(VkDebugUtilsMessageTypeFlagsEXT));
+    *ptr += sizeof(VkDebugUtilsMessageTypeFlagsEXT);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->pfnUserCallback;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pUserData;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pUserData)
+    {
+        memcpy(*ptr, (void*)forMarshaling->pUserData, sizeof(uint8_t));
+        *ptr += sizeof(uint8_t);
+    }
+}
+
+void reservedmarshal_VkDebugUtilsObjectTagInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugUtilsObjectTagInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkObjectType*)&forMarshaling->objectType, sizeof(VkObjectType));
+    *ptr += sizeof(VkObjectType);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->objectHandle, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->tagName, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->tagSize;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    memcpy(*ptr, (const void*)forMarshaling->pTag, forMarshaling->tagSize * sizeof(const uint8_t));
+    *ptr += forMarshaling->tagSize * sizeof(const uint8_t);
+}
+
+#endif
+#ifdef VK_ANDROID_external_memory_android_hardware_buffer
+void reservedmarshal_VkAndroidHardwareBufferUsageANDROID(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAndroidHardwareBufferUsageANDROID* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->androidHardwareBufferUsage, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+}
+
+void reservedmarshal_VkAndroidHardwareBufferPropertiesANDROID(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAndroidHardwareBufferPropertiesANDROID* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->allocationSize, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->memoryTypeBits, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkAndroidHardwareBufferFormatPropertiesANDROID(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAndroidHardwareBufferFormatPropertiesANDROID* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkFormat*)&forMarshaling->format, sizeof(VkFormat));
+    *ptr += sizeof(VkFormat);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->externalFormat, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (VkFormatFeatureFlags*)&forMarshaling->formatFeatures, sizeof(VkFormatFeatureFlags));
+    *ptr += sizeof(VkFormatFeatureFlags);
+    reservedmarshal_VkComponentMapping(vkStream, rootType, (VkComponentMapping*)(&forMarshaling->samplerYcbcrConversionComponents), ptr);
+    memcpy(*ptr, (VkSamplerYcbcrModelConversion*)&forMarshaling->suggestedYcbcrModel, sizeof(VkSamplerYcbcrModelConversion));
+    *ptr += sizeof(VkSamplerYcbcrModelConversion);
+    memcpy(*ptr, (VkSamplerYcbcrRange*)&forMarshaling->suggestedYcbcrRange, sizeof(VkSamplerYcbcrRange));
+    *ptr += sizeof(VkSamplerYcbcrRange);
+    memcpy(*ptr, (VkChromaLocation*)&forMarshaling->suggestedXChromaOffset, sizeof(VkChromaLocation));
+    *ptr += sizeof(VkChromaLocation);
+    memcpy(*ptr, (VkChromaLocation*)&forMarshaling->suggestedYChromaOffset, sizeof(VkChromaLocation));
+    *ptr += sizeof(VkChromaLocation);
+}
+
+void reservedmarshal_VkImportAndroidHardwareBufferInfoANDROID(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportAndroidHardwareBufferInfoANDROID* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (AHardwareBuffer*)forMarshaling->buffer, sizeof(AHardwareBuffer));
+    *ptr += sizeof(AHardwareBuffer);
+}
+
+void reservedmarshal_VkMemoryGetAndroidHardwareBufferInfoANDROID(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryGetAndroidHardwareBufferInfoANDROID* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkDeviceMemory((*&forMarshaling->memory));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+}
+
+void reservedmarshal_VkExternalFormatANDROID(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalFormatANDROID* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->externalFormat, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+}
+
+#endif
+#ifdef VK_EXT_sampler_filter_minmax
+#endif
+#ifdef VK_AMD_gpu_shader_int16
+#endif
+#ifdef VK_AMD_mixed_attachment_samples
+#endif
+#ifdef VK_AMD_shader_fragment_mask
+#endif
+#ifdef VK_EXT_inline_uniform_block
+void reservedmarshal_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceInlineUniformBlockFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->inlineUniformBlock, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingInlineUniformBlockUpdateAfterBind, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceInlineUniformBlockPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxInlineUniformBlockSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorInlineUniformBlocks, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetInlineUniformBlocks, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindInlineUniformBlocks, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkWriteDescriptorSetInlineUniformBlockEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetInlineUniformBlockEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->dataSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const void*)forMarshaling->pData, forMarshaling->dataSize * sizeof(const uint8_t));
+    *ptr += forMarshaling->dataSize * sizeof(const uint8_t);
+}
+
+void reservedmarshal_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorPoolInlineUniformBlockCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxInlineUniformBlockBindings, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_EXT_shader_stencil_export
+#endif
+#ifdef VK_EXT_sample_locations
+void reservedmarshal_VkSampleLocationEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSampleLocationEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (float*)&forMarshaling->x, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->y, sizeof(float));
+    *ptr += sizeof(float);
+}
+
+void reservedmarshal_VkSampleLocationsInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSampleLocationsInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkSampleCountFlagBits*)&forMarshaling->sampleLocationsPerPixel, sizeof(VkSampleCountFlagBits));
+    *ptr += sizeof(VkSampleCountFlagBits);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->sampleLocationGridSize), ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->sampleLocationsCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->sampleLocationsCount; ++i)
+    {
+        reservedmarshal_VkSampleLocationEXT(vkStream, rootType, (const VkSampleLocationEXT*)(forMarshaling->pSampleLocations + i), ptr);
+    }
+}
+
+void reservedmarshal_VkAttachmentSampleLocationsEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentSampleLocationsEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->attachmentIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    reservedmarshal_VkSampleLocationsInfoEXT(vkStream, rootType, (VkSampleLocationsInfoEXT*)(&forMarshaling->sampleLocationsInfo), ptr);
+}
+
+void reservedmarshal_VkSubpassSampleLocationsEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassSampleLocationsEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->subpassIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    reservedmarshal_VkSampleLocationsInfoEXT(vkStream, rootType, (VkSampleLocationsInfoEXT*)(&forMarshaling->sampleLocationsInfo), ptr);
+}
+
+void reservedmarshal_VkRenderPassSampleLocationsBeginInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassSampleLocationsBeginInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->attachmentInitialSampleLocationsCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->attachmentInitialSampleLocationsCount; ++i)
+    {
+        reservedmarshal_VkAttachmentSampleLocationsEXT(vkStream, rootType, (const VkAttachmentSampleLocationsEXT*)(forMarshaling->pAttachmentInitialSampleLocations + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->postSubpassSampleLocationsCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->postSubpassSampleLocationsCount; ++i)
+    {
+        reservedmarshal_VkSubpassSampleLocationsEXT(vkStream, rootType, (const VkSubpassSampleLocationsEXT*)(forMarshaling->pPostSubpassSampleLocations + i), ptr);
+    }
+}
+
+void reservedmarshal_VkPipelineSampleLocationsStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineSampleLocationsStateCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->sampleLocationsEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    reservedmarshal_VkSampleLocationsInfoEXT(vkStream, rootType, (VkSampleLocationsInfoEXT*)(&forMarshaling->sampleLocationsInfo), ptr);
+}
+
+void reservedmarshal_VkPhysicalDeviceSampleLocationsPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSampleLocationsPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkSampleCountFlags*)&forMarshaling->sampleLocationSampleCounts, sizeof(VkSampleCountFlags));
+    *ptr += sizeof(VkSampleCountFlags);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxSampleLocationGridSize), ptr);
+    memcpy(*ptr, (float*)forMarshaling->sampleLocationCoordinateRange, 2 * sizeof(float));
+    *ptr += 2 * sizeof(float);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->sampleLocationSubPixelBits, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->variableSampleLocations, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkMultisamplePropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMultisamplePropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxSampleLocationGridSize), ptr);
+}
+
+#endif
+#ifdef VK_EXT_blend_operation_advanced
+void reservedmarshal_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->advancedBlendCoherentOperations, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->advancedBlendMaxColorAttachments, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->advancedBlendIndependentBlend, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->advancedBlendNonPremultipliedSrcColor, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->advancedBlendNonPremultipliedDstColor, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->advancedBlendCorrelatedOverlap, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->advancedBlendAllOperations, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPipelineColorBlendAdvancedStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineColorBlendAdvancedStateCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->srcPremultiplied, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->dstPremultiplied, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBlendOverlapEXT*)&forMarshaling->blendOverlap, sizeof(VkBlendOverlapEXT));
+    *ptr += sizeof(VkBlendOverlapEXT);
+}
+
+#endif
+#ifdef VK_NV_fragment_coverage_to_color
+void reservedmarshal_VkPipelineCoverageToColorStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCoverageToColorStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineCoverageToColorStateCreateFlagsNV*)&forMarshaling->flags, sizeof(VkPipelineCoverageToColorStateCreateFlagsNV));
+    *ptr += sizeof(VkPipelineCoverageToColorStateCreateFlagsNV);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->coverageToColorEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->coverageToColorLocation, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_NV_framebuffer_mixed_samples
+void reservedmarshal_VkPipelineCoverageModulationStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCoverageModulationStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineCoverageModulationStateCreateFlagsNV*)&forMarshaling->flags, sizeof(VkPipelineCoverageModulationStateCreateFlagsNV));
+    *ptr += sizeof(VkPipelineCoverageModulationStateCreateFlagsNV);
+    memcpy(*ptr, (VkCoverageModulationModeNV*)&forMarshaling->coverageModulationMode, sizeof(VkCoverageModulationModeNV));
+    *ptr += sizeof(VkCoverageModulationModeNV);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->coverageModulationTableEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->coverageModulationTableCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pCoverageModulationTable;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pCoverageModulationTable)
+    {
+        memcpy(*ptr, (const float*)forMarshaling->pCoverageModulationTable, forMarshaling->coverageModulationTableCount * sizeof(const float));
+        *ptr += forMarshaling->coverageModulationTableCount * sizeof(const float);
+    }
+}
+
+#endif
+#ifdef VK_NV_fill_rectangle
+#endif
+#ifdef VK_NV_shader_sm_builtins
+void reservedmarshal_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->shaderSMCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->shaderWarpsPerSM, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSMBuiltins, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_post_depth_coverage
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+void reservedmarshal_VkDrmFormatModifierPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrmFormatModifierPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint64_t*)&forMarshaling->drmFormatModifier, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->drmFormatModifierPlaneCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkFormatFeatureFlags*)&forMarshaling->drmFormatModifierTilingFeatures, sizeof(VkFormatFeatureFlags));
+    *ptr += sizeof(VkFormatFeatureFlags);
+}
+
+void reservedmarshal_VkDrmFormatModifierPropertiesListEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrmFormatModifierPropertiesListEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->drmFormatModifierCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pDrmFormatModifierProperties;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pDrmFormatModifierProperties)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->drmFormatModifierCount; ++i)
+        {
+            reservedmarshal_VkDrmFormatModifierPropertiesEXT(vkStream, rootType, (VkDrmFormatModifierPropertiesEXT*)(forMarshaling->pDrmFormatModifierProperties + i), ptr);
+        }
+    }
+}
+
+void reservedmarshal_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageDrmFormatModifierInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->drmFormatModifier, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (VkSharingMode*)&forMarshaling->sharingMode, sizeof(VkSharingMode));
+    *ptr += sizeof(VkSharingMode);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->queueFamilyIndexCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pQueueFamilyIndices;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pQueueFamilyIndices)
+    {
+        memcpy(*ptr, (const uint32_t*)forMarshaling->pQueueFamilyIndices, forMarshaling->queueFamilyIndexCount * sizeof(const uint32_t));
+        *ptr += forMarshaling->queueFamilyIndexCount * sizeof(const uint32_t);
+    }
+}
+
+void reservedmarshal_VkImageDrmFormatModifierListCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierListCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->drmFormatModifierCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const uint64_t*)forMarshaling->pDrmFormatModifiers, forMarshaling->drmFormatModifierCount * sizeof(const uint64_t));
+    *ptr += forMarshaling->drmFormatModifierCount * sizeof(const uint64_t);
+}
+
+void reservedmarshal_VkImageDrmFormatModifierExplicitCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierExplicitCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->drmFormatModifier, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->drmFormatModifierPlaneCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->drmFormatModifierPlaneCount; ++i)
+    {
+        reservedmarshal_VkSubresourceLayout(vkStream, rootType, (const VkSubresourceLayout*)(forMarshaling->pPlaneLayouts + i), ptr);
+    }
+}
+
+void reservedmarshal_VkImageDrmFormatModifierPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->drmFormatModifier, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+}
+
+#endif
+#ifdef VK_EXT_validation_cache
+void reservedmarshal_VkValidationCacheCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkValidationCacheCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkValidationCacheCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkValidationCacheCreateFlagsEXT));
+    *ptr += sizeof(VkValidationCacheCreateFlagsEXT);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->initialDataSize;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    memcpy(*ptr, (const void*)forMarshaling->pInitialData, forMarshaling->initialDataSize * sizeof(const uint8_t));
+    *ptr += forMarshaling->initialDataSize * sizeof(const uint8_t);
+}
+
+void reservedmarshal_VkShaderModuleValidationCacheCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkShaderModuleValidationCacheCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkValidationCacheEXT((*&forMarshaling->validationCache));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+}
+
+#endif
+#ifdef VK_EXT_descriptor_indexing
+#endif
+#ifdef VK_EXT_shader_viewport_index_layer
+#endif
+#ifdef VK_NV_shading_rate_image
+void reservedmarshal_VkShadingRatePaletteNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkShadingRatePaletteNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->shadingRatePaletteEntryCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const VkShadingRatePaletteEntryNV*)forMarshaling->pShadingRatePaletteEntries, forMarshaling->shadingRatePaletteEntryCount * sizeof(const VkShadingRatePaletteEntryNV));
+    *ptr += forMarshaling->shadingRatePaletteEntryCount * sizeof(const VkShadingRatePaletteEntryNV);
+}
+
+void reservedmarshal_VkPipelineViewportShadingRateImageStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportShadingRateImageStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shadingRateImageEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->viewportCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pShadingRatePalettes;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pShadingRatePalettes)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->viewportCount; ++i)
+        {
+            reservedmarshal_VkShadingRatePaletteNV(vkStream, rootType, (const VkShadingRatePaletteNV*)(forMarshaling->pShadingRatePalettes + i), ptr);
+        }
+    }
+}
+
+void reservedmarshal_VkPhysicalDeviceShadingRateImageFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShadingRateImageFeaturesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shadingRateImage, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shadingRateCoarseSampleOrder, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceShadingRateImagePropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShadingRateImagePropertiesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->shadingRateTexelSize), ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->shadingRatePaletteSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->shadingRateMaxCoarseSamples, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkCoarseSampleLocationNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCoarseSampleLocationNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->pixelX, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->pixelY, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->sample, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkCoarseSampleOrderCustomNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCoarseSampleOrderCustomNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkShadingRatePaletteEntryNV*)&forMarshaling->shadingRate, sizeof(VkShadingRatePaletteEntryNV));
+    *ptr += sizeof(VkShadingRatePaletteEntryNV);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->sampleCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->sampleLocationCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->sampleLocationCount; ++i)
+    {
+        reservedmarshal_VkCoarseSampleLocationNV(vkStream, rootType, (const VkCoarseSampleLocationNV*)(forMarshaling->pSampleLocations + i), ptr);
+    }
+}
+
+void reservedmarshal_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkCoarseSampleOrderTypeNV*)&forMarshaling->sampleOrderType, sizeof(VkCoarseSampleOrderTypeNV));
+    *ptr += sizeof(VkCoarseSampleOrderTypeNV);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->customSampleOrderCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->customSampleOrderCount; ++i)
+    {
+        reservedmarshal_VkCoarseSampleOrderCustomNV(vkStream, rootType, (const VkCoarseSampleOrderCustomNV*)(forMarshaling->pCustomSampleOrders + i), ptr);
+    }
+}
+
+#endif
+#ifdef VK_NV_ray_tracing
+void reservedmarshal_VkRayTracingShaderGroupCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingShaderGroupCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkRayTracingShaderGroupTypeKHR*)&forMarshaling->type, sizeof(VkRayTracingShaderGroupTypeKHR));
+    *ptr += sizeof(VkRayTracingShaderGroupTypeKHR);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->generalShader, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->closestHitShader, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->anyHitShader, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->intersectionShader, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkRayTracingPipelineCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingPipelineCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineCreateFlags));
+    *ptr += sizeof(VkPipelineCreateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->stageCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->stageCount; ++i)
+    {
+        reservedmarshal_VkPipelineShaderStageCreateInfo(vkStream, rootType, (const VkPipelineShaderStageCreateInfo*)(forMarshaling->pStages + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->groupCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->groupCount; ++i)
+    {
+        reservedmarshal_VkRayTracingShaderGroupCreateInfoNV(vkStream, rootType, (const VkRayTracingShaderGroupCreateInfoNV*)(forMarshaling->pGroups + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxRecursionDepth, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPipelineLayout((*&forMarshaling->layout));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkPipeline((*&forMarshaling->basePipelineHandle));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (int32_t*)&forMarshaling->basePipelineIndex, sizeof(int32_t));
+    *ptr += sizeof(int32_t);
+}
+
+void reservedmarshal_VkGeometryTrianglesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeometryTrianglesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&forMarshaling->vertexData));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->vertexOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->vertexCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->vertexStride, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkFormat*)&forMarshaling->vertexFormat, sizeof(VkFormat));
+    *ptr += sizeof(VkFormat);
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkBuffer((*&forMarshaling->indexData));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->indexOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->indexCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkIndexType*)&forMarshaling->indexType, sizeof(VkIndexType));
+    *ptr += sizeof(VkIndexType);
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = get_host_u64_VkBuffer((*&forMarshaling->transformData));
+    memcpy(*ptr, (uint64_t*)&cgen_var_2, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->transformOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkGeometryAABBNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeometryAABBNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&forMarshaling->aabbData));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->numAABBs, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->stride, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkGeometryDataNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeometryDataNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    reservedmarshal_VkGeometryTrianglesNV(vkStream, rootType, (VkGeometryTrianglesNV*)(&forMarshaling->triangles), ptr);
+    reservedmarshal_VkGeometryAABBNV(vkStream, rootType, (VkGeometryAABBNV*)(&forMarshaling->aabbs), ptr);
+}
+
+void reservedmarshal_VkGeometryNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeometryNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkGeometryTypeKHR*)&forMarshaling->geometryType, sizeof(VkGeometryTypeKHR));
+    *ptr += sizeof(VkGeometryTypeKHR);
+    reservedmarshal_VkGeometryDataNV(vkStream, rootType, (VkGeometryDataNV*)(&forMarshaling->geometry), ptr);
+    memcpy(*ptr, (VkGeometryFlagsKHR*)&forMarshaling->flags, sizeof(VkGeometryFlagsKHR));
+    *ptr += sizeof(VkGeometryFlagsKHR);
+}
+
+void reservedmarshal_VkAccelerationStructureInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkAccelerationStructureTypeNV*)&forMarshaling->type, sizeof(VkAccelerationStructureTypeNV));
+    *ptr += sizeof(VkAccelerationStructureTypeNV);
+    memcpy(*ptr, (VkBuildAccelerationStructureFlagsNV*)&forMarshaling->flags, sizeof(VkBuildAccelerationStructureFlagsNV));
+    *ptr += sizeof(VkBuildAccelerationStructureFlagsNV);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->instanceCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->geometryCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->geometryCount; ++i)
+    {
+        reservedmarshal_VkGeometryNV(vkStream, rootType, (const VkGeometryNV*)(forMarshaling->pGeometries + i), ptr);
+    }
+}
+
+void reservedmarshal_VkAccelerationStructureCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->compactedSize, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    reservedmarshal_VkAccelerationStructureInfoNV(vkStream, rootType, (VkAccelerationStructureInfoNV*)(&forMarshaling->info), ptr);
+}
+
+void reservedmarshal_VkBindAccelerationStructureMemoryInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindAccelerationStructureMemoryInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkAccelerationStructureNV((*&forMarshaling->accelerationStructure));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkDeviceMemory((*&forMarshaling->memory));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->memoryOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->deviceIndexCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pDeviceIndices, forMarshaling->deviceIndexCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->deviceIndexCount * sizeof(const uint32_t);
+}
+
+void reservedmarshal_VkWriteDescriptorSetAccelerationStructureNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetAccelerationStructureNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->accelerationStructureCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pAccelerationStructures;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pAccelerationStructures)
+    {
+        if (forMarshaling->accelerationStructureCount)
+        {
+            uint8_t* cgen_var_0_0_ptr = (uint8_t*)(*ptr);
+            if (forMarshaling)
+            {
+                for (uint32_t k = 0; k < forMarshaling->accelerationStructureCount; ++k)
+                {
+                    uint64_t tmpval = get_host_u64_VkAccelerationStructureNV(forMarshaling->pAccelerationStructures[k]);
+                    memcpy(cgen_var_0_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+                }
+            }
+            *ptr += 8 * forMarshaling->accelerationStructureCount;
+        }
+    }
+}
+
+void reservedmarshal_VkAccelerationStructureMemoryRequirementsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureMemoryRequirementsInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkAccelerationStructureMemoryRequirementsTypeNV*)&forMarshaling->type, sizeof(VkAccelerationStructureMemoryRequirementsTypeNV));
+    *ptr += sizeof(VkAccelerationStructureMemoryRequirementsTypeNV);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkAccelerationStructureNV((*&forMarshaling->accelerationStructure));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+}
+
+void reservedmarshal_VkPhysicalDeviceRayTracingPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPropertiesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->shaderGroupHandleSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxRecursionDepth, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxShaderGroupStride, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->shaderGroupBaseAlignment, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->maxGeometryCount, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->maxInstanceCount, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->maxTriangleCount, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetAccelerationStructures, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkTransformMatrixKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkTransformMatrixKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (float*)forMarshaling->matrix, ((3)*(4)) * sizeof(float));
+    *ptr += ((3)*(4)) * sizeof(float);
+}
+
+void reservedmarshal_VkAabbPositionsKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAabbPositionsKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (float*)&forMarshaling->minX, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->minY, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->minZ, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->maxX, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->maxY, sizeof(float));
+    *ptr += sizeof(float);
+    memcpy(*ptr, (float*)&forMarshaling->maxZ, sizeof(float));
+    *ptr += sizeof(float);
+}
+
+void reservedmarshal_VkAccelerationStructureInstanceKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureInstanceKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    
+    typedef struct VkAccelerationStructureInstanceKHRWithoutBitFields {
+        VkTransformMatrixKHR          transform;
+        uint32_t                      dwords[2];
+        uint64_t                      accelerationStructureReference;
+    } VkAccelerationStructureInstanceKHRWithoutBitFields;
+    
+    (void)vkStream;
+    const VkAccelerationStructureInstanceKHRWithoutBitFields* forMarshaling_new = (const VkAccelerationStructureInstanceKHRWithoutBitFields*)(forMarshaling);
+    reservedmarshal_VkTransformMatrixKHR(vkStream, rootType, (VkTransformMatrixKHR*)(&forMarshaling_new->transform), ptr);
+    for (uint32_t i = 0; i < 2; i++) {
+        memcpy(*ptr, (uint32_t*)&(forMarshaling_new->dwords[i]), sizeof(uint32_t));
+        *ptr += sizeof(uint32_t);
+    }
+    memcpy(*ptr, (uint64_t*)&forMarshaling_new->accelerationStructureReference, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    
+}
+
+#endif
+#ifdef VK_NV_representative_fragment_test
+void reservedmarshal_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->representativeFragmentTest, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRepresentativeFragmentTestStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->representativeFragmentTestEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_filter_cubic
+void reservedmarshal_VkPhysicalDeviceImageViewImageFormatInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageViewImageFormatInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkImageViewType*)&forMarshaling->imageViewType, sizeof(VkImageViewType));
+    *ptr += sizeof(VkImageViewType);
+}
+
+void reservedmarshal_VkFilterCubicImageViewImageFormatPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFilterCubicImageViewImageFormatPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->filterCubic, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->filterCubicMinmax, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
+#endif
+#ifdef VK_EXT_global_priority
+void reservedmarshal_VkDeviceQueueGlobalPriorityCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceQueueGlobalPriorityCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkQueueGlobalPriorityEXT*)&forMarshaling->globalPriority, sizeof(VkQueueGlobalPriorityEXT));
+    *ptr += sizeof(VkQueueGlobalPriorityEXT);
+}
+
+#endif
+#ifdef VK_EXT_external_memory_host
+void reservedmarshal_VkImportMemoryHostPointerInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportMemoryHostPointerInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkExternalMemoryHandleTypeFlagBits*)&forMarshaling->handleType, sizeof(VkExternalMemoryHandleTypeFlagBits));
+    *ptr += sizeof(VkExternalMemoryHandleTypeFlagBits);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pHostPointer;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pHostPointer)
+    {
+        memcpy(*ptr, (void*)forMarshaling->pHostPointer, sizeof(uint8_t));
+        *ptr += sizeof(uint8_t);
+    }
+}
+
+void reservedmarshal_VkMemoryHostPointerPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryHostPointerPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->memoryTypeBits, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalMemoryHostPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->minImportedHostPointerAlignment, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+#endif
+#ifdef VK_AMD_buffer_marker
+#endif
+#ifdef VK_AMD_pipeline_compiler_control
+void reservedmarshal_VkPipelineCompilerControlCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCompilerControlCreateInfoAMD* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineCompilerControlFlagsAMD*)&forMarshaling->compilerControlFlags, sizeof(VkPipelineCompilerControlFlagsAMD));
+    *ptr += sizeof(VkPipelineCompilerControlFlagsAMD);
+}
+
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+void reservedmarshal_VkCalibratedTimestampInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCalibratedTimestampInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkTimeDomainEXT*)&forMarshaling->timeDomain, sizeof(VkTimeDomainEXT));
+    *ptr += sizeof(VkTimeDomainEXT);
+}
+
+#endif
+#ifdef VK_AMD_shader_core_properties
+void reservedmarshal_VkPhysicalDeviceShaderCorePropertiesAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderCorePropertiesAMD* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->shaderEngineCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->shaderArraysPerEngineCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->computeUnitsPerShaderArray, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->simdPerComputeUnit, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->wavefrontsPerSimd, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->wavefrontSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->sgprsPerSimd, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->minSgprAllocation, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxSgprAllocation, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->sgprAllocationGranularity, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->vgprsPerSimd, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->minVgprAllocation, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxVgprAllocation, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->vgprAllocationGranularity, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_AMD_memory_overallocation_behavior
+void reservedmarshal_VkDeviceMemoryOverallocationCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceMemoryOverallocationCreateInfoAMD* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkMemoryOverallocationBehaviorAMD*)&forMarshaling->overallocationBehavior, sizeof(VkMemoryOverallocationBehaviorAMD));
+    *ptr += sizeof(VkMemoryOverallocationBehaviorAMD);
+}
+
+#endif
+#ifdef VK_EXT_vertex_attribute_divisor
+void reservedmarshal_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxVertexAttribDivisor, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkVertexInputBindingDivisorDescriptionEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkVertexInputBindingDivisorDescriptionEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->binding, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->divisor, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPipelineVertexInputDivisorStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineVertexInputDivisorStateCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->vertexBindingDivisorCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->vertexBindingDivisorCount; ++i)
+    {
+        reservedmarshal_VkVertexInputBindingDivisorDescriptionEXT(vkStream, rootType, (const VkVertexInputBindingDivisorDescriptionEXT*)(forMarshaling->pVertexBindingDivisors + i), ptr);
+    }
+}
+
+void reservedmarshal_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->vertexAttributeInstanceRateDivisor, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->vertexAttributeInstanceRateZeroDivisor, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_GGP_frame_token
+void reservedmarshal_VkPresentFrameTokenGGP(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPresentFrameTokenGGP* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (GgpFrameToken*)&forMarshaling->frameToken, sizeof(GgpFrameToken));
+    *ptr += sizeof(GgpFrameToken);
+}
+
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+void reservedmarshal_VkPipelineCreationFeedbackEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCreationFeedbackEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkPipelineCreationFeedbackFlagsEXT*)&forMarshaling->flags, sizeof(VkPipelineCreationFeedbackFlagsEXT));
+    *ptr += sizeof(VkPipelineCreationFeedbackFlagsEXT);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->duration, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+}
+
+void reservedmarshal_VkPipelineCreationFeedbackCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCreationFeedbackCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkPipelineCreationFeedbackEXT(vkStream, rootType, (VkPipelineCreationFeedbackEXT*)(forMarshaling->pPipelineCreationFeedback), ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->pipelineStageCreationFeedbackCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->pipelineStageCreationFeedbackCount; ++i)
+    {
+        reservedmarshal_VkPipelineCreationFeedbackEXT(vkStream, rootType, (VkPipelineCreationFeedbackEXT*)(forMarshaling->pPipelineStageCreationFeedbacks + i), ptr);
+    }
+}
+
+#endif
+#ifdef VK_NV_shader_subgroup_partitioned
+#endif
+#ifdef VK_NV_compute_shader_derivatives
+void reservedmarshal_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->computeDerivativeGroupQuads, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->computeDerivativeGroupLinear, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_NV_mesh_shader
+void reservedmarshal_VkPhysicalDeviceMeshShaderFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMeshShaderFeaturesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->taskShader, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->meshShader, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceMeshShaderPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMeshShaderPropertiesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDrawMeshTasksCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTaskWorkGroupInvocations, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)forMarshaling->maxTaskWorkGroupSize, 3 * sizeof(uint32_t));
+    *ptr += 3 * sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTaskTotalMemorySize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxTaskOutputCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxMeshWorkGroupInvocations, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)forMarshaling->maxMeshWorkGroupSize, 3 * sizeof(uint32_t));
+    *ptr += 3 * sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxMeshTotalMemorySize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxMeshOutputVertices, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxMeshOutputPrimitives, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxMeshMultiviewViewCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->meshOutputPerVertexGranularity, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->meshOutputPerPrimitiveGranularity, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkDrawMeshTasksIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrawMeshTasksIndirectCommandNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->taskCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->firstTask, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+void reservedmarshal_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentShaderBarycentric, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_NV_shader_image_footprint
+void reservedmarshal_VkPhysicalDeviceShaderImageFootprintFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderImageFootprintFeaturesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->imageFootprint, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_NV_scissor_exclusive
+void reservedmarshal_VkPipelineViewportExclusiveScissorStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportExclusiveScissorStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->exclusiveScissorCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pExclusiveScissors;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pExclusiveScissors)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->exclusiveScissorCount; ++i)
+        {
+            reservedmarshal_VkRect2D(vkStream, rootType, (const VkRect2D*)(forMarshaling->pExclusiveScissors + i), ptr);
+        }
+    }
+}
+
+void reservedmarshal_VkPhysicalDeviceExclusiveScissorFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExclusiveScissorFeaturesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->exclusiveScissor, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_NV_device_diagnostic_checkpoints
+void reservedmarshal_VkQueueFamilyCheckpointPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkQueueFamilyCheckpointPropertiesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineStageFlags*)&forMarshaling->checkpointExecutionStageMask, sizeof(VkPipelineStageFlags));
+    *ptr += sizeof(VkPipelineStageFlags);
+}
+
+void reservedmarshal_VkCheckpointDataNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCheckpointDataNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineStageFlagBits*)&forMarshaling->stage, sizeof(VkPipelineStageFlagBits));
+    *ptr += sizeof(VkPipelineStageFlagBits);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pCheckpointMarker;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pCheckpointMarker)
+    {
+        memcpy(*ptr, (void*)forMarshaling->pCheckpointMarker, sizeof(uint8_t));
+        *ptr += sizeof(uint8_t);
+    }
+}
+
+#endif
+#ifdef VK_INTEL_shader_integer_functions2
+void reservedmarshal_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderIntegerFunctions2, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_INTEL_performance_query
+void reservedmarshal_VkPerformanceValueDataINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceValueDataINTEL* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->value32, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPerformanceValueINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceValueINTEL* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkPerformanceValueTypeINTEL*)&forMarshaling->type, sizeof(VkPerformanceValueTypeINTEL));
+    *ptr += sizeof(VkPerformanceValueTypeINTEL);
+    reservedmarshal_VkPerformanceValueDataINTEL(vkStream, rootType, (VkPerformanceValueDataINTEL*)(&forMarshaling->data), ptr);
+}
+
+void reservedmarshal_VkInitializePerformanceApiInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkInitializePerformanceApiInfoINTEL* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pUserData;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pUserData)
+    {
+        memcpy(*ptr, (void*)forMarshaling->pUserData, sizeof(uint8_t));
+        *ptr += sizeof(uint8_t);
+    }
+}
+
+void reservedmarshal_VkQueryPoolPerformanceQueryCreateInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkQueryPoolPerformanceQueryCreateInfoINTEL* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkQueryPoolSamplingModeINTEL*)&forMarshaling->performanceCountersSampling, sizeof(VkQueryPoolSamplingModeINTEL));
+    *ptr += sizeof(VkQueryPoolSamplingModeINTEL);
+}
+
+void reservedmarshal_VkPerformanceMarkerInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceMarkerInfoINTEL* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->marker, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+}
+
+void reservedmarshal_VkPerformanceStreamMarkerInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceStreamMarkerInfoINTEL* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->marker, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPerformanceOverrideInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceOverrideInfoINTEL* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPerformanceOverrideTypeINTEL*)&forMarshaling->type, sizeof(VkPerformanceOverrideTypeINTEL));
+    *ptr += sizeof(VkPerformanceOverrideTypeINTEL);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->enable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->parameter, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+}
+
+void reservedmarshal_VkPerformanceConfigurationAcquireInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceConfigurationAcquireInfoINTEL* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPerformanceConfigurationTypeINTEL*)&forMarshaling->type, sizeof(VkPerformanceConfigurationTypeINTEL));
+    *ptr += sizeof(VkPerformanceConfigurationTypeINTEL);
+}
+
+#endif
+#ifdef VK_EXT_pci_bus_info
+void reservedmarshal_VkPhysicalDevicePCIBusInfoPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePCIBusInfoPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->pciDomain, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->pciBus, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->pciDevice, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->pciFunction, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_AMD_display_native_hdr
+void reservedmarshal_VkDisplayNativeHdrSurfaceCapabilitiesAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayNativeHdrSurfaceCapabilitiesAMD* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->localDimmingSupport, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkSwapchainDisplayNativeHdrCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSwapchainDisplayNativeHdrCreateInfoAMD* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->localDimmingEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+void reservedmarshal_VkImagePipeSurfaceCreateInfoFUCHSIA(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImagePipeSurfaceCreateInfoFUCHSIA* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkImagePipeSurfaceCreateFlagsFUCHSIA*)&forMarshaling->flags, sizeof(VkImagePipeSurfaceCreateFlagsFUCHSIA));
+    *ptr += sizeof(VkImagePipeSurfaceCreateFlagsFUCHSIA);
+    memcpy(*ptr, (zx_handle_t*)&forMarshaling->imagePipeHandle, sizeof(zx_handle_t));
+    *ptr += sizeof(zx_handle_t);
+}
+
+#endif
+#ifdef VK_EXT_metal_surface
+void reservedmarshal_VkMetalSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMetalSurfaceCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkMetalSurfaceCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkMetalSurfaceCreateFlagsEXT));
+    *ptr += sizeof(VkMetalSurfaceCreateFlagsEXT);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pLayer;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pLayer)
+    {
+        memcpy(*ptr, (const CAMetalLayer*)forMarshaling->pLayer, sizeof(const CAMetalLayer));
+        *ptr += sizeof(const CAMetalLayer);
+    }
+}
+
+#endif
+#ifdef VK_EXT_fragment_density_map
+void reservedmarshal_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMapFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentDensityMap, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentDensityMapDynamic, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentDensityMapNonSubsampledImages, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMapPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->minFragmentDensityTexelSize), ptr);
+    reservedmarshal_VkExtent2D(vkStream, rootType, (VkExtent2D*)(&forMarshaling->maxFragmentDensityTexelSize), ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentDensityInvocations, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkRenderPassFragmentDensityMapCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassFragmentDensityMapCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkAttachmentReference(vkStream, rootType, (VkAttachmentReference*)(&forMarshaling->fragmentDensityMapAttachment), ptr);
+}
+
+#endif
+#ifdef VK_EXT_scalar_block_layout
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+void reservedmarshal_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->subgroupSizeControl, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->computeFullSubgroups, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->minSubgroupSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxSubgroupSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxComputeWorkgroupSubgroups, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkShaderStageFlags*)&forMarshaling->requiredSubgroupSizeStages, sizeof(VkShaderStageFlags));
+    *ptr += sizeof(VkShaderStageFlags);
+}
+
+void reservedmarshal_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->requiredSubgroupSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_AMD_shader_core_properties2
+void reservedmarshal_VkPhysicalDeviceShaderCoreProperties2AMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderCoreProperties2AMD* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkShaderCorePropertiesFlagsAMD*)&forMarshaling->shaderCoreFeatures, sizeof(VkShaderCorePropertiesFlagsAMD));
+    *ptr += sizeof(VkShaderCorePropertiesFlagsAMD);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->activeComputeUnitCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_AMD_device_coherent_memory
+void reservedmarshal_VkPhysicalDeviceCoherentMemoryFeaturesAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCoherentMemoryFeaturesAMD* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->deviceCoherentMemory, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+void reservedmarshal_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderImageInt64Atomics, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->sparseImageInt64Atomics, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_memory_budget
+void reservedmarshal_VkPhysicalDeviceMemoryBudgetPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryBudgetPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceSize*)forMarshaling->heapBudget, VK_MAX_MEMORY_HEAPS * sizeof(VkDeviceSize));
+    *ptr += VK_MAX_MEMORY_HEAPS * sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)forMarshaling->heapUsage, VK_MAX_MEMORY_HEAPS * sizeof(VkDeviceSize));
+    *ptr += VK_MAX_MEMORY_HEAPS * sizeof(VkDeviceSize);
+}
+
+#endif
+#ifdef VK_EXT_memory_priority
+void reservedmarshal_VkPhysicalDeviceMemoryPriorityFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryPriorityFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->memoryPriority, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkMemoryPriorityAllocateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryPriorityAllocateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (float*)&forMarshaling->priority, sizeof(float));
+    *ptr += sizeof(float);
+}
+
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+void reservedmarshal_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->dedicatedAllocationImageAliasing, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_buffer_device_address
+void reservedmarshal_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->bufferDeviceAddress, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->bufferDeviceAddressCaptureReplay, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->bufferDeviceAddressMultiDevice, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkBufferDeviceAddressCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferDeviceAddressCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceAddress*)&forMarshaling->deviceAddress, sizeof(VkDeviceAddress));
+    *ptr += sizeof(VkDeviceAddress);
+}
+
+#endif
+#ifdef VK_EXT_tooling_info
+void reservedmarshal_VkPhysicalDeviceToolPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceToolPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (char*)forMarshaling->name, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
+    *ptr += VK_MAX_EXTENSION_NAME_SIZE * sizeof(char);
+    memcpy(*ptr, (char*)forMarshaling->version, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
+    *ptr += VK_MAX_EXTENSION_NAME_SIZE * sizeof(char);
+    memcpy(*ptr, (VkToolPurposeFlagsEXT*)&forMarshaling->purposes, sizeof(VkToolPurposeFlagsEXT));
+    *ptr += sizeof(VkToolPurposeFlagsEXT);
+    memcpy(*ptr, (char*)forMarshaling->description, VK_MAX_DESCRIPTION_SIZE * sizeof(char));
+    *ptr += VK_MAX_DESCRIPTION_SIZE * sizeof(char);
+    memcpy(*ptr, (char*)forMarshaling->layer, VK_MAX_EXTENSION_NAME_SIZE * sizeof(char));
+    *ptr += VK_MAX_EXTENSION_NAME_SIZE * sizeof(char);
+}
+
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+#endif
+#ifdef VK_EXT_validation_features
+void reservedmarshal_VkValidationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkValidationFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->enabledValidationFeatureCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const VkValidationFeatureEnableEXT*)forMarshaling->pEnabledValidationFeatures, forMarshaling->enabledValidationFeatureCount * sizeof(const VkValidationFeatureEnableEXT));
+    *ptr += forMarshaling->enabledValidationFeatureCount * sizeof(const VkValidationFeatureEnableEXT);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->disabledValidationFeatureCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const VkValidationFeatureDisableEXT*)forMarshaling->pDisabledValidationFeatures, forMarshaling->disabledValidationFeatureCount * sizeof(const VkValidationFeatureDisableEXT));
+    *ptr += forMarshaling->disabledValidationFeatureCount * sizeof(const VkValidationFeatureDisableEXT);
+}
+
+#endif
+#ifdef VK_NV_cooperative_matrix
+void reservedmarshal_VkCooperativeMatrixPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCooperativeMatrixPropertiesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->MSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->NSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->KSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkComponentTypeNV*)&forMarshaling->AType, sizeof(VkComponentTypeNV));
+    *ptr += sizeof(VkComponentTypeNV);
+    memcpy(*ptr, (VkComponentTypeNV*)&forMarshaling->BType, sizeof(VkComponentTypeNV));
+    *ptr += sizeof(VkComponentTypeNV);
+    memcpy(*ptr, (VkComponentTypeNV*)&forMarshaling->CType, sizeof(VkComponentTypeNV));
+    *ptr += sizeof(VkComponentTypeNV);
+    memcpy(*ptr, (VkComponentTypeNV*)&forMarshaling->DType, sizeof(VkComponentTypeNV));
+    *ptr += sizeof(VkComponentTypeNV);
+    memcpy(*ptr, (VkScopeNV*)&forMarshaling->scope, sizeof(VkScopeNV));
+    *ptr += sizeof(VkScopeNV);
+}
+
+void reservedmarshal_VkPhysicalDeviceCooperativeMatrixFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCooperativeMatrixFeaturesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->cooperativeMatrix, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->cooperativeMatrixRobustBufferAccess, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceCooperativeMatrixPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCooperativeMatrixPropertiesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkShaderStageFlags*)&forMarshaling->cooperativeMatrixSupportedStages, sizeof(VkShaderStageFlags));
+    *ptr += sizeof(VkShaderStageFlags);
+}
+
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+void reservedmarshal_VkPhysicalDeviceCoverageReductionModeFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCoverageReductionModeFeaturesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->coverageReductionMode, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPipelineCoverageReductionStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCoverageReductionStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineCoverageReductionStateCreateFlagsNV*)&forMarshaling->flags, sizeof(VkPipelineCoverageReductionStateCreateFlagsNV));
+    *ptr += sizeof(VkPipelineCoverageReductionStateCreateFlagsNV);
+    memcpy(*ptr, (VkCoverageReductionModeNV*)&forMarshaling->coverageReductionMode, sizeof(VkCoverageReductionModeNV));
+    *ptr += sizeof(VkCoverageReductionModeNV);
+}
+
+void reservedmarshal_VkFramebufferMixedSamplesCombinationNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFramebufferMixedSamplesCombinationNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkCoverageReductionModeNV*)&forMarshaling->coverageReductionMode, sizeof(VkCoverageReductionModeNV));
+    *ptr += sizeof(VkCoverageReductionModeNV);
+    memcpy(*ptr, (VkSampleCountFlagBits*)&forMarshaling->rasterizationSamples, sizeof(VkSampleCountFlagBits));
+    *ptr += sizeof(VkSampleCountFlagBits);
+    memcpy(*ptr, (VkSampleCountFlags*)&forMarshaling->depthStencilSamples, sizeof(VkSampleCountFlags));
+    *ptr += sizeof(VkSampleCountFlags);
+    memcpy(*ptr, (VkSampleCountFlags*)&forMarshaling->colorSamples, sizeof(VkSampleCountFlags));
+    *ptr += sizeof(VkSampleCountFlags);
+}
+
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+void reservedmarshal_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentShaderSampleInterlock, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentShaderPixelInterlock, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentShaderShadingRateInterlock, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+void reservedmarshal_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->ycbcrImageArrays, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+void reservedmarshal_VkSurfaceFullScreenExclusiveInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceFullScreenExclusiveInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkFullScreenExclusiveEXT*)&forMarshaling->fullScreenExclusive, sizeof(VkFullScreenExclusiveEXT));
+    *ptr += sizeof(VkFullScreenExclusiveEXT);
+}
+
+void reservedmarshal_VkSurfaceCapabilitiesFullScreenExclusiveEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceCapabilitiesFullScreenExclusiveEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fullScreenExclusiveSupported, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkSurfaceFullScreenExclusiveWin32InfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceFullScreenExclusiveWin32InfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (HMONITOR*)&forMarshaling->hmonitor, sizeof(HMONITOR));
+    *ptr += sizeof(HMONITOR);
+}
+
+#endif
+#ifdef VK_EXT_headless_surface
+void reservedmarshal_VkHeadlessSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkHeadlessSurfaceCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkHeadlessSurfaceCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkHeadlessSurfaceCreateFlagsEXT));
+    *ptr += sizeof(VkHeadlessSurfaceCreateFlagsEXT);
+}
+
+#endif
+#ifdef VK_EXT_line_rasterization
+void reservedmarshal_VkPhysicalDeviceLineRasterizationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLineRasterizationFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->rectangularLines, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->bresenhamLines, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->smoothLines, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->stippledRectangularLines, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->stippledBresenhamLines, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->stippledSmoothLines, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceLineRasterizationPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLineRasterizationPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->lineSubPixelPrecisionBits, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPipelineRasterizationLineStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationLineStateCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkLineRasterizationModeEXT*)&forMarshaling->lineRasterizationMode, sizeof(VkLineRasterizationModeEXT));
+    *ptr += sizeof(VkLineRasterizationModeEXT);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->stippledLineEnable, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->lineStippleFactor, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint16_t*)&forMarshaling->lineStipplePattern, sizeof(uint16_t));
+    *ptr += sizeof(uint16_t);
+}
+
+#endif
+#ifdef VK_EXT_shader_atomic_float
+void reservedmarshal_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderBufferFloat32Atomics, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderBufferFloat32AtomicAdd, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderBufferFloat64Atomics, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderBufferFloat64AtomicAdd, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSharedFloat32Atomics, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSharedFloat32AtomicAdd, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSharedFloat64Atomics, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderSharedFloat64AtomicAdd, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderImageFloat32Atomics, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderImageFloat32AtomicAdd, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->sparseImageFloat32Atomics, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->sparseImageFloat32AtomicAdd, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_host_query_reset
+#endif
+#ifdef VK_EXT_index_type_uint8
+void reservedmarshal_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceIndexTypeUint8FeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->indexTypeUint8, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+void reservedmarshal_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->extendedDynamicState, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+void reservedmarshal_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->shaderDemoteToHelperInvocation, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_NV_device_generated_commands
+void reservedmarshal_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxGraphicsShaderGroupCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxIndirectSequenceCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxIndirectCommandsTokenCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxIndirectCommandsStreamCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxIndirectCommandsTokenOffset, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxIndirectCommandsStreamStride, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->minSequencesCountBufferOffsetAlignment, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->minSequencesIndexBufferOffsetAlignment, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->minIndirectCommandsBufferOffsetAlignment, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->deviceGeneratedCommands, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkGraphicsShaderGroupCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGraphicsShaderGroupCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->stageCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->stageCount; ++i)
+    {
+        reservedmarshal_VkPipelineShaderStageCreateInfo(vkStream, rootType, (const VkPipelineShaderStageCreateInfo*)(forMarshaling->pStages + i), ptr);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pVertexInputState;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pVertexInputState)
+    {
+        reservedmarshal_VkPipelineVertexInputStateCreateInfo(vkStream, rootType, (const VkPipelineVertexInputStateCreateInfo*)(forMarshaling->pVertexInputState), ptr);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pTessellationState;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pTessellationState)
+    {
+        reservedmarshal_VkPipelineTessellationStateCreateInfo(vkStream, rootType, (const VkPipelineTessellationStateCreateInfo*)(forMarshaling->pTessellationState), ptr);
+    }
+}
+
+void reservedmarshal_VkGraphicsPipelineShaderGroupsCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGraphicsPipelineShaderGroupsCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->groupCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->groupCount; ++i)
+    {
+        reservedmarshal_VkGraphicsShaderGroupCreateInfoNV(vkStream, rootType, (const VkGraphicsShaderGroupCreateInfoNV*)(forMarshaling->pGroups + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->pipelineCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    if (forMarshaling->pipelineCount)
+    {
+        uint8_t* cgen_var_0_ptr = (uint8_t*)(*ptr);
+        if (forMarshaling)
+        {
+            for (uint32_t k = 0; k < forMarshaling->pipelineCount; ++k)
+            {
+                uint64_t tmpval = get_host_u64_VkPipeline(forMarshaling->pPipelines[k]);
+                memcpy(cgen_var_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+            }
+        }
+        *ptr += 8 * forMarshaling->pipelineCount;
+    }
+}
+
+void reservedmarshal_VkBindShaderGroupIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindShaderGroupIndirectCommandNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->groupIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkBindIndexBufferIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindIndexBufferIndirectCommandNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkDeviceAddress*)&forMarshaling->bufferAddress, sizeof(VkDeviceAddress));
+    *ptr += sizeof(VkDeviceAddress);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->size, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkIndexType*)&forMarshaling->indexType, sizeof(VkIndexType));
+    *ptr += sizeof(VkIndexType);
+}
+
+void reservedmarshal_VkBindVertexBufferIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindVertexBufferIndirectCommandNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkDeviceAddress*)&forMarshaling->bufferAddress, sizeof(VkDeviceAddress));
+    *ptr += sizeof(VkDeviceAddress);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->size, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->stride, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkSetStateFlagsIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSetStateFlagsIndirectCommandNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->data, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkIndirectCommandsStreamNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkIndirectCommandsStreamNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&forMarshaling->buffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkIndirectCommandsLayoutTokenNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkIndirectCommandsLayoutTokenNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkIndirectCommandsTokenTypeNV*)&forMarshaling->tokenType, sizeof(VkIndirectCommandsTokenTypeNV));
+    *ptr += sizeof(VkIndirectCommandsTokenTypeNV);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->stream, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->offset, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->vertexBindingUnit, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->vertexDynamicStride, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPipelineLayout((*&forMarshaling->pushconstantPipelineLayout));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkShaderStageFlags*)&forMarshaling->pushconstantShaderStageFlags, sizeof(VkShaderStageFlags));
+    *ptr += sizeof(VkShaderStageFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->pushconstantOffset, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->pushconstantSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkIndirectStateFlagsNV*)&forMarshaling->indirectStateFlags, sizeof(VkIndirectStateFlagsNV));
+    *ptr += sizeof(VkIndirectStateFlagsNV);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->indexTypeCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const VkIndexType*)forMarshaling->pIndexTypes, forMarshaling->indexTypeCount * sizeof(const VkIndexType));
+    *ptr += forMarshaling->indexTypeCount * sizeof(const VkIndexType);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pIndexTypeValues, forMarshaling->indexTypeCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->indexTypeCount * sizeof(const uint32_t);
+}
+
+void reservedmarshal_VkIndirectCommandsLayoutCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkIndirectCommandsLayoutCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkIndirectCommandsLayoutUsageFlagsNV*)&forMarshaling->flags, sizeof(VkIndirectCommandsLayoutUsageFlagsNV));
+    *ptr += sizeof(VkIndirectCommandsLayoutUsageFlagsNV);
+    memcpy(*ptr, (VkPipelineBindPoint*)&forMarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    *ptr += sizeof(VkPipelineBindPoint);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->tokenCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->tokenCount; ++i)
+    {
+        reservedmarshal_VkIndirectCommandsLayoutTokenNV(vkStream, rootType, (const VkIndirectCommandsLayoutTokenNV*)(forMarshaling->pTokens + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->streamCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (const uint32_t*)forMarshaling->pStreamStrides, forMarshaling->streamCount * sizeof(const uint32_t));
+    *ptr += forMarshaling->streamCount * sizeof(const uint32_t);
+}
+
+void reservedmarshal_VkGeneratedCommandsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeneratedCommandsInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineBindPoint*)&forMarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    *ptr += sizeof(VkPipelineBindPoint);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPipeline((*&forMarshaling->pipeline));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkIndirectCommandsLayoutNV((*&forMarshaling->indirectCommandsLayout));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->streamCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->streamCount; ++i)
+    {
+        reservedmarshal_VkIndirectCommandsStreamNV(vkStream, rootType, (const VkIndirectCommandsStreamNV*)(forMarshaling->pStreams + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->sequencesCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    uint64_t cgen_var_2;
+    *&cgen_var_2 = get_host_u64_VkBuffer((*&forMarshaling->preprocessBuffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_2, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->preprocessOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->preprocessSize, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    uint64_t cgen_var_3;
+    *&cgen_var_3 = get_host_u64_VkBuffer((*&forMarshaling->sequencesCountBuffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_3, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->sequencesCountOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    uint64_t cgen_var_4;
+    *&cgen_var_4 = get_host_u64_VkBuffer((*&forMarshaling->sequencesIndexBuffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_4, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->sequencesIndexOffset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkGeneratedCommandsMemoryRequirementsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeneratedCommandsMemoryRequirementsInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineBindPoint*)&forMarshaling->pipelineBindPoint, sizeof(VkPipelineBindPoint));
+    *ptr += sizeof(VkPipelineBindPoint);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkPipeline((*&forMarshaling->pipeline));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkIndirectCommandsLayoutNV((*&forMarshaling->indirectCommandsLayout));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxSequencesCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+void reservedmarshal_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->texelBufferAlignment, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->storageTexelBufferOffsetAlignmentBytes, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->storageTexelBufferOffsetSingleTexelAlignment, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->uniformTexelBufferOffsetAlignmentBytes, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->uniformTexelBufferOffsetSingleTexelAlignment, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_transform
+void reservedmarshal_VkRenderPassTransformBeginInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassTransformBeginInfoQCOM* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkSurfaceTransformFlagBitsKHR*)&forMarshaling->transform, sizeof(VkSurfaceTransformFlagBitsKHR));
+    *ptr += sizeof(VkSurfaceTransformFlagBitsKHR);
+}
+
+void reservedmarshal_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceRenderPassTransformInfoQCOM* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkSurfaceTransformFlagBitsKHR*)&forMarshaling->transform, sizeof(VkSurfaceTransformFlagBitsKHR));
+    *ptr += sizeof(VkSurfaceTransformFlagBitsKHR);
+    reservedmarshal_VkRect2D(vkStream, rootType, (VkRect2D*)(&forMarshaling->renderArea), ptr);
+}
+
+#endif
+#ifdef VK_EXT_device_memory_report
+void reservedmarshal_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->deviceMemoryReport, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkDeviceMemoryReportCallbackDataEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceMemoryReportCallbackDataEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceMemoryReportFlagsEXT*)&forMarshaling->flags, sizeof(VkDeviceMemoryReportFlagsEXT));
+    *ptr += sizeof(VkDeviceMemoryReportFlagsEXT);
+    memcpy(*ptr, (VkDeviceMemoryReportEventTypeEXT*)&forMarshaling->type, sizeof(VkDeviceMemoryReportEventTypeEXT));
+    *ptr += sizeof(VkDeviceMemoryReportEventTypeEXT);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->memoryObjectId, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkObjectType*)&forMarshaling->objectType, sizeof(VkObjectType));
+    *ptr += sizeof(VkObjectType);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->objectHandle, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->heapIndex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkDeviceDeviceMemoryReportCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceDeviceMemoryReportCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceMemoryReportFlagsEXT*)&forMarshaling->flags, sizeof(VkDeviceMemoryReportFlagsEXT));
+    *ptr += sizeof(VkDeviceMemoryReportFlagsEXT);
+    uint64_t cgen_var_0 = (uint64_t)forMarshaling->pfnUserCallback;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    memcpy(*ptr, (void*)forMarshaling->pUserData, sizeof(uint8_t));
+    *ptr += sizeof(uint8_t);
+}
+
+#endif
+#ifdef VK_EXT_robustness2
+void reservedmarshal_VkPhysicalDeviceRobustness2FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRobustness2FeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->robustBufferAccess2, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->robustImageAccess2, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->nullDescriptor, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceRobustness2PropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRobustness2PropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->robustStorageBufferAccessSizeAlignment, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->robustUniformBufferAccessSizeAlignment, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+#endif
+#ifdef VK_EXT_custom_border_color
+void reservedmarshal_VkSamplerCustomBorderColorCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSamplerCustomBorderColorCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkClearColorValue(vkStream, rootType, (VkClearColorValue*)(&forMarshaling->customBorderColor), ptr);
+    memcpy(*ptr, (VkFormat*)&forMarshaling->format, sizeof(VkFormat));
+    *ptr += sizeof(VkFormat);
+}
+
+void reservedmarshal_VkPhysicalDeviceCustomBorderColorPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCustomBorderColorPropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxCustomBorderColorSamplers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPhysicalDeviceCustomBorderColorFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCustomBorderColorFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->customBorderColors, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->customBorderColorWithoutFormat, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+void reservedmarshal_VkPhysicalDevicePrivateDataFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePrivateDataFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->privateData, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkDevicePrivateDataCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDevicePrivateDataCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->privateDataSlotRequestCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkPrivateDataSlotCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPrivateDataSlotCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPrivateDataSlotCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkPrivateDataSlotCreateFlagsEXT));
+    *ptr += sizeof(VkPrivateDataSlotCreateFlagsEXT);
+}
+
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+void reservedmarshal_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->pipelineCreationCacheControl, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_NV_device_diagnostics_config
+void reservedmarshal_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDiagnosticsConfigFeaturesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->diagnosticsConfig, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkDeviceDiagnosticsConfigCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceDiagnosticsConfigCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceDiagnosticsConfigFlagsNV*)&forMarshaling->flags, sizeof(VkDeviceDiagnosticsConfigFlagsNV));
+    *ptr += sizeof(VkDeviceDiagnosticsConfigFlagsNV);
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+void reservedmarshal_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentShadingRateEnums, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->supersampleFragmentShadingRates, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->noInvocationFragmentShadingRates, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkSampleCountFlagBits*)&forMarshaling->maxFragmentShadingRateInvocationCount, sizeof(VkSampleCountFlagBits));
+    *ptr += sizeof(VkSampleCountFlagBits);
+}
+
+void reservedmarshal_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineFragmentShadingRateEnumStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkFragmentShadingRateTypeNV*)&forMarshaling->shadingRateType, sizeof(VkFragmentShadingRateTypeNV));
+    *ptr += sizeof(VkFragmentShadingRateTypeNV);
+    memcpy(*ptr, (VkFragmentShadingRateNV*)&forMarshaling->shadingRate, sizeof(VkFragmentShadingRateNV));
+    *ptr += sizeof(VkFragmentShadingRateNV);
+    memcpy(*ptr, (VkFragmentShadingRateCombinerOpKHR*)forMarshaling->combinerOps, 2 * sizeof(VkFragmentShadingRateCombinerOpKHR));
+    *ptr += 2 * sizeof(VkFragmentShadingRateCombinerOpKHR);
+}
+
+#endif
+#ifdef VK_EXT_fragment_density_map2
+void reservedmarshal_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->fragmentDensityMapDeferred, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->subsampledLoads, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->subsampledCoarseReconstructionEarlyAccess, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxSubsampledArrayLayers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetSubsampledSamplers, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+void reservedmarshal_VkCopyCommandTransformInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyCommandTransformInfoQCOM* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkSurfaceTransformFlagBitsKHR*)&forMarshaling->transform, sizeof(VkSurfaceTransformFlagBitsKHR));
+    *ptr += sizeof(VkSurfaceTransformFlagBitsKHR);
+}
+
+#endif
+#ifdef VK_EXT_image_robustness
+void reservedmarshal_VkPhysicalDeviceImageRobustnessFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageRobustnessFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->robustImageAccess, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_4444_formats
+void reservedmarshal_VkPhysicalDevice4444FormatsFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevice4444FormatsFeaturesEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->formatA4R4G4B4, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->formatA4B4G4R4, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+#ifdef VK_EXT_directfb_surface
+void reservedmarshal_VkDirectFBSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDirectFBSurfaceCreateInfoEXT* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDirectFBSurfaceCreateFlagsEXT*)&forMarshaling->flags, sizeof(VkDirectFBSurfaceCreateFlagsEXT));
+    *ptr += sizeof(VkDirectFBSurfaceCreateFlagsEXT);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->dfb;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->dfb)
+    {
+        memcpy(*ptr, (IDirectFB*)forMarshaling->dfb, sizeof(IDirectFB));
+        *ptr += sizeof(IDirectFB);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->surface;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->surface)
+    {
+        memcpy(*ptr, (IDirectFBSurface*)forMarshaling->surface, sizeof(IDirectFBSurface));
+        *ptr += sizeof(IDirectFBSurface);
+    }
+}
+
+#endif
+#ifdef VK_GOOGLE_gfxstream
+void reservedmarshal_VkImportColorBufferGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportColorBufferGOOGLE* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->colorBuffer, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkImportBufferGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportBufferGOOGLE* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->buffer, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkImportPhysicalAddressGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportPhysicalAddressGOOGLE* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->physicalAddress, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkFormat*)&forMarshaling->format, sizeof(VkFormat));
+    *ptr += sizeof(VkFormat);
+    memcpy(*ptr, (VkImageTiling*)&forMarshaling->tiling, sizeof(VkImageTiling));
+    *ptr += sizeof(VkImageTiling);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->tilingParameter, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_KHR_acceleration_structure
+void reservedmarshal_VkDeviceOrHostAddressKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceOrHostAddressKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkDeviceAddress*)&forMarshaling->deviceAddress, sizeof(VkDeviceAddress));
+    *ptr += sizeof(VkDeviceAddress);
+}
+
+void reservedmarshal_VkDeviceOrHostAddressConstKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceOrHostAddressConstKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkDeviceAddress*)&forMarshaling->deviceAddress, sizeof(VkDeviceAddress));
+    *ptr += sizeof(VkDeviceAddress);
+}
+
+void reservedmarshal_VkAccelerationStructureBuildRangeInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildRangeInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->primitiveCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->primitiveOffset, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->firstVertex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->transformOffset, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkAccelerationStructureGeometryTrianglesDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryTrianglesDataKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkFormat*)&forMarshaling->vertexFormat, sizeof(VkFormat));
+    *ptr += sizeof(VkFormat);
+    reservedmarshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forMarshaling->vertexData), ptr);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->vertexStride, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxVertex, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (VkIndexType*)&forMarshaling->indexType, sizeof(VkIndexType));
+    *ptr += sizeof(VkIndexType);
+    reservedmarshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forMarshaling->indexData), ptr);
+    reservedmarshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forMarshaling->transformData), ptr);
+}
+
+void reservedmarshal_VkAccelerationStructureGeometryAabbsDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryAabbsDataKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forMarshaling->data), ptr);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->stride, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkAccelerationStructureGeometryInstancesDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryInstancesDataKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->arrayOfPointers, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    reservedmarshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forMarshaling->data), ptr);
+}
+
+void reservedmarshal_VkAccelerationStructureGeometryDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryDataKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    reservedmarshal_VkAccelerationStructureGeometryTrianglesDataKHR(vkStream, rootType, (VkAccelerationStructureGeometryTrianglesDataKHR*)(&forMarshaling->triangles), ptr);
+}
+
+void reservedmarshal_VkAccelerationStructureGeometryKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkGeometryTypeKHR*)&forMarshaling->geometryType, sizeof(VkGeometryTypeKHR));
+    *ptr += sizeof(VkGeometryTypeKHR);
+    reservedmarshal_VkAccelerationStructureGeometryDataKHR(vkStream, rootType, (VkAccelerationStructureGeometryDataKHR*)(&forMarshaling->geometry), ptr);
+    memcpy(*ptr, (VkGeometryFlagsKHR*)&forMarshaling->flags, sizeof(VkGeometryFlagsKHR));
+    *ptr += sizeof(VkGeometryFlagsKHR);
+}
+
+void reservedmarshal_VkAccelerationStructureBuildGeometryInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildGeometryInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkAccelerationStructureTypeKHR*)&forMarshaling->type, sizeof(VkAccelerationStructureTypeKHR));
+    *ptr += sizeof(VkAccelerationStructureTypeKHR);
+    memcpy(*ptr, (VkBuildAccelerationStructureFlagsKHR*)&forMarshaling->flags, sizeof(VkBuildAccelerationStructureFlagsKHR));
+    *ptr += sizeof(VkBuildAccelerationStructureFlagsKHR);
+    memcpy(*ptr, (VkBuildAccelerationStructureModeKHR*)&forMarshaling->mode, sizeof(VkBuildAccelerationStructureModeKHR));
+    *ptr += sizeof(VkBuildAccelerationStructureModeKHR);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkAccelerationStructureKHR((*&forMarshaling->srcAccelerationStructure));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkAccelerationStructureKHR((*&forMarshaling->dstAccelerationStructure));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->geometryCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)forMarshaling->pGeometries;
+    memcpy((*ptr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pGeometries)
+    {
+        for (uint32_t i = 0; i < (uint32_t)forMarshaling->geometryCount; ++i)
+        {
+            reservedmarshal_VkAccelerationStructureGeometryKHR(vkStream, rootType, (const VkAccelerationStructureGeometryKHR*)(forMarshaling->pGeometries + i), ptr);
+        }
+    }
+    reservedmarshal_VkDeviceOrHostAddressKHR(vkStream, rootType, (VkDeviceOrHostAddressKHR*)(&forMarshaling->scratchData), ptr);
+}
+
+void reservedmarshal_VkAccelerationStructureCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureCreateInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkAccelerationStructureCreateFlagsKHR*)&forMarshaling->createFlags, sizeof(VkAccelerationStructureCreateFlagsKHR));
+    *ptr += sizeof(VkAccelerationStructureCreateFlagsKHR);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkBuffer((*&forMarshaling->buffer));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->offset, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkAccelerationStructureTypeKHR*)&forMarshaling->type, sizeof(VkAccelerationStructureTypeKHR));
+    *ptr += sizeof(VkAccelerationStructureTypeKHR);
+    memcpy(*ptr, (VkDeviceAddress*)&forMarshaling->deviceAddress, sizeof(VkDeviceAddress));
+    *ptr += sizeof(VkDeviceAddress);
+}
+
+void reservedmarshal_VkWriteDescriptorSetAccelerationStructureKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetAccelerationStructureKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->accelerationStructureCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pAccelerationStructures;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pAccelerationStructures)
+    {
+        if (forMarshaling->accelerationStructureCount)
+        {
+            uint8_t* cgen_var_0_0_ptr = (uint8_t*)(*ptr);
+            if (forMarshaling)
+            {
+                for (uint32_t k = 0; k < forMarshaling->accelerationStructureCount; ++k)
+                {
+                    uint64_t tmpval = get_host_u64_VkAccelerationStructureKHR(forMarshaling->pAccelerationStructures[k]);
+                    memcpy(cgen_var_0_0_ptr + k * 8, &tmpval, sizeof(uint64_t));
+                }
+            }
+            *ptr += 8 * forMarshaling->accelerationStructureCount;
+        }
+    }
+}
+
+void reservedmarshal_VkPhysicalDeviceAccelerationStructureFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceAccelerationStructureFeaturesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->accelerationStructure, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->accelerationStructureCaptureReplay, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->accelerationStructureIndirectBuild, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->accelerationStructureHostCommands, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->descriptorBindingAccelerationStructureUpdateAfterBind, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceAccelerationStructurePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceAccelerationStructurePropertiesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->maxGeometryCount, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->maxInstanceCount, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (uint64_t*)&forMarshaling->maxPrimitiveCount, sizeof(uint64_t));
+    *ptr += sizeof(uint64_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorAccelerationStructures, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPerStageDescriptorUpdateAfterBindAccelerationStructures, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetAccelerationStructures, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxDescriptorSetUpdateAfterBindAccelerationStructures, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->minAccelerationStructureScratchOffsetAlignment, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkAccelerationStructureDeviceAddressInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureDeviceAddressInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkAccelerationStructureKHR((*&forMarshaling->accelerationStructure));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+}
+
+void reservedmarshal_VkAccelerationStructureVersionInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureVersionInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (const uint8_t*)forMarshaling->pVersionData, 2*VK_UUID_SIZE * sizeof(const uint8_t));
+    *ptr += 2*VK_UUID_SIZE * sizeof(const uint8_t);
+}
+
+void reservedmarshal_VkCopyAccelerationStructureToMemoryInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyAccelerationStructureToMemoryInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkAccelerationStructureKHR((*&forMarshaling->src));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    reservedmarshal_VkDeviceOrHostAddressKHR(vkStream, rootType, (VkDeviceOrHostAddressKHR*)(&forMarshaling->dst), ptr);
+    memcpy(*ptr, (VkCopyAccelerationStructureModeKHR*)&forMarshaling->mode, sizeof(VkCopyAccelerationStructureModeKHR));
+    *ptr += sizeof(VkCopyAccelerationStructureModeKHR);
+}
+
+void reservedmarshal_VkCopyMemoryToAccelerationStructureInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyMemoryToAccelerationStructureInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    reservedmarshal_VkDeviceOrHostAddressConstKHR(vkStream, rootType, (VkDeviceOrHostAddressConstKHR*)(&forMarshaling->src), ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkAccelerationStructureKHR((*&forMarshaling->dst));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkCopyAccelerationStructureModeKHR*)&forMarshaling->mode, sizeof(VkCopyAccelerationStructureModeKHR));
+    *ptr += sizeof(VkCopyAccelerationStructureModeKHR);
+}
+
+void reservedmarshal_VkCopyAccelerationStructureInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyAccelerationStructureInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    uint64_t cgen_var_0;
+    *&cgen_var_0 = get_host_u64_VkAccelerationStructureKHR((*&forMarshaling->src));
+    memcpy(*ptr, (uint64_t*)&cgen_var_0, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_1;
+    *&cgen_var_1 = get_host_u64_VkAccelerationStructureKHR((*&forMarshaling->dst));
+    memcpy(*ptr, (uint64_t*)&cgen_var_1, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (VkCopyAccelerationStructureModeKHR*)&forMarshaling->mode, sizeof(VkCopyAccelerationStructureModeKHR));
+    *ptr += sizeof(VkCopyAccelerationStructureModeKHR);
+}
+
+void reservedmarshal_VkAccelerationStructureBuildSizesInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildSizesInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->accelerationStructureSize, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->updateScratchSize, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->buildScratchSize, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+void reservedmarshal_VkRayTracingShaderGroupCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingShaderGroupCreateInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkRayTracingShaderGroupTypeKHR*)&forMarshaling->type, sizeof(VkRayTracingShaderGroupTypeKHR));
+    *ptr += sizeof(VkRayTracingShaderGroupTypeKHR);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->generalShader, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->closestHitShader, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->anyHitShader, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->intersectionShader, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pShaderGroupCaptureReplayHandle;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pShaderGroupCaptureReplayHandle)
+    {
+        memcpy(*ptr, (const void*)forMarshaling->pShaderGroupCaptureReplayHandle, sizeof(const uint8_t));
+        *ptr += sizeof(const uint8_t);
+    }
+}
+
+void reservedmarshal_VkRayTracingPipelineInterfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingPipelineInterfaceCreateInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPipelineRayPayloadSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPipelineRayHitAttributeSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkRayTracingPipelineCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingPipelineCreateInfoKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkPipelineCreateFlags*)&forMarshaling->flags, sizeof(VkPipelineCreateFlags));
+    *ptr += sizeof(VkPipelineCreateFlags);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->stageCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->stageCount; ++i)
+    {
+        reservedmarshal_VkPipelineShaderStageCreateInfo(vkStream, rootType, (const VkPipelineShaderStageCreateInfo*)(forMarshaling->pStages + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->groupCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    for (uint32_t i = 0; i < (uint32_t)forMarshaling->groupCount; ++i)
+    {
+        reservedmarshal_VkRayTracingShaderGroupCreateInfoKHR(vkStream, rootType, (const VkRayTracingShaderGroupCreateInfoKHR*)(forMarshaling->pGroups + i), ptr);
+    }
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxPipelineRayRecursionDepth, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    // WARNING PTR CHECK
+    uint64_t cgen_var_0 = (uint64_t)(uintptr_t)forMarshaling->pLibraryInfo;
+    memcpy((*ptr), &cgen_var_0, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pLibraryInfo)
+    {
+        reservedmarshal_VkPipelineLibraryCreateInfoKHR(vkStream, rootType, (const VkPipelineLibraryCreateInfoKHR*)(forMarshaling->pLibraryInfo), ptr);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_1 = (uint64_t)(uintptr_t)forMarshaling->pLibraryInterface;
+    memcpy((*ptr), &cgen_var_1, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pLibraryInterface)
+    {
+        reservedmarshal_VkRayTracingPipelineInterfaceCreateInfoKHR(vkStream, rootType, (const VkRayTracingPipelineInterfaceCreateInfoKHR*)(forMarshaling->pLibraryInterface), ptr);
+    }
+    // WARNING PTR CHECK
+    uint64_t cgen_var_2 = (uint64_t)(uintptr_t)forMarshaling->pDynamicState;
+    memcpy((*ptr), &cgen_var_2, 8);
+    android::base::Stream::toBe64((uint8_t*)(*ptr));
+    *ptr += 8;
+    if (forMarshaling->pDynamicState)
+    {
+        reservedmarshal_VkPipelineDynamicStateCreateInfo(vkStream, rootType, (const VkPipelineDynamicStateCreateInfo*)(forMarshaling->pDynamicState), ptr);
+    }
+    uint64_t cgen_var_3;
+    *&cgen_var_3 = get_host_u64_VkPipelineLayout((*&forMarshaling->layout));
+    memcpy(*ptr, (uint64_t*)&cgen_var_3, 1 * 8);
+    *ptr += 1 * 8;
+    uint64_t cgen_var_4;
+    *&cgen_var_4 = get_host_u64_VkPipeline((*&forMarshaling->basePipelineHandle));
+    memcpy(*ptr, (uint64_t*)&cgen_var_4, 1 * 8);
+    *ptr += 1 * 8;
+    memcpy(*ptr, (int32_t*)&forMarshaling->basePipelineIndex, sizeof(int32_t));
+    *ptr += sizeof(int32_t);
+}
+
+void reservedmarshal_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPipelineFeaturesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->rayTracingPipeline, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->rayTracingPipelineShaderGroupHandleCaptureReplay, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->rayTracingPipelineShaderGroupHandleCaptureReplayMixed, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->rayTracingPipelineTraceRaysIndirect, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->rayTraversalPrimitiveCulling, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+void reservedmarshal_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPipelinePropertiesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->shaderGroupHandleSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxRayRecursionDepth, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxShaderGroupStride, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->shaderGroupBaseAlignment, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->shaderGroupHandleCaptureReplaySize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxRayDispatchInvocationCount, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->shaderGroupHandleAlignment, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->maxRayHitAttributeSize, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+void reservedmarshal_VkStridedDeviceAddressRegionKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkStridedDeviceAddressRegionKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkDeviceAddress*)&forMarshaling->deviceAddress, sizeof(VkDeviceAddress));
+    *ptr += sizeof(VkDeviceAddress);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->stride, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+    memcpy(*ptr, (VkDeviceSize*)&forMarshaling->size, sizeof(VkDeviceSize));
+    *ptr += sizeof(VkDeviceSize);
+}
+
+void reservedmarshal_VkTraceRaysIndirectCommandKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkTraceRaysIndirectCommandKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (uint32_t*)&forMarshaling->width, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->height, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+    memcpy(*ptr, (uint32_t*)&forMarshaling->depth, sizeof(uint32_t));
+    *ptr += sizeof(uint32_t);
+}
+
+#endif
+#ifdef VK_KHR_ray_query
+void reservedmarshal_VkPhysicalDeviceRayQueryFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayQueryFeaturesKHR* forMarshaling,
+    uint8_t** ptr)
+{
+    (void)vkStream;
+    (void)rootType;
+    memcpy(*ptr, (VkStructureType*)&forMarshaling->sType, sizeof(VkStructureType));
+    *ptr += sizeof(VkStructureType);
+    if (rootType == VK_STRUCTURE_TYPE_MAX_ENUM)
+    {
+        rootType = forMarshaling->sType;
+    }
+    reservedmarshal_extension_struct(vkStream, rootType, forMarshaling->pNext, ptr);
+    memcpy(*ptr, (VkBool32*)&forMarshaling->rayQuery, sizeof(VkBool32));
+    *ptr += sizeof(VkBool32);
+}
+
+#endif
+void reservedmarshal_extension_struct(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const void* structExtension,
+    uint8_t** ptr)
+{
+    VkInstanceCreateInfo* structAccess = (VkInstanceCreateInfo*)(structExtension);
+    uint32_t currExtSize = goldfish_vk_extension_struct_size_with_stream_features(vkStream->getFeatureBits(), rootType, structExtension);
+    if (!currExtSize && structExtension)
+    {
+        // unknown struct extension; skip and call on its pNext field
+        reservedmarshal_extension_struct(vkStream, rootType, (void*)structAccess->pNext, ptr);
+        return;
+    }
+    else
+    {
+        // known or null extension struct
+        memcpy(*ptr, &currExtSize, sizeof(uint32_t));;
+        android::base::Stream::toBe32((uint8_t*)*ptr); *ptr += sizeof(uint32_t);
+        if (!currExtSize)
+        {
+            // exit if this was a null extension struct (size == 0 in this branch)
+            return;
+        }
+    }
+    memcpy(*ptr, structExtension, sizeof(VkStructureType)); *ptr += sizeof(VkStructureType);
+    if (!structExtension)
+    {
+        return;
+    }
+    uint32_t structType = (uint32_t)goldfish_vk_struct_type(structExtension);
+    switch(structType)
+    {
+#ifdef VK_VERSION_1_1
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES:
+        {
+            reservedmarshal_VkPhysicalDeviceSubgroupProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceSubgroupProperties*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDevice16BitStorageFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDevice16BitStorageFeatures*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS:
+        {
+            reservedmarshal_VkMemoryDedicatedRequirements(vkStream, rootType, reinterpret_cast<const VkMemoryDedicatedRequirements*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO:
+        {
+            reservedmarshal_VkMemoryDedicatedAllocateInfo(vkStream, rootType, reinterpret_cast<const VkMemoryDedicatedAllocateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO:
+        {
+            reservedmarshal_VkMemoryAllocateFlagsInfo(vkStream, rootType, reinterpret_cast<const VkMemoryAllocateFlagsInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO:
+        {
+            reservedmarshal_VkDeviceGroupRenderPassBeginInfo(vkStream, rootType, reinterpret_cast<const VkDeviceGroupRenderPassBeginInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO:
+        {
+            reservedmarshal_VkDeviceGroupCommandBufferBeginInfo(vkStream, rootType, reinterpret_cast<const VkDeviceGroupCommandBufferBeginInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO:
+        {
+            reservedmarshal_VkDeviceGroupSubmitInfo(vkStream, rootType, reinterpret_cast<const VkDeviceGroupSubmitInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO:
+        {
+            reservedmarshal_VkDeviceGroupBindSparseInfo(vkStream, rootType, reinterpret_cast<const VkDeviceGroupBindSparseInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO:
+        {
+            reservedmarshal_VkBindBufferMemoryDeviceGroupInfo(vkStream, rootType, reinterpret_cast<const VkBindBufferMemoryDeviceGroupInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO:
+        {
+            reservedmarshal_VkBindImageMemoryDeviceGroupInfo(vkStream, rootType, reinterpret_cast<const VkBindImageMemoryDeviceGroupInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO:
+        {
+            reservedmarshal_VkDeviceGroupDeviceCreateInfo(vkStream, rootType, reinterpret_cast<const VkDeviceGroupDeviceCreateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
+        {
+            reservedmarshal_VkPhysicalDeviceFeatures2(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFeatures2*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES:
+        {
+            reservedmarshal_VkPhysicalDevicePointClippingProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePointClippingProperties*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO:
+        {
+            reservedmarshal_VkRenderPassInputAttachmentAspectCreateInfo(vkStream, rootType, reinterpret_cast<const VkRenderPassInputAttachmentAspectCreateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO:
+        {
+            reservedmarshal_VkImageViewUsageCreateInfo(vkStream, rootType, reinterpret_cast<const VkImageViewUsageCreateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO:
+        {
+            reservedmarshal_VkPipelineTessellationDomainOriginStateCreateInfo(vkStream, rootType, reinterpret_cast<const VkPipelineTessellationDomainOriginStateCreateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO:
+        {
+            reservedmarshal_VkRenderPassMultiviewCreateInfo(vkStream, rootType, reinterpret_cast<const VkRenderPassMultiviewCreateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceMultiviewFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceMultiviewFeatures*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES:
+        {
+            reservedmarshal_VkPhysicalDeviceMultiviewProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceMultiviewProperties*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceVariablePointersFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceVariablePointersFeatures*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceProtectedMemoryFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceProtectedMemoryFeatures*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES:
+        {
+            reservedmarshal_VkPhysicalDeviceProtectedMemoryProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceProtectedMemoryProperties*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO:
+        {
+            reservedmarshal_VkProtectedSubmitInfo(vkStream, rootType, reinterpret_cast<const VkProtectedSubmitInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO:
+        {
+            reservedmarshal_VkSamplerYcbcrConversionInfo(vkStream, rootType, reinterpret_cast<const VkSamplerYcbcrConversionInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO:
+        {
+            reservedmarshal_VkBindImagePlaneMemoryInfo(vkStream, rootType, reinterpret_cast<const VkBindImagePlaneMemoryInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO:
+        {
+            reservedmarshal_VkImagePlaneMemoryRequirementsInfo(vkStream, rootType, reinterpret_cast<const VkImagePlaneMemoryRequirementsInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceSamplerYcbcrConversionFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceSamplerYcbcrConversionFeatures*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES:
+        {
+            reservedmarshal_VkSamplerYcbcrConversionImageFormatProperties(vkStream, rootType, reinterpret_cast<const VkSamplerYcbcrConversionImageFormatProperties*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO:
+        {
+            reservedmarshal_VkPhysicalDeviceExternalImageFormatInfo(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceExternalImageFormatInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES:
+        {
+            reservedmarshal_VkExternalImageFormatProperties(vkStream, rootType, reinterpret_cast<const VkExternalImageFormatProperties*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES:
+        {
+            reservedmarshal_VkPhysicalDeviceIDProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceIDProperties*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO:
+        {
+            reservedmarshal_VkExternalMemoryImageCreateInfo(vkStream, rootType, reinterpret_cast<const VkExternalMemoryImageCreateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO:
+        {
+            reservedmarshal_VkExternalMemoryBufferCreateInfo(vkStream, rootType, reinterpret_cast<const VkExternalMemoryBufferCreateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO:
+        {
+            reservedmarshal_VkExportMemoryAllocateInfo(vkStream, rootType, reinterpret_cast<const VkExportMemoryAllocateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO:
+        {
+            reservedmarshal_VkExportFenceCreateInfo(vkStream, rootType, reinterpret_cast<const VkExportFenceCreateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO:
+        {
+            reservedmarshal_VkExportSemaphoreCreateInfo(vkStream, rootType, reinterpret_cast<const VkExportSemaphoreCreateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES:
+        {
+            reservedmarshal_VkPhysicalDeviceMaintenance3Properties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceMaintenance3Properties*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceShaderDrawParametersFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderDrawParametersFeatures*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceVulkan11Features(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceVulkan11Features*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
+        {
+            reservedmarshal_VkPhysicalDeviceVulkan11Properties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceVulkan11Properties*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceVulkan12Features(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceVulkan12Features*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
+        {
+            reservedmarshal_VkPhysicalDeviceVulkan12Properties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceVulkan12Properties*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO:
+        {
+            reservedmarshal_VkImageFormatListCreateInfo(vkStream, rootType, reinterpret_cast<const VkImageFormatListCreateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDevice8BitStorageFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDevice8BitStorageFeatures*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES:
+        {
+            reservedmarshal_VkPhysicalDeviceDriverProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDriverProperties*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceShaderAtomicInt64Features(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderAtomicInt64Features*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceShaderFloat16Int8Features(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderFloat16Int8Features*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES:
+        {
+            reservedmarshal_VkPhysicalDeviceFloatControlsProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFloatControlsProperties*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO:
+        {
+            reservedmarshal_VkDescriptorSetLayoutBindingFlagsCreateInfo(vkStream, rootType, reinterpret_cast<const VkDescriptorSetLayoutBindingFlagsCreateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceDescriptorIndexingFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDescriptorIndexingFeatures*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES:
+        {
+            reservedmarshal_VkPhysicalDeviceDescriptorIndexingProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDescriptorIndexingProperties*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO:
+        {
+            reservedmarshal_VkDescriptorSetVariableDescriptorCountAllocateInfo(vkStream, rootType, reinterpret_cast<const VkDescriptorSetVariableDescriptorCountAllocateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT:
+        {
+            reservedmarshal_VkDescriptorSetVariableDescriptorCountLayoutSupport(vkStream, rootType, reinterpret_cast<const VkDescriptorSetVariableDescriptorCountLayoutSupport*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE:
+        {
+            reservedmarshal_VkSubpassDescriptionDepthStencilResolve(vkStream, rootType, reinterpret_cast<const VkSubpassDescriptionDepthStencilResolve*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES:
+        {
+            reservedmarshal_VkPhysicalDeviceDepthStencilResolveProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDepthStencilResolveProperties*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceScalarBlockLayoutFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceScalarBlockLayoutFeatures*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO:
+        {
+            reservedmarshal_VkImageStencilUsageCreateInfo(vkStream, rootType, reinterpret_cast<const VkImageStencilUsageCreateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO:
+        {
+            reservedmarshal_VkSamplerReductionModeCreateInfo(vkStream, rootType, reinterpret_cast<const VkSamplerReductionModeCreateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES:
+        {
+            reservedmarshal_VkPhysicalDeviceSamplerFilterMinmaxProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceSamplerFilterMinmaxProperties*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceVulkanMemoryModelFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceVulkanMemoryModelFeatures*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceImagelessFramebufferFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceImagelessFramebufferFeatures*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO:
+        {
+            reservedmarshal_VkFramebufferAttachmentsCreateInfo(vkStream, rootType, reinterpret_cast<const VkFramebufferAttachmentsCreateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO:
+        {
+            reservedmarshal_VkRenderPassAttachmentBeginInfo(vkStream, rootType, reinterpret_cast<const VkRenderPassAttachmentBeginInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceUniformBufferStandardLayoutFeatures*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT:
+        {
+            reservedmarshal_VkAttachmentReferenceStencilLayout(vkStream, rootType, reinterpret_cast<const VkAttachmentReferenceStencilLayout*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT:
+        {
+            reservedmarshal_VkAttachmentDescriptionStencilLayout(vkStream, rootType, reinterpret_cast<const VkAttachmentDescriptionStencilLayout*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceHostQueryResetFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceHostQueryResetFeatures*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceTimelineSemaphoreFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceTimelineSemaphoreFeatures*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES:
+        {
+            reservedmarshal_VkPhysicalDeviceTimelineSemaphoreProperties(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceTimelineSemaphoreProperties*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO:
+        {
+            reservedmarshal_VkSemaphoreTypeCreateInfo(vkStream, rootType, reinterpret_cast<const VkSemaphoreTypeCreateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
+        {
+            reservedmarshal_VkTimelineSemaphoreSubmitInfo(vkStream, rootType, reinterpret_cast<const VkTimelineSemaphoreSubmitInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
+        {
+            reservedmarshal_VkPhysicalDeviceBufferDeviceAddressFeatures(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceBufferDeviceAddressFeatures*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO:
+        {
+            reservedmarshal_VkBufferOpaqueCaptureAddressCreateInfo(vkStream, rootType, reinterpret_cast<const VkBufferOpaqueCaptureAddressCreateInfo*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO:
+        {
+            reservedmarshal_VkMemoryOpaqueCaptureAddressAllocateInfo(vkStream, rootType, reinterpret_cast<const VkMemoryOpaqueCaptureAddressAllocateInfo*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_swapchain
+        case VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR:
+        {
+            reservedmarshal_VkImageSwapchainCreateInfoKHR(vkStream, rootType, reinterpret_cast<const VkImageSwapchainCreateInfoKHR*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR:
+        {
+            reservedmarshal_VkBindImageMemorySwapchainInfoKHR(vkStream, rootType, reinterpret_cast<const VkBindImageMemorySwapchainInfoKHR*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR:
+        {
+            reservedmarshal_VkDeviceGroupPresentInfoKHR(vkStream, rootType, reinterpret_cast<const VkDeviceGroupPresentInfoKHR*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR:
+        {
+            reservedmarshal_VkDeviceGroupSwapchainCreateInfoKHR(vkStream, rootType, reinterpret_cast<const VkDeviceGroupSwapchainCreateInfoKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_display_swapchain
+        case VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR:
+        {
+            reservedmarshal_VkDisplayPresentInfoKHR(vkStream, rootType, reinterpret_cast<const VkDisplayPresentInfoKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_external_memory_win32
+        case VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR:
+        {
+            reservedmarshal_VkImportMemoryWin32HandleInfoKHR(vkStream, rootType, reinterpret_cast<const VkImportMemoryWin32HandleInfoKHR*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR:
+        {
+            reservedmarshal_VkExportMemoryWin32HandleInfoKHR(vkStream, rootType, reinterpret_cast<const VkExportMemoryWin32HandleInfoKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_external_memory_fd
+        case VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR:
+        {
+            reservedmarshal_VkImportMemoryFdInfoKHR(vkStream, rootType, reinterpret_cast<const VkImportMemoryFdInfoKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_win32_keyed_mutex
+        case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR:
+        {
+            reservedmarshal_VkWin32KeyedMutexAcquireReleaseInfoKHR(vkStream, rootType, reinterpret_cast<const VkWin32KeyedMutexAcquireReleaseInfoKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_external_semaphore_win32
+        case VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR:
+        {
+            reservedmarshal_VkExportSemaphoreWin32HandleInfoKHR(vkStream, rootType, reinterpret_cast<const VkExportSemaphoreWin32HandleInfoKHR*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR:
+        {
+            reservedmarshal_VkD3D12FenceSubmitInfoKHR(vkStream, rootType, reinterpret_cast<const VkD3D12FenceSubmitInfoKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_push_descriptor
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR:
+        {
+            reservedmarshal_VkPhysicalDevicePushDescriptorPropertiesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePushDescriptorPropertiesKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_incremental_present
+        case VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR:
+        {
+            reservedmarshal_VkPresentRegionsKHR(vkStream, rootType, reinterpret_cast<const VkPresentRegionsKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_shared_presentable_image
+        case VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR:
+        {
+            reservedmarshal_VkSharedPresentSurfaceCapabilitiesKHR(vkStream, rootType, reinterpret_cast<const VkSharedPresentSurfaceCapabilitiesKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_external_fence_win32
+        case VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR:
+        {
+            reservedmarshal_VkExportFenceWin32HandleInfoKHR(vkStream, rootType, reinterpret_cast<const VkExportFenceWin32HandleInfoKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_performance_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR:
+        {
+            reservedmarshal_VkPhysicalDevicePerformanceQueryFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePerformanceQueryFeaturesKHR*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR:
+        {
+            reservedmarshal_VkPhysicalDevicePerformanceQueryPropertiesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePerformanceQueryPropertiesKHR*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR:
+        {
+            reservedmarshal_VkQueryPoolPerformanceCreateInfoKHR(vkStream, rootType, reinterpret_cast<const VkQueryPoolPerformanceCreateInfoKHR*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR:
+        {
+            reservedmarshal_VkPerformanceQuerySubmitInfoKHR(vkStream, rootType, reinterpret_cast<const VkPerformanceQuerySubmitInfoKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_portability_subset
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR:
+        {
+            reservedmarshal_VkPhysicalDevicePortabilitySubsetFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePortabilitySubsetFeaturesKHR*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR:
+        {
+            reservedmarshal_VkPhysicalDevicePortabilitySubsetPropertiesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePortabilitySubsetPropertiesKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_shader_clock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR:
+        {
+            reservedmarshal_VkPhysicalDeviceShaderClockFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderClockFeaturesKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR:
+        {
+            reservedmarshal_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+        case VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR:
+        {
+            reservedmarshal_VkFragmentShadingRateAttachmentInfoKHR(vkStream, rootType, reinterpret_cast<const VkFragmentShadingRateAttachmentInfoKHR*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR:
+        {
+            reservedmarshal_VkPipelineFragmentShadingRateStateCreateInfoKHR(vkStream, rootType, reinterpret_cast<const VkPipelineFragmentShadingRateStateCreateInfoKHR*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR:
+        {
+            reservedmarshal_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShadingRateFeaturesKHR*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR:
+        {
+            reservedmarshal_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShadingRatePropertiesKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+        case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR:
+        {
+            reservedmarshal_VkSurfaceProtectedCapabilitiesKHR(vkStream, rootType, reinterpret_cast<const VkSurfaceProtectedCapabilitiesKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR:
+        {
+            reservedmarshal_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_ANDROID_native_buffer
+        case VK_STRUCTURE_TYPE_NATIVE_BUFFER_ANDROID:
+        {
+            reservedmarshal_VkNativeBufferANDROID(vkStream, rootType, reinterpret_cast<const VkNativeBufferANDROID*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_debug_report
+        case VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkDebugReportCallbackCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkDebugReportCallbackCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_AMD_rasterization_order
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD:
+        {
+            reservedmarshal_VkPipelineRasterizationStateRasterizationOrderAMD(vkStream, rootType, reinterpret_cast<const VkPipelineRasterizationStateRasterizationOrderAMD*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_dedicated_allocation
+        case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV:
+        {
+            reservedmarshal_VkDedicatedAllocationImageCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkDedicatedAllocationImageCreateInfoNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV:
+        {
+            reservedmarshal_VkDedicatedAllocationBufferCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkDedicatedAllocationBufferCreateInfoNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV:
+        {
+            reservedmarshal_VkDedicatedAllocationMemoryAllocateInfoNV(vkStream, rootType, reinterpret_cast<const VkDedicatedAllocationMemoryAllocateInfoNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_transform_feedback
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceTransformFeedbackFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceTransformFeedbackFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceTransformFeedbackPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceTransformFeedbackPropertiesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkPipelineRasterizationStateStreamCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineRasterizationStateStreamCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_AMD_texture_gather_bias_lod
+        case VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD:
+        {
+            reservedmarshal_VkTextureLODGatherFormatPropertiesAMD(vkStream, rootType, reinterpret_cast<const VkTextureLODGatherFormatPropertiesAMD*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_corner_sampled_image
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceCornerSampledImageFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceCornerSampledImageFeaturesNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_external_memory
+        case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV:
+        {
+            reservedmarshal_VkExternalMemoryImageCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkExternalMemoryImageCreateInfoNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV:
+        {
+            reservedmarshal_VkExportMemoryAllocateInfoNV(vkStream, rootType, reinterpret_cast<const VkExportMemoryAllocateInfoNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_external_memory_win32
+        case VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV:
+        {
+            reservedmarshal_VkImportMemoryWin32HandleInfoNV(vkStream, rootType, reinterpret_cast<const VkImportMemoryWin32HandleInfoNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV:
+        {
+            reservedmarshal_VkExportMemoryWin32HandleInfoNV(vkStream, rootType, reinterpret_cast<const VkExportMemoryWin32HandleInfoNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_win32_keyed_mutex
+        case VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV:
+        {
+            reservedmarshal_VkWin32KeyedMutexAcquireReleaseInfoNV(vkStream, rootType, reinterpret_cast<const VkWin32KeyedMutexAcquireReleaseInfoNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_validation_flags
+        case VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT:
+        {
+            reservedmarshal_VkValidationFlagsEXT(vkStream, rootType, reinterpret_cast<const VkValidationFlagsEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_astc_decode_mode
+        case VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT:
+        {
+            reservedmarshal_VkImageViewASTCDecodeModeEXT(vkStream, rootType, reinterpret_cast<const VkImageViewASTCDecodeModeEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceASTCDecodeFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceASTCDecodeFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_conditional_rendering
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceConditionalRenderingFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceConditionalRenderingFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT:
+        {
+            reservedmarshal_VkCommandBufferInheritanceConditionalRenderingInfoEXT(vkStream, rootType, reinterpret_cast<const VkCommandBufferInheritanceConditionalRenderingInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_clip_space_w_scaling
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV:
+        {
+            reservedmarshal_VkPipelineViewportWScalingStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineViewportWScalingStateCreateInfoNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_display_control
+        case VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkSwapchainCounterCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkSwapchainCounterCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_GOOGLE_display_timing
+        case VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE:
+        {
+            reservedmarshal_VkPresentTimesInfoGOOGLE(vkStream, rootType, reinterpret_cast<const VkPresentTimesInfoGOOGLE*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NVX_multiview_per_view_attributes
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX:
+        {
+            reservedmarshal_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_viewport_swizzle
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV:
+        {
+            reservedmarshal_VkPipelineViewportSwizzleStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineViewportSwizzleStateCreateInfoNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_discard_rectangles
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceDiscardRectanglePropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDiscardRectanglePropertiesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkPipelineDiscardRectangleStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineDiscardRectangleStateCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_conservative_rasterization
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceConservativeRasterizationPropertiesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkPipelineRasterizationConservativeStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineRasterizationConservativeStateCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_depth_clip_enable
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceDepthClipEnableFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDepthClipEnableFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkPipelineRasterizationDepthClipStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineRasterizationDepthClipStateCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_debug_utils
+        case VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkDebugUtilsMessengerCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkDebugUtilsMessengerCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_ANDROID_external_memory_android_hardware_buffer
+        case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID:
+        {
+            reservedmarshal_VkAndroidHardwareBufferUsageANDROID(vkStream, rootType, reinterpret_cast<const VkAndroidHardwareBufferUsageANDROID*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID:
+        {
+            reservedmarshal_VkAndroidHardwareBufferFormatPropertiesANDROID(vkStream, rootType, reinterpret_cast<const VkAndroidHardwareBufferFormatPropertiesANDROID*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID:
+        {
+            reservedmarshal_VkImportAndroidHardwareBufferInfoANDROID(vkStream, rootType, reinterpret_cast<const VkImportAndroidHardwareBufferInfoANDROID*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID:
+        {
+            reservedmarshal_VkExternalFormatANDROID(vkStream, rootType, reinterpret_cast<const VkExternalFormatANDROID*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_inline_uniform_block
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceInlineUniformBlockFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceInlineUniformBlockPropertiesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT:
+        {
+            reservedmarshal_VkWriteDescriptorSetInlineUniformBlockEXT(vkStream, rootType, reinterpret_cast<const VkWriteDescriptorSetInlineUniformBlockEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkDescriptorPoolInlineUniformBlockCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_sample_locations
+        case VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT:
+        {
+            reservedmarshal_VkSampleLocationsInfoEXT(vkStream, rootType, reinterpret_cast<const VkSampleLocationsInfoEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT:
+        {
+            reservedmarshal_VkRenderPassSampleLocationsBeginInfoEXT(vkStream, rootType, reinterpret_cast<const VkRenderPassSampleLocationsBeginInfoEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkPipelineSampleLocationsStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineSampleLocationsStateCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceSampleLocationsPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceSampleLocationsPropertiesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_blend_operation_advanced
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkPipelineColorBlendAdvancedStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineColorBlendAdvancedStateCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_coverage_to_color
+        case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV:
+        {
+            reservedmarshal_VkPipelineCoverageToColorStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineCoverageToColorStateCreateInfoNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_framebuffer_mixed_samples
+        case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV:
+        {
+            reservedmarshal_VkPipelineCoverageModulationStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineCoverageModulationStateCreateInfoNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_shader_sm_builtins
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderSMBuiltinsPropertiesNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderSMBuiltinsFeaturesNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+        case VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT:
+        {
+            reservedmarshal_VkDrmFormatModifierPropertiesListEXT(vkStream, rootType, reinterpret_cast<const VkDrmFormatModifierPropertiesListEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceImageDrmFormatModifierInfoEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkImageDrmFormatModifierListCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkImageDrmFormatModifierListCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkImageDrmFormatModifierExplicitCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkImageDrmFormatModifierExplicitCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_validation_cache
+        case VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkShaderModuleValidationCacheCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkShaderModuleValidationCacheCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_shading_rate_image
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV:
+        {
+            reservedmarshal_VkPipelineViewportShadingRateImageStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineViewportShadingRateImageStateCreateInfoNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceShadingRateImageFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShadingRateImageFeaturesNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceShadingRateImagePropertiesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShadingRateImagePropertiesNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV:
+        {
+            reservedmarshal_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineViewportCoarseSampleOrderStateCreateInfoNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_ray_tracing
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV:
+        {
+            reservedmarshal_VkWriteDescriptorSetAccelerationStructureNV(vkStream, rootType, reinterpret_cast<const VkWriteDescriptorSetAccelerationStructureNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceRayTracingPropertiesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceRayTracingPropertiesNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_representative_fragment_test
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV:
+        {
+            reservedmarshal_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineRepresentativeFragmentTestStateCreateInfoNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_filter_cubic
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceImageViewImageFormatInfoEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceImageViewImageFormatInfoEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT:
+        {
+            reservedmarshal_VkFilterCubicImageViewImageFormatPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkFilterCubicImageViewImageFormatPropertiesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_global_priority
+        case VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkDeviceQueueGlobalPriorityCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkDeviceQueueGlobalPriorityCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_external_memory_host
+        case VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT:
+        {
+            reservedmarshal_VkImportMemoryHostPointerInfoEXT(vkStream, rootType, reinterpret_cast<const VkImportMemoryHostPointerInfoEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceExternalMemoryHostPropertiesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_AMD_pipeline_compiler_control
+        case VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD:
+        {
+            reservedmarshal_VkPipelineCompilerControlCreateInfoAMD(vkStream, rootType, reinterpret_cast<const VkPipelineCompilerControlCreateInfoAMD*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_AMD_shader_core_properties
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD:
+        {
+            reservedmarshal_VkPhysicalDeviceShaderCorePropertiesAMD(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderCorePropertiesAMD*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_AMD_memory_overallocation_behavior
+        case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD:
+        {
+            reservedmarshal_VkDeviceMemoryOverallocationCreateInfoAMD(vkStream, rootType, reinterpret_cast<const VkDeviceMemoryOverallocationCreateInfoAMD*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_vertex_attribute_divisor
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkPipelineVertexInputDivisorStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineVertexInputDivisorStateCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_GGP_frame_token
+        case VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP:
+        {
+            reservedmarshal_VkPresentFrameTokenGGP(vkStream, rootType, reinterpret_cast<const VkPresentFrameTokenGGP*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+        case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkPipelineCreationFeedbackCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineCreationFeedbackCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_compute_shader_derivatives
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceComputeShaderDerivativesFeaturesNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_mesh_shader
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceMeshShaderFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceMeshShaderFeaturesNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceMeshShaderPropertiesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceMeshShaderPropertiesNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_shader_image_footprint
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceShaderImageFootprintFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderImageFootprintFeaturesNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_scissor_exclusive
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV:
+        {
+            reservedmarshal_VkPipelineViewportExclusiveScissorStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineViewportExclusiveScissorStateCreateInfoNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceExclusiveScissorFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceExclusiveScissorFeaturesNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_device_diagnostic_checkpoints
+        case VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV:
+        {
+            reservedmarshal_VkQueueFamilyCheckpointPropertiesNV(vkStream, rootType, reinterpret_cast<const VkQueueFamilyCheckpointPropertiesNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_INTEL_shader_integer_functions2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL:
+        {
+            reservedmarshal_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL:
+        {
+            reservedmarshal_VkQueryPoolPerformanceQueryCreateInfoINTEL(vkStream, rootType, reinterpret_cast<const VkQueryPoolPerformanceQueryCreateInfoINTEL*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_pci_bus_info
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT:
+        {
+            reservedmarshal_VkPhysicalDevicePCIBusInfoPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePCIBusInfoPropertiesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_AMD_display_native_hdr
+        case VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD:
+        {
+            reservedmarshal_VkDisplayNativeHdrSurfaceCapabilitiesAMD(vkStream, rootType, reinterpret_cast<const VkDisplayNativeHdrSurfaceCapabilitiesAMD*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD:
+        {
+            reservedmarshal_VkSwapchainDisplayNativeHdrCreateInfoAMD(vkStream, rootType, reinterpret_cast<const VkSwapchainDisplayNativeHdrCreateInfoAMD*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2:
+                {
+                    reservedmarshal_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension), ptr);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO:
+                {
+                    reservedmarshal_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension), ptr);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    reservedmarshal_VkImportColorBufferGOOGLE(vkStream, rootType, reinterpret_cast<const VkImportColorBufferGOOGLE*>(structExtension), ptr);
+                    break;
+                }
+                default:
+                {
+                    reservedmarshal_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension), ptr);
+                    break;
+                }
+            }
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2:
+                {
+                    reservedmarshal_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapPropertiesEXT*>(structExtension), ptr);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    reservedmarshal_VkImportPhysicalAddressGOOGLE(vkStream, rootType, reinterpret_cast<const VkImportPhysicalAddressGOOGLE*>(structExtension), ptr);
+                    break;
+                }
+                default:
+                {
+                    reservedmarshal_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMapPropertiesEXT*>(structExtension), ptr);
+                    break;
+                }
+            }
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT:
+        {
+            switch(rootType)
+            {
+                case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO:
+                {
+                    reservedmarshal_VkRenderPassFragmentDensityMapCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension), ptr);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2:
+                {
+                    reservedmarshal_VkRenderPassFragmentDensityMapCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension), ptr);
+                    break;
+                }
+                case VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO:
+                {
+                    reservedmarshal_VkImportBufferGOOGLE(vkStream, rootType, reinterpret_cast<const VkImportBufferGOOGLE*>(structExtension), ptr);
+                    break;
+                }
+                default:
+                {
+                    reservedmarshal_VkRenderPassFragmentDensityMapCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension), ptr);
+                    break;
+                }
+            }
+            break;
+        }
+#endif
+#ifdef VK_EXT_subgroup_size_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_AMD_shader_core_properties2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD:
+        {
+            reservedmarshal_VkPhysicalDeviceShaderCoreProperties2AMD(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderCoreProperties2AMD*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_AMD_device_coherent_memory
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD:
+        {
+            reservedmarshal_VkPhysicalDeviceCoherentMemoryFeaturesAMD(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceCoherentMemoryFeaturesAMD*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_memory_budget
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceMemoryBudgetPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceMemoryBudgetPropertiesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_memory_priority
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceMemoryPriorityFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceMemoryPriorityFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT:
+        {
+            reservedmarshal_VkMemoryPriorityAllocateInfoEXT(vkStream, rootType, reinterpret_cast<const VkMemoryPriorityAllocateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_buffer_device_address
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceBufferDeviceAddressFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkBufferDeviceAddressCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkBufferDeviceAddressCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_validation_features
+        case VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT:
+        {
+            reservedmarshal_VkValidationFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkValidationFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_cooperative_matrix
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceCooperativeMatrixFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceCooperativeMatrixFeaturesNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceCooperativeMatrixPropertiesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceCooperativeMatrixPropertiesNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceCoverageReductionModeFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceCoverageReductionModeFeaturesNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV:
+        {
+            reservedmarshal_VkPipelineCoverageReductionStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineCoverageReductionStateCreateInfoNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceYcbcrImageArraysFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT:
+        {
+            reservedmarshal_VkSurfaceFullScreenExclusiveInfoEXT(vkStream, rootType, reinterpret_cast<const VkSurfaceFullScreenExclusiveInfoEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT:
+        {
+            reservedmarshal_VkSurfaceCapabilitiesFullScreenExclusiveEXT(vkStream, rootType, reinterpret_cast<const VkSurfaceCapabilitiesFullScreenExclusiveEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT:
+        {
+            reservedmarshal_VkSurfaceFullScreenExclusiveWin32InfoEXT(vkStream, rootType, reinterpret_cast<const VkSurfaceFullScreenExclusiveWin32InfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_line_rasterization
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceLineRasterizationFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceLineRasterizationFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceLineRasterizationPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceLineRasterizationPropertiesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkPipelineRasterizationLineStateCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkPipelineRasterizationLineStateCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_atomic_float
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderAtomicFloatFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_index_type_uint8
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceIndexTypeUint8FeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceExtendedDynamicStateFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_device_generated_commands
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV:
+        {
+            reservedmarshal_VkGraphicsPipelineShaderGroupsCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkGraphicsPipelineShaderGroupsCreateInfoNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_QCOM_render_pass_transform
+        case VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM:
+        {
+            reservedmarshal_VkRenderPassTransformBeginInfoQCOM(vkStream, rootType, reinterpret_cast<const VkRenderPassTransformBeginInfoQCOM*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM:
+        {
+            reservedmarshal_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(vkStream, rootType, reinterpret_cast<const VkCommandBufferInheritanceRenderPassTransformInfoQCOM*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_device_memory_report
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDeviceMemoryReportFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkDeviceDeviceMemoryReportCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkDeviceDeviceMemoryReportCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_robustness2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceRobustness2FeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceRobustness2FeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceRobustness2PropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceRobustness2PropertiesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_custom_border_color
+        case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkSamplerCustomBorderColorCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkSamplerCustomBorderColorCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceCustomBorderColorPropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceCustomBorderColorPropertiesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceCustomBorderColorFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceCustomBorderColorFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_private_data
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDevicePrivateDataFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePrivateDataFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT:
+        {
+            reservedmarshal_VkDevicePrivateDataCreateInfoEXT(vkStream, rootType, reinterpret_cast<const VkDevicePrivateDataCreateInfoEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_device_diagnostics_config
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceDiagnosticsConfigFeaturesNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV:
+        {
+            reservedmarshal_VkDeviceDiagnosticsConfigCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkDeviceDiagnosticsConfigCreateInfoNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV:
+        {
+            reservedmarshal_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV:
+        {
+            reservedmarshal_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(vkStream, rootType, reinterpret_cast<const VkPipelineFragmentShadingRateEnumStateCreateInfoNV*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMap2FeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceFragmentDensityMap2PropertiesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+        case VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM:
+        {
+            reservedmarshal_VkCopyCommandTransformInfoQCOM(vkStream, rootType, reinterpret_cast<const VkCopyCommandTransformInfoQCOM*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_image_robustness
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDeviceImageRobustnessFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceImageRobustnessFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_EXT_4444_formats
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+        {
+            reservedmarshal_VkPhysicalDevice4444FormatsFeaturesEXT(vkStream, rootType, reinterpret_cast<const VkPhysicalDevice4444FormatsFeaturesEXT*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_GOOGLE_gfxstream
+        case VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE:
+        {
+            reservedmarshal_VkImportColorBufferGOOGLE(vkStream, rootType, reinterpret_cast<const VkImportColorBufferGOOGLE*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE:
+        {
+            reservedmarshal_VkImportBufferGOOGLE(vkStream, rootType, reinterpret_cast<const VkImportBufferGOOGLE*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE:
+        {
+            reservedmarshal_VkImportPhysicalAddressGOOGLE(vkStream, rootType, reinterpret_cast<const VkImportPhysicalAddressGOOGLE*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_acceleration_structure
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR:
+        {
+            reservedmarshal_VkWriteDescriptorSetAccelerationStructureKHR(vkStream, rootType, reinterpret_cast<const VkWriteDescriptorSetAccelerationStructureKHR*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR:
+        {
+            reservedmarshal_VkPhysicalDeviceAccelerationStructureFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceAccelerationStructureFeaturesKHR*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR:
+        {
+            reservedmarshal_VkPhysicalDeviceAccelerationStructurePropertiesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceAccelerationStructurePropertiesKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR:
+        {
+            reservedmarshal_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceRayTracingPipelineFeaturesKHR*>(structExtension), ptr);
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR:
+        {
+            reservedmarshal_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceRayTracingPipelinePropertiesKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+#ifdef VK_KHR_ray_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR:
+        {
+            reservedmarshal_VkPhysicalDeviceRayQueryFeaturesKHR(vkStream, rootType, reinterpret_cast<const VkPhysicalDeviceRayQueryFeaturesKHR*>(structExtension), ptr);
+            break;
+        }
+#endif
+        default:
+        {
+            // fatal; the switch is only taken if the extension struct is known
+            abort();
+        }
+    }
+}
+
+
+} // namespace goldfish_vk
diff --git a/system/vulkan_enc/goldfish_vk_reserved_marshaling_guest.h b/system/vulkan_enc/goldfish_vk_reserved_marshaling_guest.h
new file mode 100644
index 0000000..6984b87
--- /dev/null
+++ b/system/vulkan_enc/goldfish_vk_reserved_marshaling_guest.h
@@ -0,0 +1,4255 @@
+// Copyright (C) 2018 The Android Open Source Project
+// Copyright (C) 2018 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Autogenerated module goldfish_vk_reserved_marshaling_guest
+// (header) generated by android/android-emugl/host/libs/libOpenglRender/vulkan-registry/xml/genvk.py -registry android/android-emugl/host/libs/libOpenglRender/vulkan-registry/xml/vk.xml cereal -o android/android-emugl/host/libs/libOpenglRender/vulkan/cereal
+// Please do not modify directly;
+// re-run android/scripts/generate-vulkan-sources.sh,
+// or directly from Python by defining:
+// VULKAN_REGISTRY_XML_DIR : Directory containing genvk.py and vk.xml
+// CEREAL_OUTPUT_DIR: Where to put the generated sources.
+// python3 $VULKAN_REGISTRY_XML_DIR/genvk.py -registry $VULKAN_REGISTRY_XML_DIR/vk.xml cereal -o $CEREAL_OUTPUT_DIR
+
+#pragma once
+
+#include <vulkan/vulkan.h>
+
+
+#include "vk_platform_compat.h"
+
+#include "goldfish_vk_marshaling_guest.h"
+#include "goldfish_vk_private_defs.h"
+#include "VulkanStreamGuest.h"
+
+// Stuff we are not going to use but if included,
+// will cause compile errors. These are Android Vulkan
+// required extensions, but the approach will be to
+// implement them completely on the guest side.
+#undef VK_KHR_android_surface
+#undef VK_ANDROID_external_memory_android_hardware_buffer
+
+
+namespace goldfish_vk {
+
+#ifdef VK_VERSION_1_0
+void reservedmarshal_VkExtent2D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExtent2D* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExtent3D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExtent3D* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkOffset2D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkOffset2D* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkOffset3D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkOffset3D* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkRect2D(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRect2D* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBaseInStructure(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBaseInStructure* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBaseOutStructure(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBaseOutStructure* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBufferMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferMemoryBarrier* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDispatchIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDispatchIndirectCommand* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDrawIndexedIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrawIndexedIndirectCommand* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDrawIndirectCommand(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrawIndirectCommand* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageSubresourceRange(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageSubresourceRange* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageMemoryBarrier* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMemoryBarrier(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryBarrier* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAllocationCallbacks(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAllocationCallbacks* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkApplicationInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkApplicationInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFormatProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageFormatProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkInstanceCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkInstanceCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMemoryHeap(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryHeap* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMemoryType(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryType* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFeatures* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceLimits(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLimits* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceMemoryProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceSparseProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSparseProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkQueueFamilyProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkQueueFamilyProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDeviceQueueCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceQueueCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDeviceCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExtensionProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExtensionProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkLayerProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkLayerProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSubmitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubmitInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMappedMemoryRange(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMappedMemoryRange* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMemoryAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryAllocateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMemoryRequirements(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryRequirements* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSparseMemoryBind(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseMemoryBind* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSparseBufferMemoryBindInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseBufferMemoryBindInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSparseImageOpaqueMemoryBindInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageOpaqueMemoryBindInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageSubresource(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageSubresource* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSparseImageMemoryBind(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageMemoryBind* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSparseImageMemoryBindInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageMemoryBindInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBindSparseInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindSparseInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSparseImageFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageFormatProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSparseImageMemoryRequirements(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageMemoryRequirements* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkFenceCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFenceCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSemaphoreCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkEventCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkEventCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkQueryPoolCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkQueryPoolCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBufferCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBufferViewCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferViewCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSubresourceLayout(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubresourceLayout* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkComponentMapping(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkComponentMapping* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageViewCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageViewCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkShaderModuleCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkShaderModuleCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineCacheCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCacheCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSpecializationMapEntry(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSpecializationMapEntry* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSpecializationInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSpecializationInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineShaderStageCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineShaderStageCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkComputePipelineCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkComputePipelineCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkVertexInputBindingDescription(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkVertexInputBindingDescription* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkVertexInputAttributeDescription(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkVertexInputAttributeDescription* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineVertexInputStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineVertexInputStateCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineInputAssemblyStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineInputAssemblyStateCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineTessellationStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineTessellationStateCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkViewport(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkViewport* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineViewportStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportStateCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineRasterizationStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineMultisampleStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineMultisampleStateCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkStencilOpState(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkStencilOpState* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineDepthStencilStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineDepthStencilStateCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineColorBlendAttachmentState(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineColorBlendAttachmentState* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineColorBlendStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineColorBlendStateCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineDynamicStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineDynamicStateCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkGraphicsPipelineCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGraphicsPipelineCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPushConstantRange(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPushConstantRange* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineLayoutCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineLayoutCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSamplerCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSamplerCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkCopyDescriptorSet(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyDescriptorSet* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDescriptorBufferInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorBufferInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDescriptorImageInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorImageInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDescriptorPoolSize(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorPoolSize* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDescriptorPoolCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorPoolCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDescriptorSetAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetAllocateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDescriptorSetLayoutBinding(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutBinding* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDescriptorSetLayoutCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkWriteDescriptorSet(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWriteDescriptorSet* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAttachmentDescription(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentDescription* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAttachmentReference(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentReference* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkFramebufferCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFramebufferCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSubpassDescription(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassDescription* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSubpassDependency(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassDependency* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkRenderPassCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkCommandPoolCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCommandPoolCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkCommandBufferAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCommandBufferAllocateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkCommandBufferInheritanceInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkCommandBufferBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCommandBufferBeginInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBufferCopy(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferCopy* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageSubresourceLayers(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageSubresourceLayers* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBufferImageCopy(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferImageCopy* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkClearColorValue(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkClearColorValue* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkClearDepthStencilValue(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkClearDepthStencilValue* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkClearValue(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkClearValue* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkClearAttachment(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkClearAttachment* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkClearRect(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkClearRect* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageBlit(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageBlit* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageCopy(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageCopy* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageResolve(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageResolve* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkRenderPassBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassBeginInfo* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_VERSION_1_1
+void reservedmarshal_VkPhysicalDeviceSubgroupProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBindBufferMemoryInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindBufferMemoryInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBindImageMemoryInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindImageMemoryInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDevice16BitStorageFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevice16BitStorageFeatures* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMemoryDedicatedRequirements(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryDedicatedRequirements* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMemoryDedicatedAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryDedicatedAllocateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMemoryAllocateFlagsInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryAllocateFlagsInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDeviceGroupRenderPassBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceGroupRenderPassBeginInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDeviceGroupCommandBufferBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceGroupCommandBufferBeginInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDeviceGroupSubmitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceGroupSubmitInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDeviceGroupBindSparseInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceGroupBindSparseInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBindBufferMemoryDeviceGroupInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindBufferMemoryDeviceGroupInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBindImageMemoryDeviceGroupInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindImageMemoryDeviceGroupInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceGroupProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceGroupProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDeviceGroupDeviceCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceGroupDeviceCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBufferMemoryRequirementsInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferMemoryRequirementsInfo2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageMemoryRequirementsInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageMemoryRequirementsInfo2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageSparseMemoryRequirementsInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageSparseMemoryRequirementsInfo2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMemoryRequirements2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryRequirements2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSparseImageMemoryRequirements2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageMemoryRequirements2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceFeatures2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFeatures2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceProperties2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProperties2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkFormatProperties2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFormatProperties2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageFormatProperties2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageFormatProperties2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceImageFormatInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageFormatInfo2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkQueueFamilyProperties2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkQueueFamilyProperties2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceMemoryProperties2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryProperties2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSparseImageFormatProperties2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSparseImageFormatProperties2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceSparseImageFormatInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSparseImageFormatInfo2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDevicePointClippingProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePointClippingProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkInputAttachmentAspectReference(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkInputAttachmentAspectReference* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkRenderPassInputAttachmentAspectCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassInputAttachmentAspectCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageViewUsageCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageViewUsageCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineTessellationDomainOriginStateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineTessellationDomainOriginStateCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkRenderPassMultiviewCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassMultiviewCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceMultiviewFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMultiviewFeatures* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceMultiviewProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMultiviewProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceVariablePointersFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVariablePointersFeatures* forMarshaling,
+    uint8_t** ptr);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceVariablePointersFeatures, reservedmarshal_VkPhysicalDeviceVariablePointerFeatures);
+
+void reservedmarshal_VkPhysicalDeviceProtectedMemoryFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProtectedMemoryFeatures* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceProtectedMemoryProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceProtectedMemoryProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDeviceQueueInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceQueueInfo2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkProtectedSubmitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkProtectedSubmitInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSamplerYcbcrConversionCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSamplerYcbcrConversionCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSamplerYcbcrConversionInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSamplerYcbcrConversionInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBindImagePlaneMemoryInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindImagePlaneMemoryInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImagePlaneMemoryRequirementsInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImagePlaneMemoryRequirementsInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceSamplerYcbcrConversionFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSamplerYcbcrConversionFeatures* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSamplerYcbcrConversionImageFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSamplerYcbcrConversionImageFormatProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDescriptorUpdateTemplateEntry(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorUpdateTemplateEntry* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDescriptorUpdateTemplateCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorUpdateTemplateCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExternalMemoryProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalMemoryProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceExternalImageFormatInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalImageFormatInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExternalImageFormatProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalImageFormatProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceExternalBufferInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalBufferInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExternalBufferProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalBufferProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceIDProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceIDProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExternalMemoryImageCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalMemoryImageCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExternalMemoryBufferCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalMemoryBufferCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExportMemoryAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExportMemoryAllocateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceExternalFenceInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalFenceInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExternalFenceProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalFenceProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExportFenceCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExportFenceCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExportSemaphoreCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExportSemaphoreCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceExternalSemaphoreInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalSemaphoreInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExternalSemaphoreProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalSemaphoreProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceMaintenance3Properties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMaintenance3Properties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDescriptorSetLayoutSupport(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutSupport* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceShaderDrawParametersFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderDrawParametersFeatures* forMarshaling,
+    uint8_t** ptr);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceShaderDrawParametersFeatures, reservedmarshal_VkPhysicalDeviceShaderDrawParameterFeatures);
+
+#endif
+#ifdef VK_VERSION_1_2
+void reservedmarshal_VkPhysicalDeviceVulkan11Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan11Features* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceVulkan11Properties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan11Properties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceVulkan12Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan12Features* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkConformanceVersion(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkConformanceVersion* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceVulkan12Properties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkan12Properties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageFormatListCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageFormatListCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAttachmentDescription2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentDescription2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAttachmentReference2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentReference2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSubpassDescription2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassDescription2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSubpassDependency2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassDependency2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkRenderPassCreateInfo2(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassCreateInfo2* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSubpassBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassBeginInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSubpassEndInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassEndInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDevice8BitStorageFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevice8BitStorageFeatures* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceDriverProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDriverProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceShaderAtomicInt64Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderAtomicInt64Features* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceShaderFloat16Int8Features(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderFloat16Int8Features* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceFloatControlsProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFloatControlsProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDescriptorSetLayoutBindingFlagsCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetLayoutBindingFlagsCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceDescriptorIndexingFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDescriptorIndexingFeatures* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceDescriptorIndexingProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDescriptorIndexingProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDescriptorSetVariableDescriptorCountAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetVariableDescriptorCountAllocateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDescriptorSetVariableDescriptorCountLayoutSupport(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorSetVariableDescriptorCountLayoutSupport* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSubpassDescriptionDepthStencilResolve(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassDescriptionDepthStencilResolve* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceDepthStencilResolveProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDepthStencilResolveProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceScalarBlockLayoutFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceScalarBlockLayoutFeatures* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageStencilUsageCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageStencilUsageCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSamplerReductionModeCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSamplerReductionModeCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceSamplerFilterMinmaxProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSamplerFilterMinmaxProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceVulkanMemoryModelFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVulkanMemoryModelFeatures* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceImagelessFramebufferFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImagelessFramebufferFeatures* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkFramebufferAttachmentImageInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFramebufferAttachmentImageInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkFramebufferAttachmentsCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFramebufferAttachmentsCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkRenderPassAttachmentBeginInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassAttachmentBeginInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceUniformBufferStandardLayoutFeatures* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAttachmentReferenceStencilLayout(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentReferenceStencilLayout* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAttachmentDescriptionStencilLayout(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentDescriptionStencilLayout* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceHostQueryResetFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceHostQueryResetFeatures* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceTimelineSemaphoreFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTimelineSemaphoreFeatures* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceTimelineSemaphoreProperties(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTimelineSemaphoreProperties* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSemaphoreTypeCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreTypeCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkTimelineSemaphoreSubmitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkTimelineSemaphoreSubmitInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSemaphoreWaitInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreWaitInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSemaphoreSignalInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreSignalInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceBufferDeviceAddressFeatures(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBufferDeviceAddressFeatures* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBufferDeviceAddressInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferDeviceAddressInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBufferOpaqueCaptureAddressCreateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferOpaqueCaptureAddressCreateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMemoryOpaqueCaptureAddressAllocateInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryOpaqueCaptureAddressAllocateInfo* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDeviceMemoryOpaqueCaptureAddressInfo(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceMemoryOpaqueCaptureAddressInfo* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_surface
+void reservedmarshal_VkSurfaceCapabilitiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceCapabilitiesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSurfaceFormatKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceFormatKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_swapchain
+void reservedmarshal_VkSwapchainCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSwapchainCreateInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPresentInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPresentInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageSwapchainCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageSwapchainCreateInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBindImageMemorySwapchainInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindImageMemorySwapchainInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAcquireNextImageInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAcquireNextImageInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDeviceGroupPresentCapabilitiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceGroupPresentCapabilitiesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDeviceGroupPresentInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceGroupPresentInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDeviceGroupSwapchainCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceGroupSwapchainCreateInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_display
+void reservedmarshal_VkDisplayModeParametersKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayModeParametersKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDisplayModeCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayModeCreateInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDisplayModePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayModePropertiesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDisplayPlaneCapabilitiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPlaneCapabilitiesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDisplayPlanePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPlanePropertiesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDisplayPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPropertiesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDisplaySurfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplaySurfaceCreateInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_display_swapchain
+void reservedmarshal_VkDisplayPresentInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPresentInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_xlib_surface
+void reservedmarshal_VkXlibSurfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkXlibSurfaceCreateInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_xcb_surface
+void reservedmarshal_VkXcbSurfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkXcbSurfaceCreateInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_wayland_surface
+void reservedmarshal_VkWaylandSurfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWaylandSurfaceCreateInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_android_surface
+void reservedmarshal_VkAndroidSurfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAndroidSurfaceCreateInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_win32_surface
+void reservedmarshal_VkWin32SurfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWin32SurfaceCreateInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_sampler_mirror_clamp_to_edge
+#endif
+#ifdef VK_KHR_multiview
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkRenderPassMultiviewCreateInfo, reservedmarshal_VkRenderPassMultiviewCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceMultiviewFeatures, reservedmarshal_VkPhysicalDeviceMultiviewFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceMultiviewProperties, reservedmarshal_VkPhysicalDeviceMultiviewPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_get_physical_device_properties2
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceFeatures2, reservedmarshal_VkPhysicalDeviceFeatures2KHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceProperties2, reservedmarshal_VkPhysicalDeviceProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkFormatProperties2, reservedmarshal_VkFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkImageFormatProperties2, reservedmarshal_VkImageFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceImageFormatInfo2, reservedmarshal_VkPhysicalDeviceImageFormatInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkQueueFamilyProperties2, reservedmarshal_VkQueueFamilyProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceMemoryProperties2, reservedmarshal_VkPhysicalDeviceMemoryProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkSparseImageFormatProperties2, reservedmarshal_VkSparseImageFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceSparseImageFormatInfo2, reservedmarshal_VkPhysicalDeviceSparseImageFormatInfo2KHR);
+
+#endif
+#ifdef VK_KHR_device_group
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkMemoryAllocateFlagsInfo, reservedmarshal_VkMemoryAllocateFlagsInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkDeviceGroupRenderPassBeginInfo, reservedmarshal_VkDeviceGroupRenderPassBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkDeviceGroupCommandBufferBeginInfo, reservedmarshal_VkDeviceGroupCommandBufferBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkDeviceGroupSubmitInfo, reservedmarshal_VkDeviceGroupSubmitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkDeviceGroupBindSparseInfo, reservedmarshal_VkDeviceGroupBindSparseInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkBindBufferMemoryDeviceGroupInfo, reservedmarshal_VkBindBufferMemoryDeviceGroupInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkBindImageMemoryDeviceGroupInfo, reservedmarshal_VkBindImageMemoryDeviceGroupInfoKHR);
+
+#endif
+#ifdef VK_KHR_shader_draw_parameters
+#endif
+#ifdef VK_KHR_maintenance1
+#endif
+#ifdef VK_KHR_device_group_creation
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceGroupProperties, reservedmarshal_VkPhysicalDeviceGroupPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkDeviceGroupDeviceCreateInfo, reservedmarshal_VkDeviceGroupDeviceCreateInfoKHR);
+
+#endif
+#ifdef VK_KHR_external_memory_capabilities
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkExternalMemoryProperties, reservedmarshal_VkExternalMemoryPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceExternalImageFormatInfo, reservedmarshal_VkPhysicalDeviceExternalImageFormatInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkExternalImageFormatProperties, reservedmarshal_VkExternalImageFormatPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceExternalBufferInfo, reservedmarshal_VkPhysicalDeviceExternalBufferInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkExternalBufferProperties, reservedmarshal_VkExternalBufferPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceIDProperties, reservedmarshal_VkPhysicalDeviceIDPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_external_memory
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkExternalMemoryImageCreateInfo, reservedmarshal_VkExternalMemoryImageCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkExternalMemoryBufferCreateInfo, reservedmarshal_VkExternalMemoryBufferCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkExportMemoryAllocateInfo, reservedmarshal_VkExportMemoryAllocateInfoKHR);
+
+#endif
+#ifdef VK_KHR_external_memory_win32
+void reservedmarshal_VkImportMemoryWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportMemoryWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExportMemoryWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExportMemoryWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMemoryWin32HandlePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryWin32HandlePropertiesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMemoryGetWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryGetWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_external_memory_fd
+void reservedmarshal_VkImportMemoryFdInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportMemoryFdInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMemoryFdPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryFdPropertiesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMemoryGetFdInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryGetFdInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_win32_keyed_mutex
+void reservedmarshal_VkWin32KeyedMutexAcquireReleaseInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWin32KeyedMutexAcquireReleaseInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_external_semaphore_capabilities
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceExternalSemaphoreInfo, reservedmarshal_VkPhysicalDeviceExternalSemaphoreInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkExternalSemaphoreProperties, reservedmarshal_VkExternalSemaphorePropertiesKHR);
+
+#endif
+#ifdef VK_KHR_external_semaphore
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkExportSemaphoreCreateInfo, reservedmarshal_VkExportSemaphoreCreateInfoKHR);
+
+#endif
+#ifdef VK_KHR_external_semaphore_win32
+void reservedmarshal_VkImportSemaphoreWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportSemaphoreWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExportSemaphoreWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExportSemaphoreWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkD3D12FenceSubmitInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkD3D12FenceSubmitInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSemaphoreGetWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreGetWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_external_semaphore_fd
+void reservedmarshal_VkImportSemaphoreFdInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportSemaphoreFdInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSemaphoreGetFdInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSemaphoreGetFdInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_push_descriptor
+void reservedmarshal_VkPhysicalDevicePushDescriptorPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePushDescriptorPropertiesKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_shader_float16_int8
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceShaderFloat16Int8Features, reservedmarshal_VkPhysicalDeviceShaderFloat16Int8FeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceShaderFloat16Int8Features, reservedmarshal_VkPhysicalDeviceFloat16Int8FeaturesKHR);
+
+#endif
+#ifdef VK_KHR_16bit_storage
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDevice16BitStorageFeatures, reservedmarshal_VkPhysicalDevice16BitStorageFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_incremental_present
+void reservedmarshal_VkRectLayerKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRectLayerKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPresentRegionKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPresentRegionKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPresentRegionsKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPresentRegionsKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_descriptor_update_template
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkDescriptorUpdateTemplateEntry, reservedmarshal_VkDescriptorUpdateTemplateEntryKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkDescriptorUpdateTemplateCreateInfo, reservedmarshal_VkDescriptorUpdateTemplateCreateInfoKHR);
+
+#endif
+#ifdef VK_KHR_imageless_framebuffer
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceImagelessFramebufferFeatures, reservedmarshal_VkPhysicalDeviceImagelessFramebufferFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkFramebufferAttachmentsCreateInfo, reservedmarshal_VkFramebufferAttachmentsCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkFramebufferAttachmentImageInfo, reservedmarshal_VkFramebufferAttachmentImageInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkRenderPassAttachmentBeginInfo, reservedmarshal_VkRenderPassAttachmentBeginInfoKHR);
+
+#endif
+#ifdef VK_KHR_create_renderpass2
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkRenderPassCreateInfo2, reservedmarshal_VkRenderPassCreateInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkAttachmentDescription2, reservedmarshal_VkAttachmentDescription2KHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkAttachmentReference2, reservedmarshal_VkAttachmentReference2KHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkSubpassDescription2, reservedmarshal_VkSubpassDescription2KHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkSubpassDependency2, reservedmarshal_VkSubpassDependency2KHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkSubpassBeginInfo, reservedmarshal_VkSubpassBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkSubpassEndInfo, reservedmarshal_VkSubpassEndInfoKHR);
+
+#endif
+#ifdef VK_KHR_shared_presentable_image
+void reservedmarshal_VkSharedPresentSurfaceCapabilitiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSharedPresentSurfaceCapabilitiesKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_external_fence_capabilities
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceExternalFenceInfo, reservedmarshal_VkPhysicalDeviceExternalFenceInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkExternalFenceProperties, reservedmarshal_VkExternalFencePropertiesKHR);
+
+#endif
+#ifdef VK_KHR_external_fence
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkExportFenceCreateInfo, reservedmarshal_VkExportFenceCreateInfoKHR);
+
+#endif
+#ifdef VK_KHR_external_fence_win32
+void reservedmarshal_VkImportFenceWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportFenceWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExportFenceWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExportFenceWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkFenceGetWin32HandleInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFenceGetWin32HandleInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_external_fence_fd
+void reservedmarshal_VkImportFenceFdInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportFenceFdInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkFenceGetFdInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFenceGetFdInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_performance_query
+void reservedmarshal_VkPhysicalDevicePerformanceQueryFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePerformanceQueryFeaturesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDevicePerformanceQueryPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePerformanceQueryPropertiesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPerformanceCounterKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceCounterKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPerformanceCounterDescriptionKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceCounterDescriptionKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkQueryPoolPerformanceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkQueryPoolPerformanceCreateInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPerformanceCounterResultKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceCounterResultKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAcquireProfilingLockInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAcquireProfilingLockInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPerformanceQuerySubmitInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceQuerySubmitInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_maintenance2
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDevicePointClippingProperties, reservedmarshal_VkPhysicalDevicePointClippingPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkRenderPassInputAttachmentAspectCreateInfo, reservedmarshal_VkRenderPassInputAttachmentAspectCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkInputAttachmentAspectReference, reservedmarshal_VkInputAttachmentAspectReferenceKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkImageViewUsageCreateInfo, reservedmarshal_VkImageViewUsageCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPipelineTessellationDomainOriginStateCreateInfo, reservedmarshal_VkPipelineTessellationDomainOriginStateCreateInfoKHR);
+
+#endif
+#ifdef VK_KHR_get_surface_capabilities2
+void reservedmarshal_VkPhysicalDeviceSurfaceInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSurfaceInfo2KHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSurfaceCapabilities2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceCapabilities2KHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSurfaceFormat2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceFormat2KHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_variable_pointers
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceVariablePointersFeatures, reservedmarshal_VkPhysicalDeviceVariablePointerFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceVariablePointersFeatures, reservedmarshal_VkPhysicalDeviceVariablePointersFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_get_display_properties2
+void reservedmarshal_VkDisplayProperties2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayProperties2KHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDisplayPlaneProperties2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPlaneProperties2KHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDisplayModeProperties2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayModeProperties2KHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDisplayPlaneInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPlaneInfo2KHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDisplayPlaneCapabilities2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPlaneCapabilities2KHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_dedicated_allocation
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkMemoryDedicatedRequirements, reservedmarshal_VkMemoryDedicatedRequirementsKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkMemoryDedicatedAllocateInfo, reservedmarshal_VkMemoryDedicatedAllocateInfoKHR);
+
+#endif
+#ifdef VK_KHR_storage_buffer_storage_class
+#endif
+#ifdef VK_KHR_relaxed_block_layout
+#endif
+#ifdef VK_KHR_get_memory_requirements2
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkBufferMemoryRequirementsInfo2, reservedmarshal_VkBufferMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkImageMemoryRequirementsInfo2, reservedmarshal_VkImageMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkImageSparseMemoryRequirementsInfo2, reservedmarshal_VkImageSparseMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkMemoryRequirements2, reservedmarshal_VkMemoryRequirements2KHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkSparseImageMemoryRequirements2, reservedmarshal_VkSparseImageMemoryRequirements2KHR);
+
+#endif
+#ifdef VK_KHR_image_format_list
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkImageFormatListCreateInfo, reservedmarshal_VkImageFormatListCreateInfoKHR);
+
+#endif
+#ifdef VK_KHR_sampler_ycbcr_conversion
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkSamplerYcbcrConversionCreateInfo, reservedmarshal_VkSamplerYcbcrConversionCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkSamplerYcbcrConversionInfo, reservedmarshal_VkSamplerYcbcrConversionInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkBindImagePlaneMemoryInfo, reservedmarshal_VkBindImagePlaneMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkImagePlaneMemoryRequirementsInfo, reservedmarshal_VkImagePlaneMemoryRequirementsInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceSamplerYcbcrConversionFeatures, reservedmarshal_VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkSamplerYcbcrConversionImageFormatProperties, reservedmarshal_VkSamplerYcbcrConversionImageFormatPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_bind_memory2
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkBindBufferMemoryInfo, reservedmarshal_VkBindBufferMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkBindImageMemoryInfo, reservedmarshal_VkBindImageMemoryInfoKHR);
+
+#endif
+#ifdef VK_KHR_portability_subset
+void reservedmarshal_VkPhysicalDevicePortabilitySubsetFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePortabilitySubsetFeaturesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDevicePortabilitySubsetPropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePortabilitySubsetPropertiesKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_maintenance3
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceMaintenance3Properties, reservedmarshal_VkPhysicalDeviceMaintenance3PropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkDescriptorSetLayoutSupport, reservedmarshal_VkDescriptorSetLayoutSupportKHR);
+
+#endif
+#ifdef VK_KHR_draw_indirect_count
+#endif
+#ifdef VK_KHR_shader_subgroup_extended_types
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures, reservedmarshal_VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_8bit_storage
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDevice8BitStorageFeatures, reservedmarshal_VkPhysicalDevice8BitStorageFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_shader_atomic_int64
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceShaderAtomicInt64Features, reservedmarshal_VkPhysicalDeviceShaderAtomicInt64FeaturesKHR);
+
+#endif
+#ifdef VK_KHR_shader_clock
+void reservedmarshal_VkPhysicalDeviceShaderClockFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderClockFeaturesKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_driver_properties
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkConformanceVersion, reservedmarshal_VkConformanceVersionKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceDriverProperties, reservedmarshal_VkPhysicalDeviceDriverPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_shader_float_controls
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceFloatControlsProperties, reservedmarshal_VkPhysicalDeviceFloatControlsPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkSubpassDescriptionDepthStencilResolve, reservedmarshal_VkSubpassDescriptionDepthStencilResolveKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceDepthStencilResolveProperties, reservedmarshal_VkPhysicalDeviceDepthStencilResolvePropertiesKHR);
+
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceTimelineSemaphoreFeatures, reservedmarshal_VkPhysicalDeviceTimelineSemaphoreFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceTimelineSemaphoreProperties, reservedmarshal_VkPhysicalDeviceTimelineSemaphorePropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkSemaphoreTypeCreateInfo, reservedmarshal_VkSemaphoreTypeCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkTimelineSemaphoreSubmitInfo, reservedmarshal_VkTimelineSemaphoreSubmitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkSemaphoreWaitInfo, reservedmarshal_VkSemaphoreWaitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkSemaphoreSignalInfo, reservedmarshal_VkSemaphoreSignalInfoKHR);
+
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceVulkanMemoryModelFeatures, reservedmarshal_VkPhysicalDeviceVulkanMemoryModelFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+void reservedmarshal_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+void reservedmarshal_VkFragmentShadingRateAttachmentInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFragmentShadingRateAttachmentInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineFragmentShadingRateStateCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineFragmentShadingRateStateCreateInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateFeaturesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRatePropertiesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceFragmentShadingRateKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+void reservedmarshal_VkSurfaceProtectedCapabilitiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceProtectedCapabilitiesKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures, reservedmarshal_VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkAttachmentReferenceStencilLayout, reservedmarshal_VkAttachmentReferenceStencilLayoutKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkAttachmentDescriptionStencilLayout, reservedmarshal_VkAttachmentDescriptionStencilLayoutKHR);
+
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceUniformBufferStandardLayoutFeatures, reservedmarshal_VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_buffer_device_address
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceBufferDeviceAddressFeatures, reservedmarshal_VkPhysicalDeviceBufferDeviceAddressFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkBufferDeviceAddressInfo, reservedmarshal_VkBufferDeviceAddressInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkBufferOpaqueCaptureAddressCreateInfo, reservedmarshal_VkBufferOpaqueCaptureAddressCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkMemoryOpaqueCaptureAddressAllocateInfo, reservedmarshal_VkMemoryOpaqueCaptureAddressAllocateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkDeviceMemoryOpaqueCaptureAddressInfo, reservedmarshal_VkDeviceMemoryOpaqueCaptureAddressInfoKHR);
+
+#endif
+#ifdef VK_KHR_deferred_host_operations
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+void reservedmarshal_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineExecutablePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutablePropertiesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineExecutableInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutableInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineExecutableStatisticValueKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutableStatisticValueKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineExecutableStatisticKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutableStatisticKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineExecutableInternalRepresentationKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineExecutableInternalRepresentationKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_pipeline_library
+void reservedmarshal_VkPipelineLibraryCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineLibraryCreateInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+void reservedmarshal_VkBufferCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferCopy2KHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkCopyBufferInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyBufferInfo2KHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageCopy2KHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkCopyImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyImageInfo2KHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBufferImageCopy2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferImageCopy2KHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkCopyBufferToImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyBufferToImageInfo2KHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkCopyImageToBufferInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyImageToBufferInfo2KHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageBlit2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageBlit2KHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBlitImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBlitImageInfo2KHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageResolve2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageResolve2KHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkResolveImageInfo2KHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkResolveImageInfo2KHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_ANDROID_native_buffer
+void reservedmarshal_VkNativeBufferANDROID(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkNativeBufferANDROID* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_debug_report
+void reservedmarshal_VkDebugReportCallbackCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugReportCallbackCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_glsl_shader
+#endif
+#ifdef VK_EXT_depth_range_unrestricted
+#endif
+#ifdef VK_IMG_filter_cubic
+#endif
+#ifdef VK_AMD_rasterization_order
+void reservedmarshal_VkPipelineRasterizationStateRasterizationOrderAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateRasterizationOrderAMD* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_AMD_shader_trinary_minmax
+#endif
+#ifdef VK_AMD_shader_explicit_vertex_parameter
+#endif
+#ifdef VK_EXT_debug_marker
+void reservedmarshal_VkDebugMarkerObjectNameInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugMarkerObjectNameInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDebugMarkerObjectTagInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugMarkerObjectTagInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDebugMarkerMarkerInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugMarkerMarkerInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_AMD_gcn_shader
+#endif
+#ifdef VK_NV_dedicated_allocation
+void reservedmarshal_VkDedicatedAllocationImageCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDedicatedAllocationImageCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDedicatedAllocationBufferCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDedicatedAllocationBufferCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDedicatedAllocationMemoryAllocateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDedicatedAllocationMemoryAllocateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_transform_feedback
+void reservedmarshal_VkPhysicalDeviceTransformFeedbackFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTransformFeedbackFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceTransformFeedbackPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTransformFeedbackPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineRasterizationStateStreamCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationStateStreamCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NVX_image_view_handle
+void reservedmarshal_VkImageViewHandleInfoNVX(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageViewHandleInfoNVX* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageViewAddressPropertiesNVX(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageViewAddressPropertiesNVX* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_AMD_draw_indirect_count
+#endif
+#ifdef VK_AMD_negative_viewport_height
+#endif
+#ifdef VK_AMD_gpu_shader_half_float
+#endif
+#ifdef VK_AMD_shader_ballot
+#endif
+#ifdef VK_AMD_texture_gather_bias_lod
+void reservedmarshal_VkTextureLODGatherFormatPropertiesAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkTextureLODGatherFormatPropertiesAMD* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_AMD_shader_info
+void reservedmarshal_VkShaderResourceUsageAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkShaderResourceUsageAMD* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkShaderStatisticsInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkShaderStatisticsInfoAMD* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_AMD_shader_image_load_store_lod
+#endif
+#ifdef VK_GGP_stream_descriptor_surface
+void reservedmarshal_VkStreamDescriptorSurfaceCreateInfoGGP(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkStreamDescriptorSurfaceCreateInfoGGP* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_corner_sampled_image
+void reservedmarshal_VkPhysicalDeviceCornerSampledImageFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCornerSampledImageFeaturesNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_IMG_format_pvrtc
+#endif
+#ifdef VK_NV_external_memory_capabilities
+void reservedmarshal_VkExternalImageFormatPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalImageFormatPropertiesNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_external_memory
+void reservedmarshal_VkExternalMemoryImageCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalMemoryImageCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExportMemoryAllocateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExportMemoryAllocateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_external_memory_win32
+void reservedmarshal_VkImportMemoryWin32HandleInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportMemoryWin32HandleInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExportMemoryWin32HandleInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExportMemoryWin32HandleInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_win32_keyed_mutex
+void reservedmarshal_VkWin32KeyedMutexAcquireReleaseInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWin32KeyedMutexAcquireReleaseInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_validation_flags
+void reservedmarshal_VkValidationFlagsEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkValidationFlagsEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NN_vi_surface
+void reservedmarshal_VkViSurfaceCreateInfoNN(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkViSurfaceCreateInfoNN* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_shader_subgroup_ballot
+#endif
+#ifdef VK_EXT_shader_subgroup_vote
+#endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+void reservedmarshal_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_astc_decode_mode
+void reservedmarshal_VkImageViewASTCDecodeModeEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageViewASTCDecodeModeEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceASTCDecodeFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceASTCDecodeFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_conditional_rendering
+void reservedmarshal_VkConditionalRenderingBeginInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkConditionalRenderingBeginInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceConditionalRenderingFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceConditionalRenderingFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkCommandBufferInheritanceConditionalRenderingInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceConditionalRenderingInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_clip_space_w_scaling
+void reservedmarshal_VkViewportWScalingNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkViewportWScalingNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineViewportWScalingStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportWScalingStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_direct_mode_display
+#endif
+#ifdef VK_EXT_acquire_xlib_display
+#endif
+#ifdef VK_EXT_display_surface_counter
+void reservedmarshal_VkSurfaceCapabilities2EXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceCapabilities2EXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_display_control
+void reservedmarshal_VkDisplayPowerInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayPowerInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDeviceEventInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceEventInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDisplayEventInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayEventInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSwapchainCounterCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSwapchainCounterCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_GOOGLE_display_timing
+void reservedmarshal_VkRefreshCycleDurationGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRefreshCycleDurationGOOGLE* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPastPresentationTimingGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPastPresentationTimingGOOGLE* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPresentTimeGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPresentTimeGOOGLE* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPresentTimesInfoGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPresentTimesInfoGOOGLE* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_sample_mask_override_coverage
+#endif
+#ifdef VK_NV_geometry_shader_passthrough
+#endif
+#ifdef VK_NV_viewport_array2
+#endif
+#ifdef VK_NVX_multiview_per_view_attributes
+void reservedmarshal_VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_viewport_swizzle
+void reservedmarshal_VkViewportSwizzleNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkViewportSwizzleNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineViewportSwizzleStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportSwizzleStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_discard_rectangles
+void reservedmarshal_VkPhysicalDeviceDiscardRectanglePropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDiscardRectanglePropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineDiscardRectangleStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineDiscardRectangleStateCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_conservative_rasterization
+void reservedmarshal_VkPhysicalDeviceConservativeRasterizationPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceConservativeRasterizationPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineRasterizationConservativeStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationConservativeStateCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_depth_clip_enable
+void reservedmarshal_VkPhysicalDeviceDepthClipEnableFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDepthClipEnableFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineRasterizationDepthClipStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationDepthClipStateCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_swapchain_colorspace
+#endif
+#ifdef VK_EXT_hdr_metadata
+void reservedmarshal_VkXYColorEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkXYColorEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkHdrMetadataEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkHdrMetadataEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_MVK_ios_surface
+void reservedmarshal_VkIOSSurfaceCreateInfoMVK(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkIOSSurfaceCreateInfoMVK* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_MVK_macos_surface
+void reservedmarshal_VkMacOSSurfaceCreateInfoMVK(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMacOSSurfaceCreateInfoMVK* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_MVK_moltenvk
+#endif
+#ifdef VK_EXT_external_memory_dma_buf
+#endif
+#ifdef VK_EXT_queue_family_foreign
+#endif
+#ifdef VK_EXT_debug_utils
+void reservedmarshal_VkDebugUtilsLabelEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugUtilsLabelEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDebugUtilsObjectNameInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugUtilsObjectNameInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDebugUtilsMessengerCallbackDataEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugUtilsMessengerCallbackDataEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDebugUtilsMessengerCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugUtilsMessengerCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDebugUtilsObjectTagInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDebugUtilsObjectTagInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_ANDROID_external_memory_android_hardware_buffer
+void reservedmarshal_VkAndroidHardwareBufferUsageANDROID(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAndroidHardwareBufferUsageANDROID* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAndroidHardwareBufferPropertiesANDROID(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAndroidHardwareBufferPropertiesANDROID* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAndroidHardwareBufferFormatPropertiesANDROID(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAndroidHardwareBufferFormatPropertiesANDROID* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImportAndroidHardwareBufferInfoANDROID(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportAndroidHardwareBufferInfoANDROID* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMemoryGetAndroidHardwareBufferInfoANDROID(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryGetAndroidHardwareBufferInfoANDROID* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkExternalFormatANDROID(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkExternalFormatANDROID* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_sampler_filter_minmax
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkSamplerReductionModeCreateInfo, reservedmarshal_VkSamplerReductionModeCreateInfoEXT);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceSamplerFilterMinmaxProperties, reservedmarshal_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT);
+
+#endif
+#ifdef VK_AMD_gpu_shader_int16
+#endif
+#ifdef VK_AMD_mixed_attachment_samples
+#endif
+#ifdef VK_AMD_shader_fragment_mask
+#endif
+#ifdef VK_EXT_inline_uniform_block
+void reservedmarshal_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceInlineUniformBlockFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceInlineUniformBlockPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkWriteDescriptorSetInlineUniformBlockEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetInlineUniformBlockEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDescriptorPoolInlineUniformBlockCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_shader_stencil_export
+#endif
+#ifdef VK_EXT_sample_locations
+void reservedmarshal_VkSampleLocationEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSampleLocationEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSampleLocationsInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSampleLocationsInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAttachmentSampleLocationsEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAttachmentSampleLocationsEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSubpassSampleLocationsEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSubpassSampleLocationsEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkRenderPassSampleLocationsBeginInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassSampleLocationsBeginInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineSampleLocationsStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineSampleLocationsStateCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceSampleLocationsPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSampleLocationsPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMultisamplePropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMultisamplePropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_blend_operation_advanced
+void reservedmarshal_VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineColorBlendAdvancedStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineColorBlendAdvancedStateCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_fragment_coverage_to_color
+void reservedmarshal_VkPipelineCoverageToColorStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCoverageToColorStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_framebuffer_mixed_samples
+void reservedmarshal_VkPipelineCoverageModulationStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCoverageModulationStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_fill_rectangle
+#endif
+#ifdef VK_NV_shader_sm_builtins
+void reservedmarshal_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_post_depth_coverage
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+void reservedmarshal_VkDrmFormatModifierPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrmFormatModifierPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDrmFormatModifierPropertiesListEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrmFormatModifierPropertiesListEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageDrmFormatModifierInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageDrmFormatModifierListCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierListCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageDrmFormatModifierExplicitCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierExplicitCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImageDrmFormatModifierPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImageDrmFormatModifierPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_validation_cache
+void reservedmarshal_VkValidationCacheCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkValidationCacheCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkShaderModuleValidationCacheCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkShaderModuleValidationCacheCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_descriptor_indexing
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkDescriptorSetLayoutBindingFlagsCreateInfo, reservedmarshal_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceDescriptorIndexingFeatures, reservedmarshal_VkPhysicalDeviceDescriptorIndexingFeaturesEXT);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceDescriptorIndexingProperties, reservedmarshal_VkPhysicalDeviceDescriptorIndexingPropertiesEXT);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkDescriptorSetVariableDescriptorCountAllocateInfo, reservedmarshal_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkDescriptorSetVariableDescriptorCountLayoutSupport, reservedmarshal_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT);
+
+#endif
+#ifdef VK_EXT_shader_viewport_index_layer
+#endif
+#ifdef VK_NV_shading_rate_image
+void reservedmarshal_VkShadingRatePaletteNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkShadingRatePaletteNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineViewportShadingRateImageStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportShadingRateImageStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceShadingRateImageFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShadingRateImageFeaturesNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceShadingRateImagePropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShadingRateImagePropertiesNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkCoarseSampleLocationNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCoarseSampleLocationNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkCoarseSampleOrderCustomNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCoarseSampleOrderCustomNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_ray_tracing
+void reservedmarshal_VkRayTracingShaderGroupCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingShaderGroupCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkRayTracingPipelineCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingPipelineCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkGeometryTrianglesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeometryTrianglesNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkGeometryAABBNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeometryAABBNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkGeometryDataNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeometryDataNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkGeometryNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeometryNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAccelerationStructureInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAccelerationStructureCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBindAccelerationStructureMemoryInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindAccelerationStructureMemoryInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkWriteDescriptorSetAccelerationStructureNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetAccelerationStructureNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAccelerationStructureMemoryRequirementsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureMemoryRequirementsInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceRayTracingPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPropertiesNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkTransformMatrixKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkTransformMatrixKHR* forMarshaling,
+    uint8_t** ptr);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkTransformMatrixKHR, reservedmarshal_VkTransformMatrixNV);
+
+void reservedmarshal_VkAabbPositionsKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAabbPositionsKHR* forMarshaling,
+    uint8_t** ptr);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkAabbPositionsKHR, reservedmarshal_VkAabbPositionsNV);
+
+void reservedmarshal_VkAccelerationStructureInstanceKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureInstanceKHR* forMarshaling,
+    uint8_t** ptr);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkAccelerationStructureInstanceKHR, reservedmarshal_VkAccelerationStructureInstanceNV);
+
+#endif
+#ifdef VK_NV_representative_fragment_test
+void reservedmarshal_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRepresentativeFragmentTestStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_filter_cubic
+void reservedmarshal_VkPhysicalDeviceImageViewImageFormatInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageViewImageFormatInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkFilterCubicImageViewImageFormatPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFilterCubicImageViewImageFormatPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
+#endif
+#ifdef VK_EXT_global_priority
+void reservedmarshal_VkDeviceQueueGlobalPriorityCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceQueueGlobalPriorityCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_external_memory_host
+void reservedmarshal_VkImportMemoryHostPointerInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportMemoryHostPointerInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMemoryHostPointerPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryHostPointerPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceExternalMemoryHostPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExternalMemoryHostPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_AMD_buffer_marker
+#endif
+#ifdef VK_AMD_pipeline_compiler_control
+void reservedmarshal_VkPipelineCompilerControlCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCompilerControlCreateInfoAMD* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+void reservedmarshal_VkCalibratedTimestampInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCalibratedTimestampInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_AMD_shader_core_properties
+void reservedmarshal_VkPhysicalDeviceShaderCorePropertiesAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderCorePropertiesAMD* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_AMD_memory_overallocation_behavior
+void reservedmarshal_VkDeviceMemoryOverallocationCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceMemoryOverallocationCreateInfoAMD* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_vertex_attribute_divisor
+void reservedmarshal_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkVertexInputBindingDivisorDescriptionEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkVertexInputBindingDivisorDescriptionEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineVertexInputDivisorStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineVertexInputDivisorStateCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_GGP_frame_token
+void reservedmarshal_VkPresentFrameTokenGGP(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPresentFrameTokenGGP* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+void reservedmarshal_VkPipelineCreationFeedbackEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCreationFeedbackEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineCreationFeedbackCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCreationFeedbackCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_shader_subgroup_partitioned
+#endif
+#ifdef VK_NV_compute_shader_derivatives
+void reservedmarshal_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_mesh_shader
+void reservedmarshal_VkPhysicalDeviceMeshShaderFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMeshShaderFeaturesNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceMeshShaderPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMeshShaderPropertiesNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDrawMeshTasksIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDrawMeshTasksIndirectCommandNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+void reservedmarshal_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_shader_image_footprint
+void reservedmarshal_VkPhysicalDeviceShaderImageFootprintFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderImageFootprintFeaturesNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_scissor_exclusive
+void reservedmarshal_VkPipelineViewportExclusiveScissorStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineViewportExclusiveScissorStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceExclusiveScissorFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExclusiveScissorFeaturesNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_device_diagnostic_checkpoints
+void reservedmarshal_VkQueueFamilyCheckpointPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkQueueFamilyCheckpointPropertiesNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkCheckpointDataNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCheckpointDataNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_INTEL_shader_integer_functions2
+void reservedmarshal_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_INTEL_performance_query
+void reservedmarshal_VkPerformanceValueDataINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceValueDataINTEL* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPerformanceValueINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceValueINTEL* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkInitializePerformanceApiInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkInitializePerformanceApiInfoINTEL* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkQueryPoolPerformanceQueryCreateInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkQueryPoolPerformanceQueryCreateInfoINTEL* forMarshaling,
+    uint8_t** ptr);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkQueryPoolPerformanceQueryCreateInfoINTEL, reservedmarshal_VkQueryPoolCreateInfoINTEL);
+
+void reservedmarshal_VkPerformanceMarkerInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceMarkerInfoINTEL* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPerformanceStreamMarkerInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceStreamMarkerInfoINTEL* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPerformanceOverrideInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceOverrideInfoINTEL* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPerformanceConfigurationAcquireInfoINTEL(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPerformanceConfigurationAcquireInfoINTEL* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_pci_bus_info
+void reservedmarshal_VkPhysicalDevicePCIBusInfoPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePCIBusInfoPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_AMD_display_native_hdr
+void reservedmarshal_VkDisplayNativeHdrSurfaceCapabilitiesAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDisplayNativeHdrSurfaceCapabilitiesAMD* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSwapchainDisplayNativeHdrCreateInfoAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSwapchainDisplayNativeHdrCreateInfoAMD* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+void reservedmarshal_VkImagePipeSurfaceCreateInfoFUCHSIA(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImagePipeSurfaceCreateInfoFUCHSIA* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_metal_surface
+void reservedmarshal_VkMetalSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMetalSurfaceCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_fragment_density_map
+void reservedmarshal_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMapFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMapPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkRenderPassFragmentDensityMapCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassFragmentDensityMapCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_scalar_block_layout
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceScalarBlockLayoutFeatures, reservedmarshal_VkPhysicalDeviceScalarBlockLayoutFeaturesEXT);
+
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+void reservedmarshal_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_AMD_shader_core_properties2
+void reservedmarshal_VkPhysicalDeviceShaderCoreProperties2AMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderCoreProperties2AMD* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_AMD_device_coherent_memory
+void reservedmarshal_VkPhysicalDeviceCoherentMemoryFeaturesAMD(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCoherentMemoryFeaturesAMD* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+void reservedmarshal_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_memory_budget
+void reservedmarshal_VkPhysicalDeviceMemoryBudgetPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryBudgetPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_memory_priority
+void reservedmarshal_VkPhysicalDeviceMemoryPriorityFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceMemoryPriorityFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkMemoryPriorityAllocateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkMemoryPriorityAllocateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+void reservedmarshal_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_buffer_device_address
+void reservedmarshal_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT, reservedmarshal_VkPhysicalDeviceBufferAddressFeaturesEXT);
+
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkBufferDeviceAddressInfo, reservedmarshal_VkBufferDeviceAddressInfoEXT);
+
+void reservedmarshal_VkBufferDeviceAddressCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBufferDeviceAddressCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_tooling_info
+void reservedmarshal_VkPhysicalDeviceToolPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceToolPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkImageStencilUsageCreateInfo, reservedmarshal_VkImageStencilUsageCreateInfoEXT);
+
+#endif
+#ifdef VK_EXT_validation_features
+void reservedmarshal_VkValidationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkValidationFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_cooperative_matrix
+void reservedmarshal_VkCooperativeMatrixPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCooperativeMatrixPropertiesNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceCooperativeMatrixFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCooperativeMatrixFeaturesNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceCooperativeMatrixPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCooperativeMatrixPropertiesNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+void reservedmarshal_VkPhysicalDeviceCoverageReductionModeFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCoverageReductionModeFeaturesNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineCoverageReductionStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineCoverageReductionStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkFramebufferMixedSamplesCombinationNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkFramebufferMixedSamplesCombinationNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+void reservedmarshal_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+void reservedmarshal_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+void reservedmarshal_VkSurfaceFullScreenExclusiveInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceFullScreenExclusiveInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSurfaceCapabilitiesFullScreenExclusiveEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceCapabilitiesFullScreenExclusiveEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSurfaceFullScreenExclusiveWin32InfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSurfaceFullScreenExclusiveWin32InfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_headless_surface
+void reservedmarshal_VkHeadlessSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkHeadlessSurfaceCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_line_rasterization
+void reservedmarshal_VkPhysicalDeviceLineRasterizationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLineRasterizationFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceLineRasterizationPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceLineRasterizationPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineRasterizationLineStateCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineRasterizationLineStateCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_shader_atomic_float
+void reservedmarshal_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_host_query_reset
+DEFINE_ALIAS_FUNCTION(reservedmarshal_VkPhysicalDeviceHostQueryResetFeatures, reservedmarshal_VkPhysicalDeviceHostQueryResetFeaturesEXT);
+
+#endif
+#ifdef VK_EXT_index_type_uint8
+void reservedmarshal_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceIndexTypeUint8FeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+void reservedmarshal_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+void reservedmarshal_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_device_generated_commands
+void reservedmarshal_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkGraphicsShaderGroupCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGraphicsShaderGroupCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkGraphicsPipelineShaderGroupsCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGraphicsPipelineShaderGroupsCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBindShaderGroupIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindShaderGroupIndirectCommandNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBindIndexBufferIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindIndexBufferIndirectCommandNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkBindVertexBufferIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkBindVertexBufferIndirectCommandNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkSetStateFlagsIndirectCommandNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSetStateFlagsIndirectCommandNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkIndirectCommandsStreamNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkIndirectCommandsStreamNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkIndirectCommandsLayoutTokenNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkIndirectCommandsLayoutTokenNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkIndirectCommandsLayoutCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkIndirectCommandsLayoutCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkGeneratedCommandsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeneratedCommandsInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkGeneratedCommandsMemoryRequirementsInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkGeneratedCommandsMemoryRequirementsInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+void reservedmarshal_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_QCOM_render_pass_transform
+void reservedmarshal_VkRenderPassTransformBeginInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRenderPassTransformBeginInfoQCOM* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCommandBufferInheritanceRenderPassTransformInfoQCOM* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_device_memory_report
+void reservedmarshal_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDeviceMemoryReportCallbackDataEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceMemoryReportCallbackDataEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDeviceDeviceMemoryReportCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceDeviceMemoryReportCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_robustness2
+void reservedmarshal_VkPhysicalDeviceRobustness2FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRobustness2FeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceRobustness2PropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRobustness2PropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_custom_border_color
+void reservedmarshal_VkSamplerCustomBorderColorCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkSamplerCustomBorderColorCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceCustomBorderColorPropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCustomBorderColorPropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceCustomBorderColorFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceCustomBorderColorFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+void reservedmarshal_VkPhysicalDevicePrivateDataFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePrivateDataFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDevicePrivateDataCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDevicePrivateDataCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPrivateDataSlotCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPrivateDataSlotCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+void reservedmarshal_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_NV_device_diagnostics_config
+void reservedmarshal_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceDiagnosticsConfigFeaturesNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDeviceDiagnosticsConfigCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceDiagnosticsConfigCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+void reservedmarshal_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPipelineFragmentShadingRateEnumStateCreateInfoNV* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_fragment_density_map2
+void reservedmarshal_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+void reservedmarshal_VkCopyCommandTransformInfoQCOM(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyCommandTransformInfoQCOM* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_image_robustness
+void reservedmarshal_VkPhysicalDeviceImageRobustnessFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceImageRobustnessFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_4444_formats
+void reservedmarshal_VkPhysicalDevice4444FormatsFeaturesEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDevice4444FormatsFeaturesEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_EXT_directfb_surface
+void reservedmarshal_VkDirectFBSurfaceCreateInfoEXT(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDirectFBSurfaceCreateInfoEXT* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_GOOGLE_gfxstream
+void reservedmarshal_VkImportColorBufferGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportColorBufferGOOGLE* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImportBufferGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportBufferGOOGLE* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkImportPhysicalAddressGOOGLE(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkImportPhysicalAddressGOOGLE* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_acceleration_structure
+void reservedmarshal_VkDeviceOrHostAddressKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceOrHostAddressKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkDeviceOrHostAddressConstKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkDeviceOrHostAddressConstKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAccelerationStructureBuildRangeInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildRangeInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAccelerationStructureGeometryTrianglesDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryTrianglesDataKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAccelerationStructureGeometryAabbsDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryAabbsDataKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAccelerationStructureGeometryInstancesDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryInstancesDataKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAccelerationStructureGeometryDataKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryDataKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAccelerationStructureGeometryKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureGeometryKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAccelerationStructureBuildGeometryInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildGeometryInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAccelerationStructureCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureCreateInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkWriteDescriptorSetAccelerationStructureKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkWriteDescriptorSetAccelerationStructureKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceAccelerationStructureFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceAccelerationStructureFeaturesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceAccelerationStructurePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceAccelerationStructurePropertiesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAccelerationStructureDeviceAddressInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureDeviceAddressInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAccelerationStructureVersionInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureVersionInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkCopyAccelerationStructureToMemoryInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyAccelerationStructureToMemoryInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkCopyMemoryToAccelerationStructureInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyMemoryToAccelerationStructureInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkCopyAccelerationStructureInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkCopyAccelerationStructureInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkAccelerationStructureBuildSizesInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkAccelerationStructureBuildSizesInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+void reservedmarshal_VkRayTracingShaderGroupCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingShaderGroupCreateInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkRayTracingPipelineInterfaceCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingPipelineInterfaceCreateInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkRayTracingPipelineCreateInfoKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkRayTracingPipelineCreateInfoKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPipelineFeaturesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayTracingPipelinePropertiesKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkStridedDeviceAddressRegionKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkStridedDeviceAddressRegionKHR* forMarshaling,
+    uint8_t** ptr);
+
+void reservedmarshal_VkTraceRaysIndirectCommandKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkTraceRaysIndirectCommandKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+#ifdef VK_KHR_ray_query
+void reservedmarshal_VkPhysicalDeviceRayQueryFeaturesKHR(
+    VulkanStreamGuest* vkStream,
+    VkStructureType rootType,
+    const VkPhysicalDeviceRayQueryFeaturesKHR* forMarshaling,
+    uint8_t** ptr);
+
+#endif
+
+} // namespace goldfish_vk
diff --git a/system/vulkan_enc/goldfish_vk_transform_guest.cpp b/system/vulkan_enc/goldfish_vk_transform_guest.cpp
index 550313f..bcc896b 100644
--- a/system/vulkan_enc/goldfish_vk_transform_guest.cpp
+++ b/system/vulkan_enc/goldfish_vk_transform_guest.cpp
@@ -42,6 +42,292 @@
     void* structExtension_out);
 
 #ifdef VK_VERSION_1_0
+void transform_tohost_VkExtent2D(
+    ResourceTracker* resourceTracker,
+    VkExtent2D* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkExtent2D(
+    ResourceTracker* resourceTracker,
+    VkExtent2D* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkExtent3D(
+    ResourceTracker* resourceTracker,
+    VkExtent3D* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkExtent3D(
+    ResourceTracker* resourceTracker,
+    VkExtent3D* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkOffset2D(
+    ResourceTracker* resourceTracker,
+    VkOffset2D* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkOffset2D(
+    ResourceTracker* resourceTracker,
+    VkOffset2D* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkOffset3D(
+    ResourceTracker* resourceTracker,
+    VkOffset3D* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkOffset3D(
+    ResourceTracker* resourceTracker,
+    VkOffset3D* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkRect2D(
+    ResourceTracker* resourceTracker,
+    VkRect2D* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_tohost_VkOffset2D(resourceTracker, (VkOffset2D*)(&toTransform->offset));
+    transform_tohost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->extent));
+}
+
+void transform_fromhost_VkRect2D(
+    ResourceTracker* resourceTracker,
+    VkRect2D* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_fromhost_VkOffset2D(resourceTracker, (VkOffset2D*)(&toTransform->offset));
+    transform_fromhost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->extent));
+}
+
+void transform_tohost_VkBaseInStructure(
+    ResourceTracker* resourceTracker,
+    VkBaseInStructure* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkBaseInStructure(
+    ResourceTracker* resourceTracker,
+    VkBaseInStructure* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkBaseOutStructure(
+    ResourceTracker* resourceTracker,
+    VkBaseOutStructure* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkBaseOutStructure(
+    ResourceTracker* resourceTracker,
+    VkBaseOutStructure* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkBufferMemoryBarrier(
+    ResourceTracker* resourceTracker,
+    VkBufferMemoryBarrier* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkBufferMemoryBarrier(
+    ResourceTracker* resourceTracker,
+    VkBufferMemoryBarrier* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkDispatchIndirectCommand(
+    ResourceTracker* resourceTracker,
+    VkDispatchIndirectCommand* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkDispatchIndirectCommand(
+    ResourceTracker* resourceTracker,
+    VkDispatchIndirectCommand* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkDrawIndexedIndirectCommand(
+    ResourceTracker* resourceTracker,
+    VkDrawIndexedIndirectCommand* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkDrawIndexedIndirectCommand(
+    ResourceTracker* resourceTracker,
+    VkDrawIndexedIndirectCommand* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkDrawIndirectCommand(
+    ResourceTracker* resourceTracker,
+    VkDrawIndirectCommand* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkDrawIndirectCommand(
+    ResourceTracker* resourceTracker,
+    VkDrawIndirectCommand* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkImageSubresourceRange(
+    ResourceTracker* resourceTracker,
+    VkImageSubresourceRange* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkImageSubresourceRange(
+    ResourceTracker* resourceTracker,
+    VkImageSubresourceRange* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkImageMemoryBarrier(
+    ResourceTracker* resourceTracker,
+    VkImageMemoryBarrier* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkImageSubresourceRange(resourceTracker, (VkImageSubresourceRange*)(&toTransform->subresourceRange));
+}
+
+void transform_fromhost_VkImageMemoryBarrier(
+    ResourceTracker* resourceTracker,
+    VkImageMemoryBarrier* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkImageSubresourceRange(resourceTracker, (VkImageSubresourceRange*)(&toTransform->subresourceRange));
+}
+
+void transform_tohost_VkMemoryBarrier(
+    ResourceTracker* resourceTracker,
+    VkMemoryBarrier* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkMemoryBarrier(
+    ResourceTracker* resourceTracker,
+    VkMemoryBarrier* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkAllocationCallbacks(
+    ResourceTracker* resourceTracker,
+    VkAllocationCallbacks* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkAllocationCallbacks(
+    ResourceTracker* resourceTracker,
+    VkAllocationCallbacks* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
 void transform_tohost_VkApplicationInfo(
     ResourceTracker* resourceTracker,
     VkApplicationInfo* toTransform)
@@ -66,6 +352,40 @@
     }
 }
 
+void transform_tohost_VkFormatProperties(
+    ResourceTracker* resourceTracker,
+    VkFormatProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkFormatProperties(
+    ResourceTracker* resourceTracker,
+    VkFormatProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkImageFormatProperties(
+    ResourceTracker* resourceTracker,
+    VkImageFormatProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_tohost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->maxExtent));
+}
+
+void transform_fromhost_VkImageFormatProperties(
+    ResourceTracker* resourceTracker,
+    VkImageFormatProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_fromhost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->maxExtent));
+}
+
 void transform_tohost_VkInstanceCreateInfo(
     ResourceTracker* resourceTracker,
     VkInstanceCreateInfo* toTransform)
@@ -98,17 +418,33 @@
     }
 }
 
-void transform_tohost_VkAllocationCallbacks(
+void transform_tohost_VkMemoryHeap(
     ResourceTracker* resourceTracker,
-    VkAllocationCallbacks* toTransform)
+    VkMemoryHeap* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
 }
 
-void transform_fromhost_VkAllocationCallbacks(
+void transform_fromhost_VkMemoryHeap(
     ResourceTracker* resourceTracker,
-    VkAllocationCallbacks* toTransform)
+    VkMemoryHeap* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkMemoryType(
+    ResourceTracker* resourceTracker,
+    VkMemoryType* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkMemoryType(
+    ResourceTracker* resourceTracker,
+    VkMemoryType* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
@@ -130,56 +466,6 @@
     (void)toTransform;
 }
 
-void transform_tohost_VkFormatProperties(
-    ResourceTracker* resourceTracker,
-    VkFormatProperties* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkFormatProperties(
-    ResourceTracker* resourceTracker,
-    VkFormatProperties* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkExtent3D(
-    ResourceTracker* resourceTracker,
-    VkExtent3D* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkExtent3D(
-    ResourceTracker* resourceTracker,
-    VkExtent3D* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkImageFormatProperties(
-    ResourceTracker* resourceTracker,
-    VkImageFormatProperties* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    transform_tohost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->maxExtent));
-}
-
-void transform_fromhost_VkImageFormatProperties(
-    ResourceTracker* resourceTracker,
-    VkImageFormatProperties* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    transform_fromhost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->maxExtent));
-}
-
 void transform_tohost_VkPhysicalDeviceLimits(
     ResourceTracker* resourceTracker,
     VkPhysicalDeviceLimits* toTransform)
@@ -196,6 +482,38 @@
     (void)toTransform;
 }
 
+void transform_tohost_VkPhysicalDeviceMemoryProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMemoryProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_TYPES; ++i)
+    {
+        transform_tohost_VkMemoryType(resourceTracker, (VkMemoryType*)(toTransform->memoryTypes + i));
+    }
+    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_HEAPS; ++i)
+    {
+        transform_tohost_VkMemoryHeap(resourceTracker, (VkMemoryHeap*)(toTransform->memoryHeaps + i));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceMemoryProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMemoryProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_TYPES; ++i)
+    {
+        transform_fromhost_VkMemoryType(resourceTracker, (VkMemoryType*)(toTransform->memoryTypes + i));
+    }
+    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_HEAPS; ++i)
+    {
+        transform_fromhost_VkMemoryHeap(resourceTracker, (VkMemoryHeap*)(toTransform->memoryHeaps + i));
+    }
+}
+
 void transform_tohost_VkPhysicalDeviceSparseProperties(
     ResourceTracker* resourceTracker,
     VkPhysicalDeviceSparseProperties* toTransform)
@@ -250,70 +568,6 @@
     transform_fromhost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->minImageTransferGranularity));
 }
 
-void transform_tohost_VkMemoryType(
-    ResourceTracker* resourceTracker,
-    VkMemoryType* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkMemoryType(
-    ResourceTracker* resourceTracker,
-    VkMemoryType* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkMemoryHeap(
-    ResourceTracker* resourceTracker,
-    VkMemoryHeap* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkMemoryHeap(
-    ResourceTracker* resourceTracker,
-    VkMemoryHeap* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkPhysicalDeviceMemoryProperties(
-    ResourceTracker* resourceTracker,
-    VkPhysicalDeviceMemoryProperties* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_TYPES; ++i)
-    {
-        transform_tohost_VkMemoryType(resourceTracker, (VkMemoryType*)(toTransform->memoryTypes + i));
-    }
-    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_HEAPS; ++i)
-    {
-        transform_tohost_VkMemoryHeap(resourceTracker, (VkMemoryHeap*)(toTransform->memoryHeaps + i));
-    }
-}
-
-void transform_fromhost_VkPhysicalDeviceMemoryProperties(
-    ResourceTracker* resourceTracker,
-    VkPhysicalDeviceMemoryProperties* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_TYPES; ++i)
-    {
-        transform_fromhost_VkMemoryType(resourceTracker, (VkMemoryType*)(toTransform->memoryTypes + i));
-    }
-    for (uint32_t i = 0; i < (uint32_t)VK_MAX_MEMORY_HEAPS; ++i)
-    {
-        transform_fromhost_VkMemoryHeap(resourceTracker, (VkMemoryHeap*)(toTransform->memoryHeaps + i));
-    }
-}
-
 void transform_tohost_VkDeviceQueueCreateInfo(
     ResourceTracker* resourceTracker,
     VkDeviceQueueCreateInfo* toTransform)
@@ -348,11 +602,14 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pQueueCreateInfos)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->queueCreateInfoCount; ++i)
+        if (toTransform->pQueueCreateInfos)
         {
-            transform_tohost_VkDeviceQueueCreateInfo(resourceTracker, (VkDeviceQueueCreateInfo*)(toTransform->pQueueCreateInfos + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->queueCreateInfoCount; ++i)
+            {
+                transform_tohost_VkDeviceQueueCreateInfo(resourceTracker, (VkDeviceQueueCreateInfo*)(toTransform->pQueueCreateInfos + i));
+            }
         }
     }
     if (toTransform->pEnabledFeatures)
@@ -371,11 +628,14 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pQueueCreateInfos)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->queueCreateInfoCount; ++i)
+        if (toTransform->pQueueCreateInfos)
         {
-            transform_fromhost_VkDeviceQueueCreateInfo(resourceTracker, (VkDeviceQueueCreateInfo*)(toTransform->pQueueCreateInfos + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->queueCreateInfoCount; ++i)
+            {
+                transform_fromhost_VkDeviceQueueCreateInfo(resourceTracker, (VkDeviceQueueCreateInfo*)(toTransform->pQueueCreateInfos + i));
+            }
         }
     }
     if (toTransform->pEnabledFeatures)
@@ -440,32 +700,6 @@
     }
 }
 
-void transform_tohost_VkMemoryAllocateInfo(
-    ResourceTracker* resourceTracker,
-    VkMemoryAllocateInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    resourceTracker->deviceMemoryTransform_tohost((VkDeviceMemory*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)&toTransform->memoryTypeIndex, 1, (uint32_t*)nullptr, 0);
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkMemoryAllocateInfo(
-    ResourceTracker* resourceTracker,
-    VkMemoryAllocateInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    resourceTracker->deviceMemoryTransform_fromhost((VkDeviceMemory*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)&toTransform->memoryTypeIndex, 1, (uint32_t*)nullptr, 0);
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
 void transform_tohost_VkMappedMemoryRange(
     ResourceTracker* resourceTracker,
     VkMappedMemoryRange* toTransform)
@@ -492,6 +726,32 @@
     }
 }
 
+void transform_tohost_VkMemoryAllocateInfo(
+    ResourceTracker* resourceTracker,
+    VkMemoryAllocateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    resourceTracker->deviceMemoryTransform_tohost((VkDeviceMemory*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)&toTransform->memoryTypeIndex, 1, (uint32_t*)nullptr, 0);
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkMemoryAllocateInfo(
+    ResourceTracker* resourceTracker,
+    VkMemoryAllocateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    resourceTracker->deviceMemoryTransform_fromhost((VkDeviceMemory*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)&toTransform->memoryTypeIndex, 1, (uint32_t*)nullptr, 0);
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
 void transform_tohost_VkMemoryRequirements(
     ResourceTracker* resourceTracker,
     VkMemoryRequirements* toTransform)
@@ -510,6 +770,256 @@
     resourceTracker->deviceMemoryTransform_fromhost((VkDeviceMemory*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)&toTransform->memoryTypeBits, 1);
 }
 
+void transform_tohost_VkSparseMemoryBind(
+    ResourceTracker* resourceTracker,
+    VkSparseMemoryBind* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    resourceTracker->deviceMemoryTransform_tohost((VkDeviceMemory*)&toTransform->memory, 1, (VkDeviceSize*)&toTransform->memoryOffset, 1, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
+}
+
+void transform_fromhost_VkSparseMemoryBind(
+    ResourceTracker* resourceTracker,
+    VkSparseMemoryBind* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    resourceTracker->deviceMemoryTransform_fromhost((VkDeviceMemory*)&toTransform->memory, 1, (VkDeviceSize*)&toTransform->memoryOffset, 1, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
+}
+
+void transform_tohost_VkSparseBufferMemoryBindInfo(
+    ResourceTracker* resourceTracker,
+    VkSparseBufferMemoryBindInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform)
+    {
+        if (toTransform->pBinds)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->bindCount; ++i)
+            {
+                transform_tohost_VkSparseMemoryBind(resourceTracker, (VkSparseMemoryBind*)(toTransform->pBinds + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkSparseBufferMemoryBindInfo(
+    ResourceTracker* resourceTracker,
+    VkSparseBufferMemoryBindInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform)
+    {
+        if (toTransform->pBinds)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->bindCount; ++i)
+            {
+                transform_fromhost_VkSparseMemoryBind(resourceTracker, (VkSparseMemoryBind*)(toTransform->pBinds + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkSparseImageOpaqueMemoryBindInfo(
+    ResourceTracker* resourceTracker,
+    VkSparseImageOpaqueMemoryBindInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform)
+    {
+        if (toTransform->pBinds)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->bindCount; ++i)
+            {
+                transform_tohost_VkSparseMemoryBind(resourceTracker, (VkSparseMemoryBind*)(toTransform->pBinds + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkSparseImageOpaqueMemoryBindInfo(
+    ResourceTracker* resourceTracker,
+    VkSparseImageOpaqueMemoryBindInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform)
+    {
+        if (toTransform->pBinds)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->bindCount; ++i)
+            {
+                transform_fromhost_VkSparseMemoryBind(resourceTracker, (VkSparseMemoryBind*)(toTransform->pBinds + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkImageSubresource(
+    ResourceTracker* resourceTracker,
+    VkImageSubresource* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkImageSubresource(
+    ResourceTracker* resourceTracker,
+    VkImageSubresource* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkSparseImageMemoryBind(
+    ResourceTracker* resourceTracker,
+    VkSparseImageMemoryBind* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    resourceTracker->deviceMemoryTransform_tohost((VkDeviceMemory*)&toTransform->memory, 1, (VkDeviceSize*)&toTransform->memoryOffset, 1, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
+    transform_tohost_VkImageSubresource(resourceTracker, (VkImageSubresource*)(&toTransform->subresource));
+    transform_tohost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->offset));
+    transform_tohost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->extent));
+}
+
+void transform_fromhost_VkSparseImageMemoryBind(
+    ResourceTracker* resourceTracker,
+    VkSparseImageMemoryBind* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    resourceTracker->deviceMemoryTransform_fromhost((VkDeviceMemory*)&toTransform->memory, 1, (VkDeviceSize*)&toTransform->memoryOffset, 1, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
+    transform_fromhost_VkImageSubresource(resourceTracker, (VkImageSubresource*)(&toTransform->subresource));
+    transform_fromhost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->offset));
+    transform_fromhost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->extent));
+}
+
+void transform_tohost_VkSparseImageMemoryBindInfo(
+    ResourceTracker* resourceTracker,
+    VkSparseImageMemoryBindInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform)
+    {
+        if (toTransform->pBinds)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->bindCount; ++i)
+            {
+                transform_tohost_VkSparseImageMemoryBind(resourceTracker, (VkSparseImageMemoryBind*)(toTransform->pBinds + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkSparseImageMemoryBindInfo(
+    ResourceTracker* resourceTracker,
+    VkSparseImageMemoryBindInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform)
+    {
+        if (toTransform->pBinds)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->bindCount; ++i)
+            {
+                transform_fromhost_VkSparseImageMemoryBind(resourceTracker, (VkSparseImageMemoryBind*)(toTransform->pBinds + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkBindSparseInfo(
+    ResourceTracker* resourceTracker,
+    VkBindSparseInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pBufferBinds)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->bufferBindCount; ++i)
+            {
+                transform_tohost_VkSparseBufferMemoryBindInfo(resourceTracker, (VkSparseBufferMemoryBindInfo*)(toTransform->pBufferBinds + i));
+            }
+        }
+    }
+    if (toTransform)
+    {
+        if (toTransform->pImageOpaqueBinds)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->imageOpaqueBindCount; ++i)
+            {
+                transform_tohost_VkSparseImageOpaqueMemoryBindInfo(resourceTracker, (VkSparseImageOpaqueMemoryBindInfo*)(toTransform->pImageOpaqueBinds + i));
+            }
+        }
+    }
+    if (toTransform)
+    {
+        if (toTransform->pImageBinds)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->imageBindCount; ++i)
+            {
+                transform_tohost_VkSparseImageMemoryBindInfo(resourceTracker, (VkSparseImageMemoryBindInfo*)(toTransform->pImageBinds + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkBindSparseInfo(
+    ResourceTracker* resourceTracker,
+    VkBindSparseInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pBufferBinds)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->bufferBindCount; ++i)
+            {
+                transform_fromhost_VkSparseBufferMemoryBindInfo(resourceTracker, (VkSparseBufferMemoryBindInfo*)(toTransform->pBufferBinds + i));
+            }
+        }
+    }
+    if (toTransform)
+    {
+        if (toTransform->pImageOpaqueBinds)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->imageOpaqueBindCount; ++i)
+            {
+                transform_fromhost_VkSparseImageOpaqueMemoryBindInfo(resourceTracker, (VkSparseImageOpaqueMemoryBindInfo*)(toTransform->pImageOpaqueBinds + i));
+            }
+        }
+    }
+    if (toTransform)
+    {
+        if (toTransform->pImageBinds)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->imageBindCount; ++i)
+            {
+                transform_fromhost_VkSparseImageMemoryBindInfo(resourceTracker, (VkSparseImageMemoryBindInfo*)(toTransform->pImageBinds + i));
+            }
+        }
+    }
+}
+
 void transform_tohost_VkSparseImageFormatProperties(
     ResourceTracker* resourceTracker,
     VkSparseImageFormatProperties* toTransform)
@@ -546,236 +1056,6 @@
     transform_fromhost_VkSparseImageFormatProperties(resourceTracker, (VkSparseImageFormatProperties*)(&toTransform->formatProperties));
 }
 
-void transform_tohost_VkSparseMemoryBind(
-    ResourceTracker* resourceTracker,
-    VkSparseMemoryBind* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    resourceTracker->deviceMemoryTransform_tohost((VkDeviceMemory*)&toTransform->memory, 1, (VkDeviceSize*)&toTransform->memoryOffset, 1, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
-}
-
-void transform_fromhost_VkSparseMemoryBind(
-    ResourceTracker* resourceTracker,
-    VkSparseMemoryBind* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    resourceTracker->deviceMemoryTransform_fromhost((VkDeviceMemory*)&toTransform->memory, 1, (VkDeviceSize*)&toTransform->memoryOffset, 1, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
-}
-
-void transform_tohost_VkSparseBufferMemoryBindInfo(
-    ResourceTracker* resourceTracker,
-    VkSparseBufferMemoryBindInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pBinds)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->bindCount; ++i)
-        {
-            transform_tohost_VkSparseMemoryBind(resourceTracker, (VkSparseMemoryBind*)(toTransform->pBinds + i));
-        }
-    }
-}
-
-void transform_fromhost_VkSparseBufferMemoryBindInfo(
-    ResourceTracker* resourceTracker,
-    VkSparseBufferMemoryBindInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pBinds)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->bindCount; ++i)
-        {
-            transform_fromhost_VkSparseMemoryBind(resourceTracker, (VkSparseMemoryBind*)(toTransform->pBinds + i));
-        }
-    }
-}
-
-void transform_tohost_VkSparseImageOpaqueMemoryBindInfo(
-    ResourceTracker* resourceTracker,
-    VkSparseImageOpaqueMemoryBindInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pBinds)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->bindCount; ++i)
-        {
-            transform_tohost_VkSparseMemoryBind(resourceTracker, (VkSparseMemoryBind*)(toTransform->pBinds + i));
-        }
-    }
-}
-
-void transform_fromhost_VkSparseImageOpaqueMemoryBindInfo(
-    ResourceTracker* resourceTracker,
-    VkSparseImageOpaqueMemoryBindInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pBinds)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->bindCount; ++i)
-        {
-            transform_fromhost_VkSparseMemoryBind(resourceTracker, (VkSparseMemoryBind*)(toTransform->pBinds + i));
-        }
-    }
-}
-
-void transform_tohost_VkImageSubresource(
-    ResourceTracker* resourceTracker,
-    VkImageSubresource* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkImageSubresource(
-    ResourceTracker* resourceTracker,
-    VkImageSubresource* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkOffset3D(
-    ResourceTracker* resourceTracker,
-    VkOffset3D* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkOffset3D(
-    ResourceTracker* resourceTracker,
-    VkOffset3D* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkSparseImageMemoryBind(
-    ResourceTracker* resourceTracker,
-    VkSparseImageMemoryBind* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    resourceTracker->deviceMemoryTransform_tohost((VkDeviceMemory*)&toTransform->memory, 1, (VkDeviceSize*)&toTransform->memoryOffset, 1, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
-    transform_tohost_VkImageSubresource(resourceTracker, (VkImageSubresource*)(&toTransform->subresource));
-    transform_tohost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->offset));
-    transform_tohost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->extent));
-}
-
-void transform_fromhost_VkSparseImageMemoryBind(
-    ResourceTracker* resourceTracker,
-    VkSparseImageMemoryBind* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    resourceTracker->deviceMemoryTransform_fromhost((VkDeviceMemory*)&toTransform->memory, 1, (VkDeviceSize*)&toTransform->memoryOffset, 1, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
-    transform_fromhost_VkImageSubresource(resourceTracker, (VkImageSubresource*)(&toTransform->subresource));
-    transform_fromhost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->offset));
-    transform_fromhost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->extent));
-}
-
-void transform_tohost_VkSparseImageMemoryBindInfo(
-    ResourceTracker* resourceTracker,
-    VkSparseImageMemoryBindInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pBinds)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->bindCount; ++i)
-        {
-            transform_tohost_VkSparseImageMemoryBind(resourceTracker, (VkSparseImageMemoryBind*)(toTransform->pBinds + i));
-        }
-    }
-}
-
-void transform_fromhost_VkSparseImageMemoryBindInfo(
-    ResourceTracker* resourceTracker,
-    VkSparseImageMemoryBindInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pBinds)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->bindCount; ++i)
-        {
-            transform_fromhost_VkSparseImageMemoryBind(resourceTracker, (VkSparseImageMemoryBind*)(toTransform->pBinds + i));
-        }
-    }
-}
-
-void transform_tohost_VkBindSparseInfo(
-    ResourceTracker* resourceTracker,
-    VkBindSparseInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-    if (toTransform->pBufferBinds)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->bufferBindCount; ++i)
-        {
-            transform_tohost_VkSparseBufferMemoryBindInfo(resourceTracker, (VkSparseBufferMemoryBindInfo*)(toTransform->pBufferBinds + i));
-        }
-    }
-    if (toTransform->pImageOpaqueBinds)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->imageOpaqueBindCount; ++i)
-        {
-            transform_tohost_VkSparseImageOpaqueMemoryBindInfo(resourceTracker, (VkSparseImageOpaqueMemoryBindInfo*)(toTransform->pImageOpaqueBinds + i));
-        }
-    }
-    if (toTransform->pImageBinds)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->imageBindCount; ++i)
-        {
-            transform_tohost_VkSparseImageMemoryBindInfo(resourceTracker, (VkSparseImageMemoryBindInfo*)(toTransform->pImageBinds + i));
-        }
-    }
-}
-
-void transform_fromhost_VkBindSparseInfo(
-    ResourceTracker* resourceTracker,
-    VkBindSparseInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-    if (toTransform->pBufferBinds)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->bufferBindCount; ++i)
-        {
-            transform_fromhost_VkSparseBufferMemoryBindInfo(resourceTracker, (VkSparseBufferMemoryBindInfo*)(toTransform->pBufferBinds + i));
-        }
-    }
-    if (toTransform->pImageOpaqueBinds)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->imageOpaqueBindCount; ++i)
-        {
-            transform_fromhost_VkSparseImageOpaqueMemoryBindInfo(resourceTracker, (VkSparseImageOpaqueMemoryBindInfo*)(toTransform->pImageOpaqueBinds + i));
-        }
-    }
-    if (toTransform->pImageBinds)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->imageBindCount; ++i)
-        {
-            transform_fromhost_VkSparseImageMemoryBindInfo(resourceTracker, (VkSparseImageMemoryBindInfo*)(toTransform->pImageBinds + i));
-        }
-    }
-}
-
 void transform_tohost_VkFenceCreateInfo(
     ResourceTracker* resourceTracker,
     VkFenceCreateInfo* toTransform)
@@ -978,22 +1258,6 @@
     (void)toTransform;
 }
 
-void transform_tohost_VkImageSubresourceRange(
-    ResourceTracker* resourceTracker,
-    VkImageSubresourceRange* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkImageSubresourceRange(
-    ResourceTracker* resourceTracker,
-    VkImageSubresourceRange* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
 void transform_tohost_VkImageViewCreateInfo(
     ResourceTracker* resourceTracker,
     VkImageViewCreateInfo* toTransform)
@@ -1092,11 +1356,14 @@
 {
     (void)resourceTracker;
     (void)toTransform;
-    if (toTransform->pMapEntries)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->mapEntryCount; ++i)
+        if (toTransform->pMapEntries)
         {
-            transform_tohost_VkSpecializationMapEntry(resourceTracker, (VkSpecializationMapEntry*)(toTransform->pMapEntries + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->mapEntryCount; ++i)
+            {
+                transform_tohost_VkSpecializationMapEntry(resourceTracker, (VkSpecializationMapEntry*)(toTransform->pMapEntries + i));
+            }
         }
     }
 }
@@ -1107,11 +1374,14 @@
 {
     (void)resourceTracker;
     (void)toTransform;
-    if (toTransform->pMapEntries)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->mapEntryCount; ++i)
+        if (toTransform->pMapEntries)
         {
-            transform_fromhost_VkSpecializationMapEntry(resourceTracker, (VkSpecializationMapEntry*)(toTransform->pMapEntries + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->mapEntryCount; ++i)
+            {
+                transform_fromhost_VkSpecializationMapEntry(resourceTracker, (VkSpecializationMapEntry*)(toTransform->pMapEntries + i));
+            }
         }
     }
 }
@@ -1148,6 +1418,32 @@
     }
 }
 
+void transform_tohost_VkComputePipelineCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkComputePipelineCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkPipelineShaderStageCreateInfo(resourceTracker, (VkPipelineShaderStageCreateInfo*)(&toTransform->stage));
+}
+
+void transform_fromhost_VkComputePipelineCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkComputePipelineCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkPipelineShaderStageCreateInfo(resourceTracker, (VkPipelineShaderStageCreateInfo*)(&toTransform->stage));
+}
+
 void transform_tohost_VkVertexInputBindingDescription(
     ResourceTracker* resourceTracker,
     VkVertexInputBindingDescription* toTransform)
@@ -1190,18 +1486,24 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pVertexBindingDescriptions)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->vertexBindingDescriptionCount; ++i)
+        if (toTransform->pVertexBindingDescriptions)
         {
-            transform_tohost_VkVertexInputBindingDescription(resourceTracker, (VkVertexInputBindingDescription*)(toTransform->pVertexBindingDescriptions + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->vertexBindingDescriptionCount; ++i)
+            {
+                transform_tohost_VkVertexInputBindingDescription(resourceTracker, (VkVertexInputBindingDescription*)(toTransform->pVertexBindingDescriptions + i));
+            }
         }
     }
-    if (toTransform->pVertexAttributeDescriptions)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->vertexAttributeDescriptionCount; ++i)
+        if (toTransform->pVertexAttributeDescriptions)
         {
-            transform_tohost_VkVertexInputAttributeDescription(resourceTracker, (VkVertexInputAttributeDescription*)(toTransform->pVertexAttributeDescriptions + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->vertexAttributeDescriptionCount; ++i)
+            {
+                transform_tohost_VkVertexInputAttributeDescription(resourceTracker, (VkVertexInputAttributeDescription*)(toTransform->pVertexAttributeDescriptions + i));
+            }
         }
     }
 }
@@ -1216,18 +1518,24 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pVertexBindingDescriptions)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->vertexBindingDescriptionCount; ++i)
+        if (toTransform->pVertexBindingDescriptions)
         {
-            transform_fromhost_VkVertexInputBindingDescription(resourceTracker, (VkVertexInputBindingDescription*)(toTransform->pVertexBindingDescriptions + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->vertexBindingDescriptionCount; ++i)
+            {
+                transform_fromhost_VkVertexInputBindingDescription(resourceTracker, (VkVertexInputBindingDescription*)(toTransform->pVertexBindingDescriptions + i));
+            }
         }
     }
-    if (toTransform->pVertexAttributeDescriptions)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->vertexAttributeDescriptionCount; ++i)
+        if (toTransform->pVertexAttributeDescriptions)
         {
-            transform_fromhost_VkVertexInputAttributeDescription(resourceTracker, (VkVertexInputAttributeDescription*)(toTransform->pVertexAttributeDescriptions + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->vertexAttributeDescriptionCount; ++i)
+            {
+                transform_fromhost_VkVertexInputAttributeDescription(resourceTracker, (VkVertexInputAttributeDescription*)(toTransform->pVertexAttributeDescriptions + i));
+            }
         }
     }
 }
@@ -1296,58 +1604,6 @@
     (void)toTransform;
 }
 
-void transform_tohost_VkOffset2D(
-    ResourceTracker* resourceTracker,
-    VkOffset2D* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkOffset2D(
-    ResourceTracker* resourceTracker,
-    VkOffset2D* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkExtent2D(
-    ResourceTracker* resourceTracker,
-    VkExtent2D* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkExtent2D(
-    ResourceTracker* resourceTracker,
-    VkExtent2D* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkRect2D(
-    ResourceTracker* resourceTracker,
-    VkRect2D* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    transform_tohost_VkOffset2D(resourceTracker, (VkOffset2D*)(&toTransform->offset));
-    transform_tohost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->extent));
-}
-
-void transform_fromhost_VkRect2D(
-    ResourceTracker* resourceTracker,
-    VkRect2D* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    transform_fromhost_VkOffset2D(resourceTracker, (VkOffset2D*)(&toTransform->offset));
-    transform_fromhost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->extent));
-}
-
 void transform_tohost_VkPipelineViewportStateCreateInfo(
     ResourceTracker* resourceTracker,
     VkPipelineViewportStateCreateInfo* toTransform)
@@ -1358,18 +1614,24 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pViewports)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->viewportCount; ++i)
+        if (toTransform->pViewports)
         {
-            transform_tohost_VkViewport(resourceTracker, (VkViewport*)(toTransform->pViewports + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->viewportCount; ++i)
+            {
+                transform_tohost_VkViewport(resourceTracker, (VkViewport*)(toTransform->pViewports + i));
+            }
         }
     }
-    if (toTransform->pScissors)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->scissorCount; ++i)
+        if (toTransform->pScissors)
         {
-            transform_tohost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pScissors + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->scissorCount; ++i)
+            {
+                transform_tohost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pScissors + i));
+            }
         }
     }
 }
@@ -1384,18 +1646,24 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pViewports)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->viewportCount; ++i)
+        if (toTransform->pViewports)
         {
-            transform_fromhost_VkViewport(resourceTracker, (VkViewport*)(toTransform->pViewports + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->viewportCount; ++i)
+            {
+                transform_fromhost_VkViewport(resourceTracker, (VkViewport*)(toTransform->pViewports + i));
+            }
         }
     }
-    if (toTransform->pScissors)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->scissorCount; ++i)
+        if (toTransform->pScissors)
         {
-            transform_fromhost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pScissors + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->scissorCount; ++i)
+            {
+                transform_fromhost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pScissors + i));
+            }
         }
     }
 }
@@ -1518,11 +1786,14 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pAttachments)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentCount; ++i)
+        if (toTransform->pAttachments)
         {
-            transform_tohost_VkPipelineColorBlendAttachmentState(resourceTracker, (VkPipelineColorBlendAttachmentState*)(toTransform->pAttachments + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentCount; ++i)
+            {
+                transform_tohost_VkPipelineColorBlendAttachmentState(resourceTracker, (VkPipelineColorBlendAttachmentState*)(toTransform->pAttachments + i));
+            }
         }
     }
 }
@@ -1537,11 +1808,14 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pAttachments)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentCount; ++i)
+        if (toTransform->pAttachments)
         {
-            transform_fromhost_VkPipelineColorBlendAttachmentState(resourceTracker, (VkPipelineColorBlendAttachmentState*)(toTransform->pAttachments + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentCount; ++i)
+            {
+                transform_fromhost_VkPipelineColorBlendAttachmentState(resourceTracker, (VkPipelineColorBlendAttachmentState*)(toTransform->pAttachments + i));
+            }
         }
     }
 }
@@ -1580,11 +1854,14 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pStages)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->stageCount; ++i)
+        if (toTransform->pStages)
         {
-            transform_tohost_VkPipelineShaderStageCreateInfo(resourceTracker, (VkPipelineShaderStageCreateInfo*)(toTransform->pStages + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->stageCount; ++i)
+            {
+                transform_tohost_VkPipelineShaderStageCreateInfo(resourceTracker, (VkPipelineShaderStageCreateInfo*)(toTransform->pStages + i));
+            }
         }
     }
     if (toTransform->pVertexInputState)
@@ -1635,11 +1912,14 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pStages)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->stageCount; ++i)
+        if (toTransform->pStages)
         {
-            transform_fromhost_VkPipelineShaderStageCreateInfo(resourceTracker, (VkPipelineShaderStageCreateInfo*)(toTransform->pStages + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->stageCount; ++i)
+            {
+                transform_fromhost_VkPipelineShaderStageCreateInfo(resourceTracker, (VkPipelineShaderStageCreateInfo*)(toTransform->pStages + i));
+            }
         }
     }
     if (toTransform->pVertexInputState)
@@ -1680,32 +1960,6 @@
     }
 }
 
-void transform_tohost_VkComputePipelineCreateInfo(
-    ResourceTracker* resourceTracker,
-    VkComputePipelineCreateInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-    transform_tohost_VkPipelineShaderStageCreateInfo(resourceTracker, (VkPipelineShaderStageCreateInfo*)(&toTransform->stage));
-}
-
-void transform_fromhost_VkComputePipelineCreateInfo(
-    ResourceTracker* resourceTracker,
-    VkComputePipelineCreateInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-    transform_fromhost_VkPipelineShaderStageCreateInfo(resourceTracker, (VkPipelineShaderStageCreateInfo*)(&toTransform->stage));
-}
-
 void transform_tohost_VkPushConstantRange(
     ResourceTracker* resourceTracker,
     VkPushConstantRange* toTransform)
@@ -1732,11 +1986,14 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pPushConstantRanges)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->pushConstantRangeCount; ++i)
+        if (toTransform->pPushConstantRanges)
         {
-            transform_tohost_VkPushConstantRange(resourceTracker, (VkPushConstantRange*)(toTransform->pPushConstantRanges + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->pushConstantRangeCount; ++i)
+            {
+                transform_tohost_VkPushConstantRange(resourceTracker, (VkPushConstantRange*)(toTransform->pPushConstantRanges + i));
+            }
         }
     }
 }
@@ -1751,11 +2008,14 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pPushConstantRanges)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->pushConstantRangeCount; ++i)
+        if (toTransform->pPushConstantRanges)
         {
-            transform_fromhost_VkPushConstantRange(resourceTracker, (VkPushConstantRange*)(toTransform->pPushConstantRanges + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->pushConstantRangeCount; ++i)
+            {
+                transform_fromhost_VkPushConstantRange(resourceTracker, (VkPushConstantRange*)(toTransform->pPushConstantRanges + i));
+            }
         }
     }
 }
@@ -1784,6 +2044,146 @@
     }
 }
 
+void transform_tohost_VkCopyDescriptorSet(
+    ResourceTracker* resourceTracker,
+    VkCopyDescriptorSet* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkCopyDescriptorSet(
+    ResourceTracker* resourceTracker,
+    VkCopyDescriptorSet* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkDescriptorBufferInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorBufferInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkDescriptorBufferInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorBufferInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkDescriptorImageInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorImageInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkDescriptorImageInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorImageInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkDescriptorPoolSize(
+    ResourceTracker* resourceTracker,
+    VkDescriptorPoolSize* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkDescriptorPoolSize(
+    ResourceTracker* resourceTracker,
+    VkDescriptorPoolSize* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkDescriptorPoolCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorPoolCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pPoolSizes)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->poolSizeCount; ++i)
+            {
+                transform_tohost_VkDescriptorPoolSize(resourceTracker, (VkDescriptorPoolSize*)(toTransform->pPoolSizes + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkDescriptorPoolCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorPoolCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pPoolSizes)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->poolSizeCount; ++i)
+            {
+                transform_fromhost_VkDescriptorPoolSize(resourceTracker, (VkDescriptorPoolSize*)(toTransform->pPoolSizes + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkDescriptorSetAllocateInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorSetAllocateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkDescriptorSetAllocateInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorSetAllocateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
 void transform_tohost_VkDescriptorSetLayoutBinding(
     ResourceTracker* resourceTracker,
     VkDescriptorSetLayoutBinding* toTransform)
@@ -1810,11 +2210,14 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pBindings)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->bindingCount; ++i)
+        if (toTransform->pBindings)
         {
-            transform_tohost_VkDescriptorSetLayoutBinding(resourceTracker, (VkDescriptorSetLayoutBinding*)(toTransform->pBindings + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->bindingCount; ++i)
+            {
+                transform_tohost_VkDescriptorSetLayoutBinding(resourceTracker, (VkDescriptorSetLayoutBinding*)(toTransform->pBindings + i));
+            }
         }
     }
 }
@@ -1829,125 +2232,18 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pBindings)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->bindingCount; ++i)
+        if (toTransform->pBindings)
         {
-            transform_fromhost_VkDescriptorSetLayoutBinding(resourceTracker, (VkDescriptorSetLayoutBinding*)(toTransform->pBindings + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->bindingCount; ++i)
+            {
+                transform_fromhost_VkDescriptorSetLayoutBinding(resourceTracker, (VkDescriptorSetLayoutBinding*)(toTransform->pBindings + i));
+            }
         }
     }
 }
 
-void transform_tohost_VkDescriptorPoolSize(
-    ResourceTracker* resourceTracker,
-    VkDescriptorPoolSize* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkDescriptorPoolSize(
-    ResourceTracker* resourceTracker,
-    VkDescriptorPoolSize* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkDescriptorPoolCreateInfo(
-    ResourceTracker* resourceTracker,
-    VkDescriptorPoolCreateInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-    if (toTransform->pPoolSizes)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->poolSizeCount; ++i)
-        {
-            transform_tohost_VkDescriptorPoolSize(resourceTracker, (VkDescriptorPoolSize*)(toTransform->pPoolSizes + i));
-        }
-    }
-}
-
-void transform_fromhost_VkDescriptorPoolCreateInfo(
-    ResourceTracker* resourceTracker,
-    VkDescriptorPoolCreateInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-    if (toTransform->pPoolSizes)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->poolSizeCount; ++i)
-        {
-            transform_fromhost_VkDescriptorPoolSize(resourceTracker, (VkDescriptorPoolSize*)(toTransform->pPoolSizes + i));
-        }
-    }
-}
-
-void transform_tohost_VkDescriptorSetAllocateInfo(
-    ResourceTracker* resourceTracker,
-    VkDescriptorSetAllocateInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkDescriptorSetAllocateInfo(
-    ResourceTracker* resourceTracker,
-    VkDescriptorSetAllocateInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_tohost_VkDescriptorImageInfo(
-    ResourceTracker* resourceTracker,
-    VkDescriptorImageInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkDescriptorImageInfo(
-    ResourceTracker* resourceTracker,
-    VkDescriptorImageInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkDescriptorBufferInfo(
-    ResourceTracker* resourceTracker,
-    VkDescriptorBufferInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkDescriptorBufferInfo(
-    ResourceTracker* resourceTracker,
-    VkDescriptorBufferInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
 void transform_tohost_VkWriteDescriptorSet(
     ResourceTracker* resourceTracker,
     VkWriteDescriptorSet* toTransform)
@@ -1958,18 +2254,24 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pImageInfo)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->descriptorCount; ++i)
+        if (toTransform->pImageInfo)
         {
-            transform_tohost_VkDescriptorImageInfo(resourceTracker, (VkDescriptorImageInfo*)(toTransform->pImageInfo + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->descriptorCount; ++i)
+            {
+                transform_tohost_VkDescriptorImageInfo(resourceTracker, (VkDescriptorImageInfo*)(toTransform->pImageInfo + i));
+            }
         }
     }
-    if (toTransform->pBufferInfo)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->descriptorCount; ++i)
+        if (toTransform->pBufferInfo)
         {
-            transform_tohost_VkDescriptorBufferInfo(resourceTracker, (VkDescriptorBufferInfo*)(toTransform->pBufferInfo + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->descriptorCount; ++i)
+            {
+                transform_tohost_VkDescriptorBufferInfo(resourceTracker, (VkDescriptorBufferInfo*)(toTransform->pBufferInfo + i));
+            }
         }
     }
 }
@@ -1984,70 +2286,28 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pImageInfo)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->descriptorCount; ++i)
+        if (toTransform->pImageInfo)
         {
-            transform_fromhost_VkDescriptorImageInfo(resourceTracker, (VkDescriptorImageInfo*)(toTransform->pImageInfo + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->descriptorCount; ++i)
+            {
+                transform_fromhost_VkDescriptorImageInfo(resourceTracker, (VkDescriptorImageInfo*)(toTransform->pImageInfo + i));
+            }
         }
     }
-    if (toTransform->pBufferInfo)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->descriptorCount; ++i)
+        if (toTransform->pBufferInfo)
         {
-            transform_fromhost_VkDescriptorBufferInfo(resourceTracker, (VkDescriptorBufferInfo*)(toTransform->pBufferInfo + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->descriptorCount; ++i)
+            {
+                transform_fromhost_VkDescriptorBufferInfo(resourceTracker, (VkDescriptorBufferInfo*)(toTransform->pBufferInfo + i));
+            }
         }
     }
 }
 
-void transform_tohost_VkCopyDescriptorSet(
-    ResourceTracker* resourceTracker,
-    VkCopyDescriptorSet* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkCopyDescriptorSet(
-    ResourceTracker* resourceTracker,
-    VkCopyDescriptorSet* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_tohost_VkFramebufferCreateInfo(
-    ResourceTracker* resourceTracker,
-    VkFramebufferCreateInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkFramebufferCreateInfo(
-    ResourceTracker* resourceTracker,
-    VkFramebufferCreateInfo* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
 void transform_tohost_VkAttachmentDescription(
     ResourceTracker* resourceTracker,
     VkAttachmentDescription* toTransform)
@@ -2080,31 +2340,64 @@
     (void)toTransform;
 }
 
+void transform_tohost_VkFramebufferCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkFramebufferCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkFramebufferCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkFramebufferCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
 void transform_tohost_VkSubpassDescription(
     ResourceTracker* resourceTracker,
     VkSubpassDescription* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
-    if (toTransform->pInputAttachments)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->inputAttachmentCount; ++i)
+        if (toTransform->pInputAttachments)
         {
-            transform_tohost_VkAttachmentReference(resourceTracker, (VkAttachmentReference*)(toTransform->pInputAttachments + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->inputAttachmentCount; ++i)
+            {
+                transform_tohost_VkAttachmentReference(resourceTracker, (VkAttachmentReference*)(toTransform->pInputAttachments + i));
+            }
         }
     }
-    if (toTransform->pColorAttachments)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->colorAttachmentCount; ++i)
+        if (toTransform->pColorAttachments)
         {
-            transform_tohost_VkAttachmentReference(resourceTracker, (VkAttachmentReference*)(toTransform->pColorAttachments + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->colorAttachmentCount; ++i)
+            {
+                transform_tohost_VkAttachmentReference(resourceTracker, (VkAttachmentReference*)(toTransform->pColorAttachments + i));
+            }
         }
     }
-    if (toTransform->pResolveAttachments)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->colorAttachmentCount; ++i)
+        if (toTransform->pResolveAttachments)
         {
-            transform_tohost_VkAttachmentReference(resourceTracker, (VkAttachmentReference*)(toTransform->pResolveAttachments + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->colorAttachmentCount; ++i)
+            {
+                transform_tohost_VkAttachmentReference(resourceTracker, (VkAttachmentReference*)(toTransform->pResolveAttachments + i));
+            }
         }
     }
     if (toTransform->pDepthStencilAttachment)
@@ -2119,25 +2412,34 @@
 {
     (void)resourceTracker;
     (void)toTransform;
-    if (toTransform->pInputAttachments)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->inputAttachmentCount; ++i)
+        if (toTransform->pInputAttachments)
         {
-            transform_fromhost_VkAttachmentReference(resourceTracker, (VkAttachmentReference*)(toTransform->pInputAttachments + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->inputAttachmentCount; ++i)
+            {
+                transform_fromhost_VkAttachmentReference(resourceTracker, (VkAttachmentReference*)(toTransform->pInputAttachments + i));
+            }
         }
     }
-    if (toTransform->pColorAttachments)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->colorAttachmentCount; ++i)
+        if (toTransform->pColorAttachments)
         {
-            transform_fromhost_VkAttachmentReference(resourceTracker, (VkAttachmentReference*)(toTransform->pColorAttachments + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->colorAttachmentCount; ++i)
+            {
+                transform_fromhost_VkAttachmentReference(resourceTracker, (VkAttachmentReference*)(toTransform->pColorAttachments + i));
+            }
         }
     }
-    if (toTransform->pResolveAttachments)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->colorAttachmentCount; ++i)
+        if (toTransform->pResolveAttachments)
         {
-            transform_fromhost_VkAttachmentReference(resourceTracker, (VkAttachmentReference*)(toTransform->pResolveAttachments + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->colorAttachmentCount; ++i)
+            {
+                transform_fromhost_VkAttachmentReference(resourceTracker, (VkAttachmentReference*)(toTransform->pResolveAttachments + i));
+            }
         }
     }
     if (toTransform->pDepthStencilAttachment)
@@ -2172,25 +2474,34 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pAttachments)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentCount; ++i)
+        if (toTransform->pAttachments)
         {
-            transform_tohost_VkAttachmentDescription(resourceTracker, (VkAttachmentDescription*)(toTransform->pAttachments + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentCount; ++i)
+            {
+                transform_tohost_VkAttachmentDescription(resourceTracker, (VkAttachmentDescription*)(toTransform->pAttachments + i));
+            }
         }
     }
-    if (toTransform->pSubpasses)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->subpassCount; ++i)
+        if (toTransform->pSubpasses)
         {
-            transform_tohost_VkSubpassDescription(resourceTracker, (VkSubpassDescription*)(toTransform->pSubpasses + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->subpassCount; ++i)
+            {
+                transform_tohost_VkSubpassDescription(resourceTracker, (VkSubpassDescription*)(toTransform->pSubpasses + i));
+            }
         }
     }
-    if (toTransform->pDependencies)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->dependencyCount; ++i)
+        if (toTransform->pDependencies)
         {
-            transform_tohost_VkSubpassDependency(resourceTracker, (VkSubpassDependency*)(toTransform->pDependencies + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->dependencyCount; ++i)
+            {
+                transform_tohost_VkSubpassDependency(resourceTracker, (VkSubpassDependency*)(toTransform->pDependencies + i));
+            }
         }
     }
 }
@@ -2205,25 +2516,34 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pAttachments)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentCount; ++i)
+        if (toTransform->pAttachments)
         {
-            transform_fromhost_VkAttachmentDescription(resourceTracker, (VkAttachmentDescription*)(toTransform->pAttachments + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentCount; ++i)
+            {
+                transform_fromhost_VkAttachmentDescription(resourceTracker, (VkAttachmentDescription*)(toTransform->pAttachments + i));
+            }
         }
     }
-    if (toTransform->pSubpasses)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->subpassCount; ++i)
+        if (toTransform->pSubpasses)
         {
-            transform_fromhost_VkSubpassDescription(resourceTracker, (VkSubpassDescription*)(toTransform->pSubpasses + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->subpassCount; ++i)
+            {
+                transform_fromhost_VkSubpassDescription(resourceTracker, (VkSubpassDescription*)(toTransform->pSubpasses + i));
+            }
         }
     }
-    if (toTransform->pDependencies)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->dependencyCount; ++i)
+        if (toTransform->pDependencies)
         {
-            transform_fromhost_VkSubpassDependency(resourceTracker, (VkSubpassDependency*)(toTransform->pDependencies + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->dependencyCount; ++i)
+            {
+                transform_fromhost_VkSubpassDependency(resourceTracker, (VkSubpassDependency*)(toTransform->pDependencies + i));
+            }
         }
     }
 }
@@ -2364,68 +2684,6 @@
     (void)toTransform;
 }
 
-void transform_tohost_VkImageCopy(
-    ResourceTracker* resourceTracker,
-    VkImageCopy* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    transform_tohost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->srcSubresource));
-    transform_tohost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->srcOffset));
-    transform_tohost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->dstSubresource));
-    transform_tohost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->dstOffset));
-    transform_tohost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->extent));
-}
-
-void transform_fromhost_VkImageCopy(
-    ResourceTracker* resourceTracker,
-    VkImageCopy* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    transform_fromhost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->srcSubresource));
-    transform_fromhost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->srcOffset));
-    transform_fromhost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->dstSubresource));
-    transform_fromhost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->dstOffset));
-    transform_fromhost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->extent));
-}
-
-void transform_tohost_VkImageBlit(
-    ResourceTracker* resourceTracker,
-    VkImageBlit* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    transform_tohost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->srcSubresource));
-    for (uint32_t i = 0; i < (uint32_t)2; ++i)
-    {
-        transform_tohost_VkOffset3D(resourceTracker, (VkOffset3D*)(toTransform->srcOffsets + i));
-    }
-    transform_tohost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->dstSubresource));
-    for (uint32_t i = 0; i < (uint32_t)2; ++i)
-    {
-        transform_tohost_VkOffset3D(resourceTracker, (VkOffset3D*)(toTransform->dstOffsets + i));
-    }
-}
-
-void transform_fromhost_VkImageBlit(
-    ResourceTracker* resourceTracker,
-    VkImageBlit* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    transform_fromhost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->srcSubresource));
-    for (uint32_t i = 0; i < (uint32_t)2; ++i)
-    {
-        transform_fromhost_VkOffset3D(resourceTracker, (VkOffset3D*)(toTransform->srcOffsets + i));
-    }
-    transform_fromhost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->dstSubresource));
-    for (uint32_t i = 0; i < (uint32_t)2; ++i)
-    {
-        transform_fromhost_VkOffset3D(resourceTracker, (VkOffset3D*)(toTransform->dstOffsets + i));
-    }
-}
-
 void transform_tohost_VkBufferImageCopy(
     ResourceTracker* resourceTracker,
     VkBufferImageCopy* toTransform)
@@ -2536,6 +2794,68 @@
     transform_fromhost_VkRect2D(resourceTracker, (VkRect2D*)(&toTransform->rect));
 }
 
+void transform_tohost_VkImageBlit(
+    ResourceTracker* resourceTracker,
+    VkImageBlit* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_tohost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->srcSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        transform_tohost_VkOffset3D(resourceTracker, (VkOffset3D*)(toTransform->srcOffsets + i));
+    }
+    transform_tohost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->dstSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        transform_tohost_VkOffset3D(resourceTracker, (VkOffset3D*)(toTransform->dstOffsets + i));
+    }
+}
+
+void transform_fromhost_VkImageBlit(
+    ResourceTracker* resourceTracker,
+    VkImageBlit* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_fromhost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->srcSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        transform_fromhost_VkOffset3D(resourceTracker, (VkOffset3D*)(toTransform->srcOffsets + i));
+    }
+    transform_fromhost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->dstSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        transform_fromhost_VkOffset3D(resourceTracker, (VkOffset3D*)(toTransform->dstOffsets + i));
+    }
+}
+
+void transform_tohost_VkImageCopy(
+    ResourceTracker* resourceTracker,
+    VkImageCopy* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_tohost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->srcSubresource));
+    transform_tohost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->srcOffset));
+    transform_tohost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->dstSubresource));
+    transform_tohost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->dstOffset));
+    transform_tohost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->extent));
+}
+
+void transform_fromhost_VkImageCopy(
+    ResourceTracker* resourceTracker,
+    VkImageCopy* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_fromhost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->srcSubresource));
+    transform_fromhost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->srcOffset));
+    transform_fromhost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->dstSubresource));
+    transform_fromhost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->dstOffset));
+    transform_fromhost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->extent));
+}
+
 void transform_tohost_VkImageResolve(
     ResourceTracker* resourceTracker,
     VkImageResolve* toTransform)
@@ -2562,80 +2882,6 @@
     transform_fromhost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->extent));
 }
 
-void transform_tohost_VkMemoryBarrier(
-    ResourceTracker* resourceTracker,
-    VkMemoryBarrier* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkMemoryBarrier(
-    ResourceTracker* resourceTracker,
-    VkMemoryBarrier* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_tohost_VkBufferMemoryBarrier(
-    ResourceTracker* resourceTracker,
-    VkBufferMemoryBarrier* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkBufferMemoryBarrier(
-    ResourceTracker* resourceTracker,
-    VkBufferMemoryBarrier* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_tohost_VkImageMemoryBarrier(
-    ResourceTracker* resourceTracker,
-    VkImageMemoryBarrier* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-    transform_tohost_VkImageSubresourceRange(resourceTracker, (VkImageSubresourceRange*)(&toTransform->subresourceRange));
-}
-
-void transform_fromhost_VkImageMemoryBarrier(
-    ResourceTracker* resourceTracker,
-    VkImageMemoryBarrier* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-    transform_fromhost_VkImageSubresourceRange(resourceTracker, (VkImageSubresourceRange*)(&toTransform->subresourceRange));
-}
-
 void transform_tohost_VkRenderPassBeginInfo(
     ResourceTracker* resourceTracker,
     VkRenderPassBeginInfo* toTransform)
@@ -2647,11 +2893,14 @@
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
     transform_tohost_VkRect2D(resourceTracker, (VkRect2D*)(&toTransform->renderArea));
-    if (toTransform->pClearValues)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->clearValueCount; ++i)
+        if (toTransform->pClearValues)
         {
-            transform_tohost_VkClearValue(resourceTracker, (VkClearValue*)(toTransform->pClearValues + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->clearValueCount; ++i)
+            {
+                transform_tohost_VkClearValue(resourceTracker, (VkClearValue*)(toTransform->pClearValues + i));
+            }
         }
     }
 }
@@ -2667,111 +2916,18 @@
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
     transform_fromhost_VkRect2D(resourceTracker, (VkRect2D*)(&toTransform->renderArea));
-    if (toTransform->pClearValues)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->clearValueCount; ++i)
+        if (toTransform->pClearValues)
         {
-            transform_fromhost_VkClearValue(resourceTracker, (VkClearValue*)(toTransform->pClearValues + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->clearValueCount; ++i)
+            {
+                transform_fromhost_VkClearValue(resourceTracker, (VkClearValue*)(toTransform->pClearValues + i));
+            }
         }
     }
 }
 
-void transform_tohost_VkDispatchIndirectCommand(
-    ResourceTracker* resourceTracker,
-    VkDispatchIndirectCommand* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkDispatchIndirectCommand(
-    ResourceTracker* resourceTracker,
-    VkDispatchIndirectCommand* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkDrawIndexedIndirectCommand(
-    ResourceTracker* resourceTracker,
-    VkDrawIndexedIndirectCommand* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkDrawIndexedIndirectCommand(
-    ResourceTracker* resourceTracker,
-    VkDrawIndexedIndirectCommand* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkDrawIndirectCommand(
-    ResourceTracker* resourceTracker,
-    VkDrawIndirectCommand* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkDrawIndirectCommand(
-    ResourceTracker* resourceTracker,
-    VkDrawIndirectCommand* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkBaseOutStructure(
-    ResourceTracker* resourceTracker,
-    VkBaseOutStructure* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkBaseOutStructure(
-    ResourceTracker* resourceTracker,
-    VkBaseOutStructure* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_tohost_VkBaseInStructure(
-    ResourceTracker* resourceTracker,
-    VkBaseInStructure* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkBaseInStructure(
-    ResourceTracker* resourceTracker,
-    VkBaseInStructure* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
 #endif
 #ifdef VK_VERSION_1_1
 void transform_tohost_VkPhysicalDeviceSubgroupProperties(
@@ -2956,11 +3112,14 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pDeviceRenderAreas)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->deviceRenderAreaCount; ++i)
+        if (toTransform->pDeviceRenderAreas)
         {
-            transform_tohost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pDeviceRenderAreas + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->deviceRenderAreaCount; ++i)
+            {
+                transform_tohost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pDeviceRenderAreas + i));
+            }
         }
     }
 }
@@ -2975,11 +3134,14 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pDeviceRenderAreas)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->deviceRenderAreaCount; ++i)
+        if (toTransform->pDeviceRenderAreas)
         {
-            transform_fromhost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pDeviceRenderAreas + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->deviceRenderAreaCount; ++i)
+            {
+                transform_fromhost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pDeviceRenderAreas + i));
+            }
         }
     }
 }
@@ -3090,11 +3252,14 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pSplitInstanceBindRegions)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->splitInstanceBindRegionCount; ++i)
+        if (toTransform->pSplitInstanceBindRegions)
         {
-            transform_tohost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pSplitInstanceBindRegions + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->splitInstanceBindRegionCount; ++i)
+            {
+                transform_tohost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pSplitInstanceBindRegions + i));
+            }
         }
     }
 }
@@ -3109,11 +3274,14 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pSplitInstanceBindRegions)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->splitInstanceBindRegionCount; ++i)
+        if (toTransform->pSplitInstanceBindRegions)
         {
-            transform_fromhost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pSplitInstanceBindRegions + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->splitInstanceBindRegionCount; ++i)
+            {
+                transform_fromhost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pSplitInstanceBindRegions + i));
+            }
         }
     }
 }
@@ -3570,11 +3738,14 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pAspectReferences)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->aspectReferenceCount; ++i)
+        if (toTransform->pAspectReferences)
         {
-            transform_tohost_VkInputAttachmentAspectReference(resourceTracker, (VkInputAttachmentAspectReference*)(toTransform->pAspectReferences + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->aspectReferenceCount; ++i)
+            {
+                transform_tohost_VkInputAttachmentAspectReference(resourceTracker, (VkInputAttachmentAspectReference*)(toTransform->pAspectReferences + i));
+            }
         }
     }
 }
@@ -3589,11 +3760,14 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pAspectReferences)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->aspectReferenceCount; ++i)
+        if (toTransform->pAspectReferences)
         {
-            transform_fromhost_VkInputAttachmentAspectReference(resourceTracker, (VkInputAttachmentAspectReference*)(toTransform->pAspectReferences + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->aspectReferenceCount; ++i)
+            {
+                transform_fromhost_VkInputAttachmentAspectReference(resourceTracker, (VkInputAttachmentAspectReference*)(toTransform->pAspectReferences + i));
+            }
         }
     }
 }
@@ -3718,9 +3892,9 @@
     }
 }
 
-void transform_tohost_VkPhysicalDeviceVariablePointerFeatures(
+void transform_tohost_VkPhysicalDeviceVariablePointersFeatures(
     ResourceTracker* resourceTracker,
-    VkPhysicalDeviceVariablePointerFeatures* toTransform)
+    VkPhysicalDeviceVariablePointersFeatures* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
@@ -3730,9 +3904,9 @@
     }
 }
 
-void transform_fromhost_VkPhysicalDeviceVariablePointerFeatures(
+void transform_fromhost_VkPhysicalDeviceVariablePointersFeatures(
     ResourceTracker* resourceTracker,
-    VkPhysicalDeviceVariablePointerFeatures* toTransform)
+    VkPhysicalDeviceVariablePointersFeatures* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
@@ -4010,11 +4184,14 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pDescriptorUpdateEntries)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->descriptorUpdateEntryCount; ++i)
+        if (toTransform->pDescriptorUpdateEntries)
         {
-            transform_tohost_VkDescriptorUpdateTemplateEntry(resourceTracker, (VkDescriptorUpdateTemplateEntry*)(toTransform->pDescriptorUpdateEntries + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->descriptorUpdateEntryCount; ++i)
+            {
+                transform_tohost_VkDescriptorUpdateTemplateEntry(resourceTracker, (VkDescriptorUpdateTemplateEntry*)(toTransform->pDescriptorUpdateEntries + i));
+            }
         }
     }
 }
@@ -4029,11 +4206,14 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pDescriptorUpdateEntries)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->descriptorUpdateEntryCount; ++i)
+        if (toTransform->pDescriptorUpdateEntries)
         {
-            transform_fromhost_VkDescriptorUpdateTemplateEntry(resourceTracker, (VkDescriptorUpdateTemplateEntry*)(toTransform->pDescriptorUpdateEntries + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->descriptorUpdateEntryCount; ++i)
+            {
+                transform_fromhost_VkDescriptorUpdateTemplateEntry(resourceTracker, (VkDescriptorUpdateTemplateEntry*)(toTransform->pDescriptorUpdateEntries + i));
+            }
         }
     }
 }
@@ -4446,9 +4626,9 @@
     }
 }
 
-void transform_tohost_VkPhysicalDeviceShaderDrawParameterFeatures(
+void transform_tohost_VkPhysicalDeviceShaderDrawParametersFeatures(
     ResourceTracker* resourceTracker,
-    VkPhysicalDeviceShaderDrawParameterFeatures* toTransform)
+    VkPhysicalDeviceShaderDrawParametersFeatures* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
@@ -4458,9 +4638,9 @@
     }
 }
 
-void transform_fromhost_VkPhysicalDeviceShaderDrawParameterFeatures(
+void transform_fromhost_VkPhysicalDeviceShaderDrawParametersFeatures(
     ResourceTracker* resourceTracker,
-    VkPhysicalDeviceShaderDrawParameterFeatures* toTransform)
+    VkPhysicalDeviceShaderDrawParametersFeatures* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
@@ -4471,6 +4651,1386 @@
 }
 
 #endif
+#ifdef VK_VERSION_1_2
+void transform_tohost_VkPhysicalDeviceVulkan11Features(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkan11Features* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceVulkan11Features(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkan11Features* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceVulkan11Properties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkan11Properties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceVulkan11Properties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkan11Properties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceVulkan12Features(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkan12Features* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceVulkan12Features(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkan12Features* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkConformanceVersion(
+    ResourceTracker* resourceTracker,
+    VkConformanceVersion* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkConformanceVersion(
+    ResourceTracker* resourceTracker,
+    VkConformanceVersion* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkPhysicalDeviceVulkan12Properties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkan12Properties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkConformanceVersion(resourceTracker, (VkConformanceVersion*)(&toTransform->conformanceVersion));
+}
+
+void transform_fromhost_VkPhysicalDeviceVulkan12Properties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkan12Properties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkConformanceVersion(resourceTracker, (VkConformanceVersion*)(&toTransform->conformanceVersion));
+}
+
+void transform_tohost_VkImageFormatListCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkImageFormatListCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkImageFormatListCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkImageFormatListCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkAttachmentDescription2(
+    ResourceTracker* resourceTracker,
+    VkAttachmentDescription2* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkAttachmentDescription2(
+    ResourceTracker* resourceTracker,
+    VkAttachmentDescription2* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkAttachmentReference2(
+    ResourceTracker* resourceTracker,
+    VkAttachmentReference2* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkAttachmentReference2(
+    ResourceTracker* resourceTracker,
+    VkAttachmentReference2* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkSubpassDescription2(
+    ResourceTracker* resourceTracker,
+    VkSubpassDescription2* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pInputAttachments)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->inputAttachmentCount; ++i)
+            {
+                transform_tohost_VkAttachmentReference2(resourceTracker, (VkAttachmentReference2*)(toTransform->pInputAttachments + i));
+            }
+        }
+    }
+    if (toTransform)
+    {
+        if (toTransform->pColorAttachments)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->colorAttachmentCount; ++i)
+            {
+                transform_tohost_VkAttachmentReference2(resourceTracker, (VkAttachmentReference2*)(toTransform->pColorAttachments + i));
+            }
+        }
+    }
+    if (toTransform)
+    {
+        if (toTransform->pResolveAttachments)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->colorAttachmentCount; ++i)
+            {
+                transform_tohost_VkAttachmentReference2(resourceTracker, (VkAttachmentReference2*)(toTransform->pResolveAttachments + i));
+            }
+        }
+    }
+    if (toTransform->pDepthStencilAttachment)
+    {
+        transform_tohost_VkAttachmentReference2(resourceTracker, (VkAttachmentReference2*)(toTransform->pDepthStencilAttachment));
+    }
+}
+
+void transform_fromhost_VkSubpassDescription2(
+    ResourceTracker* resourceTracker,
+    VkSubpassDescription2* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pInputAttachments)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->inputAttachmentCount; ++i)
+            {
+                transform_fromhost_VkAttachmentReference2(resourceTracker, (VkAttachmentReference2*)(toTransform->pInputAttachments + i));
+            }
+        }
+    }
+    if (toTransform)
+    {
+        if (toTransform->pColorAttachments)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->colorAttachmentCount; ++i)
+            {
+                transform_fromhost_VkAttachmentReference2(resourceTracker, (VkAttachmentReference2*)(toTransform->pColorAttachments + i));
+            }
+        }
+    }
+    if (toTransform)
+    {
+        if (toTransform->pResolveAttachments)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->colorAttachmentCount; ++i)
+            {
+                transform_fromhost_VkAttachmentReference2(resourceTracker, (VkAttachmentReference2*)(toTransform->pResolveAttachments + i));
+            }
+        }
+    }
+    if (toTransform->pDepthStencilAttachment)
+    {
+        transform_fromhost_VkAttachmentReference2(resourceTracker, (VkAttachmentReference2*)(toTransform->pDepthStencilAttachment));
+    }
+}
+
+void transform_tohost_VkSubpassDependency2(
+    ResourceTracker* resourceTracker,
+    VkSubpassDependency2* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkSubpassDependency2(
+    ResourceTracker* resourceTracker,
+    VkSubpassDependency2* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkRenderPassCreateInfo2(
+    ResourceTracker* resourceTracker,
+    VkRenderPassCreateInfo2* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pAttachments)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentCount; ++i)
+            {
+                transform_tohost_VkAttachmentDescription2(resourceTracker, (VkAttachmentDescription2*)(toTransform->pAttachments + i));
+            }
+        }
+    }
+    if (toTransform)
+    {
+        if (toTransform->pSubpasses)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->subpassCount; ++i)
+            {
+                transform_tohost_VkSubpassDescription2(resourceTracker, (VkSubpassDescription2*)(toTransform->pSubpasses + i));
+            }
+        }
+    }
+    if (toTransform)
+    {
+        if (toTransform->pDependencies)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->dependencyCount; ++i)
+            {
+                transform_tohost_VkSubpassDependency2(resourceTracker, (VkSubpassDependency2*)(toTransform->pDependencies + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkRenderPassCreateInfo2(
+    ResourceTracker* resourceTracker,
+    VkRenderPassCreateInfo2* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pAttachments)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentCount; ++i)
+            {
+                transform_fromhost_VkAttachmentDescription2(resourceTracker, (VkAttachmentDescription2*)(toTransform->pAttachments + i));
+            }
+        }
+    }
+    if (toTransform)
+    {
+        if (toTransform->pSubpasses)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->subpassCount; ++i)
+            {
+                transform_fromhost_VkSubpassDescription2(resourceTracker, (VkSubpassDescription2*)(toTransform->pSubpasses + i));
+            }
+        }
+    }
+    if (toTransform)
+    {
+        if (toTransform->pDependencies)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->dependencyCount; ++i)
+            {
+                transform_fromhost_VkSubpassDependency2(resourceTracker, (VkSubpassDependency2*)(toTransform->pDependencies + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkSubpassBeginInfo(
+    ResourceTracker* resourceTracker,
+    VkSubpassBeginInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkSubpassBeginInfo(
+    ResourceTracker* resourceTracker,
+    VkSubpassBeginInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkSubpassEndInfo(
+    ResourceTracker* resourceTracker,
+    VkSubpassEndInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkSubpassEndInfo(
+    ResourceTracker* resourceTracker,
+    VkSubpassEndInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDevice8BitStorageFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevice8BitStorageFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDevice8BitStorageFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevice8BitStorageFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceDriverProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDriverProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkConformanceVersion(resourceTracker, (VkConformanceVersion*)(&toTransform->conformanceVersion));
+}
+
+void transform_fromhost_VkPhysicalDeviceDriverProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDriverProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkConformanceVersion(resourceTracker, (VkConformanceVersion*)(&toTransform->conformanceVersion));
+}
+
+void transform_tohost_VkPhysicalDeviceShaderAtomicInt64Features(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderAtomicInt64Features* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceShaderAtomicInt64Features(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderAtomicInt64Features* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceShaderFloat16Int8Features(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderFloat16Int8Features* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceShaderFloat16Int8Features(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderFloat16Int8Features* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceFloatControlsProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFloatControlsProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceFloatControlsProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFloatControlsProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkDescriptorSetLayoutBindingFlagsCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorSetLayoutBindingFlagsCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkDescriptorSetLayoutBindingFlagsCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorSetLayoutBindingFlagsCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceDescriptorIndexingFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDescriptorIndexingFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceDescriptorIndexingFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDescriptorIndexingFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceDescriptorIndexingProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDescriptorIndexingProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceDescriptorIndexingProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDescriptorIndexingProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkDescriptorSetVariableDescriptorCountAllocateInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorSetVariableDescriptorCountAllocateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkDescriptorSetVariableDescriptorCountAllocateInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorSetVariableDescriptorCountAllocateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkDescriptorSetVariableDescriptorCountLayoutSupport(
+    ResourceTracker* resourceTracker,
+    VkDescriptorSetVariableDescriptorCountLayoutSupport* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkDescriptorSetVariableDescriptorCountLayoutSupport(
+    ResourceTracker* resourceTracker,
+    VkDescriptorSetVariableDescriptorCountLayoutSupport* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkSubpassDescriptionDepthStencilResolve(
+    ResourceTracker* resourceTracker,
+    VkSubpassDescriptionDepthStencilResolve* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform->pDepthStencilResolveAttachment)
+    {
+        transform_tohost_VkAttachmentReference2(resourceTracker, (VkAttachmentReference2*)(toTransform->pDepthStencilResolveAttachment));
+    }
+}
+
+void transform_fromhost_VkSubpassDescriptionDepthStencilResolve(
+    ResourceTracker* resourceTracker,
+    VkSubpassDescriptionDepthStencilResolve* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform->pDepthStencilResolveAttachment)
+    {
+        transform_fromhost_VkAttachmentReference2(resourceTracker, (VkAttachmentReference2*)(toTransform->pDepthStencilResolveAttachment));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceDepthStencilResolveProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDepthStencilResolveProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceDepthStencilResolveProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDepthStencilResolveProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceScalarBlockLayoutFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceScalarBlockLayoutFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceScalarBlockLayoutFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceScalarBlockLayoutFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkImageStencilUsageCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkImageStencilUsageCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkImageStencilUsageCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkImageStencilUsageCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkSamplerReductionModeCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkSamplerReductionModeCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkSamplerReductionModeCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkSamplerReductionModeCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceSamplerFilterMinmaxProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceSamplerFilterMinmaxProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceSamplerFilterMinmaxProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceSamplerFilterMinmaxProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceVulkanMemoryModelFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkanMemoryModelFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceVulkanMemoryModelFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkanMemoryModelFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceImagelessFramebufferFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceImagelessFramebufferFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceImagelessFramebufferFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceImagelessFramebufferFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkFramebufferAttachmentImageInfo(
+    ResourceTracker* resourceTracker,
+    VkFramebufferAttachmentImageInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkFramebufferAttachmentImageInfo(
+    ResourceTracker* resourceTracker,
+    VkFramebufferAttachmentImageInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkFramebufferAttachmentsCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkFramebufferAttachmentsCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pAttachmentImageInfos)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentImageInfoCount; ++i)
+            {
+                transform_tohost_VkFramebufferAttachmentImageInfo(resourceTracker, (VkFramebufferAttachmentImageInfo*)(toTransform->pAttachmentImageInfos + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkFramebufferAttachmentsCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkFramebufferAttachmentsCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pAttachmentImageInfos)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentImageInfoCount; ++i)
+            {
+                transform_fromhost_VkFramebufferAttachmentImageInfo(resourceTracker, (VkFramebufferAttachmentImageInfo*)(toTransform->pAttachmentImageInfos + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkRenderPassAttachmentBeginInfo(
+    ResourceTracker* resourceTracker,
+    VkRenderPassAttachmentBeginInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkRenderPassAttachmentBeginInfo(
+    ResourceTracker* resourceTracker,
+    VkRenderPassAttachmentBeginInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceUniformBufferStandardLayoutFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceUniformBufferStandardLayoutFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkAttachmentReferenceStencilLayout(
+    ResourceTracker* resourceTracker,
+    VkAttachmentReferenceStencilLayout* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkAttachmentReferenceStencilLayout(
+    ResourceTracker* resourceTracker,
+    VkAttachmentReferenceStencilLayout* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkAttachmentDescriptionStencilLayout(
+    ResourceTracker* resourceTracker,
+    VkAttachmentDescriptionStencilLayout* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkAttachmentDescriptionStencilLayout(
+    ResourceTracker* resourceTracker,
+    VkAttachmentDescriptionStencilLayout* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceHostQueryResetFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceHostQueryResetFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceHostQueryResetFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceHostQueryResetFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceTimelineSemaphoreFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTimelineSemaphoreFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceTimelineSemaphoreFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTimelineSemaphoreFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceTimelineSemaphoreProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTimelineSemaphoreProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceTimelineSemaphoreProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTimelineSemaphoreProperties* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkSemaphoreTypeCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkSemaphoreTypeCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkSemaphoreTypeCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkSemaphoreTypeCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkTimelineSemaphoreSubmitInfo(
+    ResourceTracker* resourceTracker,
+    VkTimelineSemaphoreSubmitInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkTimelineSemaphoreSubmitInfo(
+    ResourceTracker* resourceTracker,
+    VkTimelineSemaphoreSubmitInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkSemaphoreWaitInfo(
+    ResourceTracker* resourceTracker,
+    VkSemaphoreWaitInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkSemaphoreWaitInfo(
+    ResourceTracker* resourceTracker,
+    VkSemaphoreWaitInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkSemaphoreSignalInfo(
+    ResourceTracker* resourceTracker,
+    VkSemaphoreSignalInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkSemaphoreSignalInfo(
+    ResourceTracker* resourceTracker,
+    VkSemaphoreSignalInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceBufferDeviceAddressFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceBufferDeviceAddressFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceBufferDeviceAddressFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceBufferDeviceAddressFeatures* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkBufferDeviceAddressInfo(
+    ResourceTracker* resourceTracker,
+    VkBufferDeviceAddressInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkBufferDeviceAddressInfo(
+    ResourceTracker* resourceTracker,
+    VkBufferDeviceAddressInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkBufferOpaqueCaptureAddressCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkBufferOpaqueCaptureAddressCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkBufferOpaqueCaptureAddressCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkBufferOpaqueCaptureAddressCreateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkMemoryOpaqueCaptureAddressAllocateInfo(
+    ResourceTracker* resourceTracker,
+    VkMemoryOpaqueCaptureAddressAllocateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkMemoryOpaqueCaptureAddressAllocateInfo(
+    ResourceTracker* resourceTracker,
+    VkMemoryOpaqueCaptureAddressAllocateInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkDeviceMemoryOpaqueCaptureAddressInfo(
+    ResourceTracker* resourceTracker,
+    VkDeviceMemoryOpaqueCaptureAddressInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    resourceTracker->deviceMemoryTransform_tohost((VkDeviceMemory*)&toTransform->memory, 1, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkDeviceMemoryOpaqueCaptureAddressInfo(
+    ResourceTracker* resourceTracker,
+    VkDeviceMemoryOpaqueCaptureAddressInfo* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    resourceTracker->deviceMemoryTransform_fromhost((VkDeviceMemory*)&toTransform->memory, 1, (VkDeviceSize*)nullptr, 0, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
 #ifdef VK_KHR_surface
 void transform_tohost_VkSurfaceCapabilitiesKHR(
     ResourceTracker* resourceTracker,
@@ -4708,26 +6268,6 @@
 
 #endif
 #ifdef VK_KHR_display
-void transform_tohost_VkDisplayPropertiesKHR(
-    ResourceTracker* resourceTracker,
-    VkDisplayPropertiesKHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    transform_tohost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->physicalDimensions));
-    transform_tohost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->physicalResolution));
-}
-
-void transform_fromhost_VkDisplayPropertiesKHR(
-    ResourceTracker* resourceTracker,
-    VkDisplayPropertiesKHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    transform_fromhost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->physicalDimensions));
-    transform_fromhost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->physicalResolution));
-}
-
 void transform_tohost_VkDisplayModeParametersKHR(
     ResourceTracker* resourceTracker,
     VkDisplayModeParametersKHR* toTransform)
@@ -4746,24 +6286,6 @@
     transform_fromhost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->visibleRegion));
 }
 
-void transform_tohost_VkDisplayModePropertiesKHR(
-    ResourceTracker* resourceTracker,
-    VkDisplayModePropertiesKHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    transform_tohost_VkDisplayModeParametersKHR(resourceTracker, (VkDisplayModeParametersKHR*)(&toTransform->parameters));
-}
-
-void transform_fromhost_VkDisplayModePropertiesKHR(
-    ResourceTracker* resourceTracker,
-    VkDisplayModePropertiesKHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    transform_fromhost_VkDisplayModeParametersKHR(resourceTracker, (VkDisplayModeParametersKHR*)(&toTransform->parameters));
-}
-
 void transform_tohost_VkDisplayModeCreateInfoKHR(
     ResourceTracker* resourceTracker,
     VkDisplayModeCreateInfoKHR* toTransform)
@@ -4790,6 +6312,24 @@
     transform_fromhost_VkDisplayModeParametersKHR(resourceTracker, (VkDisplayModeParametersKHR*)(&toTransform->parameters));
 }
 
+void transform_tohost_VkDisplayModePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkDisplayModePropertiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_tohost_VkDisplayModeParametersKHR(resourceTracker, (VkDisplayModeParametersKHR*)(&toTransform->parameters));
+}
+
+void transform_fromhost_VkDisplayModePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkDisplayModePropertiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_fromhost_VkDisplayModeParametersKHR(resourceTracker, (VkDisplayModeParametersKHR*)(&toTransform->parameters));
+}
+
 void transform_tohost_VkDisplayPlaneCapabilitiesKHR(
     ResourceTracker* resourceTracker,
     VkDisplayPlaneCapabilitiesKHR* toTransform)
@@ -4838,6 +6378,26 @@
     (void)toTransform;
 }
 
+void transform_tohost_VkDisplayPropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkDisplayPropertiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_tohost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->physicalDimensions));
+    transform_tohost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->physicalResolution));
+}
+
+void transform_fromhost_VkDisplayPropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkDisplayPropertiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_fromhost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->physicalDimensions));
+    transform_fromhost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->physicalResolution));
+}
+
 void transform_tohost_VkDisplaySurfaceCreateInfoKHR(
     ResourceTracker* resourceTracker,
     VkDisplaySurfaceCreateInfoKHR* toTransform)
@@ -4973,32 +6533,6 @@
 }
 
 #endif
-#ifdef VK_KHR_mir_surface
-void transform_tohost_VkMirSurfaceCreateInfoKHR(
-    ResourceTracker* resourceTracker,
-    VkMirSurfaceCreateInfoKHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkMirSurfaceCreateInfoKHR(
-    ResourceTracker* resourceTracker,
-    VkMirSurfaceCreateInfoKHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-#endif
 #ifdef VK_KHR_android_surface
 void transform_tohost_VkAndroidSurfaceCreateInfoKHR(
     ResourceTracker* resourceTracker,
@@ -5455,6 +6989,8 @@
 }
 
 #endif
+#ifdef VK_KHR_shader_float16_int8
+#endif
 #ifdef VK_KHR_16bit_storage
 #endif
 #ifdef VK_KHR_incremental_present
@@ -5484,11 +7020,14 @@
 {
     (void)resourceTracker;
     (void)toTransform;
-    if (toTransform->pRectangles)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->rectangleCount; ++i)
+        if (toTransform->pRectangles)
         {
-            transform_tohost_VkRectLayerKHR(resourceTracker, (VkRectLayerKHR*)(toTransform->pRectangles + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->rectangleCount; ++i)
+            {
+                transform_tohost_VkRectLayerKHR(resourceTracker, (VkRectLayerKHR*)(toTransform->pRectangles + i));
+            }
         }
     }
 }
@@ -5499,11 +7038,14 @@
 {
     (void)resourceTracker;
     (void)toTransform;
-    if (toTransform->pRectangles)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->rectangleCount; ++i)
+        if (toTransform->pRectangles)
         {
-            transform_fromhost_VkRectLayerKHR(resourceTracker, (VkRectLayerKHR*)(toTransform->pRectangles + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->rectangleCount; ++i)
+            {
+                transform_fromhost_VkRectLayerKHR(resourceTracker, (VkRectLayerKHR*)(toTransform->pRectangles + i));
+            }
         }
     }
 }
@@ -5518,11 +7060,14 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pRegions)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->swapchainCount; ++i)
+        if (toTransform->pRegions)
         {
-            transform_tohost_VkPresentRegionKHR(resourceTracker, (VkPresentRegionKHR*)(toTransform->pRegions + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->swapchainCount; ++i)
+            {
+                transform_tohost_VkPresentRegionKHR(resourceTracker, (VkPresentRegionKHR*)(toTransform->pRegions + i));
+            }
         }
     }
 }
@@ -5537,11 +7082,14 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pRegions)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->swapchainCount; ++i)
+        if (toTransform->pRegions)
         {
-            transform_fromhost_VkPresentRegionKHR(resourceTracker, (VkPresentRegionKHR*)(toTransform->pRegions + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->swapchainCount; ++i)
+            {
+                transform_fromhost_VkPresentRegionKHR(resourceTracker, (VkPresentRegionKHR*)(toTransform->pRegions + i));
+            }
         }
     }
 }
@@ -5549,267 +7097,9 @@
 #endif
 #ifdef VK_KHR_descriptor_update_template
 #endif
+#ifdef VK_KHR_imageless_framebuffer
+#endif
 #ifdef VK_KHR_create_renderpass2
-void transform_tohost_VkAttachmentDescription2KHR(
-    ResourceTracker* resourceTracker,
-    VkAttachmentDescription2KHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkAttachmentDescription2KHR(
-    ResourceTracker* resourceTracker,
-    VkAttachmentDescription2KHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_tohost_VkAttachmentReference2KHR(
-    ResourceTracker* resourceTracker,
-    VkAttachmentReference2KHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkAttachmentReference2KHR(
-    ResourceTracker* resourceTracker,
-    VkAttachmentReference2KHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_tohost_VkSubpassDescription2KHR(
-    ResourceTracker* resourceTracker,
-    VkSubpassDescription2KHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-    if (toTransform->pInputAttachments)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->inputAttachmentCount; ++i)
-        {
-            transform_tohost_VkAttachmentReference2KHR(resourceTracker, (VkAttachmentReference2KHR*)(toTransform->pInputAttachments + i));
-        }
-    }
-    if (toTransform->pColorAttachments)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->colorAttachmentCount; ++i)
-        {
-            transform_tohost_VkAttachmentReference2KHR(resourceTracker, (VkAttachmentReference2KHR*)(toTransform->pColorAttachments + i));
-        }
-    }
-    if (toTransform->pResolveAttachments)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->colorAttachmentCount; ++i)
-        {
-            transform_tohost_VkAttachmentReference2KHR(resourceTracker, (VkAttachmentReference2KHR*)(toTransform->pResolveAttachments + i));
-        }
-    }
-    if (toTransform->pDepthStencilAttachment)
-    {
-        transform_tohost_VkAttachmentReference2KHR(resourceTracker, (VkAttachmentReference2KHR*)(toTransform->pDepthStencilAttachment));
-    }
-}
-
-void transform_fromhost_VkSubpassDescription2KHR(
-    ResourceTracker* resourceTracker,
-    VkSubpassDescription2KHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-    if (toTransform->pInputAttachments)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->inputAttachmentCount; ++i)
-        {
-            transform_fromhost_VkAttachmentReference2KHR(resourceTracker, (VkAttachmentReference2KHR*)(toTransform->pInputAttachments + i));
-        }
-    }
-    if (toTransform->pColorAttachments)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->colorAttachmentCount; ++i)
-        {
-            transform_fromhost_VkAttachmentReference2KHR(resourceTracker, (VkAttachmentReference2KHR*)(toTransform->pColorAttachments + i));
-        }
-    }
-    if (toTransform->pResolveAttachments)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->colorAttachmentCount; ++i)
-        {
-            transform_fromhost_VkAttachmentReference2KHR(resourceTracker, (VkAttachmentReference2KHR*)(toTransform->pResolveAttachments + i));
-        }
-    }
-    if (toTransform->pDepthStencilAttachment)
-    {
-        transform_fromhost_VkAttachmentReference2KHR(resourceTracker, (VkAttachmentReference2KHR*)(toTransform->pDepthStencilAttachment));
-    }
-}
-
-void transform_tohost_VkSubpassDependency2KHR(
-    ResourceTracker* resourceTracker,
-    VkSubpassDependency2KHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkSubpassDependency2KHR(
-    ResourceTracker* resourceTracker,
-    VkSubpassDependency2KHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_tohost_VkRenderPassCreateInfo2KHR(
-    ResourceTracker* resourceTracker,
-    VkRenderPassCreateInfo2KHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-    if (toTransform->pAttachments)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentCount; ++i)
-        {
-            transform_tohost_VkAttachmentDescription2KHR(resourceTracker, (VkAttachmentDescription2KHR*)(toTransform->pAttachments + i));
-        }
-    }
-    if (toTransform->pSubpasses)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->subpassCount; ++i)
-        {
-            transform_tohost_VkSubpassDescription2KHR(resourceTracker, (VkSubpassDescription2KHR*)(toTransform->pSubpasses + i));
-        }
-    }
-    if (toTransform->pDependencies)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->dependencyCount; ++i)
-        {
-            transform_tohost_VkSubpassDependency2KHR(resourceTracker, (VkSubpassDependency2KHR*)(toTransform->pDependencies + i));
-        }
-    }
-}
-
-void transform_fromhost_VkRenderPassCreateInfo2KHR(
-    ResourceTracker* resourceTracker,
-    VkRenderPassCreateInfo2KHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-    if (toTransform->pAttachments)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentCount; ++i)
-        {
-            transform_fromhost_VkAttachmentDescription2KHR(resourceTracker, (VkAttachmentDescription2KHR*)(toTransform->pAttachments + i));
-        }
-    }
-    if (toTransform->pSubpasses)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->subpassCount; ++i)
-        {
-            transform_fromhost_VkSubpassDescription2KHR(resourceTracker, (VkSubpassDescription2KHR*)(toTransform->pSubpasses + i));
-        }
-    }
-    if (toTransform->pDependencies)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->dependencyCount; ++i)
-        {
-            transform_fromhost_VkSubpassDependency2KHR(resourceTracker, (VkSubpassDependency2KHR*)(toTransform->pDependencies + i));
-        }
-    }
-}
-
-void transform_tohost_VkSubpassBeginInfoKHR(
-    ResourceTracker* resourceTracker,
-    VkSubpassBeginInfoKHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkSubpassBeginInfoKHR(
-    ResourceTracker* resourceTracker,
-    VkSubpassBeginInfoKHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_tohost_VkSubpassEndInfoKHR(
-    ResourceTracker* resourceTracker,
-    VkSubpassEndInfoKHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkSubpassEndInfoKHR(
-    ResourceTracker* resourceTracker,
-    VkSubpassEndInfoKHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
 #endif
 #ifdef VK_KHR_shared_presentable_image
 void transform_tohost_VkSharedPresentSurfaceCapabilitiesKHR(
@@ -5965,6 +7255,192 @@
 }
 
 #endif
+#ifdef VK_KHR_performance_query
+void transform_tohost_VkPhysicalDevicePerformanceQueryFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePerformanceQueryFeaturesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDevicePerformanceQueryFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePerformanceQueryFeaturesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDevicePerformanceQueryPropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePerformanceQueryPropertiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDevicePerformanceQueryPropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePerformanceQueryPropertiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPerformanceCounterKHR(
+    ResourceTracker* resourceTracker,
+    VkPerformanceCounterKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPerformanceCounterKHR(
+    ResourceTracker* resourceTracker,
+    VkPerformanceCounterKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPerformanceCounterDescriptionKHR(
+    ResourceTracker* resourceTracker,
+    VkPerformanceCounterDescriptionKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPerformanceCounterDescriptionKHR(
+    ResourceTracker* resourceTracker,
+    VkPerformanceCounterDescriptionKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkQueryPoolPerformanceCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkQueryPoolPerformanceCreateInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkQueryPoolPerformanceCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkQueryPoolPerformanceCreateInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPerformanceCounterResultKHR(
+    ResourceTracker* resourceTracker,
+    VkPerformanceCounterResultKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkPerformanceCounterResultKHR(
+    ResourceTracker* resourceTracker,
+    VkPerformanceCounterResultKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkAcquireProfilingLockInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAcquireProfilingLockInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkAcquireProfilingLockInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAcquireProfilingLockInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPerformanceQuerySubmitInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPerformanceQuerySubmitInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPerformanceQuerySubmitInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPerformanceQuerySubmitInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
 #ifdef VK_KHR_maintenance2
 #endif
 #ifdef VK_KHR_get_surface_capabilities2
@@ -6186,43 +7662,15 @@
 #ifdef VK_KHR_get_memory_requirements2
 #endif
 #ifdef VK_KHR_image_format_list
-void transform_tohost_VkImageFormatListCreateInfoKHR(
-    ResourceTracker* resourceTracker,
-    VkImageFormatListCreateInfoKHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkImageFormatListCreateInfoKHR(
-    ResourceTracker* resourceTracker,
-    VkImageFormatListCreateInfoKHR* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
 #endif
 #ifdef VK_KHR_sampler_ycbcr_conversion
 #endif
 #ifdef VK_KHR_bind_memory2
 #endif
-#ifdef VK_KHR_maintenance3
-#endif
-#ifdef VK_KHR_draw_indirect_count
-#endif
-#ifdef VK_KHR_8bit_storage
-void transform_tohost_VkPhysicalDevice8BitStorageFeaturesKHR(
+#ifdef VK_KHR_portability_subset
+void transform_tohost_VkPhysicalDevicePortabilitySubsetFeaturesKHR(
     ResourceTracker* resourceTracker,
-    VkPhysicalDevice8BitStorageFeaturesKHR* toTransform)
+    VkPhysicalDevicePortabilitySubsetFeaturesKHR* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
@@ -6232,9 +7680,9 @@
     }
 }
 
-void transform_fromhost_VkPhysicalDevice8BitStorageFeaturesKHR(
+void transform_fromhost_VkPhysicalDevicePortabilitySubsetFeaturesKHR(
     ResourceTracker* resourceTracker,
-    VkPhysicalDevice8BitStorageFeaturesKHR* toTransform)
+    VkPhysicalDevicePortabilitySubsetFeaturesKHR* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
@@ -6244,6 +7692,906 @@
     }
 }
 
+void transform_tohost_VkPhysicalDevicePortabilitySubsetPropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePortabilitySubsetPropertiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDevicePortabilitySubsetPropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePortabilitySubsetPropertiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_KHR_maintenance3
+#endif
+#ifdef VK_KHR_draw_indirect_count
+#endif
+#ifdef VK_KHR_shader_subgroup_extended_types
+#endif
+#ifdef VK_KHR_8bit_storage
+#endif
+#ifdef VK_KHR_shader_atomic_int64
+#endif
+#ifdef VK_KHR_shader_clock
+void transform_tohost_VkPhysicalDeviceShaderClockFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderClockFeaturesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceShaderClockFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderClockFeaturesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_KHR_driver_properties
+#endif
+#ifdef VK_KHR_shader_float_controls
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+void transform_tohost_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+void transform_tohost_VkFragmentShadingRateAttachmentInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkFragmentShadingRateAttachmentInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform->pFragmentShadingRateAttachment)
+    {
+        transform_tohost_VkAttachmentReference2(resourceTracker, (VkAttachmentReference2*)(toTransform->pFragmentShadingRateAttachment));
+    }
+    transform_tohost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->shadingRateAttachmentTexelSize));
+}
+
+void transform_fromhost_VkFragmentShadingRateAttachmentInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkFragmentShadingRateAttachmentInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform->pFragmentShadingRateAttachment)
+    {
+        transform_fromhost_VkAttachmentReference2(resourceTracker, (VkAttachmentReference2*)(toTransform->pFragmentShadingRateAttachment));
+    }
+    transform_fromhost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->shadingRateAttachmentTexelSize));
+}
+
+void transform_tohost_VkPipelineFragmentShadingRateStateCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineFragmentShadingRateStateCreateInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->fragmentSize));
+}
+
+void transform_fromhost_VkPipelineFragmentShadingRateStateCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineFragmentShadingRateStateCreateInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->fragmentSize));
+}
+
+void transform_tohost_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRateFeaturesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRateFeaturesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRatePropertiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->minFragmentShadingRateAttachmentTexelSize));
+    transform_tohost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->maxFragmentShadingRateAttachmentTexelSize));
+    transform_tohost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->maxFragmentSize));
+}
+
+void transform_fromhost_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRatePropertiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->minFragmentShadingRateAttachmentTexelSize));
+    transform_fromhost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->maxFragmentShadingRateAttachmentTexelSize));
+    transform_fromhost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->maxFragmentSize));
+}
+
+void transform_tohost_VkPhysicalDeviceFragmentShadingRateKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRateKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->fragmentSize));
+}
+
+void transform_fromhost_VkPhysicalDeviceFragmentShadingRateKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRateKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->fragmentSize));
+}
+
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+void transform_tohost_VkSurfaceProtectedCapabilitiesKHR(
+    ResourceTracker* resourceTracker,
+    VkSurfaceProtectedCapabilitiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkSurfaceProtectedCapabilitiesKHR(
+    ResourceTracker* resourceTracker,
+    VkSurfaceProtectedCapabilitiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+#endif
+#ifdef VK_KHR_buffer_device_address
+#endif
+#ifdef VK_KHR_deferred_host_operations
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+void transform_tohost_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPipelineInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPipelineInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPipelineExecutablePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutablePropertiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPipelineExecutablePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutablePropertiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPipelineExecutableInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutableInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPipelineExecutableInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutableInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPipelineExecutableStatisticValueKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutableStatisticValueKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkPipelineExecutableStatisticValueKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutableStatisticValueKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkPipelineExecutableStatisticKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutableStatisticKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkPipelineExecutableStatisticValueKHR(resourceTracker, (VkPipelineExecutableStatisticValueKHR*)(&toTransform->value));
+}
+
+void transform_fromhost_VkPipelineExecutableStatisticKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutableStatisticKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkPipelineExecutableStatisticValueKHR(resourceTracker, (VkPipelineExecutableStatisticValueKHR*)(&toTransform->value));
+}
+
+void transform_tohost_VkPipelineExecutableInternalRepresentationKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutableInternalRepresentationKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPipelineExecutableInternalRepresentationKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutableInternalRepresentationKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_KHR_pipeline_library
+void transform_tohost_VkPipelineLibraryCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineLibraryCreateInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPipelineLibraryCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineLibraryCreateInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+void transform_tohost_VkBufferCopy2KHR(
+    ResourceTracker* resourceTracker,
+    VkBufferCopy2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkBufferCopy2KHR(
+    ResourceTracker* resourceTracker,
+    VkBufferCopy2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkCopyBufferInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkCopyBufferInfo2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->regionCount; ++i)
+            {
+                transform_tohost_VkBufferCopy2KHR(resourceTracker, (VkBufferCopy2KHR*)(toTransform->pRegions + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkCopyBufferInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkCopyBufferInfo2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->regionCount; ++i)
+            {
+                transform_fromhost_VkBufferCopy2KHR(resourceTracker, (VkBufferCopy2KHR*)(toTransform->pRegions + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkImageCopy2KHR(
+    ResourceTracker* resourceTracker,
+    VkImageCopy2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->srcSubresource));
+    transform_tohost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->srcOffset));
+    transform_tohost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->dstSubresource));
+    transform_tohost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->dstOffset));
+    transform_tohost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->extent));
+}
+
+void transform_fromhost_VkImageCopy2KHR(
+    ResourceTracker* resourceTracker,
+    VkImageCopy2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->srcSubresource));
+    transform_fromhost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->srcOffset));
+    transform_fromhost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->dstSubresource));
+    transform_fromhost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->dstOffset));
+    transform_fromhost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->extent));
+}
+
+void transform_tohost_VkCopyImageInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkCopyImageInfo2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->regionCount; ++i)
+            {
+                transform_tohost_VkImageCopy2KHR(resourceTracker, (VkImageCopy2KHR*)(toTransform->pRegions + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkCopyImageInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkCopyImageInfo2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->regionCount; ++i)
+            {
+                transform_fromhost_VkImageCopy2KHR(resourceTracker, (VkImageCopy2KHR*)(toTransform->pRegions + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkBufferImageCopy2KHR(
+    ResourceTracker* resourceTracker,
+    VkBufferImageCopy2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->imageSubresource));
+    transform_tohost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->imageOffset));
+    transform_tohost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->imageExtent));
+}
+
+void transform_fromhost_VkBufferImageCopy2KHR(
+    ResourceTracker* resourceTracker,
+    VkBufferImageCopy2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->imageSubresource));
+    transform_fromhost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->imageOffset));
+    transform_fromhost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->imageExtent));
+}
+
+void transform_tohost_VkCopyBufferToImageInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkCopyBufferToImageInfo2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->regionCount; ++i)
+            {
+                transform_tohost_VkBufferImageCopy2KHR(resourceTracker, (VkBufferImageCopy2KHR*)(toTransform->pRegions + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkCopyBufferToImageInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkCopyBufferToImageInfo2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->regionCount; ++i)
+            {
+                transform_fromhost_VkBufferImageCopy2KHR(resourceTracker, (VkBufferImageCopy2KHR*)(toTransform->pRegions + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkCopyImageToBufferInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkCopyImageToBufferInfo2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->regionCount; ++i)
+            {
+                transform_tohost_VkBufferImageCopy2KHR(resourceTracker, (VkBufferImageCopy2KHR*)(toTransform->pRegions + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkCopyImageToBufferInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkCopyImageToBufferInfo2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->regionCount; ++i)
+            {
+                transform_fromhost_VkBufferImageCopy2KHR(resourceTracker, (VkBufferImageCopy2KHR*)(toTransform->pRegions + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkImageBlit2KHR(
+    ResourceTracker* resourceTracker,
+    VkImageBlit2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->srcSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        transform_tohost_VkOffset3D(resourceTracker, (VkOffset3D*)(toTransform->srcOffsets + i));
+    }
+    transform_tohost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->dstSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        transform_tohost_VkOffset3D(resourceTracker, (VkOffset3D*)(toTransform->dstOffsets + i));
+    }
+}
+
+void transform_fromhost_VkImageBlit2KHR(
+    ResourceTracker* resourceTracker,
+    VkImageBlit2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->srcSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        transform_fromhost_VkOffset3D(resourceTracker, (VkOffset3D*)(toTransform->srcOffsets + i));
+    }
+    transform_fromhost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->dstSubresource));
+    for (uint32_t i = 0; i < (uint32_t)2; ++i)
+    {
+        transform_fromhost_VkOffset3D(resourceTracker, (VkOffset3D*)(toTransform->dstOffsets + i));
+    }
+}
+
+void transform_tohost_VkBlitImageInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkBlitImageInfo2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->regionCount; ++i)
+            {
+                transform_tohost_VkImageBlit2KHR(resourceTracker, (VkImageBlit2KHR*)(toTransform->pRegions + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkBlitImageInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkBlitImageInfo2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->regionCount; ++i)
+            {
+                transform_fromhost_VkImageBlit2KHR(resourceTracker, (VkImageBlit2KHR*)(toTransform->pRegions + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkImageResolve2KHR(
+    ResourceTracker* resourceTracker,
+    VkImageResolve2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->srcSubresource));
+    transform_tohost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->srcOffset));
+    transform_tohost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->dstSubresource));
+    transform_tohost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->dstOffset));
+    transform_tohost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->extent));
+}
+
+void transform_fromhost_VkImageResolve2KHR(
+    ResourceTracker* resourceTracker,
+    VkImageResolve2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->srcSubresource));
+    transform_fromhost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->srcOffset));
+    transform_fromhost_VkImageSubresourceLayers(resourceTracker, (VkImageSubresourceLayers*)(&toTransform->dstSubresource));
+    transform_fromhost_VkOffset3D(resourceTracker, (VkOffset3D*)(&toTransform->dstOffset));
+    transform_fromhost_VkExtent3D(resourceTracker, (VkExtent3D*)(&toTransform->extent));
+}
+
+void transform_tohost_VkResolveImageInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkResolveImageInfo2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->regionCount; ++i)
+            {
+                transform_tohost_VkImageResolve2KHR(resourceTracker, (VkImageResolve2KHR*)(toTransform->pRegions + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkResolveImageInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkResolveImageInfo2KHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pRegions)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->regionCount; ++i)
+            {
+                transform_fromhost_VkImageResolve2KHR(resourceTracker, (VkImageResolve2KHR*)(toTransform->pRegions + i));
+            }
+        }
+    }
+}
+
 #endif
 #ifdef VK_ANDROID_native_buffer
 void transform_tohost_VkNativeBufferANDROID(
@@ -6483,6 +8831,130 @@
 }
 
 #endif
+#ifdef VK_EXT_transform_feedback
+void transform_tohost_VkPhysicalDeviceTransformFeedbackFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTransformFeedbackFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceTransformFeedbackFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTransformFeedbackFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceTransformFeedbackPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTransformFeedbackPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceTransformFeedbackPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTransformFeedbackPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPipelineRasterizationStateStreamCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineRasterizationStateStreamCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPipelineRasterizationStateStreamCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineRasterizationStateStreamCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NVX_image_view_handle
+void transform_tohost_VkImageViewHandleInfoNVX(
+    ResourceTracker* resourceTracker,
+    VkImageViewHandleInfoNVX* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkImageViewHandleInfoNVX(
+    ResourceTracker* resourceTracker,
+    VkImageViewHandleInfoNVX* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkImageViewAddressPropertiesNVX(
+    ResourceTracker* resourceTracker,
+    VkImageViewAddressPropertiesNVX* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkImageViewAddressPropertiesNVX(
+    ResourceTracker* resourceTracker,
+    VkImageViewAddressPropertiesNVX* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
 #ifdef VK_AMD_draw_indirect_count
 #endif
 #ifdef VK_AMD_negative_viewport_height
@@ -6555,6 +9027,58 @@
 #endif
 #ifdef VK_AMD_shader_image_load_store_lod
 #endif
+#ifdef VK_GGP_stream_descriptor_surface
+void transform_tohost_VkStreamDescriptorSurfaceCreateInfoGGP(
+    ResourceTracker* resourceTracker,
+    VkStreamDescriptorSurfaceCreateInfoGGP* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkStreamDescriptorSurfaceCreateInfoGGP(
+    ResourceTracker* resourceTracker,
+    VkStreamDescriptorSurfaceCreateInfoGGP* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_corner_sampled_image
+void transform_tohost_VkPhysicalDeviceCornerSampledImageFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCornerSampledImageFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceCornerSampledImageFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCornerSampledImageFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
 #ifdef VK_IMG_format_pvrtc
 #endif
 #ifdef VK_NV_external_memory_capabilities
@@ -6761,6 +9285,82 @@
 #endif
 #ifdef VK_EXT_shader_subgroup_vote
 #endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+void transform_tohost_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_astc_decode_mode
+void transform_tohost_VkImageViewASTCDecodeModeEXT(
+    ResourceTracker* resourceTracker,
+    VkImageViewASTCDecodeModeEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkImageViewASTCDecodeModeEXT(
+    ResourceTracker* resourceTracker,
+    VkImageViewASTCDecodeModeEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceASTCDecodeFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceASTCDecodeFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceASTCDecodeFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceASTCDecodeFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
 #ifdef VK_EXT_conditional_rendering
 void transform_tohost_VkConditionalRenderingBeginInfoEXT(
     ResourceTracker* resourceTracker,
@@ -6835,308 +9435,6 @@
 }
 
 #endif
-#ifdef VK_NVX_device_generated_commands
-void transform_tohost_VkDeviceGeneratedCommandsFeaturesNVX(
-    ResourceTracker* resourceTracker,
-    VkDeviceGeneratedCommandsFeaturesNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkDeviceGeneratedCommandsFeaturesNVX(
-    ResourceTracker* resourceTracker,
-    VkDeviceGeneratedCommandsFeaturesNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_tohost_VkDeviceGeneratedCommandsLimitsNVX(
-    ResourceTracker* resourceTracker,
-    VkDeviceGeneratedCommandsLimitsNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkDeviceGeneratedCommandsLimitsNVX(
-    ResourceTracker* resourceTracker,
-    VkDeviceGeneratedCommandsLimitsNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_tohost_VkIndirectCommandsTokenNVX(
-    ResourceTracker* resourceTracker,
-    VkIndirectCommandsTokenNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkIndirectCommandsTokenNVX(
-    ResourceTracker* resourceTracker,
-    VkIndirectCommandsTokenNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkIndirectCommandsLayoutTokenNVX(
-    ResourceTracker* resourceTracker,
-    VkIndirectCommandsLayoutTokenNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkIndirectCommandsLayoutTokenNVX(
-    ResourceTracker* resourceTracker,
-    VkIndirectCommandsLayoutTokenNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkIndirectCommandsLayoutCreateInfoNVX(
-    ResourceTracker* resourceTracker,
-    VkIndirectCommandsLayoutCreateInfoNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-    if (toTransform->pTokens)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->tokenCount; ++i)
-        {
-            transform_tohost_VkIndirectCommandsLayoutTokenNVX(resourceTracker, (VkIndirectCommandsLayoutTokenNVX*)(toTransform->pTokens + i));
-        }
-    }
-}
-
-void transform_fromhost_VkIndirectCommandsLayoutCreateInfoNVX(
-    ResourceTracker* resourceTracker,
-    VkIndirectCommandsLayoutCreateInfoNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-    if (toTransform->pTokens)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->tokenCount; ++i)
-        {
-            transform_fromhost_VkIndirectCommandsLayoutTokenNVX(resourceTracker, (VkIndirectCommandsLayoutTokenNVX*)(toTransform->pTokens + i));
-        }
-    }
-}
-
-void transform_tohost_VkCmdProcessCommandsInfoNVX(
-    ResourceTracker* resourceTracker,
-    VkCmdProcessCommandsInfoNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-    if (toTransform->pIndirectCommandsTokens)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->indirectCommandsTokenCount; ++i)
-        {
-            transform_tohost_VkIndirectCommandsTokenNVX(resourceTracker, (VkIndirectCommandsTokenNVX*)(toTransform->pIndirectCommandsTokens + i));
-        }
-    }
-}
-
-void transform_fromhost_VkCmdProcessCommandsInfoNVX(
-    ResourceTracker* resourceTracker,
-    VkCmdProcessCommandsInfoNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-    if (toTransform->pIndirectCommandsTokens)
-    {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->indirectCommandsTokenCount; ++i)
-        {
-            transform_fromhost_VkIndirectCommandsTokenNVX(resourceTracker, (VkIndirectCommandsTokenNVX*)(toTransform->pIndirectCommandsTokens + i));
-        }
-    }
-}
-
-void transform_tohost_VkCmdReserveSpaceForCommandsInfoNVX(
-    ResourceTracker* resourceTracker,
-    VkCmdReserveSpaceForCommandsInfoNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkCmdReserveSpaceForCommandsInfoNVX(
-    ResourceTracker* resourceTracker,
-    VkCmdReserveSpaceForCommandsInfoNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_tohost_VkObjectTableCreateInfoNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableCreateInfoNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkObjectTableCreateInfoNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableCreateInfoNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_tohost_VkObjectTableEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableEntryNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkObjectTableEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableEntryNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkObjectTablePipelineEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTablePipelineEntryNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkObjectTablePipelineEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTablePipelineEntryNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkObjectTableDescriptorSetEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableDescriptorSetEntryNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkObjectTableDescriptorSetEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableDescriptorSetEntryNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkObjectTableVertexBufferEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableVertexBufferEntryNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkObjectTableVertexBufferEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableVertexBufferEntryNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkObjectTableIndexBufferEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableIndexBufferEntryNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkObjectTableIndexBufferEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableIndexBufferEntryNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_tohost_VkObjectTablePushConstantEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTablePushConstantEntryNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-void transform_fromhost_VkObjectTablePushConstantEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTablePushConstantEntryNVX* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-}
-
-#endif
 #ifdef VK_NV_clip_space_w_scaling
 void transform_tohost_VkViewportWScalingNV(
     ResourceTracker* resourceTracker,
@@ -7164,11 +9462,14 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pViewportWScalings)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->viewportCount; ++i)
+        if (toTransform->pViewportWScalings)
         {
-            transform_tohost_VkViewportWScalingNV(resourceTracker, (VkViewportWScalingNV*)(toTransform->pViewportWScalings + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->viewportCount; ++i)
+            {
+                transform_tohost_VkViewportWScalingNV(resourceTracker, (VkViewportWScalingNV*)(toTransform->pViewportWScalings + i));
+            }
         }
     }
 }
@@ -7183,11 +9484,14 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pViewportWScalings)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->viewportCount; ++i)
+        if (toTransform->pViewportWScalings)
         {
-            transform_fromhost_VkViewportWScalingNV(resourceTracker, (VkViewportWScalingNV*)(toTransform->pViewportWScalings + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->viewportCount; ++i)
+            {
+                transform_fromhost_VkViewportWScalingNV(resourceTracker, (VkViewportWScalingNV*)(toTransform->pViewportWScalings + i));
+            }
         }
     }
 }
@@ -7386,11 +9690,14 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pTimes)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->swapchainCount; ++i)
+        if (toTransform->pTimes)
         {
-            transform_tohost_VkPresentTimeGOOGLE(resourceTracker, (VkPresentTimeGOOGLE*)(toTransform->pTimes + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->swapchainCount; ++i)
+            {
+                transform_tohost_VkPresentTimeGOOGLE(resourceTracker, (VkPresentTimeGOOGLE*)(toTransform->pTimes + i));
+            }
         }
     }
 }
@@ -7405,11 +9712,14 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pTimes)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->swapchainCount; ++i)
+        if (toTransform->pTimes)
         {
-            transform_fromhost_VkPresentTimeGOOGLE(resourceTracker, (VkPresentTimeGOOGLE*)(toTransform->pTimes + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->swapchainCount; ++i)
+            {
+                transform_fromhost_VkPresentTimeGOOGLE(resourceTracker, (VkPresentTimeGOOGLE*)(toTransform->pTimes + i));
+            }
         }
     }
 }
@@ -7474,11 +9784,14 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pViewportSwizzles)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->viewportCount; ++i)
+        if (toTransform->pViewportSwizzles)
         {
-            transform_tohost_VkViewportSwizzleNV(resourceTracker, (VkViewportSwizzleNV*)(toTransform->pViewportSwizzles + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->viewportCount; ++i)
+            {
+                transform_tohost_VkViewportSwizzleNV(resourceTracker, (VkViewportSwizzleNV*)(toTransform->pViewportSwizzles + i));
+            }
         }
     }
 }
@@ -7493,11 +9806,14 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pViewportSwizzles)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->viewportCount; ++i)
+        if (toTransform->pViewportSwizzles)
         {
-            transform_fromhost_VkViewportSwizzleNV(resourceTracker, (VkViewportSwizzleNV*)(toTransform->pViewportSwizzles + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->viewportCount; ++i)
+            {
+                transform_fromhost_VkViewportSwizzleNV(resourceTracker, (VkViewportSwizzleNV*)(toTransform->pViewportSwizzles + i));
+            }
         }
     }
 }
@@ -7538,11 +9854,14 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pDiscardRectangles)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->discardRectangleCount; ++i)
+        if (toTransform->pDiscardRectangles)
         {
-            transform_tohost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pDiscardRectangles + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->discardRectangleCount; ++i)
+            {
+                transform_tohost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pDiscardRectangles + i));
+            }
         }
     }
 }
@@ -7557,11 +9876,14 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pDiscardRectangles)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->discardRectangleCount; ++i)
+        if (toTransform->pDiscardRectangles)
         {
-            transform_fromhost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pDiscardRectangles + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->discardRectangleCount; ++i)
+            {
+                transform_fromhost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pDiscardRectangles + i));
+            }
         }
     }
 }
@@ -7617,6 +9939,56 @@
 }
 
 #endif
+#ifdef VK_EXT_depth_clip_enable
+void transform_tohost_VkPhysicalDeviceDepthClipEnableFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDepthClipEnableFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceDepthClipEnableFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDepthClipEnableFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPipelineRasterizationDepthClipStateCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineRasterizationDepthClipStateCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPipelineRasterizationDepthClipStateCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineRasterizationDepthClipStateCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
 #ifdef VK_EXT_swapchain_colorspace
 #endif
 #ifdef VK_EXT_hdr_metadata
@@ -7721,59 +10093,13 @@
 }
 
 #endif
+#ifdef VK_MVK_moltenvk
+#endif
 #ifdef VK_EXT_external_memory_dma_buf
 #endif
 #ifdef VK_EXT_queue_family_foreign
 #endif
 #ifdef VK_EXT_debug_utils
-void transform_tohost_VkDebugUtilsObjectNameInfoEXT(
-    ResourceTracker* resourceTracker,
-    VkDebugUtilsObjectNameInfoEXT* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkDebugUtilsObjectNameInfoEXT(
-    ResourceTracker* resourceTracker,
-    VkDebugUtilsObjectNameInfoEXT* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_tohost_VkDebugUtilsObjectTagInfoEXT(
-    ResourceTracker* resourceTracker,
-    VkDebugUtilsObjectTagInfoEXT* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkDebugUtilsObjectTagInfoEXT(
-    ResourceTracker* resourceTracker,
-    VkDebugUtilsObjectTagInfoEXT* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
 void transform_tohost_VkDebugUtilsLabelEXT(
     ResourceTracker* resourceTracker,
     VkDebugUtilsLabelEXT* toTransform)
@@ -7798,6 +10124,30 @@
     }
 }
 
+void transform_tohost_VkDebugUtilsObjectNameInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDebugUtilsObjectNameInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkDebugUtilsObjectNameInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDebugUtilsObjectNameInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
 void transform_tohost_VkDebugUtilsMessengerCallbackDataEXT(
     ResourceTracker* resourceTracker,
     VkDebugUtilsMessengerCallbackDataEXT* toTransform)
@@ -7808,25 +10158,34 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pQueueLabels)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->queueLabelCount; ++i)
+        if (toTransform->pQueueLabels)
         {
-            transform_tohost_VkDebugUtilsLabelEXT(resourceTracker, (VkDebugUtilsLabelEXT*)(toTransform->pQueueLabels + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->queueLabelCount; ++i)
+            {
+                transform_tohost_VkDebugUtilsLabelEXT(resourceTracker, (VkDebugUtilsLabelEXT*)(toTransform->pQueueLabels + i));
+            }
         }
     }
-    if (toTransform->pCmdBufLabels)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->cmdBufLabelCount; ++i)
+        if (toTransform->pCmdBufLabels)
         {
-            transform_tohost_VkDebugUtilsLabelEXT(resourceTracker, (VkDebugUtilsLabelEXT*)(toTransform->pCmdBufLabels + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->cmdBufLabelCount; ++i)
+            {
+                transform_tohost_VkDebugUtilsLabelEXT(resourceTracker, (VkDebugUtilsLabelEXT*)(toTransform->pCmdBufLabels + i));
+            }
         }
     }
-    if (toTransform->pObjects)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->objectCount; ++i)
+        if (toTransform->pObjects)
         {
-            transform_tohost_VkDebugUtilsObjectNameInfoEXT(resourceTracker, (VkDebugUtilsObjectNameInfoEXT*)(toTransform->pObjects + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->objectCount; ++i)
+            {
+                transform_tohost_VkDebugUtilsObjectNameInfoEXT(resourceTracker, (VkDebugUtilsObjectNameInfoEXT*)(toTransform->pObjects + i));
+            }
         }
     }
 }
@@ -7841,25 +10200,34 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pQueueLabels)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->queueLabelCount; ++i)
+        if (toTransform->pQueueLabels)
         {
-            transform_fromhost_VkDebugUtilsLabelEXT(resourceTracker, (VkDebugUtilsLabelEXT*)(toTransform->pQueueLabels + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->queueLabelCount; ++i)
+            {
+                transform_fromhost_VkDebugUtilsLabelEXT(resourceTracker, (VkDebugUtilsLabelEXT*)(toTransform->pQueueLabels + i));
+            }
         }
     }
-    if (toTransform->pCmdBufLabels)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->cmdBufLabelCount; ++i)
+        if (toTransform->pCmdBufLabels)
         {
-            transform_fromhost_VkDebugUtilsLabelEXT(resourceTracker, (VkDebugUtilsLabelEXT*)(toTransform->pCmdBufLabels + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->cmdBufLabelCount; ++i)
+            {
+                transform_fromhost_VkDebugUtilsLabelEXT(resourceTracker, (VkDebugUtilsLabelEXT*)(toTransform->pCmdBufLabels + i));
+            }
         }
     }
-    if (toTransform->pObjects)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->objectCount; ++i)
+        if (toTransform->pObjects)
         {
-            transform_fromhost_VkDebugUtilsObjectNameInfoEXT(resourceTracker, (VkDebugUtilsObjectNameInfoEXT*)(toTransform->pObjects + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->objectCount; ++i)
+            {
+                transform_fromhost_VkDebugUtilsObjectNameInfoEXT(resourceTracker, (VkDebugUtilsObjectNameInfoEXT*)(toTransform->pObjects + i));
+            }
         }
     }
 }
@@ -7888,6 +10256,30 @@
     }
 }
 
+void transform_tohost_VkDebugUtilsObjectTagInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDebugUtilsObjectTagInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkDebugUtilsObjectTagInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDebugUtilsObjectTagInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
 #endif
 #ifdef VK_ANDROID_external_memory_android_hardware_buffer
 void transform_tohost_VkAndroidHardwareBufferUsageANDROID(
@@ -8042,54 +10434,6 @@
 
 #endif
 #ifdef VK_EXT_sampler_filter_minmax
-void transform_tohost_VkSamplerReductionModeCreateInfoEXT(
-    ResourceTracker* resourceTracker,
-    VkSamplerReductionModeCreateInfoEXT* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkSamplerReductionModeCreateInfoEXT(
-    ResourceTracker* resourceTracker,
-    VkSamplerReductionModeCreateInfoEXT* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_tohost_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(
-    ResourceTracker* resourceTracker,
-    VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
-void transform_fromhost_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(
-    ResourceTracker* resourceTracker,
-    VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT* toTransform)
-{
-    (void)resourceTracker;
-    (void)toTransform;
-    if (toTransform->pNext)
-    {
-        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
-    }
-}
-
 #endif
 #ifdef VK_AMD_gpu_shader_int16
 #endif
@@ -8097,6 +10441,104 @@
 #endif
 #ifdef VK_AMD_shader_fragment_mask
 #endif
+#ifdef VK_EXT_inline_uniform_block
+void transform_tohost_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceInlineUniformBlockFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceInlineUniformBlockFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceInlineUniformBlockPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceInlineUniformBlockPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkWriteDescriptorSetInlineUniformBlockEXT(
+    ResourceTracker* resourceTracker,
+    VkWriteDescriptorSetInlineUniformBlockEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkWriteDescriptorSetInlineUniformBlockEXT(
+    ResourceTracker* resourceTracker,
+    VkWriteDescriptorSetInlineUniformBlockEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDescriptorPoolInlineUniformBlockCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDescriptorPoolInlineUniformBlockCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
 #ifdef VK_EXT_shader_stencil_export
 #endif
 #ifdef VK_EXT_sample_locations
@@ -8127,11 +10569,14 @@
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
     transform_tohost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->sampleLocationGridSize));
-    if (toTransform->pSampleLocations)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->sampleLocationsCount; ++i)
+        if (toTransform->pSampleLocations)
         {
-            transform_tohost_VkSampleLocationEXT(resourceTracker, (VkSampleLocationEXT*)(toTransform->pSampleLocations + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->sampleLocationsCount; ++i)
+            {
+                transform_tohost_VkSampleLocationEXT(resourceTracker, (VkSampleLocationEXT*)(toTransform->pSampleLocations + i));
+            }
         }
     }
 }
@@ -8147,11 +10592,14 @@
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
     transform_fromhost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->sampleLocationGridSize));
-    if (toTransform->pSampleLocations)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->sampleLocationsCount; ++i)
+        if (toTransform->pSampleLocations)
         {
-            transform_fromhost_VkSampleLocationEXT(resourceTracker, (VkSampleLocationEXT*)(toTransform->pSampleLocations + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->sampleLocationsCount; ++i)
+            {
+                transform_fromhost_VkSampleLocationEXT(resourceTracker, (VkSampleLocationEXT*)(toTransform->pSampleLocations + i));
+            }
         }
     }
 }
@@ -8202,18 +10650,24 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pAttachmentInitialSampleLocations)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentInitialSampleLocationsCount; ++i)
+        if (toTransform->pAttachmentInitialSampleLocations)
         {
-            transform_tohost_VkAttachmentSampleLocationsEXT(resourceTracker, (VkAttachmentSampleLocationsEXT*)(toTransform->pAttachmentInitialSampleLocations + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentInitialSampleLocationsCount; ++i)
+            {
+                transform_tohost_VkAttachmentSampleLocationsEXT(resourceTracker, (VkAttachmentSampleLocationsEXT*)(toTransform->pAttachmentInitialSampleLocations + i));
+            }
         }
     }
-    if (toTransform->pPostSubpassSampleLocations)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->postSubpassSampleLocationsCount; ++i)
+        if (toTransform->pPostSubpassSampleLocations)
         {
-            transform_tohost_VkSubpassSampleLocationsEXT(resourceTracker, (VkSubpassSampleLocationsEXT*)(toTransform->pPostSubpassSampleLocations + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->postSubpassSampleLocationsCount; ++i)
+            {
+                transform_tohost_VkSubpassSampleLocationsEXT(resourceTracker, (VkSubpassSampleLocationsEXT*)(toTransform->pPostSubpassSampleLocations + i));
+            }
         }
     }
 }
@@ -8228,18 +10682,24 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pAttachmentInitialSampleLocations)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentInitialSampleLocationsCount; ++i)
+        if (toTransform->pAttachmentInitialSampleLocations)
         {
-            transform_fromhost_VkAttachmentSampleLocationsEXT(resourceTracker, (VkAttachmentSampleLocationsEXT*)(toTransform->pAttachmentInitialSampleLocations + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->attachmentInitialSampleLocationsCount; ++i)
+            {
+                transform_fromhost_VkAttachmentSampleLocationsEXT(resourceTracker, (VkAttachmentSampleLocationsEXT*)(toTransform->pAttachmentInitialSampleLocations + i));
+            }
         }
     }
-    if (toTransform->pPostSubpassSampleLocations)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->postSubpassSampleLocationsCount; ++i)
+        if (toTransform->pPostSubpassSampleLocations)
         {
-            transform_fromhost_VkSubpassSampleLocationsEXT(resourceTracker, (VkSubpassSampleLocationsEXT*)(toTransform->pPostSubpassSampleLocations + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->postSubpassSampleLocationsCount; ++i)
+            {
+                transform_fromhost_VkSubpassSampleLocationsEXT(resourceTracker, (VkSubpassSampleLocationsEXT*)(toTransform->pPostSubpassSampleLocations + i));
+            }
         }
     }
 }
@@ -8451,8 +10911,236 @@
 #endif
 #ifdef VK_NV_fill_rectangle
 #endif
+#ifdef VK_NV_shader_sm_builtins
+void transform_tohost_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
 #ifdef VK_EXT_post_depth_coverage
 #endif
+#ifdef VK_EXT_image_drm_format_modifier
+void transform_tohost_VkDrmFormatModifierPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkDrmFormatModifierPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkDrmFormatModifierPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkDrmFormatModifierPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkDrmFormatModifierPropertiesListEXT(
+    ResourceTracker* resourceTracker,
+    VkDrmFormatModifierPropertiesListEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pDrmFormatModifierProperties)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->drmFormatModifierCount; ++i)
+            {
+                transform_tohost_VkDrmFormatModifierPropertiesEXT(resourceTracker, (VkDrmFormatModifierPropertiesEXT*)(toTransform->pDrmFormatModifierProperties + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkDrmFormatModifierPropertiesListEXT(
+    ResourceTracker* resourceTracker,
+    VkDrmFormatModifierPropertiesListEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pDrmFormatModifierProperties)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->drmFormatModifierCount; ++i)
+            {
+                transform_fromhost_VkDrmFormatModifierPropertiesEXT(resourceTracker, (VkDrmFormatModifierPropertiesEXT*)(toTransform->pDrmFormatModifierProperties + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceImageDrmFormatModifierInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceImageDrmFormatModifierInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkImageDrmFormatModifierListCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkImageDrmFormatModifierListCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkImageDrmFormatModifierListCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkImageDrmFormatModifierListCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkImageDrmFormatModifierExplicitCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkImageDrmFormatModifierExplicitCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pPlaneLayouts)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->drmFormatModifierPlaneCount; ++i)
+            {
+                transform_tohost_VkSubresourceLayout(resourceTracker, (VkSubresourceLayout*)(toTransform->pPlaneLayouts + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkImageDrmFormatModifierExplicitCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkImageDrmFormatModifierExplicitCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pPlaneLayouts)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->drmFormatModifierPlaneCount; ++i)
+            {
+                transform_fromhost_VkSubresourceLayout(resourceTracker, (VkSubresourceLayout*)(toTransform->pPlaneLayouts + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkImageDrmFormatModifierPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkImageDrmFormatModifierPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkImageDrmFormatModifierPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkImageDrmFormatModifierPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
 #ifdef VK_EXT_validation_cache
 void transform_tohost_VkValidationCacheCreateInfoEXT(
     ResourceTracker* resourceTracker,
@@ -8504,9 +11192,73 @@
 
 #endif
 #ifdef VK_EXT_descriptor_indexing
-void transform_tohost_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(
+#endif
+#ifdef VK_EXT_shader_viewport_index_layer
+#endif
+#ifdef VK_NV_shading_rate_image
+void transform_tohost_VkShadingRatePaletteNV(
     ResourceTracker* resourceTracker,
-    VkDescriptorSetLayoutBindingFlagsCreateInfoEXT* toTransform)
+    VkShadingRatePaletteNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkShadingRatePaletteNV(
+    ResourceTracker* resourceTracker,
+    VkShadingRatePaletteNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkPipelineViewportShadingRateImageStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineViewportShadingRateImageStateCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pShadingRatePalettes)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->viewportCount; ++i)
+            {
+                transform_tohost_VkShadingRatePaletteNV(resourceTracker, (VkShadingRatePaletteNV*)(toTransform->pShadingRatePalettes + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkPipelineViewportShadingRateImageStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineViewportShadingRateImageStateCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pShadingRatePalettes)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->viewportCount; ++i)
+            {
+                transform_fromhost_VkShadingRatePaletteNV(resourceTracker, (VkShadingRatePaletteNV*)(toTransform->pShadingRatePalettes + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceShadingRateImageFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShadingRateImageFeaturesNV* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
@@ -8516,9 +11268,9 @@
     }
 }
 
-void transform_fromhost_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(
+void transform_fromhost_VkPhysicalDeviceShadingRateImageFeaturesNV(
     ResourceTracker* resourceTracker,
-    VkDescriptorSetLayoutBindingFlagsCreateInfoEXT* toTransform)
+    VkPhysicalDeviceShadingRateImageFeaturesNV* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
@@ -8528,9 +11280,133 @@
     }
 }
 
-void transform_tohost_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(
+void transform_tohost_VkPhysicalDeviceShadingRateImagePropertiesNV(
     ResourceTracker* resourceTracker,
-    VkPhysicalDeviceDescriptorIndexingFeaturesEXT* toTransform)
+    VkPhysicalDeviceShadingRateImagePropertiesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->shadingRateTexelSize));
+}
+
+void transform_fromhost_VkPhysicalDeviceShadingRateImagePropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShadingRateImagePropertiesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->shadingRateTexelSize));
+}
+
+void transform_tohost_VkCoarseSampleLocationNV(
+    ResourceTracker* resourceTracker,
+    VkCoarseSampleLocationNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkCoarseSampleLocationNV(
+    ResourceTracker* resourceTracker,
+    VkCoarseSampleLocationNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkCoarseSampleOrderCustomNV(
+    ResourceTracker* resourceTracker,
+    VkCoarseSampleOrderCustomNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform)
+    {
+        if (toTransform->pSampleLocations)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->sampleLocationCount; ++i)
+            {
+                transform_tohost_VkCoarseSampleLocationNV(resourceTracker, (VkCoarseSampleLocationNV*)(toTransform->pSampleLocations + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkCoarseSampleOrderCustomNV(
+    ResourceTracker* resourceTracker,
+    VkCoarseSampleOrderCustomNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform)
+    {
+        if (toTransform->pSampleLocations)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->sampleLocationCount; ++i)
+            {
+                transform_fromhost_VkCoarseSampleLocationNV(resourceTracker, (VkCoarseSampleLocationNV*)(toTransform->pSampleLocations + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pCustomSampleOrders)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->customSampleOrderCount; ++i)
+            {
+                transform_tohost_VkCoarseSampleOrderCustomNV(resourceTracker, (VkCoarseSampleOrderCustomNV*)(toTransform->pCustomSampleOrders + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pCustomSampleOrders)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->customSampleOrderCount; ++i)
+            {
+                transform_fromhost_VkCoarseSampleOrderCustomNV(resourceTracker, (VkCoarseSampleOrderCustomNV*)(toTransform->pCustomSampleOrders + i));
+            }
+        }
+    }
+}
+
+#endif
+#ifdef VK_NV_ray_tracing
+void transform_tohost_VkRayTracingShaderGroupCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkRayTracingShaderGroupCreateInfoNV* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
@@ -8540,9 +11416,9 @@
     }
 }
 
-void transform_fromhost_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(
+void transform_fromhost_VkRayTracingShaderGroupCreateInfoNV(
     ResourceTracker* resourceTracker,
-    VkPhysicalDeviceDescriptorIndexingFeaturesEXT* toTransform)
+    VkRayTracingShaderGroupCreateInfoNV* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
@@ -8552,9 +11428,73 @@
     }
 }
 
-void transform_tohost_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(
+void transform_tohost_VkRayTracingPipelineCreateInfoNV(
     ResourceTracker* resourceTracker,
-    VkPhysicalDeviceDescriptorIndexingPropertiesEXT* toTransform)
+    VkRayTracingPipelineCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pStages)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->stageCount; ++i)
+            {
+                transform_tohost_VkPipelineShaderStageCreateInfo(resourceTracker, (VkPipelineShaderStageCreateInfo*)(toTransform->pStages + i));
+            }
+        }
+    }
+    if (toTransform)
+    {
+        if (toTransform->pGroups)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->groupCount; ++i)
+            {
+                transform_tohost_VkRayTracingShaderGroupCreateInfoNV(resourceTracker, (VkRayTracingShaderGroupCreateInfoNV*)(toTransform->pGroups + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkRayTracingPipelineCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkRayTracingPipelineCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pStages)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->stageCount; ++i)
+            {
+                transform_fromhost_VkPipelineShaderStageCreateInfo(resourceTracker, (VkPipelineShaderStageCreateInfo*)(toTransform->pStages + i));
+            }
+        }
+    }
+    if (toTransform)
+    {
+        if (toTransform->pGroups)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->groupCount; ++i)
+            {
+                transform_fromhost_VkRayTracingShaderGroupCreateInfoNV(resourceTracker, (VkRayTracingShaderGroupCreateInfoNV*)(toTransform->pGroups + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkGeometryTrianglesNV(
+    ResourceTracker* resourceTracker,
+    VkGeometryTrianglesNV* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
@@ -8564,9 +11504,9 @@
     }
 }
 
-void transform_fromhost_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(
+void transform_fromhost_VkGeometryTrianglesNV(
     ResourceTracker* resourceTracker,
-    VkPhysicalDeviceDescriptorIndexingPropertiesEXT* toTransform)
+    VkGeometryTrianglesNV* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
@@ -8576,9 +11516,9 @@
     }
 }
 
-void transform_tohost_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(
+void transform_tohost_VkGeometryAABBNV(
     ResourceTracker* resourceTracker,
-    VkDescriptorSetVariableDescriptorCountAllocateInfoEXT* toTransform)
+    VkGeometryAABBNV* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
@@ -8588,9 +11528,9 @@
     }
 }
 
-void transform_fromhost_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(
+void transform_fromhost_VkGeometryAABBNV(
     ResourceTracker* resourceTracker,
-    VkDescriptorSetVariableDescriptorCountAllocateInfoEXT* toTransform)
+    VkGeometryAABBNV* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
@@ -8600,9 +11540,151 @@
     }
 }
 
-void transform_tohost_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(
+void transform_tohost_VkGeometryDataNV(
     ResourceTracker* resourceTracker,
-    VkDescriptorSetVariableDescriptorCountLayoutSupportEXT* toTransform)
+    VkGeometryDataNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_tohost_VkGeometryTrianglesNV(resourceTracker, (VkGeometryTrianglesNV*)(&toTransform->triangles));
+    transform_tohost_VkGeometryAABBNV(resourceTracker, (VkGeometryAABBNV*)(&toTransform->aabbs));
+}
+
+void transform_fromhost_VkGeometryDataNV(
+    ResourceTracker* resourceTracker,
+    VkGeometryDataNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_fromhost_VkGeometryTrianglesNV(resourceTracker, (VkGeometryTrianglesNV*)(&toTransform->triangles));
+    transform_fromhost_VkGeometryAABBNV(resourceTracker, (VkGeometryAABBNV*)(&toTransform->aabbs));
+}
+
+void transform_tohost_VkGeometryNV(
+    ResourceTracker* resourceTracker,
+    VkGeometryNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkGeometryDataNV(resourceTracker, (VkGeometryDataNV*)(&toTransform->geometry));
+}
+
+void transform_fromhost_VkGeometryNV(
+    ResourceTracker* resourceTracker,
+    VkGeometryNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkGeometryDataNV(resourceTracker, (VkGeometryDataNV*)(&toTransform->geometry));
+}
+
+void transform_tohost_VkAccelerationStructureInfoNV(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pGeometries)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->geometryCount; ++i)
+            {
+                transform_tohost_VkGeometryNV(resourceTracker, (VkGeometryNV*)(toTransform->pGeometries + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkAccelerationStructureInfoNV(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pGeometries)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->geometryCount; ++i)
+            {
+                transform_fromhost_VkGeometryNV(resourceTracker, (VkGeometryNV*)(toTransform->pGeometries + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkAccelerationStructureCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkAccelerationStructureInfoNV(resourceTracker, (VkAccelerationStructureInfoNV*)(&toTransform->info));
+}
+
+void transform_fromhost_VkAccelerationStructureCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkAccelerationStructureInfoNV(resourceTracker, (VkAccelerationStructureInfoNV*)(&toTransform->info));
+}
+
+void transform_tohost_VkBindAccelerationStructureMemoryInfoNV(
+    ResourceTracker* resourceTracker,
+    VkBindAccelerationStructureMemoryInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    resourceTracker->deviceMemoryTransform_tohost((VkDeviceMemory*)&toTransform->memory, 1, (VkDeviceSize*)&toTransform->memoryOffset, 1, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkBindAccelerationStructureMemoryInfoNV(
+    ResourceTracker* resourceTracker,
+    VkBindAccelerationStructureMemoryInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    resourceTracker->deviceMemoryTransform_fromhost((VkDeviceMemory*)&toTransform->memory, 1, (VkDeviceSize*)&toTransform->memoryOffset, 1, (VkDeviceSize*)nullptr, 0, (uint32_t*)nullptr, 0, (uint32_t*)nullptr, 0);
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkWriteDescriptorSetAccelerationStructureNV(
+    ResourceTracker* resourceTracker,
+    VkWriteDescriptorSetAccelerationStructureNV* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
@@ -8612,9 +11694,157 @@
     }
 }
 
-void transform_fromhost_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(
+void transform_fromhost_VkWriteDescriptorSetAccelerationStructureNV(
     ResourceTracker* resourceTracker,
-    VkDescriptorSetVariableDescriptorCountLayoutSupportEXT* toTransform)
+    VkWriteDescriptorSetAccelerationStructureNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkAccelerationStructureMemoryRequirementsInfoNV(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureMemoryRequirementsInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkAccelerationStructureMemoryRequirementsInfoNV(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureMemoryRequirementsInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceRayTracingPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRayTracingPropertiesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceRayTracingPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRayTracingPropertiesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkTransformMatrixKHR(
+    ResourceTracker* resourceTracker,
+    VkTransformMatrixKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkTransformMatrixKHR(
+    ResourceTracker* resourceTracker,
+    VkTransformMatrixKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkAabbPositionsKHR(
+    ResourceTracker* resourceTracker,
+    VkAabbPositionsKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkAabbPositionsKHR(
+    ResourceTracker* resourceTracker,
+    VkAabbPositionsKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkAccelerationStructureInstanceKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureInstanceKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_tohost_VkTransformMatrixKHR(resourceTracker, (VkTransformMatrixKHR*)(&toTransform->transform));
+}
+
+void transform_fromhost_VkAccelerationStructureInstanceKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureInstanceKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_fromhost_VkTransformMatrixKHR(resourceTracker, (VkTransformMatrixKHR*)(&toTransform->transform));
+}
+
+#endif
+#ifdef VK_NV_representative_fragment_test
+void transform_tohost_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineRepresentativeFragmentTestStateCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineRepresentativeFragmentTestStateCreateInfoNV* toTransform)
 {
     (void)resourceTracker;
     (void)toTransform;
@@ -8625,7 +11855,57 @@
 }
 
 #endif
-#ifdef VK_EXT_shader_viewport_index_layer
+#ifdef VK_EXT_filter_cubic
+void transform_tohost_VkPhysicalDeviceImageViewImageFormatInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceImageViewImageFormatInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceImageViewImageFormatInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceImageViewImageFormatInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkFilterCubicImageViewImageFormatPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkFilterCubicImageViewImageFormatPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkFilterCubicImageViewImageFormatPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkFilterCubicImageViewImageFormatPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
 #endif
 #ifdef VK_EXT_global_priority
 void transform_tohost_VkDeviceQueueGlobalPriorityCreateInfoEXT(
@@ -8731,6 +12011,58 @@
 #endif
 #ifdef VK_AMD_buffer_marker
 #endif
+#ifdef VK_AMD_pipeline_compiler_control
+void transform_tohost_VkPipelineCompilerControlCreateInfoAMD(
+    ResourceTracker* resourceTracker,
+    VkPipelineCompilerControlCreateInfoAMD* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPipelineCompilerControlCreateInfoAMD(
+    ResourceTracker* resourceTracker,
+    VkPipelineCompilerControlCreateInfoAMD* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+void transform_tohost_VkCalibratedTimestampInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkCalibratedTimestampInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkCalibratedTimestampInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkCalibratedTimestampInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
 #ifdef VK_AMD_shader_core_properties
 void transform_tohost_VkPhysicalDeviceShaderCorePropertiesAMD(
     ResourceTracker* resourceTracker,
@@ -8757,6 +12089,32 @@
 }
 
 #endif
+#ifdef VK_AMD_memory_overallocation_behavior
+void transform_tohost_VkDeviceMemoryOverallocationCreateInfoAMD(
+    ResourceTracker* resourceTracker,
+    VkDeviceMemoryOverallocationCreateInfoAMD* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkDeviceMemoryOverallocationCreateInfoAMD(
+    ResourceTracker* resourceTracker,
+    VkDeviceMemoryOverallocationCreateInfoAMD* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
 #ifdef VK_EXT_vertex_attribute_divisor
 void transform_tohost_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(
     ResourceTracker* resourceTracker,
@@ -8808,11 +12166,14 @@
     {
         transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pVertexBindingDivisors)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->vertexBindingDivisorCount; ++i)
+        if (toTransform->pVertexBindingDivisors)
         {
-            transform_tohost_VkVertexInputBindingDivisorDescriptionEXT(resourceTracker, (VkVertexInputBindingDivisorDescriptionEXT*)(toTransform->pVertexBindingDivisors + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->vertexBindingDivisorCount; ++i)
+            {
+                transform_tohost_VkVertexInputBindingDivisorDescriptionEXT(resourceTracker, (VkVertexInputBindingDivisorDescriptionEXT*)(toTransform->pVertexBindingDivisors + i));
+            }
         }
     }
 }
@@ -8827,11 +12188,134 @@
     {
         transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
     }
-    if (toTransform->pVertexBindingDivisors)
+    if (toTransform)
     {
-        for (uint32_t i = 0; i < (uint32_t)toTransform->vertexBindingDivisorCount; ++i)
+        if (toTransform->pVertexBindingDivisors)
         {
-            transform_fromhost_VkVertexInputBindingDivisorDescriptionEXT(resourceTracker, (VkVertexInputBindingDivisorDescriptionEXT*)(toTransform->pVertexBindingDivisors + i));
+            for (uint32_t i = 0; i < (uint32_t)toTransform->vertexBindingDivisorCount; ++i)
+            {
+                transform_fromhost_VkVertexInputBindingDivisorDescriptionEXT(resourceTracker, (VkVertexInputBindingDivisorDescriptionEXT*)(toTransform->pVertexBindingDivisors + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_GGP_frame_token
+void transform_tohost_VkPresentFrameTokenGGP(
+    ResourceTracker* resourceTracker,
+    VkPresentFrameTokenGGP* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPresentFrameTokenGGP(
+    ResourceTracker* resourceTracker,
+    VkPresentFrameTokenGGP* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+void transform_tohost_VkPipelineCreationFeedbackEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineCreationFeedbackEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkPipelineCreationFeedbackEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineCreationFeedbackEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkPipelineCreationFeedbackCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineCreationFeedbackCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform->pPipelineCreationFeedback)
+    {
+        transform_tohost_VkPipelineCreationFeedbackEXT(resourceTracker, (VkPipelineCreationFeedbackEXT*)(toTransform->pPipelineCreationFeedback));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pPipelineStageCreationFeedbacks)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->pipelineStageCreationFeedbackCount; ++i)
+            {
+                transform_tohost_VkPipelineCreationFeedbackEXT(resourceTracker, (VkPipelineCreationFeedbackEXT*)(toTransform->pPipelineStageCreationFeedbacks + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkPipelineCreationFeedbackCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineCreationFeedbackCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform->pPipelineCreationFeedback)
+    {
+        transform_fromhost_VkPipelineCreationFeedbackEXT(resourceTracker, (VkPipelineCreationFeedbackEXT*)(toTransform->pPipelineCreationFeedback));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pPipelineStageCreationFeedbacks)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->pipelineStageCreationFeedbackCount; ++i)
+            {
+                transform_fromhost_VkPipelineCreationFeedbackEXT(resourceTracker, (VkPipelineCreationFeedbackEXT*)(toTransform->pPipelineStageCreationFeedbacks + i));
+            }
         }
     }
 }
@@ -8839,6 +12323,220 @@
 #endif
 #ifdef VK_NV_shader_subgroup_partitioned
 #endif
+#ifdef VK_NV_compute_shader_derivatives
+void transform_tohost_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_mesh_shader
+void transform_tohost_VkPhysicalDeviceMeshShaderFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMeshShaderFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceMeshShaderFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMeshShaderFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceMeshShaderPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMeshShaderPropertiesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceMeshShaderPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMeshShaderPropertiesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkDrawMeshTasksIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkDrawMeshTasksIndirectCommandNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkDrawMeshTasksIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkDrawMeshTasksIndirectCommandNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+void transform_tohost_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_shader_image_footprint
+void transform_tohost_VkPhysicalDeviceShaderImageFootprintFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderImageFootprintFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceShaderImageFootprintFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderImageFootprintFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_scissor_exclusive
+void transform_tohost_VkPipelineViewportExclusiveScissorStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineViewportExclusiveScissorStateCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pExclusiveScissors)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->exclusiveScissorCount; ++i)
+            {
+                transform_tohost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pExclusiveScissors + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkPipelineViewportExclusiveScissorStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineViewportExclusiveScissorStateCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pExclusiveScissors)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->exclusiveScissorCount; ++i)
+            {
+                transform_fromhost_VkRect2D(resourceTracker, (VkRect2D*)(toTransform->pExclusiveScissors + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceExclusiveScissorFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceExclusiveScissorFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceExclusiveScissorFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceExclusiveScissorFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
 #ifdef VK_NV_device_diagnostic_checkpoints
 void transform_tohost_VkQueueFamilyCheckpointPropertiesNV(
     ResourceTracker* resourceTracker,
@@ -8889,9 +12587,2319 @@
 }
 
 #endif
-#ifdef VK_GOOGLE_address_space
+#ifdef VK_INTEL_shader_integer_functions2
+void transform_tohost_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_INTEL_performance_query
+void transform_tohost_VkPerformanceValueDataINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceValueDataINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkPerformanceValueDataINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceValueDataINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkPerformanceValueINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceValueINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_tohost_VkPerformanceValueDataINTEL(resourceTracker, (VkPerformanceValueDataINTEL*)(&toTransform->data));
+}
+
+void transform_fromhost_VkPerformanceValueINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceValueINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_fromhost_VkPerformanceValueDataINTEL(resourceTracker, (VkPerformanceValueDataINTEL*)(&toTransform->data));
+}
+
+void transform_tohost_VkInitializePerformanceApiInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkInitializePerformanceApiInfoINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkInitializePerformanceApiInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkInitializePerformanceApiInfoINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkQueryPoolPerformanceQueryCreateInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkQueryPoolPerformanceQueryCreateInfoINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkQueryPoolPerformanceQueryCreateInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkQueryPoolPerformanceQueryCreateInfoINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPerformanceMarkerInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceMarkerInfoINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPerformanceMarkerInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceMarkerInfoINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPerformanceStreamMarkerInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceStreamMarkerInfoINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPerformanceStreamMarkerInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceStreamMarkerInfoINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPerformanceOverrideInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceOverrideInfoINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPerformanceOverrideInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceOverrideInfoINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPerformanceConfigurationAcquireInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceConfigurationAcquireInfoINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPerformanceConfigurationAcquireInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceConfigurationAcquireInfoINTEL* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_pci_bus_info
+void transform_tohost_VkPhysicalDevicePCIBusInfoPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePCIBusInfoPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDevicePCIBusInfoPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePCIBusInfoPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_AMD_display_native_hdr
+void transform_tohost_VkDisplayNativeHdrSurfaceCapabilitiesAMD(
+    ResourceTracker* resourceTracker,
+    VkDisplayNativeHdrSurfaceCapabilitiesAMD* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkDisplayNativeHdrSurfaceCapabilitiesAMD(
+    ResourceTracker* resourceTracker,
+    VkDisplayNativeHdrSurfaceCapabilitiesAMD* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkSwapchainDisplayNativeHdrCreateInfoAMD(
+    ResourceTracker* resourceTracker,
+    VkSwapchainDisplayNativeHdrCreateInfoAMD* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkSwapchainDisplayNativeHdrCreateInfoAMD(
+    ResourceTracker* resourceTracker,
+    VkSwapchainDisplayNativeHdrCreateInfoAMD* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+void transform_tohost_VkImagePipeSurfaceCreateInfoFUCHSIA(
+    ResourceTracker* resourceTracker,
+    VkImagePipeSurfaceCreateInfoFUCHSIA* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkImagePipeSurfaceCreateInfoFUCHSIA(
+    ResourceTracker* resourceTracker,
+    VkImagePipeSurfaceCreateInfoFUCHSIA* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_metal_surface
+void transform_tohost_VkMetalSurfaceCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkMetalSurfaceCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkMetalSurfaceCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkMetalSurfaceCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_fragment_density_map
+void transform_tohost_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentDensityMapFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentDensityMapFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentDensityMapPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->minFragmentDensityTexelSize));
+    transform_tohost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->maxFragmentDensityTexelSize));
+}
+
+void transform_fromhost_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentDensityMapPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->minFragmentDensityTexelSize));
+    transform_fromhost_VkExtent2D(resourceTracker, (VkExtent2D*)(&toTransform->maxFragmentDensityTexelSize));
+}
+
+void transform_tohost_VkRenderPassFragmentDensityMapCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkRenderPassFragmentDensityMapCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkAttachmentReference(resourceTracker, (VkAttachmentReference*)(&toTransform->fragmentDensityMapAttachment));
+}
+
+void transform_fromhost_VkRenderPassFragmentDensityMapCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkRenderPassFragmentDensityMapCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkAttachmentReference(resourceTracker, (VkAttachmentReference*)(&toTransform->fragmentDensityMapAttachment));
+}
+
+#endif
+#ifdef VK_EXT_scalar_block_layout
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+void transform_tohost_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_AMD_shader_core_properties2
+void transform_tohost_VkPhysicalDeviceShaderCoreProperties2AMD(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderCoreProperties2AMD* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceShaderCoreProperties2AMD(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderCoreProperties2AMD* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_AMD_device_coherent_memory
+void transform_tohost_VkPhysicalDeviceCoherentMemoryFeaturesAMD(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCoherentMemoryFeaturesAMD* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceCoherentMemoryFeaturesAMD(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCoherentMemoryFeaturesAMD* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+void transform_tohost_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_memory_budget
+void transform_tohost_VkPhysicalDeviceMemoryBudgetPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMemoryBudgetPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceMemoryBudgetPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMemoryBudgetPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_memory_priority
+void transform_tohost_VkPhysicalDeviceMemoryPriorityFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMemoryPriorityFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceMemoryPriorityFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMemoryPriorityFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkMemoryPriorityAllocateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkMemoryPriorityAllocateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkMemoryPriorityAllocateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkMemoryPriorityAllocateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+void transform_tohost_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_buffer_device_address
+void transform_tohost_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkBufferDeviceAddressCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkBufferDeviceAddressCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkBufferDeviceAddressCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkBufferDeviceAddressCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_tooling_info
+void transform_tohost_VkPhysicalDeviceToolPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceToolPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceToolPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceToolPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+#endif
+#ifdef VK_EXT_validation_features
+void transform_tohost_VkValidationFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkValidationFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkValidationFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkValidationFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_cooperative_matrix
+void transform_tohost_VkCooperativeMatrixPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkCooperativeMatrixPropertiesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkCooperativeMatrixPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkCooperativeMatrixPropertiesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceCooperativeMatrixFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCooperativeMatrixFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceCooperativeMatrixFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCooperativeMatrixFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceCooperativeMatrixPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCooperativeMatrixPropertiesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceCooperativeMatrixPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCooperativeMatrixPropertiesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+void transform_tohost_VkPhysicalDeviceCoverageReductionModeFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCoverageReductionModeFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceCoverageReductionModeFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCoverageReductionModeFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPipelineCoverageReductionStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineCoverageReductionStateCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPipelineCoverageReductionStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineCoverageReductionStateCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkFramebufferMixedSamplesCombinationNV(
+    ResourceTracker* resourceTracker,
+    VkFramebufferMixedSamplesCombinationNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkFramebufferMixedSamplesCombinationNV(
+    ResourceTracker* resourceTracker,
+    VkFramebufferMixedSamplesCombinationNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+void transform_tohost_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+void transform_tohost_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+void transform_tohost_VkSurfaceFullScreenExclusiveInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkSurfaceFullScreenExclusiveInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkSurfaceFullScreenExclusiveInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkSurfaceFullScreenExclusiveInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkSurfaceCapabilitiesFullScreenExclusiveEXT(
+    ResourceTracker* resourceTracker,
+    VkSurfaceCapabilitiesFullScreenExclusiveEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkSurfaceCapabilitiesFullScreenExclusiveEXT(
+    ResourceTracker* resourceTracker,
+    VkSurfaceCapabilitiesFullScreenExclusiveEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkSurfaceFullScreenExclusiveWin32InfoEXT(
+    ResourceTracker* resourceTracker,
+    VkSurfaceFullScreenExclusiveWin32InfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkSurfaceFullScreenExclusiveWin32InfoEXT(
+    ResourceTracker* resourceTracker,
+    VkSurfaceFullScreenExclusiveWin32InfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_headless_surface
+void transform_tohost_VkHeadlessSurfaceCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkHeadlessSurfaceCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkHeadlessSurfaceCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkHeadlessSurfaceCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_line_rasterization
+void transform_tohost_VkPhysicalDeviceLineRasterizationFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceLineRasterizationFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceLineRasterizationFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceLineRasterizationFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceLineRasterizationPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceLineRasterizationPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceLineRasterizationPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceLineRasterizationPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPipelineRasterizationLineStateCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineRasterizationLineStateCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPipelineRasterizationLineStateCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineRasterizationLineStateCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_shader_atomic_float
+void transform_tohost_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_host_query_reset
+#endif
+#ifdef VK_EXT_index_type_uint8
+void transform_tohost_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceIndexTypeUint8FeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceIndexTypeUint8FeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+void transform_tohost_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+void transform_tohost_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_device_generated_commands
+void transform_tohost_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkGraphicsShaderGroupCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkGraphicsShaderGroupCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pStages)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->stageCount; ++i)
+            {
+                transform_tohost_VkPipelineShaderStageCreateInfo(resourceTracker, (VkPipelineShaderStageCreateInfo*)(toTransform->pStages + i));
+            }
+        }
+    }
+    if (toTransform->pVertexInputState)
+    {
+        transform_tohost_VkPipelineVertexInputStateCreateInfo(resourceTracker, (VkPipelineVertexInputStateCreateInfo*)(toTransform->pVertexInputState));
+    }
+    if (toTransform->pTessellationState)
+    {
+        transform_tohost_VkPipelineTessellationStateCreateInfo(resourceTracker, (VkPipelineTessellationStateCreateInfo*)(toTransform->pTessellationState));
+    }
+}
+
+void transform_fromhost_VkGraphicsShaderGroupCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkGraphicsShaderGroupCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pStages)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->stageCount; ++i)
+            {
+                transform_fromhost_VkPipelineShaderStageCreateInfo(resourceTracker, (VkPipelineShaderStageCreateInfo*)(toTransform->pStages + i));
+            }
+        }
+    }
+    if (toTransform->pVertexInputState)
+    {
+        transform_fromhost_VkPipelineVertexInputStateCreateInfo(resourceTracker, (VkPipelineVertexInputStateCreateInfo*)(toTransform->pVertexInputState));
+    }
+    if (toTransform->pTessellationState)
+    {
+        transform_fromhost_VkPipelineTessellationStateCreateInfo(resourceTracker, (VkPipelineTessellationStateCreateInfo*)(toTransform->pTessellationState));
+    }
+}
+
+void transform_tohost_VkGraphicsPipelineShaderGroupsCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkGraphicsPipelineShaderGroupsCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pGroups)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->groupCount; ++i)
+            {
+                transform_tohost_VkGraphicsShaderGroupCreateInfoNV(resourceTracker, (VkGraphicsShaderGroupCreateInfoNV*)(toTransform->pGroups + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkGraphicsPipelineShaderGroupsCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkGraphicsPipelineShaderGroupsCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pGroups)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->groupCount; ++i)
+            {
+                transform_fromhost_VkGraphicsShaderGroupCreateInfoNV(resourceTracker, (VkGraphicsShaderGroupCreateInfoNV*)(toTransform->pGroups + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkBindShaderGroupIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkBindShaderGroupIndirectCommandNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkBindShaderGroupIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkBindShaderGroupIndirectCommandNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkBindIndexBufferIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkBindIndexBufferIndirectCommandNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkBindIndexBufferIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkBindIndexBufferIndirectCommandNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkBindVertexBufferIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkBindVertexBufferIndirectCommandNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkBindVertexBufferIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkBindVertexBufferIndirectCommandNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkSetStateFlagsIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkSetStateFlagsIndirectCommandNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkSetStateFlagsIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkSetStateFlagsIndirectCommandNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkIndirectCommandsStreamNV(
+    ResourceTracker* resourceTracker,
+    VkIndirectCommandsStreamNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkIndirectCommandsStreamNV(
+    ResourceTracker* resourceTracker,
+    VkIndirectCommandsStreamNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkIndirectCommandsLayoutTokenNV(
+    ResourceTracker* resourceTracker,
+    VkIndirectCommandsLayoutTokenNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkIndirectCommandsLayoutTokenNV(
+    ResourceTracker* resourceTracker,
+    VkIndirectCommandsLayoutTokenNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkIndirectCommandsLayoutCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkIndirectCommandsLayoutCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pTokens)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->tokenCount; ++i)
+            {
+                transform_tohost_VkIndirectCommandsLayoutTokenNV(resourceTracker, (VkIndirectCommandsLayoutTokenNV*)(toTransform->pTokens + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkIndirectCommandsLayoutCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkIndirectCommandsLayoutCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pTokens)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->tokenCount; ++i)
+            {
+                transform_fromhost_VkIndirectCommandsLayoutTokenNV(resourceTracker, (VkIndirectCommandsLayoutTokenNV*)(toTransform->pTokens + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkGeneratedCommandsInfoNV(
+    ResourceTracker* resourceTracker,
+    VkGeneratedCommandsInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pStreams)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->streamCount; ++i)
+            {
+                transform_tohost_VkIndirectCommandsStreamNV(resourceTracker, (VkIndirectCommandsStreamNV*)(toTransform->pStreams + i));
+            }
+        }
+    }
+}
+
+void transform_fromhost_VkGeneratedCommandsInfoNV(
+    ResourceTracker* resourceTracker,
+    VkGeneratedCommandsInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pStreams)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->streamCount; ++i)
+            {
+                transform_fromhost_VkIndirectCommandsStreamNV(resourceTracker, (VkIndirectCommandsStreamNV*)(toTransform->pStreams + i));
+            }
+        }
+    }
+}
+
+void transform_tohost_VkGeneratedCommandsMemoryRequirementsInfoNV(
+    ResourceTracker* resourceTracker,
+    VkGeneratedCommandsMemoryRequirementsInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkGeneratedCommandsMemoryRequirementsInfoNV(
+    ResourceTracker* resourceTracker,
+    VkGeneratedCommandsMemoryRequirementsInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+void transform_tohost_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_transform
+void transform_tohost_VkRenderPassTransformBeginInfoQCOM(
+    ResourceTracker* resourceTracker,
+    VkRenderPassTransformBeginInfoQCOM* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkRenderPassTransformBeginInfoQCOM(
+    ResourceTracker* resourceTracker,
+    VkRenderPassTransformBeginInfoQCOM* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(
+    ResourceTracker* resourceTracker,
+    VkCommandBufferInheritanceRenderPassTransformInfoQCOM* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkRect2D(resourceTracker, (VkRect2D*)(&toTransform->renderArea));
+}
+
+void transform_fromhost_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(
+    ResourceTracker* resourceTracker,
+    VkCommandBufferInheritanceRenderPassTransformInfoQCOM* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkRect2D(resourceTracker, (VkRect2D*)(&toTransform->renderArea));
+}
+
+#endif
+#ifdef VK_EXT_device_memory_report
+void transform_tohost_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkDeviceMemoryReportCallbackDataEXT(
+    ResourceTracker* resourceTracker,
+    VkDeviceMemoryReportCallbackDataEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkDeviceMemoryReportCallbackDataEXT(
+    ResourceTracker* resourceTracker,
+    VkDeviceMemoryReportCallbackDataEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkDeviceDeviceMemoryReportCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDeviceDeviceMemoryReportCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkDeviceDeviceMemoryReportCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDeviceDeviceMemoryReportCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_robustness2
+void transform_tohost_VkPhysicalDeviceRobustness2FeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRobustness2FeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceRobustness2FeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRobustness2FeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceRobustness2PropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRobustness2PropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceRobustness2PropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRobustness2PropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_custom_border_color
+void transform_tohost_VkSamplerCustomBorderColorCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkSamplerCustomBorderColorCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkClearColorValue(resourceTracker, (VkClearColorValue*)(&toTransform->customBorderColor));
+}
+
+void transform_fromhost_VkSamplerCustomBorderColorCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkSamplerCustomBorderColorCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkClearColorValue(resourceTracker, (VkClearColorValue*)(&toTransform->customBorderColor));
+}
+
+void transform_tohost_VkPhysicalDeviceCustomBorderColorPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCustomBorderColorPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceCustomBorderColorPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCustomBorderColorPropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceCustomBorderColorFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCustomBorderColorFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceCustomBorderColorFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCustomBorderColorFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+void transform_tohost_VkPhysicalDevicePrivateDataFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePrivateDataFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDevicePrivateDataFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePrivateDataFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkDevicePrivateDataCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDevicePrivateDataCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkDevicePrivateDataCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDevicePrivateDataCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPrivateDataSlotCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPrivateDataSlotCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPrivateDataSlotCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPrivateDataSlotCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+void transform_tohost_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_NV_device_diagnostics_config
+void transform_tohost_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDiagnosticsConfigFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDiagnosticsConfigFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkDeviceDiagnosticsConfigCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkDeviceDiagnosticsConfigCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkDeviceDiagnosticsConfigCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkDeviceDiagnosticsConfigCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+void transform_tohost_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineFragmentShadingRateEnumStateCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineFragmentShadingRateEnumStateCreateInfoNV* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_fragment_density_map2
+void transform_tohost_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+void transform_tohost_VkCopyCommandTransformInfoQCOM(
+    ResourceTracker* resourceTracker,
+    VkCopyCommandTransformInfoQCOM* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkCopyCommandTransformInfoQCOM(
+    ResourceTracker* resourceTracker,
+    VkCopyCommandTransformInfoQCOM* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_image_robustness
+void transform_tohost_VkPhysicalDeviceImageRobustnessFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceImageRobustnessFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceImageRobustnessFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceImageRobustnessFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_4444_formats
+void transform_tohost_VkPhysicalDevice4444FormatsFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevice4444FormatsFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDevice4444FormatsFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevice4444FormatsFeaturesEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_EXT_directfb_surface
+void transform_tohost_VkDirectFBSurfaceCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDirectFBSurfaceCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkDirectFBSurfaceCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDirectFBSurfaceCreateInfoEXT* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+#endif
+#ifdef VK_GOOGLE_gfxstream
 void transform_tohost_VkImportColorBufferGOOGLE(
     ResourceTracker* resourceTracker,
     VkImportColorBufferGOOGLE* toTransform)
@@ -8916,6 +14924,30 @@
     }
 }
 
+void transform_tohost_VkImportBufferGOOGLE(
+    ResourceTracker* resourceTracker,
+    VkImportBufferGOOGLE* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkImportBufferGOOGLE(
+    ResourceTracker* resourceTracker,
+    VkImportBufferGOOGLE* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
 void transform_tohost_VkImportPhysicalAddressGOOGLE(
     ResourceTracker* resourceTracker,
     VkImportPhysicalAddressGOOGLE* toTransform)
@@ -8941,15 +14973,719 @@
 }
 
 #endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
+#ifdef VK_KHR_acceleration_structure
+void transform_tohost_VkDeviceOrHostAddressKHR(
+    ResourceTracker* resourceTracker,
+    VkDeviceOrHostAddressKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkDeviceOrHostAddressKHR(
+    ResourceTracker* resourceTracker,
+    VkDeviceOrHostAddressKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkDeviceOrHostAddressConstKHR(
+    ResourceTracker* resourceTracker,
+    VkDeviceOrHostAddressConstKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkDeviceOrHostAddressConstKHR(
+    ResourceTracker* resourceTracker,
+    VkDeviceOrHostAddressConstKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkAccelerationStructureBuildRangeInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureBuildRangeInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkAccelerationStructureBuildRangeInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureBuildRangeInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkAccelerationStructureGeometryTrianglesDataKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryTrianglesDataKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkDeviceOrHostAddressConstKHR(resourceTracker, (VkDeviceOrHostAddressConstKHR*)(&toTransform->vertexData));
+    transform_tohost_VkDeviceOrHostAddressConstKHR(resourceTracker, (VkDeviceOrHostAddressConstKHR*)(&toTransform->indexData));
+    transform_tohost_VkDeviceOrHostAddressConstKHR(resourceTracker, (VkDeviceOrHostAddressConstKHR*)(&toTransform->transformData));
+}
+
+void transform_fromhost_VkAccelerationStructureGeometryTrianglesDataKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryTrianglesDataKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkDeviceOrHostAddressConstKHR(resourceTracker, (VkDeviceOrHostAddressConstKHR*)(&toTransform->vertexData));
+    transform_fromhost_VkDeviceOrHostAddressConstKHR(resourceTracker, (VkDeviceOrHostAddressConstKHR*)(&toTransform->indexData));
+    transform_fromhost_VkDeviceOrHostAddressConstKHR(resourceTracker, (VkDeviceOrHostAddressConstKHR*)(&toTransform->transformData));
+}
+
+void transform_tohost_VkAccelerationStructureGeometryAabbsDataKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryAabbsDataKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkDeviceOrHostAddressConstKHR(resourceTracker, (VkDeviceOrHostAddressConstKHR*)(&toTransform->data));
+}
+
+void transform_fromhost_VkAccelerationStructureGeometryAabbsDataKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryAabbsDataKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkDeviceOrHostAddressConstKHR(resourceTracker, (VkDeviceOrHostAddressConstKHR*)(&toTransform->data));
+}
+
+void transform_tohost_VkAccelerationStructureGeometryInstancesDataKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryInstancesDataKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkDeviceOrHostAddressConstKHR(resourceTracker, (VkDeviceOrHostAddressConstKHR*)(&toTransform->data));
+}
+
+void transform_fromhost_VkAccelerationStructureGeometryInstancesDataKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryInstancesDataKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkDeviceOrHostAddressConstKHR(resourceTracker, (VkDeviceOrHostAddressConstKHR*)(&toTransform->data));
+}
+
+void transform_tohost_VkAccelerationStructureGeometryDataKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryDataKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_tohost_VkAccelerationStructureGeometryTrianglesDataKHR(resourceTracker, (VkAccelerationStructureGeometryTrianglesDataKHR*)(&toTransform->triangles));
+    transform_tohost_VkAccelerationStructureGeometryAabbsDataKHR(resourceTracker, (VkAccelerationStructureGeometryAabbsDataKHR*)(&toTransform->aabbs));
+    transform_tohost_VkAccelerationStructureGeometryInstancesDataKHR(resourceTracker, (VkAccelerationStructureGeometryInstancesDataKHR*)(&toTransform->instances));
+}
+
+void transform_fromhost_VkAccelerationStructureGeometryDataKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryDataKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    transform_fromhost_VkAccelerationStructureGeometryTrianglesDataKHR(resourceTracker, (VkAccelerationStructureGeometryTrianglesDataKHR*)(&toTransform->triangles));
+    transform_fromhost_VkAccelerationStructureGeometryAabbsDataKHR(resourceTracker, (VkAccelerationStructureGeometryAabbsDataKHR*)(&toTransform->aabbs));
+    transform_fromhost_VkAccelerationStructureGeometryInstancesDataKHR(resourceTracker, (VkAccelerationStructureGeometryInstancesDataKHR*)(&toTransform->instances));
+}
+
+void transform_tohost_VkAccelerationStructureGeometryKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkAccelerationStructureGeometryDataKHR(resourceTracker, (VkAccelerationStructureGeometryDataKHR*)(&toTransform->geometry));
+}
+
+void transform_fromhost_VkAccelerationStructureGeometryKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkAccelerationStructureGeometryDataKHR(resourceTracker, (VkAccelerationStructureGeometryDataKHR*)(&toTransform->geometry));
+}
+
+void transform_tohost_VkAccelerationStructureBuildGeometryInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureBuildGeometryInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pGeometries)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->geometryCount; ++i)
+            {
+                transform_tohost_VkAccelerationStructureGeometryKHR(resourceTracker, (VkAccelerationStructureGeometryKHR*)(toTransform->pGeometries + i));
+            }
+        }
+    }
+    transform_tohost_VkDeviceOrHostAddressKHR(resourceTracker, (VkDeviceOrHostAddressKHR*)(&toTransform->scratchData));
+}
+
+void transform_fromhost_VkAccelerationStructureBuildGeometryInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureBuildGeometryInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pGeometries)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->geometryCount; ++i)
+            {
+                transform_fromhost_VkAccelerationStructureGeometryKHR(resourceTracker, (VkAccelerationStructureGeometryKHR*)(toTransform->pGeometries + i));
+            }
+        }
+    }
+    transform_fromhost_VkDeviceOrHostAddressKHR(resourceTracker, (VkDeviceOrHostAddressKHR*)(&toTransform->scratchData));
+}
+
+void transform_tohost_VkAccelerationStructureCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureCreateInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkAccelerationStructureCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureCreateInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkWriteDescriptorSetAccelerationStructureKHR(
+    ResourceTracker* resourceTracker,
+    VkWriteDescriptorSetAccelerationStructureKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkWriteDescriptorSetAccelerationStructureKHR(
+    ResourceTracker* resourceTracker,
+    VkWriteDescriptorSetAccelerationStructureKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceAccelerationStructureFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceAccelerationStructureFeaturesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceAccelerationStructureFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceAccelerationStructureFeaturesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceAccelerationStructurePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceAccelerationStructurePropertiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceAccelerationStructurePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceAccelerationStructurePropertiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkAccelerationStructureDeviceAddressInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureDeviceAddressInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkAccelerationStructureDeviceAddressInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureDeviceAddressInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkAccelerationStructureVersionInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureVersionInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkAccelerationStructureVersionInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureVersionInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkCopyAccelerationStructureToMemoryInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkCopyAccelerationStructureToMemoryInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkDeviceOrHostAddressKHR(resourceTracker, (VkDeviceOrHostAddressKHR*)(&toTransform->dst));
+}
+
+void transform_fromhost_VkCopyAccelerationStructureToMemoryInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkCopyAccelerationStructureToMemoryInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkDeviceOrHostAddressKHR(resourceTracker, (VkDeviceOrHostAddressKHR*)(&toTransform->dst));
+}
+
+void transform_tohost_VkCopyMemoryToAccelerationStructureInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkCopyMemoryToAccelerationStructureInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_tohost_VkDeviceOrHostAddressConstKHR(resourceTracker, (VkDeviceOrHostAddressConstKHR*)(&toTransform->src));
+}
+
+void transform_fromhost_VkCopyMemoryToAccelerationStructureInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkCopyMemoryToAccelerationStructureInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    transform_fromhost_VkDeviceOrHostAddressConstKHR(resourceTracker, (VkDeviceOrHostAddressConstKHR*)(&toTransform->src));
+}
+
+void transform_tohost_VkCopyAccelerationStructureInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkCopyAccelerationStructureInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkCopyAccelerationStructureInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkCopyAccelerationStructureInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkAccelerationStructureBuildSizesInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureBuildSizesInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkAccelerationStructureBuildSizesInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureBuildSizesInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
 #endif
-#ifdef VK_GOOGLE_async_command_buffers
+#ifdef VK_KHR_ray_tracing_pipeline
+void transform_tohost_VkRayTracingShaderGroupCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkRayTracingShaderGroupCreateInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkRayTracingShaderGroupCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkRayTracingShaderGroupCreateInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkRayTracingPipelineInterfaceCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkRayTracingPipelineInterfaceCreateInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkRayTracingPipelineInterfaceCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkRayTracingPipelineInterfaceCreateInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkRayTracingPipelineCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkRayTracingPipelineCreateInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pStages)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->stageCount; ++i)
+            {
+                transform_tohost_VkPipelineShaderStageCreateInfo(resourceTracker, (VkPipelineShaderStageCreateInfo*)(toTransform->pStages + i));
+            }
+        }
+    }
+    if (toTransform)
+    {
+        if (toTransform->pGroups)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->groupCount; ++i)
+            {
+                transform_tohost_VkRayTracingShaderGroupCreateInfoKHR(resourceTracker, (VkRayTracingShaderGroupCreateInfoKHR*)(toTransform->pGroups + i));
+            }
+        }
+    }
+    if (toTransform->pLibraryInfo)
+    {
+        transform_tohost_VkPipelineLibraryCreateInfoKHR(resourceTracker, (VkPipelineLibraryCreateInfoKHR*)(toTransform->pLibraryInfo));
+    }
+    if (toTransform->pLibraryInterface)
+    {
+        transform_tohost_VkRayTracingPipelineInterfaceCreateInfoKHR(resourceTracker, (VkRayTracingPipelineInterfaceCreateInfoKHR*)(toTransform->pLibraryInterface));
+    }
+    if (toTransform->pDynamicState)
+    {
+        transform_tohost_VkPipelineDynamicStateCreateInfo(resourceTracker, (VkPipelineDynamicStateCreateInfo*)(toTransform->pDynamicState));
+    }
+}
+
+void transform_fromhost_VkRayTracingPipelineCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkRayTracingPipelineCreateInfoKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+    if (toTransform)
+    {
+        if (toTransform->pStages)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->stageCount; ++i)
+            {
+                transform_fromhost_VkPipelineShaderStageCreateInfo(resourceTracker, (VkPipelineShaderStageCreateInfo*)(toTransform->pStages + i));
+            }
+        }
+    }
+    if (toTransform)
+    {
+        if (toTransform->pGroups)
+        {
+            for (uint32_t i = 0; i < (uint32_t)toTransform->groupCount; ++i)
+            {
+                transform_fromhost_VkRayTracingShaderGroupCreateInfoKHR(resourceTracker, (VkRayTracingShaderGroupCreateInfoKHR*)(toTransform->pGroups + i));
+            }
+        }
+    }
+    if (toTransform->pLibraryInfo)
+    {
+        transform_fromhost_VkPipelineLibraryCreateInfoKHR(resourceTracker, (VkPipelineLibraryCreateInfoKHR*)(toTransform->pLibraryInfo));
+    }
+    if (toTransform->pLibraryInterface)
+    {
+        transform_fromhost_VkRayTracingPipelineInterfaceCreateInfoKHR(resourceTracker, (VkRayTracingPipelineInterfaceCreateInfoKHR*)(toTransform->pLibraryInterface));
+    }
+    if (toTransform->pDynamicState)
+    {
+        transform_fromhost_VkPipelineDynamicStateCreateInfo(resourceTracker, (VkPipelineDynamicStateCreateInfo*)(toTransform->pDynamicState));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRayTracingPipelineFeaturesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRayTracingPipelineFeaturesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRayTracingPipelinePropertiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRayTracingPipelinePropertiesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_tohost_VkStridedDeviceAddressRegionKHR(
+    ResourceTracker* resourceTracker,
+    VkStridedDeviceAddressRegionKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkStridedDeviceAddressRegionKHR(
+    ResourceTracker* resourceTracker,
+    VkStridedDeviceAddressRegionKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_tohost_VkTraceRaysIndirectCommandKHR(
+    ResourceTracker* resourceTracker,
+    VkTraceRaysIndirectCommandKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
+void transform_fromhost_VkTraceRaysIndirectCommandKHR(
+    ResourceTracker* resourceTracker,
+    VkTraceRaysIndirectCommandKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+}
+
 #endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
-#endif
-#ifdef VK_GOOGLE_address_space_info
-#endif
-#ifdef VK_GOOGLE_free_memory_sync
+#ifdef VK_KHR_ray_query
+void transform_tohost_VkPhysicalDeviceRayQueryFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRayQueryFeaturesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_tohost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
+void transform_fromhost_VkPhysicalDeviceRayQueryFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRayQueryFeaturesKHR* toTransform)
+{
+    (void)resourceTracker;
+    (void)toTransform;
+    if (toTransform->pNext)
+    {
+        transform_fromhost_extension_struct(resourceTracker, (void*)(toTransform->pNext));
+    }
+}
+
 #endif
 void transform_tohost_extension_struct(
     ResourceTracker* resourceTracker,
@@ -9063,9 +15799,9 @@
             transform_tohost_VkPhysicalDeviceMultiviewProperties(resourceTracker, reinterpret_cast<VkPhysicalDeviceMultiviewProperties*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES:
         {
-            transform_tohost_VkPhysicalDeviceVariablePointerFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceVariablePointerFeatures*>(structExtension_out));
+            transform_tohost_VkPhysicalDeviceVariablePointersFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceVariablePointersFeatures*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES:
@@ -9158,9 +15894,201 @@
             transform_tohost_VkPhysicalDeviceMaintenance3Properties(resourceTracker, reinterpret_cast<VkPhysicalDeviceMaintenance3Properties*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
         {
-            transform_tohost_VkPhysicalDeviceShaderDrawParameterFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderDrawParameterFeatures*>(structExtension_out));
+            transform_tohost_VkPhysicalDeviceShaderDrawParametersFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderDrawParametersFeatures*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
+        {
+            transform_tohost_VkPhysicalDeviceVulkan11Features(resourceTracker, reinterpret_cast<VkPhysicalDeviceVulkan11Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
+        {
+            transform_tohost_VkPhysicalDeviceVulkan11Properties(resourceTracker, reinterpret_cast<VkPhysicalDeviceVulkan11Properties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
+        {
+            transform_tohost_VkPhysicalDeviceVulkan12Features(resourceTracker, reinterpret_cast<VkPhysicalDeviceVulkan12Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
+        {
+            transform_tohost_VkPhysicalDeviceVulkan12Properties(resourceTracker, reinterpret_cast<VkPhysicalDeviceVulkan12Properties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO:
+        {
+            transform_tohost_VkImageFormatListCreateInfo(resourceTracker, reinterpret_cast<VkImageFormatListCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES:
+        {
+            transform_tohost_VkPhysicalDevice8BitStorageFeatures(resourceTracker, reinterpret_cast<VkPhysicalDevice8BitStorageFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES:
+        {
+            transform_tohost_VkPhysicalDeviceDriverProperties(resourceTracker, reinterpret_cast<VkPhysicalDeviceDriverProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES:
+        {
+            transform_tohost_VkPhysicalDeviceShaderAtomicInt64Features(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderAtomicInt64Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES:
+        {
+            transform_tohost_VkPhysicalDeviceShaderFloat16Int8Features(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderFloat16Int8Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES:
+        {
+            transform_tohost_VkPhysicalDeviceFloatControlsProperties(resourceTracker, reinterpret_cast<VkPhysicalDeviceFloatControlsProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO:
+        {
+            transform_tohost_VkDescriptorSetLayoutBindingFlagsCreateInfo(resourceTracker, reinterpret_cast<VkDescriptorSetLayoutBindingFlagsCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES:
+        {
+            transform_tohost_VkPhysicalDeviceDescriptorIndexingFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceDescriptorIndexingFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES:
+        {
+            transform_tohost_VkPhysicalDeviceDescriptorIndexingProperties(resourceTracker, reinterpret_cast<VkPhysicalDeviceDescriptorIndexingProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO:
+        {
+            transform_tohost_VkDescriptorSetVariableDescriptorCountAllocateInfo(resourceTracker, reinterpret_cast<VkDescriptorSetVariableDescriptorCountAllocateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT:
+        {
+            transform_tohost_VkDescriptorSetVariableDescriptorCountLayoutSupport(resourceTracker, reinterpret_cast<VkDescriptorSetVariableDescriptorCountLayoutSupport*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE:
+        {
+            transform_tohost_VkSubpassDescriptionDepthStencilResolve(resourceTracker, reinterpret_cast<VkSubpassDescriptionDepthStencilResolve*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES:
+        {
+            transform_tohost_VkPhysicalDeviceDepthStencilResolveProperties(resourceTracker, reinterpret_cast<VkPhysicalDeviceDepthStencilResolveProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES:
+        {
+            transform_tohost_VkPhysicalDeviceScalarBlockLayoutFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceScalarBlockLayoutFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO:
+        {
+            transform_tohost_VkImageStencilUsageCreateInfo(resourceTracker, reinterpret_cast<VkImageStencilUsageCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO:
+        {
+            transform_tohost_VkSamplerReductionModeCreateInfo(resourceTracker, reinterpret_cast<VkSamplerReductionModeCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES:
+        {
+            transform_tohost_VkPhysicalDeviceSamplerFilterMinmaxProperties(resourceTracker, reinterpret_cast<VkPhysicalDeviceSamplerFilterMinmaxProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
+        {
+            transform_tohost_VkPhysicalDeviceVulkanMemoryModelFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceVulkanMemoryModelFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES:
+        {
+            transform_tohost_VkPhysicalDeviceImagelessFramebufferFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceImagelessFramebufferFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO:
+        {
+            transform_tohost_VkFramebufferAttachmentsCreateInfo(resourceTracker, reinterpret_cast<VkFramebufferAttachmentsCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO:
+        {
+            transform_tohost_VkRenderPassAttachmentBeginInfo(resourceTracker, reinterpret_cast<VkRenderPassAttachmentBeginInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES:
+        {
+            transform_tohost_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceUniformBufferStandardLayoutFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES:
+        {
+            transform_tohost_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
+        {
+            transform_tohost_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT:
+        {
+            transform_tohost_VkAttachmentReferenceStencilLayout(resourceTracker, reinterpret_cast<VkAttachmentReferenceStencilLayout*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT:
+        {
+            transform_tohost_VkAttachmentDescriptionStencilLayout(resourceTracker, reinterpret_cast<VkAttachmentDescriptionStencilLayout*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES:
+        {
+            transform_tohost_VkPhysicalDeviceHostQueryResetFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceHostQueryResetFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES:
+        {
+            transform_tohost_VkPhysicalDeviceTimelineSemaphoreFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceTimelineSemaphoreFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES:
+        {
+            transform_tohost_VkPhysicalDeviceTimelineSemaphoreProperties(resourceTracker, reinterpret_cast<VkPhysicalDeviceTimelineSemaphoreProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO:
+        {
+            transform_tohost_VkSemaphoreTypeCreateInfo(resourceTracker, reinterpret_cast<VkSemaphoreTypeCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
+        {
+            transform_tohost_VkTimelineSemaphoreSubmitInfo(resourceTracker, reinterpret_cast<VkTimelineSemaphoreSubmitInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
+        {
+            transform_tohost_VkPhysicalDeviceBufferDeviceAddressFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceBufferDeviceAddressFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO:
+        {
+            transform_tohost_VkBufferOpaqueCaptureAddressCreateInfo(resourceTracker, reinterpret_cast<VkBufferOpaqueCaptureAddressCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO:
+        {
+            transform_tohost_VkMemoryOpaqueCaptureAddressAllocateInfo(resourceTracker, reinterpret_cast<VkMemoryOpaqueCaptureAddressAllocateInfo*>(structExtension_out));
             break;
         }
 #endif
@@ -9259,17 +16187,87 @@
             break;
         }
 #endif
-#ifdef VK_KHR_image_format_list
-        case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR:
+#ifdef VK_KHR_performance_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR:
         {
-            transform_tohost_VkImageFormatListCreateInfoKHR(resourceTracker, reinterpret_cast<VkImageFormatListCreateInfoKHR*>(structExtension_out));
+            transform_tohost_VkPhysicalDevicePerformanceQueryFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDevicePerformanceQueryFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR:
+        {
+            transform_tohost_VkPhysicalDevicePerformanceQueryPropertiesKHR(resourceTracker, reinterpret_cast<VkPhysicalDevicePerformanceQueryPropertiesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR:
+        {
+            transform_tohost_VkQueryPoolPerformanceCreateInfoKHR(resourceTracker, reinterpret_cast<VkQueryPoolPerformanceCreateInfoKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR:
+        {
+            transform_tohost_VkPerformanceQuerySubmitInfoKHR(resourceTracker, reinterpret_cast<VkPerformanceQuerySubmitInfoKHR*>(structExtension_out));
             break;
         }
 #endif
-#ifdef VK_KHR_8bit_storage
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR:
+#ifdef VK_KHR_portability_subset
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR:
         {
-            transform_tohost_VkPhysicalDevice8BitStorageFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDevice8BitStorageFeaturesKHR*>(structExtension_out));
+            transform_tohost_VkPhysicalDevicePortabilitySubsetFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDevicePortabilitySubsetFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR:
+        {
+            transform_tohost_VkPhysicalDevicePortabilitySubsetPropertiesKHR(resourceTracker, reinterpret_cast<VkPhysicalDevicePortabilitySubsetPropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_shader_clock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR:
+        {
+            transform_tohost_VkPhysicalDeviceShaderClockFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderClockFeaturesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR:
+        {
+            transform_tohost_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+        case VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR:
+        {
+            transform_tohost_VkFragmentShadingRateAttachmentInfoKHR(resourceTracker, reinterpret_cast<VkFragmentShadingRateAttachmentInfoKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR:
+        {
+            transform_tohost_VkPipelineFragmentShadingRateStateCreateInfoKHR(resourceTracker, reinterpret_cast<VkPipelineFragmentShadingRateStateCreateInfoKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR:
+        {
+            transform_tohost_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentShadingRateFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR:
+        {
+            transform_tohost_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentShadingRatePropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+        case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR:
+        {
+            transform_tohost_VkSurfaceProtectedCapabilitiesKHR(resourceTracker, reinterpret_cast<VkSurfaceProtectedCapabilitiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR:
+        {
+            transform_tohost_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR*>(structExtension_out));
             break;
         }
 #endif
@@ -9311,6 +16309,23 @@
             break;
         }
 #endif
+#ifdef VK_EXT_transform_feedback
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceTransformFeedbackFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceTransformFeedbackFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceTransformFeedbackPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceTransformFeedbackPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT:
+        {
+            transform_tohost_VkPipelineRasterizationStateStreamCreateInfoEXT(resourceTracker, reinterpret_cast<VkPipelineRasterizationStateStreamCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_AMD_texture_gather_bias_lod
         case VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD:
         {
@@ -9318,6 +16333,13 @@
             break;
         }
 #endif
+#ifdef VK_NV_corner_sampled_image
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceCornerSampledImageFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceCornerSampledImageFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_NV_external_memory
         case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV:
         {
@@ -9356,6 +16378,25 @@
             break;
         }
 #endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_astc_decode_mode
+        case VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT:
+        {
+            transform_tohost_VkImageViewASTCDecodeModeEXT(resourceTracker, reinterpret_cast<VkImageViewASTCDecodeModeEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceASTCDecodeFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceASTCDecodeFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_EXT_conditional_rendering
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
         {
@@ -9427,6 +16468,18 @@
             break;
         }
 #endif
+#ifdef VK_EXT_depth_clip_enable
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceDepthClipEnableFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceDepthClipEnableFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT:
+        {
+            transform_tohost_VkPipelineRasterizationDepthClipStateCreateInfoEXT(resourceTracker, reinterpret_cast<VkPipelineRasterizationDepthClipStateCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_EXT_debug_utils
         case VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT:
         {
@@ -9456,15 +16509,25 @@
             break;
         }
 #endif
-#ifdef VK_EXT_sampler_filter_minmax
-        case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT:
+#ifdef VK_EXT_inline_uniform_block
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT:
         {
-            transform_tohost_VkSamplerReductionModeCreateInfoEXT(resourceTracker, reinterpret_cast<VkSamplerReductionModeCreateInfoEXT*>(structExtension_out));
+            transform_tohost_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceInlineUniformBlockFeaturesEXT*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT:
         {
-            transform_tohost_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT*>(structExtension_out));
+            transform_tohost_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceInlineUniformBlockPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT:
+        {
+            transform_tohost_VkWriteDescriptorSetInlineUniformBlockEXT(resourceTracker, reinterpret_cast<VkWriteDescriptorSetInlineUniformBlockEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT:
+        {
+            transform_tohost_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(resourceTracker, reinterpret_cast<VkDescriptorPoolInlineUniformBlockCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
@@ -9521,6 +16584,40 @@
             break;
         }
 #endif
+#ifdef VK_NV_shader_sm_builtins
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderSMBuiltinsPropertiesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderSMBuiltinsFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+        case VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT:
+        {
+            transform_tohost_VkDrmFormatModifierPropertiesListEXT(resourceTracker, reinterpret_cast<VkDrmFormatModifierPropertiesListEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceImageDrmFormatModifierInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT:
+        {
+            transform_tohost_VkImageDrmFormatModifierListCreateInfoEXT(resourceTracker, reinterpret_cast<VkImageDrmFormatModifierListCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT:
+        {
+            transform_tohost_VkImageDrmFormatModifierExplicitCreateInfoEXT(resourceTracker, reinterpret_cast<VkImageDrmFormatModifierExplicitCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_EXT_validation_cache
         case VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT:
         {
@@ -9528,30 +16625,61 @@
             break;
         }
 #endif
-#ifdef VK_EXT_descriptor_indexing
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT:
+#ifdef VK_NV_shading_rate_image
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV:
         {
-            transform_tohost_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(resourceTracker, reinterpret_cast<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT*>(structExtension_out));
+            transform_tohost_VkPipelineViewportShadingRateImageStateCreateInfoNV(resourceTracker, reinterpret_cast<VkPipelineViewportShadingRateImageStateCreateInfoNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV:
         {
-            transform_tohost_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceDescriptorIndexingFeaturesEXT*>(structExtension_out));
+            transform_tohost_VkPhysicalDeviceShadingRateImageFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceShadingRateImageFeaturesNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV:
         {
-            transform_tohost_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceDescriptorIndexingPropertiesEXT*>(structExtension_out));
+            transform_tohost_VkPhysicalDeviceShadingRateImagePropertiesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceShadingRateImagePropertiesNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT:
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV:
         {
-            transform_tohost_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(resourceTracker, reinterpret_cast<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT*>(structExtension_out));
+            transform_tohost_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(resourceTracker, reinterpret_cast<VkPipelineViewportCoarseSampleOrderStateCreateInfoNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT:
+#endif
+#ifdef VK_NV_ray_tracing
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV:
         {
-            transform_tohost_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(resourceTracker, reinterpret_cast<VkDescriptorSetVariableDescriptorCountLayoutSupportEXT*>(structExtension_out));
+            transform_tohost_VkWriteDescriptorSetAccelerationStructureNV(resourceTracker, reinterpret_cast<VkWriteDescriptorSetAccelerationStructureNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceRayTracingPropertiesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceRayTracingPropertiesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_representative_fragment_test
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV:
+        {
+            transform_tohost_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(resourceTracker, reinterpret_cast<VkPipelineRepresentativeFragmentTestStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_filter_cubic
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceImageViewImageFormatInfoEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceImageViewImageFormatInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT:
+        {
+            transform_tohost_VkFilterCubicImageViewImageFormatPropertiesEXT(resourceTracker, reinterpret_cast<VkFilterCubicImageViewImageFormatPropertiesEXT*>(structExtension_out));
             break;
         }
 #endif
@@ -9574,6 +16702,13 @@
             break;
         }
 #endif
+#ifdef VK_AMD_pipeline_compiler_control
+        case VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD:
+        {
+            transform_tohost_VkPipelineCompilerControlCreateInfoAMD(resourceTracker, reinterpret_cast<VkPipelineCompilerControlCreateInfoAMD*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_AMD_shader_core_properties
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD:
         {
@@ -9581,6 +16716,13 @@
             break;
         }
 #endif
+#ifdef VK_AMD_memory_overallocation_behavior
+        case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD:
+        {
+            transform_tohost_VkDeviceMemoryOverallocationCreateInfoAMD(resourceTracker, reinterpret_cast<VkDeviceMemoryOverallocationCreateInfoAMD*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_EXT_vertex_attribute_divisor
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT:
         {
@@ -9592,6 +16734,70 @@
             transform_tohost_VkPipelineVertexInputDivisorStateCreateInfoEXT(resourceTracker, reinterpret_cast<VkPipelineVertexInputDivisorStateCreateInfoEXT*>(structExtension_out));
             break;
         }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_GGP_frame_token
+        case VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP:
+        {
+            transform_tohost_VkPresentFrameTokenGGP(resourceTracker, reinterpret_cast<VkPresentFrameTokenGGP*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+        case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT:
+        {
+            transform_tohost_VkPipelineCreationFeedbackCreateInfoEXT(resourceTracker, reinterpret_cast<VkPipelineCreationFeedbackCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_compute_shader_derivatives
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceComputeShaderDerivativesFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_mesh_shader
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceMeshShaderFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceMeshShaderFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceMeshShaderPropertiesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceMeshShaderPropertiesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_shader_image_footprint
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceShaderImageFootprintFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderImageFootprintFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_scissor_exclusive
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV:
+        {
+            transform_tohost_VkPipelineViewportExclusiveScissorStateCreateInfoNV(resourceTracker, reinterpret_cast<VkPipelineViewportExclusiveScissorStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceExclusiveScissorFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceExclusiveScissorFeaturesNV*>(structExtension_out));
+            break;
+        }
 #endif
 #ifdef VK_NV_device_diagnostic_checkpoints
         case VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV:
@@ -9600,18 +16806,455 @@
             break;
         }
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_INTEL_shader_integer_functions2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL:
+        {
+            transform_tohost_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL:
+        {
+            transform_tohost_VkQueryPoolPerformanceQueryCreateInfoINTEL(resourceTracker, reinterpret_cast<VkQueryPoolPerformanceQueryCreateInfoINTEL*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pci_bus_info
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT:
+        {
+            transform_tohost_VkPhysicalDevicePCIBusInfoPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDevicePCIBusInfoPropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_display_native_hdr
+        case VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD:
+        {
+            transform_tohost_VkDisplayNativeHdrSurfaceCapabilitiesAMD(resourceTracker, reinterpret_cast<VkDisplayNativeHdrSurfaceCapabilitiesAMD*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD:
+        {
+            transform_tohost_VkSwapchainDisplayNativeHdrCreateInfoAMD(resourceTracker, reinterpret_cast<VkSwapchainDisplayNativeHdrCreateInfoAMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentDensityMapPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT:
+        {
+            transform_tohost_VkRenderPassFragmentDensityMapCreateInfoEXT(resourceTracker, reinterpret_cast<VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_subgroup_size_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceSubgroupSizeControlFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceSubgroupSizeControlPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT:
+        {
+            transform_tohost_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(resourceTracker, reinterpret_cast<VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_shader_core_properties2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD:
+        {
+            transform_tohost_VkPhysicalDeviceShaderCoreProperties2AMD(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderCoreProperties2AMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_device_coherent_memory
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD:
+        {
+            transform_tohost_VkPhysicalDeviceCoherentMemoryFeaturesAMD(resourceTracker, reinterpret_cast<VkPhysicalDeviceCoherentMemoryFeaturesAMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_memory_budget
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceMemoryBudgetPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceMemoryBudgetPropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_memory_priority
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceMemoryPriorityFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceMemoryPriorityFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT:
+        {
+            transform_tohost_VkMemoryPriorityAllocateInfoEXT(resourceTracker, reinterpret_cast<VkMemoryPriorityAllocateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_buffer_device_address
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceBufferDeviceAddressFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT:
+        {
+            transform_tohost_VkBufferDeviceAddressCreateInfoEXT(resourceTracker, reinterpret_cast<VkBufferDeviceAddressCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_validation_features
+        case VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT:
+        {
+            transform_tohost_VkValidationFeaturesEXT(resourceTracker, reinterpret_cast<VkValidationFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_cooperative_matrix
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceCooperativeMatrixFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceCooperativeMatrixFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceCooperativeMatrixPropertiesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceCooperativeMatrixPropertiesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceCoverageReductionModeFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceCoverageReductionModeFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV:
+        {
+            transform_tohost_VkPipelineCoverageReductionStateCreateInfoNV(resourceTracker, reinterpret_cast<VkPipelineCoverageReductionStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceYcbcrImageArraysFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT:
+        {
+            transform_tohost_VkSurfaceFullScreenExclusiveInfoEXT(resourceTracker, reinterpret_cast<VkSurfaceFullScreenExclusiveInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT:
+        {
+            transform_tohost_VkSurfaceCapabilitiesFullScreenExclusiveEXT(resourceTracker, reinterpret_cast<VkSurfaceCapabilitiesFullScreenExclusiveEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT:
+        {
+            transform_tohost_VkSurfaceFullScreenExclusiveWin32InfoEXT(resourceTracker, reinterpret_cast<VkSurfaceFullScreenExclusiveWin32InfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_line_rasterization
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceLineRasterizationFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceLineRasterizationFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceLineRasterizationPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceLineRasterizationPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
+        {
+            transform_tohost_VkPipelineRasterizationLineStateCreateInfoEXT(resourceTracker, reinterpret_cast<VkPipelineRasterizationLineStateCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_atomic_float
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderAtomicFloatFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_index_type_uint8
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceIndexTypeUint8FeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceExtendedDynamicStateFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_device_generated_commands
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV:
+        {
+            transform_tohost_VkGraphicsPipelineShaderGroupsCreateInfoNV(resourceTracker, reinterpret_cast<VkGraphicsPipelineShaderGroupsCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_QCOM_render_pass_transform
+        case VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM:
+        {
+            transform_tohost_VkRenderPassTransformBeginInfoQCOM(resourceTracker, reinterpret_cast<VkRenderPassTransformBeginInfoQCOM*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM:
+        {
+            transform_tohost_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(resourceTracker, reinterpret_cast<VkCommandBufferInheritanceRenderPassTransformInfoQCOM*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_device_memory_report
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT:
+        {
+            transform_tohost_VkDeviceDeviceMemoryReportCreateInfoEXT(resourceTracker, reinterpret_cast<VkDeviceDeviceMemoryReportCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_robustness2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceRobustness2FeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceRobustness2FeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceRobustness2PropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceRobustness2PropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_custom_border_color
+        case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT:
+        {
+            transform_tohost_VkSamplerCustomBorderColorCreateInfoEXT(resourceTracker, reinterpret_cast<VkSamplerCustomBorderColorCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceCustomBorderColorPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceCustomBorderColorPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceCustomBorderColorFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceCustomBorderColorFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_private_data
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDevicePrivateDataFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDevicePrivateDataFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT:
+        {
+            transform_tohost_VkDevicePrivateDataCreateInfoEXT(resourceTracker, reinterpret_cast<VkDevicePrivateDataCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_device_diagnostics_config
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceDiagnosticsConfigFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV:
+        {
+            transform_tohost_VkDeviceDiagnosticsConfigCreateInfoNV(resourceTracker, reinterpret_cast<VkDeviceDiagnosticsConfigCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV:
+        {
+            transform_tohost_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV:
+        {
+            transform_tohost_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(resourceTracker, reinterpret_cast<VkPipelineFragmentShadingRateEnumStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentDensityMap2FeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentDensityMap2PropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+        case VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM:
+        {
+            transform_tohost_VkCopyCommandTransformInfoQCOM(resourceTracker, reinterpret_cast<VkCopyCommandTransformInfoQCOM*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_image_robustness
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDeviceImageRobustnessFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceImageRobustnessFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_4444_formats
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+        {
+            transform_tohost_VkPhysicalDevice4444FormatsFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDevice4444FormatsFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_GOOGLE_gfxstream
         case VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE:
         {
             transform_tohost_VkImportColorBufferGOOGLE(resourceTracker, reinterpret_cast<VkImportColorBufferGOOGLE*>(structExtension_out));
             break;
         }
+        case VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE:
+        {
+            transform_tohost_VkImportBufferGOOGLE(resourceTracker, reinterpret_cast<VkImportBufferGOOGLE*>(structExtension_out));
+            break;
+        }
         case VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE:
         {
             transform_tohost_VkImportPhysicalAddressGOOGLE(resourceTracker, reinterpret_cast<VkImportPhysicalAddressGOOGLE*>(structExtension_out));
             break;
         }
 #endif
+#ifdef VK_KHR_acceleration_structure
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR:
+        {
+            transform_tohost_VkWriteDescriptorSetAccelerationStructureKHR(resourceTracker, reinterpret_cast<VkWriteDescriptorSetAccelerationStructureKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR:
+        {
+            transform_tohost_VkPhysicalDeviceAccelerationStructureFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceAccelerationStructureFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR:
+        {
+            transform_tohost_VkPhysicalDeviceAccelerationStructurePropertiesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceAccelerationStructurePropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR:
+        {
+            transform_tohost_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceRayTracingPipelineFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR:
+        {
+            transform_tohost_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceRayTracingPipelinePropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_ray_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR:
+        {
+            transform_tohost_VkPhysicalDeviceRayQueryFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceRayQueryFeaturesKHR*>(structExtension_out));
+            break;
+        }
+#endif
         default:
         {
             return;
@@ -9731,9 +17374,9 @@
             transform_fromhost_VkPhysicalDeviceMultiviewProperties(resourceTracker, reinterpret_cast<VkPhysicalDeviceMultiviewProperties*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES:
         {
-            transform_fromhost_VkPhysicalDeviceVariablePointerFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceVariablePointerFeatures*>(structExtension_out));
+            transform_fromhost_VkPhysicalDeviceVariablePointersFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceVariablePointersFeatures*>(structExtension_out));
             break;
         }
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES:
@@ -9826,9 +17469,201 @@
             transform_fromhost_VkPhysicalDeviceMaintenance3Properties(resourceTracker, reinterpret_cast<VkPhysicalDeviceMaintenance3Properties*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES:
         {
-            transform_fromhost_VkPhysicalDeviceShaderDrawParameterFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderDrawParameterFeatures*>(structExtension_out));
+            transform_fromhost_VkPhysicalDeviceShaderDrawParametersFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderDrawParametersFeatures*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_VERSION_1_2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES:
+        {
+            transform_fromhost_VkPhysicalDeviceVulkan11Features(resourceTracker, reinterpret_cast<VkPhysicalDeviceVulkan11Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES:
+        {
+            transform_fromhost_VkPhysicalDeviceVulkan11Properties(resourceTracker, reinterpret_cast<VkPhysicalDeviceVulkan11Properties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES:
+        {
+            transform_fromhost_VkPhysicalDeviceVulkan12Features(resourceTracker, reinterpret_cast<VkPhysicalDeviceVulkan12Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES:
+        {
+            transform_fromhost_VkPhysicalDeviceVulkan12Properties(resourceTracker, reinterpret_cast<VkPhysicalDeviceVulkan12Properties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO:
+        {
+            transform_fromhost_VkImageFormatListCreateInfo(resourceTracker, reinterpret_cast<VkImageFormatListCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES:
+        {
+            transform_fromhost_VkPhysicalDevice8BitStorageFeatures(resourceTracker, reinterpret_cast<VkPhysicalDevice8BitStorageFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES:
+        {
+            transform_fromhost_VkPhysicalDeviceDriverProperties(resourceTracker, reinterpret_cast<VkPhysicalDeviceDriverProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES:
+        {
+            transform_fromhost_VkPhysicalDeviceShaderAtomicInt64Features(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderAtomicInt64Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES:
+        {
+            transform_fromhost_VkPhysicalDeviceShaderFloat16Int8Features(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderFloat16Int8Features*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES:
+        {
+            transform_fromhost_VkPhysicalDeviceFloatControlsProperties(resourceTracker, reinterpret_cast<VkPhysicalDeviceFloatControlsProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO:
+        {
+            transform_fromhost_VkDescriptorSetLayoutBindingFlagsCreateInfo(resourceTracker, reinterpret_cast<VkDescriptorSetLayoutBindingFlagsCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES:
+        {
+            transform_fromhost_VkPhysicalDeviceDescriptorIndexingFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceDescriptorIndexingFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES:
+        {
+            transform_fromhost_VkPhysicalDeviceDescriptorIndexingProperties(resourceTracker, reinterpret_cast<VkPhysicalDeviceDescriptorIndexingProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO:
+        {
+            transform_fromhost_VkDescriptorSetVariableDescriptorCountAllocateInfo(resourceTracker, reinterpret_cast<VkDescriptorSetVariableDescriptorCountAllocateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT:
+        {
+            transform_fromhost_VkDescriptorSetVariableDescriptorCountLayoutSupport(resourceTracker, reinterpret_cast<VkDescriptorSetVariableDescriptorCountLayoutSupport*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE:
+        {
+            transform_fromhost_VkSubpassDescriptionDepthStencilResolve(resourceTracker, reinterpret_cast<VkSubpassDescriptionDepthStencilResolve*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES:
+        {
+            transform_fromhost_VkPhysicalDeviceDepthStencilResolveProperties(resourceTracker, reinterpret_cast<VkPhysicalDeviceDepthStencilResolveProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES:
+        {
+            transform_fromhost_VkPhysicalDeviceScalarBlockLayoutFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceScalarBlockLayoutFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO:
+        {
+            transform_fromhost_VkImageStencilUsageCreateInfo(resourceTracker, reinterpret_cast<VkImageStencilUsageCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO:
+        {
+            transform_fromhost_VkSamplerReductionModeCreateInfo(resourceTracker, reinterpret_cast<VkSamplerReductionModeCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES:
+        {
+            transform_fromhost_VkPhysicalDeviceSamplerFilterMinmaxProperties(resourceTracker, reinterpret_cast<VkPhysicalDeviceSamplerFilterMinmaxProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES:
+        {
+            transform_fromhost_VkPhysicalDeviceVulkanMemoryModelFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceVulkanMemoryModelFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES:
+        {
+            transform_fromhost_VkPhysicalDeviceImagelessFramebufferFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceImagelessFramebufferFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO:
+        {
+            transform_fromhost_VkFramebufferAttachmentsCreateInfo(resourceTracker, reinterpret_cast<VkFramebufferAttachmentsCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO:
+        {
+            transform_fromhost_VkRenderPassAttachmentBeginInfo(resourceTracker, reinterpret_cast<VkRenderPassAttachmentBeginInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES:
+        {
+            transform_fromhost_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceUniformBufferStandardLayoutFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES:
+        {
+            transform_fromhost_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES:
+        {
+            transform_fromhost_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT:
+        {
+            transform_fromhost_VkAttachmentReferenceStencilLayout(resourceTracker, reinterpret_cast<VkAttachmentReferenceStencilLayout*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT:
+        {
+            transform_fromhost_VkAttachmentDescriptionStencilLayout(resourceTracker, reinterpret_cast<VkAttachmentDescriptionStencilLayout*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES:
+        {
+            transform_fromhost_VkPhysicalDeviceHostQueryResetFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceHostQueryResetFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES:
+        {
+            transform_fromhost_VkPhysicalDeviceTimelineSemaphoreFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceTimelineSemaphoreFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES:
+        {
+            transform_fromhost_VkPhysicalDeviceTimelineSemaphoreProperties(resourceTracker, reinterpret_cast<VkPhysicalDeviceTimelineSemaphoreProperties*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO:
+        {
+            transform_fromhost_VkSemaphoreTypeCreateInfo(resourceTracker, reinterpret_cast<VkSemaphoreTypeCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO:
+        {
+            transform_fromhost_VkTimelineSemaphoreSubmitInfo(resourceTracker, reinterpret_cast<VkTimelineSemaphoreSubmitInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES:
+        {
+            transform_fromhost_VkPhysicalDeviceBufferDeviceAddressFeatures(resourceTracker, reinterpret_cast<VkPhysicalDeviceBufferDeviceAddressFeatures*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO:
+        {
+            transform_fromhost_VkBufferOpaqueCaptureAddressCreateInfo(resourceTracker, reinterpret_cast<VkBufferOpaqueCaptureAddressCreateInfo*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO:
+        {
+            transform_fromhost_VkMemoryOpaqueCaptureAddressAllocateInfo(resourceTracker, reinterpret_cast<VkMemoryOpaqueCaptureAddressAllocateInfo*>(structExtension_out));
             break;
         }
 #endif
@@ -9927,17 +17762,87 @@
             break;
         }
 #endif
-#ifdef VK_KHR_image_format_list
-        case VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR:
+#ifdef VK_KHR_performance_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR:
         {
-            transform_fromhost_VkImageFormatListCreateInfoKHR(resourceTracker, reinterpret_cast<VkImageFormatListCreateInfoKHR*>(structExtension_out));
+            transform_fromhost_VkPhysicalDevicePerformanceQueryFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDevicePerformanceQueryFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR:
+        {
+            transform_fromhost_VkPhysicalDevicePerformanceQueryPropertiesKHR(resourceTracker, reinterpret_cast<VkPhysicalDevicePerformanceQueryPropertiesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR:
+        {
+            transform_fromhost_VkQueryPoolPerformanceCreateInfoKHR(resourceTracker, reinterpret_cast<VkQueryPoolPerformanceCreateInfoKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR:
+        {
+            transform_fromhost_VkPerformanceQuerySubmitInfoKHR(resourceTracker, reinterpret_cast<VkPerformanceQuerySubmitInfoKHR*>(structExtension_out));
             break;
         }
 #endif
-#ifdef VK_KHR_8bit_storage
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR:
+#ifdef VK_KHR_portability_subset
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR:
         {
-            transform_fromhost_VkPhysicalDevice8BitStorageFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDevice8BitStorageFeaturesKHR*>(structExtension_out));
+            transform_fromhost_VkPhysicalDevicePortabilitySubsetFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDevicePortabilitySubsetFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR:
+        {
+            transform_fromhost_VkPhysicalDevicePortabilitySubsetPropertiesKHR(resourceTracker, reinterpret_cast<VkPhysicalDevicePortabilitySubsetPropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_shader_clock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR:
+        {
+            transform_fromhost_VkPhysicalDeviceShaderClockFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderClockFeaturesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR:
+        {
+            transform_fromhost_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+        case VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR:
+        {
+            transform_fromhost_VkFragmentShadingRateAttachmentInfoKHR(resourceTracker, reinterpret_cast<VkFragmentShadingRateAttachmentInfoKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR:
+        {
+            transform_fromhost_VkPipelineFragmentShadingRateStateCreateInfoKHR(resourceTracker, reinterpret_cast<VkPipelineFragmentShadingRateStateCreateInfoKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR:
+        {
+            transform_fromhost_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentShadingRateFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR:
+        {
+            transform_fromhost_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentShadingRatePropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+        case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR:
+        {
+            transform_fromhost_VkSurfaceProtectedCapabilitiesKHR(resourceTracker, reinterpret_cast<VkSurfaceProtectedCapabilitiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR:
+        {
+            transform_fromhost_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR*>(structExtension_out));
             break;
         }
 #endif
@@ -9979,6 +17884,23 @@
             break;
         }
 #endif
+#ifdef VK_EXT_transform_feedback
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceTransformFeedbackFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceTransformFeedbackFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceTransformFeedbackPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceTransformFeedbackPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT:
+        {
+            transform_fromhost_VkPipelineRasterizationStateStreamCreateInfoEXT(resourceTracker, reinterpret_cast<VkPipelineRasterizationStateStreamCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_AMD_texture_gather_bias_lod
         case VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD:
         {
@@ -9986,6 +17908,13 @@
             break;
         }
 #endif
+#ifdef VK_NV_corner_sampled_image
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceCornerSampledImageFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceCornerSampledImageFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_NV_external_memory
         case VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV:
         {
@@ -10024,6 +17953,25 @@
             break;
         }
 #endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_astc_decode_mode
+        case VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT:
+        {
+            transform_fromhost_VkImageViewASTCDecodeModeEXT(resourceTracker, reinterpret_cast<VkImageViewASTCDecodeModeEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceASTCDecodeFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceASTCDecodeFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_EXT_conditional_rendering
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT:
         {
@@ -10095,6 +18043,18 @@
             break;
         }
 #endif
+#ifdef VK_EXT_depth_clip_enable
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceDepthClipEnableFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceDepthClipEnableFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT:
+        {
+            transform_fromhost_VkPipelineRasterizationDepthClipStateCreateInfoEXT(resourceTracker, reinterpret_cast<VkPipelineRasterizationDepthClipStateCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_EXT_debug_utils
         case VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT:
         {
@@ -10124,15 +18084,25 @@
             break;
         }
 #endif
-#ifdef VK_EXT_sampler_filter_minmax
-        case VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT:
+#ifdef VK_EXT_inline_uniform_block
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT:
         {
-            transform_fromhost_VkSamplerReductionModeCreateInfoEXT(resourceTracker, reinterpret_cast<VkSamplerReductionModeCreateInfoEXT*>(structExtension_out));
+            transform_fromhost_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceInlineUniformBlockFeaturesEXT*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT:
         {
-            transform_fromhost_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT*>(structExtension_out));
+            transform_fromhost_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceInlineUniformBlockPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT:
+        {
+            transform_fromhost_VkWriteDescriptorSetInlineUniformBlockEXT(resourceTracker, reinterpret_cast<VkWriteDescriptorSetInlineUniformBlockEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT:
+        {
+            transform_fromhost_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(resourceTracker, reinterpret_cast<VkDescriptorPoolInlineUniformBlockCreateInfoEXT*>(structExtension_out));
             break;
         }
 #endif
@@ -10189,6 +18159,40 @@
             break;
         }
 #endif
+#ifdef VK_NV_shader_sm_builtins
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderSMBuiltinsPropertiesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderSMBuiltinsFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_image_drm_format_modifier
+        case VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT:
+        {
+            transform_fromhost_VkDrmFormatModifierPropertiesListEXT(resourceTracker, reinterpret_cast<VkDrmFormatModifierPropertiesListEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceImageDrmFormatModifierInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT:
+        {
+            transform_fromhost_VkImageDrmFormatModifierListCreateInfoEXT(resourceTracker, reinterpret_cast<VkImageDrmFormatModifierListCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT:
+        {
+            transform_fromhost_VkImageDrmFormatModifierExplicitCreateInfoEXT(resourceTracker, reinterpret_cast<VkImageDrmFormatModifierExplicitCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_EXT_validation_cache
         case VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT:
         {
@@ -10196,30 +18200,61 @@
             break;
         }
 #endif
-#ifdef VK_EXT_descriptor_indexing
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT:
+#ifdef VK_NV_shading_rate_image
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV:
         {
-            transform_fromhost_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(resourceTracker, reinterpret_cast<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT*>(structExtension_out));
+            transform_fromhost_VkPipelineViewportShadingRateImageStateCreateInfoNV(resourceTracker, reinterpret_cast<VkPipelineViewportShadingRateImageStateCreateInfoNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV:
         {
-            transform_fromhost_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceDescriptorIndexingFeaturesEXT*>(structExtension_out));
+            transform_fromhost_VkPhysicalDeviceShadingRateImageFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceShadingRateImageFeaturesNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT:
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV:
         {
-            transform_fromhost_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceDescriptorIndexingPropertiesEXT*>(structExtension_out));
+            transform_fromhost_VkPhysicalDeviceShadingRateImagePropertiesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceShadingRateImagePropertiesNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT:
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV:
         {
-            transform_fromhost_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(resourceTracker, reinterpret_cast<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT*>(structExtension_out));
+            transform_fromhost_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(resourceTracker, reinterpret_cast<VkPipelineViewportCoarseSampleOrderStateCreateInfoNV*>(structExtension_out));
             break;
         }
-        case VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT:
+#endif
+#ifdef VK_NV_ray_tracing
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV:
         {
-            transform_fromhost_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(resourceTracker, reinterpret_cast<VkDescriptorSetVariableDescriptorCountLayoutSupportEXT*>(structExtension_out));
+            transform_fromhost_VkWriteDescriptorSetAccelerationStructureNV(resourceTracker, reinterpret_cast<VkWriteDescriptorSetAccelerationStructureNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceRayTracingPropertiesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceRayTracingPropertiesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_representative_fragment_test
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV:
+        {
+            transform_fromhost_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(resourceTracker, reinterpret_cast<VkPipelineRepresentativeFragmentTestStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_filter_cubic
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceImageViewImageFormatInfoEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceImageViewImageFormatInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT:
+        {
+            transform_fromhost_VkFilterCubicImageViewImageFormatPropertiesEXT(resourceTracker, reinterpret_cast<VkFilterCubicImageViewImageFormatPropertiesEXT*>(structExtension_out));
             break;
         }
 #endif
@@ -10242,6 +18277,13 @@
             break;
         }
 #endif
+#ifdef VK_AMD_pipeline_compiler_control
+        case VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD:
+        {
+            transform_fromhost_VkPipelineCompilerControlCreateInfoAMD(resourceTracker, reinterpret_cast<VkPipelineCompilerControlCreateInfoAMD*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_AMD_shader_core_properties
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD:
         {
@@ -10249,6 +18291,13 @@
             break;
         }
 #endif
+#ifdef VK_AMD_memory_overallocation_behavior
+        case VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD:
+        {
+            transform_fromhost_VkDeviceMemoryOverallocationCreateInfoAMD(resourceTracker, reinterpret_cast<VkDeviceMemoryOverallocationCreateInfoAMD*>(structExtension_out));
+            break;
+        }
+#endif
 #ifdef VK_EXT_vertex_attribute_divisor
         case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT:
         {
@@ -10260,6 +18309,70 @@
             transform_fromhost_VkPipelineVertexInputDivisorStateCreateInfoEXT(resourceTracker, reinterpret_cast<VkPipelineVertexInputDivisorStateCreateInfoEXT*>(structExtension_out));
             break;
         }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_GGP_frame_token
+        case VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP:
+        {
+            transform_fromhost_VkPresentFrameTokenGGP(resourceTracker, reinterpret_cast<VkPresentFrameTokenGGP*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+        case VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT:
+        {
+            transform_fromhost_VkPipelineCreationFeedbackCreateInfoEXT(resourceTracker, reinterpret_cast<VkPipelineCreationFeedbackCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_compute_shader_derivatives
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceComputeShaderDerivativesFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_mesh_shader
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceMeshShaderFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceMeshShaderFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceMeshShaderPropertiesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceMeshShaderPropertiesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_shader_image_footprint
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceShaderImageFootprintFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderImageFootprintFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_scissor_exclusive
+        case VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV:
+        {
+            transform_fromhost_VkPipelineViewportExclusiveScissorStateCreateInfoNV(resourceTracker, reinterpret_cast<VkPipelineViewportExclusiveScissorStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceExclusiveScissorFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceExclusiveScissorFeaturesNV*>(structExtension_out));
+            break;
+        }
 #endif
 #ifdef VK_NV_device_diagnostic_checkpoints
         case VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV:
@@ -10268,18 +18381,455 @@
             break;
         }
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_INTEL_shader_integer_functions2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL:
+        {
+            transform_fromhost_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_INTEL_performance_query
+        case VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL:
+        {
+            transform_fromhost_VkQueryPoolPerformanceQueryCreateInfoINTEL(resourceTracker, reinterpret_cast<VkQueryPoolPerformanceQueryCreateInfoINTEL*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pci_bus_info
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT:
+        {
+            transform_fromhost_VkPhysicalDevicePCIBusInfoPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDevicePCIBusInfoPropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_display_native_hdr
+        case VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD:
+        {
+            transform_fromhost_VkDisplayNativeHdrSurfaceCapabilitiesAMD(resourceTracker, reinterpret_cast<VkDisplayNativeHdrSurfaceCapabilitiesAMD*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD:
+        {
+            transform_fromhost_VkSwapchainDisplayNativeHdrCreateInfoAMD(resourceTracker, reinterpret_cast<VkSwapchainDisplayNativeHdrCreateInfoAMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentDensityMapFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentDensityMapPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT:
+        {
+            transform_fromhost_VkRenderPassFragmentDensityMapCreateInfoEXT(resourceTracker, reinterpret_cast<VkRenderPassFragmentDensityMapCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_subgroup_size_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceSubgroupSizeControlFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceSubgroupSizeControlPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT:
+        {
+            transform_fromhost_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(resourceTracker, reinterpret_cast<VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_shader_core_properties2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD:
+        {
+            transform_fromhost_VkPhysicalDeviceShaderCoreProperties2AMD(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderCoreProperties2AMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_AMD_device_coherent_memory
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD:
+        {
+            transform_fromhost_VkPhysicalDeviceCoherentMemoryFeaturesAMD(resourceTracker, reinterpret_cast<VkPhysicalDeviceCoherentMemoryFeaturesAMD*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_memory_budget
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceMemoryBudgetPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceMemoryBudgetPropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_memory_priority
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceMemoryPriorityFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceMemoryPriorityFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT:
+        {
+            transform_fromhost_VkMemoryPriorityAllocateInfoEXT(resourceTracker, reinterpret_cast<VkMemoryPriorityAllocateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_buffer_device_address
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceBufferDeviceAddressFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT:
+        {
+            transform_fromhost_VkBufferDeviceAddressCreateInfoEXT(resourceTracker, reinterpret_cast<VkBufferDeviceAddressCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_validation_features
+        case VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT:
+        {
+            transform_fromhost_VkValidationFeaturesEXT(resourceTracker, reinterpret_cast<VkValidationFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_cooperative_matrix
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceCooperativeMatrixFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceCooperativeMatrixFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceCooperativeMatrixPropertiesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceCooperativeMatrixPropertiesNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceCoverageReductionModeFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceCoverageReductionModeFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV:
+        {
+            transform_fromhost_VkPipelineCoverageReductionStateCreateInfoNV(resourceTracker, reinterpret_cast<VkPipelineCoverageReductionStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceYcbcrImageArraysFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT:
+        {
+            transform_fromhost_VkSurfaceFullScreenExclusiveInfoEXT(resourceTracker, reinterpret_cast<VkSurfaceFullScreenExclusiveInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT:
+        {
+            transform_fromhost_VkSurfaceCapabilitiesFullScreenExclusiveEXT(resourceTracker, reinterpret_cast<VkSurfaceCapabilitiesFullScreenExclusiveEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT:
+        {
+            transform_fromhost_VkSurfaceFullScreenExclusiveWin32InfoEXT(resourceTracker, reinterpret_cast<VkSurfaceFullScreenExclusiveWin32InfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_line_rasterization
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceLineRasterizationFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceLineRasterizationFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceLineRasterizationPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceLineRasterizationPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT:
+        {
+            transform_fromhost_VkPipelineRasterizationLineStateCreateInfoEXT(resourceTracker, reinterpret_cast<VkPipelineRasterizationLineStateCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_atomic_float
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderAtomicFloatFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_index_type_uint8
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceIndexTypeUint8FeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceExtendedDynamicStateFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_device_generated_commands
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV:
+        {
+            transform_fromhost_VkGraphicsPipelineShaderGroupsCreateInfoNV(resourceTracker, reinterpret_cast<VkGraphicsPipelineShaderGroupsCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_QCOM_render_pass_transform
+        case VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM:
+        {
+            transform_fromhost_VkRenderPassTransformBeginInfoQCOM(resourceTracker, reinterpret_cast<VkRenderPassTransformBeginInfoQCOM*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM:
+        {
+            transform_fromhost_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(resourceTracker, reinterpret_cast<VkCommandBufferInheritanceRenderPassTransformInfoQCOM*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_device_memory_report
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT:
+        {
+            transform_fromhost_VkDeviceDeviceMemoryReportCreateInfoEXT(resourceTracker, reinterpret_cast<VkDeviceDeviceMemoryReportCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_robustness2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceRobustness2FeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceRobustness2FeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceRobustness2PropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceRobustness2PropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_custom_border_color
+        case VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT:
+        {
+            transform_fromhost_VkSamplerCustomBorderColorCreateInfoEXT(resourceTracker, reinterpret_cast<VkSamplerCustomBorderColorCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceCustomBorderColorPropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceCustomBorderColorPropertiesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceCustomBorderColorFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceCustomBorderColorFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_private_data
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDevicePrivateDataFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDevicePrivateDataFeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT:
+        {
+            transform_fromhost_VkDevicePrivateDataCreateInfoEXT(resourceTracker, reinterpret_cast<VkDevicePrivateDataCreateInfoEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_device_diagnostics_config
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceDiagnosticsConfigFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV:
+        {
+            transform_fromhost_VkDeviceDiagnosticsConfigCreateInfoNV(resourceTracker, reinterpret_cast<VkDeviceDiagnosticsConfigCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV:
+        {
+            transform_fromhost_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV:
+        {
+            transform_fromhost_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(resourceTracker, reinterpret_cast<VkPipelineFragmentShadingRateEnumStateCreateInfoNV*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_fragment_density_map2
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentDensityMap2FeaturesEXT*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceFragmentDensityMap2PropertiesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+        case VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM:
+        {
+            transform_fromhost_VkCopyCommandTransformInfoQCOM(resourceTracker, reinterpret_cast<VkCopyCommandTransformInfoQCOM*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_image_robustness
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDeviceImageRobustnessFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDeviceImageRobustnessFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_EXT_4444_formats
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT:
+        {
+            transform_fromhost_VkPhysicalDevice4444FormatsFeaturesEXT(resourceTracker, reinterpret_cast<VkPhysicalDevice4444FormatsFeaturesEXT*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_GOOGLE_gfxstream
         case VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE:
         {
             transform_fromhost_VkImportColorBufferGOOGLE(resourceTracker, reinterpret_cast<VkImportColorBufferGOOGLE*>(structExtension_out));
             break;
         }
+        case VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE:
+        {
+            transform_fromhost_VkImportBufferGOOGLE(resourceTracker, reinterpret_cast<VkImportBufferGOOGLE*>(structExtension_out));
+            break;
+        }
         case VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE:
         {
             transform_fromhost_VkImportPhysicalAddressGOOGLE(resourceTracker, reinterpret_cast<VkImportPhysicalAddressGOOGLE*>(structExtension_out));
             break;
         }
 #endif
+#ifdef VK_KHR_acceleration_structure
+        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR:
+        {
+            transform_fromhost_VkWriteDescriptorSetAccelerationStructureKHR(resourceTracker, reinterpret_cast<VkWriteDescriptorSetAccelerationStructureKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR:
+        {
+            transform_fromhost_VkPhysicalDeviceAccelerationStructureFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceAccelerationStructureFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR:
+        {
+            transform_fromhost_VkPhysicalDeviceAccelerationStructurePropertiesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceAccelerationStructurePropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_ray_tracing_pipeline
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR:
+        {
+            transform_fromhost_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceRayTracingPipelineFeaturesKHR*>(structExtension_out));
+            break;
+        }
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR:
+        {
+            transform_fromhost_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceRayTracingPipelinePropertiesKHR*>(structExtension_out));
+            break;
+        }
+#endif
+#ifdef VK_KHR_ray_query
+        case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR:
+        {
+            transform_fromhost_VkPhysicalDeviceRayQueryFeaturesKHR(resourceTracker, reinterpret_cast<VkPhysicalDeviceRayQueryFeaturesKHR*>(structExtension_out));
+            break;
+        }
+#endif
         default:
         {
             return;
diff --git a/system/vulkan_enc/goldfish_vk_transform_guest.h b/system/vulkan_enc/goldfish_vk_transform_guest.h
index d0b610b..78262cd 100644
--- a/system/vulkan_enc/goldfish_vk_transform_guest.h
+++ b/system/vulkan_enc/goldfish_vk_transform_guest.h
@@ -35,8 +35,7 @@
 namespace goldfish_vk {
 
 class ResourceTracker;
-#define LIST_TRANSFORMED_TYPES(f) \
-f(VkExternalMemoryProperties) \
+#define LIST_TRIVIAL_TRANSFORMED_TYPES(f) \
 f(VkPhysicalDeviceExternalImageFormatInfo) \
 f(VkPhysicalDeviceExternalBufferInfo) \
 f(VkExternalMemoryImageCreateInfo) \
@@ -45,46 +44,21 @@
 f(VkExternalImageFormatProperties) \
 f(VkExternalBufferProperties) \
 
+#define LIST_NON_TRIVIAL_TRANSFORMED_TYPES(f) \
+f(VkExternalMemoryProperties) \
+
+#define LIST_TRANSFORMED_TYPES(f) \
+LIST_TRIVIAL_TRANSFORMED_TYPES(f) \
+LIST_NON_TRIVIAL_TRANSFORMED_TYPES(f) \
+
 #ifdef VK_VERSION_1_0
-void transform_tohost_VkApplicationInfo(
+void transform_tohost_VkExtent2D(
     ResourceTracker* resourceTracker,
-    VkApplicationInfo* toTransform);
+    VkExtent2D* toTransform);
 
-void transform_fromhost_VkApplicationInfo(
+void transform_fromhost_VkExtent2D(
     ResourceTracker* resourceTracker,
-    VkApplicationInfo* toTransform);
-
-void transform_tohost_VkInstanceCreateInfo(
-    ResourceTracker* resourceTracker,
-    VkInstanceCreateInfo* toTransform);
-
-void transform_fromhost_VkInstanceCreateInfo(
-    ResourceTracker* resourceTracker,
-    VkInstanceCreateInfo* toTransform);
-
-void transform_tohost_VkAllocationCallbacks(
-    ResourceTracker* resourceTracker,
-    VkAllocationCallbacks* toTransform);
-
-void transform_fromhost_VkAllocationCallbacks(
-    ResourceTracker* resourceTracker,
-    VkAllocationCallbacks* toTransform);
-
-void transform_tohost_VkPhysicalDeviceFeatures(
-    ResourceTracker* resourceTracker,
-    VkPhysicalDeviceFeatures* toTransform);
-
-void transform_fromhost_VkPhysicalDeviceFeatures(
-    ResourceTracker* resourceTracker,
-    VkPhysicalDeviceFeatures* toTransform);
-
-void transform_tohost_VkFormatProperties(
-    ResourceTracker* resourceTracker,
-    VkFormatProperties* toTransform);
-
-void transform_fromhost_VkFormatProperties(
-    ResourceTracker* resourceTracker,
-    VkFormatProperties* toTransform);
+    VkExtent2D* toTransform);
 
 void transform_tohost_VkExtent3D(
     ResourceTracker* resourceTracker,
@@ -94,6 +68,126 @@
     ResourceTracker* resourceTracker,
     VkExtent3D* toTransform);
 
+void transform_tohost_VkOffset2D(
+    ResourceTracker* resourceTracker,
+    VkOffset2D* toTransform);
+
+void transform_fromhost_VkOffset2D(
+    ResourceTracker* resourceTracker,
+    VkOffset2D* toTransform);
+
+void transform_tohost_VkOffset3D(
+    ResourceTracker* resourceTracker,
+    VkOffset3D* toTransform);
+
+void transform_fromhost_VkOffset3D(
+    ResourceTracker* resourceTracker,
+    VkOffset3D* toTransform);
+
+void transform_tohost_VkRect2D(
+    ResourceTracker* resourceTracker,
+    VkRect2D* toTransform);
+
+void transform_fromhost_VkRect2D(
+    ResourceTracker* resourceTracker,
+    VkRect2D* toTransform);
+
+void transform_tohost_VkBaseInStructure(
+    ResourceTracker* resourceTracker,
+    VkBaseInStructure* toTransform);
+
+void transform_fromhost_VkBaseInStructure(
+    ResourceTracker* resourceTracker,
+    VkBaseInStructure* toTransform);
+
+void transform_tohost_VkBaseOutStructure(
+    ResourceTracker* resourceTracker,
+    VkBaseOutStructure* toTransform);
+
+void transform_fromhost_VkBaseOutStructure(
+    ResourceTracker* resourceTracker,
+    VkBaseOutStructure* toTransform);
+
+void transform_tohost_VkBufferMemoryBarrier(
+    ResourceTracker* resourceTracker,
+    VkBufferMemoryBarrier* toTransform);
+
+void transform_fromhost_VkBufferMemoryBarrier(
+    ResourceTracker* resourceTracker,
+    VkBufferMemoryBarrier* toTransform);
+
+void transform_tohost_VkDispatchIndirectCommand(
+    ResourceTracker* resourceTracker,
+    VkDispatchIndirectCommand* toTransform);
+
+void transform_fromhost_VkDispatchIndirectCommand(
+    ResourceTracker* resourceTracker,
+    VkDispatchIndirectCommand* toTransform);
+
+void transform_tohost_VkDrawIndexedIndirectCommand(
+    ResourceTracker* resourceTracker,
+    VkDrawIndexedIndirectCommand* toTransform);
+
+void transform_fromhost_VkDrawIndexedIndirectCommand(
+    ResourceTracker* resourceTracker,
+    VkDrawIndexedIndirectCommand* toTransform);
+
+void transform_tohost_VkDrawIndirectCommand(
+    ResourceTracker* resourceTracker,
+    VkDrawIndirectCommand* toTransform);
+
+void transform_fromhost_VkDrawIndirectCommand(
+    ResourceTracker* resourceTracker,
+    VkDrawIndirectCommand* toTransform);
+
+void transform_tohost_VkImageSubresourceRange(
+    ResourceTracker* resourceTracker,
+    VkImageSubresourceRange* toTransform);
+
+void transform_fromhost_VkImageSubresourceRange(
+    ResourceTracker* resourceTracker,
+    VkImageSubresourceRange* toTransform);
+
+void transform_tohost_VkImageMemoryBarrier(
+    ResourceTracker* resourceTracker,
+    VkImageMemoryBarrier* toTransform);
+
+void transform_fromhost_VkImageMemoryBarrier(
+    ResourceTracker* resourceTracker,
+    VkImageMemoryBarrier* toTransform);
+
+void transform_tohost_VkMemoryBarrier(
+    ResourceTracker* resourceTracker,
+    VkMemoryBarrier* toTransform);
+
+void transform_fromhost_VkMemoryBarrier(
+    ResourceTracker* resourceTracker,
+    VkMemoryBarrier* toTransform);
+
+void transform_tohost_VkAllocationCallbacks(
+    ResourceTracker* resourceTracker,
+    VkAllocationCallbacks* toTransform);
+
+void transform_fromhost_VkAllocationCallbacks(
+    ResourceTracker* resourceTracker,
+    VkAllocationCallbacks* toTransform);
+
+void transform_tohost_VkApplicationInfo(
+    ResourceTracker* resourceTracker,
+    VkApplicationInfo* toTransform);
+
+void transform_fromhost_VkApplicationInfo(
+    ResourceTracker* resourceTracker,
+    VkApplicationInfo* toTransform);
+
+void transform_tohost_VkFormatProperties(
+    ResourceTracker* resourceTracker,
+    VkFormatProperties* toTransform);
+
+void transform_fromhost_VkFormatProperties(
+    ResourceTracker* resourceTracker,
+    VkFormatProperties* toTransform);
+
 void transform_tohost_VkImageFormatProperties(
     ResourceTracker* resourceTracker,
     VkImageFormatProperties* toTransform);
@@ -102,6 +196,38 @@
     ResourceTracker* resourceTracker,
     VkImageFormatProperties* toTransform);
 
+void transform_tohost_VkInstanceCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkInstanceCreateInfo* toTransform);
+
+void transform_fromhost_VkInstanceCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkInstanceCreateInfo* toTransform);
+
+void transform_tohost_VkMemoryHeap(
+    ResourceTracker* resourceTracker,
+    VkMemoryHeap* toTransform);
+
+void transform_fromhost_VkMemoryHeap(
+    ResourceTracker* resourceTracker,
+    VkMemoryHeap* toTransform);
+
+void transform_tohost_VkMemoryType(
+    ResourceTracker* resourceTracker,
+    VkMemoryType* toTransform);
+
+void transform_fromhost_VkMemoryType(
+    ResourceTracker* resourceTracker,
+    VkMemoryType* toTransform);
+
+void transform_tohost_VkPhysicalDeviceFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFeatures* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFeatures* toTransform);
+
 void transform_tohost_VkPhysicalDeviceLimits(
     ResourceTracker* resourceTracker,
     VkPhysicalDeviceLimits* toTransform);
@@ -110,6 +236,14 @@
     ResourceTracker* resourceTracker,
     VkPhysicalDeviceLimits* toTransform);
 
+void transform_tohost_VkPhysicalDeviceMemoryProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMemoryProperties* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceMemoryProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMemoryProperties* toTransform);
+
 void transform_tohost_VkPhysicalDeviceSparseProperties(
     ResourceTracker* resourceTracker,
     VkPhysicalDeviceSparseProperties* toTransform);
@@ -134,30 +268,6 @@
     ResourceTracker* resourceTracker,
     VkQueueFamilyProperties* toTransform);
 
-void transform_tohost_VkMemoryType(
-    ResourceTracker* resourceTracker,
-    VkMemoryType* toTransform);
-
-void transform_fromhost_VkMemoryType(
-    ResourceTracker* resourceTracker,
-    VkMemoryType* toTransform);
-
-void transform_tohost_VkMemoryHeap(
-    ResourceTracker* resourceTracker,
-    VkMemoryHeap* toTransform);
-
-void transform_fromhost_VkMemoryHeap(
-    ResourceTracker* resourceTracker,
-    VkMemoryHeap* toTransform);
-
-void transform_tohost_VkPhysicalDeviceMemoryProperties(
-    ResourceTracker* resourceTracker,
-    VkPhysicalDeviceMemoryProperties* toTransform);
-
-void transform_fromhost_VkPhysicalDeviceMemoryProperties(
-    ResourceTracker* resourceTracker,
-    VkPhysicalDeviceMemoryProperties* toTransform);
-
 void transform_tohost_VkDeviceQueueCreateInfo(
     ResourceTracker* resourceTracker,
     VkDeviceQueueCreateInfo* toTransform);
@@ -198,14 +308,6 @@
     ResourceTracker* resourceTracker,
     VkSubmitInfo* toTransform);
 
-void transform_tohost_VkMemoryAllocateInfo(
-    ResourceTracker* resourceTracker,
-    VkMemoryAllocateInfo* toTransform);
-
-void transform_fromhost_VkMemoryAllocateInfo(
-    ResourceTracker* resourceTracker,
-    VkMemoryAllocateInfo* toTransform);
-
 void transform_tohost_VkMappedMemoryRange(
     ResourceTracker* resourceTracker,
     VkMappedMemoryRange* toTransform);
@@ -214,6 +316,14 @@
     ResourceTracker* resourceTracker,
     VkMappedMemoryRange* toTransform);
 
+void transform_tohost_VkMemoryAllocateInfo(
+    ResourceTracker* resourceTracker,
+    VkMemoryAllocateInfo* toTransform);
+
+void transform_fromhost_VkMemoryAllocateInfo(
+    ResourceTracker* resourceTracker,
+    VkMemoryAllocateInfo* toTransform);
+
 void transform_tohost_VkMemoryRequirements(
     ResourceTracker* resourceTracker,
     VkMemoryRequirements* toTransform);
@@ -222,22 +332,6 @@
     ResourceTracker* resourceTracker,
     VkMemoryRequirements* toTransform);
 
-void transform_tohost_VkSparseImageFormatProperties(
-    ResourceTracker* resourceTracker,
-    VkSparseImageFormatProperties* toTransform);
-
-void transform_fromhost_VkSparseImageFormatProperties(
-    ResourceTracker* resourceTracker,
-    VkSparseImageFormatProperties* toTransform);
-
-void transform_tohost_VkSparseImageMemoryRequirements(
-    ResourceTracker* resourceTracker,
-    VkSparseImageMemoryRequirements* toTransform);
-
-void transform_fromhost_VkSparseImageMemoryRequirements(
-    ResourceTracker* resourceTracker,
-    VkSparseImageMemoryRequirements* toTransform);
-
 void transform_tohost_VkSparseMemoryBind(
     ResourceTracker* resourceTracker,
     VkSparseMemoryBind* toTransform);
@@ -270,14 +364,6 @@
     ResourceTracker* resourceTracker,
     VkImageSubresource* toTransform);
 
-void transform_tohost_VkOffset3D(
-    ResourceTracker* resourceTracker,
-    VkOffset3D* toTransform);
-
-void transform_fromhost_VkOffset3D(
-    ResourceTracker* resourceTracker,
-    VkOffset3D* toTransform);
-
 void transform_tohost_VkSparseImageMemoryBind(
     ResourceTracker* resourceTracker,
     VkSparseImageMemoryBind* toTransform);
@@ -302,6 +388,22 @@
     ResourceTracker* resourceTracker,
     VkBindSparseInfo* toTransform);
 
+void transform_tohost_VkSparseImageFormatProperties(
+    ResourceTracker* resourceTracker,
+    VkSparseImageFormatProperties* toTransform);
+
+void transform_fromhost_VkSparseImageFormatProperties(
+    ResourceTracker* resourceTracker,
+    VkSparseImageFormatProperties* toTransform);
+
+void transform_tohost_VkSparseImageMemoryRequirements(
+    ResourceTracker* resourceTracker,
+    VkSparseImageMemoryRequirements* toTransform);
+
+void transform_fromhost_VkSparseImageMemoryRequirements(
+    ResourceTracker* resourceTracker,
+    VkSparseImageMemoryRequirements* toTransform);
+
 void transform_tohost_VkFenceCreateInfo(
     ResourceTracker* resourceTracker,
     VkFenceCreateInfo* toTransform);
@@ -374,14 +476,6 @@
     ResourceTracker* resourceTracker,
     VkComponentMapping* toTransform);
 
-void transform_tohost_VkImageSubresourceRange(
-    ResourceTracker* resourceTracker,
-    VkImageSubresourceRange* toTransform);
-
-void transform_fromhost_VkImageSubresourceRange(
-    ResourceTracker* resourceTracker,
-    VkImageSubresourceRange* toTransform);
-
 void transform_tohost_VkImageViewCreateInfo(
     ResourceTracker* resourceTracker,
     VkImageViewCreateInfo* toTransform);
@@ -430,6 +524,14 @@
     ResourceTracker* resourceTracker,
     VkPipelineShaderStageCreateInfo* toTransform);
 
+void transform_tohost_VkComputePipelineCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkComputePipelineCreateInfo* toTransform);
+
+void transform_fromhost_VkComputePipelineCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkComputePipelineCreateInfo* toTransform);
+
 void transform_tohost_VkVertexInputBindingDescription(
     ResourceTracker* resourceTracker,
     VkVertexInputBindingDescription* toTransform);
@@ -478,30 +580,6 @@
     ResourceTracker* resourceTracker,
     VkViewport* toTransform);
 
-void transform_tohost_VkOffset2D(
-    ResourceTracker* resourceTracker,
-    VkOffset2D* toTransform);
-
-void transform_fromhost_VkOffset2D(
-    ResourceTracker* resourceTracker,
-    VkOffset2D* toTransform);
-
-void transform_tohost_VkExtent2D(
-    ResourceTracker* resourceTracker,
-    VkExtent2D* toTransform);
-
-void transform_fromhost_VkExtent2D(
-    ResourceTracker* resourceTracker,
-    VkExtent2D* toTransform);
-
-void transform_tohost_VkRect2D(
-    ResourceTracker* resourceTracker,
-    VkRect2D* toTransform);
-
-void transform_fromhost_VkRect2D(
-    ResourceTracker* resourceTracker,
-    VkRect2D* toTransform);
-
 void transform_tohost_VkPipelineViewportStateCreateInfo(
     ResourceTracker* resourceTracker,
     VkPipelineViewportStateCreateInfo* toTransform);
@@ -574,14 +652,6 @@
     ResourceTracker* resourceTracker,
     VkGraphicsPipelineCreateInfo* toTransform);
 
-void transform_tohost_VkComputePipelineCreateInfo(
-    ResourceTracker* resourceTracker,
-    VkComputePipelineCreateInfo* toTransform);
-
-void transform_fromhost_VkComputePipelineCreateInfo(
-    ResourceTracker* resourceTracker,
-    VkComputePipelineCreateInfo* toTransform);
-
 void transform_tohost_VkPushConstantRange(
     ResourceTracker* resourceTracker,
     VkPushConstantRange* toTransform);
@@ -606,21 +676,29 @@
     ResourceTracker* resourceTracker,
     VkSamplerCreateInfo* toTransform);
 
-void transform_tohost_VkDescriptorSetLayoutBinding(
+void transform_tohost_VkCopyDescriptorSet(
     ResourceTracker* resourceTracker,
-    VkDescriptorSetLayoutBinding* toTransform);
+    VkCopyDescriptorSet* toTransform);
 
-void transform_fromhost_VkDescriptorSetLayoutBinding(
+void transform_fromhost_VkCopyDescriptorSet(
     ResourceTracker* resourceTracker,
-    VkDescriptorSetLayoutBinding* toTransform);
+    VkCopyDescriptorSet* toTransform);
 
-void transform_tohost_VkDescriptorSetLayoutCreateInfo(
+void transform_tohost_VkDescriptorBufferInfo(
     ResourceTracker* resourceTracker,
-    VkDescriptorSetLayoutCreateInfo* toTransform);
+    VkDescriptorBufferInfo* toTransform);
 
-void transform_fromhost_VkDescriptorSetLayoutCreateInfo(
+void transform_fromhost_VkDescriptorBufferInfo(
     ResourceTracker* resourceTracker,
-    VkDescriptorSetLayoutCreateInfo* toTransform);
+    VkDescriptorBufferInfo* toTransform);
+
+void transform_tohost_VkDescriptorImageInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorImageInfo* toTransform);
+
+void transform_fromhost_VkDescriptorImageInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorImageInfo* toTransform);
 
 void transform_tohost_VkDescriptorPoolSize(
     ResourceTracker* resourceTracker,
@@ -646,21 +724,21 @@
     ResourceTracker* resourceTracker,
     VkDescriptorSetAllocateInfo* toTransform);
 
-void transform_tohost_VkDescriptorImageInfo(
+void transform_tohost_VkDescriptorSetLayoutBinding(
     ResourceTracker* resourceTracker,
-    VkDescriptorImageInfo* toTransform);
+    VkDescriptorSetLayoutBinding* toTransform);
 
-void transform_fromhost_VkDescriptorImageInfo(
+void transform_fromhost_VkDescriptorSetLayoutBinding(
     ResourceTracker* resourceTracker,
-    VkDescriptorImageInfo* toTransform);
+    VkDescriptorSetLayoutBinding* toTransform);
 
-void transform_tohost_VkDescriptorBufferInfo(
+void transform_tohost_VkDescriptorSetLayoutCreateInfo(
     ResourceTracker* resourceTracker,
-    VkDescriptorBufferInfo* toTransform);
+    VkDescriptorSetLayoutCreateInfo* toTransform);
 
-void transform_fromhost_VkDescriptorBufferInfo(
+void transform_fromhost_VkDescriptorSetLayoutCreateInfo(
     ResourceTracker* resourceTracker,
-    VkDescriptorBufferInfo* toTransform);
+    VkDescriptorSetLayoutCreateInfo* toTransform);
 
 void transform_tohost_VkWriteDescriptorSet(
     ResourceTracker* resourceTracker,
@@ -670,22 +748,6 @@
     ResourceTracker* resourceTracker,
     VkWriteDescriptorSet* toTransform);
 
-void transform_tohost_VkCopyDescriptorSet(
-    ResourceTracker* resourceTracker,
-    VkCopyDescriptorSet* toTransform);
-
-void transform_fromhost_VkCopyDescriptorSet(
-    ResourceTracker* resourceTracker,
-    VkCopyDescriptorSet* toTransform);
-
-void transform_tohost_VkFramebufferCreateInfo(
-    ResourceTracker* resourceTracker,
-    VkFramebufferCreateInfo* toTransform);
-
-void transform_fromhost_VkFramebufferCreateInfo(
-    ResourceTracker* resourceTracker,
-    VkFramebufferCreateInfo* toTransform);
-
 void transform_tohost_VkAttachmentDescription(
     ResourceTracker* resourceTracker,
     VkAttachmentDescription* toTransform);
@@ -702,6 +764,14 @@
     ResourceTracker* resourceTracker,
     VkAttachmentReference* toTransform);
 
+void transform_tohost_VkFramebufferCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkFramebufferCreateInfo* toTransform);
+
+void transform_fromhost_VkFramebufferCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkFramebufferCreateInfo* toTransform);
+
 void transform_tohost_VkSubpassDescription(
     ResourceTracker* resourceTracker,
     VkSubpassDescription* toTransform);
@@ -774,22 +844,6 @@
     ResourceTracker* resourceTracker,
     VkImageSubresourceLayers* toTransform);
 
-void transform_tohost_VkImageCopy(
-    ResourceTracker* resourceTracker,
-    VkImageCopy* toTransform);
-
-void transform_fromhost_VkImageCopy(
-    ResourceTracker* resourceTracker,
-    VkImageCopy* toTransform);
-
-void transform_tohost_VkImageBlit(
-    ResourceTracker* resourceTracker,
-    VkImageBlit* toTransform);
-
-void transform_fromhost_VkImageBlit(
-    ResourceTracker* resourceTracker,
-    VkImageBlit* toTransform);
-
 void transform_tohost_VkBufferImageCopy(
     ResourceTracker* resourceTracker,
     VkBufferImageCopy* toTransform);
@@ -838,6 +892,22 @@
     ResourceTracker* resourceTracker,
     VkClearRect* toTransform);
 
+void transform_tohost_VkImageBlit(
+    ResourceTracker* resourceTracker,
+    VkImageBlit* toTransform);
+
+void transform_fromhost_VkImageBlit(
+    ResourceTracker* resourceTracker,
+    VkImageBlit* toTransform);
+
+void transform_tohost_VkImageCopy(
+    ResourceTracker* resourceTracker,
+    VkImageCopy* toTransform);
+
+void transform_fromhost_VkImageCopy(
+    ResourceTracker* resourceTracker,
+    VkImageCopy* toTransform);
+
 void transform_tohost_VkImageResolve(
     ResourceTracker* resourceTracker,
     VkImageResolve* toTransform);
@@ -846,30 +916,6 @@
     ResourceTracker* resourceTracker,
     VkImageResolve* toTransform);
 
-void transform_tohost_VkMemoryBarrier(
-    ResourceTracker* resourceTracker,
-    VkMemoryBarrier* toTransform);
-
-void transform_fromhost_VkMemoryBarrier(
-    ResourceTracker* resourceTracker,
-    VkMemoryBarrier* toTransform);
-
-void transform_tohost_VkBufferMemoryBarrier(
-    ResourceTracker* resourceTracker,
-    VkBufferMemoryBarrier* toTransform);
-
-void transform_fromhost_VkBufferMemoryBarrier(
-    ResourceTracker* resourceTracker,
-    VkBufferMemoryBarrier* toTransform);
-
-void transform_tohost_VkImageMemoryBarrier(
-    ResourceTracker* resourceTracker,
-    VkImageMemoryBarrier* toTransform);
-
-void transform_fromhost_VkImageMemoryBarrier(
-    ResourceTracker* resourceTracker,
-    VkImageMemoryBarrier* toTransform);
-
 void transform_tohost_VkRenderPassBeginInfo(
     ResourceTracker* resourceTracker,
     VkRenderPassBeginInfo* toTransform);
@@ -878,46 +924,6 @@
     ResourceTracker* resourceTracker,
     VkRenderPassBeginInfo* toTransform);
 
-void transform_tohost_VkDispatchIndirectCommand(
-    ResourceTracker* resourceTracker,
-    VkDispatchIndirectCommand* toTransform);
-
-void transform_fromhost_VkDispatchIndirectCommand(
-    ResourceTracker* resourceTracker,
-    VkDispatchIndirectCommand* toTransform);
-
-void transform_tohost_VkDrawIndexedIndirectCommand(
-    ResourceTracker* resourceTracker,
-    VkDrawIndexedIndirectCommand* toTransform);
-
-void transform_fromhost_VkDrawIndexedIndirectCommand(
-    ResourceTracker* resourceTracker,
-    VkDrawIndexedIndirectCommand* toTransform);
-
-void transform_tohost_VkDrawIndirectCommand(
-    ResourceTracker* resourceTracker,
-    VkDrawIndirectCommand* toTransform);
-
-void transform_fromhost_VkDrawIndirectCommand(
-    ResourceTracker* resourceTracker,
-    VkDrawIndirectCommand* toTransform);
-
-void transform_tohost_VkBaseOutStructure(
-    ResourceTracker* resourceTracker,
-    VkBaseOutStructure* toTransform);
-
-void transform_fromhost_VkBaseOutStructure(
-    ResourceTracker* resourceTracker,
-    VkBaseOutStructure* toTransform);
-
-void transform_tohost_VkBaseInStructure(
-    ResourceTracker* resourceTracker,
-    VkBaseInStructure* toTransform);
-
-void transform_fromhost_VkBaseInStructure(
-    ResourceTracker* resourceTracker,
-    VkBaseInStructure* toTransform);
-
 #endif
 #ifdef VK_VERSION_1_1
 void transform_tohost_VkPhysicalDeviceSubgroupProperties(
@@ -1216,13 +1222,17 @@
     ResourceTracker* resourceTracker,
     VkPhysicalDeviceMultiviewProperties* toTransform);
 
-void transform_tohost_VkPhysicalDeviceVariablePointerFeatures(
+void transform_tohost_VkPhysicalDeviceVariablePointersFeatures(
     ResourceTracker* resourceTracker,
-    VkPhysicalDeviceVariablePointerFeatures* toTransform);
+    VkPhysicalDeviceVariablePointersFeatures* toTransform);
 
-void transform_fromhost_VkPhysicalDeviceVariablePointerFeatures(
+void transform_fromhost_VkPhysicalDeviceVariablePointersFeatures(
     ResourceTracker* resourceTracker,
-    VkPhysicalDeviceVariablePointerFeatures* toTransform);
+    VkPhysicalDeviceVariablePointersFeatures* toTransform);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceVariablePointersFeatures, transform_tohost_VkPhysicalDeviceVariablePointerFeatures);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceVariablePointersFeatures, transform_fromhost_VkPhysicalDeviceVariablePointerFeatures);
 
 void transform_tohost_VkPhysicalDeviceProtectedMemoryFeatures(
     ResourceTracker* resourceTracker,
@@ -1456,13 +1466,427 @@
     ResourceTracker* resourceTracker,
     VkDescriptorSetLayoutSupport* toTransform);
 
-void transform_tohost_VkPhysicalDeviceShaderDrawParameterFeatures(
+void transform_tohost_VkPhysicalDeviceShaderDrawParametersFeatures(
     ResourceTracker* resourceTracker,
-    VkPhysicalDeviceShaderDrawParameterFeatures* toTransform);
+    VkPhysicalDeviceShaderDrawParametersFeatures* toTransform);
 
-void transform_fromhost_VkPhysicalDeviceShaderDrawParameterFeatures(
+void transform_fromhost_VkPhysicalDeviceShaderDrawParametersFeatures(
     ResourceTracker* resourceTracker,
-    VkPhysicalDeviceShaderDrawParameterFeatures* toTransform);
+    VkPhysicalDeviceShaderDrawParametersFeatures* toTransform);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceShaderDrawParametersFeatures, transform_tohost_VkPhysicalDeviceShaderDrawParameterFeatures);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceShaderDrawParametersFeatures, transform_fromhost_VkPhysicalDeviceShaderDrawParameterFeatures);
+
+#endif
+#ifdef VK_VERSION_1_2
+void transform_tohost_VkPhysicalDeviceVulkan11Features(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkan11Features* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceVulkan11Features(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkan11Features* toTransform);
+
+void transform_tohost_VkPhysicalDeviceVulkan11Properties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkan11Properties* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceVulkan11Properties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkan11Properties* toTransform);
+
+void transform_tohost_VkPhysicalDeviceVulkan12Features(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkan12Features* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceVulkan12Features(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkan12Features* toTransform);
+
+void transform_tohost_VkConformanceVersion(
+    ResourceTracker* resourceTracker,
+    VkConformanceVersion* toTransform);
+
+void transform_fromhost_VkConformanceVersion(
+    ResourceTracker* resourceTracker,
+    VkConformanceVersion* toTransform);
+
+void transform_tohost_VkPhysicalDeviceVulkan12Properties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkan12Properties* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceVulkan12Properties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkan12Properties* toTransform);
+
+void transform_tohost_VkImageFormatListCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkImageFormatListCreateInfo* toTransform);
+
+void transform_fromhost_VkImageFormatListCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkImageFormatListCreateInfo* toTransform);
+
+void transform_tohost_VkAttachmentDescription2(
+    ResourceTracker* resourceTracker,
+    VkAttachmentDescription2* toTransform);
+
+void transform_fromhost_VkAttachmentDescription2(
+    ResourceTracker* resourceTracker,
+    VkAttachmentDescription2* toTransform);
+
+void transform_tohost_VkAttachmentReference2(
+    ResourceTracker* resourceTracker,
+    VkAttachmentReference2* toTransform);
+
+void transform_fromhost_VkAttachmentReference2(
+    ResourceTracker* resourceTracker,
+    VkAttachmentReference2* toTransform);
+
+void transform_tohost_VkSubpassDescription2(
+    ResourceTracker* resourceTracker,
+    VkSubpassDescription2* toTransform);
+
+void transform_fromhost_VkSubpassDescription2(
+    ResourceTracker* resourceTracker,
+    VkSubpassDescription2* toTransform);
+
+void transform_tohost_VkSubpassDependency2(
+    ResourceTracker* resourceTracker,
+    VkSubpassDependency2* toTransform);
+
+void transform_fromhost_VkSubpassDependency2(
+    ResourceTracker* resourceTracker,
+    VkSubpassDependency2* toTransform);
+
+void transform_tohost_VkRenderPassCreateInfo2(
+    ResourceTracker* resourceTracker,
+    VkRenderPassCreateInfo2* toTransform);
+
+void transform_fromhost_VkRenderPassCreateInfo2(
+    ResourceTracker* resourceTracker,
+    VkRenderPassCreateInfo2* toTransform);
+
+void transform_tohost_VkSubpassBeginInfo(
+    ResourceTracker* resourceTracker,
+    VkSubpassBeginInfo* toTransform);
+
+void transform_fromhost_VkSubpassBeginInfo(
+    ResourceTracker* resourceTracker,
+    VkSubpassBeginInfo* toTransform);
+
+void transform_tohost_VkSubpassEndInfo(
+    ResourceTracker* resourceTracker,
+    VkSubpassEndInfo* toTransform);
+
+void transform_fromhost_VkSubpassEndInfo(
+    ResourceTracker* resourceTracker,
+    VkSubpassEndInfo* toTransform);
+
+void transform_tohost_VkPhysicalDevice8BitStorageFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevice8BitStorageFeatures* toTransform);
+
+void transform_fromhost_VkPhysicalDevice8BitStorageFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevice8BitStorageFeatures* toTransform);
+
+void transform_tohost_VkPhysicalDeviceDriverProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDriverProperties* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceDriverProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDriverProperties* toTransform);
+
+void transform_tohost_VkPhysicalDeviceShaderAtomicInt64Features(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderAtomicInt64Features* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceShaderAtomicInt64Features(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderAtomicInt64Features* toTransform);
+
+void transform_tohost_VkPhysicalDeviceShaderFloat16Int8Features(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderFloat16Int8Features* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceShaderFloat16Int8Features(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderFloat16Int8Features* toTransform);
+
+void transform_tohost_VkPhysicalDeviceFloatControlsProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFloatControlsProperties* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceFloatControlsProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFloatControlsProperties* toTransform);
+
+void transform_tohost_VkDescriptorSetLayoutBindingFlagsCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorSetLayoutBindingFlagsCreateInfo* toTransform);
+
+void transform_fromhost_VkDescriptorSetLayoutBindingFlagsCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorSetLayoutBindingFlagsCreateInfo* toTransform);
+
+void transform_tohost_VkPhysicalDeviceDescriptorIndexingFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDescriptorIndexingFeatures* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceDescriptorIndexingFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDescriptorIndexingFeatures* toTransform);
+
+void transform_tohost_VkPhysicalDeviceDescriptorIndexingProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDescriptorIndexingProperties* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceDescriptorIndexingProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDescriptorIndexingProperties* toTransform);
+
+void transform_tohost_VkDescriptorSetVariableDescriptorCountAllocateInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorSetVariableDescriptorCountAllocateInfo* toTransform);
+
+void transform_fromhost_VkDescriptorSetVariableDescriptorCountAllocateInfo(
+    ResourceTracker* resourceTracker,
+    VkDescriptorSetVariableDescriptorCountAllocateInfo* toTransform);
+
+void transform_tohost_VkDescriptorSetVariableDescriptorCountLayoutSupport(
+    ResourceTracker* resourceTracker,
+    VkDescriptorSetVariableDescriptorCountLayoutSupport* toTransform);
+
+void transform_fromhost_VkDescriptorSetVariableDescriptorCountLayoutSupport(
+    ResourceTracker* resourceTracker,
+    VkDescriptorSetVariableDescriptorCountLayoutSupport* toTransform);
+
+void transform_tohost_VkSubpassDescriptionDepthStencilResolve(
+    ResourceTracker* resourceTracker,
+    VkSubpassDescriptionDepthStencilResolve* toTransform);
+
+void transform_fromhost_VkSubpassDescriptionDepthStencilResolve(
+    ResourceTracker* resourceTracker,
+    VkSubpassDescriptionDepthStencilResolve* toTransform);
+
+void transform_tohost_VkPhysicalDeviceDepthStencilResolveProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDepthStencilResolveProperties* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceDepthStencilResolveProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDepthStencilResolveProperties* toTransform);
+
+void transform_tohost_VkPhysicalDeviceScalarBlockLayoutFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceScalarBlockLayoutFeatures* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceScalarBlockLayoutFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceScalarBlockLayoutFeatures* toTransform);
+
+void transform_tohost_VkImageStencilUsageCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkImageStencilUsageCreateInfo* toTransform);
+
+void transform_fromhost_VkImageStencilUsageCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkImageStencilUsageCreateInfo* toTransform);
+
+void transform_tohost_VkSamplerReductionModeCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkSamplerReductionModeCreateInfo* toTransform);
+
+void transform_fromhost_VkSamplerReductionModeCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkSamplerReductionModeCreateInfo* toTransform);
+
+void transform_tohost_VkPhysicalDeviceSamplerFilterMinmaxProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceSamplerFilterMinmaxProperties* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceSamplerFilterMinmaxProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceSamplerFilterMinmaxProperties* toTransform);
+
+void transform_tohost_VkPhysicalDeviceVulkanMemoryModelFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkanMemoryModelFeatures* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceVulkanMemoryModelFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVulkanMemoryModelFeatures* toTransform);
+
+void transform_tohost_VkPhysicalDeviceImagelessFramebufferFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceImagelessFramebufferFeatures* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceImagelessFramebufferFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceImagelessFramebufferFeatures* toTransform);
+
+void transform_tohost_VkFramebufferAttachmentImageInfo(
+    ResourceTracker* resourceTracker,
+    VkFramebufferAttachmentImageInfo* toTransform);
+
+void transform_fromhost_VkFramebufferAttachmentImageInfo(
+    ResourceTracker* resourceTracker,
+    VkFramebufferAttachmentImageInfo* toTransform);
+
+void transform_tohost_VkFramebufferAttachmentsCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkFramebufferAttachmentsCreateInfo* toTransform);
+
+void transform_fromhost_VkFramebufferAttachmentsCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkFramebufferAttachmentsCreateInfo* toTransform);
+
+void transform_tohost_VkRenderPassAttachmentBeginInfo(
+    ResourceTracker* resourceTracker,
+    VkRenderPassAttachmentBeginInfo* toTransform);
+
+void transform_fromhost_VkRenderPassAttachmentBeginInfo(
+    ResourceTracker* resourceTracker,
+    VkRenderPassAttachmentBeginInfo* toTransform);
+
+void transform_tohost_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceUniformBufferStandardLayoutFeatures* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceUniformBufferStandardLayoutFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceUniformBufferStandardLayoutFeatures* toTransform);
+
+void transform_tohost_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures* toTransform);
+
+void transform_tohost_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures* toTransform);
+
+void transform_tohost_VkAttachmentReferenceStencilLayout(
+    ResourceTracker* resourceTracker,
+    VkAttachmentReferenceStencilLayout* toTransform);
+
+void transform_fromhost_VkAttachmentReferenceStencilLayout(
+    ResourceTracker* resourceTracker,
+    VkAttachmentReferenceStencilLayout* toTransform);
+
+void transform_tohost_VkAttachmentDescriptionStencilLayout(
+    ResourceTracker* resourceTracker,
+    VkAttachmentDescriptionStencilLayout* toTransform);
+
+void transform_fromhost_VkAttachmentDescriptionStencilLayout(
+    ResourceTracker* resourceTracker,
+    VkAttachmentDescriptionStencilLayout* toTransform);
+
+void transform_tohost_VkPhysicalDeviceHostQueryResetFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceHostQueryResetFeatures* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceHostQueryResetFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceHostQueryResetFeatures* toTransform);
+
+void transform_tohost_VkPhysicalDeviceTimelineSemaphoreFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTimelineSemaphoreFeatures* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceTimelineSemaphoreFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTimelineSemaphoreFeatures* toTransform);
+
+void transform_tohost_VkPhysicalDeviceTimelineSemaphoreProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTimelineSemaphoreProperties* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceTimelineSemaphoreProperties(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTimelineSemaphoreProperties* toTransform);
+
+void transform_tohost_VkSemaphoreTypeCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkSemaphoreTypeCreateInfo* toTransform);
+
+void transform_fromhost_VkSemaphoreTypeCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkSemaphoreTypeCreateInfo* toTransform);
+
+void transform_tohost_VkTimelineSemaphoreSubmitInfo(
+    ResourceTracker* resourceTracker,
+    VkTimelineSemaphoreSubmitInfo* toTransform);
+
+void transform_fromhost_VkTimelineSemaphoreSubmitInfo(
+    ResourceTracker* resourceTracker,
+    VkTimelineSemaphoreSubmitInfo* toTransform);
+
+void transform_tohost_VkSemaphoreWaitInfo(
+    ResourceTracker* resourceTracker,
+    VkSemaphoreWaitInfo* toTransform);
+
+void transform_fromhost_VkSemaphoreWaitInfo(
+    ResourceTracker* resourceTracker,
+    VkSemaphoreWaitInfo* toTransform);
+
+void transform_tohost_VkSemaphoreSignalInfo(
+    ResourceTracker* resourceTracker,
+    VkSemaphoreSignalInfo* toTransform);
+
+void transform_fromhost_VkSemaphoreSignalInfo(
+    ResourceTracker* resourceTracker,
+    VkSemaphoreSignalInfo* toTransform);
+
+void transform_tohost_VkPhysicalDeviceBufferDeviceAddressFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceBufferDeviceAddressFeatures* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceBufferDeviceAddressFeatures(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceBufferDeviceAddressFeatures* toTransform);
+
+void transform_tohost_VkBufferDeviceAddressInfo(
+    ResourceTracker* resourceTracker,
+    VkBufferDeviceAddressInfo* toTransform);
+
+void transform_fromhost_VkBufferDeviceAddressInfo(
+    ResourceTracker* resourceTracker,
+    VkBufferDeviceAddressInfo* toTransform);
+
+void transform_tohost_VkBufferOpaqueCaptureAddressCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkBufferOpaqueCaptureAddressCreateInfo* toTransform);
+
+void transform_fromhost_VkBufferOpaqueCaptureAddressCreateInfo(
+    ResourceTracker* resourceTracker,
+    VkBufferOpaqueCaptureAddressCreateInfo* toTransform);
+
+void transform_tohost_VkMemoryOpaqueCaptureAddressAllocateInfo(
+    ResourceTracker* resourceTracker,
+    VkMemoryOpaqueCaptureAddressAllocateInfo* toTransform);
+
+void transform_fromhost_VkMemoryOpaqueCaptureAddressAllocateInfo(
+    ResourceTracker* resourceTracker,
+    VkMemoryOpaqueCaptureAddressAllocateInfo* toTransform);
+
+void transform_tohost_VkDeviceMemoryOpaqueCaptureAddressInfo(
+    ResourceTracker* resourceTracker,
+    VkDeviceMemoryOpaqueCaptureAddressInfo* toTransform);
+
+void transform_fromhost_VkDeviceMemoryOpaqueCaptureAddressInfo(
+    ResourceTracker* resourceTracker,
+    VkDeviceMemoryOpaqueCaptureAddressInfo* toTransform);
 
 #endif
 #ifdef VK_KHR_surface
@@ -1550,14 +1974,6 @@
 
 #endif
 #ifdef VK_KHR_display
-void transform_tohost_VkDisplayPropertiesKHR(
-    ResourceTracker* resourceTracker,
-    VkDisplayPropertiesKHR* toTransform);
-
-void transform_fromhost_VkDisplayPropertiesKHR(
-    ResourceTracker* resourceTracker,
-    VkDisplayPropertiesKHR* toTransform);
-
 void transform_tohost_VkDisplayModeParametersKHR(
     ResourceTracker* resourceTracker,
     VkDisplayModeParametersKHR* toTransform);
@@ -1566,14 +1982,6 @@
     ResourceTracker* resourceTracker,
     VkDisplayModeParametersKHR* toTransform);
 
-void transform_tohost_VkDisplayModePropertiesKHR(
-    ResourceTracker* resourceTracker,
-    VkDisplayModePropertiesKHR* toTransform);
-
-void transform_fromhost_VkDisplayModePropertiesKHR(
-    ResourceTracker* resourceTracker,
-    VkDisplayModePropertiesKHR* toTransform);
-
 void transform_tohost_VkDisplayModeCreateInfoKHR(
     ResourceTracker* resourceTracker,
     VkDisplayModeCreateInfoKHR* toTransform);
@@ -1582,6 +1990,14 @@
     ResourceTracker* resourceTracker,
     VkDisplayModeCreateInfoKHR* toTransform);
 
+void transform_tohost_VkDisplayModePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkDisplayModePropertiesKHR* toTransform);
+
+void transform_fromhost_VkDisplayModePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkDisplayModePropertiesKHR* toTransform);
+
 void transform_tohost_VkDisplayPlaneCapabilitiesKHR(
     ResourceTracker* resourceTracker,
     VkDisplayPlaneCapabilitiesKHR* toTransform);
@@ -1598,6 +2014,14 @@
     ResourceTracker* resourceTracker,
     VkDisplayPlanePropertiesKHR* toTransform);
 
+void transform_tohost_VkDisplayPropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkDisplayPropertiesKHR* toTransform);
+
+void transform_fromhost_VkDisplayPropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkDisplayPropertiesKHR* toTransform);
+
 void transform_tohost_VkDisplaySurfaceCreateInfoKHR(
     ResourceTracker* resourceTracker,
     VkDisplaySurfaceCreateInfoKHR* toTransform);
@@ -1647,16 +2071,6 @@
     VkWaylandSurfaceCreateInfoKHR* toTransform);
 
 #endif
-#ifdef VK_KHR_mir_surface
-void transform_tohost_VkMirSurfaceCreateInfoKHR(
-    ResourceTracker* resourceTracker,
-    VkMirSurfaceCreateInfoKHR* toTransform);
-
-void transform_fromhost_VkMirSurfaceCreateInfoKHR(
-    ResourceTracker* resourceTracker,
-    VkMirSurfaceCreateInfoKHR* toTransform);
-
-#endif
 #ifdef VK_KHR_android_surface
 void transform_tohost_VkAndroidSurfaceCreateInfoKHR(
     ResourceTracker* resourceTracker,
@@ -1680,20 +2094,140 @@
 #ifdef VK_KHR_sampler_mirror_clamp_to_edge
 #endif
 #ifdef VK_KHR_multiview
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkRenderPassMultiviewCreateInfo, transform_tohost_VkRenderPassMultiviewCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkRenderPassMultiviewCreateInfo, transform_fromhost_VkRenderPassMultiviewCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceMultiviewFeatures, transform_tohost_VkPhysicalDeviceMultiviewFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceMultiviewFeatures, transform_fromhost_VkPhysicalDeviceMultiviewFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceMultiviewProperties, transform_tohost_VkPhysicalDeviceMultiviewPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceMultiviewProperties, transform_fromhost_VkPhysicalDeviceMultiviewPropertiesKHR);
+
 #endif
 #ifdef VK_KHR_get_physical_device_properties2
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceFeatures2, transform_tohost_VkPhysicalDeviceFeatures2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceFeatures2, transform_fromhost_VkPhysicalDeviceFeatures2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceProperties2, transform_tohost_VkPhysicalDeviceProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceProperties2, transform_fromhost_VkPhysicalDeviceProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkFormatProperties2, transform_tohost_VkFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkFormatProperties2, transform_fromhost_VkFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkImageFormatProperties2, transform_tohost_VkImageFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkImageFormatProperties2, transform_fromhost_VkImageFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceImageFormatInfo2, transform_tohost_VkPhysicalDeviceImageFormatInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceImageFormatInfo2, transform_fromhost_VkPhysicalDeviceImageFormatInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkQueueFamilyProperties2, transform_tohost_VkQueueFamilyProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkQueueFamilyProperties2, transform_fromhost_VkQueueFamilyProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceMemoryProperties2, transform_tohost_VkPhysicalDeviceMemoryProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceMemoryProperties2, transform_fromhost_VkPhysicalDeviceMemoryProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkSparseImageFormatProperties2, transform_tohost_VkSparseImageFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkSparseImageFormatProperties2, transform_fromhost_VkSparseImageFormatProperties2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceSparseImageFormatInfo2, transform_tohost_VkPhysicalDeviceSparseImageFormatInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceSparseImageFormatInfo2, transform_fromhost_VkPhysicalDeviceSparseImageFormatInfo2KHR);
+
 #endif
 #ifdef VK_KHR_device_group
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkMemoryAllocateFlagsInfo, transform_tohost_VkMemoryAllocateFlagsInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkMemoryAllocateFlagsInfo, transform_fromhost_VkMemoryAllocateFlagsInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkDeviceGroupRenderPassBeginInfo, transform_tohost_VkDeviceGroupRenderPassBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkDeviceGroupRenderPassBeginInfo, transform_fromhost_VkDeviceGroupRenderPassBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkDeviceGroupCommandBufferBeginInfo, transform_tohost_VkDeviceGroupCommandBufferBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkDeviceGroupCommandBufferBeginInfo, transform_fromhost_VkDeviceGroupCommandBufferBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkDeviceGroupSubmitInfo, transform_tohost_VkDeviceGroupSubmitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkDeviceGroupSubmitInfo, transform_fromhost_VkDeviceGroupSubmitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkDeviceGroupBindSparseInfo, transform_tohost_VkDeviceGroupBindSparseInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkDeviceGroupBindSparseInfo, transform_fromhost_VkDeviceGroupBindSparseInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkBindBufferMemoryDeviceGroupInfo, transform_tohost_VkBindBufferMemoryDeviceGroupInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkBindBufferMemoryDeviceGroupInfo, transform_fromhost_VkBindBufferMemoryDeviceGroupInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkBindImageMemoryDeviceGroupInfo, transform_tohost_VkBindImageMemoryDeviceGroupInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkBindImageMemoryDeviceGroupInfo, transform_fromhost_VkBindImageMemoryDeviceGroupInfoKHR);
+
 #endif
 #ifdef VK_KHR_shader_draw_parameters
 #endif
 #ifdef VK_KHR_maintenance1
 #endif
 #ifdef VK_KHR_device_group_creation
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceGroupProperties, transform_tohost_VkPhysicalDeviceGroupPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceGroupProperties, transform_fromhost_VkPhysicalDeviceGroupPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkDeviceGroupDeviceCreateInfo, transform_tohost_VkDeviceGroupDeviceCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkDeviceGroupDeviceCreateInfo, transform_fromhost_VkDeviceGroupDeviceCreateInfoKHR);
+
 #endif
 #ifdef VK_KHR_external_memory_capabilities
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkExternalMemoryProperties, transform_tohost_VkExternalMemoryPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkExternalMemoryProperties, transform_fromhost_VkExternalMemoryPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceExternalImageFormatInfo, transform_tohost_VkPhysicalDeviceExternalImageFormatInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceExternalImageFormatInfo, transform_fromhost_VkPhysicalDeviceExternalImageFormatInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkExternalImageFormatProperties, transform_tohost_VkExternalImageFormatPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkExternalImageFormatProperties, transform_fromhost_VkExternalImageFormatPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceExternalBufferInfo, transform_tohost_VkPhysicalDeviceExternalBufferInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceExternalBufferInfo, transform_fromhost_VkPhysicalDeviceExternalBufferInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkExternalBufferProperties, transform_tohost_VkExternalBufferPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkExternalBufferProperties, transform_fromhost_VkExternalBufferPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceIDProperties, transform_tohost_VkPhysicalDeviceIDPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceIDProperties, transform_fromhost_VkPhysicalDeviceIDPropertiesKHR);
+
 #endif
 #ifdef VK_KHR_external_memory
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkExternalMemoryImageCreateInfo, transform_tohost_VkExternalMemoryImageCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkExternalMemoryImageCreateInfo, transform_fromhost_VkExternalMemoryImageCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkExternalMemoryBufferCreateInfo, transform_tohost_VkExternalMemoryBufferCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkExternalMemoryBufferCreateInfo, transform_fromhost_VkExternalMemoryBufferCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkExportMemoryAllocateInfo, transform_tohost_VkExportMemoryAllocateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkExportMemoryAllocateInfo, transform_fromhost_VkExportMemoryAllocateInfoKHR);
+
 #endif
 #ifdef VK_KHR_external_memory_win32
 void transform_tohost_VkImportMemoryWin32HandleInfoKHR(
@@ -1766,8 +2300,20 @@
 
 #endif
 #ifdef VK_KHR_external_semaphore_capabilities
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceExternalSemaphoreInfo, transform_tohost_VkPhysicalDeviceExternalSemaphoreInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceExternalSemaphoreInfo, transform_fromhost_VkPhysicalDeviceExternalSemaphoreInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkExternalSemaphoreProperties, transform_tohost_VkExternalSemaphorePropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkExternalSemaphoreProperties, transform_fromhost_VkExternalSemaphorePropertiesKHR);
+
 #endif
 #ifdef VK_KHR_external_semaphore
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkExportSemaphoreCreateInfo, transform_tohost_VkExportSemaphoreCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkExportSemaphoreCreateInfo, transform_fromhost_VkExportSemaphoreCreateInfoKHR);
+
 #endif
 #ifdef VK_KHR_external_semaphore_win32
 void transform_tohost_VkImportSemaphoreWin32HandleInfoKHR(
@@ -1831,7 +2377,21 @@
     VkPhysicalDevicePushDescriptorPropertiesKHR* toTransform);
 
 #endif
+#ifdef VK_KHR_shader_float16_int8
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceShaderFloat16Int8Features, transform_tohost_VkPhysicalDeviceShaderFloat16Int8FeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceShaderFloat16Int8Features, transform_fromhost_VkPhysicalDeviceShaderFloat16Int8FeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceShaderFloat16Int8Features, transform_tohost_VkPhysicalDeviceFloat16Int8FeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceShaderFloat16Int8Features, transform_fromhost_VkPhysicalDeviceFloat16Int8FeaturesKHR);
+
+#endif
 #ifdef VK_KHR_16bit_storage
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDevice16BitStorageFeatures, transform_tohost_VkPhysicalDevice16BitStorageFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDevice16BitStorageFeatures, transform_fromhost_VkPhysicalDevice16BitStorageFeaturesKHR);
+
 #endif
 #ifdef VK_KHR_incremental_present
 void transform_tohost_VkRectLayerKHR(
@@ -1860,63 +2420,61 @@
 
 #endif
 #ifdef VK_KHR_descriptor_update_template
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkDescriptorUpdateTemplateEntry, transform_tohost_VkDescriptorUpdateTemplateEntryKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkDescriptorUpdateTemplateEntry, transform_fromhost_VkDescriptorUpdateTemplateEntryKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkDescriptorUpdateTemplateCreateInfo, transform_tohost_VkDescriptorUpdateTemplateCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkDescriptorUpdateTemplateCreateInfo, transform_fromhost_VkDescriptorUpdateTemplateCreateInfoKHR);
+
+#endif
+#ifdef VK_KHR_imageless_framebuffer
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceImagelessFramebufferFeatures, transform_tohost_VkPhysicalDeviceImagelessFramebufferFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceImagelessFramebufferFeatures, transform_fromhost_VkPhysicalDeviceImagelessFramebufferFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkFramebufferAttachmentsCreateInfo, transform_tohost_VkFramebufferAttachmentsCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkFramebufferAttachmentsCreateInfo, transform_fromhost_VkFramebufferAttachmentsCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkFramebufferAttachmentImageInfo, transform_tohost_VkFramebufferAttachmentImageInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkFramebufferAttachmentImageInfo, transform_fromhost_VkFramebufferAttachmentImageInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkRenderPassAttachmentBeginInfo, transform_tohost_VkRenderPassAttachmentBeginInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkRenderPassAttachmentBeginInfo, transform_fromhost_VkRenderPassAttachmentBeginInfoKHR);
+
 #endif
 #ifdef VK_KHR_create_renderpass2
-void transform_tohost_VkAttachmentDescription2KHR(
-    ResourceTracker* resourceTracker,
-    VkAttachmentDescription2KHR* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkRenderPassCreateInfo2, transform_tohost_VkRenderPassCreateInfo2KHR);
 
-void transform_fromhost_VkAttachmentDescription2KHR(
-    ResourceTracker* resourceTracker,
-    VkAttachmentDescription2KHR* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkRenderPassCreateInfo2, transform_fromhost_VkRenderPassCreateInfo2KHR);
 
-void transform_tohost_VkAttachmentReference2KHR(
-    ResourceTracker* resourceTracker,
-    VkAttachmentReference2KHR* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkAttachmentDescription2, transform_tohost_VkAttachmentDescription2KHR);
 
-void transform_fromhost_VkAttachmentReference2KHR(
-    ResourceTracker* resourceTracker,
-    VkAttachmentReference2KHR* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkAttachmentDescription2, transform_fromhost_VkAttachmentDescription2KHR);
 
-void transform_tohost_VkSubpassDescription2KHR(
-    ResourceTracker* resourceTracker,
-    VkSubpassDescription2KHR* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkAttachmentReference2, transform_tohost_VkAttachmentReference2KHR);
 
-void transform_fromhost_VkSubpassDescription2KHR(
-    ResourceTracker* resourceTracker,
-    VkSubpassDescription2KHR* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkAttachmentReference2, transform_fromhost_VkAttachmentReference2KHR);
 
-void transform_tohost_VkSubpassDependency2KHR(
-    ResourceTracker* resourceTracker,
-    VkSubpassDependency2KHR* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkSubpassDescription2, transform_tohost_VkSubpassDescription2KHR);
 
-void transform_fromhost_VkSubpassDependency2KHR(
-    ResourceTracker* resourceTracker,
-    VkSubpassDependency2KHR* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkSubpassDescription2, transform_fromhost_VkSubpassDescription2KHR);
 
-void transform_tohost_VkRenderPassCreateInfo2KHR(
-    ResourceTracker* resourceTracker,
-    VkRenderPassCreateInfo2KHR* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkSubpassDependency2, transform_tohost_VkSubpassDependency2KHR);
 
-void transform_fromhost_VkRenderPassCreateInfo2KHR(
-    ResourceTracker* resourceTracker,
-    VkRenderPassCreateInfo2KHR* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkSubpassDependency2, transform_fromhost_VkSubpassDependency2KHR);
 
-void transform_tohost_VkSubpassBeginInfoKHR(
-    ResourceTracker* resourceTracker,
-    VkSubpassBeginInfoKHR* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkSubpassBeginInfo, transform_tohost_VkSubpassBeginInfoKHR);
 
-void transform_fromhost_VkSubpassBeginInfoKHR(
-    ResourceTracker* resourceTracker,
-    VkSubpassBeginInfoKHR* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkSubpassBeginInfo, transform_fromhost_VkSubpassBeginInfoKHR);
 
-void transform_tohost_VkSubpassEndInfoKHR(
-    ResourceTracker* resourceTracker,
-    VkSubpassEndInfoKHR* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkSubpassEndInfo, transform_tohost_VkSubpassEndInfoKHR);
 
-void transform_fromhost_VkSubpassEndInfoKHR(
-    ResourceTracker* resourceTracker,
-    VkSubpassEndInfoKHR* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkSubpassEndInfo, transform_fromhost_VkSubpassEndInfoKHR);
 
 #endif
 #ifdef VK_KHR_shared_presentable_image
@@ -1930,8 +2488,20 @@
 
 #endif
 #ifdef VK_KHR_external_fence_capabilities
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceExternalFenceInfo, transform_tohost_VkPhysicalDeviceExternalFenceInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceExternalFenceInfo, transform_fromhost_VkPhysicalDeviceExternalFenceInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkExternalFenceProperties, transform_tohost_VkExternalFencePropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkExternalFenceProperties, transform_fromhost_VkExternalFencePropertiesKHR);
+
 #endif
 #ifdef VK_KHR_external_fence
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkExportFenceCreateInfo, transform_tohost_VkExportFenceCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkExportFenceCreateInfo, transform_fromhost_VkExportFenceCreateInfoKHR);
+
 #endif
 #ifdef VK_KHR_external_fence_win32
 void transform_tohost_VkImportFenceWin32HandleInfoKHR(
@@ -1977,7 +2547,93 @@
     VkFenceGetFdInfoKHR* toTransform);
 
 #endif
+#ifdef VK_KHR_performance_query
+void transform_tohost_VkPhysicalDevicePerformanceQueryFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePerformanceQueryFeaturesKHR* toTransform);
+
+void transform_fromhost_VkPhysicalDevicePerformanceQueryFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePerformanceQueryFeaturesKHR* toTransform);
+
+void transform_tohost_VkPhysicalDevicePerformanceQueryPropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePerformanceQueryPropertiesKHR* toTransform);
+
+void transform_fromhost_VkPhysicalDevicePerformanceQueryPropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePerformanceQueryPropertiesKHR* toTransform);
+
+void transform_tohost_VkPerformanceCounterKHR(
+    ResourceTracker* resourceTracker,
+    VkPerformanceCounterKHR* toTransform);
+
+void transform_fromhost_VkPerformanceCounterKHR(
+    ResourceTracker* resourceTracker,
+    VkPerformanceCounterKHR* toTransform);
+
+void transform_tohost_VkPerformanceCounterDescriptionKHR(
+    ResourceTracker* resourceTracker,
+    VkPerformanceCounterDescriptionKHR* toTransform);
+
+void transform_fromhost_VkPerformanceCounterDescriptionKHR(
+    ResourceTracker* resourceTracker,
+    VkPerformanceCounterDescriptionKHR* toTransform);
+
+void transform_tohost_VkQueryPoolPerformanceCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkQueryPoolPerformanceCreateInfoKHR* toTransform);
+
+void transform_fromhost_VkQueryPoolPerformanceCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkQueryPoolPerformanceCreateInfoKHR* toTransform);
+
+void transform_tohost_VkPerformanceCounterResultKHR(
+    ResourceTracker* resourceTracker,
+    VkPerformanceCounterResultKHR* toTransform);
+
+void transform_fromhost_VkPerformanceCounterResultKHR(
+    ResourceTracker* resourceTracker,
+    VkPerformanceCounterResultKHR* toTransform);
+
+void transform_tohost_VkAcquireProfilingLockInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAcquireProfilingLockInfoKHR* toTransform);
+
+void transform_fromhost_VkAcquireProfilingLockInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAcquireProfilingLockInfoKHR* toTransform);
+
+void transform_tohost_VkPerformanceQuerySubmitInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPerformanceQuerySubmitInfoKHR* toTransform);
+
+void transform_fromhost_VkPerformanceQuerySubmitInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPerformanceQuerySubmitInfoKHR* toTransform);
+
+#endif
 #ifdef VK_KHR_maintenance2
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDevicePointClippingProperties, transform_tohost_VkPhysicalDevicePointClippingPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDevicePointClippingProperties, transform_fromhost_VkPhysicalDevicePointClippingPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkRenderPassInputAttachmentAspectCreateInfo, transform_tohost_VkRenderPassInputAttachmentAspectCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkRenderPassInputAttachmentAspectCreateInfo, transform_fromhost_VkRenderPassInputAttachmentAspectCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkInputAttachmentAspectReference, transform_tohost_VkInputAttachmentAspectReferenceKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkInputAttachmentAspectReference, transform_fromhost_VkInputAttachmentAspectReferenceKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkImageViewUsageCreateInfo, transform_tohost_VkImageViewUsageCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkImageViewUsageCreateInfo, transform_fromhost_VkImageViewUsageCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPipelineTessellationDomainOriginStateCreateInfo, transform_tohost_VkPipelineTessellationDomainOriginStateCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPipelineTessellationDomainOriginStateCreateInfo, transform_fromhost_VkPipelineTessellationDomainOriginStateCreateInfoKHR);
+
 #endif
 #ifdef VK_KHR_get_surface_capabilities2
 void transform_tohost_VkPhysicalDeviceSurfaceInfo2KHR(
@@ -2006,6 +2662,14 @@
 
 #endif
 #ifdef VK_KHR_variable_pointers
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceVariablePointersFeatures, transform_tohost_VkPhysicalDeviceVariablePointerFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceVariablePointersFeatures, transform_fromhost_VkPhysicalDeviceVariablePointerFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceVariablePointersFeatures, transform_tohost_VkPhysicalDeviceVariablePointersFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceVariablePointersFeatures, transform_fromhost_VkPhysicalDeviceVariablePointersFeaturesKHR);
+
 #endif
 #ifdef VK_KHR_get_display_properties2
 void transform_tohost_VkDisplayProperties2KHR(
@@ -2050,39 +2714,467 @@
 
 #endif
 #ifdef VK_KHR_dedicated_allocation
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkMemoryDedicatedRequirements, transform_tohost_VkMemoryDedicatedRequirementsKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkMemoryDedicatedRequirements, transform_fromhost_VkMemoryDedicatedRequirementsKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkMemoryDedicatedAllocateInfo, transform_tohost_VkMemoryDedicatedAllocateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkMemoryDedicatedAllocateInfo, transform_fromhost_VkMemoryDedicatedAllocateInfoKHR);
+
 #endif
 #ifdef VK_KHR_storage_buffer_storage_class
 #endif
 #ifdef VK_KHR_relaxed_block_layout
 #endif
 #ifdef VK_KHR_get_memory_requirements2
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkBufferMemoryRequirementsInfo2, transform_tohost_VkBufferMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkBufferMemoryRequirementsInfo2, transform_fromhost_VkBufferMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkImageMemoryRequirementsInfo2, transform_tohost_VkImageMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkImageMemoryRequirementsInfo2, transform_fromhost_VkImageMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkImageSparseMemoryRequirementsInfo2, transform_tohost_VkImageSparseMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkImageSparseMemoryRequirementsInfo2, transform_fromhost_VkImageSparseMemoryRequirementsInfo2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkMemoryRequirements2, transform_tohost_VkMemoryRequirements2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkMemoryRequirements2, transform_fromhost_VkMemoryRequirements2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkSparseImageMemoryRequirements2, transform_tohost_VkSparseImageMemoryRequirements2KHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkSparseImageMemoryRequirements2, transform_fromhost_VkSparseImageMemoryRequirements2KHR);
+
 #endif
 #ifdef VK_KHR_image_format_list
-void transform_tohost_VkImageFormatListCreateInfoKHR(
-    ResourceTracker* resourceTracker,
-    VkImageFormatListCreateInfoKHR* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkImageFormatListCreateInfo, transform_tohost_VkImageFormatListCreateInfoKHR);
 
-void transform_fromhost_VkImageFormatListCreateInfoKHR(
-    ResourceTracker* resourceTracker,
-    VkImageFormatListCreateInfoKHR* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkImageFormatListCreateInfo, transform_fromhost_VkImageFormatListCreateInfoKHR);
 
 #endif
 #ifdef VK_KHR_sampler_ycbcr_conversion
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkSamplerYcbcrConversionCreateInfo, transform_tohost_VkSamplerYcbcrConversionCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkSamplerYcbcrConversionCreateInfo, transform_fromhost_VkSamplerYcbcrConversionCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkSamplerYcbcrConversionInfo, transform_tohost_VkSamplerYcbcrConversionInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkSamplerYcbcrConversionInfo, transform_fromhost_VkSamplerYcbcrConversionInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkBindImagePlaneMemoryInfo, transform_tohost_VkBindImagePlaneMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkBindImagePlaneMemoryInfo, transform_fromhost_VkBindImagePlaneMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkImagePlaneMemoryRequirementsInfo, transform_tohost_VkImagePlaneMemoryRequirementsInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkImagePlaneMemoryRequirementsInfo, transform_fromhost_VkImagePlaneMemoryRequirementsInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceSamplerYcbcrConversionFeatures, transform_tohost_VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceSamplerYcbcrConversionFeatures, transform_fromhost_VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkSamplerYcbcrConversionImageFormatProperties, transform_tohost_VkSamplerYcbcrConversionImageFormatPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkSamplerYcbcrConversionImageFormatProperties, transform_fromhost_VkSamplerYcbcrConversionImageFormatPropertiesKHR);
+
 #endif
 #ifdef VK_KHR_bind_memory2
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkBindBufferMemoryInfo, transform_tohost_VkBindBufferMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkBindBufferMemoryInfo, transform_fromhost_VkBindBufferMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkBindImageMemoryInfo, transform_tohost_VkBindImageMemoryInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkBindImageMemoryInfo, transform_fromhost_VkBindImageMemoryInfoKHR);
+
+#endif
+#ifdef VK_KHR_portability_subset
+void transform_tohost_VkPhysicalDevicePortabilitySubsetFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePortabilitySubsetFeaturesKHR* toTransform);
+
+void transform_fromhost_VkPhysicalDevicePortabilitySubsetFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePortabilitySubsetFeaturesKHR* toTransform);
+
+void transform_tohost_VkPhysicalDevicePortabilitySubsetPropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePortabilitySubsetPropertiesKHR* toTransform);
+
+void transform_fromhost_VkPhysicalDevicePortabilitySubsetPropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePortabilitySubsetPropertiesKHR* toTransform);
+
 #endif
 #ifdef VK_KHR_maintenance3
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceMaintenance3Properties, transform_tohost_VkPhysicalDeviceMaintenance3PropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceMaintenance3Properties, transform_fromhost_VkPhysicalDeviceMaintenance3PropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkDescriptorSetLayoutSupport, transform_tohost_VkDescriptorSetLayoutSupportKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkDescriptorSetLayoutSupport, transform_fromhost_VkDescriptorSetLayoutSupportKHR);
+
 #endif
 #ifdef VK_KHR_draw_indirect_count
 #endif
-#ifdef VK_KHR_8bit_storage
-void transform_tohost_VkPhysicalDevice8BitStorageFeaturesKHR(
-    ResourceTracker* resourceTracker,
-    VkPhysicalDevice8BitStorageFeaturesKHR* toTransform);
+#ifdef VK_KHR_shader_subgroup_extended_types
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures, transform_tohost_VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR);
 
-void transform_fromhost_VkPhysicalDevice8BitStorageFeaturesKHR(
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures, transform_fromhost_VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_8bit_storage
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDevice8BitStorageFeatures, transform_tohost_VkPhysicalDevice8BitStorageFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDevice8BitStorageFeatures, transform_fromhost_VkPhysicalDevice8BitStorageFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_shader_atomic_int64
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceShaderAtomicInt64Features, transform_tohost_VkPhysicalDeviceShaderAtomicInt64FeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceShaderAtomicInt64Features, transform_fromhost_VkPhysicalDeviceShaderAtomicInt64FeaturesKHR);
+
+#endif
+#ifdef VK_KHR_shader_clock
+void transform_tohost_VkPhysicalDeviceShaderClockFeaturesKHR(
     ResourceTracker* resourceTracker,
-    VkPhysicalDevice8BitStorageFeaturesKHR* toTransform);
+    VkPhysicalDeviceShaderClockFeaturesKHR* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceShaderClockFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderClockFeaturesKHR* toTransform);
+
+#endif
+#ifdef VK_KHR_driver_properties
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkConformanceVersion, transform_tohost_VkConformanceVersionKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkConformanceVersion, transform_fromhost_VkConformanceVersionKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceDriverProperties, transform_tohost_VkPhysicalDeviceDriverPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceDriverProperties, transform_fromhost_VkPhysicalDeviceDriverPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_shader_float_controls
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceFloatControlsProperties, transform_tohost_VkPhysicalDeviceFloatControlsPropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceFloatControlsProperties, transform_fromhost_VkPhysicalDeviceFloatControlsPropertiesKHR);
+
+#endif
+#ifdef VK_KHR_depth_stencil_resolve
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkSubpassDescriptionDepthStencilResolve, transform_tohost_VkSubpassDescriptionDepthStencilResolveKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkSubpassDescriptionDepthStencilResolve, transform_fromhost_VkSubpassDescriptionDepthStencilResolveKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceDepthStencilResolveProperties, transform_tohost_VkPhysicalDeviceDepthStencilResolvePropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceDepthStencilResolveProperties, transform_fromhost_VkPhysicalDeviceDepthStencilResolvePropertiesKHR);
+
+#endif
+#ifdef VK_KHR_swapchain_mutable_format
+#endif
+#ifdef VK_KHR_timeline_semaphore
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceTimelineSemaphoreFeatures, transform_tohost_VkPhysicalDeviceTimelineSemaphoreFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceTimelineSemaphoreFeatures, transform_fromhost_VkPhysicalDeviceTimelineSemaphoreFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceTimelineSemaphoreProperties, transform_tohost_VkPhysicalDeviceTimelineSemaphorePropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceTimelineSemaphoreProperties, transform_fromhost_VkPhysicalDeviceTimelineSemaphorePropertiesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkSemaphoreTypeCreateInfo, transform_tohost_VkSemaphoreTypeCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkSemaphoreTypeCreateInfo, transform_fromhost_VkSemaphoreTypeCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkTimelineSemaphoreSubmitInfo, transform_tohost_VkTimelineSemaphoreSubmitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkTimelineSemaphoreSubmitInfo, transform_fromhost_VkTimelineSemaphoreSubmitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkSemaphoreWaitInfo, transform_tohost_VkSemaphoreWaitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkSemaphoreWaitInfo, transform_fromhost_VkSemaphoreWaitInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkSemaphoreSignalInfo, transform_tohost_VkSemaphoreSignalInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkSemaphoreSignalInfo, transform_fromhost_VkSemaphoreSignalInfoKHR);
+
+#endif
+#ifdef VK_KHR_vulkan_memory_model
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceVulkanMemoryModelFeatures, transform_tohost_VkPhysicalDeviceVulkanMemoryModelFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceVulkanMemoryModelFeatures, transform_fromhost_VkPhysicalDeviceVulkanMemoryModelFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_shader_terminate_invocation
+void transform_tohost_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderTerminateInvocationFeaturesKHR* toTransform);
+
+#endif
+#ifdef VK_KHR_fragment_shading_rate
+void transform_tohost_VkFragmentShadingRateAttachmentInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkFragmentShadingRateAttachmentInfoKHR* toTransform);
+
+void transform_fromhost_VkFragmentShadingRateAttachmentInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkFragmentShadingRateAttachmentInfoKHR* toTransform);
+
+void transform_tohost_VkPipelineFragmentShadingRateStateCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineFragmentShadingRateStateCreateInfoKHR* toTransform);
+
+void transform_fromhost_VkPipelineFragmentShadingRateStateCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineFragmentShadingRateStateCreateInfoKHR* toTransform);
+
+void transform_tohost_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRateFeaturesKHR* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceFragmentShadingRateFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRateFeaturesKHR* toTransform);
+
+void transform_tohost_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRatePropertiesKHR* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceFragmentShadingRatePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRatePropertiesKHR* toTransform);
+
+void transform_tohost_VkPhysicalDeviceFragmentShadingRateKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRateKHR* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceFragmentShadingRateKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRateKHR* toTransform);
+
+#endif
+#ifdef VK_KHR_spirv_1_4
+#endif
+#ifdef VK_KHR_surface_protected_capabilities
+void transform_tohost_VkSurfaceProtectedCapabilitiesKHR(
+    ResourceTracker* resourceTracker,
+    VkSurfaceProtectedCapabilitiesKHR* toTransform);
+
+void transform_fromhost_VkSurfaceProtectedCapabilitiesKHR(
+    ResourceTracker* resourceTracker,
+    VkSurfaceProtectedCapabilitiesKHR* toTransform);
+
+#endif
+#ifdef VK_KHR_separate_depth_stencil_layouts
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures, transform_tohost_VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures, transform_fromhost_VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkAttachmentReferenceStencilLayout, transform_tohost_VkAttachmentReferenceStencilLayoutKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkAttachmentReferenceStencilLayout, transform_fromhost_VkAttachmentReferenceStencilLayoutKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkAttachmentDescriptionStencilLayout, transform_tohost_VkAttachmentDescriptionStencilLayoutKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkAttachmentDescriptionStencilLayout, transform_fromhost_VkAttachmentDescriptionStencilLayoutKHR);
+
+#endif
+#ifdef VK_KHR_uniform_buffer_standard_layout
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceUniformBufferStandardLayoutFeatures, transform_tohost_VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceUniformBufferStandardLayoutFeatures, transform_fromhost_VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR);
+
+#endif
+#ifdef VK_KHR_buffer_device_address
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceBufferDeviceAddressFeatures, transform_tohost_VkPhysicalDeviceBufferDeviceAddressFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceBufferDeviceAddressFeatures, transform_fromhost_VkPhysicalDeviceBufferDeviceAddressFeaturesKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkBufferDeviceAddressInfo, transform_tohost_VkBufferDeviceAddressInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkBufferDeviceAddressInfo, transform_fromhost_VkBufferDeviceAddressInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkBufferOpaqueCaptureAddressCreateInfo, transform_tohost_VkBufferOpaqueCaptureAddressCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkBufferOpaqueCaptureAddressCreateInfo, transform_fromhost_VkBufferOpaqueCaptureAddressCreateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkMemoryOpaqueCaptureAddressAllocateInfo, transform_tohost_VkMemoryOpaqueCaptureAddressAllocateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkMemoryOpaqueCaptureAddressAllocateInfo, transform_fromhost_VkMemoryOpaqueCaptureAddressAllocateInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkDeviceMemoryOpaqueCaptureAddressInfo, transform_tohost_VkDeviceMemoryOpaqueCaptureAddressInfoKHR);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkDeviceMemoryOpaqueCaptureAddressInfo, transform_fromhost_VkDeviceMemoryOpaqueCaptureAddressInfoKHR);
+
+#endif
+#ifdef VK_KHR_deferred_host_operations
+#endif
+#ifdef VK_KHR_pipeline_executable_properties
+void transform_tohost_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* toTransform);
+
+void transform_fromhost_VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR* toTransform);
+
+void transform_tohost_VkPipelineInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineInfoKHR* toTransform);
+
+void transform_fromhost_VkPipelineInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineInfoKHR* toTransform);
+
+void transform_tohost_VkPipelineExecutablePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutablePropertiesKHR* toTransform);
+
+void transform_fromhost_VkPipelineExecutablePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutablePropertiesKHR* toTransform);
+
+void transform_tohost_VkPipelineExecutableInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutableInfoKHR* toTransform);
+
+void transform_fromhost_VkPipelineExecutableInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutableInfoKHR* toTransform);
+
+void transform_tohost_VkPipelineExecutableStatisticValueKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutableStatisticValueKHR* toTransform);
+
+void transform_fromhost_VkPipelineExecutableStatisticValueKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutableStatisticValueKHR* toTransform);
+
+void transform_tohost_VkPipelineExecutableStatisticKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutableStatisticKHR* toTransform);
+
+void transform_fromhost_VkPipelineExecutableStatisticKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutableStatisticKHR* toTransform);
+
+void transform_tohost_VkPipelineExecutableInternalRepresentationKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutableInternalRepresentationKHR* toTransform);
+
+void transform_fromhost_VkPipelineExecutableInternalRepresentationKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineExecutableInternalRepresentationKHR* toTransform);
+
+#endif
+#ifdef VK_KHR_pipeline_library
+void transform_tohost_VkPipelineLibraryCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineLibraryCreateInfoKHR* toTransform);
+
+void transform_fromhost_VkPipelineLibraryCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkPipelineLibraryCreateInfoKHR* toTransform);
+
+#endif
+#ifdef VK_KHR_shader_non_semantic_info
+#endif
+#ifdef VK_KHR_copy_commands2
+void transform_tohost_VkBufferCopy2KHR(
+    ResourceTracker* resourceTracker,
+    VkBufferCopy2KHR* toTransform);
+
+void transform_fromhost_VkBufferCopy2KHR(
+    ResourceTracker* resourceTracker,
+    VkBufferCopy2KHR* toTransform);
+
+void transform_tohost_VkCopyBufferInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkCopyBufferInfo2KHR* toTransform);
+
+void transform_fromhost_VkCopyBufferInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkCopyBufferInfo2KHR* toTransform);
+
+void transform_tohost_VkImageCopy2KHR(
+    ResourceTracker* resourceTracker,
+    VkImageCopy2KHR* toTransform);
+
+void transform_fromhost_VkImageCopy2KHR(
+    ResourceTracker* resourceTracker,
+    VkImageCopy2KHR* toTransform);
+
+void transform_tohost_VkCopyImageInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkCopyImageInfo2KHR* toTransform);
+
+void transform_fromhost_VkCopyImageInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkCopyImageInfo2KHR* toTransform);
+
+void transform_tohost_VkBufferImageCopy2KHR(
+    ResourceTracker* resourceTracker,
+    VkBufferImageCopy2KHR* toTransform);
+
+void transform_fromhost_VkBufferImageCopy2KHR(
+    ResourceTracker* resourceTracker,
+    VkBufferImageCopy2KHR* toTransform);
+
+void transform_tohost_VkCopyBufferToImageInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkCopyBufferToImageInfo2KHR* toTransform);
+
+void transform_fromhost_VkCopyBufferToImageInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkCopyBufferToImageInfo2KHR* toTransform);
+
+void transform_tohost_VkCopyImageToBufferInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkCopyImageToBufferInfo2KHR* toTransform);
+
+void transform_fromhost_VkCopyImageToBufferInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkCopyImageToBufferInfo2KHR* toTransform);
+
+void transform_tohost_VkImageBlit2KHR(
+    ResourceTracker* resourceTracker,
+    VkImageBlit2KHR* toTransform);
+
+void transform_fromhost_VkImageBlit2KHR(
+    ResourceTracker* resourceTracker,
+    VkImageBlit2KHR* toTransform);
+
+void transform_tohost_VkBlitImageInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkBlitImageInfo2KHR* toTransform);
+
+void transform_fromhost_VkBlitImageInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkBlitImageInfo2KHR* toTransform);
+
+void transform_tohost_VkImageResolve2KHR(
+    ResourceTracker* resourceTracker,
+    VkImageResolve2KHR* toTransform);
+
+void transform_fromhost_VkImageResolve2KHR(
+    ResourceTracker* resourceTracker,
+    VkImageResolve2KHR* toTransform);
+
+void transform_tohost_VkResolveImageInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkResolveImageInfo2KHR* toTransform);
+
+void transform_fromhost_VkResolveImageInfo2KHR(
+    ResourceTracker* resourceTracker,
+    VkResolveImageInfo2KHR* toTransform);
 
 #endif
 #ifdef VK_ANDROID_native_buffer
@@ -2179,6 +3271,50 @@
     VkDedicatedAllocationMemoryAllocateInfoNV* toTransform);
 
 #endif
+#ifdef VK_EXT_transform_feedback
+void transform_tohost_VkPhysicalDeviceTransformFeedbackFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTransformFeedbackFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceTransformFeedbackFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTransformFeedbackFeaturesEXT* toTransform);
+
+void transform_tohost_VkPhysicalDeviceTransformFeedbackPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTransformFeedbackPropertiesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceTransformFeedbackPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTransformFeedbackPropertiesEXT* toTransform);
+
+void transform_tohost_VkPipelineRasterizationStateStreamCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineRasterizationStateStreamCreateInfoEXT* toTransform);
+
+void transform_fromhost_VkPipelineRasterizationStateStreamCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineRasterizationStateStreamCreateInfoEXT* toTransform);
+
+#endif
+#ifdef VK_NVX_image_view_handle
+void transform_tohost_VkImageViewHandleInfoNVX(
+    ResourceTracker* resourceTracker,
+    VkImageViewHandleInfoNVX* toTransform);
+
+void transform_fromhost_VkImageViewHandleInfoNVX(
+    ResourceTracker* resourceTracker,
+    VkImageViewHandleInfoNVX* toTransform);
+
+void transform_tohost_VkImageViewAddressPropertiesNVX(
+    ResourceTracker* resourceTracker,
+    VkImageViewAddressPropertiesNVX* toTransform);
+
+void transform_fromhost_VkImageViewAddressPropertiesNVX(
+    ResourceTracker* resourceTracker,
+    VkImageViewAddressPropertiesNVX* toTransform);
+
+#endif
 #ifdef VK_AMD_draw_indirect_count
 #endif
 #ifdef VK_AMD_negative_viewport_height
@@ -2217,6 +3353,26 @@
 #endif
 #ifdef VK_AMD_shader_image_load_store_lod
 #endif
+#ifdef VK_GGP_stream_descriptor_surface
+void transform_tohost_VkStreamDescriptorSurfaceCreateInfoGGP(
+    ResourceTracker* resourceTracker,
+    VkStreamDescriptorSurfaceCreateInfoGGP* toTransform);
+
+void transform_fromhost_VkStreamDescriptorSurfaceCreateInfoGGP(
+    ResourceTracker* resourceTracker,
+    VkStreamDescriptorSurfaceCreateInfoGGP* toTransform);
+
+#endif
+#ifdef VK_NV_corner_sampled_image
+void transform_tohost_VkPhysicalDeviceCornerSampledImageFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCornerSampledImageFeaturesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceCornerSampledImageFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCornerSampledImageFeaturesNV* toTransform);
+
+#endif
 #ifdef VK_IMG_format_pvrtc
 #endif
 #ifdef VK_NV_external_memory_capabilities
@@ -2299,6 +3455,34 @@
 #endif
 #ifdef VK_EXT_shader_subgroup_vote
 #endif
+#ifdef VK_EXT_texture_compression_astc_hdr
+void transform_tohost_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_astc_decode_mode
+void transform_tohost_VkImageViewASTCDecodeModeEXT(
+    ResourceTracker* resourceTracker,
+    VkImageViewASTCDecodeModeEXT* toTransform);
+
+void transform_fromhost_VkImageViewASTCDecodeModeEXT(
+    ResourceTracker* resourceTracker,
+    VkImageViewASTCDecodeModeEXT* toTransform);
+
+void transform_tohost_VkPhysicalDeviceASTCDecodeFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceASTCDecodeFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceASTCDecodeFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceASTCDecodeFeaturesEXT* toTransform);
+
+#endif
 #ifdef VK_EXT_conditional_rendering
 void transform_tohost_VkConditionalRenderingBeginInfoEXT(
     ResourceTracker* resourceTracker,
@@ -2325,120 +3509,6 @@
     VkCommandBufferInheritanceConditionalRenderingInfoEXT* toTransform);
 
 #endif
-#ifdef VK_NVX_device_generated_commands
-void transform_tohost_VkDeviceGeneratedCommandsFeaturesNVX(
-    ResourceTracker* resourceTracker,
-    VkDeviceGeneratedCommandsFeaturesNVX* toTransform);
-
-void transform_fromhost_VkDeviceGeneratedCommandsFeaturesNVX(
-    ResourceTracker* resourceTracker,
-    VkDeviceGeneratedCommandsFeaturesNVX* toTransform);
-
-void transform_tohost_VkDeviceGeneratedCommandsLimitsNVX(
-    ResourceTracker* resourceTracker,
-    VkDeviceGeneratedCommandsLimitsNVX* toTransform);
-
-void transform_fromhost_VkDeviceGeneratedCommandsLimitsNVX(
-    ResourceTracker* resourceTracker,
-    VkDeviceGeneratedCommandsLimitsNVX* toTransform);
-
-void transform_tohost_VkIndirectCommandsTokenNVX(
-    ResourceTracker* resourceTracker,
-    VkIndirectCommandsTokenNVX* toTransform);
-
-void transform_fromhost_VkIndirectCommandsTokenNVX(
-    ResourceTracker* resourceTracker,
-    VkIndirectCommandsTokenNVX* toTransform);
-
-void transform_tohost_VkIndirectCommandsLayoutTokenNVX(
-    ResourceTracker* resourceTracker,
-    VkIndirectCommandsLayoutTokenNVX* toTransform);
-
-void transform_fromhost_VkIndirectCommandsLayoutTokenNVX(
-    ResourceTracker* resourceTracker,
-    VkIndirectCommandsLayoutTokenNVX* toTransform);
-
-void transform_tohost_VkIndirectCommandsLayoutCreateInfoNVX(
-    ResourceTracker* resourceTracker,
-    VkIndirectCommandsLayoutCreateInfoNVX* toTransform);
-
-void transform_fromhost_VkIndirectCommandsLayoutCreateInfoNVX(
-    ResourceTracker* resourceTracker,
-    VkIndirectCommandsLayoutCreateInfoNVX* toTransform);
-
-void transform_tohost_VkCmdProcessCommandsInfoNVX(
-    ResourceTracker* resourceTracker,
-    VkCmdProcessCommandsInfoNVX* toTransform);
-
-void transform_fromhost_VkCmdProcessCommandsInfoNVX(
-    ResourceTracker* resourceTracker,
-    VkCmdProcessCommandsInfoNVX* toTransform);
-
-void transform_tohost_VkCmdReserveSpaceForCommandsInfoNVX(
-    ResourceTracker* resourceTracker,
-    VkCmdReserveSpaceForCommandsInfoNVX* toTransform);
-
-void transform_fromhost_VkCmdReserveSpaceForCommandsInfoNVX(
-    ResourceTracker* resourceTracker,
-    VkCmdReserveSpaceForCommandsInfoNVX* toTransform);
-
-void transform_tohost_VkObjectTableCreateInfoNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableCreateInfoNVX* toTransform);
-
-void transform_fromhost_VkObjectTableCreateInfoNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableCreateInfoNVX* toTransform);
-
-void transform_tohost_VkObjectTableEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableEntryNVX* toTransform);
-
-void transform_fromhost_VkObjectTableEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableEntryNVX* toTransform);
-
-void transform_tohost_VkObjectTablePipelineEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTablePipelineEntryNVX* toTransform);
-
-void transform_fromhost_VkObjectTablePipelineEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTablePipelineEntryNVX* toTransform);
-
-void transform_tohost_VkObjectTableDescriptorSetEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableDescriptorSetEntryNVX* toTransform);
-
-void transform_fromhost_VkObjectTableDescriptorSetEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableDescriptorSetEntryNVX* toTransform);
-
-void transform_tohost_VkObjectTableVertexBufferEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableVertexBufferEntryNVX* toTransform);
-
-void transform_fromhost_VkObjectTableVertexBufferEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableVertexBufferEntryNVX* toTransform);
-
-void transform_tohost_VkObjectTableIndexBufferEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableIndexBufferEntryNVX* toTransform);
-
-void transform_fromhost_VkObjectTableIndexBufferEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTableIndexBufferEntryNVX* toTransform);
-
-void transform_tohost_VkObjectTablePushConstantEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTablePushConstantEntryNVX* toTransform);
-
-void transform_fromhost_VkObjectTablePushConstantEntryNVX(
-    ResourceTracker* resourceTracker,
-    VkObjectTablePushConstantEntryNVX* toTransform);
-
-#endif
 #ifdef VK_NV_clip_space_w_scaling
 void transform_tohost_VkViewportWScalingNV(
     ResourceTracker* resourceTracker,
@@ -2609,6 +3679,24 @@
     VkPipelineRasterizationConservativeStateCreateInfoEXT* toTransform);
 
 #endif
+#ifdef VK_EXT_depth_clip_enable
+void transform_tohost_VkPhysicalDeviceDepthClipEnableFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDepthClipEnableFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceDepthClipEnableFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDepthClipEnableFeaturesEXT* toTransform);
+
+void transform_tohost_VkPipelineRasterizationDepthClipStateCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineRasterizationDepthClipStateCreateInfoEXT* toTransform);
+
+void transform_fromhost_VkPipelineRasterizationDepthClipStateCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineRasterizationDepthClipStateCreateInfoEXT* toTransform);
+
+#endif
 #ifdef VK_EXT_swapchain_colorspace
 #endif
 #ifdef VK_EXT_hdr_metadata
@@ -2649,27 +3737,13 @@
     VkMacOSSurfaceCreateInfoMVK* toTransform);
 
 #endif
+#ifdef VK_MVK_moltenvk
+#endif
 #ifdef VK_EXT_external_memory_dma_buf
 #endif
 #ifdef VK_EXT_queue_family_foreign
 #endif
 #ifdef VK_EXT_debug_utils
-void transform_tohost_VkDebugUtilsObjectNameInfoEXT(
-    ResourceTracker* resourceTracker,
-    VkDebugUtilsObjectNameInfoEXT* toTransform);
-
-void transform_fromhost_VkDebugUtilsObjectNameInfoEXT(
-    ResourceTracker* resourceTracker,
-    VkDebugUtilsObjectNameInfoEXT* toTransform);
-
-void transform_tohost_VkDebugUtilsObjectTagInfoEXT(
-    ResourceTracker* resourceTracker,
-    VkDebugUtilsObjectTagInfoEXT* toTransform);
-
-void transform_fromhost_VkDebugUtilsObjectTagInfoEXT(
-    ResourceTracker* resourceTracker,
-    VkDebugUtilsObjectTagInfoEXT* toTransform);
-
 void transform_tohost_VkDebugUtilsLabelEXT(
     ResourceTracker* resourceTracker,
     VkDebugUtilsLabelEXT* toTransform);
@@ -2678,6 +3752,14 @@
     ResourceTracker* resourceTracker,
     VkDebugUtilsLabelEXT* toTransform);
 
+void transform_tohost_VkDebugUtilsObjectNameInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDebugUtilsObjectNameInfoEXT* toTransform);
+
+void transform_fromhost_VkDebugUtilsObjectNameInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDebugUtilsObjectNameInfoEXT* toTransform);
+
 void transform_tohost_VkDebugUtilsMessengerCallbackDataEXT(
     ResourceTracker* resourceTracker,
     VkDebugUtilsMessengerCallbackDataEXT* toTransform);
@@ -2694,6 +3776,14 @@
     ResourceTracker* resourceTracker,
     VkDebugUtilsMessengerCreateInfoEXT* toTransform);
 
+void transform_tohost_VkDebugUtilsObjectTagInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDebugUtilsObjectTagInfoEXT* toTransform);
+
+void transform_fromhost_VkDebugUtilsObjectTagInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDebugUtilsObjectTagInfoEXT* toTransform);
+
 #endif
 #ifdef VK_ANDROID_external_memory_android_hardware_buffer
 void transform_tohost_VkAndroidHardwareBufferUsageANDROID(
@@ -2746,21 +3836,13 @@
 
 #endif
 #ifdef VK_EXT_sampler_filter_minmax
-void transform_tohost_VkSamplerReductionModeCreateInfoEXT(
-    ResourceTracker* resourceTracker,
-    VkSamplerReductionModeCreateInfoEXT* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkSamplerReductionModeCreateInfo, transform_tohost_VkSamplerReductionModeCreateInfoEXT);
 
-void transform_fromhost_VkSamplerReductionModeCreateInfoEXT(
-    ResourceTracker* resourceTracker,
-    VkSamplerReductionModeCreateInfoEXT* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkSamplerReductionModeCreateInfo, transform_fromhost_VkSamplerReductionModeCreateInfoEXT);
 
-void transform_tohost_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(
-    ResourceTracker* resourceTracker,
-    VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceSamplerFilterMinmaxProperties, transform_tohost_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT);
 
-void transform_fromhost_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT(
-    ResourceTracker* resourceTracker,
-    VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceSamplerFilterMinmaxProperties, transform_fromhost_VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT);
 
 #endif
 #ifdef VK_AMD_gpu_shader_int16
@@ -2769,6 +3851,40 @@
 #endif
 #ifdef VK_AMD_shader_fragment_mask
 #endif
+#ifdef VK_EXT_inline_uniform_block
+void transform_tohost_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceInlineUniformBlockFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceInlineUniformBlockFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceInlineUniformBlockFeaturesEXT* toTransform);
+
+void transform_tohost_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceInlineUniformBlockPropertiesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceInlineUniformBlockPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceInlineUniformBlockPropertiesEXT* toTransform);
+
+void transform_tohost_VkWriteDescriptorSetInlineUniformBlockEXT(
+    ResourceTracker* resourceTracker,
+    VkWriteDescriptorSetInlineUniformBlockEXT* toTransform);
+
+void transform_fromhost_VkWriteDescriptorSetInlineUniformBlockEXT(
+    ResourceTracker* resourceTracker,
+    VkWriteDescriptorSetInlineUniformBlockEXT* toTransform);
+
+void transform_tohost_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDescriptorPoolInlineUniformBlockCreateInfoEXT* toTransform);
+
+void transform_fromhost_VkDescriptorPoolInlineUniformBlockCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDescriptorPoolInlineUniformBlockCreateInfoEXT* toTransform);
+
+#endif
 #ifdef VK_EXT_shader_stencil_export
 #endif
 #ifdef VK_EXT_sample_locations
@@ -2885,8 +4001,76 @@
 #endif
 #ifdef VK_NV_fill_rectangle
 #endif
+#ifdef VK_NV_shader_sm_builtins
+void transform_tohost_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceShaderSMBuiltinsPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderSMBuiltinsPropertiesNV* toTransform);
+
+void transform_tohost_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceShaderSMBuiltinsFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderSMBuiltinsFeaturesNV* toTransform);
+
+#endif
 #ifdef VK_EXT_post_depth_coverage
 #endif
+#ifdef VK_EXT_image_drm_format_modifier
+void transform_tohost_VkDrmFormatModifierPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkDrmFormatModifierPropertiesEXT* toTransform);
+
+void transform_fromhost_VkDrmFormatModifierPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkDrmFormatModifierPropertiesEXT* toTransform);
+
+void transform_tohost_VkDrmFormatModifierPropertiesListEXT(
+    ResourceTracker* resourceTracker,
+    VkDrmFormatModifierPropertiesListEXT* toTransform);
+
+void transform_fromhost_VkDrmFormatModifierPropertiesListEXT(
+    ResourceTracker* resourceTracker,
+    VkDrmFormatModifierPropertiesListEXT* toTransform);
+
+void transform_tohost_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceImageDrmFormatModifierInfoEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceImageDrmFormatModifierInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceImageDrmFormatModifierInfoEXT* toTransform);
+
+void transform_tohost_VkImageDrmFormatModifierListCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkImageDrmFormatModifierListCreateInfoEXT* toTransform);
+
+void transform_fromhost_VkImageDrmFormatModifierListCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkImageDrmFormatModifierListCreateInfoEXT* toTransform);
+
+void transform_tohost_VkImageDrmFormatModifierExplicitCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkImageDrmFormatModifierExplicitCreateInfoEXT* toTransform);
+
+void transform_fromhost_VkImageDrmFormatModifierExplicitCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkImageDrmFormatModifierExplicitCreateInfoEXT* toTransform);
+
+void transform_tohost_VkImageDrmFormatModifierPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkImageDrmFormatModifierPropertiesEXT* toTransform);
+
+void transform_fromhost_VkImageDrmFormatModifierPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkImageDrmFormatModifierPropertiesEXT* toTransform);
+
+#endif
 #ifdef VK_EXT_validation_cache
 void transform_tohost_VkValidationCacheCreateInfoEXT(
     ResourceTracker* resourceTracker,
@@ -2906,49 +4090,259 @@
 
 #endif
 #ifdef VK_EXT_descriptor_indexing
-void transform_tohost_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(
-    ResourceTracker* resourceTracker,
-    VkDescriptorSetLayoutBindingFlagsCreateInfoEXT* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkDescriptorSetLayoutBindingFlagsCreateInfo, transform_tohost_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT);
 
-void transform_fromhost_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT(
-    ResourceTracker* resourceTracker,
-    VkDescriptorSetLayoutBindingFlagsCreateInfoEXT* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkDescriptorSetLayoutBindingFlagsCreateInfo, transform_fromhost_VkDescriptorSetLayoutBindingFlagsCreateInfoEXT);
 
-void transform_tohost_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(
-    ResourceTracker* resourceTracker,
-    VkPhysicalDeviceDescriptorIndexingFeaturesEXT* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceDescriptorIndexingFeatures, transform_tohost_VkPhysicalDeviceDescriptorIndexingFeaturesEXT);
 
-void transform_fromhost_VkPhysicalDeviceDescriptorIndexingFeaturesEXT(
-    ResourceTracker* resourceTracker,
-    VkPhysicalDeviceDescriptorIndexingFeaturesEXT* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceDescriptorIndexingFeatures, transform_fromhost_VkPhysicalDeviceDescriptorIndexingFeaturesEXT);
 
-void transform_tohost_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(
-    ResourceTracker* resourceTracker,
-    VkPhysicalDeviceDescriptorIndexingPropertiesEXT* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceDescriptorIndexingProperties, transform_tohost_VkPhysicalDeviceDescriptorIndexingPropertiesEXT);
 
-void transform_fromhost_VkPhysicalDeviceDescriptorIndexingPropertiesEXT(
-    ResourceTracker* resourceTracker,
-    VkPhysicalDeviceDescriptorIndexingPropertiesEXT* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceDescriptorIndexingProperties, transform_fromhost_VkPhysicalDeviceDescriptorIndexingPropertiesEXT);
 
-void transform_tohost_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(
-    ResourceTracker* resourceTracker,
-    VkDescriptorSetVariableDescriptorCountAllocateInfoEXT* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkDescriptorSetVariableDescriptorCountAllocateInfo, transform_tohost_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT);
 
-void transform_fromhost_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT(
-    ResourceTracker* resourceTracker,
-    VkDescriptorSetVariableDescriptorCountAllocateInfoEXT* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkDescriptorSetVariableDescriptorCountAllocateInfo, transform_fromhost_VkDescriptorSetVariableDescriptorCountAllocateInfoEXT);
 
-void transform_tohost_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(
-    ResourceTracker* resourceTracker,
-    VkDescriptorSetVariableDescriptorCountLayoutSupportEXT* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkDescriptorSetVariableDescriptorCountLayoutSupport, transform_tohost_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT);
 
-void transform_fromhost_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT(
-    ResourceTracker* resourceTracker,
-    VkDescriptorSetVariableDescriptorCountLayoutSupportEXT* toTransform);
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkDescriptorSetVariableDescriptorCountLayoutSupport, transform_fromhost_VkDescriptorSetVariableDescriptorCountLayoutSupportEXT);
 
 #endif
 #ifdef VK_EXT_shader_viewport_index_layer
 #endif
+#ifdef VK_NV_shading_rate_image
+void transform_tohost_VkShadingRatePaletteNV(
+    ResourceTracker* resourceTracker,
+    VkShadingRatePaletteNV* toTransform);
+
+void transform_fromhost_VkShadingRatePaletteNV(
+    ResourceTracker* resourceTracker,
+    VkShadingRatePaletteNV* toTransform);
+
+void transform_tohost_VkPipelineViewportShadingRateImageStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineViewportShadingRateImageStateCreateInfoNV* toTransform);
+
+void transform_fromhost_VkPipelineViewportShadingRateImageStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineViewportShadingRateImageStateCreateInfoNV* toTransform);
+
+void transform_tohost_VkPhysicalDeviceShadingRateImageFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShadingRateImageFeaturesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceShadingRateImageFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShadingRateImageFeaturesNV* toTransform);
+
+void transform_tohost_VkPhysicalDeviceShadingRateImagePropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShadingRateImagePropertiesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceShadingRateImagePropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShadingRateImagePropertiesNV* toTransform);
+
+void transform_tohost_VkCoarseSampleLocationNV(
+    ResourceTracker* resourceTracker,
+    VkCoarseSampleLocationNV* toTransform);
+
+void transform_fromhost_VkCoarseSampleLocationNV(
+    ResourceTracker* resourceTracker,
+    VkCoarseSampleLocationNV* toTransform);
+
+void transform_tohost_VkCoarseSampleOrderCustomNV(
+    ResourceTracker* resourceTracker,
+    VkCoarseSampleOrderCustomNV* toTransform);
+
+void transform_fromhost_VkCoarseSampleOrderCustomNV(
+    ResourceTracker* resourceTracker,
+    VkCoarseSampleOrderCustomNV* toTransform);
+
+void transform_tohost_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* toTransform);
+
+void transform_fromhost_VkPipelineViewportCoarseSampleOrderStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineViewportCoarseSampleOrderStateCreateInfoNV* toTransform);
+
+#endif
+#ifdef VK_NV_ray_tracing
+void transform_tohost_VkRayTracingShaderGroupCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkRayTracingShaderGroupCreateInfoNV* toTransform);
+
+void transform_fromhost_VkRayTracingShaderGroupCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkRayTracingShaderGroupCreateInfoNV* toTransform);
+
+void transform_tohost_VkRayTracingPipelineCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkRayTracingPipelineCreateInfoNV* toTransform);
+
+void transform_fromhost_VkRayTracingPipelineCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkRayTracingPipelineCreateInfoNV* toTransform);
+
+void transform_tohost_VkGeometryTrianglesNV(
+    ResourceTracker* resourceTracker,
+    VkGeometryTrianglesNV* toTransform);
+
+void transform_fromhost_VkGeometryTrianglesNV(
+    ResourceTracker* resourceTracker,
+    VkGeometryTrianglesNV* toTransform);
+
+void transform_tohost_VkGeometryAABBNV(
+    ResourceTracker* resourceTracker,
+    VkGeometryAABBNV* toTransform);
+
+void transform_fromhost_VkGeometryAABBNV(
+    ResourceTracker* resourceTracker,
+    VkGeometryAABBNV* toTransform);
+
+void transform_tohost_VkGeometryDataNV(
+    ResourceTracker* resourceTracker,
+    VkGeometryDataNV* toTransform);
+
+void transform_fromhost_VkGeometryDataNV(
+    ResourceTracker* resourceTracker,
+    VkGeometryDataNV* toTransform);
+
+void transform_tohost_VkGeometryNV(
+    ResourceTracker* resourceTracker,
+    VkGeometryNV* toTransform);
+
+void transform_fromhost_VkGeometryNV(
+    ResourceTracker* resourceTracker,
+    VkGeometryNV* toTransform);
+
+void transform_tohost_VkAccelerationStructureInfoNV(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureInfoNV* toTransform);
+
+void transform_fromhost_VkAccelerationStructureInfoNV(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureInfoNV* toTransform);
+
+void transform_tohost_VkAccelerationStructureCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureCreateInfoNV* toTransform);
+
+void transform_fromhost_VkAccelerationStructureCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureCreateInfoNV* toTransform);
+
+void transform_tohost_VkBindAccelerationStructureMemoryInfoNV(
+    ResourceTracker* resourceTracker,
+    VkBindAccelerationStructureMemoryInfoNV* toTransform);
+
+void transform_fromhost_VkBindAccelerationStructureMemoryInfoNV(
+    ResourceTracker* resourceTracker,
+    VkBindAccelerationStructureMemoryInfoNV* toTransform);
+
+void transform_tohost_VkWriteDescriptorSetAccelerationStructureNV(
+    ResourceTracker* resourceTracker,
+    VkWriteDescriptorSetAccelerationStructureNV* toTransform);
+
+void transform_fromhost_VkWriteDescriptorSetAccelerationStructureNV(
+    ResourceTracker* resourceTracker,
+    VkWriteDescriptorSetAccelerationStructureNV* toTransform);
+
+void transform_tohost_VkAccelerationStructureMemoryRequirementsInfoNV(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureMemoryRequirementsInfoNV* toTransform);
+
+void transform_fromhost_VkAccelerationStructureMemoryRequirementsInfoNV(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureMemoryRequirementsInfoNV* toTransform);
+
+void transform_tohost_VkPhysicalDeviceRayTracingPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRayTracingPropertiesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceRayTracingPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRayTracingPropertiesNV* toTransform);
+
+void transform_tohost_VkTransformMatrixKHR(
+    ResourceTracker* resourceTracker,
+    VkTransformMatrixKHR* toTransform);
+
+void transform_fromhost_VkTransformMatrixKHR(
+    ResourceTracker* resourceTracker,
+    VkTransformMatrixKHR* toTransform);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkTransformMatrixKHR, transform_tohost_VkTransformMatrixNV);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkTransformMatrixKHR, transform_fromhost_VkTransformMatrixNV);
+
+void transform_tohost_VkAabbPositionsKHR(
+    ResourceTracker* resourceTracker,
+    VkAabbPositionsKHR* toTransform);
+
+void transform_fromhost_VkAabbPositionsKHR(
+    ResourceTracker* resourceTracker,
+    VkAabbPositionsKHR* toTransform);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkAabbPositionsKHR, transform_tohost_VkAabbPositionsNV);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkAabbPositionsKHR, transform_fromhost_VkAabbPositionsNV);
+
+void transform_tohost_VkAccelerationStructureInstanceKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureInstanceKHR* toTransform);
+
+void transform_fromhost_VkAccelerationStructureInstanceKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureInstanceKHR* toTransform);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkAccelerationStructureInstanceKHR, transform_tohost_VkAccelerationStructureInstanceNV);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkAccelerationStructureInstanceKHR, transform_fromhost_VkAccelerationStructureInstanceNV);
+
+#endif
+#ifdef VK_NV_representative_fragment_test
+void transform_tohost_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV* toTransform);
+
+void transform_tohost_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineRepresentativeFragmentTestStateCreateInfoNV* toTransform);
+
+void transform_fromhost_VkPipelineRepresentativeFragmentTestStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineRepresentativeFragmentTestStateCreateInfoNV* toTransform);
+
+#endif
+#ifdef VK_EXT_filter_cubic
+void transform_tohost_VkPhysicalDeviceImageViewImageFormatInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceImageViewImageFormatInfoEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceImageViewImageFormatInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceImageViewImageFormatInfoEXT* toTransform);
+
+void transform_tohost_VkFilterCubicImageViewImageFormatPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkFilterCubicImageViewImageFormatPropertiesEXT* toTransform);
+
+void transform_fromhost_VkFilterCubicImageViewImageFormatPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkFilterCubicImageViewImageFormatPropertiesEXT* toTransform);
+
+#endif
+#ifdef VK_QCOM_render_pass_shader_resolve
+#endif
 #ifdef VK_EXT_global_priority
 void transform_tohost_VkDeviceQueueGlobalPriorityCreateInfoEXT(
     ResourceTracker* resourceTracker,
@@ -2987,6 +4381,26 @@
 #endif
 #ifdef VK_AMD_buffer_marker
 #endif
+#ifdef VK_AMD_pipeline_compiler_control
+void transform_tohost_VkPipelineCompilerControlCreateInfoAMD(
+    ResourceTracker* resourceTracker,
+    VkPipelineCompilerControlCreateInfoAMD* toTransform);
+
+void transform_fromhost_VkPipelineCompilerControlCreateInfoAMD(
+    ResourceTracker* resourceTracker,
+    VkPipelineCompilerControlCreateInfoAMD* toTransform);
+
+#endif
+#ifdef VK_EXT_calibrated_timestamps
+void transform_tohost_VkCalibratedTimestampInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkCalibratedTimestampInfoEXT* toTransform);
+
+void transform_fromhost_VkCalibratedTimestampInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkCalibratedTimestampInfoEXT* toTransform);
+
+#endif
 #ifdef VK_AMD_shader_core_properties
 void transform_tohost_VkPhysicalDeviceShaderCorePropertiesAMD(
     ResourceTracker* resourceTracker,
@@ -2997,6 +4411,16 @@
     VkPhysicalDeviceShaderCorePropertiesAMD* toTransform);
 
 #endif
+#ifdef VK_AMD_memory_overallocation_behavior
+void transform_tohost_VkDeviceMemoryOverallocationCreateInfoAMD(
+    ResourceTracker* resourceTracker,
+    VkDeviceMemoryOverallocationCreateInfoAMD* toTransform);
+
+void transform_fromhost_VkDeviceMemoryOverallocationCreateInfoAMD(
+    ResourceTracker* resourceTracker,
+    VkDeviceMemoryOverallocationCreateInfoAMD* toTransform);
+
+#endif
 #ifdef VK_EXT_vertex_attribute_divisor
 void transform_tohost_VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT(
     ResourceTracker* resourceTracker,
@@ -3022,9 +4446,119 @@
     ResourceTracker* resourceTracker,
     VkPipelineVertexInputDivisorStateCreateInfoEXT* toTransform);
 
+void transform_tohost_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT* toTransform);
+
+#endif
+#ifdef VK_GGP_frame_token
+void transform_tohost_VkPresentFrameTokenGGP(
+    ResourceTracker* resourceTracker,
+    VkPresentFrameTokenGGP* toTransform);
+
+void transform_fromhost_VkPresentFrameTokenGGP(
+    ResourceTracker* resourceTracker,
+    VkPresentFrameTokenGGP* toTransform);
+
+#endif
+#ifdef VK_EXT_pipeline_creation_feedback
+void transform_tohost_VkPipelineCreationFeedbackEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineCreationFeedbackEXT* toTransform);
+
+void transform_fromhost_VkPipelineCreationFeedbackEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineCreationFeedbackEXT* toTransform);
+
+void transform_tohost_VkPipelineCreationFeedbackCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineCreationFeedbackCreateInfoEXT* toTransform);
+
+void transform_fromhost_VkPipelineCreationFeedbackCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineCreationFeedbackCreateInfoEXT* toTransform);
+
 #endif
 #ifdef VK_NV_shader_subgroup_partitioned
 #endif
+#ifdef VK_NV_compute_shader_derivatives
+void transform_tohost_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceComputeShaderDerivativesFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceComputeShaderDerivativesFeaturesNV* toTransform);
+
+#endif
+#ifdef VK_NV_mesh_shader
+void transform_tohost_VkPhysicalDeviceMeshShaderFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMeshShaderFeaturesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceMeshShaderFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMeshShaderFeaturesNV* toTransform);
+
+void transform_tohost_VkPhysicalDeviceMeshShaderPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMeshShaderPropertiesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceMeshShaderPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMeshShaderPropertiesNV* toTransform);
+
+void transform_tohost_VkDrawMeshTasksIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkDrawMeshTasksIndirectCommandNV* toTransform);
+
+void transform_fromhost_VkDrawMeshTasksIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkDrawMeshTasksIndirectCommandNV* toTransform);
+
+#endif
+#ifdef VK_NV_fragment_shader_barycentric
+void transform_tohost_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV* toTransform);
+
+#endif
+#ifdef VK_NV_shader_image_footprint
+void transform_tohost_VkPhysicalDeviceShaderImageFootprintFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderImageFootprintFeaturesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceShaderImageFootprintFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderImageFootprintFeaturesNV* toTransform);
+
+#endif
+#ifdef VK_NV_scissor_exclusive
+void transform_tohost_VkPipelineViewportExclusiveScissorStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineViewportExclusiveScissorStateCreateInfoNV* toTransform);
+
+void transform_fromhost_VkPipelineViewportExclusiveScissorStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineViewportExclusiveScissorStateCreateInfoNV* toTransform);
+
+void transform_tohost_VkPhysicalDeviceExclusiveScissorFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceExclusiveScissorFeaturesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceExclusiveScissorFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceExclusiveScissorFeaturesNV* toTransform);
+
+#endif
 #ifdef VK_NV_device_diagnostic_checkpoints
 void transform_tohost_VkQueueFamilyCheckpointPropertiesNV(
     ResourceTracker* resourceTracker,
@@ -3043,9 +4577,851 @@
     VkCheckpointDataNV* toTransform);
 
 #endif
-#ifdef VK_GOOGLE_address_space
+#ifdef VK_INTEL_shader_integer_functions2
+void transform_tohost_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL* toTransform);
+
 #endif
-#ifdef VK_GOOGLE_color_buffer
+#ifdef VK_INTEL_performance_query
+void transform_tohost_VkPerformanceValueDataINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceValueDataINTEL* toTransform);
+
+void transform_fromhost_VkPerformanceValueDataINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceValueDataINTEL* toTransform);
+
+void transform_tohost_VkPerformanceValueINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceValueINTEL* toTransform);
+
+void transform_fromhost_VkPerformanceValueINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceValueINTEL* toTransform);
+
+void transform_tohost_VkInitializePerformanceApiInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkInitializePerformanceApiInfoINTEL* toTransform);
+
+void transform_fromhost_VkInitializePerformanceApiInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkInitializePerformanceApiInfoINTEL* toTransform);
+
+void transform_tohost_VkQueryPoolPerformanceQueryCreateInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkQueryPoolPerformanceQueryCreateInfoINTEL* toTransform);
+
+void transform_fromhost_VkQueryPoolPerformanceQueryCreateInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkQueryPoolPerformanceQueryCreateInfoINTEL* toTransform);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkQueryPoolPerformanceQueryCreateInfoINTEL, transform_tohost_VkQueryPoolCreateInfoINTEL);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkQueryPoolPerformanceQueryCreateInfoINTEL, transform_fromhost_VkQueryPoolCreateInfoINTEL);
+
+void transform_tohost_VkPerformanceMarkerInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceMarkerInfoINTEL* toTransform);
+
+void transform_fromhost_VkPerformanceMarkerInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceMarkerInfoINTEL* toTransform);
+
+void transform_tohost_VkPerformanceStreamMarkerInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceStreamMarkerInfoINTEL* toTransform);
+
+void transform_fromhost_VkPerformanceStreamMarkerInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceStreamMarkerInfoINTEL* toTransform);
+
+void transform_tohost_VkPerformanceOverrideInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceOverrideInfoINTEL* toTransform);
+
+void transform_fromhost_VkPerformanceOverrideInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceOverrideInfoINTEL* toTransform);
+
+void transform_tohost_VkPerformanceConfigurationAcquireInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceConfigurationAcquireInfoINTEL* toTransform);
+
+void transform_fromhost_VkPerformanceConfigurationAcquireInfoINTEL(
+    ResourceTracker* resourceTracker,
+    VkPerformanceConfigurationAcquireInfoINTEL* toTransform);
+
+#endif
+#ifdef VK_EXT_pci_bus_info
+void transform_tohost_VkPhysicalDevicePCIBusInfoPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePCIBusInfoPropertiesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDevicePCIBusInfoPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePCIBusInfoPropertiesEXT* toTransform);
+
+#endif
+#ifdef VK_AMD_display_native_hdr
+void transform_tohost_VkDisplayNativeHdrSurfaceCapabilitiesAMD(
+    ResourceTracker* resourceTracker,
+    VkDisplayNativeHdrSurfaceCapabilitiesAMD* toTransform);
+
+void transform_fromhost_VkDisplayNativeHdrSurfaceCapabilitiesAMD(
+    ResourceTracker* resourceTracker,
+    VkDisplayNativeHdrSurfaceCapabilitiesAMD* toTransform);
+
+void transform_tohost_VkSwapchainDisplayNativeHdrCreateInfoAMD(
+    ResourceTracker* resourceTracker,
+    VkSwapchainDisplayNativeHdrCreateInfoAMD* toTransform);
+
+void transform_fromhost_VkSwapchainDisplayNativeHdrCreateInfoAMD(
+    ResourceTracker* resourceTracker,
+    VkSwapchainDisplayNativeHdrCreateInfoAMD* toTransform);
+
+#endif
+#ifdef VK_FUCHSIA_imagepipe_surface
+void transform_tohost_VkImagePipeSurfaceCreateInfoFUCHSIA(
+    ResourceTracker* resourceTracker,
+    VkImagePipeSurfaceCreateInfoFUCHSIA* toTransform);
+
+void transform_fromhost_VkImagePipeSurfaceCreateInfoFUCHSIA(
+    ResourceTracker* resourceTracker,
+    VkImagePipeSurfaceCreateInfoFUCHSIA* toTransform);
+
+#endif
+#ifdef VK_EXT_metal_surface
+void transform_tohost_VkMetalSurfaceCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkMetalSurfaceCreateInfoEXT* toTransform);
+
+void transform_fromhost_VkMetalSurfaceCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkMetalSurfaceCreateInfoEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_fragment_density_map
+void transform_tohost_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentDensityMapFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceFragmentDensityMapFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentDensityMapFeaturesEXT* toTransform);
+
+void transform_tohost_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentDensityMapPropertiesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceFragmentDensityMapPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentDensityMapPropertiesEXT* toTransform);
+
+void transform_tohost_VkRenderPassFragmentDensityMapCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkRenderPassFragmentDensityMapCreateInfoEXT* toTransform);
+
+void transform_fromhost_VkRenderPassFragmentDensityMapCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkRenderPassFragmentDensityMapCreateInfoEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_scalar_block_layout
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceScalarBlockLayoutFeatures, transform_tohost_VkPhysicalDeviceScalarBlockLayoutFeaturesEXT);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceScalarBlockLayoutFeatures, transform_fromhost_VkPhysicalDeviceScalarBlockLayoutFeaturesEXT);
+
+#endif
+#ifdef VK_GOOGLE_hlsl_functionality1
+#endif
+#ifdef VK_GOOGLE_decorate_string
+#endif
+#ifdef VK_EXT_subgroup_size_control
+void transform_tohost_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceSubgroupSizeControlFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceSubgroupSizeControlFeaturesEXT* toTransform);
+
+void transform_tohost_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceSubgroupSizeControlPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceSubgroupSizeControlPropertiesEXT* toTransform);
+
+void transform_tohost_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* toTransform);
+
+void transform_fromhost_VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* toTransform);
+
+#endif
+#ifdef VK_AMD_shader_core_properties2
+void transform_tohost_VkPhysicalDeviceShaderCoreProperties2AMD(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderCoreProperties2AMD* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceShaderCoreProperties2AMD(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderCoreProperties2AMD* toTransform);
+
+#endif
+#ifdef VK_AMD_device_coherent_memory
+void transform_tohost_VkPhysicalDeviceCoherentMemoryFeaturesAMD(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCoherentMemoryFeaturesAMD* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceCoherentMemoryFeaturesAMD(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCoherentMemoryFeaturesAMD* toTransform);
+
+#endif
+#ifdef VK_EXT_shader_image_atomic_int64
+void transform_tohost_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_memory_budget
+void transform_tohost_VkPhysicalDeviceMemoryBudgetPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMemoryBudgetPropertiesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceMemoryBudgetPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMemoryBudgetPropertiesEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_memory_priority
+void transform_tohost_VkPhysicalDeviceMemoryPriorityFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMemoryPriorityFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceMemoryPriorityFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceMemoryPriorityFeaturesEXT* toTransform);
+
+void transform_tohost_VkMemoryPriorityAllocateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkMemoryPriorityAllocateInfoEXT* toTransform);
+
+void transform_fromhost_VkMemoryPriorityAllocateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkMemoryPriorityAllocateInfoEXT* toTransform);
+
+#endif
+#ifdef VK_NV_dedicated_allocation_image_aliasing
+void transform_tohost_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV* toTransform);
+
+#endif
+#ifdef VK_EXT_buffer_device_address
+void transform_tohost_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceBufferDeviceAddressFeaturesEXT* toTransform);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT, transform_tohost_VkPhysicalDeviceBufferAddressFeaturesEXT);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceBufferDeviceAddressFeaturesEXT, transform_fromhost_VkPhysicalDeviceBufferAddressFeaturesEXT);
+
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkBufferDeviceAddressInfo, transform_tohost_VkBufferDeviceAddressInfoEXT);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkBufferDeviceAddressInfo, transform_fromhost_VkBufferDeviceAddressInfoEXT);
+
+void transform_tohost_VkBufferDeviceAddressCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkBufferDeviceAddressCreateInfoEXT* toTransform);
+
+void transform_fromhost_VkBufferDeviceAddressCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkBufferDeviceAddressCreateInfoEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_tooling_info
+void transform_tohost_VkPhysicalDeviceToolPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceToolPropertiesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceToolPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceToolPropertiesEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_separate_stencil_usage
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkImageStencilUsageCreateInfo, transform_tohost_VkImageStencilUsageCreateInfoEXT);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkImageStencilUsageCreateInfo, transform_fromhost_VkImageStencilUsageCreateInfoEXT);
+
+#endif
+#ifdef VK_EXT_validation_features
+void transform_tohost_VkValidationFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkValidationFeaturesEXT* toTransform);
+
+void transform_fromhost_VkValidationFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkValidationFeaturesEXT* toTransform);
+
+#endif
+#ifdef VK_NV_cooperative_matrix
+void transform_tohost_VkCooperativeMatrixPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkCooperativeMatrixPropertiesNV* toTransform);
+
+void transform_fromhost_VkCooperativeMatrixPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkCooperativeMatrixPropertiesNV* toTransform);
+
+void transform_tohost_VkPhysicalDeviceCooperativeMatrixFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCooperativeMatrixFeaturesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceCooperativeMatrixFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCooperativeMatrixFeaturesNV* toTransform);
+
+void transform_tohost_VkPhysicalDeviceCooperativeMatrixPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCooperativeMatrixPropertiesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceCooperativeMatrixPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCooperativeMatrixPropertiesNV* toTransform);
+
+#endif
+#ifdef VK_NV_coverage_reduction_mode
+void transform_tohost_VkPhysicalDeviceCoverageReductionModeFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCoverageReductionModeFeaturesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceCoverageReductionModeFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCoverageReductionModeFeaturesNV* toTransform);
+
+void transform_tohost_VkPipelineCoverageReductionStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineCoverageReductionStateCreateInfoNV* toTransform);
+
+void transform_fromhost_VkPipelineCoverageReductionStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineCoverageReductionStateCreateInfoNV* toTransform);
+
+void transform_tohost_VkFramebufferMixedSamplesCombinationNV(
+    ResourceTracker* resourceTracker,
+    VkFramebufferMixedSamplesCombinationNV* toTransform);
+
+void transform_fromhost_VkFramebufferMixedSamplesCombinationNV(
+    ResourceTracker* resourceTracker,
+    VkFramebufferMixedSamplesCombinationNV* toTransform);
+
+#endif
+#ifdef VK_EXT_fragment_shader_interlock
+void transform_tohost_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_ycbcr_image_arrays
+void transform_tohost_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceYcbcrImageArraysFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceYcbcrImageArraysFeaturesEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_full_screen_exclusive
+void transform_tohost_VkSurfaceFullScreenExclusiveInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkSurfaceFullScreenExclusiveInfoEXT* toTransform);
+
+void transform_fromhost_VkSurfaceFullScreenExclusiveInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkSurfaceFullScreenExclusiveInfoEXT* toTransform);
+
+void transform_tohost_VkSurfaceCapabilitiesFullScreenExclusiveEXT(
+    ResourceTracker* resourceTracker,
+    VkSurfaceCapabilitiesFullScreenExclusiveEXT* toTransform);
+
+void transform_fromhost_VkSurfaceCapabilitiesFullScreenExclusiveEXT(
+    ResourceTracker* resourceTracker,
+    VkSurfaceCapabilitiesFullScreenExclusiveEXT* toTransform);
+
+void transform_tohost_VkSurfaceFullScreenExclusiveWin32InfoEXT(
+    ResourceTracker* resourceTracker,
+    VkSurfaceFullScreenExclusiveWin32InfoEXT* toTransform);
+
+void transform_fromhost_VkSurfaceFullScreenExclusiveWin32InfoEXT(
+    ResourceTracker* resourceTracker,
+    VkSurfaceFullScreenExclusiveWin32InfoEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_headless_surface
+void transform_tohost_VkHeadlessSurfaceCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkHeadlessSurfaceCreateInfoEXT* toTransform);
+
+void transform_fromhost_VkHeadlessSurfaceCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkHeadlessSurfaceCreateInfoEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_line_rasterization
+void transform_tohost_VkPhysicalDeviceLineRasterizationFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceLineRasterizationFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceLineRasterizationFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceLineRasterizationFeaturesEXT* toTransform);
+
+void transform_tohost_VkPhysicalDeviceLineRasterizationPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceLineRasterizationPropertiesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceLineRasterizationPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceLineRasterizationPropertiesEXT* toTransform);
+
+void transform_tohost_VkPipelineRasterizationLineStateCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineRasterizationLineStateCreateInfoEXT* toTransform);
+
+void transform_fromhost_VkPipelineRasterizationLineStateCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPipelineRasterizationLineStateCreateInfoEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_shader_atomic_float
+void transform_tohost_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceShaderAtomicFloatFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderAtomicFloatFeaturesEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_host_query_reset
+DEFINE_ALIAS_FUNCTION(transform_tohost_VkPhysicalDeviceHostQueryResetFeatures, transform_tohost_VkPhysicalDeviceHostQueryResetFeaturesEXT);
+
+DEFINE_ALIAS_FUNCTION(transform_fromhost_VkPhysicalDeviceHostQueryResetFeatures, transform_fromhost_VkPhysicalDeviceHostQueryResetFeaturesEXT);
+
+#endif
+#ifdef VK_EXT_index_type_uint8
+void transform_tohost_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceIndexTypeUint8FeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceIndexTypeUint8FeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceIndexTypeUint8FeaturesEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_extended_dynamic_state
+void transform_tohost_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceExtendedDynamicStateFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceExtendedDynamicStateFeaturesEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_shader_demote_to_helper_invocation
+void transform_tohost_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT* toTransform);
+
+#endif
+#ifdef VK_NV_device_generated_commands
+void transform_tohost_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDeviceGeneratedCommandsPropertiesNV* toTransform);
+
+void transform_tohost_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDeviceGeneratedCommandsFeaturesNV* toTransform);
+
+void transform_tohost_VkGraphicsShaderGroupCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkGraphicsShaderGroupCreateInfoNV* toTransform);
+
+void transform_fromhost_VkGraphicsShaderGroupCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkGraphicsShaderGroupCreateInfoNV* toTransform);
+
+void transform_tohost_VkGraphicsPipelineShaderGroupsCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkGraphicsPipelineShaderGroupsCreateInfoNV* toTransform);
+
+void transform_fromhost_VkGraphicsPipelineShaderGroupsCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkGraphicsPipelineShaderGroupsCreateInfoNV* toTransform);
+
+void transform_tohost_VkBindShaderGroupIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkBindShaderGroupIndirectCommandNV* toTransform);
+
+void transform_fromhost_VkBindShaderGroupIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkBindShaderGroupIndirectCommandNV* toTransform);
+
+void transform_tohost_VkBindIndexBufferIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkBindIndexBufferIndirectCommandNV* toTransform);
+
+void transform_fromhost_VkBindIndexBufferIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkBindIndexBufferIndirectCommandNV* toTransform);
+
+void transform_tohost_VkBindVertexBufferIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkBindVertexBufferIndirectCommandNV* toTransform);
+
+void transform_fromhost_VkBindVertexBufferIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkBindVertexBufferIndirectCommandNV* toTransform);
+
+void transform_tohost_VkSetStateFlagsIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkSetStateFlagsIndirectCommandNV* toTransform);
+
+void transform_fromhost_VkSetStateFlagsIndirectCommandNV(
+    ResourceTracker* resourceTracker,
+    VkSetStateFlagsIndirectCommandNV* toTransform);
+
+void transform_tohost_VkIndirectCommandsStreamNV(
+    ResourceTracker* resourceTracker,
+    VkIndirectCommandsStreamNV* toTransform);
+
+void transform_fromhost_VkIndirectCommandsStreamNV(
+    ResourceTracker* resourceTracker,
+    VkIndirectCommandsStreamNV* toTransform);
+
+void transform_tohost_VkIndirectCommandsLayoutTokenNV(
+    ResourceTracker* resourceTracker,
+    VkIndirectCommandsLayoutTokenNV* toTransform);
+
+void transform_fromhost_VkIndirectCommandsLayoutTokenNV(
+    ResourceTracker* resourceTracker,
+    VkIndirectCommandsLayoutTokenNV* toTransform);
+
+void transform_tohost_VkIndirectCommandsLayoutCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkIndirectCommandsLayoutCreateInfoNV* toTransform);
+
+void transform_fromhost_VkIndirectCommandsLayoutCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkIndirectCommandsLayoutCreateInfoNV* toTransform);
+
+void transform_tohost_VkGeneratedCommandsInfoNV(
+    ResourceTracker* resourceTracker,
+    VkGeneratedCommandsInfoNV* toTransform);
+
+void transform_fromhost_VkGeneratedCommandsInfoNV(
+    ResourceTracker* resourceTracker,
+    VkGeneratedCommandsInfoNV* toTransform);
+
+void transform_tohost_VkGeneratedCommandsMemoryRequirementsInfoNV(
+    ResourceTracker* resourceTracker,
+    VkGeneratedCommandsMemoryRequirementsInfoNV* toTransform);
+
+void transform_fromhost_VkGeneratedCommandsMemoryRequirementsInfoNV(
+    ResourceTracker* resourceTracker,
+    VkGeneratedCommandsMemoryRequirementsInfoNV* toTransform);
+
+#endif
+#ifdef VK_EXT_texel_buffer_alignment
+void transform_tohost_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT* toTransform);
+
+void transform_tohost_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT* toTransform);
+
+#endif
+#ifdef VK_QCOM_render_pass_transform
+void transform_tohost_VkRenderPassTransformBeginInfoQCOM(
+    ResourceTracker* resourceTracker,
+    VkRenderPassTransformBeginInfoQCOM* toTransform);
+
+void transform_fromhost_VkRenderPassTransformBeginInfoQCOM(
+    ResourceTracker* resourceTracker,
+    VkRenderPassTransformBeginInfoQCOM* toTransform);
+
+void transform_tohost_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(
+    ResourceTracker* resourceTracker,
+    VkCommandBufferInheritanceRenderPassTransformInfoQCOM* toTransform);
+
+void transform_fromhost_VkCommandBufferInheritanceRenderPassTransformInfoQCOM(
+    ResourceTracker* resourceTracker,
+    VkCommandBufferInheritanceRenderPassTransformInfoQCOM* toTransform);
+
+#endif
+#ifdef VK_EXT_device_memory_report
+void transform_tohost_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceDeviceMemoryReportFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* toTransform);
+
+void transform_tohost_VkDeviceMemoryReportCallbackDataEXT(
+    ResourceTracker* resourceTracker,
+    VkDeviceMemoryReportCallbackDataEXT* toTransform);
+
+void transform_fromhost_VkDeviceMemoryReportCallbackDataEXT(
+    ResourceTracker* resourceTracker,
+    VkDeviceMemoryReportCallbackDataEXT* toTransform);
+
+void transform_tohost_VkDeviceDeviceMemoryReportCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDeviceDeviceMemoryReportCreateInfoEXT* toTransform);
+
+void transform_fromhost_VkDeviceDeviceMemoryReportCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDeviceDeviceMemoryReportCreateInfoEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_robustness2
+void transform_tohost_VkPhysicalDeviceRobustness2FeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRobustness2FeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceRobustness2FeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRobustness2FeaturesEXT* toTransform);
+
+void transform_tohost_VkPhysicalDeviceRobustness2PropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRobustness2PropertiesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceRobustness2PropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRobustness2PropertiesEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_custom_border_color
+void transform_tohost_VkSamplerCustomBorderColorCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkSamplerCustomBorderColorCreateInfoEXT* toTransform);
+
+void transform_fromhost_VkSamplerCustomBorderColorCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkSamplerCustomBorderColorCreateInfoEXT* toTransform);
+
+void transform_tohost_VkPhysicalDeviceCustomBorderColorPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCustomBorderColorPropertiesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceCustomBorderColorPropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCustomBorderColorPropertiesEXT* toTransform);
+
+void transform_tohost_VkPhysicalDeviceCustomBorderColorFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCustomBorderColorFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceCustomBorderColorFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceCustomBorderColorFeaturesEXT* toTransform);
+
+#endif
+#ifdef VK_GOOGLE_user_type
+#endif
+#ifdef VK_EXT_private_data
+void transform_tohost_VkPhysicalDevicePrivateDataFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePrivateDataFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDevicePrivateDataFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePrivateDataFeaturesEXT* toTransform);
+
+void transform_tohost_VkDevicePrivateDataCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDevicePrivateDataCreateInfoEXT* toTransform);
+
+void transform_fromhost_VkDevicePrivateDataCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDevicePrivateDataCreateInfoEXT* toTransform);
+
+void transform_tohost_VkPrivateDataSlotCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPrivateDataSlotCreateInfoEXT* toTransform);
+
+void transform_fromhost_VkPrivateDataSlotCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkPrivateDataSlotCreateInfoEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_pipeline_creation_cache_control
+void transform_tohost_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT* toTransform);
+
+#endif
+#ifdef VK_NV_device_diagnostics_config
+void transform_tohost_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDiagnosticsConfigFeaturesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceDiagnosticsConfigFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceDiagnosticsConfigFeaturesNV* toTransform);
+
+void transform_tohost_VkDeviceDiagnosticsConfigCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkDeviceDiagnosticsConfigCreateInfoNV* toTransform);
+
+void transform_fromhost_VkDeviceDiagnosticsConfigCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkDeviceDiagnosticsConfigCreateInfoNV* toTransform);
+
+#endif
+#ifdef VK_QCOM_render_pass_store_ops
+#endif
+#ifdef VK_NV_fragment_shading_rate_enums
+void transform_tohost_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRateEnumsFeaturesNV* toTransform);
+
+void transform_tohost_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentShadingRateEnumsPropertiesNV* toTransform);
+
+void transform_tohost_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineFragmentShadingRateEnumStateCreateInfoNV* toTransform);
+
+void transform_fromhost_VkPipelineFragmentShadingRateEnumStateCreateInfoNV(
+    ResourceTracker* resourceTracker,
+    VkPipelineFragmentShadingRateEnumStateCreateInfoNV* toTransform);
+
+#endif
+#ifdef VK_EXT_fragment_density_map2
+void transform_tohost_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceFragmentDensityMap2FeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentDensityMap2FeaturesEXT* toTransform);
+
+void transform_tohost_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceFragmentDensityMap2PropertiesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceFragmentDensityMap2PropertiesEXT* toTransform);
+
+#endif
+#ifdef VK_QCOM_rotated_copy_commands
+void transform_tohost_VkCopyCommandTransformInfoQCOM(
+    ResourceTracker* resourceTracker,
+    VkCopyCommandTransformInfoQCOM* toTransform);
+
+void transform_fromhost_VkCopyCommandTransformInfoQCOM(
+    ResourceTracker* resourceTracker,
+    VkCopyCommandTransformInfoQCOM* toTransform);
+
+#endif
+#ifdef VK_EXT_image_robustness
+void transform_tohost_VkPhysicalDeviceImageRobustnessFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceImageRobustnessFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceImageRobustnessFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceImageRobustnessFeaturesEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_4444_formats
+void transform_tohost_VkPhysicalDevice4444FormatsFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevice4444FormatsFeaturesEXT* toTransform);
+
+void transform_fromhost_VkPhysicalDevice4444FormatsFeaturesEXT(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDevice4444FormatsFeaturesEXT* toTransform);
+
+#endif
+#ifdef VK_EXT_directfb_surface
+void transform_tohost_VkDirectFBSurfaceCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDirectFBSurfaceCreateInfoEXT* toTransform);
+
+void transform_fromhost_VkDirectFBSurfaceCreateInfoEXT(
+    ResourceTracker* resourceTracker,
+    VkDirectFBSurfaceCreateInfoEXT* toTransform);
+
+#endif
+#ifdef VK_GOOGLE_gfxstream
 void transform_tohost_VkImportColorBufferGOOGLE(
     ResourceTracker* resourceTracker,
     VkImportColorBufferGOOGLE* toTransform);
@@ -3054,6 +5430,14 @@
     ResourceTracker* resourceTracker,
     VkImportColorBufferGOOGLE* toTransform);
 
+void transform_tohost_VkImportBufferGOOGLE(
+    ResourceTracker* resourceTracker,
+    VkImportBufferGOOGLE* toTransform);
+
+void transform_fromhost_VkImportBufferGOOGLE(
+    ResourceTracker* resourceTracker,
+    VkImportBufferGOOGLE* toTransform);
+
 void transform_tohost_VkImportPhysicalAddressGOOGLE(
     ResourceTracker* resourceTracker,
     VkImportPhysicalAddressGOOGLE* toTransform);
@@ -3063,15 +5447,227 @@
     VkImportPhysicalAddressGOOGLE* toTransform);
 
 #endif
-#ifdef VK_GOOGLE_sized_descriptor_update_template
+#ifdef VK_KHR_acceleration_structure
+void transform_tohost_VkDeviceOrHostAddressKHR(
+    ResourceTracker* resourceTracker,
+    VkDeviceOrHostAddressKHR* toTransform);
+
+void transform_fromhost_VkDeviceOrHostAddressKHR(
+    ResourceTracker* resourceTracker,
+    VkDeviceOrHostAddressKHR* toTransform);
+
+void transform_tohost_VkDeviceOrHostAddressConstKHR(
+    ResourceTracker* resourceTracker,
+    VkDeviceOrHostAddressConstKHR* toTransform);
+
+void transform_fromhost_VkDeviceOrHostAddressConstKHR(
+    ResourceTracker* resourceTracker,
+    VkDeviceOrHostAddressConstKHR* toTransform);
+
+void transform_tohost_VkAccelerationStructureBuildRangeInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureBuildRangeInfoKHR* toTransform);
+
+void transform_fromhost_VkAccelerationStructureBuildRangeInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureBuildRangeInfoKHR* toTransform);
+
+void transform_tohost_VkAccelerationStructureGeometryTrianglesDataKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryTrianglesDataKHR* toTransform);
+
+void transform_fromhost_VkAccelerationStructureGeometryTrianglesDataKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryTrianglesDataKHR* toTransform);
+
+void transform_tohost_VkAccelerationStructureGeometryAabbsDataKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryAabbsDataKHR* toTransform);
+
+void transform_fromhost_VkAccelerationStructureGeometryAabbsDataKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryAabbsDataKHR* toTransform);
+
+void transform_tohost_VkAccelerationStructureGeometryInstancesDataKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryInstancesDataKHR* toTransform);
+
+void transform_fromhost_VkAccelerationStructureGeometryInstancesDataKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryInstancesDataKHR* toTransform);
+
+void transform_tohost_VkAccelerationStructureGeometryDataKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryDataKHR* toTransform);
+
+void transform_fromhost_VkAccelerationStructureGeometryDataKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryDataKHR* toTransform);
+
+void transform_tohost_VkAccelerationStructureGeometryKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryKHR* toTransform);
+
+void transform_fromhost_VkAccelerationStructureGeometryKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureGeometryKHR* toTransform);
+
+void transform_tohost_VkAccelerationStructureBuildGeometryInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureBuildGeometryInfoKHR* toTransform);
+
+void transform_fromhost_VkAccelerationStructureBuildGeometryInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureBuildGeometryInfoKHR* toTransform);
+
+void transform_tohost_VkAccelerationStructureCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureCreateInfoKHR* toTransform);
+
+void transform_fromhost_VkAccelerationStructureCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureCreateInfoKHR* toTransform);
+
+void transform_tohost_VkWriteDescriptorSetAccelerationStructureKHR(
+    ResourceTracker* resourceTracker,
+    VkWriteDescriptorSetAccelerationStructureKHR* toTransform);
+
+void transform_fromhost_VkWriteDescriptorSetAccelerationStructureKHR(
+    ResourceTracker* resourceTracker,
+    VkWriteDescriptorSetAccelerationStructureKHR* toTransform);
+
+void transform_tohost_VkPhysicalDeviceAccelerationStructureFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceAccelerationStructureFeaturesKHR* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceAccelerationStructureFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceAccelerationStructureFeaturesKHR* toTransform);
+
+void transform_tohost_VkPhysicalDeviceAccelerationStructurePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceAccelerationStructurePropertiesKHR* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceAccelerationStructurePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceAccelerationStructurePropertiesKHR* toTransform);
+
+void transform_tohost_VkAccelerationStructureDeviceAddressInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureDeviceAddressInfoKHR* toTransform);
+
+void transform_fromhost_VkAccelerationStructureDeviceAddressInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureDeviceAddressInfoKHR* toTransform);
+
+void transform_tohost_VkAccelerationStructureVersionInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureVersionInfoKHR* toTransform);
+
+void transform_fromhost_VkAccelerationStructureVersionInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureVersionInfoKHR* toTransform);
+
+void transform_tohost_VkCopyAccelerationStructureToMemoryInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkCopyAccelerationStructureToMemoryInfoKHR* toTransform);
+
+void transform_fromhost_VkCopyAccelerationStructureToMemoryInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkCopyAccelerationStructureToMemoryInfoKHR* toTransform);
+
+void transform_tohost_VkCopyMemoryToAccelerationStructureInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkCopyMemoryToAccelerationStructureInfoKHR* toTransform);
+
+void transform_fromhost_VkCopyMemoryToAccelerationStructureInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkCopyMemoryToAccelerationStructureInfoKHR* toTransform);
+
+void transform_tohost_VkCopyAccelerationStructureInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkCopyAccelerationStructureInfoKHR* toTransform);
+
+void transform_fromhost_VkCopyAccelerationStructureInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkCopyAccelerationStructureInfoKHR* toTransform);
+
+void transform_tohost_VkAccelerationStructureBuildSizesInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureBuildSizesInfoKHR* toTransform);
+
+void transform_fromhost_VkAccelerationStructureBuildSizesInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkAccelerationStructureBuildSizesInfoKHR* toTransform);
+
 #endif
-#ifdef VK_GOOGLE_async_command_buffers
+#ifdef VK_KHR_ray_tracing_pipeline
+void transform_tohost_VkRayTracingShaderGroupCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkRayTracingShaderGroupCreateInfoKHR* toTransform);
+
+void transform_fromhost_VkRayTracingShaderGroupCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkRayTracingShaderGroupCreateInfoKHR* toTransform);
+
+void transform_tohost_VkRayTracingPipelineInterfaceCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkRayTracingPipelineInterfaceCreateInfoKHR* toTransform);
+
+void transform_fromhost_VkRayTracingPipelineInterfaceCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkRayTracingPipelineInterfaceCreateInfoKHR* toTransform);
+
+void transform_tohost_VkRayTracingPipelineCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkRayTracingPipelineCreateInfoKHR* toTransform);
+
+void transform_fromhost_VkRayTracingPipelineCreateInfoKHR(
+    ResourceTracker* resourceTracker,
+    VkRayTracingPipelineCreateInfoKHR* toTransform);
+
+void transform_tohost_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRayTracingPipelineFeaturesKHR* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceRayTracingPipelineFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRayTracingPipelineFeaturesKHR* toTransform);
+
+void transform_tohost_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRayTracingPipelinePropertiesKHR* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceRayTracingPipelinePropertiesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRayTracingPipelinePropertiesKHR* toTransform);
+
+void transform_tohost_VkStridedDeviceAddressRegionKHR(
+    ResourceTracker* resourceTracker,
+    VkStridedDeviceAddressRegionKHR* toTransform);
+
+void transform_fromhost_VkStridedDeviceAddressRegionKHR(
+    ResourceTracker* resourceTracker,
+    VkStridedDeviceAddressRegionKHR* toTransform);
+
+void transform_tohost_VkTraceRaysIndirectCommandKHR(
+    ResourceTracker* resourceTracker,
+    VkTraceRaysIndirectCommandKHR* toTransform);
+
+void transform_fromhost_VkTraceRaysIndirectCommandKHR(
+    ResourceTracker* resourceTracker,
+    VkTraceRaysIndirectCommandKHR* toTransform);
+
 #endif
-#ifdef VK_GOOGLE_create_resources_with_requirements
-#endif
-#ifdef VK_GOOGLE_address_space_info
-#endif
-#ifdef VK_GOOGLE_free_memory_sync
+#ifdef VK_KHR_ray_query
+void transform_tohost_VkPhysicalDeviceRayQueryFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRayQueryFeaturesKHR* toTransform);
+
+void transform_fromhost_VkPhysicalDeviceRayQueryFeaturesKHR(
+    ResourceTracker* resourceTracker,
+    VkPhysicalDeviceRayQueryFeaturesKHR* toTransform);
+
 #endif
 
 } // namespace goldfish_vk
diff --git a/system/vulkan_enc/vkQueueFlushCommandsGOOGLE_encode_impl.cpp.inl b/system/vulkan_enc/vkQueueFlushCommandsGOOGLE_encode_impl.cpp.inl
new file mode 100644
index 0000000..a43a0b5
--- /dev/null
+++ b/system/vulkan_enc/vkQueueFlushCommandsGOOGLE_encode_impl.cpp.inl
@@ -0,0 +1,57 @@
+// Manual inline for
+// void VkEncoder::vkQueueFlushCommandsGOOGLE( VkQueue queue, VkCommandBuffer commandBuffer, VkDeviceSize dataSize, const void* pData, uint32_t doLock);
+
+// We won't use the lock if this command is used (VulkanQueueSubmitWithCommands is enabled)
+(void)doLock;
+
+auto stream = mImpl->stream();
+auto pool = mImpl->pool();
+VkQueue local_queue;
+VkCommandBuffer local_commandBuffer;
+VkDeviceSize local_dataSize;
+void* local_pData;
+local_queue = queue;
+local_commandBuffer = commandBuffer;
+local_dataSize = dataSize;
+// Avoiding deepcopy for pData
+local_pData = (void*)pData;
+size_t count = 0;
+size_t* countPtr = &count;
+{
+    uint64_t cgen_var_1405;
+    *countPtr += 1 * 8;
+    uint64_t cgen_var_1406;
+    *countPtr += 1 * 8;
+    *countPtr += sizeof(VkDeviceSize);
+    *countPtr += ((dataSize)) * sizeof(uint8_t);
+}
+bool queueSubmitWithCommandsEnabled = sFeatureBits & VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
+uint32_t packetSize_vkQueueFlushCommandsGOOGLE = 4 + 4 + (queueSubmitWithCommandsEnabled ? 4 : 0) + count;
+uint8_t* streamPtr = stream->reserve(packetSize_vkQueueFlushCommandsGOOGLE - local_dataSize);
+uint8_t** streamPtrPtr = &streamPtr;
+uint32_t opcode_vkQueueFlushCommandsGOOGLE = OP_vkQueueFlushCommandsGOOGLE;
+uint32_t seqno = ResourceTracker::nextSeqno();
+memcpy(streamPtr, &opcode_vkQueueFlushCommandsGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+memcpy(streamPtr, &packetSize_vkQueueFlushCommandsGOOGLE, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+memcpy(streamPtr, &seqno, sizeof(uint32_t)); streamPtr += sizeof(uint32_t);
+uint64_t cgen_var_1407;
+*&cgen_var_1407 = get_host_u64_VkQueue((*&local_queue));
+memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1407, 1 * 8);
+*streamPtrPtr += 1 * 8;
+uint64_t cgen_var_1408;
+*&cgen_var_1408 = get_host_u64_VkCommandBuffer((*&local_commandBuffer));
+memcpy(*streamPtrPtr, (uint64_t*)&cgen_var_1408, 1 * 8);
+*streamPtrPtr += 1 * 8;
+memcpy(*streamPtrPtr, (VkDeviceSize*)&local_dataSize, sizeof(VkDeviceSize));
+*streamPtrPtr += sizeof(VkDeviceSize);
+
+AEMU_SCOPED_TRACE("vkQueueFlush large xfer");
+stream->flush();
+stream->writeLarge(local_pData, dataSize);
+
+++encodeCount;;
+if (0 == encodeCount % POOL_CLEAR_INTERVAL)
+{
+    pool->freeAll();
+    stream->clearPool();
+}
diff --git a/system/vulkan_enc/vk_format_info.h b/system/vulkan_enc/vk_format_info.h
index dd9c99b..f96d59e 100644
--- a/system/vulkan_enc/vk_format_info.h
+++ b/system/vulkan_enc/vk_format_info.h
@@ -29,6 +29,11 @@
 #include <stdbool.h>
 #ifdef VK_USE_PLATFORM_ANDROID_KHR
 #include <system/graphics.h>
+#else
+/* See system/graphics.h. */
+enum {
+    HAL_PIXEL_FORMAT_YV12 = 842094169,
+};
 #endif
 #include <vulkan/vulkan.h>
 #include <vndk/hardware_buffer.h>
@@ -36,6 +41,11 @@
 /* See i915_private_android_types.h in minigbm. */
 #define HAL_PIXEL_FORMAT_NV12_Y_TILED_INTEL 0x100
 
+// TODO(b/167698976): We should not use OMX_COLOR_FormatYUV420Planar
+// but we seem to miss a format translation somewhere.
+
+#define OMX_COLOR_FormatYUV420Planar 0x13
+
 static inline VkFormat
 vk_format_from_android(unsigned android_format)
 {
@@ -54,11 +64,12 @@
       return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
    case HAL_PIXEL_FORMAT_NV12_Y_TILED_INTEL:
       return VK_FORMAT_G8_B8R8_2PLANE_420_UNORM;
+   case AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420:
+      return VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM;
 #ifdef VK_USE_PLATFORM_ANDROID_KHR
    case HAL_PIXEL_FORMAT_YV12:
-      // YUV converter will convert this format to R8G8B8A8
-      // TODO: should we use R8G8B8A8 for other YUV format as well?
-      return VK_FORMAT_R8G8B8A8_UNORM;
+   case OMX_COLOR_FormatYUV420Planar:
+      return VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM;
    case AHARDWAREBUFFER_FORMAT_BLOB:
 #endif
    default:
@@ -82,6 +93,8 @@
       return AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM;
    case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
       return HAL_PIXEL_FORMAT_NV12_Y_TILED_INTEL;
+   case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
+      return HAL_PIXEL_FORMAT_YV12;
    default:
       return AHARDWAREBUFFER_FORMAT_BLOB;
    }
@@ -92,6 +105,9 @@
 {
    switch (android_format) {
    case HAL_PIXEL_FORMAT_NV12_Y_TILED_INTEL:
+   case OMX_COLOR_FormatYUV420Planar:
+   case HAL_PIXEL_FORMAT_YV12:
+   case AHARDWAREBUFFER_FORMAT_Y8Cb8Cr8_420:
       return true;
 
    default:
diff --git a/system/vulkan_enc/vk_struct_id.h b/system/vulkan_enc/vk_struct_id.h
index 1a33438..5a7328a 100644
--- a/system/vulkan_enc/vk_struct_id.h
+++ b/system/vulkan_enc/vk_struct_id.h
@@ -44,11 +44,19 @@
 REGISTER_VK_STRUCT_ID(VkImportColorBufferGOOGLE, VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE);
 REGISTER_VK_STRUCT_ID(VkImageViewCreateInfo, VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO);
 REGISTER_VK_STRUCT_ID(VkImportMemoryBufferCollectionFUCHSIA, VK_STRUCTURE_TYPE_IMPORT_MEMORY_BUFFER_COLLECTION_FUCHSIA);
-REGISTER_VK_STRUCT_ID(VkImportMemoryZirconHandleInfoFUCHSIA, VK_STRUCTURE_TYPE_TEMP_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA);
+REGISTER_VK_STRUCT_ID(VkImportMemoryZirconHandleInfoFUCHSIA, VK_STRUCTURE_TYPE_IMPORT_MEMORY_ZIRCON_HANDLE_INFO_FUCHSIA);
 REGISTER_VK_STRUCT_ID(VkBufferCollectionImageCreateInfoFUCHSIA, VK_STRUCTURE_TYPE_BUFFER_COLLECTION_IMAGE_CREATE_INFO_FUCHSIA);
+REGISTER_VK_STRUCT_ID(VkBufferCollectionBufferCreateInfoFUCHSIA, VK_STRUCTURE_TYPE_BUFFER_COLLECTION_BUFFER_CREATE_INFO_FUCHSIA);
 REGISTER_VK_STRUCT_ID(VkSamplerCreateInfo, VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
 REGISTER_VK_STRUCT_ID(VkSamplerYcbcrConversionInfo, VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO);
 REGISTER_VK_STRUCT_ID(VkFenceCreateInfo, VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
 REGISTER_VK_STRUCT_ID(VkExportFenceCreateInfo, VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO);
+REGISTER_VK_STRUCT_ID(VkImportBufferGOOGLE, VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE);
+REGISTER_VK_STRUCT_ID(VkExternalImageFormatProperties, VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES);
+REGISTER_VK_STRUCT_ID(VkPhysicalDeviceImageFormatInfo2, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2);
+REGISTER_VK_STRUCT_ID(VkPhysicalDeviceExternalImageFormatInfo, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO);
+REGISTER_VK_STRUCT_ID(VkSemaphoreTypeCreateInfo, VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO);
+REGISTER_VK_STRUCT_ID(VkPhysicalDeviceProperties2, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2);
+REGISTER_VK_STRUCT_ID(VkPhysicalDeviceDeviceMemoryReportFeaturesEXT, VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT);
 
 #undef REGISTER_VK_STRUCT_ID
diff --git a/tests/gles_android_wrapper/Android.mk b/tests/gles_android_wrapper/Android.mk
index 13e0cfc..78bab3c 100644
--- a/tests/gles_android_wrapper/Android.mk
+++ b/tests/gles_android_wrapper/Android.mk
@@ -53,6 +53,9 @@
 include $(CLEAR_VARS)
 
 LOCAL_MODULE := egl.cfg
+LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0
+LOCAL_LICENSE_CONDITIONS := notice
+LOCAL_NOTICE_FILE := $(LOCAL_PATH)/../../LICENSE
 LOCAL_SRC_FILES := $(LOCAL_MODULE)
 
 LOCAL_MODULE_PATH := $(TARGET_OUT)/lib/egl
@@ -66,6 +69,9 @@
 include $(CLEAR_VARS)
 
 LOCAL_MODULE := gles_emul.cfg
+LOCAL_LICENSE_KINDS := SPDX-license-identifier-Apache-2.0
+LOCAL_LICENSE_CONDITIONS := notice
+LOCAL_NOTICE_FILE := $(LOCAL_PATH)/../../LICENSE
 LOCAL_SRC_FILES := $(LOCAL_MODULE)
 
 LOCAL_MODULE_PATH := $(TARGET_OUT)/etc
diff --git a/tests/gles_android_wrapper/ThreadInfo.cpp b/tests/gles_android_wrapper/ThreadInfo.cpp
index 5bf6a7d..c106866 100644
--- a/tests/gles_android_wrapper/ThreadInfo.cpp
+++ b/tests/gles_android_wrapper/ThreadInfo.cpp
@@ -13,10 +13,12 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-#include "ThreadInfo.h"
-#include "cutils/threads.h"
 
-thread_store_t s_tls = THREAD_STORE_INITIALIZER;
+#include "ThreadInfo.h"
+
+#include <pthread.h>
+
+static pthread_key_t s_tls;
 
 static void tlsDestruct(void *ptr)
 {
@@ -27,13 +29,16 @@
     }
 }
 
+static void init_key()
+{
+    pthread_key_create(&s_tls, tlsDestruct);
+    pthread_setspecific(s_tls, new EGLThreadInfo);
+}
+
 EGLThreadInfo *getEGLThreadInfo()
 {
-    EGLThreadInfo *ti = (EGLThreadInfo *)thread_store_get(&s_tls);
-    if (ti) return ti;
+    static pthread_once_t once = PTHREAD_ONCE_INIT;
+    pthread_once(&once, init_key);
 
-    ti = new EGLThreadInfo();
-    thread_store_set(&s_tls, ti, tlsDestruct);
-
-    return ti;
+    return (EGLThreadInfo *) pthread_getspecific(s_tls);
 }